Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'nfs-for-4.19-1' of git://git.linux-nfs.org/projects/anna/linux-nfs

Pull NFS client updates from Anna Schumaker:
"These patches include adding async support for the v4.2 COPY
operation. I think Bruce is planning to send the server patches for
the next release, but I figured we could get the client side out of
the way now since it's been in my tree for a while. This shouldn't
cause any problems, since the server will still respond with
synchronous copies even if the client requests async.

Features:
- Add support for asynchronous server-side COPY operations

Stable bugfixes:
- Fix an off-by-one in bl_map_stripe() (v3.17+)
- NFSv4 client live hangs after live data migration recovery (v4.9+)
- xprtrdma: Fix disconnect regression (v4.18+)
- Fix locking in pnfs_generic_recover_commit_reqs (v4.14+)
- Fix a sleep in atomic context in nfs4_callback_sequence() (v4.9+)

Other bugfixes and cleanups:
- Optimizations and fixes involving NFS v4.1 / pNFS layout handling
- Optimize lseek(fd, SEEK_CUR, 0) on directories to avoid locking
- Immediately reschedule writeback when the server replies with an
error
- Fix excessive attribute revalidation in nfs_execute_ok()
- Add error checking to nfs_idmap_prepare_message()
- Use new vm_fault_t return type
- Return a delegation when reclaiming one that the server has
recalled
- Referrals should inherit proto setting from parents
- Make rpc_auth_create_args a const
- Improvements to rpc_iostats tracking
- Fix a potential reference leak when there is an error processing a
callback
- Fix rmdir / mkdir / rename nlink accounting
- Fix updating inode change attribute
- Fix error handling in nfs4_sp4_select_mode()
- Use an appropriate work queue for direct-write completion
- Don't busy wait if NFSv4 session draining is interrupted"

* tag 'nfs-for-4.19-1' of git://git.linux-nfs.org/projects/anna/linux-nfs: (54 commits)
pNFS: Remove unwanted optimisation of layoutget
pNFS/flexfiles: ff_layout_pg_init_read should exit on error
pNFS: Treat RECALLCONFLICT like DELAY...
pNFS: When updating the stateid in layoutreturn, also update the recall range
NFSv4: Fix a sleep in atomic context in nfs4_callback_sequence()
NFSv4: Fix locking in pnfs_generic_recover_commit_reqs
NFSv4: Fix a typo in nfs4_init_channel_attrs()
NFSv4: Don't busy wait if NFSv4 session draining is interrupted
NFS recover from destination server reboot for copies
NFS add a simple sync nfs4_proc_commit after async COPY
NFS handle COPY ERR_OFFLOAD_NO_REQS
NFS send OFFLOAD_CANCEL when COPY killed
NFS export nfs4_async_handle_error
NFS handle COPY reply CB_OFFLOAD call race
NFS add support for asynchronous COPY
NFS COPY xdr handle async reply
NFS OFFLOAD_CANCEL xdr
NFS CB_OFFLOAD xdr
NFS: Use an appropriate work queue for direct-write completion
NFSv4: Fix error handling in nfs4_sp4_select_mode()
...

+924 -216
+1
fs/nfs/blocklayout/blocklayout.c
··· 753 753 case -ENODEV: 754 754 /* Our extent block devices are unavailable */ 755 755 set_bit(NFS_LSEG_UNAVAILABLE, &lseg->pls_flags); 756 + /* Fall through */ 756 757 case 0: 757 758 return lseg; 758 759 default:
+1 -1
fs/nfs/blocklayout/dev.c
··· 204 204 chunk = div_u64(offset, dev->chunk_size); 205 205 div_u64_rem(chunk, dev->nr_children, &chunk_idx); 206 206 207 - if (chunk_idx > dev->nr_children) { 207 + if (chunk_idx >= dev->nr_children) { 208 208 dprintk("%s: invalid chunk idx %d (%lld/%lld)\n", 209 209 __func__, chunk_idx, offset, dev->chunk_size); 210 210 /* error, should not happen */
+12
fs/nfs/callback.h
··· 184 184 extern __be32 nfs4_callback_notify_lock(void *argp, void *resp, 185 185 struct cb_process_state *cps); 186 186 #endif /* CONFIG_NFS_V4_1 */ 187 + #ifdef CONFIG_NFS_V4_2 188 + struct cb_offloadargs { 189 + struct nfs_fh coa_fh; 190 + nfs4_stateid coa_stateid; 191 + uint32_t error; 192 + uint64_t wr_count; 193 + struct nfs_writeverf wr_writeverf; 194 + }; 195 + 196 + extern __be32 nfs4_callback_offload(void *args, void *dummy, 197 + struct cb_process_state *cps); 198 + #endif /* CONFIG_NFS_V4_2 */ 187 199 extern int check_gss_callback_principal(struct nfs_client *, struct svc_rqst *); 188 200 extern __be32 nfs4_callback_getattr(void *argp, void *resp, 189 201 struct cb_process_state *cps);
+79 -18
fs/nfs/callback_proc.c
··· 215 215 { 216 216 u32 oldseq, newseq; 217 217 218 - /* Is the stateid still not initialised? */ 218 + /* Is the stateid not initialised? */ 219 219 if (!pnfs_layout_is_valid(lo)) 220 - return NFS4ERR_DELAY; 220 + return NFS4ERR_NOMATCHING_LAYOUT; 221 221 222 222 /* Mismatched stateid? */ 223 223 if (!nfs4_stateid_match_other(&lo->plh_stateid, new)) ··· 273 273 rv = pnfs_check_callback_stateid(lo, &args->cbl_stateid); 274 274 if (rv != NFS_OK) 275 275 goto unlock; 276 - pnfs_set_layout_stateid(lo, &args->cbl_stateid, true); 277 276 278 277 /* 279 278 * Enforce RFC5661 Section 12.5.5.2.1.5 (Bulk Recall and Return) ··· 282 283 goto unlock; 283 284 } 284 285 285 - if (pnfs_mark_matching_lsegs_return(lo, &free_me_list, 286 + pnfs_set_layout_stateid(lo, &args->cbl_stateid, true); 287 + switch (pnfs_mark_matching_lsegs_return(lo, &free_me_list, 286 288 &args->cbl_range, 287 289 be32_to_cpu(args->cbl_stateid.seqid))) { 290 + case 0: 291 + case -EBUSY: 292 + /* There are layout segments that need to be returned */ 288 293 rv = NFS4_OK; 289 - goto unlock; 290 - } 294 + break; 295 + case -ENOENT: 296 + /* Embrace your forgetfulness! */ 297 + rv = NFS4ERR_NOMATCHING_LAYOUT; 291 298 292 - /* Embrace your forgetfulness! 
*/ 293 - rv = NFS4ERR_NOMATCHING_LAYOUT; 294 - 295 - if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) { 296 - NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo, 297 - &args->cbl_range); 299 + if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) { 300 + NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo, 301 + &args->cbl_range); 302 + } 298 303 } 299 304 unlock: 300 305 spin_unlock(&ino->i_lock); ··· 331 328 static u32 do_callback_layoutrecall(struct nfs_client *clp, 332 329 struct cb_layoutrecallargs *args) 333 330 { 334 - write_seqcount_begin(&clp->cl_callback_count); 335 - write_seqcount_end(&clp->cl_callback_count); 336 331 if (args->cbl_recall_type == RETURN_FILE) 337 332 return initiate_file_draining(clp, args); 338 333 return initiate_bulk_draining(clp, args); ··· 442 441 * a match. If the slot is in use and the sequence numbers match, the 443 442 * client is still waiting for a response to the original request. 444 443 */ 445 - static bool referring_call_exists(struct nfs_client *clp, 444 + static int referring_call_exists(struct nfs_client *clp, 446 445 uint32_t nrclists, 447 - struct referring_call_list *rclists) 446 + struct referring_call_list *rclists, 447 + spinlock_t *lock) 448 + __releases(lock) 449 + __acquires(lock) 448 450 { 449 - bool status = false; 451 + int status = 0; 450 452 int i, j; 451 453 struct nfs4_session *session; 452 454 struct nfs4_slot_table *tbl; ··· 472 468 473 469 for (j = 0; j < rclist->rcl_nrefcalls; j++) { 474 470 ref = &rclist->rcl_refcalls[j]; 471 + spin_unlock(lock); 475 472 status = nfs4_slot_wait_on_seqid(tbl, ref->rc_slotid, 476 473 ref->rc_sequenceid, HZ >> 1) < 0; 474 + spin_lock(lock); 477 475 if (status) 478 476 goto out; 479 477 } ··· 552 546 * related callback was received before the response to the original 553 547 * call. 
554 548 */ 555 - if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists)) { 549 + if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists, 550 + &tbl->slot_tbl_lock) < 0) { 556 551 status = htonl(NFS4ERR_DELAY); 557 552 goto out_unlock; 558 553 } ··· 667 660 return htonl(NFS4_OK); 668 661 } 669 662 #endif /* CONFIG_NFS_V4_1 */ 663 + #ifdef CONFIG_NFS_V4_2 664 + static void nfs4_copy_cb_args(struct nfs4_copy_state *cp_state, 665 + struct cb_offloadargs *args) 666 + { 667 + cp_state->count = args->wr_count; 668 + cp_state->error = args->error; 669 + if (!args->error) { 670 + cp_state->verf.committed = args->wr_writeverf.committed; 671 + memcpy(&cp_state->verf.verifier.data[0], 672 + &args->wr_writeverf.verifier.data[0], 673 + NFS4_VERIFIER_SIZE); 674 + } 675 + } 676 + 677 + __be32 nfs4_callback_offload(void *data, void *dummy, 678 + struct cb_process_state *cps) 679 + { 680 + struct cb_offloadargs *args = data; 681 + struct nfs_server *server; 682 + struct nfs4_copy_state *copy; 683 + bool found = false; 684 + 685 + spin_lock(&cps->clp->cl_lock); 686 + rcu_read_lock(); 687 + list_for_each_entry_rcu(server, &cps->clp->cl_superblocks, 688 + client_link) { 689 + list_for_each_entry(copy, &server->ss_copies, copies) { 690 + if (memcmp(args->coa_stateid.other, 691 + copy->stateid.other, 692 + sizeof(args->coa_stateid.other))) 693 + continue; 694 + nfs4_copy_cb_args(copy, args); 695 + complete(&copy->completion); 696 + found = true; 697 + goto out; 698 + } 699 + } 700 + out: 701 + rcu_read_unlock(); 702 + if (!found) { 703 + copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS); 704 + if (!copy) { 705 + spin_unlock(&cps->clp->cl_lock); 706 + return htonl(NFS4ERR_SERVERFAULT); 707 + } 708 + memcpy(&copy->stateid, &args->coa_stateid, NFS4_STATEID_SIZE); 709 + nfs4_copy_cb_args(copy, args); 710 + list_add_tail(&copy->copies, &cps->clp->pending_cb_stateids); 711 + } 712 + spin_unlock(&cps->clp->cl_lock); 713 + 714 + return 0; 715 + } 716 + #endif 
/* CONFIG_NFS_V4_2 */
+87 -4
fs/nfs/callback_xdr.c
··· 38 38 #define CB_OP_RECALLSLOT_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ) 39 39 #define CB_OP_NOTIFY_LOCK_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ) 40 40 #endif /* CONFIG_NFS_V4_1 */ 41 + #ifdef CONFIG_NFS_V4_2 42 + #define CB_OP_OFFLOAD_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ) 43 + #endif /* CONFIG_NFS_V4_2 */ 41 44 42 45 #define NFSDBG_FACILITY NFSDBG_CALLBACK 43 46 ··· 530 527 } 531 528 532 529 #endif /* CONFIG_NFS_V4_1 */ 530 + #ifdef CONFIG_NFS_V4_2 531 + static __be32 decode_write_response(struct xdr_stream *xdr, 532 + struct cb_offloadargs *args) 533 + { 534 + __be32 *p; 533 535 536 + /* skip the always zero field */ 537 + p = read_buf(xdr, 4); 538 + if (unlikely(!p)) 539 + goto out; 540 + p++; 541 + 542 + /* decode count, stable_how, verifier */ 543 + p = xdr_inline_decode(xdr, 8 + 4); 544 + if (unlikely(!p)) 545 + goto out; 546 + p = xdr_decode_hyper(p, &args->wr_count); 547 + args->wr_writeverf.committed = be32_to_cpup(p); 548 + p = xdr_inline_decode(xdr, NFS4_VERIFIER_SIZE); 549 + if (likely(p)) { 550 + memcpy(&args->wr_writeverf.verifier.data[0], p, 551 + NFS4_VERIFIER_SIZE); 552 + return 0; 553 + } 554 + out: 555 + return htonl(NFS4ERR_RESOURCE); 556 + } 557 + 558 + static __be32 decode_offload_args(struct svc_rqst *rqstp, 559 + struct xdr_stream *xdr, 560 + void *data) 561 + { 562 + struct cb_offloadargs *args = data; 563 + __be32 *p; 564 + __be32 status; 565 + 566 + /* decode fh */ 567 + status = decode_fh(xdr, &args->coa_fh); 568 + if (unlikely(status != 0)) 569 + return status; 570 + 571 + /* decode stateid */ 572 + status = decode_stateid(xdr, &args->coa_stateid); 573 + if (unlikely(status != 0)) 574 + return status; 575 + 576 + /* decode status */ 577 + p = read_buf(xdr, 4); 578 + if (unlikely(!p)) 579 + goto out; 580 + args->error = ntohl(*p++); 581 + if (!args->error) { 582 + status = decode_write_response(xdr, args); 583 + if (unlikely(status != 0)) 584 + return status; 585 + } else { 586 + p = xdr_inline_decode(xdr, 8); 587 + if (unlikely(!p)) 588 + goto out; 589 + p 
= xdr_decode_hyper(p, &args->wr_count); 590 + } 591 + return 0; 592 + out: 593 + return htonl(NFS4ERR_RESOURCE); 594 + } 595 + #endif /* CONFIG_NFS_V4_2 */ 534 596 static __be32 encode_string(struct xdr_stream *xdr, unsigned int len, const char *str) 535 597 { 536 598 if (unlikely(xdr_stream_encode_opaque(xdr, str, len) < 0)) ··· 841 773 if (status != htonl(NFS4ERR_OP_ILLEGAL)) 842 774 return status; 843 775 844 - if (op_nr == OP_CB_OFFLOAD) 776 + if (op_nr == OP_CB_OFFLOAD) { 777 + *op = &callback_ops[op_nr]; 778 + return htonl(NFS_OK); 779 + } else 845 780 return htonl(NFS4ERR_NOTSUPP); 846 781 return htonl(NFS4ERR_OP_ILLEGAL); 847 782 } ··· 954 883 955 884 if (hdr_arg.minorversion == 0) { 956 885 cps.clp = nfs4_find_client_ident(SVC_NET(rqstp), hdr_arg.cb_ident); 957 - if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp)) 886 + if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp)) { 887 + if (cps.clp) 888 + nfs_put_client(cps.clp); 958 889 goto out_invalidcred; 890 + } 959 891 } 960 892 961 893 cps.minorversion = hdr_arg.minorversion; 962 894 hdr_res.taglen = hdr_arg.taglen; 963 895 hdr_res.tag = hdr_arg.tag; 964 - if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0) 896 + if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0) { 897 + if (cps.clp) 898 + nfs_put_client(cps.clp); 965 899 return rpc_system_err; 966 - 900 + } 967 901 while (status == 0 && nops != hdr_arg.nops) { 968 902 status = process_op(nops, rqstp, &xdr_in, 969 903 rqstp->rq_argp, &xdr_out, rqstp->rq_resp, ··· 1045 969 .res_maxsize = CB_OP_NOTIFY_LOCK_RES_MAXSZ, 1046 970 }, 1047 971 #endif /* CONFIG_NFS_V4_1 */ 972 + #ifdef CONFIG_NFS_V4_2 973 + [OP_CB_OFFLOAD] = { 974 + .process_op = nfs4_callback_offload, 975 + .decode_args = decode_offload_args, 976 + .res_maxsize = CB_OP_OFFLOAD_RES_MAXSZ, 977 + }, 978 + #endif /* CONFIG_NFS_V4_2 */ 1048 979 }; 1049 980 1050 981 /*
+1
fs/nfs/client.c
··· 886 886 INIT_LIST_HEAD(&server->delegations); 887 887 INIT_LIST_HEAD(&server->layouts); 888 888 INIT_LIST_HEAD(&server->state_owners_lru); 889 + INIT_LIST_HEAD(&server->ss_copies); 889 890 890 891 atomic_set(&server->active, 0); 891 892
+20 -12
fs/nfs/dir.c
··· 904 904 dfprintk(FILE, "NFS: llseek dir(%pD2, %lld, %d)\n", 905 905 filp, offset, whence); 906 906 907 - inode_lock(inode); 908 907 switch (whence) { 909 - case 1: 910 - offset += filp->f_pos; 911 - case 0: 912 - if (offset >= 0) 913 - break; 914 - default: 915 - offset = -EINVAL; 916 - goto out; 908 + default: 909 + return -EINVAL; 910 + case SEEK_SET: 911 + if (offset < 0) 912 + return -EINVAL; 913 + inode_lock(inode); 914 + break; 915 + case SEEK_CUR: 916 + if (offset == 0) 917 + return filp->f_pos; 918 + inode_lock(inode); 919 + offset += filp->f_pos; 920 + if (offset < 0) { 921 + inode_unlock(inode); 922 + return -EINVAL; 923 + } 917 924 } 918 925 if (offset != filp->f_pos) { 919 926 filp->f_pos = offset; 920 927 dir_ctx->dir_cookie = 0; 921 928 dir_ctx->duped = 0; 922 929 } 923 - out: 924 930 inode_unlock(inode); 925 931 return offset; 926 932 } ··· 1038 1032 if (flags & LOOKUP_REVAL) 1039 1033 goto out_force; 1040 1034 out: 1041 - return (inode->i_nlink == 0) ? -ENOENT : 0; 1035 + return (inode->i_nlink == 0) ? -ESTALE : 0; 1042 1036 out_force: 1043 1037 if (flags & LOOKUP_RCU) 1044 1038 return -ECHILD; ··· 2505 2499 struct nfs_server *server = NFS_SERVER(inode); 2506 2500 int ret = 0; 2507 2501 2508 - if (nfs_check_cache_invalid(inode, NFS_INO_INVALID_ACCESS)) { 2502 + if (S_ISDIR(inode->i_mode)) 2503 + return 0; 2504 + if (nfs_check_cache_invalid(inode, NFS_INO_INVALID_OTHER)) { 2509 2505 if (mask & MAY_NOT_BLOCK) 2510 2506 return -ECHILD; 2511 2507 ret = __nfs_revalidate_inode(server, inode);
+1 -1
fs/nfs/direct.c
··· 758 758 759 759 static void nfs_direct_write_complete(struct nfs_direct_req *dreq) 760 760 { 761 - schedule_work(&dreq->work); /* Calls nfs_direct_write_schedule_work */ 761 + queue_work(nfsiod_workqueue, &dreq->work); /* Calls nfs_direct_write_schedule_work */ 762 762 } 763 763 764 764 static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
+2 -2
fs/nfs/file.c
··· 532 532 * writable, implying that someone is about to modify the page through a 533 533 * shared-writable mapping 534 534 */ 535 - static int nfs_vm_page_mkwrite(struct vm_fault *vmf) 535 + static vm_fault_t nfs_vm_page_mkwrite(struct vm_fault *vmf) 536 536 { 537 537 struct page *page = vmf->page; 538 538 struct file *filp = vmf->vma->vm_file; 539 539 struct inode *inode = file_inode(filp); 540 540 unsigned pagelen; 541 - int ret = VM_FAULT_NOPAGE; 541 + vm_fault_t ret = VM_FAULT_NOPAGE; 542 542 struct address_space *mapping; 543 543 544 544 dfprintk(PAGECACHE, "NFS: vm_page_mkwrite(%pD2(%lu), offset %lld)\n",
+14 -17
fs/nfs/flexfilelayout/flexfilelayout.c
··· 812 812 struct nfs_page *req, 813 813 bool strict_iomode) 814 814 { 815 - retry_strict: 816 815 pnfs_put_lseg(pgio->pg_lseg); 817 816 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, 818 817 req->wb_context, ··· 823 824 if (IS_ERR(pgio->pg_lseg)) { 824 825 pgio->pg_error = PTR_ERR(pgio->pg_lseg); 825 826 pgio->pg_lseg = NULL; 826 - } 827 - 828 - /* If we don't have checking, do get a IOMODE_RW 829 - * segment, and the server wants to avoid READs 830 - * there, then retry! 831 - */ 832 - if (pgio->pg_lseg && !strict_iomode && 833 - ff_layout_avoid_read_on_rw(pgio->pg_lseg)) { 834 - strict_iomode = true; 835 - goto retry_strict; 836 827 } 837 828 } 838 829 ··· 838 849 retry: 839 850 pnfs_generic_pg_check_layout(pgio); 840 851 /* Use full layout for now */ 841 - if (!pgio->pg_lseg) 852 + if (!pgio->pg_lseg) { 842 853 ff_layout_pg_get_read(pgio, req, false); 843 - else if (ff_layout_avoid_read_on_rw(pgio->pg_lseg)) 854 + if (!pgio->pg_lseg) 855 + goto out_nolseg; 856 + } 857 + if (ff_layout_avoid_read_on_rw(pgio->pg_lseg)) { 844 858 ff_layout_pg_get_read(pgio, req, true); 845 - 846 - /* If no lseg, fall back to read through mds */ 847 - if (pgio->pg_lseg == NULL) 848 - goto out_mds; 859 + if (!pgio->pg_lseg) 860 + goto out_nolseg; 861 + } 849 862 850 863 ds = ff_layout_choose_best_ds_for_read(pgio->pg_lseg, 0, &ds_idx); 851 864 if (!ds) { ··· 869 878 pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize; 870 879 871 880 return; 881 + out_nolseg: 882 + if (pgio->pg_error < 0) 883 + return; 872 884 out_mds: 873 885 pnfs_put_lseg(pgio->pg_lseg); 874 886 pgio->pg_lseg = NULL; ··· 1317 1323 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx), 1318 1324 hdr->args.count, 1319 1325 hdr->res.count); 1326 + set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags); 1320 1327 } 1321 1328 1322 1329 static int ff_layout_read_prepare_common(struct rpc_task *task, ··· 1502 1507 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx), 1503 1508 hdr->args.count, hdr->res.count, 1504 1509 
hdr->res.verf->committed); 1510 + set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags); 1505 1511 } 1506 1512 1507 1513 static int ff_layout_write_prepare_common(struct rpc_task *task, ··· 1611 1615 nfs4_ff_layout_stat_io_end_write(task, 1612 1616 FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index), 1613 1617 count, count, NFS_FILE_SYNC); 1618 + set_bit(NFS_LSEG_LAYOUTRETURN, &cdata->lseg->pls_flags); 1614 1619 } 1615 1620 1616 1621 static void ff_layout_commit_prepare_common(struct rpc_task *task,
+2
fs/nfs/nfs3acl.c
··· 108 108 case -EPROTONOSUPPORT: 109 109 dprintk("NFS_V3_ACL extension not supported; disabling\n"); 110 110 server->caps &= ~NFS_CAP_ACLS; 111 + /* fall through */ 111 112 case -ENOTSUPP: 112 113 status = -EOPNOTSUPP; 113 114 default: ··· 230 229 dprintk("NFS_V3_ACL SETACL RPC not supported" 231 230 "(will not retry)\n"); 232 231 server->caps &= ~NFS_CAP_ACLS; 232 + /* fall through */ 233 233 case -ENOTSUPP: 234 234 status = -EOPNOTSUPP; 235 235 }
+203 -6
fs/nfs/nfs42proc.c
··· 17 17 #include "internal.h" 18 18 19 19 #define NFSDBG_FACILITY NFSDBG_PROC 20 + static int nfs42_do_offload_cancel_async(struct file *dst, nfs4_stateid *std); 20 21 21 22 static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep, 22 23 struct nfs_lock_context *lock, loff_t offset, loff_t len) ··· 131 130 return err; 132 131 } 133 132 133 + static int handle_async_copy(struct nfs42_copy_res *res, 134 + struct nfs_server *server, 135 + struct file *src, 136 + struct file *dst, 137 + nfs4_stateid *src_stateid) 138 + { 139 + struct nfs4_copy_state *copy; 140 + int status = NFS4_OK; 141 + bool found_pending = false; 142 + struct nfs_open_context *ctx = nfs_file_open_context(dst); 143 + 144 + spin_lock(&server->nfs_client->cl_lock); 145 + list_for_each_entry(copy, &server->nfs_client->pending_cb_stateids, 146 + copies) { 147 + if (memcmp(&res->write_res.stateid, &copy->stateid, 148 + NFS4_STATEID_SIZE)) 149 + continue; 150 + found_pending = true; 151 + list_del(&copy->copies); 152 + break; 153 + } 154 + if (found_pending) { 155 + spin_unlock(&server->nfs_client->cl_lock); 156 + goto out; 157 + } 158 + 159 + copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS); 160 + if (!copy) { 161 + spin_unlock(&server->nfs_client->cl_lock); 162 + return -ENOMEM; 163 + } 164 + memcpy(&copy->stateid, &res->write_res.stateid, NFS4_STATEID_SIZE); 165 + init_completion(&copy->completion); 166 + copy->parent_state = ctx->state; 167 + 168 + list_add_tail(&copy->copies, &server->ss_copies); 169 + spin_unlock(&server->nfs_client->cl_lock); 170 + 171 + status = wait_for_completion_interruptible(&copy->completion); 172 + spin_lock(&server->nfs_client->cl_lock); 173 + list_del_init(&copy->copies); 174 + spin_unlock(&server->nfs_client->cl_lock); 175 + if (status == -ERESTARTSYS) { 176 + goto out_cancel; 177 + } else if (copy->flags) { 178 + status = -EAGAIN; 179 + goto out_cancel; 180 + } 181 + out: 182 + res->write_res.count = copy->count; 183 + 
memcpy(&res->write_res.verifier, &copy->verf, sizeof(copy->verf)); 184 + status = -copy->error; 185 + 186 + kfree(copy); 187 + return status; 188 + out_cancel: 189 + nfs42_do_offload_cancel_async(dst, &copy->stateid); 190 + kfree(copy); 191 + return status; 192 + } 193 + 194 + static int process_copy_commit(struct file *dst, loff_t pos_dst, 195 + struct nfs42_copy_res *res) 196 + { 197 + struct nfs_commitres cres; 198 + int status = -ENOMEM; 199 + 200 + cres.verf = kzalloc(sizeof(struct nfs_writeverf), GFP_NOFS); 201 + if (!cres.verf) 202 + goto out; 203 + 204 + status = nfs4_proc_commit(dst, pos_dst, res->write_res.count, &cres); 205 + if (status) 206 + goto out_free; 207 + if (nfs_write_verifier_cmp(&res->write_res.verifier.verifier, 208 + &cres.verf->verifier)) { 209 + dprintk("commit verf differs from copy verf\n"); 210 + status = -EAGAIN; 211 + } 212 + out_free: 213 + kfree(cres.verf); 214 + out: 215 + return status; 216 + } 217 + 134 218 static ssize_t _nfs42_proc_copy(struct file *src, 135 219 struct nfs_lock_context *src_lock, 136 220 struct file *dst, ··· 254 168 if (status) 255 169 return status; 256 170 257 - res->commit_res.verf = kzalloc(sizeof(struct nfs_writeverf), GFP_NOFS); 258 - if (!res->commit_res.verf) 259 - return -ENOMEM; 171 + res->commit_res.verf = NULL; 172 + if (args->sync) { 173 + res->commit_res.verf = 174 + kzalloc(sizeof(struct nfs_writeverf), GFP_NOFS); 175 + if (!res->commit_res.verf) 176 + return -ENOMEM; 177 + } 178 + set_bit(NFS_CLNT_DST_SSC_COPY_STATE, 179 + &dst_lock->open_context->state->flags); 180 + 260 181 status = nfs4_call_sync(server->client, server, &msg, 261 182 &args->seq_args, &res->seq_res, 0); 262 183 if (status == -ENOTSUPP) ··· 271 178 if (status) 272 179 goto out; 273 180 274 - if (nfs_write_verifier_cmp(&res->write_res.verifier.verifier, 181 + if (args->sync && 182 + nfs_write_verifier_cmp(&res->write_res.verifier.verifier, 275 183 &res->commit_res.verf->verifier)) { 276 184 status = -EAGAIN; 277 185 goto out; 
186 + } 187 + 188 + if (!res->synchronous) { 189 + status = handle_async_copy(res, server, src, dst, 190 + &args->src_stateid); 191 + if (status) 192 + return status; 193 + } 194 + 195 + if ((!res->synchronous || !args->sync) && 196 + res->write_res.verifier.committed != NFS_FILE_SYNC) { 197 + status = process_copy_commit(dst, pos_dst, res); 198 + if (status) 199 + return status; 278 200 } 279 201 280 202 truncate_pagecache_range(dst_inode, pos_dst, ··· 297 189 298 190 status = res->write_res.count; 299 191 out: 300 - kfree(res->commit_res.verf); 192 + if (args->sync) 193 + kfree(res->commit_res.verf); 301 194 return status; 302 195 } 303 196 ··· 315 206 .dst_fh = NFS_FH(file_inode(dst)), 316 207 .dst_pos = pos_dst, 317 208 .count = count, 209 + .sync = false, 318 210 }; 319 211 struct nfs42_copy_res res; 320 212 struct nfs4_exception src_exception = { ··· 357 247 if (err == -ENOTSUPP) { 358 248 err = -EOPNOTSUPP; 359 249 break; 360 - } if (err == -EAGAIN) { 250 + } else if (err == -EAGAIN) { 251 + dst_exception.retry = 1; 252 + continue; 253 + } else if (err == -NFS4ERR_OFFLOAD_NO_REQS && !args.sync) { 254 + args.sync = true; 361 255 dst_exception.retry = 1; 362 256 continue; 363 257 } ··· 376 262 out_put_src_lock: 377 263 nfs_put_lock_context(src_lock); 378 264 return err; 265 + } 266 + 267 + struct nfs42_offloadcancel_data { 268 + struct nfs_server *seq_server; 269 + struct nfs42_offload_status_args args; 270 + struct nfs42_offload_status_res res; 271 + }; 272 + 273 + static void nfs42_offload_cancel_prepare(struct rpc_task *task, void *calldata) 274 + { 275 + struct nfs42_offloadcancel_data *data = calldata; 276 + 277 + nfs4_setup_sequence(data->seq_server->nfs_client, 278 + &data->args.osa_seq_args, 279 + &data->res.osr_seq_res, task); 280 + } 281 + 282 + static void nfs42_offload_cancel_done(struct rpc_task *task, void *calldata) 283 + { 284 + struct nfs42_offloadcancel_data *data = calldata; 285 + 286 + nfs41_sequence_done(task, &data->res.osr_seq_res); 287 
+ if (task->tk_status && 288 + nfs4_async_handle_error(task, data->seq_server, NULL, 289 + NULL) == -EAGAIN) 290 + rpc_restart_call_prepare(task); 291 + } 292 + 293 + static void nfs42_free_offloadcancel_data(void *data) 294 + { 295 + kfree(data); 296 + } 297 + 298 + static const struct rpc_call_ops nfs42_offload_cancel_ops = { 299 + .rpc_call_prepare = nfs42_offload_cancel_prepare, 300 + .rpc_call_done = nfs42_offload_cancel_done, 301 + .rpc_release = nfs42_free_offloadcancel_data, 302 + }; 303 + 304 + static int nfs42_do_offload_cancel_async(struct file *dst, 305 + nfs4_stateid *stateid) 306 + { 307 + struct nfs_server *dst_server = NFS_SERVER(file_inode(dst)); 308 + struct nfs42_offloadcancel_data *data = NULL; 309 + struct nfs_open_context *ctx = nfs_file_open_context(dst); 310 + struct rpc_task *task; 311 + struct rpc_message msg = { 312 + .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OFFLOAD_CANCEL], 313 + .rpc_cred = ctx->cred, 314 + }; 315 + struct rpc_task_setup task_setup_data = { 316 + .rpc_client = dst_server->client, 317 + .rpc_message = &msg, 318 + .callback_ops = &nfs42_offload_cancel_ops, 319 + .workqueue = nfsiod_workqueue, 320 + .flags = RPC_TASK_ASYNC, 321 + }; 322 + int status; 323 + 324 + if (!(dst_server->caps & NFS_CAP_OFFLOAD_CANCEL)) 325 + return -EOPNOTSUPP; 326 + 327 + data = kzalloc(sizeof(struct nfs42_offloadcancel_data), GFP_NOFS); 328 + if (data == NULL) 329 + return -ENOMEM; 330 + 331 + data->seq_server = dst_server; 332 + data->args.osa_src_fh = NFS_FH(file_inode(dst)); 333 + memcpy(&data->args.osa_stateid, stateid, 334 + sizeof(data->args.osa_stateid)); 335 + msg.rpc_argp = &data->args; 336 + msg.rpc_resp = &data->res; 337 + task_setup_data.callback_data = data; 338 + nfs4_init_sequence(&data->args.osa_seq_args, &data->res.osr_seq_res, 339 + 1, 0); 340 + task = rpc_run_task(&task_setup_data); 341 + if (IS_ERR(task)) 342 + return PTR_ERR(task); 343 + status = rpc_wait_for_completion_task(task); 344 + if (status == -ENOTSUPP) 345 + 
dst_server->caps &= ~NFS_CAP_OFFLOAD_CANCEL; 346 + rpc_put_task(task); 347 + return status; 379 348 } 380 349 381 350 static loff_t _nfs42_proc_llseek(struct file *filep,
+85 -13
fs/nfs/nfs42xdr.c
··· 26 26 NFS42_WRITE_RES_SIZE + \ 27 27 1 /* cr_consecutive */ + \ 28 28 1 /* cr_synchronous */) 29 + #define encode_offload_cancel_maxsz (op_encode_hdr_maxsz + \ 30 + XDR_QUADLEN(NFS4_STATEID_SIZE)) 31 + #define decode_offload_cancel_maxsz (op_decode_hdr_maxsz) 29 32 #define encode_deallocate_maxsz (op_encode_hdr_maxsz + \ 30 33 encode_fallocate_maxsz) 31 34 #define decode_deallocate_maxsz (op_decode_hdr_maxsz) ··· 78 75 decode_putfh_maxsz + \ 79 76 decode_copy_maxsz + \ 80 77 decode_commit_maxsz) 78 + #define NFS4_enc_offload_cancel_sz (compound_encode_hdr_maxsz + \ 79 + encode_putfh_maxsz + \ 80 + encode_offload_cancel_maxsz) 81 + #define NFS4_dec_offload_cancel_sz (compound_decode_hdr_maxsz + \ 82 + decode_putfh_maxsz + \ 83 + decode_offload_cancel_maxsz) 81 84 #define NFS4_enc_deallocate_sz (compound_encode_hdr_maxsz + \ 82 85 encode_putfh_maxsz + \ 83 86 encode_deallocate_maxsz + \ ··· 150 141 encode_uint64(xdr, args->count); 151 142 152 143 encode_uint32(xdr, 1); /* consecutive = true */ 153 - encode_uint32(xdr, 1); /* synchronous = true */ 144 + encode_uint32(xdr, args->sync); 154 145 encode_uint32(xdr, 0); /* src server list */ 146 + } 147 + 148 + static void encode_offload_cancel(struct xdr_stream *xdr, 149 + const struct nfs42_offload_status_args *args, 150 + struct compound_hdr *hdr) 151 + { 152 + encode_op_hdr(xdr, OP_OFFLOAD_CANCEL, decode_offload_cancel_maxsz, hdr); 153 + encode_nfs4_stateid(xdr, &args->osa_stateid); 155 154 } 156 155 157 156 static void encode_deallocate(struct xdr_stream *xdr, ··· 273 256 encode_savefh(xdr, &hdr); 274 257 encode_putfh(xdr, args->dst_fh, &hdr); 275 258 encode_copy(xdr, args, &hdr); 276 - encode_copy_commit(xdr, args, &hdr); 259 + if (args->sync) 260 + encode_copy_commit(xdr, args, &hdr); 261 + encode_nops(&hdr); 262 + } 263 + 264 + /* 265 + * Encode OFFLOAD_CANEL request 266 + */ 267 + static void nfs4_xdr_enc_offload_cancel(struct rpc_rqst *req, 268 + struct xdr_stream *xdr, 269 + const void *data) 270 + { 271 + 
const struct nfs42_offload_status_args *args = data; 272 + struct compound_hdr hdr = { 273 + .minorversion = nfs4_xdr_minorversion(&args->osa_seq_args), 274 + }; 275 + 276 + encode_compound_hdr(xdr, req, &hdr); 277 + encode_sequence(xdr, &args->osa_seq_args, &hdr); 278 + encode_putfh(xdr, args->osa_src_fh, &hdr); 279 + encode_offload_cancel(xdr, args, &hdr); 277 280 encode_nops(&hdr); 278 281 } 279 282 ··· 390 353 struct nfs42_write_res *res) 391 354 { 392 355 __be32 *p; 356 + int status, count; 393 357 394 - p = xdr_inline_decode(xdr, 4 + 8 + 4); 358 + p = xdr_inline_decode(xdr, 4); 395 359 if (unlikely(!p)) 396 360 goto out_overflow; 397 - 398 - /* 399 - * We never use asynchronous mode, so warn if a server returns 400 - * a stateid. 401 - */ 402 - if (unlikely(*p != 0)) { 403 - pr_err_once("%s: server has set unrequested " 404 - "asynchronous mode\n", __func__); 361 + count = be32_to_cpup(p); 362 + if (count > 1) 405 363 return -EREMOTEIO; 364 + else if (count == 1) { 365 + status = decode_opaque_fixed(xdr, &res->stateid, 366 + NFS4_STATEID_SIZE); 367 + if (unlikely(status)) 368 + goto out_overflow; 406 369 } 407 - p++; 370 + p = xdr_inline_decode(xdr, 8 + 4); 371 + if (unlikely(!p)) 372 + goto out_overflow; 408 373 p = xdr_decode_hyper(p, &res->count); 409 374 res->verifier.committed = be32_to_cpup(p); 410 375 return decode_verifier(xdr, &res->verifier.verifier); ··· 450 411 return status; 451 412 452 413 return decode_copy_requirements(xdr, res); 414 + } 415 + 416 + static int decode_offload_cancel(struct xdr_stream *xdr, 417 + struct nfs42_offload_status_res *res) 418 + { 419 + return decode_op_hdr(xdr, OP_OFFLOAD_CANCEL); 453 420 } 454 421 455 422 static int decode_deallocate(struct xdr_stream *xdr, struct nfs42_falloc_res *res) ··· 552 507 status = decode_copy(xdr, res); 553 508 if (status) 554 509 goto out; 555 - status = decode_commit(xdr, &res->commit_res); 510 + if (res->commit_res.verf) 511 + status = decode_commit(xdr, &res->commit_res); 512 + out: 
513 + return status; 514 + } 515 + 516 + /* 517 + * Decode OFFLOAD_CANCEL response 518 + */ 519 + static int nfs4_xdr_dec_offload_cancel(struct rpc_rqst *rqstp, 520 + struct xdr_stream *xdr, 521 + void *data) 522 + { 523 + struct nfs42_offload_status_res *res = data; 524 + struct compound_hdr hdr; 525 + int status; 526 + 527 + status = decode_compound_hdr(xdr, &hdr); 528 + if (status) 529 + goto out; 530 + status = decode_sequence(xdr, &res->osr_seq_res, rqstp); 531 + if (status) 532 + goto out; 533 + status = decode_putfh(xdr); 534 + if (status) 535 + goto out; 536 + status = decode_offload_cancel(xdr, res); 537 + 556 538 out: 557 539 return status; 558 540 }
+7 -1
fs/nfs/nfs4_fs.h
··· 163 163 NFS_STATE_RECOVERY_FAILED, /* OPEN stateid state recovery failed */ 164 164 NFS_STATE_MAY_NOTIFY_LOCK, /* server may CB_NOTIFY_LOCK */ 165 165 NFS_STATE_CHANGE_WAIT, /* A state changing operation is outstanding */ 166 + #ifdef CONFIG_NFS_V4_2 167 + NFS_CLNT_DST_SSC_COPY_STATE, /* dst server open state on client*/ 168 + #endif /* CONFIG_NFS_V4_2 */ 166 169 }; 167 170 168 171 struct nfs4_state { ··· 276 273 277 274 /* nfs4proc.c */ 278 275 extern int nfs4_handle_exception(struct nfs_server *, int, struct nfs4_exception *); 276 + extern int nfs4_async_handle_error(struct rpc_task *task, 277 + struct nfs_server *server, 278 + struct nfs4_state *state, long *timeout); 279 279 extern int nfs4_call_sync(struct rpc_clnt *, struct nfs_server *, 280 280 struct rpc_message *, struct nfs4_sequence_args *, 281 281 struct nfs4_sequence_res *, int); ··· 511 505 struct nfs4_sequence_res *res); 512 506 513 507 extern void nfs4_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp); 514 - 508 + extern int nfs4_proc_commit(struct file *dst, __u64 offset, __u32 count, struct nfs_commitres *res); 515 509 extern const nfs4_stateid zero_stateid; 516 510 extern const nfs4_stateid invalid_stateid; 517 511
+18 -3
fs/nfs/nfs4client.c
··· 156 156 } 157 157 } 158 158 159 + static void 160 + nfs4_cleanup_callback(struct nfs_client *clp) 161 + { 162 + struct nfs4_copy_state *cp_state; 163 + 164 + while (!list_empty(&clp->pending_cb_stateids)) { 165 + cp_state = list_entry(clp->pending_cb_stateids.next, 166 + struct nfs4_copy_state, copies); 167 + list_del(&cp_state->copies); 168 + kfree(cp_state); 169 + } 170 + } 171 + 159 172 void nfs41_shutdown_client(struct nfs_client *clp) 160 173 { 161 174 if (nfs4_has_session(clp)) { 175 + nfs4_cleanup_callback(clp); 162 176 nfs4_shutdown_ds_clients(clp); 163 177 nfs4_destroy_session(clp->cl_session); 164 178 nfs4_destroy_clientid(clp); ··· 216 202 #if IS_ENABLED(CONFIG_NFS_V4_1) 217 203 init_waitqueue_head(&clp->cl_lock_waitq); 218 204 #endif 205 + INIT_LIST_HEAD(&clp->pending_cb_stateids); 219 206 return clp; 220 207 221 208 error: ··· 1142 1127 nfs_server_copy_userdata(server, parent_server); 1143 1128 1144 1129 /* Get a client representation */ 1145 - #ifdef CONFIG_SUNRPC_XPRT_RDMA 1130 + #if IS_ENABLED(CONFIG_SUNRPC_XPRT_RDMA) 1146 1131 rpc_set_port(data->addr, NFS_RDMA_PORT); 1147 1132 error = nfs4_set_client(server, data->hostname, 1148 1133 data->addr, ··· 1154 1139 parent_client->cl_net); 1155 1140 if (!error) 1156 1141 goto init_server; 1157 - #endif /* CONFIG_SUNRPC_XPRT_RDMA */ 1142 + #endif /* IS_ENABLED(CONFIG_SUNRPC_XPRT_RDMA) */ 1158 1143 1159 1144 rpc_set_port(data->addr, NFS_PORT); 1160 1145 error = nfs4_set_client(server, data->hostname, ··· 1168 1153 if (error < 0) 1169 1154 goto error; 1170 1155 1171 - #ifdef CONFIG_SUNRPC_XPRT_RDMA 1156 + #if IS_ENABLED(CONFIG_SUNRPC_XPRT_RDMA) 1172 1157 init_server: 1173 1158 #endif 1174 1159 error = nfs_init_server_rpcclient(server, parent_server->client->cl_timeout, data->authflavor);
+8 -2
fs/nfs/nfs4file.c
··· 133 133 struct file *file_out, loff_t pos_out, 134 134 size_t count, unsigned int flags) 135 135 { 136 + ssize_t ret; 137 + 136 138 if (file_inode(file_in) == file_inode(file_out)) 137 139 return -EINVAL; 138 - 139 - return nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count); 140 + retry: 141 + ret = nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count); 142 + if (ret == -EAGAIN) 143 + goto retry; 144 + return ret; 140 145 } 141 146 142 147 static loff_t nfs4_file_llseek(struct file *filep, loff_t offset, int whence) ··· 154 149 ret = nfs42_proc_llseek(filep, offset, whence); 155 150 if (ret != -ENOTSUPP) 156 151 return ret; 152 + /* Fall through */ 157 153 default: 158 154 return nfs_file_llseek(filep, offset, whence); 159 155 }
+4
fs/nfs/nfs4idmap.c
··· 506 506 switch (token) { 507 507 case Opt_find_uid: 508 508 im->im_type = IDMAP_TYPE_USER; 509 + /* Fall through */ 509 510 case Opt_find_gid: 510 511 im->im_conv = IDMAP_CONV_NAMETOID; 511 512 ret = match_strlcpy(im->im_name, &substr, IDMAP_NAMESZ); ··· 514 513 515 514 case Opt_find_user: 516 515 im->im_type = IDMAP_TYPE_USER; 516 + /* Fall through */ 517 517 case Opt_find_group: 518 518 im->im_conv = IDMAP_CONV_IDTONAME; 519 519 ret = match_int(&substr, &im->im_id); 520 + if (ret) 521 + goto out; 520 522 break; 521 523 522 524 default:
+130 -26
fs/nfs/nfs4proc.c
··· 449 449 stateid); 450 450 goto wait_on_recovery; 451 451 } 452 + /* Fall through */ 452 453 case -NFS4ERR_OPENMODE: 453 454 if (inode) { 454 455 int err; ··· 502 501 ret = -EBUSY; 503 502 break; 504 503 } 504 + /* Fall through */ 505 505 case -NFS4ERR_DELAY: 506 506 nfs_inc_server_stats(server, NFSIOS_DELAY); 507 + /* Fall through */ 507 508 case -NFS4ERR_GRACE: 508 509 case -NFS4ERR_LAYOUTTRYLATER: 509 510 case -NFS4ERR_RECALLCONFLICT: ··· 584 581 ret = -EIO; 585 582 return ret; 586 583 out_retry: 587 - if (ret == 0) 584 + if (ret == 0) { 588 585 exception->retry = 1; 586 + /* 587 + * For NFS4ERR_MOVED, the client transport will need to 588 + * be recomputed after migration recovery has completed. 589 + */ 590 + if (errorcode == -NFS4ERR_MOVED) 591 + rpc_task_release_transport(task); 592 + } 589 593 return ret; 590 594 } 591 595 592 - static int 596 + int 593 597 nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server, 594 598 struct nfs4_state *state, long *timeout) 595 599 { ··· 1081 1071 return nfs4_call_sync_sequence(clnt, server, msg, args, res); 1082 1072 } 1083 1073 1084 - static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo, 1085 - unsigned long timestamp) 1074 + static void 1075 + nfs4_inc_nlink_locked(struct inode *inode) 1076 + { 1077 + NFS_I(inode)->cache_validity |= NFS_INO_INVALID_OTHER; 1078 + inc_nlink(inode); 1079 + } 1080 + 1081 + static void 1082 + nfs4_dec_nlink_locked(struct inode *inode) 1083 + { 1084 + NFS_I(inode)->cache_validity |= NFS_INO_INVALID_OTHER; 1085 + drop_nlink(inode); 1086 + } 1087 + 1088 + static void 1089 + update_changeattr_locked(struct inode *dir, struct nfs4_change_info *cinfo, 1090 + unsigned long timestamp, unsigned long cache_validity) 1086 1091 { 1087 1092 struct nfs_inode *nfsi = NFS_I(dir); 1088 1093 1089 - spin_lock(&dir->i_lock); 1090 1094 nfsi->cache_validity |= NFS_INO_INVALID_CTIME 1091 1095 | NFS_INO_INVALID_MTIME 1092 - | NFS_INO_INVALID_DATA; 1096 + | 
NFS_INO_INVALID_DATA 1097 + | cache_validity; 1093 1098 if (cinfo->atomic && cinfo->before == inode_peek_iversion_raw(dir)) { 1094 1099 nfsi->cache_validity &= ~NFS_INO_REVAL_PAGECACHE; 1095 1100 nfsi->attrtimeo_timestamp = jiffies; ··· 1117 1092 inode_set_iversion_raw(dir, cinfo->after); 1118 1093 nfsi->read_cache_jiffies = timestamp; 1119 1094 nfsi->attr_gencount = nfs_inc_attr_generation_counter(); 1095 + nfsi->cache_validity &= ~NFS_INO_INVALID_CHANGE; 1120 1096 nfs_fscache_invalidate(dir); 1097 + } 1098 + 1099 + static void 1100 + update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo, 1101 + unsigned long timestamp, unsigned long cache_validity) 1102 + { 1103 + spin_lock(&dir->i_lock); 1104 + update_changeattr_locked(dir, cinfo, timestamp, cache_validity); 1121 1105 spin_unlock(&dir->i_lock); 1122 1106 } 1123 1107 ··· 1388 1354 case NFS4_OPEN_CLAIM_PREVIOUS: 1389 1355 if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags)) 1390 1356 break; 1357 + /* Fall through */ 1391 1358 default: 1392 1359 return 0; 1393 1360 } ··· 1808 1773 data->o_res.delegation_type, 1809 1774 &data->o_res.delegation, 1810 1775 data->o_res.pagemod_limit); 1776 + 1777 + if (data->o_res.do_recall) 1778 + nfs_async_inode_return_delegation(state->inode, 1779 + &data->o_res.delegation); 1811 1780 } 1812 1781 1813 1782 /* ··· 2158 2119 err = nfs4_open_recover_helper(opendata, FMODE_WRITE); 2159 2120 if (err) 2160 2121 break; 2122 + /* Fall through */ 2161 2123 case FMODE_READ: 2162 2124 err = nfs4_open_recover_helper(opendata, FMODE_READ); 2163 2125 } ··· 2288 2248 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 2289 2249 case NFS4_OPEN_CLAIM_DELEG_PREV_FH: 2290 2250 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0]; 2251 + /* Fall through */ 2291 2252 case NFS4_OPEN_CLAIM_FH: 2292 2253 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR]; 2293 2254 } ··· 2522 2481 if (data->file_created || 2523 2482 inode_peek_iversion_raw(dir) != o_res->cinfo.after) 2524 2483 
update_changeattr(dir, &o_res->cinfo, 2525 - o_res->f_attr->time_start); 2484 + o_res->f_attr->time_start, 0); 2526 2485 } 2527 2486 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0) 2528 2487 server->caps &= ~NFS_CAP_POSIX_LOCK; ··· 2884 2843 nfs_save_change_attribute(d_inode(opendata->dir))); 2885 2844 } 2886 2845 2846 + /* Parse layoutget results before we check for access */ 2847 + pnfs_parse_lgopen(state->inode, opendata->lgp, ctx); 2848 + 2887 2849 ret = nfs4_opendata_access(sp->so_cred, opendata, state, fmode, flags); 2888 2850 if (ret != 0) 2889 2851 goto out; ··· 2895 2851 nfs_inode_attach_open_context(ctx); 2896 2852 if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) 2897 2853 nfs4_schedule_stateid_recovery(server, state); 2898 - else 2899 - pnfs_parse_lgopen(state->inode, opendata->lgp, ctx); 2900 2854 } 2901 2855 2902 2856 out: ··· 3262 3220 calldata->res.lr_res = NULL; 3263 3221 break; 3264 3222 case -NFS4ERR_OLD_STATEID: 3265 - if (nfs4_refresh_layout_stateid(&calldata->arg.lr_args->stateid, 3223 + if (nfs4_layoutreturn_refresh_stateid(&calldata->arg.lr_args->stateid, 3224 + &calldata->arg.lr_args->range, 3266 3225 calldata->inode)) 3267 3226 goto lr_restart; 3268 3227 /* Fallthrough */ ··· 4279 4236 return status; 4280 4237 } 4281 4238 4282 - static int _nfs4_proc_remove(struct inode *dir, const struct qstr *name) 4239 + static int 4240 + _nfs4_proc_remove(struct inode *dir, const struct qstr *name, u32 ftype) 4283 4241 { 4284 4242 struct nfs_server *server = NFS_SERVER(dir); 4285 4243 struct nfs_removeargs args = { ··· 4299 4255 int status; 4300 4256 4301 4257 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); 4302 - if (status == 0) 4303 - update_changeattr(dir, &res.cinfo, timestamp); 4258 + if (status == 0) { 4259 + spin_lock(&dir->i_lock); 4260 + update_changeattr_locked(dir, &res.cinfo, timestamp, 0); 4261 + /* Removing a directory decrements nlink in the parent */ 4262 + if (ftype == NF4DIR 
&& dir->i_nlink > 2) 4263 + nfs4_dec_nlink_locked(dir); 4264 + spin_unlock(&dir->i_lock); 4265 + } 4304 4266 return status; 4305 4267 } 4306 4268 ··· 4323 4273 nfs4_inode_make_writeable(inode); 4324 4274 } 4325 4275 do { 4326 - err = _nfs4_proc_remove(dir, &dentry->d_name); 4276 + err = _nfs4_proc_remove(dir, &dentry->d_name, NF4REG); 4327 4277 trace_nfs4_remove(dir, &dentry->d_name, err); 4328 4278 err = nfs4_handle_exception(NFS_SERVER(dir), err, 4329 4279 &exception); ··· 4337 4287 int err; 4338 4288 4339 4289 do { 4340 - err = _nfs4_proc_remove(dir, name); 4290 + err = _nfs4_proc_remove(dir, name, NF4DIR); 4341 4291 trace_nfs4_remove(dir, name, err); 4342 4292 err = nfs4_handle_exception(NFS_SERVER(dir), err, 4343 4293 &exception); ··· 4381 4331 &data->timeout) == -EAGAIN) 4382 4332 return 0; 4383 4333 if (task->tk_status == 0) 4384 - update_changeattr(dir, &res->cinfo, res->dir_attr->time_start); 4334 + update_changeattr(dir, &res->cinfo, 4335 + res->dir_attr->time_start, 0); 4385 4336 return 1; 4386 4337 } 4387 4338 ··· 4424 4373 return 0; 4425 4374 4426 4375 if (task->tk_status == 0) { 4427 - update_changeattr(old_dir, &res->old_cinfo, res->old_fattr->time_start); 4428 - if (new_dir != old_dir) 4429 - update_changeattr(new_dir, &res->new_cinfo, res->new_fattr->time_start); 4376 + if (new_dir != old_dir) { 4377 + /* Note: If we moved a directory, nlink will change */ 4378 + update_changeattr(old_dir, &res->old_cinfo, 4379 + res->old_fattr->time_start, 4380 + NFS_INO_INVALID_OTHER); 4381 + update_changeattr(new_dir, &res->new_cinfo, 4382 + res->new_fattr->time_start, 4383 + NFS_INO_INVALID_OTHER); 4384 + } else 4385 + update_changeattr(old_dir, &res->old_cinfo, 4386 + res->old_fattr->time_start, 4387 + 0); 4430 4388 } 4431 4389 return 1; 4432 4390 } ··· 4476 4416 4477 4417 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 4478 4418 if (!status) { 4479 - update_changeattr(dir, &res.cinfo, res.fattr->time_start); 4419 + 
update_changeattr(dir, &res.cinfo, res.fattr->time_start, 0); 4480 4420 status = nfs_post_op_update_inode(inode, res.fattr); 4481 4421 if (!status) 4482 4422 nfs_setsecurity(inode, res.fattr, res.label); ··· 4551 4491 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg, 4552 4492 &data->arg.seq_args, &data->res.seq_res, 1); 4553 4493 if (status == 0) { 4554 - update_changeattr(dir, &data->res.dir_cinfo, 4555 - data->res.fattr->time_start); 4494 + spin_lock(&dir->i_lock); 4495 + update_changeattr_locked(dir, &data->res.dir_cinfo, 4496 + data->res.fattr->time_start, 0); 4497 + /* Creating a directory bumps nlink in the parent */ 4498 + if (data->arg.ftype == NF4DIR) 4499 + nfs4_inc_nlink_locked(dir); 4500 + spin_unlock(&dir->i_lock); 4556 4501 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr, data->res.label); 4557 4502 } 4558 4503 return status; ··· 5136 5071 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT]; 5137 5072 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0); 5138 5073 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_COMMIT, clnt, msg); 5074 + } 5075 + 5076 + static int _nfs4_proc_commit(struct file *dst, struct nfs_commitargs *args, 5077 + struct nfs_commitres *res) 5078 + { 5079 + struct inode *dst_inode = file_inode(dst); 5080 + struct nfs_server *server = NFS_SERVER(dst_inode); 5081 + struct rpc_message msg = { 5082 + .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT], 5083 + .rpc_argp = args, 5084 + .rpc_resp = res, 5085 + }; 5086 + 5087 + args->fh = NFS_FH(dst_inode); 5088 + return nfs4_call_sync(server->client, server, &msg, 5089 + &args->seq_args, &res->seq_res, 1); 5090 + } 5091 + 5092 + int nfs4_proc_commit(struct file *dst, __u64 offset, __u32 count, struct nfs_commitres *res) 5093 + { 5094 + struct nfs_commitargs args = { 5095 + .offset = offset, 5096 + .count = count, 5097 + }; 5098 + struct nfs_server *dst_server = NFS_SERVER(file_inode(dst)); 5099 + struct 
nfs4_exception exception = { }; 5100 + int status; 5101 + 5102 + do { 5103 + status = _nfs4_proc_commit(dst, &args, res); 5104 + status = nfs4_handle_exception(dst_server, status, &exception); 5105 + } while (exception.retry); 5106 + 5107 + return status; 5139 5108 } 5140 5109 5141 5110 struct nfs4_renewdata { ··· 6001 5902 data->res.lr_res = NULL; 6002 5903 break; 6003 5904 case -NFS4ERR_OLD_STATEID: 6004 - if (nfs4_refresh_layout_stateid(&data->args.lr_args->stateid, 5905 + if (nfs4_layoutreturn_refresh_stateid(&data->args.lr_args->stateid, 5906 + &data->args.lr_args->range, 6005 5907 data->inode)) 6006 5908 goto lr_restart; 6007 5909 /* Fallthrough */ ··· 6309 6209 if (nfs4_update_lock_stateid(calldata->lsp, 6310 6210 &calldata->res.stateid)) 6311 6211 break; 6212 + /* Fall through */ 6312 6213 case -NFS4ERR_ADMIN_REVOKED: 6313 6214 case -NFS4ERR_EXPIRED: 6314 6215 nfs4_free_revoked_stateid(calldata->server, 6315 6216 &calldata->arg.stateid, 6316 6217 task->tk_msg.rpc_cred); 6218 + /* Fall through */ 6317 6219 case -NFS4ERR_BAD_STATEID: 6318 6220 case -NFS4ERR_OLD_STATEID: 6319 6221 case -NFS4ERR_STALE_STATEID: ··· 7829 7727 } 7830 7728 out: 7831 7729 clp->cl_sp4_flags = flags; 7832 - return 0; 7730 + return ret; 7833 7731 } 7834 7732 7835 7733 struct nfs41_exchange_id_data { ··· 8270 8168 args->bc_attrs.max_resp_sz = max_bc_payload; 8271 8169 args->bc_attrs.max_resp_sz_cached = 0; 8272 8170 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS; 8273 - args->bc_attrs.max_reqs = min_t(unsigned short, max_session_cb_slots, 1); 8171 + args->bc_attrs.max_reqs = max_t(unsigned short, max_session_cb_slots, 1); 8274 8172 8275 8173 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u " 8276 8174 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n", ··· 8953 8851 server = NFS_SERVER(lrp->args.inode); 8954 8852 switch (task->tk_status) { 8955 8853 case -NFS4ERR_OLD_STATEID: 8956 - if (nfs4_refresh_layout_stateid(&lrp->args.stateid, 8854 + if 
(nfs4_layoutreturn_refresh_stateid(&lrp->args.stateid, 8855 + &lrp->args.range, 8957 8856 lrp->args.inode)) 8958 8857 goto out_restart; 8959 8858 /* Fallthrough */ ··· 9657 9554 | NFS_CAP_LGOPEN 9658 9555 | NFS_CAP_ALLOCATE 9659 9556 | NFS_CAP_COPY 9557 + | NFS_CAP_OFFLOAD_CANCEL 9660 9558 | NFS_CAP_DEALLOCATE 9661 9559 | NFS_CAP_SEEK 9662 9560 | NFS_CAP_LAYOUTSTATS
+35 -5
fs/nfs/nfs4state.c
··· 274 274 static int nfs4_begin_drain_session(struct nfs_client *clp) 275 275 { 276 276 struct nfs4_session *ses = clp->cl_session; 277 - int ret = 0; 277 + int ret; 278 278 279 279 if (clp->cl_slot_tbl) 280 280 return nfs4_drain_slot_tbl(clp->cl_slot_tbl); ··· 1525 1525 default: 1526 1526 pr_err("NFS: %s: unhandled error %d\n", 1527 1527 __func__, status); 1528 + /* Fall through */ 1528 1529 case -ENOMEM: 1529 1530 case -NFS4ERR_DENIED: 1530 1531 case -NFS4ERR_RECLAIM_BAD: ··· 1589 1588 } 1590 1589 clear_bit(NFS_STATE_RECLAIM_NOGRACE, 1591 1590 &state->flags); 1591 + #ifdef CONFIG_NFS_V4_2 1592 + if (test_bit(NFS_CLNT_DST_SSC_COPY_STATE, &state->flags)) { 1593 + struct nfs4_copy_state *copy; 1594 + 1595 + spin_lock(&sp->so_server->nfs_client->cl_lock); 1596 + list_for_each_entry(copy, &sp->so_server->ss_copies, copies) { 1597 + if (memcmp(&state->stateid.other, &copy->parent_state->stateid.other, NFS4_STATEID_SIZE)) 1598 + continue; 1599 + copy->flags = 1; 1600 + complete(&copy->completion); 1601 + printk("AGLO: server rebooted waking up the copy\n"); 1602 + break; 1603 + } 1604 + spin_unlock(&sp->so_server->nfs_client->cl_lock); 1605 + } 1606 + #endif /* CONFIG_NFS_V4_2 */ 1592 1607 nfs4_put_open_state(state); 1593 1608 spin_lock(&sp->so_lock); 1594 1609 goto restart; ··· 1614 1597 default: 1615 1598 printk(KERN_ERR "NFS: %s: unhandled error %d\n", 1616 1599 __func__, status); 1600 + /* Fall through */ 1617 1601 case -ENOENT: 1618 1602 case -ENOMEM: 1619 1603 case -EACCES: ··· 1626 1608 break; 1627 1609 case -EAGAIN: 1628 1610 ssleep(1); 1611 + /* Fall through */ 1629 1612 case -NFS4ERR_ADMIN_REVOKED: 1630 1613 case -NFS4ERR_STALE_STATEID: 1631 1614 case -NFS4ERR_OLD_STATEID: ··· 1958 1939 clp->cl_mvops->reboot_recovery_ops; 1959 1940 int status; 1960 1941 1961 - nfs4_begin_drain_session(clp); 1942 + status = nfs4_begin_drain_session(clp); 1943 + if (status != 0) 1944 + return status; 1962 1945 cred = nfs4_get_clid_cred(clp); 1963 1946 if (cred == NULL) 1964 
1947 return -ENOENT; ··· 2048 2027 goto out; 2049 2028 } 2050 2029 2051 - nfs4_begin_drain_session(clp); 2030 + status = nfs4_begin_drain_session(clp); 2031 + if (status != 0) 2032 + return status; 2052 2033 2053 2034 status = nfs4_replace_transport(server, locations); 2054 2035 if (status != 0) { ··· 2213 2190 case -ETIMEDOUT: 2214 2191 if (clnt->cl_softrtry) 2215 2192 break; 2193 + /* Fall through */ 2216 2194 case -NFS4ERR_DELAY: 2217 2195 case -EAGAIN: 2218 2196 ssleep(1); 2197 + /* Fall through */ 2219 2198 case -NFS4ERR_STALE_CLIENTID: 2220 2199 dprintk("NFS: %s after status %d, retrying\n", 2221 2200 __func__, status); ··· 2229 2204 } 2230 2205 if (clnt->cl_auth->au_flavor == RPC_AUTH_UNIX) 2231 2206 break; 2207 + /* Fall through */ 2232 2208 case -NFS4ERR_CLID_INUSE: 2233 2209 case -NFS4ERR_WRONGSEC: 2234 2210 /* No point in retrying if we already used RPC_AUTH_UNIX */ ··· 2400 2374 2401 2375 if (!nfs4_has_session(clp)) 2402 2376 return 0; 2403 - nfs4_begin_drain_session(clp); 2377 + status = nfs4_begin_drain_session(clp); 2378 + if (status != 0) 2379 + return status; 2404 2380 cred = nfs4_get_clid_cred(clp); 2405 2381 status = nfs4_proc_destroy_session(clp->cl_session, cred); 2406 2382 switch (status) { ··· 2445 2417 2446 2418 if (!nfs4_has_session(clp)) 2447 2419 return 0; 2448 - nfs4_begin_drain_session(clp); 2420 + ret = nfs4_begin_drain_session(clp); 2421 + if (ret != 0) 2422 + return ret; 2449 2423 cred = nfs4_get_clid_cred(clp); 2450 2424 ret = nfs4_proc_bind_conn_to_session(clp, cred); 2451 2425 if (cred)
+1
fs/nfs/nfs4xdr.c
··· 7789 7789 PROC42(LAYOUTSTATS, enc_layoutstats, dec_layoutstats), 7790 7790 PROC42(CLONE, enc_clone, dec_clone), 7791 7791 PROC42(COPY, enc_copy, dec_copy), 7792 + PROC42(OFFLOAD_CANCEL, enc_offload_cancel, dec_offload_cancel), 7792 7793 PROC(LOOKUPP, enc_lookupp, dec_lookupp), 7793 7794 }; 7794 7795
+1
fs/nfs/pagelist.c
··· 561 561 case FLUSH_COND_STABLE: 562 562 if (nfs_reqs_to_commit(cinfo)) 563 563 break; 564 + /* fall through */ 564 565 default: 565 566 hdr->args.stable = NFS_FILE_SYNC; 566 567 }
+86 -37
fs/nfs/pnfs.c
··· 361 361 /* 362 362 * Update the seqid of a layout stateid 363 363 */ 364 - bool nfs4_refresh_layout_stateid(nfs4_stateid *dst, struct inode *inode) 364 + bool nfs4_layoutreturn_refresh_stateid(nfs4_stateid *dst, 365 + struct pnfs_layout_range *dst_range, 366 + struct inode *inode) 365 367 { 366 368 struct pnfs_layout_hdr *lo; 369 + struct pnfs_layout_range range = { 370 + .iomode = IOMODE_ANY, 371 + .offset = 0, 372 + .length = NFS4_MAX_UINT64, 373 + }; 367 374 bool ret = false; 375 + LIST_HEAD(head); 376 + int err; 368 377 369 378 spin_lock(&inode->i_lock); 370 379 lo = NFS_I(inode)->layout; 371 380 if (lo && nfs4_stateid_match_other(dst, &lo->plh_stateid)) { 372 - dst->seqid = lo->plh_stateid.seqid; 373 - ret = true; 381 + err = pnfs_mark_matching_lsegs_return(lo, &head, &range, 0); 382 + if (err != -EBUSY) { 383 + dst->seqid = lo->plh_stateid.seqid; 384 + *dst_range = range; 385 + ret = true; 386 + } 374 387 } 375 388 spin_unlock(&inode->i_lock); 389 + pnfs_free_lseg_list(&head); 376 390 return ret; 377 391 } 378 392 ··· 1032 1018 nfs4_stateid_copy(&lgp->args.stateid, stateid); 1033 1019 lgp->gfp_flags = gfp_flags; 1034 1020 lgp->cred = get_rpccred(ctx->cred); 1035 - lgp->callback_count = raw_seqcount_begin(&server->nfs_client->cl_callback_count); 1036 1021 return lgp; 1037 1022 } 1038 1023 ··· 1173 1160 pnfs_layout_need_return(struct pnfs_layout_hdr *lo) 1174 1161 { 1175 1162 struct pnfs_layout_segment *s; 1163 + enum pnfs_iomode iomode; 1164 + u32 seq; 1176 1165 1177 1166 if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) 1178 1167 return false; 1179 1168 1180 - /* Defer layoutreturn until all lsegs are done */ 1169 + seq = lo->plh_return_seq; 1170 + iomode = lo->plh_return_iomode; 1171 + 1172 + /* Defer layoutreturn until all recalled lsegs are done */ 1181 1173 list_for_each_entry(s, &lo->plh_segs, pls_list) { 1174 + if (seq && pnfs_seqid_is_newer(s->pls_seq, seq)) 1175 + continue; 1176 + if (iomode != IOMODE_ANY && s->pls_range.iomode != 
iomode) 1177 + continue; 1182 1178 if (test_bit(NFS_LSEG_LAYOUTRETURN, &s->pls_flags)) 1183 1179 return false; 1184 1180 } ··· 1631 1609 (range->iomode != ls_range->iomode && 1632 1610 strict_iomode) || 1633 1611 !pnfs_lseg_range_intersecting(ls_range, range)) 1634 - return 0; 1612 + return false; 1635 1613 1636 1614 /* range1 covers only the first byte in the range */ 1637 1615 range1 = *range; ··· 1653 1631 1654 1632 list_for_each_entry(lseg, &lo->plh_segs, pls_list) { 1655 1633 if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) && 1656 - !test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags) && 1657 1634 pnfs_lseg_range_match(&lseg->pls_range, range, 1658 1635 strict_iomode)) { 1659 1636 ret = pnfs_get_lseg(lseg); ··· 1752 1731 TASK_UNINTERRUPTIBLE); 1753 1732 } 1754 1733 1734 + static void nfs_layoutget_begin(struct pnfs_layout_hdr *lo) 1735 + { 1736 + atomic_inc(&lo->plh_outstanding); 1737 + } 1738 + 1739 + static void nfs_layoutget_end(struct pnfs_layout_hdr *lo) 1740 + { 1741 + if (atomic_dec_and_test(&lo->plh_outstanding)) 1742 + wake_up_var(&lo->plh_outstanding); 1743 + } 1744 + 1755 1745 static void pnfs_clear_first_layoutget(struct pnfs_layout_hdr *lo) 1756 1746 { 1757 1747 unsigned long *bitlock = &lo->plh_flags; ··· 1823 1791 goto out; 1824 1792 } 1825 1793 1826 - if (iomode == IOMODE_READ && i_size_read(ino) == 0) { 1827 - trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg, 1828 - PNFS_UPDATE_LAYOUT_RD_ZEROLEN); 1829 - goto out; 1830 - } 1831 - 1832 1794 if (pnfs_within_mdsthreshold(ctx, ino, iomode)) { 1833 1795 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg, 1834 1796 PNFS_UPDATE_LAYOUT_MDSTHRESH); ··· 1854 1828 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg, 1855 1829 PNFS_UPDATE_LAYOUT_IO_TEST_FAIL); 1856 1830 goto out_unlock; 1831 + } 1832 + 1833 + /* 1834 + * If the layout segment list is empty, but there are outstanding 1835 + * layoutget calls, then they might be subject to a layoutrecall. 
1836 + */ 1837 + if (list_empty(&lo->plh_segs) && 1838 + atomic_read(&lo->plh_outstanding) != 0) { 1839 + spin_unlock(&ino->i_lock); 1840 + if (wait_var_event_killable(&lo->plh_outstanding, 1841 + atomic_read(&lo->plh_outstanding) == 0 1842 + || !list_empty(&lo->plh_segs))) 1843 + goto out_put_layout_hdr; 1844 + pnfs_put_layout_hdr(lo); 1845 + goto lookup_again; 1857 1846 } 1858 1847 1859 1848 lseg = pnfs_find_lseg(lo, &arg, strict_iomode); ··· 1944 1903 PNFS_UPDATE_LAYOUT_BLOCKED); 1945 1904 goto out_unlock; 1946 1905 } 1947 - atomic_inc(&lo->plh_outstanding); 1906 + nfs_layoutget_begin(lo); 1948 1907 spin_unlock(&ino->i_lock); 1949 1908 1950 1909 _add_to_server_list(lo, server); ··· 1961 1920 if (!lgp) { 1962 1921 trace_pnfs_update_layout(ino, pos, count, iomode, lo, NULL, 1963 1922 PNFS_UPDATE_LAYOUT_NOMEM); 1964 - atomic_dec(&lo->plh_outstanding); 1923 + nfs_layoutget_end(lo); 1965 1924 goto out_put_layout_hdr; 1966 1925 } 1967 1926 1968 1927 lseg = nfs4_proc_layoutget(lgp, &timeout); 1969 1928 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg, 1970 1929 PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET); 1971 - atomic_dec(&lo->plh_outstanding); 1930 + nfs_layoutget_end(lo); 1972 1931 if (IS_ERR(lseg)) { 1973 1932 switch(PTR_ERR(lseg)) { 1974 1933 case -EBUSY: ··· 1976 1935 lseg = NULL; 1977 1936 break; 1978 1937 case -ERECALLCONFLICT: 1979 - /* Huh? We hold no layouts, how is there a recall? 
*/ 1980 - if (first) { 1981 - lseg = NULL; 1982 - break; 1983 - } 1984 - /* Destroy the existing layout and start over */ 1985 - if (time_after(jiffies, giveup)) 1986 - pnfs_destroy_layout(NFS_I(ino)); 1987 - /* Fallthrough */ 1988 1938 case -EAGAIN: 1989 1939 break; 1990 1940 default: ··· 2054 2022 goto out_unlock; 2055 2023 if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET, &lo->plh_flags)) 2056 2024 goto out_unlock; 2057 - atomic_inc(&lo->plh_outstanding); 2025 + nfs_layoutget_begin(lo); 2058 2026 spin_unlock(&ino->i_lock); 2059 2027 _add_to_server_list(lo, NFS_SERVER(ino)); 2060 2028 return lo; ··· 2178 2146 } else 2179 2147 lo = NFS_I(lgp->args.inode)->layout; 2180 2148 2181 - if (read_seqcount_retry(&srv->nfs_client->cl_callback_count, 2182 - lgp->callback_count)) 2183 - return; 2184 2149 lseg = pnfs_layout_process(lgp); 2185 2150 if (!IS_ERR(lseg)) { 2186 2151 iomode = lgp->args.range.iomode; ··· 2192 2163 struct inode *inode = lgp->args.inode; 2193 2164 if (inode) { 2194 2165 struct pnfs_layout_hdr *lo = NFS_I(inode)->layout; 2195 - atomic_dec(&lo->plh_outstanding); 2196 2166 pnfs_clear_first_layoutget(lo); 2167 + nfs_layoutget_end(lo); 2197 2168 } 2198 2169 pnfs_layoutget_free(lgp); 2199 2170 } ··· 2267 2238 return ERR_PTR(-EAGAIN); 2268 2239 } 2269 2240 2241 + static int 2242 + mark_lseg_invalid_or_return(struct pnfs_layout_segment *lseg, 2243 + struct list_head *tmp_list) 2244 + { 2245 + if (!mark_lseg_invalid(lseg, tmp_list)) 2246 + return 0; 2247 + pnfs_cache_lseg_for_layoutreturn(lseg->pls_layout, lseg); 2248 + return 1; 2249 + } 2250 + 2270 2251 /** 2271 2252 * pnfs_mark_matching_lsegs_return - Free or return matching layout segments 2272 2253 * @lo: pointer to layout header 2273 2254 * @tmp_list: list header to be used with pnfs_free_lseg_list() 2274 2255 * @return_range: describe layout segment ranges to be returned 2256 + * @seq: stateid seqid to match 2275 2257 * 2276 2258 * This function is mainly intended for use by layoutrecall. 
It attempts 2277 2259 * to free the layout segment immediately, or else to mark it for return 2278 2260 * as soon as its reference count drops to zero. 2261 + * 2262 + * Returns 2263 + * - 0: a layoutreturn needs to be scheduled. 2264 + * - EBUSY: there are layout segment that are still in use. 2265 + * - ENOENT: there are no layout segments that need to be returned. 2279 2266 */ 2280 2267 int 2281 2268 pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo, ··· 2304 2259 2305 2260 dprintk("%s:Begin lo %p\n", __func__, lo); 2306 2261 2307 - if (list_empty(&lo->plh_segs)) 2308 - return 0; 2309 - 2310 2262 assert_spin_locked(&lo->plh_inode->i_lock); 2311 2263 2312 2264 list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list) ··· 2313 2271 lseg, lseg->pls_range.iomode, 2314 2272 lseg->pls_range.offset, 2315 2273 lseg->pls_range.length); 2316 - if (mark_lseg_invalid(lseg, tmp_list)) 2274 + if (mark_lseg_invalid_or_return(lseg, tmp_list)) 2317 2275 continue; 2318 2276 remaining++; 2319 2277 set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags); 2320 2278 } 2321 2279 2322 - if (remaining) 2280 + if (remaining) { 2323 2281 pnfs_set_plh_return_info(lo, return_range->iomode, seq); 2282 + return -EBUSY; 2283 + } 2324 2284 2325 - return remaining; 2285 + if (!list_empty(&lo->plh_return_segs)) { 2286 + pnfs_set_plh_return_info(lo, return_range->iomode, seq); 2287 + return 0; 2288 + } 2289 + 2290 + return -ENOENT; 2326 2291 } 2327 2292 2328 2293 void pnfs_error_mark_layout_for_return(struct inode *inode, ··· 2354 2305 * segments at hand when sending layoutreturn. See pnfs_put_lseg() 2355 2306 * for how it works. 2356 2307 */ 2357 - if (!pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs, &range, 0)) { 2308 + if (pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs, &range, 0) != -EBUSY) { 2358 2309 nfs4_stateid stateid; 2359 2310 enum pnfs_iomode iomode; 2360 2311
+5 -2
fs/nfs/pnfs.h
··· 259 259 bool is_recall); 260 260 int pnfs_destroy_layouts_byclid(struct nfs_client *clp, 261 261 bool is_recall); 262 - bool nfs4_refresh_layout_stateid(nfs4_stateid *dst, struct inode *inode); 262 + bool nfs4_layoutreturn_refresh_stateid(nfs4_stateid *dst, 263 + struct pnfs_layout_range *dst_range, 264 + struct inode *inode); 263 265 void pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo); 264 266 void pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, 265 267 const nfs4_stateid *new, ··· 782 780 { 783 781 } 784 782 785 - static inline bool nfs4_refresh_layout_stateid(nfs4_stateid *dst, 783 + static inline bool nfs4_layoutreturn_refresh_stateid(nfs4_stateid *dst, 784 + struct pnfs_layout_range *dst_range, 786 785 struct inode *inode) 787 786 { 788 787 return false;
+7 -9
fs/nfs/pnfs_nfs.c
··· 61 61 62 62 /* The generic layer is about to remove the req from the commit list. 63 63 * If this will make the bucket empty, it will need to put the lseg reference. 64 - * Note this must be called holding i_lock 64 + * Note this must be called holding nfsi->commit_mutex 65 65 */ 66 66 void 67 67 pnfs_generic_clear_request_commit(struct nfs_page *req, ··· 149 149 if (list_empty(&b->written)) { 150 150 freeme = b->wlseg; 151 151 b->wlseg = NULL; 152 - spin_unlock(&cinfo->inode->i_lock); 153 152 pnfs_put_lseg(freeme); 154 - spin_lock(&cinfo->inode->i_lock); 155 153 goto restart; 156 154 } 157 155 } ··· 165 167 LIST_HEAD(pages); 166 168 int i; 167 169 168 - spin_lock(&cinfo->inode->i_lock); 170 + mutex_lock(&NFS_I(cinfo->inode)->commit_mutex); 169 171 for (i = idx; i < fl_cinfo->nbuckets; i++) { 170 172 bucket = &fl_cinfo->buckets[i]; 171 173 if (list_empty(&bucket->committing)) ··· 175 177 list_for_each(pos, &bucket->committing) 176 178 cinfo->ds->ncommitting--; 177 179 list_splice_init(&bucket->committing, &pages); 178 - spin_unlock(&cinfo->inode->i_lock); 180 + mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex); 179 181 nfs_retry_commit(&pages, freeme, cinfo, i); 180 182 pnfs_put_lseg(freeme); 181 - spin_lock(&cinfo->inode->i_lock); 183 + mutex_lock(&NFS_I(cinfo->inode)->commit_mutex); 182 184 } 183 - spin_unlock(&cinfo->inode->i_lock); 185 + mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex); 184 186 } 185 187 186 188 static unsigned int ··· 220 222 struct list_head *pos; 221 223 222 224 bucket = &cinfo->ds->buckets[data->ds_commit_index]; 223 - spin_lock(&cinfo->inode->i_lock); 225 + mutex_lock(&NFS_I(cinfo->inode)->commit_mutex); 224 226 list_for_each(pos, &bucket->committing) 225 227 cinfo->ds->ncommitting--; 226 228 list_splice_init(&bucket->committing, pages); 227 229 data->lseg = bucket->clseg; 228 230 bucket->clseg = NULL; 229 - spin_unlock(&cinfo->inode->i_lock); 231 + mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex); 230 232 231 233 } 232 234
+2 -2
fs/nfs/super.c
··· 884 884 #endif 885 885 seq_printf(m, "\n"); 886 886 887 - rpc_print_iostats(m, nfss->client); 887 + rpc_clnt_show_stats(m, nfss->client); 888 888 889 889 return 0; 890 890 } ··· 2899 2899 if (!val) 2900 2900 return -EINVAL; 2901 2901 ret = kstrtoul(val, 0, &num); 2902 - if (ret == -EINVAL || num > NFS_CALLBACK_MAXPORTNR) 2902 + if (ret || num > NFS_CALLBACK_MAXPORTNR) 2903 2903 return -EINVAL; 2904 2904 *((unsigned int *)kp->arg) = num; 2905 2905 return 0;
+2
fs/nfs/write.c
··· 1406 1406 static void nfs_async_write_reschedule_io(struct nfs_pgio_header *hdr) 1407 1407 { 1408 1408 nfs_async_write_error(&hdr->pages); 1409 + filemap_fdatawrite_range(hdr->inode->i_mapping, hdr->args.offset, 1410 + hdr->args.offset + hdr->args.count - 1); 1409 1411 } 1410 1412 1411 1413 static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = {
+1
include/linux/nfs4.h
··· 535 535 NFSPROC4_CLNT_LAYOUTSTATS, 536 536 NFSPROC4_CLNT_CLONE, 537 537 NFSPROC4_CLNT_COPY, 538 + NFSPROC4_CLNT_OFFLOAD_CANCEL, 538 539 539 540 NFSPROC4_CLNT_LOOKUPP, 540 541 };
+11
include/linux/nfs_fs.h
··· 185 185 struct inode vfs_inode; 186 186 }; 187 187 188 + struct nfs4_copy_state { 189 + struct list_head copies; 190 + nfs4_stateid stateid; 191 + struct completion completion; 192 + uint64_t count; 193 + struct nfs_writeverf verf; 194 + int error; 195 + int flags; 196 + struct nfs4_state *parent_state; 197 + }; 198 + 188 199 /* 189 200 * Access bit flags 190 201 */
+3 -1
include/linux/nfs_fs_sb.h
··· 28 28 struct nfs_client { 29 29 refcount_t cl_count; 30 30 atomic_t cl_mds_count; 31 - seqcount_t cl_callback_count; 32 31 int cl_cons_state; /* current construction state (-ve: init error) */ 33 32 #define NFS_CS_READY 0 /* ready to be used */ 34 33 #define NFS_CS_INITING 1 /* busy initialising */ ··· 121 122 #endif 122 123 123 124 struct net *cl_net; 125 + struct list_head pending_cb_stateids; 124 126 }; 125 127 126 128 /* ··· 209 209 struct list_head state_owners_lru; 210 210 struct list_head layouts; 211 211 struct list_head delegations; 212 + struct list_head ss_copies; 212 213 213 214 unsigned long mig_gen; 214 215 unsigned long mig_status; ··· 257 256 #define NFS_CAP_LAYOUTSTATS (1U << 22) 258 257 #define NFS_CAP_CLONE (1U << 23) 259 258 #define NFS_CAP_COPY (1U << 24) 259 + #define NFS_CAP_OFFLOAD_CANCEL (1U << 25) 260 260 261 261 #endif
+14 -1
include/linux/nfs_xdr.h
··· 271 271 struct nfs4_layoutget_args args; 272 272 struct nfs4_layoutget_res res; 273 273 struct rpc_cred *cred; 274 - unsigned callback_count; 275 274 gfp_t gfp_flags; 276 275 }; 277 276 ··· 1388 1389 u64 dst_pos; 1389 1390 1390 1391 u64 count; 1392 + bool sync; 1391 1393 }; 1392 1394 1393 1395 struct nfs42_write_res { 1396 + nfs4_stateid stateid; 1394 1397 u64 count; 1395 1398 struct nfs_writeverf verifier; 1396 1399 }; ··· 1403 1402 bool consecutive; 1404 1403 bool synchronous; 1405 1404 struct nfs_commitres commit_res; 1405 + }; 1406 + 1407 + struct nfs42_offload_status_args { 1408 + struct nfs4_sequence_args osa_seq_args; 1409 + struct nfs_fh *osa_src_fh; 1410 + nfs4_stateid osa_stateid; 1411 + }; 1412 + 1413 + struct nfs42_offload_status_res { 1414 + struct nfs4_sequence_res osr_seq_res; 1415 + uint64_t osr_count; 1416 + int osr_status; 1406 1417 }; 1407 1418 1408 1419 struct nfs42_seek_args {
+3 -2
include/linux/sunrpc/auth.h
··· 125 125 struct module *owner; 126 126 rpc_authflavor_t au_flavor; /* flavor (RPC_AUTH_*) */ 127 127 char * au_name; 128 - struct rpc_auth * (*create)(struct rpc_auth_create_args *, struct rpc_clnt *); 128 + struct rpc_auth * (*create)(const struct rpc_auth_create_args *, 129 + struct rpc_clnt *); 129 130 void (*destroy)(struct rpc_auth *); 130 131 131 132 int (*hash_cred)(struct auth_cred *, unsigned int); ··· 175 174 struct rpc_cred * rpc_lookup_machine_cred(const char *service_name); 176 175 int rpcauth_register(const struct rpc_authops *); 177 176 int rpcauth_unregister(const struct rpc_authops *); 178 - struct rpc_auth * rpcauth_create(struct rpc_auth_create_args *, 177 + struct rpc_auth * rpcauth_create(const struct rpc_auth_create_args *, 179 178 struct rpc_clnt *); 180 179 void rpcauth_release(struct rpc_auth *); 181 180 rpc_authflavor_t rpcauth_get_pseudoflavor(rpc_authflavor_t,
+1
include/linux/sunrpc/clnt.h
··· 156 156 157 157 void rpc_shutdown_client(struct rpc_clnt *); 158 158 void rpc_release_client(struct rpc_clnt *); 159 + void rpc_task_release_transport(struct rpc_task *); 159 160 void rpc_task_release_client(struct rpc_task *); 160 161 161 162 int rpcb_create_local(struct net *);
+2 -2
include/linux/sunrpc/metrics.h
··· 82 82 struct rpc_iostats *); 83 83 void rpc_count_iostats_metrics(const struct rpc_task *, 84 84 struct rpc_iostats *); 85 - void rpc_print_iostats(struct seq_file *, struct rpc_clnt *); 85 + void rpc_clnt_show_stats(struct seq_file *, struct rpc_clnt *); 86 86 void rpc_free_iostats(struct rpc_iostats *); 87 87 88 88 #else /* CONFIG_PROC_FS */ ··· 95 95 { 96 96 } 97 97 98 - static inline void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt) {} 98 + static inline void rpc_clnt_show_stats(struct seq_file *seq, struct rpc_clnt *clnt) {} 99 99 static inline void rpc_free_iostats(struct rpc_iostats *stats) {} 100 100 101 101 #endif /* CONFIG_PROC_FS */
+2 -2
net/sunrpc/auth.c
··· 50 50 if (!val) 51 51 goto out_inval; 52 52 ret = kstrtoul(val, 0, &num); 53 - if (ret == -EINVAL) 53 + if (ret) 54 54 goto out_inval; 55 55 nbits = fls(num - 1); 56 56 if (nbits > MAX_HASHTABLE_BITS || nbits < 2) ··· 253 253 EXPORT_SYMBOL_GPL(rpcauth_list_flavors); 254 254 255 255 struct rpc_auth * 256 - rpcauth_create(struct rpc_auth_create_args *args, struct rpc_clnt *clnt) 256 + rpcauth_create(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt) 257 257 { 258 258 struct rpc_auth *auth; 259 259 const struct rpc_authops *ops;
+6 -5
net/sunrpc/auth_gss/auth_gss.c
··· 1016 1016 * parameters based on the input flavor (which must be a pseudoflavor) 1017 1017 */ 1018 1018 static struct gss_auth * 1019 - gss_create_new(struct rpc_auth_create_args *args, struct rpc_clnt *clnt) 1019 + gss_create_new(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt) 1020 1020 { 1021 1021 rpc_authflavor_t flavor = args->pseudoflavor; 1022 1022 struct gss_auth *gss_auth; ··· 1163 1163 * (which is guaranteed to last as long as any of its descendants). 1164 1164 */ 1165 1165 static struct gss_auth * 1166 - gss_auth_find_or_add_hashed(struct rpc_auth_create_args *args, 1166 + gss_auth_find_or_add_hashed(const struct rpc_auth_create_args *args, 1167 1167 struct rpc_clnt *clnt, 1168 1168 struct gss_auth *new) 1169 1169 { ··· 1200 1200 } 1201 1201 1202 1202 static struct gss_auth * 1203 - gss_create_hashed(struct rpc_auth_create_args *args, struct rpc_clnt *clnt) 1203 + gss_create_hashed(const struct rpc_auth_create_args *args, 1204 + struct rpc_clnt *clnt) 1204 1205 { 1205 1206 struct gss_auth *gss_auth; 1206 1207 struct gss_auth *new; ··· 1220 1219 } 1221 1220 1222 1221 static struct rpc_auth * 1223 - gss_create(struct rpc_auth_create_args *args, struct rpc_clnt *clnt) 1222 + gss_create(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt) 1224 1223 { 1225 1224 struct gss_auth *gss_auth; 1226 1225 struct rpc_xprt_switch *xps = rcu_access_pointer(clnt->cl_xpi.xpi_xpswitch); ··· 1603 1602 if (test_bit(RPCAUTH_CRED_NEGATIVE, &cred->cr_flags)) { 1604 1603 unsigned long now = jiffies; 1605 1604 unsigned long begin, expire; 1606 - struct gss_cred *gss_cred; 1605 + struct gss_cred *gss_cred; 1607 1606 1608 1607 gss_cred = container_of(cred, struct gss_cred, gc_base); 1609 1608 begin = gss_cred->gc_upcall_timestamp;
-1
net/sunrpc/auth_gss/gss_generic_token.c
··· 231 231 } 232 232 233 233 EXPORT_SYMBOL_GPL(g_verify_token_header); 234 -
-1
net/sunrpc/auth_gss/gss_krb5_crypto.c
··· 1081 1081 dprintk("%s: returning %d\n", __func__, err); 1082 1082 return err; 1083 1083 } 1084 -
-1
net/sunrpc/auth_gss/gss_krb5_keys.c
··· 324 324 err_out: 325 325 return ret; 326 326 } 327 -
-1
net/sunrpc/auth_gss/gss_krb5_seal.c
··· 229 229 return gss_get_mic_v2(ctx, text, token); 230 230 } 231 231 } 232 -
-1
net/sunrpc/auth_gss/gss_krb5_unseal.c
··· 225 225 return gss_verify_mic_v2(ctx, message_buffer, read_token); 226 226 } 227 227 } 228 -
-1
net/sunrpc/auth_gss/gss_krb5_wrap.c
··· 621 621 return gss_unwrap_kerberos_v2(kctx, offset, buf); 622 622 } 623 623 } 624 -
+1 -1
net/sunrpc/auth_gss/svcauth_gss.c
··· 1389 1389 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); 1390 1390 1391 1391 if (sn->use_gssp_proc) { 1392 - remove_proc_entry("use-gss-proxy", sn->proc_net_rpc); 1392 + remove_proc_entry("use-gss-proxy", sn->proc_net_rpc); 1393 1393 clear_gssp_clnt(sn); 1394 1394 } 1395 1395 }
+1 -1
net/sunrpc/auth_null.c
··· 19 19 static struct rpc_cred null_cred; 20 20 21 21 static struct rpc_auth * 22 - nul_create(struct rpc_auth_create_args *args, struct rpc_clnt *clnt) 22 + nul_create(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt) 23 23 { 24 24 atomic_inc(&null_auth.au_count); 25 25 return &null_auth;
+1 -1
net/sunrpc/auth_unix.c
··· 30 30 static const struct rpc_credops unix_credops; 31 31 32 32 static struct rpc_auth * 33 - unx_create(struct rpc_auth_create_args *args, struct rpc_clnt *clnt) 33 + unx_create(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt) 34 34 { 35 35 dprintk("RPC: creating UNIX authenticator for client %p\n", 36 36 clnt);
-1
net/sunrpc/backchannel_rqst.c
··· 362 362 wake_up(&bc_serv->sv_cb_waitq); 363 363 spin_unlock(&bc_serv->sv_cb_lock); 364 364 } 365 -
+21 -9
net/sunrpc/clnt.c
··· 892 892 /* 893 893 * Free an RPC client 894 894 */ 895 - static struct rpc_clnt * 895 + static struct rpc_clnt * 896 896 rpc_free_auth(struct rpc_clnt *clnt) 897 897 { 898 898 if (clnt->cl_auth == NULL) ··· 965 965 } 966 966 EXPORT_SYMBOL_GPL(rpc_bind_new_program); 967 967 968 + void rpc_task_release_transport(struct rpc_task *task) 969 + { 970 + struct rpc_xprt *xprt = task->tk_xprt; 971 + 972 + if (xprt) { 973 + task->tk_xprt = NULL; 974 + xprt_put(xprt); 975 + } 976 + } 977 + EXPORT_SYMBOL_GPL(rpc_task_release_transport); 978 + 968 979 void rpc_task_release_client(struct rpc_task *task) 969 980 { 970 981 struct rpc_clnt *clnt = task->tk_client; 971 - struct rpc_xprt *xprt = task->tk_xprt; 972 982 973 983 if (clnt != NULL) { 974 984 /* Remove from client task list */ ··· 989 979 990 980 rpc_release_client(clnt); 991 981 } 982 + rpc_task_release_transport(task); 983 + } 992 984 993 - if (xprt != NULL) { 994 - task->tk_xprt = NULL; 995 - 996 - xprt_put(xprt); 997 - } 985 + static 986 + void rpc_task_set_transport(struct rpc_task *task, struct rpc_clnt *clnt) 987 + { 988 + if (!task->tk_xprt) 989 + task->tk_xprt = xprt_iter_get_next(&clnt->cl_xpi); 998 990 } 999 991 1000 992 static ··· 1004 992 { 1005 993 1006 994 if (clnt != NULL) { 1007 - if (task->tk_xprt == NULL) 1008 - task->tk_xprt = xprt_iter_get_next(&clnt->cl_xpi); 995 + rpc_task_set_transport(task, clnt); 1009 996 task->tk_client = clnt; 1010 997 atomic_inc(&clnt->cl_count); 1011 998 if (clnt->cl_softrtry) ··· 1523 1512 clnt->cl_program->version[clnt->cl_vers]->counts[idx]++; 1524 1513 clnt->cl_stats->rpccnt++; 1525 1514 task->tk_action = call_reserve; 1515 + rpc_task_set_transport(task, clnt); 1526 1516 } 1527 1517 1528 1518 /*
+1 -1
net/sunrpc/rpcb_clnt.c
··· 213 213 sn->rpcb_local_clnt = clnt; 214 214 sn->rpcb_local_clnt4 = clnt4; 215 215 sn->rpcb_is_af_local = is_af_local ? 1 : 0; 216 - smp_wmb(); 216 + smp_wmb(); 217 217 sn->rpcb_users = 1; 218 218 dprintk("RPC: created new rpcb local clients (rpcb_local_clnt: " 219 219 "%p, rpcb_local_clnt4: %p) for net %x%s\n",
+39 -16
net/sunrpc/stats.c
··· 208 208 seq_printf(seq, "\t%12u: ", op); 209 209 } 210 210 211 - void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt) 211 + static void _add_rpc_iostats(struct rpc_iostats *a, struct rpc_iostats *b) 212 212 { 213 - struct rpc_iostats *stats = clnt->cl_metrics; 213 + a->om_ops += b->om_ops; 214 + a->om_ntrans += b->om_ntrans; 215 + a->om_timeouts += b->om_timeouts; 216 + a->om_bytes_sent += b->om_bytes_sent; 217 + a->om_bytes_recv += b->om_bytes_recv; 218 + a->om_queue = ktime_add(a->om_queue, b->om_queue); 219 + a->om_rtt = ktime_add(a->om_rtt, b->om_rtt); 220 + a->om_execute = ktime_add(a->om_execute, b->om_execute); 221 + } 222 + 223 + static void _print_rpc_iostats(struct seq_file *seq, struct rpc_iostats *stats, 224 + int op, const struct rpc_procinfo *procs) 225 + { 226 + _print_name(seq, op, procs); 227 + seq_printf(seq, "%lu %lu %lu %Lu %Lu %Lu %Lu %Lu\n", 228 + stats->om_ops, 229 + stats->om_ntrans, 230 + stats->om_timeouts, 231 + stats->om_bytes_sent, 232 + stats->om_bytes_recv, 233 + ktime_to_ms(stats->om_queue), 234 + ktime_to_ms(stats->om_rtt), 235 + ktime_to_ms(stats->om_execute)); 236 + } 237 + 238 + void rpc_clnt_show_stats(struct seq_file *seq, struct rpc_clnt *clnt) 239 + { 214 240 struct rpc_xprt *xprt; 215 241 unsigned int op, maxproc = clnt->cl_maxproc; 216 242 217 - if (!stats) 243 + if (!clnt->cl_metrics) 218 244 return; 219 245 220 246 seq_printf(seq, "\tRPC iostats version: %s ", RPC_IOSTATS_VERS); ··· 255 229 256 230 seq_printf(seq, "\tper-op statistics\n"); 257 231 for (op = 0; op < maxproc; op++) { 258 - struct rpc_iostats *metrics = &stats[op]; 259 - _print_name(seq, op, clnt->cl_procinfo); 260 - seq_printf(seq, "%lu %lu %lu %Lu %Lu %Lu %Lu %Lu\n", 261 - metrics->om_ops, 262 - metrics->om_ntrans, 263 - metrics->om_timeouts, 264 - metrics->om_bytes_sent, 265 - metrics->om_bytes_recv, 266 - ktime_to_ms(metrics->om_queue), 267 - ktime_to_ms(metrics->om_rtt), 268 - ktime_to_ms(metrics->om_execute)); 232 + struct 
rpc_iostats stats = {}; 233 + struct rpc_clnt *next = clnt; 234 + do { 235 + _add_rpc_iostats(&stats, &next->cl_metrics[op]); 236 + if (next == next->cl_parent) 237 + break; 238 + next = next->cl_parent; 239 + } while (next); 240 + _print_rpc_iostats(seq, &stats, op, clnt->cl_procinfo); 269 241 } 270 242 } 271 - EXPORT_SYMBOL_GPL(rpc_print_iostats); 243 + EXPORT_SYMBOL_GPL(rpc_clnt_show_stats); 272 244 273 245 /* 274 246 * Register/unregister RPC proc files ··· 334 310 dprintk("RPC: unregistering /proc/net/rpc\n"); 335 311 remove_proc_entry("rpc", net->proc_net); 336 312 } 337 -
-1
net/sunrpc/sunrpc.h
··· 57 57 int rpc_clients_notifier_register(void); 58 58 void rpc_clients_notifier_unregister(void); 59 59 #endif /* _NET_SUNRPC_SUNRPC_H */ 60 -
+1 -1
net/sunrpc/xprt.c
··· 880 880 __must_hold(&req->rq_xprt->recv_lock) 881 881 { 882 882 struct rpc_task *task = req->rq_task; 883 - 883 + 884 884 if (task && test_bit(RPC_TASK_MSG_RECV, &task->tk_runstate)) { 885 885 spin_unlock(&req->rq_xprt->recv_lock); 886 886 set_bit(RPC_TASK_MSG_RECV_WAIT, &task->tk_runstate);
+2 -3
net/sunrpc/xprtrdma/verbs.c
··· 280 280 ++xprt->rx_xprt.connect_cookie; 281 281 connstate = -ECONNABORTED; 282 282 connected: 283 - xprt->rx_buf.rb_credits = 1; 284 283 ep->rep_connected = connstate; 285 284 rpcrdma_conn_func(ep); 286 285 wake_up_all(&ep->rep_connect_wait); ··· 754 755 } 755 756 756 757 ep->rep_connected = 0; 758 + rpcrdma_post_recvs(r_xprt, true); 757 759 758 760 rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma); 759 761 if (rc) { ··· 772 772 } 773 773 774 774 dprintk("RPC: %s: connected\n", __func__); 775 - 776 - rpcrdma_post_recvs(r_xprt, true); 777 775 778 776 out: 779 777 if (rc) ··· 1169 1171 list_add(&req->rl_list, &buf->rb_send_bufs); 1170 1172 } 1171 1173 1174 + buf->rb_credits = 1; 1172 1175 buf->rb_posted_receives = 0; 1173 1176 INIT_LIST_HEAD(&buf->rb_recv_bufs); 1174 1177
-1
net/sunrpc/xprtsock.c
··· 3375 3375 max_slot_table_size, 0644); 3376 3376 module_param_named(udp_slot_table_entries, xprt_udp_slot_table_entries, 3377 3377 slot_table_size, 0644); 3378 -