Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'nfs-for-5.7-1' of git://git.linux-nfs.org/projects/trondmy/linux-nfs

Pull NFS client updates from Trond Myklebust:
"Highlights include:

Stable fixes:
- Fix a page leak in nfs_destroy_unlinked_subrequests()

- Fix use-after-free issues in nfs_pageio_add_request()

- Fix new mount code constant_table array definitions

- finish_automount() requires us to hold 2 refs to the mount record

Features:
- Improve the accuracy of telldir/seekdir by using 64-bit cookies
when possible.

- Allow one active RDMA connection and several zombie connections to
prevent blocking if the remote server is unresponsive.

- Limit the size of the NFS access cache by default

- Reduce the number of references to credentials that are taken by
NFS

- pNFS files and flexfiles drivers now support per-layout segment
COMMIT lists.

- Enable partial-file layout segments in the pNFS/flexfiles driver.

- Add support for CB_RECALL_ANY to the pNFS flexfiles layout type

- pNFS/flexfiles: Report NFS4ERR_DELAY and NFS4ERR_GRACE errors from
the DS using the layouterror mechanism.

Bugfixes and cleanups:
- SUNRPC: Fix krb5p regressions

- Don't specify NFS version in "UDP not supported" error

- nfsroot: set tcp as the default transport protocol

- pnfs: Return valid stateids in nfs_layout_find_inode_by_stateid()

- alloc_nfs_open_context() must use the file cred when available

- Fix locking when dereferencing the delegation cred

- Fix memory leaks in O_DIRECT when nfs_get_lock_context() fails

- Various cleanups of the NFS O_DIRECT commit code

- Clean up RDMA connect/disconnect

- Replace zero-length arrays with C99-style flexible arrays"

* tag 'nfs-for-5.7-1' of git://git.linux-nfs.org/projects/trondmy/linux-nfs: (86 commits)
NFS: Clean up process of marking inode stale.
SUNRPC: Don't start a timer on an already queued rpc task
NFS/pnfs: Reference the layout cred in pnfs_prepare_layoutreturn()
NFS/pnfs: Fix dereference of layout cred in pnfs_layoutcommit_inode()
NFS: Beware when dereferencing the delegation cred
NFS: Add a module parameter to set nfs_mountpoint_expiry_timeout
NFS: finish_automount() requires us to hold 2 refs to the mount record
NFS: Fix a few constant_table array definitions
NFS: Try to join page groups before an O_DIRECT retransmission
NFS: Refactor nfs_lock_and_join_requests()
NFS: Reverse the submission order of requests in __nfs_pageio_add_request()
NFS: Clean up nfs_lock_and_join_requests()
NFS: Remove the redundant function nfs_pgio_has_mirroring()
NFS: Fix memory leaks in nfs_pageio_stop_mirroring()
NFS: Fix a request reference leak in nfs_direct_write_clear_reqs()
NFS: Fix use-after-free issues in nfs_pageio_add_request()
NFS: Fix races nfs_page_group_destroy() vs nfs_destroy_unlinked_subrequests()
NFS: Fix a page leak in nfs_destroy_unlinked_subrequests()
NFS: Remove unused FLUSH_SYNC support in nfs_initiate_pgio()
pNFS/flexfiles: Specify the layout segment range in LAYOUTGET
...

Total: +2346 -1999
fs/nfs/blocklayout/blocklayout.c  +1 -1
···
 	err = ext_tree_remove(bl, true, 0, LLONG_MAX);
 	WARN_ON(err);
 
-	kfree(bl);
+	kfree_rcu(bl, bl_layout.plh_rcu);
 }
 
 static struct pnfs_layout_hdr *__bl_alloc_layout_hdr(struct inode *inode,
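
The kfree_rcu() conversion above defers freeing the layout header until after an RCU grace period, so lockless readers that still hold a pointer to it never touch freed memory. A minimal sketch of the idiom, using a made-up structure rather than the NFS types:

#include <linux/slab.h>
#include <linux/rcupdate.h>

struct demo_obj {
	int value;
	struct rcu_head rcu;		/* used to defer the free */
};

static void demo_release(struct demo_obj *obj)
{
	/* RCU readers may still be traversing a list that points at obj;
	 * kfree_rcu() queues the free until the current grace period ends. */
	kfree_rcu(obj, rcu);
}
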
fs/nfs/callback.h  +3 -1
···
 #define RCA4_TYPE_MASK_OBJ_LAYOUT_MAX 9
 #define RCA4_TYPE_MASK_OTHER_LAYOUT_MIN 12
 #define RCA4_TYPE_MASK_OTHER_LAYOUT_MAX 15
-#define RCA4_TYPE_MASK_ALL 0xf31f
+#define PNFS_FF_RCA4_TYPE_MASK_READ 16
+#define PNFS_FF_RCA4_TYPE_MASK_RW 17
+#define RCA4_TYPE_MASK_ALL 0x3f31f
 
 struct cb_recallanyargs {
 	uint32_t craa_objs_to_keep;
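
The new RCA4_TYPE_MASK_ALL value follows from the existing mask bits (0-4, 8-9 and 12-15 in the full header) plus the two new flexfiles bits 16 and 17. A quick check of the constant, written as a hypothetical compile-time assertion rather than anything in the patch:

#include <linux/bits.h>
#include <linux/build_bug.h>

/* bits 0-4 (delegation/layout types), bits 8-9 (obj layouts),
 * bits 12-15 ("other" layouts), plus new bits 16-17 for flexfiles */
static_assert((GENMASK(4, 0) | GENMASK(9, 8) | GENMASK(15, 12) |
	       BIT(16) | BIT(17)) == 0x3f31f);
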
fs/nfs/callback_proc.c  +37 -32
··· 121 121 */ 122 122 static struct inode *nfs_layout_find_inode_by_stateid(struct nfs_client *clp, 123 123 const nfs4_stateid *stateid) 124 + __must_hold(RCU) 124 125 { 125 126 struct nfs_server *server; 126 127 struct inode *inode; 127 128 struct pnfs_layout_hdr *lo; 128 129 130 + rcu_read_lock(); 129 131 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { 130 - list_for_each_entry(lo, &server->layouts, plh_layouts) { 132 + list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) { 133 + if (!pnfs_layout_is_valid(lo)) 134 + continue; 131 135 if (stateid != NULL && 132 136 !nfs4_stateid_match_other(stateid, &lo->plh_stateid)) 133 137 continue; 138 + if (!nfs_sb_active(server->super)) 139 + continue; 134 140 inode = igrab(lo->plh_inode); 135 - if (!inode) 136 - return ERR_PTR(-EAGAIN); 137 - if (!nfs_sb_active(inode->i_sb)) { 138 - rcu_read_unlock(); 139 - spin_unlock(&clp->cl_lock); 140 - iput(inode); 141 - spin_lock(&clp->cl_lock); 142 - rcu_read_lock(); 143 - return ERR_PTR(-EAGAIN); 144 - } 145 - return inode; 141 + rcu_read_unlock(); 142 + if (inode) 143 + return inode; 144 + nfs_sb_deactive(server->super); 145 + return ERR_PTR(-EAGAIN); 146 146 } 147 147 } 148 - 148 + rcu_read_unlock(); 149 149 return ERR_PTR(-ENOENT); 150 150 } 151 151 ··· 163 163 struct inode *inode; 164 164 struct pnfs_layout_hdr *lo; 165 165 166 + rcu_read_lock(); 166 167 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { 167 - list_for_each_entry(lo, &server->layouts, plh_layouts) { 168 + list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) { 168 169 nfsi = NFS_I(lo->plh_inode); 169 170 if (nfs_compare_fh(fh, &nfsi->fh)) 170 171 continue; 171 172 if (nfsi->layout != lo) 172 173 continue; 174 + if (!nfs_sb_active(server->super)) 175 + continue; 173 176 inode = igrab(lo->plh_inode); 174 - if (!inode) 175 - return ERR_PTR(-EAGAIN); 176 - if (!nfs_sb_active(inode->i_sb)) { 177 - rcu_read_unlock(); 178 - spin_unlock(&clp->cl_lock); 179 - iput(inode); 180 - spin_lock(&clp->cl_lock); 181 - rcu_read_lock(); 182 - return ERR_PTR(-EAGAIN); 183 - } 184 - return inode; 177 + rcu_read_unlock(); 178 + if (inode) 179 + return inode; 180 + nfs_sb_deactive(server->super); 181 + return ERR_PTR(-EAGAIN); 185 182 } 186 183 } 187 - 184 + rcu_read_unlock(); 188 185 return ERR_PTR(-ENOENT); 189 186 } 190 187 ··· 191 194 { 192 195 struct inode *inode; 193 196 194 - spin_lock(&clp->cl_lock); 195 - rcu_read_lock(); 196 197 inode = nfs_layout_find_inode_by_stateid(clp, stateid); 197 198 if (inode == ERR_PTR(-ENOENT)) 198 199 inode = nfs_layout_find_inode_by_fh(clp, fh); 199 - rcu_read_unlock(); 200 - spin_unlock(&clp->cl_lock); 201 - 202 200 return inode; 203 201 } 204 202 ··· 272 280 goto unlock; 273 281 } 274 282 275 - pnfs_set_layout_stateid(lo, &args->cbl_stateid, true); 283 + pnfs_set_layout_stateid(lo, &args->cbl_stateid, NULL, true); 276 284 switch (pnfs_mark_matching_lsegs_return(lo, &free_me_list, 277 285 &args->cbl_range, 278 286 be32_to_cpu(args->cbl_stateid.seqid))) { ··· 597 605 struct cb_recallanyargs *args = argp; 598 606 __be32 status; 599 607 fmode_t flags = 0; 608 + bool schedule_manager = false; 600 609 601 610 status = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION); 602 611 if (!cps->clp) /* set in cb_sequence */ ··· 620 627 621 628 if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_FILE_LAYOUT)) 622 629 pnfs_recall_all_layouts(cps->clp); 630 + 631 + if (args->craa_type_mask & BIT(PNFS_FF_RCA4_TYPE_MASK_READ)) { 632 + set_bit(NFS4CLNT_RECALL_ANY_LAYOUT_READ, &cps->clp->cl_state); 633 + 
schedule_manager = true; 634 + } 635 + if (args->craa_type_mask & BIT(PNFS_FF_RCA4_TYPE_MASK_RW)) { 636 + set_bit(NFS4CLNT_RECALL_ANY_LAYOUT_RW, &cps->clp->cl_state); 637 + schedule_manager = true; 638 + } 639 + if (schedule_manager) 640 + nfs4_schedule_state_manager(cps->clp); 641 + 623 642 out: 624 643 dprintk("%s: exit with status = %d\n", __func__, ntohl(status)); 625 644 return status;
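
Both lookup helpers are reworked to the same shape: the whole scan now runs under rcu_read_lock(), the superblock and inode are pinned before the lock is dropped, and the cl_lock juggling is gone. A rough, generic sketch of that lookup pattern (all names here are hypothetical, not the NFS code):

#include <linux/rculist.h>
#include <linux/refcount.h>
#include <linux/err.h>

struct demo_item {
	struct list_head link;
	int key;
	refcount_t ref;
};

/* Find an item on an RCU-protected list and take a reference to it
 * before leaving the read-side critical section. */
static struct demo_item *demo_find_and_get(struct list_head *head, int key)
{
	struct demo_item *item;

	rcu_read_lock();
	list_for_each_entry_rcu(item, head, link) {
		if (item->key != key)
			continue;
		if (!refcount_inc_not_zero(&item->ref))
			continue;	/* already being freed, skip it */
		rcu_read_unlock();
		return item;
	}
	rcu_read_unlock();
	return ERR_PTR(-ENOENT);
}
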
fs/nfs/delegation.c  +182 -171
··· 378 378 } 379 379 380 380 static void 381 + nfs_update_delegation_cred(struct nfs_delegation *delegation, 382 + const struct cred *cred) 383 + { 384 + const struct cred *old; 385 + 386 + if (cred_fscmp(delegation->cred, cred) != 0) { 387 + old = xchg(&delegation->cred, get_cred(cred)); 388 + put_cred(old); 389 + } 390 + } 391 + 392 + static void 381 393 nfs_update_inplace_delegation(struct nfs_delegation *delegation, 382 394 const struct nfs_delegation *update) 383 395 { ··· 397 385 delegation->stateid.seqid = update->stateid.seqid; 398 386 smp_wmb(); 399 387 delegation->type = update->type; 400 - if (test_and_clear_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) 388 + delegation->pagemod_limit = update->pagemod_limit; 389 + if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) { 390 + delegation->change_attr = update->change_attr; 391 + nfs_update_delegation_cred(delegation, update->cred); 392 + /* smp_mb__before_atomic() is implicit due to xchg() */ 393 + clear_bit(NFS_DELEGATION_REVOKED, &delegation->flags); 401 394 atomic_long_inc(&nfs_active_delegations); 395 + } 402 396 } 403 397 } 404 398 ··· 563 545 return ret; 564 546 } 565 547 548 + static int nfs_server_return_marked_delegations(struct nfs_server *server, 549 + void __always_unused *data) 550 + { 551 + struct nfs_delegation *delegation; 552 + struct nfs_delegation *prev; 553 + struct inode *inode; 554 + struct inode *place_holder = NULL; 555 + struct nfs_delegation *place_holder_deleg = NULL; 556 + int err = 0; 557 + 558 + restart: 559 + /* 560 + * To avoid quadratic looping we hold a reference 561 + * to an inode place_holder. Each time we restart, we 562 + * list delegation in the server from the delegations 563 + * of that inode. 564 + * prev is an RCU-protected pointer to a delegation which 565 + * wasn't marked for return and might be a good choice for 566 + * the next place_holder. 
567 + */ 568 + prev = NULL; 569 + delegation = NULL; 570 + rcu_read_lock(); 571 + if (place_holder) 572 + delegation = rcu_dereference(NFS_I(place_holder)->delegation); 573 + if (!delegation || delegation != place_holder_deleg) 574 + delegation = list_entry_rcu(server->delegations.next, 575 + struct nfs_delegation, super_list); 576 + list_for_each_entry_from_rcu(delegation, &server->delegations, super_list) { 577 + struct inode *to_put = NULL; 578 + 579 + if (test_bit(NFS_DELEGATION_INODE_FREEING, &delegation->flags)) 580 + continue; 581 + if (!nfs_delegation_need_return(delegation)) { 582 + if (nfs4_is_valid_delegation(delegation, 0)) 583 + prev = delegation; 584 + continue; 585 + } 586 + 587 + if (prev) { 588 + struct inode *tmp = nfs_delegation_grab_inode(prev); 589 + if (tmp) { 590 + to_put = place_holder; 591 + place_holder = tmp; 592 + place_holder_deleg = prev; 593 + } 594 + } 595 + 596 + inode = nfs_delegation_grab_inode(delegation); 597 + if (inode == NULL) { 598 + rcu_read_unlock(); 599 + iput(to_put); 600 + goto restart; 601 + } 602 + delegation = nfs_start_delegation_return_locked(NFS_I(inode)); 603 + rcu_read_unlock(); 604 + 605 + iput(to_put); 606 + 607 + err = nfs_end_delegation_return(inode, delegation, 0); 608 + iput(inode); 609 + cond_resched(); 610 + if (!err) 611 + goto restart; 612 + set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state); 613 + goto out; 614 + } 615 + rcu_read_unlock(); 616 + out: 617 + iput(place_holder); 618 + return err; 619 + } 620 + 566 621 /** 567 622 * nfs_client_return_marked_delegations - return previously marked delegations 568 623 * @clp: nfs_client to process ··· 648 557 */ 649 558 int nfs_client_return_marked_delegations(struct nfs_client *clp) 650 559 { 651 - struct nfs_delegation *delegation; 652 - struct nfs_delegation *prev; 653 - struct nfs_server *server; 654 - struct inode *inode; 655 - struct inode *place_holder = NULL; 656 - struct nfs_delegation *place_holder_deleg = NULL; 657 - int err = 0; 658 - 659 - restart: 660 - /* 661 - * To avoid quadratic looping we hold a reference 662 - * to an inode place_holder. Each time we restart, we 663 - * list nfs_servers from the server of that inode, and 664 - * delegation in the server from the delegations of that 665 - * inode. 666 - * prev is an RCU-protected pointer to a delegation which 667 - * wasn't marked for return and might be a good choice for 668 - * the next place_holder. 
669 - */ 670 - rcu_read_lock(); 671 - prev = NULL; 672 - if (place_holder) 673 - server = NFS_SERVER(place_holder); 674 - else 675 - server = list_entry_rcu(clp->cl_superblocks.next, 676 - struct nfs_server, client_link); 677 - list_for_each_entry_from_rcu(server, &clp->cl_superblocks, client_link) { 678 - delegation = NULL; 679 - if (place_holder && server == NFS_SERVER(place_holder)) 680 - delegation = rcu_dereference(NFS_I(place_holder)->delegation); 681 - if (!delegation || delegation != place_holder_deleg) 682 - delegation = list_entry_rcu(server->delegations.next, 683 - struct nfs_delegation, super_list); 684 - list_for_each_entry_from_rcu(delegation, &server->delegations, super_list) { 685 - struct inode *to_put = NULL; 686 - 687 - if (!nfs_delegation_need_return(delegation)) { 688 - prev = delegation; 689 - continue; 690 - } 691 - if (!nfs_sb_active(server->super)) 692 - break; /* continue in outer loop */ 693 - 694 - if (prev) { 695 - struct inode *tmp; 696 - 697 - tmp = nfs_delegation_grab_inode(prev); 698 - if (tmp) { 699 - to_put = place_holder; 700 - place_holder = tmp; 701 - place_holder_deleg = prev; 702 - } 703 - } 704 - 705 - inode = nfs_delegation_grab_inode(delegation); 706 - if (inode == NULL) { 707 - rcu_read_unlock(); 708 - if (to_put) 709 - iput(to_put); 710 - nfs_sb_deactive(server->super); 711 - goto restart; 712 - } 713 - delegation = nfs_start_delegation_return_locked(NFS_I(inode)); 714 - rcu_read_unlock(); 715 - 716 - if (to_put) 717 - iput(to_put); 718 - 719 - err = nfs_end_delegation_return(inode, delegation, 0); 720 - iput(inode); 721 - nfs_sb_deactive(server->super); 722 - cond_resched(); 723 - if (!err) 724 - goto restart; 725 - set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state); 726 - if (place_holder) 727 - iput(place_holder); 728 - return err; 729 - } 730 - } 731 - rcu_read_unlock(); 732 - if (place_holder) 733 - iput(place_holder); 734 - return 0; 560 + return nfs_client_for_each_server(clp, 561 + nfs_server_return_marked_delegations, NULL); 735 562 } 736 563 737 564 /** ··· 1092 1083 rcu_read_unlock(); 1093 1084 } 1094 1085 1086 + static int nfs_server_reap_unclaimed_delegations(struct nfs_server *server, 1087 + void __always_unused *data) 1088 + { 1089 + struct nfs_delegation *delegation; 1090 + struct inode *inode; 1091 + restart: 1092 + rcu_read_lock(); 1093 + restart_locked: 1094 + list_for_each_entry_rcu(delegation, &server->delegations, super_list) { 1095 + if (test_bit(NFS_DELEGATION_INODE_FREEING, 1096 + &delegation->flags) || 1097 + test_bit(NFS_DELEGATION_RETURNING, 1098 + &delegation->flags) || 1099 + test_bit(NFS_DELEGATION_NEED_RECLAIM, 1100 + &delegation->flags) == 0) 1101 + continue; 1102 + inode = nfs_delegation_grab_inode(delegation); 1103 + if (inode == NULL) 1104 + goto restart_locked; 1105 + delegation = nfs_start_delegation_return_locked(NFS_I(inode)); 1106 + rcu_read_unlock(); 1107 + if (delegation != NULL) { 1108 + if (nfs_detach_delegation(NFS_I(inode), delegation, 1109 + server) != NULL) 1110 + nfs_free_delegation(delegation); 1111 + /* Match nfs_start_delegation_return_locked */ 1112 + nfs_put_delegation(delegation); 1113 + } 1114 + iput(inode); 1115 + cond_resched(); 1116 + goto restart; 1117 + } 1118 + rcu_read_unlock(); 1119 + return 0; 1120 + } 1121 + 1095 1122 /** 1096 1123 * nfs_delegation_reap_unclaimed - reap unclaimed delegations after reboot recovery is done 1097 1124 * @clp: nfs_client to process ··· 1135 1090 */ 1136 1091 void nfs_delegation_reap_unclaimed(struct nfs_client *clp) 1137 1092 { 1138 - struct nfs_delegation 
*delegation; 1139 - struct nfs_server *server; 1140 - struct inode *inode; 1141 - 1142 - restart: 1143 - rcu_read_lock(); 1144 - list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { 1145 - list_for_each_entry_rcu(delegation, &server->delegations, 1146 - super_list) { 1147 - if (test_bit(NFS_DELEGATION_INODE_FREEING, 1148 - &delegation->flags) || 1149 - test_bit(NFS_DELEGATION_RETURNING, 1150 - &delegation->flags) || 1151 - test_bit(NFS_DELEGATION_NEED_RECLAIM, 1152 - &delegation->flags) == 0) 1153 - continue; 1154 - if (!nfs_sb_active(server->super)) 1155 - break; /* continue in outer loop */ 1156 - inode = nfs_delegation_grab_inode(delegation); 1157 - if (inode == NULL) { 1158 - rcu_read_unlock(); 1159 - nfs_sb_deactive(server->super); 1160 - goto restart; 1161 - } 1162 - delegation = nfs_start_delegation_return_locked(NFS_I(inode)); 1163 - rcu_read_unlock(); 1164 - if (delegation != NULL) { 1165 - if (nfs_detach_delegation(NFS_I(inode), delegation, 1166 - server) != NULL) 1167 - nfs_free_delegation(delegation); 1168 - /* Match nfs_start_delegation_return_locked */ 1169 - nfs_put_delegation(delegation); 1170 - } 1171 - iput(inode); 1172 - nfs_sb_deactive(server->super); 1173 - cond_resched(); 1174 - goto restart; 1175 - } 1176 - } 1177 - rcu_read_unlock(); 1093 + nfs_client_for_each_server(clp, nfs_server_reap_unclaimed_delegations, 1094 + NULL); 1178 1095 } 1179 1096 1180 1097 static inline bool nfs4_server_rebooted(const struct nfs_client *clp) ··· 1222 1215 nfs_remove_bad_delegation(inode, stateid); 1223 1216 } 1224 1217 1218 + static int nfs_server_reap_expired_delegations(struct nfs_server *server, 1219 + void __always_unused *data) 1220 + { 1221 + struct nfs_delegation *delegation; 1222 + struct inode *inode; 1223 + const struct cred *cred; 1224 + nfs4_stateid stateid; 1225 + restart: 1226 + rcu_read_lock(); 1227 + restart_locked: 1228 + list_for_each_entry_rcu(delegation, &server->delegations, super_list) { 1229 + if (test_bit(NFS_DELEGATION_INODE_FREEING, 1230 + &delegation->flags) || 1231 + test_bit(NFS_DELEGATION_RETURNING, 1232 + &delegation->flags) || 1233 + test_bit(NFS_DELEGATION_TEST_EXPIRED, 1234 + &delegation->flags) == 0) 1235 + continue; 1236 + inode = nfs_delegation_grab_inode(delegation); 1237 + if (inode == NULL) 1238 + goto restart_locked; 1239 + spin_lock(&delegation->lock); 1240 + cred = get_cred_rcu(delegation->cred); 1241 + nfs4_stateid_copy(&stateid, &delegation->stateid); 1242 + spin_unlock(&delegation->lock); 1243 + clear_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags); 1244 + rcu_read_unlock(); 1245 + nfs_delegation_test_free_expired(inode, &stateid, cred); 1246 + put_cred(cred); 1247 + if (!nfs4_server_rebooted(server->nfs_client)) { 1248 + iput(inode); 1249 + cond_resched(); 1250 + goto restart; 1251 + } 1252 + nfs_inode_mark_test_expired_delegation(server,inode); 1253 + iput(inode); 1254 + return -EAGAIN; 1255 + } 1256 + rcu_read_unlock(); 1257 + return 0; 1258 + } 1259 + 1225 1260 /** 1226 1261 * nfs_reap_expired_delegations - reap expired delegations 1227 1262 * @clp: nfs_client to process ··· 1275 1226 */ 1276 1227 void nfs_reap_expired_delegations(struct nfs_client *clp) 1277 1228 { 1278 - struct nfs_delegation *delegation; 1279 - struct nfs_server *server; 1280 - struct inode *inode; 1281 - const struct cred *cred; 1282 - nfs4_stateid stateid; 1283 - 1284 - restart: 1285 - rcu_read_lock(); 1286 - list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { 1287 - list_for_each_entry_rcu(delegation, &server->delegations, 
1288 - super_list) { 1289 - if (test_bit(NFS_DELEGATION_INODE_FREEING, 1290 - &delegation->flags) || 1291 - test_bit(NFS_DELEGATION_RETURNING, 1292 - &delegation->flags) || 1293 - test_bit(NFS_DELEGATION_TEST_EXPIRED, 1294 - &delegation->flags) == 0) 1295 - continue; 1296 - if (!nfs_sb_active(server->super)) 1297 - break; /* continue in outer loop */ 1298 - inode = nfs_delegation_grab_inode(delegation); 1299 - if (inode == NULL) { 1300 - rcu_read_unlock(); 1301 - nfs_sb_deactive(server->super); 1302 - goto restart; 1303 - } 1304 - cred = get_cred_rcu(delegation->cred); 1305 - nfs4_stateid_copy(&stateid, &delegation->stateid); 1306 - clear_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags); 1307 - rcu_read_unlock(); 1308 - nfs_delegation_test_free_expired(inode, &stateid, cred); 1309 - put_cred(cred); 1310 - if (nfs4_server_rebooted(clp)) { 1311 - nfs_inode_mark_test_expired_delegation(server,inode); 1312 - iput(inode); 1313 - nfs_sb_deactive(server->super); 1314 - return; 1315 - } 1316 - iput(inode); 1317 - nfs_sb_deactive(server->super); 1318 - cond_resched(); 1319 - goto restart; 1320 - } 1321 - } 1322 - rcu_read_unlock(); 1229 + nfs_client_for_each_server(clp, nfs_server_reap_expired_delegations, 1230 + NULL); 1323 1231 } 1324 1232 1325 1233 void nfs_inode_find_delegation_state_and_recover(struct inode *inode, ··· 1365 1359 { 1366 1360 struct nfs_inode *nfsi = NFS_I(inode); 1367 1361 struct nfs_delegation *delegation; 1368 - bool ret; 1362 + bool ret = false; 1369 1363 1370 1364 flags &= FMODE_READ|FMODE_WRITE; 1371 1365 rcu_read_lock(); 1372 1366 delegation = rcu_dereference(nfsi->delegation); 1367 + if (!delegation) 1368 + goto out; 1369 + spin_lock(&delegation->lock); 1373 1370 ret = nfs4_is_valid_delegation(delegation, flags); 1374 1371 if (ret) { 1375 1372 nfs4_stateid_copy(dst, &delegation->stateid); ··· 1380 1371 if (cred) 1381 1372 *cred = get_cred(delegation->cred); 1382 1373 } 1374 + spin_unlock(&delegation->lock); 1375 + out: 1383 1376 rcu_read_unlock(); 1384 1377 return ret; 1385 1378 }
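
The three scanners above become per-server callbacks handed to nfs_client_for_each_server(), a helper introduced elsewhere in this series. Roughly, such an iterator pins each superblock while the callback runs and keeps the current one active until it has moved past it, so the RCU list walk can safely resume; a sketch along those lines (not the exact upstream implementation):

/* Sketch: iterate the client's servers, calling fn() with the superblock
 * pinned; keep the current sb active until we advance so the list cursor
 * stays valid across the unlock/relock. */
static int demo_for_each_server(struct nfs_client *clp,
		int (*fn)(struct nfs_server *, void *), void *data)
{
	struct nfs_server *server, *last = NULL;
	int ret = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (!nfs_sb_active(server->super))
			continue;
		rcu_read_unlock();
		if (last)
			nfs_sb_deactive(last->super);
		last = server;
		ret = fn(server, data);
		if (ret)
			goto out;
		rcu_read_lock();
	}
	rcu_read_unlock();
out:
	if (last)
		nfs_sb_deactive(last->super);
	return ret;
}
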
fs/nfs/dir.c  +50 -29
··· 141 141 int size; 142 142 int eof_index; 143 143 u64 last_cookie; 144 - struct nfs_cache_array_entry array[0]; 144 + struct nfs_cache_array_entry array[]; 145 145 }; 146 146 147 - typedef int (*decode_dirent_t)(struct xdr_stream *, struct nfs_entry *, bool); 148 147 typedef struct { 149 148 struct file *file; 150 149 struct page *page; ··· 152 153 u64 *dir_cookie; 153 154 u64 last_cookie; 154 155 loff_t current_index; 155 - decode_dirent_t decode; 156 + loff_t prev_index; 156 157 157 158 unsigned long dir_verifier; 158 159 unsigned long timestamp; ··· 239 240 return ret; 240 241 } 241 242 243 + static inline 244 + int is_32bit_api(void) 245 + { 246 + #ifdef CONFIG_COMPAT 247 + return in_compat_syscall(); 248 + #else 249 + return (BITS_PER_LONG == 32); 250 + #endif 251 + } 252 + 253 + static 254 + bool nfs_readdir_use_cookie(const struct file *filp) 255 + { 256 + if ((filp->f_mode & FMODE_32BITHASH) || 257 + (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api())) 258 + return false; 259 + return true; 260 + } 261 + 242 262 static 243 263 int nfs_readdir_search_for_pos(struct nfs_cache_array *array, nfs_readdir_descriptor_t *desc) 244 264 { ··· 307 289 !nfs_readdir_inode_mapping_valid(nfsi)) { 308 290 ctx->duped = 0; 309 291 ctx->attr_gencount = nfsi->attr_gencount; 310 - } else if (new_pos < desc->ctx->pos) { 292 + } else if (new_pos < desc->prev_index) { 311 293 if (ctx->duped > 0 312 294 && ctx->dup_cookie == *desc->dir_cookie) { 313 295 if (printk_ratelimit()) { ··· 323 305 ctx->dup_cookie = *desc->dir_cookie; 324 306 ctx->duped = -1; 325 307 } 326 - desc->ctx->pos = new_pos; 308 + if (nfs_readdir_use_cookie(desc->file)) 309 + desc->ctx->pos = *desc->dir_cookie; 310 + else 311 + desc->ctx->pos = new_pos; 312 + desc->prev_index = new_pos; 327 313 desc->cache_entry_index = i; 328 314 return 0; 329 315 } ··· 398 376 static int xdr_decode(nfs_readdir_descriptor_t *desc, 399 377 struct nfs_entry *entry, struct xdr_stream *xdr) 400 378 { 379 + struct inode *inode = file_inode(desc->file); 401 380 int error; 402 381 403 - error = desc->decode(xdr, entry, desc->plus); 382 + error = NFS_PROTO(inode)->decode_dirent(xdr, entry, desc->plus); 404 383 if (error) 405 384 return error; 406 385 entry->fattr->time_start = desc->timestamp; ··· 779 756 780 757 if (desc->page_index == 0) { 781 758 desc->current_index = 0; 759 + desc->prev_index = 0; 782 760 desc->last_cookie = 0; 783 761 } 784 762 do { ··· 810 786 desc->eof = true; 811 787 break; 812 788 } 813 - desc->ctx->pos++; 814 789 if (i < (array->size-1)) 815 790 *desc->dir_cookie = array->array[i+1].cookie; 816 791 else 817 792 *desc->dir_cookie = array->last_cookie; 793 + if (nfs_readdir_use_cookie(file)) 794 + desc->ctx->pos = *desc->dir_cookie; 795 + else 796 + desc->ctx->pos++; 818 797 if (ctx->duped != 0) 819 798 ctx->duped = 1; 820 799 } ··· 887 860 { 888 861 struct dentry *dentry = file_dentry(file); 889 862 struct inode *inode = d_inode(dentry); 890 - nfs_readdir_descriptor_t my_desc, 891 - *desc = &my_desc; 892 863 struct nfs_open_dir_context *dir_ctx = file->private_data; 864 + nfs_readdir_descriptor_t my_desc = { 865 + .file = file, 866 + .ctx = ctx, 867 + .dir_cookie = &dir_ctx->dir_cookie, 868 + .plus = nfs_use_readdirplus(inode, ctx), 869 + }, 870 + *desc = &my_desc; 893 871 int res = 0; 894 872 895 873 dfprintk(FILE, "NFS: readdir(%pD2) starting at cookie %llu\n", ··· 907 875 * to either find the entry with the appropriate number or 908 876 * revalidate the cookie. 
909 877 */ 910 - memset(desc, 0, sizeof(*desc)); 911 - 912 - desc->file = file; 913 - desc->ctx = ctx; 914 - desc->dir_cookie = &dir_ctx->dir_cookie; 915 - desc->decode = NFS_PROTO(inode)->decode_dirent; 916 - desc->plus = nfs_use_readdirplus(inode, ctx); 917 - 918 878 if (ctx->pos == 0 || nfs_attribute_cache_expired(inode)) 919 879 res = nfs_revalidate_mapping(inode, file->f_mapping); 920 880 if (res < 0) ··· 978 954 } 979 955 if (offset != filp->f_pos) { 980 956 filp->f_pos = offset; 981 - dir_ctx->dir_cookie = 0; 957 + if (nfs_readdir_use_cookie(filp)) 958 + dir_ctx->dir_cookie = offset; 959 + else 960 + dir_ctx->dir_cookie = 0; 982 961 dir_ctx->duped = 0; 983 962 } 984 963 inode_unlock(inode); ··· 2309 2282 static LIST_HEAD(nfs_access_lru_list); 2310 2283 static atomic_long_t nfs_access_nr_entries; 2311 2284 2312 - static unsigned long nfs_access_max_cachesize = ULONG_MAX; 2285 + static unsigned long nfs_access_max_cachesize = 4*1024*1024; 2313 2286 module_param(nfs_access_max_cachesize, ulong, 0644); 2314 2287 MODULE_PARM_DESC(nfs_access_max_cachesize, "NFS access maximum total cache length"); 2315 2288 ··· 2669 2642 status = NFS_PROTO(inode)->access(inode, &cache); 2670 2643 if (status != 0) { 2671 2644 if (status == -ESTALE) { 2672 - nfs_zap_caches(inode); 2673 2645 if (!S_ISDIR(inode->i_mode)) 2674 - set_bit(NFS_INO_STALE, &NFS_I(inode)->flags); 2646 + nfs_set_inode_stale(inode); 2647 + else 2648 + nfs_zap_caches(inode); 2675 2649 } 2676 2650 goto out; 2677 2651 } ··· 2760 2732 if (!NFS_PROTO(inode)->access) 2761 2733 goto out_notsup; 2762 2734 2763 - /* Always try fast lookups first */ 2764 - rcu_read_lock(); 2765 - res = nfs_do_access(inode, cred, mask|MAY_NOT_BLOCK); 2766 - rcu_read_unlock(); 2767 - if (res == -ECHILD && !(mask & MAY_NOT_BLOCK)) { 2768 - /* Fast lookup failed, try the slow way */ 2769 - res = nfs_do_access(inode, cred, mask); 2770 - } 2735 + res = nfs_do_access(inode, cred, mask); 2771 2736 out: 2772 2737 if (!res && (mask & MAY_EXEC)) 2773 2738 res = nfs_execute_ok(inode, mask);
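
With this change a directory opened without the 32-bit-hash mode reports a position derived from the raw NFS cookie through f_pos, so offsets saved with telldir() survive concurrent directory modification much better. A small userspace illustration of the interface that benefits (a hypothetical program; /mnt/nfs/dir is a made-up path):

#include <dirent.h>
#include <stdio.h>

int main(void)
{
	DIR *dir = opendir("/mnt/nfs/dir");	/* hypothetical NFS directory */
	struct dirent *ent;
	long pos;

	if (!dir)
		return 1;
	readdir(dir);
	readdir(dir);
	pos = telldir(dir);	/* with this series, based on the NFS cookie */
	ent = readdir(dir);
	printf("next after mark: %s\n", ent ? ent->d_name : "(end)");
	seekdir(dir, pos);	/* should resume at exactly the same entry */
	ent = readdir(dir);
	printf("after seekdir:   %s\n", ent ? ent->d_name : "(end)");
	closedir(dir);
	return 0;
}
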
fs/nfs/direct.c  +69 -128
··· 94 94 #define NFS_ODIRECT_RESCHED_WRITES (2) /* write verification failed */ 95 95 /* for read */ 96 96 #define NFS_ODIRECT_SHOULD_DIRTY (3) /* dirty user-space page after read */ 97 - struct nfs_writeverf verf; /* unstable write verifier */ 97 + #define NFS_ODIRECT_DONE INT_MAX /* write verification failed */ 98 98 }; 99 99 100 100 static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops; ··· 151 151 dreq->count = dreq_len; 152 152 } 153 153 154 - /* 155 - * nfs_direct_select_verf - select the right verifier 156 - * @dreq - direct request possibly spanning multiple servers 157 - * @ds_clp - nfs_client of data server or NULL if MDS / non-pnfs 158 - * @commit_idx - commit bucket index for the DS 159 - * 160 - * returns the correct verifier to use given the role of the server 161 - */ 162 - static struct nfs_writeverf * 163 - nfs_direct_select_verf(struct nfs_direct_req *dreq, 164 - struct nfs_client *ds_clp, 165 - int commit_idx) 166 - { 167 - struct nfs_writeverf *verfp = &dreq->verf; 168 - 169 - #ifdef CONFIG_NFS_V4_1 170 - /* 171 - * pNFS is in use, use the DS verf except commit_through_mds is set 172 - * for layout segment where nbuckets is zero. 173 - */ 174 - if (ds_clp && dreq->ds_cinfo.nbuckets > 0) { 175 - if (commit_idx >= 0 && commit_idx < dreq->ds_cinfo.nbuckets) 176 - verfp = &dreq->ds_cinfo.buckets[commit_idx].direct_verf; 177 - else 178 - WARN_ON_ONCE(1); 179 - } 180 - #endif 181 - return verfp; 182 - } 183 - 184 - 185 - /* 186 - * nfs_direct_set_hdr_verf - set the write/commit verifier 187 - * @dreq - direct request possibly spanning multiple servers 188 - * @hdr - pageio header to validate against previously seen verfs 189 - * 190 - * Set the server's (MDS or DS) "seen" verifier 191 - */ 192 - static void nfs_direct_set_hdr_verf(struct nfs_direct_req *dreq, 193 - struct nfs_pgio_header *hdr) 194 - { 195 - struct nfs_writeverf *verfp; 196 - 197 - verfp = nfs_direct_select_verf(dreq, hdr->ds_clp, hdr->ds_commit_idx); 198 - WARN_ON_ONCE(verfp->committed >= 0); 199 - memcpy(verfp, &hdr->verf, sizeof(struct nfs_writeverf)); 200 - WARN_ON_ONCE(verfp->committed < 0); 201 - } 202 - 203 - static int nfs_direct_cmp_verf(const struct nfs_writeverf *v1, 204 - const struct nfs_writeverf *v2) 205 - { 206 - return nfs_write_verifier_cmp(&v1->verifier, &v2->verifier); 207 - } 208 - 209 - /* 210 - * nfs_direct_cmp_hdr_verf - compare verifier for pgio header 211 - * @dreq - direct request possibly spanning multiple servers 212 - * @hdr - pageio header to validate against previously seen verf 213 - * 214 - * set the server's "seen" verf if not initialized. 
215 - * returns result of comparison between @hdr->verf and the "seen" 216 - * verf of the server used by @hdr (DS or MDS) 217 - */ 218 - static int nfs_direct_set_or_cmp_hdr_verf(struct nfs_direct_req *dreq, 219 - struct nfs_pgio_header *hdr) 220 - { 221 - struct nfs_writeverf *verfp; 222 - 223 - verfp = nfs_direct_select_verf(dreq, hdr->ds_clp, hdr->ds_commit_idx); 224 - if (verfp->committed < 0) { 225 - nfs_direct_set_hdr_verf(dreq, hdr); 226 - return 0; 227 - } 228 - return nfs_direct_cmp_verf(verfp, &hdr->verf); 229 - } 230 - 231 - /* 232 - * nfs_direct_cmp_commit_data_verf - compare verifier for commit data 233 - * @dreq - direct request possibly spanning multiple servers 234 - * @data - commit data to validate against previously seen verf 235 - * 236 - * returns result of comparison between @data->verf and the verf of 237 - * the server used by @data (DS or MDS) 238 - */ 239 - static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq, 240 - struct nfs_commit_data *data) 241 - { 242 - struct nfs_writeverf *verfp; 243 - 244 - verfp = nfs_direct_select_verf(dreq, data->ds_clp, 245 - data->ds_commit_index); 246 - 247 - /* verifier not set so always fail */ 248 - if (verfp->committed < 0 || data->res.verf->committed <= NFS_UNSTABLE) 249 - return 1; 250 - 251 - return nfs_direct_cmp_verf(verfp, data->res.verf); 252 - } 253 - 254 154 /** 255 155 * nfs_direct_IO - NFS address space operation for direct I/O 256 156 * @iocb: target I/O control block ··· 205 305 kref_get(&dreq->kref); 206 306 init_completion(&dreq->completion); 207 307 INIT_LIST_HEAD(&dreq->mds_cinfo.list); 208 - dreq->verf.committed = NFS_INVALID_STABLE_HOW; /* not set yet */ 308 + pnfs_init_ds_commit_info(&dreq->ds_cinfo); 209 309 INIT_WORK(&dreq->work, nfs_direct_write_schedule_work); 210 310 spin_lock_init(&dreq->lock); 211 311 ··· 216 316 { 217 317 struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref); 218 318 219 - nfs_free_pnfs_ds_cinfo(&dreq->ds_cinfo); 319 + pnfs_release_ds_info(&dreq->ds_cinfo, dreq->inode); 220 320 if (dreq->l_ctx != NULL) 221 321 nfs_put_lock_context(dreq->l_ctx); 222 322 if (dreq->ctx != NULL) ··· 471 571 l_ctx = nfs_get_lock_context(dreq->ctx); 472 572 if (IS_ERR(l_ctx)) { 473 573 result = PTR_ERR(l_ctx); 574 + nfs_direct_req_release(dreq); 474 575 goto out_release; 475 576 } 476 577 dreq->l_ctx = l_ctx; ··· 506 605 } 507 606 508 607 static void 608 + nfs_direct_join_group(struct list_head *list, struct inode *inode) 609 + { 610 + struct nfs_page *req, *next; 611 + 612 + list_for_each_entry(req, list, wb_list) { 613 + if (req->wb_head != req || req->wb_this_page == req) 614 + continue; 615 + for (next = req->wb_this_page; 616 + next != req->wb_head; 617 + next = next->wb_this_page) { 618 + nfs_list_remove_request(next); 619 + nfs_release_request(next); 620 + } 621 + nfs_join_page_group(req, inode); 622 + } 623 + } 624 + 625 + static void 509 626 nfs_direct_write_scan_commit_list(struct inode *inode, 510 627 struct list_head *list, 511 628 struct nfs_commit_info *cinfo) 512 629 { 513 630 mutex_lock(&NFS_I(cinfo->inode)->commit_mutex); 514 - #ifdef CONFIG_NFS_V4_1 515 - if (cinfo->ds != NULL && cinfo->ds->nwritten != 0) 516 - NFS_SERVER(inode)->pnfs_curr_ld->recover_commit_reqs(list, cinfo); 517 - #endif 631 + pnfs_recover_commit_reqs(list, cinfo); 518 632 nfs_scan_commit_list(&cinfo->mds->list, list, cinfo, 0); 519 633 mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex); 520 634 } ··· 545 629 nfs_init_cinfo_from_dreq(&cinfo, dreq); 546 630 
nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo); 547 631 632 + nfs_direct_join_group(&reqs, dreq->inode); 633 + 548 634 dreq->count = 0; 549 635 dreq->max_count = 0; 550 636 list_for_each_entry(req, &reqs, wb_list) 551 637 dreq->max_count += req->wb_bytes; 552 - dreq->verf.committed = NFS_INVALID_STABLE_HOW; 553 638 nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo); 554 639 get_dreq(dreq); 555 640 ··· 587 670 588 671 static void nfs_direct_commit_complete(struct nfs_commit_data *data) 589 672 { 673 + const struct nfs_writeverf *verf = data->res.verf; 590 674 struct nfs_direct_req *dreq = data->dreq; 591 675 struct nfs_commit_info cinfo; 592 676 struct nfs_page *req; 593 677 int status = data->task.tk_status; 594 678 679 + if (status < 0) { 680 + /* Errors in commit are fatal */ 681 + dreq->error = status; 682 + dreq->max_count = 0; 683 + dreq->count = 0; 684 + dreq->flags = NFS_ODIRECT_DONE; 685 + } else if (dreq->flags == NFS_ODIRECT_DONE) 686 + status = dreq->error; 687 + 595 688 nfs_init_cinfo_from_dreq(&cinfo, dreq); 596 - if (status < 0 || nfs_direct_cmp_commit_data_verf(dreq, data)) 597 - dreq->flags = NFS_ODIRECT_RESCHED_WRITES; 598 689 599 690 while (!list_empty(&data->pages)) { 600 691 req = nfs_list_entry(data->pages.next); 601 692 nfs_list_remove_request(req); 602 - if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) { 693 + if (status >= 0 && !nfs_write_match_verf(verf, req)) { 694 + dreq->flags = NFS_ODIRECT_RESCHED_WRITES; 603 695 /* 604 696 * Despite the reboot, the write was successful, 605 697 * so reset wb_nio. 606 698 */ 607 699 req->wb_nio = 0; 608 - /* Note the rewrite will go through mds */ 609 700 nfs_mark_request_commit(req, NULL, &cinfo, 0); 610 - } else 701 + } else /* Error or match */ 611 702 nfs_release_request(req); 612 703 nfs_unlock_and_release_request(req); 613 704 } ··· 630 705 struct nfs_direct_req *dreq = cinfo->dreq; 631 706 632 707 spin_lock(&dreq->lock); 633 - dreq->flags = NFS_ODIRECT_RESCHED_WRITES; 708 + if (dreq->flags != NFS_ODIRECT_DONE) 709 + dreq->flags = NFS_ODIRECT_RESCHED_WRITES; 634 710 spin_unlock(&dreq->lock); 635 711 nfs_mark_request_commit(req, NULL, cinfo, 0); 636 712 } ··· 654 728 nfs_direct_write_reschedule(dreq); 655 729 } 656 730 731 + static void nfs_direct_write_clear_reqs(struct nfs_direct_req *dreq) 732 + { 733 + struct nfs_commit_info cinfo; 734 + struct nfs_page *req; 735 + LIST_HEAD(reqs); 736 + 737 + nfs_init_cinfo_from_dreq(&cinfo, dreq); 738 + nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo); 739 + 740 + while (!list_empty(&reqs)) { 741 + req = nfs_list_entry(reqs.next); 742 + nfs_list_remove_request(req); 743 + nfs_release_request(req); 744 + nfs_unlock_and_release_request(req); 745 + } 746 + } 747 + 657 748 static void nfs_direct_write_schedule_work(struct work_struct *work) 658 749 { 659 750 struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work); ··· 685 742 nfs_direct_write_reschedule(dreq); 686 743 break; 687 744 default: 745 + nfs_direct_write_clear_reqs(dreq); 688 746 nfs_zap_mapping(dreq->inode, dreq->inode->i_mapping); 689 747 nfs_direct_complete(dreq); 690 748 } ··· 712 768 } 713 769 714 770 nfs_direct_count_bytes(dreq, hdr); 715 - if (hdr->good_bytes != 0) { 716 - if (nfs_write_need_commit(hdr)) { 717 - if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) 718 - request_commit = true; 719 - else if (dreq->flags == 0) { 720 - nfs_direct_set_hdr_verf(dreq, hdr); 721 - request_commit = true; 722 - dreq->flags = NFS_ODIRECT_DO_COMMIT; 723 - } else if (dreq->flags == 
NFS_ODIRECT_DO_COMMIT) { 724 - request_commit = true; 725 - if (nfs_direct_set_or_cmp_hdr_verf(dreq, hdr)) 726 - dreq->flags = 727 - NFS_ODIRECT_RESCHED_WRITES; 728 - } 771 + if (hdr->good_bytes != 0 && nfs_write_need_commit(hdr)) { 772 + switch (dreq->flags) { 773 + case 0: 774 + dreq->flags = NFS_ODIRECT_DO_COMMIT; 775 + request_commit = true; 776 + break; 777 + case NFS_ODIRECT_RESCHED_WRITES: 778 + case NFS_ODIRECT_DO_COMMIT: 779 + request_commit = true; 729 780 } 730 781 } 731 782 spin_unlock(&dreq->lock); ··· 929 990 l_ctx = nfs_get_lock_context(dreq->ctx); 930 991 if (IS_ERR(l_ctx)) { 931 992 result = PTR_ERR(l_ctx); 993 + nfs_direct_req_release(dreq); 932 994 goto out_release; 933 995 } 934 996 dreq->l_ctx = l_ctx; 935 997 if (!is_sync_kiocb(iocb)) 936 998 dreq->iocb = iocb; 999 + pnfs_init_ds_commit_info_ops(&dreq->ds_cinfo, inode); 937 1000 938 1001 nfs_start_io_direct(inode); 939 1002
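
The commit completion path now keeps requests whose stored write verifier matches the one returned by the COMMIT and only reschedules the mismatches. The comparison itself boils down to a byte-wise check of the opaque verifier; a generic sketch (hypothetical names, not the series' helper):

#include <linux/string.h>
#include <linux/nfs4.h>			/* NFS4_VERIFIER_SIZE */

struct demo_verifier {
	char data[NFS4_VERIFIER_SIZE];	/* opaque cookie from the server */
};

/* A request only counts as stably committed if the server handed back the
 * same verifier that was recorded when the data was originally written. */
static bool demo_verf_matches(const struct demo_verifier *seen,
			      const struct demo_verifier *committed)
{
	return memcmp(seen->data, committed->data, NFS4_VERIFIER_SIZE) == 0;
}
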
fs/nfs/filelayout/filelayout.c  +53 -112
··· 49 49 MODULE_DESCRIPTION("The NFSv4 file layout driver"); 50 50 51 51 #define FILELAYOUT_POLL_RETRY_MAX (15*HZ) 52 + static const struct pnfs_commit_ops filelayout_commit_ops; 52 53 53 54 static loff_t 54 55 filelayout_get_dense_offset(struct nfs4_filelayout_segment *flseg, ··· 751 750 /* This assumes a single RW lseg */ 752 751 if (lseg->pls_range.iomode == IOMODE_RW) { 753 752 struct nfs4_filelayout *flo; 753 + struct inode *inode; 754 754 755 755 flo = FILELAYOUT_FROM_HDR(lseg->pls_layout); 756 - flo->commit_info.nbuckets = 0; 757 - kfree(flo->commit_info.buckets); 758 - flo->commit_info.buckets = NULL; 756 + inode = flo->generic_hdr.plh_inode; 757 + spin_lock(&inode->i_lock); 758 + pnfs_generic_ds_cinfo_release_lseg(&flo->commit_info, lseg); 759 + spin_unlock(&inode->i_lock); 759 760 } 760 761 _filelayout_free_lseg(fl); 761 - } 762 - 763 - static int 764 - filelayout_alloc_commit_info(struct pnfs_layout_segment *lseg, 765 - struct nfs_commit_info *cinfo, 766 - gfp_t gfp_flags) 767 - { 768 - struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg); 769 - struct pnfs_commit_bucket *buckets; 770 - int size, i; 771 - 772 - if (fl->commit_through_mds) 773 - return 0; 774 - 775 - size = (fl->stripe_type == STRIPE_SPARSE) ? 776 - fl->dsaddr->ds_num : fl->dsaddr->stripe_count; 777 - 778 - if (cinfo->ds->nbuckets >= size) { 779 - /* This assumes there is only one IOMODE_RW lseg. What 780 - * we really want to do is have a layout_hdr level 781 - * dictionary of <multipath_list4, fh> keys, each 782 - * associated with a struct list_head, populated by calls 783 - * to filelayout_write_pagelist(). 784 - * */ 785 - return 0; 786 - } 787 - 788 - buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket), 789 - gfp_flags); 790 - if (!buckets) 791 - return -ENOMEM; 792 - for (i = 0; i < size; i++) { 793 - INIT_LIST_HEAD(&buckets[i].written); 794 - INIT_LIST_HEAD(&buckets[i].committing); 795 - /* mark direct verifier as unset */ 796 - buckets[i].direct_verf.committed = NFS_INVALID_STABLE_HOW; 797 - } 798 - 799 - spin_lock(&cinfo->inode->i_lock); 800 - if (cinfo->ds->nbuckets >= size) 801 - goto out; 802 - for (i = 0; i < cinfo->ds->nbuckets; i++) { 803 - list_splice(&cinfo->ds->buckets[i].written, 804 - &buckets[i].written); 805 - list_splice(&cinfo->ds->buckets[i].committing, 806 - &buckets[i].committing); 807 - buckets[i].direct_verf.committed = 808 - cinfo->ds->buckets[i].direct_verf.committed; 809 - buckets[i].wlseg = cinfo->ds->buckets[i].wlseg; 810 - buckets[i].clseg = cinfo->ds->buckets[i].clseg; 811 - } 812 - swap(cinfo->ds->buckets, buckets); 813 - cinfo->ds->nbuckets = size; 814 - out: 815 - spin_unlock(&cinfo->inode->i_lock); 816 - kfree(buckets); 817 - return 0; 818 762 } 819 763 820 764 static struct pnfs_layout_segment * ··· 884 938 filelayout_pg_init_write(struct nfs_pageio_descriptor *pgio, 885 939 struct nfs_page *req) 886 940 { 887 - struct nfs_commit_info cinfo; 888 - int status; 889 - 890 941 pnfs_generic_pg_check_layout(pgio); 891 942 if (!pgio->pg_lseg) { 892 943 pgio->pg_lseg = fl_pnfs_update_layout(pgio->pg_inode, ··· 902 959 903 960 /* If no lseg, fall back to write through mds */ 904 961 if (pgio->pg_lseg == NULL) 905 - goto out_mds; 906 - nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq); 907 - status = filelayout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS); 908 - if (status < 0) { 909 - pnfs_put_lseg(pgio->pg_lseg); 910 - pgio->pg_lseg = NULL; 911 - goto out_mds; 912 - } 913 - return; 914 - out_mds: 915 - nfs_pageio_reset_write_mds(pgio); 962 + 
nfs_pageio_reset_write_mds(pgio); 916 963 } 917 964 918 965 static const struct nfs_pageio_ops filelayout_pg_read_ops = { ··· 1011 1078 return -EAGAIN; 1012 1079 } 1013 1080 1014 - /* filelayout_search_commit_reqs - Search lists in @cinfo for the head reqest 1015 - * for @page 1016 - * @cinfo - commit info for current inode 1017 - * @page - page to search for matching head request 1018 - * 1019 - * Returns a the head request if one is found, otherwise returns NULL. 1020 - */ 1021 - static struct nfs_page * 1022 - filelayout_search_commit_reqs(struct nfs_commit_info *cinfo, struct page *page) 1023 - { 1024 - struct nfs_page *freq, *t; 1025 - struct pnfs_commit_bucket *b; 1026 - int i; 1027 - 1028 - /* Linearly search the commit lists for each bucket until a matching 1029 - * request is found */ 1030 - for (i = 0, b = cinfo->ds->buckets; i < cinfo->ds->nbuckets; i++, b++) { 1031 - list_for_each_entry_safe(freq, t, &b->written, wb_list) { 1032 - if (freq->wb_page == page) 1033 - return freq->wb_head; 1034 - } 1035 - list_for_each_entry_safe(freq, t, &b->committing, wb_list) { 1036 - if (freq->wb_page == page) 1037 - return freq->wb_head; 1038 - } 1039 - } 1040 - 1041 - return NULL; 1042 - } 1043 - 1044 1081 static int 1045 1082 filelayout_commit_pagelist(struct inode *inode, struct list_head *mds_pages, 1046 1083 int how, struct nfs_commit_info *cinfo) ··· 1043 1140 struct nfs4_filelayout *flo; 1044 1141 1045 1142 flo = kzalloc(sizeof(*flo), gfp_flags); 1046 - return flo != NULL ? &flo->generic_hdr : NULL; 1143 + if (flo == NULL) 1144 + return NULL; 1145 + pnfs_init_ds_commit_info(&flo->commit_info); 1146 + flo->commit_info.ops = &filelayout_commit_ops; 1147 + return &flo->generic_hdr; 1047 1148 } 1048 1149 1049 1150 static void 1050 1151 filelayout_free_layout_hdr(struct pnfs_layout_hdr *lo) 1051 1152 { 1052 - kfree(FILELAYOUT_FROM_HDR(lo)); 1153 + kfree_rcu(FILELAYOUT_FROM_HDR(lo), generic_hdr.plh_rcu); 1053 1154 } 1054 1155 1055 1156 static struct pnfs_ds_commit_info * ··· 1066 1159 else 1067 1160 return &FILELAYOUT_FROM_HDR(layout)->commit_info; 1068 1161 } 1162 + 1163 + static void 1164 + filelayout_setup_ds_info(struct pnfs_ds_commit_info *fl_cinfo, 1165 + struct pnfs_layout_segment *lseg) 1166 + { 1167 + struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg); 1168 + struct inode *inode = lseg->pls_layout->plh_inode; 1169 + struct pnfs_commit_array *array, *new; 1170 + unsigned int size = (fl->stripe_type == STRIPE_SPARSE) ? 
1171 + fl->dsaddr->ds_num : fl->dsaddr->stripe_count; 1172 + 1173 + new = pnfs_alloc_commit_array(size, GFP_NOIO); 1174 + if (new) { 1175 + spin_lock(&inode->i_lock); 1176 + array = pnfs_add_commit_array(fl_cinfo, new, lseg); 1177 + spin_unlock(&inode->i_lock); 1178 + if (array != new) 1179 + pnfs_free_commit_array(new); 1180 + } 1181 + } 1182 + 1183 + static void 1184 + filelayout_release_ds_info(struct pnfs_ds_commit_info *fl_cinfo, 1185 + struct inode *inode) 1186 + { 1187 + spin_lock(&inode->i_lock); 1188 + pnfs_generic_ds_cinfo_destroy(fl_cinfo); 1189 + spin_unlock(&inode->i_lock); 1190 + } 1191 + 1192 + static const struct pnfs_commit_ops filelayout_commit_ops = { 1193 + .setup_ds_info = filelayout_setup_ds_info, 1194 + .release_ds_info = filelayout_release_ds_info, 1195 + .mark_request_commit = filelayout_mark_request_commit, 1196 + .clear_request_commit = pnfs_generic_clear_request_commit, 1197 + .scan_commit_lists = pnfs_generic_scan_commit_lists, 1198 + .recover_commit_reqs = pnfs_generic_recover_commit_reqs, 1199 + .search_commit_reqs = pnfs_generic_search_commit_reqs, 1200 + .commit_pagelist = filelayout_commit_pagelist, 1201 + }; 1069 1202 1070 1203 static struct pnfs_layoutdriver_type filelayout_type = { 1071 1204 .id = LAYOUT_NFSV4_1_FILES, ··· 1120 1173 .pg_read_ops = &filelayout_pg_read_ops, 1121 1174 .pg_write_ops = &filelayout_pg_write_ops, 1122 1175 .get_ds_info = &filelayout_get_ds_info, 1123 - .mark_request_commit = filelayout_mark_request_commit, 1124 - .clear_request_commit = pnfs_generic_clear_request_commit, 1125 - .scan_commit_lists = pnfs_generic_scan_commit_lists, 1126 - .recover_commit_reqs = pnfs_generic_recover_commit_reqs, 1127 - .search_commit_reqs = filelayout_search_commit_reqs, 1128 - .commit_pagelist = filelayout_commit_pagelist, 1129 1176 .read_pagelist = filelayout_read_pagelist, 1130 1177 .write_pagelist = filelayout_write_pagelist, 1131 1178 .alloc_deviceid_node = filelayout_alloc_deviceid_node,
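
filelayout_setup_ds_info() uses the classic optimistic-allocation shape: build the commit array without holding the inode lock, try to install it under the lock, and free it again if another thread already installed one. A generic sketch of that pattern with hypothetical types:

#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_array { unsigned int nbuckets; };

struct demo_info {
	spinlock_t lock;
	struct demo_array *array;
};

static struct demo_array *demo_get_or_add(struct demo_info *info, gfp_t gfp)
{
	struct demo_array *new, *cur;

	new = kzalloc(sizeof(*new), gfp);	/* may sleep; no lock held yet */
	if (!new)
		return NULL;
	spin_lock(&info->lock);
	cur = info->array;
	if (!cur) {
		info->array = new;		/* we won the race: publish it */
		cur = new;
	}
	spin_unlock(&info->lock);
	if (cur != new)
		kfree(new);			/* lost the race: discard ours */
	return cur;
}
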
fs/nfs/flexfilelayout/flexfilelayout.c  +94 -137
··· 32 32 33 33 static unsigned short io_maxretrans; 34 34 35 + static const struct pnfs_commit_ops ff_layout_commit_ops; 35 36 static void ff_layout_read_record_layoutstats_done(struct rpc_task *task, 36 37 struct nfs_pgio_header *hdr); 37 38 static int ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo, ··· 49 48 50 49 ffl = kzalloc(sizeof(*ffl), gfp_flags); 51 50 if (ffl) { 51 + pnfs_init_ds_commit_info(&ffl->commit_info); 52 52 INIT_LIST_HEAD(&ffl->error_list); 53 53 INIT_LIST_HEAD(&ffl->mirrors); 54 54 ffl->last_report_time = ktime_get(); 55 + ffl->commit_info.ops = &ff_layout_commit_ops; 55 56 return &ffl->generic_hdr; 56 57 } else 57 58 return NULL; ··· 62 59 static void 63 60 ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo) 64 61 { 62 + struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(lo); 65 63 struct nfs4_ff_layout_ds_err *err, *n; 66 64 67 - list_for_each_entry_safe(err, n, &FF_LAYOUT_FROM_HDR(lo)->error_list, 68 - list) { 65 + list_for_each_entry_safe(err, n, &ffl->error_list, list) { 69 66 list_del(&err->list); 70 67 kfree(err); 71 68 } 72 - kfree(FF_LAYOUT_FROM_HDR(lo)); 69 + kfree_rcu(ffl, generic_hdr.plh_rcu); 73 70 } 74 71 75 72 static int decode_pnfs_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid) ··· 251 248 252 249 static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls) 253 250 { 254 - int i; 251 + u32 i; 255 252 256 - if (fls->mirror_array) { 257 - for (i = 0; i < fls->mirror_array_cnt; i++) { 258 - /* normally mirror_ds is freed in 259 - * .free_deviceid_node but we still do it here 260 - * for .alloc_lseg error path */ 261 - ff_layout_put_mirror(fls->mirror_array[i]); 262 - } 263 - kfree(fls->mirror_array); 264 - fls->mirror_array = NULL; 265 - } 266 - } 267 - 268 - static int ff_layout_check_layout(struct nfs4_layoutget_res *lgr) 269 - { 270 - int ret = 0; 271 - 272 - dprintk("--> %s\n", __func__); 273 - 274 - /* FIXME: remove this check when layout segment support is added */ 275 - if (lgr->range.offset != 0 || 276 - lgr->range.length != NFS4_MAX_UINT64) { 277 - dprintk("%s Only whole file layouts supported. 
Use MDS i/o\n", 278 - __func__); 279 - ret = -EINVAL; 280 - } 281 - 282 - dprintk("--> %s returns %d\n", __func__, ret); 283 - return ret; 253 + for (i = 0; i < fls->mirror_array_cnt; i++) 254 + ff_layout_put_mirror(fls->mirror_array[i]); 284 255 } 285 256 286 257 static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls) ··· 263 286 ff_layout_free_mirror_array(fls); 264 287 kfree(fls); 265 288 } 289 + } 290 + 291 + static bool 292 + ff_lseg_match_mirrors(struct pnfs_layout_segment *l1, 293 + struct pnfs_layout_segment *l2) 294 + { 295 + const struct nfs4_ff_layout_segment *fl1 = FF_LAYOUT_LSEG(l1); 296 + const struct nfs4_ff_layout_segment *fl2 = FF_LAYOUT_LSEG(l1); 297 + u32 i; 298 + 299 + if (fl1->mirror_array_cnt != fl2->mirror_array_cnt) 300 + return false; 301 + for (i = 0; i < fl1->mirror_array_cnt; i++) { 302 + if (fl1->mirror_array[i] != fl2->mirror_array[i]) 303 + return false; 304 + } 305 + return true; 266 306 } 267 307 268 308 static bool ··· 316 322 new_end = pnfs_calc_offset_end(new->pls_range.offset, 317 323 new->pls_range.length); 318 324 if (new_end < old->pls_range.offset) 325 + return false; 326 + if (!ff_lseg_match_mirrors(new, old)) 319 327 return false; 320 328 321 329 /* Mergeable: copy info from 'old' to 'new' */ ··· 396 400 goto out_err_free; 397 401 398 402 rc = -ENOMEM; 399 - fls = kzalloc(sizeof(*fls), gfp_flags); 403 + fls = kzalloc(struct_size(fls, mirror_array, mirror_array_cnt), 404 + gfp_flags); 400 405 if (!fls) 401 406 goto out_err_free; 402 407 403 408 fls->mirror_array_cnt = mirror_array_cnt; 404 409 fls->stripe_unit = stripe_unit; 405 - fls->mirror_array = kcalloc(fls->mirror_array_cnt, 406 - sizeof(fls->mirror_array[0]), gfp_flags); 407 - if (fls->mirror_array == NULL) 408 - goto out_err_free; 409 410 410 411 for (i = 0; i < fls->mirror_array_cnt; i++) { 411 412 struct nfs4_ff_layout_mirror *mirror; ··· 538 545 539 546 out_sort_mirrors: 540 547 ff_layout_sort_mirrors(fls); 541 - rc = ff_layout_check_layout(lgr); 542 - if (rc) 543 - goto out_err_free; 544 548 ret = &fls->generic_hdr; 545 549 dprintk("<-- %s (success)\n", __func__); 546 550 out_free_page: ··· 548 558 ret = ERR_PTR(rc); 549 559 dprintk("<-- %s (%d)\n", __func__, rc); 550 560 goto out_free_page; 551 - } 552 - 553 - static bool ff_layout_has_rw_segments(struct pnfs_layout_hdr *layout) 554 - { 555 - struct pnfs_layout_segment *lseg; 556 - 557 - list_for_each_entry(lseg, &layout->plh_segs, pls_list) 558 - if (lseg->pls_range.iomode == IOMODE_RW) 559 - return true; 560 - 561 - return false; 562 561 } 563 562 564 563 static void ··· 564 585 ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout); 565 586 inode = ffl->generic_hdr.plh_inode; 566 587 spin_lock(&inode->i_lock); 567 - if (!ff_layout_has_rw_segments(lseg->pls_layout)) { 568 - ffl->commit_info.nbuckets = 0; 569 - kfree(ffl->commit_info.buckets); 570 - ffl->commit_info.buckets = NULL; 571 - } 588 + pnfs_generic_ds_cinfo_release_lseg(&ffl->commit_info, lseg); 572 589 spin_unlock(&inode->i_lock); 573 590 } 574 591 _ff_layout_free_lseg(fls); 575 - } 576 - 577 - /* Return 1 until we have multiple lsegs support */ 578 - static int 579 - ff_layout_get_lseg_count(struct nfs4_ff_layout_segment *fls) 580 - { 581 - return 1; 582 592 } 583 593 584 594 static void ··· 714 746 spin_unlock(&mirror->lock); 715 747 } 716 748 717 - static int 718 - ff_layout_alloc_commit_info(struct pnfs_layout_segment *lseg, 719 - struct nfs_commit_info *cinfo, 720 - gfp_t gfp_flags) 721 - { 722 - struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg); 723 - struct 
pnfs_commit_bucket *buckets; 724 - int size; 725 - 726 - if (cinfo->ds->nbuckets != 0) { 727 - /* This assumes there is only one RW lseg per file. 728 - * To support multiple lseg per file, we need to 729 - * change struct pnfs_commit_bucket to allow dynamic 730 - * increasing nbuckets. 731 - */ 732 - return 0; 733 - } 734 - 735 - size = ff_layout_get_lseg_count(fls) * FF_LAYOUT_MIRROR_COUNT(lseg); 736 - 737 - buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket), 738 - gfp_flags); 739 - if (!buckets) 740 - return -ENOMEM; 741 - else { 742 - int i; 743 - 744 - spin_lock(&cinfo->inode->i_lock); 745 - if (cinfo->ds->nbuckets != 0) 746 - kfree(buckets); 747 - else { 748 - cinfo->ds->buckets = buckets; 749 - cinfo->ds->nbuckets = size; 750 - for (i = 0; i < size; i++) { 751 - INIT_LIST_HEAD(&buckets[i].written); 752 - INIT_LIST_HEAD(&buckets[i].committing); 753 - /* mark direct verifier as unset */ 754 - buckets[i].direct_verf.committed = 755 - NFS_INVALID_STABLE_HOW; 756 - } 757 - } 758 - spin_unlock(&cinfo->inode->i_lock); 759 - return 0; 760 - } 761 - } 762 - 763 749 static void 764 750 ff_layout_mark_ds_unreachable(struct pnfs_layout_segment *lseg, int idx) 765 751 { ··· 798 876 pnfs_put_lseg(pgio->pg_lseg); 799 877 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, 800 878 nfs_req_openctx(req), 801 - 0, 802 - NFS4_MAX_UINT64, 879 + req_offset(req), 880 + req->wb_bytes, 803 881 IOMODE_READ, 804 882 strict_iomode, 805 883 GFP_KERNEL); ··· 807 885 pgio->pg_error = PTR_ERR(pgio->pg_lseg); 808 886 pgio->pg_lseg = NULL; 809 887 } 888 + } 889 + 890 + static void 891 + ff_layout_pg_check_layout(struct nfs_pageio_descriptor *pgio, 892 + struct nfs_page *req) 893 + { 894 + pnfs_generic_pg_check_layout(pgio); 895 + pnfs_generic_pg_check_range(pgio, req); 810 896 } 811 897 812 898 static void ··· 827 897 int ds_idx; 828 898 829 899 retry: 830 - pnfs_generic_pg_check_layout(pgio); 900 + ff_layout_pg_check_layout(pgio, req); 831 901 /* Use full layout for now */ 832 902 if (!pgio->pg_lseg) { 833 903 ff_layout_pg_get_read(pgio, req, false); ··· 883 953 { 884 954 struct nfs4_ff_layout_mirror *mirror; 885 955 struct nfs_pgio_mirror *pgm; 886 - struct nfs_commit_info cinfo; 887 956 struct nfs4_pnfs_ds *ds; 888 957 int i; 889 - int status; 890 958 891 959 retry: 892 - pnfs_generic_pg_check_layout(pgio); 960 + ff_layout_pg_check_layout(pgio, req); 893 961 if (!pgio->pg_lseg) { 894 962 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, 895 963 nfs_req_openctx(req), 896 - 0, 897 - NFS4_MAX_UINT64, 964 + req_offset(req), 965 + req->wb_bytes, 898 966 IOMODE_RW, 899 967 false, 900 968 GFP_NOFS); ··· 904 976 } 905 977 /* If no lseg, fall back to write through mds */ 906 978 if (pgio->pg_lseg == NULL) 907 - goto out_mds; 908 - 909 - nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq); 910 - status = ff_layout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS); 911 - if (status < 0) 912 979 goto out_mds; 913 980 914 981 /* Use a direct mapping of ds_idx to pgio mirror_idx */ ··· 1220 1297 } 1221 1298 } 1222 1299 1223 - switch (status) { 1224 - case NFS4ERR_DELAY: 1225 - case NFS4ERR_GRACE: 1226 - return; 1227 - default: 1228 - break; 1229 - } 1230 - 1231 1300 mirror = FF_LAYOUT_COMP(lseg, idx); 1232 1301 err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout), 1233 1302 mirror, offset, length, status, opnum, 1234 1303 GFP_NOIO); 1235 - if (status == NFS4ERR_NXIO) 1304 + 1305 + switch (status) { 1306 + case NFS4ERR_DELAY: 1307 + case NFS4ERR_GRACE: 1308 + break; 1309 + case NFS4ERR_NXIO: 1236 1310 
ff_layout_mark_ds_unreachable(lseg, idx); 1237 - pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode, lseg); 1311 + /* Fallthrough */ 1312 + default: 1313 + pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode, 1314 + lseg); 1315 + } 1316 + 1238 1317 dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status); 1239 1318 } 1240 1319 ··· 1937 2012 } 1938 2013 1939 2014 static void 2015 + ff_layout_setup_ds_info(struct pnfs_ds_commit_info *fl_cinfo, 2016 + struct pnfs_layout_segment *lseg) 2017 + { 2018 + struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg); 2019 + struct inode *inode = lseg->pls_layout->plh_inode; 2020 + struct pnfs_commit_array *array, *new; 2021 + 2022 + new = pnfs_alloc_commit_array(flseg->mirror_array_cnt, GFP_NOIO); 2023 + if (new) { 2024 + spin_lock(&inode->i_lock); 2025 + array = pnfs_add_commit_array(fl_cinfo, new, lseg); 2026 + spin_unlock(&inode->i_lock); 2027 + if (array != new) 2028 + pnfs_free_commit_array(new); 2029 + } 2030 + } 2031 + 2032 + static void 2033 + ff_layout_release_ds_info(struct pnfs_ds_commit_info *fl_cinfo, 2034 + struct inode *inode) 2035 + { 2036 + spin_lock(&inode->i_lock); 2037 + pnfs_generic_ds_cinfo_destroy(fl_cinfo); 2038 + spin_unlock(&inode->i_lock); 2039 + } 2040 + 2041 + static void 1940 2042 ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d) 1941 2043 { 1942 2044 nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds, ··· 2448 2496 return 0; 2449 2497 } 2450 2498 2499 + static const struct pnfs_commit_ops ff_layout_commit_ops = { 2500 + .setup_ds_info = ff_layout_setup_ds_info, 2501 + .release_ds_info = ff_layout_release_ds_info, 2502 + .mark_request_commit = pnfs_layout_mark_request_commit, 2503 + .clear_request_commit = pnfs_generic_clear_request_commit, 2504 + .scan_commit_lists = pnfs_generic_scan_commit_lists, 2505 + .recover_commit_reqs = pnfs_generic_recover_commit_reqs, 2506 + .commit_pagelist = ff_layout_commit_pagelist, 2507 + }; 2508 + 2451 2509 static struct pnfs_layoutdriver_type flexfilelayout_type = { 2452 2510 .id = LAYOUT_FLEX_FILES, 2453 2511 .name = "LAYOUT_FLEX_FILES", ··· 2474 2512 .pg_write_ops = &ff_layout_pg_write_ops, 2475 2513 .get_ds_info = ff_layout_get_ds_info, 2476 2514 .free_deviceid_node = ff_layout_free_deviceid_node, 2477 - .mark_request_commit = pnfs_layout_mark_request_commit, 2478 - .clear_request_commit = pnfs_generic_clear_request_commit, 2479 - .scan_commit_lists = pnfs_generic_scan_commit_lists, 2480 - .recover_commit_reqs = pnfs_generic_recover_commit_reqs, 2481 - .commit_pagelist = ff_layout_commit_pagelist, 2482 2515 .read_pagelist = ff_layout_read_pagelist, 2483 2516 .write_pagelist = ff_layout_write_pagelist, 2484 2517 .alloc_deviceid_node = ff_layout_alloc_deviceid_node,
+1 -1
fs/nfs/flexfilelayout/flexfilelayout.h
··· 99 99 u64 stripe_unit; 100 100 u32 flags; 101 101 u32 mirror_array_cnt; 102 - struct nfs4_ff_layout_mirror **mirror_array; 102 + struct nfs4_ff_layout_mirror *mirror_array[]; 103 103 }; 104 104 105 105 struct nfs4_flexfile_layout {
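Turning mirror_array into a C99 flexible array member lets the layout segment and its mirror pointers live in a single allocation instead of two. A hedged sketch of the allocation pattern this enables, using struct_size() from <linux/overflow.h> (the same helper the pnfs_nfs.c hunk below uses for commit arrays); example_alloc_flseg() is illustrative, not the driver's real allocation routine:

    #include <linux/overflow.h>
    #include <linux/slab.h>

    /* Allocate a segment plus cnt trailing mirror pointers in one kzalloc(),
     * sized with struct_size() so the multiplication cannot overflow. */
    static struct nfs4_ff_layout_segment *
    example_alloc_flseg(u32 cnt, gfp_t gfp_flags)
    {
            struct nfs4_ff_layout_segment *fls;

            fls = kzalloc(struct_size(fls, mirror_array, cnt), gfp_flags);
            if (fls)
                    fls->mirror_array_cnt = cnt;
            return fls;
    }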
+6 -3
fs/nfs/fs_context.c
··· 190 190 { "4.0", Opt_vers_4_0 }, 191 191 { "4.1", Opt_vers_4_1 }, 192 192 { "4.2", Opt_vers_4_2 }, 193 + {} 193 194 }; 194 195 195 196 enum { ··· 203 202 nr__Opt_xprt 204 203 }; 205 204 206 - static const struct constant_table nfs_xprt_protocol_tokens[nr__Opt_xprt] = { 205 + static const struct constant_table nfs_xprt_protocol_tokens[] = { 207 206 { "rdma", Opt_xprt_rdma }, 208 207 { "rdma6", Opt_xprt_rdma6 }, 209 208 { "tcp", Opt_xprt_tcp }, 210 209 { "tcp6", Opt_xprt_tcp6 }, 211 210 { "udp", Opt_xprt_udp }, 212 211 { "udp6", Opt_xprt_udp6 }, 212 + {} 213 213 }; 214 214 215 215 enum { ··· 241 239 { "spkm3i", Opt_sec_spkmi }, 242 240 { "spkm3p", Opt_sec_spkmp }, 243 241 { "sys", Opt_sec_sys }, 242 + {} 244 243 }; 245 244 246 245 /* ··· 1138 1135 return nfs_invalf(fc, "NFS4: mount program didn't pass remote address"); 1139 1136 1140 1137 out_invalid_transport_udp: 1141 - return nfs_invalf(fc, "NFSv4: Unsupported transport protocol udp"); 1138 + return nfs_invalf(fc, "NFS: Unsupported transport protocol udp"); 1142 1139 } 1143 1140 #endif 1144 1141 ··· 1260 1257 nfs_errorf(fc, "NFS: NFSv4 is not compiled into kernel"); 1261 1258 return -EPROTONOSUPPORT; 1262 1259 out_invalid_transport_udp: 1263 - return nfs_invalf(fc, "NFSv4: Unsupported transport protocol udp"); 1260 + return nfs_invalf(fc, "NFS: Unsupported transport protocol udp"); 1264 1261 out_no_address: 1265 1262 return nfs_invalf(fc, "NFS: mount program didn't pass remote address"); 1266 1263 out_mountproto_mismatch:
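The added {} entries are sentinels: constant_table arrays are scanned until an empty slot rather than bounded at compile time, which is also why the explicit nr__Opt_xprt size on nfs_xprt_protocol_tokens is dropped. A small sketch of that lookup idiom, written out here for illustration on the assumption that the fs_parser helper (lookup_constant()) stops at a NULL name:

    #include <linux/fs_parser.h>
    #include <linux/string.h>

    /* Illustrative sentinel-terminated walk over a constant_table. */
    static int example_lookup(const struct constant_table *tbl,
                              const char *name, int not_found)
    {
            const struct constant_table *p;

            for (p = tbl; p->name; p++)     /* stops at the {} sentinel */
                    if (strcmp(p->name, name) == 0)
                            return p->value;
            return not_found;
    }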
+18 -10
fs/nfs/inode.c
··· 62 62 /* Default is to see 64-bit inode numbers */ 63 63 static bool enable_ino64 = NFS_64_BIT_INODE_NUMBERS_ENABLED; 64 64 65 - static void nfs_invalidate_inode(struct inode *); 66 65 static int nfs_update_inode(struct inode *, struct nfs_fattr *); 67 66 68 67 static struct kmem_cache * nfs_inode_cachep; ··· 283 284 * Invalidate, but do not unhash, the inode. 284 285 * NB: must be called with inode->i_lock held! 285 286 */ 286 - static void nfs_invalidate_inode(struct inode *inode) 287 + static void nfs_set_inode_stale_locked(struct inode *inode) 287 288 { 288 289 set_bit(NFS_INO_STALE, &NFS_I(inode)->flags); 289 290 nfs_zap_caches_locked(inode); 291 + trace_nfs_set_inode_stale(inode); 292 + } 293 + 294 + void nfs_set_inode_stale(struct inode *inode) 295 + { 296 + spin_lock(&inode->i_lock); 297 + nfs_set_inode_stale_locked(inode); 298 + spin_unlock(&inode->i_lock); 290 299 } 291 300 292 301 struct nfs_find_desc { ··· 966 959 struct file *filp) 967 960 { 968 961 struct nfs_open_context *ctx; 969 - const struct cred *cred = get_current_cred(); 970 962 971 963 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); 972 - if (!ctx) { 973 - put_cred(cred); 964 + if (!ctx) 974 965 return ERR_PTR(-ENOMEM); 975 - } 976 966 nfs_sb_active(dentry->d_sb); 977 967 ctx->dentry = dget(dentry); 978 - ctx->cred = cred; 968 + if (filp) 969 + ctx->cred = get_cred(filp->f_cred); 970 + else 971 + ctx->cred = get_current_cred(); 979 972 ctx->ll_cred = NULL; 980 973 ctx->state = NULL; 981 974 ctx->mode = f_mode; ··· 1170 1163 status = 0; 1171 1164 break; 1172 1165 case -ESTALE: 1173 - nfs_zap_caches(inode); 1174 1166 if (!S_ISDIR(inode->i_mode)) 1175 - set_bit(NFS_INO_STALE, &NFS_I(inode)->flags); 1167 + nfs_set_inode_stale(inode); 1168 + else 1169 + nfs_zap_caches(inode); 1176 1170 } 1177 1171 goto err_out; 1178 1172 } ··· 2072 2064 * lookup validation will know that the inode is bad. 2073 2065 * (But we fall through to invalidate the caches.) 2074 2066 */ 2075 - nfs_invalidate_inode(inode); 2067 + nfs_set_inode_stale_locked(inode); 2076 2068 return -ESTALE; 2077 2069 } 2078 2070
+26 -10
fs/nfs/internal.h
··· 274 274 struct nfs_pgio_mirror * 275 275 nfs_pgio_current_mirror(struct nfs_pageio_descriptor *desc); 276 276 277 - static inline bool nfs_pgio_has_mirroring(struct nfs_pageio_descriptor *desc) 278 - { 279 - WARN_ON_ONCE(desc->pg_mirror_count < 1); 280 - return desc->pg_mirror_count > 1; 281 - } 282 - 283 277 static inline bool nfs_match_open_context(const struct nfs_open_context *ctx1, 284 278 const struct nfs_open_context *ctx2) 285 279 { ··· 411 417 extern void __exit unregister_nfs_fs(void); 412 418 extern bool nfs_sb_active(struct super_block *sb); 413 419 extern void nfs_sb_deactive(struct super_block *sb); 414 - 420 + extern int nfs_client_for_each_server(struct nfs_client *clp, 421 + int (*fn)(struct nfs_server *, void *), 422 + void *data); 415 423 /* io.c */ 416 424 extern void nfs_start_io_read(struct inode *inode); 417 425 extern void nfs_end_io_read(struct inode *inode); ··· 511 515 loff_t lstart, loff_t lend); 512 516 513 517 #ifdef CONFIG_NFS_V4_1 518 + static inline void 519 + pnfs_bucket_clear_pnfs_ds_commit_verifiers(struct pnfs_commit_bucket *buckets, 520 + unsigned int nbuckets) 521 + { 522 + unsigned int i; 523 + 524 + for (i = 0; i < nbuckets; i++) 525 + buckets[i].direct_verf.committed = NFS_INVALID_STABLE_HOW; 526 + } 514 527 static inline 515 528 void nfs_clear_pnfs_ds_commit_verifiers(struct pnfs_ds_commit_info *cinfo) 516 529 { 517 - int i; 530 + struct pnfs_commit_array *array; 518 531 519 - for (i = 0; i < cinfo->nbuckets; i++) 520 - cinfo->buckets[i].direct_verf.committed = NFS_INVALID_STABLE_HOW; 532 + rcu_read_lock(); 533 + list_for_each_entry_rcu(array, &cinfo->commits, cinfo_list) 534 + pnfs_bucket_clear_pnfs_ds_commit_verifiers(array->buckets, 535 + array->nbuckets); 536 + rcu_read_unlock(); 521 537 } 522 538 #else 523 539 static inline ··· 548 540 const struct nfs_write_verifier *v2) 549 541 { 550 542 return memcmp(v1->data, v2->data, sizeof(v1->data)); 543 + } 544 + 545 + static inline bool 546 + nfs_write_match_verf(const struct nfs_writeverf *verf, 547 + struct nfs_page *req) 548 + { 549 + return verf->committed > NFS_UNSTABLE && 550 + !nfs_write_verifier_cmp(&req->wb_verf, &verf->verifier); 551 551 } 552 552 553 553 /* unlink.c */
+58 -9
fs/nfs/namespace.c
··· 145 145 struct vfsmount *mnt = ERR_PTR(-ENOMEM); 146 146 struct nfs_server *server = NFS_SERVER(d_inode(path->dentry)); 147 147 struct nfs_client *client = server->nfs_client; 148 + int timeout = READ_ONCE(nfs_mountpoint_expiry_timeout); 148 149 int ret; 149 150 150 151 if (IS_ROOT(path->dentry)) ··· 191 190 if (IS_ERR(mnt)) 192 191 goto out_fc; 193 192 194 - if (nfs_mountpoint_expiry_timeout < 0) 193 + mntget(mnt); /* prevent immediate expiration */ 194 + if (timeout <= 0) 195 195 goto out_fc; 196 196 197 - mntget(mnt); /* prevent immediate expiration */ 198 197 mnt_set_expiry(mnt, &nfs_automount_list); 199 - schedule_delayed_work(&nfs_automount_task, nfs_mountpoint_expiry_timeout); 198 + schedule_delayed_work(&nfs_automount_task, timeout); 200 199 201 200 out_fc: 202 201 put_fs_context(fc); ··· 234 233 static void nfs_expire_automounts(struct work_struct *work) 235 234 { 236 235 struct list_head *list = &nfs_automount_list; 236 + int timeout = READ_ONCE(nfs_mountpoint_expiry_timeout); 237 237 238 238 mark_mounts_for_expiry(list); 239 - if (!list_empty(list)) 240 - schedule_delayed_work(&nfs_automount_task, nfs_mountpoint_expiry_timeout); 239 + if (!list_empty(list) && timeout > 0) 240 + schedule_delayed_work(&nfs_automount_task, timeout); 241 241 } 242 242 243 243 void nfs_release_automount_timer(void) ··· 249 247 250 248 /** 251 249 * nfs_do_submount - set up mountpoint when crossing a filesystem boundary 252 - * @dentry: parent directory 253 - * @fh: filehandle for new root dentry 254 - * @fattr: attributes for new root inode 255 - * @authflavor: security flavor to use when performing the mount 250 + * @fc: pointer to struct nfs_fs_context 256 251 * 257 252 */ 258 253 int nfs_do_submount(struct fs_context *fc) ··· 311 312 return nfs_do_submount(fc); 312 313 } 313 314 EXPORT_SYMBOL_GPL(nfs_submount); 315 + 316 + static int param_set_nfs_timeout(const char *val, const struct kernel_param *kp) 317 + { 318 + long num; 319 + int ret; 320 + 321 + if (!val) 322 + return -EINVAL; 323 + ret = kstrtol(val, 0, &num); 324 + if (ret) 325 + return -EINVAL; 326 + if (num > 0) { 327 + if (num >= INT_MAX / HZ) 328 + num = INT_MAX; 329 + else 330 + num *= HZ; 331 + *((int *)kp->arg) = num; 332 + if (!list_empty(&nfs_automount_list)) 333 + mod_delayed_work(system_wq, &nfs_automount_task, num); 334 + } else { 335 + *((int *)kp->arg) = -1*HZ; 336 + cancel_delayed_work(&nfs_automount_task); 337 + } 338 + return 0; 339 + } 340 + 341 + static int param_get_nfs_timeout(char *buffer, const struct kernel_param *kp) 342 + { 343 + long num = *((int *)kp->arg); 344 + 345 + if (num > 0) { 346 + if (num >= INT_MAX - (HZ - 1)) 347 + num = INT_MAX / HZ; 348 + else 349 + num = (num + (HZ - 1)) / HZ; 350 + } else 351 + num = -1; 352 + return scnprintf(buffer, PAGE_SIZE, "%li\n", num); 353 + } 354 + 355 + static const struct kernel_param_ops param_ops_nfs_timeout = { 356 + .set = param_set_nfs_timeout, 357 + .get = param_get_nfs_timeout, 358 + }; 359 + #define param_check_nfs_timeout(name, p) __param_check(name, p, int); 360 + 361 + module_param(nfs_mountpoint_expiry_timeout, nfs_timeout, 0644); 362 + MODULE_PARM_DESC(nfs_mountpoint_expiry_timeout, 363 + "Set the NFS automounted mountpoint timeout value (seconds)." 364 + "Values <= 0 turn expiration off.");
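Usage note (assuming the usual module_param() plumbing): the value is expressed in seconds and, given the 0644 mode above, should be adjustable at runtime via /sys/module/nfs/parameters/nfs_mountpoint_expiry_timeout or set at boot as nfs.nfs_mountpoint_expiry_timeout=<seconds>; per param_set_nfs_timeout(), writing zero or a negative number cancels the expiry worker instead of rescheduling it.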
+3 -1
fs/nfs/nfs4_fs.h
··· 42 42 NFS4CLNT_LEASE_MOVED, 43 43 NFS4CLNT_DELEGATION_EXPIRED, 44 44 NFS4CLNT_RUN_MANAGER, 45 - NFS4CLNT_DELEGRETURN_RUNNING, 45 + NFS4CLNT_RECALL_RUNNING, 46 + NFS4CLNT_RECALL_ANY_LAYOUT_READ, 47 + NFS4CLNT_RECALL_ANY_LAYOUT_RW, 46 48 }; 47 49 48 50 #define NFS4_RENEW_TIMEOUT 0x01
+3
fs/nfs/nfs4file.c
··· 252 252 if (remap_flags & ~REMAP_FILE_ADVISORY) 253 253 return -EINVAL; 254 254 255 + if (IS_SWAPFILE(dst_inode) || IS_SWAPFILE(src_inode)) 256 + return -ETXTBSY; 257 + 255 258 /* check alignment w.r.t. clone_blksize */ 256 259 ret = -EINVAL; 257 260 if (bs) {
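Seen from user space, the added check makes clone requests fail up front with ETXTBSY when either file is an active swapfile. A hedged illustration in plain userspace C (FICLONE from <linux/fs.h> is one of the entry points that reaches this remap handler); this snippet is not part of the kernel change itself:

    #include <sys/ioctl.h>
    #include <linux/fs.h>
    #include <errno.h>
    #include <stdio.h>

    /* Reflink src_fd into dst_fd, reporting the swapfile rejection. */
    static int example_clone(int dst_fd, int src_fd)
    {
            if (ioctl(dst_fd, FICLONE, src_fd) == -1) {
                    if (errno == ETXTBSY)
                            fprintf(stderr, "clone refused: file is a swapfile\n");
                    return -1;
            }
            return 0;
    }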
+1 -1
fs/nfs/nfs4namespace.c
··· 354 354 355 355 /** 356 356 * nfs_follow_referral - set up mountpoint when hitting a referral on moved error 357 - * @dentry: parent directory 357 + * @fc: pointer to struct nfs_fs_context 358 358 * @locations: array of NFSv4 server location information 359 359 * 360 360 */
+12 -7
fs/nfs/nfs4proc.c
··· 2346 2346 .callback_ops = &nfs4_open_confirm_ops, 2347 2347 .callback_data = data, 2348 2348 .workqueue = nfsiod_workqueue, 2349 - .flags = RPC_TASK_ASYNC, 2349 + .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 2350 2350 }; 2351 2351 int status; 2352 2352 ··· 2511 2511 .callback_ops = &nfs4_open_ops, 2512 2512 .callback_data = data, 2513 2513 .workqueue = nfsiod_workqueue, 2514 - .flags = RPC_TASK_ASYNC, 2514 + .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 2515 2515 }; 2516 2516 int status; 2517 2517 ··· 2790 2790 return NFS_OK; 2791 2791 } 2792 2792 2793 + spin_lock(&delegation->lock); 2793 2794 nfs4_stateid_copy(&stateid, &delegation->stateid); 2794 2795 2795 2796 if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED, 2796 2797 &delegation->flags)) { 2798 + spin_unlock(&delegation->lock); 2797 2799 rcu_read_unlock(); 2798 2800 return NFS_OK; 2799 2801 } 2800 2802 2801 2803 if (delegation->cred) 2802 2804 cred = get_cred(delegation->cred); 2805 + spin_unlock(&delegation->lock); 2803 2806 rcu_read_unlock(); 2804 2807 status = nfs41_test_and_free_expired_stateid(server, &stateid, cred); 2805 2808 trace_nfs4_test_delegation_stateid(state, NULL, status); ··· 3654 3651 .rpc_message = &msg, 3655 3652 .callback_ops = &nfs4_close_ops, 3656 3653 .workqueue = nfsiod_workqueue, 3657 - .flags = RPC_TASK_ASYNC, 3654 + .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 3658 3655 }; 3659 3656 int status = -ENOMEM; 3660 3657 ··· 5547 5544 struct nfs4_cached_acl { 5548 5545 int cached; 5549 5546 size_t len; 5550 - char data[0]; 5547 + char data[]; 5551 5548 }; 5552 5549 5553 5550 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl) ··· 6256 6253 /* Fallthrough */ 6257 6254 case -NFS4ERR_BAD_STATEID: 6258 6255 case -NFS4ERR_STALE_STATEID: 6256 + case -ETIMEDOUT: 6259 6257 task->tk_status = 0; 6260 6258 break; 6261 6259 case -NFS4ERR_OLD_STATEID: ··· 6347 6343 .rpc_client = server->client, 6348 6344 .rpc_message = &msg, 6349 6345 .callback_ops = &nfs4_delegreturn_ops, 6350 - .flags = RPC_TASK_ASYNC, 6346 + .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF | RPC_TASK_TIMEOUT, 6351 6347 }; 6352 6348 int status = 0; 6353 6349 ··· 6930 6926 .rpc_message = &msg, 6931 6927 .callback_ops = &nfs4_lock_ops, 6932 6928 .workqueue = nfsiod_workqueue, 6933 - .flags = RPC_TASK_ASYNC, 6929 + .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 6934 6930 }; 6935 6931 int ret; 6936 6932 ··· 9174 9170 .rpc_message = &msg, 9175 9171 .callback_ops = &nfs4_layoutget_call_ops, 9176 9172 .callback_data = lgp, 9177 - .flags = RPC_TASK_ASYNC, 9173 + .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 9178 9174 }; 9179 9175 struct pnfs_layout_segment *lseg = NULL; 9180 9176 struct nfs4_exception exception = { ··· 9291 9287 lrp->ld_private.ops->free(&lrp->ld_private); 9292 9288 pnfs_put_layout_hdr(lrp->args.layout); 9293 9289 nfs_iput_and_deactive(lrp->inode); 9290 + put_cred(lrp->cred); 9294 9291 kfree(calldata); 9295 9292 dprintk("<-- %s\n", __func__); 9296 9293 }
+22 -2
fs/nfs/nfs4state.c
··· 2524 2524 } 2525 2525 return 0; 2526 2526 } 2527 + 2528 + static void nfs4_layoutreturn_any_run(struct nfs_client *clp) 2529 + { 2530 + int iomode = 0; 2531 + 2532 + if (test_and_clear_bit(NFS4CLNT_RECALL_ANY_LAYOUT_READ, &clp->cl_state)) 2533 + iomode += IOMODE_READ; 2534 + if (test_and_clear_bit(NFS4CLNT_RECALL_ANY_LAYOUT_RW, &clp->cl_state)) 2535 + iomode += IOMODE_RW; 2536 + /* Note: IOMODE_READ + IOMODE_RW == IOMODE_ANY */ 2537 + if (iomode) { 2538 + pnfs_layout_return_unused_byclid(clp, iomode); 2539 + set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state); 2540 + } 2541 + } 2527 2542 #else /* CONFIG_NFS_V4_1 */ 2528 2543 static int nfs4_reset_session(struct nfs_client *clp) { return 0; } 2529 2544 2530 2545 static int nfs4_bind_conn_to_session(struct nfs_client *clp) 2531 2546 { 2532 2547 return 0; 2548 + } 2549 + 2550 + static void nfs4_layoutreturn_any_run(struct nfs_client *clp) 2551 + { 2533 2552 } 2534 2553 #endif /* CONFIG_NFS_V4_1 */ 2535 2554 ··· 2654 2635 nfs4_end_drain_session(clp); 2655 2636 nfs4_clear_state_manager_bit(clp); 2656 2637 2657 - if (!test_and_set_bit(NFS4CLNT_DELEGRETURN_RUNNING, &clp->cl_state)) { 2638 + if (!test_and_set_bit(NFS4CLNT_RECALL_RUNNING, &clp->cl_state)) { 2658 2639 if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) { 2659 2640 nfs_client_return_marked_delegations(clp); 2660 2641 set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state); 2661 2642 } 2662 - clear_bit(NFS4CLNT_DELEGRETURN_RUNNING, &clp->cl_state); 2643 + nfs4_layoutreturn_any_run(clp); 2644 + clear_bit(NFS4CLNT_RECALL_RUNNING, &clp->cl_state); 2663 2645 } 2664 2646 2665 2647 /* Did we race with an attempt to give us more work? */
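The additions in nfs4_layoutreturn_any_run() work because the pnfs_iomode values track the protocol's LAYOUTIOMODE4 constants, so combining the READ and RW recall bits naturally selects a return of every layout type:

    /* Reference values (enum pnfs_iomode in include/linux/nfs4.h mirrors
     * RFC 5661); shown here only to make the arithmetic above explicit. */
    enum example_pnfs_iomode {
            EXAMPLE_IOMODE_READ = 1,
            EXAMPLE_IOMODE_RW   = 2,
            EXAMPLE_IOMODE_ANY  = 3,    /* READ + RW */
    };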
+6 -2
fs/nfs/nfs4trace.h
··· 584 584 TRACE_DEFINE_ENUM(NFS4CLNT_LEASE_MOVED); 585 585 TRACE_DEFINE_ENUM(NFS4CLNT_DELEGATION_EXPIRED); 586 586 TRACE_DEFINE_ENUM(NFS4CLNT_RUN_MANAGER); 587 - TRACE_DEFINE_ENUM(NFS4CLNT_DELEGRETURN_RUNNING); 587 + TRACE_DEFINE_ENUM(NFS4CLNT_RECALL_RUNNING); 588 + TRACE_DEFINE_ENUM(NFS4CLNT_RECALL_ANY_LAYOUT_READ); 589 + TRACE_DEFINE_ENUM(NFS4CLNT_RECALL_ANY_LAYOUT_RW); 588 590 589 591 #define show_nfs4_clp_state(state) \ 590 592 __print_flags(state, "|", \ ··· 607 605 { NFS4CLNT_LEASE_MOVED, "LEASE_MOVED" }, \ 608 606 { NFS4CLNT_DELEGATION_EXPIRED, "DELEGATION_EXPIRED" }, \ 609 607 { NFS4CLNT_RUN_MANAGER, "RUN_MANAGER" }, \ 610 - { NFS4CLNT_DELEGRETURN_RUNNING, "DELEGRETURN_RUNNING" }) 608 + { NFS4CLNT_RECALL_RUNNING, "RECALL_RUNNING" }, \ 609 + { NFS4CLNT_RECALL_ANY_LAYOUT_READ, "RECALL_ANY_LAYOUT_READ" }, \ 610 + { NFS4CLNT_RECALL_ANY_LAYOUT_RW, "RECALL_ANY_LAYOUT_RW" }) 611 611 612 612 TRACE_EVENT(nfs4_state_mgr, 613 613 TP_PROTO(
+1 -1
fs/nfs/nfsroot.c
··· 88 88 #define NFS_ROOT "/tftpboot/%s" 89 89 90 90 /* Default NFSROOT mount options. */ 91 - #define NFS_DEF_OPTIONS "vers=2,udp,rsize=4096,wsize=4096" 91 + #define NFS_DEF_OPTIONS "vers=2,tcp,rsize=4096,wsize=4096" 92 92 93 93 /* Parameters passed from the kernel command line */ 94 94 static char nfs_root_parms[NFS_MAXPATHLEN + 1] __initdata = "";
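Practical effect: NFS-rooted systems that relied on the implicit defaults now mount the root filesystem over TCP; boards that still need UDP can request it explicitly in the nfsroot= option string, for example nfsroot=<server-ip>:/export/root,udp (option syntax per the kernel's nfsroot documentation).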
+1
fs/nfs/nfstrace.h
··· 181 181 int error \ 182 182 ), \ 183 183 TP_ARGS(inode, error)) 184 + DEFINE_NFS_INODE_EVENT(nfs_set_inode_stale); 184 185 DEFINE_NFS_INODE_EVENT(nfs_refresh_inode_enter); 185 186 DEFINE_NFS_INODE_EVENT_DONE(nfs_refresh_inode_exit); 186 187 DEFINE_NFS_INODE_EVENT(nfs_revalidate_inode_enter);
+234 -133
fs/nfs/pagelist.c
··· 33 33 struct nfs_pgio_mirror * 34 34 nfs_pgio_current_mirror(struct nfs_pageio_descriptor *desc) 35 35 { 36 - return nfs_pgio_has_mirroring(desc) ? 37 - &desc->pg_mirrors[desc->pg_mirror_idx] : 38 - &desc->pg_mirrors[0]; 36 + return &desc->pg_mirrors[desc->pg_mirror_idx]; 39 37 } 40 38 EXPORT_SYMBOL_GPL(nfs_pgio_current_mirror); 41 39 ··· 131 133 EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait); 132 134 133 135 /* 136 + * nfs_page_lock_head_request - page lock the head of the page group 137 + * @req: any member of the page group 138 + */ 139 + struct nfs_page * 140 + nfs_page_group_lock_head(struct nfs_page *req) 141 + { 142 + struct nfs_page *head = req->wb_head; 143 + 144 + while (!nfs_lock_request(head)) { 145 + int ret = nfs_wait_on_request(head); 146 + if (ret < 0) 147 + return ERR_PTR(ret); 148 + } 149 + if (head != req) 150 + kref_get(&head->wb_kref); 151 + return head; 152 + } 153 + 154 + /* 155 + * nfs_unroll_locks - unlock all newly locked reqs and wait on @req 156 + * @head: head request of page group, must be holding head lock 157 + * @req: request that couldn't lock and needs to wait on the req bit lock 158 + * 159 + * This is a helper function for nfs_lock_and_join_requests 160 + * returns 0 on success, < 0 on error. 161 + */ 162 + static void 163 + nfs_unroll_locks(struct nfs_page *head, struct nfs_page *req) 164 + { 165 + struct nfs_page *tmp; 166 + 167 + /* relinquish all the locks successfully grabbed this run */ 168 + for (tmp = head->wb_this_page ; tmp != req; tmp = tmp->wb_this_page) { 169 + if (!kref_read(&tmp->wb_kref)) 170 + continue; 171 + nfs_unlock_and_release_request(tmp); 172 + } 173 + } 174 + 175 + /* 176 + * nfs_page_group_lock_subreq - try to lock a subrequest 177 + * @head: head request of page group 178 + * @subreq: request to lock 179 + * 180 + * This is a helper function for nfs_lock_and_join_requests which 181 + * must be called with the head request and page group both locked. 182 + * On error, it returns with the page group unlocked. 183 + */ 184 + static int 185 + nfs_page_group_lock_subreq(struct nfs_page *head, struct nfs_page *subreq) 186 + { 187 + int ret; 188 + 189 + if (!kref_get_unless_zero(&subreq->wb_kref)) 190 + return 0; 191 + while (!nfs_lock_request(subreq)) { 192 + nfs_page_group_unlock(head); 193 + ret = nfs_wait_on_request(subreq); 194 + if (!ret) 195 + ret = nfs_page_group_lock(head); 196 + if (ret < 0) { 197 + nfs_unroll_locks(head, subreq); 198 + nfs_release_request(subreq); 199 + return ret; 200 + } 201 + } 202 + return 0; 203 + } 204 + 205 + /* 206 + * nfs_page_group_lock_subrequests - try to lock the subrequests 207 + * @head: head request of page group 208 + * 209 + * This is a helper function for nfs_lock_and_join_requests which 210 + * must be called with the head request locked. 
211 + */ 212 + int nfs_page_group_lock_subrequests(struct nfs_page *head) 213 + { 214 + struct nfs_page *subreq; 215 + int ret; 216 + 217 + ret = nfs_page_group_lock(head); 218 + if (ret < 0) 219 + return ret; 220 + /* lock each request in the page group */ 221 + for (subreq = head->wb_this_page; subreq != head; 222 + subreq = subreq->wb_this_page) { 223 + ret = nfs_page_group_lock_subreq(head, subreq); 224 + if (ret < 0) 225 + return ret; 226 + } 227 + nfs_page_group_unlock(head); 228 + return 0; 229 + } 230 + 231 + /* 232 + * nfs_page_set_headlock - set the request PG_HEADLOCK 233 + * @req: request that is to be locked 234 + * 235 + * this lock must be held when modifying req->wb_head 236 + * 237 + * return 0 on success, < 0 on error 238 + */ 239 + int 240 + nfs_page_set_headlock(struct nfs_page *req) 241 + { 242 + if (!test_and_set_bit(PG_HEADLOCK, &req->wb_flags)) 243 + return 0; 244 + 245 + set_bit(PG_CONTENDED1, &req->wb_flags); 246 + smp_mb__after_atomic(); 247 + return wait_on_bit_lock(&req->wb_flags, PG_HEADLOCK, 248 + TASK_UNINTERRUPTIBLE); 249 + } 250 + 251 + /* 252 + * nfs_page_clear_headlock - clear the request PG_HEADLOCK 253 + * @req: request that is to be locked 254 + */ 255 + void 256 + nfs_page_clear_headlock(struct nfs_page *req) 257 + { 258 + smp_mb__before_atomic(); 259 + clear_bit(PG_HEADLOCK, &req->wb_flags); 260 + smp_mb__after_atomic(); 261 + if (!test_bit(PG_CONTENDED1, &req->wb_flags)) 262 + return; 263 + wake_up_bit(&req->wb_flags, PG_HEADLOCK); 264 + } 265 + 266 + /* 134 267 * nfs_page_group_lock - lock the head of the page group 135 - * @req - request in group that is to be locked 268 + * @req: request in group that is to be locked 136 269 * 137 270 * this lock must be held when traversing or modifying the page 138 271 * group list ··· 273 144 int 274 145 nfs_page_group_lock(struct nfs_page *req) 275 146 { 276 - struct nfs_page *head = req->wb_head; 147 + int ret; 277 148 278 - WARN_ON_ONCE(head != head->wb_head); 279 - 280 - if (!test_and_set_bit(PG_HEADLOCK, &head->wb_flags)) 281 - return 0; 282 - 283 - set_bit(PG_CONTENDED1, &head->wb_flags); 284 - smp_mb__after_atomic(); 285 - return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK, 286 - TASK_UNINTERRUPTIBLE); 149 + ret = nfs_page_set_headlock(req); 150 + if (ret || req->wb_head == req) 151 + return ret; 152 + return nfs_page_set_headlock(req->wb_head); 287 153 } 288 154 289 155 /* 290 156 * nfs_page_group_unlock - unlock the head of the page group 291 - * @req - request in group that is to be unlocked 157 + * @req: request in group that is to be unlocked 292 158 */ 293 159 void 294 160 nfs_page_group_unlock(struct nfs_page *req) 295 161 { 296 - struct nfs_page *head = req->wb_head; 297 - 298 - WARN_ON_ONCE(head != head->wb_head); 299 - 300 - smp_mb__before_atomic(); 301 - clear_bit(PG_HEADLOCK, &head->wb_flags); 302 - smp_mb__after_atomic(); 303 - if (!test_bit(PG_CONTENDED1, &head->wb_flags)) 304 - return; 305 - wake_up_bit(&head->wb_flags, PG_HEADLOCK); 162 + if (req != req->wb_head) 163 + nfs_page_clear_headlock(req->wb_head); 164 + nfs_page_clear_headlock(req); 306 165 } 307 166 308 167 /* ··· 476 359 } 477 360 478 361 static struct nfs_page * 479 - nfs_create_subreq(struct nfs_page *req, struct nfs_page *last, 480 - unsigned int pgbase, unsigned int offset, 362 + nfs_create_subreq(struct nfs_page *req, 363 + unsigned int pgbase, 364 + unsigned int offset, 481 365 unsigned int count) 482 366 { 367 + struct nfs_page *last; 483 368 struct nfs_page *ret; 484 369 485 370 ret = 
__nfs_create_request(req->wb_lock_context, req->wb_page, 486 371 pgbase, offset, count); 487 372 if (!IS_ERR(ret)) { 373 + /* find the last request */ 374 + for (last = req->wb_head; 375 + last->wb_this_page != req->wb_head; 376 + last = last->wb_this_page) 377 + ; 378 + 488 379 nfs_lock_request(ret); 489 380 ret->wb_index = req->wb_index; 490 381 nfs_page_group_init(ret, last); ··· 752 627 .callback_ops = call_ops, 753 628 .callback_data = hdr, 754 629 .workqueue = nfsiod_workqueue, 755 - .flags = RPC_TASK_ASYNC | flags, 630 + .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF | flags, 756 631 }; 757 - int ret = 0; 758 632 759 633 hdr->rw_ops->rw_initiate(hdr, &msg, rpc_ops, &task_setup_data, how); 760 634 ··· 765 641 (unsigned long long)hdr->args.offset); 766 642 767 643 task = rpc_run_task(&task_setup_data); 768 - if (IS_ERR(task)) { 769 - ret = PTR_ERR(task); 770 - goto out; 771 - } 772 - if (how & FLUSH_SYNC) { 773 - ret = rpc_wait_for_completion_task(task); 774 - if (ret == 0) 775 - ret = task->tk_status; 776 - } 644 + if (IS_ERR(task)) 645 + return PTR_ERR(task); 777 646 rpc_put_task(task); 778 - out: 779 - return ret; 647 + return 0; 780 648 } 781 649 EXPORT_SYMBOL_GPL(nfs_initiate_pgio); 782 650 ··· 1002 886 pgio->pg_mirror_count = mirror_count; 1003 887 } 1004 888 1005 - /* 1006 - * nfs_pageio_stop_mirroring - stop using mirroring (set mirror count to 1) 1007 - */ 1008 - void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio) 1009 - { 1010 - pgio->pg_mirror_count = 1; 1011 - pgio->pg_mirror_idx = 0; 1012 - } 1013 - 1014 889 static void nfs_pageio_cleanup_mirroring(struct nfs_pageio_descriptor *pgio) 1015 890 { 1016 891 pgio->pg_mirror_count = 1; ··· 1018 911 } 1019 912 1020 913 /** 1021 - * nfs_can_coalesce_requests - test two requests for compatibility 914 + * nfs_coalesce_size - test two requests for compatibility 1022 915 * @prev: pointer to nfs_page 1023 916 * @req: pointer to nfs_page 1024 917 * @pgio: pointer to nfs_pagio_descriptor ··· 1027 920 * page data area they describe is contiguous, and that their RPC 1028 921 * credentials, NFSv4 open state, and lockowners are the same. 1029 922 * 1030 - * Return 'true' if this is the case, else return 'false'. 
923 + * Returns size of the request that can be coalesced 1031 924 */ 1032 - static bool nfs_can_coalesce_requests(struct nfs_page *prev, 925 + static unsigned int nfs_coalesce_size(struct nfs_page *prev, 1033 926 struct nfs_page *req, 1034 927 struct nfs_pageio_descriptor *pgio) 1035 928 { 1036 - size_t size; 1037 929 struct file_lock_context *flctx; 1038 930 1039 931 if (prev) { 1040 932 if (!nfs_match_open_context(nfs_req_openctx(req), nfs_req_openctx(prev))) 1041 - return false; 933 + return 0; 1042 934 flctx = d_inode(nfs_req_openctx(req)->dentry)->i_flctx; 1043 935 if (flctx != NULL && 1044 936 !(list_empty_careful(&flctx->flc_posix) && 1045 937 list_empty_careful(&flctx->flc_flock)) && 1046 938 !nfs_match_lock_context(req->wb_lock_context, 1047 939 prev->wb_lock_context)) 1048 - return false; 940 + return 0; 1049 941 if (req_offset(req) != req_offset(prev) + prev->wb_bytes) 1050 - return false; 942 + return 0; 1051 943 if (req->wb_page == prev->wb_page) { 1052 944 if (req->wb_pgbase != prev->wb_pgbase + prev->wb_bytes) 1053 - return false; 945 + return 0; 1054 946 } else { 1055 947 if (req->wb_pgbase != 0 || 1056 948 prev->wb_pgbase + prev->wb_bytes != PAGE_SIZE) 1057 - return false; 949 + return 0; 1058 950 } 1059 951 } 1060 - size = pgio->pg_ops->pg_test(pgio, prev, req); 1061 - WARN_ON_ONCE(size > req->wb_bytes); 1062 - if (size && size < req->wb_bytes) 1063 - req->wb_bytes = size; 1064 - return size > 0; 952 + return pgio->pg_ops->pg_test(pgio, prev, req); 1065 953 } 1066 954 1067 955 /** ··· 1064 962 * @desc: destination io descriptor 1065 963 * @req: request 1066 964 * 1067 - * Returns true if the request 'req' was successfully coalesced into the 1068 - * existing list of pages 'desc'. 965 + * If the request 'req' was successfully coalesced into the existing list 966 + * of pages 'desc', it returns the size of req. 1069 967 */ 1070 - static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc, 1071 - struct nfs_page *req) 968 + static unsigned int 969 + nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc, 970 + struct nfs_page *req) 1072 971 { 1073 972 struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc); 1074 - 1075 973 struct nfs_page *prev = NULL; 974 + unsigned int size; 1076 975 1077 976 if (mirror->pg_count != 0) { 1078 977 prev = nfs_list_entry(mirror->pg_list.prev); ··· 1093 990 return 0; 1094 991 } 1095 992 1096 - if (!nfs_can_coalesce_requests(prev, req, desc)) 1097 - return 0; 993 + size = nfs_coalesce_size(prev, req, desc); 994 + if (size < req->wb_bytes) 995 + return size; 1098 996 nfs_list_move_request(req, &mirror->pg_list); 1099 997 mirror->pg_count += req->wb_bytes; 1100 - return 1; 998 + return req->wb_bytes; 1101 999 } 1102 1000 1103 1001 /* ··· 1138 1034 * @req: request 1139 1035 * 1140 1036 * This may split a request into subrequests which are all part of the 1141 - * same page group. 1037 + * same page group. If so, it will submit @req as the last one, to ensure 1038 + * the pointer to @req is still valid in case of failure. 1142 1039 * 1143 1040 * Returns true if the request 'req' was successfully coalesced into the 1144 1041 * existing list of pages 'desc'. 
··· 1148 1043 struct nfs_page *req) 1149 1044 { 1150 1045 struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc); 1151 - 1152 1046 struct nfs_page *subreq; 1153 - unsigned int bytes_left = 0; 1154 - unsigned int offset, pgbase; 1047 + unsigned int size, subreq_size; 1155 1048 1156 1049 nfs_page_group_lock(req); 1157 1050 1158 1051 subreq = req; 1159 - bytes_left = subreq->wb_bytes; 1160 - offset = subreq->wb_offset; 1161 - pgbase = subreq->wb_pgbase; 1162 - 1163 - do { 1164 - if (!nfs_pageio_do_add_request(desc, subreq)) { 1165 - /* make sure pg_test call(s) did nothing */ 1166 - WARN_ON_ONCE(subreq->wb_bytes != bytes_left); 1167 - WARN_ON_ONCE(subreq->wb_offset != offset); 1168 - WARN_ON_ONCE(subreq->wb_pgbase != pgbase); 1169 - 1052 + subreq_size = subreq->wb_bytes; 1053 + for(;;) { 1054 + size = nfs_pageio_do_add_request(desc, subreq); 1055 + if (size == subreq_size) { 1056 + /* We successfully submitted a request */ 1057 + if (subreq == req) 1058 + break; 1059 + req->wb_pgbase += size; 1060 + req->wb_bytes -= size; 1061 + req->wb_offset += size; 1062 + subreq_size = req->wb_bytes; 1063 + subreq = req; 1064 + continue; 1065 + } 1066 + if (WARN_ON_ONCE(subreq != req)) { 1067 + nfs_page_group_unlock(req); 1068 + nfs_pageio_cleanup_request(desc, subreq); 1069 + subreq = req; 1070 + subreq_size = req->wb_bytes; 1071 + nfs_page_group_lock(req); 1072 + } 1073 + if (!size) { 1074 + /* Can't coalesce any more, so do I/O */ 1170 1075 nfs_page_group_unlock(req); 1171 1076 desc->pg_moreio = 1; 1172 1077 nfs_pageio_doio(desc); 1173 1078 if (desc->pg_error < 0 || mirror->pg_recoalesce) 1174 - goto out_cleanup_subreq; 1079 + return 0; 1175 1080 /* retry add_request for this subreq */ 1176 1081 nfs_page_group_lock(req); 1177 1082 continue; 1178 1083 } 1179 - 1180 - /* check for buggy pg_test call(s) */ 1181 - WARN_ON_ONCE(subreq->wb_bytes + subreq->wb_pgbase > PAGE_SIZE); 1182 - WARN_ON_ONCE(subreq->wb_bytes > bytes_left); 1183 - WARN_ON_ONCE(subreq->wb_bytes == 0); 1184 - 1185 - bytes_left -= subreq->wb_bytes; 1186 - offset += subreq->wb_bytes; 1187 - pgbase += subreq->wb_bytes; 1188 - 1189 - if (bytes_left) { 1190 - subreq = nfs_create_subreq(req, subreq, pgbase, 1191 - offset, bytes_left); 1192 - if (IS_ERR(subreq)) 1193 - goto err_ptr; 1194 - } 1195 - } while (bytes_left > 0); 1084 + subreq = nfs_create_subreq(req, req->wb_pgbase, 1085 + req->wb_offset, size); 1086 + if (IS_ERR(subreq)) 1087 + goto err_ptr; 1088 + subreq_size = size; 1089 + } 1196 1090 1197 1091 nfs_page_group_unlock(req); 1198 1092 return 1; 1199 1093 err_ptr: 1200 1094 desc->pg_error = PTR_ERR(subreq); 1201 1095 nfs_page_group_unlock(req); 1202 - return 0; 1203 - out_cleanup_subreq: 1204 - if (req != subreq) 1205 - nfs_pageio_cleanup_request(desc, subreq); 1206 1096 return 0; 1207 1097 } 1208 1098 ··· 1267 1167 { 1268 1168 u32 midx; 1269 1169 unsigned int pgbase, offset, bytes; 1270 - struct nfs_page *dupreq, *lastreq; 1170 + struct nfs_page *dupreq; 1271 1171 1272 1172 pgbase = req->wb_pgbase; 1273 1173 offset = req->wb_offset; ··· 1277 1177 if (desc->pg_error < 0) 1278 1178 goto out_failed; 1279 1179 1280 - for (midx = 0; midx < desc->pg_mirror_count; midx++) { 1281 - if (midx) { 1282 - nfs_page_group_lock(req); 1180 + /* Create the mirror instances first, and fire them off */ 1181 + for (midx = 1; midx < desc->pg_mirror_count; midx++) { 1182 + nfs_page_group_lock(req); 1283 1183 1284 - /* find the last request */ 1285 - for (lastreq = req->wb_head; 1286 - lastreq->wb_this_page != req->wb_head; 1287 - lastreq = 
lastreq->wb_this_page) 1288 - ; 1184 + dupreq = nfs_create_subreq(req, 1185 + pgbase, offset, bytes); 1289 1186 1290 - dupreq = nfs_create_subreq(req, lastreq, 1291 - pgbase, offset, bytes); 1187 + nfs_page_group_unlock(req); 1188 + if (IS_ERR(dupreq)) { 1189 + desc->pg_error = PTR_ERR(dupreq); 1190 + goto out_failed; 1191 + } 1292 1192 1293 - nfs_page_group_unlock(req); 1294 - if (IS_ERR(dupreq)) { 1295 - desc->pg_error = PTR_ERR(dupreq); 1296 - goto out_failed; 1297 - } 1298 - } else 1299 - dupreq = req; 1300 - 1301 - if (nfs_pgio_has_mirroring(desc)) 1302 - desc->pg_mirror_idx = midx; 1193 + desc->pg_mirror_idx = midx; 1303 1194 if (!nfs_pageio_add_request_mirror(desc, dupreq)) 1304 1195 goto out_cleanup_subreq; 1305 1196 } 1306 1197 1198 + desc->pg_mirror_idx = 0; 1199 + if (!nfs_pageio_add_request_mirror(desc, req)) 1200 + goto out_failed; 1201 + 1307 1202 return 1; 1308 1203 1309 1204 out_cleanup_subreq: 1310 - if (req != dupreq) 1311 - nfs_pageio_cleanup_request(desc, dupreq); 1205 + nfs_pageio_cleanup_request(desc, dupreq); 1312 1206 out_failed: 1313 1207 nfs_pageio_error_cleanup(desc); 1314 1208 return 0; ··· 1320 1226 struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[mirror_idx]; 1321 1227 u32 restore_idx = desc->pg_mirror_idx; 1322 1228 1323 - if (nfs_pgio_has_mirroring(desc)) 1324 - desc->pg_mirror_idx = mirror_idx; 1229 + desc->pg_mirror_idx = mirror_idx; 1325 1230 for (;;) { 1326 1231 nfs_pageio_doio(desc); 1327 1232 if (desc->pg_error < 0 || !mirror->pg_recoalesce) ··· 1411 1318 } 1412 1319 } 1413 1320 } 1321 + } 1322 + 1323 + /* 1324 + * nfs_pageio_stop_mirroring - stop using mirroring (set mirror count to 1) 1325 + */ 1326 + void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio) 1327 + { 1328 + nfs_pageio_complete(pgio); 1414 1329 } 1415 1330 1416 1331 int __init nfs_init_nfspagecache(void)
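The new helpers split what nfs_lock_and_join_requests() used to do inline: take the group head lock first, then lock each subrequest. A rough sketch of the intended calling sequence, assuming the write-path caller in fs/nfs/write.c drives it roughly like this (retry and cancellation handling omitted):

    /* Sketch only: lock a whole page group via the helpers added above. */
    static struct nfs_page *example_lock_group(struct nfs_page *req)
    {
            struct nfs_page *head;
            int ret;

            head = nfs_page_group_lock_head(req);   /* lock (and ref) the head */
            if (IS_ERR(head))
                    return head;
            ret = nfs_page_group_lock_subrequests(head);
            if (ret < 0) {
                    nfs_unlock_and_release_request(head);
                    return ERR_PTR(ret);
            }
            return head;    /* caller now holds locks on the whole group */
    }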
+196 -45
fs/nfs/pnfs.c
··· 268 268 struct nfs_server *server = NFS_SERVER(lo->plh_inode); 269 269 struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld; 270 270 271 - if (!list_empty(&lo->plh_layouts)) { 271 + if (test_and_clear_bit(NFS_LAYOUT_HASHED, &lo->plh_flags)) { 272 272 struct nfs_client *clp = server->nfs_client; 273 273 274 274 spin_lock(&clp->cl_lock); 275 - list_del_init(&lo->plh_layouts); 275 + list_del_rcu(&lo->plh_layouts); 276 276 spin_unlock(&clp->cl_lock); 277 277 } 278 278 put_cred(lo->plh_lc_cred); ··· 307 307 spin_unlock(&inode->i_lock); 308 308 pnfs_free_layout_hdr(lo); 309 309 } 310 + } 311 + 312 + static struct inode * 313 + pnfs_grab_inode_layout_hdr(struct pnfs_layout_hdr *lo) 314 + { 315 + struct inode *inode = igrab(lo->plh_inode); 316 + if (inode) 317 + return inode; 318 + set_bit(NFS_LAYOUT_INODE_FREEING, &lo->plh_flags); 319 + return NULL; 310 320 } 311 321 312 322 static void ··· 506 496 { 507 497 INIT_LIST_HEAD(&lseg->pls_list); 508 498 INIT_LIST_HEAD(&lseg->pls_lc_list); 499 + INIT_LIST_HEAD(&lseg->pls_commits); 509 500 refcount_set(&lseg->pls_refcount, 1); 510 501 set_bit(NFS_LSEG_VALID, &lseg->pls_flags); 511 502 lseg->pls_layout = lo; ··· 793 782 /* If the sb is being destroyed, just bail */ 794 783 if (!nfs_sb_active(server->super)) 795 784 break; 796 - inode = igrab(lo->plh_inode); 785 + inode = pnfs_grab_inode_layout_hdr(lo); 797 786 if (inode != NULL) { 798 - list_del_init(&lo->plh_layouts); 787 + if (test_and_clear_bit(NFS_LAYOUT_HASHED, &lo->plh_flags)) 788 + list_del_rcu(&lo->plh_layouts); 799 789 if (pnfs_layout_add_bulk_destroy_list(inode, 800 790 layout_list)) 801 791 continue; ··· 806 794 } else { 807 795 rcu_read_unlock(); 808 796 spin_unlock(&clp->cl_lock); 809 - set_bit(NFS_LAYOUT_INODE_FREEING, &lo->plh_flags); 810 797 } 811 798 nfs_sb_deactive(server->super); 812 799 spin_lock(&clp->cl_lock); ··· 914 903 pnfs_destroy_layouts_byclid(clp, false); 915 904 } 916 905 906 + static void 907 + pnfs_set_layout_cred(struct pnfs_layout_hdr *lo, const struct cred *cred) 908 + { 909 + const struct cred *old; 910 + 911 + if (cred && cred_fscmp(lo->plh_lc_cred, cred) != 0) { 912 + old = xchg(&lo->plh_lc_cred, get_cred(cred)); 913 + put_cred(old); 914 + } 915 + } 916 + 917 917 /* update lo->plh_stateid with new if is more recent */ 918 918 void 919 919 pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new, 920 - bool update_barrier) 920 + const struct cred *cred, bool update_barrier) 921 921 { 922 922 u32 oldseq, newseq, new_barrier = 0; 923 923 ··· 936 914 newseq = be32_to_cpu(new->seqid); 937 915 938 916 if (!pnfs_layout_is_valid(lo)) { 917 + pnfs_set_layout_cred(lo, cred); 939 918 nfs4_stateid_copy(&lo->plh_stateid, new); 940 919 lo->plh_barrier = newseq; 941 920 pnfs_clear_layoutreturn_info(lo); ··· 1084 1061 lgp->args.ctx = get_nfs_open_context(ctx); 1085 1062 nfs4_stateid_copy(&lgp->args.stateid, stateid); 1086 1063 lgp->gfp_flags = gfp_flags; 1087 - lgp->cred = get_cred(ctx->cred); 1064 + lgp->cred = ctx->cred; 1088 1065 return lgp; 1089 1066 } 1090 1067 ··· 1095 1072 nfs4_free_pages(lgp->args.layout.pages, max_pages); 1096 1073 if (lgp->args.inode) 1097 1074 pnfs_put_layout_hdr(NFS_I(lgp->args.inode)->layout); 1098 - put_cred(lgp->cred); 1099 1075 put_nfs_open_context(lgp->args.ctx); 1100 1076 kfree(lgp); 1101 1077 } ··· 1131 1109 1132 1110 pnfs_mark_matching_lsegs_invalid(lo, &freeme, range, seq); 1133 1111 pnfs_free_returned_lsegs(lo, &freeme, range, seq); 1134 - pnfs_set_layout_stateid(lo, stateid, true); 1112 + pnfs_set_layout_stateid(lo, 
stateid, NULL, true); 1135 1113 } else 1136 1114 pnfs_mark_layout_stateid_invalid(lo, &freeme); 1137 1115 out_unlock: ··· 1144 1122 static bool 1145 1123 pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo, 1146 1124 nfs4_stateid *stateid, 1125 + const struct cred **cred, 1147 1126 enum pnfs_iomode *iomode) 1148 1127 { 1149 1128 /* Serialise LAYOUTGET/LAYOUTRETURN */ ··· 1155 1132 set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags); 1156 1133 pnfs_get_layout_hdr(lo); 1157 1134 if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) { 1158 - if (stateid != NULL) { 1159 - nfs4_stateid_copy(stateid, &lo->plh_stateid); 1160 - if (lo->plh_return_seq != 0) 1161 - stateid->seqid = cpu_to_be32(lo->plh_return_seq); 1162 - } 1135 + nfs4_stateid_copy(stateid, &lo->plh_stateid); 1136 + *cred = get_cred(lo->plh_lc_cred); 1137 + if (lo->plh_return_seq != 0) 1138 + stateid->seqid = cpu_to_be32(lo->plh_return_seq); 1163 1139 if (iomode != NULL) 1164 1140 *iomode = lo->plh_return_iomode; 1165 1141 pnfs_clear_layoutreturn_info(lo); 1166 1142 return true; 1167 1143 } 1168 - if (stateid != NULL) 1169 - nfs4_stateid_copy(stateid, &lo->plh_stateid); 1144 + nfs4_stateid_copy(stateid, &lo->plh_stateid); 1145 + *cred = get_cred(lo->plh_lc_cred); 1170 1146 if (iomode != NULL) 1171 1147 *iomode = IOMODE_ANY; 1172 1148 return true; ··· 1189 1167 } 1190 1168 1191 1169 static int 1192 - pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, const nfs4_stateid *stateid, 1193 - enum pnfs_iomode iomode, bool sync) 1170 + pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, 1171 + const nfs4_stateid *stateid, 1172 + const struct cred **pcred, 1173 + enum pnfs_iomode iomode, 1174 + bool sync) 1194 1175 { 1195 1176 struct inode *ino = lo->plh_inode; 1196 1177 struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld; 1197 1178 struct nfs4_layoutreturn *lrp; 1179 + const struct cred *cred = *pcred; 1198 1180 int status = 0; 1199 1181 1182 + *pcred = NULL; 1200 1183 lrp = kzalloc(sizeof(*lrp), GFP_NOFS); 1201 1184 if (unlikely(lrp == NULL)) { 1202 1185 status = -ENOMEM; 1203 1186 spin_lock(&ino->i_lock); 1204 1187 pnfs_clear_layoutreturn_waitbit(lo); 1205 1188 spin_unlock(&ino->i_lock); 1189 + put_cred(cred); 1206 1190 pnfs_put_layout_hdr(lo); 1207 1191 goto out; 1208 1192 } ··· 1216 1188 pnfs_init_layoutreturn_args(&lrp->args, lo, stateid, iomode); 1217 1189 lrp->args.ld_private = &lrp->ld_private; 1218 1190 lrp->clp = NFS_SERVER(ino)->nfs_client; 1219 - lrp->cred = lo->plh_lc_cred; 1191 + lrp->cred = cred; 1220 1192 if (ld->prepare_layoutreturn) 1221 1193 ld->prepare_layoutreturn(&lrp->args); 1222 1194 ··· 1261 1233 return; 1262 1234 spin_lock(&inode->i_lock); 1263 1235 if (pnfs_layout_need_return(lo)) { 1236 + const struct cred *cred; 1264 1237 nfs4_stateid stateid; 1265 1238 enum pnfs_iomode iomode; 1266 1239 bool send; 1267 1240 1268 - send = pnfs_prepare_layoutreturn(lo, &stateid, &iomode); 1241 + send = pnfs_prepare_layoutreturn(lo, &stateid, &cred, &iomode); 1269 1242 spin_unlock(&inode->i_lock); 1270 1243 if (send) { 1271 1244 /* Send an async layoutreturn so we dont deadlock */ 1272 - pnfs_send_layoutreturn(lo, &stateid, iomode, false); 1245 + pnfs_send_layoutreturn(lo, &stateid, &cred, iomode, false); 1273 1246 } 1274 1247 } else 1275 1248 spin_unlock(&inode->i_lock); ··· 1290 1261 struct pnfs_layout_hdr *lo = NULL; 1291 1262 struct nfs_inode *nfsi = NFS_I(ino); 1292 1263 LIST_HEAD(tmp_list); 1264 + const struct cred *cred; 1293 1265 nfs4_stateid stateid; 1294 1266 int status = 0; 1295 1267 bool send, valid_layout; ··· 
1335 1305 goto out_put_layout_hdr; 1336 1306 } 1337 1307 1338 - send = pnfs_prepare_layoutreturn(lo, &stateid, NULL); 1308 + send = pnfs_prepare_layoutreturn(lo, &stateid, &cred, NULL); 1339 1309 spin_unlock(&ino->i_lock); 1340 1310 if (send) 1341 - status = pnfs_send_layoutreturn(lo, &stateid, IOMODE_ANY, true); 1311 + status = pnfs_send_layoutreturn(lo, &stateid, &cred, IOMODE_ANY, true); 1342 1312 out_put_layout_hdr: 1343 1313 pnfs_free_lseg_list(&tmp_list); 1344 1314 pnfs_put_layout_hdr(lo); ··· 1384 1354 struct nfs4_state *state; 1385 1355 struct pnfs_layout_hdr *lo; 1386 1356 struct pnfs_layout_segment *lseg, *next; 1357 + const struct cred *lc_cred; 1387 1358 nfs4_stateid stateid; 1388 1359 enum pnfs_iomode iomode = 0; 1389 1360 bool layoutreturn = false, roc = false; ··· 1454 1423 * 2. we don't send layoutreturn 1455 1424 */ 1456 1425 /* lo ref dropped in pnfs_roc_release() */ 1457 - layoutreturn = pnfs_prepare_layoutreturn(lo, &stateid, &iomode); 1426 + layoutreturn = pnfs_prepare_layoutreturn(lo, &stateid, &lc_cred, &iomode); 1458 1427 /* If the creds don't match, we can't compound the layoutreturn */ 1459 - if (!layoutreturn || cred_fscmp(cred, lo->plh_lc_cred) != 0) 1428 + if (!layoutreturn) 1460 1429 goto out_noroc; 1430 + if (cred_fscmp(cred, lc_cred) != 0) 1431 + goto out_noroc_put_cred; 1461 1432 1462 1433 roc = layoutreturn; 1463 1434 pnfs_init_layoutreturn_args(args, lo, &stateid, iomode); 1464 1435 res->lrs_present = 0; 1465 1436 layoutreturn = false; 1466 1437 1438 + out_noroc_put_cred: 1439 + put_cred(lc_cred); 1467 1440 out_noroc: 1468 1441 spin_unlock(&ino->i_lock); 1469 1442 rcu_read_unlock(); ··· 1480 1445 return true; 1481 1446 } 1482 1447 if (layoutreturn) 1483 - pnfs_send_layoutreturn(lo, &stateid, iomode, true); 1448 + pnfs_send_layoutreturn(lo, &stateid, &lc_cred, iomode, true); 1484 1449 pnfs_put_layout_hdr(lo); 1485 1450 return false; 1486 1451 } ··· 1894 1859 static void _add_to_server_list(struct pnfs_layout_hdr *lo, 1895 1860 struct nfs_server *server) 1896 1861 { 1897 - if (list_empty(&lo->plh_layouts)) { 1862 + if (!test_and_set_bit(NFS_LAYOUT_HASHED, &lo->plh_flags)) { 1898 1863 struct nfs_client *clp = server->nfs_client; 1899 1864 1900 1865 /* The lo must be on the clp list if there is any 1901 1866 * chance of a CB_LAYOUTRECALL(FILE) coming in. 1902 1867 */ 1903 1868 spin_lock(&clp->cl_lock); 1904 - if (list_empty(&lo->plh_layouts)) 1905 - list_add_tail(&lo->plh_layouts, &server->layouts); 1869 + list_add_tail_rcu(&lo->plh_layouts, &server->layouts); 1906 1870 spin_unlock(&clp->cl_lock); 1907 1871 } 1908 1872 } ··· 2357 2323 2358 2324 if (!pnfs_layout_is_valid(lo)) { 2359 2325 /* We have a completely new layout */ 2360 - pnfs_set_layout_stateid(lo, &res->stateid, true); 2326 + pnfs_set_layout_stateid(lo, &res->stateid, lgp->cred, true); 2361 2327 } else if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) { 2362 2328 /* existing state ID, make sure the sequence number matches. */ 2363 2329 if (pnfs_layout_stateid_blocked(lo, &res->stateid)) { 2364 2330 dprintk("%s forget reply due to sequence\n", __func__); 2365 2331 goto out_forget; 2366 2332 } 2367 - pnfs_set_layout_stateid(lo, &res->stateid, false); 2333 + pnfs_set_layout_stateid(lo, &res->stateid, lgp->cred, false); 2368 2334 } else { 2369 2335 /* 2370 2336 * We got an entirely new state ID. 
Mark all segments for the ··· 2457 2423 return -ENOENT; 2458 2424 } 2459 2425 2460 - void pnfs_error_mark_layout_for_return(struct inode *inode, 2461 - struct pnfs_layout_segment *lseg) 2426 + static void 2427 + pnfs_mark_layout_for_return(struct inode *inode, 2428 + const struct pnfs_layout_range *range) 2462 2429 { 2463 - struct pnfs_layout_hdr *lo = NFS_I(inode)->layout; 2464 - struct pnfs_layout_range range = { 2465 - .iomode = lseg->pls_range.iomode, 2466 - .offset = 0, 2467 - .length = NFS4_MAX_UINT64, 2468 - }; 2430 + struct pnfs_layout_hdr *lo; 2469 2431 bool return_now = false; 2470 2432 2471 2433 spin_lock(&inode->i_lock); 2434 + lo = NFS_I(inode)->layout; 2472 2435 if (!pnfs_layout_is_valid(lo)) { 2473 2436 spin_unlock(&inode->i_lock); 2474 2437 return; 2475 2438 } 2476 - pnfs_set_plh_return_info(lo, range.iomode, 0); 2439 + pnfs_set_plh_return_info(lo, range->iomode, 0); 2477 2440 /* 2478 2441 * mark all matching lsegs so that we are sure to have no live 2479 2442 * segments at hand when sending layoutreturn. See pnfs_put_lseg() 2480 2443 * for how it works. 2481 2444 */ 2482 - if (pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs, &range, 0) != -EBUSY) { 2445 + if (pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs, range, 0) != -EBUSY) { 2446 + const struct cred *cred; 2483 2447 nfs4_stateid stateid; 2484 2448 enum pnfs_iomode iomode; 2485 2449 2486 - return_now = pnfs_prepare_layoutreturn(lo, &stateid, &iomode); 2450 + return_now = pnfs_prepare_layoutreturn(lo, &stateid, &cred, &iomode); 2487 2451 spin_unlock(&inode->i_lock); 2488 2452 if (return_now) 2489 - pnfs_send_layoutreturn(lo, &stateid, iomode, false); 2453 + pnfs_send_layoutreturn(lo, &stateid, &cred, iomode, false); 2490 2454 } else { 2491 2455 spin_unlock(&inode->i_lock); 2492 2456 nfs_commit_inode(inode, 0); 2493 2457 } 2494 2458 } 2459 + 2460 + void pnfs_error_mark_layout_for_return(struct inode *inode, 2461 + struct pnfs_layout_segment *lseg) 2462 + { 2463 + struct pnfs_layout_range range = { 2464 + .iomode = lseg->pls_range.iomode, 2465 + .offset = 0, 2466 + .length = NFS4_MAX_UINT64, 2467 + }; 2468 + 2469 + pnfs_mark_layout_for_return(inode, &range); 2470 + } 2495 2471 EXPORT_SYMBOL_GPL(pnfs_error_mark_layout_for_return); 2472 + 2473 + static bool 2474 + pnfs_layout_can_be_returned(struct pnfs_layout_hdr *lo) 2475 + { 2476 + return pnfs_layout_is_valid(lo) && 2477 + !test_bit(NFS_LAYOUT_INODE_FREEING, &lo->plh_flags) && 2478 + !test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags); 2479 + } 2480 + 2481 + static struct pnfs_layout_segment * 2482 + pnfs_find_first_lseg(struct pnfs_layout_hdr *lo, 2483 + const struct pnfs_layout_range *range, 2484 + enum pnfs_iomode iomode) 2485 + { 2486 + struct pnfs_layout_segment *lseg; 2487 + 2488 + list_for_each_entry(lseg, &lo->plh_segs, pls_list) { 2489 + if (!test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) 2490 + continue; 2491 + if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)) 2492 + continue; 2493 + if (lseg->pls_range.iomode != iomode && iomode != IOMODE_ANY) 2494 + continue; 2495 + if (pnfs_lseg_range_intersecting(&lseg->pls_range, range)) 2496 + return lseg; 2497 + } 2498 + return NULL; 2499 + } 2500 + 2501 + /* Find open file states whose mode matches that of the range */ 2502 + static bool 2503 + pnfs_should_return_unused_layout(struct pnfs_layout_hdr *lo, 2504 + const struct pnfs_layout_range *range) 2505 + { 2506 + struct list_head *head; 2507 + struct nfs_open_context *ctx; 2508 + fmode_t mode = 0; 2509 + 2510 + if (!pnfs_layout_can_be_returned(lo) || 2511 
+ !pnfs_find_first_lseg(lo, range, range->iomode)) 2512 + return false; 2513 + 2514 + head = &NFS_I(lo->plh_inode)->open_files; 2515 + list_for_each_entry_rcu(ctx, head, list) { 2516 + if (ctx->state) 2517 + mode |= ctx->state->state & (FMODE_READ|FMODE_WRITE); 2518 + } 2519 + 2520 + switch (range->iomode) { 2521 + default: 2522 + break; 2523 + case IOMODE_READ: 2524 + mode &= ~FMODE_WRITE; 2525 + break; 2526 + case IOMODE_RW: 2527 + if (pnfs_find_first_lseg(lo, range, IOMODE_READ)) 2528 + mode &= ~FMODE_READ; 2529 + } 2530 + return mode == 0; 2531 + } 2532 + 2533 + static int 2534 + pnfs_layout_return_unused_byserver(struct nfs_server *server, void *data) 2535 + { 2536 + const struct pnfs_layout_range *range = data; 2537 + struct pnfs_layout_hdr *lo; 2538 + struct inode *inode; 2539 + restart: 2540 + rcu_read_lock(); 2541 + list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) { 2542 + if (!pnfs_layout_can_be_returned(lo) || 2543 + test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) 2544 + continue; 2545 + inode = lo->plh_inode; 2546 + spin_lock(&inode->i_lock); 2547 + if (!pnfs_should_return_unused_layout(lo, range)) { 2548 + spin_unlock(&inode->i_lock); 2549 + continue; 2550 + } 2551 + spin_unlock(&inode->i_lock); 2552 + inode = pnfs_grab_inode_layout_hdr(lo); 2553 + if (!inode) 2554 + continue; 2555 + rcu_read_unlock(); 2556 + pnfs_mark_layout_for_return(inode, range); 2557 + iput(inode); 2558 + cond_resched(); 2559 + goto restart; 2560 + } 2561 + rcu_read_unlock(); 2562 + return 0; 2563 + } 2564 + 2565 + void 2566 + pnfs_layout_return_unused_byclid(struct nfs_client *clp, 2567 + enum pnfs_iomode iomode) 2568 + { 2569 + struct pnfs_layout_range range = { 2570 + .iomode = iomode, 2571 + .offset = 0, 2572 + .length = NFS4_MAX_UINT64, 2573 + }; 2574 + 2575 + nfs_client_for_each_server(clp, pnfs_layout_return_unused_byserver, 2576 + &range); 2577 + } 2496 2578 2497 2579 void 2498 2580 pnfs_generic_pg_check_layout(struct nfs_pageio_descriptor *pgio) ··· 2625 2475 * Check for any intersection between the request and the pgio->pg_lseg, 2626 2476 * and if none, put this pgio->pg_lseg away. 2627 2477 */ 2628 - static void 2478 + void 2629 2479 pnfs_generic_pg_check_range(struct nfs_pageio_descriptor *pgio, struct nfs_page *req) 2630 2480 { 2631 2481 if (pgio->pg_lseg && !pnfs_lseg_request_intersecting(pgio->pg_lseg, req)) { ··· 2633 2483 pgio->pg_lseg = NULL; 2634 2484 } 2635 2485 } 2486 + EXPORT_SYMBOL_GPL(pnfs_generic_pg_check_range); 2636 2487 2637 2488 void 2638 2489 pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req) ··· 3151 3000 end_pos = nfsi->layout->plh_lwb; 3152 3001 3153 3002 nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid); 3003 + data->cred = get_cred(nfsi->layout->plh_lc_cred); 3154 3004 spin_unlock(&inode->i_lock); 3155 3005 3156 3006 data->args.inode = inode; 3157 - data->cred = get_cred(nfsi->layout->plh_lc_cred); 3158 3007 nfs_fattr_init(&data->fattr); 3159 3008 data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask; 3160 3009 data->res.fattr = &data->fattr;
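pnfs_layout_return_unused_byclid() is built on the nfs_client_for_each_server() iterator whose prototype is added to fs/nfs/internal.h earlier in this diff. A hedged sketch of that callback pattern with a hypothetical counter payload (a non-zero return from the callback is assumed to end the walk early):

    /* Illustrative per-server callback: count hashed layout headers. */
    static int example_count_layouts(struct nfs_server *server, void *data)
    {
            unsigned int *count = data;
            struct pnfs_layout_hdr *lo;

            rcu_read_lock();
            list_for_each_entry_rcu(lo, &server->layouts, plh_layouts)
                    (*count)++;
            rcu_read_unlock();
            return 0;
    }

    static unsigned int example_count_all_layouts(struct nfs_client *clp)
    {
            unsigned int count = 0;

            nfs_client_for_each_server(clp, example_count_layouts, &count);
            return count;
    }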
+111 -32
fs/nfs/pnfs.h
··· 66 66 struct pnfs_layout_segment { 67 67 struct list_head pls_list; 68 68 struct list_head pls_lc_list; 69 + struct list_head pls_commits; 69 70 struct pnfs_layout_range pls_range; 70 71 refcount_t pls_refcount; 71 72 u32 pls_seq; ··· 106 105 NFS_LAYOUT_INVALID_STID, /* layout stateid id is invalid */ 107 106 NFS_LAYOUT_FIRST_LAYOUTGET, /* Serialize first layoutget */ 108 107 NFS_LAYOUT_INODE_FREEING, /* The inode is being freed */ 108 + NFS_LAYOUT_HASHED, /* The layout visible */ 109 109 }; 110 110 111 111 enum layoutdriver_policy_flags { ··· 150 148 const struct nfs_pageio_ops *pg_write_ops; 151 149 152 150 struct pnfs_ds_commit_info *(*get_ds_info) (struct inode *inode); 153 - void (*mark_request_commit) (struct nfs_page *req, 154 - struct pnfs_layout_segment *lseg, 155 - struct nfs_commit_info *cinfo, 156 - u32 ds_commit_idx); 157 - void (*clear_request_commit) (struct nfs_page *req, 158 - struct nfs_commit_info *cinfo); 159 - int (*scan_commit_lists) (struct nfs_commit_info *cinfo, 160 - int max); 161 - void (*recover_commit_reqs) (struct list_head *list, 162 - struct nfs_commit_info *cinfo); 163 - struct nfs_page * (*search_commit_reqs)(struct nfs_commit_info *cinfo, 164 - struct page *page); 165 - int (*commit_pagelist)(struct inode *inode, 166 - struct list_head *mds_pages, 167 - int how, 168 - struct nfs_commit_info *cinfo); 169 151 170 152 int (*sync)(struct inode *inode, bool datasync); 171 153 ··· 172 186 int (*prepare_layoutstats) (struct nfs42_layoutstat_args *args); 173 187 }; 174 188 189 + struct pnfs_commit_ops { 190 + void (*setup_ds_info)(struct pnfs_ds_commit_info *, 191 + struct pnfs_layout_segment *); 192 + void (*release_ds_info)(struct pnfs_ds_commit_info *, 193 + struct inode *inode); 194 + int (*commit_pagelist)(struct inode *inode, 195 + struct list_head *mds_pages, 196 + int how, 197 + struct nfs_commit_info *cinfo); 198 + void (*mark_request_commit) (struct nfs_page *req, 199 + struct pnfs_layout_segment *lseg, 200 + struct nfs_commit_info *cinfo, 201 + u32 ds_commit_idx); 202 + void (*clear_request_commit) (struct nfs_page *req, 203 + struct nfs_commit_info *cinfo); 204 + int (*scan_commit_lists) (struct nfs_commit_info *cinfo, 205 + int max); 206 + void (*recover_commit_reqs) (struct list_head *list, 207 + struct nfs_commit_info *cinfo); 208 + struct nfs_page * (*search_commit_reqs)(struct nfs_commit_info *cinfo, 209 + struct page *page); 210 + }; 211 + 175 212 struct pnfs_layout_hdr { 176 213 refcount_t plh_refcount; 177 214 atomic_t plh_outstanding; /* number of RPCs out */ ··· 212 203 loff_t plh_lwb; /* last write byte for layoutcommit */ 213 204 const struct cred *plh_lc_cred; /* layoutcommit cred */ 214 205 struct inode *plh_inode; 206 + struct rcu_head plh_rcu; 215 207 }; 216 208 217 209 struct pnfs_device { ··· 252 242 void set_pnfs_layoutdriver(struct nfs_server *, const struct nfs_fh *, struct nfs_fsinfo *); 253 243 void unset_pnfs_layoutdriver(struct nfs_server *); 254 244 void pnfs_generic_pg_check_layout(struct nfs_pageio_descriptor *pgio); 245 + void pnfs_generic_pg_check_range(struct nfs_pageio_descriptor *pgio, struct nfs_page *req); 255 246 void pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *, struct nfs_page *); 256 247 int pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc); 257 248 void pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio, ··· 278 267 void pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo); 279 268 void pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, 280 269 const nfs4_stateid *new, 270 + 
const struct cred *cred, 281 271 bool update_barrier); 282 272 int pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo, 283 273 struct list_head *tmp_list, ··· 338 326 struct nfs4_threshold *pnfs_mdsthreshold_alloc(void); 339 327 void pnfs_error_mark_layout_for_return(struct inode *inode, 340 328 struct pnfs_layout_segment *lseg); 329 + void pnfs_layout_return_unused_byclid(struct nfs_client *clp, 330 + enum pnfs_iomode iomode); 331 + 341 332 /* nfs4_deviceid_flags */ 342 333 enum { 343 334 NFS_DEVICEID_INVALID = 0, /* set when MDS clientid recalled */ ··· 375 360 void nfs4_deviceid_purge_client(const struct nfs_client *); 376 361 377 362 /* pnfs_nfs.c */ 363 + struct pnfs_commit_array *pnfs_alloc_commit_array(size_t n, gfp_t gfp_flags); 364 + void pnfs_free_commit_array(struct pnfs_commit_array *p); 365 + struct pnfs_commit_array *pnfs_add_commit_array(struct pnfs_ds_commit_info *, 366 + struct pnfs_commit_array *, 367 + struct pnfs_layout_segment *); 368 + 369 + void pnfs_generic_ds_cinfo_release_lseg(struct pnfs_ds_commit_info *fl_cinfo, 370 + struct pnfs_layout_segment *lseg); 371 + void pnfs_generic_ds_cinfo_destroy(struct pnfs_ds_commit_info *fl_cinfo); 372 + 378 373 void pnfs_generic_clear_request_commit(struct nfs_page *req, 379 374 struct nfs_commit_info *cinfo); 380 375 void pnfs_generic_commit_release(void *calldata); ··· 392 367 void pnfs_generic_rw_release(void *data); 393 368 void pnfs_generic_recover_commit_reqs(struct list_head *dst, 394 369 struct nfs_commit_info *cinfo); 370 + struct nfs_page *pnfs_generic_search_commit_reqs(struct nfs_commit_info *cinfo, 371 + struct page *page); 395 372 int pnfs_generic_commit_pagelist(struct inode *inode, 396 373 struct list_head *mds_pages, 397 374 int how, ··· 465 438 pnfs_commit_list(struct inode *inode, struct list_head *mds_pages, int how, 466 439 struct nfs_commit_info *cinfo) 467 440 { 468 - if (cinfo->ds == NULL || cinfo->ds->ncommitting == 0) 441 + struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds; 442 + 443 + if (fl_cinfo == NULL || fl_cinfo->ncommitting == 0) 469 444 return PNFS_NOT_ATTEMPTED; 470 - return NFS_SERVER(inode)->pnfs_curr_ld->commit_pagelist(inode, mds_pages, how, cinfo); 445 + return fl_cinfo->ops->commit_pagelist(inode, mds_pages, how, cinfo); 471 446 } 472 447 473 448 static inline struct pnfs_ds_commit_info * ··· 483 454 } 484 455 485 456 static inline void 457 + pnfs_init_ds_commit_info_ops(struct pnfs_ds_commit_info *fl_cinfo, struct inode *inode) 458 + { 459 + struct pnfs_ds_commit_info *inode_cinfo = pnfs_get_ds_info(inode); 460 + if (inode_cinfo != NULL) 461 + fl_cinfo->ops = inode_cinfo->ops; 462 + } 463 + 464 + static inline void 465 + pnfs_init_ds_commit_info(struct pnfs_ds_commit_info *fl_cinfo) 466 + { 467 + INIT_LIST_HEAD(&fl_cinfo->commits); 468 + fl_cinfo->ops = NULL; 469 + } 470 + 471 + static inline void 472 + pnfs_release_ds_info(struct pnfs_ds_commit_info *fl_cinfo, struct inode *inode) 473 + { 474 + if (fl_cinfo->ops != NULL && fl_cinfo->ops->release_ds_info != NULL) 475 + fl_cinfo->ops->release_ds_info(fl_cinfo, inode); 476 + } 477 + 478 + static inline void 486 479 pnfs_generic_mark_devid_invalid(struct nfs4_deviceid_node *node) 487 480 { 488 481 set_bit(NFS_DEVICEID_INVALID, &node->flags); ··· 514 463 pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg, 515 464 struct nfs_commit_info *cinfo, u32 ds_commit_idx) 516 465 { 517 - struct inode *inode = d_inode(nfs_req_openctx(req)->dentry); 518 - struct pnfs_layoutdriver_type *ld = 
NFS_SERVER(inode)->pnfs_curr_ld; 466 + struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds; 519 467 520 - if (lseg == NULL || ld->mark_request_commit == NULL) 468 + if (!lseg || !fl_cinfo->ops->mark_request_commit) 521 469 return false; 522 - ld->mark_request_commit(req, lseg, cinfo, ds_commit_idx); 470 + fl_cinfo->ops->mark_request_commit(req, lseg, cinfo, ds_commit_idx); 523 471 return true; 524 472 } 525 473 526 474 static inline bool 527 475 pnfs_clear_request_commit(struct nfs_page *req, struct nfs_commit_info *cinfo) 528 476 { 529 - struct inode *inode = d_inode(nfs_req_openctx(req)->dentry); 530 - struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld; 477 + struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds; 531 478 532 - if (ld == NULL || ld->clear_request_commit == NULL) 479 + if (!fl_cinfo || !fl_cinfo->ops || !fl_cinfo->ops->clear_request_commit) 533 480 return false; 534 - ld->clear_request_commit(req, cinfo); 481 + fl_cinfo->ops->clear_request_commit(req, cinfo); 535 482 return true; 536 483 } 537 484 ··· 537 488 pnfs_scan_commit_lists(struct inode *inode, struct nfs_commit_info *cinfo, 538 489 int max) 539 490 { 540 - if (cinfo->ds == NULL || cinfo->ds->nwritten == 0) 491 + struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds; 492 + 493 + if (!fl_cinfo || fl_cinfo->nwritten == 0) 541 494 return 0; 542 - else 543 - return NFS_SERVER(inode)->pnfs_curr_ld->scan_commit_lists(cinfo, max); 495 + return fl_cinfo->ops->scan_commit_lists(cinfo, max); 496 + } 497 + 498 + static inline void 499 + pnfs_recover_commit_reqs(struct list_head *head, struct nfs_commit_info *cinfo) 500 + { 501 + struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds; 502 + 503 + if (fl_cinfo && fl_cinfo->nwritten != 0) 504 + fl_cinfo->ops->recover_commit_reqs(head, cinfo); 544 505 } 545 506 546 507 static inline struct nfs_page * 547 508 pnfs_search_commit_reqs(struct inode *inode, struct nfs_commit_info *cinfo, 548 509 struct page *page) 549 510 { 550 - struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld; 511 + struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds; 551 512 552 - if (ld == NULL || ld->search_commit_reqs == NULL) 513 + if (!fl_cinfo->ops || !fl_cinfo->ops->search_commit_reqs) 553 514 return NULL; 554 - return ld->search_commit_reqs(cinfo, page); 515 + return fl_cinfo->ops->search_commit_reqs(cinfo, page); 555 516 } 556 517 557 518 /* Should the pNFS client commit and return the layout upon a setattr */ ··· 809 750 return NULL; 810 751 } 811 752 753 + static inline void 754 + pnfs_init_ds_commit_info_ops(struct pnfs_ds_commit_info *fl_cinfo, struct inode *inode) 755 + { 756 + } 757 + 758 + static inline void 759 + pnfs_init_ds_commit_info(struct pnfs_ds_commit_info *fl_cinfo) 760 + { 761 + } 762 + 763 + static inline void 764 + pnfs_release_ds_info(struct pnfs_ds_commit_info *fl_cinfo, struct inode *inode) 765 + { 766 + } 767 + 812 768 static inline bool 813 769 pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg, 814 770 struct nfs_commit_info *cinfo, u32 ds_commit_idx) ··· 842 768 int max) 843 769 { 844 770 return 0; 771 + } 772 + 773 + static inline void 774 + pnfs_recover_commit_reqs(struct list_head *head, struct nfs_commit_info *cinfo) 775 + { 845 776 } 846 777 847 778 static inline struct nfs_page *
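The commit callbacks dropped from struct pnfs_layoutdriver_type above now live in the new struct pnfs_commit_ops and are reached through cinfo->ds->ops. As a hedged sketch (not taken from this series), a layout driver might publish such a table as below; my_commit_ops, my_setup_ds_info, my_release_ds_info and my_commit_pagelist are hypothetical driver-side names, while the remaining entries are the generic helpers exported here, whose signatures match the ops:

    /* Sketch only: a driver-side commit ops table for the new interface. */
    static const struct pnfs_commit_ops my_commit_ops = {
    	/* driver-specific: build/tear down the per-lseg bucket arrays */
    	.setup_ds_info		= my_setup_ds_info,
    	.release_ds_info	= my_release_ds_info,
    	/* usually a thin wrapper around pnfs_generic_commit_pagelist() */
    	.commit_pagelist	= my_commit_pagelist,
    	/* generic helpers reused as-is */
    	.mark_request_commit	= pnfs_layout_mark_request_commit,
    	.clear_request_commit	= pnfs_generic_clear_request_commit,
    	.scan_commit_lists	= pnfs_generic_scan_commit_lists,
    	.recover_commit_reqs	= pnfs_generic_recover_commit_reqs,
    	.search_commit_reqs	= pnfs_generic_search_commit_reqs,
    };

The generic layer finds the table through the driver's get_ds_info() hook (pnfs_init_ds_commit_info_ops() above copies the ops pointer into each commit info) instead of dereferencing NFS_SERVER(inode)->pnfs_curr_ld.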
+380 -144
fs/nfs/pnfs_nfs.c
··· 59 59 } 60 60 EXPORT_SYMBOL_GPL(pnfs_generic_commit_release); 61 61 62 + static struct pnfs_layout_segment * 63 + pnfs_free_bucket_lseg(struct pnfs_commit_bucket *bucket) 64 + { 65 + if (list_empty(&bucket->committing) && list_empty(&bucket->written)) { 66 + struct pnfs_layout_segment *freeme = bucket->lseg; 67 + bucket->lseg = NULL; 68 + return freeme; 69 + } 70 + return NULL; 71 + } 72 + 62 73 /* The generic layer is about to remove the req from the commit list. 63 74 * If this will make the bucket empty, it will need to put the lseg reference. 64 75 * Note this must be called holding nfsi->commit_mutex ··· 89 78 bucket = list_first_entry(&req->wb_list, 90 79 struct pnfs_commit_bucket, 91 80 written); 92 - freeme = bucket->wlseg; 93 - bucket->wlseg = NULL; 81 + freeme = pnfs_free_bucket_lseg(bucket); 94 82 } 95 83 out: 96 84 nfs_request_remove_commit_list(req, cinfo); ··· 97 87 } 98 88 EXPORT_SYMBOL_GPL(pnfs_generic_clear_request_commit); 99 89 90 + struct pnfs_commit_array * 91 + pnfs_alloc_commit_array(size_t n, gfp_t gfp_flags) 92 + { 93 + struct pnfs_commit_array *p; 94 + struct pnfs_commit_bucket *b; 95 + 96 + p = kmalloc(struct_size(p, buckets, n), gfp_flags); 97 + if (!p) 98 + return NULL; 99 + p->nbuckets = n; 100 + INIT_LIST_HEAD(&p->cinfo_list); 101 + INIT_LIST_HEAD(&p->lseg_list); 102 + p->lseg = NULL; 103 + for (b = &p->buckets[0]; n != 0; b++, n--) { 104 + INIT_LIST_HEAD(&b->written); 105 + INIT_LIST_HEAD(&b->committing); 106 + b->lseg = NULL; 107 + b->direct_verf.committed = NFS_INVALID_STABLE_HOW; 108 + } 109 + return p; 110 + } 111 + EXPORT_SYMBOL_GPL(pnfs_alloc_commit_array); 112 + 113 + void 114 + pnfs_free_commit_array(struct pnfs_commit_array *p) 115 + { 116 + kfree_rcu(p, rcu); 117 + } 118 + EXPORT_SYMBOL_GPL(pnfs_free_commit_array); 119 + 120 + static struct pnfs_commit_array * 121 + pnfs_find_commit_array_by_lseg(struct pnfs_ds_commit_info *fl_cinfo, 122 + struct pnfs_layout_segment *lseg) 123 + { 124 + struct pnfs_commit_array *array; 125 + 126 + list_for_each_entry_rcu(array, &fl_cinfo->commits, cinfo_list) { 127 + if (array->lseg == lseg) 128 + return array; 129 + } 130 + return NULL; 131 + } 132 + 133 + struct pnfs_commit_array * 134 + pnfs_add_commit_array(struct pnfs_ds_commit_info *fl_cinfo, 135 + struct pnfs_commit_array *new, 136 + struct pnfs_layout_segment *lseg) 137 + { 138 + struct pnfs_commit_array *array; 139 + 140 + array = pnfs_find_commit_array_by_lseg(fl_cinfo, lseg); 141 + if (array) 142 + return array; 143 + new->lseg = lseg; 144 + refcount_set(&new->refcount, 1); 145 + list_add_rcu(&new->cinfo_list, &fl_cinfo->commits); 146 + list_add(&new->lseg_list, &lseg->pls_commits); 147 + return new; 148 + } 149 + EXPORT_SYMBOL_GPL(pnfs_add_commit_array); 150 + 151 + static struct pnfs_commit_array * 152 + pnfs_lookup_commit_array(struct pnfs_ds_commit_info *fl_cinfo, 153 + struct pnfs_layout_segment *lseg) 154 + { 155 + struct pnfs_commit_array *array; 156 + 157 + rcu_read_lock(); 158 + array = pnfs_find_commit_array_by_lseg(fl_cinfo, lseg); 159 + if (!array) { 160 + rcu_read_unlock(); 161 + fl_cinfo->ops->setup_ds_info(fl_cinfo, lseg); 162 + rcu_read_lock(); 163 + array = pnfs_find_commit_array_by_lseg(fl_cinfo, lseg); 164 + } 165 + rcu_read_unlock(); 166 + return array; 167 + } 168 + 169 + static void 170 + pnfs_release_commit_array_locked(struct pnfs_commit_array *array) 171 + { 172 + list_del_rcu(&array->cinfo_list); 173 + list_del(&array->lseg_list); 174 + pnfs_free_commit_array(array); 175 + } 176 + 177 + static void 178 + 
pnfs_put_commit_array_locked(struct pnfs_commit_array *array) 179 + { 180 + if (refcount_dec_and_test(&array->refcount)) 181 + pnfs_release_commit_array_locked(array); 182 + } 183 + 184 + static void 185 + pnfs_put_commit_array(struct pnfs_commit_array *array, struct inode *inode) 186 + { 187 + if (refcount_dec_and_lock(&array->refcount, &inode->i_lock)) { 188 + pnfs_release_commit_array_locked(array); 189 + spin_unlock(&inode->i_lock); 190 + } 191 + } 192 + 193 + static struct pnfs_commit_array * 194 + pnfs_get_commit_array(struct pnfs_commit_array *array) 195 + { 196 + if (refcount_inc_not_zero(&array->refcount)) 197 + return array; 198 + return NULL; 199 + } 200 + 201 + static void 202 + pnfs_remove_and_free_commit_array(struct pnfs_commit_array *array) 203 + { 204 + array->lseg = NULL; 205 + list_del_init(&array->lseg_list); 206 + pnfs_put_commit_array_locked(array); 207 + } 208 + 209 + void 210 + pnfs_generic_ds_cinfo_release_lseg(struct pnfs_ds_commit_info *fl_cinfo, 211 + struct pnfs_layout_segment *lseg) 212 + { 213 + struct pnfs_commit_array *array, *tmp; 214 + 215 + list_for_each_entry_safe(array, tmp, &lseg->pls_commits, lseg_list) 216 + pnfs_remove_and_free_commit_array(array); 217 + } 218 + EXPORT_SYMBOL_GPL(pnfs_generic_ds_cinfo_release_lseg); 219 + 220 + void 221 + pnfs_generic_ds_cinfo_destroy(struct pnfs_ds_commit_info *fl_cinfo) 222 + { 223 + struct pnfs_commit_array *array, *tmp; 224 + 225 + list_for_each_entry_safe(array, tmp, &fl_cinfo->commits, cinfo_list) 226 + pnfs_remove_and_free_commit_array(array); 227 + } 228 + EXPORT_SYMBOL_GPL(pnfs_generic_ds_cinfo_destroy); 229 + 230 + /* 231 + * Locks the nfs_page requests for commit and moves them to 232 + * @bucket->committing. 233 + */ 100 234 static int 101 - pnfs_generic_scan_ds_commit_list(struct pnfs_commit_bucket *bucket, 102 - struct nfs_commit_info *cinfo, 103 - int max) 235 + pnfs_bucket_scan_ds_commit_list(struct pnfs_commit_bucket *bucket, 236 + struct nfs_commit_info *cinfo, 237 + int max) 104 238 { 105 239 struct list_head *src = &bucket->written; 106 240 struct list_head *dst = &bucket->committing; ··· 255 101 if (ret) { 256 102 cinfo->ds->nwritten -= ret; 257 103 cinfo->ds->ncommitting += ret; 258 - if (bucket->clseg == NULL) 259 - bucket->clseg = pnfs_get_lseg(bucket->wlseg); 260 - if (list_empty(src)) { 261 - pnfs_put_lseg(bucket->wlseg); 262 - bucket->wlseg = NULL; 263 - } 264 104 } 265 105 return ret; 106 + } 107 + 108 + static int pnfs_bucket_scan_array(struct nfs_commit_info *cinfo, 109 + struct pnfs_commit_bucket *buckets, 110 + unsigned int nbuckets, 111 + int max) 112 + { 113 + unsigned int i; 114 + int rv = 0, cnt; 115 + 116 + for (i = 0; i < nbuckets && max != 0; i++) { 117 + cnt = pnfs_bucket_scan_ds_commit_list(&buckets[i], cinfo, max); 118 + rv += cnt; 119 + max -= cnt; 120 + } 121 + return rv; 266 122 } 267 123 268 124 /* Move reqs from written to committing lists, returning count 269 125 * of number moved. 
270 126 */ 271 - int pnfs_generic_scan_commit_lists(struct nfs_commit_info *cinfo, 272 - int max) 127 + int pnfs_generic_scan_commit_lists(struct nfs_commit_info *cinfo, int max) 273 128 { 274 - int i, rv = 0, cnt; 129 + struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds; 130 + struct pnfs_commit_array *array; 131 + int rv = 0, cnt; 275 132 276 - lockdep_assert_held(&NFS_I(cinfo->inode)->commit_mutex); 277 - for (i = 0; i < cinfo->ds->nbuckets && max != 0; i++) { 278 - cnt = pnfs_generic_scan_ds_commit_list(&cinfo->ds->buckets[i], 279 - cinfo, max); 280 - max -= cnt; 133 + rcu_read_lock(); 134 + list_for_each_entry_rcu(array, &fl_cinfo->commits, cinfo_list) { 135 + if (!array->lseg || !pnfs_get_commit_array(array)) 136 + continue; 137 + rcu_read_unlock(); 138 + cnt = pnfs_bucket_scan_array(cinfo, array->buckets, 139 + array->nbuckets, max); 140 + rcu_read_lock(); 141 + pnfs_put_commit_array(array, cinfo->inode); 281 142 rv += cnt; 143 + max -= cnt; 144 + if (!max) 145 + break; 282 146 } 147 + rcu_read_unlock(); 283 148 return rv; 284 149 } 285 150 EXPORT_SYMBOL_GPL(pnfs_generic_scan_commit_lists); 151 + 152 + static unsigned int 153 + pnfs_bucket_recover_commit_reqs(struct list_head *dst, 154 + struct pnfs_commit_bucket *buckets, 155 + unsigned int nbuckets, 156 + struct nfs_commit_info *cinfo) 157 + { 158 + struct pnfs_commit_bucket *b; 159 + struct pnfs_layout_segment *freeme; 160 + unsigned int nwritten, ret = 0; 161 + unsigned int i; 162 + 163 + restart: 164 + for (i = 0, b = buckets; i < nbuckets; i++, b++) { 165 + nwritten = nfs_scan_commit_list(&b->written, dst, cinfo, 0); 166 + if (!nwritten) 167 + continue; 168 + ret += nwritten; 169 + freeme = pnfs_free_bucket_lseg(b); 170 + if (freeme) { 171 + pnfs_put_lseg(freeme); 172 + goto restart; 173 + } 174 + } 175 + return ret; 176 + } 286 177 287 178 /* Pull everything off the committing lists and dump into @dst. 
*/ 288 179 void pnfs_generic_recover_commit_reqs(struct list_head *dst, 289 180 struct nfs_commit_info *cinfo) 290 181 { 291 - struct pnfs_commit_bucket *b; 292 - struct pnfs_layout_segment *freeme; 293 - int nwritten; 294 - int i; 182 + struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds; 183 + struct pnfs_commit_array *array; 184 + unsigned int nwritten; 295 185 296 186 lockdep_assert_held(&NFS_I(cinfo->inode)->commit_mutex); 297 - restart: 298 - for (i = 0, b = cinfo->ds->buckets; i < cinfo->ds->nbuckets; i++, b++) { 299 - nwritten = nfs_scan_commit_list(&b->written, dst, cinfo, 0); 300 - if (!nwritten) 187 + rcu_read_lock(); 188 + list_for_each_entry_rcu(array, &fl_cinfo->commits, cinfo_list) { 189 + if (!array->lseg || !pnfs_get_commit_array(array)) 301 190 continue; 302 - cinfo->ds->nwritten -= nwritten; 303 - if (list_empty(&b->written)) { 304 - freeme = b->wlseg; 305 - b->wlseg = NULL; 306 - pnfs_put_lseg(freeme); 307 - goto restart; 308 - } 191 + rcu_read_unlock(); 192 + nwritten = pnfs_bucket_recover_commit_reqs(dst, 193 + array->buckets, 194 + array->nbuckets, 195 + cinfo); 196 + rcu_read_lock(); 197 + pnfs_put_commit_array(array, cinfo->inode); 198 + fl_cinfo->nwritten -= nwritten; 309 199 } 200 + rcu_read_unlock(); 310 201 } 311 202 EXPORT_SYMBOL_GPL(pnfs_generic_recover_commit_reqs); 312 203 313 - static void pnfs_generic_retry_commit(struct nfs_commit_info *cinfo, int idx) 204 + static struct nfs_page * 205 + pnfs_bucket_search_commit_reqs(struct pnfs_commit_bucket *buckets, 206 + unsigned int nbuckets, struct page *page) 207 + { 208 + struct nfs_page *req; 209 + struct pnfs_commit_bucket *b; 210 + unsigned int i; 211 + 212 + /* Linearly search the commit lists for each bucket until a matching 213 + * request is found */ 214 + for (i = 0, b = buckets; i < nbuckets; i++, b++) { 215 + list_for_each_entry(req, &b->written, wb_list) { 216 + if (req->wb_page == page) 217 + return req->wb_head; 218 + } 219 + list_for_each_entry(req, &b->committing, wb_list) { 220 + if (req->wb_page == page) 221 + return req->wb_head; 222 + } 223 + } 224 + return NULL; 225 + } 226 + 227 + /* pnfs_generic_search_commit_reqs - Search lists in @cinfo for the head reqest 228 + * for @page 229 + * @cinfo - commit info for current inode 230 + * @page - page to search for matching head request 231 + * 232 + * Returns a the head request if one is found, otherwise returns NULL. 
233 + */ 234 + struct nfs_page * 235 + pnfs_generic_search_commit_reqs(struct nfs_commit_info *cinfo, struct page *page) 314 236 { 315 237 struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds; 238 + struct pnfs_commit_array *array; 239 + struct nfs_page *req; 240 + 241 + list_for_each_entry(array, &fl_cinfo->commits, cinfo_list) { 242 + req = pnfs_bucket_search_commit_reqs(array->buckets, 243 + array->nbuckets, page); 244 + if (req) 245 + return req; 246 + } 247 + return NULL; 248 + } 249 + EXPORT_SYMBOL_GPL(pnfs_generic_search_commit_reqs); 250 + 251 + static struct pnfs_layout_segment * 252 + pnfs_bucket_get_committing(struct list_head *head, 253 + struct pnfs_commit_bucket *bucket, 254 + struct nfs_commit_info *cinfo) 255 + { 256 + struct list_head *pos; 257 + 258 + list_for_each(pos, &bucket->committing) 259 + cinfo->ds->ncommitting--; 260 + list_splice_init(&bucket->committing, head); 261 + return pnfs_free_bucket_lseg(bucket); 262 + } 263 + 264 + static struct nfs_commit_data * 265 + pnfs_bucket_fetch_commitdata(struct pnfs_commit_bucket *bucket, 266 + struct nfs_commit_info *cinfo) 267 + { 268 + struct nfs_commit_data *data = nfs_commitdata_alloc(false); 269 + 270 + if (!data) 271 + return NULL; 272 + data->lseg = pnfs_bucket_get_committing(&data->pages, bucket, cinfo); 273 + if (!data->lseg) 274 + data->lseg = pnfs_get_lseg(bucket->lseg); 275 + return data; 276 + } 277 + 278 + static void pnfs_generic_retry_commit(struct pnfs_commit_bucket *buckets, 279 + unsigned int nbuckets, 280 + struct nfs_commit_info *cinfo, 281 + unsigned int idx) 282 + { 316 283 struct pnfs_commit_bucket *bucket; 317 284 struct pnfs_layout_segment *freeme; 318 - struct list_head *pos; 319 285 LIST_HEAD(pages); 320 - int i; 321 286 322 - mutex_lock(&NFS_I(cinfo->inode)->commit_mutex); 323 - for (i = idx; i < fl_cinfo->nbuckets; i++) { 324 - bucket = &fl_cinfo->buckets[i]; 287 + for (bucket = buckets; idx < nbuckets; bucket++, idx++) { 325 288 if (list_empty(&bucket->committing)) 326 289 continue; 327 - freeme = bucket->clseg; 328 - bucket->clseg = NULL; 329 - list_for_each(pos, &bucket->committing) 330 - cinfo->ds->ncommitting--; 331 - list_splice_init(&bucket->committing, &pages); 332 - mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex); 333 - nfs_retry_commit(&pages, freeme, cinfo, i); 334 - pnfs_put_lseg(freeme); 335 290 mutex_lock(&NFS_I(cinfo->inode)->commit_mutex); 291 + freeme = pnfs_bucket_get_committing(&pages, bucket, cinfo); 292 + mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex); 293 + nfs_retry_commit(&pages, freeme, cinfo, idx); 294 + pnfs_put_lseg(freeme); 336 295 } 337 - mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex); 338 296 } 339 297 340 298 static unsigned int 341 - pnfs_generic_alloc_ds_commits(struct nfs_commit_info *cinfo, 342 - struct list_head *list) 299 + pnfs_bucket_alloc_ds_commits(struct list_head *list, 300 + struct pnfs_commit_bucket *buckets, 301 + unsigned int nbuckets, 302 + struct nfs_commit_info *cinfo) 343 303 { 344 - struct pnfs_ds_commit_info *fl_cinfo; 345 304 struct pnfs_commit_bucket *bucket; 346 305 struct nfs_commit_data *data; 347 - int i; 306 + unsigned int i; 348 307 unsigned int nreq = 0; 349 308 350 - fl_cinfo = cinfo->ds; 351 - bucket = fl_cinfo->buckets; 352 - for (i = 0; i < fl_cinfo->nbuckets; i++, bucket++) { 309 + for (i = 0, bucket = buckets; i < nbuckets; i++, bucket++) { 353 310 if (list_empty(&bucket->committing)) 354 311 continue; 355 - data = nfs_commitdata_alloc(false); 356 - if (!data) 357 - break; 358 - data->ds_commit_index = i; 359 - 
list_add(&data->pages, list); 360 - nreq++; 312 + mutex_lock(&NFS_I(cinfo->inode)->commit_mutex); 313 + if (!list_empty(&bucket->committing)) { 314 + data = pnfs_bucket_fetch_commitdata(bucket, cinfo); 315 + if (!data) 316 + goto out_error; 317 + data->ds_commit_index = i; 318 + list_add_tail(&data->list, list); 319 + atomic_inc(&cinfo->mds->rpcs_out); 320 + nreq++; 321 + } 322 + mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex); 361 323 } 362 - 324 + return nreq; 325 + out_error: 326 + mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex); 363 327 /* Clean up on error */ 364 - pnfs_generic_retry_commit(cinfo, i); 328 + pnfs_generic_retry_commit(buckets, nbuckets, cinfo, i); 365 329 return nreq; 366 330 } 367 331 368 - static inline 369 - void pnfs_fetch_commit_bucket_list(struct list_head *pages, 370 - struct nfs_commit_data *data, 371 - struct nfs_commit_info *cinfo) 332 + static unsigned int 333 + pnfs_alloc_ds_commits_list(struct list_head *list, 334 + struct pnfs_ds_commit_info *fl_cinfo, 335 + struct nfs_commit_info *cinfo) 372 336 { 373 - struct pnfs_commit_bucket *bucket; 374 - struct list_head *pos; 337 + struct pnfs_commit_array *array; 338 + unsigned int ret = 0; 375 339 376 - bucket = &cinfo->ds->buckets[data->ds_commit_index]; 377 - mutex_lock(&NFS_I(cinfo->inode)->commit_mutex); 378 - list_for_each(pos, &bucket->committing) 379 - cinfo->ds->ncommitting--; 380 - list_splice_init(&bucket->committing, pages); 381 - data->lseg = bucket->clseg; 382 - bucket->clseg = NULL; 383 - mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex); 384 - 385 - } 386 - 387 - /* Helper function for pnfs_generic_commit_pagelist to catch an empty 388 - * page list. This can happen when two commits race. 389 - * 390 - * This must be called instead of nfs_init_commit - call one or the other, but 391 - * not both! 
392 - */ 393 - static bool 394 - pnfs_generic_commit_cancel_empty_pagelist(struct list_head *pages, 395 - struct nfs_commit_data *data, 396 - struct nfs_commit_info *cinfo) 397 - { 398 - if (list_empty(pages)) { 399 - if (atomic_dec_and_test(&cinfo->mds->rpcs_out)) 400 - wake_up_var(&cinfo->mds->rpcs_out); 401 - /* don't call nfs_commitdata_release - it tries to put 402 - * the open_context which is not acquired until nfs_init_commit 403 - * which has not been called on @data */ 404 - WARN_ON_ONCE(data->context); 405 - nfs_commit_free(data); 406 - return true; 340 + rcu_read_lock(); 341 + list_for_each_entry_rcu(array, &fl_cinfo->commits, cinfo_list) { 342 + if (!array->lseg || !pnfs_get_commit_array(array)) 343 + continue; 344 + rcu_read_unlock(); 345 + ret += pnfs_bucket_alloc_ds_commits(list, array->buckets, 346 + array->nbuckets, cinfo); 347 + rcu_read_lock(); 348 + pnfs_put_commit_array(array, cinfo->inode); 407 349 } 408 - 409 - return false; 350 + return ret; 410 351 } 411 352 412 353 /* This follows nfs_commit_list pretty closely */ ··· 511 262 int (*initiate_commit)(struct nfs_commit_data *data, 512 263 int how)) 513 264 { 265 + struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds; 514 266 struct nfs_commit_data *data, *tmp; 515 267 LIST_HEAD(list); 516 268 unsigned int nreq = 0; ··· 519 269 if (!list_empty(mds_pages)) { 520 270 data = nfs_commitdata_alloc(true); 521 271 data->ds_commit_index = -1; 522 - list_add(&data->pages, &list); 272 + list_splice_init(mds_pages, &data->pages); 273 + list_add_tail(&data->list, &list); 274 + atomic_inc(&cinfo->mds->rpcs_out); 523 275 nreq++; 524 276 } 525 277 526 - nreq += pnfs_generic_alloc_ds_commits(cinfo, &list); 527 - 278 + nreq += pnfs_alloc_ds_commits_list(&list, fl_cinfo, cinfo); 528 279 if (nreq == 0) 529 280 goto out; 530 281 531 - atomic_add(nreq, &cinfo->mds->rpcs_out); 532 - 533 - list_for_each_entry_safe(data, tmp, &list, pages) { 534 - list_del_init(&data->pages); 282 + list_for_each_entry_safe(data, tmp, &list, list) { 283 + list_del(&data->list); 535 284 if (data->ds_commit_index < 0) { 536 - /* another commit raced with us */ 537 - if (pnfs_generic_commit_cancel_empty_pagelist(mds_pages, 538 - data, cinfo)) 539 - continue; 540 - 541 - nfs_init_commit(data, mds_pages, NULL, cinfo); 285 + nfs_init_commit(data, NULL, NULL, cinfo); 542 286 nfs_initiate_commit(NFS_CLIENT(inode), data, 543 287 NFS_PROTO(data->inode), 544 288 data->mds_ops, how, 0); 545 289 } else { 546 - LIST_HEAD(pages); 547 - 548 - pnfs_fetch_commit_bucket_list(&pages, data, cinfo); 549 - 550 - /* another commit raced with us */ 551 - if (pnfs_generic_commit_cancel_empty_pagelist(&pages, 552 - data, cinfo)) 553 - continue; 554 - 555 - nfs_init_commit(data, &pages, data->lseg, cinfo); 290 + nfs_init_commit(data, NULL, data->lseg, cinfo); 556 291 initiate_commit(data, how); 557 292 } 558 293 } ··· 1165 930 u32 ds_commit_idx) 1166 931 { 1167 932 struct list_head *list; 1168 - struct pnfs_commit_bucket *buckets; 933 + struct pnfs_commit_array *array; 934 + struct pnfs_commit_bucket *bucket; 1169 935 1170 936 mutex_lock(&NFS_I(cinfo->inode)->commit_mutex); 1171 - buckets = cinfo->ds->buckets; 1172 - list = &buckets[ds_commit_idx].written; 1173 - if (list_empty(list)) { 1174 - if (!pnfs_is_valid_lseg(lseg)) { 1175 - mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex); 1176 - cinfo->completion_ops->resched_write(cinfo, req); 1177 - return; 1178 - } 1179 - /* Non-empty buckets hold a reference on the lseg. 
That ref 1180 - * is normally transferred to the COMMIT call and released 1181 - * there. It could also be released if the last req is pulled 1182 - * off due to a rewrite, in which case it will be done in 1183 - * pnfs_common_clear_request_commit 1184 - */ 1185 - WARN_ON_ONCE(buckets[ds_commit_idx].wlseg != NULL); 1186 - buckets[ds_commit_idx].wlseg = pnfs_get_lseg(lseg); 1187 - } 937 + array = pnfs_lookup_commit_array(cinfo->ds, lseg); 938 + if (!array || !pnfs_is_valid_lseg(lseg)) 939 + goto out_resched; 940 + bucket = &array->buckets[ds_commit_idx]; 941 + list = &bucket->written; 942 + /* Non-empty buckets hold a reference on the lseg. That ref 943 + * is normally transferred to the COMMIT call and released 944 + * there. It could also be released if the last req is pulled 945 + * off due to a rewrite, in which case it will be done in 946 + * pnfs_common_clear_request_commit 947 + */ 948 + if (!bucket->lseg) 949 + bucket->lseg = pnfs_get_lseg(lseg); 1188 950 set_bit(PG_COMMIT_TO_DS, &req->wb_flags); 1189 951 cinfo->ds->nwritten++; 1190 952 1191 953 nfs_request_add_commit_list_locked(req, list, cinfo); 1192 954 mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex); 1193 955 nfs_mark_page_unstable(req->wb_page, cinfo); 956 + return; 957 + out_resched: 958 + mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex); 959 + cinfo->completion_ops->resched_write(cinfo, req); 1194 960 } 1195 961 EXPORT_SYMBOL_GPL(pnfs_layout_mark_request_commit); 1196 962
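The new iterators above (scan, recover, alloc_ds_commits) all share the same RCU pattern for walking cinfo->ds->commits: find a pnfs_commit_array under rcu_read_lock(), pin it with pnfs_get_commit_array(), drop the RCU lock while doing work that may sleep, then retake it and release the reference with pnfs_put_commit_array(). A condensed sketch of that idiom, where walk_commit_arrays() and do_work() are placeholders rather than functions from this series:

    static void walk_commit_arrays(struct pnfs_ds_commit_info *fl_cinfo,
    			       struct inode *inode)
    {
    	struct pnfs_commit_array *array;

    	rcu_read_lock();
    	list_for_each_entry_rcu(array, &fl_cinfo->commits, cinfo_list) {
    		if (!array->lseg || !pnfs_get_commit_array(array))
    			continue;	/* array is being torn down; skip it */
    		rcu_read_unlock();
    		do_work(array);		/* may sleep: the RCU lock is dropped */
    		rcu_read_lock();
    		pnfs_put_commit_array(array, inode);
    	}
    	rcu_read_unlock();
    }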
+1 -1
fs/nfs/read.c
··· 250 250 trace_nfs_readpage_done(task, hdr); 251 251 252 252 if (task->tk_status == -ESTALE) { 253 - set_bit(NFS_INO_STALE, &NFS_I(inode)->flags); 253 + nfs_set_inode_stale(inode); 254 254 nfs_mark_for_revalidate(inode); 255 255 } 256 256 return 0;
+35
fs/nfs/super.c
··· 176 176 } 177 177 EXPORT_SYMBOL_GPL(nfs_sb_deactive); 178 178 179 + static int __nfs_list_for_each_server(struct list_head *head, 180 + int (*fn)(struct nfs_server *, void *), 181 + void *data) 182 + { 183 + struct nfs_server *server, *last = NULL; 184 + int ret = 0; 185 + 186 + rcu_read_lock(); 187 + list_for_each_entry_rcu(server, head, client_link) { 188 + if (!nfs_sb_active(server->super)) 189 + continue; 190 + rcu_read_unlock(); 191 + if (last) 192 + nfs_sb_deactive(last->super); 193 + last = server; 194 + ret = fn(server, data); 195 + if (ret) 196 + goto out; 197 + rcu_read_lock(); 198 + } 199 + rcu_read_unlock(); 200 + out: 201 + if (last) 202 + nfs_sb_deactive(last->super); 203 + return ret; 204 + } 205 + 206 + int nfs_client_for_each_server(struct nfs_client *clp, 207 + int (*fn)(struct nfs_server *, void *), 208 + void *data) 209 + { 210 + return __nfs_list_for_each_server(&clp->cl_superblocks, fn, data); 211 + } 212 + EXPORT_SYMBOL_GPL(nfs_client_for_each_server); 213 + 179 214 /* 180 215 * Deliver file system statistics to userspace 181 216 */
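nfs_client_for_each_server() iterates clp->cl_superblocks under RCU, holds an active superblock reference across each callback, and stops as soon as the callback returns nonzero, propagating that value to the caller. A hypothetical user (the callback and counter names are illustrative only):

    static int count_one_server(struct nfs_server *server, void *data)
    {
    	(*(unsigned int *)data)++;
    	return 0;		/* a nonzero return would stop the walk */
    }

    static unsigned int nfs_count_servers(struct nfs_client *clp)
    {
    	unsigned int count = 0;

    	nfs_client_for_each_server(clp, count_one_server, &count);
    	return count;
    }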
+2 -2
fs/nfs/unlink.c
··· 98 98 .callback_ops = &nfs_unlink_ops, 99 99 .callback_data = data, 100 100 .workqueue = nfsiod_workqueue, 101 - .flags = RPC_TASK_ASYNC, 101 + .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 102 102 }; 103 103 struct rpc_task *task; 104 104 struct inode *dir = d_inode(data->dentry->d_parent); ··· 341 341 .callback_ops = &nfs_rename_ops, 342 342 .workqueue = nfsiod_workqueue, 343 343 .rpc_client = NFS_CLIENT(old_dir), 344 - .flags = RPC_TASK_ASYNC, 344 + .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 345 345 }; 346 346 347 347 data = kzalloc(sizeof(*data), GFP_KERNEL);
+141 -149
fs/nfs/write.c
··· 149 149 kref_put(&ioc->refcount, nfs_io_completion_release); 150 150 } 151 151 152 + static void 153 + nfs_page_set_inode_ref(struct nfs_page *req, struct inode *inode) 154 + { 155 + if (!test_and_set_bit(PG_INODE_REF, &req->wb_flags)) { 156 + kref_get(&req->wb_kref); 157 + atomic_long_inc(&NFS_I(inode)->nrequests); 158 + } 159 + } 160 + 161 + static int 162 + nfs_cancel_remove_inode(struct nfs_page *req, struct inode *inode) 163 + { 164 + int ret; 165 + 166 + if (!test_bit(PG_REMOVE, &req->wb_flags)) 167 + return 0; 168 + ret = nfs_page_group_lock(req); 169 + if (ret) 170 + return ret; 171 + if (test_and_clear_bit(PG_REMOVE, &req->wb_flags)) 172 + nfs_page_set_inode_ref(req, inode); 173 + nfs_page_group_unlock(req); 174 + return 0; 175 + } 176 + 152 177 static struct nfs_page * 153 178 nfs_page_private_request(struct page *page) 154 179 { ··· 241 216 if (!req) 242 217 req = nfs_page_find_swap_request(page); 243 218 return req; 219 + } 220 + 221 + static struct nfs_page *nfs_find_and_lock_page_request(struct page *page) 222 + { 223 + struct inode *inode = page_file_mapping(page)->host; 224 + struct nfs_page *req, *head; 225 + int ret; 226 + 227 + for (;;) { 228 + req = nfs_page_find_head_request(page); 229 + if (!req) 230 + return req; 231 + head = nfs_page_group_lock_head(req); 232 + if (head != req) 233 + nfs_release_request(req); 234 + if (IS_ERR(head)) 235 + return head; 236 + ret = nfs_cancel_remove_inode(head, inode); 237 + if (ret < 0) { 238 + nfs_unlock_and_release_request(head); 239 + return ERR_PTR(ret); 240 + } 241 + /* Ensure that nobody removed the request before we locked it */ 242 + if (head == nfs_page_private_request(page)) 243 + break; 244 + if (PageSwapCache(page)) 245 + break; 246 + nfs_unlock_and_release_request(head); 247 + } 248 + return head; 244 249 } 245 250 246 251 /* Adjust the file length if we're writing beyond the end */ ··· 435 380 } 436 381 437 382 /* 438 - * nfs_unroll_locks_and_wait - unlock all newly locked reqs and wait on @req 439 - * 440 - * this is a helper function for nfs_lock_and_join_requests 441 - * 442 - * @inode - inode associated with request page group, must be holding inode lock 443 - * @head - head request of page group, must be holding head lock 444 - * @req - request that couldn't lock and needs to wait on the req bit lock 445 - * 446 - * NOTE: this must be called holding page_group bit lock 447 - * which will be released before returning. 448 - * 449 - * returns 0 on success, < 0 on error. 450 - */ 451 - static void 452 - nfs_unroll_locks(struct inode *inode, struct nfs_page *head, 453 - struct nfs_page *req) 454 - { 455 - struct nfs_page *tmp; 456 - 457 - /* relinquish all the locks successfully grabbed this run */ 458 - for (tmp = head->wb_this_page ; tmp != req; tmp = tmp->wb_this_page) { 459 - if (!kref_read(&tmp->wb_kref)) 460 - continue; 461 - nfs_unlock_and_release_request(tmp); 462 - } 463 - } 464 - 465 - /* 466 383 * nfs_destroy_unlinked_subrequests - destroy recently unlinked subrequests 467 384 * 468 385 * @destroy_list - request list (using wb_this_page) terminated by @old_head ··· 455 428 destroy_list = (subreq->wb_this_page == old_head) ? 
456 429 NULL : subreq->wb_this_page; 457 430 431 + /* Note: lock subreq in order to change subreq->wb_head */ 432 + nfs_page_set_headlock(subreq); 458 433 WARN_ON_ONCE(old_head != subreq->wb_head); 459 434 460 435 /* make sure old group is not used */ 461 436 subreq->wb_this_page = subreq; 437 + subreq->wb_head = subreq; 462 438 463 439 clear_bit(PG_REMOVE, &subreq->wb_flags); 464 440 465 441 /* Note: races with nfs_page_group_destroy() */ 466 442 if (!kref_read(&subreq->wb_kref)) { 467 443 /* Check if we raced with nfs_page_group_destroy() */ 468 - if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags)) 444 + if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags)) { 445 + nfs_page_clear_headlock(subreq); 469 446 nfs_free_request(subreq); 447 + } else 448 + nfs_page_clear_headlock(subreq); 470 449 continue; 471 450 } 451 + nfs_page_clear_headlock(subreq); 472 452 473 - subreq->wb_head = subreq; 453 + nfs_release_request(old_head); 474 454 475 455 if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) { 476 456 nfs_release_request(subreq); ··· 491 457 } 492 458 493 459 /* 494 - * nfs_lock_and_join_requests - join all subreqs to the head req and return 495 - * a locked reference, cancelling any pending 496 - * operations for this page. 460 + * nfs_join_page_group - destroy subrequests of the head req 461 + * @head: the page used to lookup the "page group" of nfs_page structures 462 + * @inode: Inode to which the request belongs. 497 463 * 498 - * @page - the page used to lookup the "page group" of nfs_page structures 464 + * This function joins all sub requests to the head request by first 465 + * locking all requests in the group, cancelling any pending operations 466 + * and finally updating the head request to cover the whole range covered by 467 + * the (former) group. All subrequests are removed from any write or commit 468 + * lists, unlinked from the group and destroyed. 469 + */ 470 + void 471 + nfs_join_page_group(struct nfs_page *head, struct inode *inode) 472 + { 473 + struct nfs_page *subreq; 474 + struct nfs_page *destroy_list = NULL; 475 + unsigned int pgbase, off, bytes; 476 + 477 + pgbase = head->wb_pgbase; 478 + bytes = head->wb_bytes; 479 + off = head->wb_offset; 480 + for (subreq = head->wb_this_page; subreq != head; 481 + subreq = subreq->wb_this_page) { 482 + /* Subrequests should always form a contiguous range */ 483 + if (pgbase > subreq->wb_pgbase) { 484 + off -= pgbase - subreq->wb_pgbase; 485 + bytes += pgbase - subreq->wb_pgbase; 486 + pgbase = subreq->wb_pgbase; 487 + } 488 + bytes = max(subreq->wb_pgbase + subreq->wb_bytes 489 + - pgbase, bytes); 490 + } 491 + 492 + /* Set the head request's range to cover the former page group */ 493 + head->wb_pgbase = pgbase; 494 + head->wb_bytes = bytes; 495 + head->wb_offset = off; 496 + 497 + /* Now that all requests are locked, make sure they aren't on any list. 
498 + * Commit list removal accounting is done after locks are dropped */ 499 + subreq = head; 500 + do { 501 + nfs_clear_request_commit(subreq); 502 + subreq = subreq->wb_this_page; 503 + } while (subreq != head); 504 + 505 + /* unlink subrequests from head, destroy them later */ 506 + if (head->wb_this_page != head) { 507 + /* destroy list will be terminated by head */ 508 + destroy_list = head->wb_this_page; 509 + head->wb_this_page = head; 510 + } 511 + 512 + nfs_destroy_unlinked_subrequests(destroy_list, head, inode); 513 + } 514 + 515 + /* 516 + * nfs_lock_and_join_requests - join all subreqs to the head req 517 + * @page: the page used to lookup the "page group" of nfs_page structures 499 518 * 500 519 * This function joins all sub requests to the head request by first 501 520 * locking all requests in the group, cancelling any pending operations ··· 565 478 nfs_lock_and_join_requests(struct page *page) 566 479 { 567 480 struct inode *inode = page_file_mapping(page)->host; 568 - struct nfs_page *head, *subreq; 569 - struct nfs_page *destroy_list = NULL; 570 - unsigned int total_bytes; 481 + struct nfs_page *head; 571 482 int ret; 572 483 573 - try_again: 574 484 /* 575 485 * A reference is taken only on the head request which acts as a 576 486 * reference to the whole page group - the group will not be destroyed 577 487 * until the head reference is released. 578 488 */ 579 - head = nfs_page_find_head_request(page); 580 - if (!head) 581 - return NULL; 582 - 583 - /* lock the page head first in order to avoid an ABBA inefficiency */ 584 - if (!nfs_lock_request(head)) { 585 - ret = nfs_wait_on_request(head); 586 - nfs_release_request(head); 587 - if (ret < 0) 588 - return ERR_PTR(ret); 589 - goto try_again; 590 - } 591 - 592 - /* Ensure that nobody removed the request before we locked it */ 593 - if (head != nfs_page_private_request(page) && !PageSwapCache(page)) { 594 - nfs_unlock_and_release_request(head); 595 - goto try_again; 596 - } 597 - 598 - ret = nfs_page_group_lock(head); 599 - if (ret < 0) 600 - goto release_request; 489 + head = nfs_find_and_lock_page_request(page); 490 + if (IS_ERR_OR_NULL(head)) 491 + return head; 601 492 602 493 /* lock each request in the page group */ 603 - total_bytes = head->wb_bytes; 604 - for (subreq = head->wb_this_page; subreq != head; 605 - subreq = subreq->wb_this_page) { 606 - 607 - if (!kref_get_unless_zero(&subreq->wb_kref)) { 608 - if (subreq->wb_offset == head->wb_offset + total_bytes) 609 - total_bytes += subreq->wb_bytes; 610 - continue; 611 - } 612 - 613 - while (!nfs_lock_request(subreq)) { 614 - /* 615 - * Unlock page to allow nfs_page_group_sync_on_bit() 616 - * to succeed 617 - */ 618 - nfs_page_group_unlock(head); 619 - ret = nfs_wait_on_request(subreq); 620 - if (!ret) 621 - ret = nfs_page_group_lock(head); 622 - if (ret < 0) { 623 - nfs_unroll_locks(inode, head, subreq); 624 - nfs_release_request(subreq); 625 - goto release_request; 626 - } 627 - } 628 - /* 629 - * Subrequests are always contiguous, non overlapping 630 - * and in order - but may be repeated (mirrored writes). 
631 - */ 632 - if (subreq->wb_offset == (head->wb_offset + total_bytes)) { 633 - /* keep track of how many bytes this group covers */ 634 - total_bytes += subreq->wb_bytes; 635 - } else if (WARN_ON_ONCE(subreq->wb_offset < head->wb_offset || 636 - ((subreq->wb_offset + subreq->wb_bytes) > 637 - (head->wb_offset + total_bytes)))) { 638 - nfs_page_group_unlock(head); 639 - nfs_unroll_locks(inode, head, subreq); 640 - nfs_unlock_and_release_request(subreq); 641 - ret = -EIO; 642 - goto release_request; 643 - } 644 - } 645 - 646 - /* Now that all requests are locked, make sure they aren't on any list. 647 - * Commit list removal accounting is done after locks are dropped */ 648 - subreq = head; 649 - do { 650 - nfs_clear_request_commit(subreq); 651 - subreq = subreq->wb_this_page; 652 - } while (subreq != head); 653 - 654 - /* unlink subrequests from head, destroy them later */ 655 - if (head->wb_this_page != head) { 656 - /* destroy list will be terminated by head */ 657 - destroy_list = head->wb_this_page; 658 - head->wb_this_page = head; 659 - 660 - /* change head request to cover whole range that 661 - * the former page group covered */ 662 - head->wb_bytes = total_bytes; 663 - } 664 - 665 - /* Postpone destruction of this request */ 666 - if (test_and_clear_bit(PG_REMOVE, &head->wb_flags)) { 667 - set_bit(PG_INODE_REF, &head->wb_flags); 668 - kref_get(&head->wb_kref); 669 - atomic_long_inc(&NFS_I(inode)->nrequests); 670 - } 671 - 672 - nfs_page_group_unlock(head); 673 - 674 - nfs_destroy_unlinked_subrequests(destroy_list, head, inode); 675 - 676 - /* Did we lose a race with nfs_inode_remove_request()? */ 677 - if (!(PagePrivate(page) || PageSwapCache(page))) { 494 + ret = nfs_page_group_lock_subrequests(head); 495 + if (ret < 0) { 678 496 nfs_unlock_and_release_request(head); 679 - return NULL; 497 + return ERR_PTR(ret); 680 498 } 681 499 682 - /* still holds ref on head from nfs_page_find_head_request 683 - * and still has lock on head from lock loop */ 684 - return head; 500 + nfs_join_page_group(head, inode); 685 501 686 - release_request: 687 - nfs_unlock_and_release_request(head); 688 - return ERR_PTR(ret); 502 + return head; 689 503 } 690 504 691 505 static void nfs_write_error(struct nfs_page *req, int error) ··· 1695 1707 .callback_ops = call_ops, 1696 1708 .callback_data = data, 1697 1709 .workqueue = nfsiod_workqueue, 1698 - .flags = RPC_TASK_ASYNC | flags, 1710 + .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF | flags, 1699 1711 .priority = priority, 1700 1712 }; 1701 1713 /* Set up the initial task struct. */ ··· 1734 1746 struct pnfs_layout_segment *lseg, 1735 1747 struct nfs_commit_info *cinfo) 1736 1748 { 1737 - struct nfs_page *first = nfs_list_entry(head->next); 1738 - struct nfs_open_context *ctx = nfs_req_openctx(first); 1739 - struct inode *inode = d_inode(ctx->dentry); 1749 + struct nfs_page *first; 1750 + struct nfs_open_context *ctx; 1751 + struct inode *inode; 1740 1752 1741 1753 /* Set up the RPC argument and reply structs 1742 1754 * NB: take care not to mess about with data->commit et al. */ 1743 1755 1744 - list_splice_init(head, &data->pages); 1756 + if (head) 1757 + list_splice_init(head, &data->pages); 1758 + 1759 + first = nfs_list_entry(data->pages.next); 1760 + ctx = nfs_req_openctx(first); 1761 + inode = d_inode(ctx->dentry); 1745 1762 1746 1763 data->inode = inode; 1747 1764 data->cred = ctx->cred; ··· 1862 1869 1863 1870 /* Okay, COMMIT succeeded, apparently. Check the verifier 1864 1871 * returned by the server against all stored verfs. 
*/ 1865 - if (verf->committed > NFS_UNSTABLE && 1866 - !nfs_write_verifier_cmp(&req->wb_verf, &verf->verifier)) { 1872 + if (nfs_write_match_verf(verf, req)) { 1867 1873 /* We have a match */ 1868 1874 if (req->wb_page) 1869 1875 nfs_inode_remove_request(req);
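To illustrate the range arithmetic in nfs_join_page_group() with invented numbers: if the head request covers wb_pgbase = 512 with wb_bytes = 512 and a subrequest covers wb_pgbase = 0 with wb_bytes = 512, the first branch widens the group downwards (wb_offset is pulled back by 512, bytes grows to 1024, pgbase drops to 0), and the subsequent max() leaves bytes at 1024 because that subrequest ends at page offset 512. The joined head then describes a single contiguous 1024-byte range starting at page offset 0, after which every subrequest is taken off the commit lists, unlinked from the group and destroyed.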
+1
include/linux/nfs_fs.h
··· 354 354 extern int nfs_sync_mapping(struct address_space *mapping); 355 355 extern void nfs_zap_mapping(struct inode *inode, struct address_space *mapping); 356 356 extern void nfs_zap_caches(struct inode *); 357 + extern void nfs_set_inode_stale(struct inode *inode); 357 358 extern void nfs_invalidate_atime(struct inode *); 358 359 extern struct inode *nfs_fhget(struct super_block *, struct nfs_fh *, 359 360 struct nfs_fattr *, struct nfs4_label *);
+5
include/linux/nfs_page.h
··· 139 139 extern int nfs_wait_on_request(struct nfs_page *); 140 140 extern void nfs_unlock_request(struct nfs_page *req); 141 141 extern void nfs_unlock_and_release_request(struct nfs_page *); 142 + extern struct nfs_page *nfs_page_group_lock_head(struct nfs_page *req); 143 + extern int nfs_page_group_lock_subrequests(struct nfs_page *head); 144 + extern void nfs_join_page_group(struct nfs_page *head, struct inode *inode); 142 145 extern int nfs_page_group_lock(struct nfs_page *); 143 146 extern void nfs_page_group_unlock(struct nfs_page *); 144 147 extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int); 148 + extern int nfs_page_set_headlock(struct nfs_page *req); 149 + extern void nfs_page_clear_headlock(struct nfs_page *req); 145 150 extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *); 146 151 147 152 /*
+15 -17
include/linux/nfs_xdr.h
··· 1266 1266 struct pnfs_commit_bucket { 1267 1267 struct list_head written; 1268 1268 struct list_head committing; 1269 - struct pnfs_layout_segment *wlseg; 1270 - struct pnfs_layout_segment *clseg; 1269 + struct pnfs_layout_segment *lseg; 1271 1270 struct nfs_writeverf direct_verf; 1272 1271 }; 1273 1272 1273 + struct pnfs_commit_array { 1274 + struct list_head cinfo_list; 1275 + struct list_head lseg_list; 1276 + struct pnfs_layout_segment *lseg; 1277 + struct rcu_head rcu; 1278 + refcount_t refcount; 1279 + unsigned int nbuckets; 1280 + struct pnfs_commit_bucket buckets[]; 1281 + }; 1282 + 1274 1283 struct pnfs_ds_commit_info { 1275 - int nwritten; 1276 - int ncommitting; 1277 - int nbuckets; 1278 - struct pnfs_commit_bucket *buckets; 1284 + struct list_head commits; 1285 + unsigned int nwritten; 1286 + unsigned int ncommitting; 1287 + const struct pnfs_commit_ops *ops; 1279 1288 }; 1280 1289 1281 1290 struct nfs41_state_protection { ··· 1395 1386 unsigned int status; 1396 1387 }; 1397 1388 1398 - static inline void 1399 - nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo) 1400 - { 1401 - kfree(cinfo->buckets); 1402 - } 1403 - 1404 1389 #else 1405 1390 1406 1391 struct pnfs_ds_commit_info { 1407 1392 }; 1408 - 1409 - static inline void 1410 - nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo) 1411 - { 1412 - } 1413 1393 1414 1394 #endif /* CONFIG_NFS_V4_1 */ 1415 1395
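struct pnfs_commit_array replaces the single bucket table that pnfs_ds_commit_info used to carry: there is now one array per layout segment, linked into fl_cinfo->commits, with the buckets stored as a C99 flexible array member so header and buckets come from a single allocation (pnfs_alloc_commit_array() above sizes it with struct_size()). A hedged sketch of how a driver's setup_ds_info() callback might install one; the bucket count and GFP flags are illustrative, and locking is omitted here even though insertion must be serialised by the caller since pnfs_add_commit_array() uses list_add_rcu():

    static void my_setup_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
    			     struct pnfs_layout_segment *lseg)
    {
    	struct pnfs_commit_array *new, *array;

    	new = pnfs_alloc_commit_array(4, GFP_NOFS);	/* bucket count illustrative */
    	if (!new)
    		return;
    	array = pnfs_add_commit_array(fl_cinfo, new, lseg);
    	if (array != new)	/* an array for this lseg already existed */
    		pnfs_free_commit_array(new);
    }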
+1
include/linux/sunrpc/sched.h
··· 132 132 #define RPC_TASK_TIMEOUT 0x1000 /* fail with ETIMEDOUT on timeout */ 133 133 #define RPC_TASK_NOCONNECT 0x2000 /* return ENOTCONN if not connected */ 134 134 #define RPC_TASK_NO_RETRANS_TIMEOUT 0x4000 /* wait forever for a reply */ 135 + #define RPC_TASK_CRED_NOREF 0x8000 /* No refcount on the credential */ 135 136 136 137 #define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC) 137 138 #define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER)
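RPC_TASK_CRED_NOREF tells the RPC client not to take its own reference on rpc_message.rpc_cred, so the caller must guarantee that the cred outlives the task; the unlink, rename and commit paths above now do exactly that, and the clnt.c hunk below makes rpc_run_task() set the flag automatically for synchronous tasks. A sketch of an asynchronous caller (clnt, msg and my_call_ops are assumed to exist in the caller and are not names from this series):

    struct rpc_task_setup task_setup_data = {
    	.rpc_client	= clnt,
    	.rpc_message	= &msg,		/* msg.rpc_cred stays pinned by the caller */
    	.callback_ops	= &my_call_ops,
    	.workqueue	= nfsiod_workqueue,
    	.flags		= RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
    };
    struct rpc_task *task = rpc_run_task(&task_setup_data);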
-1
include/linux/sunrpc/xdr.h
··· 184 184 extern void xdr_shift_buf(struct xdr_buf *, size_t); 185 185 extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *); 186 186 extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int); 187 - extern int xdr_buf_read_mic(struct xdr_buf *, struct xdr_netobj *, unsigned int); 188 187 extern int read_bytes_from_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int); 189 188 extern int write_bytes_to_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int); 190 189
+63 -90
include/trace/events/rpcrdma.h
··· 104 104 TP_fast_assign( 105 105 __entry->r_xprt = r_xprt; 106 106 __entry->rc = rc; 107 - __entry->connect_status = r_xprt->rx_ep.rep_connected; 107 + __entry->connect_status = r_xprt->rx_ep->re_connect_status; 108 108 __assign_str(addr, rpcrdma_addrstr(r_xprt)); 109 109 __assign_str(port, rpcrdma_portstr(r_xprt)); 110 110 ), 111 111 112 - TP_printk("peer=[%s]:%s r_xprt=%p: rc=%d connect status=%d", 112 + TP_printk("peer=[%s]:%s r_xprt=%p: rc=%d connection status=%d", 113 113 __get_str(addr), __get_str(port), __entry->r_xprt, 114 114 __entry->rc, __entry->connect_status 115 115 ) ··· 228 228 TP_ARGS(wc, frwr), 229 229 230 230 TP_STRUCT__entry( 231 - __field(const void *, mr) 231 + __field(u32, mr_id) 232 232 __field(unsigned int, status) 233 233 __field(unsigned int, vendor_err) 234 234 ), 235 235 236 236 TP_fast_assign( 237 - __entry->mr = container_of(frwr, struct rpcrdma_mr, frwr); 237 + __entry->mr_id = frwr->fr_mr->res.id; 238 238 __entry->status = wc->status; 239 239 __entry->vendor_err = __entry->status ? wc->vendor_err : 0; 240 240 ), 241 241 242 242 TP_printk( 243 - "mr=%p: %s (%u/0x%x)", 244 - __entry->mr, rdma_show_wc_status(__entry->status), 243 + "mr.id=%u: %s (%u/0x%x)", 244 + __entry->mr_id, rdma_show_wc_status(__entry->status), 245 245 __entry->status, __entry->vendor_err 246 246 ) 247 247 ); ··· 274 274 TP_ARGS(mr), 275 275 276 276 TP_STRUCT__entry( 277 - __field(const void *, mr) 277 + __field(u32, mr_id) 278 + __field(int, nents) 278 279 __field(u32, handle) 279 280 __field(u32, length) 280 281 __field(u64, offset) ··· 283 282 ), 284 283 285 284 TP_fast_assign( 286 - __entry->mr = mr; 285 + __entry->mr_id = mr->frwr.fr_mr->res.id; 286 + __entry->nents = mr->mr_nents; 287 287 __entry->handle = mr->mr_handle; 288 288 __entry->length = mr->mr_length; 289 289 __entry->offset = mr->mr_offset; 290 290 __entry->dir = mr->mr_dir; 291 291 ), 292 292 293 - TP_printk("mr=%p %u@0x%016llx:0x%08x (%s)", 294 - __entry->mr, __entry->length, 293 + TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)", 294 + __entry->mr_id, __entry->nents, __entry->length, 295 295 (unsigned long long)__entry->offset, __entry->handle, 296 296 xprtrdma_show_direction(__entry->dir) 297 297 ) ··· 342 340 ** Connection events 343 341 **/ 344 342 345 - TRACE_EVENT(xprtrdma_cm_event, 346 - TP_PROTO( 347 - const struct rpcrdma_xprt *r_xprt, 348 - struct rdma_cm_event *event 349 - ), 350 - 351 - TP_ARGS(r_xprt, event), 352 - 353 - TP_STRUCT__entry( 354 - __field(const void *, r_xprt) 355 - __field(unsigned int, event) 356 - __field(int, status) 357 - __string(addr, rpcrdma_addrstr(r_xprt)) 358 - __string(port, rpcrdma_portstr(r_xprt)) 359 - ), 360 - 361 - TP_fast_assign( 362 - __entry->r_xprt = r_xprt; 363 - __entry->event = event->event; 364 - __entry->status = event->status; 365 - __assign_str(addr, rpcrdma_addrstr(r_xprt)); 366 - __assign_str(port, rpcrdma_portstr(r_xprt)); 367 - ), 368 - 369 - TP_printk("peer=[%s]:%s r_xprt=%p: %s (%u/%d)", 370 - __get_str(addr), __get_str(port), 371 - __entry->r_xprt, rdma_show_cm_event(__entry->event), 372 - __entry->event, __entry->status 373 - ) 374 - ); 375 - 376 343 TRACE_EVENT(xprtrdma_inline_thresh, 377 344 TP_PROTO( 378 - const struct rpcrdma_xprt *r_xprt 345 + const struct rpcrdma_ep *ep 379 346 ), 380 347 381 - TP_ARGS(r_xprt), 348 + TP_ARGS(ep), 382 349 383 350 TP_STRUCT__entry( 384 - __field(const void *, r_xprt) 385 351 __field(unsigned int, inline_send) 386 352 __field(unsigned int, inline_recv) 387 353 __field(unsigned int, max_send) 388 354 
__field(unsigned int, max_recv) 389 - __string(addr, rpcrdma_addrstr(r_xprt)) 390 - __string(port, rpcrdma_portstr(r_xprt)) 355 + __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6)) 356 + __array(unsigned char, dstaddr, sizeof(struct sockaddr_in6)) 391 357 ), 392 358 393 359 TP_fast_assign( 394 - const struct rpcrdma_ep *ep = &r_xprt->rx_ep; 360 + const struct rdma_cm_id *id = ep->re_id; 395 361 396 - __entry->r_xprt = r_xprt; 397 - __entry->inline_send = ep->rep_inline_send; 398 - __entry->inline_recv = ep->rep_inline_recv; 399 - __entry->max_send = ep->rep_max_inline_send; 400 - __entry->max_recv = ep->rep_max_inline_recv; 401 - __assign_str(addr, rpcrdma_addrstr(r_xprt)); 402 - __assign_str(port, rpcrdma_portstr(r_xprt)); 362 + __entry->inline_send = ep->re_inline_send; 363 + __entry->inline_recv = ep->re_inline_recv; 364 + __entry->max_send = ep->re_max_inline_send; 365 + __entry->max_recv = ep->re_max_inline_recv; 366 + memcpy(__entry->srcaddr, &id->route.addr.src_addr, 367 + sizeof(struct sockaddr_in6)); 368 + memcpy(__entry->dstaddr, &id->route.addr.dst_addr, 369 + sizeof(struct sockaddr_in6)); 403 370 ), 404 371 405 - TP_printk("peer=[%s]:%s r_xprt=%p neg send/recv=%u/%u, calc send/recv=%u/%u", 406 - __get_str(addr), __get_str(port), __entry->r_xprt, 372 + TP_printk("%pISpc -> %pISpc neg send/recv=%u/%u, calc send/recv=%u/%u", 373 + __entry->srcaddr, __entry->dstaddr, 407 374 __entry->inline_send, __entry->inline_recv, 408 375 __entry->max_send, __entry->max_recv 409 376 ) ··· 380 409 381 410 DEFINE_CONN_EVENT(connect); 382 411 DEFINE_CONN_EVENT(disconnect); 412 + DEFINE_CONN_EVENT(flush_dct); 383 413 384 414 DEFINE_RXPRT_EVENT(xprtrdma_create); 385 415 DEFINE_RXPRT_EVENT(xprtrdma_op_destroy); 386 - DEFINE_RXPRT_EVENT(xprtrdma_remove); 387 - DEFINE_RXPRT_EVENT(xprtrdma_reinsert); 388 416 DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc); 389 417 DEFINE_RXPRT_EVENT(xprtrdma_op_close); 390 418 DEFINE_RXPRT_EVENT(xprtrdma_op_setport); ··· 450 480 451 481 TRACE_EVENT(xprtrdma_qp_event, 452 482 TP_PROTO( 453 - const struct rpcrdma_xprt *r_xprt, 483 + const struct rpcrdma_ep *ep, 454 484 const struct ib_event *event 455 485 ), 456 486 457 - TP_ARGS(r_xprt, event), 487 + TP_ARGS(ep, event), 458 488 459 489 TP_STRUCT__entry( 460 - __field(const void *, r_xprt) 461 - __field(unsigned int, event) 490 + __field(unsigned long, event) 462 491 __string(name, event->device->name) 463 - __string(addr, rpcrdma_addrstr(r_xprt)) 464 - __string(port, rpcrdma_portstr(r_xprt)) 492 + __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6)) 493 + __array(unsigned char, dstaddr, sizeof(struct sockaddr_in6)) 465 494 ), 466 495 467 496 TP_fast_assign( 468 - __entry->r_xprt = r_xprt; 497 + const struct rdma_cm_id *id = ep->re_id; 498 + 469 499 __entry->event = event->event; 470 500 __assign_str(name, event->device->name); 471 - __assign_str(addr, rpcrdma_addrstr(r_xprt)); 472 - __assign_str(port, rpcrdma_portstr(r_xprt)); 501 + memcpy(__entry->srcaddr, &id->route.addr.src_addr, 502 + sizeof(struct sockaddr_in6)); 503 + memcpy(__entry->dstaddr, &id->route.addr.dst_addr, 504 + sizeof(struct sockaddr_in6)); 473 505 ), 474 506 475 - TP_printk("peer=[%s]:%s r_xprt=%p: dev %s: %s (%u)", 476 - __get_str(addr), __get_str(port), __entry->r_xprt, 477 - __get_str(name), rdma_show_ib_event(__entry->event), 478 - __entry->event 507 + TP_printk("%pISpc -> %pISpc device=%s %s (%lu)", 508 + __entry->srcaddr, __entry->dstaddr, __get_str(name), 509 + rdma_show_ib_event(__entry->event), __entry->event 479 510 ) 480 511 ); 
481 512 ··· 772 801 __entry->r_xprt = r_xprt; 773 802 __entry->count = count; 774 803 __entry->status = status; 775 - __entry->posted = r_xprt->rx_ep.rep_receive_count; 804 + __entry->posted = r_xprt->rx_ep->re_receive_count; 776 805 __assign_str(addr, rpcrdma_addrstr(r_xprt)); 777 806 __assign_str(port, rpcrdma_portstr(r_xprt)); 778 807 ), ··· 891 920 TP_ARGS(mr, rc), 892 921 893 922 TP_STRUCT__entry( 894 - __field(const void *, mr) 923 + __field(u32, mr_id) 895 924 __field(int, rc) 896 925 ), 897 926 898 927 TP_fast_assign( 899 - __entry->mr = mr; 900 - __entry->rc = rc; 928 + __entry->mr_id = mr->frwr.fr_mr->res.id; 929 + __entry->rc = rc; 901 930 ), 902 931 903 - TP_printk("mr=%p: rc=%d", 904 - __entry->mr, __entry->rc 932 + TP_printk("mr.id=%u: rc=%d", 933 + __entry->mr_id, __entry->rc 905 934 ) 906 935 ); 907 936 ··· 914 943 TP_ARGS(mr, rc), 915 944 916 945 TP_STRUCT__entry( 917 - __field(const void *, mr) 946 + __field(u32, mr_id) 947 + __field(int, nents) 918 948 __field(u32, handle) 919 949 __field(u32, length) 920 950 __field(u64, offset) ··· 924 952 ), 925 953 926 954 TP_fast_assign( 927 - __entry->mr = mr; 955 + __entry->mr_id = mr->frwr.fr_mr->res.id; 956 + __entry->nents = mr->mr_nents; 928 957 __entry->handle = mr->mr_handle; 929 958 __entry->length = mr->mr_length; 930 959 __entry->offset = mr->mr_offset; ··· 933 960 __entry->rc = rc; 934 961 ), 935 962 936 - TP_printk("mr=%p %u@0x%016llx:0x%08x (%s): rc=%d", 937 - __entry->mr, __entry->length, 963 + TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s): rc=%d", 964 + __entry->mr_id, __entry->nents, __entry->length, 938 965 (unsigned long long)__entry->offset, __entry->handle, 939 966 xprtrdma_show_direction(__entry->dir), 940 967 __entry->rc ··· 950 977 TP_ARGS(mr, sg_nents), 951 978 952 979 TP_STRUCT__entry( 953 - __field(const void *, mr) 980 + __field(u32, mr_id) 954 981 __field(u64, addr) 955 982 __field(u32, dir) 956 983 __field(int, nents) 957 984 ), 958 985 959 986 TP_fast_assign( 960 - __entry->mr = mr; 987 + __entry->mr_id = mr->frwr.fr_mr->res.id; 961 988 __entry->addr = mr->mr_sg->dma_address; 962 989 __entry->dir = mr->mr_dir; 963 990 __entry->nents = sg_nents; 964 991 ), 965 992 966 - TP_printk("mr=%p dma addr=0x%llx (%s) sg_nents=%d", 967 - __entry->mr, __entry->addr, 993 + TP_printk("mr.id=%u DMA addr=0x%llx (%s) sg_nents=%d", 994 + __entry->mr_id, __entry->addr, 968 995 xprtrdma_show_direction(__entry->dir), 969 996 __entry->nents 970 997 ) ··· 979 1006 TP_ARGS(mr, num_mapped), 980 1007 981 1008 TP_STRUCT__entry( 982 - __field(const void *, mr) 1009 + __field(u32, mr_id) 983 1010 __field(u64, addr) 984 1011 __field(u32, dir) 985 1012 __field(int, num_mapped) ··· 987 1014 ), 988 1015 989 1016 TP_fast_assign( 990 - __entry->mr = mr; 1017 + __entry->mr_id = mr->frwr.fr_mr->res.id; 991 1018 __entry->addr = mr->mr_sg->dma_address; 992 1019 __entry->dir = mr->mr_dir; 993 1020 __entry->num_mapped = num_mapped; 994 1021 __entry->nents = mr->mr_nents; 995 1022 ), 996 1023 997 - TP_printk("mr=%p dma addr=0x%llx (%s) nents=%d of %d", 998 - __entry->mr, __entry->addr, 1024 + TP_printk("mr.id=%u DMA addr=0x%llx (%s) nents=%d of %d", 1025 + __entry->mr_id, __entry->addr, 999 1026 xprtrdma_show_direction(__entry->dir), 1000 1027 __entry->num_mapped, __entry->nents 1001 1028 ) ··· 1004 1031 DEFINE_MR_EVENT(localinv); 1005 1032 DEFINE_MR_EVENT(map); 1006 1033 DEFINE_MR_EVENT(unmap); 1007 - DEFINE_MR_EVENT(remoteinv); 1034 + DEFINE_MR_EVENT(reminv); 1008 1035 DEFINE_MR_EVENT(recycle); 1009 1036 1010 1037 
TRACE_EVENT(xprtrdma_dma_maperr,
+68 -26
net/sunrpc/auth_gss/auth_gss.c
··· 20 20 #include <linux/sunrpc/clnt.h> 21 21 #include <linux/sunrpc/auth.h> 22 22 #include <linux/sunrpc/auth_gss.h> 23 + #include <linux/sunrpc/gss_krb5.h> 23 24 #include <linux/sunrpc/svcauth_gss.h> 24 25 #include <linux/sunrpc/gss_err.h> 25 26 #include <linux/workqueue.h> ··· 1051 1050 goto err_put_mech; 1052 1051 auth = &gss_auth->rpc_auth; 1053 1052 auth->au_cslack = GSS_CRED_SLACK >> 2; 1054 - auth->au_rslack = GSS_VERF_SLACK >> 2; 1053 + auth->au_rslack = GSS_KRB5_MAX_SLACK_NEEDED >> 2; 1055 1054 auth->au_verfsize = GSS_VERF_SLACK >> 2; 1056 1055 auth->au_ralign = GSS_VERF_SLACK >> 2; 1057 1056 auth->au_flags = 0; ··· 1725 1724 goto out; 1726 1725 } 1727 1726 1728 - static int gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx, 1729 - struct rpc_task *task, struct xdr_stream *xdr) 1727 + static noinline_for_stack int 1728 + gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx, 1729 + struct rpc_task *task, struct xdr_stream *xdr) 1730 1730 { 1731 1731 struct rpc_rqst *rqstp = task->tk_rqstp; 1732 1732 struct xdr_buf integ_buf, *snd_buf = &rqstp->rq_snd_buf; ··· 1818 1816 return -EAGAIN; 1819 1817 } 1820 1818 1821 - static int gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, 1822 - struct rpc_task *task, struct xdr_stream *xdr) 1819 + static noinline_for_stack int 1820 + gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, 1821 + struct rpc_task *task, struct xdr_stream *xdr) 1823 1822 { 1824 1823 struct rpc_rqst *rqstp = task->tk_rqstp; 1825 1824 struct xdr_buf *snd_buf = &rqstp->rq_snd_buf; ··· 1937 1934 return 0; 1938 1935 } 1939 1936 1940 - static int 1937 + /* 1938 + * RFC 2203, Section 5.3.2.2 1939 + * 1940 + * struct rpc_gss_integ_data { 1941 + * opaque databody_integ<>; 1942 + * opaque checksum<>; 1943 + * }; 1944 + * 1945 + * struct rpc_gss_data_t { 1946 + * unsigned int seq_num; 1947 + * proc_req_arg_t arg; 1948 + * }; 1949 + */ 1950 + static noinline_for_stack int 1941 1951 gss_unwrap_resp_integ(struct rpc_task *task, struct rpc_cred *cred, 1942 1952 struct gss_cl_ctx *ctx, struct rpc_rqst *rqstp, 1943 1953 struct xdr_stream *xdr) 1944 1954 { 1945 - struct xdr_buf integ_buf, *rcv_buf = &rqstp->rq_rcv_buf; 1946 - u32 data_offset, mic_offset, integ_len, maj_stat; 1955 + struct xdr_buf gss_data, *rcv_buf = &rqstp->rq_rcv_buf; 1947 1956 struct rpc_auth *auth = cred->cr_auth; 1957 + u32 len, offset, seqno, maj_stat; 1948 1958 struct xdr_netobj mic; 1949 - __be32 *p; 1959 + int ret; 1950 1960 1951 - p = xdr_inline_decode(xdr, 2 * sizeof(*p)); 1952 - if (unlikely(!p)) 1961 + ret = -EIO; 1962 + mic.data = NULL; 1963 + 1964 + /* opaque databody_integ<>; */ 1965 + if (xdr_stream_decode_u32(xdr, &len)) 1953 1966 goto unwrap_failed; 1954 - integ_len = be32_to_cpup(p++); 1955 - if (integ_len & 3) 1967 + if (len & 3) 1956 1968 goto unwrap_failed; 1957 - data_offset = (u8 *)(p) - (u8 *)rcv_buf->head[0].iov_base; 1958 - mic_offset = integ_len + data_offset; 1959 - if (mic_offset > rcv_buf->len) 1969 + offset = rcv_buf->len - xdr_stream_remaining(xdr); 1970 + if (xdr_stream_decode_u32(xdr, &seqno)) 1960 1971 goto unwrap_failed; 1961 - if (be32_to_cpup(p) != rqstp->rq_seqno) 1972 + if (seqno != rqstp->rq_seqno) 1962 1973 goto bad_seqno; 1974 + if (xdr_buf_subsegment(rcv_buf, &gss_data, offset, len)) 1975 + goto unwrap_failed; 1963 1976 1964 - if (xdr_buf_subsegment(rcv_buf, &integ_buf, data_offset, integ_len)) 1977 + /* 1978 + * The xdr_stream now points to the beginning of the 1979 + * upper layer payload, to be passed below to 1980 + 
* rpcauth_unwrap_resp_decode(). The checksum, which 1981 + * follows the upper layer payload in @rcv_buf, is 1982 + * located and parsed without updating the xdr_stream. 1983 + */ 1984 + 1985 + /* opaque checksum<>; */ 1986 + offset += len; 1987 + if (xdr_decode_word(rcv_buf, offset, &len)) 1965 1988 goto unwrap_failed; 1966 - if (xdr_buf_read_mic(rcv_buf, &mic, mic_offset)) 1989 + offset += sizeof(__be32); 1990 + if (offset + len > rcv_buf->len) 1967 1991 goto unwrap_failed; 1968 - maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, &mic); 1992 + mic.len = len; 1993 + mic.data = kmalloc(len, GFP_NOFS); 1994 + if (!mic.data) 1995 + goto unwrap_failed; 1996 + if (read_bytes_from_xdr_buf(rcv_buf, offset, mic.data, mic.len)) 1997 + goto unwrap_failed; 1998 + 1999 + maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &gss_data, &mic); 1969 2000 if (maj_stat == GSS_S_CONTEXT_EXPIRED) 1970 2001 clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); 1971 2002 if (maj_stat != GSS_S_COMPLETE) ··· 2007 1970 2008 1971 auth->au_rslack = auth->au_verfsize + 2 + 1 + XDR_QUADLEN(mic.len); 2009 1972 auth->au_ralign = auth->au_verfsize + 2; 2010 - return 0; 1973 + ret = 0; 1974 + 1975 + out: 1976 + kfree(mic.data); 1977 + return ret; 1978 + 2011 1979 unwrap_failed: 2012 1980 trace_rpcgss_unwrap_failed(task); 2013 - return -EIO; 1981 + goto out; 2014 1982 bad_seqno: 2015 - trace_rpcgss_bad_seqno(task, rqstp->rq_seqno, be32_to_cpup(p)); 2016 - return -EIO; 1983 + trace_rpcgss_bad_seqno(task, rqstp->rq_seqno, seqno); 1984 + goto out; 2017 1985 bad_mic: 2018 1986 trace_rpcgss_verify_mic(task, maj_stat); 2019 - return -EIO; 1987 + goto out; 2020 1988 } 2021 1989 2022 - static int 1990 + static noinline_for_stack int 2023 1991 gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred, 2024 1992 struct gss_cl_ctx *ctx, struct rpc_rqst *rqstp, 2025 1993 struct xdr_stream *xdr)
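The rewritten gss_unwrap_resp_integ() above parses the RFC 2203 reply layout in place: a length-prefixed databody_integ whose first word is the sequence number, followed by a length-prefixed checksum that is copied into a kmalloc'd buffer for gss_verify_mic(). A standalone flat-buffer sketch of the same layout (plain C, not kernel code; parse_integ_reply() and its signature are hypothetical):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <arpa/inet.h>          /* ntohl() */

/* Hypothetical flat-buffer parser for the RFC 2203 rpc_gss_integ_data
 * layout: opaque databody_integ<> (sequence number + upper-layer args),
 * then opaque checksum<>.  Returns 0 on success, -1 on malformed input. */
static int parse_integ_reply(const uint8_t *buf, size_t buflen,
                             uint32_t expect_seq,
                             const uint8_t **payload, uint32_t *payload_len,
                             uint8_t **mic, uint32_t *mic_len)
{
    uint32_t len, seq;
    size_t off = 0;

    if (buflen < 8)
        return -1;
    memcpy(&len, buf + off, 4);
    len = ntohl(len);
    off += 4;
    if ((len & 3) || len < 4 || off + len > buflen)
        return -1;                      /* databody must be XDR-aligned */
    memcpy(&seq, buf + off, 4);
    if (ntohl(seq) != expect_seq)
        return -1;                      /* bad sequence number */
    *payload = buf + off + 4;           /* upper-layer arguments */
    *payload_len = len - 4;
    off += len;

    /* opaque checksum<> follows the databody */
    if (off + 4 > buflen)
        return -1;
    memcpy(&len, buf + off, 4);
    len = ntohl(len);
    off += 4;
    if (off + len > buflen)
        return -1;
    *mic = malloc(len);                 /* caller verifies and frees */
    if (!*mic)
        return -1;
    memcpy(*mic, buf + off, len);
    *mic_len = len;
    return 0;
}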
+6 -2
net/sunrpc/clnt.c
··· 1099 1099 task->tk_msg.rpc_proc = msg->rpc_proc; 1100 1100 task->tk_msg.rpc_argp = msg->rpc_argp; 1101 1101 task->tk_msg.rpc_resp = msg->rpc_resp; 1102 - if (msg->rpc_cred != NULL) 1103 - task->tk_msg.rpc_cred = get_cred(msg->rpc_cred); 1102 + task->tk_msg.rpc_cred = msg->rpc_cred; 1103 + if (!(task->tk_flags & RPC_TASK_CRED_NOREF)) 1104 + get_cred(task->tk_msg.rpc_cred); 1104 1105 } 1105 1106 } 1106 1107 ··· 1126 1125 struct rpc_task *task; 1127 1126 1128 1127 task = rpc_new_task(task_setup_data); 1128 + 1129 + if (!RPC_IS_ASYNC(task)) 1130 + task->tk_flags |= RPC_TASK_CRED_NOREF; 1129 1131 1130 1132 rpc_task_set_client(task, task_setup_data->rpc_client); 1131 1133 rpc_task_set_rpc_message(task, task_setup_data->rpc_message);
+15 -7
net/sunrpc/sched.c
··· 204 204 struct rpc_task *task, 205 205 unsigned char queue_priority) 206 206 { 207 - WARN_ON_ONCE(RPC_IS_QUEUED(task)); 208 - if (RPC_IS_QUEUED(task)) 209 - return; 210 - 211 207 INIT_LIST_HEAD(&task->u.tk_wait.timer_list); 212 208 if (RPC_IS_PRIORITY(queue)) 213 209 __rpc_add_wait_queue_priority(queue, task, queue_priority); ··· 378 382 * NB: An RPC task will only receive interrupt-driven events as long 379 383 * as it's on a wait queue. 380 384 */ 381 - static void __rpc_sleep_on_priority(struct rpc_wait_queue *q, 385 + static void __rpc_do_sleep_on_priority(struct rpc_wait_queue *q, 382 386 struct rpc_task *task, 383 387 unsigned char queue_priority) 384 388 { ··· 391 395 392 396 } 393 397 398 + static void __rpc_sleep_on_priority(struct rpc_wait_queue *q, 399 + struct rpc_task *task, 400 + unsigned char queue_priority) 401 + { 402 + if (WARN_ON_ONCE(RPC_IS_QUEUED(task))) 403 + return; 404 + __rpc_do_sleep_on_priority(q, task, queue_priority); 405 + } 406 + 394 407 static void __rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q, 395 408 struct rpc_task *task, unsigned long timeout, 396 409 unsigned char queue_priority) 397 410 { 411 + if (WARN_ON_ONCE(RPC_IS_QUEUED(task))) 412 + return; 398 413 if (time_is_after_jiffies(timeout)) { 399 - __rpc_sleep_on_priority(q, task, queue_priority); 414 + __rpc_do_sleep_on_priority(q, task, queue_priority); 400 415 __rpc_add_timer(q, task, timeout); 401 416 } else 402 417 task->tk_status = -ETIMEDOUT; ··· 1169 1162 { 1170 1163 xprt_release(task); 1171 1164 if (task->tk_msg.rpc_cred) { 1172 - put_cred(task->tk_msg.rpc_cred); 1165 + if (!(task->tk_flags & RPC_TASK_CRED_NOREF)) 1166 + put_cred(task->tk_msg.rpc_cred); 1173 1167 task->tk_msg.rpc_cred = NULL; 1174 1168 } 1175 1169 rpc_task_release_client(task);
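Taken together, the clnt.c and sched.c hunks implement the changelog's "reduce the number of references to credentials" item: a synchronous task borrows the caller's reference to the rpc_cred, since the caller cannot return before the task is released, and RPC_TASK_CRED_NOREF records that decision so release stays symmetric. A minimal userspace-style sketch of the borrow-or-take pattern (hypothetical types, not the kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

/* Hypothetical refcounted credential and a task that either borrows the
 * caller's reference (synchronous case) or takes its own (async case). */
struct sketch_cred { atomic_int refs; };

static struct sketch_cred *cred_get(struct sketch_cred *c)
{
    atomic_fetch_add(&c->refs, 1);
    return c;
}

static void cred_put(struct sketch_cred *c)
{
    if (atomic_fetch_sub(&c->refs, 1) == 1)
        free(c);
}

struct sketch_task {
    struct sketch_cred *cred;
    bool cred_noref;                    /* mirrors RPC_TASK_CRED_NOREF */
};

static void task_bind_cred(struct sketch_task *t, struct sketch_cred *c,
                           bool async)
{
    t->cred_noref = !async;             /* a sync caller outlives its task */
    t->cred = t->cred_noref ? c : cred_get(c);
}

static void task_release(struct sketch_task *t)
{
    if (t->cred && !t->cred_noref)      /* only drop a reference we took */
        cred_put(t->cred);
    t->cred = NULL;
}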
-55
net/sunrpc/xdr.c
··· 1235 1235 } 1236 1236 EXPORT_SYMBOL_GPL(xdr_encode_word); 1237 1237 1238 - /** 1239 - * xdr_buf_read_mic() - obtain the address of the GSS mic from xdr buf 1240 - * @buf: pointer to buffer containing a mic 1241 - * @mic: on success, returns the address of the mic 1242 - * @offset: the offset in buf where mic may be found 1243 - * 1244 - * This function may modify the xdr buf if the mic is found to be straddling 1245 - * a boundary between head, pages, and tail. On success the mic can be read 1246 - * from the address returned. There is no need to free the mic. 1247 - * 1248 - * Return: Success returns 0, otherwise an integer error. 1249 - */ 1250 - int xdr_buf_read_mic(struct xdr_buf *buf, struct xdr_netobj *mic, unsigned int offset) 1251 - { 1252 - struct xdr_buf subbuf; 1253 - unsigned int boundary; 1254 - 1255 - if (xdr_decode_word(buf, offset, &mic->len)) 1256 - return -EFAULT; 1257 - offset += 4; 1258 - 1259 - /* Is the mic partially in the head? */ 1260 - boundary = buf->head[0].iov_len; 1261 - if (offset < boundary && (offset + mic->len) > boundary) 1262 - xdr_shift_buf(buf, boundary - offset); 1263 - 1264 - /* Is the mic partially in the pages? */ 1265 - boundary += buf->page_len; 1266 - if (offset < boundary && (offset + mic->len) > boundary) 1267 - xdr_shrink_pagelen(buf, boundary - offset); 1268 - 1269 - if (xdr_buf_subsegment(buf, &subbuf, offset, mic->len)) 1270 - return -EFAULT; 1271 - 1272 - /* Is the mic contained entirely in the head? */ 1273 - mic->data = subbuf.head[0].iov_base; 1274 - if (subbuf.head[0].iov_len == mic->len) 1275 - return 0; 1276 - /* ..or is the mic contained entirely in the tail? */ 1277 - mic->data = subbuf.tail[0].iov_base; 1278 - if (subbuf.tail[0].iov_len == mic->len) 1279 - return 0; 1280 - 1281 - /* Find a contiguous area in @buf to hold all of @mic */ 1282 - if (mic->len > buf->buflen - buf->len) 1283 - return -ENOMEM; 1284 - if (buf->tail[0].iov_len != 0) 1285 - mic->data = buf->tail[0].iov_base + buf->tail[0].iov_len; 1286 - else 1287 - mic->data = buf->head[0].iov_base + buf->head[0].iov_len; 1288 - __read_bytes_from_xdr_buf(&subbuf, mic->data, mic->len); 1289 - return 0; 1290 - } 1291 - EXPORT_SYMBOL_GPL(xdr_buf_read_mic); 1292 - 1293 1238 /* Returns 0 on success, or else a negative error code. */ 1294 1239 static int 1295 1240 xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
+4 -4
net/sunrpc/xprtrdma/backchannel.c
··· 44 44 size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *xprt) 45 45 { 46 46 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); 47 - struct rpcrdma_ep *ep = &r_xprt->rx_ep; 47 + struct rpcrdma_ep *ep = r_xprt->rx_ep; 48 48 size_t maxmsg; 49 49 50 - maxmsg = min_t(unsigned int, ep->rep_inline_send, ep->rep_inline_recv); 50 + maxmsg = min_t(unsigned int, ep->re_inline_send, ep->re_inline_recv); 51 51 maxmsg = min_t(unsigned int, maxmsg, PAGE_SIZE); 52 52 return maxmsg - RPCRDMA_HDRLEN_MIN; 53 53 } ··· 115 115 if (rc < 0) 116 116 goto failed_marshal; 117 117 118 - if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req)) 118 + if (rpcrdma_post_sends(r_xprt, req)) 119 119 goto drop_connection; 120 120 return 0; 121 121 ··· 190 190 if (xprt->bc_alloc_count >= RPCRDMA_BACKWARD_WRS) 191 191 return NULL; 192 192 193 - size = min_t(size_t, r_xprt->rx_ep.rep_inline_recv, PAGE_SIZE); 193 + size = min_t(size_t, r_xprt->rx_ep->re_inline_recv, PAGE_SIZE); 194 194 req = rpcrdma_req_create(r_xprt, size, GFP_KERNEL); 195 195 if (!req) 196 196 return NULL;
+80 -72
net/sunrpc/xprtrdma/frwr_ops.c
··· 52 52 53 53 /** 54 54 * frwr_release_mr - Destroy one MR 55 - * @mr: MR allocated by frwr_init_mr 55 + * @mr: MR allocated by frwr_mr_init 56 56 * 57 57 */ 58 58 void frwr_release_mr(struct rpcrdma_mr *mr) ··· 74 74 75 75 if (mr->mr_dir != DMA_NONE) { 76 76 trace_xprtrdma_mr_unmap(mr); 77 - ib_dma_unmap_sg(r_xprt->rx_ia.ri_id->device, 77 + ib_dma_unmap_sg(r_xprt->rx_ep->re_id->device, 78 78 mr->mr_sg, mr->mr_nents, mr->mr_dir); 79 79 mr->mr_dir = DMA_NONE; 80 80 } ··· 106 106 } 107 107 108 108 /** 109 - * frwr_init_mr - Initialize one MR 110 - * @ia: interface adapter 109 + * frwr_mr_init - Initialize one MR 110 + * @r_xprt: controlling transport instance 111 111 * @mr: generic MR to prepare for FRWR 112 112 * 113 113 * Returns zero if successful. Otherwise a negative errno 114 114 * is returned. 115 115 */ 116 - int frwr_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr) 116 + int frwr_mr_init(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr) 117 117 { 118 - unsigned int depth = ia->ri_max_frwr_depth; 118 + struct rpcrdma_ep *ep = r_xprt->rx_ep; 119 + unsigned int depth = ep->re_max_fr_depth; 119 120 struct scatterlist *sg; 120 121 struct ib_mr *frmr; 121 122 int rc; 122 123 123 - frmr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype, depth); 124 + frmr = ib_alloc_mr(ep->re_pd, ep->re_mrtype, depth); 124 125 if (IS_ERR(frmr)) 125 126 goto out_mr_err; 126 127 ··· 129 128 if (!sg) 130 129 goto out_list_err; 131 130 131 + mr->mr_xprt = r_xprt; 132 132 mr->frwr.fr_mr = frmr; 133 133 mr->mr_dir = DMA_NONE; 134 134 INIT_LIST_HEAD(&mr->mr_list); ··· 151 149 152 150 /** 153 151 * frwr_query_device - Prepare a transport for use with FRWR 154 - * @r_xprt: controlling transport instance 152 + * @ep: endpoint to fill in 155 153 * @device: RDMA device to query 156 154 * 157 155 * On success, sets: 158 - * ep->rep_attr 159 - * ep->rep_max_requests 160 - * ia->ri_max_rdma_segs 161 - * 162 - * And these FRWR-related fields: 163 - * ia->ri_max_frwr_depth 164 - * ia->ri_mrtype 156 + * ep->re_attr 157 + * ep->re_max_requests 158 + * ep->re_max_rdma_segs 159 + * ep->re_max_fr_depth 160 + * ep->re_mrtype 165 161 * 166 162 * Return values: 167 163 * On success, returns zero. 168 164 * %-EINVAL - the device does not support FRWR memory registration 169 165 * %-ENOMEM - the device is not sufficiently capable for NFS/RDMA 170 166 */ 171 - int frwr_query_device(struct rpcrdma_xprt *r_xprt, 172 - const struct ib_device *device) 167 + int frwr_query_device(struct rpcrdma_ep *ep, const struct ib_device *device) 173 168 { 174 169 const struct ib_device_attr *attrs = &device->attrs; 175 - struct rpcrdma_ia *ia = &r_xprt->rx_ia; 176 - struct rpcrdma_ep *ep = &r_xprt->rx_ep; 177 170 int max_qp_wr, depth, delta; 178 171 unsigned int max_sge; 179 172 ··· 185 188 pr_err("rpcrdma: HCA provides only %u send SGEs\n", max_sge); 186 189 return -ENOMEM; 187 190 } 188 - ep->rep_attr.cap.max_send_sge = max_sge; 189 - ep->rep_attr.cap.max_recv_sge = 1; 191 + ep->re_attr.cap.max_send_sge = max_sge; 192 + ep->re_attr.cap.max_recv_sge = 1; 190 193 191 - ia->ri_mrtype = IB_MR_TYPE_MEM_REG; 194 + ep->re_mrtype = IB_MR_TYPE_MEM_REG; 192 195 if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG) 193 - ia->ri_mrtype = IB_MR_TYPE_SG_GAPS; 196 + ep->re_mrtype = IB_MR_TYPE_SG_GAPS; 194 197 195 198 /* Quirk: Some devices advertise a large max_fast_reg_page_list_len 196 199 * capability, but perform optimally when the MRs are not larger 197 200 * than a page. 
198 201 */ 199 202 if (attrs->max_sge_rd > RPCRDMA_MAX_HDR_SEGS) 200 - ia->ri_max_frwr_depth = attrs->max_sge_rd; 203 + ep->re_max_fr_depth = attrs->max_sge_rd; 201 204 else 202 - ia->ri_max_frwr_depth = attrs->max_fast_reg_page_list_len; 203 - if (ia->ri_max_frwr_depth > RPCRDMA_MAX_DATA_SEGS) 204 - ia->ri_max_frwr_depth = RPCRDMA_MAX_DATA_SEGS; 205 + ep->re_max_fr_depth = attrs->max_fast_reg_page_list_len; 206 + if (ep->re_max_fr_depth > RPCRDMA_MAX_DATA_SEGS) 207 + ep->re_max_fr_depth = RPCRDMA_MAX_DATA_SEGS; 205 208 206 209 /* Add room for frwr register and invalidate WRs. 207 210 * 1. FRWR reg WR for head ··· 217 220 /* Calculate N if the device max FRWR depth is smaller than 218 221 * RPCRDMA_MAX_DATA_SEGS. 219 222 */ 220 - if (ia->ri_max_frwr_depth < RPCRDMA_MAX_DATA_SEGS) { 221 - delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frwr_depth; 223 + if (ep->re_max_fr_depth < RPCRDMA_MAX_DATA_SEGS) { 224 + delta = RPCRDMA_MAX_DATA_SEGS - ep->re_max_fr_depth; 222 225 do { 223 226 depth += 2; /* FRWR reg + invalidate */ 224 - delta -= ia->ri_max_frwr_depth; 227 + delta -= ep->re_max_fr_depth; 225 228 } while (delta > 0); 226 229 } 227 230 ··· 230 233 max_qp_wr -= 1; 231 234 if (max_qp_wr < RPCRDMA_MIN_SLOT_TABLE) 232 235 return -ENOMEM; 233 - if (ep->rep_max_requests > max_qp_wr) 234 - ep->rep_max_requests = max_qp_wr; 235 - ep->rep_attr.cap.max_send_wr = ep->rep_max_requests * depth; 236 - if (ep->rep_attr.cap.max_send_wr > max_qp_wr) { 237 - ep->rep_max_requests = max_qp_wr / depth; 238 - if (!ep->rep_max_requests) 236 + if (ep->re_max_requests > max_qp_wr) 237 + ep->re_max_requests = max_qp_wr; 238 + ep->re_attr.cap.max_send_wr = ep->re_max_requests * depth; 239 + if (ep->re_attr.cap.max_send_wr > max_qp_wr) { 240 + ep->re_max_requests = max_qp_wr / depth; 241 + if (!ep->re_max_requests) 239 242 return -ENOMEM; 240 - ep->rep_attr.cap.max_send_wr = ep->rep_max_requests * depth; 243 + ep->re_attr.cap.max_send_wr = ep->re_max_requests * depth; 241 244 } 242 - ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS; 243 - ep->rep_attr.cap.max_send_wr += 1; /* for ib_drain_sq */ 244 - ep->rep_attr.cap.max_recv_wr = ep->rep_max_requests; 245 - ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS; 246 - ep->rep_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */ 245 + ep->re_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS; 246 + ep->re_attr.cap.max_send_wr += 1; /* for ib_drain_sq */ 247 + ep->re_attr.cap.max_recv_wr = ep->re_max_requests; 248 + ep->re_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS; 249 + ep->re_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */ 247 250 248 - ia->ri_max_rdma_segs = 249 - DIV_ROUND_UP(RPCRDMA_MAX_DATA_SEGS, ia->ri_max_frwr_depth); 251 + ep->re_max_rdma_segs = 252 + DIV_ROUND_UP(RPCRDMA_MAX_DATA_SEGS, ep->re_max_fr_depth); 250 253 /* Reply chunks require segments for head and tail buffers */ 251 - ia->ri_max_rdma_segs += 2; 252 - if (ia->ri_max_rdma_segs > RPCRDMA_MAX_HDR_SEGS) 253 - ia->ri_max_rdma_segs = RPCRDMA_MAX_HDR_SEGS; 254 + ep->re_max_rdma_segs += 2; 255 + if (ep->re_max_rdma_segs > RPCRDMA_MAX_HDR_SEGS) 256 + ep->re_max_rdma_segs = RPCRDMA_MAX_HDR_SEGS; 254 257 255 258 /* Ensure the underlying device is capable of conveying the 256 259 * largest r/wsize NFS will ask for. This guarantees that 257 260 * failing over from one RDMA device to another will not 258 261 * break NFS I/O. 
259 262 */ 260 - if ((ia->ri_max_rdma_segs * ia->ri_max_frwr_depth) < RPCRDMA_MAX_SEGS) 263 + if ((ep->re_max_rdma_segs * ep->re_max_fr_depth) < RPCRDMA_MAX_SEGS) 261 264 return -ENOMEM; 262 265 263 266 return 0; ··· 283 286 int nsegs, bool writing, __be32 xid, 284 287 struct rpcrdma_mr *mr) 285 288 { 286 - struct rpcrdma_ia *ia = &r_xprt->rx_ia; 289 + struct rpcrdma_ep *ep = r_xprt->rx_ep; 287 290 struct ib_reg_wr *reg_wr; 288 291 int i, n, dma_nents; 289 292 struct ib_mr *ibmr; 290 293 u8 key; 291 294 292 - if (nsegs > ia->ri_max_frwr_depth) 293 - nsegs = ia->ri_max_frwr_depth; 295 + if (nsegs > ep->re_max_fr_depth) 296 + nsegs = ep->re_max_fr_depth; 294 297 for (i = 0; i < nsegs;) { 295 298 if (seg->mr_page) 296 299 sg_set_page(&mr->mr_sg[i], ··· 303 306 304 307 ++seg; 305 308 ++i; 306 - if (ia->ri_mrtype == IB_MR_TYPE_SG_GAPS) 309 + if (ep->re_mrtype == IB_MR_TYPE_SG_GAPS) 307 310 continue; 308 311 if ((i < nsegs && offset_in_page(seg->mr_offset)) || 309 312 offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len)) ··· 312 315 mr->mr_dir = rpcrdma_data_dir(writing); 313 316 mr->mr_nents = i; 314 317 315 - dma_nents = ib_dma_map_sg(ia->ri_id->device, mr->mr_sg, mr->mr_nents, 318 + dma_nents = ib_dma_map_sg(ep->re_id->device, mr->mr_sg, mr->mr_nents, 316 319 mr->mr_dir); 317 320 if (!dma_nents) 318 321 goto out_dmamap_err; ··· 353 356 354 357 /** 355 358 * frwr_wc_fastreg - Invoked by RDMA provider for a flushed FastReg WC 356 - * @cq: completion queue (ignored) 357 - * @wc: completed WR 359 + * @cq: completion queue 360 + * @wc: WCE for a completed FastReg WR 358 361 * 359 362 */ 360 363 static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc) ··· 366 369 /* WARNING: Only wr_cqe and status are reliable at this point */ 367 370 trace_xprtrdma_wc_fastreg(wc, frwr); 368 371 /* The MR will get recycled when the associated req is retransmitted */ 372 + 373 + rpcrdma_flush_disconnect(cq, wc); 369 374 } 370 375 371 376 /** 372 - * frwr_send - post Send WR containing the RPC Call message 373 - * @ia: interface adapter 374 - * @req: Prepared RPC Call 377 + * frwr_send - post Send WRs containing the RPC Call message 378 + * @r_xprt: controlling transport instance 379 + * @req: prepared RPC Call 375 380 * 376 381 * For FRWR, chain any FastReg WRs to the Send WR. Only a 377 382 * single ib_post_send call is needed to register memory 378 383 * and then post the Send WR. 379 384 * 380 - * Returns the result of ib_post_send. 385 + * Returns the return code from ib_post_send. 386 + * 387 + * Caller must hold the transport send lock to ensure that the 388 + * pointers to the transport's rdma_cm_id and QP are stable. 
381 389 */ 382 - int frwr_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req) 390 + int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) 383 391 { 384 392 struct ib_send_wr *post_wr; 385 393 struct rpcrdma_mr *mr; ··· 405 403 post_wr = &frwr->fr_regwr.wr; 406 404 } 407 405 408 - return ib_post_send(ia->ri_id->qp, post_wr, NULL); 406 + return ib_post_send(r_xprt->rx_ep->re_id->qp, post_wr, NULL); 409 407 } 410 408 411 409 /** ··· 421 419 list_for_each_entry(mr, mrs, mr_list) 422 420 if (mr->mr_handle == rep->rr_inv_rkey) { 423 421 list_del_init(&mr->mr_list); 424 - trace_xprtrdma_mr_remoteinv(mr); 422 + trace_xprtrdma_mr_reminv(mr); 425 423 rpcrdma_mr_put(mr); 426 424 break; /* only one invalidated MR per RPC */ 427 425 } ··· 437 435 438 436 /** 439 437 * frwr_wc_localinv - Invoked by RDMA provider for a LOCAL_INV WC 440 - * @cq: completion queue (ignored) 441 - * @wc: completed WR 438 + * @cq: completion queue 439 + * @wc: WCE for a completed LocalInv WR 442 440 * 443 441 */ 444 442 static void frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc) ··· 451 449 /* WARNING: Only wr_cqe and status are reliable at this point */ 452 450 trace_xprtrdma_wc_li(wc, frwr); 453 451 __frwr_release_mr(wc, mr); 452 + 453 + rpcrdma_flush_disconnect(cq, wc); 454 454 } 455 455 456 456 /** 457 457 * frwr_wc_localinv_wake - Invoked by RDMA provider for a LOCAL_INV WC 458 - * @cq: completion queue (ignored) 459 - * @wc: completed WR 458 + * @cq: completion queue 459 + * @wc: WCE for a completed LocalInv WR 460 460 * 461 461 * Awaken anyone waiting for an MR to finish being fenced. 462 462 */ ··· 473 469 trace_xprtrdma_wc_li_wake(wc, frwr); 474 470 __frwr_release_mr(wc, mr); 475 471 complete(&frwr->fr_linv_done); 472 + 473 + rpcrdma_flush_disconnect(cq, wc); 476 474 } 477 475 478 476 /** ··· 532 526 533 527 /* Transport disconnect drains the receive CQ before it 534 528 * replaces the QP. The RPC reply handler won't call us 535 - * unless ri_id->qp is a valid pointer. 529 + * unless re_id->qp is a valid pointer. 536 530 */ 537 531 bad_wr = NULL; 538 - rc = ib_post_send(r_xprt->rx_ia.ri_id->qp, first, &bad_wr); 532 + rc = ib_post_send(r_xprt->rx_ep->re_id->qp, first, &bad_wr); 539 533 540 534 /* The final LOCAL_INV WR in the chain is supposed to 541 535 * do the wake. If it was never posted, the wake will ··· 562 556 563 557 /** 564 558 * frwr_wc_localinv_done - Invoked by RDMA provider for a signaled LOCAL_INV WC 565 - * @cq: completion queue (ignored) 566 - * @wc: completed WR 559 + * @cq: completion queue 560 + * @wc: WCE for a completed LocalInv WR 567 561 * 568 562 */ 569 563 static void frwr_wc_localinv_done(struct ib_cq *cq, struct ib_wc *wc) ··· 581 575 /* Ensure @rep is generated before __frwr_release_mr */ 582 576 smp_rmb(); 583 577 rpcrdma_complete_rqst(rep); 578 + 579 + rpcrdma_flush_disconnect(cq, wc); 584 580 } 585 581 586 582 /** ··· 637 629 638 630 /* Transport disconnect drains the receive CQ before it 639 631 * replaces the QP. The RPC reply handler won't call us 640 - * unless ri_id->qp is a valid pointer. 632 + * unless re_id->qp is a valid pointer. 641 633 */ 642 634 bad_wr = NULL; 643 - rc = ib_post_send(r_xprt->rx_ia.ri_id->qp, first, &bad_wr); 635 + rc = ib_post_send(r_xprt->rx_ep->re_id->qp, first, &bad_wr); 644 636 if (!rc) 645 637 return; 646 638
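frwr_query_device() keeps the existing send-queue sizing arithmetic, now stored in the rpcrdma_ep: each RPC costs one Send WR plus a FastReg/LocalInv pair per chunk, and the number of chunks grows when the device's FRWR depth is smaller than the transport's maximum data segments. An illustrative restatement of that loop (hypothetical helper; the base depth and the example values below are assumptions, not the kernel's constants):

/* Illustrative sketch, not the kernel function: mirror the loop in
 * frwr_query_device() that scales the per-request WR depth when the
 * device's FRWR depth is smaller than the transport's max data segments.
 * base_depth, max_data_segs and max_fr_depth are assumed inputs. */
static unsigned int frwr_send_depth(unsigned int base_depth,
                                    unsigned int max_data_segs,
                                    unsigned int max_fr_depth)
{
    unsigned int depth = base_depth;
    int delta;

    if (max_fr_depth < max_data_segs) {
        delta = max_data_segs - max_fr_depth;
        do {
            depth += 2;                 /* one extra FastReg + LocalInv pair */
            delta -= max_fr_depth;
        } while (delta > 0);
    }
    return depth;
}

/* Example with assumed values: base_depth = 7, max_data_segs = 64 and
 * max_fr_depth = 16 give depth = 7 + 2 * 3 = 13, so re_max_requests of
 * 128 would ask for cap.max_send_wr = 128 * 13 = 1664 before the
 * backchannel WRs and the ib_drain_sq slot are added. */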
+16 -16
net/sunrpc/xprtrdma/rpc_rdma.c
··· 103 103 104 104 /** 105 105 * rpcrdma_set_max_header_sizes - Initialize inline payload sizes 106 - * @r_xprt: transport instance to initialize 106 + * @ep: endpoint to initialize 107 107 * 108 108 * The max_inline fields contain the maximum size of an RPC message 109 109 * so the marshaling code doesn't have to repeat this calculation 110 110 * for every RPC. 111 111 */ 112 - void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *r_xprt) 112 + void rpcrdma_set_max_header_sizes(struct rpcrdma_ep *ep) 113 113 { 114 - unsigned int maxsegs = r_xprt->rx_ia.ri_max_rdma_segs; 115 - struct rpcrdma_ep *ep = &r_xprt->rx_ep; 114 + unsigned int maxsegs = ep->re_max_rdma_segs; 116 115 117 - ep->rep_max_inline_send = 118 - ep->rep_inline_send - rpcrdma_max_call_header_size(maxsegs); 119 - ep->rep_max_inline_recv = 120 - ep->rep_inline_recv - rpcrdma_max_reply_header_size(maxsegs); 116 + ep->re_max_inline_send = 117 + ep->re_inline_send - rpcrdma_max_call_header_size(maxsegs); 118 + ep->re_max_inline_recv = 119 + ep->re_inline_recv - rpcrdma_max_reply_header_size(maxsegs); 121 120 } 122 121 123 122 /* The client can send a request inline as long as the RPCRDMA header ··· 131 132 struct rpc_rqst *rqst) 132 133 { 133 134 struct xdr_buf *xdr = &rqst->rq_snd_buf; 135 + struct rpcrdma_ep *ep = r_xprt->rx_ep; 134 136 unsigned int count, remaining, offset; 135 137 136 - if (xdr->len > r_xprt->rx_ep.rep_max_inline_send) 138 + if (xdr->len > ep->re_max_inline_send) 137 139 return false; 138 140 139 141 if (xdr->page_len) { ··· 145 145 remaining -= min_t(unsigned int, 146 146 PAGE_SIZE - offset, remaining); 147 147 offset = 0; 148 - if (++count > r_xprt->rx_ep.rep_attr.cap.max_send_sge) 148 + if (++count > ep->re_attr.cap.max_send_sge) 149 149 return false; 150 150 } 151 151 } ··· 162 162 static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt, 163 163 struct rpc_rqst *rqst) 164 164 { 165 - return rqst->rq_rcv_buf.buflen <= r_xprt->rx_ep.rep_max_inline_recv; 165 + return rqst->rq_rcv_buf.buflen <= r_xprt->rx_ep->re_max_inline_recv; 166 166 } 167 167 168 168 /* The client is required to provide a Reply chunk if the maximum ··· 176 176 const struct xdr_buf *buf = &rqst->rq_rcv_buf; 177 177 178 178 return (buf->head[0].iov_len + buf->tail[0].iov_len) < 179 - r_xprt->rx_ep.rep_max_inline_recv; 179 + r_xprt->rx_ep->re_max_inline_recv; 180 180 } 181 181 182 182 /* Split @vec on page boundaries into SGEs. FMR registers pages, not ··· 255 255 /* When encoding a Read chunk, the tail iovec contains an 256 256 * XDR pad and may be omitted. 257 257 */ 258 - if (type == rpcrdma_readch && r_xprt->rx_ia.ri_implicit_roundup) 258 + if (type == rpcrdma_readch && r_xprt->rx_ep->re_implicit_roundup) 259 259 goto out; 260 260 261 261 /* When encoding a Write chunk, some servers need to see an ··· 263 263 * layer provides space in the tail iovec that may be used 264 264 * for this purpose. 
265 265 */ 266 - if (type == rpcrdma_writech && r_xprt->rx_ia.ri_implicit_roundup) 266 + if (type == rpcrdma_writech && r_xprt->rx_ep->re_implicit_roundup) 267 267 goto out; 268 268 269 269 if (xdrbuf->tail[0].iov_len) ··· 1450 1450 1451 1451 if (credits == 0) 1452 1452 credits = 1; /* don't deadlock */ 1453 - else if (credits > r_xprt->rx_ep.rep_max_requests) 1454 - credits = r_xprt->rx_ep.rep_max_requests; 1453 + else if (credits > r_xprt->rx_ep->re_max_requests) 1454 + credits = r_xprt->rx_ep->re_max_requests; 1455 1455 if (buf->rb_credits != credits) 1456 1456 rpcrdma_update_cwnd(r_xprt, credits); 1457 1457 rpcrdma_post_recvs(r_xprt, false);
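The rpc_rdma.c hunks only re-point the inline thresholds at the rpcrdma_ep, but the test they feed is easy to restate: a Call may be sent inline when the whole XDR buffer fits under re_max_inline_send and its page list can be described with no more SGEs than the QP's max_send_sge. A standalone sketch of that check (hypothetical types; a 4096-byte page size is assumed):

#include <stdbool.h>
#include <stddef.h>

#define SKETCH_PAGE_SIZE 4096u          /* assumed page size */

/* Hypothetical flattened view of an XDR send buffer. */
struct sketch_xdr_buf {
    size_t len;                         /* total bytes in the Call */
    size_t page_len;                    /* bytes carried in the page list */
    size_t page_base;                   /* byte offset of the first page's data */
};

/* Mirrors the shape of rpcrdma_args_inline(): the payload must fit under
 * the negotiated inline-send threshold, and the page list must not need
 * more SGEs than the QP offers.  (The kernel starts the SGE count at a
 * small reserved minimum for the RPC header; it starts at zero here.) */
static bool args_inline(const struct sketch_xdr_buf *xdr,
                        size_t max_inline_send, unsigned int max_send_sge)
{
    size_t remaining = xdr->page_len;
    size_t offset = xdr->page_base & (SKETCH_PAGE_SIZE - 1);
    unsigned int count = 0;

    if (xdr->len > max_inline_send)
        return false;

    while (remaining) {
        size_t chunk = SKETCH_PAGE_SIZE - offset;

        remaining -= chunk < remaining ? chunk : remaining;
        offset = 0;
        if (++count > max_send_sge)
            return false;
    }
    return true;
}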
+22 -50
net/sunrpc/xprtrdma/transport.c
··· 240 240 struct rpc_xprt *xprt = &r_xprt->rx_xprt; 241 241 int rc; 242 242 243 - rc = rpcrdma_ep_connect(&r_xprt->rx_ep, &r_xprt->rx_ia); 243 + rc = rpcrdma_xprt_connect(r_xprt); 244 244 xprt_clear_connecting(xprt); 245 - if (r_xprt->rx_ep.rep_connected > 0) { 245 + if (r_xprt->rx_ep && r_xprt->rx_ep->re_connect_status > 0) { 246 + xprt->connect_cookie++; 246 247 xprt->stat.connect_count++; 247 248 xprt->stat.connect_time += (long)jiffies - 248 249 xprt->stat.connect_start; ··· 266 265 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); 267 266 268 267 trace_xprtrdma_op_inject_dsc(r_xprt); 269 - rdma_disconnect(r_xprt->rx_ia.ri_id); 268 + rdma_disconnect(r_xprt->rx_ep->re_id); 270 269 } 271 270 272 271 /** ··· 285 284 286 285 cancel_delayed_work_sync(&r_xprt->rx_connect_worker); 287 286 288 - rpcrdma_ep_destroy(r_xprt); 287 + rpcrdma_xprt_disconnect(r_xprt); 289 288 rpcrdma_buffer_destroy(&r_xprt->rx_buf); 290 - rpcrdma_ia_close(&r_xprt->rx_ia); 291 289 292 290 xprt_rdma_free_addresses(xprt); 293 291 xprt_free(xprt); ··· 316 316 if (args->addrlen > sizeof(xprt->addr)) 317 317 return ERR_PTR(-EBADF); 318 318 319 + if (!try_module_get(THIS_MODULE)) 320 + return ERR_PTR(-EIO); 321 + 319 322 xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt), 0, 320 323 xprt_rdma_slot_table_entries); 321 - if (!xprt) 324 + if (!xprt) { 325 + module_put(THIS_MODULE); 322 326 return ERR_PTR(-ENOMEM); 327 + } 323 328 324 329 xprt->timeout = &xprt_rdma_default_timeout; 325 330 xprt->connect_timeout = xprt->timeout->to_initval; ··· 352 347 xprt_rdma_format_addresses(xprt, sap); 353 348 354 349 new_xprt = rpcx_to_rdmax(xprt); 355 - rc = rpcrdma_ia_open(new_xprt); 356 - if (rc) 357 - goto out1; 358 - 359 - rc = rpcrdma_ep_create(new_xprt); 360 - if (rc) 361 - goto out2; 362 - 363 350 rc = rpcrdma_buffer_create(new_xprt); 364 - if (rc) 365 - goto out3; 366 - 367 - if (!try_module_get(THIS_MODULE)) 368 - goto out4; 351 + if (rc) { 352 + xprt_rdma_free_addresses(xprt); 353 + xprt_free(xprt); 354 + module_put(THIS_MODULE); 355 + return ERR_PTR(rc); 356 + } 369 357 370 358 INIT_DELAYED_WORK(&new_xprt->rx_connect_worker, 371 359 xprt_rdma_connect_worker); 360 + 372 361 xprt->max_payload = RPCRDMA_MAX_DATA_SEGS << PAGE_SHIFT; 373 362 374 363 dprintk("RPC: %s: %s:%s\n", __func__, ··· 370 371 xprt->address_strings[RPC_DISPLAY_PORT]); 371 372 trace_xprtrdma_create(new_xprt); 372 373 return xprt; 373 - 374 - out4: 375 - rpcrdma_buffer_destroy(&new_xprt->rx_buf); 376 - rc = -ENODEV; 377 - out3: 378 - rpcrdma_ep_destroy(new_xprt); 379 - out2: 380 - rpcrdma_ia_close(&new_xprt->rx_ia); 381 - out1: 382 - trace_xprtrdma_op_destroy(new_xprt); 383 - xprt_rdma_free_addresses(xprt); 384 - xprt_free(xprt); 385 - return ERR_PTR(rc); 386 374 } 387 375 388 376 /** ··· 384 398 void xprt_rdma_close(struct rpc_xprt *xprt) 385 399 { 386 400 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); 387 - struct rpcrdma_ep *ep = &r_xprt->rx_ep; 388 - struct rpcrdma_ia *ia = &r_xprt->rx_ia; 389 - 390 - might_sleep(); 391 401 392 402 trace_xprtrdma_op_close(r_xprt); 393 403 394 - /* Prevent marshaling and sending of new requests */ 395 - xprt_clear_connected(xprt); 404 + rpcrdma_xprt_disconnect(r_xprt); 396 405 397 - if (test_and_clear_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags)) { 398 - rpcrdma_ia_remove(ia); 399 - goto out; 400 - } 401 - 402 - if (ep->rep_connected == -ENODEV) 403 - return; 404 - rpcrdma_ep_disconnect(ep, ia); 405 - 406 - out: 407 406 xprt->reestablish_timeout = 0; 408 407 ++xprt->connect_cookie; 409 408 xprt_disconnect_done(xprt); 
··· 488 517 xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task) 489 518 { 490 519 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); 520 + struct rpcrdma_ep *ep = r_xprt->rx_ep; 491 521 unsigned long delay; 492 522 493 523 delay = 0; 494 - if (r_xprt->rx_ep.rep_connected != 0) { 524 + if (ep && ep->re_connect_status != 0) { 495 525 delay = xprt_reconnect_delay(xprt); 496 526 xprt_reconnect_backoff(xprt, RPCRDMA_INIT_REEST_TO); 497 527 } ··· 666 694 goto drop_connection; 667 695 rqst->rq_xtime = ktime_get(); 668 696 669 - if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req)) 697 + if (rpcrdma_post_sends(r_xprt, req)) 670 698 goto drop_connection; 671 699 672 700 rqst->rq_xmit_bytes_sent += rqst->rq_snd_buf.len;
+268 -439
net/sunrpc/xprtrdma/verbs.c
··· 84 84 static void rpcrdma_reps_unmap(struct rpcrdma_xprt *r_xprt); 85 85 static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt); 86 86 static void rpcrdma_mrs_destroy(struct rpcrdma_xprt *r_xprt); 87 + static int rpcrdma_ep_destroy(struct rpcrdma_ep *ep); 87 88 static struct rpcrdma_regbuf * 88 89 rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction, 89 90 gfp_t flags); ··· 97 96 */ 98 97 static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt) 99 98 { 100 - struct rpcrdma_ia *ia = &r_xprt->rx_ia; 99 + struct rdma_cm_id *id = r_xprt->rx_ep->re_id; 101 100 102 101 /* Flush Receives, then wait for deferred Reply work 103 102 * to complete. 104 103 */ 105 - ib_drain_rq(ia->ri_id->qp); 104 + ib_drain_rq(id->qp); 106 105 107 106 /* Deferred Reply processing might have scheduled 108 107 * local invalidations. 109 108 */ 110 - ib_drain_sq(ia->ri_id->qp); 109 + ib_drain_sq(id->qp); 111 110 } 112 111 113 112 /** ··· 116 115 * @context: ep that owns QP where event occurred 117 116 * 118 117 * Called from the RDMA provider (device driver) possibly in an interrupt 119 - * context. 118 + * context. The QP is always destroyed before the ID, so the ID will be 119 + * reliably available when this handler is invoked. 120 120 */ 121 - static void 122 - rpcrdma_qp_event_handler(struct ib_event *event, void *context) 121 + static void rpcrdma_qp_event_handler(struct ib_event *event, void *context) 123 122 { 124 123 struct rpcrdma_ep *ep = context; 125 - struct rpcrdma_xprt *r_xprt = container_of(ep, struct rpcrdma_xprt, 126 - rx_ep); 127 124 128 - trace_xprtrdma_qp_event(r_xprt, event); 125 + trace_xprtrdma_qp_event(ep, event); 126 + } 127 + 128 + /** 129 + * rpcrdma_flush_disconnect - Disconnect on flushed completion 130 + * @cq: completion queue 131 + * @wc: work completion entry 132 + * 133 + * Must be called in process context. 
134 + */ 135 + void rpcrdma_flush_disconnect(struct ib_cq *cq, struct ib_wc *wc) 136 + { 137 + struct rpcrdma_xprt *r_xprt = cq->cq_context; 138 + struct rpc_xprt *xprt = &r_xprt->rx_xprt; 139 + 140 + if (wc->status != IB_WC_SUCCESS && 141 + r_xprt->rx_ep->re_connect_status == 1) { 142 + r_xprt->rx_ep->re_connect_status = -ECONNABORTED; 143 + trace_xprtrdma_flush_dct(r_xprt, wc->status); 144 + xprt_force_disconnect(xprt); 145 + } 129 146 } 130 147 131 148 /** 132 149 * rpcrdma_wc_send - Invoked by RDMA provider for each polled Send WC 133 150 * @cq: completion queue 134 - * @wc: completed WR 151 + * @wc: WCE for a completed Send WR 135 152 * 136 153 */ 137 - static void 138 - rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc) 154 + static void rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc) 139 155 { 140 156 struct ib_cqe *cqe = wc->wr_cqe; 141 157 struct rpcrdma_sendctx *sc = ··· 161 143 /* WARNING: Only wr_cqe and status are reliable at this point */ 162 144 trace_xprtrdma_wc_send(sc, wc); 163 145 rpcrdma_sendctx_put_locked((struct rpcrdma_xprt *)cq->cq_context, sc); 146 + rpcrdma_flush_disconnect(cq, wc); 164 147 } 165 148 166 149 /** 167 150 * rpcrdma_wc_receive - Invoked by RDMA provider for each polled Receive WC 168 - * @cq: completion queue (ignored) 169 - * @wc: completed WR 151 + * @cq: completion queue 152 + * @wc: WCE for a completed Receive WR 170 153 * 171 154 */ 172 - static void 173 - rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc) 155 + static void rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc) 174 156 { 175 157 struct ib_cqe *cqe = wc->wr_cqe; 176 158 struct rpcrdma_rep *rep = container_of(cqe, struct rpcrdma_rep, 177 159 rr_cqe); 178 - struct rpcrdma_xprt *r_xprt = rep->rr_rxprt; 160 + struct rpcrdma_xprt *r_xprt = cq->cq_context; 179 161 180 162 /* WARNING: Only wr_cqe and status are reliable at this point */ 181 163 trace_xprtrdma_wc_receive(wc); 182 - --r_xprt->rx_ep.rep_receive_count; 164 + --r_xprt->rx_ep->re_receive_count; 183 165 if (wc->status != IB_WC_SUCCESS) 184 166 goto out_flushed; 185 167 ··· 196 178 return; 197 179 198 180 out_flushed: 181 + rpcrdma_flush_disconnect(cq, wc); 199 182 rpcrdma_rep_destroy(rep); 200 183 } 201 184 202 - static void rpcrdma_update_cm_private(struct rpcrdma_xprt *r_xprt, 185 + static void rpcrdma_update_cm_private(struct rpcrdma_ep *ep, 203 186 struct rdma_conn_param *param) 204 187 { 205 188 const struct rpcrdma_connect_private *pmsg = param->private_data; 206 - struct rpcrdma_ep *ep = &r_xprt->rx_ep; 207 189 unsigned int rsize, wsize; 208 190 209 191 /* Default settings for RPC-over-RDMA Version One */ 210 - r_xprt->rx_ia.ri_implicit_roundup = xprt_rdma_pad_optimize; 192 + ep->re_implicit_roundup = xprt_rdma_pad_optimize; 211 193 rsize = RPCRDMA_V1_DEF_INLINE_SIZE; 212 194 wsize = RPCRDMA_V1_DEF_INLINE_SIZE; 213 195 214 196 if (pmsg && 215 197 pmsg->cp_magic == rpcrdma_cmp_magic && 216 198 pmsg->cp_version == RPCRDMA_CMP_VERSION) { 217 - r_xprt->rx_ia.ri_implicit_roundup = true; 199 + ep->re_implicit_roundup = true; 218 200 rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size); 219 201 wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size); 220 202 } 221 203 222 - if (rsize < ep->rep_inline_recv) 223 - ep->rep_inline_recv = rsize; 224 - if (wsize < ep->rep_inline_send) 225 - ep->rep_inline_send = wsize; 204 + if (rsize < ep->re_inline_recv) 205 + ep->re_inline_recv = rsize; 206 + if (wsize < ep->re_inline_send) 207 + ep->re_inline_send = wsize; 226 208 227 - rpcrdma_set_max_header_sizes(r_xprt); 209 + 
rpcrdma_set_max_header_sizes(ep); 228 210 } 229 211 230 212 /** ··· 238 220 static int 239 221 rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event) 240 222 { 241 - struct rpcrdma_xprt *r_xprt = id->context; 242 - struct rpcrdma_ia *ia = &r_xprt->rx_ia; 243 - struct rpcrdma_ep *ep = &r_xprt->rx_ep; 244 - struct rpc_xprt *xprt = &r_xprt->rx_xprt; 223 + struct sockaddr *sap = (struct sockaddr *)&id->route.addr.dst_addr; 224 + struct rpcrdma_ep *ep = id->context; 225 + struct rpc_xprt *xprt = ep->re_xprt; 245 226 246 227 might_sleep(); 247 228 248 - trace_xprtrdma_cm_event(r_xprt, event); 249 229 switch (event->event) { 250 230 case RDMA_CM_EVENT_ADDR_RESOLVED: 251 231 case RDMA_CM_EVENT_ROUTE_RESOLVED: 252 - ia->ri_async_rc = 0; 253 - complete(&ia->ri_done); 232 + ep->re_async_rc = 0; 233 + complete(&ep->re_done); 254 234 return 0; 255 235 case RDMA_CM_EVENT_ADDR_ERROR: 256 - ia->ri_async_rc = -EPROTO; 257 - complete(&ia->ri_done); 236 + ep->re_async_rc = -EPROTO; 237 + complete(&ep->re_done); 258 238 return 0; 259 239 case RDMA_CM_EVENT_ROUTE_ERROR: 260 - ia->ri_async_rc = -ENETUNREACH; 261 - complete(&ia->ri_done); 240 + ep->re_async_rc = -ENETUNREACH; 241 + complete(&ep->re_done); 262 242 return 0; 263 243 case RDMA_CM_EVENT_DEVICE_REMOVAL: 264 - #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) 265 - pr_info("rpcrdma: removing device %s for %s:%s\n", 266 - ia->ri_id->device->name, 267 - rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt)); 268 - #endif 269 - init_completion(&ia->ri_remove_done); 270 - set_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags); 271 - ep->rep_connected = -ENODEV; 244 + pr_info("rpcrdma: removing device %s for %pISpc\n", 245 + ep->re_id->device->name, sap); 246 + /* fall through */ 247 + case RDMA_CM_EVENT_ADDR_CHANGE: 248 + ep->re_connect_status = -ENODEV; 272 249 xprt_force_disconnect(xprt); 273 - wait_for_completion(&ia->ri_remove_done); 274 - 275 - ia->ri_id = NULL; 276 - /* Return 1 to ensure the core destroys the id. 
*/ 277 - return 1; 250 + goto disconnected; 278 251 case RDMA_CM_EVENT_ESTABLISHED: 279 - ++xprt->connect_cookie; 280 - ep->rep_connected = 1; 281 - rpcrdma_update_cm_private(r_xprt, &event->param.conn); 282 - trace_xprtrdma_inline_thresh(r_xprt); 283 - wake_up_all(&ep->rep_connect_wait); 252 + kref_get(&ep->re_kref); 253 + ep->re_connect_status = 1; 254 + rpcrdma_update_cm_private(ep, &event->param.conn); 255 + trace_xprtrdma_inline_thresh(ep); 256 + wake_up_all(&ep->re_connect_wait); 284 257 break; 285 258 case RDMA_CM_EVENT_CONNECT_ERROR: 286 - ep->rep_connected = -ENOTCONN; 259 + ep->re_connect_status = -ENOTCONN; 287 260 goto disconnected; 288 261 case RDMA_CM_EVENT_UNREACHABLE: 289 - ep->rep_connected = -ENETUNREACH; 262 + ep->re_connect_status = -ENETUNREACH; 290 263 goto disconnected; 291 264 case RDMA_CM_EVENT_REJECTED: 292 - dprintk("rpcrdma: connection to %s:%s rejected: %s\n", 293 - rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt), 294 - rdma_reject_msg(id, event->status)); 295 - ep->rep_connected = -ECONNREFUSED; 265 + dprintk("rpcrdma: connection to %pISpc rejected: %s\n", 266 + sap, rdma_reject_msg(id, event->status)); 267 + ep->re_connect_status = -ECONNREFUSED; 296 268 if (event->status == IB_CM_REJ_STALE_CONN) 297 - ep->rep_connected = -EAGAIN; 269 + ep->re_connect_status = -EAGAIN; 298 270 goto disconnected; 299 271 case RDMA_CM_EVENT_DISCONNECTED: 300 - ep->rep_connected = -ECONNABORTED; 272 + ep->re_connect_status = -ECONNABORTED; 301 273 disconnected: 302 - xprt_force_disconnect(xprt); 303 - wake_up_all(&ep->rep_connect_wait); 304 - break; 274 + return rpcrdma_ep_destroy(ep); 305 275 default: 306 276 break; 307 277 } 308 278 309 - dprintk("RPC: %s: %s:%s on %s/frwr: %s\n", __func__, 310 - rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt), 311 - ia->ri_id->device->name, rdma_event_msg(event->event)); 279 + dprintk("RPC: %s: %pISpc on %s/frwr: %s\n", __func__, sap, 280 + ep->re_id->device->name, rdma_event_msg(event->event)); 312 281 return 0; 313 282 } 314 283 315 - static struct rdma_cm_id * 316 - rpcrdma_create_id(struct rpcrdma_xprt *xprt, struct rpcrdma_ia *ia) 284 + static struct rdma_cm_id *rpcrdma_create_id(struct rpcrdma_xprt *r_xprt, 285 + struct rpcrdma_ep *ep) 317 286 { 318 287 unsigned long wtimeout = msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1; 288 + struct rpc_xprt *xprt = &r_xprt->rx_xprt; 319 289 struct rdma_cm_id *id; 320 290 int rc; 321 291 322 - init_completion(&ia->ri_done); 292 + init_completion(&ep->re_done); 323 293 324 - id = rdma_create_id(xprt->rx_xprt.xprt_net, rpcrdma_cm_event_handler, 325 - xprt, RDMA_PS_TCP, IB_QPT_RC); 294 + id = rdma_create_id(xprt->xprt_net, rpcrdma_cm_event_handler, ep, 295 + RDMA_PS_TCP, IB_QPT_RC); 326 296 if (IS_ERR(id)) 327 297 return id; 328 298 329 - ia->ri_async_rc = -ETIMEDOUT; 330 - rc = rdma_resolve_addr(id, NULL, 331 - (struct sockaddr *)&xprt->rx_xprt.addr, 299 + ep->re_async_rc = -ETIMEDOUT; 300 + rc = rdma_resolve_addr(id, NULL, (struct sockaddr *)&xprt->addr, 332 301 RDMA_RESOLVE_TIMEOUT); 333 302 if (rc) 334 303 goto out; 335 - rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout); 304 + rc = wait_for_completion_interruptible_timeout(&ep->re_done, wtimeout); 336 305 if (rc < 0) 337 306 goto out; 338 307 339 - rc = ia->ri_async_rc; 308 + rc = ep->re_async_rc; 340 309 if (rc) 341 310 goto out; 342 311 343 - ia->ri_async_rc = -ETIMEDOUT; 312 + ep->re_async_rc = -ETIMEDOUT; 344 313 rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT); 345 314 if (rc) 346 315 goto out; 347 - rc = 
wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout); 316 + rc = wait_for_completion_interruptible_timeout(&ep->re_done, wtimeout); 348 317 if (rc < 0) 349 318 goto out; 350 - rc = ia->ri_async_rc; 319 + rc = ep->re_async_rc; 351 320 if (rc) 352 321 goto out; 353 322 ··· 345 340 return ERR_PTR(rc); 346 341 } 347 342 348 - /* 349 - * Exported functions. 350 - */ 351 - 352 - /** 353 - * rpcrdma_ia_open - Open and initialize an Interface Adapter. 354 - * @xprt: transport with IA to (re)initialize 355 - * 356 - * Returns 0 on success, negative errno if an appropriate 357 - * Interface Adapter could not be found and opened. 358 - */ 359 - int 360 - rpcrdma_ia_open(struct rpcrdma_xprt *xprt) 343 + static void rpcrdma_ep_put(struct kref *kref) 361 344 { 362 - struct rpcrdma_ia *ia = &xprt->rx_ia; 345 + struct rpcrdma_ep *ep = container_of(kref, struct rpcrdma_ep, re_kref); 346 + 347 + if (ep->re_id->qp) { 348 + rdma_destroy_qp(ep->re_id); 349 + ep->re_id->qp = NULL; 350 + } 351 + 352 + if (ep->re_attr.recv_cq) 353 + ib_free_cq(ep->re_attr.recv_cq); 354 + ep->re_attr.recv_cq = NULL; 355 + if (ep->re_attr.send_cq) 356 + ib_free_cq(ep->re_attr.send_cq); 357 + ep->re_attr.send_cq = NULL; 358 + 359 + if (ep->re_pd) 360 + ib_dealloc_pd(ep->re_pd); 361 + ep->re_pd = NULL; 362 + 363 + kfree(ep); 364 + module_put(THIS_MODULE); 365 + } 366 + 367 + /* Returns: 368 + * %0 if @ep still has a positive kref count, or 369 + * %1 if @ep was destroyed successfully. 370 + */ 371 + static int rpcrdma_ep_destroy(struct rpcrdma_ep *ep) 372 + { 373 + return kref_put(&ep->re_kref, rpcrdma_ep_put); 374 + } 375 + 376 + static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt) 377 + { 378 + struct rpcrdma_connect_private *pmsg; 379 + struct ib_device *device; 380 + struct rdma_cm_id *id; 381 + struct rpcrdma_ep *ep; 363 382 int rc; 364 383 365 - ia->ri_id = rpcrdma_create_id(xprt, ia); 366 - if (IS_ERR(ia->ri_id)) { 367 - rc = PTR_ERR(ia->ri_id); 368 - goto out_err; 384 + ep = kzalloc(sizeof(*ep), GFP_NOFS); 385 + if (!ep) 386 + return -EAGAIN; 387 + ep->re_xprt = &r_xprt->rx_xprt; 388 + kref_init(&ep->re_kref); 389 + 390 + id = rpcrdma_create_id(r_xprt, ep); 391 + if (IS_ERR(id)) { 392 + rc = PTR_ERR(id); 393 + goto out_free; 369 394 } 395 + __module_get(THIS_MODULE); 396 + device = id->device; 397 + ep->re_id = id; 370 398 371 - ia->ri_pd = ib_alloc_pd(ia->ri_id->device, 0); 372 - if (IS_ERR(ia->ri_pd)) { 373 - rc = PTR_ERR(ia->ri_pd); 374 - pr_err("rpcrdma: ib_alloc_pd() returned %d\n", rc); 375 - goto out_err; 376 - } 377 - 378 - return 0; 379 - 380 - out_err: 381 - rpcrdma_ia_close(ia); 382 - return rc; 383 - } 384 - 385 - /** 386 - * rpcrdma_ia_remove - Handle device driver unload 387 - * @ia: interface adapter being removed 388 - * 389 - * Divest transport H/W resources associated with this adapter, 390 - * but allow it to be restored later. 391 - * 392 - * Caller must hold the transport send lock. 393 - */ 394 - void 395 - rpcrdma_ia_remove(struct rpcrdma_ia *ia) 396 - { 397 - struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt, 398 - rx_ia); 399 - struct rpcrdma_ep *ep = &r_xprt->rx_ep; 400 - 401 - /* This is similar to rpcrdma_ep_destroy, but: 402 - * - Don't cancel the connect worker. 403 - * - Don't call rpcrdma_ep_disconnect, which waits 404 - * for another conn upcall, which will deadlock. 405 - * - rdma_disconnect is unneeded, the underlying 406 - * connection is already gone. 
407 - */ 408 - if (ia->ri_id->qp) { 409 - rpcrdma_xprt_drain(r_xprt); 410 - rdma_destroy_qp(ia->ri_id); 411 - ia->ri_id->qp = NULL; 412 - } 413 - ib_free_cq(ep->rep_attr.recv_cq); 414 - ep->rep_attr.recv_cq = NULL; 415 - ib_free_cq(ep->rep_attr.send_cq); 416 - ep->rep_attr.send_cq = NULL; 417 - 418 - /* The ULP is responsible for ensuring all DMA 419 - * mappings and MRs are gone. 420 - */ 421 - rpcrdma_reps_unmap(r_xprt); 422 - rpcrdma_reqs_reset(r_xprt); 423 - rpcrdma_mrs_destroy(r_xprt); 424 - rpcrdma_sendctxs_destroy(r_xprt); 425 - ib_dealloc_pd(ia->ri_pd); 426 - ia->ri_pd = NULL; 427 - 428 - /* Allow waiters to continue */ 429 - complete(&ia->ri_remove_done); 430 - 431 - trace_xprtrdma_remove(r_xprt); 432 - } 433 - 434 - /** 435 - * rpcrdma_ia_close - Clean up/close an IA. 436 - * @ia: interface adapter to close 437 - * 438 - */ 439 - void 440 - rpcrdma_ia_close(struct rpcrdma_ia *ia) 441 - { 442 - if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) { 443 - if (ia->ri_id->qp) 444 - rdma_destroy_qp(ia->ri_id); 445 - rdma_destroy_id(ia->ri_id); 446 - } 447 - ia->ri_id = NULL; 448 - 449 - /* If the pd is still busy, xprtrdma missed freeing a resource */ 450 - if (ia->ri_pd && !IS_ERR(ia->ri_pd)) 451 - ib_dealloc_pd(ia->ri_pd); 452 - ia->ri_pd = NULL; 453 - } 454 - 455 - /** 456 - * rpcrdma_ep_create - Create unconnected endpoint 457 - * @r_xprt: transport to instantiate 458 - * 459 - * Returns zero on success, or a negative errno. 460 - */ 461 - int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt) 462 - { 463 - struct rpcrdma_ep *ep = &r_xprt->rx_ep; 464 - struct rpcrdma_ia *ia = &r_xprt->rx_ia; 465 - struct rpcrdma_connect_private *pmsg = &ep->rep_cm_private; 466 - struct ib_cq *sendcq, *recvcq; 467 - int rc; 468 - 469 - ep->rep_max_requests = r_xprt->rx_xprt.max_reqs; 470 - ep->rep_inline_send = xprt_rdma_max_inline_write; 471 - ep->rep_inline_recv = xprt_rdma_max_inline_read; 472 - 473 - rc = frwr_query_device(r_xprt, ia->ri_id->device); 399 + ep->re_max_requests = r_xprt->rx_xprt.max_reqs; 400 + ep->re_inline_send = xprt_rdma_max_inline_write; 401 + ep->re_inline_recv = xprt_rdma_max_inline_read; 402 + rc = frwr_query_device(ep, device); 474 403 if (rc) 475 - return rc; 476 - r_xprt->rx_buf.rb_max_requests = cpu_to_be32(ep->rep_max_requests); 404 + goto out_destroy; 477 405 478 - ep->rep_attr.event_handler = rpcrdma_qp_event_handler; 479 - ep->rep_attr.qp_context = ep; 480 - ep->rep_attr.srq = NULL; 481 - ep->rep_attr.cap.max_inline_data = 0; 482 - ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR; 483 - ep->rep_attr.qp_type = IB_QPT_RC; 484 - ep->rep_attr.port_num = ~0; 406 + r_xprt->rx_buf.rb_max_requests = cpu_to_be32(ep->re_max_requests); 407 + 408 + ep->re_attr.event_handler = rpcrdma_qp_event_handler; 409 + ep->re_attr.qp_context = ep; 410 + ep->re_attr.srq = NULL; 411 + ep->re_attr.cap.max_inline_data = 0; 412 + ep->re_attr.sq_sig_type = IB_SIGNAL_REQ_WR; 413 + ep->re_attr.qp_type = IB_QPT_RC; 414 + ep->re_attr.port_num = ~0; 485 415 486 416 dprintk("RPC: %s: requested max: dtos: send %d recv %d; " 487 417 "iovs: send %d recv %d\n", 488 418 __func__, 489 - ep->rep_attr.cap.max_send_wr, 490 - ep->rep_attr.cap.max_recv_wr, 491 - ep->rep_attr.cap.max_send_sge, 492 - ep->rep_attr.cap.max_recv_sge); 419 + ep->re_attr.cap.max_send_wr, 420 + ep->re_attr.cap.max_recv_wr, 421 + ep->re_attr.cap.max_send_sge, 422 + ep->re_attr.cap.max_recv_sge); 493 423 494 - ep->rep_send_batch = ep->rep_max_requests >> 3; 495 - ep->rep_send_count = ep->rep_send_batch; 496 - init_waitqueue_head(&ep->rep_connect_wait); 
497 - ep->rep_receive_count = 0; 424 + ep->re_send_batch = ep->re_max_requests >> 3; 425 + ep->re_send_count = ep->re_send_batch; 426 + init_waitqueue_head(&ep->re_connect_wait); 498 427 499 - sendcq = ib_alloc_cq_any(ia->ri_id->device, r_xprt, 500 - ep->rep_attr.cap.max_send_wr + 1, 501 - IB_POLL_WORKQUEUE); 502 - if (IS_ERR(sendcq)) { 503 - rc = PTR_ERR(sendcq); 504 - goto out1; 428 + ep->re_attr.send_cq = ib_alloc_cq_any(device, r_xprt, 429 + ep->re_attr.cap.max_send_wr, 430 + IB_POLL_WORKQUEUE); 431 + if (IS_ERR(ep->re_attr.send_cq)) { 432 + rc = PTR_ERR(ep->re_attr.send_cq); 433 + goto out_destroy; 505 434 } 506 435 507 - recvcq = ib_alloc_cq_any(ia->ri_id->device, NULL, 508 - ep->rep_attr.cap.max_recv_wr + 1, 509 - IB_POLL_WORKQUEUE); 510 - if (IS_ERR(recvcq)) { 511 - rc = PTR_ERR(recvcq); 512 - goto out2; 436 + ep->re_attr.recv_cq = ib_alloc_cq_any(device, r_xprt, 437 + ep->re_attr.cap.max_recv_wr, 438 + IB_POLL_WORKQUEUE); 439 + if (IS_ERR(ep->re_attr.recv_cq)) { 440 + rc = PTR_ERR(ep->re_attr.recv_cq); 441 + goto out_destroy; 513 442 } 514 - 515 - ep->rep_attr.send_cq = sendcq; 516 - ep->rep_attr.recv_cq = recvcq; 443 + ep->re_receive_count = 0; 517 444 518 445 /* Initialize cma parameters */ 519 - memset(&ep->rep_remote_cma, 0, sizeof(ep->rep_remote_cma)); 446 + memset(&ep->re_remote_cma, 0, sizeof(ep->re_remote_cma)); 520 447 521 448 /* Prepare RDMA-CM private message */ 449 + pmsg = &ep->re_cm_private; 522 450 pmsg->cp_magic = rpcrdma_cmp_magic; 523 451 pmsg->cp_version = RPCRDMA_CMP_VERSION; 524 452 pmsg->cp_flags |= RPCRDMA_CMP_F_SND_W_INV_OK; 525 - pmsg->cp_send_size = rpcrdma_encode_buffer_size(ep->rep_inline_send); 526 - pmsg->cp_recv_size = rpcrdma_encode_buffer_size(ep->rep_inline_recv); 527 - ep->rep_remote_cma.private_data = pmsg; 528 - ep->rep_remote_cma.private_data_len = sizeof(*pmsg); 453 + pmsg->cp_send_size = rpcrdma_encode_buffer_size(ep->re_inline_send); 454 + pmsg->cp_recv_size = rpcrdma_encode_buffer_size(ep->re_inline_recv); 455 + ep->re_remote_cma.private_data = pmsg; 456 + ep->re_remote_cma.private_data_len = sizeof(*pmsg); 529 457 530 458 /* Client offers RDMA Read but does not initiate */ 531 - ep->rep_remote_cma.initiator_depth = 0; 532 - ep->rep_remote_cma.responder_resources = 533 - min_t(int, U8_MAX, ia->ri_id->device->attrs.max_qp_rd_atom); 459 + ep->re_remote_cma.initiator_depth = 0; 460 + ep->re_remote_cma.responder_resources = 461 + min_t(int, U8_MAX, device->attrs.max_qp_rd_atom); 534 462 535 463 /* Limit transport retries so client can detect server 536 464 * GID changes quickly. RPC layer handles re-establishing 537 465 * transport connection and retransmission. 538 466 */ 539 - ep->rep_remote_cma.retry_count = 6; 467 + ep->re_remote_cma.retry_count = 6; 540 468 541 469 /* RPC-over-RDMA handles its own flow control. In addition, 542 470 * make all RNR NAKs visible so we know that RPC-over-RDMA 543 471 * flow control is working correctly (no NAKs should be seen). 
544 472 */ 545 - ep->rep_remote_cma.flow_control = 0; 546 - ep->rep_remote_cma.rnr_retry_count = 0; 473 + ep->re_remote_cma.flow_control = 0; 474 + ep->re_remote_cma.rnr_retry_count = 0; 547 475 476 + ep->re_pd = ib_alloc_pd(device, 0); 477 + if (IS_ERR(ep->re_pd)) { 478 + rc = PTR_ERR(ep->re_pd); 479 + goto out_destroy; 480 + } 481 + 482 + rc = rdma_create_qp(id, ep->re_pd, &ep->re_attr); 483 + if (rc) 484 + goto out_destroy; 485 + 486 + r_xprt->rx_ep = ep; 548 487 return 0; 549 488 550 - out2: 551 - ib_free_cq(sendcq); 552 - out1: 489 + out_destroy: 490 + rpcrdma_ep_destroy(ep); 491 + rdma_destroy_id(id); 492 + out_free: 493 + kfree(ep); 494 + r_xprt->rx_ep = NULL; 553 495 return rc; 554 496 } 555 497 556 498 /** 557 - * rpcrdma_ep_destroy - Disconnect and destroy endpoint. 558 - * @r_xprt: transport instance to shut down 499 + * rpcrdma_xprt_connect - Connect an unconnected transport 500 + * @r_xprt: controlling transport instance 559 501 * 502 + * Returns 0 on success or a negative errno. 560 503 */ 561 - void rpcrdma_ep_destroy(struct rpcrdma_xprt *r_xprt) 504 + int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt) 562 505 { 563 - struct rpcrdma_ep *ep = &r_xprt->rx_ep; 564 - struct rpcrdma_ia *ia = &r_xprt->rx_ia; 565 - 566 - if (ia->ri_id && ia->ri_id->qp) { 567 - rpcrdma_ep_disconnect(ep, ia); 568 - rdma_destroy_qp(ia->ri_id); 569 - ia->ri_id->qp = NULL; 570 - } 571 - 572 - if (ep->rep_attr.recv_cq) 573 - ib_free_cq(ep->rep_attr.recv_cq); 574 - if (ep->rep_attr.send_cq) 575 - ib_free_cq(ep->rep_attr.send_cq); 576 - } 577 - 578 - /* Re-establish a connection after a device removal event. 579 - * Unlike a normal reconnection, a fresh PD and a new set 580 - * of MRs and buffers is needed. 581 - */ 582 - static int rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt, 583 - struct ib_qp_init_attr *qp_init_attr) 584 - { 585 - struct rpcrdma_ia *ia = &r_xprt->rx_ia; 586 - struct rpcrdma_ep *ep = &r_xprt->rx_ep; 587 - int rc, err; 588 - 589 - trace_xprtrdma_reinsert(r_xprt); 590 - 591 - rc = -EHOSTUNREACH; 592 - if (rpcrdma_ia_open(r_xprt)) 593 - goto out1; 594 - 595 - rc = -ENOMEM; 596 - err = rpcrdma_ep_create(r_xprt); 597 - if (err) { 598 - pr_err("rpcrdma: rpcrdma_ep_create returned %d\n", err); 599 - goto out2; 600 - } 601 - memcpy(qp_init_attr, &ep->rep_attr, sizeof(*qp_init_attr)); 602 - 603 - rc = -ENETUNREACH; 604 - err = rdma_create_qp(ia->ri_id, ia->ri_pd, qp_init_attr); 605 - if (err) { 606 - pr_err("rpcrdma: rdma_create_qp returned %d\n", err); 607 - goto out3; 608 - } 609 - return 0; 610 - 611 - out3: 612 - rpcrdma_ep_destroy(r_xprt); 613 - out2: 614 - rpcrdma_ia_close(ia); 615 - out1: 616 - return rc; 617 - } 618 - 619 - static int rpcrdma_ep_reconnect(struct rpcrdma_xprt *r_xprt, 620 - struct ib_qp_init_attr *qp_init_attr) 621 - { 622 - struct rpcrdma_ia *ia = &r_xprt->rx_ia; 623 - struct rdma_cm_id *id, *old; 624 - int err, rc; 625 - 626 - rpcrdma_ep_disconnect(&r_xprt->rx_ep, ia); 627 - 628 - rc = -EHOSTUNREACH; 629 - id = rpcrdma_create_id(r_xprt, ia); 630 - if (IS_ERR(id)) 631 - goto out; 632 - 633 - /* As long as the new ID points to the same device as the 634 - * old ID, we can reuse the transport's existing PD and all 635 - * previously allocated MRs. Also, the same device means 636 - * the transport's previous DMA mappings are still valid. 637 - * 638 - * This is a sanity check only. There should be no way these 639 - * point to two different devices here. 
640 - */ 641 - old = id; 642 - rc = -ENETUNREACH; 643 - if (ia->ri_id->device != id->device) { 644 - pr_err("rpcrdma: can't reconnect on different device!\n"); 645 - goto out_destroy; 646 - } 647 - 648 - err = rdma_create_qp(id, ia->ri_pd, qp_init_attr); 649 - if (err) 650 - goto out_destroy; 651 - 652 - /* Atomically replace the transport's ID and QP. */ 653 - rc = 0; 654 - old = ia->ri_id; 655 - ia->ri_id = id; 656 - rdma_destroy_qp(old); 657 - 658 - out_destroy: 659 - rdma_destroy_id(old); 660 - out: 661 - return rc; 662 - } 663 - 664 - /* 665 - * Connect unconnected endpoint. 666 - */ 667 - int 668 - rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia) 669 - { 670 - struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt, 671 - rx_ia); 672 506 struct rpc_xprt *xprt = &r_xprt->rx_xprt; 673 - struct ib_qp_init_attr qp_init_attr; 507 + struct rpcrdma_ep *ep; 674 508 int rc; 675 509 676 510 retry: 677 - memcpy(&qp_init_attr, &ep->rep_attr, sizeof(qp_init_attr)); 678 - switch (ep->rep_connected) { 679 - case 0: 680 - rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &qp_init_attr); 681 - if (rc) { 682 - rc = -ENETUNREACH; 683 - goto out_noupdate; 684 - } 685 - break; 686 - case -ENODEV: 687 - rc = rpcrdma_ep_recreate_xprt(r_xprt, &qp_init_attr); 688 - if (rc) 689 - goto out_noupdate; 690 - break; 691 - default: 692 - rc = rpcrdma_ep_reconnect(r_xprt, &qp_init_attr); 693 - if (rc) 694 - goto out; 695 - } 511 + rpcrdma_xprt_disconnect(r_xprt); 512 + rc = rpcrdma_ep_create(r_xprt); 513 + if (rc) 514 + return rc; 515 + ep = r_xprt->rx_ep; 696 516 697 - ep->rep_connected = 0; 517 + ep->re_connect_status = 0; 698 518 xprt_clear_connected(xprt); 699 519 700 520 rpcrdma_reset_cwnd(r_xprt); ··· 529 699 if (rc) 530 700 goto out; 531 701 532 - rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma); 702 + rc = rdma_connect(ep->re_id, &ep->re_remote_cma); 533 703 if (rc) 534 704 goto out; 535 705 536 706 if (xprt->reestablish_timeout < RPCRDMA_INIT_REEST_TO) 537 707 xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO; 538 - wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0); 539 - if (ep->rep_connected <= 0) { 540 - if (ep->rep_connected == -EAGAIN) 708 + wait_event_interruptible(ep->re_connect_wait, 709 + ep->re_connect_status != 0); 710 + if (ep->re_connect_status <= 0) { 711 + if (ep->re_connect_status == -EAGAIN) 541 712 goto retry; 542 - rc = ep->rep_connected; 713 + rc = ep->re_connect_status; 543 714 goto out; 544 715 } 545 716 546 717 rc = rpcrdma_reqs_setup(r_xprt); 547 718 if (rc) { 548 - rpcrdma_ep_disconnect(ep, ia); 719 + rpcrdma_xprt_disconnect(r_xprt); 549 720 goto out; 550 721 } 551 722 rpcrdma_mrs_create(r_xprt); 552 723 553 724 out: 554 725 if (rc) 555 - ep->rep_connected = rc; 556 - 557 - out_noupdate: 726 + ep->re_connect_status = rc; 558 727 trace_xprtrdma_connect(r_xprt, rc); 559 728 return rc; 560 729 } 561 730 562 731 /** 563 - * rpcrdma_ep_disconnect - Disconnect underlying transport 564 - * @ep: endpoint to disconnect 565 - * @ia: associated interface adapter 732 + * rpcrdma_xprt_disconnect - Disconnect underlying transport 733 + * @r_xprt: controlling transport instance 566 734 * 567 735 * Caller serializes. Either the transport send lock is held, 568 736 * or we're being called to destroy the transport. 737 + * 738 + * On return, @r_xprt is completely divested of all hardware 739 + * resources and prepared for the next ->connect operation. 
569 740 */ 570 - void 571 - rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia) 741 + void rpcrdma_xprt_disconnect(struct rpcrdma_xprt *r_xprt) 572 742 { 573 - struct rpcrdma_xprt *r_xprt = container_of(ep, struct rpcrdma_xprt, 574 - rx_ep); 743 + struct rpcrdma_ep *ep = r_xprt->rx_ep; 744 + struct rdma_cm_id *id; 575 745 int rc; 576 746 577 - /* returns without wait if ID is not connected */ 578 - rc = rdma_disconnect(ia->ri_id); 579 - if (!rc) 580 - wait_event_interruptible(ep->rep_connect_wait, 581 - ep->rep_connected != 1); 582 - else 583 - ep->rep_connected = rc; 747 + if (!ep) 748 + return; 749 + 750 + id = ep->re_id; 751 + rc = rdma_disconnect(id); 584 752 trace_xprtrdma_disconnect(r_xprt, rc); 585 753 586 754 rpcrdma_xprt_drain(r_xprt); 755 + rpcrdma_reps_unmap(r_xprt); 587 756 rpcrdma_reqs_reset(r_xprt); 588 757 rpcrdma_mrs_destroy(r_xprt); 589 758 rpcrdma_sendctxs_destroy(r_xprt); 759 + 760 + if (rpcrdma_ep_destroy(ep)) 761 + rdma_destroy_id(id); 762 + 763 + r_xprt->rx_ep = NULL; 590 764 } 591 765 592 766 /* Fixed-size circular FIFO queue. This implementation is wait-free and ··· 627 793 { 628 794 struct rpcrdma_sendctx *sc; 629 795 630 - sc = kzalloc(struct_size(sc, sc_sges, ep->rep_attr.cap.max_send_sge), 796 + sc = kzalloc(struct_size(sc, sc_sges, ep->re_attr.cap.max_send_sge), 631 797 GFP_KERNEL); 632 798 if (!sc) 633 799 return NULL; ··· 647 813 * the ->send_request call to fail temporarily before too many 648 814 * Sends are posted. 649 815 */ 650 - i = r_xprt->rx_ep.rep_max_requests + RPCRDMA_MAX_BC_REQUESTS; 816 + i = r_xprt->rx_ep->re_max_requests + RPCRDMA_MAX_BC_REQUESTS; 651 817 buf->rb_sc_ctxs = kcalloc(i, sizeof(sc), GFP_KERNEL); 652 818 if (!buf->rb_sc_ctxs) 653 819 return -ENOMEM; 654 820 655 821 buf->rb_sc_last = i - 1; 656 822 for (i = 0; i <= buf->rb_sc_last; i++) { 657 - sc = rpcrdma_sendctx_create(&r_xprt->rx_ep); 823 + sc = rpcrdma_sendctx_create(r_xprt->rx_ep); 658 824 if (!sc) 659 825 return -ENOMEM; 660 826 ··· 758 924 rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt) 759 925 { 760 926 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; 761 - struct rpcrdma_ia *ia = &r_xprt->rx_ia; 927 + struct rpcrdma_ep *ep = r_xprt->rx_ep; 762 928 unsigned int count; 763 929 764 - for (count = 0; count < ia->ri_max_rdma_segs; count++) { 930 + for (count = 0; count < ep->re_max_rdma_segs; count++) { 765 931 struct rpcrdma_mr *mr; 766 932 int rc; 767 933 ··· 769 935 if (!mr) 770 936 break; 771 937 772 - rc = frwr_init_mr(ia, mr); 938 + rc = frwr_mr_init(r_xprt, mr); 773 939 if (rc) { 774 940 kfree(mr); 775 941 break; 776 942 } 777 - 778 - mr->mr_xprt = r_xprt; 779 943 780 944 spin_lock(&buf->rb_lock); 781 945 rpcrdma_mr_push(mr, &buf->rb_mrs); ··· 805 973 void rpcrdma_mrs_refresh(struct rpcrdma_xprt *r_xprt) 806 974 { 807 975 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; 808 - struct rpcrdma_ep *ep = &r_xprt->rx_ep; 976 + struct rpcrdma_ep *ep = r_xprt->rx_ep; 809 977 810 - /* If there is no underlying device, it's no use to 811 - * wake the refresh worker. 978 + /* If there is no underlying connection, it's no use 979 + * to wake the refresh worker. 812 980 */ 813 - if (ep->rep_connected != -ENODEV) { 981 + if (ep->re_connect_status == 1) { 814 982 /* The work is scheduled on a WQ_MEM_RECLAIM 815 983 * workqueue in order to prevent MR allocation 816 984 * from recursing into NFS during direct reclaim. 
··· 874 1042 875 1043 /* Compute maximum header buffer size in bytes */ 876 1044 maxhdrsize = rpcrdma_fixed_maxsz + 3 + 877 - r_xprt->rx_ia.ri_max_rdma_segs * rpcrdma_readchunk_maxsz; 1045 + r_xprt->rx_ep->re_max_rdma_segs * rpcrdma_readchunk_maxsz; 878 1046 maxhdrsize *= sizeof(__be32); 879 1047 rb = rpcrdma_regbuf_alloc(__roundup_pow_of_two(maxhdrsize), 880 1048 DMA_TO_DEVICE, GFP_KERNEL); ··· 952 1120 if (rep == NULL) 953 1121 goto out; 954 1122 955 - rep->rr_rdmabuf = rpcrdma_regbuf_alloc(r_xprt->rx_ep.rep_inline_recv, 1123 + rep->rr_rdmabuf = rpcrdma_regbuf_alloc(r_xprt->rx_ep->re_inline_recv, 956 1124 DMA_FROM_DEVICE, GFP_KERNEL); 957 1125 if (!rep->rr_rdmabuf) 958 1126 goto out_free; ··· 1177 1345 1178 1346 if (mr->mr_dir != DMA_NONE) { 1179 1347 trace_xprtrdma_mr_unmap(mr); 1180 - ib_dma_unmap_sg(r_xprt->rx_ia.ri_id->device, 1348 + ib_dma_unmap_sg(r_xprt->rx_ep->re_id->device, 1181 1349 mr->mr_sg, mr->mr_nents, mr->mr_dir); 1182 1350 mr->mr_dir = DMA_NONE; 1183 1351 } ··· 1295 1463 bool __rpcrdma_regbuf_dma_map(struct rpcrdma_xprt *r_xprt, 1296 1464 struct rpcrdma_regbuf *rb) 1297 1465 { 1298 - struct ib_device *device = r_xprt->rx_ia.ri_id->device; 1466 + struct ib_device *device = r_xprt->rx_ep->re_id->device; 1299 1467 1300 1468 if (rb->rg_direction == DMA_NONE) 1301 1469 return false; ··· 1308 1476 } 1309 1477 1310 1478 rb->rg_device = device; 1311 - rb->rg_iov.lkey = r_xprt->rx_ia.ri_pd->local_dma_lkey; 1479 + rb->rg_iov.lkey = r_xprt->rx_ep->re_pd->local_dma_lkey; 1312 1480 return true; 1313 1481 } 1314 1482 ··· 1334 1502 } 1335 1503 1336 1504 /** 1337 - * rpcrdma_ep_post - Post WRs to a transport's Send Queue 1338 - * @ia: transport's device information 1339 - * @ep: transport's RDMA endpoint information 1505 + * rpcrdma_post_sends - Post WRs to a transport's Send Queue 1506 + * @r_xprt: controlling transport instance 1340 1507 * @req: rpcrdma_req containing the Send WR to post 1341 1508 * 1342 1509 * Returns 0 if the post was successful, otherwise -ENOTCONN 1343 1510 * is returned. 
1344 1511 */ 1345 - int 1346 - rpcrdma_ep_post(struct rpcrdma_ia *ia, 1347 - struct rpcrdma_ep *ep, 1348 - struct rpcrdma_req *req) 1512 + int rpcrdma_post_sends(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) 1349 1513 { 1350 1514 struct ib_send_wr *send_wr = &req->rl_wr; 1515 + struct rpcrdma_ep *ep = r_xprt->rx_ep; 1351 1516 int rc; 1352 1517 1353 - if (!ep->rep_send_count || kref_read(&req->rl_kref) > 1) { 1518 + if (!ep->re_send_count || kref_read(&req->rl_kref) > 1) { 1354 1519 send_wr->send_flags |= IB_SEND_SIGNALED; 1355 - ep->rep_send_count = ep->rep_send_batch; 1520 + ep->re_send_count = ep->re_send_batch; 1356 1521 } else { 1357 1522 send_wr->send_flags &= ~IB_SEND_SIGNALED; 1358 - --ep->rep_send_count; 1523 + --ep->re_send_count; 1359 1524 } 1360 1525 1361 - rc = frwr_send(ia, req); 1526 + rc = frwr_send(r_xprt, req); 1362 1527 trace_xprtrdma_post_send(req, rc); 1363 1528 if (rc) 1364 1529 return -ENOTCONN; ··· 1371 1542 void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp) 1372 1543 { 1373 1544 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; 1374 - struct rpcrdma_ep *ep = &r_xprt->rx_ep; 1545 + struct rpcrdma_ep *ep = r_xprt->rx_ep; 1375 1546 struct ib_recv_wr *wr, *bad_wr; 1376 1547 struct rpcrdma_rep *rep; 1377 1548 int needed, count, rc; ··· 1380 1551 count = 0; 1381 1552 1382 1553 needed = buf->rb_credits + (buf->rb_bc_srv_max_requests << 1); 1383 - if (likely(ep->rep_receive_count > needed)) 1554 + if (likely(ep->re_receive_count > needed)) 1384 1555 goto out; 1385 - needed -= ep->rep_receive_count; 1556 + needed -= ep->re_receive_count; 1386 1557 if (!temp) 1387 1558 needed += RPCRDMA_MAX_RECV_BATCH; 1388 1559 ··· 1408 1579 if (!wr) 1409 1580 goto out; 1410 1581 1411 - rc = ib_post_recv(r_xprt->rx_ia.ri_id->qp, wr, 1582 + rc = ib_post_recv(ep->re_id->qp, wr, 1412 1583 (const struct ib_recv_wr **)&bad_wr); 1413 1584 out: 1414 1585 trace_xprtrdma_post_recvs(r_xprt, count, rc); ··· 1422 1593 --count; 1423 1594 } 1424 1595 } 1425 - ep->rep_receive_count += count; 1596 + ep->re_receive_count += count; 1426 1597 return; 1427 1598 }
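A note on the Send path above: rpcrdma_post_sends() only marks a Send WR as IB_SEND_SIGNALED when the per-endpoint countdown (re_send_count) has run out, then reloads the countdown from re_send_batch. Below is a minimal userspace sketch of that counting pattern, not kernel code; the toy_* names are invented for illustration, and the real function also forces a signaled Send whenever the request's kref is shared.

#include <stdio.h>

struct toy_ep {
	unsigned int send_count;	/* cf. re_send_count */
	unsigned int send_batch;	/* cf. re_send_batch */
};

struct toy_req {
	int signaled;			/* cf. the IB_SEND_SIGNALED flag */
};

/* cf. the signaling decision at the top of rpcrdma_post_sends() */
static void toy_post_send(struct toy_ep *ep, struct toy_req *req)
{
	if (!ep->send_count) {
		/* Ask for a completion and restart the countdown. */
		req->signaled = 1;
		ep->send_count = ep->send_batch;
	} else {
		/* Suppress the completion for this Send. */
		req->signaled = 0;
		--ep->send_count;
	}
}

int main(void)
{
	struct toy_ep ep = { .send_count = 0, .send_batch = 8 };
	struct toy_req req;
	int i;

	for (i = 0; i < 10; i++) {
		toy_post_send(&ep, &req);
		printf("send %d: signaled=%d\n", i, req.signaled);
	}
	return 0;
}

Leaving most Sends unsignaled keeps completion traffic low while still guaranteeing that a completion is requested at least once per re_send_batch posts.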
+34 -55
net/sunrpc/xprtrdma/xprt_rdma.h
··· 65 65 #define RPCRDMA_IDLE_DISC_TO (5U * 60 * HZ) 66 66 67 67 /* 68 - * Interface Adapter -- one per transport instance 68 + * RDMA Endpoint -- connection endpoint details 69 69 */ 70 - struct rpcrdma_ia { 71 - struct rdma_cm_id *ri_id; 72 - struct ib_pd *ri_pd; 73 - int ri_async_rc; 74 - unsigned int ri_max_rdma_segs; 75 - unsigned int ri_max_frwr_depth; 76 - bool ri_implicit_roundup; 77 - enum ib_mr_type ri_mrtype; 78 - unsigned long ri_flags; 79 - struct completion ri_done; 80 - struct completion ri_remove_done; 81 - }; 82 - 83 - enum { 84 - RPCRDMA_IAF_REMOVING = 0, 85 - }; 86 - 87 - /* 88 - * RDMA Endpoint -- one per transport instance 89 - */ 90 - 91 70 struct rpcrdma_ep { 92 - unsigned int rep_send_count; 93 - unsigned int rep_send_batch; 94 - unsigned int rep_max_inline_send; 95 - unsigned int rep_max_inline_recv; 96 - int rep_connected; 97 - struct ib_qp_init_attr rep_attr; 98 - wait_queue_head_t rep_connect_wait; 99 - struct rpcrdma_connect_private rep_cm_private; 100 - struct rdma_conn_param rep_remote_cma; 101 - unsigned int rep_max_requests; /* depends on device */ 102 - unsigned int rep_inline_send; /* negotiated */ 103 - unsigned int rep_inline_recv; /* negotiated */ 104 - int rep_receive_count; 71 + struct kref re_kref; 72 + struct rdma_cm_id *re_id; 73 + struct ib_pd *re_pd; 74 + unsigned int re_max_rdma_segs; 75 + unsigned int re_max_fr_depth; 76 + bool re_implicit_roundup; 77 + enum ib_mr_type re_mrtype; 78 + struct completion re_done; 79 + unsigned int re_send_count; 80 + unsigned int re_send_batch; 81 + unsigned int re_max_inline_send; 82 + unsigned int re_max_inline_recv; 83 + int re_async_rc; 84 + int re_connect_status; 85 + struct ib_qp_init_attr re_attr; 86 + wait_queue_head_t re_connect_wait; 87 + struct rpc_xprt *re_xprt; 88 + struct rpcrdma_connect_private 89 + re_cm_private; 90 + struct rdma_conn_param re_remote_cma; 91 + int re_receive_count; 92 + unsigned int re_max_requests; /* depends on device */ 93 + unsigned int re_inline_send; /* negotiated */ 94 + unsigned int re_inline_recv; /* negotiated */ 105 95 }; 106 96 107 97 /* Pre-allocate extra Work Requests for handling backward receives ··· 412 422 */ 413 423 struct rpcrdma_xprt { 414 424 struct rpc_xprt rx_xprt; 415 - struct rpcrdma_ia rx_ia; 416 - struct rpcrdma_ep rx_ep; 425 + struct rpcrdma_ep *rx_ep; 417 426 struct rpcrdma_buffer rx_buf; 418 427 struct delayed_work rx_connect_worker; 419 428 struct rpc_timeout rx_timeout; ··· 444 455 extern unsigned int xprt_rdma_memreg_strategy; 445 456 446 457 /* 447 - * Interface Adapter calls - xprtrdma/verbs.c 448 - */ 449 - int rpcrdma_ia_open(struct rpcrdma_xprt *xprt); 450 - void rpcrdma_ia_remove(struct rpcrdma_ia *ia); 451 - void rpcrdma_ia_close(struct rpcrdma_ia *); 452 - 453 - /* 454 458 * Endpoint calls - xprtrdma/verbs.c 455 459 */ 456 - int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt); 457 - void rpcrdma_ep_destroy(struct rpcrdma_xprt *r_xprt); 458 - int rpcrdma_ep_connect(struct rpcrdma_ep *, struct rpcrdma_ia *); 459 - void rpcrdma_ep_disconnect(struct rpcrdma_ep *, struct rpcrdma_ia *); 460 + void rpcrdma_flush_disconnect(struct ib_cq *cq, struct ib_wc *wc); 461 + int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt); 462 + void rpcrdma_xprt_disconnect(struct rpcrdma_xprt *r_xprt); 460 463 461 - int rpcrdma_ep_post(struct rpcrdma_ia *, struct rpcrdma_ep *, 462 - struct rpcrdma_req *); 464 + int rpcrdma_post_sends(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req); 463 465 void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp); 464 
466 465 467 /* ··· 516 536 /* Memory registration calls xprtrdma/frwr_ops.c 517 537 */ 518 538 void frwr_reset(struct rpcrdma_req *req); 519 - int frwr_query_device(struct rpcrdma_xprt *r_xprt, 520 - const struct ib_device *device); 521 - int frwr_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr); 539 + int frwr_query_device(struct rpcrdma_ep *ep, const struct ib_device *device); 540 + int frwr_mr_init(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr); 522 541 void frwr_release_mr(struct rpcrdma_mr *mr); 523 542 struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt, 524 543 struct rpcrdma_mr_seg *seg, 525 544 int nsegs, bool writing, __be32 xid, 526 545 struct rpcrdma_mr *mr); 527 - int frwr_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req); 546 + int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req); 528 547 void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs); 529 548 void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req); 530 549 void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req); ··· 548 569 enum rpcrdma_chunktype rtype); 549 570 void rpcrdma_sendctx_unmap(struct rpcrdma_sendctx *sc); 550 571 int rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst); 551 - void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *); 572 + void rpcrdma_set_max_header_sizes(struct rpcrdma_ep *ep); 552 573 void rpcrdma_reset_cwnd(struct rpcrdma_xprt *r_xprt); 553 574 void rpcrdma_complete_rqst(struct rpcrdma_rep *rep); 554 575 void rpcrdma_reply_handler(struct rpcrdma_rep *rep);
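With struct rpcrdma_ia gone, the connection state now lives in a single, separately allocated rpcrdma_ep that rx_ep points to, and the new re_kref together with the rpcrdma_ep_destroy() call in rpcrdma_xprt_disconnect() suggests its lifetime is shared with the RDMA connection manager. A small standalone sketch of that shared-ownership pattern follows, under that assumption; the toy_* names are invented and only stand in for re_kref, re_connect_status, and the rx_ep pointer.

#include <stdio.h>
#include <stdlib.h>

struct toy_ep {
	int refs;		/* stands in for rpcrdma_ep::re_kref      */
	int connect_status;	/* stands in for re_connect_status        */
};

static struct toy_ep *toy_ep_get(struct toy_ep *ep)
{
	ep->refs++;
	return ep;
}

/* Drop one reference; the endpoint is freed only when the last owner lets go. */
static void toy_ep_put(struct toy_ep *ep)
{
	if (--ep->refs == 0) {
		printf("endpoint freed\n");
		free(ep);
	}
}

int main(void)
{
	struct toy_ep *transport_ref, *cm_ref;

	/* Connect: allocate a fresh endpoint; the transport holds one
	 * reference and the connection-manager side takes another. */
	transport_ref = calloc(1, sizeof(*transport_ref));
	if (!transport_ref)
		return 1;
	transport_ref->refs = 1;
	cm_ref = toy_ep_get(transport_ref);
	transport_ref->connect_status = 1;

	/* Disconnect: the transport clears its pointer and lets go first ... */
	toy_ep_put(transport_ref);
	transport_ref = NULL;

	/* ... and the other side drops the final reference whenever it finishes. */
	toy_ep_put(cm_ref);
	return 0;
}

Because each connect attempt builds a new endpoint and disconnect merely drops the transport's reference, whichever side finishes last still holds a valid object rather than a dangling pointer.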
+1 -1
net/sunrpc/xprtsock.c
··· 1861 1861 struct rpc_xprt *xprt = &transport->xprt; 1862 1862 struct file *filp; 1863 1863 struct socket *sock; 1864 - int status = -EIO; 1864 + int status; 1865 1865 1866 1866 status = __sock_create(xprt->xprt_net, AF_LOCAL, 1867 1867 SOCK_STREAM, 0, &sock, 1);