Merge tag 'nfs-for-6.15-1' of git://git.linux-nfs.org/projects/trondmy/linux-nfs

Pull NFS client updates from Trond Myklebust:
"Bugfixes:

- Three fixes for looping in the NFSv4 state manager delegation code

- Fix for the NFSv4 state XDR code (Neil Brown)

- Fix a leaked reference in nfs_lock_and_join_requests()

- Fix a use-after-free in the delegation return code

Features:

- Implement the NFSv4.2 copy offload OFFLOAD_STATUS operation to
allow monitoring of an in-progress copy

- Add a mount option to force NFSv3/NFSv4 to use READDIRPLUS in a
getdents() call

- SUNRPC now allows some basic management of an existing RPC client's
connections using sysfs

- Improvements to the automated teardown of an NFS client when the
container it was initiated from gets killed

- Improvements to prevent tasks from getting stuck in a killable wait
state after calling exit_signals()"

* tag 'nfs-for-6.15-1' of git://git.linux-nfs.org/projects/trondmy/linux-nfs: (29 commits)
nfs: Add missing release on error in nfs_lock_and_join_requests()
NFSv4: Check for delegation validity in nfs_start_delegation_return_locked()
NFS: Don't allow waiting for exiting tasks
SUNRPC: Don't allow waiting for exiting tasks
NFSv4: Treat ENETUNREACH errors as fatal for state recovery
NFSv4: clp->cl_cons_state < 0 signifies an invalid nfs_client
NFSv4: Further cleanups to shutdown loops
NFS: Shut down the nfs_client only after all the superblocks
SUNRPC: rpc_clnt_set_transport() must not change the autobind setting
SUNRPC: rpcbind should never reset the port to the value '0'
pNFS/flexfiles: Report ENETDOWN as a connection error
pNFS/flexfiles: Treat ENETUNREACH errors as fatal in containers
NFS: Treat ENETUNREACH errors as fatal in containers
NFS: Add a mount option to make ENETUNREACH errors fatal
sunrpc: Add a sysfs file for one-step xprt deletion
sunrpc: Add a sysfs file for adding a new xprt
sunrpc: Add a sysfs files for rpc_clnt information
sunrpc: Add a sysfs attr for xprtsec
NFS: Add implid to sysfs
NFS: Extend rdirplus mount option with "force|none"
...

+807 -74
+5
fs/nfs/client.c
··· 546 546 args.flags |= RPC_CLNT_CREATE_NOPING; 547 547 if (test_bit(NFS_CS_REUSEPORT, &clp->cl_flags)) 548 548 args.flags |= RPC_CLNT_CREATE_REUSEPORT; 549 + if (test_bit(NFS_CS_NETUNREACH_FATAL, &clp->cl_flags)) 550 + args.flags |= RPC_CLNT_CREATE_NETUNREACH_FATAL; 549 551 550 552 if (!IS_ERR(clp->cl_rpcclient)) 551 553 return 0; ··· 710 708 ctx->timeo, ctx->retrans); 711 709 if (ctx->flags & NFS_MOUNT_NORESVPORT) 712 710 set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags); 711 + 712 + if (ctx->flags & NFS_MOUNT_NETUNREACH_FATAL) 713 + __set_bit(NFS_CS_NETUNREACH_FATAL, &cl_init.init_flags); 713 714 714 715 /* Allocate or find a client reference we can use */ 715 716 clp = nfs_get_client(&cl_init);
+44 -22
fs/nfs/delegation.c
··· 79 79 struct nfs_delegation *delegation) 80 80 { 81 81 set_bit(NFS_DELEGATION_RETURN, &delegation->flags); 82 + set_bit(NFS4SERV_DELEGRETURN, &server->delegation_flags); 82 83 set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state); 83 84 } 84 85 ··· 307 306 if (delegation == NULL) 308 307 goto out; 309 308 spin_lock(&delegation->lock); 310 - if (!test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) { 309 + if (delegation->inode && 310 + !test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) { 311 311 clear_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags); 312 312 /* Refcount matched in nfs_end_delegation_return() */ 313 313 ret = nfs_get_delegation(delegation); ··· 332 330 } 333 331 334 332 static void nfs_abort_delegation_return(struct nfs_delegation *delegation, 335 - struct nfs_client *clp, int err) 333 + struct nfs_server *server, int err) 336 334 { 337 - 338 335 spin_lock(&delegation->lock); 339 336 clear_bit(NFS_DELEGATION_RETURNING, &delegation->flags); 340 337 if (err == -EAGAIN) { 341 338 set_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags); 342 - set_bit(NFS4CLNT_DELEGRETURN_DELAYED, &clp->cl_state); 339 + set_bit(NFS4SERV_DELEGRETURN_DELAYED, 340 + &server->delegation_flags); 341 + set_bit(NFS4CLNT_DELEGRETURN_DELAYED, 342 + &server->nfs_client->cl_state); 343 343 } 344 344 spin_unlock(&delegation->lock); 345 345 } ··· 551 547 */ 552 548 static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation *delegation, int issync) 553 549 { 554 - struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 550 + struct nfs_server *server = NFS_SERVER(inode); 555 551 unsigned int mode = O_WRONLY | O_RDWR; 556 552 int err = 0; 557 553 ··· 573 569 /* 574 570 * Guard against state recovery 575 571 */ 576 - err = nfs4_wait_clnt_recover(clp); 572 + err = nfs4_wait_clnt_recover(server->nfs_client); 577 573 } 578 574 579 575 if (err) { 580 - nfs_abort_delegation_return(delegation, clp, err); 576 + 
nfs_abort_delegation_return(delegation, server, err); 581 577 goto out; 582 578 } 583 579 ··· 594 590 595 591 if (test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags)) 596 592 ret = true; 597 - else if (test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags)) { 598 - struct inode *inode; 599 - 600 - spin_lock(&delegation->lock); 601 - inode = delegation->inode; 602 - if (inode && list_empty(&NFS_I(inode)->open_files)) 603 - ret = true; 604 - spin_unlock(&delegation->lock); 605 - } 606 - if (ret) 607 - clear_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags); 608 593 if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags) || 609 594 test_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags) || 610 595 test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) ··· 612 619 struct nfs_delegation *place_holder_deleg = NULL; 613 620 int err = 0; 614 621 622 + if (!test_and_clear_bit(NFS4SERV_DELEGRETURN, 623 + &server->delegation_flags)) 624 + return 0; 615 625 restart: 616 626 /* 617 627 * To avoid quadratic looping we hold a reference ··· 666 670 cond_resched(); 667 671 if (!err) 668 672 goto restart; 673 + set_bit(NFS4SERV_DELEGRETURN, &server->delegation_flags); 669 674 set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state); 670 675 goto out; 671 676 } ··· 681 684 struct nfs_delegation *d; 682 685 bool ret = false; 683 686 687 + if (!test_and_clear_bit(NFS4SERV_DELEGRETURN_DELAYED, 688 + &server->delegation_flags)) 689 + goto out; 684 690 list_for_each_entry_rcu (d, &server->delegations, super_list) { 685 691 if (!test_bit(NFS_DELEGATION_RETURN_DELAYED, &d->flags)) 686 692 continue; ··· 691 691 clear_bit(NFS_DELEGATION_RETURN_DELAYED, &d->flags); 692 692 ret = true; 693 693 } 694 + out: 694 695 return ret; 695 696 } 696 697 ··· 879 878 return nfs4_inode_return_delegation(inode); 880 879 } 881 880 882 - static void nfs_mark_return_if_closed_delegation(struct nfs_server *server, 883 - struct nfs_delegation *delegation) 881 + static void 882 + 
nfs_mark_return_if_closed_delegation(struct nfs_server *server, 883 + struct nfs_delegation *delegation) 884 884 { 885 - set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags); 886 - set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state); 885 + struct inode *inode; 886 + 887 + if (test_bit(NFS_DELEGATION_RETURN, &delegation->flags) || 888 + test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags)) 889 + return; 890 + spin_lock(&delegation->lock); 891 + inode = delegation->inode; 892 + if (!inode) 893 + goto out; 894 + if (list_empty(&NFS_I(inode)->open_files)) 895 + nfs_mark_return_delegation(server, delegation); 896 + else 897 + set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags); 898 + out: 899 + spin_unlock(&delegation->lock); 887 900 } 888 901 889 902 static bool nfs_server_mark_return_all_delegations(struct nfs_server *server) ··· 1291 1276 return; 1292 1277 clear_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags); 1293 1278 set_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags); 1279 + set_bit(NFS4SERV_DELEGATION_EXPIRED, &server->delegation_flags); 1294 1280 set_bit(NFS4CLNT_DELEGATION_EXPIRED, &server->nfs_client->cl_state); 1295 1281 } 1296 1282 ··· 1370 1354 nfs4_stateid stateid; 1371 1355 unsigned long gen = ++server->delegation_gen; 1372 1356 1357 + if (!test_and_clear_bit(NFS4SERV_DELEGATION_EXPIRED, 1358 + &server->delegation_flags)) 1359 + return 0; 1373 1360 restart: 1374 1361 rcu_read_lock(); 1375 1362 list_for_each_entry_rcu(delegation, &server->delegations, super_list) { ··· 1402 1383 goto restart; 1403 1384 } 1404 1385 nfs_inode_mark_test_expired_delegation(server,inode); 1386 + set_bit(NFS4SERV_DELEGATION_EXPIRED, &server->delegation_flags); 1387 + set_bit(NFS4CLNT_DELEGATION_EXPIRED, 1388 + &server->nfs_client->cl_state); 1405 1389 iput(inode); 1406 1390 return -EAGAIN; 1407 1391 }
+2
fs/nfs/dir.c
··· 666 666 { 667 667 if (!nfs_server_capable(dir, NFS_CAP_READDIRPLUS)) 668 668 return false; 669 + if (NFS_SERVER(dir)->flags & NFS_MOUNT_FORCE_RDIRPLUS) 670 + return true; 669 671 if (ctx->pos == 0 || 670 672 cache_hits + cache_misses > NFS_READDIR_CACHE_USAGE_THRESHOLD) 671 673 return true;
+22 -2
fs/nfs/flexfilelayout/flexfilelayout.c
··· 1154 1154 rpc_wake_up(&tbl->slot_tbl_waitq); 1155 1155 goto reset; 1156 1156 /* RPC connection errors */ 1157 + case -ENETDOWN: 1158 + case -ENETUNREACH: 1159 + if (test_bit(NFS_CS_NETUNREACH_FATAL, &clp->cl_flags)) 1160 + return -NFS4ERR_FATAL_IOERROR; 1161 + fallthrough; 1157 1162 case -ECONNREFUSED: 1158 1163 case -EHOSTDOWN: 1159 1164 case -EHOSTUNREACH: 1160 - case -ENETUNREACH: 1161 1165 case -EIO: 1162 1166 case -ETIMEDOUT: 1163 1167 case -EPIPE: ··· 1187 1183 1188 1184 /* Retry all errors through either pNFS or MDS except for -EJUKEBOX */ 1189 1185 static int ff_layout_async_handle_error_v3(struct rpc_task *task, 1186 + struct nfs_client *clp, 1190 1187 struct pnfs_layout_segment *lseg, 1191 1188 u32 idx) 1192 1189 { ··· 1205 1200 case -EJUKEBOX: 1206 1201 nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY); 1207 1202 goto out_retry; 1203 + case -ENETDOWN: 1204 + case -ENETUNREACH: 1205 + if (test_bit(NFS_CS_NETUNREACH_FATAL, &clp->cl_flags)) 1206 + return -NFS4ERR_FATAL_IOERROR; 1207 + fallthrough; 1208 1208 default: 1209 1209 dprintk("%s DS connection error %d\n", __func__, 1210 1210 task->tk_status); ··· 1244 1234 1245 1235 switch (vers) { 1246 1236 case 3: 1247 - return ff_layout_async_handle_error_v3(task, lseg, idx); 1237 + return ff_layout_async_handle_error_v3(task, clp, lseg, idx); 1248 1238 case 4: 1249 1239 return ff_layout_async_handle_error_v4(task, state, clp, 1250 1240 lseg, idx); ··· 1274 1264 case -ECONNRESET: 1275 1265 case -EHOSTDOWN: 1276 1266 case -EHOSTUNREACH: 1267 + case -ENETDOWN: 1277 1268 case -ENETUNREACH: 1278 1269 case -EADDRINUSE: 1279 1270 case -ENOBUFS: ··· 1348 1337 return task->tk_status; 1349 1338 case -EAGAIN: 1350 1339 goto out_eagain; 1340 + case -NFS4ERR_FATAL_IOERROR: 1341 + task->tk_status = -EIO; 1342 + return 0; 1351 1343 } 1352 1344 1353 1345 return 0; ··· 1521 1507 return task->tk_status; 1522 1508 case -EAGAIN: 1523 1509 return -EAGAIN; 1510 + case -NFS4ERR_FATAL_IOERROR: 1511 + task->tk_status = -EIO; 
1512 + return 0; 1524 1513 } 1525 1514 1526 1515 if (hdr->res.verf->committed == NFS_FILE_SYNC || ··· 1568 1551 case -EAGAIN: 1569 1552 rpc_restart_call_prepare(task); 1570 1553 return -EAGAIN; 1554 + case -NFS4ERR_FATAL_IOERROR: 1555 + task->tk_status = -EIO; 1556 + return 0; 1571 1557 } 1572 1558 1573 1559 ff_layout_set_layoutcommit(data->inode, data->lseg, data->lwb);
+67 -4
fs/nfs/fs_context.c
··· 50 50 Opt_clientaddr, 51 51 Opt_cto, 52 52 Opt_alignwrite, 53 + Opt_fatal_neterrors, 53 54 Opt_fg, 54 55 Opt_fscache, 55 56 Opt_fscache_flag, ··· 73 72 Opt_posix, 74 73 Opt_proto, 75 74 Opt_rdirplus, 75 + Opt_rdirplus_none, 76 + Opt_rdirplus_force, 76 77 Opt_rdma, 77 78 Opt_resvport, 78 79 Opt_retrans, ··· 96 93 Opt_wsize, 97 94 Opt_write, 98 95 Opt_xprtsec, 96 + }; 97 + 98 + enum { 99 + Opt_fatal_neterrors_default, 100 + Opt_fatal_neterrors_enetunreach, 101 + Opt_fatal_neterrors_none, 102 + }; 103 + 104 + static const struct constant_table nfs_param_enums_fatal_neterrors[] = { 105 + { "default", Opt_fatal_neterrors_default }, 106 + { "ENETDOWN:ENETUNREACH", Opt_fatal_neterrors_enetunreach }, 107 + { "ENETUNREACH:ENETDOWN", Opt_fatal_neterrors_enetunreach }, 108 + { "none", Opt_fatal_neterrors_none }, 109 + {} 99 110 }; 100 111 101 112 enum { ··· 168 151 fsparam_string("clientaddr", Opt_clientaddr), 169 152 fsparam_flag_no("cto", Opt_cto), 170 153 fsparam_flag_no("alignwrite", Opt_alignwrite), 154 + fsparam_enum("fatal_neterrors", Opt_fatal_neterrors, 155 + nfs_param_enums_fatal_neterrors), 171 156 fsparam_flag ("fg", Opt_fg), 172 157 fsparam_flag_no("fsc", Opt_fscache_flag), 173 158 fsparam_string("fsc", Opt_fscache), ··· 193 174 fsparam_u32 ("port", Opt_port), 194 175 fsparam_flag_no("posix", Opt_posix), 195 176 fsparam_string("proto", Opt_proto), 196 - fsparam_flag_no("rdirplus", Opt_rdirplus), 177 + fsparam_flag_no("rdirplus", Opt_rdirplus), // rdirplus|nordirplus 178 + fsparam_string("rdirplus", Opt_rdirplus), // rdirplus=... 
197 179 fsparam_flag ("rdma", Opt_rdma), 198 180 fsparam_flag_no("resvport", Opt_resvport), 199 181 fsparam_u32 ("retrans", Opt_retrans), ··· 305 285 { "none", Opt_xprtsec_none }, 306 286 { "tls", Opt_xprtsec_tls }, 307 287 { "mtls", Opt_xprtsec_mtls }, 288 + {} 289 + }; 290 + 291 + static const struct constant_table nfs_rdirplus_tokens[] = { 292 + { "none", Opt_rdirplus_none }, 293 + { "force", Opt_rdirplus_force }, 308 294 {} 309 295 }; 310 296 ··· 662 636 ctx->flags &= ~NFS_MOUNT_NOACL; 663 637 break; 664 638 case Opt_rdirplus: 665 - if (result.negated) 639 + if (result.negated) { 640 + ctx->flags &= ~NFS_MOUNT_FORCE_RDIRPLUS; 666 641 ctx->flags |= NFS_MOUNT_NORDIRPLUS; 667 - else 668 - ctx->flags &= ~NFS_MOUNT_NORDIRPLUS; 642 + } else if (!param->string) { 643 + ctx->flags &= ~(NFS_MOUNT_NORDIRPLUS | NFS_MOUNT_FORCE_RDIRPLUS); 644 + } else { 645 + switch (lookup_constant(nfs_rdirplus_tokens, param->string, -1)) { 646 + case Opt_rdirplus_none: 647 + ctx->flags &= ~NFS_MOUNT_FORCE_RDIRPLUS; 648 + ctx->flags |= NFS_MOUNT_NORDIRPLUS; 649 + break; 650 + case Opt_rdirplus_force: 651 + ctx->flags &= ~NFS_MOUNT_NORDIRPLUS; 652 + ctx->flags |= NFS_MOUNT_FORCE_RDIRPLUS; 653 + break; 654 + default: 655 + goto out_invalid_value; 656 + } 657 + } 669 658 break; 670 659 case Opt_sharecache: 671 660 if (result.negated) ··· 912 871 if (result.uint_32 < 1 || result.uint_32 > NFS_MAX_TRANSPORTS) 913 872 goto out_of_bounds; 914 873 ctx->nfs_server.max_connect = result.uint_32; 874 + break; 875 + case Opt_fatal_neterrors: 876 + trace_nfs_mount_assign(param->key, param->string); 877 + switch (result.uint_32) { 878 + case Opt_fatal_neterrors_default: 879 + if (fc->net_ns != &init_net) 880 + ctx->flags |= NFS_MOUNT_NETUNREACH_FATAL; 881 + else 882 + ctx->flags &= ~NFS_MOUNT_NETUNREACH_FATAL; 883 + break; 884 + case Opt_fatal_neterrors_enetunreach: 885 + ctx->flags |= NFS_MOUNT_NETUNREACH_FATAL; 886 + break; 887 + case Opt_fatal_neterrors_none: 888 + ctx->flags &= 
~NFS_MOUNT_NETUNREACH_FATAL; 889 + break; 890 + default: 891 + goto out_invalid_value; 892 + } 915 893 break; 916 894 case Opt_lookupcache: 917 895 trace_nfs_mount_assign(param->key, param->string); ··· 1710 1650 ctx->xprtsec.policy = RPC_XPRTSEC_NONE; 1711 1651 ctx->xprtsec.cert_serial = TLS_NO_CERT; 1712 1652 ctx->xprtsec.privkey_serial = TLS_NO_PRIVKEY; 1653 + 1654 + if (fc->net_ns != &init_net) 1655 + ctx->flags |= NFS_MOUNT_NETUNREACH_FATAL; 1713 1656 1714 1657 fc->s_iflags |= SB_I_STABLE_WRITES; 1715 1658 }
+2
fs/nfs/inode.c
··· 74 74 75 75 int nfs_wait_bit_killable(struct wait_bit_key *key, int mode) 76 76 { 77 + if (unlikely(nfs_current_task_exiting())) 78 + return -EINTR; 77 79 schedule(); 78 80 if (signal_pending_state(mode, current)) 79 81 return -ERESTARTSYS;
+5
fs/nfs/internal.h
··· 912 912 } 913 913 #endif 914 914 915 + static inline bool nfs_current_task_exiting(void) 916 + { 917 + return (current->flags & PF_EXITING) != 0; 918 + } 919 + 915 920 static inline bool nfs_error_is_fatal(int err) 916 921 { 917 922 switch (err) {
+2
fs/nfs/nfs3client.c
··· 120 120 121 121 if (mds_srv->flags & NFS_MOUNT_NORESVPORT) 122 122 __set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags); 123 + if (test_bit(NFS_CS_NETUNREACH_FATAL, &mds_clp->cl_flags)) 124 + __set_bit(NFS_CS_NETUNREACH_FATAL, &cl_init.init_flags); 123 125 124 126 __set_bit(NFS_CS_DS, &cl_init.init_flags); 125 127
+1 -1
fs/nfs/nfs3proc.c
··· 39 39 __set_current_state(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE); 40 40 schedule_timeout(NFS_JUKEBOX_RETRY_TIME); 41 41 res = -ERESTARTSYS; 42 - } while (!fatal_signal_pending(current)); 42 + } while (!fatal_signal_pending(current) && !nfs_current_task_exiting()); 43 43 return res; 44 44 } 45 45
+162 -10
fs/nfs/nfs42proc.c
··· 21 21 22 22 #define NFSDBG_FACILITY NFSDBG_PROC 23 23 static int nfs42_do_offload_cancel_async(struct file *dst, nfs4_stateid *std); 24 + static int nfs42_proc_offload_status(struct file *file, nfs4_stateid *stateid, 25 + u64 *copied); 24 26 25 27 static void nfs42_set_netaddr(struct file *filep, struct nfs42_netaddr *naddr) 26 28 { ··· 175 173 return err; 176 174 } 177 175 176 + static void nfs4_copy_dequeue_callback(struct nfs_server *dst_server, 177 + struct nfs_server *src_server, 178 + struct nfs4_copy_state *copy) 179 + { 180 + spin_lock(&dst_server->nfs_client->cl_lock); 181 + list_del_init(&copy->copies); 182 + spin_unlock(&dst_server->nfs_client->cl_lock); 183 + if (dst_server != src_server) { 184 + spin_lock(&src_server->nfs_client->cl_lock); 185 + list_del_init(&copy->src_copies); 186 + spin_unlock(&src_server->nfs_client->cl_lock); 187 + } 188 + } 189 + 178 190 static int handle_async_copy(struct nfs42_copy_res *res, 179 191 struct nfs_server *dst_server, 180 192 struct nfs_server *src_server, ··· 198 182 bool *restart) 199 183 { 200 184 struct nfs4_copy_state *copy, *tmp_copy = NULL, *iter; 201 - int status = NFS4_OK; 202 185 struct nfs_open_context *dst_ctx = nfs_file_open_context(dst); 203 186 struct nfs_open_context *src_ctx = nfs_file_open_context(src); 187 + struct nfs_client *clp = dst_server->nfs_client; 188 + unsigned long timeout = 3 * HZ; 189 + int status = NFS4_OK; 190 + u64 copied; 204 191 205 192 copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_KERNEL); 206 193 if (!copy) ··· 241 222 spin_unlock(&src_server->nfs_client->cl_lock); 242 223 } 243 224 244 - status = wait_for_completion_interruptible(&copy->completion); 245 - spin_lock(&dst_server->nfs_client->cl_lock); 246 - list_del_init(&copy->copies); 247 - spin_unlock(&dst_server->nfs_client->cl_lock); 248 - if (dst_server != src_server) { 249 - spin_lock(&src_server->nfs_client->cl_lock); 250 - list_del_init(&copy->src_copies); 251 - spin_unlock(&src_server->nfs_client->cl_lock); 
252 - } 225 + wait: 226 + status = wait_for_completion_interruptible_timeout(&copy->completion, 227 + timeout); 228 + if (!status) 229 + goto timeout; 230 + nfs4_copy_dequeue_callback(dst_server, src_server, copy); 253 231 if (status == -ERESTARTSYS) { 254 232 goto out_cancel; 255 233 } else if (copy->flags || copy->error == NFS4ERR_PARTNER_NO_AUTH) { ··· 256 240 } 257 241 out: 258 242 res->write_res.count = copy->count; 243 + /* Copy out the updated write verifier provided by CB_OFFLOAD. */ 259 244 memcpy(&res->write_res.verifier, &copy->verf, sizeof(copy->verf)); 260 245 status = -copy->error; 261 246 ··· 267 250 nfs42_do_offload_cancel_async(dst, &copy->stateid); 268 251 if (!nfs42_files_from_same_server(src, dst)) 269 252 nfs42_do_offload_cancel_async(src, src_stateid); 253 + goto out_free; 254 + timeout: 255 + timeout <<= 1; 256 + if (timeout > (clp->cl_lease_time >> 1)) 257 + timeout = clp->cl_lease_time >> 1; 258 + status = nfs42_proc_offload_status(dst, &copy->stateid, &copied); 259 + if (status == -EINPROGRESS) 260 + goto wait; 261 + nfs4_copy_dequeue_callback(dst_server, src_server, copy); 262 + switch (status) { 263 + case 0: 264 + /* The server recognized the copy stateid, so it hasn't 265 + * rebooted. Don't overwrite the verifier returned in the 266 + * COPY result. */ 267 + res->write_res.count = copied; 268 + goto out_free; 269 + case -EREMOTEIO: 270 + /* COPY operation failed on the server. */ 271 + status = -EOPNOTSUPP; 272 + res->write_res.count = copied; 273 + goto out_free; 274 + case -EBADF: 275 + /* Server did not recognize the copy stateid. It has 276 + * probably restarted and lost the plot. */ 277 + res->write_res.count = 0; 278 + status = -EOPNOTSUPP; 279 + break; 280 + case -EOPNOTSUPP: 281 + /* RFC 7862 REQUIREs server to support OFFLOAD_STATUS when 282 + * it has signed up for an async COPY, so server is not 283 + * spec-compliant. 
*/ 284 + res->write_res.count = 0; 285 + } 270 286 goto out_free; 271 287 } 272 288 ··· 629 579 if (status == -ENOTSUPP) 630 580 dst_server->caps &= ~NFS_CAP_OFFLOAD_CANCEL; 631 581 rpc_put_task(task); 582 + return status; 583 + } 584 + 585 + static int 586 + _nfs42_proc_offload_status(struct nfs_server *server, struct file *file, 587 + struct nfs42_offload_data *data) 588 + { 589 + struct nfs_open_context *ctx = nfs_file_open_context(file); 590 + struct rpc_message msg = { 591 + .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OFFLOAD_STATUS], 592 + .rpc_argp = &data->args, 593 + .rpc_resp = &data->res, 594 + .rpc_cred = ctx->cred, 595 + }; 596 + int status; 597 + 598 + status = nfs4_call_sync(server->client, server, &msg, 599 + &data->args.osa_seq_args, 600 + &data->res.osr_seq_res, 1); 601 + trace_nfs4_offload_status(&data->args, status); 602 + switch (status) { 603 + case 0: 604 + break; 605 + 606 + case -NFS4ERR_ADMIN_REVOKED: 607 + case -NFS4ERR_BAD_STATEID: 608 + case -NFS4ERR_OLD_STATEID: 609 + /* 610 + * Server does not recognize the COPY stateid. CB_OFFLOAD 611 + * could have purged it, or server might have rebooted. 612 + * Since COPY stateids don't have an associated inode, 613 + * avoid triggering state recovery. 
614 + */ 615 + status = -EBADF; 616 + break; 617 + case -NFS4ERR_NOTSUPP: 618 + case -ENOTSUPP: 619 + case -EOPNOTSUPP: 620 + server->caps &= ~NFS_CAP_OFFLOAD_STATUS; 621 + status = -EOPNOTSUPP; 622 + break; 623 + } 624 + 625 + return status; 626 + } 627 + 628 + /** 629 + * nfs42_proc_offload_status - Poll completion status of an async copy operation 630 + * @dst: handle of file being copied into 631 + * @stateid: copy stateid (from async COPY result) 632 + * @copied: OUT: number of bytes copied so far 633 + * 634 + * Return values: 635 + * %0: Server returned an NFS4_OK completion status 636 + * %-EINPROGRESS: Server returned no completion status 637 + * %-EREMOTEIO: Server returned an error completion status 638 + * %-EBADF: Server did not recognize the copy stateid 639 + * %-EOPNOTSUPP: Server does not support OFFLOAD_STATUS 640 + * %-ERESTARTSYS: Wait interrupted by signal 641 + * 642 + * Other negative errnos indicate the client could not complete the 643 + * request. 644 + */ 645 + static int 646 + nfs42_proc_offload_status(struct file *dst, nfs4_stateid *stateid, u64 *copied) 647 + { 648 + struct inode *inode = file_inode(dst); 649 + struct nfs_server *server = NFS_SERVER(inode); 650 + struct nfs4_exception exception = { 651 + .inode = inode, 652 + }; 653 + struct nfs42_offload_data *data; 654 + int status; 655 + 656 + if (!(server->caps & NFS_CAP_OFFLOAD_STATUS)) 657 + return -EOPNOTSUPP; 658 + 659 + data = kzalloc(sizeof(*data), GFP_KERNEL); 660 + if (!data) 661 + return -ENOMEM; 662 + data->seq_server = server; 663 + data->args.osa_src_fh = NFS_FH(inode); 664 + memcpy(&data->args.osa_stateid, stateid, 665 + sizeof(data->args.osa_stateid)); 666 + exception.stateid = &data->args.osa_stateid; 667 + do { 668 + status = _nfs42_proc_offload_status(server, dst, data); 669 + if (status == -EOPNOTSUPP) 670 + goto out; 671 + status = nfs4_handle_exception(server, status, &exception); 672 + } while (exception.retry); 673 + if (status) 674 + goto out; 675 + 676 + 
*copied = data->res.osr_count; 677 + if (!data->res.complete_count) 678 + status = -EINPROGRESS; 679 + else if (data->res.osr_complete != NFS_OK) 680 + status = -EREMOTEIO; 681 + 682 + out: 683 + kfree(data); 632 684 return status; 633 685 } 634 686
+86
fs/nfs/nfs42xdr.c
··· 35 35 #define encode_offload_cancel_maxsz (op_encode_hdr_maxsz + \ 36 36 XDR_QUADLEN(NFS4_STATEID_SIZE)) 37 37 #define decode_offload_cancel_maxsz (op_decode_hdr_maxsz) 38 + #define encode_offload_status_maxsz (op_encode_hdr_maxsz + \ 39 + XDR_QUADLEN(NFS4_STATEID_SIZE)) 40 + #define decode_offload_status_maxsz (op_decode_hdr_maxsz + \ 41 + 2 /* osr_count */ + \ 42 + 2 /* osr_complete */) 38 43 #define encode_copy_notify_maxsz (op_encode_hdr_maxsz + \ 39 44 XDR_QUADLEN(NFS4_STATEID_SIZE) + \ 40 45 1 + /* nl4_type */ \ ··· 148 143 decode_sequence_maxsz + \ 149 144 decode_putfh_maxsz + \ 150 145 decode_offload_cancel_maxsz) 146 + #define NFS4_enc_offload_status_sz (compound_encode_hdr_maxsz + \ 147 + encode_sequence_maxsz + \ 148 + encode_putfh_maxsz + \ 149 + encode_offload_status_maxsz) 150 + #define NFS4_dec_offload_status_sz (compound_decode_hdr_maxsz + \ 151 + decode_sequence_maxsz + \ 152 + decode_putfh_maxsz + \ 153 + decode_offload_status_maxsz) 151 154 #define NFS4_enc_copy_notify_sz (compound_encode_hdr_maxsz + \ 152 155 encode_sequence_maxsz + \ 153 156 encode_putfh_maxsz + \ ··· 355 342 struct compound_hdr *hdr) 356 343 { 357 344 encode_op_hdr(xdr, OP_OFFLOAD_CANCEL, decode_offload_cancel_maxsz, hdr); 345 + encode_nfs4_stateid(xdr, &args->osa_stateid); 346 + } 347 + 348 + static void encode_offload_status(struct xdr_stream *xdr, 349 + const struct nfs42_offload_status_args *args, 350 + struct compound_hdr *hdr) 351 + { 352 + encode_op_hdr(xdr, OP_OFFLOAD_STATUS, decode_offload_status_maxsz, hdr); 358 353 encode_nfs4_stateid(xdr, &args->osa_stateid); 359 354 } 360 355 ··· 587 566 encode_sequence(xdr, &args->osa_seq_args, &hdr); 588 567 encode_putfh(xdr, args->osa_src_fh, &hdr); 589 568 encode_offload_cancel(xdr, args, &hdr); 569 + encode_nops(&hdr); 570 + } 571 + 572 + /* 573 + * Encode OFFLOAD_STATUS request 574 + */ 575 + static void nfs4_xdr_enc_offload_status(struct rpc_rqst *req, 576 + struct xdr_stream *xdr, 577 + const void *data) 578 + { 579 + 
const struct nfs42_offload_status_args *args = data; 580 + struct compound_hdr hdr = { 581 + .minorversion = nfs4_xdr_minorversion(&args->osa_seq_args), 582 + }; 583 + 584 + encode_compound_hdr(xdr, req, &hdr); 585 + encode_sequence(xdr, &args->osa_seq_args, &hdr); 586 + encode_putfh(xdr, args->osa_src_fh, &hdr); 587 + encode_offload_status(xdr, args, &hdr); 590 588 encode_nops(&hdr); 591 589 } 592 590 ··· 959 919 struct nfs42_offload_status_res *res) 960 920 { 961 921 return decode_op_hdr(xdr, OP_OFFLOAD_CANCEL); 922 + } 923 + 924 + static int decode_offload_status(struct xdr_stream *xdr, 925 + struct nfs42_offload_status_res *res) 926 + { 927 + ssize_t result; 928 + int status; 929 + 930 + status = decode_op_hdr(xdr, OP_OFFLOAD_STATUS); 931 + if (status) 932 + return status; 933 + /* osr_count */ 934 + if (xdr_stream_decode_u64(xdr, &res->osr_count) < 0) 935 + return -EIO; 936 + /* osr_complete<1> */ 937 + result = xdr_stream_decode_uint32_array(xdr, &res->osr_complete, 1); 938 + if (result < 0) 939 + return -EIO; 940 + res->complete_count = result; 941 + return 0; 962 942 } 963 943 964 944 static int decode_copy_notify(struct xdr_stream *xdr, ··· 1425 1365 if (status) 1426 1366 goto out; 1427 1367 status = decode_offload_cancel(xdr, res); 1368 + 1369 + out: 1370 + return status; 1371 + } 1372 + 1373 + /* 1374 + * Decode OFFLOAD_STATUS response 1375 + */ 1376 + static int nfs4_xdr_dec_offload_status(struct rpc_rqst *rqstp, 1377 + struct xdr_stream *xdr, 1378 + void *data) 1379 + { 1380 + struct nfs42_offload_status_res *res = data; 1381 + struct compound_hdr hdr; 1382 + int status; 1383 + 1384 + status = decode_compound_hdr(xdr, &hdr); 1385 + if (status) 1386 + goto out; 1387 + status = decode_sequence(xdr, &res->osr_seq_res, rqstp); 1388 + if (status) 1389 + goto out; 1390 + status = decode_putfh(xdr); 1391 + if (status) 1392 + goto out; 1393 + status = decode_offload_status(xdr, res); 1428 1394 1429 1395 out: 1430 1396 return status;
+7
fs/nfs/nfs4client.c
··· 233 233 __set_bit(NFS_CS_NO_RETRANS_TIMEOUT, &clp->cl_flags); 234 234 if (test_bit(NFS_CS_PNFS, &cl_init->init_flags)) 235 235 __set_bit(NFS_CS_PNFS, &clp->cl_flags); 236 + if (test_bit(NFS_CS_NETUNREACH_FATAL, &cl_init->init_flags)) 237 + __set_bit(NFS_CS_NETUNREACH_FATAL, &clp->cl_flags); 236 238 /* 237 239 * Set up the connection to the server before we add add to the 238 240 * global list. ··· 939 937 __set_bit(NFS_CS_TSM_POSSIBLE, &cl_init.init_flags); 940 938 server->port = rpc_get_port((struct sockaddr *)addr); 941 939 940 + if (server->flags & NFS_MOUNT_NETUNREACH_FATAL) 941 + __set_bit(NFS_CS_NETUNREACH_FATAL, &cl_init.init_flags); 942 + 942 943 /* Allocate or find a client reference we can use */ 943 944 clp = nfs_get_client(&cl_init); 944 945 if (IS_ERR(clp)) ··· 1016 1011 1017 1012 if (mds_srv->flags & NFS_MOUNT_NORESVPORT) 1018 1013 __set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags); 1014 + if (test_bit(NFS_CS_NETUNREACH_FATAL, &mds_clp->cl_flags)) 1015 + __set_bit(NFS_CS_NETUNREACH_FATAL, &cl_init.init_flags); 1019 1016 1020 1017 __set_bit(NFS_CS_PNFS, &cl_init.init_flags); 1021 1018 cl_init.max_connect = NFS_MAX_TRANSPORTS;
+13 -4
fs/nfs/nfs4proc.c
··· 195 195 return -EBUSY; 196 196 case -NFS4ERR_NOT_SAME: 197 197 return -ENOTSYNC; 198 + case -ENETDOWN: 199 + case -ENETUNREACH: 200 + break; 198 201 default: 199 202 dprintk("%s could not handle NFSv4 error %d\n", 200 203 __func__, -err); ··· 446 443 { 447 444 might_sleep(); 448 445 446 + if (unlikely(nfs_current_task_exiting())) 447 + return -EINTR; 449 448 __set_current_state(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE); 450 449 schedule_timeout(nfs4_update_delay(timeout)); 451 450 if (!__fatal_signal_pending(current)) ··· 459 454 { 460 455 might_sleep(); 461 456 457 + if (unlikely(nfs_current_task_exiting())) 458 + return -EINTR; 462 459 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE_UNSAFE); 463 460 schedule_timeout(nfs4_update_delay(timeout)); 464 461 if (!signal_pending(current)) ··· 1781 1774 rcu_read_unlock(); 1782 1775 trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0); 1783 1776 1784 - if (!fatal_signal_pending(current)) { 1777 + if (!fatal_signal_pending(current) && 1778 + !nfs_current_task_exiting()) { 1785 1779 if (schedule_timeout(5*HZ) == 0) 1786 1780 status = -EAGAIN; 1787 1781 else ··· 3584 3576 write_sequnlock(&state->seqlock); 3585 3577 trace_nfs4_close_stateid_update_wait(state->inode, dst, 0); 3586 3578 3587 - if (fatal_signal_pending(current)) 3579 + if (fatal_signal_pending(current) || nfs_current_task_exiting()) 3588 3580 status = -EINTR; 3589 3581 else 3590 3582 if (schedule_timeout(5*HZ) != 0) ··· 9602 9594 return; 9603 9595 9604 9596 trace_nfs4_sequence(clp, task->tk_status); 9605 - if (task->tk_status < 0 && !task->tk_client->cl_shutdown) { 9597 + if (task->tk_status < 0 && clp->cl_cons_state >= 0) { 9606 9598 dprintk("%s ERROR %d\n", __func__, task->tk_status); 9607 9599 if (refcount_read(&clp->cl_count) == 1) 9608 9600 return; ··· 10806 10798 | NFS_CAP_CLONE 10807 10799 | NFS_CAP_LAYOUTERROR 10808 10800 | NFS_CAP_READ_PLUS 10809 - | NFS_CAP_MOVEABLE, 10801 + | NFS_CAP_MOVEABLE 10802 + | NFS_CAP_OFFLOAD_STATUS, 10810 10803 
.init_client = nfs41_init_client, 10811 10804 .shutdown_client = nfs41_shutdown_client, 10812 10805 .match_stateid = nfs41_match_stateid,
+11 -3
fs/nfs/nfs4state.c
··· 1198 1198 struct rpc_clnt *clnt = clp->cl_rpcclient; 1199 1199 bool swapon = false; 1200 1200 1201 - if (clnt->cl_shutdown) 1201 + if (clp->cl_cons_state < 0) 1202 1202 return; 1203 1203 1204 1204 set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state); ··· 1403 1403 dprintk("%s: scheduling stateid recovery for server %s\n", __func__, 1404 1404 clp->cl_hostname); 1405 1405 nfs4_schedule_state_manager(clp); 1406 - return 0; 1406 + return clp->cl_cons_state < 0 ? clp->cl_cons_state : 0; 1407 1407 } 1408 1408 EXPORT_SYMBOL_GPL(nfs4_schedule_stateid_recovery); 1409 1409 ··· 2739 2739 pr_warn_ratelimited("NFS: state manager%s%s failed on NFSv4 server %s" 2740 2740 " with error %d\n", section_sep, section, 2741 2741 clp->cl_hostname, -status); 2742 - ssleep(1); 2742 + switch (status) { 2743 + case -ENETDOWN: 2744 + case -ENETUNREACH: 2745 + nfs_mark_client_ready(clp, -EIO); 2746 + break; 2747 + default: 2748 + ssleep(1); 2749 + break; 2750 + } 2743 2751 out_drain: 2744 2752 memalloc_nofs_restore(memflags); 2745 2753 nfs4_end_drain_session(clp);
+10 -1
fs/nfs/nfs4trace.h
··· 2608 2608 ) 2609 2609 ); 2610 2610 2611 - TRACE_EVENT(nfs4_offload_cancel, 2611 + DECLARE_EVENT_CLASS(nfs4_offload_class, 2612 2612 TP_PROTO( 2613 2613 const struct nfs42_offload_status_args *args, 2614 2614 int error ··· 2640 2640 __entry->stateid_seq, __entry->stateid_hash 2641 2641 ) 2642 2642 ); 2643 + #define DEFINE_NFS4_OFFLOAD_EVENT(name) \ 2644 + DEFINE_EVENT(nfs4_offload_class, name, \ 2645 + TP_PROTO( \ 2646 + const struct nfs42_offload_status_args *args, \ 2647 + int error \ 2648 + ), \ 2649 + TP_ARGS(args, error)) 2650 + DEFINE_NFS4_OFFLOAD_EVENT(nfs4_offload_cancel); 2651 + DEFINE_NFS4_OFFLOAD_EVENT(nfs4_offload_status); 2643 2652 2644 2653 DECLARE_EVENT_CLASS(nfs4_xattr_event, 2645 2654 TP_PROTO(
+10 -9
fs/nfs/nfs4xdr.c
··· 82 82 * we currently use size 2 (u64) out of (NFS4_OPAQUE_LIMIT >> 2) 83 83 */ 84 84 #define pagepad_maxsz (1) 85 - #define open_owner_id_maxsz (1 + 2 + 1 + 1 + 2) 86 - #define lock_owner_id_maxsz (1 + 1 + 4) 87 - #define decode_lockowner_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ)) 85 + #define open_owner_id_maxsz (2 + 1 + 2 + 2) 86 + #define lock_owner_id_maxsz (2 + 1 + 2) 88 87 #define compound_encode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2)) 89 88 #define compound_decode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2)) 90 89 #define op_encode_hdr_maxsz (1) ··· 184 185 #define encode_claim_null_maxsz (1 + nfs4_name_maxsz) 185 186 #define encode_open_maxsz (op_encode_hdr_maxsz + \ 186 187 2 + encode_share_access_maxsz + 2 + \ 187 - open_owner_id_maxsz + \ 188 + 1 + open_owner_id_maxsz + \ 188 189 encode_opentype_maxsz + \ 189 190 encode_claim_null_maxsz) 190 191 #define decode_space_limit_maxsz (3) ··· 254 255 #define encode_link_maxsz (op_encode_hdr_maxsz + \ 255 256 nfs4_name_maxsz) 256 257 #define decode_link_maxsz (op_decode_hdr_maxsz + decode_change_info_maxsz) 257 - #define encode_lockowner_maxsz (7) 258 + #define encode_lockowner_maxsz (2 + 1 + lock_owner_id_maxsz) 259 + 258 260 #define encode_lock_maxsz (op_encode_hdr_maxsz + \ 259 261 7 + \ 260 262 1 + encode_stateid_maxsz + 1 + \ 261 263 encode_lockowner_maxsz) 262 264 #define decode_lock_denied_maxsz \ 263 - (8 + decode_lockowner_maxsz) 265 + (2 + 2 + 1 + 2 + 1 + lock_owner_id_maxsz) 264 266 #define decode_lock_maxsz (op_decode_hdr_maxsz + \ 265 267 decode_lock_denied_maxsz) 266 268 #define encode_lockt_maxsz (op_encode_hdr_maxsz + 5 + \ ··· 617 617 encode_lockowner_maxsz) 618 618 #define NFS4_dec_release_lockowner_sz \ 619 619 (compound_decode_hdr_maxsz + \ 620 - decode_lockowner_maxsz) 620 + decode_release_lockowner_maxsz) 621 621 #define NFS4_enc_access_sz (compound_encode_hdr_maxsz + \ 622 622 encode_sequence_maxsz + \ 623 623 encode_putfh_maxsz + \ ··· 1412 1412 __be32 *p; 1413 1413 /* 1414 1414 * opcode 4, seqid 
4, share_access 4, share_deny 4, clientid 8, ownerlen 4, 1415 - * owner 4 = 32 1415 + * owner 28 1416 1416 */ 1417 1417 encode_nfs4_seqid(xdr, arg->seqid); 1418 1418 encode_share_access(xdr, arg->share_access); ··· 5077 5077 /* 5078 5078 * We create the owner, so we know a proper owner.id length is 4. 5079 5079 */ 5080 - static int decode_lock_denied (struct xdr_stream *xdr, struct file_lock *fl) 5080 + static int decode_lock_denied(struct xdr_stream *xdr, struct file_lock *fl) 5081 5081 { 5082 5082 uint64_t offset, length, clientid; 5083 5083 __be32 *p; ··· 7702 7702 PROC42(CLONE, enc_clone, dec_clone), 7703 7703 PROC42(COPY, enc_copy, dec_copy), 7704 7704 PROC42(OFFLOAD_CANCEL, enc_offload_cancel, dec_offload_cancel), 7705 + PROC42(OFFLOAD_STATUS, enc_offload_status, dec_offload_status), 7705 7706 PROC42(COPY_NOTIFY, enc_copy_notify, dec_copy_notify), 7706 7707 PROC(LOOKUPP, enc_lookupp, dec_lookupp), 7707 7708 PROC42(LAYOUTERROR, enc_layouterror, dec_layouterror),
+4
fs/nfs/super.c
··· 454 454 { NFS_MOUNT_NONLM, ",nolock", "" }, 455 455 { NFS_MOUNT_NOACL, ",noacl", "" }, 456 456 { NFS_MOUNT_NORDIRPLUS, ",nordirplus", "" }, 457 + { NFS_MOUNT_FORCE_RDIRPLUS, ",rdirplus=force", "" }, 457 458 { NFS_MOUNT_UNSHARED, ",nosharecache", "" }, 458 459 { NFS_MOUNT_NORESVPORT, ",noresvport", "" }, 460 + { NFS_MOUNT_NETUNREACH_FATAL, 461 + ",fatal_neterrors=ENETDOWN:ENETUNREACH", 462 + ",fatal_neterrors=none" }, 459 463 { 0, NULL, NULL } 460 464 }; 461 465 const struct proc_nfs_info *nfs_infop;
+81 -1
fs/nfs/sysfs.c
··· 14 14 #include <linux/rcupdate.h> 15 15 #include <linux/lockd/lockd.h> 16 16 17 + #include "internal.h" 17 18 #include "nfs4_fs.h" 18 19 #include "netns.h" 19 20 #include "sysfs.h" ··· 229 228 rpc_cancel_tasks(clnt, -EIO, shutdown_match_client, NULL); 230 229 } 231 230 231 + /* 232 + * Shut down the nfs_client only once all the superblocks 233 + * have been shut down. 234 + */ 235 + static void shutdown_nfs_client(struct nfs_client *clp) 236 + { 237 + struct nfs_server *server; 238 + rcu_read_lock(); 239 + list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { 240 + if (!(server->flags & NFS_MOUNT_SHUTDOWN)) { 241 + rcu_read_unlock(); 242 + return; 243 + } 244 + } 245 + rcu_read_unlock(); 246 + nfs_mark_client_ready(clp, -EIO); 247 + shutdown_client(clp->cl_rpcclient); 248 + } 249 + 232 250 static ssize_t 233 251 shutdown_show(struct kobject *kobj, struct kobj_attribute *attr, 234 252 char *buf) ··· 279 259 280 260 server->flags |= NFS_MOUNT_SHUTDOWN; 281 261 shutdown_client(server->client); 282 - shutdown_client(server->nfs_client->cl_rpcclient); 283 262 284 263 if (!IS_ERR(server->client_acl)) 285 264 shutdown_client(server->client_acl); ··· 286 267 if (server->nlm_host) 287 268 shutdown_client(server->nlm_host->h_rpcclnt); 288 269 out: 270 + shutdown_nfs_client(server->nfs_client); 289 271 return count; 290 272 } 291 273 292 274 static struct kobj_attribute nfs_sysfs_attr_shutdown = __ATTR_RW(shutdown); 275 + 276 + #if IS_ENABLED(CONFIG_NFS_V4_1) 277 + static ssize_t 278 + implid_domain_show(struct kobject *kobj, struct kobj_attribute *attr, 279 + char *buf) 280 + { 281 + struct nfs_server *server = container_of(kobj, struct nfs_server, kobj); 282 + struct nfs41_impl_id *impl_id = server->nfs_client->cl_implid; 283 + 284 + if (!impl_id || strlen(impl_id->domain) == 0) 285 + return 0; //sysfs_emit(buf, ""); 286 + return sysfs_emit(buf, "%s\n", impl_id->domain); 287 + } 288 + 289 + static struct kobj_attribute nfs_sysfs_attr_implid_domain = 
__ATTR_RO(implid_domain); 290 + 291 + 292 + static ssize_t 293 + implid_name_show(struct kobject *kobj, struct kobj_attribute *attr, 294 + char *buf) 295 + { 296 + struct nfs_server *server = container_of(kobj, struct nfs_server, kobj); 297 + struct nfs41_impl_id *impl_id = server->nfs_client->cl_implid; 298 + 299 + if (!impl_id || strlen(impl_id->name) == 0) 300 + return 0; //sysfs_emit(buf, ""); 301 + return sysfs_emit(buf, "%s\n", impl_id->name); 302 + } 303 + 304 + static struct kobj_attribute nfs_sysfs_attr_implid_name = __ATTR_RO(implid_name); 305 + 306 + #endif /* IS_ENABLED(CONFIG_NFS_V4_1) */ 293 307 294 308 #define RPC_CLIENT_NAME_SIZE 64 295 309 ··· 361 309 .child_ns_type = nfs_netns_object_child_ns_type, 362 310 }; 363 311 312 + #if IS_ENABLED(CONFIG_NFS_V4_1) 313 + static void nfs_sysfs_add_nfsv41_server(struct nfs_server *server) 314 + { 315 + int ret; 316 + 317 + if (!server->nfs_client->cl_implid) 318 + return; 319 + 320 + ret = sysfs_create_file_ns(&server->kobj, &nfs_sysfs_attr_implid_domain.attr, 321 + nfs_netns_server_namespace(&server->kobj)); 322 + if (ret < 0) 323 + pr_warn("NFS: sysfs_create_file_ns for server-%d failed (%d)\n", 324 + server->s_sysfs_id, ret); 325 + 326 + ret = sysfs_create_file_ns(&server->kobj, &nfs_sysfs_attr_implid_name.attr, 327 + nfs_netns_server_namespace(&server->kobj)); 328 + if (ret < 0) 329 + pr_warn("NFS: sysfs_create_file_ns for server-%d failed (%d)\n", 330 + server->s_sysfs_id, ret); 331 + } 332 + #else /* CONFIG_NFS_V4_1 */ 333 + static inline void nfs_sysfs_add_nfsv41_server(struct nfs_server *server) 334 + { 335 + } 336 + #endif /* CONFIG_NFS_V4_1 */ 337 + 364 338 void nfs_sysfs_add_server(struct nfs_server *server) 365 339 { 366 340 int ret; ··· 403 325 if (ret < 0) 404 326 pr_warn("NFS: sysfs_create_file_ns for server-%d failed (%d)\n", 405 327 server->s_sysfs_id, ret); 328 + 329 + nfs_sysfs_add_nfsv41_server(server); 406 330 } 407 331 EXPORT_SYMBOL_GPL(nfs_sysfs_add_server); 408 332
+3 -1
fs/nfs/write.c
··· 579 579 580 580 while (!nfs_lock_request(head)) { 581 581 ret = nfs_wait_on_request(head); 582 - if (ret < 0) 582 + if (ret < 0) { 583 + nfs_release_request(head); 583 584 return ERR_PTR(ret); 585 + } 584 586 } 585 587 586 588 /* Ensure that nobody removed the request before we locked it */
+2
include/linux/nfs4.h
··· 300 300 /* error codes for internal client use */ 301 301 #define NFS4ERR_RESET_TO_MDS 12001 302 302 #define NFS4ERR_RESET_TO_PNFS 12002 303 + #define NFS4ERR_FATAL_IOERROR 12003 303 304 304 305 static inline bool seqid_mutating_err(u32 err) 305 306 { ··· 692 691 NFSPROC4_CLNT_LISTXATTRS, 693 692 NFSPROC4_CLNT_REMOVEXATTR, 694 693 NFSPROC4_CLNT_READ_PLUS, 694 + NFSPROC4_CLNT_OFFLOAD_STATUS, 695 695 }; 696 696 697 697 /* nfs41 types */
+8
include/linux/nfs_fs_sb.h
··· 50 50 #define NFS_CS_DS 7 /* - Server is a DS */ 51 51 #define NFS_CS_REUSEPORT 8 /* - reuse src port on reconnect */ 52 52 #define NFS_CS_PNFS 9 /* - Server used for pnfs */ 53 + #define NFS_CS_NETUNREACH_FATAL 10 /* - ENETUNREACH errors are fatal */ 53 54 struct sockaddr_storage cl_addr; /* server identifier */ 54 55 size_t cl_addrlen; 55 56 char * cl_hostname; /* hostname of server */ ··· 168 167 #define NFS_MOUNT_TRUNK_DISCOVERY 0x04000000 169 168 #define NFS_MOUNT_SHUTDOWN 0x08000000 170 169 #define NFS_MOUNT_NO_ALIGNWRITE 0x10000000 170 + #define NFS_MOUNT_FORCE_RDIRPLUS 0x20000000 171 + #define NFS_MOUNT_NETUNREACH_FATAL 0x40000000 171 172 172 173 unsigned int fattr_valid; /* Valid attributes */ 173 174 unsigned int caps; /* server capabilities */ ··· 253 250 struct list_head ss_copies; 254 251 struct list_head ss_src_copies; 255 252 253 + unsigned long delegation_flags; 254 + #define NFS4SERV_DELEGRETURN (1) 255 + #define NFS4SERV_DELEGATION_EXPIRED (2) 256 + #define NFS4SERV_DELEGRETURN_DELAYED (3) 256 257 unsigned long delegation_gen; 257 258 unsigned long mig_gen; 258 259 unsigned long mig_status; ··· 296 289 #define NFS_CAP_CASE_INSENSITIVE (1U << 6) 297 290 #define NFS_CAP_CASE_PRESERVING (1U << 7) 298 291 #define NFS_CAP_REBOOT_LAYOUTRETURN (1U << 8) 292 + #define NFS_CAP_OFFLOAD_STATUS (1U << 9) 299 293 #define NFS_CAP_OPEN_XOR (1U << 12) 300 294 #define NFS_CAP_DELEGTIME (1U << 13) 301 295 #define NFS_CAP_POSIX_LOCK (1U << 14)
+3 -2
include/linux/nfs_xdr.h
··· 1515 1515 1516 1516 struct nfs42_offload_status_res { 1517 1517 struct nfs4_sequence_res osr_seq_res; 1518 - uint64_t osr_count; 1519 - int osr_status; 1518 + u64 osr_count; 1519 + int complete_count; 1520 + u32 osr_complete; 1520 1521 }; 1521 1522 1522 1523 struct nfs42_copy_notify_args {
+4 -1
include/linux/sunrpc/clnt.h
··· 64 64 cl_noretranstimeo: 1,/* No retransmit timeouts */ 65 65 cl_autobind : 1,/* use getport() */ 66 66 cl_chatty : 1,/* be verbose */ 67 - cl_shutdown : 1;/* rpc immediate -EIO */ 67 + cl_shutdown : 1,/* rpc immediate -EIO */ 68 + cl_netunreach_fatal : 1; 69 + /* Treat ENETUNREACH errors as fatal */ 68 70 struct xprtsec_parms cl_xprtsec; /* transport security policy */ 69 71 70 72 struct rpc_rtt * cl_rtt; /* RTO estimator data */ ··· 177 175 #define RPC_CLNT_CREATE_SOFTERR (1UL << 10) 178 176 #define RPC_CLNT_CREATE_REUSEPORT (1UL << 11) 179 177 #define RPC_CLNT_CREATE_CONNECTED (1UL << 12) 178 + #define RPC_CLNT_CREATE_NETUNREACH_FATAL (1UL << 13) 180 179 181 180 struct rpc_clnt *rpc_create(struct rpc_create_args *args); 182 181 struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *,
+1
include/linux/sunrpc/sched.h
··· 134 134 #define RPC_TASK_MOVEABLE 0x0004 /* nfs4.1+ rpc tasks */ 135 135 #define RPC_TASK_NULLCREDS 0x0010 /* Use AUTH_NULL credential */ 136 136 #define RPC_CALL_MAJORSEEN 0x0020 /* major timeout seen */ 137 + #define RPC_TASK_NETUNREACH_FATAL 0x0040 /* ENETUNREACH is fatal */ 137 138 #define RPC_TASK_DYNAMIC 0x0080 /* task was kmalloc'ed */ 138 139 #define RPC_TASK_NO_ROUND_ROBIN 0x0100 /* send requests on "main" xprt */ 139 140 #define RPC_TASK_SOFT 0x0200 /* Use soft timeouts */
+1
include/linux/sunrpc/xprtmultipath.h
··· 56 56 struct rpc_xprt *xprt); 57 57 extern void rpc_xprt_switch_remove_xprt(struct rpc_xprt_switch *xps, 58 58 struct rpc_xprt *xprt, bool offline); 59 + extern struct rpc_xprt *rpc_xprt_switch_get_main_xprt(struct rpc_xprt_switch *xps); 59 60 60 61 extern void xprt_iter_init(struct rpc_xprt_iter *xpi, 61 62 struct rpc_xprt_switch *xps);
+1
include/trace/events/sunrpc.h
··· 343 343 { RPC_TASK_MOVEABLE, "MOVEABLE" }, \ 344 344 { RPC_TASK_NULLCREDS, "NULLCREDS" }, \ 345 345 { RPC_CALL_MAJORSEEN, "MAJORSEEN" }, \ 346 + { RPC_TASK_NETUNREACH_FATAL, "NETUNREACH_FATAL"}, \ 346 347 { RPC_TASK_DYNAMIC, "DYNAMIC" }, \ 347 348 { RPC_TASK_NO_ROUND_ROBIN, "NO_ROUND_ROBIN" }, \ 348 349 { RPC_TASK_SOFT, "SOFT" }, \
+22 -11
net/sunrpc/clnt.c
··· 270 270 old = rcu_dereference_protected(clnt->cl_xprt, 271 271 lockdep_is_held(&clnt->cl_lock)); 272 272 273 - if (!xprt_bound(xprt)) 274 - clnt->cl_autobind = 1; 275 - 276 273 clnt->cl_timeout = timeout; 277 274 rcu_assign_pointer(clnt->cl_xprt, xprt); 278 275 spin_unlock(&clnt->cl_lock); ··· 509 512 clnt->cl_discrtry = 1; 510 513 if (!(args->flags & RPC_CLNT_CREATE_QUIET)) 511 514 clnt->cl_chatty = 1; 515 + if (args->flags & RPC_CLNT_CREATE_NETUNREACH_FATAL) 516 + clnt->cl_netunreach_fatal = 1; 512 517 513 518 return clnt; 514 519 } ··· 661 662 new->cl_noretranstimeo = clnt->cl_noretranstimeo; 662 663 new->cl_discrtry = clnt->cl_discrtry; 663 664 new->cl_chatty = clnt->cl_chatty; 665 + new->cl_netunreach_fatal = clnt->cl_netunreach_fatal; 664 666 new->cl_principal = clnt->cl_principal; 665 667 new->cl_max_connect = clnt->cl_max_connect; 666 668 return new; ··· 1195 1195 task->tk_flags |= RPC_TASK_TIMEOUT; 1196 1196 if (clnt->cl_noretranstimeo) 1197 1197 task->tk_flags |= RPC_TASK_NO_RETRANS_TIMEOUT; 1198 + if (clnt->cl_netunreach_fatal) 1199 + task->tk_flags |= RPC_TASK_NETUNREACH_FATAL; 1198 1200 atomic_inc(&clnt->cl_task_count); 1199 1201 } 1200 1202 ··· 2104 2102 case -EPROTONOSUPPORT: 2105 2103 trace_rpcb_bind_version_err(task); 2106 2104 goto retry_timeout; 2105 + case -ENETDOWN: 2106 + case -ENETUNREACH: 2107 + if (task->tk_flags & RPC_TASK_NETUNREACH_FATAL) 2108 + break; 2109 + fallthrough; 2107 2110 case -ECONNREFUSED: /* connection problems */ 2108 2111 case -ECONNRESET: 2109 2112 case -ECONNABORTED: 2110 2113 case -ENOTCONN: 2111 2114 case -EHOSTDOWN: 2112 - case -ENETDOWN: 2113 2115 case -EHOSTUNREACH: 2114 - case -ENETUNREACH: 2115 2116 case -EPIPE: 2116 2117 trace_rpcb_unreachable_err(task); 2117 2118 if (!RPC_IS_SOFTCONN(task)) { ··· 2196 2191 2197 2192 task->tk_status = 0; 2198 2193 switch (status) { 2194 + case -ENETDOWN: 2195 + case -ENETUNREACH: 2196 + if (task->tk_flags & RPC_TASK_NETUNREACH_FATAL) 2197 + break; 2198 + fallthrough; 2199 
2199 case -ECONNREFUSED: 2200 2200 case -ECONNRESET: 2201 2201 /* A positive refusal suggests a rebind is needed. */ 2202 - if (RPC_IS_SOFTCONN(task)) 2203 - break; 2204 2202 if (clnt->cl_autobind) { 2205 2203 rpc_force_rebind(clnt); 2204 + if (RPC_IS_SOFTCONN(task)) 2205 + break; 2206 2206 goto out_retry; 2207 2207 } 2208 2208 fallthrough; 2209 2209 case -ECONNABORTED: 2210 - case -ENETDOWN: 2211 - case -ENETUNREACH: 2212 2210 case -EHOSTUNREACH: 2213 2211 case -EPIPE: 2214 2212 case -EPROTO: ··· 2463 2455 trace_rpc_call_status(task); 2464 2456 task->tk_status = 0; 2465 2457 switch(status) { 2466 - case -EHOSTDOWN: 2467 2458 case -ENETDOWN: 2468 - case -EHOSTUNREACH: 2469 2459 case -ENETUNREACH: 2460 + if (task->tk_flags & RPC_TASK_NETUNREACH_FATAL) 2461 + goto out_exit; 2462 + fallthrough; 2463 + case -EHOSTDOWN: 2464 + case -EHOSTUNREACH: 2470 2465 case -EPERM: 2471 2466 if (RPC_IS_SOFTCONN(task)) 2472 2467 goto out_exit;
+3 -2
net/sunrpc/rpcb_clnt.c
··· 820 820 } 821 821 822 822 trace_rpcb_setport(child, map->r_status, map->r_port); 823 - xprt->ops->set_port(xprt, map->r_port); 824 - if (map->r_port) 823 + if (map->r_port) { 824 + xprt->ops->set_port(xprt, map->r_port); 825 825 xprt_set_bound(xprt); 826 + } 826 827 } 827 828 828 829 /*
+2
net/sunrpc/sched.c
··· 276 276 277 277 static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode) 278 278 { 279 + if (unlikely(current->flags & PF_EXITING)) 280 + return -EINTR; 279 281 schedule(); 280 282 if (signal_pending_state(mode, current)) 281 283 return -ERESTARTSYS;
+202
net/sunrpc/sysfs.c
··· 59 59 return NULL; 60 60 } 61 61 62 + static inline struct rpc_clnt * 63 + rpc_sysfs_client_kobj_get_clnt(struct kobject *kobj) 64 + { 65 + struct rpc_sysfs_client *c = container_of(kobj, 66 + struct rpc_sysfs_client, kobject); 67 + struct rpc_clnt *ret = c->clnt; 68 + 69 + return refcount_inc_not_zero(&ret->cl_count) ? ret : NULL; 70 + } 71 + 62 72 static inline struct rpc_xprt * 63 73 rpc_sysfs_xprt_kobj_get_xprt(struct kobject *kobj) 64 74 { ··· 94 84 struct rpc_sysfs_xprt_switch, kobject); 95 85 96 86 return xprt_switch_get(x->xprt_switch); 87 + } 88 + 89 + static ssize_t rpc_sysfs_clnt_version_show(struct kobject *kobj, 90 + struct kobj_attribute *attr, 91 + char *buf) 92 + { 93 + struct rpc_clnt *clnt = rpc_sysfs_client_kobj_get_clnt(kobj); 94 + ssize_t ret; 95 + 96 + if (!clnt) 97 + return sprintf(buf, "<closed>\n"); 98 + 99 + ret = sprintf(buf, "%u", clnt->cl_vers); 100 + refcount_dec(&clnt->cl_count); 101 + return ret; 102 + } 103 + 104 + static ssize_t rpc_sysfs_clnt_program_show(struct kobject *kobj, 105 + struct kobj_attribute *attr, 106 + char *buf) 107 + { 108 + struct rpc_clnt *clnt = rpc_sysfs_client_kobj_get_clnt(kobj); 109 + ssize_t ret; 110 + 111 + if (!clnt) 112 + return sprintf(buf, "<closed>\n"); 113 + 114 + ret = sprintf(buf, "%s", clnt->cl_program->name); 115 + refcount_dec(&clnt->cl_count); 116 + return ret; 117 + } 118 + 119 + static ssize_t rpc_sysfs_clnt_max_connect_show(struct kobject *kobj, 120 + struct kobj_attribute *attr, 121 + char *buf) 122 + { 123 + struct rpc_clnt *clnt = rpc_sysfs_client_kobj_get_clnt(kobj); 124 + ssize_t ret; 125 + 126 + if (!clnt) 127 + return sprintf(buf, "<closed>\n"); 128 + 129 + ret = sprintf(buf, "%u\n", clnt->cl_max_connect); 130 + refcount_dec(&clnt->cl_count); 131 + return ret; 97 132 } 98 133 99 134 static ssize_t rpc_sysfs_xprt_dstaddr_show(struct kobject *kobj, ··· 182 127 ret = sprintf(buf, "<not a socket>\n"); 183 128 xprt_put(xprt); 184 129 return ret; 130 + } 131 + 132 + static const char 
*xprtsec_strings[] = { 133 + [RPC_XPRTSEC_NONE] = "none", 134 + [RPC_XPRTSEC_TLS_ANON] = "tls-anon", 135 + [RPC_XPRTSEC_TLS_X509] = "tls-x509", 136 + }; 137 + 138 + static ssize_t rpc_sysfs_xprt_xprtsec_show(struct kobject *kobj, 139 + struct kobj_attribute *attr, 140 + char *buf) 141 + { 142 + struct rpc_xprt *xprt = rpc_sysfs_xprt_kobj_get_xprt(kobj); 143 + ssize_t ret; 144 + 145 + if (!xprt) { 146 + ret = sprintf(buf, "<closed>\n"); 147 + goto out; 148 + } 149 + 150 + ret = sprintf(buf, "%s\n", xprtsec_strings[xprt->xprtsec.policy]); 151 + xprt_put(xprt); 152 + out: 153 + return ret; 154 + 185 155 } 186 156 187 157 static ssize_t rpc_sysfs_xprt_info_show(struct kobject *kobj, ··· 286 206 return ret; 287 207 } 288 208 209 + static ssize_t rpc_sysfs_xprt_del_xprt_show(struct kobject *kobj, 210 + struct kobj_attribute *attr, 211 + char *buf) 212 + { 213 + return sprintf(buf, "# delete this xprt\n"); 214 + } 215 + 216 + 289 217 static ssize_t rpc_sysfs_xprt_switch_info_show(struct kobject *kobj, 290 218 struct kobj_attribute *attr, 291 219 char *buf) ··· 311 223 atomic_long_read(&xprt_switch->xps_queuelen)); 312 224 xprt_switch_put(xprt_switch); 313 225 return ret; 226 + } 227 + 228 + static ssize_t rpc_sysfs_xprt_switch_add_xprt_show(struct kobject *kobj, 229 + struct kobj_attribute *attr, 230 + char *buf) 231 + { 232 + return sprintf(buf, "# add one xprt to this xprt_switch\n"); 233 + } 234 + 235 + static ssize_t rpc_sysfs_xprt_switch_add_xprt_store(struct kobject *kobj, 236 + struct kobj_attribute *attr, 237 + const char *buf, size_t count) 238 + { 239 + struct rpc_xprt_switch *xprt_switch = 240 + rpc_sysfs_xprt_switch_kobj_get_xprt(kobj); 241 + struct xprt_create xprt_create_args; 242 + struct rpc_xprt *xprt, *new; 243 + 244 + if (!xprt_switch) 245 + return 0; 246 + 247 + xprt = rpc_xprt_switch_get_main_xprt(xprt_switch); 248 + if (!xprt) 249 + goto out; 250 + 251 + xprt_create_args.ident = xprt->xprt_class->ident; 252 + xprt_create_args.net = xprt->xprt_net; 
253 + xprt_create_args.dstaddr = (struct sockaddr *)&xprt->addr; 254 + xprt_create_args.addrlen = xprt->addrlen; 255 + xprt_create_args.servername = xprt->servername; 256 + xprt_create_args.bc_xprt = xprt->bc_xprt; 257 + xprt_create_args.xprtsec = xprt->xprtsec; 258 + xprt_create_args.connect_timeout = xprt->connect_timeout; 259 + xprt_create_args.reconnect_timeout = xprt->max_reconnect_timeout; 260 + 261 + new = xprt_create_transport(&xprt_create_args); 262 + if (IS_ERR_OR_NULL(new)) { 263 + count = PTR_ERR(new); 264 + goto out_put_xprt; 265 + } 266 + 267 + rpc_xprt_switch_add_xprt(xprt_switch, new); 268 + xprt_put(new); 269 + 270 + out_put_xprt: 271 + xprt_put(xprt); 272 + out: 273 + xprt_switch_put(xprt_switch); 274 + return count; 314 275 } 315 276 316 277 static ssize_t rpc_sysfs_xprt_dstaddr_store(struct kobject *kobj, ··· 472 335 return count; 473 336 } 474 337 338 + static ssize_t rpc_sysfs_xprt_del_xprt(struct kobject *kobj, 339 + struct kobj_attribute *attr, 340 + const char *buf, size_t count) 341 + { 342 + struct rpc_xprt *xprt = rpc_sysfs_xprt_kobj_get_xprt(kobj); 343 + struct rpc_xprt_switch *xps = rpc_sysfs_xprt_kobj_get_xprt_switch(kobj); 344 + 345 + if (!xprt || !xps) { 346 + count = 0; 347 + goto out; 348 + } 349 + 350 + if (xprt->main) { 351 + count = -EINVAL; 352 + goto release_tasks; 353 + } 354 + 355 + if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE)) { 356 + count = -EINTR; 357 + goto out_put; 358 + } 359 + 360 + xprt_set_offline_locked(xprt, xps); 361 + xprt_delete_locked(xprt, xps); 362 + 363 + release_tasks: 364 + xprt_release_write(xprt, NULL); 365 + out_put: 366 + xprt_put(xprt); 367 + xprt_switch_put(xps); 368 + out: 369 + return count; 370 + } 371 + 475 372 int rpc_sysfs_init(void) 476 373 { 477 374 rpc_sunrpc_kset = kset_create_and_add("sunrpc", NULL, kernel_kobj); ··· 569 398 kobject)->xprt->xprt_net; 570 399 } 571 400 401 + static struct kobj_attribute rpc_sysfs_clnt_version = __ATTR(rpc_version, 402 + 0444, 
rpc_sysfs_clnt_version_show, NULL); 403 + 404 + static struct kobj_attribute rpc_sysfs_clnt_program = __ATTR(program, 405 + 0444, rpc_sysfs_clnt_program_show, NULL); 406 + 407 + static struct kobj_attribute rpc_sysfs_clnt_max_connect = __ATTR(max_connect, 408 + 0444, rpc_sysfs_clnt_max_connect_show, NULL); 409 + 410 + static struct attribute *rpc_sysfs_rpc_clnt_attrs[] = { 411 + &rpc_sysfs_clnt_version.attr, 412 + &rpc_sysfs_clnt_program.attr, 413 + &rpc_sysfs_clnt_max_connect.attr, 414 + NULL, 415 + }; 416 + ATTRIBUTE_GROUPS(rpc_sysfs_rpc_clnt); 417 + 572 418 static struct kobj_attribute rpc_sysfs_xprt_dstaddr = __ATTR(dstaddr, 573 419 0644, rpc_sysfs_xprt_dstaddr_show, rpc_sysfs_xprt_dstaddr_store); 574 420 575 421 static struct kobj_attribute rpc_sysfs_xprt_srcaddr = __ATTR(srcaddr, 576 422 0644, rpc_sysfs_xprt_srcaddr_show, NULL); 423 + 424 + static struct kobj_attribute rpc_sysfs_xprt_xprtsec = __ATTR(xprtsec, 425 + 0644, rpc_sysfs_xprt_xprtsec_show, NULL); 577 426 578 427 static struct kobj_attribute rpc_sysfs_xprt_info = __ATTR(xprt_info, 579 428 0444, rpc_sysfs_xprt_info_show, NULL); ··· 601 410 static struct kobj_attribute rpc_sysfs_xprt_change_state = __ATTR(xprt_state, 602 411 0644, rpc_sysfs_xprt_state_show, rpc_sysfs_xprt_state_change); 603 412 413 + static struct kobj_attribute rpc_sysfs_xprt_del = __ATTR(del_xprt, 414 + 0644, rpc_sysfs_xprt_del_xprt_show, rpc_sysfs_xprt_del_xprt); 415 + 604 416 static struct attribute *rpc_sysfs_xprt_attrs[] = { 605 417 &rpc_sysfs_xprt_dstaddr.attr, 606 418 &rpc_sysfs_xprt_srcaddr.attr, 419 + &rpc_sysfs_xprt_xprtsec.attr, 607 420 &rpc_sysfs_xprt_info.attr, 608 421 &rpc_sysfs_xprt_change_state.attr, 422 + &rpc_sysfs_xprt_del.attr, 609 423 NULL, 610 424 }; 611 425 ATTRIBUTE_GROUPS(rpc_sysfs_xprt); ··· 618 422 static struct kobj_attribute rpc_sysfs_xprt_switch_info = 619 423 __ATTR(xprt_switch_info, 0444, rpc_sysfs_xprt_switch_info_show, NULL); 620 424 425 + static struct kobj_attribute rpc_sysfs_xprt_switch_add_xprt = 
426 + __ATTR(add_xprt, 0644, rpc_sysfs_xprt_switch_add_xprt_show, 427 + rpc_sysfs_xprt_switch_add_xprt_store); 428 + 621 429 static struct attribute *rpc_sysfs_xprt_switch_attrs[] = { 622 430 &rpc_sysfs_xprt_switch_info.attr, 431 + &rpc_sysfs_xprt_switch_add_xprt.attr, 623 432 NULL, 624 433 }; 625 434 ATTRIBUTE_GROUPS(rpc_sysfs_xprt_switch); 626 435 627 436 static const struct kobj_type rpc_sysfs_client_type = { 628 437 .release = rpc_sysfs_client_release, 438 + .default_groups = rpc_sysfs_rpc_clnt_groups, 629 439 .sysfs_ops = &kobj_sysfs_ops, 630 440 .namespace = rpc_sysfs_client_namespace, 631 441 };
+21
net/sunrpc/xprtmultipath.c
··· 92 92 xprt_put(xprt); 93 93 } 94 94 95 + /** 96 + * rpc_xprt_switch_get_main_xprt - Get the 'main' xprt for an xprt switch. 97 + * @xps: pointer to struct rpc_xprt_switch. 98 + */ 99 + struct rpc_xprt *rpc_xprt_switch_get_main_xprt(struct rpc_xprt_switch *xps) 100 + { 101 + struct rpc_xprt_iter xpi; 102 + struct rpc_xprt *xprt; 103 + 104 + xprt_iter_init_listall(&xpi, xps); 105 + 106 + xprt = xprt_iter_get_next(&xpi); 107 + while (xprt && !xprt->main) { 108 + xprt_put(xprt); 109 + xprt = xprt_iter_get_next(&xpi); 110 + } 111 + 112 + xprt_iter_destroy(&xpi); 113 + return xprt; 114 + } 115 + 95 116 static DEFINE_IDA(rpc_xprtswitch_ids); 96 117 97 118 void xprt_multipath_cleanup_ids(void)