Merge tag 'nfs-for-6.15-1' of git://git.linux-nfs.org/projects/trondmy/linux-nfs

Pull NFS client updates from Trond Myklebust:
"Bugfixes:

- Three fixes for looping in the NFSv4 state manager delegation code

- Fix for the NFSv4 state XDR code (Neil Brown)

- Fix a leaked reference in nfs_lock_and_join_requests()

- Fix a use-after-free in the delegation return code

Features:

- Implement the NFSv4.2 copy offload OFFLOAD_STATUS operation to
allow monitoring of an in-progress copy

- Add a mount option to force NFSv3/NFSv4 to use READDIRPLUS in a
getdents() call

- SUNRPC now allows some basic management of an existing RPC client's
connections using sysfs

- Improvements to the automated teardown of an NFS client when the
container it was initiated from gets killed

- Improvements to prevent tasks from getting stuck in a killable wait
state after calling exit_signals()"

* tag 'nfs-for-6.15-1' of git://git.linux-nfs.org/projects/trondmy/linux-nfs: (29 commits)
nfs: Add missing release on error in nfs_lock_and_join_requests()
NFSv4: Check for delegation validity in nfs_start_delegation_return_locked()
NFS: Don't allow waiting for exiting tasks
SUNRPC: Don't allow waiting for exiting tasks
NFSv4: Treat ENETUNREACH errors as fatal for state recovery
NFSv4: clp->cl_cons_state < 0 signifies an invalid nfs_client
NFSv4: Further cleanups to shutdown loops
NFS: Shut down the nfs_client only after all the superblocks
SUNRPC: rpc_clnt_set_transport() must not change the autobind setting
SUNRPC: rpcbind should never reset the port to the value '0'
pNFS/flexfiles: Report ENETDOWN as a connection error
pNFS/flexfiles: Treat ENETUNREACH errors as fatal in containers
NFS: Treat ENETUNREACH errors as fatal in containers
NFS: Add a mount option to make ENETUNREACH errors fatal
sunrpc: Add a sysfs file for one-step xprt deletion
sunrpc: Add a sysfs file for adding a new xprt
sunrpc: Add a sysfs files for rpc_clnt information
sunrpc: Add a sysfs attr for xprtsec
NFS: Add implid to sysfs
NFS: Extend rdirplus mount option with "force|none"
...

+807 -74
+5
fs/nfs/client.c
··· 546 args.flags |= RPC_CLNT_CREATE_NOPING; 547 if (test_bit(NFS_CS_REUSEPORT, &clp->cl_flags)) 548 args.flags |= RPC_CLNT_CREATE_REUSEPORT; 549 550 if (!IS_ERR(clp->cl_rpcclient)) 551 return 0; ··· 710 ctx->timeo, ctx->retrans); 711 if (ctx->flags & NFS_MOUNT_NORESVPORT) 712 set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags); 713 714 /* Allocate or find a client reference we can use */ 715 clp = nfs_get_client(&cl_init);
··· 546 args.flags |= RPC_CLNT_CREATE_NOPING; 547 if (test_bit(NFS_CS_REUSEPORT, &clp->cl_flags)) 548 args.flags |= RPC_CLNT_CREATE_REUSEPORT; 549 + if (test_bit(NFS_CS_NETUNREACH_FATAL, &clp->cl_flags)) 550 + args.flags |= RPC_CLNT_CREATE_NETUNREACH_FATAL; 551 552 if (!IS_ERR(clp->cl_rpcclient)) 553 return 0; ··· 708 ctx->timeo, ctx->retrans); 709 if (ctx->flags & NFS_MOUNT_NORESVPORT) 710 set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags); 711 + 712 + if (ctx->flags & NFS_MOUNT_NETUNREACH_FATAL) 713 + __set_bit(NFS_CS_NETUNREACH_FATAL, &cl_init.init_flags); 714 715 /* Allocate or find a client reference we can use */ 716 clp = nfs_get_client(&cl_init);
+44 -22
fs/nfs/delegation.c
··· 79 struct nfs_delegation *delegation) 80 { 81 set_bit(NFS_DELEGATION_RETURN, &delegation->flags); 82 set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state); 83 } 84 ··· 307 if (delegation == NULL) 308 goto out; 309 spin_lock(&delegation->lock); 310 - if (!test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) { 311 clear_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags); 312 /* Refcount matched in nfs_end_delegation_return() */ 313 ret = nfs_get_delegation(delegation); ··· 332 } 333 334 static void nfs_abort_delegation_return(struct nfs_delegation *delegation, 335 - struct nfs_client *clp, int err) 336 { 337 - 338 spin_lock(&delegation->lock); 339 clear_bit(NFS_DELEGATION_RETURNING, &delegation->flags); 340 if (err == -EAGAIN) { 341 set_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags); 342 - set_bit(NFS4CLNT_DELEGRETURN_DELAYED, &clp->cl_state); 343 } 344 spin_unlock(&delegation->lock); 345 } ··· 551 */ 552 static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation *delegation, int issync) 553 { 554 - struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 555 unsigned int mode = O_WRONLY | O_RDWR; 556 int err = 0; 557 ··· 573 /* 574 * Guard against state recovery 575 */ 576 - err = nfs4_wait_clnt_recover(clp); 577 } 578 579 if (err) { 580 - nfs_abort_delegation_return(delegation, clp, err); 581 goto out; 582 } 583 ··· 594 595 if (test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags)) 596 ret = true; 597 - else if (test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags)) { 598 - struct inode *inode; 599 - 600 - spin_lock(&delegation->lock); 601 - inode = delegation->inode; 602 - if (inode && list_empty(&NFS_I(inode)->open_files)) 603 - ret = true; 604 - spin_unlock(&delegation->lock); 605 - } 606 - if (ret) 607 - clear_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags); 608 if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags) || 609 test_bit(NFS_DELEGATION_RETURN_DELAYED, 
&delegation->flags) || 610 test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) ··· 612 struct nfs_delegation *place_holder_deleg = NULL; 613 int err = 0; 614 615 restart: 616 /* 617 * To avoid quadratic looping we hold a reference ··· 666 cond_resched(); 667 if (!err) 668 goto restart; 669 set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state); 670 goto out; 671 } ··· 681 struct nfs_delegation *d; 682 bool ret = false; 683 684 list_for_each_entry_rcu (d, &server->delegations, super_list) { 685 if (!test_bit(NFS_DELEGATION_RETURN_DELAYED, &d->flags)) 686 continue; ··· 691 clear_bit(NFS_DELEGATION_RETURN_DELAYED, &d->flags); 692 ret = true; 693 } 694 return ret; 695 } 696 ··· 879 return nfs4_inode_return_delegation(inode); 880 } 881 882 - static void nfs_mark_return_if_closed_delegation(struct nfs_server *server, 883 - struct nfs_delegation *delegation) 884 { 885 - set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags); 886 - set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state); 887 } 888 889 static bool nfs_server_mark_return_all_delegations(struct nfs_server *server) ··· 1291 return; 1292 clear_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags); 1293 set_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags); 1294 set_bit(NFS4CLNT_DELEGATION_EXPIRED, &server->nfs_client->cl_state); 1295 } 1296 ··· 1370 nfs4_stateid stateid; 1371 unsigned long gen = ++server->delegation_gen; 1372 1373 restart: 1374 rcu_read_lock(); 1375 list_for_each_entry_rcu(delegation, &server->delegations, super_list) { ··· 1402 goto restart; 1403 } 1404 nfs_inode_mark_test_expired_delegation(server,inode); 1405 iput(inode); 1406 return -EAGAIN; 1407 }
··· 79 struct nfs_delegation *delegation) 80 { 81 set_bit(NFS_DELEGATION_RETURN, &delegation->flags); 82 + set_bit(NFS4SERV_DELEGRETURN, &server->delegation_flags); 83 set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state); 84 } 85 ··· 306 if (delegation == NULL) 307 goto out; 308 spin_lock(&delegation->lock); 309 + if (delegation->inode && 310 + !test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) { 311 clear_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags); 312 /* Refcount matched in nfs_end_delegation_return() */ 313 ret = nfs_get_delegation(delegation); ··· 330 } 331 332 static void nfs_abort_delegation_return(struct nfs_delegation *delegation, 333 + struct nfs_server *server, int err) 334 { 335 spin_lock(&delegation->lock); 336 clear_bit(NFS_DELEGATION_RETURNING, &delegation->flags); 337 if (err == -EAGAIN) { 338 set_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags); 339 + set_bit(NFS4SERV_DELEGRETURN_DELAYED, 340 + &server->delegation_flags); 341 + set_bit(NFS4CLNT_DELEGRETURN_DELAYED, 342 + &server->nfs_client->cl_state); 343 } 344 spin_unlock(&delegation->lock); 345 } ··· 547 */ 548 static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation *delegation, int issync) 549 { 550 + struct nfs_server *server = NFS_SERVER(inode); 551 unsigned int mode = O_WRONLY | O_RDWR; 552 int err = 0; 553 ··· 569 /* 570 * Guard against state recovery 571 */ 572 + err = nfs4_wait_clnt_recover(server->nfs_client); 573 } 574 575 if (err) { 576 + nfs_abort_delegation_return(delegation, server, err); 577 goto out; 578 } 579 ··· 590 591 if (test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags)) 592 ret = true; 593 if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags) || 594 test_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags) || 595 test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) ··· 619 struct nfs_delegation *place_holder_deleg = NULL; 620 int err = 0; 621 622 + if (!test_and_clear_bit(NFS4SERV_DELEGRETURN, 
623 + &server->delegation_flags)) 624 + return 0; 625 restart: 626 /* 627 * To avoid quadratic looping we hold a reference ··· 670 cond_resched(); 671 if (!err) 672 goto restart; 673 + set_bit(NFS4SERV_DELEGRETURN, &server->delegation_flags); 674 set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state); 675 goto out; 676 } ··· 684 struct nfs_delegation *d; 685 bool ret = false; 686 687 + if (!test_and_clear_bit(NFS4SERV_DELEGRETURN_DELAYED, 688 + &server->delegation_flags)) 689 + goto out; 690 list_for_each_entry_rcu (d, &server->delegations, super_list) { 691 if (!test_bit(NFS_DELEGATION_RETURN_DELAYED, &d->flags)) 692 continue; ··· 691 clear_bit(NFS_DELEGATION_RETURN_DELAYED, &d->flags); 692 ret = true; 693 } 694 + out: 695 return ret; 696 } 697 ··· 878 return nfs4_inode_return_delegation(inode); 879 } 880 881 + static void 882 + nfs_mark_return_if_closed_delegation(struct nfs_server *server, 883 + struct nfs_delegation *delegation) 884 { 885 + struct inode *inode; 886 + 887 + if (test_bit(NFS_DELEGATION_RETURN, &delegation->flags) || 888 + test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags)) 889 + return; 890 + spin_lock(&delegation->lock); 891 + inode = delegation->inode; 892 + if (!inode) 893 + goto out; 894 + if (list_empty(&NFS_I(inode)->open_files)) 895 + nfs_mark_return_delegation(server, delegation); 896 + else 897 + set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags); 898 + out: 899 + spin_unlock(&delegation->lock); 900 } 901 902 static bool nfs_server_mark_return_all_delegations(struct nfs_server *server) ··· 1276 return; 1277 clear_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags); 1278 set_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags); 1279 + set_bit(NFS4SERV_DELEGATION_EXPIRED, &server->delegation_flags); 1280 set_bit(NFS4CLNT_DELEGATION_EXPIRED, &server->nfs_client->cl_state); 1281 } 1282 ··· 1354 nfs4_stateid stateid; 1355 unsigned long gen = ++server->delegation_gen; 1356 1357 + if 
(!test_and_clear_bit(NFS4SERV_DELEGATION_EXPIRED, 1358 + &server->delegation_flags)) 1359 + return 0; 1360 restart: 1361 rcu_read_lock(); 1362 list_for_each_entry_rcu(delegation, &server->delegations, super_list) { ··· 1383 goto restart; 1384 } 1385 nfs_inode_mark_test_expired_delegation(server,inode); 1386 + set_bit(NFS4SERV_DELEGATION_EXPIRED, &server->delegation_flags); 1387 + set_bit(NFS4CLNT_DELEGATION_EXPIRED, 1388 + &server->nfs_client->cl_state); 1389 iput(inode); 1390 return -EAGAIN; 1391 }
+2
fs/nfs/dir.c
··· 666 { 667 if (!nfs_server_capable(dir, NFS_CAP_READDIRPLUS)) 668 return false; 669 if (ctx->pos == 0 || 670 cache_hits + cache_misses > NFS_READDIR_CACHE_USAGE_THRESHOLD) 671 return true;
··· 666 { 667 if (!nfs_server_capable(dir, NFS_CAP_READDIRPLUS)) 668 return false; 669 + if (NFS_SERVER(dir)->flags & NFS_MOUNT_FORCE_RDIRPLUS) 670 + return true; 671 if (ctx->pos == 0 || 672 cache_hits + cache_misses > NFS_READDIR_CACHE_USAGE_THRESHOLD) 673 return true;
+22 -2
fs/nfs/flexfilelayout/flexfilelayout.c
··· 1154 rpc_wake_up(&tbl->slot_tbl_waitq); 1155 goto reset; 1156 /* RPC connection errors */ 1157 case -ECONNREFUSED: 1158 case -EHOSTDOWN: 1159 case -EHOSTUNREACH: 1160 - case -ENETUNREACH: 1161 case -EIO: 1162 case -ETIMEDOUT: 1163 case -EPIPE: ··· 1187 1188 /* Retry all errors through either pNFS or MDS except for -EJUKEBOX */ 1189 static int ff_layout_async_handle_error_v3(struct rpc_task *task, 1190 struct pnfs_layout_segment *lseg, 1191 u32 idx) 1192 { ··· 1205 case -EJUKEBOX: 1206 nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY); 1207 goto out_retry; 1208 default: 1209 dprintk("%s DS connection error %d\n", __func__, 1210 task->tk_status); ··· 1244 1245 switch (vers) { 1246 case 3: 1247 - return ff_layout_async_handle_error_v3(task, lseg, idx); 1248 case 4: 1249 return ff_layout_async_handle_error_v4(task, state, clp, 1250 lseg, idx); ··· 1274 case -ECONNRESET: 1275 case -EHOSTDOWN: 1276 case -EHOSTUNREACH: 1277 case -ENETUNREACH: 1278 case -EADDRINUSE: 1279 case -ENOBUFS: ··· 1348 return task->tk_status; 1349 case -EAGAIN: 1350 goto out_eagain; 1351 } 1352 1353 return 0; ··· 1521 return task->tk_status; 1522 case -EAGAIN: 1523 return -EAGAIN; 1524 } 1525 1526 if (hdr->res.verf->committed == NFS_FILE_SYNC || ··· 1568 case -EAGAIN: 1569 rpc_restart_call_prepare(task); 1570 return -EAGAIN; 1571 } 1572 1573 ff_layout_set_layoutcommit(data->inode, data->lseg, data->lwb);
··· 1154 rpc_wake_up(&tbl->slot_tbl_waitq); 1155 goto reset; 1156 /* RPC connection errors */ 1157 + case -ENETDOWN: 1158 + case -ENETUNREACH: 1159 + if (test_bit(NFS_CS_NETUNREACH_FATAL, &clp->cl_flags)) 1160 + return -NFS4ERR_FATAL_IOERROR; 1161 + fallthrough; 1162 case -ECONNREFUSED: 1163 case -EHOSTDOWN: 1164 case -EHOSTUNREACH: 1165 case -EIO: 1166 case -ETIMEDOUT: 1167 case -EPIPE: ··· 1183 1184 /* Retry all errors through either pNFS or MDS except for -EJUKEBOX */ 1185 static int ff_layout_async_handle_error_v3(struct rpc_task *task, 1186 + struct nfs_client *clp, 1187 struct pnfs_layout_segment *lseg, 1188 u32 idx) 1189 { ··· 1200 case -EJUKEBOX: 1201 nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY); 1202 goto out_retry; 1203 + case -ENETDOWN: 1204 + case -ENETUNREACH: 1205 + if (test_bit(NFS_CS_NETUNREACH_FATAL, &clp->cl_flags)) 1206 + return -NFS4ERR_FATAL_IOERROR; 1207 + fallthrough; 1208 default: 1209 dprintk("%s DS connection error %d\n", __func__, 1210 task->tk_status); ··· 1234 1235 switch (vers) { 1236 case 3: 1237 + return ff_layout_async_handle_error_v3(task, clp, lseg, idx); 1238 case 4: 1239 return ff_layout_async_handle_error_v4(task, state, clp, 1240 lseg, idx); ··· 1264 case -ECONNRESET: 1265 case -EHOSTDOWN: 1266 case -EHOSTUNREACH: 1267 + case -ENETDOWN: 1268 case -ENETUNREACH: 1269 case -EADDRINUSE: 1270 case -ENOBUFS: ··· 1337 return task->tk_status; 1338 case -EAGAIN: 1339 goto out_eagain; 1340 + case -NFS4ERR_FATAL_IOERROR: 1341 + task->tk_status = -EIO; 1342 + return 0; 1343 } 1344 1345 return 0; ··· 1507 return task->tk_status; 1508 case -EAGAIN: 1509 return -EAGAIN; 1510 + case -NFS4ERR_FATAL_IOERROR: 1511 + task->tk_status = -EIO; 1512 + return 0; 1513 } 1514 1515 if (hdr->res.verf->committed == NFS_FILE_SYNC || ··· 1551 case -EAGAIN: 1552 rpc_restart_call_prepare(task); 1553 return -EAGAIN; 1554 + case -NFS4ERR_FATAL_IOERROR: 1555 + task->tk_status = -EIO; 1556 + return 0; 1557 } 1558 1559 
ff_layout_set_layoutcommit(data->inode, data->lseg, data->lwb);
+67 -4
fs/nfs/fs_context.c
··· 50 Opt_clientaddr, 51 Opt_cto, 52 Opt_alignwrite, 53 Opt_fg, 54 Opt_fscache, 55 Opt_fscache_flag, ··· 73 Opt_posix, 74 Opt_proto, 75 Opt_rdirplus, 76 Opt_rdma, 77 Opt_resvport, 78 Opt_retrans, ··· 96 Opt_wsize, 97 Opt_write, 98 Opt_xprtsec, 99 }; 100 101 enum { ··· 168 fsparam_string("clientaddr", Opt_clientaddr), 169 fsparam_flag_no("cto", Opt_cto), 170 fsparam_flag_no("alignwrite", Opt_alignwrite), 171 fsparam_flag ("fg", Opt_fg), 172 fsparam_flag_no("fsc", Opt_fscache_flag), 173 fsparam_string("fsc", Opt_fscache), ··· 193 fsparam_u32 ("port", Opt_port), 194 fsparam_flag_no("posix", Opt_posix), 195 fsparam_string("proto", Opt_proto), 196 - fsparam_flag_no("rdirplus", Opt_rdirplus), 197 fsparam_flag ("rdma", Opt_rdma), 198 fsparam_flag_no("resvport", Opt_resvport), 199 fsparam_u32 ("retrans", Opt_retrans), ··· 305 { "none", Opt_xprtsec_none }, 306 { "tls", Opt_xprtsec_tls }, 307 { "mtls", Opt_xprtsec_mtls }, 308 {} 309 }; 310 ··· 662 ctx->flags &= ~NFS_MOUNT_NOACL; 663 break; 664 case Opt_rdirplus: 665 - if (result.negated) 666 ctx->flags |= NFS_MOUNT_NORDIRPLUS; 667 - else 668 - ctx->flags &= ~NFS_MOUNT_NORDIRPLUS; 669 break; 670 case Opt_sharecache: 671 if (result.negated) ··· 912 if (result.uint_32 < 1 || result.uint_32 > NFS_MAX_TRANSPORTS) 913 goto out_of_bounds; 914 ctx->nfs_server.max_connect = result.uint_32; 915 break; 916 case Opt_lookupcache: 917 trace_nfs_mount_assign(param->key, param->string); ··· 1710 ctx->xprtsec.policy = RPC_XPRTSEC_NONE; 1711 ctx->xprtsec.cert_serial = TLS_NO_CERT; 1712 ctx->xprtsec.privkey_serial = TLS_NO_PRIVKEY; 1713 1714 fc->s_iflags |= SB_I_STABLE_WRITES; 1715 }
··· 50 Opt_clientaddr, 51 Opt_cto, 52 Opt_alignwrite, 53 + Opt_fatal_neterrors, 54 Opt_fg, 55 Opt_fscache, 56 Opt_fscache_flag, ··· 72 Opt_posix, 73 Opt_proto, 74 Opt_rdirplus, 75 + Opt_rdirplus_none, 76 + Opt_rdirplus_force, 77 Opt_rdma, 78 Opt_resvport, 79 Opt_retrans, ··· 93 Opt_wsize, 94 Opt_write, 95 Opt_xprtsec, 96 + }; 97 + 98 + enum { 99 + Opt_fatal_neterrors_default, 100 + Opt_fatal_neterrors_enetunreach, 101 + Opt_fatal_neterrors_none, 102 + }; 103 + 104 + static const struct constant_table nfs_param_enums_fatal_neterrors[] = { 105 + { "default", Opt_fatal_neterrors_default }, 106 + { "ENETDOWN:ENETUNREACH", Opt_fatal_neterrors_enetunreach }, 107 + { "ENETUNREACH:ENETDOWN", Opt_fatal_neterrors_enetunreach }, 108 + { "none", Opt_fatal_neterrors_none }, 109 + {} 110 }; 111 112 enum { ··· 151 fsparam_string("clientaddr", Opt_clientaddr), 152 fsparam_flag_no("cto", Opt_cto), 153 fsparam_flag_no("alignwrite", Opt_alignwrite), 154 + fsparam_enum("fatal_neterrors", Opt_fatal_neterrors, 155 + nfs_param_enums_fatal_neterrors), 156 fsparam_flag ("fg", Opt_fg), 157 fsparam_flag_no("fsc", Opt_fscache_flag), 158 fsparam_string("fsc", Opt_fscache), ··· 174 fsparam_u32 ("port", Opt_port), 175 fsparam_flag_no("posix", Opt_posix), 176 fsparam_string("proto", Opt_proto), 177 + fsparam_flag_no("rdirplus", Opt_rdirplus), // rdirplus|nordirplus 178 + fsparam_string("rdirplus", Opt_rdirplus), // rdirplus=... 
179 fsparam_flag ("rdma", Opt_rdma), 180 fsparam_flag_no("resvport", Opt_resvport), 181 fsparam_u32 ("retrans", Opt_retrans), ··· 285 { "none", Opt_xprtsec_none }, 286 { "tls", Opt_xprtsec_tls }, 287 { "mtls", Opt_xprtsec_mtls }, 288 + {} 289 + }; 290 + 291 + static const struct constant_table nfs_rdirplus_tokens[] = { 292 + { "none", Opt_rdirplus_none }, 293 + { "force", Opt_rdirplus_force }, 294 {} 295 }; 296 ··· 636 ctx->flags &= ~NFS_MOUNT_NOACL; 637 break; 638 case Opt_rdirplus: 639 + if (result.negated) { 640 + ctx->flags &= ~NFS_MOUNT_FORCE_RDIRPLUS; 641 ctx->flags |= NFS_MOUNT_NORDIRPLUS; 642 + } else if (!param->string) { 643 + ctx->flags &= ~(NFS_MOUNT_NORDIRPLUS | NFS_MOUNT_FORCE_RDIRPLUS); 644 + } else { 645 + switch (lookup_constant(nfs_rdirplus_tokens, param->string, -1)) { 646 + case Opt_rdirplus_none: 647 + ctx->flags &= ~NFS_MOUNT_FORCE_RDIRPLUS; 648 + ctx->flags |= NFS_MOUNT_NORDIRPLUS; 649 + break; 650 + case Opt_rdirplus_force: 651 + ctx->flags &= ~NFS_MOUNT_NORDIRPLUS; 652 + ctx->flags |= NFS_MOUNT_FORCE_RDIRPLUS; 653 + break; 654 + default: 655 + goto out_invalid_value; 656 + } 657 + } 658 break; 659 case Opt_sharecache: 660 if (result.negated) ··· 871 if (result.uint_32 < 1 || result.uint_32 > NFS_MAX_TRANSPORTS) 872 goto out_of_bounds; 873 ctx->nfs_server.max_connect = result.uint_32; 874 + break; 875 + case Opt_fatal_neterrors: 876 + trace_nfs_mount_assign(param->key, param->string); 877 + switch (result.uint_32) { 878 + case Opt_fatal_neterrors_default: 879 + if (fc->net_ns != &init_net) 880 + ctx->flags |= NFS_MOUNT_NETUNREACH_FATAL; 881 + else 882 + ctx->flags &= ~NFS_MOUNT_NETUNREACH_FATAL; 883 + break; 884 + case Opt_fatal_neterrors_enetunreach: 885 + ctx->flags |= NFS_MOUNT_NETUNREACH_FATAL; 886 + break; 887 + case Opt_fatal_neterrors_none: 888 + ctx->flags &= ~NFS_MOUNT_NETUNREACH_FATAL; 889 + break; 890 + default: 891 + goto out_invalid_value; 892 + } 893 break; 894 case Opt_lookupcache: 895 trace_nfs_mount_assign(param->key, 
param->string); ··· 1650 ctx->xprtsec.policy = RPC_XPRTSEC_NONE; 1651 ctx->xprtsec.cert_serial = TLS_NO_CERT; 1652 ctx->xprtsec.privkey_serial = TLS_NO_PRIVKEY; 1653 + 1654 + if (fc->net_ns != &init_net) 1655 + ctx->flags |= NFS_MOUNT_NETUNREACH_FATAL; 1656 1657 fc->s_iflags |= SB_I_STABLE_WRITES; 1658 }
+2
fs/nfs/inode.c
··· 74 75 int nfs_wait_bit_killable(struct wait_bit_key *key, int mode) 76 { 77 schedule(); 78 if (signal_pending_state(mode, current)) 79 return -ERESTARTSYS;
··· 74 75 int nfs_wait_bit_killable(struct wait_bit_key *key, int mode) 76 { 77 + if (unlikely(nfs_current_task_exiting())) 78 + return -EINTR; 79 schedule(); 80 if (signal_pending_state(mode, current)) 81 return -ERESTARTSYS;
+5
fs/nfs/internal.h
··· 912 } 913 #endif 914 915 static inline bool nfs_error_is_fatal(int err) 916 { 917 switch (err) {
··· 912 } 913 #endif 914 915 + static inline bool nfs_current_task_exiting(void) 916 + { 917 + return (current->flags & PF_EXITING) != 0; 918 + } 919 + 920 static inline bool nfs_error_is_fatal(int err) 921 { 922 switch (err) {
+2
fs/nfs/nfs3client.c
··· 120 121 if (mds_srv->flags & NFS_MOUNT_NORESVPORT) 122 __set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags); 123 124 __set_bit(NFS_CS_DS, &cl_init.init_flags); 125
··· 120 121 if (mds_srv->flags & NFS_MOUNT_NORESVPORT) 122 __set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags); 123 + if (test_bit(NFS_CS_NETUNREACH_FATAL, &mds_clp->cl_flags)) 124 + __set_bit(NFS_CS_NETUNREACH_FATAL, &cl_init.init_flags); 125 126 __set_bit(NFS_CS_DS, &cl_init.init_flags); 127
+1 -1
fs/nfs/nfs3proc.c
··· 39 __set_current_state(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE); 40 schedule_timeout(NFS_JUKEBOX_RETRY_TIME); 41 res = -ERESTARTSYS; 42 - } while (!fatal_signal_pending(current)); 43 return res; 44 } 45
··· 39 __set_current_state(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE); 40 schedule_timeout(NFS_JUKEBOX_RETRY_TIME); 41 res = -ERESTARTSYS; 42 + } while (!fatal_signal_pending(current) && !nfs_current_task_exiting()); 43 return res; 44 } 45
+162 -10
fs/nfs/nfs42proc.c
··· 21 22 #define NFSDBG_FACILITY NFSDBG_PROC 23 static int nfs42_do_offload_cancel_async(struct file *dst, nfs4_stateid *std); 24 25 static void nfs42_set_netaddr(struct file *filep, struct nfs42_netaddr *naddr) 26 { ··· 175 return err; 176 } 177 178 static int handle_async_copy(struct nfs42_copy_res *res, 179 struct nfs_server *dst_server, 180 struct nfs_server *src_server, ··· 198 bool *restart) 199 { 200 struct nfs4_copy_state *copy, *tmp_copy = NULL, *iter; 201 - int status = NFS4_OK; 202 struct nfs_open_context *dst_ctx = nfs_file_open_context(dst); 203 struct nfs_open_context *src_ctx = nfs_file_open_context(src); 204 205 copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_KERNEL); 206 if (!copy) ··· 241 spin_unlock(&src_server->nfs_client->cl_lock); 242 } 243 244 - status = wait_for_completion_interruptible(&copy->completion); 245 - spin_lock(&dst_server->nfs_client->cl_lock); 246 - list_del_init(&copy->copies); 247 - spin_unlock(&dst_server->nfs_client->cl_lock); 248 - if (dst_server != src_server) { 249 - spin_lock(&src_server->nfs_client->cl_lock); 250 - list_del_init(&copy->src_copies); 251 - spin_unlock(&src_server->nfs_client->cl_lock); 252 - } 253 if (status == -ERESTARTSYS) { 254 goto out_cancel; 255 } else if (copy->flags || copy->error == NFS4ERR_PARTNER_NO_AUTH) { ··· 256 } 257 out: 258 res->write_res.count = copy->count; 259 memcpy(&res->write_res.verifier, &copy->verf, sizeof(copy->verf)); 260 status = -copy->error; 261 ··· 267 nfs42_do_offload_cancel_async(dst, &copy->stateid); 268 if (!nfs42_files_from_same_server(src, dst)) 269 nfs42_do_offload_cancel_async(src, src_stateid); 270 goto out_free; 271 } 272 ··· 629 if (status == -ENOTSUPP) 630 dst_server->caps &= ~NFS_CAP_OFFLOAD_CANCEL; 631 rpc_put_task(task); 632 return status; 633 } 634
··· 21 22 #define NFSDBG_FACILITY NFSDBG_PROC 23 static int nfs42_do_offload_cancel_async(struct file *dst, nfs4_stateid *std); 24 + static int nfs42_proc_offload_status(struct file *file, nfs4_stateid *stateid, 25 + u64 *copied); 26 27 static void nfs42_set_netaddr(struct file *filep, struct nfs42_netaddr *naddr) 28 { ··· 173 return err; 174 } 175 176 + static void nfs4_copy_dequeue_callback(struct nfs_server *dst_server, 177 + struct nfs_server *src_server, 178 + struct nfs4_copy_state *copy) 179 + { 180 + spin_lock(&dst_server->nfs_client->cl_lock); 181 + list_del_init(&copy->copies); 182 + spin_unlock(&dst_server->nfs_client->cl_lock); 183 + if (dst_server != src_server) { 184 + spin_lock(&src_server->nfs_client->cl_lock); 185 + list_del_init(&copy->src_copies); 186 + spin_unlock(&src_server->nfs_client->cl_lock); 187 + } 188 + } 189 + 190 static int handle_async_copy(struct nfs42_copy_res *res, 191 struct nfs_server *dst_server, 192 struct nfs_server *src_server, ··· 182 bool *restart) 183 { 184 struct nfs4_copy_state *copy, *tmp_copy = NULL, *iter; 185 struct nfs_open_context *dst_ctx = nfs_file_open_context(dst); 186 struct nfs_open_context *src_ctx = nfs_file_open_context(src); 187 + struct nfs_client *clp = dst_server->nfs_client; 188 + unsigned long timeout = 3 * HZ; 189 + int status = NFS4_OK; 190 + u64 copied; 191 192 copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_KERNEL); 193 if (!copy) ··· 222 spin_unlock(&src_server->nfs_client->cl_lock); 223 } 224 225 + wait: 226 + status = wait_for_completion_interruptible_timeout(&copy->completion, 227 + timeout); 228 + if (!status) 229 + goto timeout; 230 + nfs4_copy_dequeue_callback(dst_server, src_server, copy); 231 if (status == -ERESTARTSYS) { 232 goto out_cancel; 233 } else if (copy->flags || copy->error == NFS4ERR_PARTNER_NO_AUTH) { ··· 240 } 241 out: 242 res->write_res.count = copy->count; 243 + /* Copy out the updated write verifier provided by CB_OFFLOAD. 
*/ 244 memcpy(&res->write_res.verifier, &copy->verf, sizeof(copy->verf)); 245 status = -copy->error; 246 ··· 250 nfs42_do_offload_cancel_async(dst, &copy->stateid); 251 if (!nfs42_files_from_same_server(src, dst)) 252 nfs42_do_offload_cancel_async(src, src_stateid); 253 + goto out_free; 254 + timeout: 255 + timeout <<= 1; 256 + if (timeout > (clp->cl_lease_time >> 1)) 257 + timeout = clp->cl_lease_time >> 1; 258 + status = nfs42_proc_offload_status(dst, &copy->stateid, &copied); 259 + if (status == -EINPROGRESS) 260 + goto wait; 261 + nfs4_copy_dequeue_callback(dst_server, src_server, copy); 262 + switch (status) { 263 + case 0: 264 + /* The server recognized the copy stateid, so it hasn't 265 + * rebooted. Don't overwrite the verifier returned in the 266 + * COPY result. */ 267 + res->write_res.count = copied; 268 + goto out_free; 269 + case -EREMOTEIO: 270 + /* COPY operation failed on the server. */ 271 + status = -EOPNOTSUPP; 272 + res->write_res.count = copied; 273 + goto out_free; 274 + case -EBADF: 275 + /* Server did not recognize the copy stateid. It has 276 + * probably restarted and lost the plot. */ 277 + res->write_res.count = 0; 278 + status = -EOPNOTSUPP; 279 + break; 280 + case -EOPNOTSUPP: 281 + /* RFC 7862 REQUIREs server to support OFFLOAD_STATUS when 282 + * it has signed up for an async COPY, so server is not 283 + * spec-compliant. 
*/ 284 + res->write_res.count = 0; 285 + } 286 goto out_free; 287 } 288 ··· 579 if (status == -ENOTSUPP) 580 dst_server->caps &= ~NFS_CAP_OFFLOAD_CANCEL; 581 rpc_put_task(task); 582 + return status; 583 + } 584 + 585 + static int 586 + _nfs42_proc_offload_status(struct nfs_server *server, struct file *file, 587 + struct nfs42_offload_data *data) 588 + { 589 + struct nfs_open_context *ctx = nfs_file_open_context(file); 590 + struct rpc_message msg = { 591 + .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OFFLOAD_STATUS], 592 + .rpc_argp = &data->args, 593 + .rpc_resp = &data->res, 594 + .rpc_cred = ctx->cred, 595 + }; 596 + int status; 597 + 598 + status = nfs4_call_sync(server->client, server, &msg, 599 + &data->args.osa_seq_args, 600 + &data->res.osr_seq_res, 1); 601 + trace_nfs4_offload_status(&data->args, status); 602 + switch (status) { 603 + case 0: 604 + break; 605 + 606 + case -NFS4ERR_ADMIN_REVOKED: 607 + case -NFS4ERR_BAD_STATEID: 608 + case -NFS4ERR_OLD_STATEID: 609 + /* 610 + * Server does not recognize the COPY stateid. CB_OFFLOAD 611 + * could have purged it, or server might have rebooted. 612 + * Since COPY stateids don't have an associated inode, 613 + * avoid triggering state recovery. 
614 + */ 615 + status = -EBADF; 616 + break; 617 + case -NFS4ERR_NOTSUPP: 618 + case -ENOTSUPP: 619 + case -EOPNOTSUPP: 620 + server->caps &= ~NFS_CAP_OFFLOAD_STATUS; 621 + status = -EOPNOTSUPP; 622 + break; 623 + } 624 + 625 + return status; 626 + } 627 + 628 + /** 629 + * nfs42_proc_offload_status - Poll completion status of an async copy operation 630 + * @dst: handle of file being copied into 631 + * @stateid: copy stateid (from async COPY result) 632 + * @copied: OUT: number of bytes copied so far 633 + * 634 + * Return values: 635 + * %0: Server returned an NFS4_OK completion status 636 + * %-EINPROGRESS: Server returned no completion status 637 + * %-EREMOTEIO: Server returned an error completion status 638 + * %-EBADF: Server did not recognize the copy stateid 639 + * %-EOPNOTSUPP: Server does not support OFFLOAD_STATUS 640 + * %-ERESTARTSYS: Wait interrupted by signal 641 + * 642 + * Other negative errnos indicate the client could not complete the 643 + * request. 644 + */ 645 + static int 646 + nfs42_proc_offload_status(struct file *dst, nfs4_stateid *stateid, u64 *copied) 647 + { 648 + struct inode *inode = file_inode(dst); 649 + struct nfs_server *server = NFS_SERVER(inode); 650 + struct nfs4_exception exception = { 651 + .inode = inode, 652 + }; 653 + struct nfs42_offload_data *data; 654 + int status; 655 + 656 + if (!(server->caps & NFS_CAP_OFFLOAD_STATUS)) 657 + return -EOPNOTSUPP; 658 + 659 + data = kzalloc(sizeof(*data), GFP_KERNEL); 660 + if (!data) 661 + return -ENOMEM; 662 + data->seq_server = server; 663 + data->args.osa_src_fh = NFS_FH(inode); 664 + memcpy(&data->args.osa_stateid, stateid, 665 + sizeof(data->args.osa_stateid)); 666 + exception.stateid = &data->args.osa_stateid; 667 + do { 668 + status = _nfs42_proc_offload_status(server, dst, data); 669 + if (status == -EOPNOTSUPP) 670 + goto out; 671 + status = nfs4_handle_exception(server, status, &exception); 672 + } while (exception.retry); 673 + if (status) 674 + goto out; 675 + 676 + 
*copied = data->res.osr_count; 677 + if (!data->res.complete_count) 678 + status = -EINPROGRESS; 679 + else if (data->res.osr_complete != NFS_OK) 680 + status = -EREMOTEIO; 681 + 682 + out: 683 + kfree(data); 684 return status; 685 } 686
+86
fs/nfs/nfs42xdr.c
··· 35 #define encode_offload_cancel_maxsz (op_encode_hdr_maxsz + \ 36 XDR_QUADLEN(NFS4_STATEID_SIZE)) 37 #define decode_offload_cancel_maxsz (op_decode_hdr_maxsz) 38 #define encode_copy_notify_maxsz (op_encode_hdr_maxsz + \ 39 XDR_QUADLEN(NFS4_STATEID_SIZE) + \ 40 1 + /* nl4_type */ \ ··· 148 decode_sequence_maxsz + \ 149 decode_putfh_maxsz + \ 150 decode_offload_cancel_maxsz) 151 #define NFS4_enc_copy_notify_sz (compound_encode_hdr_maxsz + \ 152 encode_sequence_maxsz + \ 153 encode_putfh_maxsz + \ ··· 355 struct compound_hdr *hdr) 356 { 357 encode_op_hdr(xdr, OP_OFFLOAD_CANCEL, decode_offload_cancel_maxsz, hdr); 358 encode_nfs4_stateid(xdr, &args->osa_stateid); 359 } 360 ··· 587 encode_sequence(xdr, &args->osa_seq_args, &hdr); 588 encode_putfh(xdr, args->osa_src_fh, &hdr); 589 encode_offload_cancel(xdr, args, &hdr); 590 encode_nops(&hdr); 591 } 592 ··· 959 struct nfs42_offload_status_res *res) 960 { 961 return decode_op_hdr(xdr, OP_OFFLOAD_CANCEL); 962 } 963 964 static int decode_copy_notify(struct xdr_stream *xdr, ··· 1425 if (status) 1426 goto out; 1427 status = decode_offload_cancel(xdr, res); 1428 1429 out: 1430 return status;
··· 35 #define encode_offload_cancel_maxsz (op_encode_hdr_maxsz + \ 36 XDR_QUADLEN(NFS4_STATEID_SIZE)) 37 #define decode_offload_cancel_maxsz (op_decode_hdr_maxsz) 38 + #define encode_offload_status_maxsz (op_encode_hdr_maxsz + \ 39 + XDR_QUADLEN(NFS4_STATEID_SIZE)) 40 + #define decode_offload_status_maxsz (op_decode_hdr_maxsz + \ 41 + 2 /* osr_count */ + \ 42 + 2 /* osr_complete */) 43 #define encode_copy_notify_maxsz (op_encode_hdr_maxsz + \ 44 XDR_QUADLEN(NFS4_STATEID_SIZE) + \ 45 1 + /* nl4_type */ \ ··· 143 decode_sequence_maxsz + \ 144 decode_putfh_maxsz + \ 145 decode_offload_cancel_maxsz) 146 + #define NFS4_enc_offload_status_sz (compound_encode_hdr_maxsz + \ 147 + encode_sequence_maxsz + \ 148 + encode_putfh_maxsz + \ 149 + encode_offload_status_maxsz) 150 + #define NFS4_dec_offload_status_sz (compound_decode_hdr_maxsz + \ 151 + decode_sequence_maxsz + \ 152 + decode_putfh_maxsz + \ 153 + decode_offload_status_maxsz) 154 #define NFS4_enc_copy_notify_sz (compound_encode_hdr_maxsz + \ 155 encode_sequence_maxsz + \ 156 encode_putfh_maxsz + \ ··· 342 struct compound_hdr *hdr) 343 { 344 encode_op_hdr(xdr, OP_OFFLOAD_CANCEL, decode_offload_cancel_maxsz, hdr); 345 + encode_nfs4_stateid(xdr, &args->osa_stateid); 346 + } 347 + 348 + static void encode_offload_status(struct xdr_stream *xdr, 349 + const struct nfs42_offload_status_args *args, 350 + struct compound_hdr *hdr) 351 + { 352 + encode_op_hdr(xdr, OP_OFFLOAD_STATUS, decode_offload_status_maxsz, hdr); 353 encode_nfs4_stateid(xdr, &args->osa_stateid); 354 } 355 ··· 566 encode_sequence(xdr, &args->osa_seq_args, &hdr); 567 encode_putfh(xdr, args->osa_src_fh, &hdr); 568 encode_offload_cancel(xdr, args, &hdr); 569 + encode_nops(&hdr); 570 + } 571 + 572 + /* 573 + * Encode OFFLOAD_STATUS request 574 + */ 575 + static void nfs4_xdr_enc_offload_status(struct rpc_rqst *req, 576 + struct xdr_stream *xdr, 577 + const void *data) 578 + { 579 + const struct nfs42_offload_status_args *args = data; 580 + struct compound_hdr 
hdr = { 581 + .minorversion = nfs4_xdr_minorversion(&args->osa_seq_args), 582 + }; 583 + 584 + encode_compound_hdr(xdr, req, &hdr); 585 + encode_sequence(xdr, &args->osa_seq_args, &hdr); 586 + encode_putfh(xdr, args->osa_src_fh, &hdr); 587 + encode_offload_status(xdr, args, &hdr); 588 encode_nops(&hdr); 589 } 590 ··· 919 struct nfs42_offload_status_res *res) 920 { 921 return decode_op_hdr(xdr, OP_OFFLOAD_CANCEL); 922 + } 923 + 924 + static int decode_offload_status(struct xdr_stream *xdr, 925 + struct nfs42_offload_status_res *res) 926 + { 927 + ssize_t result; 928 + int status; 929 + 930 + status = decode_op_hdr(xdr, OP_OFFLOAD_STATUS); 931 + if (status) 932 + return status; 933 + /* osr_count */ 934 + if (xdr_stream_decode_u64(xdr, &res->osr_count) < 0) 935 + return -EIO; 936 + /* osr_complete<1> */ 937 + result = xdr_stream_decode_uint32_array(xdr, &res->osr_complete, 1); 938 + if (result < 0) 939 + return -EIO; 940 + res->complete_count = result; 941 + return 0; 942 } 943 944 static int decode_copy_notify(struct xdr_stream *xdr, ··· 1365 if (status) 1366 goto out; 1367 status = decode_offload_cancel(xdr, res); 1368 + 1369 + out: 1370 + return status; 1371 + } 1372 + 1373 + /* 1374 + * Decode OFFLOAD_STATUS response 1375 + */ 1376 + static int nfs4_xdr_dec_offload_status(struct rpc_rqst *rqstp, 1377 + struct xdr_stream *xdr, 1378 + void *data) 1379 + { 1380 + struct nfs42_offload_status_res *res = data; 1381 + struct compound_hdr hdr; 1382 + int status; 1383 + 1384 + status = decode_compound_hdr(xdr, &hdr); 1385 + if (status) 1386 + goto out; 1387 + status = decode_sequence(xdr, &res->osr_seq_res, rqstp); 1388 + if (status) 1389 + goto out; 1390 + status = decode_putfh(xdr); 1391 + if (status) 1392 + goto out; 1393 + status = decode_offload_status(xdr, res); 1394 1395 out: 1396 return status;
+7
fs/nfs/nfs4client.c
··· 233 __set_bit(NFS_CS_NO_RETRANS_TIMEOUT, &clp->cl_flags); 234 if (test_bit(NFS_CS_PNFS, &cl_init->init_flags)) 235 __set_bit(NFS_CS_PNFS, &clp->cl_flags); 236 /* 237 * Set up the connection to the server before we add add to the 238 * global list. ··· 939 __set_bit(NFS_CS_TSM_POSSIBLE, &cl_init.init_flags); 940 server->port = rpc_get_port((struct sockaddr *)addr); 941 942 /* Allocate or find a client reference we can use */ 943 clp = nfs_get_client(&cl_init); 944 if (IS_ERR(clp)) ··· 1016 1017 if (mds_srv->flags & NFS_MOUNT_NORESVPORT) 1018 __set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags); 1019 1020 __set_bit(NFS_CS_PNFS, &cl_init.init_flags); 1021 cl_init.max_connect = NFS_MAX_TRANSPORTS;
··· 233 __set_bit(NFS_CS_NO_RETRANS_TIMEOUT, &clp->cl_flags); 234 if (test_bit(NFS_CS_PNFS, &cl_init->init_flags)) 235 __set_bit(NFS_CS_PNFS, &clp->cl_flags); 236 + if (test_bit(NFS_CS_NETUNREACH_FATAL, &cl_init->init_flags)) 237 + __set_bit(NFS_CS_NETUNREACH_FATAL, &clp->cl_flags); 238 /* 239 * Set up the connection to the server before we add add to the 240 * global list. ··· 937 __set_bit(NFS_CS_TSM_POSSIBLE, &cl_init.init_flags); 938 server->port = rpc_get_port((struct sockaddr *)addr); 939 940 + if (server->flags & NFS_MOUNT_NETUNREACH_FATAL) 941 + __set_bit(NFS_CS_NETUNREACH_FATAL, &cl_init.init_flags); 942 + 943 /* Allocate or find a client reference we can use */ 944 clp = nfs_get_client(&cl_init); 945 if (IS_ERR(clp)) ··· 1011 1012 if (mds_srv->flags & NFS_MOUNT_NORESVPORT) 1013 __set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags); 1014 + if (test_bit(NFS_CS_NETUNREACH_FATAL, &mds_clp->cl_flags)) 1015 + __set_bit(NFS_CS_NETUNREACH_FATAL, &cl_init.init_flags); 1016 1017 __set_bit(NFS_CS_PNFS, &cl_init.init_flags); 1018 cl_init.max_connect = NFS_MAX_TRANSPORTS;
+13 -4
fs/nfs/nfs4proc.c
··· 195 return -EBUSY; 196 case -NFS4ERR_NOT_SAME: 197 return -ENOTSYNC; 198 default: 199 dprintk("%s could not handle NFSv4 error %d\n", 200 __func__, -err); ··· 446 { 447 might_sleep(); 448 449 __set_current_state(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE); 450 schedule_timeout(nfs4_update_delay(timeout)); 451 if (!__fatal_signal_pending(current)) ··· 459 { 460 might_sleep(); 461 462 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE_UNSAFE); 463 schedule_timeout(nfs4_update_delay(timeout)); 464 if (!signal_pending(current)) ··· 1781 rcu_read_unlock(); 1782 trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0); 1783 1784 - if (!fatal_signal_pending(current)) { 1785 if (schedule_timeout(5*HZ) == 0) 1786 status = -EAGAIN; 1787 else ··· 3584 write_sequnlock(&state->seqlock); 3585 trace_nfs4_close_stateid_update_wait(state->inode, dst, 0); 3586 3587 - if (fatal_signal_pending(current)) 3588 status = -EINTR; 3589 else 3590 if (schedule_timeout(5*HZ) != 0) ··· 9602 return; 9603 9604 trace_nfs4_sequence(clp, task->tk_status); 9605 - if (task->tk_status < 0 && !task->tk_client->cl_shutdown) { 9606 dprintk("%s ERROR %d\n", __func__, task->tk_status); 9607 if (refcount_read(&clp->cl_count) == 1) 9608 return; ··· 10806 | NFS_CAP_CLONE 10807 | NFS_CAP_LAYOUTERROR 10808 | NFS_CAP_READ_PLUS 10809 - | NFS_CAP_MOVEABLE, 10810 .init_client = nfs41_init_client, 10811 .shutdown_client = nfs41_shutdown_client, 10812 .match_stateid = nfs41_match_stateid,
··· 195 return -EBUSY; 196 case -NFS4ERR_NOT_SAME: 197 return -ENOTSYNC; 198 + case -ENETDOWN: 199 + case -ENETUNREACH: 200 + break; 201 default: 202 dprintk("%s could not handle NFSv4 error %d\n", 203 __func__, -err); ··· 443 { 444 might_sleep(); 445 446 + if (unlikely(nfs_current_task_exiting())) 447 + return -EINTR; 448 __set_current_state(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE); 449 schedule_timeout(nfs4_update_delay(timeout)); 450 if (!__fatal_signal_pending(current)) ··· 454 { 455 might_sleep(); 456 457 + if (unlikely(nfs_current_task_exiting())) 458 + return -EINTR; 459 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE_UNSAFE); 460 schedule_timeout(nfs4_update_delay(timeout)); 461 if (!signal_pending(current)) ··· 1774 rcu_read_unlock(); 1775 trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0); 1776 1777 + if (!fatal_signal_pending(current) && 1778 + !nfs_current_task_exiting()) { 1779 if (schedule_timeout(5*HZ) == 0) 1780 status = -EAGAIN; 1781 else ··· 3576 write_sequnlock(&state->seqlock); 3577 trace_nfs4_close_stateid_update_wait(state->inode, dst, 0); 3578 3579 + if (fatal_signal_pending(current) || nfs_current_task_exiting()) 3580 status = -EINTR; 3581 else 3582 if (schedule_timeout(5*HZ) != 0) ··· 9594 return; 9595 9596 trace_nfs4_sequence(clp, task->tk_status); 9597 + if (task->tk_status < 0 && clp->cl_cons_state >= 0) { 9598 dprintk("%s ERROR %d\n", __func__, task->tk_status); 9599 if (refcount_read(&clp->cl_count) == 1) 9600 return; ··· 10798 | NFS_CAP_CLONE 10799 | NFS_CAP_LAYOUTERROR 10800 | NFS_CAP_READ_PLUS 10801 + | NFS_CAP_MOVEABLE 10802 + | NFS_CAP_OFFLOAD_STATUS, 10803 .init_client = nfs41_init_client, 10804 .shutdown_client = nfs41_shutdown_client, 10805 .match_stateid = nfs41_match_stateid,
+11 -3
fs/nfs/nfs4state.c
··· 1198 struct rpc_clnt *clnt = clp->cl_rpcclient; 1199 bool swapon = false; 1200 1201 - if (clnt->cl_shutdown) 1202 return; 1203 1204 set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state); ··· 1403 dprintk("%s: scheduling stateid recovery for server %s\n", __func__, 1404 clp->cl_hostname); 1405 nfs4_schedule_state_manager(clp); 1406 - return 0; 1407 } 1408 EXPORT_SYMBOL_GPL(nfs4_schedule_stateid_recovery); 1409 ··· 2739 pr_warn_ratelimited("NFS: state manager%s%s failed on NFSv4 server %s" 2740 " with error %d\n", section_sep, section, 2741 clp->cl_hostname, -status); 2742 - ssleep(1); 2743 out_drain: 2744 memalloc_nofs_restore(memflags); 2745 nfs4_end_drain_session(clp);
··· 1198 struct rpc_clnt *clnt = clp->cl_rpcclient; 1199 bool swapon = false; 1200 1201 + if (clp->cl_cons_state < 0) 1202 return; 1203 1204 set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state); ··· 1403 dprintk("%s: scheduling stateid recovery for server %s\n", __func__, 1404 clp->cl_hostname); 1405 nfs4_schedule_state_manager(clp); 1406 + return clp->cl_cons_state < 0 ? clp->cl_cons_state : 0; 1407 } 1408 EXPORT_SYMBOL_GPL(nfs4_schedule_stateid_recovery); 1409 ··· 2739 pr_warn_ratelimited("NFS: state manager%s%s failed on NFSv4 server %s" 2740 " with error %d\n", section_sep, section, 2741 clp->cl_hostname, -status); 2742 + switch (status) { 2743 + case -ENETDOWN: 2744 + case -ENETUNREACH: 2745 + nfs_mark_client_ready(clp, -EIO); 2746 + break; 2747 + default: 2748 + ssleep(1); 2749 + break; 2750 + } 2751 out_drain: 2752 memalloc_nofs_restore(memflags); 2753 nfs4_end_drain_session(clp);
+10 -1
fs/nfs/nfs4trace.h
··· 2608 ) 2609 ); 2610 2611 - TRACE_EVENT(nfs4_offload_cancel, 2612 TP_PROTO( 2613 const struct nfs42_offload_status_args *args, 2614 int error ··· 2640 __entry->stateid_seq, __entry->stateid_hash 2641 ) 2642 ); 2643 2644 DECLARE_EVENT_CLASS(nfs4_xattr_event, 2645 TP_PROTO(
··· 2608 ) 2609 ); 2610 2611 + DECLARE_EVENT_CLASS(nfs4_offload_class, 2612 TP_PROTO( 2613 const struct nfs42_offload_status_args *args, 2614 int error ··· 2640 __entry->stateid_seq, __entry->stateid_hash 2641 ) 2642 ); 2643 + #define DEFINE_NFS4_OFFLOAD_EVENT(name) \ 2644 + DEFINE_EVENT(nfs4_offload_class, name, \ 2645 + TP_PROTO( \ 2646 + const struct nfs42_offload_status_args *args, \ 2647 + int error \ 2648 + ), \ 2649 + TP_ARGS(args, error)) 2650 + DEFINE_NFS4_OFFLOAD_EVENT(nfs4_offload_cancel); 2651 + DEFINE_NFS4_OFFLOAD_EVENT(nfs4_offload_status); 2652 2653 DECLARE_EVENT_CLASS(nfs4_xattr_event, 2654 TP_PROTO(
+10 -9
fs/nfs/nfs4xdr.c
··· 82 * we currently use size 2 (u64) out of (NFS4_OPAQUE_LIMIT >> 2) 83 */ 84 #define pagepad_maxsz (1) 85 - #define open_owner_id_maxsz (1 + 2 + 1 + 1 + 2) 86 - #define lock_owner_id_maxsz (1 + 1 + 4) 87 - #define decode_lockowner_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ)) 88 #define compound_encode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2)) 89 #define compound_decode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2)) 90 #define op_encode_hdr_maxsz (1) ··· 184 #define encode_claim_null_maxsz (1 + nfs4_name_maxsz) 185 #define encode_open_maxsz (op_encode_hdr_maxsz + \ 186 2 + encode_share_access_maxsz + 2 + \ 187 - open_owner_id_maxsz + \ 188 encode_opentype_maxsz + \ 189 encode_claim_null_maxsz) 190 #define decode_space_limit_maxsz (3) ··· 254 #define encode_link_maxsz (op_encode_hdr_maxsz + \ 255 nfs4_name_maxsz) 256 #define decode_link_maxsz (op_decode_hdr_maxsz + decode_change_info_maxsz) 257 - #define encode_lockowner_maxsz (7) 258 #define encode_lock_maxsz (op_encode_hdr_maxsz + \ 259 7 + \ 260 1 + encode_stateid_maxsz + 1 + \ 261 encode_lockowner_maxsz) 262 #define decode_lock_denied_maxsz \ 263 - (8 + decode_lockowner_maxsz) 264 #define decode_lock_maxsz (op_decode_hdr_maxsz + \ 265 decode_lock_denied_maxsz) 266 #define encode_lockt_maxsz (op_encode_hdr_maxsz + 5 + \ ··· 617 encode_lockowner_maxsz) 618 #define NFS4_dec_release_lockowner_sz \ 619 (compound_decode_hdr_maxsz + \ 620 - decode_lockowner_maxsz) 621 #define NFS4_enc_access_sz (compound_encode_hdr_maxsz + \ 622 encode_sequence_maxsz + \ 623 encode_putfh_maxsz + \ ··· 1412 __be32 *p; 1413 /* 1414 * opcode 4, seqid 4, share_access 4, share_deny 4, clientid 8, ownerlen 4, 1415 - * owner 4 = 32 1416 */ 1417 encode_nfs4_seqid(xdr, arg->seqid); 1418 encode_share_access(xdr, arg->share_access); ··· 5077 /* 5078 * We create the owner, so we know a proper owner.id length is 4. 
5079 */ 5080 - static int decode_lock_denied (struct xdr_stream *xdr, struct file_lock *fl) 5081 { 5082 uint64_t offset, length, clientid; 5083 __be32 *p; ··· 7702 PROC42(CLONE, enc_clone, dec_clone), 7703 PROC42(COPY, enc_copy, dec_copy), 7704 PROC42(OFFLOAD_CANCEL, enc_offload_cancel, dec_offload_cancel), 7705 PROC42(COPY_NOTIFY, enc_copy_notify, dec_copy_notify), 7706 PROC(LOOKUPP, enc_lookupp, dec_lookupp), 7707 PROC42(LAYOUTERROR, enc_layouterror, dec_layouterror),
··· 82 * we currently use size 2 (u64) out of (NFS4_OPAQUE_LIMIT >> 2) 83 */ 84 #define pagepad_maxsz (1) 85 + #define open_owner_id_maxsz (2 + 1 + 2 + 2) 86 + #define lock_owner_id_maxsz (2 + 1 + 2) 87 #define compound_encode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2)) 88 #define compound_decode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2)) 89 #define op_encode_hdr_maxsz (1) ··· 185 #define encode_claim_null_maxsz (1 + nfs4_name_maxsz) 186 #define encode_open_maxsz (op_encode_hdr_maxsz + \ 187 2 + encode_share_access_maxsz + 2 + \ 188 + 1 + open_owner_id_maxsz + \ 189 encode_opentype_maxsz + \ 190 encode_claim_null_maxsz) 191 #define decode_space_limit_maxsz (3) ··· 255 #define encode_link_maxsz (op_encode_hdr_maxsz + \ 256 nfs4_name_maxsz) 257 #define decode_link_maxsz (op_decode_hdr_maxsz + decode_change_info_maxsz) 258 + #define encode_lockowner_maxsz (2 + 1 + lock_owner_id_maxsz) 259 + 260 #define encode_lock_maxsz (op_encode_hdr_maxsz + \ 261 7 + \ 262 1 + encode_stateid_maxsz + 1 + \ 263 encode_lockowner_maxsz) 264 #define decode_lock_denied_maxsz \ 265 + (2 + 2 + 1 + 2 + 1 + lock_owner_id_maxsz) 266 #define decode_lock_maxsz (op_decode_hdr_maxsz + \ 267 decode_lock_denied_maxsz) 268 #define encode_lockt_maxsz (op_encode_hdr_maxsz + 5 + \ ··· 617 encode_lockowner_maxsz) 618 #define NFS4_dec_release_lockowner_sz \ 619 (compound_decode_hdr_maxsz + \ 620 + decode_release_lockowner_maxsz) 621 #define NFS4_enc_access_sz (compound_encode_hdr_maxsz + \ 622 encode_sequence_maxsz + \ 623 encode_putfh_maxsz + \ ··· 1412 __be32 *p; 1413 /* 1414 * opcode 4, seqid 4, share_access 4, share_deny 4, clientid 8, ownerlen 4, 1415 + * owner 28 1416 */ 1417 encode_nfs4_seqid(xdr, arg->seqid); 1418 encode_share_access(xdr, arg->share_access); ··· 5077 /* 5078 * We create the owner, so we know a proper owner.id length is 4. 
5079 */ 5080 + static int decode_lock_denied(struct xdr_stream *xdr, struct file_lock *fl) 5081 { 5082 uint64_t offset, length, clientid; 5083 __be32 *p; ··· 7702 PROC42(CLONE, enc_clone, dec_clone), 7703 PROC42(COPY, enc_copy, dec_copy), 7704 PROC42(OFFLOAD_CANCEL, enc_offload_cancel, dec_offload_cancel), 7705 + PROC42(OFFLOAD_STATUS, enc_offload_status, dec_offload_status), 7706 PROC42(COPY_NOTIFY, enc_copy_notify, dec_copy_notify), 7707 PROC(LOOKUPP, enc_lookupp, dec_lookupp), 7708 PROC42(LAYOUTERROR, enc_layouterror, dec_layouterror),
+4
fs/nfs/super.c
··· 454 { NFS_MOUNT_NONLM, ",nolock", "" }, 455 { NFS_MOUNT_NOACL, ",noacl", "" }, 456 { NFS_MOUNT_NORDIRPLUS, ",nordirplus", "" }, 457 { NFS_MOUNT_UNSHARED, ",nosharecache", "" }, 458 { NFS_MOUNT_NORESVPORT, ",noresvport", "" }, 459 { 0, NULL, NULL } 460 }; 461 const struct proc_nfs_info *nfs_infop;
··· 454 { NFS_MOUNT_NONLM, ",nolock", "" }, 455 { NFS_MOUNT_NOACL, ",noacl", "" }, 456 { NFS_MOUNT_NORDIRPLUS, ",nordirplus", "" }, 457 + { NFS_MOUNT_FORCE_RDIRPLUS, ",rdirplus=force", "" }, 458 { NFS_MOUNT_UNSHARED, ",nosharecache", "" }, 459 { NFS_MOUNT_NORESVPORT, ",noresvport", "" }, 460 + { NFS_MOUNT_NETUNREACH_FATAL, 461 + ",fatal_neterrors=ENETDOWN:ENETUNREACH", 462 + ",fatal_neterrors=none" }, 463 { 0, NULL, NULL } 464 }; 465 const struct proc_nfs_info *nfs_infop;
+81 -1
fs/nfs/sysfs.c
··· 14 #include <linux/rcupdate.h> 15 #include <linux/lockd/lockd.h> 16 17 #include "nfs4_fs.h" 18 #include "netns.h" 19 #include "sysfs.h" ··· 229 rpc_cancel_tasks(clnt, -EIO, shutdown_match_client, NULL); 230 } 231 232 static ssize_t 233 shutdown_show(struct kobject *kobj, struct kobj_attribute *attr, 234 char *buf) ··· 279 280 server->flags |= NFS_MOUNT_SHUTDOWN; 281 shutdown_client(server->client); 282 - shutdown_client(server->nfs_client->cl_rpcclient); 283 284 if (!IS_ERR(server->client_acl)) 285 shutdown_client(server->client_acl); ··· 286 if (server->nlm_host) 287 shutdown_client(server->nlm_host->h_rpcclnt); 288 out: 289 return count; 290 } 291 292 static struct kobj_attribute nfs_sysfs_attr_shutdown = __ATTR_RW(shutdown); 293 294 #define RPC_CLIENT_NAME_SIZE 64 295 ··· 361 .child_ns_type = nfs_netns_object_child_ns_type, 362 }; 363 364 void nfs_sysfs_add_server(struct nfs_server *server) 365 { 366 int ret; ··· 403 if (ret < 0) 404 pr_warn("NFS: sysfs_create_file_ns for server-%d failed (%d)\n", 405 server->s_sysfs_id, ret); 406 } 407 EXPORT_SYMBOL_GPL(nfs_sysfs_add_server); 408
··· 14 #include <linux/rcupdate.h> 15 #include <linux/lockd/lockd.h> 16 17 + #include "internal.h" 18 #include "nfs4_fs.h" 19 #include "netns.h" 20 #include "sysfs.h" ··· 228 rpc_cancel_tasks(clnt, -EIO, shutdown_match_client, NULL); 229 } 230 231 + /* 232 + * Shut down the nfs_client only once all the superblocks 233 + * have been shut down. 234 + */ 235 + static void shutdown_nfs_client(struct nfs_client *clp) 236 + { 237 + struct nfs_server *server; 238 + rcu_read_lock(); 239 + list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { 240 + if (!(server->flags & NFS_MOUNT_SHUTDOWN)) { 241 + rcu_read_unlock(); 242 + return; 243 + } 244 + } 245 + rcu_read_unlock(); 246 + nfs_mark_client_ready(clp, -EIO); 247 + shutdown_client(clp->cl_rpcclient); 248 + } 249 + 250 static ssize_t 251 shutdown_show(struct kobject *kobj, struct kobj_attribute *attr, 252 char *buf) ··· 259 260 server->flags |= NFS_MOUNT_SHUTDOWN; 261 shutdown_client(server->client); 262 263 if (!IS_ERR(server->client_acl)) 264 shutdown_client(server->client_acl); ··· 267 if (server->nlm_host) 268 shutdown_client(server->nlm_host->h_rpcclnt); 269 out: 270 + shutdown_nfs_client(server->nfs_client); 271 return count; 272 } 273 274 static struct kobj_attribute nfs_sysfs_attr_shutdown = __ATTR_RW(shutdown); 275 + 276 + #if IS_ENABLED(CONFIG_NFS_V4_1) 277 + static ssize_t 278 + implid_domain_show(struct kobject *kobj, struct kobj_attribute *attr, 279 + char *buf) 280 + { 281 + struct nfs_server *server = container_of(kobj, struct nfs_server, kobj); 282 + struct nfs41_impl_id *impl_id = server->nfs_client->cl_implid; 283 + 284 + if (!impl_id || strlen(impl_id->domain) == 0) 285 + return 0; //sysfs_emit(buf, ""); 286 + return sysfs_emit(buf, "%s\n", impl_id->domain); 287 + } 288 + 289 + static struct kobj_attribute nfs_sysfs_attr_implid_domain = __ATTR_RO(implid_domain); 290 + 291 + 292 + static ssize_t 293 + implid_name_show(struct kobject *kobj, struct kobj_attribute *attr, 294 + char *buf) 295 + 
{ 296 + struct nfs_server *server = container_of(kobj, struct nfs_server, kobj); 297 + struct nfs41_impl_id *impl_id = server->nfs_client->cl_implid; 298 + 299 + if (!impl_id || strlen(impl_id->name) == 0) 300 + return 0; //sysfs_emit(buf, ""); 301 + return sysfs_emit(buf, "%s\n", impl_id->name); 302 + } 303 + 304 + static struct kobj_attribute nfs_sysfs_attr_implid_name = __ATTR_RO(implid_name); 305 + 306 + #endif /* IS_ENABLED(CONFIG_NFS_V4_1) */ 307 308 #define RPC_CLIENT_NAME_SIZE 64 309 ··· 309 .child_ns_type = nfs_netns_object_child_ns_type, 310 }; 311 312 + #if IS_ENABLED(CONFIG_NFS_V4_1) 313 + static void nfs_sysfs_add_nfsv41_server(struct nfs_server *server) 314 + { 315 + int ret; 316 + 317 + if (!server->nfs_client->cl_implid) 318 + return; 319 + 320 + ret = sysfs_create_file_ns(&server->kobj, &nfs_sysfs_attr_implid_domain.attr, 321 + nfs_netns_server_namespace(&server->kobj)); 322 + if (ret < 0) 323 + pr_warn("NFS: sysfs_create_file_ns for server-%d failed (%d)\n", 324 + server->s_sysfs_id, ret); 325 + 326 + ret = sysfs_create_file_ns(&server->kobj, &nfs_sysfs_attr_implid_name.attr, 327 + nfs_netns_server_namespace(&server->kobj)); 328 + if (ret < 0) 329 + pr_warn("NFS: sysfs_create_file_ns for server-%d failed (%d)\n", 330 + server->s_sysfs_id, ret); 331 + } 332 + #else /* CONFIG_NFS_V4_1 */ 333 + static inline void nfs_sysfs_add_nfsv41_server(struct nfs_server *server) 334 + { 335 + } 336 + #endif /* CONFIG_NFS_V4_1 */ 337 + 338 void nfs_sysfs_add_server(struct nfs_server *server) 339 { 340 int ret; ··· 325 if (ret < 0) 326 pr_warn("NFS: sysfs_create_file_ns for server-%d failed (%d)\n", 327 server->s_sysfs_id, ret); 328 + 329 + nfs_sysfs_add_nfsv41_server(server); 330 } 331 EXPORT_SYMBOL_GPL(nfs_sysfs_add_server); 332
+3 -1
fs/nfs/write.c
··· 579 580 while (!nfs_lock_request(head)) { 581 ret = nfs_wait_on_request(head); 582 - if (ret < 0) 583 return ERR_PTR(ret); 584 } 585 586 /* Ensure that nobody removed the request before we locked it */
··· 579 580 while (!nfs_lock_request(head)) { 581 ret = nfs_wait_on_request(head); 582 + if (ret < 0) { 583 + nfs_release_request(head); 584 return ERR_PTR(ret); 585 + } 586 } 587 588 /* Ensure that nobody removed the request before we locked it */
+2
include/linux/nfs4.h
··· 300 /* error codes for internal client use */ 301 #define NFS4ERR_RESET_TO_MDS 12001 302 #define NFS4ERR_RESET_TO_PNFS 12002 303 304 static inline bool seqid_mutating_err(u32 err) 305 { ··· 692 NFSPROC4_CLNT_LISTXATTRS, 693 NFSPROC4_CLNT_REMOVEXATTR, 694 NFSPROC4_CLNT_READ_PLUS, 695 }; 696 697 /* nfs41 types */
··· 300 /* error codes for internal client use */ 301 #define NFS4ERR_RESET_TO_MDS 12001 302 #define NFS4ERR_RESET_TO_PNFS 12002 303 + #define NFS4ERR_FATAL_IOERROR 12003 304 305 static inline bool seqid_mutating_err(u32 err) 306 { ··· 691 NFSPROC4_CLNT_LISTXATTRS, 692 NFSPROC4_CLNT_REMOVEXATTR, 693 NFSPROC4_CLNT_READ_PLUS, 694 + NFSPROC4_CLNT_OFFLOAD_STATUS, 695 }; 696 697 /* nfs41 types */
+8
include/linux/nfs_fs_sb.h
··· 50 #define NFS_CS_DS 7 /* - Server is a DS */ 51 #define NFS_CS_REUSEPORT 8 /* - reuse src port on reconnect */ 52 #define NFS_CS_PNFS 9 /* - Server used for pnfs */ 53 struct sockaddr_storage cl_addr; /* server identifier */ 54 size_t cl_addrlen; 55 char * cl_hostname; /* hostname of server */ ··· 168 #define NFS_MOUNT_TRUNK_DISCOVERY 0x04000000 169 #define NFS_MOUNT_SHUTDOWN 0x08000000 170 #define NFS_MOUNT_NO_ALIGNWRITE 0x10000000 171 172 unsigned int fattr_valid; /* Valid attributes */ 173 unsigned int caps; /* server capabilities */ ··· 253 struct list_head ss_copies; 254 struct list_head ss_src_copies; 255 256 unsigned long delegation_gen; 257 unsigned long mig_gen; 258 unsigned long mig_status; ··· 296 #define NFS_CAP_CASE_INSENSITIVE (1U << 6) 297 #define NFS_CAP_CASE_PRESERVING (1U << 7) 298 #define NFS_CAP_REBOOT_LAYOUTRETURN (1U << 8) 299 #define NFS_CAP_OPEN_XOR (1U << 12) 300 #define NFS_CAP_DELEGTIME (1U << 13) 301 #define NFS_CAP_POSIX_LOCK (1U << 14)
··· 50 #define NFS_CS_DS 7 /* - Server is a DS */ 51 #define NFS_CS_REUSEPORT 8 /* - reuse src port on reconnect */ 52 #define NFS_CS_PNFS 9 /* - Server used for pnfs */ 53 + #define NFS_CS_NETUNREACH_FATAL 10 /* - ENETUNREACH errors are fatal */ 54 struct sockaddr_storage cl_addr; /* server identifier */ 55 size_t cl_addrlen; 56 char * cl_hostname; /* hostname of server */ ··· 167 #define NFS_MOUNT_TRUNK_DISCOVERY 0x04000000 168 #define NFS_MOUNT_SHUTDOWN 0x08000000 169 #define NFS_MOUNT_NO_ALIGNWRITE 0x10000000 170 + #define NFS_MOUNT_FORCE_RDIRPLUS 0x20000000 171 + #define NFS_MOUNT_NETUNREACH_FATAL 0x40000000 172 173 unsigned int fattr_valid; /* Valid attributes */ 174 unsigned int caps; /* server capabilities */ ··· 250 struct list_head ss_copies; 251 struct list_head ss_src_copies; 252 253 + unsigned long delegation_flags; 254 + #define NFS4SERV_DELEGRETURN (1) 255 + #define NFS4SERV_DELEGATION_EXPIRED (2) 256 + #define NFS4SERV_DELEGRETURN_DELAYED (3) 257 unsigned long delegation_gen; 258 unsigned long mig_gen; 259 unsigned long mig_status; ··· 289 #define NFS_CAP_CASE_INSENSITIVE (1U << 6) 290 #define NFS_CAP_CASE_PRESERVING (1U << 7) 291 #define NFS_CAP_REBOOT_LAYOUTRETURN (1U << 8) 292 + #define NFS_CAP_OFFLOAD_STATUS (1U << 9) 293 #define NFS_CAP_OPEN_XOR (1U << 12) 294 #define NFS_CAP_DELEGTIME (1U << 13) 295 #define NFS_CAP_POSIX_LOCK (1U << 14)
+3 -2
include/linux/nfs_xdr.h
··· 1515 1516 struct nfs42_offload_status_res { 1517 struct nfs4_sequence_res osr_seq_res; 1518 - uint64_t osr_count; 1519 - int osr_status; 1520 }; 1521 1522 struct nfs42_copy_notify_args {
··· 1515 1516 struct nfs42_offload_status_res { 1517 struct nfs4_sequence_res osr_seq_res; 1518 + u64 osr_count; 1519 + int complete_count; 1520 + u32 osr_complete; 1521 }; 1522 1523 struct nfs42_copy_notify_args {
+4 -1
include/linux/sunrpc/clnt.h
··· 64 cl_noretranstimeo: 1,/* No retransmit timeouts */ 65 cl_autobind : 1,/* use getport() */ 66 cl_chatty : 1,/* be verbose */ 67 - cl_shutdown : 1;/* rpc immediate -EIO */ 68 struct xprtsec_parms cl_xprtsec; /* transport security policy */ 69 70 struct rpc_rtt * cl_rtt; /* RTO estimator data */ ··· 177 #define RPC_CLNT_CREATE_SOFTERR (1UL << 10) 178 #define RPC_CLNT_CREATE_REUSEPORT (1UL << 11) 179 #define RPC_CLNT_CREATE_CONNECTED (1UL << 12) 180 181 struct rpc_clnt *rpc_create(struct rpc_create_args *args); 182 struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *,
··· 64 cl_noretranstimeo: 1,/* No retransmit timeouts */ 65 cl_autobind : 1,/* use getport() */ 66 cl_chatty : 1,/* be verbose */ 67 + cl_shutdown : 1,/* rpc immediate -EIO */ 68 + cl_netunreach_fatal : 1; 69 + /* Treat ENETUNREACH errors as fatal */ 70 struct xprtsec_parms cl_xprtsec; /* transport security policy */ 71 72 struct rpc_rtt * cl_rtt; /* RTO estimator data */ ··· 175 #define RPC_CLNT_CREATE_SOFTERR (1UL << 10) 176 #define RPC_CLNT_CREATE_REUSEPORT (1UL << 11) 177 #define RPC_CLNT_CREATE_CONNECTED (1UL << 12) 178 + #define RPC_CLNT_CREATE_NETUNREACH_FATAL (1UL << 13) 179 180 struct rpc_clnt *rpc_create(struct rpc_create_args *args); 181 struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *,
+1
include/linux/sunrpc/sched.h
··· 134 #define RPC_TASK_MOVEABLE 0x0004 /* nfs4.1+ rpc tasks */ 135 #define RPC_TASK_NULLCREDS 0x0010 /* Use AUTH_NULL credential */ 136 #define RPC_CALL_MAJORSEEN 0x0020 /* major timeout seen */ 137 #define RPC_TASK_DYNAMIC 0x0080 /* task was kmalloc'ed */ 138 #define RPC_TASK_NO_ROUND_ROBIN 0x0100 /* send requests on "main" xprt */ 139 #define RPC_TASK_SOFT 0x0200 /* Use soft timeouts */
··· 134 #define RPC_TASK_MOVEABLE 0x0004 /* nfs4.1+ rpc tasks */ 135 #define RPC_TASK_NULLCREDS 0x0010 /* Use AUTH_NULL credential */ 136 #define RPC_CALL_MAJORSEEN 0x0020 /* major timeout seen */ 137 + #define RPC_TASK_NETUNREACH_FATAL 0x0040 /* ENETUNREACH is fatal */ 138 #define RPC_TASK_DYNAMIC 0x0080 /* task was kmalloc'ed */ 139 #define RPC_TASK_NO_ROUND_ROBIN 0x0100 /* send requests on "main" xprt */ 140 #define RPC_TASK_SOFT 0x0200 /* Use soft timeouts */
+1
include/linux/sunrpc/xprtmultipath.h
··· 56 struct rpc_xprt *xprt); 57 extern void rpc_xprt_switch_remove_xprt(struct rpc_xprt_switch *xps, 58 struct rpc_xprt *xprt, bool offline); 59 60 extern void xprt_iter_init(struct rpc_xprt_iter *xpi, 61 struct rpc_xprt_switch *xps);
··· 56 struct rpc_xprt *xprt); 57 extern void rpc_xprt_switch_remove_xprt(struct rpc_xprt_switch *xps, 58 struct rpc_xprt *xprt, bool offline); 59 + extern struct rpc_xprt *rpc_xprt_switch_get_main_xprt(struct rpc_xprt_switch *xps); 60 61 extern void xprt_iter_init(struct rpc_xprt_iter *xpi, 62 struct rpc_xprt_switch *xps);
+1
include/trace/events/sunrpc.h
··· 343 { RPC_TASK_MOVEABLE, "MOVEABLE" }, \ 344 { RPC_TASK_NULLCREDS, "NULLCREDS" }, \ 345 { RPC_CALL_MAJORSEEN, "MAJORSEEN" }, \ 346 { RPC_TASK_DYNAMIC, "DYNAMIC" }, \ 347 { RPC_TASK_NO_ROUND_ROBIN, "NO_ROUND_ROBIN" }, \ 348 { RPC_TASK_SOFT, "SOFT" }, \
··· 343 { RPC_TASK_MOVEABLE, "MOVEABLE" }, \ 344 { RPC_TASK_NULLCREDS, "NULLCREDS" }, \ 345 { RPC_CALL_MAJORSEEN, "MAJORSEEN" }, \ 346 + { RPC_TASK_NETUNREACH_FATAL, "NETUNREACH_FATAL"}, \ 347 { RPC_TASK_DYNAMIC, "DYNAMIC" }, \ 348 { RPC_TASK_NO_ROUND_ROBIN, "NO_ROUND_ROBIN" }, \ 349 { RPC_TASK_SOFT, "SOFT" }, \
+22 -11
net/sunrpc/clnt.c
··· 270 old = rcu_dereference_protected(clnt->cl_xprt, 271 lockdep_is_held(&clnt->cl_lock)); 272 273 - if (!xprt_bound(xprt)) 274 - clnt->cl_autobind = 1; 275 - 276 clnt->cl_timeout = timeout; 277 rcu_assign_pointer(clnt->cl_xprt, xprt); 278 spin_unlock(&clnt->cl_lock); ··· 509 clnt->cl_discrtry = 1; 510 if (!(args->flags & RPC_CLNT_CREATE_QUIET)) 511 clnt->cl_chatty = 1; 512 513 return clnt; 514 } ··· 661 new->cl_noretranstimeo = clnt->cl_noretranstimeo; 662 new->cl_discrtry = clnt->cl_discrtry; 663 new->cl_chatty = clnt->cl_chatty; 664 new->cl_principal = clnt->cl_principal; 665 new->cl_max_connect = clnt->cl_max_connect; 666 return new; ··· 1195 task->tk_flags |= RPC_TASK_TIMEOUT; 1196 if (clnt->cl_noretranstimeo) 1197 task->tk_flags |= RPC_TASK_NO_RETRANS_TIMEOUT; 1198 atomic_inc(&clnt->cl_task_count); 1199 } 1200 ··· 2104 case -EPROTONOSUPPORT: 2105 trace_rpcb_bind_version_err(task); 2106 goto retry_timeout; 2107 case -ECONNREFUSED: /* connection problems */ 2108 case -ECONNRESET: 2109 case -ECONNABORTED: 2110 case -ENOTCONN: 2111 case -EHOSTDOWN: 2112 - case -ENETDOWN: 2113 case -EHOSTUNREACH: 2114 - case -ENETUNREACH: 2115 case -EPIPE: 2116 trace_rpcb_unreachable_err(task); 2117 if (!RPC_IS_SOFTCONN(task)) { ··· 2196 2197 task->tk_status = 0; 2198 switch (status) { 2199 case -ECONNREFUSED: 2200 case -ECONNRESET: 2201 /* A positive refusal suggests a rebind is needed. */ 2202 - if (RPC_IS_SOFTCONN(task)) 2203 - break; 2204 if (clnt->cl_autobind) { 2205 rpc_force_rebind(clnt); 2206 goto out_retry; 2207 } 2208 fallthrough; 2209 case -ECONNABORTED: 2210 - case -ENETDOWN: 2211 - case -ENETUNREACH: 2212 case -EHOSTUNREACH: 2213 case -EPIPE: 2214 case -EPROTO: ··· 2463 trace_rpc_call_status(task); 2464 task->tk_status = 0; 2465 switch(status) { 2466 - case -EHOSTDOWN: 2467 case -ENETDOWN: 2468 - case -EHOSTUNREACH: 2469 case -ENETUNREACH: 2470 case -EPERM: 2471 if (RPC_IS_SOFTCONN(task)) 2472 goto out_exit;
··· 270 old = rcu_dereference_protected(clnt->cl_xprt, 271 lockdep_is_held(&clnt->cl_lock)); 272 273 clnt->cl_timeout = timeout; 274 rcu_assign_pointer(clnt->cl_xprt, xprt); 275 spin_unlock(&clnt->cl_lock); ··· 512 clnt->cl_discrtry = 1; 513 if (!(args->flags & RPC_CLNT_CREATE_QUIET)) 514 clnt->cl_chatty = 1; 515 + if (args->flags & RPC_CLNT_CREATE_NETUNREACH_FATAL) 516 + clnt->cl_netunreach_fatal = 1; 517 518 return clnt; 519 } ··· 662 new->cl_noretranstimeo = clnt->cl_noretranstimeo; 663 new->cl_discrtry = clnt->cl_discrtry; 664 new->cl_chatty = clnt->cl_chatty; 665 + new->cl_netunreach_fatal = clnt->cl_netunreach_fatal; 666 new->cl_principal = clnt->cl_principal; 667 new->cl_max_connect = clnt->cl_max_connect; 668 return new; ··· 1195 task->tk_flags |= RPC_TASK_TIMEOUT; 1196 if (clnt->cl_noretranstimeo) 1197 task->tk_flags |= RPC_TASK_NO_RETRANS_TIMEOUT; 1198 + if (clnt->cl_netunreach_fatal) 1199 + task->tk_flags |= RPC_TASK_NETUNREACH_FATAL; 1200 atomic_inc(&clnt->cl_task_count); 1201 } 1202 ··· 2102 case -EPROTONOSUPPORT: 2103 trace_rpcb_bind_version_err(task); 2104 goto retry_timeout; 2105 + case -ENETDOWN: 2106 + case -ENETUNREACH: 2107 + if (task->tk_flags & RPC_TASK_NETUNREACH_FATAL) 2108 + break; 2109 + fallthrough; 2110 case -ECONNREFUSED: /* connection problems */ 2111 case -ECONNRESET: 2112 case -ECONNABORTED: 2113 case -ENOTCONN: 2114 case -EHOSTDOWN: 2115 case -EHOSTUNREACH: 2116 case -EPIPE: 2117 trace_rpcb_unreachable_err(task); 2118 if (!RPC_IS_SOFTCONN(task)) { ··· 2191 2192 task->tk_status = 0; 2193 switch (status) { 2194 + case -ENETDOWN: 2195 + case -ENETUNREACH: 2196 + if (task->tk_flags & RPC_TASK_NETUNREACH_FATAL) 2197 + break; 2198 + fallthrough; 2199 case -ECONNREFUSED: 2200 case -ECONNRESET: 2201 /* A positive refusal suggests a rebind is needed. 
*/ 2202 if (clnt->cl_autobind) { 2203 rpc_force_rebind(clnt); 2204 + if (RPC_IS_SOFTCONN(task)) 2205 + break; 2206 goto out_retry; 2207 } 2208 fallthrough; 2209 case -ECONNABORTED: 2210 case -EHOSTUNREACH: 2211 case -EPIPE: 2212 case -EPROTO: ··· 2455 trace_rpc_call_status(task); 2456 task->tk_status = 0; 2457 switch(status) { 2458 case -ENETDOWN: 2459 case -ENETUNREACH: 2460 + if (task->tk_flags & RPC_TASK_NETUNREACH_FATAL) 2461 + goto out_exit; 2462 + fallthrough; 2463 + case -EHOSTDOWN: 2464 + case -EHOSTUNREACH: 2465 case -EPERM: 2466 if (RPC_IS_SOFTCONN(task)) 2467 goto out_exit;
+3 -2
net/sunrpc/rpcb_clnt.c
··· 820 } 821 822 trace_rpcb_setport(child, map->r_status, map->r_port); 823 - xprt->ops->set_port(xprt, map->r_port); 824 - if (map->r_port) 825 xprt_set_bound(xprt); 826 } 827 828 /*
··· 820 } 821 822 trace_rpcb_setport(child, map->r_status, map->r_port); 823 + if (map->r_port) { 824 + xprt->ops->set_port(xprt, map->r_port); 825 xprt_set_bound(xprt); 826 + } 827 } 828 829 /*
+2
net/sunrpc/sched.c
··· 276 277 static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode) 278 { 279 schedule(); 280 if (signal_pending_state(mode, current)) 281 return -ERESTARTSYS;
··· 276 277 static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode) 278 { 279 + if (unlikely(current->flags & PF_EXITING)) 280 + return -EINTR; 281 schedule(); 282 if (signal_pending_state(mode, current)) 283 return -ERESTARTSYS;
+202
net/sunrpc/sysfs.c
··· 59 return NULL; 60 } 61 62 static inline struct rpc_xprt * 63 rpc_sysfs_xprt_kobj_get_xprt(struct kobject *kobj) 64 { ··· 94 struct rpc_sysfs_xprt_switch, kobject); 95 96 return xprt_switch_get(x->xprt_switch); 97 } 98 99 static ssize_t rpc_sysfs_xprt_dstaddr_show(struct kobject *kobj, ··· 182 ret = sprintf(buf, "<not a socket>\n"); 183 xprt_put(xprt); 184 return ret; 185 } 186 187 static ssize_t rpc_sysfs_xprt_info_show(struct kobject *kobj, ··· 286 return ret; 287 } 288 289 static ssize_t rpc_sysfs_xprt_switch_info_show(struct kobject *kobj, 290 struct kobj_attribute *attr, 291 char *buf) ··· 311 atomic_long_read(&xprt_switch->xps_queuelen)); 312 xprt_switch_put(xprt_switch); 313 return ret; 314 } 315 316 static ssize_t rpc_sysfs_xprt_dstaddr_store(struct kobject *kobj, ··· 472 return count; 473 } 474 475 int rpc_sysfs_init(void) 476 { 477 rpc_sunrpc_kset = kset_create_and_add("sunrpc", NULL, kernel_kobj); ··· 569 kobject)->xprt->xprt_net; 570 } 571 572 static struct kobj_attribute rpc_sysfs_xprt_dstaddr = __ATTR(dstaddr, 573 0644, rpc_sysfs_xprt_dstaddr_show, rpc_sysfs_xprt_dstaddr_store); 574 575 static struct kobj_attribute rpc_sysfs_xprt_srcaddr = __ATTR(srcaddr, 576 0644, rpc_sysfs_xprt_srcaddr_show, NULL); 577 578 static struct kobj_attribute rpc_sysfs_xprt_info = __ATTR(xprt_info, 579 0444, rpc_sysfs_xprt_info_show, NULL); ··· 601 static struct kobj_attribute rpc_sysfs_xprt_change_state = __ATTR(xprt_state, 602 0644, rpc_sysfs_xprt_state_show, rpc_sysfs_xprt_state_change); 603 604 static struct attribute *rpc_sysfs_xprt_attrs[] = { 605 &rpc_sysfs_xprt_dstaddr.attr, 606 &rpc_sysfs_xprt_srcaddr.attr, 607 &rpc_sysfs_xprt_info.attr, 608 &rpc_sysfs_xprt_change_state.attr, 609 NULL, 610 }; 611 ATTRIBUTE_GROUPS(rpc_sysfs_xprt); ··· 618 static struct kobj_attribute rpc_sysfs_xprt_switch_info = 619 __ATTR(xprt_switch_info, 0444, rpc_sysfs_xprt_switch_info_show, NULL); 620 621 static struct attribute *rpc_sysfs_xprt_switch_attrs[] = { 622 
&rpc_sysfs_xprt_switch_info.attr, 623 NULL, 624 }; 625 ATTRIBUTE_GROUPS(rpc_sysfs_xprt_switch); 626 627 static const struct kobj_type rpc_sysfs_client_type = { 628 .release = rpc_sysfs_client_release, 629 .sysfs_ops = &kobj_sysfs_ops, 630 .namespace = rpc_sysfs_client_namespace, 631 };
··· 59 return NULL; 60 } 61 62 + static inline struct rpc_clnt * 63 + rpc_sysfs_client_kobj_get_clnt(struct kobject *kobj) 64 + { 65 + struct rpc_sysfs_client *c = container_of(kobj, 66 + struct rpc_sysfs_client, kobject); 67 + struct rpc_clnt *ret = c->clnt; 68 + 69 + return refcount_inc_not_zero(&ret->cl_count) ? ret : NULL; 70 + } 71 + 72 static inline struct rpc_xprt * 73 rpc_sysfs_xprt_kobj_get_xprt(struct kobject *kobj) 74 { ··· 84 struct rpc_sysfs_xprt_switch, kobject); 85 86 return xprt_switch_get(x->xprt_switch); 87 + } 88 + 89 + static ssize_t rpc_sysfs_clnt_version_show(struct kobject *kobj, 90 + struct kobj_attribute *attr, 91 + char *buf) 92 + { 93 + struct rpc_clnt *clnt = rpc_sysfs_client_kobj_get_clnt(kobj); 94 + ssize_t ret; 95 + 96 + if (!clnt) 97 + return sprintf(buf, "<closed>\n"); 98 + 99 + ret = sprintf(buf, "%u", clnt->cl_vers); 100 + refcount_dec(&clnt->cl_count); 101 + return ret; 102 + } 103 + 104 + static ssize_t rpc_sysfs_clnt_program_show(struct kobject *kobj, 105 + struct kobj_attribute *attr, 106 + char *buf) 107 + { 108 + struct rpc_clnt *clnt = rpc_sysfs_client_kobj_get_clnt(kobj); 109 + ssize_t ret; 110 + 111 + if (!clnt) 112 + return sprintf(buf, "<closed>\n"); 113 + 114 + ret = sprintf(buf, "%s", clnt->cl_program->name); 115 + refcount_dec(&clnt->cl_count); 116 + return ret; 117 + } 118 + 119 + static ssize_t rpc_sysfs_clnt_max_connect_show(struct kobject *kobj, 120 + struct kobj_attribute *attr, 121 + char *buf) 122 + { 123 + struct rpc_clnt *clnt = rpc_sysfs_client_kobj_get_clnt(kobj); 124 + ssize_t ret; 125 + 126 + if (!clnt) 127 + return sprintf(buf, "<closed>\n"); 128 + 129 + ret = sprintf(buf, "%u\n", clnt->cl_max_connect); 130 + refcount_dec(&clnt->cl_count); 131 + return ret; 132 } 133 134 static ssize_t rpc_sysfs_xprt_dstaddr_show(struct kobject *kobj, ··· 127 ret = sprintf(buf, "<not a socket>\n"); 128 xprt_put(xprt); 129 return ret; 130 + } 131 + 132 + static const char *xprtsec_strings[] = { 133 + [RPC_XPRTSEC_NONE] = 
"none", 134 + [RPC_XPRTSEC_TLS_ANON] = "tls-anon", 135 + [RPC_XPRTSEC_TLS_X509] = "tls-x509", 136 + }; 137 + 138 + static ssize_t rpc_sysfs_xprt_xprtsec_show(struct kobject *kobj, 139 + struct kobj_attribute *attr, 140 + char *buf) 141 + { 142 + struct rpc_xprt *xprt = rpc_sysfs_xprt_kobj_get_xprt(kobj); 143 + ssize_t ret; 144 + 145 + if (!xprt) { 146 + ret = sprintf(buf, "<closed>\n"); 147 + goto out; 148 + } 149 + 150 + ret = sprintf(buf, "%s\n", xprtsec_strings[xprt->xprtsec.policy]); 151 + xprt_put(xprt); 152 + out: 153 + return ret; 154 + 155 } 156 157 static ssize_t rpc_sysfs_xprt_info_show(struct kobject *kobj, ··· 206 return ret; 207 } 208 209 + static ssize_t rpc_sysfs_xprt_del_xprt_show(struct kobject *kobj, 210 + struct kobj_attribute *attr, 211 + char *buf) 212 + { 213 + return sprintf(buf, "# delete this xprt\n"); 214 + } 215 + 216 + 217 static ssize_t rpc_sysfs_xprt_switch_info_show(struct kobject *kobj, 218 struct kobj_attribute *attr, 219 char *buf) ··· 223 atomic_long_read(&xprt_switch->xps_queuelen)); 224 xprt_switch_put(xprt_switch); 225 return ret; 226 + } 227 + 228 + static ssize_t rpc_sysfs_xprt_switch_add_xprt_show(struct kobject *kobj, 229 + struct kobj_attribute *attr, 230 + char *buf) 231 + { 232 + return sprintf(buf, "# add one xprt to this xprt_switch\n"); 233 + } 234 + 235 + static ssize_t rpc_sysfs_xprt_switch_add_xprt_store(struct kobject *kobj, 236 + struct kobj_attribute *attr, 237 + const char *buf, size_t count) 238 + { 239 + struct rpc_xprt_switch *xprt_switch = 240 + rpc_sysfs_xprt_switch_kobj_get_xprt(kobj); 241 + struct xprt_create xprt_create_args; 242 + struct rpc_xprt *xprt, *new; 243 + 244 + if (!xprt_switch) 245 + return 0; 246 + 247 + xprt = rpc_xprt_switch_get_main_xprt(xprt_switch); 248 + if (!xprt) 249 + goto out; 250 + 251 + xprt_create_args.ident = xprt->xprt_class->ident; 252 + xprt_create_args.net = xprt->xprt_net; 253 + xprt_create_args.dstaddr = (struct sockaddr *)&xprt->addr; 254 + xprt_create_args.addrlen = 
xprt->addrlen; 255 + xprt_create_args.servername = xprt->servername; 256 + xprt_create_args.bc_xprt = xprt->bc_xprt; 257 + xprt_create_args.xprtsec = xprt->xprtsec; 258 + xprt_create_args.connect_timeout = xprt->connect_timeout; 259 + xprt_create_args.reconnect_timeout = xprt->max_reconnect_timeout; 260 + 261 + new = xprt_create_transport(&xprt_create_args); 262 + if (IS_ERR_OR_NULL(new)) { 263 + count = PTR_ERR(new); 264 + goto out_put_xprt; 265 + } 266 + 267 + rpc_xprt_switch_add_xprt(xprt_switch, new); 268 + xprt_put(new); 269 + 270 + out_put_xprt: 271 + xprt_put(xprt); 272 + out: 273 + xprt_switch_put(xprt_switch); 274 + return count; 275 } 276 277 static ssize_t rpc_sysfs_xprt_dstaddr_store(struct kobject *kobj, ··· 335 return count; 336 } 337 338 + static ssize_t rpc_sysfs_xprt_del_xprt(struct kobject *kobj, 339 + struct kobj_attribute *attr, 340 + const char *buf, size_t count) 341 + { 342 + struct rpc_xprt *xprt = rpc_sysfs_xprt_kobj_get_xprt(kobj); 343 + struct rpc_xprt_switch *xps = rpc_sysfs_xprt_kobj_get_xprt_switch(kobj); 344 + 345 + if (!xprt || !xps) { 346 + count = 0; 347 + goto out; 348 + } 349 + 350 + if (xprt->main) { 351 + count = -EINVAL; 352 + goto release_tasks; 353 + } 354 + 355 + if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE)) { 356 + count = -EINTR; 357 + goto out_put; 358 + } 359 + 360 + xprt_set_offline_locked(xprt, xps); 361 + xprt_delete_locked(xprt, xps); 362 + 363 + release_tasks: 364 + xprt_release_write(xprt, NULL); 365 + out_put: 366 + xprt_put(xprt); 367 + xprt_switch_put(xps); 368 + out: 369 + return count; 370 + } 371 + 372 int rpc_sysfs_init(void) 373 { 374 rpc_sunrpc_kset = kset_create_and_add("sunrpc", NULL, kernel_kobj); ··· 398 kobject)->xprt->xprt_net; 399 } 400 401 + static struct kobj_attribute rpc_sysfs_clnt_version = __ATTR(rpc_version, 402 + 0444, rpc_sysfs_clnt_version_show, NULL); 403 + 404 + static struct kobj_attribute rpc_sysfs_clnt_program = __ATTR(program, 405 + 0444, 
rpc_sysfs_clnt_program_show, NULL); 406 + 407 + static struct kobj_attribute rpc_sysfs_clnt_max_connect = __ATTR(max_connect, 408 + 0444, rpc_sysfs_clnt_max_connect_show, NULL); 409 + 410 + static struct attribute *rpc_sysfs_rpc_clnt_attrs[] = { 411 + &rpc_sysfs_clnt_version.attr, 412 + &rpc_sysfs_clnt_program.attr, 413 + &rpc_sysfs_clnt_max_connect.attr, 414 + NULL, 415 + }; 416 + ATTRIBUTE_GROUPS(rpc_sysfs_rpc_clnt); 417 + 418 static struct kobj_attribute rpc_sysfs_xprt_dstaddr = __ATTR(dstaddr, 419 0644, rpc_sysfs_xprt_dstaddr_show, rpc_sysfs_xprt_dstaddr_store); 420 421 static struct kobj_attribute rpc_sysfs_xprt_srcaddr = __ATTR(srcaddr, 422 0644, rpc_sysfs_xprt_srcaddr_show, NULL); 423 + 424 + static struct kobj_attribute rpc_sysfs_xprt_xprtsec = __ATTR(xprtsec, 425 + 0644, rpc_sysfs_xprt_xprtsec_show, NULL); 426 427 static struct kobj_attribute rpc_sysfs_xprt_info = __ATTR(xprt_info, 428 0444, rpc_sysfs_xprt_info_show, NULL); ··· 410 static struct kobj_attribute rpc_sysfs_xprt_change_state = __ATTR(xprt_state, 411 0644, rpc_sysfs_xprt_state_show, rpc_sysfs_xprt_state_change); 412 413 + static struct kobj_attribute rpc_sysfs_xprt_del = __ATTR(del_xprt, 414 + 0644, rpc_sysfs_xprt_del_xprt_show, rpc_sysfs_xprt_del_xprt); 415 + 416 static struct attribute *rpc_sysfs_xprt_attrs[] = { 417 &rpc_sysfs_xprt_dstaddr.attr, 418 &rpc_sysfs_xprt_srcaddr.attr, 419 + &rpc_sysfs_xprt_xprtsec.attr, 420 &rpc_sysfs_xprt_info.attr, 421 &rpc_sysfs_xprt_change_state.attr, 422 + &rpc_sysfs_xprt_del.attr, 423 NULL, 424 }; 425 ATTRIBUTE_GROUPS(rpc_sysfs_xprt); ··· 422 static struct kobj_attribute rpc_sysfs_xprt_switch_info = 423 __ATTR(xprt_switch_info, 0444, rpc_sysfs_xprt_switch_info_show, NULL); 424 425 + static struct kobj_attribute rpc_sysfs_xprt_switch_add_xprt = 426 + __ATTR(add_xprt, 0644, rpc_sysfs_xprt_switch_add_xprt_show, 427 + rpc_sysfs_xprt_switch_add_xprt_store); 428 + 429 static struct attribute *rpc_sysfs_xprt_switch_attrs[] = { 430 &rpc_sysfs_xprt_switch_info.attr, 
431 + &rpc_sysfs_xprt_switch_add_xprt.attr, 432 NULL, 433 }; 434 ATTRIBUTE_GROUPS(rpc_sysfs_xprt_switch); 435 436 static const struct kobj_type rpc_sysfs_client_type = { 437 .release = rpc_sysfs_client_release, 438 + .default_groups = rpc_sysfs_rpc_clnt_groups, 439 .sysfs_ops = &kobj_sysfs_ops, 440 .namespace = rpc_sysfs_client_namespace, 441 };
+21
net/sunrpc/xprtmultipath.c
··· 92 xprt_put(xprt); 93 } 94 95 static DEFINE_IDA(rpc_xprtswitch_ids); 96 97 void xprt_multipath_cleanup_ids(void)
··· 92 xprt_put(xprt); 93 } 94 95 + /** 96 + * rpc_xprt_switch_get_main_xprt - Get the 'main' xprt for an xprt switch. 97 + * @xps: pointer to struct rpc_xprt_switch. 98 + */ 99 + struct rpc_xprt *rpc_xprt_switch_get_main_xprt(struct rpc_xprt_switch *xps) 100 + { 101 + struct rpc_xprt_iter xpi; 102 + struct rpc_xprt *xprt; 103 + 104 + xprt_iter_init_listall(&xpi, xps); 105 + 106 + xprt = xprt_iter_get_next(&xpi); 107 + while (xprt && !xprt->main) { 108 + xprt_put(xprt); 109 + xprt = xprt_iter_get_next(&xpi); 110 + } 111 + 112 + xprt_iter_destroy(&xpi); 113 + return xprt; 114 + } 115 + 116 static DEFINE_IDA(rpc_xprtswitch_ids); 117 118 void xprt_multipath_cleanup_ids(void)