Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'nfs-for-6.6-1' of git://git.linux-nfs.org/projects/anna/linux-nfs

Pull NFS client updates from Anna Schumaker:
"New Features:
- Enable the NFS v4.2 READ_PLUS operation by default

Stable Fixes:
- NFSv4/pnfs: minor fix for cleanup path in nfs4_get_device_info
- NFS: Fix a potential data corruption

Bugfixes:
- Fix various READ_PLUS issues including:
- smatch warnings
- xdr size calculations
- scratch buffer handling
- 32bit / highmem xdr page handling
- Fix checkpatch errors in file.c
- Fix redundant readdir request after an EOF
- Fix handling of COPY ERR_OFFLOAD_NO_REQ
- Fix assignment of xprtdata.cred

Cleanups:
- Remove unused xprtrdma function declarations
- Clean up an integer overflow check to avoid a warning
- Clean up #includes in dns_resolve.c
- Clean up nfs4_get_device_info so we don't pass a NULL pointer
to __free_page()
- Clean up sunrpc TCP socket timeout configuration
- Guard against READDIR loops when entry names are too long
- Use EXCHGID4_FLAG_USE_PNFS_DS for DS servers"

* tag 'nfs-for-6.6-1' of git://git.linux-nfs.org/projects/anna/linux-nfs: (22 commits)
pNFS: Fix assignment of xprtdata.cred
NFSv4.2: fix handling of COPY ERR_OFFLOAD_NO_REQ
NFS: Guard against READDIR loop when entry names exceed MAXNAMELEN
NFSv4.1: use EXCHGID4_FLAG_USE_PNFS_DS for DS server
NFS/pNFS: Set the connect timeout for the pNFS flexfiles driver
SUNRPC: Don't override connect timeouts in rpc_clnt_add_xprt()
SUNRPC: Allow specification of TCP client connect timeout at setup
SUNRPC: Refactor and simplify connect timeout
SUNRPC: Set the TCP_SYNCNT to match the socket timeout
NFS: Fix a potential data corruption
nfs: fix redundant readdir request after get eof
nfs/blocklayout: Use the passed in gfp flags
filemap: Fix errors in file.c
NFSv4/pnfs: minor fix for cleanup path in nfs4_get_device_info
NFS: Move common includes outside ifdef
SUNRPC: clean up integer overflow check
xprtrdma: Remove unused function declaration rpcrdma_bc_post_recv()
NFS: Enable the READ_PLUS operation by default
SUNRPC: kmap() the xdr pages during decode
NFSv4.2: Rework scratch handling for READ_PLUS (again)
...

+171 -63
+2 -4
fs/nfs/Kconfig
··· 209 209 config NFS_V4_2_READ_PLUS 210 210 bool "NFS: Enable support for the NFSv4.2 READ_PLUS operation" 211 211 depends on NFS_V4_2 212 - default n 212 + default y 213 213 help 214 - This is intended for developers only. The READ_PLUS operation has 215 - been shown to have issues under specific conditions and should not 216 - be used in production. 214 + Choose Y here to enable use of the NFS v4.2 READ_PLUS operation.
+2 -2
fs/nfs/blocklayout/dev.c
··· 404 404 int ret, i; 405 405 406 406 d->children = kcalloc(v->concat.volumes_count, 407 - sizeof(struct pnfs_block_dev), GFP_KERNEL); 407 + sizeof(struct pnfs_block_dev), gfp_mask); 408 408 if (!d->children) 409 409 return -ENOMEM; 410 410 ··· 433 433 int ret, i; 434 434 435 435 d->children = kcalloc(v->stripe.volumes_count, 436 - sizeof(struct pnfs_block_dev), GFP_KERNEL); 436 + sizeof(struct pnfs_block_dev), gfp_mask); 437 437 if (!d->children) 438 438 return -ENOMEM; 439 439
+2
fs/nfs/client.c
··· 517 517 .authflavor = flavor, 518 518 .cred = cl_init->cred, 519 519 .xprtsec = cl_init->xprtsec, 520 + .connect_timeout = cl_init->connect_timeout, 521 + .reconnect_timeout = cl_init->reconnect_timeout, 520 522 }; 521 523 522 524 if (test_bit(NFS_CS_DISCRTRY, &clp->cl_flags))
+11 -4
fs/nfs/dir.c
··· 1089 1089 for (i = desc->cache_entry_index; i < array->size; i++) { 1090 1090 struct nfs_cache_array_entry *ent; 1091 1091 1092 + /* 1093 + * nfs_readdir_handle_cache_misses return force clear at 1094 + * (cache_misses > NFS_READDIR_CACHE_MISS_THRESHOLD) for 1095 + * readdir heuristic, NFS_READDIR_CACHE_MISS_THRESHOLD + 1 1096 + * entries need be emitted here. 1097 + */ 1098 + if (first_emit && i > NFS_READDIR_CACHE_MISS_THRESHOLD + 2) { 1099 + desc->eob = true; 1100 + break; 1101 + } 1102 + 1092 1103 ent = &array->array[i]; 1093 1104 if (!dir_emit(desc->ctx, ent->name, ent->name_len, 1094 1105 nfs_compat_user_ino64(ent->ino), ent->d_type)) { ··· 1118 1107 desc->ctx->pos = desc->dir_cookie; 1119 1108 else 1120 1109 desc->ctx->pos++; 1121 - if (first_emit && i > NFS_READDIR_CACHE_MISS_THRESHOLD + 1) { 1122 - desc->eob = true; 1123 - break; 1124 - } 1125 1110 } 1126 1111 if (array->folio_is_eof) 1127 1112 desc->eof = !desc->eob;
+19 -1
fs/nfs/direct.c
··· 472 472 return result; 473 473 } 474 474 475 + static void nfs_direct_add_page_head(struct list_head *list, 476 + struct nfs_page *req) 477 + { 478 + struct nfs_page *head = req->wb_head; 479 + 480 + if (!list_empty(&head->wb_list) || !nfs_lock_request(head)) 481 + return; 482 + if (!list_empty(&head->wb_list)) { 483 + nfs_unlock_request(head); 484 + return; 485 + } 486 + list_add(&head->wb_list, list); 487 + kref_get(&head->wb_kref); 488 + kref_get(&head->wb_kref); 489 + } 490 + 475 491 static void nfs_direct_join_group(struct list_head *list, struct inode *inode) 476 492 { 477 493 struct nfs_page *req, *subreq; 478 494 479 495 list_for_each_entry(req, list, wb_list) { 480 - if (req->wb_head != req) 496 + if (req->wb_head != req) { 497 + nfs_direct_add_page_head(&req->wb_list, req); 481 498 continue; 499 + } 482 500 subreq = req->wb_this_page; 483 501 if (subreq == req) 484 502 continue;
+5 -7
fs/nfs/dns_resolve.c
··· 7 7 * Resolves DNS hostnames into valid ip addresses 8 8 */ 9 9 10 - #ifdef CONFIG_NFS_USE_KERNEL_DNS 11 - 12 10 #include <linux/module.h> 13 11 #include <linux/sunrpc/clnt.h> 14 12 #include <linux/sunrpc/addr.h> 15 - #include <linux/dns_resolver.h> 13 + 16 14 #include "dns_resolve.h" 15 + 16 + #ifdef CONFIG_NFS_USE_KERNEL_DNS 17 + 18 + #include <linux/dns_resolver.h> 17 19 18 20 ssize_t nfs_dns_resolve_name(struct net *net, char *name, size_t namelen, 19 21 struct sockaddr_storage *ss, size_t salen) ··· 37 35 38 36 #else 39 37 40 - #include <linux/module.h> 41 38 #include <linux/hash.h> 42 39 #include <linux/string.h> 43 40 #include <linux/kmod.h> ··· 44 43 #include <linux/socket.h> 45 44 #include <linux/seq_file.h> 46 45 #include <linux/inet.h> 47 - #include <linux/sunrpc/clnt.h> 48 - #include <linux/sunrpc/addr.h> 49 46 #include <linux/sunrpc/cache.h> 50 47 #include <linux/sunrpc/svcauth.h> 51 48 #include <linux/sunrpc/rpc_pipe_fs.h> 52 49 #include <linux/nfs_fs.h> 53 50 54 51 #include "nfs4_fs.h" 55 - #include "dns_resolve.h" 56 52 #include "cache_lib.h" 57 53 #include "netns.h" 58 54
+1 -1
fs/nfs/file.c
··· 200 200 EXPORT_SYMBOL_GPL(nfs_file_splice_read); 201 201 202 202 int 203 - nfs_file_mmap(struct file * file, struct vm_area_struct * vma) 203 + nfs_file_mmap(struct file *file, struct vm_area_struct *vma) 204 204 { 205 205 struct inode *inode = file_inode(file); 206 206 int status;
+3
fs/nfs/internal.h
··· 82 82 const struct rpc_timeout *timeparms; 83 83 const struct cred *cred; 84 84 struct xprtsec_parms xprtsec; 85 + unsigned long connect_timeout; 86 + unsigned long reconnect_timeout; 85 87 }; 86 88 87 89 /* ··· 495 493 extern void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, 496 494 struct inode *inode, bool force_mds, 497 495 const struct nfs_pgio_completion_ops *compl_ops); 496 + extern bool nfs_read_alloc_scratch(struct nfs_pgio_header *hdr, size_t size); 498 497 extern int nfs_read_add_folio(struct nfs_pageio_descriptor *pgio, 499 498 struct nfs_open_context *ctx, 500 499 struct folio *folio);
+1 -1
fs/nfs/nfs2xdr.c
··· 949 949 950 950 error = decode_filename_inline(xdr, &entry->name, &entry->len); 951 951 if (unlikely(error)) 952 - return -EAGAIN; 952 + return error == -ENAMETOOLONG ? -ENAMETOOLONG : -EAGAIN; 953 953 954 954 /* 955 955 * The type (size and byte order) of nfscookie isn't defined in
+3
fs/nfs/nfs3client.c
··· 86 86 int ds_proto, unsigned int ds_timeo, unsigned int ds_retrans) 87 87 { 88 88 struct rpc_timeout ds_timeout; 89 + unsigned long connect_timeout = ds_timeo * (ds_retrans + 1) * HZ / 10; 89 90 struct nfs_client *mds_clp = mds_srv->nfs_client; 90 91 struct nfs_client_initdata cl_init = { 91 92 .addr = ds_addr, ··· 99 98 .timeparms = &ds_timeout, 100 99 .cred = mds_srv->cred, 101 100 .xprtsec = mds_clp->cl_xprtsec, 101 + .connect_timeout = connect_timeout, 102 + .reconnect_timeout = connect_timeout, 102 103 }; 103 104 struct nfs_client *clp; 104 105 char buf[INET6_ADDRSTRLEN + 1];
+1 -1
fs/nfs/nfs3xdr.c
··· 1991 1991 1992 1992 error = decode_inline_filename3(xdr, &entry->name, &entry->len); 1993 1993 if (unlikely(error)) 1994 - return -EAGAIN; 1994 + return error == -ENAMETOOLONG ? -ENAMETOOLONG : -EAGAIN; 1995 1995 1996 1996 error = decode_cookie3(xdr, &new_cookie); 1997 1997 if (unlikely(error))
+1
fs/nfs/nfs42.h
··· 13 13 * more? Need to consider not to pre-alloc too much for a compound. 14 14 */ 15 15 #define PNFS_LAYOUTSTATS_MAXDEV (4) 16 + #define READ_PLUS_SCRATCH_SIZE (16) 16 17 17 18 /* nfs4.2proc.c */ 18 19 #ifdef CONFIG_NFS_V4_2
+3 -2
fs/nfs/nfs42proc.c
··· 471 471 continue; 472 472 } 473 473 break; 474 - } else if (err == -NFS4ERR_OFFLOAD_NO_REQS && !args.sync) { 475 - args.sync = true; 474 + } else if (err == -NFS4ERR_OFFLOAD_NO_REQS && 475 + args.sync != res.synchronous) { 476 + args.sync = res.synchronous; 476 477 dst_exception.retry = 1; 477 478 continue; 478 479 } else if ((err == -ESTALE ||
+11 -6
fs/nfs/nfs42xdr.c
··· 54 54 (1 /* data_content4 */ + \ 55 55 2 /* data_info4.di_offset */ + \ 56 56 1 /* data_info4.di_length */) 57 + #define NFS42_READ_PLUS_HOLE_SEGMENT_SIZE \ 58 + (1 /* data_content4 */ + \ 59 + 2 /* data_info4.di_offset */ + \ 60 + 2 /* data_info4.di_length */) 61 + #define READ_PLUS_SEGMENT_SIZE_DIFF (NFS42_READ_PLUS_HOLE_SEGMENT_SIZE - \ 62 + NFS42_READ_PLUS_DATA_SEGMENT_SIZE) 57 63 #define decode_read_plus_maxsz (op_decode_hdr_maxsz + \ 58 64 1 /* rpr_eof */ + \ 59 65 1 /* rpr_contents count */ + \ 60 - NFS42_READ_PLUS_DATA_SEGMENT_SIZE) 66 + NFS42_READ_PLUS_HOLE_SEGMENT_SIZE) 61 67 #define encode_seek_maxsz (op_encode_hdr_maxsz + \ 62 68 encode_stateid_maxsz + \ 63 69 2 /* offset */ + \ ··· 623 617 encode_putfh(xdr, args->fh, &hdr); 624 618 encode_read_plus(xdr, args, &hdr); 625 619 626 - rpc_prepare_reply_pages(req, args->pages, args->pgbase, 627 - args->count, hdr.replen); 620 + rpc_prepare_reply_pages(req, args->pages, args->pgbase, args->count, 621 + hdr.replen - READ_PLUS_SEGMENT_SIZE_DIFF); 628 622 encode_nops(&hdr); 629 623 } 630 624 ··· 1062 1056 res->eof = be32_to_cpup(p++); 1063 1057 segments = be32_to_cpup(p++); 1064 1058 if (segments == 0) 1065 - return status; 1059 + return 0; 1066 1060 1067 1061 segs = kmalloc_array(segments, sizeof(*segs), GFP_KERNEL); 1068 1062 if (!segs) 1069 1063 return -ENOMEM; 1070 1064 1071 - status = -EIO; 1072 1065 for (i = 0; i < segments; i++) { 1073 1066 status = decode_read_plus_segment(xdr, &segs[i]); 1074 1067 if (status < 0) ··· 1433 1428 struct compound_hdr hdr; 1434 1429 int status; 1435 1430 1436 - xdr_set_scratch_buffer(xdr, res->scratch, sizeof(res->scratch)); 1431 + xdr_set_scratch_buffer(xdr, res->scratch, READ_PLUS_SCRATCH_SIZE); 1437 1432 1438 1433 status = decode_compound_hdr(xdr, &hdr); 1439 1434 if (status)
+3
fs/nfs/nfs4client.c
··· 232 232 __set_bit(NFS_CS_DISCRTRY, &clp->cl_flags); 233 233 __set_bit(NFS_CS_NO_RETRANS_TIMEOUT, &clp->cl_flags); 234 234 235 + if (test_bit(NFS_CS_DS, &cl_init->init_flags)) 236 + __set_bit(NFS_CS_DS, &clp->cl_flags); 235 237 /* 236 238 * Set up the connection to the server before we add add to the 237 239 * global list. ··· 1009 1007 if (mds_srv->flags & NFS_MOUNT_NORESVPORT) 1010 1008 __set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags); 1011 1009 1010 + __set_bit(NFS_CS_DS, &cl_init.init_flags); 1012 1011 /* 1013 1012 * Set an authflavor equual to the MDS value. Use the MDS nfs_client 1014 1013 * cl_ipaddr so as to use the same EXCHANGE_ID co_ownerid as the MDS
+5 -12
fs/nfs/nfs4proc.c
··· 5438 5438 return false; 5439 5439 } 5440 5440 5441 - static inline void nfs4_read_plus_scratch_free(struct nfs_pgio_header *hdr) 5442 - { 5443 - if (hdr->res.scratch) { 5444 - kfree(hdr->res.scratch); 5445 - hdr->res.scratch = NULL; 5446 - } 5447 - } 5448 - 5449 5441 static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr) 5450 5442 { 5451 - nfs4_read_plus_scratch_free(hdr); 5452 - 5453 5443 if (!nfs4_sequence_done(task, &hdr->res.seq_res)) 5454 5444 return -EAGAIN; 5455 5445 if (nfs4_read_stateid_changed(task, &hdr->args)) ··· 5459 5469 /* Note: We don't use READ_PLUS with pNFS yet */ 5460 5470 if (nfs_server_capable(hdr->inode, NFS_CAP_READ_PLUS) && !hdr->ds_clp) { 5461 5471 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS]; 5462 - hdr->res.scratch = kmalloc(32, GFP_KERNEL); 5463 - return hdr->res.scratch != NULL; 5472 + return nfs_read_alloc_scratch(hdr, READ_PLUS_SCRATCH_SIZE); 5464 5473 } 5465 5474 return false; 5466 5475 } ··· 8787 8798 #ifdef CONFIG_NFS_V4_1_MIGRATION 8788 8799 calldata->args.flags |= EXCHGID4_FLAG_SUPP_MOVED_MIGR; 8789 8800 #endif 8801 + if (test_bit(NFS_CS_DS, &clp->cl_flags)) 8802 + calldata->args.flags |= EXCHGID4_FLAG_USE_PNFS_DS; 8790 8803 msg.rpc_argp = &calldata->args; 8791 8804 msg.rpc_resp = &calldata->res; 8792 8805 task_setup_data.callback_data = calldata; ··· 8866 8875 /* Save the EXCHANGE_ID verifier session trunk tests */ 8867 8876 memcpy(clp->cl_confirm.data, argp->verifier.data, 8868 8877 sizeof(clp->cl_confirm.data)); 8878 + if (resp->flags & EXCHGID4_FLAG_USE_PNFS_DS) 8879 + set_bit(NFS_CS_DS, &clp->cl_flags); 8869 8880 out: 8870 8881 trace_nfs4_exchange_id(clp, status); 8871 8882 rpc_put_task(task);
+1 -1
fs/nfs/pnfs_dev.c
··· 154 154 set_bit(NFS_DEVICEID_NOCACHE, &d->flags); 155 155 156 156 out_free_pages: 157 - for (i = 0; i < max_pages; i++) 157 + while (--i >= 0) 158 158 __free_page(pages[i]); 159 159 kfree(pages); 160 160 out_free_pdev:
+4 -1
fs/nfs/pnfs_nfs.c
··· 852 852 { 853 853 struct nfs_client *clp = ERR_PTR(-EIO); 854 854 struct nfs4_pnfs_ds_addr *da; 855 + unsigned long connect_timeout = timeo * (retrans + 1) * HZ / 10; 855 856 int status = 0; 856 857 857 858 dprintk("--> %s DS %s\n", __func__, ds->ds_remotestr); ··· 871 870 .dstaddr = (struct sockaddr *)&da->da_addr, 872 871 .addrlen = da->da_addrlen, 873 872 .servername = clp->cl_hostname, 873 + .connect_timeout = connect_timeout, 874 + .reconnect_timeout = connect_timeout, 874 875 }; 875 876 876 877 if (da->da_transport != clp->cl_proto) ··· 946 943 * Test this address for session trunking and 947 944 * add as an alias 948 945 */ 949 - xprtdata.cred = nfs4_get_clid_cred(clp), 946 + xprtdata.cred = nfs4_get_clid_cred(clp); 950 947 rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args, 951 948 rpc_clnt_setup_test_and_add_xprt, 952 949 &rpcdata);
+10
fs/nfs/read.c
··· 47 47 48 48 static void nfs_readhdr_free(struct nfs_pgio_header *rhdr) 49 49 { 50 + if (rhdr->res.scratch != NULL) 51 + kfree(rhdr->res.scratch); 50 52 kmem_cache_free(nfs_rdata_cachep, rhdr); 51 53 } 52 54 ··· 109 107 mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize; 110 108 } 111 109 EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds); 110 + 111 + bool nfs_read_alloc_scratch(struct nfs_pgio_header *hdr, size_t size) 112 + { 113 + WARN_ON(hdr->res.scratch != NULL); 114 + hdr->res.scratch = kmalloc(size, GFP_KERNEL); 115 + return hdr->res.scratch != NULL; 116 + } 117 + EXPORT_SYMBOL_GPL(nfs_read_alloc_scratch); 112 118 113 119 static void nfs_readpage_release(struct nfs_page *req, int error) 114 120 {
+2
include/linux/sunrpc/clnt.h
··· 148 148 const struct cred *cred; 149 149 unsigned int max_connect; 150 150 struct xprtsec_parms xprtsec; 151 + unsigned long connect_timeout; 152 + unsigned long reconnect_timeout; 151 153 }; 152 154 153 155 struct rpc_add_xprt_test {
+3 -3
include/linux/sunrpc/xdr.h
··· 226 226 struct kvec *iov; /* pointer to the current kvec */ 227 227 struct kvec scratch; /* Scratch buffer */ 228 228 struct page **page_ptr; /* pointer to the current page */ 229 + void *page_kaddr; /* kmapped address of the current page */ 229 230 unsigned int nwords; /* Remaining decode buffer length */ 230 231 231 232 struct rpc_rqst *rqst; /* For debugging */ ··· 258 257 __be32 *p, struct rpc_rqst *rqst); 259 258 extern void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf, 260 259 struct page **pages, unsigned int len); 260 + extern void xdr_finish_decode(struct xdr_stream *xdr); 261 261 extern __be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes); 262 262 extern unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len); 263 263 extern void xdr_enter_page(struct xdr_stream *xdr, unsigned int len); ··· 779 777 780 778 if (unlikely(xdr_stream_decode_u32(xdr, &len) < 0)) 781 779 return -EBADMSG; 782 - if (len > SIZE_MAX / sizeof(*p)) 783 - return -EBADMSG; 784 - p = xdr_inline_decode(xdr, len * sizeof(*p)); 780 + p = xdr_inline_decode(xdr, size_mul(len, sizeof(*p))); 785 781 if (unlikely(!p)) 786 782 return -EBADMSG; 787 783 if (array == NULL)
+2
include/linux/sunrpc/xprt.h
··· 351 351 struct rpc_xprt_switch *bc_xps; 352 352 unsigned int flags; 353 353 struct xprtsec_parms xprtsec; 354 + unsigned long connect_timeout; 355 + unsigned long reconnect_timeout; 354 356 }; 355 357 356 358 struct xprt_class {
+8
net/sunrpc/clnt.c
··· 534 534 .servername = args->servername, 535 535 .bc_xprt = args->bc_xprt, 536 536 .xprtsec = args->xprtsec, 537 + .connect_timeout = args->connect_timeout, 538 + .reconnect_timeout = args->reconnect_timeout, 537 539 }; 538 540 char servername[48]; 539 541 struct rpc_clnt *clnt; ··· 2604 2602 case 0: 2605 2603 task->tk_action = rpc_exit_task; 2606 2604 task->tk_status = rpcauth_unwrap_resp(task, &xdr); 2605 + xdr_finish_decode(&xdr); 2607 2606 return; 2608 2607 case -EAGAIN: 2609 2608 task->tk_status = 0; ··· 3072 3069 } 3073 3070 xprt->resvport = resvport; 3074 3071 xprt->reuseport = reuseport; 3072 + 3073 + if (xprtargs->connect_timeout) 3074 + connect_timeout = xprtargs->connect_timeout; 3075 + if (xprtargs->reconnect_timeout) 3076 + reconnect_timeout = xprtargs->reconnect_timeout; 3075 3077 if (xprt->ops->set_connect_timeout != NULL) 3076 3078 xprt->ops->set_connect_timeout(xprt, 3077 3079 connect_timeout,
+2
net/sunrpc/svc.c
··· 1394 1394 rc = process.dispatch(rqstp); 1395 1395 if (procp->pc_release) 1396 1396 procp->pc_release(rqstp); 1397 + xdr_finish_decode(xdr); 1398 + 1397 1399 if (!rc) 1398 1400 goto dropit; 1399 1401 if (rqstp->rq_auth_stat != rpc_auth_ok)
+26 -1
net/sunrpc/xdr.c
··· 1338 1338 return xdr_set_iov(xdr, buf->tail, base, len); 1339 1339 } 1340 1340 1341 + static void xdr_stream_unmap_current_page(struct xdr_stream *xdr) 1342 + { 1343 + if (xdr->page_kaddr) { 1344 + kunmap_local(xdr->page_kaddr); 1345 + xdr->page_kaddr = NULL; 1346 + } 1347 + } 1348 + 1341 1349 static unsigned int xdr_set_page_base(struct xdr_stream *xdr, 1342 1350 unsigned int base, unsigned int len) 1343 1351 { ··· 1363 1355 if (len > maxlen) 1364 1356 len = maxlen; 1365 1357 1358 + xdr_stream_unmap_current_page(xdr); 1366 1359 xdr_stream_page_set_pos(xdr, base); 1367 1360 base += xdr->buf->page_base; 1368 1361 1369 1362 pgnr = base >> PAGE_SHIFT; 1370 1363 xdr->page_ptr = &xdr->buf->pages[pgnr]; 1371 - kaddr = page_address(*xdr->page_ptr); 1364 + 1365 + if (PageHighMem(*xdr->page_ptr)) { 1366 + xdr->page_kaddr = kmap_local_page(*xdr->page_ptr); 1367 + kaddr = xdr->page_kaddr; 1368 + } else 1369 + kaddr = page_address(*xdr->page_ptr); 1372 1370 1373 1371 pgoff = base & ~PAGE_MASK; 1374 1372 xdr->p = (__be32*)(kaddr + pgoff); ··· 1428 1414 struct rpc_rqst *rqst) 1429 1415 { 1430 1416 xdr->buf = buf; 1417 + xdr->page_kaddr = NULL; 1431 1418 xdr_reset_scratch_buffer(xdr); 1432 1419 xdr->nwords = XDR_QUADLEN(buf->len); 1433 1420 if (xdr_set_iov(xdr, buf->head, 0, buf->len) == 0 && ··· 1460 1445 xdr_init_decode(xdr, buf, NULL, NULL); 1461 1446 } 1462 1447 EXPORT_SYMBOL_GPL(xdr_init_decode_pages); 1448 + 1449 + /** 1450 + * xdr_finish_decode - Clean up the xdr_stream after decoding data. 1451 + * @xdr: pointer to xdr_stream struct 1452 + */ 1453 + void xdr_finish_decode(struct xdr_stream *xdr) 1454 + { 1455 + xdr_stream_unmap_current_page(xdr); 1456 + } 1457 + EXPORT_SYMBOL(xdr_finish_decode); 1463 1458 1464 1459 static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes) 1465 1460 {
-1
net/sunrpc/xprtrdma/xprt_rdma.h
··· 593 593 int xprt_rdma_bc_setup(struct rpc_xprt *, unsigned int); 594 594 size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *); 595 595 unsigned int xprt_rdma_bc_max_slots(struct rpc_xprt *); 596 - int rpcrdma_bc_post_recv(struct rpcrdma_xprt *, unsigned int); 597 596 void rpcrdma_bc_receive_call(struct rpcrdma_xprt *, struct rpcrdma_rep *); 598 597 int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst); 599 598 void xprt_rdma_bc_free_rqst(struct rpc_rqst *);
+40 -15
net/sunrpc/xprtsock.c
··· 2237 2237 struct socket *sock) 2238 2238 { 2239 2239 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2240 + struct net *net = sock_net(sock->sk); 2241 + unsigned long connect_timeout; 2242 + unsigned long syn_retries; 2240 2243 unsigned int keepidle; 2241 2244 unsigned int keepcnt; 2242 2245 unsigned int timeo; 2246 + unsigned long t; 2243 2247 2244 2248 spin_lock(&xprt->transport_lock); 2245 2249 keepidle = DIV_ROUND_UP(xprt->timeout->to_initval, HZ); ··· 2261 2257 2262 2258 /* TCP user timeout (see RFC5482) */ 2263 2259 tcp_sock_set_user_timeout(sock->sk, timeo); 2260 + 2261 + /* Connect timeout */ 2262 + connect_timeout = max_t(unsigned long, 2263 + DIV_ROUND_UP(xprt->connect_timeout, HZ), 1); 2264 + syn_retries = max_t(unsigned long, 2265 + READ_ONCE(net->ipv4.sysctl_tcp_syn_retries), 1); 2266 + for (t = 0; t <= syn_retries && (1UL << t) < connect_timeout; t++) 2267 + ; 2268 + if (t <= syn_retries) 2269 + tcp_sock_set_syncnt(sock->sk, t - 1); 2270 + } 2271 + 2272 + static void xs_tcp_do_set_connect_timeout(struct rpc_xprt *xprt, 2273 + unsigned long connect_timeout) 2274 + { 2275 + struct sock_xprt *transport = 2276 + container_of(xprt, struct sock_xprt, xprt); 2277 + struct rpc_timeout to; 2278 + unsigned long initval; 2279 + 2280 + memcpy(&to, xprt->timeout, sizeof(to)); 2281 + /* Arbitrary lower limit */ 2282 + initval = max_t(unsigned long, connect_timeout, XS_TCP_INIT_REEST_TO); 2283 + to.to_initval = initval; 2284 + to.to_maxval = initval; 2285 + to.to_retries = 0; 2286 + memcpy(&transport->tcp_timeout, &to, sizeof(transport->tcp_timeout)); 2287 + xprt->timeout = &transport->tcp_timeout; 2288 + xprt->connect_timeout = connect_timeout; 2264 2289 } 2265 2290 2266 2291 static void xs_tcp_set_connect_timeout(struct rpc_xprt *xprt, ··· 2297 2264 unsigned long reconnect_timeout) 2298 2265 { 2299 2266 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2300 - struct rpc_timeout to; 2301 - unsigned long initval; 
2302 2267 2303 2268 spin_lock(&xprt->transport_lock); 2304 2269 if (reconnect_timeout < xprt->max_reconnect_timeout) 2305 2270 xprt->max_reconnect_timeout = reconnect_timeout; 2306 - if (connect_timeout < xprt->connect_timeout) { 2307 - memcpy(&to, xprt->timeout, sizeof(to)); 2308 - initval = DIV_ROUND_UP(connect_timeout, to.to_retries + 1); 2309 - /* Arbitrary lower limit */ 2310 - if (initval < XS_TCP_INIT_REEST_TO << 1) 2311 - initval = XS_TCP_INIT_REEST_TO << 1; 2312 - to.to_initval = initval; 2313 - to.to_maxval = initval; 2314 - memcpy(&transport->tcp_timeout, &to, 2315 - sizeof(transport->tcp_timeout)); 2316 - xprt->timeout = &transport->tcp_timeout; 2317 - xprt->connect_timeout = connect_timeout; 2318 - } 2271 + if (connect_timeout < xprt->connect_timeout) 2272 + xs_tcp_do_set_connect_timeout(xprt, connect_timeout); 2319 2273 set_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state); 2320 2274 spin_unlock(&xprt->transport_lock); 2321 2275 } ··· 3355 3335 xprt->timeout = &xs_tcp_default_timeout; 3356 3336 3357 3337 xprt->max_reconnect_timeout = xprt->timeout->to_maxval; 3338 + if (args->reconnect_timeout) 3339 + xprt->max_reconnect_timeout = args->reconnect_timeout; 3340 + 3358 3341 xprt->connect_timeout = xprt->timeout->to_initval * 3359 3342 (xprt->timeout->to_retries + 1); 3343 + if (args->connect_timeout) 3344 + xs_tcp_do_set_connect_timeout(xprt, args->connect_timeout); 3360 3345 3361 3346 INIT_WORK(&transport->recv_worker, xs_stream_data_receive_workfn); 3362 3347 INIT_WORK(&transport->error_worker, xs_error_handle);