Merge tag 'v6.11-rc5-client-fixes' of git://git.samba.org/sfrench/cifs-2.6

Pull smb client fixes from Steve French:

- two RDMA/smbdirect fixes and a minor cleanup

- punch hole fix

* tag 'v6.11-rc5-client-fixes' of git://git.samba.org/sfrench/cifs-2.6:
cifs: Fix FALLOC_FL_PUNCH_HOLE support
smb/client: fix rdma usage in smb2_async_writev()
smb/client: remove unused rq_iter_size from struct smb_rqst
smb/client: avoid dereferencing rdata=NULL in smb2_new_read_req()

+43 -27
-1
fs/smb/client/cifsglob.h
··· 254 254 struct smb_rqst { 255 255 struct kvec *rq_iov; /* array of kvecs */ 256 256 unsigned int rq_nvec; /* number of kvecs in array */ 257 - size_t rq_iter_size; /* Amount of data in ->rq_iter */ 258 257 struct iov_iter rq_iter; /* Data iterator */ 259 258 struct xarray rq_buffer; /* Page buffer for encryption */ 260 259 };
-1
fs/smb/client/cifssmb.c
··· 1713 1713 rqst.rq_iov = iov; 1714 1714 rqst.rq_nvec = 2; 1715 1715 rqst.rq_iter = wdata->subreq.io_iter; 1716 - rqst.rq_iter_size = iov_iter_count(&wdata->subreq.io_iter); 1717 1716 1718 1717 cifs_dbg(FYI, "async write at %llu %zu bytes\n", 1719 1718 wdata->subreq.start, wdata->subreq.len);
+22 -2
fs/smb/client/smb2ops.c
··· 3305 3305 struct inode *inode = file_inode(file); 3306 3306 struct cifsFileInfo *cfile = file->private_data; 3307 3307 struct file_zero_data_information fsctl_buf; 3308 + unsigned long long end = offset + len, i_size, remote_i_size; 3308 3309 long rc; 3309 3310 unsigned int xid; 3310 3311 __u8 set_sparse = 1; ··· 3337 3336 (char *)&fsctl_buf, 3338 3337 sizeof(struct file_zero_data_information), 3339 3338 CIFSMaxBufSize, NULL, NULL); 3339 + 3340 + if (rc) 3341 + goto unlock; 3342 + 3343 + /* If there's dirty data in the buffer that would extend the EOF if it 3344 + * were written, then we need to move the EOF marker over to the lower 3345 + * of the high end of the hole and the proposed EOF. The problem is 3346 + * that if we locally hole-punch the tail of the dirty data, the proposed 3347 + * EOF update will end up in the wrong place. 3348 + */ 3349 + i_size = i_size_read(inode); 3350 + remote_i_size = netfs_inode(inode)->remote_i_size; 3351 + if (end > remote_i_size && i_size > remote_i_size) { 3352 + unsigned long long extend_to = umin(end, i_size); 3353 + rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid, 3354 + cfile->fid.volatile_fid, cfile->pid, extend_to); 3355 + if (rc >= 0) 3356 + netfs_inode(inode)->remote_i_size = extend_to; 3357 + } 3358 + 3359 + unlock: 3340 3360 filemap_invalidate_unlock(inode->i_mapping); 3341 3361 out: 3342 3362 inode_unlock(inode); ··· 4468 4446 } 4469 4447 iov_iter_xarray(&new->rq_iter, ITER_SOURCE, 4470 4448 buffer, 0, size); 4471 - new->rq_iter_size = size; 4472 4449 } 4473 4450 } 4474 4451 ··· 4513 4492 rqst.rq_nvec = 2; 4514 4493 if (iter) { 4515 4494 rqst.rq_iter = *iter; 4516 - rqst.rq_iter_size = iov_iter_count(iter); 4517 4495 iter_size = iov_iter_count(iter); 4518 4496 } 4519 4497
+21 -23
fs/smb/client/smb2pdu.c
··· 4441 4441 * If we want to do a RDMA write, fill in and append 4442 4442 * smbd_buffer_descriptor_v1 to the end of read request 4443 4443 */ 4444 - if (smb3_use_rdma_offload(io_parms)) { 4444 + if (rdata && smb3_use_rdma_offload(io_parms)) { 4445 4445 struct smbd_buffer_descriptor_v1 *v1; 4446 4446 bool need_invalidate = server->dialect == SMB30_PROT_ID; 4447 4447 ··· 4523 4523 4524 4524 if (rdata->got_bytes) { 4525 4525 rqst.rq_iter = rdata->subreq.io_iter; 4526 - rqst.rq_iter_size = iov_iter_count(&rdata->subreq.io_iter); 4527 4526 } 4528 4527 4529 4528 WARN_ONCE(rdata->server != mid->server, ··· 4913 4914 if (rc) 4914 4915 goto out; 4915 4917 + rqst.rq_iov = iov; 4918 + rqst.rq_iter = wdata->subreq.io_iter; 4919 + 4920 + rqst.rq_iov[0].iov_len = total_len - 1; 4921 + rqst.rq_iov[0].iov_base = (char *)req; 4922 + rqst.rq_nvec += 1; 4923 + 4916 4924 if (smb3_encryption_required(tcon)) 4917 4925 flags |= CIFS_TRANSFORM_REQ; 4918 4926 ··· 4931 4925 req->WriteChannelInfoOffset = 0; 4932 4926 req->WriteChannelInfoLength = 0; 4933 4927 req->Channel = SMB2_CHANNEL_NONE; 4928 + req->Length = cpu_to_le32(io_parms->length); 4934 4929 req->Offset = cpu_to_le64(io_parms->offset); 4935 4930 req->DataOffset = cpu_to_le16( 4936 4931 offsetof(struct smb2_write_req, Buffer)); ··· 4951 4944 */ 4952 4945 if (smb3_use_rdma_offload(io_parms)) { 4953 4946 struct smbd_buffer_descriptor_v1 *v1; 4954 - size_t data_size = iov_iter_count(&wdata->subreq.io_iter); 4955 4947 bool need_invalidate = server->dialect == SMB30_PROT_ID; 4956 4948 4957 4949 wdata->mr = smbd_register_mr(server->smbd_conn, &wdata->subreq.io_iter, ··· 4959 4953 rc = -EAGAIN; 4960 4954 goto async_writev_out; 4961 4955 } 4956 + /* For RDMA read, I/O size is in RemainingBytes not in Length */ 4957 + req->RemainingBytes = req->Length; 4962 4958 req->Length = 0; 4963 4959 req->DataOffset = 0; 4964 - req->RemainingBytes = cpu_to_le32(data_size); 4965 4960 req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE; 4966 4961 if (need_invalidate) 4967 4962 req->Channel = SMB2_CHANNEL_RDMA_V1; ··· 4974 4967 v1->offset = cpu_to_le64(wdata->mr->mr->iova); 4975 4968 v1->token = cpu_to_le32(wdata->mr->mr->rkey); 4976 4969 v1->length = cpu_to_le32(wdata->mr->mr->length); 4970 + 4971 + rqst.rq_iov[0].iov_len += sizeof(*v1); 4972 + 4973 + /* 4974 + * We keep wdata->subreq.io_iter, 4975 + * but we have to truncate rqst.rq_iter 4976 + */ 4977 + iov_iter_truncate(&rqst.rq_iter, 0); 4977 4978 } 4978 4979 #endif 4979 - iov[0].iov_len = total_len - 1; 4980 - iov[0].iov_base = (char *)req; 4981 4980 4982 - rqst.rq_iov = iov; 4983 - rqst.rq_nvec = 1; 4984 - rqst.rq_iter = wdata->subreq.io_iter; 4985 - rqst.rq_iter_size = iov_iter_count(&rqst.rq_iter); 4986 4981 if (test_bit(NETFS_SREQ_RETRYING, &wdata->subreq.flags)) 4987 4982 smb2_set_replay(server, &rqst); 4988 - #ifdef CONFIG_CIFS_SMB_DIRECT 4989 - if (wdata->mr) 4990 - iov[0].iov_len += sizeof(struct smbd_buffer_descriptor_v1); 4991 - #endif 4992 - cifs_dbg(FYI, "async write at %llu %u bytes iter=%zx\n", 4993 - io_parms->offset, io_parms->length, iov_iter_count(&rqst.rq_iter)); 4994 4983 4995 - #ifdef CONFIG_CIFS_SMB_DIRECT 4996 - /* For RDMA read, I/O size is in RemainingBytes not in Length */ 4997 - if (!wdata->mr) 4998 - req->Length = cpu_to_le32(io_parms->length); 4999 - #else 5000 - req->Length = cpu_to_le32(io_parms->length); 5001 - #endif 4984 + cifs_dbg(FYI, "async write at %llu %u bytes iter=%zx\n", 4985 + io_parms->offset, io_parms->length, iov_iter_count(&wdata->subreq.io_iter)); 5002 4986 5003 4987 if (wdata->credits.value > 0) { 5004 4988 shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->subreq.len,