Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

cifs: Cut over to using netfslib

Make the cifs filesystem use netfslib to handle reading and writing on
behalf of cifs. The changes include:

(1) Various read_iter/write_iter type functions are turned into wrappers
around netfslib API functions or are pointed directly at those
functions:

cifs_file_direct{,_nobrl}_ops switch to use
netfs_unbuffered_read_iter and netfs_file_write_iter (the write path
reaches the unbuffered netfslib write internally, since inodes on
DIRECT_IO mounts are marked NETFS_ICTX_UNBUFFERED).

Large pieces of code that will be removed are #if'd out and will be removed
in subsequent patches.

[?] Why does cifs mark the page dirty in the destination buffer of a DIO
read? Should that happen automatically? Does netfs need to do that?

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Steve French <sfrench@samba.org>
cc: Shyam Prasad N <nspmangalore@gmail.com>
cc: Rohith Surabattula <rohiths.msft@gmail.com>
cc: Jeff Layton <jlayton@kernel.org>
cc: linux-cifs@vger.kernel.org
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
cc: linux-mm@kvack.org

+361 -180
+6 -1
fs/netfs/io.c
··· 213 213 unsigned int i; 214 214 size_t transferred = 0; 215 215 216 - for (i = 0; i < rreq->direct_bv_count; i++) 216 + for (i = 0; i < rreq->direct_bv_count; i++) { 217 217 flush_dcache_page(rreq->direct_bv[i].bv_page); 218 + // TODO: cifs marks pages in the destination buffer 219 + // dirty under some circumstances after a read. Do we 220 + // need to do that too? 221 + set_page_dirty(rreq->direct_bv[i].bv_page); 222 + } 218 223 219 224 list_for_each_entry(subreq, &rreq->subrequests, rreq_link) { 220 225 if (subreq->error || subreq->transferred == 0)
+4 -4
fs/smb/client/cifsfs.c
··· 1522 1522 }; 1523 1523 1524 1524 const struct file_operations cifs_file_direct_ops = { 1525 - .read_iter = cifs_direct_readv, 1526 - .write_iter = cifs_direct_writev, 1525 + .read_iter = netfs_unbuffered_read_iter, 1526 + .write_iter = netfs_file_write_iter, 1527 1527 .open = cifs_open, 1528 1528 .release = cifs_close, 1529 1529 .lock = cifs_lock, ··· 1578 1578 }; 1579 1579 1580 1580 const struct file_operations cifs_file_direct_nobrl_ops = { 1581 - .read_iter = cifs_direct_readv, 1582 - .write_iter = cifs_direct_writev, 1581 + .read_iter = netfs_unbuffered_read_iter, 1582 + .write_iter = netfs_file_write_iter, 1583 1583 .open = cifs_open, 1584 1584 .release = cifs_close, 1585 1585 .fsync = cifs_fsync,
-7
fs/smb/client/cifsfs.h
··· 94 94 extern int cifs_open(struct inode *inode, struct file *file); 95 95 extern int cifs_close(struct inode *inode, struct file *file); 96 96 extern int cifs_closedir(struct inode *inode, struct file *file); 97 - extern ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to); 98 - extern ssize_t cifs_direct_readv(struct kiocb *iocb, struct iov_iter *to); 99 97 extern ssize_t cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to); 100 - extern ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from); 101 - extern ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from); 102 98 extern ssize_t cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from); 103 99 ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from); 104 100 ssize_t cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter); ··· 108 112 extern const struct file_operations cifs_dir_ops; 109 113 extern int cifs_dir_open(struct inode *inode, struct file *file); 110 114 extern int cifs_readdir(struct file *file, struct dir_context *ctx); 111 - extern void cifs_pages_written_back(struct inode *inode, loff_t start, unsigned int len); 112 - extern void cifs_pages_write_failed(struct inode *inode, loff_t start, unsigned int len); 113 - extern void cifs_pages_write_redirty(struct inode *inode, loff_t start, unsigned int len); 114 115 115 116 /* Functions related to dir entries */ 116 117 extern const struct dentry_operations cifs_dentry_ops;
+3 -2
fs/smb/client/cifsglob.h
··· 451 451 /* async read from the server */ 452 452 int (*async_readv)(struct cifs_io_subrequest *); 453 453 /* async write to the server */ 454 - int (*async_writev)(struct cifs_io_subrequest *); 454 + void (*async_writev)(struct cifs_io_subrequest *); 455 455 /* sync read from the server */ 456 456 int (*sync_read)(const unsigned int, struct cifs_fid *, 457 457 struct cifs_io_parms *, unsigned int *, char **, ··· 1516 1516 #endif 1517 1517 struct cifs_credits credits; 1518 1518 1519 - // TODO: Remove following elements 1519 + #if 0 // TODO: Remove following elements 1520 1520 struct list_head list; 1521 1521 struct completion done; 1522 1522 struct work_struct work; ··· 1526 1526 enum writeback_sync_modes sync_mode; 1527 1527 bool uncached; 1528 1528 struct bio_vec *bv; 1529 + #endif 1529 1530 }; 1530 1531 1531 1532 /*
+7 -1
fs/smb/client/cifsproto.h
··· 148 148 bool from_readdir); 149 149 extern void cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset, 150 150 unsigned int bytes_written); 151 + void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result, 152 + bool was_async); 151 153 extern struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *, int); 152 154 extern int cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, 153 155 int flags, ··· 601 599 extern struct cifs_ses * 602 600 cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx); 603 601 602 + #if 0 // TODO Remove 604 603 void cifs_readdata_release(struct cifs_io_subrequest *rdata); 605 604 static inline void cifs_get_readdata(struct cifs_io_subrequest *rdata) 606 605 { ··· 612 609 if (refcount_dec_and_test(&rdata->subreq.ref)) 613 610 cifs_readdata_release(rdata); 614 611 } 612 + #endif 615 613 int cifs_async_readv(struct cifs_io_subrequest *rdata); 616 614 int cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid); 617 615 618 - int cifs_async_writev(struct cifs_io_subrequest *wdata); 616 + void cifs_async_writev(struct cifs_io_subrequest *wdata); 619 617 void cifs_writev_complete(struct work_struct *work); 618 + #if 0 // TODO Remove 620 619 struct cifs_io_subrequest *cifs_writedata_alloc(work_func_t complete); 621 620 void cifs_writedata_release(struct cifs_io_subrequest *rdata); 622 621 static inline void cifs_get_writedata(struct cifs_io_subrequest *wdata) ··· 630 625 if (refcount_dec_and_test(&wdata->subreq.ref)) 631 626 cifs_writedata_release(wdata); 632 627 } 628 + #endif 633 629 int cifs_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon, 634 630 struct cifs_sb_info *cifs_sb, 635 631 const unsigned char *path, char *pbuf,
+32 -22
fs/smb/client/cifssmb.c
··· 1265 1265 cifs_readv_callback(struct mid_q_entry *mid) 1266 1266 { 1267 1267 struct cifs_io_subrequest *rdata = mid->callback_data; 1268 - struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink); 1268 + struct cifs_tcon *tcon = tlink_tcon(rdata->req->cfile->tlink); 1269 1269 struct TCP_Server_Info *server = tcon->ses->server; 1270 1270 struct smb_rqst rqst = { .rq_iov = rdata->iov, 1271 1271 .rq_nvec = 2, ··· 1306 1306 rdata->result = -EIO; 1307 1307 } 1308 1308 1309 - queue_work(cifsiod_wq, &rdata->work); 1309 + if (rdata->result == 0 || rdata->result == -EAGAIN) 1310 + iov_iter_advance(&rdata->subreq.io_iter, rdata->got_bytes); 1311 + rdata->credits.value = 0; 1312 + netfs_subreq_terminated(&rdata->subreq, 1313 + (rdata->result == 0 || rdata->result == -EAGAIN) ? 1314 + rdata->got_bytes : rdata->result, 1315 + false); 1310 1316 release_mid(mid); 1311 1317 add_credits(server, &credits, 0); 1312 1318 } ··· 1324 1318 int rc; 1325 1319 READ_REQ *smb = NULL; 1326 1320 int wct; 1327 - struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink); 1321 + struct cifs_tcon *tcon = tlink_tcon(rdata->req->cfile->tlink); 1328 1322 struct smb_rqst rqst = { .rq_iov = rdata->iov, 1329 1323 .rq_nvec = 2 }; 1330 1324 ··· 1349 1343 smb->hdr.PidHigh = cpu_to_le16((__u16)(rdata->pid >> 16)); 1350 1344 1351 1345 smb->AndXCommand = 0xFF; /* none */ 1352 - smb->Fid = rdata->cfile->fid.netfid; 1346 + smb->Fid = rdata->req->cfile->fid.netfid; 1353 1347 smb->OffsetLow = cpu_to_le32(rdata->subreq.start & 0xFFFFFFFF); 1354 1348 if (wct == 12) 1355 1349 smb->OffsetHigh = cpu_to_le32(rdata->subreq.start >> 32); ··· 1619 1613 cifs_writev_callback(struct mid_q_entry *mid) 1620 1614 { 1621 1615 struct cifs_io_subrequest *wdata = mid->callback_data; 1622 - struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink); 1623 - unsigned int written; 1616 + struct cifs_tcon *tcon = tlink_tcon(wdata->req->cfile->tlink); 1624 1617 WRITE_RSP *smb = (WRITE_RSP *)mid->resp_buf; 1625 1618 struct cifs_credits 
credits = { .value = 1, .instance = 0 }; 1619 + ssize_t result; 1620 + size_t written; 1626 1621 1627 1622 switch (mid->mid_state) { 1628 1623 case MID_RESPONSE_RECEIVED: 1629 - wdata->result = cifs_check_receive(mid, tcon->ses->server, 0); 1630 - if (wdata->result != 0) 1624 + result = cifs_check_receive(mid, tcon->ses->server, 0); 1625 + if (result != 0) 1631 1626 break; 1632 1627 1633 1628 written = le16_to_cpu(smb->CountHigh); ··· 1644 1637 written &= 0xFFFF; 1645 1638 1646 1639 if (written < wdata->subreq.len) 1647 - wdata->result = -ENOSPC; 1640 + result = -ENOSPC; 1648 1641 else 1649 - wdata->subreq.len = written; 1642 + result = written; 1650 1643 break; 1651 1644 case MID_REQUEST_SUBMITTED: 1652 1645 case MID_RETRY_NEEDED: 1653 - wdata->result = -EAGAIN; 1646 + result = -EAGAIN; 1654 1647 break; 1655 1648 default: 1656 - wdata->result = -EIO; 1649 + result = -EIO; 1657 1650 break; 1658 1651 } 1659 1652 1660 - queue_work(cifsiod_wq, &wdata->work); 1653 + wdata->credits.value = 0; 1654 + cifs_write_subrequest_terminated(wdata, result, true); 1661 1655 release_mid(mid); 1662 1656 add_credits(tcon->ses->server, &credits, 0); 1663 1657 } 1664 1658 1665 1659 /* cifs_async_writev - send an async write, and set up mid to handle result */ 1666 - int 1660 + void 1667 1661 cifs_async_writev(struct cifs_io_subrequest *wdata) 1668 1662 { 1669 1663 int rc = -EACCES; 1670 1664 WRITE_REQ *smb = NULL; 1671 1665 int wct; 1672 - struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink); 1666 + struct cifs_tcon *tcon = tlink_tcon(wdata->req->cfile->tlink); 1673 1667 struct kvec iov[2]; 1674 1668 struct smb_rqst rqst = { }; 1675 1669 ··· 1680 1672 wct = 12; 1681 1673 if (wdata->subreq.start >> 32 > 0) { 1682 1674 /* can not handle big offset for old srv */ 1683 - return -EIO; 1675 + rc = -EIO; 1676 + goto out; 1684 1677 } 1685 1678 } 1686 1679 ··· 1693 1684 smb->hdr.PidHigh = cpu_to_le16((__u16)(wdata->pid >> 16)); 1694 1685 1695 1686 smb->AndXCommand = 0xFF; /* none */ 1696 - 
smb->Fid = wdata->cfile->fid.netfid; 1687 + smb->Fid = wdata->req->cfile->fid.netfid; 1697 1688 smb->OffsetLow = cpu_to_le32(wdata->subreq.start & 0xFFFFFFFF); 1698 1689 if (wct == 14) 1699 1690 smb->OffsetHigh = cpu_to_le32(wdata->subreq.start >> 32); ··· 1733 1724 iov[1].iov_len += 4; /* pad bigger by four bytes */ 1734 1725 } 1735 1726 1736 - cifs_get_writedata(wdata); 1737 1727 rc = cifs_call_async(tcon->ses->server, &rqst, NULL, 1738 1728 cifs_writev_callback, NULL, wdata, 0, NULL); 1739 - 1729 + /* Can't touch wdata if rc == 0 */ 1740 1730 if (rc == 0) 1741 1731 cifs_stats_inc(&tcon->stats.cifs_stats.num_writes); 1742 - else 1743 - cifs_put_writedata(wdata); 1744 1732 1745 1733 async_writev_out: 1746 1734 cifs_small_buf_release(smb); 1747 - return rc; 1735 + out: 1736 + if (rc) { 1737 + add_credits_and_wake_if(wdata->server, &wdata->credits, 0); 1738 + cifs_write_subrequest_terminated(wdata, rc, false); 1739 + } 1748 1740 } 1749 1741 1750 1742 int
+86 -75
fs/smb/client/file.c
··· 119 119 else 120 120 trace_netfs_sreq(subreq, netfs_sreq_trace_fail); 121 121 add_credits_and_wake_if(wdata->server, &wdata->credits, 0); 122 - netfs_write_subrequest_terminated(wdata, rc, false); 122 + cifs_write_subrequest_terminated(wdata, rc, false); 123 123 goto out; 124 124 } 125 125 ··· 352 352 .issue_write = cifs_issue_write, 353 353 }; 354 354 355 + #if 0 // TODO remove 397 355 356 /* 356 357 * Remove the dirty flags from a span of pages. 357 358 */ ··· 477 476 478 477 rcu_read_unlock(); 479 478 } 479 + #endif // end netfslib remove 397 480 480 481 481 /* 482 482 * Mark as invalid, all open files on tree connections since they ··· 2524 2522 return rc; 2525 2523 } 2526 2524 2527 - /* 2528 - * update the file size (if needed) after a write. Should be called with 2529 - * the inode->i_lock held 2530 - */ 2531 - void 2532 - cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset, 2533 - unsigned int bytes_written) 2525 + void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result, 2526 + bool was_async) 2534 2527 { 2535 - loff_t end_of_write = offset + bytes_written; 2528 + struct netfs_io_request *wreq = wdata->rreq; 2529 + loff_t new_server_eof; 2536 2530 2537 - if (end_of_write > cifsi->netfs.remote_i_size) 2538 - netfs_resize_file(&cifsi->netfs, end_of_write, true); 2531 + if (result > 0) { 2532 + new_server_eof = wdata->subreq.start + wdata->subreq.transferred + result; 2533 + 2534 + if (new_server_eof > netfs_inode(wreq->inode)->remote_i_size) 2535 + netfs_resize_file(netfs_inode(wreq->inode), new_server_eof, true); 2536 + } 2537 + 2538 + netfs_write_subrequest_terminated(&wdata->subreq, result, was_async); 2539 2539 } 2540 2540 2541 + #if 0 // TODO remove 2483 2541 2542 static ssize_t 2542 2543 cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data, 2543 2544 size_t write_size, loff_t *offset) ··· 2624 2619 free_xid(xid); 2625 2620 return total_written; 2626 2621 } 2622 + #endif // end netfslib 
remove 2483 2627 2623 2628 2624 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode, 2629 2625 bool fsuid_only) ··· 2830 2824 return -ENOENT; 2831 2825 } 2832 2826 2827 + #if 0 // TODO remove 2773 2833 2828 void 2834 2829 cifs_writedata_release(struct cifs_io_subrequest *wdata) 2835 2830 { ··· 3461 3454 3462 3455 return rc; 3463 3456 } 3457 + #endif // End netfs removal 2773 3464 3458 3459 + /* 3460 + * Flush data on a strict file. 3461 + */ 3465 3462 int cifs_strict_fsync(struct file *file, loff_t start, loff_t end, 3466 3463 int datasync) 3467 3464 { ··· 3520 3509 return rc; 3521 3510 } 3522 3511 3512 + /* 3513 + * Flush data on a non-strict data. 3514 + */ 3523 3515 int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync) 3524 3516 { 3525 3517 unsigned int xid; ··· 3589 3575 return rc; 3590 3576 } 3591 3577 3578 + #if 0 // TODO remove 3594 3592 3579 static void collect_uncached_write_data(struct cifs_aio_ctx *ctx); 3593 3580 3594 3581 static void ··· 4052 4037 { 4053 4038 return __cifs_writev(iocb, from, false); 4054 4039 } 4040 + #endif // TODO remove 3594 4055 4041 4056 4042 static ssize_t 4057 4043 cifs_writev(struct kiocb *iocb, struct iov_iter *from) ··· 4064 4048 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server; 4065 4049 ssize_t rc; 4066 4050 4067 - inode_lock(inode); 4051 + rc = netfs_start_io_write(inode); 4052 + if (rc < 0) 4053 + return rc; 4054 + 4068 4055 /* 4069 4056 * We need to hold the sem to be sure nobody modifies lock list 4070 4057 * with a brlock that prevents writing. 
··· 4081 4062 if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from), 4082 4063 server->vals->exclusive_lock_type, 0, 4083 4064 NULL, CIFS_WRITE_OP)) 4084 - rc = __generic_file_write_iter(iocb, from); 4065 + rc = netfs_buffered_write_iter_locked(iocb, from, NULL); 4085 4066 else 4086 4067 rc = -EACCES; 4087 4068 out: 4088 4069 up_read(&cinode->lock_sem); 4089 - inode_unlock(inode); 4090 - 4070 + netfs_end_io_write(inode); 4091 4071 if (rc > 0) 4092 4072 rc = generic_write_sync(iocb, rc); 4093 4073 return rc; ··· 4109 4091 4110 4092 if (CIFS_CACHE_WRITE(cinode)) { 4111 4093 if (cap_unix(tcon->ses) && 4112 - (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) 4113 - && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) { 4114 - written = generic_file_write_iter(iocb, from); 4094 + (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && 4095 + ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) { 4096 + written = netfs_file_write_iter(iocb, from); 4115 4097 goto out; 4116 4098 } 4117 4099 written = cifs_writev(iocb, from); ··· 4123 4105 * affected pages because it may cause a error with mandatory locks on 4124 4106 * these pages but not on the region from pos to ppos+len-1. 
4125 4107 */ 4126 - written = cifs_user_writev(iocb, from); 4108 + written = netfs_file_write_iter(iocb, from); 4127 4109 if (CIFS_CACHE_READ(cinode)) { 4128 4110 /* 4129 4111 * We have read level caching and we have just sent a write ··· 4142 4124 return written; 4143 4125 } 4144 4126 4127 + #if 0 // TODO remove 4143 4145 4128 static struct cifs_io_subrequest *cifs_readdata_alloc(work_func_t complete) 4146 4129 { 4147 4130 struct cifs_io_subrequest *rdata; ··· 4582 4563 ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to) 4583 4564 { 4584 4565 return __cifs_readv(iocb, to, false); 4566 + 4585 4567 } 4568 + #endif // end netfslib removal 4143 4586 4569 4587 4570 ssize_t cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter) 4588 4571 { ··· 4592 4571 struct inode *inode = file_inode(iocb->ki_filp); 4593 4572 4594 4573 if (iocb->ki_flags & IOCB_DIRECT) 4595 - return cifs_user_readv(iocb, iter); 4574 + return netfs_unbuffered_read_iter(iocb, iter); 4596 4575 4597 4576 rc = cifs_revalidate_mapping(inode); 4598 4577 if (rc) 4599 4578 return rc; 4600 4579 4601 - return generic_file_read_iter(iocb, iter); 4580 + return netfs_file_read_iter(iocb, iter); 4602 4581 } 4603 4582 4604 4583 ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) ··· 4609 4588 int rc; 4610 4589 4611 4590 if (iocb->ki_filp->f_flags & O_DIRECT) { 4612 - written = cifs_user_writev(iocb, from); 4591 + written = netfs_unbuffered_write_iter(iocb, from); 4613 4592 if (written > 0 && CIFS_CACHE_READ(cinode)) { 4614 4593 cifs_zap_mapping(inode); 4615 4594 cifs_dbg(FYI, ··· 4624 4603 if (written) 4625 4604 return written; 4626 4605 4627 - written = generic_file_write_iter(iocb, from); 4606 + written = netfs_file_write_iter(iocb, from); 4628 4607 4629 - if (CIFS_CACHE_WRITE(CIFS_I(inode))) 4630 - goto out; 4608 + if (!CIFS_CACHE_WRITE(CIFS_I(inode))) { 4609 + rc = filemap_fdatawrite(inode->i_mapping); 4610 + if (rc) 4611 + cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p 
inode\n", 4612 + rc, inode); 4613 + } 4631 4614 4632 - rc = filemap_fdatawrite(inode->i_mapping); 4633 - if (rc) 4634 - cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n", 4635 - rc, inode); 4636 - 4637 - out: 4638 4615 cifs_put_writer(cinode); 4639 4616 return written; 4640 4617 } ··· 4657 4638 * pos+len-1. 4658 4639 */ 4659 4640 if (!CIFS_CACHE_READ(cinode)) 4660 - return cifs_user_readv(iocb, to); 4641 + return netfs_unbuffered_read_iter(iocb, to); 4661 4642 4662 4643 if (cap_unix(tcon->ses) && 4663 4644 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && 4664 - ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) 4665 - return generic_file_read_iter(iocb, to); 4645 + ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) { 4646 + if (iocb->ki_flags & IOCB_DIRECT) 4647 + return netfs_unbuffered_read_iter(iocb, to); 4648 + return netfs_buffered_read_iter(iocb, to); 4649 + } 4666 4650 4667 4651 /* 4668 4652 * We need to hold the sem to be sure nobody modifies lock list ··· 4674 4652 down_read(&cinode->lock_sem); 4675 4653 if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to), 4676 4654 tcon->ses->server->vals->shared_lock_type, 4677 - 0, NULL, CIFS_READ_OP)) 4678 - rc = generic_file_read_iter(iocb, to); 4655 + 0, NULL, CIFS_READ_OP)) { 4656 + if (iocb->ki_flags & IOCB_DIRECT) 4657 + rc = netfs_unbuffered_read_iter(iocb, to); 4658 + else 4659 + rc = netfs_buffered_read_iter(iocb, to); 4660 + } 4679 4661 up_read(&cinode->lock_sem); 4680 4662 return rc; 4681 4663 } 4682 4664 4665 + #if 0 // TODO remove 4633 4683 4666 static ssize_t 4684 4667 cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset) 4685 4668 { ··· 4776 4749 free_xid(xid); 4777 4750 return total_read; 4778 4751 } 4752 + #endif // end netfslib remove 4633 4779 4753 4780 - /* 4781 - * If the page is mmap'ed into a process' page tables, then we need to make 4782 - * sure that it doesn't change while being written back. 
4783 - */ 4784 4754 static vm_fault_t cifs_page_mkwrite(struct vm_fault *vmf) 4785 4755 { 4786 - struct folio *folio = page_folio(vmf->page); 4787 - 4788 - /* Wait for the folio to be written to the cache before we allow it to 4789 - * be modified. We then assume the entire folio will need writing back. 4790 - */ 4791 - #ifdef CONFIG_CIFS_FSCACHE 4792 - if (folio_test_private_2(folio) && /* [DEPRECATED] */ 4793 - folio_wait_private_2_killable(folio) < 0) 4794 - return VM_FAULT_RETRY; 4795 - #endif 4796 - 4797 - folio_wait_writeback(folio); 4798 - 4799 - if (folio_lock_killable(folio) < 0) 4800 - return VM_FAULT_RETRY; 4801 - return VM_FAULT_LOCKED; 4756 + return netfs_page_mkwrite(vmf, NULL); 4802 4757 } 4803 4758 4804 4759 static const struct vm_operations_struct cifs_file_vm_ops = { ··· 4826 4817 return rc; 4827 4818 } 4828 4819 4820 + #if 0 // TODO remove 4794 4829 4821 /* 4830 4822 * Unlock a bunch of folios in the pagecache. 4831 4823 */ ··· 5111 5101 free_xid(xid); 5112 5102 return rc; 5113 5103 } 5104 + #endif // end netfslib remove 4794 5114 5105 5115 5106 static int is_inode_writable(struct cifsInodeInfo *cifs_inode) 5116 5107 { ··· 5160 5149 return true; 5161 5150 } 5162 5151 5152 + #if 0 // TODO remove 5152 5163 5153 static int cifs_write_begin(struct file *file, struct address_space *mapping, 5164 5154 loff_t pos, unsigned len, 5165 5155 struct page **pagep, void **fsdata) ··· 5256 5244 { 5257 5245 folio_wait_private_2(folio); /* [DEPRECATED] */ 5258 5246 } 5247 + #endif // end netfslib remove 5152 5259 5248 5260 5249 void cifs_oplock_break(struct work_struct *work) 5261 5250 { ··· 5347 5334 cifs_done_oplock_break(cinode); 5348 5335 } 5349 5336 5337 + #if 0 // TODO remove 5333 5350 5338 /* 5351 5339 * The presence of cifs_direct_io() in the address space ops vector 5352 5340 * allowes open() O_DIRECT flags which would have failed otherwise. 
··· 5366 5352 */ 5367 5353 return -EINVAL; 5368 5354 } 5355 + #endif // netfs end remove 5333 5369 5356 5370 5357 static int cifs_swap_activate(struct swap_info_struct *sis, 5371 5358 struct file *swap_file, sector_t *span) ··· 5429 5414 } 5430 5415 5431 5416 const struct address_space_operations cifs_addr_ops = { 5432 - .read_folio = cifs_read_folio, 5433 - .readahead = cifs_readahead, 5434 - .writepages = cifs_writepages, 5435 - .write_begin = cifs_write_begin, 5436 - .write_end = cifs_write_end, 5437 - .dirty_folio = netfs_dirty_folio, 5438 - .release_folio = cifs_release_folio, 5439 - .direct_IO = cifs_direct_io, 5440 - .invalidate_folio = cifs_invalidate_folio, 5441 - .migrate_folio = filemap_migrate_folio, 5417 + .read_folio = netfs_read_folio, 5418 + .readahead = netfs_readahead, 5419 + .writepages = netfs_writepages, 5420 + .dirty_folio = netfs_dirty_folio, 5421 + .release_folio = netfs_release_folio, 5422 + .direct_IO = noop_direct_IO, 5423 + .invalidate_folio = netfs_invalidate_folio, 5424 + .migrate_folio = filemap_migrate_folio, 5442 5425 /* 5443 5426 * TODO: investigate and if useful we could add an is_dirty_writeback 5444 5427 * helper if needed 5445 5428 */ 5446 - .swap_activate = cifs_swap_activate, 5429 + .swap_activate = cifs_swap_activate, 5447 5430 .swap_deactivate = cifs_swap_deactivate, 5448 5431 }; 5449 5432 ··· 5451 5438 * to leave cifs_readahead out of the address space operations. 
5452 5439 */ 5453 5440 const struct address_space_operations cifs_addr_ops_smallbuf = { 5454 - .read_folio = cifs_read_folio, 5455 - .writepages = cifs_writepages, 5456 - .write_begin = cifs_write_begin, 5457 - .write_end = cifs_write_end, 5458 - .dirty_folio = netfs_dirty_folio, 5459 - .release_folio = cifs_release_folio, 5460 - .invalidate_folio = cifs_invalidate_folio, 5461 - .migrate_folio = filemap_migrate_folio, 5441 + .read_folio = netfs_read_folio, 5442 + .writepages = netfs_writepages, 5443 + .dirty_folio = netfs_dirty_folio, 5444 + .release_folio = netfs_release_folio, 5445 + .invalidate_folio = netfs_invalidate_folio, 5446 + .migrate_folio = filemap_migrate_folio, 5462 5447 };
+2
fs/smb/client/fscache.c
··· 171 171 } 172 172 } 173 173 174 + #if 0 // TODO remove 174 175 /* 175 176 * Fallback page reading interface. 176 177 */ ··· 280 279 fscache_end_operation(&cres); 281 280 return ret; 282 281 } 282 + #endif
+4
fs/smb/client/fscache.h
··· 74 74 i_size_read(inode), flags); 75 75 } 76 76 77 + #if 0 // TODO remove 77 78 extern int __cifs_fscache_query_occupancy(struct inode *inode, 78 79 pgoff_t first, unsigned int nr_pages, 79 80 pgoff_t *_data_first, ··· 109 108 if (cifs_inode_cookie(inode)) 110 109 __cifs_readahead_to_fscache(inode, pos, len); 111 110 } 111 + #endif 112 112 113 113 static inline bool cifs_fscache_enabled(struct inode *inode) 114 114 { ··· 133 131 static inline void cifs_invalidate_cache(struct inode *inode, unsigned int flags) {} 134 132 static inline bool cifs_fscache_enabled(struct inode *inode) { return false; } 135 133 134 + #if 0 // TODO remove 136 135 static inline int cifs_fscache_query_occupancy(struct inode *inode, 137 136 pgoff_t first, unsigned int nr_pages, 138 137 pgoff_t *_data_first, ··· 152 149 153 150 static inline 154 151 void cifs_readahead_to_fscache(struct inode *inode, loff_t pos, size_t len) {} 152 + #endif 155 153 156 154 #endif /* CONFIG_CIFS_FSCACHE */ 157 155
+18 -1
fs/smb/client/inode.c
··· 28 28 #include "cached_dir.h" 29 29 #include "reparse.h" 30 30 31 + /* 32 + * Set parameters for the netfs library 33 + */ 34 + static void cifs_set_netfs_context(struct inode *inode) 35 + { 36 + struct cifsInodeInfo *cifs_i = CIFS_I(inode); 37 + struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 38 + 39 + netfs_inode_init(&cifs_i->netfs, &cifs_req_ops, true); 40 + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) 41 + __set_bit(NETFS_ICTX_WRITETHROUGH, &cifs_i->netfs.flags); 42 + } 43 + 31 44 static void cifs_set_ops(struct inode *inode) 32 45 { 33 46 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 47 + struct netfs_inode *ictx = netfs_inode(inode); 34 48 35 49 switch (inode->i_mode & S_IFMT) { 36 50 case S_IFREG: 37 51 inode->i_op = &cifs_file_inode_ops; 38 52 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) { 53 + set_bit(NETFS_ICTX_UNBUFFERED, &ictx->flags); 39 54 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL) 40 55 inode->i_fop = &cifs_file_direct_nobrl_ops; 41 56 else ··· 236 221 237 222 if (fattr->cf_flags & CIFS_FATTR_JUNCTION) 238 223 inode->i_flags |= S_AUTOMOUNT; 239 - if (inode->i_state & I_NEW) 224 + if (inode->i_state & I_NEW) { 225 + cifs_set_netfs_context(inode); 240 226 cifs_set_ops(inode); 227 + } 241 228 return 0; 242 229 } 243 230
+70 -47
fs/smb/client/smb2pdu.c
··· 4421 4421 req->Length = cpu_to_le32(io_parms->length); 4422 4422 req->Offset = cpu_to_le64(io_parms->offset); 4423 4423 4424 - trace_smb3_read_enter(0 /* xid */, 4425 - io_parms->persistent_fid, 4426 - io_parms->tcon->tid, io_parms->tcon->ses->Suid, 4427 - io_parms->offset, io_parms->length); 4424 + trace_smb3_read_enter(rdata ? rdata->rreq->debug_id : 0, 4425 + rdata ? rdata->subreq.debug_index : 0, 4426 + rdata ? rdata->xid : 0, 4427 + io_parms->persistent_fid, 4428 + io_parms->tcon->tid, io_parms->tcon->ses->Suid, 4429 + io_parms->offset, io_parms->length); 4428 4430 #ifdef CONFIG_CIFS_SMB_DIRECT 4429 4431 /* 4430 4432 * If we want to do a RDMA write, fill in and append ··· 4488 4486 smb2_readv_callback(struct mid_q_entry *mid) 4489 4487 { 4490 4488 struct cifs_io_subrequest *rdata = mid->callback_data; 4491 - struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink); 4489 + struct cifs_tcon *tcon = tlink_tcon(rdata->req->cfile->tlink); 4492 4490 struct TCP_Server_Info *server = rdata->server; 4493 4491 struct smb2_hdr *shdr = 4494 4492 (struct smb2_hdr *)rdata->iov[0].iov_base; ··· 4516 4514 if (server->sign && !mid->decrypted) { 4517 4515 int rc; 4518 4516 4519 - iov_iter_revert(&rqst.rq_iter, rdata->got_bytes); 4520 4517 iov_iter_truncate(&rqst.rq_iter, rdata->got_bytes); 4521 4518 rc = smb2_verify_signature(&rqst, server); 4522 4519 if (rc) ··· 4556 4555 #endif 4557 4556 if (rdata->result && rdata->result != -ENODATA) { 4558 4557 cifs_stats_fail_inc(tcon, SMB2_READ_HE); 4559 - trace_smb3_read_err(0 /* xid */, 4560 - rdata->cfile->fid.persistent_fid, 4558 + trace_smb3_read_err(rdata->rreq->debug_id, 4559 + rdata->subreq.debug_index, 4560 + rdata->xid, 4561 + rdata->req->cfile->fid.persistent_fid, 4561 4562 tcon->tid, tcon->ses->Suid, rdata->subreq.start, 4562 4563 rdata->subreq.len, rdata->result); 4563 4564 } else 4564 - trace_smb3_read_done(0 /* xid */, 4565 - rdata->cfile->fid.persistent_fid, 4565 + trace_smb3_read_done(rdata->rreq->debug_id, 4566 + 
rdata->subreq.debug_index, 4567 + rdata->xid, 4568 + rdata->req->cfile->fid.persistent_fid, 4566 4569 tcon->tid, tcon->ses->Suid, 4567 4570 rdata->subreq.start, rdata->got_bytes); 4568 4571 4569 - queue_work(cifsiod_wq, &rdata->work); 4572 + if (rdata->result == -ENODATA) { 4573 + /* We may have got an EOF error because fallocate 4574 + * failed to enlarge the file. 4575 + */ 4576 + if (rdata->subreq.start < rdata->subreq.rreq->i_size) 4577 + rdata->result = 0; 4578 + } 4579 + if (rdata->result == 0 || rdata->result == -EAGAIN) 4580 + iov_iter_advance(&rdata->subreq.io_iter, rdata->got_bytes); 4581 + rdata->credits.value = 0; 4582 + netfs_subreq_terminated(&rdata->subreq, 4583 + (rdata->result == 0 || rdata->result == -EAGAIN) ? 4584 + rdata->got_bytes : rdata->result, true); 4570 4585 release_mid(mid); 4571 4586 add_credits(server, &credits, 0); 4572 4587 } ··· 4598 4581 struct smb_rqst rqst = { .rq_iov = rdata->iov, 4599 4582 .rq_nvec = 1 }; 4600 4583 struct TCP_Server_Info *server; 4601 - struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink); 4584 + struct cifs_tcon *tcon = tlink_tcon(rdata->req->cfile->tlink); 4602 4585 unsigned int total_len; 4603 4586 int credit_request; 4604 4587 ··· 4608 4591 if (!rdata->server) 4609 4592 rdata->server = cifs_pick_channel(tcon->ses); 4610 4593 4611 - io_parms.tcon = tlink_tcon(rdata->cfile->tlink); 4594 + io_parms.tcon = tlink_tcon(rdata->req->cfile->tlink); 4612 4595 io_parms.server = server = rdata->server; 4613 4596 io_parms.offset = rdata->subreq.start; 4614 4597 io_parms.length = rdata->subreq.len; 4615 - io_parms.persistent_fid = rdata->cfile->fid.persistent_fid; 4616 - io_parms.volatile_fid = rdata->cfile->fid.volatile_fid; 4598 + io_parms.persistent_fid = rdata->req->cfile->fid.persistent_fid; 4599 + io_parms.volatile_fid = rdata->req->cfile->fid.volatile_fid; 4617 4600 io_parms.pid = rdata->pid; 4618 4601 4619 4602 rc = smb2_new_read_req( ··· 4647 4630 flags |= CIFS_HAS_CREDITS; 4648 4631 } 4649 4632 4650 - 
cifs_get_readdata(rdata); 4651 4633 rc = cifs_call_async(server, &rqst, 4652 4634 cifs_readv_receive, smb2_readv_callback, 4653 4635 smb3_handle_read_data, rdata, flags, 4654 4636 &rdata->credits); 4655 4637 if (rc) { 4656 - cifs_put_readdata(rdata); 4657 4638 cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE); 4658 - trace_smb3_read_err(0 /* xid */, io_parms.persistent_fid, 4639 + trace_smb3_read_err(rdata->rreq->debug_id, 4640 + rdata->subreq.debug_index, 4641 + rdata->xid, io_parms.persistent_fid, 4659 4642 io_parms.tcon->tid, 4660 4643 io_parms.tcon->ses->Suid, 4661 4644 io_parms.offset, io_parms.length, rc); ··· 4706 4689 if (rc != -ENODATA) { 4707 4690 cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE); 4708 4691 cifs_dbg(VFS, "Send error in read = %d\n", rc); 4709 - trace_smb3_read_err(xid, 4692 + trace_smb3_read_err(0, 0, xid, 4710 4693 req->PersistentFileId, 4711 4694 io_parms->tcon->tid, ses->Suid, 4712 4695 io_parms->offset, io_parms->length, 4713 4696 rc); 4714 4697 } else 4715 - trace_smb3_read_done(xid, req->PersistentFileId, io_parms->tcon->tid, 4698 + trace_smb3_read_done(0, 0, xid, 4699 + req->PersistentFileId, io_parms->tcon->tid, 4716 4700 ses->Suid, io_parms->offset, 0); 4717 4701 free_rsp_buf(resp_buftype, rsp_iov.iov_base); 4718 4702 cifs_small_buf_release(req); 4719 4703 return rc == -ENODATA ? 
0 : rc; 4720 4704 } else 4721 - trace_smb3_read_done(xid, 4722 - req->PersistentFileId, 4723 - io_parms->tcon->tid, ses->Suid, 4724 - io_parms->offset, io_parms->length); 4705 + trace_smb3_read_done(0, 0, xid, 4706 + req->PersistentFileId, 4707 + io_parms->tcon->tid, ses->Suid, 4708 + io_parms->offset, io_parms->length); 4725 4709 4726 4710 cifs_small_buf_release(req); 4727 4711 ··· 4756 4738 smb2_writev_callback(struct mid_q_entry *mid) 4757 4739 { 4758 4740 struct cifs_io_subrequest *wdata = mid->callback_data; 4759 - struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink); 4741 + struct cifs_tcon *tcon = tlink_tcon(wdata->req->cfile->tlink); 4760 4742 struct TCP_Server_Info *server = wdata->server; 4761 - unsigned int written; 4762 4743 struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf; 4763 4744 struct cifs_credits credits = { .value = 0, .instance = 0 }; 4745 + ssize_t result = 0; 4746 + size_t written; 4764 4747 4765 4748 WARN_ONCE(wdata->server != mid->server, 4766 4749 "wdata server %p != mid server %p", ··· 4771 4752 case MID_RESPONSE_RECEIVED: 4772 4753 credits.value = le16_to_cpu(rsp->hdr.CreditRequest); 4773 4754 credits.instance = server->reconnect_instance; 4774 - wdata->result = smb2_check_receive(mid, server, 0); 4775 - if (wdata->result != 0) 4755 + result = smb2_check_receive(mid, server, 0); 4756 + if (result != 0) 4776 4757 break; 4777 4758 4778 4759 written = le32_to_cpu(rsp->DataLength); ··· 4789 4770 wdata->result = -ENOSPC; 4790 4771 else 4791 4772 wdata->subreq.len = written; 4773 + iov_iter_advance(&wdata->subreq.io_iter, written); 4792 4774 break; 4793 4775 case MID_REQUEST_SUBMITTED: 4794 4776 case MID_RETRY_NEEDED: 4795 - wdata->result = -EAGAIN; 4777 + result = -EAGAIN; 4796 4778 break; 4797 4779 case MID_RESPONSE_MALFORMED: 4798 4780 credits.value = le16_to_cpu(rsp->hdr.CreditRequest); 4799 4781 credits.instance = server->reconnect_instance; 4800 4782 fallthrough; 4801 4783 default: 4802 - wdata->result = -EIO; 4784 
+ result = -EIO; 4803 4785 break; 4804 4786 } 4805 4787 #ifdef CONFIG_CIFS_SMB_DIRECT ··· 4816 4796 wdata->mr = NULL; 4817 4797 } 4818 4798 #endif 4819 - if (wdata->result) { 4799 + if (result) { 4820 4800 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE); 4821 - trace_smb3_write_err(0 /* no xid */, 4822 - wdata->cfile->fid.persistent_fid, 4801 + trace_smb3_write_err(wdata->xid, 4802 + wdata->req->cfile->fid.persistent_fid, 4823 4803 tcon->tid, tcon->ses->Suid, wdata->subreq.start, 4824 4804 wdata->subreq.len, wdata->result); 4825 4805 if (wdata->result == -ENOSPC) ··· 4827 4807 tcon->tree_name); 4828 4808 } else 4829 4809 trace_smb3_write_done(0 /* no xid */, 4830 - wdata->cfile->fid.persistent_fid, 4810 + wdata->req->cfile->fid.persistent_fid, 4831 4811 tcon->tid, tcon->ses->Suid, 4832 4812 wdata->subreq.start, wdata->subreq.len); 4833 4813 4834 - queue_work(cifsiod_wq, &wdata->work); 4814 + wdata->credits.value = 0; 4815 + cifs_write_subrequest_terminated(wdata, result ?: written, true); 4835 4816 release_mid(mid); 4836 4817 add_credits(server, &credits, 0); 4837 4818 } 4838 4819 4839 4820 /* smb2_async_writev - send an async write, and set up mid to handle result */ 4840 - int 4821 + void 4841 4822 smb2_async_writev(struct cifs_io_subrequest *wdata) 4842 4823 { 4843 4824 int rc = -EACCES, flags = 0; 4844 4825 struct smb2_write_req *req = NULL; 4845 4826 struct smb2_hdr *shdr; 4846 - struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink); 4827 + struct cifs_tcon *tcon = tlink_tcon(wdata->req->cfile->tlink); 4847 4828 struct TCP_Server_Info *server = wdata->server; 4848 4829 struct kvec iov[1]; 4849 4830 struct smb_rqst rqst = { }; 4850 - unsigned int total_len; 4831 + unsigned int total_len, xid = wdata->xid; 4851 4832 struct cifs_io_parms _io_parms; 4852 4833 struct cifs_io_parms *io_parms = NULL; 4853 4834 int credit_request; ··· 4865 4844 .server = server, 4866 4845 .offset = wdata->subreq.start, 4867 4846 .length = wdata->subreq.len, 4868 - .persistent_fid = 
wdata->cfile->fid.persistent_fid, 4869 - .volatile_fid = wdata->cfile->fid.volatile_fid, 4847 + .persistent_fid = wdata->req->cfile->fid.persistent_fid, 4848 + .volatile_fid = wdata->req->cfile->fid.volatile_fid, 4870 4849 .pid = wdata->pid, 4871 4850 }; 4872 4851 io_parms = &_io_parms; ··· 4874 4853 rc = smb2_plain_req_init(SMB2_WRITE, tcon, server, 4875 4854 (void **) &req, &total_len); 4876 4855 if (rc) 4877 - return rc; 4856 + goto out; 4878 4857 4879 4858 if (smb3_encryption_required(tcon)) 4880 4859 flags |= CIFS_TRANSFORM_REQ; ··· 4892 4871 offsetof(struct smb2_write_req, Buffer)); 4893 4872 req->RemainingBytes = 0; 4894 4873 4895 - trace_smb3_write_enter(0 /* xid */, 4874 + trace_smb3_write_enter(wdata->xid, 4896 4875 io_parms->persistent_fid, 4897 4876 io_parms->tcon->tid, 4898 4877 io_parms->tcon->ses->Suid, ··· 4973 4952 flags |= CIFS_HAS_CREDITS; 4974 4953 } 4975 4954 4976 - cifs_get_writedata(wdata); 4977 4955 rc = cifs_call_async(server, &rqst, NULL, smb2_writev_callback, NULL, 4978 4956 wdata, flags, &wdata->credits); 4979 - 4957 + /* Can't touch wdata if rc == 0 */ 4980 4958 if (rc) { 4981 - trace_smb3_write_err(0 /* no xid */, 4959 + trace_smb3_write_err(xid, 4982 4960 io_parms->persistent_fid, 4983 4961 io_parms->tcon->tid, 4984 4962 io_parms->tcon->ses->Suid, 4985 4963 io_parms->offset, 4986 4964 io_parms->length, 4987 4965 rc); 4988 - cifs_put_writedata(wdata); 4989 4966 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE); 4990 4967 } 4991 4968 4992 4969 async_writev_out: 4993 4970 cifs_small_buf_release(req); 4994 - return rc; 4971 + out: 4972 + if (rc) { 4973 + add_credits_and_wake_if(wdata->server, &wdata->credits, 0); 4974 + cifs_write_subrequest_terminated(wdata, rc, true); 4975 + } 4995 4976 } 4996 4977 4997 4978 /*
+1 -1
fs/smb/client/smb2proto.h
··· 213 213 extern int smb2_async_readv(struct cifs_io_subrequest *rdata); 214 214 extern int SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms, 215 215 unsigned int *nbytes, char **buf, int *buf_type); 216 - extern int smb2_async_writev(struct cifs_io_subrequest *wdata); 216 + extern void smb2_async_writev(struct cifs_io_subrequest *wdata); 217 217 extern int SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms, 218 218 unsigned int *nbytes, struct kvec *iov, int n_vec); 219 219 extern int SMB2_echo(struct TCP_Server_Info *server);
+125 -19
fs/smb/client/trace.h
··· 85 85 86 86 /* For logging errors in read or write */ 87 87 DECLARE_EVENT_CLASS(smb3_rw_err_class, 88 + TP_PROTO(unsigned int rreq_debug_id, 89 + unsigned int rreq_debug_index, 90 + unsigned int xid, 91 + __u64 fid, 92 + __u32 tid, 93 + __u64 sesid, 94 + __u64 offset, 95 + __u32 len, 96 + int rc), 97 + TP_ARGS(rreq_debug_id, rreq_debug_index, 98 + xid, fid, tid, sesid, offset, len, rc), 99 + TP_STRUCT__entry( 100 + __field(unsigned int, rreq_debug_id) 101 + __field(unsigned int, rreq_debug_index) 102 + __field(unsigned int, xid) 103 + __field(__u64, fid) 104 + __field(__u32, tid) 105 + __field(__u64, sesid) 106 + __field(__u64, offset) 107 + __field(__u32, len) 108 + __field(int, rc) 109 + ), 110 + TP_fast_assign( 111 + __entry->rreq_debug_id = rreq_debug_id; 112 + __entry->rreq_debug_index = rreq_debug_index; 113 + __entry->xid = xid; 114 + __entry->fid = fid; 115 + __entry->tid = tid; 116 + __entry->sesid = sesid; 117 + __entry->offset = offset; 118 + __entry->len = len; 119 + __entry->rc = rc; 120 + ), 121 + TP_printk("\tR=%08x[%x] xid=%u sid=0x%llx tid=0x%x fid=0x%llx offset=0x%llx len=0x%x rc=%d", 122 + __entry->rreq_debug_id, __entry->rreq_debug_index, 123 + __entry->xid, __entry->sesid, __entry->tid, __entry->fid, 124 + __entry->offset, __entry->len, __entry->rc) 125 + ) 126 + 127 + #define DEFINE_SMB3_RW_ERR_EVENT(name) \ 128 + DEFINE_EVENT(smb3_rw_err_class, smb3_##name, \ 129 + TP_PROTO(unsigned int rreq_debug_id, \ 130 + unsigned int rreq_debug_index, \ 131 + unsigned int xid, \ 132 + __u64 fid, \ 133 + __u32 tid, \ 134 + __u64 sesid, \ 135 + __u64 offset, \ 136 + __u32 len, \ 137 + int rc), \ 138 + TP_ARGS(rreq_debug_id, rreq_debug_index, xid, fid, tid, sesid, offset, len, rc)) 139 + 140 + DEFINE_SMB3_RW_ERR_EVENT(read_err); 141 + 142 + /* For logging errors in other file I/O ops */ 143 + DECLARE_EVENT_CLASS(smb3_other_err_class, 88 144 TP_PROTO(unsigned int xid, 89 145 __u64 fid, 90 146 __u32 tid, ··· 172 116 __entry->offset, __entry->len, 
__entry->rc) 173 117 ) 174 118 175 - #define DEFINE_SMB3_RW_ERR_EVENT(name) \ 176 - DEFINE_EVENT(smb3_rw_err_class, smb3_##name, \ 119 + #define DEFINE_SMB3_OTHER_ERR_EVENT(name) \ 120 + DEFINE_EVENT(smb3_other_err_class, smb3_##name, \ 177 121 TP_PROTO(unsigned int xid, \ 178 122 __u64 fid, \ 179 123 __u32 tid, \ ··· 183 127 int rc), \ 184 128 TP_ARGS(xid, fid, tid, sesid, offset, len, rc)) 185 129 186 - DEFINE_SMB3_RW_ERR_EVENT(write_err); 187 - DEFINE_SMB3_RW_ERR_EVENT(read_err); 188 - DEFINE_SMB3_RW_ERR_EVENT(query_dir_err); 189 - DEFINE_SMB3_RW_ERR_EVENT(zero_err); 190 - DEFINE_SMB3_RW_ERR_EVENT(falloc_err); 130 + DEFINE_SMB3_OTHER_ERR_EVENT(write_err); 131 + DEFINE_SMB3_OTHER_ERR_EVENT(query_dir_err); 132 + DEFINE_SMB3_OTHER_ERR_EVENT(zero_err); 133 + DEFINE_SMB3_OTHER_ERR_EVENT(falloc_err); 191 134 192 135 193 136 /* For logging successful read or write */ 194 137 DECLARE_EVENT_CLASS(smb3_rw_done_class, 138 + TP_PROTO(unsigned int rreq_debug_id, 139 + unsigned int rreq_debug_index, 140 + unsigned int xid, 141 + __u64 fid, 142 + __u32 tid, 143 + __u64 sesid, 144 + __u64 offset, 145 + __u32 len), 146 + TP_ARGS(rreq_debug_id, rreq_debug_index, 147 + xid, fid, tid, sesid, offset, len), 148 + TP_STRUCT__entry( 149 + __field(unsigned int, rreq_debug_id) 150 + __field(unsigned int, rreq_debug_index) 151 + __field(unsigned int, xid) 152 + __field(__u64, fid) 153 + __field(__u32, tid) 154 + __field(__u64, sesid) 155 + __field(__u64, offset) 156 + __field(__u32, len) 157 + ), 158 + TP_fast_assign( 159 + __entry->rreq_debug_id = rreq_debug_id; 160 + __entry->rreq_debug_index = rreq_debug_index; 161 + __entry->xid = xid; 162 + __entry->fid = fid; 163 + __entry->tid = tid; 164 + __entry->sesid = sesid; 165 + __entry->offset = offset; 166 + __entry->len = len; 167 + ), 168 + TP_printk("R=%08x[%x] xid=%u sid=0x%llx tid=0x%x fid=0x%llx offset=0x%llx len=0x%x", 169 + __entry->rreq_debug_id, __entry->rreq_debug_index, 170 + __entry->xid, __entry->sesid, __entry->tid, 
__entry->fid, 171 + __entry->offset, __entry->len) 172 + ) 173 + 174 + #define DEFINE_SMB3_RW_DONE_EVENT(name) \ 175 + DEFINE_EVENT(smb3_rw_done_class, smb3_##name, \ 176 + TP_PROTO(unsigned int rreq_debug_id, \ 177 + unsigned int rreq_debug_index, \ 178 + unsigned int xid, \ 179 + __u64 fid, \ 180 + __u32 tid, \ 181 + __u64 sesid, \ 182 + __u64 offset, \ 183 + __u32 len), \ 184 + TP_ARGS(rreq_debug_id, rreq_debug_index, xid, fid, tid, sesid, offset, len)) 185 + 186 + DEFINE_SMB3_RW_DONE_EVENT(read_enter); 187 + DEFINE_SMB3_RW_DONE_EVENT(read_done); 188 + 189 + /* For logging successful other op */ 190 + DECLARE_EVENT_CLASS(smb3_other_done_class, 195 191 TP_PROTO(unsigned int xid, 196 192 __u64 fid, 197 193 __u32 tid, ··· 272 164 __entry->offset, __entry->len) 273 165 ) 274 166 275 - #define DEFINE_SMB3_RW_DONE_EVENT(name) \ 276 - DEFINE_EVENT(smb3_rw_done_class, smb3_##name, \ 167 + #define DEFINE_SMB3_OTHER_DONE_EVENT(name) \ 168 + DEFINE_EVENT(smb3_other_done_class, smb3_##name, \ 277 169 TP_PROTO(unsigned int xid, \ 278 170 __u64 fid, \ 279 171 __u32 tid, \ ··· 282 174 __u32 len), \ 283 175 TP_ARGS(xid, fid, tid, sesid, offset, len)) 284 176 285 - DEFINE_SMB3_RW_DONE_EVENT(write_enter); 286 - DEFINE_SMB3_RW_DONE_EVENT(read_enter); 287 - DEFINE_SMB3_RW_DONE_EVENT(query_dir_enter); 288 - DEFINE_SMB3_RW_DONE_EVENT(zero_enter); 289 - DEFINE_SMB3_RW_DONE_EVENT(falloc_enter); 290 - DEFINE_SMB3_RW_DONE_EVENT(write_done); 291 - DEFINE_SMB3_RW_DONE_EVENT(read_done); 292 - DEFINE_SMB3_RW_DONE_EVENT(query_dir_done); 293 - DEFINE_SMB3_RW_DONE_EVENT(zero_done); 294 - DEFINE_SMB3_RW_DONE_EVENT(falloc_done); 177 + DEFINE_SMB3_OTHER_DONE_EVENT(write_enter); 178 + DEFINE_SMB3_OTHER_DONE_EVENT(query_dir_enter); 179 + DEFINE_SMB3_OTHER_DONE_EVENT(zero_enter); 180 + DEFINE_SMB3_OTHER_DONE_EVENT(falloc_enter); 181 + DEFINE_SMB3_OTHER_DONE_EVENT(write_done); 182 + DEFINE_SMB3_OTHER_DONE_EVENT(query_dir_done); 183 + DEFINE_SMB3_OTHER_DONE_EVENT(zero_done); 184 + 
DEFINE_SMB3_OTHER_DONE_EVENT(falloc_done); 295 185 296 186 /* For logging successful set EOF (truncate) */ 297 187 DECLARE_EVENT_CLASS(smb3_eof_class,
+3
fs/smb/client/transport.c
··· 1813 1813 length = data_len; /* An RDMA read is already done. */ 1814 1814 else 1815 1815 #endif 1816 + { 1816 1817 length = cifs_read_iter_from_socket(server, &rdata->subreq.io_iter, 1817 1818 data_len); 1819 + iov_iter_revert(&rdata->subreq.io_iter, data_len); 1820 + } 1818 1821 if (length > 0) 1819 1822 rdata->got_bytes += length; 1820 1823 server->total_read += length;