Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

cifs: Implement netfslib hooks

Provide implementation of the netfslib hooks that will be used by netfslib
to ask cifs to set up and perform operations. Of particular note are

(*) cifs_clamp_length() - This is used to negotiate the size of the next
subrequest in a read request, taking into account the credit available
and the rsize. The credits are attached to the subrequest.

(*) cifs_req_issue_read() - This is used to issue a subrequest that has
been set up and clamped.

(*) cifs_prepare_write() - This prepares to fill a subrequest by picking a
channel, reopening the file and requesting credits so that we can set
the maximum size of the subrequest and also sets the maximum number of
segments if we're doing RDMA.

(*) cifs_issue_write() - This releases any unneeded credits and issues an
asynchronous data write for the contiguous slice of file covered by
the subrequest. This should possibly be folded into all
->async_writev() ops, with those then called directly.

(*) cifs_begin_writeback() - This gets the cached writable handle through
which we do writeback (this does not affect writethrough, unbuffered
or direct writes).

At this point, cifs is not wired up to actually *use* netfslib; that will
be done in a subsequent patch.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Steve French <sfrench@samba.org>
cc: Shyam Prasad N <nspmangalore@gmail.com>
cc: Rohith Surabattula <rohiths.msft@gmail.com>
cc: Jeff Layton <jlayton@kernel.org>
cc: linux-cifs@vger.kernel.org
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
cc: linux-mm@kvack.org

+345 -10
+6
fs/netfs/buffered_write.c
··· 405 405 } while (iov_iter_count(iter)); 406 406 407 407 out: 408 + if (likely(written) && ctx->ops->post_modify) 409 + ctx->ops->post_modify(inode); 410 + 408 411 if (unlikely(wreq)) { 409 412 ret2 = netfs_end_writethrough(wreq, &wbc, writethrough); 410 413 wbc_detach_inode(&wbc); ··· 524 521 struct folio *folio = page_folio(vmf->page); 525 522 struct file *file = vmf->vma->vm_file; 526 523 struct inode *inode = file_inode(file); 524 + struct netfs_inode *ictx = netfs_inode(inode); 527 525 vm_fault_t ret = VM_FAULT_RETRY; 528 526 int err; 529 527 ··· 571 567 trace_netfs_folio(folio, netfs_folio_trace_mkwrite); 572 568 netfs_set_group(folio, netfs_group); 573 569 file_update_time(file); 570 + if (ictx->ops->post_modify) 571 + ictx->ops->post_modify(inode); 574 572 ret = VM_FAULT_LOCKED; 575 573 out: 576 574 sb_end_pagefault(inode->i_sb);
+1
fs/smb/client/Kconfig
··· 2 2 config CIFS 3 3 tristate "SMB3 and CIFS support (advanced network filesystem)" 4 4 depends on INET 5 + select NETFS_SUPPORT 5 6 select NLS 6 7 select NLS_UCS2_UTILS 7 8 select CRYPTO
+1 -1
fs/smb/client/cifsfs.c
··· 1758 1758 { 1759 1759 cifs_io_request_cachep = 1760 1760 kmem_cache_create("cifs_io_request", 1761 - sizeof(struct netfs_io_request), 0, 1761 + sizeof(struct cifs_io_request), 0, 1762 1762 SLAB_HWCACHE_ALIGN, NULL); 1763 1763 if (!cifs_io_request_cachep) 1764 1764 goto nomem_req;
+1
fs/smb/client/cifsfs.h
··· 84 84 85 85 86 86 /* Functions related to files and directories */ 87 + extern const struct netfs_request_ops cifs_req_ops; 87 88 extern const struct file_operations cifs_file_ops; 88 89 extern const struct file_operations cifs_file_direct_ops; /* if directio mnt */ 89 90 extern const struct file_operations cifs_file_strict_ops; /* if strictio mnt */
+19 -9
fs/smb/client/cifsglob.h
··· 1491 1491 bool direct_io; 1492 1492 }; 1493 1493 1494 + struct cifs_io_request { 1495 + struct netfs_io_request rreq; 1496 + struct cifsFileInfo *cfile; 1497 + }; 1498 + 1494 1499 /* asynchronous read support */ 1495 1500 struct cifs_io_subrequest { 1496 - struct netfs_io_subrequest subreq; 1497 - struct cifsFileInfo *cfile; 1498 - struct address_space *mapping; 1499 - struct cifs_aio_ctx *ctx; 1501 + union { 1502 + struct netfs_io_subrequest subreq; 1503 + struct netfs_io_request *rreq; 1504 + struct cifs_io_request *req; 1505 + }; 1500 1506 ssize_t got_bytes; 1501 1507 pid_t pid; 1508 + unsigned int xid; 1502 1509 int result; 1510 + bool have_xid; 1511 + bool replay; 1503 1512 struct kvec iov[2]; 1504 1513 struct TCP_Server_Info *server; 1505 1514 #ifdef CONFIG_CIFS_SMB_DIRECT ··· 1516 1507 #endif 1517 1508 struct cifs_credits credits; 1518 1509 1519 - enum writeback_sync_modes sync_mode; 1520 - bool uncached; 1521 - bool replay; 1522 - struct bio_vec *bv; 1523 - 1524 1510 // TODO: Remove following elements 1525 1511 struct list_head list; 1526 1512 struct completion done; 1527 1513 struct work_struct work; 1514 + struct cifsFileInfo *cfile; 1515 + struct address_space *mapping; 1516 + struct cifs_aio_ctx *ctx; 1517 + enum writeback_sync_modes sync_mode; 1518 + bool uncached; 1519 + struct bio_vec *bv; 1528 1520 }; 1529 1521 1530 1522 /*
+315
fs/smb/client/file.c
··· 36 36 #include "fs_context.h" 37 37 #include "cifs_ioctl.h" 38 38 #include "cached_dir.h" 39 + #include <trace/events/netfs.h> 40 + 41 + static int cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush); 42 + 43 + /* 44 + * Prepare a subrequest to upload to the server. We need to allocate credits 45 + * so that we know the maximum amount of data that we can include in it. 46 + */ 47 + static void cifs_prepare_write(struct netfs_io_subrequest *subreq) 48 + { 49 + struct cifs_io_subrequest *wdata = 50 + container_of(subreq, struct cifs_io_subrequest, subreq); 51 + struct cifs_io_request *req = wdata->req; 52 + struct TCP_Server_Info *server; 53 + struct cifsFileInfo *open_file = req->cfile; 54 + size_t wsize = req->rreq.wsize; 55 + int rc; 56 + 57 + if (!wdata->have_xid) { 58 + wdata->xid = get_xid(); 59 + wdata->have_xid = true; 60 + } 61 + 62 + server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses); 63 + wdata->server = server; 64 + 65 + retry: 66 + if (open_file->invalidHandle) { 67 + rc = cifs_reopen_file(open_file, false); 68 + if (rc < 0) { 69 + if (rc == -EAGAIN) 70 + goto retry; 71 + subreq->error = rc; 72 + return netfs_prepare_write_failed(subreq); 73 + } 74 + } 75 + 76 + rc = server->ops->wait_mtu_credits(server, wsize, &wdata->subreq.max_len, 77 + &wdata->credits); 78 + if (rc < 0) { 79 + subreq->error = rc; 80 + return netfs_prepare_write_failed(subreq); 81 + } 82 + 83 + #ifdef CONFIG_CIFS_SMB_DIRECT 84 + if (server->smbd_conn) 85 + subreq->max_nr_segs = server->smbd_conn->max_frmr_depth; 86 + #endif 87 + } 88 + 89 + /* 90 + * Issue a subrequest to upload to the server. 
91 + */ 92 + static void cifs_issue_write(struct netfs_io_subrequest *subreq) 93 + { 94 + struct cifs_io_subrequest *wdata = 95 + container_of(subreq, struct cifs_io_subrequest, subreq); 96 + struct cifs_sb_info *sbi = CIFS_SB(subreq->rreq->inode->i_sb); 97 + int rc; 98 + 99 + if (cifs_forced_shutdown(sbi)) { 100 + rc = -EIO; 101 + goto fail; 102 + } 103 + 104 + rc = adjust_credits(wdata->server, &wdata->credits, wdata->subreq.len); 105 + if (rc) 106 + goto fail; 107 + 108 + rc = -EAGAIN; 109 + if (wdata->req->cfile->invalidHandle) 110 + goto fail; 111 + 112 + wdata->server->ops->async_writev(wdata); 113 + out: 114 + return; 115 + 116 + fail: 117 + if (rc == -EAGAIN) 118 + trace_netfs_sreq(subreq, netfs_sreq_trace_retry); 119 + else 120 + trace_netfs_sreq(subreq, netfs_sreq_trace_fail); 121 + add_credits_and_wake_if(wdata->server, &wdata->credits, 0); 122 + netfs_write_subrequest_terminated(wdata, rc, false); 123 + goto out; 124 + } 125 + 126 + /* 127 + * Split the read up according to how many credits we can get for each piece. 128 + * It's okay to sleep here if we need to wait for more credit to become 129 + * available. 130 + * 131 + * We also choose the server and allocate an operation ID to be cleaned up 132 + * later. 
133 + */ 134 + static bool cifs_clamp_length(struct netfs_io_subrequest *subreq) 135 + { 136 + struct netfs_io_request *rreq = subreq->rreq; 137 + struct TCP_Server_Info *server; 138 + struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq); 139 + struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq); 140 + struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb); 141 + size_t rsize = 0; 142 + int rc; 143 + 144 + rdata->xid = get_xid(); 145 + rdata->have_xid = true; 146 + 147 + server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses); 148 + rdata->server = server; 149 + 150 + if (cifs_sb->ctx->rsize == 0) 151 + cifs_sb->ctx->rsize = 152 + server->ops->negotiate_rsize(tlink_tcon(req->cfile->tlink), 153 + cifs_sb->ctx); 154 + 155 + 156 + rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize, &rsize, 157 + &rdata->credits); 158 + if (rc) { 159 + subreq->error = rc; 160 + return false; 161 + } 162 + 163 + subreq->len = min_t(size_t, subreq->len, rsize); 164 + #ifdef CONFIG_CIFS_SMB_DIRECT 165 + if (server->smbd_conn) 166 + subreq->max_nr_segs = server->smbd_conn->max_frmr_depth; 167 + #endif 168 + return true; 169 + } 170 + 171 + /* 172 + * Issue a read operation on behalf of the netfs helper functions. We're asked 173 + * to make a read of a certain size at a point in the file. We are permitted 174 + * to only read a portion of that, but as long as we read something, the netfs 175 + * helper will call us again so that we can issue another read. 
176 + */ 177 + static void cifs_req_issue_read(struct netfs_io_subrequest *subreq) 178 + { 179 + struct netfs_io_request *rreq = subreq->rreq; 180 + struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq); 181 + struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq); 182 + struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb); 183 + pid_t pid; 184 + int rc = 0; 185 + 186 + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) 187 + pid = req->cfile->pid; 188 + else 189 + pid = current->tgid; // Ummm... This may be a workqueue 190 + 191 + cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n", 192 + __func__, rreq->debug_id, subreq->debug_index, rreq->mapping, 193 + subreq->transferred, subreq->len); 194 + 195 + if (req->cfile->invalidHandle) { 196 + do { 197 + rc = cifs_reopen_file(req->cfile, true); 198 + } while (rc == -EAGAIN); 199 + if (rc) 200 + goto out; 201 + } 202 + 203 + __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags); 204 + rdata->pid = pid; 205 + 206 + rc = adjust_credits(rdata->server, &rdata->credits, rdata->subreq.len); 207 + if (!rc) { 208 + if (rdata->req->cfile->invalidHandle) 209 + rc = -EAGAIN; 210 + else 211 + rc = rdata->server->ops->async_readv(rdata); 212 + } 213 + 214 + out: 215 + if (rc) 216 + netfs_subreq_terminated(subreq, rc, false); 217 + } 218 + 219 + /* 220 + * Writeback calls this when it finds a folio that needs uploading. This isn't 221 + * called if writeback only has copy-to-cache to deal with. 
222 + */ 223 + static void cifs_begin_writeback(struct netfs_io_request *wreq) 224 + { 225 + struct cifs_io_request *req = container_of(wreq, struct cifs_io_request, rreq); 226 + int ret; 227 + 228 + ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_WR_ANY, &req->cfile); 229 + if (ret) { 230 + cifs_dbg(VFS, "No writable handle in writepages ret=%d\n", ret); 231 + return; 232 + } 233 + 234 + wreq->io_streams[0].avail = true; 235 + } 236 + 237 + /* 238 + * Initialise a request. 239 + */ 240 + static int cifs_init_request(struct netfs_io_request *rreq, struct file *file) 241 + { 242 + struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq); 243 + struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb); 244 + struct cifsFileInfo *open_file = NULL; 245 + 246 + rreq->rsize = cifs_sb->ctx->rsize; 247 + rreq->wsize = cifs_sb->ctx->wsize; 248 + 249 + if (file) { 250 + open_file = file->private_data; 251 + rreq->netfs_priv = file->private_data; 252 + req->cfile = cifsFileInfo_get(open_file); 253 + } else if (rreq->origin != NETFS_WRITEBACK) { 254 + WARN_ON_ONCE(1); 255 + return -EIO; 256 + } 257 + 258 + return 0; 259 + } 260 + 261 + /* 262 + * Expand the size of a readahead to the size of the rsize, if at least as 263 + * large as a page, allowing for the possibility that rsize is not pow-2 264 + * aligned. 
265 + */ 266 + static void cifs_expand_readahead(struct netfs_io_request *rreq) 267 + { 268 + unsigned int rsize = rreq->rsize; 269 + loff_t misalignment, i_size = i_size_read(rreq->inode); 270 + 271 + if (rsize < PAGE_SIZE) 272 + return; 273 + 274 + if (rsize < INT_MAX) 275 + rsize = roundup_pow_of_two(rsize); 276 + else 277 + rsize = ((unsigned int)INT_MAX + 1) / 2; 278 + 279 + misalignment = rreq->start & (rsize - 1); 280 + if (misalignment) { 281 + rreq->start -= misalignment; 282 + rreq->len += misalignment; 283 + } 284 + 285 + rreq->len = round_up(rreq->len, rsize); 286 + if (rreq->start < i_size && rreq->len > i_size - rreq->start) 287 + rreq->len = i_size - rreq->start; 288 + } 289 + 290 + /* 291 + * Completion of a request operation. 292 + */ 293 + static void cifs_rreq_done(struct netfs_io_request *rreq) 294 + { 295 + struct timespec64 atime, mtime; 296 + struct inode *inode = rreq->inode; 297 + 298 + /* we do not want atime to be less than mtime, it broke some apps */ 299 + atime = inode_set_atime_to_ts(inode, current_time(inode)); 300 + mtime = inode_get_mtime(inode); 301 + if (timespec64_compare(&atime, &mtime)) 302 + inode_set_atime_to_ts(inode, inode_get_mtime(inode)); 303 + } 304 + 305 + static void cifs_post_modify(struct inode *inode) 306 + { 307 + /* Indication to update ctime and mtime as close is deferred */ 308 + set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags); 309 + } 310 + 311 + static void cifs_free_request(struct netfs_io_request *rreq) 312 + { 313 + struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq); 314 + 315 + if (req->cfile) 316 + cifsFileInfo_put(req->cfile); 317 + } 318 + 319 + static void cifs_free_subrequest(struct netfs_io_subrequest *subreq) 320 + { 321 + struct cifs_io_subrequest *rdata = 322 + container_of(subreq, struct cifs_io_subrequest, subreq); 323 + int rc = subreq->error; 324 + 325 + if (rdata->subreq.source == NETFS_DOWNLOAD_FROM_SERVER) { 326 + #ifdef CONFIG_CIFS_SMB_DIRECT 327 + if 
(rdata->mr) { 328 + smbd_deregister_mr(rdata->mr); 329 + rdata->mr = NULL; 330 + } 331 + #endif 332 + } 333 + 334 + add_credits_and_wake_if(rdata->server, &rdata->credits, 0); 335 + if (rdata->have_xid) 336 + free_xid(rdata->xid); 337 + } 338 + 339 + const struct netfs_request_ops cifs_req_ops = { 340 + .request_pool = &cifs_io_request_pool, 341 + .subrequest_pool = &cifs_io_subrequest_pool, 342 + .init_request = cifs_init_request, 343 + .free_request = cifs_free_request, 344 + .free_subrequest = cifs_free_subrequest, 345 + .expand_readahead = cifs_expand_readahead, 346 + .clamp_length = cifs_clamp_length, 347 + .issue_read = cifs_req_issue_read, 348 + .done = cifs_rreq_done, 349 + .post_modify = cifs_post_modify, 350 + .begin_writeback = cifs_begin_writeback, 351 + .prepare_write = cifs_prepare_write, 352 + .issue_write = cifs_issue_write, 353 + }; 39 354 40 355 /* 41 356 * Remove the dirty flags from a span of pages.
+1
include/linux/netfs.h
··· 302 302 303 303 /* Modification handling */ 304 304 void (*update_i_size)(struct inode *inode, loff_t i_size); 305 + void (*post_modify)(struct inode *inode); 305 306 306 307 /* Write request handling */ 307 308 void (*begin_writeback)(struct netfs_io_request *wreq);
+1
include/trace/events/netfs.h
··· 112 112 #define netfs_sreq_ref_traces \ 113 113 EM(netfs_sreq_trace_get_copy_to_cache, "GET COPY2C ") \ 114 114 EM(netfs_sreq_trace_get_resubmit, "GET RESUBMIT") \ 115 + EM(netfs_sreq_trace_get_submit, "GET SUBMIT") \ 115 116 EM(netfs_sreq_trace_get_short_read, "GET SHORTRD") \ 116 117 EM(netfs_sreq_trace_new, "NEW ") \ 117 118 EM(netfs_sreq_trace_put_cancel, "PUT CANCEL ") \