Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

cifs: Remove some code that's no longer used, part 1

Remove some code that was #if'd out with the netfslib conversion. This is
split into parts for file.c as the diff generator otherwise produces a
hard-to-read diff for part of it where a big chunk is cut out.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Steve French <sfrench@samba.org>
cc: Shyam Prasad N <nspmangalore@gmail.com>
cc: Rohith Surabattula <rohiths.msft@gmail.com>
cc: Jeff Layton <jlayton@kernel.org>
cc: linux-cifs@vger.kernel.org
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
cc: linux-mm@kvack.org

-825
-12
fs/smb/client/cifsglob.h
··· 1515 1515 struct smbd_mr *mr; 1516 1516 #endif 1517 1517 struct cifs_credits credits; 1518 - 1519 - #if 0 // TODO: Remove following elements 1520 - struct list_head list; 1521 - struct completion done; 1522 - struct work_struct work; 1523 - struct cifsFileInfo *cfile; 1524 - struct address_space *mapping; 1525 - struct cifs_aio_ctx *ctx; 1526 - enum writeback_sync_modes sync_mode; 1527 - bool uncached; 1528 - struct bio_vec *bv; 1529 - #endif 1530 1518 }; 1531 1519 1532 1520 /*
-25
fs/smb/client/cifsproto.h
··· 601 601 extern struct cifs_ses * 602 602 cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx); 603 603 604 - #if 0 // TODO Remove 605 - void cifs_readdata_release(struct cifs_io_subrequest *rdata); 606 - static inline void cifs_get_readdata(struct cifs_io_subrequest *rdata) 607 - { 608 - refcount_inc(&rdata->subreq.ref); 609 - } 610 - static inline void cifs_put_readdata(struct cifs_io_subrequest *rdata) 611 - { 612 - if (refcount_dec_and_test(&rdata->subreq.ref)) 613 - cifs_readdata_release(rdata); 614 - } 615 - #endif 616 604 int cifs_async_readv(struct cifs_io_subrequest *rdata); 617 605 int cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid); 618 606 619 607 void cifs_async_writev(struct cifs_io_subrequest *wdata); 620 608 void cifs_writev_complete(struct work_struct *work); 621 - #if 0 // TODO Remove 622 - struct cifs_io_subrequest *cifs_writedata_alloc(work_func_t complete); 623 - void cifs_writedata_release(struct cifs_io_subrequest *rdata); 624 - static inline void cifs_get_writedata(struct cifs_io_subrequest *wdata) 625 - { 626 - refcount_inc(&wdata->subreq.ref); 627 - } 628 - static inline void cifs_put_writedata(struct cifs_io_subrequest *wdata) 629 - { 630 - if (refcount_dec_and_test(&wdata->subreq.ref)) 631 - cifs_writedata_release(wdata); 632 - } 633 - #endif 634 609 int cifs_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon, 635 610 struct cifs_sb_info *cifs_sb, 636 611 const unsigned char *path, char *pbuf,
-619
fs/smb/client/file.c
··· 352 352 .issue_write = cifs_issue_write, 353 353 }; 354 354 355 - #if 0 // TODO remove 397 356 - /* 357 - * Remove the dirty flags from a span of pages. 358 - */ 359 - static void cifs_undirty_folios(struct inode *inode, loff_t start, unsigned int len) 360 - { 361 - struct address_space *mapping = inode->i_mapping; 362 - struct folio *folio; 363 - pgoff_t end; 364 - 365 - XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE); 366 - 367 - rcu_read_lock(); 368 - 369 - end = (start + len - 1) / PAGE_SIZE; 370 - xas_for_each_marked(&xas, folio, end, PAGECACHE_TAG_DIRTY) { 371 - if (xas_retry(&xas, folio)) 372 - continue; 373 - xas_pause(&xas); 374 - rcu_read_unlock(); 375 - folio_lock(folio); 376 - folio_clear_dirty_for_io(folio); 377 - folio_unlock(folio); 378 - rcu_read_lock(); 379 - } 380 - 381 - rcu_read_unlock(); 382 - } 383 - 384 - /* 385 - * Completion of write to server. 386 - */ 387 - void cifs_pages_written_back(struct inode *inode, loff_t start, unsigned int len) 388 - { 389 - struct address_space *mapping = inode->i_mapping; 390 - struct folio *folio; 391 - pgoff_t end; 392 - 393 - XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE); 394 - 395 - if (!len) 396 - return; 397 - 398 - rcu_read_lock(); 399 - 400 - end = (start + len - 1) / PAGE_SIZE; 401 - xas_for_each(&xas, folio, end) { 402 - if (xas_retry(&xas, folio)) 403 - continue; 404 - if (!folio_test_writeback(folio)) { 405 - WARN_ONCE(1, "bad %x @%llx page %lx %lx\n", 406 - len, start, folio->index, end); 407 - continue; 408 - } 409 - 410 - folio_detach_private(folio); 411 - folio_end_writeback(folio); 412 - } 413 - 414 - rcu_read_unlock(); 415 - } 416 - 417 - /* 418 - * Failure of write to server. 
419 - */ 420 - void cifs_pages_write_failed(struct inode *inode, loff_t start, unsigned int len) 421 - { 422 - struct address_space *mapping = inode->i_mapping; 423 - struct folio *folio; 424 - pgoff_t end; 425 - 426 - XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE); 427 - 428 - if (!len) 429 - return; 430 - 431 - rcu_read_lock(); 432 - 433 - end = (start + len - 1) / PAGE_SIZE; 434 - xas_for_each(&xas, folio, end) { 435 - if (xas_retry(&xas, folio)) 436 - continue; 437 - if (!folio_test_writeback(folio)) { 438 - WARN_ONCE(1, "bad %x @%llx page %lx %lx\n", 439 - len, start, folio->index, end); 440 - continue; 441 - } 442 - 443 - folio_set_error(folio); 444 - folio_end_writeback(folio); 445 - } 446 - 447 - rcu_read_unlock(); 448 - } 449 - 450 - /* 451 - * Redirty pages after a temporary failure. 452 - */ 453 - void cifs_pages_write_redirty(struct inode *inode, loff_t start, unsigned int len) 454 - { 455 - struct address_space *mapping = inode->i_mapping; 456 - struct folio *folio; 457 - pgoff_t end; 458 - 459 - XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE); 460 - 461 - if (!len) 462 - return; 463 - 464 - rcu_read_lock(); 465 - 466 - end = (start + len - 1) / PAGE_SIZE; 467 - xas_for_each(&xas, folio, end) { 468 - if (!folio_test_writeback(folio)) { 469 - WARN_ONCE(1, "bad %x @%llx page %lx %lx\n", 470 - len, start, folio->index, end); 471 - continue; 472 - } 473 - 474 - filemap_dirty_folio(folio->mapping, folio); 475 - folio_end_writeback(folio); 476 - } 477 - 478 - rcu_read_unlock(); 479 - } 480 - #endif // end netfslib remove 397 481 - 482 355 /* 483 356 * Mark as invalid, all open files on tree connections since they 484 357 * were closed when session to server was lost. 
··· 2412 2539 2413 2540 netfs_write_subrequest_terminated(&wdata->subreq, result, was_async); 2414 2541 } 2415 - 2416 - #if 0 // TODO remove 2483 2417 - static ssize_t 2418 - cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data, 2419 - size_t write_size, loff_t *offset) 2420 - { 2421 - int rc = 0; 2422 - unsigned int bytes_written = 0; 2423 - unsigned int total_written; 2424 - struct cifs_tcon *tcon; 2425 - struct TCP_Server_Info *server; 2426 - unsigned int xid; 2427 - struct dentry *dentry = open_file->dentry; 2428 - struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry)); 2429 - struct cifs_io_parms io_parms = {0}; 2430 - 2431 - cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n", 2432 - write_size, *offset, dentry); 2433 - 2434 - tcon = tlink_tcon(open_file->tlink); 2435 - server = tcon->ses->server; 2436 - 2437 - if (!server->ops->sync_write) 2438 - return -ENOSYS; 2439 - 2440 - xid = get_xid(); 2441 - 2442 - for (total_written = 0; write_size > total_written; 2443 - total_written += bytes_written) { 2444 - rc = -EAGAIN; 2445 - while (rc == -EAGAIN) { 2446 - struct kvec iov[2]; 2447 - unsigned int len; 2448 - 2449 - if (open_file->invalidHandle) { 2450 - /* we could deadlock if we called 2451 - filemap_fdatawait from here so tell 2452 - reopen_file not to flush data to 2453 - server now */ 2454 - rc = cifs_reopen_file(open_file, false); 2455 - if (rc != 0) 2456 - break; 2457 - } 2458 - 2459 - len = min(server->ops->wp_retry_size(d_inode(dentry)), 2460 - (unsigned int)write_size - total_written); 2461 - /* iov[0] is reserved for smb header */ 2462 - iov[1].iov_base = (char *)write_data + total_written; 2463 - iov[1].iov_len = len; 2464 - io_parms.pid = pid; 2465 - io_parms.tcon = tcon; 2466 - io_parms.offset = *offset; 2467 - io_parms.length = len; 2468 - rc = server->ops->sync_write(xid, &open_file->fid, 2469 - &io_parms, &bytes_written, iov, 1); 2470 - } 2471 - if (rc || (bytes_written == 0)) { 2472 - if (total_written) 2473 - break; 
2474 - else { 2475 - free_xid(xid); 2476 - return rc; 2477 - } 2478 - } else { 2479 - spin_lock(&d_inode(dentry)->i_lock); 2480 - cifs_update_eof(cifsi, *offset, bytes_written); 2481 - spin_unlock(&d_inode(dentry)->i_lock); 2482 - *offset += bytes_written; 2483 - } 2484 - } 2485 - 2486 - cifs_stats_bytes_written(tcon, total_written); 2487 - 2488 - if (total_written > 0) { 2489 - spin_lock(&d_inode(dentry)->i_lock); 2490 - if (*offset > d_inode(dentry)->i_size) { 2491 - i_size_write(d_inode(dentry), *offset); 2492 - d_inode(dentry)->i_blocks = (512 - 1 + *offset) >> 9; 2493 - } 2494 - spin_unlock(&d_inode(dentry)->i_lock); 2495 - } 2496 - mark_inode_dirty_sync(d_inode(dentry)); 2497 - free_xid(xid); 2498 - return total_written; 2499 - } 2500 - #endif // end netfslib remove 2483 2501 2542 2502 2543 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode, 2503 2544 bool fsuid_only) ··· 4613 4826 return rc; 4614 4827 } 4615 4828 4616 - #if 0 // TODO remove 4794 4617 - /* 4618 - * Unlock a bunch of folios in the pagecache. 
4619 - */ 4620 - static void cifs_unlock_folios(struct address_space *mapping, pgoff_t first, pgoff_t last) 4621 - { 4622 - struct folio *folio; 4623 - XA_STATE(xas, &mapping->i_pages, first); 4624 - 4625 - rcu_read_lock(); 4626 - xas_for_each(&xas, folio, last) { 4627 - folio_unlock(folio); 4628 - } 4629 - rcu_read_unlock(); 4630 - } 4631 - 4632 - static void cifs_readahead_complete(struct work_struct *work) 4633 - { 4634 - struct cifs_io_subrequest *rdata = container_of(work, 4635 - struct cifs_io_subrequest, work); 4636 - struct folio *folio; 4637 - pgoff_t last; 4638 - bool good = rdata->result == 0 || (rdata->result == -EAGAIN && rdata->got_bytes); 4639 - 4640 - XA_STATE(xas, &rdata->mapping->i_pages, rdata->subreq.start / PAGE_SIZE); 4641 - 4642 - if (good) 4643 - cifs_readahead_to_fscache(rdata->mapping->host, 4644 - rdata->subreq.start, rdata->subreq.len); 4645 - 4646 - if (iov_iter_count(&rdata->subreq.io_iter) > 0) 4647 - iov_iter_zero(iov_iter_count(&rdata->subreq.io_iter), &rdata->subreq.io_iter); 4648 - 4649 - last = (rdata->subreq.start + rdata->subreq.len - 1) / PAGE_SIZE; 4650 - 4651 - rcu_read_lock(); 4652 - xas_for_each(&xas, folio, last) { 4653 - if (good) { 4654 - flush_dcache_folio(folio); 4655 - folio_mark_uptodate(folio); 4656 - } 4657 - folio_unlock(folio); 4658 - } 4659 - rcu_read_unlock(); 4660 - 4661 - cifs_put_readdata(rdata); 4662 - } 4663 - 4664 - static void cifs_readahead(struct readahead_control *ractl) 4665 - { 4666 - struct cifsFileInfo *open_file = ractl->file->private_data; 4667 - struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(ractl->file); 4668 - struct TCP_Server_Info *server; 4669 - unsigned int xid, nr_pages, cache_nr_pages = 0; 4670 - unsigned int ra_pages; 4671 - pgoff_t next_cached = ULONG_MAX, ra_index; 4672 - bool caching = fscache_cookie_enabled(cifs_inode_cookie(ractl->mapping->host)) && 4673 - cifs_inode_cookie(ractl->mapping->host)->cache_priv; 4674 - bool check_cache = caching; 4675 - pid_t pid; 4676 - int rc = 0; 
4677 - 4678 - /* Note that readahead_count() lags behind our dequeuing of pages from 4679 - * the ractl, wo we have to keep track for ourselves. 4680 - */ 4681 - ra_pages = readahead_count(ractl); 4682 - ra_index = readahead_index(ractl); 4683 - 4684 - xid = get_xid(); 4685 - 4686 - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) 4687 - pid = open_file->pid; 4688 - else 4689 - pid = current->tgid; 4690 - 4691 - server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses); 4692 - 4693 - cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n", 4694 - __func__, ractl->file, ractl->mapping, ra_pages); 4695 - 4696 - /* 4697 - * Chop the readahead request up into rsize-sized read requests. 4698 - */ 4699 - while ((nr_pages = ra_pages)) { 4700 - unsigned int i; 4701 - struct cifs_io_subrequest *rdata; 4702 - struct cifs_credits credits_on_stack; 4703 - struct cifs_credits *credits = &credits_on_stack; 4704 - struct folio *folio; 4705 - pgoff_t fsize; 4706 - size_t rsize; 4707 - 4708 - /* 4709 - * Find out if we have anything cached in the range of 4710 - * interest, and if so, where the next chunk of cached data is. 4711 - */ 4712 - if (caching) { 4713 - if (check_cache) { 4714 - rc = cifs_fscache_query_occupancy( 4715 - ractl->mapping->host, ra_index, nr_pages, 4716 - &next_cached, &cache_nr_pages); 4717 - if (rc < 0) 4718 - caching = false; 4719 - check_cache = false; 4720 - } 4721 - 4722 - if (ra_index == next_cached) { 4723 - /* 4724 - * TODO: Send a whole batch of pages to be read 4725 - * by the cache. 4726 - */ 4727 - folio = readahead_folio(ractl); 4728 - fsize = folio_nr_pages(folio); 4729 - ra_pages -= fsize; 4730 - ra_index += fsize; 4731 - if (cifs_readpage_from_fscache(ractl->mapping->host, 4732 - &folio->page) < 0) { 4733 - /* 4734 - * TODO: Deal with cache read failure 4735 - * here, but for the moment, delegate 4736 - * that to readpage. 
4737 - */ 4738 - caching = false; 4739 - } 4740 - folio_unlock(folio); 4741 - next_cached += fsize; 4742 - cache_nr_pages -= fsize; 4743 - if (cache_nr_pages == 0) 4744 - check_cache = true; 4745 - continue; 4746 - } 4747 - } 4748 - 4749 - if (open_file->invalidHandle) { 4750 - rc = cifs_reopen_file(open_file, true); 4751 - if (rc) { 4752 - if (rc == -EAGAIN) 4753 - continue; 4754 - break; 4755 - } 4756 - } 4757 - 4758 - if (cifs_sb->ctx->rsize == 0) 4759 - cifs_sb->ctx->rsize = 4760 - server->ops->negotiate_rsize(tlink_tcon(open_file->tlink), 4761 - cifs_sb->ctx); 4762 - 4763 - rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize, 4764 - &rsize, credits); 4765 - if (rc) 4766 - break; 4767 - nr_pages = min_t(size_t, rsize / PAGE_SIZE, ra_pages); 4768 - if (next_cached != ULONG_MAX) 4769 - nr_pages = min_t(size_t, nr_pages, next_cached - ra_index); 4770 - 4771 - /* 4772 - * Give up immediately if rsize is too small to read an entire 4773 - * page. The VFS will fall back to readpage. We should never 4774 - * reach this point however since we set ra_pages to 0 when the 4775 - * rsize is smaller than a cache page. 
4776 - */ 4777 - if (unlikely(!nr_pages)) { 4778 - add_credits_and_wake_if(server, credits, 0); 4779 - break; 4780 - } 4781 - 4782 - rdata = cifs_readdata_alloc(cifs_readahead_complete); 4783 - if (!rdata) { 4784 - /* best to give up if we're out of mem */ 4785 - add_credits_and_wake_if(server, credits, 0); 4786 - break; 4787 - } 4788 - 4789 - rdata->subreq.start = ra_index * PAGE_SIZE; 4790 - rdata->subreq.len = nr_pages * PAGE_SIZE; 4791 - rdata->cfile = cifsFileInfo_get(open_file); 4792 - rdata->server = server; 4793 - rdata->mapping = ractl->mapping; 4794 - rdata->pid = pid; 4795 - rdata->credits = credits_on_stack; 4796 - 4797 - for (i = 0; i < nr_pages; i++) { 4798 - if (!readahead_folio(ractl)) 4799 - WARN_ON(1); 4800 - } 4801 - ra_pages -= nr_pages; 4802 - ra_index += nr_pages; 4803 - 4804 - iov_iter_xarray(&rdata->subreq.io_iter, ITER_DEST, &rdata->mapping->i_pages, 4805 - rdata->subreq.start, rdata->subreq.len); 4806 - 4807 - rc = adjust_credits(server, &rdata->credits, rdata->subreq.len); 4808 - if (!rc) { 4809 - if (rdata->cfile->invalidHandle) 4810 - rc = -EAGAIN; 4811 - else 4812 - rc = server->ops->async_readv(rdata); 4813 - } 4814 - 4815 - if (rc) { 4816 - add_credits_and_wake_if(server, &rdata->credits, 0); 4817 - cifs_unlock_folios(rdata->mapping, 4818 - rdata->subreq.start / PAGE_SIZE, 4819 - (rdata->subreq.start + rdata->subreq.len - 1) / PAGE_SIZE); 4820 - /* Fallback to the readpage in error/reconnect cases */ 4821 - cifs_put_readdata(rdata); 4822 - break; 4823 - } 4824 - 4825 - cifs_put_readdata(rdata); 4826 - } 4827 - 4828 - free_xid(xid); 4829 - } 4830 - 4831 - /* 4832 - * cifs_readpage_worker must be called with the page pinned 4833 - */ 4834 - static int cifs_readpage_worker(struct file *file, struct page *page, 4835 - loff_t *poffset) 4836 - { 4837 - struct inode *inode = file_inode(file); 4838 - struct timespec64 atime, mtime; 4839 - char *read_data; 4840 - int rc; 4841 - 4842 - /* Is the page cached? 
*/ 4843 - rc = cifs_readpage_from_fscache(inode, page); 4844 - if (rc == 0) 4845 - goto read_complete; 4846 - 4847 - read_data = kmap(page); 4848 - /* for reads over a certain size could initiate async read ahead */ 4849 - 4850 - rc = cifs_read(file, read_data, PAGE_SIZE, poffset); 4851 - 4852 - if (rc < 0) 4853 - goto io_error; 4854 - else 4855 - cifs_dbg(FYI, "Bytes read %d\n", rc); 4856 - 4857 - /* we do not want atime to be less than mtime, it broke some apps */ 4858 - atime = inode_set_atime_to_ts(inode, current_time(inode)); 4859 - mtime = inode_get_mtime(inode); 4860 - if (timespec64_compare(&atime, &mtime) < 0) 4861 - inode_set_atime_to_ts(inode, inode_get_mtime(inode)); 4862 - 4863 - if (PAGE_SIZE > rc) 4864 - memset(read_data + rc, 0, PAGE_SIZE - rc); 4865 - 4866 - flush_dcache_page(page); 4867 - SetPageUptodate(page); 4868 - rc = 0; 4869 - 4870 - io_error: 4871 - kunmap(page); 4872 - 4873 - read_complete: 4874 - unlock_page(page); 4875 - return rc; 4876 - } 4877 - 4878 - static int cifs_read_folio(struct file *file, struct folio *folio) 4879 - { 4880 - struct page *page = &folio->page; 4881 - loff_t offset = page_file_offset(page); 4882 - int rc = -EACCES; 4883 - unsigned int xid; 4884 - 4885 - xid = get_xid(); 4886 - 4887 - if (file->private_data == NULL) { 4888 - rc = -EBADF; 4889 - free_xid(xid); 4890 - return rc; 4891 - } 4892 - 4893 - cifs_dbg(FYI, "read_folio %p at offset %d 0x%x\n", 4894 - page, (int)offset, (int)offset); 4895 - 4896 - rc = cifs_readpage_worker(file, page, &offset); 4897 - 4898 - free_xid(xid); 4899 - return rc; 4900 - } 4901 - #endif // end netfslib remove 4794 4902 - 4903 4829 static int is_inode_writable(struct cifsInodeInfo *cifs_inode) 4904 4830 { 4905 4831 struct cifsFileInfo *open_file; ··· 4659 5159 } else 4660 5160 return true; 4661 5161 } 4662 - 4663 - #if 0 // TODO remove 5152 4664 - static int cifs_write_begin(struct file *file, struct address_space *mapping, 4665 - loff_t pos, unsigned len, 4666 - struct page **pagep, 
void **fsdata) 4667 - { 4668 - int oncethru = 0; 4669 - pgoff_t index = pos >> PAGE_SHIFT; 4670 - loff_t offset = pos & (PAGE_SIZE - 1); 4671 - loff_t page_start = pos & PAGE_MASK; 4672 - loff_t i_size; 4673 - struct page *page; 4674 - int rc = 0; 4675 - 4676 - cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len); 4677 - 4678 - start: 4679 - page = grab_cache_page_write_begin(mapping, index); 4680 - if (!page) { 4681 - rc = -ENOMEM; 4682 - goto out; 4683 - } 4684 - 4685 - if (PageUptodate(page)) 4686 - goto out; 4687 - 4688 - /* 4689 - * If we write a full page it will be up to date, no need to read from 4690 - * the server. If the write is short, we'll end up doing a sync write 4691 - * instead. 4692 - */ 4693 - if (len == PAGE_SIZE) 4694 - goto out; 4695 - 4696 - /* 4697 - * optimize away the read when we have an oplock, and we're not 4698 - * expecting to use any of the data we'd be reading in. That 4699 - * is, when the page lies beyond the EOF, or straddles the EOF 4700 - * and the write will cover all of the existing data. 4701 - */ 4702 - if (CIFS_CACHE_READ(CIFS_I(mapping->host))) { 4703 - i_size = i_size_read(mapping->host); 4704 - if (page_start >= i_size || 4705 - (offset == 0 && (pos + len) >= i_size)) { 4706 - zero_user_segments(page, 0, offset, 4707 - offset + len, 4708 - PAGE_SIZE); 4709 - /* 4710 - * PageChecked means that the parts of the page 4711 - * to which we're not writing are considered up 4712 - * to date. Once the data is copied to the 4713 - * page, it can be set uptodate. 4714 - */ 4715 - SetPageChecked(page); 4716 - goto out; 4717 - } 4718 - } 4719 - 4720 - if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) { 4721 - /* 4722 - * might as well read a page, it is fast enough. If we get 4723 - * an error, we don't need to return it. cifs_write_end will 4724 - * do a sync write instead since PG_uptodate isn't set. 
4725 - */ 4726 - cifs_readpage_worker(file, page, &page_start); 4727 - put_page(page); 4728 - oncethru = 1; 4729 - goto start; 4730 - } else { 4731 - /* we could try using another file handle if there is one - 4732 - but how would we lock it to prevent close of that handle 4733 - racing with this read? In any case 4734 - this will be written out by write_end so is fine */ 4735 - } 4736 - out: 4737 - *pagep = page; 4738 - return rc; 4739 - } 4740 - 4741 - static bool cifs_release_folio(struct folio *folio, gfp_t gfp) 4742 - { 4743 - if (folio_test_private(folio)) 4744 - return 0; 4745 - if (folio_test_private_2(folio)) { /* [DEPRECATED] */ 4746 - if (current_is_kswapd() || !(gfp & __GFP_FS)) 4747 - return false; 4748 - folio_wait_private_2(folio); 4749 - } 4750 - fscache_note_page_release(cifs_inode_cookie(folio->mapping->host)); 4751 - return true; 4752 - } 4753 - 4754 - static void cifs_invalidate_folio(struct folio *folio, size_t offset, 4755 - size_t length) 4756 - { 4757 - folio_wait_private_2(folio); /* [DEPRECATED] */ 4758 - } 4759 - #endif // end netfslib remove 5152 4760 5162 4761 5163 void cifs_oplock_break(struct work_struct *work) 4762 5164 { ··· 4748 5346 out: 4749 5347 cifs_done_oplock_break(cinode); 4750 5348 } 4751 - 4752 - #if 0 // TODO remove 5333 4753 - /* 4754 - * The presence of cifs_direct_io() in the address space ops vector 4755 - * allowes open() O_DIRECT flags which would have failed otherwise. 4756 - * 4757 - * In the non-cached mode (mount with cache=none), we shunt off direct read and write requests 4758 - * so this method should never be called. 4759 - * 4760 - * Direct IO is not yet supported in the cached mode. 
4761 - */ 4762 - static ssize_t 4763 - cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter) 4764 - { 4765 - /* 4766 - * FIXME 4767 - * Eventually need to support direct IO for non forcedirectio mounts 4768 - */ 4769 - return -EINVAL; 4770 - } 4771 - #endif // netfs end remove 5333 4772 5349 4773 5350 static int cifs_swap_activate(struct swap_info_struct *sis, 4774 5351 struct file *swap_file, sector_t *span)
-111
fs/smb/client/fscache.c
··· 170 170 cifsi->netfs.cache = NULL; 171 171 } 172 172 } 173 - 174 - #if 0 // TODO remove 175 - /* 176 - * Fallback page reading interface. 177 - */ 178 - static int fscache_fallback_read_page(struct inode *inode, struct page *page) 179 - { 180 - struct netfs_cache_resources cres; 181 - struct fscache_cookie *cookie = cifs_inode_cookie(inode); 182 - struct iov_iter iter; 183 - struct bio_vec bvec; 184 - int ret; 185 - 186 - memset(&cres, 0, sizeof(cres)); 187 - bvec_set_page(&bvec, page, PAGE_SIZE, 0); 188 - iov_iter_bvec(&iter, ITER_DEST, &bvec, 1, PAGE_SIZE); 189 - 190 - ret = fscache_begin_read_operation(&cres, cookie); 191 - if (ret < 0) 192 - return ret; 193 - 194 - ret = fscache_read(&cres, page_offset(page), &iter, NETFS_READ_HOLE_FAIL, 195 - NULL, NULL); 196 - fscache_end_operation(&cres); 197 - return ret; 198 - } 199 - 200 - /* 201 - * Fallback page writing interface. 202 - */ 203 - static int fscache_fallback_write_pages(struct inode *inode, loff_t start, size_t len, 204 - bool no_space_allocated_yet) 205 - { 206 - struct netfs_cache_resources cres; 207 - struct fscache_cookie *cookie = cifs_inode_cookie(inode); 208 - struct iov_iter iter; 209 - int ret; 210 - 211 - memset(&cres, 0, sizeof(cres)); 212 - iov_iter_xarray(&iter, ITER_SOURCE, &inode->i_mapping->i_pages, start, len); 213 - 214 - ret = fscache_begin_write_operation(&cres, cookie); 215 - if (ret < 0) 216 - return ret; 217 - 218 - ret = cres.ops->prepare_write(&cres, &start, &len, len, i_size_read(inode), 219 - no_space_allocated_yet); 220 - if (ret == 0) 221 - ret = fscache_write(&cres, start, &iter, NULL, NULL); 222 - fscache_end_operation(&cres); 223 - return ret; 224 - } 225 - 226 - /* 227 - * Retrieve a page from FS-Cache 228 - */ 229 - int __cifs_readpage_from_fscache(struct inode *inode, struct page *page) 230 - { 231 - int ret; 232 - 233 - cifs_dbg(FYI, "%s: (fsc:%p, p:%p, i:0x%p\n", 234 - __func__, cifs_inode_cookie(inode), page, inode); 235 - 236 - ret = 
fscache_fallback_read_page(inode, page); 237 - if (ret < 0) 238 - return ret; 239 - 240 - /* Read completed synchronously */ 241 - SetPageUptodate(page); 242 - return 0; 243 - } 244 - 245 - void __cifs_readahead_to_fscache(struct inode *inode, loff_t pos, size_t len) 246 - { 247 - cifs_dbg(FYI, "%s: (fsc: %p, p: %llx, l: %zx, i: %p)\n", 248 - __func__, cifs_inode_cookie(inode), pos, len, inode); 249 - 250 - fscache_fallback_write_pages(inode, pos, len, true); 251 - } 252 - 253 - /* 254 - * Query the cache occupancy. 255 - */ 256 - int __cifs_fscache_query_occupancy(struct inode *inode, 257 - pgoff_t first, unsigned int nr_pages, 258 - pgoff_t *_data_first, 259 - unsigned int *_data_nr_pages) 260 - { 261 - struct netfs_cache_resources cres; 262 - struct fscache_cookie *cookie = cifs_inode_cookie(inode); 263 - loff_t start, data_start; 264 - size_t len, data_len; 265 - int ret; 266 - 267 - ret = fscache_begin_read_operation(&cres, cookie); 268 - if (ret < 0) 269 - return ret; 270 - 271 - start = first * PAGE_SIZE; 272 - len = nr_pages * PAGE_SIZE; 273 - ret = cres.ops->query_occupancy(&cres, start, len, PAGE_SIZE, 274 - &data_start, &data_len); 275 - if (ret == 0) { 276 - *_data_first = data_start / PAGE_SIZE; 277 - *_data_nr_pages = len / PAGE_SIZE; 278 - } 279 - 280 - fscache_end_operation(&cres); 281 - return ret; 282 - } 283 - #endif
-58
fs/smb/client/fscache.h
··· 74 74 i_size_read(inode), flags); 75 75 } 76 76 77 - #if 0 // TODO remove 78 - extern int __cifs_fscache_query_occupancy(struct inode *inode, 79 - pgoff_t first, unsigned int nr_pages, 80 - pgoff_t *_data_first, 81 - unsigned int *_data_nr_pages); 82 - 83 - static inline int cifs_fscache_query_occupancy(struct inode *inode, 84 - pgoff_t first, unsigned int nr_pages, 85 - pgoff_t *_data_first, 86 - unsigned int *_data_nr_pages) 87 - { 88 - if (!cifs_inode_cookie(inode)) 89 - return -ENOBUFS; 90 - return __cifs_fscache_query_occupancy(inode, first, nr_pages, 91 - _data_first, _data_nr_pages); 92 - } 93 - 94 - extern int __cifs_readpage_from_fscache(struct inode *pinode, struct page *ppage); 95 - extern void __cifs_readahead_to_fscache(struct inode *pinode, loff_t pos, size_t len); 96 - 97 - 98 - static inline int cifs_readpage_from_fscache(struct inode *inode, 99 - struct page *page) 100 - { 101 - if (cifs_inode_cookie(inode)) 102 - return __cifs_readpage_from_fscache(inode, page); 103 - return -ENOBUFS; 104 - } 105 - 106 - static inline void cifs_readahead_to_fscache(struct inode *inode, 107 - loff_t pos, size_t len) 108 - { 109 - if (cifs_inode_cookie(inode)) 110 - __cifs_readahead_to_fscache(inode, pos, len); 111 - } 112 - #endif 113 - 114 77 static inline bool cifs_fscache_enabled(struct inode *inode) 115 78 { 116 79 return fscache_cookie_enabled(cifs_inode_cookie(inode)); ··· 95 132 static inline struct fscache_cookie *cifs_inode_cookie(struct inode *inode) { return NULL; } 96 133 static inline void cifs_invalidate_cache(struct inode *inode, unsigned int flags) {} 97 134 static inline bool cifs_fscache_enabled(struct inode *inode) { return false; } 98 - 99 - #if 0 // TODO remove 100 - static inline int cifs_fscache_query_occupancy(struct inode *inode, 101 - pgoff_t first, unsigned int nr_pages, 102 - pgoff_t *_data_first, 103 - unsigned int *_data_nr_pages) 104 - { 105 - *_data_first = ULONG_MAX; 106 - *_data_nr_pages = 0; 107 - return -ENOBUFS; 108 - } 109 
- 110 - static inline int 111 - cifs_readpage_from_fscache(struct inode *inode, struct page *page) 112 - { 113 - return -ENOBUFS; 114 - } 115 - 116 - static inline 117 - void cifs_readahead_to_fscache(struct inode *inode, loff_t pos, size_t len) {} 118 - #endif 119 135 120 136 #endif /* CONFIG_CIFS_FSCACHE */ 121 137