
netfs: Add a function to consolidate beginning a read

Add a function that performs the steps needed to begin a read request,
allowing that code to be removed from several other functions and
consolidated in one place.
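
With this change, each entry point reduces to a single call, differing
only in whether it waits for the result.  Condensed from the hunks below
(not a literal quote of the code):

    /* Readahead is fire-and-forget: completion is assessed asynchronously
     * by whichever context drops nr_outstanding to zero.
     */
    netfs_begin_read(rreq, false);

    /* Readpage and write_begin wait for the read and take its error. */
    ret = netfs_begin_read(rreq, true);

Passing the wait policy as a bool keeps that decision at the call site
while the slicing, submission and refcounting logic lives in one place.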

Changes
=======
ver #2)
- Move this patch before the unstaticking patch so that some functions can
  be left static.
- Set the otherwise-uninitialised return code in netfs_begin_read() [1][2].
- Fix a ref leak caused by not removing a get from netfs_write_begin() when
  the request submission code moved into netfs_begin_read() (see the
  refcounting sketch after this list).
- Use INIT_WORK() to (re-)initialise the request's work_struct [3].
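
The return-code and ref-leak fixes above both hinge on how netfs_begin_read()
balances the request ref against the nr_outstanding count.  Below is a rough,
hypothetical userspace model of that protocol; every model_* name is invented
for illustration, and the hunks further down are the authoritative code:

    /* Hypothetical userspace model of the nr_outstanding protocol; for
     * illustration only, not kernel code.
     */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct model_request {
            atomic_int nr_outstanding;
            int error;
    };

    static void model_assess(const char *who)
    {
            printf("%s: assess and complete the request\n", who);
    }

    /* A slice completion drops one count; taking the count to zero means
     * this context inherits the request ref and must assess it (async mode
     * only, since a sync caller keeps one count for itself).
     */
    static void model_slice_complete(struct model_request *rreq)
    {
            if (atomic_fetch_sub(&rreq->nr_outstanding, 1) == 1)
                    model_assess("completion context");
    }

    static int model_begin_read(struct model_request *rreq, bool sync,
                                int nr_slices)
    {
            atomic_store(&rreq->nr_outstanding, 1); /* submission bias */

            for (int i = 0; i < nr_slices; i++)
                    atomic_fetch_add(&rreq->nr_outstanding, 1);

            /* Pretend all the I/O finished immediately. */
            for (int i = 0; i < nr_slices; i++)
                    model_slice_complete(rreq);

            if (sync) {
                    /* Keep the bias so the count bottoms out at 1, never 0:
                     * assessment then always runs in this thread.  The
                     * kernel waits with wait_var_event() instead of spinning.
                     */
                    while (atomic_load(&rreq->nr_outstanding) != 1)
                            ;
                    model_assess("sync caller");
                    return rreq->error;
            }

            /* Async: drop the bias.  If all slices completed already, the
             * count hits zero here and we assess; otherwise the last
             * completion to finish will do it.
             */
            if (atomic_fetch_sub(&rreq->nr_outstanding, 1) == 1)
                    model_assess("async caller");
            return 0;
    }

    int main(void)
    {
            struct model_request rreq = { .error = 0 };

            model_begin_read(&rreq, true, 3);  /* "sync caller: ..." */
            model_begin_read(&rreq, false, 3); /* "async caller: ..." */
            return 0;
    }

In these terms, the ref leak was the sync-mode get being taken twice once
submission moved into netfs_begin_read(): the function now takes that ref
itself, so netfs_readpage() and netfs_write_begin() had to drop theirs, as
the read_helper.c hunks show.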

Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
cc: linux-cachefs@redhat.com

Link: https://lore.kernel.org/r/20220303163826.1120936-1-nathan@kernel.org/ [1]
Link: https://lore.kernel.org/r/20220303235647.1297171-1-colin.i.king@gmail.com/ [2]
Link: https://lore.kernel.org/r/9d69be49081bccff44260e4c6e0049c63d6d04a1.camel@redhat.com/ [3]
Link: https://lore.kernel.org/r/164623004355.3564931.7275693529042495641.stgit@warthog.procyon.org.uk/ # v1
Link: https://lore.kernel.org/r/164678214287.1200972.16734134007649832160.stgit@warthog.procyon.org.uk/ # v2
Link: https://lore.kernel.org/r/164692911113.2099075.1060868473229451371.stgit@warthog.procyon.org.uk/ # v3

4 files changed, +76 -76
+1 -1
fs/netfs/internal.h
···
  */
 extern unsigned int netfs_debug;
 
-void netfs_rreq_work(struct work_struct *work);
+int netfs_begin_read(struct netfs_io_request *rreq, bool sync);
 
 /*
  * stats.c
+0 -1
fs/netfs/objects.c
···
         rreq->i_size    = i_size_read(inode);
         rreq->debug_id  = atomic_inc_return(&debug_ids);
         INIT_LIST_HEAD(&rreq->subrequests);
-        INIT_WORK(&rreq->work, netfs_rreq_work);
         refcount_set(&rreq->ref, 1);
         __set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
         if (rreq->netfs_ops->init_request) {
+72 -72
fs/netfs/read_helper.c
···
         netfs_rreq_completed(rreq, was_async);
 }
 
-void netfs_rreq_work(struct work_struct *work)
+static void netfs_rreq_work(struct work_struct *work)
 {
         struct netfs_io_request *rreq =
                 container_of(work, struct netfs_io_request, work);
···
         return false;
 }
 
+/*
+ * Begin the process of reading in a chunk of data, where that data may be
+ * stitched together from multiple sources, including multiple servers and the
+ * local cache.
+ */
+int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
+{
+        unsigned int debug_index = 0;
+        int ret;
+
+        _enter("R=%x %llx-%llx",
+               rreq->debug_id, rreq->start, rreq->start + rreq->len - 1);
+
+        if (rreq->len == 0) {
+                pr_err("Zero-sized read [R=%x]\n", rreq->debug_id);
+                netfs_put_request(rreq, false, netfs_rreq_trace_put_zero_len);
+                return -EIO;
+        }
+
+        INIT_WORK(&rreq->work, netfs_rreq_work);
+
+        if (sync)
+                netfs_get_request(rreq, netfs_rreq_trace_get_hold);
+
+        /* Chop the read into slices according to what the cache and the netfs
+         * want and submit each one.
+         */
+        atomic_set(&rreq->nr_outstanding, 1);
+        do {
+                if (!netfs_rreq_submit_slice(rreq, &debug_index))
+                        break;
+
+        } while (rreq->submitted < rreq->len);
+
+        if (sync) {
+                /* Keep nr_outstanding incremented so that the ref always
+                 * belongs to us, and the service code isn't punted off to a
+                 * random thread pool to process.
+                 */
+                for (;;) {
+                        wait_var_event(&rreq->nr_outstanding,
+                                       atomic_read(&rreq->nr_outstanding) == 1);
+                        netfs_rreq_assess(rreq, false);
+                        if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
+                                break;
+                        cond_resched();
+                }
+
+                ret = rreq->error;
+                if (ret == 0 && rreq->submitted < rreq->len) {
+                        trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
+                        ret = -EIO;
+                }
+                netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
+        } else {
+                /* If we decrement nr_outstanding to 0, the ref belongs to us. */
+                if (atomic_dec_and_test(&rreq->nr_outstanding))
+                        netfs_rreq_assess(rreq, false);
+                ret = 0;
+        }
+        return ret;
+}
+
 static void netfs_cache_expand_readahead(struct netfs_io_request *rreq,
                                          loff_t *_start, size_t *_len, loff_t i_size)
 {
···
 {
         struct netfs_io_request *rreq;
         struct netfs_i_context *ctx = netfs_i_context(ractl->mapping->host);
-        unsigned int debug_index = 0;
         int ret;
 
         _enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));
···
 
         netfs_rreq_expand(rreq, ractl);
 
-        atomic_set(&rreq->nr_outstanding, 1);
-        do {
-                if (!netfs_rreq_submit_slice(rreq, &debug_index))
-                        break;
-
-        } while (rreq->submitted < rreq->len);
-
         /* Drop the refs on the folios here rather than in the cache or
          * filesystem.  The locks will be dropped in netfs_rreq_unlock().
          */
         while (readahead_folio(ractl))
                 ;
 
-        /* If we decrement nr_outstanding to 0, the ref belongs to us. */
-        if (atomic_dec_and_test(&rreq->nr_outstanding))
-                netfs_rreq_assess(rreq, false);
+        netfs_begin_read(rreq, false);
         return;
 
 cleanup_free:
···
         struct address_space *mapping = folio->mapping;
         struct netfs_io_request *rreq;
         struct netfs_i_context *ctx = netfs_i_context(mapping->host);
-        unsigned int debug_index = 0;
         int ret;
 
         _enter("%lx", folio_index(folio));
···
 
         if (ctx->ops->begin_cache_operation) {
                 ret = ctx->ops->begin_cache_operation(rreq);
-                if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS) {
-                        folio_unlock(folio);
-                        goto out;
-                }
+                if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+                        goto discard;
         }
 
         netfs_stat(&netfs_n_rh_readpage);
         trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
+        return netfs_begin_read(rreq, true);
 
-        netfs_get_request(rreq, netfs_rreq_trace_get_hold);
-
-        atomic_set(&rreq->nr_outstanding, 1);
-        do {
-                if (!netfs_rreq_submit_slice(rreq, &debug_index))
-                        break;
-
-        } while (rreq->submitted < rreq->len);
-
-        /* Keep nr_outstanding incremented so that the ref always belongs to us, and
-         * the service code isn't punted off to a random thread pool to
-         * process.
-         */
-        do {
-                wait_var_event(&rreq->nr_outstanding,
-                               atomic_read(&rreq->nr_outstanding) == 1);
-                netfs_rreq_assess(rreq, false);
-        } while (test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags));
-
-        ret = rreq->error;
-        if (ret == 0 && rreq->submitted < rreq->len) {
-                trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_readpage);
-                ret = -EIO;
-        }
-out:
-        netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
-        return ret;
+discard:
+        netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
 alloc_error:
         folio_unlock(folio);
         return ret;
···
         struct netfs_io_request *rreq;
         struct netfs_i_context *ctx = netfs_i_context(file_inode(file));
         struct folio *folio;
-        unsigned int debug_index = 0, fgp_flags;
+        unsigned int fgp_flags;
         pgoff_t index = pos >> PAGE_SHIFT;
         int ret;
 
···
          */
         ractl._nr_pages = folio_nr_pages(folio);
         netfs_rreq_expand(rreq, &ractl);
-        netfs_get_request(rreq, netfs_rreq_trace_get_hold);
 
         /* We hold the folio locks, so we can drop the references */
         folio_get(folio);
         while (readahead_folio(&ractl))
                 ;
 
-        atomic_set(&rreq->nr_outstanding, 1);
-        do {
-                if (!netfs_rreq_submit_slice(rreq, &debug_index))
-                        break;
-
-        } while (rreq->submitted < rreq->len);
-
-        /* Keep nr_outstanding incremented so that the ref always belongs to
-         * us, and the service code isn't punted off to a random thread pool to
-         * process.
-         */
-        for (;;) {
-                wait_var_event(&rreq->nr_outstanding,
-                               atomic_read(&rreq->nr_outstanding) == 1);
-                netfs_rreq_assess(rreq, false);
-                if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
-                        break;
-                cond_resched();
-        }
-
-        ret = rreq->error;
-        if (ret == 0 && rreq->submitted < rreq->len) {
-                trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_write_begin);
-                ret = -EIO;
-        }
-        netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
+        ret = netfs_begin_read(rreq, true);
         if (ret < 0)
                 goto error;
 
+3 -2
include/trace/events/netfs.h
···
         EM(netfs_fail_check_write_begin, "check-write-begin") \
         EM(netfs_fail_copy_to_cache, "copy-to-cache") \
         EM(netfs_fail_read, "read") \
-        EM(netfs_fail_short_readpage, "short-readpage") \
-        EM(netfs_fail_short_write_begin, "short-write-begin") \
+        EM(netfs_fail_short_read, "short-read") \
         E_(netfs_fail_prepare_write, "prep-write")
 
 #define netfs_rreq_ref_traces \
         EM(netfs_rreq_trace_get_hold, "GET HOLD   ") \
         EM(netfs_rreq_trace_get_subreq, "GET SUBREQ ") \
         EM(netfs_rreq_trace_put_complete, "PUT COMPLT ") \
+        EM(netfs_rreq_trace_put_discard, "PUT DISCARD") \
         EM(netfs_rreq_trace_put_failed, "PUT FAILED ") \
         EM(netfs_rreq_trace_put_hold, "PUT HOLD   ") \
         EM(netfs_rreq_trace_put_subreq, "PUT SUBREQ ") \
+        EM(netfs_rreq_trace_put_zero_len, "PUT ZEROLEN") \
         E_(netfs_rreq_trace_new, "NEW        ")
 
 #define netfs_sreq_ref_traces \