Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

netfs: Split retry code out of fs/netfs/write_collect.c

Split write-retry code out of fs/netfs/write_collect.c as it will become
more elaborate when content crypto is introduced.

Signed-off-by: David Howells <dhowells@redhat.com>
Link: https://lore.kernel.org/r/20241216204124.3752367-8-dhowells@redhat.com
cc: Jeff Layton <jlayton@kernel.org>
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
Signed-off-by: Christian Brauner <brauner@kernel.org>

Authored by David Howells and committed by Christian Brauner.
751e213f d606c362

+233 -215
+2 -1
fs/netfs/Makefile
··· 15 15 read_retry.o \ 16 16 rolling_buffer.o \ 17 17 write_collect.o \ 18 - write_issue.o 18 + write_issue.o \ 19 + write_retry.o 19 20 20 21 netfs-$(CONFIG_NETFS_STATS) += stats.o 21 22
+5
fs/netfs/internal.h
··· 190 190 int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len); 191 191 192 192 /* 193 + * write_retry.c 194 + */ 195 + void netfs_retry_writes(struct netfs_io_request *wreq); 196 + 197 + /* 193 198 * Miscellaneous functions. 194 199 */ 195 200 static inline bool netfs_is_cache_enabled(struct netfs_inode *ctx)
-214
fs/netfs/write_collect.c
··· 152 152 } 153 153 154 154 /* 155 - * Perform retries on the streams that need it. 156 - */ 157 - static void netfs_retry_write_stream(struct netfs_io_request *wreq, 158 - struct netfs_io_stream *stream) 159 - { 160 - struct list_head *next; 161 - 162 - _enter("R=%x[%x:]", wreq->debug_id, stream->stream_nr); 163 - 164 - if (list_empty(&stream->subrequests)) 165 - return; 166 - 167 - if (stream->source == NETFS_UPLOAD_TO_SERVER && 168 - wreq->netfs_ops->retry_request) 169 - wreq->netfs_ops->retry_request(wreq, stream); 170 - 171 - if (unlikely(stream->failed)) 172 - return; 173 - 174 - /* If there's no renegotiation to do, just resend each failed subreq. */ 175 - if (!stream->prepare_write) { 176 - struct netfs_io_subrequest *subreq; 177 - 178 - list_for_each_entry(subreq, &stream->subrequests, rreq_link) { 179 - if (test_bit(NETFS_SREQ_FAILED, &subreq->flags)) 180 - break; 181 - if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) { 182 - struct iov_iter source = subreq->io_iter; 183 - 184 - iov_iter_revert(&source, subreq->len - source.count); 185 - netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit); 186 - netfs_reissue_write(stream, subreq, &source); 187 - } 188 - } 189 - return; 190 - } 191 - 192 - next = stream->subrequests.next; 193 - 194 - do { 195 - struct netfs_io_subrequest *subreq = NULL, *from, *to, *tmp; 196 - struct iov_iter source; 197 - unsigned long long start, len; 198 - size_t part; 199 - bool boundary = false; 200 - 201 - /* Go through the stream and find the next span of contiguous 202 - * data that we then rejig (cifs, for example, needs the wsize 203 - * renegotiating) and reissue. 
204 - */ 205 - from = list_entry(next, struct netfs_io_subrequest, rreq_link); 206 - to = from; 207 - start = from->start + from->transferred; 208 - len = from->len - from->transferred; 209 - 210 - if (test_bit(NETFS_SREQ_FAILED, &from->flags) || 211 - !test_bit(NETFS_SREQ_NEED_RETRY, &from->flags)) 212 - return; 213 - 214 - list_for_each_continue(next, &stream->subrequests) { 215 - subreq = list_entry(next, struct netfs_io_subrequest, rreq_link); 216 - if (subreq->start + subreq->transferred != start + len || 217 - test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags) || 218 - !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) 219 - break; 220 - to = subreq; 221 - len += to->len; 222 - } 223 - 224 - /* Determine the set of buffers we're going to use. Each 225 - * subreq gets a subset of a single overall contiguous buffer. 226 - */ 227 - netfs_reset_iter(from); 228 - source = from->io_iter; 229 - source.count = len; 230 - 231 - /* Work through the sublist. */ 232 - subreq = from; 233 - list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) { 234 - if (!len) 235 - break; 236 - /* Renegotiate max_len (wsize) */ 237 - trace_netfs_sreq(subreq, netfs_sreq_trace_retry); 238 - __clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags); 239 - subreq->retry_count++; 240 - stream->prepare_write(subreq); 241 - 242 - part = min(len, stream->sreq_max_len); 243 - subreq->len = part; 244 - subreq->start = start; 245 - subreq->transferred = 0; 246 - len -= part; 247 - start += part; 248 - if (len && subreq == to && 249 - __test_and_clear_bit(NETFS_SREQ_BOUNDARY, &to->flags)) 250 - boundary = true; 251 - 252 - netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit); 253 - netfs_reissue_write(stream, subreq, &source); 254 - if (subreq == to) 255 - break; 256 - } 257 - 258 - /* If we managed to use fewer subreqs, we can discard the 259 - * excess; if we used the same number, then we're done. 
260 - */ 261 - if (!len) { 262 - if (subreq == to) 263 - continue; 264 - list_for_each_entry_safe_from(subreq, tmp, 265 - &stream->subrequests, rreq_link) { 266 - trace_netfs_sreq(subreq, netfs_sreq_trace_discard); 267 - list_del(&subreq->rreq_link); 268 - netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_done); 269 - if (subreq == to) 270 - break; 271 - } 272 - continue; 273 - } 274 - 275 - /* We ran out of subrequests, so we need to allocate some more 276 - * and insert them after. 277 - */ 278 - do { 279 - subreq = netfs_alloc_subrequest(wreq); 280 - subreq->source = to->source; 281 - subreq->start = start; 282 - subreq->debug_index = atomic_inc_return(&wreq->subreq_counter); 283 - subreq->stream_nr = to->stream_nr; 284 - subreq->retry_count = 1; 285 - 286 - trace_netfs_sreq_ref(wreq->debug_id, subreq->debug_index, 287 - refcount_read(&subreq->ref), 288 - netfs_sreq_trace_new); 289 - netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit); 290 - 291 - list_add(&subreq->rreq_link, &to->rreq_link); 292 - to = list_next_entry(to, rreq_link); 293 - trace_netfs_sreq(subreq, netfs_sreq_trace_retry); 294 - 295 - stream->sreq_max_len = len; 296 - stream->sreq_max_segs = INT_MAX; 297 - switch (stream->source) { 298 - case NETFS_UPLOAD_TO_SERVER: 299 - netfs_stat(&netfs_n_wh_upload); 300 - stream->sreq_max_len = umin(len, wreq->wsize); 301 - break; 302 - case NETFS_WRITE_TO_CACHE: 303 - netfs_stat(&netfs_n_wh_write); 304 - break; 305 - default: 306 - WARN_ON_ONCE(1); 307 - } 308 - 309 - stream->prepare_write(subreq); 310 - 311 - part = umin(len, stream->sreq_max_len); 312 - subreq->len = subreq->transferred + part; 313 - len -= part; 314 - start += part; 315 - if (!len && boundary) { 316 - __set_bit(NETFS_SREQ_BOUNDARY, &to->flags); 317 - boundary = false; 318 - } 319 - 320 - netfs_reissue_write(stream, subreq, &source); 321 - if (!len) 322 - break; 323 - 324 - } while (len); 325 - 326 - } while (!list_is_head(next, &stream->subrequests)); 327 - } 328 - 329 - 
/* 330 - * Perform retries on the streams that need it. If we're doing content 331 - * encryption and the server copy changed due to a third-party write, we may 332 - * need to do an RMW cycle and also rewrite the data to the cache. 333 - */ 334 - static void netfs_retry_writes(struct netfs_io_request *wreq) 335 - { 336 - struct netfs_io_subrequest *subreq; 337 - struct netfs_io_stream *stream; 338 - int s; 339 - 340 - /* Wait for all outstanding I/O to quiesce before performing retries as 341 - * we may need to renegotiate the I/O sizes. 342 - */ 343 - for (s = 0; s < NR_IO_STREAMS; s++) { 344 - stream = &wreq->io_streams[s]; 345 - if (!stream->active) 346 - continue; 347 - 348 - list_for_each_entry(subreq, &stream->subrequests, rreq_link) { 349 - wait_on_bit(&subreq->flags, NETFS_SREQ_IN_PROGRESS, 350 - TASK_UNINTERRUPTIBLE); 351 - } 352 - } 353 - 354 - // TODO: Enc: Fetch changed partial pages 355 - // TODO: Enc: Reencrypt content if needed. 356 - // TODO: Enc: Wind back transferred point. 357 - // TODO: Enc: Mark cache pages for retry. 358 - 359 - for (s = 0; s < NR_IO_STREAMS; s++) { 360 - stream = &wreq->io_streams[s]; 361 - if (stream->need_retry) { 362 - stream->need_retry = false; 363 - netfs_retry_write_stream(wreq, stream); 364 - } 365 - } 366 - } 367 - 368 - /* 369 155 * Collect and assess the results of various write subrequests. We may need to 370 156 * retry some of the results - or even do an RMW cycle for content crypto. 371 157 *
+226
fs/netfs/write_retry.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* Network filesystem write retrying. 3 + * 4 + * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved. 5 + * Written by David Howells (dhowells@redhat.com) 6 + */ 7 + 8 + #include <linux/fs.h> 9 + #include <linux/mm.h> 10 + #include <linux/pagemap.h> 11 + #include <linux/slab.h> 12 + #include "internal.h" 13 + 14 + /* 15 + * Perform retries on the streams that need it. 16 + */ 17 + static void netfs_retry_write_stream(struct netfs_io_request *wreq, 18 + struct netfs_io_stream *stream) 19 + { 20 + struct list_head *next; 21 + 22 + _enter("R=%x[%x:]", wreq->debug_id, stream->stream_nr); 23 + 24 + if (list_empty(&stream->subrequests)) 25 + return; 26 + 27 + if (stream->source == NETFS_UPLOAD_TO_SERVER && 28 + wreq->netfs_ops->retry_request) 29 + wreq->netfs_ops->retry_request(wreq, stream); 30 + 31 + if (unlikely(stream->failed)) 32 + return; 33 + 34 + /* If there's no renegotiation to do, just resend each failed subreq. */ 35 + if (!stream->prepare_write) { 36 + struct netfs_io_subrequest *subreq; 37 + 38 + list_for_each_entry(subreq, &stream->subrequests, rreq_link) { 39 + if (test_bit(NETFS_SREQ_FAILED, &subreq->flags)) 40 + break; 41 + if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) { 42 + struct iov_iter source = subreq->io_iter; 43 + 44 + iov_iter_revert(&source, subreq->len - source.count); 45 + netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit); 46 + netfs_reissue_write(stream, subreq, &source); 47 + } 48 + } 49 + return; 50 + } 51 + 52 + next = stream->subrequests.next; 53 + 54 + do { 55 + struct netfs_io_subrequest *subreq = NULL, *from, *to, *tmp; 56 + struct iov_iter source; 57 + unsigned long long start, len; 58 + size_t part; 59 + bool boundary = false; 60 + 61 + /* Go through the stream and find the next span of contiguous 62 + * data that we then rejig (cifs, for example, needs the wsize 63 + * renegotiating) and reissue. 
64 + */ 65 + from = list_entry(next, struct netfs_io_subrequest, rreq_link); 66 + to = from; 67 + start = from->start + from->transferred; 68 + len = from->len - from->transferred; 69 + 70 + if (test_bit(NETFS_SREQ_FAILED, &from->flags) || 71 + !test_bit(NETFS_SREQ_NEED_RETRY, &from->flags)) 72 + return; 73 + 74 + list_for_each_continue(next, &stream->subrequests) { 75 + subreq = list_entry(next, struct netfs_io_subrequest, rreq_link); 76 + if (subreq->start + subreq->transferred != start + len || 77 + test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags) || 78 + !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) 79 + break; 80 + to = subreq; 81 + len += to->len; 82 + } 83 + 84 + /* Determine the set of buffers we're going to use. Each 85 + * subreq gets a subset of a single overall contiguous buffer. 86 + */ 87 + netfs_reset_iter(from); 88 + source = from->io_iter; 89 + source.count = len; 90 + 91 + /* Work through the sublist. */ 92 + subreq = from; 93 + list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) { 94 + if (!len) 95 + break; 96 + /* Renegotiate max_len (wsize) */ 97 + trace_netfs_sreq(subreq, netfs_sreq_trace_retry); 98 + __clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags); 99 + subreq->retry_count++; 100 + stream->prepare_write(subreq); 101 + 102 + part = min(len, stream->sreq_max_len); 103 + subreq->len = part; 104 + subreq->start = start; 105 + subreq->transferred = 0; 106 + len -= part; 107 + start += part; 108 + if (len && subreq == to && 109 + __test_and_clear_bit(NETFS_SREQ_BOUNDARY, &to->flags)) 110 + boundary = true; 111 + 112 + netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit); 113 + netfs_reissue_write(stream, subreq, &source); 114 + if (subreq == to) 115 + break; 116 + } 117 + 118 + /* If we managed to use fewer subreqs, we can discard the 119 + * excess; if we used the same number, then we're done. 
120 + */ 121 + if (!len) { 122 + if (subreq == to) 123 + continue; 124 + list_for_each_entry_safe_from(subreq, tmp, 125 + &stream->subrequests, rreq_link) { 126 + trace_netfs_sreq(subreq, netfs_sreq_trace_discard); 127 + list_del(&subreq->rreq_link); 128 + netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_done); 129 + if (subreq == to) 130 + break; 131 + } 132 + continue; 133 + } 134 + 135 + /* We ran out of subrequests, so we need to allocate some more 136 + * and insert them after. 137 + */ 138 + do { 139 + subreq = netfs_alloc_subrequest(wreq); 140 + subreq->source = to->source; 141 + subreq->start = start; 142 + subreq->debug_index = atomic_inc_return(&wreq->subreq_counter); 143 + subreq->stream_nr = to->stream_nr; 144 + subreq->retry_count = 1; 145 + 146 + trace_netfs_sreq_ref(wreq->debug_id, subreq->debug_index, 147 + refcount_read(&subreq->ref), 148 + netfs_sreq_trace_new); 149 + netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit); 150 + 151 + list_add(&subreq->rreq_link, &to->rreq_link); 152 + to = list_next_entry(to, rreq_link); 153 + trace_netfs_sreq(subreq, netfs_sreq_trace_retry); 154 + 155 + stream->sreq_max_len = len; 156 + stream->sreq_max_segs = INT_MAX; 157 + switch (stream->source) { 158 + case NETFS_UPLOAD_TO_SERVER: 159 + netfs_stat(&netfs_n_wh_upload); 160 + stream->sreq_max_len = umin(len, wreq->wsize); 161 + break; 162 + case NETFS_WRITE_TO_CACHE: 163 + netfs_stat(&netfs_n_wh_write); 164 + break; 165 + default: 166 + WARN_ON_ONCE(1); 167 + } 168 + 169 + stream->prepare_write(subreq); 170 + 171 + part = umin(len, stream->sreq_max_len); 172 + subreq->len = subreq->transferred + part; 173 + len -= part; 174 + start += part; 175 + if (!len && boundary) { 176 + __set_bit(NETFS_SREQ_BOUNDARY, &to->flags); 177 + boundary = false; 178 + } 179 + 180 + netfs_reissue_write(stream, subreq, &source); 181 + if (!len) 182 + break; 183 + 184 + } while (len); 185 + 186 + } while (!list_is_head(next, &stream->subrequests)); 187 + } 188 + 189 + 
/* 190 + * Perform retries on the streams that need it. If we're doing content 191 + * encryption and the server copy changed due to a third-party write, we may 192 + * need to do an RMW cycle and also rewrite the data to the cache. 193 + */ 194 + void netfs_retry_writes(struct netfs_io_request *wreq) 195 + { 196 + struct netfs_io_subrequest *subreq; 197 + struct netfs_io_stream *stream; 198 + int s; 199 + 200 + /* Wait for all outstanding I/O to quiesce before performing retries as 201 + * we may need to renegotiate the I/O sizes. 202 + */ 203 + for (s = 0; s < NR_IO_STREAMS; s++) { 204 + stream = &wreq->io_streams[s]; 205 + if (!stream->active) 206 + continue; 207 + 208 + list_for_each_entry(subreq, &stream->subrequests, rreq_link) { 209 + wait_on_bit(&subreq->flags, NETFS_SREQ_IN_PROGRESS, 210 + TASK_UNINTERRUPTIBLE); 211 + } 212 + } 213 + 214 + // TODO: Enc: Fetch changed partial pages 215 + // TODO: Enc: Reencrypt content if needed. 216 + // TODO: Enc: Wind back transferred point. 217 + // TODO: Enc: Mark cache pages for retry. 218 + 219 + for (s = 0; s < NR_IO_STREAMS; s++) { 220 + stream = &wreq->io_streams[s]; 221 + if (stream->need_retry) { 222 + stream->need_retry = false; 223 + netfs_retry_write_stream(wreq, stream); 224 + } 225 + } 226 + }