Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'vfs-6.12-rc2.fixes.2' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs

Pull vfs fixes from Christian Brauner:
"vfs:

- Ensure that iter_folioq_get_pages() advances to the next slot
otherwise it will end up using the same folio with an out-of-bound
offset.

iomap:

- Don't unshare delalloc extents which can't be reflinked, and thus
can't be shared.

- Constrain the file range passed to iomap_file_unshare() directly in
iomap instead of requiring the callers to do it.

netfs:

- Use folioq_count instead of folioq_nr_slot to prevent an
uninitialized value warning in netfs_clear_buffer().

- Fix missing wakeup after issuing writes by scheduling the write
collector only if all the subrequest queues are empty and thus no
writes are pending.

- Fix two minor documentation bugs"

* tag 'vfs-6.12-rc2.fixes.2' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs:
iomap: constrain the file range passed to iomap_file_unshare
iomap: don't bother unsharing delalloc extents
netfs: Fix missing wakeup after issuing writes
Documentation: add missing folio_queue entry
folio_queue: fix documentation
netfs: Fix a KMSAN uninit-value error in netfs_clear_buffer
iov_iter: fix advancing slot in iter_folioq_get_pages()

+43 -21
+1
Documentation/core-api/index.rst
··· 37 37 kref 38 38 cleanup 39 39 assoc_array 40 + folio_queue 40 41 xarray 41 42 maple_tree 42 43 idr
+5 -1
fs/dax.c
··· 1305 1305 struct iomap_iter iter = { 1306 1306 .inode = inode, 1307 1307 .pos = pos, 1308 - .len = len, 1309 1308 .flags = IOMAP_WRITE | IOMAP_UNSHARE | IOMAP_DAX, 1310 1309 }; 1310 + loff_t size = i_size_read(inode); 1311 1311 int ret; 1312 1312 1313 + if (pos < 0 || pos >= size) 1314 + return 0; 1315 + 1316 + iter.len = min(len, size - pos); 1313 1317 while ((ret = iomap_iter(&iter, ops)) > 0) 1314 1318 iter.processed = dax_unshare_iter(&iter); 1315 1319 return ret;
+7 -2
fs/iomap/buffered-io.c
··· 1321 1321 return length; 1322 1322 1323 1323 /* 1324 - * Don't bother with holes or unwritten extents. 1324 + * Don't bother with delalloc reservations, holes or unwritten extents. 1325 1325 * 1326 1326 * Note that we use srcmap directly instead of iomap_iter_srcmap as 1327 1327 * unsharing requires providing a separate source map, and the presence ··· 1330 1330 * fork for XFS. 1331 1331 */ 1332 1332 if (iter->srcmap.type == IOMAP_HOLE || 1333 + iter->srcmap.type == IOMAP_DELALLOC || 1333 1334 iter->srcmap.type == IOMAP_UNWRITTEN) 1334 1335 return length; 1335 1336 ··· 1375 1374 struct iomap_iter iter = { 1376 1375 .inode = inode, 1377 1376 .pos = pos, 1378 - .len = len, 1379 1377 .flags = IOMAP_WRITE | IOMAP_UNSHARE, 1380 1378 }; 1379 + loff_t size = i_size_read(inode); 1381 1380 int ret; 1382 1381 1382 + if (pos < 0 || pos >= size) 1383 + return 0; 1384 + 1385 + iter.len = min(len, size - pos); 1383 1386 while ((ret = iomap_iter(&iter, ops)) > 0) 1384 1387 iter.processed = iomap_unshare_iter(&iter); 1385 1388 return ret;
+1 -1
fs/netfs/misc.c
··· 102 102 103 103 while ((p = rreq->buffer)) { 104 104 rreq->buffer = p->next; 105 - for (int slot = 0; slot < folioq_nr_slots(p); slot++) { 105 + for (int slot = 0; slot < folioq_count(p); slot++) { 106 106 struct folio *folio = folioq_folio(p, slot); 107 107 if (!folio) 108 108 continue;
+27 -15
fs/netfs/write_issue.c
··· 509 509 } 510 510 511 511 /* 512 + * End the issuing of writes, letting the collector know we're done. 513 + */ 514 + static void netfs_end_issue_write(struct netfs_io_request *wreq) 515 + { 516 + bool needs_poke = true; 517 + 518 + smp_wmb(); /* Write subreq lists before ALL_QUEUED. */ 519 + set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags); 520 + 521 + for (int s = 0; s < NR_IO_STREAMS; s++) { 522 + struct netfs_io_stream *stream = &wreq->io_streams[s]; 523 + 524 + if (!stream->active) 525 + continue; 526 + if (!list_empty(&stream->subrequests)) 527 + needs_poke = false; 528 + netfs_issue_write(wreq, stream); 529 + } 530 + 531 + if (needs_poke) 532 + netfs_wake_write_collector(wreq, false); 533 + } 534 + 535 + /* 512 536 * Write some of the pending data back to the server 513 537 */ 514 538 int netfs_writepages(struct address_space *mapping, ··· 583 559 break; 584 560 } while ((folio = writeback_iter(mapping, wbc, folio, &error))); 585 561 586 - for (int s = 0; s < NR_IO_STREAMS; s++) 587 - netfs_issue_write(wreq, &wreq->io_streams[s]); 588 - smp_wmb(); /* Write lists before ALL_QUEUED. */ 589 - set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags); 562 + netfs_end_issue_write(wreq); 590 563 591 564 mutex_unlock(&ictx->wb_lock); 592 565 ··· 671 650 if (writethrough_cache) 672 651 netfs_write_folio(wreq, wbc, writethrough_cache); 673 652 674 - netfs_issue_write(wreq, &wreq->io_streams[0]); 675 - netfs_issue_write(wreq, &wreq->io_streams[1]); 676 - smp_wmb(); /* Write lists before ALL_QUEUED. */ 677 - set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags); 653 + netfs_end_issue_write(wreq); 678 654 679 655 mutex_unlock(&ictx->wb_lock); 680 656 ··· 717 699 break; 718 700 } 719 701 720 - netfs_issue_write(wreq, upload); 721 - 722 - smp_wmb(); /* Write lists before ALL_QUEUED. 
*/ 723 - set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags); 724 - if (list_empty(&upload->subrequests)) 725 - netfs_wake_write_collector(wreq, false); 726 - 702 + netfs_end_issue_write(wreq); 727 703 _leave(" = %d", error); 728 704 return error; 729 705 }
+1 -1
include/linux/folio_queue.h
··· 81 81 } 82 82 83 83 /** 84 - * folioq_count: Query if a folio queue segment is full 84 + * folioq_full: Query if a folio queue segment is full 85 85 * @folioq: The segment to query 86 86 * 87 87 * Query if a folio queue segment is fully occupied. Note that this does not
+1 -1
lib/iov_iter.c
··· 1033 1033 if (maxpages == 0 || extracted >= maxsize) 1034 1034 break; 1035 1035 1036 - if (offset >= fsize) { 1036 + if (iov_offset >= fsize) { 1037 1037 iov_offset = 0; 1038 1038 slot++; 1039 1039 if (slot == folioq_nr_slots(folioq) && folioq->next) {