Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge patch series "Finish converting jffs2 to folios"

Matthew Wilcox (Oracle) <willy@infradead.org> says:

This patch series applies on top of fs-next. After applying these two
patches, there are no more references to 'struct page' in jffs2. I
obviously haven't tested it at all beyond compilation.

* patches from https://lore.kernel.org/r/20240814195915.249871-1-willy@infradead.org:
jffs2: Use a folio in jffs2_garbage_collect_dnode()
jffs2: Convert jffs2_do_readpage_nolock to take a folio

Link: https://lore.kernel.org/r/20240814195915.249871-1-willy@infradead.org
Signed-off-by: Christian Brauner <brauner@kernel.org>

+23 -26
+11 -13
fs/jffs2/file.c
··· 77 77 .write_end = jffs2_write_end, 78 78 }; 79 79 80 - static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg) 80 + static int jffs2_do_readpage_nolock(struct inode *inode, struct folio *folio) 81 81 { 82 82 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); 83 83 struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); 84 - unsigned char *pg_buf; 84 + unsigned char *kaddr; 85 85 int ret; 86 86 87 87 jffs2_dbg(2, "%s(): ino #%lu, page at offset 0x%lx\n", 88 - __func__, inode->i_ino, pg->index << PAGE_SHIFT); 88 + __func__, inode->i_ino, folio->index << PAGE_SHIFT); 89 89 90 - BUG_ON(!PageLocked(pg)); 90 + BUG_ON(!folio_test_locked(folio)); 91 91 92 - pg_buf = kmap(pg); 93 - /* FIXME: Can kmap fail? */ 94 - 95 - ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_SHIFT, 92 + kaddr = kmap_local_folio(folio, 0); 93 + ret = jffs2_read_inode_range(c, f, kaddr, folio->index << PAGE_SHIFT, 96 94 PAGE_SIZE); 95 + kunmap_local(kaddr); 97 96 98 97 if (!ret) 99 - SetPageUptodate(pg); 98 + folio_mark_uptodate(folio); 100 99 101 - flush_dcache_page(pg); 102 - kunmap(pg); 100 + flush_dcache_folio(folio); 103 101 104 102 jffs2_dbg(2, "readpage finished\n"); 105 103 return ret; ··· 105 107 106 108 int __jffs2_read_folio(struct file *file, struct folio *folio) 107 109 { 108 - int ret = jffs2_do_readpage_nolock(folio->mapping->host, &folio->page); 110 + int ret = jffs2_do_readpage_nolock(folio->mapping->host, folio); 109 111 folio_unlock(folio); 110 112 return ret; 111 113 } ··· 219 221 */ 220 222 if (!folio_test_uptodate(folio)) { 221 223 mutex_lock(&f->sem); 222 - ret = jffs2_do_readpage_nolock(inode, &folio->page); 224 + ret = jffs2_do_readpage_nolock(inode, folio); 223 225 mutex_unlock(&f->sem); 224 226 if (ret) { 225 227 folio_unlock(folio);
+12 -13
fs/jffs2/gc.c
··· 1171 1171 uint32_t alloclen, offset, orig_end, orig_start; 1172 1172 int ret = 0; 1173 1173 unsigned char *comprbuf = NULL, *writebuf; 1174 - struct page *page; 1174 + struct folio *folio; 1175 1175 unsigned char *pg_ptr; 1176 1176 1177 1177 memset(&ri, 0, sizeof(ri)); ··· 1317 1317 BUG_ON(start > orig_start); 1318 1318 } 1319 1319 1320 - /* The rules state that we must obtain the page lock *before* f->sem, so 1320 + /* The rules state that we must obtain the folio lock *before* f->sem, so 1321 1321 * drop f->sem temporarily. Since we also hold c->alloc_sem, nothing's 1322 1322 * actually going to *change* so we're safe; we only allow reading. 1323 1323 * 1324 1324 * It is important to note that jffs2_write_begin() will ensure that its 1325 - * page is marked Uptodate before allocating space. That means that if we 1326 - * end up here trying to GC the *same* page that jffs2_write_begin() is 1327 - * trying to write out, read_cache_page() will not deadlock. */ 1325 + * folio is marked uptodate before allocating space. That means that if we 1326 + * end up here trying to GC the *same* folio that jffs2_write_begin() is 1327 + * trying to write out, read_cache_folio() will not deadlock. */ 1328 1328 mutex_unlock(&f->sem); 1329 - page = read_cache_page(inode->i_mapping, start >> PAGE_SHIFT, 1329 + folio = read_cache_folio(inode->i_mapping, start >> PAGE_SHIFT, 1330 1330 __jffs2_read_folio, NULL); 1331 - if (IS_ERR(page)) { 1332 - pr_warn("read_cache_page() returned error: %ld\n", 1333 - PTR_ERR(page)); 1331 + if (IS_ERR(folio)) { 1332 + pr_warn("read_cache_folio() returned error: %ld\n", 1333 + PTR_ERR(folio)); 1334 1334 mutex_lock(&f->sem); 1335 - return PTR_ERR(page); 1335 + return PTR_ERR(folio); 1336 1336 } 1337 1337 1338 - pg_ptr = kmap(page); 1338 + pg_ptr = kmap_local_folio(folio, 0); 1339 1339 mutex_lock(&f->sem); 1340 1340 1341 1341 offset = start; ··· 1400 1400 } 1401 1401 } 1402 1402 1403 - kunmap(page); 1404 - put_page(page); 1403 + folio_release_kmap(folio, pg_ptr); 1405 1404 return ret; 1406 1405 }