// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/readpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2015, Google, Inc.
 *
 * This was originally taken from fs/mpage.c
 *
 * The ext4_mpage_readpages() function here is intended to
 * replace mpage_readahead() in the general case, not just for
 * encrypted files.  It has some limitations (see below), where it
 * will fall back to block_read_full_folio(), but these limitations
 * should only be hit when page_size != block_size.
 *
 * This will allow us to attach a callback function to support ext4
 * encryption.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_SIZE setups.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blk-crypto.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>

#include "ext4.h"
#include <trace/events/ext4.h>

#define NUM_PREALLOC_POST_READ_CTXS	128

static struct kmem_cache *bio_post_read_ctx_cache;
static mempool_t *bio_post_read_ctx_pool;

/* postprocessing steps for read bios */
enum bio_post_read_step {
	STEP_INITIAL = 0,
	STEP_DECRYPT,
	STEP_VERITY,
	STEP_MAX,
};

struct bio_post_read_ctx {
	struct bio *bio;
	struct fsverity_info *vi;
	struct work_struct work;
	unsigned int cur_step;
	unsigned int enabled_steps;
};

static void __read_end_io(struct bio *bio)
{
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio)
		folio_end_read(fi.folio, bio->bi_status == 0);
	if (bio->bi_private)
		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
	bio_put(bio);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx);

static void decrypt_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;

	if (fscrypt_decrypt_bio(bio))
		bio_post_read_processing(ctx);
	else
		__read_end_io(bio);
}

static void verity_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;
	struct fsverity_info *vi = ctx->vi;

	/*
	 * fsverity_verify_bio() may call readahead() again, and although verity
	 * will be disabled for that, decryption may still be needed, causing
	 * another bio_post_read_ctx to be allocated.  So to guarantee that
	 * mempool_alloc() never deadlocks we must free the current ctx first.
	 * This is safe because verity is the last post-read step.
	 */
	BUILD_BUG_ON(STEP_VERITY + 1 != STEP_MAX);
	mempool_free(ctx, bio_post_read_ctx_pool);
	bio->bi_private = NULL;

	fsverity_verify_bio(vi, bio);

	__read_end_io(bio);
}
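/*
 * Illustrative sketch (not part of the original file): ctx->enabled_steps
 * is a bitmask indexed by enum bio_post_read_step.  A hypothetical helper
 * such as the one below shows the intended test; the real code open-codes
 * this check in bio_post_read_processing() below.
 */
static inline bool bio_post_read_step_enabled(const struct bio_post_read_ctx *ctx,
					      enum bio_post_read_step step)
{
	return ctx->enabled_steps & (1U << step);
}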
static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
{
	/*
	 * We use different work queues for decryption and for verity because
	 * verity may require reading metadata pages that need decryption, and
	 * we shouldn't recurse to the same workqueue.
	 */
	switch (++ctx->cur_step) {
	case STEP_DECRYPT:
		if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
			INIT_WORK(&ctx->work, decrypt_work);
			fscrypt_enqueue_decrypt_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		fallthrough;
	case STEP_VERITY:
		if (IS_ENABLED(CONFIG_FS_VERITY) &&
		    ctx->enabled_steps & (1 << STEP_VERITY)) {
			INIT_WORK(&ctx->work, verity_work);
			fsverity_enqueue_verify_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		fallthrough;
	default:
		__read_end_io(ctx->bio);
	}
}

static bool bio_post_read_required(struct bio *bio)
{
	return bio->bi_private && !bio->bi_status;
}

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_folio().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
	if (bio_post_read_required(bio)) {
		struct bio_post_read_ctx *ctx = bio->bi_private;

		ctx->cur_step = STEP_INITIAL;
		bio_post_read_processing(ctx);
		return;
	}
	__read_end_io(bio);
}

static void ext4_set_bio_post_read_ctx(struct bio *bio,
				       const struct inode *inode,
				       struct fsverity_info *vi)
{
	unsigned int post_read_steps = 0;

	if (fscrypt_inode_uses_fs_layer_crypto(inode))
		post_read_steps |= 1 << STEP_DECRYPT;

	if (vi)
		post_read_steps |= 1 << STEP_VERITY;

	if (post_read_steps) {
		/* Due to the mempool, this never fails. */
		struct bio_post_read_ctx *ctx =
			mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);

		ctx->bio = bio;
		ctx->vi = vi;
		ctx->enabled_steps = post_read_steps;
		bio->bi_private = ctx;
	}
}

static inline loff_t ext4_readpage_limit(struct inode *inode)
{
	if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
		return inode->i_sb->s_maxbytes;

	return i_size_read(inode);
}
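/*
 * Worked example (illustrative numbers only): ext4_mpage_readpages()
 * below rounds this byte limit up to a block count.  With a hypothetical
 * i_size of 5000 bytes and a 4096-byte block size (blkbits == 12):
 *
 *	last_block_in_file = (5000 + 4096 - 1) >> 12 = 2
 *
 * so blocks 0 and 1 are read from disk, and everything beyond them is
 * treated as a hole and zero-filled.
 */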
static int ext4_mpage_readpages(struct inode *inode, struct fsverity_info *vi,
		struct readahead_control *rac, struct folio *folio)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t next_block;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t first_block;
	unsigned page_block;
	struct block_device *bdev = inode->i_sb->s_bdev;
	int length;
	unsigned relative_block = 0;
	struct ext4_map_blocks map;
	unsigned int nr_pages, folio_pages;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;

	nr_pages = rac ? readahead_count(rac) : folio_nr_pages(folio);
	for (; nr_pages; nr_pages -= folio_pages) {
		int fully_mapped = 1;
		unsigned int first_hole;
		unsigned int blocks_per_folio;

		if (rac)
			folio = readahead_folio(rac);

		folio_pages = folio_nr_pages(folio);
		prefetchw(&folio->flags);

		if (folio_buffers(folio))
			goto confused;

		blocks_per_folio = folio_size(folio) >> blkbits;
		first_hole = blocks_per_folio;
		block_in_file = next_block = EXT4_PG_TO_LBLK(inode, folio->index);
		last_block = EXT4_PG_TO_LBLK(inode, folio->index + nr_pages);
		last_block_in_file = (ext4_readpage_limit(inode) +
				      blocksize - 1) >> blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;
		page_block = 0;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & EXT4_MAP_MAPPED) &&
		    block_in_file > map.m_lblk &&
		    block_in_file < (map.m_lblk + map.m_len)) {
			unsigned map_offset = block_in_file - map.m_lblk;
			unsigned last = map.m_len - map_offset;

			first_block = map.m_pblk + map_offset;
			for (relative_block = 0; ; relative_block++) {
				if (relative_block == last) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				}
				if (page_block == blocks_per_folio)
					break;
				page_block++;
				block_in_file++;
			}
		}

		/*
		 * Then do more ext4_map_blocks() calls until we are
		 * done with this folio.
		 */
		while (page_block < blocks_per_folio) {
			if (block_in_file < last_block) {
				map.m_lblk = block_in_file;
				map.m_len = last_block - block_in_file;

				if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
				set_error_page:
					folio_zero_segment(folio, 0,
							   folio_size(folio));
					folio_unlock(folio);
					goto next_page;
				}
			}
			if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
				fully_mapped = 0;
				if (first_hole == blocks_per_folio)
					first_hole = page_block;
				page_block++;
				block_in_file++;
				continue;
			}
			if (first_hole != blocks_per_folio)
				goto confused;		/* hole -> non-hole */

			/* Contiguous blocks? */
			if (!page_block)
				first_block = map.m_pblk;
			else if (first_block + page_block != map.m_pblk)
				goto confused;
			for (relative_block = 0; ; relative_block++) {
				if (relative_block == map.m_len) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				} else if (page_block == blocks_per_folio)
					break;
				page_block++;
				block_in_file++;
			}
		}
		if (first_hole != blocks_per_folio) {
			folio_zero_segment(folio, first_hole << blkbits,
					   folio_size(folio));
			if (first_hole == 0) {
				if (vi && !fsverity_verify_folio(vi, folio))
					goto set_error_page;
				folio_end_read(folio, true);
				continue;
			}
		} else if (fully_mapped) {
			folio_set_mappedtodisk(folio);
		}

		/*
		 * This folio will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != first_block - 1 ||
			    !fscrypt_mergeable_bio(bio, inode, next_block))) {
		submit_and_realloc:
			blk_crypto_submit_bio(bio);
			bio = NULL;
		}
		if (bio == NULL) {
			/*
			 * bio_alloc will _always_ be able to allocate a bio if
			 * __GFP_DIRECT_RECLAIM is set, see bio_alloc_bioset().
			 */
			bio = bio_alloc(bdev, bio_max_segs(nr_pages),
					REQ_OP_READ, GFP_KERNEL);
			fscrypt_set_bio_crypt_ctx(bio, inode, next_block,
						  GFP_KERNEL);
			ext4_set_bio_post_read_ctx(bio, inode, vi);
			bio->bi_iter.bi_sector = first_block << (blkbits - 9);
			bio->bi_end_io = mpage_end_io;
			if (rac)
				bio->bi_opf |= REQ_RAHEAD;
		}

		length = first_hole << blkbits;
		if (!bio_add_folio(bio, folio, length, 0))
			goto submit_and_realloc;

		if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
		     (relative_block == map.m_len)) ||
		    (first_hole != blocks_per_folio)) {
			blk_crypto_submit_bio(bio);
			bio = NULL;
		} else
			last_block_in_bio = first_block + blocks_per_folio - 1;
		continue;
	confused:
		if (bio) {
			blk_crypto_submit_bio(bio);
			bio = NULL;
		}
		if (!folio_test_uptodate(folio))
			block_read_full_folio(folio, ext4_get_block);
		else
			folio_unlock(folio);
	next_page:
		; /* A label shall be followed by a statement until C23 */
	}
	if (bio)
		blk_crypto_submit_bio(bio);
	return 0;
}
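/*
 * Worked example for the tail-hole path above (illustrative numbers
 * only): with 4096-byte blocks and a 16 KiB folio (blocks_per_folio == 4),
 * a file whose mapping ends one block into the folio leaves
 * first_hole == 1.  The loop then zeroes bytes [4096, 16384) with
 * folio_zero_segment(), adds only the first 4096 bytes of the folio to
 * the bio (length = first_hole << blkbits), and submits that bio
 * immediately, since first_hole != blocks_per_folio.
 */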
int ext4_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct fsverity_info *vi = NULL;
	int ret;

	trace_ext4_read_folio(inode, folio);

	if (ext4_has_inline_data(inode)) {
		ret = ext4_readpage_inline(inode, folio);
		if (ret != -EAGAIN)
			return ret;
	}

	if (folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE))
		vi = fsverity_get_info(inode);
	if (vi)
		fsverity_readahead(vi, folio->index, folio_nr_pages(folio));
	return ext4_mpage_readpages(inode, vi, NULL, folio);
}

void ext4_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct fsverity_info *vi = NULL;

	/* If the file has inline data, no need to do readahead. */
	if (ext4_has_inline_data(inode))
		return;

	if (readahead_index(rac) < DIV_ROUND_UP(inode->i_size, PAGE_SIZE))
		vi = fsverity_get_info(inode);
	if (vi)
		fsverity_readahead(vi, readahead_index(rac),
				   readahead_count(rac));
	ext4_mpage_readpages(inode, vi, rac, NULL);
}
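/*
 * These two functions are the read entry points that ext4 is assumed to
 * wire into its address_space_operations (as in fs/ext4/inode.c), e.g.:
 *
 *	static const struct address_space_operations ext4_aops = {
 *		.read_folio	= ext4_read_folio,
 *		.readahead	= ext4_readahead,
 *		...
 *	};
 */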
int __init ext4_init_post_read_processing(void)
{
	bio_post_read_ctx_cache = KMEM_CACHE(bio_post_read_ctx, SLAB_RECLAIM_ACCOUNT);
	if (!bio_post_read_ctx_cache)
		goto fail;
	bio_post_read_ctx_pool =
		mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
					 bio_post_read_ctx_cache);
	if (!bio_post_read_ctx_pool)
		goto fail_free_cache;
	return 0;

fail_free_cache:
	kmem_cache_destroy(bio_post_read_ctx_cache);
fail:
	return -ENOMEM;
}

void ext4_exit_post_read_processing(void)
{
	mempool_destroy(bio_post_read_ctx_pool);
	kmem_cache_destroy(bio_post_read_ctx_cache);
}
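/*
 * Assumed usage sketch: ext4's module init path (ext4_init_fs() in
 * fs/ext4/super.c) is expected to call these during module load and
 * unload, roughly:
 *
 *	err = ext4_init_post_read_processing();
 *	if (err)
 *		return err;
 *	...
 *	ext4_exit_post_read_processing();	// on later failure or exit
 */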