Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blk-crypto.h>
#include <linux/swap.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>
#include <linux/sched/signal.h>
#include <linux/fiemap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

#define NUM_PREALLOC_POST_READ_CTXS	128

static struct kmem_cache *bio_post_read_ctx_cache;
static struct kmem_cache *bio_entry_slab;
static mempool_t *bio_post_read_ctx_pool;
static struct bio_set f2fs_bioset;

#define	F2FS_BIO_POOL_SIZE	NR_CURSEG_TYPE

int __init f2fs_init_bioset(void)
{
	if (bioset_init(&f2fs_bioset, F2FS_BIO_POOL_SIZE,
					0, BIOSET_NEED_BVECS))
		return -ENOMEM;
	return 0;
}

void f2fs_destroy_bioset(void)
{
	bioset_exit(&f2fs_bioset);
}

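/*
 * Return true if the page's writeback must complete before the next
 * checkpoint can finish: meta/node pages, directory data, atomic or quota
 * file data, and data pages being migrated by GC. Compressed pages are
 * excluded since the compression layer tracks them separately.
 */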
static bool __is_cp_guaranteed(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct f2fs_sb_info *sbi;

	if (!mapping)
		return false;

	inode = mapping->host;
	sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_META_INO(sbi) ||
			inode->i_ino == F2FS_NODE_INO(sbi) ||
			S_ISDIR(inode->i_mode))
		return true;

	if (f2fs_is_compressed_page(page))
		return false;
	if ((S_ISREG(inode->i_mode) &&
			(f2fs_is_atomic_file(inode) || IS_NOQUOTA(inode))) ||
			page_private_gcing(page))
		return true;
	return false;
}

static enum count_type __read_io_type(struct page *page)
{
	struct address_space *mapping = page_file_mapping(page);

	if (mapping) {
		struct inode *inode = mapping->host;
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

		if (inode->i_ino == F2FS_META_INO(sbi))
			return F2FS_RD_META;

		if (inode->i_ino == F2FS_NODE_INO(sbi))
			return F2FS_RD_NODE;
	}
	return F2FS_RD_DATA;
}

/* postprocessing steps for read bios */
enum bio_post_read_step {
#ifdef CONFIG_FS_ENCRYPTION
	STEP_DECRYPT	= 1 << 0,
#else
	STEP_DECRYPT	= 0,	/* compile out the decryption-related code */
#endif
#ifdef CONFIG_F2FS_FS_COMPRESSION
	STEP_DECOMPRESS	= 1 << 1,
#else
	STEP_DECOMPRESS	= 0,	/* compile out the decompression-related code */
#endif
#ifdef CONFIG_FS_VERITY
	STEP_VERITY	= 1 << 2,
#else
	STEP_VERITY	= 0,	/* compile out the verity-related code */
#endif
};

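/*
 * Per-bio context used to run the post-read steps (decryption,
 * decompression, verity) after a read bio completes. Allocated from a
 * mempool so that allocation cannot fail in the read completion path.
 */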
struct bio_post_read_ctx {
	struct bio *bio;
	struct f2fs_sb_info *sbi;
	struct work_struct work;
	unsigned int enabled_steps;
	block_t fs_blkaddr;
};

static void f2fs_finish_read_bio(struct bio *bio)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	/*
	 * Update and unlock the bio's pagecache pages, and put the
	 * decompression context for any compressed pages.
	 */
	bio_for_each_segment_all(bv, bio, iter_all) {
		struct page *page = bv->bv_page;

		if (f2fs_is_compressed_page(page)) {
			if (bio->bi_status)
				f2fs_end_read_compressed_page(page, true, 0);
			f2fs_put_page_dic(page);
			continue;
		}

		/* PG_error was set if decryption or verity failed. */
		if (bio->bi_status || PageError(page)) {
			ClearPageUptodate(page);
			/* will re-read again later */
			ClearPageError(page);
		} else {
			SetPageUptodate(page);
		}
		dec_page_count(F2FS_P_SB(page), __read_io_type(page));
		unlock_page(page);
	}

	if (bio->bi_private)
		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
	bio_put(bio);
}

static void f2fs_verify_bio(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;
	bool may_have_compressed_pages = (ctx->enabled_steps & STEP_DECOMPRESS);

	/*
	 * fsverity_verify_bio() may call readpages() again, and while verity
	 * will be disabled for this, decryption and/or decompression may still
	 * be needed, resulting in another bio_post_read_ctx being allocated.
	 * So to prevent deadlocks we need to release the current ctx to the
	 * mempool first. This assumes that verity is the last post-read step.
	 */
	mempool_free(ctx, bio_post_read_ctx_pool);
	bio->bi_private = NULL;

	/*
	 * Verify the bio's pages with fs-verity. Exclude compressed pages,
	 * as those were handled separately by f2fs_end_read_compressed_page().
	 */
	if (may_have_compressed_pages) {
		struct bio_vec *bv;
		struct bvec_iter_all iter_all;

		bio_for_each_segment_all(bv, bio, iter_all) {
			struct page *page = bv->bv_page;

			if (!f2fs_is_compressed_page(page) &&
			    !PageError(page) && !fsverity_verify_page(page))
				SetPageError(page);
		}
	} else {
		fsverity_verify_bio(bio);
	}

	f2fs_finish_read_bio(bio);
}

/*
 * If the bio's data needs to be verified with fs-verity, then enqueue the
 * verity work for the bio. Otherwise finish the bio now.
 *
 * Note that to avoid deadlocks, the verity work can't be done on the
 * decryption/decompression workqueue. This is because verifying the data pages
 * can involve reading verity metadata pages from the file, and these verity
 * metadata pages may be encrypted and/or compressed.
 */
static void f2fs_verify_and_finish_bio(struct bio *bio)
{
	struct bio_post_read_ctx *ctx = bio->bi_private;

	if (ctx && (ctx->enabled_steps & STEP_VERITY)) {
		INIT_WORK(&ctx->work, f2fs_verify_bio);
		fsverity_enqueue_verify_work(&ctx->work);
	} else {
		f2fs_finish_read_bio(bio);
	}
}

/*
 * Handle STEP_DECOMPRESS by decompressing any compressed clusters whose last
 * remaining page was read by @ctx->bio.
 *
 * Note that a bio may span clusters (even a mix of compressed and uncompressed
 * clusters) or be for just part of a cluster. STEP_DECOMPRESS just indicates
 * that the bio includes at least one compressed page. The actual decompression
 * is done on a per-cluster basis, not a per-bio basis.
 */
static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;
	bool all_compressed = true;
	block_t blkaddr = ctx->fs_blkaddr;

	bio_for_each_segment_all(bv, ctx->bio, iter_all) {
		struct page *page = bv->bv_page;

		/* PG_error was set if decryption failed. */
		if (f2fs_is_compressed_page(page))
			f2fs_end_read_compressed_page(page, PageError(page),
						blkaddr);
		else
			all_compressed = false;

		blkaddr++;
	}

	/*
	 * Optimization: if all the bio's pages are compressed, then scheduling
	 * the per-bio verity work is unnecessary, as verity will be fully
	 * handled at the compression cluster level.
	 */
	if (all_compressed)
		ctx->enabled_steps &= ~STEP_VERITY;
}

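/*
 * Post-read work runs the steps in pipeline order: decrypt the raw blocks
 * first, then decompress whole clusters, and only then hand the bio off to
 * fs-verity (which runs on its own workqueue, see above).
 */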
static void f2fs_post_read_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);

	if (ctx->enabled_steps & STEP_DECRYPT)
		fscrypt_decrypt_bio(ctx->bio);

	if (ctx->enabled_steps & STEP_DECOMPRESS)
		f2fs_handle_step_decompress(ctx);

	f2fs_verify_and_finish_bio(ctx->bio);
}

static void f2fs_read_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio));
	struct bio_post_read_ctx *ctx;

	iostat_update_and_unbind_ctx(bio, 0);
	ctx = bio->bi_private;

	if (time_to_inject(sbi, FAULT_READ_IO)) {
		f2fs_show_injection_info(sbi, FAULT_READ_IO);
		bio->bi_status = BLK_STS_IOERR;
	}

	if (bio->bi_status) {
		f2fs_finish_read_bio(bio);
		return;
	}

	if (ctx && (ctx->enabled_steps & (STEP_DECRYPT | STEP_DECOMPRESS))) {
		INIT_WORK(&ctx->work, f2fs_post_read_work);
		queue_work(ctx->sbi->post_read_wq, &ctx->work);
	} else {
		f2fs_verify_and_finish_bio(bio);
	}
}

static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	iostat_update_and_unbind_ctx(bio, 1);
	sbi = bio->bi_private;

	if (time_to_inject(sbi, FAULT_WRITE_IO)) {
		f2fs_show_injection_info(sbi, FAULT_WRITE_IO);
		bio->bi_status = BLK_STS_IOERR;
	}

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;
		enum count_type type = WB_DATA_TYPE(page);

		if (page_private_dummy(page)) {
			clear_page_private_dummy(page);
			unlock_page(page);
			mempool_free(page, sbi->write_io_dummy);

			if (unlikely(bio->bi_status))
				f2fs_stop_checkpoint(sbi, true);
			continue;
		}

		fscrypt_finalize_bounce_page(&page);

#ifdef CONFIG_F2FS_FS_COMPRESSION
		if (f2fs_is_compressed_page(page)) {
			f2fs_compress_write_end_io(bio, page);
			continue;
		}
#endif

		if (unlikely(bio->bi_status)) {
			mapping_set_error(page->mapping, -EIO);
			if (type == F2FS_WB_CP_DATA)
				f2fs_stop_checkpoint(sbi, true);
		}

		f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
					page->index != nid_of_node(page));

		dec_page_count(sbi, type);
		if (f2fs_in_warm_node_list(sbi, page))
			f2fs_del_fsync_node_entry(sbi, page);
		clear_page_private_gcing(page);
		end_page_writeback(page);
	}
	if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
				wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

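/*
 * f2fs can span multiple block devices; translate a filesystem-global block
 * address into the owning device and a device-relative address. If @bio is
 * given, its target device and starting sector are set accordingly.
 */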
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	int i;

	if (f2fs_is_multi_device(sbi)) {
		for (i = 0; i < sbi->s_ndevs; i++) {
			if (FDEV(i).start_blk <= blk_addr &&
			    FDEV(i).end_blk >= blk_addr) {
				blk_addr -= FDEV(i).start_blk;
				bdev = FDEV(i).bdev;
				break;
			}
		}
	}
	if (bio) {
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	}
	return bdev;
}

int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return 0;

	for (i = 0; i < sbi->s_ndevs; i++)
		if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
			return i;
	return 0;
}

static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	struct bio *bio;

	bio = bio_alloc_bioset(GFP_NOIO, npages, &f2fs_bioset);

	f2fs_target_device(sbi, fio->new_blkaddr, bio);
	if (is_read_io(fio->op)) {
		bio->bi_end_io = f2fs_read_end_io;
		bio->bi_private = NULL;
	} else {
		bio->bi_end_io = f2fs_write_end_io;
		bio->bi_private = sbi;
		bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi,
						fio->type, fio->temp);
	}
	iostat_alloc_and_bind_ctx(sbi, bio, NULL);

	if (fio->io_wbc)
		wbc_init_bio(fio->io_wbc, bio);

	return bio;
}

static void f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
				  pgoff_t first_idx,
				  const struct f2fs_io_info *fio,
				  gfp_t gfp_mask)
{
	/*
	 * The f2fs garbage collector sets ->encrypted_page when it wants to
	 * read/write raw data without encryption.
	 */
	if (!fio || !fio->encrypted_page)
		fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask);
}

static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
				     pgoff_t next_idx,
				     const struct f2fs_io_info *fio)
{
	/*
	 * The f2fs garbage collector sets ->encrypted_page when it wants to
	 * read/write raw data without encryption.
	 */
	if (fio && fio->encrypted_page)
		return !bio_has_crypt_ctx(bio);

	return fscrypt_mergeable_bio(bio, inode, next_idx);
}

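/*
 * For DATA/NODE write bios on IO-aligned filesystems, pad the bio with
 * zeroed dummy pages up to the next F2FS_IO_SIZE boundary before submission,
 * so that every submitted write is a whole aligned unit. Padding a NODE bio
 * breaks the next-block-address chain, hence SBI_NEED_CP is set below.
 */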
static inline void __submit_bio(struct f2fs_sb_info *sbi,
				struct bio *bio, enum page_type type)
{
	if (!is_read_io(bio_op(bio))) {
		unsigned int start;

		if (type != DATA && type != NODE)
			goto submit_io;

		if (f2fs_lfs_mode(sbi) && current->plug)
			blk_finish_plug(current->plug);

		if (!F2FS_IO_ALIGNED(sbi))
			goto submit_io;

		start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
		start %= F2FS_IO_SIZE(sbi);

		if (start == 0)
			goto submit_io;

		/* fill dummy pages */
		for (; start < F2FS_IO_SIZE(sbi); start++) {
			struct page *page =
				mempool_alloc(sbi->write_io_dummy,
					      GFP_NOIO | __GFP_NOFAIL);
			f2fs_bug_on(sbi, !page);

			lock_page(page);

			zero_user_segment(page, 0, PAGE_SIZE);
			set_page_private_dummy(page);

			if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
				f2fs_bug_on(sbi, 1);
		}
		/*
		 * In the NODE case, we lose next block address chain. So, we
		 * need to do checkpoint in f2fs_sync_file.
		 */
		if (type == NODE)
			set_sbi_flag(sbi, SBI_NEED_CP);
	}
submit_io:
	if (is_read_io(bio_op(bio)))
		trace_f2fs_submit_read_bio(sbi->sb, type, bio);
	else
		trace_f2fs_submit_write_bio(sbi->sb, type, bio);

	iostat_update_submit_ctx(bio, type);
	submit_bio(bio);
}

void f2fs_submit_bio(struct f2fs_sb_info *sbi,
				struct bio *bio, enum page_type type)
{
	__submit_bio(sbi, bio, type);
}

static void __attach_io_flag(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	unsigned int temp_mask = (1 << NR_TEMP_TYPE) - 1;
	unsigned int io_flag, fua_flag, meta_flag;

	if (fio->type == DATA)
		io_flag = sbi->data_io_flag;
	else if (fio->type == NODE)
		io_flag = sbi->node_io_flag;
	else
		return;

	fua_flag = io_flag & temp_mask;
	meta_flag = (io_flag >> NR_TEMP_TYPE) & temp_mask;

	/*
	 * data/node io flag bits per temp:
	 *      REQ_META     |      REQ_FUA      |
	 *    5 |    4 |   3 |    2 |    1 |   0 |
	 * Cold | Warm | Hot | Cold | Warm | Hot |
	 */
	if ((1 << fio->temp) & meta_flag)
		fio->op_flags |= REQ_META;
	if ((1 << fio->temp) & fua_flag)
		fio->op_flags |= REQ_FUA;
}

static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	__attach_io_flag(fio);
	bio_set_op_attrs(io->bio, fio->op, fio->op_flags);

	if (is_read_io(fio->op))
		trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
	else
		trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);

	__submit_bio(io->sbi, io->bio, fio->type);
	io->bio = NULL;
}

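/*
 * Check whether the pending bio already carries a page of interest: a page
 * belonging to @inode, the exact @page, or a node page owned by @ino. With
 * no filter given, any non-empty bio matches. Encrypted bounce pages and
 * compression control pages are mapped back to their pagecache pages first.
 */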
static bool __has_merged_page(struct bio *bio, struct inode *inode,
						struct page *page, nid_t ino)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	if (!bio)
		return false;

	if (!inode && !page && !ino)
		return true;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *target = bvec->bv_page;

		if (fscrypt_is_bounce_page(target)) {
			target = fscrypt_pagecache_page(target);
			if (IS_ERR(target))
				continue;
		}
		if (f2fs_is_compressed_page(target)) {
			target = f2fs_compress_control_page(target);
			if (IS_ERR(target))
				continue;
		}

		if (inode && inode == target->mapping->host)
			return true;
		if (page && page == target)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}

	return false;
}

static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
				enum page_type type, enum temp_type temp)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->fio.op = REQ_OP_WRITE;
		io->fio.op_flags = REQ_META | REQ_PRIO | REQ_SYNC;
		if (!test_opt(sbi, NOBARRIER))
			io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
	}
	__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}

static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, bool force)
{
	enum temp_type temp;
	bool ret = true;

	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
		if (!force) {
			enum page_type btype = PAGE_TYPE_OF_BIO(type);
			struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

			down_read(&io->io_rwsem);
			ret = __has_merged_page(io->bio, inode, page, ino);
			up_read(&io->io_rwsem);
		}
		if (ret)
			__f2fs_submit_merged_write(sbi, type, temp);

		/* TODO: use HOT temp only for meta pages now. */
		if (type >= META)
			break;
	}
}

void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
{
	__submit_merged_write_cond(sbi, NULL, NULL, 0, type, true);
}

void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type)
{
	__submit_merged_write_cond(sbi, inode, page, ino, type, false);
}

void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
{
	f2fs_submit_merged_write(sbi, DATA);
	f2fs_submit_merged_write(sbi, NODE);
	f2fs_submit_merged_write(sbi, META);
}

/*
 * Fill the locked page with data located in the block address.
 * A caller needs to unlock the page on failure.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
			fio->is_por ? META_POR : (__is_meta_io(fio) ?
			META_GENERIC : DATA_GENERIC_ENHANCE)))
		return -EFSCORRUPTED;

	trace_f2fs_submit_page_bio(page, fio);

	/* Allocate a new bio */
	bio = __bio_alloc(fio, 1);

	f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
			       fio->page->index, fio, GFP_NOIO);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}

	if (fio->io_wbc && !is_read_io(fio->op))
		wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);

	__attach_io_flag(fio);
	bio_set_op_attrs(bio, fio->op, fio->op_flags);

	inc_page_count(fio->sbi, is_read_io(fio->op) ?
			__read_io_type(page) : WB_DATA_TYPE(fio->page));

	__submit_bio(fio->sbi, bio, fio->type);
	return 0;
}

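/*
 * A page can be merged into an existing bio only when all three conditions
 * hold: the bio has not hit the configured max_io_bytes cap, the new block
 * is physically contiguous with the last one, and both blocks live on the
 * same target device.
 */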
static bool page_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
				block_t last_blkaddr, block_t cur_blkaddr)
{
	if (unlikely(sbi->max_io_bytes &&
			bio->bi_iter.bi_size >= sbi->max_io_bytes))
		return false;
	if (last_blkaddr + 1 != cur_blkaddr)
		return false;
	return bio->bi_bdev == f2fs_target_device(sbi, cur_blkaddr, NULL);
}

static bool io_type_is_mergeable(struct f2fs_bio_info *io,
						struct f2fs_io_info *fio)
{
	if (io->fio.op != fio->op)
		return false;
	return io->fio.op_flags == fio->op_flags;
}

static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
					struct f2fs_bio_info *io,
					struct f2fs_io_info *fio,
					block_t last_blkaddr,
					block_t cur_blkaddr)
{
	if (F2FS_IO_ALIGNED(sbi) && (fio->type == DATA || fio->type == NODE)) {
		unsigned int filled_blocks =
				F2FS_BYTES_TO_BLK(bio->bi_iter.bi_size);
		unsigned int io_size = F2FS_IO_SIZE(sbi);
		unsigned int left_vecs = bio->bi_max_vecs - bio->bi_vcnt;
		/* IO in the bio is aligned, but the remaining vector space is not enough */
		if (!(filled_blocks % io_size) && left_vecs < io_size)
			return false;
	}
	if (!page_is_mergeable(sbi, bio, last_blkaddr, cur_blkaddr))
		return false;
	return io_type_is_mergeable(io, fio);
}

static void add_bio_entry(struct f2fs_sb_info *sbi, struct bio *bio,
				struct page *page, enum temp_type temp)
{
	struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
	struct bio_entry *be;

	be = f2fs_kmem_cache_alloc(bio_entry_slab, GFP_NOFS, true, NULL);
	be->bio = bio;
	bio_get(bio);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE)
		f2fs_bug_on(sbi, 1);

	down_write(&io->bio_list_lock);
	list_add_tail(&be->list, &io->bio_list);
	up_write(&io->bio_list_lock);
}

static void del_bio_entry(struct bio_entry *be)
{
	list_del(&be->list);
	kmem_cache_free(bio_entry_slab, be);
}

static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
							struct page *page)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum temp_type temp;
	bool found = false;
	int ret = -EAGAIN;

	for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
		struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
		struct list_head *head = &io->bio_list;
		struct bio_entry *be;

		down_write(&io->bio_list_lock);
		list_for_each_entry(be, head, list) {
			if (be->bio != *bio)
				continue;

			found = true;

			f2fs_bug_on(sbi, !page_is_mergeable(sbi, *bio,
							    *fio->last_block,
							    fio->new_blkaddr));
			if (f2fs_crypt_mergeable_bio(*bio,
					fio->page->mapping->host,
					fio->page->index, fio) &&
			    bio_add_page(*bio, page, PAGE_SIZE, 0) ==
					PAGE_SIZE) {
				ret = 0;
				break;
			}

			/* page can't be merged into bio; submit the bio */
			del_bio_entry(be);
			__submit_bio(sbi, *bio, DATA);
			break;
		}
		up_write(&io->bio_list_lock);
	}

	if (ret) {
		bio_put(*bio);
		*bio = NULL;
	}

	return ret;
}

void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
					struct bio **bio, struct page *page)
{
	enum temp_type temp;
	bool found = false;
	struct bio *target = bio ? *bio : NULL;

	for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
		struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
		struct list_head *head = &io->bio_list;
		struct bio_entry *be;

		if (list_empty(head))
			continue;

		down_read(&io->bio_list_lock);
		list_for_each_entry(be, head, list) {
			if (target)
				found = (target == be->bio);
			else
				found = __has_merged_page(be->bio, NULL,
								page, 0);
			if (found)
				break;
		}
		up_read(&io->bio_list_lock);

		if (!found)
			continue;

		found = false;

		down_write(&io->bio_list_lock);
		list_for_each_entry(be, head, list) {
			if (target)
				found = (target == be->bio);
			else
				found = __has_merged_page(be->bio, NULL,
								page, 0);
			if (found) {
				target = be->bio;
				del_bio_entry(be);
				break;
			}
		}
		up_write(&io->bio_list_lock);
	}

	if (found)
		__submit_bio(sbi, target, DATA);
	if (bio && *bio) {
		bio_put(*bio);
		*bio = NULL;
	}
}

int f2fs_merge_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio = *fio->bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
			__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
		return -EFSCORRUPTED;

	trace_f2fs_submit_page_bio(page, fio);

	if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
						fio->new_blkaddr))
		f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL);
alloc_new:
	if (!bio) {
		bio = __bio_alloc(fio, BIO_MAX_VECS);
		__attach_io_flag(fio);
		f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
				       fio->page->index, fio, GFP_NOIO);
		bio_set_op_attrs(bio, fio->op, fio->op_flags);

		add_bio_entry(fio->sbi, bio, page, fio->temp);
	} else {
		if (add_ipu_page(fio, &bio, page))
			goto alloc_new;
	}

	if (fio->io_wbc)
		wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);

	inc_page_count(fio->sbi, WB_DATA_TYPE(page));

	*fio->last_block = fio->new_blkaddr;
	*fio->bio = bio;

	return 0;
}

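/*
 * Main entry for merged writeback: append the page to the per-(type, temp)
 * in-flight bio when it can be merged, otherwise submit that bio and start
 * a new one. With fio->in_list set, the queued f2fs_io_info entries on
 * io->io_list are drained in the same pass.
 */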
void f2fs_submit_page_write(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
	struct page *bio_page;

	f2fs_bug_on(sbi, is_read_io(fio->op));

	down_write(&io->io_rwsem);
next:
	if (fio->in_list) {
		spin_lock(&io->io_lock);
		if (list_empty(&io->io_list)) {
			spin_unlock(&io->io_lock);
			goto out;
		}
		fio = list_first_entry(&io->io_list,
						struct f2fs_io_info, list);
		list_del(&fio->list);
		spin_unlock(&io->io_lock);
	}

	verify_fio_blkaddr(fio);

	if (fio->encrypted_page)
		bio_page = fio->encrypted_page;
	else if (fio->compressed_page)
		bio_page = fio->compressed_page;
	else
		bio_page = fio->page;

	/* set submitted = true as a return value */
	fio->submitted = true;

	inc_page_count(sbi, WB_DATA_TYPE(bio_page));

	if (io->bio &&
	    (!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio,
			      fio->new_blkaddr) ||
	     !f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host,
				       bio_page->index, fio)))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		if (F2FS_IO_ALIGNED(sbi) &&
				(fio->type == DATA || fio->type == NODE) &&
				fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
			dec_page_count(sbi, WB_DATA_TYPE(bio_page));
			fio->retry = true;
			goto skip;
		}
		io->bio = __bio_alloc(fio, BIO_MAX_VECS);
		f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host,
				       bio_page->index, fio, GFP_NOIO);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	if (fio->io_wbc)
		wbc_account_cgroup_owner(fio->io_wbc, bio_page, PAGE_SIZE);

	io->last_block_in_bio = fio->new_blkaddr;

	trace_f2fs_submit_page_write(fio->page, fio);
skip:
	if (fio->in_list)
		goto next;
out:
	if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
				!f2fs_is_checkpoint_ready(sbi))
		__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}

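/*
 * Build a read bio targeted at @blkaddr and attach a bio_post_read_ctx when
 * any post-read step (decryption, decompression, verity) may be needed.
 * The ctx allocation comes from a mempool, so it cannot fail here.
 */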
static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
				      unsigned nr_pages, unsigned op_flag,
				      pgoff_t first_idx, bool for_write)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct bio *bio;
	struct bio_post_read_ctx *ctx = NULL;
	unsigned int post_read_steps = 0;

	bio = bio_alloc_bioset(for_write ? GFP_NOIO : GFP_KERNEL,
			       bio_max_segs(nr_pages), &f2fs_bioset);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS);

	f2fs_target_device(sbi, blkaddr, bio);
	bio->bi_end_io = f2fs_read_end_io;
	bio_set_op_attrs(bio, REQ_OP_READ, op_flag);

	if (fscrypt_inode_uses_fs_layer_crypto(inode))
		post_read_steps |= STEP_DECRYPT;

	if (f2fs_need_verity(inode, first_idx))
		post_read_steps |= STEP_VERITY;

	/*
	 * STEP_DECOMPRESS is handled specially, since a compressed file might
	 * contain both compressed and uncompressed clusters. We'll allocate a
	 * bio_post_read_ctx if the file is compressed, but the caller is
	 * responsible for enabling STEP_DECOMPRESS if it's actually needed.
	 */

	if (post_read_steps || f2fs_compressed_file(inode)) {
		/* Due to the mempool, this never fails. */
		ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
		ctx->bio = bio;
		ctx->sbi = sbi;
		ctx->enabled_steps = post_read_steps;
		ctx->fs_blkaddr = blkaddr;
		bio->bi_private = ctx;
	}
	iostat_alloc_and_bind_ctx(sbi, bio, ctx);

	return bio;
}

/* This can handle encryption stuffs */
static int f2fs_submit_page_read(struct inode *inode, struct page *page,
				 block_t blkaddr, int op_flags, bool for_write)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct bio *bio;

	bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags,
					page->index, for_write);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, blkaddr);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}
	ClearPageError(page);
	inc_page_count(sbi, F2FS_RD_DATA);
	f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
	__submit_bio(sbi, bio, DATA);
	return 0;
}

static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
	__le32 *addr_array;
	int base = 0;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void f2fs_set_data_blkaddr(struct dnode_of_data *dn)
{
	f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
	__set_data_blkaddr(dn);
	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
}

void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	dn->data_blkaddr = blkaddr;
	f2fs_set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
}

/* dn->ofs_in_node will be returned with up-to-date last block pointer */
int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	int err;

	if (!count)
		return 0;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
		return err;

	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
						dn->ofs_in_node, count);

	f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);

	for (; count > 0; dn->ofs_in_node++) {
		block_t blkaddr = f2fs_data_blkaddr(dn);

		if (blkaddr == NULL_ADDR) {
			dn->data_blkaddr = NEW_ADDR;
			__set_data_blkaddr(dn);
			count--;
		}
	}

	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
	return 0;
}

/* Should keep dn->ofs_in_node unchanged */
int f2fs_reserve_new_block(struct dnode_of_data *dn)
{
	unsigned int ofs_in_node = dn->ofs_in_node;
	int ret;

	ret = f2fs_reserve_new_blocks(dn, 1);
	dn->ofs_in_node = ofs_in_node;
	return ret;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = f2fs_get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = f2fs_reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
	struct extent_info ei = {0, };
	struct inode *inode = dn->inode;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
	}

	return f2fs_reserve_block(dn, index);
}

struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
						int op_flags, bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei = {0, };
	int err;

	page = f2fs_grab_cache_page(mapping, index, for_write);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr,
						DATA_GENERIC_ENHANCE_READ)) {
			err = -EFSCORRUPTED;
			goto put_err;
		}
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_err;
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		err = -ENOENT;
		goto put_err;
	}
	if (dn.data_blkaddr != NEW_ADDR &&
			!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
						dn.data_blkaddr,
						DATA_GENERIC_ENHANCE)) {
		err = -EFSCORRUPTED;
		goto put_err;
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page may have been allocated but not yet written,
	 * because its new inode page couldn't be allocated due to -ENOSPC.
	 * In that case, its blkaddr can remain NEW_ADDR.
	 * See f2fs_add_link -> f2fs_get_new_data_page ->
	 * f2fs_init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	err = f2fs_submit_page_read(inode, page, dn.data_blkaddr,
						op_flags, for_write);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	page = f2fs_get_read_data_page(inode, index, 0, false);
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * Return an error when a hole is accessed, because the callers (functions
 * in dir.c and GC) need to know whether this page exists or not.
 */
struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
							bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = f2fs_get_read_data_page(inode, index, 0, for_write);
	if (IS_ERR(page))
		return page;

	/* wait for read completion */
	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that, ipage is set only by make_empty_dir, and if any error occur,
 * ipage should be released by this function.
 */
struct page *f2fs_get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occur.
		 */
		f2fs_put_page(ipage, 1);
		return ERR_PTR(-ENOMEM);
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	if (!ipage)
		f2fs_put_dnode(&dn);

	if (PageUptodate(page))
		goto got_it;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
	} else {
		f2fs_put_page(page, 1);

		/* if ipage exists, blkaddr should be NEW_ADDR */
		f2fs_bug_on(F2FS_I_SB(inode), ipage);
		page = f2fs_get_lock_data_page(inode, index, true);
		if (IS_ERR(page))
			return page;
	}
got_it:
	if (new_i_size && i_size_read(inode) <
				((loff_t)(index + 1) << PAGE_SHIFT))
		f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
	return page;
}

static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	block_t old_blkaddr;
	blkcnt_t count = 1;
	int err;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;

	err = f2fs_get_node_info(sbi, dn->nid, &ni);
	if (err)
		return err;

	dn->data_blkaddr = f2fs_data_blkaddr(dn);
	if (dn->data_blkaddr != NULL_ADDR)
		goto alloc;

	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
		return err;

alloc:
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
	old_blkaddr = dn->data_blkaddr;
	f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
				&sum, seg_type, NULL);
	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
		invalidate_mapping_pages(META_MAPPING(sbi),
					old_blkaddr, old_blkaddr);
		f2fs_invalidate_compress_page(sbi, old_blkaddr);
	}
	f2fs_update_data_blkaddr(dn, dn->data_blkaddr);

	/*
	 * i_size will be updated by direct_IO. Otherwise, we'll get stale
	 * data from unwritten block via dio_read.
	 */
	return 0;
}

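/*
 * Preallocate the blocks covered by an upcoming write so the write path
 * does not need to allocate per page: direct IO maps blocks with
 * F2FS_GET_BLOCK_PRE_DIO (or PRE_AIO when buffered IO is forced), while
 * buffered writes first convert any inline data and then batch-reserve
 * with F2FS_GET_BLOCK_PRE_AIO.
 */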
int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct f2fs_map_blocks map;
	int flag;
	int err = 0;
	bool direct_io = iocb->ki_flags & IOCB_DIRECT;

	map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
	map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
	if (map.m_len > map.m_lblk)
		map.m_len -= map.m_lblk;
	else
		map.m_len = 0;

	map.m_next_pgofs = NULL;
	map.m_next_extent = NULL;
	map.m_seg_type = NO_CHECK_TYPE;
	map.m_may_create = true;

	if (direct_io) {
		map.m_seg_type = f2fs_rw_hint_to_seg_type(iocb->ki_hint);
		flag = f2fs_force_buffered_io(inode, iocb, from) ?
					F2FS_GET_BLOCK_PRE_AIO :
					F2FS_GET_BLOCK_PRE_DIO;
		goto map_blocks;
	}
	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}
	if (f2fs_has_inline_data(inode))
		return err;

	flag = F2FS_GET_BLOCK_PRE_AIO;

map_blocks:
	err = f2fs_map_blocks(inode, &map, 1, flag);
	if (map.m_len > 0 && err == -ENOSPC) {
		if (!direct_io)
			set_inode_flag(inode, FI_NO_PREALLOC);
		err = 0;
	}
	return err;
}

void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
{
	if (flag == F2FS_GET_BLOCK_PRE_AIO) {
		if (lock)
			down_read(&sbi->node_change);
		else
			up_read(&sbi->node_change);
	} else {
		if (lock)
			f2fs_lock_op(sbi);
		else
			f2fs_unlock_op(sbi);
	}
}

/*
 * f2fs_map_blocks() tries to find or build mapping relationship which
 * maps continuous logical blocks to physical blocks, and return such
 * info via f2fs_map_blocks structure.
 */
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
						int create, int flag)
{
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE;
	pgoff_t pgofs, end_offset, end;
	int err = 0, ofs = 1;
	unsigned int ofs_in_node, last_ofs_in_node;
	blkcnt_t prealloc;
	struct extent_info ei = {0, };
	block_t blkaddr;
	unsigned int start_pgofs;
	int bidx = 0;

	if (!maxblocks)
		return 0;

	map->m_bdev = inode->i_sb->s_bdev;
	map->m_multidev_dio =
		f2fs_allow_multi_device_dio(F2FS_I_SB(inode), flag);

	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs = (pgoff_t)map->m_lblk;
	end = pgofs + maxblocks;

	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
							map->m_may_create)
			goto next_dnode;

		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
		if (map->m_next_extent)
			*map->m_next_extent = pgofs + map->m_len;
		/* for hardware encryption, but also to avoid potential future issues */
		if (flag == F2FS_GET_BLOCK_DIO)
			f2fs_wait_on_block_writeback_range(inode,
						map->m_pblk, map->m_len);

		if (map->m_multidev_dio) {
			block_t blk_addr = map->m_pblk;

			bidx = f2fs_target_device_index(sbi, map->m_pblk);

			map->m_bdev = FDEV(bidx).bdev;
			map->m_pblk -= FDEV(bidx).start_blk;
			map->m_len = min(map->m_len,
				FDEV(bidx).end_blk + 1 - map->m_pblk);

			if (map->m_may_create)
				f2fs_update_device_state(sbi, inode->i_ino,
							blk_addr, map->m_len);
		}
		goto out;
	}

next_dnode:
	if (map->m_may_create)
		f2fs_do_map_lock(sbi, flag, true);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (flag == F2FS_GET_BLOCK_BMAP)
			map->m_pblk = 0;

		if (err == -ENOENT) {
			/*
			 * There is one exceptional case: read_node_page()
			 * may return -ENOENT because the filesystem has been
			 * shut down or hit cp_error, so force-convert the
			 * error number to EIO in that case.
			 */
			if (map->m_may_create &&
				(is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
				f2fs_cp_error(sbi))) {
				err = -EIO;
				goto unlock_out;
			}

			err = 0;
			if (map->m_next_pgofs)
				*map->m_next_pgofs =
					f2fs_get_next_page_offset(&dn, pgofs);
			if (map->m_next_extent)
				*map->m_next_extent =
					f2fs_get_next_page_offset(&dn, pgofs);
		}
		goto unlock_out;
	}

	start_pgofs = pgofs;
	prealloc = 0;
	last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

next_block:
	blkaddr = f2fs_data_blkaddr(&dn);

	if (__is_valid_data_blkaddr(blkaddr) &&
		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
		err = -EFSCORRUPTED;
		goto sync_out;
	}

	if (__is_valid_data_blkaddr(blkaddr)) {
		/* use out-of-place update for direct IO under LFS mode */
		if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
							map->m_may_create) {
			err = __allocate_data_block(&dn, map->m_seg_type);
			if (err)
				goto sync_out;
			blkaddr = dn.data_blkaddr;
			set_inode_flag(inode, FI_APPEND_WRITE);
		}
	} else {
		if (create) {
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
				if (blkaddr == NULL_ADDR) {
					prealloc++;
					last_ofs_in_node = dn.ofs_in_node;
				}
			} else {
				WARN_ON(flag != F2FS_GET_BLOCK_PRE_DIO &&
					flag != F2FS_GET_BLOCK_DIO);
				err = __allocate_data_block(&dn,
							map->m_seg_type);
				if (!err)
					set_inode_flag(inode, FI_APPEND_WRITE);
			}
			if (err)
				goto sync_out;
			map->m_flags |= F2FS_MAP_NEW;
			blkaddr = dn.data_blkaddr;
		} else {
			if (f2fs_compressed_file(inode) &&
					f2fs_sanity_check_cluster(&dn) &&
				(flag != F2FS_GET_BLOCK_FIEMAP ||
					IS_ENABLED(CONFIG_F2FS_CHECK_FS))) {
				err = -EFSCORRUPTED;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_BMAP) {
				map->m_pblk = 0;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRECACHE)
				goto sync_out;
			if (flag == F2FS_GET_BLOCK_FIEMAP &&
						blkaddr == NULL_ADDR) {
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
				goto sync_out;
			}
			if (flag != F2FS_GET_BLOCK_FIEMAP) {
				/* for defragment case */
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
				goto sync_out;
			}
		}
	}

	if (flag == F2FS_GET_BLOCK_PRE_AIO)
		goto skip;

	if (map->m_multidev_dio)
		bidx = f2fs_target_device_index(sbi, blkaddr);

	if (map->m_len == 0) {
		/* preallocated unwritten block should be mapped for fiemap. */
		if (blkaddr == NEW_ADDR)
			map->m_flags |= F2FS_MAP_UNWRITTEN;
		map->m_flags |= F2FS_MAP_MAPPED;

		map->m_pblk = blkaddr;
		map->m_len = 1;

		if (map->m_multidev_dio)
			map->m_bdev = FDEV(bidx).bdev;
	} else if ((map->m_pblk != NEW_ADDR &&
			blkaddr == (map->m_pblk + ofs)) ||
			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
			flag == F2FS_GET_BLOCK_PRE_DIO) {
		if (map->m_multidev_dio && map->m_bdev != FDEV(bidx).bdev)
			goto sync_out;
		ofs++;
		map->m_len++;
	} else {
		goto sync_out;
	}

skip:
	dn.ofs_in_node++;
	pgofs++;

	/* preallocate blocks in batch for one dnode page */
	if (flag == F2FS_GET_BLOCK_PRE_AIO &&
			(pgofs == end || dn.ofs_in_node == end_offset)) {

		dn.ofs_in_node = ofs_in_node;
		err = f2fs_reserve_new_blocks(&dn, prealloc);
		if (err)
			goto sync_out;

		map->m_len += dn.ofs_in_node - ofs_in_node;
		if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
			err = -ENOSPC;
			goto sync_out;
		}
		dn.ofs_in_node = end_offset;
	}

	if (pgofs >= end)
		goto sync_out;
	else if (dn.ofs_in_node < end_offset)
		goto next_block;

	if (flag == F2FS_GET_BLOCK_PRECACHE) {
		if (map->m_flags & F2FS_MAP_MAPPED) {
			unsigned int ofs = start_pgofs - map->m_lblk;

			f2fs_update_extent_cache_range(&dn,
				start_pgofs, map->m_pblk + ofs,
				map->m_len - ofs);
		}
	}

	f2fs_put_dnode(&dn);

	if (map->m_may_create) {
		f2fs_do_map_lock(sbi, flag, false);
		f2fs_balance_fs(sbi, dn.node_changed);
	}
	goto next_dnode;

sync_out:

	if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED) {
		/*
		 * for hardware encryption, but also to avoid potential
		 * issues in the future
		 */
		f2fs_wait_on_block_writeback_range(inode,
					map->m_pblk, map->m_len);
		invalidate_mapping_pages(META_MAPPING(sbi),
						map->m_pblk, map->m_pblk);

		if (map->m_multidev_dio) {
			block_t blk_addr = map->m_pblk;

			bidx = f2fs_target_device_index(sbi, map->m_pblk);

			map->m_bdev = FDEV(bidx).bdev;
			map->m_pblk -= FDEV(bidx).start_blk;

			if (map->m_may_create)
				f2fs_update_device_state(sbi, inode->i_ino,
							blk_addr, map->m_len);

			f2fs_bug_on(sbi, blk_addr + map->m_len >
						FDEV(bidx).end_blk + 1);
		}
	}

	if (flag == F2FS_GET_BLOCK_PRECACHE) {
		if (map->m_flags & F2FS_MAP_MAPPED) {
			unsigned int ofs = start_pgofs - map->m_lblk;

			f2fs_update_extent_cache_range(&dn,
				start_pgofs, map->m_pblk + ofs,
				map->m_len - ofs);
		}
		if (map->m_next_extent)
			*map->m_next_extent = pgofs + 1;
	}
	f2fs_put_dnode(&dn);
unlock_out:
	if (map->m_may_create) {
		f2fs_do_map_lock(sbi, flag, false);
		f2fs_balance_fs(sbi, dn.node_changed);
	}
out:
	trace_f2fs_map_blocks(inode, map, create, flag, err);
	return err;
}

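/*
 * Check whether the byte range [pos, pos + len) lies within i_size and is
 * already fully mapped to on-disk blocks, i.e. an overwrite that needs no
 * new allocation. Walks the range with read-only f2fs_map_blocks() calls.
 */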
bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
{
	struct f2fs_map_blocks map;
	block_t last_lblk;
	int err;

	if (pos + len > i_size_read(inode))
		return false;

	map.m_lblk = F2FS_BYTES_TO_BLK(pos);
	map.m_next_pgofs = NULL;
	map.m_next_extent = NULL;
	map.m_seg_type = NO_CHECK_TYPE;
	map.m_may_create = false;
	last_lblk = F2FS_BLK_ALIGN(pos + len);

	while (map.m_lblk < last_lblk) {
		map.m_len = last_lblk - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err || map.m_len == 0)
			return false;
		map.m_lblk += map.m_len;
	}
	return true;
}

static inline u64 bytes_to_blks(struct inode *inode, u64 bytes)
{
	return (bytes >> inode->i_blkbits);
}

static inline u64 blks_to_bytes(struct inode *inode, u64 blks)
{
	return (blks << inode->i_blkbits);
}

static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh, int create, int flag,
			pgoff_t *next_pgofs, int seg_type, bool may_write)
{
	struct f2fs_map_blocks map;
	int err;

	map.m_lblk = iblock;
	map.m_len = bytes_to_blks(inode, bh->b_size);
	map.m_next_pgofs = next_pgofs;
	map.m_next_extent = NULL;
	map.m_seg_type = seg_type;
	map.m_may_create = may_write;

	err = f2fs_map_blocks(inode, &map, create, flag);
	if (!err) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
		bh->b_size = blks_to_bytes(inode, map.m_len);

		if (map.m_multidev_dio)
			bh->b_bdev = map.m_bdev;
	}
	return err;
}

static int get_data_block_dio_write(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
				F2FS_GET_BLOCK_DIO, NULL,
				f2fs_rw_hint_to_seg_type(inode->i_write_hint),
				true);
}

static int get_data_block_dio(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
				F2FS_GET_BLOCK_DIO, NULL,
				f2fs_rw_hint_to_seg_type(inode->i_write_hint),
				false);
}

static int f2fs_xattr_fiemap(struct inode *inode,
				struct fiemap_extent_info *fieinfo)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;
	struct node_info ni;
	__u64 phys = 0, len;
	__u32 flags;
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
	int err = 0;

	if (f2fs_has_inline_xattr(inode)) {
		int offset;

		page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
						inode->i_ino, false);
		if (!page)
			return -ENOMEM;

		err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
		if (err) {
			f2fs_put_page(page, 1);
			return err;
		}

		phys = blks_to_bytes(inode, ni.blk_addr);
		offset = offsetof(struct f2fs_inode, i_addr) +
					sizeof(__le32) * (DEF_ADDRS_PER_INODE -
					get_inline_xattr_addrs(inode));

		phys += offset;
		len = inline_xattr_size(inode);

		f2fs_put_page(page, 1);

		flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED;

		if (!xnid)
			flags |= FIEMAP_EXTENT_LAST;

		err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
		trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
		if (err || err == 1)
			return err;
	}

	if (xnid) {
		page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
		if (!page)
			return -ENOMEM;

		err = f2fs_get_node_info(sbi, xnid, &ni);
		if (err) {
			f2fs_put_page(page, 1);
			return err;
		}

		phys = blks_to_bytes(inode, ni.blk_addr);
		len = inode->i_sb->s_blocksize;

		f2fs_put_page(page, 1);

		flags = FIEMAP_EXTENT_LAST;
	}

	if (phys) {
		err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
		trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
	}

	return (err < 0 ? err : 0);
}

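/*
 * Maximum number of data blocks addressable from one inode: the direct
 * pointers in the inode itself, two direct node blocks, two indirect node
 * blocks (each pointing to NIDS_PER_BLOCK direct nodes), and one double
 * indirect node block (NIDS_PER_BLOCK^2 direct nodes).
 */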
1906static loff_t max_inode_blocks(struct inode *inode)
1907{
1908 loff_t result = ADDRS_PER_INODE(inode);
1909 loff_t leaf_count = ADDRS_PER_BLOCK(inode);
1910
1911 /* two direct node blocks */
1912 result += (leaf_count * 2);
1913
1914 /* two indirect node blocks */
1915 leaf_count *= NIDS_PER_BLOCK;
1916 result += (leaf_count * 2);
1917
1918 /* one double indirect node block */
1919 leaf_count *= NIDS_PER_BLOCK;
1920 result += leaf_count;
1921
1922 return result;
1923}
1924
1925int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
1926 u64 start, u64 len)
1927{
1928 struct f2fs_map_blocks map;
1929 sector_t start_blk, last_blk;
1930 pgoff_t next_pgofs;
1931 u64 logical = 0, phys = 0, size = 0;
1932 u32 flags = 0;
1933 int ret = 0;
1934 bool compr_cluster = false, compr_appended;
1935 unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
1936 unsigned int count_in_cluster = 0;
1937 loff_t maxbytes;
1938
1939 if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
1940 ret = f2fs_precache_extents(inode);
1941 if (ret)
1942 return ret;
1943 }
1944
1945 ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_XATTR);
1946 if (ret)
1947 return ret;
1948
1949 inode_lock(inode);
1950
1951 maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
1952 if (start > maxbytes) {
1953 ret = -EFBIG;
1954 goto out;
1955 }
1956
1957 if (len > maxbytes || (maxbytes - len) < start)
1958 len = maxbytes - start;
1959
1960 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
1961 ret = f2fs_xattr_fiemap(inode, fieinfo);
1962 goto out;
1963 }
1964
1965 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
1966 ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
1967 if (ret != -EAGAIN)
1968 goto out;
1969 }
1970
1971 if (bytes_to_blks(inode, len) == 0)
1972 len = blks_to_bytes(inode, 1);
1973
1974 start_blk = bytes_to_blks(inode, start);
1975 last_blk = bytes_to_blks(inode, start + len - 1);
1976
1977next:
1978 memset(&map, 0, sizeof(map));
1979 map.m_lblk = start_blk;
1980 map.m_len = bytes_to_blks(inode, len);
1981 map.m_next_pgofs = &next_pgofs;
1982 map.m_seg_type = NO_CHECK_TYPE;
1983
1984 if (compr_cluster) {
1985 map.m_lblk += 1;
1986 map.m_len = cluster_size - count_in_cluster;
1987 }
1988
1989 ret = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
1990 if (ret)
1991 goto out;
1992
1993 /* HOLE */
1994 if (!compr_cluster && !(map.m_flags & F2FS_MAP_FLAGS)) {
1995 start_blk = next_pgofs;
1996
1997 if (blks_to_bytes(inode, start_blk) < blks_to_bytes(inode,
1998 max_inode_blocks(inode)))
1999 goto prep_next;
2000
2001 flags |= FIEMAP_EXTENT_LAST;
2002 }
2003
2004 compr_appended = false;
2005 /* In a case of compressed cluster, append this to the last extent */
2006 if (compr_cluster && ((map.m_flags & F2FS_MAP_UNWRITTEN) ||
2007 !(map.m_flags & F2FS_MAP_FLAGS))) {
2008 compr_appended = true;
2009 goto skip_fill;
2010 }
2011
2012 if (size) {
2013 flags |= FIEMAP_EXTENT_MERGED;
2014 if (IS_ENCRYPTED(inode))
2015 flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
2016
2017 ret = fiemap_fill_next_extent(fieinfo, logical,
2018 phys, size, flags);
2019 trace_f2fs_fiemap(inode, logical, phys, size, flags, ret);
2020 if (ret)
2021 goto out;
2022 size = 0;
2023 }
2024
2025 if (start_blk > last_blk)
2026 goto out;
2027
2028skip_fill:
2029 if (map.m_pblk == COMPRESS_ADDR) {
2030 compr_cluster = true;
2031 count_in_cluster = 1;
2032 } else if (compr_appended) {
2033 unsigned int appended_blks = cluster_size -
2034 count_in_cluster + 1;
2035 size += blks_to_bytes(inode, appended_blks);
2036 start_blk += appended_blks;
2037 compr_cluster = false;
2038 } else {
2039 logical = blks_to_bytes(inode, start_blk);
2040 phys = __is_valid_data_blkaddr(map.m_pblk) ?
2041 blks_to_bytes(inode, map.m_pblk) : 0;
2042 size = blks_to_bytes(inode, map.m_len);
2043 flags = 0;
2044
2045 if (compr_cluster) {
2046 flags = FIEMAP_EXTENT_ENCODED;
2047 count_in_cluster += map.m_len;
2048 if (count_in_cluster == cluster_size) {
2049 compr_cluster = false;
2050 size += blks_to_bytes(inode, 1);
2051 }
2052 } else if (map.m_flags & F2FS_MAP_UNWRITTEN) {
2053 flags = FIEMAP_EXTENT_UNWRITTEN;
2054 }
2055
2056 start_blk += bytes_to_blks(inode, size);
2057 }
2058
2059prep_next:
2060 cond_resched();
2061 if (fatal_signal_pending(current))
2062 ret = -EINTR;
2063 else
2064 goto next;
2065out:
2066 if (ret == 1)
2067 ret = 0;
2068
2069 inode_unlock(inode);
2070 return ret;
2071}
2072
2073static inline loff_t f2fs_readpage_limit(struct inode *inode)
2074{
2075 if (IS_ENABLED(CONFIG_FS_VERITY) &&
2076 (IS_VERITY(inode) || f2fs_verity_in_progress(inode)))
2077 return inode->i_sb->s_maxbytes;
2078
2079 return i_size_read(inode);
2080}
2081
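/*
 * Read a single uncompressed data page. The page is mapped via
 * f2fs_map_blocks() (reusing the caller's previous mapping in *map when it
 * already covers this block), merged into the bio cached in *bio_ret when
 * contiguous with it, or submitted in a new bio otherwise. Pages beyond
 * f2fs_readpage_limit() are simply zeroed out instead of being read.
 */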
2082static int f2fs_read_single_page(struct inode *inode, struct page *page,
2083 unsigned nr_pages,
2084 struct f2fs_map_blocks *map,
2085 struct bio **bio_ret,
2086 sector_t *last_block_in_bio,
2087 bool is_readahead)
2088{
2089 struct bio *bio = *bio_ret;
2090 const unsigned blocksize = blks_to_bytes(inode, 1);
2091 sector_t block_in_file;
2092 sector_t last_block;
2093 sector_t last_block_in_file;
2094 sector_t block_nr;
2095 int ret = 0;
2096
2097 block_in_file = (sector_t)page_index(page);
2098 last_block = block_in_file + nr_pages;
2099 last_block_in_file = bytes_to_blks(inode,
2100 f2fs_readpage_limit(inode) + blocksize - 1);
2101 if (last_block > last_block_in_file)
2102 last_block = last_block_in_file;
2103
2104	/* just zero out the page which is beyond EOF */
2105 if (block_in_file >= last_block)
2106 goto zero_out;
2107 /*
2108 * Map blocks using the previous result first.
2109 */
2110 if ((map->m_flags & F2FS_MAP_MAPPED) &&
2111 block_in_file > map->m_lblk &&
2112 block_in_file < (map->m_lblk + map->m_len))
2113 goto got_it;
2114
2115 /*
2116 * Then do more f2fs_map_blocks() calls until we are
2117 * done with this page.
2118 */
2119 map->m_lblk = block_in_file;
2120 map->m_len = last_block - block_in_file;
2121
2122 ret = f2fs_map_blocks(inode, map, 0, F2FS_GET_BLOCK_DEFAULT);
2123 if (ret)
2124 goto out;
2125got_it:
2126 if ((map->m_flags & F2FS_MAP_MAPPED)) {
2127 block_nr = map->m_pblk + block_in_file - map->m_lblk;
2128 SetPageMappedToDisk(page);
2129
2130 if (!PageUptodate(page) && (!PageSwapCache(page) &&
2131 !cleancache_get_page(page))) {
2132 SetPageUptodate(page);
2133 goto confused;
2134 }
2135
2136 if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
2137 DATA_GENERIC_ENHANCE_READ)) {
2138 ret = -EFSCORRUPTED;
2139 goto out;
2140 }
2141 } else {
2142zero_out:
2143 zero_user_segment(page, 0, PAGE_SIZE);
2144 if (f2fs_need_verity(inode, page->index) &&
2145 !fsverity_verify_page(page)) {
2146 ret = -EIO;
2147 goto out;
2148 }
2149 if (!PageUptodate(page))
2150 SetPageUptodate(page);
2151 unlock_page(page);
2152 goto out;
2153 }
2154
2155 /*
2156 * This page will go to BIO. Do we need to send this
2157 * BIO off first?
2158 */
2159 if (bio && (!page_is_mergeable(F2FS_I_SB(inode), bio,
2160 *last_block_in_bio, block_nr) ||
2161 !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
2162submit_and_realloc:
2163 __submit_bio(F2FS_I_SB(inode), bio, DATA);
2164 bio = NULL;
2165 }
2166 if (bio == NULL) {
2167 bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
2168 is_readahead ? REQ_RAHEAD : 0, page->index,
2169 false);
2170 if (IS_ERR(bio)) {
2171 ret = PTR_ERR(bio);
2172 bio = NULL;
2173 goto out;
2174 }
2175 }
2176
2177 /*
2178 * If the page is under writeback, we need to wait for
2179 * its completion to see the correct decrypted data.
2180 */
2181 f2fs_wait_on_block_writeback(inode, block_nr);
2182
2183 if (bio_add_page(bio, page, blocksize, 0) < blocksize)
2184 goto submit_and_realloc;
2185
2186 inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
2187 f2fs_update_iostat(F2FS_I_SB(inode), FS_DATA_READ_IO, F2FS_BLKSIZE);
2188 ClearPageError(page);
2189 *last_block_in_bio = block_nr;
2190 goto out;
2191confused:
2192 if (bio) {
2193 __submit_bio(F2FS_I_SB(inode), bio, DATA);
2194 bio = NULL;
2195 }
2196 unlock_page(page);
2197out:
2198 *bio_ret = bio;
2199 return ret;
2200}
2201
2202#ifdef CONFIG_F2FS_FS_COMPRESSION
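/*
 * Read the compressed pages (cpages) backing one cluster and queue them for
 * decompression via STEP_DECOMPRESS. Block addresses come either from the
 * dnode or, when the extent cache already covers the cluster, from the
 * cached extent (from_dnode == false).
 */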
2203int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
2204 unsigned nr_pages, sector_t *last_block_in_bio,
2205 bool is_readahead, bool for_write)
2206{
2207 struct dnode_of_data dn;
2208 struct inode *inode = cc->inode;
2209 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2210 struct bio *bio = *bio_ret;
2211 unsigned int start_idx = cc->cluster_idx << cc->log_cluster_size;
2212 sector_t last_block_in_file;
2213 const unsigned blocksize = blks_to_bytes(inode, 1);
2214 struct decompress_io_ctx *dic = NULL;
2215 struct extent_info ei = {0, };
2216 bool from_dnode = true;
2217 int i;
2218 int ret = 0;
2219
2220 f2fs_bug_on(sbi, f2fs_cluster_is_empty(cc));
2221
2222 last_block_in_file = bytes_to_blks(inode,
2223 f2fs_readpage_limit(inode) + blocksize - 1);
2224
2225 /* get rid of pages beyond EOF */
2226 for (i = 0; i < cc->cluster_size; i++) {
2227 struct page *page = cc->rpages[i];
2228
2229 if (!page)
2230 continue;
2231 if ((sector_t)page->index >= last_block_in_file) {
2232 zero_user_segment(page, 0, PAGE_SIZE);
2233 if (!PageUptodate(page))
2234 SetPageUptodate(page);
2235 } else if (!PageUptodate(page)) {
2236 continue;
2237 }
2238 unlock_page(page);
2239 if (for_write)
2240 put_page(page);
2241 cc->rpages[i] = NULL;
2242 cc->nr_rpages--;
2243 }
2244
2245 /* we are done since all pages are beyond EOF */
2246 if (f2fs_cluster_is_empty(cc))
2247 goto out;
2248
2249 if (f2fs_lookup_extent_cache(inode, start_idx, &ei))
2250 from_dnode = false;
2251
2252 if (!from_dnode)
2253 goto skip_reading_dnode;
2254
2255 set_new_dnode(&dn, inode, NULL, NULL, 0);
2256 ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
2257 if (ret)
2258 goto out;
2259
2260 f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
2261
2262skip_reading_dnode:
2263 for (i = 1; i < cc->cluster_size; i++) {
2264 block_t blkaddr;
2265
2266 blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_page,
2267 dn.ofs_in_node + i) :
2268 ei.blk + i - 1;
2269
2270 if (!__is_valid_data_blkaddr(blkaddr))
2271 break;
2272
2273 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
2274 ret = -EFAULT;
2275 goto out_put_dnode;
2276 }
2277 cc->nr_cpages++;
2278
2279 if (!from_dnode && i >= ei.c_len)
2280 break;
2281 }
2282
2283 /* nothing to decompress */
2284 if (cc->nr_cpages == 0) {
2285 ret = 0;
2286 goto out_put_dnode;
2287 }
2288
2289 dic = f2fs_alloc_dic(cc);
2290 if (IS_ERR(dic)) {
2291 ret = PTR_ERR(dic);
2292 goto out_put_dnode;
2293 }
2294
2295 for (i = 0; i < cc->nr_cpages; i++) {
2296 struct page *page = dic->cpages[i];
2297 block_t blkaddr;
2298 struct bio_post_read_ctx *ctx;
2299
2300 blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_page,
2301 dn.ofs_in_node + i + 1) :
2302 ei.blk + i;
2303
2304 f2fs_wait_on_block_writeback(inode, blkaddr);
2305
2306 if (f2fs_load_compressed_page(sbi, page, blkaddr)) {
2307 if (atomic_dec_and_test(&dic->remaining_pages))
2308 f2fs_decompress_cluster(dic);
2309 continue;
2310 }
2311
2312 if (bio && (!page_is_mergeable(sbi, bio,
2313 *last_block_in_bio, blkaddr) ||
2314 !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
2315submit_and_realloc:
2316 __submit_bio(sbi, bio, DATA);
2317 bio = NULL;
2318 }
2319
2320 if (!bio) {
2321 bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
2322 is_readahead ? REQ_RAHEAD : 0,
2323 page->index, for_write);
2324 if (IS_ERR(bio)) {
2325 ret = PTR_ERR(bio);
2326 f2fs_decompress_end_io(dic, ret);
2327 f2fs_put_dnode(&dn);
2328 *bio_ret = NULL;
2329 return ret;
2330 }
2331 }
2332
2333 if (bio_add_page(bio, page, blocksize, 0) < blocksize)
2334 goto submit_and_realloc;
2335
2336 ctx = get_post_read_ctx(bio);
2337 ctx->enabled_steps |= STEP_DECOMPRESS;
2338 refcount_inc(&dic->refcnt);
2339
2340 inc_page_count(sbi, F2FS_RD_DATA);
2341 f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
2342 f2fs_update_iostat(sbi, FS_CDATA_READ_IO, F2FS_BLKSIZE);
2343 ClearPageError(page);
2344 *last_block_in_bio = blkaddr;
2345 }
2346
2347 if (from_dnode)
2348 f2fs_put_dnode(&dn);
2349
2350 *bio_ret = bio;
2351 return 0;
2352
2353out_put_dnode:
2354 if (from_dnode)
2355 f2fs_put_dnode(&dn);
2356out:
2357 for (i = 0; i < cc->cluster_size; i++) {
2358 if (cc->rpages[i]) {
2359 ClearPageUptodate(cc->rpages[i]);
2360 ClearPageError(cc->rpages[i]);
2361 unlock_page(cc->rpages[i]);
2362 }
2363 }
2364 *bio_ret = bio;
2365 return ret;
2366}
2367#endif
2368
2369/*
2370 * This function was originally taken from fs/mpage.c, and customized for f2fs.
2371 * The major change is that block_size == page_size in f2fs by default.
2372 */
2373static int f2fs_mpage_readpages(struct inode *inode,
2374 struct readahead_control *rac, struct page *page)
2375{
2376 struct bio *bio = NULL;
2377 sector_t last_block_in_bio = 0;
2378 struct f2fs_map_blocks map;
2379#ifdef CONFIG_F2FS_FS_COMPRESSION
2380 struct compress_ctx cc = {
2381 .inode = inode,
2382 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
2383 .cluster_size = F2FS_I(inode)->i_cluster_size,
2384 .cluster_idx = NULL_CLUSTER,
2385 .rpages = NULL,
2386 .cpages = NULL,
2387 .nr_rpages = 0,
2388 .nr_cpages = 0,
2389 };
2390 pgoff_t nc_cluster_idx = NULL_CLUSTER;
2391#endif
2392 unsigned nr_pages = rac ? readahead_count(rac) : 1;
2393 unsigned max_nr_pages = nr_pages;
2394 int ret = 0;
2395
2396 map.m_pblk = 0;
2397 map.m_lblk = 0;
2398 map.m_len = 0;
2399 map.m_flags = 0;
2400 map.m_next_pgofs = NULL;
2401 map.m_next_extent = NULL;
2402 map.m_seg_type = NO_CHECK_TYPE;
2403 map.m_may_create = false;
2404
2405 for (; nr_pages; nr_pages--) {
2406 if (rac) {
2407 page = readahead_page(rac);
2408 prefetchw(&page->flags);
2409 }
2410
2411#ifdef CONFIG_F2FS_FS_COMPRESSION
2412 if (f2fs_compressed_file(inode)) {
2413			/* there are remaining compressed pages, submit them */
2414 if (!f2fs_cluster_can_merge_page(&cc, page->index)) {
2415 ret = f2fs_read_multi_pages(&cc, &bio,
2416 max_nr_pages,
2417 &last_block_in_bio,
2418 rac != NULL, false);
2419 f2fs_destroy_compress_ctx(&cc, false);
2420 if (ret)
2421 goto set_error_page;
2422 }
2423 if (cc.cluster_idx == NULL_CLUSTER) {
2424 if (nc_cluster_idx ==
2425 page->index >> cc.log_cluster_size) {
2426 goto read_single_page;
2427 }
2428
2429 ret = f2fs_is_compressed_cluster(inode, page->index);
2430 if (ret < 0)
2431 goto set_error_page;
2432 else if (!ret) {
2433 nc_cluster_idx =
2434 page->index >> cc.log_cluster_size;
2435 goto read_single_page;
2436 }
2437
2438 nc_cluster_idx = NULL_CLUSTER;
2439 }
2440 ret = f2fs_init_compress_ctx(&cc);
2441 if (ret)
2442 goto set_error_page;
2443
2444 f2fs_compress_ctx_add_page(&cc, page);
2445
2446 goto next_page;
2447 }
2448read_single_page:
2449#endif
2450
2451 ret = f2fs_read_single_page(inode, page, max_nr_pages, &map,
2452 &bio, &last_block_in_bio, rac);
2453 if (ret) {
2454#ifdef CONFIG_F2FS_FS_COMPRESSION
2455set_error_page:
2456#endif
2457 SetPageError(page);
2458 zero_user_segment(page, 0, PAGE_SIZE);
2459 unlock_page(page);
2460 }
2461#ifdef CONFIG_F2FS_FS_COMPRESSION
2462next_page:
2463#endif
2464 if (rac)
2465 put_page(page);
2466
2467#ifdef CONFIG_F2FS_FS_COMPRESSION
2468 if (f2fs_compressed_file(inode)) {
2469 /* last page */
2470 if (nr_pages == 1 && !f2fs_cluster_is_empty(&cc)) {
2471 ret = f2fs_read_multi_pages(&cc, &bio,
2472 max_nr_pages,
2473 &last_block_in_bio,
2474 rac != NULL, false);
2475 f2fs_destroy_compress_ctx(&cc, false);
2476 }
2477 }
2478#endif
2479 }
2480 if (bio)
2481 __submit_bio(F2FS_I_SB(inode), bio, DATA);
2482 return ret;
2483}
2484
2485static int f2fs_read_data_page(struct file *file, struct page *page)
2486{
2487 struct inode *inode = page_file_mapping(page)->host;
2488 int ret = -EAGAIN;
2489
2490 trace_f2fs_readpage(page, DATA);
2491
2492 if (!f2fs_is_compress_backend_ready(inode)) {
2493 unlock_page(page);
2494 return -EOPNOTSUPP;
2495 }
2496
2497 /* If the file has inline data, try to read it directly */
2498 if (f2fs_has_inline_data(inode))
2499 ret = f2fs_read_inline_data(inode, page);
2500 if (ret == -EAGAIN)
2501 ret = f2fs_mpage_readpages(inode, NULL, page);
2502 return ret;
2503}
2504
2505static void f2fs_readahead(struct readahead_control *rac)
2506{
2507 struct inode *inode = rac->mapping->host;
2508
2509 trace_f2fs_readpages(inode, readahead_index(rac), readahead_count(rac));
2510
2511 if (!f2fs_is_compress_backend_ready(inode))
2512 return;
2513
2514 /* If the file has inline data, skip readpages */
2515 if (f2fs_has_inline_data(inode))
2516 return;
2517
2518 f2fs_mpage_readpages(inode, rac, NULL);
2519}
2520
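/*
 * Encrypt one page before writing it out. Inline (blk-crypto) inodes need no
 * bounce page, so they return early. On -ENOMEM, pending merged writes are
 * flushed and the allocation is retried with __GFP_NOFAIL. If META_MAPPING
 * holds a cached copy of the old block (as used during GC), it is updated
 * with the newly encrypted contents.
 */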
2521int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
2522{
2523 struct inode *inode = fio->page->mapping->host;
2524 struct page *mpage, *page;
2525 gfp_t gfp_flags = GFP_NOFS;
2526
2527 if (!f2fs_encrypted_file(inode))
2528 return 0;
2529
2530 page = fio->compressed_page ? fio->compressed_page : fio->page;
2531
2532 /* wait for GCed page writeback via META_MAPPING */
2533 f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
2534
2535 if (fscrypt_inode_uses_inline_crypto(inode))
2536 return 0;
2537
2538retry_encrypt:
2539 fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page,
2540 PAGE_SIZE, 0, gfp_flags);
2541 if (IS_ERR(fio->encrypted_page)) {
2542 /* flush pending IOs and wait for a while in the ENOMEM case */
2543 if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
2544 f2fs_flush_merged_writes(fio->sbi);
2545 congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
2546 gfp_flags |= __GFP_NOFAIL;
2547 goto retry_encrypt;
2548 }
2549 return PTR_ERR(fio->encrypted_page);
2550 }
2551
2552 mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr);
2553 if (mpage) {
2554 if (PageUptodate(mpage))
2555 memcpy(page_address(mpage),
2556 page_address(fio->encrypted_page), PAGE_SIZE);
2557 f2fs_put_page(mpage, 1);
2558 }
2559 return 0;
2560}
2561
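/*
 * Decide whether the in-place-update (IPU) policy allows rewriting this block
 * where it is, instead of allocating a new one. ipu_policy is a bitmask of
 * F2FS_IPU_* conditions (typically tunable through sysfs, e.g.
 * /sys/fs/f2fs/<dev>/ipu_policy). For example, a policy with only the
 * F2FS_IPU_UTIL bit set permits IPU only once utilization(sbi) exceeds
 * min_ipu_util.
 */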
2562static inline bool check_inplace_update_policy(struct inode *inode,
2563 struct f2fs_io_info *fio)
2564{
2565 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2566 unsigned int policy = SM_I(sbi)->ipu_policy;
2567
2568 if (policy & (0x1 << F2FS_IPU_FORCE))
2569 return true;
2570 if (policy & (0x1 << F2FS_IPU_SSR) && f2fs_need_SSR(sbi))
2571 return true;
2572 if (policy & (0x1 << F2FS_IPU_UTIL) &&
2573 utilization(sbi) > SM_I(sbi)->min_ipu_util)
2574 return true;
2575 if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && f2fs_need_SSR(sbi) &&
2576 utilization(sbi) > SM_I(sbi)->min_ipu_util)
2577 return true;
2578
2579	/*
2580	 * IPU for rewriting async pages
2581	 */
2582 if (policy & (0x1 << F2FS_IPU_ASYNC) &&
2583 fio && fio->op == REQ_OP_WRITE &&
2584 !(fio->op_flags & REQ_SYNC) &&
2585 !IS_ENCRYPTED(inode))
2586 return true;
2587
2588 /* this is only set during fdatasync */
2589 if (policy & (0x1 << F2FS_IPU_FSYNC) &&
2590 is_inode_flag_set(inode, FI_NEED_IPU))
2591 return true;
2592
2593 if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2594 !f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
2595 return true;
2596
2597 return false;
2598}
2599
2600bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
2601{
2602 /* swap file is migrating in aligned write mode */
2603 if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
2604 return false;
2605
2606 if (f2fs_is_pinned_file(inode))
2607 return true;
2608
2609	/* if this is a cold file, we should overwrite to avoid fragmentation */
2610 if (file_is_cold(inode))
2611 return true;
2612
2613 return check_inplace_update_policy(inode, fio);
2614}
2615
2616bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
2617{
2618 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2619
2620 if (f2fs_lfs_mode(sbi))
2621 return true;
2622 if (S_ISDIR(inode->i_mode))
2623 return true;
2624 if (IS_NOQUOTA(inode))
2625 return true;
2626 if (f2fs_is_atomic_file(inode))
2627 return true;
2628 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
2629 return true;
2630
2631 /* swap file is migrating in aligned write mode */
2632 if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
2633 return true;
2634
2635 if (fio) {
2636 if (page_private_gcing(fio->page))
2637 return true;
2638 if (page_private_dummy(fio->page))
2639 return true;
2640 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2641 f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
2642 return true;
2643 }
2644 return false;
2645}
2646
2647static inline bool need_inplace_update(struct f2fs_io_info *fio)
2648{
2649 struct inode *inode = fio->page->mapping->host;
2650
2651 if (f2fs_should_update_outplace(inode, fio))
2652 return false;
2653
2654 return f2fs_should_update_inplace(inode, fio);
2655}
2656
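/*
 * Write one data page, choosing between an in-place update (IPU) of the
 * existing block address and an out-of-place (OPU) allocation through
 * f2fs_outplace_write_data(). A hit in the extent cache combined with
 * need_inplace_update() lets us skip the dnode lookup entirely (the
 * ipu_force path).
 */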
2657int f2fs_do_write_data_page(struct f2fs_io_info *fio)
2658{
2659 struct page *page = fio->page;
2660 struct inode *inode = page->mapping->host;
2661 struct dnode_of_data dn;
2662 struct extent_info ei = {0, };
2663 struct node_info ni;
2664 bool ipu_force = false;
2665 int err = 0;
2666
2667 set_new_dnode(&dn, inode, NULL, NULL, 0);
2668 if (need_inplace_update(fio) &&
2669 f2fs_lookup_extent_cache(inode, page->index, &ei)) {
2670 fio->old_blkaddr = ei.blk + page->index - ei.fofs;
2671
2672 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
2673 DATA_GENERIC_ENHANCE))
2674 return -EFSCORRUPTED;
2675
2676 ipu_force = true;
2677 fio->need_lock = LOCK_DONE;
2678 goto got_it;
2679 }
2680
2681	/* Avoid deadlock between page->lock and f2fs_lock_op */
2682 if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
2683 return -EAGAIN;
2684
2685 err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
2686 if (err)
2687 goto out;
2688
2689 fio->old_blkaddr = dn.data_blkaddr;
2690
2691 /* This page is already truncated */
2692 if (fio->old_blkaddr == NULL_ADDR) {
2693 ClearPageUptodate(page);
2694 clear_page_private_gcing(page);
2695 goto out_writepage;
2696 }
2697got_it:
2698 if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
2699 !f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
2700 DATA_GENERIC_ENHANCE)) {
2701 err = -EFSCORRUPTED;
2702 goto out_writepage;
2703 }
2704	/*
2705	 * If the current allocation needs SSR,
2706	 * it is better to do in-place writes for the updated data.
2707	 */
2708 if (ipu_force ||
2709 (__is_valid_data_blkaddr(fio->old_blkaddr) &&
2710 need_inplace_update(fio))) {
2711 err = f2fs_encrypt_one_page(fio);
2712 if (err)
2713 goto out_writepage;
2714
2715 set_page_writeback(page);
2716 ClearPageError(page);
2717 f2fs_put_dnode(&dn);
2718 if (fio->need_lock == LOCK_REQ)
2719 f2fs_unlock_op(fio->sbi);
2720 err = f2fs_inplace_write_data(fio);
2721 if (err) {
2722 if (fscrypt_inode_uses_fs_layer_crypto(inode))
2723 fscrypt_finalize_bounce_page(&fio->encrypted_page);
2724 if (PageWriteback(page))
2725 end_page_writeback(page);
2726 } else {
2727 set_inode_flag(inode, FI_UPDATE_WRITE);
2728 }
2729 trace_f2fs_do_write_data_page(fio->page, IPU);
2730 return err;
2731 }
2732
2733 if (fio->need_lock == LOCK_RETRY) {
2734 if (!f2fs_trylock_op(fio->sbi)) {
2735 err = -EAGAIN;
2736 goto out_writepage;
2737 }
2738 fio->need_lock = LOCK_REQ;
2739 }
2740
2741 err = f2fs_get_node_info(fio->sbi, dn.nid, &ni);
2742 if (err)
2743 goto out_writepage;
2744
2745 fio->version = ni.version;
2746
2747 err = f2fs_encrypt_one_page(fio);
2748 if (err)
2749 goto out_writepage;
2750
2751 set_page_writeback(page);
2752 ClearPageError(page);
2753
2754 if (fio->compr_blocks && fio->old_blkaddr == COMPRESS_ADDR)
2755 f2fs_i_compr_blocks_update(inode, fio->compr_blocks - 1, false);
2756
2757 /* LFS mode write path */
2758 f2fs_outplace_write_data(&dn, fio);
2759 trace_f2fs_do_write_data_page(page, OPU);
2760 set_inode_flag(inode, FI_APPEND_WRITE);
2761 if (page->index == 0)
2762 set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
2763out_writepage:
2764 f2fs_put_dnode(&dn);
2765out:
2766 if (fio->need_lock == LOCK_REQ)
2767 f2fs_unlock_op(fio->sbi);
2768 return err;
2769}
2770
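/*
 * Write back a single page for ->writepage/->writepages. *submitted is set
 * to 1 when a bio was actually issued for this page. Returns 0 on success,
 * AOP_WRITEPAGE_ACTIVATE when the page should stay dirty and activated
 * (the redirty_out path with no hard error or under reclaim), or a negative
 * error otherwise.
 */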
2771int f2fs_write_single_data_page(struct page *page, int *submitted,
2772 struct bio **bio,
2773 sector_t *last_block,
2774 struct writeback_control *wbc,
2775 enum iostat_type io_type,
2776 int compr_blocks,
2777 bool allow_balance)
2778{
2779 struct inode *inode = page->mapping->host;
2780 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2781 loff_t i_size = i_size_read(inode);
2782 const pgoff_t end_index = ((unsigned long long)i_size)
2783 >> PAGE_SHIFT;
2784 loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT;
2785 unsigned offset = 0;
2786 bool need_balance_fs = false;
2787 int err = 0;
2788 struct f2fs_io_info fio = {
2789 .sbi = sbi,
2790 .ino = inode->i_ino,
2791 .type = DATA,
2792 .op = REQ_OP_WRITE,
2793 .op_flags = wbc_to_write_flags(wbc),
2794 .old_blkaddr = NULL_ADDR,
2795 .page = page,
2796 .encrypted_page = NULL,
2797 .submitted = false,
2798 .compr_blocks = compr_blocks,
2799 .need_lock = LOCK_RETRY,
2800 .io_type = io_type,
2801 .io_wbc = wbc,
2802 .bio = bio,
2803 .last_block = last_block,
2804 };
2805
2806 trace_f2fs_writepage(page, DATA);
2807
2808	/* we should bypass data pages to let the kworker jobs proceed */
2809 if (unlikely(f2fs_cp_error(sbi))) {
2810 mapping_set_error(page->mapping, -EIO);
2811		/*
2812		 * don't drop any dirty dentry pages, to keep the latest
2813		 * directory structure.
2814		 */
2815 if (S_ISDIR(inode->i_mode))
2816 goto redirty_out;
2817 goto out;
2818 }
2819
2820 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
2821 goto redirty_out;
2822
2823 if (page->index < end_index ||
2824 f2fs_verity_in_progress(inode) ||
2825 compr_blocks)
2826 goto write;
2827
2828	/*
2829	 * If the offset is beyond the end of the file,
2830	 * this page does not have to be written to disk.
2831	 */
2832 offset = i_size & (PAGE_SIZE - 1);
2833 if ((page->index >= end_index + 1) || !offset)
2834 goto out;
2835
2836 zero_user_segment(page, offset, PAGE_SIZE);
2837write:
2838 if (f2fs_is_drop_cache(inode))
2839 goto out;
2840	/* we should not write the 0'th page, which holds the journal header */
2841 if (f2fs_is_volatile_file(inode) && (!page->index ||
2842 (!wbc->for_reclaim &&
2843 f2fs_available_free_memory(sbi, BASE_CHECK))))
2844 goto redirty_out;
2845
2846 /* Dentry/quota blocks are controlled by checkpoint */
2847 if (S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) {
2848		/*
2849		 * We need to wait for node_write to avoid block allocation during
2850		 * checkpoint. This can only happen for quota writes, which can cause
2851		 * the discard race condition below.
2852		 */
2853 if (IS_NOQUOTA(inode))
2854 down_read(&sbi->node_write);
2855
2856 fio.need_lock = LOCK_DONE;
2857 err = f2fs_do_write_data_page(&fio);
2858
2859 if (IS_NOQUOTA(inode))
2860 up_read(&sbi->node_write);
2861
2862 goto done;
2863 }
2864
2865 if (!wbc->for_reclaim)
2866 need_balance_fs = true;
2867 else if (has_not_enough_free_secs(sbi, 0, 0))
2868 goto redirty_out;
2869 else
2870 set_inode_flag(inode, FI_HOT_DATA);
2871
2872 err = -EAGAIN;
2873 if (f2fs_has_inline_data(inode)) {
2874 err = f2fs_write_inline_data(inode, page);
2875 if (!err)
2876 goto out;
2877 }
2878
2879 if (err == -EAGAIN) {
2880 err = f2fs_do_write_data_page(&fio);
2881 if (err == -EAGAIN) {
2882 fio.need_lock = LOCK_REQ;
2883 err = f2fs_do_write_data_page(&fio);
2884 }
2885 }
2886
2887 if (err) {
2888 file_set_keep_isize(inode);
2889 } else {
2890 spin_lock(&F2FS_I(inode)->i_size_lock);
2891 if (F2FS_I(inode)->last_disk_size < psize)
2892 F2FS_I(inode)->last_disk_size = psize;
2893 spin_unlock(&F2FS_I(inode)->i_size_lock);
2894 }
2895
2896done:
2897 if (err && err != -ENOENT)
2898 goto redirty_out;
2899
2900out:
2901 inode_dec_dirty_pages(inode);
2902 if (err) {
2903 ClearPageUptodate(page);
2904 clear_page_private_gcing(page);
2905 }
2906
2907 if (wbc->for_reclaim) {
2908 f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA);
2909 clear_inode_flag(inode, FI_HOT_DATA);
2910 f2fs_remove_dirty_inode(inode);
2911 submitted = NULL;
2912 }
2913 unlock_page(page);
2914 if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
2915 !F2FS_I(inode)->cp_task && allow_balance)
2916 f2fs_balance_fs(sbi, need_balance_fs);
2917
2918 if (unlikely(f2fs_cp_error(sbi))) {
2919 f2fs_submit_merged_write(sbi, DATA);
2920 f2fs_submit_merged_ipu_write(sbi, bio, NULL);
2921 submitted = NULL;
2922 }
2923
2924 if (submitted)
2925 *submitted = fio.submitted ? 1 : 0;
2926
2927 return 0;
2928
2929redirty_out:
2930 redirty_page_for_writepage(wbc, page);
2931	/*
2932	 * pageout() in MM translates EAGAIN, so it calls handle_write_error()
2933	 * -> mapping_set_error() -> set_bit(AS_EIO, ...).
2934	 * file_write_and_wait_range() will then see the EIO error, which is
2935	 * critical for fsync() to return the atomic_write failure to the user.
2936	 */
2937 if (!err || wbc->for_reclaim)
2938 return AOP_WRITEPAGE_ACTIVATE;
2939 unlock_page(page);
2940 return err;
2941}
2942
2943static int f2fs_write_data_page(struct page *page,
2944 struct writeback_control *wbc)
2945{
2946#ifdef CONFIG_F2FS_FS_COMPRESSION
2947 struct inode *inode = page->mapping->host;
2948
2949 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
2950 goto out;
2951
2952 if (f2fs_compressed_file(inode)) {
2953 if (f2fs_is_compressed_cluster(inode, page->index)) {
2954 redirty_page_for_writepage(wbc, page);
2955 return AOP_WRITEPAGE_ACTIVATE;
2956 }
2957 }
2958out:
2959#endif
2960
2961 return f2fs_write_single_data_page(page, NULL, NULL, NULL,
2962 wbc, FS_DATA_IO, 0, true);
2963}
2964
2965/*
2966 * This function was copied from write_cache_pages() in mm/page-writeback.c.
2967 * The major change is that the write step for cold data pages is done
2968 * separately from warm/hot data pages.
2969 */
2970static int f2fs_write_cache_pages(struct address_space *mapping,
2971 struct writeback_control *wbc,
2972 enum iostat_type io_type)
2973{
2974 int ret = 0;
2975 int done = 0, retry = 0;
2976 struct pagevec pvec;
2977 struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
2978 struct bio *bio = NULL;
2979 sector_t last_block;
2980#ifdef CONFIG_F2FS_FS_COMPRESSION
2981 struct inode *inode = mapping->host;
2982 struct compress_ctx cc = {
2983 .inode = inode,
2984 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
2985 .cluster_size = F2FS_I(inode)->i_cluster_size,
2986 .cluster_idx = NULL_CLUSTER,
2987 .rpages = NULL,
2988 .nr_rpages = 0,
2989 .cpages = NULL,
2990 .rbuf = NULL,
2991 .cbuf = NULL,
2992 .rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size,
2993 .private = NULL,
2994 };
2995#endif
2996 int nr_pages;
2997 pgoff_t index;
2998 pgoff_t end; /* Inclusive */
2999 pgoff_t done_index;
3000 int range_whole = 0;
3001 xa_mark_t tag;
3002 int nwritten = 0;
3003 int submitted = 0;
3004 int i;
3005
3006 pagevec_init(&pvec);
3007
3008 if (get_dirty_pages(mapping->host) <=
3009 SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
3010 set_inode_flag(mapping->host, FI_HOT_DATA);
3011 else
3012 clear_inode_flag(mapping->host, FI_HOT_DATA);
3013
3014 if (wbc->range_cyclic) {
3015 index = mapping->writeback_index; /* prev offset */
3016 end = -1;
3017 } else {
3018 index = wbc->range_start >> PAGE_SHIFT;
3019 end = wbc->range_end >> PAGE_SHIFT;
3020 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
3021 range_whole = 1;
3022 }
3023 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
3024 tag = PAGECACHE_TAG_TOWRITE;
3025 else
3026 tag = PAGECACHE_TAG_DIRTY;
3027retry:
3028 retry = 0;
3029 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
3030 tag_pages_for_writeback(mapping, index, end);
3031 done_index = index;
3032 while (!done && !retry && (index <= end)) {
3033 nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
3034 tag);
3035 if (nr_pages == 0)
3036 break;
3037
3038 for (i = 0; i < nr_pages; i++) {
3039 struct page *page = pvec.pages[i];
3040 bool need_readd;
3041readd:
3042 need_readd = false;
3043#ifdef CONFIG_F2FS_FS_COMPRESSION
3044 if (f2fs_compressed_file(inode)) {
3045 void *fsdata = NULL;
3046 struct page *pagep;
3047 int ret2;
3048
3049 ret = f2fs_init_compress_ctx(&cc);
3050 if (ret) {
3051 done = 1;
3052 break;
3053 }
3054
3055 if (!f2fs_cluster_can_merge_page(&cc,
3056 page->index)) {
3057 ret = f2fs_write_multi_pages(&cc,
3058 &submitted, wbc, io_type);
3059 if (!ret)
3060 need_readd = true;
3061 goto result;
3062 }
3063
3064 if (unlikely(f2fs_cp_error(sbi)))
3065 goto lock_page;
3066
3067 if (!f2fs_cluster_is_empty(&cc))
3068 goto lock_page;
3069
3070 ret2 = f2fs_prepare_compress_overwrite(
3071 inode, &pagep,
3072 page->index, &fsdata);
3073 if (ret2 < 0) {
3074 ret = ret2;
3075 done = 1;
3076 break;
3077 } else if (ret2 &&
3078 (!f2fs_compress_write_end(inode,
3079 fsdata, page->index, 1) ||
3080 !f2fs_all_cluster_page_loaded(&cc,
3081 &pvec, i, nr_pages))) {
3082 retry = 1;
3083 break;
3084 }
3085 }
3086#endif
3087			/* give priority to WB_SYNC threads */
3088 if (atomic_read(&sbi->wb_sync_req[DATA]) &&
3089 wbc->sync_mode == WB_SYNC_NONE) {
3090 done = 1;
3091 break;
3092 }
3093#ifdef CONFIG_F2FS_FS_COMPRESSION
3094lock_page:
3095#endif
3096 done_index = page->index;
3097retry_write:
3098 lock_page(page);
3099
3100 if (unlikely(page->mapping != mapping)) {
3101continue_unlock:
3102 unlock_page(page);
3103 continue;
3104 }
3105
3106 if (!PageDirty(page)) {
3107 /* someone wrote it for us */
3108 goto continue_unlock;
3109 }
3110
3111 if (PageWriteback(page)) {
3112 if (wbc->sync_mode != WB_SYNC_NONE)
3113 f2fs_wait_on_page_writeback(page,
3114 DATA, true, true);
3115 else
3116 goto continue_unlock;
3117 }
3118
3119 if (!clear_page_dirty_for_io(page))
3120 goto continue_unlock;
3121
3122#ifdef CONFIG_F2FS_FS_COMPRESSION
3123 if (f2fs_compressed_file(inode)) {
3124 get_page(page);
3125 f2fs_compress_ctx_add_page(&cc, page);
3126 continue;
3127 }
3128#endif
3129 ret = f2fs_write_single_data_page(page, &submitted,
3130 &bio, &last_block, wbc, io_type,
3131 0, true);
3132 if (ret == AOP_WRITEPAGE_ACTIVATE)
3133 unlock_page(page);
3134#ifdef CONFIG_F2FS_FS_COMPRESSION
3135result:
3136#endif
3137 nwritten += submitted;
3138 wbc->nr_to_write -= submitted;
3139
3140 if (unlikely(ret)) {
3141 /*
3142 * keep nr_to_write, since vfs uses this to
3143 * get # of written pages.
3144 */
3145 if (ret == AOP_WRITEPAGE_ACTIVATE) {
3146 ret = 0;
3147 goto next;
3148 } else if (ret == -EAGAIN) {
3149 ret = 0;
3150 if (wbc->sync_mode == WB_SYNC_ALL) {
3151 cond_resched();
3152 congestion_wait(BLK_RW_ASYNC,
3153 DEFAULT_IO_TIMEOUT);
3154 goto retry_write;
3155 }
3156 goto next;
3157 }
3158 done_index = page->index + 1;
3159 done = 1;
3160 break;
3161 }
3162
3163 if (wbc->nr_to_write <= 0 &&
3164 wbc->sync_mode == WB_SYNC_NONE) {
3165 done = 1;
3166 break;
3167 }
3168next:
3169 if (need_readd)
3170 goto readd;
3171 }
3172 pagevec_release(&pvec);
3173 cond_resched();
3174 }
3175#ifdef CONFIG_F2FS_FS_COMPRESSION
3176	/* flush remaining pages in the compress cluster */
3177 if (f2fs_compressed_file(inode) && !f2fs_cluster_is_empty(&cc)) {
3178 ret = f2fs_write_multi_pages(&cc, &submitted, wbc, io_type);
3179 nwritten += submitted;
3180 wbc->nr_to_write -= submitted;
3181 if (ret) {
3182 done = 1;
3183 retry = 0;
3184 }
3185 }
3186 if (f2fs_compressed_file(inode))
3187 f2fs_destroy_compress_ctx(&cc, false);
3188#endif
3189 if (retry) {
3190 index = 0;
3191 end = -1;
3192 goto retry;
3193 }
3194 if (wbc->range_cyclic && !done)
3195 done_index = 0;
3196 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
3197 mapping->writeback_index = done_index;
3198
3199 if (nwritten)
3200 f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
3201 NULL, 0, DATA);
3202 /* submit cached bio of IPU write */
3203 if (bio)
3204 f2fs_submit_merged_ipu_write(sbi, &bio, NULL);
3205
3206 return ret;
3207}
3208
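/*
 * Decide whether this writeback should be serialized under sbi->writepages
 * (taken in __f2fs_write_data_pages()). Compressed inodes, non-WB_SYNC_ALL
 * writeback, and inodes with at least min_seq_blocks dirty pages are
 * serialized, which keeps their block allocations from interleaving.
 */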
3209static inline bool __should_serialize_io(struct inode *inode,
3210 struct writeback_control *wbc)
3211{
3212 /* to avoid deadlock in path of data flush */
3213 if (F2FS_I(inode)->cp_task)
3214 return false;
3215
3216 if (!S_ISREG(inode->i_mode))
3217 return false;
3218 if (IS_NOQUOTA(inode))
3219 return false;
3220
3221 if (f2fs_need_compress_data(inode))
3222 return true;
3223 if (wbc->sync_mode != WB_SYNC_ALL)
3224 return true;
3225 if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
3226 return true;
3227 return false;
3228}
3229
3230static int __f2fs_write_data_pages(struct address_space *mapping,
3231 struct writeback_control *wbc,
3232 enum iostat_type io_type)
3233{
3234 struct inode *inode = mapping->host;
3235 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3236 struct blk_plug plug;
3237 int ret;
3238 bool locked = false;
3239
3240	/* deal with chardevs and other special files */
3241 if (!mapping->a_ops->writepage)
3242 return 0;
3243
3244 /* skip writing if there is no dirty page in this inode */
3245 if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
3246 return 0;
3247
3248 /* during POR, we don't need to trigger writepage at all. */
3249 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
3250 goto skip_write;
3251
3252 if ((S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) &&
3253 wbc->sync_mode == WB_SYNC_NONE &&
3254 get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
3255 f2fs_available_free_memory(sbi, DIRTY_DENTS))
3256 goto skip_write;
3257
3258 /* skip writing during file defragment */
3259 if (is_inode_flag_set(inode, FI_DO_DEFRAG))
3260 goto skip_write;
3261
3262 trace_f2fs_writepages(mapping->host, wbc, DATA);
3263
3264	/* to avoid splitting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
3265 if (wbc->sync_mode == WB_SYNC_ALL)
3266 atomic_inc(&sbi->wb_sync_req[DATA]);
3267 else if (atomic_read(&sbi->wb_sync_req[DATA]))
3268 goto skip_write;
3269
3270 if (__should_serialize_io(inode, wbc)) {
3271 mutex_lock(&sbi->writepages);
3272 locked = true;
3273 }
3274
3275 blk_start_plug(&plug);
3276 ret = f2fs_write_cache_pages(mapping, wbc, io_type);
3277 blk_finish_plug(&plug);
3278
3279 if (locked)
3280 mutex_unlock(&sbi->writepages);
3281
3282 if (wbc->sync_mode == WB_SYNC_ALL)
3283 atomic_dec(&sbi->wb_sync_req[DATA]);
3284	/*
3285	 * if some pages were truncated, we cannot guarantee that
3286	 * mapping->host can detect pending bios.
3287	 */
3288
3289 f2fs_remove_dirty_inode(inode);
3290 return ret;
3291
3292skip_write:
3293 wbc->pages_skipped += get_dirty_pages(inode);
3294 trace_f2fs_writepages(mapping->host, wbc, DATA);
3295 return 0;
3296}
3297
3298static int f2fs_write_data_pages(struct address_space *mapping,
3299 struct writeback_control *wbc)
3300{
3301 struct inode *inode = mapping->host;
3302
3303 return __f2fs_write_data_pages(mapping, wbc,
3304 F2FS_I(inode)->cp_task == current ?
3305 FS_CP_DATA_IO : FS_DATA_IO);
3306}
3307
3308static void f2fs_write_failed(struct inode *inode, loff_t to)
3309{
3310 loff_t i_size = i_size_read(inode);
3311
3312 if (IS_NOQUOTA(inode))
3313 return;
3314
3315 /* In the fs-verity case, f2fs_end_enable_verity() does the truncate */
3316 if (to > i_size && !f2fs_verity_in_progress(inode)) {
3317 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3318 filemap_invalidate_lock(inode->i_mapping);
3319
3320 truncate_pagecache(inode, i_size);
3321 f2fs_truncate_blocks(inode, i_size, true);
3322
3323 filemap_invalidate_unlock(inode->i_mapping);
3324 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3325 }
3326}
3327
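/*
 * Resolve the block address that f2fs_write_begin() will write to,
 * converting inline data or allocating a hole as needed. *node_changed
 * reports whether the node page was dirtied (e.g. by inline conversion),
 * which the caller uses to decide whether to call f2fs_balance_fs().
 */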
3328static int prepare_write_begin(struct f2fs_sb_info *sbi,
3329 struct page *page, loff_t pos, unsigned len,
3330 block_t *blk_addr, bool *node_changed)
3331{
3332 struct inode *inode = page->mapping->host;
3333 pgoff_t index = page->index;
3334 struct dnode_of_data dn;
3335 struct page *ipage;
3336 bool locked = false;
3337 struct extent_info ei = {0, };
3338 int err = 0;
3339 int flag;
3340
3341 /*
3342 * we already allocated all the blocks, so we don't need to get
3343 * the block addresses when there is no need to fill the page.
3344 */
3345 if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE &&
3346 !is_inode_flag_set(inode, FI_NO_PREALLOC) &&
3347 !f2fs_verity_in_progress(inode))
3348 return 0;
3349
3350 /* f2fs_lock_op avoids race between write CP and convert_inline_page */
3351 if (f2fs_has_inline_data(inode) && pos + len > MAX_INLINE_DATA(inode))
3352 flag = F2FS_GET_BLOCK_DEFAULT;
3353 else
3354 flag = F2FS_GET_BLOCK_PRE_AIO;
3355
3356 if (f2fs_has_inline_data(inode) ||
3357 (pos & PAGE_MASK) >= i_size_read(inode)) {
3358 f2fs_do_map_lock(sbi, flag, true);
3359 locked = true;
3360 }
3361
3362restart:
3363 /* check inline_data */
3364 ipage = f2fs_get_node_page(sbi, inode->i_ino);
3365 if (IS_ERR(ipage)) {
3366 err = PTR_ERR(ipage);
3367 goto unlock_out;
3368 }
3369
3370 set_new_dnode(&dn, inode, ipage, ipage, 0);
3371
3372 if (f2fs_has_inline_data(inode)) {
3373 if (pos + len <= MAX_INLINE_DATA(inode)) {
3374 f2fs_do_read_inline_data(page, ipage);
3375 set_inode_flag(inode, FI_DATA_EXIST);
3376 if (inode->i_nlink)
3377 set_page_private_inline(ipage);
3378 } else {
3379 err = f2fs_convert_inline_page(&dn, page);
3380 if (err)
3381 goto out;
3382 if (dn.data_blkaddr == NULL_ADDR)
3383 err = f2fs_get_block(&dn, index);
3384 }
3385 } else if (locked) {
3386 err = f2fs_get_block(&dn, index);
3387 } else {
3388 if (f2fs_lookup_extent_cache(inode, index, &ei)) {
3389 dn.data_blkaddr = ei.blk + index - ei.fofs;
3390 } else {
3391 /* hole case */
3392 err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3393 if (err || dn.data_blkaddr == NULL_ADDR) {
3394 f2fs_put_dnode(&dn);
3395 f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
3396 true);
3397 WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
3398 locked = true;
3399 goto restart;
3400 }
3401 }
3402 }
3403
3404 /* convert_inline_page can make node_changed */
3405 *blk_addr = dn.data_blkaddr;
3406 *node_changed = dn.node_changed;
3407out:
3408 f2fs_put_dnode(&dn);
3409unlock_out:
3410 if (locked)
3411 f2fs_do_map_lock(sbi, flag, false);
3412 return err;
3413}
3414
3415static int f2fs_write_begin(struct file *file, struct address_space *mapping,
3416 loff_t pos, unsigned len, unsigned flags,
3417 struct page **pagep, void **fsdata)
3418{
3419 struct inode *inode = mapping->host;
3420 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3421 struct page *page = NULL;
3422 pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
3423 bool need_balance = false, drop_atomic = false;
3424 block_t blkaddr = NULL_ADDR;
3425 int err = 0;
3426
3427 trace_f2fs_write_begin(inode, pos, len, flags);
3428
3429 if (!f2fs_is_checkpoint_ready(sbi)) {
3430 err = -ENOSPC;
3431 goto fail;
3432 }
3433
3434 if ((f2fs_is_atomic_file(inode) &&
3435 !f2fs_available_free_memory(sbi, INMEM_PAGES)) ||
3436 is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
3437 err = -ENOMEM;
3438 drop_atomic = true;
3439 goto fail;
3440 }
3441
3442	/*
3443	 * We should check this here to avoid deadlock between the inode page
3444	 * and page #0. The locking rule for inline_data conversion should be:
3445	 * lock_page(page #0) -> lock_page(inode_page)
3446	 */
3447 if (index != 0) {
3448 err = f2fs_convert_inline_inode(inode);
3449 if (err)
3450 goto fail;
3451 }
3452
3453#ifdef CONFIG_F2FS_FS_COMPRESSION
3454 if (f2fs_compressed_file(inode)) {
3455 int ret;
3456
3457 *fsdata = NULL;
3458
3459 if (len == PAGE_SIZE)
3460 goto repeat;
3461
3462 ret = f2fs_prepare_compress_overwrite(inode, pagep,
3463 index, fsdata);
3464 if (ret < 0) {
3465 err = ret;
3466 goto fail;
3467 } else if (ret) {
3468 return 0;
3469 }
3470 }
3471#endif
3472
3473repeat:
3474	/*
3475	 * Do not use grab_cache_page_write_begin() to avoid the deadlock due to
3476	 * wait_for_stable_page. We will wait for that below with our IO control.
3477	 */
3478 page = f2fs_pagecache_get_page(mapping, index,
3479 FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
3480 if (!page) {
3481 err = -ENOMEM;
3482 goto fail;
3483 }
3484
3485 /* TODO: cluster can be compressed due to race with .writepage */
3486
3487 *pagep = page;
3488
3489 err = prepare_write_begin(sbi, page, pos, len,
3490 &blkaddr, &need_balance);
3491 if (err)
3492 goto fail;
3493
3494 if (need_balance && !IS_NOQUOTA(inode) &&
3495 has_not_enough_free_secs(sbi, 0, 0)) {
3496 unlock_page(page);
3497 f2fs_balance_fs(sbi, true);
3498 lock_page(page);
3499 if (page->mapping != mapping) {
3500 /* The page got truncated from under us */
3501 f2fs_put_page(page, 1);
3502 goto repeat;
3503 }
3504 }
3505
3506 f2fs_wait_on_page_writeback(page, DATA, false, true);
3507
3508 if (len == PAGE_SIZE || PageUptodate(page))
3509 return 0;
3510
3511 if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode) &&
3512 !f2fs_verity_in_progress(inode)) {
3513 zero_user_segment(page, len, PAGE_SIZE);
3514 return 0;
3515 }
3516
3517 if (blkaddr == NEW_ADDR) {
3518 zero_user_segment(page, 0, PAGE_SIZE);
3519 SetPageUptodate(page);
3520 } else {
3521 if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
3522 DATA_GENERIC_ENHANCE_READ)) {
3523 err = -EFSCORRUPTED;
3524 goto fail;
3525 }
3526 err = f2fs_submit_page_read(inode, page, blkaddr, 0, true);
3527 if (err)
3528 goto fail;
3529
3530 lock_page(page);
3531 if (unlikely(page->mapping != mapping)) {
3532 f2fs_put_page(page, 1);
3533 goto repeat;
3534 }
3535 if (unlikely(!PageUptodate(page))) {
3536 err = -EIO;
3537 goto fail;
3538 }
3539 }
3540 return 0;
3541
3542fail:
3543 f2fs_put_page(page, 1);
3544 f2fs_write_failed(inode, pos + len);
3545 if (drop_atomic)
3546 f2fs_drop_inmem_pages_all(sbi, false);
3547 return err;
3548}
3549
3550static int f2fs_write_end(struct file *file,
3551 struct address_space *mapping,
3552 loff_t pos, unsigned len, unsigned copied,
3553 struct page *page, void *fsdata)
3554{
3555 struct inode *inode = page->mapping->host;
3556
3557 trace_f2fs_write_end(inode, pos, len, copied);
3558
3559	/*
3560	 * This should come from len == PAGE_SIZE, and we expect copied
3561	 * to be PAGE_SIZE. Otherwise, we treat it as zero copied and
3562	 * let generic_perform_write() try to copy the data again via copied=0.
3563	 */
3564 if (!PageUptodate(page)) {
3565 if (unlikely(copied != len))
3566 copied = 0;
3567 else
3568 SetPageUptodate(page);
3569 }
3570
3571#ifdef CONFIG_F2FS_FS_COMPRESSION
3572 /* overwrite compressed file */
3573 if (f2fs_compressed_file(inode) && fsdata) {
3574 f2fs_compress_write_end(inode, fsdata, page->index, copied);
3575 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3576
3577 if (pos + copied > i_size_read(inode) &&
3578 !f2fs_verity_in_progress(inode))
3579 f2fs_i_size_write(inode, pos + copied);
3580 return copied;
3581 }
3582#endif
3583
3584 if (!copied)
3585 goto unlock_out;
3586
3587 set_page_dirty(page);
3588
3589 if (pos + copied > i_size_read(inode) &&
3590 !f2fs_verity_in_progress(inode))
3591 f2fs_i_size_write(inode, pos + copied);
3592unlock_out:
3593 f2fs_put_page(page, 1);
3594 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3595 return copied;
3596}
3597
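/*
 * Validate a direct I/O request. Returns 0 when DIO may proceed, 1 when the
 * caller should silently fall back to buffered I/O (reads past EOF, or
 * misalignment that the block device's logical block size can still
 * tolerate), and -EINVAL when the request is misaligned even for the
 * device's logical block size.
 */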
3598static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
3599 loff_t offset)
3600{
3601 unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
3602 unsigned blkbits = i_blkbits;
3603 unsigned blocksize_mask = (1 << blkbits) - 1;
3604 unsigned long align = offset | iov_iter_alignment(iter);
3605 struct block_device *bdev = inode->i_sb->s_bdev;
3606
3607 if (iov_iter_rw(iter) == READ && offset >= i_size_read(inode))
3608 return 1;
3609
3610 if (align & blocksize_mask) {
3611 if (bdev)
3612 blkbits = blksize_bits(bdev_logical_block_size(bdev));
3613 blocksize_mask = (1 << blkbits) - 1;
3614 if (align & blocksize_mask)
3615 return -EINVAL;
3616 return 1;
3617 }
3618 return 0;
3619}
3620
3621static void f2fs_dio_end_io(struct bio *bio)
3622{
3623 struct f2fs_private_dio *dio = bio->bi_private;
3624
3625 dec_page_count(F2FS_I_SB(dio->inode),
3626 dio->write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
3627
3628 bio->bi_private = dio->orig_private;
3629 bio->bi_end_io = dio->orig_end_io;
3630
3631 kfree(dio);
3632
3633 bio_endio(bio);
3634}
3635
3636static void f2fs_dio_submit_bio(struct bio *bio, struct inode *inode,
3637 loff_t file_offset)
3638{
3639 struct f2fs_private_dio *dio;
3640 bool write = (bio_op(bio) == REQ_OP_WRITE);
3641
3642 dio = f2fs_kzalloc(F2FS_I_SB(inode),
3643 sizeof(struct f2fs_private_dio), GFP_NOFS);
3644 if (!dio)
3645 goto out;
3646
3647 dio->inode = inode;
3648 dio->orig_end_io = bio->bi_end_io;
3649 dio->orig_private = bio->bi_private;
3650 dio->write = write;
3651
3652 bio->bi_end_io = f2fs_dio_end_io;
3653 bio->bi_private = dio;
3654
3655 inc_page_count(F2FS_I_SB(inode),
3656 write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
3657
3658 submit_bio(bio);
3659 return;
3660out:
3661 bio->bi_status = BLK_STS_IOERR;
3662 bio_endio(bio);
3663}
3664
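/*
 * Direct I/O entry point, built on __blockdev_direct_IO(). GC is excluded
 * via i_gc_rwsem (trylock only under IOCB_NOWAIT); LFS-mode writes (do_opu)
 * additionally hold the READ side, since they allocate new blocks out of
 * place.
 */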
3665static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
3666{
3667 struct address_space *mapping = iocb->ki_filp->f_mapping;
3668 struct inode *inode = mapping->host;
3669 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3670 struct f2fs_inode_info *fi = F2FS_I(inode);
3671 size_t count = iov_iter_count(iter);
3672 loff_t offset = iocb->ki_pos;
3673 int rw = iov_iter_rw(iter);
3674 int err;
3675 enum rw_hint hint = iocb->ki_hint;
3676 int whint_mode = F2FS_OPTION(sbi).whint_mode;
3677 bool do_opu;
3678
3679 err = check_direct_IO(inode, iter, offset);
3680 if (err)
3681 return err < 0 ? err : 0;
3682
3683 if (f2fs_force_buffered_io(inode, iocb, iter))
3684 return 0;
3685
3686 do_opu = rw == WRITE && f2fs_lfs_mode(sbi);
3687
3688 trace_f2fs_direct_IO_enter(inode, offset, count, rw);
3689
3690 if (rw == WRITE && whint_mode == WHINT_MODE_OFF)
3691 iocb->ki_hint = WRITE_LIFE_NOT_SET;
3692
3693 if (iocb->ki_flags & IOCB_NOWAIT) {
3694 if (!down_read_trylock(&fi->i_gc_rwsem[rw])) {
3695 iocb->ki_hint = hint;
3696 err = -EAGAIN;
3697 goto out;
3698 }
3699 if (do_opu && !down_read_trylock(&fi->i_gc_rwsem[READ])) {
3700 up_read(&fi->i_gc_rwsem[rw]);
3701 iocb->ki_hint = hint;
3702 err = -EAGAIN;
3703 goto out;
3704 }
3705 } else {
3706 down_read(&fi->i_gc_rwsem[rw]);
3707 if (do_opu)
3708 down_read(&fi->i_gc_rwsem[READ]);
3709 }
3710
3711 err = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
3712 iter, rw == WRITE ? get_data_block_dio_write :
3713 get_data_block_dio, NULL, f2fs_dio_submit_bio,
3714 rw == WRITE ? DIO_LOCKING | DIO_SKIP_HOLES :
3715 DIO_SKIP_HOLES);
3716
3717 if (do_opu)
3718 up_read(&fi->i_gc_rwsem[READ]);
3719
3720 up_read(&fi->i_gc_rwsem[rw]);
3721
3722 if (rw == WRITE) {
3723 if (whint_mode == WHINT_MODE_OFF)
3724 iocb->ki_hint = hint;
3725 if (err > 0) {
3726 f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
3727 err);
3728 if (!do_opu)
3729 set_inode_flag(inode, FI_UPDATE_WRITE);
3730 } else if (err == -EIOCBQUEUED) {
3731 f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
3732 count - iov_iter_count(iter));
3733 } else if (err < 0) {
3734 f2fs_write_failed(inode, offset + count);
3735 }
3736 } else {
3737 if (err > 0)
3738 f2fs_update_iostat(sbi, APP_DIRECT_READ_IO, err);
3739 else if (err == -EIOCBQUEUED)
3740 f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_READ_IO,
3741 count - iov_iter_count(iter));
3742 }
3743
3744out:
3745 trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
3746
3747 return err;
3748}
3749
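/*
 * ->invalidatepage: called when (part of) the page is removed from the page
 * cache. Partial invalidation is ignored for non-meta inodes; full-page
 * invalidation drops the dirty accounting, compress-cache state, and any
 * private data attached to the page.
 */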
3750void f2fs_invalidate_page(struct page *page, unsigned int offset,
3751 unsigned int length)
3752{
3753 struct inode *inode = page->mapping->host;
3754 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3755
3756 if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
3757 (offset % PAGE_SIZE || length != PAGE_SIZE))
3758 return;
3759
3760 if (PageDirty(page)) {
3761 if (inode->i_ino == F2FS_META_INO(sbi)) {
3762 dec_page_count(sbi, F2FS_DIRTY_META);
3763 } else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
3764 dec_page_count(sbi, F2FS_DIRTY_NODES);
3765 } else {
3766 inode_dec_dirty_pages(inode);
3767 f2fs_remove_dirty_inode(inode);
3768 }
3769 }
3770
3771 clear_page_private_gcing(page);
3772
3773 if (test_opt(sbi, COMPRESS_CACHE)) {
3774 if (f2fs_compressed_file(inode))
3775 f2fs_invalidate_compress_pages(sbi, inode->i_ino);
3776 if (inode->i_ino == F2FS_COMPRESS_INO(sbi))
3777 clear_page_private_data(page);
3778 }
3779
3780 if (page_private_atomic(page))
3781 return f2fs_drop_inmem_page(inode, page);
3782
3783 detach_page_private(page);
3784 set_page_private(page, 0);
3785}
3786
3787int f2fs_release_page(struct page *page, gfp_t wait)
3788{
3789	/* If this is a dirty page, keep PagePrivate */
3790 if (PageDirty(page))
3791 return 0;
3792
3793	/* This is an atomic-written page, keep PagePrivate */
3794 if (page_private_atomic(page))
3795 return 0;
3796
3797 if (test_opt(F2FS_P_SB(page), COMPRESS_CACHE)) {
3798 struct f2fs_sb_info *sbi = F2FS_P_SB(page);
3799 struct inode *inode = page->mapping->host;
3800
3801 if (f2fs_compressed_file(inode))
3802 f2fs_invalidate_compress_pages(sbi, inode->i_ino);
3803 if (inode->i_ino == F2FS_COMPRESS_INO(sbi))
3804 clear_page_private_data(page);
3805 }
3806
3807 clear_page_private_gcing(page);
3808
3809 detach_page_private(page);
3810 set_page_private(page, 0);
3811 return 1;
3812}
3813
3814static int f2fs_set_data_page_dirty(struct page *page)
3815{
3816 struct inode *inode = page_file_mapping(page)->host;
3817
3818 trace_f2fs_set_page_dirty(page, DATA);
3819
3820 if (!PageUptodate(page))
3821 SetPageUptodate(page);
3822 if (PageSwapCache(page))
3823 return __set_page_dirty_nobuffers(page);
3824
3825 if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
3826 if (!page_private_atomic(page)) {
3827 f2fs_register_inmem_page(inode, page);
3828 return 1;
3829 }
3830		/*
3831		 * This page has already been registered, so just
3832		 * return here.
3833		 */
3834 return 0;
3835 }
3836
3837 if (!PageDirty(page)) {
3838 __set_page_dirty_nobuffers(page);
3839 f2fs_update_dirty_page(inode, page);
3840 return 1;
3841 }
3842 return 0;
3843}
3844
3845
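/*
 * bmap helper for compressed inodes: return the physical block backing
 * @block, or 0 when the block sits in a compressed cluster (so no 1:1
 * mapping exists) or its address is invalid.
 */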
3846static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block)
3847{
3848#ifdef CONFIG_F2FS_FS_COMPRESSION
3849 struct dnode_of_data dn;
3850 sector_t start_idx, blknr = 0;
3851 int ret;
3852
3853 start_idx = round_down(block, F2FS_I(inode)->i_cluster_size);
3854
3855 set_new_dnode(&dn, inode, NULL, NULL, 0);
3856 ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
3857 if (ret)
3858 return 0;
3859
3860 if (dn.data_blkaddr != COMPRESS_ADDR) {
3861 dn.ofs_in_node += block - start_idx;
3862 blknr = f2fs_data_blkaddr(&dn);
3863 if (!__is_valid_data_blkaddr(blknr))
3864 blknr = 0;
3865 }
3866
3867 f2fs_put_dnode(&dn);
3868 return blknr;
3869#else
3870 return 0;
3871#endif
3872}
3873
3875static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
3876{
3877 struct inode *inode = mapping->host;
3878 sector_t blknr = 0;
3879
3880 if (f2fs_has_inline_data(inode))
3881 goto out;
3882
3883	/* make sure all the blocks are allocated */
3884 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
3885 filemap_write_and_wait(mapping);
3886
3887 /* Block number less than F2FS MAX BLOCKS */
3888 if (unlikely(block >= max_file_blocks(inode)))
3889 goto out;
3890
3891 if (f2fs_compressed_file(inode)) {
3892 blknr = f2fs_bmap_compress(inode, block);
3893 } else {
3894 struct f2fs_map_blocks map;
3895
3896 memset(&map, 0, sizeof(map));
3897 map.m_lblk = block;
3898 map.m_len = 1;
3899 map.m_next_pgofs = NULL;
3900 map.m_seg_type = NO_CHECK_TYPE;
3901
3902 if (!f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_BMAP))
3903 blknr = map.m_pblk;
3904 }
3905out:
3906 trace_f2fs_bmap(inode, block, blknr);
3907 return blknr;
3908}
3909
3910#ifdef CONFIG_MIGRATION
3911#include <linux/migrate.h>
3912
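/*
 * Page migration callback. Atomic-written pages carry an extra reference
 * and are tracked on fi->inmem_pages, so the list entry is repointed to the
 * new page under inmem_lock before the private state is transferred.
 */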
3913int f2fs_migrate_page(struct address_space *mapping,
3914 struct page *newpage, struct page *page, enum migrate_mode mode)
3915{
3916 int rc, extra_count;
3917 struct f2fs_inode_info *fi = F2FS_I(mapping->host);
3918 bool atomic_written = page_private_atomic(page);
3919
3920 BUG_ON(PageWriteback(page));
3921
3922	/* migrating an atomic-written page is safe with the inmem_lock held */
3923 if (atomic_written) {
3924 if (mode != MIGRATE_SYNC)
3925 return -EBUSY;
3926 if (!mutex_trylock(&fi->inmem_lock))
3927 return -EAGAIN;
3928 }
3929
3930 /* one extra reference was held for atomic_write page */
3931 extra_count = atomic_written ? 1 : 0;
3932 rc = migrate_page_move_mapping(mapping, newpage,
3933 page, extra_count);
3934 if (rc != MIGRATEPAGE_SUCCESS) {
3935 if (atomic_written)
3936 mutex_unlock(&fi->inmem_lock);
3937 return rc;
3938 }
3939
3940 if (atomic_written) {
3941 struct inmem_pages *cur;
3942
3943 list_for_each_entry(cur, &fi->inmem_pages, list)
3944 if (cur->page == page) {
3945 cur->page = newpage;
3946 break;
3947 }
3948 mutex_unlock(&fi->inmem_lock);
3949 put_page(page);
3950 get_page(newpage);
3951 }
3952
3953	/* guarantee that we start with no stale private field */
3954 set_page_private(newpage, 0);
3955 if (PagePrivate(page)) {
3956 set_page_private(newpage, page_private(page));
3957 SetPagePrivate(newpage);
3958 get_page(newpage);
3959
3960 set_page_private(page, 0);
3961 ClearPagePrivate(page);
3962 put_page(page);
3963 }
3964
3965 if (mode != MIGRATE_SYNC_NO_COPY)
3966 migrate_page_copy(newpage, page);
3967 else
3968 migrate_page_states(newpage, page);
3969
3970 return MIGRATEPAGE_SUCCESS;
3971}
3972#endif
3973
3974#ifdef CONFIG_SWAP
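/*
 * Rewrite [start_blk, start_blk + blkcnt) into freshly allocated pinned
 * sections (CURSEG_COLD_DATA_PINNED) so that a would-be swapfile extent
 * becomes section-aligned: every page is dirtied and written back one
 * section at a time under FI_ALIGNED_WRITE.
 */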
3975static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
3976 unsigned int blkcnt)
3977{
3978 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3979 unsigned int blkofs;
3980 unsigned int blk_per_sec = BLKS_PER_SEC(sbi);
3981 unsigned int secidx = start_blk / blk_per_sec;
3982 unsigned int end_sec = secidx + blkcnt / blk_per_sec;
3983 int ret = 0;
3984
3985 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3986 filemap_invalidate_lock(inode->i_mapping);
3987
3988 set_inode_flag(inode, FI_ALIGNED_WRITE);
3989
3990 for (; secidx < end_sec; secidx++) {
3991 down_write(&sbi->pin_sem);
3992
3993 f2fs_lock_op(sbi);
3994 f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
3995 f2fs_unlock_op(sbi);
3996
3997 set_inode_flag(inode, FI_DO_DEFRAG);
3998
3999 for (blkofs = 0; blkofs < blk_per_sec; blkofs++) {
4000 struct page *page;
4001 unsigned int blkidx = secidx * blk_per_sec + blkofs;
4002
4003 page = f2fs_get_lock_data_page(inode, blkidx, true);
4004 if (IS_ERR(page)) {
4005 up_write(&sbi->pin_sem);
4006 ret = PTR_ERR(page);
4007 goto done;
4008 }
4009
4010 set_page_dirty(page);
4011 f2fs_put_page(page, 1);
4012 }
4013
4014 clear_inode_flag(inode, FI_DO_DEFRAG);
4015
4016 ret = filemap_fdatawrite(inode->i_mapping);
4017
4018 up_write(&sbi->pin_sem);
4019
4020 if (ret)
4021 break;
4022 }
4023
4024done:
4025 clear_inode_flag(inode, FI_DO_DEFRAG);
4026 clear_inode_flag(inode, FI_ALIGNED_WRITE);
4027
4028 filemap_invalidate_unlock(inode->i_mapping);
4029 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4030
4031 return ret;
4032}
4033
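/*
 * Build the swap extent list for swapon: walk the file with
 * F2FS_GET_BLOCK_FIEMAP, reject holes, migrate section-unaligned extents
 * via f2fs_migrate_blocks() and retry, then register each aligned run with
 * add_swap_extent(). Returns the number of extents or a negative error.
 */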
4034static int check_swap_activate(struct swap_info_struct *sis,
4035 struct file *swap_file, sector_t *span)
4036{
4037 struct address_space *mapping = swap_file->f_mapping;
4038 struct inode *inode = mapping->host;
4039 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4040 sector_t cur_lblock;
4041 sector_t last_lblock;
4042 sector_t pblock;
4043 sector_t lowest_pblock = -1;
4044 sector_t highest_pblock = 0;
4045 int nr_extents = 0;
4046 unsigned long nr_pblocks;
4047 unsigned int blks_per_sec = BLKS_PER_SEC(sbi);
4048 unsigned int sec_blks_mask = BLKS_PER_SEC(sbi) - 1;
4049 unsigned int not_aligned = 0;
4050 int ret = 0;
4051
4052 /*
4053 * Map all the blocks into the extent list. This code doesn't try
4054 * to be very smart.
4055 */
4056 cur_lblock = 0;
4057 last_lblock = bytes_to_blks(inode, i_size_read(inode));
4058
4059 while (cur_lblock < last_lblock && cur_lblock < sis->max) {
4060 struct f2fs_map_blocks map;
4061retry:
4062 cond_resched();
4063
4064 memset(&map, 0, sizeof(map));
4065 map.m_lblk = cur_lblock;
4066 map.m_len = last_lblock - cur_lblock;
4067 map.m_next_pgofs = NULL;
4068 map.m_next_extent = NULL;
4069 map.m_seg_type = NO_CHECK_TYPE;
4070 map.m_may_create = false;
4071
4072 ret = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
4073 if (ret)
4074 goto out;
4075
4076 /* hole */
4077 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
4078 f2fs_err(sbi, "Swapfile has holes");
4079 ret = -EINVAL;
4080 goto out;
4081 }
4082
4083 pblock = map.m_pblk;
4084 nr_pblocks = map.m_len;
4085
4086 if ((pblock - SM_I(sbi)->main_blkaddr) & sec_blks_mask ||
4087 nr_pblocks & sec_blks_mask) {
4088 not_aligned++;
4089
4090 nr_pblocks = roundup(nr_pblocks, blks_per_sec);
4091 if (cur_lblock + nr_pblocks > sis->max)
4092 nr_pblocks -= blks_per_sec;
4093
4094 if (!nr_pblocks) {
4095				/* this extent is the last one */
4096 nr_pblocks = map.m_len;
4097 f2fs_warn(sbi, "Swapfile: last extent is not aligned to section");
4098 goto next;
4099 }
4100
4101 ret = f2fs_migrate_blocks(inode, cur_lblock,
4102 nr_pblocks);
4103 if (ret)
4104 goto out;
4105 goto retry;
4106 }
4107next:
4108 if (cur_lblock + nr_pblocks >= sis->max)
4109 nr_pblocks = sis->max - cur_lblock;
4110
4111 if (cur_lblock) { /* exclude the header page */
4112 if (pblock < lowest_pblock)
4113 lowest_pblock = pblock;
4114 if (pblock + nr_pblocks - 1 > highest_pblock)
4115 highest_pblock = pblock + nr_pblocks - 1;
4116 }
4117
4118 /*
4119 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
4120 */
4121 ret = add_swap_extent(sis, cur_lblock, nr_pblocks, pblock);
4122 if (ret < 0)
4123 goto out;
4124 nr_extents += ret;
4125 cur_lblock += nr_pblocks;
4126 }
	ret = nr_extents;
	*span = 1 + highest_pblock - lowest_pblock;
	if (cur_lblock == 0)
		cur_lblock = 1;	/* force Empty message */
	sis->max = cur_lblock;
	sis->pages = cur_lblock - 1;
	sis->highest_bit = cur_lblock - 1;
out:
	if (not_aligned)
		f2fs_warn(sbi, "Swapfile (%u) is not aligned to section: 1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate(%u * N)",
			  not_aligned, blks_per_sec * F2FS_BLKSIZE);
	return ret;
}

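/*
 * ->swap_activate() for f2fs.  A swapfile must be a regular file on a
 * writable, non-LFS filesystem, with inline data converted out and
 * compression disabled; it is then pinned so GC and defragmentation
 * cannot relocate its blocks while it is in use as swap space.
 */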
static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
				sector_t *span)
{
	struct inode *inode = file_inode(file);
	int ret;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
		return -EROFS;

	if (f2fs_lfs_mode(F2FS_I_SB(inode))) {
		f2fs_err(F2FS_I_SB(inode),
			"Swapfile not supported in LFS mode");
		return -EINVAL;
	}

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	if (!f2fs_disable_compressed_file(inode))
		return -EINVAL;

	f2fs_precache_extents(inode);

	ret = check_swap_activate(sis, file, span);
	if (ret < 0)
		return ret;

	set_inode_flag(inode, FI_PIN_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return ret;
}

static void f2fs_swap_deactivate(struct file *file)
{
	struct inode *inode = file_inode(file);

	clear_inode_flag(inode, FI_PIN_FILE);
}
#else
static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
				sector_t *span)
{
	return -EOPNOTSUPP;
}

static void f2fs_swap_deactivate(struct file *file)
{
}
#endif

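/*
 * Address space operations for regular-file data pages; node and meta
 * pages have their own tables in node.c and checkpoint.c.
 */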
const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readahead	= f2fs_readahead,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
	.swap_activate	= f2fs_swap_activate,
	.swap_deactivate = f2fs_swap_deactivate,
#ifdef CONFIG_MIGRATION
	.migratepage	= f2fs_migrate_page,
#endif
};

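/*
 * Drop the PAGECACHE_TAG_DIRTY mark for a page directly in the
 * mapping's xarray.  Used when a page is cleaned outside the normal
 * writeback path, so tag-based writeback lookups no longer see it.
 */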
void f2fs_clear_page_cache_dirty_tag(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long flags;

	xa_lock_irqsave(&mapping->i_pages, flags);
	__xa_clear_mark(&mapping->i_pages, page_index(page),
						PAGECACHE_TAG_DIRTY);
	xa_unlock_irqrestore(&mapping->i_pages, flags);
}

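/*
 * The post-read contexts are backed by a mempool on top of the slab
 * cache: NUM_PREALLOC_POST_READ_CTXS objects are preallocated so that
 * reads needing decryption, decompression or verity can always make
 * forward progress, even under memory pressure.
 */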
int __init f2fs_init_post_read_processing(void)
{
	bio_post_read_ctx_cache =
		kmem_cache_create("f2fs_bio_post_read_ctx",
				sizeof(struct bio_post_read_ctx), 0, 0, NULL);
	if (!bio_post_read_ctx_cache)
		goto fail;
	bio_post_read_ctx_pool =
		mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
					bio_post_read_ctx_cache);
	if (!bio_post_read_ctx_pool)
		goto fail_free_cache;
	return 0;

fail_free_cache:
	kmem_cache_destroy(bio_post_read_ctx_cache);
fail:
	return -ENOMEM;
}

void f2fs_destroy_post_read_processing(void)
{
	mempool_destroy(bio_post_read_ctx_pool);
	kmem_cache_destroy(bio_post_read_ctx_cache);
}

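/*
 * Per-superblock workqueue for post-read work, allocated only when
 * some post-read step can actually run (encryption, verity or
 * compression).  WQ_UNBOUND lets the work run on any CPU and
 * WQ_HIGHPRI keeps read-completion latency low.
 */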
int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi)
{
	if (!f2fs_sb_has_encrypt(sbi) &&
		!f2fs_sb_has_verity(sbi) &&
		!f2fs_sb_has_compression(sbi))
		return 0;

	sbi->post_read_wq = alloc_workqueue("f2fs_post_read_wq",
						WQ_UNBOUND | WQ_HIGHPRI,
						num_online_cpus());
	if (!sbi->post_read_wq)
		return -ENOMEM;
	return 0;
}

void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi)
{
	if (sbi->post_read_wq)
		destroy_workqueue(sbi->post_read_wq);
}

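/*
 * Slab for struct bio_entry, used by the write-merge path
 * (f2fs_merge_page_bio()) to keep in-flight bios on a list so later
 * pages can still be merged into them.
 */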
int __init f2fs_init_bio_entry_cache(void)
{
	bio_entry_slab = f2fs_kmem_cache_create("f2fs_bio_entry_slab",
					sizeof(struct bio_entry));
	if (!bio_entry_slab)
		return -ENOMEM;
	return 0;
}

void f2fs_destroy_bio_entry_cache(void)
{
	kmem_cache_destroy(bio_entry_slab);
}