// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/checkpoint.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/f2fs_fs.h>
#include <linux/pagevec.h>
#include <linux/swap.h>
#include <linux/kthread.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

#define DEFAULT_CHECKPOINT_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))

static struct kmem_cache *ino_entry_slab;
struct kmem_cache *f2fs_inode_entry_slab;

void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io)
{
	f2fs_build_fault_attr(sbi, 0, 0);
	set_ckpt_flags(sbi, CP_ERROR_FLAG);
	if (!end_io)
		f2fs_flush_merged_writes(sbi);
}

/*
 * We guarantee no failure on the returned page.
 */
struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
	struct address_space *mapping = META_MAPPING(sbi);
	struct page *page;
repeat:
	page = f2fs_grab_cache_page(mapping, index, false);
	if (!page) {
		cond_resched();
		goto repeat;
	}
	f2fs_wait_on_page_writeback(page, META, true, true);
	if (!PageUptodate(page))
		SetPageUptodate(page);
	return page;
}
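/*
 * Note: the open-coded retry loop above is what implements the "no
 * failure" guarantee; on page-cache allocation failure the caller yields
 * with cond_resched() and tries again rather than returning NULL.
 */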

static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
							bool is_meta)
{
	struct address_space *mapping = META_MAPPING(sbi);
	struct page *page;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = META,
		.op = REQ_OP_READ,
		.op_flags = REQ_META | REQ_PRIO,
		.old_blkaddr = index,
		.new_blkaddr = index,
		.encrypted_page = NULL,
		.is_por = !is_meta,
	};
	int err;

	if (unlikely(!is_meta))
		fio.op_flags &= ~REQ_META;
repeat:
	page = f2fs_grab_cache_page(mapping, index, false);
	if (!page) {
		cond_resched();
		goto repeat;
	}
	if (PageUptodate(page))
		goto out;

	fio.page = page;

	err = f2fs_submit_page_bio(&fio);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}

	f2fs_update_iostat(sbi, FS_META_READ_IO, F2FS_BLKSIZE);

	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}

	if (unlikely(!PageUptodate(page))) {
		f2fs_handle_page_eio(sbi, page->index, META);
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
out:
	return page;
}
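/*
 * Note: after the read I/O completes, __get_meta_page() re-locks the page
 * and re-checks page->mapping; if the page was truncated from the meta
 * mapping while the bio was in flight, it is dropped and the lookup
 * restarts, so callers never see a stale page.
 */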

struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
	return __get_meta_page(sbi, index, true);
}

struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index)
{
	struct page *page;
	int count = 0;

retry:
	page = __get_meta_page(sbi, index, true);
	if (IS_ERR(page)) {
		if (PTR_ERR(page) == -EIO &&
				++count <= DEFAULT_RETRY_IO_COUNT)
			goto retry;
		f2fs_stop_checkpoint(sbi, false);
	}
	return page;
}

/* for POR only */
struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
	return __get_meta_page(sbi, index, false);
}

static bool __is_bitmap_valid(struct f2fs_sb_info *sbi, block_t blkaddr,
					int type)
{
	struct seg_entry *se;
	unsigned int segno, offset;
	bool exist;

	if (type != DATA_GENERIC_ENHANCE && type != DATA_GENERIC_ENHANCE_READ)
		return true;

	segno = GET_SEGNO(sbi, blkaddr);
	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	se = get_seg_entry(sbi, segno);

	exist = f2fs_test_bit(offset, se->cur_valid_map);
	if (!exist && type == DATA_GENERIC_ENHANCE) {
		f2fs_err(sbi, "Inconsistent error blkaddr:%u, sit bitmap:%d",
			 blkaddr, exist);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		dump_stack();
	}
	return exist;
}

bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type)
{
	switch (type) {
	case META_NAT:
		break;
	case META_SIT:
		if (unlikely(blkaddr >= SIT_BLK_CNT(sbi)))
			return false;
		break;
	case META_SSA:
		if (unlikely(blkaddr >= MAIN_BLKADDR(sbi) ||
			blkaddr < SM_I(sbi)->ssa_blkaddr))
			return false;
		break;
	case META_CP:
		if (unlikely(blkaddr >= SIT_I(sbi)->sit_base_addr ||
			blkaddr < __start_cp_addr(sbi)))
			return false;
		break;
	case META_POR:
		if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
			blkaddr < MAIN_BLKADDR(sbi)))
			return false;
		break;
	case DATA_GENERIC:
	case DATA_GENERIC_ENHANCE:
	case DATA_GENERIC_ENHANCE_READ:
		if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
				blkaddr < MAIN_BLKADDR(sbi))) {
			f2fs_warn(sbi, "access invalid blkaddr:%u",
				  blkaddr);
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			dump_stack();
			return false;
		} else {
			return __is_bitmap_valid(sbi, blkaddr, type);
		}
		break;
	case META_GENERIC:
		if (unlikely(blkaddr < SEG0_BLKADDR(sbi) ||
			blkaddr >= MAIN_BLKADDR(sbi)))
			return false;
		break;
	default:
		BUG();
	}

	return true;
}
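/*
 * Note (from the checks above): META_CP addresses must fall in
 * [__start_cp_addr, sit_base_addr), META_SSA in [ssa_blkaddr,
 * MAIN_BLKADDR), META_POR and the DATA_GENERIC* types in
 * [MAIN_BLKADDR, MAX_BLKADDR), and META_GENERIC anywhere in the
 * metadata area below MAIN_BLKADDR. META_NAT and META_SIT come in as
 * block indexes rather than raw addresses, so only the SIT count is
 * bounds-checked here; the ENHANCE variants additionally consult the
 * SIT bitmap via __is_bitmap_valid().
 */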

/*
 * Readahead CP/NAT/SIT/SSA/POR pages
 */
int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
							int type, bool sync)
{
	struct page *page;
	block_t blkno = start;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = META,
		.op = REQ_OP_READ,
		.op_flags = sync ? (REQ_META | REQ_PRIO) : REQ_RAHEAD,
		.encrypted_page = NULL,
		.in_list = false,
		.is_por = (type == META_POR),
	};
	struct blk_plug plug;
	int err;

	if (unlikely(type == META_POR))
		fio.op_flags &= ~REQ_META;

	blk_start_plug(&plug);
	for (; nrpages-- > 0; blkno++) {

		if (!f2fs_is_valid_blkaddr(sbi, blkno, type))
			goto out;

		switch (type) {
		case META_NAT:
			if (unlikely(blkno >=
					NAT_BLOCK_OFFSET(NM_I(sbi)->max_nid)))
				blkno = 0;
			/* get nat block addr */
			fio.new_blkaddr = current_nat_addr(sbi,
					blkno * NAT_ENTRY_PER_BLOCK);
			break;
		case META_SIT:
			if (unlikely(blkno >= TOTAL_SEGS(sbi)))
				goto out;
			/* get sit block addr */
			fio.new_blkaddr = current_sit_addr(sbi,
					blkno * SIT_ENTRY_PER_BLOCK);
			break;
		case META_SSA:
		case META_CP:
		case META_POR:
			fio.new_blkaddr = blkno;
			break;
		default:
			BUG();
		}

		page = f2fs_grab_cache_page(META_MAPPING(sbi),
						fio.new_blkaddr, false);
		if (!page)
			continue;
		if (PageUptodate(page)) {
			f2fs_put_page(page, 1);
			continue;
		}

		fio.page = page;
		err = f2fs_submit_page_bio(&fio);
		f2fs_put_page(page, err ? 1 : 0);

		if (!err)
			f2fs_update_iostat(sbi, FS_META_READ_IO, F2FS_BLKSIZE);
	}
out:
	blk_finish_plug(&plug);
	return blkno - start;
}
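/*
 * Note: the blk_start_plug()/blk_finish_plug() pair above batches the
 * per-page read bios so the block layer can merge them into larger
 * requests before dispatch; the return value is how many blocks were
 * walked, not how many reads were actually issued.
 */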

void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index,
							unsigned int ra_blocks)
{
	struct page *page;
	bool readahead = false;

	if (ra_blocks == RECOVERY_MIN_RA_BLOCKS)
		return;

	page = find_get_page(META_MAPPING(sbi), index);
	if (!page || !PageUptodate(page))
		readahead = true;
	f2fs_put_page(page, 0);

	if (readahead)
		f2fs_ra_meta_pages(sbi, index, ra_blocks, META_POR, true);
}

static int __f2fs_write_meta_page(struct page *page,
				struct writeback_control *wbc,
				enum iostat_type io_type)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);

	trace_f2fs_writepage(page, META);

	if (unlikely(f2fs_cp_error(sbi)))
		goto redirty_out;
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0))
		goto redirty_out;

	f2fs_do_write_meta_page(sbi, page, io_type);
	dec_page_count(sbi, F2FS_DIRTY_META);

	if (wbc->for_reclaim)
		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, META);

	unlock_page(page);

	if (unlikely(f2fs_cp_error(sbi)))
		f2fs_submit_merged_write(sbi, META);

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int f2fs_write_meta_page(struct page *page,
				struct writeback_control *wbc)
{
	return __f2fs_write_meta_page(page, wbc, FS_META_IO);
}

static int f2fs_write_meta_pages(struct address_space *mapping,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	long diff, written;

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	/* collect a number of dirty meta pages and write together */
	if (wbc->sync_mode != WB_SYNC_ALL &&
			get_pages(sbi, F2FS_DIRTY_META) <
					nr_pages_to_skip(sbi, META))
		goto skip_write;

	/* if locking failed, cp will flush dirty pages instead */
	if (!f2fs_down_write_trylock(&sbi->cp_global_sem))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, META);
	diff = nr_pages_to_write(sbi, META, wbc);
	written = f2fs_sync_meta_pages(sbi, META, wbc->nr_to_write, FS_META_IO);
	f2fs_up_write(&sbi->cp_global_sem);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_META);
	trace_f2fs_writepages(mapping->host, wbc, META);
	return 0;
}

long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
				long nr_to_write, enum iostat_type io_type)
{
	struct address_space *mapping = META_MAPPING(sbi);
	pgoff_t index = 0, prev = ULONG_MAX;
	struct pagevec pvec;
	long nwritten = 0;
	int nr_pages;
	struct writeback_control wbc = {
		.for_reclaim = 0,
	};
	struct blk_plug plug;

	pagevec_init(&pvec);

	blk_start_plug(&plug);

	while ((nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
				PAGECACHE_TAG_DIRTY))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (prev == ULONG_MAX)
				prev = page->index - 1;
			if (nr_to_write != LONG_MAX && page->index != prev + 1) {
				pagevec_release(&pvec);
				goto stop;
			}

			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			f2fs_wait_on_page_writeback(page, META, true, true);

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			if (__f2fs_write_meta_page(page, &wbc, io_type)) {
				unlock_page(page);
				break;
			}
			nwritten++;
			prev = page->index;
			if (unlikely(nwritten >= nr_to_write))
				break;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
stop:
	if (nwritten)
		f2fs_submit_merged_write(sbi, type);

	blk_finish_plug(&plug);

	return nwritten;
}

static bool f2fs_dirty_meta_folio(struct address_space *mapping,
		struct folio *folio)
{
	trace_f2fs_set_page_dirty(&folio->page, META);

	if (!folio_test_uptodate(folio))
		folio_mark_uptodate(folio);
	if (!folio_test_dirty(folio)) {
		filemap_dirty_folio(mapping, folio);
		inc_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_META);
		set_page_private_reference(&folio->page);
		return true;
	}
	return false;
}

const struct address_space_operations f2fs_meta_aops = {
	.writepage	= f2fs_write_meta_page,
	.writepages	= f2fs_write_meta_pages,
	.dirty_folio	= f2fs_dirty_meta_folio,
	.invalidate_folio = f2fs_invalidate_folio,
	.release_folio	= f2fs_release_folio,
	.migrate_folio	= filemap_migrate_folio,
};

static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino,
						unsigned int devidx, int type)
{
	struct inode_management *im = &sbi->im[type];
	struct ino_entry *e = NULL, *new = NULL;

	if (type == FLUSH_INO) {
		rcu_read_lock();
		e = radix_tree_lookup(&im->ino_root, ino);
		rcu_read_unlock();
	}

retry:
	if (!e)
		new = f2fs_kmem_cache_alloc(ino_entry_slab,
						GFP_NOFS, true, NULL);

	radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);

	spin_lock(&im->ino_lock);
	e = radix_tree_lookup(&im->ino_root, ino);
	if (!e) {
		if (!new) {
			spin_unlock(&im->ino_lock);
			goto retry;
		}
		e = new;
		if (unlikely(radix_tree_insert(&im->ino_root, ino, e)))
			f2fs_bug_on(sbi, 1);

		memset(e, 0, sizeof(struct ino_entry));
		e->ino = ino;

		list_add_tail(&e->list, &im->ino_list);
		if (type != ORPHAN_INO)
			im->ino_num++;
	}

	if (type == FLUSH_INO)
		f2fs_set_bit(devidx, (char *)&e->dirty_device);

	spin_unlock(&im->ino_lock);
	radix_tree_preload_end();

	if (new && e != new)
		kmem_cache_free(ino_entry_slab, new);
}
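/*
 * Note: __add_ino_entry() allocates the entry and calls
 * radix_tree_preload() before taking im->ino_lock, so no allocation can
 * sleep under the spinlock; if another task inserted the entry first,
 * the pre-allocated one is simply freed at the end.
 */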

static void __remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
	struct inode_management *im = &sbi->im[type];
	struct ino_entry *e;

	spin_lock(&im->ino_lock);
	e = radix_tree_lookup(&im->ino_root, ino);
	if (e) {
		list_del(&e->list);
		radix_tree_delete(&im->ino_root, ino);
		im->ino_num--;
		spin_unlock(&im->ino_lock);
		kmem_cache_free(ino_entry_slab, e);
		return;
	}
	spin_unlock(&im->ino_lock);
}

void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
	/* add new dirty ino entry into list */
	__add_ino_entry(sbi, ino, 0, type);
}

void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
	/* remove dirty ino entry from list */
	__remove_ino_entry(sbi, ino, type);
}

/* mode should be APPEND_INO, UPDATE_INO or TRANS_DIR_INO */
bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode)
{
	struct inode_management *im = &sbi->im[mode];
	struct ino_entry *e;

	spin_lock(&im->ino_lock);
	e = radix_tree_lookup(&im->ino_root, ino);
	spin_unlock(&im->ino_lock);
	return e ? true : false;
}

void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all)
{
	struct ino_entry *e, *tmp;
	int i;

	for (i = all ? ORPHAN_INO : APPEND_INO; i < MAX_INO_ENTRY; i++) {
		struct inode_management *im = &sbi->im[i];

		spin_lock(&im->ino_lock);
		list_for_each_entry_safe(e, tmp, &im->ino_list, list) {
			list_del(&e->list);
			radix_tree_delete(&im->ino_root, e->ino);
			kmem_cache_free(ino_entry_slab, e);
			im->ino_num--;
		}
		spin_unlock(&im->ino_lock);
	}
}

void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
					unsigned int devidx, int type)
{
	__add_ino_entry(sbi, ino, devidx, type);
}

bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
					unsigned int devidx, int type)
{
	struct inode_management *im = &sbi->im[type];
	struct ino_entry *e;
	bool is_dirty = false;

	spin_lock(&im->ino_lock);
	e = radix_tree_lookup(&im->ino_root, ino);
	if (e && f2fs_test_bit(devidx, (char *)&e->dirty_device))
		is_dirty = true;
	spin_unlock(&im->ino_lock);
	return is_dirty;
}

int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi)
{
	struct inode_management *im = &sbi->im[ORPHAN_INO];
	int err = 0;

	spin_lock(&im->ino_lock);

	if (time_to_inject(sbi, FAULT_ORPHAN)) {
		spin_unlock(&im->ino_lock);
		f2fs_show_injection_info(sbi, FAULT_ORPHAN);
		return -ENOSPC;
	}

	if (unlikely(im->ino_num >= sbi->max_orphans))
		err = -ENOSPC;
	else
		im->ino_num++;
	spin_unlock(&im->ino_lock);

	return err;
}

void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi)
{
	struct inode_management *im = &sbi->im[ORPHAN_INO];

	spin_lock(&im->ino_lock);
	f2fs_bug_on(sbi, im->ino_num == 0);
	im->ino_num--;
	spin_unlock(&im->ino_lock);
}

void f2fs_add_orphan_inode(struct inode *inode)
{
	/* add new orphan ino entry into list */
	__add_ino_entry(F2FS_I_SB(inode), inode->i_ino, 0, ORPHAN_INO);
	f2fs_update_inode_page(inode);
}

void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
	/* remove orphan entry from orphan list */
	__remove_ino_entry(sbi, ino, ORPHAN_INO);
}

static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct inode *inode;
	struct node_info ni;
	int err;

	inode = f2fs_iget_retry(sbi->sb, ino);
	if (IS_ERR(inode)) {
		/*
		 * it is a bug if we cannot find the entry for the
		 * orphan inode.
		 */
		f2fs_bug_on(sbi, PTR_ERR(inode) == -ENOENT);
		return PTR_ERR(inode);
	}

	err = f2fs_dquot_initialize(inode);
	if (err) {
		iput(inode);
		goto err_out;
	}

	clear_nlink(inode);

	/* truncate all the data during iput */
	iput(inode);

	err = f2fs_get_node_info(sbi, ino, &ni, false);
	if (err)
		goto err_out;

	/* ENOMEM was fully retried in f2fs_evict_inode. */
	if (ni.blk_addr != NULL_ADDR) {
		err = -EIO;
		goto err_out;
	}
	return 0;

err_out:
	set_sbi_flag(sbi, SBI_NEED_FSCK);
	f2fs_warn(sbi, "%s: orphan failed (ino=%x), run fsck to fix.",
		  __func__, ino);
	return err;
}

int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi)
{
	block_t start_blk, orphan_blocks, i, j;
	unsigned int s_flags = sbi->sb->s_flags;
	int err = 0;
#ifdef CONFIG_QUOTA
	int quota_enabled;
#endif

	if (!is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG))
		return 0;

	if (bdev_read_only(sbi->sb->s_bdev)) {
		f2fs_info(sbi, "write access unavailable, skipping orphan cleanup");
		return 0;
	}

	if (s_flags & SB_RDONLY) {
		f2fs_info(sbi, "orphan cleanup on readonly fs");
		sbi->sb->s_flags &= ~SB_RDONLY;
	}

#ifdef CONFIG_QUOTA
	/*
	 * Turn on quotas which were not enabled for read-only mounts if
	 * filesystem has quota feature, so that they are updated correctly.
	 */
	quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
#endif

	start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
	orphan_blocks = __start_sum_addr(sbi) - 1 - __cp_payload(sbi);

	f2fs_ra_meta_pages(sbi, start_blk, orphan_blocks, META_CP, true);

	for (i = 0; i < orphan_blocks; i++) {
		struct page *page;
		struct f2fs_orphan_block *orphan_blk;

		page = f2fs_get_meta_page(sbi, start_blk + i);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto out;
		}

		orphan_blk = (struct f2fs_orphan_block *)page_address(page);
		for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) {
			nid_t ino = le32_to_cpu(orphan_blk->ino[j]);

			err = recover_orphan_inode(sbi, ino);
			if (err) {
				f2fs_put_page(page, 1);
				goto out;
			}
		}
		f2fs_put_page(page, 1);
	}
	/* clear Orphan Flag */
	clear_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG);
out:
	set_sbi_flag(sbi, SBI_IS_RECOVERED);

#ifdef CONFIG_QUOTA
	/* Turn quotas off */
	if (quota_enabled)
		f2fs_quota_off_umount(sbi->sb);
#endif
	sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */

	return err;
}

static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
{
	struct list_head *head;
	struct f2fs_orphan_block *orphan_blk = NULL;
	unsigned int nentries = 0;
	unsigned short index = 1;
	unsigned short orphan_blocks;
	struct page *page = NULL;
	struct ino_entry *orphan = NULL;
	struct inode_management *im = &sbi->im[ORPHAN_INO];

	orphan_blocks = GET_ORPHAN_BLOCKS(im->ino_num);

	/*
	 * we don't need to do spin_lock(&im->ino_lock) here, since all the
	 * orphan inode operations are covered under f2fs_lock_op().
	 * And, spin_lock should be avoided due to page operations below.
	 */
	head = &im->ino_list;

	/* loop over each orphan inode entry and write them into the journal block */
	list_for_each_entry(orphan, head, list) {
		if (!page) {
			page = f2fs_grab_meta_page(sbi, start_blk++);
			orphan_blk =
				(struct f2fs_orphan_block *)page_address(page);
			memset(orphan_blk, 0, sizeof(*orphan_blk));
		}

		orphan_blk->ino[nentries++] = cpu_to_le32(orphan->ino);

		if (nentries == F2FS_ORPHANS_PER_BLOCK) {
			/*
			 * when an orphan block is full of 1020 entries,
			 * we need to flush the current orphan block
			 * and bring another one into memory
			 */
			orphan_blk->blk_addr = cpu_to_le16(index);
			orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
			orphan_blk->entry_count = cpu_to_le32(nentries);
			set_page_dirty(page);
			f2fs_put_page(page, 1);
			index++;
			nentries = 0;
			page = NULL;
		}
	}

	if (page) {
		orphan_blk->blk_addr = cpu_to_le16(index);
		orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
		orphan_blk->entry_count = cpu_to_le32(nentries);
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}
}

static __u32 f2fs_checkpoint_chksum(struct f2fs_sb_info *sbi,
						struct f2fs_checkpoint *ckpt)
{
	unsigned int chksum_ofs = le32_to_cpu(ckpt->checksum_offset);
	__u32 chksum;

	chksum = f2fs_crc32(sbi, ckpt, chksum_ofs);
	if (chksum_ofs < CP_CHKSUM_OFFSET) {
		chksum_ofs += sizeof(chksum);
		chksum = f2fs_chksum(sbi, chksum, (__u8 *)ckpt + chksum_ofs,
						F2FS_BLKSIZE - chksum_ofs);
	}
	return chksum;
}
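/*
 * Note: the checkpoint checksum covers the block in two pieces: the
 * bytes before checksum_offset, and (when the offset is below
 * CP_CHKSUM_OFFSET) the bytes after the stored checksum word up to the
 * end of the block, so the checksum field itself is excluded from the
 * calculation.
 */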

static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
		struct f2fs_checkpoint **cp_block, struct page **cp_page,
		unsigned long long *version)
{
	size_t crc_offset = 0;
	__u32 crc;

	*cp_page = f2fs_get_meta_page(sbi, cp_addr);
	if (IS_ERR(*cp_page))
		return PTR_ERR(*cp_page);

	*cp_block = (struct f2fs_checkpoint *)page_address(*cp_page);

	crc_offset = le32_to_cpu((*cp_block)->checksum_offset);
	if (crc_offset < CP_MIN_CHKSUM_OFFSET ||
			crc_offset > CP_CHKSUM_OFFSET) {
		f2fs_put_page(*cp_page, 1);
		f2fs_warn(sbi, "invalid crc_offset: %zu", crc_offset);
		return -EINVAL;
	}

	crc = f2fs_checkpoint_chksum(sbi, *cp_block);
	if (crc != cur_cp_crc(*cp_block)) {
		f2fs_put_page(*cp_page, 1);
		f2fs_warn(sbi, "invalid crc value");
		return -EINVAL;
	}

	*version = cur_cp_version(*cp_block);
	return 0;
}

static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
				block_t cp_addr, unsigned long long *version)
{
	struct page *cp_page_1 = NULL, *cp_page_2 = NULL;
	struct f2fs_checkpoint *cp_block = NULL;
	unsigned long long cur_version = 0, pre_version = 0;
	unsigned int cp_blocks;
	int err;

	err = get_checkpoint_version(sbi, cp_addr, &cp_block,
					&cp_page_1, version);
	if (err)
		return NULL;

	cp_blocks = le32_to_cpu(cp_block->cp_pack_total_block_count);

	if (cp_blocks > sbi->blocks_per_seg || cp_blocks <= F2FS_CP_PACKS) {
		f2fs_warn(sbi, "invalid cp_pack_total_block_count:%u",
			  le32_to_cpu(cp_block->cp_pack_total_block_count));
		goto invalid_cp;
	}
	pre_version = *version;

	cp_addr += cp_blocks - 1;
	err = get_checkpoint_version(sbi, cp_addr, &cp_block,
					&cp_page_2, version);
	if (err)
		goto invalid_cp;
	cur_version = *version;

	if (cur_version == pre_version) {
		*version = cur_version;
		f2fs_put_page(cp_page_2, 1);
		return cp_page_1;
	}
	f2fs_put_page(cp_page_2, 1);
invalid_cp:
	f2fs_put_page(cp_page_1, 1);
	return NULL;
}
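/*
 * Note: a cp pack is only considered valid when the version stamped in
 * its first block matches the one in its last block; a crash in the
 * middle of writing a pack leaves the two stamps different, so
 * validate_checkpoint() rejects that pack and mount falls back to the
 * other one.
 */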

int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *cp_block;
	struct f2fs_super_block *fsb = sbi->raw_super;
	struct page *cp1, *cp2, *cur_page;
	unsigned long blk_size = sbi->blocksize;
	unsigned long long cp1_version = 0, cp2_version = 0;
	unsigned long long cp_start_blk_no;
	unsigned int cp_blks = 1 + __cp_payload(sbi);
	block_t cp_blk_no;
	int i;
	int err;

	sbi->ckpt = f2fs_kvzalloc(sbi, array_size(blk_size, cp_blks),
				  GFP_KERNEL);
	if (!sbi->ckpt)
		return -ENOMEM;
	/*
	 * Finding a valid cp block involves reading both
	 * sets (cp pack 1 and cp pack 2)
	 */
	cp_start_blk_no = le32_to_cpu(fsb->cp_blkaddr);
	cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version);

	/* The second checkpoint pack should start at the next segment */
	cp_start_blk_no += ((unsigned long long)1) <<
				le32_to_cpu(fsb->log_blocks_per_seg);
	cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version);

	if (cp1 && cp2) {
		if (ver_after(cp2_version, cp1_version))
			cur_page = cp2;
		else
			cur_page = cp1;
	} else if (cp1) {
		cur_page = cp1;
	} else if (cp2) {
		cur_page = cp2;
	} else {
		err = -EFSCORRUPTED;
		goto fail_no_cp;
	}

	cp_block = (struct f2fs_checkpoint *)page_address(cur_page);
	memcpy(sbi->ckpt, cp_block, blk_size);

	if (cur_page == cp1)
		sbi->cur_cp_pack = 1;
	else
		sbi->cur_cp_pack = 2;

	/* Sanity checking of checkpoint */
	if (f2fs_sanity_check_ckpt(sbi)) {
		err = -EFSCORRUPTED;
		goto free_fail_no_cp;
	}

	if (cp_blks <= 1)
		goto done;

	cp_blk_no = le32_to_cpu(fsb->cp_blkaddr);
	if (cur_page == cp2)
		cp_blk_no += 1 << le32_to_cpu(fsb->log_blocks_per_seg);

	for (i = 1; i < cp_blks; i++) {
		void *sit_bitmap_ptr;
		unsigned char *ckpt = (unsigned char *)sbi->ckpt;

		cur_page = f2fs_get_meta_page(sbi, cp_blk_no + i);
		if (IS_ERR(cur_page)) {
			err = PTR_ERR(cur_page);
			goto free_fail_no_cp;
		}
		sit_bitmap_ptr = page_address(cur_page);
		memcpy(ckpt + i * blk_size, sit_bitmap_ptr, blk_size);
		f2fs_put_page(cur_page, 1);
	}
done:
	f2fs_put_page(cp1, 1);
	f2fs_put_page(cp2, 1);
	return 0;

free_fail_no_cp:
	f2fs_put_page(cp1, 1);
	f2fs_put_page(cp2, 1);
fail_no_cp:
	kvfree(sbi->ckpt);
	return err;
}

static void __add_dirty_inode(struct inode *inode, enum inode_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int flag = (type == DIR_INODE) ? FI_DIRTY_DIR : FI_DIRTY_FILE;

	if (is_inode_flag_set(inode, flag))
		return;

	set_inode_flag(inode, flag);
	list_add_tail(&F2FS_I(inode)->dirty_list, &sbi->inode_list[type]);
	stat_inc_dirty_inode(sbi, type);
}

static void __remove_dirty_inode(struct inode *inode, enum inode_type type)
{
	int flag = (type == DIR_INODE) ? FI_DIRTY_DIR : FI_DIRTY_FILE;

	if (get_dirty_pages(inode) || !is_inode_flag_set(inode, flag))
		return;

	list_del_init(&F2FS_I(inode)->dirty_list);
	clear_inode_flag(inode, flag);
	stat_dec_dirty_inode(F2FS_I_SB(inode), type);
}

void f2fs_update_dirty_folio(struct inode *inode, struct folio *folio)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE;

	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
			!S_ISLNK(inode->i_mode))
		return;

	spin_lock(&sbi->inode_lock[type]);
	if (type != FILE_INODE || test_opt(sbi, DATA_FLUSH))
		__add_dirty_inode(inode, type);
	inode_inc_dirty_pages(inode);
	spin_unlock(&sbi->inode_lock[type]);

	set_page_private_reference(&folio->page);
}

void f2fs_remove_dirty_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE;

	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
			!S_ISLNK(inode->i_mode))
		return;

	if (type == FILE_INODE && !test_opt(sbi, DATA_FLUSH))
		return;

	spin_lock(&sbi->inode_lock[type]);
	__remove_dirty_inode(inode, type);
	spin_unlock(&sbi->inode_lock[type]);
}

int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type)
{
	struct list_head *head;
	struct inode *inode;
	struct f2fs_inode_info *fi;
	bool is_dir = (type == DIR_INODE);
	unsigned long ino = 0;

	trace_f2fs_sync_dirty_inodes_enter(sbi->sb, is_dir,
				get_pages(sbi, is_dir ?
				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
retry:
	if (unlikely(f2fs_cp_error(sbi))) {
		trace_f2fs_sync_dirty_inodes_exit(sbi->sb, is_dir,
				get_pages(sbi, is_dir ?
				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
		return -EIO;
	}

	spin_lock(&sbi->inode_lock[type]);

	head = &sbi->inode_list[type];
	if (list_empty(head)) {
		spin_unlock(&sbi->inode_lock[type]);
		trace_f2fs_sync_dirty_inodes_exit(sbi->sb, is_dir,
				get_pages(sbi, is_dir ?
				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
		return 0;
	}
	fi = list_first_entry(head, struct f2fs_inode_info, dirty_list);
	inode = igrab(&fi->vfs_inode);
	spin_unlock(&sbi->inode_lock[type]);
	if (inode) {
		unsigned long cur_ino = inode->i_ino;

		F2FS_I(inode)->cp_task = current;

		filemap_fdatawrite(inode->i_mapping);

		F2FS_I(inode)->cp_task = NULL;

		iput(inode);
		/* We need to give the CPU to other writers. */
		if (ino == cur_ino)
			cond_resched();
		else
			ino = cur_ino;
	} else {
		/*
		 * We should submit the bio, since several dentry pages
		 * still under writeback exist in the freeing inode.
		 */
		f2fs_submit_merged_write(sbi, DATA);
		cond_resched();
	}
	goto retry;
}

int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi)
{
	struct list_head *head = &sbi->inode_list[DIRTY_META];
	struct inode *inode;
	struct f2fs_inode_info *fi;
	s64 total = get_pages(sbi, F2FS_DIRTY_IMETA);

	while (total--) {
		if (unlikely(f2fs_cp_error(sbi)))
			return -EIO;

		spin_lock(&sbi->inode_lock[DIRTY_META]);
		if (list_empty(head)) {
			spin_unlock(&sbi->inode_lock[DIRTY_META]);
			return 0;
		}
		fi = list_first_entry(head, struct f2fs_inode_info,
							gdirty_list);
		inode = igrab(&fi->vfs_inode);
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		if (inode) {
			sync_inode_metadata(inode, 0);

			/* it's on eviction */
			if (is_inode_flag_set(inode, FI_DIRTY_INODE))
				f2fs_update_inode_page(inode);
			iput(inode);
		}
	}
	return 0;
}

static void __prepare_cp_block(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	nid_t last_nid = nm_i->next_scan_nid;

	next_free_nid(sbi, &last_nid);
	ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi));
	ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi));
	ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi));
	ckpt->next_free_nid = cpu_to_le32(last_nid);
}

static bool __need_flush_quota(struct f2fs_sb_info *sbi)
{
	bool ret = false;

	if (!is_journalled_quota(sbi))
		return false;

	if (!f2fs_down_write_trylock(&sbi->quota_sem))
		return true;
	if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH)) {
		ret = false;
	} else if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR)) {
		ret = false;
	} else if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_FLUSH)) {
		clear_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
		ret = true;
	} else if (get_pages(sbi, F2FS_DIRTY_QDATA)) {
		ret = true;
	}
	f2fs_up_write(&sbi->quota_sem);
	return ret;
}

/*
 * Freeze all the FS-operations for checkpoint.
 */
static int block_operations(struct f2fs_sb_info *sbi)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};
	int err = 0, cnt = 0;

	/*
	 * Let's flush inline_data in dirty node pages.
	 */
	f2fs_flush_inline_data(sbi);

retry_flush_quotas:
	f2fs_lock_all(sbi);
	if (__need_flush_quota(sbi)) {
		int locked;

		if (++cnt > DEFAULT_RETRY_QUOTA_FLUSH_COUNT) {
			set_sbi_flag(sbi, SBI_QUOTA_SKIP_FLUSH);
			set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
			goto retry_flush_dents;
		}
		f2fs_unlock_all(sbi);

		/* only failed during mount/umount/freeze/quotactl */
		locked = down_read_trylock(&sbi->sb->s_umount);
		f2fs_quota_sync(sbi->sb, -1);
		if (locked)
			up_read(&sbi->sb->s_umount);
		cond_resched();
		goto retry_flush_quotas;
	}

retry_flush_dents:
	/* write all the dirty dentry pages */
	if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
		f2fs_unlock_all(sbi);
		err = f2fs_sync_dirty_inodes(sbi, DIR_INODE);
		if (err)
			return err;
		cond_resched();
		goto retry_flush_quotas;
	}

	/*
	 * POR: we should ensure that there are no dirty node pages
	 * until finishing nat/sit flush. inode->i_blocks can be updated.
	 */
	f2fs_down_write(&sbi->node_change);

	if (get_pages(sbi, F2FS_DIRTY_IMETA)) {
		f2fs_up_write(&sbi->node_change);
		f2fs_unlock_all(sbi);
		err = f2fs_sync_inode_meta(sbi);
		if (err)
			return err;
		cond_resched();
		goto retry_flush_quotas;
	}

retry_flush_nodes:
	f2fs_down_write(&sbi->node_write);

	if (get_pages(sbi, F2FS_DIRTY_NODES)) {
		f2fs_up_write(&sbi->node_write);
		atomic_inc(&sbi->wb_sync_req[NODE]);
		err = f2fs_sync_node_pages(sbi, &wbc, false, FS_CP_NODE_IO);
		atomic_dec(&sbi->wb_sync_req[NODE]);
		if (err) {
			f2fs_up_write(&sbi->node_change);
			f2fs_unlock_all(sbi);
			return err;
		}
		cond_resched();
		goto retry_flush_nodes;
	}

	/*
	 * sbi->node_change is used only for AIO write_begin path which produces
	 * dirty node blocks and some checkpoint values by block allocation.
	 */
	__prepare_cp_block(sbi);
	f2fs_up_write(&sbi->node_change);
	return err;
}
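/*
 * Note: block_operations() drains dirty state in dependency order --
 * quota data, then dentry pages, then dirty inode metadata, then node
 * pages -- and restarts from the quota step whenever flushing one stage
 * may have dirtied an earlier one, so the filesystem is quiescent once
 * it returns with f2fs_lock_all() and node_write held.
 */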

static void unblock_operations(struct f2fs_sb_info *sbi)
{
	f2fs_up_write(&sbi->node_write);
	f2fs_unlock_all(sbi);
}

void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type)
{
	DEFINE_WAIT(wait);

	for (;;) {
		if (!get_pages(sbi, type))
			break;

		if (unlikely(f2fs_cp_error(sbi)))
			break;

		if (type == F2FS_DIRTY_META)
			f2fs_sync_meta_pages(sbi, META, LONG_MAX,
							FS_CP_META_IO);
		else if (type == F2FS_WB_CP_DATA)
			f2fs_submit_merged_write(sbi, DATA);

		prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE);
		io_schedule_timeout(DEFAULT_IO_TIMEOUT);
	}
	finish_wait(&sbi->cp_wait, &wait);
}

static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num;
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned long flags;

	if (cpc->reason & CP_UMOUNT) {
		if (le32_to_cpu(ckpt->cp_pack_total_block_count) +
			NM_I(sbi)->nat_bits_blocks > sbi->blocks_per_seg) {
			clear_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
			f2fs_notice(sbi, "Disable nat_bits due to no space");
		} else if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG) &&
						f2fs_nat_bitmap_enabled(sbi)) {
			f2fs_enable_nat_bits(sbi);
			set_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
			f2fs_notice(sbi, "Rebuild and enable nat_bits");
		}
	}

	spin_lock_irqsave(&sbi->cp_lock, flags);

	if (cpc->reason & CP_TRIMMED)
		__set_ckpt_flags(ckpt, CP_TRIMMED_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_TRIMMED_FLAG);

	if (cpc->reason & CP_UMOUNT)
		__set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);

	if (cpc->reason & CP_FASTBOOT)
		__set_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);

	if (orphan_num)
		__set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);

	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
		__set_ckpt_flags(ckpt, CP_FSCK_FLAG);

	if (is_sbi_flag_set(sbi, SBI_IS_RESIZEFS))
		__set_ckpt_flags(ckpt, CP_RESIZEFS_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_RESIZEFS_FLAG);

	if (is_sbi_flag_set(sbi, SBI_CP_DISABLED))
		__set_ckpt_flags(ckpt, CP_DISABLED_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_DISABLED_FLAG);

	if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK))
		__set_ckpt_flags(ckpt, CP_DISABLED_QUICK_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_DISABLED_QUICK_FLAG);

	if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH))
		__set_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG);

	if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR))
		__set_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG);

	/* set this flag to activate crc|cp_ver for recovery */
	__set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG);
	__clear_ckpt_flags(ckpt, CP_NOCRC_RECOVERY_FLAG);

	spin_unlock_irqrestore(&sbi->cp_lock, flags);
}

static void commit_checkpoint(struct f2fs_sb_info *sbi,
	void *src, block_t blk_addr)
{
	struct writeback_control wbc = {
		.for_reclaim = 0,
	};

	/*
	 * pagevec_lookup_tag and lock_page again will take
	 * some extra time. Therefore, f2fs_update_meta_pages and
	 * f2fs_sync_meta_pages are combined in this function.
	 */
	struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
	int err;

	f2fs_wait_on_page_writeback(page, META, true, true);

	memcpy(page_address(page), src, PAGE_SIZE);

	set_page_dirty(page);
	if (unlikely(!clear_page_dirty_for_io(page)))
		f2fs_bug_on(sbi, 1);

	/* writeout cp pack 2 page */
	err = __f2fs_write_meta_page(page, &wbc, FS_CP_META_IO);
	if (unlikely(err && f2fs_cp_error(sbi))) {
		f2fs_put_page(page, 1);
		return;
	}

	f2fs_bug_on(sbi, err);
	f2fs_put_page(page, 0);

	/* submit checkpoint (with barrier if NOBARRIER is not set) */
	f2fs_submit_merged_write(sbi, META_FLUSH);
}
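/*
 * Note: commit_checkpoint() writes only the final "cp pack 2" page; its
 * META_FLUSH submission carries the flush barrier (unless NOBARRIER is
 * set), so the commit page reaches stable media only after every earlier
 * page of the pack, which is what makes the checkpoint atomic.
 */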

static inline u64 get_sectors_written(struct block_device *bdev)
{
	return (u64)part_stat_read(bdev, sectors[STAT_WRITE]);
}

u64 f2fs_get_sectors_written(struct f2fs_sb_info *sbi)
{
	if (f2fs_is_multi_device(sbi)) {
		u64 sectors = 0;
		int i;

		for (i = 0; i < sbi->s_ndevs; i++)
			sectors += get_sectors_written(FDEV(i).bdev);

		return sectors;
	}

	return get_sectors_written(sbi->sb->s_bdev);
}

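/*
 * Note: do_checkpoint() below lays out one cp pack in write order:
 * checkpoint block, cp payload blocks, orphan blocks, data summaries,
 * optionally node summaries, and finally the mirrored checkpoint block
 * ("cp pack 2 page") committed last by commit_checkpoint().
 */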
static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num, flags;
	block_t start_blk;
	unsigned int data_sum_blocks, orphan_blocks;
	__u32 crc32 = 0;
	int i;
	int cp_payload_blks = __cp_payload(sbi);
	struct curseg_info *seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
	u64 kbytes_written;
	int err;

	/* Flush all the NAT/SIT pages */
	f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);

	/* start to update checkpoint, cp ver is already updated previously */
	ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi, true));
	ckpt->free_segment_count = cpu_to_le32(free_segments(sbi));
	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		ckpt->cur_node_segno[i] =
			cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_NODE));
		ckpt->cur_node_blkoff[i] =
			cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_NODE));
		ckpt->alloc_type[i + CURSEG_HOT_NODE] =
				curseg_alloc_type(sbi, i + CURSEG_HOT_NODE);
	}
	for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
		ckpt->cur_data_segno[i] =
			cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_DATA));
		ckpt->cur_data_blkoff[i] =
			cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_DATA));
		ckpt->alloc_type[i + CURSEG_HOT_DATA] =
				curseg_alloc_type(sbi, i + CURSEG_HOT_DATA);
	}

	/* 2 cp + n data seg summary + orphan inode blocks */
	data_sum_blocks = f2fs_npages_for_summary_flush(sbi, false);
	spin_lock_irqsave(&sbi->cp_lock, flags);
	if (data_sum_blocks < NR_CURSEG_DATA_TYPE)
		__set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
	spin_unlock_irqrestore(&sbi->cp_lock, flags);

	orphan_blocks = GET_ORPHAN_BLOCKS(orphan_num);
	ckpt->cp_pack_start_sum = cpu_to_le32(1 + cp_payload_blks +
			orphan_blocks);

	if (__remain_node_summaries(cpc->reason))
		ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS +
				cp_payload_blks + data_sum_blocks +
				orphan_blocks + NR_CURSEG_NODE_TYPE);
	else
		ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS +
				cp_payload_blks + data_sum_blocks +
				orphan_blocks);

	/* update ckpt flag for checkpoint */
	update_ckpt_flags(sbi, cpc);

	/* update SIT/NAT bitmap */
	get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP));
	get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP));

	crc32 = f2fs_checkpoint_chksum(sbi, ckpt);
	*((__le32 *)((unsigned char *)ckpt +
				le32_to_cpu(ckpt->checksum_offset)))
				= cpu_to_le32(crc32);

	start_blk = __start_cp_next_addr(sbi);

	/* write nat bits */
	if ((cpc->reason & CP_UMOUNT) &&
			is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG)) {
		__u64 cp_ver = cur_cp_version(ckpt);
		block_t blk;

		cp_ver |= ((__u64)crc32 << 32);
		*(__le64 *)nm_i->nat_bits = cpu_to_le64(cp_ver);

		blk = start_blk + sbi->blocks_per_seg - nm_i->nat_bits_blocks;
		for (i = 0; i < nm_i->nat_bits_blocks; i++)
			f2fs_update_meta_page(sbi, nm_i->nat_bits +
					(i << F2FS_BLKSIZE_BITS), blk + i);
	}

	/* write out checkpoint buffer at block 0 */
	f2fs_update_meta_page(sbi, ckpt, start_blk++);

	for (i = 1; i < 1 + cp_payload_blks; i++)
		f2fs_update_meta_page(sbi, (char *)ckpt + i * F2FS_BLKSIZE,
							start_blk++);

	if (orphan_num) {
		write_orphan_inodes(sbi, start_blk);
		start_blk += orphan_blocks;
	}

	f2fs_write_data_summaries(sbi, start_blk);
	start_blk += data_sum_blocks;

	/* Record write statistics in the hot node summary */
	kbytes_written = sbi->kbytes_written;
	kbytes_written += (f2fs_get_sectors_written(sbi) -
				sbi->sectors_written_start) >> 1;
	seg_i->journal->info.kbytes_written = cpu_to_le64(kbytes_written);

	if (__remain_node_summaries(cpc->reason)) {
		f2fs_write_node_summaries(sbi, start_blk);
		start_blk += NR_CURSEG_NODE_TYPE;
	}

	/* update user_block_counts */
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	percpu_counter_set(&sbi->alloc_valid_block_count, 0);
	percpu_counter_set(&sbi->rf_node_block_count, 0);

	/* at this point, one bio holds the CP pack except the cp pack 2 page */
	f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
	/* Wait for all dirty meta pages to be submitted for IO */
	f2fs_wait_on_all_pages(sbi, F2FS_DIRTY_META);

	/* wait for previous submitted meta pages writeback */
	f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);

	/* flush all device cache */
	err = f2fs_flush_device_cache(sbi);
	if (err)
		return err;

	/* barrier and flush checkpoint cp pack 2 page if it can */
	commit_checkpoint(sbi, ckpt, start_blk);
	f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);

	/*
	 * invalidate intermediate page cache borrowed from meta inode which are
	 * used for migration of encrypted, verity or compressed inode's blocks.
	 */
	if (f2fs_sb_has_encrypt(sbi) || f2fs_sb_has_verity(sbi) ||
		f2fs_sb_has_compression(sbi))
		invalidate_mapping_pages(META_MAPPING(sbi),
				MAIN_BLKADDR(sbi), MAX_BLKADDR(sbi) - 1);

	f2fs_release_ino_entry(sbi, false);

	f2fs_reset_fsync_node_info(sbi);

	clear_sbi_flag(sbi, SBI_IS_DIRTY);
	clear_sbi_flag(sbi, SBI_NEED_CP);
	clear_sbi_flag(sbi, SBI_QUOTA_SKIP_FLUSH);

	spin_lock(&sbi->stat_lock);
	sbi->unusable_block_count = 0;
	spin_unlock(&sbi->stat_lock);

	__set_cp_next_pack(sbi);

	/*
	 * redirty superblock if metadata like node page or inode cache is
	 * updated during writing checkpoint.
	 */
	if (get_pages(sbi, F2FS_DIRTY_NODES) ||
			get_pages(sbi, F2FS_DIRTY_IMETA))
		set_sbi_flag(sbi, SBI_IS_DIRTY);

	f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_DENTS));

	return unlikely(f2fs_cp_error(sbi)) ? -EIO : 0;
}

int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned long long ckpt_ver;
	int err = 0;

	if (f2fs_readonly(sbi->sb) || f2fs_hw_is_readonly(sbi))
		return -EROFS;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (cpc->reason != CP_PAUSE)
			return 0;
		f2fs_warn(sbi, "Start checkpoint disabled!");
	}
	if (cpc->reason != CP_RESIZE)
		f2fs_down_write(&sbi->cp_global_sem);

	if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) &&
		((cpc->reason & CP_FASTBOOT) || (cpc->reason & CP_SYNC) ||
		((cpc->reason & CP_DISCARD) && !sbi->discard_blks)))
		goto out;
	if (unlikely(f2fs_cp_error(sbi))) {
		err = -EIO;
		goto out;
	}

	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "start block_ops");

	err = block_operations(sbi);
	if (err)
		goto out;

	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish block_ops");

	f2fs_flush_merged_writes(sbi);

	/* this is the case of multiple fstrims without any changes */
	if (cpc->reason & CP_DISCARD) {
		if (!f2fs_exist_trim_candidates(sbi, cpc)) {
			unblock_operations(sbi);
			goto out;
		}

		if (NM_I(sbi)->nat_cnt[DIRTY_NAT] == 0 &&
				SIT_I(sbi)->dirty_sentries == 0 &&
				prefree_segments(sbi) == 0) {
			f2fs_flush_sit_entries(sbi, cpc);
			f2fs_clear_prefree_segments(sbi, cpc);
			unblock_operations(sbi);
			goto out;
		}
	}

	/*
	 * update checkpoint pack index: increase the version number so that
	 * SIT entries and seg summaries are written to the correct place
	 */
	ckpt_ver = cur_cp_version(ckpt);
	ckpt->checkpoint_ver = cpu_to_le64(++ckpt_ver);

	/* write cached NAT/SIT entries to NAT/SIT area */
	err = f2fs_flush_nat_entries(sbi, cpc);
	if (err) {
		f2fs_err(sbi, "f2fs_flush_nat_entries failed err:%d, stop checkpoint", err);
		f2fs_bug_on(sbi, !f2fs_cp_error(sbi));
		goto stop;
	}

	f2fs_flush_sit_entries(sbi, cpc);

	/* save inmem log status */
	f2fs_save_inmem_curseg(sbi);

	err = do_checkpoint(sbi, cpc);
	if (err) {
		f2fs_err(sbi, "do_checkpoint failed err:%d, stop checkpoint", err);
		f2fs_bug_on(sbi, !f2fs_cp_error(sbi));
		f2fs_release_discard_addrs(sbi);
	} else {
		f2fs_clear_prefree_segments(sbi, cpc);
	}

	f2fs_restore_inmem_curseg(sbi);
stop:
	unblock_operations(sbi);
	stat_inc_cp_count(sbi->stat_info);

	if (cpc->reason & CP_RECOVERY)
		f2fs_notice(sbi, "checkpoint: version = %llx", ckpt_ver);

	/* update CP_TIME to trigger checkpoint periodically */
	f2fs_update_time(sbi, CP_TIME);
	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
out:
	if (cpc->reason != CP_RESIZE)
		f2fs_up_write(&sbi->cp_global_sem);
	return err;
}

void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = 0; i < MAX_INO_ENTRY; i++) {
		struct inode_management *im = &sbi->im[i];

		INIT_RADIX_TREE(&im->ino_root, GFP_ATOMIC);
		spin_lock_init(&im->ino_lock);
		INIT_LIST_HEAD(&im->ino_list);
		im->ino_num = 0;
	}

	sbi->max_orphans = (sbi->blocks_per_seg - F2FS_CP_PACKS -
			NR_CURSEG_PERSIST_TYPE - __cp_payload(sbi)) *
				F2FS_ORPHANS_PER_BLOCK;
}

int __init f2fs_create_checkpoint_caches(void)
{
	ino_entry_slab = f2fs_kmem_cache_create("f2fs_ino_entry",
			sizeof(struct ino_entry));
	if (!ino_entry_slab)
		return -ENOMEM;
	f2fs_inode_entry_slab = f2fs_kmem_cache_create("f2fs_inode_entry",
			sizeof(struct inode_entry));
	if (!f2fs_inode_entry_slab) {
		kmem_cache_destroy(ino_entry_slab);
		return -ENOMEM;
	}
	return 0;
}

void f2fs_destroy_checkpoint_caches(void)
{
	kmem_cache_destroy(ino_entry_slab);
	kmem_cache_destroy(f2fs_inode_entry_slab);
}

static int __write_checkpoint_sync(struct f2fs_sb_info *sbi)
{
	struct cp_control cpc = { .reason = CP_SYNC, };
	int err;

	f2fs_down_write(&sbi->gc_lock);
	err = f2fs_write_checkpoint(sbi, &cpc);
	f2fs_up_write(&sbi->gc_lock);

	return err;
}

static void __checkpoint_and_complete_reqs(struct f2fs_sb_info *sbi)
{
	struct ckpt_req_control *cprc = &sbi->cprc_info;
	struct ckpt_req *req, *next;
	struct llist_node *dispatch_list;
	u64 sum_diff = 0, diff, count = 0;
	int ret;

	dispatch_list = llist_del_all(&cprc->issue_list);
	if (!dispatch_list)
		return;
	dispatch_list = llist_reverse_order(dispatch_list);

	ret = __write_checkpoint_sync(sbi);
	atomic_inc(&cprc->issued_ckpt);

	llist_for_each_entry_safe(req, next, dispatch_list, llnode) {
		diff = (u64)ktime_ms_delta(ktime_get(), req->queue_time);
		req->ret = ret;
		complete(&req->wait);

		sum_diff += diff;
		count++;
	}
	atomic_sub(count, &cprc->queued_ckpt);
	atomic_add(count, &cprc->total_ckpt);

	spin_lock(&cprc->stat_lock);
	cprc->cur_time = (unsigned int)div64_u64(sum_diff, count);
	if (cprc->peak_time < cprc->cur_time)
		cprc->peak_time = cprc->cur_time;
	spin_unlock(&cprc->stat_lock);
}

static int issue_checkpoint_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct ckpt_req_control *cprc = &sbi->cprc_info;
	wait_queue_head_t *q = &cprc->ckpt_wait_queue;
repeat:
	if (kthread_should_stop())
		return 0;

	if (!llist_empty(&cprc->issue_list))
		__checkpoint_and_complete_reqs(sbi);

	wait_event_interruptible(*q,
		kthread_should_stop() || !llist_empty(&cprc->issue_list));
	goto repeat;
}

static void flush_remained_ckpt_reqs(struct f2fs_sb_info *sbi,
		struct ckpt_req *wait_req)
{
	struct ckpt_req_control *cprc = &sbi->cprc_info;

	if (!llist_empty(&cprc->issue_list)) {
		__checkpoint_and_complete_reqs(sbi);
	} else {
		/* already dispatched by issue_checkpoint_thread */
		if (wait_req)
			wait_for_completion(&wait_req->wait);
	}
}

static void init_ckpt_req(struct ckpt_req *req)
{
	memset(req, 0, sizeof(struct ckpt_req));

	init_completion(&req->wait);
	req->queue_time = ktime_get();
}

int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi)
{
	struct ckpt_req_control *cprc = &sbi->cprc_info;
	struct ckpt_req req;
	struct cp_control cpc;

	cpc.reason = __get_cp_reason(sbi);
	if (!test_opt(sbi, MERGE_CHECKPOINT) || cpc.reason != CP_SYNC) {
		int ret;

		f2fs_down_write(&sbi->gc_lock);
		ret = f2fs_write_checkpoint(sbi, &cpc);
		f2fs_up_write(&sbi->gc_lock);

		return ret;
	}

	if (!cprc->f2fs_issue_ckpt)
		return __write_checkpoint_sync(sbi);

	init_ckpt_req(&req);

	llist_add(&req.llnode, &cprc->issue_list);
	atomic_inc(&cprc->queued_ckpt);

	/*
	 * update issue_list before we wake up issue_checkpoint thread,
	 * this smp_mb() pairs with another barrier in ___wait_event(),
	 * see more details in comments of waitqueue_active().
	 */
	smp_mb();

	if (waitqueue_active(&cprc->ckpt_wait_queue))
		wake_up(&cprc->ckpt_wait_queue);

	if (cprc->f2fs_issue_ckpt)
		wait_for_completion(&req.wait);
	else
		flush_remained_ckpt_reqs(sbi, &req);

	return req.ret;
}
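/*
 * Note: with MERGE_CHECKPOINT enabled, concurrent CP_SYNC callers queue a
 * ckpt_req on the lockless issue_list and sleep; the f2fs_ckpt kthread
 * drains the whole list with a single __write_checkpoint_sync() call and
 * completes every waiter with the same result, so N fsync-triggered
 * checkpoints collapse into one.
 */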

int f2fs_start_ckpt_thread(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	struct ckpt_req_control *cprc = &sbi->cprc_info;

	if (cprc->f2fs_issue_ckpt)
		return 0;

	cprc->f2fs_issue_ckpt = kthread_run(issue_checkpoint_thread, sbi,
			"f2fs_ckpt-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(cprc->f2fs_issue_ckpt)) {
		cprc->f2fs_issue_ckpt = NULL;
		return -ENOMEM;
	}

	set_task_ioprio(cprc->f2fs_issue_ckpt, cprc->ckpt_thread_ioprio);

	return 0;
}

void f2fs_stop_ckpt_thread(struct f2fs_sb_info *sbi)
{
	struct ckpt_req_control *cprc = &sbi->cprc_info;

	if (cprc->f2fs_issue_ckpt) {
		struct task_struct *ckpt_task = cprc->f2fs_issue_ckpt;

		cprc->f2fs_issue_ckpt = NULL;
		kthread_stop(ckpt_task);

		flush_remained_ckpt_reqs(sbi, NULL);
	}
}

void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi)
{
	struct ckpt_req_control *cprc = &sbi->cprc_info;

	atomic_set(&cprc->issued_ckpt, 0);
	atomic_set(&cprc->total_ckpt, 0);
	atomic_set(&cprc->queued_ckpt, 0);
	cprc->ckpt_thread_ioprio = DEFAULT_CHECKPOINT_IOPRIO;
	init_waitqueue_head(&cprc->ckpt_wait_queue);
	init_llist_head(&cprc->issue_list);
	spin_lock_init(&cprc->stat_lock);
}