Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0+
/*
 * NILFS segment constructor.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi.
 *
 */

#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/bitops.h>
#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>

#include "nilfs.h"
#include "btnode.h"
#include "page.h"
#include "segment.h"
#include "sufile.h"
#include "cpfile.h"
#include "ifile.h"
#include "segbuf.h"


/*
 * Segment constructor
 */
#define SC_N_INODEVEC	16	/* Size of locally allocated inode vector */

#define SC_MAX_SEGDELTA 64	/*
				 * Upper limit of the number of segments
				 * appended in collection retry loop
				 */

/* Construction mode */
enum {
	SC_LSEG_SR = 1,	/* Make a logical segment having a super root */
	SC_LSEG_DSYNC,	/*
			 * Flush data blocks of a given file and make
			 * a logical segment without a super root.
			 */
	SC_FLUSH_FILE,	/*
			 * Flush data files, leads to segment writes without
			 * creating a checkpoint.
			 */
	SC_FLUSH_DAT,	/*
			 * Flush DAT file. This also creates segments
			 * without a checkpoint.
			 */
};
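
/*
 * Note (a rough, non-authoritative summary of the callers): SC_LSEG_SR is
 * requested by checkpointing writes such as sync(2) and the construction
 * timer, SC_LSEG_DSYNC by fdatasync(2)-style data-only writes, and
 * SC_FLUSH_FILE/SC_FLUSH_DAT by watermark-driven flushes that deliberately
 * skip checkpoint creation.
 */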

/* Stage numbers of dirty block collection */
enum {
	NILFS_ST_INIT = 0,
	NILFS_ST_GC,	/* Collecting dirty blocks for GC */
	NILFS_ST_FILE,
	NILFS_ST_IFILE,
	NILFS_ST_CPFILE,
	NILFS_ST_SUFILE,
	NILFS_ST_DAT,
	NILFS_ST_SR,	/* Super root */
	NILFS_ST_DSYNC,	/* Data sync blocks */
	NILFS_ST_DONE,
};

#define CREATE_TRACE_POINTS
#include <trace/events/nilfs2.h>

/*
 * nilfs_sc_cstage_inc(), nilfs_sc_cstage_set(), and nilfs_sc_cstage_get()
 * are wrappers around the stage count (nilfs_sc_info->sc_stage.scnt).
 * Users of the variable must go through them because every transition of
 * the stage count must emit a trace event
 * (trace_nilfs2_collection_stage_transition).
 *
 * nilfs_sc_cstage_get() isn't required for the above purpose because it
 * doesn't produce tracepoint events. It is provided just for making the
 * intention clear.
 */
static inline void nilfs_sc_cstage_inc(struct nilfs_sc_info *sci)
{
	sci->sc_stage.scnt++;
	trace_nilfs2_collection_stage_transition(sci);
}

static inline void nilfs_sc_cstage_set(struct nilfs_sc_info *sci, int next_scnt)
{
	sci->sc_stage.scnt = next_scnt;
	trace_nilfs2_collection_stage_transition(sci);
}

static inline int nilfs_sc_cstage_get(struct nilfs_sc_info *sci)
{
	return sci->sc_stage.scnt;
}

/* State flags of collection */
#define NILFS_CF_NODE		0x0001	/* Collecting node blocks */
#define NILFS_CF_IFILE_STARTED	0x0002	/* IFILE stage has started */
#define NILFS_CF_SUFREED	0x0004	/* segment usages have been freed */
#define NILFS_CF_HISTORY_MASK	(NILFS_CF_IFILE_STARTED | NILFS_CF_SUFREED)

/* Operations depending on the construction mode and file type */
struct nilfs_sc_operations {
	int (*collect_data)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	int (*collect_node)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	int (*collect_bmap)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	void (*write_data_binfo)(struct nilfs_sc_info *,
				 struct nilfs_segsum_pointer *,
				 union nilfs_binfo *);
	void (*write_node_binfo)(struct nilfs_sc_info *,
				 struct nilfs_segsum_pointer *,
				 union nilfs_binfo *);
};

/*
 * Other definitions
 */
static void nilfs_segctor_start_timer(struct nilfs_sc_info *);
static void nilfs_segctor_do_flush(struct nilfs_sc_info *, int);
static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *);
static void nilfs_dispose_list(struct the_nilfs *, struct list_head *, int);

#define nilfs_cnt32_ge(a, b)	\
	(typecheck(__u32, a) && typecheck(__u32, b) &&	\
	 ((__s32)(a) - (__s32)(b) >= 0))
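
/*
 * Example (illustrative only): with 32-bit wraparound,
 * nilfs_cnt32_ge(0x00000001, 0xffffffff) evaluates the signed difference
 * 1 - (-1) = 2 >= 0, so the wrapped counter 0x00000001 is treated as
 * "newer", which a plain unsigned comparison would get wrong.
 */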

static int nilfs_prepare_segment_lock(struct super_block *sb,
				      struct nilfs_transaction_info *ti)
{
	struct nilfs_transaction_info *cur_ti = current->journal_info;
	void *save = NULL;

	if (cur_ti) {
		if (cur_ti->ti_magic == NILFS_TI_MAGIC)
			return ++cur_ti->ti_count;

		/*
		 * If the journal_info field is occupied by another FS,
		 * it is saved and will be restored on
		 * nilfs_transaction_commit().
		 */
		nilfs_warn(sb, "journal info from a different FS");
		save = current->journal_info;
	}
	if (!ti) {
		ti = kmem_cache_alloc(nilfs_transaction_cachep, GFP_NOFS);
		if (!ti)
			return -ENOMEM;
		ti->ti_flags = NILFS_TI_DYNAMIC_ALLOC;
	} else {
		ti->ti_flags = 0;
	}
	ti->ti_count = 0;
	ti->ti_save = save;
	ti->ti_magic = NILFS_TI_MAGIC;
	current->journal_info = ti;
	return 0;
}

/**
 * nilfs_transaction_begin - start indivisible file operations.
 * @sb: super block
 * @ti: nilfs_transaction_info
 * @vacancy_check: flags for vacancy rate checks
 *
 * nilfs_transaction_begin() acquires a reader/writer semaphore, called
 * the segment semaphore, to make a segment construction and write tasks
 * exclusive. The function is used with nilfs_transaction_commit() in pairs.
 * The region enclosed by these two functions can be nested. To avoid a
 * deadlock, the semaphore is only acquired or released in the outermost call.
 *
 * This function allocates a nilfs_transaction_info struct to keep context
 * information on it. It is initialized and hooked onto the current task in
 * the outermost call. If a pre-allocated struct is given to @ti, it is used
 * instead; otherwise a new struct is assigned from a slab.
 *
 * When the @vacancy_check flag is set, this function will check the amount
 * of free space, and will wait for the GC to reclaim disk space if capacity
 * is low.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-ENOSPC - No space left on device
 */
int nilfs_transaction_begin(struct super_block *sb,
			    struct nilfs_transaction_info *ti,
			    int vacancy_check)
{
	struct the_nilfs *nilfs;
	int ret = nilfs_prepare_segment_lock(sb, ti);
	struct nilfs_transaction_info *trace_ti;

	if (unlikely(ret < 0))
		return ret;
	if (ret > 0) {
		trace_ti = current->journal_info;

		trace_nilfs2_transaction_transition(sb, trace_ti,
				trace_ti->ti_count, trace_ti->ti_flags,
				TRACE_NILFS2_TRANSACTION_BEGIN);
		return 0;
	}

	sb_start_intwrite(sb);

	nilfs = sb->s_fs_info;
	down_read(&nilfs->ns_segctor_sem);
	if (vacancy_check && nilfs_near_disk_full(nilfs)) {
		up_read(&nilfs->ns_segctor_sem);
		ret = -ENOSPC;
		goto failed;
	}

	trace_ti = current->journal_info;
	trace_nilfs2_transaction_transition(sb, trace_ti, trace_ti->ti_count,
					    trace_ti->ti_flags,
					    TRACE_NILFS2_TRANSACTION_BEGIN);
	return 0;

 failed:
	ti = current->journal_info;
	current->journal_info = ti->ti_save;
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	sb_end_intwrite(sb);
	return ret;
}
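
/*
 * Typical caller pattern (an illustrative sketch, not a verbatim excerpt
 * from a real call site): a file operation wraps its updates in a
 * begin/commit pair so that they land in the same logical segment.
 *
 *	struct nilfs_transaction_info ti;
 *	int err;
 *
 *	err = nilfs_transaction_begin(sb, &ti, 1);
 *	if (err)
 *		return err;
 *	... mark blocks and inodes dirty ...
 *	return nilfs_transaction_commit(sb);
 *
 * If an error occurs in the middle, nilfs_transaction_abort(sb) must be
 * called instead of the commit.
 */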

/**
 * nilfs_transaction_commit - commit indivisible file operations.
 * @sb: super block
 *
 * nilfs_transaction_commit() releases the read semaphore which is
 * acquired by nilfs_transaction_begin(). This is only performed
 * in the outermost call of this function. If a commit flag is set,
 * nilfs_transaction_commit() sets a timer to start the segment
 * constructor. If a sync flag is set, it starts construction
 * directly.
 */
int nilfs_transaction_commit(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;
	int err = 0;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	ti->ti_flags |= NILFS_TI_COMMIT;
	if (ti->ti_count > 0) {
		ti->ti_count--;
		trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);
		return 0;
	}
	if (nilfs->ns_writer) {
		struct nilfs_sc_info *sci = nilfs->ns_writer;

		if (ti->ti_flags & NILFS_TI_COMMIT)
			nilfs_segctor_start_timer(sci);
		if (atomic_read(&nilfs->ns_ndirtyblks) > sci->sc_watermark)
			nilfs_segctor_do_flush(sci, 0);
	}
	up_read(&nilfs->ns_segctor_sem);
	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);

	current->journal_info = ti->ti_save;

	if (ti->ti_flags & NILFS_TI_SYNC)
		err = nilfs_construct_segment(sb);
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	sb_end_intwrite(sb);
	return err;
}

void nilfs_transaction_abort(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	if (ti->ti_count > 0) {
		ti->ti_count--;
		trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);
		return;
	}
	up_read(&nilfs->ns_segctor_sem);

	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
		    ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);

	current->journal_info = ti->ti_save;
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	sb_end_intwrite(sb);
}

void nilfs_relax_pressure_in_lock(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;

	if (sb_rdonly(sb) || unlikely(!sci) || !sci->sc_flush_request)
		return;

	set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
	up_read(&nilfs->ns_segctor_sem);

	down_write(&nilfs->ns_segctor_sem);
	if (sci->sc_flush_request &&
	    test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags)) {
		struct nilfs_transaction_info *ti = current->journal_info;

		ti->ti_flags |= NILFS_TI_WRITER;
		nilfs_segctor_do_immediate_flush(sci);
		ti->ti_flags &= ~NILFS_TI_WRITER;
	}
	downgrade_write(&nilfs->ns_segctor_sem);
}

static void nilfs_transaction_lock(struct super_block *sb,
				   struct nilfs_transaction_info *ti,
				   int gcflag)
{
	struct nilfs_transaction_info *cur_ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;

	WARN_ON(cur_ti);
	ti->ti_flags = NILFS_TI_WRITER;
	ti->ti_count = 0;
	ti->ti_save = cur_ti;
	ti->ti_magic = NILFS_TI_MAGIC;
	current->journal_info = ti;

	for (;;) {
		trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_TRYLOCK);

		down_write(&nilfs->ns_segctor_sem);
		if (!test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags))
			break;

		nilfs_segctor_do_immediate_flush(sci);

		up_write(&nilfs->ns_segctor_sem);
		cond_resched();
	}
	if (gcflag)
		ti->ti_flags |= NILFS_TI_GC;

	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_LOCK);
}

static void nilfs_transaction_unlock(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	BUG_ON(ti->ti_count > 0);

	up_write(&nilfs->ns_segctor_sem);
	current->journal_info = ti->ti_save;

	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_UNLOCK);
}

static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
					    struct nilfs_segsum_pointer *ssp,
					    unsigned int bytes)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	unsigned int blocksize = sci->sc_super->s_blocksize;
	void *p;

	if (unlikely(ssp->offset + bytes > blocksize)) {
		ssp->offset = 0;
		BUG_ON(NILFS_SEGBUF_BH_IS_LAST(ssp->bh,
					       &segbuf->sb_segsum_buffers));
		ssp->bh = NILFS_SEGBUF_NEXT_BH(ssp->bh);
	}
	p = ssp->bh->b_data + ssp->offset;
	ssp->offset += bytes;
	return p;
}

/**
 * nilfs_segctor_reset_segment_buffer - reset the current segment buffer
 * @sci: nilfs_sc_info
 */
static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	struct buffer_head *sumbh;
	unsigned int sumbytes;
	unsigned int flags = 0;
	int err;

	if (nilfs_doing_gc())
		flags = NILFS_SS_GC;
	err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime, sci->sc_cno);
	if (unlikely(err))
		return err;

	sumbh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
	sumbytes = segbuf->sb_sum.sumbytes;
	sci->sc_finfo_ptr.bh = sumbh;
	sci->sc_finfo_ptr.offset = sumbytes;
	sci->sc_binfo_ptr.bh = sumbh;
	sci->sc_binfo_ptr.offset = sumbytes;
	sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
	return 0;
}

/**
 * nilfs_segctor_zeropad_segsum - zero pad the rest of the segment summary area
 * @sci: segment constructor object
 *
 * nilfs_segctor_zeropad_segsum() zero-fills unallocated space at the end of
 * the current segment summary block.
 */
static void nilfs_segctor_zeropad_segsum(struct nilfs_sc_info *sci)
{
	struct nilfs_segsum_pointer *ssp;

	ssp = sci->sc_blk_cnt > 0 ? &sci->sc_binfo_ptr : &sci->sc_finfo_ptr;
	if (ssp->offset < ssp->bh->b_size)
		memset(ssp->bh->b_data + ssp->offset, 0,
		       ssp->bh->b_size - ssp->offset);
}

static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
{
	sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
	if (NILFS_SEGBUF_IS_LAST(sci->sc_curseg, &sci->sc_segbufs))
		return -E2BIG; /*
				* The current segment is filled up
				* (internal code)
				*/
	nilfs_segctor_zeropad_segsum(sci);
	sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
	return nilfs_segctor_reset_segment_buffer(sci);
}

static int nilfs_segctor_add_super_root(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	int err;

	if (segbuf->sb_sum.nblocks >= segbuf->sb_rest_blocks) {
		err = nilfs_segctor_feed_segment(sci);
		if (err)
			return err;
		segbuf = sci->sc_curseg;
	}
	err = nilfs_segbuf_extend_payload(segbuf, &segbuf->sb_super_root);
	if (likely(!err))
		segbuf->sb_sum.flags |= NILFS_SS_SR;
	return err;
}

/*
 * Functions for making segment summary and payloads
 */
static int nilfs_segctor_segsum_block_required(
	struct nilfs_sc_info *sci, const struct nilfs_segsum_pointer *ssp,
	unsigned int binfo_size)
{
	unsigned int blocksize = sci->sc_super->s_blocksize;
	/* The sizes of finfo and binfo are small enough compared to blocksize */

	return ssp->offset + binfo_size +
		(!sci->sc_blk_cnt ? sizeof(struct nilfs_finfo) : 0) >
		blocksize;
}
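
/*
 * Worked example (illustrative numbers): with a 4096-byte block size,
 * ssp->offset == 4080, binfo_size == 8, and sc_blk_cnt == 0, the check is
 * 4080 + 8 + sizeof(struct nilfs_finfo) > 4096, which holds, so an extra
 * summary block is required before the finfo and binfo can be mapped.
 */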

static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci,
				      struct inode *inode)
{
	sci->sc_curseg->sb_sum.nfinfo++;
	sci->sc_binfo_ptr = sci->sc_finfo_ptr;
	nilfs_segctor_map_segsum_entry(
		sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));

	if (NILFS_I(inode)->i_root &&
	    !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
		set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
	/* skip finfo */
}

static void nilfs_segctor_end_finfo(struct nilfs_sc_info *sci,
				    struct inode *inode)
{
	struct nilfs_finfo *finfo;
	struct nilfs_inode_info *ii;
	struct nilfs_segment_buffer *segbuf;
	__u64 cno;

	if (sci->sc_blk_cnt == 0)
		return;

	ii = NILFS_I(inode);

	if (test_bit(NILFS_I_GCINODE, &ii->i_state))
		cno = ii->i_cno;
	else if (NILFS_ROOT_METADATA_FILE(inode->i_ino))
		cno = 0;
	else
		cno = sci->sc_cno;

	finfo = nilfs_segctor_map_segsum_entry(sci, &sci->sc_finfo_ptr,
					       sizeof(*finfo));
	finfo->fi_ino = cpu_to_le64(inode->i_ino);
	finfo->fi_nblocks = cpu_to_le32(sci->sc_blk_cnt);
	finfo->fi_ndatablk = cpu_to_le32(sci->sc_datablk_cnt);
	finfo->fi_cno = cpu_to_le64(cno);

	segbuf = sci->sc_curseg;
	segbuf->sb_sum.sumbytes = sci->sc_binfo_ptr.offset +
		sci->sc_super->s_blocksize * (segbuf->sb_sum.nsumblk - 1);
	sci->sc_finfo_ptr = sci->sc_binfo_ptr;
	sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
}

static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
					struct buffer_head *bh,
					struct inode *inode,
					unsigned int binfo_size)
{
	struct nilfs_segment_buffer *segbuf;
	int required, err = 0;

 retry:
	segbuf = sci->sc_curseg;
	required = nilfs_segctor_segsum_block_required(
		sci, &sci->sc_binfo_ptr, binfo_size);
	if (segbuf->sb_sum.nblocks + required + 1 > segbuf->sb_rest_blocks) {
		nilfs_segctor_end_finfo(sci, inode);
		err = nilfs_segctor_feed_segment(sci);
		if (err)
			return err;
		goto retry;
	}
	if (unlikely(required)) {
		nilfs_segctor_zeropad_segsum(sci);
		err = nilfs_segbuf_extend_segsum(segbuf);
		if (unlikely(err))
			goto failed;
	}
	if (sci->sc_blk_cnt == 0)
		nilfs_segctor_begin_finfo(sci, inode);

	nilfs_segctor_map_segsum_entry(sci, &sci->sc_binfo_ptr, binfo_size);
	/* Substitution to vblocknr is delayed until update_blocknr() */
	nilfs_segbuf_add_file_buffer(segbuf, bh);
	sci->sc_blk_cnt++;
 failed:
	return err;
}

/*
 * Callback functions that enumerate, mark, and collect dirty blocks
 */
static int nilfs_collect_file_data(struct nilfs_sc_info *sci,
				   struct buffer_head *bh, struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (err < 0)
		return err;

	err = nilfs_segctor_add_file_block(sci, bh, inode,
					   sizeof(struct nilfs_binfo_v));
	if (!err)
		sci->sc_datablk_cnt++;
	return err;
}

static int nilfs_collect_file_node(struct nilfs_sc_info *sci,
				   struct buffer_head *bh,
				   struct inode *inode)
{
	return nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
}

static int nilfs_collect_file_bmap(struct nilfs_sc_info *sci,
				   struct buffer_head *bh,
				   struct inode *inode)
{
	WARN_ON(!buffer_dirty(bh));
	return nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
}

static void nilfs_write_file_data_binfo(struct nilfs_sc_info *sci,
					struct nilfs_segsum_pointer *ssp,
					union nilfs_binfo *binfo)
{
	struct nilfs_binfo_v *binfo_v = nilfs_segctor_map_segsum_entry(
		sci, ssp, sizeof(*binfo_v));
	*binfo_v = binfo->bi_v;
}

static void nilfs_write_file_node_binfo(struct nilfs_sc_info *sci,
					struct nilfs_segsum_pointer *ssp,
					union nilfs_binfo *binfo)
{
	__le64 *vblocknr = nilfs_segctor_map_segsum_entry(
		sci, ssp, sizeof(*vblocknr));
	*vblocknr = binfo->bi_v.bi_vblocknr;
}

static const struct nilfs_sc_operations nilfs_sc_file_ops = {
	.collect_data = nilfs_collect_file_data,
	.collect_node = nilfs_collect_file_node,
	.collect_bmap = nilfs_collect_file_bmap,
	.write_data_binfo = nilfs_write_file_data_binfo,
	.write_node_binfo = nilfs_write_file_node_binfo,
};

static int nilfs_collect_dat_data(struct nilfs_sc_info *sci,
				  struct buffer_head *bh, struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (err < 0)
		return err;

	err = nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
	if (!err)
		sci->sc_datablk_cnt++;
	return err;
}

static int nilfs_collect_dat_bmap(struct nilfs_sc_info *sci,
				  struct buffer_head *bh, struct inode *inode)
{
	WARN_ON(!buffer_dirty(bh));
	return nilfs_segctor_add_file_block(sci, bh, inode,
					    sizeof(struct nilfs_binfo_dat));
}

static void nilfs_write_dat_data_binfo(struct nilfs_sc_info *sci,
				       struct nilfs_segsum_pointer *ssp,
				       union nilfs_binfo *binfo)
{
	__le64 *blkoff = nilfs_segctor_map_segsum_entry(sci, ssp,
							sizeof(*blkoff));
	*blkoff = binfo->bi_dat.bi_blkoff;
}

static void nilfs_write_dat_node_binfo(struct nilfs_sc_info *sci,
				       struct nilfs_segsum_pointer *ssp,
				       union nilfs_binfo *binfo)
{
	struct nilfs_binfo_dat *binfo_dat =
		nilfs_segctor_map_segsum_entry(sci, ssp, sizeof(*binfo_dat));
	*binfo_dat = binfo->bi_dat;
}

static const struct nilfs_sc_operations nilfs_sc_dat_ops = {
	.collect_data = nilfs_collect_dat_data,
	.collect_node = nilfs_collect_file_node,
	.collect_bmap = nilfs_collect_dat_bmap,
	.write_data_binfo = nilfs_write_dat_data_binfo,
	.write_node_binfo = nilfs_write_dat_node_binfo,
};

static const struct nilfs_sc_operations nilfs_sc_dsync_ops = {
	.collect_data = nilfs_collect_file_data,
	.collect_node = NULL,
	.collect_bmap = NULL,
	.write_data_binfo = nilfs_write_file_data_binfo,
	.write_node_binfo = NULL,
};
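
/*
 * A rough summary of how the tables above are dispatched (see the
 * nilfs_segctor_scan_file() callers and
 * nilfs_segctor_update_payload_blocknr()): nilfs_sc_file_ops handles
 * regular files and most metadata files, nilfs_sc_dat_ops handles the DAT
 * file (whose binfo records raw block offsets rather than virtual block
 * numbers), and nilfs_sc_dsync_ops handles data-only sync, which collects
 * neither node nor bmap blocks.
 */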

static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
					      struct list_head *listp,
					      size_t nlimit,
					      loff_t start, loff_t end)
{
	struct address_space *mapping = inode->i_mapping;
	struct folio_batch fbatch;
	pgoff_t index = 0, last = ULONG_MAX;
	size_t ndirties = 0;
	int i;

	if (unlikely(start != 0 || end != LLONG_MAX)) {
		/*
		 * A valid range is given for sync-ing data pages. The
		 * range is rounded to page boundaries; extra dirty buffers
		 * may be included if blocksize < pagesize.
		 */
		index = start >> PAGE_SHIFT;
		last = end >> PAGE_SHIFT;
	}
	folio_batch_init(&fbatch);
 repeat:
	if (unlikely(index > last) ||
	    !filemap_get_folios_tag(mapping, &index, last,
				    PAGECACHE_TAG_DIRTY, &fbatch))
		return ndirties;

	for (i = 0; i < folio_batch_count(&fbatch); i++) {
		struct buffer_head *bh, *head;
		struct folio *folio = fbatch.folios[i];

		folio_lock(folio);
		if (unlikely(folio->mapping != mapping)) {
			/* Exclude folios removed from the address space */
			folio_unlock(folio);
			continue;
		}
		head = folio_buffers(folio);
		if (!head)
			head = create_empty_buffers(folio,
						    i_blocksize(inode), 0);
		folio_unlock(folio);

		bh = head;
		do {
			if (!buffer_dirty(bh) || buffer_async_write(bh))
				continue;
			get_bh(bh);
			list_add_tail(&bh->b_assoc_buffers, listp);
			ndirties++;
			if (unlikely(ndirties >= nlimit)) {
				folio_batch_release(&fbatch);
				cond_resched();
				return ndirties;
			}
		} while (bh = bh->b_this_page, bh != head);
	}
	folio_batch_release(&fbatch);
	cond_resched();
	goto repeat;
}

static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
					    struct list_head *listp)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *btnc_inode = ii->i_assoc_inode;
	struct folio_batch fbatch;
	struct buffer_head *bh, *head;
	unsigned int i;
	pgoff_t index = 0;

	if (!btnc_inode)
		return;
	folio_batch_init(&fbatch);

	while (filemap_get_folios_tag(btnc_inode->i_mapping, &index,
				(pgoff_t)-1, PAGECACHE_TAG_DIRTY, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			bh = head = folio_buffers(fbatch.folios[i]);
			do {
				if (buffer_dirty(bh) &&
				    !buffer_async_write(bh)) {
					get_bh(bh);
					list_add_tail(&bh->b_assoc_buffers,
						      listp);
				}
				bh = bh->b_this_page;
			} while (bh != head);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
}

static void nilfs_dispose_list(struct the_nilfs *nilfs,
			       struct list_head *head, int force)
{
	struct nilfs_inode_info *ii, *n;
	struct nilfs_inode_info *ivec[SC_N_INODEVEC], **pii;
	unsigned int nv = 0;

	while (!list_empty(head)) {
		spin_lock(&nilfs->ns_inode_lock);
		list_for_each_entry_safe(ii, n, head, i_dirty) {
			list_del_init(&ii->i_dirty);
			if (force) {
				if (unlikely(ii->i_bh)) {
					brelse(ii->i_bh);
					ii->i_bh = NULL;
				}
			} else if (test_bit(NILFS_I_DIRTY, &ii->i_state)) {
				set_bit(NILFS_I_QUEUED, &ii->i_state);
				list_add_tail(&ii->i_dirty,
					      &nilfs->ns_dirty_files);
				continue;
			}
			ivec[nv++] = ii;
			if (nv == SC_N_INODEVEC)
				break;
		}
		spin_unlock(&nilfs->ns_inode_lock);

		for (pii = ivec; nv > 0; pii++, nv--)
			iput(&(*pii)->vfs_inode);
	}
}

static void nilfs_iput_work_func(struct work_struct *work)
{
	struct nilfs_sc_info *sci = container_of(work, struct nilfs_sc_info,
						 sc_iput_work);
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;

	nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 0);
}

static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs,
				     struct nilfs_root *root)
{
	int ret = 0;

	if (nilfs_mdt_fetch_dirty(root->ifile))
		ret++;
	if (nilfs_mdt_fetch_dirty(nilfs->ns_cpfile))
		ret++;
	if (nilfs_mdt_fetch_dirty(nilfs->ns_sufile))
		ret++;
	if ((ret || nilfs_doing_gc()) && nilfs_mdt_fetch_dirty(nilfs->ns_dat))
		ret++;
	return ret;
}

static int nilfs_segctor_clean(struct nilfs_sc_info *sci)
{
	return list_empty(&sci->sc_dirty_files) &&
		!test_bit(NILFS_SC_DIRTY, &sci->sc_flags) &&
		sci->sc_nfreesegs == 0 &&
		(!nilfs_doing_gc() || list_empty(&sci->sc_gc_inodes));
}

static int nilfs_segctor_confirm(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int ret = 0;

	if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
		set_bit(NILFS_SC_DIRTY, &sci->sc_flags);

	spin_lock(&nilfs->ns_inode_lock);
	if (list_empty(&nilfs->ns_dirty_files) && nilfs_segctor_clean(sci))
		ret++;

	spin_unlock(&nilfs->ns_inode_lock);
	return ret;
}

static void nilfs_segctor_clear_metadata_dirty(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;

	nilfs_mdt_clear_dirty(sci->sc_root->ifile);
	nilfs_mdt_clear_dirty(nilfs->ns_cpfile);
	nilfs_mdt_clear_dirty(nilfs->ns_sufile);
	nilfs_mdt_clear_dirty(nilfs->ns_dat);
}

static void nilfs_fill_in_file_bmap(struct inode *ifile,
				    struct nilfs_inode_info *ii)
{
	struct buffer_head *ibh;
	struct nilfs_inode *raw_inode;

	if (test_bit(NILFS_I_BMAP, &ii->i_state)) {
		ibh = ii->i_bh;
		BUG_ON(!ibh);
		raw_inode = nilfs_ifile_map_inode(ifile, ii->vfs_inode.i_ino,
						  ibh);
		nilfs_bmap_write(ii->i_bmap, raw_inode);
		nilfs_ifile_unmap_inode(raw_inode);
	}
}

static void nilfs_segctor_fill_in_file_bmap(struct nilfs_sc_info *sci)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, &sci->sc_dirty_files, i_dirty) {
		nilfs_fill_in_file_bmap(sci->sc_root->ifile, ii);
		set_bit(NILFS_I_COLLECTED, &ii->i_state);
	}
}

/**
 * nilfs_write_root_mdt_inode - export root metadata inode information to
 * the on-disk inode
 * @inode: inode object of the root metadata file
 * @raw_inode: on-disk inode
 *
 * nilfs_write_root_mdt_inode() writes inode information and bmap data of
 * @inode to the inode area of the metadata file allocated on the super root
 * block created to finalize the log. Since super root blocks are configured
 * each time, this function zero-fills the unused area of @raw_inode.
 */
static void nilfs_write_root_mdt_inode(struct inode *inode,
				       struct nilfs_inode *raw_inode)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

	nilfs_write_inode_common(inode, raw_inode);

	/* zero-fill unused portion of raw_inode */
	raw_inode->i_xattr = 0;
	raw_inode->i_pad = 0;
	memset((void *)raw_inode + sizeof(*raw_inode), 0,
	       nilfs->ns_inode_size - sizeof(*raw_inode));

	nilfs_bmap_write(NILFS_I(inode)->i_bmap, raw_inode);
}

static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs)
{
	struct buffer_head *bh_sr;
	struct nilfs_super_root *raw_sr;
	unsigned int isz, srsz;

	bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;

	lock_buffer(bh_sr);
	raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
	isz = nilfs->ns_inode_size;
	srsz = NILFS_SR_BYTES(isz);

	raw_sr->sr_sum = 0;  /* Ensure initialization within this update */
	raw_sr->sr_bytes = cpu_to_le16(srsz);
	raw_sr->sr_nongc_ctime
		= cpu_to_le64(nilfs_doing_gc() ?
			      nilfs->ns_nongc_ctime : sci->sc_seg_ctime);
	raw_sr->sr_flags = 0;

	nilfs_write_root_mdt_inode(nilfs->ns_dat, (void *)raw_sr +
				   NILFS_SR_DAT_OFFSET(isz));
	nilfs_write_root_mdt_inode(nilfs->ns_cpfile, (void *)raw_sr +
				   NILFS_SR_CPFILE_OFFSET(isz));
	nilfs_write_root_mdt_inode(nilfs->ns_sufile, (void *)raw_sr +
				   NILFS_SR_SUFILE_OFFSET(isz));

	memset((void *)raw_sr + srsz, 0, nilfs->ns_blocksize - srsz);
	set_buffer_uptodate(bh_sr);
	unlock_buffer(bh_sr);
}

static void nilfs_redirty_inodes(struct list_head *head)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, head, i_dirty) {
		if (test_bit(NILFS_I_COLLECTED, &ii->i_state))
			clear_bit(NILFS_I_COLLECTED, &ii->i_state);
	}
}

static void nilfs_drop_collected_inodes(struct list_head *head)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, head, i_dirty) {
		if (!test_and_clear_bit(NILFS_I_COLLECTED, &ii->i_state))
			continue;

		clear_bit(NILFS_I_INODE_SYNC, &ii->i_state);
		set_bit(NILFS_I_UPDATED, &ii->i_state);
	}
}

static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci,
				       struct inode *inode,
				       struct list_head *listp,
				       int (*collect)(struct nilfs_sc_info *,
						      struct buffer_head *,
						      struct inode *))
{
	struct buffer_head *bh, *n;
	int err = 0;

	if (collect) {
		list_for_each_entry_safe(bh, n, listp, b_assoc_buffers) {
			list_del_init(&bh->b_assoc_buffers);
			err = collect(sci, bh, inode);
			brelse(bh);
			if (unlikely(err))
				goto dispose_buffers;
		}
		return 0;
	}

 dispose_buffers:
	while (!list_empty(listp)) {
		bh = list_first_entry(listp, struct buffer_head,
				      b_assoc_buffers);
		list_del_init(&bh->b_assoc_buffers);
		brelse(bh);
	}
	return err;
}

static size_t nilfs_segctor_buffer_rest(struct nilfs_sc_info *sci)
{
	/* Remaining number of blocks within segment buffer */
	return sci->sc_segbuf_nblocks -
		(sci->sc_nblk_this_inc + sci->sc_curseg->sb_sum.nblocks);
}

static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci,
				   struct inode *inode,
				   const struct nilfs_sc_operations *sc_ops)
{
	LIST_HEAD(data_buffers);
	LIST_HEAD(node_buffers);
	int err;

	if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
		size_t n, rest = nilfs_segctor_buffer_rest(sci);

		n = nilfs_lookup_dirty_data_buffers(
			inode, &data_buffers, rest + 1, 0, LLONG_MAX);
		if (n > rest) {
			err = nilfs_segctor_apply_buffers(
				sci, inode, &data_buffers,
				sc_ops->collect_data);
			BUG_ON(!err); /* always receive -E2BIG or true error */
			goto break_or_fail;
		}
	}
	nilfs_lookup_dirty_node_buffers(inode, &node_buffers);

	if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
		err = nilfs_segctor_apply_buffers(
			sci, inode, &data_buffers, sc_ops->collect_data);
		if (unlikely(err)) {
			/* dispose node list */
			nilfs_segctor_apply_buffers(
				sci, inode, &node_buffers, NULL);
			goto break_or_fail;
		}
		sci->sc_stage.flags |= NILFS_CF_NODE;
	}
	/* Collect node */
	err = nilfs_segctor_apply_buffers(
		sci, inode, &node_buffers, sc_ops->collect_node);
	if (unlikely(err))
		goto break_or_fail;

	nilfs_bmap_lookup_dirty_buffers(NILFS_I(inode)->i_bmap, &node_buffers);
	err = nilfs_segctor_apply_buffers(
		sci, inode, &node_buffers, sc_ops->collect_bmap);
	if (unlikely(err))
		goto break_or_fail;

	nilfs_segctor_end_finfo(sci, inode);
	sci->sc_stage.flags &= ~NILFS_CF_NODE;

 break_or_fail:
	return err;
}

static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci,
					 struct inode *inode)
{
	LIST_HEAD(data_buffers);
	size_t n, rest = nilfs_segctor_buffer_rest(sci);
	int err;

	n = nilfs_lookup_dirty_data_buffers(inode, &data_buffers, rest + 1,
					    sci->sc_dsync_start,
					    sci->sc_dsync_end);

	err = nilfs_segctor_apply_buffers(sci, inode, &data_buffers,
					  nilfs_collect_file_data);
	if (!err) {
		nilfs_segctor_end_finfo(sci, inode);
		BUG_ON(n > rest);
		/* always receive -E2BIG or true error if n > rest */
	}
	return err;
}

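/*
 * The switch in nilfs_segctor_collect_blocks() below implements a resumable
 * state machine: each stage falls through to the next, and when a stage
 * fails (typically with -E2BIG because the segment buffers filled up), the
 * position within the stage is recorded in sc_stage so that a retry with
 * extended segments can continue where it left off. SC_FLUSH_DAT jumps
 * directly to NILFS_ST_DAT and SC_LSEG_DSYNC to NILFS_ST_DSYNC, skipping
 * checkpoint creation.
 */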
static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	struct list_head *head;
	struct nilfs_inode_info *ii;
	size_t ndone;
	int err = 0;

	switch (nilfs_sc_cstage_get(sci)) {
	case NILFS_ST_INIT:
		/* Pre-processes */
		sci->sc_stage.flags = 0;

		if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags)) {
			sci->sc_nblk_inc = 0;
			sci->sc_curseg->sb_sum.flags = NILFS_SS_LOGBGN;
			if (mode == SC_LSEG_DSYNC) {
				nilfs_sc_cstage_set(sci, NILFS_ST_DSYNC);
				goto dsync_mode;
			}
		}

		sci->sc_stage.dirty_file_ptr = NULL;
		sci->sc_stage.gc_inode_ptr = NULL;
		if (mode == SC_FLUSH_DAT) {
			nilfs_sc_cstage_set(sci, NILFS_ST_DAT);
			goto dat_stage;
		}
		nilfs_sc_cstage_inc(sci);
		fallthrough;
	case NILFS_ST_GC:
		if (nilfs_doing_gc()) {
			head = &sci->sc_gc_inodes;
			ii = list_prepare_entry(sci->sc_stage.gc_inode_ptr,
						head, i_dirty);
			list_for_each_entry_continue(ii, head, i_dirty) {
				err = nilfs_segctor_scan_file(
					sci, &ii->vfs_inode,
					&nilfs_sc_file_ops);
				if (unlikely(err)) {
					sci->sc_stage.gc_inode_ptr = list_entry(
						ii->i_dirty.prev,
						struct nilfs_inode_info,
						i_dirty);
					goto break_or_fail;
				}
				set_bit(NILFS_I_COLLECTED, &ii->i_state);
			}
			sci->sc_stage.gc_inode_ptr = NULL;
		}
		nilfs_sc_cstage_inc(sci);
		fallthrough;
	case NILFS_ST_FILE:
		head = &sci->sc_dirty_files;
		ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head,
					i_dirty);
		list_for_each_entry_continue(ii, head, i_dirty) {
			clear_bit(NILFS_I_DIRTY, &ii->i_state);

			err = nilfs_segctor_scan_file(sci, &ii->vfs_inode,
						      &nilfs_sc_file_ops);
			if (unlikely(err)) {
				sci->sc_stage.dirty_file_ptr =
					list_entry(ii->i_dirty.prev,
						   struct nilfs_inode_info,
						   i_dirty);
				goto break_or_fail;
			}
			/* sci->sc_stage.dirty_file_ptr = NILFS_I(inode); */
			/* XXX: required ? */
		}
		sci->sc_stage.dirty_file_ptr = NULL;
		if (mode == SC_FLUSH_FILE) {
			nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
			return 0;
		}
		nilfs_sc_cstage_inc(sci);
		sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED;
		fallthrough;
	case NILFS_ST_IFILE:
		err = nilfs_segctor_scan_file(sci, sci->sc_root->ifile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		nilfs_sc_cstage_inc(sci);
		/* Creating a checkpoint */
		err = nilfs_cpfile_create_checkpoint(nilfs->ns_cpfile,
						     nilfs->ns_cno);
		if (unlikely(err))
			break;
		fallthrough;
	case NILFS_ST_CPFILE:
		err = nilfs_segctor_scan_file(sci, nilfs->ns_cpfile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		nilfs_sc_cstage_inc(sci);
		fallthrough;
	case NILFS_ST_SUFILE:
		err = nilfs_sufile_freev(nilfs->ns_sufile, sci->sc_freesegs,
					 sci->sc_nfreesegs, &ndone);
		if (unlikely(err)) {
			nilfs_sufile_cancel_freev(nilfs->ns_sufile,
						  sci->sc_freesegs, ndone,
						  NULL);
			break;
		}
		sci->sc_stage.flags |= NILFS_CF_SUFREED;

		err = nilfs_segctor_scan_file(sci, nilfs->ns_sufile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		nilfs_sc_cstage_inc(sci);
		fallthrough;
	case NILFS_ST_DAT:
 dat_stage:
		err = nilfs_segctor_scan_file(sci, nilfs->ns_dat,
					      &nilfs_sc_dat_ops);
		if (unlikely(err))
			break;
		if (mode == SC_FLUSH_DAT) {
			nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
			return 0;
		}
		nilfs_sc_cstage_inc(sci);
		fallthrough;
	case NILFS_ST_SR:
		if (mode == SC_LSEG_SR) {
			/* Appending a super root */
			err = nilfs_segctor_add_super_root(sci);
			if (unlikely(err))
				break;
		}
		/* End of a logical segment */
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
		nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
		return 0;
	case NILFS_ST_DSYNC:
 dsync_mode:
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_SYNDT;
		ii = sci->sc_dsync_inode;
		if (!test_bit(NILFS_I_BUSY, &ii->i_state))
			break;

		err = nilfs_segctor_scan_file_dsync(sci, &ii->vfs_inode);
		if (unlikely(err))
			break;
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
		nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
		return 0;
	case NILFS_ST_DONE:
		return 0;
	default:
		BUG();
	}

 break_or_fail:
	return err;
}

/**
 * nilfs_segctor_begin_construction - setup segment buffer to make a new log
 * @sci: nilfs_sc_info
 * @nilfs: nilfs object
 */
static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci,
					    struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	__u64 nextnum;
	int err, alloc = 0;

	segbuf = nilfs_segbuf_new(sci->sc_super);
	if (unlikely(!segbuf))
		return -ENOMEM;

	if (list_empty(&sci->sc_write_logs)) {
		nilfs_segbuf_map(segbuf, nilfs->ns_segnum,
				 nilfs->ns_pseg_offset, nilfs);
		if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
			nilfs_shift_to_next_segment(nilfs);
			nilfs_segbuf_map(segbuf, nilfs->ns_segnum, 0, nilfs);
		}

		segbuf->sb_sum.seg_seq = nilfs->ns_seg_seq;
		nextnum = nilfs->ns_nextnum;

		if (nilfs->ns_segnum == nilfs->ns_nextnum)
			/* Start from the head of a new full segment */
			alloc++;
	} else {
		/* Continue logs */
		prev = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
		nilfs_segbuf_map_cont(segbuf, prev);
		segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq;
		nextnum = prev->sb_nextnum;

		if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
			nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
			segbuf->sb_sum.seg_seq++;
			alloc++;
		}
	}

	err = nilfs_sufile_mark_dirty(nilfs->ns_sufile, segbuf->sb_segnum);
	if (err)
		goto failed;

	if (alloc) {
		err = nilfs_sufile_alloc(nilfs->ns_sufile, &nextnum);
		if (err)
			goto failed;
	}
	nilfs_segbuf_set_next_segnum(segbuf, nextnum, nilfs);

	BUG_ON(!list_empty(&sci->sc_segbufs));
	list_add_tail(&segbuf->sb_list, &sci->sc_segbufs);
	sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks;
	return 0;

 failed:
	nilfs_segbuf_free(segbuf);
	return err;
}

static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci,
					 struct the_nilfs *nilfs, int nadd)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	struct inode *sufile = nilfs->ns_sufile;
	__u64 nextnextnum;
	LIST_HEAD(list);
	int err, ret, i;

	prev = NILFS_LAST_SEGBUF(&sci->sc_segbufs);
	/*
	 * Since the segment specified with nextnum might be allocated during
	 * the previous construction, the buffer including its segusage may
	 * not be dirty. The following call ensures that the buffer is dirty
	 * and will pin the buffer in memory until the sufile is written.
	 */
	err = nilfs_sufile_mark_dirty(sufile, prev->sb_nextnum);
	if (unlikely(err))
		return err;

	for (i = 0; i < nadd; i++) {
		/* extend segment info */
		err = -ENOMEM;
		segbuf = nilfs_segbuf_new(sci->sc_super);
		if (unlikely(!segbuf))
			goto failed;

		/* map this buffer to region of segment on-disk */
		nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
		sci->sc_segbuf_nblocks += segbuf->sb_rest_blocks;

		/* allocate the next next full segment */
		err = nilfs_sufile_alloc(sufile, &nextnextnum);
		if (unlikely(err))
			goto failed_segbuf;

		segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq + 1;
		nilfs_segbuf_set_next_segnum(segbuf, nextnextnum, nilfs);

		list_add_tail(&segbuf->sb_list, &list);
		prev = segbuf;
	}
	list_splice_tail(&list, &sci->sc_segbufs);
	return 0;

 failed_segbuf:
	nilfs_segbuf_free(segbuf);
 failed:
	list_for_each_entry(segbuf, &list, sb_list) {
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret); /* never fails */
	}
	nilfs_destroy_logs(&list);
	return err;
}

static void nilfs_free_incomplete_logs(struct list_head *logs,
				       struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	struct inode *sufile = nilfs->ns_sufile;
	int ret;

	segbuf = NILFS_FIRST_SEGBUF(logs);
	if (nilfs->ns_nextnum != segbuf->sb_nextnum) {
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret); /* never fails */
	}
	if (atomic_read(&segbuf->sb_err)) {
		/* Case 1: The first segment failed */
		if (segbuf->sb_pseg_start != segbuf->sb_fseg_start)
			/*
			 * Case 1a: Partial segment appended into an existing
			 * segment
			 */
			nilfs_terminate_segment(nilfs, segbuf->sb_fseg_start,
						segbuf->sb_fseg_end);
		else /* Case 1b: New full segment */
			set_nilfs_discontinued(nilfs);
	}

	prev = segbuf;
	list_for_each_entry_continue(segbuf, logs, sb_list) {
		if (prev->sb_nextnum != segbuf->sb_nextnum) {
			ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
			WARN_ON(ret); /* never fails */
		}
		if (atomic_read(&segbuf->sb_err) &&
		    segbuf->sb_segnum != nilfs->ns_nextnum)
			/* Case 2: extended segment (!= next) failed */
			nilfs_sufile_set_error(sufile, segbuf->sb_segnum);
		prev = segbuf;
	}
}

static void nilfs_segctor_update_segusage(struct nilfs_sc_info *sci,
					  struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf;
	unsigned long live_blocks;
	int ret;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		live_blocks = segbuf->sb_sum.nblocks +
			(segbuf->sb_pseg_start - segbuf->sb_fseg_start);
		ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
						     live_blocks,
						     sci->sc_seg_ctime);
		WARN_ON(ret); /* always succeeds because the segusage is dirty */
	}
}

static void nilfs_cancel_segusage(struct list_head *logs, struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf;
	int ret;

	segbuf = NILFS_FIRST_SEGBUF(logs);
	ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
					     segbuf->sb_pseg_start -
					     segbuf->sb_fseg_start, 0);
	WARN_ON(ret); /* always succeeds because the segusage is dirty */

	list_for_each_entry_continue(segbuf, logs, sb_list) {
		ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
						     0, 0);
		WARN_ON(ret); /* always succeeds */
	}
}

static void nilfs_segctor_truncate_segments(struct nilfs_sc_info *sci,
					    struct nilfs_segment_buffer *last,
					    struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf = last;
	int ret;

	list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
		sci->sc_segbuf_nblocks -= segbuf->sb_rest_blocks;
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret);
	}
	nilfs_truncate_logs(&sci->sc_segbufs, last);
}


static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
				 struct the_nilfs *nilfs, int mode)
{
	struct nilfs_cstage prev_stage = sci->sc_stage;
	int err, nadd = 1;

	/* Collection retry loop */
	for (;;) {
		sci->sc_nblk_this_inc = 0;
		sci->sc_curseg = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);

		err = nilfs_segctor_reset_segment_buffer(sci);
		if (unlikely(err))
			goto failed;

		err = nilfs_segctor_collect_blocks(sci, mode);
		sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
		if (!err)
			break;

		if (unlikely(err != -E2BIG))
			goto failed;

		/* The current segment is filled up */
		if (mode != SC_LSEG_SR ||
		    nilfs_sc_cstage_get(sci) < NILFS_ST_CPFILE)
			break;

		nilfs_clear_logs(&sci->sc_segbufs);

		if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
			err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
							sci->sc_freesegs,
							sci->sc_nfreesegs,
							NULL);
			WARN_ON(err); /* does not happen */
			sci->sc_stage.flags &= ~NILFS_CF_SUFREED;
		}

		err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
		if (unlikely(err))
			return err;

		nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
		sci->sc_stage = prev_stage;
	}
	nilfs_segctor_zeropad_segsum(sci);
	nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
	return 0;

 failed:
	return err;
}

static void nilfs_list_replace_buffer(struct buffer_head *old_bh,
				      struct buffer_head *new_bh)
{
	BUG_ON(!list_empty(&new_bh->b_assoc_buffers));

	list_replace_init(&old_bh->b_assoc_buffers, &new_bh->b_assoc_buffers);
	/* The caller must release old_bh */
}

static int
nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci,
				     struct nilfs_segment_buffer *segbuf,
				     int mode)
{
	struct inode *inode = NULL;
	sector_t blocknr;
	unsigned long nfinfo = segbuf->sb_sum.nfinfo;
	unsigned long nblocks = 0, ndatablk = 0;
	const struct nilfs_sc_operations *sc_op = NULL;
	struct nilfs_segsum_pointer ssp;
	struct nilfs_finfo *finfo = NULL;
	union nilfs_binfo binfo;
	struct buffer_head *bh, *bh_org;
	ino_t ino = 0;
	int err = 0;

	if (!nfinfo)
		goto out;

	blocknr = segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk;
	ssp.bh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
	ssp.offset = sizeof(struct nilfs_segment_summary);

	list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
		if (bh == segbuf->sb_super_root)
			break;
		if (!finfo) {
			finfo = nilfs_segctor_map_segsum_entry(
				sci, &ssp, sizeof(*finfo));
			ino = le64_to_cpu(finfo->fi_ino);
			nblocks = le32_to_cpu(finfo->fi_nblocks);
			ndatablk = le32_to_cpu(finfo->fi_ndatablk);

			inode = bh->b_folio->mapping->host;

			if (mode == SC_LSEG_DSYNC)
				sc_op = &nilfs_sc_dsync_ops;
			else if (ino == NILFS_DAT_INO)
				sc_op = &nilfs_sc_dat_ops;
			else /* file blocks */
				sc_op = &nilfs_sc_file_ops;
		}
		bh_org = bh;
		get_bh(bh_org);
		err = nilfs_bmap_assign(NILFS_I(inode)->i_bmap, &bh, blocknr,
					&binfo);
		if (bh != bh_org)
			nilfs_list_replace_buffer(bh_org, bh);
		brelse(bh_org);
		if (unlikely(err))
			goto failed_bmap;

		if (ndatablk > 0)
			sc_op->write_data_binfo(sci, &ssp, &binfo);
		else
			sc_op->write_node_binfo(sci, &ssp, &binfo);

		blocknr++;
		if (--nblocks == 0) {
			finfo = NULL;
			if (--nfinfo == 0)
				break;
		} else if (ndatablk > 0)
			ndatablk--;
	}
 out:
	return 0;

 failed_bmap:
	return err;
}

static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_segment_buffer *segbuf;
	int err;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		err = nilfs_segctor_update_payload_blocknr(sci, segbuf, mode);
		if (unlikely(err))
			return err;
		nilfs_segbuf_fill_in_segsum(segbuf);
	}
	return 0;
}

static void nilfs_begin_folio_io(struct folio *folio)
{
	if (!folio || folio_test_writeback(folio))
		/*
		 * For split b-tree node folios, this function may be called
		 * twice. This check ignores the second and later calls.
		 */
		return;

	folio_lock(folio);
	folio_clear_dirty_for_io(folio);
	folio_start_writeback(folio);
	folio_unlock(folio);
}

static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf;
	struct folio *bd_folio = NULL, *fs_folio = NULL;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		struct buffer_head *bh;

		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			if (bh->b_folio != bd_folio) {
				if (bd_folio) {
					folio_lock(bd_folio);
					folio_wait_writeback(bd_folio);
					folio_clear_dirty_for_io(bd_folio);
					folio_start_writeback(bd_folio);
					folio_unlock(bd_folio);
				}
				bd_folio = bh->b_folio;
			}
		}

		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			if (bh == segbuf->sb_super_root) {
				if (bh->b_folio != bd_folio) {
					folio_lock(bd_folio);
					folio_wait_writeback(bd_folio);
					folio_clear_dirty_for_io(bd_folio);
					folio_start_writeback(bd_folio);
					folio_unlock(bd_folio);
					bd_folio = bh->b_folio;
				}
				break;
			}
			set_buffer_async_write(bh);
			if (bh->b_folio != fs_folio) {
				nilfs_begin_folio_io(fs_folio);
				fs_folio = bh->b_folio;
			}
		}
	}
	if (bd_folio) {
		folio_lock(bd_folio);
		folio_wait_writeback(bd_folio);
		folio_clear_dirty_for_io(bd_folio);
		folio_start_writeback(bd_folio);
		folio_unlock(bd_folio);
	}
	nilfs_begin_folio_io(fs_folio);
}

static int nilfs_segctor_write(struct nilfs_sc_info *sci,
			       struct the_nilfs *nilfs)
{
	int ret;

	ret = nilfs_write_logs(&sci->sc_segbufs, nilfs);
	list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs);
	return ret;
}

static void nilfs_end_folio_io(struct folio *folio, int err)
{
	if (!folio)
		return;

	if (buffer_nilfs_node(folio_buffers(folio)) &&
	    !folio_test_writeback(folio)) {
		/*
		 * For b-tree node folios, this function may be called twice
		 * or more because they might be split in a segment.
		 */
		if (folio_test_dirty(folio)) {
			/*
			 * For folios holding split b-tree node buffers, the
			 * dirty flag on the buffers may be cleared discretely.
			 * In that case, the folio is redirtied once for the
			 * remaining buffers, and that must be cancelled if
			 * all the buffers get cleaned later.
			 */
			folio_lock(folio);
			if (nilfs_folio_buffers_clean(folio))
				__nilfs_clear_folio_dirty(folio);
			folio_unlock(folio);
		}
		return;
	}

	if (err || !nilfs_folio_buffers_clean(folio))
		filemap_dirty_folio(folio->mapping, folio);

	folio_end_writeback(folio);
}

static void nilfs_abort_logs(struct list_head *logs, int err)
{
	struct nilfs_segment_buffer *segbuf;
	struct folio *bd_folio = NULL, *fs_folio = NULL;
	struct buffer_head *bh;

	if (list_empty(logs))
		return;

	list_for_each_entry(segbuf, logs, sb_list) {
		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			clear_buffer_uptodate(bh);
			if (bh->b_folio != bd_folio) {
				if (bd_folio)
					folio_end_writeback(bd_folio);
				bd_folio = bh->b_folio;
			}
		}

		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			if (bh == segbuf->sb_super_root) {
				clear_buffer_uptodate(bh);
				if (bh->b_folio != bd_folio) {
					folio_end_writeback(bd_folio);
					bd_folio = bh->b_folio;
				}
				break;
			}
			clear_buffer_async_write(bh);
			if (bh->b_folio != fs_folio) {
				nilfs_end_folio_io(fs_folio, err);
				fs_folio = bh->b_folio;
			}
		}
	}
	if (bd_folio)
		folio_end_writeback(bd_folio);

	nilfs_end_folio_io(fs_folio, err);
}

static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs, int err)
{
	LIST_HEAD(logs);
	int ret;

	list_splice_tail_init(&sci->sc_write_logs, &logs);
	ret = nilfs_wait_on_logs(&logs);
	nilfs_abort_logs(&logs, ret ? : err);

	list_splice_tail_init(&sci->sc_segbufs, &logs);
	nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
	nilfs_free_incomplete_logs(&logs, nilfs);

	if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
		ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
						sci->sc_freesegs,
						sci->sc_nfreesegs,
						NULL);
		WARN_ON(ret); /* does not happen */
	}

	nilfs_destroy_logs(&logs);
}

static void nilfs_set_next_segment(struct the_nilfs *nilfs,
				   struct nilfs_segment_buffer *segbuf)
{
	nilfs->ns_segnum = segbuf->sb_segnum;
	nilfs->ns_nextnum = segbuf->sb_nextnum;
	nilfs->ns_pseg_offset = segbuf->sb_pseg_start - segbuf->sb_fseg_start
		+ segbuf->sb_sum.nblocks;
	nilfs->ns_seg_seq = segbuf->sb_sum.seg_seq;
	nilfs->ns_ctime = segbuf->sb_sum.ctime;
}

static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf;
	struct folio *bd_folio = NULL, *fs_folio = NULL;
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int update_sr = false;

	list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
		struct buffer_head *bh;

		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			set_buffer_uptodate(bh);
			clear_buffer_dirty(bh);
			if (bh->b_folio != bd_folio) {
				if (bd_folio)
					folio_end_writeback(bd_folio);
				bd_folio = bh->b_folio;
			}
		}
		/*
		 * We assume that the buffers which belong to the same folio
		 * continue over the buffer list.
		 * Under this assumption, the last BHs of folios are
		 * identifiable by the discontinuity of bh->b_folio
		 * (folio != fs_folio).
		 *
		 * For B-tree node blocks, however, this assumption is not
		 * guaranteed. The cleanup code of B-tree node folios needs
		 * special care.
		 */
		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			const unsigned long set_bits = BIT(BH_Uptodate);
			const unsigned long clear_bits =
				(BIT(BH_Dirty) | BIT(BH_Async_Write) |
				 BIT(BH_Delay) | BIT(BH_NILFS_Volatile) |
				 BIT(BH_NILFS_Redirected));

			if (bh == segbuf->sb_super_root) {
				set_buffer_uptodate(bh);
				clear_buffer_dirty(bh);
				if (bh->b_folio != bd_folio) {
					folio_end_writeback(bd_folio);
					bd_folio = bh->b_folio;
				}
				update_sr = true;
				break;
			}
			set_mask_bits(&bh->b_state, clear_bits, set_bits);
			if (bh->b_folio != fs_folio) {
				nilfs_end_folio_io(fs_folio, 0);
				fs_folio = bh->b_folio;
			}
		}

		if (!nilfs_segbuf_simplex(segbuf)) {
			if (segbuf->sb_sum.flags & NILFS_SS_LOGBGN) {
				set_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
				sci->sc_lseg_stime = jiffies;
			}
			if (segbuf->sb_sum.flags & NILFS_SS_LOGEND)
				clear_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
		}
	}
	/*
	 * Since folios may continue over multiple segment buffers,
	 * the end of the last folio must be checked outside of the loop.
	 */
	if (bd_folio)
		folio_end_writeback(bd_folio);

	nilfs_end_folio_io(fs_folio, 0);

	nilfs_drop_collected_inodes(&sci->sc_dirty_files);

	if (nilfs_doing_gc())
		nilfs_drop_collected_inodes(&sci->sc_gc_inodes);
	else
		nilfs->ns_nongc_ctime = sci->sc_seg_ctime;

	sci->sc_nblk_inc += sci->sc_nblk_this_inc;

	segbuf = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
	nilfs_set_next_segment(nilfs, segbuf);

	if (update_sr) {
		nilfs->ns_flushed_device = 0;
		nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start,
				       segbuf->sb_sum.seg_seq, nilfs->ns_cno++);

		clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
		clear_bit(NILFS_SC_DIRTY, &sci->sc_flags);
		set_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
		nilfs_segctor_clear_metadata_dirty(sci);
	} else
		clear_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
}
1914
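/**
 * nilfs_segctor_wait - wait for completion of submitted logs
 * @sci: segment constructor object
 *
 * Waits for the I/O of the logs on sc_write_logs to finish, then
 * completes and destroys them on success.
 *
 * Return: 0 on success, or a negative error code on I/O failure.
 */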
1915static int nilfs_segctor_wait(struct nilfs_sc_info *sci)
1916{
1917 int ret;
1918
1919 ret = nilfs_wait_on_logs(&sci->sc_write_logs);
1920 if (!ret) {
1921 nilfs_segctor_complete_write(sci);
1922 nilfs_destroy_logs(&sci->sc_write_logs);
1923 }
1924 return ret;
1925}
1926
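/**
 * nilfs_segctor_collect_dirty_files - gather inodes with dirty blocks
 * @sci: segment constructor object
 * @nilfs: nilfs object
 *
 * Moves inodes queued on ns_dirty_files to the constructor's
 * sc_dirty_files list, reading the on-disk inode block of each inode
 * from the ifile if one is not attached yet.
 *
 * Return: 0 on success, or a negative error code if an inode block
 * cannot be read from the ifile.
 */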
1927static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
1928 struct the_nilfs *nilfs)
1929{
1930 struct nilfs_inode_info *ii, *n;
1931 struct inode *ifile = sci->sc_root->ifile;
1932
1933 spin_lock(&nilfs->ns_inode_lock);
1934 retry:
1935 list_for_each_entry_safe(ii, n, &nilfs->ns_dirty_files, i_dirty) {
1936 if (!ii->i_bh) {
1937 struct buffer_head *ibh;
1938 int err;
1939
1940 spin_unlock(&nilfs->ns_inode_lock);
1941 err = nilfs_ifile_get_inode_block(
1942 ifile, ii->vfs_inode.i_ino, &ibh);
1943 if (unlikely(err)) {
1944 nilfs_warn(sci->sc_super,
1945 "log writer: error %d getting inode block (ino=%lu)",
1946 err, ii->vfs_inode.i_ino);
1947 return err;
1948 }
1949 spin_lock(&nilfs->ns_inode_lock);
1950 if (likely(!ii->i_bh))
1951 ii->i_bh = ibh;
1952 else
1953 brelse(ibh);
1954 goto retry;
1955 }
1956
1957 /* Always redirty the buffer to avoid the race condition */
1958 mark_buffer_dirty(ii->i_bh);
1959 nilfs_mdt_mark_dirty(ifile);
1960
1961 clear_bit(NILFS_I_QUEUED, &ii->i_state);
1962 set_bit(NILFS_I_BUSY, &ii->i_state);
1963 list_move_tail(&ii->i_dirty, &sci->sc_dirty_files);
1964 }
1965 spin_unlock(&nilfs->ns_inode_lock);
1966
1967 return 0;
1968}
1969
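/**
 * nilfs_segctor_drop_written_files - detach inodes that were written out
 * @sci: segment constructor object
 * @nilfs: nilfs object
 *
 * Removes inodes whose NILFS_I_UPDATED flag is set (and NILFS_I_DIRTY
 * is clear) from sc_dirty_files and releases their inode blocks.
 * iput() is deferred to a work item for unlinked inodes and during
 * mount to avoid deadlocks.
 */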
1970static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
1971 struct the_nilfs *nilfs)
1972{
1973 struct nilfs_inode_info *ii, *n;
1974 int during_mount = !(sci->sc_super->s_flags & SB_ACTIVE);
1975 int defer_iput = false;
1976
1977 spin_lock(&nilfs->ns_inode_lock);
1978 list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
1979 if (!test_and_clear_bit(NILFS_I_UPDATED, &ii->i_state) ||
1980 test_bit(NILFS_I_DIRTY, &ii->i_state))
1981 continue;
1982
1983 clear_bit(NILFS_I_BUSY, &ii->i_state);
1984 brelse(ii->i_bh);
1985 ii->i_bh = NULL;
1986 list_del_init(&ii->i_dirty);
1987 if (!ii->vfs_inode.i_nlink || during_mount) {
1988 /*
1989 * Defer calling iput() to avoid deadlocks if
1990 * i_nlink == 0 or mount is not yet finished.
1991 */
1992 list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
1993 defer_iput = true;
1994 } else {
1995 spin_unlock(&nilfs->ns_inode_lock);
1996 iput(&ii->vfs_inode);
1997 spin_lock(&nilfs->ns_inode_lock);
1998 }
1999 }
2000 spin_unlock(&nilfs->ns_inode_lock);
2001
2002 if (defer_iput)
2003 schedule_work(&sci->sc_iput_work);
2004}
2005
2006/*
2007 * Main procedure of segment constructor
2008 */
2009static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
2010{
2011 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
2012 int err;
2013
2014 if (sb_rdonly(sci->sc_super))
2015 return -EROFS;
2016
2017 nilfs_sc_cstage_set(sci, NILFS_ST_INIT);
2018 sci->sc_cno = nilfs->ns_cno;
2019
2020 err = nilfs_segctor_collect_dirty_files(sci, nilfs);
2021 if (unlikely(err))
2022 goto out;
2023
2024 if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
2025 set_bit(NILFS_SC_DIRTY, &sci->sc_flags);
2026
2027 if (nilfs_segctor_clean(sci))
2028 goto out;
2029
2030 do {
2031 sci->sc_stage.flags &= ~NILFS_CF_HISTORY_MASK;
2032
2033 err = nilfs_segctor_begin_construction(sci, nilfs);
2034 if (unlikely(err))
2035 goto out;
2036
2037 /* Update time stamp */
2038 sci->sc_seg_ctime = ktime_get_real_seconds();
2039
2040 err = nilfs_segctor_collect(sci, nilfs, mode);
2041 if (unlikely(err))
2042 goto failed;
2043
2044 /* Avoid empty segment */
2045 if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE &&
2046 nilfs_segbuf_empty(sci->sc_curseg)) {
2047 nilfs_segctor_abort_construction(sci, nilfs, 1);
2048 goto out;
2049 }
2050
2051 err = nilfs_segctor_assign(sci, mode);
2052 if (unlikely(err))
2053 goto failed;
2054
2055 if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
2056 nilfs_segctor_fill_in_file_bmap(sci);
2057
2058 if (mode == SC_LSEG_SR &&
2059 nilfs_sc_cstage_get(sci) >= NILFS_ST_CPFILE) {
2060 err = nilfs_cpfile_finalize_checkpoint(
2061 nilfs->ns_cpfile, nilfs->ns_cno, sci->sc_root,
2062 sci->sc_nblk_inc + sci->sc_nblk_this_inc,
2063 sci->sc_seg_ctime,
2064 !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags));
2065 if (unlikely(err))
2066 goto failed_to_write;
2067
2068 nilfs_segctor_fill_in_super_root(sci, nilfs);
2069 }
2070 nilfs_segctor_update_segusage(sci, nilfs->ns_sufile);
2071
2072 /* Write partial segments */
2073 nilfs_segctor_prepare_write(sci);
2074
2075 nilfs_add_checksums_on_logs(&sci->sc_segbufs,
2076 nilfs->ns_crc_seed);
2077
2078 err = nilfs_segctor_write(sci, nilfs);
2079 if (unlikely(err))
2080 goto failed_to_write;
2081
2082 if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE ||
2083 nilfs->ns_blocksize_bits != PAGE_SHIFT) {
2084 /*
2085 * At this point, we avoid double buffering
2086 * for blocksize < pagesize because the page
2087 * dirty flag is turned off during the write
2088 * and dirty buffers are not properly collected
2089 * for pages crossing segment boundaries.
2090 */
2091 err = nilfs_segctor_wait(sci);
2092 if (err)
2093 goto failed_to_write;
2094 }
2095 } while (nilfs_sc_cstage_get(sci) != NILFS_ST_DONE);
2096
2097 out:
2098 nilfs_segctor_drop_written_files(sci, nilfs);
2099 return err;
2100
2101 failed_to_write:
2102 if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
2103 nilfs_redirty_inodes(&sci->sc_dirty_files);
2104
2105 failed:
2106 if (nilfs_doing_gc())
2107 nilfs_redirty_inodes(&sci->sc_gc_inodes);
2108 nilfs_segctor_abort_construction(sci, nilfs, err);
2109 goto out;
2110}
2111
2112/**
2113 * nilfs_segctor_start_timer - set timer of background write
2114 * @sci: nilfs_sc_info
2115 *
2116 * If the timer has already been set, it ignores the new request.
2117 * This function MUST be called while holding the segment
2118 * semaphore.
2119 */
2120static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
2121{
2122 spin_lock(&sci->sc_state_lock);
2123 if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
2124 if (sci->sc_task) {
2125 sci->sc_timer.expires = jiffies + sci->sc_interval;
2126 add_timer(&sci->sc_timer);
2127 }
2128 sci->sc_state |= NILFS_SEGCTOR_COMMIT;
2129 }
2130 spin_unlock(&sci->sc_state_lock);
2131}
2132
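/**
 * nilfs_segctor_do_flush - request a flush of a file or the DAT
 * @sci: segment constructor object
 * @bn: bit number identifying the flush target (0 for data files,
 *      otherwise the inode number of a metadata file)
 *
 * Sets the corresponding bit in sc_flush_request and wakes up the log
 * writer thread if this is the first pending request.
 */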
2133static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn)
2134{
2135 spin_lock(&sci->sc_state_lock);
2136 if (!(sci->sc_flush_request & BIT(bn))) {
2137 unsigned long prev_req = sci->sc_flush_request;
2138
2139 sci->sc_flush_request |= BIT(bn);
2140 if (!prev_req)
2141 wake_up(&sci->sc_wait_daemon);
2142 }
2143 spin_unlock(&sci->sc_state_lock);
2144}
2145
2146/**
2147 * nilfs_flush_segment - trigger a segment construction for resource control
2148 * @sb: super block
2149 * @ino: inode number of the file to be flushed out.
2150 */
2151void nilfs_flush_segment(struct super_block *sb, ino_t ino)
2152{
2153 struct the_nilfs *nilfs = sb->s_fs_info;
2154 struct nilfs_sc_info *sci = nilfs->ns_writer;
2155
2156 if (!sci || nilfs_doing_construction())
2157 return;
2158 nilfs_segctor_do_flush(sci, NILFS_MDT_INODE(sb, ino) ? ino : 0);
2159 /* assign bit 0 to data files */
2160}
2161
2162struct nilfs_segctor_wait_request {
2163 wait_queue_entry_t wq;
2164 __u32 seq;
2165 int err;
2166 atomic_t done;
2167};
2168
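/**
 * nilfs_segctor_sync - wait for the completion of a segment construction
 * @sci: segment constructor object
 *
 * Issues a construction request to the log writer thread and waits
 * until the request, identified by its sequence number, has been
 * processed.
 *
 * Return: 0 on success, the error code of the failed construction, or
 * -ERESTARTSYS if interrupted by a signal.
 */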
2169static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
2170{
2171 struct nilfs_segctor_wait_request wait_req;
2172 int err = 0;
2173
2174 init_wait(&wait_req.wq);
2175 wait_req.err = 0;
2176 atomic_set(&wait_req.done, 0);
2177 init_waitqueue_entry(&wait_req.wq, current);
2178
2179 /*
2180 * To prevent a race issue where completion notifications from the
2181 * log writer thread are missed, increment the request sequence count
2182 * "sc_seq_request" and insert a wait queue entry using the current
2183 * sequence number into the "sc_wait_request" queue at the same time
2184 * within the lock section of "sc_state_lock".
2185 */
2186 spin_lock(&sci->sc_state_lock);
2187 wait_req.seq = ++sci->sc_seq_request;
2188 add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
2189 spin_unlock(&sci->sc_state_lock);
2190
2191 wake_up(&sci->sc_wait_daemon);
2192
2193 for (;;) {
2194 set_current_state(TASK_INTERRUPTIBLE);
2195
2196 /*
2197 * Synchronize only while the log writer thread is alive.
2198 * If the thread has exited, leave the final flush to the
2199 * cleanup work in nilfs_segctor_destroy().
2200 */
2201 if (!sci->sc_task)
2202 break;
2203
2204 if (atomic_read(&wait_req.done)) {
2205 err = wait_req.err;
2206 break;
2207 }
2208 if (!signal_pending(current)) {
2209 schedule();
2210 continue;
2211 }
2212 err = -ERESTARTSYS;
2213 break;
2214 }
2215 finish_wait(&sci->sc_wait_request, &wait_req.wq);
2216 return err;
2217}
2218
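/**
 * nilfs_segctor_wakeup - wake up waiters whose requests have completed
 * @sci: segment constructor object
 * @err: error code to report to the waiters
 * @force: if true, complete all waiting requests regardless of their
 *         sequence numbers
 *
 * Marks as done every wait request whose sequence number has been
 * reached by sc_seq_done, or all of them when @force is set, and wakes
 * up the corresponding waiters with @err.
 */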
2219static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err, bool force)
2220{
2221 struct nilfs_segctor_wait_request *wrq, *n;
2222 unsigned long flags;
2223
2224 spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
2225 list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.head, wq.entry) {
2226 if (!atomic_read(&wrq->done) &&
2227 (force || nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq))) {
2228 wrq->err = err;
2229 atomic_set(&wrq->done, 1);
2230 }
2231 if (atomic_read(&wrq->done)) {
2232 wrq->wq.func(&wrq->wq,
2233 TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
2234 0, NULL);
2235 }
2236 }
2237 spin_unlock_irqrestore(&sci->sc_wait_request.lock, flags);
2238}
2239
2240/**
2241 * nilfs_construct_segment - construct a logical segment
2242 * @sb: super block
2243 *
2244 * Return Value: On success, 0 is returned. On errors, one of the following
2245 * negative error codes is returned.
2246 *
2247 * %-EROFS - Read only filesystem.
2248 *
2249 * %-EIO - I/O error
2250 *
2251 * %-ENOSPC - No space left on device (only in a panic state).
2252 *
2253 * %-ERESTARTSYS - Interrupted.
2254 *
2255 * %-ENOMEM - Insufficient memory available.
2256 */
2257int nilfs_construct_segment(struct super_block *sb)
2258{
2259 struct the_nilfs *nilfs = sb->s_fs_info;
2260 struct nilfs_sc_info *sci = nilfs->ns_writer;
2261 struct nilfs_transaction_info *ti;
2262
2263 if (sb_rdonly(sb) || unlikely(!sci))
2264 return -EROFS;
2265
2266 /* A call inside transactions causes a deadlock. */
2267 BUG_ON((ti = current->journal_info) && ti->ti_magic == NILFS_TI_MAGIC);
2268
2269 return nilfs_segctor_sync(sci);
2270}
2271
2272/**
2273 * nilfs_construct_dsync_segment - construct a data-only logical segment
2274 * @sb: super block
2275 * @inode: inode whose data blocks should be written out
2276 * @start: start byte offset
2277 * @end: end byte offset (inclusive)
2278 *
2279 * Return Value: On success, 0 is returned. On errors, one of the following
2280 * negative error codes is returned.
2281 *
2282 * %-EROFS - Read only filesystem.
2283 *
2284 * %-EIO - I/O error
2285 *
2286 * %-ENOSPC - No space left on device (only in a panic state).
2287 *
2288 * %-ERESTARTSYS - Interrupted.
2289 *
2290 * %-ENOMEM - Insufficient memory available.
2291 */
2292int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
2293 loff_t start, loff_t end)
2294{
2295 struct the_nilfs *nilfs = sb->s_fs_info;
2296 struct nilfs_sc_info *sci = nilfs->ns_writer;
2297 struct nilfs_inode_info *ii;
2298 struct nilfs_transaction_info ti;
2299 int err = 0;
2300
2301 if (sb_rdonly(sb) || unlikely(!sci))
2302 return -EROFS;
2303
2304 nilfs_transaction_lock(sb, &ti, 0);
2305
2306 ii = NILFS_I(inode);
2307 if (test_bit(NILFS_I_INODE_SYNC, &ii->i_state) ||
2308 nilfs_test_opt(nilfs, STRICT_ORDER) ||
2309 test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
2310 nilfs_discontinued(nilfs)) {
2311 nilfs_transaction_unlock(sb);
2312 err = nilfs_segctor_sync(sci);
2313 return err;
2314 }
2315
2316 spin_lock(&nilfs->ns_inode_lock);
2317 if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
2318 !test_bit(NILFS_I_BUSY, &ii->i_state)) {
2319 spin_unlock(&nilfs->ns_inode_lock);
2320 nilfs_transaction_unlock(sb);
2321 return 0;
2322 }
2323 spin_unlock(&nilfs->ns_inode_lock);
2324 sci->sc_dsync_inode = ii;
2325 sci->sc_dsync_start = start;
2326 sci->sc_dsync_end = end;
2327
2328 err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC);
2329 if (!err)
2330 nilfs->ns_flushed_device = 0;
2331
2332 nilfs_transaction_unlock(sb);
2333 return err;
2334}
2335
2336#define FLUSH_FILE_BIT (0x1) /* data file only */
2337#define FLUSH_DAT_BIT BIT(NILFS_DAT_INO) /* DAT only */
2338
2339/**
2340 * nilfs_segctor_accept - record accepted sequence count of log-write requests
2341 * @sci: segment constructor object
2342 */
2343static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
2344{
2345 bool thread_is_alive;
2346
2347 spin_lock(&sci->sc_state_lock);
2348 sci->sc_seq_accepted = sci->sc_seq_request;
2349 thread_is_alive = (bool)sci->sc_task;
2350 spin_unlock(&sci->sc_state_lock);
2351
2352 /*
2353 * This function does not race with the log writer thread's
2354 * termination. Therefore, deleting sc_timer, which should not be
2355 * done after the log writer thread exits, can be done safely outside
2356 * the area protected by sc_state_lock.
2357 */
2358 if (thread_is_alive)
2359 del_timer_sync(&sci->sc_timer);
2360}
2361
2362/**
2363 * nilfs_segctor_notify - notify the result of request to caller threads
2364 * @sci: segment constructor object
2365 * @mode: mode of log forming
2366 * @err: error code to be notified
2367 */
2368static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
2369{
2370 /* Clear requests (even when the construction failed) */
2371 spin_lock(&sci->sc_state_lock);
2372
2373 if (mode == SC_LSEG_SR) {
2374 sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
2375 sci->sc_seq_done = sci->sc_seq_accepted;
2376 nilfs_segctor_wakeup(sci, err, false);
2377 sci->sc_flush_request = 0;
2378 } else {
2379 if (mode == SC_FLUSH_FILE)
2380 sci->sc_flush_request &= ~FLUSH_FILE_BIT;
2381 else if (mode == SC_FLUSH_DAT)
2382 sci->sc_flush_request &= ~FLUSH_DAT_BIT;
2383
2384 /* re-enable timer if checkpoint creation was not done */
2385 if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) && sci->sc_task &&
2386 time_before(jiffies, sci->sc_timer.expires))
2387 add_timer(&sci->sc_timer);
2388 }
2389 spin_unlock(&sci->sc_state_lock);
2390}
2391
2392/**
2393 * nilfs_segctor_construct - form logs and write them to disk
2394 * @sci: segment constructor object
2395 * @mode: mode of log forming
2396 */
2397static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
2398{
2399 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
2400 struct nilfs_super_block **sbp;
2401 int err = 0;
2402
2403 nilfs_segctor_accept(sci);
2404
2405 if (nilfs_discontinued(nilfs))
2406 mode = SC_LSEG_SR;
2407 if (!nilfs_segctor_confirm(sci))
2408 err = nilfs_segctor_do_construct(sci, mode);
2409
2410 if (likely(!err)) {
2411 if (mode != SC_FLUSH_DAT)
2412 atomic_set(&nilfs->ns_ndirtyblks, 0);
2413 if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) &&
2414 nilfs_discontinued(nilfs)) {
2415 down_write(&nilfs->ns_sem);
2416 err = -EIO;
2417 sbp = nilfs_prepare_super(sci->sc_super,
2418 nilfs_sb_will_flip(nilfs));
2419 if (likely(sbp)) {
2420 nilfs_set_log_cursor(sbp[0], nilfs);
2421 err = nilfs_commit_super(sci->sc_super,
2422 NILFS_SB_COMMIT);
2423 }
2424 up_write(&nilfs->ns_sem);
2425 }
2426 }
2427
2428 nilfs_segctor_notify(sci, mode, err);
2429 return err;
2430}
2431
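/* Timer callback: wakes up the log writer thread when sc_timer expires */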
2432static void nilfs_construction_timeout(struct timer_list *t)
2433{
2434 struct nilfs_sc_info *sci = from_timer(sci, t, sc_timer);
2435
2436 wake_up_process(sci->sc_timer_task);
2437}
2438
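/**
 * nilfs_remove_written_gcinodes - dispose of GC inodes that were written
 * @nilfs: nilfs object
 * @head: list of inodes collected for garbage collection
 *
 * Removes inodes marked NILFS_I_UPDATED from @head, truncates their
 * page caches, clears their B-tree node caches, and drops the
 * references held for garbage collection.
 */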
2439static void
2440nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
2441{
2442 struct nilfs_inode_info *ii, *n;
2443
2444 list_for_each_entry_safe(ii, n, head, i_dirty) {
2445 if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
2446 continue;
2447 list_del_init(&ii->i_dirty);
2448 truncate_inode_pages(&ii->vfs_inode.i_data, 0);
2449 nilfs_btnode_cache_clear(ii->i_assoc_inode->i_mapping);
2450 iput(&ii->vfs_inode);
2451 }
2452}
2453
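/**
 * nilfs_clean_segments - write out logs for GC and free reclaimed segments
 * @sb: super block instance
 * @argv: argument vectors from the clean segments ioctl
 * @kbufs: array of kernel buffers holding the vector elements
 *
 * Prepares the caches for the blocks selected by the userland cleaner,
 * constructs logs with a super root, and retries the construction
 * after a short delay until it succeeds.  If the "discard" mount
 * option is enabled, the freed segments are discarded afterwards.
 *
 * Return: 0 on success, or a negative error code on failure.
 */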
2454int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
2455 void **kbufs)
2456{
2457 struct the_nilfs *nilfs = sb->s_fs_info;
2458 struct nilfs_sc_info *sci = nilfs->ns_writer;
2459 struct nilfs_transaction_info ti;
2460 int err;
2461
2462 if (unlikely(!sci))
2463 return -EROFS;
2464
2465 nilfs_transaction_lock(sb, &ti, 1);
2466
2467 err = nilfs_mdt_save_to_shadow_map(nilfs->ns_dat);
2468 if (unlikely(err))
2469 goto out_unlock;
2470
2471 err = nilfs_ioctl_prepare_clean_segments(nilfs, argv, kbufs);
2472 if (unlikely(err)) {
2473 nilfs_mdt_restore_from_shadow_map(nilfs->ns_dat);
2474 goto out_unlock;
2475 }
2476
2477 sci->sc_freesegs = kbufs[4];
2478 sci->sc_nfreesegs = argv[4].v_nmembs;
2479 list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes);
2480
2481 for (;;) {
2482 err = nilfs_segctor_construct(sci, SC_LSEG_SR);
2483 nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes);
2484
2485 if (likely(!err))
2486 break;
2487
2488 nilfs_warn(sb, "error %d cleaning segments", err);
2489 set_current_state(TASK_INTERRUPTIBLE);
2490 schedule_timeout(sci->sc_interval);
2491 }
2492 if (nilfs_test_opt(nilfs, DISCARD)) {
2493 int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs,
2494 sci->sc_nfreesegs);
2495 if (ret) {
2496 nilfs_warn(sb,
2497 "error %d on discard request, turning discards off for the device",
2498 ret);
2499 nilfs_clear_opt(nilfs, DISCARD);
2500 }
2501 }
2502
2503 out_unlock:
2504 sci->sc_freesegs = NULL;
2505 sci->sc_nfreesegs = 0;
2506 nilfs_mdt_clear_shadow_map(nilfs->ns_dat);
2507 nilfs_transaction_unlock(sb);
2508 return err;
2509}
2510
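/**
 * nilfs_segctor_thread_construct - construct logs from the writer thread
 * @sci: segment constructor object
 * @mode: mode of log forming
 *
 * Runs a segment construction inside a writer-side transaction and,
 * if the logical segment is left unclosed, arms sc_timer so that the
 * segment is completed later.
 */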
2511static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode)
2512{
2513 struct nilfs_transaction_info ti;
2514
2515 nilfs_transaction_lock(sci->sc_super, &ti, 0);
2516 nilfs_segctor_construct(sci, mode);
2517
2518 /*
2519 * An unclosed segment should be retried. We do this using sc_timer.
2520 * A timeout of sc_timer invokes a complete construction, which
2521 * closes the current logical segment.
2522 */
2523 if (test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags))
2524 nilfs_segctor_start_timer(sci);
2525
2526 nilfs_transaction_unlock(sci->sc_super);
2527}
2528
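/**
 * nilfs_segctor_do_immediate_flush - execute a prioritized flush request
 * @sci: segment constructor object
 *
 * Runs a pending flush request directly, giving DAT flushes precedence
 * over data file flushes, then clears the served request bit and the
 * NILFS_SC_PRIOR_FLUSH flag.
 */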
2529static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *sci)
2530{
2531 int mode = 0;
2532
2533 spin_lock(&sci->sc_state_lock);
2534 mode = (sci->sc_flush_request & FLUSH_DAT_BIT) ?
2535 SC_FLUSH_DAT : SC_FLUSH_FILE;
2536 spin_unlock(&sci->sc_state_lock);
2537
2538 if (mode) {
2539 nilfs_segctor_do_construct(sci, mode);
2540
2541 spin_lock(&sci->sc_state_lock);
2542 sci->sc_flush_request &= (mode == SC_FLUSH_FILE) ?
2543 ~FLUSH_FILE_BIT : ~FLUSH_DAT_BIT;
2544 spin_unlock(&sci->sc_state_lock);
2545 }
2546 clear_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
2547}
2548
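/**
 * nilfs_segctor_flush_mode - choose the construction mode for flushing
 * @sci: segment constructor object
 *
 * Return: SC_FLUSH_FILE or SC_FLUSH_DAT if only the corresponding
 * requests are pending and the current logical segment is closed or
 * younger than sc_mjcp_freq; otherwise SC_LSEG_SR, which closes the
 * segment with a checkpoint and super root.
 */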
2549static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci)
2550{
2551 if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
2552 time_before(jiffies, sci->sc_lseg_stime + sci->sc_mjcp_freq)) {
2553 if (!(sci->sc_flush_request & ~FLUSH_FILE_BIT))
2554 return SC_FLUSH_FILE;
2555 else if (!(sci->sc_flush_request & ~FLUSH_DAT_BIT))
2556 return SC_FLUSH_DAT;
2557 }
2558 return SC_LSEG_SR;
2559}
2560
2561/**
2562 * nilfs_segctor_thread - main loop of the segment constructor thread.
2563 * @arg: pointer to a struct nilfs_sc_info.
2564 *
2565 * nilfs_segctor_thread() initializes a timer and serves as a daemon
2566 * to execute segment constructions.
2567 */
2568static int nilfs_segctor_thread(void *arg)
2569{
2570 struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg;
2571 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
2572 int timeout = 0;
2573
2574 sci->sc_timer_task = current;
2575 timer_setup(&sci->sc_timer, nilfs_construction_timeout, 0);
2576
2577 /* start sync. */
2578 sci->sc_task = current;
2579 wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */
2580 nilfs_info(sci->sc_super,
2581 "segctord starting. Construction interval = %lu seconds, CP frequency < %lu seconds",
2582 sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ);
2583
2584 set_freezable();
2585 spin_lock(&sci->sc_state_lock);
2586 loop:
2587 for (;;) {
2588 int mode;
2589
2590 if (sci->sc_state & NILFS_SEGCTOR_QUIT)
2591 goto end_thread;
2592
2593 if (timeout || sci->sc_seq_request != sci->sc_seq_done)
2594 mode = SC_LSEG_SR;
2595 else if (sci->sc_flush_request)
2596 mode = nilfs_segctor_flush_mode(sci);
2597 else
2598 break;
2599
2600 spin_unlock(&sci->sc_state_lock);
2601 nilfs_segctor_thread_construct(sci, mode);
2602 spin_lock(&sci->sc_state_lock);
2603 timeout = 0;
2604 }
2605
2607 if (freezing(current)) {
2608 spin_unlock(&sci->sc_state_lock);
2609 try_to_freeze();
2610 spin_lock(&sci->sc_state_lock);
2611 } else {
2612 DEFINE_WAIT(wait);
2613 int should_sleep = 1;
2614
2615 prepare_to_wait(&sci->sc_wait_daemon, &wait,
2616 TASK_INTERRUPTIBLE);
2617
2618 if (sci->sc_seq_request != sci->sc_seq_done)
2619 should_sleep = 0;
2620 else if (sci->sc_flush_request)
2621 should_sleep = 0;
2622 else if (sci->sc_state & NILFS_SEGCTOR_COMMIT)
2623 should_sleep = time_before(jiffies,
2624 sci->sc_timer.expires);
2625
2626 if (should_sleep) {
2627 spin_unlock(&sci->sc_state_lock);
2628 schedule();
2629 spin_lock(&sci->sc_state_lock);
2630 }
2631 finish_wait(&sci->sc_wait_daemon, &wait);
2632 timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
2633 time_after_eq(jiffies, sci->sc_timer.expires));
2634
2635 if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs))
2636 set_nilfs_discontinued(nilfs);
2637 }
2638 goto loop;
2639
2640 end_thread:
2641 /* end sync. */
2642 sci->sc_task = NULL;
2643 timer_shutdown_sync(&sci->sc_timer);
2644 wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
2645 spin_unlock(&sci->sc_state_lock);
2646 return 0;
2647}
2648
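/**
 * nilfs_segctor_start_thread - create and start the log writer thread
 * @sci: segment constructor object
 *
 * Spawns the "segctord" kernel thread and waits until the thread has
 * set sc_task, i.e. until it is ready to accept requests.
 *
 * Return: 0 on success, or a negative error code if the thread cannot
 * be created.
 */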
2649static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci)
2650{
2651 struct task_struct *t;
2652
2653 t = kthread_run(nilfs_segctor_thread, sci, "segctord");
2654 if (IS_ERR(t)) {
2655 int err = PTR_ERR(t);
2656
2657 nilfs_err(sci->sc_super, "error %d creating segctord thread",
2658 err);
2659 return err;
2660 }
2661 wait_event(sci->sc_wait_task, sci->sc_task != NULL);
2662 return 0;
2663}
2664
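/**
 * nilfs_segctor_kill_thread - stop the log writer thread
 * @sci: segment constructor object
 *
 * Sets the NILFS_SEGCTOR_QUIT flag and wakes the thread until sc_task
 * becomes NULL, dropping sc_state_lock while waiting.
 */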
2665static void nilfs_segctor_kill_thread(struct nilfs_sc_info *sci)
2666 __acquires(&sci->sc_state_lock)
2667 __releases(&sci->sc_state_lock)
2668{
2669 sci->sc_state |= NILFS_SEGCTOR_QUIT;
2670
2671 while (sci->sc_task) {
2672 wake_up(&sci->sc_wait_daemon);
2673 spin_unlock(&sci->sc_state_lock);
2674 wait_event(sci->sc_wait_task, sci->sc_task == NULL);
2675 spin_lock(&sci->sc_state_lock);
2676 }
2677}
2678
2679/*
2680 * Setup & clean-up functions
2681 */
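
/**
 * nilfs_segctor_new - allocate and initialize a segment constructor
 * @sb: super block instance
 * @root: root object of the filesystem tree to write
 *
 * Return: a pointer to the new nilfs_sc_info object initialized with
 * default parameters, overridden by per-filesystem settings when they
 * are present, or NULL on allocation failure.
 */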
2682static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
2683 struct nilfs_root *root)
2684{
2685 struct the_nilfs *nilfs = sb->s_fs_info;
2686 struct nilfs_sc_info *sci;
2687
2688 sci = kzalloc(sizeof(*sci), GFP_KERNEL);
2689 if (!sci)
2690 return NULL;
2691
2692 sci->sc_super = sb;
2693
2694 nilfs_get_root(root);
2695 sci->sc_root = root;
2696
2697 init_waitqueue_head(&sci->sc_wait_request);
2698 init_waitqueue_head(&sci->sc_wait_daemon);
2699 init_waitqueue_head(&sci->sc_wait_task);
2700 spin_lock_init(&sci->sc_state_lock);
2701 INIT_LIST_HEAD(&sci->sc_dirty_files);
2702 INIT_LIST_HEAD(&sci->sc_segbufs);
2703 INIT_LIST_HEAD(&sci->sc_write_logs);
2704 INIT_LIST_HEAD(&sci->sc_gc_inodes);
2705 INIT_LIST_HEAD(&sci->sc_iput_queue);
2706 INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);
2707
2708 sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
2709 sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
2710 sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK;
2711
2712 if (nilfs->ns_interval)
2713 sci->sc_interval = HZ * nilfs->ns_interval;
2714 if (nilfs->ns_watermark)
2715 sci->sc_watermark = nilfs->ns_watermark;
2716 return sci;
2717}
2718
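/**
 * nilfs_segctor_write_out - write out remaining data at shutdown
 * @sci: segment constructor object
 *
 * Runs a final segment construction, retrying on errors other than
 * -EROFS up to NILFS_SC_CLEANUP_RETRY times, so that dirty data left
 * after the log writer thread has stopped still reaches the disk.
 */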
2719static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
2720{
2721 int ret, retrycount = NILFS_SC_CLEANUP_RETRY;
2722
2723 /*
2724 * The segctord thread was stopped and its timer was removed.
2725 * However, pending write requests may remain and are flushed out here.
2726 */
2727 do {
2728 struct nilfs_transaction_info ti;
2729
2730 nilfs_transaction_lock(sci->sc_super, &ti, 0);
2731 ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
2732 nilfs_transaction_unlock(sci->sc_super);
2733
2734 flush_work(&sci->sc_iput_work);
2735
2736 } while (ret && ret != -EROFS && retrycount-- > 0);
2737}
2738
2739/**
2740 * nilfs_segctor_destroy - destroy the segment constructor.
2741 * @sci: nilfs_sc_info
2742 *
2743 * nilfs_segctor_destroy() kills the segctord thread and frees
2744 * the nilfs_sc_info struct.
2745 * The caller must hold the segment semaphore.
2746 */
2747static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
2748{
2749 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
2750 int flag;
2751
2752 up_write(&nilfs->ns_segctor_sem);
2753
2754 spin_lock(&sci->sc_state_lock);
2755 nilfs_segctor_kill_thread(sci);
2756 flag = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) || sci->sc_flush_request
2757 || sci->sc_seq_request != sci->sc_seq_done);
2758 spin_unlock(&sci->sc_state_lock);
2759
2760 /*
2761 * Forcibly wake up tasks waiting in nilfs_segctor_sync(), which can
2762 * be called from delayed iput() via nilfs_evict_inode() and can race
2763 * with the above log writer thread termination.
2764 */
2765 nilfs_segctor_wakeup(sci, 0, true);
2766
2767 if (flush_work(&sci->sc_iput_work))
2768 flag = true;
2769
2770 if (flag || !nilfs_segctor_confirm(sci))
2771 nilfs_segctor_write_out(sci);
2772
2773 if (!list_empty(&sci->sc_dirty_files)) {
2774 nilfs_warn(sci->sc_super,
2775 "disposed unprocessed dirty file(s) when stopping log writer");
2776 nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1);
2777 }
2778
2779 if (!list_empty(&sci->sc_iput_queue)) {
2780 nilfs_warn(sci->sc_super,
2781 "disposed unprocessed inode(s) in iput queue when stopping log writer");
2782 nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1);
2783 }
2784
2785 WARN_ON(!list_empty(&sci->sc_segbufs));
2786 WARN_ON(!list_empty(&sci->sc_write_logs));
2787
2788 nilfs_put_root(sci->sc_root);
2789
2790 down_write(&nilfs->ns_segctor_sem);
2791
2792 kfree(sci);
2793}
2794
2795/**
2796 * nilfs_attach_log_writer - attach log writer
2797 * @sb: super block instance
2798 * @root: root object of the current filesystem tree
2799 *
2800 * This allocates a log writer object, initializes it, and starts the
2801 * log writer.
2802 *
2803 * Return Value: On success, 0 is returned. On error, one of the following
2804 * negative error codes is returned.
2805 *
2806 * %-ENOMEM - Insufficient memory available.
2807 */
2808int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
2809{
2810 struct the_nilfs *nilfs = sb->s_fs_info;
2811 int err;
2812
2813 if (nilfs->ns_writer) {
2814 /*
2815 * This happens if the filesystem is made read-only by
2816 * __nilfs_error or nilfs_remount and then remounted
2817 * read/write. In these cases, reuse the existing
2818 * writer.
2819 */
2820 return 0;
2821 }
2822
2823 nilfs->ns_writer = nilfs_segctor_new(sb, root);
2824 if (!nilfs->ns_writer)
2825 return -ENOMEM;
2826
2827 inode_attach_wb(nilfs->ns_bdev->bd_mapping->host, NULL);
2828
2829 err = nilfs_segctor_start_thread(nilfs->ns_writer);
2830 if (unlikely(err))
2831 nilfs_detach_log_writer(sb);
2832
2833 return err;
2834}
2835
2836/**
2837 * nilfs_detach_log_writer - destroy log writer
2838 * @sb: super block instance
2839 *
2840 * This kills the log writer daemon, frees the log writer object, and
2841 * destroys the list of dirty files.
2842 */
2843void nilfs_detach_log_writer(struct super_block *sb)
2844{
2845 struct the_nilfs *nilfs = sb->s_fs_info;
2846 LIST_HEAD(garbage_list);
2847
2848 down_write(&nilfs->ns_segctor_sem);
2849 if (nilfs->ns_writer) {
2850 nilfs_segctor_destroy(nilfs->ns_writer);
2851 nilfs->ns_writer = NULL;
2852 }
2853 set_nilfs_purging(nilfs);
2854
2855 /* Forcibly free the list of dirty files */
2856 spin_lock(&nilfs->ns_inode_lock);
2857 if (!list_empty(&nilfs->ns_dirty_files)) {
2858 list_splice_init(&nilfs->ns_dirty_files, &garbage_list);
2859 nilfs_warn(sb,
2860 "disposed unprocessed dirty file(s) when detaching log writer");
2861 }
2862 spin_unlock(&nilfs->ns_inode_lock);
2863 up_write(&nilfs->ns_segctor_sem);
2864
2865 nilfs_dispose_list(nilfs, &garbage_list, 1);
2866 clear_nilfs_purging(nilfs);
2867}