// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/list_sort.h>
#include <linux/blkdev.h>

#include "bmap.h"
#include "dir.h"
#include "gfs2.h"
#include "incore.h"
#include "inode.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"

/**
 * gfs2_pin - Pin a buffer in memory
 * @sdp: The superblock
 * @bh: The buffer to be pinned
 *
 * The log lock must be held when calling this function
 */
void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	BUG_ON(!current->journal_info);

	clear_buffer_dirty(bh);
	if (test_set_buffer_pinned(bh))
		gfs2_assert_withdraw(sdp, 0);
	if (!buffer_uptodate(bh))
		gfs2_io_error_bh_wd(sdp, bh);
	bd = bh->b_private;
	/* If this buffer is in the AIL and it has already been written
	 * to in-place disk block, remove it from the AIL.
	 */
	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_tr)
		list_move(&bd->bd_ail_st_list, &bd->bd_tr->tr_ail2_list);
	spin_unlock(&sdp->sd_ail_lock);
	get_bh(bh);
	atomic_inc(&sdp->sd_log_pinned);
	trace_gfs2_pin(bd, 1);
}
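
/*
 * Illustrative lifecycle sketch (editorial, not an API contract): a buffer
 * added to a transaction is pinned at commit time and unpinned once its
 * copy has reached the journal, roughly:
 *
 *	gfs2_log_lock(sdp);
 *	gfs2_pin(sdp, bh);	--> extra ref taken, sd_log_pinned++
 *	gfs2_log_unlock(sdp);
 *	... log write completes ...
 *	gfs2_unpin(sdp, bh, tr);	--> buffer joins tr's AIL1 list
 *
 * Callers elsewhere (e.g. the transaction code) do the pinning; the
 * lops after-commit handlers below do the unpinning.
 */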

static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
{
	return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP;
}

static void maybe_release_space(struct gfs2_bufdata *bd)
{
	struct gfs2_glock *gl = bd->bd_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
	struct gfs2_bitmap *bi = rgd->rd_bits + index;

	rgrp_lock_local(rgd);
	if (bi->bi_clone == NULL)
		goto out;
	if (sdp->sd_args.ar_discard)
		gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
	memcpy(bi->bi_clone + bi->bi_offset,
	       bd->bd_bh->b_data + bi->bi_offset, bi->bi_bytes);
	clear_bit(GBF_FULL, &bi->bi_flags);
	rgd->rd_free_clone = rgd->rd_free;
	BUG_ON(rgd->rd_free_clone < rgd->rd_reserved);
	rgd->rd_extfail_pt = rgd->rd_free;

out:
	rgrp_unlock_local(rgd);
}
/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @tr: The system transaction being flushed
 */

static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
		       struct gfs2_trans *tr)
{
	struct gfs2_bufdata *bd = bh->b_private;

	BUG_ON(!buffer_uptodate(bh));
	BUG_ON(!buffer_pinned(bh));

	lock_buffer(bh);
	mark_buffer_dirty(bh);
	clear_buffer_pinned(bh);

	if (buffer_is_rgrp(bd))
		maybe_release_space(bd);

	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_tr) {
		list_del(&bd->bd_ail_st_list);
		brelse(bh);
	} else {
		struct gfs2_glock *gl = bd->bd_gl;
		list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
		atomic_inc(&gl->gl_ail_count);
	}
	bd->bd_tr = tr;
	list_add(&bd->bd_ail_st_list, &tr->tr_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

	clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	trace_gfs2_pin(bd, 0);
	unlock_buffer(bh);
	atomic_dec(&sdp->sd_log_pinned);
}

void gfs2_log_incr_head(struct gfs2_sbd *sdp)
{
	BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
	       (sdp->sd_log_flush_head != sdp->sd_log_head));

	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks)
		sdp->sd_log_flush_head = 0;
}

u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lblock)
{
	struct gfs2_journal_extent *je;

	list_for_each_entry(je, &jd->extent_list, list) {
		if (lblock >= je->lblock && lblock < je->lblock + je->blocks)
			return je->dblock + lblock - je->lblock;
	}

	return -1;
}
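
/*
 * Worked example (illustrative only): if the journal maps to two extents,
 * say lblock 0..9 at dblock 1000 and lblock 10..19 at dblock 5000, then
 * gfs2_log_bmap(jd, 12) walks the extent list, matches the second extent,
 * and returns 5000 + 12 - 10 = 5002. A logical block outside every extent
 * yields -1 (as a u64, i.e. an all-ones sentinel).
 */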

/**
 * gfs2_end_log_write_bh - end log write of pagecache data with buffers
 * @sdp: The superblock
 * @bvec: The bio_vec
 * @error: The i/o status
 *
 * This finds the relevant buffers and unlocks them and sets the
 * error flag according to the status of the i/o request. This is
 * used when the log is writing data which has an in-place version
 * that is pinned in the pagecache.
 */

static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp,
				  struct bio_vec *bvec,
				  blk_status_t error)
{
	struct buffer_head *bh, *next;
	struct page *page = bvec->bv_page;
	unsigned size;

	bh = page_buffers(page);
	size = bvec->bv_len;
	while (bh_offset(bh) < bvec->bv_offset)
		bh = bh->b_this_page;
	do {
		if (error)
			mark_buffer_write_io_error(bh);
		unlock_buffer(bh);
		next = bh->b_this_page;
		size -= bh->b_size;
		brelse(bh);
		bh = next;
	} while(bh && size);
}

/**
 * gfs2_end_log_write - end of i/o to the log
 * @bio: The bio
 *
 * Each bio_vec contains either data from the pagecache or data
 * relating to the log itself. Here we iterate over the bio_vec
 * array, processing both kinds of data.
 *
 */

static void gfs2_end_log_write(struct bio *bio)
{
	struct gfs2_sbd *sdp = bio->bi_private;
	struct bio_vec *bvec;
	struct page *page;
	struct bvec_iter_all iter_all;

	if (bio->bi_status) {
		if (!cmpxchg(&sdp->sd_log_error, 0, (int)bio->bi_status))
			fs_err(sdp, "Error %d writing to journal, jid=%u\n",
			       bio->bi_status, sdp->sd_jdesc->jd_jid);
		gfs2_withdraw_delayed(sdp);
		/* prevent more writes to the journal */
		clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
		wake_up(&sdp->sd_logd_waitq);
	}

	bio_for_each_segment_all(bvec, bio, iter_all) {
		page = bvec->bv_page;
		if (page_has_buffers(page))
			gfs2_end_log_write_bh(sdp, bvec, bio->bi_status);
		else
			mempool_free(page, gfs2_page_pool);
	}

	bio_put(bio);
	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
		wake_up(&sdp->sd_log_flush_wait);
}

/**
 * gfs2_log_submit_bio - Submit any pending log bio
 * @biop: Address of the bio pointer
 * @opf: REQ_OP | op_flags
 *
 * Submit any pending part-built or full bio to the block device. If
 * there is no pending bio, then this is a no-op.
 */

void gfs2_log_submit_bio(struct bio **biop, int opf)
{
	struct bio *bio = *biop;
	if (bio) {
		struct gfs2_sbd *sdp = bio->bi_private;
		atomic_inc(&sdp->sd_log_in_flight);
		bio->bi_opf = opf;
		submit_bio(bio);
		*biop = NULL;
	}
}

/**
 * gfs2_log_alloc_bio - Allocate a bio
 * @sdp: The super block
 * @blkno: The device block number we want to write to
 * @end_io: The bi_end_io callback
 *
 * Allocate a new bio, initialize it with the given parameters and return it.
 *
 * Returns: The newly allocated bio
 */

static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno,
				      bio_end_io_t *end_io)
{
	struct super_block *sb = sdp->sd_vfs;
	struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_VECS);

	bio->bi_iter.bi_sector = blkno << sdp->sd_fsb2bb_shift;
	bio_set_dev(bio, sb->s_bdev);
	bio->bi_end_io = end_io;
	bio->bi_private = sdp;

	return bio;
}

/**
 * gfs2_log_get_bio - Get cached log bio, or allocate a new one
 * @sdp: The super block
 * @blkno: The device block number we want to write to
 * @biop: The bio to get or allocate
 * @op: REQ_OP
 * @end_io: The bi_end_io callback
 * @flush: Always flush the current bio and allocate a new one?
 *
 * If there is a cached bio, then if the next block number is sequential
 * with the previous one, return it, otherwise flush the bio to the
 * device. If there is no cached bio, or we just flushed it, then
 * allocate a new one.
 *
 * Returns: The bio to use for log writes
 */

static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno,
				    struct bio **biop, int op,
				    bio_end_io_t *end_io, bool flush)
{
	struct bio *bio = *biop;

	if (bio) {
		u64 nblk;

		nblk = bio_end_sector(bio);
		nblk >>= sdp->sd_fsb2bb_shift;
		if (blkno == nblk && !flush)
			return bio;
		gfs2_log_submit_bio(biop, op);
	}

	*biop = gfs2_log_alloc_bio(sdp, blkno, end_io);
	return *biop;
}
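
/*
 * Merge-decision example (illustrative): with 4k filesystem blocks on a
 * 512-byte-sector device, sd_fsb2bb_shift is 3. If the cached bio ends at
 * sector 8016, then nblk = 8016 >> 3 = 1002, so a write for fs block 1002
 * extends the cached bio, while any other block number (or flush == true)
 * forces the cached bio out and starts a new one at the requested block.
 */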

/**
 * gfs2_log_write - write to log
 * @sdp: the filesystem
 * @jd: The journal descriptor
 * @page: the page to write
 * @size: the size of the data to write
 * @offset: the offset within the page
 * @blkno: block number of the log entry
 *
 * Try and add the page segment to the current bio. If that fails,
 * submit the current bio to the device and create a new one, and
 * then add the page segment to that.
 */

void gfs2_log_write(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
		    struct page *page, unsigned size, unsigned offset,
		    u64 blkno)
{
	struct bio *bio;
	int ret;

	bio = gfs2_log_get_bio(sdp, blkno, &jd->jd_log_bio, REQ_OP_WRITE,
			       gfs2_end_log_write, false);
	ret = bio_add_page(bio, page, size, offset);
	if (ret == 0) {
		bio = gfs2_log_get_bio(sdp, blkno, &jd->jd_log_bio,
				       REQ_OP_WRITE, gfs2_end_log_write, true);
		ret = bio_add_page(bio, page, size, offset);
		WARN_ON(ret == 0);
	}
}
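
/*
 * Note on the retry above (editorial summary): bio_add_page() can refuse a
 * segment when the cached bio is full. The second gfs2_log_get_bio() call
 * passes flush == true, so it always submits the old bio and allocates a
 * fresh one sized BIO_MAX_VECS; a single page segment is then expected to
 * fit, which is why a second failure only warrants a WARN_ON().
 */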

/**
 * gfs2_log_write_bh - write a buffer's content to the log
 * @sdp: The super block
 * @bh: The buffer pointing to the in-place location
 *
 * This writes the content of the buffer to the next available location
 * in the log. The buffer will be unlocked once the i/o to the log has
 * completed.
 */

static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	u64 dblock;

	dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
	gfs2_log_incr_head(sdp);
	gfs2_log_write(sdp, sdp->sd_jdesc, bh->b_page, bh->b_size,
		       bh_offset(bh), dblock);
}

/**
 * gfs2_log_write_page - write one block stored in a page, into the log
 * @sdp: The superblock
 * @page: The struct page
 *
 * This writes the first block-sized part of the page into the log. Note
 * that the page must have been allocated from the gfs2_page_pool mempool
 * and that after this has been called, ownership has been transferred and
 * the page may be freed at any time.
 */

static void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
{
	struct super_block *sb = sdp->sd_vfs;
	u64 dblock;

	dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
	gfs2_log_incr_head(sdp);
	gfs2_log_write(sdp, sdp->sd_jdesc, page, sb->s_blocksize, 0, dblock);
}

/**
 * gfs2_end_log_read - end I/O callback for reads from the log
 * @bio: The bio
 *
 * Simply unlock the pages in the bio. The main thread will wait on them and
 * process them in order as necessary.
 */

static void gfs2_end_log_read(struct bio *bio)
{
	struct page *page;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		page = bvec->bv_page;
		if (bio->bi_status) {
			int err = blk_status_to_errno(bio->bi_status);

			SetPageError(page);
			mapping_set_error(page->mapping, err);
		}
		unlock_page(page);
	}

	bio_put(bio);
}
/**
 * gfs2_jhead_pg_srch - Look for the journal head in a given page.
 * @jd: The journal descriptor
 * @head: The journal head to start from
 * @page: The page to look in
 *
 * Returns: true if found, false otherwise.
 */

static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd,
			       struct gfs2_log_header_host *head,
			       struct page *page)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_log_header_host lh;
	void *kaddr = kmap_atomic(page);
	unsigned int offset;
	bool ret = false;

	for (offset = 0; offset < PAGE_SIZE; offset += sdp->sd_sb.sb_bsize) {
		if (!__get_log_header(sdp, kaddr + offset, 0, &lh)) {
			if (lh.lh_sequence >= head->lh_sequence)
				*head = lh;
			else {
				ret = true;
				break;
			}
		}
	}
	kunmap_atomic(kaddr);
	return ret;
}
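
/*
 * Search logic, by example (illustrative): log header sequence numbers
 * increase monotonically around the journal until they wrap at the point
 * where the next writes would land. Scanning headers with sequences
 * 14, 15, 16, 9, ... the first drop (16 -> 9) means the previous header
 * (sequence 16) was the most recent one written, i.e. the journal head,
 * so the scan records it in *head and stops.
 */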

/**
 * gfs2_jhead_process_page - Search/cleanup a page
 * @jd: The journal descriptor
 * @index: Index of the page to look into
 * @head: The journal head to start from
 * @done: If set, perform only cleanup, else search and set if found.
 *
 * Find the page with 'index' in the journal's mapping. Search the page for
 * the journal head if requested (*done == false). Release refs on the
 * page so the page cache can reclaim it (put_page() twice). We grabbed a
 * reference on this page two times, first when we did a find_or_create_page()
 * to obtain the page to add it to the bio and second when we do a
 * find_get_page() here to get the page to wait on while I/O on it is being
 * completed.
 * This function is also used to free up a page we might've grabbed but not
 * used. Maybe we added it to a bio, but not submitted it for I/O. Or we
 * submitted the I/O, but we already found the jhead so we only need to drop
 * our references to the page.
 */

static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index,
				    struct gfs2_log_header_host *head,
				    bool *done)
{
	struct page *page;

	page = find_get_page(jd->jd_inode->i_mapping, index);
	wait_on_page_locked(page);

	if (PageError(page))
		*done = true;

	if (!*done)
		*done = gfs2_jhead_pg_srch(jd, head, page);

	put_page(page); /* Once for find_get_page */
	put_page(page); /* Once more for find_or_create_page */
}

static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs)
{
	struct bio *new;

	new = bio_alloc(GFP_NOIO, nr_iovecs);
	bio_copy_dev(new, prev);
	new->bi_iter.bi_sector = bio_end_sector(prev);
	new->bi_opf = prev->bi_opf;
	new->bi_write_hint = prev->bi_write_hint;
	bio_chain(new, prev);
	submit_bio(prev);
	return new;
}
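
/*
 * A short note on the chaining above (editorial): bio_chain(new, prev)
 * marks prev as the parent of new, so prev's end_io is deferred until
 * new also completes, and any error in new is propagated to prev. This
 * is what lets a page whose blocks straddle two bios be unlocked exactly
 * once, by the parent bio's gfs2_end_log_read().
 */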

/**
 * gfs2_find_jhead - find the head of a log
 * @jd: The journal descriptor
 * @head: The log descriptor for the head of the log is returned here
 * @keep_cache: If set, don't truncate the journal's page cache on return
 *
 * Do a search of a journal by reading it in large chunks using bios and find
 * the valid log entry with the highest sequence number (i.e. the log head).
 *
 * Returns: 0 on success, errno otherwise
 */
int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
		    bool keep_cache)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct address_space *mapping = jd->jd_inode->i_mapping;
	unsigned int block = 0, blocks_submitted = 0, blocks_read = 0;
	unsigned int bsize = sdp->sd_sb.sb_bsize, off;
	unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
	unsigned int shift = PAGE_SHIFT - bsize_shift;
	unsigned int max_blocks = 2 * 1024 * 1024 >> bsize_shift;
	struct gfs2_journal_extent *je;
	int sz, ret = 0;
	struct bio *bio = NULL;
	struct page *page = NULL;
	bool done = false;
	errseq_t since;

	memset(head, 0, sizeof(*head));
	if (list_empty(&jd->extent_list))
		gfs2_map_journal_extents(sdp, jd);

	since = filemap_sample_wb_err(mapping);
	list_for_each_entry(je, &jd->extent_list, list) {
		u64 dblock = je->dblock;

		for (; block < je->lblock + je->blocks; block++, dblock++) {
			if (!page) {
				page = find_or_create_page(mapping,
						block >> shift, GFP_NOFS);
				if (!page) {
					ret = -ENOMEM;
					done = true;
					goto out;
				}
				off = 0;
			}

			if (bio && (off || block < blocks_submitted + max_blocks)) {
				sector_t sector = dblock << sdp->sd_fsb2bb_shift;

				if (bio_end_sector(bio) == sector) {
					sz = bio_add_page(bio, page, bsize, off);
					if (sz == bsize)
						goto block_added;
				}
				if (off) {
					unsigned int blocks =
						(PAGE_SIZE - off) >> bsize_shift;

					bio = gfs2_chain_bio(bio, blocks);
					goto add_block_to_new_bio;
				}
			}

			if (bio) {
				blocks_submitted = block;
				submit_bio(bio);
			}

			bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read);
			bio->bi_opf = REQ_OP_READ;
add_block_to_new_bio:
			sz = bio_add_page(bio, page, bsize, off);
			BUG_ON(sz != bsize);
block_added:
			off += bsize;
			if (off == PAGE_SIZE)
				page = NULL;
			if (blocks_submitted <= blocks_read + max_blocks) {
				/* Keep at least one bio in flight */
				continue;
			}

			gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done);
			blocks_read += PAGE_SIZE >> bsize_shift;
			if (done)
				goto out; /* found */
		}
	}

out:
	if (bio)
		submit_bio(bio);
	while (blocks_read < block) {
		gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done);
		blocks_read += PAGE_SIZE >> bsize_shift;
	}

	if (!ret)
		ret = filemap_check_wb_err(mapping, since);

	if (!keep_cache)
		truncate_inode_pages(mapping, 0);

	return ret;
}
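
/*
 * Sizing example for the read-ahead window above (illustrative): with 4k
 * blocks, bsize_shift is 12, so max_blocks = 2 MiB >> 12 = 512 blocks of
 * journal are kept submitted ahead of the reader before the main loop
 * stops to wait on (and search) already-completed pages.
 */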

static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
				      u32 ld_length, u32 ld_data1)
{
	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
	struct gfs2_log_descriptor *ld = page_address(page);
	clear_page(ld);
	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
	ld->ld_type = cpu_to_be32(ld_type);
	ld->ld_length = cpu_to_be32(ld_length);
	ld->ld_data1 = cpu_to_be32(ld_data1);
	ld->ld_data2 = 0;
	return page;
}
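
/*
 * On-disk sketch of the block this builds (illustrative; see
 * include/uapi/linux/gfs2_ondisk.h for the authoritative layout):
 *
 *	+----------------------------+  offset 0
 *	| gfs2_meta_header (LD type) |
 *	+----------------------------+
 *	| ld_type / ld_length /      |
 *	| ld_data1 / ld_data2 / pad  |
 *	+----------------------------+  sizeof(struct gfs2_log_descriptor)
 *	| payload, e.g. a big-endian |
 *	| block-number table         |
 *	+----------------------------+  block size
 *
 * ld_length counts journal blocks covered by the descriptor; ld_data1 is
 * type-specific (e.g. the number of buffers or revokes that follow).
 */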

static void gfs2_check_magic(struct buffer_head *bh)
{
	void *kaddr;
	__be32 *ptr;

	clear_buffer_escaped(bh);
	kaddr = kmap_atomic(bh->b_page);
	ptr = kaddr + bh_offset(bh);
	if (*ptr == cpu_to_be32(GFS2_MAGIC))
		set_buffer_escaped(bh);
	kunmap_atomic(kaddr);
}
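
/*
 * Why escaping exists (editorial summary): journal replay recognizes
 * metadata blocks by the GFS2_MAGIC value in their first 4 bytes. A data
 * block that happens to start with that value would be misparsed, so
 * before a jdata block is logged its first word is zeroed in the log copy
 * and the per-buffer "escaped" flag is recorded in the descriptor table;
 * databuf_lo_scan_elements() writes the magic back on replay.
 */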

static int blocknr_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct gfs2_bufdata *bda, *bdb;

	bda = list_entry(a, struct gfs2_bufdata, bd_list);
	bdb = list_entry(b, struct gfs2_bufdata, bd_list);

	if (bda->bd_bh->b_blocknr < bdb->bd_bh->b_blocknr)
		return -1;
	if (bda->bd_bh->b_blocknr > bdb->bd_bh->b_blocknr)
		return 1;
	return 0;
}

static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
			       unsigned int total, struct list_head *blist,
			       bool is_databuf)
{
	struct gfs2_log_descriptor *ld;
	struct gfs2_bufdata *bd1 = NULL, *bd2;
	struct page *page;
	unsigned int num;
	unsigned n;
	__be64 *ptr;

	gfs2_log_lock(sdp);
	list_sort(NULL, blist, blocknr_cmp);
	bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list);
	while(total) {
		num = total;
		if (total > limit)
			num = limit;
		gfs2_log_unlock(sdp);
		page = gfs2_get_log_desc(sdp,
					 is_databuf ? GFS2_LOG_DESC_JDATA :
					 GFS2_LOG_DESC_METADATA, num + 1, num);
		ld = page_address(page);
		gfs2_log_lock(sdp);
		ptr = (__be64 *)(ld + 1);

		n = 0;
		list_for_each_entry_continue(bd1, blist, bd_list) {
			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
			if (is_databuf) {
				gfs2_check_magic(bd1->bd_bh);
				*ptr++ = cpu_to_be64(buffer_escaped(bd1->bd_bh) ? 1 : 0);
			}
			if (++n >= num)
				break;
		}

		gfs2_log_unlock(sdp);
		gfs2_log_write_page(sdp, page);
		gfs2_log_lock(sdp);

		n = 0;
		list_for_each_entry_continue(bd2, blist, bd_list) {
			get_bh(bd2->bd_bh);
			gfs2_log_unlock(sdp);
			lock_buffer(bd2->bd_bh);

			if (buffer_escaped(bd2->bd_bh)) {
				void *kaddr;
				page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
				ptr = page_address(page);
				kaddr = kmap_atomic(bd2->bd_bh->b_page);
				memcpy(ptr, kaddr + bh_offset(bd2->bd_bh),
				       bd2->bd_bh->b_size);
				kunmap_atomic(kaddr);
				*(__be32 *)ptr = 0;
				clear_buffer_escaped(bd2->bd_bh);
				unlock_buffer(bd2->bd_bh);
				brelse(bd2->bd_bh);
				gfs2_log_write_page(sdp, page);
			} else {
				gfs2_log_write_bh(sdp, bd2->bd_bh);
			}
			gfs2_log_lock(sdp);
			if (++n >= num)
				break;
		}

		BUG_ON(total < num);
		total -= num;
	}
	gfs2_log_unlock(sdp);
}

static void buf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */
	unsigned int nbuf;
	if (tr == NULL)
		return;
	nbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_buf, 0);
}
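
/*
 * Where the 503 above comes from (illustrative arithmetic, assuming the
 * usual on-disk sizes): a 4096-byte block holds one 72-byte
 * struct gfs2_log_descriptor followed by 8-byte big-endian block numbers,
 * so buf_limit() works out to (4096 - 72) / 8 = 503 metadata buffers per
 * descriptor block. Jdata descriptors store two words per buffer (block
 * number plus escape flag), roughly halving the per-block limit.
 */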

static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head;
	struct gfs2_bufdata *bd;

	if (tr == NULL)
		return;

	head = &tr->tr_buf;
	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gfs2_unpin(sdp, bd->bd_bh, tr);
	}
}

static void buf_lo_before_scan(struct gfs2_jdesc *jd,
			       struct gfs2_log_header_host *head, int pass)
{
	if (pass != 0)
		return;

	jd->jd_found_blocks = 0;
	jd->jd_replayed_blocks = 0;
}

static int buf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
				struct gfs2_log_descriptor *ld, __be64 *ptr,
				int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
		return 0;

	gfs2_replay_incr_blk(jd, &start);

	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);

		jd->jd_found_blocks++;

		if (gfs2_revoke_check(jd, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		if (gfs2_meta_check(sdp, bh_ip))
			error = -EIO;
		else {
			struct gfs2_meta_header *mh =
				(struct gfs2_meta_header *)bh_ip->b_data;

			if (mh->mh_type == cpu_to_be32(GFS2_METATYPE_RG)) {
				struct gfs2_rgrpd *rgd;

				rgd = gfs2_blk2rgrpd(sdp, blkno, false);
				if (rgd && rgd->rd_addr == blkno &&
				    rgd->rd_bits && rgd->rd_bits->bi_bh) {
					fs_info(sdp, "Replaying 0x%llx but we "
						"already have a bh!\n",
						(unsigned long long)blkno);
					fs_info(sdp, "busy:%d, pinned:%d\n",
						buffer_busy(rgd->rd_bits->bi_bh) ? 1 : 0,
						buffer_pinned(rgd->rd_bits->bi_bh));
					gfs2_dump_glock(NULL, rgd->rd_gl, true);
				}
			}
			mark_buffer_dirty(bh_ip);
		}
		brelse(bh_log);
		brelse(bh_ip);

		if (error)
			break;

		jd->jd_replayed_blocks++;
	}

	return error;
}

static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_inode_metasync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	gfs2_inode_metasync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
		jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
}

static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct gfs2_meta_header *mh;
	unsigned int offset;
	struct list_head *head = &sdp->sd_log_revokes;
	struct gfs2_bufdata *bd;
	struct page *page;
	unsigned int length;

	gfs2_flush_revokes(sdp);
	if (!sdp->sd_log_num_revoke)
		return;

	length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke);
	page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
	offset = sizeof(struct gfs2_log_descriptor);

	list_for_each_entry(bd, head, bd_list) {
		sdp->sd_log_num_revoke--;

		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
			gfs2_log_write_page(sdp, page);
			page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
			mh = page_address(page);
			clear_page(mh);
			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
			mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
			mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
			offset = sizeof(struct gfs2_meta_header);
		}

		*(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno);
		offset += sizeof(u64);
	}
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

	gfs2_log_write_page(sdp, page);
}
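
/*
 * Packing example for the revoke writer above (illustrative arithmetic,
 * assuming 4k blocks and the usual on-disk sizes): the first block carries
 * a 72-byte descriptor and then (4096 - 72) / 8 = 503 revoked block
 * numbers; each continuation block restarts after a 24-byte
 * gfs2_meta_header, leaving room for (4096 - 24) / 8 = 509 more. So 600
 * revokes would need two journal blocks, matching gfs2_struct2blk().
 */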

static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head = &sdp->sd_log_revokes;
	struct gfs2_bufdata *bd;
	struct gfs2_glock *gl;

	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gl = bd->bd_gl;
		gfs2_glock_remove_revoke(gl);
		kmem_cache_free(gfs2_bufdata_cachep, bd);
	}
}

static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
				  struct gfs2_log_header_host *head, int pass)
{
	if (pass != 0)
		return;

	jd->jd_found_revokes = 0;
	jd->jd_replay_tail = head->lh_tail;
}

static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
				   struct gfs2_log_descriptor *ld, __be64 *ptr,
				   int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	unsigned int blks = be32_to_cpu(ld->ld_length);
	unsigned int revokes = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh;
	unsigned int offset;
	u64 blkno;
	int first = 1;
	int error;

	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
		return 0;

	offset = sizeof(struct gfs2_log_descriptor);

	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		error = gfs2_replay_read_block(jd, start, &bh);
		if (error)
			return error;

		if (!first)
			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

		while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

			error = gfs2_revoke_add(jd, blkno, start);
			if (error < 0) {
				brelse(bh);
				return error;
			}
			else if (error)
				jd->jd_found_revokes++;

			if (!--revokes)
				break;
			offset += sizeof(u64);
		}

		brelse(bh);
		offset = sizeof(struct gfs2_meta_header);
		first = 0;
	}

	return 0;
}

static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_revoke_clean(jd);
		return;
	}
	if (pass != 1)
		return;

	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
		jd->jd_jid, jd->jd_found_revokes);

	gfs2_revoke_clean(jd);
}
/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 * @sdp: The filesystem
 * @tr: The system transaction being flushed
 */

static void databuf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int limit = databuf_limit(sdp);
	unsigned int nbuf;
	if (tr == NULL)
		return;
	nbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_databuf, 1);
}

static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
				    struct gfs2_log_descriptor *ld,
				    __be64 *ptr, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	u64 esc;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
		return 0;

	gfs2_replay_incr_blk(jd, &start);
	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);
		esc = be64_to_cpu(*ptr++);

		jd->jd_found_blocks++;

		if (gfs2_revoke_check(jd, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		/* Unescape */
		if (esc) {
			__be32 *eptr = (__be32 *)bh_ip->b_data;
			*eptr = cpu_to_be32(GFS2_MAGIC);
		}
		mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		jd->jd_replayed_blocks++;
	}

	return error;
}

/* FIXME: sort out accounting for log blocks etc. */

static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_inode_metasync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	/* data sync? */
	gfs2_inode_metasync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
		jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
}

static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head;
	struct gfs2_bufdata *bd;

	if (tr == NULL)
		return;

	head = &tr->tr_databuf;
	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gfs2_unpin(sdp, bd->bd_bh, tr);
	}
}

static const struct gfs2_log_operations gfs2_buf_lops = {
	.lo_before_commit = buf_lo_before_commit,
	.lo_after_commit = buf_lo_after_commit,
	.lo_before_scan = buf_lo_before_scan,
	.lo_scan_elements = buf_lo_scan_elements,
	.lo_after_scan = buf_lo_after_scan,
	.lo_name = "buf",
};

static const struct gfs2_log_operations gfs2_revoke_lops = {
	.lo_before_commit = revoke_lo_before_commit,
	.lo_after_commit = revoke_lo_after_commit,
	.lo_before_scan = revoke_lo_before_scan,
	.lo_scan_elements = revoke_lo_scan_elements,
	.lo_after_scan = revoke_lo_after_scan,
	.lo_name = "revoke",
};

static const struct gfs2_log_operations gfs2_databuf_lops = {
	.lo_before_commit = databuf_lo_before_commit,
	.lo_after_commit = databuf_lo_after_commit,
	.lo_scan_elements = databuf_lo_scan_elements,
	.lo_after_scan = databuf_lo_after_scan,
	.lo_name = "databuf",
};

const struct gfs2_log_operations *gfs2_log_ops[] = {
	&gfs2_databuf_lops,
	&gfs2_buf_lops,
	&gfs2_revoke_lops,
	NULL,
};