// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"


/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add a block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -ENODATA;
	return 0;
}

/**
 * gfs2_write_jdata_folio - gfs2 jdata-specific version of block_write_full_folio
 * @folio: The folio to write
 * @wbc: The writeback control
 *
 * This is the same as calling block_write_full_folio, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_jdata_folio(struct folio *folio,
				  struct writeback_control *wbc)
{
	struct inode * const inode = folio->mapping->host;
	loff_t i_size = i_size_read(inode);

	/*
	 * The folio straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	if (folio_pos(folio) < i_size && i_size < folio_next_pos(folio))
		folio_zero_segment(folio, offset_in_folio(folio, i_size),
				folio_size(folio));

	return __block_write_full_folio(inode, folio, gfs2_get_block_noalloc,
			wbc);
}

/**
 * __gfs2_jdata_write_folio - The core of jdata writepage
 * @folio: The folio to write
 * @wbc: The writeback control
 *
 * Implements the core of write back. If a transaction is required then
 * the checked flag will have been set and the transaction will have
 * already been started before this is called.
 */
static int __gfs2_jdata_write_folio(struct folio *folio,
				    struct writeback_control *wbc)
{
	struct inode *inode = folio->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

	if (folio_test_checked(folio)) {
		folio_clear_checked(folio);
		if (!folio_buffers(folio)) {
			create_empty_buffers(folio,
					inode->i_sb->s_blocksize,
					BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_trans_add_databufs(ip->i_gl, folio, 0, folio_size(folio));
	}
	return gfs2_write_jdata_folio(folio, wbc);
}

/**
 * gfs2_jdata_writeback - Write jdata folios to the log
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */
int gfs2_jdata_writeback(struct address_space *mapping, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	struct folio *folio = NULL;
	int error;

	BUG_ON(current->journal_info);
	if (gfs2_assert_withdraw(sdp, ip->i_gl->gl_state == LM_ST_EXCLUSIVE))
		return 0;

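	/*
	 * Note: writeback_iter() zeroes @error on its first call, so there
	 * is no need to initialize it above.
	 */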
	while ((folio = writeback_iter(mapping, wbc, folio, &error))) {
		if (folio_test_checked(folio)) {
			folio_redirty_for_writepage(wbc, folio);
			folio_unlock(folio);
			continue;
		}
		error = __gfs2_jdata_write_folio(folio, wbc);
	}

	return error;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct iomap_writepage_ctx wpc = {
		.inode		= mapping->host,
		.wbc		= wbc,
		.ops		= &gfs2_writeback_ops,
	};
	int ret;

	/*
	 * Even if we didn't write enough pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	ret = iomap_writepages(&wpc);
	if (ret == 0 && wbc->nr_to_write > 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
	return ret;
}

/**
 * gfs2_write_jdata_batch - Write back a folio batch's worth of folios
 * @mapping: The mapping
 * @wbc: The writeback control
 * @fbatch: The batch of folios
 * @done_index: Updated to the index where writeback should resume
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_batch(struct address_space *mapping,
				  struct writeback_control *wbc,
				  struct folio_batch *fbatch,
				  pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks;
	int i;
	int ret;
	size_t size = 0;
	int nr_folios = folio_batch_count(fbatch);

	for (i = 0; i < nr_folios; i++)
		size += folio_size(fbatch->folios[i]);
	nrblocks = size >> inode->i_blkbits;

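	/*
	 * One transaction covers the whole batch: reserve one journal block
	 * and one revoke per block of data.
	 */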
	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_folios; i++) {
		struct folio *folio = fbatch->folios[i];

		*done_index = folio->index;

		folio_lock(folio);

		if (unlikely(folio->mapping != mapping)) {
continue_unlock:
			folio_unlock(folio);
			continue;
		}

		if (!folio_test_dirty(folio)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (folio_test_writeback(folio)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				folio_wait_writeback(folio);
			else
				goto continue_unlock;
		}

		BUG_ON(folio_test_writeback(folio));
		if (!folio_clear_dirty_for_io(folio))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_write_folio(folio, wbc);
		if (unlikely(ret)) {
			/*
			 * done_index is set past this page, so media errors
			 * will not choke background writeout for the entire
			 * file. This has consequences for range_cyclic
			 * semantics (ie. it may not be suitable for data
			 * integrity writeout).
			 */
			*done_index = folio_next_index(folio);
			ret = 1;
			break;
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}
	}
	gfs2_trans_end(sdp);
	return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct folio_batch fbatch;
	int nr_folios;
	pgoff_t writeback_index;
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	xa_mark_t tag;

	folio_batch_init(&fbatch);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	tag = wbc_to_tag(wbc);

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_folios = filemap_get_folios_tag(mapping, &index, end,
				tag, &fbatch);
		if (nr_folios == 0)
			break;

		ret = gfs2_write_jdata_batch(mapping, wbc, &fbatch,
				&done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		folio_batch_release(&fbatch);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}

/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
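	/*
	 * For integrity sync, flush the log to commit any data that still
	 * had to go through the journal, then make a second pass to write
	 * back whatever that leaves or makes dirty.
	 */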
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_JDATA_WPAGES);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

/**
 * stuffed_read_folio - Fill in a Linux folio with stuffed file data
 * @ip: the inode
 * @folio: the folio
 *
 * A stuffed file keeps its data in the inode's own disk block, directly
 * after the dinode header.
 *
 * Returns: errno
 */
static int stuffed_read_folio(struct gfs2_inode *ip, struct folio *folio)
{
	struct buffer_head *dibh = NULL;
	size_t dsize = i_size_read(&ip->i_inode);
	void *from = NULL;
	int error = 0;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero folio in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(folio->index)) {
		dsize = 0;
	} else {
		error = gfs2_meta_inode_buffer(ip, &dibh);
		if (error)
			goto out;
		from = dibh->b_data + sizeof(struct gfs2_dinode);
	}

	folio_fill_tail(folio, 0, from, dsize);
	brelse(dibh);
out:
	folio_end_read(folio, error == 0);

	return error;
}

/**
 * gfs2_read_folio - read a folio from a file
 * @file: The file to read
 * @folio: The folio in the file
 */
static int gfs2_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int error = 0;

	if (!gfs2_is_jdata(ip) ||
	    (i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) {
		iomap_bio_read_folio(folio, &gfs2_iomap_ops);
	} else if (gfs2_is_stuffed(ip)) {
		error = stuffed_read_folio(ip, folio);
	} else {
		error = mpage_read_folio(folio, gfs2_block_map);
	}

	if (gfs2_withdrawn(sdp))
		return -EIO;

	return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 * Returns: The number of bytes read, or errno on failure
 */

ssize_t gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
			   size_t size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos >> PAGE_SHIFT;
	size_t copied = 0;

	do {
		size_t offset, chunk;
		struct folio *folio;

		folio = read_cache_folio(mapping, index, gfs2_read_folio, NULL);
		if (IS_ERR(folio)) {
			if (PTR_ERR(folio) == -EINTR)
				continue;
			return PTR_ERR(folio);
		}
		offset = *pos + copied - folio_pos(folio);
		chunk = min(size - copied, folio_size(folio) - offset);
		memcpy_from_folio(buf + copied, folio, offset, chunk);
		index = folio_next_index(folio);
		folio_put(folio);
		copied += chunk;
	} while (copied < size);
	(*pos) += size;
	return size;
}

/**
 * gfs2_readahead - Read a bunch of pages at once
 * @rac: Read-ahead control structure
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore anything which
 *    is slightly inconvenient (such as locking conflicts between the
 *    page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readahead() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static void gfs2_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

	if (gfs2_is_stuffed(ip))
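		/* note 2 above: leave stuffed files to read_folio */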
		;
	else if (gfs2_is_jdata(ip))
		mpage_readahead(rac, gfs2_block_map);
	else
		iomap_bio_readahead(rac, &gfs2_iomap_ops);
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh;
	u64 fs_total, new_free;

	if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
		return;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	update_statfs(sdp, m_bh);
	brelse(m_bh);
out:
	sdp->sd_rindex_uptodate = 0;
	gfs2_trans_end(sdp);
}

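/**
 * gfs2_jdata_dirty_folio - Mark a jdata folio dirty
 * @mapping: The mapping the folio belongs to
 * @folio: The folio being dirtied
 *
 * When a folio is dirtied inside a transaction, flag it as "checked" so
 * that the jdata writeback path knows its buffers still need to be added
 * to the journal (see __gfs2_jdata_write_folio).
 */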
static bool gfs2_jdata_dirty_folio(struct address_space *mapping,
				   struct folio *folio)
{
	if (current->journal_info)
		folio_set_checked(folio);
	return block_dirty_folio(mapping, folio);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}

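/*
 * gfs2_discard - Detach a buffer from the journal before its folio is
 * invalidated: clear the dirty bit, take the buffer off the journal's
 * lists (or out of the AIL if it has already been written to the log),
 * then unmap it.
 */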
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else {
			spin_lock(&sdp->sd_ail_lock);
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
			spin_unlock(&sdp->sd_ail_lock);
		}
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

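/**
 * gfs2_invalidate_folio - Invalidate part or all of a jdata folio
 * @folio: The folio being invalidated
 * @offset: Start of the range being invalidated
 * @length: Length of the range
 *
 * Discard the buffers that lie entirely within the invalidated range,
 * and release the folio's private data when the whole folio goes.
 */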
static void gfs2_invalidate_folio(struct folio *folio, size_t offset,
				  size_t length)
{
	struct gfs2_sbd *sdp = GFS2_SB(folio->mapping->host);
	size_t stop = offset + length;
	int partial_page = (offset || length < folio_size(folio));
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!folio_test_locked(folio));
	if (!partial_page)
		folio_clear_checked(folio);
	head = folio_buffers(folio);
	if (!head)
		goto out;

	bh = head;
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		filemap_release_folio(folio, 0);
}

/**
 * gfs2_release_folio - free the metadata associated with a folio
 * @folio: the folio that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Calls try_to_free_buffers() to free the buffers and put the folio if the
 * buffers can be released.
 *
 * Returns: true if the folio was put or else false
 */

bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask)
{
	struct address_space *mapping = folio->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	head = folio_buffers(folio);
	if (!head)
		return false;

	/*
	 * mm accommodates an old ext3 case where clean folios might
	 * not have had the dirty bit cleared. Thus, it can send actual
	 * dirty folios to ->release_folio() via shrink_active_list().
	 *
	 * As a workaround, we skip folios that contain dirty buffers
	 * below. Once ->release_folio isn't called on dirty folios
	 * anymore, we can warn on dirty buffers like we used to here
	 * again.
	 */

	gfs2_log_lock(sdp);
	bh = head;
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while (bh != head);

	bh = head;
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			/*
			 * The bd may still be queued as a revoke, in which
			 * case we must not dequeue nor free it.
			 */
			if (!bd->bd_blkno && !list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			if (list_empty(&bd->bd_list))
				kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(folio);

cannot_release:
	gfs2_log_unlock(sdp);
	return false;
}

static const struct address_space_operations gfs2_aops = {
	.writepages = gfs2_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = iomap_dirty_folio,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
	.bmap = gfs2_bmap,
	.migrate_folio = filemap_migrate_folio,
	.is_partially_uptodate = iomap_is_partially_uptodate,
	.error_remove_folio = generic_error_remove_folio,
};

static const struct address_space_operations gfs2_jdata_aops = {
	.writepages = gfs2_jdata_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = gfs2_jdata_dirty_folio,
	.bmap = gfs2_bmap,
	.migrate_folio = buffer_migrate_folio,
	.invalidate_folio = gfs2_invalidate_folio,
	.release_folio = gfs2_release_folio,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_folio = generic_error_remove_folio,
};

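/*
 * gfs2_set_aops - Choose the address space operations for an inode:
 * jdata inodes use the buffer-head based gfs2_jdata_aops so that their
 * data can go through the journal; everything else uses the iomap-based
 * gfs2_aops.
 */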
void gfs2_set_aops(struct inode *inode)
{
	if (gfs2_is_jdata(GFS2_I(inode)))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		inode->i_mapping->a_ops = &gfs2_aops;
}