// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>
#include <linux/security.h>
#include <linux/log2.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"
#include "lops.h"

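/* Workqueue on which freeze work (sd_freeze_work) is queued; see
 * freeze_go_callback(). */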
struct workqueue_struct *gfs2_freeze_wq;

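/**
 * gfs2_ail_error - complain about a buffer left on the AIL and withdraw
 * @gl: the glock the buffer is attached to
 * @bh: the offending buffer
 *
 * Logs the buffer and glock state and then withdraws the filesystem.
 */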
static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	fs_err(sdp,
	       "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
	       "state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_folio->mapping, bh->b_folio->flags.f);
	fs_err(sdp, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm(sdp, "AIL error\n");
	gfs2_withdraw(sdp);
}

/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: Number of buffers to revoke
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}

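/**
 * gfs2_ail_empty_gl - write revokes for all remaining AIL buffers of a glock
 * @gl: the glock
 *
 * Writes revokes for any buffers still on the AIL of @gl and flushes the
 * log so that the revokes reach the journal before the glock is released.
 *
 * Returns: errno
 */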
static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_trans tr;
	unsigned int revokes;
	int ret = 0;

	revokes = atomic_read(&gl->gl_ail_count);

	if (!revokes) {
		bool have_revokes;
		bool log_in_flight;

		/*
		 * We have nothing on the AIL, but there could be revokes on
		 * the sdp revoke queue, in which case we still want to flush
		 * the log and wait for it to finish.
		 *
		 * If the sdp revoke list is empty too, we might still have
		 * I/O outstanding for writing revokes, so we should wait for
		 * it before returning.
		 *
		 * If none of these conditions are true, our revokes are all
		 * flushed and we can return.
		 */
		gfs2_log_lock(sdp);
		have_revokes = !list_empty(&sdp->sd_log_revokes);
		log_in_flight = atomic_read(&sdp->sd_log_in_flight);
		gfs2_log_unlock(sdp);
		if (have_revokes)
			goto flush;
		if (log_in_flight)
			log_flush_wait(sdp);
		return 0;
	}

	memset(&tr, 0, sizeof(tr));
	set_bit(TR_ONSTACK, &tr.tr_flags);
	ret = __gfs2_trans_begin(&tr, sdp, 0, revokes, _RET_IP_);
	if (ret) {
		fs_err(sdp, "Transaction error %d: Unable to write revokes.", ret);
		goto flush;
	}
	__gfs2_ail_flush(gl, false, revokes);
	gfs2_trans_end(sdp);

flush:
	if (!ret)
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
				GFS2_LFC_AIL_EMPTY_GL);
	return ret;
}

void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	int ret;

	if (!revokes)
		return;

	ret = gfs2_trans_begin(sdp, 0, revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
			GFS2_LFC_AIL_FLUSH);
}

/**
 * gfs2_rgrp_metasync - sync out the metadata of a resource group
 * @gl: the glock protecting the resource group
 *
 * Returns: errno
 */

static int gfs2_rgrp_metasync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *metamapping = gfs2_aspace(sdp);
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
	loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
	int error;

	filemap_fdatawrite_range(metamapping, start, end);
	error = filemap_fdatawait_range(metamapping, start, end);
	WARN_ON_ONCE(error && !gfs2_withdrawn(sdp));
	mapping_set_error(metamapping, error);
	if (error)
		gfs2_io_error(sdp);
	return error;
}

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock. We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to caller to demote/unlock the glock until I/O is complete.
 *
 * Returns: errno
 */

static int rgrp_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	int error;

	if (!rgd || !test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return 0;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_RGRP_GO_SYNC);
	error = gfs2_rgrp_metasync(gl);
	if (!error)
		error = gfs2_ail_empty_gl(gl);
	gfs2_free_clones(rgd);
	return error;
}

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags:
 *
 * We never use LM_ST_DEFERRED with resource groups, so we should
 * always see the metadata flag set here.
 *
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = gfs2_aspace(sdp);
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start, end;

	if (!rgd)
		return;
	start = (rgd->rd_addr * bsize) & PAGE_MASK;
	end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
	gfs2_rgrp_brelse(rgd);
	WARN_ON_ONCE(!(flags & DIO_METADATA));
	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
	truncate_inode_pages_range(mapping, start, end);
}

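/**
 * gfs2_rgrp_go_dump - dump the resource group attached to a glock
 * @seq: the output seq_file
 * @gl: the glock
 * @fs_id_buf: file system id prefix (may be empty)
 */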
static void gfs2_rgrp_go_dump(struct seq_file *seq, const struct gfs2_glock *gl,
			      const char *fs_id_buf)
{
	struct gfs2_rgrpd *rgd = gl->gl_object;

	if (rgd)
		gfs2_rgrp_dump(seq, rgd, fs_id_buf);
}

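/**
 * gfs2_glock2inode - safely get the inode attached to a glock
 * @gl: the glock
 *
 * Marks the inode with GIF_GLOP_PENDING under gl_lockref.lock so that
 * waiters know a glock operation on this inode is in progress; the flag
 * is cleared again via gfs2_clear_glop_pending().
 *
 * Returns: the inode, or NULL if no object is attached
 */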
static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;

	spin_lock(&gl->gl_lockref.lock);
	ip = gl->gl_object;
	if (ip)
		set_bit(GIF_GLOP_PENDING, &ip->i_flags);
	spin_unlock(&gl->gl_lockref.lock);
	return ip;
}

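/**
 * gfs2_glock2rgrp - get the resource group attached to a glock
 * @gl: the glock
 *
 * Returns: the resource group, or NULL if no object is attached
 */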
struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
{
	struct gfs2_rgrpd *rgd;

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	spin_unlock(&gl->gl_lockref.lock);

	return rgd;
}

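/**
 * gfs2_clear_glop_pending - clear the pending-glock-operation flag
 * @ip: the inode (may be NULL)
 *
 * Pairs with gfs2_glock2inode(): clears GIF_GLOP_PENDING and wakes up
 * anybody waiting for the glock operation to complete.
 */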
static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
{
	if (!ip)
		return;

	clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
	wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
}

/**
 * gfs2_inode_metasync - sync out the metadata of an inode
 * @gl: the glock protecting the inode
 *
 * Returns: errno
 */
int gfs2_inode_metasync(struct gfs2_glock *gl)
{
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	filemap_fdatawrite(metamapping);
	error = filemap_fdatawait(metamapping);
	if (error)
		gfs2_io_error(gl->gl_name.ln_sbd);
	return error;
}

/**
 * inode_go_sync - Sync the dirty metadata of an inode
 * @gl: the glock protecting the inode
 *
 * Returns: errno
 */

static int inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);
	int isreg = ip && S_ISREG(ip->i_inode.i_mode);
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error = 0, ret;

	if (isreg) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		goto out;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_INODE_GO_SYNC);
	filemap_fdatawrite(metamapping);
	if (isreg) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	ret = gfs2_inode_metasync(gl);
	if (!error)
		error = ret;
	ret = gfs2_ail_empty_gl(gl);
	if (!error)
		error = ret;
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);

out:
	gfs2_clear_glop_pending(ip);
	return error;
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags:
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);

	gfs2_assert_withdraw(gl->gl_name.ln_sbd, !atomic_read(&gl->gl_ail_count));

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags);
			forget_all_cached_acls(&ip->i_inode);
			security_inode_invalidate_secctx(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

	if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_INODE_GO_INVAL);
		gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);

	gfs2_clear_glop_pending(ip);
}

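/**
 * gfs2_dinode_in - copy the fields of an on-disk dinode into an inode
 * @ip: the in-core GFS2 inode
 * @buf: the buffer containing the on-disk dinode
 *
 * Returns: 0 on success, or -EIO if the dinode is inconsistent
 */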
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	const struct gfs2_dinode *str = buf;
	struct timespec64 atime, iatime;
	u16 height, depth;
	umode_t mode = be32_to_cpu(str->di_mode);
	struct inode *inode = &ip->i_inode;
	bool is_new = inode_state_read_once(inode) & I_NEW;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr))) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	if (unlikely(!is_new && inode_wrong_type(inode, mode))) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	inode->i_mode = mode;
	if (is_new) {
		inode->i_rdev = 0;
		switch (mode & S_IFMT) {
		case S_IFBLK:
		case S_IFCHR:
			inode->i_rdev = MKDEV(be32_to_cpu(str->di_major),
					      be32_to_cpu(str->di_minor));
			break;
		}
	}

	i_uid_write(inode, be32_to_cpu(str->di_uid));
	i_gid_write(inode, be32_to_cpu(str->di_gid));
	set_nlink(inode, be32_to_cpu(str->di_nlink));
	i_size_write(inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	iatime = inode_get_atime(inode);
	if (timespec64_compare(&iatime, &atime) < 0)
		inode_set_atime_to_ts(inode, atime);
	inode_set_mtime(inode, be64_to_cpu(str->di_mtime),
			be32_to_cpu(str->di_mtime_nsec));
	inode_set_ctime(inode, be64_to_cpu(str->di_ctime),
			be32_to_cpu(str->di_ctime_nsec));

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > sdp->sd_max_height)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	if ((ip->i_diskflags & GFS2_DIF_EXHASH) &&
	    depth < ilog2(sdp->sd_hash_ptrs)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (gfs2_is_stuffed(ip) && inode->i_size > gfs2_max_stuffed_size(ip)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	if (S_ISREG(inode->i_mode))
		gfs2_set_aops(inode);

	return 0;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

static int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	return error;
}

/**
 * inode_go_instantiate - read in an inode if necessary
 * @gl: The glock
 *
 * Returns: errno
 */

static int inode_go_instantiate(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct gfs2_glock *io_gl;
	int error;

	if (!ip) /* no inode to populate - read it in later */
		return 0;

	error = gfs2_inode_refresh(ip);
	if (error)
		return error;
	io_gl = ip->i_iopen_gh.gh_gl;
	io_gl->gl_no_formal_ino = ip->i_no_formal_ino;
	return 0;
}

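/**
 * inode_go_held - called when a holder of an inode glock becomes held
 * @gh: the glock holder
 *
 * Waits for outstanding direct I/O unless the holder requests the
 * LM_ST_DEFERRED state, and resumes an interrupted truncate when the
 * glock is held exclusively.
 *
 * Returns: errno
 */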
static int inode_go_held(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip) /* no inode to populate - read it in later */
		return 0;

	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE))
		error = gfs2_truncatei_resume(ip);

	return error;
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: The glock
 * @fs_id_buf: file system id (may be empty)
 *
 */

static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl,
			  const char *fs_id_buf)
{
	struct gfs2_inode *ip = gl->gl_object;
	const struct inode *inode = &ip->i_inode;

	if (ip == NULL)
		return;

	gfs2_print_dbg(seq, "%s I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu "
		       "p:%lu\n", fs_id_buf,
		       (unsigned long long)ip->i_no_formal_ino,
		       (unsigned long long)ip->i_no_addr,
		       IF2DT(inode->i_mode), ip->i_flags,
		       (unsigned int)ip->i_diskflags,
		       (unsigned long long)i_size_read(inode),
		       inode->i_data.nrpages);
}

/**
 * freeze_go_callback - A cluster node is requesting a freeze
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 */

static void freeze_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct super_block *sb = sdp->sd_vfs;

	if (!remote ||
	    (gl->gl_state != LM_ST_SHARED &&
	     gl->gl_state != LM_ST_UNLOCKED) ||
	    gl->gl_demote_state != LM_ST_UNLOCKED)
		return;

	/*
	 * Try to get an active super block reference to prevent racing with
	 * unmount (see super_trylock_shared()). But note that unmount isn't
	 * the only place where a write lock on s_umount is taken, and we can
	 * fail here because of things like remount as well.
	 */
	if (down_read_trylock(&sb->s_umount)) {
		atomic_inc(&sb->s_active);
		up_read(&sb->s_umount);
		if (!queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work))
			deactivate_super(sb);
	}
}

/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 *
 * Returns: errno
 */
static int freeze_go_xmote_bh(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head);
		if (gfs2_assert_withdraw(sdp, !error))
			return error;
		if (gfs2_assert_withdraw(sdp, head.lh_flags &
					 GFS2_LOG_HEAD_UNMOUNT))
			return -EIO;
		gfs2_log_pointers_init(sdp, &head);
	}
	return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 *
 * gl_lockref.lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (!remote || test_bit(SDF_KILL, &sdp->sd_flags))
		return;

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		gl->gl_lockref.count++;
		if (!gfs2_queue_try_to_evict(gl))
			gl->gl_lockref.count--;
	}
}

const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_instantiate = inode_go_instantiate,
	.go_held = inode_go_held,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE | GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_instantiate = gfs2_rgrp_go_instantiate,
	.go_dump = gfs2_rgrp_go_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_freeze_glops = {
	.go_xmote_bh = freeze_go_xmote_bh,
	.go_callback = freeze_go_callback,
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
	.go_dump = inode_go_dump,
	.go_subclass = 1,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
};

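/* Table mapping each lock type (LM_TYPE_*) to its glock operations. */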
const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};
705