Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v5.18, 359 lines, 11 kB
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 */

#ifndef __GLOCK_DOT_H__
#define __GLOCK_DOT_H__

#include <linux/sched.h>
#include <linux/parser.h>
#include "incore.h"
#include "util.h"

/* Options for hostdata parser */

enum {
        Opt_jid,
        Opt_id,
        Opt_first,
        Opt_nodir,
        Opt_err,
};

/*
 * lm_lockname types
 */

#define LM_TYPE_RESERVED        0x00
#define LM_TYPE_NONDISK         0x01
#define LM_TYPE_INODE           0x02
#define LM_TYPE_RGRP            0x03
#define LM_TYPE_META            0x04
#define LM_TYPE_IOPEN           0x05
#define LM_TYPE_FLOCK           0x06
#define LM_TYPE_PLOCK           0x07
#define LM_TYPE_QUOTA           0x08
#define LM_TYPE_JOURNAL         0x09

/*
 * lm_lock() states
 *
 * SHARED is compatible with SHARED, not with DEFERRED or EX.
 * DEFERRED is compatible with DEFERRED, not with SHARED or EX.
 */

#define LM_ST_UNLOCKED          0
#define LM_ST_EXCLUSIVE         1
#define LM_ST_DEFERRED          2
#define LM_ST_SHARED            3

/*
 * lm_lock() flags
 *
 * LM_FLAG_TRY
 * Don't wait to acquire the lock if it can't be granted immediately.
 *
 * LM_FLAG_TRY_1CB
 * Send one blocking callback if TRY is set and the lock is not granted.
 *
 * LM_FLAG_NOEXP
 * GFS sets this flag on lock requests it makes while doing journal recovery.
 * These special requests should not be blocked due to the recovery like
 * ordinary locks would be.
 *
 * LM_FLAG_ANY
 * A SHARED request may also be granted in DEFERRED, or a DEFERRED request may
 * also be granted in SHARED. The preferred state is whichever is compatible
 * with other granted locks, or the specified state if no other locks exist.
 *
 * LM_FLAG_PRIORITY
 * Override fairness considerations. Suppose a lock is held in a shared state
 * and there is a pending request for the deferred state. A shared lock
 * request with the priority flag would be allowed to bypass the deferred
 * request and directly join the other shared lock. A shared lock request
 * without the priority flag might be forced to wait until the deferred
 * request had acquired and released the lock.
 *
 * LM_FLAG_NODE_SCOPE
 * This holder agrees to share the lock within this node. In other words,
 * the glock is held in EX mode according to DLM, but local holders on the
 * same node can share it.
 */

#define LM_FLAG_TRY             0x0001
#define LM_FLAG_TRY_1CB         0x0002
#define LM_FLAG_NOEXP           0x0004
#define LM_FLAG_ANY             0x0008
#define LM_FLAG_PRIORITY        0x0010
#define LM_FLAG_NODE_SCOPE      0x0020
#define GL_ASYNC                0x0040
#define GL_EXACT                0x0080
#define GL_SKIP                 0x0100
#define GL_NOCACHE              0x0400

/*
 * lm_async_cb return flags
 *
 * LM_OUT_ST_MASK
 * Masks the lower two bits of lock state in the returned value.
 *
 * LM_OUT_CANCELED
 * The lock request was canceled.
 */
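
/*
 * Editor's illustrative sketch, not part of the original header: the
 * lm_lock() state compatibility rules described above, expressed as a
 * helper.  UNLOCKED is compatible with everything, SHARED only with
 * SHARED, DEFERRED only with DEFERRED, and EXCLUSIVE with nothing.
 * The function name is hypothetical.
 */
static inline int example_lm_states_compatible(unsigned int a, unsigned int b)
{
        if (a == LM_ST_UNLOCKED || b == LM_ST_UNLOCKED)
                return 1;
        /* equal non-exclusive states can coexist; EX conflicts with EX */
        return a == b && a != LM_ST_EXCLUSIVE;
}
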
#define LM_OUT_ST_MASK          0x00000003
#define LM_OUT_CANCELED         0x00000008
#define LM_OUT_ERROR            0x00000004

/*
 * lm_recovery_done() messages
 */

#define LM_RD_GAVEUP            308
#define LM_RD_SUCCESS           309

#define GLR_TRYFAILED           13

#define GL_GLOCK_MAX_HOLD       (long)(HZ / 5)
#define GL_GLOCK_DFT_HOLD       (long)(HZ / 5)
#define GL_GLOCK_MIN_HOLD       (long)(10)
#define GL_GLOCK_HOLD_INCR      (long)(HZ / 20)
#define GL_GLOCK_HOLD_DECR      (long)(HZ / 40)

struct lm_lockops {
        const char *lm_proto_name;
        int (*lm_mount) (struct gfs2_sbd *sdp, const char *table);
        void (*lm_first_done) (struct gfs2_sbd *sdp);
        void (*lm_recovery_result) (struct gfs2_sbd *sdp, unsigned int jid,
                                    unsigned int result);
        void (*lm_unmount) (struct gfs2_sbd *sdp);
        void (*lm_withdraw) (struct gfs2_sbd *sdp);
        void (*lm_put_lock) (struct gfs2_glock *gl);
        int (*lm_lock) (struct gfs2_glock *gl, unsigned int req_state,
                        unsigned int flags);
        void (*lm_cancel) (struct gfs2_glock *gl);
        const match_table_t *lm_tokens;
};

extern struct workqueue_struct *gfs2_delete_workqueue;
static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;
        struct pid *pid;

        /* Look in glock's list of holders for one with current task as owner */
        spin_lock(&gl->gl_lockref.lock);
        pid = task_pid(current);
        list_for_each_entry(gh, &gl->gl_holders, gh_list) {
                if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
                        break;
                if (test_bit(HIF_MAY_DEMOTE, &gh->gh_iflags))
                        continue;
                if (gh->gh_owner_pid == pid)
                        goto out;
        }
        gh = NULL;
out:
        spin_unlock(&gl->gl_lockref.lock);

        return gh;
}

static inline int gfs2_glock_is_held_excl(struct gfs2_glock *gl)
{
        return gl->gl_state == LM_ST_EXCLUSIVE;
}

static inline int gfs2_glock_is_held_dfrd(struct gfs2_glock *gl)
{
        return gl->gl_state == LM_ST_DEFERRED;
}

static inline int gfs2_glock_is_held_shrd(struct gfs2_glock *gl)
{
        return gl->gl_state == LM_ST_SHARED;
}

static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
{
        if (gl->gl_ops->go_flags & GLOF_ASPACE)
                return (struct address_space *)(gl + 1);
        return NULL;
}

extern int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
                          const struct gfs2_glock_operations *glops,
                          int create, struct gfs2_glock **glp);
extern void gfs2_glock_hold(struct gfs2_glock *gl);
extern void gfs2_glock_put(struct gfs2_glock *gl);
extern void gfs2_glock_queue_put(struct gfs2_glock *gl);

extern void __gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
                               u16 flags, struct gfs2_holder *gh,
                               unsigned long ip);
static inline void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
                                    u16 flags, struct gfs2_holder *gh) {
        __gfs2_holder_init(gl, state, flags, gh, _RET_IP_);
}

extern void gfs2_holder_reinit(unsigned int state, u16 flags,
                               struct gfs2_holder *gh);
extern void gfs2_holder_uninit(struct gfs2_holder *gh);
extern int gfs2_glock_nq(struct gfs2_holder *gh);
extern int gfs2_glock_poll(struct gfs2_holder *gh);
extern int gfs2_instantiate(struct gfs2_holder *gh);
extern int gfs2_glock_wait(struct gfs2_holder *gh);
extern int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs);
extern void gfs2_glock_dq(struct gfs2_holder *gh);
extern void gfs2_glock_dq_wait(struct gfs2_holder *gh);
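
/*
 * Editor's illustrative sketch, not part of the original header: an
 * asynchronous request using GL_ASYNC with the enqueue/wait functions
 * declared above.  gfs2_glock_nq() queues the holder and returns without
 * blocking; gfs2_glock_wait() collects the result later (gfs2_glock_poll()
 * can test for completion first).  The function name is hypothetical and
 * error handling is simplified.
 */
static inline int example_async_shared(struct gfs2_glock *gl,
                                       struct gfs2_holder *gh)
{
        int error;

        gfs2_holder_init(gl, LM_ST_SHARED, GL_ASYNC, gh);
        error = gfs2_glock_nq(gh);
        if (error)
                goto out_uninit;

        /* ... overlap other work with the in-flight lock request ... */

        error = gfs2_glock_wait(gh);
        if (!error)
                return 0;       /* caller must gfs2_glock_dq_uninit(gh) later */
out_uninit:
        gfs2_holder_uninit(gh);
        return error;
}
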
extern void gfs2_glock_dq_uninit(struct gfs2_holder *gh);
extern int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
                             const struct gfs2_glock_operations *glops,
                             unsigned int state, u16 flags,
                             struct gfs2_holder *gh);
extern int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
extern void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
extern void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl,
                            bool fsid);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) {              \
                        gfs2_dump_glock(NULL, gl, true);        \
                        BUG(); } } while(0)
#define gfs2_glock_assert_warn(gl, x) do { if (unlikely(!(x))) {        \
                        gfs2_dump_glock(NULL, gl, true);                \
                        gfs2_assert_warn((gl)->gl_name.ln_sbd, (x)); } } \
        while (0)
#define gfs2_glock_assert_withdraw(gl, x) do { if (unlikely(!(x))) {    \
                        gfs2_dump_glock(NULL, gl, true);                \
                        gfs2_assert_withdraw((gl)->gl_name.ln_sbd, (x)); } } \
        while (0)

extern __printf(2, 3)
void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...);

/**
 * gfs2_glock_nq_init - initialize a holder and enqueue it on a glock
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Returns: 0, GLR_*, or errno
 */

static inline int gfs2_glock_nq_init(struct gfs2_glock *gl,
                                     unsigned int state, u16 flags,
                                     struct gfs2_holder *gh)
{
        int error;

        __gfs2_holder_init(gl, state, flags, gh, _RET_IP_);

        error = gfs2_glock_nq(gh);
        if (error)
                gfs2_holder_uninit(gh);

        return error;
}
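
/*
 * Editor's illustrative sketch, not part of the original header: a
 * non-blocking acquisition using gfs2_glock_nq_init() above with
 * LM_FLAG_TRY.  A TRY request that cannot be granted immediately fails
 * with GLR_TRYFAILED instead of waiting; gfs2_glock_nq_init() has
 * already uninitialized the holder on any failure.  The function name
 * and the -EAGAIN mapping are hypothetical.
 */
static inline int example_try_shared(struct gfs2_glock *gl)
{
        struct gfs2_holder gh;
        int error;

        error = gfs2_glock_nq_init(gl, LM_ST_SHARED, LM_FLAG_TRY, &gh);
        if (error == GLR_TRYFAILED)
                return -EAGAIN; /* contended; retry later */
        if (error)
                return error;

        /* ... read under the shared glock ... */

        gfs2_glock_dq_uninit(&gh);
        return 0;
}
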
extern void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
extern void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
extern bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay);
extern void gfs2_cancel_delete_work(struct gfs2_glock *gl);
extern bool gfs2_delete_work_queued(const struct gfs2_glock *gl);
extern void gfs2_flush_delete_work(struct gfs2_sbd *sdp);
extern void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
extern void gfs2_glock_finish_truncate(struct gfs2_inode *ip);
extern void gfs2_glock_thaw(struct gfs2_sbd *sdp);
extern void gfs2_glock_add_to_lru(struct gfs2_glock *gl);
extern void gfs2_glock_free(struct gfs2_glock *gl);

extern int __init gfs2_glock_init(void);
extern void gfs2_glock_exit(void);

extern void gfs2_create_debugfs_file(struct gfs2_sbd *sdp);
extern void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp);
extern void gfs2_register_debugfs(void);
extern void gfs2_unregister_debugfs(void);

extern const struct lm_lockops gfs2_dlm_ops;

static inline void gfs2_holder_mark_uninitialized(struct gfs2_holder *gh)
{
        gh->gh_gl = NULL;
}

static inline bool gfs2_holder_initialized(struct gfs2_holder *gh)
{
        return gh->gh_gl;
}

static inline bool gfs2_holder_queued(struct gfs2_holder *gh)
{
        return !list_empty(&gh->gh_list);
}

/**
 * glock_set_object - set the gl_object field of a glock
 * @gl: the glock
 * @object: the object
 */
static inline void glock_set_object(struct gfs2_glock *gl, void *object)
{
        spin_lock(&gl->gl_lockref.lock);
        if (gfs2_assert_warn(gl->gl_name.ln_sbd, gl->gl_object == NULL))
                gfs2_dump_glock(NULL, gl, true);
        gl->gl_object = object;
        spin_unlock(&gl->gl_lockref.lock);
}

/**
 * glock_clear_object - clear the gl_object field of a glock
 * @gl: the glock
 * @object: the object
 *
 * I'd love to similarly add this:
 *	else if (gfs2_assert_warn(gl->gl_sbd, gl->gl_object == object))
 *		gfs2_dump_glock(NULL, gl, true);
 * Unfortunately, that's not possible because as soon as gfs2_delete_inode
 * frees the block in the rgrp, another process can reassign it for an I_NEW
 * inode in gfs2_create_inode because that calls new_inode, not gfs2_iget.
 * That means gfs2_delete_inode may subsequently try to call this function
 * for a glock that's already pointing to a brand new inode. If we clear the
 * new inode's gl_object, we'll introduce metadata corruption. Function
 * gfs2_delete_inode calls clear_inode which calls gfs2_clear_inode which also
 * tries to clear gl_object, so it's more than just gfs2_delete_inode.
 */
static inline void glock_clear_object(struct gfs2_glock *gl, void *object)
{
        spin_lock(&gl->gl_lockref.lock);
        if (gl->gl_object == object)
                gl->gl_object = NULL;
        spin_unlock(&gl->gl_lockref.lock);
}

static inline void gfs2_holder_allow_demote(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        spin_lock(&gl->gl_lockref.lock);
        set_bit(HIF_MAY_DEMOTE, &gh->gh_iflags);
        spin_unlock(&gl->gl_lockref.lock);
}

static inline void gfs2_holder_disallow_demote(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        spin_lock(&gl->gl_lockref.lock);
        clear_bit(HIF_MAY_DEMOTE, &gh->gh_iflags);
        spin_unlock(&gl->gl_lockref.lock);
}

extern void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation);
extern bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation);

#endif /* __GLOCK_DOT_H__ */
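
/*
 * Editor's illustrative addendum, not part of glock.h: the shape of a
 * minimal struct lm_lockops instance.  This is loosely modeled on the
 * in-tree single-node "lock_nolock" ops in fs/gfs2/ops_fstype.c; only
 * the hooks a protocol needs are filled in, the rest stay NULL.  The
 * "example" names below are hypothetical; gfs2_dlm_ops (declared above)
 * is the real clustered implementation.
 */
static const match_table_t example_tokens = {
        { Opt_jid, "jid=%d" },
        { Opt_err, NULL },
};

static const struct lm_lockops example_lockops = {
        .lm_proto_name  = "lock_example",       /* hypothetical protocol name */
        .lm_put_lock    = gfs2_glock_free,      /* no remote state to release */
        .lm_tokens      = &example_tokens,      /* hostdata options it accepts */
};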