Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

GFS2: Move glock superblock pointer to field gl_name

What uniquely identifies a glock in the glock hash table is not
gl_name, but gl_name and its superblock pointer. This patch makes
the gl_name field correspond to a unique glock identifier. That will
allow us to simplify hashing with a future patch, since the hash
algorithm can then take the gl_name and hash its components in one
operation.

Signed-off-by: Bob Peterson <rpeterso@redhat.com>
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
Acked-by: Steven Whitehouse <swhiteho@redhat.com>

+75 -74
+15 -17
fs/gfs2/glock.c
··· 119 119 120 120 void gfs2_glock_free(struct gfs2_glock *gl) 121 121 { 122 - struct gfs2_sbd *sdp = gl->gl_sbd; 122 + struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 123 123 124 124 call_rcu(&gl->gl_rcu, gfs2_glock_dealloc); 125 125 if (atomic_dec_and_test(&sdp->sd_glock_disposal)) ··· 192 192 193 193 void gfs2_glock_put(struct gfs2_glock *gl) 194 194 { 195 - struct gfs2_sbd *sdp = gl->gl_sbd; 195 + struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 196 196 struct address_space *mapping = gfs2_glock2aspace(gl); 197 197 198 198 if (lockref_put_or_lock(&gl->gl_lockref)) ··· 220 220 */ 221 221 222 222 static struct gfs2_glock *search_bucket(unsigned int hash, 223 - const struct gfs2_sbd *sdp, 224 223 const struct lm_lockname *name) 225 224 { 226 225 struct gfs2_glock *gl; ··· 227 228 228 229 hlist_bl_for_each_entry_rcu(gl, h, &gl_hash_table[hash], gl_list) { 229 230 if (!lm_name_equal(&gl->gl_name, name)) 230 - continue; 231 - if (gl->gl_sbd != sdp) 232 231 continue; 233 232 if (lockref_get_not_dead(&gl->gl_lockref)) 234 233 return gl; ··· 503 506 __acquires(&gl->gl_spin) 504 507 { 505 508 const struct gfs2_glock_operations *glops = gl->gl_ops; 506 - struct gfs2_sbd *sdp = gl->gl_sbd; 509 + struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 507 510 unsigned int lck_flags = gh ? gh->gh_flags : 0; 508 511 int ret;
509 512 ··· 625 628 static void delete_work_func(struct work_struct *work) 626 629 { 627 630 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete); 628 - struct gfs2_sbd *sdp = gl->gl_sbd; 631 + struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 629 632 struct gfs2_inode *ip; 630 633 struct inode *inode; 631 634 u64 no_addr = gl->gl_name.ln_number; ··· 701 704 struct gfs2_glock **glp) 702 705 { 703 706 struct super_block *s = sdp->sd_vfs; 704 - struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type }; 707 + struct lm_lockname name = { .ln_number = number, 708 + .ln_type = glops->go_type, 709 + .ln_sbd = sdp }; 705 710 struct gfs2_glock *gl, *tmp; 706 711 unsigned int hash = gl_hash(sdp, &name); 707 712 struct address_space *mapping; 708 713 struct kmem_cache *cachep; 709 714 710 715 rcu_read_lock(); 711 - gl = search_bucket(hash, sdp, &name); 716 + gl = search_bucket(hash, &name); 712 717 rcu_read_unlock(); 713 718 714 719 *glp = gl; ··· 738 739 } 739 740 740 741 atomic_inc(&sdp->sd_glock_disposal); 741 - gl->gl_sbd = sdp; 742 742 gl->gl_flags = 0; 743 743 gl->gl_name = name; 744 744 gl->gl_lockref.count = 1; ··· 770 772 } 771 773 772 774 spin_lock_bucket(hash); 773 - tmp = search_bucket(hash, sdp, &name); 775 + tmp = search_bucket(hash, &name); 774 776 if (tmp) { 775 777 spin_unlock_bucket(hash); 776 778 kfree(gl->gl_lksb.sb_lvbptr); ··· 926 928 __acquires(&gl->gl_spin) 927 929 { 928 930 struct gfs2_glock *gl = gh->gh_gl; 929 - struct gfs2_sbd *sdp = gl->gl_sbd; 931 + struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 930 932 struct list_head *insert_pt = NULL; 931 933 struct gfs2_holder *gh2; 932 934 int try_futile = 0; ··· 1004 1006 int gfs2_glock_nq(struct gfs2_holder *gh) 1005 1007 { 1006 1008 struct gfs2_glock *gl = gh->gh_gl; 1007 - struct gfs2_sbd *sdp = gl->gl_sbd; 1009 + struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 1008 1010 int error = 0; 1009 1011 1010 1012 if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
··· 1311 1313 1312 1314 void gfs2_glock_complete(struct gfs2_glock *gl, int ret) 1313 1315 { 1314 - struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct; 1316 + struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct; 1315 1317 1316 1318 spin_lock(&gl->gl_spin); 1317 1319 gl->gl_reply = ret; ··· 1469 1471 1470 1472 rcu_read_lock(); 1471 1473 hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) { 1472 - if ((gl->gl_sbd == sdp) && lockref_get_not_dead(&gl->gl_lockref)) 1474 + if ((gl->gl_name.ln_sbd == sdp) && lockref_get_not_dead(&gl->gl_lockref)) 1473 1475 examiner(gl); 1474 1476 } 1475 1477 rcu_read_unlock(); ··· 1567 1569 int ret; 1568 1570 1569 1571 ret = gfs2_truncatei_resume(ip); 1570 - gfs2_assert_withdraw(gl->gl_sbd, ret == 0); 1572 + gfs2_assert_withdraw(gl->gl_name.ln_sbd, ret == 0); 1571 1573 1572 1574 spin_lock(&gl->gl_spin); 1573 1575 clear_bit(GLF_LOCK, &gl->gl_flags); ··· 1870 1872 gi->nhash = 0; 1871 1873 } 1872 1874 /* Skip entries for other sb and dead entries */ 1873 - } while (gi->sdp != gi->gl->gl_sbd || 1875 + } while (gi->sdp != gi->gl->gl_name.ln_sbd || 1874 1876 __lockref_is_dead(&gi->gl->gl_lockref)); 1875 1877 1876 1878 return 0;
+20 -18
fs/gfs2/glops.c
··· 32 32 33 33 static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh) 34 34 { 35 - fs_err(gl->gl_sbd, "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page state 0x%lx\n", 35 + fs_err(gl->gl_name.ln_sbd, 36 + "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page " 37 + "state 0x%lx\n", 36 38 bh, (unsigned long long)bh->b_blocknr, bh->b_state, 37 39 bh->b_page->mapping, bh->b_page->flags); 38 - fs_err(gl->gl_sbd, "AIL glock %u:%llu mapping %p\n", 40 + fs_err(gl->gl_name.ln_sbd, "AIL glock %u:%llu mapping %p\n", 39 41 gl->gl_name.ln_type, gl->gl_name.ln_number, 40 42 gfs2_glock2aspace(gl)); 41 - gfs2_lm_withdraw(gl->gl_sbd, "AIL error\n"); 43 + gfs2_lm_withdraw(gl->gl_name.ln_sbd, "AIL error\n"); 42 44 } 43 45 44 46 /** ··· 54 52 static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync, 55 53 unsigned int nr_revokes) 56 54 { 57 - struct gfs2_sbd *sdp = gl->gl_sbd; 55 + struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 58 56 struct list_head *head = &gl->gl_ail_list; 59 57 struct gfs2_bufdata *bd, *tmp; 60 58 struct buffer_head *bh; ··· 82 80 83 81 static void gfs2_ail_empty_gl(struct gfs2_glock *gl) 84 82 { 85 - struct gfs2_sbd *sdp = gl->gl_sbd; 83 + struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 86 84 struct gfs2_trans tr; 87 85 88 86 memset(&tr, 0, sizeof(tr)); ··· 111 109 112 110 void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync) 113 111 { 114 - struct gfs2_sbd *sdp = gl->gl_sbd; 112 + struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 115 113 unsigned int revokes = atomic_read(&gl->gl_ail_count); 116 114 unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64); 117 115 int ret; ··· 141 139 142 140 static void rgrp_go_sync(struct gfs2_glock *gl) 143 141 { 144 - struct gfs2_sbd *sdp = gl->gl_sbd; 142 + struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 145 143 struct address_space *mapping = &sdp->sd_aspace; 146 144 struct gfs2_rgrpd *rgd; 147 145 int error; ··· 181 179
182 180 static void rgrp_go_inval(struct gfs2_glock *gl, int flags) 183 181 { 184 - struct gfs2_sbd *sdp = gl->gl_sbd; 182 + struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 185 183 struct address_space *mapping = &sdp->sd_aspace; 186 184 struct gfs2_rgrpd *rgd = gl->gl_object; 187 185 ··· 220 218 221 219 GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE); 222 220 223 - gfs2_log_flush(gl->gl_sbd, gl, NORMAL_FLUSH); 221 + gfs2_log_flush(gl->gl_name.ln_sbd, gl, NORMAL_FLUSH); 224 222 filemap_fdatawrite(metamapping); 225 223 if (ip) { 226 224 struct address_space *mapping = ip->i_inode.i_mapping; ··· 254 252 { 255 253 struct gfs2_inode *ip = gl->gl_object; 256 254 257 - gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count)); 255 + gfs2_assert_withdraw(gl->gl_name.ln_sbd, !atomic_read(&gl->gl_ail_count)); 258 256 259 257 if (flags & DIO_METADATA) { 260 258 struct address_space *mapping = gfs2_glock2aspace(gl); ··· 266 264 } 267 265 } 268 266 269 - if (ip == GFS2_I(gl->gl_sbd->sd_rindex)) { 270 - gfs2_log_flush(gl->gl_sbd, NULL, NORMAL_FLUSH); 271 - gl->gl_sbd->sd_rindex_uptodate = 0; 267 + if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) { 268 + gfs2_log_flush(gl->gl_name.ln_sbd, NULL, NORMAL_FLUSH); 269 + gl->gl_name.ln_sbd->sd_rindex_uptodate = 0; 272 270 } 273 271 if (ip && S_ISREG(ip->i_inode.i_mode)) 274 272 truncate_inode_pages(ip->i_inode.i_mapping, 0); ··· 283 281 284 282 static int inode_go_demote_ok(const struct gfs2_glock *gl) 285 283 { 286 - struct gfs2_sbd *sdp = gl->gl_sbd; 284 + struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 287 285 struct gfs2_holder *gh; 288 286 289 287 if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object) ··· 418 416 static int inode_go_lock(struct gfs2_holder *gh) 419 417 { 420 418 struct gfs2_glock *gl = gh->gh_gl; 421 - struct gfs2_sbd *sdp = gl->gl_sbd; 419 + struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 422 420 struct gfs2_inode *ip = gl->gl_object; 423 421 int error = 0; 424 422 ··· 479 477
static void freeze_go_sync(struct gfs2_glock *gl) 480 478 { 481 479 int error = 0; 482 - struct gfs2_sbd *sdp = gl->gl_sbd; 480 + struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 483 481 484 482 if (gl->gl_state == LM_ST_SHARED && 485 483 test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) { ··· 502 500 503 501 static int freeze_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh) 504 502 { 505 - struct gfs2_sbd *sdp = gl->gl_sbd; 503 + struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 506 504 struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode); 507 505 struct gfs2_glock *j_gl = ip->i_gl; 508 506 struct gfs2_log_header_host head; ··· 547 545 static void iopen_go_callback(struct gfs2_glock *gl, bool remote) 548 546 { 549 547 struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object; 550 548 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 551 549 552 550 if (!remote || (sdp->sd_vfs->s_flags & MS_RDONLY)) 553 551 return;
+5 -4
fs/gfs2/incore.h
··· 203 203 }; 204 204 205 205 struct lm_lockname { 206 + struct gfs2_sbd *ln_sbd; 206 207 u64 ln_number; 207 208 unsigned int ln_type; 208 209 }; 209 210 210 211 #define lm_name_equal(name1, name2) \ 211 - (((name1)->ln_number == (name2)->ln_number) && \ 212 - ((name1)->ln_type == (name2)->ln_type)) 212 + (((name1)->ln_number == (name2)->ln_number) && \ 213 + ((name1)->ln_type == (name2)->ln_type) && \ 214 + ((name1)->ln_sbd == (name2)->ln_sbd)) 213 215 214 216 215 217 struct gfs2_glock_operations { ··· 329 327 330 328 struct gfs2_glock { 331 329 struct hlist_bl_node gl_list; 332 - struct gfs2_sbd *gl_sbd; 333 330 unsigned long gl_flags; /* GLF_... */ 334 331 struct lm_lockname gl_name; 335 332 ··· 836 835 837 836 static inline void gfs2_sbstats_inc(const struct gfs2_glock *gl, int which) 838 837 { 839 - const struct gfs2_sbd *sdp = gl->gl_sbd; 838 + const struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 840 839 preempt_disable(); 841 840 this_cpu_ptr(sdp->sd_lkstats)->lkstats[gl->gl_name.ln_type].stats[which]++; 842 841 preempt_enable();
+5 -5
fs/gfs2/lock_dlm.c
··· 80 80 81 81 preempt_disable(); 82 82 rtt = ktime_to_ns(ktime_sub(ktime_get_real(), gl->gl_dstamp)); 83 - lks = this_cpu_ptr(gl->gl_sbd->sd_lkstats); 83 + lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats); 84 84 gfs2_update_stats(&gl->gl_stats, index, rtt); /* Local */ 85 85 gfs2_update_stats(&lks->lkstats[gltype], index, rtt); /* Global */ 86 86 preempt_enable(); ··· 108 108 dstamp = gl->gl_dstamp; 109 109 gl->gl_dstamp = ktime_get_real(); 110 110 irt = ktime_to_ns(ktime_sub(gl->gl_dstamp, dstamp)); 111 - lks = this_cpu_ptr(gl->gl_sbd->sd_lkstats); 111 + lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats); 112 112 gfs2_update_stats(&gl->gl_stats, GFS2_LKS_SIRT, irt); /* Local */ 113 113 gfs2_update_stats(&lks->lkstats[gltype], GFS2_LKS_SIRT, irt); /* Global */ 114 114 preempt_enable(); ··· 253 253 static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state, 254 254 unsigned int flags) 255 255 { 256 - struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct; 256 + struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct; 257 257 int req; 258 258 u32 lkf; 259 259 char strname[GDLM_STRNAME_BYTES] = ""; ··· 281 281 282 282 static void gdlm_put_lock(struct gfs2_glock *gl) 283 283 { 284 - struct gfs2_sbd *sdp = gl->gl_sbd; 284 + struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 285 285 struct lm_lockstruct *ls = &sdp->sd_lockstruct; 286 286 int lvb_needs_unlock = 0; 287 287 int error; ··· 319 319 320 320 static void gdlm_cancel(struct gfs2_glock *gl) 321 321 { 322 - struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct; 322 + struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct; 323 323 dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_CANCEL, NULL, gl); 324 324 } 325 325
+3 -3
fs/gfs2/lops.c
··· 70 70 static void maybe_release_space(struct gfs2_bufdata *bd) 71 71 { 72 72 struct gfs2_glock *gl = bd->bd_gl; 73 - struct gfs2_sbd *sdp = gl->gl_sbd; 73 + struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 74 74 struct gfs2_rgrpd *rgd = gl->gl_object; 75 75 unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number; 76 76 struct gfs2_bitmap *bi = rgd->rd_bits + index; ··· 585 585 static void gfs2_meta_sync(struct gfs2_glock *gl) 586 586 { 587 587 struct address_space *mapping = gfs2_glock2aspace(gl); 588 - struct gfs2_sbd *sdp = gl->gl_sbd; 588 + struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 589 589 int error; 590 590 591 591 if (mapping == NULL) ··· 595 595 error = filemap_fdatawait(mapping); 596 596 597 597 if (error) 598 - gfs2_io_error(gl->gl_sbd); 598 + gfs2_io_error(gl->gl_name.ln_sbd); 599 599 } 600 600 601 601 static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
+3 -3
fs/gfs2/meta_io.c
··· 114 114 struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create) 115 115 { 116 116 struct address_space *mapping = gfs2_glock2aspace(gl); 117 - struct gfs2_sbd *sdp = gl->gl_sbd; 117 + struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 118 118 struct page *page; 119 119 struct buffer_head *bh; 120 120 unsigned int shift; ··· 200 200 int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags, 201 201 struct buffer_head **bhp) 202 202 { 203 - struct gfs2_sbd *sdp = gl->gl_sbd; 203 + struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 204 204 struct buffer_head *bh; 205 205 206 206 if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) { ··· 362 362 363 363 struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen) 364 364 { 365 - struct gfs2_sbd *sdp = gl->gl_sbd; 365 + struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 366 366 struct buffer_head *first_bh, *bh; 367 367 u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >> 368 368 sdp->sd_sb.sb_bsize_shift;
+1 -1
fs/gfs2/meta_io.h
··· 44 44 { 45 45 struct inode *inode = mapping->host; 46 46 if (mapping->a_ops == &gfs2_meta_aops) 47 - return (((struct gfs2_glock *)mapping) - 1)->gl_sbd; 47 + return (((struct gfs2_glock *)mapping) - 1)->gl_name.ln_sbd; 48 48 else if (mapping->a_ops == &gfs2_rgrp_aops) 49 49 return container_of(mapping, struct gfs2_sbd, sd_aspace); 50 50 else
+11 -11
fs/gfs2/quota.c
··· 119 119 120 120 while (!list_empty(list)) { 121 121 qd = list_entry(list->next, struct gfs2_quota_data, qd_lru); 122 - sdp = qd->qd_gl->gl_sbd; 122 + sdp = qd->qd_gl->gl_name.ln_sbd; 123 123 124 124 list_del(&qd->qd_lru); 125 125 ··· 302 302 303 303 static void qd_hold(struct gfs2_quota_data *qd) 304 304 { 305 - struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; 305 + struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd; 306 306 gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref)); 307 307 lockref_get(&qd->qd_lockref); 308 308 } ··· 367 367 368 368 static int bh_get(struct gfs2_quota_data *qd) 369 369 { 370 - struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; 370 + struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd; 371 371 struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode); 372 372 unsigned int block, offset; 373 373 struct buffer_head *bh; ··· 414 414 415 415 static void bh_put(struct gfs2_quota_data *qd) 416 416 { 417 - struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; 417 + struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd; 418 418 419 419 mutex_lock(&sdp->sd_quota_mutex); 420 420 gfs2_assert(sdp, qd->qd_bh_count); ··· 486 486 487 487 static void qd_unlock(struct gfs2_quota_data *qd) 488 488 { 489 - gfs2_assert_warn(qd->qd_gl->gl_sbd, 489 + gfs2_assert_warn(qd->qd_gl->gl_name.ln_sbd, 490 490 test_bit(QDF_LOCKED, &qd->qd_flags)); 491 491 clear_bit(QDF_LOCKED, &qd->qd_flags); 492 492 bh_put(qd); ··· 614 614 615 615 static void do_qc(struct gfs2_quota_data *qd, s64 change) 616 616 { 617 - struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; 617 + struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd; 618 618 struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode); 619 619 struct gfs2_quota_change *qc = qd->qd_bh_qc; 620 620 s64 x; ··· 831 831 832 832 static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda) 833 833 { 834 - struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd; 834 + struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_name.ln_sbd; 835 835 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); 836 836 struct gfs2_alloc_parms ap = { .aflags = 0, };
837 837 unsigned int data_blocks, ind_blocks; ··· 922 922 gfs2_glock_dq_uninit(&ghs[qx]); 923 923 mutex_unlock(&ip->i_inode.i_mutex); 924 924 kfree(ghs); 925 - gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl, NORMAL_FLUSH); 925 + gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl, NORMAL_FLUSH); 926 926 return error; 927 927 } 928 928 ··· 954 954 static int do_glock(struct gfs2_quota_data *qd, int force_refresh, 955 955 struct gfs2_holder *q_gh) 956 956 { 957 - struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; 957 + struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd; 958 958 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); 959 959 struct gfs2_holder i_gh; 960 960 int error; ··· 1037 1037 1038 1038 static int need_sync(struct gfs2_quota_data *qd) 1039 1039 { 1040 - struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; 1040 + struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd; 1041 1041 struct gfs2_tune *gt = &sdp->sd_tune; 1042 1042 s64 value; 1043 1043 unsigned int num, den; ··· 1125 1125 1126 1126 static int print_message(struct gfs2_quota_data *qd, char *type) 1127 1127 { 1128 - struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; 1128 + struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd; 1129 1129 1130 1130 fs_info(sdp, "quota %s for %s %u\n", 1131 1131 type,
+1 -1
fs/gfs2/rgrp.c
··· 1860 1860 static bool gfs2_rgrp_congested(const struct gfs2_rgrpd *rgd, int loops) 1861 1861 { 1862 1862 const struct gfs2_glock *gl = rgd->rd_gl; 1863 - const struct gfs2_sbd *sdp = gl->gl_sbd; 1863 + const struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 1864 1864 struct gfs2_lkstats *st; 1865 1865 s64 r_dcount, l_dcount; 1866 1866 s64 l_srttb, a_srttb = 0;
+9 -9
fs/gfs2/trace_gfs2.h
··· 104 104 ), 105 105 106 106 TP_fast_assign( 107 - __entry->dev = gl->gl_sbd->sd_vfs->s_dev; 107 + __entry->dev = gl->gl_name.ln_sbd->sd_vfs->s_dev; 108 108 __entry->glnum = gl->gl_name.ln_number; 109 109 __entry->gltype = gl->gl_name.ln_type; 110 110 __entry->cur_state = glock_trace_state(gl->gl_state); ··· 140 140 ), 141 141 142 142 TP_fast_assign( 143 - __entry->dev = gl->gl_sbd->sd_vfs->s_dev; 143 + __entry->dev = gl->gl_name.ln_sbd->sd_vfs->s_dev; 144 144 __entry->gltype = gl->gl_name.ln_type; 145 145 __entry->glnum = gl->gl_name.ln_number; 146 146 __entry->cur_state = glock_trace_state(gl->gl_state); ··· 174 174 ), 175 175 176 176 TP_fast_assign( 177 - __entry->dev = gl->gl_sbd->sd_vfs->s_dev; 177 + __entry->dev = gl->gl_name.ln_sbd->sd_vfs->s_dev; 178 178 __entry->gltype = gl->gl_name.ln_type; 179 179 __entry->glnum = gl->gl_name.ln_number; 180 180 __entry->cur_state = glock_trace_state(gl->gl_state); ··· 209 209 ), 210 210 211 211 TP_fast_assign( 212 - __entry->dev = gh->gh_gl->gl_sbd->sd_vfs->s_dev; 212 + __entry->dev = gh->gh_gl->gl_name.ln_sbd->sd_vfs->s_dev; 213 213 __entry->glnum = gh->gh_gl->gl_name.ln_number; 214 214 __entry->gltype = gh->gh_gl->gl_name.ln_type; 215 215 __entry->first = first; ··· 239 239 ), 240 240 241 241 TP_fast_assign( 242 - __entry->dev = gh->gh_gl->gl_sbd->sd_vfs->s_dev; 242 + __entry->dev = gh->gh_gl->gl_name.ln_sbd->sd_vfs->s_dev; 243 243 __entry->glnum = gh->gh_gl->gl_name.ln_number; 244 244 __entry->gltype = gh->gh_gl->gl_name.ln_type; 245 245 __entry->queue = queue; ··· 278 278 ), 279 279 280 280 TP_fast_assign( 281 - __entry->dev = gl->gl_sbd->sd_vfs->s_dev; 281 + __entry->dev = gl->gl_name.ln_sbd->sd_vfs->s_dev; 282 282 __entry->glnum = gl->gl_name.ln_number; 283 283 __entry->gltype = gl->gl_name.ln_type; 284 284 __entry->status = gl->gl_lksb.sb_status; ··· 333 333 ), 334 334 335 335 TP_fast_assign( 336 - __entry->dev = bd->bd_gl->gl_sbd->sd_vfs->s_dev; 336 + __entry->dev = bd->bd_gl->gl_name.ln_sbd->sd_vfs->s_dev;
337 337 __entry->pin = pin; 338 338 __entry->len = bd->bd_bh->b_size; 339 339 __entry->block = bd->bd_bh->b_blocknr; ··· 449 449 ), 450 450 451 451 TP_fast_assign( 452 - __entry->dev = ip->i_gl->gl_sbd->sd_vfs->s_dev; 452 + __entry->dev = ip->i_gl->gl_name.ln_sbd->sd_vfs->s_dev; 453 453 __entry->lblock = lblock; 454 454 __entry->pblock = buffer_mapped(bh) ? bh->b_blocknr : 0; 455 455 __entry->inum = ip->i_no_addr; ··· 489 489 ), 490 490 491 491 TP_fast_assign( 492 - __entry->dev = rgd->rd_gl->gl_sbd->sd_vfs->s_dev; 492 + __entry->dev = rgd->rd_gl->gl_name.ln_sbd->sd_vfs->s_dev; 493 493 __entry->start = block; 494 494 __entry->inum = ip->i_no_addr; 495 495 __entry->len = len;
+2 -2
fs/gfs2/trans.c
··· 158 158 void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh) 159 159 { 160 160 struct gfs2_trans *tr = current->journal_info; 161 - struct gfs2_sbd *sdp = gl->gl_sbd; 161 + struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 162 162 struct address_space *mapping = bh->b_page->mapping; 163 163 struct gfs2_inode *ip = GFS2_I(mapping->host); 164 164 struct gfs2_bufdata *bd; ··· 224 224 void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh) 225 225 { 226 226 227 - struct gfs2_sbd *sdp = gl->gl_sbd; 227 + struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 228 228 struct gfs2_bufdata *bd; 229 229 230 230 lock_buffer(bh);