Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bcache: only use block_bytes() on struct cache

Because struct cache_set and struct cache both contain a struct cache_sb,
the macro block_bytes() can be used on either of them. Once the embedded
struct cache_sb is removed from struct cache_set, this macro will no
longer be usable on struct cache_set.

This patch unifies all block_bytes() usage to operate only on struct
cache; this is one of the preparations for removing the embedded
struct cache_sb from struct cache_set.

Signed-off-by: Coly Li <colyli@suse.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

authored by

Coly Li and committed by
Jens Axboe
4e1ebae3 1132e56e

+24 -24
+1 -1
drivers/md/bcache/bcache.h
··· 759 759 760 760 #define bucket_pages(c) ((c)->sb.bucket_size / PAGE_SECTORS) 761 761 #define bucket_bytes(c) ((c)->sb.bucket_size << 9) 762 - #define block_bytes(c) ((c)->sb.block_size << 9) 762 + #define block_bytes(ca) ((ca)->sb.block_size << 9) 763 763 764 764 static inline unsigned int meta_bucket_pages(struct cache_sb *sb) 765 765 {
+12 -12
drivers/md/bcache/btree.c
··· 104 104 105 105 static inline struct bset *write_block(struct btree *b) 106 106 { 107 - return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c); 107 + return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c->cache); 108 108 } 109 109 110 110 static void bch_btree_init_next(struct btree *b) ··· 173 173 goto err; 174 174 175 175 err = "bad btree header"; 176 - if (b->written + set_blocks(i, block_bytes(b->c)) > 176 + if (b->written + set_blocks(i, block_bytes(b->c->cache)) > 177 177 btree_blocks(b)) 178 178 goto err; 179 179 ··· 199 199 200 200 bch_btree_iter_push(iter, i->start, bset_bkey_last(i)); 201 201 202 - b->written += set_blocks(i, block_bytes(b->c)); 202 + b->written += set_blocks(i, block_bytes(b->c->cache)); 203 203 } 204 204 205 205 err = "corrupted btree"; 206 206 for (i = write_block(b); 207 207 bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key); 208 - i = ((void *) i) + block_bytes(b->c)) 208 + i = ((void *) i) + block_bytes(b->c->cache)) 209 209 if (i->seq == b->keys.set[0].data->seq) 210 210 goto err; 211 211 ··· 347 347 348 348 b->bio->bi_end_io = btree_node_write_endio; 349 349 b->bio->bi_private = cl; 350 - b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c)); 350 + b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c->cache)); 351 351 b->bio->bi_opf = REQ_OP_WRITE | REQ_META | REQ_FUA; 352 352 bch_bio_map(b->bio, i); 353 353 ··· 423 423 424 424 do_btree_node_write(b); 425 425 426 - atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size, 426 + atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->sb.block_size, 427 427 &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written); 428 428 429 - b->written += set_blocks(i, block_bytes(b->c)); 429 + b->written += set_blocks(i, block_bytes(b->c->cache)); 430 430 } 431 431 432 432 void bch_btree_node_write(struct btree *b, struct closure *parent) ··· 1344 1344 1345 1345 if (nodes < 2 || 1346 1346 
__set_blocks(b->keys.set[0].data, keys, 1347 - block_bytes(b->c)) > blocks * (nodes - 1)) 1347 + block_bytes(b->c->cache)) > blocks * (nodes - 1)) 1348 1348 return 0; 1349 1349 1350 1350 for (i = 0; i < nodes; i++) { ··· 1378 1378 k = bkey_next(k)) { 1379 1379 if (__set_blocks(n1, n1->keys + keys + 1380 1380 bkey_u64s(k), 1381 - block_bytes(b->c)) > blocks) 1381 + block_bytes(b->c->cache)) > blocks) 1382 1382 break; 1383 1383 1384 1384 last = k; ··· 1394 1394 * though) 1395 1395 */ 1396 1396 if (__set_blocks(n1, n1->keys + n2->keys, 1397 - block_bytes(b->c)) > 1397 + block_bytes(b->c->cache)) > 1398 1398 btree_blocks(new_nodes[i])) 1399 1399 goto out_unlock_nocoalesce; 1400 1400 ··· 1403 1403 last = &r->b->key; 1404 1404 } 1405 1405 1406 - BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) > 1406 + BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c->cache)) > 1407 1407 btree_blocks(new_nodes[i])); 1408 1408 1409 1409 if (last) ··· 2210 2210 goto err; 2211 2211 2212 2212 split = set_blocks(btree_bset_first(n1), 2213 - block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5; 2213 + block_bytes(n1->c->cache)) > (btree_blocks(b) * 4) / 5; 2214 2214 2215 2215 if (split) { 2216 2216 unsigned int keys = 0;
+4 -4
drivers/md/bcache/debug.c
··· 25 25 for (i = (start); \ 26 26 (void *) i < (void *) (start) + (KEY_SIZE(&b->key) << 9) &&\ 27 27 i->seq == (start)->seq; \ 28 - i = (void *) i + set_blocks(i, block_bytes(b->c)) * \ 29 - block_bytes(b->c)) 28 + i = (void *) i + set_blocks(i, block_bytes(b->c->cache)) * \ 29 + block_bytes(b->c->cache)) 30 30 31 31 void bch_btree_verify(struct btree *b) 32 32 { ··· 82 82 83 83 for_each_written_bset(b, ondisk, i) { 84 84 unsigned int block = ((void *) i - (void *) ondisk) / 85 - block_bytes(b->c); 85 + block_bytes(b->c->cache); 86 86 87 87 pr_err("*** on disk block %u:\n", block); 88 88 bch_dump_bset(&b->keys, i, block); 89 89 } 90 90 91 91 pr_err("*** block %zu not written\n", 92 - ((void *) i - (void *) ondisk) / block_bytes(b->c)); 92 + ((void *) i - (void *) ondisk) / block_bytes(b->c->cache)); 93 93 94 94 for (j = 0; j < inmemory->keys; j++) 95 95 if (inmemory->d[j] != sorted->d[j])
+4 -4
drivers/md/bcache/journal.c
··· 98 98 return ret; 99 99 } 100 100 101 - blocks = set_blocks(j, block_bytes(ca->set)); 101 + blocks = set_blocks(j, block_bytes(ca)); 102 102 103 103 /* 104 104 * Nodes in 'list' are in linear increasing order of ··· 734 734 struct cache *ca = c->cache; 735 735 struct journal_write *w = c->journal.cur; 736 736 struct bkey *k = &c->journal.key; 737 - unsigned int i, sectors = set_blocks(w->data, block_bytes(c)) * 737 + unsigned int i, sectors = set_blocks(w->data, block_bytes(ca)) * 738 738 c->sb.block_size; 739 739 740 740 struct bio *bio; ··· 754 754 return; 755 755 } 756 756 757 - c->journal.blocks_free -= set_blocks(w->data, block_bytes(c)); 757 + c->journal.blocks_free -= set_blocks(w->data, block_bytes(ca)); 758 758 759 759 w->data->btree_level = c->root->level; 760 760 ··· 847 847 struct journal_write *w = c->journal.cur; 848 848 849 849 sectors = __set_blocks(w->data, w->data->keys + nkeys, 850 - block_bytes(c)) * c->sb.block_size; 850 + block_bytes(c->cache)) * c->sb.block_size; 851 851 852 852 if (sectors <= min_t(size_t, 853 853 c->journal.blocks_free * c->sb.block_size,
+1 -1
drivers/md/bcache/request.c
··· 99 99 * bch_data_insert_keys() will insert the keys created so far 100 100 * and finish the rest when the keylist is empty. 101 101 */ 102 - if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset)) 102 + if (newsize * sizeof(uint64_t) > block_bytes(c->cache) - sizeof(struct jset)) 103 103 return -ENOMEM; 104 104 105 105 return __bch_keylist_realloc(l, u64s);
+1 -1
drivers/md/bcache/super.c
··· 1527 1527 1528 1528 kobject_init(&d->kobj, &bch_flash_dev_ktype); 1529 1529 1530 - if (bcache_device_init(d, block_bytes(c), u->sectors, 1530 + if (bcache_device_init(d, block_bytes(c->cache), u->sectors, 1531 1531 NULL, &bcache_flash_ops)) 1532 1532 goto err; 1533 1533
+1 -1
drivers/md/bcache/sysfs.c
··· 714 714 sysfs_print(synchronous, CACHE_SYNC(&c->sb)); 715 715 sysfs_print(journal_delay_ms, c->journal_delay_ms); 716 716 sysfs_hprint(bucket_size, bucket_bytes(c)); 717 - sysfs_hprint(block_size, block_bytes(c)); 717 + sysfs_hprint(block_size, block_bytes(c->cache)); 718 718 sysfs_print(tree_depth, c->root->level); 719 719 sysfs_print(root_usage_percent, bch_root_usage(c)); 720 720