Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bcache: remove for_each_cache()

Since each cache_set now explicitly has a single cache, for_each_cache()
is unnecessary. This patch removes the macro, updates all locations
where it was used, and makes sure all code logic remains consistent.

Signed-off-by: Coly Li <colyli@suse.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Coly Li, committed by Jens Axboe
08fdb2cd 697e2349

+251 -308
+8 -9
drivers/md/bcache/alloc.c
··· 88 88 struct cache *ca; 89 89 struct bucket *b; 90 90 unsigned long next = c->nbuckets * c->sb.bucket_size / 1024; 91 - unsigned int i; 92 91 int r; 93 92 94 93 atomic_sub(sectors, &c->rescale); ··· 103 104 104 105 c->min_prio = USHRT_MAX; 105 106 106 - for_each_cache(ca, c, i) 107 - for_each_bucket(b, ca) 108 - if (b->prio && 109 - b->prio != BTREE_PRIO && 110 - !atomic_read(&b->pin)) { 111 - b->prio--; 112 - c->min_prio = min(c->min_prio, b->prio); 113 - } 107 + ca = c->cache; 108 + for_each_bucket(b, ca) 109 + if (b->prio && 110 + b->prio != BTREE_PRIO && 111 + !atomic_read(&b->pin)) { 112 + b->prio--; 113 + c->min_prio = min(c->min_prio, b->prio); 114 + } 114 115 115 116 mutex_unlock(&c->bucket_lock); 116 117 }
+2 -7
drivers/md/bcache/bcache.h
··· 887 887 888 888 /* Looping macros */ 889 889 890 - #define for_each_cache(ca, cs, iter) \ 891 - for (iter = 0; ca = cs->cache, iter < 1; iter++) 892 - 893 890 #define for_each_bucket(b, ca) \ 894 891 for (b = (ca)->buckets + (ca)->sb.first_bucket; \ 895 892 b < (ca)->buckets + (ca)->sb.nbuckets; b++) ··· 928 931 929 932 static inline void wake_up_allocators(struct cache_set *c) 930 933 { 931 - struct cache *ca; 932 - unsigned int i; 934 + struct cache *ca = c->cache; 933 935 934 - for_each_cache(ca, c, i) 935 - wake_up_process(ca->alloc_thread); 936 + wake_up_process(ca->alloc_thread); 936 937 } 937 938 938 939 static inline void closure_bio_submit(struct cache_set *c,
+47 -56
drivers/md/bcache/btree.c
··· 1167 1167 static int btree_check_reserve(struct btree *b, struct btree_op *op) 1168 1168 { 1169 1169 struct cache_set *c = b->c; 1170 - struct cache *ca; 1171 - unsigned int i, reserve = (c->root->level - b->level) * 2 + 1; 1170 + struct cache *ca = c->cache; 1171 + unsigned int reserve = (c->root->level - b->level) * 2 + 1; 1172 1172 1173 1173 mutex_lock(&c->bucket_lock); 1174 1174 1175 - for_each_cache(ca, c, i) 1176 - if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) { 1177 - if (op) 1178 - prepare_to_wait(&c->btree_cache_wait, &op->wait, 1179 - TASK_UNINTERRUPTIBLE); 1180 - mutex_unlock(&c->bucket_lock); 1181 - return -EINTR; 1182 - } 1175 + if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) { 1176 + if (op) 1177 + prepare_to_wait(&c->btree_cache_wait, &op->wait, 1178 + TASK_UNINTERRUPTIBLE); 1179 + mutex_unlock(&c->bucket_lock); 1180 + return -EINTR; 1181 + } 1183 1182 1184 1183 mutex_unlock(&c->bucket_lock); 1185 1184 ··· 1694 1695 { 1695 1696 struct cache *ca; 1696 1697 struct bucket *b; 1697 - unsigned int i; 1698 1698 1699 1699 if (!c->gc_mark_valid) 1700 1700 return; ··· 1703 1705 c->gc_mark_valid = 0; 1704 1706 c->gc_done = ZERO_KEY; 1705 1707 1706 - for_each_cache(ca, c, i) 1707 - for_each_bucket(b, ca) { 1708 - b->last_gc = b->gen; 1709 - if (!atomic_read(&b->pin)) { 1710 - SET_GC_MARK(b, 0); 1711 - SET_GC_SECTORS_USED(b, 0); 1712 - } 1708 + ca = c->cache; 1709 + for_each_bucket(b, ca) { 1710 + b->last_gc = b->gen; 1711 + if (!atomic_read(&b->pin)) { 1712 + SET_GC_MARK(b, 0); 1713 + SET_GC_SECTORS_USED(b, 0); 1713 1714 } 1715 + } 1714 1716 1715 1717 mutex_unlock(&c->bucket_lock); 1716 1718 } ··· 1719 1721 { 1720 1722 struct bucket *b; 1721 1723 struct cache *ca; 1722 - unsigned int i; 1724 + unsigned int i, j; 1725 + uint64_t *k; 1723 1726 1724 1727 mutex_lock(&c->bucket_lock); 1725 1728 ··· 1738 1739 struct bcache_device *d = c->devices[i]; 1739 1740 struct cached_dev *dc; 1740 1741 struct keybuf_key *w, *n; 1741 - unsigned int j; 1742 1742 1743 
1743 if (!d || UUID_FLASH_ONLY(&c->uuids[i])) 1744 1744 continue; ··· 1754 1756 rcu_read_unlock(); 1755 1757 1756 1758 c->avail_nbuckets = 0; 1757 - for_each_cache(ca, c, i) { 1758 - uint64_t *i; 1759 1759 1760 - ca->invalidate_needs_gc = 0; 1760 + ca = c->cache; 1761 + ca->invalidate_needs_gc = 0; 1761 1762 1762 - for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++) 1763 - SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA); 1763 + for (k = ca->sb.d; k < ca->sb.d + ca->sb.keys; k++) 1764 + SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA); 1764 1765 1765 - for (i = ca->prio_buckets; 1766 - i < ca->prio_buckets + prio_buckets(ca) * 2; i++) 1767 - SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA); 1766 + for (k = ca->prio_buckets; 1767 + k < ca->prio_buckets + prio_buckets(ca) * 2; k++) 1768 + SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA); 1768 1769 1769 - for_each_bucket(b, ca) { 1770 - c->need_gc = max(c->need_gc, bucket_gc_gen(b)); 1770 + for_each_bucket(b, ca) { 1771 + c->need_gc = max(c->need_gc, bucket_gc_gen(b)); 1771 1772 1772 - if (atomic_read(&b->pin)) 1773 - continue; 1773 + if (atomic_read(&b->pin)) 1774 + continue; 1774 1775 1775 - BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b)); 1776 + BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b)); 1776 1777 1777 - if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE) 1778 - c->avail_nbuckets++; 1779 - } 1778 + if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE) 1779 + c->avail_nbuckets++; 1780 1780 } 1781 1781 1782 1782 mutex_unlock(&c->bucket_lock); ··· 1826 1830 1827 1831 static bool gc_should_run(struct cache_set *c) 1828 1832 { 1829 - struct cache *ca; 1830 - unsigned int i; 1833 + struct cache *ca = c->cache; 1831 1834 1832 - for_each_cache(ca, c, i) 1833 - if (ca->invalidate_needs_gc) 1834 - return true; 1835 + if (ca->invalidate_needs_gc) 1836 + return true; 1835 1837 1836 1838 if (atomic_read(&c->sectors_to_gc) < 0) 1837 1839 return true; ··· 2075 2081 2076 2082 void bch_initial_gc_finish(struct cache_set *c) 2077 2083 { 
2078 - struct cache *ca; 2084 + struct cache *ca = c->cache; 2079 2085 struct bucket *b; 2080 - unsigned int i; 2081 2086 2082 2087 bch_btree_gc_finish(c); 2083 2088 ··· 2091 2098 * This is only safe for buckets that have no live data in them, which 2092 2099 * there should always be some of. 2093 2100 */ 2094 - for_each_cache(ca, c, i) { 2095 - for_each_bucket(b, ca) { 2096 - if (fifo_full(&ca->free[RESERVE_PRIO]) && 2097 - fifo_full(&ca->free[RESERVE_BTREE])) 2098 - break; 2101 + for_each_bucket(b, ca) { 2102 + if (fifo_full(&ca->free[RESERVE_PRIO]) && 2103 + fifo_full(&ca->free[RESERVE_BTREE])) 2104 + break; 2099 2105 2100 - if (bch_can_invalidate_bucket(ca, b) && 2101 - !GC_MARK(b)) { 2102 - __bch_invalidate_one_bucket(ca, b); 2103 - if (!fifo_push(&ca->free[RESERVE_PRIO], 2104 - b - ca->buckets)) 2105 - fifo_push(&ca->free[RESERVE_BTREE], 2106 - b - ca->buckets); 2107 - } 2106 + if (bch_can_invalidate_bucket(ca, b) && 2107 + !GC_MARK(b)) { 2108 + __bch_invalidate_one_bucket(ca, b); 2109 + if (!fifo_push(&ca->free[RESERVE_PRIO], 2110 + b - ca->buckets)) 2111 + fifo_push(&ca->free[RESERVE_BTREE], 2112 + b - ca->buckets); 2108 2113 } 2109 2114 } 2110 2115
+119 -140
drivers/md/bcache/journal.c
··· 179 179 ret; \ 180 180 }) 181 181 182 - struct cache *ca; 183 - unsigned int iter; 182 + struct cache *ca = c->cache; 184 183 int ret = 0; 184 + struct journal_device *ja = &ca->journal; 185 + DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS); 186 + unsigned int i, l, r, m; 187 + uint64_t seq; 185 188 186 - for_each_cache(ca, c, iter) { 187 - struct journal_device *ja = &ca->journal; 188 - DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS); 189 - unsigned int i, l, r, m; 190 - uint64_t seq; 189 + bitmap_zero(bitmap, SB_JOURNAL_BUCKETS); 190 + pr_debug("%u journal buckets\n", ca->sb.njournal_buckets); 191 191 192 - bitmap_zero(bitmap, SB_JOURNAL_BUCKETS); 193 - pr_debug("%u journal buckets\n", ca->sb.njournal_buckets); 194 - 192 + /* 193 + * Read journal buckets ordered by golden ratio hash to quickly 194 + * find a sequence of buckets with valid journal entries 195 + */ 196 + for (i = 0; i < ca->sb.njournal_buckets; i++) { 195 197 /* 196 - * Read journal buckets ordered by golden ratio hash to quickly 197 - * find a sequence of buckets with valid journal entries 198 + * We must try the index l with ZERO first for 199 + * correctness due to the scenario that the journal 200 + * bucket is circular buffer which might have wrapped 198 201 */ 199 - for (i = 0; i < ca->sb.njournal_buckets; i++) { 200 - /* 201 - * We must try the index l with ZERO first for 202 - * correctness due to the scenario that the journal 203 - * bucket is circular buffer which might have wrapped 204 - */ 205 - l = (i * 2654435769U) % ca->sb.njournal_buckets; 202 + l = (i * 2654435769U) % ca->sb.njournal_buckets; 206 203 207 - if (test_bit(l, bitmap)) 208 - break; 204 + if (test_bit(l, bitmap)) 205 + break; 209 206 210 - if (read_bucket(l)) 211 - goto bsearch; 212 - } 213 - 214 - /* 215 - * If that fails, check all the buckets we haven't checked 216 - * already 217 - */ 218 - pr_debug("falling back to linear search\n"); 219 - 220 - for_each_clear_bit(l, bitmap, ca->sb.njournal_buckets) 221 - if 
(read_bucket(l)) 222 - goto bsearch; 223 - 224 - /* no journal entries on this device? */ 225 - if (l == ca->sb.njournal_buckets) 226 - continue; 227 - bsearch: 228 - BUG_ON(list_empty(list)); 229 - 230 - /* Binary search */ 231 - m = l; 232 - r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1); 233 - pr_debug("starting binary search, l %u r %u\n", l, r); 234 - 235 - while (l + 1 < r) { 236 - seq = list_entry(list->prev, struct journal_replay, 237 - list)->j.seq; 238 - 239 - m = (l + r) >> 1; 240 - read_bucket(m); 241 - 242 - if (seq != list_entry(list->prev, struct journal_replay, 243 - list)->j.seq) 244 - l = m; 245 - else 246 - r = m; 247 - } 248 - 249 - /* 250 - * Read buckets in reverse order until we stop finding more 251 - * journal entries 252 - */ 253 - pr_debug("finishing up: m %u njournal_buckets %u\n", 254 - m, ca->sb.njournal_buckets); 255 - l = m; 256 - 257 - while (1) { 258 - if (!l--) 259 - l = ca->sb.njournal_buckets - 1; 260 - 261 - if (l == m) 262 - break; 263 - 264 - if (test_bit(l, bitmap)) 265 - continue; 266 - 267 - if (!read_bucket(l)) 268 - break; 269 - } 270 - 271 - seq = 0; 272 - 273 - for (i = 0; i < ca->sb.njournal_buckets; i++) 274 - if (ja->seq[i] > seq) { 275 - seq = ja->seq[i]; 276 - /* 277 - * When journal_reclaim() goes to allocate for 278 - * the first time, it'll use the bucket after 279 - * ja->cur_idx 280 - */ 281 - ja->cur_idx = i; 282 - ja->last_idx = ja->discard_idx = (i + 1) % 283 - ca->sb.njournal_buckets; 284 - 285 - } 207 + if (read_bucket(l)) 208 + goto bsearch; 286 209 } 287 210 211 + /* 212 + * If that fails, check all the buckets we haven't checked 213 + * already 214 + */ 215 + pr_debug("falling back to linear search\n"); 216 + 217 + for_each_clear_bit(l, bitmap, ca->sb.njournal_buckets) 218 + if (read_bucket(l)) 219 + goto bsearch; 220 + 221 + /* no journal entries on this device? 
*/ 222 + if (l == ca->sb.njournal_buckets) 223 + goto out; 224 + bsearch: 225 + BUG_ON(list_empty(list)); 226 + 227 + /* Binary search */ 228 + m = l; 229 + r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1); 230 + pr_debug("starting binary search, l %u r %u\n", l, r); 231 + 232 + while (l + 1 < r) { 233 + seq = list_entry(list->prev, struct journal_replay, 234 + list)->j.seq; 235 + 236 + m = (l + r) >> 1; 237 + read_bucket(m); 238 + 239 + if (seq != list_entry(list->prev, struct journal_replay, 240 + list)->j.seq) 241 + l = m; 242 + else 243 + r = m; 244 + } 245 + 246 + /* 247 + * Read buckets in reverse order until we stop finding more 248 + * journal entries 249 + */ 250 + pr_debug("finishing up: m %u njournal_buckets %u\n", 251 + m, ca->sb.njournal_buckets); 252 + l = m; 253 + 254 + while (1) { 255 + if (!l--) 256 + l = ca->sb.njournal_buckets - 1; 257 + 258 + if (l == m) 259 + break; 260 + 261 + if (test_bit(l, bitmap)) 262 + continue; 263 + 264 + if (!read_bucket(l)) 265 + break; 266 + } 267 + 268 + seq = 0; 269 + 270 + for (i = 0; i < ca->sb.njournal_buckets; i++) 271 + if (ja->seq[i] > seq) { 272 + seq = ja->seq[i]; 273 + /* 274 + * When journal_reclaim() goes to allocate for 275 + * the first time, it'll use the bucket after 276 + * ja->cur_idx 277 + */ 278 + ja->cur_idx = i; 279 + ja->last_idx = ja->discard_idx = (i + 1) % 280 + ca->sb.njournal_buckets; 281 + 282 + } 283 + 284 + out: 288 285 if (!list_empty(list)) 289 286 c->journal.seq = list_entry(list->prev, 290 287 struct journal_replay, ··· 339 342 340 343 static bool is_discard_enabled(struct cache_set *s) 341 344 { 342 - struct cache *ca; 343 - unsigned int i; 345 + struct cache *ca = s->cache; 344 346 345 - for_each_cache(ca, s, i) 346 - if (ca->discard) 347 - return true; 347 + if (ca->discard) 348 + return true; 348 349 349 350 return false; 350 351 } ··· 628 633 static void journal_reclaim(struct cache_set *c) 629 634 { 630 635 struct bkey *k = &c->journal.key; 631 - struct cache *ca; 
636 + struct cache *ca = c->cache; 632 637 uint64_t last_seq; 633 - unsigned int iter, n = 0; 638 + unsigned int next; 639 + struct journal_device *ja = &ca->journal; 634 640 atomic_t p __maybe_unused; 635 641 636 642 atomic_long_inc(&c->reclaim); ··· 643 647 644 648 /* Update last_idx */ 645 649 646 - for_each_cache(ca, c, iter) { 647 - struct journal_device *ja = &ca->journal; 650 + while (ja->last_idx != ja->cur_idx && 651 + ja->seq[ja->last_idx] < last_seq) 652 + ja->last_idx = (ja->last_idx + 1) % 653 + ca->sb.njournal_buckets; 648 654 649 - while (ja->last_idx != ja->cur_idx && 650 - ja->seq[ja->last_idx] < last_seq) 651 - ja->last_idx = (ja->last_idx + 1) % 652 - ca->sb.njournal_buckets; 653 - } 654 - 655 - for_each_cache(ca, c, iter) 656 - do_journal_discard(ca); 655 + do_journal_discard(ca); 657 656 658 657 if (c->journal.blocks_free) 659 658 goto out; 660 659 661 - /* 662 - * Allocate: 663 - * XXX: Sort by free journal space 664 - */ 660 + next = (ja->cur_idx + 1) % ca->sb.njournal_buckets; 661 + /* No space available on this device */ 662 + if (next == ja->discard_idx) 663 + goto out; 665 664 666 - for_each_cache(ca, c, iter) { 667 - struct journal_device *ja = &ca->journal; 668 - unsigned int next = (ja->cur_idx + 1) % ca->sb.njournal_buckets; 665 + ja->cur_idx = next; 666 + k->ptr[0] = MAKE_PTR(0, 667 + bucket_to_sector(c, ca->sb.d[ja->cur_idx]), 668 + ca->sb.nr_this_dev); 669 + atomic_long_inc(&c->reclaimed_journal_buckets); 669 670 670 - /* No space available on this device */ 671 - if (next == ja->discard_idx) 672 - continue; 671 + bkey_init(k); 672 + SET_KEY_PTRS(k, 1); 673 + c->journal.blocks_free = c->sb.bucket_size >> c->block_bits; 673 674 674 - ja->cur_idx = next; 675 - k->ptr[n++] = MAKE_PTR(0, 676 - bucket_to_sector(c, ca->sb.d[ja->cur_idx]), 677 - ca->sb.nr_this_dev); 678 - atomic_long_inc(&c->reclaimed_journal_buckets); 679 - } 680 - 681 - if (n) { 682 - bkey_init(k); 683 - SET_KEY_PTRS(k, n); 684 - c->journal.blocks_free = 
c->sb.bucket_size >> c->block_bits; 685 - } 686 675 out: 687 676 if (!journal_full(&c->journal)) 688 677 __closure_wake_up(&c->journal.wait); ··· 731 750 __releases(c->journal.lock) 732 751 { 733 752 struct cache_set *c = container_of(cl, struct cache_set, journal.io); 734 - struct cache *ca; 753 + struct cache *ca = c->cache; 735 754 struct journal_write *w = c->journal.cur; 736 755 struct bkey *k = &c->journal.key; 737 756 unsigned int i, sectors = set_blocks(w->data, block_bytes(c)) * ··· 761 780 bkey_copy(&w->data->btree_root, &c->root->key); 762 781 bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket); 763 782 764 - for_each_cache(ca, c, i) 765 - w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0]; 766 - 783 + w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0]; 767 784 w->data->magic = jset_magic(&c->sb); 768 785 w->data->version = BCACHE_JSET_VERSION; 769 786 w->data->last_seq = last_seq(&c->journal);
+27 -29
drivers/md/bcache/movinggc.c
··· 196 196 197 197 void bch_moving_gc(struct cache_set *c) 198 198 { 199 - struct cache *ca; 199 + struct cache *ca = c->cache; 200 200 struct bucket *b; 201 - unsigned int i; 201 + unsigned long sectors_to_move, reserve_sectors; 202 202 203 203 if (!c->copy_gc_enabled) 204 204 return; 205 205 206 206 mutex_lock(&c->bucket_lock); 207 207 208 - for_each_cache(ca, c, i) { 209 - unsigned long sectors_to_move = 0; 210 - unsigned long reserve_sectors = ca->sb.bucket_size * 208 + sectors_to_move = 0; 209 + reserve_sectors = ca->sb.bucket_size * 211 210 fifo_used(&ca->free[RESERVE_MOVINGGC]); 212 211 213 - ca->heap.used = 0; 212 + ca->heap.used = 0; 214 213 215 - for_each_bucket(b, ca) { 216 - if (GC_MARK(b) == GC_MARK_METADATA || 217 - !GC_SECTORS_USED(b) || 218 - GC_SECTORS_USED(b) == ca->sb.bucket_size || 219 - atomic_read(&b->pin)) 220 - continue; 214 + for_each_bucket(b, ca) { 215 + if (GC_MARK(b) == GC_MARK_METADATA || 216 + !GC_SECTORS_USED(b) || 217 + GC_SECTORS_USED(b) == ca->sb.bucket_size || 218 + atomic_read(&b->pin)) 219 + continue; 221 220 222 - if (!heap_full(&ca->heap)) { 223 - sectors_to_move += GC_SECTORS_USED(b); 224 - heap_add(&ca->heap, b, bucket_cmp); 225 - } else if (bucket_cmp(b, heap_peek(&ca->heap))) { 226 - sectors_to_move -= bucket_heap_top(ca); 227 - sectors_to_move += GC_SECTORS_USED(b); 221 + if (!heap_full(&ca->heap)) { 222 + sectors_to_move += GC_SECTORS_USED(b); 223 + heap_add(&ca->heap, b, bucket_cmp); 224 + } else if (bucket_cmp(b, heap_peek(&ca->heap))) { 225 + sectors_to_move -= bucket_heap_top(ca); 226 + sectors_to_move += GC_SECTORS_USED(b); 228 227 229 - ca->heap.data[0] = b; 230 - heap_sift(&ca->heap, 0, bucket_cmp); 231 - } 228 + ca->heap.data[0] = b; 229 + heap_sift(&ca->heap, 0, bucket_cmp); 232 230 } 233 - 234 - while (sectors_to_move > reserve_sectors) { 235 - heap_pop(&ca->heap, b, bucket_cmp); 236 - sectors_to_move -= GC_SECTORS_USED(b); 237 - } 238 - 239 - while (heap_pop(&ca->heap, b, bucket_cmp)) 240 - SET_GC_MOVE(b, 
1); 241 231 } 232 + 233 + while (sectors_to_move > reserve_sectors) { 234 + heap_pop(&ca->heap, b, bucket_cmp); 235 + sectors_to_move -= GC_SECTORS_USED(b); 236 + } 237 + 238 + while (heap_pop(&ca->heap, b, bucket_cmp)) 239 + SET_GC_MOVE(b, 1); 242 240 243 241 mutex_unlock(&c->bucket_lock); 244 242
+48 -67
drivers/md/bcache/super.c
··· 343 343 void bcache_write_super(struct cache_set *c) 344 344 { 345 345 struct closure *cl = &c->sb_write; 346 - struct cache *ca; 347 - unsigned int i, version = BCACHE_SB_VERSION_CDEV_WITH_UUID; 346 + struct cache *ca = c->cache; 347 + struct bio *bio = &ca->sb_bio; 348 + unsigned int version = BCACHE_SB_VERSION_CDEV_WITH_UUID; 348 349 349 350 down(&c->sb_write_mutex); 350 351 closure_init(cl, &c->cl); ··· 355 354 if (c->sb.version > version) 356 355 version = c->sb.version; 357 356 358 - for_each_cache(ca, c, i) { 359 - struct bio *bio = &ca->sb_bio; 357 + ca->sb.version = version; 358 + ca->sb.seq = c->sb.seq; 359 + ca->sb.last_mount = c->sb.last_mount; 360 360 361 - ca->sb.version = version; 362 - ca->sb.seq = c->sb.seq; 363 - ca->sb.last_mount = c->sb.last_mount; 361 + SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb)); 364 362 365 - SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb)); 363 + bio_init(bio, ca->sb_bv, 1); 364 + bio_set_dev(bio, ca->bdev); 365 + bio->bi_end_io = write_super_endio; 366 + bio->bi_private = ca; 366 367 367 - bio_init(bio, ca->sb_bv, 1); 368 - bio_set_dev(bio, ca->bdev); 369 - bio->bi_end_io = write_super_endio; 370 - bio->bi_private = ca; 371 - 372 - closure_get(cl); 373 - __write_super(&ca->sb, ca->sb_disk, bio); 374 - } 368 + closure_get(cl); 369 + __write_super(&ca->sb, ca->sb_disk, bio); 375 370 376 371 closure_return_with_destructor(cl, bcache_write_super_unlock); 377 372 } ··· 769 772 lockdep_assert_held(&bch_register_lock); 770 773 771 774 if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) { 772 - unsigned int i; 773 - struct cache *ca; 775 + struct cache *ca = d->c->cache; 774 776 775 777 sysfs_remove_link(&d->c->kobj, d->name); 776 778 sysfs_remove_link(&d->kobj, "cache"); 777 779 778 - for_each_cache(ca, d->c, i) 779 - bd_unlink_disk_holder(ca->bdev, d->disk); 780 + bd_unlink_disk_holder(ca->bdev, d->disk); 780 781 } 781 782 } 782 783 783 784 static void bcache_device_link(struct bcache_device *d, struct cache_set *c, 
784 785 const char *name) 785 786 { 786 - unsigned int i; 787 - struct cache *ca; 787 + struct cache *ca = c->cache; 788 788 int ret; 789 789 790 - for_each_cache(ca, d->c, i) 791 - bd_link_disk_holder(ca->bdev, d->disk); 790 + bd_link_disk_holder(ca->bdev, d->disk); 792 791 793 792 snprintf(d->name, BCACHEDEVNAME_SIZE, 794 793 "%s%u", name, d->id); ··· 1655 1662 { 1656 1663 struct cache_set *c = container_of(cl, struct cache_set, cl); 1657 1664 struct cache *ca; 1658 - unsigned int i; 1659 1665 1660 1666 debugfs_remove(c->debug); 1661 1667 ··· 1663 1671 bch_journal_free(c); 1664 1672 1665 1673 mutex_lock(&bch_register_lock); 1666 - for_each_cache(ca, c, i) 1667 - if (ca) { 1668 - ca->set = NULL; 1669 - c->cache = NULL; 1670 - kobject_put(&ca->kobj); 1671 - } 1674 + ca = c->cache; 1675 + if (ca) { 1676 + ca->set = NULL; 1677 + c->cache = NULL; 1678 + kobject_put(&ca->kobj); 1679 + } 1672 1680 1673 1681 bch_bset_sort_state_free(&c->sort); 1674 1682 free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->sb))); ··· 1694 1702 static void cache_set_flush(struct closure *cl) 1695 1703 { 1696 1704 struct cache_set *c = container_of(cl, struct cache_set, caching); 1697 - struct cache *ca; 1705 + struct cache *ca = c->cache; 1698 1706 struct btree *b; 1699 - unsigned int i; 1700 1707 1701 1708 bch_cache_accounting_destroy(&c->accounting); 1702 1709 ··· 1720 1729 mutex_unlock(&b->write_lock); 1721 1730 } 1722 1731 1723 - for_each_cache(ca, c, i) 1724 - if (ca->alloc_thread) 1725 - kthread_stop(ca->alloc_thread); 1732 + if (ca->alloc_thread) 1733 + kthread_stop(ca->alloc_thread); 1726 1734 1727 1735 if (c->journal.cur) { 1728 1736 cancel_delayed_work_sync(&c->journal.work); ··· 1962 1972 { 1963 1973 const char *err = "cannot allocate memory"; 1964 1974 struct cached_dev *dc, *t; 1965 - struct cache *ca; 1975 + struct cache *ca = c->cache; 1966 1976 struct closure cl; 1967 - unsigned int i; 1968 1977 LIST_HEAD(journal); 1969 1978 struct journal_replay *l; 1970 1979 
1971 1980 closure_init_stack(&cl); 1972 1981 1973 - for_each_cache(ca, c, i) 1974 - c->nbuckets += ca->sb.nbuckets; 1982 + c->nbuckets = ca->sb.nbuckets; 1975 1983 set_gc_sectors(c); 1976 1984 1977 1985 if (CACHE_SYNC(&c->sb)) { ··· 1989 2001 j = &list_entry(journal.prev, struct journal_replay, list)->j; 1990 2002 1991 2003 err = "IO error reading priorities"; 1992 - for_each_cache(ca, c, i) { 1993 - if (prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev])) 1994 - goto err; 1995 - } 2004 + if (prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev])) 2005 + goto err; 1996 2006 1997 2007 /* 1998 2008 * If prio_read() fails it'll call cache_set_error and we'll ··· 2034 2048 bch_journal_next(&c->journal); 2035 2049 2036 2050 err = "error starting allocator thread"; 2037 - for_each_cache(ca, c, i) 2038 - if (bch_cache_allocator_start(ca)) 2039 - goto err; 2051 + if (bch_cache_allocator_start(ca)) 2052 + goto err; 2040 2053 2041 2054 /* 2042 2055 * First place it's safe to allocate: btree_check() and ··· 2054 2069 if (bch_journal_replay(c, &journal)) 2055 2070 goto err; 2056 2071 } else { 2072 + unsigned int j; 2073 + 2057 2074 pr_notice("invalidating existing data\n"); 2075 + ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7, 2076 + 2, SB_JOURNAL_BUCKETS); 2058 2077 2059 - for_each_cache(ca, c, i) { 2060 - unsigned int j; 2061 - 2062 - ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7, 2063 - 2, SB_JOURNAL_BUCKETS); 2064 - 2065 - for (j = 0; j < ca->sb.keys; j++) 2066 - ca->sb.d[j] = ca->sb.first_bucket + j; 2067 - } 2078 + for (j = 0; j < ca->sb.keys; j++) 2079 + ca->sb.d[j] = ca->sb.first_bucket + j; 2068 2080 2069 2081 bch_initial_gc_finish(c); 2070 2082 2071 2083 err = "error starting allocator thread"; 2072 - for_each_cache(ca, c, i) 2073 - if (bch_cache_allocator_start(ca)) 2074 - goto err; 2084 + if (bch_cache_allocator_start(ca)) 2085 + goto err; 2075 2086 2076 2087 mutex_lock(&c->bucket_lock); 2077 - for_each_cache(ca, c, i) 2078 - bch_prio_write(ca, true); 2088 + 
bch_prio_write(ca, true); 2079 2089 mutex_unlock(&c->bucket_lock); 2080 2090 2081 2091 err = "cannot allocate new UUID bucket"; ··· 2445 2465 static bool bch_is_open_cache(struct block_device *bdev) 2446 2466 { 2447 2467 struct cache_set *c, *tc; 2448 - struct cache *ca; 2449 - unsigned int i; 2450 2468 2451 - list_for_each_entry_safe(c, tc, &bch_cache_sets, list) 2452 - for_each_cache(ca, c, i) 2453 - if (ca->bdev == bdev) 2454 - return true; 2469 + list_for_each_entry_safe(c, tc, &bch_cache_sets, list) { 2470 + struct cache *ca = c->cache; 2471 + 2472 + if (ca->bdev == bdev) 2473 + return true; 2474 + } 2475 + 2455 2476 return false; 2456 2477 } 2457 2478