Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bcachefs: for_each_member_device_rcu() now declares loop iter

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>

+53 -81
+1 -3
fs/bcachefs/alloc_background.c
··· 891 891 static bool next_bucket(struct bch_fs *c, struct bpos *bucket) 892 892 { 893 893 struct bch_dev *ca; 894 - unsigned iter; 895 894 896 895 if (bch2_dev_bucket_exists(c, *bucket)) 897 896 return true; ··· 908 909 } 909 910 910 911 rcu_read_lock(); 911 - iter = bucket->inode; 912 - ca = __bch2_next_dev(c, &iter, NULL); 912 + ca = __bch2_next_dev_idx(c, bucket->inode, NULL); 913 913 if (ca) 914 914 *bucket = POS(ca->dev_idx, ca->mi.first_bucket); 915 915 rcu_read_unlock();
+1 -4
fs/bcachefs/alloc_foreground.c
··· 69 69 70 70 void bch2_reset_alloc_cursors(struct bch_fs *c) 71 71 { 72 - struct bch_dev *ca; 73 - unsigned i; 74 - 75 72 rcu_read_lock(); 76 - for_each_member_device_rcu(ca, c, i, NULL) 73 + for_each_member_device_rcu(c, ca, NULL) 77 74 ca->alloc_cursor = 0; 78 75 rcu_read_unlock(); 79 76 }
+2 -3
fs/bcachefs/buckets.c
··· 154 154 155 155 void bch2_fs_usage_acc_to_base(struct bch_fs *c, unsigned idx) 156 156 { 157 - struct bch_dev *ca; 158 - unsigned i, u64s = fs_usage_u64s(c); 157 + unsigned u64s = fs_usage_u64s(c); 159 158 160 159 BUG_ON(idx >= ARRAY_SIZE(c->usage)); 161 160 ··· 166 167 percpu_memset(c->usage[idx], 0, u64s * sizeof(u64)); 167 168 168 169 rcu_read_lock(); 169 - for_each_member_device_rcu(ca, c, i, NULL) { 170 + for_each_member_device_rcu(c, ca, NULL) { 170 171 u64s = dev_usage_u64s(); 171 172 172 173 acc_u64s_percpu((u64 *) ca->usage_base,
+3 -8
fs/bcachefs/disk_groups.c
··· 89 89 90 90 void bch2_disk_groups_to_text(struct printbuf *out, struct bch_fs *c) 91 91 { 92 - struct bch_disk_groups_cpu *g; 93 - struct bch_dev *ca; 94 - int i; 95 - unsigned iter; 96 - 97 92 out->atomic++; 98 93 rcu_read_lock(); 99 94 100 - g = rcu_dereference(c->disk_groups); 95 + struct bch_disk_groups_cpu *g = rcu_dereference(c->disk_groups); 101 96 if (!g) 102 97 goto out; 103 98 104 - for (i = 0; i < g->nr; i++) { 99 + for (unsigned i = 0; i < g->nr; i++) { 105 100 if (i) 106 101 prt_printf(out, " "); 107 102 ··· 106 111 } 107 112 108 113 prt_printf(out, "[parent %d devs", g->entries[i].parent); 109 - for_each_member_device_rcu(ca, c, iter, &g->entries[i].devs) 114 + for_each_member_device_rcu(c, ca, &g->entries[i].devs) 110 115 prt_printf(out, " %s", ca->name); 111 116 prt_printf(out, "]"); 112 117 }
+6 -9
fs/bcachefs/ec.c
··· 1243 1243 static unsigned pick_blocksize(struct bch_fs *c, 1244 1244 struct bch_devs_mask *devs) 1245 1245 { 1246 - struct bch_dev *ca; 1247 - unsigned i, nr = 0, sizes[BCH_SB_MEMBERS_MAX]; 1246 + unsigned nr = 0, sizes[BCH_SB_MEMBERS_MAX]; 1248 1247 struct { 1249 1248 unsigned nr, size; 1250 1249 } cur = { 0, 0 }, best = { 0, 0 }; 1251 1250 1252 - for_each_member_device_rcu(ca, c, i, devs) 1251 + for_each_member_device_rcu(c, ca, devs) 1253 1252 sizes[nr++] = ca->mi.bucket_size; 1254 1253 1255 1254 sort(sizes, nr, sizeof(unsigned), unsigned_cmp, NULL); 1256 1255 1257 - for (i = 0; i < nr; i++) { 1256 + for (unsigned i = 0; i < nr; i++) { 1258 1257 if (sizes[i] != cur.size) { 1259 1258 if (cur.nr > best.nr) 1260 1259 best = cur; ··· 1336 1337 enum bch_watermark watermark) 1337 1338 { 1338 1339 struct ec_stripe_head *h; 1339 - struct bch_dev *ca; 1340 - unsigned i; 1341 1340 1342 1341 h = kzalloc(sizeof(*h), GFP_KERNEL); 1343 1342 if (!h) ··· 1352 1355 rcu_read_lock(); 1353 1356 h->devs = target_rw_devs(c, BCH_DATA_user, target); 1354 1357 1355 - for_each_member_device_rcu(ca, c, i, &h->devs) 1358 + for_each_member_device_rcu(c, ca, &h->devs) 1356 1359 if (!ca->mi.durability) 1357 - __clear_bit(i, h->devs.d); 1360 + __clear_bit(ca->dev_idx, h->devs.d); 1358 1361 1359 1362 h->blocksize = pick_blocksize(c, &h->devs); 1360 1363 1361 - for_each_member_device_rcu(ca, c, i, &h->devs) 1364 + for_each_member_device_rcu(c, ca, &h->devs) 1362 1365 if (ca->mi.bucket_size == h->blocksize) 1363 1366 h->nr_active_devs++; 1364 1367
+4 -8
fs/bcachefs/journal.c
··· 1294 1294 { 1295 1295 struct bch_fs *c = container_of(j, struct bch_fs, journal); 1296 1296 union journal_res_state s; 1297 - struct bch_dev *ca; 1298 1297 unsigned long now = jiffies; 1299 1298 u64 nr_writes = j->nr_flush_writes + j->nr_noflush_writes; 1300 - u64 seq; 1301 - unsigned i; 1302 1299 1303 1300 if (!out->nr_tabstops) 1304 1301 printbuf_tabstop_push(out, 24); ··· 1340 1343 1341 1344 prt_newline(out); 1342 1345 1343 - for (seq = journal_cur_seq(j); 1346 + for (u64 seq = journal_cur_seq(j); 1344 1347 seq >= journal_last_unwritten_seq(j); 1345 1348 --seq) { 1346 - i = seq & JOURNAL_BUF_MASK; 1349 + unsigned i = seq & JOURNAL_BUF_MASK; 1347 1350 1348 1351 prt_printf(out, "unwritten entry:"); 1349 1352 prt_tab(out); ··· 1387 1390 j->space[journal_space_total].next_entry, 1388 1391 j->space[journal_space_total].total); 1389 1392 1390 - for_each_member_device_rcu(ca, c, i, 1391 - &c->rw_devs[BCH_DATA_journal]) { 1393 + for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) { 1392 1394 struct journal_device *ja = &ca->journal; 1393 1395 1394 1396 if (!test_bit(ca->dev_idx, c->rw_devs[BCH_DATA_journal].d)) ··· 1396 1400 if (!ja->nr) 1397 1401 continue; 1398 1402 1399 - prt_printf(out, "dev %u:\n", i); 1403 + prt_printf(out, "dev %u:\n", ca->dev_idx); 1400 1404 prt_printf(out, "\tnr\t\t%u\n", ja->nr); 1401 1405 prt_printf(out, "\tbucket size\t%u\n", ca->mi.bucket_size); 1402 1406 prt_printf(out, "\tavailable\t%u:%u\n", bch2_journal_dev_buckets_available(j, ja, journal_space_discarded), ja->sectors_free);
+5 -9
fs/bcachefs/journal_reclaim.c
··· 136 136 enum journal_space_from from) 137 137 { 138 138 struct bch_fs *c = container_of(j, struct bch_fs, journal); 139 - struct bch_dev *ca; 140 - unsigned i, pos, nr_devs = 0; 139 + unsigned pos, nr_devs = 0; 141 140 struct journal_space space, dev_space[BCH_SB_MEMBERS_MAX]; 142 141 143 142 BUG_ON(nr_devs_want > ARRAY_SIZE(dev_space)); 144 143 145 144 rcu_read_lock(); 146 - for_each_member_device_rcu(ca, c, i, 147 - &c->rw_devs[BCH_DATA_journal]) { 145 + for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) { 148 146 if (!ca->journal.nr) 149 147 continue; 150 148 ··· 171 173 void bch2_journal_space_available(struct journal *j) 172 174 { 173 175 struct bch_fs *c = container_of(j, struct bch_fs, journal); 174 - struct bch_dev *ca; 175 176 unsigned clean, clean_ondisk, total; 176 177 unsigned max_entry_size = min(j->buf[0].buf_size >> 9, 177 178 j->buf[1].buf_size >> 9); 178 - unsigned i, nr_online = 0, nr_devs_want; 179 + unsigned nr_online = 0, nr_devs_want; 179 180 bool can_discard = false; 180 181 int ret = 0; 181 182 182 183 lockdep_assert_held(&j->lock); 183 184 184 185 rcu_read_lock(); 185 - for_each_member_device_rcu(ca, c, i, 186 - &c->rw_devs[BCH_DATA_journal]) { 186 + for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) { 187 187 struct journal_device *ja = &ca->journal; 188 188 189 189 if (!ja->nr) ··· 212 216 213 217 nr_devs_want = min_t(unsigned, nr_online, c->opts.metadata_replicas); 214 218 215 - for (i = 0; i < journal_space_nr; i++) 219 + for (unsigned i = 0; i < journal_space_nr; i++) 216 220 j->space[i] = __journal_space_available(j, nr_devs_want, i); 217 221 218 222 clean_ondisk = j->space[journal_space_clean_ondisk].total;
+3 -5
fs/bcachefs/sb-members.c
··· 358 358 void bch2_sb_members_from_cpu(struct bch_fs *c) 359 359 { 360 360 struct bch_sb_field_members_v2 *mi = bch2_sb_field_get(c->disk_sb.sb, members_v2); 361 - struct bch_dev *ca; 362 - unsigned i, e; 363 361 364 362 rcu_read_lock(); 365 - for_each_member_device_rcu(ca, c, i, NULL) { 366 - struct bch_member *m = __bch2_members_v2_get_mut(mi, i); 363 + for_each_member_device_rcu(c, ca, NULL) { 364 + struct bch_member *m = __bch2_members_v2_get_mut(mi, ca->dev_idx); 367 365 368 - for (e = 0; e < BCH_MEMBER_ERROR_NR; e++) 366 + for (unsigned e = 0; e < BCH_MEMBER_ERROR_NR; e++) 369 367 m->errors[e] = cpu_to_le64(atomic64_read(&ca->errors[e])); 370 368 } 371 369 rcu_read_unlock();
+19 -16
fs/bcachefs/sb-members.h
··· 79 79 return (struct bch_devs_list) { .nr = 1, .data[0] = dev }; 80 80 } 81 81 82 - static inline struct bch_dev *__bch2_next_dev(struct bch_fs *c, unsigned *iter, 83 - const struct bch_devs_mask *mask) 82 + static inline struct bch_dev *__bch2_next_dev_idx(struct bch_fs *c, unsigned idx, 83 + const struct bch_devs_mask *mask) 84 84 { 85 85 struct bch_dev *ca = NULL; 86 86 87 - while ((*iter = mask 88 - ? find_next_bit(mask->d, c->sb.nr_devices, *iter) 89 - : *iter) < c->sb.nr_devices && 90 - !(ca = rcu_dereference_check(c->devs[*iter], 87 + while ((idx = mask 88 + ? find_next_bit(mask->d, c->sb.nr_devices, idx) 89 + : idx) < c->sb.nr_devices && 90 + !(ca = rcu_dereference_check(c->devs[idx], 91 91 lockdep_is_held(&c->state_lock)))) 92 - (*iter)++; 92 + idx++; 93 93 94 94 return ca; 95 95 } 96 96 97 - #define for_each_member_device_rcu(ca, c, iter, mask) \ 98 - for ((iter) = 0; ((ca) = __bch2_next_dev((c), &(iter), mask)); (iter)++) 97 + static inline struct bch_dev *__bch2_next_dev(struct bch_fs *c, struct bch_dev *ca, 98 + const struct bch_devs_mask *mask) 99 + { 100 + return __bch2_next_dev_idx(c, ca ? ca->dev_idx + 1 : 0, mask); 101 + } 102 + 103 + #define for_each_member_device_rcu(_c, _ca, _mask) \ 104 + for (struct bch_dev *_ca = NULL; \ 105 + (_ca = __bch2_next_dev((_c), _ca, (_mask)));) 99 106 100 107 static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, struct bch_dev *ca) 101 108 { 102 - unsigned idx = ca ? ca->dev_idx + 1 : 0; 103 - 104 109 if (ca) 105 110 percpu_ref_put(&ca->ref); 106 111 107 112 rcu_read_lock(); 108 - if ((ca = __bch2_next_dev(c, &idx, NULL))) 113 + if ((ca = __bch2_next_dev(c, ca, NULL))) 109 114 percpu_ref_get(&ca->ref); 110 115 rcu_read_unlock(); 111 116 ··· 131 126 struct bch_dev *ca, 132 127 unsigned state_mask) 133 128 { 134 - unsigned idx = ca ? ca->dev_idx + 1 : 0; 135 - 136 129 if (ca) 137 130 percpu_ref_put(&ca->io_ref); 138 131 139 132 rcu_read_lock(); 140 - while ((ca = __bch2_next_dev(c, &idx, NULL)) && 133 + while ((ca = __bch2_next_dev(c, ca, NULL)) && 141 134 (!((1 << ca->mi.state) & state_mask) || 142 135 !percpu_ref_tryget(&ca->io_ref))) 143 - idx++; 136 + ; 144 137 rcu_read_unlock(); 145 138 146 139 return ca;
+9 -16
fs/bcachefs/super.c
··· 167 167 struct bch_fs *bch2_dev_to_fs(dev_t dev) 168 168 { 169 169 struct bch_fs *c; 170 - struct bch_dev *ca; 171 - unsigned i; 172 170 173 171 mutex_lock(&bch_fs_list_lock); 174 172 rcu_read_lock(); 175 173 176 174 list_for_each_entry(c, &bch_fs_list, list) 177 - for_each_member_device_rcu(ca, c, i, NULL) 175 + for_each_member_device_rcu(c, ca, NULL) 178 176 if (ca->disk_sb.bdev && ca->disk_sb.bdev->bd_dev == dev) { 179 177 closure_get(&c->cl); 180 178 goto found; ··· 213 215 214 216 static void bch2_dev_usage_journal_reserve(struct bch_fs *c) 215 217 { 216 - struct bch_dev *ca; 217 - unsigned i, nr = 0, u64s = 218 + unsigned nr = 0, u64s = 218 219 ((sizeof(struct jset_entry_dev_usage) + 219 220 sizeof(struct jset_entry_dev_usage_type) * BCH_DATA_NR)) / 220 221 sizeof(u64); 221 222 222 223 rcu_read_lock(); 223 - for_each_member_device_rcu(ca, c, i, NULL) 224 + for_each_member_device_rcu(c, ca, NULL) 224 225 nr++; 225 226 rcu_read_unlock(); 226 227 ··· 1903 1906 /* return with ref on ca->ref: */ 1904 1907 struct bch_dev *bch2_dev_lookup(struct bch_fs *c, const char *name) 1905 1908 { 1906 - struct bch_dev *ca; 1907 - unsigned i; 1908 - 1909 1909 rcu_read_lock(); 1910 - for_each_member_device_rcu(ca, c, i, NULL) 1911 - if (!strcmp(name, ca->name)) 1912 - goto found; 1913 - ca = ERR_PTR(-BCH_ERR_ENOENT_dev_not_found); 1914 - found: 1910 + for_each_member_device_rcu(c, ca, NULL) 1911 + if (!strcmp(name, ca->name)) { 1912 + rcu_read_unlock(); 1913 + return ca; 1914 + } 1915 1915 rcu_read_unlock(); 1916 - 1917 - return ca; 1916 + return ERR_PTR(-BCH_ERR_ENOENT_dev_not_found); 1918 1917 } 1919 1918 1920 1919 /* Filesystem open: */