Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bcachefs: Limit pointers to being in only one stripe

This makes the disk accounting code saner, and it's not clear why we'd
ever want the same data to be in multiple stripes simultaneously.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>

Authored and committed by Kent Overstreet
37954a27 9ef6068c

+29 -38
+11 -17
fs/bcachefs/buckets.c
··· 1027 1027 struct extent_ptr_decoded p; 1028 1028 struct bch_replicas_padded r; 1029 1029 s64 dirty_sectors = 0; 1030 - unsigned i; 1031 1030 int ret; 1032 1031 1033 1032 r.e.data_type = data_type; ··· 1046 1047 if (!stale) 1047 1048 update_cached_sectors(c, fs_usage, p.ptr.dev, 1048 1049 disk_sectors); 1049 - } else if (!p.ec_nr) { 1050 + } else if (!p.has_ec) { 1050 1051 dirty_sectors += disk_sectors; 1051 1052 r.e.devs[r.e.nr_devs++] = p.ptr.dev; 1052 1053 } else { 1053 - for (i = 0; i < p.ec_nr; i++) { 1054 - ret = bch2_mark_stripe_ptr(c, p.ec[i], 1055 - data_type, fs_usage, 1056 - disk_sectors, flags); 1057 - if (ret) 1058 - return ret; 1059 - } 1054 + ret = bch2_mark_stripe_ptr(c, p.ec, 1055 + data_type, fs_usage, 1056 + disk_sectors, flags); 1057 + if (ret) 1058 + return ret; 1060 1059 1061 1060 r.e.nr_required = 0; 1062 1061 } ··· 1561 1564 struct bch_replicas_padded r; 1562 1565 s64 dirty_sectors = 0; 1563 1566 bool stale; 1564 - unsigned i; 1565 1567 int ret; 1566 1568 1567 1569 r.e.data_type = data_type; ··· 1585 1589 if (!stale) 1586 1590 update_cached_sectors_list(trans, p.ptr.dev, 1587 1591 disk_sectors); 1588 - } else if (!p.ec_nr) { 1592 + } else if (!p.has_ec) { 1589 1593 dirty_sectors += disk_sectors; 1590 1594 r.e.devs[r.e.nr_devs++] = p.ptr.dev; 1591 1595 } else { 1592 - for (i = 0; i < p.ec_nr; i++) { 1593 - ret = bch2_trans_mark_stripe_ptr(trans, p.ec[i], 1594 - disk_sectors, data_type); 1595 - if (ret) 1596 - return ret; 1597 - } 1596 + ret = bch2_trans_mark_stripe_ptr(trans, p.ec, 1597 + disk_sectors, data_type); 1598 + if (ret) 1599 + return ret; 1598 1600 1599 1601 r.e.nr_required = 0; 1600 1602 }
+2 -3
fs/bcachefs/ec.c
··· 433 433 434 434 closure_init_stack(&cl); 435 435 436 - BUG_ON(!rbio->pick.idx || 437 - rbio->pick.idx - 1 >= rbio->pick.ec_nr); 436 + BUG_ON(!rbio->pick.has_ec); 438 437 439 - stripe_idx = rbio->pick.ec[rbio->pick.idx - 1].idx; 438 + stripe_idx = rbio->pick.ec.idx; 440 439 441 440 buf = kzalloc(sizeof(*buf), GFP_NOIO); 442 441 if (!buf)
+10 -11
fs/bcachefs/extents.c
··· 66 66 static unsigned bch2_extent_ptr_durability(struct bch_fs *c, 67 67 struct extent_ptr_decoded p) 68 68 { 69 - unsigned i, durability = 0; 69 + unsigned durability = 0; 70 70 struct bch_dev *ca; 71 71 72 72 if (p.ptr.cached) ··· 77 77 if (ca->mi.state != BCH_MEMBER_STATE_FAILED) 78 78 durability = max_t(unsigned, durability, ca->mi.durability); 79 79 80 - for (i = 0; i < p.ec_nr; i++) { 80 + if (p.has_ec) { 81 81 struct stripe *s = 82 - genradix_ptr(&c->stripes[0], p.ec[i].idx); 82 + genradix_ptr(&c->stripes[0], p.ec.idx); 83 83 84 84 if (WARN_ON(!s)) 85 - continue; 85 + goto out; 86 86 87 87 durability = max_t(unsigned, durability, s->nr_redundant); 88 88 } 89 - 89 + out: 90 90 return durability; 91 91 } 92 92 ··· 205 205 p.idx++; 206 206 207 207 if (force_reconstruct_read(c) && 208 - !p.idx && p.ec_nr) 208 + !p.idx && p.has_ec) 209 209 p.idx++; 210 210 211 - if (p.idx >= p.ec_nr + 1) 211 + if (p.idx >= (unsigned) p.has_ec + 1) 212 212 continue; 213 213 214 214 if (ret > 0 && !ptr_better(c, p, *pick)) ··· 1543 1543 struct bch_extent_crc_unpacked crc = 1544 1544 bch2_extent_crc_unpack(&k->k, NULL); 1545 1545 union bch_extent_entry *pos; 1546 - unsigned i; 1547 1546 1548 1547 if (!bch2_crc_unpacked_cmp(crc, p->crc)) { 1549 1548 pos = ptrs.start; ··· 1561 1562 p->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr; 1562 1563 __extent_entry_insert(k, pos, to_entry(&p->ptr)); 1563 1564 1564 - for (i = 0; i < p->ec_nr; i++) { 1565 - p->ec[i].type = 1 << BCH_EXTENT_ENTRY_stripe_ptr; 1566 - __extent_entry_insert(k, pos, to_entry(&p->ec[i])); 1565 + if (p->has_ec) { 1566 + p->ec.type = 1 << BCH_EXTENT_ENTRY_stripe_ptr; 1567 + __extent_entry_insert(k, pos, to_entry(&p->ec)); 1567 1568 } 1568 1569 } 1569 1570
+3 -2
fs/bcachefs/extents.h
··· 228 228 __label__ out; \ 229 229 \ 230 230 (_ptr).idx = 0; \ 231 - (_ptr).ec_nr = 0; \ 231 + (_ptr).has_ec = false; \ 232 232 \ 233 233 __bkey_extent_entry_for_each_from(_entry, _end, _entry) \ 234 234 switch (extent_entry_type(_entry)) { \ ··· 242 242 entry_to_crc(_entry)); \ 243 243 break; \ 244 244 case BCH_EXTENT_ENTRY_stripe_ptr: \ 245 - (_ptr).ec[(_ptr).ec_nr++] = _entry->stripe_ptr; \ 245 + (_ptr).ec = _entry->stripe_ptr; \ 246 + (_ptr).has_ec = true; \ 246 247 break; \ 247 248 } \ 248 249 out: \
+2 -2
fs/bcachefs/extents_types.h
··· 21 21 22 22 struct extent_ptr_decoded { 23 23 unsigned idx; 24 - unsigned ec_nr; 24 + bool has_ec; 25 25 struct bch_extent_crc_unpacked crc; 26 26 struct bch_extent_ptr ptr; 27 - struct bch_extent_stripe_ptr ec[4]; 27 + struct bch_extent_stripe_ptr ec; 28 28 }; 29 29 30 30 struct bch_io_failures {
+1 -3
fs/bcachefs/replicas.c
··· 84 84 if (p.ptr.cached) 85 85 continue; 86 86 87 - if (p.ec_nr) { 87 + if (p.has_ec) 88 88 r->nr_required = 0; 89 - break; 90 - } 91 89 92 90 r->devs[r->nr_devs++] = p.ptr.dev; 93 91 }