Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bcachefs: Array bounds fixes

It's no longer legal to use a zero-size array as a flexible array
member - this causes UBSAN to complain.

This patch switches our zero-size arrays to normal flexible array
members when possible, and inserts casts in other places (e.g. where we
use the zero-size array as a marker partway through an array).

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>

+64 -66
+27 -39
fs/bcachefs/bcachefs_format.h
··· 723 723 __le64 bi_hash_seed; 724 724 __le32 bi_flags; 725 725 __le16 bi_mode; 726 - __u8 fields[0]; 726 + __u8 fields[]; 727 727 } __packed __aligned(8); 728 728 729 729 struct bch_inode_v2 { ··· 733 733 __le64 bi_hash_seed; 734 734 __le64 bi_flags; 735 735 __le16 bi_mode; 736 - __u8 fields[0]; 736 + __u8 fields[]; 737 737 } __packed __aligned(8); 738 738 739 739 struct bch_inode_v3 { ··· 745 745 __le64 bi_sectors; 746 746 __le64 bi_size; 747 747 __le64 bi_version; 748 - __u8 fields[0]; 748 + __u8 fields[]; 749 749 } __packed __aligned(8); 750 750 751 751 #define INODEv3_FIELDS_START_INITIAL 6 ··· 1097 1097 struct bch_val v; 1098 1098 __le64 refcount; 1099 1099 union bch_extent_entry start[0]; 1100 - __u64 _data[0]; 1100 + __u64 _data[]; 1101 1101 } __packed __aligned(8); 1102 1102 1103 1103 struct bch_indirect_inline_data { 1104 1104 struct bch_val v; 1105 1105 __le64 refcount; 1106 - u8 data[0]; 1106 + u8 data[]; 1107 1107 }; 1108 1108 1109 1109 /* Inline data */ 1110 1110 1111 1111 struct bch_inline_data { 1112 1112 struct bch_val v; 1113 - u8 data[0]; 1113 + u8 data[]; 1114 1114 }; 1115 1115 1116 1116 /* Subvolumes: */ ··· 1223 1223 1224 1224 struct bch_sb_field_journal { 1225 1225 struct bch_sb_field field; 1226 - __le64 buckets[0]; 1226 + __le64 buckets[]; 1227 1227 }; 1228 1228 1229 1229 struct bch_sb_field_journal_v2 { ··· 1232 1232 struct bch_sb_field_journal_v2_entry { 1233 1233 __le64 start; 1234 1234 __le64 nr; 1235 - } d[0]; 1235 + } d[]; 1236 1236 }; 1237 1237 1238 1238 /* BCH_SB_FIELD_members: */ ··· 1279 1279 1280 1280 struct bch_sb_field_members { 1281 1281 struct bch_sb_field field; 1282 - struct bch_member members[0]; 1282 + struct bch_member members[]; 1283 1283 }; 1284 1284 1285 1285 /* BCH_SB_FIELD_crypt: */ ··· 1377 1377 struct bch_replicas_entry_v0 { 1378 1378 __u8 data_type; 1379 1379 __u8 nr_devs; 1380 - __u8 devs[0]; 1380 + __u8 devs[]; 1381 1381 } __packed; 1382 1382 1383 1383 struct bch_sb_field_replicas_v0 { 1384 1384 struct 
bch_sb_field field; 1385 - struct bch_replicas_entry_v0 entries[0]; 1385 + struct bch_replicas_entry_v0 entries[]; 1386 1386 } __packed __aligned(8); 1387 1387 1388 1388 struct bch_replicas_entry { 1389 1389 __u8 data_type; 1390 1390 __u8 nr_devs; 1391 1391 __u8 nr_required; 1392 - __u8 devs[0]; 1392 + __u8 devs[]; 1393 1393 } __packed; 1394 1394 1395 1395 #define replicas_entry_bytes(_i) \ ··· 1397 1397 1398 1398 struct bch_sb_field_replicas { 1399 1399 struct bch_sb_field field; 1400 - struct bch_replicas_entry entries[0]; 1400 + struct bch_replicas_entry entries[]; 1401 1401 } __packed __aligned(8); 1402 1402 1403 1403 /* BCH_SB_FIELD_quota: */ ··· 1432 1432 1433 1433 struct bch_sb_field_disk_groups { 1434 1434 struct bch_sb_field field; 1435 - struct bch_disk_group entries[0]; 1435 + struct bch_disk_group entries[]; 1436 1436 } __packed __aligned(8); 1437 1437 1438 1438 /* BCH_SB_FIELD_counters */ ··· 1525 1525 1526 1526 struct bch_sb_field_counters { 1527 1527 struct bch_sb_field field; 1528 - __le64 d[0]; 1528 + __le64 d[]; 1529 1529 }; 1530 1530 1531 1531 /* ··· 1539 1539 __u8 type; /* designates what this jset holds */ 1540 1540 __u8 pad[3]; 1541 1541 1542 - union { 1543 - struct bkey_i start[0]; 1544 - __u64 _data[0]; 1545 - }; 1542 + struct bkey_i start[0]; 1543 + __u64 _data[]; 1546 1544 }; 1547 1545 1548 1546 struct bch_sb_field_clean { ··· 1551 1553 __le16 _write_clock; 1552 1554 __le64 journal_seq; 1553 1555 1554 - union { 1555 - struct jset_entry start[0]; 1556 - __u64 _data[0]; 1557 - }; 1556 + struct jset_entry start[0]; 1557 + __u64 _data[]; 1558 1558 }; 1559 1559 1560 1560 struct journal_seq_blacklist_entry { ··· 1563 1567 struct bch_sb_field_journal_seq_blacklist { 1564 1568 struct bch_sb_field field; 1565 1569 1566 - union { 1567 - struct journal_seq_blacklist_entry start[0]; 1568 - __u64 _data[0]; 1569 - }; 1570 + struct journal_seq_blacklist_entry start[0]; 1571 + __u64 _data[]; 1570 1572 }; 1571 1573 1572 1574 /* Superblock: */ ··· 1700 1706 
1701 1707 struct bch_sb_layout layout; 1702 1708 1703 - union { 1704 - struct bch_sb_field start[0]; 1705 - __le64 _data[0]; 1706 - }; 1709 + struct bch_sb_field start[0]; 1710 + __le64 _data[]; 1707 1711 } __packed __aligned(8); 1708 1712 1709 1713 /* ··· 2178 2186 __le64 last_seq; 2179 2187 2180 2188 2181 - union { 2182 - struct jset_entry start[0]; 2183 - __u64 _data[0]; 2184 - }; 2189 + struct jset_entry start[0]; 2190 + __u64 _data[]; 2185 2191 } __packed __aligned(8); 2186 2192 2187 2193 LE32_BITMASK(JSET_CSUM_TYPE, struct jset, flags, 0, 4); ··· 2284 2294 __le16 version; 2285 2295 __le16 u64s; /* count of d[] in u64s */ 2286 2296 2287 - union { 2288 - struct bkey_packed start[0]; 2289 - __u64 _data[0]; 2290 - }; 2297 + struct bkey_packed start[0]; 2298 + __u64 _data[]; 2291 2299 } __packed __aligned(8); 2292 2300 2293 2301 LE32_BITMASK(BSET_CSUM_TYPE, struct bset, flags, 0, 4);
+1 -1
fs/bcachefs/bkey.c
··· 127 127 struct bkey_packed *k) 128 128 { 129 129 EBUG_ON(state->p < k->_data); 130 - EBUG_ON(state->p >= k->_data + state->format->key_u64s); 130 + EBUG_ON(state->p >= (u64 *) k->_data + state->format->key_u64s); 131 131 132 132 *state->p = state->w; 133 133 }
+3 -3
fs/bcachefs/bkey.h
··· 52 52 53 53 static inline struct bkey_i *bkey_next(struct bkey_i *k) 54 54 { 55 - return (struct bkey_i *) (k->_data + k->k.u64s); 55 + return (struct bkey_i *) ((u64 *) k->_data + k->k.u64s); 56 56 } 57 57 58 58 #define bkey_val_u64s(_k) ((_k)->u64s - BKEY_U64s) ··· 397 397 } 398 398 399 399 #define bkeyp_val(_format, _k) \ 400 - ((struct bch_val *) ((_k)->_data + bkeyp_key_u64s(_format, _k))) 400 + ((struct bch_val *) ((u64 *) (_k)->_data + bkeyp_key_u64s(_format, _k))) 401 401 402 402 extern const struct bkey_format bch2_bkey_format_current; 403 403 ··· 732 732 #error edit for your odd byteorder. 733 733 #endif 734 734 735 - #define high_word(f, k) ((k)->_data + high_word_offset(f)) 735 + #define high_word(f, k) ((u64 *) (k)->_data + high_word_offset(f)) 736 736 #define next_word(p) nth_word(p, 1) 737 737 #define prev_word(p) nth_word(p, -1) 738 738
+13 -3
fs/bcachefs/bkey_sort.h
··· 9 9 10 10 struct sort_iter_set { 11 11 struct bkey_packed *k, *end; 12 - } data[MAX_BSETS + 1]; 12 + } data[]; 13 13 }; 14 14 15 - static inline void sort_iter_init(struct sort_iter *iter, struct btree *b) 15 + static inline void sort_iter_init(struct sort_iter *iter, struct btree *b, unsigned size) 16 16 { 17 17 iter->b = b; 18 18 iter->used = 0; 19 - iter->size = ARRAY_SIZE(iter->data); 19 + iter->size = size; 20 + } 21 + 22 + struct sort_iter_stack { 23 + struct sort_iter iter; 24 + struct sort_iter_set sets[MAX_BSETS + 1]; 25 + }; 26 + 27 + static inline void sort_iter_stack_init(struct sort_iter_stack *iter, struct btree *b) 28 + { 29 + sort_iter_init(&iter->iter, b, ARRAY_SIZE(iter->sets)); 20 30 } 21 31 22 32 static inline void sort_iter_add(struct sort_iter *iter,
+7 -6
fs/bcachefs/bset.c
··· 232 232 { 233 233 struct bset_tree *t = bch2_bkey_to_bset(b, where); 234 234 struct bkey_packed *prev = bch2_bkey_prev_all(b, t, where); 235 - struct bkey_packed *next = (void *) (where->_data + clobber_u64s); 235 + struct bkey_packed *next = (void *) ((u64 *) where->_data + clobber_u64s); 236 236 struct printbuf buf1 = PRINTBUF; 237 237 struct printbuf buf2 = PRINTBUF; 238 238 #if 0 ··· 300 300 } 301 301 302 302 struct ro_aux_tree { 303 - struct bkey_float f[0]; 303 + u8 nothing[0]; 304 + struct bkey_float f[]; 304 305 }; 305 306 306 307 struct rw_aux_tree { ··· 477 476 { 478 477 unsigned prev_u64s = ro_aux_tree_prev(b, t)[j]; 479 478 480 - return (void *) (tree_to_bkey(b, t, j)->_data - prev_u64s); 479 + return (void *) ((u64 *) tree_to_bkey(b, t, j)->_data - prev_u64s); 481 480 } 482 481 483 482 static struct rw_aux_tree *rw_aux_tree(const struct btree *b, ··· 1011 1010 btree_keys_account_key_add(&b->nr, t - b->set, src); 1012 1011 1013 1012 if (src->u64s != clobber_u64s) { 1014 - u64 *src_p = where->_data + clobber_u64s; 1015 - u64 *dst_p = where->_data + src->u64s; 1013 + u64 *src_p = (u64 *) where->_data + clobber_u64s; 1014 + u64 *dst_p = (u64 *) where->_data + src->u64s; 1016 1015 1017 1016 EBUG_ON((int) le16_to_cpu(bset(b, t)->u64s) < 1018 1017 (int) clobber_u64s - src->u64s); ··· 1038 1037 unsigned clobber_u64s) 1039 1038 { 1040 1039 struct bset_tree *t = bset_tree_last(b); 1041 - u64 *src_p = where->_data + clobber_u64s; 1040 + u64 *src_p = (u64 *) where->_data + clobber_u64s; 1042 1041 u64 *dst_p = where->_data; 1043 1042 1044 1043 bch2_bset_verify_rw_aux_tree(b, t);
+10 -11
fs/bcachefs/btree_io.c
··· 292 292 bool filter_whiteouts) 293 293 { 294 294 struct btree_node *out; 295 - struct sort_iter sort_iter; 295 + struct sort_iter_stack sort_iter; 296 296 struct bset_tree *t; 297 297 struct bset *start_bset = bset(b, &b->set[start_idx]); 298 298 bool used_mempool = false; ··· 301 301 bool sorting_entire_node = start_idx == 0 && 302 302 end_idx == b->nsets; 303 303 304 - sort_iter_init(&sort_iter, b); 304 + sort_iter_stack_init(&sort_iter, b); 305 305 306 306 for (t = b->set + start_idx; 307 307 t < b->set + end_idx; 308 308 t++) { 309 309 u64s += le16_to_cpu(bset(b, t)->u64s); 310 - sort_iter_add(&sort_iter, 310 + sort_iter_add(&sort_iter.iter, 311 311 btree_bkey_first(b, t), 312 312 btree_bkey_last(b, t)); 313 313 } ··· 320 320 321 321 start_time = local_clock(); 322 322 323 - u64s = bch2_sort_keys(out->keys.start, &sort_iter, filter_whiteouts); 323 + u64s = bch2_sort_keys(out->keys.start, &sort_iter.iter, filter_whiteouts); 324 324 325 325 out->keys.u64s = cpu_to_le16(u64s); 326 326 ··· 918 918 b->written = 0; 919 919 920 920 iter = mempool_alloc(&c->fill_iter, GFP_NOFS); 921 - sort_iter_init(iter, b); 922 - iter->size = (btree_blocks(c) + 1) * 2; 921 + sort_iter_init(iter, b, (btree_blocks(c) + 1) * 2); 923 922 924 923 if (bch2_meta_read_fault("btree")) 925 924 btree_err(-BCH_ERR_btree_node_read_err_must_retry, c, ca, b, NULL, ··· 1851 1852 struct bset *i; 1852 1853 struct btree_node *bn = NULL; 1853 1854 struct btree_node_entry *bne = NULL; 1854 - struct sort_iter sort_iter; 1855 + struct sort_iter_stack sort_iter; 1855 1856 struct nonce nonce; 1856 1857 unsigned bytes_to_write, sectors_to_write, bytes, u64s; 1857 1858 u64 seq = 0; ··· 1924 1925 1925 1926 bch2_sort_whiteouts(c, b); 1926 1927 1927 - sort_iter_init(&sort_iter, b); 1928 + sort_iter_stack_init(&sort_iter, b); 1928 1929 1929 1930 bytes = !b->written 1930 1931 ? 
sizeof(struct btree_node) ··· 1939 1940 continue; 1940 1941 1941 1942 bytes += le16_to_cpu(i->u64s) * sizeof(u64); 1942 - sort_iter_add(&sort_iter, 1943 + sort_iter_add(&sort_iter.iter, 1943 1944 btree_bkey_first(b, t), 1944 1945 btree_bkey_last(b, t)); 1945 1946 seq = max(seq, le64_to_cpu(i->journal_seq)); ··· 1968 1969 i->journal_seq = cpu_to_le64(seq); 1969 1970 i->u64s = 0; 1970 1971 1971 - sort_iter_add(&sort_iter, 1972 + sort_iter_add(&sort_iter.iter, 1972 1973 unwritten_whiteouts_start(c, b), 1973 1974 unwritten_whiteouts_end(c, b)); 1974 1975 SET_BSET_SEPARATE_WHITEOUTS(i, false); 1975 1976 1976 1977 b->whiteout_u64s = 0; 1977 1978 1978 - u64s = bch2_sort_keys(i->start, &sort_iter, false); 1979 + u64s = bch2_sort_keys(i->start, &sort_iter.iter, false); 1979 1980 le16_add_cpu(&i->u64s, u64s); 1980 1981 1981 1982 BUG_ON(!b->written && i->u64s != b->data->keys.u64s);
+3 -3
fs/bcachefs/vstructs.h
··· 41 41 (round_up(vstruct_bytes(_s), 512 << (_sector_block_bits)) >> 9) 42 42 43 43 #define vstruct_next(_s) \ 44 - ((typeof(_s)) ((_s)->_data + __vstruct_u64s(_s))) 44 + ((typeof(_s)) ((u64 *) (_s)->_data + __vstruct_u64s(_s))) 45 45 #define vstruct_last(_s) \ 46 - ((typeof(&(_s)->start[0])) ((_s)->_data + __vstruct_u64s(_s))) 46 + ((typeof(&(_s)->start[0])) ((u64 *) (_s)->_data + __vstruct_u64s(_s))) 47 47 #define vstruct_end(_s) \ 48 - ((void *) ((_s)->_data + __vstruct_u64s(_s))) 48 + ((void *) ((u64 *) (_s)->_data + __vstruct_u64s(_s))) 49 49 50 50 #define vstruct_for_each(_s, _i) \ 51 51 for (_i = (_s)->start; \