Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

md-bitmap: don't use ->index for pages backing the bitmap file

The md driver allocates pages for storing the bitmap file data, which
are not page cache pages, and then stores the page granularity file
offset in page->index, which is a field that isn't really valid except
for page cache pages.

Use a separate index for the superblock, and use the scheme used at
read side to recalculate the index for the bitmap pages instead.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Song Liu <song@kernel.org>
Link: https://lore.kernel.org/r/20230615064840.629492-10-hch@lst.de

Authored by Christoph Hellwig and committed by Song Liu.
d7038f95 f5f2d5ac

+39 -27
+38 -27
drivers/md/md-bitmap.c
··· 157 157 test_bit(Bitmap_sync, &rdev->flags)) 158 158 continue; 159 159 160 - if (sync_page_io(rdev, sector, iosize, page, REQ_OP_READ, 161 - true)) { 162 - page->index = index; 160 + if (sync_page_io(rdev, sector, iosize, page, REQ_OP_READ, true)) 163 161 return 0; 164 - } 165 162 } 166 163 return -EIO; 167 164 } ··· 222 225 } 223 226 224 227 static int __write_sb_page(struct md_rdev *rdev, struct bitmap *bitmap, 225 - struct page *page) 228 + unsigned long pg_index, struct page *page) 226 229 { 227 230 struct block_device *bdev; 228 231 struct mddev *mddev = bitmap->mddev; 229 232 struct bitmap_storage *store = &bitmap->storage; 230 233 loff_t sboff, offset = mddev->bitmap_info.offset; 231 - sector_t ps, doff; 234 + sector_t ps = pg_index * PAGE_SIZE / SECTOR_SIZE; 232 235 unsigned int size = PAGE_SIZE; 233 236 unsigned int opt_size = PAGE_SIZE; 237 + sector_t doff; 234 238 235 239 bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev; 236 - if (page->index == store->file_pages - 1) { 240 + if (pg_index == store->file_pages - 1) { 237 241 unsigned int last_page_size = store->bytes & (PAGE_SIZE - 1); 238 242 239 243 if (last_page_size == 0) ··· 243 245 opt_size = optimal_io_size(bdev, last_page_size, size); 244 246 } 245 247 246 - ps = page->index * PAGE_SIZE / SECTOR_SIZE; 247 248 sboff = rdev->sb_start + offset; 248 249 doff = rdev->data_offset; 249 250 ··· 276 279 return 0; 277 280 } 278 281 279 - static void write_sb_page(struct bitmap *bitmap, struct page *page, int wait) 282 + static void write_sb_page(struct bitmap *bitmap, unsigned long pg_index, 283 + struct page *page, bool wait) 280 284 { 281 285 struct mddev *mddev = bitmap->mddev; 282 286 ··· 285 287 struct md_rdev *rdev = NULL; 286 288 287 289 while ((rdev = next_active_rdev(rdev, mddev)) != NULL) { 288 - if (__write_sb_page(rdev, bitmap, page) < 0) { 290 + if (__write_sb_page(rdev, bitmap, pg_index, page) < 0) { 289 291 set_bit(BITMAP_WRITE_ERROR, &bitmap->flags); 290 292 return; 291 293 } ··· 
395 397 blk_cur++; 396 398 bh = bh->b_this_page; 397 399 } 398 - page->index = index; 399 400 400 401 wait_event(bitmap->write_wait, 401 402 atomic_read(&bitmap->pending_writes)==0); ··· 416 419 /* 417 420 * write out a page to a file 418 421 */ 419 - static void write_page(struct bitmap *bitmap, struct page *page, int wait) 422 + static void filemap_write_page(struct bitmap *bitmap, unsigned long pg_index, 423 + bool wait) 420 424 { 421 - if (bitmap->storage.file) 425 + struct bitmap_storage *store = &bitmap->storage; 426 + struct page *page = store->filemap[pg_index]; 427 + 428 + if (mddev_is_clustered(bitmap->mddev)) { 429 + pg_index += bitmap->cluster_slot * 430 + DIV_ROUND_UP(store->bytes, PAGE_SIZE); 431 + } 432 + 433 + if (store->file) 422 434 write_file_page(bitmap, page, wait); 423 435 else 424 - write_sb_page(bitmap, page, wait); 436 + write_sb_page(bitmap, pg_index, page, wait); 425 437 } 426 438 427 439 /* ··· 487 481 sb->sectors_reserved = cpu_to_le32(bitmap->mddev-> 488 482 bitmap_info.space); 489 483 kunmap_atomic(sb); 490 - write_page(bitmap, bitmap->storage.sb_page, 1); 484 + 485 + if (bitmap->storage.file) 486 + write_file_page(bitmap, bitmap->storage.sb_page, 1); 487 + else 488 + write_sb_page(bitmap, bitmap->storage.sb_index, 489 + bitmap->storage.sb_page, 1); 491 490 } 492 491 EXPORT_SYMBOL(md_bitmap_update_sb); 493 492 ··· 544 533 bitmap->storage.sb_page = alloc_page(GFP_KERNEL | __GFP_ZERO); 545 534 if (bitmap->storage.sb_page == NULL) 546 535 return -ENOMEM; 547 - bitmap->storage.sb_page->index = 0; 536 + bitmap->storage.sb_index = 0; 548 537 549 538 sb = kmap_atomic(bitmap->storage.sb_page); 550 539 ··· 821 810 if (store->sb_page) { 822 811 store->filemap[0] = store->sb_page; 823 812 pnum = 1; 824 - store->sb_page->index = offset; 813 + store->sb_index = offset; 825 814 } 826 815 827 816 for ( ; pnum < num_pages; pnum++) { ··· 830 819 store->file_pages = pnum; 831 820 return -ENOMEM; 832 821 } 833 - store->filemap[pnum]->index = pnum + 
offset; 834 822 } 835 823 store->file_pages = pnum; 836 824 ··· 934 924 void *kaddr; 935 925 unsigned long chunk = block >> bitmap->counts.chunkshift; 936 926 struct bitmap_storage *store = &bitmap->storage; 927 + unsigned long index = file_page_index(store, chunk); 937 928 unsigned long node_offset = 0; 938 929 939 930 if (mddev_is_clustered(bitmap->mddev)) ··· 952 941 else 953 942 set_bit_le(bit, kaddr); 954 943 kunmap_atomic(kaddr); 955 - pr_debug("set file bit %lu page %lu\n", bit, page->index); 944 + pr_debug("set file bit %lu page %lu\n", bit, index); 956 945 /* record page number so it gets flushed to disk when unplug occurs */ 957 - set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_DIRTY); 946 + set_page_attr(bitmap, index - node_offset, BITMAP_PAGE_DIRTY); 958 947 } 959 948 960 949 static void md_bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block) ··· 964 953 void *paddr; 965 954 unsigned long chunk = block >> bitmap->counts.chunkshift; 966 955 struct bitmap_storage *store = &bitmap->storage; 956 + unsigned long index = file_page_index(store, chunk); 967 957 unsigned long node_offset = 0; 968 958 969 959 if (mddev_is_clustered(bitmap->mddev)) ··· 980 968 else 981 969 clear_bit_le(bit, paddr); 982 970 kunmap_atomic(paddr); 983 - if (!test_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_NEEDWRITE)) { 984 - set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_PENDING); 971 + if (!test_page_attr(bitmap, index - node_offset, BITMAP_PAGE_NEEDWRITE)) { 972 + set_page_attr(bitmap, index - node_offset, BITMAP_PAGE_PENDING); 985 973 bitmap->allclean = 0; 986 974 } 987 975 } ··· 1033 1021 "md bitmap_unplug"); 1034 1022 } 1035 1023 clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING); 1036 - write_page(bitmap, bitmap->storage.filemap[i], 0); 1024 + filemap_write_page(bitmap, i, false); 1037 1025 writing = 1; 1038 1026 } 1039 1027 } ··· 1165 1153 memset(paddr + offset, 0xff, PAGE_SIZE - offset); 1166 1154 kunmap_atomic(paddr); 1167 1155 
1168 - write_page(bitmap, page, 1); 1156 + filemap_write_page(bitmap, i, true); 1169 1157 if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags)) { 1170 1158 ret = -EIO; 1171 1159 goto err; ··· 1386 1374 break; 1387 1375 if (bitmap->storage.filemap && 1388 1376 test_and_clear_page_attr(bitmap, j, 1389 - BITMAP_PAGE_NEEDWRITE)) { 1390 - write_page(bitmap, bitmap->storage.filemap[j], 0); 1391 - } 1377 + BITMAP_PAGE_NEEDWRITE)) 1378 + filemap_write_page(bitmap, j, false); 1392 1379 } 1393 1380 1394 1381 done:
+1
drivers/md/md-bitmap.h
··· 201 201 struct file *file; /* backing disk file */ 202 202 struct page *sb_page; /* cached copy of the bitmap 203 203 * file superblock */ 204 + unsigned long sb_index; 204 205 struct page **filemap; /* list of cache pages for 205 206 * the file */ 206 207 unsigned long *filemap_attr; /* attributes associated