Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

md/raid1/10: reset bio allocated from mempool

Data allocated from a mempool doesn't always get initialized; this happens when
the data is reused instead of freshly allocated. In the raid1/10 case, we must
reinitialize the bios.

Reported-by: Jonathan G. Underwood <jonathan.underwood@gmail.com>
Fixes: f0250618361d ("md: raid10: don't use bio's vec table to manage resync pages")
Fixes: 98d30c5812c3 ("md: raid1: don't use bio's vec table to manage resync pages")
Cc: stable@vger.kernel.org (4.12+)
Cc: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Shaohua Li <shli@fb.com>

+50 -4
+18 -1
drivers/md/raid1.c
··· 2564 2564 return 0; 2565 2565 } 2566 2566 2567 + static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf) 2568 + { 2569 + struct r1bio *r1bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO); 2570 + struct resync_pages *rps; 2571 + struct bio *bio; 2572 + int i; 2573 + 2574 + for (i = conf->poolinfo->raid_disks; i--; ) { 2575 + bio = r1bio->bios[i]; 2576 + rps = bio->bi_private; 2577 + bio_reset(bio); 2578 + bio->bi_private = rps; 2579 + } 2580 + r1bio->master_bio = NULL; 2581 + return r1bio; 2582 + } 2583 + 2567 2584 /* 2568 2585 * perform a "sync" on one "block" 2569 2586 * ··· 2666 2649 2667 2650 bitmap_cond_end_sync(mddev->bitmap, sector_nr, 2668 2651 mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high)); 2669 - r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO); 2652 + r1_bio = raid1_alloc_init_r1buf(conf); 2670 2653 2671 2654 raise_barrier(conf, sector_nr); 2672 2655
+32 -3
drivers/md/raid10.c
··· 2798 2798 return 0; 2799 2799 } 2800 2800 2801 + static struct r10bio *raid10_alloc_init_r10buf(struct r10conf *conf) 2802 + { 2803 + struct r10bio *r10bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); 2804 + struct rsync_pages *rp; 2805 + struct bio *bio; 2806 + int nalloc; 2807 + int i; 2808 + 2809 + if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) || 2810 + test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery)) 2811 + nalloc = conf->copies; /* resync */ 2812 + else 2813 + nalloc = 2; /* recovery */ 2814 + 2815 + for (i = 0; i < nalloc; i++) { 2816 + bio = r10bio->devs[i].bio; 2817 + rp = bio->bi_private; 2818 + bio_reset(bio); 2819 + bio->bi_private = rp; 2820 + bio = r10bio->devs[i].repl_bio; 2821 + if (bio) { 2822 + rp = bio->bi_private; 2823 + bio_reset(bio); 2824 + bio->bi_private = rp; 2825 + } 2826 + } 2827 + return r10bio; 2828 + } 2829 + 2801 2830 /* 2802 2831 * perform a "sync" on one "block" 2803 2832 * ··· 3056 3027 atomic_inc(&mreplace->nr_pending); 3057 3028 rcu_read_unlock(); 3058 3029 3059 - r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); 3030 + r10_bio = raid10_alloc_init_r10buf(conf); 3060 3031 r10_bio->state = 0; 3061 3032 raise_barrier(conf, rb2 != NULL); 3062 3033 atomic_set(&r10_bio->remaining, 0); ··· 3265 3236 } 3266 3237 if (sync_blocks < max_sync) 3267 3238 max_sync = sync_blocks; 3268 - r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); 3239 + r10_bio = raid10_alloc_init_r10buf(conf); 3269 3240 r10_bio->state = 0; 3270 3241 3271 3242 r10_bio->mddev = mddev; ··· 4389 4360 4390 4361 read_more: 4391 4362 /* Now schedule reads for blocks from sector_nr to last */ 4392 - r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); 4363 + r10_bio = raid10_alloc_init_r10buf(conf); 4393 4364 r10_bio->state = 0; 4394 4365 raise_barrier(conf, sectors_done != 0); 4395 4366 atomic_set(&r10_bio->remaining, 0);