Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
fork

Configure Feed

Select the types of activity you want to include in your feed.

dm thin: dont use map_context

This patch removes endio_hook_pool from dm-thin and uses per-bio data instead.

This patch removes any use of map_info in preparation for the next patch
that removes map_info from bio-based device mapper.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>

Authored by Mikulas Patocka and committed by Alasdair G Kergon
59c3d2c6 0045d61b

+13 -36
drivers/md/dm-thin.c
··· 186 186 187 187 struct dm_thin_new_mapping *next_mapping; 188 188 mempool_t *mapping_pool; 189 - mempool_t *endio_hook_pool; 190 189 191 190 process_bio_fn process_bio; 192 191 process_bio_fn process_discard; ··· 303 304 bio_list_init(master); 304 305 305 306 while ((bio = bio_list_pop(&bios))) { 306 - struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr; 307 + struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); 307 308 308 309 if (h->tc == tc) 309 310 bio_endio(bio, DM_ENDIO_REQUEUE); ··· 374 375 if (bio->bi_rw & REQ_DISCARD) 375 376 return; 376 377 377 - h = dm_get_mapinfo(bio)->ptr; 378 + h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); 378 379 h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds); 379 380 } 380 381 ··· 484 485 static void overwrite_endio(struct bio *bio, int err) 485 486 { 486 487 unsigned long flags; 487 - struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr; 488 + struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); 488 489 struct dm_thin_new_mapping *m = h->overwrite_mapping; 489 490 struct pool *pool = m->tc->pool; 490 491 ··· 713 714 * bio immediately. Otherwise we use kcopyd to clone the data first. 
714 715 */ 715 716 if (io_overwrites_block(pool, bio)) { 716 - struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr; 717 + struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); 717 718 718 719 h->overwrite_mapping = m; 719 720 m->bio = bio; ··· 783 784 process_prepared_mapping(m); 784 785 785 786 else if (io_overwrites_block(pool, bio)) { 786 - struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr; 787 + struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); 787 788 788 789 h->overwrite_mapping = m; 789 790 m->bio = bio; ··· 898 899 */ 899 900 static void retry_on_resume(struct bio *bio) 900 901 { 901 - struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr; 902 + struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); 902 903 struct thin_c *tc = h->tc; 903 904 struct pool *pool = tc->pool; 904 905 unsigned long flags; ··· 1050 1051 if (bio_data_dir(bio) == WRITE && bio->bi_size) 1051 1052 break_sharing(tc, bio, block, &key, lookup_result, cell); 1052 1053 else { 1053 - struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr; 1054 + struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); 1054 1055 1055 1056 h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds); 1056 1057 inc_all_io_entry(pool, bio); ··· 1225 1226 spin_unlock_irqrestore(&pool->lock, flags); 1226 1227 1227 1228 while ((bio = bio_list_pop(&bios))) { 1228 - struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr; 1229 + struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); 1229 1230 struct thin_c *tc = h->tc; 1230 1231 1231 1232 /* ··· 1358 1359 wake_worker(pool); 1359 1360 } 1360 1361 1361 - static struct dm_thin_endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio) 1362 + static void thin_hook_bio(struct thin_c *tc, struct bio *bio) 1362 1363 { 1363 - struct pool *pool = tc->pool; 1364 - struct 
dm_thin_endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO); 1364 + struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); 1365 1365 1366 1366 h->tc = tc; 1367 1367 h->shared_read_entry = NULL; 1368 1368 h->all_io_entry = NULL; 1369 1369 h->overwrite_mapping = NULL; 1370 - 1371 - return h; 1372 1370 } 1373 1371 1374 1372 /* ··· 1382 1386 struct dm_bio_prison_cell *cell1, *cell2; 1383 1387 struct dm_cell_key key; 1384 1388 1385 - map_context->ptr = thin_hook_bio(tc, bio); 1389 + thin_hook_bio(tc, bio); 1386 1390 1387 1391 if (get_pool_mode(tc->pool) == PM_FAIL) { 1388 1392 bio_io_error(bio); ··· 1591 1595 if (pool->next_mapping) 1592 1596 mempool_free(pool->next_mapping, pool->mapping_pool); 1593 1597 mempool_destroy(pool->mapping_pool); 1594 - mempool_destroy(pool->endio_hook_pool); 1595 1598 dm_deferred_set_destroy(pool->shared_read_ds); 1596 1599 dm_deferred_set_destroy(pool->all_io_ds); 1597 1600 kfree(pool); 1598 1601 } 1599 1602 1600 1603 static struct kmem_cache *_new_mapping_cache; 1601 - static struct kmem_cache *_endio_hook_cache; 1602 1604 1603 1605 static struct pool *pool_create(struct mapped_device *pool_md, 1604 1606 struct block_device *metadata_dev, ··· 1690 1696 goto bad_mapping_pool; 1691 1697 } 1692 1698 1693 - pool->endio_hook_pool = mempool_create_slab_pool(ENDIO_HOOK_POOL_SIZE, 1694 - _endio_hook_cache); 1695 - if (!pool->endio_hook_pool) { 1696 - *error = "Error creating pool's endio_hook mempool"; 1697 - err_p = ERR_PTR(-ENOMEM); 1698 - goto bad_endio_hook_pool; 1699 - } 1700 1699 pool->ref_count = 1; 1701 1700 pool->last_commit_jiffies = jiffies; 1702 1701 pool->pool_md = pool_md; ··· 1698 1711 1699 1712 return pool; 1700 1713 1701 - bad_endio_hook_pool: 1702 - mempool_destroy(pool->mapping_pool); 1703 1714 bad_mapping_pool: 1704 1715 dm_deferred_set_destroy(pool->all_io_ds); 1705 1716 bad_all_io_ds: ··· 2592 2607 2593 2608 ti->num_flush_requests = 1; 2594 2609 ti->flush_supported = true; 2610 + 
ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook); 2595 2611 2596 2612 /* In case the pool supports discards, pass them on. */ 2597 2613 if (tc->pool->pf.discard_enabled) { ··· 2639 2653 union map_info *map_context) 2640 2654 { 2641 2655 unsigned long flags; 2642 - struct dm_thin_endio_hook *h = map_context->ptr; 2656 + struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); 2643 2657 struct list_head work; 2644 2658 struct dm_thin_new_mapping *m, *tmp; 2645 2659 struct pool *pool = h->tc->pool; ··· 2668 2682 wake_worker(pool); 2669 2683 } 2670 2684 } 2671 - 2672 - mempool_free(h, pool->endio_hook_pool); 2673 2685 2674 2686 return 0; 2675 2687 } ··· 2797 2813 if (!_new_mapping_cache) 2798 2814 goto bad_new_mapping_cache; 2799 2815 2800 - _endio_hook_cache = KMEM_CACHE(dm_thin_endio_hook, 0); 2801 - if (!_endio_hook_cache) 2802 - goto bad_endio_hook_cache; 2803 - 2804 2816 return 0; 2805 2817 2806 - bad_endio_hook_cache: 2807 - kmem_cache_destroy(_new_mapping_cache); 2808 2818 bad_new_mapping_cache: 2809 2819 dm_unregister_target(&pool_target); 2810 2820 bad_pool_target: ··· 2813 2835 dm_unregister_target(&pool_target); 2814 2836 2815 2837 kmem_cache_destroy(_new_mapping_cache); 2816 - kmem_cache_destroy(_endio_hook_cache); 2817 2838 } 2818 2839 2819 2840 module_init(dm_thin_init);