Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

dm thin: use slab mempools

Use dedicated caches prefixed with a "dm_" name rather than relying on
kmalloc mempools backed by generic slab caches so the memory usage of
thin provisioning (and any leaks) can be accounted for independently.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
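
The change follows a common kernel pattern: each private structure is renamed
with a "dm_" prefix so that KMEM_CACHE(), which names the cache after the
struct it is given, produces entries ("dm_bio_prison_cell",
"dm_thin_new_mapping", "dm_thin_endio_hook") that are easy to pick out in
/proc/slabinfo, and each mempool_create_kmalloc_pool() call becomes
mempool_create_slab_pool() on a dedicated cache. A minimal sketch of the same
pattern in isolation follows; struct dm_example and its pool size are
illustrative only, not part of dm-thin.

#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/list.h>

/* "dm_" prefix: KMEM_CACHE() derives the cache name from the struct name. */
struct dm_example {
	struct list_head list;
	int err;
};

static struct kmem_cache *_example_cache;
static mempool_t *_example_pool;

static int example_init(void)
{
	/*
	 * Before this patch the equivalent pool would have been created as
	 *
	 *	_example_pool = mempool_create_kmalloc_pool(16,
	 *				sizeof(struct dm_example));
	 *
	 * which draws from a generic kmalloc size class, so its usage (and
	 * any leak) is lumped in with every other kmalloc user of that size.
	 */
	_example_cache = KMEM_CACHE(dm_example, 0);
	if (!_example_cache)
		return -ENOMEM;

	/* A dedicated "dm_example" cache is accounted for separately. */
	_example_pool = mempool_create_slab_pool(16, _example_cache);
	if (!_example_pool) {
		kmem_cache_destroy(_example_cache);
		return -ENOMEM;
	}

	return 0;
}

static void example_exit(void)
{
	/* Destroy the pool before the cache that backs it. */
	mempool_destroy(_example_pool);
	kmem_cache_destroy(_example_cache);
}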

authored by Mike Snitzer, committed by Alasdair G Kergon
a24c2569 35991652

drivers/md/dm-thin.c (+99 -62)
···
 	dm_block_t block;
 };

-struct cell {
+struct dm_bio_prison_cell {
 	struct hlist_node list;
 	struct bio_prison *prison;
 	struct cell_key key;
···
 	return n;
 }

+static struct kmem_cache *_cell_cache;
+
 /*
  * @nr_cells should be the number of cells you want in use _concurrently_.
  * Don't confuse it with the number of distinct keys.
···
 		return NULL;

 	spin_lock_init(&prison->lock);
-	prison->cell_pool = mempool_create_kmalloc_pool(nr_cells,
-							sizeof(struct cell));
+	prison->cell_pool = mempool_create_slab_pool(nr_cells, _cell_cache);
 	if (!prison->cell_pool) {
 		kfree(prison);
 		return NULL;
···
 		(lhs->block == rhs->block);
 }

-static struct cell *__search_bucket(struct hlist_head *bucket,
-				    struct cell_key *key)
+static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket,
+						  struct cell_key *key)
 {
-	struct cell *cell;
+	struct dm_bio_prison_cell *cell;
 	struct hlist_node *tmp;

 	hlist_for_each_entry(cell, tmp, bucket, list)
···
  * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
  */
 static int bio_detain(struct bio_prison *prison, struct cell_key *key,
-		      struct bio *inmate, struct cell **ref)
+		      struct bio *inmate, struct dm_bio_prison_cell **ref)
 {
 	int r = 1;
 	unsigned long flags;
 	uint32_t hash = hash_key(prison, key);
-	struct cell *cell, *cell2;
+	struct dm_bio_prison_cell *cell, *cell2;

 	BUG_ON(hash > prison->nr_buckets);

···
 /*
  * @inmates must have been initialised prior to this call
  */
-static void __cell_release(struct cell *cell, struct bio_list *inmates)
+static void __cell_release(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
 {
 	struct bio_prison *prison = cell->prison;
···
 	mempool_free(cell, prison->cell_pool);
 }

-static void cell_release(struct cell *cell, struct bio_list *bios)
+static void cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
 {
 	unsigned long flags;
 	struct bio_prison *prison = cell->prison;
···
  * bio may be in the cell. This function releases the cell, and also does
  * a sanity check.
  */
-static void __cell_release_singleton(struct cell *cell, struct bio *bio)
+static void __cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
 {
 	BUG_ON(cell->holder != bio);
 	BUG_ON(!bio_list_empty(&cell->bios));
···
 	__cell_release(cell, NULL);
 }

-static void cell_release_singleton(struct cell *cell, struct bio *bio)
+static void cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
 {
 	unsigned long flags;
 	struct bio_prison *prison = cell->prison;
···
 /*
  * Sometimes we don't want the holder, just the additional bios.
  */
-static void __cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
+static void __cell_release_no_holder(struct dm_bio_prison_cell *cell,
+				     struct bio_list *inmates)
 {
 	struct bio_prison *prison = cell->prison;
···
 	mempool_free(cell, prison->cell_pool);
 }

-static void cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
+static void cell_release_no_holder(struct dm_bio_prison_cell *cell,
+				   struct bio_list *inmates)
 {
 	unsigned long flags;
 	struct bio_prison *prison = cell->prison;
···
 	spin_unlock_irqrestore(&prison->lock, flags);
 }

-static void cell_error(struct cell *cell)
+static void cell_error(struct dm_bio_prison_cell *cell)
 {
 	struct bio_prison *prison = cell->prison;
 	struct bio_list bios;
···
  * also provides the interface for creating and destroying internal
  * devices.
  */
-struct new_mapping;
+struct dm_thin_new_mapping;

 struct pool_features {
 	unsigned zero_new_blocks:1;
···
 	struct deferred_set shared_read_ds;
 	struct deferred_set all_io_ds;

-	struct new_mapping *next_mapping;
+	struct dm_thin_new_mapping *next_mapping;
 	mempool_t *mapping_pool;
 	mempool_t *endio_hook_pool;
 };
···

 /*----------------------------------------------------------------*/

-struct endio_hook {
+struct dm_thin_endio_hook {
 	struct thin_c *tc;
 	struct deferred_entry *shared_read_entry;
 	struct deferred_entry *all_io_entry;
-	struct new_mapping *overwrite_mapping;
+	struct dm_thin_new_mapping *overwrite_mapping;
 };

 static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
···
 	bio_list_init(master);

 	while ((bio = bio_list_pop(&bios))) {
-		struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+
 		if (h->tc == tc)
 			bio_endio(bio, DM_ENDIO_REQUEUE);
 		else
···
 /*
  * Bio endio functions.
  */
-struct new_mapping {
+struct dm_thin_new_mapping {
 	struct list_head list;

 	unsigned quiesced:1;
···
 	struct thin_c *tc;
 	dm_block_t virt_block;
 	dm_block_t data_block;
-	struct cell *cell, *cell2;
+	struct dm_bio_prison_cell *cell, *cell2;
 	int err;

 	/*
···
 	bio_end_io_t *saved_bi_end_io;
 };

-static void __maybe_add_mapping(struct new_mapping *m)
+static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
 {
 	struct pool *pool = m->tc->pool;
···
 static void copy_complete(int read_err, unsigned long write_err, void *context)
 {
 	unsigned long flags;
-	struct new_mapping *m = context;
+	struct dm_thin_new_mapping *m = context;
 	struct pool *pool = m->tc->pool;

 	m->err = read_err || write_err ? -EIO : 0;
···
 static void overwrite_endio(struct bio *bio, int err)
 {
 	unsigned long flags;
-	struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
-	struct new_mapping *m = h->overwrite_mapping;
+	struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+	struct dm_thin_new_mapping *m = h->overwrite_mapping;
 	struct pool *pool = m->tc->pool;

 	m->err = err;
···
 /*
  * This sends the bios in the cell back to the deferred_bios list.
  */
-static void cell_defer(struct thin_c *tc, struct cell *cell,
+static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell,
 		       dm_block_t data_block)
 {
 	struct pool *pool = tc->pool;
···
  * Same as cell_defer above, except it omits one particular detainee,
  * a write bio that covers the block and has already been processed.
  */
-static void cell_defer_except(struct thin_c *tc, struct cell *cell)
+static void cell_defer_except(struct thin_c *tc, struct dm_bio_prison_cell *cell)
 {
 	struct bio_list bios;
 	struct pool *pool = tc->pool;
···
 	wake_worker(pool);
 }

-static void process_prepared_mapping(struct new_mapping *m)
+static void process_prepared_mapping(struct dm_thin_new_mapping *m)
 {
 	struct thin_c *tc = m->tc;
 	struct bio *bio;
···
 	mempool_free(m, tc->pool->mapping_pool);
 }

-static void process_prepared_discard(struct new_mapping *m)
+static void process_prepared_discard(struct dm_thin_new_mapping *m)
 {
 	int r;
 	struct thin_c *tc = m->tc;
···
 }

 static void process_prepared(struct pool *pool, struct list_head *head,
-			     void (*fn)(struct new_mapping *))
+			     void (*fn)(struct dm_thin_new_mapping *))
 {
 	unsigned long flags;
 	struct list_head maps;
-	struct new_mapping *m, *tmp;
+	struct dm_thin_new_mapping *m, *tmp;

 	INIT_LIST_HEAD(&maps);
 	spin_lock_irqsave(&pool->lock, flags);
···
 	return pool->next_mapping ? 0 : -ENOMEM;
 }

-static struct new_mapping *get_next_mapping(struct pool *pool)
+static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
 {
-	struct new_mapping *r = pool->next_mapping;
+	struct dm_thin_new_mapping *r = pool->next_mapping;

 	BUG_ON(!pool->next_mapping);

···
 static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
 			  struct dm_dev *origin, dm_block_t data_origin,
 			  dm_block_t data_dest,
-			  struct cell *cell, struct bio *bio)
+			  struct dm_bio_prison_cell *cell, struct bio *bio)
 {
 	int r;
 	struct pool *pool = tc->pool;
-	struct new_mapping *m = get_next_mapping(pool);
+	struct dm_thin_new_mapping *m = get_next_mapping(pool);

 	INIT_LIST_HEAD(&m->list);
 	m->quiesced = 0;
···
 	 * bio immediately. Otherwise we use kcopyd to clone the data first.
 	 */
 	if (io_overwrites_block(pool, bio)) {
-		struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+
 		h->overwrite_mapping = m;
 		m->bio = bio;
 		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
···

 static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
 				   dm_block_t data_origin, dm_block_t data_dest,
-				   struct cell *cell, struct bio *bio)
+				   struct dm_bio_prison_cell *cell, struct bio *bio)
 {
 	schedule_copy(tc, virt_block, tc->pool_dev,
 		      data_origin, data_dest, cell, bio);
···

 static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
 				   dm_block_t data_dest,
-				   struct cell *cell, struct bio *bio)
+				   struct dm_bio_prison_cell *cell, struct bio *bio)
 {
 	schedule_copy(tc, virt_block, tc->origin_dev,
 		      virt_block, data_dest, cell, bio);
 }

 static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
-			  dm_block_t data_block, struct cell *cell,
+			  dm_block_t data_block, struct dm_bio_prison_cell *cell,
 			  struct bio *bio)
 {
 	struct pool *pool = tc->pool;
-	struct new_mapping *m = get_next_mapping(pool);
+	struct dm_thin_new_mapping *m = get_next_mapping(pool);

 	INIT_LIST_HEAD(&m->list);
 	m->quiesced = 1;
···
 		process_prepared_mapping(m);

 	else if (io_overwrites_block(pool, bio)) {
-		struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+
 		h->overwrite_mapping = m;
 		m->bio = bio;
 		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
 		remap_and_issue(tc, bio, data_block);
-
 	} else {
 		int r;
 		struct dm_io_region to;
···
  */
 static void retry_on_resume(struct bio *bio)
 {
-	struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+	struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
 	struct thin_c *tc = h->tc;
 	struct pool *pool = tc->pool;
 	unsigned long flags;
···
 	spin_unlock_irqrestore(&pool->lock, flags);
 }

-static void no_space(struct cell *cell)
+static void no_space(struct dm_bio_prison_cell *cell)
 {
 	struct bio *bio;
 	struct bio_list bios;
···
 	int r;
 	unsigned long flags;
 	struct pool *pool = tc->pool;
-	struct cell *cell, *cell2;
+	struct dm_bio_prison_cell *cell, *cell2;
 	struct cell_key key, key2;
 	dm_block_t block = get_bio_block(tc, bio);
 	struct dm_thin_lookup_result lookup_result;
-	struct new_mapping *m;
+	struct dm_thin_new_mapping *m;

 	build_virtual_key(tc->td, block, &key);
 	if (bio_detain(tc->pool->prison, &key, bio, &cell))
···
 static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
 			  struct cell_key *key,
 			  struct dm_thin_lookup_result *lookup_result,
-			  struct cell *cell)
+			  struct dm_bio_prison_cell *cell)
 {
 	int r;
 	dm_block_t data_block;
···
 			       dm_block_t block,
 			       struct dm_thin_lookup_result *lookup_result)
 {
-	struct cell *cell;
+	struct dm_bio_prison_cell *cell;
 	struct pool *pool = tc->pool;
 	struct cell_key key;

···
 	if (bio_data_dir(bio) == WRITE)
 		break_sharing(tc, bio, block, &key, lookup_result, cell);
 	else {
-		struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;

 		h->shared_read_entry = ds_inc(&pool->shared_read_ds);

···
 static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
-			    struct cell *cell)
+			    struct dm_bio_prison_cell *cell)
 {
 	int r;
 	dm_block_t data_block;
···
 {
 	int r;
 	dm_block_t block = get_bio_block(tc, bio);
-	struct cell *cell;
+	struct dm_bio_prison_cell *cell;
 	struct cell_key key;
 	struct dm_thin_lookup_result lookup_result;

···
 	spin_unlock_irqrestore(&pool->lock, flags);

 	while ((bio = bio_list_pop(&bios))) {
-		struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
 		struct thin_c *tc = h->tc;

 		/*
···
 	wake_worker(pool);
 }

-static struct endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio)
+static struct dm_thin_endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio)
 {
 	struct pool *pool = tc->pool;
-	struct endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);
+	struct dm_thin_endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);

 	h->tc = tc;
 	h->shared_read_entry = NULL;
···
 	kfree(pool);
 }

+static struct kmem_cache *_new_mapping_cache;
+static struct kmem_cache *_endio_hook_cache;
+
 static struct pool *pool_create(struct mapped_device *pool_md,
 				struct block_device *metadata_dev,
 				unsigned long block_size, char **error)
···
 	ds_init(&pool->all_io_ds);

 	pool->next_mapping = NULL;
-	pool->mapping_pool =
-		mempool_create_kmalloc_pool(MAPPING_POOL_SIZE, sizeof(struct new_mapping));
+	pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
+						      _new_mapping_cache);
 	if (!pool->mapping_pool) {
 		*error = "Error creating pool's mapping mempool";
 		err_p = ERR_PTR(-ENOMEM);
 		goto bad_mapping_pool;
 	}

-	pool->endio_hook_pool =
-		mempool_create_kmalloc_pool(ENDIO_HOOK_POOL_SIZE, sizeof(struct endio_hook));
+	pool->endio_hook_pool = mempool_create_slab_pool(ENDIO_HOOK_POOL_SIZE,
+							 _endio_hook_cache);
 	if (!pool->endio_hook_pool) {
 		*error = "Error creating pool's endio_hook mempool";
 		err_p = ERR_PTR(-ENOMEM);
···
 			    union map_info *map_context)
 {
 	unsigned long flags;
-	struct endio_hook *h = map_context->ptr;
+	struct dm_thin_endio_hook *h = map_context->ptr;
 	struct list_head work;
-	struct new_mapping *m, *tmp;
+	struct dm_thin_new_mapping *m, *tmp;
 	struct pool *pool = h->tc->pool;

 	if (h->shared_read_entry) {
···

 	r = dm_register_target(&pool_target);
 	if (r)
-		dm_unregister_target(&thin_target);
+		goto bad_pool_target;
+
+	r = -ENOMEM;
+
+	_cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
+	if (!_cell_cache)
+		goto bad_cell_cache;
+
+	_new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
+	if (!_new_mapping_cache)
+		goto bad_new_mapping_cache;
+
+	_endio_hook_cache = KMEM_CACHE(dm_thin_endio_hook, 0);
+	if (!_endio_hook_cache)
+		goto bad_endio_hook_cache;
+
+	return 0;
+
+bad_endio_hook_cache:
+	kmem_cache_destroy(_new_mapping_cache);
+bad_new_mapping_cache:
+	kmem_cache_destroy(_cell_cache);
+bad_cell_cache:
+	dm_unregister_target(&pool_target);
+bad_pool_target:
+	dm_unregister_target(&thin_target);

 	return r;
 }
···
 {
 	dm_unregister_target(&thin_target);
 	dm_unregister_target(&pool_target);
+
+	kmem_cache_destroy(_cell_cache);
+	kmem_cache_destroy(_new_mapping_cache);
+	kmem_cache_destroy(_endio_hook_cache);
 }

 module_init(dm_thin_init);