drivers/md/dm-thin.c: +99 -62
···
 	dm_block_t block;
 };
 
-struct cell {
+struct dm_bio_prison_cell {
 	struct hlist_node list;
 	struct bio_prison *prison;
 	struct cell_key key;
···
 	return n;
 }
 
+static struct kmem_cache *_cell_cache;
+
 /*
  * @nr_cells should be the number of cells you want in use _concurrently_.
  * Don't confuse it with the number of distinct keys.
···
 		return NULL;
 
 	spin_lock_init(&prison->lock);
-	prison->cell_pool = mempool_create_kmalloc_pool(nr_cells,
-							sizeof(struct cell));
+	prison->cell_pool = mempool_create_slab_pool(nr_cells, _cell_cache);
 	if (!prison->cell_pool) {
 		kfree(prison);
 		return NULL;
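
The hunk above replaces the kmalloc-backed cell mempool with one backed by a dedicated slab cache. As a minimal sketch of that kernel pattern (hypothetical struct foo and function names, not dm-thin code): KMEM_CACHE() derives the cache name and object size from the struct definition, and mempool_create_slab_pool() keeps a minimum number of objects reserved so allocations can make forward progress in the I/O path.

/* Hypothetical example of the slab-backed mempool pattern adopted above. */
#include <linux/slab.h>
#include <linux/mempool.h>

struct foo {
	int x;
};

static struct kmem_cache *_foo_cache;	/* one slab cache for all struct foo objects */
static mempool_t *_foo_pool;		/* reserve of preallocated objects */

static int foo_pools_init(unsigned int min_nr)
{
	_foo_cache = KMEM_CACHE(foo, 0);
	if (!_foo_cache)
		return -ENOMEM;

	_foo_pool = mempool_create_slab_pool(min_nr, _foo_cache);
	if (!_foo_pool) {
		kmem_cache_destroy(_foo_cache);
		return -ENOMEM;
	}

	return 0;
}

static void foo_pools_exit(void)
{
	mempool_destroy(_foo_pool);	/* returns reserved objects to the cache */
	kmem_cache_destroy(_foo_cache);	/* only valid once all objects have been freed */
}

Compared with mempool_create_kmalloc_pool(nr, sizeof(struct cell)), the slab variant gives the objects a named cache of exactly the right size instead of rounding up to a kmalloc size class.
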
···
 		(lhs->block == rhs->block);
 }
 
-static struct cell *__search_bucket(struct hlist_head *bucket,
-				    struct cell_key *key)
+static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket,
+						  struct cell_key *key)
 {
-	struct cell *cell;
+	struct dm_bio_prison_cell *cell;
 	struct hlist_node *tmp;
 
 	hlist_for_each_entry(cell, tmp, bucket, list)
···
  * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
  */
 static int bio_detain(struct bio_prison *prison, struct cell_key *key,
-		      struct bio *inmate, struct cell **ref)
+		      struct bio *inmate, struct dm_bio_prison_cell **ref)
 {
 	int r = 1;
 	unsigned long flags;
 	uint32_t hash = hash_key(prison, key);
-	struct cell *cell, *cell2;
+	struct dm_bio_prison_cell *cell, *cell2;
 
 	BUG_ON(hash > prison->nr_buckets);
 
···
 /*
  * @inmates must have been initialised prior to this call
  */
-static void __cell_release(struct cell *cell, struct bio_list *inmates)
+static void __cell_release(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
 {
 	struct bio_prison *prison = cell->prison;
 
···
 	mempool_free(cell, prison->cell_pool);
 }
 
-static void cell_release(struct cell *cell, struct bio_list *bios)
+static void cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
 {
 	unsigned long flags;
 	struct bio_prison *prison = cell->prison;
···
  * bio may be in the cell. This function releases the cell, and also does
  * a sanity check.
  */
-static void __cell_release_singleton(struct cell *cell, struct bio *bio)
+static void __cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
 {
 	BUG_ON(cell->holder != bio);
 	BUG_ON(!bio_list_empty(&cell->bios));
···
 	__cell_release(cell, NULL);
 }
 
-static void cell_release_singleton(struct cell *cell, struct bio *bio)
+static void cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
 {
 	unsigned long flags;
 	struct bio_prison *prison = cell->prison;
···
 /*
  * Sometimes we don't want the holder, just the additional bios.
  */
-static void __cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
+static void __cell_release_no_holder(struct dm_bio_prison_cell *cell,
+				     struct bio_list *inmates)
 {
 	struct bio_prison *prison = cell->prison;
 
···
 	mempool_free(cell, prison->cell_pool);
 }
 
-static void cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
+static void cell_release_no_holder(struct dm_bio_prison_cell *cell,
+				   struct bio_list *inmates)
 {
 	unsigned long flags;
 	struct bio_prison *prison = cell->prison;
···
 	spin_unlock_irqrestore(&prison->lock, flags);
 }
 
-static void cell_error(struct cell *cell)
+static void cell_error(struct dm_bio_prison_cell *cell)
 {
 	struct bio_prison *prison = cell->prison;
 	struct bio_list bios;
···
  * also provides the interface for creating and destroying internal
  * devices.
  */
-struct new_mapping;
+struct dm_thin_new_mapping;
 
 struct pool_features {
 	unsigned zero_new_blocks:1;
···
 	struct deferred_set shared_read_ds;
 	struct deferred_set all_io_ds;
 
-	struct new_mapping *next_mapping;
+	struct dm_thin_new_mapping *next_mapping;
 	mempool_t *mapping_pool;
 	mempool_t *endio_hook_pool;
 };
···
 
 /*----------------------------------------------------------------*/
 
-struct endio_hook {
+struct dm_thin_endio_hook {
 	struct thin_c *tc;
 	struct deferred_entry *shared_read_entry;
 	struct deferred_entry *all_io_entry;
-	struct new_mapping *overwrite_mapping;
+	struct dm_thin_new_mapping *overwrite_mapping;
 };
 
 static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
···
 	bio_list_init(master);
 
 	while ((bio = bio_list_pop(&bios))) {
-		struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+
 		if (h->tc == tc)
 			bio_endio(bio, DM_ENDIO_REQUEUE);
 		else
···
 /*
  * Bio endio functions.
  */
-struct new_mapping {
+struct dm_thin_new_mapping {
 	struct list_head list;
 
 	unsigned quiesced:1;
···
 	struct thin_c *tc;
 	dm_block_t virt_block;
 	dm_block_t data_block;
-	struct cell *cell, *cell2;
+	struct dm_bio_prison_cell *cell, *cell2;
 	int err;
 
 	/*
···
 	bio_end_io_t *saved_bi_end_io;
 };
 
-static void __maybe_add_mapping(struct new_mapping *m)
+static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
 {
 	struct pool *pool = m->tc->pool;
 
···
 static void copy_complete(int read_err, unsigned long write_err, void *context)
 {
 	unsigned long flags;
-	struct new_mapping *m = context;
+	struct dm_thin_new_mapping *m = context;
 	struct pool *pool = m->tc->pool;
 
 	m->err = read_err || write_err ? -EIO : 0;
···
 static void overwrite_endio(struct bio *bio, int err)
 {
 	unsigned long flags;
-	struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
-	struct new_mapping *m = h->overwrite_mapping;
+	struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+	struct dm_thin_new_mapping *m = h->overwrite_mapping;
 	struct pool *pool = m->tc->pool;
 
 	m->err = err;
···
 /*
  * This sends the bios in the cell back to the deferred_bios list.
  */
-static void cell_defer(struct thin_c *tc, struct cell *cell,
+static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell,
 		       dm_block_t data_block)
 {
 	struct pool *pool = tc->pool;
···
  * Same as cell_defer above, except it omits one particular detainee,
  * a write bio that covers the block and has already been processed.
  */
-static void cell_defer_except(struct thin_c *tc, struct cell *cell)
+static void cell_defer_except(struct thin_c *tc, struct dm_bio_prison_cell *cell)
 {
 	struct bio_list bios;
 	struct pool *pool = tc->pool;
···
 	wake_worker(pool);
 }
 
-static void process_prepared_mapping(struct new_mapping *m)
+static void process_prepared_mapping(struct dm_thin_new_mapping *m)
 {
 	struct thin_c *tc = m->tc;
 	struct bio *bio;
···
 	mempool_free(m, tc->pool->mapping_pool);
 }
 
-static void process_prepared_discard(struct new_mapping *m)
+static void process_prepared_discard(struct dm_thin_new_mapping *m)
 {
 	int r;
 	struct thin_c *tc = m->tc;
···
 }
 
 static void process_prepared(struct pool *pool, struct list_head *head,
-			     void (*fn)(struct new_mapping *))
+			     void (*fn)(struct dm_thin_new_mapping *))
 {
 	unsigned long flags;
 	struct list_head maps;
-	struct new_mapping *m, *tmp;
+	struct dm_thin_new_mapping *m, *tmp;
 
 	INIT_LIST_HEAD(&maps);
 	spin_lock_irqsave(&pool->lock, flags);
···
 	return pool->next_mapping ? 0 : -ENOMEM;
 }
 
-static struct new_mapping *get_next_mapping(struct pool *pool)
+static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
 {
-	struct new_mapping *r = pool->next_mapping;
+	struct dm_thin_new_mapping *r = pool->next_mapping;
 
 	BUG_ON(!pool->next_mapping);
 
···
 static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
 			  struct dm_dev *origin, dm_block_t data_origin,
 			  dm_block_t data_dest,
-			  struct cell *cell, struct bio *bio)
+			  struct dm_bio_prison_cell *cell, struct bio *bio)
 {
 	int r;
 	struct pool *pool = tc->pool;
-	struct new_mapping *m = get_next_mapping(pool);
+	struct dm_thin_new_mapping *m = get_next_mapping(pool);
 
 	INIT_LIST_HEAD(&m->list);
 	m->quiesced = 0;
···
 	 * bio immediately. Otherwise we use kcopyd to clone the data first.
 	 */
 	if (io_overwrites_block(pool, bio)) {
-		struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+
 		h->overwrite_mapping = m;
 		m->bio = bio;
 		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
···
 
 static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
 				   dm_block_t data_origin, dm_block_t data_dest,
-				   struct cell *cell, struct bio *bio)
+				   struct dm_bio_prison_cell *cell, struct bio *bio)
 {
 	schedule_copy(tc, virt_block, tc->pool_dev,
 		      data_origin, data_dest, cell, bio);
···
 
 static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
 				   dm_block_t data_dest,
-				   struct cell *cell, struct bio *bio)
+				   struct dm_bio_prison_cell *cell, struct bio *bio)
 {
 	schedule_copy(tc, virt_block, tc->origin_dev,
 		      virt_block, data_dest, cell, bio);
 }
 
 static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
-			  dm_block_t data_block, struct cell *cell,
+			  dm_block_t data_block, struct dm_bio_prison_cell *cell,
 			  struct bio *bio)
 {
 	struct pool *pool = tc->pool;
-	struct new_mapping *m = get_next_mapping(pool);
+	struct dm_thin_new_mapping *m = get_next_mapping(pool);
 
 	INIT_LIST_HEAD(&m->list);
 	m->quiesced = 1;
···
 		process_prepared_mapping(m);
 
 	else if (io_overwrites_block(pool, bio)) {
-		struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+
 		h->overwrite_mapping = m;
 		m->bio = bio;
 		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
 		remap_and_issue(tc, bio, data_block);
-
 	} else {
 		int r;
 		struct dm_io_region to;
···
  */
 static void retry_on_resume(struct bio *bio)
 {
-	struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+	struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
 	struct thin_c *tc = h->tc;
 	struct pool *pool = tc->pool;
 	unsigned long flags;
···
 	spin_unlock_irqrestore(&pool->lock, flags);
 }
 
-static void no_space(struct cell *cell)
+static void no_space(struct dm_bio_prison_cell *cell)
 {
 	struct bio *bio;
 	struct bio_list bios;
···
 	int r;
 	unsigned long flags;
 	struct pool *pool = tc->pool;
-	struct cell *cell, *cell2;
+	struct dm_bio_prison_cell *cell, *cell2;
 	struct cell_key key, key2;
 	dm_block_t block = get_bio_block(tc, bio);
 	struct dm_thin_lookup_result lookup_result;
-	struct new_mapping *m;
+	struct dm_thin_new_mapping *m;
 
 	build_virtual_key(tc->td, block, &key);
 	if (bio_detain(tc->pool->prison, &key, bio, &cell))
···
 static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
 			  struct cell_key *key,
 			  struct dm_thin_lookup_result *lookup_result,
-			  struct cell *cell)
+			  struct dm_bio_prison_cell *cell)
 {
 	int r;
 	dm_block_t data_block;
···
 			       dm_block_t block,
 			       struct dm_thin_lookup_result *lookup_result)
 {
-	struct cell *cell;
+	struct dm_bio_prison_cell *cell;
 	struct pool *pool = tc->pool;
 	struct cell_key key;
 
···
 	if (bio_data_dir(bio) == WRITE)
 		break_sharing(tc, bio, block, &key, lookup_result, cell);
 	else {
-		struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
 
 		h->shared_read_entry = ds_inc(&pool->shared_read_ds);
 
···
 }
 
 static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
-			    struct cell *cell)
+			    struct dm_bio_prison_cell *cell)
 {
 	int r;
 	dm_block_t data_block;
···
 {
 	int r;
 	dm_block_t block = get_bio_block(tc, bio);
-	struct cell *cell;
+	struct dm_bio_prison_cell *cell;
 	struct cell_key key;
 	struct dm_thin_lookup_result lookup_result;
 
···
 	spin_unlock_irqrestore(&pool->lock, flags);
 
 	while ((bio = bio_list_pop(&bios))) {
-		struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
 		struct thin_c *tc = h->tc;
 
 		/*
···
 	wake_worker(pool);
 }
 
-static struct endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio)
+static struct dm_thin_endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio)
 {
 	struct pool *pool = tc->pool;
-	struct endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);
+	struct dm_thin_endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);
 
 	h->tc = tc;
 	h->shared_read_entry = NULL;
···
 	kfree(pool);
 }
 
+static struct kmem_cache *_new_mapping_cache;
+static struct kmem_cache *_endio_hook_cache;
+
 static struct pool *pool_create(struct mapped_device *pool_md,
 				struct block_device *metadata_dev,
 				unsigned long block_size, char **error)
···
 	ds_init(&pool->all_io_ds);
 
 	pool->next_mapping = NULL;
-	pool->mapping_pool =
-		mempool_create_kmalloc_pool(MAPPING_POOL_SIZE, sizeof(struct new_mapping));
+	pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
+						      _new_mapping_cache);
 	if (!pool->mapping_pool) {
 		*error = "Error creating pool's mapping mempool";
 		err_p = ERR_PTR(-ENOMEM);
 		goto bad_mapping_pool;
 	}
 
-	pool->endio_hook_pool =
-		mempool_create_kmalloc_pool(ENDIO_HOOK_POOL_SIZE, sizeof(struct endio_hook));
+	pool->endio_hook_pool = mempool_create_slab_pool(ENDIO_HOOK_POOL_SIZE,
+							 _endio_hook_cache);
 	if (!pool->endio_hook_pool) {
 		*error = "Error creating pool's endio_hook mempool";
 		err_p = ERR_PTR(-ENOMEM);
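
As with the cell pool earlier, the two pools above are now backed by dedicated caches (_new_mapping_cache and _endio_hook_cache). At runtime they are consumed like any other mempool; a hedged sketch of the alloc/free cycle, reusing the hypothetical struct foo and _foo_pool from the earlier example (not dm-thin code):

/* Hypothetical sketch: taking and returning an object on the I/O path. */
static struct foo *foo_get(void)
{
	/*
	 * GFP_NOIO: this may run on the writeback path, so the allocation
	 * must not recurse into the block layer. If the slab allocation
	 * fails, mempool_alloc() falls back to the preallocated reserve
	 * and, for reclaiming flags such as GFP_NOIO, waits for an object
	 * to be freed rather than returning NULL.
	 */
	return mempool_alloc(_foo_pool, GFP_NOIO);
}

static void foo_put(struct foo *f)
{
	mempool_free(f, _foo_pool);	/* refills the reserve before freeing to the slab */
}

This mirrors how thin_hook_bio() above allocates a dm_thin_endio_hook from pool->endio_hook_pool with GFP_NOIO and how prepared mappings are returned with mempool_free().
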
···
 			union map_info *map_context)
 {
 	unsigned long flags;
-	struct endio_hook *h = map_context->ptr;
+	struct dm_thin_endio_hook *h = map_context->ptr;
 	struct list_head work;
-	struct new_mapping *m, *tmp;
+	struct dm_thin_new_mapping *m, *tmp;
 	struct pool *pool = h->tc->pool;
 
 	if (h->shared_read_entry) {
···
 
 	r = dm_register_target(&pool_target);
 	if (r)
-		dm_unregister_target(&thin_target);
+		goto bad_pool_target;
+
+	r = -ENOMEM;
+
+	_cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
+	if (!_cell_cache)
+		goto bad_cell_cache;
+
+	_new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
+	if (!_new_mapping_cache)
+		goto bad_new_mapping_cache;
+
+	_endio_hook_cache = KMEM_CACHE(dm_thin_endio_hook, 0);
+	if (!_endio_hook_cache)
+		goto bad_endio_hook_cache;
+
+	return 0;
+
+bad_endio_hook_cache:
+	kmem_cache_destroy(_new_mapping_cache);
+bad_new_mapping_cache:
+	kmem_cache_destroy(_cell_cache);
+bad_cell_cache:
+	dm_unregister_target(&pool_target);
+bad_pool_target:
+	dm_unregister_target(&thin_target);
 
 	return r;
 }
···
 {
 	dm_unregister_target(&thin_target);
 	dm_unregister_target(&pool_target);
+
+	kmem_cache_destroy(_cell_cache);
+	kmem_cache_destroy(_new_mapping_cache);
+	kmem_cache_destroy(_endio_hook_cache);
 }
 
 module_init(dm_thin_init);