Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dm: remove unused _rq_tio_cache and _rq_cache

Also move dm_rq_target_io structure definition from dm-rq.h to dm-rq.c

Fixes: 6a23e05c2fe3c6 ("dm: remove legacy request-based IO path")
Signed-off-by: Mike Snitzer <snitzer@redhat.com>

+18 -36
+16
drivers/md/dm-rq.c
@@ -12,6 +12,22 @@
 
 #define DM_MSG_PREFIX "core-rq"
 
+/*
+ * One of these is allocated per request.
+ */
+struct dm_rq_target_io {
+	struct mapped_device *md;
+	struct dm_target *ti;
+	struct request *orig, *clone;
+	struct kthread_work work;
+	blk_status_t error;
+	union map_info info;
+	struct dm_stats_aux stats_aux;
+	unsigned long duration_jiffies;
+	unsigned n_sectors;
+	unsigned completed;
+};
+
 #define DM_MQ_NR_HW_QUEUES 1
 #define DM_MQ_QUEUE_DEPTH 2048
 static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
-16
drivers/md/dm-rq.h
@@ -17,22 +17,6 @@
 struct mapped_device;
 
 /*
- * One of these is allocated per request.
- */
-struct dm_rq_target_io {
-	struct mapped_device *md;
-	struct dm_target *ti;
-	struct request *orig, *clone;
-	struct kthread_work work;
-	blk_status_t error;
-	union map_info info;
-	struct dm_stats_aux stats_aux;
-	unsigned long duration_jiffies;
-	unsigned n_sectors;
-	unsigned completed;
-};
-
-/*
  * For request-based dm - the bio clones we allocate are embedded in these
  * structs.
  *
+2 -20
drivers/md/dm.c
@@ -158,9 +158,6 @@
 	struct dm_dev dm_dev;
 };
 
-static struct kmem_cache *_rq_tio_cache;
-static struct kmem_cache *_rq_cache;
-
 /*
  * Bio-based DM's mempools' reserved IOs set by the user.
  */
@@ -219,20 +222,11 @@
 
 static int __init local_init(void)
 {
-	int r = -ENOMEM;
-
-	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
-	if (!_rq_tio_cache)
-		return r;
-
-	_rq_cache = kmem_cache_create("dm_old_clone_request", sizeof(struct request),
-				      __alignof__(struct request), 0, NULL);
-	if (!_rq_cache)
-		goto out_free_rq_tio_cache;
+	int r;
 
 	r = dm_uevent_init();
 	if (r)
-		goto out_free_rq_cache;
+		return r;
 
 	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
 	if (!deferred_remove_workqueue) {
@@ -245,10 +257,6 @@
 	destroy_workqueue(deferred_remove_workqueue);
 out_uevent_exit:
 	dm_uevent_exit();
-out_free_rq_cache:
-	kmem_cache_destroy(_rq_cache);
-out_free_rq_tio_cache:
-	kmem_cache_destroy(_rq_tio_cache);
 
 	return r;
 }
@@ -254,8 +270,6 @@
 	flush_scheduled_work();
 	destroy_workqueue(deferred_remove_workqueue);
 
-	kmem_cache_destroy(_rq_cache);
-	kmem_cache_destroy(_rq_tio_cache);
 	unregister_blkdev(_major, _name);
 	dm_uevent_exit();
 