Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dm delay: use per-bio data instead of a mempool and slab cache

Starting with commit c0820cf5ad095 ("dm: introduce per_bio_data"),
device mapper has the capability to pre-allocate a target-specific
structure with the bio.

This patch changes dm-delay to use this facility instead of a slab cache
and mempool.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>

Authored by Mikulas Patocka and committed by Mike Snitzer
42065460 57a2f238

+7 -28
+7 -28
drivers/md/dm-delay.c
···
 24  24 	struct work_struct flush_expired_bios;
 25  25 	struct list_head delayed_bios;
 26  26 	atomic_t may_delay;
 27   - 	mempool_t *delayed_pool;
 28  27 
 29  28 	struct dm_dev *dev_read;
 30  29 	sector_t start_read;
···
 39  40 struct dm_delay_info {
 40  41 	struct delay_c *context;
 41  42 	struct list_head list;
 42   - 	struct bio *bio;
 43  43 	unsigned long expires;
 44  44 };
 45  45 
 46  46 static DEFINE_MUTEX(delayed_bios_lock);
 47   - 
 48   - static struct kmem_cache *delayed_cache;
 49  47 
 50  48 static void handle_delayed_timer(unsigned long data)
 51  49 {
···
 83  87 	mutex_lock(&delayed_bios_lock);
 84  88 	list_for_each_entry_safe(delayed, next, &dc->delayed_bios, list) {
 85  89 		if (flush_all || time_after_eq(jiffies, delayed->expires)) {
     90 +			struct bio *bio = dm_bio_from_per_bio_data(delayed,
     91 +						sizeof(struct dm_delay_info));
 86  92 			list_del(&delayed->list);
 87   - 			bio_list_add(&flush_bios, delayed->bio);
 88   - 			if ((bio_data_dir(delayed->bio) == WRITE))
     93 +			bio_list_add(&flush_bios, bio);
     94 +			if ((bio_data_dir(bio) == WRITE))
 89  95 				delayed->context->writes--;
 90  96 			else
 91  97 				delayed->context->reads--;
 92   - 			mempool_free(delayed, dc->delayed_pool);
 93  98 			continue;
 94  99 		}
 95 100 
···
182 185 	}
183 186 
184 187 out:
185   - 	dc->delayed_pool = mempool_create_slab_pool(128, delayed_cache);
186   - 	if (!dc->delayed_pool) {
187   - 		DMERR("Couldn't create delayed bio pool.");
188   - 		goto bad_dev_write;
189   - 	}
190   - 
191 188 	dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
192 189 	if (!dc->kdelayd_wq) {
193 190 		DMERR("Couldn't start kdelayd");
···
197 206 
198 207 	ti->num_flush_bios = 1;
199 208 	ti->num_discard_bios = 1;
    209 +	ti->per_bio_data_size = sizeof(struct dm_delay_info);
200 210 	ti->private = dc;
201 211 	return 0;
202 212 
203 213 bad_queue:
204   - 	mempool_destroy(dc->delayed_pool);
205   - bad_dev_write:
206 214 	if (dc->dev_write)
207 215 		dm_put_device(ti, dc->dev_write);
208 216 bad_dev_read:
···
222 232 	if (dc->dev_write)
223 233 		dm_put_device(ti, dc->dev_write);
224 234 
225   - 	mempool_destroy(dc->delayed_pool);
226 235 	kfree(dc);
227 236 }
228 237 
···
233 244 	if (!delay || !atomic_read(&dc->may_delay))
234 245 		return 1;
235 246 
236   - 	delayed = mempool_alloc(dc->delayed_pool, GFP_NOIO);
    247 +	delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));
237 248 
238 249 	delayed->context = dc;
239   - 	delayed->bio = bio;
240 250 	delayed->expires = expires = jiffies + (delay * HZ / 1000);
241 251 
242 252 	mutex_lock(&delayed_bios_lock);
···
344 356 
345 357 static int __init dm_delay_init(void)
346 358 {
347   - 	int r = -ENOMEM;
348   - 
349   - 	delayed_cache = KMEM_CACHE(dm_delay_info, 0);
350   - 	if (!delayed_cache) {
351   - 		DMERR("Couldn't create delayed bio cache.");
352   - 		goto bad_memcache;
353   - 	}
    359 +	int r;
354 360 
355 361 	r = dm_register_target(&delay_target);
356 362 	if (r < 0) {
···
355 373 	return 0;
356 374 
357 375 bad_register:
358   - 	kmem_cache_destroy(delayed_cache);
359   - bad_memcache:
360 376 	return r;
361 377 }
362 378 
363 379 static void __exit dm_delay_exit(void)
364 380 {
365 381 	dm_unregister_target(&delay_target);
366   - 	kmem_cache_destroy(delayed_cache);
367 382 }
368 383 
369 384 /* Module hooks */