Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dm: adjust structure members to improve alignment

Eliminate most holes in DM data structures that were modified by
commit 6f1c819c21 ("dm: convert to bioset_init()/mempool_init()").
Also prevent structure members from unnecessarily spanning cache
lines.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>

+85 -79
+1 -1
drivers/md/dm-bio-prison-v1.c
··· 19 19 20 20 struct dm_bio_prison { 21 21 spinlock_t lock; 22 - mempool_t cell_pool; 23 22 struct rb_root cells; 23 + mempool_t cell_pool; 24 24 }; 25 25 26 26 static struct kmem_cache *_cell_cache;
+1 -1
drivers/md/dm-bio-prison-v2.c
··· 21 21 struct workqueue_struct *wq; 22 22 23 23 spinlock_t lock; 24 - mempool_t cell_pool; 25 24 struct rb_root cells; 25 + mempool_t cell_pool; 26 26 }; 27 27 28 28 static struct kmem_cache *_cell_cache;
+33 -30
drivers/md/dm-cache-target.c
··· 371 371 372 372 struct cache { 373 373 struct dm_target *ti; 374 - struct dm_target_callbacks callbacks; 374 + spinlock_t lock; 375 + 376 + /* 377 + * Fields for converting from sectors to blocks. 378 + */ 379 + int sectors_per_block_shift; 380 + sector_t sectors_per_block; 375 381 376 382 struct dm_cache_metadata *cmd; 377 383 ··· 408 402 dm_cblock_t cache_size; 409 403 410 404 /* 411 - * Fields for converting from sectors to blocks. 405 + * Invalidation fields. 412 406 */ 413 - sector_t sectors_per_block; 414 - int sectors_per_block_shift; 407 + spinlock_t invalidation_lock; 408 + struct list_head invalidation_requests; 415 409 416 - spinlock_t lock; 417 - struct bio_list deferred_bios; 418 410 sector_t migration_threshold; 419 411 wait_queue_head_t migration_wait; 420 412 atomic_t nr_allocated_migrations; ··· 423 419 */ 424 420 atomic_t nr_io_migrations; 425 421 422 + struct bio_list deferred_bios; 423 + 426 424 struct rw_semaphore quiesce_lock; 427 425 428 - /* 429 - * cache_size entries, dirty if set 430 - */ 431 - atomic_t nr_dirty; 432 - unsigned long *dirty_bitset; 426 + struct dm_target_callbacks callbacks; 433 427 434 428 /* 435 429 * origin_blocks entries, discarded if set. 
··· 444 442 const char **ctr_args; 445 443 446 444 struct dm_kcopyd_client *copier; 447 - struct workqueue_struct *wq; 448 445 struct work_struct deferred_bio_worker; 449 446 struct work_struct migration_worker; 447 + struct workqueue_struct *wq; 450 448 struct delayed_work waker; 451 449 struct dm_bio_prison_v2 *prison; 452 - struct bio_set bs; 453 450 454 - mempool_t migration_pool; 451 + /* 452 + * cache_size entries, dirty if set 453 + */ 454 + unsigned long *dirty_bitset; 455 + atomic_t nr_dirty; 455 456 456 - struct dm_cache_policy *policy; 457 457 unsigned policy_nr_args; 458 - 459 - bool need_tick_bio:1; 460 - bool sized:1; 461 - bool invalidate:1; 462 - bool commit_requested:1; 463 - bool loaded_mappings:1; 464 - bool loaded_discards:1; 458 + struct dm_cache_policy *policy; 465 459 466 460 /* 467 461 * Cache features such as write-through. ··· 466 468 467 469 struct cache_stats stats; 468 470 469 - /* 470 - * Invalidation fields. 471 - */ 472 - spinlock_t invalidation_lock; 473 - struct list_head invalidation_requests; 471 + bool need_tick_bio:1; 472 + bool sized:1; 473 + bool invalidate:1; 474 + bool commit_requested:1; 475 + bool loaded_mappings:1; 476 + bool loaded_discards:1; 477 + 478 + struct rw_semaphore background_work_lock; 479 + 480 + struct batcher committer; 481 + struct work_struct commit_ws; 474 482 475 483 struct io_tracker tracker; 476 484 477 - struct work_struct commit_ws; 478 - struct batcher committer; 485 + mempool_t migration_pool; 479 486 480 - struct rw_semaphore background_work_lock; 487 + struct bio_set bs; 481 488 }; 482 489 483 490 struct per_bio_data {
+19 -19
drivers/md/dm-core.h
··· 31 31 struct mapped_device { 32 32 struct mutex suspend_lock; 33 33 34 + struct mutex table_devices_lock; 35 + struct list_head table_devices; 36 + 34 37 /* 35 38 * The current mapping (struct dm_table *). 36 39 * Use dm_get_live_table{_fast} or take suspend_lock for ··· 41 38 */ 42 39 void __rcu *map; 43 40 44 - struct list_head table_devices; 45 - struct mutex table_devices_lock; 46 - 47 41 unsigned long flags; 48 42 49 - struct request_queue *queue; 50 - int numa_node_id; 51 - 52 - enum dm_queue_mode type; 53 43 /* Protect queue and type against concurrent access. */ 54 44 struct mutex type_lock; 45 + enum dm_queue_mode type; 46 + 47 + int numa_node_id; 48 + struct request_queue *queue; 55 49 56 50 atomic_t holders; 57 51 atomic_t open_count; ··· 56 56 struct dm_target *immutable_target; 57 57 struct target_type *immutable_target_type; 58 58 59 + char name[16]; 59 60 struct gendisk *disk; 60 61 struct dax_device *dax_dev; 61 - char name[16]; 62 - 63 - void *interface_ptr; 64 62 65 63 /* 66 64 * A list of ios that arrived while we were suspended. 67 65 */ 68 - atomic_t pending[2]; 69 - wait_queue_head_t wait; 70 66 struct work_struct work; 67 + wait_queue_head_t wait; 68 + atomic_t pending[2]; 71 69 spinlock_t deferred_lock; 72 70 struct bio_list deferred; 71 + 72 + void *interface_ptr; 73 73 74 74 /* 75 75 * Event handling. ··· 84 84 unsigned internal_suspend_count; 85 85 86 86 /* 87 - * Processing queue (flush) 88 - */ 89 - struct workqueue_struct *wq; 90 - 91 - /* 92 87 * io objects are allocated from here. 
93 88 */ 94 89 struct bio_set io_bs; 95 90 struct bio_set bs; 91 + 92 + /* 93 + * Processing queue (flush) 94 + */ 95 + struct workqueue_struct *wq; 96 96 97 97 /* 98 98 * freeze/thaw support require holding onto a super block ··· 102 102 /* forced geometry settings */ 103 103 struct hd_geometry geometry; 104 104 105 - struct block_device *bdev; 106 - 107 105 /* kobject and completion */ 108 106 struct dm_kobject_holder kobj_holder; 107 + 108 + struct block_device *bdev; 109 109 110 110 /* zero-length flush that will be cloned and submitted to targets */ 111 111 struct bio flush_bio;
+13 -13
drivers/md/dm-crypt.c
··· 139 139 struct dm_dev *dev; 140 140 sector_t start; 141 141 142 - /* 143 - * pool for per bio private data, crypto requests, 144 - * encryption requests/buffer pages and integrity tags 145 - */ 146 - mempool_t req_pool; 147 - mempool_t page_pool; 148 - mempool_t tag_pool; 149 - unsigned tag_pool_max_sectors; 150 - 151 142 struct percpu_counter n_allocated_pages; 152 - 153 - struct bio_set bs; 154 - struct mutex bio_alloc_lock; 155 143 156 144 struct workqueue_struct *io_queue; 157 145 struct workqueue_struct *crypt_queue; 158 146 159 - struct task_struct *write_thread; 160 147 wait_queue_head_t write_thread_wait; 148 + struct task_struct *write_thread; 161 149 struct rb_root write_tree; 162 150 163 151 char *cipher; ··· 200 212 unsigned int integrity_tag_size; 201 213 unsigned int integrity_iv_size; 202 214 unsigned int on_disk_tag_size; 215 + 216 + /* 217 + * pool for per bio private data, crypto requests, 218 + * encryption requests/buffer pages and integrity tags 219 + */ 220 + unsigned tag_pool_max_sectors; 221 + mempool_t tag_pool; 222 + mempool_t req_pool; 223 + mempool_t page_pool; 224 + 225 + struct bio_set bs; 226 + struct mutex bio_alloc_lock; 203 227 204 228 u8 *authenc_key; /* space for keys in authenc() format (if used) */ 205 229 u8 key[0];
+2 -1
drivers/md/dm-kcopyd.c
··· 45 45 struct dm_io_client *io_client; 46 46 47 47 wait_queue_head_t destroyq; 48 - atomic_t nr_jobs; 49 48 50 49 mempool_t job_pool; 51 50 ··· 52 53 struct work_struct kcopyd_work; 53 54 54 55 struct dm_kcopyd_throttle *throttle; 56 + 57 + atomic_t nr_jobs; 55 58 56 59 /* 57 60 * We maintain three lists of jobs:
+12 -11
drivers/md/dm-region-hash.c
··· 63 63 64 64 /* hash table */ 65 65 rwlock_t hash_lock; 66 - mempool_t region_pool; 67 66 unsigned mask; 68 67 unsigned nr_buckets; 69 68 unsigned prime; 70 69 unsigned shift; 71 70 struct list_head *buckets; 72 71 73 - unsigned max_recovery; /* Max # of regions to recover in parallel */ 74 - 75 - spinlock_t region_lock; 76 - atomic_t recovery_in_flight; 77 - struct semaphore recovery_count; 78 - struct list_head clean_regions; 79 - struct list_head quiesced_regions; 80 - struct list_head recovered_regions; 81 - struct list_head failed_recovered_regions; 82 - 83 72 /* 84 73 * If there was a flush failure no regions can be marked clean. 85 74 */ 86 75 int flush_failure; 76 + 77 + unsigned max_recovery; /* Max # of regions to recover in parallel */ 78 + 79 + spinlock_t region_lock; 80 + atomic_t recovery_in_flight; 81 + struct list_head clean_regions; 82 + struct list_head quiesced_regions; 83 + struct list_head recovered_regions; 84 + struct list_head failed_recovered_regions; 85 + struct semaphore recovery_count; 86 + 87 + mempool_t region_pool; 87 88 88 89 void *context; 89 90 sector_t target_begin;
+3 -2
drivers/md/dm-thin.c
··· 240 240 struct dm_bio_prison *prison; 241 241 struct dm_kcopyd_client *copier; 242 242 243 + struct work_struct worker; 243 244 struct workqueue_struct *wq; 244 245 struct throttle throttle; 245 - struct work_struct worker; 246 246 struct delayed_work waker; 247 247 struct delayed_work no_space_timeout; 248 248 ··· 260 260 struct dm_deferred_set *all_io_ds; 261 261 262 262 struct dm_thin_new_mapping *next_mapping; 263 - mempool_t mapping_pool; 264 263 265 264 process_bio_fn process_bio; 266 265 process_bio_fn process_discard; ··· 272 273 process_mapping_fn process_prepared_discard_pt2; 273 274 274 275 struct dm_bio_prison_cell **cell_sort_array; 276 + 277 + mempool_t mapping_pool; 275 278 }; 276 279 277 280 static enum pool_mode get_pool_mode(struct pool *pool);
+1 -1
drivers/md/dm-zoned-target.c
··· 52 52 struct dmz_reclaim *reclaim; 53 53 54 54 /* For chunk work */ 55 - struct mutex chunk_lock; 56 55 struct radix_tree_root chunk_rxtree; 57 56 struct workqueue_struct *chunk_wq; 57 + struct mutex chunk_lock; 58 58 59 59 /* For cloned BIOs to zones */ 60 60 struct bio_set bio_set;