Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dm io: use fixed initial mempool size

Replace the arbitrary calculation of an initial io struct mempool size
with a constant.

The code calculated the number of reserved structures based on the request
size and used a "magic" multiplication constant of 4. This patch changes
it to reserve a fixed number - itself still chosen quite arbitrarily.
Further testing might show if there is a better number to choose.

Note that if there is no memory pressure, we can still allocate an
arbitrary number of "struct io" structures. One structure is enough to
process the whole request.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>

Authored by Mikulas Patocka and committed by Alasdair G Kergon.

Commit: bda8efec  Parent: d0471458

+10 -41
+5 -22
drivers/md/dm-io.c
··· 19 19 #define DM_MSG_PREFIX "io" 20 20 21 21 #define DM_IO_MAX_REGIONS BITS_PER_LONG 22 + #define MIN_IOS 16 23 + #define MIN_BIOS 16 22 24 23 25 struct dm_io_client { 24 26 mempool_t *pool; ··· 43 41 static struct kmem_cache *_dm_io_cache; 44 42 45 43 /* 46 - * io contexts are only dynamically allocated for asynchronous 47 - * io. Since async io is likely to be the majority of io we'll 48 - * have the same number of io contexts as bios! (FIXME: must reduce this). 49 - */ 50 - 51 - static unsigned int pages_to_ios(unsigned int pages) 52 - { 53 - return 4 * pages; /* too many ? */ 54 - } 55 - 56 - /* 57 44 * Create a client with mempool and bioset. 58 45 */ 59 - struct dm_io_client *dm_io_client_create(unsigned num_pages) 46 + struct dm_io_client *dm_io_client_create(void) 60 47 { 61 - unsigned ios = pages_to_ios(num_pages); 62 48 struct dm_io_client *client; 63 49 64 50 client = kmalloc(sizeof(*client), GFP_KERNEL); 65 51 if (!client) 66 52 return ERR_PTR(-ENOMEM); 67 53 68 - client->pool = mempool_create_slab_pool(ios, _dm_io_cache); 54 + client->pool = mempool_create_slab_pool(MIN_IOS, _dm_io_cache); 69 55 if (!client->pool) 70 56 goto bad; 71 57 72 - client->bios = bioset_create(16, 0); 58 + client->bios = bioset_create(MIN_BIOS, 0); 73 59 if (!client->bios) 74 60 goto bad; 75 61 ··· 70 80 return ERR_PTR(-ENOMEM); 71 81 } 72 82 EXPORT_SYMBOL(dm_io_client_create); 73 - 74 - int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client) 75 - { 76 - return mempool_resize(client->pool, pages_to_ios(num_pages), 77 - GFP_KERNEL); 78 - } 79 - EXPORT_SYMBOL(dm_io_client_resize); 80 83 81 84 void dm_io_client_destroy(struct dm_io_client *client) 82 85 {
+1 -1
drivers/md/dm-kcopyd.c
··· 667 667 if (r) 668 668 goto bad_client_pages; 669 669 670 - kc->io_client = dm_io_client_create(min_pages); 670 + kc->io_client = dm_io_client_create(); 671 671 if (IS_ERR(kc->io_client)) { 672 672 r = PTR_ERR(kc->io_client); 673 673 goto bad_io_client;
+1 -2
drivers/md/dm-log.c
··· 449 449 450 450 lc->io_req.mem.type = DM_IO_VMA; 451 451 lc->io_req.notify.fn = NULL; 452 - lc->io_req.client = dm_io_client_create(dm_div_up(buf_size, 453 - PAGE_SIZE)); 452 + lc->io_req.client = dm_io_client_create(); 454 453 if (IS_ERR(lc->io_req.client)) { 455 454 r = PTR_ERR(lc->io_req.client); 456 455 DMWARN("couldn't allocate disk io client");
+1 -2
drivers/md/dm-raid1.c
··· 22 22 #define DM_MSG_PREFIX "raid1" 23 23 24 24 #define MAX_RECOVERY 1 /* Maximum number of regions recovered in parallel. */ 25 - #define DM_IO_PAGES 64 26 25 #define DM_KCOPYD_PAGES 64 27 26 28 27 #define DM_RAID1_HANDLE_ERRORS 0x01 ··· 886 887 return NULL; 887 888 } 888 889 889 - ms->io_client = dm_io_client_create(DM_IO_PAGES); 890 + ms->io_client = dm_io_client_create(); 890 891 if (IS_ERR(ms->io_client)) { 891 892 ti->error = "Error creating dm_io client"; 892 893 mempool_destroy(ms->read_record_pool);
+1 -12
drivers/md/dm-snap-persistent.c
··· 154 154 struct workqueue_struct *metadata_wq; 155 155 }; 156 156 157 - static unsigned sectors_to_pages(unsigned sectors) 158 - { 159 - return DIV_ROUND_UP(sectors, PAGE_SIZE >> 9); 160 - } 161 - 162 157 static int alloc_area(struct pstore *ps) 163 158 { 164 159 int r = -ENOMEM; ··· 313 318 chunk_size_supplied = 0; 314 319 } 315 320 316 - ps->io_client = dm_io_client_create(sectors_to_pages(ps->store-> 317 - chunk_size)); 321 + ps->io_client = dm_io_client_create(); 318 322 if (IS_ERR(ps->io_client)) 319 323 return PTR_ERR(ps->io_client); 320 324 ··· 361 367 chunk_size, chunk_err); 362 368 return r; 363 369 } 364 - 365 - r = dm_io_client_resize(sectors_to_pages(ps->store->chunk_size), 366 - ps->io_client); 367 - if (r) 368 - return r; 369 370 370 371 r = alloc_area(ps); 371 372 return r;
+1 -2
include/linux/dm-io.h
··· 69 69 * 70 70 * Create/destroy may block. 71 71 */ 72 - struct dm_io_client *dm_io_client_create(unsigned num_pages); 73 - int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client); 72 + struct dm_io_client *dm_io_client_create(void); 74 73 void dm_io_client_destroy(struct dm_io_client *client); 75 74 76 75 /*