Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

btrfs: cleanup, use kmalloc_array/kcalloc array helpers

Convert kmalloc(nr * size, ..) to kmalloc_array, which performs an
additional overflow check on the multiplication; the zeroing variant
is kcalloc.

Signed-off-by: David Sterba <dsterba@suse.cz>

+18 -21
+2 -2
fs/btrfs/check-integrity.c
··· 2990 2990 (unsigned long long)bio->bi_iter.bi_sector, 2991 2991 dev_bytenr, bio->bi_bdev); 2992 2992 2993 - mapped_datav = kmalloc(sizeof(*mapped_datav) * bio->bi_vcnt, 2994 - GFP_NOFS); 2993 + mapped_datav = kmalloc_array(bio->bi_vcnt, 2994 + sizeof(*mapped_datav), GFP_NOFS); 2995 2995 if (!mapped_datav) 2996 2996 goto leave; 2997 2997 cur_bytenr = dev_bytenr;
+1 -1
fs/btrfs/compression.c
··· 622 622 cb->orig_bio = bio; 623 623 624 624 nr_pages = DIV_ROUND_UP(compressed_len, PAGE_CACHE_SIZE); 625 - cb->compressed_pages = kzalloc(sizeof(struct page *) * nr_pages, 625 + cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *), 626 626 GFP_NOFS); 627 627 if (!cb->compressed_pages) 628 628 goto fail1;
+4 -5
fs/btrfs/ctree.c
··· 578 578 if (!tree_mod_need_log(fs_info, eb)) 579 579 return 0; 580 580 581 - tm_list = kzalloc(nr_items * sizeof(struct tree_mod_elem *), flags); 581 + tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), flags); 582 582 if (!tm_list) 583 583 return -ENOMEM; 584 584 ··· 677 677 678 678 if (log_removal && btrfs_header_level(old_root) > 0) { 679 679 nritems = btrfs_header_nritems(old_root); 680 - tm_list = kzalloc(nritems * sizeof(struct tree_mod_elem *), 680 + tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), 681 681 flags); 682 682 if (!tm_list) { 683 683 ret = -ENOMEM; ··· 814 814 if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0) 815 815 return 0; 816 816 817 - tm_list = kzalloc(nr_items * 2 * sizeof(struct tree_mod_elem *), 817 + tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *), 818 818 GFP_NOFS); 819 819 if (!tm_list) 820 820 return -ENOMEM; ··· 905 905 return 0; 906 906 907 907 nritems = btrfs_header_nritems(eb); 908 - tm_list = kzalloc(nritems * sizeof(struct tree_mod_elem *), 909 - GFP_NOFS); 908 + tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS); 910 909 if (!tm_list) 911 910 return -ENOMEM; 912 911
+1 -1
fs/btrfs/disk-io.c
··· 302 302 offset += cur_len; 303 303 } 304 304 if (csum_size > sizeof(inline_result)) { 305 - result = kzalloc(csum_size * sizeof(char), GFP_NOFS); 305 + result = kzalloc(csum_size, GFP_NOFS); 306 306 if (!result) 307 307 return 1; 308 308 } else {
+2 -2
fs/btrfs/file-item.c
··· 185 185 nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits; 186 186 if (!dst) { 187 187 if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) { 188 - btrfs_bio->csum_allocated = kmalloc(nblocks * csum_size, 189 - GFP_NOFS); 188 + btrfs_bio->csum_allocated = kmalloc_array(nblocks, 189 + csum_size, GFP_NOFS); 190 190 if (!btrfs_bio->csum_allocated) { 191 191 btrfs_free_path(path); 192 192 return -ENOMEM;
+1 -1
fs/btrfs/file.c
··· 1481 1481 PAGE_CACHE_SIZE / (sizeof(struct page *))); 1482 1482 nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied); 1483 1483 nrptrs = max(nrptrs, 8); 1484 - pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL); 1484 + pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL); 1485 1485 if (!pages) 1486 1486 return -ENOMEM; 1487 1487
+1 -1
fs/btrfs/free-space-cache.c
··· 298 298 299 299 memset(io_ctl, 0, sizeof(struct io_ctl)); 300 300 301 - io_ctl->pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS); 301 + io_ctl->pages = kcalloc(num_pages, sizeof(struct page *), GFP_NOFS); 302 302 if (!io_ctl->pages) 303 303 return -ENOMEM; 304 304
+1 -1
fs/btrfs/inode.c
··· 463 463 */ 464 464 if (inode_need_compress(inode)) { 465 465 WARN_ON(pages); 466 - pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS); 466 + pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS); 467 467 if (!pages) { 468 468 /* just bail out to the uncompressed code */ 469 469 goto cont;
+1 -2
fs/btrfs/raid56.c
··· 1807 1807 int err; 1808 1808 int i; 1809 1809 1810 - pointers = kzalloc(rbio->real_stripes * sizeof(void *), 1811 - GFP_NOFS); 1810 + pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS); 1812 1811 if (!pointers) { 1813 1812 err = -ENOMEM; 1814 1813 goto cleanup_io;
+2 -3
fs/btrfs/scrub.c
··· 964 964 * the statistics. 965 965 */ 966 966 967 - sblocks_for_recheck = kzalloc(BTRFS_MAX_MIRRORS * 968 - sizeof(*sblocks_for_recheck), 969 - GFP_NOFS); 967 + sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS, 968 + sizeof(*sblocks_for_recheck), GFP_NOFS); 970 969 if (!sblocks_for_recheck) { 971 970 spin_lock(&sctx->stat_lock); 972 971 sctx->stat.malloc_errors++;
+2 -2
fs/btrfs/volumes.c
··· 4288 4288 max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1), 4289 4289 max_chunk_size); 4290 4290 4291 - devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices, 4291 + devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info), 4292 4292 GFP_NOFS); 4293 4293 if (!devices_info) 4294 4294 return -ENOMEM; ··· 5542 5542 rmap_len = map->stripe_len * nr_data_stripes(map); 5543 5543 } 5544 5544 5545 - buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS); 5545 + buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS); 5546 5546 BUG_ON(!buf); /* -ENOMEM */ 5547 5547 5548 5548 for (i = 0; i < map->num_stripes; i++) {