Convert remaining multi-line kmalloc_obj/flex GFP_KERNEL uses

Conversion performed via this Coccinelle script:

// SPDX-License-Identifier: GPL-2.0-only
// Options: --include-headers-for-types --all-includes --include-headers --keep-comments
virtual patch

@gfp depends on patch && !(file in "tools") && !(file in "samples")@
identifier ALLOC = {kmalloc_obj,kmalloc_objs,kmalloc_flex,
kzalloc_obj,kzalloc_objs,kzalloc_flex,
kvmalloc_obj,kvmalloc_objs,kvmalloc_flex,
kvzalloc_obj,kvzalloc_objs,kvzalloc_flex};
@@

ALLOC(...
- , GFP_KERNEL
)

$ make coccicheck MODE=patch COCCI=gfp.cocci

Build and boot tested x86_64 with Fedora 42's GCC and Clang:

Linux version 6.19.0+ (user@host) (gcc (GCC) 15.2.1 20260123 (Red Hat 15.2.1-7), GNU ld version 2.44-12.fc42) #1 SMP PREEMPT_DYNAMIC 1970-01-01
Linux version 6.19.0+ (user@host) (clang version 20.1.8 (Fedora 20.1.8-4.fc42), LLD 20.1.8) #1 SMP PREEMPT_DYNAMIC 1970-01-01

Signed-off-by: Kees Cook <kees@kernel.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Kees Cook and committed by Linus Torvalds.
189f164e 32a92f8c

+612 -833
+1 -1
arch/arm/probes/kprobes/test-core.c
··· 764 764 static int coverage_start(const union decode_item *table) 765 765 { 766 766 coverage.base = kmalloc_objs(struct coverage_entry, 767 - MAX_COVERAGE_ENTRIES, GFP_KERNEL); 767 + MAX_COVERAGE_ENTRIES); 768 768 coverage.num_entries = 0; 769 769 coverage.nesting = 0; 770 770 return table_iter(table, coverage_start_fn, &coverage);
+2 -3
arch/mips/kernel/smp-cps.c
··· 342 342 /* Allocate cluster boot configuration structs */ 343 343 nclusters = mips_cps_numclusters(); 344 344 mips_cps_cluster_bootcfg = kzalloc_objs(*mips_cps_cluster_bootcfg, 345 - nclusters, GFP_KERNEL); 345 + nclusters); 346 346 if (!mips_cps_cluster_bootcfg) 347 347 goto err_out; 348 348 ··· 368 368 int v; 369 369 core_vpes = core_vpe_count(cl, c); 370 370 core_bootcfg[c].vpe_config = kzalloc_objs(*core_bootcfg[c].vpe_config, 371 - core_vpes, 372 - GFP_KERNEL); 371 + core_vpes); 373 372 for (v = 0; v < core_vpes; v++) 374 373 cpumask_set_cpu(nvpe++, &mips_cps_cluster_bootcfg[cl].cpumask); 375 374 if (!core_bootcfg[c].vpe_config)
+3 -6
arch/powerpc/kvm/e500_mmu.c
··· 913 913 vcpu_e500->gtlb_params[1].sets = 1; 914 914 915 915 vcpu_e500->gtlb_arch = kmalloc_objs(*vcpu_e500->gtlb_arch, 916 - KVM_E500_TLB0_SIZE + KVM_E500_TLB1_SIZE, 917 - GFP_KERNEL); 916 + KVM_E500_TLB0_SIZE + KVM_E500_TLB1_SIZE); 918 917 if (!vcpu_e500->gtlb_arch) 919 918 return -ENOMEM; 920 919 ··· 921 922 vcpu_e500->gtlb_offset[1] = KVM_E500_TLB0_SIZE; 922 923 923 924 vcpu_e500->gtlb_priv[0] = kzalloc_objs(struct tlbe_ref, 924 - vcpu_e500->gtlb_params[0].entries, 925 - GFP_KERNEL); 925 + vcpu_e500->gtlb_params[0].entries); 926 926 if (!vcpu_e500->gtlb_priv[0]) 927 927 goto free_vcpu; 928 928 929 929 vcpu_e500->gtlb_priv[1] = kzalloc_objs(struct tlbe_ref, 930 - vcpu_e500->gtlb_params[1].entries, 931 - GFP_KERNEL); 930 + vcpu_e500->gtlb_params[1].entries); 932 931 if (!vcpu_e500->gtlb_priv[1]) 933 932 goto free_vcpu; 934 933
+1 -2
arch/powerpc/perf/imc-pmu.c
··· 1699 1699 /* Needed for hotplug/migration */ 1700 1700 if (!per_nest_pmu_arr) { 1701 1701 per_nest_pmu_arr = kzalloc_objs(struct imc_pmu *, 1702 - get_max_nest_dev() + 1, 1703 - GFP_KERNEL); 1702 + get_max_nest_dev() + 1); 1704 1703 if (!per_nest_pmu_arr) 1705 1704 goto err; 1706 1705 }
+1 -1
arch/powerpc/platforms/cell/spufs/file.c
··· 2282 2282 } 2283 2283 2284 2284 ctx->switch_log = kmalloc_flex(*ctx->switch_log, log, 2285 - SWITCH_LOG_BUFSIZE, GFP_KERNEL); 2285 + SWITCH_LOG_BUFSIZE); 2286 2286 2287 2287 if (!ctx->switch_log) { 2288 2288 rc = -ENOMEM;
+1 -1
arch/powerpc/platforms/powernv/opal-async.c
··· 266 266 267 267 opal_max_async_tokens = be32_to_cpup(async); 268 268 opal_async_tokens = kzalloc_objs(*opal_async_tokens, 269 - opal_max_async_tokens, GFP_KERNEL); 269 + opal_max_async_tokens); 270 270 if (!opal_async_tokens) { 271 271 err = -ENOMEM; 272 272 goto out_opal_node;
+1 -2
arch/powerpc/platforms/pseries/papr_platform_attributes.c
··· 314 314 /* Allocate the groups before registering */ 315 315 for (idx = 0; idx < num_attrs; idx++) { 316 316 papr_groups[idx].pg.attrs = kzalloc_objs(*papr_groups[idx].pg.attrs, 317 - KOBJ_MAX_ATTRS + 1, 318 - GFP_KERNEL); 317 + KOBJ_MAX_ATTRS + 1); 319 318 if (!papr_groups[idx].pg.attrs) 320 319 goto out_pgattrs; 321 320
+1 -1
arch/riscv/kernel/module.c
··· 754 754 755 755 /* Number of relocations may be large, so kvmalloc it */ 756 756 *relocation_hashtable = kvmalloc_objs(**relocation_hashtable, 757 - hashtable_size, GFP_KERNEL); 757 + hashtable_size); 758 758 if (!*relocation_hashtable) 759 759 return 0; 760 760
+1 -1
arch/riscv/kvm/vcpu_sbi_fwft.c
··· 353 353 int i; 354 354 355 355 fwft->configs = kzalloc_objs(struct kvm_sbi_fwft_config, 356 - ARRAY_SIZE(features), GFP_KERNEL); 356 + ARRAY_SIZE(features)); 357 357 if (!fwft->configs) 358 358 return -ENOMEM; 359 359
+1 -2
arch/s390/kernel/perf_pai.c
··· 282 282 need_paiext_cb = true; 283 283 } 284 284 cpump->save = kvmalloc_objs(struct pai_userdata, 285 - pai_pmu[idx].num_avail + 1, 286 - GFP_KERNEL); 285 + pai_pmu[idx].num_avail + 1); 287 286 if (!cpump->area || !cpump->save || 288 287 (need_paiext_cb && !cpump->paiext_cb)) { 289 288 pai_free(mp);
+2 -2
arch/um/drivers/ubd_kern.c
··· 1070 1070 return -1; 1071 1071 1072 1072 irq_req_buffer = kmalloc_objs(struct io_thread_req *, 1073 - UBD_REQ_BUFFER_SIZE, GFP_KERNEL); 1073 + UBD_REQ_BUFFER_SIZE); 1074 1074 irq_remainder = 0; 1075 1075 1076 1076 if (irq_req_buffer == NULL) { ··· 1078 1078 return -ENOMEM; 1079 1079 } 1080 1080 io_req_buffer = kmalloc_objs(struct io_thread_req *, 1081 - UBD_REQ_BUFFER_SIZE, GFP_KERNEL); 1081 + UBD_REQ_BUFFER_SIZE); 1082 1082 1083 1083 io_remainder = 0; 1084 1084
+1 -1
arch/um/drivers/vfio_kern.c
··· 515 515 } 516 516 517 517 dev->intr_ctx = kmalloc_objs(struct uml_vfio_intr_ctx, 518 - dev->udev.irq_count, GFP_KERNEL); 518 + dev->udev.irq_count); 519 519 if (!dev->intr_ctx) { 520 520 pr_err("Failed to allocate interrupt context (%s)\n", 521 521 dev->name);
+1 -1
arch/x86/events/intel/core.c
··· 7379 7379 7380 7380 x86_pmu.num_hybrid_pmus = hweight_long(pmus_mask); 7381 7381 x86_pmu.hybrid_pmu = kzalloc_objs(struct x86_hybrid_pmu, 7382 - x86_pmu.num_hybrid_pmus, GFP_KERNEL); 7382 + x86_pmu.num_hybrid_pmus); 7383 7383 if (!x86_pmu.hybrid_pmu) 7384 7384 return -ENOMEM; 7385 7385
+1 -2
arch/x86/events/intel/uncore_discovery.c
··· 745 745 int i = 0; 746 746 747 747 uncores = kzalloc_objs(struct intel_uncore_type *, 748 - num_discovered_types[type_id] + num_extra + 1, 749 - GFP_KERNEL); 748 + num_discovered_types[type_id] + num_extra + 1); 750 749 if (!uncores) 751 750 return empty_uncore; 752 751
+1 -1
arch/x86/kvm/vmx/tdx.c
··· 2407 2407 /* TDVPS = TDVPR(4K page) + TDCX(multiple 4K pages), -1 for TDVPR. */ 2408 2408 kvm_tdx->td.tdcx_nr_pages = tdx_sysinfo->td_ctrl.tdvps_base_size / PAGE_SIZE - 1; 2409 2409 tdcs_pages = kzalloc_objs(*kvm_tdx->td.tdcs_pages, 2410 - kvm_tdx->td.tdcs_nr_pages, GFP_KERNEL); 2410 + kvm_tdx->td.tdcs_nr_pages); 2411 2411 if (!tdcs_pages) 2412 2412 goto free_tdr; 2413 2413
+1 -1
block/blk-crypto-fallback.c
··· 574 574 goto fail_destroy_profile; 575 575 576 576 blk_crypto_keyslots = kzalloc_objs(blk_crypto_keyslots[0], 577 - blk_crypto_num_keyslots, GFP_KERNEL); 577 + blk_crypto_num_keyslots); 578 578 if (!blk_crypto_keyslots) 579 579 goto fail_free_wq; 580 580
+1 -1
block/blk-zoned.c
··· 1805 1805 1806 1806 disk->zone_wplugs_hash = 1807 1807 kzalloc_objs(struct hlist_head, 1808 - disk_zone_wplugs_hash_size(disk), GFP_KERNEL); 1808 + disk_zone_wplugs_hash_size(disk)); 1809 1809 if (!disk->zone_wplugs_hash) 1810 1810 return -ENOMEM; 1811 1811
+1 -2
drivers/accel/habanalabs/common/context.c
··· 210 210 atomic_set(&ctx->thread_ctx_switch_token, 1); 211 211 ctx->thread_ctx_switch_wait_token = 0; 212 212 ctx->cs_pending = kzalloc_objs(struct hl_fence *, 213 - hdev->asic_prop.max_pending_cs, 214 - GFP_KERNEL); 213 + hdev->asic_prop.max_pending_cs); 215 214 if (!ctx->cs_pending) 216 215 return -ENOMEM; 217 216
+4 -7
drivers/accel/habanalabs/common/device.c
··· 893 893 894 894 if (hdev->asic_prop.completion_queues_count) { 895 895 hdev->cq_wq = kzalloc_objs(struct workqueue_struct *, 896 - hdev->asic_prop.completion_queues_count, 897 - GFP_KERNEL); 896 + hdev->asic_prop.completion_queues_count); 898 897 if (!hdev->cq_wq) { 899 898 rc = -ENOMEM; 900 899 goto asid_fini; ··· 2158 2159 2159 2160 if (user_interrupt_cnt) { 2160 2161 hdev->user_interrupt = kzalloc_objs(*hdev->user_interrupt, 2161 - user_interrupt_cnt, 2162 - GFP_KERNEL); 2162 + user_interrupt_cnt); 2163 2163 if (!hdev->user_interrupt) { 2164 2164 rc = -ENOMEM; 2165 2165 goto early_fini; ··· 2225 2227 */ 2226 2228 if (cq_cnt) { 2227 2229 hdev->completion_queue = kzalloc_objs(*hdev->completion_queue, 2228 - cq_cnt, GFP_KERNEL); 2230 + cq_cnt); 2229 2231 2230 2232 if (!hdev->completion_queue) { 2231 2233 dev_err(hdev->dev, ··· 2247 2249 } 2248 2250 2249 2251 hdev->shadow_cs_queue = kzalloc_objs(struct hl_cs *, 2250 - hdev->asic_prop.max_pending_cs, 2251 - GFP_KERNEL); 2252 + hdev->asic_prop.max_pending_cs); 2252 2253 if (!hdev->shadow_cs_queue) { 2253 2254 rc = -ENOMEM; 2254 2255 goto cq_fini;
+1 -1
drivers/accel/habanalabs/common/hw_queue.c
··· 1083 1083 int i, rc, q_ready_cnt; 1084 1084 1085 1085 hdev->kernel_queues = kzalloc_objs(*hdev->kernel_queues, 1086 - asic->max_queues, GFP_KERNEL); 1086 + asic->max_queues); 1087 1087 1088 1088 if (!hdev->kernel_queues) { 1089 1089 dev_err(hdev->dev, "Not enough memory for H/W queues\n");
+1 -1
drivers/accel/habanalabs/common/hwmon.c
··· 196 196 } 197 197 198 198 channels_info = kzalloc_objs(struct hwmon_channel_info *, 199 - num_active_sensor_types + 1, GFP_KERNEL); 199 + num_active_sensor_types + 1); 200 200 if (!channels_info) { 201 201 rc = -ENOMEM; 202 202 goto channels_info_array_err;
+1 -1
drivers/accel/habanalabs/gaudi/gaudi.c
··· 540 540 541 541 prop->max_queues = GAUDI_QUEUE_ID_SIZE; 542 542 prop->hw_queues_props = kzalloc_objs(struct hw_queue_properties, 543 - prop->max_queues, GFP_KERNEL); 543 + prop->max_queues); 544 544 545 545 if (!prop->hw_queues_props) 546 546 return -ENOMEM;
+4 -7
drivers/accel/habanalabs/gaudi2/gaudi2.c
··· 2763 2763 2764 2764 prop->max_queues = GAUDI2_QUEUE_ID_SIZE; 2765 2765 prop->hw_queues_props = kzalloc_objs(struct hw_queue_properties, 2766 - prop->max_queues, GFP_KERNEL); 2766 + prop->max_queues); 2767 2767 2768 2768 if (!prop->hw_queues_props) 2769 2769 return -ENOMEM; ··· 3944 3944 prop->glbl_err_max_cause_num = GAUDI2_GLBL_ERR_MAX_CAUSE_NUM; 3945 3945 prop->num_of_special_blocks = ARRAY_SIZE(gaudi2_special_blocks); 3946 3946 prop->special_blocks = kmalloc_objs(*prop->special_blocks, 3947 - prop->num_of_special_blocks, 3948 - GFP_KERNEL); 3947 + prop->num_of_special_blocks); 3949 3948 if (!prop->special_blocks) 3950 3949 return -ENOMEM; 3951 3950 ··· 3959 3960 if (ARRAY_SIZE(gaudi2_iterator_skip_block_types)) { 3960 3961 prop->skip_special_blocks_cfg.block_types = 3961 3962 kmalloc_objs(gaudi2_iterator_skip_block_types[0], 3962 - ARRAY_SIZE(gaudi2_iterator_skip_block_types), 3963 - GFP_KERNEL); 3963 + ARRAY_SIZE(gaudi2_iterator_skip_block_types)); 3964 3964 if (!prop->skip_special_blocks_cfg.block_types) { 3965 3965 rc = -ENOMEM; 3966 3966 goto free_special_blocks; ··· 3975 3977 if (ARRAY_SIZE(gaudi2_iterator_skip_block_ranges)) { 3976 3978 prop->skip_special_blocks_cfg.block_ranges = 3977 3979 kmalloc_objs(gaudi2_iterator_skip_block_ranges[0], 3978 - ARRAY_SIZE(gaudi2_iterator_skip_block_ranges), 3979 - GFP_KERNEL); 3980 + ARRAY_SIZE(gaudi2_iterator_skip_block_ranges)); 3980 3981 if (!prop->skip_special_blocks_cfg.block_ranges) { 3981 3982 rc = -ENOMEM; 3982 3983 goto free_skip_special_blocks_types;
+1 -1
drivers/accel/habanalabs/goya/goya.c
··· 364 364 365 365 prop->max_queues = GOYA_QUEUE_ID_SIZE; 366 366 prop->hw_queues_props = kzalloc_objs(struct hw_queue_properties, 367 - prop->max_queues, GFP_KERNEL); 367 + prop->max_queues); 368 368 369 369 if (!prop->hw_queues_props) 370 370 return -ENOMEM;
+1 -2
drivers/accel/rocket/rocket_job.c
··· 497 497 { 498 498 struct rocket_device *rdev = rocket_priv->rdev; 499 499 struct drm_gpu_scheduler **scheds = kmalloc_objs(*scheds, 500 - rdev->num_cores, 501 - GFP_KERNEL); 500 + rdev->num_cores); 502 501 unsigned int core; 503 502 int ret; 504 503
+2 -3
drivers/acpi/acpi_video.c
··· 837 837 * special levels (see below) 838 838 */ 839 839 br->levels = kmalloc_objs(*br->levels, 840 - obj->package.count + ACPI_VIDEO_FIRST_LEVEL, 841 - GFP_KERNEL); 840 + obj->package.count + ACPI_VIDEO_FIRST_LEVEL); 842 841 if (!br->levels) { 843 842 result = -ENOMEM; 844 843 goto out_free; ··· 1330 1331 dod->package.count); 1331 1332 1332 1333 active_list = kzalloc_objs(struct acpi_video_enumerated_device, 1333 - 1 + dod->package.count, GFP_KERNEL); 1334 + 1 + dod->package.count); 1334 1335 if (!active_list) { 1335 1336 status = -ENOMEM; 1336 1337 goto out;
+2 -2
drivers/acpi/riscv/irq.c
··· 343 343 acpi_get_handle(handle, entry->source, &link_handle); 344 344 dep_devices.count = 1; 345 345 dep_devices.handles = kzalloc_objs(*dep_devices.handles, 346 - 1, GFP_KERNEL); 346 + 1); 347 347 if (!dep_devices.handles) { 348 348 acpi_handle_err(handle, "failed to allocate memory\n"); 349 349 continue; ··· 355 355 gsi_handle = riscv_acpi_get_gsi_handle(entry->source_index); 356 356 dep_devices.count = 1; 357 357 dep_devices.handles = kzalloc_objs(*dep_devices.handles, 358 - 1, GFP_KERNEL); 358 + 1); 359 359 if (!dep_devices.handles) { 360 360 acpi_handle_err(handle, "failed to allocate memory\n"); 361 361 continue;
+2 -3
drivers/acpi/x86/s2idle.c
··· 130 130 } 131 131 132 132 lpi_constraints_table = kzalloc_objs(*lpi_constraints_table, 133 - package->package.count, 134 - GFP_KERNEL); 133 + package->package.count); 135 134 136 135 if (!lpi_constraints_table) 137 136 goto free_acpi_buffer; ··· 209 210 return; 210 211 211 212 lpi_constraints_table = kzalloc_objs(*lpi_constraints_table, 212 - out_obj->package.count, GFP_KERNEL); 213 + out_obj->package.count); 213 214 if (!lpi_constraints_table) 214 215 goto free_acpi_buffer; 215 216
+1 -1
drivers/android/binder.c
··· 5903 5903 } 5904 5904 5905 5905 target_procs = kzalloc_objs(struct binder_proc *, 5906 - target_procs_count, GFP_KERNEL); 5906 + target_procs_count); 5907 5907 5908 5908 if (!target_procs) { 5909 5909 mutex_unlock(&binder_procs_lock);
+1 -1
drivers/android/binder_alloc.c
··· 917 917 alloc->vm_start = vma->vm_start; 918 918 919 919 alloc->pages = kvzalloc_objs(alloc->pages[0], 920 - alloc->buffer_size / PAGE_SIZE, GFP_KERNEL); 920 + alloc->buffer_size / PAGE_SIZE); 921 921 if (!alloc->pages) { 922 922 ret = -ENOMEM; 923 923 failure_string = "alloc page array";
+1 -2
drivers/atm/eni.c
··· 1846 1846 buffer_mem = eni_dev->mem - (buf - eni_dev->ram); 1847 1847 eni_dev->free_list_size = buffer_mem/MID_MIN_BUF_SIZE/2; 1848 1848 eni_dev->free_list = kmalloc_objs(*eni_dev->free_list, 1849 - eni_dev->free_list_size + 1, 1850 - GFP_KERNEL); 1849 + eni_dev->free_list_size + 1); 1851 1850 if (!eni_dev->free_list) { 1852 1851 printk(KERN_ERR DEV_LABEL "(itf %d): couldn't get free page\n", 1853 1852 dev->number);
+1 -1
drivers/base/cacheinfo.c
··· 884 884 885 885 /* Allocate all required memory */ 886 886 per_cpu_index_dev(cpu) = kzalloc_objs(struct device *, 887 - cache_leaves(cpu), GFP_KERNEL); 887 + cache_leaves(cpu)); 888 888 if (unlikely(per_cpu_index_dev(cpu) == NULL)) 889 889 goto err_out; 890 890
+1 -1
drivers/block/rnbd/rnbd-clt.c
··· 1418 1418 * nr_poll_queues: the number of polling queues 1419 1419 */ 1420 1420 dev->hw_queues = kzalloc_objs(*dev->hw_queues, 1421 - nr_cpu_ids + nr_poll_queues, GFP_KERNEL); 1421 + nr_cpu_ids + nr_poll_queues); 1422 1422 if (!dev->hw_queues) { 1423 1423 ret = -ENOMEM; 1424 1424 goto out_alloc;
+2 -3
drivers/block/xen-blkfront.c
··· 2209 2209 rinfo->shadow[i].grants_used = 2210 2210 kvzalloc_objs(rinfo->shadow[i].grants_used[0], grants); 2211 2211 rinfo->shadow[i].sg = kvzalloc_objs(rinfo->shadow[i].sg[0], 2212 - psegs, GFP_KERNEL); 2212 + psegs); 2213 2213 if (info->max_indirect_segments) 2214 2214 rinfo->shadow[i].indirect_grants = 2215 2215 kvzalloc_objs(rinfo->shadow[i].indirect_grants[0], 2216 - INDIRECT_GREFS(grants), 2217 - GFP_KERNEL); 2216 + INDIRECT_GREFS(grants)); 2218 2217 if ((rinfo->shadow[i].grants_used == NULL) || 2219 2218 (rinfo->shadow[i].sg == NULL) || 2220 2219 (info->max_indirect_segments &&
+2 -2
drivers/bus/mhi/ep/main.c
··· 1137 1137 mhi_ep_mmio_init(mhi_cntrl); 1138 1138 1139 1139 mhi_cntrl->mhi_event = kzalloc_objs(*mhi_cntrl->mhi_event, 1140 - mhi_cntrl->event_rings, GFP_KERNEL); 1140 + mhi_cntrl->event_rings); 1141 1141 if (!mhi_cntrl->mhi_event) 1142 1142 return -ENOMEM; 1143 1143 ··· 1400 1400 * only the defined channels 1401 1401 */ 1402 1402 mhi_cntrl->mhi_chan = kzalloc_objs(*mhi_cntrl->mhi_chan, 1403 - mhi_cntrl->max_chan, GFP_KERNEL); 1403 + mhi_cntrl->max_chan); 1404 1404 if (!mhi_cntrl->mhi_chan) 1405 1405 return -ENOMEM; 1406 1406
+1 -1
drivers/bus/mhi/ep/ring.c
··· 206 206 207 207 /* Allocate ring cache memory for holding the copy of host ring */ 208 208 ring->ring_cache = kzalloc_objs(struct mhi_ring_element, 209 - ring->ring_size, GFP_KERNEL); 209 + ring->ring_size); 210 210 if (!ring->ring_cache) 211 211 return -ENOMEM; 212 212
+1 -1
drivers/clk/aspeed/clk-ast2600.c
··· 839 839 soc_rev = (readl(scu_g6_base + ASPEED_G6_SILICON_REV) & CHIP_REVISION_ID) >> 16; 840 840 841 841 aspeed_g6_clk_data = kzalloc_flex(*aspeed_g6_clk_data, hws, 842 - ASPEED_G6_NUM_CLKS, GFP_KERNEL); 842 + ASPEED_G6_NUM_CLKS); 843 843 if (!aspeed_g6_clk_data) 844 844 return; 845 845 aspeed_g6_clk_data->num = ASPEED_G6_NUM_CLKS;
+1 -1
drivers/clk/clk-clps711x.c
··· 54 54 BUG_ON(!base); 55 55 56 56 clps711x_clk = kzalloc_flex(*clps711x_clk, clk_data.hws, 57 - CLPS711X_CLK_MAX, GFP_KERNEL); 57 + CLPS711X_CLK_MAX); 58 58 BUG_ON(!clps711x_clk); 59 59 60 60 spin_lock_init(&clps711x_clk->lock);
+1 -1
drivers/clk/clk-npcm7xx.c
··· 422 422 goto npcm7xx_init_error; 423 423 424 424 npcm7xx_clk_data = kzalloc_flex(*npcm7xx_clk_data, hws, 425 - NPCM7XX_NUM_CLOCKS, GFP_KERNEL); 425 + NPCM7XX_NUM_CLOCKS); 426 426 if (!npcm7xx_clk_data) 427 427 goto npcm7xx_init_np_err; 428 428
+1 -1
drivers/clocksource/ingenic-sysost.c
··· 459 459 ost->soc_info = id->data; 460 460 461 461 ost->clocks = kzalloc_flex(*ost->clocks, hws, 462 - ost->soc_info->num_channels, GFP_KERNEL); 462 + ost->soc_info->num_channels); 463 463 if (!ost->clocks) { 464 464 ret = -ENOMEM; 465 465 goto err_clk_disable;
+1 -1
drivers/comedi/drivers/ni_tio.c
··· 1795 1795 1796 1796 counter_dev->counters = kzalloc_objs(*counter, num_counters); 1797 1797 counter_dev->regs = kzalloc_objs(*counter_dev->regs, 1798 - counter_dev->num_chips, GFP_KERNEL); 1798 + counter_dev->num_chips); 1799 1799 if (!counter_dev->regs || !counter_dev->counters) { 1800 1800 kfree(counter_dev->regs); 1801 1801 kfree(counter_dev->counters);
+1 -1
drivers/crypto/intel/qat/qat_common/adf_telemetry.c
··· 76 76 goto err_free_tl; 77 77 78 78 telemetry->regs_hist_buff = kmalloc_objs(*telemetry->regs_hist_buff, 79 - tl_data->num_hbuff, GFP_KERNEL); 79 + tl_data->num_hbuff); 80 80 if (!telemetry->regs_hist_buff) 81 81 goto err_free_rp_indexes; 82 82
+2 -4
drivers/crypto/intel/qat/qat_common/qat_uclo.c
··· 1200 1200 1201 1201 if (suof_handle->img_table.num_simgs != 0) { 1202 1202 suof_img_hdr = kzalloc_objs(img_header, 1203 - suof_handle->img_table.num_simgs, 1204 - GFP_KERNEL); 1203 + suof_handle->img_table.num_simgs); 1205 1204 if (!suof_img_hdr) 1206 1205 return -ENOMEM; 1207 1206 suof_handle->img_table.simg_hdr = suof_img_hdr; ··· 1891 1892 sobj_chunk_num = sobj_hdr->num_chunks; 1892 1893 1893 1894 mobj_hdr = kzalloc_objs(*mobj_hdr, 1894 - size_add(uobj_chunk_num, sobj_chunk_num), 1895 - GFP_KERNEL); 1895 + size_add(uobj_chunk_num, sobj_chunk_num)); 1896 1896 if (!mobj_hdr) 1897 1897 return -ENOMEM; 1898 1898
+1 -1
drivers/edac/edac_device.c
··· 78 78 dev_ctl->instances = dev_inst; 79 79 80 80 dev_blk = kzalloc_objs(struct edac_device_block, 81 - nr_instances * nr_blocks, GFP_KERNEL); 81 + nr_instances * nr_blocks); 82 82 if (!dev_blk) 83 83 goto free; 84 84
+1 -1
drivers/edac/sb_edac.c
··· 776 776 return NULL; 777 777 778 778 sbridge_dev->pdev = kzalloc_objs(*sbridge_dev->pdev, 779 - table->n_devs_per_imc, GFP_KERNEL); 779 + table->n_devs_per_imc); 780 780 if (!sbridge_dev->pdev) { 781 781 kfree(sbridge_dev); 782 782 return NULL;
+1 -2
drivers/extcon/extcon.c
··· 1208 1208 return 0; 1209 1209 1210 1210 edev->extcon_dev_type.groups = kzalloc_objs(*edev->extcon_dev_type.groups, 1211 - edev->max_supported + 2, 1212 - GFP_KERNEL); 1211 + edev->max_supported + 2); 1213 1212 if (!edev->extcon_dev_type.groups) 1214 1213 return -ENOMEM; 1215 1214
+2 -4
drivers/firewire/core-iso.c
··· 31 31 int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count) 32 32 { 33 33 struct page **page_array __free(kfree) = kzalloc_objs(page_array[0], 34 - page_count, 35 - GFP_KERNEL); 34 + page_count); 36 35 37 36 if (!page_array) 38 37 return -ENOMEM; ··· 57 58 enum dma_data_direction direction) 58 59 { 59 60 dma_addr_t *dma_addrs __free(kfree) = kzalloc_objs(dma_addrs[0], 60 - buffer->page_count, 61 - GFP_KERNEL); 61 + buffer->page_count); 62 62 int i; 63 63 64 64 if (!dma_addrs)
+1 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
··· 1179 1179 */ 1180 1180 ip_hw_instance = kzalloc_flex(*ip_hw_instance, 1181 1181 base_addr, 1182 - ip->num_base_address, 1183 - GFP_KERNEL); 1182 + ip->num_base_address); 1184 1183 if (!ip_hw_instance) { 1185 1184 DRM_ERROR("no memory for ip_hw_instance"); 1186 1185 return -ENOMEM;
+1 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
··· 1728 1728 bool valid; 1729 1729 1730 1730 adev->gmc.mem_partitions = kzalloc_objs(struct amdgpu_mem_partition_info, 1731 - AMDGPU_MAX_MEM_RANGES, 1732 - GFP_KERNEL); 1731 + AMDGPU_MAX_MEM_RANGES); 1733 1732 if (!adev->gmc.mem_partitions) 1734 1733 return -ENOMEM; 1735 1734
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
··· 437 437 if (!adev->irq.client[client_id].sources) { 438 438 adev->irq.client[client_id].sources = 439 439 kzalloc_objs(struct amdgpu_irq_src *, 440 - AMDGPU_MAX_IRQ_SRC_ID, GFP_KERNEL); 440 + AMDGPU_MAX_IRQ_SRC_ID); 441 441 if (!adev->irq.client[client_id].sources) 442 442 return -ENOMEM; 443 443 }
+2 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
··· 452 452 return -ENOMEM; 453 453 454 454 fmt_attr_group->attrs = kzalloc_objs(*fmt_attr_group->attrs, 455 - config->num_formats + 1, 456 - GFP_KERNEL); 455 + config->num_formats + 1); 457 456 458 457 if (!fmt_attr_group->attrs) 459 458 goto err_fmt_attr_grp; ··· 463 464 goto err_evt_attr; 464 465 465 466 evt_attr_group->attrs = kzalloc_objs(*evt_attr_group->attrs, 466 - config->num_events + 1, GFP_KERNEL); 467 + config->num_events + 1); 467 468 468 469 if (!evt_attr_group->attrs) 469 470 goto err_evt_attr_grp;
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
··· 3239 3239 if (from_rom) { 3240 3240 err_data.err_addr = 3241 3241 kzalloc_objs(struct eeprom_table_record, 3242 - adev->umc.retire_unit, GFP_KERNEL); 3242 + adev->umc.retire_unit); 3243 3243 if (!err_data.err_addr) { 3244 3244 dev_warn(adev->dev, "Failed to alloc UMC error address record in mca2pa conversion!\n"); 3245 3245 return -ENOMEM;
+1 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
··· 1881 1881 return 0; 1882 1882 1883 1883 adev->mman.ttm_pools = kzalloc_objs(*adev->mman.ttm_pools, 1884 - adev->gmc.num_mem_partitions, 1885 - GFP_KERNEL); 1884 + adev->gmc.num_mem_partitions); 1886 1885 if (!adev->mman.ttm_pools) 1887 1886 return -ENOMEM; 1888 1887
+5 -7
drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
··· 59 59 60 60 err_data.err_addr = 61 61 kzalloc_objs(struct eeprom_table_record, 62 - adev->umc.max_ras_err_cnt_per_query, GFP_KERNEL); 62 + adev->umc.max_ras_err_cnt_per_query); 63 63 if (!err_data.err_addr) { 64 64 dev_warn(adev->dev, 65 65 "Failed to alloc memory for umc error record in MCA notifier!\n"); ··· 106 106 107 107 err_data->err_addr = 108 108 kzalloc_objs(struct eeprom_table_record, 109 - adev->umc.max_ras_err_cnt_per_query, GFP_KERNEL); 109 + adev->umc.max_ras_err_cnt_per_query); 110 110 111 111 /* still call query_ras_error_address to clear error status 112 112 * even NOMEM error is encountered ··· 132 132 adev->umc.max_ras_err_cnt_per_query) { 133 133 err_data->err_addr = 134 134 kzalloc_objs(struct eeprom_table_record, 135 - adev->umc.max_ras_err_cnt_per_query, 136 - GFP_KERNEL); 135 + adev->umc.max_ras_err_cnt_per_query); 137 136 138 137 /* still call query_ras_error_address to clear error status 139 138 * even NOMEM error is encountered ··· 162 163 adev->umc.max_ras_err_cnt_per_query) { 163 164 err_data->err_addr = 164 165 kzalloc_objs(struct eeprom_table_record, 165 - adev->umc.max_ras_err_cnt_per_query, 166 - GFP_KERNEL); 166 + adev->umc.max_ras_err_cnt_per_query); 167 167 168 168 /* still call query_ras_error_address to clear error status 169 169 * even NOMEM error is encountered ··· 552 554 struct ras_err_data err_data; 553 555 554 556 err_data.err_addr = kzalloc_objs(struct eeprom_table_record, 555 - adev->umc.retire_unit, GFP_KERNEL); 557 + adev->umc.retire_unit); 556 558 if (!err_data.err_addr) { 557 559 dev_warn(adev->dev, "Failed to alloc memory in bad page lookup!\n"); 558 560 return 0;
+1 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
··· 500 500 struct amdgpu_device *adev = ip_block->adev; 501 501 502 502 adev->amdgpu_vkms_output = kzalloc_objs(struct amdgpu_vkms_output, 503 - adev->mode_info.num_crtc, 504 - GFP_KERNEL); 503 + adev->mode_info.num_crtc); 505 504 if (!adev->amdgpu_vkms_output) 506 505 return -ENOMEM; 507 506
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
··· 781 781 * the amount of memory allocated by user 782 782 */ 783 783 pa = kzalloc_objs(struct kfd_process_device_apertures, 784 - args->num_of_nodes, GFP_KERNEL); 784 + args->num_of_nodes); 785 785 if (!pa) 786 786 return -ENOMEM; 787 787
+1 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
··· 959 959 int i; 960 960 961 961 crtc_ctx = kzalloc_objs(struct secure_display_crtc_context, 962 - adev->mode_info.num_crtc, GFP_KERNEL); 962 + adev->mode_info.num_crtc); 963 963 964 964 if (!crtc_ctx) { 965 965 adev->dm.secure_display_ctx.crtc_ctx = NULL;
+1 -1
drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
··· 2131 2131 display_e2e_pipe_params_st *pipes; 2132 2132 2133 2133 pipes = kzalloc_objs(display_e2e_pipe_params_st, 2134 - dc->res_pool->pipe_count, GFP_KERNEL); 2134 + dc->res_pool->pipe_count); 2135 2135 if (!pipes) 2136 2136 return DC_FAIL_BANDWIDTH_VALIDATE; 2137 2137
+1 -1
drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
··· 931 931 display_e2e_pipe_params_st *pipes; 932 932 933 933 pipes = kzalloc_objs(display_e2e_pipe_params_st, 934 - dc->res_pool->pipe_count, GFP_KERNEL); 934 + dc->res_pool->pipe_count); 935 935 if (!pipes) 936 936 return DC_FAIL_BANDWIDTH_VALIDATE; 937 937
+1 -2
drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
··· 2045 2045 int vlevel = 0; 2046 2046 int pipe_cnt = 0; 2047 2047 display_e2e_pipe_params_st *pipes = kzalloc_objs(display_e2e_pipe_params_st, 2048 - dc->res_pool->pipe_count, 2049 - GFP_KERNEL); 2048 + dc->res_pool->pipe_count); 2050 2049 DC_LOGGER_INIT(dc->ctx->logger); 2051 2050 2052 2051 BW_VAL_TRACE_COUNT();
+1 -2
drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
··· 1768 1768 int vlevel = 0; 1769 1769 int pipe_cnt = 0; 1770 1770 display_e2e_pipe_params_st *pipes = kzalloc_objs(display_e2e_pipe_params_st, 1771 - dc->res_pool->pipe_count, 1772 - GFP_KERNEL); 1771 + dc->res_pool->pipe_count); 1773 1772 DC_LOGGER_INIT(dc->ctx->logger); 1774 1773 1775 1774 BW_VAL_TRACE_COUNT();
+1 -2
drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
··· 1706 1706 int vlevel = 0; 1707 1707 int pipe_cnt = 0; 1708 1708 display_e2e_pipe_params_st *pipes = kzalloc_objs(display_e2e_pipe_params_st, 1709 - dc->res_pool->pipe_count, 1710 - GFP_KERNEL); 1709 + dc->res_pool->pipe_count); 1711 1710 DC_LOGGER_INIT(dc->ctx->logger); 1712 1711 1713 1712 BW_VAL_TRACE_COUNT();
+1 -2
drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
··· 1751 1751 int vlevel = 0; 1752 1752 int pipe_cnt = 0; 1753 1753 display_e2e_pipe_params_st *pipes = kzalloc_objs(display_e2e_pipe_params_st, 1754 - dc->res_pool->pipe_count, 1755 - GFP_KERNEL); 1754 + dc->res_pool->pipe_count); 1756 1755 1757 1756 /* To handle Freesync properly, setting FreeSync DML parameters 1758 1757 * to its default state for the first stage of validation
+3 -6
drivers/gpu/drm/amd/display/modules/color/color_gamma.c
··· 1715 1715 1716 1716 if (map_user_ramp && ramp && ramp->type == GAMMA_RGB_256) { 1717 1717 rgb_user = kvzalloc_objs(*rgb_user, 1718 - ramp->num_entries + _EXTRA_POINTS, 1719 - GFP_KERNEL); 1718 + ramp->num_entries + _EXTRA_POINTS); 1720 1719 if (!rgb_user) 1721 1720 goto rgb_user_alloc_fail; 1722 1721 1723 1722 axis_x = kvzalloc_objs(*axis_x, 1724 - ramp->num_entries + _EXTRA_POINTS, 1725 - GFP_KERNEL); 1723 + ramp->num_entries + _EXTRA_POINTS); 1726 1724 if (!axis_x) 1727 1725 goto axis_x_alloc_fail; 1728 1726 ··· 1938 1940 if (ramp && ramp->type != GAMMA_CS_TFM_1D && 1939 1941 (map_user_ramp || ramp->type != GAMMA_RGB_256)) { 1940 1942 rgb_user = kvzalloc_objs(*rgb_user, 1941 - ramp->num_entries + _EXTRA_POINTS, 1942 - GFP_KERNEL); 1943 + ramp->num_entries + _EXTRA_POINTS); 1943 1944 if (!rgb_user) 1944 1945 goto rgb_user_alloc_fail; 1945 1946
+1 -1
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
··· 2725 2725 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); 2726 2726 2727 2727 adev->pm.dpm.ps = kzalloc_objs(struct amdgpu_ps, 2728 - state_array->ucNumEntries, GFP_KERNEL); 2728 + state_array->ucNumEntries); 2729 2729 if (!adev->pm.dpm.ps) 2730 2730 return -ENOMEM; 2731 2731 power_state_offset = (u8 *)state_array->states;
+1 -1
drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
··· 303 303 304 304 adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries = 305 305 kzalloc_objs(struct amdgpu_phase_shedding_limits_entry, 306 - psl->ucNumEntries, GFP_KERNEL); 306 + psl->ucNumEntries); 307 307 if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) 308 308 return -ENOMEM; 309 309
+1 -1
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
··· 7342 7342 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); 7343 7343 7344 7344 adev->pm.dpm.ps = kzalloc_objs(struct amdgpu_ps, 7345 - state_array->ucNumEntries, GFP_KERNEL); 7345 + state_array->ucNumEntries); 7346 7346 if (!adev->pm.dpm.ps) 7347 7347 return -ENOMEM; 7348 7348 power_state_offset = (u8 *)state_array->states;
+6 -9
drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
··· 371 371 "Invalid PowerPlay Table!", return -1); 372 372 373 373 mclk_table = kzalloc_flex(*mclk_table, entries, 374 - mclk_dep_table->ucNumEntries, GFP_KERNEL); 374 + mclk_dep_table->ucNumEntries); 375 375 if (!mclk_table) 376 376 return -ENOMEM; 377 377 ··· 415 415 "Invalid PowerPlay Table!", return -1); 416 416 417 417 sclk_table = kzalloc_flex(*sclk_table, entries, 418 - tonga_table->ucNumEntries, GFP_KERNEL); 418 + tonga_table->ucNumEntries); 419 419 if (!sclk_table) 420 420 return -ENOMEM; 421 421 ··· 444 444 "Invalid PowerPlay Table!", return -1); 445 445 446 446 sclk_table = kzalloc_flex(*sclk_table, entries, 447 - polaris_table->ucNumEntries, 448 - GFP_KERNEL); 447 + polaris_table->ucNumEntries); 449 448 if (!sclk_table) 450 449 return -ENOMEM; 451 450 ··· 491 492 "Invalid PowerPlay Table!", return -1); 492 493 493 494 pcie_table = kzalloc_flex(*pcie_table, entries, 494 - atom_pcie_table->ucNumEntries, 495 - GFP_KERNEL); 495 + atom_pcie_table->ucNumEntries); 496 496 if (!pcie_table) 497 497 return -ENOMEM; 498 498 ··· 527 529 "Invalid PowerPlay Table!", return -1); 528 530 529 531 pcie_table = kzalloc_flex(*pcie_table, entries, 530 - atom_pcie_table->ucNumEntries, 531 - GFP_KERNEL); 532 + atom_pcie_table->ucNumEntries); 532 533 if (!pcie_table) 533 534 return -ENOMEM; 534 535 ··· 722 725 PP_ASSERT_WITH_CODE((0 != mm_dependency_table->ucNumEntries), 723 726 "Invalid PowerPlay Table!", return -1); 724 727 mm_table = kzalloc_flex(*mm_table, entries, 725 - mm_dependency_table->ucNumEntries, GFP_KERNEL); 728 + mm_dependency_table->ucNumEntries); 726 729 if (!mm_table) 727 730 return -ENOMEM; 728 731
+2 -2
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
··· 1483 1483 return -EINVAL; 1484 1484 1485 1485 cac_leakage_table = kzalloc_flex(*cac_leakage_table, entries, 1486 - table->ucNumEntries, GFP_KERNEL); 1486 + table->ucNumEntries); 1487 1487 if (!cac_leakage_table) 1488 1488 return -ENOMEM; 1489 1489 ··· 1621 1621 1622 1622 1623 1623 table = kzalloc_flex(*table, entries, 1624 - ptable->ucNumEntries, GFP_KERNEL); 1624 + ptable->ucNumEntries); 1625 1625 if (!table) 1626 1626 return -ENOMEM; 1627 1627
+6 -6
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c
··· 351 351 "Invalid PowerPlay Table!", return -1); 352 352 353 353 mm_table = kzalloc_flex(*mm_table, entries, 354 - mm_dependency_table->ucNumEntries, GFP_KERNEL); 354 + mm_dependency_table->ucNumEntries); 355 355 if (!mm_table) 356 356 return -ENOMEM; 357 357 ··· 574 574 "Invalid PowerPlay Table!", return -1); 575 575 576 576 clk_table = kzalloc_flex(*clk_table, entries, 577 - clk_dep_table->ucNumEntries, GFP_KERNEL); 577 + clk_dep_table->ucNumEntries); 578 578 if (!clk_table) 579 579 return -ENOMEM; 580 580 ··· 604 604 "Invalid PowerPlay Table!", return -1); 605 605 606 606 mclk_table = kzalloc_flex(*mclk_table, entries, 607 - mclk_dep_table->ucNumEntries, GFP_KERNEL); 607 + mclk_dep_table->ucNumEntries); 608 608 if (!mclk_table) 609 609 return -ENOMEM; 610 610 ··· 641 641 "Invalid PowerPlay Table!", return -1); 642 642 643 643 clk_table = kzalloc_flex(*clk_table, entries, 644 - clk_dep_table->ucNumEntries, GFP_KERNEL); 644 + clk_dep_table->ucNumEntries); 645 645 if (!clk_table) 646 646 return -ENOMEM; 647 647 ··· 703 703 "Invalid PowerPlay Table!", return -1); 704 704 705 705 clk_table = kzalloc_flex(*clk_table, entries, 706 - clk_dep_table->ucNumEntries, GFP_KERNEL); 706 + clk_dep_table->ucNumEntries); 707 707 if (!clk_table) 708 708 return -ENOMEM; 709 709 ··· 794 794 return 0); 795 795 796 796 pcie_table = kzalloc_flex(*pcie_table, entries, 797 - atom_pcie_table->ucNumEntries, GFP_KERNEL); 797 + atom_pcie_table->ucNumEntries); 798 798 if (!pcie_table) 799 799 return -ENOMEM; 800 800
+1 -2
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
··· 2400 2400 return -EINVAL; 2401 2401 2402 2402 activity_monitor_external = kzalloc_objs(*activity_monitor_external, 2403 - PP_SMC_POWER_PROFILE_COUNT, 2404 - GFP_KERNEL); 2403 + PP_SMC_POWER_PROFILE_COUNT); 2405 2404 if (!activity_monitor_external) 2406 2405 return -ENOMEM; 2407 2406
+1 -2
drivers/gpu/drm/display/drm_dp_mst_topology.c
··· 4605 4605 4606 4606 num_commit_deps = hweight32(mst_state->pending_crtc_mask); 4607 4607 mst_state->commit_deps = kmalloc_objs(*mst_state->commit_deps, 4608 - num_commit_deps, 4609 - GFP_KERNEL); 4608 + num_commit_deps); 4610 4609 if (!mst_state->commit_deps) 4611 4610 return -ENOMEM; 4612 4611 mst_state->num_commit_deps = num_commit_deps;
+2 -3
drivers/gpu/drm/drm_atomic.c
··· 136 136 if (!state->crtcs) 137 137 goto fail; 138 138 state->planes = kzalloc_objs(*state->planes, 139 - dev->mode_config.num_total_plane, 140 - GFP_KERNEL); 139 + dev->mode_config.num_total_plane); 141 140 if (!state->planes) 142 141 goto fail; 143 142 state->colorops = kzalloc_objs(*state->colorops, 144 - dev->mode_config.num_colorop, GFP_KERNEL); 143 + dev->mode_config.num_colorop); 145 144 if (!state->colorops) 146 145 goto fail; 147 146
+1 -1
drivers/gpu/drm/drm_buddy.c
··· 326 326 327 327 for_each_free_tree(i) { 328 328 mm->free_trees[i] = kmalloc_objs(struct rb_root, 329 - mm->max_order + 1, GFP_KERNEL); 329 + mm->max_order + 1); 330 330 if (!mm->free_trees[i]) 331 331 goto out_free_tree; 332 332
+1 -2
drivers/gpu/drm/drm_client_modeset.c
··· 59 59 60 60 for (modeset = client->modesets; modeset->crtc; modeset++) { 61 61 modeset->connectors = kzalloc_objs(*modeset->connectors, 62 - max_connector_count, 63 - GFP_KERNEL); 62 + max_connector_count); 64 63 if (!modeset->connectors) 65 64 goto err_free; 66 65 }
+1 -2
drivers/gpu/drm/drm_crtc.c
··· 846 846 } 847 847 848 848 connector_set = kmalloc_objs(struct drm_connector *, 849 - crtc_req->count_connectors, 850 - GFP_KERNEL); 849 + crtc_req->count_connectors); 851 850 if (!connector_set) { 852 851 ret = -ENOMEM; 853 852 goto out;
+2 -4
drivers/gpu/drm/drm_crtc_helper.c
··· 603 603 * connector data. 604 604 */ 605 605 save_encoder_crtcs = kzalloc_objs(struct drm_crtc *, 606 - dev->mode_config.num_encoder, 607 - GFP_KERNEL); 606 + dev->mode_config.num_encoder); 608 607 if (!save_encoder_crtcs) 609 608 return -ENOMEM; 610 609 611 610 save_connector_encoders = kzalloc_objs(struct drm_encoder *, 612 - dev->mode_config.num_connector, 613 - GFP_KERNEL); 611 + dev->mode_config.num_connector); 614 612 if (!save_connector_encoders) { 615 613 kfree(save_encoder_crtcs); 616 614 return -ENOMEM;
+1 -1
drivers/gpu/drm/i915/display/intel_display_power_map.c
··· 1824 1824 power_domains->power_well_count = power_well_count; 1825 1825 power_domains->power_wells = 1826 1826 kzalloc_objs(*power_domains->power_wells, 1827 - power_well_count, GFP_KERNEL); 1827 + power_well_count); 1828 1828 if (!power_domains->power_wells) 1829 1829 return -ENOMEM; 1830 1830
+1 -2
drivers/gpu/drm/i915/display/intel_hdcp.c
··· 2328 2328 2329 2329 if (!data->streams) 2330 2330 data->streams = kzalloc_objs(struct hdcp2_streamid_type, 2331 - INTEL_NUM_PIPES(display), 2332 - GFP_KERNEL); 2331 + INTEL_NUM_PIPES(display)); 2333 2332 if (!data->streams) { 2334 2333 drm_err(display->drm, "Out of Memory\n"); 2335 2334 return -ENOMEM;
+1 -2
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
··· 990 990 991 991 for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) { 992 992 new->reginfo[i].regs = kzalloc_objs(struct guc_mmio_reg, 993 - guc->capture->max_mmio_per_node, 994 - GFP_KERNEL); 993 + guc->capture->max_mmio_per_node); 995 994 if (!new->reginfo[i].regs) { 996 995 while (i) 997 996 kfree(new->reginfo[--i].regs);
+2 -4
drivers/gpu/drm/nouveau/nouveau_sched.c
··· 70 70 } 71 71 72 72 job->out_sync.objs = kzalloc_objs(*job->out_sync.objs, 73 - job->out_sync.count, 74 - GFP_KERNEL); 73 + job->out_sync.count); 75 74 if (!job->out_sync.objs) { 76 75 ret = -ENOMEM; 77 76 goto err_free_out_sync; 78 77 } 79 78 80 79 job->out_sync.chains = kzalloc_objs(*job->out_sync.chains, 81 - job->out_sync.count, 82 - GFP_KERNEL); 80 + job->out_sync.count); 83 81 if (!job->out_sync.chains) { 84 82 ret = -ENOMEM; 85 83 goto err_free_objs;
+1 -1
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
··· 886 886 887 887 /* alloc engines */ 888 888 omap_dmm->engines = kzalloc_objs(*omap_dmm->engines, 889 - omap_dmm->num_engines, GFP_KERNEL); 889 + omap_dmm->num_engines); 890 890 if (!omap_dmm->engines) { 891 891 ret = -ENOMEM; 892 892 goto fail;
+2 -3
drivers/gpu/drm/panthor/panthor_mmu.c
··· 1258 1258 ((ALIGN(va + size, 1ull << 21) - ALIGN_DOWN(va, 1ull << 21)) >> 21); 1259 1259 1260 1260 op_ctx->rsvd_page_tables.pages = kzalloc_objs(*op_ctx->rsvd_page_tables.pages, 1261 - pt_count, GFP_KERNEL); 1261 + pt_count); 1262 1262 if (!op_ctx->rsvd_page_tables.pages) { 1263 1263 ret = -ENOMEM; 1264 1264 goto err_cleanup; ··· 1312 1312 1313 1313 if (pt_count) { 1314 1314 op_ctx->rsvd_page_tables.pages = kzalloc_objs(*op_ctx->rsvd_page_tables.pages, 1315 - pt_count, 1316 - GFP_KERNEL); 1315 + pt_count); 1317 1316 if (!op_ctx->rsvd_page_tables.pages) { 1318 1317 ret = -ENOMEM; 1319 1318 goto err_cleanup;
+1 -2
drivers/gpu/drm/qxl/qxl_display.c
··· 59 59 } 60 60 if (!qdev->client_monitors_config) { 61 61 qdev->client_monitors_config = kzalloc_flex(*qdev->client_monitors_config, 62 - heads, count, 63 - GFP_KERNEL); 62 + heads, count); 64 63 if (!qdev->client_monitors_config) 65 64 return -ENOMEM; 66 65 }
+1 -1
drivers/gpu/drm/radeon/ci_dpm.c
··· 5518 5518 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); 5519 5519 5520 5520 rdev->pm.dpm.ps = kzalloc_objs(struct radeon_ps, 5521 - state_array->ucNumEntries, GFP_KERNEL); 5521 + state_array->ucNumEntries); 5522 5522 if (!rdev->pm.dpm.ps) 5523 5523 return -ENOMEM; 5524 5524 power_state_offset = (u8 *)state_array->states;
+1 -1
drivers/gpu/drm/radeon/kv_dpm.c
··· 2458 2458 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); 2459 2459 2460 2460 rdev->pm.dpm.ps = kzalloc_objs(struct radeon_ps, 2461 - state_array->ucNumEntries, GFP_KERNEL); 2461 + state_array->ucNumEntries); 2462 2462 if (!rdev->pm.dpm.ps) 2463 2463 return -ENOMEM; 2464 2464 power_state_offset = (u8 *)state_array->states;
+1 -2
drivers/gpu/drm/radeon/ni_dpm.c
··· 4001 4001 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); 4002 4002 4003 4003 rdev->pm.dpm.ps = kzalloc_objs(struct radeon_ps, 4004 - power_info->pplib.ucNumStates, 4005 - GFP_KERNEL); 4004 + power_info->pplib.ucNumStates); 4006 4005 if (!rdev->pm.dpm.ps) 4007 4006 return -ENOMEM; 4008 4007
+2 -3
drivers/gpu/drm/radeon/r600_dpm.c
··· 822 822 ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry; 823 823 824 824 radeon_table->entries = kzalloc_objs(struct radeon_clock_voltage_dependency_entry, 825 - atom_table->ucNumEntries, 826 - GFP_KERNEL); 825 + atom_table->ucNumEntries); 827 826 if (!radeon_table->entries) 828 827 return -ENOMEM; 829 828 ··· 988 989 989 990 rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries = 990 991 kzalloc_objs(struct radeon_phase_shedding_limits_entry, 991 - psl->ucNumEntries, GFP_KERNEL); 992 + psl->ucNumEntries); 992 993 if (!rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) { 993 994 r600_free_extended_power_table(rdev); 994 995 return -ENOMEM;
+5 -9
drivers/gpu/drm/radeon/radeon_atombios.c
··· 2118 2118 if (num_modes == 0) 2119 2119 return state_index; 2120 2120 rdev->pm.power_state = kzalloc_objs(struct radeon_power_state, 2121 - num_modes, GFP_KERNEL); 2121 + num_modes); 2122 2122 if (!rdev->pm.power_state) 2123 2123 return state_index; 2124 2124 /* last mode is usually default, array is low to high */ ··· 2590 2590 if (power_info->pplib.ucNumStates == 0) 2591 2591 return state_index; 2592 2592 rdev->pm.power_state = kzalloc_objs(struct radeon_power_state, 2593 - power_info->pplib.ucNumStates, 2594 - GFP_KERNEL); 2593 + power_info->pplib.ucNumStates); 2595 2594 if (!rdev->pm.power_state) 2596 2595 return state_index; 2597 2596 /* first mode is usually default, followed by low to high */ ··· 2607 2608 power_info->pplib.ucNonClockSize)); 2608 2609 rdev->pm.power_state[i].clock_info = 2609 2610 kzalloc_objs(struct radeon_pm_clock_info, 2610 - (power_info->pplib.ucStateEntrySize - 1) ? (power_info->pplib.ucStateEntrySize - 1) : 1, 2611 - GFP_KERNEL); 2611 + (power_info->pplib.ucStateEntrySize - 1) ? (power_info->pplib.ucStateEntrySize - 1) : 1); 2612 2612 if (!rdev->pm.power_state[i].clock_info) 2613 2613 return state_index; 2614 2614 if (power_info->pplib.ucStateEntrySize - 1) { ··· 2690 2692 if (state_array->ucNumEntries == 0) 2691 2693 return state_index; 2692 2694 rdev->pm.power_state = kzalloc_objs(struct radeon_power_state, 2693 - state_array->ucNumEntries, 2694 - GFP_KERNEL); 2695 + state_array->ucNumEntries); 2695 2696 if (!rdev->pm.power_state) 2696 2697 return state_index; 2697 2698 power_state_offset = (u8 *)state_array->states; ··· 2702 2705 &non_clock_info_array->nonClockInfo[non_clock_array_index]; 2703 2706 rdev->pm.power_state[i].clock_info = 2704 2707 kzalloc_objs(struct radeon_pm_clock_info, 2705 - power_state->v2.ucNumDPMLevels ? power_state->v2.ucNumDPMLevels : 1, 2706 - GFP_KERNEL); 2708 + power_state->v2.ucNumDPMLevels ? 
power_state->v2.ucNumDPMLevels : 1); 2707 2709 if (!rdev->pm.power_state[i].clock_info) 2708 2710 return state_index; 2709 2711 if (power_state->v2.ucNumDPMLevels) {
+1 -2
drivers/gpu/drm/radeon/rs780_dpm.c
··· 805 805 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); 806 806 807 807 rdev->pm.dpm.ps = kzalloc_objs(struct radeon_ps, 808 - power_info->pplib.ucNumStates, 809 - GFP_KERNEL); 808 + power_info->pplib.ucNumStates); 810 809 if (!rdev->pm.dpm.ps) 811 810 return -ENOMEM; 812 811
+1 -2
drivers/gpu/drm/radeon/rv6xx_dpm.c
··· 1888 1888 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); 1889 1889 1890 1890 rdev->pm.dpm.ps = kzalloc_objs(struct radeon_ps, 1891 - power_info->pplib.ucNumStates, 1892 - GFP_KERNEL); 1891 + power_info->pplib.ucNumStates); 1893 1892 if (!rdev->pm.dpm.ps) 1894 1893 return -ENOMEM; 1895 1894
+1 -2
drivers/gpu/drm/radeon/rv770_dpm.c
··· 2284 2284 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); 2285 2285 2286 2286 rdev->pm.dpm.ps = kzalloc_objs(struct radeon_ps, 2287 - power_info->pplib.ucNumStates, 2288 - GFP_KERNEL); 2287 + power_info->pplib.ucNumStates); 2289 2288 if (!rdev->pm.dpm.ps) 2290 2289 return -ENOMEM; 2291 2290
+1 -1
drivers/gpu/drm/radeon/si_dpm.c
··· 6779 6779 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); 6780 6780 6781 6781 rdev->pm.dpm.ps = kzalloc_objs(struct radeon_ps, 6782 - state_array->ucNumEntries, GFP_KERNEL); 6782 + state_array->ucNumEntries); 6783 6783 if (!rdev->pm.dpm.ps) 6784 6784 return -ENOMEM; 6785 6785 power_state_offset = (u8 *)state_array->states;
+1 -1
drivers/gpu/drm/radeon/sumo_dpm.c
··· 1480 1480 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); 1481 1481 1482 1482 rdev->pm.dpm.ps = kzalloc_objs(struct radeon_ps, 1483 - state_array->ucNumEntries, GFP_KERNEL); 1483 + state_array->ucNumEntries); 1484 1484 if (!rdev->pm.dpm.ps) 1485 1485 return -ENOMEM; 1486 1486 power_state_offset = (u8 *)state_array->states;
+1 -1
drivers/gpu/drm/radeon/trinity_dpm.c
··· 1711 1711 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); 1712 1712 1713 1713 rdev->pm.dpm.ps = kzalloc_objs(struct radeon_ps, 1714 - state_array->ucNumEntries, GFP_KERNEL); 1714 + state_array->ucNumEntries); 1715 1715 if (!rdev->pm.dpm.ps) 1716 1716 return -ENOMEM; 1717 1717 power_state_offset = (u8 *)state_array->states;
+3 -3
drivers/gpu/drm/v3d/v3d_submit.c
··· 485 485 job->job_type = V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY; 486 486 487 487 query_info->queries = kvmalloc_objs(struct v3d_timestamp_query, 488 - timestamp.count, GFP_KERNEL); 488 + timestamp.count); 489 489 if (!query_info->queries) 490 490 return -ENOMEM; 491 491 ··· 543 543 job->job_type = V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY; 544 544 545 545 query_info->queries = kvmalloc_objs(struct v3d_timestamp_query, 546 - reset.count, GFP_KERNEL); 546 + reset.count); 547 547 if (!query_info->queries) 548 548 return -ENOMEM; 549 549 ··· 599 599 job->job_type = V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY; 600 600 601 601 query_info->queries = kvmalloc_objs(struct v3d_timestamp_query, 602 - copy.count, GFP_KERNEL); 602 + copy.count); 603 603 if (!query_info->queries) 604 604 return -ENOMEM; 605 605
+1 -2
drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
··· 982 982 983 983 /* Allocate statically-sized temp arrays for pages -- too big to keep in frame */ 984 984 pages_stat = (struct page **) kmalloc_objs(*pages_stat, 985 - ARRAY_SIZE(pdesc->statPPNs) + ARRAY_SIZE(pdesc->infoPPNs) + ARRAY_SIZE(pdesc->strsPPNs), 986 - GFP_KERNEL); 985 + ARRAY_SIZE(pdesc->statPPNs) + ARRAY_SIZE(pdesc->infoPPNs) + ARRAY_SIZE(pdesc->strsPPNs)); 987 986 988 987 if (!pages_stat) 989 988 goto err_nomem;
+1 -1
drivers/gpu/drm/xen/xen_drm_front_evtchnl.c
··· 213 213 214 214 front_info->evt_pairs = 215 215 kzalloc_objs(struct xen_drm_front_evtchnl_pair, 216 - cfg->num_connectors, GFP_KERNEL); 216 + cfg->num_connectors); 217 217 if (!front_info->evt_pairs) { 218 218 ret = -ENOMEM; 219 219 goto fail;
+1 -2
drivers/hid/hid-core.c
··· 1320 1320 end = start + size; 1321 1321 1322 1322 device->collection = kzalloc_objs(struct hid_collection, 1323 - HID_DEFAULT_NUM_COLLECTIONS, 1324 - GFP_KERNEL); 1323 + HID_DEFAULT_NUM_COLLECTIONS); 1325 1324 if (!device->collection) { 1326 1325 ret = -ENOMEM; 1327 1326 goto err;
+1 -1
drivers/hv/connection.c
··· 314 314 version >> 16, version & 0xFFFF); 315 315 316 316 vmbus_connection.channels = kzalloc_objs(struct vmbus_channel *, 317 - MAX_CHANNEL_RELIDS, GFP_KERNEL); 317 + MAX_CHANNEL_RELIDS); 318 318 if (vmbus_connection.channels == NULL) { 319 319 ret = -ENOMEM; 320 320 goto cleanup;
+1 -1
drivers/hwmon/acpi_power_meter.c
··· 246 246 goto end; 247 247 248 248 resource->domain_devices = kzalloc_objs(struct acpi_device *, 249 - pss->package.count, GFP_KERNEL); 249 + pss->package.count); 250 250 if (!resource->domain_devices) { 251 251 res = -ENOMEM; 252 252 goto end;
+1 -1
drivers/hwmon/coretemp.c
··· 493 493 */ 494 494 pdata->nr_cores = NUM_REAL_CORES; 495 495 pdata->core_data = kzalloc_objs(struct temp_data *, 496 - pdata->nr_cores, GFP_KERNEL); 496 + pdata->nr_cores); 497 497 if (!pdata->core_data) 498 498 return NULL; 499 499 }
+2 -4
drivers/iio/adc/ti-tsc2046.c
··· 291 291 return -ENOSPC; 292 292 293 293 struct tsc2046_adc_atom *tx_buf __free(kfree) = kzalloc_objs(*tx_buf, 294 - max_count, 295 - GFP_KERNEL); 294 + max_count); 296 295 if (!tx_buf) 297 296 return -ENOMEM; 298 297 299 298 struct tsc2046_adc_atom *rx_buf __free(kfree) = kzalloc_objs(*rx_buf, 300 - max_count, 301 - GFP_KERNEL); 299 + max_count); 302 300 if (!rx_buf) 303 301 return -ENOMEM; 304 302
+1 -1
drivers/iio/dac/ad5360.c
··· 440 440 unsigned int i; 441 441 442 442 channels = kzalloc_objs(struct iio_chan_spec, 443 - st->chip_info->num_channels, GFP_KERNEL); 443 + st->chip_info->num_channels); 444 444 445 445 if (!channels) 446 446 return -ENOMEM;
+1 -1
drivers/iio/industrialio-core.c
··· 1587 1587 1588 1588 iio_dev_opaque->chan_attr_group.attrs = 1589 1589 kzalloc_objs(iio_dev_opaque->chan_attr_group.attrs[0], 1590 - attrcount + 1, GFP_KERNEL); 1590 + attrcount + 1); 1591 1591 if (iio_dev_opaque->chan_attr_group.attrs == NULL) { 1592 1592 ret = -ENOMEM; 1593 1593 goto error_clear_attrs;
+1 -1
drivers/iio/industrialio-event.c
··· 607 607 608 608 ev_int->group.name = iio_event_group_name; 609 609 ev_int->group.attrs = kzalloc_objs(ev_int->group.attrs[0], 610 - attrcount + 1, GFP_KERNEL); 610 + attrcount + 1); 611 611 if (ev_int->group.attrs == NULL) { 612 612 ret = -ENOMEM; 613 613 goto error_free_setup_event_lines;
+1 -1
drivers/infiniband/core/cache.c
··· 1472 1472 1473 1473 if (update_pkeys) { 1474 1474 pkey_cache = kmalloc_flex(*pkey_cache, table, 1475 - tprops->pkey_tbl_len, GFP_KERNEL); 1475 + tprops->pkey_tbl_len); 1476 1476 if (!pkey_cache) { 1477 1477 ret = -ENOMEM; 1478 1478 goto err;
+1 -2
drivers/infiniband/core/cma.c
··· 5376 5376 5377 5377 cma_dev->device = device; 5378 5378 cma_dev->default_gid_type = kzalloc_objs(*cma_dev->default_gid_type, 5379 - device->phys_port_cnt, 5380 - GFP_KERNEL); 5379 + device->phys_port_cnt); 5381 5380 if (!cma_dev->default_gid_type) { 5382 5381 ret = -ENOMEM; 5383 5382 goto free_cma_dev;
+1 -1
drivers/infiniband/core/device.c
··· 812 812 * empty slots at the beginning. 813 813 */ 814 814 pdata_rcu = kzalloc_flex(*pdata_rcu, pdata, 815 - size_add(rdma_end_port(device), 1), GFP_KERNEL); 815 + size_add(rdma_end_port(device), 1)); 816 816 if (!pdata_rcu) 817 817 return -ENOMEM; 818 818 /*
+2 -2
drivers/infiniband/core/iwpm_util.c
··· 59 59 int iwpm_init(u8 nl_client) 60 60 { 61 61 iwpm_hash_bucket = kzalloc_objs(struct hlist_head, 62 - IWPM_MAPINFO_HASH_SIZE, GFP_KERNEL); 62 + IWPM_MAPINFO_HASH_SIZE); 63 63 if (!iwpm_hash_bucket) 64 64 return -ENOMEM; 65 65 66 66 iwpm_reminfo_bucket = kzalloc_objs(struct hlist_head, 67 - IWPM_REMINFO_HASH_SIZE, GFP_KERNEL); 67 + IWPM_REMINFO_HASH_SIZE); 68 68 if (!iwpm_reminfo_bucket) { 69 69 kfree(iwpm_hash_bucket); 70 70 return -ENOMEM;
+3 -5
drivers/infiniband/core/sysfs.c
··· 859 859 if (!data) 860 860 goto err_free_stats; 861 861 data->group.attrs = kzalloc_objs(*data->group.attrs, 862 - stats->num_counters + 2, GFP_KERNEL); 862 + stats->num_counters + 2); 863 863 if (!data->group.attrs) 864 864 goto err_free_data; 865 865 ··· 1090 1090 int ret; 1091 1091 1092 1092 gid_attr_group = kzalloc_flex(*gid_attr_group, attrs_list, 1093 - size_mul(attr->gid_tbl_len, 2), 1094 - GFP_KERNEL); 1093 + size_mul(attr->gid_tbl_len, 2)); 1095 1094 if (!gid_attr_group) 1096 1095 return -ENOMEM; 1097 1096 gid_attr_group->port = port; ··· 1154 1155 int ret; 1155 1156 1156 1157 p = kvzalloc_flex(*p, attrs_list, 1157 - size_add(attr->gid_tbl_len, attr->pkey_tbl_len), 1158 - GFP_KERNEL); 1158 + size_add(attr->gid_tbl_len, attr->pkey_tbl_len)); 1159 1159 if (!p) 1160 1160 return ERR_PTR(-ENOMEM); 1161 1161 p->ibdev = device;
+1 -1
drivers/infiniband/core/uverbs_uapi.c
··· 446 446 uapi->num_write = max_write + 1; 447 447 uapi->num_write_ex = max_write_ex + 1; 448 448 data = kmalloc_objs(*uapi->write_methods, 449 - uapi->num_write + uapi->num_write_ex, GFP_KERNEL); 449 + uapi->num_write + uapi->num_write_ex); 450 450 if (!data) 451 451 return -ENOMEM; 452 452
+1 -1
drivers/infiniband/hw/bnxt_re/qplib_res.c
··· 873 873 /* Allocate one extra to hold the QP1 entries */ 874 874 rcfw->qp_tbl_size = max_t(u32, BNXT_RE_MAX_QPC_COUNT + 1, dev_attr->max_qp); 875 875 rcfw->qp_tbl = kzalloc_objs(struct bnxt_qplib_qp_node, 876 - rcfw->qp_tbl_size, GFP_KERNEL); 876 + rcfw->qp_tbl_size); 877 877 if (!rcfw->qp_tbl) 878 878 return -ENOMEM; 879 879
+1 -2
drivers/infiniband/hw/cxgb4/device.c
··· 882 882 883 883 if (c4iw_wr_log) { 884 884 rdev->wr_log = kzalloc_objs(*rdev->wr_log, 885 - 1 << c4iw_wr_log_size_order, 886 - GFP_KERNEL); 885 + 1 << c4iw_wr_log_size_order); 887 886 if (rdev->wr_log) { 888 887 rdev->wr_log_size = 1 << c4iw_wr_log_size_order; 889 888 atomic_set(&rdev->wr_log_idx, 0);
+1 -1
drivers/infiniband/hw/cxgb4/qp.c
··· 2551 2551 if (!wq->sw_rq) 2552 2552 goto err_put_qpid; 2553 2553 wq->pending_wrs = kzalloc_objs(*srq->wq.pending_wrs, 2554 - srq->wq.size, GFP_KERNEL); 2554 + srq->wq.size); 2555 2555 if (!wq->pending_wrs) 2556 2556 goto err_free_sw_rq; 2557 2557 }
+1 -2
drivers/infiniband/hw/hfi1/affinity.c
··· 407 407 } 408 408 409 409 dd->comp_vect_mappings = kzalloc_objs(*dd->comp_vect_mappings, 410 - dd->comp_vect_possible_cpus, 411 - GFP_KERNEL); 410 + dd->comp_vect_possible_cpus); 412 411 if (!dd->comp_vect_mappings) { 413 412 ret = -ENOMEM; 414 413 goto fail;
+2 -2
drivers/infiniband/hw/hfi1/pio.c
··· 408 408 dd->hw_to_sw = kmalloc_array(TXE_NUM_CONTEXTS, sizeof(u8), 409 409 GFP_KERNEL); 410 410 dd->send_contexts = kzalloc_objs(struct send_context_info, 411 - dd->num_send_contexts, GFP_KERNEL); 411 + dd->num_send_contexts); 412 412 if (!dd->send_contexts || !dd->hw_to_sw) { 413 413 kfree(dd->hw_to_sw); 414 414 kfree(dd->send_contexts); ··· 2051 2051 int i; 2052 2052 2053 2053 dd->cr_base = kzalloc_objs(struct credit_return_base, 2054 - node_affinity.num_possible_nodes, GFP_KERNEL); 2054 + node_affinity.num_possible_nodes); 2055 2055 if (!dd->cr_base) { 2056 2056 ret = -ENOMEM; 2057 2057 goto done;
+1 -2
drivers/infiniband/hw/hfi1/user_exp_rcv.c
··· 59 59 if (!HFI1_CAP_UGET_MASK(uctxt->flags, TID_UNMAP)) { 60 60 fd->invalid_tid_idx = 0; 61 61 fd->invalid_tids = kzalloc_objs(*fd->invalid_tids, 62 - uctxt->expected_count, 63 - GFP_KERNEL); 62 + uctxt->expected_count); 64 63 if (!fd->invalid_tids) { 65 64 kfree(fd->entry_to_rb); 66 65 fd->entry_to_rb = NULL;
+2 -4
drivers/infiniband/hw/hns/hns_roce_hem.c
··· 778 778 goto err_kcalloc_bt_l1; 779 779 780 780 table->bt_l1_dma_addr = kzalloc_objs(*table->bt_l1_dma_addr, 781 - num_bt_l1, 782 - GFP_KERNEL); 781 + num_bt_l1); 783 782 784 783 if (!table->bt_l1_dma_addr) 785 784 goto err_kcalloc_l1_dma; ··· 792 793 goto err_kcalloc_bt_l0; 793 794 794 795 table->bt_l0_dma_addr = kzalloc_objs(*table->bt_l0_dma_addr, 795 - num_bt_l0, 796 - GFP_KERNEL); 796 + num_bt_l0); 797 797 if (!table->bt_l0_dma_addr) 798 798 goto err_kcalloc_l0_dma; 799 799 }
+1 -2
drivers/infiniband/hw/hns/hns_roce_mr.c
··· 459 459 460 460 mr->npages = 0; 461 461 mr->page_list = kvzalloc_objs(dma_addr_t, 462 - mr->pbl_mtr.hem_cfg.buf_pg_count, 463 - GFP_KERNEL); 462 + mr->pbl_mtr.hem_cfg.buf_pg_count); 464 463 if (!mr->page_list) 465 464 return sg_num; 466 465
+1 -1
drivers/infiniband/hw/mana/main.c
··· 794 794 return err; 795 795 796 796 mdev->eqs = kzalloc_objs(struct gdma_queue *, 797 - mdev->ib_dev.num_comp_vectors, GFP_KERNEL); 797 + mdev->ib_dev.num_comp_vectors); 798 798 if (!mdev->eqs) { 799 799 err = -ENOMEM; 800 800 goto destroy_fatal_eq;
+1 -1
drivers/infiniband/hw/mlx4/mad.c
··· 2158 2158 int i; 2159 2159 2160 2160 ctx->tun = kzalloc_objs(struct mlx4_ib_demux_pv_ctx *, 2161 - dev->dev->caps.sqp_demux, GFP_KERNEL); 2161 + dev->dev->caps.sqp_demux); 2162 2162 if (!ctx->tun) 2163 2163 return -ENOMEM; 2164 2164
+1 -1
drivers/infiniband/hw/mlx4/main.c
··· 2426 2426 int i, j, eq = 0, total_eqs = 0; 2427 2427 2428 2428 ibdev->eq_table = kzalloc_objs(ibdev->eq_table[0], 2429 - dev->caps.num_comp_vectors, GFP_KERNEL); 2429 + dev->caps.num_comp_vectors); 2430 2430 if (!ibdev->eq_table) 2431 2431 return; 2432 2432
+1 -1
drivers/infiniband/hw/mlx5/macsec.c
··· 181 181 max_gids = MLX5_CAP_ROCE(dev->mdev, roce_address_table_size); 182 182 for (i = 0; i < dev->num_ports; i++) { 183 183 dev->port[i].reserved_gids = kzalloc_objs(*dev->port[i].reserved_gids, 184 - max_gids, GFP_KERNEL); 184 + max_gids); 185 185 if (!dev->port[i].reserved_gids) 186 186 goto err; 187 187
+1 -2
drivers/infiniband/hw/mlx5/main.c
··· 5205 5205 return ERR_PTR(-ENOMEM); 5206 5206 5207 5207 mplane->port = kzalloc_objs(*mplane->port, 5208 - mparent->num_plane * mparent->num_ports, 5209 - GFP_KERNEL); 5208 + mparent->num_plane * mparent->num_ports); 5210 5209 if (!mplane->port) { 5211 5210 ret = -ENOMEM; 5212 5211 goto fail_kcalloc;
+1 -1
drivers/infiniband/hw/mthca/mthca_cmd.c
··· 560 560 int i; 561 561 562 562 dev->cmd.context = kmalloc_objs(struct mthca_cmd_context, 563 - dev->cmd.max_cmds, GFP_KERNEL); 563 + dev->cmd.max_cmds); 564 564 if (!dev->cmd.context) 565 565 return -ENOMEM; 566 566
+1 -1
drivers/infiniband/hw/mthca/mthca_memfree.c
··· 718 718 dev->db_tab->min_group2 = dev->db_tab->npages - 1; 719 719 720 720 dev->db_tab->page = kmalloc_objs(*dev->db_tab->page, 721 - dev->db_tab->npages, GFP_KERNEL); 721 + dev->db_tab->npages); 722 722 if (!dev->db_tab->page) { 723 723 kfree(dev->db_tab); 724 724 return -ENOMEM;
+1 -2
drivers/infiniband/hw/usnic/usnic_uiom.c
··· 150 150 151 151 while (ret) { 152 152 chunk = kmalloc_flex(*chunk, page_list, 153 - min_t(int, ret, USNIC_UIOM_PAGE_CHUNK), 154 - GFP_KERNEL); 153 + min_t(int, ret, USNIC_UIOM_PAGE_CHUNK)); 155 154 if (!chunk) { 156 155 ret = -ENOMEM; 157 156 goto out;
+1 -1
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
··· 272 272 ib_set_device_ops(&dev->ib_dev, &pvrdma_dev_srq_ops); 273 273 274 274 dev->srq_tbl = kzalloc_objs(struct pvrdma_srq *, 275 - dev->dsr->caps.max_srq, GFP_KERNEL); 275 + dev->dsr->caps.max_srq); 276 276 if (!dev->srq_tbl) 277 277 goto err_qp_free; 278 278 }
+1 -1
drivers/infiniband/ulp/iser/iser_initiator.c
··· 241 241 242 242 iser_conn->num_rx_descs = session->cmds_max; 243 243 iser_conn->rx_descs = kmalloc_objs(struct iser_rx_desc, 244 - iser_conn->num_rx_descs, GFP_KERNEL); 244 + iser_conn->num_rx_descs); 245 245 if (!iser_conn->rx_descs) 246 246 goto rx_desc_alloc_fail; 247 247
+1 -1
drivers/infiniband/ulp/isert/ib_isert.c
··· 153 153 int i, j; 154 154 155 155 isert_conn->rx_descs = kzalloc_objs(struct iser_rx_desc, 156 - ISERT_QP_MAX_RECV_DTOS, GFP_KERNEL); 156 + ISERT_QP_MAX_RECV_DTOS); 157 157 if (!isert_conn->rx_descs) 158 158 return -ENOMEM; 159 159
+1 -1
drivers/infiniband/ulp/rtrs/rtrs-clt.c
··· 1871 1871 1872 1872 if (!clt_path->rbufs) { 1873 1873 clt_path->rbufs = kzalloc_objs(*clt_path->rbufs, 1874 - queue_depth, GFP_KERNEL); 1874 + queue_depth); 1875 1875 if (!clt_path->rbufs) 1876 1876 return -ENOMEM; 1877 1877 }
+3 -5
drivers/iommu/amd/init.c
··· 660 660 static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg) 661 661 { 662 662 pci_seg->rlookup_table = kvzalloc_objs(*pci_seg->rlookup_table, 663 - pci_seg->last_bdf + 1, 664 - GFP_KERNEL); 663 + pci_seg->last_bdf + 1); 665 664 if (pci_seg->rlookup_table == NULL) 666 665 return -ENOMEM; 667 666 ··· 676 677 static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg) 677 678 { 678 679 pci_seg->irq_lookup_table = kvzalloc_objs(*pci_seg->irq_lookup_table, 679 - pci_seg->last_bdf + 1, 680 - GFP_KERNEL); 680 + pci_seg->last_bdf + 1); 681 681 if (pci_seg->irq_lookup_table == NULL) 682 682 return -ENOMEM; 683 683 ··· 694 696 int i; 695 697 696 698 pci_seg->alias_table = kvmalloc_objs(*pci_seg->alias_table, 697 - pci_seg->last_bdf + 1, GFP_KERNEL); 699 + pci_seg->last_bdf + 1); 698 700 if (!pci_seg->alias_table) 699 701 return -ENOMEM; 700 702
+1 -2
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
··· 1515 1515 DIV_ROUND_UP(max_contexts, CTXDESC_L2_ENTRIES); 1516 1516 1517 1517 cd_table->l2.l2ptrs = kzalloc_objs(*cd_table->l2.l2ptrs, 1518 - cd_table->l2.num_l1_ents, 1519 - GFP_KERNEL); 1518 + cd_table->l2.num_l1_ents); 1520 1519 if (!cd_table->l2.l2ptrs) 1521 1520 return -ENOMEM; 1522 1521
+1 -1
drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
··· 743 743 vintf->base = cmdqv->base + TEGRA241_VINTF(idx); 744 744 745 745 vintf->lvcmdqs = kzalloc_objs(*vintf->lvcmdqs, 746 - cmdqv->num_lvcmdqs_per_vintf, GFP_KERNEL); 746 + cmdqv->num_lvcmdqs_per_vintf); 747 747 if (!vintf->lvcmdqs) { 748 748 ida_free(&cmdqv->vintf_ids, idx); 749 749 return -ENOMEM;
+1 -1
drivers/iommu/iova.c
··· 716 716 int i, ret; 717 717 718 718 iovad->rcaches = kzalloc_objs(struct iova_rcache, 719 - IOVA_RANGE_CACHE_MAX_SIZE, GFP_KERNEL); 719 + IOVA_RANGE_CACHE_MAX_SIZE); 720 720 if (!iovad->rcaches) 721 721 return -ENOMEM; 722 722
+1 -1
drivers/irqchip/irq-mtk-sysirq.c
··· 155 155 } 156 156 157 157 chip_data->intpol_bases = kzalloc_objs(*chip_data->intpol_bases, 158 - nr_intpol_bases, GFP_KERNEL); 158 + nr_intpol_bases); 159 159 if (!chip_data->intpol_bases) { 160 160 ret = -ENOMEM; 161 161 goto out_free_intpol_words;
+1 -1
drivers/irqchip/irq-riscv-imsic-state.c
··· 513 513 514 514 /* Allocate vector array */ 515 515 lpriv->vectors = kzalloc_objs(*lpriv->vectors, 516 - global->nr_ids + 1, GFP_KERNEL); 516 + global->nr_ids + 1); 517 517 if (!lpriv->vectors) 518 518 goto fail_local_cleanup; 519 519
+1 -1
drivers/irqchip/irq-stm32-exti.c
··· 275 275 276 276 host_data->drv_data = dd; 277 277 host_data->chips_data = kzalloc_objs(struct stm32_exti_chip_data, 278 - dd->bank_nr, GFP_KERNEL); 278 + dd->bank_nr); 279 279 if (!host_data->chips_data) 280 280 goto free_host_data; 281 281
+1 -1
drivers/md/dm-crypt.c
··· 2335 2335 int err; 2336 2336 2337 2337 cc->cipher_tfm.tfms = kzalloc_objs(struct crypto_skcipher *, 2338 - cc->tfms_count, GFP_KERNEL); 2338 + cc->tfms_count); 2339 2339 if (!cc->cipher_tfm.tfms) 2340 2340 return -ENOMEM; 2341 2341
+2 -2
drivers/md/dm-integrity.c
··· 4500 4500 } 4501 4501 4502 4502 sg = kvmalloc_objs(struct scatterlist, 4503 - ic->journal_pages + 1, GFP_KERNEL); 4503 + ic->journal_pages + 1); 4504 4504 if (!sg) { 4505 4505 *error = "Unable to allocate sg list"; 4506 4506 r = -ENOMEM; ··· 5271 5271 goto bad; 5272 5272 } 5273 5273 ic->bbs = kvmalloc_objs(struct bitmap_block_status, 5274 - ic->n_bitmap_blocks, GFP_KERNEL); 5274 + ic->n_bitmap_blocks); 5275 5275 if (!ic->bbs) { 5276 5276 ti->error = "Could not allocate memory for bitmap"; 5277 5277 r = -ENOMEM;
+1 -1
drivers/md/dm-pcache/cache.c
··· 139 139 int ret; 140 140 141 141 cache->segments = kvzalloc_objs(struct pcache_cache_segment, 142 - cache_dev->seg_num, GFP_KERNEL); 142 + cache_dev->seg_num); 143 143 if (!cache->segments) { 144 144 ret = -ENOMEM; 145 145 goto err;
+1 -1
drivers/md/dm-pcache/cache_key.c
··· 838 838 * an RB tree root and a spinlock for protecting its contents. 839 839 */ 840 840 cache_tree->subtrees = kvzalloc_objs(struct pcache_cache_subtree, 841 - cache_tree->n_subtrees, GFP_KERNEL); 841 + cache_tree->n_subtrees); 842 842 if (!cache_tree->subtrees) { 843 843 ret = -ENOMEM; 844 844 goto key_pool_exit;
+1 -1
drivers/md/dm-stats.c
··· 972 972 (*n_histogram_entries)++; 973 973 974 974 *histogram_boundaries = kmalloc_objs(unsigned long long, 975 - *n_histogram_entries, GFP_KERNEL); 975 + *n_histogram_entries); 976 976 if (!*histogram_boundaries) 977 977 return -ENOMEM; 978 978
+1 -1
drivers/md/md-cluster.c
··· 1544 1544 1545 1545 cinfo->other_bitmap_lockres = 1546 1546 kzalloc_objs(struct dlm_lock_resource *, 1547 - mddev->bitmap_info.nodes - 1, GFP_KERNEL); 1547 + mddev->bitmap_info.nodes - 1); 1548 1548 if (!cinfo->other_bitmap_lockres) { 1549 1549 pr_err("md: can't alloc mem for other bitmap locks\n"); 1550 1550 return 0;
+2 -4
drivers/md/raid10.c
··· 3858 3858 3859 3859 /* FIXME calc properly */ 3860 3860 conf->mirrors = kzalloc_objs(struct raid10_info, 3861 - mddev->raid_disks + max(0, -mddev->delta_disks), 3862 - GFP_KERNEL); 3861 + mddev->raid_disks + max(0, -mddev->delta_disks)); 3863 3862 if (!conf->mirrors) 3864 3863 goto out; 3865 3864 ··· 4281 4282 /* allocate new 'mirrors' list */ 4282 4283 conf->mirrors_new = 4283 4284 kzalloc_objs(struct raid10_info, 4284 - mddev->raid_disks + mddev->delta_disks, 4285 - GFP_KERNEL); 4285 + mddev->raid_disks + mddev->delta_disks); 4286 4286 if (!conf->mirrors_new) 4287 4287 return -ENOMEM; 4288 4288 }
+2 -2
drivers/media/pci/tw686x/tw686x-core.c
··· 251 251 sprintf(dev->name, "tw%04X", pci_dev->device); 252 252 253 253 dev->video_channels = kzalloc_objs(*dev->video_channels, 254 - max_channels(dev), GFP_KERNEL); 254 + max_channels(dev)); 255 255 if (!dev->video_channels) { 256 256 err = -ENOMEM; 257 257 goto free_dev; 258 258 } 259 259 260 260 dev->audio_channels = kzalloc_objs(*dev->audio_channels, 261 - max_channels(dev), GFP_KERNEL); 261 + max_channels(dev)); 262 262 if (!dev->audio_channels) { 263 263 err = -ENOMEM; 264 264 goto free_video;
+1 -1
drivers/media/usb/pvrusb2/pvrusb2-hdw.c
··· 2450 2450 2451 2451 /* Define and configure additional controls from cx2341x module. */ 2452 2452 hdw->mpeg_ctrl_info = kzalloc_objs(*(hdw->mpeg_ctrl_info), 2453 - MPEGDEF_COUNT, GFP_KERNEL); 2453 + MPEGDEF_COUNT); 2454 2454 if (!hdw->mpeg_ctrl_info) goto fail; 2455 2455 for (idx = 0; idx < MPEGDEF_COUNT; idx++) { 2456 2456 cptr = hdw->controls + idx + CTRLDEF_COUNT;
+1 -2
drivers/media/v4l2-core/v4l2-subdev.c
··· 1889 1889 1890 1890 if (new_configs.num_configs) { 1891 1891 new_configs.configs = kvzalloc_objs(*new_configs.configs, 1892 - new_configs.num_configs, 1893 - GFP_KERNEL); 1892 + new_configs.num_configs); 1894 1893 1895 1894 if (!new_configs.configs) 1896 1895 return -ENOMEM;
+1 -1
drivers/memstick/core/mspro_block.c
··· 940 940 attr_count = attr->count; 941 941 942 942 msb->attr_group.attrs = kzalloc_objs(*msb->attr_group.attrs, 943 - attr_count + 1, GFP_KERNEL); 943 + attr_count + 1); 944 944 if (!msb->attr_group.attrs) { 945 945 rc = -ENOMEM; 946 946 goto out_free_attr;
+3 -3
drivers/message/fusion/mptsas.c
··· 2428 2428 2429 2429 port_info->num_phys = buffer->NumPhys; 2430 2430 port_info->phy_info = kzalloc_objs(struct mptsas_phyinfo, 2431 - port_info->num_phys, GFP_KERNEL); 2431 + port_info->num_phys); 2432 2432 if (!port_info->phy_info) { 2433 2433 error = -ENOMEM; 2434 2434 goto out_free_consistent; ··· 2719 2719 /* save config data */ 2720 2720 port_info->num_phys = (buffer->NumPhys) ? buffer->NumPhys : 1; 2721 2721 port_info->phy_info = kzalloc_objs(struct mptsas_phyinfo, 2722 - port_info->num_phys, GFP_KERNEL); 2722 + port_info->num_phys); 2723 2723 if (!port_info->phy_info) { 2724 2724 error = -ENOMEM; 2725 2725 goto out_free_consistent; ··· 3448 3448 port_info->num_phys = (expander_data->NumPhys) ? 3449 3449 expander_data->NumPhys : 1; 3450 3450 port_info->phy_info = kzalloc_objs(struct mptsas_phyinfo, 3451 - port_info->num_phys, GFP_KERNEL); 3451 + port_info->num_phys); 3452 3452 BUG_ON(!port_info->phy_info); 3453 3453 memcpy(&sas_address, &expander_data->SASAddress, sizeof(__le64)); 3454 3454 for (i = 0; i < port_info->num_phys; i++) {
+1 -1
drivers/mtd/chips/cfi_cmdset_0001.c
··· 628 628 629 629 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips; 630 630 mtd->eraseregions = kzalloc_objs(struct mtd_erase_region_info, 631 - mtd->numeraseregions, GFP_KERNEL); 631 + mtd->numeraseregions); 632 632 if (!mtd->eraseregions) 633 633 goto setup_err; 634 634
+1 -1
drivers/mtd/chips/cfi_cmdset_0002.c
··· 777 777 778 778 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips; 779 779 mtd->eraseregions = kmalloc_objs(struct mtd_erase_region_info, 780 - mtd->numeraseregions, GFP_KERNEL); 780 + mtd->numeraseregions); 781 781 if (!mtd->eraseregions) 782 782 goto setup_err; 783 783
+1 -1
drivers/mtd/chips/cfi_cmdset_0020.c
··· 186 186 187 187 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips; 188 188 mtd->eraseregions = kmalloc_objs(struct mtd_erase_region_info, 189 - mtd->numeraseregions, GFP_KERNEL); 189 + mtd->numeraseregions); 190 190 if (!mtd->eraseregions) { 191 191 kfree(cfi->cmdset_priv); 192 192 kfree(mtd);
+1 -1
drivers/mtd/nand/onenand/onenand_base.c
··· 3729 3729 mtd->numeraseregions = this->dies << 1; 3730 3730 mtd->eraseregions = 3731 3731 kzalloc_objs(struct mtd_erase_region_info, 3732 - this->dies << 1, GFP_KERNEL); 3732 + this->dies << 1); 3733 3733 if (!mtd->eraseregions) 3734 3734 return -ENOMEM; 3735 3735 }
+1 -1
drivers/mtd/nand/raw/nand_base.c
··· 5430 5430 5431 5431 chip->nr_secure_regions = nr_elem / 2; 5432 5432 chip->secure_regions = kzalloc_objs(*chip->secure_regions, 5433 - chip->nr_secure_regions, GFP_KERNEL); 5433 + chip->nr_secure_regions); 5434 5434 if (!chip->secure_regions) 5435 5435 return -ENOMEM; 5436 5436
+1 -1
drivers/mtd/parsers/sharpslpart.c
··· 363 363 } 364 364 365 365 sharpsl_nand_parts = kzalloc_objs(*sharpsl_nand_parts, 366 - SHARPSL_NAND_PARTS, GFP_KERNEL); 366 + SHARPSL_NAND_PARTS); 367 367 if (!sharpsl_nand_parts) 368 368 return -ENOMEM; 369 369
+1 -1
drivers/net/can/usb/ucan.c
··· 331 331 ucan_release_context_array(up); 332 332 333 333 up->context_array = kzalloc_objs(*up->context_array, 334 - up->device_info.tx_fifo, GFP_KERNEL); 334 + up->device_info.tx_fifo); 335 335 if (!up->context_array) { 336 336 netdev_err(up->netdev, 337 337 "Not enough memory to allocate tx contexts\n");
+3 -3
drivers/net/dsa/mv88e6xxx/devlink.c
··· 379 379 int fid = -1, err = 0, count = 0; 380 380 381 381 table = kzalloc_objs(struct mv88e6xxx_devlink_atu_entry, 382 - mv88e6xxx_num_databases(chip), GFP_KERNEL); 382 + mv88e6xxx_num_databases(chip)); 383 383 if (!table) 384 384 return -ENOMEM; 385 385 ··· 440 440 int err; 441 441 442 442 table = kzalloc_objs(struct mv88e6xxx_devlink_vtu_entry, 443 - mv88e6xxx_max_vid(chip) + 1, GFP_KERNEL); 443 + mv88e6xxx_max_vid(chip) + 1); 444 444 if (!table) 445 445 return -ENOMEM; 446 446 ··· 522 522 int err; 523 523 524 524 table = kzalloc_objs(struct mv88e6xxx_devlink_stu_entry, 525 - mv88e6xxx_max_sid(chip) + 1, GFP_KERNEL); 525 + mv88e6xxx_max_sid(chip) + 1); 526 526 if (!table) 527 527 return -ENOMEM; 528 528
+1 -1
drivers/net/dsa/sja1105/sja1105_vl.c
··· 636 636 rule->vl.cycle_time = cycle_time; 637 637 rule->vl.num_entries = num_entries; 638 638 rule->vl.entries = kzalloc_objs(struct action_gate_entry, 639 - num_entries, GFP_KERNEL); 639 + num_entries); 640 640 if (!rule->vl.entries) { 641 641 rc = -ENOMEM; 642 642 goto out;
+1 -2
drivers/net/ethernet/amd/pds_core/core.c
··· 416 416 enum pds_core_vif_types vt; 417 417 418 418 pdsc->viftype_status = kzalloc_objs(*pdsc->viftype_status, 419 - ARRAY_SIZE(pdsc_viftype_defaults), 420 - GFP_KERNEL); 419 + ARRAY_SIZE(pdsc_viftype_defaults)); 421 420 if (!pdsc->viftype_status) 422 421 return -ENOMEM; 423 422
+2 -4
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
··· 4583 4583 index, cos); 4584 4584 4585 4585 txdata->tx_buf_ring = kzalloc_objs(struct sw_tx_bd, 4586 - NUM_TX_BD, 4587 - GFP_KERNEL); 4586 + NUM_TX_BD); 4588 4587 if (!txdata->tx_buf_ring) 4589 4588 goto alloc_mem_err; 4590 4589 txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping, ··· 4751 4752 for (i = 0; i < bp->fp_array_size; i++) { 4752 4753 fp[i].tpa_info = 4753 4754 kzalloc_objs(struct bnx2x_agg_info, 4754 - ETH_MAX_AGGREGATION_QUEUES_E1H_E2, 4755 - GFP_KERNEL); 4755 + ETH_MAX_AGGREGATION_QUEUES_E1H_E2); 4756 4756 if (!(fp[i].tpa_info)) 4757 4757 goto alloc_err; 4758 4758 }
+1 -1
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
··· 1273 1273 1274 1274 /* allocate the queue arrays for all VFs */ 1275 1275 bp->vfdb->vfqs = kzalloc_objs(struct bnx2x_vf_queue, 1276 - BNX2X_MAX_NUM_VF_QUEUES, GFP_KERNEL); 1276 + BNX2X_MAX_NUM_VF_QUEUES); 1277 1277 1278 1278 if (!bp->vfdb->vfqs) { 1279 1279 BNX2X_ERR("failed to allocate vf queue array\n");
+3 -3
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 4624 4624 4625 4625 if (irq_re_init) { 4626 4626 bp->grp_info = kzalloc_objs(struct bnxt_ring_grp_info, 4627 - bp->cp_nr_rings, GFP_KERNEL); 4627 + bp->cp_nr_rings); 4628 4628 if (!bp->grp_info) 4629 4629 return -ENOMEM; 4630 4630 } ··· 5511 5511 } 5512 5512 5513 5513 bp->rx_ring = kzalloc_objs(struct bnxt_rx_ring_info, 5514 - bp->rx_nr_rings, GFP_KERNEL); 5514 + bp->rx_nr_rings); 5515 5515 if (!bp->rx_ring) 5516 5516 return -ENOMEM; 5517 5517 ··· 5531 5531 } 5532 5532 5533 5533 bp->tx_ring = kzalloc_objs(struct bnxt_tx_ring_info, 5534 - bp->tx_nr_rings, GFP_KERNEL); 5534 + bp->tx_nr_rings); 5535 5535 if (!bp->tx_ring) 5536 5536 return -ENOMEM; 5537 5537
+1 -1
drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
··· 984 984 pps_info->num_pins = resp->num_pins; 985 985 ptp_info->n_pins = pps_info->num_pins; 986 986 ptp_info->pin_config = kzalloc_objs(*ptp_info->pin_config, 987 - ptp_info->n_pins, GFP_KERNEL); 987 + ptp_info->n_pins); 988 988 if (!ptp_info->pin_config) { 989 989 hwrm_req_drop(bp, req); 990 990 return -ENOMEM;
+1 -2
drivers/net/ethernet/broadcom/sb1250-mac.c
··· 623 623 d->sbdma_maxdescr = maxdescr; 624 624 625 625 d->sbdma_dscrtable_unaligned = kzalloc_objs(*d->sbdma_dscrtable, 626 - d->sbdma_maxdescr + 1, 627 - GFP_KERNEL); 626 + d->sbdma_maxdescr + 1); 628 627 629 628 /* 630 629 * The descriptor table must be aligned to at least 16 bytes or the
+1 -1
drivers/net/ethernet/broadcom/tg3.c
··· 8730 8730 8731 8731 for (i = 0; i < tp->txq_cnt; i++, tnapi++) { 8732 8732 tnapi->tx_buffers = kzalloc_objs(struct tg3_tx_ring_info, 8733 - TG3_TX_RING_SIZE, GFP_KERNEL); 8733 + TG3_TX_RING_SIZE); 8734 8734 if (!tnapi->tx_buffers) 8735 8735 goto err_out; 8736 8736
+2 -2
drivers/net/ethernet/brocade/bna/bnad.c
··· 1458 1458 if (cfg_flags & BNAD_CF_MSIX) { 1459 1459 intr_info->intr_type = BNA_INTR_T_MSIX; 1460 1460 intr_info->idl = kzalloc_objs(struct bna_intr_descr, 1461 - intr_info->num, GFP_KERNEL); 1461 + intr_info->num); 1462 1462 if (!intr_info->idl) 1463 1463 return -ENOMEM; 1464 1464 ··· 1483 1483 intr_info->intr_type = BNA_INTR_T_INTX; 1484 1484 intr_info->num = 1; 1485 1485 intr_info->idl = kzalloc_objs(struct bna_intr_descr, 1486 - intr_info->num, GFP_KERNEL); 1486 + intr_info->num); 1487 1487 if (!intr_info->idl) 1488 1488 return -ENOMEM; 1489 1489
+1 -1
drivers/net/ethernet/cavium/liquidio/lio_core.c
··· 1050 1050 [IRQ_NAME_OFF(MAX_IOQ_INTERRUPTS_PER_PF)]; 1051 1051 1052 1052 oct->msix_entries = kzalloc_objs(struct msix_entry, 1053 - oct->num_msix_irqs, GFP_KERNEL); 1053 + oct->num_msix_irqs); 1054 1054 if (!oct->msix_entries) { 1055 1055 dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n"); 1056 1056 kfree(oct->irq_name_storage);
+2 -3
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
··· 5028 5028 } 5029 5029 5030 5030 adap->sge.ingr_map = kzalloc_objs(*adap->sge.ingr_map, 5031 - adap->sge.ingr_sz, GFP_KERNEL); 5031 + adap->sge.ingr_sz); 5032 5032 if (!adap->sge.ingr_map) { 5033 5033 ret = -ENOMEM; 5034 5034 goto bye; ··· 6349 6349 } 6350 6350 /* Allocate and set up VF Information. */ 6351 6351 adap->vfinfo = kzalloc_objs(struct vf_info, 6352 - pci_sriov_get_totalvfs(pdev), 6353 - GFP_KERNEL); 6352 + pci_sriov_get_totalvfs(pdev)); 6354 6353 if (!adap->vfinfo) { 6355 6354 unregister_netdev(adap->port[0]); 6356 6355 free_netdev(adap->port[0]);
+2 -2
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
··· 157 157 /* Allocate ETHOFLD hardware queue structures if not done already */ 158 158 if (!refcount_read(&adap->tc_mqprio->refcnt)) { 159 159 adap->sge.eohw_rxq = kzalloc_objs(struct sge_ofld_rxq, 160 - adap->sge.eoqsets, GFP_KERNEL); 160 + adap->sge.eoqsets); 161 161 if (!adap->sge.eohw_rxq) 162 162 return -ENOMEM; 163 163 164 164 adap->sge.eohw_txq = kzalloc_objs(struct sge_eohw_txq, 165 - adap->sge.eoqsets, GFP_KERNEL); 165 + adap->sge.eoqsets); 166 166 if (!adap->sge.eohw_txq) { 167 167 kfree(adap->sge.eohw_rxq); 168 168 return -ENOMEM;
+1 -1
drivers/net/ethernet/cisco/enic/enic_main.c
··· 2469 2469 goto free_queues; 2470 2470 2471 2471 enic->napi = kzalloc_objs(struct napi_struct, 2472 - enic->wq_avail + enic->rq_avail, GFP_KERNEL); 2472 + enic->wq_avail + enic->rq_avail); 2473 2473 if (!enic->napi) 2474 2474 goto free_queues; 2475 2475
+2 -2
drivers/net/ethernet/engleder/tsnep_main.c
··· 2103 2103 return -EOPNOTSUPP; 2104 2104 2105 2105 queue->rx->page_buffer = kzalloc_objs(*queue->rx->page_buffer, 2106 - TSNEP_RING_SIZE, GFP_KERNEL); 2106 + TSNEP_RING_SIZE); 2107 2107 if (!queue->rx->page_buffer) 2108 2108 return -ENOMEM; 2109 2109 queue->rx->xdp_batch = kzalloc_objs(*queue->rx->xdp_batch, 2110 - TSNEP_RING_SIZE, GFP_KERNEL); 2110 + TSNEP_RING_SIZE); 2111 2111 if (!queue->rx->xdp_batch) { 2112 2112 kfree(queue->rx->page_buffer); 2113 2113 queue->rx->page_buffer = NULL;
+1 -2
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c
··· 243 243 priv->trap_data = dpaa2_eth_trap_data; 244 244 245 245 dpaa2_eth_trap_data->trap_items_arr = kzalloc_objs(struct dpaa2_eth_trap_item, 246 - ARRAY_SIZE(dpaa2_eth_traps_arr), 247 - GFP_KERNEL); 246 + ARRAY_SIZE(dpaa2_eth_traps_arr)); 248 247 if (!dpaa2_eth_trap_data->trap_items_arr) { 249 248 err = -ENOMEM; 250 249 goto trap_data_free;
+1 -1
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
··· 3421 3421 } 3422 3422 3423 3423 ethsw->filter_blocks = kzalloc_objs(*ethsw->filter_blocks, 3424 - ethsw->sw_attr.num_ifs, GFP_KERNEL); 3424 + ethsw->sw_attr.num_ifs); 3425 3425 if (!ethsw->filter_blocks) { 3426 3426 err = -ENOMEM; 3427 3427 goto err_free_fdbs;
+1 -1
drivers/net/ethernet/freescale/enetc/enetc_pf.c
··· 959 959 pf->total_vfs = pci_sriov_get_totalvfs(pdev); 960 960 if (pf->total_vfs) { 961 961 pf->vf_state = kzalloc_objs(struct enetc_vf_state, 962 - pf->total_vfs, GFP_KERNEL); 962 + pf->total_vfs); 963 963 if (!pf->vf_state) 964 964 goto err_alloc_vf_state; 965 965 }
+2 -3
drivers/net/ethernet/freescale/gianfar.c
··· 1374 1374 tx_queue = priv->tx_queue[i]; 1375 1375 tx_queue->tx_skbuff = 1376 1376 kmalloc_objs(*tx_queue->tx_skbuff, 1377 - tx_queue->tx_ring_size, GFP_KERNEL); 1377 + tx_queue->tx_ring_size); 1378 1378 if (!tx_queue->tx_skbuff) 1379 1379 goto cleanup; 1380 1380 ··· 1385 1385 for (i = 0; i < priv->num_rx_queues; i++) { 1386 1386 rx_queue = priv->rx_queue[i]; 1387 1387 rx_queue->rx_buff = kzalloc_objs(*rx_queue->rx_buff, 1388 - rx_queue->rx_ring_size, 1389 - GFP_KERNEL); 1388 + rx_queue->rx_ring_size); 1390 1389 if (!rx_queue->rx_buff) 1391 1390 goto cleanup; 1392 1391 }
+2 -2
drivers/net/ethernet/freescale/ucc_geth.c
··· 2072 2072 /* Setup the skbuff rings */ 2073 2073 ugeth->tx_skbuff[j] = 2074 2074 kzalloc_objs(struct sk_buff *, 2075 - ugeth->ug_info->bdRingLenTx[j], GFP_KERNEL); 2075 + ugeth->ug_info->bdRingLenTx[j]); 2076 2076 2077 2077 if (ugeth->tx_skbuff[j] == NULL) { 2078 2078 if (netif_msg_ifup(ugeth)) ··· 2130 2130 /* Setup the skbuff rings */ 2131 2131 ugeth->rx_skbuff[j] = 2132 2132 kzalloc_objs(struct sk_buff *, 2133 - ugeth->ug_info->bdRingLenRx[j], GFP_KERNEL); 2133 + ugeth->ug_info->bdRingLenRx[j]); 2134 2134 2135 2135 if (ugeth->rx_skbuff[j] == NULL) { 2136 2136 if (netif_msg_ifup(ugeth))
+2 -2
drivers/net/ethernet/google/gve/gve_main.c
··· 152 152 153 153 flow_rules_cache->rules_cache = 154 154 kvzalloc_objs(*flow_rules_cache->rules_cache, 155 - GVE_FLOW_RULES_CACHE_SIZE, GFP_KERNEL); 155 + GVE_FLOW_RULES_CACHE_SIZE); 156 156 if (!flow_rules_cache->rules_cache) { 157 157 dev_err(&priv->pdev->dev, "Cannot alloc flow rules cache\n"); 158 158 return -ENOMEM; ··· 486 486 int err; 487 487 488 488 priv->msix_vectors = kvzalloc_objs(*priv->msix_vectors, 489 - num_vecs_requested, GFP_KERNEL); 489 + num_vecs_requested); 490 490 if (!priv->msix_vectors) 491 491 return -ENOMEM; 492 492 for (i = 0; i < num_vecs_requested; i++)
+2 -3
drivers/net/ethernet/google/gve/gve_tx_dqo.c
··· 267 267 int i; 268 268 269 269 tx->dqo.tx_qpl_buf_next = kvzalloc_objs(tx->dqo.tx_qpl_buf_next[0], 270 - num_tx_qpl_bufs, GFP_KERNEL); 270 + num_tx_qpl_bufs); 271 271 if (!tx->dqo.tx_qpl_buf_next) 272 272 return -ENOMEM; 273 273 ··· 337 337 338 338 tx->dqo.num_pending_packets = min_t(int, num_pending_packets, S16_MAX); 339 339 tx->dqo.pending_packets = kvzalloc_objs(tx->dqo.pending_packets[0], 340 - tx->dqo.num_pending_packets, 341 - GFP_KERNEL); 340 + tx->dqo.num_pending_packets); 342 341 if (!tx->dqo.pending_packets) 343 342 goto err; 344 343
+1 -1
drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
··· 1100 1100 int i; 1101 1101 1102 1102 tmp_rings = kzalloc_objs(struct hns3_enet_ring, 1103 - handle->kinfo.num_tqps * 2, GFP_KERNEL); 1103 + handle->kinfo.num_tqps * 2); 1104 1104 if (!tmp_rings) 1105 1105 return NULL; 1106 1106
+1 -1
drivers/net/ethernet/huawei/hinic3/hinic3_main.c
··· 58 58 struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); 59 59 60 60 nic_dev->intr_coalesce = kzalloc_objs(*nic_dev->intr_coalesce, 61 - nic_dev->max_qps, GFP_KERNEL); 61 + nic_dev->max_qps); 62 62 63 63 if (!nic_dev->intr_coalesce) 64 64 return -ENOMEM;
+3 -3
drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c
··· 100 100 nic_dev->num_qp_irq = 0; 101 101 102 102 nic_dev->qps_msix_entries = kzalloc_objs(struct msix_entry, 103 - nic_dev->max_qps, GFP_KERNEL); 103 + nic_dev->max_qps); 104 104 if (!nic_dev->qps_msix_entries) 105 105 return -ENOMEM; 106 106 ··· 127 127 int err; 128 128 129 129 q_params->txqs_res = kzalloc_objs(*q_params->txqs_res, 130 - q_params->num_qps, GFP_KERNEL); 130 + q_params->num_qps); 131 131 if (!q_params->txqs_res) 132 132 return -ENOMEM; 133 133 134 134 q_params->rxqs_res = kzalloc_objs(*q_params->rxqs_res, 135 - q_params->num_qps, GFP_KERNEL); 135 + q_params->num_qps); 136 136 if (!q_params->rxqs_res) { 137 137 err = -ENOMEM; 138 138 goto err_free_txqs_res_arr;
+1 -2
drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
··· 686 686 goto err_free_tqres; 687 687 688 688 tqres->bds = kzalloc_objs(*tqres->bds, 689 - sq_depth * HINIC3_BDS_PER_SQ_WQEBB + HINIC3_MAX_SQ_SGE, 690 - GFP_KERNEL); 689 + sq_depth * HINIC3_BDS_PER_SQ_WQEBB + HINIC3_MAX_SQ_SGE); 691 690 if (!tqres->bds) { 692 691 kfree(tqres->tx_info); 693 692 goto err_free_tqres;
+5 -5
drivers/net/ethernet/ibm/ibmvnic.c
··· 933 933 { 934 934 adapter->tx_stats_buffers = 935 935 kzalloc_objs(struct ibmvnic_tx_queue_stats, 936 - IBMVNIC_MAX_QUEUES, GFP_KERNEL); 936 + IBMVNIC_MAX_QUEUES); 937 937 if (!adapter->tx_stats_buffers) 938 938 return -ENOMEM; 939 939 940 940 adapter->rx_stats_buffers = 941 941 kzalloc_objs(struct ibmvnic_rx_queue_stats, 942 - IBMVNIC_MAX_QUEUES, GFP_KERNEL); 942 + IBMVNIC_MAX_QUEUES); 943 943 if (!adapter->rx_stats_buffers) 944 944 return -ENOMEM; 945 945 ··· 1124 1124 } 1125 1125 1126 1126 rx_pool->rx_buff = kzalloc_objs(struct ibmvnic_rx_buff, 1127 - rx_pool->size, GFP_KERNEL); 1127 + rx_pool->size); 1128 1128 if (!rx_pool->rx_buff) { 1129 1129 dev_err(dev, "Couldn't alloc rx buffers\n"); 1130 1130 rc = -ENOMEM; ··· 4476 4476 } 4477 4477 4478 4478 adapter->tx_scrq = kzalloc_objs(*adapter->tx_scrq, 4479 - adapter->req_tx_queues, GFP_KERNEL); 4479 + adapter->req_tx_queues); 4480 4480 if (!adapter->tx_scrq) 4481 4481 goto tx_failed; 4482 4482 ··· 4487 4487 } 4488 4488 4489 4489 adapter->rx_scrq = kzalloc_objs(*adapter->rx_scrq, 4490 - adapter->req_rx_queues, GFP_KERNEL); 4490 + adapter->req_rx_queues); 4491 4491 if (!adapter->rx_scrq) 4492 4492 goto rx_failed; 4493 4493
+2 -2
drivers/net/ethernet/intel/e1000/e1000_main.c
··· 1323 1323 static int e1000_alloc_queues(struct e1000_adapter *adapter) 1324 1324 { 1325 1325 adapter->tx_ring = kzalloc_objs(struct e1000_tx_ring, 1326 - adapter->num_tx_queues, GFP_KERNEL); 1326 + adapter->num_tx_queues); 1327 1327 if (!adapter->tx_ring) 1328 1328 return -ENOMEM; 1329 1329 1330 1330 adapter->rx_ring = kzalloc_objs(struct e1000_rx_ring, 1331 - adapter->num_rx_queues, GFP_KERNEL); 1331 + adapter->num_rx_queues); 1332 1332 if (!adapter->rx_ring) { 1333 1333 kfree(adapter->tx_ring); 1334 1334 return -ENOMEM;
+2 -4
drivers/net/ethernet/intel/e1000e/netdev.c
··· 2051 2051 if (adapter->flags & FLAG_HAS_MSIX) { 2052 2052 adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */ 2053 2053 adapter->msix_entries = kzalloc_objs(struct msix_entry, 2054 - adapter->num_vectors, 2055 - GFP_KERNEL); 2054 + adapter->num_vectors); 2056 2055 if (adapter->msix_entries) { 2057 2056 struct e1000_adapter *a = adapter; 2058 2057 ··· 2369 2370 for (i = 0; i < rx_ring->count; i++) { 2370 2371 buffer_info = &rx_ring->buffer_info[i]; 2371 2372 buffer_info->ps_pages = kzalloc_objs(struct e1000_ps_page, 2372 - PS_PAGE_BUFFERS, 2373 - GFP_KERNEL); 2373 + PS_PAGE_BUFFERS); 2374 2374 if (!buffer_info->ps_pages) 2375 2375 goto err_pages; 2376 2376 }
+1 -1
drivers/net/ethernet/intel/i40e/i40e_client.c
··· 567 567 u32 v_idx, i, reg_idx, reg; 568 568 569 569 ldev->qvlist_info = kzalloc_flex(*ldev->qvlist_info, qv_info, 570 - qvlist_info->num_vectors, GFP_KERNEL); 570 + qvlist_info->num_vectors); 571 571 if (!ldev->qvlist_info) 572 572 return -ENOMEM; 573 573 ldev->qvlist_info->num_vectors = qvlist_info->num_vectors;
+1 -1
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
··· 2159 2159 "Changing Rx descriptor count from %d to %d\n", 2160 2160 vsi->rx_rings[0]->count, new_rx_count); 2161 2161 rx_rings = kzalloc_objs(struct i40e_ring, 2162 - vsi->alloc_queue_pairs, GFP_KERNEL); 2162 + vsi->alloc_queue_pairs); 2163 2163 if (!rx_rings) { 2164 2164 err = -ENOMEM; 2165 2165 goto free_tx;
+1 -1
drivers/net/ethernet/intel/i40e/i40e_ptp.c
··· 1345 1345 pf->ptp_caps.n_per_out = 2; 1346 1346 1347 1347 pf->ptp_caps.pin_config = kzalloc_objs(*pf->ptp_caps.pin_config, 1348 - pf->ptp_caps.n_pins, GFP_KERNEL); 1348 + pf->ptp_caps.n_pins); 1349 1349 if (!pf->ptp_caps.pin_config) 1350 1350 return -ENOMEM; 1351 1351
+1 -2
drivers/net/ethernet/intel/ice/devlink/port.c
··· 59 59 int status; 60 60 61 61 options = kzalloc_objs(*options, 62 - ICE_AQC_PORT_OPT_MAX * ICE_MAX_PORT_PER_PCI_DEV, 63 - GFP_KERNEL); 62 + ICE_AQC_PORT_OPT_MAX * ICE_MAX_PORT_PER_PCI_DEV); 64 63 if (!options) 65 64 return; 66 65
+1 -1
drivers/net/ethernet/intel/ice/ice_xsk.c
··· 175 175 { 176 176 if (pool_present) { 177 177 rx_ring->xdp_buf = kzalloc_objs(*rx_ring->xdp_buf, 178 - rx_ring->count, GFP_KERNEL); 178 + rx_ring->count); 179 179 if (!rx_ring->xdp_buf) 180 180 return -ENOMEM; 181 181 } else {
+1 -1
drivers/net/ethernet/intel/idpf/idpf_idc.c
··· 432 432 433 433 privd->mapped_mem_regions = 434 434 kzalloc_objs(struct iidc_rdma_lan_mapped_mem_region, 435 - adapter->hw.num_lan_regs, GFP_KERNEL); 435 + adapter->hw.num_lan_regs); 436 436 if (!privd->mapped_mem_regions) { 437 437 err = -ENOMEM; 438 438 goto err_plug_aux_dev;
+1 -2
drivers/net/ethernet/intel/idpf/idpf_lib.c
··· 360 360 } 361 361 362 362 adapter->rdma_msix_entries = kzalloc_objs(struct msix_entry, 363 - num_rdma_vecs, 364 - GFP_KERNEL); 363 + num_rdma_vecs); 365 364 if (!adapter->rdma_msix_entries) { 366 365 err = -ENOMEM; 367 366 goto free_irq;
+7 -11
drivers/net/ethernet/intel/idpf/idpf_txrx.c
··· 1204 1204 goto config; 1205 1205 1206 1206 q_vector->xsksq = kzalloc_objs(*q_vector->xsksq, 1207 - DIV_ROUND_UP(rsrc->num_rxq_grp, rsrc->num_q_vectors), 1208 - GFP_KERNEL); 1207 + DIV_ROUND_UP(rsrc->num_rxq_grp, rsrc->num_q_vectors)); 1209 1208 if (!q_vector->xsksq) 1210 1209 return -ENOMEM; 1211 1210 ··· 1763 1764 continue; 1764 1765 1765 1766 tx_qgrp->complq = kzalloc_objs(*tx_qgrp->complq, 1766 - IDPF_COMPLQ_PER_GROUP, 1767 - GFP_KERNEL); 1767 + IDPF_COMPLQ_PER_GROUP); 1768 1768 if (!tx_qgrp->complq) 1769 1769 goto err_alloc; 1770 1770 ··· 1834 1836 } 1835 1837 1836 1838 rx_qgrp->splitq.bufq_sets = kzalloc_objs(struct idpf_bufq_set, 1837 - rsrc->num_bufqs_per_qgrp, 1838 - GFP_KERNEL); 1839 + rsrc->num_bufqs_per_qgrp); 1839 1840 if (!rx_qgrp->splitq.bufq_sets) { 1840 1841 err = -ENOMEM; 1841 1842 goto err_alloc; ··· 1870 1873 idpf_queue_set(GEN_CHK, refillq); 1871 1874 idpf_queue_set(RFL_GEN_CHK, refillq); 1872 1875 refillq->ring = kzalloc_objs(*refillq->ring, 1873 - refillq->desc_count, 1874 - GFP_KERNEL); 1876 + refillq->desc_count); 1875 1877 if (!refillq->ring) { 1876 1878 err = -ENOMEM; 1877 1879 goto err_alloc; ··· 4556 4560 user_config = &vport->adapter->vport_config[idx]->user_config; 4557 4561 4558 4562 rsrc->q_vectors = kzalloc_objs(struct idpf_q_vector, 4559 - rsrc->num_q_vectors, GFP_KERNEL); 4563 + rsrc->num_q_vectors); 4560 4564 if (!rsrc->q_vectors) 4561 4565 return -ENOMEM; 4562 4566 ··· 4599 4603 goto error; 4600 4604 4601 4605 q_vector->complq = kzalloc_objs(*q_vector->complq, 4602 - complqs_per_vector, GFP_KERNEL); 4606 + complqs_per_vector); 4603 4607 if (!q_vector->complq) 4604 4608 goto error; 4605 4609 ··· 4607 4611 continue; 4608 4612 4609 4613 q_vector->xsksq = kzalloc_objs(*q_vector->xsksq, 4610 - rxqs_per_vector, GFP_KERNEL); 4614 + rxqs_per_vector); 4611 4615 if (!q_vector->xsksq) 4612 4616 goto error; 4613 4617 }
+4 -4
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
··· 3433 3433 u16 num_max_vports = idpf_get_max_vports(adapter); 3434 3434 3435 3435 adapter->vport_params_reqd = kzalloc_objs(*adapter->vport_params_reqd, 3436 - num_max_vports, GFP_KERNEL); 3436 + num_max_vports); 3437 3437 if (!adapter->vport_params_reqd) 3438 3438 return -ENOMEM; 3439 3439 3440 3440 adapter->vport_params_recvd = kzalloc_objs(*adapter->vport_params_recvd, 3441 - num_max_vports, GFP_KERNEL); 3441 + num_max_vports); 3442 3442 if (!adapter->vport_params_recvd) 3443 3443 goto err_mem; 3444 3444 ··· 3450 3450 return 0; 3451 3451 3452 3452 adapter->vport_config = kzalloc_objs(*adapter->vport_config, 3453 - num_max_vports, GFP_KERNEL); 3453 + num_max_vports); 3454 3454 if (!adapter->vport_config) 3455 3455 goto err_mem; 3456 3456 ··· 3560 3560 3561 3561 if (!adapter->netdevs) { 3562 3562 adapter->netdevs = kzalloc_objs(struct net_device *, 3563 - num_max_vports, GFP_KERNEL); 3563 + num_max_vports); 3564 3564 if (!adapter->netdevs) { 3565 3565 err = -ENOMEM; 3566 3566 goto err_netdev_alloc;
+3 -4
drivers/net/ethernet/intel/igb/igb_main.c
··· 3776 3776 adapter->vfs_allocated_count = num_vfs; 3777 3777 3778 3778 adapter->vf_data = kzalloc_objs(struct vf_data_storage, 3779 - adapter->vfs_allocated_count, 3780 - GFP_KERNEL); 3779 + adapter->vfs_allocated_count); 3781 3780 3782 3781 /* if allocation failed then we do not support SR-IOV */ 3783 3782 if (!adapter->vf_data) { ··· 3795 3796 adapter->vfs_allocated_count); 3796 3797 3797 3798 adapter->vf_mac_list = kzalloc_objs(struct vf_mac_filter, 3798 - num_vf_mac_filters, GFP_KERNEL); 3799 + num_vf_mac_filters); 3799 3800 3800 3801 mac_list = adapter->vf_mac_list; 3801 3802 INIT_LIST_HEAD(&adapter->vf_macs.l); ··· 4091 4092 adapter->flags |= IGB_FLAG_HAS_MSIX; 4092 4093 4093 4094 adapter->mac_table = kzalloc_objs(struct igb_mac_addr, 4094 - hw->mac.rar_entry_count, GFP_KERNEL); 4095 + hw->mac.rar_entry_count); 4095 4096 if (!adapter->mac_table) 4096 4097 return -ENOMEM; 4097 4098
+1 -1
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 6904 6904 adapter->jump_tables[i] = NULL; 6905 6905 6906 6906 adapter->mac_table = kzalloc_objs(struct ixgbe_mac_addr, 6907 - hw->mac.num_rar_entries, GFP_KERNEL); 6907 + hw->mac.num_rar_entries); 6908 6908 if (!adapter->mac_table) 6909 6909 return -ENOMEM; 6910 6910
+1 -2
drivers/net/ethernet/intel/libie/fwlog.c
··· 1013 1013 return status; 1014 1014 1015 1015 fwlog->ring.rings = kzalloc_objs(*fwlog->ring.rings, 1016 - LIBIE_FWLOG_RING_SIZE_DFLT, 1017 - GFP_KERNEL); 1016 + LIBIE_FWLOG_RING_SIZE_DFLT); 1018 1017 if (!fwlog->ring.rings) { 1019 1018 dev_warn(&fwlog->pdev->dev, "Unable to allocate memory for FW log rings\n"); 1020 1019 return -ENOMEM;
+1 -1
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
··· 1950 1950 goto err_free_mem; 1951 1951 1952 1952 qset->sq = kzalloc_objs(struct otx2_snd_queue, 1953 - otx2_get_total_tx_queues(pf), GFP_KERNEL); 1953 + otx2_get_total_tx_queues(pf)); 1954 1954 if (!qset->sq) 1955 1955 goto err_free_mem; 1956 1956
+1 -1
drivers/net/ethernet/marvell/prestera/prestera_devlink.c
··· 456 456 return -ENOMEM; 457 457 458 458 trap_data->trap_items_arr = kzalloc_objs(struct prestera_trap_item, 459 - traps_count, GFP_KERNEL); 459 + traps_count); 460 460 if (!trap_data->trap_items_arr) { 461 461 err = -ENOMEM; 462 462 goto err_trap_items_alloc;
+2 -2
drivers/net/ethernet/mellanox/mlx4/cmd.c
··· 2374 2374 2375 2375 priv->mfunc.master.vf_admin = 2376 2376 kzalloc_objs(struct mlx4_vf_admin_state, 2377 - dev->num_slaves, GFP_KERNEL); 2377 + dev->num_slaves); 2378 2378 if (!priv->mfunc.master.vf_admin) 2379 2379 goto err_comm_admin; 2380 2380 ··· 2620 2620 int err = 0; 2621 2621 2622 2622 priv->cmd.context = kmalloc_objs(struct mlx4_cmd_context, 2623 - priv->cmd.max_cmds, GFP_KERNEL); 2623 + priv->cmd.max_cmds); 2624 2624 if (!priv->cmd.context) 2625 2625 return -ENOMEM; 2626 2626
+2 -2
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
··· 2241 2241 continue; 2242 2242 2243 2243 dst->tx_ring[t] = kzalloc_objs(struct mlx4_en_tx_ring *, 2244 - MAX_TX_RINGS, GFP_KERNEL); 2244 + MAX_TX_RINGS); 2245 2245 if (!dst->tx_ring[t]) 2246 2246 goto err_free_tx; 2247 2247 ··· 3215 3215 continue; 3216 3216 3217 3217 priv->tx_ring[t] = kzalloc_objs(struct mlx4_en_tx_ring *, 3218 - MAX_TX_RINGS, GFP_KERNEL); 3218 + MAX_TX_RINGS); 3219 3219 if (!priv->tx_ring[t]) { 3220 3220 err = -ENOMEM; 3221 3221 goto out;
+2 -3
drivers/net/ethernet/mellanox/mlx4/eq.c
··· 1158 1158 struct mlx4_priv *priv = mlx4_priv(dev); 1159 1159 1160 1160 priv->eq_table.eq = kzalloc_objs(*priv->eq_table.eq, 1161 - dev->caps.num_eqs - dev->caps.reserved_eqs, 1162 - GFP_KERNEL); 1161 + dev->caps.num_eqs - dev->caps.reserved_eqs); 1163 1162 if (!priv->eq_table.eq) 1164 1163 return -ENOMEM; 1165 1164 ··· 1177 1178 int i; 1178 1179 1179 1180 priv->eq_table.uar_map = kzalloc_objs(*priv->eq_table.uar_map, 1180 - mlx4_num_eq_uar(dev), GFP_KERNEL); 1181 + mlx4_num_eq_uar(dev)); 1181 1182 if (!priv->eq_table.uar_map) { 1182 1183 err = -ENOMEM; 1183 1184 goto err_out_free;
+1 -1
drivers/net/ethernet/mellanox/mlx4/intf.c
··· 79 79 return priv->adev_idx; 80 80 81 81 priv->adev = kzalloc_objs(struct mlx4_adev *, 82 - ARRAY_SIZE(mlx4_adev_devices), GFP_KERNEL); 82 + ARRAY_SIZE(mlx4_adev_devices)); 83 83 if (!priv->adev) { 84 84 ida_free(&mlx4_adev_ida, priv->adev_idx); 85 85 return -ENOMEM;
+1 -2
drivers/net/ethernet/mellanox/mlx4/qp.c
··· 854 854 /* In mfunc, calculate proxy and tunnel qp offsets for the PF here, 855 855 * since the PF does not call mlx4_slave_caps */ 856 856 dev->caps.spec_qps = kzalloc_objs(*dev->caps.spec_qps, 857 - dev->caps.num_ports, 858 - GFP_KERNEL); 857 + dev->caps.num_ports); 859 858 if (!dev->caps.spec_qps) { 860 859 err = -ENOMEM; 861 860 goto err_mem;
+2 -4
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
··· 526 526 &priv->mfunc.master.res_tracker.res_alloc[i]; 527 527 res_alloc->quota = kmalloc_objs(int, dev->persist->num_vfs + 1); 528 528 res_alloc->guaranteed = kmalloc_objs(int, 529 - dev->persist->num_vfs + 1, 530 - GFP_KERNEL); 529 + dev->persist->num_vfs + 1); 531 530 if (i == RES_MAC || i == RES_VLAN) 532 531 res_alloc->allocated = 533 532 kzalloc_objs(int, 534 - MLX4_MAX_PORTS * (dev->persist->num_vfs + 1), 535 - GFP_KERNEL); 533 + MLX4_MAX_PORTS * (dev->persist->num_vfs + 1)); 536 534 else 537 535 res_alloc->allocated = 538 536 kzalloc_objs(int, dev->persist->num_vfs + 1);
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/dev.c
··· 278 278 struct mlx5_priv *priv = &dev->priv; 279 279 280 280 priv->adev = kzalloc_objs(struct mlx5_adev *, 281 - ARRAY_SIZE(mlx5_adev_devices), GFP_KERNEL); 281 + ARRAY_SIZE(mlx5_adev_devices)); 282 282 if (!priv->adev) 283 283 return -ENOMEM; 284 284
+1 -2
drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
··· 88 88 struct mlx5e_sq_stats **stats_list; 89 89 90 90 stats_list = kvzalloc_objs(*stats_list, 91 - mlx5e_qos_max_leaf_nodes(priv->mdev), 92 - GFP_KERNEL); 91 + mlx5e_qos_max_leaf_nodes(priv->mdev)); 93 92 if (!stats_list) 94 93 return -ENOMEM; 95 94
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
··· 917 917 u32 rate_limit_elem_ix; 918 918 919 919 vport->qos.sched_nodes = kzalloc_objs(struct mlx5_esw_sched_node *, 920 - num_tcs, GFP_KERNEL); 920 + num_tcs); 921 921 if (!vport->qos.sched_nodes) { 922 922 NL_SET_ERR_MSG_MOD(extack, 923 923 "Allocating the vport TC scheduling elements failed.");
+3 -3
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
··· 3375 3375 3376 3376 steering->rdma_transport_rx_root_ns = 3377 3377 kzalloc_objs(*steering->rdma_transport_rx_root_ns, 3378 - total_vports, GFP_KERNEL); 3378 + total_vports); 3379 3379 if (!steering->rdma_transport_rx_root_ns) 3380 3380 return -ENOMEM; 3381 3381 ··· 3407 3407 3408 3408 steering->rdma_transport_tx_root_ns = 3409 3409 kzalloc_objs(*steering->rdma_transport_tx_root_ns, 3410 - total_vports, GFP_KERNEL); 3410 + total_vports); 3411 3411 if (!steering->rdma_transport_tx_root_ns) 3412 3412 return -ENOMEM; 3413 3413 ··· 3516 3516 int err; 3517 3517 3518 3518 steering->fdb_sub_ns = kzalloc_objs(*steering->fdb_sub_ns, 3519 - FDB_NUM_CHAINS, GFP_KERNEL); 3519 + FDB_NUM_CHAINS); 3520 3520 if (!steering->fdb_sub_ns) 3521 3521 return -ENOMEM; 3522 3522
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/hwmon.c
··· 314 314 hwmon->module_scount = mon_cap ? 1 : 0; 315 315 sensors_count = hwmon->asic_platform_scount + hwmon->module_scount; 316 316 hwmon->temp_channel_desc = kzalloc_objs(*hwmon->temp_channel_desc, 317 - sensors_count, GFP_KERNEL); 317 + sensors_count); 318 318 if (!hwmon->temp_channel_desc) { 319 319 err = -ENOMEM; 320 320 goto err_free_hwmon;
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
··· 1083 1083 1084 1084 clock->ptp_info.pin_config = 1085 1085 kzalloc_objs(*clock->ptp_info.pin_config, 1086 - clock->ptp_info.n_pins, GFP_KERNEL); 1086 + clock->ptp_info.n_pins); 1087 1087 if (!clock->ptp_info.pin_config) 1088 1088 return; 1089 1089 clock->ptp_info.enable = mlx5_ptp_enable;
+1 -2
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
··· 254 254 255 255 bwc_matcher->size_of_at_array = MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM; 256 256 bwc_matcher->at = kzalloc_objs(*bwc_matcher->at, 257 - bwc_matcher->size_of_at_array, 258 - GFP_KERNEL); 257 + bwc_matcher->size_of_at_array); 259 258 if (!bwc_matcher->at) 260 259 goto free_bwc_matcher_rules; 261 260
+3 -3
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
··· 1034 1034 queue->used_entries = 0; 1035 1035 1036 1036 queue->completed.entries = kzalloc_objs(queue->completed.entries[0], 1037 - queue->num_entries, GFP_KERNEL); 1037 + queue->num_entries); 1038 1038 if (!queue->completed.entries) 1039 1039 return -ENOMEM; 1040 1040 ··· 1094 1094 ctx->queues += bwc_queues; 1095 1095 1096 1096 ctx->bwc_send_queue_locks = kzalloc_objs(*ctx->bwc_send_queue_locks, 1097 - bwc_queues, GFP_KERNEL); 1097 + bwc_queues); 1098 1098 1099 1099 if (!ctx->bwc_send_queue_locks) 1100 1100 return -ENOMEM; 1101 1101 1102 1102 ctx->bwc_lock_class_keys = kzalloc_objs(*ctx->bwc_lock_class_keys, 1103 - bwc_queues, GFP_KERNEL); 1103 + bwc_queues); 1104 1104 if (!ctx->bwc_lock_class_keys) 1105 1105 goto err_lock_class_keys; 1106 1106
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_icm_pool.c
··· 548 548 pool->th = max_hot_size; 549 549 550 550 pool->hot_chunks_arr = kvzalloc_objs(struct mlx5dr_icm_hot_chunk, 551 - num_of_chunks, GFP_KERNEL); 551 + num_of_chunks); 552 552 if (!pool->hot_chunks_arr) 553 553 goto free_pool; 554 554
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c
··· 282 282 } 283 283 284 284 fs_dr_actions = kzalloc_objs(*fs_dr_actions, 285 - MLX5_FLOW_CONTEXT_ACTION_MAX, GFP_KERNEL); 285 + MLX5_FLOW_CONTEXT_ACTION_MAX); 286 286 if (!fs_dr_actions) { 287 287 err = -ENOMEM; 288 288 goto free_actions_alloc;
+1 -1
drivers/net/ethernet/mellanox/mlxsw/core.c
··· 151 151 mlxsw_core->max_ports = MLXSW_PORT_MAX_PORTS_DEFAULT + 1; 152 152 153 153 mlxsw_core->ports = kzalloc_objs(struct mlxsw_core_port, 154 - mlxsw_core->max_ports, GFP_KERNEL); 154 + mlxsw_core->max_ports); 155 155 if (!mlxsw_core->ports) 156 156 return -ENOMEM; 157 157
+1 -2
drivers/net/ethernet/mellanox/mlxsw/core_env.c
··· 1245 1245 for (i = 0; i < env->num_of_slots; i++) { 1246 1246 env->line_cards[i] = kzalloc_flex(*env->line_cards[i], 1247 1247 module_info, 1248 - env->max_module_count, 1249 - GFP_KERNEL); 1248 + env->max_module_count); 1250 1249 if (!env->line_cards[i]) 1251 1250 goto kzalloc_err; 1252 1251
+1 -1
drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
··· 1470 1470 } 1471 1471 1472 1472 types_info->ini_files = kmalloc_objs(struct mlxsw_linecard_ini_file *, 1473 - types_info->count, GFP_KERNEL); 1473 + types_info->count); 1474 1474 if (!types_info->ini_files) { 1475 1475 err = -ENOMEM; 1476 1476 goto err_ini_files_alloc;
+2 -2
drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
··· 432 432 return 0; 433 433 434 434 area->tz_module_arr = kzalloc_objs(*area->tz_module_arr, 435 - area->tz_module_num, GFP_KERNEL); 435 + area->tz_module_num); 436 436 if (!area->tz_module_arr) 437 437 return -ENOMEM; 438 438 ··· 522 522 523 523 area->tz_gearbox_num = gbox_num; 524 524 area->tz_gearbox_arr = kzalloc_objs(*area->tz_gearbox_arr, 525 - area->tz_gearbox_num, GFP_KERNEL); 525 + area->tz_gearbox_num); 526 526 if (!area->tz_gearbox_arr) 527 527 return -ENOMEM; 528 528
+2 -2
drivers/net/ethernet/mellanox/mlxsw/minimal.c
··· 403 403 return -ENOMEM; 404 404 405 405 mlxsw_m->line_cards = kzalloc_objs(*mlxsw_m->line_cards, 406 - mlxsw_m->num_of_slots, GFP_KERNEL); 406 + mlxsw_m->num_of_slots); 407 407 if (!mlxsw_m->line_cards) { 408 408 err = -ENOMEM; 409 409 goto err_kcalloc; ··· 412 412 for (i = 0; i < mlxsw_m->num_of_slots; i++) { 413 413 mlxsw_m->line_cards[i] = 414 414 kzalloc_flex(*mlxsw_m->line_cards[i], module_to_port, 415 - mlxsw_m->max_modules_per_slot, GFP_KERNEL); 415 + mlxsw_m->max_modules_per_slot); 416 416 if (!mlxsw_m->line_cards[i]) { 417 417 err = -ENOMEM; 418 418 goto err_kmalloc_array;
+1 -1
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
··· 2020 2020 int err; 2021 2021 2022 2022 mlxsw_sp->port_mapping = kzalloc_objs(struct mlxsw_sp_port_mapping, 2023 - max_ports, GFP_KERNEL); 2023 + max_ports); 2024 2024 if (!mlxsw_sp->port_mapping) 2025 2025 return -ENOMEM; 2026 2026
+1 -1
drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c
··· 260 260 mr_tcam_region->parman = parman; 261 261 262 262 parman_prios = kmalloc_objs(*parman_prios, 263 - MLXSW_SP_MR_ROUTE_PRIO_MAX + 1, GFP_KERNEL); 263 + MLXSW_SP_MR_ROUTE_PRIO_MAX + 1); 264 264 if (!parman_prios) { 265 265 err = -ENOMEM; 266 266 goto err_parman_prios_alloc;
+1 -2
drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
··· 366 366 367 367 if (ops->num_classes) { 368 368 mlxsw_sp_qdisc->qdiscs = kzalloc_objs(*mlxsw_sp_qdisc->qdiscs, 369 - ops->num_classes, 370 - GFP_KERNEL); 369 + ops->num_classes); 371 370 if (!mlxsw_sp_qdisc->qdiscs) 372 371 return -ENOMEM; 373 372
+1 -2
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
··· 718 718 max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES); 719 719 mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN; 720 720 mlxsw_sp->router->lpm.trees = kzalloc_objs(struct mlxsw_sp_lpm_tree, 721 - mlxsw_sp->router->lpm.tree_count, 722 - GFP_KERNEL); 721 + mlxsw_sp->router->lpm.tree_count); 723 722 if (!mlxsw_sp->router->lpm.trees) 724 723 return -ENOMEM; 725 724
+1 -1
drivers/net/ethernet/netronome/nfp/bpf/verifier.c
··· 771 771 nfp_prog = env->prog->aux->offload->dev_priv; 772 772 nfp_prog->subprog_cnt = env->subprog_cnt; 773 773 nfp_prog->subprog = kzalloc_objs(nfp_prog->subprog[0], 774 - nfp_prog->subprog_cnt, GFP_KERNEL); 774 + nfp_prog->subprog_cnt); 775 775 if (!nfp_prog->subprog) 776 776 return -ENOMEM; 777 777
+1 -1
drivers/net/ethernet/netronome/nfp/flower/metadata.c
··· 558 558 /* Init timestamps for mask id*/ 559 559 priv->mask_ids.last_used = 560 560 kmalloc_objs(*priv->mask_ids.last_used, 561 - NFP_FLOWER_MASK_ENTRY_RS, GFP_KERNEL); 561 + NFP_FLOWER_MASK_ENTRY_RS); 562 562 if (!priv->mask_ids.last_used) 563 563 goto err_free_mask_id; 564 564
+4 -8
drivers/net/ethernet/qlogic/qed/qed_dev.c
··· 2109 2109 goto alloc_err; 2110 2110 2111 2111 qm_info->qm_pq_params = kzalloc_objs(*qm_info->qm_pq_params, 2112 - qed_init_qm_get_num_pqs(p_hwfn), 2113 - GFP_KERNEL); 2112 + qed_init_qm_get_num_pqs(p_hwfn)); 2114 2113 if (!qm_info->qm_pq_params) 2115 2114 goto alloc_err; 2116 2115 2117 2116 qm_info->qm_vport_params = kzalloc_objs(*qm_info->qm_vport_params, 2118 - qed_init_qm_get_num_vports(p_hwfn), 2119 - GFP_KERNEL); 2117 + qed_init_qm_get_num_vports(p_hwfn)); 2120 2118 if (!qm_info->qm_vport_params) 2121 2119 goto alloc_err; 2122 2120 2123 2121 qm_info->qm_port_params = kzalloc_objs(*qm_info->qm_port_params, 2124 - p_hwfn->cdev->num_ports_in_engine, 2125 - GFP_KERNEL); 2122 + p_hwfn->cdev->num_ports_in_engine); 2126 2123 if (!qm_info->qm_port_params) 2127 2124 goto alloc_err; 2128 2125 2129 2126 qm_info->wfq_data = kzalloc_objs(*qm_info->wfq_data, 2130 - qed_init_qm_get_num_vports(p_hwfn), 2131 - GFP_KERNEL); 2127 + qed_init_qm_get_num_vports(p_hwfn)); 2132 2128 if (!qm_info->wfq_data) 2133 2129 goto alloc_err; 2134 2130
+2 -3
drivers/net/ethernet/qlogic/qed/qed_iwarp.c
··· 2760 2760 goto err; 2761 2761 2762 2762 iwarp_info->partial_fpdus = kzalloc_objs(*iwarp_info->partial_fpdus, 2763 - (u16)p_hwfn->p_rdma_info->num_qps, 2764 - GFP_KERNEL); 2763 + (u16)p_hwfn->p_rdma_info->num_qps); 2765 2764 if (!iwarp_info->partial_fpdus) { 2766 2765 rc = -ENOMEM; 2767 2766 goto err; ··· 2780 2781 * we allocate enough to take care of all rx packets 2781 2782 */ 2782 2783 iwarp_info->mpa_bufs = kzalloc_objs(*iwarp_info->mpa_bufs, 2783 - data.input.rx_num_desc, GFP_KERNEL); 2784 + data.input.rx_num_desc); 2784 2785 if (!iwarp_info->mpa_bufs) { 2785 2786 rc = -ENOMEM; 2786 2787 goto err;
+1 -2
drivers/net/ethernet/qlogic/qed/qed_ll2.c
··· 2200 2200 2201 2201 /* Allocate LL2's set struct */ 2202 2202 p_ll2_connections = kzalloc_objs(struct qed_ll2_info, 2203 - QED_MAX_NUM_OF_LL2_CONNECTIONS, 2204 - GFP_KERNEL); 2203 + QED_MAX_NUM_OF_LL2_CONNECTIONS); 2205 2204 if (!p_ll2_connections) { 2206 2205 DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n"); 2207 2206 return -ENOMEM;
+1 -1
drivers/net/ethernet/qlogic/qed/qed_mcp.c
··· 3366 3366 } 3367 3367 3368 3368 nvm_info.image_att = kmalloc_objs(struct bist_nvm_image_att, 3369 - nvm_info.num_images, GFP_KERNEL); 3369 + nvm_info.num_images); 3370 3370 if (!nvm_info.image_att) { 3371 3371 rc = -ENOMEM; 3372 3372 goto err0;
+3 -4
drivers/net/ethernet/qlogic/qed/qed_ooo.c
··· 119 119 INIT_LIST_HEAD(&p_ooo_info->free_isles_list); 120 120 121 121 p_ooo_info->p_isles_mem = kzalloc_objs(struct qed_ooo_isle, 122 - max_num_isles, GFP_KERNEL); 122 + max_num_isles); 123 123 if (!p_ooo_info->p_isles_mem) 124 124 goto no_isles_mem; 125 125 ··· 131 131 132 132 p_ooo_info->p_archipelagos_mem = 133 133 kzalloc_objs(struct qed_ooo_archipelago, 134 - max_num_archipelagos, GFP_KERNEL); 134 + max_num_archipelagos); 135 135 if (!p_ooo_info->p_archipelagos_mem) 136 136 goto no_archipelagos_mem; 137 137 ··· 140 140 141 141 p_ooo_info->ooo_history.p_cqes = 142 142 kzalloc_objs(struct ooo_opaque, 143 - QED_MAX_NUM_OOO_HISTORY_ENTRIES, 144 - GFP_KERNEL); 143 + QED_MAX_NUM_OOO_HISTORY_ENTRIES); 145 144 if (!p_ooo_info->ooo_history.p_cqes) 146 145 goto no_history_mem; 147 146
+1 -2
drivers/net/ethernet/qlogic/qede/qede_main.c
··· 971 971 972 972 if (!edev->coal_entry) { 973 973 edev->coal_entry = kzalloc_objs(*edev->coal_entry, 974 - QEDE_MAX_RSS_CNT(edev), 975 - GFP_KERNEL); 974 + QEDE_MAX_RSS_CNT(edev)); 976 975 if (!edev->coal_entry) { 977 976 DP_ERR(edev, "coalesce entry allocation failed\n"); 978 977 goto err;
+1 -1
drivers/net/ethernet/qlogic/qla3xxx.c
··· 2579 2579 qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2; 2580 2580 2581 2581 qdev->lrg_buf = kmalloc_objs(struct ql_rcv_buf_cb, 2582 - qdev->num_large_buffers, GFP_KERNEL); 2582 + qdev->num_large_buffers); 2583 2583 if (qdev->lrg_buf == NULL) 2584 2584 return -ENOMEM; 2585 2585
+1 -1
drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
··· 190 190 recv_ctx = adapter->recv_ctx; 191 191 192 192 rds_ring = kzalloc_objs(struct qlcnic_host_rds_ring, 193 - adapter->max_rds_rings, GFP_KERNEL); 193 + adapter->max_rds_rings); 194 194 if (rds_ring == NULL) 195 195 goto err_out; 196 196
+4 -4
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
··· 680 680 681 681 if (!adapter->msix_entries) { 682 682 adapter->msix_entries = kzalloc_objs(struct msix_entry, 683 - num_msix, GFP_KERNEL); 683 + num_msix); 684 684 if (!adapter->msix_entries) 685 685 return -ENOMEM; 686 686 } ··· 734 734 735 735 if (!adapter->msix_entries) { 736 736 adapter->msix_entries = kzalloc_objs(struct msix_entry, 737 - num_msix, GFP_KERNEL); 737 + num_msix); 738 738 if (!adapter->msix_entries) 739 739 return -ENOMEM; 740 740 } ··· 1001 1001 } 1002 1002 1003 1003 adapter->eswitch = kzalloc_objs(struct qlcnic_eswitch, 1004 - QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL); 1004 + QLCNIC_NIU_MAX_XG_PORTS); 1005 1005 if (!adapter->eswitch) { 1006 1006 ret = -ENOMEM; 1007 1007 goto err_npars; ··· 2351 2351 struct qlcnic_cmd_buffer *cmd_buf_arr; 2352 2352 2353 2353 tx_ring = kzalloc_objs(struct qlcnic_host_tx_ring, 2354 - adapter->drv_tx_rings, GFP_KERNEL); 2354 + adapter->drv_tx_rings); 2355 2355 if (tx_ring == NULL) 2356 2356 return -ENOMEM; 2357 2357
+1 -1
drivers/net/ethernet/renesas/ravb_main.c
··· 437 437 438 438 /* Allocate RX buffers */ 439 439 priv->rx_buffers[q] = kzalloc_objs(*priv->rx_buffers[q], 440 - priv->num_rx_ring[q], GFP_KERNEL); 440 + priv->num_rx_ring[q]); 441 441 if (!priv->rx_buffers[q]) 442 442 goto error; 443 443
+1 -2
drivers/net/ethernet/sfc/efx_channels.c
··· 935 935 936 936 /* Allocate array for XDP TX queue lookup. */ 937 937 efx->xdp_tx_queues = kzalloc_objs(*efx->xdp_tx_queues, 938 - efx->xdp_tx_queue_count, 939 - GFP_KERNEL); 938 + efx->xdp_tx_queue_count); 940 939 if (!efx->xdp_tx_queues) 941 940 return -ENOMEM; 942 941 }
+1 -1
drivers/net/ethernet/sfc/efx_common.c
··· 997 997 spin_lock_init(&efx->rps_hash_lock); 998 998 /* Failure to allocate is not fatal, but may degrade ARFS performance */ 999 999 efx->rps_hash_table = kzalloc_objs(*efx->rps_hash_table, 1000 - EFX_ARFS_HASH_TABLE_SIZE, GFP_KERNEL); 1000 + EFX_ARFS_HASH_TABLE_SIZE); 1001 1001 #endif 1002 1002 spin_lock_init(&efx->vf_reps_lock); 1003 1003 INIT_LIST_HEAD(&efx->vf_reps);
+1 -2
drivers/net/ethernet/sfc/falcon/tx.c
··· 549 549 return -ENOMEM; 550 550 551 551 tx_queue->cb_page = kzalloc_objs(tx_queue->cb_page[0], 552 - ef4_tx_cb_page_count(tx_queue), 553 - GFP_KERNEL); 552 + ef4_tx_cb_page_count(tx_queue)); 554 553 if (!tx_queue->cb_page) { 555 554 rc = -ENOMEM; 556 555 goto fail1;
+2 -2
drivers/net/ethernet/sfc/mae.c
··· 256 256 goto fail; 257 257 rc = -ENOMEM; 258 258 desc->keys = kzalloc_objs(struct efx_tc_table_field_fmt, 259 - desc->n_keys, GFP_KERNEL); 259 + desc->n_keys); 260 260 if (!desc->keys) 261 261 goto fail; 262 262 desc->resps = kzalloc_objs(struct efx_tc_table_field_fmt, 263 - desc->n_resps, GFP_KERNEL); 263 + desc->n_resps); 264 264 if (!desc->resps) 265 265 goto fail; 266 266 }
+1 -2
drivers/net/ethernet/sfc/siena/efx_channels.c
··· 967 967 968 968 /* Allocate array for XDP TX queue lookup. */ 969 969 efx->xdp_tx_queues = kzalloc_objs(*efx->xdp_tx_queues, 970 - efx->xdp_tx_queue_count, 971 - GFP_KERNEL); 970 + efx->xdp_tx_queue_count); 972 971 if (!efx->xdp_tx_queues) 973 972 return -ENOMEM; 974 973 }
+1 -1
drivers/net/ethernet/sfc/siena/efx_common.c
··· 1024 1024 spin_lock_init(&efx->rps_hash_lock); 1025 1025 /* Failure to allocate is not fatal, but may degrade ARFS performance */ 1026 1026 efx->rps_hash_table = kzalloc_objs(*efx->rps_hash_table, 1027 - EFX_ARFS_HASH_TABLE_SIZE, GFP_KERNEL); 1027 + EFX_ARFS_HASH_TABLE_SIZE); 1028 1028 #endif 1029 1029 efx->mdio.dev = net_dev; 1030 1030 INIT_WORK(&efx->mac_work, efx_mac_work);
+1 -2
drivers/net/ethernet/sfc/siena/tx_common.c
··· 41 41 return -ENOMEM; 42 42 43 43 tx_queue->cb_page = kzalloc_objs(tx_queue->cb_page[0], 44 - efx_tx_cb_page_count(tx_queue), 45 - GFP_KERNEL); 44 + efx_tx_cb_page_count(tx_queue)); 46 45 if (!tx_queue->cb_page) { 47 46 rc = -ENOMEM; 48 47 goto fail1;
+1 -2
drivers/net/ethernet/sfc/tx_common.c
··· 41 41 return -ENOMEM; 42 42 43 43 tx_queue->cb_page = kzalloc_objs(tx_queue->cb_page[0], 44 - efx_tx_cb_page_count(tx_queue), 45 - GFP_KERNEL); 44 + efx_tx_cb_page_count(tx_queue)); 46 45 if (!tx_queue->cb_page) { 47 46 rc = -ENOMEM; 48 47 goto fail1;
+2 -2
drivers/net/ethernet/spacemit/k1_emac.c
··· 392 392 struct platform_device *pdev = priv->pdev; 393 393 394 394 tx_ring->tx_desc_buf = kzalloc_objs(*tx_ring->tx_desc_buf, 395 - tx_ring->total_cnt, GFP_KERNEL); 395 + tx_ring->total_cnt); 396 396 397 397 if (!tx_ring->tx_desc_buf) 398 398 return -ENOMEM; ··· 420 420 struct platform_device *pdev = priv->pdev; 421 421 422 422 rx_ring->rx_desc_buf = kzalloc_objs(*rx_ring->rx_desc_buf, 423 - rx_ring->total_cnt, GFP_KERNEL); 423 + rx_ring->total_cnt); 424 424 if (!rx_ring->rx_desc_buf) 425 425 return -ENOMEM; 426 426
+1 -1
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 2296 2296 tx_q->priv_data = priv; 2297 2297 2298 2298 tx_q->tx_skbuff_dma = kzalloc_objs(*tx_q->tx_skbuff_dma, 2299 - dma_conf->dma_tx_size, GFP_KERNEL); 2299 + dma_conf->dma_tx_size); 2300 2300 if (!tx_q->tx_skbuff_dma) 2301 2301 return -ENOMEM; 2302 2302
+1 -1
drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
··· 141 141 142 142 /* Array of descriptor data */ 143 143 ring->desc_data_head = kzalloc_objs(struct xlgmac_desc_data, 144 - dma_desc_count, GFP_KERNEL); 144 + dma_desc_count); 145 145 if (!ring->desc_data_head) 146 146 return -ENOMEM; 147 147
+1 -1
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
··· 2306 2306 2307 2307 /* allocate scan list */ 2308 2308 wl->networks = kzalloc_objs(struct gelic_wl_scan_info, 2309 - GELIC_WL_BSS_MAX_ENT, GFP_KERNEL); 2309 + GELIC_WL_BSS_MAX_ENT); 2310 2310 2311 2311 if (!wl->networks) 2312 2312 goto fail_bss;
+2 -3
drivers/net/ethernet/via/via-velocity.c
··· 1632 1632 int ret = -ENOMEM; 1633 1633 1634 1634 vptr->rx.info = kzalloc_objs(struct velocity_rd_info, 1635 - vptr->options.numrx, GFP_KERNEL); 1635 + vptr->options.numrx); 1636 1636 if (!vptr->rx.info) 1637 1637 goto out; 1638 1638 ··· 1665 1665 for (j = 0; j < vptr->tx.numq; j++) { 1666 1666 1667 1667 vptr->tx.infos[j] = kzalloc_objs(struct velocity_td_info, 1668 - vptr->options.numtx, 1669 - GFP_KERNEL); 1668 + vptr->options.numtx); 1670 1669 if (!vptr->tx.infos[j]) { 1671 1670 while (--j >= 0) 1672 1671 kfree(vptr->tx.infos[j]);
+1 -1
drivers/net/ethernet/wangxun/libwx/wx_hw.c
··· 2506 2506 WX_RSS_FIELD_IPV6 | WX_RSS_FIELD_IPV6_TCP; 2507 2507 2508 2508 wx->mac_table = kzalloc_objs(struct wx_mac_addr, 2509 - wx->mac.num_rar_entries, GFP_KERNEL); 2509 + wx->mac.num_rar_entries); 2510 2510 if (!wx->mac_table) { 2511 2511 wx_err(wx, "mac_table allocation failed\n"); 2512 2512 kfree(wx->rss_key);
+1 -2
drivers/net/netdevsim/dev.c
··· 940 940 return -ENOMEM; 941 941 942 942 nsim_trap_data->trap_items_arr = kzalloc_objs(struct nsim_trap_item, 943 - ARRAY_SIZE(nsim_traps_arr), 944 - GFP_KERNEL); 943 + ARRAY_SIZE(nsim_traps_arr)); 945 944 if (!nsim_trap_data->trap_items_arr) { 946 945 err = -ENOMEM; 947 946 goto err_trap_data_free;
+1 -1
drivers/net/phy/dp83640.c
··· 1032 1032 goto out; 1033 1033 1034 1034 clock->caps.pin_config = kzalloc_objs(struct ptp_pin_desc, 1035 - DP83640_N_PINS, GFP_KERNEL); 1035 + DP83640_N_PINS); 1036 1036 if (!clock->caps.pin_config) { 1037 1037 kfree(clock); 1038 1038 clock = NULL;
+1 -2
drivers/net/phy/phy_led_triggers.c
··· 104 104 goto out_free_link; 105 105 106 106 phy->phy_led_triggers = kzalloc_objs(struct phy_led_trigger, 107 - phy->phy_num_led_triggers, 108 - GFP_KERNEL); 107 + phy->phy_num_led_triggers); 109 108 if (!phy->phy_led_triggers) { 110 109 err = -ENOMEM; 111 110 goto out_unreg_link;
+1 -2
drivers/net/pse-pd/pd692x0.c
··· 647 647 648 648 pw_table = pd692x0_class_pw_table; 649 649 c33_pw_limit_ranges = kzalloc_objs(*c33_pw_limit_ranges, 650 - PD692X0_CLASS_PW_TABLE_SIZE, 651 - GFP_KERNEL); 650 + PD692X0_CLASS_PW_TABLE_SIZE); 652 651 if (!c33_pw_limit_ranges) 653 652 return -ENOMEM; 654 653
+1 -2
drivers/net/wireless/ath/ath11k/debugfs.c
··· 1216 1216 dbr_dbg_data->num_ring_debug_entries = ATH11K_DEBUG_DBR_ENTRIES_MAX; 1217 1217 dbr_dbg_data->dbr_debug_idx = 0; 1218 1218 dbr_dbg_data->entries = kzalloc_objs(struct ath11k_dbg_dbr_entry, 1219 - ATH11K_DEBUG_DBR_ENTRIES_MAX, 1220 - GFP_KERNEL); 1219 + ATH11K_DEBUG_DBR_ENTRIES_MAX); 1221 1220 if (!dbr_dbg_data->entries) 1222 1221 return -ENOMEM; 1223 1222
+1 -1
drivers/net/wireless/ath/ath12k/dp.c
··· 408 408 409 409 dp->num_bank_profiles = num_tcl_banks; 410 410 dp->bank_profiles = kmalloc_objs(struct ath12k_dp_tx_bank_profile, 411 - num_tcl_banks, GFP_KERNEL); 411 + num_tcl_banks); 412 412 if (!dp->bank_profiles) 413 413 return -ENOMEM; 414 414
+1 -1
drivers/net/wireless/ath/ath5k/base.c
··· 920 920 ds, ah->desc_len, (unsigned long long)ah->desc_daddr); 921 921 922 922 bf = kzalloc_objs(struct ath5k_buf, 923 - 1 + ATH_TXBUF + ATH_RXBUF + ATH_BCBUF, GFP_KERNEL); 923 + 1 + ATH_TXBUF + ATH_RXBUF + ATH_BCBUF); 924 924 if (bf == NULL) { 925 925 ATH5K_ERR(ah, "can't allocate bufptr\n"); 926 926 ret = -ENOMEM;
+3 -5
drivers/net/wireless/ath/ath5k/eeprom.c
··· 728 728 /* Allocate pd_curves for this cal pier */ 729 729 chinfo[pier].pd_curves = 730 730 kzalloc_objs(struct ath5k_pdgain_info, 731 - AR5K_EEPROM_N_PD_CURVES, GFP_KERNEL); 731 + AR5K_EEPROM_N_PD_CURVES); 732 732 733 733 if (!chinfo[pier].pd_curves) 734 734 goto err_out; ··· 916 916 /* Allocate pd_curves for this cal pier */ 917 917 chinfo[pier].pd_curves = 918 918 kzalloc_objs(struct ath5k_pdgain_info, 919 - AR5K_EEPROM_N_PD_CURVES, 920 - GFP_KERNEL); 919 + AR5K_EEPROM_N_PD_CURVES); 921 920 922 921 if (!chinfo[pier].pd_curves) 923 922 goto err_out; ··· 1205 1206 /* Allocate pd_curves for this cal pier */ 1206 1207 chinfo[pier].pd_curves = 1207 1208 kzalloc_objs(struct ath5k_pdgain_info, 1208 - AR5K_EEPROM_N_PD_CURVES, 1209 - GFP_KERNEL); 1209 + AR5K_EEPROM_N_PD_CURVES); 1210 1210 1211 1211 if (!chinfo[pier].pd_curves) 1212 1212 goto err_out;
+1 -1
drivers/net/wireless/ath/wil6210/debugfs.c
··· 2442 2442 return -ENODEV; 2443 2443 2444 2444 wil->dbg_data.data_arr = kzalloc_objs(struct wil_debugfs_iomem_data, 2445 - dbg_off_count, GFP_KERNEL); 2445 + dbg_off_count); 2446 2446 if (!wil->dbg_data.data_arr) { 2447 2447 debugfs_remove_recursive(dbg); 2448 2448 wil->debug = NULL;
+1 -1
drivers/net/wireless/broadcom/b43legacy/debugfs.c
··· 366 366 e->dev = dev; 367 367 log = &e->txstatlog; 368 368 log->log = kzalloc_objs(struct b43legacy_txstatus, 369 - B43legacy_NR_LOGGED_TXSTATUS, GFP_KERNEL); 369 + B43legacy_NR_LOGGED_TXSTATUS); 370 370 if (!log->log) { 371 371 b43legacyerr(dev->wl, "debugfs: add device txstatus OOM\n"); 372 372 kfree(e);
+1 -1
drivers/net/wireless/broadcom/b43legacy/main.c
··· 3270 3270 if ((phy->type == B43legacy_PHYTYPE_B) || 3271 3271 (phy->type == B43legacy_PHYTYPE_G)) { 3272 3272 phy->_lo_pairs = kzalloc_objs(struct b43legacy_lopair, 3273 - B43legacy_LO_COUNT, GFP_KERNEL); 3273 + B43legacy_LO_COUNT); 3274 3274 if (!phy->_lo_pairs) 3275 3275 return -ENOMEM; 3276 3276 }
+1 -1
drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
··· 1589 1589 msgbuf->max_flowrings = if_msgbuf->max_flowrings; 1590 1590 msgbuf->flowring_dma_handle = 1591 1591 kzalloc_objs(*msgbuf->flowring_dma_handle, 1592 - msgbuf->max_flowrings, GFP_KERNEL); 1592 + msgbuf->max_flowrings); 1593 1593 if (!msgbuf->flowring_dma_handle) 1594 1594 goto fail; 1595 1595
+3 -3
drivers/net/wireless/intel/ipw2x00/ipw2100.c
··· 1886 1886 bg_band->band = NL80211_BAND_2GHZ; 1887 1887 bg_band->n_channels = geo->bg_channels; 1888 1888 bg_band->channels = kzalloc_objs(struct ieee80211_channel, 1889 - geo->bg_channels, GFP_KERNEL); 1889 + geo->bg_channels); 1890 1890 if (!bg_band->channels) { 1891 1891 ipw2100_down(priv); 1892 1892 return -ENOMEM; ··· 4408 4408 } 4409 4409 4410 4410 priv->tx_buffers = kmalloc_objs(struct ipw2100_tx_packet, 4411 - TX_PENDED_QUEUE_LENGTH, GFP_KERNEL); 4411 + TX_PENDED_QUEUE_LENGTH); 4412 4412 if (!priv->tx_buffers) { 4413 4413 bd_queue_free(priv, &priv->tx_queue); 4414 4414 return -ENOMEM; ··· 4552 4552 * allocate packets 4553 4553 */ 4554 4554 priv->rx_buffers = kmalloc_objs(struct ipw2100_rx_packet, 4555 - RX_QUEUE_LENGTH, GFP_KERNEL); 4555 + RX_QUEUE_LENGTH); 4556 4556 if (!priv->rx_buffers) { 4557 4557 IPW_DEBUG_INFO("can't allocate rx packet buffer table\n"); 4558 4558
+2 -2
drivers/net/wireless/intel/ipw2x00/ipw2200.c
··· 11278 11278 bg_band->band = NL80211_BAND_2GHZ; 11279 11279 bg_band->n_channels = geo->bg_channels; 11280 11280 bg_band->channels = kzalloc_objs(struct ieee80211_channel, 11281 - geo->bg_channels, GFP_KERNEL); 11281 + geo->bg_channels); 11282 11282 if (!bg_band->channels) { 11283 11283 rc = -ENOMEM; 11284 11284 goto out; ··· 11316 11316 a_band->band = NL80211_BAND_5GHZ; 11317 11317 a_band->n_channels = geo->a_channels; 11318 11318 a_band->channels = kzalloc_objs(struct ieee80211_channel, 11319 - geo->a_channels, GFP_KERNEL); 11319 + geo->a_channels); 11320 11320 if (!a_band->channels) { 11321 11321 rc = -ENOMEM; 11322 11322 goto out;
+2 -3
drivers/net/wireless/intel/iwlwifi/dvm/tt.c
··· 596 596 if (priv->lib->adv_thermal_throttle) { 597 597 IWL_DEBUG_TEMP(priv, "Advanced Thermal Throttling\n"); 598 598 tt->restriction = kzalloc_objs(struct iwl_tt_restriction, 599 - IWL_TI_STATE_MAX, GFP_KERNEL); 599 + IWL_TI_STATE_MAX); 600 600 tt->transaction = kzalloc_objs(struct iwl_tt_trans, 601 - IWL_TI_STATE_MAX * (IWL_TI_STATE_MAX - 1), 602 - GFP_KERNEL); 601 + IWL_TI_STATE_MAX * (IWL_TI_STATE_MAX - 1)); 603 602 if (!tt->restriction || !tt->transaction) { 604 603 IWL_ERR(priv, "Fallback to Legacy Throttling\n"); 605 604 priv->thermal_throttle.advanced_tt = false;
+1 -1
drivers/net/wireless/intel/iwlwifi/mld/low_latency.c
··· 132 132 unsigned long ts = jiffies; 133 133 134 134 ll->pkts_counters = kzalloc_objs(*ll->pkts_counters, 135 - mld->trans->info.num_rxqs, GFP_KERNEL); 135 + mld->trans->info.num_rxqs); 136 136 if (!ll->pkts_counters) 137 137 return -ENOMEM; 138 138
+1 -2
drivers/net/wireless/intel/iwlwifi/mld/scan.c
··· 2084 2084 } 2085 2085 2086 2086 mld->channel_survey = kzalloc_flex(*mld->channel_survey, 2087 - channels, n_channels, 2088 - GFP_KERNEL); 2087 + channels, n_channels); 2089 2088 2090 2089 if (!mld->channel_survey) 2091 2090 return;
+1 -2
drivers/net/wireless/intel/iwlwifi/mld/sta.c
··· 695 695 return; 696 696 697 697 mld_sta->mpdu_counters = kzalloc_objs(*mld_sta->mpdu_counters, 698 - mld->trans->info.num_rxqs, 699 - GFP_KERNEL); 698 + mld->trans->info.num_rxqs); 700 699 if (!mld_sta->mpdu_counters) 701 700 return; 702 701
+1 -2
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
··· 4349 4349 4350 4350 WARN_ON(rcu_access_pointer(mvmsta->ptk_pn[keyidx])); 4351 4351 ptk_pn = kzalloc_flex(*ptk_pn, q, 4352 - mvm->trans->info.num_rxqs, 4353 - GFP_KERNEL); 4352 + mvm->trans->info.num_rxqs); 4354 4353 if (!ptk_pn) { 4355 4354 ret = -ENOMEM; 4356 4355 break;
+1 -1
drivers/net/wireless/intel/iwlwifi/mvm/scan.c
··· 3604 3604 } 3605 3605 3606 3606 mvm->acs_survey = kzalloc_flex(*mvm->acs_survey, channels, 3607 - n_channels, GFP_KERNEL); 3607 + n_channels); 3608 3608 3609 3609 if (!mvm->acs_survey) 3610 3610 return;
+2 -3
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
··· 775 775 776 776 trans_pcie->rxq = kzalloc_objs(struct iwl_rxq, trans->info.num_rxqs); 777 777 trans_pcie->rx_pool = kzalloc_objs(trans_pcie->rx_pool[0], 778 - RX_POOL_SIZE(trans_pcie->num_rx_bufs), 779 - GFP_KERNEL); 778 + RX_POOL_SIZE(trans_pcie->num_rx_bufs)); 780 779 trans_pcie->global_table = 781 780 kzalloc_objs(trans_pcie->global_table[0], 782 - RX_POOL_SIZE(trans_pcie->num_rx_bufs), GFP_KERNEL); 781 + RX_POOL_SIZE(trans_pcie->num_rx_bufs)); 783 782 if (!trans_pcie->rxq || !trans_pcie->rx_pool || 784 783 !trans_pcie->global_table) { 785 784 ret = -ENOMEM;
+1 -1
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c
··· 836 836 837 837 trans_pcie->txq_memory = 838 838 kzalloc_objs(struct iwl_txq, 839 - trans->mac_cfg->base->num_of_queues, GFP_KERNEL); 839 + trans->mac_cfg->base->num_of_queues); 840 840 if (!trans_pcie->txq_memory) { 841 841 IWL_ERR(trans, "Not enough memory for txq\n"); 842 842 ret = -ENOMEM;
+1 -1
drivers/net/wireless/intersil/p54/eeprom.c
··· 159 159 goto err_out; 160 160 161 161 tmp->channels = kzalloc_objs(struct ieee80211_channel, 162 - list->band_channel_num[band], GFP_KERNEL); 162 + list->band_channel_num[band]); 163 163 if (!tmp->channels) 164 164 goto err_out; 165 165
+1 -2
drivers/net/wireless/marvell/mwifiex/cfg80211.c
··· 4678 4678 */ 4679 4679 adapter->num_in_chan_stats = 2 * (n_channels_bg + n_channels_a); 4680 4680 adapter->chan_stats = kzalloc_objs(*adapter->chan_stats, 4681 - adapter->num_in_chan_stats, 4682 - GFP_KERNEL); 4681 + adapter->num_in_chan_stats); 4683 4682 4684 4683 if (!adapter->chan_stats) 4685 4684 return -ENOMEM;
+1 -1
drivers/net/wireless/marvell/mwifiex/cmdevt.c
··· 390 390 391 391 /* Allocate and initialize struct cmd_ctrl_node */ 392 392 cmd_array = kzalloc_objs(struct cmd_ctrl_node, 393 - MWIFIEX_NUM_OF_CMD_BUFFER, GFP_KERNEL); 393 + MWIFIEX_NUM_OF_CMD_BUFFER); 394 394 if (!cmd_array) 395 395 return -ENOMEM; 396 396
+1 -1
drivers/net/wireless/marvell/mwifiex/scan.c
··· 1510 1510 } 1511 1511 1512 1512 scan_chan_list = kzalloc_objs(struct mwifiex_chan_scan_param_set, 1513 - MWIFIEX_USER_SCAN_CHAN_MAX, GFP_KERNEL); 1513 + MWIFIEX_USER_SCAN_CHAN_MAX); 1514 1514 if (!scan_chan_list) { 1515 1515 kfree(scan_cfg_out); 1516 1516 ret = -ENOMEM;
+1 -1
drivers/net/wireless/quantenna/qtnfmac/commands.c
··· 1253 1253 1254 1254 mac_info->n_if_comb = resp_info->n_iface_combinations; 1255 1255 mac_info->if_comb = kzalloc_objs(*mac->macinfo.if_comb, 1256 - mac->macinfo.n_if_comb, GFP_KERNEL); 1256 + mac->macinfo.n_if_comb); 1257 1257 1258 1258 if (!mac->macinfo.if_comb) 1259 1259 return -ENOMEM;
+1 -1
drivers/nvme/host/fc.c
··· 3498 3498 3499 3499 ret = -ENOMEM; 3500 3500 ctrl->queues = kzalloc_objs(struct nvme_fc_queue, 3501 - ctrl->ctrl.queue_count, GFP_KERNEL); 3501 + ctrl->ctrl.queue_count); 3502 3502 if (!ctrl->queues) 3503 3503 goto out_free_ida; 3504 3504
+1 -1
drivers/nvme/target/pci-epf.c
··· 503 503 static int nvmet_pci_epf_alloc_irq_vectors(struct nvmet_pci_epf_ctrl *ctrl) 504 504 { 505 505 ctrl->irq_vectors = kzalloc_objs(struct nvmet_pci_epf_irq_vector, 506 - ctrl->nr_queues, GFP_KERNEL); 506 + ctrl->nr_queues); 507 507 if (!ctrl->irq_vectors) 508 508 return -ENOMEM; 509 509
+1 -1
drivers/parisc/iosapic.c
··· 928 928 DBG_IRT("iosapic_register: num vectors = %d\n", isi->isi_num_vectors); 929 929 930 930 vip = isi->isi_vector = kzalloc_objs(struct vector_info, 931 - isi->isi_num_vectors, GFP_KERNEL); 931 + isi->isi_num_vectors); 932 932 if (vip == NULL) { 933 933 kfree(isi); 934 934 return NULL;
+1 -2
drivers/platform/x86/amd/wbrf.c
··· 73 73 num_of_elements = 2 * num_of_ranges + 2; 74 74 75 75 union acpi_object *tmp __free(kfree) = kzalloc_objs(*tmp, 76 - num_of_elements, 77 - GFP_KERNEL); 76 + num_of_elements); 78 77 if (!tmp) 79 78 return -ENOMEM; 80 79
+1 -2
drivers/platform/x86/dell/dell-wmi-base.c
··· 657 657 } 658 658 659 659 keymap = kzalloc_objs(struct key_entry, 660 - dmi_results.keymap_size + ARRAY_SIZE(dell_wmi_keymap_type_0000) + ARRAY_SIZE(dell_wmi_keymap_type_0010) + ARRAY_SIZE(dell_wmi_keymap_type_0011) + ARRAY_SIZE(dell_wmi_keymap_type_0012) + 1, 661 - GFP_KERNEL); 660 + dmi_results.keymap_size + ARRAY_SIZE(dell_wmi_keymap_type_0000) + ARRAY_SIZE(dell_wmi_keymap_type_0010) + ARRAY_SIZE(dell_wmi_keymap_type_0011) + ARRAY_SIZE(dell_wmi_keymap_type_0012) + 1); 662 661 if (!keymap) { 663 662 kfree(dmi_results.keymap); 664 663 err = -ENOMEM;
+1 -1
drivers/platform/x86/dell/dell-wmi-privacy.c
··· 315 315 316 316 /* remap the wmi keymap event to new keymap */ 317 317 keymap = kzalloc_objs(struct key_entry, 318 - ARRAY_SIZE(dell_wmi_keymap_type_0012), GFP_KERNEL); 318 + ARRAY_SIZE(dell_wmi_keymap_type_0012)); 319 319 if (!keymap) 320 320 return -ENOMEM; 321 321
+1 -2
drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c
··· 120 120 wmi_priv.enumeration_instances_count = 121 121 get_instance_count(DELL_WMI_BIOS_ENUMERATION_ATTRIBUTE_GUID); 122 122 wmi_priv.enumeration_data = kzalloc_objs(struct enumeration_data, 123 - wmi_priv.enumeration_instances_count, 124 - GFP_KERNEL); 123 + wmi_priv.enumeration_instances_count); 125 124 if (!wmi_priv.enumeration_data) { 126 125 wmi_priv.enumeration_instances_count = 0; 127 126 ret = -ENOMEM;
+1 -2
drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c
··· 124 124 125 125 wmi_priv.integer_instances_count = get_instance_count(DELL_WMI_BIOS_INTEGER_ATTRIBUTE_GUID); 126 126 wmi_priv.integer_data = kzalloc_objs(struct integer_data, 127 - wmi_priv.integer_instances_count, 128 - GFP_KERNEL); 127 + wmi_priv.integer_instances_count); 129 128 if (!wmi_priv.integer_data) { 130 129 wmi_priv.integer_instances_count = 0; 131 130 ret = -ENOMEM;
+1 -1
drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c
··· 144 144 145 145 wmi_priv.po_instances_count = get_instance_count(DELL_WMI_BIOS_PASSOBJ_ATTRIBUTE_GUID); 146 146 wmi_priv.po_data = kzalloc_objs(struct po_data, 147 - wmi_priv.po_instances_count, GFP_KERNEL); 147 + wmi_priv.po_instances_count); 148 148 if (!wmi_priv.po_data) { 149 149 wmi_priv.po_instances_count = 0; 150 150 ret = -ENOMEM;
+1 -2
drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c
··· 109 109 110 110 wmi_priv.str_instances_count = get_instance_count(DELL_WMI_BIOS_STRING_ATTRIBUTE_GUID); 111 111 wmi_priv.str_data = kzalloc_objs(struct str_data, 112 - wmi_priv.str_instances_count, 113 - GFP_KERNEL); 112 + wmi_priv.str_instances_count); 114 113 if (!wmi_priv.str_data) { 115 114 wmi_priv.str_instances_count = 0; 116 115 ret = -ENOMEM;
+1 -2
drivers/platform/x86/hp/hp-bioscfg/enum-attributes.c
··· 95 95 hp_get_instance_count(HP_WMI_BIOS_ENUMERATION_GUID); 96 96 97 97 bioscfg_drv.enumeration_data = kzalloc_objs(*bioscfg_drv.enumeration_data, 98 - bioscfg_drv.enumeration_instances_count, 99 - GFP_KERNEL); 98 + bioscfg_drv.enumeration_instances_count); 100 99 if (!bioscfg_drv.enumeration_data) { 101 100 bioscfg_drv.enumeration_instances_count = 0; 102 101 return -ENOMEM;
+1 -2
drivers/platform/x86/hp/hp-bioscfg/int-attributes.c
··· 110 110 { 111 111 bioscfg_drv.integer_instances_count = hp_get_instance_count(HP_WMI_BIOS_INTEGER_GUID); 112 112 bioscfg_drv.integer_data = kzalloc_objs(*bioscfg_drv.integer_data, 113 - bioscfg_drv.integer_instances_count, 114 - GFP_KERNEL); 113 + bioscfg_drv.integer_instances_count); 115 114 116 115 if (!bioscfg_drv.integer_data) { 117 116 bioscfg_drv.integer_instances_count = 0;
+1 -2
drivers/platform/x86/hp/hp-bioscfg/order-list-attributes.c
··· 99 99 bioscfg_drv.ordered_list_instances_count = 100 100 hp_get_instance_count(HP_WMI_BIOS_ORDERED_LIST_GUID); 101 101 bioscfg_drv.ordered_list_data = kzalloc_objs(*bioscfg_drv.ordered_list_data, 102 - bioscfg_drv.ordered_list_instances_count, 103 - GFP_KERNEL); 102 + bioscfg_drv.ordered_list_instances_count); 104 103 if (!bioscfg_drv.ordered_list_data) { 105 104 bioscfg_drv.ordered_list_instances_count = 0; 106 105 return -ENOMEM;
+1 -2
drivers/platform/x86/hp/hp-bioscfg/passwdobj-attributes.c
··· 186 186 { 187 187 bioscfg_drv.password_instances_count = hp_get_instance_count(HP_WMI_BIOS_PASSWORD_GUID); 188 188 bioscfg_drv.password_data = kzalloc_objs(*bioscfg_drv.password_data, 189 - bioscfg_drv.password_instances_count, 190 - GFP_KERNEL); 189 + bioscfg_drv.password_instances_count); 191 190 if (!bioscfg_drv.password_data) { 192 191 bioscfg_drv.password_instances_count = 0; 193 192 return -ENOMEM;
+1 -2
drivers/platform/x86/hp/hp-bioscfg/string-attributes.c
··· 102 102 { 103 103 bioscfg_drv.string_instances_count = hp_get_instance_count(HP_WMI_BIOS_STRING_GUID); 104 104 bioscfg_drv.string_data = kzalloc_objs(*bioscfg_drv.string_data, 105 - bioscfg_drv.string_instances_count, 106 - GFP_KERNEL); 105 + bioscfg_drv.string_instances_count); 107 106 if (!bioscfg_drv.string_data) { 108 107 bioscfg_drv.string_instances_count = 0; 109 108 return -ENOMEM;
+1 -1
drivers/platform/x86/intel/int1092/intel_sar.c
··· 92 92 return AE_ERROR; 93 93 94 94 data->device_mode_info = kmalloc_objs(struct wwan_device_mode_info, 95 - data->total_dev_mode, GFP_KERNEL); 95 + data->total_dev_mode); 96 96 if (!data->device_mode_info) 97 97 return AE_ERROR; 98 98
+1 -1
drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
··· 1820 1820 } 1821 1821 1822 1822 isst_common.sst_inst = kzalloc_objs(*isst_common.sst_inst, 1823 - topology_max_packages(), GFP_KERNEL); 1823 + topology_max_packages()); 1824 1824 if (!isst_common.sst_inst) { 1825 1825 ret = -ENOMEM; 1826 1826 goto init_done;
+1 -2
drivers/platform/x86/intel/tpmi_power_domains.c
··· 222 222 return ret; 223 223 224 224 tpmi_power_domain_mask = kzalloc_objs(*tpmi_power_domain_mask, 225 - size_mul(topology_max_packages(), MAX_POWER_DOMAINS), 226 - GFP_KERNEL); 225 + size_mul(topology_max_packages(), MAX_POWER_DOMAINS)); 227 226 if (!tpmi_power_domain_mask) 228 227 return -ENOMEM; 229 228
+1 -1
drivers/platform/x86/uv_sysfs.c
··· 388 388 } 389 389 for (j = 0; j < uv_bios_obj_cnt; j++) { 390 390 uv_hubs[j]->ports = kzalloc_objs(*uv_hubs[j]->ports, 391 - hub_buf[j].ports, GFP_KERNEL); 391 + hub_buf[j].ports); 392 392 if (!uv_hubs[j]->ports) { 393 393 ret = -ENOMEM; 394 394 j--;
+1 -1
drivers/powercap/powercap_sys.c
··· 530 530 if (!power_zone->name) 531 531 goto err_name_alloc; 532 532 power_zone->constraints = kzalloc_objs(*power_zone->constraints, 533 - nr_constraints, GFP_KERNEL); 533 + nr_constraints); 534 534 if (!power_zone->constraints) 535 535 goto err_const_alloc; 536 536
+1 -2
drivers/resctrl/mpam_devices.c
··· 2445 2445 continue; 2446 2446 2447 2447 mbwu_state = kzalloc_objs(*ris->mbwu_state, 2448 - ris->props.num_mbwu_mon, 2449 - GFP_KERNEL); 2448 + ris->props.num_mbwu_mon); 2450 2449 if (!mbwu_state) { 2451 2450 __destroy_component_cfg(comp); 2452 2451 return -ENOMEM;
+1 -1
drivers/s390/crypto/zcrypt_api.c
··· 1628 1628 * sizeof(struct zcrypt_device_status_ext); 1629 1629 1630 1630 device_status = kvzalloc_objs(struct zcrypt_device_status_ext, 1631 - MAX_ZDEV_ENTRIES_EXT, GFP_KERNEL); 1631 + MAX_ZDEV_ENTRIES_EXT); 1632 1632 if (!device_status) 1633 1633 return -ENOMEM; 1634 1634 zcrypt_device_status_mask_ext(device_status,
+1 -1
drivers/scsi/aacraid/aachba.c
··· 493 493 fsa_dev_ptr = dev->fsa_dev; 494 494 495 495 dev->fsa_dev = kzalloc_objs(*fsa_dev_ptr, 496 - maximum_num_containers, GFP_KERNEL); 496 + maximum_num_containers); 497 497 498 498 kfree(fsa_dev_ptr); 499 499 fsa_dev_ptr = NULL;
+8 -14
drivers/scsi/be2iscsi/be_main.c
··· 2477 2477 /* Allocate memory for wrb_context */ 2478 2478 phwi_ctrlr = phba->phwi_ctrlr; 2479 2479 phwi_ctrlr->wrb_context = kzalloc_objs(struct hwi_wrb_context, 2480 - phba->params.cxns_per_ctrl, 2481 - GFP_KERNEL); 2480 + phba->params.cxns_per_ctrl); 2482 2481 if (!phwi_ctrlr->wrb_context) { 2483 2482 kfree(phba->phwi_ctrlr); 2484 2483 return -ENOMEM; ··· 2625 2626 /* Allocate memory for WRBQ */ 2626 2627 phwi_ctxt = phwi_ctrlr->phwi_ctxt; 2627 2628 phwi_ctxt->be_wrbq = kzalloc_objs(struct be_queue_info, 2628 - phba->params.cxns_per_ctrl, 2629 - GFP_KERNEL); 2629 + phba->params.cxns_per_ctrl); 2630 2630 if (!phwi_ctxt->be_wrbq) { 2631 2631 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2632 2632 "BM_%d : WRBQ Mem Alloc Failed\n"); ··· 2636 2638 pwrb_context = &phwi_ctrlr->wrb_context[index]; 2637 2639 pwrb_context->pwrb_handle_base = 2638 2640 kzalloc_objs(struct wrb_handle *, 2639 - phba->params.wrbs_per_cxn, 2640 - GFP_KERNEL); 2641 + phba->params.wrbs_per_cxn); 2641 2642 if (!pwrb_context->pwrb_handle_base) { 2642 2643 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2643 2644 "BM_%d : Mem Alloc Failed. Failing to load\n"); ··· 2644 2647 } 2645 2648 pwrb_context->pwrb_handle_basestd = 2646 2649 kzalloc_objs(struct wrb_handle *, 2647 - phba->params.wrbs_per_cxn, 2648 - GFP_KERNEL); 2650 + phba->params.wrbs_per_cxn); 2649 2651 if (!pwrb_context->pwrb_handle_basestd) { 2650 2652 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2651 2653 "BM_%d : Mem Alloc Failed. Failing to load\n"); ··· 3893 3897 mem_descr_sglh += HWI_MEM_SGLH; 3894 3898 if (1 == mem_descr_sglh->num_elements) { 3895 3899 phba->io_sgl_hndl_base = kzalloc_objs(struct sgl_handle *, 3896 - phba->params.ios_per_ctrl, 3897 - GFP_KERNEL); 3900 + phba->params.ios_per_ctrl); 3898 3901 if (!phba->io_sgl_hndl_base) { 3899 3902 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3900 3903 "BM_%d : Mem Alloc Failed. 
Failing to load\n"); ··· 3901 3906 } 3902 3907 phba->eh_sgl_hndl_base = 3903 3908 kzalloc_objs(struct sgl_handle *, 3904 - phba->params.icds_per_ctrl - phba->params.ios_per_ctrl, 3905 - GFP_KERNEL); 3909 + phba->params.icds_per_ctrl - phba->params.ios_per_ctrl); 3906 3910 if (!phba->eh_sgl_hndl_base) { 3907 3911 kfree(phba->io_sgl_hndl_base); 3908 3912 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, ··· 4019 4025 } 4020 4026 } 4021 4027 phba->ep_array = kzalloc_objs(struct iscsi_endpoint *, 4022 - phba->params.cxns_per_ctrl, GFP_KERNEL); 4028 + phba->params.cxns_per_ctrl); 4023 4029 if (!phba->ep_array) { 4024 4030 ret = -ENOMEM; 4025 4031 ··· 4027 4033 } 4028 4034 4029 4035 phba->conn_table = kzalloc_objs(struct beiscsi_conn *, 4030 - phba->params.cxns_per_ctrl, GFP_KERNEL); 4036 + phba->params.cxns_per_ctrl); 4031 4037 if (!phba->conn_table) { 4032 4038 kfree(phba->ep_array); 4033 4039 phba->ep_array = NULL;
+1 -1
drivers/scsi/csiostor/csio_wr.c
··· 279 279 280 280 flq = wrm->q_arr[q->un.iq.flq_idx]; 281 281 flq->un.fl.bufs = kzalloc_objs(struct csio_dma_buf, 282 - flq->credits, GFP_KERNEL); 282 + flq->credits); 283 283 if (!flq->un.fl.bufs) { 284 284 csio_err(hw, 285 285 "Failed to allocate FL queue bufs"
+1 -1
drivers/scsi/esas2r/esas2r_init.c
··· 802 802 /* allocate the request table */ 803 803 a->req_table = 804 804 kzalloc_objs(struct esas2r_request *, 805 - num_requests + num_ae_requests + 1, GFP_KERNEL); 805 + num_requests + num_ae_requests + 1); 806 806 807 807 if (a->req_table == NULL) { 808 808 esas2r_log(ESAS2R_LOG_CRIT,
+2 -2
drivers/scsi/hpsa.c
··· 2206 2206 for (i = 0; i < h->nr_cmds; i++) { 2207 2207 h->ioaccel2_cmd_sg_list[i] = 2208 2208 kmalloc_objs(*h->ioaccel2_cmd_sg_list[i], 2209 - h->maxsgentries, GFP_KERNEL); 2209 + h->maxsgentries); 2210 2210 if (!h->ioaccel2_cmd_sg_list[i]) 2211 2211 goto clean; 2212 2212 } ··· 2244 2244 2245 2245 for (i = 0; i < h->nr_cmds; i++) { 2246 2246 h->cmd_sg_list[i] = kmalloc_objs(*h->cmd_sg_list[i], 2247 - h->chainsize, GFP_KERNEL); 2247 + h->chainsize); 2248 2248 if (!h->cmd_sg_list[i]) 2249 2249 goto clean; 2250 2250
+1 -1
drivers/scsi/ibmvscsi/ibmvfc.c
··· 6210 6210 goto free_login_buffer; 6211 6211 6212 6212 vhost->trace = kzalloc_objs(struct ibmvfc_trace_entry, 6213 - IBMVFC_NUM_TRACE_ENTRIES, GFP_KERNEL); 6213 + IBMVFC_NUM_TRACE_ENTRIES); 6214 6214 atomic_set(&vhost->trace_index, -1); 6215 6215 6216 6216 if (!vhost->trace)
+3 -4
drivers/scsi/ipr.c
··· 8860 8860 return -ENOMEM; 8861 8861 8862 8862 ioa_cfg->ipr_cmnd_list = kzalloc_objs(struct ipr_cmnd *, 8863 - IPR_NUM_CMD_BLKS, GFP_KERNEL); 8863 + IPR_NUM_CMD_BLKS); 8864 8864 ioa_cfg->ipr_cmnd_list_dma = kzalloc_objs(dma_addr_t, IPR_NUM_CMD_BLKS); 8865 8865 8866 8866 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) { ··· 8965 8965 8966 8966 ENTER; 8967 8967 ioa_cfg->res_entries = kzalloc_objs(struct ipr_resource_entry, 8968 - ioa_cfg->max_devs_supported, 8969 - GFP_KERNEL); 8968 + ioa_cfg->max_devs_supported); 8970 8969 8971 8970 if (!ioa_cfg->res_entries) 8972 8971 goto out; ··· 9027 9028 } 9028 9029 9029 9030 ioa_cfg->trace = kzalloc_objs(struct ipr_trace_entry, 9030 - IPR_NUM_TRACE_ENTRIES, GFP_KERNEL); 9031 + IPR_NUM_TRACE_ENTRIES); 9031 9032 9032 9033 if (!ioa_cfg->trace) 9033 9034 goto out_free_hostrcb_dma;
+1 -2
drivers/scsi/lpfc/lpfc_debugfs.c
··· 6211 6211 &lpfc_debugfs_op_slow_ring_trc); 6212 6212 if (!phba->slow_ring_trc) { 6213 6213 phba->slow_ring_trc = kzalloc_objs(struct lpfc_debugfs_trc, 6214 - lpfc_debugfs_max_slow_ring_trc, 6215 - GFP_KERNEL); 6214 + lpfc_debugfs_max_slow_ring_trc); 6216 6215 if (!phba->slow_ring_trc) { 6217 6216 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 6218 6217 "0416 Cannot create debugfs "
+9 -18
drivers/scsi/lpfc/lpfc_init.c
··· 7771 7771 7772 7772 if (!phba->sli.sli3_ring) 7773 7773 phba->sli.sli3_ring = kzalloc_objs(struct lpfc_sli_ring, 7774 - LPFC_SLI3_MAX_RING, 7775 - GFP_KERNEL); 7774 + LPFC_SLI3_MAX_RING); 7776 7775 if (!phba->sli.sli3_ring) 7777 7776 return -ENOMEM; 7778 7777 ··· 8354 8355 } 8355 8356 8356 8357 phba->sli4_hba.hba_eq_hdl = kzalloc_objs(struct lpfc_hba_eq_hdl, 8357 - phba->cfg_irq_chann, 8358 - GFP_KERNEL); 8358 + phba->cfg_irq_chann); 8359 8359 if (!phba->sli4_hba.hba_eq_hdl) { 8360 8360 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8361 8361 "2572 Failed allocate memory for " ··· 8364 8366 } 8365 8367 8366 8368 phba->sli4_hba.cpu_map = kzalloc_objs(struct lpfc_vector_map_info, 8367 - phba->sli4_hba.num_possible_cpu, 8368 - GFP_KERNEL); 8369 + phba->sli4_hba.num_possible_cpu); 8369 8370 if (!phba->sli4_hba.cpu_map) { 8370 8371 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8371 8372 "3327 Failed allocate memory for msi-x " ··· 8382 8385 } 8383 8386 8384 8387 phba->sli4_hba.idle_stat = kzalloc_objs(*phba->sli4_hba.idle_stat, 8385 - phba->sli4_hba.num_possible_cpu, 8386 - GFP_KERNEL); 8388 + phba->sli4_hba.num_possible_cpu); 8387 8389 if (!phba->sli4_hba.idle_stat) { 8388 8390 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8389 8391 "3390 Failed allocation for idle_stat\n"); ··· 10436 10440 10437 10441 if (!phba->sli4_hba.hdwq) { 10438 10442 phba->sli4_hba.hdwq = kzalloc_objs(struct lpfc_sli4_hdw_queue, 10439 - phba->cfg_hdw_queue, 10440 - GFP_KERNEL); 10443 + phba->cfg_hdw_queue); 10441 10444 if (!phba->sli4_hba.hdwq) { 10442 10445 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10443 10446 "6427 Failed allocate memory for " ··· 10466 10471 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 10467 10472 if (phba->nvmet_support) { 10468 10473 phba->sli4_hba.nvmet_cqset = kzalloc_objs(struct lpfc_queue *, 10469 - phba->cfg_nvmet_mrq, 10470 - GFP_KERNEL); 10474 + phba->cfg_nvmet_mrq); 10471 10475 if (!phba->sli4_hba.nvmet_cqset) { 10472 10476 
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10473 10477 "3121 Fail allocate memory for " ··· 10474 10480 goto out_error; 10475 10481 } 10476 10482 phba->sli4_hba.nvmet_mrq_hdr = kzalloc_objs(struct lpfc_queue *, 10477 - phba->cfg_nvmet_mrq, 10478 - GFP_KERNEL); 10483 + phba->cfg_nvmet_mrq); 10479 10484 if (!phba->sli4_hba.nvmet_mrq_hdr) { 10480 10485 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10481 10486 "3122 Fail allocate memory for " ··· 10482 10489 goto out_error; 10483 10490 } 10484 10491 phba->sli4_hba.nvmet_mrq_data = kzalloc_objs(struct lpfc_queue *, 10485 - phba->cfg_nvmet_mrq, 10486 - GFP_KERNEL); 10492 + phba->cfg_nvmet_mrq); 10487 10493 if (!phba->sli4_hba.nvmet_mrq_data) { 10488 10494 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10489 10495 "3124 Fail allocate memory for " ··· 11368 11376 if (phba->sli4_hba.cq_max) { 11369 11377 kfree(phba->sli4_hba.cq_lookup); 11370 11378 phba->sli4_hba.cq_lookup = kzalloc_objs(struct lpfc_queue *, 11371 - (phba->sli4_hba.cq_max + 1), 11372 - GFP_KERNEL); 11379 + (phba->sli4_hba.cq_max + 1)); 11373 11380 if (!phba->sli4_hba.cq_lookup) { 11374 11381 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11375 11382 "0549 Failed setup of CQ Lookup table: "
+1 -2
drivers/scsi/lpfc/lpfc_nvmet.c
··· 1508 1508 phba->sli4_hba.nvmet_xri_cnt); 1509 1509 1510 1510 phba->sli4_hba.nvmet_ctx_info = kzalloc_objs(struct lpfc_nvmet_ctx_info, 1511 - phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq, 1512 - GFP_KERNEL); 1511 + phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq); 1513 1512 if (!phba->sli4_hba.nvmet_ctx_info) { 1514 1513 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1515 1514 "6419 Failed allocate memory for "
+1 -1
drivers/scsi/mac53c94.c
··· 463 463 * XXX FIXME: Use DMA consistent routines 464 464 */ 465 465 dma_cmd_space = kmalloc_objs(struct dbdma_cmd, 466 - host->sg_tablesize + 2, GFP_KERNEL); 466 + host->sg_tablesize + 2); 467 467 if (!dma_cmd_space) { 468 468 printk(KERN_ERR "mac53c94: couldn't allocate dma " 469 469 "command space for %pOF\n", node);
+1 -1
drivers/scsi/megaraid/megaraid_sas_base.c
··· 6376 6376 if (instance->adapter_type >= VENTURA_SERIES) { 6377 6377 fusion->stream_detect_by_ld = 6378 6378 kzalloc_objs(struct LD_STREAM_DETECT *, 6379 - MAX_LOGICAL_DRIVES_EXT, GFP_KERNEL); 6379 + MAX_LOGICAL_DRIVES_EXT); 6380 6380 if (!fusion->stream_detect_by_ld) { 6381 6381 dev_err(&instance->pdev->dev, 6382 6382 "unable to allocate stream detection for pool of LDs\n");
+2 -3
drivers/scsi/mpi3mr/mpi3mr_fw.c
··· 2111 2111 op_reply_q->segment_qd); 2112 2112 2113 2113 op_reply_q->q_segments = kzalloc_objs(struct segments, 2114 - op_reply_q->num_segments, 2115 - GFP_KERNEL); 2114 + op_reply_q->num_segments); 2116 2115 if (!op_reply_q->q_segments) 2117 2116 return -ENOMEM; 2118 2117 ··· 2169 2170 op_req_q->segment_qd); 2170 2171 2171 2172 op_req_q->q_segments = kzalloc_objs(struct segments, 2172 - op_req_q->num_segments, GFP_KERNEL); 2173 + op_req_q->num_segments); 2173 2174 if (!op_req_q->q_segments) 2174 2175 return -ENOMEM; 2175 2176
+1 -1
drivers/scsi/mpi3mr/mpi3mr_transport.c
··· 2116 2116 goto out_fail; 2117 2117 } 2118 2118 sas_expander->phy = kzalloc_objs(struct mpi3mr_sas_phy, 2119 - sas_expander->num_phys, GFP_KERNEL); 2119 + sas_expander->num_phys); 2120 2120 if (!sas_expander->phy) { 2121 2121 rc = -1; 2122 2122 goto out_fail;
+6 -9
drivers/scsi/mpt3sas/mpt3sas_base.c
··· 3462 3462 3463 3463 if (iopoll_q_count) { 3464 3464 ioc->io_uring_poll_queues = kzalloc_objs(struct io_uring_poll_queue, 3465 - iopoll_q_count, 3466 - GFP_KERNEL); 3465 + iopoll_q_count); 3467 3466 if (!ioc->io_uring_poll_queues) 3468 3467 iopoll_q_count = 0; 3469 3468 } ··· 3727 3728 * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET from previous one. 3728 3729 */ 3729 3730 ioc->replyPostRegisterIndex = kzalloc_objs(resource_size_t *, 3730 - ioc->combined_reply_index_count, 3731 - GFP_KERNEL); 3731 + ioc->combined_reply_index_count); 3732 3732 if (!ioc->replyPostRegisterIndex) { 3733 3733 ioc_err(ioc, 3734 3734 "allocation for replyPostRegisterIndex failed!\n"); ··· 6560 6562 6561 6563 /* initialize hi-priority queue smid's */ 6562 6564 ioc->hpr_lookup = kzalloc_objs(struct request_tracker, 6563 - ioc->hi_priority_depth, GFP_KERNEL); 6565 + ioc->hi_priority_depth); 6564 6566 if (!ioc->hpr_lookup) { 6565 6567 ioc_err(ioc, "hpr_lookup: kcalloc failed\n"); 6566 6568 goto out; ··· 6573 6575 6574 6576 /* initialize internal queue smid's */ 6575 6577 ioc->internal_lookup = kzalloc_objs(struct request_tracker, 6576 - ioc->internal_depth, GFP_KERNEL); 6578 + ioc->internal_depth); 6577 6579 if (!ioc->internal_lookup) { 6578 6580 ioc_err(ioc, "internal_lookup: kcalloc failed\n"); 6579 6581 goto out; ··· 8428 8430 8429 8431 if (ioc->is_warpdrive) { 8430 8432 ioc->reply_post_host_index = kzalloc_objs(resource_size_t *, 8431 - ioc->cpu_msix_table_sz, 8432 - GFP_KERNEL); 8433 + ioc->cpu_msix_table_sz); 8433 8434 if (!ioc->reply_post_host_index) { 8434 8435 ioc_info(ioc, "Allocation for reply_post_host_index failed!!!\n"); 8435 8436 r = -ENOMEM; ··· 8518 8521 goto out_free_resources; 8519 8522 8520 8523 ioc->pfacts = kzalloc_objs(struct mpt3sas_port_facts, 8521 - ioc->facts.NumberOfPorts, GFP_KERNEL); 8524 + ioc->facts.NumberOfPorts); 8522 8525 if (!ioc->pfacts) { 8523 8526 r = -ENOMEM; 8524 8527 goto out_free_resources;
+1 -1
drivers/scsi/mpt3sas/mpt3sas_ctl.c
··· 1332 1332 ioc->event_context = 0; 1333 1333 ioc->aen_event_read_flag = 0; 1334 1334 ioc->event_log = kzalloc_objs(struct MPT3_IOCTL_EVENTS, 1335 - MPT3SAS_CTL_EVENT_LOG_SIZE, GFP_KERNEL); 1335 + MPT3SAS_CTL_EVENT_LOG_SIZE); 1336 1336 if (!ioc->event_log) { 1337 1337 pr_err("failure at %s:%d/%s()!\n", 1338 1338 __FILE__, __LINE__, __func__);
+2 -3
drivers/scsi/mpt3sas/mpt3sas_scsih.c
··· 7028 7028 ioc->sas_hba.nr_phys_allocated = max_t(u8, 7029 7029 MPT_MAX_HBA_NUM_PHYS, num_phys); 7030 7030 ioc->sas_hba.phy = kzalloc_objs(struct _sas_phy, 7031 - ioc->sas_hba.nr_phys_allocated, 7032 - GFP_KERNEL); 7031 + ioc->sas_hba.nr_phys_allocated); 7033 7032 if (!ioc->sas_hba.phy) { 7034 7033 ioc_err(ioc, "failure at %s:%d/%s()!\n", 7035 7034 __FILE__, __LINE__, __func__); ··· 7282 7283 goto out_fail; 7283 7284 } 7284 7285 sas_expander->phy = kzalloc_objs(struct _sas_phy, 7285 - sas_expander->num_phys, GFP_KERNEL); 7286 + sas_expander->num_phys); 7286 7287 if (!sas_expander->phy) { 7287 7288 ioc_err(ioc, "failure at %s:%d/%s()!\n", 7288 7289 __FILE__, __LINE__, __func__);
+1 -1
drivers/scsi/pmcraid.c
··· 4386 4386 4387 4387 pinstance->res_entries = 4388 4388 kzalloc_objs(struct pmcraid_resource_entry, 4389 - PMCRAID_MAX_RESOURCES, GFP_KERNEL); 4389 + PMCRAID_MAX_RESOURCES); 4390 4390 4391 4391 if (NULL == pinstance->res_entries) { 4392 4392 pmcraid_err("failed to allocate memory for resource table\n");
+4 -5
drivers/scsi/qedi/qedi_main.c
··· 411 411 int ret = 0; 412 412 413 413 qedi->fp_array = kzalloc_objs(struct qedi_fastpath, 414 - MIN_NUM_CPUS_MSIX(qedi), GFP_KERNEL); 414 + MIN_NUM_CPUS_MSIX(qedi)); 415 415 if (!qedi->fp_array) { 416 416 QEDI_ERR(&qedi->dbg_ctx, 417 417 "fastpath fp array allocation failed.\n"); ··· 419 419 } 420 420 421 421 qedi->sb_array = kzalloc_objs(struct qed_sb_info, 422 - MIN_NUM_CPUS_MSIX(qedi), GFP_KERNEL); 422 + MIN_NUM_CPUS_MSIX(qedi)); 423 423 if (!qedi->sb_array) { 424 424 QEDI_ERR(&qedi->dbg_ctx, 425 425 "fastpath sb array allocation failed.\n"); ··· 499 499 return -ENOMEM; 500 500 501 501 qedi->cid_que.conn_cid_tbl = kmalloc_objs(struct qedi_conn *, 502 - qedi->max_active_conns, 503 - GFP_KERNEL); 502 + qedi->max_active_conns); 504 503 if (!qedi->cid_que.conn_cid_tbl) { 505 504 kfree(qedi->cid_que.cid_que_base); 506 505 qedi->cid_que.cid_que_base = NULL; ··· 1894 1895 static int qedi_alloc_itt(struct qedi_ctx *qedi) 1895 1896 { 1896 1897 qedi->itt_map = kzalloc_objs(struct qedi_itt_map, 1897 - MAX_ISCSI_TASK_ENTRIES, GFP_KERNEL); 1898 + MAX_ISCSI_TASK_ENTRIES); 1898 1899 if (!qedi->itt_map) { 1899 1900 QEDI_ERR(&qedi->dbg_ctx, 1900 1901 "Unable to allocate itt map array memory\n");
+1 -2
drivers/scsi/qla2xxx/qla_init.c
··· 4042 4042 */ 4043 4043 req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS; 4044 4044 req->outstanding_cmds = kzalloc_objs(srb_t *, 4045 - req->num_outstanding_cmds, 4046 - GFP_KERNEL); 4045 + req->num_outstanding_cmds); 4047 4046 4048 4047 if (!req->outstanding_cmds) { 4049 4048 ql_log(ql_log_fatal, NULL, 0x0126,
+3 -3
drivers/scsi/qla2xxx/qla_os.c
··· 463 463 464 464 if ((ql2xmqsupport || ql2xnvmeenable) && ha->max_qpairs) { 465 465 ha->queue_pair_map = kzalloc_objs(struct qla_qpair *, 466 - ha->max_qpairs, GFP_KERNEL); 466 + ha->max_qpairs); 467 467 if (!ha->queue_pair_map) { 468 468 ql_log(ql_log_fatal, vha, 0x0180, 469 469 "Unable to allocate memory for queue pair ptrs.\n"); ··· 4150 4150 4151 4151 if (QLA_TGT_MODE_ENABLED() || EDIF_CAP(ha)) { 4152 4152 ha->vp_map = kzalloc_objs(struct qla_vp_map, 4153 - MAX_MULTI_ID_FABRIC, GFP_KERNEL); 4153 + MAX_MULTI_ID_FABRIC); 4154 4154 if (!ha->vp_map) 4155 4155 goto fail; 4156 4156 } ··· 4376 4376 /* Allocate memory for NVRAM data for vports */ 4377 4377 if (ha->nvram_npiv_size) { 4378 4378 ha->npiv_info = kzalloc_objs(struct qla_npiv_entry, 4379 - ha->nvram_npiv_size, GFP_KERNEL); 4379 + ha->nvram_npiv_size); 4380 4380 if (!ha->npiv_info) { 4381 4381 ql_log_pci(ql_log_fatal, ha->pdev, 0x002d, 4382 4382 "Failed to allocate memory for npiv_info.\n");
+2 -3
drivers/scsi/smartpqi/smartpqi_init.c
··· 1253 1253 num_physicals = get_unaligned_be32(&rpl_8byte_wwid_list->header.list_length) / sizeof(rpl_8byte_wwid_list->lun_entries[0]); 1254 1254 1255 1255 rpl_16byte_wwid_list = kmalloc_flex(*rpl_16byte_wwid_list, lun_entries, 1256 - num_physicals, GFP_KERNEL); 1256 + num_physicals); 1257 1257 if (!rpl_16byte_wwid_list) { 1258 1258 rc = -ENOMEM; 1259 1259 goto out_free_rpl_list; ··· 5206 5206 struct pqi_io_request *io_request; 5207 5207 5208 5208 ctrl_info->io_request_pool = kzalloc_objs(ctrl_info->io_request_pool[0], 5209 - ctrl_info->max_io_slots, 5210 - GFP_KERNEL); 5209 + ctrl_info->max_io_slots); 5211 5210 5212 5211 if (!ctrl_info->io_request_pool) { 5213 5212 dev_err(&ctrl_info->pci_dev->dev,
+1 -2
drivers/soundwire/qcom.c
··· 1231 1231 int maxport, pn, nports = 0; 1232 1232 unsigned int m_port; 1233 1233 struct sdw_port_config *pconfig __free(kfree) = kzalloc_objs(*pconfig, 1234 - ctrl->nports, 1235 - GFP_KERNEL); 1234 + ctrl->nports); 1236 1235 if (!pconfig) 1237 1236 return -ENOMEM; 1238 1237
+2 -2
drivers/staging/greybus/power_supply.c
··· 552 552 } 553 553 554 554 gbpsy->props_raw = kzalloc_objs(*gbpsy->props_raw, 555 - gbpsy->properties_count, GFP_KERNEL); 555 + gbpsy->properties_count); 556 556 if (!gbpsy->props_raw) { 557 557 ret = -ENOMEM; 558 558 goto out_put_operation; ··· 942 942 goto out; 943 943 944 944 supplies->supply = kzalloc_objs(struct gb_power_supply, 945 - supplies->supplies_count, GFP_KERNEL); 945 + supplies->supplies_count); 946 946 947 947 if (!supplies->supply) { 948 948 ret = -ENOMEM;
+1 -1
drivers/staging/media/atomisp/pci/atomisp_csi2_bridge.c
··· 306 306 307 307 /* Max num GPIOs we've seen plus a terminator */ 308 308 int3472 = kzalloc_flex(*int3472, gpios.table, 309 - INT3472_MAX_SENSOR_GPIOS + 1, GFP_KERNEL); 309 + INT3472_MAX_SENSOR_GPIOS + 1); 310 310 if (!int3472) 311 311 return -ENOMEM; 312 312
+7 -13
drivers/staging/media/atomisp/pci/sh_css.c
··· 4531 4531 return err; 4532 4532 mycs->num_yuv_scaler = cas_scaler_descr.num_stage; 4533 4533 mycs->yuv_scaler_binary = kzalloc_objs(struct ia_css_binary, 4534 - cas_scaler_descr.num_stage, 4535 - GFP_KERNEL); 4534 + cas_scaler_descr.num_stage); 4536 4535 if (!mycs->yuv_scaler_binary) { 4537 4536 mycs->num_yuv_scaler = 0; 4538 4537 err = -ENOMEM; 4539 4538 return err; 4540 4539 } 4541 4540 mycs->is_output_stage = kzalloc_objs(bool, 4542 - cas_scaler_descr.num_stage, 4543 - GFP_KERNEL); 4541 + cas_scaler_descr.num_stage); 4544 4542 if (!mycs->is_output_stage) { 4545 4543 err = -ENOMEM; 4546 4544 return err; ··· 5110 5112 } 5111 5113 mycs->num_yuv_scaler = cas_scaler_descr.num_stage; 5112 5114 mycs->yuv_scaler_binary = kzalloc_objs(struct ia_css_binary, 5113 - cas_scaler_descr.num_stage, 5114 - GFP_KERNEL); 5115 + cas_scaler_descr.num_stage); 5115 5116 if (!mycs->yuv_scaler_binary) { 5116 5117 err = -ENOMEM; 5117 5118 IA_CSS_LEAVE_ERR_PRIVATE(err); 5118 5119 return err; 5119 5120 } 5120 5121 mycs->is_output_stage = kzalloc_objs(bool, 5121 - cas_scaler_descr.num_stage, 5122 - GFP_KERNEL); 5122 + cas_scaler_descr.num_stage); 5123 5123 if (!mycs->is_output_stage) { 5124 5124 err = -ENOMEM; 5125 5125 IA_CSS_LEAVE_ERR_PRIVATE(err); ··· 5969 5973 descr->num_stage = num_stages; 5970 5974 5971 5975 descr->in_info = kmalloc_objs(struct ia_css_frame_info, 5972 - descr->num_stage, GFP_KERNEL); 5976 + descr->num_stage); 5973 5977 if (!descr->in_info) { 5974 5978 err = -ENOMEM; 5975 5979 goto ERR; ··· 6141 6145 mycs->num_output = cas_scaler_descr.num_output_stage; 6142 6146 mycs->num_yuv_scaler = cas_scaler_descr.num_stage; 6143 6147 mycs->yuv_scaler_binary = kzalloc_objs(struct ia_css_binary, 6144 - cas_scaler_descr.num_stage, 6145 - GFP_KERNEL); 6148 + cas_scaler_descr.num_stage); 6146 6149 if (!mycs->yuv_scaler_binary) { 6147 6150 err = -ENOMEM; 6148 6151 goto ERR; 6149 6152 } 6150 6153 mycs->is_output_stage = kzalloc_objs(bool, 6151 - cas_scaler_descr.num_stage, 6152 - 
GFP_KERNEL); 6154 + cas_scaler_descr.num_stage); 6153 6155 if (!mycs->is_output_stage) { 6154 6156 err = -ENOMEM; 6155 6157 goto ERR;
+1 -2
drivers/tee/qcomtee/call.c
··· 417 417 418 418 /* +1 for ending QCOMTEE_ARG_TYPE_INV. */ 419 419 struct qcomtee_arg *u __free(kfree) = kzalloc_objs(*u, 420 - arg->num_params + 1, 421 - GFP_KERNEL); 420 + arg->num_params + 1); 422 421 if (!u) 423 422 return -ENOMEM; 424 423
+1 -1
drivers/thermal/cpufreq_cooling.c
··· 372 372 unsigned int num_cpus = cpumask_weight(cpufreq_cdev->policy->related_cpus); 373 373 374 374 cpufreq_cdev->idle_time = kzalloc_objs(*cpufreq_cdev->idle_time, 375 - num_cpus, GFP_KERNEL); 375 + num_cpus); 376 376 if (!cpufreq_cdev->idle_time) 377 377 return -ENOMEM; 378 378
+1 -1
drivers/thermal/intel/int340x_thermal/int3400_thermal.c
··· 391 391 392 392 if (priv->odvp_attrs == NULL) { 393 393 priv->odvp_attrs = kzalloc_objs(struct odvp_attr, 394 - priv->odvp_count, GFP_KERNEL); 394 + priv->odvp_count); 395 395 if (!priv->odvp_attrs) { 396 396 ret = -ENOMEM; 397 397 goto out_err;
+1 -2
drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
··· 134 134 int34x_zone->aux_trip_nr = trip_cnt; 135 135 136 136 zone_trips = kzalloc_objs(*zone_trips, 137 - trip_cnt + INT340X_THERMAL_MAX_TRIP_COUNT, 138 - GFP_KERNEL); 137 + trip_cnt + INT340X_THERMAL_MAX_TRIP_COUNT); 139 138 if (!zone_trips) { 140 139 ret = -ENOMEM; 141 140 goto err_trips_alloc;
+1 -2
drivers/thermal/testing/zone.c
··· 391 391 return -EINVAL; 392 392 393 393 struct thermal_trip *trips __free(kfree) = kzalloc_objs(*trips, 394 - tt_zone->num_trips, 395 - GFP_KERNEL); 394 + tt_zone->num_trips); 396 395 if (!trips) 397 396 return -ENOMEM; 398 397
+1 -1
drivers/tty/serial/sunsab.c
··· 1118 1118 1119 1119 if (num_channels) { 1120 1120 sunsab_ports = kzalloc_objs(struct uart_sunsab_port, 1121 - num_channels, GFP_KERNEL); 1121 + num_channels); 1122 1122 if (!sunsab_ports) 1123 1123 return -ENOMEM; 1124 1124
+2 -3
drivers/usb/gadget/configfs.c
··· 1629 1629 if (!nlangs) 1630 1630 return NULL; 1631 1631 1632 - gadget_strings = kzalloc_objs(struct usb_gadget_strings *, nlangs + 1, 1633 - GFP_KERNEL)/* including NULL terminator */; 1632 + gadget_strings = kzalloc_objs(struct usb_gadget_strings *, nlangs + 1)/* including NULL terminator */; 1634 1633 if (!gadget_strings) 1635 1634 return ERR_PTR(-ENOMEM); 1636 1635 ··· 1645 1646 } 1646 1647 1647 1648 stringtab = kzalloc_objs(struct usb_string, 1648 - language->nstrings + 1, GFP_KERNEL); 1649 + language->nstrings + 1); 1649 1650 if (!stringtab) { 1650 1651 us = ERR_PTR(-ENOMEM); 1651 1652 goto cleanup;
+1 -1
drivers/usb/gadget/function/f_midi2.c
··· 2855 2855 } 2856 2856 2857 2857 midi2->string_defs = kzalloc_objs(*midi2->string_defs, 2858 - midi2->total_blocks + 1, GFP_KERNEL); 2858 + midi2->total_blocks + 1); 2859 2859 if (!midi2->string_defs) { 2860 2860 do_f_midi2_free(midi2, opts); 2861 2861 return ERR_PTR(-ENOMEM);
+2 -2
drivers/usb/gadget/function/u_audio.c
··· 1210 1210 prm->srate = params->c_srates[0]; 1211 1211 1212 1212 prm->reqs = kzalloc_objs(struct usb_request *, 1213 - params->req_number, GFP_KERNEL); 1213 + params->req_number); 1214 1214 if (!prm->reqs) { 1215 1215 err = -ENOMEM; 1216 1216 goto fail; ··· 1234 1234 prm->srate = params->p_srates[0]; 1235 1235 1236 1236 prm->reqs = kzalloc_objs(struct usb_request *, 1237 - params->req_number, GFP_KERNEL); 1237 + params->req_number); 1238 1238 if (!prm->reqs) { 1239 1239 err = -ENOMEM; 1240 1240 goto fail;
+2 -2
drivers/usb/isp1760/isp1760-hcd.c
··· 2573 2573 priv->hcd = hcd; 2574 2574 2575 2575 priv->atl_slots = kzalloc_objs(struct isp1760_slotinfo, 2576 - mem_layout->slot_num, GFP_KERNEL); 2576 + mem_layout->slot_num); 2577 2577 if (!priv->atl_slots) { 2578 2578 ret = -ENOMEM; 2579 2579 goto put_hcd; 2580 2580 } 2581 2581 2582 2582 priv->int_slots = kzalloc_objs(struct isp1760_slotinfo, 2583 - mem_layout->slot_num, GFP_KERNEL); 2583 + mem_layout->slot_num); 2584 2584 if (!priv->int_slots) { 2585 2585 ret = -ENOMEM; 2586 2586 goto free_atl_slots;
+1 -2
drivers/usb/storage/ene_ub6250.c
··· 1120 1120 info->MS_Lib.BytesPerSector, 1121 1121 GFP_KERNEL); 1122 1122 info->MS_Lib.blkext = kmalloc_objs(struct ms_lib_type_extdat, 1123 - info->MS_Lib.PagesPerBlock, 1124 - GFP_KERNEL); 1123 + info->MS_Lib.PagesPerBlock); 1125 1124 1126 1125 if ((info->MS_Lib.blkpag == NULL) || (info->MS_Lib.blkext == NULL)) { 1127 1126 ms_lib_free_writebuf(us);
+1 -1
drivers/vdpa/mlx5/net/mlx5_vnet.c
··· 3810 3810 return; 3811 3811 3812 3812 ndev->irqp.entries = kzalloc_objs(*ndev->irqp.entries, 3813 - ndev->mvdev.max_vqs, GFP_KERNEL); 3813 + ndev->mvdev.max_vqs); 3814 3814 if (!ndev->irqp.entries) 3815 3815 return; 3816 3816
+1 -1
drivers/vdpa/vdpa_sim/vdpa_sim.c
··· 255 255 goto err_iommu; 256 256 257 257 vdpasim->iommu_pt = kmalloc_objs(*vdpasim->iommu_pt, 258 - vdpasim->dev_attr.nas, GFP_KERNEL); 258 + vdpasim->dev_attr.nas); 259 259 if (!vdpasim->iommu_pt) 260 260 goto err_iommu; 261 261
+2 -4
drivers/vhost/scsi.c
··· 1947 1947 1948 1948 if (vs->inline_sg_cnt) { 1949 1949 tv_cmd->sgl = kzalloc_objs(struct scatterlist, 1950 - vs->inline_sg_cnt, 1951 - GFP_KERNEL); 1950 + vs->inline_sg_cnt); 1952 1951 if (!tv_cmd->sgl) { 1953 1952 pr_err("Unable to allocate tv_cmd->sgl\n"); 1954 1953 goto out; ··· 1957 1958 if (vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI) && 1958 1959 vs->inline_sg_cnt) { 1959 1960 tv_cmd->prot_sgl = kzalloc_objs(struct scatterlist, 1960 - vs->inline_sg_cnt, 1961 - GFP_KERNEL); 1961 + vs->inline_sg_cnt); 1962 1962 if (!tv_cmd->prot_sgl) { 1963 1963 pr_err("Unable to allocate tv_cmd->prot_sgl\n"); 1964 1964 goto out;
+3 -5
drivers/virt/nitro_enclaves/ne_misc_dev.c
··· 295 295 ne_cpu_pool.nr_parent_vm_cores = nr_cpu_ids / ne_cpu_pool.nr_threads_per_core; 296 296 297 297 ne_cpu_pool.avail_threads_per_core = kzalloc_objs(*ne_cpu_pool.avail_threads_per_core, 298 - ne_cpu_pool.nr_parent_vm_cores, 299 - GFP_KERNEL); 298 + ne_cpu_pool.nr_parent_vm_cores); 300 299 if (!ne_cpu_pool.avail_threads_per_core) { 301 300 rc = -ENOMEM; 302 301 ··· 941 942 } 942 943 943 944 phys_contig_mem_regions.regions = kzalloc_objs(*phys_contig_mem_regions.regions, 944 - max_nr_pages, GFP_KERNEL); 945 + max_nr_pages); 945 946 if (!phys_contig_mem_regions.regions) { 946 947 rc = -ENOMEM; 947 948 ··· 1627 1628 mutex_unlock(&ne_cpu_pool.mutex); 1628 1629 1629 1630 ne_enclave->threads_per_core = kzalloc_objs(*ne_enclave->threads_per_core, 1630 - ne_enclave->nr_parent_vm_cores, 1631 - GFP_KERNEL); 1631 + ne_enclave->nr_parent_vm_cores); 1632 1632 if (!ne_enclave->threads_per_core) { 1633 1633 rc = -ENOMEM; 1634 1634
+1 -2
drivers/xen/events/events_base.c
··· 2293 2293 xen_evtchn_cpu_prepare, xen_evtchn_cpu_dead); 2294 2294 2295 2295 evtchn_to_irq = kzalloc_objs(*evtchn_to_irq, 2296 - EVTCHN_ROW(xen_evtchn_max_channels()), 2297 - GFP_KERNEL); 2296 + EVTCHN_ROW(xen_evtchn_max_channels())); 2298 2297 BUG_ON(!evtchn_to_irq); 2299 2298 2300 2299 /* No event channels are 'live' right now. */
+1 -1
drivers/xen/gntdev-dmabuf.c
··· 535 535 goto fail_no_free; 536 536 537 537 gntdev_dmabuf->u.imp.refs = kzalloc_objs(gntdev_dmabuf->u.imp.refs[0], 538 - count, GFP_KERNEL); 538 + count); 539 539 if (!gntdev_dmabuf->u.imp.refs) 540 540 goto fail; 541 541
+1 -1
drivers/xen/xen-acpi-processor.c
··· 142 142 sizeof(struct acpi_processor_px)); 143 143 144 144 dst_states = kzalloc_objs(struct xen_processor_px, 145 - _pr->performance->state_count, GFP_KERNEL); 145 + _pr->performance->state_count); 146 146 if (!dst_states) 147 147 return ERR_PTR(-ENOMEM); 148 148
+1 -1
drivers/xen/xen-front-pgdir-shbuf.c
··· 254 254 return -ENOMEM; 255 255 256 256 buf->backend_map_handles = kzalloc_objs(*buf->backend_map_handles, 257 - buf->num_pages, GFP_KERNEL); 257 + buf->num_pages); 258 258 if (!buf->backend_map_handles) { 259 259 kfree(map_ops); 260 260 return -ENOMEM;
+2 -2
fs/afs/cmservice.c
··· 229 229 230 230 _debug("unmarshall FID array"); 231 231 call->request = kzalloc_objs(struct afs_callback_break, 232 - call->count, GFP_KERNEL); 232 + call->count); 233 233 if (!call->request) 234 234 return -ENOMEM; 235 235 ··· 589 589 590 590 _debug("unmarshall FID array"); 591 591 call->request = kzalloc_objs(struct afs_callback_break, 592 - call->count, GFP_KERNEL); 592 + call->count); 593 593 if (!call->request) 594 594 return -ENOMEM; 595 595
+1 -1
fs/afs/dir.c
··· 835 835 /* Need space for examining all the selected files */ 836 836 if (op->nr_files > 2) { 837 837 op->more_files = kvzalloc_objs(struct afs_vnode_param, 838 - op->nr_files - 2, GFP_KERNEL); 838 + op->nr_files - 2); 839 839 if (!op->more_files) { 840 840 afs_op_nomem(op); 841 841 goto out_op;
+1 -2
fs/afs/rotate.c
··· 47 47 read_unlock(&op->volume->servers_lock); 48 48 49 49 op->server_states = kzalloc_objs(op->server_states[0], 50 - op->server_list->nr_servers, 51 - GFP_KERNEL); 50 + op->server_list->nr_servers); 52 51 if (!op->server_states) { 53 52 afs_op_nomem(op); 54 53 trace_afs_rotate(op, afs_rotate_trace_nomem, 0);
+1 -1
fs/btrfs/fiemap.c
··· 647 647 648 648 cache.entries_size = PAGE_SIZE / sizeof(struct btrfs_fiemap_entry); 649 649 cache.entries = kmalloc_objs(struct btrfs_fiemap_entry, 650 - cache.entries_size, GFP_KERNEL); 650 + cache.entries_size); 651 651 backref_ctx = btrfs_alloc_backref_share_check_ctx(); 652 652 path = btrfs_alloc_path(); 653 653 if (!cache.entries || !backref_ctx || !path) {
+2 -3
fs/btrfs/scrub.c
··· 374 374 goto error; 375 375 376 376 stripe->sectors = kzalloc_objs(struct scrub_sector_verification, 377 - stripe->nr_sectors, GFP_KERNEL); 377 + stripe->nr_sectors); 378 378 if (!stripe->sectors) 379 379 goto error; 380 380 ··· 2474 2474 ASSERT(sctx->raid56_data_stripes == NULL); 2475 2475 2476 2476 sctx->raid56_data_stripes = kzalloc_objs(struct scrub_stripe, 2477 - nr_data_stripes(map), 2478 - GFP_KERNEL); 2477 + nr_data_stripes(map)); 2479 2478 if (!sctx->raid56_data_stripes) { 2480 2479 ret = -ENOMEM; 2481 2480 goto out;
+2 -4
fs/btrfs/send.c
··· 8098 8098 } 8099 8099 send_buf_num_pages = sctx->send_max_size >> PAGE_SHIFT; 8100 8100 sctx->send_buf_pages = kzalloc_objs(*sctx->send_buf_pages, 8101 - send_buf_num_pages, 8102 - GFP_KERNEL); 8101 + send_buf_num_pages); 8103 8102 if (!sctx->send_buf_pages) { 8104 8103 ret = -ENOMEM; 8105 8104 goto out; ··· 8117 8118 } 8118 8119 8119 8120 sctx->clone_roots = kvzalloc_objs(*sctx->clone_roots, 8120 - arg->clone_sources_count + 1, 8121 - GFP_KERNEL); 8121 + arg->clone_sources_count + 1); 8122 8122 if (!sctx->clone_roots) { 8123 8123 ret = -ENOMEM; 8124 8124 goto out;
+1 -1
fs/ceph/mds_client.c
··· 4231 4231 } 4232 4232 4233 4233 cap_auths = kzalloc_objs(struct ceph_mds_cap_auth, 4234 - cap_auths_num, GFP_KERNEL); 4234 + cap_auths_num); 4235 4235 if (!cap_auths) { 4236 4236 pr_err_client(cl, "No memory for cap_auths\n"); 4237 4237 return;
+1 -2
fs/erofs/zutil.c
··· 138 138 if (z_erofs_rsv_nrpages) { 139 139 z_erofs_rsvbuf = &z_erofs_gbufpool[total - 1]; 140 140 z_erofs_rsvbuf->pages = kzalloc_objs(*z_erofs_rsvbuf->pages, 141 - z_erofs_rsv_nrpages, 142 - GFP_KERNEL); 141 + z_erofs_rsv_nrpages); 143 142 if (!z_erofs_rsvbuf->pages) { 144 143 z_erofs_rsvbuf = NULL; 145 144 z_erofs_rsv_nrpages = 0;
+1 -2
fs/ext4/mballoc.c
··· 3816 3816 sbi->s_mb_nr_global_goals = umin(num_possible_cpus(), 3817 3817 DIV_ROUND_UP(sbi->s_groups_count, 4)); 3818 3818 sbi->s_mb_last_groups = kzalloc_objs(ext4_group_t, 3819 - sbi->s_mb_nr_global_goals, 3820 - GFP_KERNEL); 3819 + sbi->s_mb_nr_global_goals); 3821 3820 if (sbi->s_mb_last_groups == NULL) { 3822 3821 ret = -ENOMEM; 3823 3822 goto out;
+1 -2
fs/isofs/compress.c
··· 334 334 pcount = 1; 335 335 } 336 336 pages = kzalloc_objs(*pages, 337 - max_t(unsigned int, zisofs_pages_per_cblock, 1), 338 - GFP_KERNEL); 337 + max_t(unsigned int, zisofs_pages_per_cblock, 1)); 339 338 if (!pages) { 340 339 folio_unlock(folio); 341 340 return -ENOMEM;
+1 -1
fs/jffs2/fs.c
··· 563 563 564 564 c->inocache_hashsize = calculate_inocache_hashsize(c->flash_size); 565 565 c->inocache_list = kzalloc_objs(struct jffs2_inode_cache *, 566 - c->inocache_hashsize, GFP_KERNEL); 566 + c->inocache_hashsize); 567 567 if (!c->inocache_list) { 568 568 ret = -ENOMEM; 569 569 goto out_wbuf;
+2 -3
fs/nfs/callback_xdr.c
··· 379 379 if (unlikely(p == NULL)) 380 380 goto out; 381 381 rc_list->rcl_refcalls = kmalloc_objs(*rc_list->rcl_refcalls, 382 - rc_list->rcl_nrefcalls, 383 - GFP_KERNEL); 382 + rc_list->rcl_nrefcalls); 384 383 if (unlikely(rc_list->rcl_refcalls == NULL)) 385 384 goto out; 386 385 for (i = 0; i < rc_list->rcl_nrefcalls; i++) { ··· 419 420 args->csa_rclists = NULL; 420 421 if (args->csa_nrclists) { 421 422 args->csa_rclists = kmalloc_objs(*args->csa_rclists, 422 - args->csa_nrclists, GFP_KERNEL); 423 + args->csa_nrclists); 423 424 if (unlikely(args->csa_rclists == NULL)) 424 425 return htonl(NFS4ERR_RESOURCE); 425 426
+1 -2
fs/nfs/delegation.c
··· 1603 1603 delegation_buckets = roundup_pow_of_two(nfs_delegation_watermark / 16); 1604 1604 server->delegation_hash_mask = delegation_buckets - 1; 1605 1605 server->delegation_hash_table = kmalloc_objs(*server->delegation_hash_table, 1606 - delegation_buckets, 1607 - GFP_KERNEL); 1606 + delegation_buckets); 1608 1607 if (!server->delegation_hash_table) 1609 1608 return -ENOMEM; 1610 1609 for (i = 0; i < delegation_buckets; i++)
+1 -1
fs/nfsd/export.c
··· 480 480 return 0; 481 481 482 482 fsloc->locations = kzalloc_objs(struct nfsd4_fs_location, 483 - fsloc->locations_count, GFP_KERNEL); 483 + fsloc->locations_count); 484 484 if (!fsloc->locations) 485 485 return -ENOMEM; 486 486 for (i=0; i < fsloc->locations_count; i++) {
+2 -2
fs/nfsd/nfs4recover.c
··· 479 479 int i; 480 480 481 481 nn->reclaim_str_hashtbl = kmalloc_objs(struct list_head, 482 - CLIENT_HASH_SIZE, GFP_KERNEL); 482 + CLIENT_HASH_SIZE); 483 483 if (!nn->reclaim_str_hashtbl) 484 484 return -ENOMEM; 485 485 ··· 1356 1356 int i; 1357 1357 1358 1358 nn->reclaim_str_hashtbl = kmalloc_objs(struct list_head, 1359 - CLIENT_HASH_SIZE, GFP_KERNEL); 1359 + CLIENT_HASH_SIZE); 1360 1360 if (!nn->reclaim_str_hashtbl) 1361 1361 return -ENOMEM; 1362 1362
+2 -2
fs/nfsd/nfs4state.c
··· 2358 2358 if (clp->cl_name.data == NULL) 2359 2359 goto err_no_name; 2360 2360 clp->cl_ownerstr_hashtbl = kmalloc_objs(struct list_head, 2361 - OWNER_HASH_SIZE, GFP_KERNEL); 2361 + OWNER_HASH_SIZE); 2362 2362 if (!clp->cl_ownerstr_hashtbl) 2363 2363 goto err_no_hashtbl; 2364 2364 clp->cl_callback_wq = alloc_ordered_workqueue("nfsd4_callbacks", 0); ··· 8966 8966 if (!nn->unconf_id_hashtbl) 8967 8967 goto err_unconf_id; 8968 8968 nn->sessionid_hashtbl = kmalloc_objs(struct list_head, 8969 - SESSION_HASH_SIZE, GFP_KERNEL); 8969 + SESSION_HASH_SIZE); 8970 8970 if (!nn->sessionid_hashtbl) 8971 8971 goto err_sessionid; 8972 8972
+1 -1
fs/pstore/ram_core.c
··· 230 230 231 231 /* allocate workspace instead of using stack VLA */ 232 232 prz->ecc_info.par = kmalloc_objs(*prz->ecc_info.par, 233 - prz->ecc_info.ecc_size, GFP_KERNEL); 233 + prz->ecc_info.ecc_size); 234 234 if (!prz->ecc_info.par) { 235 235 pr_err("cannot allocate ECC parity workspace\n"); 236 236 return -ENOMEM;
+1 -1
fs/smb/client/smbdirect.c
··· 2806 2806 } 2807 2807 2808 2808 mr->sgt.sgl = kzalloc_objs(struct scatterlist, 2809 - sp->max_frmr_depth, GFP_KERNEL); 2809 + sp->max_frmr_depth); 2810 2810 if (!mr->sgt.sgl) { 2811 2811 ret = -ENOMEM; 2812 2812 log_rdma_mr(ERR, "failed to allocate sgl\n");
+1 -2
fs/squashfs/file.c
··· 104 104 * if a 'large' file is read. 105 105 */ 106 106 msblk->meta_index = kzalloc_objs(*(msblk->meta_index), 107 - SQUASHFS_META_SLOTS, 108 - GFP_KERNEL); 107 + SQUASHFS_META_SLOTS); 109 108 if (msblk->meta_index == NULL) { 110 109 ERROR("Failed to allocate meta_index\n"); 111 110 goto failed;
+1 -1
fs/udf/super.c
··· 1757 1757 data.size_part_descs = PART_DESC_ALLOC_STEP; 1758 1758 data.num_part_descs = 0; 1759 1759 data.part_descs_loc = kzalloc_objs(*data.part_descs_loc, 1760 - data.size_part_descs, GFP_KERNEL); 1760 + data.size_part_descs); 1761 1761 if (!data.part_descs_loc) 1762 1762 return -ENOMEM; 1763 1763
+1 -1
kernel/bpf/syscall.c
··· 6077 6077 } 6078 6078 6079 6079 used_maps_new = kmalloc_objs(used_maps_new[0], 6080 - prog->aux->used_map_cnt + 1, GFP_KERNEL); 6080 + prog->aux->used_map_cnt + 1); 6081 6081 if (!used_maps_new) { 6082 6082 ret = -ENOMEM; 6083 6083 goto out_unlock;
+1 -1
kernel/events/hw_breakpoint.c
··· 186 186 bp_slots_histogram_alloc(struct bp_slots_histogram *hist, enum bp_type_idx type) 187 187 { 188 188 hist->count = kzalloc_objs(*hist->count, 189 - hw_breakpoint_slots_cached(type), GFP_KERNEL); 189 + hw_breakpoint_slots_cached(type)); 190 190 return hist->count; 191 191 } 192 192
+5 -6
kernel/locking/locktorture.c
··· 611 611 ww_mutex_init(&torture_ww_mutex_2, &torture_ww_class); 612 612 613 613 ww_acquire_ctxs = kmalloc_objs(*ww_acquire_ctxs, 614 - cxt.nrealwriters_stress, GFP_KERNEL); 614 + cxt.nrealwriters_stress); 615 615 if (!ww_acquire_ctxs) 616 616 VERBOSE_TOROUT_STRING("ww_acquire_ctx: Out of memory"); 617 617 } ··· 1129 1129 if (call_rcu_chains <= 0) 1130 1130 return 0; 1131 1131 call_rcu_chain_list = kzalloc_objs(*call_rcu_chain_list, 1132 - call_rcu_chains, GFP_KERNEL); 1132 + call_rcu_chains); 1133 1133 if (!call_rcu_chain_list) 1134 1134 return -ENOMEM; 1135 1135 for (i = 0; i < call_rcu_chains; i++) { ··· 1322 1322 1323 1323 if (nreaders_stress) { 1324 1324 cxt.lrsa = kmalloc_objs(*cxt.lrsa, 1325 - cxt.nrealreaders_stress, 1326 - GFP_KERNEL); 1325 + cxt.nrealreaders_stress); 1327 1326 if (cxt.lrsa == NULL) { 1328 1327 VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory"); 1329 1328 firsterr = -ENOMEM; ··· 1370 1371 1371 1372 if (nwriters_stress) { 1372 1373 writer_tasks = kzalloc_objs(writer_tasks[0], 1373 - cxt.nrealwriters_stress, GFP_KERNEL); 1374 + cxt.nrealwriters_stress); 1374 1375 if (writer_tasks == NULL) { 1375 1376 TOROUT_ERRSTRING("writer_tasks: Out of memory"); 1376 1377 firsterr = -ENOMEM; ··· 1384 1385 1385 1386 if (cxt.cur_ops->readlock) { 1386 1387 reader_tasks = kzalloc_objs(reader_tasks[0], 1387 - cxt.nrealreaders_stress, GFP_KERNEL); 1388 + cxt.nrealreaders_stress); 1388 1389 if (reader_tasks == NULL) { 1389 1390 TOROUT_ERRSTRING("reader_tasks: Out of memory"); 1390 1391 kfree(writer_tasks);
+3 -4
kernel/rcu/rcuscale.c
··· 909 909 kfree_by_call_rcu); 910 910 911 911 kfree_reader_tasks = kzalloc_objs(kfree_reader_tasks[0], 912 - kfree_nrealthreads, GFP_KERNEL); 912 + kfree_nrealthreads); 913 913 if (kfree_reader_tasks == NULL) { 914 914 firsterr = -ENOMEM; 915 915 goto unwind; ··· 1156 1156 goto unwind; 1157 1157 } 1158 1158 writer_freelists = kzalloc_objs(writer_freelists[0], 1159 - nrealwriters, GFP_KERNEL); 1159 + nrealwriters); 1160 1160 } 1161 1161 if (!writer_tasks || !writer_durations || !writer_n_durations || !writer_done || 1162 1162 (gp_async && !writer_freelists)) { ··· 1178 1178 init_llist_head(&wflp->ws_lhg); 1179 1179 init_llist_head(&wflp->ws_lhp); 1180 1180 wflp->ws_mblocks = kzalloc_objs(wflp->ws_mblocks[0], 1181 - gp_async_max, 1182 - GFP_KERNEL); 1181 + gp_async_max); 1183 1182 if (!wflp->ws_mblocks) { 1184 1183 firsterr = -ENOMEM; 1185 1184 goto unwind;
+2 -2
kernel/rcu/rcutorture.c
··· 4549 4549 rcu_torture_write_types(); 4550 4550 if (nrealfakewriters > 0) { 4551 4551 fakewriter_tasks = kzalloc_objs(fakewriter_tasks[0], 4552 - nrealfakewriters, GFP_KERNEL); 4552 + nrealfakewriters); 4553 4553 if (fakewriter_tasks == NULL) { 4554 4554 TOROUT_ERRSTRING("out of memory"); 4555 4555 firsterr = -ENOMEM; ··· 4564 4564 } 4565 4565 reader_tasks = kzalloc_objs(reader_tasks[0], nrealreaders); 4566 4566 rcu_torture_reader_mbchk = kzalloc_objs(*rcu_torture_reader_mbchk, 4567 - nrealreaders, GFP_KERNEL); 4567 + nrealreaders); 4568 4568 if (!reader_tasks || !rcu_torture_reader_mbchk) { 4569 4569 TOROUT_ERRSTRING("out of memory"); 4570 4570 firsterr = -ENOMEM;
+1 -1
kernel/rcu/tasks.h
··· 260 260 lim = rcu_task_enqueue_lim; 261 261 262 262 rtp->rtpcp_array = kzalloc_objs(struct rcu_tasks_percpu *, 263 - num_possible_cpus(), GFP_KERNEL); 263 + num_possible_cpus()); 264 264 BUG_ON(!rtp->rtpcp_array); 265 265 266 266 for_each_possible_cpu(cpu) {
+1 -1
kernel/sched/ext_idle.c
··· 665 665 666 666 /* Allocate per-node idle cpumasks */ 667 667 scx_idle_node_masks = kzalloc_objs(*scx_idle_node_masks, 668 - num_possible_nodes(), GFP_KERNEL); 668 + num_possible_nodes()); 669 669 BUG_ON(!scx_idle_node_masks); 670 670 671 671 for_each_node(i) {
+1 -1
kernel/time/timer_migration.c
··· 2002 2002 tmigr_crossnode_level = cpulvl; 2003 2003 2004 2004 tmigr_level_list = kzalloc_objs(struct list_head, 2005 - tmigr_hierarchy_levels, GFP_KERNEL); 2005 + tmigr_hierarchy_levels); 2006 2006 if (!tmigr_level_list) 2007 2007 goto err; 2008 2008
+1 -2
kernel/trace/trace_events_synth.c
··· 999 999 1000 1000 if (n_dynamic_fields) { 1001 1001 event->dynamic_fields = kzalloc_objs(*event->dynamic_fields, 1002 - n_dynamic_fields, 1003 - GFP_KERNEL); 1002 + n_dynamic_fields); 1004 1003 if (!event->dynamic_fields) { 1005 1004 free_synth_event(event); 1006 1005 event = ERR_PTR(-ENOMEM);
+1 -1
kernel/trace/trace_syscalls.c
··· 1338 1338 1339 1339 if (!IS_ENABLED(CONFIG_HAVE_SPARSE_SYSCALL_NR)) { 1340 1340 syscalls_metadata = kzalloc_objs(*syscalls_metadata, 1341 - NR_syscalls, GFP_KERNEL); 1341 + NR_syscalls); 1342 1342 if (!syscalls_metadata) { 1343 1343 WARN_ON(1); 1344 1344 return;
+1 -1
kernel/user_namespace.c
··· 795 795 796 796 /* Allocate memory for 340 mappings. */ 797 797 forward = kmalloc_objs(struct uid_gid_extent, 798 - UID_GID_MAP_MAX_EXTENTS, GFP_KERNEL); 798 + UID_GID_MAP_MAX_EXTENTS); 799 799 if (!forward) 800 800 return -ENOMEM; 801 801
+2 -2
lib/objagg.c
··· 611 611 int i; 612 612 613 613 objagg_stats = kzalloc_flex(*objagg_stats, stats_info, 614 - objagg->obj_count, GFP_KERNEL); 614 + objagg->obj_count); 615 615 if (!objagg_stats) 616 616 return ERR_PTR(-ENOMEM); 617 617 ··· 1011 1011 int i; 1012 1012 1013 1013 objagg_stats = kzalloc_flex(*objagg_stats, stats_info, 1014 - objagg_hints->node_count, GFP_KERNEL); 1014 + objagg_hints->node_count); 1015 1015 if (!objagg_stats) 1016 1016 return ERR_PTR(-ENOMEM); 1017 1017
+1 -1
mm/damon/stat.c
··· 91 91 damon_for_each_target(t, c) { 92 92 /* there is only one target */ 93 93 region_pointers = kmalloc_objs(*region_pointers, 94 - damon_nr_regions(t), GFP_KERNEL); 94 + damon_nr_regions(t)); 95 95 if (!region_pointers) 96 96 return -ENOMEM; 97 97 damon_for_each_region(r, t) {
+1 -1
mm/damon/tests/sysfs-kunit.h
··· 49 49 kunit_skip(test, "sysfs_targets alloc fail"); 50 50 sysfs_targets->nr = 1; 51 51 sysfs_targets->targets_arr = kmalloc_objs(*sysfs_targets->targets_arr, 52 - 1, GFP_KERNEL); 52 + 1); 53 53 if (!sysfs_targets->targets_arr) { 54 54 kfree(sysfs_targets); 55 55 kunit_skip(test, "targets_arr alloc fail");
+1 -1
net/bridge/br_mdb.c
··· 1134 1134 } 1135 1135 1136 1136 cfg->src_entries = kzalloc_objs(struct br_mdb_src_entry, 1137 - cfg->num_src_entries, GFP_KERNEL); 1137 + cfg->num_src_entries); 1138 1138 if (!cfg->src_entries) 1139 1139 return -ENOMEM; 1140 1140
+2 -4
net/core/devmem.c
··· 242 242 243 243 if (direction == DMA_TO_DEVICE) { 244 244 binding->tx_vec = kvmalloc_objs(struct net_iov *, 245 - dmabuf->size / PAGE_SIZE, 246 - GFP_KERNEL); 245 + dmabuf->size / PAGE_SIZE); 247 246 if (!binding->tx_vec) { 248 247 err = -ENOMEM; 249 248 goto err_unmap; ··· 289 290 } 290 291 291 292 owner->area.niovs = kvmalloc_objs(*owner->area.niovs, 292 - owner->area.num_niovs, 293 - GFP_KERNEL); 293 + owner->area.num_niovs); 294 294 if (!owner->area.niovs) { 295 295 err = -ENOMEM; 296 296 goto err_free_chunks;
+1 -1
net/ipv4/cipso_ipv4.c
··· 169 169 u32 iter; 170 170 171 171 cipso_v4_cache = kzalloc_objs(struct cipso_v4_map_cache_bkt, 172 - CIPSO_V4_CACHE_BUCKETS, GFP_KERNEL); 172 + CIPSO_V4_CACHE_BUCKETS); 173 173 if (!cipso_v4_cache) 174 174 return -ENOMEM; 175 175
+1 -1
net/ipv4/devinet.c
··· 2755 2755 2756 2756 err = -ENOMEM; 2757 2757 net->ipv4.inet_addr_lst = kmalloc_objs(struct hlist_head, 2758 - IN4_ADDR_HSIZE, GFP_KERNEL); 2758 + IN4_ADDR_HSIZE); 2759 2759 if (!net->ipv4.inet_addr_lst) 2760 2760 goto err_alloc_hash; 2761 2761
+1 -1
net/ipv4/nexthop.c
··· 714 714 info->id = nh->id; 715 715 info->type = NH_NOTIFIER_INFO_TYPE_GRP_HW_STATS; 716 716 info->nh_grp_hw_stats = kzalloc_flex(*info->nh_grp_hw_stats, stats, 717 - nhg->num_nh, GFP_KERNEL); 717 + nhg->num_nh); 718 718 if (!info->nh_grp_hw_stats) 719 719 return -ENOMEM; 720 720
+1 -2
net/ipv4/udp_tunnel_nic.c
··· 762 762 763 763 for (i = 0; i < n_tables; i++) { 764 764 utn->entries[i] = kzalloc_objs(*utn->entries[i], 765 - info->tables[i].n_entries, 766 - GFP_KERNEL); 765 + info->tables[i].n_entries); 767 766 if (!utn->entries[i]) 768 767 goto err_free_prev_entries; 769 768 }
+1 -1
net/ipv6/addrconf.c
··· 7398 7398 spin_lock_init(&net->ipv6.addrconf_hash_lock); 7399 7399 INIT_DEFERRABLE_WORK(&net->ipv6.addr_chk_work, addrconf_verify_work); 7400 7400 net->ipv6.inet6_addr_lst = kzalloc_objs(struct hlist_head, 7401 - IN6_ADDR_HSIZE, GFP_KERNEL); 7401 + IN6_ADDR_HSIZE); 7402 7402 if (!net->ipv6.inet6_addr_lst) 7403 7403 goto err_alloc_addr; 7404 7404
+1 -1
net/ipv6/calipso.c
··· 134 134 u32 iter; 135 135 136 136 calipso_cache = kzalloc_objs(struct calipso_map_cache_bkt, 137 - CALIPSO_CACHE_BUCKETS, GFP_KERNEL); 137 + CALIPSO_CACHE_BUCKETS); 138 138 if (!calipso_cache) 139 139 return -ENOMEM; 140 140
+2 -2
net/mac80211/cfg.c
··· 3941 3941 if (beacon->mbssid_ies && beacon->mbssid_ies->cnt) { 3942 3942 new_beacon->mbssid_ies = 3943 3943 kzalloc_flex(*new_beacon->mbssid_ies, elem, 3944 - beacon->mbssid_ies->cnt, GFP_KERNEL); 3944 + beacon->mbssid_ies->cnt); 3945 3945 if (!new_beacon->mbssid_ies) { 3946 3946 kfree(new_beacon); 3947 3947 return NULL; ··· 3950 3950 if (beacon->rnr_ies && beacon->rnr_ies->cnt) { 3951 3951 new_beacon->rnr_ies = 3952 3952 kzalloc_flex(*new_beacon->rnr_ies, elem, 3953 - beacon->rnr_ies->cnt, GFP_KERNEL); 3953 + beacon->rnr_ies->cnt); 3954 3954 if (!new_beacon->rnr_ies) { 3955 3955 kfree(new_beacon->mbssid_ies); 3956 3956 kfree(new_beacon);
+1 -1
net/mac80211/main.c
··· 1360 1360 1361 1361 1362 1362 local->int_scan_req = kzalloc_flex(*local->int_scan_req, channels, 1363 - channels, GFP_KERNEL); 1363 + channels); 1364 1364 if (!local->int_scan_req) 1365 1365 return -ENOMEM; 1366 1366
+1 -1
net/netfilter/ipvs/ip_vs_mh.c
··· 294 294 295 295 if (svc->num_dests >= 1) { 296 296 s->dest_setup = kzalloc_objs(struct ip_vs_mh_dest_setup, 297 - svc->num_dests, GFP_KERNEL); 297 + svc->num_dests); 298 298 if (!s->dest_setup) 299 299 return -ENOMEM; 300 300 }
+1 -1
net/netfilter/nf_tables_api.c
··· 4316 4316 size = 0; 4317 4317 if (nla[NFTA_RULE_EXPRESSIONS]) { 4318 4318 expr_info = kvmalloc_objs(struct nft_expr_info, 4319 - NFT_RULE_MAXEXPRS, GFP_KERNEL); 4319 + NFT_RULE_MAXEXPRS); 4320 4320 if (!expr_info) 4321 4321 return -ENOMEM; 4322 4322
+1 -2
net/sched/sch_htb.c
··· 1096 1096 1097 1097 q->num_direct_qdiscs = dev->real_num_tx_queues; 1098 1098 q->direct_qdiscs = kzalloc_objs(*q->direct_qdiscs, 1099 - q->num_direct_qdiscs, 1100 - GFP_KERNEL); 1099 + q->num_direct_qdiscs); 1101 1100 if (!q->direct_qdiscs) 1102 1101 return -ENOMEM; 1103 1102 }
+4 -4
net/smc/smc_wr.c
··· 756 756 if (!link->wr_rx_ibs) 757 757 goto no_mem_wr_tx_ibs; 758 758 link->wr_tx_rdmas = kzalloc_objs(link->wr_tx_rdmas[0], 759 - link->max_send_wr, GFP_KERNEL); 759 + link->max_send_wr); 760 760 if (!link->wr_tx_rdmas) 761 761 goto no_mem_wr_rx_ibs; 762 762 link->wr_tx_rdma_sges = kzalloc_objs(link->wr_tx_rdma_sges[0], 763 - link->max_send_wr, GFP_KERNEL); 763 + link->max_send_wr); 764 764 if (!link->wr_tx_rdma_sges) 765 765 goto no_mem_wr_tx_rdmas; 766 766 link->wr_tx_sges = kzalloc_objs(link->wr_tx_sges[0], link->max_send_wr); ··· 775 775 if (!link->wr_tx_mask) 776 776 goto no_mem_wr_rx_sges; 777 777 link->wr_tx_pends = kzalloc_objs(link->wr_tx_pends[0], 778 - link->max_send_wr, GFP_KERNEL); 778 + link->max_send_wr); 779 779 if (!link->wr_tx_pends) 780 780 goto no_mem_wr_tx_mask; 781 781 link->wr_tx_compl = kzalloc_objs(link->wr_tx_compl[0], 782 - link->max_send_wr, GFP_KERNEL); 782 + link->max_send_wr); 783 783 if (!link->wr_tx_compl) 784 784 goto no_mem_wr_tx_pends; 785 785
+1 -1
net/unix/af_unix.c
··· 3803 3803 goto err_proc; 3804 3804 3805 3805 net->unx.table.buckets = kvmalloc_objs(struct hlist_head, 3806 - UNIX_HASH_SIZE, GFP_KERNEL); 3806 + UNIX_HASH_SIZE); 3807 3807 if (!net->unx.table.buckets) 3808 3808 goto free_locks; 3809 3809
+1 -1
net/wireless/core.c
··· 1001 1001 int idx; 1002 1002 1003 1003 wiphy->radio_cfg = kzalloc_objs(*wiphy->radio_cfg, 1004 - wiphy->n_radio, GFP_KERNEL); 1004 + wiphy->n_radio); 1005 1005 if (!wiphy->radio_cfg) 1006 1006 return -ENOMEM; 1007 1007 /*
+2 -2
net/wireless/nl80211.c
··· 14308 14308 14309 14309 if (n_thresholds) { 14310 14310 cqm_config = kzalloc_flex(*cqm_config, rssi_thresholds, 14311 - n_thresholds, GFP_KERNEL); 14311 + n_thresholds); 14312 14312 if (!cqm_config) 14313 14313 return -ENOMEM; 14314 14314 ··· 15051 15051 return -EINVAL; 15052 15052 15053 15053 new_triggers.patterns = kzalloc_objs(new_triggers.patterns[0], 15054 - n_patterns, GFP_KERNEL); 15054 + n_patterns); 15055 15055 if (!new_triggers.patterns) 15056 15056 return -ENOMEM; 15057 15057
+1 -1
net/xdp/xsk_buff_pool.c
··· 420 420 } 421 421 422 422 pool->dma_pages = kvzalloc_objs(*pool->dma_pages, 423 - dma_map->dma_pages_cnt, GFP_KERNEL); 423 + dma_map->dma_pages_cnt); 424 424 if (!pool->dma_pages) 425 425 return -ENOMEM; 426 426
+2 -3
net/xfrm/xfrm_iptfs.c
··· 2527 2527 /* saved array is for saving 1..N seq nums from wantseq */ 2528 2528 if (xc->reorder_win_size) { 2529 2529 xtfs->w_saved = kzalloc_objs(*xtfs->w_saved, 2530 - xc->reorder_win_size, GFP_KERNEL); 2530 + xc->reorder_win_size); 2531 2531 if (!xtfs->w_saved) { 2532 2532 NL_SET_ERR_MSG(extack, "Cannot alloc reorder window"); 2533 2533 return -ENOMEM; ··· 2659 2659 xtfs->ra_newskb = NULL; 2660 2660 if (xtfs->cfg.reorder_win_size) { 2661 2661 xtfs->w_saved = kzalloc_objs(*xtfs->w_saved, 2662 - xtfs->cfg.reorder_win_size, 2663 - GFP_KERNEL); 2662 + xtfs->cfg.reorder_win_size); 2664 2663 if (!xtfs->w_saved) { 2665 2664 kfree_sensitive(xtfs); 2666 2665 return -ENOMEM;
+1 -2
security/integrity/ima/ima_crypto.c
··· 139 139 ima_hash_algo_idx = NR_BANKS(ima_tpm_chip) + ima_extra_slots++; 140 140 141 141 ima_algo_array = kzalloc_objs(*ima_algo_array, 142 - NR_BANKS(ima_tpm_chip) + ima_extra_slots, 143 - GFP_KERNEL); 142 + NR_BANKS(ima_tpm_chip) + ima_extra_slots); 144 143 if (!ima_algo_array) { 145 144 rc = -ENOMEM; 146 145 goto out;
+2 -2
security/selinux/ss/conditional.c
··· 166 166 { 167 167 kfree(p->bool_val_to_struct); 168 168 p->bool_val_to_struct = kmalloc_objs(*p->bool_val_to_struct, 169 - p->p_bools.nprim, GFP_KERNEL); 169 + p->p_bools.nprim); 170 170 if (!p->bool_val_to_struct) 171 171 return -ENOMEM; 172 172 ··· 710 710 int rc; 711 711 712 712 cond_bool_array = kmalloc_objs(*orig->bool_val_to_struct, 713 - orig->p_bools.nprim, GFP_KERNEL); 713 + orig->p_bools.nprim); 714 714 if (!cond_bool_array) 715 715 return -ENOMEM; 716 716
+5 -5
security/selinux/ss/policydb.c
··· 739 739 symtab_hash_eval(p->symtab); 740 740 741 741 p->class_val_to_struct = kzalloc_objs(*p->class_val_to_struct, 742 - p->p_classes.nprim, GFP_KERNEL); 742 + p->p_classes.nprim); 743 743 if (!p->class_val_to_struct) 744 744 return -ENOMEM; 745 745 746 746 p->role_val_to_struct = kzalloc_objs(*p->role_val_to_struct, 747 - p->p_roles.nprim, GFP_KERNEL); 747 + p->p_roles.nprim); 748 748 if (!p->role_val_to_struct) 749 749 return -ENOMEM; 750 750 751 751 p->user_val_to_struct = kzalloc_objs(*p->user_val_to_struct, 752 - p->p_users.nprim, GFP_KERNEL); 752 + p->p_users.nprim); 753 753 if (!p->user_val_to_struct) 754 754 return -ENOMEM; 755 755 756 756 p->type_val_to_struct = kvzalloc_objs(*p->type_val_to_struct, 757 - p->p_types.nprim, GFP_KERNEL); 757 + p->p_types.nprim); 758 758 if (!p->type_val_to_struct) 759 759 return -ENOMEM; 760 760 ··· 2724 2724 2725 2725 rc = -ENOMEM; 2726 2726 p->type_attr_map_array = kvzalloc_objs(*p->type_attr_map_array, 2727 - p->p_types.nprim, GFP_KERNEL); 2727 + p->p_types.nprim); 2728 2728 if (!p->type_attr_map_array) 2729 2729 goto bad; 2730 2730
+1 -1
sound/core/seq/oss/seq_oss_synth.c
··· 214 214 info->nr_voices = rec->nr_voices; 215 215 if (info->nr_voices > 0) { 216 216 info->ch = kzalloc_objs(struct seq_oss_chinfo, 217 - info->nr_voices, GFP_KERNEL); 217 + info->nr_voices); 218 218 if (!info->ch) { 219 219 rec->oper.close(&info->arg); 220 220 module_put(rec->oper.owner);
+2 -3
sound/firewire/amdtp-stream.c
··· 1736 1736 queue_size * 3 / 2); 1737 1737 s->ctx_data.tx.cache.pos = 0; 1738 1738 s->ctx_data.tx.cache.descs = kzalloc_objs(*s->ctx_data.tx.cache.descs, 1739 - s->ctx_data.tx.cache.size, 1740 - GFP_KERNEL); 1739 + s->ctx_data.tx.cache.size); 1741 1740 if (!s->ctx_data.tx.cache.descs) { 1742 1741 err = -ENOMEM; 1743 1742 goto err_context; ··· 1757 1758 }; 1758 1759 1759 1760 s->ctx_data.rx.seq.descs = kzalloc_objs(*s->ctx_data.rx.seq.descs, 1760 - queue_size, GFP_KERNEL); 1761 + queue_size); 1761 1762 if (!s->ctx_data.rx.seq.descs) { 1762 1763 err = -ENOMEM; 1763 1764 goto err_context;
+1 -1
sound/pci/emu10k1/emufx.c
··· 1807 1807 goto __err_gpr; 1808 1808 1809 1809 controls = kzalloc_objs(struct snd_emu10k1_fx8010_control_gpr, 1810 - SND_EMU10K1_GPR_CONTROLS, GFP_KERNEL); 1810 + SND_EMU10K1_GPR_CONTROLS); 1811 1811 if (!controls) 1812 1812 goto __err_ctrls; 1813 1813
+1 -1
sound/soc/codecs/pcm6240.c
··· 1262 1262 config_offset += 4; 1263 1263 1264 1264 bk_da = cfg_info->blk_data = kzalloc_objs(struct pcmdevice_block_data *, 1265 - cfg_info->nblocks, GFP_KERNEL); 1265 + cfg_info->nblocks); 1266 1266 if (!bk_da) { 1267 1267 *status = -ENOMEM; 1268 1268 goto out;
+5 -6
sound/soc/codecs/tas2781-fmwlib.c
··· 218 218 * these firmwares. 219 219 */ 220 220 bk_da = cfg_info->blk_data = kzalloc_objs(struct tasdev_blk_data *, 221 - cfg_info->nblocks, GFP_KERNEL); 221 + cfg_info->nblocks); 222 222 if (!bk_da) { 223 223 *status = -ENOMEM; 224 224 goto out; ··· 805 805 } 806 806 807 807 tas_fmw->programs = kzalloc_objs(struct tasdevice_prog, 808 - tas_fmw->nr_programs, GFP_KERNEL); 808 + tas_fmw->nr_programs); 809 809 if (!tas_fmw->programs) { 810 810 offset = -ENOMEM; 811 811 goto out; ··· 844 844 } 845 845 846 846 tas_fmw->configs = kzalloc_objs(struct tasdevice_config, 847 - tas_fmw->nr_configurations, GFP_KERNEL); 847 + tas_fmw->nr_configurations); 848 848 if (!tas_fmw->configs) { 849 849 offset = -ENOMEM; 850 850 goto out; ··· 1346 1346 goto out; 1347 1347 } 1348 1348 tas_fmw->configs = kzalloc_objs(struct tasdevice_config, 1349 - tas_fmw->nr_configurations, GFP_KERNEL); 1349 + tas_fmw->nr_configurations); 1350 1350 if (!tas_fmw->configs) { 1351 1351 offset = -ENOMEM; 1352 1352 goto out; ··· 2141 2141 } 2142 2142 2143 2143 tas_fmw->calibrations = kzalloc_objs(struct tasdevice_calibration, 2144 - tas_fmw->nr_calibrations, 2145 - GFP_KERNEL); 2144 + tas_fmw->nr_calibrations); 2146 2145 if (!tas_fmw->calibrations) { 2147 2146 offset = -ENOMEM; 2148 2147 goto out;
+1 -2
sound/soc/qcom/qdsp6/topology.c
··· 318 318 struct audioreach_module_priv_data *pdata; 319 319 320 320 pdata = kzalloc_flex(*pdata, data, 321 - le32_to_cpu(mod_array->size), 322 - GFP_KERNEL); 321 + le32_to_cpu(mod_array->size)); 323 322 if (!pdata) 324 323 return ERR_PTR(-ENOMEM); 325 324
+1 -2
sound/soc/soc-dapm.c
··· 3333 3333 3334 3334 if (w->num_kcontrols) { 3335 3335 w->kcontrols = kzalloc_objs(struct snd_kcontrol *, 3336 - w->num_kcontrols, 3337 - GFP_KERNEL); 3336 + w->num_kcontrols); 3338 3337 if (!w->kcontrols) { 3339 3338 snd_soc_dapm_mutex_unlock(card); 3340 3339 return -ENOMEM;
+2 -3
sound/soc/sof/ipc4-pcm.c
··· 477 477 478 478 /* allocate memory for the pipeline data */ 479 479 trigger_list = kzalloc_flex(*trigger_list, pipeline_instance_ids, 480 - pipeline_list->count, GFP_KERNEL); 480 + pipeline_list->count); 481 481 if (!trigger_list) 482 482 return -ENOMEM; 483 483 ··· 932 932 933 933 /* allocate memory for max number of pipeline IDs */ 934 934 pipeline_list->pipelines = kzalloc_objs(*pipeline_list->pipelines, 935 - ipc4_data->max_num_pipelines, 936 - GFP_KERNEL); 935 + ipc4_data->max_num_pipelines); 937 936 if (!pipeline_list->pipelines) { 938 937 sof_ipc4_pcm_free(sdev, spcm); 939 938 return -ENOMEM;
+2 -4
sound/soc/sof/ipc4-topology.c
··· 437 437 438 438 if (available_fmt->num_input_formats) { 439 439 in_format = kzalloc_objs(*in_format, 440 - available_fmt->num_input_formats, 441 - GFP_KERNEL); 440 + available_fmt->num_input_formats); 442 441 if (!in_format) 443 442 return -ENOMEM; 444 443 available_fmt->input_pin_fmts = in_format; ··· 458 459 459 460 if (available_fmt->num_output_formats) { 460 461 out_format = kzalloc_objs(*out_format, 461 - available_fmt->num_output_formats, 462 - GFP_KERNEL); 462 + available_fmt->num_output_formats); 463 463 if (!out_format) { 464 464 ret = -ENOMEM; 465 465 goto err_in;
+1 -1
sound/synth/emux/emux_effect.c
··· 273 273 { 274 274 int i; 275 275 p->effect = kzalloc_objs(struct snd_emux_effect_table, 276 - p->chset.max_channels, GFP_KERNEL); 276 + p->chset.max_channels); 277 277 if (p->effect) { 278 278 for (i = 0; i < p->chset.max_channels; i++) 279 279 p->chset.channels[i].private = p->effect + i;
+1 -2
sound/usb/qcom/qc_audio_offload.c
··· 1433 1433 uadev[card_num].num_intf = 1434 1434 subs->dev->config->desc.bNumInterfaces; 1435 1435 uadev[card_num].info = kzalloc_objs(struct intf_info, 1436 - uadev[card_num].num_intf, 1437 - GFP_KERNEL); 1436 + uadev[card_num].num_intf); 1438 1437 if (!uadev[card_num].info) { 1439 1438 ret = -ENOMEM; 1440 1439 goto unmap_er;
+1 -1
sound/xen/xen_snd_front_evtchnl.c
··· 268 268 269 269 front_info->evt_pairs = 270 270 kzalloc_objs(struct xen_snd_front_evtchnl_pair, 271 - num_streams, GFP_KERNEL); 271 + num_streams); 272 272 if (!front_info->evt_pairs) 273 273 return -ENOMEM; 274 274