Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Convert more 'alloc_obj' cases to default GFP_KERNEL arguments

This converts some of the visually simpler cases that have been split
over multiple lines. I only did the ones that are easy to verify the
resulting diff by having just that final GFP_KERNEL argument on the next
line.

Somebody should probably do a proper coccinelle script for this, but for
me the trivial script actually resulted in an assertion failure in the
middle of the script. I probably had made it a bit _too_ trivial.

So after fighting that for a while I decided to just do some of the
syntactically simpler cases with variations of the previous 'sed'
scripts.

The more syntactically complex multi-line cases would mostly really want
whitespace cleanup anyway.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

+1211 -2422
+1 -2
arch/arm/mach-omap1/mcbsp.c
··· 294 294 { 295 295 int i; 296 296 297 - omap_mcbsp_devices = kzalloc_objs(struct platform_device *, size, 298 - GFP_KERNEL); 297 + omap_mcbsp_devices = kzalloc_objs(struct platform_device *, size); 299 298 if (!omap_mcbsp_devices) { 300 299 printk(KERN_ERR "Could not register McBSP devices\n"); 301 300 return;
+1 -2
arch/arm64/kernel/vdso.c
··· 81 81 vdso_info[abi].vdso_code_start) >> 82 82 PAGE_SHIFT; 83 83 84 - vdso_pagelist = kzalloc_objs(struct page *, vdso_info[abi].vdso_pages, 85 - GFP_KERNEL); 84 + vdso_pagelist = kzalloc_objs(struct page *, vdso_info[abi].vdso_pages); 86 85 if (vdso_pagelist == NULL) 87 86 return -ENOMEM; 88 87
+1 -2
arch/loongarch/kernel/vdso.c
··· 52 52 53 53 vdso_info.size = PAGE_ALIGN(vdso_end - vdso_start); 54 54 vdso_info.code_mapping.pages = 55 - kzalloc_objs(struct page *, vdso_info.size / PAGE_SIZE, 56 - GFP_KERNEL); 55 + kzalloc_objs(struct page *, vdso_info.size / PAGE_SIZE); 57 56 58 57 if (!vdso_info.code_mapping.pages) 59 58 return -ENOMEM;
+1 -2
arch/powerpc/kvm/book3s_pr.c
··· 1738 1738 vcpu->arch.book3s = vcpu_book3s; 1739 1739 1740 1740 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER 1741 - vcpu->arch.shadow_vcpu = kzalloc_obj(*vcpu->arch.shadow_vcpu, 1742 - GFP_KERNEL); 1741 + vcpu->arch.shadow_vcpu = kzalloc_obj(*vcpu->arch.shadow_vcpu); 1743 1742 if (!vcpu->arch.shadow_vcpu) 1744 1743 goto free_vcpu3s; 1745 1744 #endif
+2 -4
arch/powerpc/mm/book3s64/mmu_context.c
··· 96 96 { 97 97 int index; 98 98 99 - mm->context.hash_context = kmalloc_obj(struct hash_mm_context, 100 - GFP_KERNEL); 99 + mm->context.hash_context = kmalloc_obj(struct hash_mm_context); 101 100 if (!mm->context.hash_context) 102 101 return -ENOMEM; 103 102 ··· 123 124 #ifdef CONFIG_PPC_SUBPAGE_PROT 124 125 /* inherit subpage prot details if we have one. */ 125 126 if (current->mm->context.hash_context->spt) { 126 - mm->context.hash_context->spt = kmalloc_obj(struct subpage_prot_table, 127 - GFP_KERNEL); 127 + mm->context.hash_context->spt = kmalloc_obj(struct subpage_prot_table); 128 128 if (!mm->context.hash_context->spt) { 129 129 kfree(mm->context.hash_context); 130 130 return -ENOMEM;
+1 -2
arch/powerpc/perf/hv-24x7.c
··· 917 917 goto e_event_attrs; 918 918 } 919 919 920 - event_long_descs = kmalloc_objs(*event_long_descs, event_idx + 1, 921 - GFP_KERNEL); 920 + event_long_descs = kmalloc_objs(*event_long_descs, event_idx + 1); 922 921 if (!event_long_descs) { 923 922 ret = -ENOMEM; 924 923 goto e_event_descs;
+4 -8
arch/powerpc/perf/imc-pmu.c
··· 1527 1527 { 1528 1528 int nid, i, cpu; 1529 1529 1530 - nest_imc_refc = kzalloc_objs(*nest_imc_refc, num_possible_nodes(), 1531 - GFP_KERNEL); 1530 + nest_imc_refc = kzalloc_objs(*nest_imc_refc, num_possible_nodes()); 1532 1531 1533 1532 if (!nest_imc_refc) 1534 1533 return -ENOMEM; ··· 1713 1714 goto err; 1714 1715 1715 1716 nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core); 1716 - pmu_ptr->mem_info = kzalloc_objs(struct imc_mem_info, nr_cores, 1717 - GFP_KERNEL); 1717 + pmu_ptr->mem_info = kzalloc_objs(struct imc_mem_info, nr_cores); 1718 1718 1719 1719 if (!pmu_ptr->mem_info) 1720 1720 goto err; 1721 1721 1722 - core_imc_refc = kzalloc_objs(struct imc_pmu_ref, nr_cores, 1723 - GFP_KERNEL); 1722 + core_imc_refc = kzalloc_objs(struct imc_pmu_ref, nr_cores); 1724 1723 1725 1724 if (!core_imc_refc) { 1726 1725 kfree(pmu_ptr->mem_info); ··· 1751 1754 return -ENOMEM; 1752 1755 1753 1756 nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core); 1754 - trace_imc_refc = kzalloc_objs(struct imc_pmu_ref, nr_cores, 1755 - GFP_KERNEL); 1757 + trace_imc_refc = kzalloc_objs(struct imc_pmu_ref, nr_cores); 1756 1758 if (!trace_imc_refc) 1757 1759 return -ENOMEM; 1758 1760
+1 -2
arch/powerpc/platforms/powernv/idle.c
··· 1336 1336 nr_idle_states = of_property_count_u32_elems(np, 1337 1337 "ibm,cpu-idle-state-flags"); 1338 1338 1339 - pnv_idle_states = kzalloc_objs(*pnv_idle_states, nr_idle_states, 1340 - GFP_KERNEL); 1339 + pnv_idle_states = kzalloc_objs(*pnv_idle_states, nr_idle_states); 1341 1340 temp_u32 = kcalloc(nr_idle_states, sizeof(u32), GFP_KERNEL); 1342 1341 temp_u64 = kcalloc(nr_idle_states, sizeof(u64), GFP_KERNEL); 1343 1342 temp_string = kcalloc(nr_idle_states, sizeof(char *), GFP_KERNEL);
+1 -2
arch/powerpc/platforms/powernv/memtrace.c
··· 133 133 u32 nid; 134 134 u64 m; 135 135 136 - memtrace_array = kzalloc_objs(struct memtrace_entry, num_online_nodes(), 137 - GFP_KERNEL); 136 + memtrace_array = kzalloc_objs(struct memtrace_entry, num_online_nodes()); 138 137 if (!memtrace_array) { 139 138 pr_err("Failed to allocate memtrace_array\n"); 140 139 return -EINVAL;
+1 -2
arch/powerpc/platforms/powernv/opal-imc.c
··· 108 108 nr_chips)) 109 109 goto error; 110 110 111 - pmu_ptr->mem_info = kzalloc_objs(*pmu_ptr->mem_info, nr_chips + 1, 112 - GFP_KERNEL); 111 + pmu_ptr->mem_info = kzalloc_objs(*pmu_ptr->mem_info, nr_chips + 1); 113 112 if (!pmu_ptr->mem_info) 114 113 goto error; 115 114
+2 -4
arch/powerpc/platforms/powernv/opal-powercap.c
··· 181 181 has_cur = true; 182 182 } 183 183 184 - pcaps[i].pattrs = kzalloc_objs(struct powercap_attr, j, 185 - GFP_KERNEL); 184 + pcaps[i].pattrs = kzalloc_objs(struct powercap_attr, j); 186 185 if (!pcaps[i].pattrs) 187 186 goto out_pcaps_pattrs; 188 187 189 - pcaps[i].pg.attrs = kzalloc_objs(struct attribute *, j + 1, 190 - GFP_KERNEL); 188 + pcaps[i].pg.attrs = kzalloc_objs(struct attribute *, j + 1); 191 189 if (!pcaps[i].pg.attrs) { 192 190 kfree(pcaps[i].pattrs); 193 191 goto out_pcaps_pattrs;
+1 -2
arch/powerpc/platforms/powernv/opal-psr.c
··· 132 132 return; 133 133 } 134 134 135 - psr_attrs = kzalloc_objs(*psr_attrs, of_get_child_count(psr), 136 - GFP_KERNEL); 135 + psr_attrs = kzalloc_objs(*psr_attrs, of_get_child_count(psr)); 137 136 if (!psr_attrs) 138 137 goto out_put_psr; 139 138
+2 -4
arch/powerpc/platforms/powernv/opal-sensor-groups.c
··· 190 190 if (!nr_attrs) 191 191 continue; 192 192 193 - sgs[i].sgattrs = kzalloc_objs(*sgs[i].sgattrs, nr_attrs, 194 - GFP_KERNEL); 193 + sgs[i].sgattrs = kzalloc_objs(*sgs[i].sgattrs, nr_attrs); 195 194 if (!sgs[i].sgattrs) 196 195 goto out_sgs_sgattrs; 197 196 198 - sgs[i].sg.attrs = kzalloc_objs(*sgs[i].sg.attrs, nr_attrs + 1, 199 - GFP_KERNEL); 197 + sgs[i].sg.attrs = kzalloc_objs(*sgs[i].sg.attrs, nr_attrs + 1); 200 198 201 199 if (!sgs[i].sg.attrs) { 202 200 kfree(sgs[i].sgattrs);
+1 -2
arch/powerpc/sysdev/mpic.c
··· 1639 1639 1640 1640 #ifdef CONFIG_PM 1641 1641 /* allocate memory to save mpic state */ 1642 - mpic->save_data = kmalloc_objs(*mpic->save_data, mpic->num_sources, 1643 - GFP_KERNEL); 1642 + mpic->save_data = kmalloc_objs(*mpic->save_data, mpic->num_sources); 1644 1643 BUG_ON(mpic->save_data == NULL); 1645 1644 #endif 1646 1645
+1 -2
arch/powerpc/sysdev/mpic_msgr.c
··· 188 188 dev_info(&dev->dev, "Found %d message registers\n", 189 189 mpic_msgr_count); 190 190 191 - mpic_msgrs = kzalloc_objs(*mpic_msgrs, mpic_msgr_count, 192 - GFP_KERNEL); 191 + mpic_msgrs = kzalloc_objs(*mpic_msgrs, mpic_msgr_count); 193 192 if (!mpic_msgrs) { 194 193 dev_err(&dev->dev, 195 194 "No memory for message register blocks\n");
+1 -2
arch/riscv/kernel/vdso.c
··· 55 55 vdso_info->vdso_code_start) >> 56 56 PAGE_SHIFT; 57 57 58 - vdso_pagelist = kzalloc_objs(struct page *, vdso_info->vdso_pages, 59 - GFP_KERNEL); 58 + vdso_pagelist = kzalloc_objs(struct page *, vdso_info->vdso_pages); 60 59 if (vdso_pagelist == NULL) 61 60 panic("vDSO kcalloc failed!\n"); 62 61
+1 -2
arch/s390/kvm/pci.c
··· 126 126 return -EPERM; 127 127 128 128 mutex_lock(&aift->aift_lock); 129 - aift->kzdev = kzalloc_objs(struct kvm_zdev *, ZPCI_NR_DEVICES, 130 - GFP_KERNEL); 129 + aift->kzdev = kzalloc_objs(struct kvm_zdev *, ZPCI_NR_DEVICES); 131 130 if (!aift->kzdev) { 132 131 rc = -ENOMEM; 133 132 goto unlock;
+1 -2
arch/s390/pci/pci.c
··· 1073 1073 if (!zdev_fmb_cache) 1074 1074 goto error_fmb; 1075 1075 1076 - zpci_iomap_start = kzalloc_objs(*zpci_iomap_start, ZPCI_IOMAP_ENTRIES, 1077 - GFP_KERNEL); 1076 + zpci_iomap_start = kzalloc_objs(*zpci_iomap_start, ZPCI_IOMAP_ENTRIES); 1078 1077 if (!zpci_iomap_start) 1079 1078 goto error_iomap; 1080 1079
+1 -2
arch/sh/drivers/pci/pcie-sh7786.c
··· 558 558 if (unlikely(nr_ports == 0)) 559 559 return -ENODEV; 560 560 561 - sh7786_pcie_ports = kzalloc_objs(struct sh7786_pcie_port, nr_ports, 562 - GFP_KERNEL); 561 + sh7786_pcie_ports = kzalloc_objs(struct sh7786_pcie_port, nr_ports); 563 562 if (unlikely(!sh7786_pcie_ports)) 564 563 return -ENOMEM; 565 564
+1 -2
arch/sparc/kernel/smp_64.c
··· 297 297 unsigned long hv_err; 298 298 int i; 299 299 300 - hdesc = kzalloc_flex(*hdesc, maps, num_kernel_image_mappings, 301 - GFP_KERNEL); 300 + hdesc = kzalloc_flex(*hdesc, maps, num_kernel_image_mappings); 302 301 if (!hdesc) { 303 302 printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate " 304 303 "hvtramp_descr.\n");
+2 -4
arch/sparc/kernel/sys_sparc_64.c
··· 647 647 } 648 648 if (!current_thread_info()->utraps) { 649 649 current_thread_info()->utraps = 650 - kzalloc_objs(long, UT_TRAP_INSTRUCTION_31 + 1, 651 - GFP_KERNEL); 650 + kzalloc_objs(long, UT_TRAP_INSTRUCTION_31 + 1); 652 651 if (!current_thread_info()->utraps) 653 652 return -ENOMEM; 654 653 current_thread_info()->utraps[0] = 1; ··· 657 658 unsigned long *p = current_thread_info()->utraps; 658 659 659 660 current_thread_info()->utraps = 660 - kmalloc_objs(long, UT_TRAP_INSTRUCTION_31 + 1, 661 - GFP_KERNEL); 661 + kmalloc_objs(long, UT_TRAP_INSTRUCTION_31 + 1); 662 662 if (!current_thread_info()->utraps) { 663 663 current_thread_info()->utraps = p; 664 664 return -ENOMEM;
+2 -4
arch/um/drivers/vector_kern.c
··· 544 544 result->max_iov_frags = num_extra_frags; 545 545 for (i = 0; i < max_size; i++) { 546 546 if (vp->header_size > 0) 547 - iov = kmalloc_objs(struct iovec, 3 + num_extra_frags, 548 - GFP_KERNEL); 547 + iov = kmalloc_objs(struct iovec, 3 + num_extra_frags); 549 548 else 550 - iov = kmalloc_objs(struct iovec, 2 + num_extra_frags, 551 - GFP_KERNEL); 549 + iov = kmalloc_objs(struct iovec, 2 + num_extra_frags); 552 550 if (iov == NULL) 553 551 goto out_fail; 554 552 mmsg_vector->msg_hdr.msg_iov = iov;
+1 -2
arch/x86/events/intel/uncore_snbep.c
··· 3753 3753 goto err; 3754 3754 3755 3755 for (die = 0; die < uncore_max_dies(); die++) { 3756 - topology[die] = kzalloc_objs(**topology, type->num_boxes, 3757 - GFP_KERNEL); 3756 + topology[die] = kzalloc_objs(**topology, type->num_boxes); 3758 3757 if (!topology[die]) 3759 3758 goto clear; 3760 3759 for (idx = 0; idx < type->num_boxes; idx++) {
+1 -2
arch/x86/hyperv/hv_init.c
··· 467 467 if (hv_isolation_type_tdx()) 468 468 hv_vp_assist_page = NULL; 469 469 else 470 - hv_vp_assist_page = kzalloc_objs(*hv_vp_assist_page, nr_cpu_ids, 471 - GFP_KERNEL); 470 + hv_vp_assist_page = kzalloc_objs(*hv_vp_assist_page, nr_cpu_ids); 472 471 if (!hv_vp_assist_page) { 473 472 ms_hyperv.hints &= ~HV_X64_ENLIGHTENED_VMCS_RECOMMENDED; 474 473
+1 -2
arch/x86/kernel/amd_nb.c
··· 68 68 69 69 amd_northbridges.num = amd_num_nodes(); 70 70 71 - nb = kzalloc_objs(struct amd_northbridge, amd_northbridges.num, 72 - GFP_KERNEL); 71 + nb = kzalloc_objs(struct amd_northbridge, amd_northbridges.num); 73 72 if (!nb) 74 73 return -ENOMEM; 75 74
+1 -2
arch/x86/kernel/cpu/sgx/main.c
··· 798 798 int nid; 799 799 int i; 800 800 801 - sgx_numa_nodes = kmalloc_objs(*sgx_numa_nodes, num_possible_nodes(), 802 - GFP_KERNEL); 801 + sgx_numa_nodes = kmalloc_objs(*sgx_numa_nodes, num_possible_nodes()); 803 802 if (!sgx_numa_nodes) 804 803 return false; 805 804
+1 -2
arch/x86/kvm/cpuid.c
··· 1991 1991 if (sanity_check_entries(entries, cpuid->nent, type)) 1992 1992 return -EINVAL; 1993 1993 1994 - array.entries = kvzalloc_objs(struct kvm_cpuid_entry2, cpuid->nent, 1995 - GFP_KERNEL); 1994 + array.entries = kvzalloc_objs(struct kvm_cpuid_entry2, cpuid->nent); 1996 1995 if (!array.entries) 1997 1996 return -ENOMEM; 1998 1997
+1 -2
arch/x86/kvm/vmx/tdx.c
··· 2218 2218 if (nr_user_entries < td_conf->num_cpuid_config) 2219 2219 return -E2BIG; 2220 2220 2221 - caps = kzalloc_flex(*caps, cpuid.entries, td_conf->num_cpuid_config, 2222 - GFP_KERNEL); 2221 + caps = kzalloc_flex(*caps, cpuid.entries, td_conf->num_cpuid_config); 2223 2222 if (!caps) 2224 2223 return -ENOMEM; 2225 2224
+1 -2
block/blk-crypto-fallback.c
··· 546 546 goto out; 547 547 548 548 /* Dynamic allocation is needed because of lockdep_register_key(). */ 549 - blk_crypto_fallback_profile = kzalloc_obj(*blk_crypto_fallback_profile, 550 - GFP_KERNEL); 549 + blk_crypto_fallback_profile = kzalloc_obj(*blk_crypto_fallback_profile); 551 550 if (!blk_crypto_fallback_profile) { 552 551 err = -ENOMEM; 553 552 goto fail_free_bioset;
+1 -2
block/blk-crypto-profile.c
··· 120 120 121 121 profile->log_slot_ht_size = ilog2(slot_hashtable_size); 122 122 profile->slot_hashtable = 123 - kvmalloc_objs(profile->slot_hashtable[0], slot_hashtable_size, 124 - GFP_KERNEL); 123 + kvmalloc_objs(profile->slot_hashtable[0], slot_hashtable_size); 125 124 if (!profile->slot_hashtable) 126 125 goto err_destroy; 127 126 for (i = 0; i < slot_hashtable_size; i++)
+1 -2
drivers/accel/amdxdna/amdxdna_gem.c
··· 210 210 return -ENOMEM; 211 211 212 212 nr_pages = (PAGE_ALIGN(addr + len) - (addr & PAGE_MASK)) >> PAGE_SHIFT; 213 - mapp->range.hmm_pfns = kvzalloc_objs(*mapp->range.hmm_pfns, nr_pages, 214 - GFP_KERNEL); 213 + mapp->range.hmm_pfns = kvzalloc_objs(*mapp->range.hmm_pfns, nr_pages); 215 214 if (!mapp->range.hmm_pfns) { 216 215 ret = -ENOMEM; 217 216 goto free_map;
+1 -2
drivers/accel/ethosu/ethosu_drv.c
··· 144 144 if (!try_module_get(THIS_MODULE)) 145 145 return -EINVAL; 146 146 147 - struct ethosu_file_priv __free(kfree) *priv = kzalloc_obj(*priv, 148 - GFP_KERNEL); 147 + struct ethosu_file_priv __free(kfree) *priv = kzalloc_obj(*priv); 149 148 if (!priv) { 150 149 ret = -ENOMEM; 151 150 goto err_put_mod;
+1 -2
drivers/accel/ethosu/ethosu_gem.c
··· 352 352 struct ethosu_gem_object *bo, 353 353 u32 size) 354 354 { 355 - struct ethosu_validated_cmdstream_info __free(kfree) *info = kzalloc_obj(*info, 356 - GFP_KERNEL); 355 + struct ethosu_validated_cmdstream_info __free(kfree) *info = kzalloc_obj(*info); 357 356 struct ethosu_device *edev = to_ethosu_device(ddev); 358 357 u32 *bocmds = bo->base.vaddr; 359 358 struct cmd_state st;
+1 -2
drivers/accel/habanalabs/common/command_submission.c
··· 1422 1422 1423 1423 *cs_chunk_array = kmalloc_objs(**cs_chunk_array, num_chunks, GFP_ATOMIC); 1424 1424 if (!*cs_chunk_array) 1425 - *cs_chunk_array = kmalloc_objs(**cs_chunk_array, num_chunks, 1426 - GFP_KERNEL); 1425 + *cs_chunk_array = kmalloc_objs(**cs_chunk_array, num_chunks); 1427 1426 if (!*cs_chunk_array) { 1428 1427 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt); 1429 1428 atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
+1 -2
drivers/accel/habanalabs/common/debugfs.c
··· 2052 2052 int count = ARRAY_SIZE(hl_debugfs_list); 2053 2053 2054 2054 dev_entry->hdev = hdev; 2055 - dev_entry->entry_arr = kmalloc_objs(struct hl_debugfs_entry, count, 2056 - GFP_KERNEL); 2055 + dev_entry->entry_arr = kmalloc_objs(struct hl_debugfs_entry, count); 2057 2056 if (!dev_entry->entry_arr) 2058 2057 return -ENOMEM; 2059 2058
+1 -2
drivers/accel/habanalabs/common/hw_queue.c
··· 837 837 838 838 q->kernel_address = p; 839 839 840 - q->shadow_queue = kmalloc_objs(struct hl_cs_job *, HL_QUEUE_LENGTH, 841 - GFP_KERNEL); 840 + q->shadow_queue = kmalloc_objs(struct hl_cs_job *, HL_QUEUE_LENGTH); 842 841 if (!q->shadow_queue) { 843 842 dev_err(hdev->dev, 844 843 "Failed to allocate shadow queue for H/W queue %d\n",
+1 -2
drivers/accel/habanalabs/common/mmu/mmu.c
··· 843 843 return -ENOMEM; 844 844 } 845 845 846 - hr_priv->mmu_asid_hop0 = kvzalloc_objs(struct pgt_info, prop->max_asid, 847 - GFP_KERNEL); 846 + hr_priv->mmu_asid_hop0 = kvzalloc_objs(struct pgt_info, prop->max_asid); 848 847 if (ZERO_OR_NULL_PTR(hr_priv->mmu_asid_hop0)) { 849 848 dev_err(hdev->dev, "Failed to allocate hr-mmu hop0 table\n"); 850 849 rc = -ENOMEM;
+4 -8
drivers/accel/habanalabs/common/security.c
··· 312 312 int i, j; 313 313 struct hl_block_glbl_sec *glbl_sec; 314 314 315 - glbl_sec = kzalloc_objs(struct hl_block_glbl_sec, blocks_array_size, 316 - GFP_KERNEL); 315 + glbl_sec = kzalloc_objs(struct hl_block_glbl_sec, blocks_array_size); 317 316 if (!glbl_sec) 318 317 return -ENOMEM; 319 318 ··· 391 392 int i, j, rc = 0; 392 393 struct hl_block_glbl_sec *glbl_sec; 393 394 394 - glbl_sec = kzalloc_objs(struct hl_block_glbl_sec, blocks_array_size, 395 - GFP_KERNEL); 395 + glbl_sec = kzalloc_objs(struct hl_block_glbl_sec, blocks_array_size); 396 396 if (!glbl_sec) 397 397 return -ENOMEM; 398 398 ··· 472 474 int i, rc = 0; 473 475 struct hl_block_glbl_sec *glbl_sec; 474 476 475 - glbl_sec = kzalloc_objs(struct hl_block_glbl_sec, blocks_array_size, 476 - GFP_KERNEL); 477 + glbl_sec = kzalloc_objs(struct hl_block_glbl_sec, blocks_array_size); 477 478 if (!glbl_sec) 478 479 return -ENOMEM; 479 480 ··· 518 521 int i; 519 522 struct hl_block_glbl_sec *glbl_sec; 520 523 521 - glbl_sec = kzalloc_objs(struct hl_block_glbl_sec, blocks_array_size, 522 - GFP_KERNEL); 524 + glbl_sec = kzalloc_objs(struct hl_block_glbl_sec, blocks_array_size); 523 525 if (!glbl_sec) 524 526 return -ENOMEM; 525 527
+1 -2
drivers/accel/habanalabs/gaudi2/gaudi2_security.c
··· 2620 2620 2621 2621 block_array_size = ARRAY_SIZE(gaudi2_pb_dcr0_tpc0); 2622 2622 2623 - glbl_sec = kzalloc_objs(struct hl_block_glbl_sec, block_array_size, 2624 - GFP_KERNEL); 2623 + glbl_sec = kzalloc_objs(struct hl_block_glbl_sec, block_array_size); 2625 2624 if (!glbl_sec) 2626 2625 return -ENOMEM; 2627 2626
+1 -2
drivers/accel/qaic/qaic_ssr.c
··· 457 457 } 458 458 459 459 /* Buffer used to send MEMORY READ request to device via MHI */ 460 - dump_info->read_buf_req = kzalloc_obj(*dump_info->read_buf_req, 461 - GFP_KERNEL); 460 + dump_info->read_buf_req = kzalloc_obj(*dump_info->read_buf_req); 462 461 if (!dump_info->read_buf_req) { 463 462 ret = -ENOMEM; 464 463 goto free_dump_info;
+1 -2
drivers/acpi/apei/einj-core.c
··· 1021 1021 { 1022 1022 char name[32]; 1023 1023 1024 - syndrome_data = kzalloc_objs(syndrome_data[0], max_nr_components, 1025 - GFP_KERNEL); 1024 + syndrome_data = kzalloc_objs(syndrome_data[0], max_nr_components); 1026 1025 if (!syndrome_data) 1027 1026 return false; 1028 1027
+1 -2
drivers/acpi/cppc_acpi.c
··· 636 636 if (pcc_data[pcc_ss_id]) { 637 637 pcc_data[pcc_ss_id]->refcount++; 638 638 } else { 639 - pcc_data[pcc_ss_id] = kzalloc_obj(struct cppc_pcc_data, 640 - GFP_KERNEL); 639 + pcc_data[pcc_ss_id] = kzalloc_obj(struct cppc_pcc_data); 641 640 if (!pcc_data[pcc_ss_id]) 642 641 return -ENOMEM; 643 642 pcc_data[pcc_ss_id]->refcount++;
+1 -2
drivers/acpi/mipi-disco-img.c
··· 91 91 return AE_OK; 92 92 } 93 93 94 - conn = kmalloc_flex(*conn, remote_name, csi2_res_src_length + 1, 95 - GFP_KERNEL); 94 + conn = kmalloc_flex(*conn, remote_name, csi2_res_src_length + 1); 96 95 if (!conn) 97 96 return AE_OK; 98 97
+1 -2
drivers/acpi/platform_profile.c
··· 560 560 !ops->profile_set || !ops->probe)) 561 561 return ERR_PTR(-EINVAL); 562 562 563 - struct platform_profile_handler *pprof __free(kfree) = kzalloc_obj(*pprof, 564 - GFP_KERNEL); 563 + struct platform_profile_handler *pprof __free(kfree) = kzalloc_obj(*pprof); 565 564 if (!pprof) 566 565 return ERR_PTR(-ENOMEM); 567 566
+1 -2
drivers/acpi/processor_perflib.c
··· 341 341 342 342 pr->performance->state_count = pss->package.count; 343 343 pr->performance->states = 344 - kmalloc_objs(struct acpi_processor_px, pss->package.count, 345 - GFP_KERNEL); 344 + kmalloc_objs(struct acpi_processor_px, pss->package.count); 346 345 if (!pr->performance->states) { 347 346 result = -ENOMEM; 348 347 goto end;
+1 -2
drivers/acpi/processor_throttling.c
··· 512 512 513 513 pr->throttling.state_count = tss->package.count; 514 514 pr->throttling.states_tss = 515 - kmalloc_objs(struct acpi_processor_tx_tss, tss->package.count, 516 - GFP_KERNEL); 515 + kmalloc_objs(struct acpi_processor_tx_tss, tss->package.count); 517 516 if (!pr->throttling.states_tss) { 518 517 result = -ENOMEM; 519 518 goto end;
+1 -2
drivers/acpi/riscv/irq.c
··· 384 384 riscv_acpi_irq_get_dep(handle, i, &gsi_handle); 385 385 i++) { 386 386 dep_devices.count = 1; 387 - dep_devices.handles = kzalloc_objs(*dep_devices.handles, 1, 388 - GFP_KERNEL); 387 + dep_devices.handles = kzalloc_objs(*dep_devices.handles, 1); 389 388 if (!dep_devices.handles) { 390 389 acpi_handle_err(handle, "failed to allocate memory\n"); 391 390 continue;
+1 -2
drivers/acpi/scan.c
··· 757 757 if (result) 758 758 goto err_unlock; 759 759 } else { 760 - acpi_device_bus_id = kzalloc_obj(*acpi_device_bus_id, 761 - GFP_KERNEL); 760 + acpi_device_bus_id = kzalloc_obj(*acpi_device_bus_id); 762 761 if (!acpi_device_bus_id) { 763 762 result = -ENOMEM; 764 763 goto err_unlock;
+1 -2
drivers/ata/libahci_platform.c
··· 594 594 * We cannot use devm_ here, since ahci_platform_put_resources() uses 595 595 * target_pwrs after devm_ have freed memory 596 596 */ 597 - hpriv->target_pwrs = kzalloc_objs(*hpriv->target_pwrs, hpriv->nports, 598 - GFP_KERNEL); 597 + hpriv->target_pwrs = kzalloc_objs(*hpriv->target_pwrs, hpriv->nports); 599 598 if (!hpriv->target_pwrs) { 600 599 rc = -ENOMEM; 601 600 goto err_out;
+1 -2
drivers/atm/he.c
··· 786 786 } 787 787 788 788 /* rbpl_virt 64-bit pointers */ 789 - he_dev->rbpl_virt = kmalloc_objs(*he_dev->rbpl_virt, RBPL_TABLE_SIZE, 790 - GFP_KERNEL); 789 + he_dev->rbpl_virt = kmalloc_objs(*he_dev->rbpl_virt, RBPL_TABLE_SIZE); 791 790 if (!he_dev->rbpl_virt) { 792 791 hprintk("unable to allocate rbpl virt table\n"); 793 792 goto out_free_rbpl_table;
+4 -8
drivers/atm/iphase.c
··· 1978 1978 buf_desc_ptr++; 1979 1979 tx_pkt_start += iadev->tx_buf_sz; 1980 1980 } 1981 - iadev->tx_buf = kmalloc_objs(*iadev->tx_buf, iadev->num_tx_desc, 1982 - GFP_KERNEL); 1981 + iadev->tx_buf = kmalloc_objs(*iadev->tx_buf, iadev->num_tx_desc); 1983 1982 if (!iadev->tx_buf) { 1984 1983 printk(KERN_ERR DEV_LABEL " couldn't get mem\n"); 1985 1984 goto err_free_dle; ··· 1998 1999 sizeof(*cpcs), 1999 2000 DMA_TO_DEVICE); 2000 2001 } 2001 - iadev->desc_tbl = kmalloc_objs(*iadev->desc_tbl, iadev->num_tx_desc, 2002 - GFP_KERNEL); 2002 + iadev->desc_tbl = kmalloc_objs(*iadev->desc_tbl, iadev->num_tx_desc); 2003 2003 if (!iadev->desc_tbl) { 2004 2004 printk(KERN_ERR DEV_LABEL " couldn't get mem\n"); 2005 2005 goto err_free_all_tx_bufs; ··· 2126 2128 memset((caddr_t)(iadev->seg_ram+i), 0, iadev->num_vc*4); 2127 2129 vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR; 2128 2130 evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR; 2129 - iadev->testTable = kmalloc_objs(*iadev->testTable, iadev->num_vc, 2130 - GFP_KERNEL); 2131 + iadev->testTable = kmalloc_objs(*iadev->testTable, iadev->num_vc); 2131 2132 if (!iadev->testTable) { 2132 2133 printk("Get freepage failed\n"); 2133 2134 goto err_free_desc_tbl; ··· 2135 2138 { 2136 2139 memset((caddr_t)vc, 0, sizeof(*vc)); 2137 2140 memset((caddr_t)evc, 0, sizeof(*evc)); 2138 - iadev->testTable[i] = kmalloc_obj(struct testTable_t, 2139 - GFP_KERNEL); 2141 + iadev->testTable[i] = kmalloc_obj(struct testTable_t); 2140 2142 if (!iadev->testTable[i]) 2141 2143 goto err_free_test_tables; 2142 2144 iadev->testTable[i]->lastTime = 0;
+1 -2
drivers/base/physical_location.c
··· 21 21 if (!acpi_get_physical_device_location(ACPI_HANDLE(dev), &pld)) 22 22 return false; 23 23 24 - dev->physical_location = kzalloc_obj(*dev->physical_location, 25 - GFP_KERNEL); 24 + dev->physical_location = kzalloc_obj(*dev->physical_location); 26 25 if (!dev->physical_location) { 27 26 ACPI_FREE(pld); 28 27 return false;
+1 -2
drivers/block/drbd/drbd_receiver.c
··· 3932 3932 if (my_usize != p_usize) { 3933 3933 struct disk_conf *old_disk_conf, *new_disk_conf = NULL; 3934 3934 3935 - new_disk_conf = kzalloc_obj(struct disk_conf, 3936 - GFP_KERNEL); 3935 + new_disk_conf = kzalloc_obj(struct disk_conf); 3937 3936 if (!new_disk_conf) { 3938 3937 put_ldev(device); 3939 3938 return -ENOMEM;
+1 -2
drivers/block/ps3vram.c
··· 401 401 402 402 priv->cache.page_count = CACHE_PAGE_COUNT; 403 403 priv->cache.page_size = CACHE_PAGE_SIZE; 404 - priv->cache.tags = kzalloc_objs(struct ps3vram_tag, CACHE_PAGE_COUNT, 405 - GFP_KERNEL); 404 + priv->cache.tags = kzalloc_objs(struct ps3vram_tag, CACHE_PAGE_COUNT); 406 405 if (!priv->cache.tags) 407 406 return -ENOMEM; 408 407
+1 -2
drivers/block/xen-blkback/blkback.c
··· 846 846 * We are using persistent grants, the grant is 847 847 * not mapped but we might have room for it. 848 848 */ 849 - persistent_gnt = kmalloc_obj(struct persistent_gnt, 850 - GFP_KERNEL); 849 + persistent_gnt = kmalloc_obj(struct persistent_gnt); 851 850 if (!persistent_gnt) { 852 851 /* 853 852 * If we don't have enough memory to
+3 -6
drivers/block/xen-blkback/xenbus.c
··· 131 131 { 132 132 unsigned int r; 133 133 134 - blkif->rings = kzalloc_objs(struct xen_blkif_ring, blkif->nr_rings, 135 - GFP_KERNEL); 134 + blkif->rings = kzalloc_objs(struct xen_blkif_ring, blkif->nr_rings); 136 135 if (!blkif->rings) 137 136 return -ENOMEM; 138 137 ··· 1013 1014 goto fail; 1014 1015 list_add_tail(&req->free_list, &ring->pending_free); 1015 1016 for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) { 1016 - req->segments[j] = kzalloc_obj(*req->segments[0], 1017 - GFP_KERNEL); 1017 + req->segments[j] = kzalloc_obj(*req->segments[0]); 1018 1018 if (!req->segments[j]) 1019 1019 goto fail; 1020 1020 } 1021 1021 for (j = 0; j < MAX_INDIRECT_PAGES; j++) { 1022 - req->indirect_pages[j] = kzalloc_obj(*req->indirect_pages[0], 1023 - GFP_KERNEL); 1022 + req->indirect_pages[j] = kzalloc_obj(*req->indirect_pages[0]); 1024 1023 if (!req->indirect_pages[j]) 1025 1024 goto fail; 1026 1025 }
+1 -2
drivers/block/xen-blkfront.c
··· 2207 2207 2208 2208 for (i = 0; i < BLK_RING_SIZE(info); i++) { 2209 2209 rinfo->shadow[i].grants_used = 2210 - kvzalloc_objs(rinfo->shadow[i].grants_used[0], grants, 2211 - GFP_KERNEL); 2210 + kvzalloc_objs(rinfo->shadow[i].grants_used[0], grants); 2212 2211 rinfo->shadow[i].sg = kvzalloc_objs(rinfo->shadow[i].sg[0], 2213 2212 psegs, GFP_KERNEL); 2214 2213 if (info->max_indirect_segments)
+1 -2
drivers/block/z2ram.c
··· 187 187 (unsigned long)z_remap_nocache_nonser(paddr, size); 188 188 #endif 189 189 z2ram_map = 190 - kmalloc_objs(z2ram_map[0], size / Z2RAM_CHUNKSIZE, 191 - GFP_KERNEL); 190 + kmalloc_objs(z2ram_map[0], size / Z2RAM_CHUNKSIZE); 192 191 if (z2ram_map == NULL) { 193 192 printk(KERN_ERR DEVICE_NAME 194 193 ": cannot get mem for z2ram_map\n");
+1 -2
drivers/bus/mhi/ep/main.c
··· 1459 1459 if (ret) 1460 1460 return ret; 1461 1461 1462 - mhi_cntrl->mhi_cmd = kzalloc_objs(*mhi_cntrl->mhi_cmd, NR_OF_CMD_RINGS, 1463 - GFP_KERNEL); 1462 + mhi_cntrl->mhi_cmd = kzalloc_objs(*mhi_cntrl->mhi_cmd, NR_OF_CMD_RINGS); 1464 1463 if (!mhi_cntrl->mhi_cmd) { 1465 1464 ret = -ENOMEM; 1466 1465 goto err_free_ch;
+1 -2
drivers/bus/mhi/host/boot.c
··· 380 380 return -ENOMEM; 381 381 382 382 /* Allocate memory for entries */ 383 - img_info->mhi_buf = kzalloc_objs(*img_info->mhi_buf, segments, 384 - GFP_KERNEL); 383 + img_info->mhi_buf = kzalloc_objs(*img_info->mhi_buf, segments); 385 384 if (!img_info->mhi_buf) 386 385 goto error_alloc_mhi_buf; 387 386
+2 -4
drivers/bus/mhi/host/init.c
··· 699 699 700 700 num = config->num_events; 701 701 mhi_cntrl->total_ev_rings = num; 702 - mhi_cntrl->mhi_event = kzalloc_objs(*mhi_cntrl->mhi_event, num, 703 - GFP_KERNEL); 702 + mhi_cntrl->mhi_event = kzalloc_objs(*mhi_cntrl->mhi_event, num); 704 703 if (!mhi_cntrl->mhi_event) 705 704 return -ENOMEM; 706 705 ··· 937 938 if (ret) 938 939 return -EINVAL; 939 940 940 - mhi_cntrl->mhi_cmd = kzalloc_objs(*mhi_cntrl->mhi_cmd, NR_OF_CMD_RINGS, 941 - GFP_KERNEL); 941 + mhi_cntrl->mhi_cmd = kzalloc_objs(*mhi_cntrl->mhi_cmd, NR_OF_CMD_RINGS); 942 942 if (!mhi_cntrl->mhi_cmd) { 943 943 ret = -ENOMEM; 944 944 goto err_free_event;
+1 -2
drivers/char/agp/sworks-agp.c
··· 96 96 int retval = 0; 97 97 int i; 98 98 99 - tables = kzalloc_objs(struct serverworks_page_map *, nr_tables + 1, 100 - GFP_KERNEL); 99 + tables = kzalloc_objs(struct serverworks_page_map *, nr_tables + 1); 101 100 if (tables == NULL) 102 101 return -ENOMEM; 103 102
+1 -2
drivers/char/agp/uninorth-agp.c
··· 404 404 if (table == NULL) 405 405 return -ENOMEM; 406 406 407 - uninorth_priv.pages_arr = kmalloc_objs(struct page *, 1 << page_order, 408 - GFP_KERNEL); 407 + uninorth_priv.pages_arr = kmalloc_objs(struct page *, 1 << page_order); 409 408 if (uninorth_priv.pages_arr == NULL) 410 409 goto enomem; 411 410
+1 -2
drivers/char/virtio_console.c
··· 1812 1812 vqs = kmalloc_objs(struct virtqueue *, nr_queues); 1813 1813 vqs_info = kzalloc_objs(*vqs_info, nr_queues); 1814 1814 portdev->in_vqs = kmalloc_objs(struct virtqueue *, nr_ports); 1815 - portdev->out_vqs = kmalloc_objs(struct virtqueue *, nr_ports, 1816 - GFP_KERNEL); 1815 + portdev->out_vqs = kmalloc_objs(struct virtqueue *, nr_ports); 1817 1816 if (!vqs || !vqs_info || !portdev->in_vqs || !portdev->out_vqs) { 1818 1817 err = -ENOMEM; 1819 1818 goto free;
+1 -2
drivers/clk/aspeed/clk-aspeed.c
··· 698 698 if (!scu_base) 699 699 return; 700 700 701 - aspeed_clk_data = kzalloc_flex(*aspeed_clk_data, hws, ASPEED_NUM_CLKS, 702 - GFP_KERNEL); 701 + aspeed_clk_data = kzalloc_flex(*aspeed_clk_data, hws, ASPEED_NUM_CLKS); 703 702 if (!aspeed_clk_data) 704 703 return; 705 704 aspeed_clk_data->num = ASPEED_NUM_CLKS;
+1 -2
drivers/clk/bcm/clk-iproc-asiu.c
··· 192 192 if (WARN_ON(!asiu)) 193 193 return; 194 194 195 - asiu->clk_data = kzalloc_flex(*asiu->clk_data, hws, num_clks, 196 - GFP_KERNEL); 195 + asiu->clk_data = kzalloc_flex(*asiu->clk_data, hws, num_clks); 197 196 if (WARN_ON(!asiu->clk_data)) 198 197 goto err_clks; 199 198 asiu->clk_data->num = num_clks;
+1 -2
drivers/clk/clk-gemini.c
··· 398 398 int ret; 399 399 int i; 400 400 401 - gemini_clk_data = kzalloc_flex(*gemini_clk_data, hws, GEMINI_NUM_CLKS, 402 - GFP_KERNEL); 401 + gemini_clk_data = kzalloc_flex(*gemini_clk_data, hws, GEMINI_NUM_CLKS); 403 402 if (!gemini_clk_data) 404 403 return; 405 404 gemini_clk_data->num = GEMINI_NUM_CLKS;
+1 -2
drivers/clk/clk-milbeaut.c
··· 611 611 const char *parent_name; 612 612 struct clk_hw *hw; 613 613 614 - m10v_clk_data = kzalloc_flex(*m10v_clk_data, hws, M10V_NUM_CLKS, 615 - GFP_KERNEL); 614 + m10v_clk_data = kzalloc_flex(*m10v_clk_data, hws, M10V_NUM_CLKS); 616 615 617 616 if (!m10v_clk_data) 618 617 return;
+1 -2
drivers/clk/clk-stm32f4.c
··· 1855 1855 1856 1856 stm32fx_end_primary_clk = data->end_primary; 1857 1857 1858 - clks = kmalloc_objs(*clks, data->gates_num + stm32fx_end_primary_clk, 1859 - GFP_KERNEL); 1858 + clks = kmalloc_objs(*clks, data->gates_num + stm32fx_end_primary_clk); 1860 1859 if (!clks) 1861 1860 goto fail; 1862 1861
+1 -2
drivers/clk/imx/clk-imx6q.c
··· 439 439 void __iomem *anatop_base, *base; 440 440 int ret; 441 441 442 - clk_hw_data = kzalloc_flex(*clk_hw_data, hws, IMX6QDL_CLK_END, 443 - GFP_KERNEL); 442 + clk_hw_data = kzalloc_flex(*clk_hw_data, hws, IMX6QDL_CLK_END); 444 443 if (WARN_ON(!clk_hw_data)) 445 444 return; 446 445
+1 -2
drivers/clk/imx/clk-imx6sl.c
··· 185 185 void __iomem *base; 186 186 int ret; 187 187 188 - clk_hw_data = kzalloc_flex(*clk_hw_data, hws, IMX6SL_CLK_END, 189 - GFP_KERNEL); 188 + clk_hw_data = kzalloc_flex(*clk_hw_data, hws, IMX6SL_CLK_END); 190 189 if (WARN_ON(!clk_hw_data)) 191 190 return; 192 191
+1 -2
drivers/clk/imx/clk-imx6sll.c
··· 81 81 struct device_node *np; 82 82 void __iomem *base; 83 83 84 - clk_hw_data = kzalloc_flex(*clk_hw_data, hws, IMX6SLL_CLK_END, 85 - GFP_KERNEL); 84 + clk_hw_data = kzalloc_flex(*clk_hw_data, hws, IMX6SLL_CLK_END); 86 85 if (WARN_ON(!clk_hw_data)) 87 86 return; 88 87
+1 -2
drivers/clk/imx/clk-imx6sx.c
··· 123 123 void __iomem *base; 124 124 bool lcdif1_assigned_clk; 125 125 126 - clk_hw_data = kzalloc_flex(*clk_hw_data, hws, IMX6SX_CLK_CLK_END, 127 - GFP_KERNEL); 126 + clk_hw_data = kzalloc_flex(*clk_hw_data, hws, IMX6SX_CLK_CLK_END); 128 127 if (WARN_ON(!clk_hw_data)) 129 128 return; 130 129
+1 -2
drivers/clk/imx/clk-imx6ul.c
··· 130 130 struct device_node *np; 131 131 void __iomem *base; 132 132 133 - clk_hw_data = kzalloc_flex(*clk_hw_data, hws, IMX6UL_CLK_END, 134 - GFP_KERNEL); 133 + clk_hw_data = kzalloc_flex(*clk_hw_data, hws, IMX6UL_CLK_END); 135 134 if (WARN_ON(!clk_hw_data)) 136 135 return; 137 136
+4 -8
drivers/clk/imx/clk-imx7ulp.c
··· 49 49 struct clk_hw **hws; 50 50 void __iomem *base; 51 51 52 - clk_data = kzalloc_flex(*clk_data, hws, IMX7ULP_CLK_SCG1_END, 53 - GFP_KERNEL); 52 + clk_data = kzalloc_flex(*clk_data, hws, IMX7ULP_CLK_SCG1_END); 54 53 if (!clk_data) 55 54 return; 56 55 ··· 137 138 struct clk_hw **hws; 138 139 void __iomem *base; 139 140 140 - clk_data = kzalloc_flex(*clk_data, hws, IMX7ULP_CLK_PCC2_END, 141 - GFP_KERNEL); 141 + clk_data = kzalloc_flex(*clk_data, hws, IMX7ULP_CLK_PCC2_END); 142 142 if (!clk_data) 143 143 return; 144 144 ··· 184 186 struct clk_hw **hws; 185 187 void __iomem *base; 186 188 187 - clk_data = kzalloc_flex(*clk_data, hws, IMX7ULP_CLK_PCC3_END, 188 - GFP_KERNEL); 189 + clk_data = kzalloc_flex(*clk_data, hws, IMX7ULP_CLK_PCC3_END); 189 190 if (!clk_data) 190 191 return; 191 192 ··· 230 233 struct clk_hw **hws; 231 234 void __iomem *base; 232 235 233 - clk_data = kzalloc_flex(*clk_data, hws, IMX7ULP_CLK_SMC1_END, 234 - GFP_KERNEL); 236 + clk_data = kzalloc_flex(*clk_data, hws, IMX7ULP_CLK_SMC1_END); 235 237 if (!clk_data) 236 238 return; 237 239
+1 -2
drivers/clk/imx/clk-imx8mm.c
··· 303 303 void __iomem *base; 304 304 int ret; 305 305 306 - clk_hw_data = kzalloc_flex(*clk_hw_data, hws, IMX8MM_CLK_END, 307 - GFP_KERNEL); 306 + clk_hw_data = kzalloc_flex(*clk_hw_data, hws, IMX8MM_CLK_END); 308 307 if (WARN_ON(!clk_hw_data)) 309 308 return -ENOMEM; 310 309
+1 -2
drivers/clk/ingenic/cgu.c
··· 819 819 unsigned i; 820 820 int err; 821 821 822 - cgu->clocks.clks = kzalloc_objs(struct clk *, cgu->clocks.clk_num, 823 - GFP_KERNEL); 822 + cgu->clocks.clks = kzalloc_objs(struct clk *, cgu->clocks.clk_num); 824 823 if (!cgu->clocks.clks) { 825 824 err = -ENOMEM; 826 825 goto err_out;
+1 -2
drivers/clk/mvebu/common.c
··· 124 124 if (desc->get_refclk_freq) 125 125 clk_data.clk_num += 1; 126 126 127 - clk_data.clks = kzalloc_objs(*clk_data.clks, clk_data.clk_num, 128 - GFP_KERNEL); 127 + clk_data.clks = kzalloc_objs(*clk_data.clks, clk_data.clk_num); 129 128 if (WARN_ON(!clk_data.clks)) { 130 129 iounmap(base); 131 130 return;
+1 -2
drivers/clk/st/clk-flexgen.c
··· 612 612 } else 613 613 clk_data->clk_num = data->outputs_nb; 614 614 615 - clk_data->clks = kzalloc_objs(struct clk *, clk_data->clk_num, 616 - GFP_KERNEL); 615 + clk_data->clks = kzalloc_objs(struct clk *, clk_data->clk_num); 617 616 if (!clk_data->clks) 618 617 goto err; 619 618
+1 -2
drivers/clk/st/clkgen-pll.c
··· 788 788 return; 789 789 790 790 clk_data->clk_num = num_odfs; 791 - clk_data->clks = kzalloc_objs(struct clk *, clk_data->clk_num, 792 - GFP_KERNEL); 791 + clk_data->clks = kzalloc_objs(struct clk *, clk_data->clk_num); 793 792 794 793 if (!clk_data->clks) 795 794 goto err;
+1 -2
drivers/clk/tegra/clk.c
··· 227 227 if (WARN_ON(banks > ARRAY_SIZE(periph_regs))) 228 228 return NULL; 229 229 230 - periph_clk_enb_refcnt = kzalloc_objs(*periph_clk_enb_refcnt, 32 * banks, 231 - GFP_KERNEL); 230 + periph_clk_enb_refcnt = kzalloc_objs(*periph_clk_enb_refcnt, 32 * banks); 232 231 if (!periph_clk_enb_refcnt) 233 232 return NULL; 234 233
+1 -2
drivers/clocksource/dw_apb_timer.c
··· 222 222 dw_apb_clockevent_init(int cpu, const char *name, unsigned rating, 223 223 void __iomem *base, int irq, unsigned long freq) 224 224 { 225 - struct dw_apb_clock_event_device *dw_ced = kzalloc_obj(*dw_ced, 226 - GFP_KERNEL); 225 + struct dw_apb_clock_event_device *dw_ced = kzalloc_obj(*dw_ced); 227 226 int err; 228 227 229 228 if (!dw_ced)
+1 -2
drivers/clocksource/sh_cmt.c
··· 1084 1084 1085 1085 /* Allocate and setup the channels. */ 1086 1086 cmt->num_channels = hweight8(cmt->hw_channels); 1087 - cmt->channels = kzalloc_objs(*cmt->channels, cmt->num_channels, 1088 - GFP_KERNEL); 1087 + cmt->channels = kzalloc_objs(*cmt->channels, cmt->num_channels); 1089 1088 if (cmt->channels == NULL) { 1090 1089 ret = -ENOMEM; 1091 1090 goto err_unmap;
+1 -2
drivers/clocksource/sh_mtu2.c
··· 420 420 mtu->num_channels = min_t(unsigned int, ret, 421 421 ARRAY_SIZE(sh_mtu2_channel_offsets)); 422 422 423 - mtu->channels = kzalloc_objs(*mtu->channels, mtu->num_channels, 424 - GFP_KERNEL); 423 + mtu->channels = kzalloc_objs(*mtu->channels, mtu->num_channels); 425 424 if (mtu->channels == NULL) { 426 425 ret = -ENOMEM; 427 426 goto err_unmap;
+1 -2
drivers/clocksource/sh_tmu.c
··· 546 546 } 547 547 548 548 /* Allocate and setup the channels. */ 549 - tmu->channels = kzalloc_objs(*tmu->channels, tmu->num_channels, 550 - GFP_KERNEL); 549 + tmu->channels = kzalloc_objs(*tmu->channels, tmu->num_channels); 551 550 if (tmu->channels == NULL) { 552 551 ret = -ENOMEM; 553 552 goto err_unmap;
+1 -2
drivers/comedi/drivers/ni_670x.c
··· 198 198 if (s->n_chan == 32) { 199 199 const struct comedi_lrange **range_table_list; 200 200 201 - range_table_list = kmalloc_objs(*range_table_list, 32, 202 - GFP_KERNEL); 201 + range_table_list = kmalloc_objs(*range_table_list, 32); 203 202 if (!range_table_list) 204 203 return -ENOMEM; 205 204 s->range_table_list = range_table_list;
+1 -2
drivers/connector/cn_proc.c
··· 429 429 if (nsp->sk) { 430 430 sk = nsp->sk; 431 431 if (sk->sk_user_data == NULL) { 432 - sk->sk_user_data = kzalloc_obj(struct proc_input, 433 - GFP_KERNEL); 432 + sk->sk_user_data = kzalloc_obj(struct proc_input); 434 433 if (sk->sk_user_data == NULL) { 435 434 err = ENOMEM; 436 435 goto out;
+1 -2
drivers/cpufreq/acpi-cpufreq.c
··· 798 798 goto err_unreg; 799 799 } 800 800 801 - freq_table = kzalloc_objs(*freq_table, perf->state_count + 1, 802 - GFP_KERNEL); 801 + freq_table = kzalloc_objs(*freq_table, perf->state_count + 1); 803 802 if (!freq_table) { 804 803 result = -ENOMEM; 805 804 goto err_unreg;
+1 -2
drivers/cpufreq/armada-37xx-cpufreq.c
··· 467 467 return -EINVAL; 468 468 } 469 469 470 - armada37xx_cpufreq_state = kmalloc_obj(*armada37xx_cpufreq_state, 471 - GFP_KERNEL); 470 + armada37xx_cpufreq_state = kmalloc_obj(*armada37xx_cpufreq_state); 472 471 if (!armada37xx_cpufreq_state) { 473 472 clk_put(clk); 474 473 return -ENOMEM;
+1 -2
drivers/cpufreq/longhaul.c
··· 475 475 return -EINVAL; 476 476 } 477 477 478 - longhaul_table = kzalloc_objs(*longhaul_table, numscales + 1, 479 - GFP_KERNEL); 478 + longhaul_table = kzalloc_objs(*longhaul_table, numscales + 1); 480 479 if (!longhaul_table) 481 480 return -ENOMEM; 482 481
+1 -2
drivers/cpufreq/sparc-us2e-cpufreq.c
··· 323 323 impl = ((ver >> 32) & 0xffff); 324 324 325 325 if (manuf == 0x17 && impl == 0x13) { 326 - us2e_freq_table = kzalloc_objs(*us2e_freq_table, NR_CPUS, 327 - GFP_KERNEL); 326 + us2e_freq_table = kzalloc_objs(*us2e_freq_table, NR_CPUS); 328 327 if (!us2e_freq_table) 329 328 return -ENOMEM; 330 329
+1 -2
drivers/cpufreq/sparc-us3-cpufreq.c
··· 171 171 impl == CHEETAH_PLUS_IMPL || 172 172 impl == JAGUAR_IMPL || 173 173 impl == PANTHER_IMPL)) { 174 - us3_freq_table = kzalloc_objs(*us3_freq_table, NR_CPUS, 175 - GFP_KERNEL); 174 + us3_freq_table = kzalloc_objs(*us3_freq_table, NR_CPUS); 176 175 if (!us3_freq_table) 177 176 return -ENOMEM; 178 177
+1 -2
drivers/crypto/amcc/crypto4xx_core.c
··· 173 173 if (!dev->pdr) 174 174 return -ENOMEM; 175 175 176 - dev->pdr_uinfo = kzalloc_objs(struct pd_uinfo, PPC4XX_NUM_PD, 177 - GFP_KERNEL); 176 + dev->pdr_uinfo = kzalloc_objs(struct pd_uinfo, PPC4XX_NUM_PD); 178 177 if (!dev->pdr_uinfo) { 179 178 dma_free_coherent(dev->core_dev->device, 180 179 sizeof(struct ce_pd) * PPC4XX_NUM_PD,
+1 -2
drivers/crypto/cavium/nitrox/nitrox_mbx.c
··· 181 181 struct nitrox_vfdev *vfdev; 182 182 int i; 183 183 184 - ndev->iov.vfdev = kzalloc_objs(struct nitrox_vfdev, ndev->iov.num_vfs, 185 - GFP_KERNEL); 184 + ndev->iov.vfdev = kzalloc_objs(struct nitrox_vfdev, ndev->iov.num_vfs); 186 185 if (!ndev->iov.vfdev) 187 186 return -ENOMEM; 188 187
+2 -4
drivers/crypto/hisilicon/qm.c
··· 5771 5771 if (!qm->qp_array) 5772 5772 return -ENOMEM; 5773 5773 5774 - qm->poll_data = kzalloc_objs(struct hisi_qm_poll_data, qm->qp_num, 5775 - GFP_KERNEL); 5774 + qm->poll_data = kzalloc_objs(struct hisi_qm_poll_data, qm->qp_num); 5776 5775 if (!qm->poll_data) { 5777 5776 kfree(qm->qp_array); 5778 5777 return -ENOMEM; ··· 5836 5837 5837 5838 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) { 5838 5839 total_func = pci_sriov_get_totalvfs(qm->pdev) + 1; 5839 - qm->factor = kzalloc_objs(struct qm_shaper_factor, total_func, 5840 - GFP_KERNEL); 5840 + qm->factor = kzalloc_objs(struct qm_shaper_factor, total_func); 5841 5841 if (!qm->factor) 5842 5842 return -ENOMEM; 5843 5843
+1 -2
drivers/crypto/hisilicon/sec2/sec_crypto.c
··· 672 672 ctx->hlf_q_num = sec->ctx_q_num >> 1; 673 673 674 674 ctx->pbuf_supported = ctx->sec->iommu_used; 675 - ctx->qp_ctx = kzalloc_objs(struct sec_qp_ctx, sec->ctx_q_num, 676 - GFP_KERNEL); 675 + ctx->qp_ctx = kzalloc_objs(struct sec_qp_ctx, sec->ctx_q_num); 677 676 if (!ctx->qp_ctx) { 678 677 ret = -ENOMEM; 679 678 goto err_destroy_qps;
+1 -2
drivers/crypto/hisilicon/zip/zip_crypto.c
··· 479 479 } 480 480 spin_lock_init(&req_q->req_lock); 481 481 482 - req_q->q = kzalloc_objs(struct hisi_zip_req, req_q->size, 483 - GFP_KERNEL); 482 + req_q->q = kzalloc_objs(struct hisi_zip_req, req_q->size); 484 483 if (!req_q->q) { 485 484 ret = -ENOMEM; 486 485 if (i == 0)
+1 -2
drivers/crypto/intel/qat/qat_common/adf_fw_counters.c
··· 83 83 if (unlikely(!ae_count)) 84 84 return ERR_PTR(-EINVAL); 85 85 86 - fw_counters = kmalloc_flex(*fw_counters, ae_counters, ae_count, 87 - GFP_KERNEL); 86 + fw_counters = kmalloc_flex(*fw_counters, ae_counters, ae_count); 88 87 if (!fw_counters) 89 88 return ERR_PTR(-ENOMEM); 90 89
+1 -2
drivers/crypto/intel/qat/qat_common/adf_sriov.c
··· 173 173 goto err_del_cfg; 174 174 175 175 /* Allocate memory for VF info structs */ 176 - accel_dev->pf.vf_info = kzalloc_objs(struct adf_accel_vf_info, totalvfs, 177 - GFP_KERNEL); 176 + accel_dev->pf.vf_info = kzalloc_objs(struct adf_accel_vf_info, totalvfs); 178 177 ret = -ENOMEM; 179 178 if (!accel_dev->pf.vf_info) 180 179 goto err_del_cfg;
+1 -2
drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
··· 340 340 if (!cptpf->flr_wq) 341 341 return -ENOMEM; 342 342 343 - cptpf->flr_work = kzalloc_objs(struct cptpf_flr_work, num_vfs, 344 - GFP_KERNEL); 343 + cptpf->flr_work = kzalloc_objs(struct cptpf_flr_work, num_vfs); 345 344 if (!cptpf->flr_work) 346 345 goto destroy_wq; 347 346
+1 -2
drivers/crypto/stm32/stm32-cryp.c
··· 1498 1498 return alloc_sg_len; 1499 1499 1500 1500 /* We allocate to much sg entry, but it is easier */ 1501 - *new_sg = kmalloc_objs(struct scatterlist, (size_t)alloc_sg_len, 1502 - GFP_KERNEL); 1501 + *new_sg = kmalloc_objs(struct scatterlist, (size_t)alloc_sg_len); 1503 1502 if (!*new_sg) 1504 1503 return -ENOMEM; 1505 1504
+1 -2
drivers/crypto/virtio/virtio_crypto_core.c
··· 170 170 171 171 static int virtcrypto_alloc_queues(struct virtio_crypto *vi) 172 172 { 173 - vi->data_vq = kzalloc_objs(*vi->data_vq, vi->max_data_queues, 174 - GFP_KERNEL); 173 + vi->data_vq = kzalloc_objs(*vi->data_vq, vi->max_data_queues); 175 174 if (!vi->data_vq) 176 175 return -ENOMEM; 177 176
+1 -2
drivers/devfreq/governor_userspace.c
··· 87 87 static int userspace_init(struct devfreq *devfreq) 88 88 { 89 89 int err = 0; 90 - struct userspace_data *data = kzalloc_obj(struct userspace_data, 91 - GFP_KERNEL); 90 + struct userspace_data *data = kzalloc_obj(struct userspace_data); 92 91 93 92 if (!data) { 94 93 err = -ENOMEM;
+1 -2
drivers/dma-buf/udmabuf.c
··· 215 215 if (!ubuf->offsets) 216 216 return -ENOMEM; 217 217 218 - ubuf->pinned_folios = kvmalloc_objs(*ubuf->pinned_folios, pgcnt, 219 - GFP_KERNEL); 218 + ubuf->pinned_folios = kvmalloc_objs(*ubuf->pinned_folios, pgcnt); 220 219 if (!ubuf->pinned_folios) 221 220 return -ENOMEM; 222 221
+1 -2
drivers/dma/amba-pl08x.c
··· 2855 2855 } 2856 2856 2857 2857 /* Initialize physical channels */ 2858 - pl08x->phy_chans = kzalloc_objs(*pl08x->phy_chans, vd->channels, 2859 - GFP_KERNEL); 2858 + pl08x->phy_chans = kzalloc_objs(*pl08x->phy_chans, vd->channels); 2860 2859 if (!pl08x->phy_chans) { 2861 2860 ret = -ENOMEM; 2862 2861 goto out_no_phychans;
+1 -2
drivers/dma/plx_dma.c
··· 378 378 struct plx_dma_desc *desc; 379 379 int i; 380 380 381 - plxdev->desc_ring = kzalloc_objs(*plxdev->desc_ring, PLX_DMA_RING_COUNT, 382 - GFP_KERNEL); 381 + plxdev->desc_ring = kzalloc_objs(*plxdev->desc_ring, PLX_DMA_RING_COUNT); 383 382 if (!plxdev->desc_ring) 384 383 return -ENOMEM; 385 384
+1 -2
drivers/dma/xilinx/zynqmp_dma.c
··· 482 482 if (ret < 0) 483 483 return ret; 484 484 485 - chan->sw_desc_pool = kzalloc_objs(*desc, ZYNQMP_DMA_NUM_DESCS, 486 - GFP_KERNEL); 485 + chan->sw_desc_pool = kzalloc_objs(*desc, ZYNQMP_DMA_NUM_DESCS); 487 486 if (!chan->sw_desc_pool) 488 487 return -ENOMEM; 489 488
+3 -6
drivers/edac/edac_device.c
··· 71 71 if (!dev_ctl) 72 72 return NULL; 73 73 74 - dev_inst = kzalloc_objs(struct edac_device_instance, nr_instances, 75 - GFP_KERNEL); 74 + dev_inst = kzalloc_objs(struct edac_device_instance, nr_instances); 76 75 if (!dev_inst) 77 76 goto free; 78 77 ··· 647 648 if (!ctx) 648 649 return -ENOMEM; 649 650 650 - ras_attr_groups = kzalloc_objs(*ras_attr_groups, attr_gcnt + 1, 651 - GFP_KERNEL); 651 + ras_attr_groups = kzalloc_objs(*ras_attr_groups, attr_gcnt + 1); 652 652 if (!ras_attr_groups) 653 653 goto ctx_free; 654 654 ··· 658 660 } 659 661 660 662 if (mem_repair_cnt) { 661 - ctx->mem_repair = kzalloc_objs(*ctx->mem_repair, mem_repair_cnt, 662 - GFP_KERNEL); 663 + ctx->mem_repair = kzalloc_objs(*ctx->mem_repair, mem_repair_cnt); 663 664 if (!ctx->mem_repair) 664 665 goto data_mem_free; 665 666 }
+1 -2
drivers/edac/edac_mc.c
··· 231 231 csr->csrow_idx = row; 232 232 csr->mci = mci; 233 233 csr->nr_channels = tot_channels; 234 - csr->channels = kzalloc_objs(*csr->channels, tot_channels, 235 - GFP_KERNEL); 234 + csr->channels = kzalloc_objs(*csr->channels, tot_channels); 236 235 if (!csr->channels) 237 236 return -ENOMEM; 238 237
+2 -4
drivers/edac/i7core_edac.c
··· 459 459 if (!i7core_dev) 460 460 return NULL; 461 461 462 - i7core_dev->pdev = kzalloc_objs(*i7core_dev->pdev, table->n_devs, 463 - GFP_KERNEL); 462 + i7core_dev->pdev = kzalloc_objs(*i7core_dev->pdev, table->n_devs); 464 463 if (!i7core_dev->pdev) { 465 464 kfree(i7core_dev); 466 465 return NULL; ··· 1176 1177 goto err_put_addrmatch; 1177 1178 1178 1179 if (!pvt->is_registered) { 1179 - pvt->chancounts_dev = kzalloc_obj(*pvt->chancounts_dev, 1180 - GFP_KERNEL); 1180 + pvt->chancounts_dev = kzalloc_obj(*pvt->chancounts_dev); 1181 1181 if (!pvt->chancounts_dev) { 1182 1182 rc = -ENOMEM; 1183 1183 goto err_del_addrmatch;
+4 -8
drivers/extcon/extcon.c
··· 1098 1098 if (!edev->max_supported) 1099 1099 return 0; 1100 1100 1101 - edev->cables = kzalloc_objs(*edev->cables, edev->max_supported, 1102 - GFP_KERNEL); 1101 + edev->cables = kzalloc_objs(*edev->cables, edev->max_supported); 1103 1102 if (!edev->cables) 1104 1103 return -ENOMEM; 1105 1104 ··· 1159 1160 for (index = 0; edev->mutually_exclusive[index]; index++) 1160 1161 ; 1161 1162 1162 - edev->attrs_muex = kzalloc_objs(*edev->attrs_muex, index + 1, 1163 - GFP_KERNEL); 1163 + edev->attrs_muex = kzalloc_objs(*edev->attrs_muex, index + 1); 1164 1164 if (!edev->attrs_muex) 1165 1165 return -ENOMEM; 1166 1166 1167 - edev->d_attrs_muex = kzalloc_objs(*edev->d_attrs_muex, index, 1168 - GFP_KERNEL); 1167 + edev->d_attrs_muex = kzalloc_objs(*edev->d_attrs_muex, index); 1169 1168 if (!edev->d_attrs_muex) { 1170 1169 kfree(edev->attrs_muex); 1171 1170 return -ENOMEM; ··· 1291 1294 1292 1295 spin_lock_init(&edev->lock); 1293 1296 if (edev->max_supported) { 1294 - edev->nh = kzalloc_objs(*edev->nh, edev->max_supported, 1295 - GFP_KERNEL); 1297 + edev->nh = kzalloc_objs(*edev->nh, edev->max_supported); 1296 1298 if (!edev->nh) { 1297 1299 ret = -ENOMEM; 1298 1300 goto err_alloc_nh;
+1 -2
drivers/firmware/efi/apple-properties.c
··· 146 146 goto skip_device; 147 147 } 148 148 149 - entry = kzalloc_objs(*entry, dev_header->prop_count + 1, 150 - GFP_KERNEL); 149 + entry = kzalloc_objs(*entry, dev_header->prop_count + 1); 151 150 if (!entry) { 152 151 dev_err(dev, "cannot allocate properties\n"); 153 152 goto skip_device;
+1 -2
drivers/firmware/efi/test/efi_test.c
··· 614 614 if (qcaps.capsule_count == ULONG_MAX) 615 615 return -EINVAL; 616 616 617 - capsules = kzalloc_objs(efi_capsule_header_t, qcaps.capsule_count + 1, 618 - GFP_KERNEL); 617 + capsules = kzalloc_objs(efi_capsule_header_t, qcaps.capsule_count + 1); 619 618 if (!capsules) 620 619 return -ENOMEM; 621 620
+1 -2
drivers/firmware/qcom/qcom_tzmem.c
··· 262 262 return ERR_PTR(-EINVAL); 263 263 } 264 264 265 - struct qcom_tzmem_pool *pool __free(kfree) = kzalloc_obj(*pool, 266 - GFP_KERNEL); 265 + struct qcom_tzmem_pool *pool __free(kfree) = kzalloc_obj(*pool); 267 266 if (!pool) 268 267 return ERR_PTR(-ENOMEM); 269 268
+2 -4
drivers/fpga/dfl.c
··· 397 397 398 398 /* then add irq resource */ 399 399 if (feature->nr_irqs) { 400 - ddev->irqs = kzalloc_objs(*ddev->irqs, feature->nr_irqs, 401 - GFP_KERNEL); 400 + ddev->irqs = kzalloc_objs(*ddev->irqs, feature->nr_irqs); 402 401 if (!ddev->irqs) { 403 402 ret = -ENOMEM; 404 403 goto put_dev; ··· 1181 1182 if (binfo->len - ofst < size) 1182 1183 return -EINVAL; 1183 1184 1184 - finfo = kzalloc_flex(*finfo, params, dfh_psize / sizeof(u64), 1185 - GFP_KERNEL); 1185 + finfo = kzalloc_flex(*finfo, params, dfh_psize / sizeof(u64)); 1186 1186 if (!finfo) 1187 1187 return -ENOMEM; 1188 1188
+1 -2
drivers/gpib/agilent_82350b/agilent_82350b.c
··· 481 481 482 482 static int agilent_82350b_allocate_private(struct gpib_board *board) 483 483 { 484 - board->private_data = kzalloc_obj(struct agilent_82350b_priv, 485 - GFP_KERNEL); 484 + board->private_data = kzalloc_obj(struct agilent_82350b_priv); 486 485 if (!board->private_data) 487 486 return -ENOMEM; 488 487 return 0;
+1 -2
drivers/gpib/agilent_82357a/agilent_82357a.c
··· 1196 1196 { 1197 1197 struct agilent_82357a_priv *a_priv; 1198 1198 1199 - board->private_data = kzalloc_obj(struct agilent_82357a_priv, 1200 - GFP_KERNEL); 1199 + board->private_data = kzalloc_obj(struct agilent_82357a_priv); 1201 1200 if (!board->private_data) 1202 1201 return -ENOMEM; 1203 1202 a_priv = board->private_data;
+1 -2
drivers/gpib/common/gpib_os.c
··· 1241 1241 mutex_unlock(&file_priv->descriptors_mutex); 1242 1242 return -ERANGE; 1243 1243 } 1244 - file_priv->descriptors[i] = kmalloc_obj(struct gpib_descriptor, 1245 - GFP_KERNEL); 1244 + file_priv->descriptors[i] = kmalloc_obj(struct gpib_descriptor); 1246 1245 if (!file_priv->descriptors[i]) { 1247 1246 mutex_unlock(&file_priv->descriptors_mutex); 1248 1247 return -ENOMEM;
+1 -2
drivers/gpio/gpio-sim.c
··· 1569 1569 { 1570 1570 int id; 1571 1571 1572 - struct gpio_sim_device *dev __free(kfree) = kzalloc_obj(*dev, 1573 - GFP_KERNEL); 1572 + struct gpio_sim_device *dev __free(kfree) = kzalloc_obj(*dev); 1574 1573 if (!dev) 1575 1574 return ERR_PTR(-ENOMEM); 1576 1575
+1 -2
drivers/gpio/gpio-virtuser.c
··· 1711 1711 gpio_virtuser_config_make_device_group(struct config_group *group, 1712 1712 const char *name) 1713 1713 { 1714 - struct gpio_virtuser_device *dev __free(kfree) = kzalloc_obj(*dev, 1715 - GFP_KERNEL); 1714 + struct gpio_virtuser_device *dev __free(kfree) = kzalloc_obj(*dev); 1716 1715 if (!dev) 1717 1716 return ERR_PTR(-ENOMEM); 1718 1717
+1 -2
drivers/gpio/gpiolib-shared.c
··· 84 84 { 85 85 char *con_id_cpy __free(kfree) = NULL; 86 86 87 - struct gpio_shared_ref *ref __free(kfree) = kzalloc_obj(*ref, 88 - GFP_KERNEL); 87 + struct gpio_shared_ref *ref __free(kfree) = kzalloc_obj(*ref); 89 88 if (!ref) 90 89 return NULL; 91 90
+4 -8
drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
··· 260 260 switch (acp_machine_id) { 261 261 case ST_JADEITE: 262 262 { 263 - adev->acp.acp_cell = kzalloc_objs(struct mfd_cell, 2, 264 - GFP_KERNEL); 263 + adev->acp.acp_cell = kzalloc_objs(struct mfd_cell, 2); 265 264 if (!adev->acp.acp_cell) { 266 265 r = -ENOMEM; 267 266 goto failure; ··· 272 273 goto failure; 273 274 } 274 275 275 - i2s_pdata = kzalloc_objs(struct i2s_platform_data, 1, 276 - GFP_KERNEL); 276 + i2s_pdata = kzalloc_objs(struct i2s_platform_data, 1); 277 277 if (!i2s_pdata) { 278 278 r = -ENOMEM; 279 279 goto failure; ··· 323 325 break; 324 326 } 325 327 default: 326 - adev->acp.acp_cell = kzalloc_objs(struct mfd_cell, ACP_DEVS, 327 - GFP_KERNEL); 328 + adev->acp.acp_cell = kzalloc_objs(struct mfd_cell, ACP_DEVS); 328 329 329 330 if (!adev->acp.acp_cell) { 330 331 r = -ENOMEM; ··· 336 339 goto failure; 337 340 } 338 341 339 - i2s_pdata = kzalloc_objs(struct i2s_platform_data, 3, 340 - GFP_KERNEL); 342 + i2s_pdata = kzalloc_objs(struct i2s_platform_data, 3); 341 343 if (!i2s_pdata) { 342 344 r = -ENOMEM; 343 345 goto failure;
+1 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
··· 399 399 400 400 struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev) 401 401 { 402 - struct amdgpu_cgs_device *cgs_device = kmalloc_obj(*cgs_device, 403 - GFP_KERNEL); 402 + struct amdgpu_cgs_device *cgs_device = kmalloc_obj(*cgs_device); 404 403 405 404 if (!cgs_device) { 406 405 drm_err(adev_to_drm(adev), "Couldn't allocate CGS device structure\n");
+6 -12
drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
··· 1673 1673 } 1674 1674 1675 1675 if (is_dp_bridge) { 1676 - amdgpu_dig_connector = kzalloc_obj(struct amdgpu_connector_atom_dig, 1677 - GFP_KERNEL); 1676 + amdgpu_dig_connector = kzalloc_obj(struct amdgpu_connector_atom_dig); 1678 1677 if (!amdgpu_dig_connector) 1679 1678 goto failed; 1680 1679 amdgpu_connector->con_priv = amdgpu_dig_connector; ··· 1828 1829 break; 1829 1830 case DRM_MODE_CONNECTOR_DVII: 1830 1831 case DRM_MODE_CONNECTOR_DVID: 1831 - amdgpu_dig_connector = kzalloc_obj(struct amdgpu_connector_atom_dig, 1832 - GFP_KERNEL); 1832 + amdgpu_dig_connector = kzalloc_obj(struct amdgpu_connector_atom_dig); 1833 1833 if (!amdgpu_dig_connector) 1834 1834 goto failed; 1835 1835 amdgpu_connector->con_priv = amdgpu_dig_connector; ··· 1885 1887 break; 1886 1888 case DRM_MODE_CONNECTOR_HDMIA: 1887 1889 case DRM_MODE_CONNECTOR_HDMIB: 1888 - amdgpu_dig_connector = kzalloc_obj(struct amdgpu_connector_atom_dig, 1889 - GFP_KERNEL); 1890 + amdgpu_dig_connector = kzalloc_obj(struct amdgpu_connector_atom_dig); 1890 1891 if (!amdgpu_dig_connector) 1891 1892 goto failed; 1892 1893 amdgpu_connector->con_priv = amdgpu_dig_connector; ··· 1934 1937 connector->doublescan_allowed = false; 1935 1938 break; 1936 1939 case DRM_MODE_CONNECTOR_DisplayPort: 1937 - amdgpu_dig_connector = kzalloc_obj(struct amdgpu_connector_atom_dig, 1938 - GFP_KERNEL); 1940 + amdgpu_dig_connector = kzalloc_obj(struct amdgpu_connector_atom_dig); 1939 1941 if (!amdgpu_dig_connector) 1940 1942 goto failed; 1941 1943 amdgpu_connector->con_priv = amdgpu_dig_connector; ··· 1983 1987 connector->doublescan_allowed = false; 1984 1988 break; 1985 1989 case DRM_MODE_CONNECTOR_eDP: 1986 - amdgpu_dig_connector = kzalloc_obj(struct amdgpu_connector_atom_dig, 1987 - GFP_KERNEL); 1990 + amdgpu_dig_connector = kzalloc_obj(struct amdgpu_connector_atom_dig); 1988 1991 if (!amdgpu_dig_connector) 1989 1992 goto failed; 1990 1993 amdgpu_connector->con_priv = amdgpu_dig_connector; ··· 2010 2015 connector->doublescan_allowed = false; 2011 2016 break; 2012 2017 case DRM_MODE_CONNECTOR_LVDS: 2013 - amdgpu_dig_connector = kzalloc_obj(struct amdgpu_connector_atom_dig, 2014 - GFP_KERNEL); 2018 + amdgpu_dig_connector = kzalloc_obj(struct amdgpu_connector_atom_dig); 2015 2019 if (!amdgpu_dig_connector) 2016 2020 goto failed; 2017 2021 amdgpu_connector->con_priv = amdgpu_dig_connector;
+1 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
··· 192 192 return PTR_ERR(chunk_array); 193 193 194 194 p->nchunks = cs->in.num_chunks; 195 - p->chunks = kvmalloc_objs(struct amdgpu_cs_chunk, p->nchunks, 196 - GFP_KERNEL); 195 + p->chunks = kvmalloc_objs(struct amdgpu_cs_chunk, p->nchunks); 197 196 if (!p->chunks) { 198 197 ret = -ENOMEM; 199 198 goto free_chunk;
+1 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
··· 1931 1931 1932 1932 switch (le16_to_cpu(nps_info->v1.header.version_major)) { 1933 1933 case 1: 1934 - mem_ranges = kvzalloc_objs(*mem_ranges, nps_info->v1.count, 1935 - GFP_KERNEL); 1934 + mem_ranges = kvzalloc_objs(*mem_ranges, nps_info->v1.count); 1936 1935 if (!mem_ranges) 1937 1936 return -ENOMEM; 1938 1937 *nps_type = nps_info->v1.nps_type;
+1 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
··· 153 153 mux->real_ring = ring; 154 154 mux->num_ring_entries = 0; 155 155 156 - mux->ring_entry = kzalloc_objs(struct amdgpu_mux_entry, entry_size, 157 - GFP_KERNEL); 156 + mux->ring_entry = kzalloc_objs(struct amdgpu_mux_entry, entry_size); 158 157 if (!mux->ring_entry) 159 158 return -ENOMEM; 160 159
+1 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
··· 966 966 } 967 967 968 968 /* Allocate for init_data_hdr */ 969 - init_data_hdr = kzalloc_obj(struct amd_sriov_msg_init_data_header, 970 - GFP_KERNEL); 969 + init_data_hdr = kzalloc_obj(struct amd_sriov_msg_init_data_header); 971 970 if (!init_data_hdr) 972 971 return -ENOMEM; 973 972
+1 -2
drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
··· 2107 2107 amdgpu_atombios_encoder_get_dig_info(struct amdgpu_encoder *amdgpu_encoder) 2108 2108 { 2109 2109 int encoder_enum = (amdgpu_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT; 2110 - struct amdgpu_encoder_atom_dig *dig = kzalloc_obj(struct amdgpu_encoder_atom_dig, 2111 - GFP_KERNEL); 2110 + struct amdgpu_encoder_atom_dig *dig = kzalloc_obj(struct amdgpu_encoder_atom_dig); 2112 2111 2113 2112 if (!dig) 2114 2113 return NULL;
+1 -2
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
··· 1775 1775 1776 1776 /* DCE10 has audio blocks tied to DIG encoders */ 1777 1777 for (i = 0; i < adev->mode_info.num_dig; i++) { 1778 - adev->mode_info.afmt[i] = kzalloc_obj(struct amdgpu_afmt, 1779 - GFP_KERNEL); 1778 + adev->mode_info.afmt[i] = kzalloc_obj(struct amdgpu_afmt); 1780 1779 if (adev->mode_info.afmt[i]) { 1781 1780 adev->mode_info.afmt[i]->offset = dig_offsets[i]; 1782 1781 adev->mode_info.afmt[i]->id = i;
+1 -2
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
··· 1818 1818 1819 1819 /* DCE6 has audio blocks tied to DIG encoders */ 1820 1820 for (i = 0; i < adev->mode_info.num_dig; i++) { 1821 - adev->mode_info.afmt[i] = kzalloc_obj(struct amdgpu_afmt, 1822 - GFP_KERNEL); 1821 + adev->mode_info.afmt[i] = kzalloc_obj(struct amdgpu_afmt); 1823 1822 if (adev->mode_info.afmt[i]) { 1824 1823 adev->mode_info.afmt[i]->offset = dig_offsets[i]; 1825 1824 adev->mode_info.afmt[i]->id = i;
+1 -2
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
··· 1722 1722 1723 1723 /* DCE8 has audio blocks tied to DIG encoders */ 1724 1724 for (i = 0; i < adev->mode_info.num_dig; i++) { 1725 - adev->mode_info.afmt[i] = kzalloc_obj(struct amdgpu_afmt, 1726 - GFP_KERNEL); 1725 + adev->mode_info.afmt[i] = kzalloc_obj(struct amdgpu_afmt); 1727 1726 if (adev->mode_info.afmt[i]) { 1728 1727 adev->mode_info.afmt[i]->offset = dig_offsets[i]; 1729 1728 adev->mode_info.afmt[i]->id = i;
+1 -2
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
··· 1064 1064 adev->vm_manager.vram_base_offset = 0; 1065 1065 } 1066 1066 1067 - adev->gmc.vm_fault_info = kmalloc_obj(struct kfd_vm_fault_info, 1068 - GFP_KERNEL); 1067 + adev->gmc.vm_fault_info = kmalloc_obj(struct kfd_vm_fault_info); 1069 1068 if (!adev->gmc.vm_fault_info) 1070 1069 return -ENOMEM; 1071 1070 atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);
+1 -2
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
··· 1179 1179 adev->vm_manager.vram_base_offset = 0; 1180 1180 } 1181 1181 1182 - adev->gmc.vm_fault_info = kmalloc_obj(struct kfd_vm_fault_info, 1183 - GFP_KERNEL); 1182 + adev->gmc.vm_fault_info = kmalloc_obj(struct kfd_vm_fault_info); 1184 1183 if (!adev->gmc.vm_fault_info) 1185 1184 return -ENOMEM; 1186 1185 atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);
+1 -2
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
··· 2224 2224 if (*priv_offset + (args->num_devices * sizeof(*device_privs)) > max_priv_data_size) 2225 2225 return -EINVAL; 2226 2226 2227 - device_buckets = kmalloc_objs(*device_buckets, args->num_devices, 2228 - GFP_KERNEL); 2227 + device_buckets = kmalloc_objs(*device_buckets, args->num_devices); 2229 2228 if (!device_buckets) 2230 2229 return -ENOMEM; 2231 2230
+1 -2
drivers/gpu/drm/amd/amdkfd/kfd_events.c
··· 791 791 struct kfd_event_waiter *event_waiters; 792 792 uint32_t i; 793 793 794 - event_waiters = kzalloc_objs(struct kfd_event_waiter, num_events, 795 - GFP_KERNEL); 794 + event_waiters = kzalloc_objs(struct kfd_event_waiter, num_events); 796 795 if (!event_waiters) 797 796 return NULL; 798 797
+1 -2
drivers/gpu/drm/amd/amdkfd/kfd_process.c
··· 291 291 wave_cnt = 0; 292 292 max_waves_per_cu = 0; 293 293 294 - cu_occupancy = kzalloc_objs(*cu_occupancy, AMDGPU_MAX_QUEUES, 295 - GFP_KERNEL); 294 + cu_occupancy = kzalloc_objs(*cu_occupancy, AMDGPU_MAX_QUEUES); 296 295 if (!cu_occupancy) 297 296 return -ENOMEM; 298 297
+4 -8
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 1650 1650 int i = 0; 1651 1651 struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL; 1652 1652 1653 - hpd_rx_offload_wq = kzalloc_objs(*hpd_rx_offload_wq, max_caps, 1654 - GFP_KERNEL); 1653 + hpd_rx_offload_wq = kzalloc_objs(*hpd_rx_offload_wq, max_caps); 1655 1654 1656 1655 if (!hpd_rx_offload_wq) 1657 1656 return NULL; ··· 2129 2130 } 2130 2131 if (dc_is_dmub_outbox_supported(adev->dm.dc)) { 2131 2132 init_completion(&adev->dm.dmub_aux_transfer_done); 2132 - adev->dm.dmub_notify = kzalloc_obj(struct dmub_notification, 2133 - GFP_KERNEL); 2133 + adev->dm.dmub_notify = kzalloc_obj(struct dmub_notification); 2134 2134 if (!adev->dm.dmub_notify) { 2135 2135 drm_info(adev_to_drm(adev), "fail to allocate adev->dm.dmub_notify"); 2136 2136 goto error; ··· 4158 4160 static void schedule_hpd_rx_offload_work(struct amdgpu_device *adev, struct hpd_rx_irq_offload_work_queue *offload_wq, 4159 4161 union hpd_irq_data hpd_irq_data) 4160 4162 { 4161 - struct hpd_rx_irq_offload_work *offload_work = kzalloc_obj(*offload_work, 4162 - GFP_KERNEL); 4163 + struct hpd_rx_irq_offload_work *offload_work = kzalloc_obj(*offload_work); 4163 4164 4164 4165 if (!offload_work) { 4165 4166 drm_err(adev_to_drm(adev), "Failed to allocate hpd_rx_irq_offload_work.\n"); ··· 5603 5606 link = dc_get_link_at_index(dm->dc, i); 5604 5607 5605 5608 if (link->connector_signal == SIGNAL_TYPE_VIRTUAL) { 5606 - struct amdgpu_dm_wb_connector *wbcon = kzalloc_obj(*wbcon, 5607 - GFP_KERNEL); 5609 + struct amdgpu_dm_wb_connector *wbcon = kzalloc_obj(*wbcon); 5608 5610 5609 5611 if (!wbcon) { 5610 5612 drm_err(adev_to_drm(adev), "KMS: Failed to allocate writeback connector\n");
+3 -6
drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
··· 128 128 if (!sclk) 129 129 goto free_yclk; 130 130 131 - tiling_mode = kzalloc_objs(*tiling_mode, maximum_number_of_surfaces, 132 - GFP_KERNEL); 131 + tiling_mode = kzalloc_objs(*tiling_mode, maximum_number_of_surfaces); 133 132 if (!tiling_mode) 134 133 goto free_sclk; 135 134 136 - surface_type = kzalloc_objs(*surface_type, maximum_number_of_surfaces, 137 - GFP_KERNEL); 135 + surface_type = kzalloc_objs(*surface_type, maximum_number_of_surfaces); 138 136 if (!surface_type) 139 137 goto free_tiling_mode; 140 138 ··· 3045 3047 int pipe_count, 3046 3048 struct dce_bw_output *calcs_output) 3047 3049 { 3048 - struct bw_calcs_data *data = kzalloc_obj(struct bw_calcs_data, 3049 - GFP_KERNEL); 3050 + struct bw_calcs_data *data = kzalloc_obj(struct bw_calcs_data); 3050 3051 if (!data) 3051 3052 return false; 3052 3053
+14 -28
drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
··· 151 151 switch (asic_id.chip_family) { 152 152 #if defined(CONFIG_DRM_AMD_DC_SI) 153 153 case FAMILY_SI: { 154 - struct clk_mgr_internal *clk_mgr = kzalloc_obj(*clk_mgr, 155 - GFP_KERNEL); 154 + struct clk_mgr_internal *clk_mgr = kzalloc_obj(*clk_mgr); 156 155 157 156 if (clk_mgr == NULL) { 158 157 BREAK_TO_DEBUGGER(); ··· 163 164 #endif 164 165 case FAMILY_CI: 165 166 case FAMILY_KV: { 166 - struct clk_mgr_internal *clk_mgr = kzalloc_obj(*clk_mgr, 167 - GFP_KERNEL); 167 + struct clk_mgr_internal *clk_mgr = kzalloc_obj(*clk_mgr); 168 168 169 169 if (clk_mgr == NULL) { 170 170 BREAK_TO_DEBUGGER(); ··· 173 175 return &clk_mgr->base; 174 176 } 175 177 case FAMILY_CZ: { 176 - struct clk_mgr_internal *clk_mgr = kzalloc_obj(*clk_mgr, 177 - GFP_KERNEL); 178 + struct clk_mgr_internal *clk_mgr = kzalloc_obj(*clk_mgr); 178 179 179 180 if (clk_mgr == NULL) { 180 181 BREAK_TO_DEBUGGER(); ··· 183 186 return &clk_mgr->base; 184 187 } 185 188 case FAMILY_VI: { 186 - struct clk_mgr_internal *clk_mgr = kzalloc_obj(*clk_mgr, 187 - GFP_KERNEL); 189 + struct clk_mgr_internal *clk_mgr = kzalloc_obj(*clk_mgr); 188 190 189 191 if (clk_mgr == NULL) { 190 192 BREAK_TO_DEBUGGER(); ··· 207 211 return &clk_mgr->base; 208 212 } 209 213 case FAMILY_AI: { 210 - struct clk_mgr_internal *clk_mgr = kzalloc_obj(*clk_mgr, 211 - GFP_KERNEL); 214 + struct clk_mgr_internal *clk_mgr = kzalloc_obj(*clk_mgr); 212 215 213 216 if (clk_mgr == NULL) { 214 217 BREAK_TO_DEBUGGER(); ··· 221 226 } 222 227 #if defined(CONFIG_DRM_AMD_DC_FP) 223 228 case FAMILY_RV: { 224 - struct clk_mgr_internal *clk_mgr = kzalloc_obj(*clk_mgr, 225 - GFP_KERNEL); 229 + struct clk_mgr_internal *clk_mgr = kzalloc_obj(*clk_mgr); 226 230 227 231 if (clk_mgr == NULL) { 228 232 BREAK_TO_DEBUGGER(); ··· 249 255 return &clk_mgr->base; 250 256 } 251 257 case FAMILY_NV: { 252 - struct clk_mgr_internal *clk_mgr = kzalloc_obj(*clk_mgr, 253 - GFP_KERNEL); 258 + struct clk_mgr_internal *clk_mgr = kzalloc_obj(*clk_mgr); 254 259 255 260 if 
(clk_mgr == NULL) { 256 261 BREAK_TO_DEBUGGER(); ··· 276 283 } 277 284 case FAMILY_VGH: 278 285 if (ASICREV_IS_VANGOGH(asic_id.hw_internal_rev)) { 279 - struct clk_mgr_vgh *clk_mgr = kzalloc_obj(*clk_mgr, 280 - GFP_KERNEL); 286 + struct clk_mgr_vgh *clk_mgr = kzalloc_obj(*clk_mgr); 281 287 282 288 if (clk_mgr == NULL) { 283 289 BREAK_TO_DEBUGGER(); ··· 288 296 break; 289 297 290 298 case FAMILY_YELLOW_CARP: { 291 - struct clk_mgr_dcn31 *clk_mgr = kzalloc_obj(*clk_mgr, 292 - GFP_KERNEL); 299 + struct clk_mgr_dcn31 *clk_mgr = kzalloc_obj(*clk_mgr); 293 300 294 301 if (clk_mgr == NULL) { 295 302 BREAK_TO_DEBUGGER(); ··· 300 309 } 301 310 break; 302 311 case AMDGPU_FAMILY_GC_10_3_6: { 303 - struct clk_mgr_dcn315 *clk_mgr = kzalloc_obj(*clk_mgr, 304 - GFP_KERNEL); 312 + struct clk_mgr_dcn315 *clk_mgr = kzalloc_obj(*clk_mgr); 305 313 306 314 if (clk_mgr == NULL) { 307 315 BREAK_TO_DEBUGGER(); ··· 312 322 } 313 323 break; 314 324 case AMDGPU_FAMILY_GC_10_3_7: { 315 - struct clk_mgr_dcn316 *clk_mgr = kzalloc_obj(*clk_mgr, 316 - GFP_KERNEL); 325 + struct clk_mgr_dcn316 *clk_mgr = kzalloc_obj(*clk_mgr); 317 326 318 327 if (clk_mgr == NULL) { 319 328 BREAK_TO_DEBUGGER(); ··· 324 335 } 325 336 break; 326 337 case AMDGPU_FAMILY_GC_11_0_0: { 327 - struct clk_mgr_internal *clk_mgr = kzalloc_obj(*clk_mgr, 328 - GFP_KERNEL); 338 + struct clk_mgr_internal *clk_mgr = kzalloc_obj(*clk_mgr); 329 339 330 340 if (clk_mgr == NULL) { 331 341 BREAK_TO_DEBUGGER(); ··· 335 347 } 336 348 337 349 case AMDGPU_FAMILY_GC_11_0_1: { 338 - struct clk_mgr_dcn314 *clk_mgr = kzalloc_obj(*clk_mgr, 339 - GFP_KERNEL); 350 + struct clk_mgr_dcn314 *clk_mgr = kzalloc_obj(*clk_mgr); 340 351 341 352 if (clk_mgr == NULL) { 342 353 BREAK_TO_DEBUGGER(); ··· 348 361 break; 349 362 350 363 case AMDGPU_FAMILY_GC_11_5_0: { 351 - struct clk_mgr_dcn35 *clk_mgr = kzalloc_obj(*clk_mgr, 352 - GFP_KERNEL); 364 + struct clk_mgr_dcn35 *clk_mgr = kzalloc_obj(*clk_mgr); 353 365 354 366 if (clk_mgr == NULL) { 355 367 
BREAK_TO_DEBUGGER();
+1 -2
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
··· 561 561 562 562 dce_clock_read_ss_info(clk_mgr); 563 563 564 - clk_mgr->base.bw_params = kzalloc_obj(*clk_mgr->base.bw_params, 565 - GFP_KERNEL); 564 + clk_mgr->base.bw_params = kzalloc_obj(*clk_mgr->base.bw_params); 566 565 if (!clk_mgr->base.bw_params) { 567 566 BREAK_TO_DEBUGGER(); 568 567 return;
+1 -2
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
··· 1206 1206 1207 1207 clk_mgr->smu_present = false; 1208 1208 1209 - clk_mgr->base.bw_params = kzalloc_obj(*clk_mgr->base.bw_params, 1210 - GFP_KERNEL); 1209 + clk_mgr->base.bw_params = kzalloc_obj(*clk_mgr->base.bw_params); 1211 1210 if (!clk_mgr->base.bw_params) { 1212 1211 BREAK_TO_DEBUGGER(); 1213 1212 return;
+2 -4
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
··· 1549 1549 struct dccg *dccg) 1550 1550 { 1551 1551 struct clk_log_info log_info = {0}; 1552 - struct dcn401_clk_mgr *clk_mgr401 = kzalloc_obj(struct dcn401_clk_mgr, 1553 - GFP_KERNEL); 1552 + struct dcn401_clk_mgr *clk_mgr401 = kzalloc_obj(struct dcn401_clk_mgr); 1554 1553 struct clk_mgr_internal *clk_mgr; 1555 1554 1556 1555 if (!clk_mgr401) ··· 1599 1600 1600 1601 clk_mgr->smu_present = false; 1601 1602 1602 - clk_mgr->base.bw_params = kzalloc_obj(*clk_mgr->base.bw_params, 1603 - GFP_KERNEL); 1603 + clk_mgr->base.bw_params = kzalloc_obj(*clk_mgr->base.bw_params); 1604 1604 if (!clk_mgr->base.bw_params) { 1605 1605 BREAK_TO_DEBUGGER(); 1606 1606 kfree(clk_mgr401);
+1 -2
drivers/gpu/drm/amd/display/dc/core/dc.c
··· 2613 2613 enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR; 2614 2614 struct gpio_pin_info pin_info; 2615 2615 struct gpio *generic; 2616 - struct gpio_generic_mux_config *config = kzalloc_obj(struct gpio_generic_mux_config, 2617 - GFP_KERNEL); 2616 + struct gpio_generic_mux_config *config = kzalloc_obj(struct gpio_generic_mux_config); 2618 2617 2619 2618 if (!config) 2620 2619 return false;
+2 -4
drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
··· 1476 1476 if (*ss_entries_num == 0) 1477 1477 return; 1478 1478 1479 - ss_info = kzalloc_objs(struct spread_spectrum_info, *ss_entries_num, 1480 - GFP_KERNEL); 1479 + ss_info = kzalloc_objs(struct spread_spectrum_info, *ss_entries_num); 1481 1480 ss_info_cur = ss_info; 1482 1481 if (ss_info == NULL) 1483 1482 return; 1484 1483 1485 - ss_data = kzalloc_objs(struct spread_spectrum_data, *ss_entries_num, 1486 - GFP_KERNEL); 1484 + ss_data = kzalloc_objs(struct spread_spectrum_data, *ss_entries_num); 1487 1485 if (ss_data == NULL) 1488 1486 goto out_free_info; 1489 1487
+1 -2
drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
··· 573 573 struct dc_context *ctx, 574 574 uint32_t inst) 575 575 { 576 - struct dce_mem_input *dce_mi = kzalloc_obj(struct dce_mem_input, 577 - GFP_KERNEL); 576 + struct dce_mem_input *dce_mi = kzalloc_obj(struct dce_mem_input); 578 577 579 578 if (!dce_mi) { 580 579 BREAK_TO_DEBUGGER();
+3 -6
drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
··· 608 608 struct dc_context *ctx, 609 609 uint32_t inst) 610 610 { 611 - struct dce_mem_input *dce_mi = kzalloc_obj(struct dce_mem_input, 612 - GFP_KERNEL); 611 + struct dce_mem_input *dce_mi = kzalloc_obj(struct dce_mem_input); 613 612 614 613 if (!dce_mi) { 615 614 BREAK_TO_DEBUGGER(); ··· 1249 1250 1250 1251 static bool underlay_create(struct dc_context *ctx, struct resource_pool *pool) 1251 1252 { 1252 - struct dce110_timing_generator *dce110_tgv = kzalloc_obj(*dce110_tgv, 1253 - GFP_KERNEL); 1254 - struct dce_transform *dce110_xfmv = kzalloc_obj(*dce110_xfmv, 1255 - GFP_KERNEL); 1253 + struct dce110_timing_generator *dce110_tgv = kzalloc_obj(*dce110_tgv); 1254 + struct dce_transform *dce110_xfmv = kzalloc_obj(*dce110_xfmv); 1256 1255 struct dce_mem_input *dce110_miv = kzalloc_obj(*dce110_miv); 1257 1256 struct dce110_opp *dce110_oppv = kzalloc_obj(*dce110_oppv); 1258 1257
+1 -2
drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
··· 580 580 struct dc_context *ctx, 581 581 uint32_t inst) 582 582 { 583 - struct dce_mem_input *dce_mi = kzalloc_obj(struct dce_mem_input, 584 - GFP_KERNEL); 583 + struct dce_mem_input *dce_mi = kzalloc_obj(struct dce_mem_input); 585 584 586 585 if (!dce_mi) { 587 586 BREAK_TO_DEBUGGER();
+1 -2
drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
··· 874 874 struct dc_context *ctx, 875 875 uint32_t inst) 876 876 { 877 - struct dce_mem_input *dce_mi = kzalloc_obj(struct dce_mem_input, 878 - GFP_KERNEL); 877 + struct dce_mem_input *dce_mi = kzalloc_obj(struct dce_mem_input); 879 878 880 879 if (!dce_mi) { 881 880 BREAK_TO_DEBUGGER();
+1 -2
drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
··· 683 683 struct dc_context *ctx, 684 684 uint32_t inst) 685 685 { 686 - struct dce_mem_input *dce_mi = kzalloc_obj(struct dce_mem_input, 687 - GFP_KERNEL); 686 + struct dce_mem_input *dce_mi = kzalloc_obj(struct dce_mem_input); 688 687 689 688 if (!dce_mi) { 690 689 BREAK_TO_DEBUGGER();
+1 -2
drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
··· 689 689 struct dc_context *ctx, 690 690 uint32_t inst) 691 691 { 692 - struct dce_mem_input *dce_mi = kzalloc_obj(struct dce_mem_input, 693 - GFP_KERNEL); 692 + struct dce_mem_input *dce_mi = kzalloc_obj(struct dce_mem_input); 694 693 695 694 if (!dce_mi) { 696 695 BREAK_TO_DEBUGGER();
+1 -2
drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
··· 686 686 687 687 static struct hubbub *dcn10_hubbub_create(struct dc_context *ctx) 688 688 { 689 - struct dcn10_hubbub *dcn10_hubbub = kzalloc_obj(struct dcn10_hubbub, 690 - GFP_KERNEL); 689 + struct dcn10_hubbub *dcn10_hubbub = kzalloc_obj(struct dcn10_hubbub); 691 690 692 691 if (!dcn10_hubbub) 693 692 return NULL;
+3 -6
drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
··· 855 855 struct hubbub *dcn20_hubbub_create(struct dc_context *ctx) 856 856 { 857 857 int i; 858 - struct dcn20_hubbub *hubbub = kzalloc_obj(struct dcn20_hubbub, 859 - GFP_KERNEL); 858 + struct dcn20_hubbub *hubbub = kzalloc_obj(struct dcn20_hubbub); 860 859 861 860 if (!hubbub) 862 861 return NULL; ··· 2241 2242 uint32_t pipe_count = pool->res_cap->num_dwb; 2242 2243 2243 2244 for (i = 0; i < pipe_count; i++) { 2244 - struct dcn20_dwbc *dwbc20 = kzalloc_obj(struct dcn20_dwbc, 2245 - GFP_KERNEL); 2245 + struct dcn20_dwbc *dwbc20 = kzalloc_obj(struct dcn20_dwbc); 2246 2246 2247 2247 if (!dwbc20) { 2248 2248 dm_error("DC: failed to create dwbc20!\n"); ··· 2265 2267 ASSERT(pipe_count > 0); 2266 2268 2267 2269 for (i = 0; i < pipe_count; i++) { 2268 - struct dcn20_mmhubbub *mcif_wb20 = kzalloc_obj(struct dcn20_mmhubbub, 2269 - GFP_KERNEL); 2270 + struct dcn20_mmhubbub *mcif_wb20 = kzalloc_obj(struct dcn20_mmhubbub); 2270 2271 2271 2272 if (!mcif_wb20) { 2272 2273 dm_error("DC: failed to create mcif_wb20!\n");
+1 -2
drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
··· 742 742 743 743 static struct hubbub *dcn201_hubbub_create(struct dc_context *ctx) 744 744 { 745 - struct dcn20_hubbub *hubbub = kzalloc_obj(struct dcn20_hubbub, 746 - GFP_KERNEL); 745 + struct dcn20_hubbub *hubbub = kzalloc_obj(struct dcn20_hubbub); 747 746 748 747 if (!hubbub) 749 748 return NULL;
+1 -2
drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
··· 999 999 { 1000 1000 int i; 1001 1001 1002 - struct dcn20_hubbub *hubbub = kzalloc_obj(struct dcn20_hubbub, 1003 - GFP_KERNEL); 1002 + struct dcn20_hubbub *hubbub = kzalloc_obj(struct dcn20_hubbub); 1004 1003 1005 1004 if (!hubbub) 1006 1005 return NULL;
+3 -6
drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
··· 862 862 { 863 863 int i; 864 864 865 - struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub, 866 - GFP_KERNEL); 865 + struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub); 867 866 868 867 if (!hubbub3) 869 868 return NULL; ··· 1219 1220 uint32_t pipe_count = pool->res_cap->num_dwb; 1220 1221 1221 1222 for (i = 0; i < pipe_count; i++) { 1222 - struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc, 1223 - GFP_KERNEL); 1223 + struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc); 1224 1224 1225 1225 if (!dwbc30) { 1226 1226 dm_error("DC: failed to create dwbc30!\n"); ··· 1243 1245 uint32_t pipe_count = pool->res_cap->num_dwb; 1244 1246 1245 1247 for (i = 0; i < pipe_count; i++) { 1246 - struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub, 1247 - GFP_KERNEL); 1248 + struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub); 1248 1249 1249 1250 if (!mcif_wb30) { 1250 1251 dm_error("DC: failed to create mcif_wb30!\n");
+3 -6
drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
··· 817 817 { 818 818 int i; 819 819 820 - struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub, 821 - GFP_KERNEL); 820 + struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub); 822 821 823 822 if (!hubbub3) 824 823 return NULL; ··· 1179 1180 uint32_t pipe_count = pool->res_cap->num_dwb; 1180 1181 1181 1182 for (i = 0; i < pipe_count; i++) { 1182 - struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc, 1183 - GFP_KERNEL); 1183 + struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc); 1184 1184 1185 1185 if (!dwbc30) { 1186 1186 dm_error("DC: failed to create dwbc30!\n"); ··· 1203 1205 uint32_t pipe_count = pool->res_cap->num_dwb; 1204 1206 1205 1207 for (i = 0; i < pipe_count; i++) { 1206 - struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub, 1207 - GFP_KERNEL); 1208 + struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub); 1208 1209 1209 1210 if (!mcif_wb30) { 1210 1211 dm_error("DC: failed to create mcif_wb30!\n");
+9 -18
drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
··· 257 257 { 258 258 int i; 259 259 260 - struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub, 261 - GFP_KERNEL); 260 + struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub); 262 261 263 262 if (!hubbub3) 264 263 return NULL; ··· 445 446 static struct clock_source *dcn302_clock_source_create(struct dc_context *ctx, struct dc_bios *bios, 446 447 enum clock_source_id id, const struct dce110_clk_src_regs *regs, bool dp_clk_src) 447 448 { 448 - struct dce110_clk_src *clk_src = kzalloc_obj(struct dce110_clk_src, 449 - GFP_KERNEL); 449 + struct dce110_clk_src *clk_src = kzalloc_obj(struct dce110_clk_src); 450 450 451 451 if (!clk_src) 452 452 return NULL; ··· 714 716 uint32_t pipe_count = pool->res_cap->num_dwb; 715 717 716 718 for (i = 0; i < pipe_count; i++) { 717 - struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc, 718 - GFP_KERNEL); 719 + struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc); 719 720 720 721 if (!dwbc30) { 721 722 dm_error("DC: failed to create dwbc30!\n"); ··· 749 752 uint32_t pipe_count = pool->res_cap->num_dwb; 750 753 751 754 for (i = 0; i < pipe_count; i++) { 752 - struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub, 753 - GFP_KERNEL); 755 + struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub); 754 756 755 757 if (!mcif_wb30) { 756 758 dm_error("DC: failed to create mcif_wb30!\n"); ··· 789 793 790 794 static struct dce_aux *dcn302_aux_engine_create(struct dc_context *ctx, uint32_t inst) 791 795 { 792 - struct aux_engine_dce110 *aux_engine = kzalloc_obj(struct aux_engine_dce110, 793 - GFP_KERNEL); 796 + struct aux_engine_dce110 *aux_engine = kzalloc_obj(struct aux_engine_dce110); 794 797 795 798 if (!aux_engine) 796 799 return NULL; ··· 820 825 821 826 static struct dce_i2c_hw *dcn302_i2c_hw_create(struct dc_context *ctx, uint32_t inst) 822 827 { 823 - struct dce_i2c_hw *dce_i2c_hw = kzalloc_obj(struct dce_i2c_hw, 824 - GFP_KERNEL); 828 + struct dce_i2c_hw *dce_i2c_hw = 
kzalloc_obj(struct dce_i2c_hw); 825 829 826 830 if (!dce_i2c_hw) 827 831 return NULL; ··· 894 900 struct dc_context *ctx, 895 901 const struct encoder_init_data *enc_init_data) 896 902 { 897 - struct dcn20_link_encoder *enc20 = kzalloc_obj(struct dcn20_link_encoder, 898 - GFP_KERNEL); 903 + struct dcn20_link_encoder *enc20 = kzalloc_obj(struct dcn20_link_encoder); 899 904 900 905 if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) 901 906 return NULL; ··· 920 927 921 928 static struct panel_cntl *dcn302_panel_cntl_create(const struct panel_cntl_init_data *init_data) 922 929 { 923 - struct dce_panel_cntl *panel_cntl = kzalloc_obj(struct dce_panel_cntl, 924 - GFP_KERNEL); 930 + struct dce_panel_cntl *panel_cntl = kzalloc_obj(struct dce_panel_cntl); 925 931 926 932 if (!panel_cntl) 927 933 return NULL; ··· 1512 1520 1513 1521 struct resource_pool *dcn302_create_resource_pool(const struct dc_init_data *init_data, struct dc *dc) 1514 1522 { 1515 - struct resource_pool *pool = kzalloc_obj(struct resource_pool, 1516 - GFP_KERNEL); 1523 + struct resource_pool *pool = kzalloc_obj(struct resource_pool); 1517 1524 1518 1525 if (!pool) 1519 1526 return NULL;
+9 -18
drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
··· 253 253 { 254 254 int i; 255 255 256 - struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub, 257 - GFP_KERNEL); 256 + struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub); 258 257 259 258 if (!hubbub3) 260 259 return NULL; ··· 429 430 static struct clock_source *dcn303_clock_source_create(struct dc_context *ctx, struct dc_bios *bios, 430 431 enum clock_source_id id, const struct dce110_clk_src_regs *regs, bool dp_clk_src) 431 432 { 432 - struct dce110_clk_src *clk_src = kzalloc_obj(struct dce110_clk_src, 433 - GFP_KERNEL); 433 + struct dce110_clk_src *clk_src = kzalloc_obj(struct dce110_clk_src); 434 434 435 435 if (!clk_src) 436 436 return NULL; ··· 675 677 uint32_t pipe_count = pool->res_cap->num_dwb; 676 678 677 679 for (i = 0; i < pipe_count; i++) { 678 - struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc, 679 - GFP_KERNEL); 680 + struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc); 680 681 681 682 if (!dwbc30) { 682 683 dm_error("DC: failed to create dwbc30!\n"); ··· 710 713 uint32_t pipe_count = pool->res_cap->num_dwb; 711 714 712 715 for (i = 0; i < pipe_count; i++) { 713 - struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub, 714 - GFP_KERNEL); 716 + struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub); 715 717 716 718 if (!mcif_wb30) { 717 719 dm_error("DC: failed to create mcif_wb30!\n"); ··· 747 751 748 752 static struct dce_aux *dcn303_aux_engine_create(struct dc_context *ctx, uint32_t inst) 749 753 { 750 - struct aux_engine_dce110 *aux_engine = kzalloc_obj(struct aux_engine_dce110, 751 - GFP_KERNEL); 754 + struct aux_engine_dce110 *aux_engine = kzalloc_obj(struct aux_engine_dce110); 752 755 753 756 if (!aux_engine) 754 757 return NULL; ··· 775 780 776 781 static struct dce_i2c_hw *dcn303_i2c_hw_create(struct dc_context *ctx, uint32_t inst) 777 782 { 778 - struct dce_i2c_hw *dce_i2c_hw = kzalloc_obj(struct dce_i2c_hw, 779 - GFP_KERNEL); 783 + struct dce_i2c_hw *dce_i2c_hw = 
kzalloc_obj(struct dce_i2c_hw); 780 784 781 785 if (!dce_i2c_hw) 782 786 return NULL; ··· 839 845 struct dc_context *ctx, 840 846 const struct encoder_init_data *enc_init_data) 841 847 { 842 - struct dcn20_link_encoder *enc20 = kzalloc_obj(struct dcn20_link_encoder, 843 - GFP_KERNEL); 848 + struct dcn20_link_encoder *enc20 = kzalloc_obj(struct dcn20_link_encoder); 844 849 845 850 if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) 846 851 return NULL; ··· 865 872 866 873 static struct panel_cntl *dcn303_panel_cntl_create(const struct panel_cntl_init_data *init_data) 867 874 { 868 - struct dce_panel_cntl *panel_cntl = kzalloc_obj(struct dce_panel_cntl, 869 - GFP_KERNEL); 875 + struct dce_panel_cntl *panel_cntl = kzalloc_obj(struct dce_panel_cntl); 870 876 871 877 if (!panel_cntl) 872 878 return NULL; ··· 1444 1452 1445 1453 struct resource_pool *dcn303_create_resource_pool(const struct dc_init_data *init_data, struct dc *dc) 1446 1454 { 1447 - struct resource_pool *pool = kzalloc_obj(struct resource_pool, 1448 - GFP_KERNEL); 1455 + struct resource_pool *pool = kzalloc_obj(struct resource_pool); 1449 1456 1450 1457 if (!pool) 1451 1458 return NULL;
+4 -8
drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
··· 1025 1025 { 1026 1026 int i; 1027 1027 1028 - struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub, 1029 - GFP_KERNEL); 1028 + struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub); 1030 1029 1031 1030 if (!hubbub3) 1032 1031 return NULL; ··· 1283 1284 apg_inst = hpo_dp_inst; 1284 1285 1285 1286 /* allocate HPO stream encoder and create VPG sub-block */ 1286 - hpo_dp_enc31 = kzalloc_obj(struct dcn31_hpo_dp_stream_encoder, 1287 - GFP_KERNEL); 1287 + hpo_dp_enc31 = kzalloc_obj(struct dcn31_hpo_dp_stream_encoder); 1288 1288 vpg = dcn31_vpg_create(ctx, vpg_inst); 1289 1289 apg = dcn31_apg_create(ctx, apg_inst); 1290 1290 ··· 1517 1519 uint32_t pipe_count = pool->res_cap->num_dwb; 1518 1520 1519 1521 for (i = 0; i < pipe_count; i++) { 1520 - struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc, 1521 - GFP_KERNEL); 1522 + struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc); 1522 1523 1523 1524 if (!dwbc30) { 1524 1525 dm_error("DC: failed to create dwbc30!\n"); ··· 1541 1544 uint32_t pipe_count = pool->res_cap->num_dwb; 1542 1545 1543 1546 for (i = 0; i < pipe_count; i++) { 1544 - struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub, 1545 - GFP_KERNEL); 1547 + struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub); 1546 1548 1547 1549 if (!mcif_wb30) { 1548 1550 dm_error("DC: failed to create mcif_wb30!\n");
+4 -8
drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
··· 1083 1083 { 1084 1084 int i; 1085 1085 1086 - struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub, 1087 - GFP_KERNEL); 1086 + struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub); 1088 1087 1089 1088 if (!hubbub3) 1090 1089 return NULL; ··· 1342 1343 apg_inst = hpo_dp_inst; 1343 1344 1344 1345 /* allocate HPO stream encoder and create VPG sub-block */ 1345 - hpo_dp_enc31 = kzalloc_obj(struct dcn31_hpo_dp_stream_encoder, 1346 - GFP_KERNEL); 1346 + hpo_dp_enc31 = kzalloc_obj(struct dcn31_hpo_dp_stream_encoder); 1347 1347 vpg = dcn31_vpg_create(ctx, vpg_inst); 1348 1348 apg = dcn31_apg_create(ctx, apg_inst); 1349 1349 ··· 1575 1577 uint32_t pipe_count = pool->res_cap->num_dwb; 1576 1578 1577 1579 for (i = 0; i < pipe_count; i++) { 1578 - struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc, 1579 - GFP_KERNEL); 1580 + struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc); 1580 1581 1581 1582 if (!dwbc30) { 1582 1583 dm_error("DC: failed to create dwbc30!\n"); ··· 1599 1602 uint32_t pipe_count = pool->res_cap->num_dwb; 1600 1603 1601 1604 for (i = 0; i < pipe_count; i++) { 1602 - struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub, 1603 - GFP_KERNEL); 1605 + struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub); 1604 1606 1605 1607 if (!mcif_wb30) { 1606 1608 dm_error("DC: failed to create mcif_wb30!\n");
+4 -8
drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
··· 1024 1024 { 1025 1025 int i; 1026 1026 1027 - struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub, 1028 - GFP_KERNEL); 1027 + struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub); 1029 1028 1030 1029 if (!hubbub3) 1031 1030 return NULL; ··· 1284 1285 apg_inst = hpo_dp_inst; 1285 1286 1286 1287 /* allocate HPO stream encoder and create VPG sub-block */ 1287 - hpo_dp_enc31 = kzalloc_obj(struct dcn31_hpo_dp_stream_encoder, 1288 - GFP_KERNEL); 1288 + hpo_dp_enc31 = kzalloc_obj(struct dcn31_hpo_dp_stream_encoder); 1289 1289 vpg = dcn31_vpg_create(ctx, vpg_inst); 1290 1290 apg = dcn31_apg_create(ctx, apg_inst); 1291 1291 ··· 1518 1520 uint32_t pipe_count = pool->res_cap->num_dwb; 1519 1521 1520 1522 for (i = 0; i < pipe_count; i++) { 1521 - struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc, 1522 - GFP_KERNEL); 1523 + struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc); 1523 1524 1524 1525 if (!dwbc30) { 1525 1526 dm_error("DC: failed to create dwbc30!\n"); ··· 1542 1545 uint32_t pipe_count = pool->res_cap->num_dwb; 1543 1546 1544 1547 for (i = 0; i < pipe_count; i++) { 1545 - struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub, 1546 - GFP_KERNEL); 1548 + struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub); 1547 1549 1548 1550 if (!mcif_wb30) { 1549 1551 dm_error("DC: failed to create mcif_wb30!\n");
+4 -8
drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
··· 1017 1017 { 1018 1018 int i; 1019 1019 1020 - struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub, 1021 - GFP_KERNEL); 1020 + struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub); 1022 1021 1023 1022 if (!hubbub3) 1024 1023 return NULL; ··· 1278 1279 apg_inst = hpo_dp_inst; 1279 1280 1280 1281 /* allocate HPO stream encoder and create VPG sub-block */ 1281 - hpo_dp_enc31 = kzalloc_obj(struct dcn31_hpo_dp_stream_encoder, 1282 - GFP_KERNEL); 1282 + hpo_dp_enc31 = kzalloc_obj(struct dcn31_hpo_dp_stream_encoder); 1283 1283 vpg = dcn31_vpg_create(ctx, vpg_inst); 1284 1284 apg = dcn31_apg_create(ctx, apg_inst); 1285 1285 ··· 1510 1512 uint32_t pipe_count = pool->res_cap->num_dwb; 1511 1513 1512 1514 for (i = 0; i < pipe_count; i++) { 1513 - struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc, 1514 - GFP_KERNEL); 1515 + struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc); 1515 1516 1516 1517 if (!dwbc30) { 1517 1518 dm_error("DC: failed to create dwbc30!\n"); ··· 1534 1537 uint32_t pipe_count = pool->res_cap->num_dwb; 1535 1538 1536 1539 for (i = 0; i < pipe_count; i++) { 1537 - struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub, 1538 - GFP_KERNEL); 1540 + struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub); 1539 1541 1540 1542 if (!mcif_wb30) { 1541 1543 dm_error("DC: failed to create mcif_wb30!\n");
+4 -8
drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
··· 837 837 { 838 838 int i; 839 839 840 - struct dcn20_hubbub *hubbub2 = kzalloc_obj(struct dcn20_hubbub, 841 - GFP_KERNEL); 840 + struct dcn20_hubbub *hubbub2 = kzalloc_obj(struct dcn20_hubbub); 842 841 843 842 if (!hubbub2) 844 843 return NULL; ··· 1273 1274 apg_inst = hpo_dp_inst; 1274 1275 1275 1276 /* allocate HPO stream encoder and create VPG sub-block */ 1276 - hpo_dp_enc31 = kzalloc_obj(struct dcn31_hpo_dp_stream_encoder, 1277 - GFP_KERNEL); 1277 + hpo_dp_enc31 = kzalloc_obj(struct dcn31_hpo_dp_stream_encoder); 1278 1278 vpg = dcn32_vpg_create(ctx, vpg_inst); 1279 1279 apg = dcn31_apg_create(ctx, apg_inst); 1280 1280 ··· 1508 1510 uint32_t dwb_count = pool->res_cap->num_dwb; 1509 1511 1510 1512 for (i = 0; i < dwb_count; i++) { 1511 - struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc, 1512 - GFP_KERNEL); 1513 + struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc); 1513 1514 1514 1515 if (!dwbc30) { 1515 1516 dm_error("DC: failed to create dwbc30!\n"); ··· 1536 1539 uint32_t dwb_count = pool->res_cap->num_dwb; 1537 1540 1538 1541 for (i = 0; i < dwb_count; i++) { 1539 - struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub, 1540 - GFP_KERNEL); 1542 + struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub); 1541 1543 1542 1544 if (!mcif_wb30) { 1543 1545 dm_error("DC: failed to create mcif_wb30!\n");
+4 -8
drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
··· 831 831 { 832 832 int i; 833 833 834 - struct dcn20_hubbub *hubbub2 = kzalloc_obj(struct dcn20_hubbub, 835 - GFP_KERNEL); 834 + struct dcn20_hubbub *hubbub2 = kzalloc_obj(struct dcn20_hubbub); 836 835 837 836 if (!hubbub2) 838 837 return NULL; ··· 1254 1255 apg_inst = hpo_dp_inst; 1255 1256 1256 1257 /* allocate HPO stream encoder and create VPG sub-block */ 1257 - hpo_dp_enc31 = kzalloc_obj(struct dcn31_hpo_dp_stream_encoder, 1258 - GFP_KERNEL); 1258 + hpo_dp_enc31 = kzalloc_obj(struct dcn31_hpo_dp_stream_encoder); 1259 1259 vpg = dcn321_vpg_create(ctx, vpg_inst); 1260 1260 apg = dcn321_apg_create(ctx, apg_inst); 1261 1261 ··· 1488 1490 uint32_t dwb_count = pool->res_cap->num_dwb; 1489 1491 1490 1492 for (i = 0; i < dwb_count; i++) { 1491 - struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc, 1492 - GFP_KERNEL); 1493 + struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc); 1493 1494 1494 1495 if (!dwbc30) { 1495 1496 dm_error("DC: failed to create dwbc30!\n"); ··· 1516 1519 uint32_t dwb_count = pool->res_cap->num_dwb; 1517 1520 1518 1521 for (i = 0; i < dwb_count; i++) { 1519 - struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub, 1520 - GFP_KERNEL); 1522 + struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub); 1521 1523 1522 1524 if (!mcif_wb30) { 1523 1525 dm_error("DC: failed to create mcif_wb30!\n");
+4 -8
drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
··· 977 977 { 978 978 int i; 979 979 980 - struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub, 981 - GFP_KERNEL); 980 + struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub); 982 981 983 982 if (!hubbub3) 984 983 return NULL; ··· 1337 1338 apg_inst = hpo_dp_inst; 1338 1339 1339 1340 /* allocate HPO stream encoder and create VPG sub-block */ 1340 - hpo_dp_enc31 = kzalloc_obj(struct dcn31_hpo_dp_stream_encoder, 1341 - GFP_KERNEL); 1341 + hpo_dp_enc31 = kzalloc_obj(struct dcn31_hpo_dp_stream_encoder); 1342 1342 vpg = dcn31_vpg_create(ctx, vpg_inst); 1343 1343 apg = dcn31_apg_create(ctx, apg_inst); 1344 1344 ··· 1603 1605 uint32_t pipe_count = pool->res_cap->num_dwb; 1604 1606 1605 1607 for (i = 0; i < pipe_count; i++) { 1606 - struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc, 1607 - GFP_KERNEL); 1608 + struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc); 1608 1609 1609 1610 if (!dwbc30) { 1610 1611 dm_error("DC: failed to create dwbc30!\n"); ··· 1641 1644 uint32_t pipe_count = pool->res_cap->num_dwb; 1642 1645 1643 1646 for (i = 0; i < pipe_count; i++) { 1644 - struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub, 1645 - GFP_KERNEL); 1647 + struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub); 1646 1648 1647 1649 if (!mcif_wb30) { 1648 1650 dm_error("DC: failed to create mcif_wb30!\n");
+4 -8
drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
··· 957 957 { 958 958 int i; 959 959 960 - struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub, 961 - GFP_KERNEL); 960 + struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub); 962 961 963 962 if (!hubbub3) 964 963 return NULL; ··· 1317 1318 apg_inst = hpo_dp_inst; 1318 1319 1319 1320 /* allocate HPO stream encoder and create VPG sub-block */ 1320 - hpo_dp_enc31 = kzalloc_obj(struct dcn31_hpo_dp_stream_encoder, 1321 - GFP_KERNEL); 1321 + hpo_dp_enc31 = kzalloc_obj(struct dcn31_hpo_dp_stream_encoder); 1322 1322 vpg = dcn31_vpg_create(ctx, vpg_inst); 1323 1323 apg = dcn31_apg_create(ctx, apg_inst); 1324 1324 ··· 1583 1585 uint32_t pipe_count = pool->res_cap->num_dwb; 1584 1586 1585 1587 for (i = 0; i < pipe_count; i++) { 1586 - struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc, 1587 - GFP_KERNEL); 1588 + struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc); 1588 1589 1589 1590 if (!dwbc30) { 1590 1591 dm_error("DC: failed to create dwbc30!\n"); ··· 1621 1624 uint32_t pipe_count = pool->res_cap->num_dwb; 1622 1625 1623 1626 for (i = 0; i < pipe_count; i++) { 1624 - struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub, 1625 - GFP_KERNEL); 1627 + struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub); 1626 1628 1627 1629 if (!mcif_wb30) { 1628 1630 dm_error("DC: failed to create mcif_wb30!\n");
+4 -8
drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
··· 964 964 { 965 965 int i; 966 966 967 - struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub, 968 - GFP_KERNEL); 967 + struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub); 969 968 970 969 if (!hubbub3) 971 970 return NULL; ··· 1324 1325 apg_inst = hpo_dp_inst; 1325 1326 1326 1327 /* allocate HPO stream encoder and create VPG sub-block */ 1327 - hpo_dp_enc31 = kzalloc_obj(struct dcn31_hpo_dp_stream_encoder, 1328 - GFP_KERNEL); 1328 + hpo_dp_enc31 = kzalloc_obj(struct dcn31_hpo_dp_stream_encoder); 1329 1329 vpg = dcn31_vpg_create(ctx, vpg_inst); 1330 1330 apg = dcn31_apg_create(ctx, apg_inst); 1331 1331 ··· 1590 1592 uint32_t pipe_count = pool->res_cap->num_dwb; 1591 1593 1592 1594 for (i = 0; i < pipe_count; i++) { 1593 - struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc, 1594 - GFP_KERNEL); 1595 + struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc); 1595 1596 1596 1597 if (!dwbc30) { 1597 1598 dm_error("DC: failed to create dwbc30!\n"); ··· 1628 1631 uint32_t pipe_count = pool->res_cap->num_dwb; 1629 1632 1630 1633 for (i = 0; i < pipe_count; i++) { 1631 - struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub, 1632 - GFP_KERNEL); 1634 + struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub); 1633 1635 1634 1636 if (!mcif_wb30) { 1635 1637 dm_error("DC: failed to create mcif_wb30!\n");
+4 -8
drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
··· 847 847 { 848 848 int i; 849 849 850 - struct dcn20_hubbub *hubbub2 = kzalloc_obj(struct dcn20_hubbub, 851 - GFP_KERNEL); 850 + struct dcn20_hubbub *hubbub2 = kzalloc_obj(struct dcn20_hubbub); 852 851 853 852 if (!hubbub2) 854 853 return NULL; ··· 1277 1278 apg_inst = hpo_dp_inst; 1278 1279 1279 1280 /* allocate HPO stream encoder and create VPG sub-block */ 1280 - hpo_dp_enc31 = kzalloc_obj(struct dcn31_hpo_dp_stream_encoder, 1281 - GFP_KERNEL); 1281 + hpo_dp_enc31 = kzalloc_obj(struct dcn31_hpo_dp_stream_encoder); 1282 1282 vpg = dcn401_vpg_create(ctx, vpg_inst); 1283 1283 apg = dcn401_apg_create(ctx, apg_inst); 1284 1284 ··· 1549 1551 uint32_t dwb_count = pool->res_cap->num_dwb; 1550 1552 1551 1553 for (i = 0; i < dwb_count; i++) { 1552 - struct dcn30_dwbc *dwbc401 = kzalloc_obj(struct dcn30_dwbc, 1553 - GFP_KERNEL); 1554 + struct dcn30_dwbc *dwbc401 = kzalloc_obj(struct dcn30_dwbc); 1554 1555 1555 1556 if (!dwbc401) { 1556 1557 dm_error("DC: failed to create dwbc401!\n"); ··· 1579 1582 uint32_t dwb_count = pool->res_cap->num_dwb; 1580 1583 1581 1584 for (i = 0; i < dwb_count; i++) { 1582 - struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub, 1583 - GFP_KERNEL); 1585 + struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub); 1584 1586 1585 1587 if (!mcif_wb30) { 1586 1588 dm_error("DC: failed to create mcif_wb30!\n");
+2 -4
drivers/gpu/drm/amd/display/modules/color/color_gamma.c
··· 1945 1945 if (!rgb_user) 1946 1946 goto rgb_user_alloc_fail; 1947 1947 1948 - axis_x = kvzalloc_objs(*axis_x, ramp->num_entries + 3, 1949 - GFP_KERNEL); 1948 + axis_x = kvzalloc_objs(*axis_x, ramp->num_entries + 3); 1950 1949 if (!axis_x) 1951 1950 goto axis_x_alloc_fail; 1952 1951 ··· 1964 1965 scale_gamma_dx(rgb_user, ramp, dividers); 1965 1966 } 1966 1967 1967 - rgb_regamma = kvzalloc_objs(*rgb_regamma, MAX_HW_POINTS + _EXTRA_POINTS, 1968 - GFP_KERNEL); 1968 + rgb_regamma = kvzalloc_objs(*rgb_regamma, MAX_HW_POINTS + _EXTRA_POINTS); 1969 1969 if (!rgb_regamma) 1970 1970 goto rgb_regamma_alloc_fail; 1971 1971
+1 -2
drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
··· 556 556 le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); 557 557 ATOM_PowerTune_Table *pt; 558 558 adev->pm.dpm.dyn_state.cac_tdp_table = 559 - kzalloc_obj(struct amdgpu_cac_tdp_table, 560 - GFP_KERNEL); 559 + kzalloc_obj(struct amdgpu_cac_tdp_table); 561 560 if (!adev->pm.dpm.dyn_state.cac_tdp_table) 562 561 return -ENOMEM; 563 562 if (rev > 0) {
+1 -2
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
··· 7442 7442 return ret; 7443 7443 7444 7444 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries = 7445 - kzalloc_objs(struct amdgpu_clock_voltage_dependency_entry, 4, 7446 - GFP_KERNEL); 7445 + kzalloc_objs(struct amdgpu_clock_voltage_dependency_entry, 4); 7447 7446 if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) 7448 7447 return -ENOMEM; 7449 7448
+1 -2
drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
··· 321 321 PP_ASSERT_WITH_CODE((0 != clk_volt_pp_table->count), 322 322 "Invalid PowerPlay Table!", return -1); 323 323 324 - table = kzalloc_flex(*table, values, clk_volt_pp_table->count, 325 - GFP_KERNEL); 324 + table = kzalloc_flex(*table, values, clk_volt_pp_table->count); 326 325 if (!table) 327 326 return -ENOMEM; 328 327
+7 -14
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
··· 382 382 unsigned long i; 383 383 struct phm_clock_voltage_dependency_table *dep_table; 384 384 385 - dep_table = kzalloc_flex(*dep_table, entries, table->ucNumEntries, 386 - GFP_KERNEL); 385 + dep_table = kzalloc_flex(*dep_table, entries, table->ucNumEntries); 387 386 if (NULL == dep_table) 388 387 return -ENOMEM; 389 388 ··· 408 409 unsigned long i; 409 410 struct phm_clock_array *clock_table; 410 411 411 - clock_table = kzalloc_flex(*clock_table, values, table->count, 412 - GFP_KERNEL); 412 + clock_table = kzalloc_flex(*clock_table, values, table->count); 413 413 if (!clock_table) 414 414 return -ENOMEM; 415 415 ··· 1208 1210 unsigned long i; 1209 1211 struct phm_uvd_clock_voltage_dependency_table *uvd_table; 1210 1212 1211 - uvd_table = kzalloc_flex(*uvd_table, entries, table->numEntries, 1212 - GFP_KERNEL); 1213 + uvd_table = kzalloc_flex(*uvd_table, entries, table->numEntries); 1213 1214 if (!uvd_table) 1214 1215 return -ENOMEM; 1215 1216 ··· 1237 1240 unsigned long i; 1238 1241 struct phm_vce_clock_voltage_dependency_table *vce_table; 1239 1242 1240 - vce_table = kzalloc_flex(*vce_table, entries, table->numEntries, 1241 - GFP_KERNEL); 1243 + vce_table = kzalloc_flex(*vce_table, entries, table->numEntries); 1242 1244 if (!vce_table) 1243 1245 return -ENOMEM; 1244 1246 ··· 1264 1268 unsigned long i; 1265 1269 struct phm_samu_clock_voltage_dependency_table *samu_table; 1266 1270 1267 - samu_table = kzalloc_flex(*samu_table, entries, table->numEntries, 1268 - GFP_KERNEL); 1271 + samu_table = kzalloc_flex(*samu_table, entries, table->numEntries); 1269 1272 if (!samu_table) 1270 1273 return -ENOMEM; 1271 1274 ··· 1288 1293 unsigned long i; 1289 1294 struct phm_acp_clock_voltage_dependency_table *acp_table; 1290 1295 1291 - acp_table = kzalloc_flex(*acp_table, entries, table->numEntries, 1292 - GFP_KERNEL); 1296 + acp_table = kzalloc_flex(*acp_table, entries, table->numEntries); 1293 1297 if (!acp_table) 1294 1298 return -ENOMEM; 1295 1299 ··· 1509 1515 static 
int get_platform_power_management_table(struct pp_hwmgr *hwmgr, 1510 1516 ATOM_PPLIB_PPM_Table *atom_ppm_table) 1511 1517 { 1512 - struct phm_ppm_table *ptr = kzalloc_obj(struct phm_ppm_table, 1513 - GFP_KERNEL); 1518 + struct phm_ppm_table *ptr = kzalloc_obj(struct phm_ppm_table); 1514 1519 1515 1520 if (NULL == ptr) 1516 1521 return -ENOMEM;
+1 -2
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c
··· 852 852 PP_ASSERT_WITH_CODE(clk_volt_pp_table->count, 853 853 "Invalid PowerPlay Table!", return -1); 854 854 855 - table = kzalloc_flex(*table, values, clk_volt_pp_table->count, 856 - GFP_KERNEL); 855 + table = kzalloc_flex(*table, values, clk_volt_pp_table->count); 857 856 if (!table) 858 857 return -ENOMEM; 859 858
+1 -2
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
··· 307 307 struct smu_dpm_context *smu_dpm = &smu->smu_dpm; 308 308 struct smu_dpm_policy *policy; 309 309 310 - smu_dpm->dpm_context = kzalloc_obj(struct smu_11_0_dpm_context, 311 - GFP_KERNEL); 310 + smu_dpm->dpm_context = kzalloc_obj(struct smu_11_0_dpm_context); 312 311 if (!smu_dpm->dpm_context) 313 312 return -ENOMEM; 314 313 smu_dpm->dpm_context_size = sizeof(struct smu_11_0_dpm_context);
+1 -2
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
··· 934 934 { 935 935 struct smu_dpm_context *smu_dpm = &smu->smu_dpm; 936 936 937 - smu_dpm->dpm_context = kzalloc_obj(struct smu_11_0_dpm_context, 938 - GFP_KERNEL); 937 + smu_dpm->dpm_context = kzalloc_obj(struct smu_11_0_dpm_context); 939 938 if (!smu_dpm->dpm_context) 940 939 return -ENOMEM; 941 940
+1 -2
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
··· 921 921 { 922 922 struct smu_dpm_context *smu_dpm = &smu->smu_dpm; 923 923 924 - smu_dpm->dpm_context = kzalloc_obj(struct smu_11_0_dpm_context, 925 - GFP_KERNEL); 924 + smu_dpm->dpm_context = kzalloc_obj(struct smu_11_0_dpm_context); 926 925 if (!smu_dpm->dpm_context) 927 926 return -ENOMEM; 928 927
+1 -2
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
··· 433 433 { 434 434 struct smu_dpm_context *smu_dpm = &smu->smu_dpm; 435 435 436 - smu_dpm->dpm_context = kzalloc_obj(struct smu_11_0_dpm_context, 437 - GFP_KERNEL); 436 + smu_dpm->dpm_context = kzalloc_obj(struct smu_11_0_dpm_context); 438 437 if (!smu_dpm->dpm_context) 439 438 return -ENOMEM; 440 439
+1 -2
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
··· 294 294 struct smu_dpm_context *smu_dpm = &smu->smu_dpm; 295 295 struct smu_dpm_policy *policy; 296 296 297 - smu_dpm->dpm_context = kzalloc_obj(struct smu_13_0_dpm_context, 298 - GFP_KERNEL); 297 + smu_dpm->dpm_context = kzalloc_obj(struct smu_13_0_dpm_context); 299 298 if (!smu_dpm->dpm_context) 300 299 return -ENOMEM; 301 300 smu_dpm->dpm_context_size = sizeof(struct smu_13_0_dpm_context);
+1 -2
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
··· 553 553 if (smu_power->power_context || smu_power->power_context_size != 0) 554 554 return -EINVAL; 555 555 556 - smu_power->power_context = kzalloc_obj(struct smu_13_0_power_context, 557 - GFP_KERNEL); 556 + smu_power->power_context = kzalloc_obj(struct smu_13_0_power_context); 558 557 if (!smu_power->power_context) 559 558 return -ENOMEM; 560 559 smu_power->power_context_size = sizeof(struct smu_13_0_power_context);
+1 -2
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
··· 530 530 { 531 531 struct smu_dpm_context *smu_dpm = &smu->smu_dpm; 532 532 533 - smu_dpm->dpm_context = kzalloc_obj(struct smu_13_0_dpm_context, 534 - GFP_KERNEL); 533 + smu_dpm->dpm_context = kzalloc_obj(struct smu_13_0_dpm_context); 535 534 if (!smu_dpm->dpm_context) 536 535 return -ENOMEM; 537 536
+1 -2
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
··· 559 559 { 560 560 struct smu_dpm_context *smu_dpm = &smu->smu_dpm; 561 561 562 - smu_dpm->dpm_context = kzalloc_obj(struct smu_13_0_dpm_context, 563 - GFP_KERNEL); 562 + smu_dpm->dpm_context = kzalloc_obj(struct smu_13_0_dpm_context); 564 563 if (!smu_dpm->dpm_context) 565 564 return -ENOMEM; 566 565
+1 -2
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
··· 538 538 if (smu_power->power_context || smu_power->power_context_size != 0) 539 539 return -EINVAL; 540 540 541 - smu_power->power_context = kzalloc_obj(struct smu_14_0_dpm_context, 542 - GFP_KERNEL); 541 + smu_power->power_context = kzalloc_obj(struct smu_14_0_dpm_context); 543 542 if (!smu_power->power_context) 544 543 return -ENOMEM; 545 544 smu_power->power_context_size = sizeof(struct smu_14_0_dpm_context);
+1 -2
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
··· 428 428 { 429 429 struct smu_dpm_context *smu_dpm = &smu->smu_dpm; 430 430 431 - smu_dpm->dpm_context = kzalloc_obj(struct smu_14_0_dpm_context, 432 - GFP_KERNEL); 431 + smu_dpm->dpm_context = kzalloc_obj(struct smu_14_0_dpm_context); 433 432 if (!smu_dpm->dpm_context) 434 433 return -ENOMEM; 435 434
+1 -2
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0.c
··· 507 507 if (smu_power->power_context || smu_power->power_context_size != 0) 508 508 return -EINVAL; 509 509 510 - smu_power->power_context = kzalloc_obj(struct smu_15_0_dpm_context, 511 - GFP_KERNEL); 510 + smu_power->power_context = kzalloc_obj(struct smu_15_0_dpm_context); 512 511 if (!smu_power->power_context) 513 512 return -ENOMEM; 514 513 smu_power->power_context_size = sizeof(struct smu_15_0_dpm_context);
+1 -2
drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_virt_ras_cmd.c
··· 365 365 { 366 366 struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); 367 367 368 - ras_mgr->virt_ras_cmd = kzalloc_obj(struct amdgpu_virt_ras_cmd, 369 - GFP_KERNEL); 368 + ras_mgr->virt_ras_cmd = kzalloc_obj(struct amdgpu_virt_ras_cmd); 370 369 if (!ras_mgr->virt_ras_cmd) 371 370 return -ENOMEM; 372 371
+1 -2
drivers/gpu/drm/arm/malidp_mw.c
··· 66 66 67 67 static void malidp_mw_connector_reset(struct drm_connector *connector) 68 68 { 69 - struct malidp_mw_connector_state *mw_state = kzalloc_obj(*mw_state, 70 - GFP_KERNEL); 69 + struct malidp_mw_connector_state *mw_state = kzalloc_obj(*mw_state); 71 70 72 71 if (connector->state) 73 72 __drm_atomic_helper_connector_destroy_state(connector->state);
+1 -2
drivers/gpu/drm/ast/ast_dp.c
··· 479 479 480 480 static void ast_astdp_connector_reset(struct drm_connector *connector) 481 481 { 482 - struct ast_astdp_connector_state *astdp_state = kzalloc_obj(*astdp_state, 483 - GFP_KERNEL); 482 + struct ast_astdp_connector_state *astdp_state = kzalloc_obj(*astdp_state); 484 483 485 484 if (connector->state) 486 485 connector->funcs->atomic_destroy_state(connector, connector->state);
+1 -2
drivers/gpu/drm/drm_atomic.c
··· 132 132 */ 133 133 state->allow_modeset = true; 134 134 135 - state->crtcs = kzalloc_objs(*state->crtcs, dev->mode_config.num_crtc, 136 - GFP_KERNEL); 135 + state->crtcs = kzalloc_objs(*state->crtcs, dev->mode_config.num_crtc); 137 136 if (!state->crtcs) 138 137 goto fail; 139 138 state->planes = kzalloc_objs(*state->planes,
+1 -2
drivers/gpu/drm/drm_atomic_helper.c
··· 2453 2453 } 2454 2454 2455 2455 if (!state->fake_commit) { 2456 - state->fake_commit = kzalloc_obj(*state->fake_commit, 2457 - GFP_KERNEL); 2456 + state->fake_commit = kzalloc_obj(*state->fake_commit); 2458 2457 if (!state->fake_commit) 2459 2458 return NULL; 2460 2459
+1 -2
drivers/gpu/drm/drm_atomic_state_helper.c
··· 473 473 */ 474 474 void drm_atomic_helper_connector_reset(struct drm_connector *connector) 475 475 { 476 - struct drm_connector_state *conn_state = kzalloc_obj(*conn_state, 477 - GFP_KERNEL); 476 + struct drm_connector_state *conn_state = kzalloc_obj(*conn_state); 478 477 479 478 if (connector->state) 480 479 __drm_atomic_helper_connector_destroy_state(connector->state);
+2 -4
drivers/gpu/drm/drm_buddy.c
··· 320 320 321 321 BUG_ON(mm->max_order > DRM_BUDDY_MAX_ORDER); 322 322 323 - mm->free_trees = kmalloc_objs(*mm->free_trees, DRM_BUDDY_MAX_FREE_TREES, 324 - GFP_KERNEL); 323 + mm->free_trees = kmalloc_objs(*mm->free_trees, DRM_BUDDY_MAX_FREE_TREES); 325 324 if (!mm->free_trees) 326 325 return -ENOMEM; 327 326 ··· 336 337 337 338 mm->n_roots = hweight64(size); 338 339 339 - mm->roots = kmalloc_objs(struct drm_buddy_block *, mm->n_roots, 340 - GFP_KERNEL); 340 + mm->roots = kmalloc_objs(struct drm_buddy_block *, mm->n_roots); 341 341 if (!mm->roots) 342 342 goto out_free_tree; 343 343
+1 -2
drivers/gpu/drm/drm_client_modeset.c
··· 44 44 int i = 0; 45 45 46 46 /* Add terminating zero entry to enable index less iteration */ 47 - client->modesets = kzalloc_objs(*client->modesets, num_crtc + 1, 48 - GFP_KERNEL); 47 + client->modesets = kzalloc_objs(*client->modesets, num_crtc + 1); 49 48 if (!client->modesets) 50 49 return -ENOMEM; 51 50
+1 -2
drivers/gpu/drm/drm_gem_atomic_helper.c
··· 256 256 if (!plane_state) 257 257 return NULL; 258 258 259 - new_shadow_plane_state = kzalloc_obj(*new_shadow_plane_state, 260 - GFP_KERNEL); 259 + new_shadow_plane_state = kzalloc_obj(*new_shadow_plane_state); 261 260 if (!new_shadow_plane_state) 262 261 return NULL; 263 262 __drm_gem_duplicate_shadow_plane_state(plane, new_shadow_plane_state);
+1 -2
drivers/gpu/drm/drm_lease.c
··· 386 386 int ret; 387 387 bool universal_planes = READ_ONCE(lessor_priv->universal_planes); 388 388 389 - objects = kzalloc_objs(struct drm_mode_object *, object_count, 390 - GFP_KERNEL); 389 + objects = kzalloc_objs(struct drm_mode_object *, object_count); 391 390 if (!objects) 392 391 return -ENOMEM; 393 392
+1 -2
drivers/gpu/drm/drm_plane.c
··· 1844 1844 int len = 0; 1845 1845 int i; 1846 1846 1847 - all_pipelines = kzalloc_objs(*all_pipelines, num_pipelines + 1, 1848 - GFP_KERNEL); 1847 + all_pipelines = kzalloc_objs(*all_pipelines, num_pipelines + 1); 1849 1848 1850 1849 if (!all_pipelines) { 1851 1850 drm_err(plane->dev, "failed to allocate color pipeline\n");
+1 -2
drivers/gpu/drm/drm_plane_helper.c
··· 217 217 /* Find current connectors for CRTC */ 218 218 num_connectors = get_connectors_for_crtc(crtc, NULL, 0); 219 219 BUG_ON(num_connectors == 0); 220 - connector_list = kzalloc_objs(*connector_list, num_connectors, 221 - GFP_KERNEL); 220 + connector_list = kzalloc_objs(*connector_list, num_connectors); 222 221 if (!connector_list) 223 222 return -ENOMEM; 224 223 get_connectors_for_crtc(crtc, connector_list, num_connectors);
+1 -2
drivers/gpu/drm/drm_writeback.c
··· 422 422 WARN_ON(conn_state->connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK); 423 423 424 424 if (!conn_state->writeback_job) { 425 - conn_state->writeback_job = kzalloc_obj(*conn_state->writeback_job, 426 - GFP_KERNEL); 425 + conn_state->writeback_job = kzalloc_obj(*conn_state->writeback_job); 427 426 if (!conn_state->writeback_job) 428 427 return -ENOMEM; 429 428
+1 -2
drivers/gpu/drm/etnaviv/etnaviv_flop_reset.c
··· 182 182 * (input and output image, and shader), we keep this buffer 183 183 * for the whole life time the driver is bound 184 184 */ 185 - priv->flop_reset_data_ppu = kzalloc_obj(*priv->flop_reset_data_ppu, 186 - GFP_KERNEL); 185 + priv->flop_reset_data_ppu = kzalloc_obj(*priv->flop_reset_data_ppu); 187 186 188 187 if (!priv->flop_reset_data_ppu) 189 188 return -ENOMEM;
+1 -2
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
··· 38 38 if (!submit) 39 39 return NULL; 40 40 41 - submit->pmrs = kzalloc_objs(struct etnaviv_perfmon_request, nr_pmrs, 42 - GFP_KERNEL); 41 + submit->pmrs = kzalloc_objs(struct etnaviv_perfmon_request, nr_pmrs); 43 42 if (!submit->pmrs) { 44 43 kfree(submit); 45 44 return NULL;
+1 -2
drivers/gpu/drm/exynos/exynos_drm_g2d.c
··· 470 470 offset = userptr & ~PAGE_MASK; 471 471 end = PAGE_ALIGN(userptr + size); 472 472 npages = (end - start) >> PAGE_SHIFT; 473 - g2d_userptr->pages = kvmalloc_objs(*g2d_userptr->pages, npages, 474 - GFP_KERNEL); 473 + g2d_userptr->pages = kvmalloc_objs(*g2d_userptr->pages, npages); 475 474 if (!g2d_userptr->pages) { 476 475 ret = -ENOMEM; 477 476 goto err_free;
+1 -2
drivers/gpu/drm/gma500/intel_gmbus.c
··· 399 399 struct drm_psb_private *dev_priv = to_drm_psb_private(dev); 400 400 int ret, i; 401 401 402 - dev_priv->gmbus = kzalloc_objs(struct intel_gmbus, GMBUS_NUM_PORTS, 403 - GFP_KERNEL); 402 + dev_priv->gmbus = kzalloc_objs(struct intel_gmbus, GMBUS_NUM_PORTS); 404 403 if (dev_priv->gmbus == NULL) 405 404 return -ENOMEM; 406 405
+4 -8
drivers/gpu/drm/gma500/psb_intel_sdvo.c
··· 1977 1977 struct gma_connector *intel_connector; 1978 1978 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector; 1979 1979 1980 - psb_intel_sdvo_connector = kzalloc_obj(struct psb_intel_sdvo_connector, 1981 - GFP_KERNEL); 1980 + psb_intel_sdvo_connector = kzalloc_obj(struct psb_intel_sdvo_connector); 1982 1981 if (!psb_intel_sdvo_connector) 1983 1982 return false; 1984 1983 ··· 2017 2018 struct gma_connector *intel_connector; 2018 2019 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector; 2019 2020 2020 - psb_intel_sdvo_connector = kzalloc_obj(struct psb_intel_sdvo_connector, 2021 - GFP_KERNEL); 2021 + psb_intel_sdvo_connector = kzalloc_obj(struct psb_intel_sdvo_connector); 2022 2022 if (!psb_intel_sdvo_connector) 2023 2023 return false; 2024 2024 ··· 2056 2058 struct gma_connector *intel_connector; 2057 2059 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector; 2058 2060 2059 - psb_intel_sdvo_connector = kzalloc_obj(struct psb_intel_sdvo_connector, 2060 - GFP_KERNEL); 2061 + psb_intel_sdvo_connector = kzalloc_obj(struct psb_intel_sdvo_connector); 2061 2062 if (!psb_intel_sdvo_connector) 2062 2063 return false; 2063 2064 ··· 2090 2093 struct gma_connector *intel_connector; 2091 2094 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector; 2092 2095 2093 - psb_intel_sdvo_connector = kzalloc_obj(struct psb_intel_sdvo_connector, 2094 - GFP_KERNEL); 2096 + psb_intel_sdvo_connector = kzalloc_obj(struct psb_intel_sdvo_connector); 2095 2097 if (!psb_intel_sdvo_connector) 2096 2098 return false; 2097 2099
+2 -4
drivers/gpu/drm/gud/gud_connector.c
··· 246 246 if (drm_edid && edid_ctx.edid_override) 247 247 goto out; 248 248 249 - reqmodes = kmalloc_objs(*reqmodes, GUD_CONNECTOR_MAX_NUM_MODES, 250 - GFP_KERNEL); 249 + reqmodes = kmalloc_objs(*reqmodes, GUD_CONNECTOR_MAX_NUM_MODES); 251 250 if (!reqmodes) 252 251 goto out; 253 252 ··· 479 480 unsigned int i, num_properties; 480 481 int ret; 481 482 482 - properties = kzalloc_objs(*properties, GUD_CONNECTOR_PROPERTIES_MAX_NUM, 483 - GFP_KERNEL); 483 + properties = kzalloc_objs(*properties, GUD_CONNECTOR_PROPERTIES_MAX_NUM); 484 484 if (!properties) 485 485 return -ENOMEM; 486 486
+1 -2
drivers/gpu/drm/gud/gud_drv.c
··· 255 255 unsigned int i, num_properties; 256 256 int ret; 257 257 258 - properties = kzalloc_objs(*properties, GUD_PROPERTIES_MAX_NUM, 259 - GFP_KERNEL); 258 + properties = kzalloc_objs(*properties, GUD_PROPERTIES_MAX_NUM); 260 259 if (!properties) 261 260 return -ENOMEM; 262 261
+1 -2
drivers/gpu/drm/i915/display/intel_bios.c
··· 486 486 if (section_id == BDB_MIPI_SEQUENCE && *(const u8 *)block >= 3) 487 487 block_size += 5; 488 488 489 - entry = kzalloc_flex(*entry, data, max(min_size, block_size) + 3, 490 - GFP_KERNEL); 489 + entry = kzalloc_flex(*entry, data, max(min_size, block_size) + 3); 491 490 if (!entry) { 492 491 kfree(temp_block); 493 492 return;
+1 -2
drivers/gpu/drm/i915/display/intel_dp_tunnel.c
··· 381 381 } 382 382 383 383 if (!state->inherited_dp_tunnels) { 384 - state->inherited_dp_tunnels = kzalloc_obj(*state->inherited_dp_tunnels, 385 - GFP_KERNEL); 384 + state->inherited_dp_tunnels = kzalloc_obj(*state->inherited_dp_tunnels); 386 385 if (!state->inherited_dp_tunnels) 387 386 return -ENOMEM; 388 387 }
+1 -2
drivers/gpu/drm/i915/gem/i915_gem_create.c
··· 54 54 obj->mm.placements = &i915->mm.regions[mr->id]; 55 55 obj->mm.n_placements = 1; 56 56 } else { 57 - arr = kmalloc_objs(struct intel_memory_region *, n_placements, 58 - GFP_KERNEL); 57 + arr = kmalloc_objs(struct intel_memory_region *, n_placements); 59 58 if (!arr) 60 59 return -ENOMEM; 61 60
+1 -2
drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
··· 865 865 if (intel_vgpu_active(i915)) 866 866 num_fences = intel_uncore_read(uncore, 867 867 vgtif_reg(avail_rs.fence_num)); 868 - ggtt->fence_regs = kzalloc_objs(*ggtt->fence_regs, num_fences, 869 - GFP_KERNEL); 868 + ggtt->fence_regs = kzalloc_objs(*ggtt->fence_regs, num_fences); 870 869 if (!ggtt->fence_regs) 871 870 num_fences = 0; 872 871
+1 -2
drivers/gpu/drm/i915/gt/intel_workarounds.c
··· 157 157 if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */ 158 158 struct i915_wa *list; 159 159 160 - list = kmalloc_objs(*list, ALIGN(wal->count + 1, grow), 161 - GFP_KERNEL); 160 + list = kmalloc_objs(*list, ALIGN(wal->count + 1, grow)); 162 161 if (!list) { 163 162 drm_err(&i915->drm, "No space for workaround init!\n"); 164 163 return;
+1 -2
drivers/gpu/drm/i915/gt/selftest_execlists.c
··· 3648 3648 u32 *cs; 3649 3649 int n; 3650 3650 3651 - smoke.contexts = kmalloc_objs(*smoke.contexts, smoke.ncontext, 3652 - GFP_KERNEL); 3651 + smoke.contexts = kmalloc_objs(*smoke.contexts, smoke.ncontext); 3653 3652 if (!smoke.contexts) 3654 3653 return -ENOMEM; 3655 3654
+2 -4
drivers/gpu/drm/i915/gt/selftest_timeline.c
··· 536 536 * independently to each of their breadcrumb slots. 537 537 */ 538 538 539 - timelines = kvmalloc_objs(*timelines, NUM_TIMELINES * I915_NUM_ENGINES, 540 - GFP_KERNEL); 539 + timelines = kvmalloc_objs(*timelines, NUM_TIMELINES * I915_NUM_ENGINES); 541 540 if (!timelines) 542 541 return -ENOMEM; 543 542 ··· 609 610 * engines. 610 611 */ 611 612 612 - timelines = kvmalloc_objs(*timelines, NUM_TIMELINES * I915_NUM_ENGINES, 613 - GFP_KERNEL); 613 + timelines = kvmalloc_objs(*timelines, NUM_TIMELINES * I915_NUM_ENGINES); 614 614 if (!timelines) 615 615 return -ENOMEM; 616 616
+1 -2
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
··· 320 320 return; 321 321 322 322 /* allocate an extra for an end marker */ 323 - extlists = kzalloc_objs(struct __guc_mmio_reg_descr_group, 2, 324 - GFP_KERNEL); 323 + extlists = kzalloc_objs(struct __guc_mmio_reg_descr_group, 2); 325 324 if (!extlists) 326 325 return; 327 326
+1 -2
drivers/gpu/drm/i915/i915_cmd_parser.c
··· 918 918 for (j = 0; j < table->count; j++) { 919 919 const struct drm_i915_cmd_descriptor *desc = 920 920 &table->table[j]; 921 - struct cmd_node *desc_node = kmalloc_obj(*desc_node, 922 - GFP_KERNEL); 921 + struct cmd_node *desc_node = kmalloc_obj(*desc_node); 923 922 924 923 if (!desc_node) 925 924 return -ENOMEM;
+1 -2
drivers/gpu/drm/i915/selftests/i915_request.c
··· 1768 1768 1769 1769 smoke[0].request_alloc = __live_request_alloc; 1770 1770 smoke[0].ncontexts = 64; 1771 - smoke[0].contexts = kzalloc_objs(*smoke[0].contexts, smoke[0].ncontexts, 1772 - GFP_KERNEL); 1771 + smoke[0].contexts = kzalloc_objs(*smoke[0].contexts, smoke[0].ncontexts); 1773 1772 if (!smoke[0].contexts) { 1774 1773 ret = -ENOMEM; 1775 1774 goto out_threads;
+1 -2
drivers/gpu/drm/mgag200/mgag200_mode.c
··· 713 713 if (!crtc_state) 714 714 return NULL; 715 715 716 - new_mgag200_crtc_state = kzalloc_obj(*new_mgag200_crtc_state, 717 - GFP_KERNEL); 716 + new_mgag200_crtc_state = kzalloc_obj(*new_mgag200_crtc_state); 718 717 if (!new_mgag200_crtc_state) 719 718 return NULL; 720 719 __drm_atomic_helper_crtc_duplicate_state(crtc, &new_mgag200_crtc_state->base);
+1 -2
drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
··· 1130 1130 1131 1131 static void mdp5_crtc_reset(struct drm_crtc *crtc) 1132 1132 { 1133 - struct mdp5_crtc_state *mdp5_cstate = kzalloc_obj(*mdp5_cstate, 1134 - GFP_KERNEL); 1133 + struct mdp5_crtc_state *mdp5_cstate = kzalloc_obj(*mdp5_cstate); 1135 1134 1136 1135 if (crtc->state) 1137 1136 mdp5_crtc_destroy_state(crtc, crtc->state);
+2 -4
drivers/gpu/drm/nouveau/dispnv04/overlay.c
··· 279 279 nv10_overlay_init(struct drm_device *device) 280 280 { 281 281 struct nouveau_drm *drm = nouveau_drm(device); 282 - struct nouveau_plane *plane = kzalloc_obj(struct nouveau_plane, 283 - GFP_KERNEL); 282 + struct nouveau_plane *plane = kzalloc_obj(struct nouveau_plane); 284 283 unsigned int num_formats = ARRAY_SIZE(formats); 285 284 int ret; 286 285 ··· 470 471 nv04_overlay_init(struct drm_device *device) 471 472 { 472 473 struct nouveau_drm *drm = nouveau_drm(device); 473 - struct nouveau_plane *plane = kzalloc_obj(struct nouveau_plane, 474 - GFP_KERNEL); 474 + struct nouveau_plane *plane = kzalloc_obj(struct nouveau_plane); 475 475 int ret; 476 476 477 477 if (!plane)
+1 -2
drivers/gpu/drm/nouveau/nouveau_svm.c
··· 1010 1010 if (ret) 1011 1011 return ret; 1012 1012 1013 - buffer->fault = kvzalloc_objs(*buffer->fault, buffer->entries, 1014 - GFP_KERNEL); 1013 + buffer->fault = kvzalloc_objs(*buffer->fault, buffer->entries); 1015 1014 if (!buffer->fault) 1016 1015 return -ENOMEM; 1017 1016
+1 -2
drivers/gpu/drm/nouveau/nvif/fifo.c
··· 51 51 goto done; 52 52 53 53 device->runlists = fls64(a->v.runlists.data); 54 - device->runlist = kzalloc_objs(*device->runlist, device->runlists, 55 - GFP_KERNEL); 54 + device->runlist = kzalloc_objs(*device->runlist, device->runlists); 56 55 if (!device->runlist) { 57 56 ret = -ENOMEM; 58 57 goto done;
+1 -2
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
··· 904 904 list_add(&omap_dmm->engines[i].idle_node, &omap_dmm->idle_head); 905 905 } 906 906 907 - omap_dmm->tcm = kzalloc_objs(*omap_dmm->tcm, omap_dmm->num_lut, 908 - GFP_KERNEL); 907 + omap_dmm->tcm = kzalloc_objs(*omap_dmm->tcm, omap_dmm->num_lut); 909 908 if (!omap_dmm->tcm) { 910 909 ret = -ENOMEM; 911 910 goto fail;
+1 -2
drivers/gpu/drm/qxl/qxl_display.c
··· 1241 1241 qxl_bo_physical_address(qdev, qdev->monitors_config_bo, 0); 1242 1242 1243 1243 memset(qdev->monitors_config, 0, monitors_config_size); 1244 - qdev->dumb_heads = kzalloc_objs(qdev->dumb_heads[0], qxl_num_crtc, 1245 - GFP_KERNEL); 1244 + qdev->dumb_heads = kzalloc_objs(qdev->dumb_heads[0], qxl_num_crtc); 1246 1245 if (!qdev->dumb_heads) { 1247 1246 qxl_destroy_monitors_object(qdev); 1248 1247 return -ENOMEM;
+1 -2
drivers/gpu/drm/qxl/qxl_ioctl.c
··· 168 168 cmd->command_size)) 169 169 return -EFAULT; 170 170 171 - reloc_info = kmalloc_objs(struct qxl_reloc_info, cmd->relocs_num, 172 - GFP_KERNEL); 171 + reloc_info = kmalloc_objs(struct qxl_reloc_info, cmd->relocs_num); 173 172 if (!reloc_info) 174 173 return -ENOMEM; 175 174
+2 -4
drivers/gpu/drm/radeon/atombios_encoders.c
··· 2625 2625 { 2626 2626 struct drm_device *dev = radeon_encoder->base.dev; 2627 2627 struct radeon_device *rdev = dev->dev_private; 2628 - struct radeon_encoder_atom_dac *dac = kzalloc_obj(struct radeon_encoder_atom_dac, 2629 - GFP_KERNEL); 2628 + struct radeon_encoder_atom_dac *dac = kzalloc_obj(struct radeon_encoder_atom_dac); 2630 2629 2631 2630 if (!dac) 2632 2631 return NULL; ··· 2638 2639 radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder) 2639 2640 { 2640 2641 int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT; 2641 - struct radeon_encoder_atom_dig *dig = kzalloc_obj(struct radeon_encoder_atom_dig, 2642 - GFP_KERNEL); 2642 + struct radeon_encoder_atom_dig *dig = kzalloc_obj(struct radeon_encoder_atom_dig); 2643 2643 2644 2644 if (!dig) 2645 2645 return NULL;
+1 -2
drivers/gpu/drm/radeon/btc_dpm.c
··· 2552 2552 return ret; 2553 2553 2554 2554 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries = 2555 - kzalloc_objs(struct radeon_clock_voltage_dependency_entry, 4, 2556 - GFP_KERNEL); 2555 + kzalloc_objs(struct radeon_clock_voltage_dependency_entry, 4); 2557 2556 if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) { 2558 2557 r600_free_extended_power_table(rdev); 2559 2558 return -ENOMEM;
+1 -2
drivers/gpu/drm/radeon/ci_dpm.c
··· 5740 5740 ci_set_private_data_variables_based_on_pptable(rdev); 5741 5741 5742 5742 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries = 5743 - kzalloc_objs(struct radeon_clock_voltage_dependency_entry, 4, 5744 - GFP_KERNEL); 5743 + kzalloc_objs(struct radeon_clock_voltage_dependency_entry, 4); 5745 5744 if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) { 5746 5745 ci_dpm_fini(rdev); 5747 5746 return -ENOMEM;
+1 -2
drivers/gpu/drm/radeon/ni_dpm.c
··· 4078 4078 return ret; 4079 4079 4080 4080 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries = 4081 - kzalloc_objs(struct radeon_clock_voltage_dependency_entry, 4, 4082 - GFP_KERNEL); 4081 + kzalloc_objs(struct radeon_clock_voltage_dependency_entry, 4); 4083 4082 if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) { 4084 4083 r600_free_extended_power_table(rdev); 4085 4084 return -ENOMEM;
+1 -2
drivers/gpu/drm/radeon/r600_dpm.c
··· 1255 1255 le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); 1256 1256 ATOM_PowerTune_Table *pt; 1257 1257 rdev->pm.dpm.dyn_state.cac_tdp_table = 1258 - kzalloc_obj(struct radeon_cac_tdp_table, 1259 - GFP_KERNEL); 1258 + kzalloc_obj(struct radeon_cac_tdp_table); 1260 1259 if (!rdev->pm.dpm.dyn_state.cac_tdp_table) { 1261 1260 r600_free_extended_power_table(rdev); 1262 1261 return -ENOMEM;
+4 -8
drivers/gpu/drm/radeon/radeon_atombios.c
··· 1773 1773 dac_info = (struct _COMPASSIONATE_DATA *) 1774 1774 (mode_info->atom_context->bios + data_offset); 1775 1775 1776 - p_dac = kzalloc_obj(struct radeon_encoder_primary_dac, 1777 - GFP_KERNEL); 1776 + p_dac = kzalloc_obj(struct radeon_encoder_primary_dac); 1778 1777 1779 1778 if (!p_dac) 1780 1779 return NULL; ··· 2126 2127 /* avoid memory leaks from invalid modes or unknown frev. */ 2127 2128 if (!rdev->pm.power_state[state_index].clock_info) { 2128 2129 rdev->pm.power_state[state_index].clock_info = 2129 - kzalloc_obj(struct radeon_pm_clock_info, 2130 - GFP_KERNEL); 2130 + kzalloc_obj(struct radeon_pm_clock_info); 2131 2131 } 2132 2132 if (!rdev->pm.power_state[state_index].clock_info) 2133 2133 goto out; ··· 2783 2785 } 2784 2786 2785 2787 if (state_index == 0) { 2786 - rdev->pm.power_state = kzalloc_obj(struct radeon_power_state, 2787 - GFP_KERNEL); 2788 + rdev->pm.power_state = kzalloc_obj(struct radeon_power_state); 2788 2789 if (rdev->pm.power_state) { 2789 2790 rdev->pm.power_state[0].clock_info = 2790 - kzalloc_objs(struct radeon_pm_clock_info, 1, 2791 - GFP_KERNEL); 2791 + kzalloc_objs(struct radeon_pm_clock_info, 1); 2792 2792 if (rdev->pm.power_state[0].clock_info) { 2793 2793 /* add the default mode */ 2794 2794 rdev->pm.power_state[state_index].type =
+1 -2
drivers/gpu/drm/radeon/radeon_combios.c
··· 2629 2629 rdev->pm.default_power_state_index = -1; 2630 2630 2631 2631 /* allocate 2 power states */ 2632 - rdev->pm.power_state = kzalloc_objs(struct radeon_power_state, 2, 2633 - GFP_KERNEL); 2632 + rdev->pm.power_state = kzalloc_objs(struct radeon_power_state, 2); 2634 2633 if (rdev->pm.power_state) { 2635 2634 /* allocate 1 clock mode per state */ 2636 2635 rdev->pm.power_state[0].clock_info =
+6 -12
drivers/gpu/drm/radeon/radeon_connectors.c
··· 1907 1907 } 1908 1908 1909 1909 if (is_dp_bridge) { 1910 - radeon_dig_connector = kzalloc_obj(struct radeon_connector_atom_dig, 1911 - GFP_KERNEL); 1910 + radeon_dig_connector = kzalloc_obj(struct radeon_connector_atom_dig); 1912 1911 if (!radeon_dig_connector) 1913 1912 goto failed; 1914 1913 radeon_dig_connector->igp_lane_info = igp_lane_info; ··· 2078 2079 break; 2079 2080 case DRM_MODE_CONNECTOR_DVII: 2080 2081 case DRM_MODE_CONNECTOR_DVID: 2081 - radeon_dig_connector = kzalloc_obj(struct radeon_connector_atom_dig, 2082 - GFP_KERNEL); 2082 + radeon_dig_connector = kzalloc_obj(struct radeon_connector_atom_dig); 2083 2083 if (!radeon_dig_connector) 2084 2084 goto failed; 2085 2085 radeon_dig_connector->igp_lane_info = igp_lane_info; ··· 2140 2142 break; 2141 2143 case DRM_MODE_CONNECTOR_HDMIA: 2142 2144 case DRM_MODE_CONNECTOR_HDMIB: 2143 - radeon_dig_connector = kzalloc_obj(struct radeon_connector_atom_dig, 2144 - GFP_KERNEL); 2145 + radeon_dig_connector = kzalloc_obj(struct radeon_connector_atom_dig); 2145 2146 if (!radeon_dig_connector) 2146 2147 goto failed; 2147 2148 radeon_dig_connector->igp_lane_info = igp_lane_info; ··· 2195 2198 connector->doublescan_allowed = false; 2196 2199 break; 2197 2200 case DRM_MODE_CONNECTOR_DisplayPort: 2198 - radeon_dig_connector = kzalloc_obj(struct radeon_connector_atom_dig, 2199 - GFP_KERNEL); 2201 + radeon_dig_connector = kzalloc_obj(struct radeon_connector_atom_dig); 2200 2202 if (!radeon_dig_connector) 2201 2203 goto failed; 2202 2204 radeon_dig_connector->igp_lane_info = igp_lane_info; ··· 2250 2254 connector->doublescan_allowed = false; 2251 2255 break; 2252 2256 case DRM_MODE_CONNECTOR_eDP: 2253 - radeon_dig_connector = kzalloc_obj(struct radeon_connector_atom_dig, 2254 - GFP_KERNEL); 2257 + radeon_dig_connector = kzalloc_obj(struct radeon_connector_atom_dig); 2255 2258 if (!radeon_dig_connector) 2256 2259 goto failed; 2257 2260 radeon_dig_connector->igp_lane_info = igp_lane_info; ··· 2297 2302 
connector->doublescan_allowed = false; 2298 2303 break; 2299 2304 case DRM_MODE_CONNECTOR_LVDS: 2300 - radeon_dig_connector = kzalloc_obj(struct radeon_connector_atom_dig, 2301 - GFP_KERNEL); 2305 + radeon_dig_connector = kzalloc_obj(struct radeon_connector_atom_dig); 2302 2306 if (!radeon_dig_connector) 2303 2307 goto failed; 2304 2308 radeon_dig_connector->igp_lane_info = igp_lane_info;
+1 -2
drivers/gpu/drm/radeon/radeon_cs.c
··· 296 296 } 297 297 p->cs_flags = 0; 298 298 p->nchunks = cs->num_chunks; 299 - p->chunks = kvzalloc_objs(struct radeon_cs_chunk, p->nchunks, 300 - GFP_KERNEL); 299 + p->chunks = kvzalloc_objs(struct radeon_cs_chunk, p->nchunks); 301 300 if (p->chunks == NULL) { 302 301 return -ENOMEM; 303 302 }
+5 -10
drivers/gpu/drm/radeon/radeon_display.c
··· 1539 1539 1540 1540 BUG_ON(num_afmt > ARRAY_SIZE(eg_offsets)); 1541 1541 for (i = 0; i < num_afmt; i++) { 1542 - rdev->mode_info.afmt[i] = kzalloc_obj(struct radeon_afmt, 1543 - GFP_KERNEL); 1542 + rdev->mode_info.afmt[i] = kzalloc_obj(struct radeon_afmt); 1544 1543 if (rdev->mode_info.afmt[i]) { 1545 1544 rdev->mode_info.afmt[i]->offset = eg_offsets[i]; 1546 1545 rdev->mode_info.afmt[i]->id = i; ··· 1547 1548 } 1548 1549 } else if (ASIC_IS_DCE3(rdev)) { 1549 1550 /* DCE3.x has 2 audio blocks tied to DIG encoders */ 1550 - rdev->mode_info.afmt[0] = kzalloc_obj(struct radeon_afmt, 1551 - GFP_KERNEL); 1551 + rdev->mode_info.afmt[0] = kzalloc_obj(struct radeon_afmt); 1552 1552 if (rdev->mode_info.afmt[0]) { 1553 1553 rdev->mode_info.afmt[0]->offset = DCE3_HDMI_OFFSET0; 1554 1554 rdev->mode_info.afmt[0]->id = 0; 1555 1555 } 1556 - rdev->mode_info.afmt[1] = kzalloc_obj(struct radeon_afmt, 1557 - GFP_KERNEL); 1556 + rdev->mode_info.afmt[1] = kzalloc_obj(struct radeon_afmt); 1558 1557 if (rdev->mode_info.afmt[1]) { 1559 1558 rdev->mode_info.afmt[1]->offset = DCE3_HDMI_OFFSET1; 1560 1559 rdev->mode_info.afmt[1]->id = 1; 1561 1560 } 1562 1561 } else if (ASIC_IS_DCE2(rdev)) { 1563 1562 /* DCE2 has at least 1 routable audio block */ 1564 - rdev->mode_info.afmt[0] = kzalloc_obj(struct radeon_afmt, 1565 - GFP_KERNEL); 1563 + rdev->mode_info.afmt[0] = kzalloc_obj(struct radeon_afmt); 1566 1564 if (rdev->mode_info.afmt[0]) { 1567 1565 rdev->mode_info.afmt[0]->offset = DCE2_HDMI_OFFSET0; 1568 1566 rdev->mode_info.afmt[0]->id = 0; 1569 1567 } 1570 1568 /* r6xx has 2 routable audio blocks */ 1571 1569 if (rdev->family >= CHIP_R600) { 1572 - rdev->mode_info.afmt[1] = kzalloc_obj(struct radeon_afmt, 1573 - GFP_KERNEL); 1570 + rdev->mode_info.afmt[1] = kzalloc_obj(struct radeon_afmt); 1574 1571 if (rdev->mode_info.afmt[1]) { 1575 1572 rdev->mode_info.afmt[1]->offset = DCE2_HDMI_OFFSET1; 1576 1573 rdev->mode_info.afmt[1]->id = 1;
+1 -2
drivers/gpu/drm/radeon/radeon_vm.c
··· 133 133 struct radeon_bo_list *list; 134 134 unsigned i, idx; 135 135 136 - list = kvmalloc_objs(struct radeon_bo_list, vm->max_pde_used + 2, 137 - GFP_KERNEL); 136 + list = kvmalloc_objs(struct radeon_bo_list, vm->max_pde_used + 2); 138 137 if (!list) 139 138 return NULL; 140 139
+1 -2
drivers/gpu/drm/radeon/si_dpm.c
··· 6898 6898 return ret; 6899 6899 6900 6900 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries = 6901 - kzalloc_objs(struct radeon_clock_voltage_dependency_entry, 4, 6902 - GFP_KERNEL); 6901 + kzalloc_objs(struct radeon_clock_voltage_dependency_entry, 4); 6903 6902 if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) { 6904 6903 r600_free_extended_power_table(rdev); 6905 6904 return -ENOMEM;
+1 -2
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
··· 1658 1658 1659 1659 static void vop_crtc_reset(struct drm_crtc *crtc) 1660 1660 { 1661 - struct rockchip_crtc_state *crtc_state = kzalloc_obj(*crtc_state, 1662 - GFP_KERNEL); 1661 + struct rockchip_crtc_state *crtc_state = kzalloc_obj(*crtc_state); 1663 1662 1664 1663 if (crtc->state) 1665 1664 vop_crtc_destroy_state(crtc, crtc->state);
+1 -2
drivers/gpu/drm/scheduler/sched_main.c
··· 1358 1358 goto Out_check_own; 1359 1359 sched->num_rqs = args->num_rqs; 1360 1360 for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) { 1361 - sched->sched_rq[i] = kzalloc_obj(*sched->sched_rq[i], 1362 - GFP_KERNEL); 1361 + sched->sched_rq[i] = kzalloc_obj(*sched->sched_rq[i]); 1363 1362 if (!sched->sched_rq[i]) 1364 1363 goto Out_unroll; 1365 1364 drm_sched_rq_init(sched, sched->sched_rq[i]);
+1 -2
drivers/gpu/drm/tiny/appletbdrm.c
··· 358 358 if (!appletbdrm_state->request) 359 359 return -ENOMEM; 360 360 361 - appletbdrm_state->response = kzalloc_obj(*appletbdrm_state->response, 362 - GFP_KERNEL); 361 + appletbdrm_state->response = kzalloc_obj(*appletbdrm_state->response); 363 362 364 363 if (!appletbdrm_state->response) 365 364 return -ENOMEM;
+1 -2
drivers/gpu/drm/ttm/ttm_tt.c
··· 137 137 138 138 static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm) 139 139 { 140 - ttm->dma_address = kvzalloc_objs(*ttm->dma_address, ttm->num_pages, 141 - GFP_KERNEL); 140 + ttm->dma_address = kvzalloc_objs(*ttm->dma_address, ttm->num_pages); 142 141 if (!ttm->dma_address) 143 142 return -ENOMEM; 144 143
+4 -8
drivers/gpu/drm/v3d/v3d_submit.c
··· 335 335 return 0; 336 336 337 337 se->out_syncs = (struct v3d_submit_outsync *) 338 - kvmalloc_objs(struct v3d_submit_outsync, count, 339 - GFP_KERNEL); 338 + kvmalloc_objs(struct v3d_submit_outsync, count); 340 339 if (!se->out_syncs) 341 340 return -ENOMEM; 342 341 ··· 724 725 job->job_type = V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY; 725 726 726 727 query_info->queries = 727 - kvmalloc_objs(struct v3d_performance_query, reset.count, 728 - GFP_KERNEL); 728 + kvmalloc_objs(struct v3d_performance_query, reset.count); 729 729 if (!query_info->queries) 730 730 return -ENOMEM; 731 731 ··· 764 766 job->job_type = V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY; 765 767 766 768 query_info->queries = 767 - kvmalloc_objs(struct v3d_performance_query, copy.count, 768 - GFP_KERNEL); 769 + kvmalloc_objs(struct v3d_performance_query, copy.count); 769 770 if (!query_info->queries) 770 771 return -ENOMEM; 771 772 ··· 1073 1076 goto fail; 1074 1077 } 1075 1078 1076 - job->base.bo = kzalloc_objs(*job->base.bo, ARRAY_SIZE(args->bo_handles), 1077 - GFP_KERNEL); 1079 + job->base.bo = kzalloc_objs(*job->base.bo, ARRAY_SIZE(args->bo_handles)); 1078 1080 if (!job->base.bo) { 1079 1081 ret = -ENOMEM; 1080 1082 goto fail;
+1 -2
drivers/gpu/drm/vc4/vc4_bo.c
··· 1014 1014 * use. This lets us avoid a bunch of string reallocation in 1015 1015 * the kernel's draw and BO allocation paths. 1016 1016 */ 1017 - vc4->bo_labels = kzalloc_objs(*vc4->bo_labels, VC4_BO_TYPE_COUNT, 1018 - GFP_KERNEL); 1017 + vc4->bo_labels = kzalloc_objs(*vc4->bo_labels, VC4_BO_TYPE_COUNT); 1019 1018 if (!vc4->bo_labels) 1020 1019 return -ENOMEM; 1021 1020 vc4->num_labels = VC4_BO_TYPE_COUNT;
+1 -2
drivers/gpu/drm/virtio/virtgpu_fence.c
··· 61 61 { 62 62 uint64_t fence_context = base_fence_ctx + ring_idx; 63 63 struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv; 64 - struct virtio_gpu_fence *fence = kzalloc_obj(struct virtio_gpu_fence, 65 - GFP_KERNEL); 64 + struct virtio_gpu_fence *fence = kzalloc_obj(struct virtio_gpu_fence); 66 65 67 66 if (!fence) 68 67 return fence;
+1 -2
drivers/gpu/drm/virtio/virtgpu_prime.c
··· 164 164 if (IS_ERR(sgt)) 165 165 return PTR_ERR(sgt); 166 166 167 - *ents = kvmalloc_objs(struct virtio_gpu_mem_entry, sgt->nents, 168 - GFP_KERNEL); 167 + *ents = kvmalloc_objs(struct virtio_gpu_mem_entry, sgt->nents); 169 168 if (!(*ents)) { 170 169 dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL); 171 170 return -ENOMEM;
+2 -4
drivers/gpu/drm/vkms/vkms_crtc.c
··· 87 87 88 88 static void vkms_atomic_crtc_reset(struct drm_crtc *crtc) 89 89 { 90 - struct vkms_crtc_state *vkms_state = kzalloc_obj(*vkms_state, 91 - GFP_KERNEL); 90 + struct vkms_crtc_state *vkms_state = kzalloc_obj(*vkms_state); 92 91 93 92 if (crtc->state) 94 93 vkms_atomic_crtc_destroy_state(crtc, crtc->state); ··· 136 137 i++; 137 138 } 138 139 139 - vkms_state->active_planes = kzalloc_objs(*vkms_state->active_planes, i, 140 - GFP_KERNEL); 140 + vkms_state->active_planes = kzalloc_objs(*vkms_state->active_planes, i); 141 141 if (!vkms_state->active_planes) 142 142 return -ENOMEM; 143 143 vkms_state->num_active_planes = i;
+2 -4
drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
··· 586 586 w, h, diff); 587 587 588 588 if (!src->ttm->pages && src->ttm->sg) { 589 - src_pages = kvmalloc_objs(struct page *, src->ttm->num_pages, 590 - GFP_KERNEL); 589 + src_pages = kvmalloc_objs(struct page *, src->ttm->num_pages); 591 590 if (!src_pages) 592 591 return -ENOMEM; 593 592 ret = drm_prime_sg_to_page_array(src->ttm->sg, src_pages, ··· 595 596 goto out; 596 597 } 597 598 if (!dst->ttm->pages && dst->ttm->sg) { 598 - dst_pages = kvmalloc_objs(struct page *, dst->ttm->num_pages, 599 - GFP_KERNEL); 599 + dst_pages = kvmalloc_objs(struct page *, dst->ttm->num_pages); 600 600 if (!dst_pages) { 601 601 ret = -ENOMEM; 602 602 goto out;
+1 -2
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
··· 948 948 uint32_t i; 949 949 int ret = 0; 950 950 951 - rects = kzalloc_objs(struct drm_rect, dev->mode_config.num_crtc, 952 - GFP_KERNEL); 951 + rects = kzalloc_objs(struct drm_rect, dev->mode_config.num_crtc); 953 952 if (!rects) 954 953 return -ENOMEM; 955 954
+1 -2
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
··· 767 767 ret = PTR_ERR(metadata->sizes); 768 768 goto out_no_sizes; 769 769 } 770 - srf->offsets = kmalloc_objs(*srf->offsets, metadata->num_sizes, 771 - GFP_KERNEL); 770 + srf->offsets = kmalloc_objs(*srf->offsets, metadata->num_sizes); 772 771 if (unlikely(!srf->offsets)) { 773 772 ret = -ENOMEM; 774 773 goto out_no_offsets;
+1 -2
drivers/gpu/drm/xe/xe_eu_stall.c
··· 636 636 struct xe_bo *bo; 637 637 u32 size; 638 638 639 - stream->xecore_buf = kzalloc_objs(*stream->xecore_buf, last_xecore, 640 - GFP_KERNEL); 639 + stream->xecore_buf = kzalloc_objs(*stream->xecore_buf, last_xecore); 641 640 if (!stream->xecore_buf) 642 641 return -ENOMEM; 643 642
+1 -2
drivers/gpu/drm/xe/xe_oa.c
··· 1408 1408 } 1409 1409 1410 1410 if (param->num_syncs) { 1411 - param->syncs = kzalloc_objs(*param->syncs, param->num_syncs, 1412 - GFP_KERNEL); 1411 + param->syncs = kzalloc_objs(*param->syncs, param->num_syncs); 1413 1412 if (!param->syncs) { 1414 1413 ret = -ENOMEM; 1415 1414 goto exit;
+1 -2
drivers/gpu/drm/xe/xe_pt.c
··· 368 368 entry->pt_bo->update_index = -1; 369 369 370 370 if (alloc_entries) { 371 - entry->pt_entries = kmalloc_objs(*entry->pt_entries, XE_PDES, 372 - GFP_KERNEL); 371 + entry->pt_entries = kmalloc_objs(*entry->pt_entries, XE_PDES); 373 372 if (!entry->pt_entries) 374 373 return -ENOMEM; 375 374 }
+1 -2
drivers/gpu/drm/xe/xe_vm_madvise.c
··· 47 47 lockdep_assert_held(&vm->lock); 48 48 49 49 madvise_range->num_vmas = 0; 50 - madvise_range->vmas = kmalloc_objs(*madvise_range->vmas, max_vmas, 51 - GFP_KERNEL); 50 + madvise_range->vmas = kmalloc_objs(*madvise_range->vmas, max_vmas); 52 51 if (!madvise_range->vmas) 53 52 return -ENOMEM; 54 53
+1 -2
drivers/gpu/drm/xen/xen_drm_front_gem.c
··· 47 47 size_t buf_size) 48 48 { 49 49 xen_obj->num_pages = DIV_ROUND_UP(buf_size, PAGE_SIZE); 50 - xen_obj->pages = kvmalloc_objs(struct page *, xen_obj->num_pages, 51 - GFP_KERNEL); 50 + xen_obj->pages = kvmalloc_objs(struct page *, xen_obj->num_pages); 52 51 return !xen_obj->pages ? -ENOMEM : 0; 53 52 } 54 53
+1 -2
drivers/gpu/host1x/channel.c
··· 16 16 int host1x_channel_list_init(struct host1x_channel_list *chlist, 17 17 unsigned int num_channels) 18 18 { 19 - chlist->channels = kzalloc_objs(struct host1x_channel, num_channels, 20 - GFP_KERNEL); 19 + chlist->channels = kzalloc_objs(struct host1x_channel, num_channels); 21 20 if (!chlist->channels) 22 21 return -ENOMEM; 23 22
+1 -2
drivers/greybus/manifest.c
··· 275 275 if (!count) 276 276 return 0; 277 277 278 - bundle->cport_desc = kzalloc_objs(*bundle->cport_desc, count, 279 - GFP_KERNEL); 278 + bundle->cport_desc = kzalloc_objs(*bundle->cport_desc, count); 280 279 if (!bundle->cport_desc) 281 280 goto exit; 282 281
+1 -2
drivers/greybus/svc.c
··· 782 782 if (!rail_names) 783 783 goto err_pwrmon_debugfs; 784 784 785 - svc->pwrmon_rails = kzalloc_objs(*svc->pwrmon_rails, rail_count, 786 - GFP_KERNEL); 785 + svc->pwrmon_rails = kzalloc_objs(*svc->pwrmon_rails, rail_count); 787 786 if (!svc->pwrmon_rails) 788 787 goto err_pwrmon_debugfs_free; 789 788
+1 -2
drivers/hid/hid-google-hammer.c
··· 59 59 struct cros_ec_command *msg; 60 60 int ret; 61 61 62 - msg = kzalloc_flex(*msg, data, max(sizeof(u32), sizeof(*params)), 63 - GFP_KERNEL); 62 + msg = kzalloc_flex(*msg, data, max(sizeof(u32), sizeof(*params))); 64 63 if (!msg) 65 64 return -ENOMEM; 66 65
+1 -2
drivers/hid/hid-haptic.c
··· 474 474 ret = -ENOMEM; 475 475 goto duration_map; 476 476 } 477 - haptic->effect = kzalloc_objs(struct hid_haptic_effect, FF_MAX_EFFECTS, 478 - GFP_KERNEL); 477 + haptic->effect = kzalloc_objs(struct hid_haptic_effect, FF_MAX_EFFECTS); 479 478 if (!haptic->effect) { 480 479 ret = -ENOMEM; 481 480 goto output_queue;
+1 -2
drivers/hid/intel-ish-hid/ishtp/hbm.c
··· 34 34 return; 35 35 36 36 /* allocate storage for fw clients representation */ 37 - clients = kzalloc_objs(struct ishtp_fw_client, dev->fw_clients_num, 38 - GFP_KERNEL); 37 + clients = kzalloc_objs(struct ishtp_fw_client, dev->fw_clients_num); 39 38 if (!clients) { 40 39 dev->dev_state = ISHTP_DEV_RESETTING; 41 40 ish_hw_reset(dev);
+1 -2
drivers/hv/hv.c
··· 183 183 memset(hv_cpu, 0, sizeof(*hv_cpu)); 184 184 } 185 185 186 - hv_context.hv_numa_map = kzalloc_objs(struct cpumask, nr_node_ids, 187 - GFP_KERNEL); 186 + hv_context.hv_numa_map = kzalloc_objs(struct cpumask, nr_node_ids); 188 187 if (!hv_context.hv_numa_map) { 189 188 pr_err("Unable to allocate NUMA map\n"); 190 189 goto err;
+1 -2
drivers/hv/ring_buffer.c
··· 196 196 * First page holds struct hv_ring_buffer, do wraparound mapping for 197 197 * the rest. 198 198 */ 199 - pages_wraparound = kzalloc_objs(struct page *, page_cnt * 2 - 1, 200 - GFP_KERNEL); 199 + pages_wraparound = kzalloc_objs(struct page *, page_cnt * 2 - 1); 201 200 if (!pages_wraparound) 202 201 return -ENOMEM; 203 202
+1 -2
drivers/hwmon/coretemp.c
··· 804 804 return -ENODEV; 805 805 806 806 max_zones = topology_max_packages() * topology_max_dies_per_package(); 807 - zone_devices = kzalloc_objs(struct platform_device *, max_zones, 808 - GFP_KERNEL); 807 + zone_devices = kzalloc_objs(struct platform_device *, max_zones); 809 808 if (!zone_devices) 810 809 return -ENOMEM; 811 810
+1 -2
drivers/hwmon/ibmpex.c
··· 367 367 return -ENOENT; 368 368 data->num_sensors = err; 369 369 370 - data->sensors = kzalloc_objs(*data->sensors, data->num_sensors, 371 - GFP_KERNEL); 370 + data->sensors = kzalloc_objs(*data->sensors, data->num_sensors); 372 371 if (!data->sensors) 373 372 return -ENOMEM; 374 373
+1 -2
drivers/hwtracing/coresight/coresight-tmc-etr.c
··· 204 204 struct device *real_dev = dev->parent; 205 205 206 206 nr_pages = tmc_pages->nr_pages; 207 - tmc_pages->daddrs = kzalloc_objs(*tmc_pages->daddrs, nr_pages, 208 - GFP_KERNEL); 207 + tmc_pages->daddrs = kzalloc_objs(*tmc_pages->daddrs, nr_pages); 209 208 if (!tmc_pages->daddrs) 210 209 return -ENOMEM; 211 210 tmc_pages->pages = kzalloc_objs(*tmc_pages->pages, nr_pages);
+1 -2
drivers/i2c/busses/i2c-qcom-geni.c
··· 799 799 800 800 if (gi2c->is_tx_multi_desc_xfer) { 801 801 tx_multi_xfer->dma_buf = kcalloc(num, sizeof(void *), GFP_KERNEL); 802 - tx_multi_xfer->dma_addr = kzalloc_objs(dma_addr_t, num, 803 - GFP_KERNEL); 802 + tx_multi_xfer->dma_addr = kzalloc_objs(dma_addr_t, num); 804 803 if (!tx_multi_xfer->dma_buf || !tx_multi_xfer->dma_addr) { 805 804 ret = -ENOMEM; 806 805 goto err;
+1 -2
drivers/i2c/i2c-dev.c
··· 552 552 if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS) 553 553 return -EINVAL; 554 554 555 - rdwr_pa = kmalloc_objs(struct i2c_msg, rdwr_arg.nmsgs, 556 - GFP_KERNEL); 555 + rdwr_pa = kmalloc_objs(struct i2c_msg, rdwr_arg.nmsgs); 557 556 if (!rdwr_pa) 558 557 return -ENOMEM; 559 558
+1 -2
drivers/i3c/master.c
··· 1852 1852 void *bounce __free(kfree) = NULL; 1853 1853 void *dma_buf = buf; 1854 1854 1855 - struct i3c_dma *dma_xfer __free(kfree) = kzalloc_obj(*dma_xfer, 1856 - GFP_KERNEL); 1855 + struct i3c_dma *dma_xfer __free(kfree) = kzalloc_obj(*dma_xfer); 1857 1856 if (!dma_xfer) 1858 1857 return NULL; 1859 1858
+1 -2
drivers/i3c/master/mipi-i3c-hci/dma.c
··· 363 363 rh->resp = dma_alloc_coherent(rings->sysdev, resps_sz, 364 364 &rh->resp_dma, GFP_KERNEL); 365 365 rh->src_xfers = 366 - kmalloc_objs(*rh->src_xfers, rh->xfer_entries, 367 - GFP_KERNEL); 366 + kmalloc_objs(*rh->src_xfers, rh->xfer_entries); 368 367 ret = -ENOMEM; 369 368 if (!rh->xfer || !rh->resp || !rh->src_xfers) 370 369 goto err_out;
+1 -2
drivers/infiniband/core/cache.c
··· 302 302 303 303 ndev = rcu_dereference_protected(attr->ndev, 1); 304 304 if (ndev) { 305 - entry->ndev_storage = kzalloc_obj(*entry->ndev_storage, 306 - GFP_KERNEL); 305 + entry->ndev_storage = kzalloc_obj(*entry->ndev_storage); 307 306 if (!entry->ndev_storage) { 308 307 kfree(entry); 309 308 return NULL;
+1 -2
drivers/infiniband/core/cm.c
··· 4348 4348 int count = 0; 4349 4349 u32 i; 4350 4350 4351 - cm_dev = kzalloc_flex(*cm_dev, port, ib_device->phys_port_cnt, 4352 - GFP_KERNEL); 4351 + cm_dev = kzalloc_flex(*cm_dev, port, ib_device->phys_port_cnt); 4353 4352 if (!cm_dev) 4354 4353 return -ENOMEM; 4355 4354
+3 -6
drivers/infiniband/core/cma.c
··· 2300 2300 2301 2301 rt = &id->route; 2302 2302 rt->num_pri_alt_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1; 2303 - rt->path_rec = kmalloc_objs(*rt->path_rec, rt->num_pri_alt_paths, 2304 - GFP_KERNEL); 2303 + rt->path_rec = kmalloc_objs(*rt->path_rec, rt->num_pri_alt_paths); 2305 2304 if (!rt->path_rec) 2306 2305 goto err; 2307 2306 ··· 2879 2880 struct rdma_route *route = &work->id->id.route; 2880 2881 2881 2882 if (!route->path_rec_inbound) { 2882 - route->path_rec_inbound = kzalloc_obj(*route->path_rec_inbound, 2883 - GFP_KERNEL); 2883 + route->path_rec_inbound = kzalloc_obj(*route->path_rec_inbound); 2884 2884 if (!route->path_rec_inbound) 2885 2885 return -ENOMEM; 2886 2886 } ··· 2894 2896 struct rdma_route *route = &work->id->id.route; 2895 2897 2896 2898 if (!route->path_rec_outbound) { 2897 - route->path_rec_outbound = kzalloc_obj(*route->path_rec_outbound, 2898 - GFP_KERNEL); 2899 + route->path_rec_outbound = kzalloc_obj(*route->path_rec_outbound); 2899 2900 if (!route->path_rec_outbound) 2900 2901 return -ENOMEM; 2901 2902 }
+1 -2
drivers/infiniband/core/roce_gid_mgmt.c
··· 660 660 struct net_device *ndev) 661 661 { 662 662 unsigned int i; 663 - struct netdev_event_work *ndev_work = kmalloc_obj(*ndev_work, 664 - GFP_KERNEL); 663 + struct netdev_event_work *ndev_work = kmalloc_obj(*ndev_work); 665 664 666 665 if (!ndev_work) 667 666 return NOTIFY_DONE;
+2 -4
drivers/infiniband/core/rw.c
··· 213 213 int i, ret, count = 0; 214 214 u32 nents = 0; 215 215 216 - ctx->reg = kzalloc_objs(*ctx->reg, DIV_ROUND_UP(nr_bvec, pages_per_mr), 217 - GFP_KERNEL); 216 + ctx->reg = kzalloc_objs(*ctx->reg, DIV_ROUND_UP(nr_bvec, pages_per_mr)); 218 217 if (!ctx->reg) 219 218 return -ENOMEM; 220 219 ··· 221 222 * Build scatterlist from bvecs using the iterator. This follows 222 223 * the pattern from __blk_rq_map_sg. 223 224 */ 224 - ctx->reg[0].sgt.sgl = kmalloc_objs(*ctx->reg[0].sgt.sgl, nr_bvec, 225 - GFP_KERNEL); 225 + ctx->reg[0].sgt.sgl = kmalloc_objs(*ctx->reg[0].sgt.sgl, nr_bvec); 226 226 if (!ctx->reg[0].sgt.sgl) { 227 227 ret = -ENOMEM; 228 228 goto out_free_reg;
+1 -2
drivers/infiniband/core/sa_query.c
··· 2414 2414 s = rdma_start_port(device); 2415 2415 e = rdma_end_port(device); 2416 2416 2417 - sa_dev = kzalloc_flex(*sa_dev, port, size_add(size_sub(e, s), 1), 2418 - GFP_KERNEL); 2417 + sa_dev = kzalloc_flex(*sa_dev, port, size_add(size_sub(e, s), 1)); 2419 2418 if (!sa_dev) 2420 2419 return -ENOMEM; 2421 2420
+3 -6
drivers/infiniband/core/sysfs.c
··· 855 855 * Two extra attribue elements here, one for the lifespan entry and 856 856 * one to NULL terminate the list for the sysfs core code 857 857 */ 858 - data = kzalloc_flex(*data, attrs, size_add(stats->num_counters, 1), 859 - GFP_KERNEL); 858 + data = kzalloc_flex(*data, attrs, size_add(stats->num_counters, 1)); 860 859 if (!data) 861 860 goto err_free_stats; 862 861 data->group.attrs = kzalloc_objs(*data->group.attrs, ··· 961 962 * Two extra attribue elements here, one for the lifespan entry and 962 963 * one to NULL terminate the list for the sysfs core code 963 964 */ 964 - data = kzalloc_flex(*data, attrs, size_add(stats->num_counters, 1), 965 - GFP_KERNEL); 965 + data = kzalloc_flex(*data, attrs, size_add(stats->num_counters, 1)); 966 966 if (!data) 967 967 goto err_free_stats; 968 - group->attrs = kzalloc_objs(*group->attrs, stats->num_counters + 2, 969 - GFP_KERNEL); 968 + group->attrs = kzalloc_objs(*group->attrs, stats->num_counters + 2); 970 969 if (!group->attrs) 971 970 goto err_free_data; 972 971
+1 -2
drivers/infiniband/core/user_mad.c
··· 1396 1396 s = rdma_start_port(device); 1397 1397 e = rdma_end_port(device); 1398 1398 1399 - umad_dev = kzalloc_flex(*umad_dev, ports, size_add(size_sub(e, s), 1), 1400 - GFP_KERNEL); 1399 + umad_dev = kzalloc_flex(*umad_dev, ports, size_add(size_sub(e, s), 1)); 1401 1400 if (!umad_dev) 1402 1401 return -ENOMEM; 1403 1402
+1 -2
drivers/infiniband/core/uverbs_cmd.c
··· 3292 3292 goto err_put; 3293 3293 } 3294 3294 3295 - flow_attr = kzalloc_flex(*flow_attr, flows, cmd.flow_attr.num_of_specs, 3296 - GFP_KERNEL); 3295 + flow_attr = kzalloc_flex(*flow_attr, flows, cmd.flow_attr.num_of_specs); 3297 3296 if (!flow_attr) { 3298 3297 err = -ENOMEM; 3299 3298 goto err_put;
+1 -2
drivers/infiniband/hw/bng_re/bng_fw.c
··· 98 98 goto fail; 99 99 } 100 100 101 - rcfw->crsqe_tbl = kzalloc_objs(*rcfw->crsqe_tbl, cmdq->hwq.max_elements, 102 - GFP_KERNEL); 101 + rcfw->crsqe_tbl = kzalloc_objs(*rcfw->crsqe_tbl, cmdq->hwq.max_elements); 103 102 if (!rcfw->crsqe_tbl) 104 103 goto fail; 105 104
+2 -4
drivers/infiniband/hw/bnxt_re/ib_verbs.c
··· 1547 1547 1548 1548 rdev = qp->rdev; 1549 1549 /* Create a shadow QP to handle the QP1 traffic */ 1550 - sqp_tbl = kzalloc_objs(*sqp_tbl, BNXT_RE_MAX_GSI_SQP_ENTRIES, 1551 - GFP_KERNEL); 1550 + sqp_tbl = kzalloc_objs(*sqp_tbl, BNXT_RE_MAX_GSI_SQP_ENTRIES); 1552 1551 if (!sqp_tbl) 1553 1552 return -ENOMEM; 1554 1553 rdev->gsi_ctx.sqp_tbl = sqp_tbl; ··· 3193 3194 cq->qplib_cq.dpi = &uctx->dpi; 3194 3195 } else { 3195 3196 cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL); 3196 - cq->cql = kzalloc_objs(struct bnxt_qplib_cqe, cq->max_cql, 3197 - GFP_KERNEL); 3197 + cq->cql = kzalloc_objs(struct bnxt_qplib_cqe, cq->max_cql); 3198 3198 if (!cq->cql) { 3199 3199 rc = -ENOMEM; 3200 3200 goto fail;
+1 -2
drivers/infiniband/hw/bnxt_re/qplib_fp.c
··· 688 688 srq->start_idx = 0; 689 689 srq->last_idx = srq->hwq.max_elements - 1; 690 690 if (!srq->hwq.is_user) { 691 - srq->swq = kzalloc_objs(*srq->swq, srq->hwq.max_elements, 692 - GFP_KERNEL); 691 + srq->swq = kzalloc_objs(*srq->swq, srq->hwq.max_elements); 693 692 if (!srq->swq) { 694 693 rc = -ENOMEM; 695 694 goto fail;
+1 -2
drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
··· 968 968 goto fail; 969 969 } 970 970 971 - rcfw->crsqe_tbl = kzalloc_objs(*rcfw->crsqe_tbl, cmdq->hwq.max_elements, 972 - GFP_KERNEL); 971 + rcfw->crsqe_tbl = kzalloc_objs(*rcfw->crsqe_tbl, cmdq->hwq.max_elements); 973 972 if (!rcfw->crsqe_tbl) 974 973 goto fail; 975 974
+3 -6
drivers/infiniband/hw/cxgb4/qp.c
··· 223 223 } 224 224 225 225 if (!user) { 226 - wq->sq.sw_sq = kzalloc_objs(*wq->sq.sw_sq, wq->sq.size, 227 - GFP_KERNEL); 226 + wq->sq.sw_sq = kzalloc_objs(*wq->sq.sw_sq, wq->sq.size); 228 227 if (!wq->sq.sw_sq) { 229 228 ret = -ENOMEM; 230 229 goto free_rq_qid;//FIXME 231 230 } 232 231 233 232 if (need_rq) { 234 - wq->rq.sw_rq = kzalloc_objs(*wq->rq.sw_rq, wq->rq.size, 235 - GFP_KERNEL); 233 + wq->rq.sw_rq = kzalloc_objs(*wq->rq.sw_rq, wq->rq.size); 236 234 if (!wq->rq.sw_rq) { 237 235 ret = -ENOMEM; 238 236 goto free_sw_sq; ··· 2244 2246 } 2245 2247 memset(&uresp, 0, sizeof(uresp)); 2246 2248 if (t4_sq_onchip(&qhp->wq.sq)) { 2247 - ma_sync_key_mm = kmalloc_obj(*ma_sync_key_mm, 2248 - GFP_KERNEL); 2249 + ma_sync_key_mm = kmalloc_obj(*ma_sync_key_mm); 2249 2250 if (!ma_sync_key_mm) { 2250 2251 ret = -ENOMEM; 2251 2252 goto err_free_rq_db_key;
+1 -2
drivers/infiniband/hw/efa/efa_verbs.c
··· 1374 1374 chunk_list_size = DIV_ROUND_UP(page_cnt, EFA_PTRS_PER_CHUNK); 1375 1375 1376 1376 chunk_list->size = chunk_list_size; 1377 - chunk_list->chunks = kzalloc_objs(*chunk_list->chunks, chunk_list_size, 1378 - GFP_KERNEL); 1377 + chunk_list->chunks = kzalloc_objs(*chunk_list->chunks, chunk_list_size); 1379 1378 if (!chunk_list->chunks) 1380 1379 return -ENOMEM; 1381 1380
+2 -4
drivers/infiniband/hw/hfi1/pio.c
··· 1882 1882 vl_scontexts[i] = sc_per_vl + (extra > 0 ? 1 : 0); 1883 1883 } 1884 1884 /* build new map */ 1885 - newmap = kzalloc_flex(*newmap, map, roundup_pow_of_two(num_vls), 1886 - GFP_KERNEL); 1885 + newmap = kzalloc_flex(*newmap, map, roundup_pow_of_two(num_vls)); 1887 1886 if (!newmap) 1888 1887 goto bail; 1889 1888 newmap->actual_vls = num_vls; ··· 1896 1897 int sz = roundup_pow_of_two(vl_scontexts[i]); 1897 1898 1898 1899 /* only allocate once */ 1899 - newmap->map[i] = kzalloc_flex(*newmap->map[i], ksc, sz, 1900 - GFP_KERNEL); 1900 + newmap->map[i] = kzalloc_flex(*newmap->map[i], ksc, sz); 1901 1901 if (!newmap->map[i]) 1902 1902 goto bail; 1903 1903 newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
+1 -2
drivers/infiniband/hw/hfi1/user_exp_rcv.c
··· 52 52 { 53 53 int ret = 0; 54 54 55 - fd->entry_to_rb = kzalloc_objs(*fd->entry_to_rb, uctxt->expected_count, 56 - GFP_KERNEL); 55 + fd->entry_to_rb = kzalloc_objs(*fd->entry_to_rb, uctxt->expected_count); 57 56 if (!fd->entry_to_rb) 58 57 return -ENOMEM; 59 58
+1 -2
drivers/infiniband/hw/hns/hns_roce_main.c
··· 1153 1153 1154 1154 static int hns_roce_alloc_dfx_cnt(struct hns_roce_dev *hr_dev) 1155 1155 { 1156 - hr_dev->dfx_cnt = kvzalloc_objs(atomic64_t, HNS_ROCE_DFX_CNT_TOTAL, 1157 - GFP_KERNEL); 1156 + hr_dev->dfx_cnt = kvzalloc_objs(atomic64_t, HNS_ROCE_DFX_CNT_TOTAL); 1158 1157 if (!hr_dev->dfx_cnt) 1159 1158 return -ENOMEM; 1160 1159
+2 -4
drivers/infiniband/hw/ionic/ionic_admin.c
··· 1095 1095 goto out; 1096 1096 } 1097 1097 1098 - dev->eq_vec = kmalloc_objs(*dev->eq_vec, dev->lif_cfg.eq_count, 1099 - GFP_KERNEL); 1098 + dev->eq_vec = kmalloc_objs(*dev->eq_vec, dev->lif_cfg.eq_count); 1100 1099 if (!dev->eq_vec) { 1101 1100 rc = -ENOMEM; 1102 1101 goto out; ··· 1125 1126 1126 1127 dev->lif_cfg.eq_count = eq_i; 1127 1128 1128 - dev->aq_vec = kmalloc_objs(*dev->aq_vec, dev->lif_cfg.aq_count, 1129 - GFP_KERNEL); 1129 + dev->aq_vec = kmalloc_objs(*dev->aq_vec, dev->lif_cfg.aq_count); 1130 1130 if (!dev->aq_vec) { 1131 1131 rc = -ENOMEM; 1132 1132 goto out;
+2 -4
drivers/infiniband/hw/ionic/ionic_controlpath.c
··· 1868 1868 1869 1869 ionic_queue_dbell_init(&qp->sq, qp->qpid); 1870 1870 1871 - qp->sq_meta = kmalloc_objs(*qp->sq_meta, (u32)qp->sq.mask + 1, 1872 - GFP_KERNEL); 1871 + qp->sq_meta = kmalloc_objs(*qp->sq_meta, (u32)qp->sq.mask + 1); 1873 1872 if (!qp->sq_meta) { 1874 1873 rc = -ENOMEM; 1875 1874 goto err_sq_meta; ··· 2081 2082 2082 2083 ionic_queue_dbell_init(&qp->rq, qp->qpid); 2083 2084 2084 - qp->rq_meta = kmalloc_objs(*qp->rq_meta, (u32)qp->rq.mask + 1, 2085 - GFP_KERNEL); 2085 + qp->rq_meta = kmalloc_objs(*qp->rq_meta, (u32)qp->rq.mask + 1); 2086 2086 if (!qp->rq_meta) { 2087 2087 rc = -ENOMEM; 2088 2088 goto err_rq_meta;
+3 -6
drivers/infiniband/hw/ionic/ionic_hw_stats.c
··· 155 155 dev->hw_stats_count = hw_stats_count; 156 156 157 157 /* alloc and init array of names, for alloc_hw_stats */ 158 - dev->hw_stats_hdrs = kzalloc_objs(*dev->hw_stats_hdrs, hw_stats_count, 159 - GFP_KERNEL); 158 + dev->hw_stats_hdrs = kzalloc_objs(*dev->hw_stats_hdrs, hw_stats_count); 160 159 if (!dev->hw_stats_hdrs) { 161 160 rc = -ENOMEM; 162 161 goto err_dma; ··· 400 401 cs->queue_stats_count = hw_stats_count; 401 402 402 403 /* alloc and init array of names */ 403 - cs->stats_hdrs = kzalloc_objs(*cs->stats_hdrs, hw_stats_count, 404 - GFP_KERNEL); 404 + cs->stats_hdrs = kzalloc_objs(*cs->stats_hdrs, hw_stats_count); 405 405 if (!cs->stats_hdrs) { 406 406 rc = -ENOMEM; 407 407 goto err_dma; ··· 446 448 } 447 449 448 450 if (stats_type & IONIC_LIF_RDMA_STAT_QP) { 449 - dev->counter_stats = kzalloc_obj(*dev->counter_stats, 450 - GFP_KERNEL); 451 + dev->counter_stats = kzalloc_obj(*dev->counter_stats); 451 452 if (!dev->counter_stats) 452 453 return; 453 454
+2 -4
drivers/infiniband/hw/irdma/cm.c
··· 1683 1683 ibdev_dbg(&iwdev->ibdev, "CM: IP=%pI6, vlan_id=%d, MAC=%pM\n", 1684 1684 &ifp->addr, rdma_vlan_dev_vlan_id(ip_dev), 1685 1685 ip_dev->dev_addr); 1686 - child_listen_node = kzalloc_obj(*child_listen_node, 1687 - GFP_KERNEL); 1686 + child_listen_node = kzalloc_obj(*child_listen_node); 1688 1687 ibdev_dbg(&iwdev->ibdev, "CM: Allocating child listener %p\n", 1689 1688 child_listen_node); 1690 1689 if (!child_listen_node) { ··· 1771 1772 "CM: Allocating child CM Listener forIP=%pI4, vlan_id=%d, MAC=%pM\n", 1772 1773 &ifa->ifa_address, rdma_vlan_dev_vlan_id(ip_dev), 1773 1774 ip_dev->dev_addr); 1774 - child_listen_node = kzalloc_obj(*child_listen_node, 1775 - GFP_KERNEL); 1775 + child_listen_node = kzalloc_obj(*child_listen_node); 1776 1776 cm_parent_listen_node->cm_core->stats_listen_nodes_created++; 1777 1777 ibdev_dbg(&iwdev->ibdev, "CM: Allocating child listener %p\n", 1778 1778 child_listen_node);
+1 -2
drivers/infiniband/hw/irdma/icrdma_if.c
··· 167 167 int i; 168 168 169 169 rf->msix_count = num_online_cpus() + IRDMA_NUM_AEQ_MSIX; 170 - rf->msix_entries = kzalloc_objs(*rf->msix_entries, rf->msix_count, 171 - GFP_KERNEL); 170 + rf->msix_entries = kzalloc_objs(*rf->msix_entries, rf->msix_count); 172 171 if (!rf->msix_entries) 173 172 return -ENOMEM; 174 173
+1 -2
drivers/infiniband/hw/irdma/ig3rdma_if.c
··· 101 101 return -ENOMEM; 102 102 103 103 hw->num_io_regions = le16_to_cpu(idc_priv->num_memory_regions); 104 - hw->io_regs = kzalloc_objs(struct irdma_mmio_region, hw->num_io_regions, 105 - GFP_KERNEL); 104 + hw->io_regs = kzalloc_objs(struct irdma_mmio_region, hw->num_io_regions); 106 105 107 106 if (!hw->io_regs) { 108 107 iounmap(hw->rdma_reg.addr);
+2 -4
drivers/infiniband/hw/irdma/verbs.c
··· 709 709 return status; 710 710 711 711 iwqp->kqp.sq_wrid_mem = 712 - kzalloc_objs(*iwqp->kqp.sq_wrid_mem, ukinfo->sq_depth, 713 - GFP_KERNEL); 712 + kzalloc_objs(*iwqp->kqp.sq_wrid_mem, ukinfo->sq_depth); 714 713 if (!iwqp->kqp.sq_wrid_mem) 715 714 return -ENOMEM; 716 715 717 716 iwqp->kqp.rq_wrid_mem = 718 - kzalloc_objs(*iwqp->kqp.rq_wrid_mem, ukinfo->rq_depth, 719 - GFP_KERNEL); 717 + kzalloc_objs(*iwqp->kqp.rq_wrid_mem, ukinfo->rq_depth); 720 718 721 719 if (!iwqp->kqp.rq_wrid_mem) { 722 720 kfree(iwqp->kqp.sq_wrid_mem);
+1 -2
drivers/infiniband/hw/mlx4/alias_GUID.c
··· 835 835 836 836 if (!mlx4_is_master(dev->dev)) 837 837 return 0; 838 - dev->sriov.alias_guid.sa_client = kzalloc_obj(*dev->sriov.alias_guid.sa_client, 839 - GFP_KERNEL); 838 + dev->sriov.alias_guid.sa_client = kzalloc_obj(*dev->sriov.alias_guid.sa_client); 840 839 if (!dev->sriov.alias_guid.sa_client) 841 840 return -ENOMEM; 842 841
+1 -2
drivers/infiniband/hw/mlx4/mad.c
··· 1616 1616 if (!tun_qp->ring) 1617 1617 return -ENOMEM; 1618 1618 1619 - tun_qp->tx_ring = kzalloc_objs(struct mlx4_ib_tun_tx_buf, nmbr_bufs, 1620 - GFP_KERNEL); 1619 + tun_qp->tx_ring = kzalloc_objs(struct mlx4_ib_tun_tx_buf, nmbr_bufs); 1621 1620 if (!tun_qp->tx_ring) { 1622 1621 kfree(tun_qp->ring); 1623 1622 tun_qp->ring = NULL;
+1 -2
drivers/infiniband/hw/mlx4/main.c
··· 2748 2748 if (mlx4_is_bonded(dev)) 2749 2749 for (i = 1; i < ibdev->num_ports ; ++i) { 2750 2750 new_counter_index = 2751 - kmalloc_obj(struct counter_index, 2752 - GFP_KERNEL); 2751 + kmalloc_obj(struct counter_index); 2753 2752 if (!new_counter_index) { 2754 2753 err = -ENOMEM; 2755 2754 goto err_counter;
+1 -2
drivers/infiniband/hw/mlx4/sysfs.c
··· 244 244 * gids (operational) 245 245 * mcg_table 246 246 */ 247 - port->dentr_ar = kzalloc_obj(struct mlx4_ib_iov_sysfs_attr_ar, 248 - GFP_KERNEL); 247 + port->dentr_ar = kzalloc_obj(struct mlx4_ib_iov_sysfs_attr_ar); 249 248 if (!port->dentr_ar) { 250 249 ret = -ENOMEM; 251 250 goto err;
+2 -4
drivers/infiniband/hw/mlx5/counters.c
··· 858 858 skip_non_qcounters: 859 859 cnts->num_op_counters = num_op_counters; 860 860 num_counters += num_op_counters; 861 - cnts->descs = kzalloc_objs(struct rdma_stat_desc, num_counters, 862 - GFP_KERNEL); 861 + cnts->descs = kzalloc_objs(struct rdma_stat_desc, num_counters); 863 862 if (!cnts->descs) 864 863 return -ENOMEM; 865 864 ··· 1072 1073 if (cntrs_data->ncounters > MAX_COUNTERS_NUM) 1073 1074 return -EINVAL; 1074 1075 1075 - desc_data = kzalloc_objs(*desc_data, cntrs_data->ncounters, 1076 - GFP_KERNEL); 1076 + desc_data = kzalloc_objs(*desc_data, cntrs_data->ncounters); 1077 1077 if (!desc_data) 1078 1078 return -ENOMEM; 1079 1079
+2 -4
drivers/infiniband/hw/mlx5/fs.c
··· 3486 3486 3487 3487 for (i = 0; i < MLX5_RDMA_TRANSPORT_BYPASS_PRIO; i++) { 3488 3488 dev->flow_db->rdma_transport_rx[i] = 3489 - kzalloc_objs(struct mlx5_ib_flow_prio, dev->num_ports, 3490 - GFP_KERNEL); 3489 + kzalloc_objs(struct mlx5_ib_flow_prio, dev->num_ports); 3491 3490 if (!dev->flow_db->rdma_transport_rx[i]) 3492 3491 goto free_rdma_transport_rx; 3493 3492 } 3494 3493 3495 3494 for (j = 0; j < MLX5_RDMA_TRANSPORT_BYPASS_PRIO; j++) { 3496 3495 dev->flow_db->rdma_transport_tx[j] = 3497 - kzalloc_objs(struct mlx5_ib_flow_prio, dev->num_ports, 3498 - GFP_KERNEL); 3496 + kzalloc_objs(struct mlx5_ib_flow_prio, dev->num_ports); 3499 3497 if (!dev->flow_db->rdma_transport_tx[j]) 3500 3498 goto free_rdma_transport_tx; 3501 3499 }
+1 -2
drivers/infiniband/hw/mlx5/gsi.c
··· 109 109 return -ENOMEM; 110 110 111 111 gsi->outstanding_wrs = 112 - kzalloc_objs(*gsi->outstanding_wrs, attr->cap.max_send_wr, 113 - GFP_KERNEL); 112 + kzalloc_objs(*gsi->outstanding_wrs, attr->cap.max_send_wr); 114 113 if (!gsi->outstanding_wrs) { 115 114 ret = -ENOMEM; 116 115 goto err_free_tx;
+1 -2
drivers/infiniband/hw/mlx5/mr.c
··· 276 276 int i; 277 277 278 278 for (i = 0; i < num; i++) { 279 - async_create = kzalloc_obj(struct mlx5r_async_create_mkey, 280 - GFP_KERNEL); 279 + async_create = kzalloc_obj(struct mlx5r_async_create_mkey); 281 280 if (!async_create) 282 281 return -ENOMEM; 283 282 mkc = MLX5_ADDR_OF(create_mkey_in, async_create->in,
+1 -2
drivers/infiniband/hw/mlx5/qp.c
··· 1185 1185 sizeof(*qp->sq.wr_data), GFP_KERNEL); 1186 1186 qp->rq.wrid = kvmalloc_array(qp->rq.wqe_cnt, 1187 1187 sizeof(*qp->rq.wrid), GFP_KERNEL); 1188 - qp->sq.w_list = kvmalloc_objs(*qp->sq.w_list, qp->sq.wqe_cnt, 1189 - GFP_KERNEL); 1188 + qp->sq.w_list = kvmalloc_objs(*qp->sq.w_list, qp->sq.wqe_cnt); 1190 1189 qp->sq.wqe_head = kvmalloc_array(qp->sq.wqe_cnt, 1191 1190 sizeof(*qp->sq.wqe_head), GFP_KERNEL); 1192 1191
+1 -2
drivers/infiniband/hw/mthca/mthca_allocator.c
··· 230 230 if (!dma_list) 231 231 return -ENOMEM; 232 232 233 - buf->page_list = kmalloc_objs(*buf->page_list, npages, 234 - GFP_KERNEL); 233 + buf->page_list = kmalloc_objs(*buf->page_list, npages); 235 234 if (!buf->page_list) 236 235 goto err_out; 237 236
+1 -2
drivers/infiniband/hw/mthca/mthca_mr.c
··· 146 146 147 147 buddy->bits = kcalloc(buddy->max_order + 1, sizeof(*buddy->bits), 148 148 GFP_KERNEL); 149 - buddy->num_free = kzalloc_objs(*buddy->num_free, (buddy->max_order + 1), 150 - GFP_KERNEL); 149 + buddy->num_free = kzalloc_objs(*buddy->num_free, (buddy->max_order + 1)); 151 150 if (!buddy->bits || !buddy->num_free) 152 151 goto err_out; 153 152
+2 -4
drivers/infiniband/hw/ocrdma/ocrdma_main.c
··· 219 219 static int ocrdma_alloc_resources(struct ocrdma_dev *dev) 220 220 { 221 221 mutex_init(&dev->dev_lock); 222 - dev->cq_tbl = kzalloc_objs(struct ocrdma_cq *, OCRDMA_MAX_CQ, 223 - GFP_KERNEL); 222 + dev->cq_tbl = kzalloc_objs(struct ocrdma_cq *, OCRDMA_MAX_CQ); 224 223 if (!dev->cq_tbl) 225 224 goto alloc_err; 226 225 227 226 if (dev->attr.max_qp) { 228 - dev->qp_tbl = kzalloc_objs(struct ocrdma_qp *, OCRDMA_MAX_QP, 229 - GFP_KERNEL); 227 + dev->qp_tbl = kzalloc_objs(struct ocrdma_qp *, OCRDMA_MAX_QP); 230 228 if (!dev->qp_tbl) 231 229 goto alloc_err; 232 230 }
+1 -2
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
··· 794 794 void *va; 795 795 dma_addr_t pa; 796 796 797 - mr->pbl_table = kzalloc_objs(struct ocrdma_pbl, mr->num_pbls, 798 - GFP_KERNEL); 797 + mr->pbl_table = kzalloc_objs(struct ocrdma_pbl, mr->num_pbls); 799 798 800 799 if (!mr->pbl_table) 801 800 return -ENOMEM;
+3 -6
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
··· 257 257 mutex_init(&dev->port_mutex); 258 258 spin_lock_init(&dev->desc_lock); 259 259 260 - dev->cq_tbl = kzalloc_objs(struct pvrdma_cq *, dev->dsr->caps.max_cq, 261 - GFP_KERNEL); 260 + dev->cq_tbl = kzalloc_objs(struct pvrdma_cq *, dev->dsr->caps.max_cq); 262 261 if (!dev->cq_tbl) 263 262 return ret; 264 263 spin_lock_init(&dev->cq_tbl_lock); 265 264 266 - dev->qp_tbl = kzalloc_objs(struct pvrdma_qp *, dev->dsr->caps.max_qp, 267 - GFP_KERNEL); 265 + dev->qp_tbl = kzalloc_objs(struct pvrdma_qp *, dev->dsr->caps.max_qp); 268 266 if (!dev->qp_tbl) 269 267 goto err_cq_free; 270 268 spin_lock_init(&dev->qp_tbl_lock); ··· 982 984 } 983 985 984 986 /* Allocate GID table */ 985 - dev->sgid_tbl = kzalloc_objs(union ib_gid, dev->dsr->caps.gid_tbl_len, 986 - GFP_KERNEL); 987 + dev->sgid_tbl = kzalloc_objs(union ib_gid, dev->dsr->caps.gid_tbl_len); 987 988 if (!dev->sgid_tbl) { 988 989 ret = -ENOMEM; 989 990 goto err_free_uar_table;
+1 -2
drivers/infiniband/ulp/ipoib/ipoib_main.c
··· 1773 1773 ipoib_napi_add(dev); 1774 1774 1775 1775 /* Allocate RX/TX "rings" to hold queued skbs */ 1776 - priv->rx_ring = kzalloc_objs(*priv->rx_ring, ipoib_recvq_size, 1777 - GFP_KERNEL); 1776 + priv->rx_ring = kzalloc_objs(*priv->rx_ring, ipoib_recvq_size); 1778 1777 if (!priv->rx_ring) 1779 1778 goto out; 1780 1779
+1 -2
drivers/infiniband/ulp/isert/ib_isert.c
··· 332 332 { 333 333 int ret; 334 334 335 - isert_conn->login_desc = kzalloc_obj(*isert_conn->login_desc, 336 - GFP_KERNEL); 335 + isert_conn->login_desc = kzalloc_obj(*isert_conn->login_desc); 337 336 if (!isert_conn->login_desc) 338 337 return -ENOMEM; 339 338
+1 -2
drivers/infiniband/ulp/rtrs/rtrs-clt.c
··· 1364 1364 enum ib_mr_type mr_type; 1365 1365 int i, err = -ENOMEM; 1366 1366 1367 - clt_path->reqs = kzalloc_objs(*clt_path->reqs, clt_path->queue_depth, 1368 - GFP_KERNEL); 1367 + clt_path->reqs = kzalloc_objs(*clt_path->reqs, clt_path->queue_depth); 1369 1368 if (!clt_path->reqs) 1370 1369 return -ENOMEM; 1371 1370
+2 -4
drivers/infiniband/ulp/rtrs/rtrs-srv.c
··· 138 138 struct rtrs_srv_op *id; 139 139 int i, ret; 140 140 141 - srv_path->ops_ids = kzalloc_objs(*srv_path->ops_ids, srv->queue_depth, 142 - GFP_KERNEL); 141 + srv_path->ops_ids = kzalloc_objs(*srv_path->ops_ids, srv->queue_depth); 143 142 if (!srv_path->ops_ids) 144 143 goto err; 145 144 ··· 1819 1820 1820 1821 srv_path->stats->srv_path = srv_path; 1821 1822 1822 - srv_path->dma_addr = kzalloc_objs(*srv_path->dma_addr, srv->queue_depth, 1823 - GFP_KERNEL); 1823 + srv_path->dma_addr = kzalloc_objs(*srv_path->dma_addr, srv->queue_depth); 1824 1824 if (!srv_path->dma_addr) 1825 1825 goto err_free_percpu; 1826 1826
+1 -2
drivers/infiniband/ulp/srpt/ib_srpt.c
··· 965 965 if (nbufs == 1) { 966 966 ioctx->rw_ctxs = &ioctx->s_rw_ctx; 967 967 } else { 968 - ioctx->rw_ctxs = kmalloc_objs(*ioctx->rw_ctxs, nbufs, 969 - GFP_KERNEL); 968 + ioctx->rw_ctxs = kmalloc_objs(*ioctx->rw_ctxs, nbufs); 970 969 if (!ioctx->rw_ctxs) 971 970 return -ENOMEM; 972 971 }
+1 -2
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
··· 3517 3517 int ret = 0; 3518 3518 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev); 3519 3519 3520 - master->streams = kzalloc_objs(*master->streams, fwspec->num_ids, 3521 - GFP_KERNEL); 3520 + master->streams = kzalloc_objs(*master->streams, fwspec->num_ids); 3522 3521 if (!master->streams) 3523 3522 return -ENOMEM; 3524 3523 master->num_streams = fwspec->num_ids;
+1 -2
drivers/ipack/carriers/tpci200.c
··· 461 461 { 462 462 int res; 463 463 464 - tpci200->slots = kzalloc_objs(struct tpci200_slot, TPCI200_NB_SLOT, 465 - GFP_KERNEL); 464 + tpci200->slots = kzalloc_objs(struct tpci200_slot, TPCI200_NB_SLOT); 466 465 if (tpci200->slots == NULL) 467 466 return -ENOMEM; 468 467
+1 -2
drivers/irqchip/irq-alpine-msi.c
··· 192 192 193 193 static int alpine_msix_init(struct device_node *node, struct device_node *parent) 194 194 { 195 - struct alpine_msix_data *priv __free(kfree) = kzalloc_obj(*priv, 196 - GFP_KERNEL); 195 + struct alpine_msix_data *priv __free(kfree) = kzalloc_obj(*priv); 197 196 struct resource res; 198 197 int ret; 199 198
+1 -2
drivers/irqchip/irq-bcm6345-l1.c
··· 238 238 else if (intc->n_words != n_words) 239 239 return -EINVAL; 240 240 241 - cpu = intc->cpus[idx] = kzalloc_flex(*cpu, enable_cache, n_words, 242 - GFP_KERNEL); 241 + cpu = intc->cpus[idx] = kzalloc_flex(*cpu, enable_cache, n_words); 243 242 if (!cpu) 244 243 return -ENOMEM; 245 244
+1 -2
drivers/irqchip/irq-bcm7038-l1.c
··· 242 242 return -EINVAL; 243 243 } 244 244 245 - cpu = intc->cpus[idx] = kzalloc_flex(*cpu, mask_cache, n_words, 246 - GFP_KERNEL); 245 + cpu = intc->cpus[idx] = kzalloc_flex(*cpu, mask_cache, n_words); 247 246 if (!cpu) 248 247 return -ENOMEM; 249 248
+1 -2
drivers/irqchip/irq-bcm7120-l2.c
··· 231 231 goto out_unmap; 232 232 } 233 233 234 - data->l1_data = kzalloc_objs(*data->l1_data, data->num_parent_irqs, 235 - GFP_KERNEL); 234 + data->l1_data = kzalloc_objs(*data->l1_data, data->num_parent_irqs); 236 235 if (!data->l1_data) { 237 236 ret = -ENOMEM; 238 237 goto out_free_l1_data;
+1 -2
drivers/irqchip/irq-gic-v3-its.c
··· 3025 3025 { 3026 3026 int i; 3027 3027 3028 - its->collections = kzalloc_objs(*its->collections, nr_cpu_ids, 3029 - GFP_KERNEL); 3028 + its->collections = kzalloc_objs(*its->collections, nr_cpu_ids); 3030 3029 if (!its->collections) 3031 3030 return -ENOMEM; 3032 3031
+1 -2
drivers/irqchip/irq-gic-v5-iwb.c
··· 219 219 unsigned int n; 220 220 int ret; 221 221 222 - struct gicv5_iwb_chip_data *iwb_node __free(kfree) = kzalloc_obj(*iwb_node, 223 - GFP_KERNEL); 222 + struct gicv5_iwb_chip_data *iwb_node __free(kfree) = kzalloc_obj(*iwb_node); 224 223 if (!iwb_node) 225 224 return ERR_PTR(-ENOMEM); 226 225
+1 -2
drivers/irqchip/irq-riscv-intc.c
··· 349 349 if (count <= 0) 350 350 return -EINVAL; 351 351 352 - rintc_acpi_data = kzalloc_objs(*rintc_acpi_data, count, 353 - GFP_KERNEL); 352 + rintc_acpi_data = kzalloc_objs(*rintc_acpi_data, count); 354 353 if (!rintc_acpi_data) 355 354 return -ENOMEM; 356 355 }
+1 -2
drivers/isdn/capi/capi.c
··· 1259 1259 if (capi_ttyminors <= 0) 1260 1260 capi_ttyminors = CAPINC_NR_PORTS; 1261 1261 1262 - capiminors = kzalloc_objs(struct capiminor *, capi_ttyminors, 1263 - GFP_KERNEL); 1262 + capiminors = kzalloc_objs(struct capiminor *, capi_ttyminors); 1264 1263 if (!capiminors) 1265 1264 return -ENOMEM; 1266 1265
+1 -2
drivers/macintosh/windfarm_pm121.c
··· 531 531 control = controls[param->control_id]; 532 532 533 533 /* Alloc & initialize state */ 534 - pm121_sys_state[loop_id] = kmalloc_obj(struct pm121_sys_state, 535 - GFP_KERNEL); 534 + pm121_sys_state[loop_id] = kmalloc_obj(struct pm121_sys_state); 536 535 if (pm121_sys_state[loop_id] == NULL) { 537 536 printk(KERN_WARNING "pm121: Memory allocation error\n"); 538 537 goto fail;
+2 -4
drivers/macintosh/windfarm_pm91.c
··· 299 299 }; 300 300 301 301 /* Alloc & initialize state */ 302 - wf_smu_drive_fans = kmalloc_obj(struct wf_smu_drive_fans_state, 303 - GFP_KERNEL); 302 + wf_smu_drive_fans = kmalloc_obj(struct wf_smu_drive_fans_state); 304 303 if (wf_smu_drive_fans == NULL) { 305 304 printk(KERN_WARNING "windfarm: Memory allocation error" 306 305 " max fan speed\n"); ··· 379 380 }; 380 381 381 382 /* Alloc & initialize state */ 382 - wf_smu_slots_fans = kmalloc_obj(struct wf_smu_slots_fans_state, 383 - GFP_KERNEL); 383 + wf_smu_slots_fans = kmalloc_obj(struct wf_smu_slots_fans_state); 384 384 if (wf_smu_slots_fans == NULL) { 385 385 printk(KERN_WARNING "windfarm: Memory allocation error" 386 386 " max fan speed\n");
+1 -2
drivers/md/dm-snap-persistent.c
··· 626 626 */ 627 627 ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) / 628 628 sizeof(struct disk_exception); 629 - ps->callbacks = kvzalloc_objs(*ps->callbacks, ps->exceptions_per_area, 630 - GFP_KERNEL); 629 + ps->callbacks = kvzalloc_objs(*ps->callbacks, ps->exceptions_per_area); 631 630 if (!ps->callbacks) 632 631 return -ENOMEM; 633 632
+1 -2
drivers/md/dm-snap.c
··· 362 362 for (i = 0; i < ORIGIN_HASH_SIZE; i++) 363 363 INIT_LIST_HEAD(_origins + i); 364 364 365 - _dm_origins = kmalloc_objs(struct list_head, ORIGIN_HASH_SIZE, 366 - GFP_KERNEL); 365 + _dm_origins = kmalloc_objs(struct list_head, ORIGIN_HASH_SIZE); 367 366 if (!_dm_origins) { 368 367 DMERR("unable to allocate memory for _dm_origins"); 369 368 kfree(_origins);
+1 -2
drivers/md/dm-zoned-metadata.c
··· 1686 1686 unsigned int bzone_id; 1687 1687 1688 1688 /* Metadata block array for the chunk mapping table */ 1689 - zmd->map_mblk = kzalloc_objs(struct dmz_mblock *, zmd->nr_map_blocks, 1690 - GFP_KERNEL); 1689 + zmd->map_mblk = kzalloc_objs(struct dmz_mblock *, zmd->nr_map_blocks); 1691 1690 if (!zmd->map_mblk) 1692 1691 return -ENOMEM; 1693 1692
+1 -2
drivers/md/raid0.c
··· 143 143 } 144 144 145 145 err = -ENOMEM; 146 - conf->strip_zone = kzalloc_objs(struct strip_zone, conf->nr_strip_zones, 147 - GFP_KERNEL); 146 + conf->strip_zone = kzalloc_objs(struct strip_zone, conf->nr_strip_zones); 148 147 if (!conf->strip_zone) 149 148 goto abort; 150 149 conf->devlist = kzalloc(array3_size(sizeof(struct md_rdev *),
+2 -4
drivers/md/raid1.c
··· 3073 3073 if (!conf) 3074 3074 goto abort; 3075 3075 3076 - conf->nr_pending = kzalloc_objs(atomic_t, BARRIER_BUCKETS_NR, 3077 - GFP_KERNEL); 3076 + conf->nr_pending = kzalloc_objs(atomic_t, BARRIER_BUCKETS_NR); 3078 3077 if (!conf->nr_pending) 3079 3078 goto abort; 3080 3079 3081 - conf->nr_waiting = kzalloc_objs(atomic_t, BARRIER_BUCKETS_NR, 3082 - GFP_KERNEL); 3080 + conf->nr_waiting = kzalloc_objs(atomic_t, BARRIER_BUCKETS_NR); 3083 3081 if (!conf->nr_waiting) 3084 3082 goto abort; 3085 3083
+1 -2
drivers/md/raid5-ppl.c
··· 1378 1378 goto err; 1379 1379 1380 1380 ppl_conf->count = conf->raid_disks; 1381 - ppl_conf->child_logs = kzalloc_objs(struct ppl_log, ppl_conf->count, 1382 - GFP_KERNEL); 1381 + ppl_conf->child_logs = kzalloc_objs(struct ppl_log, ppl_conf->count); 1383 1382 if (!ppl_conf->child_logs) { 1384 1383 ret = -ENOMEM; 1385 1384 goto err;
+1 -2
drivers/md/raid5.c
··· 7508 7508 #endif 7509 7509 INIT_LIST_HEAD(&conf->free_list); 7510 7510 INIT_LIST_HEAD(&conf->pending_list); 7511 - conf->pending_data = kzalloc_objs(struct r5pending_data, PENDING_IO_MAX, 7512 - GFP_KERNEL); 7511 + conf->pending_data = kzalloc_objs(struct r5pending_data, PENDING_IO_MAX); 7513 7512 if (!conf->pending_data) 7514 7513 goto abort; 7515 7514 for (i = 0; i < PENDING_IO_MAX; i++)
+1 -2
drivers/media/common/b2c2/flexcop.c
··· 214 214 struct flexcop_device *flexcop_device_kmalloc(size_t bus_specific_len) 215 215 { 216 216 void *bus; 217 - struct flexcop_device *fc = kzalloc_obj(struct flexcop_device, 218 - GFP_KERNEL); 217 + struct flexcop_device *fc = kzalloc_obj(struct flexcop_device); 219 218 if (!fc) { 220 219 err("no memory"); 221 220 return NULL;
+2 -4
drivers/media/dvb-core/dvbdev.c
··· 251 251 { 252 252 int i; 253 253 254 - dvbdev->tsout_pads = kzalloc_objs(*dvbdev->tsout_pads, npads, 255 - GFP_KERNEL); 254 + dvbdev->tsout_pads = kzalloc_objs(*dvbdev->tsout_pads, npads); 256 255 if (!dvbdev->tsout_pads) 257 256 return -ENOMEM; 258 257 259 - dvbdev->tsout_entity = kzalloc_objs(*dvbdev->tsout_entity, npads, 260 - GFP_KERNEL); 258 + dvbdev->tsout_entity = kzalloc_objs(*dvbdev->tsout_entity, npads); 261 259 if (!dvbdev->tsout_entity) 262 260 return -ENOMEM; 263 261
+1 -2
drivers/media/dvb-frontends/dib0070.c
··· 738 738 739 739 struct dvb_frontend *dib0070_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct dib0070_config *cfg) 740 740 { 741 - struct dib0070_state *state = kzalloc_obj(struct dib0070_state, 742 - GFP_KERNEL); 741 + struct dib0070_state *state = kzalloc_obj(struct dib0070_state); 743 742 if (state == NULL) 744 743 return NULL; 745 744
+1 -2
drivers/media/dvb-frontends/dib0090.c
··· 2638 2638 2639 2639 struct dvb_frontend *dib0090_fw_register(struct dvb_frontend *fe, struct i2c_adapter *i2c, const struct dib0090_config *config) 2640 2640 { 2641 - struct dib0090_fw_state *st = kzalloc_obj(struct dib0090_fw_state, 2642 - GFP_KERNEL); 2641 + struct dib0090_fw_state *st = kzalloc_obj(struct dib0090_fw_state); 2643 2642 if (st == NULL) 2644 2643 return NULL; 2645 2644
+1 -2
drivers/media/dvb-frontends/s5h1420.c
··· 872 872 struct i2c_adapter *i2c) 873 873 { 874 874 /* allocate memory for the internal state */ 875 - struct s5h1420_state *state = kzalloc_obj(struct s5h1420_state, 876 - GFP_KERNEL); 875 + struct s5h1420_state *state = kzalloc_obj(struct s5h1420_state); 877 876 u8 i; 878 877 879 878 if (state == NULL)
+2 -4
drivers/media/dvb-frontends/stv0900_core.c
··· 91 91 while (new_node->next_inode != NULL) 92 92 new_node = new_node->next_inode; 93 93 94 - new_node->next_inode = kmalloc_obj(struct stv0900_inode, 95 - GFP_KERNEL); 94 + new_node->next_inode = kmalloc_obj(struct stv0900_inode); 96 95 if (new_node->next_inode != NULL) 97 96 new_node = new_node->next_inode; 98 97 else ··· 1347 1348 dprintk("%s: Find Internal Structure!\n", __func__); 1348 1349 return STV0900_NO_ERROR; 1349 1350 } else { 1350 - state->internal = kmalloc_obj(struct stv0900_internal, 1351 - GFP_KERNEL); 1351 + state->internal = kmalloc_obj(struct stv0900_internal); 1352 1352 if (state->internal == NULL) 1353 1353 return STV0900_INVALID_HANDLE; 1354 1354 temp_int = append_internal(state->internal);
+1 -2
drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c
··· 474 474 475 475 isi->pdata = of_device_get_match_data(dev); 476 476 477 - isi->pipes = kzalloc_objs(isi->pipes[0], isi->pdata->num_channels, 478 - GFP_KERNEL); 477 + isi->pipes = kzalloc_objs(isi->pipes[0], isi->pdata->num_channels); 479 478 if (!isi->pipes) 480 479 return -ENOMEM; 481 480
+1 -2
drivers/media/platform/renesas/vsp1/vsp1_video.c
··· 720 720 } 721 721 722 722 pipe->partitions = DIV_ROUND_UP(format->width, div_size); 723 - pipe->part_table = kzalloc_objs(*pipe->part_table, pipe->partitions, 724 - GFP_KERNEL); 723 + pipe->part_table = kzalloc_objs(*pipe->part_table, pipe->partitions); 725 724 if (!pipe->part_table) 726 725 return -ENOMEM; 727 726
+1 -2
drivers/media/test-drivers/vidtv/vidtv_psi.c
··· 1523 1523 program = program->next; 1524 1524 } 1525 1525 1526 - pmt_secs = kzalloc_objs(struct vidtv_psi_table_pmt *, num_pmt, 1527 - GFP_KERNEL); 1526 + pmt_secs = kzalloc_objs(struct vidtv_psi_table_pmt *, num_pmt); 1528 1527 if (!pmt_secs) 1529 1528 return NULL; 1530 1529
+1 -2
drivers/media/test-drivers/vimc/vimc-core.c
··· 287 287 return ret; 288 288 } 289 289 /* allocate ent_devs */ 290 - vimc->ent_devs = kzalloc_objs(*vimc->ent_devs, vimc->pipe_cfg->num_ents, 291 - GFP_KERNEL); 290 + vimc->ent_devs = kzalloc_objs(*vimc->ent_devs, vimc->pipe_cfg->num_ents); 292 291 if (!vimc->ent_devs) { 293 292 ret = -ENOMEM; 294 293 goto err_v4l2_unregister;
+1 -2
drivers/media/usb/dvb-usb/cinergyT2-fe.c
··· 268 268 269 269 struct dvb_frontend *cinergyt2_fe_attach(struct dvb_usb_device *d) 270 270 { 271 - struct cinergyt2_fe_state *s = kzalloc_obj(struct cinergyt2_fe_state, 272 - GFP_KERNEL); 271 + struct cinergyt2_fe_state *s = kzalloc_obj(struct cinergyt2_fe_state); 273 272 if (s == NULL) 274 273 return NULL; 275 274
+1 -2
drivers/media/usb/dvb-usb/vp702x-fe.c
··· 323 323 324 324 struct dvb_frontend * vp702x_fe_attach(struct dvb_usb_device *d) 325 325 { 326 - struct vp702x_fe_state *s = kzalloc_obj(struct vp702x_fe_state, 327 - GFP_KERNEL); 326 + struct vp702x_fe_state *s = kzalloc_obj(struct vp702x_fe_state); 328 327 if (s == NULL) 329 328 goto error; 330 329
+1 -2
drivers/media/usb/dvb-usb/vp7045-fe.c
··· 140 140 141 141 struct dvb_frontend * vp7045_fe_attach(struct dvb_usb_device *d) 142 142 { 143 - struct vp7045_fe_state *s = kzalloc_obj(struct vp7045_fe_state, 144 - GFP_KERNEL); 143 + struct vp7045_fe_state *s = kzalloc_obj(struct vp7045_fe_state); 145 144 if (s == NULL) 146 145 goto error; 147 146
+1 -2
drivers/media/usb/pvrusb2/pvrusb2-hdw.c
··· 2424 2424 2425 2425 hdw->control_cnt = CTRLDEF_COUNT; 2426 2426 hdw->control_cnt += MPEGDEF_COUNT; 2427 - hdw->controls = kzalloc_objs(struct pvr2_ctrl, hdw->control_cnt, 2428 - GFP_KERNEL); 2427 + hdw->controls = kzalloc_objs(struct pvr2_ctrl, hdw->control_cnt); 2429 2428 if (!hdw->controls) goto fail; 2430 2429 hdw->hdw_desc = hdw_desc; 2431 2430 hdw->ir_scheme_active = hdw->hdw_desc->ir_scheme;
+1 -2
drivers/media/v4l2-core/v4l2-ctrls-core.c
··· 1725 1725 INIT_LIST_HEAD(&hdl->ctrls); 1726 1726 INIT_LIST_HEAD(&hdl->ctrl_refs); 1727 1727 hdl->nr_of_buckets = 1 + nr_of_controls_hint / 8; 1728 - hdl->buckets = kvzalloc_objs(hdl->buckets[0], hdl->nr_of_buckets, 1729 - GFP_KERNEL); 1728 + hdl->buckets = kvzalloc_objs(hdl->buckets[0], hdl->nr_of_buckets); 1730 1729 hdl->error = hdl->buckets ? 0 : -ENOMEM; 1731 1730 v4l2_ctrl_handler_init_request(hdl); 1732 1731 return hdl->error;
+1 -2
drivers/media/v4l2-core/v4l2-flash-led-class.c
··· 443 443 return -ENOMEM; 444 444 445 445 /* allocate memory dynamically so as not to exceed stack frame size */ 446 - ctrl_init_data = kzalloc_objs(*ctrl_init_data, NUM_FLASH_CTRLS, 447 - GFP_KERNEL); 446 + ctrl_init_data = kzalloc_objs(*ctrl_init_data, NUM_FLASH_CTRLS); 448 447 if (!ctrl_init_data) 449 448 return -ENOMEM; 450 449
+1 -2
drivers/media/v4l2-core/v4l2-subdev.c
··· 1620 1620 1621 1621 /* Drivers that support streams do not need the legacy pad config */ 1622 1622 if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS) && sd->entity.num_pads) { 1623 - state->pads = kvzalloc_objs(*state->pads, sd->entity.num_pads, 1624 - GFP_KERNEL); 1623 + state->pads = kvzalloc_objs(*state->pads, sd->entity.num_pads); 1625 1624 if (!state->pads) { 1626 1625 ret = -ENOMEM; 1627 1626 goto err;
+2 -4
drivers/message/fusion/mptlan.c
··· 396 396 goto out; 397 397 priv->mpt_txfidx_tail = -1; 398 398 399 - priv->SendCtl = kzalloc_objs(struct BufferControl, priv->tx_max_out, 400 - GFP_KERNEL); 399 + priv->SendCtl = kzalloc_objs(struct BufferControl, priv->tx_max_out); 401 400 if (priv->SendCtl == NULL) 402 401 goto out_mpt_txfidx; 403 402 for (i = 0; i < priv->tx_max_out; i++) ··· 409 410 goto out_SendCtl; 410 411 priv->mpt_rxfidx_tail = -1; 411 412 412 - priv->RcvCtl = kzalloc_objs(struct BufferControl, priv->max_buckets_out, 413 - GFP_KERNEL); 413 + priv->RcvCtl = kzalloc_objs(struct BufferControl, priv->max_buckets_out); 414 414 if (priv->RcvCtl == NULL) 415 415 goto out_mpt_rxfidx; 416 416 for (i = 0; i < priv->max_buckets_out; i++)
+1 -2
drivers/message/fusion/mptsas.c
··· 907 907 * Forming a port 908 908 */ 909 909 if (!port_details) { 910 - port_details = kzalloc_obj(struct mptsas_portinfo_details, 911 - GFP_KERNEL); 910 + port_details = kzalloc_obj(struct mptsas_portinfo_details); 912 911 if (!port_details) 913 912 goto out; 914 913 port_details->num_phys = 1;
+1 -2
drivers/mfd/timberdale.c
··· 698 698 goto err_config; 699 699 } 700 700 701 - msix_entries = kzalloc_objs(*msix_entries, TIMBERDALE_NR_IRQS, 702 - GFP_KERNEL); 701 + msix_entries = kzalloc_objs(*msix_entries, TIMBERDALE_NR_IRQS); 703 702 if (!msix_entries) 704 703 goto err_config; 705 704
+1 -2
drivers/misc/altera-stapl/altera.c
··· 1098 1098 /* Allocate a writable buffer for this array */ 1099 1099 count = var_size[variable_id]; 1100 1100 long_tmp = vars[variable_id]; 1101 - longptr_tmp = kzalloc_objs(long, count, 1102 - GFP_KERNEL); 1101 + longptr_tmp = kzalloc_objs(long, count); 1103 1102 vars[variable_id] = (long)longptr_tmp; 1104 1103 1105 1104 if (vars[variable_id] == 0) {
+2 -4
drivers/misc/fastrpc.c
··· 616 616 kfree(ctx); 617 617 return ERR_PTR(-ENOMEM); 618 618 } 619 - ctx->olaps = kzalloc_objs(*ctx->olaps, ctx->nscalars, 620 - GFP_KERNEL); 619 + ctx->olaps = kzalloc_objs(*ctx->olaps, ctx->nscalars); 621 620 if (!ctx->olaps) { 622 621 kfree(ctx->maps); 623 622 kfree(ctx); ··· 1305 1306 } inbuf; 1306 1307 u32 sc; 1307 1308 1308 - args = kzalloc_objs(*args, FASTRPC_CREATE_STATIC_PROCESS_NARGS, 1309 - GFP_KERNEL); 1309 + args = kzalloc_objs(*args, FASTRPC_CREATE_STATIC_PROCESS_NARGS); 1310 1310 if (!args) 1311 1311 return -ENOMEM; 1312 1312
+2 -4
drivers/misc/genwqe/card_ddcb.c
··· 1046 1046 "[%s] **err: could not allocate DDCB **\n", __func__); 1047 1047 return -ENOMEM; 1048 1048 } 1049 - queue->ddcb_req = kzalloc_objs(struct ddcb_requ *, queue->ddcb_max, 1050 - GFP_KERNEL); 1049 + queue->ddcb_req = kzalloc_objs(struct ddcb_requ *, queue->ddcb_max); 1051 1050 if (!queue->ddcb_req) { 1052 1051 rc = -ENOMEM; 1053 1052 goto free_ddcbs; 1054 1053 } 1055 1054 1056 - queue->ddcb_waitqs = kzalloc_objs(wait_queue_head_t, queue->ddcb_max, 1057 - GFP_KERNEL); 1055 + queue->ddcb_waitqs = kzalloc_objs(wait_queue_head_t, queue->ddcb_max); 1058 1056 if (!queue->ddcb_waitqs) { 1059 1057 rc = -ENOMEM; 1060 1058 goto free_requs;
+2 -4
drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c
··· 42 42 if (!aux_bus) 43 43 return -ENOMEM; 44 44 45 - aux_bus->aux_device_wrapper[0] = kzalloc_obj(*aux_bus->aux_device_wrapper[0], 46 - GFP_KERNEL); 45 + aux_bus->aux_device_wrapper[0] = kzalloc_obj(*aux_bus->aux_device_wrapper[0]); 47 46 if (!aux_bus->aux_device_wrapper[0]) 48 47 return -ENOMEM; 49 48 ··· 66 67 if (retval) 67 68 goto err_aux_dev_add_0; 68 69 69 - aux_bus->aux_device_wrapper[1] = kzalloc_obj(*aux_bus->aux_device_wrapper[1], 70 - GFP_KERNEL); 70 + aux_bus->aux_device_wrapper[1] = kzalloc_obj(*aux_bus->aux_device_wrapper[1]); 71 71 if (!aux_bus->aux_device_wrapper[1]) { 72 72 retval = -ENOMEM; 73 73 goto err_aux_dev_add_0;
+1 -2
drivers/misc/mei/interrupt.c
··· 133 133 break; 134 134 case MEI_EXT_HDR_GSC: 135 135 gsc_f2h = (struct mei_ext_hdr_gsc_f2h *)ext; 136 - cb->ext_hdr = (struct mei_ext_hdr *) kzalloc_obj(*gsc_f2h, 137 - GFP_KERNEL); 136 + cb->ext_hdr = (struct mei_ext_hdr *) kzalloc_obj(*gsc_f2h); 138 137 if (!cb->ext_hdr) { 139 138 cb->status = -ENOMEM; 140 139 goto discard;
+2 -4
drivers/misc/sgi-xp/xpc_main.c
··· 400 400 * memory. 401 401 */ 402 402 DBUG_ON(part->channels != NULL); 403 - part->channels = kzalloc_objs(struct xpc_channel, XPC_MAX_NCHANNELS, 404 - GFP_KERNEL); 403 + part->channels = kzalloc_objs(struct xpc_channel, XPC_MAX_NCHANNELS); 405 404 if (part->channels == NULL) { 406 405 dev_err(xpc_chan, "can't get memory for channels\n"); 407 406 return xpNoMemory; ··· 888 889 short partid; 889 890 struct xpc_partition *part; 890 891 891 - xpc_partitions = kzalloc_objs(struct xpc_partition, xp_max_npartitions, 892 - GFP_KERNEL); 892 + xpc_partitions = kzalloc_objs(struct xpc_partition, xp_max_npartitions); 893 893 if (xpc_partitions == NULL) { 894 894 dev_err(xpc_part, "can't get memory for partition structure\n"); 895 895 return -ENOMEM;
+1 -2
drivers/misc/sgi-xp/xpc_uv.c
··· 1073 1073 1074 1074 DBUG_ON(ch->flags & XPC_C_SETUP); 1075 1075 1076 - ch_uv->cached_notify_gru_mq_desc = kmalloc_obj(struct gru_message_queue_desc, 1077 - GFP_KERNEL); 1076 + ch_uv->cached_notify_gru_mq_desc = kmalloc_obj(struct gru_message_queue_desc); 1078 1077 if (ch_uv->cached_notify_gru_mq_desc == NULL) 1079 1078 return xpNoMemory; 1080 1079
+1 -2
drivers/mtd/chips/cfi_cmdset_0001.c
··· 779 779 newcfi = kmalloc_flex(*newcfi, chips, numvirtchips); 780 780 if (!newcfi) 781 781 return -ENOMEM; 782 - shared = kmalloc_objs(struct flchip_shared, cfi->numchips, 783 - GFP_KERNEL); 782 + shared = kmalloc_objs(struct flchip_shared, cfi->numchips); 784 783 if (!shared) { 785 784 kfree(newcfi); 786 785 return -ENOMEM;
+1 -2
drivers/mtd/chips/cfi_probe.c
··· 208 208 if (!num_erase_regions) 209 209 return 0; 210 210 211 - cfi->cfiq = kmalloc_flex(*cfi->cfiq, EraseRegionInfo, num_erase_regions, 212 - GFP_KERNEL); 211 + cfi->cfiq = kmalloc_flex(*cfi->cfiq, EraseRegionInfo, num_erase_regions); 213 212 if (!cfi->cfiq) 214 213 return 0; 215 214
+1 -2
drivers/mtd/chips/jedec_probe.c
··· 1985 1985 1986 1986 num_erase_regions = jedec_table[index].nr_regions; 1987 1987 1988 - cfi->cfiq = kmalloc_flex(*cfi->cfiq, EraseRegionInfo, num_erase_regions, 1989 - GFP_KERNEL); 1988 + cfi->cfiq = kmalloc_flex(*cfi->cfiq, EraseRegionInfo, num_erase_regions); 1990 1989 if (!cfi->cfiq) { 1991 1990 //xx printk(KERN_WARNING "%s: kmalloc failed for CFI ident structure\n", map->name); 1992 1991 return 0;
+1 -2
drivers/mtd/ftl.c
··· 207 207 for (i = 0; i < part->DataUnits; i++) 208 208 part->EUNInfo[i].Offset = 0xffffffff; 209 209 part->XferInfo = 210 - kmalloc_objs(struct xfer_info_t, part->header.NumTransferUnits, 211 - GFP_KERNEL); 210 + kmalloc_objs(struct xfer_info_t, part->header.NumTransferUnits); 212 211 if (!part->XferInfo) 213 212 goto out_EUNInfo; 214 213
+1 -2
drivers/mtd/mtdconcat.c
··· 823 823 concat->mtd.erasesize = max_erasesize; 824 824 concat->mtd.numeraseregions = num_erase_region; 825 825 concat->mtd.eraseregions = erase_region_p = 826 - kmalloc_objs(struct mtd_erase_region_info, num_erase_region, 827 - GFP_KERNEL); 826 + kmalloc_objs(struct mtd_erase_region_info, num_erase_region); 828 827 if (!erase_region_p) { 829 828 kfree(concat); 830 829 printk
+1 -2
drivers/mtd/parsers/bcm47xxpart.c
··· 106 106 blocksize = 0x1000; 107 107 108 108 /* Alloc */ 109 - parts = kzalloc_objs(struct mtd_partition, BCM47XXPART_MAX_PARTS, 110 - GFP_KERNEL); 109 + parts = kzalloc_objs(struct mtd_partition, BCM47XXPART_MAX_PARTS); 111 110 if (!parts) 112 111 return -ENOMEM; 113 112
+1 -2
drivers/mtd/parsers/parser_trx.c
··· 65 65 if (err != 0 && err != -EINVAL) 66 66 pr_err("failed to parse \"brcm,trx-magic\" DT attribute, using default: %d\n", err); 67 67 68 - parts = kzalloc_objs(struct mtd_partition, TRX_PARSER_MAX_PARTS, 69 - GFP_KERNEL); 68 + parts = kzalloc_objs(struct mtd_partition, TRX_PARSER_MAX_PARTS); 70 69 if (!parts) 71 70 return -ENOMEM; 72 71
+1 -2
drivers/mtd/parsers/scpart.c
··· 171 171 goto free; 172 172 } 173 173 174 - parts = kzalloc_objs(*parts, of_get_child_count(ofpart_node), 175 - GFP_KERNEL); 174 + parts = kzalloc_objs(*parts, of_get_child_count(ofpart_node)); 176 175 if (!parts) { 177 176 res = -ENOMEM; 178 177 goto free;
+1 -2
drivers/mtd/rfd_ftl.c
··· 185 185 if (!part->header_cache) 186 186 goto err; 187 187 188 - part->blocks = kzalloc_objs(struct block, part->total_blocks, 189 - GFP_KERNEL); 188 + part->blocks = kzalloc_objs(struct block, part->total_blocks); 190 189 if (!part->blocks) 191 190 goto err; 192 191
+1 -2
drivers/mtd/sm_ftl.c
··· 78 78 79 79 80 80 /* Create array of pointers to the attributes */ 81 - attributes = kzalloc_objs(struct attribute *, NUM_ATTRIBUTES + 1, 82 - GFP_KERNEL); 81 + attributes = kzalloc_objs(struct attribute *, NUM_ATTRIBUTES + 1); 83 82 if (!attributes) 84 83 goto error3; 85 84 attributes[0] = &vendor_attribute->dev_attr.attr;
+2 -4
drivers/mtd/ubi/eba.c
··· 1550 1550 if (!vol) 1551 1551 continue; 1552 1552 1553 - scan_eba[i] = kmalloc_objs(**scan_eba, vol->reserved_pebs, 1554 - GFP_KERNEL); 1553 + scan_eba[i] = kmalloc_objs(**scan_eba, vol->reserved_pebs); 1555 1554 if (!scan_eba[i]) { 1556 1555 ret = -ENOMEM; 1557 1556 goto out_free; 1558 1557 } 1559 1558 1560 - fm_eba[i] = kmalloc_objs(**fm_eba, vol->reserved_pebs, 1561 - GFP_KERNEL); 1559 + fm_eba[i] = kmalloc_objs(**fm_eba, vol->reserved_pebs); 1562 1560 if (!fm_eba[i]) { 1563 1561 ret = -ENOMEM; 1564 1562 kfree(scan_eba[i]);
+2 -4
drivers/net/bonding/bond_main.c
··· 1723 1723 return NULL; 1724 1724 1725 1725 if (BOND_MODE(bond) == BOND_MODE_8023AD) { 1726 - SLAVE_AD_INFO(slave) = kzalloc_obj(struct ad_slave_info, 1727 - GFP_KERNEL); 1726 + SLAVE_AD_INFO(slave) = kzalloc_obj(struct ad_slave_info); 1728 1727 if (!SLAVE_AD_INFO(slave)) { 1729 1728 kobject_put(&slave->kobj); 1730 1729 return NULL; ··· 5096 5097 5097 5098 might_sleep(); 5098 5099 5099 - usable_slaves = kzalloc_flex(*usable_slaves, arr, bond->slave_cnt, 5100 - GFP_KERNEL); 5100 + usable_slaves = kzalloc_flex(*usable_slaves, arr, bond->slave_cnt); 5101 5101 all_slaves = kzalloc_flex(*all_slaves, arr, bond->slave_cnt); 5102 5102 if (!usable_slaves || !all_slaves) { 5103 5103 ret = -ENOMEM;
+1 -2
drivers/net/dsa/ocelot/felix.c
··· 1539 1539 ocelot->npi_xtr_prefix = OCELOT_TAG_PREFIX_SHORT; 1540 1540 ocelot->devlink = felix->ds->devlink; 1541 1541 1542 - port_phy_modes = kzalloc_objs(phy_interface_t, num_phys_ports, 1543 - GFP_KERNEL); 1542 + port_phy_modes = kzalloc_objs(phy_interface_t, num_phys_ports); 1544 1543 if (!port_phy_modes) 1545 1544 return -ENOMEM; 1546 1545
+1 -2
drivers/net/dsa/sja1105/sja1105_devlink.c
··· 82 82 struct devlink_region *region; 83 83 u64 size; 84 84 85 - priv->regions = kzalloc_objs(struct devlink_region *, num_regions, 86 - GFP_KERNEL); 85 + priv->regions = kzalloc_objs(struct devlink_region *, num_regions); 87 86 if (!priv->regions) 88 87 return -ENOMEM; 89 88
+2 -4
drivers/net/ethernet/amd/pcnet32.c
··· 2043 2043 if (!lp->rx_dma_addr) 2044 2044 return -ENOMEM; 2045 2045 2046 - lp->tx_skbuff = kzalloc_objs(struct sk_buff *, lp->tx_ring_size, 2047 - GFP_KERNEL); 2046 + lp->tx_skbuff = kzalloc_objs(struct sk_buff *, lp->tx_ring_size); 2048 2047 if (!lp->tx_skbuff) 2049 2048 return -ENOMEM; 2050 2049 2051 - lp->rx_skbuff = kzalloc_objs(struct sk_buff *, lp->rx_ring_size, 2052 - GFP_KERNEL); 2050 + lp->rx_skbuff = kzalloc_objs(struct sk_buff *, lp->rx_ring_size); 2053 2051 if (!lp->rx_skbuff) 2054 2052 return -ENOMEM; 2055 2053
+1 -2
drivers/net/ethernet/apm/xgene-v2/main.c
··· 417 417 if (!ring->desc_addr) 418 418 goto err; 419 419 420 - ring->pkt_info = kzalloc_objs(*ring->pkt_info, XGENE_ENET_NUM_DESC, 421 - GFP_KERNEL); 420 + ring->pkt_info = kzalloc_objs(*ring->pkt_info, XGENE_ENET_NUM_DESC); 422 421 if (!ring->pkt_info) 423 422 goto err; 424 423
+1 -2
drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
··· 1130 1130 if (!info->n_pins) 1131 1131 return; 1132 1132 1133 - info->pin_config = kzalloc_objs(struct ptp_pin_desc, info->n_pins, 1134 - GFP_KERNEL); 1133 + info->pin_config = kzalloc_objs(struct ptp_pin_desc, info->n_pins); 1135 1134 1136 1135 if (!info->pin_config) 1137 1136 return;
+1 -2
drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
··· 689 689 if (!intf->tx_spb_cpu) 690 690 goto free_rx_edpkt_dma; 691 691 692 - intf->tx_cbs = kzalloc_objs(struct bcmasp_tx_cb, DESC_RING_COUNT, 693 - GFP_KERNEL); 692 + intf->tx_cbs = kzalloc_objs(struct bcmasp_tx_cb, DESC_RING_COUNT); 694 693 if (!intf->tx_cbs) 695 694 goto free_tx_spb_dma; 696 695
+2 -4
drivers/net/ethernet/broadcom/bcm63xx_enet.c
··· 981 981 priv->tx_desc_alloc_size = size; 982 982 priv->tx_desc_cpu = p; 983 983 984 - priv->tx_skb = kzalloc_objs(struct sk_buff *, priv->tx_ring_size, 985 - GFP_KERNEL); 984 + priv->tx_skb = kzalloc_objs(struct sk_buff *, priv->tx_ring_size); 986 985 if (!priv->tx_skb) { 987 986 ret = -ENOMEM; 988 987 goto out_free_tx_ring; ··· 2148 2149 priv->tx_desc_alloc_size = size; 2149 2150 priv->tx_desc_cpu = p; 2150 2151 2151 - priv->tx_skb = kzalloc_objs(struct sk_buff *, priv->tx_ring_size, 2152 - GFP_KERNEL); 2152 + priv->tx_skb = kzalloc_objs(struct sk_buff *, priv->tx_ring_size); 2153 2153 if (!priv->tx_skb) { 2154 2154 dev_err(kdev, "cannot allocate tx skb queue\n"); 2155 2155 ret = -ENOMEM;
+1 -2
drivers/net/ethernet/broadcom/bcmsysport.c
··· 1665 1665 priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET; 1666 1666 priv->rx_c_index = 0; 1667 1667 priv->rx_read_ptr = 0; 1668 - priv->rx_cbs = kzalloc_objs(struct bcm_sysport_cb, priv->num_rx_bds, 1669 - GFP_KERNEL); 1668 + priv->rx_cbs = kzalloc_objs(struct bcm_sysport_cb, priv->num_rx_bds); 1670 1669 if (!priv->rx_cbs) { 1671 1670 netif_err(priv, hw, priv->netdev, "CB allocation failed\n"); 1672 1671 return -ENOMEM;
+6 -12
drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
··· 509 509 for (i = 0; i < bd->rx_nr_rings; i++) { 510 510 struct bnge_rx_ring_info *rxr = &bn->rx_ring[i]; 511 511 512 - rxr->rx_tpa = kzalloc_objs(struct bnge_tpa_info, bn->max_tpa, 513 - GFP_KERNEL); 512 + rxr->rx_tpa = kzalloc_objs(struct bnge_tpa_info, bn->max_tpa); 514 513 if (!rxr->rx_tpa) 515 514 goto err_free_tpa_info; 516 515 ··· 521 522 goto err_free_tpa_info; 522 523 rxr->rx_tpa[j].agg_arr = agg; 523 524 } 524 - rxr->rx_tpa_idx_map = kzalloc_obj(*rxr->rx_tpa_idx_map, 525 - GFP_KERNEL); 525 + rxr->rx_tpa_idx_map = kzalloc_obj(*rxr->rx_tpa_idx_map); 526 526 if (!rxr->rx_tpa_idx_map) 527 527 goto err_free_tpa_info; 528 528 } ··· 810 812 */ 811 813 num_vnics = 1; 812 814 813 - bn->vnic_info = kzalloc_objs(struct bnge_vnic_info, num_vnics, 814 - GFP_KERNEL); 815 + bn->vnic_info = kzalloc_objs(struct bnge_vnic_info, num_vnics); 815 816 if (!bn->vnic_info) 816 817 return -ENOMEM; 817 818 ··· 837 840 struct bnge_dev *bd = bn->bd; 838 841 int i; 839 842 840 - bn->grp_info = kzalloc_objs(struct bnge_ring_grp_info, bd->nq_nr_rings, 841 - GFP_KERNEL); 843 + bn->grp_info = kzalloc_objs(struct bnge_ring_grp_info, bd->nq_nr_rings); 842 844 if (!bn->grp_info) 843 845 return -ENOMEM; 844 846 for (i = 0; i < bd->nq_nr_rings; i++) { ··· 897 901 nqr->ring_struct.ring_mem.flags = BNGE_RMEM_RING_PTE_FLAG; 898 902 } 899 903 900 - bn->rx_ring = kzalloc_objs(struct bnge_rx_ring_info, bd->rx_nr_rings, 901 - GFP_KERNEL); 904 + bn->rx_ring = kzalloc_objs(struct bnge_rx_ring_info, bd->rx_nr_rings); 902 905 if (!bn->rx_ring) 903 906 goto err_free_core; 904 907 ··· 912 917 bn->bnapi[i]->rx_ring = &bn->rx_ring[i]; 913 918 } 914 919 915 - bn->tx_ring = kzalloc_objs(struct bnge_tx_ring_info, bd->tx_nr_rings, 916 - GFP_KERNEL); 920 + bn->tx_ring = kzalloc_objs(struct bnge_tx_ring_info, bd->tx_nr_rings); 917 921 if (!bn->tx_ring) 918 922 goto err_free_core; 919 923
+1 -2
drivers/net/ethernet/broadcom/bnge/bnge_rmem.c
··· 158 158 int nr_tbls, i; 159 159 160 160 rmem->depth = 2; 161 - ctx_pg->ctx_pg_tbl = kzalloc_objs(ctx_pg, MAX_CTX_PAGES, 162 - GFP_KERNEL); 161 + ctx_pg->ctx_pg_tbl = kzalloc_objs(ctx_pg, MAX_CTX_PAGES); 163 162 if (!ctx_pg->ctx_pg_tbl) 164 163 return -ENOMEM; 165 164 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
+3 -6
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
··· 4761 4761 bp->fp = fp; 4762 4762 4763 4763 /* allocate sp objs */ 4764 - bp->sp_objs = kzalloc_objs(struct bnx2x_sp_objs, bp->fp_array_size, 4765 - GFP_KERNEL); 4764 + bp->sp_objs = kzalloc_objs(struct bnx2x_sp_objs, bp->fp_array_size); 4766 4765 if (!bp->sp_objs) 4767 4766 goto alloc_err; 4768 4767 4769 4768 /* allocate fp_stats */ 4770 - bp->fp_stats = kzalloc_objs(struct bnx2x_fp_stats, bp->fp_array_size, 4771 - GFP_KERNEL); 4769 + bp->fp_stats = kzalloc_objs(struct bnx2x_fp_stats, bp->fp_array_size); 4772 4770 if (!bp->fp_stats) 4773 4771 goto alloc_err; 4774 4772 ··· 4775 4777 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp); 4776 4778 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size); 4777 4779 4778 - bp->bnx2x_txq = kzalloc_objs(struct bnx2x_fp_txdata, txq_array_size, 4779 - GFP_KERNEL); 4780 + bp->bnx2x_txq = kzalloc_objs(struct bnx2x_fp_txdata, txq_array_size); 4780 4781 if (!bp->bnx2x_txq) 4781 4782 goto alloc_err; 4782 4783
+1 -2
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
··· 8396 8396 goto alloc_mem_err; 8397 8397 allocated += bp->context[i].size; 8398 8398 } 8399 - bp->ilt->lines = kzalloc_objs(struct ilt_line, ILT_MAX_LINES, 8400 - GFP_KERNEL); 8399 + bp->ilt->lines = kzalloc_objs(struct ilt_line, ILT_MAX_LINES); 8401 8400 if (!bp->ilt->lines) 8402 8401 goto alloc_mem_err; 8403 8402
+2 -4
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
··· 551 551 else 552 552 set_bit(RAMROD_COMP_WAIT, &mcast.ramrod_flags); 553 553 if (mc_num) { 554 - mc = kzalloc_objs(struct bnx2x_mcast_list_elem, mc_num, 555 - GFP_KERNEL); 554 + mc = kzalloc_objs(struct bnx2x_mcast_list_elem, mc_num); 556 555 if (!mc) { 557 556 BNX2X_ERR("Cannot Configure multicasts due to lack of memory\n"); 558 557 return -ENOMEM; ··· 1246 1247 num_vfs_param, iov->nr_virtfn); 1247 1248 1248 1249 /* allocate the vf array */ 1249 - bp->vfdb->vfs = kzalloc_objs(struct bnx2x_virtf, BNX2X_NR_VIRTFN(bp), 1250 - GFP_KERNEL); 1250 + bp->vfdb->vfs = kzalloc_objs(struct bnx2x_virtf, BNX2X_NR_VIRTFN(bp)); 1251 1251 if (!bp->vfdb->vfs) { 1252 1252 BNX2X_ERR("failed to allocate vf array\n"); 1253 1253 err = -ENOMEM;
+4 -8
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 3752 3752 struct rx_agg_cmp *agg; 3753 3753 int i; 3754 3754 3755 - rxr->rx_tpa = kzalloc_objs(struct bnxt_tpa_info, bp->max_tpa, 3756 - GFP_KERNEL); 3755 + rxr->rx_tpa = kzalloc_objs(struct bnxt_tpa_info, bp->max_tpa); 3757 3756 if (!rxr->rx_tpa) 3758 3757 return -ENOMEM; 3759 3758 ··· 4082 4083 cpr->cp_desc_ring = kzalloc_objs(*cpr->cp_desc_ring, n); 4083 4084 if (!cpr->cp_desc_ring) 4084 4085 return -ENOMEM; 4085 - cpr->cp_desc_mapping = kzalloc_objs(*cpr->cp_desc_mapping, n, 4086 - GFP_KERNEL); 4086 + cpr->cp_desc_mapping = kzalloc_objs(*cpr->cp_desc_mapping, n); 4087 4087 if (!cpr->cp_desc_mapping) 4088 4088 return -ENOMEM; 4089 4089 return 0; ··· 4662 4664 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 4663 4665 num_vnics++; 4664 4666 4665 - bp->vnic_info = kzalloc_objs(struct bnxt_vnic_info, num_vnics, 4666 - GFP_KERNEL); 4667 + bp->vnic_info = kzalloc_objs(struct bnxt_vnic_info, num_vnics); 4667 4668 if (!bp->vnic_info) 4668 4669 return -ENOMEM; 4669 4670 ··· 9040 9043 int nr_tbls, i; 9041 9044 9042 9045 rmem->depth = 2; 9043 - ctx_pg->ctx_pg_tbl = kzalloc_objs(ctx_pg, MAX_CTX_PAGES, 9044 - GFP_KERNEL); 9046 + ctx_pg->ctx_pg_tbl = kzalloc_objs(ctx_pg, MAX_CTX_PAGES); 9045 9047 if (!ctx_pg->ctx_pg_tbl) 9046 9048 return -ENOMEM; 9047 9049 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
+1 -2
drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
··· 333 333 return; 334 334 335 335 if (!err) { 336 - ent = kzalloc_objs(*ent, ulp->msix_requested, 337 - GFP_KERNEL); 336 + ent = kzalloc_objs(*ent, ulp->msix_requested); 338 337 if (!ent) 339 338 return; 340 339 bnxt_fill_msix_vecs(bp, ent);
+3 -6
drivers/net/ethernet/broadcom/cnic.c
··· 1261 1261 cp->fcoe_init_cid = 0x10; 1262 1262 } 1263 1263 1264 - cp->iscsi_tbl = kzalloc_objs(struct cnic_iscsi, MAX_ISCSI_TBL_SZ, 1265 - GFP_KERNEL); 1264 + cp->iscsi_tbl = kzalloc_objs(struct cnic_iscsi, MAX_ISCSI_TBL_SZ); 1266 1265 if (!cp->iscsi_tbl) 1267 1266 goto error; 1268 1267 1269 - cp->ctx_tbl = kzalloc_objs(struct cnic_context, cp->max_cid_space, 1270 - GFP_KERNEL); 1268 + cp->ctx_tbl = kzalloc_objs(struct cnic_context, cp->max_cid_space); 1271 1269 if (!cp->ctx_tbl) 1272 1270 goto error; 1273 1271 ··· 4103 4105 u32 port_id; 4104 4106 int i; 4105 4107 4106 - cp->csk_tbl = kvzalloc_objs(struct cnic_sock, MAX_CM_SK_TBL_SZ, 4107 - GFP_KERNEL); 4108 + cp->csk_tbl = kvzalloc_objs(struct cnic_sock, MAX_CM_SK_TBL_SZ); 4108 4109 if (!cp->csk_tbl) 4109 4110 return -ENOMEM; 4110 4111
+2 -4
drivers/net/ethernet/broadcom/genet/bcmgenet.c
··· 3083 3083 /* Initialize common Rx ring structures */ 3084 3084 priv->rx_bds = priv->base + priv->hw_params->rdma_offset; 3085 3085 priv->num_rx_bds = TOTAL_DESC; 3086 - priv->rx_cbs = kzalloc_objs(struct enet_cb, priv->num_rx_bds, 3087 - GFP_KERNEL); 3086 + priv->rx_cbs = kzalloc_objs(struct enet_cb, priv->num_rx_bds); 3088 3087 if (!priv->rx_cbs) 3089 3088 return -ENOMEM; 3090 3089 ··· 3095 3096 /* Initialize common TX ring structures */ 3096 3097 priv->tx_bds = priv->base + priv->hw_params->tdma_offset; 3097 3098 priv->num_tx_bds = TOTAL_DESC; 3098 - priv->tx_cbs = kzalloc_objs(struct enet_cb, priv->num_tx_bds, 3099 - GFP_KERNEL); 3099 + priv->tx_cbs = kzalloc_objs(struct enet_cb, priv->num_tx_bds); 3100 3100 if (!priv->tx_cbs) { 3101 3101 kfree(priv->rx_cbs); 3102 3102 return -ENOMEM;
+1 -2
drivers/net/ethernet/broadcom/sb1250-mac.c
··· 642 642 * And context table 643 643 */ 644 644 645 - d->sbdma_ctxtable = kzalloc_objs(*d->sbdma_ctxtable, d->sbdma_maxdescr, 646 - GFP_KERNEL); 645 + d->sbdma_ctxtable = kzalloc_objs(*d->sbdma_ctxtable, d->sbdma_maxdescr); 647 646 648 647 #ifdef CONFIG_SBMAC_COALESCE 649 648 /*
+1 -2
drivers/net/ethernet/brocade/bna/bnad.c
··· 1345 1345 return 0; 1346 1346 } 1347 1347 1348 - mem_info->mdl = kzalloc_objs(struct bna_mem_descr, mem_info->num, 1349 - GFP_KERNEL); 1348 + mem_info->mdl = kzalloc_objs(struct bna_mem_descr, mem_info->num); 1350 1349 if (mem_info->mdl == NULL) 1351 1350 return -ENOMEM; 1352 1351
+2 -4
drivers/net/ethernet/calxeda/xgmac.c
··· 729 729 730 730 netdev_dbg(priv->dev, "mtu [%d] bfsize [%d]\n", dev->mtu, bfsize); 731 731 732 - priv->rx_skbuff = kzalloc_objs(struct sk_buff *, DMA_RX_RING_SZ, 733 - GFP_KERNEL); 732 + priv->rx_skbuff = kzalloc_objs(struct sk_buff *, DMA_RX_RING_SZ); 734 733 if (!priv->rx_skbuff) 735 734 return -ENOMEM; 736 735 ··· 741 742 if (!priv->dma_rx) 742 743 goto err_dma_rx; 743 744 744 - priv->tx_skbuff = kzalloc_objs(struct sk_buff *, DMA_TX_RING_SZ, 745 - GFP_KERNEL); 745 + priv->tx_skbuff = kzalloc_objs(struct sk_buff *, DMA_TX_RING_SZ); 746 746 if (!priv->tx_skbuff) 747 747 goto err_tx_skb; 748 748
+2 -4
drivers/net/ethernet/cavium/liquidio/lio_core.c
··· 107 107 /* allocate memory to store virtual and dma base address of 108 108 * per glist consistent memory 109 109 */ 110 - lio->glists_virt_base = kzalloc_objs(*lio->glists_virt_base, num_iqs, 111 - GFP_KERNEL); 112 - lio->glists_dma_base = kzalloc_objs(*lio->glists_dma_base, num_iqs, 113 - GFP_KERNEL); 110 + lio->glists_virt_base = kzalloc_objs(*lio->glists_virt_base, num_iqs); 111 + lio->glists_dma_base = kzalloc_objs(*lio->glists_dma_base, num_iqs); 114 112 115 113 if (!lio->glists_virt_base || !lio->glists_dma_base) { 116 114 lio_delete_glists(lio);
+1 -2
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
··· 2254 2254 if (!eth_filter) 2255 2255 return -ENOMEM; 2256 2256 2257 - eth_filter_info = kzalloc_objs(*eth_filter_info, adap->params.nports, 2258 - GFP_KERNEL); 2257 + eth_filter_info = kzalloc_objs(*eth_filter_info, adap->params.nports); 2259 2258 if (!eth_filter_info) { 2260 2259 ret = -ENOMEM; 2261 2260 goto free_eth_filter;
+1 -2
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
··· 5021 5021 adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1; 5022 5022 adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1; 5023 5023 5024 - adap->sge.egr_map = kzalloc_objs(*adap->sge.egr_map, adap->sge.egr_sz, 5025 - GFP_KERNEL); 5024 + adap->sge.egr_map = kzalloc_objs(*adap->sge.egr_map, adap->sge.egr_sz); 5026 5025 if (!adap->sge.egr_map) { 5027 5026 ret = -ENOMEM; 5028 5027 goto bye;
+1 -2
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
··· 535 535 if (!tc_matchall) 536 536 return -ENOMEM; 537 537 538 - tc_port_matchall = kzalloc_objs(*tc_port_matchall, adap->params.nports, 539 - GFP_KERNEL); 538 + tc_port_matchall = kzalloc_objs(*tc_port_matchall, adap->params.nports); 540 539 if (!tc_port_matchall) { 541 540 ret = -ENOMEM; 542 541 goto out_free_matchall;
+2 -4
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
··· 658 658 if (!tc_mqprio) 659 659 return -ENOMEM; 660 660 661 - tc_port_mqprio = kzalloc_objs(*tc_port_mqprio, adap->params.nports, 662 - GFP_KERNEL); 661 + tc_port_mqprio = kzalloc_objs(*tc_port_mqprio, adap->params.nports); 663 662 if (!tc_port_mqprio) { 664 663 ret = -ENOMEM; 665 664 goto out_free_mqprio; ··· 669 670 tc_mqprio->port_mqprio = tc_port_mqprio; 670 671 for (i = 0; i < adap->params.nports; i++) { 671 672 port_mqprio = &tc_mqprio->port_mqprio[i]; 672 - eosw_txq = kzalloc_objs(*eosw_txq, adap->tids.neotids, 673 - GFP_KERNEL); 673 + eosw_txq = kzalloc_objs(*eosw_txq, adap->tids.neotids); 674 674 if (!eosw_txq) { 675 675 ret = -ENOMEM; 676 676 goto out_free_ports;
+3 -6
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
··· 488 488 i = min_t(int, uld_info->ntxq, num_online_cpus()); 489 489 txq_info->ntxq = roundup(i, adap->params.nports); 490 490 } 491 - txq_info->uldtxq = kzalloc_objs(struct sge_uld_txq, txq_info->ntxq, 492 - GFP_KERNEL); 491 + txq_info->uldtxq = kzalloc_objs(struct sge_uld_txq, txq_info->ntxq); 493 492 if (!txq_info->uldtxq) { 494 493 kfree(txq_info); 495 494 return -ENOMEM; ··· 527 528 if (!adap->uld) 528 529 return -ENOMEM; 529 530 530 - s->uld_rxq_info = kzalloc_objs(struct sge_uld_rxq_info *, CXGB4_ULD_MAX, 531 - GFP_KERNEL); 531 + s->uld_rxq_info = kzalloc_objs(struct sge_uld_rxq_info *, CXGB4_ULD_MAX); 532 532 if (!s->uld_rxq_info) 533 533 goto err_uld; 534 534 535 - s->uld_txq_info = kzalloc_objs(struct sge_uld_txq_info *, CXGB4_TX_MAX, 536 - GFP_KERNEL); 535 + s->uld_txq_info = kzalloc_objs(struct sge_uld_txq_info *, CXGB4_TX_MAX); 537 536 if (!s->uld_txq_info) 538 537 goto err_uld_rx; 539 538 return 0;
+3 -6
drivers/net/ethernet/cisco/enic/enic_main.c
··· 2473 2473 if (!enic->napi) 2474 2474 goto free_queues; 2475 2475 2476 - enic->msix_entry = kzalloc_objs(struct msix_entry, enic->intr_avail, 2477 - GFP_KERNEL); 2476 + enic->msix_entry = kzalloc_objs(struct msix_entry, enic->intr_avail); 2478 2477 if (!enic->msix_entry) 2479 2478 goto free_queues; 2480 2479 2481 - enic->msix = kzalloc_objs(struct enic_msix_entry, enic->intr_avail, 2482 - GFP_KERNEL); 2480 + enic->msix = kzalloc_objs(struct enic_msix_entry, enic->intr_avail); 2483 2481 if (!enic->msix) 2484 2482 goto free_queues; 2485 2483 2486 - enic->intr = kzalloc_objs(struct vnic_intr, enic->intr_avail, 2487 - GFP_KERNEL); 2484 + enic->intr = kzalloc_objs(struct vnic_intr, enic->intr_avail); 2488 2485 if (!enic->intr) 2489 2486 goto free_queues; 2490 2487
+2 -4
drivers/net/ethernet/emulex/benet/be_main.c
··· 4685 4685 if (!adapter->pmac_id) 4686 4686 return -ENOMEM; 4687 4687 4688 - adapter->mc_list = kzalloc_objs(*adapter->mc_list, be_max_mc(adapter), 4689 - GFP_KERNEL); 4688 + adapter->mc_list = kzalloc_objs(*adapter->mc_list, be_max_mc(adapter)); 4690 4689 if (!adapter->mc_list) 4691 4690 return -ENOMEM; 4692 4691 4693 - adapter->uc_list = kzalloc_objs(*adapter->uc_list, be_max_uc(adapter), 4694 - GFP_KERNEL); 4692 + adapter->uc_list = kzalloc_objs(*adapter->uc_list, be_max_uc(adapter)); 4695 4693 if (!adapter->uc_list) 4696 4694 return -ENOMEM; 4697 4695
+2 -4
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
··· 3408 3408 if (err) 3409 3409 goto err_free_cmdport; 3410 3410 3411 - ethsw->ports = kzalloc_objs(*ethsw->ports, ethsw->sw_attr.num_ifs, 3412 - GFP_KERNEL); 3411 + ethsw->ports = kzalloc_objs(*ethsw->ports, ethsw->sw_attr.num_ifs); 3413 3412 if (!(ethsw->ports)) { 3414 3413 err = -ENOMEM; 3415 3414 goto err_teardown; 3416 3415 } 3417 3416 3418 - ethsw->fdbs = kzalloc_objs(*ethsw->fdbs, ethsw->sw_attr.num_ifs, 3419 - GFP_KERNEL); 3417 + ethsw->fdbs = kzalloc_objs(*ethsw->fdbs, ethsw->sw_attr.num_ifs); 3420 3418 if (!ethsw->fdbs) { 3421 3419 err = -ENOMEM; 3422 3420 goto err_free_ports;
+1 -2
drivers/net/ethernet/freescale/enetc/enetc.c
··· 2562 2562 { 2563 2563 struct enetc_si *si = priv->si; 2564 2564 2565 - priv->cls_rules = kzalloc_objs(*priv->cls_rules, si->num_fs_entries, 2566 - GFP_KERNEL); 2565 + priv->cls_rules = kzalloc_objs(*priv->cls_rules, si->num_fs_entries); 2567 2566 if (!priv->cls_rules) 2568 2567 return -ENOMEM; 2569 2568
+2 -4
drivers/net/ethernet/freescale/gianfar.c
··· 413 413 int i; 414 414 415 415 for (i = 0; i < priv->num_tx_queues; i++) { 416 - priv->tx_queue[i] = kzalloc_obj(struct gfar_priv_tx_q, 417 - GFP_KERNEL); 416 + priv->tx_queue[i] = kzalloc_obj(struct gfar_priv_tx_q); 418 417 if (!priv->tx_queue[i]) 419 418 return -ENOMEM; 420 419 ··· 430 431 int i; 431 432 432 433 for (i = 0; i < priv->num_rx_queues; i++) { 433 - priv->rx_queue[i] = kzalloc_obj(struct gfar_priv_rx_q, 434 - GFP_KERNEL); 434 + priv->rx_queue[i] = kzalloc_obj(struct gfar_priv_rx_q); 435 435 if (!priv->rx_queue[i]) 436 436 return -ENOMEM; 437 437
+1 -2
drivers/net/ethernet/google/gve/gve_ethtool.c
··· 176 176 priv = netdev_priv(netdev); 177 177 num_tx_queues = gve_num_tx_queues(priv); 178 178 report_stats = priv->stats_report->stats; 179 - rx_qid_to_stats_idx = kmalloc_objs(int, priv->rx_cfg.num_queues, 180 - GFP_KERNEL); 179 + rx_qid_to_stats_idx = kmalloc_objs(int, priv->rx_cfg.num_queues); 181 180 if (!rx_qid_to_stats_idx) 182 181 return; 183 182 for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
+1 -2
drivers/net/ethernet/google/gve/gve_main.c
··· 666 666 } 667 667 668 668 if (!gve_is_gqi(priv)) { 669 - priv->ptype_lut_dqo = kvzalloc_obj(*priv->ptype_lut_dqo, 670 - GFP_KERNEL); 669 + priv->ptype_lut_dqo = kvzalloc_obj(*priv->ptype_lut_dqo); 671 670 if (!priv->ptype_lut_dqo) { 672 671 err = -ENOMEM; 673 672 goto abort_with_stats_report;
+1 -2
drivers/net/ethernet/google/gve/gve_rx.c
··· 390 390 int err = 0; 391 391 int i, j; 392 392 393 - rx = kvzalloc_objs(struct gve_rx_ring, cfg->qcfg_rx->max_queues, 394 - GFP_KERNEL); 393 + rx = kvzalloc_objs(struct gve_rx_ring, cfg->qcfg_rx->max_queues); 395 394 if (!rx) 396 395 return -ENOMEM; 397 396
+1 -2
drivers/net/ethernet/google/gve/gve_rx_dqo.c
··· 320 320 int err; 321 321 int i; 322 322 323 - rx = kvzalloc_objs(struct gve_rx_ring, cfg->qcfg_rx->max_queues, 324 - GFP_KERNEL); 323 + rx = kvzalloc_objs(struct gve_rx_ring, cfg->qcfg_rx->max_queues); 325 324 if (!rx) 326 325 return -ENOMEM; 327 326
+1 -2
drivers/net/ethernet/google/gve/gve_tx.c
··· 345 345 return -EINVAL; 346 346 } 347 347 348 - tx = kvzalloc_objs(struct gve_tx_ring, cfg->qcfg->max_queues, 349 - GFP_KERNEL); 348 + tx = kvzalloc_objs(struct gve_tx_ring, cfg->qcfg->max_queues); 350 349 if (!tx) 351 350 return -ENOMEM; 352 351
+1 -2
drivers/net/ethernet/google/gve/gve_tx_dqo.c
··· 416 416 return -EINVAL; 417 417 } 418 418 419 - tx = kvzalloc_objs(struct gve_tx_ring, cfg->qcfg->max_queues, 420 - GFP_KERNEL); 419 + tx = kvzalloc_objs(struct gve_tx_ring, cfg->qcfg->max_queues); 421 420 if (!tx) 422 421 return -ENOMEM; 423 422
+1 -2
drivers/net/ethernet/hisilicon/hns/hnae.c
··· 208 208 assert(ring->next_to_use == 0); 209 209 assert(ring->next_to_clean == 0); 210 210 211 - ring->desc_cb = kzalloc_objs(ring->desc_cb[0], ring->desc_num, 212 - GFP_KERNEL); 211 + ring->desc_cb = kzalloc_objs(ring->desc_cb[0], ring->desc_num); 213 212 if (!ring->desc_cb) { 214 213 ret = -ENOMEM; 215 214 goto out;
+1 -2
drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.c
··· 614 614 615 615 spin_lock_init(&cmdq->cmdq_lock); 616 616 617 - cmdq->cmd_infos = kzalloc_objs(*cmdq->cmd_infos, cmdq->wq.q_depth, 618 - GFP_KERNEL); 617 + cmdq->cmd_infos = kzalloc_objs(*cmdq->cmd_infos, cmdq->wq.q_depth); 619 618 if (!cmdq->cmd_infos) { 620 619 err = -ENOMEM; 621 620 return err;
+1 -2
drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c
··· 138 138 goto err_free_txqs_res_arr; 139 139 } 140 140 141 - q_params->irq_cfg = kzalloc_objs(*q_params->irq_cfg, q_params->num_qps, 142 - GFP_KERNEL); 141 + q_params->irq_cfg = kzalloc_objs(*q_params->irq_cfg, q_params->num_qps); 143 142 if (!q_params->irq_cfg) { 144 143 err = -ENOMEM; 145 144 goto err_free_rxqs_res_arr;
+1 -2
drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.c
··· 44 44 u32 pg_idx; 45 45 int err; 46 46 47 - qpages->pages = kzalloc_objs(qpages->pages[0], qpages->num_pages, 48 - GFP_KERNEL); 47 + qpages->pages = kzalloc_objs(qpages->pages[0], qpages->num_pages); 49 48 if (!qpages->pages) 50 49 return -ENOMEM; 51 50
+1 -2
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
··· 419 419 420 420 for (idx = 0; idx < num_rq; idx++) { 421 421 rqres = &rxqs_res[idx]; 422 - rqres->rx_info = kzalloc_objs(*rqres->rx_info, rq_depth, 423 - GFP_KERNEL); 422 + rqres->rx_info = kzalloc_objs(*rqres->rx_info, rq_depth); 424 423 if (!rqres->rx_info) 425 424 goto err_free_rqres; 426 425
+1 -2
drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
··· 681 681 for (idx = 0; idx < num_sq; idx++) { 682 682 tqres = &txqs_res[idx]; 683 683 684 - tqres->tx_info = kzalloc_objs(*tqres->tx_info, sq_depth, 685 - GFP_KERNEL); 684 + tqres->tx_info = kzalloc_objs(*tqres->tx_info, sq_depth); 686 685 if (!tqres->tx_info) 687 686 goto err_free_tqres; 688 687
+5 -10
drivers/net/ethernet/ibm/ibmvnic.c
··· 1094 1094 /* Allocate/populate the pools. */ 1095 1095 release_rx_pools(adapter); 1096 1096 1097 - adapter->rx_pool = kzalloc_objs(struct ibmvnic_rx_pool, num_pools, 1098 - GFP_KERNEL); 1097 + adapter->rx_pool = kzalloc_objs(struct ibmvnic_rx_pool, num_pools); 1099 1098 if (!adapter->rx_pool) { 1100 1099 dev_err(dev, "Failed to allocate rx pools\n"); 1101 1100 return -ENOMEM; ··· 1237 1238 { 1238 1239 int i; 1239 1240 1240 - tx_pool->tx_buff = kzalloc_objs(struct ibmvnic_tx_buff, pool_size, 1241 - GFP_KERNEL); 1241 + tx_pool->tx_buff = kzalloc_objs(struct ibmvnic_tx_buff, pool_size); 1242 1242 if (!tx_pool->tx_buff) 1243 1243 return -ENOMEM; 1244 1244 ··· 1333 1335 pool_size = adapter->req_tx_entries_per_subcrq; 1334 1336 num_pools = adapter->num_active_tx_scrqs; 1335 1337 1336 - adapter->tx_pool = kzalloc_objs(struct ibmvnic_tx_pool, num_pools, 1337 - GFP_KERNEL); 1338 + adapter->tx_pool = kzalloc_objs(struct ibmvnic_tx_pool, num_pools); 1338 1339 if (!adapter->tx_pool) 1339 1340 return -ENOMEM; 1340 1341 1341 - adapter->tso_pool = kzalloc_objs(struct ibmvnic_tx_pool, num_pools, 1342 - GFP_KERNEL); 1342 + adapter->tso_pool = kzalloc_objs(struct ibmvnic_tx_pool, num_pools); 1343 1343 /* To simplify release_tx_pools() ensure that ->tx_pool and 1344 1344 * ->tso_pool are either both NULL or both non-NULL. 1345 1345 */ ··· 1461 1465 { 1462 1466 int i; 1463 1467 1464 - adapter->napi = kzalloc_objs(struct napi_struct, adapter->req_rx_queues, 1465 - GFP_KERNEL); 1468 + adapter->napi = kzalloc_objs(struct napi_struct, adapter->req_rx_queues); 1466 1469 if (!adapter->napi) 1467 1470 return -ENOMEM; 1468 1471
+4 -8
drivers/net/ethernet/intel/e1000/e1000_ethtool.c
··· 582 582 rx_old = adapter->rx_ring; 583 583 584 584 err = -ENOMEM; 585 - txdr = kzalloc_objs(struct e1000_tx_ring, adapter->num_tx_queues, 586 - GFP_KERNEL); 585 + txdr = kzalloc_objs(struct e1000_tx_ring, adapter->num_tx_queues); 587 586 if (!txdr) 588 587 goto err_alloc_tx; 589 588 590 - rxdr = kzalloc_objs(struct e1000_rx_ring, adapter->num_rx_queues, 591 - GFP_KERNEL); 589 + rxdr = kzalloc_objs(struct e1000_rx_ring, adapter->num_rx_queues); 592 590 if (!rxdr) 593 591 goto err_alloc_rx; 594 592 ··· 982 984 if (!txdr->count) 983 985 txdr->count = E1000_DEFAULT_TXD; 984 986 985 - txdr->buffer_info = kzalloc_objs(struct e1000_tx_buffer, txdr->count, 986 - GFP_KERNEL); 987 + txdr->buffer_info = kzalloc_objs(struct e1000_tx_buffer, txdr->count); 987 988 if (!txdr->buffer_info) { 988 989 ret_val = 1; 989 990 goto err_nomem; ··· 1040 1043 if (!rxdr->count) 1041 1044 rxdr->count = E1000_DEFAULT_RXD; 1042 1045 1043 - rxdr->buffer_info = kzalloc_objs(struct e1000_rx_buffer, rxdr->count, 1044 - GFP_KERNEL); 1046 + rxdr->buffer_info = kzalloc_objs(struct e1000_rx_buffer, rxdr->count); 1045 1047 if (!rxdr->buffer_info) { 1046 1048 ret_val = 5; 1047 1049 goto err_nomem;
+2 -4
drivers/net/ethernet/intel/e1000e/ethtool.c
··· 1173 1173 if (!tx_ring->count) 1174 1174 tx_ring->count = E1000_DEFAULT_TXD; 1175 1175 1176 - tx_ring->buffer_info = kzalloc_objs(struct e1000_buffer, tx_ring->count, 1177 - GFP_KERNEL); 1176 + tx_ring->buffer_info = kzalloc_objs(struct e1000_buffer, tx_ring->count); 1178 1177 if (!tx_ring->buffer_info) { 1179 1178 ret_val = 1; 1180 1179 goto err_nomem; ··· 1233 1234 if (!rx_ring->count) 1234 1235 rx_ring->count = E1000_DEFAULT_RXD; 1235 1236 1236 - rx_ring->buffer_info = kzalloc_objs(struct e1000_buffer, rx_ring->count, 1237 - GFP_KERNEL); 1237 + rx_ring->buffer_info = kzalloc_objs(struct e1000_buffer, rx_ring->count); 1238 1238 if (!rx_ring->buffer_info) { 1239 1239 ret_val = 5; 1240 1240 goto err_nomem;
+1 -2
drivers/net/ethernet/intel/fm10k/fm10k_main.c
··· 1825 1825 v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors); 1826 1826 1827 1827 /* A failure in MSI-X entry allocation is fatal. */ 1828 - interface->msix_entries = kzalloc_objs(struct msix_entry, v_budget, 1829 - GFP_KERNEL); 1828 + interface->msix_entries = kzalloc_objs(struct msix_entry, v_budget); 1830 1829 if (!interface->msix_entries) 1831 1830 return -ENOMEM; 1832 1831
+1 -2
drivers/net/ethernet/intel/i40e/i40e_debugfs.c
··· 983 983 int i, ret; 984 984 u16 switch_id; 985 985 986 - bw_data = kzalloc_obj(struct i40e_aqc_query_port_ets_config_resp, 987 - GFP_KERNEL); 986 + bw_data = kzalloc_obj(struct i40e_aqc_query_port_ets_config_resp); 988 987 if (!bw_data) { 989 988 ret = -ENOMEM; 990 989 goto command_write_done;
+1 -2
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
··· 2120 2120 netdev_info(netdev, 2121 2121 "Changing Tx descriptor count from %d to %d.\n", 2122 2122 vsi->tx_rings[0]->count, new_tx_count); 2123 - tx_rings = kzalloc_objs(struct i40e_ring, tx_alloc_queue_pairs, 2124 - GFP_KERNEL); 2123 + tx_rings = kzalloc_objs(struct i40e_ring, tx_alloc_queue_pairs); 2125 2124 if (!tx_rings) { 2126 2125 err = -ENOMEM; 2127 2126 goto done;
+3 -6
drivers/net/ethernet/intel/iavf/iavf_main.c
··· 1585 1585 (int)(num_online_cpus())); 1586 1586 1587 1587 1588 - adapter->tx_rings = kzalloc_objs(struct iavf_ring, num_active_queues, 1589 - GFP_KERNEL); 1588 + adapter->tx_rings = kzalloc_objs(struct iavf_ring, num_active_queues); 1590 1589 if (!adapter->tx_rings) 1591 1590 goto err_out; 1592 - adapter->rx_rings = kzalloc_objs(struct iavf_ring, num_active_queues, 1593 - GFP_KERNEL); 1591 + adapter->rx_rings = kzalloc_objs(struct iavf_ring, num_active_queues); 1594 1592 if (!adapter->rx_rings) 1595 1593 goto err_out; 1596 1594 ··· 1651 1653 v_budget = min_t(int, pairs + NONQ_VECS, 1652 1654 (int)adapter->vf_res->max_vectors); 1653 1655 1654 - adapter->msix_entries = kzalloc_objs(struct msix_entry, v_budget, 1655 - GFP_KERNEL); 1656 + adapter->msix_entries = kzalloc_objs(struct msix_entry, v_budget); 1656 1657 if (!adapter->msix_entries) { 1657 1658 err = -ENOMEM; 1658 1659 goto out;
+2 -4
drivers/net/ethernet/intel/ice/ice_arfs.c
··· 538 538 if (!vsi->arfs_fltr_cntrs) 539 539 return -ENOMEM; 540 540 541 - vsi->arfs_last_fltr_id = kzalloc_obj(*vsi->arfs_last_fltr_id, 542 - GFP_KERNEL); 541 + vsi->arfs_last_fltr_id = kzalloc_obj(*vsi->arfs_last_fltr_id); 543 542 if (!vsi->arfs_last_fltr_id) { 544 543 kfree(vsi->arfs_fltr_cntrs); 545 544 vsi->arfs_fltr_cntrs = NULL; ··· 560 561 if (!vsi || vsi->type != ICE_VSI_PF || ice_is_arfs_active(vsi)) 561 562 return; 562 563 563 - arfs_fltr_list = kzalloc_objs(*arfs_fltr_list, ICE_MAX_ARFS_LIST, 564 - GFP_KERNEL); 564 + arfs_fltr_list = kzalloc_objs(*arfs_fltr_list, ICE_MAX_ARFS_LIST); 565 565 if (!arfs_fltr_list) 566 566 return; 567 567
+1 -2
drivers/net/ethernet/intel/ice/ice_common.c
··· 1103 1103 1104 1104 /* Get MAC information */ 1105 1105 /* A single port can report up to two (LAN and WoL) addresses */ 1106 - mac_buf = kzalloc_objs(struct ice_aqc_manage_mac_read_resp, 2, 1107 - GFP_KERNEL); 1106 + mac_buf = kzalloc_objs(struct ice_aqc_manage_mac_read_resp, 2); 1108 1107 if (!mac_buf) { 1109 1108 status = -ENOMEM; 1110 1109 goto err_unroll_fltr_mgmt_struct;
+3 -6
drivers/net/ethernet/intel/ice/ice_lib.c
··· 538 538 return -ENOMEM; 539 539 540 540 vsi_stat->tx_ring_stats = 541 - kzalloc_objs(*vsi_stat->tx_ring_stats, vsi->alloc_txq, 542 - GFP_KERNEL); 541 + kzalloc_objs(*vsi_stat->tx_ring_stats, vsi->alloc_txq); 543 542 if (!vsi_stat->tx_ring_stats) 544 543 goto err_alloc_tx; 545 544 546 545 vsi_stat->rx_ring_stats = 547 - kzalloc_objs(*vsi_stat->rx_ring_stats, vsi->alloc_rxq, 548 - GFP_KERNEL); 546 + kzalloc_objs(*vsi_stat->rx_ring_stats, vsi->alloc_rxq); 549 547 if (!vsi_stat->rx_ring_stats) 550 548 goto err_alloc_rx; 551 549 ··· 3102 3104 if (ret) 3103 3105 goto unlock; 3104 3106 3105 - coalesce = kzalloc_objs(struct ice_coalesce_stored, vsi->num_q_vectors, 3106 - GFP_KERNEL); 3107 + coalesce = kzalloc_objs(struct ice_coalesce_stored, vsi->num_q_vectors); 3107 3108 if (!coalesce) { 3108 3109 ret = -ENOMEM; 3109 3110 goto decfg;
+1 -2
drivers/net/ethernet/intel/idpf/idpf_controlq.c
··· 159 159 idpf_ctlq_init_rxq_bufs(cq); 160 160 } else { 161 161 /* Allocate the array of msg pointers for TX queues */ 162 - cq->bi.tx_msg = kzalloc_objs(struct idpf_ctlq_msg *, qinfo->len, 163 - GFP_KERNEL); 162 + cq->bi.tx_msg = kzalloc_objs(struct idpf_ctlq_msg *, qinfo->len); 164 163 if (!cq->bi.tx_msg) { 165 164 err = -ENOMEM; 166 165 goto init_dealloc_q_mem;
+2 -4
drivers/net/ethernet/intel/idpf/idpf_controlq_setup.c
··· 40 40 /* We'll be allocating the buffer info memory first, then we can 41 41 * allocate the mapped buffers for the event processing 42 42 */ 43 - cq->bi.rx_buff = kzalloc_objs(struct idpf_dma_mem *, cq->ring_size, 44 - GFP_KERNEL); 43 + cq->bi.rx_buff = kzalloc_objs(struct idpf_dma_mem *, cq->ring_size); 45 44 if (!cq->bi.rx_buff) 46 45 return -ENOMEM; 47 46 ··· 49 50 struct idpf_dma_mem *bi; 50 51 int num = 1; /* number of idpf_dma_mem to be allocated */ 51 52 52 - cq->bi.rx_buff[i] = kzalloc_objs(struct idpf_dma_mem, num, 53 - GFP_KERNEL); 53 + cq->bi.rx_buff[i] = kzalloc_objs(struct idpf_dma_mem, num); 54 54 if (!cq->bi.rx_buff[i]) 55 55 goto unwind_alloc_cq_bufs; 56 56
+1 -2
drivers/net/ethernet/intel/idpf/idpf_lib.c
··· 369 369 } 370 370 371 371 num_lan_vecs = actual_vecs - num_rdma_vecs; 372 - adapter->msix_entries = kzalloc_objs(struct msix_entry, num_lan_vecs, 373 - GFP_KERNEL); 372 + adapter->msix_entries = kzalloc_objs(struct msix_entry, num_lan_vecs); 374 373 if (!adapter->msix_entries) { 375 374 err = -ENOMEM; 376 375 goto free_rdma_msix;
+8 -16
drivers/net/ethernet/intel/idpf/idpf_txrx.c
··· 183 183 tx_q->buf_pool_size = U16_MAX; 184 184 else 185 185 tx_q->buf_pool_size = tx_q->desc_count; 186 - tx_q->tx_buf = kzalloc_objs(*tx_q->tx_buf, tx_q->buf_pool_size, 187 - GFP_KERNEL); 186 + tx_q->tx_buf = kzalloc_objs(*tx_q->tx_buf, tx_q->buf_pool_size); 188 187 if (!tx_q->tx_buf) 189 188 return -ENOMEM; 190 189 ··· 1709 1710 { 1710 1711 bool split, flow_sch_en; 1711 1712 1712 - rsrc->txq_grps = kzalloc_objs(*rsrc->txq_grps, rsrc->num_txq_grp, 1713 - GFP_KERNEL); 1713 + rsrc->txq_grps = kzalloc_objs(*rsrc->txq_grps, rsrc->num_txq_grp); 1714 1714 if (!rsrc->txq_grps) 1715 1715 return -ENOMEM; 1716 1716 ··· 1725 1727 tx_qgrp->num_txq = num_txq; 1726 1728 1727 1729 for (unsigned int j = 0; j < tx_qgrp->num_txq; j++) { 1728 - tx_qgrp->txqs[j] = kzalloc_obj(*tx_qgrp->txqs[j], 1729 - GFP_KERNEL); 1730 + tx_qgrp->txqs[j] = kzalloc_obj(*tx_qgrp->txqs[j]); 1730 1731 if (!tx_qgrp->txqs[j]) 1731 1732 goto err_alloc; 1732 1733 } ··· 1802 1805 bool hs, rsc; 1803 1806 int err = 0; 1804 1807 1805 - rsrc->rxq_grps = kzalloc_objs(struct idpf_rxq_group, rsrc->num_rxq_grp, 1806 - GFP_KERNEL); 1808 + rsrc->rxq_grps = kzalloc_objs(struct idpf_rxq_group, rsrc->num_rxq_grp); 1807 1809 if (!rsrc->rxq_grps) 1808 1810 return -ENOMEM; 1809 1811 ··· 1816 1820 if (!idpf_is_queue_model_split(rsrc->rxq_model)) { 1817 1821 rx_qgrp->singleq.num_rxq = num_rxq; 1818 1822 for (unsigned int j = 0; j < num_rxq; j++) { 1819 - rx_qgrp->singleq.rxqs[j] = kzalloc_obj(*rx_qgrp->singleq.rxqs[j], 1820 - GFP_KERNEL); 1823 + rx_qgrp->singleq.rxqs[j] = kzalloc_obj(*rx_qgrp->singleq.rxqs[j]); 1821 1824 if (!rx_qgrp->singleq.rxqs[j]) { 1822 1825 err = -ENOMEM; 1823 1826 goto err_alloc; ··· 4587 4592 q_vector->rx_intr_mode = q_coal->rx_intr_mode; 4588 4593 q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0; 4589 4594 4590 - q_vector->tx = kzalloc_objs(*q_vector->tx, txqs_per_vector, 4591 - GFP_KERNEL); 4595 + q_vector->tx = kzalloc_objs(*q_vector->tx, txqs_per_vector); 4592 4596 if (!q_vector->tx) 4593 4597 goto error; 4594 4598
4595 - q_vector->rx = kzalloc_objs(*q_vector->rx, rxqs_per_vector, 4596 - GFP_KERNEL); 4599 + q_vector->rx = kzalloc_objs(*q_vector->rx, rxqs_per_vector); 4597 4600 if (!q_vector->rx) 4598 4601 goto error; 4599 4602 4600 4603 if (!idpf_is_queue_model_split(rsrc->rxq_model)) 4601 4604 continue; 4602 4605 4603 - q_vector->bufq = kzalloc_objs(*q_vector->bufq, bufqs_per_vector, 4604 4606 - GFP_KERNEL); 4605 + q_vector->bufq = kzalloc_objs(*q_vector->bufq, bufqs_per_vector); 4606 4608 if (!q_vector->bufq) goto error; 4607 4609
+4 -8
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
··· 1290 1290 1291 1291 kfree(q_info->queue_chunks); 1292 1292 1293 - q_info->queue_chunks = kzalloc_objs(*q_info->queue_chunks, num_chunks, 1294 - GFP_KERNEL); 1293 + q_info->queue_chunks = kzalloc_objs(*q_info->queue_chunks, num_chunks); 1295 1294 if (!q_info->queue_chunks) { 1296 1295 q_info->num_chunks = 0; 1297 1296 return -ENOMEM; ··· 3196 3197 u16 next_ptype_id = 0; 3197 3198 ssize_t reply_sz; 3198 3199 3199 - singleq_pt_lkup = kzalloc_objs(*singleq_pt_lkup, IDPF_RX_MAX_BASE_PTYPE, 3200 - GFP_KERNEL); 3200 + singleq_pt_lkup = kzalloc_objs(*singleq_pt_lkup, IDPF_RX_MAX_BASE_PTYPE); 3201 3201 if (!singleq_pt_lkup) 3202 3202 return -ENOMEM; 3203 3203 ··· 3482 3484 int err = 0; 3483 3485 3484 3486 if (!adapter->vcxn_mngr) { 3485 - adapter->vcxn_mngr = kzalloc_obj(*adapter->vcxn_mngr, 3486 - GFP_KERNEL); 3487 + adapter->vcxn_mngr = kzalloc_obj(*adapter->vcxn_mngr); 3487 3488 if (!adapter->vcxn_mngr) { 3488 3489 err = -ENOMEM; 3489 3490 goto init_failed; ··· 3554 3557 pci_sriov_set_totalvfs(adapter->pdev, idpf_get_max_vfs(adapter)); 3555 3558 num_max_vports = idpf_get_max_vports(adapter); 3556 3559 adapter->max_vports = num_max_vports; 3557 - adapter->vports = kzalloc_objs(*adapter->vports, num_max_vports, 3558 - GFP_KERNEL); 3560 + adapter->vports = kzalloc_objs(*adapter->vports, num_max_vports); 3559 3561 if (!adapter->vports) 3560 3562 return -ENOMEM; 3561 3563
+1 -2
drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c
··· 39 39 u32 temp_offset; 40 40 int reply_sz; 41 41 42 - recv_ptp_caps_msg = kzalloc_obj(struct virtchnl2_ptp_get_caps, 43 - GFP_KERNEL); 42 + recv_ptp_caps_msg = kzalloc_obj(struct virtchnl2_ptp_get_caps); 44 43 if (!recv_ptp_caps_msg) 45 44 return -ENOMEM; 46 45
+1 -2
drivers/net/ethernet/intel/igc/igc_main.c
··· 4633 4633 /* add 1 vector for link status interrupts */ 4634 4634 numvecs++; 4635 4635 4636 - adapter->msix_entries = kzalloc_objs(struct msix_entry, numvecs, 4637 - GFP_KERNEL); 4636 + adapter->msix_entries = kzalloc_objs(struct msix_entry, numvecs); 4638 4637 4639 4638 if (!adapter->msix_entries) 4640 4639 return;
+2 -4
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
··· 516 516 return -EINVAL; 517 517 518 518 if (!adapter->ixgbe_ieee_ets) { 519 - adapter->ixgbe_ieee_ets = kmalloc_obj(struct ieee_ets, 520 - GFP_KERNEL); 519 + adapter->ixgbe_ieee_ets = kmalloc_obj(struct ieee_ets); 521 520 if (!adapter->ixgbe_ieee_ets) 522 521 return -ENOMEM; 523 522 ··· 592 593 return -EINVAL; 593 594 594 595 if (!adapter->ixgbe_ieee_pfc) { 595 - adapter->ixgbe_ieee_pfc = kmalloc_obj(struct ieee_pfc, 596 - GFP_KERNEL); 596 + adapter->ixgbe_ieee_pfc = kmalloc_obj(struct ieee_pfc); 597 597 if (!adapter->ixgbe_ieee_pfc) 598 598 return -ENOMEM; 599 599 }
+1 -2
drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
··· 768 768 */ 769 769 vector_threshold = MIN_MSIX_COUNT; 770 770 771 - adapter->msix_entries = kzalloc_objs(struct msix_entry, vectors, 772 - GFP_KERNEL); 771 + adapter->msix_entries = kzalloc_objs(struct msix_entry, vectors); 773 772 if (!adapter->msix_entries) 774 773 return -ENOMEM; 775 774
+1 -2
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 6895 6895 #endif /* IXGBE_FCOE */ 6896 6896 6897 6897 /* initialize static ixgbe jump table entries */ 6898 - adapter->jump_tables[0] = kzalloc_obj(*adapter->jump_tables[0], 6899 - GFP_KERNEL); 6898 + adapter->jump_tables[0] = kzalloc_obj(*adapter->jump_tables[0]); 6900 6899 if (!adapter->jump_tables[0]) 6901 6900 return -ENOMEM; 6902 6901 adapter->jump_tables[0]->mat = ixgbe_ipv4_fields;
+1 -2
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
··· 64 64 IXGBE_FLAG_VMDQ_ENABLED; 65 65 66 66 /* Allocate memory for per VF control structures */ 67 - adapter->vfinfo = kzalloc_objs(struct vf_data_storage, num_vfs, 68 - GFP_KERNEL); 67 + adapter->vfinfo = kzalloc_objs(struct vf_data_storage, num_vfs); 69 68 if (!adapter->vfinfo) 70 69 return -ENOMEM; 71 70
+1 -2
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
··· 2716 2716 v_budget = min_t(int, v_budget, num_online_cpus()); 2717 2717 v_budget += NON_Q_VECTORS; 2718 2718 2719 - adapter->msix_entries = kzalloc_objs(struct msix_entry, v_budget, 2720 - GFP_KERNEL); 2719 + adapter->msix_entries = kzalloc_objs(struct msix_entry, v_budget); 2721 2720 if (!adapter->msix_entries) 2722 2721 return -ENOMEM; 2723 2722
+1 -2
drivers/net/ethernet/intel/libie/fwlog.c
··· 838 838 /* allocate space for this first because if it fails then we don't 839 839 * need to unwind 840 840 */ 841 - fw_modules = kzalloc_objs(*fw_modules, LIBIE_NR_FW_LOG_MODULES, 842 - GFP_KERNEL); 841 + fw_modules = kzalloc_objs(*fw_modules, LIBIE_NR_FW_LOG_MODULES); 843 842 if (!fw_modules) 844 843 return; 845 844
+1 -2
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
··· 3151 3151 for (thread = 0; thread < port->priv->nthreads; thread++) { 3152 3152 txq_pcpu = per_cpu_ptr(txq->pcpu, thread); 3153 3153 txq_pcpu->size = txq->size; 3154 - txq_pcpu->buffs = kmalloc_objs(*txq_pcpu->buffs, txq_pcpu->size, 3155 - GFP_KERNEL); 3154 + txq_pcpu->buffs = kmalloc_objs(*txq_pcpu->buffs, txq_pcpu->size); 3156 3155 if (!txq_pcpu->buffs) 3157 3156 return -ENOMEM; 3158 3157
+1 -2
drivers/net/ethernet/marvell/octeon_ep/octep_main.c
··· 115 115 116 116 /* Generic interrupts apart from input/output queues */ 117 117 num_msix = oct->num_oqs + CFG_GET_NON_IOQ_MSIX(oct->conf); 118 - oct->msix_entries = kzalloc_objs(struct msix_entry, num_msix, 119 - GFP_KERNEL); 118 + oct->msix_entries = kzalloc_objs(struct msix_entry, num_msix); 120 119 if (!oct->msix_entries) 121 120 goto msix_alloc_err; 122 121
+1 -2
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
··· 113 113 /* Generic interrupts apart from input/output queues */ 114 114 //num_msix = oct->num_oqs + CFG_GET_NON_IOQ_MSIX(oct->conf); 115 115 num_msix = oct->num_oqs; 116 - oct->msix_entries = kzalloc_objs(struct msix_entry, num_msix, 117 - GFP_KERNEL); 116 + oct->msix_entries = kzalloc_objs(struct msix_entry, num_msix); 118 117 if (!oct->msix_entries) 119 118 goto msix_alloc_err; 120 119
+2 -4
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
··· 1945 1945 /* CQ size of SQ */ 1946 1946 qset->sqe_cnt = qset->sqe_cnt ? qset->sqe_cnt : Q_COUNT(Q_SIZE_4K); 1947 1947 1948 - qset->cq = kzalloc_objs(struct otx2_cq_queue, pf->qset.cq_cnt, 1949 - GFP_KERNEL); 1948 + qset->cq = kzalloc_objs(struct otx2_cq_queue, pf->qset.cq_cnt); 1950 1949 if (!qset->cq) 1951 1950 goto err_free_mem; 1952 1951 ··· 1954 1955 if (!qset->sq) 1955 1956 goto err_free_mem; 1956 1957 1957 - qset->rq = kzalloc_objs(struct otx2_rcv_queue, pf->hw.rx_queues, 1958 - GFP_KERNEL); 1958 + qset->rq = kzalloc_objs(struct otx2_rcv_queue, pf->hw.rx_queues); 1959 1959 if (!qset->rq) 1960 1960 goto err_free_mem; 1961 1961
+1 -2
drivers/net/ethernet/marvell/prestera/prestera_counter.c
··· 157 157 if (err) 158 158 goto err_block; 159 159 160 - block->stats = kzalloc_objs(*block->stats, block->num_counters, 161 - GFP_KERNEL); 160 + block->stats = kzalloc_objs(*block->stats, block->num_counters); 162 161 if (!block->stats) { 163 162 err = -ENOMEM; 164 163 goto err_stats;
+2 -4
drivers/net/ethernet/marvell/sky2.c
··· 1601 1601 if (!sky2->tx_le) 1602 1602 goto nomem; 1603 1603 1604 - sky2->tx_ring = kzalloc_objs(struct tx_ring_info, sky2->tx_ring_size, 1605 - GFP_KERNEL); 1604 + sky2->tx_ring = kzalloc_objs(struct tx_ring_info, sky2->tx_ring_size); 1606 1605 if (!sky2->tx_ring) 1607 1606 goto nomem; 1608 1607 ··· 1610 1611 if (!sky2->rx_le) 1611 1612 goto nomem; 1612 1613 1613 - sky2->rx_ring = kzalloc_objs(struct rx_ring_info, sky2->rx_pending, 1614 - GFP_KERNEL); 1614 + sky2->rx_ring = kzalloc_objs(struct rx_ring_info, sky2->rx_pending); 1615 1615 if (!sky2->rx_ring) 1616 1616 goto nomem; 1617 1617
+1 -2
drivers/net/ethernet/mellanox/mlx4/alloc.c
··· 594 594 buf->nbufs = DIV_ROUND_UP(size, PAGE_SIZE); 595 595 buf->npages = buf->nbufs; 596 596 buf->page_shift = PAGE_SHIFT; 597 - buf->page_list = kzalloc_objs(*buf->page_list, buf->nbufs, 598 - GFP_KERNEL); 597 + buf->page_list = kzalloc_objs(*buf->page_list, buf->nbufs); 599 598 if (!buf->page_list) 600 599 return -ENOMEM; 601 600
+3 -6
drivers/net/ethernet/mellanox/mlx4/cmd.c
··· 2368 2368 struct mlx4_vf_admin_state *vf_admin; 2369 2369 2370 2370 priv->mfunc.master.slave_state = 2371 - kzalloc_objs(struct mlx4_slave_state, dev->num_slaves, 2372 - GFP_KERNEL); 2371 + kzalloc_objs(struct mlx4_slave_state, dev->num_slaves); 2373 2372 if (!priv->mfunc.master.slave_state) 2374 2373 goto err_comm; 2375 2374 ··· 2379 2380 goto err_comm_admin; 2380 2381 2381 2382 priv->mfunc.master.vf_oper = 2382 - kzalloc_objs(struct mlx4_vf_oper_state, dev->num_slaves, 2383 - GFP_KERNEL); 2383 + kzalloc_objs(struct mlx4_vf_oper_state, dev->num_slaves); 2384 2384 if (!priv->mfunc.master.vf_oper) 2385 2385 goto err_comm_oper; 2386 2386 ··· 2403 2405 struct mlx4_vport_state *oper_vport; 2404 2406 2405 2407 s_state->vlan_filter[port] = 2406 - kzalloc_obj(struct mlx4_vlan_fltr, 2407 - GFP_KERNEL); 2408 + kzalloc_obj(struct mlx4_vlan_fltr); 2408 2409 if (!s_state->vlan_filter[port]) { 2409 2410 if (--port) 2410 2411 kfree(s_state->vlan_filter[port]);
+2 -4
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
··· 2245 2245 if (!dst->tx_ring[t]) 2246 2246 goto err_free_tx; 2247 2247 2248 - dst->tx_cq[t] = kzalloc_objs(struct mlx4_en_cq *, MAX_TX_RINGS, 2249 - GFP_KERNEL); 2248 + dst->tx_cq[t] = kzalloc_objs(struct mlx4_en_cq *, MAX_TX_RINGS); 2250 2249 if (!dst->tx_cq[t]) { 2251 2250 kfree(dst->tx_ring[t]); 2252 2251 goto err_free_tx; ··· 3220 3221 err = -ENOMEM; 3221 3222 goto out; 3222 3223 } 3223 - priv->tx_cq[t] = kzalloc_objs(struct mlx4_en_cq *, MAX_TX_RINGS, 3224 - GFP_KERNEL); 3224 + priv->tx_cq[t] = kzalloc_objs(struct mlx4_en_cq *, MAX_TX_RINGS); 3225 3225 if (!priv->tx_cq[t]) { 3226 3226 err = -ENOMEM; 3227 3227 goto out;
+2 -4
drivers/net/ethernet/mellanox/mlx4/main.c
··· 868 868 int i, err = 0; 869 869 870 870 func_cap = kzalloc_obj(*func_cap); 871 - caps->spec_qps = kzalloc_objs(*caps->spec_qps, caps->num_ports, 872 - GFP_KERNEL); 871 + caps->spec_qps = kzalloc_objs(*caps->spec_qps, caps->num_ports); 873 872 874 873 if (!func_cap || !caps->spec_qps) { 875 874 mlx4_err(dev, "Failed to allocate memory for special qps cap\n"); ··· 3278 3279 MLX4_MAX_NUM_VF); 3279 3280 3280 3281 if (reset_flow) { 3281 - dev->dev_vfs = kzalloc_objs(*dev->dev_vfs, total_vfs, 3282 - GFP_KERNEL); 3282 + dev->dev_vfs = kzalloc_objs(*dev->dev_vfs, total_vfs); 3283 3283 if (!dev->dev_vfs) 3284 3284 goto free_mem; 3285 3285 return dev_flags;
+4 -8
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
··· 524 524 for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) { 525 525 struct resource_allocator *res_alloc = 526 526 &priv->mfunc.master.res_tracker.res_alloc[i]; 527 - res_alloc->quota = kmalloc_objs(int, dev->persist->num_vfs + 1, 528 - GFP_KERNEL); 527 + res_alloc->quota = kmalloc_objs(int, dev->persist->num_vfs + 1); 529 528 res_alloc->guaranteed = kmalloc_objs(int, 530 529 dev->persist->num_vfs + 1, 531 530 GFP_KERNEL); ··· 535 536 GFP_KERNEL); 536 537 else 537 538 res_alloc->allocated = 538 - kzalloc_objs(int, dev->persist->num_vfs + 1, 539 - GFP_KERNEL); 539 + kzalloc_objs(int, dev->persist->num_vfs + 1); 540 540 /* Reduce the sink counter */ 541 541 if (i == RES_COUNTER) 542 542 res_alloc->res_free = dev->caps.max_counters - 1; ··· 1236 1238 1237 1239 memset(data, 0, sizeof(*data)); 1238 1240 1239 - counters_arr = kmalloc_objs(*counters_arr, dev->caps.max_counters, 1240 - GFP_KERNEL); 1241 + counters_arr = kmalloc_objs(*counters_arr, dev->caps.max_counters); 1241 1242 if (!counters_arr) 1242 1243 return -ENOMEM; 1243 1244 ··· 5182 5185 mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n", 5183 5186 slave); 5184 5187 5185 - counters_arr = kmalloc_objs(*counters_arr, dev->caps.max_counters, 5186 - GFP_KERNEL); 5188 + counters_arr = kmalloc_objs(*counters_arr, dev->caps.max_counters); 5187 5189 if (!counters_arr) 5188 5190 return; 5189 5191
+1 -2
drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
··· 277 277 for (i = 0; i < chs->num; i++) { 278 278 struct mlx5e_txqsq **sqs; 279 279 280 - sqs = kvzalloc_objs(struct mlx5e_txqsq *, qos_sqs_size, 281 - GFP_KERNEL); 280 + sqs = kvzalloc_objs(struct mlx5e_txqsq *, qos_sqs_size); 282 281 if (!sqs) 283 282 goto err_free; 284 283
+1 -2
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
··· 23 23 static int mlx5e_xsk_get_pools(struct mlx5e_xsk *xsk) 24 24 { 25 25 if (!xsk->pools) { 26 - xsk->pools = kzalloc_objs(*xsk->pools, MLX5E_MAX_NUM_CHANNELS, 27 - GFP_KERNEL); 26 + xsk->pools = kzalloc_objs(*xsk->pools, MLX5E_MAX_NUM_CHANNELS); 28 27 if (unlikely(!xsk->pools)) 29 28 return -ENOMEM; 30 29 }
+1 -2
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 6258 6258 if (!priv->channel_stats) 6259 6259 goto err_free_tx_rates; 6260 6260 6261 - priv->fec_ranges = kzalloc_objs(*priv->fec_ranges, ETHTOOL_FEC_HIST_MAX, 6262 - GFP_KERNEL); 6261 + priv->fec_ranges = kzalloc_objs(*priv->fec_ranges, ETHTOOL_FEC_HIST_MAX); 6263 6262 if (!priv->fec_ranges) 6264 6263 goto err_free_channel_stats; 6265 6264
+2 -4
drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
··· 531 531 if (err) 532 532 goto out; 533 533 534 - conn->qp.rq.bufs = kvzalloc_objs(conn->qp.rq.bufs[0], conn->qp.rq.size, 535 - GFP_KERNEL); 534 + conn->qp.rq.bufs = kvzalloc_objs(conn->qp.rq.bufs[0], conn->qp.rq.size); 536 535 if (!conn->qp.rq.bufs) { 537 536 err = -ENOMEM; 538 537 goto err_wq; 539 538 } 540 539 541 - conn->qp.sq.bufs = kvzalloc_objs(conn->qp.sq.bufs[0], conn->qp.sq.size, 542 - GFP_KERNEL); 540 + conn->qp.sq.bufs = kvzalloc_objs(conn->qp.sq.bufs[0], conn->qp.sq.size); 543 541 if (!conn->qp.sq.bufs) { 544 542 err = -ENOMEM; 545 543 goto err_rq_bufs;
+1 -2
drivers/net/ethernet/mellanox/mlx5/core/rl.c
··· 247 247 return 0; 248 248 } 249 249 250 - table->rl_entry = kzalloc_objs(struct mlx5_rl_entry, table->max_size, 251 - GFP_KERNEL); 250 + table->rl_entry = kzalloc_objs(struct mlx5_rl_entry, table->max_size); 252 251 if (!table->rl_entry) 253 252 return -ENOMEM; 254 253
+1 -2
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
··· 2629 2629 ; 2630 2630 2631 2631 at->num_actions = num_actions - 1; 2632 - at->action_type_arr = kzalloc_objs(*action_type, num_actions, 2633 - GFP_KERNEL); 2632 + at->action_type_arr = kzalloc_objs(*action_type, num_actions); 2634 2633 if (!at->action_type_arr) 2635 2634 goto free_at; 2636 2635
+1 -2
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
··· 236 236 struct mlx5hws_matcher_attr attr = {0}; 237 237 int i; 238 238 239 - bwc_matcher->rules = kzalloc_objs(*bwc_matcher->rules, bwc_queues, 240 - GFP_KERNEL); 239 + bwc_matcher->rules = kzalloc_objs(*bwc_matcher->rules, bwc_queues); 241 240 if (!bwc_matcher->rules) 242 241 goto err; 243 242
+3 -6
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
··· 759 759 int num_actions = 0; 760 760 int err; 761 761 762 - *ractions = kzalloc_objs(**ractions, MLX5_FLOW_CONTEXT_ACTION_MAX, 763 - GFP_KERNEL); 762 + *ractions = kzalloc_objs(**ractions, MLX5_FLOW_CONTEXT_ACTION_MAX); 764 763 if (!*ractions) { 765 764 err = -ENOMEM; 766 765 goto out_err; 767 766 } 768 767 769 - fs_actions = kzalloc_objs(*fs_actions, MLX5_FLOW_CONTEXT_ACTION_MAX, 770 - GFP_KERNEL); 768 + fs_actions = kzalloc_objs(*fs_actions, MLX5_FLOW_CONTEXT_ACTION_MAX); 771 769 if (!fs_actions) { 772 770 err = -ENOMEM; 773 771 goto free_actions_alloc; 774 772 } 775 773 776 - dest_actions = kzalloc_objs(*dest_actions, MLX5_FLOW_CONTEXT_ACTION_MAX, 777 - GFP_KERNEL); 774 + dest_actions = kzalloc_objs(*dest_actions, MLX5_FLOW_CONTEXT_ACTION_MAX); 778 775 if (!dest_actions) { 779 776 err = -ENOMEM; 780 777 goto free_fs_actions_alloc;
+1 -2
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
··· 1090 1090 1091 1091 matcher->size_of_at_array = 1092 1092 num_of_at + matcher->attr.max_num_of_at_attach; 1093 - matcher->at = kvzalloc_objs(*matcher->at, matcher->size_of_at_array, 1094 - GFP_KERNEL); 1093 + matcher->at = kvzalloc_objs(*matcher->at, matcher->size_of_at_array); 1095 1094 if (!matcher->at) { 1096 1095 mlx5hws_err(ctx, "Failed to allocate action template array\n"); 1097 1096 ret = -ENOMEM;
+1 -2
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
··· 1132 1132 if (err) 1133 1133 return err; 1134 1134 1135 - ctx->send_queue = kzalloc_objs(*ctx->send_queue, ctx->queues, 1136 - GFP_KERNEL); 1135 + ctx->send_queue = kzalloc_objs(*ctx->send_queue, ctx->queues); 1137 1136 if (!ctx->send_queue) { 1138 1137 err = -ENOMEM; 1139 1138 goto free_bwc_locks;
+2 -4
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_buddy.c
··· 16 16 17 17 INIT_LIST_HEAD(&buddy->list_node); 18 18 19 - buddy->bitmap = kzalloc_objs(*buddy->bitmap, buddy->max_order + 1, 20 - GFP_KERNEL); 21 - buddy->num_free = kzalloc_objs(*buddy->num_free, buddy->max_order + 1, 22 - GFP_KERNEL); 19 + buddy->bitmap = kzalloc_objs(*buddy->bitmap, buddy->max_order + 1); 20 + buddy->num_free = kzalloc_objs(*buddy->num_free, buddy->max_order + 1); 23 21 24 22 if (!buddy->bitmap || !buddy->num_free) 25 23 goto err_free_all;
+1 -2
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_icm_pool.c
··· 227 227 int num_of_entries = 228 228 mlx5dr_icm_pool_chunk_size_to_entries(buddy->pool->max_log_chunk_sz); 229 229 230 - buddy->ste_arr = kvzalloc_objs(struct mlx5dr_ste, num_of_entries, 231 - GFP_KERNEL); 230 + buddy->ste_arr = kvzalloc_objs(struct mlx5dr_ste, num_of_entries); 232 231 if (!buddy->ste_arr) 233 232 return -ENOMEM; 234 233
+2 -4
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c
··· 275 275 if (mlx5_fs_cmd_is_fw_term_table(ft)) 276 276 return mlx5_fs_cmd_get_fw_cmds()->create_fte(ns, ft, group, fte); 277 277 278 - actions = kzalloc_objs(*actions, MLX5_FLOW_CONTEXT_ACTION_MAX, 279 - GFP_KERNEL); 278 + actions = kzalloc_objs(*actions, MLX5_FLOW_CONTEXT_ACTION_MAX); 280 279 if (!actions) { 281 280 err = -ENOMEM; 282 281 goto out_err; ··· 288 289 goto free_actions_alloc; 289 290 } 290 291 291 - term_actions = kzalloc_objs(*term_actions, MLX5_FLOW_CONTEXT_ACTION_MAX, 292 - GFP_KERNEL); 292 + term_actions = kzalloc_objs(*term_actions, MLX5_FLOW_CONTEXT_ACTION_MAX); 293 293 if (!term_actions) { 294 294 err = -ENOMEM; 295 295 goto free_fs_dr_actions_alloc;
+1 -2
drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
··· 327 327 struct mlxsw_afk_key_info *key_info; 328 328 int err; 329 329 330 - key_info = kzalloc_flex(*key_info, blocks, mlxsw_afk->max_blocks, 331 - GFP_KERNEL); 330 + key_info = kzalloc_flex(*key_info, blocks, mlxsw_afk->max_blocks); 332 331 if (!key_info) 333 332 return ERR_PTR(-ENOMEM); 334 333 err = mlxsw_afk_picker(mlxsw_afk, key_info, elusage);
+1 -2
drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
··· 871 871 mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, NULL, 872 872 &num_of_slots); 873 873 874 - mlxsw_hwmon = kzalloc_flex(*mlxsw_hwmon, line_cards, num_of_slots + 1, 875 - GFP_KERNEL); 874 + mlxsw_hwmon = kzalloc_flex(*mlxsw_hwmon, line_cards, num_of_slots + 1); 876 875 if (!mlxsw_hwmon) 877 876 return -ENOMEM; 878 877
+1 -2
drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
··· 642 642 mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, NULL, 643 643 &num_of_slots); 644 644 645 - thermal = kzalloc_flex(*thermal, line_cards, num_of_slots + 1, 646 - GFP_KERNEL); 645 + thermal = kzalloc_flex(*thermal, line_cards, num_of_slots + 1); 647 646 if (!thermal) 648 647 return -ENOMEM; 649 648
+1 -2
drivers/net/ethernet/mellanox/mlxsw/pci.c
··· 1667 1667 int i; 1668 1668 int err; 1669 1669 1670 - mlxsw_pci->fw_area.items = kzalloc_objs(*mem_item, num_pages, 1671 - GFP_KERNEL); 1670 + mlxsw_pci->fw_area.items = kzalloc_objs(*mem_item, num_pages); 1672 1671 if (!mlxsw_pci->fw_area.items) 1673 1672 return -ENOMEM; 1674 1673 mlxsw_pci->fw_area.count = num_pages;
+2 -4
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
··· 2489 2489 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS)) 2490 2490 return -EIO; 2491 2491 max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS); 2492 - trap = kzalloc_flex(*trap, policers_usage, BITS_TO_LONGS(max_policers), 2493 - GFP_KERNEL); 2492 + trap = kzalloc_flex(*trap, policers_usage, BITS_TO_LONGS(max_policers)); 2494 2493 if (!trap) 2495 2494 return -ENOMEM; 2496 2495 trap->max_policers = max_policers; ··· 2622 2623 if (err) 2623 2624 return err; 2624 2625 2625 - mlxsw_sp->lags = kzalloc_objs(struct mlxsw_sp_lag, mlxsw_sp->max_lag, 2626 - GFP_KERNEL); 2626 + mlxsw_sp->lags = kzalloc_objs(struct mlxsw_sp_lag, mlxsw_sp->max_lag); 2627 2627 if (!mlxsw_sp->lags) { 2628 2628 err = -ENOMEM; 2629 2629 goto err_kcalloc;
+1 -2
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
··· 513 513 * is 2^ACL_MAX_BF_LOG 514 514 */ 515 515 bf_bank_size = 1 << MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_BF_LOG); 516 - bf = kzalloc_flex(*bf, refcnt, size_mul(bf_bank_size, num_erp_banks), 517 - GFP_KERNEL); 516 + bf = kzalloc_flex(*bf, refcnt, size_mul(bf_bank_size, num_erp_banks)); 518 517 if (!bf) 519 518 return ERR_PTR(-ENOMEM); 520 519
+1 -2
drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
··· 647 647 int i; 648 648 int err; 649 649 650 - mlxsw_sp->sb->ports = kzalloc_objs(struct mlxsw_sp_sb_port, max_ports, 651 - GFP_KERNEL); 650 + mlxsw_sp->sb->ports = kzalloc_objs(struct mlxsw_sp_sb_port, max_ports); 652 651 if (!mlxsw_sp->sb->ports) 653 652 return -ENOMEM; 654 653
+3 -6
drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
··· 654 654 655 655 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) 656 656 { 657 - mlxsw_sp_port->dcb.ets = kzalloc_obj(*mlxsw_sp_port->dcb.ets, 658 - GFP_KERNEL); 657 + mlxsw_sp_port->dcb.ets = kzalloc_obj(*mlxsw_sp_port->dcb.ets); 659 658 if (!mlxsw_sp_port->dcb.ets) 660 659 return -ENOMEM; 661 660 ··· 672 673 { 673 674 int i; 674 675 675 - mlxsw_sp_port->dcb.maxrate = kmalloc_obj(*mlxsw_sp_port->dcb.maxrate, 676 - GFP_KERNEL); 676 + mlxsw_sp_port->dcb.maxrate = kmalloc_obj(*mlxsw_sp_port->dcb.maxrate); 677 677 if (!mlxsw_sp_port->dcb.maxrate) 678 678 return -ENOMEM; 679 679 ··· 689 691 690 692 static int mlxsw_sp_port_pfc_init(struct mlxsw_sp_port *mlxsw_sp_port) 691 693 { 692 - mlxsw_sp_port->dcb.pfc = kzalloc_obj(*mlxsw_sp_port->dcb.pfc, 693 - GFP_KERNEL); 694 + mlxsw_sp_port->dcb.pfc = kzalloc_obj(*mlxsw_sp_port->dcb.pfc); 694 695 if (!mlxsw_sp_port->dcb.pfc) 695 696 return -ENOMEM; 696 697
+1 -2
drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
··· 277 277 struct mlxsw_sp_nve_mc_record *mc_record; 278 278 int err; 279 279 280 - mc_record = kzalloc_flex(*mc_record, entries, num_max_entries, 281 - GFP_KERNEL); 280 + mc_record = kzalloc_flex(*mc_record, entries, num_max_entries); 282 281 if (!mc_record) 283 282 return ERR_PTR(-ENOMEM); 284 283
+2 -4
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
··· 1038 1038 return -EIO; 1039 1039 1040 1040 max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); 1041 - mlxsw_sp->router->vrs = kzalloc_objs(struct mlxsw_sp_vr, max_vrs, 1042 - GFP_KERNEL); 1041 + mlxsw_sp->router->vrs = kzalloc_objs(struct mlxsw_sp_vr, max_vrs); 1043 1042 if (!mlxsw_sp->router->vrs) 1044 1043 return -ENOMEM; 1045 1044 ··· 11067 11068 mlxsw_sp->router->max_rif_mac_profile = 11068 11069 MLXSW_CORE_RES_GET(core, MAX_RIF_MAC_PROFILES); 11069 11070 11070 - mlxsw_sp->router->rifs = kzalloc_objs(struct mlxsw_sp_rif *, max_rifs, 11071 - GFP_KERNEL); 11071 + mlxsw_sp->router->rifs = kzalloc_objs(struct mlxsw_sp_rif *, max_rifs); 11072 11072 if (!mlxsw_sp->router->rifs) 11073 11073 return -ENOMEM; 11074 11074
+1 -2
drivers/net/ethernet/micrel/ksz884x.c
··· 3991 3991 */ 3992 3992 static int ksz_alloc_soft_desc(struct ksz_desc_info *desc_info, int transmit) 3993 3993 { 3994 - desc_info->ring = kzalloc_objs(struct ksz_desc, desc_info->alloc, 3995 - GFP_KERNEL); 3994 + desc_info->ring = kzalloc_objs(struct ksz_desc, desc_info->alloc); 3996 3995 if (!desc_info->ring) 3997 3996 return 1; 3998 3997 hw_init_desc(desc_info, transmit);
+1 -2
drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
··· 200 200 struct fdma *fdma = &tx->fdma; 201 201 int err; 202 202 203 - tx->dcbs_buf = kzalloc_objs(struct lan966x_tx_dcb_buf, fdma->n_dcbs, 204 - GFP_KERNEL); 203 + tx->dcbs_buf = kzalloc_objs(struct lan966x_tx_dcb_buf, fdma->n_dcbs); 205 204 if (!tx->dcbs_buf) 206 205 return -ENOMEM; 207 206
+1 -2
drivers/net/ethernet/microchip/sparx5/sparx5_main.c
··· 889 889 } 890 890 sparx5->port_count = of_get_child_count(ports); 891 891 892 - configs = kzalloc_objs(struct initial_port_config, sparx5->port_count, 893 - GFP_KERNEL); 892 + configs = kzalloc_objs(struct initial_port_config, sparx5->port_count); 894 893 if (!configs) { 895 894 err = -ENOMEM; 896 895 goto cleanup_pnode;
+2 -4
drivers/net/ethernet/microsoft/mana/mana_en.c
··· 2326 2326 int err; 2327 2327 int i; 2328 2328 2329 - apc->tx_qp = kzalloc_objs(struct mana_tx_qp, apc->num_queues, 2330 - GFP_KERNEL); 2329 + apc->tx_qp = kzalloc_objs(struct mana_tx_qp, apc->num_queues); 2331 2330 if (!apc->tx_qp) 2332 2331 return -ENOMEM; 2333 2332 ··· 2851 2852 if (!apc->indir_table) 2852 2853 return -ENOMEM; 2853 2854 2854 - apc->rxobj_table = kzalloc_objs(mana_handle_t, apc->indir_table_sz, 2855 - GFP_KERNEL); 2855 + apc->rxobj_table = kzalloc_objs(mana_handle_t, apc->indir_table_sz); 2856 2856 if (!apc->rxobj_table) { 2857 2857 kfree(apc->indir_table); 2858 2858 return -ENOMEM;
+1 -2
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
··· 3703 3703 * slices. We give up on MSI-X if we can only get a single 3704 3704 * vector. */ 3705 3705 3706 - mgp->msix_vectors = kzalloc_objs(*mgp->msix_vectors, mgp->num_slices, 3707 - GFP_KERNEL); 3706 + mgp->msix_vectors = kzalloc_objs(*mgp->msix_vectors, mgp->num_slices); 3708 3707 if (mgp->msix_vectors == NULL) 3709 3708 goto no_msix; 3710 3709 for (i = 0; i < mgp->num_slices; i++) {
+1 -2
drivers/net/ethernet/netronome/nfp/bpf/offload.c
··· 123 123 if (!cnt) 124 124 goto out; 125 125 126 - nfp_prog->map_records = kmalloc_objs(nfp_prog->map_records[0], cnt, 127 - GFP_KERNEL); 126 + nfp_prog->map_records = kmalloc_objs(nfp_prog->map_records[0], cnt); 128 127 if (!nfp_prog->map_records) { 129 128 err = -ENOMEM; 130 129 goto out;
+1 -2
drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
··· 335 335 continue; 336 336 } 337 337 338 - acti_netdevs = kmalloc_objs(*acti_netdevs, entry->slave_cnt, 339 - GFP_KERNEL); 338 + acti_netdevs = kmalloc_objs(*acti_netdevs, entry->slave_cnt); 340 339 if (!acti_netdevs) { 341 340 schedule_delayed_work(&lag->work, 342 341 NFP_FL_LAG_DELAY);
+1 -2
drivers/net/ethernet/netronome/nfp/nfd3/rings.c
··· 140 140 goto err_alloc; 141 141 } 142 142 143 - tx_ring->txbufs = kvzalloc_objs(*tx_ring->txbufs, tx_ring->cnt, 144 - GFP_KERNEL); 143 + tx_ring->txbufs = kvzalloc_objs(*tx_ring->txbufs, tx_ring->cnt); 145 144 if (!tx_ring->txbufs) 146 145 goto err_alloc; 147 146
+1 -2
drivers/net/ethernet/netronome/nfp/nfdk/rings.c
··· 105 105 goto err_alloc; 106 106 } 107 107 108 - tx_ring->ktxbufs = kvzalloc_objs(*tx_ring->ktxbufs, tx_ring->cnt, 109 - GFP_KERNEL); 108 + tx_ring->ktxbufs = kvzalloc_objs(*tx_ring->ktxbufs, tx_ring->cnt); 110 109 if (!tx_ring->ktxbufs) 111 110 goto err_alloc; 112 111
+1 -2
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
··· 2537 2537 nn->dp.num_r_vecs, num_online_cpus()); 2538 2538 nn->max_r_vecs = nn->dp.num_r_vecs; 2539 2539 2540 - nn->dp.xsk_pools = kzalloc_objs(*nn->dp.xsk_pools, nn->max_r_vecs, 2541 - GFP_KERNEL); 2540 + nn->dp.xsk_pools = kzalloc_objs(*nn->dp.xsk_pools, nn->max_r_vecs); 2542 2541 if (!nn->dp.xsk_pools) { 2543 2542 err = -ENOMEM; 2544 2543 goto err_free_nn;
+1 -2
drivers/net/ethernet/netronome/nfp/nfp_net_main.c
··· 229 229 wanted_irqs = 0; 230 230 list_for_each_entry(nn, &pf->vnics, vnic_list) 231 231 wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs; 232 - pf->irq_entries = kzalloc_objs(*pf->irq_entries, wanted_irqs, 233 - GFP_KERNEL); 232 + pf->irq_entries = kzalloc_objs(*pf->irq_entries, wanted_irqs); 234 233 if (!pf->irq_entries) 235 234 return -ENOMEM; 236 235
+1 -2
drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c
··· 99 99 100 100 entry_sz = nfp_cpp_area_size(sb_desc_area) / num_entries; 101 101 102 - pf->shared_bufs = kmalloc_objs(pf->shared_bufs[0], num_entries, 103 - GFP_KERNEL); 102 + pf->shared_bufs = kmalloc_objs(pf->shared_bufs[0], num_entries); 104 103 if (!pf->shared_bufs) { 105 104 err = -ENOMEM; 106 105 goto err_release_area;
+2 -4
drivers/net/ethernet/nvidia/forcedeth.c
··· 5854 5854 goto out_unmap; 5855 5855 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; 5856 5856 } 5857 - np->rx_skb = kzalloc_objs(struct nv_skb_map, np->rx_ring_size, 5858 - GFP_KERNEL); 5859 - np->tx_skb = kzalloc_objs(struct nv_skb_map, np->tx_ring_size, 5860 - GFP_KERNEL); 5857 + np->rx_skb = kzalloc_objs(struct nv_skb_map, np->rx_ring_size); 5858 + np->tx_skb = kzalloc_objs(struct nv_skb_map, np->tx_ring_size); 5861 5859 if (!np->rx_skb || !np->tx_skb) 5862 5860 goto out_freering; 5863 5861
+2 -4
drivers/net/ethernet/pasemi/pasemi_mac.c
··· 381 381 spin_lock_init(&ring->lock); 382 382 383 383 ring->size = RX_RING_SIZE; 384 - ring->ring_info = kzalloc_objs(struct pasemi_mac_buffer, RX_RING_SIZE, 385 - GFP_KERNEL); 384 + ring->ring_info = kzalloc_objs(struct pasemi_mac_buffer, RX_RING_SIZE); 386 385 387 386 if (!ring->ring_info) 388 387 goto out_ring_info; ··· 463 464 spin_lock_init(&ring->lock); 464 465 465 466 ring->size = TX_RING_SIZE; 466 - ring->ring_info = kzalloc_objs(struct pasemi_mac_buffer, TX_RING_SIZE, 467 - GFP_KERNEL); 467 + ring->ring_info = kzalloc_objs(struct pasemi_mac_buffer, TX_RING_SIZE); 468 468 if (!ring->ring_info) 469 469 goto out_ring_info; 470 470
+1 -2
drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
··· 202 202 203 203 recv_ctx = &adapter->recv_ctx; 204 204 205 - rds_ring = kzalloc_objs(struct nx_host_rds_ring, adapter->max_rds_rings, 206 - GFP_KERNEL); 205 + rds_ring = kzalloc_objs(struct nx_host_rds_ring, adapter->max_rds_rings); 207 206 if (rds_ring == NULL) 208 207 goto err_out; 209 208
+2 -4
drivers/net/ethernet/qlogic/qed/qed_cxt.c
··· 845 845 p_t2->num_pages = DIV_ROUND_UP(total_size, psz); 846 846 847 847 /* allocate t2 */ 848 - p_t2->dma_mem = kzalloc_objs(struct phys_mem_desc, p_t2->num_pages, 849 - GFP_KERNEL); 848 + p_t2->dma_mem = kzalloc_objs(struct phys_mem_desc, p_t2->num_pages); 850 849 if (!p_t2->dma_mem) { 851 850 DP_NOTICE(p_hwfn, "Failed to allocate t2 table\n"); 852 851 rc = -ENOMEM; ··· 993 994 int rc; 994 995 995 996 size = qed_cxt_ilt_shadow_size(clients); 996 - p_mngr->ilt_shadow = kzalloc_objs(struct phys_mem_desc, size, 997 - GFP_KERNEL); 997 + p_mngr->ilt_shadow = kzalloc_objs(struct phys_mem_desc, size); 998 998 if (!p_mngr->ilt_shadow) { 999 999 rc = -ENOMEM; 1000 1000 goto ilt_shadow_fail;
+1 -2
drivers/net/ethernet/qlogic/qed/qed_debug.c
··· 6821 6821 6822 6822 /* Read number of formats and allocate memory for all formats */ 6823 6823 meta->formats_num = qed_read_dword_from_buf(meta_buf_bytes, &offset); 6824 - meta->formats = kzalloc_objs(struct mcp_trace_format, meta->formats_num, 6825 - GFP_KERNEL); 6824 + meta->formats = kzalloc_objs(struct mcp_trace_format, meta->formats_num); 6826 6825 if (!meta->formats) 6827 6826 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED; 6828 6827
+1 -2
drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
··· 1692 1692 if (!buf_size) 1693 1693 return NULL; 1694 1694 1695 - allocated_mem = kzalloc_objs(struct phys_mem_desc, NUM_STORMS, 1696 - GFP_KERNEL); 1695 + allocated_mem = kzalloc_objs(struct phys_mem_desc, NUM_STORMS); 1697 1696 if (!allocated_mem) 1698 1697 return NULL; 1699 1698
+3 -6
drivers/net/ethernet/qlogic/qede/qede_main.c
··· 963 963 struct qede_fastpath *fp; 964 964 int i; 965 965 966 - edev->fp_array = kzalloc_objs(*edev->fp_array, QEDE_QUEUE_CNT(edev), 967 - GFP_KERNEL); 966 + edev->fp_array = kzalloc_objs(*edev->fp_array, QEDE_QUEUE_CNT(edev)); 968 967 if (!edev->fp_array) { 969 968 DP_NOTICE(edev, "fp array allocation failed\n"); 970 969 goto err; ··· 1006 1007 } 1007 1008 1008 1009 if (fp->type & QEDE_FASTPATH_TX) { 1009 - fp->txq = kzalloc_objs(*fp->txq, edev->dev_info.num_tc, 1010 - GFP_KERNEL); 1010 + fp->txq = kzalloc_objs(*fp->txq, edev->dev_info.num_tc); 1011 1011 if (!fp->txq) 1012 1012 goto err; 1013 1013 } ··· 1017 1019 goto err; 1018 1020 1019 1021 if (edev->xdp_prog) { 1020 - fp->xdp_tx = kzalloc_obj(*fp->xdp_tx, 1021 - GFP_KERNEL); 1022 + fp->xdp_tx = kzalloc_obj(*fp->xdp_tx); 1022 1023 if (!fp->xdp_tx) 1023 1024 goto err; 1024 1025 fp->type |= QEDE_FASTPATH_XDP;
+1 -2
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
··· 994 994 995 995 act_pci_func = ahw->total_nic_func; 996 996 997 - adapter->npars = kzalloc_objs(struct qlcnic_npar_info, act_pci_func, 998 - GFP_KERNEL); 997 + adapter->npars = kzalloc_objs(struct qlcnic_npar_info, act_pci_func); 999 998 if (!adapter->npars) { 1000 999 ret = -ENOMEM; 1001 1000 goto err_pci_info;
+1 -2
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
··· 156 156 adapter->ahw->sriov = sriov; 157 157 sriov->num_vfs = num_vfs; 158 158 bc = &sriov->bc; 159 - sriov->vf_info = kzalloc_objs(struct qlcnic_vf_info, num_vfs, 160 - GFP_KERNEL); 159 + sriov->vf_info = kzalloc_objs(struct qlcnic_vf_info, num_vfs); 161 160 if (!sriov->vf_info) { 162 161 err = -ENOMEM; 163 162 goto qlcnic_free_sriov;
+1 -2
drivers/net/ethernet/renesas/ravb_main.c
··· 442 442 goto error; 443 443 444 444 /* Allocate TX skb rings */ 445 - priv->tx_skb[q] = kzalloc_objs(*priv->tx_skb[q], priv->num_tx_ring[q], 446 - GFP_KERNEL); 445 + priv->tx_skb[q] = kzalloc_objs(*priv->tx_skb[q], priv->num_tx_ring[q]); 447 446 if (!priv->tx_skb[q]) 448 447 goto error; 449 448
+2 -4
drivers/net/ethernet/renesas/rswitch_main.c
··· 346 346 gq->ndev = ndev; 347 347 348 348 if (!dir_tx) { 349 - gq->rx_bufs = kzalloc_objs(*gq->rx_bufs, gq->ring_size, 350 - GFP_KERNEL); 349 + gq->rx_bufs = kzalloc_objs(*gq->rx_bufs, gq->ring_size); 351 350 if (!gq->rx_bufs) 352 351 return -ENOMEM; 353 352 if (rswitch_gwca_queue_alloc_rx_buf(gq, 0, gq->ring_size) < 0) ··· 359 360 gq->skbs = kzalloc_objs(*gq->skbs, gq->ring_size); 360 361 if (!gq->skbs) 361 362 return -ENOMEM; 362 - gq->unmap_addrs = kzalloc_objs(*gq->unmap_addrs, gq->ring_size, 363 - GFP_KERNEL); 363 + gq->unmap_addrs = kzalloc_objs(*gq->unmap_addrs, gq->ring_size); 364 364 if (!gq->unmap_addrs) 365 365 goto out; 366 366 gq->tx_ring = dma_alloc_coherent(ndev->dev.parent,
+2 -4
drivers/net/ethernet/renesas/sh_eth.c
··· 1410 1410 mdp->rx_buf_sz += NET_IP_ALIGN; 1411 1411 1412 1412 /* Allocate RX and TX skb rings */ 1413 - mdp->rx_skbuff = kzalloc_objs(*mdp->rx_skbuff, mdp->num_rx_ring, 1414 - GFP_KERNEL); 1413 + mdp->rx_skbuff = kzalloc_objs(*mdp->rx_skbuff, mdp->num_rx_ring); 1415 1414 if (!mdp->rx_skbuff) 1416 1415 return -ENOMEM; 1417 1416 1418 - mdp->tx_skbuff = kzalloc_objs(*mdp->tx_skbuff, mdp->num_tx_ring, 1419 - GFP_KERNEL); 1417 + mdp->tx_skbuff = kzalloc_objs(*mdp->tx_skbuff, mdp->num_tx_ring); 1420 1418 if (!mdp->tx_skbuff) 1421 1419 goto ring_free; 1422 1420
+1 -2
drivers/net/ethernet/rocker/rocker_main.c
··· 2647 2647 if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count)) 2648 2648 return -EINVAL; 2649 2649 2650 - rocker->msix_entries = kmalloc_objs(struct msix_entry, msix_entries, 2651 - GFP_KERNEL); 2650 + rocker->msix_entries = kmalloc_objs(struct msix_entry, msix_entries); 2652 2651 if (!rocker->msix_entries) 2653 2652 return -ENOMEM; 2654 2653
+1 -2
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
··· 495 495 goto err_free_dma_rx; 496 496 } 497 497 498 - rx_ring->rx_skbuff = kmalloc_objs(struct sk_buff *, rx_rsize, 499 - GFP_KERNEL); 498 + rx_ring->rx_skbuff = kmalloc_objs(struct sk_buff *, rx_rsize); 500 499 if (!rx_ring->rx_skbuff) { 501 500 ret = -ENOMEM; 502 501 goto err_free_skbuff_dma;
+1 -2
drivers/net/ethernet/sfc/falcon/rx.c
··· 733 733 734 734 page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring / 735 735 efx->rx_bufs_per_page); 736 - rx_queue->page_ring = kzalloc_objs(*rx_queue->page_ring, page_ring_size, 737 - GFP_KERNEL); 736 + rx_queue->page_ring = kzalloc_objs(*rx_queue->page_ring, page_ring_size); 738 737 if (!rx_queue->page_ring) 739 738 rx_queue->page_ptr_mask = 0; 740 739 else
+1 -2
drivers/net/ethernet/sfc/falcon/selftest.c
··· 545 545 /* Determine how many packets to send */ 546 546 state->packet_count = efx->txq_entries / 3; 547 547 state->packet_count = min(1 << (i << 2), state->packet_count); 548 - state->skbs = kzalloc_objs(state->skbs[0], state->packet_count, 549 - GFP_KERNEL); 548 + state->skbs = kzalloc_objs(state->skbs[0], state->packet_count); 550 549 if (!state->skbs) 551 550 return -ENOMEM; 552 551 state->flush = false;
+1 -2
drivers/net/ethernet/sfc/mcdi_mon.c
··· 355 355 rc = -ENOMEM; 356 356 goto fail; 357 357 } 358 - hwmon->group.attrs = kzalloc_objs(struct attribute *, n_attrs + 1, 359 - GFP_KERNEL); 358 + hwmon->group.attrs = kzalloc_objs(struct attribute *, n_attrs + 1); 360 359 if (!hwmon->group.attrs) { 361 360 rc = -ENOMEM; 362 361 goto fail;
+1 -2
drivers/net/ethernet/sfc/rx_common.c
··· 138 138 bufs_in_recycle_ring = efx_rx_recycle_ring_size(efx); 139 139 page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring / 140 140 efx->rx_bufs_per_page); 141 - rx_queue->page_ring = kzalloc_objs(*rx_queue->page_ring, page_ring_size, 142 - GFP_KERNEL); 141 + rx_queue->page_ring = kzalloc_objs(*rx_queue->page_ring, page_ring_size); 143 142 if (!rx_queue->page_ring) 144 143 rx_queue->page_ptr_mask = 0; 145 144 else
+1 -2
drivers/net/ethernet/sfc/selftest.c
··· 542 542 /* Determine how many packets to send */ 543 543 state->packet_count = efx->txq_entries / 3; 544 544 state->packet_count = min(1 << (i << 2), state->packet_count); 545 - state->skbs = kzalloc_objs(state->skbs[0], state->packet_count, 546 - GFP_KERNEL); 545 + state->skbs = kzalloc_objs(state->skbs[0], state->packet_count); 547 546 if (!state->skbs) 548 547 return -ENOMEM; 549 548 state->flush = false;
+1 -2
drivers/net/ethernet/sfc/siena/mcdi_mon.c
··· 355 355 rc = -ENOMEM; 356 356 goto fail; 357 357 } 358 - hwmon->group.attrs = kzalloc_objs(struct attribute *, n_attrs + 1, 359 - GFP_KERNEL); 358 + hwmon->group.attrs = kzalloc_objs(struct attribute *, n_attrs + 1); 360 359 if (!hwmon->group.attrs) { 361 360 rc = -ENOMEM; 362 361 goto fail;
+1 -2
drivers/net/ethernet/sfc/siena/rx_common.c
··· 141 141 bufs_in_recycle_ring = efx_rx_recycle_ring_size(efx); 142 142 page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring / 143 143 efx->rx_bufs_per_page); 144 - rx_queue->page_ring = kzalloc_objs(*rx_queue->page_ring, page_ring_size, 145 - GFP_KERNEL); 144 + rx_queue->page_ring = kzalloc_objs(*rx_queue->page_ring, page_ring_size); 146 145 if (!rx_queue->page_ring) 147 146 rx_queue->page_ptr_mask = 0; 148 147 else
+1 -2
drivers/net/ethernet/sfc/siena/selftest.c
··· 543 543 /* Determine how many packets to send */ 544 544 state->packet_count = efx->txq_entries / 3; 545 545 state->packet_count = min(1 << (i << 2), state->packet_count); 546 - state->skbs = kzalloc_objs(state->skbs[0], state->packet_count, 547 - GFP_KERNEL); 546 + state->skbs = kzalloc_objs(state->skbs[0], state->packet_count); 548 547 if (!state->skbs) 549 548 return -ENOMEM; 550 549 state->flush = false;
+2 -4
drivers/net/ethernet/smsc/smsc9420.c
··· 1179 1179 1180 1180 BUG_ON(!pd->tx_ring); 1181 1181 1182 - pd->tx_buffers = kmalloc_objs(struct smsc9420_ring_info, TX_RING_SIZE, 1183 - GFP_KERNEL); 1182 + pd->tx_buffers = kmalloc_objs(struct smsc9420_ring_info, TX_RING_SIZE); 1184 1183 if (!pd->tx_buffers) 1185 1184 return -ENOMEM; 1186 1185 ··· 1210 1211 1211 1212 BUG_ON(!pd->rx_ring); 1212 1213 1213 - pd->rx_buffers = kmalloc_objs(struct smsc9420_ring_info, RX_RING_SIZE, 1214 - GFP_KERNEL); 1214 + pd->rx_buffers = kmalloc_objs(struct smsc9420_ring_info, RX_RING_SIZE); 1215 1215 if (pd->rx_buffers == NULL) 1216 1216 goto out; 1217 1217
+2 -4
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 2212 2212 return ret; 2213 2213 } 2214 2214 2215 - rx_q->buf_pool = kzalloc_objs(*rx_q->buf_pool, dma_conf->dma_rx_size, 2216 - GFP_KERNEL); 2215 + rx_q->buf_pool = kzalloc_objs(*rx_q->buf_pool, dma_conf->dma_rx_size); 2217 2216 if (!rx_q->buf_pool) 2218 2217 return -ENOMEM; 2219 2218 ··· 2300 2301 if (!tx_q->tx_skbuff_dma) 2301 2302 return -ENOMEM; 2302 2303 2303 - tx_q->tx_skbuff = kzalloc_objs(struct sk_buff *, dma_conf->dma_tx_size, 2304 - GFP_KERNEL); 2304 + tx_q->tx_skbuff = kzalloc_objs(struct sk_buff *, dma_conf->dma_tx_size); 2305 2305 if (!tx_q->tx_skbuff) 2306 2306 return -ENOMEM; 2307 2307
+3 -6
drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
··· 233 233 int ret = -ENOMEM; 234 234 unsigned int i; 235 235 236 - channel_head = kzalloc_objs(struct xlgmac_channel, pdata->channel_count, 237 - GFP_KERNEL); 236 + channel_head = kzalloc_objs(struct xlgmac_channel, pdata->channel_count); 238 237 if (!channel_head) 239 238 return ret; 240 239 241 240 netif_dbg(pdata, drv, pdata->netdev, 242 241 "channel_head=%p\n", channel_head); 243 242 244 - tx_ring = kzalloc_objs(struct xlgmac_ring, pdata->tx_ring_count, 245 - GFP_KERNEL); 243 + tx_ring = kzalloc_objs(struct xlgmac_ring, pdata->tx_ring_count); 246 244 if (!tx_ring) 247 245 goto err_tx_ring; 248 246 249 - rx_ring = kzalloc_objs(struct xlgmac_ring, pdata->rx_ring_count, 250 - GFP_KERNEL); 247 + rx_ring = kzalloc_objs(struct xlgmac_ring, pdata->rx_ring_count); 251 248 if (!rx_ring) 252 249 goto err_rx_ring; 253 250
+1 -2
drivers/net/ethernet/ti/k3-cppi-desc-pool.c
··· 77 77 78 78 pool->gen_pool->name = pool_name; 79 79 80 - pool->desc_infos = kzalloc_objs(*pool->desc_infos, pool->num_desc, 81 - GFP_KERNEL); 80 + pool->desc_infos = kzalloc_objs(*pool->desc_infos, pool->num_desc); 82 81 if (!pool->desc_infos) 83 82 goto gen_pool_desc_infos_alloc_fail; 84 83
+2 -4
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
··· 1542 1542 lp->tx_ring_head = 0; 1543 1543 lp->rx_ring_tail = 0; 1544 1544 lp->rx_ring_head = 0; 1545 - lp->tx_skb_ring = kzalloc_objs(*lp->tx_skb_ring, TX_BD_NUM_MAX, 1546 - GFP_KERNEL); 1545 + lp->tx_skb_ring = kzalloc_objs(*lp->tx_skb_ring, TX_BD_NUM_MAX); 1547 1546 if (!lp->tx_skb_ring) { 1548 1547 ret = -ENOMEM; 1549 1548 goto err_dma_release_rx; ··· 1556 1557 lp->tx_skb_ring[i] = skbuf_dma; 1557 1558 } 1558 1559 1559 - lp->rx_skb_ring = kzalloc_objs(*lp->rx_skb_ring, RX_BUF_NUM_DEFAULT, 1560 - GFP_KERNEL); 1560 + lp->rx_skb_ring = kzalloc_objs(*lp->rx_skb_ring, RX_BUF_NUM_DEFAULT); 1561 1561 if (!lp->rx_skb_ring) { 1562 1562 ret = -ENOMEM; 1563 1563 goto err_free_tx_skb_ring;
+1 -2
drivers/net/hyperv/netvsc_drv.c
··· 1524 1524 data[i++] = xdp_tx; 1525 1525 } 1526 1526 1527 - pcpu_sum = kvmalloc_objs(struct netvsc_ethtool_pcpu_stats, nr_cpu_ids, 1528 - GFP_KERNEL); 1527 + pcpu_sum = kvmalloc_objs(struct netvsc_ethtool_pcpu_stats, nr_cpu_ids); 1529 1528 if (!pcpu_sum) 1530 1529 return; 1531 1530
+1 -2
drivers/net/ieee802154/ca8210.c
··· 720 720 &priv->spi->dev, 721 721 "Resetting MAC...\n"); 722 722 723 - mlme_reset_wpc = kmalloc_obj(*mlme_reset_wpc, 724 - GFP_KERNEL); 723 + mlme_reset_wpc = kmalloc_obj(*mlme_reset_wpc); 725 724 if (!mlme_reset_wpc) 726 725 goto finish; 727 726 INIT_WORK(
+1 -2
drivers/net/ipa/gsi_trans.c
··· 730 730 * modulo that number to determine the next one that's free. 731 731 * Transactions are allocated one at a time. 732 732 */ 733 - trans_info->trans = kzalloc_objs(*trans_info->trans, tre_count, 734 - GFP_KERNEL); 733 + trans_info->trans = kzalloc_objs(*trans_info->trans, tre_count); 735 734 if (!trans_info->trans) 736 735 return -ENOMEM; 737 736 trans_info->free_id = 0; /* all modulo channel->tre_count */
+1 -2
drivers/net/pcs/pcs-rzn1-miic.c
··· 679 679 s8 *dt_val; 680 680 u32 conf; 681 681 682 - dt_val = kmalloc_objs(*dt_val, miic->of_data->conf_conv_count, 683 - GFP_KERNEL); 682 + dt_val = kmalloc_objs(*dt_val, miic->of_data->conf_conv_count); 684 683 if (!dt_val) 685 684 return -ENOMEM; 686 685
+1 -2
drivers/net/usb/hso.c
··· 2630 2630 if (!serial->tiocmget) 2631 2631 goto exit; 2632 2632 serial->tiocmget->serial_state_notification 2633 - = kzalloc_obj(struct hso_serial_state_notification, 2634 - GFP_KERNEL); 2633 + = kzalloc_obj(struct hso_serial_state_notification); 2635 2634 if (!serial->tiocmget->serial_state_notification) 2636 2635 goto exit; 2637 2636 tiocmget = serial->tiocmget;
+1 -2
drivers/net/usb/smsc75xx.c
··· 1450 1450 return ret; 1451 1451 } 1452 1452 1453 - dev->data[0] = (unsigned long) kzalloc_obj(struct smsc75xx_priv, 1454 - GFP_KERNEL); 1453 + dev->data[0] = (unsigned long) kzalloc_obj(struct smsc75xx_priv); 1455 1454 1456 1455 pdata = (struct smsc75xx_priv *)(dev->data[0]); 1457 1456 if (!pdata)
+2 -4
drivers/net/wan/fsl_ucc_hdlc.c
··· 203 203 goto free_tx_bd; 204 204 } 205 205 206 - priv->rx_skbuff = kzalloc_objs(*priv->rx_skbuff, priv->rx_ring_size, 207 - GFP_KERNEL); 206 + priv->rx_skbuff = kzalloc_objs(*priv->rx_skbuff, priv->rx_ring_size); 208 207 if (!priv->rx_skbuff) { 209 208 ret = -ENOMEM; 210 209 goto free_ucc_pram; 211 210 } 212 211 213 - priv->tx_skbuff = kzalloc_objs(*priv->tx_skbuff, priv->tx_ring_size, 214 - GFP_KERNEL); 212 + priv->tx_skbuff = kzalloc_objs(*priv->tx_skbuff, priv->tx_ring_size); 215 213 if (!priv->tx_skbuff) { 216 214 ret = -ENOMEM; 217 215 goto free_rx_skbuff;
+4 -8
drivers/net/wireless/ath/ath10k/ce.c
··· 1461 1461 1462 1462 nentries = roundup_pow_of_two(nentries); 1463 1463 1464 - src_ring = kzalloc_flex(*src_ring, per_transfer_context, nentries, 1465 - GFP_KERNEL); 1464 + src_ring = kzalloc_flex(*src_ring, per_transfer_context, nentries); 1466 1465 if (src_ring == NULL) 1467 1466 return ERR_PTR(-ENOMEM); 1468 1467 ··· 1518 1519 1519 1520 nentries = roundup_pow_of_two(nentries); 1520 1521 1521 - src_ring = kzalloc_flex(*src_ring, per_transfer_context, nentries, 1522 - GFP_KERNEL); 1522 + src_ring = kzalloc_flex(*src_ring, per_transfer_context, nentries); 1523 1523 if (!src_ring) 1524 1524 return ERR_PTR(-ENOMEM); 1525 1525 ··· 1573 1575 1574 1576 nentries = roundup_pow_of_two(attr->dest_nentries); 1575 1577 1576 - dest_ring = kzalloc_flex(*dest_ring, per_transfer_context, nentries, 1577 - GFP_KERNEL); 1578 + dest_ring = kzalloc_flex(*dest_ring, per_transfer_context, nentries); 1578 1579 if (dest_ring == NULL) 1579 1580 return ERR_PTR(-ENOMEM); 1580 1581 ··· 1616 1619 1617 1620 nentries = roundup_pow_of_two(attr->dest_nentries); 1618 1621 1619 - dest_ring = kzalloc_flex(*dest_ring, per_transfer_context, nentries, 1620 - GFP_KERNEL); 1622 + dest_ring = kzalloc_flex(*dest_ring, per_transfer_context, nentries); 1621 1623 if (!dest_ring) 1622 1624 return ERR_PTR(-ENOMEM); 1623 1625
+1 -2
drivers/net/wireless/ath/ath10k/mac.c
··· 7559 7559 } 7560 7560 7561 7561 if (ath10k_debug_is_extd_tx_stats_enabled(ar)) { 7562 - arsta->tx_stats = kzalloc_obj(*arsta->tx_stats, 7563 - GFP_KERNEL); 7562 + arsta->tx_stats = kzalloc_obj(*arsta->tx_stats); 7564 7563 if (!arsta->tx_stats) { 7565 7564 ath10k_mac_dec_num_stations(arvif, sta); 7566 7565 ret = -ENOMEM;
+1 -2
drivers/net/wireless/ath/ath12k/dp.c
··· 1362 1362 if (dp->num_spt_pages > ATH12K_MAX_PPT_ENTRIES) 1363 1363 dp->num_spt_pages = ATH12K_MAX_PPT_ENTRIES; 1364 1364 1365 - dp->spt_info = kzalloc_objs(struct ath12k_spt_info, dp->num_spt_pages, 1366 - GFP_KERNEL); 1365 + dp->spt_info = kzalloc_objs(struct ath12k_spt_info, dp->num_spt_pages); 1367 1366 1368 1367 if (!dp->spt_info) { 1369 1368 ath12k_warn(ab, "SPT page allocation failure");
+2 -4
drivers/net/wireless/ath/ath12k/mac.c
··· 5024 5024 u8 link_id) 5025 5025 { 5026 5026 if (!ahvif->cache[link_id]) { 5027 - ahvif->cache[link_id] = kzalloc_obj(*ahvif->cache[0], 5028 - GFP_KERNEL); 5027 + ahvif->cache[link_id] = kzalloc_obj(*ahvif->cache[0]); 5029 5028 if (ahvif->cache[link_id]) 5030 5029 INIT_LIST_HEAD(&ahvif->cache[link_id]->key_conf.list); 5031 5030 } ··· 14249 14250 if (ar->ab->hw_params->single_pdev_only) 14250 14251 n_combinations = 2; 14251 14252 14252 - combinations = kzalloc_objs(*combinations, n_combinations, 14253 - GFP_KERNEL); 14253 + combinations = kzalloc_objs(*combinations, n_combinations); 14254 14254 if (!combinations) 14255 14255 return -ENOMEM; 14256 14256
+4 -8
drivers/net/wireless/ath/ath5k/eeprom.c
··· 760 760 if (!pd->pd_step) 761 761 goto err_out; 762 762 763 - pd->pd_pwr = kzalloc_objs(s16, AR5K_EEPROM_N_PWR_POINTS_5111, 764 - GFP_KERNEL); 763 + pd->pd_pwr = kzalloc_objs(s16, AR5K_EEPROM_N_PWR_POINTS_5111); 765 764 if (!pd->pd_pwr) 766 765 goto err_out; 767 766 ··· 941 942 if (!pd->pd_step) 942 943 goto err_out; 943 944 944 - pd->pd_pwr = kzalloc_objs(s16, pd->pd_points, 945 - GFP_KERNEL); 945 + pd->pd_pwr = kzalloc_objs(s16, pd->pd_points); 946 946 947 947 if (!pd->pd_pwr) 948 948 goto err_out; ··· 978 980 if (!pd->pd_step) 979 981 goto err_out; 980 982 981 - pd->pd_pwr = kzalloc_objs(s16, pd->pd_points, 982 - GFP_KERNEL); 983 + pd->pd_pwr = kzalloc_objs(s16, pd->pd_points); 983 984 984 985 if (!pd->pd_pwr) 985 986 goto err_out; ··· 1233 1236 if (!pd->pd_step) 1234 1237 goto err_out; 1235 1238 1236 - pd->pd_pwr = kzalloc_objs(s16, pd->pd_points, 1237 - GFP_KERNEL); 1239 + pd->pd_pwr = kzalloc_objs(s16, pd->pd_points); 1238 1240 1239 1241 if (!pd->pd_pwr) 1240 1242 goto err_out;
+1 -2
drivers/net/wireless/ath/wil6210/fw_inc.c
··· 160 160 return -EINVAL; 161 161 } 162 162 163 - wil->brd_info = kzalloc_objs(struct wil_brd_info, max_num_ent, 164 - GFP_KERNEL); 163 + wil->brd_info = kzalloc_objs(struct wil_brd_info, max_num_ent); 165 164 if (!wil->brd_info) 166 165 return -ENOMEM; 167 166
+1 -2
drivers/net/wireless/ath/wil6210/pmc.c
··· 85 85 num_descriptors, descriptor_size); 86 86 87 87 /* allocate descriptors info list in pmc context*/ 88 - pmc->descriptors = kzalloc_objs(struct desc_alloc_info, num_descriptors, 89 - GFP_KERNEL); 88 + pmc->descriptors = kzalloc_objs(struct desc_alloc_info, num_descriptors); 90 89 if (!pmc->descriptors) { 91 90 wil_err(wil, "ERROR allocating pmc skb list\n"); 92 91 goto no_release_err;
+1 -2
drivers/net/wireless/ath/wil6210/txrx_edma.c
··· 314 314 struct list_head *free = &wil->rx_buff_mgmt.free; 315 315 int i; 316 316 317 - wil->rx_buff_mgmt.buff_arr = kzalloc_objs(struct wil_rx_buff, size + 1, 318 - GFP_KERNEL); 317 + wil->rx_buff_mgmt.buff_arr = kzalloc_objs(struct wil_rx_buff, size + 1); 319 318 if (!wil->rx_buff_mgmt.buff_arr) 320 319 return -ENOMEM; 321 320
+1 -2
drivers/net/wireless/broadcom/b43/debugfs.c
··· 677 677 } 678 678 e->dev = dev; 679 679 log = &e->txstatlog; 680 - log->log = kzalloc_objs(struct b43_txstatus, B43_NR_LOGGED_TXSTATUS, 681 - GFP_KERNEL); 680 + log->log = kzalloc_objs(struct b43_txstatus, B43_NR_LOGGED_TXSTATUS); 682 681 if (!log->log) { 683 682 b43err(dev->wl, "debugfs: add device txstatus OOM\n"); 684 683 kfree(e);
+1 -2
drivers/net/wireless/broadcom/b43/dma.c
··· 846 846 if (for_tx) 847 847 ring->nr_slots = B43_TXRING_SLOTS; 848 848 849 - ring->meta = kzalloc_objs(struct b43_dmadesc_meta, ring->nr_slots, 850 - GFP_KERNEL); 849 + ring->meta = kzalloc_objs(struct b43_dmadesc_meta, ring->nr_slots); 851 850 if (!ring->meta) 852 851 goto err_kfree_ring; 853 852 for (i = 0; i < ring->nr_slots; i++)
+1 -2
drivers/net/wireless/broadcom/b43legacy/dma.c
··· 620 620 if (for_tx) 621 621 nr_slots = B43legacy_TXRING_SLOTS; 622 622 623 - ring->meta = kzalloc_objs(struct b43legacy_dmadesc_meta, nr_slots, 624 - GFP_KERNEL); 623 + ring->meta = kzalloc_objs(struct b43legacy_dmadesc_meta, nr_slots); 625 624 if (!ring->meta) 626 625 goto err_kfree_ring; 627 626 if (for_tx) {
+1 -2
drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
··· 2199 2199 bus->msgbuf->commonrings[i] = 2200 2200 &devinfo->shared.commonrings[i]->commonring; 2201 2201 2202 - flowrings = kzalloc_objs(*flowrings, devinfo->shared.max_flowrings, 2203 - GFP_KERNEL); 2202 + flowrings = kzalloc_objs(*flowrings, devinfo->shared.max_flowrings); 2204 2203 if (!flowrings) 2205 2204 goto fail; 2206 2205
+1 -2
drivers/net/wireless/intel/ipw2x00/ipw2100.c
··· 3412 3412 dma_addr_t p; 3413 3413 3414 3414 priv->msg_buffers = 3415 - kmalloc_objs(struct ipw2100_tx_packet, IPW_COMMAND_POOL_SIZE, 3416 - GFP_KERNEL); 3415 + kmalloc_objs(struct ipw2100_tx_packet, IPW_COMMAND_POOL_SIZE); 3417 3416 if (!priv->msg_buffers) 3418 3417 return -ENOMEM; 3419 3418
+1 -2
drivers/net/wireless/intel/ipw2x00/libipw_module.c
··· 57 57 int i, j; 58 58 59 59 for (i = 0; i < MAX_NETWORK_COUNT; i++) { 60 - ieee->networks[i] = kzalloc_obj(struct libipw_network, 61 - GFP_KERNEL); 60 + ieee->networks[i] = kzalloc_obj(struct libipw_network); 62 61 if (!ieee->networks[i]) { 63 62 LIBIPW_ERROR("Out of memory allocating beacons\n"); 64 63 for (j = 0; j < i; j++)
+3 -6
drivers/net/wireless/intel/iwlegacy/common.c
··· 2968 2968 /* Driver ilate data, only for Tx (not command) queues, 2969 2969 * not shared with device. */ 2970 2970 if (id != il->cmd_queue) { 2971 - txq->skbs = kzalloc_objs(struct sk_buff *, TFD_QUEUE_SIZE_MAX, 2972 - GFP_KERNEL); 2971 + txq->skbs = kzalloc_objs(struct sk_buff *, TFD_QUEUE_SIZE_MAX); 2973 2972 if (!txq->skbs) { 2974 2973 IL_ERR("Fail to alloc skbs\n"); 2975 2974 goto error; ··· 3441 3442 } 3442 3443 3443 3444 channels = 3444 - kzalloc_objs(struct ieee80211_channel, il->channel_count, 3445 - GFP_KERNEL); 3445 + kzalloc_objs(struct ieee80211_channel, il->channel_count); 3446 3446 if (!channels) 3447 3447 return -ENOMEM; 3448 3448 ··· 4607 4609 { 4608 4610 if (!il->txq) 4609 4611 il->txq = 4610 - kzalloc_objs(struct il_tx_queue, il->cfg->num_of_queues, 4611 - GFP_KERNEL); 4612 + kzalloc_objs(struct il_tx_queue, il->cfg->num_of_queues); 4612 4613 if (!il->txq) { 4613 4614 IL_ERR("Not enough memory for txq\n"); 4614 4615 return -ENOMEM;
+1 -2
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
··· 2958 2958 struct iwl_fw_dump_desc *iwl_dump_error_desc; 2959 2959 int ret; 2960 2960 2961 - iwl_dump_error_desc = kmalloc_obj(*iwl_dump_error_desc, 2962 - GFP_KERNEL); 2961 + iwl_dump_error_desc = kmalloc_obj(*iwl_dump_error_desc); 2963 2962 2964 2963 if (!iwl_dump_error_desc) 2965 2964 return -ENOMEM;
+5 -10
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
··· 1377 1377 u8 tx_chains = fw->valid_rx_ant; 1378 1378 1379 1379 if (cfg->uhb_supported) 1380 - data = kzalloc_flex(*data, channels, IWL_NVM_NUM_CHANNELS_UHB, 1381 - GFP_KERNEL); 1380 + data = kzalloc_flex(*data, channels, IWL_NVM_NUM_CHANNELS_UHB); 1382 1381 else 1383 - data = kzalloc_flex(*data, channels, IWL_NVM_NUM_CHANNELS_EXT, 1384 - GFP_KERNEL); 1382 + data = kzalloc_flex(*data, channels, IWL_NVM_NUM_CHANNELS_EXT); 1385 1383 if (!data) 1386 1384 return NULL; 1387 1385 ··· 1442 1444 const __le16 *ch_section; 1443 1445 1444 1446 if (cfg->uhb_supported) 1445 - data = kzalloc_flex(*data, channels, IWL_NVM_NUM_CHANNELS_UHB, 1446 - GFP_KERNEL); 1447 + data = kzalloc_flex(*data, channels, IWL_NVM_NUM_CHANNELS_UHB); 1447 1448 else if (cfg->nvm_type != IWL_NVM_EXT) 1448 - data = kzalloc_flex(*data, channels, IWL_NVM_NUM_CHANNELS, 1449 - GFP_KERNEL); 1449 + data = kzalloc_flex(*data, channels, IWL_NVM_NUM_CHANNELS); 1450 1450 else 1451 - data = kzalloc_flex(*data, channels, IWL_NVM_NUM_CHANNELS_EXT, 1452 - GFP_KERNEL); 1451 + data = kzalloc_flex(*data, channels, IWL_NVM_NUM_CHANNELS_EXT); 1453 1452 if (!data) 1454 1453 return NULL; 1455 1454
+2 -4
drivers/net/wireless/intel/iwlwifi/mld/d3.c
··· 1249 1249 goto out; 1250 1250 } 1251 1251 n_matches = hweight_long(matched_profiles); 1252 - netdetect_info = kzalloc_flex(*netdetect_info, matches, n_matches, 1253 - GFP_KERNEL); 1252 + netdetect_info = kzalloc_flex(*netdetect_info, matches, n_matches); 1254 1253 if (netdetect_info) 1255 1254 iwl_mld_set_netdetect_info(mld, netdetect_cfg, netdetect_info, 1256 1255 resume_data->netdetect_res, ··· 1999 2000 2000 2001 iwl_fw_dbg_read_d3_debug_data(&mld->fwrt); 2001 2002 2002 - resume_data.wowlan_status = kzalloc_obj(*resume_data.wowlan_status, 2003 - GFP_KERNEL); 2003 + resume_data.wowlan_status = kzalloc_obj(*resume_data.wowlan_status); 2004 2004 if (!resume_data.wowlan_status) 2005 2005 return -ENOMEM; 2006 2006
+1 -2
drivers/net/wireless/intel/iwlwifi/mld/sta.c
··· 660 660 if (mld->fw_status.in_hw_restart) 661 661 return 0; 662 662 663 - dup_data = kzalloc_objs(*dup_data, mld->trans->info.num_rxqs, 664 - GFP_KERNEL); 663 + dup_data = kzalloc_objs(*dup_data, mld->trans->info.num_rxqs); 665 664 if (!dup_data) 666 665 return -ENOMEM; 667 666
+1 -2
drivers/net/wireless/intel/iwlwifi/mld/time_sync.c
··· 12 12 static int iwl_mld_init_time_sync(struct iwl_mld *mld, u32 protocols, 13 13 const u8 *addr) 14 14 { 15 - struct iwl_mld_time_sync_data *time_sync = kzalloc_obj(*time_sync, 16 - GFP_KERNEL); 15 + struct iwl_mld_time_sync_data *time_sync = kzalloc_obj(*time_sync); 17 16 18 17 if (!time_sync) 19 18 return -ENOMEM;
+1 -2
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
··· 1798 1798 if (iwl_mvm_has_new_rx_api(mvm)) { 1799 1799 int q; 1800 1800 1801 - dup_data = kzalloc_objs(*dup_data, mvm->trans->info.num_rxqs, 1802 - GFP_KERNEL); 1801 + dup_data = kzalloc_objs(*dup_data, mvm->trans->info.num_rxqs); 1803 1802 if (!dup_data) 1804 1803 return -ENOMEM; 1805 1804 /*
+1 -2
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
··· 773 773 if (WARN_ON(trans_pcie->rxq)) 774 774 return -EINVAL; 775 775 776 - trans_pcie->rxq = kzalloc_objs(struct iwl_rxq, trans->info.num_rxqs, 777 - GFP_KERNEL); 776 + trans_pcie->rxq = kzalloc_objs(struct iwl_rxq, trans->info.num_rxqs); 778 777 trans_pcie->rx_pool = kzalloc_objs(trans_pcie->rx_pool[0], 779 778 RX_POOL_SIZE(trans_pcie->num_rx_bufs), 780 779 GFP_KERNEL);
+1 -2
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c
··· 741 741 742 742 txq->n_window = slots_num; 743 743 744 - txq->entries = kzalloc_objs(struct iwl_pcie_txq_entry, slots_num, 745 - GFP_KERNEL); 744 + txq->entries = kzalloc_objs(struct iwl_pcie_txq_entry, slots_num); 746 745 747 746 if (!txq->entries) 748 747 goto error;
+2 -4
drivers/net/wireless/intersil/p54/eeprom.c
··· 341 341 goto free; 342 342 } 343 343 priv->chan_num = max_channel_num; 344 - priv->survey = kzalloc_objs(struct survey_info, max_channel_num, 345 - GFP_KERNEL); 344 + priv->survey = kzalloc_objs(struct survey_info, max_channel_num); 346 345 if (!priv->survey) { 347 346 ret = -ENOMEM; 348 347 goto free; 349 348 } 350 349 351 350 list->max_entries = max_channel_num; 352 - list->channels = kzalloc_objs(struct p54_channel_entry, max_channel_num, 353 - GFP_KERNEL); 351 + list->channels = kzalloc_objs(struct p54_channel_entry, max_channel_num); 354 352 if (!list->channels) { 355 353 ret = -ENOMEM; 356 354 goto free;
+1 -2
drivers/net/wireless/marvell/mwifiex/scan.c
··· 1503 1503 adapter->scan_processing = true; 1504 1504 spin_unlock_bh(&adapter->mwifiex_cmd_lock); 1505 1505 1506 - scan_cfg_out = kzalloc_obj(union mwifiex_scan_cmd_config_tlv, 1507 - GFP_KERNEL); 1506 + scan_cfg_out = kzalloc_obj(union mwifiex_scan_cmd_config_tlv); 1508 1507 if (!scan_cfg_out) { 1509 1508 ret = -ENOMEM; 1510 1509 goto done;
+3 -6
drivers/net/wireless/microchip/wilc1000/cfg80211.c
··· 484 484 static int wilc_wfi_cfg_allocate_wpa_entry(struct wilc_priv *priv, u8 idx) 485 485 { 486 486 if (!priv->wilc_gtk[idx]) { 487 - priv->wilc_gtk[idx] = kzalloc_obj(*priv->wilc_gtk[idx], 488 - GFP_KERNEL); 487 + priv->wilc_gtk[idx] = kzalloc_obj(*priv->wilc_gtk[idx]); 489 488 if (!priv->wilc_gtk[idx]) 490 489 return -ENOMEM; 491 490 } 492 491 493 492 if (!priv->wilc_ptk[idx]) { 494 - priv->wilc_ptk[idx] = kzalloc_obj(*priv->wilc_ptk[idx], 495 - GFP_KERNEL); 493 + priv->wilc_ptk[idx] = kzalloc_obj(*priv->wilc_ptk[idx]); 496 494 if (!priv->wilc_ptk[idx]) 497 495 return -ENOMEM; 498 496 } ··· 502 504 { 503 505 idx -= 4; 504 506 if (!priv->wilc_igtk[idx]) { 505 - priv->wilc_igtk[idx] = kzalloc_obj(*priv->wilc_igtk[idx], 506 - GFP_KERNEL); 507 + priv->wilc_igtk[idx] = kzalloc_obj(*priv->wilc_igtk[idx]); 507 508 if (!priv->wilc_igtk[idx]) 508 509 return -ENOMEM; 509 510 }
+4 -8
drivers/net/wireless/quantenna/qtnfmac/commands.c
··· 1031 1031 if (WARN_ON(resp->n_reg_rules > NL80211_MAX_SUPP_REG_RULES)) 1032 1032 return -E2BIG; 1033 1033 1034 - mac->rd = kzalloc_flex(*mac->rd, reg_rules, resp->n_reg_rules, 1035 - GFP_KERNEL); 1034 + mac->rd = kzalloc_flex(*mac->rd, reg_rules, resp->n_reg_rules); 1036 1035 if (!mac->rd) 1037 1036 return -ENOMEM; 1038 1037 ··· 1083 1084 return -EINVAL; 1084 1085 } 1085 1086 1086 - limits = kzalloc_objs(*limits, rec->n_limits, 1087 - GFP_KERNEL); 1087 + limits = kzalloc_objs(*limits, rec->n_limits); 1088 1088 if (!limits) 1089 1089 return -ENOMEM; 1090 1090 ··· 1338 1340 if (band->n_iftype_data == 0) 1339 1341 return 0; 1340 1342 1341 - iftype_data = kzalloc_objs(*iftype_data, band->n_iftype_data, 1342 - GFP_KERNEL); 1343 + iftype_data = kzalloc_objs(*iftype_data, band->n_iftype_data); 1343 1344 if (!iftype_data) { 1344 1345 band->n_iftype_data = 0; 1345 1346 return -ENOMEM; ··· 1385 1388 return 0; 1386 1389 1387 1390 if (!band->channels) 1388 - band->channels = kzalloc_objs(*chan, band->n_channels, 1389 - GFP_KERNEL); 1391 + band->channels = kzalloc_objs(*chan, band->n_channels); 1390 1392 if (!band->channels) { 1391 1393 band->n_channels = 0; 1392 1394 return -ENOMEM;
+1 -2
drivers/net/wireless/ralink/rt2x00/rt2800lib.c
··· 11912 11912 return -ENOMEM; 11913 11913 11914 11914 rt2x00dev->chan_survey = 11915 - kzalloc_objs(struct rt2x00_chan_survey, spec->num_channels, 11916 - GFP_KERNEL); 11915 + kzalloc_objs(struct rt2x00_chan_survey, spec->num_channels); 11917 11916 if (!rt2x00dev->chan_survey) { 11918 11917 kfree(info); 11919 11918 return -ENOMEM;
+2 -4
drivers/net/wireless/realtek/rtlwifi/rtl8192du/sw.c
··· 70 70 rtlpriv->curveindex_5g = kcalloc(TARGET_CHNL_NUM_5G, 71 71 sizeof(*rtlpriv->curveindex_5g), 72 72 GFP_KERNEL); 73 - rtlpriv->mutex_for_power_on_off = kzalloc_obj(*rtlpriv->mutex_for_power_on_off, 74 - GFP_KERNEL); 75 - rtlpriv->mutex_for_hw_init = kzalloc_obj(*rtlpriv->mutex_for_hw_init, 76 - GFP_KERNEL); 73 + rtlpriv->mutex_for_power_on_off = kzalloc_obj(*rtlpriv->mutex_for_power_on_off); 74 + rtlpriv->mutex_for_hw_init = kzalloc_obj(*rtlpriv->mutex_for_hw_init); 77 75 78 76 if (!rtlpriv->curveindex_2g || !rtlpriv->curveindex_5g || 79 77 !rtlpriv->mutex_for_power_on_off || !rtlpriv->mutex_for_hw_init) {
+1 -2
drivers/net/wireless/realtek/rtw88/sdio.c
··· 1290 1290 1291 1291 for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) 1292 1292 skb_queue_head_init(&rtwsdio->tx_queue[i]); 1293 - rtwsdio->tx_handler_data = kmalloc_obj(*rtwsdio->tx_handler_data, 1294 - GFP_KERNEL); 1293 + rtwsdio->tx_handler_data = kmalloc_obj(*rtwsdio->tx_handler_data); 1295 1294 if (!rtwsdio->tx_handler_data) 1296 1295 goto err_destroy_wq; 1297 1296
+1 -2
drivers/net/wireless/realtek/rtw89/usb.c
··· 931 931 if (ret) 932 932 return ret; 933 933 934 - rtwusb->vendor_req_buf = kmalloc_obj(*rtwusb->vendor_req_buf, 935 - GFP_KERNEL); 934 + rtwusb->vendor_req_buf = kmalloc_obj(*rtwusb->vendor_req_buf); 936 935 if (!rtwusb->vendor_req_buf) 937 936 return -ENOMEM; 938 937
+1 -2
drivers/net/wireless/st/cw1200/debug.c
··· 360 360 int cw1200_debug_init(struct cw1200_common *priv) 361 361 { 362 362 int ret = -ENOMEM; 363 - struct cw1200_debug_priv *d = kzalloc_obj(struct cw1200_debug_priv, 364 - GFP_KERNEL); 363 + struct cw1200_debug_priv *d = kzalloc_obj(struct cw1200_debug_priv); 365 364 priv->debug = d; 366 365 if (!d) 367 366 return ret;
+2 -4
drivers/net/wireless/st/cw1200/queue.c
··· 179 179 spin_lock_init(&queue->lock); 180 180 timer_setup(&queue->gc, cw1200_queue_gc, 0); 181 181 182 - queue->pool = kzalloc_objs(struct cw1200_queue_item, capacity, 183 - GFP_KERNEL); 182 + queue->pool = kzalloc_objs(struct cw1200_queue_item, capacity); 184 183 if (!queue->pool) 185 184 return -ENOMEM; 186 185 187 - queue->link_map_cache = kzalloc_objs(int, stats->map_capacity, 188 - GFP_KERNEL); 186 + queue->link_map_cache = kzalloc_objs(int, stats->map_capacity); 189 187 if (!queue->link_map_cache) { 190 188 kfree(queue->pool); 191 189 queue->pool = NULL;
+1 -2
drivers/net/wireless/st/cw1200/scan.c
··· 225 225 scan.type = WSM_SCAN_TYPE_BACKGROUND; 226 226 scan.flags = WSM_SCAN_FLAG_FORCE_BACKGROUND; 227 227 } 228 - scan.ch = kzalloc_objs(struct wsm_scan_ch, it - priv->scan.curr, 229 - GFP_KERNEL); 228 + scan.ch = kzalloc_objs(struct wsm_scan_ch, it - priv->scan.curr); 230 229 if (!scan.ch) { 231 230 priv->scan.status = -ENOMEM; 232 231 goto fail;
+1 -2
drivers/net/wireless/ti/wl1251/init.c
··· 293 293 int ret; 294 294 295 295 /* asking for the data path parameters */ 296 - wl->data_path = kzalloc_obj(struct acx_data_path_params_resp, 297 - GFP_KERNEL); 296 + wl->data_path = kzalloc_obj(struct acx_data_path_params_resp); 298 297 if (!wl->data_path) 299 298 return -ENOMEM; 300 299
+1 -2
drivers/net/wwan/iosm/iosm_ipc_protocol.c
··· 222 222 223 223 struct iosm_protocol *ipc_protocol_init(struct iosm_imem *ipc_imem) 224 224 { 225 - struct iosm_protocol *ipc_protocol = kzalloc_obj(*ipc_protocol, 226 - GFP_KERNEL); 225 + struct iosm_protocol *ipc_protocol = kzalloc_obj(*ipc_protocol); 227 226 struct ipc_protocol_context_info *p_ci; 228 227 u64 addr; 229 228
+1 -2
drivers/net/xen-netfront.c
··· 2212 2212 unsigned int i; 2213 2213 int ret; 2214 2214 2215 - info->queues = kzalloc_objs(struct netfront_queue, *num_queues, 2216 - GFP_KERNEL); 2215 + info->queues = kzalloc_objs(struct netfront_queue, *num_queues); 2217 2216 if (!info->queues) 2218 2217 return -ENOMEM; 2219 2218
+2 -4
drivers/nvdimm/btt.c
··· 539 539 struct log_entry log_new; 540 540 u32 i, map_entry, log_oldmap, log_newmap; 541 541 542 - arena->freelist = kzalloc_objs(struct free_entry, arena->nfree, 543 - GFP_KERNEL); 542 + arena->freelist = kzalloc_objs(struct free_entry, arena->nfree); 544 543 if (!arena->freelist) 545 544 return -ENOMEM; 546 545 ··· 732 733 { 733 734 u32 i; 734 735 735 - arena->map_locks = kzalloc_objs(struct aligned_lock, arena->nfree, 736 - GFP_KERNEL); 736 + arena->map_locks = kzalloc_objs(struct aligned_lock, arena->nfree); 737 737 if (!arena->map_locks) 738 738 return -ENOMEM; 739 739
+1 -2
drivers/nvdimm/nd_perf.c
··· 195 195 } 196 196 197 197 /* Allocate memory for cpumask attribute group */ 198 - nvdimm_pmu_cpumask_group = kzalloc_obj(*nvdimm_pmu_cpumask_group, 199 - GFP_KERNEL); 198 + nvdimm_pmu_cpumask_group = kzalloc_obj(*nvdimm_pmu_cpumask_group); 200 199 if (!nvdimm_pmu_cpumask_group) { 201 200 kfree(pmu_events_attr); 202 201 kfree(attrs_group);
+1 -2
drivers/nvdimm/region_devs.c
··· 1005 1005 } 1006 1006 1007 1007 nd_region = 1008 - kzalloc_flex(*nd_region, mapping, ndr_desc->num_mappings, 1009 - GFP_KERNEL); 1008 + kzalloc_flex(*nd_region, mapping, ndr_desc->num_mappings); 1010 1009 1011 1010 if (!nd_region) 1012 1011 return NULL;
+1 -2
drivers/nvme/host/auth.c
··· 1083 1083 if (!ctrl->opts->dhchap_secret && !ctrl->opts->dhchap_ctrl_secret) 1084 1084 return 0; 1085 1085 1086 - ctrl->dhchap_ctxs = kvzalloc_objs(*chap, ctrl_max_dhchaps(ctrl), 1087 - GFP_KERNEL); 1086 + ctrl->dhchap_ctxs = kvzalloc_objs(*chap, ctrl_max_dhchaps(ctrl)); 1088 1087 if (!ctrl->dhchap_ctxs) { 1089 1088 ret = -ENOMEM; 1090 1089 goto err_free_dhchap_ctrl_secret;
+1 -2
drivers/nvme/host/rdma.c
··· 2290 2290 ctrl->ctrl.kato = opts->kato; 2291 2291 2292 2292 ret = -ENOMEM; 2293 - ctrl->queues = kzalloc_objs(*ctrl->queues, ctrl->ctrl.queue_count, 2294 - GFP_KERNEL); 2293 + ctrl->queues = kzalloc_objs(*ctrl->queues, ctrl->ctrl.queue_count); 2295 2294 if (!ctrl->queues) 2296 2295 goto out_free_ctrl; 2297 2296
+1 -2
drivers/nvme/host/tcp.c
··· 2949 2949 goto out_free_ctrl; 2950 2950 } 2951 2951 2952 - ctrl->queues = kzalloc_objs(*ctrl->queues, ctrl->ctrl.queue_count, 2953 - GFP_KERNEL); 2952 + ctrl->queues = kzalloc_objs(*ctrl->queues, ctrl->ctrl.queue_count); 2954 2953 if (!ctrl->queues) { 2955 2954 ret = -ENOMEM; 2956 2955 goto out_free_ctrl;
+1 -2
drivers/nvme/target/configfs.c
··· 2026 2026 if (!port) 2027 2027 return ERR_PTR(-ENOMEM); 2028 2028 2029 - port->ana_state = kzalloc_objs(*port->ana_state, NVMET_MAX_ANAGRPS + 1, 2030 - GFP_KERNEL); 2029 + port->ana_state = kzalloc_objs(*port->ana_state, NVMET_MAX_ANAGRPS + 1); 2031 2030 if (!port->ana_state) { 2032 2031 kfree(port); 2033 2032 return ERR_PTR(-ENOMEM);
+2 -4
drivers/nvme/target/core.c
··· 1642 1642 if (!ctrl->changed_ns_list) 1643 1643 goto out_free_ctrl; 1644 1644 1645 - ctrl->sqs = kzalloc_objs(struct nvmet_sq *, subsys->max_qid + 1, 1646 - GFP_KERNEL); 1645 + ctrl->sqs = kzalloc_objs(struct nvmet_sq *, subsys->max_qid + 1); 1647 1646 if (!ctrl->sqs) 1648 1647 goto out_free_changed_ns_list; 1649 1648 1650 - ctrl->cqs = kzalloc_objs(struct nvmet_cq *, subsys->max_qid + 1, 1651 - GFP_KERNEL); 1649 + ctrl->cqs = kzalloc_objs(struct nvmet_cq *, subsys->max_qid + 1); 1652 1650 if (!ctrl->cqs) 1653 1651 goto out_free_sqs; 1654 1652
+1 -2
drivers/nvme/target/fc.c
··· 528 528 struct nvmet_fc_ls_iod *iod; 529 529 int i; 530 530 531 - iod = kzalloc_objs(struct nvmet_fc_ls_iod, NVMET_LS_CTX_COUNT, 532 - GFP_KERNEL); 531 + iod = kzalloc_objs(struct nvmet_fc_ls_iod, NVMET_LS_CTX_COUNT); 533 532 if (!iod) 534 533 return -ENOMEM; 535 534
+1 -2
drivers/nvme/target/loop.c
··· 592 592 ctrl->ctrl.kato = opts->kato; 593 593 ctrl->port = nvme_loop_find_port(&ctrl->ctrl); 594 594 595 - ctrl->queues = kzalloc_objs(*ctrl->queues, opts->nr_io_queues + 1, 596 - GFP_KERNEL); 595 + ctrl->queues = kzalloc_objs(*ctrl->queues, opts->nr_io_queues + 1); 597 596 if (!ctrl->queues) 598 597 goto out_uninit_ctrl; 599 598
+2 -4
drivers/nvme/target/pci-epf.c
··· 1562 1562 { 1563 1563 unsigned int qid; 1564 1564 1565 - ctrl->sq = kzalloc_objs(struct nvmet_pci_epf_queue, ctrl->nr_queues, 1566 - GFP_KERNEL); 1565 + ctrl->sq = kzalloc_objs(struct nvmet_pci_epf_queue, ctrl->nr_queues); 1567 1566 if (!ctrl->sq) 1568 1567 return -ENOMEM; 1569 1568 1570 - ctrl->cq = kzalloc_objs(struct nvmet_pci_epf_queue, ctrl->nr_queues, 1571 - GFP_KERNEL); 1569 + ctrl->cq = kzalloc_objs(struct nvmet_pci_epf_queue, ctrl->nr_queues); 1572 1570 if (!ctrl->cq) { 1573 1571 kfree(ctrl->sq); 1574 1572 ctrl->sq = NULL;
+1 -2
drivers/opp/core.c
··· 2260 2260 if (opp_table->regulators) 2261 2261 return 0; 2262 2262 2263 - opp_table->regulators = kmalloc_objs(*opp_table->regulators, count, 2264 - GFP_KERNEL); 2263 + opp_table->regulators = kmalloc_objs(*opp_table->regulators, count); 2265 2264 if (!opp_table->regulators) 2266 2265 return -ENOMEM; 2267 2266
+1 -2
drivers/opp/of.c
··· 303 303 if (!count) 304 304 return 0; 305 305 306 - opp->required_opps = kzalloc_objs(*opp->required_opps, count, 307 - GFP_KERNEL); 306 + opp->required_opps = kzalloc_objs(*opp->required_opps, count); 308 307 if (!opp->required_opps) 309 308 return -ENOMEM; 310 309
+1 -2
drivers/pci/hotplug/cpqphp_nvram.c
··· 532 532 } 533 533 534 534 while (numpmem--) { 535 - p_mem_node = kmalloc_obj(struct pci_resource, 536 - GFP_KERNEL); 535 + p_mem_node = kmalloc_obj(struct pci_resource); 537 536 538 537 if (!p_mem_node) 539 538 break;
+7 -14
drivers/pci/hotplug/cpqphp_pci.c
··· 768 768 pci_bus_read_config_word(pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, &w_length); 769 769 770 770 if ((w_base <= w_length) && (save_command & 0x02)) { 771 - p_mem_node = kmalloc_obj(*p_mem_node, 772 - GFP_KERNEL); 771 + p_mem_node = kmalloc_obj(*p_mem_node); 773 772 if (!p_mem_node) 774 773 return -ENOMEM; 775 774 ··· 799 800 temp_register = base & 0xFFFFFFFE; 800 801 temp_register = (~temp_register) + 1; 801 802 802 - io_node = kmalloc_obj(*io_node, 803 - GFP_KERNEL); 803 + io_node = kmalloc_obj(*io_node); 804 804 if (!io_node) 805 805 return -ENOMEM; 806 806 ··· 816 818 temp_register = base & 0xFFFFFFF0; 817 819 temp_register = (~temp_register) + 1; 818 820 819 - p_mem_node = kmalloc_obj(*p_mem_node, 820 - GFP_KERNEL); 821 + p_mem_node = kmalloc_obj(*p_mem_node); 821 822 if (!p_mem_node) 822 823 return -ENOMEM; 823 824 ··· 832 835 temp_register = base & 0xFFFFFFF0; 833 836 temp_register = (~temp_register) + 1; 834 837 835 - mem_node = kmalloc_obj(*mem_node, 836 - GFP_KERNEL); 838 + mem_node = kmalloc_obj(*mem_node); 837 839 if (!mem_node) 838 840 return -ENOMEM; 839 841 ··· 868 872 temp_register = base & 0xFFFFFFFE; 869 873 temp_register = (~temp_register) + 1; 870 874 871 - io_node = kmalloc_obj(*io_node, 872 - GFP_KERNEL); 875 + io_node = kmalloc_obj(*io_node); 873 876 if (!io_node) 874 877 return -ENOMEM; 875 878 ··· 884 889 temp_register = base & 0xFFFFFFF0; 885 890 temp_register = (~temp_register) + 1; 886 891 887 - p_mem_node = kmalloc_obj(*p_mem_node, 888 - GFP_KERNEL); 892 + p_mem_node = kmalloc_obj(*p_mem_node); 889 893 if (!p_mem_node) 890 894 return -ENOMEM; 891 895 ··· 900 906 temp_register = base & 0xFFFFFFF0; 901 907 temp_register = (~temp_register) + 1; 902 908 903 - mem_node = kmalloc_obj(*mem_node, 904 - GFP_KERNEL); 909 + mem_node = kmalloc_obj(*mem_node); 905 910 if (!mem_node) 906 911 return -ENOMEM; 907 912
+3 -6
drivers/pci/hotplug/ibmphp_ebda.c
··· 352 352 debug("now enter io table ---\n"); 353 353 debug("rio blk id: %x\n", blk_id); 354 354 355 - rio_table_ptr = kzalloc_obj(struct rio_table_hdr, 356 - GFP_KERNEL); 355 + rio_table_ptr = kzalloc_obj(struct rio_table_hdr); 357 356 if (!rio_table_ptr) { 358 357 rc = -ENOMEM; 359 358 goto out; ··· 499 500 list_for_each_entry(rio_detail_ptr, &rio_lo_head, rio_detail_list) { 500 501 opt_rio_lo_ptr = search_opt_lo(rio_detail_ptr->chassis_num); 501 502 if (!opt_rio_lo_ptr) { 502 - opt_rio_lo_ptr = kzalloc_obj(struct opt_rio_lo, 503 - GFP_KERNEL); 503 + opt_rio_lo_ptr = kzalloc_obj(struct opt_rio_lo); 504 504 if (!opt_rio_lo_ptr) 505 505 return -ENOMEM; 506 506 opt_rio_lo_ptr->rio_type = rio_detail_ptr->rio_type; ··· 738 740 739 741 bus_info_ptr2 = ibmphp_find_same_bus_num(slot_ptr->slot_bus_num); 740 742 if (!bus_info_ptr2) { 741 - bus_info_ptr1 = kzalloc_obj(struct bus_info, 742 - GFP_KERNEL); 743 + bus_info_ptr1 = kzalloc_obj(struct bus_info); 743 744 if (!bus_info_ptr1) { 744 745 rc = -ENOMEM; 745 746 goto error_no_slot;
+12 -24
drivers/pci/hotplug/ibmphp_pci.c
··· 152 152 cleanup_count = 6; 153 153 goto error; 154 154 } 155 - newfunc = kzalloc_obj(*newfunc, 156 - GFP_KERNEL); 155 + newfunc = kzalloc_obj(*newfunc); 157 156 if (!newfunc) 158 157 return -ENOMEM; 159 158 ··· 189 190 flag = 0; 190 191 for (i = 0; i < 32; i++) { 191 192 if (func->devices[i]) { 192 - newfunc = kzalloc_obj(*newfunc, 193 - GFP_KERNEL); 193 + newfunc = kzalloc_obj(*newfunc); 194 194 if (!newfunc) 195 195 return -ENOMEM; 196 196 ··· 216 218 } 217 219 } 218 220 219 - newfunc = kzalloc_obj(*newfunc, 220 - GFP_KERNEL); 221 + newfunc = kzalloc_obj(*newfunc); 221 222 if (!newfunc) 222 223 return -ENOMEM; 223 224 ··· 261 264 for (i = 0; i < 32; i++) { 262 265 if (func->devices[i]) { 263 266 debug("inside for loop, device is %x\n", i); 264 - newfunc = kzalloc_obj(*newfunc, 265 - GFP_KERNEL); 267 + newfunc = kzalloc_obj(*newfunc); 266 268 if (!newfunc) 267 269 return -ENOMEM; 268 270 ··· 384 388 385 389 debug("len[count] in IO %x, count %d\n", len[count], count); 386 390 387 - io[count] = kzalloc_obj(struct resource_node, 388 - GFP_KERNEL); 391 + io[count] = kzalloc_obj(struct resource_node); 389 392 390 393 if (!io[count]) 391 394 return -ENOMEM; ··· 421 426 422 427 debug("len[count] in PFMEM %x, count %d\n", len[count], count); 423 428 424 - pfmem[count] = kzalloc_obj(struct resource_node, 425 - GFP_KERNEL); 429 + pfmem[count] = kzalloc_obj(struct resource_node); 426 430 if (!pfmem[count]) 427 431 return -ENOMEM; 428 432 ··· 435 441 ibmphp_add_resource(pfmem[count]); 436 442 func->pfmem[count] = pfmem[count]; 437 443 } else { 438 - mem_tmp = kzalloc_obj(*mem_tmp, 439 - GFP_KERNEL); 444 + mem_tmp = kzalloc_obj(*mem_tmp); 440 445 if (!mem_tmp) { 441 446 kfree(pfmem[count]); 442 447 return -ENOMEM; ··· 485 492 486 493 debug("len[count] in Mem %x, count %d\n", len[count], count); 487 494 488 - mem[count] = kzalloc_obj(struct resource_node, 489 - GFP_KERNEL); 495 + mem[count] = kzalloc_obj(struct resource_node); 490 496 if (!mem[count]) 491 497 return -ENOMEM;
492 498 ··· 648 656 649 657 debug("len[count] in IO = %x\n", len[count]); 650 658 651 - bus_io[count] = kzalloc_obj(struct resource_node, 652 - GFP_KERNEL); 659 + bus_io[count] = kzalloc_obj(struct resource_node); 653 660 654 661 if (!bus_io[count]) { 655 662 retval = -ENOMEM; ··· 680 689 681 690 debug("len[count] in PFMEM = %x\n", len[count]); 682 691 683 - bus_pfmem[count] = kzalloc_obj(struct resource_node, 684 - GFP_KERNEL); 692 + bus_pfmem[count] = kzalloc_obj(struct resource_node); 685 693 if (!bus_pfmem[count]) { 686 694 retval = -ENOMEM; 687 695 goto error; ··· 695 705 ibmphp_add_resource(bus_pfmem[count]); 696 706 func->pfmem[count] = bus_pfmem[count]; 697 707 } else { 698 - mem_tmp = kzalloc_obj(*mem_tmp, 699 - GFP_KERNEL); 708 + mem_tmp = kzalloc_obj(*mem_tmp); 700 709 if (!mem_tmp) { 701 710 retval = -ENOMEM; 702 711 goto error; ··· 735 746 736 747 debug("len[count] in Memory is %x\n", len[count]); 737 748 738 - bus_mem[count] = kzalloc_obj(struct resource_node, 739 - GFP_KERNEL); 749 + bus_mem[count] = kzalloc_obj(struct resource_node); 740 750 if (!bus_mem[count]) { 741 751 retval = -ENOMEM; 742 752 goto error;
+7 -14
drivers/pci/hotplug/ibmphp_res.c
··· 1687 1687 1688 1688 bus_cur->firstPFMemFromMem = pfmem_cur; 1689 1689 1690 - mem = kzalloc_obj(struct resource_node, 1691 - GFP_KERNEL); 1690 + mem = kzalloc_obj(struct resource_node); 1692 1691 if (!mem) 1693 1692 return -ENOMEM; 1694 1693 ··· 1969 1970 end_address |= (upper_io_end << 16); 1970 1971 1971 1972 if ((start_address) && (start_address <= end_address)) { 1972 - range = kzalloc_obj(struct range_node, 1973 - GFP_KERNEL); 1973 + range = kzalloc_obj(struct range_node); 1974 1974 if (!range) 1975 1975 return -ENOMEM; 1976 1976 ··· 1993 1995 fix_resources(bus_sec); 1994 1996 1995 1997 if (ibmphp_find_resource(bus_cur, start_address, &io, IO)) { 1996 - io = kzalloc_obj(struct resource_node, 1997 - GFP_KERNEL); 1998 + io = kzalloc_obj(struct resource_node); 1998 1999 if (!io) { 1999 2000 kfree(range); 2000 2001 return -ENOMEM; ··· 2016 2019 2017 2020 if ((start_address) && (start_address <= end_address)) { 2018 2021 2019 - range = kzalloc_obj(struct range_node, 2020 - GFP_KERNEL); 2022 + range = kzalloc_obj(struct range_node); 2021 2023 if (!range) 2022 2024 return -ENOMEM; 2023 2025 ··· 2041 2045 fix_resources(bus_sec); 2042 2046 2043 2047 if (ibmphp_find_resource(bus_cur, start_address, &mem, MEM)) { 2044 - mem = kzalloc_obj(struct resource_node, 2045 - GFP_KERNEL); 2048 + mem = kzalloc_obj(struct resource_node); 2046 2049 if (!mem) { 2047 2050 kfree(range); 2048 2051 return -ENOMEM; ··· 2068 2073 2069 2074 if ((start_address) && (start_address <= end_address)) { 2070 2075 2071 - range = kzalloc_obj(struct range_node, 2072 - GFP_KERNEL); 2076 + range = kzalloc_obj(struct range_node); 2073 2077 if (!range) 2074 2078 return -ENOMEM; 2075 2079 ··· 2092 2098 2093 2099 fix_resources(bus_sec); 2094 2100 if (ibmphp_find_resource(bus_cur, start_address, &pfmem, PFMEM)) { 2095 - pfmem = kzalloc_obj(struct resource_node, 2096 - GFP_KERNEL); 2101 + pfmem = kzalloc_obj(struct resource_node); 2097 2102 if (!pfmem) { 2098 2103 kfree(range); 2099 2104 return -ENOMEM;
+1 -2
drivers/perf/riscv_pmu_sbi.c
··· 309 309 int i, j, k, result = 0, count = 0; 310 310 struct sbiret ret; 311 311 312 - event_info_shmem = kzalloc_objs(*event_info_shmem, num_events, 313 - GFP_KERNEL); 312 + event_info_shmem = kzalloc_objs(*event_info_shmem, num_events); 314 313 if (!event_info_shmem) 315 314 return -ENOMEM; 316 315
+1 -2
drivers/pinctrl/bcm/pinctrl-bcm2835.c
··· 873 873 maps_per_pin++; 874 874 if (num_pulls) 875 875 maps_per_pin++; 876 - cur_map = maps = kzalloc_objs(*maps, num_pins * maps_per_pin, 877 - GFP_KERNEL); 876 + cur_map = maps = kzalloc_objs(*maps, num_pins * maps_per_pin); 878 877 if (!maps) 879 878 return -ENOMEM; 880 879
+1 -2
drivers/pinctrl/berlin/berlin.c
··· 215 215 } 216 216 217 217 /* we will reallocate later */ 218 - pctrl->functions = kzalloc_objs(*pctrl->functions, max_functions, 219 - GFP_KERNEL); 218 + pctrl->functions = kzalloc_objs(*pctrl->functions, max_functions); 220 219 if (!pctrl->functions) 221 220 return -ENOMEM; 222 221
+1 -2
drivers/pinctrl/pinctrl-apple-gpio.c
··· 400 400 girq->parents = kmalloc_array(girq->num_parents, 401 401 sizeof(*girq->parents), 402 402 GFP_KERNEL); 403 - irq_data = kmalloc_objs(*irq_data, girq->num_parents, 404 - GFP_KERNEL); 403 + irq_data = kmalloc_objs(*irq_data, girq->num_parents); 405 404 if (!girq->parents || !irq_data) { 406 405 ret = -ENOMEM; 407 406 goto out_free_irq_data;
+1 -2
drivers/pinctrl/sunxi/pinctrl-sunxi.c
··· 1328 1328 * special functions per pin, plus one entry for the sentinel. 1329 1329 * We'll reallocate that later anyway. 1330 1330 */ 1331 - pctl->functions = kzalloc_objs(*pctl->functions, 7 * pctl->ngroups + 4, 1332 - GFP_KERNEL); 1331 + pctl->functions = kzalloc_objs(*pctl->functions, 7 * pctl->ngroups + 4); 1333 1332 if (!pctl->functions) 1334 1333 return -ENOMEM; 1335 1334
+1 -2
drivers/pinctrl/vt8500/pinctrl-wmt.c
··· 344 344 if (num_pulls) 345 345 maps_per_pin++; 346 346 347 - cur_map = maps = kzalloc_objs(*maps, num_pins * maps_per_pin, 348 - GFP_KERNEL); 347 + cur_map = maps = kzalloc_objs(*maps, num_pins * maps_per_pin); 349 348 if (!maps) 350 349 return -ENOMEM; 351 350
+1 -2
drivers/platform/chrome/chromeos_laptop.c
··· 807 807 if (!n_peripherals) 808 808 return 0; 809 809 810 - acpi_peripherals = kzalloc_objs(*src->acpi_peripherals, n_peripherals, 811 - GFP_KERNEL); 810 + acpi_peripherals = kzalloc_objs(*src->acpi_peripherals, n_peripherals); 812 811 if (!acpi_peripherals) 813 812 return -ENOMEM; 814 813
+2 -4
drivers/platform/x86/asus-armoury.c
··· 1005 1005 /* Initialize AC power tunables */ 1006 1006 ac_limits = power_data->ac_data; 1007 1007 if (ac_limits) { 1008 - ac_rog_tunables = kzalloc_obj(*asus_armoury.rog_tunables[ASUS_ROG_TUNABLE_AC], 1009 - GFP_KERNEL); 1008 + ac_rog_tunables = kzalloc_obj(*asus_armoury.rog_tunables[ASUS_ROG_TUNABLE_AC]); 1010 1009 if (!ac_rog_tunables) 1011 1010 goto err_nomem; 1012 1011 ··· 1052 1053 /* Initialize DC power tunables */ 1053 1054 dc_limits = power_data->dc_data; 1054 1055 if (dc_limits) { 1055 - dc_rog_tunables = kzalloc_obj(*asus_armoury.rog_tunables[ASUS_ROG_TUNABLE_DC], 1056 - GFP_KERNEL); 1056 + dc_rog_tunables = kzalloc_obj(*asus_armoury.rog_tunables[ASUS_ROG_TUNABLE_DC]); 1057 1057 if (!dc_rog_tunables) { 1058 1058 kfree(ac_rog_tunables); 1059 1059 goto err_nomem;
+1 -2
drivers/platform/x86/dell/dell-smbios-base.c
··· 500 500 return -ENOMEM; 501 501 502 502 /* need to store both location and value + terminator*/ 503 - token_attrs = kzalloc_objs(*token_attrs, (2 * da_num_tokens) + 1, 504 - GFP_KERNEL); 503 + token_attrs = kzalloc_objs(*token_attrs, (2 * da_num_tokens) + 1); 505 504 if (!token_attrs) 506 505 goto out_allocate_attrs; 507 506
+1 -2
drivers/platform/x86/intel/int3472/tps68470.c
··· 180 180 if (!board_data) 181 181 return dev_err_probe(&client->dev, -ENODEV, "No board-data found for this model\n"); 182 182 183 - cells = kzalloc_objs(*cells, TPS68470_WIN_MFD_CELL_COUNT, 184 - GFP_KERNEL); 183 + cells = kzalloc_objs(*cells, TPS68470_WIN_MFD_CELL_COUNT); 185 184 if (!cells) 186 185 return -ENOMEM; 187 186
+2 -4
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
··· 425 425 { 426 426 int ret; 427 427 428 - isst_cpu_info = kzalloc_objs(*isst_cpu_info, num_possible_cpus(), 429 - GFP_KERNEL); 428 + isst_cpu_info = kzalloc_objs(*isst_cpu_info, num_possible_cpus()); 430 429 if (!isst_cpu_info) 431 430 return -ENOMEM; 432 431 433 - isst_pkg_info = kzalloc_objs(*isst_pkg_info, topology_max_packages(), 434 - GFP_KERNEL); 432 + isst_pkg_info = kzalloc_objs(*isst_pkg_info, topology_max_packages()); 435 433 if (!isst_pkg_info) { 436 434 kfree(isst_cpu_info); 437 435 return -ENOMEM;
+1 -2
drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
··· 278 278 279 279 uncore_max_entries = topology_max_packages() * 280 280 topology_max_dies_per_package(); 281 - uncore_instances = kzalloc_objs(*uncore_instances, uncore_max_entries, 282 - GFP_KERNEL); 281 + uncore_instances = kzalloc_objs(*uncore_instances, uncore_max_entries); 283 282 if (!uncore_instances) 284 283 return -ENOMEM; 285 284
+1 -2
drivers/platform/x86/sony-laptop.c
··· 4161 4161 case ACPI_RESOURCE_TYPE_START_DEPENDENT: 4162 4162 { 4163 4163 /* start IO enumeration */ 4164 - struct sony_pic_ioport *ioport = kzalloc_obj(*ioport, 4165 - GFP_KERNEL); 4164 + struct sony_pic_ioport *ioport = kzalloc_obj(*ioport); 4166 4165 if (!ioport) 4167 4166 return AE_ERROR; 4168 4167
+4 -8
drivers/platform/x86/uv_sysfs.c
··· 216 216 u64 sz; 217 217 int i, ret; 218 218 219 - prev_obj_to_cnode = kmalloc_objs(*prev_obj_to_cnode, uv_bios_obj_cnt, 220 - GFP_KERNEL); 219 + prev_obj_to_cnode = kmalloc_objs(*prev_obj_to_cnode, uv_bios_obj_cnt); 221 220 if (!prev_obj_to_cnode) 222 221 return -ENOMEM; 223 222 ··· 397 398 } 398 399 for (j = 0; j < uv_bios_obj_cnt; j++) { 399 400 for (k = 0; k < hub_buf[j].ports; k++) { 400 - uv_hubs[j]->ports[k] = kzalloc_obj(*uv_hubs[j]->ports[k], 401 - GFP_KERNEL); 401 + uv_hubs[j]->ports[k] = kzalloc_obj(*uv_hubs[j]->ports[k]); 402 402 if (!uv_hubs[j]->ports[k]) { 403 403 ret = -ENOMEM; 404 404 k--; ··· 673 675 } 674 676 num_pci_lines = l; 675 677 676 - uv_pci_objs = kzalloc_objs(*uv_pci_objs, num_pci_lines, 677 - GFP_KERNEL); 678 + uv_pci_objs = kzalloc_objs(*uv_pci_objs, num_pci_lines); 678 679 if (!uv_pci_objs) { 679 680 kfree(pci_top_str); 680 681 ret = -ENOMEM; ··· 681 684 } 682 685 start = pci_top_str; 683 686 while ((found = strsep(&start, "\n")) != NULL) { 684 - uv_pci_objs[k] = kzalloc_obj(*uv_pci_objs[k], 685 - GFP_KERNEL); 687 + uv_pci_objs[k] = kzalloc_obj(*uv_pci_objs[k]); 686 688 if (!uv_pci_objs[k]) { 687 689 ret = -ENOMEM; 688 690 goto err_pci_obj;
+1 -2
drivers/platform/x86/x86-android-tablets/core.c
··· 447 447 exit_handler = dev_info->exit; 448 448 } 449 449 450 - i2c_clients = kzalloc_objs(*i2c_clients, dev_info->i2c_client_count, 451 - GFP_KERNEL); 450 + i2c_clients = kzalloc_objs(*i2c_clients, dev_info->i2c_client_count); 452 451 if (!i2c_clients) { 453 452 x86_android_tablet_remove(pdev); 454 453 return -ENOMEM;
+1 -2
drivers/powercap/intel_rapl_common.c
··· 1520 1520 } 1521 1521 pr_debug("found %d domains on %s\n", rp->nr_domains, rp->name); 1522 1522 1523 - rp->domains = kzalloc_objs(struct rapl_domain, rp->nr_domains, 1524 - GFP_KERNEL); 1523 + rp->domains = kzalloc_objs(struct rapl_domain, rp->nr_domains); 1525 1524 if (!rp->domains) 1526 1525 return -ENOMEM; 1527 1526
+1 -2
drivers/regulator/of_regulator.c
··· 973 973 } 974 974 if (num_consumers == 0) 975 975 return 0; 976 - _consumers = kmalloc_objs(struct regulator_bulk_data, num_consumers, 977 - GFP_KERNEL); 976 + _consumers = kmalloc_objs(struct regulator_bulk_data, num_consumers); 978 977 if (!_consumers) 979 978 return -ENOMEM; 980 979 goto restart;
+2 -4
drivers/s390/block/dcssblk.c
··· 240 240 if (dev_info->num_of_segments <= 1) 241 241 return 0; 242 242 243 - sort_list = kzalloc_objs(struct segment_info, dev_info->num_of_segments, 244 - GFP_KERNEL); 243 + sort_list = kzalloc_objs(struct segment_info, dev_info->num_of_segments); 245 244 if (sort_list == NULL) 246 245 return -ENOMEM; 247 246 i = 0; ··· 604 605 * get a struct dcssblk_dev_info 605 606 */ 606 607 if (num_of_segments == 0) { 607 - dev_info = kzalloc_obj(struct dcssblk_dev_info, 608 - GFP_KERNEL); 608 + dev_info = kzalloc_obj(struct dcssblk_dev_info); 609 609 if (dev_info == NULL) { 610 610 rc = -ENOMEM; 611 611 goto out;
+1 -2
drivers/s390/block/scm_blk.c
··· 77 77 if (!scmrq->aob) 78 78 goto free; 79 79 80 - scmrq->request = kzalloc_objs(scmrq->request[0], nr_requests_per_io, 81 - GFP_KERNEL); 80 + scmrq->request = kzalloc_objs(scmrq->request[0], nr_requests_per_io); 82 81 if (!scmrq->request) 83 82 goto free; 84 83
+1 -2
drivers/s390/char/con3270.c
··· 899 899 if (!screen) 900 900 goto out_err; 901 901 for (lines = 0; lines < allocated; lines++) { 902 - screen[lines].cells = kzalloc_objs(struct tty3270_cell, cols, 903 - GFP_KERNEL); 902 + screen[lines].cells = kzalloc_objs(struct tty3270_cell, cols); 904 903 if (!screen[lines].cells) 905 904 goto out_screen; 906 905 }
+1 -2
drivers/s390/cio/css.c
··· 1005 1005 goto out_err; 1006 1006 } 1007 1007 1008 - css->pseudo_subchannel = kzalloc_obj(*css->pseudo_subchannel, 1009 - GFP_KERNEL); 1008 + css->pseudo_subchannel = kzalloc_obj(*css->pseudo_subchannel); 1010 1009 if (!css->pseudo_subchannel) { 1011 1010 device_unregister(&css->device); 1012 1011 ret = -ENOMEM;
+1 -2
drivers/s390/cio/qdio_thinint.c
··· 204 204 { 205 205 int rc; 206 206 207 - q_indicators = kzalloc_objs(struct indicator_t, TIQDIO_NR_INDICATORS, 208 - GFP_KERNEL); 207 + q_indicators = kzalloc_objs(struct indicator_t, TIQDIO_NR_INDICATORS); 209 208 if (!q_indicators) 210 209 return -ENOMEM; 211 210
+1 -2
drivers/s390/cio/vfio_ccw_ops.c
··· 55 55 INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo); 56 56 INIT_WORK(&private->crw_work, vfio_ccw_crw_todo); 57 57 58 - private->cp.guest_cp = kzalloc_objs(struct ccw1, CCWCHAIN_LEN_MAX, 59 - GFP_KERNEL); 58 + private->cp.guest_cp = kzalloc_objs(struct ccw1, CCWCHAIN_LEN_MAX); 60 59 if (!private->cp.guest_cp) 61 60 goto out_free_private; 62 61
+1 -2
drivers/scsi/53c700.c
··· 2020 2020 STATIC int 2021 2021 NCR_700_sdev_init(struct scsi_device *SDp) 2022 2022 { 2023 - SDp->hostdata = kzalloc_obj(struct NCR_700_Device_Parameters, 2024 - GFP_KERNEL); 2023 + SDp->hostdata = kzalloc_obj(struct NCR_700_Device_Parameters); 2025 2024 2026 2025 if (!SDp->hostdata) 2027 2026 return -ENOMEM;
+1 -2
drivers/scsi/BusLogic.c
··· 2214 2214 if (blogic_probe_options.noprobe) 2215 2215 return -ENODEV; 2216 2216 blogic_probeinfo_list = 2217 - kzalloc_objs(struct blogic_probeinfo, BLOGIC_MAX_ADAPTERS, 2218 - GFP_KERNEL); 2217 + kzalloc_objs(struct blogic_probeinfo, BLOGIC_MAX_ADAPTERS); 2219 2218 if (blogic_probeinfo_list == NULL) { 2220 2219 blogic_err("BusLogic: Unable to allocate Probe Info List\n", 2221 2220 NULL);
+1 -2
drivers/scsi/aacraid/linit.c
··· 1661 1661 if (aac_reset_devices || reset_devices) 1662 1662 aac->init_reset = true; 1663 1663 1664 - aac->fibs = kzalloc_objs(struct fib, shost->can_queue + AAC_NUM_MGT_FIB, 1665 - GFP_KERNEL); 1664 + aac->fibs = kzalloc_objs(struct fib, shost->can_queue + AAC_NUM_MGT_FIB); 1666 1665 if (!aac->fibs) { 1667 1666 error = -ENOMEM; 1668 1667 goto out_free_host;
+3 -6
drivers/scsi/be2iscsi/be_main.c
··· 2491 2491 return -ENOMEM; 2492 2492 } 2493 2493 2494 - mem_arr_orig = kmalloc_objs(*mem_arr_orig, BEISCSI_MAX_FRAGS_INIT, 2495 - GFP_KERNEL); 2494 + mem_arr_orig = kmalloc_objs(*mem_arr_orig, BEISCSI_MAX_FRAGS_INIT); 2496 2495 if (!mem_arr_orig) { 2497 2496 kfree(phba->init_mem); 2498 2497 kfree(phwi_ctrlr->wrb_context); ··· 3359 3360 idx = 0; 3360 3361 mem_descr = phba->init_mem; 3361 3362 mem_descr += HWI_MEM_WRB; 3362 - pwrb_arr = kmalloc_objs(*pwrb_arr, phba->params.cxns_per_ctrl, 3363 - GFP_KERNEL); 3363 + pwrb_arr = kmalloc_objs(*pwrb_arr, phba->params.cxns_per_ctrl); 3364 3364 if (!pwrb_arr) { 3365 3365 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3366 3366 "BM_%d : Memory alloc failed in create wrb ring.\n"); ··· 3998 4000 3999 4001 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 4000 4002 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { 4001 - ptr_cid_info = kzalloc_obj(struct ulp_cid_info, 4002 - GFP_KERNEL); 4003 + ptr_cid_info = kzalloc_obj(struct ulp_cid_info); 4003 4004 4004 4005 if (!ptr_cid_info) { 4005 4006 ret = -ENOMEM;
+1 -2
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
··· 1381 1381 hba->next_conn_id = 0; 1382 1382 1383 1383 hba->tgt_ofld_list = 1384 - kzalloc_objs(struct bnx2fc_rport *, BNX2FC_NUM_MAX_SESS, 1385 - GFP_KERNEL); 1384 + kzalloc_objs(struct bnx2fc_rport *, BNX2FC_NUM_MAX_SESS); 1386 1385 if (!hba->tgt_ofld_list) { 1387 1386 printk(KERN_ERR PFX "Unable to allocate tgt offload list\n"); 1388 1387 goto tgtofld_err;
+1 -2
drivers/scsi/bnx2fc/bnx2fc_io.c
··· 247 247 goto mem_err; 248 248 } 249 249 250 - cmgr->free_list_lock = kzalloc_objs(*cmgr->free_list_lock, arr_sz, 251 - GFP_KERNEL); 250 + cmgr->free_list_lock = kzalloc_objs(*cmgr->free_list_lock, arr_sz); 252 251 if (!cmgr->free_list_lock) { 253 252 printk(KERN_ERR PFX "failed to alloc free_list_lock\n"); 254 253 kfree(cmgr->free_list);
+1 -2
drivers/scsi/csiostor/csio_lnode.c
··· 2029 2029 ln->fcfinfo = pln->fcfinfo; 2030 2030 } else { 2031 2031 /* Another non-root physical lnode (FCF) */ 2032 - ln->fcfinfo = kzalloc_obj(struct csio_fcf_info, 2033 - GFP_KERNEL); 2032 + ln->fcfinfo = kzalloc_obj(struct csio_fcf_info); 2034 2033 if (!ln->fcfinfo) { 2035 2034 csio_ln_err(ln, "Failed to alloc FCF info\n"); 2036 2035 CSIO_INC_STATS(hw, n_err_nomem);
+1 -2
drivers/scsi/elx/efct/efct_hw.c
··· 1287 1287 for (i = 0; i < hw->hw_rq_count; i++) 1288 1288 count += hw->hw_rq[i]->entry_count; 1289 1289 1290 - hw->seq_pool = kmalloc_objs(struct efc_hw_sequence, count, 1291 - GFP_KERNEL); 1290 + hw->seq_pool = kmalloc_objs(struct efc_hw_sequence, count); 1292 1291 if (!hw->seq_pool) 1293 1292 return -ENOMEM; 1294 1293 }
+2 -4
drivers/scsi/esas2r/esas2r_init.c
··· 103 103 static bool alloc_vda_req(struct esas2r_adapter *a, 104 104 struct esas2r_request *rq) 105 105 { 106 - struct esas2r_mem_desc *memdesc = kzalloc_obj(struct esas2r_mem_desc, 107 - GFP_KERNEL); 106 + struct esas2r_mem_desc *memdesc = kzalloc_obj(struct esas2r_mem_desc); 108 107 109 108 if (memdesc == NULL) { 110 109 esas2r_hdebug("could not alloc mem for vda request memdesc\n"); ··· 791 792 } 792 793 793 794 /* allocate the S/G list memory descriptors */ 794 - a->sg_list_mds = kzalloc_objs(struct esas2r_mem_desc, num_sg_lists, 795 - GFP_KERNEL); 795 + a->sg_list_mds = kzalloc_objs(struct esas2r_mem_desc, num_sg_lists); 796 796 797 797 if (a->sg_list_mds == NULL) { 798 798 esas2r_log(ESAS2R_LOG_CRIT,
+1 -2
drivers/scsi/ibmvscsi/ibmvfc.c
··· 6057 6057 int i, j; 6058 6058 int rc = 0; 6059 6059 6060 - channels->scrqs = kzalloc_objs(*channels->scrqs, channels->max_queues, 6061 - GFP_KERNEL); 6060 + channels->scrqs = kzalloc_objs(*channels->scrqs, channels->max_queues); 6062 6061 if (!channels->scrqs) 6063 6062 return -ENOMEM; 6064 6063
+1 -2
drivers/scsi/ipr.c
··· 8861 8861 8862 8862 ioa_cfg->ipr_cmnd_list = kzalloc_objs(struct ipr_cmnd *, 8863 8863 IPR_NUM_CMD_BLKS, GFP_KERNEL); 8864 - ioa_cfg->ipr_cmnd_list_dma = kzalloc_objs(dma_addr_t, IPR_NUM_CMD_BLKS, 8865 - GFP_KERNEL); 8864 + ioa_cfg->ipr_cmnd_list_dma = kzalloc_objs(dma_addr_t, IPR_NUM_CMD_BLKS); 8866 8865 8867 8866 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) { 8868 8867 ipr_free_cmd_blks(ioa_cfg);
+2 -4
drivers/scsi/lpfc/lpfc_els.c
··· 12324 12324 12325 12325 if (!vport->qfpa_res) { 12326 12326 max_desc = FCELSSIZE / sizeof(*vport->qfpa_res); 12327 - vport->qfpa_res = kzalloc_objs(*vport->qfpa_res, max_desc, 12328 - GFP_KERNEL); 12327 + vport->qfpa_res = kzalloc_objs(*vport->qfpa_res, max_desc); 12329 12328 if (!vport->qfpa_res) 12330 12329 goto out; 12331 12330 } ··· 12337 12338 desc = (struct priority_range_desc *)(pcmd + 8); 12338 12339 vmid_range = vport->vmid_priority.vmid_range; 12339 12340 if (!vmid_range) { 12340 - vmid_range = kzalloc_objs(*vmid_range, MAX_PRIORITY_DESC, 12341 - GFP_KERNEL); 12341 + vmid_range = kzalloc_objs(*vmid_range, MAX_PRIORITY_DESC); 12342 12342 if (!vmid_range) { 12343 12343 kfree(vport->qfpa_res); 12344 12344 goto out;
+1 -2
drivers/scsi/lpfc/lpfc_init.c
··· 4635 4635 4636 4636 if (lpfc_is_vmid_enabled(phba)) { 4637 4637 vport->vmid = 4638 - kzalloc_objs(struct lpfc_vmid, phba->cfg_max_vmid, 4639 - GFP_KERNEL); 4638 + kzalloc_objs(struct lpfc_vmid, phba->cfg_max_vmid); 4640 4639 if (!vport->vmid) 4641 4640 return -ENOMEM; 4642 4641
+1 -2
drivers/scsi/lpfc/lpfc_mbox.c
··· 1869 1869 pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ? 1870 1870 LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount; 1871 1871 /* Allocate record for keeping SGE virtual addresses */ 1872 - mbox->sge_array = kzalloc_obj(struct lpfc_mbx_nembed_sge_virt, 1873 - GFP_KERNEL); 1872 + mbox->sge_array = kzalloc_obj(struct lpfc_mbx_nembed_sge_virt); 1874 1873 if (!mbox->sge_array) { 1875 1874 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 1876 1875 "2527 Failed to allocate non-embedded SGE "
+1 -2
drivers/scsi/lpfc/lpfc_mem.c
··· 118 118 if (!phba->lpfc_mbuf_pool) 119 119 goto fail; 120 120 121 - pool->elements = kmalloc_objs(struct lpfc_dmabuf, LPFC_MBUF_POOL_SIZE, 122 - GFP_KERNEL); 121 + pool->elements = kmalloc_objs(struct lpfc_dmabuf, LPFC_MBUF_POOL_SIZE); 123 122 if (!pool->elements) 124 123 goto fail_free_lpfc_mbuf_pool; 125 124
+1 -2
drivers/scsi/lpfc/lpfc_sli.c
··· 8046 8046 int lpfc_rx_monitor_create_ring(struct lpfc_rx_info_monitor *rx_monitor, 8047 8047 u32 entries) 8048 8048 { 8049 - rx_monitor->ring = kmalloc_objs(struct rx_info_entry, entries, 8050 - GFP_KERNEL); 8049 + rx_monitor->ring = kmalloc_objs(struct rx_info_entry, entries); 8051 8050 if (!rx_monitor->ring) 8052 8051 return -ENOMEM; 8053 8052
+1 -2
drivers/scsi/lpfc/lpfc_vport.c
··· 787 787 struct lpfc_vport *port_iterator; 788 788 struct lpfc_vport **vports; 789 789 int index = 0; 790 - vports = kzalloc_objs(struct lpfc_vport *, phba->max_vports + 1, 791 - GFP_KERNEL); 790 + vports = kzalloc_objs(struct lpfc_vport *, phba->max_vports + 1); 792 791 if (vports == NULL) 793 792 return NULL; 794 793 spin_lock_irq(&phba->port_list_lock);
+2 -4
drivers/scsi/megaraid/megaraid_mm.c
··· 932 932 * Allocate single blocks of memory for all required kiocs, 933 933 * mailboxes and passthru structures. 934 934 */ 935 - adapter->kioc_list = kmalloc_objs(uioc_t, lld_adp->max_kioc, 936 - GFP_KERNEL); 937 - adapter->mbox_list = kmalloc_objs(mbox64_t, lld_adp->max_kioc, 938 - GFP_KERNEL); 935 + adapter->kioc_list = kmalloc_objs(uioc_t, lld_adp->max_kioc); 936 + adapter->mbox_list = kmalloc_objs(mbox64_t, lld_adp->max_kioc); 939 937 adapter->pthru_dma_pool = dma_pool_create("megaraid mm pthru pool", 940 938 &adapter->pdev->dev, 941 939 sizeof(mraid_passthru_t),
+2 -4
drivers/scsi/megaraid/megaraid_sas_base.c
··· 4467 4467 * Allocate the dynamic array first and then allocate individual 4468 4468 * commands. 4469 4469 */ 4470 - instance->cmd_list = kzalloc_objs(struct megasas_cmd *, max_cmd, 4471 - GFP_KERNEL); 4470 + instance->cmd_list = kzalloc_objs(struct megasas_cmd *, max_cmd); 4472 4471 4473 4472 if (!instance->cmd_list) { 4474 4473 dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n"); ··· 4475 4476 } 4476 4477 4477 4478 for (i = 0; i < max_cmd; i++) { 4478 - instance->cmd_list[i] = kmalloc_obj(struct megasas_cmd, 4479 - GFP_KERNEL); 4479 + instance->cmd_list[i] = kmalloc_obj(struct megasas_cmd); 4480 4480 4481 4481 if (!instance->cmd_list[i]) { 4482 4482
+2 -4
drivers/scsi/megaraid/megaraid_sas_fusion.c
··· 598 598 * commands. 599 599 */ 600 600 fusion->cmd_list = 601 - kzalloc_objs(struct megasas_cmd_fusion *, max_mpt_cmd, 602 - GFP_KERNEL); 601 + kzalloc_objs(struct megasas_cmd_fusion *, max_mpt_cmd); 603 602 if (!fusion->cmd_list) { 604 603 dev_err(&instance->pdev->dev, 605 604 "Failed from %s %d\n", __func__, __LINE__); ··· 606 607 } 607 608 608 609 for (i = 0; i < max_mpt_cmd; i++) { 609 - fusion->cmd_list[i] = kzalloc_obj(struct megasas_cmd_fusion, 610 - GFP_KERNEL); 610 + fusion->cmd_list[i] = kzalloc_obj(struct megasas_cmd_fusion); 611 611 if (!fusion->cmd_list[i]) { 612 612 for (j = 0; j < i; j++) 613 613 kfree(fusion->cmd_list[j]);
+1 -2
drivers/scsi/mpi3mr/mpi3mr_fw.c
··· 2464 2464 num_queues); 2465 2465 2466 2466 if (!mrioc->req_qinfo) { 2467 - mrioc->req_qinfo = kzalloc_objs(struct op_req_qinfo, num_queues, 2468 - GFP_KERNEL); 2467 + mrioc->req_qinfo = kzalloc_objs(struct op_req_qinfo, num_queues); 2469 2468 if (!mrioc->req_qinfo) { 2470 2469 retval = -1; 2471 2470 goto out_failed;
+1 -2
drivers/scsi/mpi3mr/mpi3mr_transport.c
··· 1220 1220 mrioc->sas_hba.host_node = 1; 1221 1221 INIT_LIST_HEAD(&mrioc->sas_hba.sas_port_list); 1222 1222 mrioc->sas_hba.parent_dev = &mrioc->shost->shost_gendev; 1223 - mrioc->sas_hba.phy = kzalloc_objs(struct mpi3mr_sas_phy, num_phys, 1224 - GFP_KERNEL); 1223 + mrioc->sas_hba.phy = kzalloc_objs(struct mpi3mr_sas_phy, num_phys); 1225 1224 if (!mrioc->sas_hba.phy) 1226 1225 return; 1227 1226
+1 -2
drivers/scsi/mpt3sas/mpt3sas_base.c
··· 6218 6218 sizeof(Mpi2DefaultReplyDescriptor_t); 6219 6219 int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1; 6220 6220 6221 - ioc->reply_post = kzalloc_objs(struct reply_post_struct, count, 6222 - GFP_KERNEL); 6221 + ioc->reply_post = kzalloc_objs(struct reply_post_struct, count); 6223 6222 if (!ioc->reply_post) 6224 6223 return -ENOMEM; 6225 6224 /*
+2 -4
drivers/scsi/mpt3sas/mpt3sas_ctl.c
··· 3881 3881 rc = min(sizeof(struct SL_WH_MASTER_TRIGGER_T), count); 3882 3882 3883 3883 if (ioc->supports_trigger_pages) { 3884 - master_tg = kzalloc_obj(struct SL_WH_MASTER_TRIGGER_T, 3885 - GFP_KERNEL); 3884 + master_tg = kzalloc_obj(struct SL_WH_MASTER_TRIGGER_T); 3886 3885 if (!master_tg) 3887 3886 return -ENOMEM; 3888 3887 ··· 3955 3956 3956 3957 sz = min(sizeof(struct SL_WH_EVENT_TRIGGERS_T), count); 3957 3958 if (ioc->supports_trigger_pages) { 3958 - event_tg = kzalloc_obj(struct SL_WH_EVENT_TRIGGERS_T, 3959 - GFP_KERNEL); 3959 + event_tg = kzalloc_obj(struct SL_WH_EVENT_TRIGGERS_T); 3960 3960 if (!event_tg) 3961 3961 return -ENOMEM; 3962 3962
+2 -4
drivers/scsi/mpt3sas/mpt3sas_scsih.c
··· 6376 6376 port_id = sas_iounit_pg0->PhyData[i].Port; 6377 6377 mport = mpt3sas_get_port_by_id(ioc, port_id, 1); 6378 6378 if (!mport) { 6379 - mport = kzalloc_obj(struct hba_port, 6380 - GFP_KERNEL); 6379 + mport = kzalloc_obj(struct hba_port); 6381 6380 if (!mport) 6382 6381 break; 6383 6382 mport->port_id = port_id; ··· 6746 6747 } 6747 6748 ioc->sas_hba.num_phys = num_phys; 6748 6749 6749 - port_table = kzalloc_objs(struct hba_port, ioc->sas_hba.num_phys, 6750 - GFP_KERNEL); 6750 + port_table = kzalloc_objs(struct hba_port, ioc->sas_hba.num_phys); 6751 6751 if (!port_table) 6752 6752 return; 6753 6753
+1 -2
drivers/scsi/mvumi.c
··· 1572 1572 found = mvumi_match_devices(mhba, id, wwid); 1573 1573 if (!found) { 1574 1574 mvumi_remove_devices(mhba, id); 1575 - mv_dev = kzalloc_obj(struct mvumi_device, 1576 - GFP_KERNEL); 1575 + mv_dev = kzalloc_obj(struct mvumi_device); 1577 1576 if (!mv_dev) { 1578 1577 dev_err(&mhba->pdev->dev, 1579 1578 "%s alloc mv_dev failed\n",
+2 -4
drivers/scsi/qedf/qedf_io.c
··· 230 230 } 231 231 232 232 /* Allocate task parameters to pass to f/w init funcions */ 233 - io_req->task_params = kzalloc_obj(*io_req->task_params, 234 - GFP_KERNEL); 233 + io_req->task_params = kzalloc_obj(*io_req->task_params); 235 234 if (!io_req->task_params) { 236 235 QEDF_ERR(&(qedf->dbg_ctx), 237 236 "Failed to allocate task_params for xid=0x%x\n", ··· 242 243 * Allocate scatter/gather list info to pass to f/w init 243 244 * functions. 244 245 */ 245 - io_req->sgl_task_params = kzalloc_obj(struct scsi_sgl_task_params, 246 - GFP_KERNEL); 246 + io_req->sgl_task_params = kzalloc_obj(struct scsi_sgl_task_params); 247 247 if (!io_req->sgl_task_params) { 248 248 QEDF_ERR(&(qedf->dbg_ctx), 249 249 "Failed to allocate sgl_task_params for xid=0x%x\n",
+1 -2
drivers/scsi/qedf/qedf_main.c
··· 3082 3082 3083 3083 /* Allocate a CQ and an associated PBL for each MSI-X vector */ 3084 3084 for (i = 0; i < qedf->num_queues; i++) { 3085 - qedf->global_queues[i] = kzalloc_obj(struct global_queue, 3086 - GFP_KERNEL); 3085 + qedf->global_queues[i] = kzalloc_obj(struct global_queue); 3087 3086 if (!qedf->global_queues[i]) { 3088 3087 QEDF_WARN(&(qedf->dbg_ctx), "Unable to allocate " 3089 3088 "global queue %d.\n", i);
+1 -2
drivers/scsi/qedi/qedi_main.c
··· 1668 1668 */ 1669 1669 for (i = 0; i < qedi->num_queues; i++) { 1670 1670 qedi->global_queues[i] = 1671 - kzalloc_obj(*qedi->global_queues[0], 1672 - GFP_KERNEL); 1671 + kzalloc_obj(*qedi->global_queues[0]); 1673 1672 if (!qedi->global_queues[i]) { 1674 1673 QEDI_ERR(&qedi->dbg_ctx, 1675 1674 "Unable to allocation global queue %d.\n", i);
+2 -4
drivers/scsi/qla2xxx/qla_init.c
··· 4033 4033 req->num_outstanding_cmds = ha->cur_fw_iocb_count; 4034 4034 } 4035 4035 4036 - req->outstanding_cmds = kzalloc_objs(srb_t *, req->num_outstanding_cmds, 4037 - GFP_KERNEL); 4036 + req->outstanding_cmds = kzalloc_objs(srb_t *, req->num_outstanding_cmds); 4038 4037 4039 4038 if (!req->outstanding_cmds) { 4040 4039 /* ··· 6492 6493 6493 6494 /* Try GID_PT to get device list, else GAN. */ 6494 6495 if (!ha->swl) 6495 - ha->swl = kzalloc_objs(sw_info_t, ha->max_fibre_devices, 6496 - GFP_KERNEL); 6496 + ha->swl = kzalloc_objs(sw_info_t, ha->max_fibre_devices); 6497 6497 swl = ha->swl; 6498 6498 if (!swl) { 6499 6499 /*EMPTY*/
+1 -2
drivers/scsi/qla2xxx/qla_inline.h
··· 621 621 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 622 622 623 623 if (!ha->qp_cpu_map) { 624 - ha->qp_cpu_map = kzalloc_objs(struct qla_qpair *, NR_CPUS, 625 - GFP_KERNEL); 624 + ha->qp_cpu_map = kzalloc_objs(struct qla_qpair *, NR_CPUS); 626 625 if (!ha->qp_cpu_map) { 627 626 ql_log(ql_log_fatal, vha, 0x0180, 628 627 "Unable to allocate memory for qp_cpu_map ptrs.\n");
+1 -2
drivers/scsi/qla2xxx/qla_isr.c
··· 4562 4562 } 4563 4563 } 4564 4564 vha->irq_offset = desc.pre_vectors; 4565 - ha->msix_entries = kzalloc_objs(struct qla_msix_entry, ha->msix_count, 4566 - GFP_KERNEL); 4565 + ha->msix_entries = kzalloc_objs(struct qla_msix_entry, ha->msix_count); 4567 4566 if (!ha->msix_entries) { 4568 4567 ql_log(ql_log_fatal, vha, 0x00c8, 4569 4568 "Failed to allocate memory for ha->msix_entries.\n");
+1 -2
drivers/scsi/qla2xxx/qla_mid.c
··· 1102 1102 return -ENOMEM; 1103 1103 } 1104 1104 sz = qp->req->length * sizeof(dma_addr_t); 1105 - qp->buf_pool.dma_array = kzalloc_objs(dma_addr_t, qp->req->length, 1106 - GFP_KERNEL); 1105 + qp->buf_pool.dma_array = kzalloc_objs(dma_addr_t, qp->req->length); 1107 1106 if (!qp->buf_pool.dma_array) { 1108 1107 ql_log(ql_log_warn, vha, 0x0186, 1109 1108 "Failed to allocate dma_array(%d).\n", sz);
+3 -6
drivers/scsi/qla2xxx/qla_os.c
··· 438 438 { 439 439 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 440 440 441 - ha->req_q_map = kzalloc_objs(struct req_que *, ha->max_req_queues, 442 - GFP_KERNEL); 441 + ha->req_q_map = kzalloc_objs(struct req_que *, ha->max_req_queues); 443 442 if (!ha->req_q_map) { 444 443 ql_log(ql_log_fatal, vha, 0x003b, 445 444 "Unable to allocate memory for request queue ptrs.\n"); 446 445 goto fail_req_map; 447 446 } 448 447 449 - ha->rsp_q_map = kzalloc_objs(struct rsp_que *, ha->max_rsp_queues, 450 - GFP_KERNEL); 448 + ha->rsp_q_map = kzalloc_objs(struct rsp_que *, ha->max_rsp_queues); 451 449 if (!ha->rsp_q_map) { 452 450 ql_log(ql_log_fatal, vha, 0x003c, 453 451 "Unable to allocate memory for response queue ptrs.\n"); ··· 4420 4422 INIT_LIST_HEAD(&ha->vp_list); 4421 4423 4422 4424 /* Allocate memory for our loop_id bitmap */ 4423 - ha->loop_id_map = kzalloc_objs(long, BITS_TO_LONGS(LOOPID_MAP_SIZE), 4424 - GFP_KERNEL); 4425 + ha->loop_id_map = kzalloc_objs(long, BITS_TO_LONGS(LOOPID_MAP_SIZE)); 4425 4426 if (!ha->loop_id_map) 4426 4427 goto fail_loop_id_map; 4427 4428 else {
+1 -2
drivers/scsi/qla2xxx/qla_target.c
··· 7465 7465 return -ENOMEM; 7466 7466 } 7467 7467 7468 - tgt->qphints = kzalloc_objs(struct qla_qpair_hint, ha->max_qpairs + 1, 7469 - GFP_KERNEL); 7468 + tgt->qphints = kzalloc_objs(struct qla_qpair_hint, ha->max_qpairs + 1); 7470 7469 if (!tgt->qphints) { 7471 7470 kfree(tgt); 7472 7471 ql_log(ql_log_warn, base_vha, 0x0197,
+2 -4
drivers/scsi/scsi_debug.c
··· 6504 6504 devip->max_open = sdeb_zbc_max_open; 6505 6505 } 6506 6506 6507 - devip->zstate = kzalloc_objs(struct sdeb_zone_state, devip->nr_zones, 6508 - GFP_KERNEL); 6507 + devip->zstate = kzalloc_objs(struct sdeb_zone_state, devip->nr_zones); 6509 6508 if (!devip->zstate) 6510 6509 return -ENOMEM; 6511 6510 ··· 6648 6649 if (sdebug_ptype == TYPE_TAPE) { 6649 6650 if (!devip->tape_blocks[0]) { 6650 6651 devip->tape_blocks[0] = 6651 - kzalloc_objs(struct tape_block, TAPE_UNITS, 6652 - GFP_KERNEL); 6652 + kzalloc_objs(struct tape_block, TAPE_UNITS); 6653 6653 if (!devip->tape_blocks[0]) 6654 6654 return 1; 6655 6655 }
+1 -2
drivers/scsi/ses.c
··· 799 799 } 800 800 page2_not_supported: 801 801 if (components > 0) { 802 - scomp = kzalloc_objs(struct ses_component, components, 803 - GFP_KERNEL); 802 + scomp = kzalloc_objs(struct ses_component, components); 804 803 if (!scomp) 805 804 goto err_free; 806 805 }
+1 -2
drivers/scsi/smartpqi/smartpqi_init.c
··· 2472 2472 2473 2473 num_new_devices = num_physicals + num_logicals; 2474 2474 2475 - new_device_list = kmalloc_objs(*new_device_list, num_new_devices, 2476 - GFP_KERNEL); 2475 + new_device_list = kmalloc_objs(*new_device_list, num_new_devices); 2477 2476 if (!new_device_list) { 2478 2477 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg); 2479 2478 rc = -ENOMEM;
+1 -2
drivers/scsi/vmw_pvscsi.c
··· 1478 1478 */ 1479 1479 pvscsi_setup_all_rings(adapter); 1480 1480 1481 - adapter->cmd_map = kzalloc_objs(struct pvscsi_ctx, adapter->req_depth, 1482 - GFP_KERNEL); 1481 + adapter->cmd_map = kzalloc_objs(struct pvscsi_ctx, adapter->req_depth); 1483 1482 if (!adapter->cmd_map) { 1484 1483 printk(KERN_ERR "vmw_pvscsi: failed to allocate memory.\n"); 1485 1484 error = -ENOMEM;
+1 -2
drivers/soc/qcom/pdr_interface.c
··· 396 396 int domains_read = 0; 397 397 int ret, i; 398 398 399 - struct servreg_get_domain_list_resp *resp __free(kfree) = kzalloc_obj(*resp, 400 - GFP_KERNEL); 399 + struct servreg_get_domain_list_resp *resp __free(kfree) = kzalloc_obj(*resp); 401 400 if (!resp) 402 401 return -ENOMEM; 403 402
+1 -2
drivers/soc/renesas/rz-sysc.c
··· 110 110 struct rz_sysc *sysc; 111 111 int ret; 112 112 113 - struct regmap_config *regmap_cfg __free(kfree) = kzalloc_obj(*regmap_cfg, 114 - GFP_KERNEL); 113 + struct regmap_config *regmap_cfg __free(kfree) = kzalloc_obj(*regmap_cfg); 115 114 if (!regmap_cfg) 116 115 return -ENOMEM; 117 116
+2 -4
drivers/soundwire/amd_init.c
··· 104 104 105 105 ctx->count = count; 106 106 ctx->link_mask = res->link_mask; 107 - struct resource *sdw_res __free(kfree) = kzalloc_obj(*sdw_res, 108 - GFP_KERNEL); 107 + struct resource *sdw_res __free(kfree) = kzalloc_obj(*sdw_res); 109 108 if (!sdw_res) { 110 109 kfree(ctx); 111 110 return NULL; ··· 204 205 num_slaves++; 205 206 } 206 207 207 - ctx->peripherals = kmalloc_flex(*ctx->peripherals, array, num_slaves, 208 - GFP_KERNEL); 208 + ctx->peripherals = kmalloc_flex(*ctx->peripherals, array, num_slaves); 209 209 if (!ctx->peripherals) 210 210 return -ENOMEM; 211 211 ctx->peripherals->num_peripherals = num_slaves;
+1 -2
drivers/soundwire/amd_manager.c
··· 718 718 sconfig.bps = snd_pcm_format_width(params_format(params)); 719 719 720 720 /* Port configuration */ 721 - struct sdw_port_config *pconfig __free(kfree) = kzalloc_obj(*pconfig, 722 - GFP_KERNEL); 721 + struct sdw_port_config *pconfig __free(kfree) = kzalloc_obj(*pconfig); 723 722 if (!pconfig) 724 723 return -ENOMEM; 725 724
+1 -2
drivers/soundwire/intel.c
··· 769 769 sconfig.bps = snd_pcm_format_width(params_format(params)); 770 770 771 771 /* Port configuration */ 772 - struct sdw_port_config *pconfig __free(kfree) = kzalloc_obj(*pconfig, 773 - GFP_KERNEL); 772 + struct sdw_port_config *pconfig __free(kfree) = kzalloc_obj(*pconfig); 774 773 if (!pconfig) 775 774 return -ENOMEM; 776 775
+1 -2
drivers/soundwire/intel_ace2x.c
··· 747 747 sconfig.bps = snd_pcm_format_width(params_format(params)); 748 748 749 749 /* Port configuration */ 750 - struct sdw_port_config *pconfig __free(kfree) = kzalloc_obj(*pconfig, 751 - GFP_KERNEL); 750 + struct sdw_port_config *pconfig __free(kfree) = kzalloc_obj(*pconfig); 752 751 if (!pconfig) 753 752 return -ENOMEM; 754 753
+1 -2
drivers/soundwire/intel_init.c
··· 253 253 num_slaves++; 254 254 } 255 255 256 - ctx->peripherals = kmalloc_flex(*ctx->peripherals, array, num_slaves, 257 - GFP_KERNEL); 256 + ctx->peripherals = kmalloc_flex(*ctx->peripherals, array, num_slaves); 258 257 if (!ctx->peripherals) 259 258 goto err; 260 259 ctx->peripherals->num_peripherals = num_slaves;
+1 -2
drivers/spi/spi-bcm-qspi.c
··· 1565 1565 return PTR_ERR(qspi->base[CHIP_SELECT]); 1566 1566 } 1567 1567 1568 - qspi->dev_ids = kzalloc_objs(struct bcm_qspi_dev_id, num_irqs, 1569 - GFP_KERNEL); 1568 + qspi->dev_ids = kzalloc_objs(struct bcm_qspi_dev_id, num_irqs); 1570 1569 if (!qspi->dev_ids) 1571 1570 return -ENOMEM; 1572 1571
+1 -2
drivers/spi/spi-mpc52xx.c
··· 443 443 ms->gpio_cs_count = gpiod_count(&op->dev, NULL); 444 444 if (ms->gpio_cs_count > 0) { 445 445 host->num_chipselect = ms->gpio_cs_count; 446 - ms->gpio_cs = kmalloc_objs(*ms->gpio_cs, ms->gpio_cs_count, 447 - GFP_KERNEL); 446 + ms->gpio_cs = kmalloc_objs(*ms->gpio_cs, ms->gpio_cs_count); 448 447 if (!ms->gpio_cs) { 449 448 rc = -ENOMEM; 450 449 goto err_alloc_gpio;
+1 -2
drivers/spi/spi-virtio.c
··· 158 158 unsigned int incnt = 0; 159 159 int ret; 160 160 161 - struct virtio_spi_req *spi_req __free(kfree) = kzalloc_obj(*spi_req, 162 - GFP_KERNEL); 161 + struct virtio_spi_req *spi_req __free(kfree) = kzalloc_obj(*spi_req); 163 162 if (!spi_req) 164 163 return -ENOMEM; 165 164
+3 -6
drivers/staging/greybus/light.c
··· 275 275 channel->attr_group = kzalloc_obj(*channel->attr_group); 276 276 if (!channel->attr_group) 277 277 return -ENOMEM; 278 - channel->attr_groups = kzalloc_objs(*channel->attr_groups, 2, 279 - GFP_KERNEL); 278 + channel->attr_groups = kzalloc_objs(*channel->attr_groups, 2); 280 279 if (!channel->attr_groups) 281 280 return -ENOMEM; 282 281 ··· 1010 1011 light->name = kstrndup(conf.name, NAMES_MAX, GFP_KERNEL); 1011 1012 if (!light->name) 1012 1013 return -ENOMEM; 1013 - light->channels = kzalloc_objs(struct gb_channel, conf.channel_count, 1014 - GFP_KERNEL); 1014 + light->channels = kzalloc_objs(struct gb_channel, conf.channel_count); 1015 1015 if (!light->channels) 1016 1016 return -ENOMEM; 1017 1017 /* ··· 1151 1153 if (ret < 0) 1152 1154 goto out; 1153 1155 1154 - glights->lights = kzalloc_objs(struct gb_light, glights->lights_count, 1155 - GFP_KERNEL); 1156 + glights->lights = kzalloc_objs(struct gb_light, glights->lights_count); 1156 1157 if (!glights->lights) { 1157 1158 ret = -ENOMEM; 1158 1159 goto out;
+1 -2
drivers/staging/greybus/power_supply.c
··· 545 545 } 546 546 } 547 547 548 - gbpsy->props = kzalloc_objs(*gbpsy->props, gbpsy->properties_count, 549 - GFP_KERNEL); 548 + gbpsy->props = kzalloc_objs(*gbpsy->props, gbpsy->properties_count); 550 549 if (!gbpsy->props) { 551 550 ret = -ENOMEM; 552 551 goto out_put_operation;
+3 -6
drivers/staging/media/atomisp/pci/atomisp_ioctl.c
··· 696 696 ATOMISP_S3A_BUF_QUEUE_DEPTH_FOR_HAL; 697 697 dev_dbg(isp->dev, "allocating %d 3a buffers\n", count); 698 698 while (count--) { 699 - s3a_buf = kzalloc_obj(struct atomisp_s3a_buf, 700 - GFP_KERNEL); 699 + s3a_buf = kzalloc_obj(struct atomisp_s3a_buf); 701 700 if (!s3a_buf) 702 701 goto error; 703 702 ··· 715 716 count = ATOMISP_CSS_Q_DEPTH + 1; 716 717 dev_dbg(isp->dev, "allocating %d dis buffers\n", count); 717 718 while (count--) { 718 - dis_buf = kzalloc_obj(struct atomisp_dis_buf, 719 - GFP_KERNEL); 719 + dis_buf = kzalloc_obj(struct atomisp_dis_buf); 720 720 if (!dis_buf) 721 721 goto error; 722 722 if (atomisp_css_allocate_stat_buffers( ··· 737 739 dev_dbg(isp->dev, "allocating %d metadata buffers for type %d\n", 738 740 count, i); 739 741 while (count--) { 740 - md_buf = kzalloc_obj(struct atomisp_metadata_buf, 741 - GFP_KERNEL); 742 + md_buf = kzalloc_obj(struct atomisp_metadata_buf); 742 743 if (!md_buf) 743 744 goto error; 744 745
+2 -4
drivers/staging/media/atomisp/pci/sh_css.c
··· 6252 6252 6253 6253 mycs->num_vf_pp = 1; 6254 6254 } 6255 - mycs->vf_pp_binary = kzalloc_objs(struct ia_css_binary, mycs->num_vf_pp, 6256 - GFP_KERNEL); 6255 + mycs->vf_pp_binary = kzalloc_objs(struct ia_css_binary, mycs->num_vf_pp); 6257 6256 if (!mycs->vf_pp_binary) { 6258 6257 err = -ENOMEM; 6259 6258 goto ERR; ··· 7920 7921 7921 7922 /* allocate pipes */ 7922 7923 curr_stream->num_pipes = num_pipes; 7923 - curr_stream->pipes = kzalloc_objs(struct ia_css_pipe *, num_pipes, 7924 - GFP_KERNEL); 7924 + curr_stream->pipes = kzalloc_objs(struct ia_css_pipe *, num_pipes); 7925 7925 if (!curr_stream->pipes) { 7926 7926 curr_stream->num_pipes = 0; 7927 7927 kfree(curr_stream);
+1 -2
drivers/staging/media/atomisp/pci/sh_css_firmware.c
··· 262 262 sh_css_blob_info = NULL; 263 263 } 264 264 265 - fw_minibuffer = kzalloc_objs(struct fw_param, sh_css_num_binaries, 266 - GFP_KERNEL); 265 + fw_minibuffer = kzalloc_objs(struct fw_param, sh_css_num_binaries); 267 266 if (!fw_minibuffer) 268 267 return -ENOMEM; 269 268
+1 -2
drivers/staging/media/atomisp/pci/sh_css_params.c
··· 4464 4464 if (!params || !params->pipe_dvs_6axis_config[IA_CSS_PIPE_ID_VIDEO]) 4465 4465 goto err; 4466 4466 4467 - dvs_config = kvzalloc_objs(struct ia_css_dvs_6axis_config, 1, 4468 - GFP_KERNEL); 4467 + dvs_config = kvzalloc_objs(struct ia_css_dvs_6axis_config, 1); 4469 4468 if (!dvs_config) 4470 4469 goto err; 4471 4470
+1 -2
drivers/staging/vme_user/vme_tsi148.c
··· 2332 2332 if (err_chk) { 2333 2333 master_num--; 2334 2334 2335 - tsi148_device->flush_image = kmalloc_obj(*tsi148_device->flush_image, 2336 - GFP_KERNEL); 2335 + tsi148_device->flush_image = kmalloc_obj(*tsi148_device->flush_image); 2337 2336 if (!tsi148_device->flush_image) { 2338 2337 retval = -ENOMEM; 2339 2338 goto err_master;
+1 -2
drivers/target/target_core_iblock.c
··· 66 66 } 67 67 ib_dev->ibd_exclusive = true; 68 68 69 - ib_dev->ibd_plug = kzalloc_objs(*ib_dev->ibd_plug, nr_cpu_ids, 70 - GFP_KERNEL); 69 + ib_dev->ibd_plug = kzalloc_objs(*ib_dev->ibd_plug, nr_cpu_ids); 71 70 if (!ib_dev->ibd_plug) 72 71 goto free_dev; 73 72
+1 -2
drivers/target/target_core_pr.c
··· 1712 1712 * the dest_node_acl and dest_se_deve pointers for the 1713 1713 * loop below. 1714 1714 */ 1715 - tidh_new = kzalloc_obj(struct pr_transport_id_holder, 1716 - GFP_KERNEL); 1715 + tidh_new = kzalloc_obj(struct pr_transport_id_holder); 1717 1716 if (!tidh_new) { 1718 1717 pr_err("Unable to allocate tidh_new\n"); 1719 1718 core_scsi3_lunacl_undepend_item(dest_se_deve);
+1 -2
drivers/tee/qcomtee/call.c
··· 563 563 564 564 static int qcomtee_open(struct tee_context *ctx) 565 565 { 566 - struct qcomtee_context_data *ctxdata __free(kfree) = kzalloc_obj(*ctxdata, 567 - GFP_KERNEL); 566 + struct qcomtee_context_data *ctxdata __free(kfree) = kzalloc_obj(*ctxdata); 568 567 if (!ctxdata) 569 568 return -ENOMEM; 570 569
+1 -2
drivers/tee/qcomtee/mem_obj.c
··· 91 91 struct tee_shm *shm; 92 92 int err; 93 93 94 - struct qcomtee_mem_object *mem_object __free(kfree) = kzalloc_obj(*mem_object, 95 - GFP_KERNEL); 94 + struct qcomtee_mem_object *mem_object __free(kfree) = kzalloc_obj(*mem_object); 96 95 if (!mem_object) 97 96 return -ENOMEM; 98 97
+3 -6
drivers/tee/tee_core.c
··· 560 560 return -EINVAL; 561 561 562 562 if (arg.num_params) { 563 - params = kzalloc_objs(struct tee_param, arg.num_params, 564 - GFP_KERNEL); 563 + params = kzalloc_objs(struct tee_param, arg.num_params); 565 564 if (!params) 566 565 return -ENOMEM; 567 566 uparams = uarg->params; ··· 637 638 return -EINVAL; 638 639 639 640 if (arg.num_params) { 640 - params = kzalloc_objs(struct tee_param, arg.num_params, 641 - GFP_KERNEL); 641 + params = kzalloc_objs(struct tee_param, arg.num_params); 642 642 if (!params) 643 643 return -ENOMEM; 644 644 uparams = uarg->params; ··· 697 699 return -EINVAL; 698 700 699 701 if (arg.num_params) { 700 - params = kzalloc_objs(struct tee_param, arg.num_params, 701 - GFP_KERNEL); 702 + params = kzalloc_objs(struct tee_param, arg.num_params); 702 703 if (!params) 703 704 return -ENOMEM; 704 705 uparams = uarg->params;
+1 -2
drivers/thermal/intel/intel_hfi.c
··· 690 690 * This allocation may fail. CPU hotplug callbacks must check 691 691 * for a null pointer. 692 692 */ 693 - hfi_instances = kzalloc_objs(*hfi_instances, max_hfi_instances, 694 - GFP_KERNEL); 693 + hfi_instances = kzalloc_objs(*hfi_instances, max_hfi_instances); 695 694 if (!hfi_instances) 696 695 return; 697 696
+5 -10
drivers/thermal/testing/zone.c
··· 186 186 { 187 187 int ret; 188 188 189 - struct tt_thermal_zone *tt_zone __free(kfree) = kzalloc_obj(*tt_zone, 190 - GFP_KERNEL); 189 + struct tt_thermal_zone *tt_zone __free(kfree) = kzalloc_obj(*tt_zone); 191 190 if (!tt_zone) 192 191 return -ENOMEM; 193 192 194 - struct tt_work *tt_work __free(kfree) = kzalloc_obj(*tt_work, 195 - GFP_KERNEL); 193 + struct tt_work *tt_work __free(kfree) = kzalloc_obj(*tt_work); 196 194 if (!tt_work) 197 195 return -ENOMEM; 198 196 ··· 243 245 if (ret != 1) 244 246 return -EINVAL; 245 247 246 - struct tt_work *tt_work __free(kfree) = kzalloc_obj(*tt_work, 247 - GFP_KERNEL); 248 + struct tt_work *tt_work __free(kfree) = kzalloc_obj(*tt_work); 248 249 if (!tt_work) 249 250 return -ENOMEM; 250 251 ··· 329 332 { 330 333 int id; 331 334 332 - struct tt_work *tt_work __free(kfree) = kzalloc_obj(*tt_work, 333 - GFP_KERNEL); 335 + struct tt_work *tt_work __free(kfree) = kzalloc_obj(*tt_work); 334 336 if (!tt_work) 335 337 return -ENOMEM; 336 338 337 - struct tt_trip *tt_trip __free(kfree) = kzalloc_obj(*tt_trip, 338 - GFP_KERNEL); 339 + struct tt_trip *tt_trip __free(kfree) = kzalloc_obj(*tt_trip); 339 340 if (!tt_trip) 340 341 return -ENOMEM; 341 342
+1 -2
drivers/thermal/thermal_of.c
··· 107 107 if (!count) 108 108 return NULL; 109 109 110 - struct thermal_trip *tt __free(kfree) = kzalloc_objs(*tt, count, 111 - GFP_KERNEL); 110 + struct thermal_trip *tt __free(kfree) = kzalloc_objs(*tt, count); 112 111 if (!tt) 113 112 return ERR_PTR(-ENOMEM); 114 113
+1 -2
drivers/thunderbolt/switch.c
··· 2503 2503 } 2504 2504 2505 2505 /* initialize ports */ 2506 - sw->ports = kzalloc_objs(*sw->ports, sw->config.max_port_number + 1, 2507 - GFP_KERNEL); 2506 + sw->ports = kzalloc_objs(*sw->ports, sw->config.max_port_number + 1); 2508 2507 if (!sw->ports) { 2509 2508 ret = -ENOMEM; 2510 2509 goto err_free_sw_ports;
+1 -2
drivers/tty/goldfish.c
··· 238 238 int ret; 239 239 struct tty_driver *tty; 240 240 241 - goldfish_ttys = kzalloc_objs(*goldfish_ttys, goldfish_tty_line_count, 242 - GFP_KERNEL); 241 + goldfish_ttys = kzalloc_objs(*goldfish_ttys, goldfish_tty_line_count); 243 242 if (goldfish_ttys == NULL) { 244 243 ret = -ENOMEM; 245 244 goto err_alloc_goldfish_ttys_failed;
+2 -4
drivers/tty/serial/8250/8250_platform.c
··· 111 111 struct resource *regs; 112 112 int ret, line; 113 113 114 - struct uart_8250_port *uart __free(kfree) = kzalloc_obj(*uart, 115 - GFP_KERNEL); 114 + struct uart_8250_port *uart __free(kfree) = kzalloc_obj(*uart); 116 115 if (!uart) 117 116 return -ENOMEM; 118 117 ··· 156 157 { 157 158 int ret, i; 158 159 159 - struct uart_8250_port *uart __free(kfree) = kzalloc_obj(*uart, 160 - GFP_KERNEL); 160 + struct uart_8250_port *uart __free(kfree) = kzalloc_obj(*uart); 161 161 if (!uart) 162 162 return -ENOMEM; 163 163
+1 -2
drivers/tty/serial/jsm/jsm_tty.c
··· 391 391 * Okay to malloc with GFP_KERNEL, we are not at 392 392 * interrupt context, and there are no locks held. 393 393 */ 394 - brd->channels[i] = kzalloc_obj(struct jsm_channel, 395 - GFP_KERNEL); 394 + brd->channels[i] = kzalloc_obj(struct jsm_channel); 396 395 if (!brd->channels[i]) { 397 396 jsm_dbg(CORE, &brd->pci_dev, 398 397 "%s:%d Unable to allocate memory for channel struct\n",
+1 -2
drivers/tty/serial/serial_core.c
··· 3088 3088 if (uport->attr_group) 3089 3089 num_groups++; 3090 3090 3091 - uport->tty_groups = kzalloc_objs(*uport->tty_groups, num_groups, 3092 - GFP_KERNEL); 3091 + uport->tty_groups = kzalloc_objs(*uport->tty_groups, num_groups); 3093 3092 if (!uport->tty_groups) 3094 3093 return -ENOMEM; 3095 3094
+1 -2
drivers/tty/tty_io.c
··· 3344 3344 3345 3345 if (!(flags & TTY_DRIVER_DEVPTS_MEM)) { 3346 3346 driver->ttys = kzalloc_objs(*driver->ttys, lines); 3347 - driver->termios = kzalloc_objs(*driver->termios, lines, 3348 - GFP_KERNEL); 3347 + driver->termios = kzalloc_objs(*driver->termios, lines); 3349 3348 if (!driver->ttys || !driver->termios) { 3350 3349 err = -ENOMEM; 3351 3350 goto err_free_all;
+1 -2
drivers/tty/vt/keyboard.c
··· 1548 1548 { 1549 1549 int error; 1550 1550 1551 - struct input_handle __free(kfree) *handle = kzalloc_obj(*handle, 1552 - GFP_KERNEL); 1551 + struct input_handle __free(kfree) *handle = kzalloc_obj(*handle); 1553 1552 if (!handle) 1554 1553 return -ENOMEM; 1555 1554
+2 -4
drivers/usb/core/hcd.c
··· 2567 2567 if (!hcd) 2568 2568 return NULL; 2569 2569 if (primary_hcd == NULL) { 2570 - hcd->address0_mutex = kmalloc_obj(*hcd->address0_mutex, 2571 - GFP_KERNEL); 2570 + hcd->address0_mutex = kmalloc_obj(*hcd->address0_mutex); 2572 2571 if (!hcd->address0_mutex) { 2573 2572 kfree(hcd); 2574 2573 dev_dbg(dev, "hcd address0 mutex alloc failed\n"); 2575 2574 return NULL; 2576 2575 } 2577 2576 mutex_init(hcd->address0_mutex); 2578 - hcd->bandwidth_mutex = kmalloc_obj(*hcd->bandwidth_mutex, 2579 - GFP_KERNEL); 2577 + hcd->bandwidth_mutex = kmalloc_obj(*hcd->bandwidth_mutex); 2580 2578 if (!hcd->bandwidth_mutex) { 2581 2579 kfree(hcd->address0_mutex); 2582 2580 kfree(hcd);
+1 -2
drivers/usb/gadget/function/f_midi.c
··· 931 931 goto fail; 932 932 933 933 /* allocate temporary function list */ 934 - midi_function = kzalloc_objs(*midi_function, (MAX_PORTS * 4) + 11, 935 - GFP_KERNEL); 934 + midi_function = kzalloc_objs(*midi_function, (MAX_PORTS * 4) + 11); 936 935 if (!midi_function) { 937 936 status = -ENOMEM; 938 937 goto fail;
+1 -2
drivers/usb/gadget/function/f_midi2.c
··· 1187 1187 return -ENODEV; 1188 1188 usb_ep->complete = complete; 1189 1189 1190 - usb_ep->reqs = kzalloc_objs(*usb_ep->reqs, midi2->info.num_reqs, 1191 - GFP_KERNEL); 1190 + usb_ep->reqs = kzalloc_objs(*usb_ep->reqs, midi2->info.num_reqs); 1192 1191 if (!usb_ep->reqs) 1193 1192 return -ENOMEM; 1194 1193 for (i = 0; i < midi2->info.num_reqs; i++) {
+1 -2
drivers/usb/gadget/udc/bdc/bdc_core.c
··· 397 397 "ieps:%d eops:%d num_eps:%d\n", 398 398 num_ieps, num_oeps, bdc->num_eps); 399 399 /* allocate array of ep pointers */ 400 - bdc->bdc_ep_array = kzalloc_objs(struct bdc_ep *, bdc->num_eps, 401 - GFP_KERNEL); 400 + bdc->bdc_ep_array = kzalloc_objs(struct bdc_ep *, bdc->num_eps); 402 401 if (!bdc->bdc_ep_array) 403 402 goto fail; 404 403
+1 -2
drivers/usb/host/uhci-hcd.c
··· 603 603 goto err_alloc_frame; 604 604 } 605 605 606 - uhci->frame_cpu = kzalloc_objs(*uhci->frame_cpu, UHCI_NUMFRAMES, 607 - GFP_KERNEL); 606 + uhci->frame_cpu = kzalloc_objs(*uhci->frame_cpu, UHCI_NUMFRAMES); 608 607 if (!uhci->frame_cpu) 609 608 goto err_alloc_frame_cpu; 610 609
+1 -2
drivers/usb/host/xhci-mtk-sch.c
··· 179 179 if (utt->multi) { 180 180 tt_index = utt->hcpriv; 181 181 if (!tt_index) { /* Create the index array */ 182 - tt_index = kzalloc_objs(*tt_index, utt->hub->maxchild, 183 - GFP_KERNEL); 182 + tt_index = kzalloc_objs(*tt_index, utt->hub->maxchild); 184 183 if (!tt_index) 185 184 return ERR_PTR(-ENOMEM); 186 185 utt->hcpriv = tt_index;
+1 -2
drivers/usb/mon/mon_bin.c
··· 1029 1029 return -EINVAL; 1030 1030 1031 1031 size = CHUNK_ALIGN(arg); 1032 - vec = kzalloc_objs(struct mon_pgmap, size / CHUNK_SIZE, 1033 - GFP_KERNEL); 1032 + vec = kzalloc_objs(struct mon_pgmap, size / CHUNK_SIZE); 1034 1033 if (vec == NULL) { 1035 1034 ret = -ENOMEM; 1036 1035 break;
+1 -2
drivers/usb/serial/mos7840.c
··· 1677 1677 /* Initialize LED timers */ 1678 1678 if (mos7840_port->has_led) { 1679 1679 mos7840_port->led_urb = usb_alloc_urb(0, GFP_KERNEL); 1680 - mos7840_port->led_dr = kmalloc_obj(*mos7840_port->led_dr, 1681 - GFP_KERNEL); 1680 + mos7840_port->led_dr = kmalloc_obj(*mos7840_port->led_dr); 1682 1681 if (!mos7840_port->led_urb || !mos7840_port->led_dr) { 1683 1682 status = -ENOMEM; 1684 1683 goto error;
+1 -2
drivers/usb/typec/ucsi/ucsi.c
··· 1865 1865 } 1866 1866 1867 1867 /* Allocate the connectors. Released in ucsi_unregister() */ 1868 - connector = kzalloc_objs(*connector, ucsi->cap.num_connectors + 1, 1869 - GFP_KERNEL); 1868 + connector = kzalloc_objs(*connector, ucsi->cap.num_connectors + 1); 1870 1869 if (!connector) { 1871 1870 ret = -ENOMEM; 1872 1871 goto err_reset;
+2 -4
drivers/usb/usbip/vhci_sysfs.c
··· 476 476 { 477 477 int id; 478 478 479 - status_attrs = kzalloc_objs(struct status_attr, vhci_num_controllers, 480 - GFP_KERNEL); 479 + status_attrs = kzalloc_objs(struct status_attr, vhci_num_controllers); 481 480 if (status_attrs == NULL) 482 481 return -ENOMEM; 483 482 ··· 500 501 struct attribute **attrs; 501 502 int ret, i; 502 503 503 - attrs = kzalloc_objs(struct attribute *, (vhci_num_controllers + 5), 504 - GFP_KERNEL); 504 + attrs = kzalloc_objs(struct attribute *, (vhci_num_controllers + 5)); 505 505 if (attrs == NULL) 506 506 return -ENOMEM; 507 507
+1 -2
drivers/vdpa/mlx5/net/mlx5_vnet.c
··· 3903 3903 mlx5_cmd_init_async_ctx(mdev, &mvdev->async_ctx); 3904 3904 3905 3905 ndev->vqs = kzalloc_objs(*ndev->vqs, max_vqs); 3906 - ndev->event_cbs = kzalloc_objs(*ndev->event_cbs, max_vqs + 1, 3907 - GFP_KERNEL); 3906 + ndev->event_cbs = kzalloc_objs(*ndev->event_cbs, max_vqs + 1); 3908 3907 if (!ndev->vqs || !ndev->event_cbs) { 3909 3908 err = -ENOMEM; 3910 3909 goto err_alloc;
+2 -4
drivers/vdpa/vdpa_sim/vdpa_sim.c
··· 246 246 if (!vdpasim->config) 247 247 goto err_iommu; 248 248 249 - vdpasim->vqs = kzalloc_objs(struct vdpasim_virtqueue, dev_attr->nvqs, 250 - GFP_KERNEL); 249 + vdpasim->vqs = kzalloc_objs(struct vdpasim_virtqueue, dev_attr->nvqs); 251 250 if (!vdpasim->vqs) 252 251 goto err_iommu; 253 252 254 - vdpasim->iommu = kmalloc_objs(*vdpasim->iommu, vdpasim->dev_attr.nas, 255 - GFP_KERNEL); 253 + vdpasim->iommu = kmalloc_objs(*vdpasim->iommu, vdpasim->dev_attr.nas); 256 254 if (!vdpasim->iommu) 257 255 goto err_iommu; 258 256
+1 -2
drivers/vfio/pci/pds/dirty.c
··· 43 43 u8 num_regions; 44 44 int err; 45 45 46 - region_info = kzalloc_objs(struct pds_lm_dirty_region_info, max_regions, 47 - GFP_KERNEL); 46 + region_info = kzalloc_objs(struct pds_lm_dirty_region_info, max_regions); 48 47 if (!region_info) 49 48 return; 50 49
+1 -2
drivers/vfio/pci/vfio_pci_dmabuf.c
··· 251 251 ret = -ENOMEM; 252 252 goto err_free_ranges; 253 253 } 254 - priv->phys_vec = kzalloc_objs(*priv->phys_vec, get_dma_buf.nr_ranges, 255 - GFP_KERNEL); 254 + priv->phys_vec = kzalloc_objs(*priv->phys_vec, get_dma_buf.nr_ranges); 256 255 if (!priv->phys_vec) { 257 256 ret = -ENOMEM; 258 257 goto err_free_priv;
+1 -2
drivers/vhost/net.c
··· 293 293 if (!zcopy) 294 294 continue; 295 295 n->vqs[i].ubuf_info = 296 - kmalloc_objs(*n->vqs[i].ubuf_info, UIO_MAXIOV, 297 - GFP_KERNEL); 296 + kmalloc_objs(*n->vqs[i].ubuf_info, UIO_MAXIOV); 298 297 if (!n->vqs[i].ubuf_info) 299 298 goto err; 300 299 }
+2 -4
drivers/vhost/scsi.c
··· 382 382 unsigned int log_num) 383 383 { 384 384 if (!cmd->tvc_log) 385 - cmd->tvc_log = kmalloc_objs(*cmd->tvc_log, vq->dev->iov_limit, 386 - GFP_KERNEL); 385 + cmd->tvc_log = kmalloc_objs(*cmd->tvc_log, vq->dev->iov_limit); 387 386 388 387 if (unlikely(!cmd->tvc_log)) { 389 388 vq_err(vq, "Failed to alloc tvc_log\n"); ··· 1938 1939 return -ENOMEM; 1939 1940 } 1940 1941 1941 - svq->upages = kzalloc_objs(struct page *, VHOST_SCSI_PREALLOC_UPAGES, 1942 - GFP_KERNEL); 1942 + svq->upages = kzalloc_objs(struct page *, VHOST_SCSI_PREALLOC_UPAGES); 1943 1943 if (!svq->upages) 1944 1944 goto out; 1945 1945
+1 -2
drivers/vhost/vhost.c
··· 514 514 515 515 for (i = 0; i < dev->nvqs; ++i) { 516 516 vq = dev->vqs[i]; 517 - vq->indirect = kmalloc_objs(*vq->indirect, UIO_MAXIOV, 518 - GFP_KERNEL); 517 + vq->indirect = kmalloc_objs(*vq->indirect, UIO_MAXIOV); 519 518 vq->log = kmalloc_objs(*vq->log, dev->iov_limit); 520 519 vq->heads = kmalloc_objs(*vq->heads, dev->iov_limit); 521 520 vq->nheads = kmalloc_array(dev->iov_limit, sizeof(*vq->nheads),
+1 -2
drivers/video/fbdev/arkfb.c
··· 431 431 432 432 static struct dac_info * ics5342_init(dac_read_regs_t drr, dac_write_regs_t dwr, void *data) 433 433 { 434 - struct ics5342_info *ics_info = kzalloc_obj(struct ics5342_info, 435 - GFP_KERNEL); 434 + struct ics5342_info *ics_info = kzalloc_obj(struct ics5342_info); 436 435 struct dac_info *info = &ics_info->dac; 437 436 438 437 if (!ics_info)
+1 -2
drivers/video/fbdev/mmp/fb/mmpfb.c
··· 476 476 return 0; 477 477 } 478 478 /* put videomode list to info structure */ 479 - videomodes = kzalloc_objs(struct fb_videomode, videomode_num, 480 - GFP_KERNEL); 479 + videomodes = kzalloc_objs(struct fb_videomode, videomode_num); 481 480 if (!videomodes) 482 481 return -ENOMEM; 483 482
+1 -2
drivers/video/fbdev/omap2/omapfb/dss/manager.c
··· 32 32 33 33 num_managers = dss_feat_get_num_mgrs(); 34 34 35 - managers = kzalloc_objs(struct omap_overlay_manager, num_managers, 36 - GFP_KERNEL); 35 + managers = kzalloc_objs(struct omap_overlay_manager, num_managers); 37 36 38 37 BUG_ON(managers == NULL); 39 38
+1 -2
drivers/video/fbdev/smscufx.c
··· 946 946 947 947 /* TODO: Help propose a standard fb.h ioctl to report mmap damage */ 948 948 if (cmd == UFX_IOCTL_REPORT_DAMAGE) { 949 - struct dloarea *area __free(kfree) = kmalloc_obj(*area, 950 - GFP_KERNEL); 949 + struct dloarea *area __free(kfree) = kmalloc_obj(*area); 951 950 if (!area) 952 951 return -ENOMEM; 953 952
+1 -2
drivers/video/fbdev/udlfb.c
··· 1164 1164 1165 1165 static void dlfb_deferred_vfree(struct dlfb_data *dlfb, void *mem) 1166 1166 { 1167 - struct dlfb_deferred_free *d = kmalloc_obj(struct dlfb_deferred_free, 1168 - GFP_KERNEL); 1167 + struct dlfb_deferred_free *d = kmalloc_obj(struct dlfb_deferred_free); 1169 1168 if (!d) 1170 1169 return; 1171 1170 d->mem = mem;
+1 -2
drivers/video/fbdev/uvesafb.c
··· 487 487 mode++; 488 488 } 489 489 490 - par->vbe_modes = kzalloc_objs(struct vbe_mode_ib, par->vbe_modes_cnt, 491 - GFP_KERNEL); 490 + par->vbe_modes = kzalloc_objs(struct vbe_mode_ib, par->vbe_modes_cnt); 492 491 if (!par->vbe_modes) 493 492 return -ENOMEM; 494 493
+1 -2
drivers/video/of_display_timing.c
··· 184 184 goto timingfail; 185 185 } 186 186 187 - disp->timings = kzalloc_objs(struct display_timing *, disp->num_timings, 188 - GFP_KERNEL); 187 + disp->timings = kzalloc_objs(struct display_timing *, disp->num_timings); 189 188 if (!disp->timings) { 190 189 pr_err("%pOF: could not allocate timings array\n", np); 191 190 goto timingfail;
+1 -2
drivers/virt/acrn/mm.c
··· 285 285 } 286 286 287 287 /* Prepare the vm_memory_region_batch */ 288 - regions_info = kzalloc_flex(*regions_info, regions_op, nr_regions, 289 - GFP_KERNEL); 288 + regions_info = kzalloc_flex(*regions_info, regions_op, nr_regions); 290 289 if (!regions_info) { 291 290 ret = -ENOMEM; 292 291 goto unmap_kernel_map;
+1 -2
drivers/virt/nitro_enclaves/ne_misc_dev.c
··· 934 934 935 935 max_nr_pages = mem_region.memory_size / NE_MIN_MEM_REGION_SIZE; 936 936 937 - ne_mem_region->pages = kzalloc_objs(*ne_mem_region->pages, max_nr_pages, 938 - GFP_KERNEL); 937 + ne_mem_region->pages = kzalloc_objs(*ne_mem_region->pages, max_nr_pages); 939 938 if (!ne_mem_region->pages) { 940 939 rc = -ENOMEM; 941 940
+2 -4
drivers/virtio/virtio_pci_common.c
··· 134 134 135 135 vp_dev->msix_vectors = nvectors; 136 136 137 - vp_dev->msix_names = kmalloc_objs(*vp_dev->msix_names, nvectors, 138 - GFP_KERNEL); 137 + vp_dev->msix_names = kmalloc_objs(*vp_dev->msix_names, nvectors); 139 138 if (!vp_dev->msix_names) 140 139 goto error; 141 140 vp_dev->msix_affinity_masks 142 - = kzalloc_objs(*vp_dev->msix_affinity_masks, nvectors, 143 - GFP_KERNEL); 141 + = kzalloc_objs(*vp_dev->msix_affinity_masks, nvectors); 144 142 if (!vp_dev->msix_affinity_masks) 145 143 goto error; 146 144 for (i = 0; i < nvectors; ++i)
+2 -4
drivers/xen/gntdev.c
··· 158 158 NULL == add->being_removed) 159 159 goto err; 160 160 if (xen_pv_domain()) { 161 - add->kmap_ops = kvmalloc_objs(add->kmap_ops[0], count, 162 - GFP_KERNEL); 163 - add->kunmap_ops = kvmalloc_objs(add->kunmap_ops[0], count, 164 - GFP_KERNEL); 161 + add->kmap_ops = kvmalloc_objs(add->kmap_ops[0], count); 162 + add->kunmap_ops = kvmalloc_objs(add->kunmap_ops[0], count); 165 163 if (NULL == add->kmap_ops || NULL == add->kunmap_ops) 166 164 goto err; 167 165 }
+1 -2
drivers/xen/grant-table.c
··· 1635 1635 */ 1636 1636 max_nr_glist_frames = max_nr_grefs / RPP; 1637 1637 1638 - gnttab_list = kmalloc_objs(grant_ref_t *, max_nr_glist_frames, 1639 - GFP_KERNEL); 1638 + gnttab_list = kmalloc_objs(grant_ref_t *, max_nr_glist_frames); 1640 1639 if (gnttab_list == NULL) 1641 1640 return -ENOMEM; 1642 1641
+2 -4
drivers/xen/xen-acpi-processor.c
··· 61 61 unsigned int i, ok; 62 62 int ret = 0; 63 63 64 - dst_cx_states = kzalloc_objs(struct xen_processor_cx, _pr->power.count, 65 - GFP_KERNEL); 64 + dst_cx_states = kzalloc_objs(struct xen_processor_cx, _pr->power.count); 66 65 if (!dst_cx_states) 67 66 return -ENOMEM; 68 67 ··· 411 412 return -ENOMEM; 412 413 } 413 414 414 - acpi_psd = kzalloc_objs(struct acpi_psd_package, nr_acpi_bits, 415 - GFP_KERNEL); 415 + acpi_psd = kzalloc_objs(struct acpi_psd_package, nr_acpi_bits); 416 416 if (!acpi_psd) { 417 417 bitmap_free(acpi_id_present); 418 418 bitmap_free(acpi_id_cst_present);
+1 -2
drivers/xen/xen-scsiback.c
··· 543 543 } 544 544 545 545 /* free of (sgl) in fast_flush_area() */ 546 - pending_req->sgl = kmalloc_objs(struct scatterlist, nr_segments, 547 - GFP_KERNEL); 546 + pending_req->sgl = kmalloc_objs(struct scatterlist, nr_segments); 548 547 if (!pending_req->sgl) 549 548 return -ENOMEM; 550 549
+1 -2
fs/aio.c
··· 510 510 511 511 ctx->ring_folios = ctx->internal_folios; 512 512 if (nr_pages > AIO_RING_PAGES) { 513 - ctx->ring_folios = kzalloc_objs(struct folio *, nr_pages, 514 - GFP_KERNEL); 513 + ctx->ring_folios = kzalloc_objs(struct folio *, nr_pages); 515 514 if (!ctx->ring_folios) { 516 515 put_aio_ring_file(ctx); 517 516 return -ENOMEM;
+1 -2
fs/binfmt_elf.c
··· 1873 1873 /* 1874 1874 * Allocate a structure for each thread. 1875 1875 */ 1876 - info->thread = kzalloc_flex(*info->thread, notes, info->thread_notes, 1877 - GFP_KERNEL); 1876 + info->thread = kzalloc_flex(*info->thread, notes, info->thread_notes); 1878 1877 if (unlikely(!info->thread)) 1879 1878 return 0; 1880 1879
+1 -2
fs/btrfs/ioctl.c
··· 733 733 ret = get_anon_bdev(&pending_snapshot->anon_dev); 734 734 if (ret < 0) 735 735 goto free_pending; 736 - pending_snapshot->root_item = kzalloc_obj(struct btrfs_root_item, 737 - GFP_KERNEL); 736 + pending_snapshot->root_item = kzalloc_obj(struct btrfs_root_item); 738 737 pending_snapshot->path = btrfs_alloc_path(); 739 738 if (!pending_snapshot->root_item || !pending_snapshot->path) { 740 739 ret = -ENOMEM;
+1 -2
fs/btrfs/tests/btrfs-tests.c
··· 117 117 118 118 struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize) 119 119 { 120 - struct btrfs_fs_info *fs_info = kzalloc_obj(struct btrfs_fs_info, 121 - GFP_KERNEL); 120 + struct btrfs_fs_info *fs_info = kzalloc_obj(struct btrfs_fs_info); 122 121 123 122 if (!fs_info) 124 123 return fs_info;
+1 -2
fs/btrfs/zoned.c
··· 454 454 goto out; 455 455 } 456 456 457 - zones = kvzalloc_objs(struct blk_zone, BTRFS_REPORT_NR_ZONES, 458 - GFP_KERNEL); 457 + zones = kvzalloc_objs(struct blk_zone, BTRFS_REPORT_NR_ZONES); 459 458 if (!zones) { 460 459 ret = -ENOMEM; 461 460 goto out;
+1 -2
fs/cachefiles/ondemand.c
··· 734 734 if (!cachefiles_in_ondemand_mode(volume->cache)) 735 735 return 0; 736 736 737 - object->ondemand = kzalloc_obj(struct cachefiles_ondemand_info, 738 - GFP_KERNEL); 737 + object->ondemand = kzalloc_obj(struct cachefiles_ondemand_info); 739 738 if (!object->ondemand) 740 739 return -ENOMEM; 741 740
+1 -2
fs/coredump.c
··· 1736 1736 gate_vma = get_gate_vma(mm); 1737 1737 cprm->vma_count = mm->map_count + (gate_vma ? 1 : 0); 1738 1738 1739 - cprm->vma_meta = kvmalloc_objs(*cprm->vma_meta, cprm->vma_count, 1740 - GFP_KERNEL); 1739 + cprm->vma_meta = kvmalloc_objs(*cprm->vma_meta, cprm->vma_count); 1741 1740 if (!cprm->vma_meta) { 1742 1741 mmap_write_unlock(mm); 1743 1742 return false;
+1 -2
fs/erofs/xattr.c
··· 85 85 } 86 86 vi->xattr_name_filter = le32_to_cpu(ih->h_name_filter); 87 87 vi->xattr_shared_count = ih->h_shared_count; 88 - vi->xattr_shared_xattrs = kmalloc_objs(uint, vi->xattr_shared_count, 89 - GFP_KERNEL); 88 + vi->xattr_shared_xattrs = kmalloc_objs(uint, vi->xattr_shared_count); 90 89 if (!vi->xattr_shared_xattrs) { 91 90 erofs_put_metabuf(&buf); 92 91 ret = -ENOMEM;
+1 -2
fs/exfat/balloc.c
··· 96 96 } 97 97 sbi->map_sectors = ((need_map_size - 1) >> 98 98 (sb->s_blocksize_bits)) + 1; 99 - sbi->vol_amap = kvmalloc_objs(struct buffer_head *, sbi->map_sectors, 100 - GFP_KERNEL); 99 + sbi->vol_amap = kvmalloc_objs(struct buffer_head *, sbi->map_sectors); 101 100 if (!sbi->vol_amap) 102 101 return -ENOMEM; 103 102
+1 -2
fs/ext2/super.c
··· 1122 1122 } 1123 1123 db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) / 1124 1124 EXT2_DESC_PER_BLOCK(sb); 1125 - sbi->s_group_desc = kvmalloc_objs(struct buffer_head *, db_count, 1126 - GFP_KERNEL); 1125 + sbi->s_group_desc = kvmalloc_objs(struct buffer_head *, db_count); 1127 1126 if (sbi->s_group_desc == NULL) { 1128 1127 ret = -ENOMEM; 1129 1128 ext2_msg(sb, KERN_ERR, "error: not enough memory");
+1 -2
fs/ext4/orphan.c
··· 598 598 } 599 599 oi->of_blocks = inode->i_size >> sb->s_blocksize_bits; 600 600 oi->of_csum_seed = EXT4_I(inode)->i_csum_seed; 601 - oi->of_binfo = kvmalloc_objs(struct ext4_orphan_block, oi->of_blocks, 602 - GFP_KERNEL); 601 + oi->of_binfo = kvmalloc_objs(struct ext4_orphan_block, oi->of_blocks); 603 602 if (!oi->of_binfo) { 604 603 ret = -ENOMEM; 605 604 goto out_put;
+2 -4
fs/fhandle.c
··· 46 46 if (f_handle.handle_bytes > MAX_HANDLE_SZ) 47 47 return -EINVAL; 48 48 49 - handle = kzalloc_flex(*handle, f_handle, f_handle.handle_bytes, 50 - GFP_KERNEL); 49 + handle = kzalloc_flex(*handle, f_handle, f_handle.handle_bytes); 51 50 if (!handle) 52 51 return -ENOMEM; 53 52 ··· 367 368 if (retval) 368 369 goto out_path; 369 370 370 - handle = kmalloc_flex(*handle, f_handle, f_handle.handle_bytes, 371 - GFP_KERNEL); 371 + handle = kmalloc_flex(*handle, f_handle, f_handle.handle_bytes); 372 372 if (!handle) { 373 373 retval = -ENOMEM; 374 374 goto out_path;
+1 -2
fs/jbd2/journal.c
··· 2268 2268 2269 2269 /* Are we called twice? */ 2270 2270 WARN_ON(journal->j_fc_wbuf != NULL); 2271 - journal->j_fc_wbuf = kmalloc_objs(struct buffer_head *, num_fc_blks, 2272 - GFP_KERNEL); 2271 + journal->j_fc_wbuf = kmalloc_objs(struct buffer_head *, num_fc_blks); 2273 2272 if (!journal->j_fc_wbuf) 2274 2273 return -ENOMEM; 2275 2274
+4 -8
fs/jffs2/summary.c
··· 115 115 int jffs2_sum_add_inode_mem(struct jffs2_summary *s, struct jffs2_raw_inode *ri, 116 116 uint32_t ofs) 117 117 { 118 - struct jffs2_sum_inode_mem *temp = kmalloc_obj(struct jffs2_sum_inode_mem, 119 - GFP_KERNEL); 118 + struct jffs2_sum_inode_mem *temp = kmalloc_obj(struct jffs2_sum_inode_mem); 120 119 121 120 if (!temp) 122 121 return -ENOMEM; ··· 263 264 switch (je16_to_cpu(node->u.nodetype)) { 264 265 case JFFS2_NODETYPE_INODE: { 265 266 struct jffs2_sum_inode_mem *temp = 266 - kmalloc_obj(struct jffs2_sum_inode_mem, 267 - GFP_KERNEL); 267 + kmalloc_obj(struct jffs2_sum_inode_mem); 268 268 269 269 if (!temp) 270 270 goto no_mem; ··· 314 316 #ifdef CONFIG_JFFS2_FS_XATTR 315 317 case JFFS2_NODETYPE_XATTR: { 316 318 struct jffs2_sum_xattr_mem *temp; 317 - temp = kmalloc_obj(struct jffs2_sum_xattr_mem, 318 - GFP_KERNEL); 319 + temp = kmalloc_obj(struct jffs2_sum_xattr_mem); 319 320 if (!temp) 320 321 goto no_mem; 321 322 ··· 329 332 } 330 333 case JFFS2_NODETYPE_XREF: { 331 334 struct jffs2_sum_xref_mem *temp; 332 - temp = kmalloc_obj(struct jffs2_sum_xref_mem, 333 - GFP_KERNEL); 335 + temp = kmalloc_obj(struct jffs2_sum_xref_mem); 334 336 if (!temp) 335 337 goto no_mem; 336 338 temp->nodetype = node->r.nodetype;
+1 -2
fs/jffs2/xattr.c
··· 784 784 785 785 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING)); 786 786 787 - xref_tmphash = kzalloc_objs(struct jffs2_xattr_ref *, XREF_TMPHASH_SIZE, 788 - GFP_KERNEL); 787 + xref_tmphash = kzalloc_objs(struct jffs2_xattr_ref *, XREF_TMPHASH_SIZE); 789 788 if (!xref_tmphash) 790 789 return -ENOMEM; 791 790
+1 -2
fs/mbcache.c
··· 365 365 cache->c_max_entries = bucket_count << 4; 366 366 INIT_LIST_HEAD(&cache->c_list); 367 367 spin_lock_init(&cache->c_list_lock); 368 - cache->c_hash = kmalloc_objs(struct hlist_bl_head, bucket_count, 369 - GFP_KERNEL); 368 + cache->c_hash = kmalloc_objs(struct hlist_bl_head, bucket_count); 370 369 if (!cache->c_hash) { 371 370 kfree(cache); 372 371 goto err_out;
+1 -2
fs/nfsd/nfs4proc.c
··· 2171 2171 if (atomic_inc_return(&nn->pending_async_copies) > 2172 2172 (int)rqstp->rq_pool->sp_nrthreads) 2173 2173 goto out_dec_async_copy_err; 2174 - async_copy->cp_src = kmalloc_obj(*async_copy->cp_src, 2175 - GFP_KERNEL); 2174 + async_copy->cp_src = kmalloc_obj(*async_copy->cp_src); 2176 2175 if (!async_copy->cp_src) 2177 2176 goto out_dec_async_copy_err; 2178 2177 if (!nfs4_init_copy_state(nn, copy))
+2 -4
fs/nfsd/nfs4state.c
··· 8959 8959 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 8960 8960 int i; 8961 8961 8962 - nn->conf_id_hashtbl = kmalloc_objs(struct list_head, CLIENT_HASH_SIZE, 8963 - GFP_KERNEL); 8962 + nn->conf_id_hashtbl = kmalloc_objs(struct list_head, CLIENT_HASH_SIZE); 8964 8963 if (!nn->conf_id_hashtbl) 8965 8964 goto err; 8966 - nn->unconf_id_hashtbl = kmalloc_objs(struct list_head, CLIENT_HASH_SIZE, 8967 - GFP_KERNEL); 8965 + nn->unconf_id_hashtbl = kmalloc_objs(struct list_head, CLIENT_HASH_SIZE); 8968 8966 if (!nn->unconf_id_hashtbl) 8969 8967 goto err_unconf_id; 8970 8968 nn->sessionid_hashtbl = kmalloc_objs(struct list_head,
+2 -4
fs/ocfs2/cluster/heartbeat.c
··· 1677 1677 if (reg->hr_tmp_block == NULL) 1678 1678 return -ENOMEM; 1679 1679 1680 - reg->hr_slots = kzalloc_objs(struct o2hb_disk_slot, reg->hr_blocks, 1681 - GFP_KERNEL); 1680 + reg->hr_slots = kzalloc_objs(struct o2hb_disk_slot, reg->hr_blocks); 1682 1681 if (reg->hr_slots == NULL) 1683 1682 return -ENOMEM; 1684 1683 ··· 1693 1694 "at %u blocks per page\n", 1694 1695 reg->hr_num_pages, reg->hr_blocks, spp); 1695 1696 1696 - reg->hr_slot_data = kzalloc_objs(struct page *, reg->hr_num_pages, 1697 - GFP_KERNEL); 1697 + reg->hr_slot_data = kzalloc_objs(struct page *, reg->hr_num_pages); 1698 1698 if (!reg->hr_slot_data) 1699 1699 return -ENOMEM; 1700 1700
+1 -2
fs/ocfs2/journal.c
··· 114 114 if (osb->replay_map) 115 115 return 0; 116 116 117 - replay_map = kzalloc_flex(*replay_map, rm_replay_slots, osb->max_slots, 118 - GFP_KERNEL); 117 + replay_map = kzalloc_flex(*replay_map, rm_replay_slots, osb->max_slots); 119 118 if (!replay_map) { 120 119 mlog_errno(-ENOMEM); 121 120 return -ENOMEM;
+1 -2
fs/ocfs2/slot_map.c
··· 385 385 386 386 trace_ocfs2_map_slot_buffers(bytes, si->si_blocks); 387 387 388 - si->si_bh = kzalloc_objs(struct buffer_head *, si->si_blocks, 389 - GFP_KERNEL); 388 + si->si_bh = kzalloc_objs(struct buffer_head *, si->si_blocks); 390 389 if (!si->si_bh) { 391 390 status = -ENOMEM; 392 391 mlog_errno(status);
+1 -2
fs/orangefs/orangefs-bufmap.c
··· 219 219 goto out_free_bufmap; 220 220 221 221 bufmap->desc_array = 222 - kzalloc_objs(struct orangefs_bufmap_desc, bufmap->desc_count, 223 - GFP_KERNEL); 222 + kzalloc_objs(struct orangefs_bufmap_desc, bufmap->desc_count); 224 223 if (!bufmap->desc_array) 225 224 goto out_free_index_array; 226 225
+1 -2
fs/pstore/ram.c
··· 232 232 */ 233 233 struct persistent_ram_zone *tmp_prz, *prz_next; 234 234 235 - tmp_prz = kzalloc_obj(struct persistent_ram_zone, 236 - GFP_KERNEL); 235 + tmp_prz = kzalloc_obj(struct persistent_ram_zone); 237 236 if (!tmp_prz) 238 237 return -ENOMEM; 239 238 prz = tmp_prz;
+1 -2
fs/select.c
··· 993 993 todo -= walk->len; 994 994 995 995 len = min(todo, POLLFD_PER_PAGE); 996 - walk = walk->next = kmalloc_flex(*walk, entries, len, 997 - GFP_KERNEL); 996 + walk = walk->next = kmalloc_flex(*walk, entries, len); 998 997 if (!walk) { 999 998 err = -ENOMEM; 1000 999 goto out_fds;
+1 -2
fs/smb/client/misc.c
··· 673 673 cifs_dbg(FYI, "num_referrals: %d dfs flags: 0x%x ...\n", 674 674 *num_of_nodes, le32_to_cpu(rsp->DFSFlags)); 675 675 676 - *target_nodes = kzalloc_objs(struct dfs_info3_param, *num_of_nodes, 677 - GFP_KERNEL); 676 + *target_nodes = kzalloc_objs(struct dfs_info3_param, *num_of_nodes); 678 677 if (*target_nodes == NULL) { 679 678 rc = -ENOMEM; 680 679 goto parse_DFS_referrals_exit;
+1 -2
fs/xfs/xfs_super.c
··· 2239 2239 if (!mp) 2240 2240 return -ENOMEM; 2241 2241 #ifdef DEBUG 2242 - mp->m_errortag = kzalloc_objs(*mp->m_errortag, XFS_ERRTAG_MAX, 2243 - GFP_KERNEL); 2242 + mp->m_errortag = kzalloc_objs(*mp->m_errortag, XFS_ERRTAG_MAX); 2244 2243 if (!mp->m_errortag) { 2245 2244 kfree(mp); 2246 2245 return -ENOMEM;
+1 -2
fs/xfs/xfs_zone_gc.c
··· 202 202 data = kzalloc_obj(*data); 203 203 if (!data) 204 204 return NULL; 205 - data->iter.recs = kzalloc_objs(*data->iter.recs, XFS_ZONE_GC_RECS, 206 - GFP_KERNEL); 205 + data->iter.recs = kzalloc_objs(*data->iter.recs, XFS_ZONE_GC_RECS); 207 206 if (!data->iter.recs) 208 207 goto out_free_data; 209 208
+2 -4
fs/zonefs/super.c
··· 903 903 struct block_device *bdev = zd->sb->s_bdev; 904 904 int ret; 905 905 906 - zd->zones = kvzalloc_objs(struct blk_zone, bdev_nr_zones(bdev), 907 - GFP_KERNEL); 906 + zd->zones = kvzalloc_objs(struct blk_zone, bdev_nr_zones(bdev)); 908 907 if (!zd->zones) 909 908 return -ENOMEM; 910 909 ··· 947 948 if (!zgroup->g_nr_zones) 948 949 return 0; 949 950 950 - zgroup->g_zones = kvzalloc_objs(struct zonefs_zone, zgroup->g_nr_zones, 951 - GFP_KERNEL); 951 + zgroup->g_zones = kvzalloc_objs(struct zonefs_zone, zgroup->g_nr_zones); 952 952 if (!zgroup->g_zones) 953 953 return -ENOMEM; 954 954
+1 -2
include/net/udp.h
··· 294 294 up->forward_threshold = sk->sk_rcvbuf >> 2; 295 295 set_bit(SOCK_CUSTOM_SOCKOPT, &sk->sk_socket->flags); 296 296 297 - up->udp_prod_queue = kzalloc_objs(*up->udp_prod_queue, nr_node_ids, 298 - GFP_KERNEL); 297 + up->udp_prod_queue = kzalloc_objs(*up->udp_prod_queue, nr_node_ids); 299 298 if (!up->udp_prod_queue) 300 299 return -ENOMEM; 301 300 for (int i = 0; i < nr_node_ids; i++)
+1 -2
kernel/bpf/core.c
··· 910 910 struct bpf_prog_pack *pack; 911 911 int err; 912 912 913 - pack = kzalloc_flex(*pack, bitmap, BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT), 914 - GFP_KERNEL); 913 + pack = kzalloc_flex(*pack, bitmap, BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT)); 915 914 if (!pack) 916 915 return NULL; 917 916 pack->ptr = bpf_jit_alloc_exec(BPF_PROG_PACK_SIZE);
+1 -2
kernel/gcov/fs.c
··· 545 545 if (!node) 546 546 goto err_nomem; 547 547 if (info) { 548 - node->loaded_info = kzalloc_objs(struct gcov_info *, 1, 549 - GFP_KERNEL); 548 + node->loaded_info = kzalloc_objs(struct gcov_info *, 1); 550 549 if (!node->loaded_info) 551 550 goto err_nomem; 552 551 }
+1 -2
kernel/gcov/gcc_4_7.c
··· 298 298 if (!dup->filename) 299 299 goto err_free; 300 300 301 - dup->functions = kzalloc_objs(struct gcov_fn_info *, info->n_functions, 302 - GFP_KERNEL); 301 + dup->functions = kzalloc_objs(struct gcov_fn_info *, info->n_functions); 303 302 if (!dup->functions) 304 303 goto err_free; 305 304
+1 -2
kernel/locking/locktorture.c
··· 1293 1293 /* Initialize the statistics so that each run gets its own numbers. */ 1294 1294 if (nwriters_stress) { 1295 1295 lock_is_write_held = false; 1296 - cxt.lwsa = kmalloc_objs(*cxt.lwsa, cxt.nrealwriters_stress, 1297 - GFP_KERNEL); 1296 + cxt.lwsa = kmalloc_objs(*cxt.lwsa, cxt.nrealwriters_stress); 1298 1297 if (cxt.lwsa == NULL) { 1299 1298 VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory"); 1300 1299 firsterr = -ENOMEM;
+1 -2
kernel/padata.c
··· 1106 1106 #endif 1107 1107 1108 1108 possible_cpus = num_possible_cpus(); 1109 - padata_works = kmalloc_objs(struct padata_work, possible_cpus, 1110 - GFP_KERNEL); 1109 + padata_works = kmalloc_objs(struct padata_work, possible_cpus); 1111 1110 if (!padata_works) 1112 1111 goto remove_dead_state; 1113 1112
+1 -2
kernel/params.c
··· 638 638 return -ENOMEM; 639 639 mk->mp->grp.name = "parameters"; 640 640 /* NULL-terminated attribute array. */ 641 - mk->mp->grp.attrs = kzalloc_obj(mk->mp->grp.attrs[0], 642 - GFP_KERNEL); 641 + mk->mp->grp.attrs = kzalloc_obj(mk->mp->grp.attrs[0]); 643 642 /* Caller will cleanup via free_module_param_attrs */ 644 643 if (!mk->mp->grp.attrs) 645 644 return -ENOMEM;
+2 -4
kernel/rcu/rcuscale.c
··· 755 755 } 756 756 757 757 for (i = 0; i < kfree_alloc_num; i++) { 758 - alloc_ptr = kzalloc_objs(struct kfree_obj, kfree_mult, 759 - GFP_KERNEL); 758 + alloc_ptr = kzalloc_objs(struct kfree_obj, kfree_mult); 760 759 if (!alloc_ptr) 761 760 return -ENOMEM; 762 761 ··· 1145 1146 schedule_timeout_uninterruptible(1); 1146 1147 writer_tasks = kzalloc_objs(writer_tasks[0], nrealwriters); 1147 1148 writer_durations = kcalloc(nrealwriters, sizeof(*writer_durations), GFP_KERNEL); 1148 - writer_n_durations = kzalloc_objs(*writer_n_durations, nrealwriters, 1149 - GFP_KERNEL); 1149 + writer_n_durations = kzalloc_objs(*writer_n_durations, nrealwriters); 1150 1150 writer_done = kzalloc_objs(writer_done[0], nrealwriters); 1151 1151 if (gp_async) { 1152 1152 if (gp_async_max <= 0) {
+1 -2
kernel/rcu/rcutorture.c
··· 4592 4592 if (WARN_ON(nocbs_toggle < 0)) 4593 4593 nocbs_toggle = HZ; 4594 4594 if (nrealnocbers > 0) { 4595 - nocb_tasks = kzalloc_objs(nocb_tasks[0], nrealnocbers, 4596 - GFP_KERNEL); 4595 + nocb_tasks = kzalloc_objs(nocb_tasks[0], nrealnocbers); 4597 4596 if (nocb_tasks == NULL) { 4598 4597 TOROUT_ERRSTRING("out of memory"); 4599 4598 firsterr = -ENOMEM;
+1 -2
kernel/sched/ext.c
··· 4838 4838 if (ret < 0) 4839 4839 goto err_free_ei; 4840 4840 4841 - sch->global_dsqs = kzalloc_objs(sch->global_dsqs[0], nr_node_ids, 4842 - GFP_KERNEL); 4841 + sch->global_dsqs = kzalloc_objs(sch->global_dsqs[0], nr_node_ids); 4843 4842 if (!sch->global_dsqs) { 4844 4843 ret = -ENOMEM; 4845 4844 goto err_free_hash;
+3 -6
kernel/trace/trace.c
··· 3903 3903 if (!iter) 3904 3904 return ERR_PTR(-ENOMEM); 3905 3905 3906 - iter->buffer_iter = kzalloc_objs(*iter->buffer_iter, nr_cpu_ids, 3907 - GFP_KERNEL); 3906 + iter->buffer_iter = kzalloc_objs(*iter->buffer_iter, nr_cpu_ids); 3908 3907 if (!iter->buffer_iter) 3909 3908 goto release; 3910 3909 ··· 9309 9310 mod_addr_comp, NULL, NULL); 9310 9311 9311 9312 if (IS_ENABLED(CONFIG_MODULES)) { 9312 - module_delta = kzalloc_flex(*module_delta, delta, nr_entries, 9313 - GFP_KERNEL); 9313 + module_delta = kzalloc_flex(*module_delta, delta, nr_entries); 9314 9314 if (!module_delta) { 9315 9315 pr_info("module_delta allocation failed. Not able to decode module address."); 9316 9316 goto reset; ··· 10927 10929 void __init early_trace_init(void) 10928 10930 { 10929 10931 if (tracepoint_printk) { 10930 - tracepoint_print_iter = kzalloc_obj(*tracepoint_print_iter, 10931 - GFP_KERNEL); 10932 + tracepoint_print_iter = kzalloc_obj(*tracepoint_print_iter); 10932 10933 if (MEM_FAIL(!tracepoint_print_iter, 10933 10934 "Failed to allocate trace iterator\n")) 10934 10935 tracepoint_printk = 0;
+1 -2
kernel/trace/trace_events_hist.c
··· 5674 5674 (HIST_FIELD_FL_PERCENT | HIST_FIELD_FL_GRAPH))) 5675 5675 continue; 5676 5676 if (!stats) { 5677 - stats = kzalloc_objs(*stats, hist_data->n_vals, 5678 - GFP_KERNEL); 5677 + stats = kzalloc_objs(*stats, hist_data->n_vals); 5679 5678 if (!stats) { 5680 5679 n_entries = -ENOMEM; 5681 5680 goto out;
+1 -2
kernel/trace/trace_probe.c
··· 842 842 if (!earg) 843 843 return -ENOMEM; 844 844 earg->size = 2 * tp->nr_args + 1; 845 - earg->code = kzalloc_objs(struct fetch_insn, earg->size, 846 - GFP_KERNEL); 845 + earg->code = kzalloc_objs(struct fetch_insn, earg->size); 847 846 if (!earg->code) { 848 847 kfree(earg); 849 848 return -ENOMEM;
+1 -2
kernel/unwind/deferred.c
··· 120 120 return -EINVAL; 121 121 122 122 if (!info->cache) { 123 - info->cache = kzalloc_flex(*cache, entries, UNWIND_MAX_ENTRIES, 124 - GFP_KERNEL); 123 + info->cache = kzalloc_flex(*cache, entries, UNWIND_MAX_ENTRIES); 125 124 if (!info->cache) 126 125 return -ENOMEM; 127 126 }
+1 -2
lib/assoc_array.c
··· 1204 1204 node = parent; 1205 1205 1206 1206 /* Create a new node to collapse into */ 1207 - new_n0 = kzalloc_obj(struct assoc_array_node, 1208 - GFP_KERNEL); 1207 + new_n0 = kzalloc_obj(struct assoc_array_node); 1209 1208 if (!new_n0) 1210 1209 goto enomem; 1211 1210 edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);
+1 -2
lib/kunit/executor.c
··· 194 194 /* Parse attribute filters */ 195 195 if (filters) { 196 196 filter_count = kunit_get_filter_count(filters); 197 - parsed_filters = kzalloc_objs(*parsed_filters, filter_count, 198 - GFP_KERNEL); 197 + parsed_filters = kzalloc_objs(*parsed_filters, filter_count); 199 198 if (!parsed_filters) { 200 199 *err = -ENOMEM; 201 200 goto free_parsed_glob;
+1 -2
lib/tests/test_ratelimit.c
··· 104 104 int i; 105 105 const int n_stress_kthread = cpumask_weight(cpu_online_mask); 106 106 struct stress_kthread skt = { 0 }; 107 - struct stress_kthread *sktp = kzalloc_objs(*sktp, n_stress_kthread, 108 - GFP_KERNEL); 107 + struct stress_kthread *sktp = kzalloc_objs(*sktp, n_stress_kthread); 109 108 110 109 KUNIT_EXPECT_NOT_NULL_MSG(test, sktp, "Memory allocation failure"); 111 110 for (i = 0; i < n_stress_kthread; i++) {
+6 -12
mm/damon/sysfs-schemes.c
··· 26 26 static struct damon_sysfs_scheme_region *damon_sysfs_scheme_region_alloc( 27 27 struct damon_region *region) 28 28 { 29 - struct damon_sysfs_scheme_region *sysfs_region = kmalloc_obj(*sysfs_region, 30 - GFP_KERNEL); 29 + struct damon_sysfs_scheme_region *sysfs_region = kmalloc_obj(*sysfs_region); 31 30 32 31 if (!sysfs_region) 33 32 return NULL; ··· 137 138 static struct damon_sysfs_scheme_regions * 138 139 damon_sysfs_scheme_regions_alloc(void) 139 140 { 140 - struct damon_sysfs_scheme_regions *regions = kmalloc_obj(*regions, 141 - GFP_KERNEL); 141 + struct damon_sysfs_scheme_regions *regions = kmalloc_obj(*regions); 142 142 143 143 if (!regions) 144 144 return NULL; ··· 849 851 enum damos_wmark_metric metric, unsigned long interval_us, 850 852 unsigned long high, unsigned long mid, unsigned long low) 851 853 { 852 - struct damon_sysfs_watermarks *watermarks = kmalloc_obj(*watermarks, 853 - GFP_KERNEL); 854 + struct damon_sysfs_watermarks *watermarks = kmalloc_obj(*watermarks); 854 855 855 856 if (!watermarks) 856 857 return NULL; ··· 1656 1659 static 1657 1660 struct damon_sysfs_access_pattern *damon_sysfs_access_pattern_alloc(void) 1658 1661 { 1659 - struct damon_sysfs_access_pattern *access_pattern = kmalloc_obj(*access_pattern, 1660 - GFP_KERNEL); 1662 + struct damon_sysfs_access_pattern *access_pattern = kmalloc_obj(*access_pattern); 1661 1663 1662 1664 if (!access_pattern) 1663 1665 return NULL; ··· 2677 2681 struct damos_migrate_dests *dests = &scheme->migrate_dests; 2678 2682 int i; 2679 2683 2680 - dests->node_id_arr = kmalloc_objs(*dests->node_id_arr, sysfs_dests->nr, 2681 - GFP_KERNEL); 2684 + dests->node_id_arr = kmalloc_objs(*dests->node_id_arr, sysfs_dests->nr); 2682 2685 if (!dests->node_id_arr) 2683 2686 return -ENOMEM; 2684 - dests->weight_arr = kmalloc_objs(*dests->weight_arr, sysfs_dests->nr, 2685 - GFP_KERNEL); 2687 + dests->weight_arr = kmalloc_objs(*dests->weight_arr, sysfs_dests->nr); 2686 2688 if (!dests->weight_arr) 2687 2689 /* 
->node_id_arr will be freed by scheme destruction */ 2688 2690 return -ENOMEM;
+1 -2
mm/damon/sysfs.c
··· 609 609 unsigned long sample_us, unsigned long aggr_us, 610 610 unsigned long update_us) 611 611 { 612 - struct damon_sysfs_intervals *intervals = kmalloc_obj(*intervals, 613 - GFP_KERNEL); 612 + struct damon_sysfs_intervals *intervals = kmalloc_obj(*intervals); 614 613 615 614 if (!intervals) 616 615 return NULL;
+2 -4
mm/damon/tests/core-kunit.h
··· 725 725 { 726 726 size_t i; 727 727 728 - dests->node_id_arr = kmalloc_objs(*dests->node_id_arr, nr_dests, 729 - GFP_KERNEL); 728 + dests->node_id_arr = kmalloc_objs(*dests->node_id_arr, nr_dests); 730 729 if (!dests->node_id_arr) 731 730 return -ENOMEM; 732 - dests->weight_arr = kmalloc_objs(*dests->weight_arr, nr_dests, 733 - GFP_KERNEL); 731 + dests->weight_arr = kmalloc_objs(*dests->weight_arr, nr_dests); 734 732 if (!dests->weight_arr) { 735 733 kfree(dests->node_id_arr); 736 734 dests->node_id_arr = NULL;
+1 -2
mm/damon/vaddr.c
··· 821 821 use_target_nid = dests->nr_dests == 0; 822 822 nr_dests = use_target_nid ? 1 : dests->nr_dests; 823 823 priv.scheme = s; 824 - priv.migration_lists = kmalloc_objs(*priv.migration_lists, nr_dests, 825 - GFP_KERNEL); 824 + priv.migration_lists = kmalloc_objs(*priv.migration_lists, nr_dests); 826 825 if (!priv.migration_lists) 827 826 return 0; 828 827
+1 -2
mm/ksm.c
··· 3586 3586 * Allocate stable and unstable together: 3587 3587 * MAXSMP NODES_SHIFT 10 will use 16kB. 3588 3588 */ 3589 - buf = kzalloc_objs(*buf, nr_node_ids + nr_node_ids, 3590 - GFP_KERNEL); 3589 + buf = kzalloc_objs(*buf, nr_node_ids + nr_node_ids); 3591 3590 /* Let us assume that RB_ROOT is NULL is zero */ 3592 3591 if (!buf) 3593 3592 err = -ENOMEM;
+1 -2
mm/memory-tiers.c
··· 912 912 panic("%s() failed to register memory tier subsystem\n", __func__); 913 913 914 914 #ifdef CONFIG_MIGRATION 915 - node_demotion = kzalloc_objs(struct demotion_nodes, nr_node_ids, 916 - GFP_KERNEL); 915 + node_demotion = kzalloc_objs(struct demotion_nodes, nr_node_ids); 917 916 WARN_ON(!node_demotion); 918 917 #endif 919 918
+3 -6
mm/mempolicy.c
··· 229 229 if (!new_bw) 230 230 return -ENOMEM; 231 231 232 - new_wi_state = kmalloc_flex(*new_wi_state, iw_table, nr_node_ids, 233 - GFP_KERNEL); 232 + new_wi_state = kmalloc_flex(*new_wi_state, iw_table, nr_node_ids); 234 233 if (!new_wi_state) { 235 234 kfree(new_bw); 236 235 return -ENOMEM; ··· 3641 3642 kstrtou8(buf, 0, &weight) || weight == 0) 3642 3643 return -EINVAL; 3643 3644 3644 - new_wi_state = kzalloc_flex(*new_wi_state, iw_table, nr_node_ids, 3645 - GFP_KERNEL); 3645 + new_wi_state = kzalloc_flex(*new_wi_state, iw_table, nr_node_ids); 3646 3646 if (!new_wi_state) 3647 3647 return -ENOMEM; 3648 3648 ··· 3693 3695 if (kstrtobool(buf, &input)) 3694 3696 return -EINVAL; 3695 3697 3696 - new_wi_state = kzalloc_flex(*new_wi_state, iw_table, nr_node_ids, 3697 - GFP_KERNEL); 3698 + new_wi_state = kzalloc_flex(*new_wi_state, iw_table, nr_node_ids); 3698 3699 if (!new_wi_state) 3699 3700 return -ENOMEM; 3700 3701 for (i = 0; i < nr_node_ids; i++)
+1 -2
mm/mmu_notifier.c
··· 618 618 * know that mm->notifier_subscriptions can't change while we 619 619 * hold the write side of the mmap_lock. 620 620 */ 621 - subscriptions = kzalloc_obj(struct mmu_notifier_subscriptions, 622 - GFP_KERNEL); 621 + subscriptions = kzalloc_obj(struct mmu_notifier_subscriptions); 623 622 if (!subscriptions) 624 623 return -ENOMEM; 625 624
+1 -2
mm/swapfile.c
··· 3265 3265 spin_lock_init(&cluster_info[i].lock); 3266 3266 3267 3267 if (!(si->flags & SWP_SOLIDSTATE)) { 3268 - si->global_cluster = kmalloc_obj(*si->global_cluster, 3269 - GFP_KERNEL); 3268 + si->global_cluster = kmalloc_obj(*si->global_cluster); 3270 3269 if (!si->global_cluster) 3271 3270 goto err; 3272 3271 for (i = 0; i < SWAP_NR_ORDERS; i++)
+1 -2
net/bluetooth/l2cap_sock.c
··· 1564 1564 (chan->mode == L2CAP_MODE_ERTM || 1565 1565 chan->mode == L2CAP_MODE_LE_FLOWCTL || 1566 1566 chan->mode == L2CAP_MODE_EXT_FLOWCTL)) { 1567 - struct l2cap_rx_busy *rx_busy = kmalloc_obj(*rx_busy, 1568 - GFP_KERNEL); 1567 + struct l2cap_rx_busy *rx_busy = kmalloc_obj(*rx_busy); 1569 1568 if (!rx_busy) { 1570 1569 err = -ENOMEM; 1571 1570 goto done;
+2 -4
net/can/af_can.c
··· 798 798 static int can_pernet_init(struct net *net) 799 799 { 800 800 spin_lock_init(&net->can.rcvlists_lock); 801 - net->can.rx_alldev_list = kzalloc_obj(*net->can.rx_alldev_list, 802 - GFP_KERNEL); 801 + net->can.rx_alldev_list = kzalloc_obj(*net->can.rx_alldev_list); 803 802 if (!net->can.rx_alldev_list) 804 803 goto out; 805 804 net->can.pkg_stats = kzalloc_obj(*net->can.pkg_stats); 806 805 if (!net->can.pkg_stats) 807 806 goto out_free_rx_alldev_list; 808 - net->can.rcv_lists_stats = kzalloc_obj(*net->can.rcv_lists_stats, 809 - GFP_KERNEL); 807 + net->can.rcv_lists_stats = kzalloc_obj(*net->can.rcv_lists_stats); 810 808 if (!net->can.rcv_lists_stats) 811 809 goto out_free_pkg_stats; 812 810
+1 -2
net/ceph/mon_client.c
··· 1140 1140 int i; 1141 1141 1142 1142 /* build initial monmap */ 1143 - monc->monmap = kzalloc_flex(*monc->monmap, mon_inst, num_mon, 1144 - GFP_KERNEL); 1143 + monc->monmap = kzalloc_flex(*monc->monmap, mon_inst, num_mon); 1145 1144 if (!monc->monmap) 1146 1145 return -ENOMEM; 1147 1146 monc->monmap->num_mon = num_mon;
+1 -2
net/core/dev.c
··· 9133 9133 int err; 9134 9134 int rc; 9135 9135 9136 - dev->offload_xstats_l3 = kzalloc_obj(*dev->offload_xstats_l3, 9137 - GFP_KERNEL); 9136 + dev->offload_xstats_l3 = kzalloc_obj(*dev->offload_xstats_l3); 9138 9137 if (!dev->offload_xstats_l3) 9139 9138 return -ENOMEM; 9140 9139
+1 -2
net/core/drop_monitor.c
··· 306 306 struct net_dm_hw_entries *hw_entries; 307 307 unsigned long flags; 308 308 309 - hw_entries = kzalloc_flex(*hw_entries, entries, dm_hit_limit, 310 - GFP_KERNEL); 309 + hw_entries = kzalloc_flex(*hw_entries, entries, dm_hit_limit); 311 310 if (!hw_entries) { 312 311 /* If the memory allocation failed, we try to perform another 313 312 * allocation in 1/10 second. Otherwise, the probe function
+1 -2
net/core/flow_offload.c
··· 32 32 struct flow_offload_action *fl_action; 33 33 int i; 34 34 35 - fl_action = kzalloc_flex(*fl_action, action.entries, num_actions, 36 - GFP_KERNEL); 35 + fl_action = kzalloc_flex(*fl_action, action.entries, num_actions); 37 36 if (!fl_action) 38 37 return NULL; 39 38
+1 -2
net/ethtool/mse.c
··· 64 64 if (!data->capability.supported_caps) 65 65 return 0; 66 66 67 - data->snapshots = kzalloc_objs(*data->snapshots, PHY_MSE_CHANNEL_COUNT, 68 - GFP_KERNEL); 67 + data->snapshots = kzalloc_objs(*data->snapshots, PHY_MSE_CHANNEL_COUNT); 69 68 if (!data->snapshots) 70 69 return -ENOMEM; 71 70
+1 -2
net/ipv4/af_inet.c
··· 1737 1737 net->mib.icmp_statistics = alloc_percpu(struct icmp_mib); 1738 1738 if (!net->mib.icmp_statistics) 1739 1739 goto err_icmp_mib; 1740 - net->mib.icmpmsg_statistics = kzalloc_obj(struct icmpmsg_mib, 1741 - GFP_KERNEL); 1740 + net->mib.icmpmsg_statistics = kzalloc_obj(struct icmpmsg_mib); 1742 1741 if (!net->mib.icmpmsg_statistics) 1743 1742 goto err_icmpmsg_mib; 1744 1743
+1 -2
net/ipv4/fib_semantics.c
··· 365 365 static struct hlist_head *fib_info_hash_alloc(unsigned int hash_bits) 366 366 { 367 367 /* The second half is used for prefsrc */ 368 - return kvzalloc_objs(struct hlist_head, (1 << hash_bits) * 2, 369 - GFP_KERNEL); 368 + return kvzalloc_objs(struct hlist_head, (1 << hash_bits) * 2); 370 369 } 371 370 372 371 static void fib_info_hash_free(struct hlist_head *head)
+1 -2
net/ipv4/nexthop.c
··· 137 137 int i; 138 138 139 139 info->type = NH_NOTIFIER_INFO_TYPE_GRP; 140 - info->nh_grp = kzalloc_flex(*info->nh_grp, nh_entries, num_nh, 141 - GFP_KERNEL); 140 + info->nh_grp = kzalloc_flex(*info->nh_grp, nh_entries, num_nh); 142 141 if (!info->nh_grp) 143 142 return -ENOMEM; 144 143
+1 -2
net/ipv6/addrconf.c
··· 355 355 } 356 356 357 357 358 - idev->stats.icmpv6dev = kzalloc_obj(struct icmpv6_mib_device, 359 - GFP_KERNEL); 358 + idev->stats.icmpv6dev = kzalloc_obj(struct icmpv6_mib_device); 360 359 if (!idev->stats.icmpv6dev) 361 360 goto err_icmp; 362 361 idev->stats.icmpv6msgdev = kzalloc_obj(struct icmpv6msg_mib_device,
+1 -2
net/ipv6/af_inet6.c
··· 921 921 net->mib.icmpv6_statistics = alloc_percpu(struct icmpv6_mib); 922 922 if (!net->mib.icmpv6_statistics) 923 923 goto err_icmp_mib; 924 - net->mib.icmpv6msg_statistics = kzalloc_obj(struct icmpv6msg_mib, 925 - GFP_KERNEL); 924 + net->mib.icmpv6msg_statistics = kzalloc_obj(struct icmpv6msg_mib); 926 925 if (!net->mib.icmpv6msg_statistics) 927 926 goto err_icmpmsg_mib; 928 927 return 0;
+2 -4
net/ipv6/ip6_fib.c
··· 2477 2477 2478 2478 spin_lock_init(&net->ipv6.fib_table_hash_lock); 2479 2479 2480 - net->ipv6.fib6_main_tbl = kzalloc_obj(*net->ipv6.fib6_main_tbl, 2481 - GFP_KERNEL); 2480 + net->ipv6.fib6_main_tbl = kzalloc_obj(*net->ipv6.fib6_main_tbl); 2482 2481 if (!net->ipv6.fib6_main_tbl) 2483 2482 goto out_fib_table_hash; 2484 2483 ··· 2490 2491 INIT_HLIST_HEAD(&net->ipv6.fib6_main_tbl->tb6_gc_hlist); 2491 2492 2492 2493 #ifdef CONFIG_IPV6_MULTIPLE_TABLES 2493 - net->ipv6.fib6_local_tbl = kzalloc_obj(*net->ipv6.fib6_local_tbl, 2494 - GFP_KERNEL); 2494 + net->ipv6.fib6_local_tbl = kzalloc_obj(*net->ipv6.fib6_local_tbl); 2495 2495 if (!net->ipv6.fib6_local_tbl) 2496 2496 goto out_fib6_main_tbl; 2497 2497 net->ipv6.fib6_local_tbl->tb6_id = RT6_TABLE_LOCAL;
+1 -2
net/mac80211/util.c
··· 1742 1742 if (WARN_ON(res)) 1743 1743 return res; 1744 1744 1745 - funcs = kzalloc_objs(*funcs, sdata->local->hw.max_nan_de_entries + 1, 1746 - GFP_KERNEL); 1745 + funcs = kzalloc_objs(*funcs, sdata->local->hw.max_nan_de_entries + 1); 1747 1746 if (!funcs) 1748 1747 return -ENOMEM; 1749 1748
+1 -2
net/netfilter/ipvs/ip_vs_conn.c
··· 1510 1510 */ 1511 1511 tab_array_size = array_size(ip_vs_conn_tab_size, 1512 1512 sizeof(*ip_vs_conn_tab)); 1513 - ip_vs_conn_tab = kvmalloc_objs(*ip_vs_conn_tab, ip_vs_conn_tab_size, 1514 - GFP_KERNEL); 1513 + ip_vs_conn_tab = kvmalloc_objs(*ip_vs_conn_tab, ip_vs_conn_tab_size); 1515 1514 if (!ip_vs_conn_tab) 1516 1515 return -ENOMEM; 1517 1516
+1 -2
net/netfilter/ipvs/ip_vs_mh.c
··· 386 386 if (!s) 387 387 return -ENOMEM; 388 388 389 - s->lookup = kzalloc_objs(struct ip_vs_mh_lookup, IP_VS_MH_TAB_SIZE, 390 - GFP_KERNEL); 389 + s->lookup = kzalloc_objs(struct ip_vs_mh_lookup, IP_VS_MH_TAB_SIZE); 391 390 if (!s->lookup) { 392 391 kfree(s); 393 392 return -ENOMEM;
+1 -2
net/netlink/policy.c
··· 102 102 { 103 103 struct netlink_policy_dump_state *state; 104 104 105 - state = kzalloc_flex(*state, policies, INITIAL_POLICIES_ALLOC, 106 - GFP_KERNEL); 105 + state = kzalloc_flex(*state, policies, INITIAL_POLICIES_ALLOC); 107 106 if (!state) 108 107 return ERR_PTR(-ENOMEM); 109 108 state->n_alloc = INITIAL_POLICIES_ALLOC;
+2 -4
net/openvswitch/conntrack.c
··· 1586 1586 { 1587 1587 int i, err; 1588 1588 1589 - ovs_net->ct_limit_info = kmalloc_obj(*ovs_net->ct_limit_info, 1590 - GFP_KERNEL); 1589 + ovs_net->ct_limit_info = kmalloc_obj(*ovs_net->ct_limit_info); 1591 1590 if (!ovs_net->ct_limit_info) 1592 1591 return -ENOMEM; 1593 1592 1594 1593 ovs_net->ct_limit_info->default_limit = OVS_CT_LIMIT_DEFAULT; 1595 1594 ovs_net->ct_limit_info->limits = 1596 - kmalloc_objs(struct hlist_head, CT_LIMIT_HASH_BUCKETS, 1597 - GFP_KERNEL); 1595 + kmalloc_objs(struct hlist_head, CT_LIMIT_HASH_BUCKETS); 1598 1596 if (!ovs_net->ct_limit_info->limits) { 1599 1597 kfree(ovs_net->ct_limit_info); 1600 1598 return -ENOMEM;
+1 -2
net/openvswitch/datapath.c
··· 1797 1797 { 1798 1798 int i; 1799 1799 1800 - dp->ports = kmalloc_objs(struct hlist_head, DP_VPORT_HASH_BUCKETS, 1801 - GFP_KERNEL); 1800 + dp->ports = kmalloc_objs(struct hlist_head, DP_VPORT_HASH_BUCKETS); 1802 1801 if (!dp->ports) 1803 1802 return -ENOMEM; 1804 1803
+1 -2
net/openvswitch/vport.c
··· 34 34 */ 35 35 int ovs_vport_init(void) 36 36 { 37 - dev_table = kzalloc_objs(struct hlist_head, VPORT_HASH_BUCKETS, 38 - GFP_KERNEL); 37 + dev_table = kzalloc_objs(struct hlist_head, VPORT_HASH_BUCKETS); 39 38 if (!dev_table) 40 39 return -ENOMEM; 41 40
+1 -2
net/packet/af_packet.c
··· 1754 1754 /* legacy PACKET_FANOUT_MAX */ 1755 1755 args->max_num_members = 256; 1756 1756 err = -ENOMEM; 1757 - match = kvzalloc_flex(*match, arr, args->max_num_members, 1758 - GFP_KERNEL); 1757 + match = kvzalloc_flex(*match, arr, args->max_num_members); 1759 1758 if (!match) 1760 1759 goto out; 1761 1760 write_pnet(&match->net, sock_net(sk));
+1 -2
net/rds/ib.c
··· 172 172 rds_ibdev->max_initiator_depth = device->attrs.max_qp_init_rd_atom; 173 173 rds_ibdev->max_responder_resources = device->attrs.max_qp_rd_atom; 174 174 175 - rds_ibdev->vector_load = kzalloc_objs(int, device->num_comp_vectors, 176 - GFP_KERNEL); 175 + rds_ibdev->vector_load = kzalloc_objs(int, device->num_comp_vectors); 177 176 if (!rds_ibdev->vector_load) { 178 177 pr_err("RDS/IB: %s failed to allocate vector memory\n", 179 178 __func__);
+1 -2
net/rds/rdma.c
··· 934 934 * would have to use GFP_ATOMIC there, and don't want to deal 935 935 * with failed allocations. 936 936 */ 937 - rm->atomic.op_notifier = kmalloc_obj(*rm->atomic.op_notifier, 938 - GFP_KERNEL); 937 + rm->atomic.op_notifier = kmalloc_obj(*rm->atomic.op_notifier); 939 938 if (!rm->atomic.op_notifier) { 940 939 ret = -ENOMEM; 941 940 goto err;
+1 -2
net/sched/cls_api.c
··· 3341 3341 * This reference might be taken later from tcf_exts_get_net(). 3342 3342 */ 3343 3343 exts->net = net; 3344 - exts->actions = kzalloc_objs(struct tc_action *, TCA_ACT_MAX_PRIO, 3345 - GFP_KERNEL); 3344 + exts->actions = kzalloc_objs(struct tc_action *, TCA_ACT_MAX_PRIO); 3346 3345 if (!exts->actions) 3347 3346 return -ENOMEM; 3348 3347 #endif
+1 -2
net/sched/cls_u32.c
··· 1480 1480 #ifdef CONFIG_NET_CLS_ACT 1481 1481 pr_info(" Actions configured\n"); 1482 1482 #endif 1483 - tc_u_common_hash = kvmalloc_objs(struct hlist_head, U32_HASH_SIZE, 1484 - GFP_KERNEL); 1483 + tc_u_common_hash = kvmalloc_objs(struct hlist_head, U32_HASH_SIZE); 1485 1484 if (!tc_u_common_hash) 1486 1485 return -ENOMEM; 1487 1486
+1 -2
net/sched/sch_cake.c
··· 2849 2849 for (i = 1; i <= CAKE_QUEUES; i++) 2850 2850 quantum_div[i] = 65535 / i; 2851 2851 2852 - qd->tins = kvzalloc_objs(struct cake_tin_data, CAKE_MAX_TINS, 2853 - GFP_KERNEL); 2852 + qd->tins = kvzalloc_objs(struct cake_tin_data, CAKE_MAX_TINS); 2854 2853 if (!qd->tins) 2855 2854 return -ENOMEM; 2856 2855
+1 -2
net/sched/sch_fq_codel.c
··· 496 496 goto init_failure; 497 497 498 498 if (!q->flows) { 499 - q->flows = kvzalloc_objs(struct fq_codel_flow, q->flows_cnt, 500 - GFP_KERNEL); 499 + q->flows = kvzalloc_objs(struct fq_codel_flow, q->flows_cnt); 501 500 if (!q->flows) { 502 501 err = -ENOMEM; 503 502 goto init_failure;
+1 -2
net/sched/sch_hhf.c
··· 604 604 605 605 if (!q->hh_flows) { 606 606 /* Initialize heavy-hitter flow table. */ 607 - q->hh_flows = kvzalloc_objs(struct list_head, HH_FLOWS_CNT, 608 - GFP_KERNEL); 607 + q->hh_flows = kvzalloc_objs(struct list_head, HH_FLOWS_CNT); 609 608 if (!q->hh_flows) 610 609 return -ENOMEM; 611 610 for (i = 0; i < HH_FLOWS_CNT; i++)
+1 -2
net/sched/sch_mq.c
··· 82 82 return -EOPNOTSUPP; 83 83 84 84 /* pre-allocate qdiscs, attachment can't fail */ 85 - priv->qdiscs = kzalloc_objs(priv->qdiscs[0], dev->num_tx_queues, 86 - GFP_KERNEL); 85 + priv->qdiscs = kzalloc_objs(priv->qdiscs[0], dev->num_tx_queues); 87 86 if (!priv->qdiscs) 88 87 return -ENOMEM; 89 88
+1 -2
net/sched/sch_mqprio.c
··· 388 388 } 389 389 390 390 /* pre-allocate qdisc, attachment can't fail */ 391 - priv->qdiscs = kzalloc_objs(priv->qdiscs[0], dev->num_tx_queues, 392 - GFP_KERNEL); 391 + priv->qdiscs = kzalloc_objs(priv->qdiscs[0], dev->num_tx_queues); 393 392 if (!priv->qdiscs) 394 393 return -ENOMEM; 395 394
+1 -2
net/sched/sch_taprio.c
··· 1376 1376 { 1377 1377 struct __tc_taprio_qopt_offload *__offload; 1378 1378 1379 - __offload = kzalloc_flex(*__offload, offload.entries, num_entries, 1380 - GFP_KERNEL); 1379 + __offload = kzalloc_flex(*__offload, offload.entries, num_entries); 1381 1380 if (!__offload) 1382 1381 return NULL; 1383 1382
+5 -10
net/smc/smc_wr.c
··· 749 749 GFP_KERNEL); 750 750 if (!link->wr_rx_bufs) 751 751 goto no_mem_wr_tx_bufs; 752 - link->wr_tx_ibs = kzalloc_objs(link->wr_tx_ibs[0], link->max_send_wr, 753 - GFP_KERNEL); 752 + link->wr_tx_ibs = kzalloc_objs(link->wr_tx_ibs[0], link->max_send_wr); 754 753 if (!link->wr_tx_ibs) 755 754 goto no_mem_wr_rx_bufs; 756 - link->wr_rx_ibs = kzalloc_objs(link->wr_rx_ibs[0], link->max_recv_wr, 757 - GFP_KERNEL); 755 + link->wr_rx_ibs = kzalloc_objs(link->wr_rx_ibs[0], link->max_recv_wr); 758 756 if (!link->wr_rx_ibs) 759 757 goto no_mem_wr_tx_ibs; 760 758 link->wr_tx_rdmas = kzalloc_objs(link->wr_tx_rdmas[0], ··· 763 765 link->max_send_wr, GFP_KERNEL); 764 766 if (!link->wr_tx_rdma_sges) 765 767 goto no_mem_wr_tx_rdmas; 766 - link->wr_tx_sges = kzalloc_objs(link->wr_tx_sges[0], link->max_send_wr, 767 - GFP_KERNEL); 768 + link->wr_tx_sges = kzalloc_objs(link->wr_tx_sges[0], link->max_send_wr); 768 769 if (!link->wr_tx_sges) 769 770 goto no_mem_wr_tx_rdma_sges; 770 771 link->wr_rx_sges = kcalloc(link->max_recv_wr, ··· 787 790 link->wr_tx_v2_ib = kzalloc_obj(*link->wr_tx_v2_ib); 788 791 if (!link->wr_tx_v2_ib) 789 792 goto no_mem_tx_compl; 790 - link->wr_tx_v2_sge = kzalloc_obj(*link->wr_tx_v2_sge, 791 - GFP_KERNEL); 793 + link->wr_tx_v2_sge = kzalloc_obj(*link->wr_tx_v2_sge); 792 794 if (!link->wr_tx_v2_sge) 793 795 goto no_mem_v2_ib; 794 - link->wr_tx_v2_pend = kzalloc_obj(*link->wr_tx_v2_pend, 795 - GFP_KERNEL); 796 + link->wr_tx_v2_pend = kzalloc_obj(*link->wr_tx_v2_pend); 796 797 if (!link->wr_tx_v2_pend) 797 798 goto no_mem_v2_sge; 798 799 }
+1 -2
net/sunrpc/auth_gss/auth_gss.c
··· 1817 1817 last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_SHIFT; 1818 1818 rqstp->rq_enc_pages_num = last - first + 1 + 1; 1819 1819 rqstp->rq_enc_pages 1820 - = kmalloc_objs(struct page *, rqstp->rq_enc_pages_num, 1821 - GFP_KERNEL); 1820 + = kmalloc_objs(struct page *, rqstp->rq_enc_pages_num); 1822 1821 if (!rqstp->rq_enc_pages) 1823 1822 goto out; 1824 1823 for (i=0; i < rqstp->rq_enc_pages_num; i++) {
+1 -2
net/sunrpc/cache.c
··· 1745 1745 if (cd == NULL) 1746 1746 return ERR_PTR(-ENOMEM); 1747 1747 1748 - cd->hash_table = kzalloc_objs(struct hlist_head, cd->hash_size, 1749 - GFP_KERNEL); 1748 + cd->hash_table = kzalloc_objs(struct hlist_head, cd->hash_size); 1750 1749 if (cd->hash_table == NULL) { 1751 1750 kfree(cd); 1752 1751 return ERR_PTR(-ENOMEM);
+1 -2
net/sunrpc/svcsock.c
··· 1441 1441 return ERR_PTR(-ENOMEM); 1442 1442 1443 1443 if (sendpages) { 1444 - svsk->sk_bvec = kzalloc_objs(*svsk->sk_bvec, sendpages, 1445 - GFP_KERNEL); 1444 + svsk->sk_bvec = kzalloc_objs(*svsk->sk_bvec, sendpages); 1446 1445 if (!svsk->sk_bvec) { 1447 1446 kfree(svsk); 1448 1447 return ERR_PTR(-ENOMEM);
+1 -2
net/unix/af_unix.c
··· 3798 3798 goto err_sysctl; 3799 3799 #endif 3800 3800 3801 - net->unx.table.locks = kvmalloc_objs(spinlock_t, UNIX_HASH_SIZE, 3802 - GFP_KERNEL); 3801 + net->unx.table.locks = kvmalloc_objs(spinlock_t, UNIX_HASH_SIZE); 3803 3802 if (!net->unx.table.locks) 3804 3803 goto err_proc; 3805 3804
+3 -6
net/wireless/nl80211.c
··· 1106 1106 struct nlattr **attrbuf_free = NULL; 1107 1107 1108 1108 if (!attrbuf) { 1109 - attrbuf = kzalloc_objs(*attrbuf, NUM_NL80211_ATTR, 1110 - GFP_KERNEL); 1109 + attrbuf = kzalloc_objs(*attrbuf, NUM_NL80211_ATTR); 1111 1110 if (!attrbuf) 1112 1111 return -ENOMEM; 1113 1112 attrbuf_free = attrbuf; ··· 15298 15299 if (n_patterns > coalesce->n_patterns) 15299 15300 return -EINVAL; 15300 15301 15301 - new_rule->patterns = kzalloc_objs(new_rule->patterns[0], n_patterns, 15302 - GFP_KERNEL); 15302 + new_rule->patterns = kzalloc_objs(new_rule->patterns[0], n_patterns); 15303 15303 if (!new_rule->patterns) 15304 15304 return -ENOMEM; 15305 15305 ··· 16092 16094 16093 16095 func->srf_num_macs = n_entries; 16094 16096 func->srf_macs = 16095 - kzalloc_objs(*func->srf_macs, n_entries, 16096 - GFP_KERNEL); 16097 + kzalloc_objs(*func->srf_macs, n_entries); 16097 16098 if (!func->srf_macs) { 16098 16099 err = -ENOMEM; 16099 16100 goto out;
+2 -4
net/xdp/xsk_buff_pool.c
··· 42 42 43 43 int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs) 44 44 { 45 - pool->tx_descs = kvzalloc_objs(*pool->tx_descs, xs->tx->nentries, 46 - GFP_KERNEL); 45 + pool->tx_descs = kvzalloc_objs(*pool->tx_descs, xs->tx->nentries); 47 46 if (!pool->tx_descs) 48 47 return -ENOMEM; 49 48 ··· 331 332 if (!dma_map) 332 333 return NULL; 333 334 334 - dma_map->dma_pages = kvzalloc_objs(*dma_map->dma_pages, nr_pages, 335 - GFP_KERNEL); 335 + dma_map->dma_pages = kvzalloc_objs(*dma_map->dma_pages, nr_pages); 336 336 if (!dma_map->dma_pages) { 337 337 kfree(dma_map); 338 338 return NULL;
+1 -2
security/apparmor/policy_unpack.c
··· 611 611 if (!aa_unpack_array(e, NULL, &size)) 612 612 goto fail; 613 613 614 - rules->secmark = kzalloc_objs(struct aa_secmark, size, 615 - GFP_KERNEL); 614 + rules->secmark = kzalloc_objs(struct aa_secmark, size); 616 615 if (!rules->secmark) 617 616 goto fail; 618 617
+1 -2
security/integrity/ima/ima_policy.c
··· 921 921 for (rules = arch_rules; *rules != NULL; rules++) 922 922 arch_entries++; 923 923 924 - arch_policy_entry = kzalloc_objs(*arch_policy_entry, arch_entries + 1, 925 - GFP_KERNEL); 924 + arch_policy_entry = kzalloc_objs(*arch_policy_entry, arch_entries + 1); 926 925 if (!arch_policy_entry) 927 926 return 0; 928 927
+1 -2
security/selinux/ss/conditional.c
··· 631 631 return rc; 632 632 633 633 newp->cond_list_len = 0; 634 - newp->cond_list = kzalloc_objs(*newp->cond_list, origp->cond_list_len, 635 - GFP_KERNEL); 634 + newp->cond_list = kzalloc_objs(*newp->cond_list, origp->cond_list_len); 636 635 if (!newp->cond_list) 637 636 goto error; 638 637
+1 -2
security/selinux/ss/policydb.c
··· 1296 1296 return rc; 1297 1297 if (p->policyvers >= 1298 1298 POLICYDB_VERSION_CONSTRAINT_NAMES) { 1299 - e->type_names = kzalloc_obj(*e->type_names, 1300 - GFP_KERNEL); 1299 + e->type_names = kzalloc_obj(*e->type_names); 1301 1300 if (!e->type_names) 1302 1301 return -ENOMEM; 1303 1302 type_set_init(e->type_names);
+2 -4
sound/aoa/codecs/onyx.c
··· 855 855 /* if no inputs are present... */ 856 856 if ((onyx->codec.connected & 0xC) == 0) { 857 857 if (!onyx->codec_info) 858 - onyx->codec_info = kmalloc_obj(struct codec_info, 859 - GFP_KERNEL); 858 + onyx->codec_info = kmalloc_obj(struct codec_info); 860 859 if (!onyx->codec_info) 861 860 return -ENOMEM; 862 861 ci = onyx->codec_info; ··· 866 867 /* if no outputs are present... */ 867 868 if ((onyx->codec.connected & 3) == 0) { 868 869 if (!onyx->codec_info) 869 - onyx->codec_info = kmalloc_obj(struct codec_info, 870 - GFP_KERNEL); 870 + onyx->codec_info = kmalloc_obj(struct codec_info); 871 871 if (!onyx->codec_info) 872 872 return -ENOMEM; 873 873 ci = onyx->codec_info;
+1 -2
sound/core/oss/pcm_plugin.c
··· 163 163 channels = src_format->channels; 164 164 else 165 165 channels = dst_format->channels; 166 - plugin->buf_channels = kzalloc_objs(*plugin->buf_channels, channels, 167 - GFP_KERNEL); 166 + plugin->buf_channels = kzalloc_objs(*plugin->buf_channels, channels); 168 167 if (plugin->buf_channels == NULL) { 169 168 snd_pcm_plugin_free(plugin); 170 169 return -ENOMEM;
+1 -2
sound/core/seq/seq_memory.c
··· 441 441 if (snd_BUG_ON(!pool)) 442 442 return -EINVAL; 443 443 444 - cellptr = kvmalloc_objs(struct snd_seq_event_cell, pool->size, 445 - GFP_KERNEL); 444 + cellptr = kvmalloc_objs(struct snd_seq_event_cell, pool->size); 446 445 if (!cellptr) 447 446 return -ENOMEM; 448 447
+1 -2
sound/core/ump.c
··· 1352 1352 bool input, output; 1353 1353 int err, num; 1354 1354 1355 - ump->out_cvts = kzalloc_objs(*ump->out_cvts, SNDRV_UMP_MAX_GROUPS, 1356 - GFP_KERNEL); 1355 + ump->out_cvts = kzalloc_objs(*ump->out_cvts, SNDRV_UMP_MAX_GROUPS); 1357 1356 if (!ump->out_cvts) 1358 1357 return -ENOMEM; 1359 1358
+2 -4
sound/drivers/vx/vx_pcm.c
··· 1154 1154 chip->audio_info = rmh.Stat[1]; 1155 1155 1156 1156 /* allocate pipes */ 1157 - chip->playback_pipes = kzalloc_objs(struct vx_pipe *, chip->audio_outs, 1158 - GFP_KERNEL); 1157 + chip->playback_pipes = kzalloc_objs(struct vx_pipe *, chip->audio_outs); 1159 1158 if (!chip->playback_pipes) 1160 1159 return -ENOMEM; 1161 - chip->capture_pipes = kzalloc_objs(struct vx_pipe *, chip->audio_ins, 1162 - GFP_KERNEL); 1160 + chip->capture_pipes = kzalloc_objs(struct vx_pipe *, chip->audio_ins); 1163 1161 if (!chip->capture_pipes) { 1164 1162 kfree(chip->playback_pipes); 1165 1163 return -ENOMEM;
+1 -2
sound/hda/codecs/ca0132.c
··· 9831 9831 */ 9832 9832 if (ca0132_use_pci_mmio(spec)) 9833 9833 spec->desktop_init_verbs = ca0132_init_verbs1; 9834 - spec->spec_init_verbs = kzalloc_objs(struct hda_verb, NUM_SPEC_VERBS, 9835 - GFP_KERNEL); 9834 + spec->spec_init_verbs = kzalloc_objs(struct hda_verb, NUM_SPEC_VERBS); 9836 9835 if (!spec->spec_init_verbs) 9837 9836 return -ENOMEM; 9838 9837
+1 -2
sound/hda/common/proc.c
··· 845 845 if (wid_caps & AC_WCAP_CONN_LIST) { 846 846 conn_len = snd_hda_get_num_raw_conns(codec, nid); 847 847 if (conn_len > 0) { 848 - conn = kmalloc_objs(hda_nid_t, conn_len, 849 - GFP_KERNEL); 848 + conn = kmalloc_objs(hda_nid_t, conn_len); 850 849 if (!conn) 851 850 return; 852 851 if (snd_hda_get_raw_connections(codec, nid, conn,
+1 -2
sound/hda/core/ext/stream.c
··· 101 101 setup_op = snd_hdac_stream_setup; 102 102 103 103 for (i = 0; i < num_stream; i++) { 104 - struct hdac_ext_stream *hext_stream = kzalloc_obj(*hext_stream, 105 - GFP_KERNEL); 104 + struct hdac_ext_stream *hext_stream = kzalloc_obj(*hext_stream); 106 105 if (!hext_stream) 107 106 return -ENOMEM; 108 107 tag = ++stream_tag;
+1 -2
sound/hda/core/sysfs.c
··· 374 374 if (!tree->root) 375 375 return -ENOMEM; 376 376 377 - tree->nodes = kzalloc_objs(*tree->nodes, codec->num_nodes + 1, 378 - GFP_KERNEL); 377 + tree->nodes = kzalloc_objs(*tree->nodes, codec->num_nodes + 1); 379 378 if (!tree->nodes) 380 379 return -ENOMEM; 381 380
+2 -4
sound/pci/cs46xx/dsp_spos.c
··· 221 221 222 222 struct dsp_spos_instance *cs46xx_dsp_spos_create (struct snd_cs46xx * chip) 223 223 { 224 - struct dsp_spos_instance * ins = kzalloc_obj(struct dsp_spos_instance, 225 - GFP_KERNEL); 224 + struct dsp_spos_instance * ins = kzalloc_obj(struct dsp_spos_instance); 226 225 227 226 if (ins == NULL) 228 227 return NULL; ··· 231 232 vmalloc(array_size(DSP_MAX_SYMBOLS, 232 233 sizeof(struct dsp_symbol_entry))); 233 234 ins->code.data = kmalloc(DSP_CODE_BYTE_SIZE, GFP_KERNEL); 234 - ins->modules = kmalloc_objs(struct dsp_module_desc, DSP_MAX_MODULES, 235 - GFP_KERNEL); 235 + ins->modules = kmalloc_objs(struct dsp_module_desc, DSP_MAX_MODULES); 236 236 if (!ins->symbol_table.symbols || !ins->code.data || !ins->modules) { 237 237 cs46xx_dsp_spos_destroy(chip); 238 238 goto error;
+2 -4
sound/pci/emu10k1/emufx.c
··· 1286 1286 if (!icode) 1287 1287 return err; 1288 1288 1289 - icode->gpr_map = kzalloc_objs(u_int32_t, 512 + 256 + 256 + 2 * 1024, 1290 - GFP_KERNEL); 1289 + icode->gpr_map = kzalloc_objs(u_int32_t, 512 + 256 + 256 + 2 * 1024); 1291 1290 if (!icode->gpr_map) 1292 1291 goto __err_gpr; 1293 1292 controls = kzalloc_objs(*controls, SND_EMU10K1_GPR_CONTROLS); ··· 1802 1803 if (!icode) 1803 1804 return err; 1804 1805 1805 - icode->gpr_map = kzalloc_objs(u_int32_t, 256 + 160 + 160 + 2 * 512, 1806 - GFP_KERNEL); 1806 + icode->gpr_map = kzalloc_objs(u_int32_t, 256 + 160 + 160 + 2 * 512); 1807 1807 if (!icode->gpr_map) 1808 1808 goto __err_gpr; 1809 1809
+1 -2
sound/pci/via82xx.c
··· 423 423 return -ENOMEM; 424 424 } 425 425 if (! dev->idx_table) { 426 - dev->idx_table = kmalloc_objs(*dev->idx_table, VIA_TABLE_SIZE, 427 - GFP_KERNEL); 426 + dev->idx_table = kmalloc_objs(*dev->idx_table, VIA_TABLE_SIZE); 428 427 if (! dev->idx_table) 429 428 return -ENOMEM; 430 429 }
+1 -2
sound/pci/via82xx_modem.c
··· 278 278 return -ENOMEM; 279 279 } 280 280 if (! dev->idx_table) { 281 - dev->idx_table = kmalloc_objs(*dev->idx_table, VIA_TABLE_SIZE, 282 - GFP_KERNEL); 281 + dev->idx_table = kmalloc_objs(*dev->idx_table, VIA_TABLE_SIZE); 283 282 if (! dev->idx_table) 284 283 return -ENOMEM; 285 284 }
+1 -2
sound/soc/au1x/dma.c
··· 89 89 pointer->start = (u32)(dma_start + (i * period_bytes)); 90 90 pointer->relative_end = (u32) (((i+1) * period_bytes) - 0x1); 91 91 if (i < periods - 1) { 92 - pointer->next = kmalloc_obj(struct pcm_period, 93 - GFP_KERNEL); 92 + pointer->next = kmalloc_obj(struct pcm_period); 94 93 if (!pointer->next) { 95 94 au1000_release_dma_link(stream); 96 95 return -ENOMEM;
+1 -2
sound/soc/codecs/aw88395/aw88395_lib.c
··· 665 665 { 666 666 int ret; 667 667 668 - struct aw_all_prof_info *all_prof_info __free(kfree) = kzalloc_obj(*all_prof_info, 669 - GFP_KERNEL); 668 + struct aw_all_prof_info *all_prof_info __free(kfree) = kzalloc_obj(*all_prof_info); 670 669 if (!all_prof_info) 671 670 return -ENOMEM; 672 671
+1 -2
sound/soc/codecs/da7219.c
··· 2143 2143 2144 2144 /* For DT platforms allocate onecell data for clock registration */ 2145 2145 if (np) { 2146 - clk_data = kzalloc_flex(*clk_data, hws, DA7219_DAI_NUM_CLKS, 2147 - GFP_KERNEL); 2146 + clk_data = kzalloc_flex(*clk_data, hws, DA7219_DAI_NUM_CLKS); 2148 2147 if (!clk_data) 2149 2148 return -ENOMEM; 2150 2149
+2 -4
sound/soc/codecs/lpass-rx-macro.c
··· 3820 3820 rx->rxn_reg_stride = 0x80; 3821 3821 rx->rxn_reg_stride2 = 0xc; 3822 3822 def_count = ARRAY_SIZE(rx_defaults) + ARRAY_SIZE(rx_pre_2_5_defaults); 3823 - reg_defaults = kmalloc_objs(struct reg_default, def_count, 3824 - GFP_KERNEL); 3823 + reg_defaults = kmalloc_objs(struct reg_default, def_count); 3825 3824 if (!reg_defaults) 3826 3825 return -ENOMEM; 3827 3826 memcpy(&reg_defaults[0], rx_defaults, sizeof(rx_defaults)); ··· 3834 3835 rx->rxn_reg_stride = 0xc0; 3835 3836 rx->rxn_reg_stride2 = 0x0; 3836 3837 def_count = ARRAY_SIZE(rx_defaults) + ARRAY_SIZE(rx_2_5_defaults); 3837 - reg_defaults = kmalloc_objs(struct reg_default, def_count, 3838 - GFP_KERNEL); 3838 + reg_defaults = kmalloc_objs(struct reg_default, def_count); 3839 3839 if (!reg_defaults) 3840 3840 return -ENOMEM; 3841 3841 memcpy(&reg_defaults[0], rx_defaults, sizeof(rx_defaults));
+2 -4
sound/soc/codecs/lpass-wsa-macro.c
··· 2725 2725 case LPASS_CODEC_VERSION_2_1: 2726 2726 wsa->reg_layout = &wsa_codec_v2_1; 2727 2727 def_count = ARRAY_SIZE(wsa_defaults) + ARRAY_SIZE(wsa_defaults_v2_1); 2728 - reg_defaults = kmalloc_objs(*reg_defaults, def_count, 2729 - GFP_KERNEL); 2728 + reg_defaults = kmalloc_objs(*reg_defaults, def_count); 2730 2729 if (!reg_defaults) 2731 2730 return -ENOMEM; 2732 2731 memcpy(&reg_defaults[0], wsa_defaults, sizeof(wsa_defaults)); ··· 2740 2741 case LPASS_CODEC_VERSION_2_9: 2741 2742 wsa->reg_layout = &wsa_codec_v2_5; 2742 2743 def_count = ARRAY_SIZE(wsa_defaults) + ARRAY_SIZE(wsa_defaults_v2_5); 2743 - reg_defaults = kmalloc_objs(*reg_defaults, def_count, 2744 - GFP_KERNEL); 2744 + reg_defaults = kmalloc_objs(*reg_defaults, def_count); 2745 2745 if (!reg_defaults) 2746 2746 return -ENOMEM; 2747 2747 memcpy(&reg_defaults[0], wsa_defaults, sizeof(wsa_defaults));
+4 -8
sound/soc/codecs/tas2781-fmwlib.c
··· 509 509 img_data->nr_blk = get_unaligned_be32(&data[offset]); 510 510 offset += 4; 511 511 512 - img_data->dev_blks = kzalloc_objs(struct tasdev_blk, img_data->nr_blk, 513 - GFP_KERNEL); 512 + img_data->dev_blks = kzalloc_objs(struct tasdev_blk, img_data->nr_blk); 514 513 if (!img_data->dev_blks) { 515 514 offset = -ENOMEM; 516 515 goto out; ··· 1238 1239 img_data->nr_blk = get_unaligned_be16(&data[offset]); 1239 1240 offset += 2; 1240 1241 1241 - img_data->dev_blks = kzalloc_objs(struct tasdev_blk, img_data->nr_blk, 1242 - GFP_KERNEL); 1242 + img_data->dev_blks = kzalloc_objs(struct tasdev_blk, img_data->nr_blk); 1243 1243 if (!img_data->dev_blks) { 1244 1244 offset = -ENOMEM; 1245 1245 goto out; ··· 1282 1284 } 1283 1285 1284 1286 tas_fmw->programs = 1285 - kzalloc_objs(struct tasdevice_prog, tas_fmw->nr_programs, 1286 - GFP_KERNEL); 1287 + kzalloc_objs(struct tasdevice_prog, tas_fmw->nr_programs); 1287 1288 if (!tas_fmw->programs) { 1288 1289 offset = -ENOMEM; 1289 1290 goto out; ··· 2204 2207 fmw.size = fw_entry->size; 2205 2208 fmw.data = fw_entry->data; 2206 2209 2207 - tas_fmw = tasdev->cali_data_fmw = kzalloc_obj(struct tasdevice_fw, 2208 - GFP_KERNEL); 2210 + tas_fmw = tasdev->cali_data_fmw = kzalloc_obj(struct tasdevice_fw); 2209 2211 if (!tasdev->cali_data_fmw) { 2210 2212 ret = -ENOMEM; 2211 2213 goto out;
+1 -2
sound/soc/codecs/wm_adsp.c
··· 1430 1430 u32 offset = 0; 1431 1431 int i, ret; 1432 1432 1433 - buf->regions = kzalloc_objs(*buf->regions, caps->num_regions, 1434 - GFP_KERNEL); 1433 + buf->regions = kzalloc_objs(*buf->regions, caps->num_regions); 1435 1434 if (!buf->regions) 1436 1435 return -ENOMEM; 1437 1436
+1 -2
sound/soc/soc-dapm.c
··· 3972 3972 * stuff that increases stack usage. 3973 3973 * So, we use kzalloc()/kfree() for params in this function. 3974 3974 */ 3975 - struct snd_pcm_hw_params *params __free(kfree) = kzalloc_obj(*params, 3976 - GFP_KERNEL); 3975 + struct snd_pcm_hw_params *params __free(kfree) = kzalloc_obj(*params); 3977 3976 if (!params) 3978 3977 return -ENOMEM; 3979 3978
+1 -2
sound/soc/sof/ipc3-topology.c
··· 1622 1622 continue; 1623 1623 1624 1624 /* Reserve memory for all hw configs, eventually freed by widget */ 1625 - config = kzalloc_objs(*config, slink->num_hw_configs, 1626 - GFP_KERNEL); 1625 + config = kzalloc_objs(*config, slink->num_hw_configs); 1627 1626 if (!config) { 1628 1627 ret = -ENOMEM; 1629 1628 goto free_comp;
+1 -2
sound/soc/sof/ipc4-topology.c
··· 865 865 SOF_IPC4_NODE_INDEX_INTEL_DMIC(ipc4_copier->dai_index); 866 866 break; 867 867 default: 868 - ipc4_copier->gtw_attr = kzalloc_obj(*ipc4_copier->gtw_attr, 869 - GFP_KERNEL); 868 + ipc4_copier->gtw_attr = kzalloc_obj(*ipc4_copier->gtw_attr); 870 869 if (!ipc4_copier->gtw_attr) { 871 870 ret = -ENOMEM; 872 871 goto free_available_fmt;
+1 -2
sound/synth/emux/emux.c
··· 86 86 87 87 emu->card = card; 88 88 emu->name = kstrdup_const(name, GFP_KERNEL); 89 - emu->voices = kzalloc_objs(struct snd_emux_voice, emu->max_voices, 90 - GFP_KERNEL); 89 + emu->voices = kzalloc_objs(struct snd_emux_voice, emu->max_voices); 91 90 if (emu->name == NULL || emu->voices == NULL) 92 91 return -ENOMEM; 93 92
+1 -2
sound/synth/emux/emux_seq.c
··· 136 136 if (!p) 137 137 return NULL; 138 138 139 - p->chset.channels = kzalloc_objs(*p->chset.channels, max_channels, 140 - GFP_KERNEL); 139 + p->chset.channels = kzalloc_objs(*p->chset.channels, max_channels); 141 140 if (!p->chset.channels) { 142 141 kfree(p); 143 142 return NULL;
+1 -2
sound/usb/6fire/control.c
··· 551 551 { 552 552 int i; 553 553 int ret; 554 - struct control_runtime *rt = kzalloc_obj(struct control_runtime, 555 - GFP_KERNEL); 554 + struct control_runtime *rt = kzalloc_obj(struct control_runtime); 556 555 struct comm_runtime *comm_rt = chip->comm; 557 556 558 557 if (!rt)
+1 -2
sound/usb/line6/capture.c
··· 255 255 struct usb_line6 *line6 = line6pcm->line6; 256 256 int i; 257 257 258 - line6pcm->in.urbs = kzalloc_objs(struct urb *, line6->iso_buffers, 259 - GFP_KERNEL); 258 + line6pcm->in.urbs = kzalloc_objs(struct urb *, line6->iso_buffers); 260 259 if (line6pcm->in.urbs == NULL) 261 260 return -ENOMEM; 262 261
+1 -2
sound/usb/line6/playback.c
··· 404 404 struct usb_line6 *line6 = line6pcm->line6; 405 405 int i; 406 406 407 - line6pcm->out.urbs = kzalloc_objs(struct urb *, line6->iso_buffers, 408 - GFP_KERNEL); 407 + line6pcm->out.urbs = kzalloc_objs(struct urb *, line6->iso_buffers); 409 408 if (line6pcm->out.urbs == NULL) 410 409 return -ENOMEM; 411 410
+1 -2
sound/usb/mixer.c
··· 3603 3603 return -ENOMEM; 3604 3604 mixer->chip = chip; 3605 3605 mixer->ignore_ctl_error = !!(chip->quirk_flags & QUIRK_FLAG_IGNORE_CTL_ERROR); 3606 - mixer->id_elems = kzalloc_objs(*mixer->id_elems, MAX_ID_ELEMS, 3607 - GFP_KERNEL); 3606 + mixer->id_elems = kzalloc_objs(*mixer->id_elems, MAX_ID_ELEMS); 3608 3607 if (!mixer->id_elems) { 3609 3608 kfree(mixer); 3610 3609 return -ENOMEM;
+1 -2
sound/usb/mixer_quirks.c
··· 274 274 mixer->rc_urb = usb_alloc_urb(0, GFP_KERNEL); 275 275 if (!mixer->rc_urb) 276 276 return -ENOMEM; 277 - mixer->rc_setup_packet = kmalloc_obj(*mixer->rc_setup_packet, 278 - GFP_KERNEL); 277 + mixer->rc_setup_packet = kmalloc_obj(*mixer->rc_setup_packet); 279 278 if (!mixer->rc_setup_packet) { 280 279 usb_free_urb(mixer->rc_urb); 281 280 mixer->rc_urb = NULL;
+1 -2
sound/usb/usx2y/usbusx2yaudio.c
··· 942 942 943 943 for (i = playback_endpoint ? SNDRV_PCM_STREAM_PLAYBACK : SNDRV_PCM_STREAM_CAPTURE; 944 944 i <= SNDRV_PCM_STREAM_CAPTURE; ++i) { 945 - usx2y_substream[i] = kzalloc_obj(struct snd_usx2y_substream, 946 - GFP_KERNEL); 945 + usx2y_substream[i] = kzalloc_obj(struct snd_usx2y_substream); 947 946 if (!usx2y_substream[i]) 948 947 return -ENOMEM; 949 948
+1 -2
sound/xen/xen_snd_front_alsa.c
··· 442 442 443 443 stream->buffer_sz = buffer_sz; 444 444 stream->num_pages = DIV_ROUND_UP(stream->buffer_sz, PAGE_SIZE); 445 - stream->pages = kzalloc_objs(struct page *, stream->num_pages, 446 - GFP_KERNEL); 445 + stream->pages = kzalloc_objs(struct page *, stream->num_pages); 447 446 if (!stream->pages) 448 447 return -ENOMEM; 449 448