Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

cross-tree: phase out dma_zalloc_coherent()

We already need to zero out memory for dma_alloc_coherent(), as such
using dma_zalloc_coherent() is superfluous. Phase it out.

This change was generated with the following Coccinelle SmPL patch:

@ replace_dma_zalloc_coherent @
expression dev, size, data, handle, flags;
@@

-dma_zalloc_coherent(dev, size, handle, flags)
+dma_alloc_coherent(dev, size, handle, flags)

Suggested-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Luis Chamberlain <mcgrof@kernel.org>
[hch: re-ran the script on the latest tree]
Signed-off-by: Christoph Hellwig <hch@lst.de>

authored by

Luis Chamberlain and committed by
Christoph Hellwig
750afb08 3bd6e94b

+915 -949
+3 -3
arch/mips/lantiq/xway/dma.c
··· 129 129 unsigned long flags; 130 130 131 131 ch->desc = 0; 132 - ch->desc_base = dma_zalloc_coherent(ch->dev, 133 - LTQ_DESC_NUM * LTQ_DESC_SIZE, 134 - &ch->phys, GFP_ATOMIC); 132 + ch->desc_base = dma_alloc_coherent(ch->dev, 133 + LTQ_DESC_NUM * LTQ_DESC_SIZE, 134 + &ch->phys, GFP_ATOMIC); 135 135 136 136 spin_lock_irqsave(&ltq_dma_lock, flags); 137 137 ltq_dma_w32(ch->nr, LTQ_DMA_CS);
+1 -1
arch/powerpc/platforms/pasemi/dma_lib.c
··· 255 255 256 256 chan->ring_size = ring_size; 257 257 258 - chan->ring_virt = dma_zalloc_coherent(&dma_pdev->dev, 258 + chan->ring_virt = dma_alloc_coherent(&dma_pdev->dev, 259 259 ring_size * sizeof(u64), 260 260 &chan->ring_dma, GFP_KERNEL); 261 261
+4 -3
arch/powerpc/sysdev/fsl_rmu.c
··· 756 756 } 757 757 758 758 /* Initialize outbound message descriptor ring */ 759 - rmu->msg_tx_ring.virt = dma_zalloc_coherent(priv->dev, 760 - rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE, 761 - &rmu->msg_tx_ring.phys, GFP_KERNEL); 759 + rmu->msg_tx_ring.virt = dma_alloc_coherent(priv->dev, 760 + rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE, 761 + &rmu->msg_tx_ring.phys, 762 + GFP_KERNEL); 762 763 if (!rmu->msg_tx_ring.virt) { 763 764 rc = -ENOMEM; 764 765 goto out_dma;
+2 -2
drivers/ata/sata_fsl.c
··· 729 729 if (!pp) 730 730 return -ENOMEM; 731 731 732 - mem = dma_zalloc_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, &mem_dma, 733 - GFP_KERNEL); 732 + mem = dma_alloc_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, &mem_dma, 733 + GFP_KERNEL); 734 734 if (!mem) { 735 735 kfree(pp); 736 736 return -ENOMEM;
+19 -20
drivers/atm/he.c
··· 533 533 534 534 static int he_init_tpdrq(struct he_dev *he_dev) 535 535 { 536 - he_dev->tpdrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev, 537 - CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), 538 - &he_dev->tpdrq_phys, GFP_KERNEL); 536 + he_dev->tpdrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev, 537 + CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), 538 + &he_dev->tpdrq_phys, 539 + GFP_KERNEL); 539 540 if (he_dev->tpdrq_base == NULL) { 540 541 hprintk("failed to alloc tpdrq\n"); 541 542 return -ENOMEM; ··· 806 805 goto out_free_rbpl_virt; 807 806 } 808 807 809 - he_dev->rbpl_base = dma_zalloc_coherent(&he_dev->pci_dev->dev, 810 - CONFIG_RBPL_SIZE * sizeof(struct he_rbp), 811 - &he_dev->rbpl_phys, GFP_KERNEL); 808 + he_dev->rbpl_base = dma_alloc_coherent(&he_dev->pci_dev->dev, 809 + CONFIG_RBPL_SIZE * sizeof(struct he_rbp), 810 + &he_dev->rbpl_phys, GFP_KERNEL); 812 811 if (he_dev->rbpl_base == NULL) { 813 812 hprintk("failed to alloc rbpl_base\n"); 814 813 goto out_destroy_rbpl_pool; ··· 845 844 846 845 /* rx buffer ready queue */ 847 846 848 - he_dev->rbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev, 849 - CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), 850 - &he_dev->rbrq_phys, GFP_KERNEL); 847 + he_dev->rbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev, 848 + CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), 849 + &he_dev->rbrq_phys, GFP_KERNEL); 851 850 if (he_dev->rbrq_base == NULL) { 852 851 hprintk("failed to allocate rbrq\n"); 853 852 goto out_free_rbpl; ··· 869 868 870 869 /* tx buffer ready queue */ 871 870 872 - he_dev->tbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev, 873 - CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), 874 - &he_dev->tbrq_phys, GFP_KERNEL); 871 + he_dev->tbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev, 872 + CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), 873 + &he_dev->tbrq_phys, GFP_KERNEL); 875 874 if (he_dev->tbrq_base == NULL) { 876 875 hprintk("failed to allocate tbrq\n"); 877 876 goto out_free_rbpq_base; ··· 914 913 /* 
2.9.3.5 tail offset for each interrupt queue is located after the 915 914 end of the interrupt queue */ 916 915 917 - he_dev->irq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev, 918 - (CONFIG_IRQ_SIZE + 1) 919 - * sizeof(struct he_irq), 920 - &he_dev->irq_phys, 921 - GFP_KERNEL); 916 + he_dev->irq_base = dma_alloc_coherent(&he_dev->pci_dev->dev, 917 + (CONFIG_IRQ_SIZE + 1) * sizeof(struct he_irq), 918 + &he_dev->irq_phys, GFP_KERNEL); 922 919 if (he_dev->irq_base == NULL) { 923 920 hprintk("failed to allocate irq\n"); 924 921 return -ENOMEM; ··· 1463 1464 1464 1465 /* host status page */ 1465 1466 1466 - he_dev->hsp = dma_zalloc_coherent(&he_dev->pci_dev->dev, 1467 - sizeof(struct he_hsp), 1468 - &he_dev->hsp_phys, GFP_KERNEL); 1467 + he_dev->hsp = dma_alloc_coherent(&he_dev->pci_dev->dev, 1468 + sizeof(struct he_hsp), 1469 + &he_dev->hsp_phys, GFP_KERNEL); 1469 1470 if (he_dev->hsp == NULL) { 1470 1471 hprintk("failed to allocate host status page\n"); 1471 1472 return -ENOMEM;
+8 -8
drivers/atm/idt77252.c
··· 641 641 scq = kzalloc(sizeof(struct scq_info), GFP_KERNEL); 642 642 if (!scq) 643 643 return NULL; 644 - scq->base = dma_zalloc_coherent(&card->pcidev->dev, SCQ_SIZE, 645 - &scq->paddr, GFP_KERNEL); 644 + scq->base = dma_alloc_coherent(&card->pcidev->dev, SCQ_SIZE, 645 + &scq->paddr, GFP_KERNEL); 646 646 if (scq->base == NULL) { 647 647 kfree(scq); 648 648 return NULL; ··· 971 971 { 972 972 struct rsq_entry *rsqe; 973 973 974 - card->rsq.base = dma_zalloc_coherent(&card->pcidev->dev, RSQSIZE, 975 - &card->rsq.paddr, GFP_KERNEL); 974 + card->rsq.base = dma_alloc_coherent(&card->pcidev->dev, RSQSIZE, 975 + &card->rsq.paddr, GFP_KERNEL); 976 976 if (card->rsq.base == NULL) { 977 977 printk("%s: can't allocate RSQ.\n", card->name); 978 978 return -1; ··· 3390 3390 writel(0, SAR_REG_GP); 3391 3391 3392 3392 /* Initialize RAW Cell Handle Register */ 3393 - card->raw_cell_hnd = dma_zalloc_coherent(&card->pcidev->dev, 3394 - 2 * sizeof(u32), 3395 - &card->raw_cell_paddr, 3396 - GFP_KERNEL); 3393 + card->raw_cell_hnd = dma_alloc_coherent(&card->pcidev->dev, 3394 + 2 * sizeof(u32), 3395 + &card->raw_cell_paddr, 3396 + GFP_KERNEL); 3397 3397 if (!card->raw_cell_hnd) { 3398 3398 printk("%s: memory allocation failure.\n", card->name); 3399 3399 deinit_card(card);
+2 -2
drivers/block/skd_main.c
··· 2641 2641 "comp pci_alloc, total bytes %zd entries %d\n", 2642 2642 SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY); 2643 2643 2644 - skcomp = dma_zalloc_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE, 2645 - &skdev->cq_dma_address, GFP_KERNEL); 2644 + skcomp = dma_alloc_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE, 2645 + &skdev->cq_dma_address, GFP_KERNEL); 2646 2646 2647 2647 if (skcomp == NULL) { 2648 2648 rc = -ENOMEM;
+3 -3
drivers/crypto/amcc/crypto4xx_core.c
··· 283 283 */ 284 284 static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev) 285 285 { 286 - dev->gdr = dma_zalloc_coherent(dev->core_dev->device, 287 - sizeof(struct ce_gd) * PPC4XX_NUM_GD, 288 - &dev->gdr_pa, GFP_ATOMIC); 286 + dev->gdr = dma_alloc_coherent(dev->core_dev->device, 287 + sizeof(struct ce_gd) * PPC4XX_NUM_GD, 288 + &dev->gdr_pa, GFP_ATOMIC); 289 289 if (!dev->gdr) 290 290 return -ENOMEM; 291 291
+2 -2
drivers/crypto/cavium/cpt/cptpf_main.c
··· 278 278 mcode->num_cores = is_ae ? 6 : 10; 279 279 280 280 /* Allocate DMAable space */ 281 - mcode->code = dma_zalloc_coherent(&cpt->pdev->dev, mcode->code_size, 282 - &mcode->phys_base, GFP_KERNEL); 281 + mcode->code = dma_alloc_coherent(&cpt->pdev->dev, mcode->code_size, 282 + &mcode->phys_base, GFP_KERNEL); 283 283 if (!mcode->code) { 284 284 dev_err(dev, "Unable to allocate space for microcode"); 285 285 ret = -ENOMEM;
+4 -3
drivers/crypto/cavium/cpt/cptvf_main.c
··· 236 236 237 237 c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes : 238 238 rem_q_size; 239 - curr->head = (u8 *)dma_zalloc_coherent(&pdev->dev, 240 - c_size + CPT_NEXT_CHUNK_PTR_SIZE, 241 - &curr->dma_addr, GFP_KERNEL); 239 + curr->head = (u8 *)dma_alloc_coherent(&pdev->dev, 240 + c_size + CPT_NEXT_CHUNK_PTR_SIZE, 241 + &curr->dma_addr, 242 + GFP_KERNEL); 242 243 if (!curr->head) { 243 244 dev_err(&pdev->dev, "Command Q (%d) chunk (%d) allocation failed\n", 244 245 i, queue->nchunks);
+3 -3
drivers/crypto/cavium/nitrox/nitrox_lib.c
··· 25 25 struct nitrox_device *ndev = cmdq->ndev; 26 26 27 27 cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes; 28 - cmdq->unalign_base = dma_zalloc_coherent(DEV(ndev), cmdq->qsize, 29 - &cmdq->unalign_dma, 30 - GFP_KERNEL); 28 + cmdq->unalign_base = dma_alloc_coherent(DEV(ndev), cmdq->qsize, 29 + &cmdq->unalign_dma, 30 + GFP_KERNEL); 31 31 if (!cmdq->unalign_base) 32 32 return -ENOMEM; 33 33
+3 -3
drivers/crypto/ccp/ccp-dev-v5.c
··· 822 822 /* Page alignment satisfies our needs for N <= 128 */ 823 823 BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128); 824 824 cmd_q->qsize = Q_SIZE(Q_DESC_SIZE); 825 - cmd_q->qbase = dma_zalloc_coherent(dev, cmd_q->qsize, 826 - &cmd_q->qbase_dma, 827 - GFP_KERNEL); 825 + cmd_q->qbase = dma_alloc_coherent(dev, cmd_q->qsize, 826 + &cmd_q->qbase_dma, 827 + GFP_KERNEL); 828 828 if (!cmd_q->qbase) { 829 829 dev_err(dev, "unable to allocate command queue\n"); 830 830 ret = -ENOMEM;
+2 -2
drivers/crypto/hisilicon/sec/sec_algs.c
··· 241 241 memset(ctx->key, 0, SEC_MAX_CIPHER_KEY); 242 242 } else { 243 243 /* new key */ 244 - ctx->key = dma_zalloc_coherent(dev, SEC_MAX_CIPHER_KEY, 245 - &ctx->pkey, GFP_KERNEL); 244 + ctx->key = dma_alloc_coherent(dev, SEC_MAX_CIPHER_KEY, 245 + &ctx->pkey, GFP_KERNEL); 246 246 if (!ctx->key) { 247 247 mutex_unlock(&ctx->lock); 248 248 return -ENOMEM;
+6 -9
drivers/crypto/hisilicon/sec/sec_drv.c
··· 1082 1082 struct sec_queue_ring_db *ring_db = &queue->ring_db; 1083 1083 int ret; 1084 1084 1085 - ring_cmd->vaddr = dma_zalloc_coherent(dev, SEC_Q_CMD_SIZE, 1086 - &ring_cmd->paddr, 1087 - GFP_KERNEL); 1085 + ring_cmd->vaddr = dma_alloc_coherent(dev, SEC_Q_CMD_SIZE, 1086 + &ring_cmd->paddr, GFP_KERNEL); 1088 1087 if (!ring_cmd->vaddr) 1089 1088 return -ENOMEM; 1090 1089 ··· 1091 1092 mutex_init(&ring_cmd->lock); 1092 1093 ring_cmd->callback = sec_alg_callback; 1093 1094 1094 - ring_cq->vaddr = dma_zalloc_coherent(dev, SEC_Q_CQ_SIZE, 1095 - &ring_cq->paddr, 1096 - GFP_KERNEL); 1095 + ring_cq->vaddr = dma_alloc_coherent(dev, SEC_Q_CQ_SIZE, 1096 + &ring_cq->paddr, GFP_KERNEL); 1097 1097 if (!ring_cq->vaddr) { 1098 1098 ret = -ENOMEM; 1099 1099 goto err_free_ring_cmd; 1100 1100 } 1101 1101 1102 - ring_db->vaddr = dma_zalloc_coherent(dev, SEC_Q_DB_SIZE, 1103 - &ring_db->paddr, 1104 - GFP_KERNEL); 1102 + ring_db->vaddr = dma_alloc_coherent(dev, SEC_Q_DB_SIZE, 1103 + &ring_db->paddr, GFP_KERNEL); 1105 1104 if (!ring_db->vaddr) { 1106 1105 ret = -ENOMEM; 1107 1106 goto err_free_ring_cq;
+3 -3
drivers/crypto/ixp4xx_crypto.c
··· 260 260 { 261 261 struct device *dev = &pdev->dev; 262 262 BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64); 263 - crypt_virt = dma_zalloc_coherent(dev, 264 - NPE_QLEN * sizeof(struct crypt_ctl), 265 - &crypt_phys, GFP_ATOMIC); 263 + crypt_virt = dma_alloc_coherent(dev, 264 + NPE_QLEN * sizeof(struct crypt_ctl), 265 + &crypt_phys, GFP_ATOMIC); 266 266 if (!crypt_virt) 267 267 return -ENOMEM; 268 268 return 0;
+8 -8
drivers/crypto/mediatek/mtk-platform.c
··· 453 453 if (!ring[i]) 454 454 goto err_cleanup; 455 455 456 - ring[i]->cmd_base = dma_zalloc_coherent(cryp->dev, 457 - MTK_DESC_RING_SZ, 458 - &ring[i]->cmd_dma, 459 - GFP_KERNEL); 456 + ring[i]->cmd_base = dma_alloc_coherent(cryp->dev, 457 + MTK_DESC_RING_SZ, 458 + &ring[i]->cmd_dma, 459 + GFP_KERNEL); 460 460 if (!ring[i]->cmd_base) 461 461 goto err_cleanup; 462 462 463 - ring[i]->res_base = dma_zalloc_coherent(cryp->dev, 464 - MTK_DESC_RING_SZ, 465 - &ring[i]->res_dma, 466 - GFP_KERNEL); 463 + ring[i]->res_base = dma_alloc_coherent(cryp->dev, 464 + MTK_DESC_RING_SZ, 465 + &ring[i]->res_dma, 466 + GFP_KERNEL); 467 467 if (!ring[i]->res_base) 468 468 goto err_cleanup; 469 469
+6 -6
drivers/crypto/qat/qat_common/adf_admin.c
··· 244 244 dev_to_node(&GET_DEV(accel_dev))); 245 245 if (!admin) 246 246 return -ENOMEM; 247 - admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE, 248 - &admin->phy_addr, GFP_KERNEL); 247 + admin->virt_addr = dma_alloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE, 248 + &admin->phy_addr, GFP_KERNEL); 249 249 if (!admin->virt_addr) { 250 250 dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n"); 251 251 kfree(admin); 252 252 return -ENOMEM; 253 253 } 254 254 255 - admin->virt_tbl_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), 256 - PAGE_SIZE, 257 - &admin->const_tbl_addr, 258 - GFP_KERNEL); 255 + admin->virt_tbl_addr = dma_alloc_coherent(&GET_DEV(accel_dev), 256 + PAGE_SIZE, 257 + &admin->const_tbl_addr, 258 + GFP_KERNEL); 259 259 if (!admin->virt_tbl_addr) { 260 260 dev_err(&GET_DEV(accel_dev), "Failed to allocate const_tbl\n"); 261 261 dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+12 -12
drivers/crypto/qat/qat_common/qat_algs.c
··· 601 601 602 602 dev = &GET_DEV(inst->accel_dev); 603 603 ctx->inst = inst; 604 - ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd), 605 - &ctx->enc_cd_paddr, 606 - GFP_ATOMIC); 604 + ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd), 605 + &ctx->enc_cd_paddr, 606 + GFP_ATOMIC); 607 607 if (!ctx->enc_cd) { 608 608 return -ENOMEM; 609 609 } 610 - ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd), 611 - &ctx->dec_cd_paddr, 612 - GFP_ATOMIC); 610 + ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd), 611 + &ctx->dec_cd_paddr, 612 + GFP_ATOMIC); 613 613 if (!ctx->dec_cd) { 614 614 goto out_free_enc; 615 615 } ··· 933 933 934 934 dev = &GET_DEV(inst->accel_dev); 935 935 ctx->inst = inst; 936 - ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd), 937 - &ctx->enc_cd_paddr, 938 - GFP_ATOMIC); 936 + ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd), 937 + &ctx->enc_cd_paddr, 938 + GFP_ATOMIC); 939 939 if (!ctx->enc_cd) { 940 940 spin_unlock(&ctx->lock); 941 941 return -ENOMEM; 942 942 } 943 - ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd), 944 - &ctx->dec_cd_paddr, 945 - GFP_ATOMIC); 943 + ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd), 944 + &ctx->dec_cd_paddr, 945 + GFP_ATOMIC); 946 946 if (!ctx->dec_cd) { 947 947 spin_unlock(&ctx->lock); 948 948 goto out_free_enc;
+34 -34
drivers/crypto/qat/qat_common/qat_asym_algs.c
··· 332 332 } else { 333 333 int shift = ctx->p_size - req->src_len; 334 334 335 - qat_req->src_align = dma_zalloc_coherent(dev, 336 - ctx->p_size, 337 - &qat_req->in.dh.in.b, 338 - GFP_KERNEL); 335 + qat_req->src_align = dma_alloc_coherent(dev, 336 + ctx->p_size, 337 + &qat_req->in.dh.in.b, 338 + GFP_KERNEL); 339 339 if (unlikely(!qat_req->src_align)) 340 340 return ret; 341 341 ··· 360 360 goto unmap_src; 361 361 362 362 } else { 363 - qat_req->dst_align = dma_zalloc_coherent(dev, ctx->p_size, 364 - &qat_req->out.dh.r, 365 - GFP_KERNEL); 363 + qat_req->dst_align = dma_alloc_coherent(dev, ctx->p_size, 364 + &qat_req->out.dh.r, 365 + GFP_KERNEL); 366 366 if (unlikely(!qat_req->dst_align)) 367 367 goto unmap_src; 368 368 } ··· 447 447 return -EINVAL; 448 448 449 449 ctx->p_size = params->p_size; 450 - ctx->p = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL); 450 + ctx->p = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL); 451 451 if (!ctx->p) 452 452 return -ENOMEM; 453 453 memcpy(ctx->p, params->p, ctx->p_size); ··· 458 458 return 0; 459 459 } 460 460 461 - ctx->g = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL); 461 + ctx->g = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL); 462 462 if (!ctx->g) 463 463 return -ENOMEM; 464 464 memcpy(ctx->g + (ctx->p_size - params->g_size), params->g, ··· 503 503 if (ret < 0) 504 504 goto err_clear_ctx; 505 505 506 - ctx->xa = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_xa, 507 - GFP_KERNEL); 506 + ctx->xa = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_xa, 507 + GFP_KERNEL); 508 508 if (!ctx->xa) { 509 509 ret = -ENOMEM; 510 510 goto err_clear_ctx; ··· 737 737 } else { 738 738 int shift = ctx->key_sz - req->src_len; 739 739 740 - qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz, 741 - &qat_req->in.rsa.enc.m, 742 - GFP_KERNEL); 740 + qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz, 741 + &qat_req->in.rsa.enc.m, 742 + GFP_KERNEL); 743 743 if 
(unlikely(!qat_req->src_align)) 744 744 return ret; 745 745 ··· 756 756 goto unmap_src; 757 757 758 758 } else { 759 - qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz, 760 - &qat_req->out.rsa.enc.c, 761 - GFP_KERNEL); 759 + qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz, 760 + &qat_req->out.rsa.enc.c, 761 + GFP_KERNEL); 762 762 if (unlikely(!qat_req->dst_align)) 763 763 goto unmap_src; 764 764 ··· 881 881 } else { 882 882 int shift = ctx->key_sz - req->src_len; 883 883 884 - qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz, 885 - &qat_req->in.rsa.dec.c, 886 - GFP_KERNEL); 884 + qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz, 885 + &qat_req->in.rsa.dec.c, 886 + GFP_KERNEL); 887 887 if (unlikely(!qat_req->src_align)) 888 888 return ret; 889 889 ··· 900 900 goto unmap_src; 901 901 902 902 } else { 903 - qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz, 904 - &qat_req->out.rsa.dec.m, 905 - GFP_KERNEL); 903 + qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz, 904 + &qat_req->out.rsa.dec.m, 905 + GFP_KERNEL); 906 906 if (unlikely(!qat_req->dst_align)) 907 907 goto unmap_src; 908 908 ··· 989 989 goto err; 990 990 991 991 ret = -ENOMEM; 992 - ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL); 992 + ctx->n = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL); 993 993 if (!ctx->n) 994 994 goto err; 995 995 ··· 1018 1018 return -EINVAL; 1019 1019 } 1020 1020 1021 - ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL); 1021 + ctx->e = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL); 1022 1022 if (!ctx->e) 1023 1023 return -ENOMEM; 1024 1024 ··· 1044 1044 goto err; 1045 1045 1046 1046 ret = -ENOMEM; 1047 - ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL); 1047 + ctx->d = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL); 1048 1048 if (!ctx->d) 1049 1049 goto err; 1050 1050 ··· 1077 1077 qat_rsa_drop_leading_zeros(&ptr, 
&len); 1078 1078 if (!len) 1079 1079 goto err; 1080 - ctx->p = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL); 1080 + ctx->p = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL); 1081 1081 if (!ctx->p) 1082 1082 goto err; 1083 1083 memcpy(ctx->p + (half_key_sz - len), ptr, len); ··· 1088 1088 qat_rsa_drop_leading_zeros(&ptr, &len); 1089 1089 if (!len) 1090 1090 goto free_p; 1091 - ctx->q = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL); 1091 + ctx->q = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL); 1092 1092 if (!ctx->q) 1093 1093 goto free_p; 1094 1094 memcpy(ctx->q + (half_key_sz - len), ptr, len); ··· 1099 1099 qat_rsa_drop_leading_zeros(&ptr, &len); 1100 1100 if (!len) 1101 1101 goto free_q; 1102 - ctx->dp = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dp, 1103 - GFP_KERNEL); 1102 + ctx->dp = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dp, 1103 + GFP_KERNEL); 1104 1104 if (!ctx->dp) 1105 1105 goto free_q; 1106 1106 memcpy(ctx->dp + (half_key_sz - len), ptr, len); ··· 1111 1111 qat_rsa_drop_leading_zeros(&ptr, &len); 1112 1112 if (!len) 1113 1113 goto free_dp; 1114 - ctx->dq = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dq, 1115 - GFP_KERNEL); 1114 + ctx->dq = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dq, 1115 + GFP_KERNEL); 1116 1116 if (!ctx->dq) 1117 1117 goto free_dp; 1118 1118 memcpy(ctx->dq + (half_key_sz - len), ptr, len); ··· 1123 1123 qat_rsa_drop_leading_zeros(&ptr, &len); 1124 1124 if (!len) 1125 1125 goto free_dq; 1126 - ctx->qinv = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_qinv, 1127 - GFP_KERNEL); 1126 + ctx->qinv = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_qinv, 1127 + GFP_KERNEL); 1128 1128 if (!ctx->qinv) 1129 1129 goto free_dq; 1130 1130 memcpy(ctx->qinv + (half_key_sz - len), ptr, len);
+4 -4
drivers/dma/imx-sdma.c
··· 1182 1182 { 1183 1183 int ret = -EBUSY; 1184 1184 1185 - sdma->bd0 = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys, 1186 - GFP_NOWAIT); 1185 + sdma->bd0 = dma_alloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys, 1186 + GFP_NOWAIT); 1187 1187 if (!sdma->bd0) { 1188 1188 ret = -ENOMEM; 1189 1189 goto out; ··· 1205 1205 u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor); 1206 1206 int ret = 0; 1207 1207 1208 - desc->bd = dma_zalloc_coherent(NULL, bd_size, &desc->bd_phys, 1209 - GFP_NOWAIT); 1208 + desc->bd = dma_alloc_coherent(NULL, bd_size, &desc->bd_phys, 1209 + GFP_NOWAIT); 1210 1210 if (!desc->bd) { 1211 1211 ret = -ENOMEM; 1212 1212 goto out;
+2 -2
drivers/dma/mediatek/mtk-hsdma.c
··· 325 325 * and [MTK_DMA_SIZE ... 2 * MTK_DMA_SIZE - 1] is for RX ring. 326 326 */ 327 327 pc->sz_ring = 2 * MTK_DMA_SIZE * sizeof(*ring->txd); 328 - ring->txd = dma_zalloc_coherent(hsdma2dev(hsdma), pc->sz_ring, 329 - &ring->tphys, GFP_NOWAIT); 328 + ring->txd = dma_alloc_coherent(hsdma2dev(hsdma), pc->sz_ring, 329 + &ring->tphys, GFP_NOWAIT); 330 330 if (!ring->txd) 331 331 return -ENOMEM; 332 332
+3 -3
drivers/dma/mxs-dma.c
··· 416 416 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 417 417 int ret; 418 418 419 - mxs_chan->ccw = dma_zalloc_coherent(mxs_dma->dma_device.dev, 420 - CCW_BLOCK_SIZE, 421 - &mxs_chan->ccw_phys, GFP_KERNEL); 419 + mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev, 420 + CCW_BLOCK_SIZE, 421 + &mxs_chan->ccw_phys, GFP_KERNEL); 422 422 if (!mxs_chan->ccw) { 423 423 ret = -ENOMEM; 424 424 goto err_alloc;
+2 -2
drivers/dma/xgene-dma.c
··· 1208 1208 ring->size = ret; 1209 1209 1210 1210 /* Allocate memory for DMA ring descriptor */ 1211 - ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size, 1212 - &ring->desc_paddr, GFP_KERNEL); 1211 + ring->desc_vaddr = dma_alloc_coherent(chan->dev, ring->size, 1212 + &ring->desc_paddr, GFP_KERNEL); 1213 1213 if (!ring->desc_vaddr) { 1214 1214 chan_err(chan, "Failed to allocate ring desc\n"); 1215 1215 return -ENOMEM;
+7 -7
drivers/dma/xilinx/xilinx_dma.c
··· 879 879 */ 880 880 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 881 881 /* Allocate the buffer descriptors. */ 882 - chan->seg_v = dma_zalloc_coherent(chan->dev, 883 - sizeof(*chan->seg_v) * 884 - XILINX_DMA_NUM_DESCS, 885 - &chan->seg_p, GFP_KERNEL); 882 + chan->seg_v = dma_alloc_coherent(chan->dev, 883 + sizeof(*chan->seg_v) * XILINX_DMA_NUM_DESCS, 884 + &chan->seg_p, GFP_KERNEL); 886 885 if (!chan->seg_v) { 887 886 dev_err(chan->dev, 888 887 "unable to allocate channel %d descriptors\n", ··· 894 895 * so allocating a desc segment during channel allocation for 895 896 * programming tail descriptor. 896 897 */ 897 - chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev, 898 - sizeof(*chan->cyclic_seg_v), 899 - &chan->cyclic_seg_p, GFP_KERNEL); 898 + chan->cyclic_seg_v = dma_alloc_coherent(chan->dev, 899 + sizeof(*chan->cyclic_seg_v), 900 + &chan->cyclic_seg_p, 901 + GFP_KERNEL); 900 902 if (!chan->cyclic_seg_v) { 901 903 dev_err(chan->dev, 902 904 "unable to allocate desc segment for cyclic DMA\n");
+3 -3
drivers/dma/xilinx/zynqmp_dma.c
··· 490 490 list_add_tail(&desc->node, &chan->free_list); 491 491 } 492 492 493 - chan->desc_pool_v = dma_zalloc_coherent(chan->dev, 494 - (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS), 495 - &chan->desc_pool_p, GFP_KERNEL); 493 + chan->desc_pool_v = dma_alloc_coherent(chan->dev, 494 + (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS), 495 + &chan->desc_pool_p, GFP_KERNEL); 496 496 if (!chan->desc_pool_v) 497 497 return -ENOMEM; 498 498
+3 -2
drivers/gpu/drm/drm_pci.c
··· 61 61 return NULL; 62 62 63 63 dmah->size = size; 64 - dmah->vaddr = dma_zalloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, 65 - GFP_KERNEL | __GFP_COMP); 64 + dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, 65 + &dmah->busaddr, 66 + GFP_KERNEL | __GFP_COMP); 66 67 67 68 if (dmah->vaddr == NULL) { 68 69 kfree(dmah);
+2 -2
drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
··· 766 766 return NULL; 767 767 768 768 sbuf->size = size; 769 - sbuf->sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf->size, 770 - &sbuf->dma_addr, GFP_ATOMIC); 769 + sbuf->sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf->size, 770 + &sbuf->dma_addr, GFP_ATOMIC); 771 771 if (!sbuf->sb) 772 772 goto bail; 773 773
+4 -4
drivers/infiniband/hw/bnxt_re/qplib_res.c
··· 105 105 106 106 if (!sghead) { 107 107 for (i = 0; i < pages; i++) { 108 - pbl->pg_arr[i] = dma_zalloc_coherent(&pdev->dev, 109 - pbl->pg_size, 110 - &pbl->pg_map_arr[i], 111 - GFP_KERNEL); 108 + pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev, 109 + pbl->pg_size, 110 + &pbl->pg_map_arr[i], 111 + GFP_KERNEL); 112 112 if (!pbl->pg_arr[i]) 113 113 goto fail; 114 114 pbl->pg_count++;
+3 -3
drivers/infiniband/hw/cxgb3/cxio_hal.c
··· 291 291 if (!wq->sq) 292 292 goto err3; 293 293 294 - wq->queue = dma_zalloc_coherent(&(rdev_p->rnic_info.pdev->dev), 295 - depth * sizeof(union t3_wr), 296 - &(wq->dma_addr), GFP_KERNEL); 294 + wq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev), 295 + depth * sizeof(union t3_wr), 296 + &(wq->dma_addr), GFP_KERNEL); 297 297 if (!wq->queue) 298 298 goto err4; 299 299
+2 -3
drivers/infiniband/hw/cxgb4/qp.c
··· 2564 2564 wq->rqt_abs_idx = (wq->rqt_hwaddr - rdev->lldi.vr->rq.start) >> 2565 2565 T4_RQT_ENTRY_SHIFT; 2566 2566 2567 - wq->queue = dma_zalloc_coherent(&rdev->lldi.pdev->dev, 2568 - wq->memsize, &wq->dma_addr, 2569 - GFP_KERNEL); 2567 + wq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, wq->memsize, 2568 + &wq->dma_addr, GFP_KERNEL); 2570 2569 if (!wq->queue) 2571 2570 goto err_free_rqtpool; 2572 2571
+15 -14
drivers/infiniband/hw/hfi1/init.c
··· 899 899 goto done; 900 900 901 901 /* allocate dummy tail memory for all receive contexts */ 902 - dd->rcvhdrtail_dummy_kvaddr = dma_zalloc_coherent( 903 - &dd->pcidev->dev, sizeof(u64), 904 - &dd->rcvhdrtail_dummy_dma, 905 - GFP_KERNEL); 902 + dd->rcvhdrtail_dummy_kvaddr = dma_alloc_coherent(&dd->pcidev->dev, 903 + sizeof(u64), 904 + &dd->rcvhdrtail_dummy_dma, 905 + GFP_KERNEL); 906 906 907 907 if (!dd->rcvhdrtail_dummy_kvaddr) { 908 908 dd_dev_err(dd, "cannot allocate dummy tail memory\n"); ··· 1863 1863 gfp_flags = GFP_KERNEL; 1864 1864 else 1865 1865 gfp_flags = GFP_USER; 1866 - rcd->rcvhdrq = dma_zalloc_coherent( 1867 - &dd->pcidev->dev, amt, &rcd->rcvhdrq_dma, 1868 - gfp_flags | __GFP_COMP); 1866 + rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt, 1867 + &rcd->rcvhdrq_dma, 1868 + gfp_flags | __GFP_COMP); 1869 1869 1870 1870 if (!rcd->rcvhdrq) { 1871 1871 dd_dev_err(dd, ··· 1876 1876 1877 1877 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) || 1878 1878 HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) { 1879 - rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent( 1880 - &dd->pcidev->dev, PAGE_SIZE, 1881 - &rcd->rcvhdrqtailaddr_dma, gfp_flags); 1879 + rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(&dd->pcidev->dev, 1880 + PAGE_SIZE, 1881 + &rcd->rcvhdrqtailaddr_dma, 1882 + gfp_flags); 1882 1883 if (!rcd->rcvhdrtail_kvaddr) 1883 1884 goto bail_free; 1884 1885 } ··· 1975 1974 while (alloced_bytes < rcd->egrbufs.size && 1976 1975 rcd->egrbufs.alloced < rcd->egrbufs.count) { 1977 1976 rcd->egrbufs.buffers[idx].addr = 1978 - dma_zalloc_coherent(&dd->pcidev->dev, 1979 - rcd->egrbufs.rcvtid_size, 1980 - &rcd->egrbufs.buffers[idx].dma, 1981 - gfp_flags); 1977 + dma_alloc_coherent(&dd->pcidev->dev, 1978 + rcd->egrbufs.rcvtid_size, 1979 + &rcd->egrbufs.buffers[idx].dma, 1980 + gfp_flags); 1982 1981 if (rcd->egrbufs.buffers[idx].addr) { 1983 1982 rcd->egrbufs.buffers[idx].len = 1984 1983 rcd->egrbufs.rcvtid_size;
+4 -5
drivers/infiniband/hw/hfi1/pio.c
··· 2098 2098 int bytes = TXE_NUM_CONTEXTS * sizeof(struct credit_return); 2099 2099 2100 2100 set_dev_node(&dd->pcidev->dev, i); 2101 - dd->cr_base[i].va = dma_zalloc_coherent( 2102 - &dd->pcidev->dev, 2103 - bytes, 2104 - &dd->cr_base[i].dma, 2105 - GFP_KERNEL); 2101 + dd->cr_base[i].va = dma_alloc_coherent(&dd->pcidev->dev, 2102 + bytes, 2103 + &dd->cr_base[i].dma, 2104 + GFP_KERNEL); 2106 2105 if (!dd->cr_base[i].va) { 2107 2106 set_dev_node(&dd->pcidev->dev, dd->node); 2108 2107 dd_dev_err(dd,
+9 -18
drivers/infiniband/hw/hfi1/sdma.c
··· 1453 1453 timer_setup(&sde->err_progress_check_timer, 1454 1454 sdma_err_progress_check, 0); 1455 1455 1456 - sde->descq = dma_zalloc_coherent( 1457 - &dd->pcidev->dev, 1458 - descq_cnt * sizeof(u64[2]), 1459 - &sde->descq_phys, 1460 - GFP_KERNEL 1461 - ); 1456 + sde->descq = dma_alloc_coherent(&dd->pcidev->dev, 1457 + descq_cnt * sizeof(u64[2]), 1458 + &sde->descq_phys, GFP_KERNEL); 1462 1459 if (!sde->descq) 1463 1460 goto bail; 1464 1461 sde->tx_ring = ··· 1468 1471 1469 1472 dd->sdma_heads_size = L1_CACHE_BYTES * num_engines; 1470 1473 /* Allocate memory for DMA of head registers to memory */ 1471 - dd->sdma_heads_dma = dma_zalloc_coherent( 1472 - &dd->pcidev->dev, 1473 - dd->sdma_heads_size, 1474 - &dd->sdma_heads_phys, 1475 - GFP_KERNEL 1476 - ); 1474 + dd->sdma_heads_dma = dma_alloc_coherent(&dd->pcidev->dev, 1475 + dd->sdma_heads_size, 1476 + &dd->sdma_heads_phys, 1477 + GFP_KERNEL); 1477 1478 if (!dd->sdma_heads_dma) { 1478 1479 dd_dev_err(dd, "failed to allocate SendDMA head memory\n"); 1479 1480 goto bail; 1480 1481 } 1481 1482 1482 1483 /* Allocate memory for pad */ 1483 - dd->sdma_pad_dma = dma_zalloc_coherent( 1484 - &dd->pcidev->dev, 1485 - sizeof(u32), 1486 - &dd->sdma_pad_phys, 1487 - GFP_KERNEL 1488 - ); 1484 + dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, sizeof(u32), 1485 + &dd->sdma_pad_phys, GFP_KERNEL); 1489 1486 if (!dd->sdma_pad_dma) { 1490 1487 dd_dev_err(dd, "failed to allocate SendDMA pad memory\n"); 1491 1488 goto bail;
+6 -5
drivers/infiniband/hw/hns/hns_roce_alloc.c
··· 197 197 buf->npages = 1 << order; 198 198 buf->page_shift = page_shift; 199 199 /* MTT PA must be recorded in 4k alignment, t is 4k aligned */ 200 - buf->direct.buf = dma_zalloc_coherent(dev, 201 - size, &t, GFP_KERNEL); 200 + buf->direct.buf = dma_alloc_coherent(dev, size, &t, 201 + GFP_KERNEL); 202 202 if (!buf->direct.buf) 203 203 return -ENOMEM; 204 204 ··· 219 219 return -ENOMEM; 220 220 221 221 for (i = 0; i < buf->nbufs; ++i) { 222 - buf->page_list[i].buf = dma_zalloc_coherent(dev, 223 - page_size, &t, 224 - GFP_KERNEL); 222 + buf->page_list[i].buf = dma_alloc_coherent(dev, 223 + page_size, 224 + &t, 225 + GFP_KERNEL); 225 226 226 227 if (!buf->page_list[i].buf) 227 228 goto err_free;
+5 -5
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
··· 5091 5091 eqe_alloc = i * (buf_chk_sz / eq->eqe_size); 5092 5092 size = (eq->entries - eqe_alloc) * eq->eqe_size; 5093 5093 } 5094 - eq->buf[i] = dma_zalloc_coherent(dev, size, 5094 + eq->buf[i] = dma_alloc_coherent(dev, size, 5095 5095 &(eq->buf_dma[i]), 5096 5096 GFP_KERNEL); 5097 5097 if (!eq->buf[i]) ··· 5126 5126 size = (eq->entries - eqe_alloc) 5127 5127 * eq->eqe_size; 5128 5128 } 5129 - eq->buf[idx] = dma_zalloc_coherent(dev, size, 5130 - &(eq->buf_dma[idx]), 5131 - GFP_KERNEL); 5129 + eq->buf[idx] = dma_alloc_coherent(dev, size, 5130 + &(eq->buf_dma[idx]), 5131 + GFP_KERNEL); 5132 5132 if (!eq->buf[idx]) 5133 5133 goto err_dma_alloc_buf; 5134 5134 ··· 5241 5241 goto free_cmd_mbox; 5242 5242 } 5243 5243 5244 - eq->buf_list->buf = dma_zalloc_coherent(dev, buf_chk_sz, 5244 + eq->buf_list->buf = dma_alloc_coherent(dev, buf_chk_sz, 5245 5245 &(eq->buf_list->map), 5246 5246 GFP_KERNEL); 5247 5247 if (!eq->buf_list->buf) {
+2 -2
drivers/infiniband/hw/i40iw/i40iw_utils.c
··· 745 745 if (!mem) 746 746 return I40IW_ERR_PARAM; 747 747 mem->size = ALIGN(size, alignment); 748 - mem->va = dma_zalloc_coherent(&pcidev->dev, mem->size, 749 - (dma_addr_t *)&mem->pa, GFP_KERNEL); 748 + mem->va = dma_alloc_coherent(&pcidev->dev, mem->size, 749 + (dma_addr_t *)&mem->pa, GFP_KERNEL); 750 750 if (!mem->va) 751 751 return I40IW_ERR_NO_MEMORY; 752 752 return 0;
+3 -2
drivers/infiniband/hw/mthca/mthca_memfree.c
··· 623 623 page = dev->db_tab->page + end; 624 624 625 625 alloc: 626 - page->db_rec = dma_zalloc_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE, 627 - &page->mapping, GFP_KERNEL); 626 + page->db_rec = dma_alloc_coherent(&dev->pdev->dev, 627 + MTHCA_ICM_PAGE_SIZE, &page->mapping, 628 + GFP_KERNEL); 628 629 if (!page->db_rec) { 629 630 ret = -ENOMEM; 630 631 goto out;
+7 -7
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
··· 380 380 q->len = len; 381 381 q->entry_size = entry_size; 382 382 q->size = len * entry_size; 383 - q->va = dma_zalloc_coherent(&dev->nic_info.pdev->dev, q->size, 384 - &q->dma, GFP_KERNEL); 383 + q->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, q->size, &q->dma, 384 + GFP_KERNEL); 385 385 if (!q->va) 386 386 return -ENOMEM; 387 387 return 0; ··· 1819 1819 return -ENOMEM; 1820 1820 ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_CREATE_CQ, 1821 1821 OCRDMA_SUBSYS_COMMON, sizeof(*cmd)); 1822 - cq->va = dma_zalloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL); 1822 + cq->va = dma_alloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL); 1823 1823 if (!cq->va) { 1824 1824 status = -ENOMEM; 1825 1825 goto mem_err; ··· 2209 2209 qp->sq.max_cnt = max_wqe_allocated; 2210 2210 len = (hw_pages * hw_page_size); 2211 2211 2212 - qp->sq.va = dma_zalloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL); 2212 + qp->sq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL); 2213 2213 if (!qp->sq.va) 2214 2214 return -EINVAL; 2215 2215 qp->sq.len = len; ··· 2259 2259 qp->rq.max_cnt = max_rqe_allocated; 2260 2260 len = (hw_pages * hw_page_size); 2261 2261 2262 - qp->rq.va = dma_zalloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL); 2262 + qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL); 2263 2263 if (!qp->rq.va) 2264 2264 return -ENOMEM; 2265 2265 qp->rq.pa = pa; ··· 2315 2315 if (dev->attr.ird == 0) 2316 2316 return 0; 2317 2317 2318 - qp->ird_q_va = dma_zalloc_coherent(&pdev->dev, ird_q_len, &pa, 2319 - GFP_KERNEL); 2318 + qp->ird_q_va = dma_alloc_coherent(&pdev->dev, ird_q_len, &pa, 2319 + GFP_KERNEL); 2320 2320 if (!qp->ird_q_va) 2321 2321 return -ENOMEM; 2322 2322 ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages,
+2 -2
drivers/infiniband/hw/ocrdma/ocrdma_stats.c
··· 73 73 mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req), 74 74 sizeof(struct ocrdma_rdma_stats_resp)); 75 75 76 - mem->va = dma_zalloc_coherent(&dev->nic_info.pdev->dev, mem->size, 77 - &mem->pa, GFP_KERNEL); 76 + mem->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, mem->size, 77 + &mem->pa, GFP_KERNEL); 78 78 if (!mem->va) { 79 79 pr_err("%s: stats mbox allocation failed\n", __func__); 80 80 return false;
+3 -3
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
··· 504 504 INIT_LIST_HEAD(&ctx->mm_head); 505 505 mutex_init(&ctx->mm_list_lock); 506 506 507 - ctx->ah_tbl.va = dma_zalloc_coherent(&pdev->dev, map_len, 508 - &ctx->ah_tbl.pa, GFP_KERNEL); 507 + ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len, 508 + &ctx->ah_tbl.pa, GFP_KERNEL); 509 509 if (!ctx->ah_tbl.va) { 510 510 kfree(ctx); 511 511 return ERR_PTR(-ENOMEM); ··· 838 838 return -ENOMEM; 839 839 840 840 for (i = 0; i < mr->num_pbls; i++) { 841 - va = dma_zalloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL); 841 + va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL); 842 842 if (!va) { 843 843 ocrdma_free_mr_pbl_tbl(dev, mr); 844 844 status = -ENOMEM;
+2 -2
drivers/infiniband/hw/qedr/verbs.c
··· 556 556 return ERR_PTR(-ENOMEM); 557 557 558 558 for (i = 0; i < pbl_info->num_pbls; i++) { 559 - va = dma_zalloc_coherent(&pdev->dev, pbl_info->pbl_size, 560 - &pa, flags); 559 + va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size, &pa, 560 + flags); 561 561 if (!va) 562 562 goto err; 563 563
+2 -2
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
··· 890 890 dev_info(&pdev->dev, "device version %d, driver version %d\n", 891 891 dev->dsr_version, PVRDMA_VERSION); 892 892 893 - dev->dsr = dma_zalloc_coherent(&pdev->dev, sizeof(*dev->dsr), 894 - &dev->dsrbase, GFP_KERNEL); 893 + dev->dsr = dma_alloc_coherent(&pdev->dev, sizeof(*dev->dsr), 894 + &dev->dsrbase, GFP_KERNEL); 895 895 if (!dev->dsr) { 896 896 dev_err(&pdev->dev, "failed to allocate shared region\n"); 897 897 ret = -ENOMEM;
+2 -2
drivers/input/touchscreen/raspberrypi-ts.c
··· 147 147 return -ENOMEM; 148 148 ts->pdev = pdev; 149 149 150 - ts->fw_regs_va = dma_zalloc_coherent(dev, PAGE_SIZE, &ts->fw_regs_phys, 151 - GFP_KERNEL); 150 + ts->fw_regs_va = dma_alloc_coherent(dev, PAGE_SIZE, &ts->fw_regs_phys, 151 + GFP_KERNEL); 152 152 if (!ts->fw_regs_va) { 153 153 dev_err(dev, "failed to dma_alloc_coherent\n"); 154 154 return -ENOMEM;
+2 -3
drivers/iommu/mtk_iommu_v1.c
··· 232 232 233 233 spin_lock_init(&dom->pgtlock); 234 234 235 - dom->pgt_va = dma_zalloc_coherent(data->dev, 236 - M2701_IOMMU_PGT_SIZE, 237 - &dom->pgt_pa, GFP_KERNEL); 235 + dom->pgt_va = dma_alloc_coherent(data->dev, M2701_IOMMU_PGT_SIZE, 236 + &dom->pgt_pa, GFP_KERNEL); 238 237 if (!dom->pgt_va) 239 238 return -ENOMEM; 240 239
+2 -2
drivers/media/pci/intel/ipu3/ipu3-cio2.c
··· 218 218 { 219 219 struct device *dev = &cio2->pci_dev->dev; 220 220 221 - q->fbpt = dma_zalloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr, 222 - GFP_KERNEL); 221 + q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr, 222 + GFP_KERNEL); 223 223 if (!q->fbpt) 224 224 return -ENOMEM; 225 225
+1 -1
drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
··· 49 49 struct mtk_vcodec_ctx *ctx = (struct mtk_vcodec_ctx *)data; 50 50 struct device *dev = &ctx->dev->plat_dev->dev; 51 51 52 - mem->va = dma_zalloc_coherent(dev, size, &mem->dma_addr, GFP_KERNEL); 52 + mem->va = dma_alloc_coherent(dev, size, &mem->dma_addr, GFP_KERNEL); 53 53 if (!mem->va) { 54 54 mtk_v4l2_err("%s dma_alloc size=%ld failed!", dev_name(dev), 55 55 size);
+2 -2
drivers/misc/genwqe/card_utils.c
··· 218 218 if (get_order(size) >= MAX_ORDER) 219 219 return NULL; 220 220 221 - return dma_zalloc_coherent(&cd->pci_dev->dev, size, dma_handle, 222 - GFP_KERNEL); 221 + return dma_alloc_coherent(&cd->pci_dev->dev, size, dma_handle, 222 + GFP_KERNEL); 223 223 } 224 224 225 225 void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size,
+3 -2
drivers/mmc/host/sdhci.c
··· 3763 3763 * Use zalloc to zero the reserved high 32-bits of 128-bit 3764 3764 * descriptors so that they never need to be written. 3765 3765 */ 3766 - buf = dma_zalloc_coherent(mmc_dev(mmc), host->align_buffer_sz + 3767 - host->adma_table_sz, &dma, GFP_KERNEL); 3766 + buf = dma_alloc_coherent(mmc_dev(mmc), 3767 + host->align_buffer_sz + host->adma_table_sz, 3768 + &dma, GFP_KERNEL); 3768 3769 if (!buf) { 3769 3770 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n", 3770 3771 mmc_hostname(mmc));
+6 -6
drivers/net/ethernet/aeroflex/greth.c
··· 1433 1433 } 1434 1434 1435 1435 /* Allocate TX descriptor ring in coherent memory */ 1436 - greth->tx_bd_base = dma_zalloc_coherent(greth->dev, 1024, 1437 - &greth->tx_bd_base_phys, 1438 - GFP_KERNEL); 1436 + greth->tx_bd_base = dma_alloc_coherent(greth->dev, 1024, 1437 + &greth->tx_bd_base_phys, 1438 + GFP_KERNEL); 1439 1439 if (!greth->tx_bd_base) { 1440 1440 err = -ENOMEM; 1441 1441 goto error3; 1442 1442 } 1443 1443 1444 1444 /* Allocate RX descriptor ring in coherent memory */ 1445 - greth->rx_bd_base = dma_zalloc_coherent(greth->dev, 1024, 1446 - &greth->rx_bd_base_phys, 1447 - GFP_KERNEL); 1445 + greth->rx_bd_base = dma_alloc_coherent(greth->dev, 1024, 1446 + &greth->rx_bd_base_phys, 1447 + GFP_KERNEL); 1448 1448 if (!greth->rx_bd_base) { 1449 1449 err = -ENOMEM; 1450 1450 goto error4;
+6 -6
drivers/net/ethernet/alacritech/slicoss.c
··· 795 795 size = stq->len * sizeof(*descs) + DESC_ALIGN_MASK; 796 796 797 797 for (i = 0; i < SLIC_NUM_STAT_DESC_ARRAYS; i++) { 798 - descs = dma_zalloc_coherent(&sdev->pdev->dev, size, &paddr, 799 - GFP_KERNEL); 798 + descs = dma_alloc_coherent(&sdev->pdev->dev, size, &paddr, 799 + GFP_KERNEL); 800 800 if (!descs) { 801 801 netdev_err(sdev->netdev, 802 802 "failed to allocate status descriptors\n"); ··· 1240 1240 struct slic_shmem_data *sm_data; 1241 1241 dma_addr_t paddr; 1242 1242 1243 - sm_data = dma_zalloc_coherent(&sdev->pdev->dev, sizeof(*sm_data), 1244 - &paddr, GFP_KERNEL); 1243 + sm_data = dma_alloc_coherent(&sdev->pdev->dev, sizeof(*sm_data), 1244 + &paddr, GFP_KERNEL); 1245 1245 if (!sm_data) { 1246 1246 dev_err(&sdev->pdev->dev, "failed to allocate shared memory\n"); 1247 1247 return -ENOMEM; ··· 1621 1621 int err = 0; 1622 1622 u8 *mac[2]; 1623 1623 1624 - eeprom = dma_zalloc_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE, 1625 - &paddr, GFP_KERNEL); 1624 + eeprom = dma_alloc_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE, 1625 + &paddr, GFP_KERNEL); 1626 1626 if (!eeprom) 1627 1627 return -ENOMEM; 1628 1628
+31 -30
drivers/net/ethernet/amazon/ena/ena_com.c
··· 111 111 struct ena_com_admin_sq *sq = &queue->sq; 112 112 u16 size = ADMIN_SQ_SIZE(queue->q_depth); 113 113 114 - sq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &sq->dma_addr, 115 - GFP_KERNEL); 114 + sq->entries = dma_alloc_coherent(queue->q_dmadev, size, &sq->dma_addr, 115 + GFP_KERNEL); 116 116 117 117 if (!sq->entries) { 118 118 pr_err("memory allocation failed"); ··· 133 133 struct ena_com_admin_cq *cq = &queue->cq; 134 134 u16 size = ADMIN_CQ_SIZE(queue->q_depth); 135 135 136 - cq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &cq->dma_addr, 137 - GFP_KERNEL); 136 + cq->entries = dma_alloc_coherent(queue->q_dmadev, size, &cq->dma_addr, 137 + GFP_KERNEL); 138 138 139 139 if (!cq->entries) { 140 140 pr_err("memory allocation failed"); ··· 156 156 157 157 dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH; 158 158 size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH); 159 - aenq->entries = dma_zalloc_coherent(dev->dmadev, size, &aenq->dma_addr, 160 - GFP_KERNEL); 159 + aenq->entries = dma_alloc_coherent(dev->dmadev, size, &aenq->dma_addr, 160 + GFP_KERNEL); 161 161 162 162 if (!aenq->entries) { 163 163 pr_err("memory allocation failed"); ··· 344 344 dev_node = dev_to_node(ena_dev->dmadev); 345 345 set_dev_node(ena_dev->dmadev, ctx->numa_node); 346 346 io_sq->desc_addr.virt_addr = 347 - dma_zalloc_coherent(ena_dev->dmadev, size, 348 - &io_sq->desc_addr.phys_addr, 349 - GFP_KERNEL); 347 + dma_alloc_coherent(ena_dev->dmadev, size, 348 + &io_sq->desc_addr.phys_addr, 349 + GFP_KERNEL); 350 350 set_dev_node(ena_dev->dmadev, dev_node); 351 351 if (!io_sq->desc_addr.virt_addr) { 352 352 io_sq->desc_addr.virt_addr = 353 - dma_zalloc_coherent(ena_dev->dmadev, size, 354 - &io_sq->desc_addr.phys_addr, 355 - GFP_KERNEL); 353 + dma_alloc_coherent(ena_dev->dmadev, size, 354 + &io_sq->desc_addr.phys_addr, 355 + GFP_KERNEL); 356 356 } 357 357 358 358 if (!io_sq->desc_addr.virt_addr) { ··· 425 425 prev_node = dev_to_node(ena_dev->dmadev); 426 426 set_dev_node(ena_dev->dmadev, 
ctx->numa_node); 427 427 io_cq->cdesc_addr.virt_addr = 428 - dma_zalloc_coherent(ena_dev->dmadev, size, 429 - &io_cq->cdesc_addr.phys_addr, GFP_KERNEL); 428 + dma_alloc_coherent(ena_dev->dmadev, size, 429 + &io_cq->cdesc_addr.phys_addr, GFP_KERNEL); 430 430 set_dev_node(ena_dev->dmadev, prev_node); 431 431 if (!io_cq->cdesc_addr.virt_addr) { 432 432 io_cq->cdesc_addr.virt_addr = 433 - dma_zalloc_coherent(ena_dev->dmadev, size, 434 - &io_cq->cdesc_addr.phys_addr, 435 - GFP_KERNEL); 433 + dma_alloc_coherent(ena_dev->dmadev, size, 434 + &io_cq->cdesc_addr.phys_addr, 435 + GFP_KERNEL); 436 436 } 437 437 438 438 if (!io_cq->cdesc_addr.virt_addr) { ··· 1026 1026 struct ena_rss *rss = &ena_dev->rss; 1027 1027 1028 1028 rss->hash_key = 1029 - dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), 1030 - &rss->hash_key_dma_addr, GFP_KERNEL); 1029 + dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), 1030 + &rss->hash_key_dma_addr, GFP_KERNEL); 1031 1031 1032 1032 if (unlikely(!rss->hash_key)) 1033 1033 return -ENOMEM; ··· 1050 1050 struct ena_rss *rss = &ena_dev->rss; 1051 1051 1052 1052 rss->hash_ctrl = 1053 - dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), 1054 - &rss->hash_ctrl_dma_addr, GFP_KERNEL); 1053 + dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), 1054 + &rss->hash_ctrl_dma_addr, GFP_KERNEL); 1055 1055 1056 1056 if (unlikely(!rss->hash_ctrl)) 1057 1057 return -ENOMEM; ··· 1094 1094 sizeof(struct ena_admin_rss_ind_table_entry); 1095 1095 1096 1096 rss->rss_ind_tbl = 1097 - dma_zalloc_coherent(ena_dev->dmadev, tbl_size, 1098 - &rss->rss_ind_tbl_dma_addr, GFP_KERNEL); 1097 + dma_alloc_coherent(ena_dev->dmadev, tbl_size, 1098 + &rss->rss_ind_tbl_dma_addr, GFP_KERNEL); 1099 1099 if (unlikely(!rss->rss_ind_tbl)) 1100 1100 goto mem_err1; 1101 1101 ··· 1649 1649 1650 1650 spin_lock_init(&mmio_read->lock); 1651 1651 mmio_read->read_resp = 1652 - dma_zalloc_coherent(ena_dev->dmadev, 1653 - sizeof(*mmio_read->read_resp), 1654 - 
&mmio_read->read_resp_dma_addr, GFP_KERNEL); 1652 + dma_alloc_coherent(ena_dev->dmadev, 1653 + sizeof(*mmio_read->read_resp), 1654 + &mmio_read->read_resp_dma_addr, GFP_KERNEL); 1655 1655 if (unlikely(!mmio_read->read_resp)) 1656 1656 goto err; 1657 1657 ··· 2623 2623 struct ena_host_attribute *host_attr = &ena_dev->host_attr; 2624 2624 2625 2625 host_attr->host_info = 2626 - dma_zalloc_coherent(ena_dev->dmadev, SZ_4K, 2627 - &host_attr->host_info_dma_addr, GFP_KERNEL); 2626 + dma_alloc_coherent(ena_dev->dmadev, SZ_4K, 2627 + &host_attr->host_info_dma_addr, GFP_KERNEL); 2628 2628 if (unlikely(!host_attr->host_info)) 2629 2629 return -ENOMEM; 2630 2630 ··· 2641 2641 struct ena_host_attribute *host_attr = &ena_dev->host_attr; 2642 2642 2643 2643 host_attr->debug_area_virt_addr = 2644 - dma_zalloc_coherent(ena_dev->dmadev, debug_area_size, 2645 - &host_attr->debug_area_dma_addr, GFP_KERNEL); 2644 + dma_alloc_coherent(ena_dev->dmadev, debug_area_size, 2645 + &host_attr->debug_area_dma_addr, 2646 + GFP_KERNEL); 2646 2647 if (unlikely(!host_attr->debug_area_virt_addr)) { 2647 2648 host_attr->debug_area_size = 0; 2648 2649 return -ENOMEM;
+4 -4
drivers/net/ethernet/apm/xgene-v2/main.c
··· 206 206 } 207 207 208 208 /* Packet buffers should be 64B aligned */ 209 - pkt_buf = dma_zalloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr, 210 - GFP_ATOMIC); 209 + pkt_buf = dma_alloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr, 210 + GFP_ATOMIC); 211 211 if (unlikely(!pkt_buf)) { 212 212 dev_kfree_skb_any(skb); 213 213 return NETDEV_TX_OK; ··· 428 428 ring->ndev = ndev; 429 429 430 430 size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC; 431 - ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma_addr, 432 - GFP_KERNEL); 431 + ring->desc_addr = dma_alloc_coherent(dev, size, &ring->dma_addr, 432 + GFP_KERNEL); 433 433 if (!ring->desc_addr) 434 434 goto err; 435 435
+3 -4
drivers/net/ethernet/atheros/alx/main.c
··· 660 660 alx->num_txq + 661 661 sizeof(struct alx_rrd) * alx->rx_ringsz + 662 662 sizeof(struct alx_rfd) * alx->rx_ringsz; 663 - alx->descmem.virt = dma_zalloc_coherent(&alx->hw.pdev->dev, 664 - alx->descmem.size, 665 - &alx->descmem.dma, 666 - GFP_KERNEL); 663 + alx->descmem.virt = dma_alloc_coherent(&alx->hw.pdev->dev, 664 + alx->descmem.size, 665 + &alx->descmem.dma, GFP_KERNEL); 667 666 if (!alx->descmem.virt) 668 667 return -ENOMEM; 669 668
+2 -2
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
··· 1019 1019 sizeof(struct atl1c_recv_ret_status) * rx_desc_count + 1020 1020 8 * 4; 1021 1021 1022 - ring_header->desc = dma_zalloc_coherent(&pdev->dev, ring_header->size, 1023 - &ring_header->dma, GFP_KERNEL); 1022 + ring_header->desc = dma_alloc_coherent(&pdev->dev, ring_header->size, 1023 + &ring_header->dma, GFP_KERNEL); 1024 1024 if (unlikely(!ring_header->desc)) { 1025 1025 dev_err(&pdev->dev, "could not get memory for DMA buffer\n"); 1026 1026 goto err_nomem;
+4 -4
drivers/net/ethernet/broadcom/bcm63xx_enet.c
··· 936 936 937 937 /* allocate rx dma ring */ 938 938 size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); 939 - p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); 939 + p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); 940 940 if (!p) { 941 941 ret = -ENOMEM; 942 942 goto out_freeirq_tx; ··· 947 947 948 948 /* allocate tx dma ring */ 949 949 size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); 950 - p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); 950 + p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); 951 951 if (!p) { 952 952 ret = -ENOMEM; 953 953 goto out_free_rx_ring; ··· 2120 2120 2121 2121 /* allocate rx dma ring */ 2122 2122 size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); 2123 - p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); 2123 + p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); 2124 2124 if (!p) { 2125 2125 dev_err(kdev, "cannot allocate rx ring %u\n", size); 2126 2126 ret = -ENOMEM; ··· 2132 2132 2133 2133 /* allocate tx dma ring */ 2134 2134 size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); 2135 - p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); 2135 + p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); 2136 2136 if (!p) { 2137 2137 dev_err(kdev, "cannot allocate tx ring\n"); 2138 2138 ret = -ENOMEM;
+2 -2
drivers/net/ethernet/broadcom/bcmsysport.c
··· 1506 1506 /* We just need one DMA descriptor which is DMA-able, since writing to 1507 1507 * the port will allocate a new descriptor in its internal linked-list 1508 1508 */ 1509 - p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma, 1510 - GFP_KERNEL); 1509 + p = dma_alloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma, 1510 + GFP_KERNEL); 1511 1511 if (!p) { 1512 1512 netif_err(priv, hw, priv->netdev, "DMA alloc failed\n"); 1513 1513 return -ENOMEM;
+6 -6
drivers/net/ethernet/broadcom/bgmac.c
··· 634 634 635 635 /* Alloc ring of descriptors */ 636 636 size = BGMAC_TX_RING_SLOTS * sizeof(struct bgmac_dma_desc); 637 - ring->cpu_base = dma_zalloc_coherent(dma_dev, size, 638 - &ring->dma_base, 639 - GFP_KERNEL); 637 + ring->cpu_base = dma_alloc_coherent(dma_dev, size, 638 + &ring->dma_base, 639 + GFP_KERNEL); 640 640 if (!ring->cpu_base) { 641 641 dev_err(bgmac->dev, "Allocation of TX ring 0x%X failed\n", 642 642 ring->mmio_base); ··· 659 659 660 660 /* Alloc ring of descriptors */ 661 661 size = BGMAC_RX_RING_SLOTS * sizeof(struct bgmac_dma_desc); 662 - ring->cpu_base = dma_zalloc_coherent(dma_dev, size, 663 - &ring->dma_base, 664 - GFP_KERNEL); 662 + ring->cpu_base = dma_alloc_coherent(dma_dev, size, 663 + &ring->dma_base, 664 + GFP_KERNEL); 665 665 if (!ring->cpu_base) { 666 666 dev_err(bgmac->dev, "Allocation of RX ring 0x%X failed\n", 667 667 ring->mmio_base);
+2 -2
drivers/net/ethernet/broadcom/bnx2.c
··· 844 844 BNX2_SBLK_MSIX_ALIGN_SIZE); 845 845 bp->status_stats_size = status_blk_size + 846 846 sizeof(struct statistics_block); 847 - status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size, 848 - &bp->status_blk_mapping, GFP_KERNEL); 847 + status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size, 848 + &bp->status_blk_mapping, GFP_KERNEL); 849 849 if (!status_blk) 850 850 return -ENOMEM; 851 851
+8 -8
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 3449 3449 goto alloc_tx_ext_stats; 3450 3450 3451 3451 bp->hw_rx_port_stats_ext = 3452 - dma_zalloc_coherent(&pdev->dev, 3453 - sizeof(struct rx_port_stats_ext), 3454 - &bp->hw_rx_port_stats_ext_map, 3455 - GFP_KERNEL); 3452 + dma_alloc_coherent(&pdev->dev, 3453 + sizeof(struct rx_port_stats_ext), 3454 + &bp->hw_rx_port_stats_ext_map, 3455 + GFP_KERNEL); 3456 3456 if (!bp->hw_rx_port_stats_ext) 3457 3457 return 0; 3458 3458 ··· 3462 3462 3463 3463 if (bp->hwrm_spec_code >= 0x10902) { 3464 3464 bp->hw_tx_port_stats_ext = 3465 - dma_zalloc_coherent(&pdev->dev, 3466 - sizeof(struct tx_port_stats_ext), 3467 - &bp->hw_tx_port_stats_ext_map, 3468 - GFP_KERNEL); 3465 + dma_alloc_coherent(&pdev->dev, 3466 + sizeof(struct tx_port_stats_ext), 3467 + &bp->hw_tx_port_stats_ext_map, 3468 + GFP_KERNEL); 3469 3469 } 3470 3470 bp->flags |= BNXT_FLAG_PORT_STATS_EXT; 3471 3471 }
+2 -2
drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
··· 316 316 317 317 n = IEEE_8021QAZ_MAX_TCS; 318 318 data_len = sizeof(*data) + sizeof(*fw_app) * n; 319 - data = dma_zalloc_coherent(&bp->pdev->dev, data_len, &mapping, 320 - GFP_KERNEL); 319 + data = dma_alloc_coherent(&bp->pdev->dev, data_len, &mapping, 320 + GFP_KERNEL); 321 321 if (!data) 322 322 return -ENOMEM; 323 323
+2 -2
drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
··· 85 85 return -EFAULT; 86 86 } 87 87 88 - data_addr = dma_zalloc_coherent(&bp->pdev->dev, bytesize, 89 - &data_dma_addr, GFP_KERNEL); 88 + data_addr = dma_alloc_coherent(&bp->pdev->dev, bytesize, 89 + &data_dma_addr, GFP_KERNEL); 90 90 if (!data_addr) 91 91 return -ENOMEM; 92 92
+11 -11
drivers/net/ethernet/broadcom/tg3.c
··· 8712 8712 if (!i && tg3_flag(tp, ENABLE_RSS)) 8713 8713 continue; 8714 8714 8715 - tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev, 8716 - TG3_RX_RCB_RING_BYTES(tp), 8717 - &tnapi->rx_rcb_mapping, 8718 - GFP_KERNEL); 8715 + tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev, 8716 + TG3_RX_RCB_RING_BYTES(tp), 8717 + &tnapi->rx_rcb_mapping, 8718 + GFP_KERNEL); 8719 8719 if (!tnapi->rx_rcb) 8720 8720 goto err_out; 8721 8721 } ··· 8768 8768 { 8769 8769 int i; 8770 8770 8771 - tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev, 8772 - sizeof(struct tg3_hw_stats), 8773 - &tp->stats_mapping, GFP_KERNEL); 8771 + tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev, 8772 + sizeof(struct tg3_hw_stats), 8773 + &tp->stats_mapping, GFP_KERNEL); 8774 8774 if (!tp->hw_stats) 8775 8775 goto err_out; 8776 8776 ··· 8778 8778 struct tg3_napi *tnapi = &tp->napi[i]; 8779 8779 struct tg3_hw_status *sblk; 8780 8780 8781 - tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev, 8782 - TG3_HW_STATUS_SIZE, 8783 - &tnapi->status_mapping, 8784 - GFP_KERNEL); 8781 + tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev, 8782 + TG3_HW_STATUS_SIZE, 8783 + &tnapi->status_mapping, 8784 + GFP_KERNEL); 8785 8785 if (!tnapi->hw_status) 8786 8786 goto err_out; 8787 8787
+1 -1
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
··· 59 59 dmem->q_len = q_len; 60 60 dmem->size = (desc_size * q_len) + align_bytes; 61 61 /* Save address, need it while freeing */ 62 - dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size, 62 + dmem->unalign_base = dma_alloc_coherent(&nic->pdev->dev, dmem->size, 63 63 &dmem->dma, GFP_KERNEL); 64 64 if (!dmem->unalign_base) 65 65 return -ENOMEM;
+1 -1
drivers/net/ethernet/chelsio/cxgb3/sge.c
··· 620 620 { 621 621 size_t len = nelem * elem_size; 622 622 void *s = NULL; 623 - void *p = dma_zalloc_coherent(&pdev->dev, len, phys, GFP_KERNEL); 623 + void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL); 624 624 625 625 if (!p) 626 626 return NULL;
+1 -1
drivers/net/ethernet/chelsio/cxgb4/sge.c
··· 694 694 { 695 695 size_t len = nelem * elem_size + stat_size; 696 696 void *s = NULL; 697 - void *p = dma_zalloc_coherent(dev, len, phys, GFP_KERNEL); 697 + void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL); 698 698 699 699 if (!p) 700 700 return NULL;
+1 -1
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
··· 756 756 * Allocate the hardware ring and PCI DMA bus address space for said. 757 757 */ 758 758 size_t hwlen = nelem * hwsize + stat_size; 759 - void *hwring = dma_zalloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL); 759 + void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL); 760 760 761 761 if (!hwring) 762 762 return NULL;
+34 -34
drivers/net/ethernet/emulex/benet/be_cmds.c
··· 1808 1808 total_size = buf_len; 1809 1809 1810 1810 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024; 1811 - get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, 1812 - get_fat_cmd.size, 1813 - &get_fat_cmd.dma, GFP_ATOMIC); 1811 + get_fat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, 1812 + get_fat_cmd.size, 1813 + &get_fat_cmd.dma, GFP_ATOMIC); 1814 1814 if (!get_fat_cmd.va) 1815 1815 return -ENOMEM; 1816 1816 ··· 2302 2302 return -EINVAL; 2303 2303 2304 2304 cmd.size = sizeof(struct be_cmd_resp_port_type); 2305 - cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 2306 - GFP_ATOMIC); 2305 + cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 2306 + GFP_ATOMIC); 2307 2307 if (!cmd.va) { 2308 2308 dev_err(&adapter->pdev->dev, "Memory allocation failed\n"); 2309 2309 return -ENOMEM; ··· 3066 3066 3067 3067 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object) 3068 3068 + LANCER_FW_DOWNLOAD_CHUNK; 3069 - flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, 3070 - &flash_cmd.dma, GFP_KERNEL); 3069 + flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma, 3070 + GFP_KERNEL); 3071 3071 if (!flash_cmd.va) 3072 3072 return -ENOMEM; 3073 3073 ··· 3184 3184 } 3185 3185 3186 3186 flash_cmd.size = sizeof(struct be_cmd_write_flashrom); 3187 - flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma, 3188 - GFP_KERNEL); 3187 + flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma, 3188 + GFP_KERNEL); 3189 3189 if (!flash_cmd.va) 3190 3190 return -ENOMEM; 3191 3191 ··· 3435 3435 goto err; 3436 3436 } 3437 3437 cmd.size = sizeof(struct be_cmd_req_get_phy_info); 3438 - cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 3439 - GFP_ATOMIC); 3438 + cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 3439 + GFP_ATOMIC); 3440 3440 if (!cmd.va) { 3441 3441 dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); 3442 3442 status = 
-ENOMEM; ··· 3522 3522 3523 3523 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem)); 3524 3524 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs); 3525 - attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, 3526 - attribs_cmd.size, 3527 - &attribs_cmd.dma, GFP_ATOMIC); 3525 + attribs_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, 3526 + attribs_cmd.size, 3527 + &attribs_cmd.dma, GFP_ATOMIC); 3528 3528 if (!attribs_cmd.va) { 3529 3529 dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); 3530 3530 status = -ENOMEM; ··· 3699 3699 3700 3700 memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem)); 3701 3701 get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list); 3702 - get_mac_list_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, 3703 - get_mac_list_cmd.size, 3704 - &get_mac_list_cmd.dma, 3705 - GFP_ATOMIC); 3702 + get_mac_list_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, 3703 + get_mac_list_cmd.size, 3704 + &get_mac_list_cmd.dma, 3705 + GFP_ATOMIC); 3706 3706 3707 3707 if (!get_mac_list_cmd.va) { 3708 3708 dev_err(&adapter->pdev->dev, ··· 3829 3829 3830 3830 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3831 3831 cmd.size = sizeof(struct be_cmd_req_set_mac_list); 3832 - cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 3833 - GFP_KERNEL); 3832 + cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 3833 + GFP_KERNEL); 3834 3834 if (!cmd.va) 3835 3835 return -ENOMEM; 3836 3836 ··· 4035 4035 4036 4036 memset(&cmd, 0, sizeof(struct be_dma_mem)); 4037 4037 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1); 4038 - cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 4039 - GFP_ATOMIC); 4038 + cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 4039 + GFP_ATOMIC); 4040 4040 if (!cmd.va) { 4041 4041 dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); 4042 4042 status = -ENOMEM; ··· 4089 4089 4090 4090 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); 
4091 4091 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); 4092 - extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, 4093 - extfat_cmd.size, &extfat_cmd.dma, 4094 - GFP_ATOMIC); 4092 + extfat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, 4093 + extfat_cmd.size, &extfat_cmd.dma, 4094 + GFP_ATOMIC); 4095 4095 if (!extfat_cmd.va) 4096 4096 return -ENOMEM; 4097 4097 ··· 4127 4127 4128 4128 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); 4129 4129 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); 4130 - extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, 4131 - extfat_cmd.size, &extfat_cmd.dma, 4132 - GFP_ATOMIC); 4130 + extfat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, 4131 + extfat_cmd.size, &extfat_cmd.dma, 4132 + GFP_ATOMIC); 4133 4133 4134 4134 if (!extfat_cmd.va) { 4135 4135 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n", ··· 4354 4354 4355 4355 memset(&cmd, 0, sizeof(struct be_dma_mem)); 4356 4356 cmd.size = sizeof(struct be_cmd_resp_get_func_config); 4357 - cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 4358 - GFP_ATOMIC); 4357 + cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 4358 + GFP_ATOMIC); 4359 4359 if (!cmd.va) { 4360 4360 dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); 4361 4361 status = -ENOMEM; ··· 4452 4452 4453 4453 memset(&cmd, 0, sizeof(struct be_dma_mem)); 4454 4454 cmd.size = sizeof(struct be_cmd_resp_get_profile_config); 4455 - cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 4456 - GFP_ATOMIC); 4455 + cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 4456 + GFP_ATOMIC); 4457 4457 if (!cmd.va) 4458 4458 return -ENOMEM; 4459 4459 ··· 4539 4539 4540 4540 memset(&cmd, 0, sizeof(struct be_dma_mem)); 4541 4541 cmd.size = sizeof(struct be_cmd_req_set_profile_config); 4542 - cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 4543 - GFP_ATOMIC); 4542 + cmd.va = 
dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 4543 + GFP_ATOMIC); 4544 4544 if (!cmd.va) 4545 4545 return -ENOMEM; 4546 4546
+9 -9
drivers/net/ethernet/emulex/benet/be_ethtool.c
··· 274 274 int status = 0; 275 275 276 276 read_cmd.size = LANCER_READ_FILE_CHUNK; 277 - read_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, read_cmd.size, 278 - &read_cmd.dma, GFP_ATOMIC); 277 + read_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, read_cmd.size, 278 + &read_cmd.dma, GFP_ATOMIC); 279 279 280 280 if (!read_cmd.va) { 281 281 dev_err(&adapter->pdev->dev, ··· 815 815 } 816 816 817 817 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config); 818 - cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL); 818 + cmd.va = dma_alloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL); 819 819 if (!cmd.va) 820 820 return -ENOMEM; 821 821 ··· 851 851 }; 852 852 853 853 ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test); 854 - ddrdma_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, 855 - ddrdma_cmd.size, &ddrdma_cmd.dma, 856 - GFP_KERNEL); 854 + ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, 855 + ddrdma_cmd.size, &ddrdma_cmd.dma, 856 + GFP_KERNEL); 857 857 if (!ddrdma_cmd.va) 858 858 return -ENOMEM; 859 859 ··· 1014 1014 1015 1015 memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem)); 1016 1016 eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read); 1017 - eeprom_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, 1018 - eeprom_cmd.size, &eeprom_cmd.dma, 1019 - GFP_KERNEL); 1017 + eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, 1018 + eeprom_cmd.size, &eeprom_cmd.dma, 1019 + GFP_KERNEL); 1020 1020 1021 1021 if (!eeprom_cmd.va) 1022 1022 return -ENOMEM;
+9 -9
drivers/net/ethernet/emulex/benet/be_main.c
··· 167 167 q->len = len; 168 168 q->entry_size = entry_size; 169 169 mem->size = len * entry_size; 170 - mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma, 171 - GFP_KERNEL); 170 + mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, 171 + &mem->dma, GFP_KERNEL); 172 172 if (!mem->va) 173 173 return -ENOMEM; 174 174 return 0; ··· 5766 5766 int status = 0; 5767 5767 5768 5768 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; 5769 - mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size, 5770 - &mbox_mem_alloc->dma, 5771 - GFP_KERNEL); 5769 + mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size, 5770 + &mbox_mem_alloc->dma, 5771 + GFP_KERNEL); 5772 5772 if (!mbox_mem_alloc->va) 5773 5773 return -ENOMEM; 5774 5774 ··· 5777 5777 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); 5778 5778 5779 5779 rx_filter->size = sizeof(struct be_cmd_req_rx_filter); 5780 - rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size, 5781 - &rx_filter->dma, GFP_KERNEL); 5780 + rx_filter->va = dma_alloc_coherent(dev, rx_filter->size, 5781 + &rx_filter->dma, GFP_KERNEL); 5782 5782 if (!rx_filter->va) { 5783 5783 status = -ENOMEM; 5784 5784 goto free_mbox; ··· 5792 5792 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1); 5793 5793 else 5794 5794 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2); 5795 - stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size, 5796 - &stats_cmd->dma, GFP_KERNEL); 5795 + stats_cmd->va = dma_alloc_coherent(dev, stats_cmd->size, 5796 + &stats_cmd->dma, GFP_KERNEL); 5797 5797 if (!stats_cmd->va) { 5798 5798 status = -ENOMEM; 5799 5799 goto free_rx_filter;
+6 -8
drivers/net/ethernet/faraday/ftgmac100.c
··· 935 935 return -ENOMEM; 936 936 937 937 /* Allocate descriptors */ 938 - priv->rxdes = dma_zalloc_coherent(priv->dev, 939 - MAX_RX_QUEUE_ENTRIES * 940 - sizeof(struct ftgmac100_rxdes), 941 - &priv->rxdes_dma, GFP_KERNEL); 938 + priv->rxdes = dma_alloc_coherent(priv->dev, 939 + MAX_RX_QUEUE_ENTRIES * sizeof(struct ftgmac100_rxdes), 940 + &priv->rxdes_dma, GFP_KERNEL); 942 941 if (!priv->rxdes) 943 942 return -ENOMEM; 944 - priv->txdes = dma_zalloc_coherent(priv->dev, 945 - MAX_TX_QUEUE_ENTRIES * 946 - sizeof(struct ftgmac100_txdes), 947 - &priv->txdes_dma, GFP_KERNEL); 943 + priv->txdes = dma_alloc_coherent(priv->dev, 944 + MAX_TX_QUEUE_ENTRIES * sizeof(struct ftgmac100_txdes), 945 + &priv->txdes_dma, GFP_KERNEL); 948 946 if (!priv->txdes) 949 947 return -ENOMEM; 950 948
+3 -4
drivers/net/ethernet/faraday/ftmac100.c
··· 734 734 { 735 735 int i; 736 736 737 - priv->descs = dma_zalloc_coherent(priv->dev, 738 - sizeof(struct ftmac100_descs), 739 - &priv->descs_dma_addr, 740 - GFP_KERNEL); 737 + priv->descs = dma_alloc_coherent(priv->dev, 738 + sizeof(struct ftmac100_descs), 739 + &priv->descs_dma_addr, GFP_KERNEL); 741 740 if (!priv->descs) 742 741 return -ENOMEM; 743 742
+2 -2
drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
··· 1006 1006 1007 1007 for (i = 0; i < QUEUE_NUMS; i++) { 1008 1008 size = priv->pool[i].count * sizeof(struct hix5hd2_desc); 1009 - virt_addr = dma_zalloc_coherent(dev, size, &phys_addr, 1010 - GFP_KERNEL); 1009 + virt_addr = dma_alloc_coherent(dev, size, &phys_addr, 1010 + GFP_KERNEL); 1011 1011 if (virt_addr == NULL) 1012 1012 goto error_free_pool; 1013 1013
+2 -3
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
··· 2041 2041 { 2042 2042 int size = ring->desc_num * sizeof(ring->desc[0]); 2043 2043 2044 - ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size, 2045 - &ring->desc_dma_addr, 2046 - GFP_KERNEL); 2044 + ring->desc = dma_alloc_coherent(ring_to_dev(ring), size, 2045 + &ring->desc_dma_addr, GFP_KERNEL); 2047 2046 if (!ring->desc) 2048 2047 return -ENOMEM; 2049 2048
+2 -3
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
··· 39 39 { 40 40 int size = ring->desc_num * sizeof(struct hclge_desc); 41 41 42 - ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring), 43 - size, &ring->desc_dma_addr, 44 - GFP_KERNEL); 42 + ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size, 43 + &ring->desc_dma_addr, GFP_KERNEL); 45 44 if (!ring->desc) 46 45 return -ENOMEM; 47 46
+2 -3
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
··· 115 115 { 116 116 int size = ring->desc_num * sizeof(struct hclgevf_desc); 117 117 118 - ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring), 119 - size, &ring->desc_dma_addr, 120 - GFP_KERNEL); 118 + ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size, 119 + &ring->desc_dma_addr, GFP_KERNEL); 121 120 if (!ring->desc) 122 121 return -ENOMEM; 123 122
+8 -8
drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
··· 613 613 u8 *cmd_vaddr; 614 614 int err = 0; 615 615 616 - cmd_vaddr = dma_zalloc_coherent(&pdev->dev, API_CMD_BUF_SIZE, 617 - &cmd_paddr, GFP_KERNEL); 616 + cmd_vaddr = dma_alloc_coherent(&pdev->dev, API_CMD_BUF_SIZE, 617 + &cmd_paddr, GFP_KERNEL); 618 618 if (!cmd_vaddr) { 619 619 dev_err(&pdev->dev, "Failed to allocate API CMD DMA memory\n"); 620 620 return -ENOMEM; ··· 663 663 dma_addr_t node_paddr; 664 664 int err; 665 665 666 - node = dma_zalloc_coherent(&pdev->dev, chain->cell_size, 667 - &node_paddr, GFP_KERNEL); 666 + node = dma_alloc_coherent(&pdev->dev, chain->cell_size, &node_paddr, 667 + GFP_KERNEL); 668 668 if (!node) { 669 669 dev_err(&pdev->dev, "Failed to allocate dma API CMD cell\n"); 670 670 return -ENOMEM; ··· 821 821 if (!chain->cell_ctxt) 822 822 return -ENOMEM; 823 823 824 - chain->wb_status = dma_zalloc_coherent(&pdev->dev, 825 - sizeof(*chain->wb_status), 826 - &chain->wb_status_paddr, 827 - GFP_KERNEL); 824 + chain->wb_status = dma_alloc_coherent(&pdev->dev, 825 + sizeof(*chain->wb_status), 826 + &chain->wb_status_paddr, 827 + GFP_KERNEL); 828 828 if (!chain->wb_status) { 829 829 dev_err(&pdev->dev, "Failed to allocate DMA wb status\n"); 830 830 return -ENOMEM;
+4 -4
drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
··· 593 593 } 594 594 595 595 for (pg = 0; pg < eq->num_pages; pg++) { 596 - eq->virt_addr[pg] = dma_zalloc_coherent(&pdev->dev, 597 - eq->page_size, 598 - &eq->dma_addr[pg], 599 - GFP_KERNEL); 596 + eq->virt_addr[pg] = dma_alloc_coherent(&pdev->dev, 597 + eq->page_size, 598 + &eq->dma_addr[pg], 599 + GFP_KERNEL); 600 600 if (!eq->virt_addr[pg]) { 601 601 err = -ENOMEM; 602 602 goto err_dma_alloc;
+3 -3
drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
··· 355 355 goto err_sq_db; 356 356 } 357 357 358 - ci_addr_base = dma_zalloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps), 359 - &func_to_io->ci_dma_base, 360 - GFP_KERNEL); 358 + ci_addr_base = dma_alloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps), 359 + &func_to_io->ci_dma_base, 360 + GFP_KERNEL); 361 361 if (!ci_addr_base) { 362 362 dev_err(&pdev->dev, "Failed to allocate CI area\n"); 363 363 err = -ENOMEM;
+5 -5
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
··· 336 336 goto err_cqe_dma_arr_alloc; 337 337 338 338 for (i = 0; i < wq->q_depth; i++) { 339 - rq->cqe[i] = dma_zalloc_coherent(&pdev->dev, 340 - sizeof(*rq->cqe[i]), 341 - &rq->cqe_dma[i], GFP_KERNEL); 339 + rq->cqe[i] = dma_alloc_coherent(&pdev->dev, 340 + sizeof(*rq->cqe[i]), 341 + &rq->cqe_dma[i], GFP_KERNEL); 342 342 if (!rq->cqe[i]) 343 343 goto err_cqe_alloc; 344 344 } ··· 415 415 416 416 /* HW requirements: Must be at least 32 bit */ 417 417 pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32)); 418 - rq->pi_virt_addr = dma_zalloc_coherent(&pdev->dev, pi_size, 419 - &rq->pi_dma_addr, GFP_KERNEL); 418 + rq->pi_virt_addr = dma_alloc_coherent(&pdev->dev, pi_size, 419 + &rq->pi_dma_addr, GFP_KERNEL); 420 420 if (!rq->pi_virt_addr) { 421 421 dev_err(&pdev->dev, "Failed to allocate PI address\n"); 422 422 err = -ENOMEM;
+4 -4
drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
··· 114 114 struct pci_dev *pdev = hwif->pdev; 115 115 dma_addr_t dma_addr; 116 116 117 - *vaddr = dma_zalloc_coherent(&pdev->dev, page_sz, &dma_addr, 118 - GFP_KERNEL); 117 + *vaddr = dma_alloc_coherent(&pdev->dev, page_sz, &dma_addr, 118 + GFP_KERNEL); 119 119 if (!*vaddr) { 120 120 dev_err(&pdev->dev, "Failed to allocate dma for wqs page\n"); 121 121 return -ENOMEM; ··· 482 482 u64 *paddr = &wq->block_vaddr[i]; 483 483 dma_addr_t dma_addr; 484 484 485 - *vaddr = dma_zalloc_coherent(&pdev->dev, wq->wq_page_size, 486 - &dma_addr, GFP_KERNEL); 485 + *vaddr = dma_alloc_coherent(&pdev->dev, wq->wq_page_size, 486 + &dma_addr, GFP_KERNEL); 487 487 if (!*vaddr) { 488 488 dev_err(&pdev->dev, "Failed to allocate wq page\n"); 489 489 goto err_alloc_wq_pages;
+2 -2
drivers/net/ethernet/ibm/emac/mal.c
··· 636 636 bd_size = sizeof(struct mal_descriptor) * 637 637 (NUM_TX_BUFF * mal->num_tx_chans + 638 638 NUM_RX_BUFF * mal->num_rx_chans); 639 - mal->bd_virt = dma_zalloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma, 640 - GFP_KERNEL); 639 + mal->bd_virt = dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma, 640 + GFP_KERNEL); 641 641 if (mal->bd_virt == NULL) { 642 642 err = -ENOMEM; 643 643 goto fail_unmap;
+4 -4
drivers/net/ethernet/intel/e1000/e1000_ethtool.c
··· 993 993 994 994 txdr->size = txdr->count * sizeof(struct e1000_tx_desc); 995 995 txdr->size = ALIGN(txdr->size, 4096); 996 - txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma, 997 - GFP_KERNEL); 996 + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, 997 + GFP_KERNEL); 998 998 if (!txdr->desc) { 999 999 ret_val = 2; 1000 1000 goto err_nomem; ··· 1051 1051 } 1052 1052 1053 1053 rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc); 1054 - rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, 1055 - GFP_KERNEL); 1054 + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, 1055 + GFP_KERNEL); 1056 1056 if (!rxdr->desc) { 1057 1057 ret_val = 6; 1058 1058 goto err_nomem;
+2 -2
drivers/net/ethernet/intel/e1000e/netdev.c
··· 2305 2305 { 2306 2306 struct pci_dev *pdev = adapter->pdev; 2307 2307 2308 - ring->desc = dma_zalloc_coherent(&pdev->dev, ring->size, &ring->dma, 2309 - GFP_KERNEL); 2308 + ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma, 2309 + GFP_KERNEL); 2310 2310 if (!ring->desc) 2311 2311 return -ENOMEM; 2312 2312
+2 -2
drivers/net/ethernet/intel/i40e/i40e_main.c
··· 109 109 struct i40e_pf *pf = (struct i40e_pf *)hw->back; 110 110 111 111 mem->size = ALIGN(size, alignment); 112 - mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size, 113 - &mem->pa, GFP_KERNEL); 112 + mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa, 113 + GFP_KERNEL); 114 114 if (!mem->va) 115 115 return -ENOMEM; 116 116
+4 -4
drivers/net/ethernet/intel/ixgb/ixgb_main.c
··· 680 680 txdr->size = txdr->count * sizeof(struct ixgb_tx_desc); 681 681 txdr->size = ALIGN(txdr->size, 4096); 682 682 683 - txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma, 684 - GFP_KERNEL); 683 + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, 684 + GFP_KERNEL); 685 685 if (!txdr->desc) { 686 686 vfree(txdr->buffer_info); 687 687 return -ENOMEM; ··· 763 763 rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc); 764 764 rxdr->size = ALIGN(rxdr->size, 4096); 765 765 766 - rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, 767 - GFP_KERNEL); 766 + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, 767 + GFP_KERNEL); 768 768 769 769 if (!rxdr->desc) { 770 770 vfree(rxdr->buffer_info);
+3 -3
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
··· 2044 2044 u32 txq_dma; 2045 2045 2046 2046 /* Allocate memory for TX descriptors */ 2047 - aggr_txq->descs = dma_zalloc_coherent(&pdev->dev, 2048 - MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, 2049 - &aggr_txq->descs_dma, GFP_KERNEL); 2047 + aggr_txq->descs = dma_alloc_coherent(&pdev->dev, 2048 + MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, 2049 + &aggr_txq->descs_dma, GFP_KERNEL); 2050 2050 if (!aggr_txq->descs) 2051 2051 return -ENOMEM; 2052 2052
+9 -9
drivers/net/ethernet/marvell/pxa168_eth.c
··· 557 557 * table is full. 558 558 */ 559 559 if (!pep->htpr) { 560 - pep->htpr = dma_zalloc_coherent(pep->dev->dev.parent, 561 - HASH_ADDR_TABLE_SIZE, 562 - &pep->htpr_dma, GFP_KERNEL); 560 + pep->htpr = dma_alloc_coherent(pep->dev->dev.parent, 561 + HASH_ADDR_TABLE_SIZE, 562 + &pep->htpr_dma, GFP_KERNEL); 563 563 if (!pep->htpr) 564 564 return -ENOMEM; 565 565 } else { ··· 1044 1044 pep->rx_desc_count = 0; 1045 1045 size = pep->rx_ring_size * sizeof(struct rx_desc); 1046 1046 pep->rx_desc_area_size = size; 1047 - pep->p_rx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size, 1048 - &pep->rx_desc_dma, 1049 - GFP_KERNEL); 1047 + pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size, 1048 + &pep->rx_desc_dma, 1049 + GFP_KERNEL); 1050 1050 if (!pep->p_rx_desc_area) 1051 1051 goto out; 1052 1052 ··· 1103 1103 pep->tx_desc_count = 0; 1104 1104 size = pep->tx_ring_size * sizeof(struct tx_desc); 1105 1105 pep->tx_desc_area_size = size; 1106 - pep->p_tx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size, 1107 - &pep->tx_desc_dma, 1108 - GFP_KERNEL); 1106 + pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size, 1107 + &pep->tx_desc_dma, 1108 + GFP_KERNEL); 1109 1109 if (!pep->p_tx_desc_area) 1110 1110 goto out; 1111 1111 /* Initialize the next_desc_ptr links in the Tx descriptors ring */
+9 -9
drivers/net/ethernet/mediatek/mtk_eth_soc.c
··· 598 598 dma_addr_t dma_addr; 599 599 int i; 600 600 601 - eth->scratch_ring = dma_zalloc_coherent(eth->dev, 602 - cnt * sizeof(struct mtk_tx_dma), 603 - &eth->phy_scratch_ring, 604 - GFP_ATOMIC); 601 + eth->scratch_ring = dma_alloc_coherent(eth->dev, 602 + cnt * sizeof(struct mtk_tx_dma), 603 + &eth->phy_scratch_ring, 604 + GFP_ATOMIC); 605 605 if (unlikely(!eth->scratch_ring)) 606 606 return -ENOMEM; 607 607 ··· 1213 1213 if (!ring->buf) 1214 1214 goto no_tx_mem; 1215 1215 1216 - ring->dma = dma_zalloc_coherent(eth->dev, MTK_DMA_SIZE * sz, 1217 - &ring->phys, GFP_ATOMIC); 1216 + ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz, 1217 + &ring->phys, GFP_ATOMIC); 1218 1218 if (!ring->dma) 1219 1219 goto no_tx_mem; 1220 1220 ··· 1310 1310 return -ENOMEM; 1311 1311 } 1312 1312 1313 - ring->dma = dma_zalloc_coherent(eth->dev, 1314 - rx_dma_size * sizeof(*ring->dma), 1315 - &ring->phys, GFP_ATOMIC); 1313 + ring->dma = dma_alloc_coherent(eth->dev, 1314 + rx_dma_size * sizeof(*ring->dma), 1315 + &ring->phys, GFP_ATOMIC); 1316 1316 if (!ring->dma) 1317 1317 return -ENOMEM; 1318 1318
+4 -4
drivers/net/ethernet/mellanox/mlx4/alloc.c
··· 584 584 buf->npages = 1; 585 585 buf->page_shift = get_order(size) + PAGE_SHIFT; 586 586 buf->direct.buf = 587 - dma_zalloc_coherent(&dev->persist->pdev->dev, 588 - size, &t, GFP_KERNEL); 587 + dma_alloc_coherent(&dev->persist->pdev->dev, size, &t, 588 + GFP_KERNEL); 589 589 if (!buf->direct.buf) 590 590 return -ENOMEM; 591 591 ··· 624 624 625 625 for (i = 0; i < buf->nbufs; ++i) { 626 626 buf->page_list[i].buf = 627 - dma_zalloc_coherent(&dev->persist->pdev->dev, 628 - PAGE_SIZE, &t, GFP_KERNEL); 627 + dma_alloc_coherent(&dev->persist->pdev->dev, 628 + PAGE_SIZE, &t, GFP_KERNEL); 629 629 if (!buf->page_list[i].buf) 630 630 goto err_free; 631 631
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
··· 63 63 mutex_lock(&priv->alloc_mutex); 64 64 original_node = dev_to_node(&dev->pdev->dev); 65 65 set_dev_node(&dev->pdev->dev, node); 66 - cpu_handle = dma_zalloc_coherent(&dev->pdev->dev, size, 67 - dma_handle, GFP_KERNEL); 66 + cpu_handle = dma_alloc_coherent(&dev->pdev->dev, size, dma_handle, 67 + GFP_KERNEL); 68 68 set_dev_node(&dev->pdev->dev, original_node); 69 69 mutex_unlock(&priv->alloc_mutex); 70 70 return cpu_handle;
+5 -5
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
··· 1789 1789 { 1790 1790 struct device *ddev = &dev->pdev->dev; 1791 1791 1792 - cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, 1793 - &cmd->alloc_dma, GFP_KERNEL); 1792 + cmd->cmd_alloc_buf = dma_alloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, 1793 + &cmd->alloc_dma, GFP_KERNEL); 1794 1794 if (!cmd->cmd_alloc_buf) 1795 1795 return -ENOMEM; 1796 1796 ··· 1804 1804 1805 1805 dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf, 1806 1806 cmd->alloc_dma); 1807 - cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, 1808 - 2 * MLX5_ADAPTER_PAGE_SIZE - 1, 1809 - &cmd->alloc_dma, GFP_KERNEL); 1807 + cmd->cmd_alloc_buf = dma_alloc_coherent(ddev, 1808 + 2 * MLX5_ADAPTER_PAGE_SIZE - 1, 1809 + &cmd->alloc_dma, GFP_KERNEL); 1810 1810 if (!cmd->cmd_alloc_buf) 1811 1811 return -ENOMEM; 1812 1812
+3 -3
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
··· 3604 3604 for (i = 0; i < mgp->num_slices; i++) { 3605 3605 ss = &mgp->ss[i]; 3606 3606 bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry); 3607 - ss->rx_done.entry = dma_zalloc_coherent(&pdev->dev, bytes, 3608 - &ss->rx_done.bus, 3609 - GFP_KERNEL); 3607 + ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes, 3608 + &ss->rx_done.bus, 3609 + GFP_KERNEL); 3610 3610 if (ss->rx_done.entry == NULL) 3611 3611 goto abort; 3612 3612 bytes = sizeof(*ss->fw_stats);
+6 -6
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
··· 2170 2170 tx_ring->cnt = dp->txd_cnt; 2171 2171 2172 2172 tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds)); 2173 - tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size, 2174 - &tx_ring->dma, 2175 - GFP_KERNEL | __GFP_NOWARN); 2173 + tx_ring->txds = dma_alloc_coherent(dp->dev, tx_ring->size, 2174 + &tx_ring->dma, 2175 + GFP_KERNEL | __GFP_NOWARN); 2176 2176 if (!tx_ring->txds) { 2177 2177 netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n", 2178 2178 tx_ring->cnt); ··· 2328 2328 2329 2329 rx_ring->cnt = dp->rxd_cnt; 2330 2330 rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds)); 2331 - rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size, 2332 - &rx_ring->dma, 2333 - GFP_KERNEL | __GFP_NOWARN); 2331 + rx_ring->rxds = dma_alloc_coherent(dp->dev, rx_ring->size, 2332 + &rx_ring->dma, 2333 + GFP_KERNEL | __GFP_NOWARN); 2334 2334 if (!rx_ring->rxds) { 2335 2335 netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n", 2336 2336 rx_ring->cnt);
+6 -6
drivers/net/ethernet/ni/nixge.c
··· 287 287 priv->rx_bd_ci = 0; 288 288 289 289 /* Allocate the Tx and Rx buffer descriptors. */ 290 - priv->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent, 291 - sizeof(*priv->tx_bd_v) * TX_BD_NUM, 292 - &priv->tx_bd_p, GFP_KERNEL); 290 + priv->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, 291 + sizeof(*priv->tx_bd_v) * TX_BD_NUM, 292 + &priv->tx_bd_p, GFP_KERNEL); 293 293 if (!priv->tx_bd_v) 294 294 goto out; 295 295 ··· 299 299 if (!priv->tx_skb) 300 300 goto out; 301 301 302 - priv->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent, 303 - sizeof(*priv->rx_bd_v) * RX_BD_NUM, 304 - &priv->rx_bd_p, GFP_KERNEL); 302 + priv->rx_bd_v = dma_alloc_coherent(ndev->dev.parent, 303 + sizeof(*priv->rx_bd_v) * RX_BD_NUM, 304 + &priv->rx_bd_p, GFP_KERNEL); 305 305 if (!priv->rx_bd_v) 306 306 goto out; 307 307
+6 -6
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
··· 1440 1440 1441 1441 size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY; 1442 1442 rx_ring->rx_buff_pool = 1443 - dma_zalloc_coherent(&pdev->dev, size, 1444 - &rx_ring->rx_buff_pool_logic, GFP_KERNEL); 1443 + dma_alloc_coherent(&pdev->dev, size, 1444 + &rx_ring->rx_buff_pool_logic, GFP_KERNEL); 1445 1445 if (!rx_ring->rx_buff_pool) 1446 1446 return -ENOMEM; 1447 1447 ··· 1755 1755 1756 1756 tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc); 1757 1757 1758 - tx_ring->desc = dma_zalloc_coherent(&pdev->dev, tx_ring->size, 1759 - &tx_ring->dma, GFP_KERNEL); 1758 + tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, 1759 + &tx_ring->dma, GFP_KERNEL); 1760 1760 if (!tx_ring->desc) { 1761 1761 vfree(tx_ring->buffer_info); 1762 1762 return -ENOMEM; ··· 1798 1798 return -ENOMEM; 1799 1799 1800 1800 rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc); 1801 - rx_ring->desc = dma_zalloc_coherent(&pdev->dev, rx_ring->size, 1802 - &rx_ring->dma, GFP_KERNEL); 1801 + rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, 1802 + &rx_ring->dma, GFP_KERNEL); 1803 1803 if (!rx_ring->desc) { 1804 1804 vfree(rx_ring->buffer_info); 1805 1805 return -ENOMEM;
+3 -3
drivers/net/ethernet/pasemi/pasemi_mac.c
··· 401 401 if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE)) 402 402 goto out_ring_desc; 403 403 404 - ring->buffers = dma_zalloc_coherent(&mac->dma_pdev->dev, 405 - RX_RING_SIZE * sizeof(u64), 406 - &ring->buf_dma, GFP_KERNEL); 404 + ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev, 405 + RX_RING_SIZE * sizeof(u64), 406 + &ring->buf_dma, GFP_KERNEL); 407 407 if (!ring->buffers) 408 408 goto out_ring_desc; 409 409
+8 -8
drivers/net/ethernet/qlogic/qed/qed_cxt.c
··· 936 936 u32 size = min_t(u32, total_size, psz); 937 937 void **p_virt = &p_mngr->t2[i].p_virt; 938 938 939 - *p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, 940 - size, &p_mngr->t2[i].p_phys, 941 - GFP_KERNEL); 939 + *p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size, 940 + &p_mngr->t2[i].p_phys, 941 + GFP_KERNEL); 942 942 if (!p_mngr->t2[i].p_virt) { 943 943 rc = -ENOMEM; 944 944 goto t2_fail; ··· 1054 1054 u32 size; 1055 1055 1056 1056 size = min_t(u32, sz_left, p_blk->real_size_in_page); 1057 - p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, size, 1058 - &p_phys, GFP_KERNEL); 1057 + p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size, 1058 + &p_phys, GFP_KERNEL); 1059 1059 if (!p_virt) 1060 1060 return -ENOMEM; 1061 1061 ··· 2306 2306 goto out0; 2307 2307 } 2308 2308 2309 - p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, 2310 - p_blk->real_size_in_page, &p_phys, 2311 - GFP_KERNEL); 2309 + p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 2310 + p_blk->real_size_in_page, &p_phys, 2311 + GFP_KERNEL); 2312 2312 if (!p_virt) { 2313 2313 rc = -ENOMEM; 2314 2314 goto out1;
+14 -14
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
··· 434 434 *(tx_ring->hw_consumer) = 0; 435 435 436 436 rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx); 437 - rq_addr = dma_zalloc_coherent(&adapter->pdev->dev, rq_size, 438 - &rq_phys_addr, GFP_KERNEL); 437 + rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size, 438 + &rq_phys_addr, GFP_KERNEL); 439 439 if (!rq_addr) 440 440 return -ENOMEM; 441 441 442 442 rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx); 443 - rsp_addr = dma_zalloc_coherent(&adapter->pdev->dev, rsp_size, 444 - &rsp_phys_addr, GFP_KERNEL); 443 + rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size, 444 + &rsp_phys_addr, GFP_KERNEL); 445 445 if (!rsp_addr) { 446 446 err = -ENOMEM; 447 447 goto out_free_rq; ··· 855 855 struct qlcnic_cmd_args cmd; 856 856 size_t nic_size = sizeof(struct qlcnic_info_le); 857 857 858 - nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size, 859 - &nic_dma_t, GFP_KERNEL); 858 + nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size, 859 + &nic_dma_t, GFP_KERNEL); 860 860 if (!nic_info_addr) 861 861 return -ENOMEM; 862 862 ··· 909 909 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) 910 910 return err; 911 911 912 - nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size, 913 - &nic_dma_t, GFP_KERNEL); 912 + nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size, 913 + &nic_dma_t, GFP_KERNEL); 914 914 if (!nic_info_addr) 915 915 return -ENOMEM; 916 916 ··· 964 964 void *pci_info_addr; 965 965 int err = 0, i; 966 966 967 - pci_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, pci_size, 968 - &pci_info_dma_t, GFP_KERNEL); 967 + pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size, 968 + &pci_info_dma_t, GFP_KERNEL); 969 969 if (!pci_info_addr) 970 970 return -ENOMEM; 971 971 ··· 1078 1078 return -EIO; 1079 1079 } 1080 1080 1081 - stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size, 1082 - &stats_dma_t, GFP_KERNEL); 1081 + stats_addr = dma_alloc_coherent(&adapter->pdev->dev, 
stats_size, 1082 + &stats_dma_t, GFP_KERNEL); 1083 1083 if (!stats_addr) 1084 1084 return -ENOMEM; 1085 1085 ··· 1134 1134 if (mac_stats == NULL) 1135 1135 return -ENOMEM; 1136 1136 1137 - stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size, 1138 - &stats_dma_t, GFP_KERNEL); 1137 + stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size, 1138 + &stats_dma_t, GFP_KERNEL); 1139 1139 if (!stats_addr) 1140 1140 return -ENOMEM; 1141 1141
+1 -1
drivers/net/ethernet/qualcomm/emac/emac-mac.c
··· 776 776 8 + 2 * 8; /* 8 byte per one Tx and two Rx rings */ 777 777 778 778 ring_header->used = 0; 779 - ring_header->v_addr = dma_zalloc_coherent(dev, ring_header->size, 779 + ring_header->v_addr = dma_alloc_coherent(dev, ring_header->size, 780 780 &ring_header->dma_addr, 781 781 GFP_KERNEL); 782 782 if (!ring_header->v_addr)
+6 -6
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
··· 400 400 } 401 401 402 402 /* allocate memory for TX descriptors */ 403 - tx_ring->dma_tx = dma_zalloc_coherent(dev, 404 - tx_rsize * sizeof(struct sxgbe_tx_norm_desc), 405 - &tx_ring->dma_tx_phy, GFP_KERNEL); 403 + tx_ring->dma_tx = dma_alloc_coherent(dev, 404 + tx_rsize * sizeof(struct sxgbe_tx_norm_desc), 405 + &tx_ring->dma_tx_phy, GFP_KERNEL); 406 406 if (!tx_ring->dma_tx) 407 407 return -ENOMEM; 408 408 ··· 479 479 rx_ring->queue_no = queue_no; 480 480 481 481 /* allocate memory for RX descriptors */ 482 - rx_ring->dma_rx = dma_zalloc_coherent(priv->device, 483 - rx_rsize * sizeof(struct sxgbe_rx_norm_desc), 484 - &rx_ring->dma_rx_phy, GFP_KERNEL); 482 + rx_ring->dma_rx = dma_alloc_coherent(priv->device, 483 + rx_rsize * sizeof(struct sxgbe_rx_norm_desc), 484 + &rx_ring->dma_rx_phy, GFP_KERNEL); 485 485 486 486 if (rx_ring->dma_rx == NULL) 487 487 return -ENOMEM;
+2 -2
drivers/net/ethernet/sfc/falcon/nic.c
··· 33 33 int ef4_nic_alloc_buffer(struct ef4_nic *efx, struct ef4_buffer *buffer, 34 34 unsigned int len, gfp_t gfp_flags) 35 35 { 36 - buffer->addr = dma_zalloc_coherent(&efx->pci_dev->dev, len, 37 - &buffer->dma_addr, gfp_flags); 36 + buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len, 37 + &buffer->dma_addr, gfp_flags); 38 38 if (!buffer->addr) 39 39 return -ENOMEM; 40 40 buffer->len = len;
+2 -2
drivers/net/ethernet/sfc/nic.c
··· 34 34 int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, 35 35 unsigned int len, gfp_t gfp_flags) 36 36 { 37 - buffer->addr = dma_zalloc_coherent(&efx->pci_dev->dev, len, 38 - &buffer->dma_addr, gfp_flags); 37 + buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len, 38 + &buffer->dma_addr, gfp_flags); 39 39 if (!buffer->addr) 40 40 return -ENOMEM; 41 41 buffer->len = len;
+2 -2
drivers/net/ethernet/sgi/meth.c
··· 211 211 static int meth_init_tx_ring(struct meth_private *priv) 212 212 { 213 213 /* Init TX ring */ 214 - priv->tx_ring = dma_zalloc_coherent(NULL, TX_RING_BUFFER_SIZE, 215 - &priv->tx_ring_dma, GFP_ATOMIC); 214 + priv->tx_ring = dma_alloc_coherent(NULL, TX_RING_BUFFER_SIZE, 215 + &priv->tx_ring_dma, GFP_ATOMIC); 216 216 if (!priv->tx_ring) 217 217 return -ENOMEM; 218 218
+2 -2
drivers/net/ethernet/socionext/netsec.c
··· 1029 1029 struct netsec_desc_ring *dring = &priv->desc_ring[id]; 1030 1030 int i; 1031 1031 1032 - dring->vaddr = dma_zalloc_coherent(priv->dev, DESC_SZ * DESC_NUM, 1033 - &dring->desc_dma, GFP_KERNEL); 1032 + dring->vaddr = dma_alloc_coherent(priv->dev, DESC_SZ * DESC_NUM, 1033 + &dring->desc_dma, GFP_KERNEL); 1034 1034 if (!dring->vaddr) 1035 1035 goto err; 1036 1036
+16 -24
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 1549 1549 goto err_dma; 1550 1550 1551 1551 if (priv->extend_desc) { 1552 - rx_q->dma_erx = dma_zalloc_coherent(priv->device, 1553 - DMA_RX_SIZE * 1554 - sizeof(struct 1555 - dma_extended_desc), 1556 - &rx_q->dma_rx_phy, 1557 - GFP_KERNEL); 1552 + rx_q->dma_erx = dma_alloc_coherent(priv->device, 1553 + DMA_RX_SIZE * sizeof(struct dma_extended_desc), 1554 + &rx_q->dma_rx_phy, 1555 + GFP_KERNEL); 1558 1556 if (!rx_q->dma_erx) 1559 1557 goto err_dma; 1560 1558 1561 1559 } else { 1562 - rx_q->dma_rx = dma_zalloc_coherent(priv->device, 1563 - DMA_RX_SIZE * 1564 - sizeof(struct 1565 - dma_desc), 1566 - &rx_q->dma_rx_phy, 1567 - GFP_KERNEL); 1560 + rx_q->dma_rx = dma_alloc_coherent(priv->device, 1561 + DMA_RX_SIZE * sizeof(struct dma_desc), 1562 + &rx_q->dma_rx_phy, 1563 + GFP_KERNEL); 1568 1564 if (!rx_q->dma_rx) 1569 1565 goto err_dma; 1570 1566 } ··· 1608 1612 goto err_dma; 1609 1613 1610 1614 if (priv->extend_desc) { 1611 - tx_q->dma_etx = dma_zalloc_coherent(priv->device, 1612 - DMA_TX_SIZE * 1613 - sizeof(struct 1614 - dma_extended_desc), 1615 - &tx_q->dma_tx_phy, 1616 - GFP_KERNEL); 1615 + tx_q->dma_etx = dma_alloc_coherent(priv->device, 1616 + DMA_TX_SIZE * sizeof(struct dma_extended_desc), 1617 + &tx_q->dma_tx_phy, 1618 + GFP_KERNEL); 1617 1619 if (!tx_q->dma_etx) 1618 1620 goto err_dma; 1619 1621 } else { 1620 - tx_q->dma_tx = dma_zalloc_coherent(priv->device, 1621 - DMA_TX_SIZE * 1622 - sizeof(struct 1623 - dma_desc), 1624 - &tx_q->dma_tx_phy, 1625 - GFP_KERNEL); 1622 + tx_q->dma_tx = dma_alloc_coherent(priv->device, 1623 + DMA_TX_SIZE * sizeof(struct dma_desc), 1624 + &tx_q->dma_tx_phy, 1625 + GFP_KERNEL); 1626 1626 if (!tx_q->dma_tx) 1627 1627 goto err_dma; 1628 1628 }
+4 -4
drivers/net/ethernet/tundra/tsi108_eth.c
··· 1311 1311 data->id, dev->irq, dev->name); 1312 1312 } 1313 1313 1314 - data->rxring = dma_zalloc_coherent(&data->pdev->dev, rxring_size, 1315 - &data->rxdma, GFP_KERNEL); 1314 + data->rxring = dma_alloc_coherent(&data->pdev->dev, rxring_size, 1315 + &data->rxdma, GFP_KERNEL); 1316 1316 if (!data->rxring) 1317 1317 return -ENOMEM; 1318 1318 1319 - data->txring = dma_zalloc_coherent(&data->pdev->dev, txring_size, 1320 - &data->txdma, GFP_KERNEL); 1319 + data->txring = dma_alloc_coherent(&data->pdev->dev, txring_size, 1320 + &data->txdma, GFP_KERNEL); 1321 1321 if (!data->txring) { 1322 1322 dma_free_coherent(&data->pdev->dev, rxring_size, data->rxring, 1323 1323 data->rxdma);
+6 -6
drivers/net/ethernet/xilinx/ll_temac_main.c
··· 243 243 244 244 /* allocate the tx and rx ring buffer descriptors. */ 245 245 /* returns a virtual address and a physical address. */ 246 - lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent, 247 - sizeof(*lp->tx_bd_v) * TX_BD_NUM, 248 - &lp->tx_bd_p, GFP_KERNEL); 246 + lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, 247 + sizeof(*lp->tx_bd_v) * TX_BD_NUM, 248 + &lp->tx_bd_p, GFP_KERNEL); 249 249 if (!lp->tx_bd_v) 250 250 goto out; 251 251 252 - lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent, 253 - sizeof(*lp->rx_bd_v) * RX_BD_NUM, 254 - &lp->rx_bd_p, GFP_KERNEL); 252 + lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent, 253 + sizeof(*lp->rx_bd_v) * RX_BD_NUM, 254 + &lp->rx_bd_p, GFP_KERNEL); 255 255 if (!lp->rx_bd_v) 256 256 goto out; 257 257
+6 -6
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
··· 199 199 lp->rx_bd_ci = 0; 200 200 201 201 /* Allocate the Tx and Rx buffer descriptors. */ 202 - lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent, 203 - sizeof(*lp->tx_bd_v) * TX_BD_NUM, 204 - &lp->tx_bd_p, GFP_KERNEL); 202 + lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, 203 + sizeof(*lp->tx_bd_v) * TX_BD_NUM, 204 + &lp->tx_bd_p, GFP_KERNEL); 205 205 if (!lp->tx_bd_v) 206 206 goto out; 207 207 208 - lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent, 209 - sizeof(*lp->rx_bd_v) * RX_BD_NUM, 210 - &lp->rx_bd_p, GFP_KERNEL); 208 + lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent, 209 + sizeof(*lp->rx_bd_v) * RX_BD_NUM, 210 + &lp->rx_bd_p, GFP_KERNEL); 211 211 if (!lp->rx_bd_v) 212 212 goto out; 213 213
+3 -3
drivers/net/fddi/defxx.c
··· 1139 1139 #endif 1140 1140 sizeof(PI_CONSUMER_BLOCK) + 1141 1141 (PI_ALIGN_K_DESC_BLK - 1); 1142 - bp->kmalloced = top_v = dma_zalloc_coherent(bp->bus_dev, alloc_size, 1143 - &bp->kmalloced_dma, 1144 - GFP_ATOMIC); 1142 + bp->kmalloced = top_v = dma_alloc_coherent(bp->bus_dev, alloc_size, 1143 + &bp->kmalloced_dma, 1144 + GFP_ATOMIC); 1145 1145 if (top_v == NULL) 1146 1146 return DFX_K_FAILURE; 1147 1147
+4 -4
drivers/net/fddi/skfp/skfddi.c
··· 409 409 if (bp->SharedMemSize > 0) { 410 410 bp->SharedMemSize += 16; // for descriptor alignment 411 411 412 - bp->SharedMemAddr = dma_zalloc_coherent(&bp->pdev.dev, 413 - bp->SharedMemSize, 414 - &bp->SharedMemDMA, 415 - GFP_ATOMIC); 412 + bp->SharedMemAddr = dma_alloc_coherent(&bp->pdev.dev, 413 + bp->SharedMemSize, 414 + &bp->SharedMemDMA, 415 + GFP_ATOMIC); 416 416 if (!bp->SharedMemAddr) { 417 417 printk("could not allocate mem for "); 418 418 printk("hardware module: %ld byte\n",
+4 -4
drivers/net/vmxnet3/vmxnet3_drv.c
··· 535 535 } 536 536 537 537 sz = tq->tx_ring.size * sizeof(tq->buf_info[0]); 538 - tq->buf_info = dma_zalloc_coherent(&adapter->pdev->dev, sz, 539 - &tq->buf_info_pa, GFP_KERNEL); 538 + tq->buf_info = dma_alloc_coherent(&adapter->pdev->dev, sz, 539 + &tq->buf_info_pa, GFP_KERNEL); 540 540 if (!tq->buf_info) 541 541 goto err; 542 542 ··· 1815 1815 1816 1816 sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size + 1817 1817 rq->rx_ring[1].size); 1818 - bi = dma_zalloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa, 1819 - GFP_KERNEL); 1818 + bi = dma_alloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa, 1819 + GFP_KERNEL); 1820 1820 if (!bi) 1821 1821 goto err; 1822 1822
+3 -4
drivers/net/wan/fsl_ucc_hdlc.c
··· 279 279 iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4); 280 280 281 281 /* Get BD buffer */ 282 - bd_buffer = dma_zalloc_coherent(priv->dev, 283 - (RX_BD_RING_LEN + TX_BD_RING_LEN) * 284 - MAX_RX_BUF_LENGTH, 285 - &bd_dma_addr, GFP_KERNEL); 282 + bd_buffer = dma_alloc_coherent(priv->dev, 283 + (RX_BD_RING_LEN + TX_BD_RING_LEN) * MAX_RX_BUF_LENGTH, 284 + &bd_dma_addr, GFP_KERNEL); 286 285 287 286 if (!bd_buffer) { 288 287 dev_err(priv->dev, "Could not allocate buffer descriptors\n");
+3 -4
drivers/net/wireless/ath/ath10k/ce.c
··· 1553 1553 * coherent DMA are unsupported 1554 1554 */ 1555 1555 dest_ring->base_addr_owner_space_unaligned = 1556 - dma_zalloc_coherent(ar->dev, 1557 - (nentries * sizeof(struct ce_desc) + 1558 - CE_DESC_RING_ALIGN), 1559 - &base_addr, GFP_KERNEL); 1556 + dma_alloc_coherent(ar->dev, 1557 + (nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN), 1558 + &base_addr, GFP_KERNEL); 1560 1559 if (!dest_ring->base_addr_owner_space_unaligned) { 1561 1560 kfree(dest_ring); 1562 1561 return ERR_PTR(-ENOMEM);
+4 -4
drivers/net/wireless/ath/ath10k/mac.c
··· 5169 5169 if (vif->type == NL80211_IFTYPE_ADHOC || 5170 5170 vif->type == NL80211_IFTYPE_MESH_POINT || 5171 5171 vif->type == NL80211_IFTYPE_AP) { 5172 - arvif->beacon_buf = dma_zalloc_coherent(ar->dev, 5173 - IEEE80211_MAX_FRAME_LEN, 5174 - &arvif->beacon_paddr, 5175 - GFP_ATOMIC); 5172 + arvif->beacon_buf = dma_alloc_coherent(ar->dev, 5173 + IEEE80211_MAX_FRAME_LEN, 5174 + &arvif->beacon_paddr, 5175 + GFP_ATOMIC); 5176 5176 if (!arvif->beacon_buf) { 5177 5177 ret = -ENOMEM; 5178 5178 ath10k_warn(ar, "failed to allocate beacon buffer: %d\n",
+1 -2
drivers/net/wireless/ath/ath10k/pci.c
··· 936 936 */ 937 937 alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT); 938 938 939 - data_buf = (unsigned char *)dma_zalloc_coherent(ar->dev, 940 - alloc_nbytes, 939 + data_buf = (unsigned char *)dma_alloc_coherent(ar->dev, alloc_nbytes, 941 940 &ce_data_base, 942 941 GFP_ATOMIC); 943 942
+1 -1
drivers/net/wireless/ath/ath10k/wmi.c
··· 5193 5193 void *vaddr; 5194 5194 5195 5195 pool_size = num_units * round_up(unit_len, 4); 5196 - vaddr = dma_zalloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL); 5196 + vaddr = dma_alloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL); 5197 5197 5198 5198 if (!vaddr) 5199 5199 return -ENOMEM;
+8 -9
drivers/net/wireless/ath/wcn36xx/dxe.c
··· 174 174 int i; 175 175 176 176 size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc); 177 - wcn_ch->cpu_addr = dma_zalloc_coherent(dev, size, 178 - &wcn_ch->dma_addr, 179 - GFP_KERNEL); 177 + wcn_ch->cpu_addr = dma_alloc_coherent(dev, size, &wcn_ch->dma_addr, 178 + GFP_KERNEL); 180 179 if (!wcn_ch->cpu_addr) 181 180 return -ENOMEM; 182 181 ··· 626 627 16 - (WCN36XX_BD_CHUNK_SIZE % 8); 627 628 628 629 s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H; 629 - cpu_addr = dma_zalloc_coherent(wcn->dev, s, 630 - &wcn->mgmt_mem_pool.phy_addr, 631 - GFP_KERNEL); 630 + cpu_addr = dma_alloc_coherent(wcn->dev, s, 631 + &wcn->mgmt_mem_pool.phy_addr, 632 + GFP_KERNEL); 632 633 if (!cpu_addr) 633 634 goto out_err; 634 635 ··· 641 642 16 - (WCN36XX_BD_CHUNK_SIZE % 8); 642 643 643 644 s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L; 644 - cpu_addr = dma_zalloc_coherent(wcn->dev, s, 645 - &wcn->data_mem_pool.phy_addr, 646 - GFP_KERNEL); 645 + cpu_addr = dma_alloc_coherent(wcn->dev, s, 646 + &wcn->data_mem_pool.phy_addr, 647 + GFP_KERNEL); 647 648 if (!cpu_addr) 648 649 goto out_err; 649 650
+4 -4
drivers/net/wireless/ath/wil6210/txrx_edma.c
··· 99 99 /* Status messages are allocated and initialized to 0. This is necessary 100 100 * since DR bit should be initialized to 0. 101 101 */ 102 - sring->va = dma_zalloc_coherent(dev, sz, &sring->pa, GFP_KERNEL); 102 + sring->va = dma_alloc_coherent(dev, sz, &sring->pa, GFP_KERNEL); 103 103 if (!sring->va) 104 104 return -ENOMEM; 105 105 ··· 381 381 if (!ring->ctx) 382 382 goto err; 383 383 384 - ring->va = dma_zalloc_coherent(dev, sz, &ring->pa, GFP_KERNEL); 384 + ring->va = dma_alloc_coherent(dev, sz, &ring->pa, GFP_KERNEL); 385 385 if (!ring->va) 386 386 goto err_free_ctx; 387 387 388 388 if (ring->is_rx) { 389 389 sz = sizeof(*ring->edma_rx_swtail.va); 390 390 ring->edma_rx_swtail.va = 391 - dma_zalloc_coherent(dev, sz, &ring->edma_rx_swtail.pa, 392 - GFP_KERNEL); 391 + dma_alloc_coherent(dev, sz, &ring->edma_rx_swtail.pa, 392 + GFP_KERNEL); 393 393 if (!ring->edma_rx_swtail.va) 394 394 goto err_free_va; 395 395 }
+3 -3
drivers/net/wireless/broadcom/b43/dma.c
··· 431 431 u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ? 432 432 B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE; 433 433 434 - ring->descbase = dma_zalloc_coherent(ring->dev->dev->dma_dev, 435 - ring_mem_size, &(ring->dmabase), 436 - GFP_KERNEL); 434 + ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev, 435 + ring_mem_size, &(ring->dmabase), 436 + GFP_KERNEL); 437 437 if (!ring->descbase) 438 438 return -ENOMEM; 439 439
+3 -3
drivers/net/wireless/broadcom/b43legacy/dma.c
··· 331 331 static int alloc_ringmemory(struct b43legacy_dmaring *ring) 332 332 { 333 333 /* GFP flags must match the flags in free_ringmemory()! */ 334 - ring->descbase = dma_zalloc_coherent(ring->dev->dev->dma_dev, 335 - B43legacy_DMA_RINGMEMSIZE, 336 - &(ring->dmabase), GFP_KERNEL); 334 + ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev, 335 + B43legacy_DMA_RINGMEMSIZE, 336 + &(ring->dmabase), GFP_KERNEL); 337 337 if (!ring->descbase) 338 338 return -ENOMEM; 339 339
+8 -8
drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
··· 1281 1281 u32 addr; 1282 1282 1283 1283 devinfo->shared.scratch = 1284 - dma_zalloc_coherent(&devinfo->pdev->dev, 1285 - BRCMF_DMA_D2H_SCRATCH_BUF_LEN, 1286 - &devinfo->shared.scratch_dmahandle, 1287 - GFP_KERNEL); 1284 + dma_alloc_coherent(&devinfo->pdev->dev, 1285 + BRCMF_DMA_D2H_SCRATCH_BUF_LEN, 1286 + &devinfo->shared.scratch_dmahandle, 1287 + GFP_KERNEL); 1288 1288 if (!devinfo->shared.scratch) 1289 1289 goto fail; 1290 1290 ··· 1298 1298 brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_SCRATCH_BUF_LEN); 1299 1299 1300 1300 devinfo->shared.ringupd = 1301 - dma_zalloc_coherent(&devinfo->pdev->dev, 1302 - BRCMF_DMA_D2H_RINGUPD_BUF_LEN, 1303 - &devinfo->shared.ringupd_dmahandle, 1304 - GFP_KERNEL); 1301 + dma_alloc_coherent(&devinfo->pdev->dev, 1302 + BRCMF_DMA_D2H_RINGUPD_BUF_LEN, 1303 + &devinfo->shared.ringupd_dmahandle, 1304 + GFP_KERNEL); 1305 1305 if (!devinfo->shared.ringupd) 1306 1306 goto fail; 1307 1307
+15 -24
drivers/net/wireless/intel/iwlwifi/pcie/rx.c
··· 711 711 * Allocate the circular buffer of Read Buffer Descriptors 712 712 * (RBDs) 713 713 */ 714 - rxq->bd = dma_zalloc_coherent(dev, 715 - free_size * rxq->queue_size, 716 - &rxq->bd_dma, GFP_KERNEL); 714 + rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size, 715 + &rxq->bd_dma, GFP_KERNEL); 717 716 if (!rxq->bd) 718 717 goto err; 719 718 720 719 if (trans->cfg->mq_rx_supported) { 721 - rxq->used_bd = dma_zalloc_coherent(dev, 722 - (use_rx_td ? 723 - sizeof(*rxq->cd) : 724 - sizeof(__le32)) * 725 - rxq->queue_size, 726 - &rxq->used_bd_dma, 727 - GFP_KERNEL); 720 + rxq->used_bd = dma_alloc_coherent(dev, 721 + (use_rx_td ? sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size, 722 + &rxq->used_bd_dma, 723 + GFP_KERNEL); 728 724 if (!rxq->used_bd) 729 725 goto err; 730 726 } 731 727 732 728 /* Allocate the driver's pointer to receive buffer status */ 733 - rxq->rb_stts = dma_zalloc_coherent(dev, use_rx_td ? 734 - sizeof(__le16) : 735 - sizeof(struct iwl_rb_status), 736 - &rxq->rb_stts_dma, 737 - GFP_KERNEL); 729 + rxq->rb_stts = dma_alloc_coherent(dev, 730 + use_rx_td ? 
sizeof(__le16) : sizeof(struct iwl_rb_status), 731 + &rxq->rb_stts_dma, GFP_KERNEL); 738 732 if (!rxq->rb_stts) 739 733 goto err; 740 734 ··· 736 742 return 0; 737 743 738 744 /* Allocate the driver's pointer to TR tail */ 739 - rxq->tr_tail = dma_zalloc_coherent(dev, sizeof(__le16), 740 - &rxq->tr_tail_dma, 741 - GFP_KERNEL); 745 + rxq->tr_tail = dma_alloc_coherent(dev, sizeof(__le16), 746 + &rxq->tr_tail_dma, GFP_KERNEL); 742 747 if (!rxq->tr_tail) 743 748 goto err; 744 749 745 750 /* Allocate the driver's pointer to CR tail */ 746 - rxq->cr_tail = dma_zalloc_coherent(dev, sizeof(__le16), 747 - &rxq->cr_tail_dma, 748 - GFP_KERNEL); 751 + rxq->cr_tail = dma_alloc_coherent(dev, sizeof(__le16), 752 + &rxq->cr_tail_dma, GFP_KERNEL); 749 753 if (!rxq->cr_tail) 750 754 goto err; 751 755 /* ··· 1939 1947 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1940 1948 1941 1949 trans_pcie->ict_tbl = 1942 - dma_zalloc_coherent(trans->dev, ICT_SIZE, 1943 - &trans_pcie->ict_tbl_dma, 1944 - GFP_KERNEL); 1950 + dma_alloc_coherent(trans->dev, ICT_SIZE, 1951 + &trans_pcie->ict_tbl_dma, GFP_KERNEL); 1945 1952 if (!trans_pcie->ict_tbl) 1946 1953 return -ENOMEM; 1947 1954
+3 -3
drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c
··· 119 119 /* 120 120 * Allocate DMA memory for descriptor and buffer. 121 121 */ 122 - addr = dma_zalloc_coherent(rt2x00dev->dev, 123 - queue->limit * queue->desc_size, &dma, 124 - GFP_KERNEL); 122 + addr = dma_alloc_coherent(rt2x00dev->dev, 123 + queue->limit * queue->desc_size, &dma, 124 + GFP_KERNEL); 125 125 if (!addr) 126 126 return -ENOMEM; 127 127
+4 -4
drivers/ntb/hw/mscc/ntb_hw_switchtec.c
··· 1339 1339 int rc; 1340 1340 1341 1341 sndev->nr_rsvd_luts++; 1342 - sndev->self_shared = dma_zalloc_coherent(&sndev->stdev->pdev->dev, 1343 - LUT_SIZE, 1344 - &sndev->self_shared_dma, 1345 - GFP_KERNEL); 1342 + sndev->self_shared = dma_alloc_coherent(&sndev->stdev->pdev->dev, 1343 + LUT_SIZE, 1344 + &sndev->self_shared_dma, 1345 + GFP_KERNEL); 1346 1346 if (!sndev->self_shared) { 1347 1347 dev_err(&sndev->stdev->dev, 1348 1348 "unable to allocate memory for shared mw\n");
+4 -4
drivers/nvme/host/pci.c
··· 1485 1485 if (dev->ctrl.queue_count > qid) 1486 1486 return 0; 1487 1487 1488 - nvmeq->cqes = dma_zalloc_coherent(dev->dev, CQ_SIZE(depth), 1489 - &nvmeq->cq_dma_addr, GFP_KERNEL); 1488 + nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(depth), 1489 + &nvmeq->cq_dma_addr, GFP_KERNEL); 1490 1490 if (!nvmeq->cqes) 1491 1491 goto free_nvmeq; 1492 1492 ··· 1915 1915 if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries) 1916 1916 max_entries = dev->ctrl.hmmaxd; 1917 1917 1918 - descs = dma_zalloc_coherent(dev->dev, max_entries * sizeof(*descs), 1919 - &descs_dma, GFP_KERNEL); 1918 + descs = dma_alloc_coherent(dev->dev, max_entries * sizeof(*descs), 1919 + &descs_dma, GFP_KERNEL); 1920 1920 if (!descs) 1921 1921 goto out; 1922 1922
+3 -3
drivers/pci/controller/pcie-iproc-msi.c
··· 602 602 } 603 603 604 604 /* Reserve memory for event queue and make sure memories are zeroed */ 605 - msi->eq_cpu = dma_zalloc_coherent(pcie->dev, 606 - msi->nr_eq_region * EQ_MEM_REGION_SIZE, 607 - &msi->eq_dma, GFP_KERNEL); 605 + msi->eq_cpu = dma_alloc_coherent(pcie->dev, 606 + msi->nr_eq_region * EQ_MEM_REGION_SIZE, 607 + &msi->eq_dma, GFP_KERNEL); 608 608 if (!msi->eq_cpu) { 609 609 ret = -ENOMEM; 610 610 goto free_irqs;
+4 -4
drivers/pci/switch/switchtec.c
··· 1373 1373 if (ioread32(&stdev->mmio_mrpc->dma_ver) == 0) 1374 1374 return 0; 1375 1375 1376 - stdev->dma_mrpc = dma_zalloc_coherent(&stdev->pdev->dev, 1377 - sizeof(*stdev->dma_mrpc), 1378 - &stdev->dma_mrpc_dma_addr, 1379 - GFP_KERNEL); 1376 + stdev->dma_mrpc = dma_alloc_coherent(&stdev->pdev->dev, 1377 + sizeof(*stdev->dma_mrpc), 1378 + &stdev->dma_mrpc_dma_addr, 1379 + GFP_KERNEL); 1380 1380 if (stdev->dma_mrpc == NULL) 1381 1381 return -ENOMEM; 1382 1382
+11 -11
drivers/rapidio/devices/tsi721.c
··· 1382 1382 INIT_WORK(&priv->idb_work, tsi721_db_dpc); 1383 1383 1384 1384 /* Allocate buffer for inbound doorbells queue */ 1385 - priv->idb_base = dma_zalloc_coherent(&priv->pdev->dev, 1386 - IDB_QSIZE * TSI721_IDB_ENTRY_SIZE, 1387 - &priv->idb_dma, GFP_KERNEL); 1385 + priv->idb_base = dma_alloc_coherent(&priv->pdev->dev, 1386 + IDB_QSIZE * TSI721_IDB_ENTRY_SIZE, 1387 + &priv->idb_dma, GFP_KERNEL); 1388 1388 if (!priv->idb_base) 1389 1389 return -ENOMEM; 1390 1390 ··· 1447 1447 regs = priv->regs + TSI721_DMAC_BASE(TSI721_DMACH_MAINT); 1448 1448 1449 1449 /* Allocate space for DMA descriptors */ 1450 - bd_ptr = dma_zalloc_coherent(&priv->pdev->dev, 1451 - bd_num * sizeof(struct tsi721_dma_desc), 1452 - &bd_phys, GFP_KERNEL); 1450 + bd_ptr = dma_alloc_coherent(&priv->pdev->dev, 1451 + bd_num * sizeof(struct tsi721_dma_desc), 1452 + &bd_phys, GFP_KERNEL); 1453 1453 if (!bd_ptr) 1454 1454 return -ENOMEM; 1455 1455 ··· 1464 1464 sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ? 1465 1465 bd_num : TSI721_DMA_MINSTSSZ; 1466 1466 sts_size = roundup_pow_of_two(sts_size); 1467 - sts_ptr = dma_zalloc_coherent(&priv->pdev->dev, 1467 + sts_ptr = dma_alloc_coherent(&priv->pdev->dev, 1468 1468 sts_size * sizeof(struct tsi721_dma_sts), 1469 1469 &sts_phys, GFP_KERNEL); 1470 1470 if (!sts_ptr) { ··· 1939 1939 1940 1940 /* Outbound message descriptor status FIFO allocation */ 1941 1941 priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1); 1942 - priv->omsg_ring[mbox].sts_base = dma_zalloc_coherent(&priv->pdev->dev, 1943 - priv->omsg_ring[mbox].sts_size * 1944 - sizeof(struct tsi721_dma_sts), 1945 - &priv->omsg_ring[mbox].sts_phys, GFP_KERNEL); 1942 + priv->omsg_ring[mbox].sts_base = dma_alloc_coherent(&priv->pdev->dev, 1943 + priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts), 1944 + &priv->omsg_ring[mbox].sts_phys, 1945 + GFP_KERNEL); 1946 1946 if (priv->omsg_ring[mbox].sts_base == NULL) { 1947 1947 tsi_debug(OMSG, &priv->pdev->dev, 1948 1948 "ENOMEM for 
OB_MSG_%d status FIFO", mbox);
+4 -4
drivers/rapidio/devices/tsi721_dma.c
··· 90 90 * Allocate space for DMA descriptors 91 91 * (add an extra element for link descriptor) 92 92 */ 93 - bd_ptr = dma_zalloc_coherent(dev, 94 - (bd_num + 1) * sizeof(struct tsi721_dma_desc), 95 - &bd_phys, GFP_ATOMIC); 93 + bd_ptr = dma_alloc_coherent(dev, 94 + (bd_num + 1) * sizeof(struct tsi721_dma_desc), 95 + &bd_phys, GFP_ATOMIC); 96 96 if (!bd_ptr) 97 97 return -ENOMEM; 98 98 ··· 108 108 sts_size = ((bd_num + 1) >= TSI721_DMA_MINSTSSZ) ? 109 109 (bd_num + 1) : TSI721_DMA_MINSTSSZ; 110 110 sts_size = roundup_pow_of_two(sts_size); 111 - sts_ptr = dma_zalloc_coherent(dev, 111 + sts_ptr = dma_alloc_coherent(dev, 112 112 sts_size * sizeof(struct tsi721_dma_sts), 113 113 &sts_phys, GFP_ATOMIC); 114 114 if (!sts_ptr) {
+7 -8
drivers/s390/net/ism_drv.c
··· 89 89 dma_addr_t dma_handle; 90 90 struct ism_sba *sba; 91 91 92 - sba = dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE, 93 - &dma_handle, GFP_KERNEL); 92 + sba = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle, 93 + GFP_KERNEL); 94 94 if (!sba) 95 95 return -ENOMEM; 96 96 ··· 116 116 dma_addr_t dma_handle; 117 117 struct ism_eq *ieq; 118 118 119 - ieq = dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE, 120 - &dma_handle, GFP_KERNEL); 119 + ieq = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle, 120 + GFP_KERNEL); 121 121 if (!ieq) 122 122 return -ENOMEM; 123 123 ··· 234 234 test_and_set_bit(dmb->sba_idx, ism->sba_bitmap)) 235 235 return -EINVAL; 236 236 237 - dmb->cpu_addr = dma_zalloc_coherent(&ism->pdev->dev, dmb->dmb_len, 238 - &dmb->dma_addr, GFP_KERNEL | 239 - __GFP_NOWARN | __GFP_NOMEMALLOC | 240 - __GFP_COMP | __GFP_NORETRY); 237 + dmb->cpu_addr = dma_alloc_coherent(&ism->pdev->dev, dmb->dmb_len, 238 + &dmb->dma_addr, 239 + GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC | __GFP_COMP | __GFP_NORETRY); 241 240 if (!dmb->cpu_addr) 242 241 clear_bit(dmb->sba_idx, ism->sba_bitmap); 243 242
+3 -2
drivers/scsi/3w-sas.c
··· 646 646 unsigned long *cpu_addr; 647 647 int retval = 1; 648 648 649 - cpu_addr = dma_zalloc_coherent(&tw_dev->tw_pci_dev->dev, 650 - size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL); 649 + cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, 650 + size * TW_Q_LENGTH, &dma_handle, 651 + GFP_KERNEL); 651 652 if (!cpu_addr) { 652 653 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed"); 653 654 goto out;
+4 -4
drivers/scsi/a100u2w.c
··· 1123 1123 1124 1124 /* Get total memory needed for SCB */ 1125 1125 sz = ORC_MAXQUEUE * sizeof(struct orc_scb); 1126 - host->scb_virt = dma_zalloc_coherent(&pdev->dev, sz, &host->scb_phys, 1127 - GFP_KERNEL); 1126 + host->scb_virt = dma_alloc_coherent(&pdev->dev, sz, &host->scb_phys, 1127 + GFP_KERNEL); 1128 1128 if (!host->scb_virt) { 1129 1129 printk("inia100: SCB memory allocation error\n"); 1130 1130 goto out_host_put; ··· 1132 1132 1133 1133 /* Get total memory needed for ESCB */ 1134 1134 sz = ORC_MAXQUEUE * sizeof(struct orc_extended_scb); 1135 - host->escb_virt = dma_zalloc_coherent(&pdev->dev, sz, &host->escb_phys, 1136 - GFP_KERNEL); 1135 + host->escb_virt = dma_alloc_coherent(&pdev->dev, sz, &host->escb_phys, 1136 + GFP_KERNEL); 1137 1137 if (!host->escb_virt) { 1138 1138 printk("inia100: ESCB memory allocation error\n"); 1139 1139 goto out_free_scb_array;
+12 -6
drivers/scsi/arcmsr/arcmsr_hba.c
··· 587 587 case ACB_ADAPTER_TYPE_B: { 588 588 struct MessageUnit_B *reg; 589 589 acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_B), 32); 590 - dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize, 591 - &dma_coherent_handle, GFP_KERNEL); 590 + dma_coherent = dma_alloc_coherent(&pdev->dev, 591 + acb->roundup_ccbsize, 592 + &dma_coherent_handle, 593 + GFP_KERNEL); 592 594 if (!dma_coherent) { 593 595 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); 594 596 return false; ··· 619 617 struct MessageUnit_D *reg; 620 618 621 619 acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_D), 32); 622 - dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize, 623 - &dma_coherent_handle, GFP_KERNEL); 620 + dma_coherent = dma_alloc_coherent(&pdev->dev, 621 + acb->roundup_ccbsize, 622 + &dma_coherent_handle, 623 + GFP_KERNEL); 624 624 if (!dma_coherent) { 625 625 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); 626 626 return false; ··· 663 659 uint32_t completeQ_size; 664 660 completeQ_size = sizeof(struct deliver_completeQ) * ARCMSR_MAX_HBE_DONEQUEUE + 128; 665 661 acb->roundup_ccbsize = roundup(completeQ_size, 32); 666 - dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize, 667 - &dma_coherent_handle, GFP_KERNEL); 662 + dma_coherent = dma_alloc_coherent(&pdev->dev, 663 + acb->roundup_ccbsize, 664 + &dma_coherent_handle, 665 + GFP_KERNEL); 668 666 if (!dma_coherent){ 669 667 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); 670 668 return false;
+2 -2
drivers/scsi/be2iscsi/be_main.c
··· 3321 3321 q->len = len; 3322 3322 q->entry_size = entry_size; 3323 3323 mem->size = len * entry_size; 3324 - mem->va = dma_zalloc_coherent(&phba->pcidev->dev, mem->size, &mem->dma, 3325 - GFP_KERNEL); 3324 + mem->va = dma_alloc_coherent(&phba->pcidev->dev, mem->size, &mem->dma, 3325 + GFP_KERNEL); 3326 3326 if (!mem->va) 3327 3327 return -ENOMEM; 3328 3328 return 0;
+5 -6
drivers/scsi/be2iscsi/be_mgmt.c
··· 293 293 struct be_dma_mem *cmd, 294 294 u8 subsystem, u8 opcode, u32 size) 295 295 { 296 - cmd->va = dma_zalloc_coherent(&phba->ctrl.pdev->dev, size, &cmd->dma, 297 - GFP_KERNEL); 296 + cmd->va = dma_alloc_coherent(&phba->ctrl.pdev->dev, size, &cmd->dma, 297 + GFP_KERNEL); 298 298 if (!cmd->va) { 299 299 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 300 300 "BG_%d : Failed to allocate memory for if info\n"); ··· 1510 1510 return -EINVAL; 1511 1511 1512 1512 nonemb_cmd.size = sizeof(union be_invldt_cmds_params); 1513 - nonemb_cmd.va = dma_zalloc_coherent(&phba->ctrl.pdev->dev, 1514 - nonemb_cmd.size, 1515 - &nonemb_cmd.dma, 1516 - GFP_KERNEL); 1513 + nonemb_cmd.va = dma_alloc_coherent(&phba->ctrl.pdev->dev, 1514 + nonemb_cmd.size, &nonemb_cmd.dma, 1515 + GFP_KERNEL); 1517 1516 if (!nonemb_cmd.va) { 1518 1517 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH, 1519 1518 "BM_%d : invldt_cmds_params alloc failed\n");
+3 -3
drivers/scsi/bfa/bfad_bsg.c
··· 3264 3264 /* Allocate dma coherent memory */ 3265 3265 buf_info = buf_base; 3266 3266 buf_info->size = payload_len; 3267 - buf_info->virt = dma_zalloc_coherent(&bfad->pcidev->dev, 3268 - buf_info->size, &buf_info->phys, 3269 - GFP_KERNEL); 3267 + buf_info->virt = dma_alloc_coherent(&bfad->pcidev->dev, 3268 + buf_info->size, &buf_info->phys, 3269 + GFP_KERNEL); 3270 3270 if (!buf_info->virt) 3271 3271 goto out_free_mem; 3272 3272
+24 -25
drivers/scsi/bnx2fc/bnx2fc_hwi.c
··· 1857 1857 * entries. Hence the limit with one page is 8192 task context 1858 1858 * entries. 1859 1859 */ 1860 - hba->task_ctx_bd_tbl = dma_zalloc_coherent(&hba->pcidev->dev, 1861 - PAGE_SIZE, 1862 - &hba->task_ctx_bd_dma, 1863 - GFP_KERNEL); 1860 + hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, 1861 + PAGE_SIZE, 1862 + &hba->task_ctx_bd_dma, 1863 + GFP_KERNEL); 1864 1864 if (!hba->task_ctx_bd_tbl) { 1865 1865 printk(KERN_ERR PFX "unable to allocate task context BDT\n"); 1866 1866 rc = -1; ··· 1894 1894 task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl; 1895 1895 for (i = 0; i < task_ctx_arr_sz; i++) { 1896 1896 1897 - hba->task_ctx[i] = dma_zalloc_coherent(&hba->pcidev->dev, 1898 - PAGE_SIZE, 1899 - &hba->task_ctx_dma[i], 1900 - GFP_KERNEL); 1897 + hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev, 1898 + PAGE_SIZE, 1899 + &hba->task_ctx_dma[i], 1900 + GFP_KERNEL); 1901 1901 if (!hba->task_ctx[i]) { 1902 1902 printk(KERN_ERR PFX "unable to alloc task context\n"); 1903 1903 rc = -1; ··· 2031 2031 } 2032 2032 2033 2033 for (i = 0; i < segment_count; ++i) { 2034 - hba->hash_tbl_segments[i] = dma_zalloc_coherent(&hba->pcidev->dev, 2035 - BNX2FC_HASH_TBL_CHUNK_SIZE, 2036 - &dma_segment_array[i], 2037 - GFP_KERNEL); 2034 + hba->hash_tbl_segments[i] = dma_alloc_coherent(&hba->pcidev->dev, 2035 + BNX2FC_HASH_TBL_CHUNK_SIZE, 2036 + &dma_segment_array[i], 2037 + GFP_KERNEL); 2038 2038 if (!hba->hash_tbl_segments[i]) { 2039 2039 printk(KERN_ERR PFX "hash segment alloc failed\n"); 2040 2040 goto cleanup_dma; 2041 2041 } 2042 2042 } 2043 2043 2044 - hba->hash_tbl_pbl = dma_zalloc_coherent(&hba->pcidev->dev, PAGE_SIZE, 2045 - &hba->hash_tbl_pbl_dma, 2046 - GFP_KERNEL); 2044 + hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, 2045 + &hba->hash_tbl_pbl_dma, 2046 + GFP_KERNEL); 2047 2047 if (!hba->hash_tbl_pbl) { 2048 2048 printk(KERN_ERR PFX "hash table pbl alloc failed\n"); 2049 2049 goto cleanup_dma; ··· 2104 2104 return -ENOMEM; 
2105 2105 2106 2106 mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair); 2107 - hba->t2_hash_tbl_ptr = dma_zalloc_coherent(&hba->pcidev->dev, 2108 - mem_size, 2109 - &hba->t2_hash_tbl_ptr_dma, 2110 - GFP_KERNEL); 2107 + hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size, 2108 + &hba->t2_hash_tbl_ptr_dma, 2109 + GFP_KERNEL); 2111 2110 if (!hba->t2_hash_tbl_ptr) { 2112 2111 printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n"); 2113 2112 bnx2fc_free_fw_resc(hba); ··· 2115 2116 2116 2117 mem_size = BNX2FC_NUM_MAX_SESS * 2117 2118 sizeof(struct fcoe_t2_hash_table_entry); 2118 - hba->t2_hash_tbl = dma_zalloc_coherent(&hba->pcidev->dev, mem_size, 2119 - &hba->t2_hash_tbl_dma, 2120 - GFP_KERNEL); 2119 + hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size, 2120 + &hba->t2_hash_tbl_dma, 2121 + GFP_KERNEL); 2121 2122 if (!hba->t2_hash_tbl) { 2122 2123 printk(KERN_ERR PFX "unable to allocate t2 hash table\n"); 2123 2124 bnx2fc_free_fw_resc(hba); ··· 2139 2140 return -ENOMEM; 2140 2141 } 2141 2142 2142 - hba->stats_buffer = dma_zalloc_coherent(&hba->pcidev->dev, PAGE_SIZE, 2143 - &hba->stats_buf_dma, 2144 - GFP_KERNEL); 2143 + hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, 2144 + &hba->stats_buf_dma, 2145 + GFP_KERNEL); 2145 2146 if (!hba->stats_buffer) { 2146 2147 printk(KERN_ERR PFX "unable to alloc Stats Buffer\n"); 2147 2148 bnx2fc_free_fw_resc(hba);
+22 -22
drivers/scsi/bnx2fc/bnx2fc_tgt.c
··· 672 672 tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) & 673 673 CNIC_PAGE_MASK; 674 674 675 - tgt->sq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size, 676 - &tgt->sq_dma, GFP_KERNEL); 675 + tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size, 676 + &tgt->sq_dma, GFP_KERNEL); 677 677 if (!tgt->sq) { 678 678 printk(KERN_ERR PFX "unable to allocate SQ memory %d\n", 679 679 tgt->sq_mem_size); ··· 685 685 tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) & 686 686 CNIC_PAGE_MASK; 687 687 688 - tgt->cq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size, 689 - &tgt->cq_dma, GFP_KERNEL); 688 + tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size, 689 + &tgt->cq_dma, GFP_KERNEL); 690 690 if (!tgt->cq) { 691 691 printk(KERN_ERR PFX "unable to allocate CQ memory %d\n", 692 692 tgt->cq_mem_size); ··· 698 698 tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) & 699 699 CNIC_PAGE_MASK; 700 700 701 - tgt->rq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size, 702 - &tgt->rq_dma, GFP_KERNEL); 701 + tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size, 702 + &tgt->rq_dma, GFP_KERNEL); 703 703 if (!tgt->rq) { 704 704 printk(KERN_ERR PFX "unable to allocate RQ memory %d\n", 705 705 tgt->rq_mem_size); ··· 710 710 tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) & 711 711 CNIC_PAGE_MASK; 712 712 713 - tgt->rq_pbl = dma_zalloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size, 714 - &tgt->rq_pbl_dma, GFP_KERNEL); 713 + tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size, 714 + &tgt->rq_pbl_dma, GFP_KERNEL); 715 715 if (!tgt->rq_pbl) { 716 716 printk(KERN_ERR PFX "unable to allocate RQ PBL %d\n", 717 717 tgt->rq_pbl_size); ··· 735 735 tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) & 736 736 CNIC_PAGE_MASK; 737 737 738 - tgt->xferq = dma_zalloc_coherent(&hba->pcidev->dev, 739 - tgt->xferq_mem_size, &tgt->xferq_dma, 740 - GFP_KERNEL); 738 + 
tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, 739 + tgt->xferq_mem_size, &tgt->xferq_dma, 740 + GFP_KERNEL); 741 741 if (!tgt->xferq) { 742 742 printk(KERN_ERR PFX "unable to allocate XFERQ %d\n", 743 743 tgt->xferq_mem_size); ··· 749 749 tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) & 750 750 CNIC_PAGE_MASK; 751 751 752 - tgt->confq = dma_zalloc_coherent(&hba->pcidev->dev, 753 - tgt->confq_mem_size, &tgt->confq_dma, 754 - GFP_KERNEL); 752 + tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, 753 + tgt->confq_mem_size, &tgt->confq_dma, 754 + GFP_KERNEL); 755 755 if (!tgt->confq) { 756 756 printk(KERN_ERR PFX "unable to allocate CONFQ %d\n", 757 757 tgt->confq_mem_size); ··· 763 763 tgt->confq_pbl_size = 764 764 (tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; 765 765 766 - tgt->confq_pbl = dma_zalloc_coherent(&hba->pcidev->dev, 767 - tgt->confq_pbl_size, 768 - &tgt->confq_pbl_dma, GFP_KERNEL); 766 + tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev, 767 + tgt->confq_pbl_size, 768 + &tgt->confq_pbl_dma, GFP_KERNEL); 769 769 if (!tgt->confq_pbl) { 770 770 printk(KERN_ERR PFX "unable to allocate CONFQ PBL %d\n", 771 771 tgt->confq_pbl_size); ··· 787 787 /* Allocate and map ConnDB */ 788 788 tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db); 789 789 790 - tgt->conn_db = dma_zalloc_coherent(&hba->pcidev->dev, 791 - tgt->conn_db_mem_size, 792 - &tgt->conn_db_dma, GFP_KERNEL); 790 + tgt->conn_db = dma_alloc_coherent(&hba->pcidev->dev, 791 + tgt->conn_db_mem_size, 792 + &tgt->conn_db_dma, GFP_KERNEL); 793 793 if (!tgt->conn_db) { 794 794 printk(KERN_ERR PFX "unable to allocate conn_db %d\n", 795 795 tgt->conn_db_mem_size); ··· 802 802 tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) & 803 803 CNIC_PAGE_MASK; 804 804 805 - tgt->lcq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size, 806 - &tgt->lcq_dma, GFP_KERNEL); 805 + tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size, 806 + 
&tgt->lcq_dma, GFP_KERNEL); 807 807 808 808 if (!tgt->lcq) { 809 809 printk(KERN_ERR PFX "unable to allocate lcq %d\n",
+4 -4
drivers/scsi/bnx2i/bnx2i_hwi.c
··· 1070 1070 1071 1071 /* Allocate memory area for actual SQ element */ 1072 1072 ep->qp.sq_virt = 1073 - dma_zalloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size, 1074 - &ep->qp.sq_phys, GFP_KERNEL); 1073 + dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size, 1074 + &ep->qp.sq_phys, GFP_KERNEL); 1075 1075 if (!ep->qp.sq_virt) { 1076 1076 printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n", 1077 1077 ep->qp.sq_mem_size); ··· 1106 1106 1107 1107 /* Allocate memory area for actual CQ element */ 1108 1108 ep->qp.cq_virt = 1109 - dma_zalloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size, 1110 - &ep->qp.cq_phys, GFP_KERNEL); 1109 + dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size, 1110 + &ep->qp.cq_phys, GFP_KERNEL); 1111 1111 if (!ep->qp.cq_virt) { 1112 1112 printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n", 1113 1113 ep->qp.cq_mem_size);
+2 -2
drivers/scsi/csiostor/csio_wr.c
··· 233 233 234 234 q = wrm->q_arr[free_idx]; 235 235 236 - q->vstart = dma_zalloc_coherent(&hw->pdev->dev, qsz, &q->pstart, 237 - GFP_KERNEL); 236 + q->vstart = dma_alloc_coherent(&hw->pdev->dev, qsz, &q->pstart, 237 + GFP_KERNEL); 238 238 if (!q->vstart) { 239 239 csio_err(hw, 240 240 "Failed to allocate DMA memory for "
+2 -2
drivers/scsi/lpfc/lpfc_bsg.c
··· 2730 2730 INIT_LIST_HEAD(&dmabuf->list); 2731 2731 2732 2732 /* now, allocate dma buffer */ 2733 - dmabuf->virt = dma_zalloc_coherent(&pcidev->dev, BSG_MBOX_SIZE, 2734 - &(dmabuf->phys), GFP_KERNEL); 2733 + dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE, 2734 + &(dmabuf->phys), GFP_KERNEL); 2735 2735 2736 2736 if (!dmabuf->virt) { 2737 2737 kfree(dmabuf);
+7 -7
drivers/scsi/lpfc/lpfc_init.c
··· 6973 6973 if (!dmabuf) 6974 6974 return NULL; 6975 6975 6976 - dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, 6977 - LPFC_HDR_TEMPLATE_SIZE, 6978 - &dmabuf->phys, GFP_KERNEL); 6976 + dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 6977 + LPFC_HDR_TEMPLATE_SIZE, 6978 + &dmabuf->phys, GFP_KERNEL); 6979 6979 if (!dmabuf->virt) { 6980 6980 rpi_hdr = NULL; 6981 6981 goto err_free_dmabuf; ··· 7397 7397 } 7398 7398 7399 7399 /* Allocate memory for SLI-2 structures */ 7400 - phba->slim2p.virt = dma_zalloc_coherent(&pdev->dev, SLI2_SLIM_SIZE, 7401 - &phba->slim2p.phys, GFP_KERNEL); 7400 + phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE, 7401 + &phba->slim2p.phys, GFP_KERNEL); 7402 7402 if (!phba->slim2p.virt) 7403 7403 goto out_iounmap; 7404 7404 ··· 7816 7816 * plus an alignment restriction of 16 bytes. 7817 7817 */ 7818 7818 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1); 7819 - dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, bmbx_size, 7820 - &dmabuf->phys, GFP_KERNEL); 7819 + dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size, 7820 + &dmabuf->phys, GFP_KERNEL); 7821 7821 if (!dmabuf->virt) { 7822 7822 kfree(dmabuf); 7823 7823 return -ENOMEM;
+3 -3
drivers/scsi/lpfc/lpfc_mbox.c
··· 1827 1827 * page, this is used as a priori size of SLI4_PAGE_SIZE for 1828 1828 * the later DMA memory free. 1829 1829 */ 1830 - viraddr = dma_zalloc_coherent(&phba->pcidev->dev, 1831 - SLI4_PAGE_SIZE, &phyaddr, 1832 - GFP_KERNEL); 1830 + viraddr = dma_alloc_coherent(&phba->pcidev->dev, 1831 + SLI4_PAGE_SIZE, &phyaddr, 1832 + GFP_KERNEL); 1833 1833 /* In case of malloc fails, proceed with whatever we have */ 1834 1834 if (!viraddr) 1835 1835 break;
+7 -8
drivers/scsi/lpfc/lpfc_sli.c
··· 5362 5362 * mailbox command. 5363 5363 */ 5364 5364 dma_size = *vpd_size; 5365 - dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, dma_size, 5366 - &dmabuf->phys, GFP_KERNEL); 5365 + dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size, 5366 + &dmabuf->phys, GFP_KERNEL); 5367 5367 if (!dmabuf->virt) { 5368 5368 kfree(dmabuf); 5369 5369 return -ENOMEM; ··· 6300 6300 goto free_mem; 6301 6301 } 6302 6302 6303 - dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, 6303 + dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 6304 6304 LPFC_RAS_MAX_ENTRY_SIZE, 6305 - &dmabuf->phys, 6306 - GFP_KERNEL); 6305 + &dmabuf->phys, GFP_KERNEL); 6307 6306 if (!dmabuf->virt) { 6308 6307 kfree(dmabuf); 6309 6308 rc = -ENOMEM; ··· 14612 14613 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 14613 14614 if (!dmabuf) 14614 14615 goto out_fail; 14615 - dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, 14616 - hw_page_size, &dmabuf->phys, 14617 - GFP_KERNEL); 14616 + dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 14617 + hw_page_size, &dmabuf->phys, 14618 + GFP_KERNEL); 14618 14619 if (!dmabuf->virt) { 14619 14620 kfree(dmabuf); 14620 14621 goto out_fail;
+8 -7
drivers/scsi/megaraid/megaraid_mbox.c
··· 967 967 * Allocate the common 16-byte aligned memory for the handshake 968 968 * mailbox. 969 969 */ 970 - raid_dev->una_mbox64 = dma_zalloc_coherent(&adapter->pdev->dev, 971 - sizeof(mbox64_t), &raid_dev->una_mbox64_dma, 972 - GFP_KERNEL); 970 + raid_dev->una_mbox64 = dma_alloc_coherent(&adapter->pdev->dev, 971 + sizeof(mbox64_t), 972 + &raid_dev->una_mbox64_dma, 973 + GFP_KERNEL); 973 974 974 975 if (!raid_dev->una_mbox64) { 975 976 con_log(CL_ANN, (KERN_WARNING ··· 996 995 align; 997 996 998 997 // Allocate memory for commands issued internally 999 - adapter->ibuf = dma_zalloc_coherent(&pdev->dev, MBOX_IBUF_SIZE, 1000 - &adapter->ibuf_dma_h, GFP_KERNEL); 998 + adapter->ibuf = dma_alloc_coherent(&pdev->dev, MBOX_IBUF_SIZE, 999 + &adapter->ibuf_dma_h, GFP_KERNEL); 1001 1000 if (!adapter->ibuf) { 1002 1001 1003 1002 con_log(CL_ANN, (KERN_WARNING ··· 2898 2897 * Issue an ENQUIRY3 command to find out certain adapter parameters, 2899 2898 * e.g., max channels, max commands etc. 2900 2899 */ 2901 - pinfo = dma_zalloc_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t), 2902 - &pinfo_dma_h, GFP_KERNEL); 2900 + pinfo = dma_alloc_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t), 2901 + &pinfo_dma_h, GFP_KERNEL); 2903 2902 if (pinfo == NULL) { 2904 2903 con_log(CL_ANN, (KERN_WARNING 2905 2904 "megaraid: out of memory, %s %d\n", __func__,
+13 -13
drivers/scsi/megaraid/megaraid_sas_base.c
··· 2273 2273 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2274 2274 else { 2275 2275 new_affiliation_111 = 2276 - dma_zalloc_coherent(&instance->pdev->dev, 2277 - sizeof(struct MR_LD_VF_AFFILIATION_111), 2278 - &new_affiliation_111_h, GFP_KERNEL); 2276 + dma_alloc_coherent(&instance->pdev->dev, 2277 + sizeof(struct MR_LD_VF_AFFILIATION_111), 2278 + &new_affiliation_111_h, GFP_KERNEL); 2279 2279 if (!new_affiliation_111) { 2280 2280 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " 2281 2281 "memory for new affiliation for scsi%d\n", ··· 2380 2380 sizeof(struct MR_LD_VF_AFFILIATION)); 2381 2381 else { 2382 2382 new_affiliation = 2383 - dma_zalloc_coherent(&instance->pdev->dev, 2384 - (MAX_LOGICAL_DRIVES + 1) * 2385 - sizeof(struct MR_LD_VF_AFFILIATION), 2386 - &new_affiliation_h, GFP_KERNEL); 2383 + dma_alloc_coherent(&instance->pdev->dev, 2384 + (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION), 2385 + &new_affiliation_h, GFP_KERNEL); 2387 2386 if (!new_affiliation) { 2388 2387 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " 2389 2388 "memory for new affiliation for scsi%d\n", ··· 2545 2546 2546 2547 if (initial) { 2547 2548 instance->hb_host_mem = 2548 - dma_zalloc_coherent(&instance->pdev->dev, 2549 - sizeof(struct MR_CTRL_HB_HOST_MEM), 2550 - &instance->hb_host_mem_h, GFP_KERNEL); 2549 + dma_alloc_coherent(&instance->pdev->dev, 2550 + sizeof(struct MR_CTRL_HB_HOST_MEM), 2551 + &instance->hb_host_mem_h, 2552 + GFP_KERNEL); 2551 2553 if (!instance->hb_host_mem) { 2552 2554 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate" 2553 2555 " memory for heartbeat host memory for scsi%d\n", ··· 5816 5816 } 5817 5817 5818 5818 dcmd = &cmd->frame->dcmd; 5819 - el_info = dma_zalloc_coherent(&instance->pdev->dev, 5820 - sizeof(struct megasas_evt_log_info), &el_info_h, 5821 - GFP_KERNEL); 5819 + el_info = dma_alloc_coherent(&instance->pdev->dev, 5820 + sizeof(struct megasas_evt_log_info), 5821 + &el_info_h, GFP_KERNEL); 5822 5822 if (!el_info) { 5823 5823 megasas_return_cmd(instance, cmd); 5824 5824 return -ENOMEM;
+3 -2
drivers/scsi/megaraid/megaraid_sas_fusion.c
··· 689 689 array_size = sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * 690 690 MAX_MSIX_QUEUES_FUSION; 691 691 692 - fusion->rdpq_virt = dma_zalloc_coherent(&instance->pdev->dev, 693 - array_size, &fusion->rdpq_phys, GFP_KERNEL); 692 + fusion->rdpq_virt = dma_alloc_coherent(&instance->pdev->dev, 693 + array_size, &fusion->rdpq_phys, 694 + GFP_KERNEL); 694 695 if (!fusion->rdpq_virt) { 695 696 dev_err(&instance->pdev->dev, 696 697 "Failed from %s %d\n", __func__, __LINE__);
+3 -2
drivers/scsi/mesh.c
··· 1915 1915 /* We use the PCI APIs for now until the generic one gets fixed 1916 1916 * enough or until we get some macio-specific versions 1917 1917 */ 1918 - dma_cmd_space = dma_zalloc_coherent(&macio_get_pci_dev(mdev)->dev, 1919 - ms->dma_cmd_size, &dma_cmd_bus, GFP_KERNEL); 1918 + dma_cmd_space = dma_alloc_coherent(&macio_get_pci_dev(mdev)->dev, 1919 + ms->dma_cmd_size, &dma_cmd_bus, 1920 + GFP_KERNEL); 1920 1921 if (dma_cmd_space == NULL) { 1921 1922 printk(KERN_ERR "mesh: can't allocate DMA table\n"); 1922 1923 goto out_unmap;
+5 -4
drivers/scsi/mvumi.c
··· 143 143 144 144 case RESOURCE_UNCACHED_MEMORY: 145 145 size = round_up(size, 8); 146 - res->virt_addr = dma_zalloc_coherent(&mhba->pdev->dev, size, 147 - &res->bus_addr, GFP_KERNEL); 146 + res->virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size, 147 + &res->bus_addr, 148 + GFP_KERNEL); 148 149 if (!res->virt_addr) { 149 150 dev_err(&mhba->pdev->dev, 150 151 "unable to allocate consistent mem," ··· 247 246 if (size == 0) 248 247 return 0; 249 248 250 - virt_addr = dma_zalloc_coherent(&mhba->pdev->dev, size, &phy_addr, 251 - GFP_KERNEL); 249 + virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size, &phy_addr, 250 + GFP_KERNEL); 252 251 if (!virt_addr) 253 252 return -1; 254 253
+2 -2
drivers/scsi/pm8001/pm8001_sas.c
··· 116 116 u64 align_offset = 0; 117 117 if (align) 118 118 align_offset = (dma_addr_t)align - 1; 119 - mem_virt_alloc = dma_zalloc_coherent(&pdev->dev, mem_size + align, 120 - &mem_dma_handle, GFP_KERNEL); 119 + mem_virt_alloc = dma_alloc_coherent(&pdev->dev, mem_size + align, 120 + &mem_dma_handle, GFP_KERNEL); 121 121 if (!mem_virt_alloc) { 122 122 pm8001_printk("memory allocation error\n"); 123 123 return -1;
+17 -12
drivers/scsi/qedf/qedf_main.c
··· 1050 1050 sizeof(void *); 1051 1051 fcport->sq_pbl_size = fcport->sq_pbl_size + QEDF_PAGE_SIZE; 1052 1052 1053 - fcport->sq = dma_zalloc_coherent(&qedf->pdev->dev, 1054 - fcport->sq_mem_size, &fcport->sq_dma, GFP_KERNEL); 1053 + fcport->sq = dma_alloc_coherent(&qedf->pdev->dev, fcport->sq_mem_size, 1054 + &fcport->sq_dma, GFP_KERNEL); 1055 1055 if (!fcport->sq) { 1056 1056 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue.\n"); 1057 1057 rval = 1; 1058 1058 goto out; 1059 1059 } 1060 1060 1061 - fcport->sq_pbl = dma_zalloc_coherent(&qedf->pdev->dev, 1062 - fcport->sq_pbl_size, &fcport->sq_pbl_dma, GFP_KERNEL); 1061 + fcport->sq_pbl = dma_alloc_coherent(&qedf->pdev->dev, 1062 + fcport->sq_pbl_size, 1063 + &fcport->sq_pbl_dma, GFP_KERNEL); 1063 1064 if (!fcport->sq_pbl) { 1064 1065 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue PBL.\n"); 1065 1066 rval = 1; ··· 2681 2680 } 2682 2681 2683 2682 /* Allocate list of PBL pages */ 2684 - qedf->bdq_pbl_list = dma_zalloc_coherent(&qedf->pdev->dev, 2685 - QEDF_PAGE_SIZE, &qedf->bdq_pbl_list_dma, GFP_KERNEL); 2683 + qedf->bdq_pbl_list = dma_alloc_coherent(&qedf->pdev->dev, 2684 + QEDF_PAGE_SIZE, 2685 + &qedf->bdq_pbl_list_dma, 2686 + GFP_KERNEL); 2686 2687 if (!qedf->bdq_pbl_list) { 2687 2688 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL pages.\n"); 2688 2689 return -ENOMEM; ··· 2773 2770 ALIGN(qedf->global_queues[i]->cq_pbl_size, QEDF_PAGE_SIZE); 2774 2771 2775 2772 qedf->global_queues[i]->cq = 2776 - dma_zalloc_coherent(&qedf->pdev->dev, 2777 - qedf->global_queues[i]->cq_mem_size, 2778 - &qedf->global_queues[i]->cq_dma, GFP_KERNEL); 2773 + dma_alloc_coherent(&qedf->pdev->dev, 2774 + qedf->global_queues[i]->cq_mem_size, 2775 + &qedf->global_queues[i]->cq_dma, 2776 + GFP_KERNEL); 2779 2777 2780 2778 if (!qedf->global_queues[i]->cq) { 2781 2779 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq.\n"); ··· 2785 2781 } 2786 2782 2787 2783 qedf->global_queues[i]->cq_pbl = 2788 - dma_zalloc_coherent(&qedf->pdev->dev, 2789 - qedf->global_queues[i]->cq_pbl_size, 2790 - &qedf->global_queues[i]->cq_pbl_dma, GFP_KERNEL); 2784 + dma_alloc_coherent(&qedf->pdev->dev, 2785 + qedf->global_queues[i]->cq_pbl_size, 2786 + &qedf->global_queues[i]->cq_pbl_dma, 2787 + GFP_KERNEL); 2791 2788 2792 2789 if (!qedf->global_queues[i]->cq_pbl) { 2793 2790 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq PBL.\n");
+19 -20
drivers/scsi/qedi/qedi_main.c
··· 1394 1394 { 1395 1395 struct qedi_nvm_iscsi_image nvm_image; 1396 1396 1397 - qedi->iscsi_image = dma_zalloc_coherent(&qedi->pdev->dev, 1398 - sizeof(nvm_image), 1399 - &qedi->nvm_buf_dma, 1400 - GFP_KERNEL); 1397 + qedi->iscsi_image = dma_alloc_coherent(&qedi->pdev->dev, 1398 + sizeof(nvm_image), 1399 + &qedi->nvm_buf_dma, GFP_KERNEL); 1401 1400 if (!qedi->iscsi_image) { 1402 1401 QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n"); 1403 1402 return -ENOMEM; ··· 1509 1510 } 1510 1511 1511 1512 /* Allocate list of PBL pages */ 1512 - qedi->bdq_pbl_list = dma_zalloc_coherent(&qedi->pdev->dev, 1513 - QEDI_PAGE_SIZE, 1514 - &qedi->bdq_pbl_list_dma, 1515 - GFP_KERNEL); 1513 + qedi->bdq_pbl_list = dma_alloc_coherent(&qedi->pdev->dev, 1514 + QEDI_PAGE_SIZE, 1515 + &qedi->bdq_pbl_list_dma, 1516 + GFP_KERNEL); 1516 1517 if (!qedi->bdq_pbl_list) { 1517 1518 QEDI_ERR(&qedi->dbg_ctx, 1518 1519 "Could not allocate list of PBL pages.\n"); ··· 1608 1609 (qedi->global_queues[i]->cq_pbl_size + 1609 1610 (QEDI_PAGE_SIZE - 1)); 1610 1611 1611 - qedi->global_queues[i]->cq = dma_zalloc_coherent(&qedi->pdev->dev, 1612 - qedi->global_queues[i]->cq_mem_size, 1613 - &qedi->global_queues[i]->cq_dma, 1614 - GFP_KERNEL); 1612 + qedi->global_queues[i]->cq = dma_alloc_coherent(&qedi->pdev->dev, 1613 + qedi->global_queues[i]->cq_mem_size, 1614 + &qedi->global_queues[i]->cq_dma, 1615 + GFP_KERNEL); 1615 1616 1616 1617 if (!qedi->global_queues[i]->cq) { 1617 1618 QEDI_WARN(&qedi->dbg_ctx, ··· 1619 1620 status = -ENOMEM; 1620 1621 goto mem_alloc_failure; 1621 1622 } 1622 - qedi->global_queues[i]->cq_pbl = dma_zalloc_coherent(&qedi->pdev->dev, 1623 - qedi->global_queues[i]->cq_pbl_size, 1624 - &qedi->global_queues[i]->cq_pbl_dma, 1625 - GFP_KERNEL); 1623 + qedi->global_queues[i]->cq_pbl = dma_alloc_coherent(&qedi->pdev->dev, 1624 + qedi->global_queues[i]->cq_pbl_size, 1625 + &qedi->global_queues[i]->cq_pbl_dma, 1626 + GFP_KERNEL); 1626 1627 1627 1628 if (!qedi->global_queues[i]->cq_pbl) { 1628 1629 QEDI_WARN(&qedi->dbg_ctx, ··· 1690 1691 ep->sq_pbl_size = (ep->sq_mem_size / QEDI_PAGE_SIZE) * sizeof(void *); 1691 1692 ep->sq_pbl_size = ep->sq_pbl_size + QEDI_PAGE_SIZE; 1692 1693 1693 - ep->sq = dma_zalloc_coherent(&qedi->pdev->dev, ep->sq_mem_size, 1694 - &ep->sq_dma, GFP_KERNEL); 1694 + ep->sq = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_mem_size, 1695 + &ep->sq_dma, GFP_KERNEL); 1695 1696 if (!ep->sq) { 1696 1697 QEDI_WARN(&qedi->dbg_ctx, 1697 1698 "Could not allocate send queue.\n"); 1698 1699 rval = -ENOMEM; 1699 1700 goto out; 1700 1701 } 1701 1702 ep->sq_pbl = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size, 1702 1703 &ep->sq_pbl_dma, GFP_KERNEL); 1703 1704 if (!ep->sq_pbl) { 1704 1705 QEDI_WARN(&qedi->dbg_ctx, 1705 1706 "Could not allocate send queue PBL.\n");
+2 -2
drivers/scsi/qla2xxx/qla_attr.c
··· 2415 2415 if (qla2x00_chip_is_down(vha)) 2416 2416 goto done; 2417 2417 2418 - stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats), 2419 - &stats_dma, GFP_KERNEL); 2418 + stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma, 2419 + GFP_KERNEL); 2420 2420 if (!stats) { 2421 2421 ql_log(ql_log_warn, vha, 0x707d, 2422 2422 "Failed to allocate memory for stats.\n");
+2 -2
drivers/scsi/qla2xxx/qla_bsg.c
··· 2312 2312 if (!IS_FWI2_CAPABLE(ha)) 2313 2313 return -EPERM; 2314 2314 2315 - stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats), 2316 - &stats_dma, GFP_KERNEL); 2315 + stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma, 2316 + GFP_KERNEL); 2317 2317 if (!stats) { 2318 2318 ql_log(ql_log_warn, vha, 0x70e2, 2319 2319 "Failed to allocate memory for stats.\n");
+8 -6
drivers/scsi/qla2xxx/qla_gs.c
··· 4147 4147 return rval; 4148 4148 } 4149 4149 4150 - sp->u.iocb_cmd.u.ctarg.req = dma_zalloc_coherent( 4151 - &vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), 4152 - &sp->u.iocb_cmd.u.ctarg.req_dma, GFP_KERNEL); 4150 + sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, 4151 + sizeof(struct ct_sns_pkt), 4152 + &sp->u.iocb_cmd.u.ctarg.req_dma, 4153 + GFP_KERNEL); 4153 4154 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); 4154 4155 if (!sp->u.iocb_cmd.u.ctarg.req) { 4155 4156 ql_log(ql_log_warn, vha, 0xffff, ··· 4166 4165 ((vha->hw->max_fibre_devices - 1) * 4167 4166 sizeof(struct ct_sns_gpn_ft_data)); 4168 4167 4169 - sp->u.iocb_cmd.u.ctarg.rsp = dma_zalloc_coherent( 4170 - &vha->hw->pdev->dev, rspsz, 4171 - &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL); 4168 + sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, 4169 + rspsz, 4170 + &sp->u.iocb_cmd.u.ctarg.rsp_dma, 4171 + GFP_KERNEL); 4172 4172 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); 4173 4173 if (!sp->u.iocb_cmd.u.ctarg.rsp) { 4174 4174 ql_log(ql_log_warn, vha, 0xffff,
+4 -4
drivers/scsi/qla2xxx/qla_init.c
··· 3099 3099 FCE_SIZE, ha->fce, ha->fce_dma); 3100 3100 3101 3101 /* Allocate memory for Fibre Channel Event Buffer. */ 3102 - tc = dma_zalloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, 3103 - GFP_KERNEL); 3102 + tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, 3103 + GFP_KERNEL); 3104 3104 if (!tc) { 3105 3105 ql_log(ql_log_warn, vha, 0x00be, 3106 3106 "Unable to allocate (%d KB) for FCE.\n", ··· 3131 3131 EFT_SIZE, ha->eft, ha->eft_dma); 3132 3132 3133 3133 /* Allocate memory for Extended Trace Buffer. */ 3134 - tc = dma_zalloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma, 3135 - GFP_KERNEL); 3134 + tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma, 3135 + GFP_KERNEL); 3136 3136 if (!tc) { 3137 3137 ql_log(ql_log_warn, vha, 0x00c1, 3138 3138 "Unable to allocate (%d KB) for EFT.\n",
+2 -2
drivers/scsi/qla4xxx/ql4_init.c
··· 153 153 dma_addr_t sys_info_dma; 154 154 int status = QLA_ERROR; 155 155 156 - sys_info = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*sys_info), 157 - &sys_info_dma, GFP_KERNEL); 156 + sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info), 157 + &sys_info_dma, GFP_KERNEL); 158 158 if (sys_info == NULL) { 159 159 DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n", 160 160 ha->host_no, __func__));
+9 -9
drivers/scsi/qla4xxx/ql4_mbx.c
··· 625 625 uint32_t mbox_sts[MBOX_REG_COUNT]; 626 626 int status = QLA_ERROR; 627 627 628 - init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev, 629 - sizeof(struct addr_ctrl_blk), 630 - &init_fw_cb_dma, GFP_KERNEL); 628 + init_fw_cb = dma_alloc_coherent(&ha->pdev->dev, 629 + sizeof(struct addr_ctrl_blk), 630 + &init_fw_cb_dma, GFP_KERNEL); 631 631 if (init_fw_cb == NULL) { 632 632 DEBUG2(printk("scsi%ld: %s: Unable to alloc init_cb\n", 633 633 ha->host_no, __func__)); ··· 709 709 uint32_t mbox_cmd[MBOX_REG_COUNT]; 710 710 uint32_t mbox_sts[MBOX_REG_COUNT]; 711 711 712 - init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev, 713 - sizeof(struct addr_ctrl_blk), 714 - &init_fw_cb_dma, GFP_KERNEL); 712 + init_fw_cb = dma_alloc_coherent(&ha->pdev->dev, 713 + sizeof(struct addr_ctrl_blk), 714 + &init_fw_cb_dma, GFP_KERNEL); 715 715 if (init_fw_cb == NULL) { 716 716 printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no, 717 717 __func__); ··· 1340 1340 uint32_t mbox_sts[MBOX_REG_COUNT]; 1341 1341 int status = QLA_ERROR; 1342 1342 1343 - about_fw = dma_zalloc_coherent(&ha->pdev->dev, 1344 - sizeof(struct about_fw_info), 1345 - &about_fw_dma, GFP_KERNEL); 1343 + about_fw = dma_alloc_coherent(&ha->pdev->dev, 1344 + sizeof(struct about_fw_info), 1345 + &about_fw_dma, GFP_KERNEL); 1346 1346 if (!about_fw) { 1347 1347 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to alloc memory " 1348 1348 "for about_fw\n", __func__));
+2 -2
drivers/scsi/qla4xxx/ql4_nx.c
··· 4052 4052 dma_addr_t sys_info_dma; 4053 4053 int status = QLA_ERROR; 4054 4054 4055 - sys_info = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*sys_info), 4056 - &sys_info_dma, GFP_KERNEL); 4055 + sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info), 4056 + &sys_info_dma, GFP_KERNEL); 4057 4057 if (sys_info == NULL) { 4058 4058 DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n", 4059 4059 ha->host_no, __func__));
+5 -5
drivers/scsi/qla4xxx/ql4_os.c
··· 2704 2704 uint32_t rem = len; 2705 2705 struct nlattr *attr; 2706 2706 2707 - init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev, 2708 - sizeof(struct addr_ctrl_blk), 2709 - &init_fw_cb_dma, GFP_KERNEL); 2707 + init_fw_cb = dma_alloc_coherent(&ha->pdev->dev, 2708 + sizeof(struct addr_ctrl_blk), 2709 + &init_fw_cb_dma, GFP_KERNEL); 2710 2710 if (!init_fw_cb) { 2711 2711 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n", 2712 2712 __func__); ··· 4206 4206 sizeof(struct shadow_regs) + 4207 4207 MEM_ALIGN_VALUE + 4208 4208 (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 4209 - ha->queues = dma_zalloc_coherent(&ha->pdev->dev, ha->queues_len, 4210 - &ha->queues_dma, GFP_KERNEL); 4209 + ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len, 4210 + &ha->queues_dma, GFP_KERNEL); 4211 4211 if (ha->queues == NULL) { 4212 4212 ql4_printk(KERN_WARNING, ha, 4213 4213 "Memory Allocation failed - queues.\n");
+16 -16
drivers/scsi/smartpqi/smartpqi_init.c
··· 3576 3576 alloc_length += PQI_EXTRA_SGL_MEMORY; 3577 3577 3578 3578 ctrl_info->queue_memory_base = 3579 - dma_zalloc_coherent(&ctrl_info->pci_dev->dev, 3580 - alloc_length, 3581 - &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL); 3579 + dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, 3580 + &ctrl_info->queue_memory_base_dma_handle, 3581 + GFP_KERNEL); 3582 3582 3583 3583 if (!ctrl_info->queue_memory_base) 3584 3584 return -ENOMEM; ··· 3715 3715 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; 3716 3716 3717 3717 ctrl_info->admin_queue_memory_base = 3718 - dma_zalloc_coherent(&ctrl_info->pci_dev->dev, 3719 - alloc_length, 3720 - &ctrl_info->admin_queue_memory_base_dma_handle, 3721 - GFP_KERNEL); 3718 + dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, 3719 + &ctrl_info->admin_queue_memory_base_dma_handle, 3720 + GFP_KERNEL); 3721 3722 3723 3722 if (!ctrl_info->admin_queue_memory_base) 3724 3723 return -ENOMEM; ··· 4601 4602 4602 4603 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info) 4603 4604 { 4604 - ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev, 4605 - ctrl_info->error_buffer_length, 4606 - &ctrl_info->error_buffer_dma_handle, GFP_KERNEL); 4605 + ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev, 4606 + ctrl_info->error_buffer_length, 4607 + &ctrl_info->error_buffer_dma_handle, 4608 + GFP_KERNEL); 4607 4609 4608 4610 if (!ctrl_info->error_buffer) 4609 4611 return -ENOMEM; ··· 7487 7487 dma_addr_t dma_handle; 7488 7488 7489 7489 ctrl_info->pqi_ofa_chunk_virt_addr[i] = 7490 - dma_zalloc_coherent(dev, chunk_size, &dma_handle, 7491 - GFP_KERNEL); 7490 + dma_alloc_coherent(dev, chunk_size, &dma_handle, 7491 + GFP_KERNEL); 7492 7492 7493 7493 if (!ctrl_info->pqi_ofa_chunk_virt_addr[i]) 7494 7494 break; ··· 7545 7545 struct device *dev; 7546 7546 7547 7547 dev = &ctrl_info->pci_dev->dev; 7548 - pqi_ofa_memory = dma_zalloc_coherent(dev, 7549 - PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, 7550 - &ctrl_info->pqi_ofa_mem_dma_handle, 7551 - GFP_KERNEL); 7548 + pqi_ofa_memory = dma_alloc_coherent(dev, 7549 + PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, 7550 + &ctrl_info->pqi_ofa_mem_dma_handle, 7551 + GFP_KERNEL); 7552 7552 7553 7553 if (!pqi_ofa_memory) 7554 7554 return;
+1 -1
drivers/soc/fsl/qbman/dpaa_sys.c
··· 62 62 return -ENODEV; 63 63 } 64 64 65 - if (!dma_zalloc_coherent(dev, *size, addr, 0)) { 65 + if (!dma_alloc_coherent(dev, *size, addr, 0)) { 66 66 dev_err(dev, "DMA Alloc memory failed\n"); 67 67 return -ENODEV; 68 68 }
+3 -3
drivers/spi/spi-pic32-sqi.c
··· 466 466 int i; 467 467 468 468 /* allocate coherent DMAable memory for hardware buffer descriptors. */ 469 - sqi->bd = dma_zalloc_coherent(&sqi->master->dev, 470 - sizeof(*bd) * PESQI_BD_COUNT, 471 - &sqi->bd_dma, GFP_KERNEL); 469 + sqi->bd = dma_alloc_coherent(&sqi->master->dev, 470 + sizeof(*bd) * PESQI_BD_COUNT, 471 + &sqi->bd_dma, GFP_KERNEL); 472 472 if (!sqi->bd) { 473 473 dev_err(&sqi->master->dev, "failed allocating dma buffer\n"); 474 474 return -ENOMEM;
+1 -2
drivers/staging/mt7621-eth/mtk_eth_soc.c
··· 1396 1396 if (!ring->tx_buf) 1397 1397 goto no_tx_mem; 1398 1398 1399 - ring->tx_dma = dma_zalloc_coherent(eth->dev, 1400 - ring->tx_ring_size * sz, 1399 + ring->tx_dma = dma_alloc_coherent(eth->dev, ring->tx_ring_size * sz, 1401 1400 &ring->tx_phys, 1402 1401 GFP_ATOMIC | __GFP_ZERO); 1403 1402 if (!ring->tx_dma)
+2 -4
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
··· 407 407 /* Allocate enough storage to hold the page pointers and the page 408 408 * list 409 409 */ 410 - pagelist = dma_zalloc_coherent(g_dev, 411 - pagelist_size, 412 - &dma_addr, 413 - GFP_KERNEL); 410 + pagelist = dma_alloc_coherent(g_dev, pagelist_size, &dma_addr, 411 + GFP_KERNEL); 414 412 415 413 vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist); 416 414
+6 -13
drivers/staging/vt6655/device_main.c
··· 440 440 void *vir_pool; 441 441 442 442 /*allocate all RD/TD rings a single pool*/ 443 - vir_pool = dma_zalloc_coherent(&priv->pcid->dev, 444 - priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc) + 445 - priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc) + 446 - priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc) + 447 - priv->opts.tx_descs[1] * sizeof(struct vnt_tx_desc), 448 - &priv->pool_dma, GFP_ATOMIC); 443 + vir_pool = dma_alloc_coherent(&priv->pcid->dev, 444 + priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc) + priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc) + priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc) + priv->opts.tx_descs[1] * sizeof(struct vnt_tx_desc), 445 + &priv->pool_dma, GFP_ATOMIC); 449 446 if (!vir_pool) { 450 447 dev_err(&priv->pcid->dev, "allocate desc dma memory failed\n"); 451 448 return false; ··· 456 459 priv->rd1_pool_dma = priv->rd0_pool_dma + 457 460 priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc); 458 461 459 - priv->tx0_bufs = dma_zalloc_coherent(&priv->pcid->dev, 460 - priv->opts.tx_descs[0] * PKT_BUF_SZ + 461 - priv->opts.tx_descs[1] * PKT_BUF_SZ + 462 - CB_BEACON_BUF_SIZE + 463 - CB_MAX_BUF_SIZE, 464 - &priv->tx_bufs_dma0, 465 - GFP_ATOMIC); 462 + priv->tx0_bufs = dma_alloc_coherent(&priv->pcid->dev, 463 + priv->opts.tx_descs[0] * PKT_BUF_SZ + priv->opts.tx_descs[1] * PKT_BUF_SZ + CB_BEACON_BUF_SIZE + CB_MAX_BUF_SIZE, 464 + &priv->tx_bufs_dma0, GFP_ATOMIC); 466 465 if (!priv->tx0_bufs) { 467 466 dev_err(&priv->pcid->dev, "allocate buf dma memory failed\n"); 468 467
+6 -7
drivers/usb/gadget/udc/bdc/bdc_core.c
··· 172 172 /* Refer to BDC spec, Table 4 for description of SPB */ 173 173 sp_buff_size = 1 << (sp_buff_size + 5); 174 174 dev_dbg(bdc->dev, "Allocating %d bytes for scratchpad\n", sp_buff_size); 175 - bdc->scratchpad.buff = dma_zalloc_coherent(bdc->dev, sp_buff_size, 176 - &bdc->scratchpad.sp_dma, GFP_KERNEL); 175 + bdc->scratchpad.buff = dma_alloc_coherent(bdc->dev, sp_buff_size, 176 + &bdc->scratchpad.sp_dma, 177 + GFP_KERNEL); 177 178 178 179 if (!bdc->scratchpad.buff) 179 180 goto fail; ··· 203 202 bdc_writel(bdc->regs, BDC_SRRINT(0), BDC_SRR_RWS | BDC_SRR_RST); 204 203 bdc->srr.dqp_index = 0; 205 204 /* allocate the status report descriptors */ 206 - bdc->srr.sr_bds = dma_zalloc_coherent( 207 - bdc->dev, 208 - NUM_SR_ENTRIES * sizeof(struct bdc_bd), 209 - &bdc->srr.dma_addr, 210 - GFP_KERNEL); 205 + bdc->srr.sr_bds = dma_alloc_coherent(bdc->dev, 206 + NUM_SR_ENTRIES * sizeof(struct bdc_bd), 207 + &bdc->srr.dma_addr, GFP_KERNEL); 211 208 if (!bdc->srr.sr_bds) 212 209 return -ENOMEM; 213 210
+3 -3
drivers/usb/host/uhci-hcd.c
··· 596 596 &uhci_debug_operations); 597 597 #endif 598 598 599 - uhci->frame = dma_zalloc_coherent(uhci_dev(uhci), 600 - UHCI_NUMFRAMES * sizeof(*uhci->frame), 601 - &uhci->frame_dma_handle, GFP_KERNEL); 599 + uhci->frame = dma_alloc_coherent(uhci_dev(uhci), 600 + UHCI_NUMFRAMES * sizeof(*uhci->frame), 601 + &uhci->frame_dma_handle, GFP_KERNEL); 602 602 if (!uhci->frame) { 603 603 dev_err(uhci_dev(uhci), 604 604 "unable to allocate consistent memory for frame list\n");
+4 -4
drivers/usb/host/xhci-mem.c
··· 1672 1672 xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma); 1673 1673 for (i = 0; i < num_sp; i++) { 1674 1674 dma_addr_t dma; 1675 - void *buf = dma_zalloc_coherent(dev, xhci->page_size, &dma, 1676 - flags); 1675 + void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma, 1676 + flags); 1677 1677 if (!buf) 1678 1678 goto fail_sp4; 1679 1679 ··· 1799 1799 struct xhci_erst_entry *entry; 1800 1800 1801 1801 size = sizeof(struct xhci_erst_entry) * evt_ring->num_segs; 1802 - erst->entries = dma_zalloc_coherent(xhci_to_hcd(xhci)->self.sysdev, 1803 - size, &erst->erst_dma_addr, flags); 1802 + erst->entries = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev, 1803 + size, &erst->erst_dma_addr, flags); 1804 1804 if (!erst->entries) 1805 1805 return -ENOMEM; 1806 1806
+3 -3
drivers/video/fbdev/da8xx-fb.c
··· 1446 1446 da8xx_fb_fix.line_length - 1; 1447 1447 1448 1448 /* allocate palette buffer */ 1449 - par->v_palette_base = dma_zalloc_coherent(NULL, PALETTE_SIZE, 1450 - &par->p_palette_base, 1451 - GFP_KERNEL | GFP_DMA); 1449 + par->v_palette_base = dma_alloc_coherent(NULL, PALETTE_SIZE, 1450 + &par->p_palette_base, 1451 + GFP_KERNEL | GFP_DMA); 1452 1452 if (!par->v_palette_base) { 1453 1453 dev_err(&device->dev, 1454 1454 "GLCD: kmalloc for palette buffer failed\n");
+1 -1
include/linux/pci-dma-compat.h
··· 24 24 pci_zalloc_consistent(struct pci_dev *hwdev, size_t size, 25 25 dma_addr_t *dma_handle) 26 26 { 27 - return dma_zalloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC); 27 + return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC); 28 28 } 29 29 30 30 static inline void
+2 -2
sound/aoa/soundbus/i2sbus/core.c
··· 47 47 /* We use the PCI APIs for now until the generic one gets fixed 48 48 * enough or until we get some macio-specific versions 49 49 */ 50 - r->space = dma_zalloc_coherent(&macio_get_pci_dev(i2sdev->macio)->dev, 51 - r->size, &r->bus_addr, GFP_KERNEL); 50 + r->space = dma_alloc_coherent(&macio_get_pci_dev(i2sdev->macio)->dev, 51 + r->size, &r->bus_addr, GFP_KERNEL); 52 52 if (!r->space) 53 53 return -ENOMEM; 54 54
+2 -2
sound/sparc/dbri.c
··· 2541 2541 dbri->op = op; 2542 2542 dbri->irq = irq; 2543 2543 2544 - dbri->dma = dma_zalloc_coherent(&op->dev, sizeof(struct dbri_dma), 2545 - &dbri->dma_dvma, GFP_KERNEL); 2544 + dbri->dma = dma_alloc_coherent(&op->dev, sizeof(struct dbri_dma), 2545 + &dbri->dma_dvma, GFP_KERNEL); 2546 2546 if (!dbri->dma) 2547 2547 return -ENOMEM; 2548 2548