RDMA: Use vzalloc() to replace vmalloc()+memset(0)

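vzalloc() returns virtually contiguous, already-zeroed memory, so every
open-coded vmalloc() followed by memset(..., 0, ...) collapses into a
single call with no functional change. A minimal illustration of the
pattern being applied treewide here (the buffer name and count are made
up for the example):

	buf = vmalloc(count * sizeof(*buf));	/* before */
	if (!buf)
		return -ENOMEM;
	memset(buf, 0, count * sizeof(*buf));

	buf = vzalloc(count * sizeof(*buf));	/* after: same semantics */
	if (!buf)
		return -ENOMEM;

The memory is still released with plain vfree(), so no cleanup paths
need to change.
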
Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>

Authored by Joe Perches and committed by Roland Dreier (948579cd, 4162cf64)

8 files changed, +15 -34
+2 -3
drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -459,13 +459,12 @@
 			    IB_DEVICE_MEM_WINDOW);
 
 	/* Allocate the qptr_array */
-	c2dev->qptr_array = vmalloc(C2_MAX_CQS * sizeof(void *));
+	c2dev->qptr_array = vzalloc(C2_MAX_CQS * sizeof(void *));
 	if (!c2dev->qptr_array) {
 		return -ENOMEM;
 	}
 
-	/* Inialize the qptr_array */
-	memset(c2dev->qptr_array, 0, C2_MAX_CQS * sizeof(void *));
+	/* Initialize the qptr_array */
 	c2dev->qptr_array[0] = (void *) &c2dev->req_vq;
 	c2dev->qptr_array[1] = (void *) &c2dev->rep_vq;
 	c2dev->qptr_array[2] = (void *) &c2dev->aeq;
+2 -3
drivers/infiniband/hw/ehca/ipz_pt_fn.c
@@ -222,15 +222,14 @@
 	queue->small_page = NULL;
 
 	/* allocate queue page pointers */
-	queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
+	queue->queue_pages = kzalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
 	if (!queue->queue_pages) {
-		queue->queue_pages = vmalloc(nr_of_pages * sizeof(void *));
+		queue->queue_pages = vzalloc(nr_of_pages * sizeof(void *));
 		if (!queue->queue_pages) {
 			ehca_gen_err("Couldn't allocate queue page list");
 			return 0;
 		}
 	}
-	memset(queue->queue_pages, 0, nr_of_pages * sizeof(void *));
 
 	/* allocate actual queue pages */
 	if (is_small) {
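
The ehca hunk differs from the rest: it keeps the two-tier allocation
(kmalloc() first, vmalloc() as a fallback for page lists too large for
the slab) and just switches both tiers to their zeroing variants. The
subtle part of such a fallback is that the free path must match
whichever allocator succeeded. A sketch of how that pairing is commonly
handled (helper names hypothetical; later kernels added
kvzalloc()/kvfree() for exactly this case):

	#include <linux/slab.h>
	#include <linux/vmalloc.h>
	#include <linux/mm.h>	/* is_vmalloc_addr() */

	static void *page_list_zalloc(size_t size)
	{
		void *p = kzalloc(size, GFP_KERNEL);

		if (!p)			/* slab failed; try vmalloc space */
			p = vzalloc(size);
		return p;
	}

	static void page_list_free(void *p)
	{
		if (is_vmalloc_addr(p))
			vfree(p);
		else
			kfree(p);
	}
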
+1 -2
drivers/infiniband/hw/ipath/ipath_driver.c
@@ -199,12 +199,11 @@
 		goto bail;
 	}
 
-	dd = vmalloc(sizeof(*dd));
+	dd = vzalloc(sizeof(*dd));
 	if (!dd) {
 		dd = ERR_PTR(-ENOMEM);
 		goto bail;
 	}
-	memset(dd, 0, sizeof(*dd));
 	dd->ipath_unit = -1;
 
 	spin_lock_irqsave(&ipath_devs_lock, flags);
+3 -8
drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -1530,7 +1530,7 @@
 	}
 
 	num_subports = uinfo->spu_subport_cnt;
-	pd->subport_uregbase = vmalloc(PAGE_SIZE * num_subports);
+	pd->subport_uregbase = vzalloc(PAGE_SIZE * num_subports);
 	if (!pd->subport_uregbase) {
 		ret = -ENOMEM;
 		goto bail;
@@ -1538,13 +1538,13 @@
 	/* Note: pd->port_rcvhdrq_size isn't initialized yet. */
 	size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
 		     sizeof(u32), PAGE_SIZE) * num_subports;
-	pd->subport_rcvhdr_base = vmalloc(size);
+	pd->subport_rcvhdr_base = vzalloc(size);
 	if (!pd->subport_rcvhdr_base) {
 		ret = -ENOMEM;
 		goto bail_ureg;
 	}
 
-	pd->subport_rcvegrbuf = vmalloc(pd->port_rcvegrbuf_chunks *
+	pd->subport_rcvegrbuf = vzalloc(pd->port_rcvegrbuf_chunks *
 					pd->port_rcvegrbuf_size *
 					num_subports);
 	if (!pd->subport_rcvegrbuf) {
@@ -1556,11 +1556,6 @@
 	pd->port_subport_id = uinfo->spu_subport_id;
 	pd->active_slaves = 1;
 	set_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag);
-	memset(pd->subport_uregbase, 0, PAGE_SIZE * num_subports);
-	memset(pd->subport_rcvhdr_base, 0, size);
-	memset(pd->subport_rcvegrbuf, 0, pd->port_rcvegrbuf_chunks *
-				pd->port_rcvegrbuf_size *
-				num_subports);
 	goto bail;
 
 bail_rhdr:
+1 -4
drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -442,7 +442,7 @@
 	struct page **pages;
 	dma_addr_t *addrs;
 
-	pages = vmalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt *
+	pages = vzalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt *
 			sizeof(struct page *));
 	if (!pages) {
 		ipath_dev_err(dd, "failed to allocate shadow page * "
@@ -460,9 +460,6 @@
 		dd->ipath_pageshadow = NULL;
 		return;
 	}
-
-	memset(pages, 0, dd->ipath_cfgports * dd->ipath_rcvtidcnt *
-	       sizeof(struct page *));
 
 	dd->ipath_pageshadow = pages;
 	dd->ipath_physshadow = addrs;
+2 -5
drivers/infiniband/hw/qib/qib_init.c
@@ -270,22 +270,19 @@
 	struct page **pages;
 	dma_addr_t *addrs;
 
-	pages = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
+	pages = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
 	if (!pages) {
 		qib_dev_err(dd, "failed to allocate shadow page * "
 			    "array, no expected sends!\n");
 		goto bail;
 	}
 
-	addrs = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
+	addrs = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
 	if (!addrs) {
 		qib_dev_err(dd, "failed to allocate shadow dma handle "
 			    "array, no expected sends!\n");
 		goto bail_free;
 	}
-
-	memset(pages, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
-	memset(addrs, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
 
 	dd->pageshadow = pages;
 	dd->physshadow = addrs;
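
In the qib hunk above, both allocations feed one goto-based unwind,
which is deliberately untouched: vzalloc() memory is still released
with vfree(), so the elided bail_free/bail labels keep working as
before. Roughly, the unwind has this shape (simplified from the driver;
only the allocation calls changed):

	pages = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
	if (!pages)
		goto bail;
	addrs = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
	if (!addrs)
		goto bail_free;	/* undo the first allocation */

	dd->pageshadow = pages;
	dd->physshadow = addrs;
	return;

bail_free:
	vfree(pages);		/* vzalloc() pairs with vfree() */
bail:
	dd->pageshadow = NULL;
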
+3 -7
drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -352,14 +352,12 @@
 	int ret;
 	int i;
 
-	rx->rx_ring = vmalloc(ipoib_recvq_size * sizeof *rx->rx_ring);
+	rx->rx_ring = vzalloc(ipoib_recvq_size * sizeof *rx->rx_ring);
 	if (!rx->rx_ring) {
 		printk(KERN_WARNING "%s: failed to allocate CM non-SRQ ring (%d entries)\n",
 		       priv->ca->name, ipoib_recvq_size);
 		return -ENOMEM;
 	}
-
-	memset(rx->rx_ring, 0, ipoib_recvq_size * sizeof *rx->rx_ring);
 
 	t = kmalloc(sizeof *t, GFP_KERNEL);
 	if (!t) {
@@ -1095,13 +1097,12 @@
 	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
 	int ret;
 
-	p->tx_ring = vmalloc(ipoib_sendq_size * sizeof *p->tx_ring);
+	p->tx_ring = vzalloc(ipoib_sendq_size * sizeof *p->tx_ring);
 	if (!p->tx_ring) {
 		ipoib_warn(priv, "failed to allocate tx ring\n");
 		ret = -ENOMEM;
 		goto err_tx;
 	}
-	memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring);
 
 	p->qp = ipoib_cm_create_tx_qp(p->dev, p);
 	if (IS_ERR(p->qp)) {
@@ -1518,7 +1521,7 @@
 		return;
 	}
 
-	priv->cm.srq_ring = vmalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring);
+	priv->cm.srq_ring = vzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring);
 	if (!priv->cm.srq_ring) {
 		printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n",
 		       priv->ca->name, ipoib_recvq_size);
@@ -1527,7 +1530,6 @@
 		return;
 	}
 
-	memset(priv->cm.srq_ring, 0, ipoib_recvq_size * sizeof *priv->cm.srq_ring);
 }
 
 int ipoib_cm_dev_init(struct net_device *dev)
+1 -2
drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -916,13 +916,12 @@
 		goto out;
 	}
 
-	priv->tx_ring = vmalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
+	priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
 	if (!priv->tx_ring) {
 		printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
 		       ca->name, ipoib_sendq_size);
 		goto out_rx_ring_cleanup;
 	}
-	memset(priv->tx_ring, 0, ipoib_sendq_size * sizeof *priv->tx_ring);
 
 	/* priv->tx_head, tx_tail & tx_outstanding are already 0 */
 