IB/amso1100: Use dma_alloc_coherent() instead of kmalloc/dma_map_single

The Ammasso driver needs to use dma_alloc_coherent() for
allocating memory that will be used by the HW for DMA.

Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>

authored by Steve Wise and committed by Roland Dreier 8de94ce1 04d03bc5

+32 -49
+6 -7
drivers/infiniband/hw/amso1100/c2_alloc.c
··· 42 42 { 43 43 int i; 44 44 struct sp_chunk *new_head; 45 + dma_addr_t dma_addr; 45 46 46 - new_head = (struct sp_chunk *) __get_free_page(gfp_mask); 47 + new_head = dma_alloc_coherent(&c2dev->pcidev->dev, PAGE_SIZE, 48 + &dma_addr, gfp_mask); 47 49 if (new_head == NULL) 48 50 return -ENOMEM; 49 51 50 - new_head->dma_addr = dma_map_single(c2dev->ibdev.dma_device, new_head, 51 - PAGE_SIZE, DMA_FROM_DEVICE); 52 + new_head->dma_addr = dma_addr; 52 53 pci_unmap_addr_set(new_head, mapping, new_head->dma_addr); 53 54 54 55 new_head->next = NULL; ··· 81 80 82 81 while (root) { 83 82 next = root->next; 84 - dma_unmap_single(c2dev->ibdev.dma_device, 85 - pci_unmap_addr(root, mapping), PAGE_SIZE, 86 - DMA_FROM_DEVICE); 87 - __free_page((struct page *) root); 83 + dma_free_coherent(&c2dev->pcidev->dev, PAGE_SIZE, root, 84 + pci_unmap_addr(root, mapping)); 88 85 root = next; 89 86 } 90 87 }
+6 -12
drivers/infiniband/hw/amso1100/c2_cq.c
··· 246 246 247 247 static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq) 248 248 { 249 - 250 - dma_unmap_single(c2dev->ibdev.dma_device, pci_unmap_addr(mq, mapping), 251 - mq->q_size * mq->msg_size, DMA_FROM_DEVICE); 252 - free_pages((unsigned long) mq->msg_pool.host, 253 - get_order(mq->q_size * mq->msg_size)); 249 + dma_free_coherent(&c2dev->pcidev->dev, mq->q_size * mq->msg_size, 250 + mq->msg_pool.host, pci_unmap_addr(mq, mapping)); 254 251 } 255 252 256 253 static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size, 257 254 int msg_size) 258 255 { 259 - unsigned long pool_start; 256 + u8 *pool_start; 260 257 261 - pool_start = __get_free_pages(GFP_KERNEL, 262 - get_order(q_size * msg_size)); 258 + pool_start = dma_alloc_coherent(&c2dev->pcidev->dev, q_size * msg_size, 259 + &mq->host_dma, GFP_KERNEL); 263 260 if (!pool_start) 264 261 return -ENOMEM; 265 262 ··· 264 267 0, /* index (currently unknown) */ 265 268 q_size, 266 269 msg_size, 267 - (u8 *) pool_start, 270 + pool_start, 268 271 NULL, /* peer (currently unknown) */ 269 272 C2_MQ_HOST_TARGET); 270 273 271 - mq->host_dma = dma_map_single(c2dev->ibdev.dma_device, 272 - (void *)pool_start, 273 - q_size * msg_size, DMA_FROM_DEVICE); 274 274 pci_unmap_addr_set(mq, mapping, mq->host_dma); 275 275 276 276 return 0;
+20 -30
drivers/infiniband/hw/amso1100/c2_rnic.c
··· 517 517 /* Initialize the Verbs Reply Queue */ 518 518 qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_QSIZE)); 519 519 msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_MSGSIZE)); 520 - q1_pages = kmalloc(qsize * msgsize, GFP_KERNEL); 520 + q1_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize, 521 + &c2dev->rep_vq.host_dma, GFP_KERNEL); 521 522 if (!q1_pages) { 522 523 err = -ENOMEM; 523 524 goto bail1; 524 525 } 525 - c2dev->rep_vq.host_dma = dma_map_single(c2dev->ibdev.dma_device, 526 - (void *)q1_pages, qsize * msgsize, 527 - DMA_FROM_DEVICE); 528 526 pci_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma); 529 527 pr_debug("%s rep_vq va %p dma %llx\n", __FUNCTION__, q1_pages, 530 528 (unsigned long long) c2dev->rep_vq.host_dma); ··· 538 540 /* Initialize the Asynchronus Event Queue */ 539 541 qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_QSIZE)); 540 542 msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_MSGSIZE)); 541 - q2_pages = kmalloc(qsize * msgsize, GFP_KERNEL); 543 + q2_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize, 544 + &c2dev->aeq.host_dma, GFP_KERNEL); 542 545 if (!q2_pages) { 543 546 err = -ENOMEM; 544 547 goto bail2; 545 548 } 546 - c2dev->aeq.host_dma = dma_map_single(c2dev->ibdev.dma_device, 547 - (void *)q2_pages, qsize * msgsize, 548 - DMA_FROM_DEVICE); 549 549 pci_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma); 550 550 pr_debug("%s aeq va %p dma %llx\n", __FUNCTION__, q1_pages, 551 551 (unsigned long long) c2dev->rep_vq.host_dma); ··· 593 597 bail4: 594 598 vq_term(c2dev); 595 599 bail3: 596 - dma_unmap_single(c2dev->ibdev.dma_device, 597 - pci_unmap_addr(&c2dev->aeq, mapping), 598 - c2dev->aeq.q_size * c2dev->aeq.msg_size, 599 - DMA_FROM_DEVICE); 600 - kfree(q2_pages); 600 + dma_free_coherent(&c2dev->pcidev->dev, 601 + c2dev->aeq.q_size * c2dev->aeq.msg_size, 602 + q2_pages, pci_unmap_addr(&c2dev->aeq, mapping)); 601 603 bail2: 602 - dma_unmap_single(c2dev->ibdev.dma_device, 
603 - pci_unmap_addr(&c2dev->rep_vq, mapping), 604 - c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size, 605 - DMA_FROM_DEVICE); 606 - kfree(q1_pages); 604 + dma_free_coherent(&c2dev->pcidev->dev, 605 + c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size, 606 + q1_pages, pci_unmap_addr(&c2dev->rep_vq, mapping)); 607 607 bail1: 608 608 c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool); 609 609 bail0: ··· 632 640 /* Free the verbs request allocator */ 633 641 vq_term(c2dev); 634 642 635 - /* Unmap and free the asynchronus event queue */ 636 - dma_unmap_single(c2dev->ibdev.dma_device, 637 - pci_unmap_addr(&c2dev->aeq, mapping), 638 - c2dev->aeq.q_size * c2dev->aeq.msg_size, 639 - DMA_FROM_DEVICE); 640 - kfree(c2dev->aeq.msg_pool.host); 643 + /* Free the asynchronus event queue */ 644 + dma_free_coherent(&c2dev->pcidev->dev, 645 + c2dev->aeq.q_size * c2dev->aeq.msg_size, 646 + c2dev->aeq.msg_pool.host, 647 + pci_unmap_addr(&c2dev->aeq, mapping)); 641 648 642 - /* Unmap and free the verbs reply queue */ 643 - dma_unmap_single(c2dev->ibdev.dma_device, 644 - pci_unmap_addr(&c2dev->rep_vq, mapping), 645 - c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size, 646 - DMA_FROM_DEVICE); 647 - kfree(c2dev->rep_vq.msg_pool.host); 649 + /* Free the verbs reply queue */ 650 + dma_free_coherent(&c2dev->pcidev->dev, 651 + c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size, 652 + c2dev->rep_vq.msg_pool.host, 653 + pci_unmap_addr(&c2dev->rep_vq, mapping)); 648 654 649 655 /* Free the MQ shared pointer pool */ 650 656 c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);