IB/amso1100: Use dma_alloc_coherent() instead of kmalloc/dma_map_single

The Ammasso driver needs to use dma_alloc_coherent() for
allocating memory that the HW will use for DMA.

Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>

Authored by Steve Wise and committed by Roland Dreier (8de94ce1, 04d03bc5)
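
For context, a minimal sketch (not part of the patch) of the coherent-DMA allocation pattern the driver moves to: dma_alloc_coherent() returns both a kernel virtual address and a bus address the HW can use, so no separate dma_map_single()/dma_unmap_single() or sync step is needed. The helper names and the generic struct device pointer below are illustrative; in this driver the device would be &c2dev->pcidev->dev.

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/*
 * Illustrative helpers only -- not from the patch.  Allocate a buffer the
 * HW can DMA to/from without explicit map/unmap or sync calls.
 */
static void *hw_queue_alloc(struct device *dev, size_t size,
			    dma_addr_t *dma_handle)
{
	/* The CPU virtual address is returned; the bus address for the
	 * HW is written to *dma_handle. */
	return dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
}

static void hw_queue_free(struct device *dev, size_t size,
			  void *cpu_addr, dma_addr_t dma_handle)
{
	dma_free_coherent(dev, size, cpu_addr, dma_handle);
}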

3 files changed, 32 insertions(+), 49 deletions(-)
drivers/infiniband/hw/amso1100/c2_alloc.c (+6 -7)
···
 {
 	int i;
 	struct sp_chunk *new_head;
+	dma_addr_t dma_addr;
 
-	new_head = (struct sp_chunk *) __get_free_page(gfp_mask);
+	new_head = dma_alloc_coherent(&c2dev->pcidev->dev, PAGE_SIZE,
+				      &dma_addr, gfp_mask);
 	if (new_head == NULL)
 		return -ENOMEM;
 
-	new_head->dma_addr = dma_map_single(c2dev->ibdev.dma_device, new_head,
-					    PAGE_SIZE, DMA_FROM_DEVICE);
+	new_head->dma_addr = dma_addr;
 	pci_unmap_addr_set(new_head, mapping, new_head->dma_addr);
 
 	new_head->next = NULL;
···
 
 	while (root) {
 		next = root->next;
-		dma_unmap_single(c2dev->ibdev.dma_device,
-				 pci_unmap_addr(root, mapping), PAGE_SIZE,
-				 DMA_FROM_DEVICE);
-		__free_page((struct page *) root);
+		dma_free_coherent(&c2dev->pcidev->dev, PAGE_SIZE, root,
+				  pci_unmap_addr(root, mapping));
 		root = next;
 	}
 }
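
One detail worth noting in the c2_alloc.c hunk: the sp_chunk header itself lives at the start of the coherent page and records its own bus address, which is why dma_alloc_coherent()'s return value is used directly as the struct pointer and dma_addr is copied into it. A standalone sketch of that pattern, with an illustrative struct rather than the driver's real one:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Illustrative only: a chunk header living inside the DMA page it describes. */
struct demo_chunk {
	struct demo_chunk *next;
	dma_addr_t dma_addr;		/* bus address of this page */
};

static struct demo_chunk *demo_chunk_alloc(struct device *dev, gfp_t gfp_mask)
{
	struct demo_chunk *chunk;
	dma_addr_t dma_addr;

	/* One coherent page; the header occupies the first bytes of it. */
	chunk = dma_alloc_coherent(dev, PAGE_SIZE, &dma_addr, gfp_mask);
	if (!chunk)
		return NULL;

	chunk->next = NULL;
	chunk->dma_addr = dma_addr;
	return chunk;
}

static void demo_chunk_free(struct device *dev, struct demo_chunk *chunk)
{
	dma_free_coherent(dev, PAGE_SIZE, chunk, chunk->dma_addr);
}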
drivers/infiniband/hw/amso1100/c2_cq.c (+6 -12)
···
 
 static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
 {
-
-	dma_unmap_single(c2dev->ibdev.dma_device, pci_unmap_addr(mq, mapping),
-			 mq->q_size * mq->msg_size, DMA_FROM_DEVICE);
-	free_pages((unsigned long) mq->msg_pool.host,
-		   get_order(mq->q_size * mq->msg_size));
+	dma_free_coherent(&c2dev->pcidev->dev, mq->q_size * mq->msg_size,
+			  mq->msg_pool.host, pci_unmap_addr(mq, mapping));
 }
 
 static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size,
 			   int msg_size)
 {
-	unsigned long pool_start;
+	u8 *pool_start;
 
-	pool_start = __get_free_pages(GFP_KERNEL,
-				      get_order(q_size * msg_size));
+	pool_start = dma_alloc_coherent(&c2dev->pcidev->dev, q_size * msg_size,
+					&mq->host_dma, GFP_KERNEL);
 	if (!pool_start)
 		return -ENOMEM;
···
 		  0,			/* index (currently unknown) */
 		  q_size,
 		  msg_size,
-		  (u8 *) pool_start,
+		  pool_start,
 		  NULL,			/* peer (currently unknown) */
 		  C2_MQ_HOST_TARGET);
 
-	mq->host_dma = dma_map_single(c2dev->ibdev.dma_device,
-				      (void *)pool_start,
-				      q_size * msg_size, DMA_FROM_DEVICE);
 	pci_unmap_addr_set(mq, mapping, mq->host_dma);
 
 	return 0;
drivers/infiniband/hw/amso1100/c2_rnic.c (+20 -30)
···
 	/* Initialize the Verbs Reply Queue */
 	qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_QSIZE));
 	msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_MSGSIZE));
-	q1_pages = kmalloc(qsize * msgsize, GFP_KERNEL);
+	q1_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
+				      &c2dev->rep_vq.host_dma, GFP_KERNEL);
 	if (!q1_pages) {
 		err = -ENOMEM;
 		goto bail1;
 	}
-	c2dev->rep_vq.host_dma = dma_map_single(c2dev->ibdev.dma_device,
-						(void *)q1_pages, qsize * msgsize,
-						DMA_FROM_DEVICE);
 	pci_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
 	pr_debug("%s rep_vq va %p dma %llx\n", __FUNCTION__, q1_pages,
 		 (unsigned long long) c2dev->rep_vq.host_dma);
···
 	/* Initialize the Asynchronus Event Queue */
 	qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_QSIZE));
 	msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_MSGSIZE));
-	q2_pages = kmalloc(qsize * msgsize, GFP_KERNEL);
+	q2_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
+				      &c2dev->aeq.host_dma, GFP_KERNEL);
 	if (!q2_pages) {
 		err = -ENOMEM;
 		goto bail2;
 	}
-	c2dev->aeq.host_dma = dma_map_single(c2dev->ibdev.dma_device,
-					     (void *)q2_pages, qsize * msgsize,
-					     DMA_FROM_DEVICE);
 	pci_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
 	pr_debug("%s aeq va %p dma %llx\n", __FUNCTION__, q1_pages,
 		 (unsigned long long) c2dev->rep_vq.host_dma);
···
 bail4:
 	vq_term(c2dev);
 bail3:
-	dma_unmap_single(c2dev->ibdev.dma_device,
-			 pci_unmap_addr(&c2dev->aeq, mapping),
-			 c2dev->aeq.q_size * c2dev->aeq.msg_size,
-			 DMA_FROM_DEVICE);
-	kfree(q2_pages);
+	dma_free_coherent(&c2dev->pcidev->dev,
+			  c2dev->aeq.q_size * c2dev->aeq.msg_size,
+			  q2_pages, pci_unmap_addr(&c2dev->aeq, mapping));
 bail2:
-	dma_unmap_single(c2dev->ibdev.dma_device,
-			 pci_unmap_addr(&c2dev->rep_vq, mapping),
-			 c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
-			 DMA_FROM_DEVICE);
-	kfree(q1_pages);
+	dma_free_coherent(&c2dev->pcidev->dev,
+			  c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
+			  q1_pages, pci_unmap_addr(&c2dev->rep_vq, mapping));
 bail1:
 	c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
 bail0:
···
 	/* Free the verbs request allocator */
 	vq_term(c2dev);
 
-	/* Unmap and free the asynchronus event queue */
-	dma_unmap_single(c2dev->ibdev.dma_device,
-			 pci_unmap_addr(&c2dev->aeq, mapping),
-			 c2dev->aeq.q_size * c2dev->aeq.msg_size,
-			 DMA_FROM_DEVICE);
-	kfree(c2dev->aeq.msg_pool.host);
+	/* Free the asynchronus event queue */
+	dma_free_coherent(&c2dev->pcidev->dev,
+			  c2dev->aeq.q_size * c2dev->aeq.msg_size,
+			  c2dev->aeq.msg_pool.host,
+			  pci_unmap_addr(&c2dev->aeq, mapping));
 
-	/* Unmap and free the verbs reply queue */
-	dma_unmap_single(c2dev->ibdev.dma_device,
-			 pci_unmap_addr(&c2dev->rep_vq, mapping),
-			 c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
-			 DMA_FROM_DEVICE);
-	kfree(c2dev->rep_vq.msg_pool.host);
+	/* Free the verbs reply queue */
+	dma_free_coherent(&c2dev->pcidev->dev,
+			  c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
+			  c2dev->rep_vq.msg_pool.host,
+			  pci_unmap_addr(&c2dev->rep_vq, mapping));
 
 	/* Free the MQ shared pointer pool */
 	c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);