IB/srp: Use new verbs IB DMA mapping functions

Convert SRP to use the new verbs DMA mapping functions for kernel
verbs consumers.
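
The conversion follows the same before/after pattern throughout the driver: each direct call into the generic DMA API on the device's dma_device is replaced by the corresponding ib_dma_*() wrapper that takes the struct ib_device itself, so a low-level driver can interpose its own DMA mapping operations. A minimal sketch of that pattern (the function names and error handling below are illustrative, not code from this patch):

#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>

/*
 * Illustrative sketch only: map and unmap a kernel buffer through the
 * new verbs DMA wrappers instead of calling dma_map_single() /
 * dma_unmap_single() on ibdev->dma_device directly.
 */
static int example_map_buf(struct ib_device *ibdev, void *buf, size_t size,
                           u64 *dma_out)
{
        u64 dma;

        dma = ib_dma_map_single(ibdev, buf, size, DMA_TO_DEVICE);
        if (ib_dma_mapping_error(ibdev, dma))
                return -ENOMEM;

        *dma_out = dma;
        return 0;
}

static void example_unmap_buf(struct ib_device *ibdev, u64 dma, size_t size)
{
        ib_dma_unmap_single(ibdev, dma, size, DMA_TO_DEVICE);
}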

Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>

Authored by Ralph Campbell and committed by Roland Dreier (85507bcc, 37ccf9df)

2 files changed, 49 insertions(+), 34 deletions(-)
drivers/infiniband/ulp/srp/ib_srp.c (+48 -33)
···
 122  122            if (!iu->buf)
 123  123                    goto out_free_iu;
 124  124    
 125        -        iu->dma = dma_map_single(host->dev->dev->dma_device,
 126        -                                 iu->buf, size, direction);
 127        -        if (dma_mapping_error(iu->dma))
       125  +        iu->dma = ib_dma_map_single(host->dev->dev, iu->buf, size, direction);
       126  +        if (ib_dma_mapping_error(host->dev->dev, iu->dma))
 128  127                    goto out_free_buf;
 129  128    
 130  129            iu->size = size;
···
 144  145            if (!iu)
 145  146                    return;
 146  147    
 147        -        dma_unmap_single(host->dev->dev->dma_device,
 148        -                         iu->dma, iu->size, iu->direction);
       148  +        ib_dma_unmap_single(host->dev->dev, iu->dma, iu->size, iu->direction);
 149  149            kfree(iu->buf);
 150  150            kfree(iu);
 151  151    }
···
 480  482                    scat = &req->fake_sg;
 481  483            }
 482  484    
 483        -        dma_unmap_sg(target->srp_host->dev->dev->dma_device, scat, nents,
 484        -                     scmnd->sc_data_direction);
       485  +        ib_dma_unmap_sg(target->srp_host->dev->dev, scat, nents,
       486  +                        scmnd->sc_data_direction);
 485  487    }
 486  488    
 487  489    static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
···
 593  595            int i, j;
 594  596            int ret;
 595  597            struct srp_device *dev = target->srp_host->dev;
       598  +        struct ib_device *ibdev = dev->dev;
 596  599    
 597  600            if (!dev->fmr_pool)
 598  601                    return -ENODEV;
 599  602    
 600        -        if ((sg_dma_address(&scat[0]) & ~dev->fmr_page_mask) &&
       603  +        if ((ib_sg_dma_address(ibdev, &scat[0]) & ~dev->fmr_page_mask) &&
 601  604                mellanox_workarounds && !memcmp(&target->ioc_guid, mellanox_oui, 3))
 602  605                    return -EINVAL;
 603  606    
 604  607            len = page_cnt = 0;
 605  608            for (i = 0; i < sg_cnt; ++i) {
 606        -                if (sg_dma_address(&scat[i]) & ~dev->fmr_page_mask) {
       609  +                unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);
       610  +
       611  +                if (ib_sg_dma_address(ibdev, &scat[i]) & ~dev->fmr_page_mask) {
 607  612                            if (i > 0)
 608  613                                    return -EINVAL;
 609  614                            else
 610  615                                    ++page_cnt;
 611  616                    }
 612        -                if ((sg_dma_address(&scat[i]) + sg_dma_len(&scat[i])) &
       617  +                if ((ib_sg_dma_address(ibdev, &scat[i]) + dma_len) &
 613  618                        ~dev->fmr_page_mask) {
 614  619                            if (i < sg_cnt - 1)
 615  620                                    return -EINVAL;
···
 620  619                                    ++page_cnt;
 621  620                    }
 622  621    
 623        -                len += sg_dma_len(&scat[i]);
       622  +                len += dma_len;
 624  623            }
 625  624    
 626  625            page_cnt += len >> dev->fmr_page_shift;
···
 632  631                    return -ENOMEM;
 633  632    
 634  633            page_cnt = 0;
 635        -        for (i = 0; i < sg_cnt; ++i)
 636        -                for (j = 0; j < sg_dma_len(&scat[i]); j += dev->fmr_page_size)
       634  +        for (i = 0; i < sg_cnt; ++i) {
       635  +                unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);
       636  +
       637  +                for (j = 0; j < dma_len; j += dev->fmr_page_size)
 637  638                            dma_pages[page_cnt++] =
 638        -                                (sg_dma_address(&scat[i]) & dev->fmr_page_mask) + j;
       639  +                                (ib_sg_dma_address(ibdev, &scat[i]) &
       640  +                                 dev->fmr_page_mask) + j;
       641  +        }
 639  642    
 640  643            req->fmr = ib_fmr_pool_map_phys(dev->fmr_pool,
 641  644                                            dma_pages, page_cnt, io_addr);
···
 649  644                    goto out;
 650  645            }
 651  646    
 652        -        buf->va = cpu_to_be64(sg_dma_address(&scat[0]) & ~dev->fmr_page_mask);
       647  +        buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, &scat[0]) &
       648  +                              ~dev->fmr_page_mask);
 653  649            buf->key = cpu_to_be32(req->fmr->fmr->rkey);
 654  650            buf->len = cpu_to_be32(len);
 655  651    
···
 669  663            struct srp_cmd *cmd = req->cmd->buf;
 670  664            int len, nents, count;
 671  665            u8 fmt = SRP_DATA_DESC_DIRECT;
       666  +        struct srp_device *dev;
       667  +        struct ib_device *ibdev;
 672  668    
 673  669            if (!scmnd->request_buffer || scmnd->sc_data_direction == DMA_NONE)
 674  670                    return sizeof (struct srp_cmd);
···
 695  687                    sg_init_one(scat, scmnd->request_buffer, scmnd->request_bufflen);
 696  688            }
 697  689    
 698        -        count = dma_map_sg(target->srp_host->dev->dev->dma_device,
 699        -                           scat, nents, scmnd->sc_data_direction);
       690  +        dev = target->srp_host->dev;
       691  +        ibdev = dev->dev;
       692  +
       693  +        count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
 700  694    
 701  695            fmt = SRP_DATA_DESC_DIRECT;
 702  696            len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
···
 712  702                     */
 713  703                    struct srp_direct_buf *buf = (void *) cmd->add_data;
 714  704    
 715        -                buf->va = cpu_to_be64(sg_dma_address(scat));
 716        -                buf->key = cpu_to_be32(target->srp_host->dev->mr->rkey);
 717        -                buf->len = cpu_to_be32(sg_dma_len(scat));
       705  +                buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
       706  +                buf->key = cpu_to_be32(dev->mr->rkey);
       707  +                buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
 718  708            } else if (srp_map_fmr(target, scat, count, req,
 719  709                                   (void *) cmd->add_data)) {
 720  710                    /*
···
 732  722                            count * sizeof (struct srp_direct_buf);
 733  723    
 734  724                    for (i = 0; i < count; ++i) {
       725  +                        unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);
       726  +
 735  727                            buf->desc_list[i].va =
 736        -                                cpu_to_be64(sg_dma_address(&scat[i]));
       728  +                                cpu_to_be64(ib_sg_dma_address(ibdev, &scat[i]));
 737  729                            buf->desc_list[i].key =
 738        -                                cpu_to_be32(target->srp_host->dev->mr->rkey);
 739        -                        buf->desc_list[i].len =
 740        -                                cpu_to_be32(sg_dma_len(&scat[i]));
 741        -                        datalen += sg_dma_len(&scat[i]);
       730  +                                cpu_to_be32(dev->mr->rkey);
       731  +                        buf->desc_list[i].len = cpu_to_be32(dma_len);
       732  +                        datalen += dma_len;
 742  733                    }
 743  734    
 744  735                    if (scmnd->sc_data_direction == DMA_TO_DEVICE)
···
 819  808    
 820  809    static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
 821  810    {
       811  +        struct ib_device *dev;
 822  812            struct srp_iu *iu;
 823  813            u8 opcode;
 824  814    
 825  815            iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV];
 826  816    
 827        -        dma_sync_single_for_cpu(target->srp_host->dev->dev->dma_device, iu->dma,
 828        -                                target->max_ti_iu_len, DMA_FROM_DEVICE);
       817  +        dev = target->srp_host->dev->dev;
       818  +        ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
       819  +                                   DMA_FROM_DEVICE);
 829  820    
 830  821            opcode = *(u8 *) iu->buf;
···
 863  850                    break;
 864  851            }
 865  852    
 866        -        dma_sync_single_for_device(target->srp_host->dev->dev->dma_device, iu->dma,
 867        -                                   target->max_ti_iu_len, DMA_FROM_DEVICE);
       853  +        ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
       854  +                                      DMA_FROM_DEVICE);
 868  855    }
 869  856    
 870  857    static void srp_completion(struct ib_cq *cq, void *target_ptr)
···
 982  969            struct srp_request *req;
 983  970            struct srp_iu *iu;
 984  971            struct srp_cmd *cmd;
       972  +        struct ib_device *dev;
 985  973            int len;
 986  974    
 987  975            if (target->state == SRP_TARGET_CONNECTING)
···
 999  985            if (!iu)
1000  986                    goto err;
1001  987    
1002        -        dma_sync_single_for_cpu(target->srp_host->dev->dev->dma_device, iu->dma,
1003        -                                srp_max_iu_len, DMA_TO_DEVICE);
       988  +        dev = target->srp_host->dev->dev;
       989  +        ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len,
       990  +                                   DMA_TO_DEVICE);
1004  991    
1005  992            req = list_entry(target->free_reqs.next, struct srp_request, list);
···
1033 1018                    goto err_unmap;
1034 1019            }
1035 1020    
1036        -        dma_sync_single_for_device(target->srp_host->dev->dev->dma_device, iu->dma,
1037        -                                   srp_max_iu_len, DMA_TO_DEVICE);
      1021  +        ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len,
      1022  +                                      DMA_TO_DEVICE);
1038 1023    
1039 1024            if (__srp_post_send(target, iu, len)) {
1040 1025                    printk(KERN_ERR PFX "Send failed\n");
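
On the scatter/gather path the same rule applies after ib_dma_map_sg(): per-entry bus addresses and lengths are read back through ib_sg_dma_address() and ib_sg_dma_len() rather than sg_dma_address()/sg_dma_len(), as srp_map_data() and srp_map_fmr() now do above. A small illustrative sketch, not taken from the patch:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>

/* Sketch: map a scatterlist and total up the mapped entry lengths. */
static u64 example_sg_total(struct ib_device *ibdev, struct scatterlist *sg,
                            int nents, enum dma_data_direction dir)
{
        u64 total = 0;
        int count, i;

        count = ib_dma_map_sg(ibdev, sg, nents, dir);
        if (!count)
                return 0;

        for (i = 0; i < count; ++i) {
                u64 addr = ib_sg_dma_address(ibdev, &sg[i]);
                unsigned int len = ib_sg_dma_len(ibdev, &sg[i]);

                pr_debug("entry %d: addr 0x%llx len %u\n", i,
                         (unsigned long long) addr, len);
                total += len;
        }

        ib_dma_unmap_sg(ibdev, sg, nents, dir);
        return total;
}
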
drivers/infiniband/ulp/srp/ib_srp.h (+1 -1)
···
 161  161    };
 162  162    
 163  163    struct srp_iu {
 164        -        dma_addr_t dma;
       164  +        u64 dma;
 165  165            void *buf;
 166  166            size_t size;
 167  167            enum dma_data_direction direction;
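
The header change follows from the wrapper API's address type: the ib_dma_*() helpers take and return u64 rather than dma_addr_t, so the cached mapping handle in struct srp_iu changes type to match. A minimal sketch of how such a handle is consumed (illustrative struct and function names only):

#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>

/*
 * Sketch only: the u64 handle returned by ib_dma_map_single() is what
 * later feeds the ib_dma_sync_single_for_cpu()/_for_device() calls.
 */
struct example_iu {
        u64    dma;    /* was dma_addr_t before the conversion */
        void  *buf;
        size_t size;
};

static void example_sync_for_cpu(struct ib_device *ibdev, struct example_iu *iu)
{
        ib_dma_sync_single_for_cpu(ibdev, iu->dma, iu->size, DMA_FROM_DEVICE);
}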