IB/srp: Use new verbs IB DMA mapping functions

Convert SRP to use the new verbs DMA mapping functions for kernel
verbs consumers.

Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>

Authored by Ralph Campbell and committed by Roland Dreier (85507bcc, 37ccf9df).
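Background for reviewers: the ib_dma_* helpers this patch switches to wrap the ordinary DMA API on a per-ib_device basis, so a device with its own address translation can supply its own mapping ops while everyone else falls through to dma_map_single() and friends on dev->dma_device (exactly what SRP open-coded before). The sketch below shows the assumed wrapper shape; the dma_ops dispatch and member names are simplifications from memory, not part of this patch, and the authoritative definitions live in include/rdma/ib_verbs.h.

/* Sketch of the assumed dispatch: use the device's own DMA ops if it
 * installed any, otherwise fall back to the generic DMA API on
 * dev->dma_device. */
static inline u64 ib_dma_map_single(struct ib_device *dev,
				    void *cpu_addr, size_t size,
				    enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}

static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	if (dev->dma_ops)
		return dev->dma_ops->mapping_error(dev, dma_addr);
	return dma_mapping_error(dma_addr);
}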

2 files changed, 49 insertions(+), 34 deletions(-)

drivers/infiniband/ulp/srp/ib_srp.c (+48 -33)
···
 	if (!iu->buf)
 		goto out_free_iu;
 
-	iu->dma = dma_map_single(host->dev->dev->dma_device,
-				 iu->buf, size, direction);
-	if (dma_mapping_error(iu->dma))
+	iu->dma = ib_dma_map_single(host->dev->dev, iu->buf, size, direction);
+	if (ib_dma_mapping_error(host->dev->dev, iu->dma))
 		goto out_free_buf;
 
 	iu->size = size;
···
 	if (!iu)
 		return;
 
-	dma_unmap_single(host->dev->dev->dma_device,
-			 iu->dma, iu->size, iu->direction);
+	ib_dma_unmap_single(host->dev->dev, iu->dma, iu->size, iu->direction);
 	kfree(iu->buf);
 	kfree(iu);
 }
···
 		scat = &req->fake_sg;
 	}
 
-	dma_unmap_sg(target->srp_host->dev->dev->dma_device, scat, nents,
-		     scmnd->sc_data_direction);
+	ib_dma_unmap_sg(target->srp_host->dev->dev, scat, nents,
+			scmnd->sc_data_direction);
 }
 
 static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
···
 	int i, j;
 	int ret;
 	struct srp_device *dev = target->srp_host->dev;
+	struct ib_device *ibdev = dev->dev;
 
 	if (!dev->fmr_pool)
 		return -ENODEV;
 
-	if ((sg_dma_address(&scat[0]) & ~dev->fmr_page_mask) &&
+	if ((ib_sg_dma_address(ibdev, &scat[0]) & ~dev->fmr_page_mask) &&
 	    mellanox_workarounds && !memcmp(&target->ioc_guid, mellanox_oui, 3))
 		return -EINVAL;
 
 	len = page_cnt = 0;
 	for (i = 0; i < sg_cnt; ++i) {
-		if (sg_dma_address(&scat[i]) & ~dev->fmr_page_mask) {
+		unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);
+
+		if (ib_sg_dma_address(ibdev, &scat[i]) & ~dev->fmr_page_mask) {
 			if (i > 0)
 				return -EINVAL;
 			else
 				++page_cnt;
 		}
-		if ((sg_dma_address(&scat[i]) + sg_dma_len(&scat[i])) &
+		if ((ib_sg_dma_address(ibdev, &scat[i]) + dma_len) &
 		    ~dev->fmr_page_mask) {
 			if (i < sg_cnt - 1)
 				return -EINVAL;
···
 				++page_cnt;
 		}
 
-		len += sg_dma_len(&scat[i]);
+		len += dma_len;
 	}
 
 	page_cnt += len >> dev->fmr_page_shift;
···
 		return -ENOMEM;
 
 	page_cnt = 0;
-	for (i = 0; i < sg_cnt; ++i)
-		for (j = 0; j < sg_dma_len(&scat[i]); j += dev->fmr_page_size)
+	for (i = 0; i < sg_cnt; ++i) {
+		unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);
+
+		for (j = 0; j < dma_len; j += dev->fmr_page_size)
 			dma_pages[page_cnt++] =
-				(sg_dma_address(&scat[i]) & dev->fmr_page_mask) + j;
+				(ib_sg_dma_address(ibdev, &scat[i]) &
+				 dev->fmr_page_mask) + j;
+	}
 
 	req->fmr = ib_fmr_pool_map_phys(dev->fmr_pool,
 					dma_pages, page_cnt, io_addr);
···
 		goto out;
 	}
 
-	buf->va = cpu_to_be64(sg_dma_address(&scat[0]) & ~dev->fmr_page_mask);
+	buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, &scat[0]) &
+			      ~dev->fmr_page_mask);
 	buf->key = cpu_to_be32(req->fmr->fmr->rkey);
 	buf->len = cpu_to_be32(len);
···
 	struct srp_cmd *cmd = req->cmd->buf;
 	int len, nents, count;
 	u8 fmt = SRP_DATA_DESC_DIRECT;
+	struct srp_device *dev;
+	struct ib_device *ibdev;
 
 	if (!scmnd->request_buffer || scmnd->sc_data_direction == DMA_NONE)
 		return sizeof (struct srp_cmd);
···
 		sg_init_one(scat, scmnd->request_buffer, scmnd->request_bufflen);
 	}
 
-	count = dma_map_sg(target->srp_host->dev->dev->dma_device,
-			   scat, nents, scmnd->sc_data_direction);
+	dev = target->srp_host->dev;
+	ibdev = dev->dev;
+
+	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
 
 	fmt = SRP_DATA_DESC_DIRECT;
 	len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
···
 		 */
 		struct srp_direct_buf *buf = (void *) cmd->add_data;
 
-		buf->va = cpu_to_be64(sg_dma_address(scat));
-		buf->key = cpu_to_be32(target->srp_host->dev->mr->rkey);
-		buf->len = cpu_to_be32(sg_dma_len(scat));
+		buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
+		buf->key = cpu_to_be32(dev->mr->rkey);
+		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
 	} else if (srp_map_fmr(target, scat, count, req,
 			       (void *) cmd->add_data)) {
 		/*
···
 			count * sizeof (struct srp_direct_buf);
 
 		for (i = 0; i < count; ++i) {
+			unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);
+
 			buf->desc_list[i].va =
-				cpu_to_be64(sg_dma_address(&scat[i]));
+				cpu_to_be64(ib_sg_dma_address(ibdev, &scat[i]));
 			buf->desc_list[i].key =
-				cpu_to_be32(target->srp_host->dev->mr->rkey);
-			buf->desc_list[i].len =
-				cpu_to_be32(sg_dma_len(&scat[i]));
-			datalen += sg_dma_len(&scat[i]);
+				cpu_to_be32(dev->mr->rkey);
+			buf->desc_list[i].len = cpu_to_be32(dma_len);
+			datalen += dma_len;
 		}
 
 		if (scmnd->sc_data_direction == DMA_TO_DEVICE)
···
 
 static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
 {
+	struct ib_device *dev;
 	struct srp_iu *iu;
 	u8 opcode;
 
 	iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV];
 
-	dma_sync_single_for_cpu(target->srp_host->dev->dev->dma_device, iu->dma,
-				target->max_ti_iu_len, DMA_FROM_DEVICE);
+	dev = target->srp_host->dev->dev;
+	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
+				   DMA_FROM_DEVICE);
 
 	opcode = *(u8 *) iu->buf;
···
 		break;
 	}
 
-	dma_sync_single_for_device(target->srp_host->dev->dev->dma_device, iu->dma,
-				   target->max_ti_iu_len, DMA_FROM_DEVICE);
+	ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
+				      DMA_FROM_DEVICE);
 }
 
 static void srp_completion(struct ib_cq *cq, void *target_ptr)
···
 	struct srp_request *req;
 	struct srp_iu *iu;
 	struct srp_cmd *cmd;
+	struct ib_device *dev;
 	int len;
 
 	if (target->state == SRP_TARGET_CONNECTING)
···
 	if (!iu)
 		goto err;
 
-	dma_sync_single_for_cpu(target->srp_host->dev->dev->dma_device, iu->dma,
-				srp_max_iu_len, DMA_TO_DEVICE);
+	dev = target->srp_host->dev->dev;
+	ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len,
+				   DMA_TO_DEVICE);
 
 	req = list_entry(target->free_reqs.next, struct srp_request, list);
···
 		goto err_unmap;
 	}
 
-	dma_sync_single_for_device(target->srp_host->dev->dev->dma_device, iu->dma,
-				   srp_max_iu_len, DMA_TO_DEVICE);
+	ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len,
+				      DMA_TO_DEVICE);
 
 	if (__srp_post_send(target, iu, len)) {
 		printk(KERN_ERR PFX "Send failed\n");
drivers/infiniband/ulp/srp/ib_srp.h (+1 -1)
···
 };
 
 struct srp_iu {
-	dma_addr_t		dma;
+	u64			dma;
 	void		       *buf;
 	size_t			size;
 	enum dma_data_direction	direction;
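Note on the header change: the new helpers traffic in u64 handles rather than dma_addr_t, so a device that does its own address translation is not limited to bus addresses; that is why srp_iu.dma changes type above. The prototypes below are a sketch of the assumed signatures of the helpers used in ib_srp.c (in the real tree they are static inlines in include/rdma/ib_verbs.h), shown only to make the u64 parameter visible.

#include <linux/types.h>
#include <linux/dma-mapping.h>

struct ib_device;

/* Assumed helper signatures; note the u64 DMA address parameter. */
u64  ib_dma_map_single(struct ib_device *dev, void *cpu_addr, size_t size,
		       enum dma_data_direction direction);
void ib_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
			 enum dma_data_direction direction);
void ib_dma_sync_single_for_cpu(struct ib_device *dev, u64 addr, size_t size,
				enum dma_data_direction dir);
void ib_dma_sync_single_for_device(struct ib_device *dev, u64 addr, size_t size,
				   enum dma_data_direction dir);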