Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IB: Increase DMA max_segment_size on Mellanox hardware
  IB/mad: Improve an error message so error code is included
  RDMA/nes: Don't print success message at level KERN_ERR
  RDMA/addr: Fix return of uninitialized ret value
  IB/srp: try to use larger FMR sizes to cover our mappings
  IB/srp: add support for indirect tables that don't fit in SRP_CMD
  IB/srp: rework mapping engine to use multiple FMR entries
  IB/srp: allow sg_tablesize to be set for each target
  IB/srp: move IB CM setup completion into its own function
  IB/srp: always avoid non-zero offsets into an FMR
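
The SRP work above replaces one global knob (srp_sg_tablesize) with per-target cmd_sg_entries, indirect_sg_entries and allow_ext_sg, and doubles how much data a single FMR entry can cover. A hedged arithmetic sketch of that coverage, assuming a 4 KiB HCA page size (the new floor enforced in srp_add_one() below); the numbers are derived from the diffs, not stated in the commit message:

/*
 * fmr_page_size = 1 << fmr_page_shift;          -- >= 4096 after this merge
 * fmr_max_size  = fmr_page_size * SRP_FMR_SIZE; -- bytes one FMR entry covers
 *
 * old: 256 pages * 4 KiB = 1 MiB per FMR entry
 * new: 512 pages * 4 KiB = 2 MiB per FMR entry, halving toward
 *      SRP_FMR_MIN_SIZE (128 pages) only if pool creation fails
 */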

7 files changed, 534 insertions(+), 262 deletions(-)
drivers/infiniband/core/addr.c (+1 -1)

 
 	/* If the device does ARP internally, return 'done' */
 	if (rt->dst.dev->flags & IFF_NOARP) {
-		rdma_copy_addr(addr, rt->dst.dev, NULL);
+		ret = rdma_copy_addr(addr, rt->dst.dev, NULL);
 		goto put;
 	}
 
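
The one-line fix above matters because 'ret' is the function's return value; on the IFF_NOARP path it was never assigned, so the caller could see stack garbage. A condensed, hypothetical sketch of the pattern (names and surrounding cleanup abbreviated from the real addr4_resolve()):

static int resolve_sketch(struct rdma_dev_addr *addr, struct rtable *rt)
{
	int ret;	/* deliberately not initialized, as in the real code */

	if (rt->dst.dev->flags & IFF_NOARP) {
		/* pre-fix: rdma_copy_addr()'s status was discarded and
		 * 'ret' reached the label below uninitialized */
		ret = rdma_copy_addr(addr, rt->dst.dev, NULL);
		goto put;
	}

	ret = 0;	/* normal neighbour-resolution path elided */
put:
	ip_rt_put(rt);
	return ret;
}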

drivers/infiniband/core/agent.c (+2 -1)

 	agent = port_priv->agent[qpn];
 	ah = ib_create_ah_from_wc(agent->qp->pd, wc, grh, port_num);
 	if (IS_ERR(ah)) {
-		printk(KERN_ERR SPFX "ib_create_ah_from_wc error\n");
+		printk(KERN_ERR SPFX "ib_create_ah_from_wc error %ld\n",
+		       PTR_ERR(ah));
 		return;
 	}
 

drivers/infiniband/hw/mthca/mthca_main.c (+3)

 		}
 	}
 
+	/* We can handle large RDMA requests, so allow larger segments. */
+	dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
+
 	mdev = (struct mthca_dev *) ib_alloc_device(sizeof *mdev);
 	if (!mdev) {
 		dev_err(&pdev->dev, "Device struct alloc failed, "
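
Without a driver-supplied limit, the DMA API advertises a conservative 64 KB maximum segment, so the block layer would never merge scatterlist entries past that no matter what SRP can map. A minimal sketch of the consumer side, with example_show_seg() being a hypothetical helper:

#include <linux/dma-mapping.h>

static void example_show_seg(struct device *dev)
{
	/* 65536 while dev->dma_parms carries no limit; 1 GB once the
	 * HCA driver calls dma_set_max_seg_size() as in the hunk above */
	dev_info(dev, "max DMA segment: %u bytes\n",
		 dma_get_max_seg_size(dev));
}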

drivers/infiniband/hw/nes/nes.c (+1 -1)

 	nesdev->netdev_count++;
 	nesdev->nesadapter->netdev_count++;
 
-	printk(KERN_ERR PFX "%s: NetEffect RNIC driver successfully loaded.\n",
+	printk(KERN_INFO PFX "%s: NetEffect RNIC driver successfully loaded.\n",
 		pci_name(pcidev));
 	return 0;
 

drivers/infiniband/ulp/srp/ib_srp.c (+491 -254)

 		   "v" DRV_VERSION " (" DRV_RELDATE ")");
 MODULE_LICENSE("Dual BSD/GPL");
 
-static int srp_sg_tablesize = SRP_DEF_SG_TABLESIZE;
-static int srp_max_iu_len;
-
-module_param(srp_sg_tablesize, int, 0444);
-MODULE_PARM_DESC(srp_sg_tablesize,
-		 "Max number of gather/scatter entries per I/O (default is 12, max 255)");
-
+static unsigned int srp_sg_tablesize;
+static unsigned int cmd_sg_entries;
+static unsigned int indirect_sg_entries;
+static bool allow_ext_sg;
 static int topspin_workarounds = 1;
+
+module_param(srp_sg_tablesize, uint, 0444);
+MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
+
+module_param(cmd_sg_entries, uint, 0444);
+MODULE_PARM_DESC(cmd_sg_entries,
+		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
+
+module_param(indirect_sg_entries, uint, 0444);
+MODULE_PARM_DESC(indirect_sg_entries,
+		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");
+
+module_param(allow_ext_sg, bool, 0444);
+MODULE_PARM_DESC(allow_ext_sg,
+		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
 
 module_param(topspin_workarounds, int, 0444);
 MODULE_PARM_DESC(topspin_workarounds,
 		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
-
-static int mellanox_workarounds = 1;
-
-module_param(mellanox_workarounds, int, 0444);
-MODULE_PARM_DESC(mellanox_workarounds,
-		 "Enable workarounds for Mellanox SRP target bugs if != 0");
 
 static void srp_add_one(struct ib_device *device);
 static void srp_remove_one(struct ib_device *device);
···
 	return topspin_workarounds &&
 		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
 		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
-}
-
-static int srp_target_is_mellanox(struct srp_target_port *target)
-{
-	static const u8 mellanox_oui[3] = { 0x00, 0x02, 0xc9 };
-
-	return mellanox_workarounds &&
-		!memcmp(&target->ioc_guid, mellanox_oui, sizeof mellanox_oui);
 }
 
 static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
···
 
 	req->priv.opcode	= SRP_LOGIN_REQ;
 	req->priv.tag		= 0;
-	req->priv.req_it_iu_len = cpu_to_be32(srp_max_iu_len);
+	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
 	req->priv.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
 					      SRP_BUF_FORMAT_INDIRECT);
 	/*
···
 	return changed;
 }
 
+static void srp_free_req_data(struct srp_target_port *target)
+{
+	struct ib_device *ibdev = target->srp_host->srp_dev->dev;
+	struct srp_request *req;
+	int i;
+
+	for (i = 0, req = target->req_ring; i < SRP_CMD_SQ_SIZE; ++i, ++req) {
+		kfree(req->fmr_list);
+		kfree(req->map_page);
+		if (req->indirect_dma_addr) {
+			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
+					    target->indirect_size,
+					    DMA_TO_DEVICE);
+		}
+		kfree(req->indirect_desc);
+	}
+}
+
 static void srp_remove_work(struct work_struct *work)
 {
 	struct srp_target_port *target =
···
 	scsi_remove_host(target->scsi_host);
 	ib_destroy_cm_id(target->cm_id);
 	srp_free_target_ib(target);
+	srp_free_req_data(target);
 	scsi_host_put(target->scsi_host);
 }
 
···
 			  struct srp_target_port *target,
 			  struct srp_request *req)
 {
+	struct ib_device *ibdev = target->srp_host->srp_dev->dev;
+	struct ib_pool_fmr **pfmr;
+
 	if (!scsi_sglist(scmnd) ||
 	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
 	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
 		return;
 
-	if (req->fmr) {
-		ib_fmr_pool_unmap(req->fmr);
-		req->fmr = NULL;
-	}
+	pfmr = req->fmr_list;
+	while (req->nfmr--)
+		ib_fmr_pool_unmap(*pfmr++);
 
-	ib_dma_unmap_sg(target->srp_host->srp_dev->dev, scsi_sglist(scmnd),
-			scsi_sg_count(scmnd), scmnd->sc_data_direction);
+	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
+			scmnd->sc_data_direction);
 }
 
 static void srp_remove_req(struct srp_target_port *target,
···
 	return ret;
 }
 
-static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
-		       int sg_cnt, struct srp_request *req,
-		       struct srp_direct_buf *buf)
+static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
+			 unsigned int dma_len, u32 rkey)
 {
+	struct srp_direct_buf *desc = state->desc;
+
+	desc->va = cpu_to_be64(dma_addr);
+	desc->key = cpu_to_be32(rkey);
+	desc->len = cpu_to_be32(dma_len);
+
+	state->total_len += dma_len;
+	state->desc++;
+	state->ndesc++;
+}
+
+static int srp_map_finish_fmr(struct srp_map_state *state,
+			      struct srp_target_port *target)
+{
+	struct srp_device *dev = target->srp_host->srp_dev;
+	struct ib_pool_fmr *fmr;
 	u64 io_addr = 0;
-	u64 *dma_pages;
-	u32 len;
-	int page_cnt;
-	int i, j;
-	int ret;
+
+	if (!state->npages)
+		return 0;
+
+	if (state->npages == 1) {
+		srp_map_desc(state, state->base_dma_addr, state->fmr_len,
+			     target->rkey);
+		state->npages = state->fmr_len = 0;
+		return 0;
+	}
+
+	fmr = ib_fmr_pool_map_phys(dev->fmr_pool, state->pages,
+				   state->npages, io_addr);
+	if (IS_ERR(fmr))
+		return PTR_ERR(fmr);
+
+	*state->next_fmr++ = fmr;
+	state->nfmr++;
+
+	srp_map_desc(state, 0, state->fmr_len, fmr->fmr->rkey);
+	state->npages = state->fmr_len = 0;
+	return 0;
+}
+
+static void srp_map_update_start(struct srp_map_state *state,
+				 struct scatterlist *sg, int sg_index,
+				 dma_addr_t dma_addr)
+{
+	state->unmapped_sg = sg;
+	state->unmapped_index = sg_index;
+	state->unmapped_addr = dma_addr;
+}
+
+static int srp_map_sg_entry(struct srp_map_state *state,
+			    struct srp_target_port *target,
+			    struct scatterlist *sg, int sg_index,
+			    int use_fmr)
+{
 	struct srp_device *dev = target->srp_host->srp_dev;
 	struct ib_device *ibdev = dev->dev;
-	struct scatterlist *sg;
+	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
+	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
+	unsigned int len;
+	int ret;
 
-	if (!dev->fmr_pool)
-		return -ENODEV;
+	if (!dma_len)
+		return 0;
 
-	if (srp_target_is_mellanox(target) &&
-	    (ib_sg_dma_address(ibdev, &scat[0]) & ~dev->fmr_page_mask))
-		return -EINVAL;
+	if (use_fmr == SRP_MAP_NO_FMR) {
+		/* Once we're in direct map mode for a request, we don't
+		 * go back to FMR mode, so no need to update anything
+		 * other than the descriptor.
+		 */
+		srp_map_desc(state, dma_addr, dma_len, target->rkey);
+		return 0;
+	}
 
-	len = page_cnt = 0;
-	scsi_for_each_sg(req->scmnd, sg, sg_cnt, i) {
-		unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
+	/* If we start at an offset into the FMR page, don't merge into
+	 * the current FMR. Finish it out, and use the kernel's MR for this
+	 * sg entry. This is to avoid potential bugs on some SRP targets
+	 * that were never quite defined, but went away when the initiator
+	 * avoided using FMR on such page fragments.
+	 */
+	if (dma_addr & ~dev->fmr_page_mask || dma_len > dev->fmr_max_size) {
+		ret = srp_map_finish_fmr(state, target);
+		if (ret)
+			return ret;
 
-		if (ib_sg_dma_address(ibdev, sg) & ~dev->fmr_page_mask) {
-			if (i > 0)
-				return -EINVAL;
-			else
-				++page_cnt;
+		srp_map_desc(state, dma_addr, dma_len, target->rkey);
+		srp_map_update_start(state, NULL, 0, 0);
+		return 0;
+	}
+
+	/* If this is the first sg to go into the FMR, save our position.
+	 * We need to know the first unmapped entry, its index, and the
+	 * first unmapped address within that entry to be able to restart
+	 * mapping after an error.
+	 */
+	if (!state->unmapped_sg)
+		srp_map_update_start(state, sg, sg_index, dma_addr);
+
+	while (dma_len) {
+		if (state->npages == SRP_FMR_SIZE) {
+			ret = srp_map_finish_fmr(state, target);
+			if (ret)
+				return ret;
+
+			srp_map_update_start(state, sg, sg_index, dma_addr);
 		}
-		if ((ib_sg_dma_address(ibdev, sg) + dma_len) &
-		    ~dev->fmr_page_mask) {
-			if (i < sg_cnt - 1)
-				return -EINVAL;
-			else
-				++page_cnt;
-		}
 
-		len += dma_len;
+		len = min_t(unsigned int, dma_len, dev->fmr_page_size);
+
+		if (!state->npages)
+			state->base_dma_addr = dma_addr;
+		state->pages[state->npages++] = dma_addr;
+		state->fmr_len += len;
+		dma_addr += len;
+		dma_len -= len;
 	}
 
-	page_cnt += len >> dev->fmr_page_shift;
-	if (page_cnt > SRP_FMR_SIZE)
-		return -ENOMEM;
-
-	dma_pages = kmalloc(sizeof (u64) * page_cnt, GFP_ATOMIC);
-	if (!dma_pages)
-		return -ENOMEM;
-
-	page_cnt = 0;
-	scsi_for_each_sg(req->scmnd, sg, sg_cnt, i) {
-		unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
-
-		for (j = 0; j < dma_len; j += dev->fmr_page_size)
-			dma_pages[page_cnt++] =
-				(ib_sg_dma_address(ibdev, sg) &
-				 dev->fmr_page_mask) + j;
-	}
-
-	req->fmr = ib_fmr_pool_map_phys(dev->fmr_pool,
-					dma_pages, page_cnt, io_addr);
-	if (IS_ERR(req->fmr)) {
-		ret = PTR_ERR(req->fmr);
-		req->fmr = NULL;
-		goto out;
-	}
-
-	buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, &scat[0]) &
-			      ~dev->fmr_page_mask);
-	buf->key = cpu_to_be32(req->fmr->fmr->rkey);
-	buf->len = cpu_to_be32(len);
-
+	/* If the last entry of the FMR wasn't a full page, then we need to
+	 * close it out and start a new one -- we can only merge at page
+	 * boundries.
+	 */
 	ret = 0;
-
-out:
-	kfree(dma_pages);
-
+	if (len != dev->fmr_page_size) {
+		ret = srp_map_finish_fmr(state, target);
+		if (!ret)
+			srp_map_update_start(state, NULL, 0, 0);
+	}
 	return ret;
 }
 
 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 			struct srp_request *req)
 {
-	struct scatterlist *scat;
+	struct scatterlist *scat, *sg;
 	struct srp_cmd *cmd = req->cmd->buf;
-	int len, nents, count;
-	u8 fmt = SRP_DATA_DESC_DIRECT;
+	int i, len, nents, count, use_fmr;
 	struct srp_device *dev;
 	struct ib_device *ibdev;
+	struct srp_map_state state;
+	struct srp_indirect_buf *indirect_hdr;
+	u32 table_len;
+	u8 fmt;
 
 	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
 		return sizeof (struct srp_cmd);
···
 	ibdev = dev->dev;
 
 	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
+	if (unlikely(count == 0))
+		return -EIO;
 
 	fmt = SRP_DATA_DESC_DIRECT;
 	len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
···
 		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
 		buf->key = cpu_to_be32(target->rkey);
 		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
-	} else if (srp_map_fmr(target, scat, count, req,
-			       (void *) cmd->add_data)) {
-		/*
-		 * FMR mapping failed, and the scatterlist has more
-		 * than one entry.  Generate an indirect memory
-		 * descriptor.
-		 */
-		struct srp_indirect_buf *buf = (void *) cmd->add_data;
-		struct scatterlist *sg;
-		u32 datalen = 0;
-		int i;
-
-		fmt = SRP_DATA_DESC_INDIRECT;
-		len = sizeof (struct srp_cmd) +
-			sizeof (struct srp_indirect_buf) +
-			count * sizeof (struct srp_direct_buf);
-
-		scsi_for_each_sg(scmnd, sg, count, i) {
-			unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
-
-			buf->desc_list[i].va  =
-				cpu_to_be64(ib_sg_dma_address(ibdev, sg));
-			buf->desc_list[i].key =
-				cpu_to_be32(target->rkey);
-			buf->desc_list[i].len = cpu_to_be32(dma_len);
-			datalen += dma_len;
-		}
-
-		if (scmnd->sc_data_direction == DMA_TO_DEVICE)
-			cmd->data_out_desc_cnt = count;
-		else
-			cmd->data_in_desc_cnt = count;
-
-		buf->table_desc.va  =
-			cpu_to_be64(req->cmd->dma + sizeof *cmd + sizeof *buf);
-		buf->table_desc.key =
-			cpu_to_be32(target->rkey);
-		buf->table_desc.len =
-			cpu_to_be32(count * sizeof (struct srp_direct_buf));
-
-		buf->len = cpu_to_be32(datalen);
+		req->nfmr = 0;
+		goto map_complete;
 	}
 
+	/* We have more than one scatter/gather entry, so build our indirect
+	 * descriptor table, trying to merge as many entries with FMR as we
+	 * can.
+	 */
+	indirect_hdr = (void *) cmd->add_data;
+
+	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
+				   target->indirect_size, DMA_TO_DEVICE);
+
+	memset(&state, 0, sizeof(state));
+	state.desc	= req->indirect_desc;
+	state.pages	= req->map_page;
+	state.next_fmr	= req->fmr_list;
+
+	use_fmr = dev->fmr_pool ? SRP_MAP_ALLOW_FMR : SRP_MAP_NO_FMR;
+
+	for_each_sg(scat, sg, count, i) {
+		if (srp_map_sg_entry(&state, target, sg, i, use_fmr)) {
+			/* FMR mapping failed, so backtrack to the first
+			 * unmapped entry and continue on without using FMR.
+			 */
+			dma_addr_t dma_addr;
+			unsigned int dma_len;
+
+backtrack:
+			sg = state.unmapped_sg;
+			i = state.unmapped_index;
+
+			dma_addr = ib_sg_dma_address(ibdev, sg);
+			dma_len = ib_sg_dma_len(ibdev, sg);
+			dma_len -= (state.unmapped_addr - dma_addr);
+			dma_addr = state.unmapped_addr;
+			use_fmr = SRP_MAP_NO_FMR;
+			srp_map_desc(&state, dma_addr, dma_len, target->rkey);
+		}
+	}
+
+	if (use_fmr == SRP_MAP_ALLOW_FMR && srp_map_finish_fmr(&state, target))
+		goto backtrack;
+
+	/* We've mapped the request, now pull as much of the indirect
+	 * descriptor table as we can into the command buffer. If this
+	 * target is not using an external indirect table, we are
+	 * guaranteed to fit into the command, as the SCSI layer won't
+	 * give us more S/G entries than we allow.
+	 */
+	req->nfmr = state.nfmr;
+	if (state.ndesc == 1) {
+		/* FMR mapping was able to collapse this to one entry,
+		 * so use a direct descriptor.
+		 */
+		struct srp_direct_buf *buf = (void *) cmd->add_data;
+
+		*buf = req->indirect_desc[0];
+		goto map_complete;
+	}
+
+	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
+		     !target->allow_ext_sg)) {
+		shost_printk(KERN_ERR, target->scsi_host,
+			     "Could not fit S/G list into SRP_CMD\n");
+		return -EIO;
+	}
+
+	count = min(state.ndesc, target->cmd_sg_cnt);
+	table_len = state.ndesc * sizeof (struct srp_direct_buf);
+
+	fmt = SRP_DATA_DESC_INDIRECT;
+	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
+	len += count * sizeof (struct srp_direct_buf);
+
+	memcpy(indirect_hdr->desc_list, req->indirect_desc,
+	       count * sizeof (struct srp_direct_buf));
+
+	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
+	indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
+	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
+	indirect_hdr->len = cpu_to_be32(state.total_len);
+
+	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
+		cmd->data_out_desc_cnt = count;
+	else
+		cmd->data_in_desc_cnt = count;
+
+	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
+				      DMA_TO_DEVICE);
+
+map_complete:
 	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
 		cmd->buf_fmt = fmt << 4;
 	else
···
 	spin_unlock_irqrestore(&target->lock, flags);
 
 	dev = target->srp_host->srp_dev->dev;
-	ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len,
+	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
 				   DMA_TO_DEVICE);
 
 	scmnd->result = 0;
···
 		goto err_iu;
 	}
 
-	ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len,
+	ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
 				      DMA_TO_DEVICE);
 
 	if (srp_post_send(target, iu, len)) {
···
 
 	for (i = 0; i < SRP_SQ_SIZE; ++i) {
 		target->tx_ring[i] = srp_alloc_iu(target->srp_host,
-						  srp_max_iu_len,
+						  target->max_iu_len,
 						  GFP_KERNEL, DMA_TO_DEVICE);
 		if (!target->tx_ring[i])
 			goto err;
···
 	}
 
 	return -ENOMEM;
+}
+
+static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
+			       struct srp_login_rsp *lrsp,
+			       struct srp_target_port *target)
+{
+	struct ib_qp_attr *qp_attr = NULL;
+	int attr_mask = 0;
+	int ret;
+	int i;
+
+	if (lrsp->opcode == SRP_LOGIN_RSP) {
+		target->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
+		target->req_lim       = be32_to_cpu(lrsp->req_lim_delta);
+
+		/*
+		 * Reserve credits for task management so we don't
+		 * bounce requests back to the SCSI mid-layer.
+		 */
+		target->scsi_host->can_queue
+			= min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
+			      target->scsi_host->can_queue);
+	} else {
+		shost_printk(KERN_WARNING, target->scsi_host,
+			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
+		ret = -ECONNRESET;
+		goto error;
+	}
+
+	if (!target->rx_ring[0]) {
+		ret = srp_alloc_iu_bufs(target);
+		if (ret)
+			goto error;
+	}
+
+	ret = -ENOMEM;
+	qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
+	if (!qp_attr)
+		goto error;
+
+	qp_attr->qp_state = IB_QPS_RTR;
+	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
+	if (ret)
+		goto error_free;
+
+	ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
+	if (ret)
+		goto error_free;
+
+	for (i = 0; i < SRP_RQ_SIZE; i++) {
+		struct srp_iu *iu = target->rx_ring[i];
+		ret = srp_post_recv(target, iu);
+		if (ret)
+			goto error_free;
+	}
+
+	qp_attr->qp_state = IB_QPS_RTS;
+	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
+	if (ret)
+		goto error_free;
+
+	ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
+	if (ret)
+		goto error_free;
+
+	ret = ib_send_cm_rtu(cm_id, NULL, 0);
+
+error_free:
+	kfree(qp_attr);
+
+error:
+	target->status = ret;
 }
 
 static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
···
 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
 {
 	struct srp_target_port *target = cm_id->context;
-	struct ib_qp_attr *qp_attr = NULL;
-	int attr_mask = 0;
 	int comp = 0;
-	int opcode = 0;
-	int i;
 
 	switch (event->event) {
 	case IB_CM_REQ_ERROR:
···
 
 	case IB_CM_REP_RECEIVED:
 		comp = 1;
-		opcode = *(u8 *) event->private_data;
-
-		if (opcode == SRP_LOGIN_RSP) {
-			struct srp_login_rsp *rsp = event->private_data;
-
-			target->max_ti_iu_len = be32_to_cpu(rsp->max_ti_iu_len);
-			target->req_lim       = be32_to_cpu(rsp->req_lim_delta);
-
-			/*
-			 * Reserve credits for task management so we don't
-			 * bounce requests back to the SCSI mid-layer.
-			 */
-			target->scsi_host->can_queue
-				= min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
-				      target->scsi_host->can_queue);
-		} else {
-			shost_printk(KERN_WARNING, target->scsi_host,
-				     PFX "Unhandled RSP opcode %#x\n", opcode);
-			target->status = -ECONNRESET;
-			break;
-		}
-
-		if (!target->rx_ring[0]) {
-			target->status = srp_alloc_iu_bufs(target);
-			if (target->status)
-				break;
-		}
-
-		qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
-		if (!qp_attr) {
-			target->status = -ENOMEM;
-			break;
-		}
-
-		qp_attr->qp_state = IB_QPS_RTR;
-		target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
-		if (target->status)
-			break;
-
-		target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
-		if (target->status)
-			break;
-
-		for (i = 0; i < SRP_RQ_SIZE; i++) {
-			struct srp_iu *iu = target->rx_ring[i];
-			target->status = srp_post_recv(target, iu);
-			if (target->status)
-				break;
-		}
-		if (target->status)
-			break;
-
-		qp_attr->qp_state = IB_QPS_RTS;
-		target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
-		if (target->status)
-			break;
-
-		target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
-		if (target->status)
-			break;
-
-		target->status = ib_send_cm_rtu(cm_id, NULL, 0);
-		if (target->status)
-			break;
-
+		srp_cm_rep_handler(cm_id, event->private_data, target);
 		break;
 
 	case IB_CM_REJ_RECEIVED:
···
 
 	if (comp)
 		complete(&target->done);
-
-	kfree(qp_attr);
 
 	return 0;
 }
···
 	return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
 }
 
+static ssize_t show_cmd_sg_entries(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct srp_target_port *target = host_to_target(class_to_shost(dev));
+
+	return sprintf(buf, "%u\n", target->cmd_sg_cnt);
+}
+
+static ssize_t show_allow_ext_sg(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct srp_target_port *target = host_to_target(class_to_shost(dev));
+
+	return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
+}
+
 static DEVICE_ATTR(id_ext,	    S_IRUGO, show_id_ext,	   NULL);
 static DEVICE_ATTR(ioc_guid,	    S_IRUGO, show_ioc_guid,	   NULL);
 static DEVICE_ATTR(service_id,	    S_IRUGO, show_service_id,	   NULL);
···
 static DEVICE_ATTR(zero_req_lim,    S_IRUGO, show_zero_req_lim,	   NULL);
 static DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,   NULL);
 static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
+static DEVICE_ATTR(cmd_sg_entries,  S_IRUGO, show_cmd_sg_entries,  NULL);
+static DEVICE_ATTR(allow_ext_sg,    S_IRUGO, show_allow_ext_sg,    NULL);
 
 static struct device_attribute *srp_host_attrs[] = {
 	&dev_attr_id_ext,
···
 	&dev_attr_zero_req_lim,
 	&dev_attr_local_ib_port,
 	&dev_attr_local_ib_device,
+	&dev_attr_cmd_sg_entries,
+	&dev_attr_allow_ext_sg,
 	NULL
 };
···
 	.eh_abort_handler		= srp_abort,
 	.eh_device_reset_handler	= srp_reset_device,
 	.eh_host_reset_handler		= srp_reset_host,
+	.sg_tablesize			= SRP_DEF_SG_TABLESIZE,
 	.can_queue			= SRP_CMD_SQ_SIZE,
 	.this_id			= -1,
 	.cmd_per_lun			= SRP_CMD_SQ_SIZE,
···
 	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
 	SRP_OPT_IO_CLASS	= 1 << 7,
 	SRP_OPT_INITIATOR_EXT	= 1 << 8,
+	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
+	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
+	SRP_OPT_SG_TABLESIZE	= 1 << 11,
 	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
 				   SRP_OPT_IOC_GUID	|
 				   SRP_OPT_DGID		|
···
 	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d"	},
 	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
 	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
+	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u"	},
+	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u"	},
+	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u"	},
 	{ SRP_OPT_ERR,			NULL			}
 };
···
 			kfree(p);
 			break;
 
+		case SRP_OPT_CMD_SG_ENTRIES:
+			if (match_int(args, &token) || token < 1 || token > 255) {
+				printk(KERN_WARNING PFX "bad max cmd_sg_entries parameter '%s'\n", p);
+				goto out;
+			}
+			target->cmd_sg_cnt = token;
+			break;
+
+		case SRP_OPT_ALLOW_EXT_SG:
+			if (match_int(args, &token)) {
+				printk(KERN_WARNING PFX "bad allow_ext_sg parameter '%s'\n", p);
+				goto out;
+			}
+			target->allow_ext_sg = !!token;
+			break;
+
+		case SRP_OPT_SG_TABLESIZE:
+			if (match_int(args, &token) || token < 1 ||
+					token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
+				printk(KERN_WARNING PFX "bad max sg_tablesize parameter '%s'\n", p);
+				goto out;
+			}
+			target->sg_tablesize = token;
+			break;
+
 		default:
 			printk(KERN_WARNING PFX "unknown parameter or missing value "
 			       "'%s' in target creation request\n", p);
···
 		container_of(dev, struct srp_host, dev);
 	struct Scsi_Host *target_host;
 	struct srp_target_port *target;
-	int ret;
-	int i;
+	struct ib_device *ibdev = host->srp_dev->dev;
+	dma_addr_t dma_addr;
+	int i, ret;
 
 	target_host = scsi_host_alloc(&srp_template,
 				      sizeof (struct srp_target_port));
 	if (!target_host)
 		return -ENOMEM;
 
-	target_host->transportt = ib_srp_transport_template;
+	target_host->transportt  = ib_srp_transport_template;
 	target_host->max_lun     = SRP_MAX_LUN;
 	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
 
 	target = host_to_target(target_host);
 
-	target->io_class   = SRP_REV16A_IB_IO_CLASS;
-	target->scsi_host  = target_host;
-	target->srp_host   = host;
-	target->lkey	   = host->srp_dev->mr->lkey;
-	target->rkey	   = host->srp_dev->mr->rkey;
-
-	spin_lock_init(&target->lock);
-	INIT_LIST_HEAD(&target->free_tx);
-	INIT_LIST_HEAD(&target->free_reqs);
-	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
-		target->req_ring[i].index = i;
-		list_add_tail(&target->req_ring[i].list, &target->free_reqs);
-	}
+	target->io_class	= SRP_REV16A_IB_IO_CLASS;
+	target->scsi_host	= target_host;
+	target->srp_host	= host;
+	target->lkey		= host->srp_dev->mr->lkey;
+	target->rkey		= host->srp_dev->mr->rkey;
+	target->cmd_sg_cnt	= cmd_sg_entries;
+	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
+	target->allow_ext_sg	= allow_ext_sg;
 
 	ret = srp_parse_options(buf, target);
 	if (ret)
 		goto err;
 
-	ib_query_gid(host->srp_dev->dev, host->port, 0, &target->path.sgid);
+	if (!host->srp_dev->fmr_pool && !target->allow_ext_sg &&
+	    target->cmd_sg_cnt < target->sg_tablesize) {
+		printk(KERN_WARNING PFX "No FMR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
+		target->sg_tablesize = target->cmd_sg_cnt;
+	}
+
+	target_host->sg_tablesize = target->sg_tablesize;
+	target->indirect_size = target->sg_tablesize *
+				sizeof (struct srp_direct_buf);
+	target->max_iu_len = sizeof (struct srp_cmd) +
+			     sizeof (struct srp_indirect_buf) +
+			     target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
+
+	spin_lock_init(&target->lock);
+	INIT_LIST_HEAD(&target->free_tx);
+	INIT_LIST_HEAD(&target->free_reqs);
+	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
+		struct srp_request *req = &target->req_ring[i];
+
+		req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof (void *),
+					GFP_KERNEL);
+		req->map_page = kmalloc(SRP_FMR_SIZE * sizeof (void *),
+					GFP_KERNEL);
+		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
+		if (!req->fmr_list || !req->map_page || !req->indirect_desc)
+			goto err_free_mem;
+
+		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
+					     target->indirect_size,
+					     DMA_TO_DEVICE);
+		if (ib_dma_mapping_error(ibdev, dma_addr))
+			goto err_free_mem;
+
+		req->indirect_dma_addr = dma_addr;
+		req->index = i;
+		list_add_tail(&req->list, &target->free_reqs);
+	}
+
+	ib_query_gid(ibdev, host->port, 0, &target->path.sgid);
 
 	shost_printk(KERN_DEBUG, target->scsi_host, PFX
 		     "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
···
 
 	ret = srp_create_target_ib(target);
 	if (ret)
-		goto err;
+		goto err_free_mem;
 
 	ret = srp_new_cm_id(target);
 	if (ret)
-		goto err_free;
+		goto err_free_ib;
 
 	target->qp_in_error = 0;
 	ret = srp_connect_target(target);
···
 err_cm_id:
 	ib_destroy_cm_id(target->cm_id);
 
-err_free:
+err_free_ib:
 	srp_free_target_ib(target);
+
+err_free_mem:
+	srp_free_req_data(target);
 
 err:
 	scsi_host_put(target_host);
···
 	struct ib_device_attr *dev_attr;
 	struct ib_fmr_pool_param fmr_param;
 	struct srp_host *host;
-	int s, e, p;
+	int max_pages_per_fmr, fmr_page_shift, s, e, p;
 
 	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
 	if (!dev_attr)
···
 
 	/*
 	 * Use the smallest page size supported by the HCA, down to a
-	 * minimum of 512 bytes (which is the smallest sector that a
-	 * SCSI command will ever carry).
+	 * minimum of 4096 bytes. We're unlikely to build large sglists
+	 * out of smaller entries.
 	 */
-	srp_dev->fmr_page_shift = max(9, ffs(dev_attr->page_size_cap) - 1);
-	srp_dev->fmr_page_size  = 1 << srp_dev->fmr_page_shift;
-	srp_dev->fmr_page_mask  = ~((u64) srp_dev->fmr_page_size - 1);
+	fmr_page_shift		= max(12, ffs(dev_attr->page_size_cap) - 1);
+	srp_dev->fmr_page_size	= 1 << fmr_page_shift;
+	srp_dev->fmr_page_mask	= ~((u64) srp_dev->fmr_page_size - 1);
+	srp_dev->fmr_max_size	= srp_dev->fmr_page_size * SRP_FMR_SIZE;
 
 	INIT_LIST_HEAD(&srp_dev->dev_list);
···
 	if (IS_ERR(srp_dev->mr))
 		goto err_pd;
 
-	memset(&fmr_param, 0, sizeof fmr_param);
-	fmr_param.pool_size	    = SRP_FMR_POOL_SIZE;
-	fmr_param.dirty_watermark   = SRP_FMR_DIRTY_SIZE;
-	fmr_param.cache		    = 1;
-	fmr_param.max_pages_per_fmr = SRP_FMR_SIZE;
-	fmr_param.page_shift	    = srp_dev->fmr_page_shift;
-	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
-				       IB_ACCESS_REMOTE_WRITE |
-				       IB_ACCESS_REMOTE_READ);
+	for (max_pages_per_fmr = SRP_FMR_SIZE;
+			max_pages_per_fmr >= SRP_FMR_MIN_SIZE;
+			max_pages_per_fmr /= 2, srp_dev->fmr_max_size /= 2) {
+		memset(&fmr_param, 0, sizeof fmr_param);
+		fmr_param.pool_size	    = SRP_FMR_POOL_SIZE;
+		fmr_param.dirty_watermark   = SRP_FMR_DIRTY_SIZE;
+		fmr_param.cache		    = 1;
+		fmr_param.max_pages_per_fmr = max_pages_per_fmr;
+		fmr_param.page_shift	    = fmr_page_shift;
+		fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
+					       IB_ACCESS_REMOTE_WRITE |
+					       IB_ACCESS_REMOTE_READ);
 
-	srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);
+		srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);
+		if (!IS_ERR(srp_dev->fmr_pool))
+			break;
+	}
+
 	if (IS_ERR(srp_dev->fmr_pool))
 		srp_dev->fmr_pool = NULL;
···
 		srp_disconnect_target(target);
 		ib_destroy_cm_id(target->cm_id);
 		srp_free_target_ib(target);
+		srp_free_req_data(target);
 		scsi_host_put(target->scsi_host);
 	}
···
 
 	BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
 
-	if (srp_sg_tablesize > 255) {
-		printk(KERN_WARNING PFX "Clamping srp_sg_tablesize to 255\n");
-		srp_sg_tablesize = 255;
+	if (srp_sg_tablesize) {
+		printk(KERN_WARNING PFX "srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
+		if (!cmd_sg_entries)
+			cmd_sg_entries = srp_sg_tablesize;
+	}
+
+	if (!cmd_sg_entries)
+		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
+
+	if (cmd_sg_entries > 255) {
+		printk(KERN_WARNING PFX "Clamping cmd_sg_entries to 255\n");
+		cmd_sg_entries = 255;
+	}
+
+	if (!indirect_sg_entries)
+		indirect_sg_entries = cmd_sg_entries;
+	else if (indirect_sg_entries < cmd_sg_entries) {
+		printk(KERN_WARNING PFX "Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n", cmd_sg_entries);
+		indirect_sg_entries = cmd_sg_entries;
 	}
 
 	ib_srp_transport_template =
 		srp_attach_transport(&ib_srp_transport_functions);
 	if (!ib_srp_transport_template)
 		return -ENOMEM;
-
-	srp_template.sg_tablesize = srp_sg_tablesize;
-	srp_max_iu_len = (sizeof (struct srp_cmd) +
-			  sizeof (struct srp_indirect_buf) +
-			  srp_sg_tablesize * 16);
 
 	ret = class_register(&srp_class);
 	if (ret) {

drivers/infiniband/ulp/srp/ib_srp.h (+33 -5)

 	SRP_TAG_NO_REQ		= ~0U,
 	SRP_TAG_TSK_MGMT	= 1U << 31,
 
-	SRP_FMR_SIZE		= 256,
+	SRP_FMR_SIZE		= 512,
+	SRP_FMR_MIN_SIZE	= 128,
 	SRP_FMR_POOL_SIZE	= 1024,
-	SRP_FMR_DIRTY_SIZE	= SRP_FMR_POOL_SIZE / 4
+	SRP_FMR_DIRTY_SIZE	= SRP_FMR_POOL_SIZE / 4,
+
+	SRP_MAP_ALLOW_FMR	= 0,
+	SRP_MAP_NO_FMR		= 1,
 };
 
 enum srp_target_state {
···
 	struct ib_pd	       *pd;
 	struct ib_mr	       *mr;
 	struct ib_fmr_pool     *fmr_pool;
-	int			fmr_page_shift;
-	int			fmr_page_size;
 	u64			fmr_page_mask;
+	int			fmr_page_size;
+	int			fmr_max_size;
 };
 
 struct srp_host {
···
 	struct list_head	list;
 	struct scsi_cmnd       *scmnd;
 	struct srp_iu	       *cmd;
-	struct ib_pool_fmr     *fmr;
+	struct ib_pool_fmr    **fmr_list;
+	u64		       *map_page;
+	struct srp_direct_buf  *indirect_desc;
+	dma_addr_t		indirect_dma_addr;
+	short			nfmr;
 	short			index;
 };
···
 	u32			lkey;
 	u32			rkey;
 	enum srp_target_state	state;
+	unsigned int		max_iu_len;
+	unsigned int		cmd_sg_cnt;
+	unsigned int		indirect_size;
+	bool			allow_ext_sg;
 
 	/* Everything above this point is used in the hot path of
 	 * command processing. Try to keep them packed into cachelines.
···
 	struct Scsi_Host       *scsi_host;
 	char			target_name[32];
 	unsigned int		scsi_id;
+	unsigned int		sg_tablesize;
 
 	struct ib_sa_path_rec	path;
 	__be16			orig_dgid[8];
···
 	void		       *buf;
 	size_t			size;
 	enum dma_data_direction	direction;
+};
+
+struct srp_map_state {
+	struct ib_pool_fmr    **next_fmr;
+	struct srp_direct_buf  *desc;
+	u64		       *pages;
+	dma_addr_t		base_dma_addr;
+	u32			fmr_len;
+	u32			total_len;
+	unsigned int		npages;
+	unsigned int		nfmr;
+	unsigned int		ndesc;
+	struct scatterlist     *unmapped_sg;
+	int			unmapped_index;
+	dma_addr_t		unmapped_addr;
 };
 
 #endif /* IB_SRP_H */
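
The new srp_map_state is the scratchpad threaded through the rewritten mapping loop in ib_srp.c; a brief field-by-field annotation, paraphrasing the hunks above:

/*
 * pages[npages]   -- page list for the FMR being built (<= SRP_FMR_SIZE)
 * base_dma_addr   -- start address kept so a single page can skip the FMR
 * fmr_len         -- bytes covered by the FMR under construction
 * desc, ndesc     -- next srp_direct_buf slot and descriptors emitted
 * next_fmr, nfmr  -- pool FMRs recorded for release in srp_unmap_data()
 * total_len       -- running byte count written to the indirect header
 * unmapped_*      -- restart point: if ib_fmr_pool_map_phys() fails, the
 *                    loop backtracks here and finishes in SRP_MAP_NO_FMR
 *                    mode, emitting plain rkey descriptors
 */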

drivers/net/mlx4/main.c (+3)

 		}
 	}
 
+	/* Allow large DMA segments, up to the firmware limit of 1 GB */
+	dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
+
 	priv = kzalloc(sizeof *priv, GFP_KERNEL);
 	if (!priv) {
 		dev_err(&pdev->dev, "Device struct alloc failed, "