Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

IB/srp: Preparation for transmit ring response allocation

The transmit ring in ib_srp (srp_target.tx_ring) is currently used only
for allocating requests sent by the initiator to the target. This patch
prepares for using that ring for allocation of both requests and responses.
Also, this patch differentiates the uses of SRP_SQ_SIZE, increases the
size of the IB send completion queue by one element, and reserves one
transmit ring slot for SRP_TSK_MGMT requests.

Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: David Dillow <dillowda@ornl.gov>
Signed-off-by: Roland Dreier <rolandd@cisco.com>

Authored by Bart Van Assche and committed by Roland Dreier
dd5e6e38 fb50a83d

+25 -15
+15 -12
drivers/infiniband/ulp/srp/ib_srp.c
··· 291 291 292 292 for (i = 0; i < SRP_RQ_SIZE; ++i) 293 293 srp_free_iu(target->srp_host, target->rx_ring[i]); 294 - for (i = 0; i < SRP_SQ_SIZE + 1; ++i) 294 + for (i = 0; i < SRP_SQ_SIZE; ++i) 295 295 srp_free_iu(target->srp_host, target->tx_ring[i]); 296 296 } 297 297 ··· 822 822 823 823 spin_lock_irqsave(target->scsi_host->host_lock, flags); 824 824 825 - next = target->rx_head & (SRP_RQ_SIZE - 1); 825 + next = target->rx_head & SRP_RQ_MASK; 826 826 wr.wr_id = next; 827 827 iu = target->rx_ring[next]; 828 828 ··· 989 989 static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target, 990 990 enum srp_request_type req_type) 991 991 { 992 - s32 min = (req_type == SRP_REQ_TASK_MGMT) ? 1 : 2; 992 + s32 rsv = (req_type == SRP_REQ_TASK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE; 993 993 994 994 srp_send_completion(target->send_cq, target); 995 995 996 996 if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE) 997 997 return NULL; 998 998 999 - if (target->req_lim < min) { 999 + if (target->req_lim <= rsv) { 1000 1000 ++target->zero_req_lim; 1001 1001 return NULL; 1002 1002 } 1003 1003 1004 - return target->tx_ring[target->tx_head & SRP_SQ_SIZE]; 1004 + return target->tx_ring[target->tx_head & SRP_SQ_MASK]; 1005 1005 } 1006 1006 1007 1007 /* ··· 1020 1020 list.lkey = target->srp_host->srp_dev->mr->lkey; 1021 1021 1022 1022 wr.next = NULL; 1023 - wr.wr_id = target->tx_head & SRP_SQ_SIZE; 1023 + wr.wr_id = target->tx_head & SRP_SQ_MASK; 1024 1024 wr.sg_list = &list; 1025 1025 wr.num_sge = 1; 1026 1026 wr.opcode = IB_WR_SEND; ··· 1121 1121 goto err; 1122 1122 } 1123 1123 1124 - for (i = 0; i < SRP_SQ_SIZE + 1; ++i) { 1124 + for (i = 0; i < SRP_SQ_SIZE; ++i) { 1125 1125 target->tx_ring[i] = srp_alloc_iu(target->srp_host, 1126 1126 srp_max_iu_len, 1127 1127 GFP_KERNEL, DMA_TO_DEVICE); ··· 1137 1137 target->rx_ring[i] = NULL; 1138 1138 } 1139 1139 1140 - for (i = 0; i < SRP_SQ_SIZE + 1; ++i) { 1140 + for (i = 0; i < SRP_SQ_SIZE; ++i) { 1141 1141 srp_free_iu(target->srp_host, 
target->tx_ring[i]); 1142 1142 target->tx_ring[i] = NULL; 1143 1143 } ··· 1626 1626 .eh_abort_handler = srp_abort, 1627 1627 .eh_device_reset_handler = srp_reset_device, 1628 1628 .eh_host_reset_handler = srp_reset_host, 1629 - .can_queue = SRP_SQ_SIZE, 1629 + .can_queue = SRP_CMD_SQ_SIZE, 1630 1630 .this_id = -1, 1631 - .cmd_per_lun = SRP_SQ_SIZE, 1631 + .cmd_per_lun = SRP_CMD_SQ_SIZE, 1632 1632 .use_clustering = ENABLE_CLUSTERING, 1633 1633 .shost_attrs = srp_host_attrs 1634 1634 }; ··· 1813 1813 printk(KERN_WARNING PFX "bad max cmd_per_lun parameter '%s'\n", p); 1814 1814 goto out; 1815 1815 } 1816 - target->scsi_host->cmd_per_lun = min(token, SRP_SQ_SIZE); 1816 + target->scsi_host->cmd_per_lun = min(token, SRP_CMD_SQ_SIZE); 1817 1817 break; 1818 1818 1819 1819 case SRP_OPT_IO_CLASS: ··· 1891 1891 1892 1892 INIT_LIST_HEAD(&target->free_reqs); 1893 1893 INIT_LIST_HEAD(&target->req_queue); 1894 - for (i = 0; i < SRP_SQ_SIZE; ++i) { 1894 + for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) { 1895 1895 target->req_ring[i].index = i; 1896 1896 list_add_tail(&target->req_ring[i].list, &target->free_reqs); 1897 1897 } ··· 2158 2158 static int __init srp_init_module(void) 2159 2159 { 2160 2160 int ret; 2161 + 2162 + BUILD_BUG_ON_NOT_POWER_OF_2(SRP_SQ_SIZE); 2163 + BUILD_BUG_ON_NOT_POWER_OF_2(SRP_RQ_SIZE); 2161 2164 2162 2165 if (srp_sg_tablesize > 255) { 2163 2166 printk(KERN_WARNING PFX "Clamping srp_sg_tablesize to 255\n");
+10 -3
drivers/infiniband/ulp/srp/ib_srp.h
··· 59 59 60 60 SRP_RQ_SHIFT = 6, 61 61 SRP_RQ_SIZE = 1 << SRP_RQ_SHIFT, 62 - SRP_SQ_SIZE = SRP_RQ_SIZE - 1, 62 + SRP_RQ_MASK = SRP_RQ_SIZE - 1, 63 + 64 + SRP_SQ_SIZE = SRP_RQ_SIZE, 65 + SRP_SQ_MASK = SRP_SQ_SIZE - 1, 66 + SRP_RSP_SQ_SIZE = 1, 67 + SRP_REQ_SQ_SIZE = SRP_SQ_SIZE - SRP_RSP_SQ_SIZE, 68 + SRP_TSK_MGMT_SQ_SIZE = 1, 69 + SRP_CMD_SQ_SIZE = SRP_REQ_SQ_SIZE - SRP_TSK_MGMT_SQ_SIZE, 63 70 64 71 SRP_TAG_TSK_MGMT = 1 << (SRP_RQ_SHIFT + 1), 65 72 ··· 151 144 152 145 unsigned tx_head; 153 146 unsigned tx_tail; 154 - struct srp_iu *tx_ring[SRP_SQ_SIZE + 1]; 147 + struct srp_iu *tx_ring[SRP_SQ_SIZE]; 155 148 156 149 struct list_head free_reqs; 157 150 struct list_head req_queue; 158 - struct srp_request req_ring[SRP_SQ_SIZE]; 151 + struct srp_request req_ring[SRP_CMD_SQ_SIZE]; 159 152 160 153 struct work_struct work; 161 154