config INFINIBAND_SRP
	tristate "InfiniBand SCSI RDMA Protocol"
	depends on INFINIBAND && SCSI
	---help---
	  Support for the SCSI RDMA Protocol over InfiniBand. This
	  allows you to access storage devices that speak SRP over
	  InfiniBand.

	  The SRP protocol is defined by the INCITS T10 technical
	  committee. See <http://www.t10.org/>.
+1700
drivers/infiniband/ulp/srp/ib_srp.c
/*
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ib_srp.c 3932 2005-11-01 17:19:29Z roland $
 */

#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>

#include <asm/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/srp.h>

#include <rdma/ib_cache.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"0.2"
#define DRV_RELDATE	"November 1, 2005"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");

/* Non-zero enables workarounds for known Topspin/Cisco SRP target bugs. */
static int topspin_workarounds = 1;

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

/* IEEE OUI used to recognize Topspin/Cisco targets by their IOC GUID prefix. */
static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_completion(struct ib_cq *cq, void *target_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

/*
 * Registration with the IB core: add/remove are invoked once per RDMA
 * device as devices come and go.
 */
static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

/*
 * The target port state lives directly in the Scsi_Host's hostdata
 * area, so conversion is just a cast.
 */
static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

/* SCSI midlayer .info callback: human-readable target description. */
static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

/*
 * Allocate one information unit (IU): the srp_iu bookkeeping struct, a
 * kernel buffer of @size bytes, and a streaming DMA mapping of that
 * buffer in @direction.  Returns NULL on any failure; on success the
 * caller owns the IU and must release it with srp_free_iu().
 */
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = dma_map_single(host->dev->dma_device, iu->buf, size, direction);
	if (dma_mapping_error(iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

/* Undo srp_alloc_iu(): unmap the DMA buffer and free both allocations. */
static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	dma_unmap_single(host->dev->dma_device, iu->dma, iu->size, iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

/* QP async event handler: we only log; no recovery is attempted here. */
static void srp_qp_event(struct ib_event *event, void *context)
{
	printk(KERN_ERR PFX "QP event %d\n", event->event);
}

/*
 * Transition @qp to the INIT state, resolving the P_Key index for the
 * path's P_Key on our local port.  Returns 0 or a negative errno.
 */
static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->path.pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

/*
 * Create the per-target CQ and RC QP (send and receive share one CQ)
 * and move the QP to INIT.  On error everything created so far is
 * destroyed before returning.
 */
static int srp_create_target_ib(struct srp_target_port *target)
{
	struct ib_qp_init_attr *init_attr;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	target->cq = ib_create_cq(target->srp_host->dev, srp_completion,
				  NULL, target, SRP_CQ_SIZE);
	if (IS_ERR(target->cq)) {
		ret = PTR_ERR(target->cq);
		goto out;
	}

	/* Arm the CQ so srp_completion() fires on the next completion. */
	ib_req_notify_cq(target->cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = SRP_SQ_SIZE;
	init_attr->cap.max_recv_wr     = SRP_RQ_SIZE;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = 1;
	init_attr->sq_sig_type         = IB_SIGNAL_ALL_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = target->cq;
	init_attr->recv_cq             = target->cq;

	target->qp = ib_create_qp(target->srp_host->pd, init_attr);
	if (IS_ERR(target->qp)) {
		ret = PTR_ERR(target->qp);
		ib_destroy_cq(target->cq);
		goto out;
	}

	ret = srp_init_qp(target, target->qp);
	if (ret) {
		ib_destroy_qp(target->qp);
		ib_destroy_cq(target->cq);
		goto out;
	}

out:
	kfree(init_attr);
	return ret;
}

/*
 * Tear down the per-target QP/CQ and free all RX/TX information units.
 * Note the TX ring holds SRP_SQ_SIZE + 1 entries (one extra for task
 * management).
 */
static void srp_free_target_ib(struct srp_target_port *target)
{
	int i;

	ib_destroy_qp(target->qp);
	ib_destroy_cq(target->cq);

	for (i = 0; i < SRP_RQ_SIZE; ++i)
		srp_free_iu(target->srp_host, target->rx_ring[i]);
	for (i = 0; i < SRP_SQ_SIZE + 1; ++i)
		srp_free_iu(target->srp_host, target->tx_ring[i]);
}

/*
 * SA path record query callback: stash the result (or failure status)
 * in the target and wake up the waiter in srp_lookup_path().
 */
static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	target->status = status;
	if (status)
		printk(KERN_ERR PFX "Got failed path rec status %d\n", status);
	else
		target->path = *pathrec;
	complete(&target->done);
}

/*
 * Synchronously resolve the IB path to the target via the subnet
 * administrator.  Blocks until the query completes; returns 0 on
 * success or a negative error.
 */
static int srp_lookup_path(struct srp_target_port *target)
{
	target->path.numb_path = 1;

	init_completion(&target->done);

	target->path_query_id = ib_sa_path_rec_get(target->srp_host->dev,
						   target->srp_host->port,
						   &target->path,
						   IB_SA_PATH_REC_DGID	|
						   IB_SA_PATH_REC_SGID	|
						   IB_SA_PATH_REC_NUMB_PATH |
						   IB_SA_PATH_REC_PKEY,
						   SRP_PATH_REC_TIMEOUT_MS,
						   GFP_KERNEL,
						   srp_path_rec_completion,
						   target, &target->path_query);
	if (target->path_query_id < 0)
		return target->path_query_id;

	wait_for_completion(&target->done);

	if (target->status < 0)
		printk(KERN_WARNING PFX "Path record query failed\n");

	return target->status;
}

/*
 * Build and send the CM REQ carrying the SRP_LOGIN_REQ private data.
 * The CM parameters and login request are heap-allocated together so a
 * single kfree() releases both.  Returns the ib_send_cm_req() status.
 */
static int srp_send_req(struct srp_target_port *target)
{
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path 	      = &target->path;
	req->param.alternate_path 	      = NULL;
	req->param.service_id 		      = target->service_id;
	req->param.qp_num 		      = target->qp->qp_num;
	req->param.qp_type 		      = target->qp->qp_type;
	req->param.private_data 	      = &req->priv;
	req->param.private_data_len 	      = sizeof req->priv;
	req->param.flow_control 	      = 1;

	/* PSN must be a random 24-bit value. */
	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn 	     &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count 		      = 7;
	req->param.rnr_retry_count 	      = 7;
	req->param.max_cm_retries 	      = 15;

	req->priv.opcode     	= SRP_LOGIN_REQ;
	req->priv.tag        	= 0;
	req->priv.req_it_iu_len = cpu_to_be32(SRP_MAX_IU_LEN);
	req->priv.req_buf_fmt 	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	memcpy(req->priv.initiator_port_id, target->srp_host->initiator_port_id, 16);
	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID. The
	 * second 8 bytes must be our local node GUID, but we always
	 * use that anyway.
	 */
	if (topspin_workarounds && !memcmp(&target->ioc_guid, topspin_oui, 3)) {
		printk(KERN_DEBUG PFX "Topspin/Cisco initiator port ID workaround "
		       "activated for target GUID %016llx\n",
		       (unsigned long long) be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
	}
	memcpy(req->priv.target_port_id,     &target->id_ext, 8);
	memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);

	status = ib_send_cm_req(target->cm_id, &req->param);

	kfree(req);

	return status;
}

/*
 * Send a CM DREQ and wait for the disconnect to complete (the CM event
 * handler signals target->done).
 */
static void srp_disconnect_target(struct srp_target_port *target)
{
	/* XXX should send SRP_I_LOGOUT request */

	init_completion(&target->done);
	ib_send_cm_dreq(target->cm_id, NULL, 0);
	wait_for_completion(&target->done);
}

/*
 * Deferred removal of a dead target port, run from the shared
 * workqueue so that scsi_remove_host() is never called from the SCSI
 * error-handler context (which would deadlock).  Only proceeds if the
 * target is still in the DEAD state.
 */
static void srp_remove_work(void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state != SRP_TARGET_DEAD) {
		/* Someone revived or already removed it; just drop our ref. */
		spin_unlock_irq(target->scsi_host->host_lock);
		scsi_host_put(target->scsi_host);
		return;
	}
	target->state = SRP_TARGET_REMOVED;
	spin_unlock_irq(target->scsi_host->host_lock);

	down(&target->srp_host->target_mutex);
	list_del(&target->list);
	up(&target->srp_host->target_mutex);

	scsi_remove_host(target->scsi_host);
	ib_destroy_cm_id(target->cm_id);
	srp_free_target_ib(target);
	scsi_host_put(target->scsi_host);
	/* And another put to really free the target port... */
	scsi_host_put(target->scsi_host);
}

/*
 * Resolve a path and run the CM login handshake, retrying as long as
 * the target answers with a port or LID/QP redirect REJ.  Returns 0 on
 * a successful login, otherwise a negative error or CM status.
 */
static int srp_connect_target(struct srp_target_port *target)
{
	int ret;

	ret = srp_lookup_path(target);
	if (ret)
		return ret;

	while (1) {
		init_completion(&target->done);
		ret = srp_send_req(target);
		if (ret)
			return ret;
		wait_for_completion(&target->done);

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (target->status) {
		case 0:
			return 0;

		case SRP_PORT_REDIRECT:
			/* Re-resolve the path to the redirected port. */
			ret = srp_lookup_path(target);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			/* Path already patched by the CM handler; retry. */
			break;

		default:
			return target->status;
		}
	}
}

/*
 * Tear down and re-establish the connection to a live target: get a
 * fresh CM ID, reset and re-init the QP, drain the CQ, fail all
 * outstanding commands with DID_RESET, reset the rings and reconnect.
 * On failure the target is marked DEAD and removal is deferred to
 * srp_remove_work().
 */
static int srp_reconnect_target(struct srp_target_port *target)
{
	struct ib_cm_id *new_cm_id;
	struct ib_qp_attr qp_attr;
	struct srp_request *req;
	struct ib_wc wc;
	int ret;
	int i;

	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state != SRP_TARGET_LIVE) {
		spin_unlock_irq(target->scsi_host->host_lock);
		return -EAGAIN;
	}
	target->state = SRP_TARGET_CONNECTING;
	spin_unlock_irq(target->scsi_host->host_lock);

	srp_disconnect_target(target);
	/*
	 * Now get a new local CM ID so that we avoid confusing the
	 * target in case things are really fouled up.
	 */
	new_cm_id = ib_create_cm_id(target->srp_host->dev,
				    srp_cm_handler, target);
	if (IS_ERR(new_cm_id)) {
		ret = PTR_ERR(new_cm_id);
		goto err;
	}
	ib_destroy_cm_id(target->cm_id);
	target->cm_id = new_cm_id;

	qp_attr.qp_state = IB_QPS_RESET;
	ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE);
	if (ret)
		goto err;

	ret = srp_init_qp(target, target->qp);
	if (ret)
		goto err;

	/* Drain any stale completions left over from the old connection. */
	while (ib_poll_cq(target->cq, 1, &wc) > 0)
		; /* nothing */

	/* Fail every in-flight command back to the midlayer. */
	list_for_each_entry(req, &target->req_queue, list) {
		req->scmnd->result = DID_RESET << 16;
		req->scmnd->scsi_done(req->scmnd);
	}

	/* Reset all ring state and rebuild the request free list. */
	target->rx_head	 = 0;
	target->tx_head	 = 0;
	target->tx_tail  = 0;
	target->req_head = 0;
	for (i = 0; i < SRP_SQ_SIZE - 1; ++i)
		target->req_ring[i].next = i + 1;
	target->req_ring[SRP_SQ_SIZE - 1].next = -1;
	INIT_LIST_HEAD(&target->req_queue);

	ret = srp_connect_target(target);
	if (ret)
		goto err;

	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state == SRP_TARGET_CONNECTING) {
		ret = 0;
		target->state = SRP_TARGET_LIVE;
	} else
		ret = -EAGAIN;
	spin_unlock_irq(target->scsi_host->host_lock);

	return ret;

err:
	printk(KERN_ERR PFX "reconnect failed (%d), removing target port.\n", ret);

	/*
	 * We couldn't reconnect, so kill our target port off.
	 * However, we have to defer the real removal because we might
	 * be in the context of the SCSI error handler now, which
	 * would deadlock if we call scsi_remove_host().
	 */
	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state == SRP_TARGET_CONNECTING) {
		target->state = SRP_TARGET_DEAD;
		INIT_WORK(&target->work, srp_remove_work, target);
		schedule_work(&target->work);
	}
	spin_unlock_irq(target->scsi_host->host_lock);

	return ret;
}

/*
 * Map the data buffer of @scmnd for DMA and fill in the SRP data
 * descriptor(s) following the srp_cmd in @req's IU.  Uses a direct
 * descriptor when the command maps to a single DMA segment, otherwise
 * an indirect descriptor table.  Returns the total IU length to send,
 * or a negative error.
 */
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
			struct srp_request *req)
{
	struct srp_cmd *cmd = req->cmd->buf;
	int len;
	u8 fmt;

	/* No data phase: the IU is just the bare srp_cmd. */
	if (!scmnd->request_buffer || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		printk(KERN_WARNING PFX "Unhandled data direction %d\n",
		       scmnd->sc_data_direction);
		return -EINVAL;
	}

	if (scmnd->use_sg) {
		struct scatterlist *scat = scmnd->request_buffer;
		int n;
		int i;

		n = dma_map_sg(target->srp_host->dev->dma_device,
			       scat, scmnd->use_sg, scmnd->sc_data_direction);

		if (n == 1) {
			/* Whole transfer fits in one direct descriptor. */
			struct srp_direct_buf *buf = (void *) cmd->add_data;

			fmt = SRP_DATA_DESC_DIRECT;

			buf->va  = cpu_to_be64(sg_dma_address(scat));
			buf->key = cpu_to_be32(target->srp_host->mr->rkey);
			buf->len = cpu_to_be32(sg_dma_len(scat));

			len = sizeof (struct srp_cmd) +
				sizeof (struct srp_direct_buf);
		} else {
			/*
			 * Multiple segments: build an indirect table whose
			 * descriptor list lives inline in the same IU, so
			 * the table's bus address is the IU's address plus
			 * the srp_cmd and indirect-buf headers.
			 */
			struct srp_indirect_buf *buf = (void *) cmd->add_data;
			u32 datalen = 0;

			fmt = SRP_DATA_DESC_INDIRECT;

			if (scmnd->sc_data_direction == DMA_TO_DEVICE)
				cmd->data_out_desc_cnt = n;
			else
				cmd->data_in_desc_cnt  = n;

			buf->table_desc.va  = cpu_to_be64(req->cmd->dma +
							  sizeof *cmd +
							  sizeof *buf);
			buf->table_desc.key =
				cpu_to_be32(target->srp_host->mr->rkey);
			buf->table_desc.len =
				cpu_to_be32(n * sizeof (struct srp_direct_buf));

			for (i = 0; i < n; ++i) {
				buf->desc_list[i].va  = cpu_to_be64(sg_dma_address(&scat[i]));
				buf->desc_list[i].key =
					cpu_to_be32(target->srp_host->mr->rkey);
				buf->desc_list[i].len = cpu_to_be32(sg_dma_len(&scat[i]));

				datalen += sg_dma_len(&scat[i]);
			}

			buf->len = cpu_to_be32(datalen);

			len = sizeof (struct srp_cmd) +
				sizeof (struct srp_indirect_buf) +
				n * sizeof (struct srp_direct_buf);
		}
	} else {
		/* Single flat buffer: one direct descriptor. */
		struct srp_direct_buf *buf = (void *) cmd->add_data;
		dma_addr_t dma;

		dma = dma_map_single(target->srp_host->dev->dma_device,
				     scmnd->request_buffer, scmnd->request_bufflen,
				     scmnd->sc_data_direction);
		if (dma_mapping_error(dma)) {
			printk(KERN_WARNING PFX "unable to map %p/%d (dir %d)\n",
			       scmnd->request_buffer, (int) scmnd->request_bufflen,
			       scmnd->sc_data_direction);
			return -EINVAL;
		}

		/* Remember the mapping so srp_unmap_data() can undo it. */
		pci_unmap_addr_set(req, direct_mapping, dma);

		buf->va  = cpu_to_be64(dma);
		buf->key = cpu_to_be32(target->srp_host->mr->rkey);
		buf->len = cpu_to_be32(scmnd->request_bufflen);

		fmt = SRP_DATA_DESC_DIRECT;

		len = sizeof (struct srp_cmd) +	sizeof (struct srp_direct_buf);
	}

	/* Data-out format lives in the high nibble, data-in in the low. */
	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;


	return len;
}

/*
 * Undo the DMA mapping made by srp_map_data() for @scmnd, if any.
 */
static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_target_port *target,
			   struct srp_request *req)
{
	if (!scmnd->request_buffer ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (scmnd->use_sg)
		dma_unmap_sg(target->srp_host->dev->dma_device,
			     (struct scatterlist *) scmnd->request_buffer,
			     scmnd->use_sg, scmnd->sc_data_direction);
	else
		dma_unmap_single(target->srp_host->dev->dma_device,
				 pci_unmap_addr(req, direct_mapping),
				 scmnd->request_bufflen,
				 scmnd->sc_data_direction);
}

/*
 * Handle an SRP_RSP IU: apply the request-limit delta, then either
 * complete a task-management request (tag has SRP_TAG_TSK_MGMT set) or
 * finish the matching SCSI command, copying sense data and residual
 * counts before handing it back to the midlayer and recycling the
 * request slot.  Runs with host_lock taken here.
 */
static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
{
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;
	s32 delta;

	delta = (s32) be32_to_cpu(rsp->req_lim_delta);

	spin_lock_irqsave(target->scsi_host->host_lock, flags);

	target->req_lim += delta;

	req = &target->req_ring[rsp->tag & ~SRP_TAG_TSK_MGMT];

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		if (be32_to_cpu(rsp->resp_data_len) < 4)
			req->tsk_status = -1;
		else
			req->tsk_status = rsp->data[3];
		complete(&req->done);
	} else {
		scmnd = req->scmnd;
		if (!scmnd)
			/*
			 * NOTE(review): scmnd is still dereferenced just
			 * below even when it is NULL here, which looks
			 * like a potential NULL pointer dereference --
			 * confirm the intended handling.
			 */
			printk(KERN_ERR "Null scmnd for RSP w/tag %016llx\n",
			       (unsigned long long) rsp->tag);
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			/* Sense data follows the response data in the IU. */
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
			scmnd->resid = be32_to_cpu(rsp->data_out_res_cnt);
		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
			scmnd->resid = be32_to_cpu(rsp->data_in_res_cnt);

		srp_unmap_data(scmnd, target, req);

		if (!req->tsk_mgmt) {
			req->scmnd = NULL;
			scmnd->host_scribble = (void *) -1L;
			scmnd->scsi_done(scmnd);

			/* Return the request slot to the free list. */
			list_del(&req->list);
			req->next = target->req_head;
			target->req_head = rsp->tag & ~SRP_TAG_TSK_MGMT;
		} else
			/* Abort in flight for this command; let it finish. */
			req->cmd_done = 1;
	}

	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
}

/* Workqueue wrapper around srp_reconnect_target(). */
static void srp_reconnect_work(void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	srp_reconnect_target(target);
}

/*
 * Process one receive completion: sync the IU for CPU access, dispatch
 * on the SRP opcode, then hand the buffer back to the device.
 */
static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
	struct srp_iu *iu;
	u8 opcode;

	/* The RX ring index is encoded in the work request ID. */
	iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV];

	dma_sync_single_for_cpu(target->srp_host->dev->dma_device, iu->dma,
				target->max_ti_iu_len, DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	if (0) {
		/* Dead code: hex dump of the received IU for debugging. */
		int i;

		printk(KERN_ERR PFX "recv completion, opcode 0x%02x\n", opcode);

		for (i = 0; i < wc->byte_len; ++i) {
			if (i % 8 == 0)
				printk(KERN_ERR "  [%02x] ", i);
			printk(" %02x", ((u8 *) iu->buf)[i]);
			if ((i + 1) % 8 == 0)
				printk("\n");
		}

		if (wc->byte_len % 8)
			printk("\n");
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(target, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		printk(KERN_WARNING PFX "Got target logout request\n");
		break;

	default:
		printk(KERN_WARNING PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	dma_sync_single_for_device(target->srp_host->dev->dma_device, iu->dma,
				   target->max_ti_iu_len, DMA_FROM_DEVICE);
}

/*
 * CQ completion handler: re-arm the CQ, then drain it.  Receive
 * completions (SRP_OP_RECV set in wr_id) are dispatched to
 * srp_handle_recv(); send completions just advance tx_tail.  Any error
 * completion on a live target schedules reconnect work.
 */
static void srp_completion(struct ib_cq *cq, void *target_ptr)
{
	struct srp_target_port *target = target_ptr;
	struct ib_wc wc;
	unsigned long flags;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status) {
			printk(KERN_ERR PFX "failed %s status %d\n",
			       wc.wr_id & SRP_OP_RECV ? "receive" : "send",
			       wc.status);
			spin_lock_irqsave(target->scsi_host->host_lock, flags);
			if (target->state == SRP_TARGET_LIVE)
				schedule_work(&target->work);
			spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
			break;
		}

		if (wc.wr_id & SRP_OP_RECV)
			srp_handle_recv(target, &wc);
		else
			++target->tx_tail;
	}
}

/*
 * Post the next RX IU to the receive queue.  Caller must hold
 * host_lock (protects rx_head).  The ring index is tagged with
 * SRP_OP_RECV in wr_id so srp_completion() can tell RX from TX.
 */
static int __srp_post_recv(struct srp_target_port *target)
{
	struct srp_iu *iu;
	struct ib_sge list;
	struct ib_recv_wr wr, *bad_wr;
	unsigned int next;
	int ret;

	next 	 = target->rx_head & (SRP_RQ_SIZE - 1);
	wr.wr_id = next | SRP_OP_RECV;
	iu 	 = target->rx_ring[next];

	list.addr   = iu->dma;
	list.length = iu->size;
	list.lkey   = target->srp_host->mr->lkey;

	wr.next     = NULL;
	wr.sg_list  = &list;
	wr.num_sge  = 1;

	ret = ib_post_recv(target->qp, &wr, &bad_wr);
	if (!ret)
		++target->rx_head;

	return ret;
}

/* Locked wrapper around __srp_post_recv(). */
static int srp_post_recv(struct srp_target_port *target)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(target->scsi_host->host_lock, flags);
	ret = __srp_post_recv(target);
	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);

	return ret;
}

/*
 * Must be called with target->scsi_host->host_lock held to protect
 * req_lim and tx_head.
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target)
{
	if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
		return NULL;

	/*
	 * NOTE(review): the mask is "& SRP_SQ_SIZE", not
	 * "& (SRP_SQ_SIZE - 1)", while tx_ring holds SRP_SQ_SIZE + 1
	 * entries -- verify the intended indexing scheme against
	 * ib_srp.h before changing anything.
	 */
	return target->tx_ring[target->tx_head & SRP_SQ_SIZE];
}

/*
 * Must be called with target->scsi_host->host_lock held to protect
 * req_lim and tx_head.
 */
static int __srp_post_send(struct srp_target_port *target,
			   struct srp_iu *iu, int len)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;
	int ret = 0;

	/* Honor the target's SRP flow-control credit count. */
	if (target->req_lim < 1) {
		printk(KERN_ERR PFX "Target has req_lim %d\n", target->req_lim);
		return -EAGAIN;
	}

	list.addr   = iu->dma;
	list.length = len;
	list.lkey   = target->srp_host->mr->lkey;

	wr.next       = NULL;
	wr.wr_id      = target->tx_head & SRP_SQ_SIZE;
	wr.sg_list    = &list;
	wr.num_sge    = 1;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(target->qp, &wr, &bad_wr);

	if (!ret) {
		++target->tx_head;
		--target->req_lim;
	}

	return ret;
}

/*
 * SCSI midlayer queuecommand entry point: grab a TX IU and a request
 * slot, build the SRP_CMD IU, map the data buffer, post a receive for
 * the response and send the command.  Called with host_lock held by
 * the midlayer.  Returns SCSI_MLQUEUE_HOST_BUSY to request a retry
 * when resources are exhausted or the target is reconnecting.
 */
static int srp_queuecommand(struct scsi_cmnd *scmnd,
			    void (*done)(struct scsi_cmnd *))
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_cmd *cmd;
	long req_index;
	int len;

	if (target->state == SRP_TARGET_CONNECTING)
		goto err;

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED) {
		scmnd->result = DID_BAD_TARGET << 16;
		done(scmnd);
		return 0;
	}

	iu = __srp_get_tx_iu(target);
	if (!iu)
		goto err;

	dma_sync_single_for_cpu(target->srp_host->dev->dma_device, iu->dma,
				SRP_MAX_IU_LEN, DMA_TO_DEVICE);

	req_index = target->req_head;

	scmnd->scsi_done     = done;
	scmnd->result        = 0;
	/* Stash the request index so the EH callbacks can find it. */
	scmnd->host_scribble = (void *) req_index;

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
	cmd->lun    = cpu_to_be64((u64) scmnd->device->lun << 48);
	cmd->tag    = req_index;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

	req = &target->req_ring[req_index];

	req->scmnd    = scmnd;
	req->cmd      = iu;
	req->cmd_done = 0;
	req->tsk_mgmt = NULL;

	len = srp_map_data(scmnd, target, req);
	if (len < 0) {
		printk(KERN_ERR PFX "Failed to map data\n");
		goto err;
	}

	if (__srp_post_recv(target)) {
		printk(KERN_ERR PFX "Recv failed\n");
		goto err_unmap;
	}

	dma_sync_single_for_device(target->srp_host->dev->dma_device, iu->dma,
				   SRP_MAX_IU_LEN, DMA_TO_DEVICE);

	if (__srp_post_send(target, iu, len)) {
		printk(KERN_ERR PFX "Send failed\n");
		goto err_unmap;
	}

	/* Commit: pop the slot off the free list, track it as in-flight. */
	target->req_head = req->next;
	list_add_tail(&req->list, &target->req_queue);

	return 0;

err_unmap:
	srp_unmap_data(scmnd, target, req);

err:
	return SCSI_MLQUEUE_HOST_BUSY;
}

/*
 * Allocate the RX ring (sized by the target's max IU length learned at
 * login) and the TX ring.  On any failure every IU allocated so far is
 * freed and -ENOMEM is returned.
 */
static int srp_alloc_iu_bufs(struct srp_target_port *target)
{
	int i;

	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		target->rx_ring[i] = srp_alloc_iu(target->srp_host,
						  target->max_ti_iu_len,
						  GFP_KERNEL, DMA_FROM_DEVICE);
		if (!target->rx_ring[i])
			goto err;
	}

	for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
		target->tx_ring[i] = srp_alloc_iu(target->srp_host,
						  SRP_MAX_IU_LEN,
						  GFP_KERNEL, DMA_TO_DEVICE);
		if (!target->tx_ring[i])
			goto err;
	}

	return 0;

err:
	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		srp_free_iu(target->srp_host, target->rx_ring[i]);
		target->rx_ring[i] = NULL;
	}

	for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
		srp_free_iu(target->srp_host, target->tx_ring[i]);
		target->tx_ring[i] = NULL;
	}

	return -ENOMEM;
}

/*
 * Decode a CM REJ and set target->status accordingly: redirects become
 * SRP_PORT_REDIRECT / SRP_DLID_REDIRECT so srp_connect_target() can
 * retry; everything else becomes -ECONNRESET.
 */
static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event,
			       struct srp_target_port *target)
{
	struct ib_class_port_info *cpi;
	int opcode;

	switch (event->param.rej_rcvd.reason) {
	case IB_CM_REJ_PORT_CM_REDIRECT:
		cpi = event->param.rej_rcvd.ari;
		target->path.dlid = cpi->redirect_lid;
		target->path.pkey = cpi->redirect_pkey;
		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
		memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);

		/* A zero DLID means we must re-query the SA for a path. */
		target->status = target->path.dlid ?
			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
		break;

	case IB_CM_REJ_PORT_REDIRECT:
		if (topspin_workarounds &&
		    !memcmp(&target->ioc_guid, topspin_oui, 3)) {
			/*
			 * Topspin/Cisco SRP gateways incorrectly send
			 * reject reason code 25 when they mean 24
			 * (port redirect).
			 */
			memcpy(target->path.dgid.raw,
			       event->param.rej_rcvd.ari, 16);

			printk(KERN_DEBUG PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
			       (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
			       (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));

			target->status = SRP_PORT_REDIRECT;
		} else {
			printk(KERN_WARNING " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
			target->status = -ECONNRESET;
		}
		break;

	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		printk(KERN_WARNING " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		target->status = -ECONNRESET;
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		/* The target sent an SRP-level reject in the private data. */
		opcode = *(u8 *) event->private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej = event->private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				printk(KERN_WARNING PFX
				       "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				printk(KERN_WARNING PFX
				       "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
		} else
			printk(KERN_WARNING " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
			       " opcode 0x%02x\n", opcode);
		target->status = -ECONNRESET;
		break;

	default:
		printk(KERN_WARNING " REJ reason 0x%x\n",
		       event->param.rej_rcvd.reason);
		target->status = -ECONNRESET;
	}
}

/*
 * Connection manager callback for this target.  On REP: parse the
 * SRP_LOGIN_RSP, allocate the IU rings, walk the QP through RTR and
 * RTS, post the first receive and send the RTU.  REJ, REQ error and
 * timewait exit set target->status; "comp" marks events that must
 * wake a waiter blocked on target->done.
 */
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct srp_target_port *target = cm_id->context;
	struct ib_qp_attr *qp_attr = NULL;
	int attr_mask = 0;
	int comp = 0;
	int opcode = 0;

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		printk(KERN_DEBUG PFX "Sending CM REQ failed\n");
		comp = 1;
		target->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		comp = 1;
		opcode = *(u8 *) event->private_data;

		if (opcode == SRP_LOGIN_RSP) {
			struct srp_login_rsp *rsp = event->private_data;

			target->max_ti_iu_len = be32_to_cpu(rsp->max_ti_iu_len);
			target->req_lim       = be32_to_cpu(rsp->req_lim_delta);

			/* Never queue more commands than we have credits. */
			target->scsi_host->can_queue = min(target->req_lim,
							   target->scsi_host->can_queue);
		} else {
			printk(KERN_WARNING PFX "Unhandled RSP opcode %#x\n", opcode);
			target->status = -ECONNRESET;
			break;
		}

		target->status = srp_alloc_iu_bufs(target);
		if (target->status)
			break;

		qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
		if (!qp_attr) {
			target->status = -ENOMEM;
			break;
		}

		qp_attr->qp_state = IB_QPS_RTR;
		target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
		if (target->status)
			break;

		target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
		if (target->status)
			break;

		target->status = srp_post_recv(target);
		if (target->status)
			break;

		qp_attr->qp_state = IB_QPS_RTS;
		target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
		if (target->status)
			break;

		target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
		if (target->status)
			break;

		target->status = ib_send_cm_rtu(cm_id, NULL, 0);
		if (target->status)
			break;

		break;

	case IB_CM_REJ_RECEIVED:
		printk(KERN_DEBUG PFX "REJ received\n");
		comp = 1;

		srp_cm_rej_handler(cm_id, event, target);
		break;

	case IB_CM_MRA_RECEIVED:
		printk(KERN_ERR PFX "MRA received\n");
		break;

	case IB_CM_DREP_RECEIVED:
		break;

	case IB_CM_TIMEWAIT_EXIT:
		printk(KERN_ERR PFX "connection closed\n");

		comp = 1;
		target->status = 0;
		break;

	default:
		printk(KERN_WARNING PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&target->done);

	kfree(qp_attr);

	return 0;
}

/*
 * Send an SRP task-management request (abort / LUN reset) for @scmnd
 * and wait up to SRP_ABORT_TIMEOUT_MS for the response.  Returns
 * SUCCESS only if the target reported the function completed; FAILED
 * otherwise.  Called from the SCSI error handler.
 */
static int srp_send_tsk_mgmt(struct scsi_cmnd *scmnd, u8 func)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;
	int req_index;
	int ret = FAILED;

	spin_lock_irq(target->scsi_host->host_lock);

	/* host_scribble == -1 means the command already completed. */
	if (scmnd->host_scribble == (void *) -1L)
		goto out;

	req_index = (long) scmnd->host_scribble;
	printk(KERN_ERR "Abort for req_index %d\n", req_index);

	req = &target->req_ring[req_index];
	init_completion(&req->done);

	iu = __srp_get_tx_iu(target);
	if (!iu)
		goto out;

	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

	tsk_mgmt->opcode 	= SRP_TSK_MGMT;
	tsk_mgmt->lun 		= cpu_to_be64((u64) scmnd->device->lun << 48);
	tsk_mgmt->tag 		= req_index | SRP_TAG_TSK_MGMT;
	tsk_mgmt->tsk_mgmt_func = func;
	tsk_mgmt->task_tag 	= req_index;

	if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
		goto out;

	req->tsk_mgmt = iu;

	/* Drop the lock while sleeping; srp_process_rsp() signals done. */
	spin_unlock_irq(target->scsi_host->host_lock);
	if (!wait_for_completion_timeout(&req->done,
					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
		return FAILED;
	spin_lock_irq(target->scsi_host->host_lock);

	if (req->cmd_done) {
		/* The command completed while the abort was in flight. */
		list_del(&req->list);
		req->next = target->req_head;
		target->req_head = req_index;

		scmnd->scsi_done(scmnd);
	} else if (!req->tsk_status) {
		scmnd->result = DID_ABORT << 16;
		ret = SUCCESS;
	}

out:
	spin_unlock_irq(target->scsi_host->host_lock);
	return ret;
}

/* SCSI EH: abort a single command. */
static int srp_abort(struct scsi_cmnd *scmnd)
{
	printk(KERN_ERR "SRP abort called\n");

	return srp_send_tsk_mgmt(scmnd, SRP_TSK_ABORT_TASK);
}

/* SCSI EH: reset the logical unit the command was sent to. */
static int srp_reset_device(struct scsi_cmnd *scmnd)
{
	printk(KERN_ERR "SRP reset_device called\n");

	return srp_send_tsk_mgmt(scmnd, SRP_TSK_LUN_RESET);
}

/* SCSI EH: host reset -- body continues beyond this chunk. */
static int srp_reset_host(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	int ret = FAILED;

printk(KERN_ERR PFX "SRP reset_host called\n");12241224+12251225+ if (!srp_reconnect_target(target))12261226+ ret = SUCCESS;12271227+12281228+ return ret;12291229+}12301230+12311231+static struct scsi_host_template srp_template = {12321232+ .module = THIS_MODULE,12331233+ .name = DRV_NAME,12341234+ .info = srp_target_info,12351235+ .queuecommand = srp_queuecommand,12361236+ .eh_abort_handler = srp_abort,12371237+ .eh_device_reset_handler = srp_reset_device,12381238+ .eh_host_reset_handler = srp_reset_host,12391239+ .can_queue = SRP_SQ_SIZE,12401240+ .this_id = -1,12411241+ .sg_tablesize = SRP_MAX_INDIRECT,12421242+ .cmd_per_lun = SRP_SQ_SIZE,12431243+ .use_clustering = ENABLE_CLUSTERING12441244+};12451245+12461246+static int srp_add_target(struct srp_host *host, struct srp_target_port *target)12471247+{12481248+ sprintf(target->target_name, "SRP.T10:%016llX",12491249+ (unsigned long long) be64_to_cpu(target->id_ext));12501250+12511251+ if (scsi_add_host(target->scsi_host, host->dev->dma_device))12521252+ return -ENODEV;12531253+12541254+ down(&host->target_mutex);12551255+ list_add_tail(&target->list, &host->target_list);12561256+ up(&host->target_mutex);12571257+12581258+ target->state = SRP_TARGET_LIVE;12591259+12601260+ /* XXX: are we supposed to have a definition of SCAN_WILD_CARD ?? 
*/12611261+ scsi_scan_target(&target->scsi_host->shost_gendev,12621262+ 0, target->scsi_id, ~0, 0);12631263+12641264+ return 0;12651265+}12661266+12671267+static void srp_release_class_dev(struct class_device *class_dev)12681268+{12691269+ struct srp_host *host =12701270+ container_of(class_dev, struct srp_host, class_dev);12711271+12721272+ complete(&host->released);12731273+}12741274+12751275+static struct class srp_class = {12761276+ .name = "infiniband_srp",12771277+ .release = srp_release_class_dev12781278+};12791279+12801280+/*12811281+ * Target ports are added by writing12821282+ *12831283+ * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,12841284+ * pkey=<P_Key>,service_id=<service ID>12851285+ *12861286+ * to the add_target sysfs attribute.12871287+ */12881288+enum {12891289+ SRP_OPT_ERR = 0,12901290+ SRP_OPT_ID_EXT = 1 << 0,12911291+ SRP_OPT_IOC_GUID = 1 << 1,12921292+ SRP_OPT_DGID = 1 << 2,12931293+ SRP_OPT_PKEY = 1 << 3,12941294+ SRP_OPT_SERVICE_ID = 1 << 4,12951295+ SRP_OPT_MAX_SECT = 1 << 5,12961296+ SRP_OPT_ALL = (SRP_OPT_ID_EXT |12971297+ SRP_OPT_IOC_GUID |12981298+ SRP_OPT_DGID |12991299+ SRP_OPT_PKEY |13001300+ SRP_OPT_SERVICE_ID),13011301+};13021302+13031303+static match_table_t srp_opt_tokens = {13041304+ { SRP_OPT_ID_EXT, "id_ext=%s" },13051305+ { SRP_OPT_IOC_GUID, "ioc_guid=%s" },13061306+ { SRP_OPT_DGID, "dgid=%s" },13071307+ { SRP_OPT_PKEY, "pkey=%x" },13081308+ { SRP_OPT_SERVICE_ID, "service_id=%s" },13091309+ { SRP_OPT_MAX_SECT, "max_sect=%d" },13101310+ { SRP_OPT_ERR, NULL }13111311+};13121312+13131313+static int srp_parse_options(const char *buf, struct srp_target_port *target)13141314+{13151315+ char *options, *sep_opt;13161316+ char *p;13171317+ char dgid[3];13181318+ substring_t args[MAX_OPT_ARGS];13191319+ int opt_mask = 0;13201320+ int token;13211321+ int ret = -EINVAL;13221322+ int i;13231323+13241324+ options = kstrdup(buf, GFP_KERNEL);13251325+ if (!options)13261326+ return -ENOMEM;13271327+13281328+ sep_opt = 
options;13291329+ while ((p = strsep(&sep_opt, ",")) != NULL) {13301330+ if (!*p)13311331+ continue;13321332+13331333+ token = match_token(p, srp_opt_tokens, args);13341334+ opt_mask |= token;13351335+13361336+ switch (token) {13371337+ case SRP_OPT_ID_EXT:13381338+ p = match_strdup(args);13391339+ target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));13401340+ kfree(p);13411341+ break;13421342+13431343+ case SRP_OPT_IOC_GUID:13441344+ p = match_strdup(args);13451345+ target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));13461346+ kfree(p);13471347+ break;13481348+13491349+ case SRP_OPT_DGID:13501350+ p = match_strdup(args);13511351+ if (strlen(p) != 32) {13521352+ printk(KERN_WARNING PFX "bad dest GID parameter '%s'\n", p);13531353+ goto out;13541354+ }13551355+13561356+ for (i = 0; i < 16; ++i) {13571357+ strlcpy(dgid, p + i * 2, 3);13581358+ target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);13591359+ }13601360+ break;13611361+13621362+ case SRP_OPT_PKEY:13631363+ if (match_hex(args, &token)) {13641364+ printk(KERN_WARNING PFX "bad P_Key parameter '%s'\n", p);13651365+ goto out;13661366+ }13671367+ target->path.pkey = cpu_to_be16(token);13681368+ break;13691369+13701370+ case SRP_OPT_SERVICE_ID:13711371+ p = match_strdup(args);13721372+ target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));13731373+ kfree(p);13741374+ break;13751375+13761376+ case SRP_OPT_MAX_SECT:13771377+ if (match_int(args, &token)) {13781378+ printk(KERN_WARNING PFX "bad max sect parameter '%s'\n", p);13791379+ goto out;13801380+ }13811381+ target->scsi_host->max_sectors = token;13821382+ break;13831383+13841384+ default:13851385+ printk(KERN_WARNING PFX "unknown parameter or missing value "13861386+ "'%s' in target creation request\n", p);13871387+ goto out;13881388+ }13891389+ }13901390+13911391+ if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)13921392+ ret = 0;13931393+ else13941394+ for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)13951395+ if 
((srp_opt_tokens[i].token & SRP_OPT_ALL) &&13961396+ !(srp_opt_tokens[i].token & opt_mask))13971397+ printk(KERN_WARNING PFX "target creation request is "13981398+ "missing parameter '%s'\n",13991399+ srp_opt_tokens[i].pattern);14001400+14011401+out:14021402+ kfree(options);14031403+ return ret;14041404+}14051405+14061406+static ssize_t srp_create_target(struct class_device *class_dev,14071407+ const char *buf, size_t count)14081408+{14091409+ struct srp_host *host =14101410+ container_of(class_dev, struct srp_host, class_dev);14111411+ struct Scsi_Host *target_host;14121412+ struct srp_target_port *target;14131413+ int ret;14141414+ int i;14151415+14161416+ target_host = scsi_host_alloc(&srp_template,14171417+ sizeof (struct srp_target_port));14181418+ if (!target_host)14191419+ return -ENOMEM;14201420+14211421+ target = host_to_target(target_host);14221422+ memset(target, 0, sizeof *target);14231423+14241424+ target->scsi_host = target_host;14251425+ target->srp_host = host;14261426+14271427+ INIT_WORK(&target->work, srp_reconnect_work, target);14281428+14291429+ for (i = 0; i < SRP_SQ_SIZE - 1; ++i)14301430+ target->req_ring[i].next = i + 1;14311431+ target->req_ring[SRP_SQ_SIZE - 1].next = -1;14321432+ INIT_LIST_HEAD(&target->req_queue);14331433+14341434+ ret = srp_parse_options(buf, target);14351435+ if (ret)14361436+ goto err;14371437+14381438+ ib_get_cached_gid(host->dev, host->port, 0, &target->path.sgid);14391439+14401440+ printk(KERN_DEBUG PFX "new target: id_ext %016llx ioc_guid %016llx pkey %04x "14411441+ "service_id %016llx dgid %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",14421442+ (unsigned long long) be64_to_cpu(target->id_ext),14431443+ (unsigned long long) be64_to_cpu(target->ioc_guid),14441444+ be16_to_cpu(target->path.pkey),14451445+ (unsigned long long) be64_to_cpu(target->service_id),14461446+ (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[0]),14471447+ (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[2]),14481448+ (int) 
be16_to_cpu(*(__be16 *) &target->path.dgid.raw[4]),14491449+ (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[6]),14501450+ (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[8]),14511451+ (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[10]),14521452+ (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[12]),14531453+ (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[14]));14541454+14551455+ ret = srp_create_target_ib(target);14561456+ if (ret)14571457+ goto err;14581458+14591459+ target->cm_id = ib_create_cm_id(host->dev, srp_cm_handler, target);14601460+ if (IS_ERR(target->cm_id)) {14611461+ ret = PTR_ERR(target->cm_id);14621462+ goto err_free;14631463+ }14641464+14651465+ ret = srp_connect_target(target);14661466+ if (ret) {14671467+ printk(KERN_ERR PFX "Connection failed\n");14681468+ goto err_cm_id;14691469+ }14701470+14711471+ ret = srp_add_target(host, target);14721472+ if (ret)14731473+ goto err_disconnect;14741474+14751475+ return count;14761476+14771477+err_disconnect:14781478+ srp_disconnect_target(target);14791479+14801480+err_cm_id:14811481+ ib_destroy_cm_id(target->cm_id);14821482+14831483+err_free:14841484+ srp_free_target_ib(target);14851485+14861486+err:14871487+ scsi_host_put(target_host);14881488+14891489+ return ret;14901490+}14911491+14921492+static CLASS_DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);14931493+14941494+static ssize_t show_ibdev(struct class_device *class_dev, char *buf)14951495+{14961496+ struct srp_host *host =14971497+ container_of(class_dev, struct srp_host, class_dev);14981498+14991499+ return sprintf(buf, "%s\n", host->dev->name);15001500+}15011501+15021502+static CLASS_DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);15031503+15041504+static ssize_t show_port(struct class_device *class_dev, char *buf)15051505+{15061506+ struct srp_host *host =15071507+ container_of(class_dev, struct srp_host, class_dev);15081508+15091509+ return sprintf(buf, "%d\n", host->port);15101510+}15111511+15121512+static 
CLASS_DEVICE_ATTR(port, S_IRUGO, show_port, NULL);15131513+15141514+static struct srp_host *srp_add_port(struct ib_device *device,15151515+ __be64 node_guid, u8 port)15161516+{15171517+ struct srp_host *host;15181518+15191519+ host = kzalloc(sizeof *host, GFP_KERNEL);15201520+ if (!host)15211521+ return NULL;15221522+15231523+ INIT_LIST_HEAD(&host->target_list);15241524+ init_MUTEX(&host->target_mutex);15251525+ init_completion(&host->released);15261526+ host->dev = device;15271527+ host->port = port;15281528+15291529+ host->initiator_port_id[7] = port;15301530+ memcpy(host->initiator_port_id + 8, &node_guid, 8);15311531+15321532+ host->pd = ib_alloc_pd(device);15331533+ if (IS_ERR(host->pd))15341534+ goto err_free;15351535+15361536+ host->mr = ib_get_dma_mr(host->pd,15371537+ IB_ACCESS_LOCAL_WRITE |15381538+ IB_ACCESS_REMOTE_READ |15391539+ IB_ACCESS_REMOTE_WRITE);15401540+ if (IS_ERR(host->mr))15411541+ goto err_pd;15421542+15431543+ host->class_dev.class = &srp_class;15441544+ host->class_dev.dev = device->dma_device;15451545+ snprintf(host->class_dev.class_id, BUS_ID_SIZE, "srp-%s-%d",15461546+ device->name, port);15471547+15481548+ if (class_device_register(&host->class_dev))15491549+ goto err_mr;15501550+ if (class_device_create_file(&host->class_dev, &class_device_attr_add_target))15511551+ goto err_class;15521552+ if (class_device_create_file(&host->class_dev, &class_device_attr_ibdev))15531553+ goto err_class;15541554+ if (class_device_create_file(&host->class_dev, &class_device_attr_port))15551555+ goto err_class;15561556+15571557+ return host;15581558+15591559+err_class:15601560+ class_device_unregister(&host->class_dev);15611561+15621562+err_mr:15631563+ ib_dereg_mr(host->mr);15641564+15651565+err_pd:15661566+ ib_dealloc_pd(host->pd);15671567+15681568+err_free:15691569+ kfree(host);15701570+15711571+ return NULL;15721572+}15731573+15741574+static void srp_add_one(struct ib_device *device)15751575+{15761576+ struct list_head *dev_list;15771577+ struct 
srp_host *host;15781578+ struct ib_device_attr *dev_attr;15791579+ int s, e, p;15801580+15811581+ dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);15821582+ if (!dev_attr)15831583+ return;15841584+15851585+ if (ib_query_device(device, dev_attr)) {15861586+ printk(KERN_WARNING PFX "Couldn't query node GUID for %s.\n",15871587+ device->name);15881588+ goto out;15891589+ }15901590+15911591+ dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);15921592+ if (!dev_list)15931593+ goto out;15941594+15951595+ INIT_LIST_HEAD(dev_list);15961596+15971597+ if (device->node_type == IB_NODE_SWITCH) {15981598+ s = 0;15991599+ e = 0;16001600+ } else {16011601+ s = 1;16021602+ e = device->phys_port_cnt;16031603+ }16041604+16051605+ for (p = s; p <= e; ++p) {16061606+ host = srp_add_port(device, dev_attr->node_guid, p);16071607+ if (host)16081608+ list_add_tail(&host->list, dev_list);16091609+ }16101610+16111611+ ib_set_client_data(device, &srp_client, dev_list);16121612+16131613+out:16141614+ kfree(dev_attr);16151615+}16161616+16171617+static void srp_remove_one(struct ib_device *device)16181618+{16191619+ struct list_head *dev_list;16201620+ struct srp_host *host, *tmp_host;16211621+ LIST_HEAD(target_list);16221622+ struct srp_target_port *target, *tmp_target;16231623+ unsigned long flags;16241624+16251625+ dev_list = ib_get_client_data(device, &srp_client);16261626+16271627+ list_for_each_entry_safe(host, tmp_host, dev_list, list) {16281628+ class_device_unregister(&host->class_dev);16291629+ /*16301630+ * Wait for the sysfs entry to go away, so that no new16311631+ * target ports can be created.16321632+ */16331633+ wait_for_completion(&host->released);16341634+16351635+ /*16361636+ * Mark all target ports as removed, so we stop queueing16371637+ * commands and don't try to reconnect.16381638+ */16391639+ down(&host->target_mutex);16401640+ list_for_each_entry_safe(target, tmp_target,16411641+ &host->target_list, list) {16421642+ spin_lock_irqsave(target->scsi_host->host_lock, 
flags);16431643+ if (target->state != SRP_TARGET_REMOVED)16441644+ target->state = SRP_TARGET_REMOVED;16451645+ spin_unlock_irqrestore(target->scsi_host->host_lock, flags);16461646+ }16471647+ up(&host->target_mutex);16481648+16491649+ /*16501650+ * Wait for any reconnection tasks that may have16511651+ * started before we marked our target ports as16521652+ * removed, and any target port removal tasks.16531653+ */16541654+ flush_scheduled_work();16551655+16561656+ list_for_each_entry_safe(target, tmp_target,16571657+ &host->target_list, list) {16581658+ scsi_remove_host(target->scsi_host);16591659+ srp_disconnect_target(target);16601660+ ib_destroy_cm_id(target->cm_id);16611661+ srp_free_target_ib(target);16621662+ scsi_host_put(target->scsi_host);16631663+ }16641664+16651665+ ib_dereg_mr(host->mr);16661666+ ib_dealloc_pd(host->pd);16671667+ kfree(host);16681668+ }16691669+16701670+ kfree(dev_list);16711671+}16721672+16731673+static int __init srp_init_module(void)16741674+{16751675+ int ret;16761676+16771677+ ret = class_register(&srp_class);16781678+ if (ret) {16791679+ printk(KERN_ERR PFX "couldn't register class infiniband_srp\n");16801680+ return ret;16811681+ }16821682+16831683+ ret = ib_register_client(&srp_client);16841684+ if (ret) {16851685+ printk(KERN_ERR PFX "couldn't register IB client\n");16861686+ class_unregister(&srp_class);16871687+ return ret;16881688+ }16891689+16901690+ return 0;16911691+}16921692+16931693+static void __exit srp_cleanup_module(void)16941694+{16951695+ ib_unregister_client(&srp_client);16961696+ class_unregister(&srp_class);16971697+}16981698+16991699+module_init(srp_init_module);17001700+module_exit(srp_cleanup_module);
+150
drivers/infiniband/ulp/srp/ib_srp.h
/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ib_srp.h 3932 2005-11-01 17:19:29Z roland $
 */

#ifndef IB_SRP_H
#define IB_SRP_H

#include <linux/types.h>
#include <linux/list.h>

#include <asm/semaphore.h>

#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_cm.h>

enum {
	SRP_PATH_REC_TIMEOUT_MS	= 1000,	/* SA path record query timeout */
	SRP_ABORT_TIMEOUT_MS	= 5000,	/* task-management response wait */

	/* Redirect statuses produced by the CM REJ handler. */
	SRP_PORT_REDIRECT	= 1,
	SRP_DLID_REDIRECT	= 2,

	SRP_MAX_IU_LEN		= 256,	/* size of each information unit buffer */

	/* RQ is a power of two; SQ leaves one slot free for task mgmt. */
	SRP_RQ_SHIFT		= 6,
	SRP_RQ_SIZE		= 1 << SRP_RQ_SHIFT,
	SRP_SQ_SIZE		= SRP_RQ_SIZE - 1,
	SRP_CQ_SIZE		= SRP_SQ_SIZE + SRP_RQ_SIZE,

	/* Tag bit that marks a task-management IU (vs. a normal command). */
	SRP_TAG_TSK_MGMT	= 1 << (SRP_RQ_SHIFT + 1)
};

/* Work-request ID bit distinguishing receives from sends.
 * NOTE(review): 1 << 31 left-shifts into the sign bit of int; (1u << 31)
 * would be the UB-free spelling — confirm against wr_id usage. */
#define SRP_OP_RECV		(1 << 31)
/* Max scatter/gather entries that fit in one IU after cmd + indirect hdr. */
#define SRP_MAX_INDIRECT	((SRP_MAX_IU_LEN -			\
				  sizeof (struct srp_cmd) -		\
				  sizeof (struct srp_indirect_buf)) / 16)

enum srp_target_state {
	SRP_TARGET_LIVE,	/* connected, accepting commands */
	SRP_TARGET_CONNECTING,	/* (re)connect in progress */
	SRP_TARGET_DEAD,	/* reconnect failed */
	SRP_TARGET_REMOVED	/* being torn down; queue nothing */
};

/* Per (IB device, port) state, created by srp_add_port(). */
struct srp_host {
	u8			initiator_port_id[16];	/* byte 7 = port, 8..15 = node GUID */
	struct ib_device       *dev;
	u8			port;
	struct ib_pd	       *pd;
	struct ib_mr	       *mr;	/* DMA MR covering all IU buffers */
	struct class_device	class_dev;
	struct list_head	target_list;	/* srp_target_port.list */
	struct semaphore	target_mutex;	/* protects target_list */
	struct completion	released;	/* sysfs entry gone */
	struct list_head	list;	/* on the per-device host list */
};

/* One outstanding SCSI command slot in the request ring. */
struct srp_request {
	struct list_head	list;		/* on target->req_queue while active */
	struct scsi_cmnd       *scmnd;
	struct srp_iu	       *cmd;		/* SRP_CMD IU in flight */
	struct srp_iu	       *tsk_mgmt;	/* task-mgmt IU, if any */
	DECLARE_PCI_UNMAP_ADDR(direct_mapping)
	struct completion	done;		/* task-mgmt response arrived */
	short			next;		/* free-list link (index, -1 = end) */
	u8			cmd_done;	/* command completed during abort wait */
	u8			tsk_status;	/* task-mgmt response status */
};

/* One connected SRP target port (lives in Scsi_Host hostdata). */
struct srp_target_port {
	__be64			id_ext;
	__be64			ioc_guid;
	__be64			service_id;
	struct srp_host	       *srp_host;
	struct Scsi_Host       *scsi_host;
	char			target_name[32];
	unsigned int		scsi_id;

	struct ib_sa_path_rec	path;
	struct ib_sa_query     *path_query;
	int			path_query_id;

	struct ib_cm_id	       *cm_id;
	struct ib_cq	       *cq;
	struct ib_qp	       *qp;

	int			max_ti_iu_len;	/* from SRP_LOGIN_RSP */
	s32			req_lim;	/* target's request limit */

	unsigned		rx_head;
	struct srp_iu	       *rx_ring[SRP_RQ_SIZE];

	unsigned		tx_head;
	unsigned		tx_tail;
	struct srp_iu	       *tx_ring[SRP_SQ_SIZE + 1];

	int			req_head;	/* free-list head index */
	struct list_head	req_queue;	/* requests in flight */
	struct srp_request	req_ring[SRP_SQ_SIZE];

	struct work_struct	work;	/* reconnect work */

	struct list_head	list;	/* on host->target_list */
	struct completion	done;	/* CM handshake finished */
	int			status;	/* outcome of last connect attempt */
	enum srp_target_state	state;
};

/* A DMA-mapped information-unit buffer. */
struct srp_iu {
	dma_addr_t		dma;
	void		       *buf;
	size_t			size;
	enum dma_data_direction	direction;
};

#endif /* IB_SRP_H */
+226
include/scsi/srp.h
/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id$
 */

#ifndef SCSI_SRP_H
#define SCSI_SRP_H

/*
 * Structures and constants for the SCSI RDMA Protocol (SRP) as
 * defined by the INCITS T10 committee.  This file was written using
 * draft Revision 16a of the SRP standard.
 *
 * All multi-byte fields are big-endian on the wire (__be* types);
 * plain u64 tags are opaque and echoed back unmodified by the target.
 */

#include <linux/types.h>

/* Information unit opcodes (initiator-to-target < 0x80, responses >= 0x40). */
enum {
	SRP_LOGIN_REQ	= 0x00,
	SRP_TSK_MGMT	= 0x01,
	SRP_CMD		= 0x02,
	SRP_I_LOGOUT	= 0x03,
	SRP_LOGIN_RSP	= 0xc0,
	SRP_RSP		= 0xc1,
	SRP_LOGIN_REJ	= 0xc2,
	SRP_T_LOGOUT	= 0x80,
	SRP_CRED_REQ	= 0x81,
	SRP_AER_REQ	= 0x82,
	SRP_CRED_RSP	= 0x41,
	SRP_AER_RSP	= 0x42
};

/* Supported buffer-format bits negotiated at login. */
enum {
	SRP_BUF_FORMAT_DIRECT	= 1 << 1,
	SRP_BUF_FORMAT_INDIRECT	= 1 << 2
};

/* Data descriptor format codes in srp_cmd.buf_fmt nibbles. */
enum {
	SRP_NO_DATA_DESC	= 0,
	SRP_DATA_DESC_DIRECT	= 1,
	SRP_DATA_DESC_INDIRECT	= 2
};

/* Task-management function codes for srp_tsk_mgmt.tsk_mgmt_func. */
enum {
	SRP_TSK_ABORT_TASK	= 0x01,
	SRP_TSK_ABORT_TASK_SET	= 0x02,
	SRP_TSK_CLEAR_TASK_SET	= 0x04,
	SRP_TSK_LUN_RESET	= 0x08,
	SRP_TSK_CLEAR_ACA	= 0x40
};

enum srp_login_rej_reason {
	SRP_LOGIN_REJ_UNABLE_ESTABLISH_CHANNEL		= 0x00010000,
	SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES		= 0x00010001,
	SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE	= 0x00010002,
	SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL		= 0x00010003,
	SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT	= 0x00010004,
	SRP_LOGIN_REJ_MULTI_CHANNEL_UNSUPPORTED		= 0x00010005,
	SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED		= 0x00010006
};

/* One remote buffer: virtual address, rkey, length. */
struct srp_direct_buf {
	__be64	va;
	__be32	key;
	__be32	len;
};

/*
 * We need the packed attribute because the SRP spec puts the list of
 * descriptors at an offset of 20, which is not aligned to the size
 * of struct srp_direct_buf.
 */
struct srp_indirect_buf {
	struct srp_direct_buf	table_desc;
	__be32			len;
	struct srp_direct_buf	desc_list[0] __attribute__((packed));
};

enum {
	SRP_MULTICHAN_SINGLE = 0,	/* terminate existing channels */
	SRP_MULTICHAN_MULTI  = 1	/* keep existing channels */
};

struct srp_login_req {
	u8	opcode;
	u8	reserved1[7];
	u64	tag;
	__be32	req_it_iu_len;
	u8	reserved2[4];
	__be16	req_buf_fmt;
	u8	req_flags;
	u8	reserved3[5];
	u8	initiator_port_id[16];
	u8	target_port_id[16];
};

struct srp_login_rsp {
	u8	opcode;
	u8	reserved1[3];
	__be32	req_lim_delta;
	u64	tag;
	__be32	max_it_iu_len;
	__be32	max_ti_iu_len;
	__be16	buf_fmt;
	u8	rsp_flags;
	u8	reserved2[25];
};

struct srp_login_rej {
	u8	opcode;
	u8	reserved1[3];
	__be32	reason;		/* enum srp_login_rej_reason */
	u64	tag;
	u8	reserved2[8];
	__be16	buf_fmt;
	u8	reserved3[6];
};

struct srp_i_logout {
	u8	opcode;
	u8	reserved[7];
	u64	tag;
};

struct srp_t_logout {
	u8	opcode;
	u8	sol_not;
	u8	reserved[2];
	__be32	reason;
	u64	tag;
};

/*
 * We need the packed attribute because the SRP spec only aligns the
 * 8-byte LUN field to 4 bytes.
 */
struct srp_tsk_mgmt {
	u8	opcode;
	u8	sol_not;
	u8	reserved1[6];
	u64	tag;
	u8	reserved2[4];
	__be64	lun __attribute__((packed));
	u8	reserved3[2];
	u8	tsk_mgmt_func;
	u8	reserved4;
	u64	task_tag;	/* tag of the task being managed */
	u8	reserved5[8];
};

/*
 * We need the packed attribute because the SRP spec only aligns the
 * 8-byte LUN field to 4 bytes.
 */
struct srp_cmd {
	u8	opcode;
	u8	sol_not;
	u8	reserved1[3];
	u8	buf_fmt;	/* out fmt in high nibble, in fmt in low */
	u8	data_out_desc_cnt;
	u8	data_in_desc_cnt;
	u64	tag;
	u8	reserved2[4];
	__be64	lun __attribute__((packed));
	u8	reserved3;
	u8	task_attr;
	u8	reserved4;
	u8	add_cdb_len;
	u8	cdb[16];
	u8	add_data[0];	/* additional CDB, then data descriptors */
};

/* srp_rsp.flags bits: which trailing fields/counts are valid. */
enum {
	SRP_RSP_FLAG_RSPVALID = 1 << 0,
	SRP_RSP_FLAG_SNSVALID = 1 << 1,
	SRP_RSP_FLAG_DOOVER   = 1 << 2,
	SRP_RSP_FLAG_DOUNDER  = 1 << 3,
	SRP_RSP_FLAG_DIOVER   = 1 << 4,
	SRP_RSP_FLAG_DIUNDER  = 1 << 5
};

struct srp_rsp {
	u8	opcode;
	u8	sol_not;
	u8	reserved1[2];
	__be32	req_lim_delta;
	u64	tag;
	u8	reserved2[2];
	u8	flags;
	u8	status;		/* SCSI status byte */
	__be32	data_out_res_cnt;
	__be32	data_in_res_cnt;
	__be32	sense_data_len;
	__be32	resp_data_len;
	u8	data[0];	/* response data, then sense data */
};

#endif /* SCSI_SRP_H */