	  This is the virtual HBA driver for virtio.  If the kernel will
	  be used in a virtual machine, say Y or M.

source "drivers/scsi/csiostor/Kconfig"

endif # SCSI_LOWLEVEL
# Chelsio T4 FCoE offload initiator (csiostor).
config SCSI_CHELSIO_FCOE
	tristate "Chelsio Communications FCoE support"
	depends on PCI && SCSI
	select SCSI_FC_ATTRS
	select FW_LOADER
	help
	  This driver supports FCoE Offload functionality over
	  Chelsio T4-based 10Gb Converged Network Adapters.

	  For general information about Chelsio and our products, visit
	  our website at <http://www.chelsio.com>.

	  For customer support, please visit our customer support page at
	  <http://www.chelsio.com/support.html>.

	  Please send feedback to <linux-bugs@chelsio.com>.

	  To compile this driver as a module choose M here; the module
	  will be called csiostor.
/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * csio_attr.c - FC transport attribute glue: registers local/remote ports
 * with the SCSI FC transport class and implements the NPIV vport and
 * rport callbacks used by scsi_transport_fc.
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
#include <scsi/fc/fc_fs.h>

#include "csio_init.h"

static void
csio_vport_set_state(struct csio_lnode *ln);

/*
 * csio_reg_rnode - Register a remote port with FC transport.
 * @rn: Rnode representing remote port.
 *
 * Call fc_remote_port_add() to register this remote port with FC transport.
 * If remote port is Initiator OR Target OR both, change the role
 * appropriately.
 */
void
csio_reg_rnode(struct csio_rnode *rn)
{
	struct csio_lnode *ln = csio_rnode_to_lnode(rn);
	struct Scsi_Host *shost = csio_ln_to_shost(ln);
	struct fc_rport_identifiers ids;
	struct fc_rport *rport;
	struct csio_service_parms *sp;

	ids.node_name	= wwn_to_u64(csio_rn_wwnn(rn));
	ids.port_name	= wwn_to_u64(csio_rn_wwpn(rn));
	ids.port_id	= rn->nport_id;
	ids.roles	= FC_RPORT_ROLE_UNKNOWN;

	/* Already registered with the transport: only the role changed. */
	if (rn->role & CSIO_RNFR_INITIATOR || rn->role & CSIO_RNFR_TARGET) {
		rport = rn->rport;
		CSIO_ASSERT(rport != NULL);
		goto update_role;
	}

	rn->rport = fc_remote_port_add(shost, 0, &ids);
	if (!rn->rport) {
		csio_ln_err(ln, "Failed to register rport = 0x%x.\n",
			    rn->nport_id);
		return;
	}

	ln->num_reg_rnodes++;
	rport = rn->rport;
	/* host_lock guards dd_data against the transport's rport callbacks. */
	spin_lock_irq(shost->host_lock);
	*((struct csio_rnode **)rport->dd_data) = rn;
	spin_unlock_irq(shost->host_lock);

	sp = &rn->rn_sparm;
	/*
	 * NOTE(review): sp_bb_data is big-endian on the FC wire (see
	 * clsp[] below being ntohs()'d); no byte swap is applied here —
	 * confirm csio_service_parms already stores it in CPU order.
	 */
	rport->maxframe_size = sp->csp.sp_bb_data;
	/* Only Class 3 service is advertised (clsp[2] == Class 3). */
	if (ntohs(sp->clsp[2].cp_class) & FC_CPC_VALID)
		rport->supported_classes = FC_COS_CLASS3;
	else
		rport->supported_classes = FC_COS_UNSPECIFIED;
update_role:
	if (rn->role & CSIO_RNFR_INITIATOR)
		ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
	if (rn->role & CSIO_RNFR_TARGET)
		ids.roles |= FC_RPORT_ROLE_FCP_TARGET;

	if (ids.roles != FC_RPORT_ROLE_UNKNOWN)
		fc_remote_port_rolechg(rport, ids.roles);

	/* Cache the transport-assigned SCSI target id for I/O mapping. */
	rn->scsi_id = rport->scsi_target_id;

	csio_ln_dbg(ln, "Remote port x%x role 0x%x registered\n",
		    rn->nport_id, ids.roles);
}

/*
 * csio_unreg_rnode - Unregister a remote port with FC transport.
 * @rn: Rnode representing remote port.
 *
 * Call fc_remote_port_delete() to unregister this remote port with FC
 * transport.
 */
void
csio_unreg_rnode(struct csio_rnode *rn)
{
	struct csio_lnode *ln = csio_rnode_to_lnode(rn);
	struct fc_rport *rport = rn->rport;

	rn->role &= ~(CSIO_RNFR_INITIATOR | CSIO_RNFR_TARGET);
	fc_remote_port_delete(rport);
	ln->num_reg_rnodes--;

	csio_ln_dbg(ln, "Remote port x%x un-registered\n", rn->nport_id);
}

/*
 * csio_lnode_async_event - Async events from local port.
 * @ln: lnode representing local port.
 * @fc_evt: event type.
 *
 * Async events from local node that FC transport/SCSI ML
 * should be made aware of (Eg: RSCN).
 */
void
csio_lnode_async_event(struct csio_lnode *ln, enum csio_ln_fc_evt fc_evt)
{
	switch (fc_evt) {
	case CSIO_LN_FC_RSCN:
		/* Get payload of rscn from ln */
		/* For each RSCN entry */
			/*
			 * fc_host_post_event(shost,
			 *		      fc_get_event_number(),
			 *		      FCH_EVT_RSCN,
			 *		      rscn_entry);
			 */
		break;
	case CSIO_LN_FC_LINKUP:
		/* send fc_host_post_event */
		/* set vport state */
		if (csio_is_npiv_ln(ln))
			csio_vport_set_state(ln);

		break;
	case CSIO_LN_FC_LINKDOWN:
		/* send fc_host_post_event */
		/* set vport state */
		if (csio_is_npiv_ln(ln))
			csio_vport_set_state(ln);

		break;
	case CSIO_LN_FC_ATTRIB_UPDATE:
		csio_fchost_attr_init(ln);
		break;
	default:
		break;
	}
}

/*
 * csio_fchost_attr_init - Initialize FC transport attributes
 * @ln: Lnode.
 *
 * Seeds the static fc_host attributes (WWNN/WWPN, classes, speeds,
 * max NPIV vports, supported/active FC-4 types) for this lnode's host.
 */
void
csio_fchost_attr_init(struct csio_lnode *ln)
{
	struct Scsi_Host *shost = csio_ln_to_shost(ln);

	fc_host_node_name(shost) = wwn_to_u64(csio_ln_wwnn(ln));
	fc_host_port_name(shost) = wwn_to_u64(csio_ln_wwpn(ln));

	fc_host_supported_classes(shost) = FC_COS_CLASS3;
	fc_host_max_npiv_vports(shost) =
			(csio_lnode_to_hw(ln))->fres_info.max_vnps;
	fc_host_supported_speeds(shost) = FC_PORTSPEED_10GBIT |
		FC_PORTSPEED_1GBIT;

	/* NOTE(review): no ntohs() on sp_bb_data here either — see
	 * csio_reg_rnode(); verify stored byte order. */
	fc_host_maxframe_size(shost) = ln->ln_sparm.csp.sp_bb_data;
	/* Bit for FC-4 type 0x08 (FCP): word 7 of the FC-4 types bitmap. */
	memset(fc_host_supported_fc4s(shost), 0,
	       sizeof(fc_host_supported_fc4s(shost)));
	fc_host_supported_fc4s(shost)[7] = 1;

	memset(fc_host_active_fc4s(shost), 0,
	       sizeof(fc_host_active_fc4s(shost)));
	fc_host_active_fc4s(shost)[7] = 1;
}

/*
 * csio_get_host_port_id - sysfs entries for nport_id is
 * populated/cached from this function
 */
static void
csio_get_host_port_id(struct Scsi_Host *shost)
{
	struct csio_lnode *ln = shost_priv(shost);
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	spin_lock_irq(&hw->lock);
	fc_host_port_id(shost) = ln->nport_id;
	spin_unlock_irq(&hw->lock);
}

/*
 * csio_get_host_port_type - Return FC local port type.
 * @shost: scsi host.
 */
static void
csio_get_host_port_type(struct Scsi_Host *shost)
{
	struct csio_lnode *ln = shost_priv(shost);
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	spin_lock_irq(&hw->lock);
	if (csio_is_npiv_ln(ln))
		fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
	else
		fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
	spin_unlock_irq(&hw->lock);
}

/*
 * csio_get_host_port_state - Return FC local port state.
 * @shost: scsi host.
 *
 * Maps the lnode state-machine state string onto the transport's
 * FC_PORTSTATE_* values.
 */
static void
csio_get_host_port_state(struct Scsi_Host *shost)
{
	struct csio_lnode *ln = shost_priv(shost);
	struct csio_hw *hw = csio_lnode_to_hw(ln);
	char state[16];

	spin_lock_irq(&hw->lock);

	csio_lnode_state_to_str(ln, state);
	if (!strcmp(state, "READY"))
		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
	else if (!strcmp(state, "OFFLINE"))
		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
	else
		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;

	spin_unlock_irq(&hw->lock);
}

/*
 * csio_get_host_speed - Return link speed to FC transport.
 * @shost: scsi host.
 */
static void
csio_get_host_speed(struct Scsi_Host *shost)
{
	struct csio_lnode *ln = shost_priv(shost);
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	spin_lock_irq(&hw->lock);
	switch (hw->pport[ln->portid].link_speed) {
	case FW_PORT_CAP_SPEED_1G:
		fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
		break;
	case FW_PORT_CAP_SPEED_10G:
		fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
		break;
	default:
		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
		break;
	}
	spin_unlock_irq(&hw->lock);
}

/*
 * csio_get_host_fabric_name - Return fabric name
 * @shost: scsi host.
 *
 * The fabric WWNN is taken from the rnode representing the fabric
 * controller (well-known FLOGI DID); 0 if no fabric rnode exists.
 */
static void
csio_get_host_fabric_name(struct Scsi_Host *shost)
{
	struct csio_lnode *ln = shost_priv(shost);
	struct csio_rnode *rn = NULL;
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	spin_lock_irq(&hw->lock);
	rn = csio_rnode_lookup_portid(ln, FC_FID_FLOGI);
	if (rn)
		fc_host_fabric_name(shost) = wwn_to_u64(csio_rn_wwnn(rn));
	else
		fc_host_fabric_name(shost) = 0;
	spin_unlock_irq(&hw->lock);
}

/*
 * csio_get_stats - Return FC transport statistics.
 * @shost: scsi host.
 *
 * Accumulates per-port frame/byte counters read from the PHY into the
 * cached fc_host_statistics and returns it.
 */
static struct fc_host_statistics *
csio_get_stats(struct Scsi_Host *shost)
{
	struct csio_lnode *ln = shost_priv(shost);
	struct csio_hw *hw = csio_lnode_to_hw(ln);
	struct fc_host_statistics *fhs = &ln->fch_stats;
	struct fw_fcoe_port_stats fcoe_port_stats;
	uint64_t seconds;

	memset(&fcoe_port_stats, 0, sizeof(struct fw_fcoe_port_stats));
	csio_get_phy_port_stats(hw, ln->portid, &fcoe_port_stats);

	fhs->tx_frames	+= (fcoe_port_stats.tx_bcast_frames +
			    fcoe_port_stats.tx_mcast_frames +
			    fcoe_port_stats.tx_ucast_frames +
			    fcoe_port_stats.tx_offload_frames);
	fhs->tx_words	+= (fcoe_port_stats.tx_bcast_bytes +
			    fcoe_port_stats.tx_mcast_bytes +
			    fcoe_port_stats.tx_ucast_bytes +
			    fcoe_port_stats.tx_offload_bytes) /
							CSIO_WORD_TO_BYTE;
	fhs->rx_frames	+= (fcoe_port_stats.rx_bcast_frames +
			    fcoe_port_stats.rx_mcast_frames +
			    fcoe_port_stats.rx_ucast_frames);
	fhs->rx_words	+= (fcoe_port_stats.rx_bcast_bytes +
			    fcoe_port_stats.rx_mcast_bytes +
			    fcoe_port_stats.rx_ucast_bytes) /
							CSIO_WORD_TO_BYTE;
	fhs->error_frames += fcoe_port_stats.rx_err_frames;
	fhs->fcp_input_requests +=  ln->stats.n_input_requests;
	fhs->fcp_output_requests +=  ln->stats.n_output_requests;
	fhs->fcp_control_requests +=  ln->stats.n_control_requests;
	fhs->fcp_input_megabytes +=  ln->stats.n_input_bytes >> 20;
	fhs->fcp_output_megabytes +=  ln->stats.n_output_bytes >> 20;
	fhs->link_failure_count = ln->stats.n_link_down;
	/* Reset stats for the device */
	/* assumes n_reset_start is in msecs (same unit as
	 * jiffies_to_msecs()) — TODO confirm where it is recorded. */
	seconds = jiffies_to_msecs(jiffies) - hw->stats.n_reset_start;
	do_div(seconds, 1000);
	fhs->seconds_since_last_reset = seconds;

	return fhs;
}

/*
 * csio_set_rport_loss_tmo - Set the rport dev loss timeout
 * @rport: fc rport.
 * @timeout: new value for dev loss tmo.
 *
 * If timeout is non zero set the dev_loss_tmo to timeout, else set
 * dev_loss_tmo to one.
 */
static void
csio_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
{
	if (timeout)
		rport->dev_loss_tmo = timeout;
	else
		rport->dev_loss_tmo = 1;
}

/*
 * csio_vport_set_state - Derive the fc_vport state for an NPIV lnode.
 * @ln: NPIV lnode.
 *
 * The vport is ACTIVE only when both the physical lnode and the
 * virtual lnode are READY and the fabric supports NPIV.
 */
static void
csio_vport_set_state(struct csio_lnode *ln)
{
	struct fc_vport *fc_vport = ln->fc_vport;
	struct csio_lnode *pln = ln->pln;
	char state[16];

	/* Set fc vport state based on phyiscal lnode */
	csio_lnode_state_to_str(pln, state);
	if (strcmp(state, "READY")) {
		fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
		return;
	}

	if (!(pln->flags & CSIO_LNF_NPIVSUPP)) {
		fc_vport_set_state(fc_vport, FC_VPORT_NO_FABRIC_SUPP);
		return;
	}

	/* Set fc vport state based on virtual lnode */
	csio_lnode_state_to_str(ln, state);
	if (strcmp(state, "READY")) {
		fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
		return;
	}
	fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}

/*
 * csio_fcoe_alloc_vnp - Allocate a VNP (virtual N_Port) from firmware.
 * @hw: HW module.
 * @ln: lnode to bind the new VNP to.
 *
 * Issues an FCoE VNP ALLOC mailbox command and, on success, records the
 * firmware-assigned VNP flow id and WWNN/WWPN in @ln.  Returns 0 or a
 * negative errno.  Called with hw->lock NOT held; acquires it internally
 * (dropped around msleep() while retrying a busy mailbox).
 */
static int
csio_fcoe_alloc_vnp(struct csio_hw *hw, struct csio_lnode *ln)
{
	struct csio_lnode *pln;
	struct csio_mb  *mbp;
	struct fw_fcoe_vnp_cmd *rsp;
	int ret = 0;
	int retry = 0;

	/* Issue VNP cmd to alloc vport */
	/* Allocate Mbox request */
	spin_lock_irq(&hw->lock);
	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		ret = -ENOMEM;
		goto out;
	}

	pln = ln->pln;
	ln->fcf_flowid = pln->fcf_flowid;
	ln->portid = pln->portid;

	csio_fcoe_vnp_alloc_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
				    pln->fcf_flowid, pln->vnp_flowid, 0,
				    csio_ln_wwnn(ln), csio_ln_wwpn(ln), NULL);

	for (retry = 0; retry < 3; retry++) {
		/* FW is expected to complete vnp cmd in immediate mode
		 * without much delay.
		 * Otherwise, there will be increase in IO latency since HW
		 * lock is held till completion of vnp mbox cmd.
		 */
		ret = csio_mb_issue(hw, mbp);
		if (ret != -EBUSY)
			break;

		/* Retry if mbox returns busy */
		spin_unlock_irq(&hw->lock);
		msleep(2000);
		spin_lock_irq(&hw->lock);
	}

	if (ret) {
		csio_ln_err(ln, "Failed to issue mbox FCoE VNP command\n");
		goto out_free;
	}

	/* Process Mbox response of VNP command */
	rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb);
	if (FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)) != FW_SUCCESS) {
		csio_ln_err(ln, "FCOE VNP ALLOC cmd returned 0x%x!\n",
			    FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)));
		ret = -EINVAL;
		goto out_free;
	}

	ln->vnp_flowid = FW_FCOE_VNP_CMD_VNPI_GET(
				ntohl(rsp->gen_wwn_to_vnpi));
	memcpy(csio_ln_wwnn(ln), rsp->vnport_wwnn, 8);
	memcpy(csio_ln_wwpn(ln), rsp->vnport_wwpn, 8);

	csio_ln_dbg(ln, "FCOE VNPI: 0x%x\n", ln->vnp_flowid);
	csio_ln_dbg(ln, "\tWWNN: %x%x%x%x%x%x%x%x\n",
		    ln->ln_sparm.wwnn[0], ln->ln_sparm.wwnn[1],
		    ln->ln_sparm.wwnn[2], ln->ln_sparm.wwnn[3],
		    ln->ln_sparm.wwnn[4], ln->ln_sparm.wwnn[5],
		    ln->ln_sparm.wwnn[6], ln->ln_sparm.wwnn[7]);
	csio_ln_dbg(ln, "\tWWPN: %x%x%x%x%x%x%x%x\n",
		    ln->ln_sparm.wwpn[0], ln->ln_sparm.wwpn[1],
		    ln->ln_sparm.wwpn[2], ln->ln_sparm.wwpn[3],
		    ln->ln_sparm.wwpn[4], ln->ln_sparm.wwpn[5],
		    ln->ln_sparm.wwpn[6], ln->ln_sparm.wwpn[7]);

out_free:
	mempool_free(mbp, hw->mb_mempool);
out:
	spin_unlock_irq(&hw->lock);
	return ret;
}

/*
 * csio_fcoe_free_vnp - Release a VNP back to firmware.
 * @hw: HW module.
 * @ln: lnode whose VNP is to be freed.
 *
 * Issues an FCoE VNP FREE mailbox command.  Same locking/retry scheme
 * as csio_fcoe_alloc_vnp().  Returns 0 or a negative errno.
 */
static int
csio_fcoe_free_vnp(struct csio_hw *hw, struct csio_lnode *ln)
{
	struct csio_lnode *pln;
	struct csio_mb  *mbp;
	struct fw_fcoe_vnp_cmd *rsp;
	int ret = 0;
	int retry = 0;

	/* Issue VNP cmd to free vport */
	/* Allocate Mbox request */

	spin_lock_irq(&hw->lock);
	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		ret = -ENOMEM;
		goto out;
	}

	pln = ln->pln;

	csio_fcoe_vnp_free_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
				   ln->fcf_flowid, ln->vnp_flowid,
				   NULL);

	for (retry = 0; retry < 3; retry++) {
		ret = csio_mb_issue(hw, mbp);
		if (ret != -EBUSY)
			break;

		/* Retry if mbox returns busy */
		spin_unlock_irq(&hw->lock);
		msleep(2000);
		spin_lock_irq(&hw->lock);
	}

	if (ret) {
		csio_ln_err(ln, "Failed to issue mbox FCoE VNP command\n");
		goto out_free;
	}

	/* Process Mbox response of VNP command */
	rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb);
	if (FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)) != FW_SUCCESS) {
		csio_ln_err(ln, "FCOE VNP FREE cmd returned 0x%x!\n",
			    FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)));
		ret = -EINVAL;
	}

out_free:
	mempool_free(mbp, hw->mb_mempool);
out:
	spin_unlock_irq(&hw->lock);
	return ret;
}

/*
 * csio_vport_create - FC transport vport_create callback.
 * @fc_vport: transport vport to back with a new lnode.
 * @disable: create in disabled state (unused here).
 *
 * Creates an lnode (shost) for the vport, validates/installs any
 * user-supplied WWNN/WWPN, allocates a VNP from firmware, and fills in
 * the transport attributes.  Returns 0 on success, -1 on failure.
 */
static int
csio_vport_create(struct fc_vport *fc_vport, bool disable)
{
	struct Scsi_Host *shost = fc_vport->shost;
	struct csio_lnode *pln = shost_priv(shost);
	struct csio_lnode *ln = NULL;
	struct csio_hw *hw = csio_lnode_to_hw(pln);
	uint8_t wwn[8];
	int ret = -1;

	ln = csio_shost_init(hw, &fc_vport->dev, false, pln);
	if (!ln)
		goto error;

	if (fc_vport->node_name != 0) {
		u64_to_wwn(fc_vport->node_name, wwn);

		if (!CSIO_VALID_WWN(wwn)) {
			csio_ln_err(ln,
				    "vport create failed. Invalid wwnn\n");
			goto error;
		}
		memcpy(csio_ln_wwnn(ln), wwn, 8);
	}

	if (fc_vport->port_name != 0) {
		u64_to_wwn(fc_vport->port_name, wwn);

		if (!CSIO_VALID_WWN(wwn)) {
			csio_ln_err(ln,
				    "vport create failed. Invalid wwpn\n");
			goto error;
		}

		/* WWPNs must be unique across all lnodes on this hw. */
		if (csio_lnode_lookup_by_wwpn(hw, wwn)) {
			csio_ln_err(ln,
			    "vport create failed. wwpn already exists\n");
			goto error;
		}
		memcpy(csio_ln_wwpn(ln), wwn, 8);
	}

	fc_vport_set_state(fc_vport, FC_VPORT_INITIALIZING);

	if (csio_fcoe_alloc_vnp(hw, ln))
		goto error;

	*(struct csio_lnode **)fc_vport->dd_data = ln;
	ln->fc_vport = fc_vport;
	/* If the user left names zero, publish the firmware-assigned ones. */
	if (!fc_vport->node_name)
		fc_vport->node_name = wwn_to_u64(csio_ln_wwnn(ln));
	if (!fc_vport->port_name)
		fc_vport->port_name = wwn_to_u64(csio_ln_wwpn(ln));
	csio_fchost_attr_init(ln);
	return 0;
error:
	if (ln)
		csio_shost_exit(ln);

	return ret;
}

/*
 * csio_vport_delete - FC transport vport_delete callback.
 * @fc_vport: vport being destroyed.
 *
 * Quiesces I/O, closes the lnode, frees the VNP (unless the vport was
 * already disabled, in which case the VNP is gone) and tears down the
 * shost.  Always returns 0.
 */
static int
csio_vport_delete(struct fc_vport *fc_vport)
{
	struct csio_lnode *ln = *(struct csio_lnode **)fc_vport->dd_data;
	struct Scsi_Host *shost = csio_ln_to_shost(ln);
	struct csio_hw *hw = csio_lnode_to_hw(ln);
	int rmv;

	spin_lock_irq(&hw->lock);
	rmv = csio_is_hw_removing(hw);
	spin_unlock_irq(&hw->lock);

	/* Driver unload in progress: skip hardware interaction. */
	if (rmv) {
		csio_shost_exit(ln);
		return 0;
	}

	/* Quiesce ios and send remove event to lnode */
	scsi_block_requests(shost);
	spin_lock_irq(&hw->lock);
	csio_scsim_cleanup_io_lnode(csio_hw_to_scsim(hw), ln);
	csio_lnode_close(ln);
	spin_unlock_irq(&hw->lock);
	scsi_unblock_requests(shost);

	/* Free vnp */
	if (fc_vport->vport_state != FC_VPORT_DISABLED)
		csio_fcoe_free_vnp(hw, ln);

	csio_shost_exit(ln);
	return 0;
}

/*
 * csio_vport_disable - FC transport vport_disable callback.
 * @fc_vport: vport to disable/enable.
 * @disable: true to disable (frees the VNP), false to re-enable
 *           (re-allocates a VNP).
 *
 * Returns 0 on success, -1 if re-enable fails.
 */
static int
csio_vport_disable(struct fc_vport *fc_vport, bool disable)
{
	struct csio_lnode *ln = *(struct csio_lnode **)fc_vport->dd_data;
	struct Scsi_Host *shost = csio_ln_to_shost(ln);
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	/* disable vport */
	if (disable) {
		/* Quiesce ios and send stop event to lnode */
		scsi_block_requests(shost);
		spin_lock_irq(&hw->lock);
		csio_scsim_cleanup_io_lnode(csio_hw_to_scsim(hw), ln);
		csio_lnode_stop(ln);
		spin_unlock_irq(&hw->lock);
		scsi_unblock_requests(shost);

		/* Free vnp */
		csio_fcoe_free_vnp(hw, ln);
		fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
		csio_ln_err(ln, "vport disabled\n");
		return 0;
	} else {
		/* enable vport */
		fc_vport_set_state(fc_vport, FC_VPORT_INITIALIZING);
		if (csio_fcoe_alloc_vnp(hw, ln)) {
			csio_ln_err(ln, "vport enabled failed.\n");
			return -1;
		}
		csio_ln_err(ln, "vport enabled\n");
		return 0;
	}
}

/*
 * csio_dev_loss_tmo_callbk - FC transport devloss-timeout callback.
 * @rport: rport whose dev_loss_tmo expired.
 *
 * Queues a CSIO_EVT_DEV_LOSS event to the event worker so all rnode
 * events are serialized, unless the HW is being removed or the rnode
 * came back online in the meantime.
 */
static void
csio_dev_loss_tmo_callbk(struct fc_rport *rport)
{
	struct csio_rnode *rn;
	struct csio_hw *hw;
	struct csio_lnode *ln;

	rn = *((struct csio_rnode **)rport->dd_data);
	ln = csio_rnode_to_lnode(rn);
	hw = csio_lnode_to_hw(ln);

	spin_lock_irq(&hw->lock);

	/* return if driver is being removed or same rnode comes back online */
	if (csio_is_hw_removing(hw) || csio_is_rnode_ready(rn))
		goto out;

	csio_ln_dbg(ln, "devloss timeout on rnode:%p portid:x%x flowid:x%x\n",
		    rn, rn->nport_id, csio_rn_flowid(rn));

	CSIO_INC_STATS(ln, n_dev_loss_tmo);

	/*
	 * enqueue devloss event to event worker thread to serialize all
	 * rnode events.
	 */
	if (csio_enqueue_evt(hw, CSIO_EVT_DEV_LOSS, &rn, sizeof(rn))) {
		CSIO_INC_STATS(hw, n_evt_drop);
		goto out;
	}

	if (!(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
		hw->flags |= CSIO_HWF_FWEVT_PENDING;
		spin_unlock_irq(&hw->lock);
		schedule_work(&hw->evtq_work);
		return;
	}

out:
	spin_unlock_irq(&hw->lock);
}

/* FC transport functions template - Physical port */
struct fc_function_template csio_fc_transport_funcs = {
	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_supported_fc4s = 1,
	.show_host_maxframe_size = 1,

	.get_host_port_id = csio_get_host_port_id,
	.show_host_port_id = 1,

	.get_host_port_type = csio_get_host_port_type,
	.show_host_port_type = 1,

	.get_host_port_state = csio_get_host_port_state,
	.show_host_port_state = 1,

	.show_host_active_fc4s = 1,
	.get_host_speed = csio_get_host_speed,
	.show_host_speed = 1,
	.get_host_fabric_name = csio_get_host_fabric_name,
	.show_host_fabric_name = 1,

	.get_fc_host_stats = csio_get_stats,

	.dd_fcrport_size = sizeof(struct csio_rnode *),
	.show_rport_maxframe_size = 1,
	.show_rport_supported_classes = 1,

	.set_rport_dev_loss_tmo = csio_set_rport_loss_tmo,
	.show_rport_dev_loss_tmo = 1,

	.show_starget_port_id = 1,
	.show_starget_node_name = 1,
	.show_starget_port_name = 1,

	.dev_loss_tmo_callbk = csio_dev_loss_tmo_callbk,
	.dd_fcvport_size = sizeof(struct csio_lnode *),

	.vport_create = csio_vport_create,
	.vport_disable = csio_vport_disable,
	.vport_delete = csio_vport_delete,
};

/* FC transport functions template - Virtual port */
/* Same as the physical template minus the vport_* ops: an NPIV vport
 * cannot itself create further vports. */
struct fc_function_template csio_fc_transport_vport_funcs = {
	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_supported_fc4s = 1,
	.show_host_maxframe_size = 1,

	.get_host_port_id = csio_get_host_port_id,
	.show_host_port_id = 1,

	.get_host_port_type = csio_get_host_port_type,
	.show_host_port_type = 1,

	.get_host_port_state = csio_get_host_port_state,
	.show_host_port_state = 1,
	.show_host_active_fc4s = 1,

	.get_host_speed = csio_get_host_speed,
	.show_host_speed = 1,

	.get_host_fabric_name = csio_get_host_fabric_name,
	.show_host_fabric_name = 1,

	.get_fc_host_stats = csio_get_stats,

	.dd_fcrport_size = sizeof(struct csio_rnode *),
	.show_rport_maxframe_size = 1,
	.show_rport_supported_classes = 1,

	.set_rport_dev_loss_tmo = csio_set_rport_loss_tmo,
	.show_rport_dev_loss_tmo = 1,

	.show_starget_port_id = 1,
	.show_starget_node_name = 1,
	.show_starget_port_name = 1,

	.dev_loss_tmo_callbk = csio_dev_loss_tmo_callbk,

};
+121
drivers/scsi/csiostor/csio_defs.h
/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * csio_defs.h - Common driver-wide macros, list helpers and the tiny
 * state-machine framework used by the hw/lnode/rnode modules.
 */

#ifndef __CSIO_DEFS_H__
#define __CSIO_DEFS_H__

#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/bug.h>
#include <linux/pci.h>
#include <linux/jiffies.h>

#define CSIO_INVALID_IDX		0xFFFFFFFF
#define CSIO_INC_STATS(elem, val)	((elem)->stats.val++)
#define CSIO_DEC_STATS(elem, val)	((elem)->stats.val--)
/* An FC WWN's NAA nibble must be 0x5 (registered name) to be valid here. */
#define CSIO_VALID_WWN(__n)		((*__n >> 4) == 0x5 ? true : false)
#define CSIO_DID_MASK			0xFFFFFF
#define CSIO_WORD_TO_BYTE		4

/* 64-bit MMIO fallback for platforms without native readq/writeq.
 * NOTE(review): composed of two 32-bit accesses, so not atomic with
 * respect to the device — only safe for registers tolerant of split
 * access. */
#ifndef readq
static inline u64 readq(void __iomem *addr)
{
	return readl(addr) + ((u64)readl(addr + 4) << 32);
}

static inline void writeq(u64 val, void __iomem *addr)
{
	writel(val, addr);
	writel(val >> 32, addr + 4);
}
#endif

/* True when the node has been unlinked and re-initialized to self. */
static inline int
csio_list_deleted(struct list_head *list)
{
	return ((list->next == list) && (list->prev == list));
}

#define csio_list_next(elem)	(((struct list_head *)(elem))->next)
#define csio_list_prev(elem)	(((struct list_head *)(elem))->prev)

/* State machine */
/* A state is simply a function that consumes (object, event). */
typedef void (*csio_sm_state_t)(void *, uint32_t);

struct csio_sm {
	struct list_head	sm_list;	/* membership in owner's queue */
	csio_sm_state_t		sm_state;	/* current state handler */
};

static inline void
csio_set_state(void *smp, void *state)
{
	((struct csio_sm *)smp)->sm_state = (csio_sm_state_t)state;
}

static inline void
csio_init_state(struct csio_sm *smp, void *state)
{
	csio_set_state(smp, state);
}

/* Deliver an event by invoking the current state handler. */
static inline void
csio_post_event(void *smp, uint32_t evt)
{
	((struct csio_sm *)smp)->sm_state(smp, evt);
}

static inline csio_sm_state_t
csio_get_state(void *smp)
{
	return ((struct csio_sm *)smp)->sm_state;
}

/* Compare current state against a state-handler function pointer. */
static inline bool
csio_match_state(void *smp, void *state)
{
	return (csio_get_state(smp) == (csio_sm_state_t)state);
}

#define CSIO_ASSERT(cond)		BUG_ON(!(cond))

#ifdef __CSIO_DEBUG__
#define CSIO_DB_ASSERT(__c)		CSIO_ASSERT((__c))
#else
#define CSIO_DB_ASSERT(__c)
#endif

#endif /* ifndef __CSIO_DEFS_H__ */
+4395
drivers/scsi/csiostor/csio_hw.c
···11+/*22+ * This file is part of the Chelsio FCoE driver for Linux.33+ *44+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.55+ *66+ * This software is available to you under a choice of one of two77+ * licenses. You may choose to be licensed under the terms of the GNU88+ * General Public License (GPL) Version 2, available from the file99+ * COPYING in the main directory of this source tree, or the1010+ * OpenIB.org BSD license below:1111+ *1212+ * Redistribution and use in source and binary forms, with or1313+ * without modification, are permitted provided that the following1414+ * conditions are met:1515+ *1616+ * - Redistributions of source code must retain the above1717+ * copyright notice, this list of conditions and the following1818+ * disclaimer.1919+ *2020+ * - Redistributions in binary form must reproduce the above2121+ * copyright notice, this list of conditions and the following2222+ * disclaimer in the documentation and/or other materials2323+ * provided with the distribution.2424+ *2525+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,2626+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF2727+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND2828+ * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS2929+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN3030+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN3131+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE3232+ * SOFTWARE.3333+ */3434+3535+#include <linux/pci.h>3636+#include <linux/pci_regs.h>3737+#include <linux/firmware.h>3838+#include <linux/stddef.h>3939+#include <linux/delay.h>4040+#include <linux/string.h>4141+#include <linux/compiler.h>4242+#include <linux/jiffies.h>4343+#include <linux/kernel.h>4444+#include <linux/log2.h>4545+4646+#include "csio_hw.h"4747+#include "csio_lnode.h"4848+#include "csio_rnode.h"4949+5050+int csio_force_master;5151+int csio_dbg_level = 0xFEFF;5252+unsigned int csio_port_mask = 0xf;5353+5454+/* Default FW event queue entries. */5555+static uint32_t csio_evtq_sz = CSIO_EVTQ_SIZE;5656+5757+/* Default MSI param level */5858+int csio_msi = 2;5959+6060+/* FCoE function instances */6161+static int dev_num;6262+6363+/* FCoE Adapter types & its description */6464+static const struct csio_adap_desc csio_fcoe_adapters[] = {6565+ {"T440-Dbg 10G", "Chelsio T440-Dbg 10G [FCoE]"},6666+ {"T420-CR 10G", "Chelsio T420-CR 10G [FCoE]"},6767+ {"T422-CR 10G/1G", "Chelsio T422-CR 10G/1G [FCoE]"},6868+ {"T440-CR 10G", "Chelsio T440-CR 10G [FCoE]"},6969+ {"T420-BCH 10G", "Chelsio T420-BCH 10G [FCoE]"},7070+ {"T440-BCH 10G", "Chelsio T440-BCH 10G [FCoE]"},7171+ {"T440-CH 10G", "Chelsio T440-CH 10G [FCoE]"},7272+ {"T420-SO 10G", "Chelsio T420-SO 10G [FCoE]"},7373+ {"T420-CX4 10G", "Chelsio T420-CX4 10G [FCoE]"},7474+ {"T420-BT 10G", "Chelsio T420-BT 10G [FCoE]"},7575+ {"T404-BT 1G", "Chelsio T404-BT 1G [FCoE]"},7676+ {"B420-SR 10G", "Chelsio B420-SR 10G [FCoE]"},7777+ {"B404-BT 1G", "Chelsio B404-BT 1G [FCoE]"},7878+ {"T480-CR 10G", "Chelsio T480-CR 10G [FCoE]"},7979+ {"T440-LP-CR 10G", "Chelsio T440-LP-CR 10G [FCoE]"},8080+ {"T4 FPGA", "Chelsio T4 FPGA [FCoE]"}8181+};8282+8383+static void 
csio_mgmtm_cleanup(struct csio_mgmtm *);
static void csio_hw_mbm_cleanup(struct csio_hw *);

/* State machine forward declarations */
static void csio_hws_uninit(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_configuring(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_initializing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_ready(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_quiescing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_quiesced(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_resetting(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_removing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_pcierr(struct csio_hw *, enum csio_hw_ev);

static void csio_hw_initialize(struct csio_hw *hw);
static void csio_evtq_stop(struct csio_hw *hw);
static void csio_evtq_start(struct csio_hw *hw);

/* Non-zero when the HW state machine is in the READY state. */
int csio_is_hw_ready(struct csio_hw *hw)
{
	return csio_match_state(hw, csio_hws_ready);
}

/* Non-zero when the HW state machine is in the REMOVING state. */
int csio_is_hw_removing(struct csio_hw *hw)
{
	return csio_match_state(hw, csio_hws_removing);
}


/*
 * csio_hw_wait_op_done_val - wait until an operation is completed
 * @hw: the HW module
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times. If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there.
Returns 0 if the125125+ * operation completes and -EAGAIN otherwise.126126+ */127127+static int128128+csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask,129129+ int polarity, int attempts, int delay, uint32_t *valp)130130+{131131+ uint32_t val;132132+ while (1) {133133+ val = csio_rd_reg32(hw, reg);134134+135135+ if (!!(val & mask) == polarity) {136136+ if (valp)137137+ *valp = val;138138+ return 0;139139+ }140140+141141+ if (--attempts == 0)142142+ return -EAGAIN;143143+ if (delay)144144+ udelay(delay);145145+ }146146+}147147+148148+void149149+csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask,150150+ uint32_t value)151151+{152152+ uint32_t val = csio_rd_reg32(hw, reg) & ~mask;153153+154154+ csio_wr_reg32(hw, val | value, reg);155155+ /* Flush */156156+ csio_rd_reg32(hw, reg);157157+158158+}159159+160160+/*161161+ * csio_hw_mc_read - read from MC through backdoor accesses162162+ * @hw: the hw module163163+ * @addr: address of first byte requested164164+ * @data: 64 bytes of data containing the requested address165165+ * @ecc: where to store the corresponding 64-bit ECC word166166+ *167167+ * Read 64 bytes of data from MC starting at a 64-byte-aligned address168168+ * that covers the requested address @addr. 
 If @ecc is not %NULL it
 * is assigned the 64-bit ECC word for the read data.
 */
int
csio_hw_mc_read(struct csio_hw *hw, uint32_t addr, uint32_t *data,
		uint64_t *ecc)
{
	int i;

	/* Another BIST command still in flight => caller must retry. */
	if (csio_rd_reg32(hw, MC_BIST_CMD) & START_BIST)
		return -EBUSY;
	/* Program the 64-byte-aligned address, length and data pattern,
	 * then kick off the BIST read.
	 */
	csio_wr_reg32(hw, addr & ~0x3fU, MC_BIST_CMD_ADDR);
	csio_wr_reg32(hw, 64, MC_BIST_CMD_LEN);
	csio_wr_reg32(hw, 0xc, MC_BIST_DATA_PATTERN);
	csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1),
		      MC_BIST_CMD);
	/* Poll for START_BIST to clear (10 attempts, 1 us apart). */
	i = csio_hw_wait_op_done_val(hw, MC_BIST_CMD, START_BIST,
				     0, 10, 1, NULL);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)

	/* Copy out the 16 data words (highest register first). */
	for (i = 15; i >= 0; i--)
		*data++ = htonl(csio_rd_reg32(hw, MC_DATA(i)));
	if (ecc)
		*ecc = csio_rd_reg64(hw, MC_DATA(16));
#undef MC_DATA
	return 0;
}

/*
 * csio_hw_edc_read - read from EDC through backdoor accesses
 * @hw: the hw module
 * @idx: which EDC to access
 * @addr: address of first byte requested
 * @data: 64 bytes of data containing the requested address
 * @ecc: where to store the corresponding 64-bit ECC word
 *
 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 * that covers the requested address @addr.
 If @ecc is not %NULL it
 * is assigned the 64-bit ECC word for the read data.
 */
int
csio_hw_edc_read(struct csio_hw *hw, int idx, uint32_t addr, uint32_t *data,
		 uint64_t *ecc)
{
	int i;

	/* Each EDC's register block is EDC_STRIDE bytes apart. */
	idx *= EDC_STRIDE;
	if (csio_rd_reg32(hw, EDC_BIST_CMD + idx) & START_BIST)
		return -EBUSY;
	csio_wr_reg32(hw, addr & ~0x3fU, EDC_BIST_CMD_ADDR + idx);
	csio_wr_reg32(hw, 64, EDC_BIST_CMD_LEN + idx);
	csio_wr_reg32(hw, 0xc, EDC_BIST_DATA_PATTERN + idx);
	csio_wr_reg32(hw, BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST,
		      EDC_BIST_CMD + idx);
	/* Poll for START_BIST to clear (10 attempts, 1 us apart). */
	i = csio_hw_wait_op_done_val(hw, EDC_BIST_CMD + idx, START_BIST,
				     0, 10, 1, NULL);
	if (i)
		return i;

#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)

	/* Copy out the 16 data words (highest register first). */
	for (i = 15; i >= 0; i--)
		*data++ = htonl(csio_rd_reg32(hw, EDC_DATA(i)));
	if (ecc)
		*ecc = csio_rd_reg64(hw, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}

/*
 * csio_mem_win_rw - read/write memory through PCIE memory window
 * @hw: the adapter
 * @addr: address of first byte requested
 * @data: MEMWIN0_APERTURE bytes of data containing the requested address
 * @dir: direction of transfer 1 => read, 0 => write
 *
 * Read/write MEMWIN0_APERTURE bytes of data from MC starting at a
 * MEMWIN0_APERTURE-byte-aligned address that covers the requested
 * address @addr.
 */
static int
csio_mem_win_rw(struct csio_hw *hw, u32 addr, __be32 *data, int dir)
{
	int i;

	/*
	 * Setup offset into PCIE memory window. Address must be a
	 * MEMWIN0_APERTURE-byte-aligned address. (Read back MA register to
	 * ensure that changes propagate before we attempt to use the new
	 * values.)
	 */
	csio_wr_reg32(hw, addr & ~(MEMWIN0_APERTURE - 1),
		      PCIE_MEM_ACCESS_OFFSET);
	csio_rd_reg32(hw, PCIE_MEM_ACCESS_OFFSET);

	/* Collecting data 4 bytes at a time upto MEMWIN0_APERTURE */
	for (i = 0; i < MEMWIN0_APERTURE; i = i + sizeof(__be32)) {
		if (dir)
			*data++ = csio_rd_reg32(hw, (MEMWIN0_BASE + i));
		else
			csio_wr_reg32(hw, *data++, (MEMWIN0_BASE + i));
	}

	return 0;
}

/*
 * csio_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
 * @hw: the csio_hw
 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
 * @addr: address within indicated memory type
 * @len: amount of memory to transfer
 * @buf: host memory buffer
 * @dir: direction of transfer 1 => read, 0 => write
 *
 * Reads/writes an [almost] arbitrary memory region in the firmware: the
 * firmware memory address, length and host buffer must be aligned on
 * 32-bit boundaries. The memory is transferred as a raw byte sequence
 * from/to the firmware's memory.
If this memory contains data290290+ * structures which contain multi-byte integers, it's the callers291291+ * responsibility to perform appropriate byte order conversions.292292+ */293293+static int294294+csio_memory_rw(struct csio_hw *hw, int mtype, u32 addr, u32 len,295295+ uint32_t *buf, int dir)296296+{297297+ uint32_t pos, start, end, offset, memoffset;298298+ int ret;299299+ __be32 *data;300300+301301+ /*302302+ * Argument sanity checks ...303303+ */304304+ if ((addr & 0x3) || (len & 0x3))305305+ return -EINVAL;306306+307307+ data = kzalloc(MEMWIN0_APERTURE, GFP_KERNEL);308308+ if (!data)309309+ return -ENOMEM;310310+311311+ /* Offset into the region of memory which is being accessed312312+ * MEM_EDC0 = 0313313+ * MEM_EDC1 = 1314314+ * MEM_MC = 2315315+ */316316+ memoffset = (mtype * (5 * 1024 * 1024));317317+318318+ /* Determine the PCIE_MEM_ACCESS_OFFSET */319319+ addr = addr + memoffset;320320+321321+ /*322322+ * The underlaying EDC/MC read routines read MEMWIN0_APERTURE bytes323323+ * at a time so we need to round down the start and round up the end.324324+ * We'll start copying out of the first line at (addr - start) a word325325+ * at a time.326326+ */327327+ start = addr & ~(MEMWIN0_APERTURE-1);328328+ end = (addr + len + MEMWIN0_APERTURE-1) & ~(MEMWIN0_APERTURE-1);329329+ offset = (addr - start)/sizeof(__be32);330330+331331+ for (pos = start; pos < end; pos += MEMWIN0_APERTURE, offset = 0) {332332+ /*333333+ * If we're writing, copy the data from the caller's memory334334+ * buffer335335+ */336336+ if (!dir) {337337+ /*338338+ * If we're doing a partial write, then we need to do339339+ * a read-modify-write ...340340+ */341341+ if (offset || len < MEMWIN0_APERTURE) {342342+ ret = csio_mem_win_rw(hw, pos, data, 1);343343+ if (ret) {344344+ kfree(data);345345+ return ret;346346+ }347347+ }348348+ while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&349349+ len > 0) {350350+ data[offset++] = *buf++;351351+ len -= sizeof(__be32);352352+ }353353+ 
}354354+355355+ /*356356+ * Transfer a block of memory and bail if there's an error.357357+ */358358+ ret = csio_mem_win_rw(hw, pos, data, dir);359359+ if (ret) {360360+ kfree(data);361361+ return ret;362362+ }363363+364364+ /*365365+ * If we're reading, copy the data into the caller's memory366366+ * buffer.367367+ */368368+ if (dir)369369+ while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&370370+ len > 0) {371371+ *buf++ = data[offset++];372372+ len -= sizeof(__be32);373373+ }374374+ }375375+376376+ kfree(data);377377+378378+ return 0;379379+}380380+381381+static int382382+csio_memory_write(struct csio_hw *hw, int mtype, u32 addr, u32 len, __be32 *buf)383383+{384384+ return csio_memory_rw(hw, mtype, addr, len, buf, 0);385385+}386386+387387+/*388388+ * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.389389+ */390390+#define EEPROM_MAX_RD_POLL 40391391+#define EEPROM_MAX_WR_POLL 6392392+#define EEPROM_STAT_ADDR 0x7bfc393393+#define VPD_BASE 0x400394394+#define VPD_BASE_OLD 0395395+#define VPD_LEN 512396396+#define VPD_INFO_FLD_HDR_SIZE 3397397+398398+/*399399+ * csio_hw_seeprom_read - read a serial EEPROM location400400+ * @hw: hw to read401401+ * @addr: EEPROM virtual address402402+ * @data: where to store the read data403403+ *404404+ * Read a 32-bit word from a location in serial EEPROM using the card's PCI405405+ * VPD capability. 
Note that this function must be called with a virtual406406+ * address.407407+ */408408+static int409409+csio_hw_seeprom_read(struct csio_hw *hw, uint32_t addr, uint32_t *data)410410+{411411+ uint16_t val = 0;412412+ int attempts = EEPROM_MAX_RD_POLL;413413+ uint32_t base = hw->params.pci.vpd_cap_addr;414414+415415+ if (addr >= EEPROMVSIZE || (addr & 3))416416+ return -EINVAL;417417+418418+ pci_write_config_word(hw->pdev, base + PCI_VPD_ADDR, (uint16_t)addr);419419+420420+ do {421421+ udelay(10);422422+ pci_read_config_word(hw->pdev, base + PCI_VPD_ADDR, &val);423423+ } while (!(val & PCI_VPD_ADDR_F) && --attempts);424424+425425+ if (!(val & PCI_VPD_ADDR_F)) {426426+ csio_err(hw, "reading EEPROM address 0x%x failed\n", addr);427427+ return -EINVAL;428428+ }429429+430430+ pci_read_config_dword(hw->pdev, base + PCI_VPD_DATA, data);431431+ *data = le32_to_cpu(*data);432432+ return 0;433433+}434434+435435+/*436436+ * Partial EEPROM Vital Product Data structure. Includes only the ID and437437+ * VPD-R sections.438438+ */439439+struct t4_vpd_hdr {440440+ u8 id_tag;441441+ u8 id_len[2];442442+ u8 id_data[ID_LEN];443443+ u8 vpdr_tag;444444+ u8 vpdr_len[2];445445+};446446+447447+/*448448+ * csio_hw_get_vpd_keyword_val - Locates an information field keyword in449449+ * the VPD450450+ * @v: Pointer to buffered vpd data structure451451+ * @kw: The keyword to search for452452+ *453453+ * Returns the value of the information field keyword or454454+ * -EINVAL otherwise.455455+ */456456+static int457457+csio_hw_get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)458458+{459459+ int32_t i;460460+ int32_t offset , len;461461+ const uint8_t *buf = &v->id_tag;462462+ const uint8_t *vpdr_len = &v->vpdr_tag;463463+ offset = sizeof(struct t4_vpd_hdr);464464+ len = (uint16_t)vpdr_len[1] + ((uint16_t)vpdr_len[2] << 8);465465+466466+ if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN)467467+ return -EINVAL;468468+469469+ for (i = offset; (i + VPD_INFO_FLD_HDR_SIZE) <= (offset + len);) 
{470470+ if (memcmp(buf + i , kw, 2) == 0) {471471+ i += VPD_INFO_FLD_HDR_SIZE;472472+ return i;473473+ }474474+475475+ i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];476476+ }477477+478478+ return -EINVAL;479479+}480480+481481+static int482482+csio_pci_capability(struct pci_dev *pdev, int cap, int *pos)483483+{484484+ *pos = pci_find_capability(pdev, cap);485485+ if (*pos)486486+ return 0;487487+488488+ return -1;489489+}490490+491491+/*492492+ * csio_hw_get_vpd_params - read VPD parameters from VPD EEPROM493493+ * @hw: HW module494494+ * @p: where to store the parameters495495+ *496496+ * Reads card parameters stored in VPD EEPROM.497497+ */498498+static int499499+csio_hw_get_vpd_params(struct csio_hw *hw, struct csio_vpd *p)500500+{501501+ int i, ret, ec, sn, addr;502502+ uint8_t *vpd, csum;503503+ const struct t4_vpd_hdr *v;504504+ /* To get around compilation warning from strstrip */505505+ char *s;506506+507507+ if (csio_is_valid_vpd(hw))508508+ return 0;509509+510510+ ret = csio_pci_capability(hw->pdev, PCI_CAP_ID_VPD,511511+ &hw->params.pci.vpd_cap_addr);512512+ if (ret)513513+ return -EINVAL;514514+515515+ vpd = kzalloc(VPD_LEN, GFP_ATOMIC);516516+ if (vpd == NULL)517517+ return -ENOMEM;518518+519519+ /*520520+ * Card information normally starts at VPD_BASE but early cards had521521+ * it at 0.522522+ */523523+ ret = csio_hw_seeprom_read(hw, VPD_BASE, (uint32_t *)(vpd));524524+ addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;525525+526526+ for (i = 0; i < VPD_LEN; i += 4) {527527+ ret = csio_hw_seeprom_read(hw, addr + i, (uint32_t *)(vpd + i));528528+ if (ret) {529529+ kfree(vpd);530530+ return ret;531531+ }532532+ }533533+534534+ /* Reset the VPD flag! 
*/535535+ hw->flags &= (~CSIO_HWF_VPD_VALID);536536+537537+ v = (const struct t4_vpd_hdr *)vpd;538538+539539+#define FIND_VPD_KW(var, name) do { \540540+ var = csio_hw_get_vpd_keyword_val(v, name); \541541+ if (var < 0) { \542542+ csio_err(hw, "missing VPD keyword " name "\n"); \543543+ kfree(vpd); \544544+ return -EINVAL; \545545+ } \546546+} while (0)547547+548548+ FIND_VPD_KW(i, "RV");549549+ for (csum = 0; i >= 0; i--)550550+ csum += vpd[i];551551+552552+ if (csum) {553553+ csio_err(hw, "corrupted VPD EEPROM, actual csum %u\n", csum);554554+ kfree(vpd);555555+ return -EINVAL;556556+ }557557+ FIND_VPD_KW(ec, "EC");558558+ FIND_VPD_KW(sn, "SN");559559+#undef FIND_VPD_KW560560+561561+ memcpy(p->id, v->id_data, ID_LEN);562562+ s = strstrip(p->id);563563+ memcpy(p->ec, vpd + ec, EC_LEN);564564+ s = strstrip(p->ec);565565+ i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];566566+ memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));567567+ s = strstrip(p->sn);568568+569569+ csio_valid_vpd_copied(hw);570570+571571+ kfree(vpd);572572+ return 0;573573+}574574+575575+/*576576+ * csio_hw_sf1_read - read data from the serial flash577577+ * @hw: the HW module578578+ * @byte_cnt: number of bytes to read579579+ * @cont: whether another operation will be chained580580+ * @lock: whether to lock SF for PL access only581581+ * @valp: where to store the read data582582+ *583583+ * Reads up to 4 bytes of data from the serial flash. The location of584584+ * the read needs to be specified prior to calling this by issuing the585585+ * appropriate commands to the serial flash.586586+ */587587+static int588588+csio_hw_sf1_read(struct csio_hw *hw, uint32_t byte_cnt, int32_t cont,589589+ int32_t lock, uint32_t *valp)590590+{591591+ int ret;592592+593593+ if (!byte_cnt || byte_cnt > 4)594594+ return -EINVAL;595595+ if (csio_rd_reg32(hw, SF_OP) & SF_BUSY)596596+ return -EBUSY;597597+598598+ cont = cont ? SF_CONT : 0;599599+ lock = lock ? 
SF_LOCK : 0;600600+601601+ csio_wr_reg32(hw, lock | cont | BYTECNT(byte_cnt - 1), SF_OP);602602+ ret = csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS,603603+ 10, NULL);604604+ if (!ret)605605+ *valp = csio_rd_reg32(hw, SF_DATA);606606+ return ret;607607+}608608+609609+/*610610+ * csio_hw_sf1_write - write data to the serial flash611611+ * @hw: the HW module612612+ * @byte_cnt: number of bytes to write613613+ * @cont: whether another operation will be chained614614+ * @lock: whether to lock SF for PL access only615615+ * @val: value to write616616+ *617617+ * Writes up to 4 bytes of data to the serial flash. The location of618618+ * the write needs to be specified prior to calling this by issuing the619619+ * appropriate commands to the serial flash.620620+ */621621+static int622622+csio_hw_sf1_write(struct csio_hw *hw, uint32_t byte_cnt, uint32_t cont,623623+ int32_t lock, uint32_t val)624624+{625625+ if (!byte_cnt || byte_cnt > 4)626626+ return -EINVAL;627627+ if (csio_rd_reg32(hw, SF_OP) & SF_BUSY)628628+ return -EBUSY;629629+630630+ cont = cont ? SF_CONT : 0;631631+ lock = lock ? 
SF_LOCK : 0;632632+633633+ csio_wr_reg32(hw, val, SF_DATA);634634+ csio_wr_reg32(hw, cont | BYTECNT(byte_cnt - 1) | OP_WR | lock, SF_OP);635635+636636+ return csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS,637637+ 10, NULL);638638+}639639+640640+/*641641+ * csio_hw_flash_wait_op - wait for a flash operation to complete642642+ * @hw: the HW module643643+ * @attempts: max number of polls of the status register644644+ * @delay: delay between polls in ms645645+ *646646+ * Wait for a flash operation to complete by polling the status register.647647+ */648648+static int649649+csio_hw_flash_wait_op(struct csio_hw *hw, int32_t attempts, int32_t delay)650650+{651651+ int ret;652652+ uint32_t status;653653+654654+ while (1) {655655+ ret = csio_hw_sf1_write(hw, 1, 1, 1, SF_RD_STATUS);656656+ if (ret != 0)657657+ return ret;658658+659659+ ret = csio_hw_sf1_read(hw, 1, 0, 1, &status);660660+ if (ret != 0)661661+ return ret;662662+663663+ if (!(status & 1))664664+ return 0;665665+ if (--attempts == 0)666666+ return -EAGAIN;667667+ if (delay)668668+ msleep(delay);669669+ }670670+}671671+672672+/*673673+ * csio_hw_read_flash - read words from serial flash674674+ * @hw: the HW module675675+ * @addr: the start address for the read676676+ * @nwords: how many 32-bit words to read677677+ * @data: where to store the read data678678+ * @byte_oriented: whether to store data as bytes or as words679679+ *680680+ * Read the specified number of 32-bit words from the serial flash.681681+ * If @byte_oriented is set the read data is stored as a byte array682682+ * (i.e., big-endian), otherwise as 32-bit words in the platform's683683+ * natural endianess.684684+ */685685+static int686686+csio_hw_read_flash(struct csio_hw *hw, uint32_t addr, uint32_t nwords,687687+ uint32_t *data, int32_t byte_oriented)688688+{689689+ int ret;690690+691691+ if (addr + nwords * sizeof(uint32_t) > hw->params.sf_size || (addr & 3))692692+ return -EINVAL;693693+694694+ addr = swab32(addr) | 
SF_RD_DATA_FAST;695695+696696+ ret = csio_hw_sf1_write(hw, 4, 1, 0, addr);697697+ if (ret != 0)698698+ return ret;699699+700700+ ret = csio_hw_sf1_read(hw, 1, 1, 0, data);701701+ if (ret != 0)702702+ return ret;703703+704704+ for ( ; nwords; nwords--, data++) {705705+ ret = csio_hw_sf1_read(hw, 4, nwords > 1, nwords == 1, data);706706+ if (nwords == 1)707707+ csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */708708+ if (ret)709709+ return ret;710710+ if (byte_oriented)711711+ *data = htonl(*data);712712+ }713713+ return 0;714714+}715715+716716+/*717717+ * csio_hw_write_flash - write up to a page of data to the serial flash718718+ * @hw: the hw719719+ * @addr: the start address to write720720+ * @n: length of data to write in bytes721721+ * @data: the data to write722722+ *723723+ * Writes up to a page of data (256 bytes) to the serial flash starting724724+ * at the given address. All the data must be written to the same page.725725+ */726726+static int727727+csio_hw_write_flash(struct csio_hw *hw, uint32_t addr,728728+ uint32_t n, const uint8_t *data)729729+{730730+ int ret = -EINVAL;731731+ uint32_t buf[64];732732+ uint32_t i, c, left, val, offset = addr & 0xff;733733+734734+ if (addr >= hw->params.sf_size || offset + n > SF_PAGE_SIZE)735735+ return -EINVAL;736736+737737+ val = swab32(addr) | SF_PROG_PAGE;738738+739739+ ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);740740+ if (ret != 0)741741+ goto unlock;742742+743743+ ret = csio_hw_sf1_write(hw, 4, 1, 1, val);744744+ if (ret != 0)745745+ goto unlock;746746+747747+ for (left = n; left; left -= c) {748748+ c = min(left, 4U);749749+ for (val = 0, i = 0; i < c; ++i)750750+ val = (val << 8) + *data++;751751+752752+ ret = csio_hw_sf1_write(hw, c, c != left, 1, val);753753+ if (ret)754754+ goto unlock;755755+ }756756+ ret = csio_hw_flash_wait_op(hw, 8, 1);757757+ if (ret)758758+ goto unlock;759759+760760+ csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */761761+762762+ /* Read the page to verify the write succeeded */763763+ 
ret = csio_hw_read_flash(hw, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);764764+ if (ret)765765+ return ret;766766+767767+ if (memcmp(data - n, (uint8_t *)buf + offset, n)) {768768+ csio_err(hw,769769+ "failed to correctly write the flash page at %#x\n",770770+ addr);771771+ return -EINVAL;772772+ }773773+774774+ return 0;775775+776776+unlock:777777+ csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */778778+ return ret;779779+}780780+781781+/*782782+ * csio_hw_flash_erase_sectors - erase a range of flash sectors783783+ * @hw: the HW module784784+ * @start: the first sector to erase785785+ * @end: the last sector to erase786786+ *787787+ * Erases the sectors in the given inclusive range.788788+ */789789+static int790790+csio_hw_flash_erase_sectors(struct csio_hw *hw, int32_t start, int32_t end)791791+{792792+ int ret = 0;793793+794794+ while (start <= end) {795795+796796+ ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);797797+ if (ret != 0)798798+ goto out;799799+800800+ ret = csio_hw_sf1_write(hw, 4, 0, 1,801801+ SF_ERASE_SECTOR | (start << 8));802802+ if (ret != 0)803803+ goto out;804804+805805+ ret = csio_hw_flash_wait_op(hw, 14, 500);806806+ if (ret != 0)807807+ goto out;808808+809809+ start++;810810+ }811811+out:812812+ if (ret)813813+ csio_err(hw, "erase of flash sector %d failed, error %d\n",814814+ start, ret);815815+ csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */816816+ return 0;817817+}818818+819819+/*820820+ * csio_hw_flash_cfg_addr - return the address of the flash821821+ * configuration file822822+ * @hw: the HW module823823+ *824824+ * Return the address within the flash where the Firmware Configuration825825+ * File is stored.826826+ */827827+static unsigned int828828+csio_hw_flash_cfg_addr(struct csio_hw *hw)829829+{830830+ if (hw->params.sf_size == 0x100000)831831+ return FPGA_FLASH_CFG_OFFSET;832832+ else833833+ return FLASH_CFG_OFFSET;834834+}835835+836836+static void837837+csio_hw_print_fw_version(struct csio_hw *hw, char *str)838838+{839839+ csio_info(hw, 
"%s: %u.%u.%u.%u\n", str,840840+ FW_HDR_FW_VER_MAJOR_GET(hw->fwrev),841841+ FW_HDR_FW_VER_MINOR_GET(hw->fwrev),842842+ FW_HDR_FW_VER_MICRO_GET(hw->fwrev),843843+ FW_HDR_FW_VER_BUILD_GET(hw->fwrev));844844+}845845+846846+/*847847+ * csio_hw_get_fw_version - read the firmware version848848+ * @hw: HW module849849+ * @vers: where to place the version850850+ *851851+ * Reads the FW version from flash.852852+ */853853+static int854854+csio_hw_get_fw_version(struct csio_hw *hw, uint32_t *vers)855855+{856856+ return csio_hw_read_flash(hw, FW_IMG_START +857857+ offsetof(struct fw_hdr, fw_ver), 1,858858+ vers, 0);859859+}860860+861861+/*862862+ * csio_hw_get_tp_version - read the TP microcode version863863+ * @hw: HW module864864+ * @vers: where to place the version865865+ *866866+ * Reads the TP microcode version from flash.867867+ */868868+static int869869+csio_hw_get_tp_version(struct csio_hw *hw, u32 *vers)870870+{871871+ return csio_hw_read_flash(hw, FLASH_FW_START +872872+ offsetof(struct fw_hdr, tp_microcode_ver), 1,873873+ vers, 0);874874+}875875+876876+/*877877+ * csio_hw_check_fw_version - check if the FW is compatible with878878+ * this driver879879+ * @hw: HW module880880+ *881881+ * Checks if an adapter's FW is compatible with the driver. 
Returns 0882882+ * if there's exact match, a negative error if the version could not be883883+ * read or there's a major/minor version mismatch/minor.884884+ */885885+static int886886+csio_hw_check_fw_version(struct csio_hw *hw)887887+{888888+ int ret, major, minor, micro;889889+890890+ ret = csio_hw_get_fw_version(hw, &hw->fwrev);891891+ if (!ret)892892+ ret = csio_hw_get_tp_version(hw, &hw->tp_vers);893893+ if (ret)894894+ return ret;895895+896896+ major = FW_HDR_FW_VER_MAJOR_GET(hw->fwrev);897897+ minor = FW_HDR_FW_VER_MINOR_GET(hw->fwrev);898898+ micro = FW_HDR_FW_VER_MICRO_GET(hw->fwrev);899899+900900+ if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */901901+ csio_err(hw, "card FW has major version %u, driver wants %u\n",902902+ major, FW_VERSION_MAJOR);903903+ return -EINVAL;904904+ }905905+906906+ if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)907907+ return 0; /* perfect match */908908+909909+ /* Minor/micro version mismatch */910910+ return -EINVAL;911911+}912912+913913+/*914914+ * csio_hw_fw_dload - download firmware.915915+ * @hw: HW module916916+ * @fw_data: firmware image to write.917917+ * @size: image size918918+ *919919+ * Write the supplied firmware image to the card's serial flash.920920+ */921921+static int922922+csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size)923923+{924924+ uint32_t csum;925925+ int32_t addr;926926+ int ret;927927+ uint32_t i;928928+ uint8_t first_page[SF_PAGE_SIZE];929929+ const uint32_t *p = (const uint32_t *)fw_data;930930+ struct fw_hdr *hdr = (struct fw_hdr *)fw_data;931931+ uint32_t sf_sec_size;932932+933933+ if ((!hw->params.sf_size) || (!hw->params.sf_nsec)) {934934+ csio_err(hw, "Serial Flash data invalid\n");935935+ return -EINVAL;936936+ }937937+938938+ if (!size) {939939+ csio_err(hw, "FW image has no data\n");940940+ return -EINVAL;941941+ }942942+943943+ if (size & 511) {944944+ csio_err(hw, "FW image size not multiple of 512 bytes\n");945945+ return -EINVAL;946946+ 
}947947+948948+ if (ntohs(hdr->len512) * 512 != size) {949949+ csio_err(hw, "FW image size differs from size in FW header\n");950950+ return -EINVAL;951951+ }952952+953953+ if (size > FW_MAX_SIZE) {954954+ csio_err(hw, "FW image too large, max is %u bytes\n",955955+ FW_MAX_SIZE);956956+ return -EINVAL;957957+ }958958+959959+ for (csum = 0, i = 0; i < size / sizeof(csum); i++)960960+ csum += ntohl(p[i]);961961+962962+ if (csum != 0xffffffff) {963963+ csio_err(hw, "corrupted firmware image, checksum %#x\n", csum);964964+ return -EINVAL;965965+ }966966+967967+ sf_sec_size = hw->params.sf_size / hw->params.sf_nsec;968968+ i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */969969+970970+ csio_dbg(hw, "Erasing sectors... start:%d end:%d\n",971971+ FW_START_SEC, FW_START_SEC + i - 1);972972+973973+ ret = csio_hw_flash_erase_sectors(hw, FW_START_SEC,974974+ FW_START_SEC + i - 1);975975+ if (ret) {976976+ csio_err(hw, "Flash Erase failed\n");977977+ goto out;978978+ }979979+980980+ /*981981+ * We write the correct version at the end so the driver can see a bad982982+ * version if the FW write fails. Start by writing a copy of the983983+ * first page with a bad version.984984+ */985985+ memcpy(first_page, fw_data, SF_PAGE_SIZE);986986+ ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);987987+ ret = csio_hw_write_flash(hw, FW_IMG_START, SF_PAGE_SIZE, first_page);988988+ if (ret)989989+ goto out;990990+991991+ csio_dbg(hw, "Writing Flash .. 
start:%d end:%d\n",992992+ FW_IMG_START, FW_IMG_START + size);993993+994994+ addr = FW_IMG_START;995995+ for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {996996+ addr += SF_PAGE_SIZE;997997+ fw_data += SF_PAGE_SIZE;998998+ ret = csio_hw_write_flash(hw, addr, SF_PAGE_SIZE, fw_data);999999+ if (ret)10001000+ goto out;10011001+ }10021002+10031003+ ret = csio_hw_write_flash(hw,10041004+ FW_IMG_START +10051005+ offsetof(struct fw_hdr, fw_ver),10061006+ sizeof(hdr->fw_ver),10071007+ (const uint8_t *)&hdr->fw_ver);10081008+10091009+out:10101010+ if (ret)10111011+ csio_err(hw, "firmware download failed, error %d\n", ret);10121012+ return ret;10131013+}10141014+10151015+static int10161016+csio_hw_get_flash_params(struct csio_hw *hw)10171017+{10181018+ int ret;10191019+ uint32_t info = 0;10201020+10211021+ ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID);10221022+ if (!ret)10231023+ ret = csio_hw_sf1_read(hw, 3, 0, 1, &info);10241024+ csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */10251025+ if (ret != 0)10261026+ return ret;10271027+10281028+ if ((info & 0xff) != 0x20) /* not a Numonix flash */10291029+ return -EINVAL;10301030+ info >>= 16; /* log2 of size */10311031+ if (info >= 0x14 && info < 0x18)10321032+ hw->params.sf_nsec = 1 << (info - 16);10331033+ else if (info == 0x18)10341034+ hw->params.sf_nsec = 64;10351035+ else10361036+ return -EINVAL;10371037+ hw->params.sf_size = 1 << info;10381038+10391039+ return 0;10401040+}10411041+10421042+static void10431043+csio_set_pcie_completion_timeout(struct csio_hw *hw, u8 range)10441044+{10451045+ uint16_t val;10461046+ uint32_t pcie_cap;10471047+10481048+ if (!csio_pci_capability(hw->pdev, PCI_CAP_ID_EXP, &pcie_cap)) {10491049+ pci_read_config_word(hw->pdev,10501050+ pcie_cap + PCI_EXP_DEVCTL2, &val);10511051+ val &= 0xfff0;10521052+ val |= range ;10531053+ pci_write_config_word(hw->pdev,10541054+ pcie_cap + PCI_EXP_DEVCTL2, val);10551055+ }10561056+}10571057+10581058+10591059+/*10601060+ * Return the specified PCI-E 
 Configuration Space register from our Physical
 * Function. We try first via a Firmware LDST Command since we prefer to let
 * the firmware own all of these registers, but if that fails we go for it
 * directly ourselves.
 */
static uint32_t
csio_read_pcie_cfg4(struct csio_hw *hw, int reg)
{
	u32 val = 0;
	struct csio_mb *mbp;
	int rv;
	struct fw_ldst_cmd *ldst_cmd;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		/* No mailbox available: fall back to a direct read. */
		pci_read_config_dword(hw->pdev, reg, &val);
		return val;
	}

	csio_mb_ldst(hw, mbp, CSIO_MB_DEFAULT_TMO, reg);

	rv = csio_mb_issue(hw, mbp);

	/*
	 * If the LDST Command succeeded, extract the returned register
	 * value. Otherwise read it directly ourself.
	 */
	if (rv == 0) {
		ldst_cmd = (struct fw_ldst_cmd *)(mbp->mb);
		val = ntohl(ldst_cmd->u.pcie.data[0]);
	} else
		pci_read_config_dword(hw->pdev, reg, &val);

	mempool_free(mbp, hw->mb_mempool);

	return val;
} /* csio_read_pcie_cfg4 */

static int
csio_hw_set_mem_win(struct csio_hw *hw)
{
	u32 bar0;

	/*
	 * Truncation intentional: we only read the bottom 32-bits of the
	 * 64-bit BAR0/BAR1 ...
 We use the hardware backdoor mechanism to
	 * read BAR0 instead of using pci_resource_start() because we could be
	 * operating from within a Virtual Machine which is trapping our
	 * accesses to our Configuration Space and we need to set up the PCI-E
	 * Memory Window decoders with the actual addresses which will be
	 * coming across the PCI-E link.
	 */
	bar0 = csio_read_pcie_cfg4(hw, PCI_BASE_ADDRESS_0);
	bar0 &= PCI_BASE_ADDRESS_MEM_MASK;

	/*
	 * Set up memory window for accessing adapter memory ranges. (Read
	 * back MA register to ensure that changes propagate before we attempt
	 * to use the new values.)
	 */
	csio_wr_reg32(hw, (bar0 + MEMWIN0_BASE) | BIR(0) |
			  WINDOW(ilog2(MEMWIN0_APERTURE) - 10),
			  PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0));
	csio_wr_reg32(hw, (bar0 + MEMWIN1_BASE) | BIR(0) |
			  WINDOW(ilog2(MEMWIN1_APERTURE) - 10),
			  PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1));
	csio_wr_reg32(hw, (bar0 + MEMWIN2_BASE) | BIR(0) |
			  WINDOW(ilog2(MEMWIN2_APERTURE) - 10),
			  PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2));
	csio_rd_reg32(hw, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2));
	return 0;
} /* csio_hw_set_mem_win */



/*****************************************************************************/
/* HW State machine assists                                                  */
/*****************************************************************************/

static int
csio_hw_dev_ready(struct csio_hw *hw)
{
	uint32_t reg;
	int cnt = 6;

	/* Poll PL_WHOAMI until the device responds (up to ~600 ms). */
	while (((reg = csio_rd_reg32(hw, PL_WHOAMI)) == 0xFFFFFFFF) &&
	       (--cnt != 0))
		mdelay(100);

	if ((cnt == 0) && (((int32_t)(SOURCEPF_GET(reg)) < 0) ||
			   (SOURCEPF_GET(reg) >= CSIO_MAX_PFN))) {
		csio_err(hw, "PL_WHOAMI returned 0x%x, cnt:%d\n", reg, cnt);
		return -EIO;
	}

	/* Cache our Physical Function number for later mailbox traffic. */
	hw->pfn = SOURCEPF_GET(reg);

	return 0;
}

/*
 * csio_do_hello - Perform the HELLO FW Mailbox command and process response.
 * @hw: HW module
 * @state: Device state
 *
 * FW_HELLO_CMD has to be polled for completion.
 */
static int
csio_do_hello(struct csio_hw *hw, enum csio_dev_state *state)
{
	struct csio_mb *mbp;
	int rv = 0;
	enum csio_dev_master master;
	enum fw_retval retval;
	uint8_t mpfn;
	char state_str[16];
	int retries = FW_CMD_HELLO_RETRIES;

	memset(state_str, 0, sizeof(state_str));

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		rv = -ENOMEM;
		CSIO_INC_STATS(hw, n_err_nomem);
		goto out;
	}

	master = csio_force_master ? CSIO_MASTER_MUST : CSIO_MASTER_MAY;

retry:
	csio_mb_hello(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn,
		      hw->pfn, master, NULL);

	rv = csio_mb_issue(hw, mbp);
	if (rv) {
		csio_err(hw, "failed to issue HELLO cmd. ret:%d.\n", rv);
		goto out_free_mb;
	}

	csio_mb_process_hello_rsp(hw, mbp, &retval, state, &mpfn);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "HELLO cmd failed with ret: %d\n", retval);
		rv = -EINVAL;
		goto out_free_mb;
	}

	/* Firmware has designated us to be master */
	if (hw->pfn == mpfn) {
		hw->flags |= CSIO_HWF_MASTER;
	} else if (*state == CSIO_DEV_STATE_UNINIT) {
		/*
		 * If we're not the Master PF then we need to wait around for
		 * the Master PF Driver to finish setting up the adapter.
		 *
		 * Note that we also do this wait if we're a non-Master-capable
		 * PF and there is no current Master PF; a Master PF may show up
		 * momentarily and we wouldn't want to fail pointlessly.  (This
		 * can happen when an OS loads lots of different drivers rapidly
		 * at the same time). In this case, the Master PF returned by
		 * the firmware will be PCIE_FW_MASTER_MASK so the test below
		 * will work ...
		 */

		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state. If we see either of these we bail out
		 * and report the issue to the caller. If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again. Otherwise bail with a timeout error.
		 */
		for (;;) {
			uint32_t pcie_fw;

			msleep(50);
			waiting -= 50;

			/*
			 * If neither Error nor Initialized are indicated
			 * by the firmware keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = csio_rd_reg32(hw, PCIE_FW);
			if (!(pcie_fw & (PCIE_FW_ERR|PCIE_FW_INIT))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					rv = -ETIMEDOUT;
					break;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition
			 * report errors preferentially.
			 *
			 * NOTE(review): *state is dereferenced unconditionally
			 * above but NULL-checked here — presumably callers
			 * always pass a valid pointer; confirm against callers.
			 */
			if (state) {
				if (pcie_fw & PCIE_FW_ERR) {
					*state = CSIO_DEV_STATE_ERR;
					rv = -ETIMEDOUT;
				} else if (pcie_fw & PCIE_FW_INIT)
					*state = CSIO_DEV_STATE_INIT;
			}

			/*
			 * If we arrived before a Master PF was selected and
			 * there's not a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (mpfn == PCIE_FW_MASTER_MASK &&
			    (pcie_fw & PCIE_FW_MASTER_VLD))
				mpfn = PCIE_FW_MASTER_GET(pcie_fw);
			break;
		}
		hw->flags &= ~CSIO_HWF_MASTER;
	}

	/* Render the resulting device state for the log message below. */
	switch (*state) {
	case CSIO_DEV_STATE_UNINIT:
		strcpy(state_str, "Initializing");
		break;
	case CSIO_DEV_STATE_INIT:
		strcpy(state_str, "Initialized");
		break;
	case CSIO_DEV_STATE_ERR:
		strcpy(state_str, "Error");
		break;
	default:
		strcpy(state_str, "Unknown");
		break;
	}

	if (hw->pfn == mpfn)
		csio_info(hw, "PF: %d, Coming up as MASTER, HW state: %s\n",
			  hw->pfn, state_str);
	else
		csio_info(hw,
			  "PF: %d, Coming up as SLAVE, Master PF: %d, HW state: %s\n",
			  hw->pfn, mpfn, state_str);

out_free_mb:
	mempool_free(mbp, hw->mb_mempool);
out:
	return rv;
}

/*
 * csio_do_bye - Perform the BYE FW Mailbox command and process response.
* @hw: HW module13141314+ *13151315+ */13161316+static int13171317+csio_do_bye(struct csio_hw *hw)13181318+{13191319+ struct csio_mb *mbp;13201320+ enum fw_retval retval;13211321+13221322+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);13231323+ if (!mbp) {13241324+ CSIO_INC_STATS(hw, n_err_nomem);13251325+ return -ENOMEM;13261326+ }13271327+13281328+ csio_mb_bye(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);13291329+13301330+ if (csio_mb_issue(hw, mbp)) {13311331+ csio_err(hw, "Issue of BYE command failed\n");13321332+ mempool_free(mbp, hw->mb_mempool);13331333+ return -EINVAL;13341334+ }13351335+13361336+ retval = csio_mb_fw_retval(mbp);13371337+ if (retval != FW_SUCCESS) {13381338+ mempool_free(mbp, hw->mb_mempool);13391339+ return -EINVAL;13401340+ }13411341+13421342+ mempool_free(mbp, hw->mb_mempool);13431343+13441344+ return 0;13451345+}13461346+13471347+/*13481348+ * csio_do_reset- Perform the device reset.13491349+ * @hw: HW module13501350+ * @fw_rst: FW reset13511351+ *13521352+ * If fw_rst is set, issues FW reset mbox cmd otherwise13531353+ * does PIO reset.13541354+ * Performs reset of the function.13551355+ */13561356+static int13571357+csio_do_reset(struct csio_hw *hw, bool fw_rst)13581358+{13591359+ struct csio_mb *mbp;13601360+ enum fw_retval retval;13611361+13621362+ if (!fw_rst) {13631363+ /* PIO reset */13641364+ csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);13651365+ mdelay(2000);13661366+ return 0;13671367+ }13681368+13691369+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);13701370+ if (!mbp) {13711371+ CSIO_INC_STATS(hw, n_err_nomem);13721372+ return -ENOMEM;13731373+ }13741374+13751375+ csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,13761376+ PIORSTMODE | PIORST, 0, NULL);13771377+13781378+ if (csio_mb_issue(hw, mbp)) {13791379+ csio_err(hw, "Issue of RESET command failed.n");13801380+ mempool_free(mbp, hw->mb_mempool);13811381+ return -EINVAL;13821382+ }13831383+13841384+ retval = csio_mb_fw_retval(mbp);13851385+ if (retval != FW_SUCCESS) {13861386+ 
csio_err(hw, "RESET cmd failed with ret:0x%x.\n", retval);13871387+ mempool_free(mbp, hw->mb_mempool);13881388+ return -EINVAL;13891389+ }13901390+13911391+ mempool_free(mbp, hw->mb_mempool);13921392+13931393+ return 0;13941394+}13951395+13961396+static int13971397+csio_hw_validate_caps(struct csio_hw *hw, struct csio_mb *mbp)13981398+{13991399+ struct fw_caps_config_cmd *rsp = (struct fw_caps_config_cmd *)mbp->mb;14001400+ uint16_t caps;14011401+14021402+ caps = ntohs(rsp->fcoecaps);14031403+14041404+ if (!(caps & FW_CAPS_CONFIG_FCOE_INITIATOR)) {14051405+ csio_err(hw, "No FCoE Initiator capability in the firmware.\n");14061406+ return -EINVAL;14071407+ }14081408+14091409+ if (!(caps & FW_CAPS_CONFIG_FCOE_CTRL_OFLD)) {14101410+ csio_err(hw, "No FCoE Control Offload capability\n");14111411+ return -EINVAL;14121412+ }14131413+14141414+ return 0;14151415+}14161416+14171417+/*14181418+ * csio_hw_fw_halt - issue a reset/halt to FW and put uP into RESET14191419+ * @hw: the HW module14201420+ * @mbox: mailbox to use for the FW RESET command (if desired)14211421+ * @force: force uP into RESET even if FW RESET command fails14221422+ *14231423+ * Issues a RESET command to firmware (if desired) with a HALT indication14241424+ * and then puts the microprocessor into RESET state. The RESET command14251425+ * will only be issued if a legitimate mailbox is provided (mbox <=14261426+ * PCIE_FW_MASTER_MASK).14271427+ *14281428+ * This is generally used in order for the host to safely manipulate the14291429+ * adapter without fear of conflicting with whatever the firmware might14301430+ * be doing. 
 * The only way out of this state is to RESTART the firmware
 * ...
 */
static int
csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force)
{
	enum fw_retval retval = 0;

	/*
	 * If a legitimate mailbox is provided, issue a RESET command
	 * with a HALT indication.
	 */
	if (mbox <= PCIE_FW_MASTER_MASK) {
		struct csio_mb *mbp;

		mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
		if (!mbp) {
			CSIO_INC_STATS(hw, n_err_nomem);
			return -ENOMEM;
		}

		csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
			      PIORSTMODE | PIORST, FW_RESET_CMD_HALT(1),
			      NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "Issue of RESET command failed!\n");
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		retval = csio_mb_fw_retval(mbp);
		mempool_free(mbp, hw->mb_mempool);
	}

	/*
	 * Normally we won't complete the operation if the firmware RESET
	 * command fails but if our caller insists we'll go ahead and put the
	 * uP into RESET.  This can be useful if the firmware is hung or even
	 * missing ... We'll have to take the risk of putting the uP into
	 * RESET without the cooperation of firmware in that case.
	 *
	 * We also force the firmware's HALT flag to be on in case we bypassed
	 * the firmware RESET command above or we're dealing with old firmware
	 * which doesn't have the HALT capability.  This will serve as a flag
	 * for the incoming firmware to know that it's coming out of a HALT
	 * rather than a RESET ... if it's new enough to understand that ...
	 */
	if (retval == 0 || force) {
		csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, UPCRST);
		csio_set_reg_field(hw, PCIE_FW, PCIE_FW_HALT, PCIE_FW_HALT);
	}

	/*
	 * And we always return the result of the firmware RESET command
	 * even when we force the uP into RESET ...
	 */
	return retval ? -EINVAL : 0;
}

/*
 * csio_hw_fw_restart - restart the firmware by taking the uP out of RESET
 * @hw: the HW module
 * @mbox: mailbox to use for the FW RESET command (if valid, i.e.
 *	mbox <= PCIE_FW_MASTER_MASK)
 * @reset: if we want to do a RESET to restart things
 *
 * Restart firmware previously halted by csio_hw_fw_halt().  On successful
 * return the previous PF Master remains as the new PF Master and there
 * is no need to issue a new HELLO command, etc.
 *
 * We do this in two ways:
 *
 *	1. If we're dealing with newer firmware we'll simply want to take
 *	   the chip's microprocessor out of RESET.  This will cause the
 *	   firmware to start up from its start vector.  And then we'll loop
 *	   until the firmware indicates it's started again (PCIE_FW.HALT
 *	   reset to 0) or we timeout.
 *
 *	2. If we're dealing with older firmware then we'll need to RESET
 *	   the chip since older firmware won't recognize the PCIE_FW.HALT
 *	   flag and automatically RESET itself on startup.
 */
static int
csio_hw_fw_restart(struct csio_hw *hw, uint32_t mbox, int32_t reset)
{
	if (reset) {
		/*
		 * Since we're directing the RESET instead of the firmware
		 * doing it automatically, we need to clear the PCIE_FW.HALT
		 * bit.
		 */
		csio_set_reg_field(hw, PCIE_FW, PCIE_FW_HALT, 0);

		/*
		 * If we've been given a valid mailbox, first try to get the
		 * firmware to do the RESET.  If that works, great and we can
		 * return success.  Otherwise, if we haven't been given a
		 * valid mailbox or the RESET command failed, fall back to
		 * hitting the chip with a hammer.
		 */
		if (mbox <= PCIE_FW_MASTER_MASK) {
			csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, 0);
			msleep(100);
			if (csio_do_reset(hw, true) == 0)
				return 0;
		}

		/* Hard PIO reset as the fallback. */
		csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
		msleep(2000);
	} else {
		int ms;

		/* Take the uP out of RESET and poll for HALT to clear. */
		csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, 0);
		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
			if (!(csio_rd_reg32(hw, PCIE_FW) & PCIE_FW_HALT))
				return 0;
			msleep(100);
			ms += 100;
		}
		return -ETIMEDOUT;
	}
	return 0;
}

/*
 * csio_hw_fw_upgrade - perform all of the steps necessary to upgrade FW
 * @hw: the HW module
 * @mbox: mailbox to use for the FW RESET command (if desired)
 * @fw_data: the firmware image to write
 * @size: image size
 * @force: force upgrade even if firmware doesn't cooperate
 *
 * Perform all of the steps necessary for upgrading an adapter's
 * firmware image.  Normally this requires the cooperation of the
 * existing firmware in order to halt all existing activities
 * but if an invalid mailbox token is passed in we skip that step
 * (though we'll still put the adapter microprocessor into RESET in
 * that case).
 *
 * On successful return the new firmware will have been loaded and
 * the adapter will have been fully RESET losing all previous setup
 * state.
On unsuccessful return the adapter may be completely hosed ...15711571+ * positive errno indicates that the adapter is ~probably~ intact, a15721572+ * negative errno indicates that things are looking bad ...15731573+ */15741574+static int15751575+csio_hw_fw_upgrade(struct csio_hw *hw, uint32_t mbox,15761576+ const u8 *fw_data, uint32_t size, int32_t force)15771577+{15781578+ const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;15791579+ int reset, ret;15801580+15811581+ ret = csio_hw_fw_halt(hw, mbox, force);15821582+ if (ret != 0 && !force)15831583+ return ret;15841584+15851585+ ret = csio_hw_fw_dload(hw, (uint8_t *) fw_data, size);15861586+ if (ret != 0)15871587+ return ret;15881588+15891589+ /*15901590+ * Older versions of the firmware don't understand the new15911591+ * PCIE_FW.HALT flag and so won't know to perform a RESET when they15921592+ * restart. So for newly loaded older firmware we'll have to do the15931593+ * RESET for it so it starts up on a clean slate. We can tell if15941594+ * the newly loaded firmware will handle this right by checking15951595+ * its header flags to see if it advertises the capability.15961596+ */15971597+ reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);15981598+ return csio_hw_fw_restart(hw, mbox, reset);15991599+}16001600+16011601+16021602+/*16031603+ * csio_hw_fw_config_file - setup an adapter via a Configuration File16041604+ * @hw: the HW module16051605+ * @mbox: mailbox to use for the FW command16061606+ * @mtype: the memory type where the Configuration File is located16071607+ * @maddr: the memory address where the Configuration File is located16081608+ * @finiver: return value for CF [fini] version16091609+ * @finicsum: return value for CF [fini] checksum16101610+ * @cfcsum: return value for CF computed checksum16111611+ *16121612+ * Issue a command to get the firmware to process the Configuration16131613+ * File located at the specified mtype/maddress. 
If the Configuration16141614+ * File is processed successfully and return value pointers are16151615+ * provided, the Configuration File "[fini] section version and16161616+ * checksum values will be returned along with the computed checksum.16171617+ * It's up to the caller to decide how it wants to respond to the16181618+ * checksums not matching but it recommended that a prominant warning16191619+ * be emitted in order to help people rapidly identify changed or16201620+ * corrupted Configuration Files.16211621+ *16221622+ * Also note that it's possible to modify things like "niccaps",16231623+ * "toecaps",etc. between processing the Configuration File and telling16241624+ * the firmware to use the new configuration. Callers which want to16251625+ * do this will need to "hand-roll" their own CAPS_CONFIGS commands for16261626+ * Configuration Files if they want to do this.16271627+ */16281628+static int16291629+csio_hw_fw_config_file(struct csio_hw *hw,16301630+ unsigned int mtype, unsigned int maddr,16311631+ uint32_t *finiver, uint32_t *finicsum, uint32_t *cfcsum)16321632+{16331633+ struct csio_mb *mbp;16341634+ struct fw_caps_config_cmd *caps_cmd;16351635+ int rv = -EINVAL;16361636+ enum fw_retval ret;16371637+16381638+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);16391639+ if (!mbp) {16401640+ CSIO_INC_STATS(hw, n_err_nomem);16411641+ return -ENOMEM;16421642+ }16431643+ /*16441644+ * Tell the firmware to process the indicated Configuration File.16451645+ * If there are no errors and the caller has provided return value16461646+ * pointers for the [fini] section version, checksum and computed16471647+ * checksum, pass those back to the caller.16481648+ */16491649+ caps_cmd = (struct fw_caps_config_cmd *)(mbp->mb);16501650+ CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);16511651+ caps_cmd->op_to_write =16521652+ htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |16531653+ FW_CMD_REQUEST |16541654+ FW_CMD_READ);16551655+ caps_cmd->cfvalid_to_len16 =16561656+ 
htonl(FW_CAPS_CONFIG_CMD_CFVALID |16571657+ FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |16581658+ FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |16591659+ FW_LEN16(*caps_cmd));16601660+16611661+ if (csio_mb_issue(hw, mbp)) {16621662+ csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD failed!\n");16631663+ goto out;16641664+ }16651665+16661666+ ret = csio_mb_fw_retval(mbp);16671667+ if (ret != FW_SUCCESS) {16681668+ csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv);16691669+ goto out;16701670+ }16711671+16721672+ if (finiver)16731673+ *finiver = ntohl(caps_cmd->finiver);16741674+ if (finicsum)16751675+ *finicsum = ntohl(caps_cmd->finicsum);16761676+ if (cfcsum)16771677+ *cfcsum = ntohl(caps_cmd->cfcsum);16781678+16791679+ /* Validate device capabilities */16801680+ if (csio_hw_validate_caps(hw, mbp)) {16811681+ rv = -ENOENT;16821682+ goto out;16831683+ }16841684+16851685+ /*16861686+ * And now tell the firmware to use the configuration we just loaded.16871687+ */16881688+ caps_cmd->op_to_write =16891689+ htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |16901690+ FW_CMD_REQUEST |16911691+ FW_CMD_WRITE);16921692+ caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd));16931693+16941694+ if (csio_mb_issue(hw, mbp)) {16951695+ csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD failed!\n");16961696+ goto out;16971697+ }16981698+16991699+ ret = csio_mb_fw_retval(mbp);17001700+ if (ret != FW_SUCCESS) {17011701+ csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv);17021702+ goto out;17031703+ }17041704+17051705+ rv = 0;17061706+out:17071707+ mempool_free(mbp, hw->mb_mempool);17081708+ return rv;17091709+}17101710+17111711+/*17121712+ * csio_get_device_params - Get device parameters.17131713+ * @hw: HW module17141714+ *17151715+ */17161716+static int17171717+csio_get_device_params(struct csio_hw *hw)17181718+{17191719+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);17201720+ struct csio_mb *mbp;17211721+ enum fw_retval retval;17221722+ u32 param[6];17231723+ int i, j = 0;17241724+17251725+ /* Initialize 
	   portids to -1 */
	for (i = 0; i < CSIO_MAX_PPORTS; i++)
		hw->pport[i].portid = -1;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get port vec information. */
	param[0] = FW_PARAM_DEV(PORTVEC);

	/* Get Core clock. */
	param[1] = FW_PARAM_DEV(CCLK);

	/* Get EQ id start and end. */
	param[2] = FW_PARAM_PFVF(EQ_START);
	param[3] = FW_PARAM_PFVF(EQ_END);

	/* Get IQ id start and end. */
	param[4] = FW_PARAM_PFVF(IQFLINT_START);
	param[5] = FW_PARAM_PFVF(IQFLINT_END);

	csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
		       ARRAY_SIZE(param), param, NULL, false, NULL);
	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	csio_mb_process_read_params_rsp(hw, mbp, &retval,
			ARRAY_SIZE(param), param);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
				retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	/* cache the information. */
	hw->port_vec = param[0];
	hw->vpd.cclk = param[1];
	wrm->fw_eq_start = param[2];
	wrm->fw_iq_start = param[4];

	/* Using FW configured max iqs & eqs */
	if ((hw->flags & CSIO_HWF_USING_SOFT_PARAMS) ||
		!csio_is_hw_master(hw)) {
		hw->cfg_niq = param[5] - param[4] + 1;
		hw->cfg_neq = param[3] - param[2] + 1;
		csio_dbg(hw, "Using fwconfig max niqs %d neqs %d\n",
			hw->cfg_niq, hw->cfg_neq);
	}

	hw->port_vec &= csio_port_mask;

	hw->num_pports	= hweight32(hw->port_vec);

	csio_dbg(hw, "Port vector: 0x%x, #ports: %d\n",
		    hw->port_vec, hw->num_pports);

	/* Walk the port vector, assigning the set-bit positions as port ids */
	for (i = 0; i < hw->num_pports; i++) {
		while ((hw->port_vec & (1 << j)) == 0)
			j++;
		hw->pport[i].portid = j++;
		csio_dbg(hw, "Found Port:%d\n", hw->pport[i].portid);
	}
	mempool_free(mbp, hw->mb_mempool);

	return 0;
}


/*
 * csio_config_device_caps - Get and set device capabilities.
 * @hw: HW module
 *
 */
static int
csio_config_device_caps(struct csio_hw *hw)
{
	struct csio_mb *mbp;
	enum fw_retval retval;
	int rv = -EINVAL;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get device capabilities */
	csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, 0, 0, 0, 0, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(r) failed!\n");
		goto out;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_CAPS_CONFIG_CMD(r) returned %d!\n", retval);
		goto out;
	}

	/* Validate device capabilities */
	if (csio_hw_validate_caps(hw, mbp))
		goto out;

	/* Don't config device capabilities if already configured */
	if (hw->fw_state == CSIO_DEV_STATE_INIT) {
		rv = 0;
		goto out;
	}

	/* Write back desired device capabilities */
	csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, true, true,
			    false, true, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(w) failed!\n");
		goto out;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_CAPS_CONFIG_CMD(w) returned %d!\n", retval);
		goto out;
	}

	rv = 0;
out:
	mempool_free(mbp, hw->mb_mempool);
	return rv;
}

/*
 * csio_config_global_rss - configure global RSS mode via mailbox
 * @hw: HW module
 *
 * Puts the adapter into basic-virtual RSS mode with tunnel mapping,
 * Toeplitz hashing and tunnel-all lookup enabled.
 */
static int
csio_config_global_rss(struct csio_hw *hw)
{
	struct csio_mb *mbp;
	enum fw_retval retval;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	csio_rss_glb_config(hw, mbp, CSIO_MB_DEFAULT_TMO,
			    FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
			    FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
			    FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
			    FW_RSS_GLB_CONFIG_CMD_TNLALLLKP,
			    NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_RSS_GLB_CONFIG_CMD failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_RSS_GLB_CONFIG_CMD returned 0x%x!\n", retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);

	return
0;
}

/*
 * csio_config_pfvf - Configure Physical/Virtual functions settings.
 * @hw: HW module
 *
 */
static int
csio_config_pfvf(struct csio_hw *hw)
{
	struct csio_mb *mbp;
	enum fw_retval retval;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/*
	 * For now, allow all PFs to access to all ports using a pmask
	 * value of 0xF (M_FW_PFVF_CMD_PMASK). Once we have VFs, we will
	 * need to provide access based on some rule.
	 */
	csio_mb_pfvf(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0, CSIO_NEQ,
		     CSIO_NETH_CTRL, CSIO_NIQ_FLINT, 0, 0, CSIO_NVI, CSIO_CMASK,
		     CSIO_PMASK, CSIO_NEXACTF, CSIO_R_CAPS, CSIO_WX_CAPS, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_PFVF_CMD failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_PFVF_CMD returned 0x%x!\n", retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}

/*
 * csio_enable_ports - Bring up all available ports.
 * @hw: HW module.
 *
 */
static int
csio_enable_ports(struct csio_hw *hw)
{
	struct csio_mb  *mbp;
	enum fw_retval retval;
	uint8_t portid;
	int i;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* One mailbox is reused for the read/write pair on every port. */
	for (i = 0; i < hw->num_pports; i++) {
		portid = hw->pport[i].portid;

		/* Read PORT information */
		csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid,
			     false, 0, 0, NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "failed to issue FW_PORT_CMD(r) port:%d\n",
				 portid);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		csio_mb_process_read_port_rsp(hw, mbp, &retval,
					      &hw->pport[i].pcap);
		if (retval != FW_SUCCESS) {
			csio_err(hw, "FW_PORT_CMD(r) port:%d failed: 0x%x\n",
				 portid, retval);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		/* Write back PORT information */
		csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid, true,
			     (PAUSE_RX | PAUSE_TX), hw->pport[i].pcap, NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "failed to issue FW_PORT_CMD(w) port:%d\n",
				 portid);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		retval = csio_mb_fw_retval(mbp);
		if (retval != FW_SUCCESS) {
			csio_err(hw, "FW_PORT_CMD(w) port:%d failed :0x%x\n",
				 portid, retval);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

	} /* For all ports */

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}

/*
 * csio_get_fcoe_resinfo - Read fcoe fw resource info.
 * @hw: HW module
 * Issued with lock held.
 */
static int
csio_get_fcoe_resinfo(struct csio_hw *hw)
{
	struct csio_fcoe_res_info *res_info = &hw->fres_info;
	struct fw_fcoe_res_info_cmd *rsp;
	struct csio_mb  *mbp;
	enum fw_retval retval;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get
FCoE FW resource information */20322032+ csio_fcoe_read_res_info_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);20332033+20342034+ if (csio_mb_issue(hw, mbp)) {20352035+ csio_err(hw, "failed to issue FW_FCOE_RES_INFO_CMD\n");20362036+ mempool_free(mbp, hw->mb_mempool);20372037+ return -EINVAL;20382038+ }20392039+20402040+ rsp = (struct fw_fcoe_res_info_cmd *)(mbp->mb);20412041+ retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16));20422042+ if (retval != FW_SUCCESS) {20432043+ csio_err(hw, "FW_FCOE_RES_INFO_CMD failed with ret x%x\n",20442044+ retval);20452045+ mempool_free(mbp, hw->mb_mempool);20462046+ return -EINVAL;20472047+ }20482048+20492049+ res_info->e_d_tov = ntohs(rsp->e_d_tov);20502050+ res_info->r_a_tov_seq = ntohs(rsp->r_a_tov_seq);20512051+ res_info->r_a_tov_els = ntohs(rsp->r_a_tov_els);20522052+ res_info->r_r_tov = ntohs(rsp->r_r_tov);20532053+ res_info->max_xchgs = ntohl(rsp->max_xchgs);20542054+ res_info->max_ssns = ntohl(rsp->max_ssns);20552055+ res_info->used_xchgs = ntohl(rsp->used_xchgs);20562056+ res_info->used_ssns = ntohl(rsp->used_ssns);20572057+ res_info->max_fcfs = ntohl(rsp->max_fcfs);20582058+ res_info->max_vnps = ntohl(rsp->max_vnps);20592059+ res_info->used_fcfs = ntohl(rsp->used_fcfs);20602060+ res_info->used_vnps = ntohl(rsp->used_vnps);20612061+20622062+ csio_dbg(hw, "max ssns:%d max xchgs:%d\n", res_info->max_ssns,20632063+ res_info->max_xchgs);20642064+ mempool_free(mbp, hw->mb_mempool);20652065+20662066+ return 0;20672067+}20682068+20692069+static int20702070+csio_hw_check_fwconfig(struct csio_hw *hw, u32 *param)20712071+{20722072+ struct csio_mb *mbp;20732073+ enum fw_retval retval;20742074+ u32 _param[1];20752075+20762076+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);20772077+ if (!mbp) {20782078+ CSIO_INC_STATS(hw, n_err_nomem);20792079+ return -ENOMEM;20802080+ }20812081+20822082+ /*20832083+ * Find out whether we're dealing with a version of20842084+ * the firmware which has configuration file support.20852085+ */20862086+ 
_param[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |20872087+ FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));20882088+20892089+ csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,20902090+ ARRAY_SIZE(_param), _param, NULL, false, NULL);20912091+ if (csio_mb_issue(hw, mbp)) {20922092+ csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");20932093+ mempool_free(mbp, hw->mb_mempool);20942094+ return -EINVAL;20952095+ }20962096+20972097+ csio_mb_process_read_params_rsp(hw, mbp, &retval,20982098+ ARRAY_SIZE(_param), _param);20992099+ if (retval != FW_SUCCESS) {21002100+ csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",21012101+ retval);21022102+ mempool_free(mbp, hw->mb_mempool);21032103+ return -EINVAL;21042104+ }21052105+21062106+ mempool_free(mbp, hw->mb_mempool);21072107+ *param = _param[0];21082108+21092109+ return 0;21102110+}21112111+21122112+static int21132113+csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)21142114+{21152115+ int ret = 0;21162116+ const struct firmware *cf;21172117+ struct pci_dev *pci_dev = hw->pdev;21182118+ struct device *dev = &pci_dev->dev;21192119+21202120+ unsigned int mtype = 0, maddr = 0;21212121+ uint32_t *cfg_data;21222122+ int value_to_add = 0;21232123+21242124+ if (request_firmware(&cf, CSIO_CF_FNAME, dev) < 0) {21252125+ csio_err(hw, "could not find config file " CSIO_CF_FNAME21262126+ ",err: %d\n", ret);21272127+ return -ENOENT;21282128+ }21292129+21302130+ if (cf->size%4 != 0)21312131+ value_to_add = 4 - (cf->size % 4);21322132+21332133+ cfg_data = kzalloc(cf->size+value_to_add, GFP_KERNEL);21342134+ if (cfg_data == NULL)21352135+ return -ENOMEM;21362136+21372137+ memcpy((void *)cfg_data, (const void *)cf->data, cf->size);21382138+21392139+ if (csio_hw_check_fwconfig(hw, fw_cfg_param) != 0)21402140+ return -EINVAL;21412141+21422142+ mtype = FW_PARAMS_PARAM_Y_GET(*fw_cfg_param);21432143+ maddr = FW_PARAMS_PARAM_Z_GET(*fw_cfg_param) << 16;21442144+21452145+ ret = csio_memory_write(hw, mtype, 
maddr,21462146+ cf->size + value_to_add, cfg_data);21472147+ if (ret == 0) {21482148+ csio_info(hw, "config file upgraded to " CSIO_CF_FNAME "\n");21492149+ strncpy(path, "/lib/firmware/" CSIO_CF_FNAME, 64);21502150+ }21512151+21522152+ kfree(cfg_data);21532153+ release_firmware(cf);21542154+21552155+ return ret;21562156+}21572157+21582158+/*21592159+ * HW initialization: contact FW, obtain config, perform basic init.21602160+ *21612161+ * If the firmware we're dealing with has Configuration File support, then21622162+ * we use that to perform all configuration -- either using the configuration21632163+ * file stored in flash on the adapter or using a filesystem-local file21642164+ * if available.21652165+ *21662166+ * If we don't have configuration file support in the firmware, then we'll21672167+ * have to set things up the old fashioned way with hard-coded register21682168+ * writes and firmware commands ...21692169+ */21702170+21712171+/*21722172+ * Attempt to initialize the HW via a Firmware Configuration File.21732173+ */21742174+static int21752175+csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)21762176+{21772177+ unsigned int mtype, maddr;21782178+ int rv;21792179+ uint32_t finiver, finicsum, cfcsum;21802180+ int using_flash;21812181+ char path[64];21822182+21832183+ /*21842184+ * Reset device if necessary21852185+ */21862186+ if (reset) {21872187+ rv = csio_do_reset(hw, true);21882188+ if (rv != 0)21892189+ goto bye;21902190+ }21912191+21922192+ /*21932193+ * If we have a configuration file in host ,21942194+ * then use that. Otherwise, use the configuration file stored21952195+ * in the HW flash ...21962196+ */21972197+ spin_unlock_irq(&hw->lock);21982198+ rv = csio_hw_flash_config(hw, fw_cfg_param, path);21992199+ spin_lock_irq(&hw->lock);22002200+ if (rv != 0) {22012201+ if (rv == -ENOENT) {22022202+ /*22032203+ * config file was not found. 
Use default22042204+ * config file from flash.22052205+ */22062206+ mtype = FW_MEMTYPE_CF_FLASH;22072207+ maddr = csio_hw_flash_cfg_addr(hw);22082208+ using_flash = 1;22092209+ } else {22102210+ /*22112211+ * we revert back to the hardwired config if22122212+ * flashing failed.22132213+ */22142214+ goto bye;22152215+ }22162216+ } else {22172217+ mtype = FW_PARAMS_PARAM_Y_GET(*fw_cfg_param);22182218+ maddr = FW_PARAMS_PARAM_Z_GET(*fw_cfg_param) << 16;22192219+ using_flash = 0;22202220+ }22212221+22222222+ hw->cfg_store = (uint8_t)mtype;22232223+22242224+ /*22252225+ * Issue a Capability Configuration command to the firmware to get it22262226+ * to parse the Configuration File.22272227+ */22282228+ rv = csio_hw_fw_config_file(hw, mtype, maddr, &finiver,22292229+ &finicsum, &cfcsum);22302230+ if (rv != 0)22312231+ goto bye;22322232+22332233+ hw->cfg_finiver = finiver;22342234+ hw->cfg_finicsum = finicsum;22352235+ hw->cfg_cfcsum = cfcsum;22362236+ hw->cfg_csum_status = true;22372237+22382238+ if (finicsum != cfcsum) {22392239+ csio_warn(hw,22402240+ "Config File checksum mismatch: csum=%#x, computed=%#x\n",22412241+ finicsum, cfcsum);22422242+22432243+ hw->cfg_csum_status = false;22442244+ }22452245+22462246+ /*22472247+ * Note that we're operating with parameters22482248+ * not supplied by the driver, rather than from hard-wired22492249+ * initialization constants buried in the driver.22502250+ */22512251+ hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;22522252+22532253+ /* device parameters */22542254+ rv = csio_get_device_params(hw);22552255+ if (rv != 0)22562256+ goto bye;22572257+22582258+ /* Configure SGE */22592259+ csio_wr_sge_init(hw);22602260+22612261+ /*22622262+ * And finally tell the firmware to initialize itself using the22632263+ * parameters from the Configuration File.22642264+ */22652265+ /* Post event to notify completion of configuration */22662266+ csio_post_event(&hw->sm, CSIO_HWE_INIT);22672267+22682268+ csio_info(hw,22692269+ "Firmware Configuration 
File %s, version %#x, computed checksum %#x\n",22702270+ (using_flash ? "in device FLASH" : path), finiver, cfcsum);22712271+22722272+ return 0;22732273+22742274+ /*22752275+ * Something bad happened. Return the error ...22762276+ */22772277+bye:22782278+ hw->flags &= ~CSIO_HWF_USING_SOFT_PARAMS;22792279+ csio_dbg(hw, "Configuration file error %d\n", rv);22802280+ return rv;22812281+}22822282+22832283+/*22842284+ * Attempt to initialize the adapter via hard-coded, driver supplied22852285+ * parameters ...22862286+ */22872287+static int22882288+csio_hw_no_fwconfig(struct csio_hw *hw, int reset)22892289+{22902290+ int rv;22912291+ /*22922292+ * Reset device if necessary22932293+ */22942294+ if (reset) {22952295+ rv = csio_do_reset(hw, true);22962296+ if (rv != 0)22972297+ goto out;22982298+ }22992299+23002300+ /* Get and set device capabilities */23012301+ rv = csio_config_device_caps(hw);23022302+ if (rv != 0)23032303+ goto out;23042304+23052305+ /* Config Global RSS command */23062306+ rv = csio_config_global_rss(hw);23072307+ if (rv != 0)23082308+ goto out;23092309+23102310+ /* Configure PF/VF capabilities of device */23112311+ rv = csio_config_pfvf(hw);23122312+ if (rv != 0)23132313+ goto out;23142314+23152315+ /* device parameters */23162316+ rv = csio_get_device_params(hw);23172317+ if (rv != 0)23182318+ goto out;23192319+23202320+ /* Configure SGE */23212321+ csio_wr_sge_init(hw);23222322+23232323+ /* Post event to notify completion of configuration */23242324+ csio_post_event(&hw->sm, CSIO_HWE_INIT);23252325+23262326+out:23272327+ return rv;23282328+}23292329+23302330+/*23312331+ * Returns -EINVAL if attempts to flash the firmware failed23322332+ * else returns 0,23332333+ * if flashing was not attempted because the card had the23342334+ * latest firmware ECANCELED is returned23352335+ */23362336+static int23372337+csio_hw_flash_fw(struct csio_hw *hw)23382338+{23392339+ int ret = -ECANCELED;23402340+ const struct firmware *fw;23412341+ const struct fw_hdr 
*hdr;23422342+ u32 fw_ver;23432343+ struct pci_dev *pci_dev = hw->pdev;23442344+ struct device *dev = &pci_dev->dev ;23452345+23462346+ if (request_firmware(&fw, CSIO_FW_FNAME, dev) < 0) {23472347+ csio_err(hw, "could not find firmware image " CSIO_FW_FNAME23482348+ ",err: %d\n", ret);23492349+ return -EINVAL;23502350+ }23512351+23522352+ hdr = (const struct fw_hdr *)fw->data;23532353+ fw_ver = ntohl(hdr->fw_ver);23542354+ if (FW_HDR_FW_VER_MAJOR_GET(fw_ver) != FW_VERSION_MAJOR)23552355+ return -EINVAL; /* wrong major version, won't do */23562356+23572357+ /*23582358+ * If the flash FW is unusable or we found something newer, load it.23592359+ */23602360+ if (FW_HDR_FW_VER_MAJOR_GET(hw->fwrev) != FW_VERSION_MAJOR ||23612361+ fw_ver > hw->fwrev) {23622362+ ret = csio_hw_fw_upgrade(hw, hw->pfn, fw->data, fw->size,23632363+ /*force=*/false);23642364+ if (!ret)23652365+ csio_info(hw, "firmware upgraded to version %pI4 from "23662366+ CSIO_FW_FNAME "\n", &hdr->fw_ver);23672367+ else23682368+ csio_err(hw, "firmware upgrade failed! 
err=%d\n", ret);23692369+ }23702370+23712371+ release_firmware(fw);23722372+23732373+ return ret;23742374+}23752375+23762376+23772377+/*23782378+ * csio_hw_configure - Configure HW23792379+ * @hw - HW module23802380+ *23812381+ */23822382+static void23832383+csio_hw_configure(struct csio_hw *hw)23842384+{23852385+ int reset = 1;23862386+ int rv;23872387+ u32 param[1];23882388+23892389+ rv = csio_hw_dev_ready(hw);23902390+ if (rv != 0) {23912391+ CSIO_INC_STATS(hw, n_err_fatal);23922392+ csio_post_event(&hw->sm, CSIO_HWE_FATAL);23932393+ goto out;23942394+ }23952395+23962396+ /* HW version */23972397+ hw->chip_ver = (char)csio_rd_reg32(hw, PL_REV);23982398+23992399+ /* Needed for FW download */24002400+ rv = csio_hw_get_flash_params(hw);24012401+ if (rv != 0) {24022402+ csio_err(hw, "Failed to get serial flash params rv:%d\n", rv);24032403+ csio_post_event(&hw->sm, CSIO_HWE_FATAL);24042404+ goto out;24052405+ }24062406+24072407+ /* Set pci completion timeout value to 4 seconds. */24082408+ csio_set_pcie_completion_timeout(hw, 0xd);24092409+24102410+ csio_hw_set_mem_win(hw);24112411+24122412+ rv = csio_hw_get_fw_version(hw, &hw->fwrev);24132413+ if (rv != 0)24142414+ goto out;24152415+24162416+ csio_hw_print_fw_version(hw, "Firmware revision");24172417+24182418+ rv = csio_do_hello(hw, &hw->fw_state);24192419+ if (rv != 0) {24202420+ CSIO_INC_STATS(hw, n_err_fatal);24212421+ csio_post_event(&hw->sm, CSIO_HWE_FATAL);24222422+ goto out;24232423+ }24242424+24252425+ /* Read vpd */24262426+ rv = csio_hw_get_vpd_params(hw, &hw->vpd);24272427+ if (rv != 0)24282428+ goto out;24292429+24302430+ if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {24312431+ rv = csio_hw_check_fw_version(hw);24322432+ if (rv == -EINVAL) {24332433+24342434+ /* Do firmware update */24352435+ spin_unlock_irq(&hw->lock);24362436+ rv = csio_hw_flash_fw(hw);24372437+ spin_lock_irq(&hw->lock);24382438+24392439+ if (rv == 0) {24402440+ reset = 0;24412441+ /*24422442+ * Note that the chip 
was reset as part of the24432443+ * firmware upgrade so we don't reset it again24442444+ * below and grab the new firmware version.24452445+ */24462446+ rv = csio_hw_check_fw_version(hw);24472447+ }24482448+ }24492449+ /*24502450+ * If the firmware doesn't support Configuration24512451+ * Files, use the old Driver-based, hard-wired24522452+ * initialization. Otherwise, try using the24532453+ * Configuration File support and fall back to the24542454+ * Driver-based initialization if there's no24552455+ * Configuration File found.24562456+ */24572457+ if (csio_hw_check_fwconfig(hw, param) == 0) {24582458+ rv = csio_hw_use_fwconfig(hw, reset, param);24592459+ if (rv == -ENOENT)24602460+ goto out;24612461+ if (rv != 0) {24622462+ csio_info(hw,24632463+ "No Configuration File present "24642464+ "on adapter. Using hard-wired "24652465+ "configuration parameters.\n");24662466+ rv = csio_hw_no_fwconfig(hw, reset);24672467+ }24682468+ } else {24692469+ rv = csio_hw_no_fwconfig(hw, reset);24702470+ }24712471+24722472+ if (rv != 0)24732473+ goto out;24742474+24752475+ } else {24762476+ if (hw->fw_state == CSIO_DEV_STATE_INIT) {24772477+24782478+ /* device parameters */24792479+ rv = csio_get_device_params(hw);24802480+ if (rv != 0)24812481+ goto out;24822482+24832483+ /* Get device capabilities */24842484+ rv = csio_config_device_caps(hw);24852485+ if (rv != 0)24862486+ goto out;24872487+24882488+ /* Configure SGE */24892489+ csio_wr_sge_init(hw);24902490+24912491+ /* Post event to notify completion of configuration */24922492+ csio_post_event(&hw->sm, CSIO_HWE_INIT);24932493+ goto out;24942494+ }24952495+ } /* if not master */24962496+24972497+out:24982498+ return;24992499+}25002500+25012501+/*25022502+ * csio_hw_initialize - Initialize HW25032503+ * @hw - HW module25042504+ *25052505+ */25062506+static void25072507+csio_hw_initialize(struct csio_hw *hw)25082508+{25092509+ struct csio_mb *mbp;25102510+ enum fw_retval retval;25112511+ int rv;25122512+ int i;25132513+25142514+ 
if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {25152515+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);25162516+ if (!mbp)25172517+ goto out;25182518+25192519+ csio_mb_initialize(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);25202520+25212521+ if (csio_mb_issue(hw, mbp)) {25222522+ csio_err(hw, "Issue of FW_INITIALIZE_CMD failed!\n");25232523+ goto free_and_out;25242524+ }25252525+25262526+ retval = csio_mb_fw_retval(mbp);25272527+ if (retval != FW_SUCCESS) {25282528+ csio_err(hw, "FW_INITIALIZE_CMD returned 0x%x!\n",25292529+ retval);25302530+ goto free_and_out;25312531+ }25322532+25332533+ mempool_free(mbp, hw->mb_mempool);25342534+ }25352535+25362536+ rv = csio_get_fcoe_resinfo(hw);25372537+ if (rv != 0) {25382538+ csio_err(hw, "Failed to read fcoe resource info: %d\n", rv);25392539+ goto out;25402540+ }25412541+25422542+ spin_unlock_irq(&hw->lock);25432543+ rv = csio_config_queues(hw);25442544+ spin_lock_irq(&hw->lock);25452545+25462546+ if (rv != 0) {25472547+ csio_err(hw, "Config of queues failed!: %d\n", rv);25482548+ goto out;25492549+ }25502550+25512551+ for (i = 0; i < hw->num_pports; i++)25522552+ hw->pport[i].mod_type = FW_PORT_MOD_TYPE_NA;25532553+25542554+ if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {25552555+ rv = csio_enable_ports(hw);25562556+ if (rv != 0) {25572557+ csio_err(hw, "Failed to enable ports: %d\n", rv);25582558+ goto out;25592559+ }25602560+ }25612561+25622562+ csio_post_event(&hw->sm, CSIO_HWE_INIT_DONE);25632563+ return;25642564+25652565+free_and_out:25662566+ mempool_free(mbp, hw->mb_mempool);25672567+out:25682568+ return;25692569+}25702570+25712571+#define PF_INTR_MASK (PFSW | PFCIM)25722572+25732573+/*25742574+ * csio_hw_intr_enable - Enable HW interrupts25752575+ * @hw: Pointer to HW module.25762576+ *25772577+ * Enable interrupts in HW registers.25782578+ */25792579+static void25802580+csio_hw_intr_enable(struct csio_hw *hw)25812581+{25822582+ uint16_t vec = 
(uint16_t)csio_get_mb_intr_idx(csio_hw_to_mbm(hw));25832583+ uint32_t pf = SOURCEPF_GET(csio_rd_reg32(hw, PL_WHOAMI));25842584+ uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE);25852585+25862586+ /*25872587+ * Set aivec for MSI/MSIX. PCIE_PF_CFG.INTXType is set up25882588+ * by FW, so do nothing for INTX.25892589+ */25902590+ if (hw->intr_mode == CSIO_IM_MSIX)25912591+ csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG),25922592+ AIVEC(AIVEC_MASK), vec);25932593+ else if (hw->intr_mode == CSIO_IM_MSI)25942594+ csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG),25952595+ AIVEC(AIVEC_MASK), 0);25962596+25972597+ csio_wr_reg32(hw, PF_INTR_MASK, MYPF_REG(PL_PF_INT_ENABLE));25982598+25992599+ /* Turn on MB interrupts - this will internally flush PIO as well */26002600+ csio_mb_intr_enable(hw);26012601+26022602+ /* These are common registers - only a master can modify them */26032603+ if (csio_is_hw_master(hw)) {26042604+ /*26052605+ * Disable the Serial FLASH interrupt, if enabled!26062606+ */26072607+ pl &= (~SF);26082608+ csio_wr_reg32(hw, pl, PL_INT_ENABLE);26092609+26102610+ csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE |26112611+ EGRESS_SIZE_ERR | ERR_INVALID_CIDX_INC |26122612+ ERR_CPL_OPCODE_0 | ERR_DROPPED_DB |26132613+ ERR_DATA_CPL_ON_HIGH_QID1 |26142614+ ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |26152615+ ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |26162616+ ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |26172617+ ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR,26182618+ SGE_INT_ENABLE3);26192619+ csio_set_reg_field(hw, PL_INT_MAP0, 0, 1 << pf);26202620+ }26212621+26222622+ hw->flags |= CSIO_HWF_HW_INTR_ENABLED;26232623+26242624+}26252625+26262626+/*26272627+ * csio_hw_intr_disable - Disable HW interrupts26282628+ * @hw: Pointer to HW module.26292629+ *26302630+ * Turn off Mailbox and PCI_PF_CFG interrupts.26312631+ */26322632+void26332633+csio_hw_intr_disable(struct csio_hw *hw)26342634+{26352635+ uint32_t pf = SOURCEPF_GET(csio_rd_reg32(hw, PL_WHOAMI));26362636+26372637+ if (!(hw->flags & 
CSIO_HWF_HW_INTR_ENABLED))26382638+ return;26392639+26402640+ hw->flags &= ~CSIO_HWF_HW_INTR_ENABLED;26412641+26422642+ csio_wr_reg32(hw, 0, MYPF_REG(PL_PF_INT_ENABLE));26432643+ if (csio_is_hw_master(hw))26442644+ csio_set_reg_field(hw, PL_INT_MAP0, 1 << pf, 0);26452645+26462646+ /* Turn off MB interrupts */26472647+ csio_mb_intr_disable(hw);26482648+26492649+}26502650+26512651+static void26522652+csio_hw_fatal_err(struct csio_hw *hw)26532653+{26542654+ csio_set_reg_field(hw, SGE_CONTROL, GLOBALENABLE, 0);26552655+ csio_hw_intr_disable(hw);26562656+26572657+ /* Do not reset HW, we may need FW state for debugging */26582658+ csio_fatal(hw, "HW Fatal error encountered!\n");26592659+}26602660+26612661+/*****************************************************************************/26622662+/* START: HW SM */26632663+/*****************************************************************************/26642664+/*26652665+ * csio_hws_uninit - Uninit state26662666+ * @hw - HW module26672667+ * @evt - Event26682668+ *26692669+ */26702670+static void26712671+csio_hws_uninit(struct csio_hw *hw, enum csio_hw_ev evt)26722672+{26732673+ hw->prev_evt = hw->cur_evt;26742674+ hw->cur_evt = evt;26752675+ CSIO_INC_STATS(hw, n_evt_sm[evt]);26762676+26772677+ switch (evt) {26782678+ case CSIO_HWE_CFG:26792679+ csio_set_state(&hw->sm, csio_hws_configuring);26802680+ csio_hw_configure(hw);26812681+ break;26822682+26832683+ default:26842684+ CSIO_INC_STATS(hw, n_evt_unexp);26852685+ break;26862686+ }26872687+}26882688+26892689+/*26902690+ * csio_hws_configuring - Configuring state26912691+ * @hw - HW module26922692+ * @evt - Event26932693+ *26942694+ */26952695+static void26962696+csio_hws_configuring(struct csio_hw *hw, enum csio_hw_ev evt)26972697+{26982698+ hw->prev_evt = hw->cur_evt;26992699+ hw->cur_evt = evt;27002700+ CSIO_INC_STATS(hw, n_evt_sm[evt]);27012701+27022702+ switch (evt) {27032703+ case CSIO_HWE_INIT:27042704+ csio_set_state(&hw->sm, csio_hws_initializing);27052705+ 
csio_hw_initialize(hw);27062706+ break;27072707+27082708+ case CSIO_HWE_INIT_DONE:27092709+ csio_set_state(&hw->sm, csio_hws_ready);27102710+ /* Fan out event to all lnode SMs */27112711+ csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);27122712+ break;27132713+27142714+ case CSIO_HWE_FATAL:27152715+ csio_set_state(&hw->sm, csio_hws_uninit);27162716+ break;27172717+27182718+ case CSIO_HWE_PCI_REMOVE:27192719+ csio_do_bye(hw);27202720+ break;27212721+ default:27222722+ CSIO_INC_STATS(hw, n_evt_unexp);27232723+ break;27242724+ }27252725+}27262726+27272727+/*27282728+ * csio_hws_initializing - Initialiazing state27292729+ * @hw - HW module27302730+ * @evt - Event27312731+ *27322732+ */27332733+static void27342734+csio_hws_initializing(struct csio_hw *hw, enum csio_hw_ev evt)27352735+{27362736+ hw->prev_evt = hw->cur_evt;27372737+ hw->cur_evt = evt;27382738+ CSIO_INC_STATS(hw, n_evt_sm[evt]);27392739+27402740+ switch (evt) {27412741+ case CSIO_HWE_INIT_DONE:27422742+ csio_set_state(&hw->sm, csio_hws_ready);27432743+27442744+ /* Fan out event to all lnode SMs */27452745+ csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);27462746+27472747+ /* Enable interrupts */27482748+ csio_hw_intr_enable(hw);27492749+ break;27502750+27512751+ case CSIO_HWE_FATAL:27522752+ csio_set_state(&hw->sm, csio_hws_uninit);27532753+ break;27542754+27552755+ case CSIO_HWE_PCI_REMOVE:27562756+ csio_do_bye(hw);27572757+ break;27582758+27592759+ default:27602760+ CSIO_INC_STATS(hw, n_evt_unexp);27612761+ break;27622762+ }27632763+}27642764+27652765+/*27662766+ * csio_hws_ready - Ready state27672767+ * @hw - HW module27682768+ * @evt - Event27692769+ *27702770+ */27712771+static void27722772+csio_hws_ready(struct csio_hw *hw, enum csio_hw_ev evt)27732773+{27742774+ /* Remember the event */27752775+ hw->evtflag = evt;27762776+27772777+ hw->prev_evt = hw->cur_evt;27782778+ hw->cur_evt = evt;27792779+ CSIO_INC_STATS(hw, n_evt_sm[evt]);27802780+27812781+ switch (evt) {27822782+ case 
CSIO_HWE_HBA_RESET:27832783+ case CSIO_HWE_FW_DLOAD:27842784+ case CSIO_HWE_SUSPEND:27852785+ case CSIO_HWE_PCI_REMOVE:27862786+ case CSIO_HWE_PCIERR_DETECTED:27872787+ csio_set_state(&hw->sm, csio_hws_quiescing);27882788+ /* cleanup all outstanding cmds */27892789+ if (evt == CSIO_HWE_HBA_RESET ||27902790+ evt == CSIO_HWE_PCIERR_DETECTED)27912791+ csio_scsim_cleanup_io(csio_hw_to_scsim(hw), false);27922792+ else27932793+ csio_scsim_cleanup_io(csio_hw_to_scsim(hw), true);27942794+27952795+ csio_hw_intr_disable(hw);27962796+ csio_hw_mbm_cleanup(hw);27972797+ csio_evtq_stop(hw);27982798+ csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWSTOP);27992799+ csio_evtq_flush(hw);28002800+ csio_mgmtm_cleanup(csio_hw_to_mgmtm(hw));28012801+ csio_post_event(&hw->sm, CSIO_HWE_QUIESCED);28022802+ break;28032803+28042804+ case CSIO_HWE_FATAL:28052805+ csio_set_state(&hw->sm, csio_hws_uninit);28062806+ break;28072807+28082808+ default:28092809+ CSIO_INC_STATS(hw, n_evt_unexp);28102810+ break;28112811+ }28122812+}28132813+28142814+/*28152815+ * csio_hws_quiescing - Quiescing state28162816+ * @hw - HW module28172817+ * @evt - Event28182818+ *28192819+ */28202820+static void28212821+csio_hws_quiescing(struct csio_hw *hw, enum csio_hw_ev evt)28222822+{28232823+ hw->prev_evt = hw->cur_evt;28242824+ hw->cur_evt = evt;28252825+ CSIO_INC_STATS(hw, n_evt_sm[evt]);28262826+28272827+ switch (evt) {28282828+ case CSIO_HWE_QUIESCED:28292829+ switch (hw->evtflag) {28302830+ case CSIO_HWE_FW_DLOAD:28312831+ csio_set_state(&hw->sm, csio_hws_resetting);28322832+ /* Download firmware */28332833+ /* Fall through */28342834+28352835+ case CSIO_HWE_HBA_RESET:28362836+ csio_set_state(&hw->sm, csio_hws_resetting);28372837+ /* Start reset of the HBA */28382838+ csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWRESET);28392839+ csio_wr_destroy_queues(hw, false);28402840+ csio_do_reset(hw, false);28412841+ csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET_DONE);28422842+ break;28432843+28442844+ case CSIO_HWE_PCI_REMOVE:28452845+ 
csio_set_state(&hw->sm, csio_hws_removing);28462846+ csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREMOVE);28472847+ csio_wr_destroy_queues(hw, true);28482848+ /* Now send the bye command */28492849+ csio_do_bye(hw);28502850+ break;28512851+28522852+ case CSIO_HWE_SUSPEND:28532853+ csio_set_state(&hw->sm, csio_hws_quiesced);28542854+ break;28552855+28562856+ case CSIO_HWE_PCIERR_DETECTED:28572857+ csio_set_state(&hw->sm, csio_hws_pcierr);28582858+ csio_wr_destroy_queues(hw, false);28592859+ break;28602860+28612861+ default:28622862+ CSIO_INC_STATS(hw, n_evt_unexp);28632863+ break;28642864+28652865+ }28662866+ break;28672867+28682868+ default:28692869+ CSIO_INC_STATS(hw, n_evt_unexp);28702870+ break;28712871+ }28722872+}28732873+28742874+/*28752875+ * csio_hws_quiesced - Quiesced state28762876+ * @hw - HW module28772877+ * @evt - Event28782878+ *28792879+ */28802880+static void28812881+csio_hws_quiesced(struct csio_hw *hw, enum csio_hw_ev evt)28822882+{28832883+ hw->prev_evt = hw->cur_evt;28842884+ hw->cur_evt = evt;28852885+ CSIO_INC_STATS(hw, n_evt_sm[evt]);28862886+28872887+ switch (evt) {28882888+ case CSIO_HWE_RESUME:28892889+ csio_set_state(&hw->sm, csio_hws_configuring);28902890+ csio_hw_configure(hw);28912891+ break;28922892+28932893+ default:28942894+ CSIO_INC_STATS(hw, n_evt_unexp);28952895+ break;28962896+ }28972897+}28982898+28992899+/*29002900+ * csio_hws_resetting - HW Resetting state29012901+ * @hw - HW module29022902+ * @evt - Event29032903+ *29042904+ */29052905+static void29062906+csio_hws_resetting(struct csio_hw *hw, enum csio_hw_ev evt)29072907+{29082908+ hw->prev_evt = hw->cur_evt;29092909+ hw->cur_evt = evt;29102910+ CSIO_INC_STATS(hw, n_evt_sm[evt]);29112911+29122912+ switch (evt) {29132913+ case CSIO_HWE_HBA_RESET_DONE:29142914+ csio_evtq_start(hw);29152915+ csio_set_state(&hw->sm, csio_hws_configuring);29162916+ csio_hw_configure(hw);29172917+ break;29182918+29192919+ default:29202920+ CSIO_INC_STATS(hw, n_evt_unexp);29212921+ break;29222922+ 
}29232923+}29242924+29252925+/*29262926+ * csio_hws_removing - PCI Hotplug removing state29272927+ * @hw - HW module29282928+ * @evt - Event29292929+ *29302930+ */29312931+static void29322932+csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt)29332933+{29342934+ hw->prev_evt = hw->cur_evt;29352935+ hw->cur_evt = evt;29362936+ CSIO_INC_STATS(hw, n_evt_sm[evt]);29372937+29382938+ switch (evt) {29392939+ case CSIO_HWE_HBA_RESET:29402940+ if (!csio_is_hw_master(hw))29412941+ break;29422942+ /*29432943+ * The BYE should have alerady been issued, so we cant29442944+ * use the mailbox interface. Hence we use the PL_RST29452945+ * register directly.29462946+ */29472947+ csio_err(hw, "Resetting HW and waiting 2 seconds...\n");29482948+ csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);29492949+ mdelay(2000);29502950+ break;29512951+29522952+ /* Should never receive any new events */29532953+ default:29542954+ CSIO_INC_STATS(hw, n_evt_unexp);29552955+ break;29562956+29572957+ }29582958+}29592959+29602960+/*29612961+ * csio_hws_pcierr - PCI Error state29622962+ * @hw - HW module29632963+ * @evt - Event29642964+ *29652965+ */29662966+static void29672967+csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt)29682968+{29692969+ hw->prev_evt = hw->cur_evt;29702970+ hw->cur_evt = evt;29712971+ CSIO_INC_STATS(hw, n_evt_sm[evt]);29722972+29732973+ switch (evt) {29742974+ case CSIO_HWE_PCIERR_SLOT_RESET:29752975+ csio_evtq_start(hw);29762976+ csio_set_state(&hw->sm, csio_hws_configuring);29772977+ csio_hw_configure(hw);29782978+ break;29792979+29802980+ default:29812981+ CSIO_INC_STATS(hw, n_evt_unexp);29822982+ break;29832983+ }29842984+}29852985+29862986+/*****************************************************************************/29872987+/* END: HW SM */29882988+/*****************************************************************************/29892989+29902990+/* Slow path handlers */29912991+struct intr_info {29922992+ unsigned int mask; /* bits to check in interrupt status 
*/29932993+ const char *msg; /* message to print or NULL */29942994+ short stat_idx; /* stat counter to increment or -1 */29952995+ unsigned short fatal; /* whether the condition reported is fatal */29962996+};29972997+29982998+/*29992999+ * csio_handle_intr_status - table driven interrupt handler30003000+ * @hw: HW instance30013001+ * @reg: the interrupt status register to process30023002+ * @acts: table of interrupt actions30033003+ *30043004+ * A table driven interrupt handler that applies a set of masks to an30053005+ * interrupt status word and performs the corresponding actions if the30063006+ * interrupts described by the mask have occured. The actions include30073007+ * optionally emitting a warning or alert message. The table is terminated30083008+ * by an entry specifying mask 0. Returns the number of fatal interrupt30093009+ * conditions.30103010+ */30113011+static int30123012+csio_handle_intr_status(struct csio_hw *hw, unsigned int reg,30133013+ const struct intr_info *acts)30143014+{30153015+ int fatal = 0;30163016+ unsigned int mask = 0;30173017+ unsigned int status = csio_rd_reg32(hw, reg);30183018+30193019+ for ( ; acts->mask; ++acts) {30203020+ if (!(status & acts->mask))30213021+ continue;30223022+ if (acts->fatal) {30233023+ fatal++;30243024+ csio_fatal(hw, "Fatal %s (0x%x)\n",30253025+ acts->msg, status & acts->mask);30263026+ } else if (acts->msg)30273027+ csio_info(hw, "%s (0x%x)\n",30283028+ acts->msg, status & acts->mask);30293029+ mask |= acts->mask;30303030+ }30313031+ status &= mask;30323032+ if (status) /* clear processed interrupts */30333033+ csio_wr_reg32(hw, status, reg);30343034+ return fatal;30353035+}30363036+30373037+/*30383038+ * Interrupt handler for the PCIE module.30393039+ */30403040+static void30413041+csio_pcie_intr_handler(struct csio_hw *hw)30423042+{30433043+ static struct intr_info sysbus_intr_info[] = {30443044+ { RNPP, "RXNP array parity error", -1, 1 },30453045+ { RPCP, "RXPC array parity error", -1, 1 },30463046+ { 
RCIP, "RXCIF array parity error", -1, 1 },30473047+ { RCCP, "Rx completions control array parity error", -1, 1 },30483048+ { RFTP, "RXFT array parity error", -1, 1 },30493049+ { 0, NULL, 0, 0 }30503050+ };30513051+ static struct intr_info pcie_port_intr_info[] = {30523052+ { TPCP, "TXPC array parity error", -1, 1 },30533053+ { TNPP, "TXNP array parity error", -1, 1 },30543054+ { TFTP, "TXFT array parity error", -1, 1 },30553055+ { TCAP, "TXCA array parity error", -1, 1 },30563056+ { TCIP, "TXCIF array parity error", -1, 1 },30573057+ { RCAP, "RXCA array parity error", -1, 1 },30583058+ { OTDD, "outbound request TLP discarded", -1, 1 },30593059+ { RDPE, "Rx data parity error", -1, 1 },30603060+ { TDUE, "Tx uncorrectable data error", -1, 1 },30613061+ { 0, NULL, 0, 0 }30623062+ };30633063+ static struct intr_info pcie_intr_info[] = {30643064+ { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },30653065+ { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },30663066+ { MSIDATAPERR, "MSI data parity error", -1, 1 },30673067+ { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },30683068+ { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },30693069+ { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },30703070+ { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },30713071+ { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },30723072+ { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },30733073+ { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },30743074+ { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },30753075+ { CREQPERR, "PCI CMD channel request parity error", -1, 1 },30763076+ { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },30773077+ { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },30783078+ { DREQPERR, "PCI DMA channel request parity error", -1, 1 },30793079+ { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },30803080+ { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },30813081+ { HREQPERR, "PCI HMA 
channel request parity error", -1, 1 },30823082+ { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },30833083+ { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },30843084+ { FIDPERR, "PCI FID parity error", -1, 1 },30853085+ { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },30863086+ { MATAGPERR, "PCI MA tag parity error", -1, 1 },30873087+ { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },30883088+ { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },30893089+ { RXWRPERR, "PCI Rx write parity error", -1, 1 },30903090+ { RPLPERR, "PCI replay buffer parity error", -1, 1 },30913091+ { PCIESINT, "PCI core secondary fault", -1, 1 },30923092+ { PCIEPINT, "PCI core primary fault", -1, 1 },30933093+ { UNXSPLCPLERR, "PCI unexpected split completion error", -1,30943094+ 0 },30953095+ { 0, NULL, 0, 0 }30963096+ };30973097+30983098+ int fat;30993099+31003100+ fat = csio_handle_intr_status(hw,31013101+ PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,31023102+ sysbus_intr_info) +31033103+ csio_handle_intr_status(hw,31043104+ PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,31053105+ pcie_port_intr_info) +31063106+ csio_handle_intr_status(hw, PCIE_INT_CAUSE, pcie_intr_info);31073107+ if (fat)31083108+ csio_hw_fatal_err(hw);31093109+}31103110+31113111+/*31123112+ * TP interrupt handler.31133113+ */31143114+static void csio_tp_intr_handler(struct csio_hw *hw)31153115+{31163116+ static struct intr_info tp_intr_info[] = {31173117+ { 0x3fffffff, "TP parity error", -1, 1 },31183118+ { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },31193119+ { 0, NULL, 0, 0 }31203120+ };31213121+31223122+ if (csio_handle_intr_status(hw, TP_INT_CAUSE, tp_intr_info))31233123+ csio_hw_fatal_err(hw);31243124+}31253125+31263126+/*31273127+ * SGE interrupt handler.31283128+ */31293129+static void csio_sge_intr_handler(struct csio_hw *hw)31303130+{31313131+ uint64_t v;31323132+31333133+ static struct intr_info sge_intr_info[] = {31343134+ { ERR_CPL_EXCEED_IQE_SIZE,31353135+ "SGE received CPL exceeding IQE 
size", -1, 1 },31363136+ { ERR_INVALID_CIDX_INC,31373137+ "SGE GTS CIDX increment too large", -1, 0 },31383138+ { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },31393139+ { ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },31403140+ { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,31413141+ "SGE IQID > 1023 received CPL for FL", -1, 0 },31423142+ { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,31433143+ 0 },31443144+ { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,31453145+ 0 },31463146+ { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,31473147+ 0 },31483148+ { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,31493149+ 0 },31503150+ { ERR_ING_CTXT_PRIO,31513151+ "SGE too many priority ingress contexts", -1, 0 },31523152+ { ERR_EGR_CTXT_PRIO,31533153+ "SGE too many priority egress contexts", -1, 0 },31543154+ { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },31553155+ { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },31563156+ { 0, NULL, 0, 0 }31573157+ };31583158+31593159+ v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1) |31603160+ ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2) << 32);31613161+ if (v) {31623162+ csio_fatal(hw, "SGE parity error (%#llx)\n",31633163+ (unsigned long long)v);31643164+ csio_wr_reg32(hw, (uint32_t)(v & 0xFFFFFFFF),31653165+ SGE_INT_CAUSE1);31663166+ csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2);31673167+ }31683168+31693169+ v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3, sge_intr_info);31703170+31713171+ if (csio_handle_intr_status(hw, SGE_INT_CAUSE3, sge_intr_info) ||31723172+ v != 0)31733173+ csio_hw_fatal_err(hw);31743174+}31753175+31763176+#define CIM_OBQ_INTR (OBQULP0PARERR | OBQULP1PARERR | OBQULP2PARERR |\31773177+ OBQULP3PARERR | OBQSGEPARERR | OBQNCSIPARERR)31783178+#define CIM_IBQ_INTR (IBQTP0PARERR | IBQTP1PARERR | IBQULPPARERR |\31793179+ IBQSGEHIPARERR | IBQSGELOPARERR | IBQNCSIPARERR)31803180+31813181+/*31823182+ * CIM interrupt handler.31833183+ 
 */
static void csio_cim_intr_handler(struct csio_hw *hw)
{
	/* Fatal CIM host interrupt sources (queue/mailbox parity, etc.) */
	static struct intr_info cim_intr_info[] = {
		{ PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
		{ MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
		{ MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
		{ TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
		{ TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	/* uP-access faults: illegal/boot/flash/EEPROM/CTL/PL space access. */
	static struct intr_info cim_upintr_info[] = {
		{ RSVDSPACEINT, "CIM reserved space access", -1, 1 },
		{ ILLTRANSINT, "CIM illegal transaction", -1, 1 },
		{ ILLWRINT, "CIM illegal write", -1, 1 },
		{ ILLRDINT, "CIM illegal read", -1, 1 },
		{ ILLRDBEINT, "CIM illegal read BE", -1, 1 },
		{ ILLWRBEINT, "CIM illegal write BE", -1, 1 },
		{ SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
		{ SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
		{ BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
		{ SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
		{ SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
		{ BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
		{ SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
		{ SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
		{ BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
		{ BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
		{ SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
		{ SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
		{ BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
		{ BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
		{ SGLRDPLINT , "CIM single read from PL space", -1, 1 },
		{ SGLWRPLINT , "CIM single write to PL space", -1, 1 },
		{ BLKRDPLINT , "CIM block read from PL space", -1, 1 },
		{ BLKWRPLINT , "CIM block write to PL space", -1, 1 },
		{ REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
		{ RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
		{ TIMEOUTINT , "CIM PIF timeout", -1, 1 },
		{ TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	int fat;

	/* Non-zero sum => at least one fatal cause bit was reported. */
	fat = csio_handle_intr_status(hw, CIM_HOST_INT_CAUSE,
				      cim_intr_info) +
	      csio_handle_intr_status(hw, CIM_HOST_UPACC_INT_CAUSE,
				      cim_upintr_info);
	if (fat)
		csio_hw_fatal_err(hw);
}

/*
 * ULP RX interrupt handler.
 */
static void csio_ulprx_intr_handler(struct csio_hw *hw)
{
	static struct intr_info ulprx_intr_info[] = {
		{ 0x1800000, "ULPRX context error", -1, 1 },
		{ 0x7fffff, "ULPRX parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, ULP_RX_INT_CAUSE, ulprx_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * ULP TX interrupt handler.
 */
static void csio_ulptx_intr_handler(struct csio_hw *hw)
{
	static struct intr_info ulptx_intr_info[] = {
		{ PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
		  0 },
		{ 0xfffffff, "ULPTX parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, ULP_TX_INT_CAUSE, ulptx_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * PM TX interrupt handler.
 */
static void csio_pmtx_intr_handler(struct csio_hw *hw)
{
	static struct intr_info pmtx_intr_info[] = {
		{ PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
		{ PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
		{ PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
		{ ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
		{ 0xffffff0, "PMTX framing error", -1, 1 },
		{ OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
		{ DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
		  1 },
		{ ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
		{ C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, PM_TX_INT_CAUSE, pmtx_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * PM RX interrupt handler.
 */
static void csio_pmrx_intr_handler(struct csio_hw *hw)
{
	static struct intr_info pmrx_intr_info[] = {
		{ ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
		{ 0x3ffff0, "PMRX framing error", -1, 1 },
		{ OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
		{ DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
		  1 },
		{ IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
		{ E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, PM_RX_INT_CAUSE, pmrx_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * CPL switch interrupt handler.
 */
static void csio_cplsw_intr_handler(struct csio_hw *hw)
{
	static struct intr_info cplsw_intr_info[] = {
		{ CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error",
		  -1, 1 },
		{ CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
		{ TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
		{ SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
		{ CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
		{ ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, CPL_INTR_CAUSE, cplsw_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * LE interrupt handler.
 */
static void csio_le_intr_handler(struct csio_hw *hw)
{
	static struct intr_info le_intr_info[] = {
		{ LIPMISS, "LE LIP miss", -1, 0 },
		{ LIP0, "LE 0 LIP error", -1, 0 },
		{ PARITYERR, "LE parity error", -1, 1 },
		{ UNKNOWNCMD, "LE unknown command", -1, 1 },
		{ REQQPARERR, "LE request queue parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE, le_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * MPS interrupt handler.
 */
static void csio_mps_intr_handler(struct csio_hw *hw)
{
	/* One cause table per MPS sub-block cause register, see below. */
	static struct intr_info mps_rx_intr_info[] = {
		{ 0xffffff, "MPS Rx parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_tx_intr_info[] = {
		{ TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
		{ NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
		{ TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
		{ BUBBLE, "MPS Tx underflow", -1, 1 },
		{ SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
		{ FRMERR, "MPS Tx framing error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_trc_intr_info[] = {
		{ FILTMEM, "MPS TRC filter parity error", -1, 1 },
		{ PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
		{ MISCPERR, "MPS TRC misc parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_stat_sram_intr_info[] = {
		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_stat_tx_intr_info[] = {
		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_stat_rx_intr_info[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_cls_intr_info[] = {
		{ MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
		{ MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
		{ HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	int fat;

	fat = csio_handle_intr_status(hw, MPS_RX_PERR_INT_CAUSE,
				      mps_rx_intr_info) +
	      csio_handle_intr_status(hw, MPS_TX_INT_CAUSE,
				      mps_tx_intr_info) +
	      csio_handle_intr_status(hw, MPS_TRC_INT_CAUSE,
				      mps_trc_intr_info) +
	      csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_SRAM,
				      mps_stat_sram_intr_info) +
	      csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
				      mps_stat_tx_intr_info) +
	      csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
				      mps_stat_rx_intr_info) +
	      csio_handle_intr_status(hw, MPS_CLS_INT_CAUSE,
				      mps_cls_intr_info);

	/* Clear the top-level MPS cause and read it back to flush the write. */
	csio_wr_reg32(hw, 0, MPS_INT_CAUSE);
	csio_rd_reg32(hw, MPS_INT_CAUSE); /* flush */
	if (fat)
		csio_hw_fatal_err(hw);
}

#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)

/*
 * EDC/MC interrupt handler.
 */
static
void csio_mem_intr_handler(struct csio_hw *hw, int idx)
{
	/* idx selects the memory controller: MEM_EDC0/MEM_EDC1/MEM_MC. */
	static const char name[3][5] = { "EDC0", "EDC1", "MC" };

	unsigned int addr, cnt_addr, v;

	if (idx <= MEM_EDC1) {
		addr = EDC_REG(EDC_INT_CAUSE, idx);
		cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
	} else {
		addr = MC_INT_CAUSE;
		cnt_addr = MC_ECC_STATUS;
	}

	v = csio_rd_reg32(hw, addr) & MEM_INT_MASK;
	if (v & PERR_INT_CAUSE)
		csio_fatal(hw, "%s FIFO parity error\n", name[idx]);
	if (v & ECC_CE_INT_CAUSE) {
		uint32_t cnt = ECC_CECNT_GET(csio_rd_reg32(hw, cnt_addr));

		/* Writing the count mask resets the correctable-error count. */
		csio_wr_reg32(hw, ECC_CECNT_MASK, cnt_addr);
		csio_warn(hw, "%u %s correctable ECC data error%s\n",
			  cnt, name[idx], cnt > 1 ? "s" : "");
	}
	if (v & ECC_UE_INT_CAUSE)
		csio_fatal(hw, "%s uncorrectable ECC data error\n", name[idx]);

	/* Ack the handled causes; only parity/UE escalate to fatal. */
	csio_wr_reg32(hw, v, addr);
	if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
		csio_hw_fatal_err(hw);
}

/*
 * MA interrupt handler.  Any MA interrupt is treated as fatal.
 */
static void csio_ma_intr_handler(struct csio_hw *hw)
{
	uint32_t v, status = csio_rd_reg32(hw, MA_INT_CAUSE);

	if (status & MEM_PERR_INT_CAUSE)
		csio_fatal(hw, "MA parity error, parity status %#x\n",
			   csio_rd_reg32(hw, MA_PARITY_ERROR_STATUS));
	if (status & MEM_WRAP_INT_CAUSE) {
		v = csio_rd_reg32(hw, MA_INT_WRAP_STATUS);
		csio_fatal(hw,
		  "MA address wrap-around error by client %u to address %#x\n",
		  MEM_WRAP_CLIENT_NUM_GET(v), MEM_WRAP_ADDRESS_GET(v) << 4);
	}
	csio_wr_reg32(hw, status, MA_INT_CAUSE);
	csio_hw_fatal_err(hw);
}

/*
 * SMB interrupt handler.
 */
static void csio_smb_intr_handler(struct csio_hw *hw)
{
	static struct intr_info smb_intr_info[] = {
		{ MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
		{ MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
		{ SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, SMB_INT_CAUSE, smb_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * NC-SI interrupt handler.
 */
static void csio_ncsi_intr_handler(struct csio_hw *hw)
{
	static struct intr_info ncsi_intr_info[] = {
		{ CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
		{ MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
		{ TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
		{ RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, NCSI_INT_CAUSE, ncsi_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * XGMAC interrupt handler for the given port.
 */
static void csio_xgmac_intr_handler(struct csio_hw *hw, int port)
{
	uint32_t v = csio_rd_reg32(hw, PORT_REG(port, XGMAC_PORT_INT_CAUSE));

	/* Only the Tx/Rx FIFO parity bits are of interest here. */
	v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
	if (!v)
		return;

	if (v & TXFIFO_PRTY_ERR)
		csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port);
	if (v & RXFIFO_PRTY_ERR)
		csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port);
	csio_wr_reg32(hw, v, PORT_REG(port, XGMAC_PORT_INT_CAUSE));
	csio_hw_fatal_err(hw);
}

/*
 * PL interrupt handler.
 */
static void csio_pl_intr_handler(struct csio_hw *hw)
{
	static struct intr_info pl_intr_info[] = {
		{ FATALPERR, "T4 fatal parity error", -1, 1 },
		{ PERRVFID, "PL VFID_MAP parity error", -1, 1 },
		{ 0, NULL, 0, 0
		}
	};

	if (csio_handle_intr_status(hw, PL_PL_INT_CAUSE, pl_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * csio_hw_slow_intr_handler - control path interrupt handler
 * @hw: HW module
 *
 * Interrupt handler for non-data global interrupt events, e.g., errors.
 * The designation 'slow' is because it involves register reads, while
 * data interrupts typically don't involve any MMIOs.
 *
 * Returns 1 if an interrupt belonging to this function was handled,
 * 0 if the PL cause had no bits within CSIO_GLBL_INTR_MASK.
 */
int
csio_hw_slow_intr_handler(struct csio_hw *hw)
{
	uint32_t cause = csio_rd_reg32(hw, PL_INT_CAUSE);

	if (!(cause & CSIO_GLBL_INTR_MASK)) {
		CSIO_INC_STATS(hw, n_plint_unexp);
		return 0;
	}

	csio_dbg(hw, "Slow interrupt! cause: 0x%x\n", cause);

	CSIO_INC_STATS(hw, n_plint_cnt);

	/* Dispatch each asserted cause bit to its module handler. */
	if (cause & CIM)
		csio_cim_intr_handler(hw);

	if (cause & MPS)
		csio_mps_intr_handler(hw);

	if (cause & NCSI)
		csio_ncsi_intr_handler(hw);

	if (cause & PL)
		csio_pl_intr_handler(hw);

	if (cause & SMB)
		csio_smb_intr_handler(hw);

	if (cause & XGMAC0)
		csio_xgmac_intr_handler(hw, 0);

	if (cause & XGMAC1)
		csio_xgmac_intr_handler(hw, 1);

	if (cause & XGMAC_KR0)
		csio_xgmac_intr_handler(hw, 2);

	if (cause & XGMAC_KR1)
		csio_xgmac_intr_handler(hw, 3);

	if (cause & PCIE)
		csio_pcie_intr_handler(hw);

	if (cause & MC)
		csio_mem_intr_handler(hw, MEM_MC);

	if (cause & EDC0)
		csio_mem_intr_handler(hw, MEM_EDC0);

	if (cause & EDC1)
		csio_mem_intr_handler(hw, MEM_EDC1);

	if (cause & LE)
		csio_le_intr_handler(hw);

	if (cause & TP)
		csio_tp_intr_handler(hw);

	if (cause & MA)
		csio_ma_intr_handler(hw);

	if (cause & PM_TX)
		csio_pmtx_intr_handler(hw);

	if (cause & PM_RX)
		csio_pmrx_intr_handler(hw);

	if (cause & ULP_RX)
		csio_ulprx_intr_handler(hw);

	if (cause & CPL_SWITCH)
		csio_cplsw_intr_handler(hw);

	if (cause & SGE)
		csio_sge_intr_handler(hw);

	if (cause & ULP_TX)
		csio_ulptx_intr_handler(hw);

	/* Clear the interrupts just processed for which we are the master. */
	csio_wr_reg32(hw, cause & CSIO_GLBL_INTR_MASK, PL_INT_CAUSE);
	csio_rd_reg32(hw, PL_INT_CAUSE); /* flush */

	return 1;
}

/*****************************************************************************
 * HW <--> mailbox interfacing routines.
 ****************************************************************************/
/*
 * csio_mberr_worker - Worker thread (dpc) for mailbox/error completions
 *
 * @data: Private data pointer.
 *
 * Called from worker thread context.
 */
static void
csio_mberr_worker(void *data)
{
	struct csio_hw *hw = (struct csio_hw *)data;
	struct csio_mbm *mbm = &hw->mbm;
	LIST_HEAD(cbfn_q);
	struct csio_mb *mbp_next;
	int rv;

	del_timer_sync(&mbm->timer);

	spin_lock_irq(&hw->lock);
	if (list_empty(&mbm->cbfn_q)) {
		spin_unlock_irq(&hw->lock);
		return;
	}

	/* Move pending completions to a private list under the lock. */
	list_splice_tail_init(&mbm->cbfn_q, &cbfn_q);
	mbm->stats.n_cbfnq = 0;

	/* Try to start waiting mailboxes */
	if (!list_empty(&mbm->req_q)) {
		mbp_next = list_first_entry(&mbm->req_q, struct csio_mb, list);
		list_del_init(&mbp_next->list);

		rv = csio_mb_issue(hw, mbp_next);
		if (rv != 0)
			/* Issue failed: put the request back for a retry. */
			list_add_tail(&mbp_next->list, &mbm->req_q);
		else
			CSIO_DEC_STATS(mbm, n_activeq);
	}
	spin_unlock_irq(&hw->lock);

	/* Now callback completions */
	csio_mb_completions(hw, &cbfn_q);
}

/*
 * csio_hw_mb_timer - Top-level Mailbox timeout handler.
 *
 * @data: private data pointer
 *
 **/
static void
csio_hw_mb_timer(uintptr_t data)
{
	struct csio_hw *hw = (struct csio_hw *)data;
	struct csio_mb *mbp = NULL;

	spin_lock_irq(&hw->lock);
	mbp = csio_mb_tmo_handler(hw);
	spin_unlock_irq(&hw->lock);

	/* Call back the function for the timed-out Mailbox */
	if (mbp)
		mbp->mb_cbfn(hw, mbp);

}

/*
 * csio_hw_mbm_cleanup - Cleanup Mailbox module.
 * @hw: HW module
 *
 * Called with lock held, should exit with lock held.
 * Cancels outstanding mailboxes (waiting, in-flight) and gathers them
 * into a local queue. Drops lock and calls the completions. Holds
 * lock and returns.
 */
static void
csio_hw_mbm_cleanup(struct csio_hw *hw)
{
	LIST_HEAD(cbfn_q);

	csio_mb_cancel_all(hw, &cbfn_q);

	/* Completions must run without hw->lock; re-take it before return. */
	spin_unlock_irq(&hw->lock);
	csio_mb_completions(hw, &cbfn_q);
	spin_lock_irq(&hw->lock);
}

/*****************************************************************************
 * Event handling
 ****************************************************************************/
/*
 * csio_enqueue_evt - Queue an event message onto the active event queue.
 * @hw: HW module
 * @type: event type (< CSIO_EVT_MAX)
 * @evt_msg: message payload to copy (<= CSIO_EVT_MSG_SIZE bytes)
 * @len: payload length
 *
 * NOTE(review): unlike csio_enqueue_evt_lock() below this variant takes
 * no lock itself, so the caller presumably holds hw->lock -- confirm.
 * Returns 0 on success, -EINVAL on bad args/stopped queue, -ENOMEM when
 * no free event entries remain.
 */
int
csio_enqueue_evt(struct csio_hw *hw, enum csio_evt type, void *evt_msg,
			uint16_t len)
{
	struct csio_evt_msg *evt_entry = NULL;

	if (type >= CSIO_EVT_MAX)
		return -EINVAL;

	if (len > CSIO_EVT_MSG_SIZE)
		return -EINVAL;

	if (hw->flags & CSIO_HWF_FWEVT_STOP)
		return -EINVAL;

	if (list_empty(&hw->evt_free_q)) {
		csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n",
			 type, len);
		return -ENOMEM;
	}

	evt_entry = list_first_entry(&hw->evt_free_q,
				     struct csio_evt_msg, list);
	list_del_init(&evt_entry->list);

	/* copy event msg and queue the event */
	evt_entry->type = type;
	memcpy((void *)evt_entry->data, evt_msg, len);
	list_add_tail(&evt_entry->list, &hw->evt_active_q);

	CSIO_DEC_STATS(hw, n_evt_freeq);
	CSIO_INC_STATS(hw, n_evt_activeq);

	return 0;
}

/*
 * csio_enqueue_evt_lock - As csio_enqueue_evt(), but takes hw->lock itself
 * and can gather a scattered (freelist-buffer) payload when @msg_sg is set.
 */
static int
csio_enqueue_evt_lock(struct csio_hw *hw, enum csio_evt type, void *evt_msg,
			uint16_t len, bool msg_sg)
{
	struct csio_evt_msg *evt_entry = NULL;
	struct csio_fl_dma_buf *fl_sg;
	uint32_t off = 0;
	unsigned long flags;
	int n, ret = 0;

	if (type >= CSIO_EVT_MAX)
		return
			-EINVAL;

	if (len > CSIO_EVT_MSG_SIZE)
		return -EINVAL;

	spin_lock_irqsave(&hw->lock, flags);
	if (hw->flags & CSIO_HWF_FWEVT_STOP) {
		ret = -EINVAL;
		goto out;
	}

	if (list_empty(&hw->evt_free_q)) {
		csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n",
			 type, len);
		ret = -ENOMEM;
		goto out;
	}

	evt_entry = list_first_entry(&hw->evt_free_q,
				     struct csio_evt_msg, list);
	list_del_init(&evt_entry->list);

	/* copy event msg and queue the event */
	evt_entry->type = type;

	/* If Payload in SG list*/
	if (msg_sg) {
		/* Coalesce the scattered freelist buffers into evt data. */
		fl_sg = (struct csio_fl_dma_buf *) evt_msg;
		for (n = 0; (n < CSIO_MAX_FLBUF_PER_IQWR && off < len); n++) {
			memcpy((void *)((uintptr_t)evt_entry->data + off),
				fl_sg->flbufs[n].vaddr,
				fl_sg->flbufs[n].len);
			off += fl_sg->flbufs[n].len;
		}
	} else
		memcpy((void *)evt_entry->data, evt_msg, len);

	list_add_tail(&evt_entry->list, &hw->evt_active_q);
	CSIO_DEC_STATS(hw, n_evt_freeq);
	CSIO_INC_STATS(hw, n_evt_activeq);
out:
	spin_unlock_irqrestore(&hw->lock, flags);
	return ret;
}

/*
 * csio_free_evt - Return an event entry to the free queue (takes hw->lock).
 */
static void
csio_free_evt(struct csio_hw *hw, struct csio_evt_msg *evt_entry)
{
	if (evt_entry) {
		spin_lock_irq(&hw->lock);
		list_del_init(&evt_entry->list);
		list_add_tail(&evt_entry->list, &hw->evt_free_q);
		CSIO_DEC_STATS(hw, n_evt_activeq);
		CSIO_INC_STATS(hw, n_evt_freeq);
		spin_unlock_irq(&hw->lock);
	}
}

/*
 * csio_evtq_flush - Wait (up to 30 * 2s) for pending FW events to drain.
 *
 * NOTE: expects hw->lock held on entry; it is dropped and re-taken
 * around each sleep.
 */
void
csio_evtq_flush(struct csio_hw *hw)
{
	uint32_t count;
	count = 30;
	while (hw->flags & CSIO_HWF_FWEVT_PENDING && count--) {
		spin_unlock_irq(&hw->lock);
		msleep(2000);
		spin_lock_irq(&hw->lock);
	}

	CSIO_DB_ASSERT(!(hw->flags & CSIO_HWF_FWEVT_PENDING));
}

/*
 * csio_evtq_stop - Stop acceptance of new events onto the event queue.
 */
static void
csio_evtq_stop(struct csio_hw *hw)
{
	hw->flags |= CSIO_HWF_FWEVT_STOP;
}

/*
 * csio_evtq_start - Re-enable acceptance of new events.
 */
static void
csio_evtq_start(struct csio_hw *hw)
{
	hw->flags &= ~CSIO_HWF_FWEVT_STOP;
}

/*
 * csio_evtq_cleanup - Drain active events and free all event entries.
 */
static void
csio_evtq_cleanup(struct csio_hw *hw)
{
	struct list_head *evt_entry, *next_entry;

	/* Release outstanding events from activeq to freeq*/
	if (!list_empty(&hw->evt_active_q))
		list_splice_tail_init(&hw->evt_active_q, &hw->evt_free_q);

	hw->stats.n_evt_activeq = 0;
	hw->flags &= ~CSIO_HWF_FWEVT_PENDING;

	/* Freeup event entry */
	list_for_each_safe(evt_entry, next_entry, &hw->evt_free_q) {
		/*
		 * NOTE(review): kfree() on the list_head pointer relies on
		 * 'list' being the first member of struct csio_evt_msg (the
		 * same cast is used in csio_evtq_worker) -- confirm against
		 * the structure definition.
		 */
		kfree(evt_entry);
		CSIO_DEC_STATS(hw, n_evt_freeq);
	}

	hw->stats.n_evt_freeq = 0;
}


/*
 * csio_process_fwevtq_entry - Classify one FW event queue WR and enqueue it
 * for the event worker.  @flb carries the freelist payload for CPL_FW6_PLD.
 */
static void
csio_process_fwevtq_entry(struct csio_hw *hw, void *wr, uint32_t len,
			  struct csio_fl_dma_buf *flb, void *priv)
{
	__u8 op;
	__be64 *data;
	void *msg = NULL;
	uint32_t msg_len = 0;
	bool msg_sg = 0;

	op = ((struct rss_header *) wr)->opcode;
	if (op == CPL_FW6_PLD) {
		CSIO_INC_STATS(hw, n_cpl_fw6_pld);
		if (!flb || !flb->totlen) {
			CSIO_INC_STATS(hw, n_cpl_unexp);
			return;
		}

		msg = (void *) flb;
		msg_len = flb->totlen;
		msg_sg = 1;

		data = (__be64 *) msg;
	} else if (op == CPL_FW6_MSG || op == CPL_FW4_MSG) {

		CSIO_INC_STATS(hw, n_cpl_fw6_msg);
		/* skip RSS header */
		msg = (void *)((uintptr_t)wr +
			sizeof(__be64));
		msg_len = (op == CPL_FW6_MSG) ? sizeof(struct cpl_fw6_msg) :
			   sizeof(struct cpl_fw4_msg);

		data = (__be64 *) msg;
	} else {
		csio_warn(hw, "unexpected CPL %#x on FW event queue\n", op);
		CSIO_INC_STATS(hw, n_cpl_unexp);
		return;
	}

	/*
	 * Enqueue event to EventQ. Events processing happens
	 * in Event worker thread context
	 */
	if (csio_enqueue_evt_lock(hw, CSIO_EVT_FW, msg,
				  (uint16_t)msg_len, msg_sg))
		CSIO_INC_STATS(hw, n_evt_drop);
}

/*
 * csio_evtq_worker - Event worker: drains evt_active_q and dispatches each
 * event (FW message, mailbox error, device loss) to its handler, then
 * returns the entry to the free queue.
 */
void
csio_evtq_worker(struct work_struct *work)
{
	struct csio_hw *hw = container_of(work, struct csio_hw, evtq_work);
	struct list_head *evt_entry, *next_entry;
	LIST_HEAD(evt_q);
	struct csio_evt_msg *evt_msg;
	struct cpl_fw6_msg *msg;
	struct csio_rnode *rn;
	int rv = 0;
	uint8_t evtq_stop = 0;

	csio_dbg(hw, "event worker thread active evts#%d\n",
		 hw->stats.n_evt_activeq);

	spin_lock_irq(&hw->lock);
	while (!list_empty(&hw->evt_active_q)) {
		/* Grab the whole batch, then process it without the lock. */
		list_splice_tail_init(&hw->evt_active_q, &evt_q);
		spin_unlock_irq(&hw->lock);

		list_for_each_safe(evt_entry, next_entry, &evt_q) {
			evt_msg = (struct csio_evt_msg *) evt_entry;

			/* Drop events if queue is STOPPED */
			spin_lock_irq(&hw->lock);
			if (hw->flags & CSIO_HWF_FWEVT_STOP)
				evtq_stop = 1;
			spin_unlock_irq(&hw->lock);
			if (evtq_stop) {
				CSIO_INC_STATS(hw, n_evt_drop);
				goto free_evt;
			}

			switch (evt_msg->type) {
			case CSIO_EVT_FW:
				msg = (struct cpl_fw6_msg *)(evt_msg->data);

				if ((msg->opcode == CPL_FW6_MSG ||
				     msg->opcode == CPL_FW4_MSG) &&
				    !msg->type) {
					rv = csio_mb_fwevt_handler(hw,
								msg->data);
					if (!rv)
						break;
					/* Handle any remaining fw events */
					csio_fcoe_fwevt_handler(hw,
							msg->opcode, msg->data);
				} else if (msg->opcode == CPL_FW6_PLD) {

					csio_fcoe_fwevt_handler(hw,
							msg->opcode, msg->data);
				} else {
					csio_warn(hw,
					     "Unhandled FW msg op %x type %x\n",
						  msg->opcode, msg->type);
					CSIO_INC_STATS(hw, n_evt_drop);
				}
				break;

			case CSIO_EVT_MBX:
				csio_mberr_worker(hw);
				break;

			case CSIO_EVT_DEV_LOSS:
				memcpy(&rn, evt_msg->data, sizeof(rn));
				csio_rnode_devloss_handler(rn);
				break;

			default:
				csio_warn(hw, "Unhandled event %x on evtq\n",
					  evt_msg->type);
				CSIO_INC_STATS(hw, n_evt_unexp);
				break;
			}
free_evt:
			csio_free_evt(hw, evt_msg);
		}

		spin_lock_irq(&hw->lock);
	}
	hw->flags &= ~CSIO_HWF_FWEVT_PENDING;
	spin_unlock_irq(&hw->lock);
}

/*
 * csio_fwevtq_handler - Process the FW event ingress queue.
 * Returns 0 on success, -EINVAL when the queue id is invalid (stray intr).
 */
int
csio_fwevtq_handler(struct csio_hw *hw)
{
	int rv;

	if (csio_q_iqid(hw, hw->fwevt_iq_idx) == CSIO_MAX_QID) {
		CSIO_INC_STATS(hw, n_int_stray);
		return -EINVAL;
	}

	rv = csio_wr_process_iq_idx(hw, hw->fwevt_iq_idx,
			   csio_process_fwevtq_entry, NULL);
	return rv;
}

/****************************************************************************
 * Entry points
 ****************************************************************************/

/* Management module */
/*
 * csio_mgmt_req_lookup - Lookup the given IO req exist in Active Q.
 * mgmt - mgmt module
 * @io_req - io request
 *
 * Return - 0:if given IO Req exists in active
Q.
 *	    -EINVAL :if lookup fails.
 */
int
csio_mgmt_req_lookup(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req)
{
	struct list_head *tmp;

	/* Lookup ioreq in the ACTIVEQ */
	list_for_each(tmp, &mgmtm->active_q) {
		if (io_req == (struct csio_ioreq *)tmp)
			return 0;
	}
	return -EINVAL;
}

#define	ECM_MIN_TMO	1000	/* Minimum timeout value for req */

/*
 * csio_mgmts_tmo_handler - MGMT IO Timeout handler.
 * @data - Event data.
 *
 * Return - none.
 */
static void
csio_mgmt_tmo_handler(uintptr_t data)
{
	struct csio_mgmtm *mgmtm = (struct csio_mgmtm *) data;
	struct list_head *tmp;
	struct csio_ioreq *io_req;

	csio_dbg(mgmtm->hw, "Mgmt timer invoked!\n");

	spin_lock_irq(&mgmtm->hw->lock);

	list_for_each(tmp, &mgmtm->active_q) {
		io_req = (struct csio_ioreq *) tmp;
		io_req->tmo -= min_t(uint32_t, io_req->tmo, ECM_MIN_TMO);

		if (!io_req->tmo) {
			/* Dequeue the request from retry Q. */
			/* Step back so list_for_each stays valid after the
			 * current node is unlinked below. */
			tmp = csio_list_prev(tmp);
			list_del_init(&io_req->sm.sm_list);
			if (io_req->io_cbfn) {
				/* io_req will be freed by completion handler */
				io_req->wr_status = -ETIMEDOUT;
				io_req->io_cbfn(mgmtm->hw, io_req);
			} else {
				CSIO_DB_ASSERT(0);
			}
		}
	}

	/* If retry queue is not empty, re-arm timer */
	if (!list_empty(&mgmtm->active_q))
		mod_timer(&mgmtm->mgmt_timer,
			  jiffies + msecs_to_jiffies(ECM_MIN_TMO));
	spin_unlock_irq(&mgmtm->hw->lock);
}

/*
 * csio_mgmtm_cleanup - Wait for and then cancel outstanding mgmt requests.
 * NOTE: expects hw->lock held; it is dropped/re-taken around the sleeps.
 */
static void
csio_mgmtm_cleanup(struct csio_mgmtm *mgmtm)
{
	struct csio_hw *hw = mgmtm->hw;
	struct csio_ioreq *io_req;
	struct list_head *tmp;
	uint32_t count;

	count = 30;
	/* Wait for all outstanding req to complete gracefully */
	while ((!list_empty(&mgmtm->active_q)) && count--) {
		spin_unlock_irq(&hw->lock);
		msleep(2000);
		spin_lock_irq(&hw->lock);
	}

	/* release outstanding req from ACTIVEQ */
	list_for_each(tmp, &mgmtm->active_q) {
		io_req = (struct csio_ioreq *) tmp;
		/* Step back so iteration survives the list_del below. */
		tmp = csio_list_prev(tmp);
		list_del_init(&io_req->sm.sm_list);
		mgmtm->stats.n_active--;
		if (io_req->io_cbfn) {
			/* io_req will be freed by completion handler */
			io_req->wr_status = -ETIMEDOUT;
			io_req->io_cbfn(mgmtm->hw, io_req);
		}
	}
}

/*
 * csio_mgmt_init - Mgmt module init entry point
 * @mgmtsm - mgmt module
 * @hw	 - HW module
 *
 * Initialize mgmt timer, resource wait queue, active queue,
 * completion q. Allocate Egress and Ingress
 * WR queues and save off the queue index returned by the WR
 * module for future use.
Allocate and save off mgmt reqs in the41424142+ * mgmt_req_freelist for future use. Make sure their SM is initialized41434143+ * to uninit state.41444144+ * Returns: 0 - on success41454145+ * -ENOMEM - on error.41464146+ */41474147+static int41484148+csio_mgmtm_init(struct csio_mgmtm *mgmtm, struct csio_hw *hw)41494149+{41504150+ struct timer_list *timer = &mgmtm->mgmt_timer;41514151+41524152+ init_timer(timer);41534153+ timer->function = csio_mgmt_tmo_handler;41544154+ timer->data = (unsigned long)mgmtm;41554155+41564156+ INIT_LIST_HEAD(&mgmtm->active_q);41574157+ INIT_LIST_HEAD(&mgmtm->cbfn_q);41584158+41594159+ mgmtm->hw = hw;41604160+ /*mgmtm->iq_idx = hw->fwevt_iq_idx;*/41614161+41624162+ return 0;41634163+}41644164+41654165+/*41664166+ * csio_mgmtm_exit - MGMT module exit entry point41674167+ * @mgmtsm - mgmt module41684168+ *41694169+ * This function called during MGMT module uninit.41704170+ * Stop timers, free ioreqs allocated.41714171+ * Returns: None41724172+ *41734173+ */41744174+static void41754175+csio_mgmtm_exit(struct csio_mgmtm *mgmtm)41764176+{41774177+ del_timer_sync(&mgmtm->mgmt_timer);41784178+}41794179+41804180+41814181+/**41824182+ * csio_hw_start - Kicks off the HW State machine41834183+ * @hw: Pointer to HW module.41844184+ *41854185+ * It is assumed that the initialization is a synchronous operation.41864186+ * So when we return afer posting the event, the HW SM should be in41874187+ * the ready state, if there were no errors during init.41884188+ */41894189+int41904190+csio_hw_start(struct csio_hw *hw)41914191+{41924192+ spin_lock_irq(&hw->lock);41934193+ csio_post_event(&hw->sm, CSIO_HWE_CFG);41944194+ spin_unlock_irq(&hw->lock);41954195+41964196+ if (csio_is_hw_ready(hw))41974197+ return 0;41984198+ else41994199+ return -EINVAL;42004200+}42014201+42024202+int42034203+csio_hw_stop(struct csio_hw *hw)42044204+{42054205+ csio_post_event(&hw->sm, CSIO_HWE_PCI_REMOVE);42064206+42074207+ if (csio_is_hw_removing(hw))42084208+ return 
0;42094209+ else42104210+ return -EINVAL;42114211+}42124212+42134213+/* Max reset retries */42144214+#define CSIO_MAX_RESET_RETRIES 342154215+42164216+/**42174217+ * csio_hw_reset - Reset the hardware42184218+ * @hw: HW module.42194219+ *42204220+ * Caller should hold lock across this function.42214221+ */42224222+int42234223+csio_hw_reset(struct csio_hw *hw)42244224+{42254225+ if (!csio_is_hw_master(hw))42264226+ return -EPERM;42274227+42284228+ if (hw->rst_retries >= CSIO_MAX_RESET_RETRIES) {42294229+ csio_dbg(hw, "Max hw reset attempts reached..");42304230+ return -EINVAL;42314231+ }42324232+42334233+ hw->rst_retries++;42344234+ csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET);42354235+42364236+ if (csio_is_hw_ready(hw)) {42374237+ hw->rst_retries = 0;42384238+ hw->stats.n_reset_start = jiffies_to_msecs(jiffies);42394239+ return 0;42404240+ } else42414241+ return -EINVAL;42424242+}42434243+42444244+/*42454245+ * csio_hw_get_device_id - Caches the Adapter's vendor & device id.42464246+ * @hw: HW module.42474247+ */42484248+static void42494249+csio_hw_get_device_id(struct csio_hw *hw)42504250+{42514251+ /* Is the adapter device id cached already ?*/42524252+ if (csio_is_dev_id_cached(hw))42534253+ return;42544254+42554255+ /* Get the PCI vendor & device id */42564256+ pci_read_config_word(hw->pdev, PCI_VENDOR_ID,42574257+ &hw->params.pci.vendor_id);42584258+ pci_read_config_word(hw->pdev, PCI_DEVICE_ID,42594259+ &hw->params.pci.device_id);42604260+42614261+ csio_dev_id_cached(hw);42624262+42634263+} /* csio_hw_get_device_id */42644264+42654265+/*42664266+ * csio_hw_set_description - Set the model, description of the hw.42674267+ * @hw: HW module.42684268+ * @ven_id: PCI Vendor ID42694269+ * @dev_id: PCI Device ID42704270+ */42714271+static void42724272+csio_hw_set_description(struct csio_hw *hw, uint16_t ven_id, uint16_t dev_id)42734273+{42744274+ uint32_t adap_type, prot_type;42754275+42764276+ if (ven_id == CSIO_VENDOR_ID) {42774277+ prot_type = (dev_id & 
CSIO_ASIC_DEVID_PROTO_MASK);42784278+ adap_type = (dev_id & CSIO_ASIC_DEVID_TYPE_MASK);42794279+42804280+ if (prot_type == CSIO_FPGA) {42814281+ memcpy(hw->model_desc,42824282+ csio_fcoe_adapters[13].description, 32);42834283+ } else if (prot_type == CSIO_T4_FCOE_ASIC) {42844284+ memcpy(hw->hw_ver,42854285+ csio_fcoe_adapters[adap_type].model_no, 16);42864286+ memcpy(hw->model_desc,42874287+ csio_fcoe_adapters[adap_type].description, 32);42884288+ } else {42894289+ char tempName[32] = "Chelsio FCoE Controller";42904290+ memcpy(hw->model_desc, tempName, 32);42914291+42924292+ CSIO_DB_ASSERT(0);42934293+ }42944294+ }42954295+} /* csio_hw_set_description */42964296+42974297+/**42984298+ * csio_hw_init - Initialize HW module.42994299+ * @hw: Pointer to HW module.43004300+ *43014301+ * Initialize the members of the HW module.43024302+ */43034303+int43044304+csio_hw_init(struct csio_hw *hw)43054305+{43064306+ int rv = -EINVAL;43074307+ uint32_t i;43084308+ uint16_t ven_id, dev_id;43094309+ struct csio_evt_msg *evt_entry;43104310+43114311+ INIT_LIST_HEAD(&hw->sm.sm_list);43124312+ csio_init_state(&hw->sm, csio_hws_uninit);43134313+ spin_lock_init(&hw->lock);43144314+ INIT_LIST_HEAD(&hw->sln_head);43154315+43164316+ /* Get the PCI vendor & device id */43174317+ csio_hw_get_device_id(hw);43184318+43194319+ strcpy(hw->name, CSIO_HW_NAME);43204320+43214321+ /* Set the model & its description */43224322+43234323+ ven_id = hw->params.pci.vendor_id;43244324+ dev_id = hw->params.pci.device_id;43254325+43264326+ csio_hw_set_description(hw, ven_id, dev_id);43274327+43284328+ /* Initialize default log level */43294329+ hw->params.log_level = (uint32_t) csio_dbg_level;43304330+43314331+ csio_set_fwevt_intr_idx(hw, -1);43324332+ csio_set_nondata_intr_idx(hw, -1);43334333+43344334+ /* Init all the modules: Mailbox, WorkRequest and Transport */43354335+ if (csio_mbm_init(csio_hw_to_mbm(hw), hw, csio_hw_mb_timer))43364336+ goto err;43374337+43384338+ rv = 
csio_wrm_init(csio_hw_to_wrm(hw), hw);43394339+ if (rv)43404340+ goto err_mbm_exit;43414341+43424342+ rv = csio_scsim_init(csio_hw_to_scsim(hw), hw);43434343+ if (rv)43444344+ goto err_wrm_exit;43454345+43464346+ rv = csio_mgmtm_init(csio_hw_to_mgmtm(hw), hw);43474347+ if (rv)43484348+ goto err_scsim_exit;43494349+ /* Pre-allocate evtq and initialize them */43504350+ INIT_LIST_HEAD(&hw->evt_active_q);43514351+ INIT_LIST_HEAD(&hw->evt_free_q);43524352+ for (i = 0; i < csio_evtq_sz; i++) {43534353+43544354+ evt_entry = kzalloc(sizeof(struct csio_evt_msg), GFP_KERNEL);43554355+ if (!evt_entry) {43564356+ csio_err(hw, "Failed to initialize eventq");43574357+ goto err_evtq_cleanup;43584358+ }43594359+43604360+ list_add_tail(&evt_entry->list, &hw->evt_free_q);43614361+ CSIO_INC_STATS(hw, n_evt_freeq);43624362+ }43634363+43644364+ hw->dev_num = dev_num;43654365+ dev_num++;43664366+43674367+ return 0;43684368+43694369+err_evtq_cleanup:43704370+ csio_evtq_cleanup(hw);43714371+ csio_mgmtm_exit(csio_hw_to_mgmtm(hw));43724372+err_scsim_exit:43734373+ csio_scsim_exit(csio_hw_to_scsim(hw));43744374+err_wrm_exit:43754375+ csio_wrm_exit(csio_hw_to_wrm(hw), hw);43764376+err_mbm_exit:43774377+ csio_mbm_exit(csio_hw_to_mbm(hw));43784378+err:43794379+ return rv;43804380+}43814381+43824382+/**43834383+ * csio_hw_exit - Un-initialize HW module.43844384+ * @hw: Pointer to HW module.43854385+ *43864386+ */43874387+void43884388+csio_hw_exit(struct csio_hw *hw)43894389+{43904390+ csio_evtq_cleanup(hw);43914391+ csio_mgmtm_exit(csio_hw_to_mgmtm(hw));43924392+ csio_scsim_exit(csio_hw_to_scsim(hw));43934393+ csio_wrm_exit(csio_hw_to_wrm(hw), hw);43944394+ csio_mbm_exit(csio_hw_to_mbm(hw));43954395+}
+667
drivers/scsi/csiostor/csio_hw.h
···11+/*22+ * This file is part of the Chelsio FCoE driver for Linux.33+ *44+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.55+ *66+ * This software is available to you under a choice of one of two77+ * licenses. You may choose to be licensed under the terms of the GNU88+ * General Public License (GPL) Version 2, available from the file99+ * COPYING in the main directory of this source tree, or the1010+ * OpenIB.org BSD license below:1111+ *1212+ * Redistribution and use in source and binary forms, with or1313+ * without modification, are permitted provided that the following1414+ * conditions are met:1515+ *1616+ * - Redistributions of source code must retain the above1717+ * copyright notice, this list of conditions and the following1818+ * disclaimer.1919+ *2020+ * - Redistributions in binary form must reproduce the above2121+ * copyright notice, this list of conditions and the following2222+ * disclaimer in the documentation and/or other materials2323+ * provided with the distribution.2424+ *2525+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,2626+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF2727+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND2828+ * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS2929+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN3030+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN3131+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE3232+ * SOFTWARE.3333+ */3434+3535+#ifndef __CSIO_HW_H__3636+#define __CSIO_HW_H__3737+3838+#include <linux/kernel.h>3939+#include <linux/pci.h>4040+#include <linux/device.h>4141+#include <linux/workqueue.h>4242+#include <linux/compiler.h>4343+#include <linux/cdev.h>4444+#include <linux/list.h>4545+#include <linux/mempool.h>4646+#include <linux/io.h>4747+#include <linux/spinlock_types.h>4848+#include <scsi/scsi_device.h>4949+#include <scsi/scsi_transport_fc.h>5050+5151+#include "csio_wr.h"5252+#include "csio_mb.h"5353+#include "csio_scsi.h"5454+#include "csio_defs.h"5555+#include "t4_regs.h"5656+#include "t4_msg.h"5757+5858+/*5959+ * An error value used by host. Should not clash with FW defined return values.6060+ */6161+#define FW_HOSTERROR 2556262+6363+#define CSIO_FW_FNAME "cxgb4/t4fw.bin"6464+#define CSIO_CF_FNAME "cxgb4/t4-config.txt"6565+6666+#define FW_VERSION_MAJOR 16767+#define FW_VERSION_MINOR 26868+#define FW_VERSION_MICRO 86969+7070+#define CSIO_HW_NAME "Chelsio FCoE Adapter"7171+#define CSIO_MAX_PFN 87272+#define CSIO_MAX_PPORTS 47373+7474+#define CSIO_MAX_LUN 0xFFFF7575+#define CSIO_MAX_QUEUE 20487676+#define CSIO_MAX_CMD_PER_LUN 327777+#define CSIO_MAX_DDP_BUF_SIZE (1024 * 1024)7878+#define CSIO_MAX_SECTOR_SIZE 1287979+8080+/* Interrupts */8181+#define CSIO_EXTRA_MSI_IQS 2 /* Extra iqs for INTX/MSI mode8282+ * (Forward intr iq + fw iq) */8383+#define CSIO_EXTRA_VECS 2 /* non-data + FW evt */8484+#define CSIO_MAX_SCSI_CPU 1288585+#define CSIO_MAX_SCSI_QSETS (CSIO_MAX_SCSI_CPU * CSIO_MAX_PPORTS)8686+#define CSIO_MAX_MSIX_VECS (CSIO_MAX_SCSI_QSETS + CSIO_EXTRA_VECS)8787+8888+/* Queues */8989+enum {9090+ CSIO_INTR_WRSIZE = 128,9191+ CSIO_INTR_IQSIZE = ((CSIO_MAX_MSIX_VECS + 1) * 
CSIO_INTR_WRSIZE),9292+ CSIO_FWEVT_WRSIZE = 128,9393+ CSIO_FWEVT_IQLEN = 128,9494+ CSIO_FWEVT_FLBUFS = 64,9595+ CSIO_FWEVT_IQSIZE = (CSIO_FWEVT_WRSIZE * CSIO_FWEVT_IQLEN),9696+ CSIO_HW_NIQ = 1,9797+ CSIO_HW_NFLQ = 1,9898+ CSIO_HW_NEQ = 1,9999+ CSIO_HW_NINTXQ = 1,100100+};101101+102102+struct csio_msix_entries {103103+ unsigned short vector; /* Vector assigned by pci_enable_msix */104104+ void *dev_id; /* Priv object associated w/ this msix*/105105+ char desc[24]; /* Description of this vector */106106+};107107+108108+struct csio_scsi_qset {109109+ int iq_idx; /* Ingress index */110110+ int eq_idx; /* Egress index */111111+ uint32_t intr_idx; /* MSIX Vector index */112112+};113113+114114+struct csio_scsi_cpu_info {115115+ int16_t max_cpus;116116+};117117+118118+extern int csio_dbg_level;119119+extern int csio_force_master;120120+extern unsigned int csio_port_mask;121121+extern int csio_msi;122122+123123+#define CSIO_VENDOR_ID 0x1425124124+#define CSIO_ASIC_DEVID_PROTO_MASK 0xFF00125125+#define CSIO_ASIC_DEVID_TYPE_MASK 0x00FF126126+#define CSIO_FPGA 0xA000127127+#define CSIO_T4_FCOE_ASIC 0x4600128128+129129+#define CSIO_GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \130130+ EDC1 | LE | TP | MA | PM_TX | PM_RX | \131131+ ULP_RX | CPL_SWITCH | SGE | \132132+ ULP_TX | SF)133133+134134+/*135135+ * Hard parameters used to initialize the card in the absence of a136136+ * configuration file.137137+ */138138+enum {139139+ /* General */140140+ CSIO_SGE_DBFIFO_INT_THRESH = 10,141141+142142+ CSIO_SGE_RX_DMA_OFFSET = 2,143143+144144+ CSIO_SGE_FLBUF_SIZE1 = 65536,145145+ CSIO_SGE_FLBUF_SIZE2 = 1536,146146+ CSIO_SGE_FLBUF_SIZE3 = 9024,147147+ CSIO_SGE_FLBUF_SIZE4 = 9216,148148+ CSIO_SGE_FLBUF_SIZE5 = 2048,149149+ CSIO_SGE_FLBUF_SIZE6 = 128,150150+ CSIO_SGE_FLBUF_SIZE7 = 8192,151151+ CSIO_SGE_FLBUF_SIZE8 = 16384,152152+153153+ CSIO_SGE_TIMER_VAL_0 = 5,154154+ CSIO_SGE_TIMER_VAL_1 = 10,155155+ CSIO_SGE_TIMER_VAL_2 = 20,156156+ CSIO_SGE_TIMER_VAL_3 = 50,157157+ 
CSIO_SGE_TIMER_VAL_4 = 100,158158+ CSIO_SGE_TIMER_VAL_5 = 200,159159+160160+ CSIO_SGE_INT_CNT_VAL_0 = 1,161161+ CSIO_SGE_INT_CNT_VAL_1 = 4,162162+ CSIO_SGE_INT_CNT_VAL_2 = 8,163163+ CSIO_SGE_INT_CNT_VAL_3 = 16,164164+165165+ /* Storage specific - used by FW_PFVF_CMD */166166+ CSIO_WX_CAPS = FW_CMD_CAP_PF, /* w/x all */167167+ CSIO_R_CAPS = FW_CMD_CAP_PF, /* r all */168168+ CSIO_NVI = 4,169169+ CSIO_NIQ_FLINT = 34,170170+ CSIO_NETH_CTRL = 32,171171+ CSIO_NEQ = 66,172172+ CSIO_NEXACTF = 32,173173+ CSIO_CMASK = FW_PFVF_CMD_CMASK_MASK,174174+ CSIO_PMASK = FW_PFVF_CMD_PMASK_MASK,175175+};176176+177177+/* Slowpath events */178178+enum csio_evt {179179+ CSIO_EVT_FW = 0, /* FW event */180180+ CSIO_EVT_MBX, /* MBX event */181181+ CSIO_EVT_SCN, /* State change notification */182182+ CSIO_EVT_DEV_LOSS, /* Device loss event */183183+ CSIO_EVT_MAX, /* Max supported event */184184+};185185+186186+#define CSIO_EVT_MSG_SIZE 512187187+#define CSIO_EVTQ_SIZE 512188188+189189+/* Event msg */190190+struct csio_evt_msg {191191+ struct list_head list; /* evt queue*/192192+ enum csio_evt type;193193+ uint8_t data[CSIO_EVT_MSG_SIZE];194194+};195195+196196+enum {197197+ EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */198198+ SERNUM_LEN = 16, /* Serial # length */199199+ EC_LEN = 16, /* E/C length */200200+ ID_LEN = 16, /* ID length */201201+ TRACE_LEN = 112, /* length of trace data and mask */202202+};203203+204204+enum {205205+ SF_PAGE_SIZE = 256, /* serial flash page size */206206+ SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */207207+ SF_SIZE = SF_SEC_SIZE * 16, /* serial flash size */208208+};209209+210210+enum { MEM_EDC0, MEM_EDC1, MEM_MC };211211+212212+enum {213213+ MEMWIN0_APERTURE = 2048,214214+ MEMWIN0_BASE = 0x1b800,215215+ MEMWIN1_APERTURE = 32768,216216+ MEMWIN1_BASE = 0x28000,217217+ MEMWIN2_APERTURE = 65536,218218+ MEMWIN2_BASE = 0x30000,219219+};220220+221221+/* serial flash and firmware constants */222222+enum {223223+ SF_ATTEMPTS = 10, /* max 
retries for SF operations */224224+225225+ /* flash command opcodes */226226+ SF_PROG_PAGE = 2, /* program page */227227+ SF_WR_DISABLE = 4, /* disable writes */228228+ SF_RD_STATUS = 5, /* read status register */229229+ SF_WR_ENABLE = 6, /* enable writes */230230+ SF_RD_DATA_FAST = 0xb, /* read flash */231231+ SF_RD_ID = 0x9f, /* read ID */232232+ SF_ERASE_SECTOR = 0xd8, /* erase sector */233233+234234+ FW_START_SEC = 8, /* first flash sector for FW */235235+ FW_END_SEC = 15, /* last flash sector for FW */236236+ FW_IMG_START = FW_START_SEC * SF_SEC_SIZE,237237+ FW_MAX_SIZE = (FW_END_SEC - FW_START_SEC + 1) * SF_SEC_SIZE,238238+239239+ FLASH_CFG_MAX_SIZE = 0x10000 , /* max size of the flash config file*/240240+ FLASH_CFG_OFFSET = 0x1f0000,241241+ FLASH_CFG_START_SEC = FLASH_CFG_OFFSET / SF_SEC_SIZE,242242+ FPGA_FLASH_CFG_OFFSET = 0xf0000 , /* if FPGA mode, then cfg file is243243+ * at 1MB - 64KB */244244+ FPGA_FLASH_CFG_START_SEC = FPGA_FLASH_CFG_OFFSET / SF_SEC_SIZE,245245+};246246+247247+/*248248+ * Flash layout.249249+ */250250+#define FLASH_START(start) ((start) * SF_SEC_SIZE)251251+#define FLASH_MAX_SIZE(nsecs) ((nsecs) * SF_SEC_SIZE)252252+253253+enum {254254+ /*255255+ * Location of firmware image in FLASH.256256+ */257257+ FLASH_FW_START_SEC = 8,258258+ FLASH_FW_NSECS = 8,259259+ FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC),260260+ FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS),261261+262262+};263263+264264+#undef FLASH_START265265+#undef FLASH_MAX_SIZE266266+267267+/* Management module */268268+enum {269269+ CSIO_MGMT_EQ_WRSIZE = 512,270270+ CSIO_MGMT_IQ_WRSIZE = 128,271271+ CSIO_MGMT_EQLEN = 64,272272+ CSIO_MGMT_IQLEN = 64,273273+};274274+275275+#define CSIO_MGMT_EQSIZE (CSIO_MGMT_EQLEN * CSIO_MGMT_EQ_WRSIZE)276276+#define CSIO_MGMT_IQSIZE (CSIO_MGMT_IQLEN * CSIO_MGMT_IQ_WRSIZE)277277+278278+/* mgmt module stats */279279+struct csio_mgmtm_stats {280280+ uint32_t n_abort_req; /* Total abort request */281281+ uint32_t n_abort_rsp; /* Total abort 
response */282282+ uint32_t n_close_req; /* Total close request */283283+ uint32_t n_close_rsp; /* Total close response */284284+ uint32_t n_err; /* Total Errors */285285+ uint32_t n_drop; /* Total request dropped */286286+ uint32_t n_active; /* Count of active_q */287287+ uint32_t n_cbfn; /* Count of cbfn_q */288288+};289289+290290+/* MGMT module */291291+struct csio_mgmtm {292292+ struct csio_hw *hw; /* Pointer to HW moduel */293293+ int eq_idx; /* Egress queue index */294294+ int iq_idx; /* Ingress queue index */295295+ int msi_vec; /* MSI vector */296296+ struct list_head active_q; /* Outstanding ELS/CT */297297+ struct list_head abort_q; /* Outstanding abort req */298298+ struct list_head cbfn_q; /* Completion queue */299299+ struct list_head mgmt_req_freelist; /* Free poll of reqs */300300+ /* ELSCT request freelist*/301301+ struct timer_list mgmt_timer; /* MGMT timer */302302+ struct csio_mgmtm_stats stats; /* ELS/CT stats */303303+};304304+305305+struct csio_adap_desc {306306+ char model_no[16];307307+ char description[32];308308+};309309+310310+struct pci_params {311311+ uint16_t vendor_id;312312+ uint16_t device_id;313313+ uint32_t vpd_cap_addr;314314+ uint16_t speed;315315+ uint8_t width;316316+};317317+318318+/* User configurable hw parameters */319319+struct csio_hw_params {320320+ uint32_t sf_size; /* serial flash321321+ * size in bytes322322+ */323323+ uint32_t sf_nsec; /* # of flash sectors */324324+ struct pci_params pci;325325+ uint32_t log_level; /* Module-level for326326+ * debug log.327327+ */328328+};329329+330330+struct csio_vpd {331331+ uint32_t cclk;332332+ uint8_t ec[EC_LEN + 1];333333+ uint8_t sn[SERNUM_LEN + 1];334334+ uint8_t id[ID_LEN + 1];335335+};336336+337337+struct csio_pport {338338+ uint16_t pcap;339339+ uint8_t portid;340340+ uint8_t link_status;341341+ uint16_t link_speed;342342+ uint8_t mac[6];343343+ uint8_t mod_type;344344+ uint8_t rsvd1;345345+ uint8_t rsvd2;346346+ uint8_t rsvd3;347347+};348348+349349+/* fcoe resource 
information */350350+struct csio_fcoe_res_info {351351+ uint16_t e_d_tov;352352+ uint16_t r_a_tov_seq;353353+ uint16_t r_a_tov_els;354354+ uint16_t r_r_tov;355355+ uint32_t max_xchgs;356356+ uint32_t max_ssns;357357+ uint32_t used_xchgs;358358+ uint32_t used_ssns;359359+ uint32_t max_fcfs;360360+ uint32_t max_vnps;361361+ uint32_t used_fcfs;362362+ uint32_t used_vnps;363363+};364364+365365+/* HW State machine Events */366366+enum csio_hw_ev {367367+ CSIO_HWE_CFG = (uint32_t)1, /* Starts off the State machine */368368+ CSIO_HWE_INIT, /* Config done, start Init */369369+ CSIO_HWE_INIT_DONE, /* Init Mailboxes sent, HW ready */370370+ CSIO_HWE_FATAL, /* Fatal error during initialization */371371+ CSIO_HWE_PCIERR_DETECTED,/* PCI error recovery detetced */372372+ CSIO_HWE_PCIERR_SLOT_RESET, /* Slot reset after PCI recoviery */373373+ CSIO_HWE_PCIERR_RESUME, /* Resume after PCI error recovery */374374+ CSIO_HWE_QUIESCED, /* HBA quiesced */375375+ CSIO_HWE_HBA_RESET, /* HBA reset requested */376376+ CSIO_HWE_HBA_RESET_DONE, /* HBA reset completed */377377+ CSIO_HWE_FW_DLOAD, /* FW download requested */378378+ CSIO_HWE_PCI_REMOVE, /* PCI de-instantiation */379379+ CSIO_HWE_SUSPEND, /* HW suspend for Online(hot) replacement */380380+ CSIO_HWE_RESUME, /* HW resume for Online(hot) replacement */381381+ CSIO_HWE_MAX, /* Max HW event */382382+};383383+384384+/* hw stats */385385+struct csio_hw_stats {386386+ uint32_t n_evt_activeq; /* Number of event in active Q */387387+ uint32_t n_evt_freeq; /* Number of event in free Q */388388+ uint32_t n_evt_drop; /* Number of event droped */389389+ uint32_t n_evt_unexp; /* Number of unexpected events */390390+ uint32_t n_pcich_offline;/* Number of pci channel offline */391391+ uint32_t n_lnlkup_miss; /* Number of lnode lookup miss */392392+ uint32_t n_cpl_fw6_msg; /* Number of cpl fw6 message*/393393+ uint32_t n_cpl_fw6_pld; /* Number of cpl fw6 payload*/394394+ uint32_t n_cpl_unexp; /* Number of unexpected cpl */395395+ uint32_t 
n_mbint_unexp; /* Number of unexpected mbox */396396+ /* interrupt */397397+ uint32_t n_plint_unexp; /* Number of unexpected PL */398398+ /* interrupt */399399+ uint32_t n_plint_cnt; /* Number of PL interrupt */400400+ uint32_t n_int_stray; /* Number of stray interrupt */401401+ uint32_t n_err; /* Number of hw errors */402402+ uint32_t n_err_fatal; /* Number of fatal errors */403403+ uint32_t n_err_nomem; /* Number of memory alloc failure */404404+ uint32_t n_err_io; /* Number of IO failure */405405+ enum csio_hw_ev n_evt_sm[CSIO_HWE_MAX]; /* Number of sm events */406406+ uint64_t n_reset_start; /* Start time after the reset */407407+ uint32_t rsvd1;408408+};409409+410410+/* Defines for hw->flags */411411+#define CSIO_HWF_MASTER 0x00000001 /* This is the Master412412+ * function for the413413+ * card.414414+ */415415+#define CSIO_HWF_HW_INTR_ENABLED 0x00000002 /* Are HW Interrupt416416+ * enable bit set?417417+ */418418+#define CSIO_HWF_FWEVT_PENDING 0x00000004 /* FW events pending */419419+#define CSIO_HWF_Q_MEM_ALLOCED 0x00000008 /* Queues have been420420+ * allocated memory.421421+ */422422+#define CSIO_HWF_Q_FW_ALLOCED 0x00000010 /* Queues have been423423+ * allocated in FW.424424+ */425425+#define CSIO_HWF_VPD_VALID 0x00000020 /* Valid VPD copied */426426+#define CSIO_HWF_DEVID_CACHED 0X00000040 /* PCI vendor & device427427+ * id cached */428428+#define CSIO_HWF_FWEVT_STOP 0x00000080 /* Stop processing429429+ * FW events430430+ */431431+#define CSIO_HWF_USING_SOFT_PARAMS 0x00000100 /* Using FW config432432+ * params433433+ */434434+#define CSIO_HWF_HOST_INTR_ENABLED 0x00000200 /* Are host interrupts435435+ * enabled?436436+ */437437+438438+#define csio_is_hw_intr_enabled(__hw) \439439+ ((__hw)->flags & CSIO_HWF_HW_INTR_ENABLED)440440+#define csio_is_host_intr_enabled(__hw) \441441+ ((__hw)->flags & CSIO_HWF_HOST_INTR_ENABLED)442442+#define csio_is_hw_master(__hw) ((__hw)->flags & CSIO_HWF_MASTER)443443+#define csio_is_valid_vpd(__hw) ((__hw)->flags & 
CSIO_HWF_VPD_VALID)444444+#define csio_is_dev_id_cached(__hw) ((__hw)->flags & CSIO_HWF_DEVID_CACHED)445445+#define csio_valid_vpd_copied(__hw) ((__hw)->flags |= CSIO_HWF_VPD_VALID)446446+#define csio_dev_id_cached(__hw) ((__hw)->flags |= CSIO_HWF_DEVID_CACHED)447447+448448+/* Defines for intr_mode */449449+enum csio_intr_mode {450450+ CSIO_IM_NONE = 0,451451+ CSIO_IM_INTX = 1,452452+ CSIO_IM_MSI = 2,453453+ CSIO_IM_MSIX = 3,454454+};455455+456456+/* Master HW structure: One per function */457457+struct csio_hw {458458+ struct csio_sm sm; /* State machine: should459459+ * be the 1st member.460460+ */461461+ spinlock_t lock; /* Lock for hw */462462+463463+ struct csio_scsim scsim; /* SCSI module*/464464+ struct csio_wrm wrm; /* Work request module*/465465+ struct pci_dev *pdev; /* PCI device */466466+467467+ void __iomem *regstart; /* Virtual address of468468+ * register map469469+ */470470+ /* SCSI queue sets */471471+ uint32_t num_sqsets; /* Number of SCSI472472+ * queue sets */473473+ uint32_t num_scsi_msix_cpus; /* Number of CPUs that474474+ * will be used475475+ * for ingress476476+ * processing.477477+ */478478+479479+ struct csio_scsi_qset sqset[CSIO_MAX_PPORTS][CSIO_MAX_SCSI_CPU];480480+ struct csio_scsi_cpu_info scsi_cpu_info[CSIO_MAX_PPORTS];481481+482482+ uint32_t evtflag; /* Event flag */483483+ uint32_t flags; /* HW flags */484484+485485+ struct csio_mgmtm mgmtm; /* management module */486486+ struct csio_mbm mbm; /* Mailbox module */487487+488488+ /* Lnodes */489489+ uint32_t num_lns; /* Number of lnodes */490490+ struct csio_lnode *rln; /* Root lnode */491491+ struct list_head sln_head; /* Sibling node list492492+ * list493493+ */494494+ int intr_iq_idx; /* Forward interrupt495495+ * queue.496496+ */497497+ int fwevt_iq_idx; /* FW evt queue */498498+ struct work_struct evtq_work; /* Worker thread for499499+ * HW events.500500+ */501501+ struct list_head evt_free_q; /* freelist of evt502502+ * elements503503+ */504504+ struct list_head evt_active_q; /* 
active evt queue*/505505+506506+ /* board related info */507507+ char name[32];508508+ char hw_ver[16];509509+ char model_desc[32];510510+ char drv_version[32];511511+ char fwrev_str[32];512512+ uint32_t optrom_ver;513513+ uint32_t fwrev;514514+ uint32_t tp_vers;515515+ char chip_ver;516516+ uint32_t cfg_finiver;517517+ uint32_t cfg_finicsum;518518+ uint32_t cfg_cfcsum;519519+ uint8_t cfg_csum_status;520520+ uint8_t cfg_store;521521+ enum csio_dev_state fw_state;522522+ struct csio_vpd vpd;523523+524524+ uint8_t pfn; /* Physical Function525525+ * number526526+ */527527+ uint32_t port_vec; /* Port vector */528528+ uint8_t num_pports; /* Number of physical529529+ * ports.530530+ */531531+ uint8_t rst_retries; /* Reset retries */532532+ uint8_t cur_evt; /* current s/m evt */533533+ uint8_t prev_evt; /* Previous s/m evt */534534+ uint32_t dev_num; /* device number */535535+ struct csio_pport pport[CSIO_MAX_PPORTS]; /* Ports (XGMACs) */536536+ struct csio_hw_params params; /* Hw parameters */537537+538538+ struct pci_pool *scsi_pci_pool; /* PCI pool for SCSI */539539+ mempool_t *mb_mempool; /* Mailbox memory pool*/540540+ mempool_t *rnode_mempool; /* rnode memory pool */541541+542542+ /* Interrupt */543543+ enum csio_intr_mode intr_mode; /* INTx, MSI, MSIX */544544+ uint32_t fwevt_intr_idx; /* FW evt MSIX/interrupt545545+ * index546546+ */547547+ uint32_t nondata_intr_idx; /* nondata MSIX/intr548548+ * idx549549+ */550550+551551+ uint8_t cfg_neq; /* FW configured no of552552+ * egress queues553553+ */554554+ uint8_t cfg_niq; /* FW configured no of555555+ * iq queues.556556+ */557557+558558+ struct csio_fcoe_res_info fres_info; /* Fcoe resource info */559559+560560+ /* MSIX vectors */561561+ struct csio_msix_entries msix_entries[CSIO_MAX_MSIX_VECS];562562+563563+ struct dentry *debugfs_root; /* Debug FS */564564+ struct csio_hw_stats stats; /* Hw statistics */565565+};566566+567567+/* Register access macros */568568+#define csio_reg(_b, _r) ((_b) + 
(_r))569569+570570+#define csio_rd_reg8(_h, _r) readb(csio_reg((_h)->regstart, (_r)))571571+#define csio_rd_reg16(_h, _r) readw(csio_reg((_h)->regstart, (_r)))572572+#define csio_rd_reg32(_h, _r) readl(csio_reg((_h)->regstart, (_r)))573573+#define csio_rd_reg64(_h, _r) readq(csio_reg((_h)->regstart, (_r)))574574+575575+#define csio_wr_reg8(_h, _v, _r) writeb((_v), \576576+ csio_reg((_h)->regstart, (_r)))577577+#define csio_wr_reg16(_h, _v, _r) writew((_v), \578578+ csio_reg((_h)->regstart, (_r)))579579+#define csio_wr_reg32(_h, _v, _r) writel((_v), \580580+ csio_reg((_h)->regstart, (_r)))581581+#define csio_wr_reg64(_h, _v, _r) writeq((_v), \582582+ csio_reg((_h)->regstart, (_r)))583583+584584+void csio_set_reg_field(struct csio_hw *, uint32_t, uint32_t, uint32_t);585585+586586+/* Core clocks <==> uSecs */587587+static inline uint32_t588588+csio_core_ticks_to_us(struct csio_hw *hw, uint32_t ticks)589589+{590590+ /* add Core Clock / 2 to round ticks to nearest uS */591591+ return (ticks * 1000 + hw->vpd.cclk/2) / hw->vpd.cclk;592592+}593593+594594+static inline uint32_t595595+csio_us_to_core_ticks(struct csio_hw *hw, uint32_t us)596596+{597597+ return (us * hw->vpd.cclk) / 1000;598598+}599599+600600+/* Easy access macros */601601+#define csio_hw_to_wrm(hw) ((struct csio_wrm *)(&(hw)->wrm))602602+#define csio_hw_to_mbm(hw) ((struct csio_mbm *)(&(hw)->mbm))603603+#define csio_hw_to_scsim(hw) ((struct csio_scsim *)(&(hw)->scsim))604604+#define csio_hw_to_mgmtm(hw) ((struct csio_mgmtm *)(&(hw)->mgmtm))605605+606606+#define CSIO_PCI_BUS(hw) ((hw)->pdev->bus->number)607607+#define CSIO_PCI_DEV(hw) (PCI_SLOT((hw)->pdev->devfn))608608+#define CSIO_PCI_FUNC(hw) (PCI_FUNC((hw)->pdev->devfn))609609+610610+#define csio_set_fwevt_intr_idx(_h, _i) ((_h)->fwevt_intr_idx = (_i))611611+#define csio_get_fwevt_intr_idx(_h) ((_h)->fwevt_intr_idx)612612+#define csio_set_nondata_intr_idx(_h, _i) ((_h)->nondata_intr_idx = (_i))613613+#define csio_get_nondata_intr_idx(_h) 
((_h)->nondata_intr_idx)614614+615615+/* Printing/logging */616616+#define CSIO_DEVID(__dev) ((__dev)->dev_num)617617+#define CSIO_DEVID_LO(__dev) (CSIO_DEVID((__dev)) & 0xFFFF)618618+#define CSIO_DEVID_HI(__dev) ((CSIO_DEVID((__dev)) >> 16) & 0xFFFF)619619+620620+#define csio_info(__hw, __fmt, ...) \621621+ dev_info(&(__hw)->pdev->dev, __fmt, ##__VA_ARGS__)622622+623623+#define csio_fatal(__hw, __fmt, ...) \624624+ dev_crit(&(__hw)->pdev->dev, __fmt, ##__VA_ARGS__)625625+626626+#define csio_err(__hw, __fmt, ...) \627627+ dev_err(&(__hw)->pdev->dev, __fmt, ##__VA_ARGS__)628628+629629+#define csio_warn(__hw, __fmt, ...) \630630+ dev_warn(&(__hw)->pdev->dev, __fmt, ##__VA_ARGS__)631631+632632+#ifdef __CSIO_DEBUG__633633+#define csio_dbg(__hw, __fmt, ...) \634634+ csio_info((__hw), __fmt, ##__VA_ARGS__);635635+#else636636+#define csio_dbg(__hw, __fmt, ...)637637+#endif638638+639639+int csio_mgmt_req_lookup(struct csio_mgmtm *, struct csio_ioreq *);640640+void csio_hw_intr_disable(struct csio_hw *);641641+int csio_hw_slow_intr_handler(struct csio_hw *hw);642642+int csio_hw_start(struct csio_hw *);643643+int csio_hw_stop(struct csio_hw *);644644+int csio_hw_reset(struct csio_hw *);645645+int csio_is_hw_ready(struct csio_hw *);646646+int csio_is_hw_removing(struct csio_hw *);647647+648648+int csio_fwevtq_handler(struct csio_hw *);649649+void csio_evtq_worker(struct work_struct *);650650+int csio_enqueue_evt(struct csio_hw *hw, enum csio_evt type,651651+ void *evt_msg, uint16_t len);652652+void csio_evtq_flush(struct csio_hw *hw);653653+654654+int csio_request_irqs(struct csio_hw *);655655+void csio_intr_enable(struct csio_hw *);656656+void csio_intr_disable(struct csio_hw *, bool);657657+658658+struct csio_lnode *csio_lnode_alloc(struct csio_hw *);659659+int csio_config_queues(struct csio_hw *);660660+661661+int csio_hw_mc_read(struct csio_hw *, uint32_t,662662+ uint32_t *, uint64_t *);663663+int csio_hw_edc_read(struct csio_hw *, int, uint32_t, uint32_t *,664664+ 
uint64_t *);665665+int csio_hw_init(struct csio_hw *);666666+void csio_hw_exit(struct csio_hw *);667667+#endif /* ifndef __CSIO_HW_H__ */
+1274
drivers/scsi/csiostor/csio_init.c
/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/string.h>
#include <linux/export.h>

#include "csio_init.h"
#include "csio_defs.h"

/* Minimum number of elements in the mailbox/rnode mempools. */
#define CSIO_MIN_MEMPOOL_SZ	64

/* Module-wide debugfs root; per-adapter directories hang off this. */
static struct dentry *csio_debugfs_root;

static struct scsi_transport_template *csio_fcoe_transport;
static struct scsi_transport_template *csio_fcoe_transport_vport;

/*
 * debugfs support
 */

/*
 * debugfs open: stash the inode private data (the tagged hw pointer set
 * up by csio_add_debugfs_mem()) for use by the read method.
 */
static int
csio_mem_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

/*
 * Read adapter memory (MC or EDC0/EDC1) through debugfs in 64-byte
 * chunks.  The low 2 bits of private_data encode which memory target to
 * read (see csio_add_debugfs_mem(), which stores hw + idx); masking them
 * off recovers the csio_hw pointer.  The file's i_size was set to the
 * memory size at creation time and bounds the read.
 */
static ssize_t
csio_mem_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	loff_t pos = *ppos;
	loff_t avail = file->f_path.dentry->d_inode->i_size;
	unsigned int mem = (uintptr_t)file->private_data & 3;
	struct csio_hw *hw = file->private_data - mem;

	if (pos < 0)
		return -EINVAL;
	if (pos >= avail)
		return 0;
	if (count > avail - pos)
		count = avail - pos;

	while (count) {
		size_t len;
		int ret, ofst;
		__be32 data[16];

		if (mem == MEM_MC)
			ret = csio_hw_mc_read(hw, pos, data, NULL);
		else
			ret = csio_hw_edc_read(hw, mem, pos, data, NULL);
		if (ret)
			return ret;

		/* Copy only the user-requested slice of this chunk. */
		ofst = pos % sizeof(data);
		len = min(count, sizeof(data) - ofst);
		if (copy_to_user(buf, (u8 *)data + ofst, len))
			return -EFAULT;

		buf += len;
		pos += len;
		count -= len;
	}
	/* Return the number of bytes actually transferred. */
	count = pos - *ppos;
	*ppos = pos;
	return count;
}

static const struct file_operations csio_mem_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = csio_mem_open,
	.read    = csio_mem_read,
	.llseek  = default_llseek,
};

/*
 * Create one debugfs file exposing an adapter memory region.  The memory
 * index is packed into the low bits of the hw pointer (hw is assumed to
 * be at least 4-byte aligned, so idx < 4 fits) and the file size is set
 * to the region size so reads are bounded.
 */
static void __devinit
csio_add_debugfs_mem(struct csio_hw *hw, const char *name,
		     unsigned int idx, unsigned int size_mb)
{
	struct dentry *de;

	de = debugfs_create_file(name, S_IRUSR, hw->debugfs_root,
				 (void *)hw + idx, &csio_mem_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = size_mb << 20;
}

/*
 * Populate the per-adapter debugfs directory with files for each memory
 * target the hardware reports as enabled.
 */
static int __devinit
csio_setup_debugfs(struct csio_hw *hw)
{
	int i;

	if (IS_ERR_OR_NULL(hw->debugfs_root))
		return -1;

	i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE);
	if (i & EDRAM0_ENABLE)
		csio_add_debugfs_mem(hw, "edc0", MEM_EDC0, 5);
	if (i & EDRAM1_ENABLE)
		csio_add_debugfs_mem(hw, "edc1", MEM_EDC1, 5);
	if (i & EXT_MEM_ENABLE)
		csio_add_debugfs_mem(hw, "mc", MEM_MC,
		      EXT_MEM_SIZE_GET(csio_rd_reg32(hw, MA_EXT_MEMORY_BAR)));
	return 0;
}

/*
 * csio_dfs_create - Creates and sets up per-hw debugfs.
 *
 * Quietly does nothing if module-level debugfs creation failed.
 */
static int
csio_dfs_create(struct csio_hw *hw)
{
	if (csio_debugfs_root) {
		hw->debugfs_root = debugfs_create_dir(pci_name(hw->pdev),
						      csio_debugfs_root);
		csio_setup_debugfs(hw);
	}

	return 0;
}

/*
 * csio_dfs_destroy - Destroys per-hw debugfs.
 */
static int
csio_dfs_destroy(struct csio_hw *hw)
{
	if (hw->debugfs_root)
		debugfs_remove_recursive(hw->debugfs_root);

	return 0;
}

/*
 *
csio_dfs_init - Debug filesystem initialization for the module.
 *
 */
static int
csio_dfs_init(void)
{
	/* Failure is non-fatal: the driver runs without debugfs. */
	csio_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!csio_debugfs_root)
		pr_warn("Could not create debugfs entry, continuing\n");

	return 0;
}

/*
 * csio_dfs_exit - debugfs cleanup for the module.
 */
static void
csio_dfs_exit(void)
{
	debugfs_remove(csio_debugfs_root);
}

/*
 * csio_pci_init - PCI initialization.
 * @pdev: PCI device.
 * @bars: Bitmask of bars to be requested.
 *
 * Initializes the PCI function by enabling MMIO, setting bus
 * mastership and setting DMA mask.  Tries a 64-bit DMA mask first and
 * falls back to 32-bit.  NOTE(review): returns -ENODEV for every
 * failure mode rather than the underlying error code.
 */
static int
csio_pci_init(struct pci_dev *pdev, int *bars)
{
	int rv = -ENODEV;

	*bars = pci_select_bars(pdev, IORESOURCE_MEM);

	if (pci_enable_device_mem(pdev))
		goto err;

	if (pci_request_selected_regions(pdev, *bars, KBUILD_MODNAME))
		goto err_disable_device;

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	} else {
		dev_err(&pdev->dev, "No suitable DMA available.\n");
		goto err_release_regions;
	}

	return 0;

err_release_regions:
	pci_release_selected_regions(pdev, *bars);
err_disable_device:
	pci_disable_device(pdev);
err:
	return rv;

}

/*
 * csio_pci_exit - PCI uninitialization.
 * @pdev: PCI device.
 * @bars: Bars to be released.
 *
 */
static void
csio_pci_exit(struct pci_dev *pdev, int *bars)
{
	pci_release_selected_regions(pdev, *bars);
	pci_disable_device(pdev);
}

/*
 * csio_hw_init_workers - Initialize the HW module's worker threads.
 * @hw: HW module.
 *
 */
static void
csio_hw_init_workers(struct csio_hw *hw)
{
	INIT_WORK(&hw->evtq_work, csio_evtq_worker);
}

static void
csio_hw_exit_workers(struct csio_hw *hw)
{
	cancel_work_sync(&hw->evtq_work);
	/* NOTE(review): flush_scheduled_work() is a legacy global flush;
	 * verify no other subsystem's work is being waited on here. */
	flush_scheduled_work();
}

/*
 * Register the already-allocated ingress/egress queues with firmware:
 * the forward-interrupt IQ (non-MSIX only), the FW event IQ, the mgmt
 * EQ, and one IQ/EQ pair per port per CPU.  Idempotent via
 * CSIO_HWF_Q_FW_ALLOCED.  On any SCSI-queue failure all FW queues are
 * torn down.  NOTE(review): the err path returns -EINVAL, discarding
 * the specific rv from the failed create.
 */
static int
csio_create_queues(struct csio_hw *hw)
{
	int i, j;
	struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
	int rv;
	struct csio_scsi_cpu_info *info;

	if (hw->flags & CSIO_HWF_Q_FW_ALLOCED)
		return 0;

	if (hw->intr_mode != CSIO_IM_MSIX) {
		rv = csio_wr_iq_create(hw, NULL, hw->intr_iq_idx,
					0, hw->pport[0].portid, false, NULL);
		if (rv != 0) {
			csio_err(hw, " Forward Interrupt IQ failed!: %d\n", rv);
			return rv;
		}
	}

	/* FW event queue */
	rv = csio_wr_iq_create(hw, NULL, hw->fwevt_iq_idx,
			       csio_get_fwevt_intr_idx(hw),
			       hw->pport[0].portid, true, NULL);
	if (rv != 0) {
		csio_err(hw, "FW event IQ config failed!: %d\n", rv);
		return rv;
	}

	/* Create mgmt queue */
	rv = csio_wr_eq_create(hw, NULL, mgmtm->eq_idx,
			       mgmtm->iq_idx, hw->pport[0].portid, NULL);

	if (rv != 0) {
		csio_err(hw, "Mgmt EQ create failed!: %d\n", rv);
		goto err;
	}

	/* Create SCSI queues */
	for (i = 0; i < hw->num_pports; i++) {
		info = &hw->scsi_cpu_info[i];

		for (j = 0; j < info->max_cpus; j++) {
			struct csio_scsi_qset *sqset = &hw->sqset[i][j];

			rv = csio_wr_iq_create(hw, NULL, sqset->iq_idx,
					       sqset->intr_idx, i, false, NULL);
			if (rv != 0) {
				csio_err(hw,
				   "SCSI module IQ config failed [%d][%d]:%d\n",
				   i, j, rv);
				goto err;
			}
			rv = csio_wr_eq_create(hw, NULL, sqset->eq_idx,
					       sqset->iq_idx, i, NULL);
			if (rv != 0) {
				csio_err(hw,
				   "SCSI module EQ config failed [%d][%d]:%d\n",
				   i, j, rv);
				goto err;
			}
		} /* for all CPUs */
	} /* For all ports */

	hw->flags |= CSIO_HWF_Q_FW_ALLOCED;
	return 0;
err:
	csio_wr_destroy_queues(hw, true);
	return -EINVAL;
}

/*
 * csio_config_queues - Configure the DMA queues.
 * @hw: HW module.
 *
 * Allocates memory for queues and registers them with FW.  When MSI-X
 * vectors are scarce, max_cpus shrinks and extra per-CPU qsets alias
 * the existing ones (j % max_cpus).  Finally requests IRQs; IRQ-path
 * failures are cleaned up inside csio_request_irqs() itself.
 */
int
csio_config_queues(struct csio_hw *hw)
{
	int i, j, idx, k = 0;
	int rv;
	struct csio_scsi_qset *sqset;
	struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
	struct csio_scsi_qset *orig;
	struct csio_scsi_cpu_info *info;

	if (hw->flags & CSIO_HWF_Q_MEM_ALLOCED)
		return csio_create_queues(hw);

	/* Calculate number of SCSI queues for MSIX we would like */
	hw->num_scsi_msix_cpus = num_online_cpus();
	hw->num_sqsets = num_online_cpus() * hw->num_pports;

	if (hw->num_sqsets > CSIO_MAX_SCSI_QSETS) {
		hw->num_sqsets = CSIO_MAX_SCSI_QSETS;
		hw->num_scsi_msix_cpus = CSIO_MAX_SCSI_CPU;
	}

	/* Initialize max_cpus, may get reduced during msix allocations */
	for (i = 0; i < hw->num_pports; i++)
		hw->scsi_cpu_info[i].max_cpus = hw->num_scsi_msix_cpus;

	csio_dbg(hw, "nsqsets:%d scpus:%d\n",
		 hw->num_sqsets, hw->num_scsi_msix_cpus);

	csio_intr_enable(hw);

	if (hw->intr_mode != CSIO_IM_MSIX) {

		/* Allocate Forward interrupt iq. */
		hw->intr_iq_idx = csio_wr_alloc_q(hw, CSIO_INTR_IQSIZE,
						CSIO_INTR_WRSIZE, CSIO_INGRESS,
						(void *)hw, 0, 0, NULL);
		if (hw->intr_iq_idx == -1) {
			csio_err(hw,
				 "Forward interrupt queue creation failed\n");
			goto intr_disable;
		}
	}

	/* Allocate the FW evt queue */
	hw->fwevt_iq_idx = csio_wr_alloc_q(hw, CSIO_FWEVT_IQSIZE,
					   CSIO_FWEVT_WRSIZE,
					   CSIO_INGRESS, (void *)hw,
					   CSIO_FWEVT_FLBUFS, 0,
					   csio_fwevt_intx_handler);
	if (hw->fwevt_iq_idx == -1) {
		csio_err(hw, "FW evt queue creation failed\n");
		goto intr_disable;
	}

	/* Allocate the mgmt queue */
	mgmtm->eq_idx = csio_wr_alloc_q(hw, CSIO_MGMT_EQSIZE,
					CSIO_MGMT_EQ_WRSIZE,
					CSIO_EGRESS, (void *)hw, 0, 0, NULL);
	if (mgmtm->eq_idx == -1) {
		csio_err(hw, "Failed to alloc egress queue for mgmt module\n");
		goto intr_disable;
	}

	/* Use FW IQ for MGMT req completion */
	mgmtm->iq_idx = hw->fwevt_iq_idx;

	/* Allocate SCSI queues */
	for (i = 0; i < hw->num_pports; i++) {
		info = &hw->scsi_cpu_info[i];

		for (j = 0; j < hw->num_scsi_msix_cpus; j++) {
			sqset = &hw->sqset[i][j];

			/* CPUs beyond max_cpus reuse an earlier qset. */
			if (j >= info->max_cpus) {
				k = j % info->max_cpus;
				orig = &hw->sqset[i][k];
				sqset->eq_idx = orig->eq_idx;
				sqset->iq_idx = orig->iq_idx;
				continue;
			}

			idx = csio_wr_alloc_q(hw, csio_scsi_eqsize, 0,
					      CSIO_EGRESS, (void *)hw, 0, 0,
					      NULL);
			if (idx == -1) {
				csio_err(hw, "EQ creation failed for idx:%d\n",
					 idx);
				goto intr_disable;
			}

			sqset->eq_idx = idx;

			idx = csio_wr_alloc_q(hw, CSIO_SCSI_IQSIZE,
					      CSIO_SCSI_IQ_WRSZ, CSIO_INGRESS,
					      (void *)hw, 0, 0,
					      csio_scsi_intx_handler);
			if (idx == -1) {
				csio_err(hw, "IQ creation failed for idx:%d\n",
					 idx);
				goto intr_disable;
			}
			sqset->iq_idx = idx;
		} /* for all CPUs */
	} /* For all ports */

	hw->flags |= CSIO_HWF_Q_MEM_ALLOCED;

	rv = csio_create_queues(hw);
	if (rv != 0)
		goto intr_disable;

	/*
	 * Now request IRQs for the vectors. In the event of a failure,
	 * cleanup is handled internally by this function.
	 */
	rv = csio_request_irqs(hw);
	if (rv != 0)
		return -EINVAL;

	return 0;

intr_disable:
	csio_intr_disable(hw, false);

	return -EINVAL;
}

/*
 * Allocate the mailbox and rnode mempools and the per-command SCSI
 * response DMA pool.  Released in reverse order on failure.
 */
static int
csio_resource_alloc(struct csio_hw *hw)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	int rv = -ENOMEM;

	wrm->num_q = ((CSIO_MAX_SCSI_QSETS * 2) + CSIO_HW_NIQ +
		       CSIO_HW_NEQ + CSIO_HW_NFLQ + CSIO_HW_NINTXQ);

	hw->mb_mempool = mempool_create_kmalloc_pool(CSIO_MIN_MEMPOOL_SZ,
						     sizeof(struct csio_mb));
	if (!hw->mb_mempool)
		goto err;

	hw->rnode_mempool = mempool_create_kmalloc_pool(CSIO_MIN_MEMPOOL_SZ,
						    sizeof(struct csio_rnode));
	if (!hw->rnode_mempool)
		goto err_free_mb_mempool;

	hw->scsi_pci_pool = pci_pool_create("csio_scsi_pci_pool", hw->pdev,
					    CSIO_SCSI_RSP_LEN, 8, 0);
	if (!hw->scsi_pci_pool)
		goto err_free_rn_pool;

	return 0;

err_free_rn_pool:
	mempool_destroy(hw->rnode_mempool);
	hw->rnode_mempool = NULL;
err_free_mb_mempool:
	mempool_destroy(hw->mb_mempool);
	hw->mb_mempool = NULL;
err:
	return rv;
}

/* Free everything csio_resource_alloc() created (pointers NULLed). */
static void
csio_resource_free(struct csio_hw *hw)
{
	pci_pool_destroy(hw->scsi_pci_pool);
	hw->scsi_pci_pool = NULL;
	mempool_destroy(hw->rnode_mempool);
	hw->rnode_mempool = NULL;
	mempool_destroy(hw->mb_mempool);
	hw->mb_mempool =
NULL;
}

/*
 * csio_hw_alloc - Allocate and initialize the HW module.
 * @pdev: PCI device.
 *
 * Allocates HW structure, DMA, memory resources, maps BARS to
 * host memory and initializes HW module.  Returns NULL on any failure
 * (errors are logged, not propagated).
 */
static struct csio_hw * __devinit
csio_hw_alloc(struct pci_dev *pdev)
{
	struct csio_hw *hw;

	hw = kzalloc(sizeof(struct csio_hw), GFP_KERNEL);
	if (!hw)
		goto err;

	hw->pdev = pdev;
	/* NOTE(review): strncpy() does not NUL-terminate when the source
	 * is >= 32 bytes -- assumes CSIO_DRV_VERSION is shorter; verify. */
	strncpy(hw->drv_version, CSIO_DRV_VERSION, 32);

	/* memory pool/DMA pool allocation */
	if (csio_resource_alloc(hw))
		goto err_free_hw;

	/* Get the start address of registers from BAR 0 */
	hw->regstart = ioremap_nocache(pci_resource_start(pdev, 0),
				       pci_resource_len(pdev, 0));
	if (!hw->regstart) {
		csio_err(hw, "Could not map BAR 0, regstart = %p\n",
			 hw->regstart);
		goto err_resource_free;
	}

	csio_hw_init_workers(hw);

	if (csio_hw_init(hw))
		goto err_unmap_bar;

	csio_dfs_create(hw);

	csio_dbg(hw, "hw:%p\n", hw);

	return hw;

err_unmap_bar:
	csio_hw_exit_workers(hw);
	iounmap(hw->regstart);
err_resource_free:
	csio_resource_free(hw);
err_free_hw:
	kfree(hw);
err:
	return NULL;
}

/*
 * csio_hw_free - Uninitialize and free the HW module.
 * @hw: The HW module
 *
 * Disable interrupts, uninit the HW module, free resources, free hw.
 */
static void
csio_hw_free(struct csio_hw *hw)
{
	csio_intr_disable(hw, true);
	csio_hw_exit_workers(hw);
	csio_hw_exit(hw);
	iounmap(hw->regstart);
	csio_dfs_destroy(hw);
	csio_resource_free(hw);
	kfree(hw);
}

/**
 * csio_shost_init - Create and initialize the lnode module.
 * @hw:		The HW module.
 * @dev:	The device associated with this invocation.
 * @probe:	Called from probe context or not?
 * @pln:	Parent lnode if any.
 *
 * Allocates lnode structure via scsi_host_alloc, initializes
 * shost, initializes lnode module and registers with SCSI ML
 * via scsi_host_add. This function is shared between physical and
 * virtual node ports (the NPIV case is detected by @dev differing from
 * the physical function's PCI device).  Returns the new lnode or NULL.
 */
struct csio_lnode *
csio_shost_init(struct csio_hw *hw, struct device *dev,
		bool probe, struct csio_lnode *pln)
{
	struct Scsi_Host  *shost = NULL;
	struct csio_lnode *ln;

	csio_fcoe_shost_template.cmd_per_lun = csio_lun_qdepth;
	csio_fcoe_shost_vport_template.cmd_per_lun = csio_lun_qdepth;

	/*
	 * hw->pdev is the physical port's PCI dev structure,
	 * which will be different from the NPIV dev structure.
	 */
	if (dev == &hw->pdev->dev)
		shost = scsi_host_alloc(
				&csio_fcoe_shost_template,
				sizeof(struct csio_lnode));
	else
		shost = scsi_host_alloc(
				&csio_fcoe_shost_vport_template,
				sizeof(struct csio_lnode));

	if (!shost)
		goto err;

	ln = shost_priv(shost);
	memset(ln, 0, sizeof(struct csio_lnode));

	/* Link common lnode to this lnode */
	ln->dev_num = (shost->host_no << 16);

	shost->can_queue = CSIO_MAX_QUEUE;
	shost->this_id = -1;
	shost->unique_id = shost->host_no;
	shost->max_cmd_len = 16; /* Max CDB length supported */
	shost->max_id = min_t(uint32_t, csio_fcoe_rnodes,
			      hw->fres_info.max_ssns);
	shost->max_lun = CSIO_MAX_LUN;
	if (dev == &hw->pdev->dev)
		shost->transportt = csio_fcoe_transport;
	else
		shost->transportt = csio_fcoe_transport_vport;

	/* root lnode */
	if (!hw->rln)
		hw->rln = ln;

	/* Other initialization here: Common, Transport specific */
	if (csio_lnode_init(ln, hw, pln))
		goto err_shost_put;

	if (scsi_add_host(shost, dev))
		goto err_lnode_exit;

	return ln;

err_lnode_exit:
	csio_lnode_exit(ln);
err_shost_put:
	scsi_host_put(shost);
err:
	return NULL;
}

/**
 * csio_shost_exit - De-instantiate the shost.
 * @ln:		The lnode module corresponding to the shost.
 *
 */
void
csio_shost_exit(struct csio_lnode *ln)
{
	struct Scsi_Host *shost = csio_ln_to_shost(ln);
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	/* Inform transport */
	fc_remove_host(shost);

	/* Inform SCSI ML */
	scsi_remove_host(shost);

	/* Flush all the events, so that any rnode removal events
	 * already queued are all handled, before we remove the lnode.
	 */
	spin_lock_irq(&hw->lock);
	csio_evtq_flush(hw);
	spin_unlock_irq(&hw->lock);

	csio_lnode_exit(ln);
	scsi_host_put(shost);
}

/* Allocate an lnode for the physical function's PCI device. */
struct csio_lnode *
csio_lnode_alloc(struct csio_hw *hw)
{
	return csio_shost_init(hw, &hw->pdev->dev, false, NULL);
}

/*
 * Block SCSI requests on every lnode (siblings and their NPIV
 * children).  The lnode list is snapshotted under hw->lock, then
 * scsi_block_requests() is called outside the lock since it may sleep.
 */
void
csio_lnodes_block_request(struct csio_hw *hw)
{
	struct Scsi_Host  *shost;
	struct csio_lnode *sln;
	struct csio_lnode *ln;
	struct list_head *cur_ln, *cur_cln;
	struct csio_lnode **lnode_list;
	int cur_cnt = 0, ii;

	lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
			     GFP_KERNEL);
	if (!lnode_list) {
		csio_err(hw, "Failed to allocate lnodes_list");
		return;
	}

	spin_lock_irq(&hw->lock);
	/* Traverse sibling lnodes */
	list_for_each(cur_ln, &hw->sln_head) {
		sln = (struct csio_lnode *) cur_ln;
		lnode_list[cur_cnt++] = sln;

		/* Traverse children lnodes */
		list_for_each(cur_cln, &sln->cln_head)
			lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
	}
	spin_unlock_irq(&hw->lock);

	for (ii = 0; ii < cur_cnt; ii++) {
		csio_dbg(hw, "Blocking IOs on lnode: %p\n", lnode_list[ii]);
		ln = lnode_list[ii];
		shost = csio_ln_to_shost(ln);
		scsi_block_requests(shost);

	}
	kfree(lnode_list);
}

/*
 * Unblock SCSI requests on every lnode.  Mirror of
 * csio_lnodes_block_request(): snapshot under hw->lock, unblock outside.
 */
void
csio_lnodes_unblock_request(struct csio_hw *hw)
{
	struct csio_lnode *ln;
	struct Scsi_Host  *shost;
	struct csio_lnode *sln;
	struct list_head *cur_ln, *cur_cln;
	struct csio_lnode **lnode_list;
	int cur_cnt = 0, ii;

	lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
			     GFP_KERNEL);
	if (!lnode_list) {
		csio_err(hw, "Failed to allocate lnodes_list");
		return;
	}

	spin_lock_irq(&hw->lock);
	/* Traverse sibling lnodes */
	list_for_each(cur_ln, &hw->sln_head) {
		sln = (struct csio_lnode *) cur_ln;
		lnode_list[cur_cnt++] = sln;

		/* Traverse children lnodes */
		list_for_each(cur_cln, &sln->cln_head)
			lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
	}
	spin_unlock_irq(&hw->lock);

	for (ii = 0; ii < cur_cnt; ii++) {
		csio_dbg(hw, "unblocking IOs on lnode: %p\n", lnode_list[ii]);
		ln = lnode_list[ii];
		shost = csio_ln_to_shost(ln);
		scsi_unblock_requests(shost);
	}
	kfree(lnode_list);
}

/*
 * Block SCSI requests on the lnodes (and their children) belonging to
 * one physical port only.
 */
void
csio_lnodes_block_by_port(struct csio_hw *hw, uint8_t portid)
{
	struct csio_lnode *ln;
	struct Scsi_Host  *shost;
	struct csio_lnode *sln;
	struct list_head *cur_ln, *cur_cln;
	struct csio_lnode **lnode_list;
	int cur_cnt = 0, ii;

	lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
			     GFP_KERNEL);
	if (!lnode_list) {
		csio_err(hw, "Failed to allocate lnodes_list");
		return;
	}

	spin_lock_irq(&hw->lock);
	/* Traverse sibling lnodes */
	list_for_each(cur_ln, &hw->sln_head) {
		sln = (struct csio_lnode *) cur_ln;
		if (sln->portid != portid)
			continue;

		lnode_list[cur_cnt++] = sln;

		/* Traverse children lnodes */
		list_for_each(cur_cln, &sln->cln_head)
			lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
	}
	spin_unlock_irq(&hw->lock);

	for (ii = 0; ii < cur_cnt; ii++) {
		csio_dbg(hw, "Blocking IOs on lnode: %p\n", lnode_list[ii]);
		ln = lnode_list[ii];
		shost = csio_ln_to_shost(ln);
		scsi_block_requests(shost);
	}
	kfree(lnode_list);
}

/*
 * Unblock SCSI requests on the lnodes (and their children) belonging to
 * one physical port only.
 */
void
csio_lnodes_unblock_by_port(struct csio_hw *hw, uint8_t portid)
{
	struct csio_lnode *ln;
	struct Scsi_Host  *shost;
	struct csio_lnode *sln;
	struct list_head *cur_ln, *cur_cln;
	struct csio_lnode **lnode_list;
	int cur_cnt = 0, ii;

	lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
			     GFP_KERNEL);
	if (!lnode_list) {
		csio_err(hw, "Failed to allocate lnodes_list");
		return;
	}

	spin_lock_irq(&hw->lock);
	/* Traverse sibling lnodes */
	list_for_each(cur_ln, &hw->sln_head) {
		sln = (struct csio_lnode *) cur_ln;
		if (sln->portid != portid)
			continue;
		lnode_list[cur_cnt++] = sln;

		/* Traverse children lnodes */
		list_for_each(cur_cln, &sln->cln_head)
			lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
	}
	spin_unlock_irq(&hw->lock);

	for (ii = 0; ii < cur_cnt; ii++) {
		csio_dbg(hw, "unblocking IOs on lnode: %p\n", lnode_list[ii]);
		ln = lnode_list[ii];
		shost = csio_ln_to_shost(ln);
		scsi_unblock_requests(shost);
	}
	kfree(lnode_list);
}

/*
 * Tear down lnodes.  NPIV children are always terminated (via
 * fc_vport_terminate); physical lnodes are additionally removed unless
 * @npiv is set.  Lists are snapshotted under hw->lock; the actual
 * teardown runs unlocked because it sleeps.
 */
void
csio_lnodes_exit(struct csio_hw *hw, bool npiv)
{
	struct csio_lnode *sln;
	struct csio_lnode *ln;
	struct list_head *cur_ln, *cur_cln;
	struct csio_lnode **lnode_list;
	int cur_cnt = 0, ii;

	lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
			     GFP_KERNEL);
	if (!lnode_list) {
		csio_err(hw, "lnodes_exit: Failed to allocate lnodes_list.\n");
		return;
	}

	/* Get all child lnodes(NPIV ports) */
	spin_lock_irq(&hw->lock);
	list_for_each(cur_ln, &hw->sln_head) {
		sln = (struct csio_lnode *) cur_ln;

		/* Traverse children lnodes */
		list_for_each(cur_cln, &sln->cln_head)
			lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
	}
	spin_unlock_irq(&hw->lock);

	/* Delete NPIV lnodes */
	for (ii = 0; ii < cur_cnt; ii++) {
		csio_dbg(hw, "Deleting child lnode: %p\n", lnode_list[ii]);
		ln = lnode_list[ii];
		fc_vport_terminate(ln->fc_vport);
	}

	/* Delete only npiv lnodes */
	if (npiv)
		goto free_lnodes;

	cur_cnt = 0;
	/* Get all physical lnodes */
	spin_lock_irq(&hw->lock);
	/* Traverse sibling lnodes */
	list_for_each(cur_ln, &hw->sln_head) {
		sln = (struct csio_lnode *) cur_ln;
		lnode_list[cur_cnt++] = sln;
	}
	spin_unlock_irq(&hw->lock);

	/* Delete physical lnodes */
	for (ii = 0; ii < cur_cnt; ii++) {
		csio_dbg(hw, "Deleting parent lnode: %p\n", lnode_list[ii]);
		csio_shost_exit(lnode_list[ii]);
	}

free_lnodes:
	kfree(lnode_list);
}

/*
 * csio_lnode_init_post: Set lnode attributes after starting HW.
 * @ln: lnode.
 *
 */
static void
csio_lnode_init_post(struct csio_lnode *ln)
{
	struct Scsi_Host *shost =
csio_ln_to_shost(ln);

	csio_fchost_attr_init(ln);

	scsi_scan_host(shost);
}

/*
 * csio_probe_one - Instantiate this function.
 * @pdev: PCI device
 * @id: Device ID
 *
 * This is the .probe() callback of the driver. This function:
 * - Initializes the PCI function by enabling MMIO, setting bus
 *   mastership and setting DMA mask.
 * - Allocates HW structure, DMA, memory resources, maps BARS to
 *   host memory and initializes HW module.
 * - Allocates lnode structure via scsi_host_alloc, initializes
 *   shost, initialized lnode module and registers with SCSI ML
 *   via scsi_host_add.
 * - Enables interrupts, and starts the chip by kicking off the
 *   HW state machine.
 * - Once hardware is ready, initiated scan of the host via
 *   scsi_scan_host.
 *
 * NOTE(review): a csio_hw_start() failure deliberately returns 0 and
 * leaves the device bound "in debug mode" (no lnodes created).
 */
static int __devinit
csio_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rv;
	int bars;
	int i;
	struct csio_hw *hw;
	struct csio_lnode *ln;

	rv = csio_pci_init(pdev, &bars);
	if (rv)
		goto err;

	hw = csio_hw_alloc(pdev);
	if (!hw) {
		rv = -ENODEV;
		goto err_pci_exit;
	}

	pci_set_drvdata(pdev, hw);

	if (csio_hw_start(hw) != 0) {
		dev_err(&pdev->dev,
			"Failed to start FW, continuing in debug mode.\n");
		return 0;
	}

	sprintf(hw->fwrev_str, "%u.%u.%u.%u\n",
		FW_HDR_FW_VER_MAJOR_GET(hw->fwrev),
		FW_HDR_FW_VER_MINOR_GET(hw->fwrev),
		FW_HDR_FW_VER_MICRO_GET(hw->fwrev),
		FW_HDR_FW_VER_BUILD_GET(hw->fwrev));

	/* One lnode (and Scsi_Host) per physical port. */
	for (i = 0; i < hw->num_pports; i++) {
		ln = csio_shost_init(hw, &pdev->dev, true, NULL);
		if (!ln) {
			rv = -ENODEV;
			break;
		}
		/* Initialize portid */
		ln->portid = hw->pport[i].portid;

		spin_lock_irq(&hw->lock);
		if (csio_lnode_start(ln) != 0)
			rv = -ENODEV;
		spin_unlock_irq(&hw->lock);

		if (rv)
			break;

		csio_lnode_init_post(ln);
	}

	if (rv)
		goto err_lnode_exit;

	return 0;

err_lnode_exit:
	csio_lnodes_block_request(hw);
	spin_lock_irq(&hw->lock);
	csio_hw_stop(hw);
	spin_unlock_irq(&hw->lock);
	csio_lnodes_unblock_request(hw);
	pci_set_drvdata(hw->pdev, NULL);
	csio_lnodes_exit(hw, 0);
	csio_hw_free(hw);
err_pci_exit:
	csio_pci_exit(pdev, &bars);
err:
	dev_err(&pdev->dev, "probe of device failed: %d\n", rv);
	return rv;
}

/*
 * csio_remove_one - Remove one instance of the driver at this PCI function.
 * @pdev: PCI device
 *
 * Used during hotplug operation.
 */
static void __devexit
csio_remove_one(struct pci_dev *pdev)
{
	struct csio_hw *hw = pci_get_drvdata(pdev);
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	csio_lnodes_block_request(hw);
	spin_lock_irq(&hw->lock);

	/* Stops lnode, Rnode s/m
	 * Quiesce IOs.
	 * All sessions with remote ports are unregistered.
	 */
	csio_hw_stop(hw);
	spin_unlock_irq(&hw->lock);
	csio_lnodes_unblock_request(hw);

	csio_lnodes_exit(hw, 0);
	csio_hw_free(hw);
	pci_set_drvdata(pdev, NULL);
	csio_pci_exit(pdev, &bars);
}

/*
 * csio_pci_error_detected - PCI error was detected
 * @pdev: PCI device
 *
 */
static pci_ers_result_t
csio_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct csio_hw *hw = pci_get_drvdata(pdev);

	csio_lnodes_block_request(hw);
	spin_lock_irq(&hw->lock);

	/* Post PCI error detected evt to HW s/m
	 * HW s/m handles this evt by quiescing IOs, unregisters rports
	 * and finally takes the device to offline.
	 */
	csio_post_event(&hw->sm, CSIO_HWE_PCIERR_DETECTED);
	spin_unlock_irq(&hw->lock);
	csio_lnodes_unblock_request(hw);
	csio_lnodes_exit(hw, 0);
	csio_intr_disable(hw, true);
	pci_disable_device(pdev);
	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

/*
 * csio_pci_slot_reset - PCI slot has been reset.
 * @pdev: PCI device
 *
 */
static pci_ers_result_t
csio_pci_slot_reset(struct pci_dev *pdev)
{
	struct csio_hw *hw = pci_get_drvdata(pdev);
	int ready;

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "cannot re-enable device in slot reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);
	pci_cleanup_aer_uncorrect_error_status(pdev);

	/* Bring HW s/m to ready state.
	 * but don't resume IOs.
	 */
	spin_lock_irq(&hw->lock);
	csio_post_event(&hw->sm, CSIO_HWE_PCIERR_SLOT_RESET);
	ready = csio_is_hw_ready(hw);
	spin_unlock_irq(&hw->lock);

	if (ready) {
		return PCI_ERS_RESULT_RECOVERED;
	} else {
		dev_err(&pdev->dev, "Can't initialize HW when in slot reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
}

/*
 * csio_pci_resume - Resume normal operations
 * @pdev: PCI device
 *
 * Re-creates the lnodes torn down in csio_pci_error_detected() and
 * restarts them (same per-port loop as probe).
 */
static void
csio_pci_resume(struct pci_dev *pdev)
{
	struct csio_hw *hw = pci_get_drvdata(pdev);
	struct csio_lnode *ln;
	int rv = 0;
	int i;

	/* Bring the LINK UP and Resume IO */

	for (i = 0; i < hw->num_pports; i++) {
		ln = csio_shost_init(hw, &pdev->dev, true, NULL);
		if (!ln) {
			rv = -ENODEV;
			break;
		}
		/* Initialize portid */
		ln->portid = hw->pport[i].portid;

		spin_lock_irq(&hw->lock);
		if (csio_lnode_start(ln) != 0)
			rv = -ENODEV;
		spin_unlock_irq(&hw->lock);

		if (rv)
			break;

		csio_lnode_init_post(ln);
	}

	if (rv)
		goto err_resume_exit;

	return;

err_resume_exit:
	csio_lnodes_block_request(hw);
	spin_lock_irq(&hw->lock);
	csio_hw_stop(hw);
	spin_unlock_irq(&hw->lock);
	csio_lnodes_unblock_request(hw);
	csio_lnodes_exit(hw, 0);
	csio_hw_free(hw);
	dev_err(&pdev->dev, "resume of device failed: %d\n", rv);
}

static struct pci_error_handlers csio_err_handler = {
	.error_detected	= csio_pci_error_detected,
	.slot_reset	= csio_pci_slot_reset,
	.resume		= csio_pci_resume,
};

static DEFINE_PCI_DEVICE_TABLE(csio_pci_tbl) = {
	CSIO_DEVICE(CSIO_DEVID_T440DBG_FCOE, 0),	/* T440DBG FCOE */
	CSIO_DEVICE(CSIO_DEVID_T420CR_FCOE, 0),		/* T420CR FCOE */
	CSIO_DEVICE(CSIO_DEVID_T422CR_FCOE, 0),		/* T422CR FCOE */
	CSIO_DEVICE(CSIO_DEVID_T440CR_FCOE, 0),		/* T440CR FCOE */
	CSIO_DEVICE(CSIO_DEVID_T420BCH_FCOE, 0),	/* T420BCH FCOE */
	CSIO_DEVICE(CSIO_DEVID_T440BCH_FCOE, 0),	/* T440BCH FCOE */
	CSIO_DEVICE(CSIO_DEVID_T440CH_FCOE, 0),		/* T440CH FCOE */
	CSIO_DEVICE(CSIO_DEVID_T420SO_FCOE, 0),		/* T420SO FCOE */
	CSIO_DEVICE(CSIO_DEVID_T420CX_FCOE, 0),		/* T420CX FCOE */
	CSIO_DEVICE(CSIO_DEVID_T420BT_FCOE, 0),		/* T420BT FCOE */
	CSIO_DEVICE(CSIO_DEVID_T404BT_FCOE, 0),		/* T404BT FCOE */
	CSIO_DEVICE(CSIO_DEVID_B420_FCOE, 0),		/* B420 FCOE */
	CSIO_DEVICE(CSIO_DEVID_B404_FCOE, 0),		/* B404 FCOE */
	CSIO_DEVICE(CSIO_DEVID_T480CR_FCOE, 0),		/* T480 CR FCOE */
	CSIO_DEVICE(CSIO_DEVID_T440LPCR_FCOE, 0),	/* T440 LP-CR FCOE */
	CSIO_DEVICE(CSIO_DEVID_PE10K, 0),		/* PE10K FCOE */
	CSIO_DEVICE(CSIO_DEVID_PE10K_PF1, 0),		/* PE10K FCOE on PF1 */
	{ 0, 0, 0, 0, 0, 0, 0 }
};


static struct pci_driver csio_pci_driver = {
	.name		= KBUILD_MODNAME,
	.driver		= {
		.owner	= THIS_MODULE,
	},
	.id_table	= csio_pci_tbl,
	.probe		= csio_probe_one,
	.remove		= csio_remove_one,
	.err_handler	= &csio_err_handler,
};

/*
 * csio_init - Chelsio storage driver initialization function.
 *
 */
static int __init
csio_init(void)
{
	int rv = -ENOMEM;

	pr_info("%s %s\n", CSIO_DRV_DESC, CSIO_DRV_VERSION);

	csio_dfs_init();

	csio_fcoe_transport = fc_attach_transport(&csio_fc_transport_funcs);
	if (!csio_fcoe_transport)
		goto err;

	csio_fcoe_transport_vport =
			fc_attach_transport(&csio_fc_transport_vport_funcs);
	if (!csio_fcoe_transport_vport)
		goto err_vport;

	rv = pci_register_driver(&csio_pci_driver);
	if (rv)
		goto err_pci;

	return 0;

err_pci:
	fc_release_transport(csio_fcoe_transport_vport);
err_vport:
	fc_release_transport(csio_fcoe_transport);
err:
	csio_dfs_exit();
	return rv;
}

/*
 * csio_exit - Chelsio storage driver uninitialization .
 *
 * Function that gets called in the unload path.
 */
static void __exit
csio_exit(void)
{
	pci_unregister_driver(&csio_pci_driver);
	csio_dfs_exit();
	fc_release_transport(csio_fcoe_transport_vport);
	fc_release_transport(csio_fcoe_transport);
}

module_init(csio_init);
module_exit(csio_exit);
MODULE_AUTHOR(CSIO_DRV_AUTHOR);
MODULE_DESCRIPTION(CSIO_DRV_DESC);
MODULE_LICENSE(CSIO_DRV_LICENSE);
MODULE_DEVICE_TABLE(pci, csio_pci_tbl);
MODULE_VERSION(CSIO_DRV_VERSION);
MODULE_FIRMWARE(CSIO_FW_FNAME);
+158
drivers/scsi/csiostor/csio_init.h
/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT.  IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __CSIO_INIT_H__
#define __CSIO_INIT_H__

#include <linux/pci.h>
#include <linux/if_ether.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "csio_scsi.h"
#include "csio_lnode.h"
#include "csio_rnode.h"
#include "csio_hw.h"

/* Module identification strings used by the MODULE_* macros */
#define CSIO_DRV_AUTHOR			"Chelsio Communications"
#define CSIO_DRV_LICENSE		"Dual BSD/GPL"
#define CSIO_DRV_DESC			"Chelsio FCoE driver"
#define CSIO_DRV_VERSION		"1.0.0"

/* Builds one struct pci_device_id entry for the PCI ID table */
#define CSIO_DEVICE(devid, idx)						\
{ PCI_VENDOR_ID_CHELSIO, (devid), PCI_ANY_ID, PCI_ANY_ID, 0, 0, (idx) }

#define CSIO_IS_T4_FPGA(_dev)		(((_dev) == CSIO_DEVID_PE10K) ||\
					 ((_dev) == CSIO_DEVID_PE10K_PF1))

/* FCoE device IDs */
#define CSIO_DEVID_PE10K		0xA000
#define CSIO_DEVID_PE10K_PF1		0xA001
#define CSIO_DEVID_T440DBG_FCOE		0x4600
#define CSIO_DEVID_T420CR_FCOE		0x4601
#define CSIO_DEVID_T422CR_FCOE		0x4602
#define CSIO_DEVID_T440CR_FCOE		0x4603
#define CSIO_DEVID_T420BCH_FCOE		0x4604
#define CSIO_DEVID_T440BCH_FCOE		0x4605
#define CSIO_DEVID_T440CH_FCOE		0x4606
#define CSIO_DEVID_T420SO_FCOE		0x4607
#define CSIO_DEVID_T420CX_FCOE		0x4608
#define CSIO_DEVID_T420BT_FCOE		0x4609
#define CSIO_DEVID_T404BT_FCOE		0x460A
#define CSIO_DEVID_B420_FCOE		0x460B
#define CSIO_DEVID_B404_FCOE		0x460C
#define CSIO_DEVID_T480CR_FCOE		0x460D
#define CSIO_DEVID_T440LPCR_FCOE	0x460E

/* FC transport templates, defined in csio_attr.c */
extern struct fc_function_template csio_fc_transport_funcs;
extern struct fc_function_template csio_fc_transport_vport_funcs;

void csio_fchost_attr_init(struct csio_lnode *);

/* INTx handlers */
void csio_scsi_intx_handler(struct csio_hw *, void *, uint32_t,
			    struct csio_fl_dma_buf *, void *);

void csio_fwevt_intx_handler(struct csio_hw *, void *, uint32_t,
			     struct csio_fl_dma_buf *, void *);

/* Common os lnode APIs */
void csio_lnodes_block_request(struct csio_hw *);
void csio_lnodes_unblock_request(struct csio_hw *);
void csio_lnodes_block_by_port(struct csio_hw *, uint8_t);
void csio_lnodes_unblock_by_port(struct csio_hw *, uint8_t);

struct csio_lnode *csio_shost_init(struct csio_hw *, struct device *, bool,
				   struct csio_lnode *);
void csio_shost_exit(struct csio_lnode *);
void csio_lnodes_exit(struct csio_hw *, bool);

/* Inverse of shost_priv(): recover the Scsi_Host from its lnode,
 * which lives in the host's hostdata area.
 */
static inline struct Scsi_Host *
csio_ln_to_shost(struct csio_lnode *ln)
{
	return container_of((void *)ln, struct Scsi_Host, hostdata[0]);
}

/* SCSI -- locking version of get/put ioreqs */
static inline struct csio_ioreq *
csio_get_scsi_ioreq_lock(struct csio_hw *hw, struct csio_scsim *scsim)
{
	struct csio_ioreq *ioreq;
	unsigned long flags;

	spin_lock_irqsave(&scsim->freelist_lock, flags);
	ioreq = csio_get_scsi_ioreq(scsim);
	spin_unlock_irqrestore(&scsim->freelist_lock, flags);

	return ioreq;
}

static inline void
csio_put_scsi_ioreq_lock(struct csio_hw *hw, struct csio_scsim *scsim,
			 struct csio_ioreq *ioreq)
{
	unsigned long flags;

	spin_lock_irqsave(&scsim->freelist_lock, flags);
	csio_put_scsi_ioreq(scsim, ioreq);
	spin_unlock_irqrestore(&scsim->freelist_lock, flags);
}

/* Called in interrupt context */
static inline void
csio_put_scsi_ioreq_list_lock(struct csio_hw *hw, struct csio_scsim *scsim,
			      struct list_head *reqlist, int n)
{
	unsigned long flags;

	spin_lock_irqsave(&scsim->freelist_lock, flags);
	csio_put_scsi_ioreq_list(scsim, reqlist, n);
	spin_unlock_irqrestore(&scsim->freelist_lock, flags);
}

/* Called in interrupt context */
/* NOTE(review): unlike the ioreq helpers above, this one takes hw->lock
 * rather than scsim->freelist_lock — presumably the DDP list is guarded
 * by the HW lock; confirm against the csio_scsi.c list users.
 */
static inline void
csio_put_scsi_ddp_list_lock(struct csio_hw *hw, struct csio_scsim *scsim,
			    struct list_head *reqlist, int n)
{
	unsigned long flags;

	spin_lock_irqsave(&hw->lock, flags);
	csio_put_scsi_ddp_list(scsim, reqlist, n);
	spin_unlock_irqrestore(&hw->lock, flags);
}

#endif /* ifndef __CSIO_INIT_H__ */
+624
drivers/scsi/csiostor/csio_isr.c
/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT.  IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/string.h>

#include "csio_init.h"
#include "csio_hw.h"

/*
 * csio_nondata_isr - MSI-X handler for the non-data vector.
 * @irq: IRQ number.
 * @dev_id: csio_hw pointer passed at request_irq() time.
 *
 * Services slow-path (error) interrupts and mailbox completions.
 * If the mailbox handler indicates pending work and no firmware-event
 * worker is already queued, schedules the evtq worker.
 */
static irqreturn_t
csio_nondata_isr(int irq, void *dev_id)
{
	struct csio_hw *hw = (struct csio_hw *) dev_id;
	int rv;
	unsigned long flags;

	if (unlikely(!hw))
		return IRQ_NONE;

	if (unlikely(pci_channel_offline(hw->pdev))) {
		CSIO_INC_STATS(hw, n_pcich_offline);
		return IRQ_NONE;
	}

	spin_lock_irqsave(&hw->lock, flags);
	csio_hw_slow_intr_handler(hw);
	rv = csio_mb_isr_handler(hw);

	/* CSIO_HWF_FWEVT_PENDING guards against queueing the worker twice */
	if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
		hw->flags |= CSIO_HWF_FWEVT_PENDING;
		spin_unlock_irqrestore(&hw->lock, flags);
		schedule_work(&hw->evtq_work);
		return IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&hw->lock, flags);
	return IRQ_HANDLED;
}

/*
 * csio_fwevt_handler - Common FW event handler routine.
 * @hw: HW module.
 *
 * This is the ISR for FW events. It is shared b/w MSIX
 * and INTx handlers.
 */
static void
csio_fwevt_handler(struct csio_hw *hw)
{
	int rv;
	unsigned long flags;

	rv = csio_fwevtq_handler(hw);

	spin_lock_irqsave(&hw->lock, flags);
	if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
		hw->flags |= CSIO_HWF_FWEVT_PENDING;
		spin_unlock_irqrestore(&hw->lock, flags);
		schedule_work(&hw->evtq_work);
		return;
	}
	spin_unlock_irqrestore(&hw->lock, flags);

} /* csio_fwevt_handler */

/*
 * csio_fwevt_isr() - FW events MSIX ISR
 * @irq: IRQ number.
 * @dev_id: csio_hw pointer.
 *
 * Process WRs on the FW event queue.
 *
 */
static irqreturn_t
csio_fwevt_isr(int irq, void *dev_id)
{
	struct csio_hw *hw = (struct csio_hw *) dev_id;

	if (unlikely(!hw))
		return IRQ_NONE;

	if (unlikely(pci_channel_offline(hw->pdev))) {
		CSIO_INC_STATS(hw, n_pcich_offline);
		return IRQ_NONE;
	}

	csio_fwevt_handler(hw);

	return IRQ_HANDLED;
}

/*
 * csio_fwevt_intx_handler() - INTx wrapper for handling FW events.
 * @hw: HW module.
 * @wr: Work request (unused here).
 * @len: WR length (unused here).
 * @flb: Freelist buffer (unused here).
 * @priv: Private data (unused here).
 */
void
csio_fwevt_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
			   struct csio_fl_dma_buf *flb, void *priv)
{
	csio_fwevt_handler(hw);
} /* csio_fwevt_intx_handler */

/*
 * csio_process_scsi_cmpl - Process a SCSI WR completion.
 * @hw: HW module.
 * @wr: The completed WR from the ingress queue.
 * @len: Length of the WR.
 * @flb: Freelist buffer array.
 * @cbfn_q: list_head to collect completed ioreqs on; their io_cbfns are
 *          invoked later by csio_scsi_isr_handler().
 */
static void
csio_process_scsi_cmpl(struct csio_hw *hw, void *wr, uint32_t len,
			struct csio_fl_dma_buf *flb, void *cbfn_q)
{
	struct csio_ioreq *ioreq;
	uint8_t *scsiwr;
	uint8_t subop;
	void *cmnd;
	unsigned long flags;

	ioreq = csio_scsi_cmpl_handler(hw, wr, len, flb, NULL, &scsiwr);
	if (likely(ioreq)) {
		if (unlikely(*scsiwr == FW_SCSI_ABRT_CLS_WR)) {
			subop = FW_SCSI_ABRT_CLS_WR_SUB_OPCODE_GET(
					((struct fw_scsi_abrt_cls_wr *)
					    scsiwr)->sub_opcode_to_chk_all_io);

			csio_dbg(hw, "%s cmpl recvd ioreq:%p status:%d\n",
				    subop ? "Close" : "Abort",
				    ioreq, ioreq->wr_status);

			spin_lock_irqsave(&hw->lock, flags);
			if (subop)
				csio_scsi_closed(ioreq,
						 (struct list_head *)cbfn_q);
			else
				csio_scsi_aborted(ioreq,
						  (struct list_head *)cbfn_q);
			/*
			 * We call scsi_done for I/Os that driver thinks aborts
			 * have timed out. If there is a race caused by FW
			 * completing abort at the exact same time that the
			 * driver has detected the abort timeout, the following
			 * check prevents calling of scsi_done twice for the
			 * same command: once from the eh_abort_handler, another
			 * from csio_scsi_isr_handler(). This also avoids the
			 * need to check if csio_scsi_cmnd(req) is NULL in the
			 * fast path.
			 */
			cmnd = csio_scsi_cmnd(ioreq);
			if (unlikely(cmnd == NULL))
				list_del_init(&ioreq->sm.sm_list);

			spin_unlock_irqrestore(&hw->lock, flags);

			if (unlikely(cmnd == NULL))
				csio_put_scsi_ioreq_lock(hw,
						csio_hw_to_scsim(hw), ioreq);
		} else {
			spin_lock_irqsave(&hw->lock, flags);
			csio_scsi_completed(ioreq, (struct list_head *)cbfn_q);
			spin_unlock_irqrestore(&hw->lock, flags);
		}
	}
}

/*
 * csio_scsi_isr_handler() - Common SCSI ISR handler.
 * @iq: Ingress queue pointer.
 *
 * Processes SCSI completions on the SCSI IQ indicated by scm->iq_idx
 * by calling csio_wr_process_iq_idx. If there are completions on the
 * isr_cbfn_q, yank them out into a local queue and call their io_cbfns.
 * Once done, add these completions onto the freelist.
 * This routine is shared b/w MSIX and INTx.
 */
static inline irqreturn_t
csio_scsi_isr_handler(struct csio_q *iq)
{
	struct csio_hw *hw = (struct csio_hw *)iq->owner;
	LIST_HEAD(cbfn_q);
	struct list_head *tmp;
	struct csio_scsim *scm;
	struct csio_ioreq *ioreq;
	int isr_completions = 0;

	scm = csio_hw_to_scsim(hw);

	if (unlikely(csio_wr_process_iq(hw, iq, csio_process_scsi_cmpl,
					&cbfn_q) != 0))
		return IRQ_NONE;

	/* Call back the completion routines */
	list_for_each(tmp, &cbfn_q) {
		/* ioreq embeds its list linkage at offset 0, hence the cast */
		ioreq = (struct csio_ioreq *)tmp;
		isr_completions++;
		ioreq->io_cbfn(hw, ioreq);
		/* Release ddp buffer if used for this req */
		if (unlikely(ioreq->dcopy))
			csio_put_scsi_ddp_list_lock(hw, scm, &ioreq->gen_list,
						    ioreq->nsge);
	}

	if (isr_completions) {
		/* Return the ioreqs back to ioreq->freelist */
		csio_put_scsi_ioreq_list_lock(hw, scm, &cbfn_q,
					      isr_completions);
	}

	return IRQ_HANDLED;
}

/*
 * csio_scsi_isr() - SCSI MSIX handler
 * @irq: IRQ number.
 * @dev_id: Ingress queue pointer.
 *
 * This is the top level SCSI MSIX handler. Calls csio_scsi_isr_handler()
 * for handling SCSI completions.
 */
static irqreturn_t
csio_scsi_isr(int irq, void *dev_id)
{
	struct csio_q *iq = (struct csio_q *) dev_id;
	struct csio_hw *hw;

	if (unlikely(!iq))
		return IRQ_NONE;

	hw = (struct csio_hw *)iq->owner;

	if (unlikely(pci_channel_offline(hw->pdev))) {
		CSIO_INC_STATS(hw, n_pcich_offline);
		return IRQ_NONE;
	}

	csio_scsi_isr_handler(iq);

	return IRQ_HANDLED;
}

/*
 * csio_scsi_intx_handler() - SCSI INTx handler
 * @hw: HW module.
 * @wr: Work request (unused here).
 * @len: WR length (unused here).
 * @flb: Freelist buffer (unused here).
 * @priv: The SCSI ingress queue (struct csio_q *).
 *
 * This is the top level SCSI INTx handler. Calls csio_scsi_isr_handler()
 * for handling SCSI completions.
 */
void
csio_scsi_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
			struct csio_fl_dma_buf *flb, void *priv)
{
	struct csio_q *iq = priv;

	csio_scsi_isr_handler(iq);

} /* csio_scsi_intx_handler */

/*
 * csio_fcoe_isr() - INTx/MSI interrupt service routine for FCoE.
 * @irq: IRQ number.
 * @dev_id: csio_hw pointer.
 *
 * Single-vector path: handles slow-path interrupts, the INTx forward
 * ingress queue, and mailbox completions in one pass.
 */
static irqreturn_t
csio_fcoe_isr(int irq, void *dev_id)
{
	struct csio_hw *hw = (struct csio_hw *) dev_id;
	struct csio_q *intx_q = NULL;
	int rv;
	irqreturn_t ret = IRQ_NONE;
	unsigned long flags;

	if (unlikely(!hw))
		return IRQ_NONE;

	if (unlikely(pci_channel_offline(hw->pdev))) {
		CSIO_INC_STATS(hw, n_pcich_offline);
		return IRQ_NONE;
	}

	/* Disable the interrupt for this PCI function. */
	if (hw->intr_mode == CSIO_IM_INTX)
		csio_wr_reg32(hw, 0, MYPF_REG(PCIE_PF_CLI));

	/*
	 * The read in the following function will flush the
	 * above write.
	 */
	if (csio_hw_slow_intr_handler(hw))
		ret = IRQ_HANDLED;

	/* Get the INTx Forward interrupt IQ. */
	intx_q = csio_get_q(hw, hw->intr_iq_idx);

	CSIO_DB_ASSERT(intx_q);

	/* IQ handler is not possible for intx_q, hence pass in NULL */
	if (likely(csio_wr_process_iq(hw, intx_q, NULL, NULL) == 0))
		ret = IRQ_HANDLED;

	spin_lock_irqsave(&hw->lock, flags);
	rv = csio_mb_isr_handler(hw);
	if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
		hw->flags |= CSIO_HWF_FWEVT_PENDING;
		spin_unlock_irqrestore(&hw->lock, flags);
		schedule_work(&hw->evtq_work);
		return IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&hw->lock, flags);

	return ret;
}

/*
 * csio_add_msix_desc - Fill in human-readable names for each MSI-X vector
 * (shown in /proc/interrupts): nondata, fwevt, and one per SCSI queue set.
 */
static void
csio_add_msix_desc(struct csio_hw *hw)
{
	int i;
	struct csio_msix_entries *entryp = &hw->msix_entries[0];
	int k = CSIO_EXTRA_VECS;
	int len = sizeof(entryp->desc) - 1;
	int cnt = hw->num_sqsets + k;

	/* Non-data vector */
	memset(entryp->desc, 0, len + 1);
	snprintf(entryp->desc, len, "csio-%02x:%02x:%x-nondata",
		 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw));

	entryp++;
	memset(entryp->desc, 0, len + 1);
	snprintf(entryp->desc, len, "csio-%02x:%02x:%x-fwevt",
		 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw));
	entryp++;

	/* Name SCSI vecs */
	for (i = k; i < cnt; i++, entryp++) {
		memset(entryp->desc, 0, len + 1);
		snprintf(entryp->desc, len, "csio-%02x:%02x:%x-scsi%d",
			 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw),
			 CSIO_PCI_FUNC(hw), i - CSIO_EXTRA_VECS);
	}
}

/*
 * csio_request_irqs - Register interrupt handlers for the current mode.
 * @hw: HW module.
 *
 * INTx/MSI: a single shared csio_fcoe_isr().  MSI-X: one vector each for
 * nondata and fwevt, then one csio_scsi_isr() per SCSI queue set.
 * On partial MSI-X failure all vectors requested so far are freed.
 * Returns 0 on success, -EINVAL on failure.
 */
int
csio_request_irqs(struct csio_hw *hw)
{
	int rv, i, j, k = 0;
	struct csio_msix_entries *entryp = &hw->msix_entries[0];
	struct csio_scsi_cpu_info *info;

	if (hw->intr_mode != CSIO_IM_MSIX) {
		rv = request_irq(hw->pdev->irq, csio_fcoe_isr,
					(hw->intr_mode == CSIO_IM_MSI) ?
							0 : IRQF_SHARED,
					KBUILD_MODNAME, hw);
		if (rv) {
			if (hw->intr_mode == CSIO_IM_MSI)
				pci_disable_msi(hw->pdev);
			csio_err(hw, "Failed to allocate interrupt line.\n");
			return -EINVAL;
		}

		goto out;
	}

	/* Add the MSIX vector descriptions */
	csio_add_msix_desc(hw);

	rv = request_irq(entryp[k].vector, csio_nondata_isr, 0,
			 entryp[k].desc, hw);
	if (rv) {
		csio_err(hw, "IRQ request failed for vec %d err:%d\n",
			 entryp[k].vector, rv);
		goto err;
	}

	entryp[k++].dev_id = (void *)hw;

	rv = request_irq(entryp[k].vector, csio_fwevt_isr, 0,
			 entryp[k].desc, hw);
	if (rv) {
		csio_err(hw, "IRQ request failed for vec %d err:%d\n",
			 entryp[k].vector, rv);
		goto err;
	}

	entryp[k++].dev_id = (void *)hw;

	/* Allocate IRQs for SCSI */
	for (i = 0; i < hw->num_pports; i++) {
		info = &hw->scsi_cpu_info[i];
		for (j = 0; j < info->max_cpus; j++, k++) {
			struct csio_scsi_qset *sqset = &hw->sqset[i][j];
			struct csio_q *q = hw->wrm.q_arr[sqset->iq_idx];

			rv = request_irq(entryp[k].vector, csio_scsi_isr, 0,
					 entryp[k].desc, q);
			if (rv) {
				csio_err(hw,
				       "IRQ request failed for vec %d err:%d\n",
				       entryp[k].vector, rv);
				goto err;
			}

			entryp[k].dev_id = (void *)q;

		} /* for all scsi cpus */
	} /* for all ports */

out:
	hw->flags |= CSIO_HWF_HOST_INTR_ENABLED;

	return 0;

err:
	/* Free the k vectors successfully requested before the failure */
	for (i = 0; i < k; i++) {
		entryp = &hw->msix_entries[i];
		free_irq(entryp->vector, entryp->dev_id);
	}
	pci_disable_msix(hw->pdev);

	return -EINVAL;
}

/*
 * csio_disable_msix - Disable MSI-X, optionally freeing all vectors first.
 * @hw: HW module.
 * @free: when true, free_irq() every allocated vector before disabling.
 */
static void
csio_disable_msix(struct csio_hw *hw, bool free)
{
	int i;
	struct csio_msix_entries *entryp;
	int cnt = hw->num_sqsets + CSIO_EXTRA_VECS;

	if (free) {
		for (i = 0; i < cnt; i++) {
			entryp = &hw->msix_entries[i];
			free_irq(entryp->vector, entryp->dev_id);
		}
	}
	pci_disable_msix(hw->pdev);
}

/* Reduce per-port max possible CPUs */
static void
csio_reduce_sqsets(struct csio_hw *hw, int cnt)
{
	int i;
	struct csio_scsi_cpu_info *info;

	/* Shave one queue set per port round-robin until we fit in cnt */
	while (cnt < hw->num_sqsets) {
		for (i = 0; i < hw->num_pports; i++) {
			info = &hw->scsi_cpu_info[i];
			if (info->max_cpus > 1) {
				info->max_cpus--;
				hw->num_sqsets--;
				if (hw->num_sqsets <= cnt)
					break;
			}
		}
	}

	csio_dbg(hw, "Reduced sqsets to %d\n", hw->num_sqsets);
}

/*
 * csio_enable_msix - Try to enable MSI-X with one vector per queue set.
 * @hw: HW module.
 *
 * Negotiates the vector count down with pci_enable_msix() (which returns
 * the number available when the request is too big), shrinking the SCSI
 * queue sets if necessary, then records and distributes the granted
 * vectors.  Returns 0 on success, -ENOMEM on failure.
 */
static int
csio_enable_msix(struct csio_hw *hw)
{
	int rv, i, j, k, n, min, cnt;
	struct csio_msix_entries *entryp;
	struct msix_entry *entries;
	int extra = CSIO_EXTRA_VECS;
	struct csio_scsi_cpu_info *info;

	min = hw->num_pports + extra;
	cnt = hw->num_sqsets + extra;

	/* Max vectors required based on #niqs configured in fw */
	if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS || !csio_is_hw_master(hw))
		cnt = min_t(uint8_t, hw->cfg_niq, cnt);

	entries = kzalloc(sizeof(struct msix_entry) * cnt, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	for (i = 0; i < cnt; i++)
		entries[i].entry = (uint16_t)i;

	csio_dbg(hw, "FW supp #niq:%d, trying %d msix's\n", hw->cfg_niq, cnt);

	/* A positive return is the number of vectors we could get; retry
	 * with that count as long as it still meets our minimum.
	 */
	while ((rv = pci_enable_msix(hw->pdev, entries, cnt)) >= min)
		cnt = rv;
	if (!rv) {
		if (cnt < (hw->num_sqsets + extra)) {
			csio_dbg(hw, "Reducing sqsets to %d\n", cnt - extra);
			csio_reduce_sqsets(hw, cnt - extra);
		}
	} else {
		if (rv > 0) {
			pci_disable_msix(hw->pdev);
			csio_info(hw, "Not using MSI-X, remainder:%d\n", rv);
		}

		kfree(entries);
		return -ENOMEM;
	}

	/* Save off vectors */
	for (i = 0; i < cnt; i++) {
		entryp = &hw->msix_entries[i];
		entryp->vector = entries[i].vector;
	}

	/* Distribute vectors */
	k = 0;
	csio_set_nondata_intr_idx(hw, entries[k].entry);
	csio_set_mb_intr_idx(csio_hw_to_mbm(hw), entries[k++].entry);
	csio_set_fwevt_intr_idx(hw, entries[k++].entry);

	for (i = 0; i < hw->num_pports; i++) {
		info = &hw->scsi_cpu_info[i];

		for (j = 0; j < hw->num_scsi_msix_cpus; j++) {
			n = (j % info->max_cpus) +  k;
			hw->sqset[i][j].intr_idx = entries[n].entry;
		}

		k += info->max_cpus;
	}

	kfree(entries);
	return 0;
}

/*
 * csio_intr_enable - Pick and enable an interrupt mode.
 * @hw: HW module.
 *
 * Tries MSI-X (module param csio_msi == 2), then MSI (== 1), then INTx,
 * recording the chosen mode in hw->intr_mode.
 */
void
csio_intr_enable(struct csio_hw *hw)
{
	hw->intr_mode = CSIO_IM_NONE;
	hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;

	/* Try MSIX, then MSI or fall back to INTx */
	if ((csio_msi == 2) && !csio_enable_msix(hw))
		hw->intr_mode = CSIO_IM_MSIX;
	else {
		/* Max iqs required based on #niqs configured in fw */
		if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS ||
			!csio_is_hw_master(hw)) {
			int extra = CSIO_EXTRA_MSI_IQS;

			if (hw->cfg_niq < (hw->num_sqsets + extra)) {
				csio_dbg(hw, "Reducing sqsets to %d\n",
					 hw->cfg_niq - extra);
				csio_reduce_sqsets(hw, hw->cfg_niq - extra);
			}
		}

		if ((csio_msi == 1) && !pci_enable_msi(hw->pdev))
			hw->intr_mode = CSIO_IM_MSI;
		else
			hw->intr_mode = CSIO_IM_INTX;
	}

	csio_dbg(hw, "Using %s interrupt mode.\n",
		(hw->intr_mode == CSIO_IM_MSIX) ? "MSIX" :
		((hw->intr_mode == CSIO_IM_MSI) ? "MSI" : "INTx"));
}

/*
 * csio_intr_disable - Disable the current interrupt mode.
 * @hw: HW module.
 * @free: when true, also free the registered IRQ handler(s).
 */
void
csio_intr_disable(struct csio_hw *hw, bool free)
{
	csio_hw_intr_disable(hw);

	switch (hw->intr_mode) {
	case CSIO_IM_MSIX:
		csio_disable_msix(hw, free);
		break;
	case CSIO_IM_MSI:
		if (free)
			free_irq(hw->pdev->irq, hw);
		pci_disable_msi(hw->pdev);
		break;
	case CSIO_IM_INTX:
		if (free)
			free_irq(hw->pdev->irq, hw);
		break;
	default:
		break;
	}
	hw->intr_mode = CSIO_IM_NONE;
	hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;
}
+2133
drivers/scsi/csiostor/csio_lnode.c
···11+/*22+ * This file is part of the Chelsio FCoE driver for Linux.33+ *44+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.55+ *66+ * This software is available to you under a choice of one of two77+ * licenses. You may choose to be licensed under the terms of the GNU88+ * General Public License (GPL) Version 2, available from the file99+ * COPYING in the main directory of this source tree, or the1010+ * OpenIB.org BSD license below:1111+ *1212+ * Redistribution and use in source and binary forms, with or1313+ * without modification, are permitted provided that the following1414+ * conditions are met:1515+ *1616+ * - Redistributions of source code must retain the above1717+ * copyright notice, this list of conditions and the following1818+ * disclaimer.1919+ *2020+ * - Redistributions in binary form must reproduce the above2121+ * copyright notice, this list of conditions and the following2222+ * disclaimer in the documentation and/or other materials2323+ * provided with the distribution.2424+ *2525+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,2626+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF2727+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND2828+ * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS2929+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN3030+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN3131+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE3232+ * SOFTWARE.3333+ */3434+3535+#include <linux/kernel.h>3636+#include <linux/delay.h>3737+#include <linux/slab.h>3838+#include <linux/utsname.h>3939+#include <scsi/scsi_device.h>4040+#include <scsi/scsi_transport_fc.h>4141+#include <asm/unaligned.h>4242+#include <scsi/fc/fc_els.h>4343+#include <scsi/fc/fc_fs.h>4444+#include <scsi/fc/fc_gs.h>4545+#include <scsi/fc/fc_ms.h>4646+4747+#include "csio_hw.h"4848+#include "csio_mb.h"4949+#include "csio_lnode.h"5050+#include "csio_rnode.h"5151+5252+int csio_fcoe_rnodes = 1024;5353+int csio_fdmi_enable = 1;5454+5555+#define PORT_ID_PTR(_x) ((uint8_t *)(&_x) + 1)5656+5757+/* Lnode SM declarations */5858+static void csio_lns_uninit(struct csio_lnode *, enum csio_ln_ev);5959+static void csio_lns_online(struct csio_lnode *, enum csio_ln_ev);6060+static void csio_lns_ready(struct csio_lnode *, enum csio_ln_ev);6161+static void csio_lns_offline(struct csio_lnode *, enum csio_ln_ev);6262+6363+static int csio_ln_mgmt_submit_req(struct csio_ioreq *,6464+ void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *),6565+ enum fcoe_cmn_type, struct csio_dma_buf *, uint32_t);6666+6767+/* LN event mapping */6868+static enum csio_ln_ev fwevt_to_lnevt[] = {6969+ CSIO_LNE_NONE, /* None */7070+ CSIO_LNE_NONE, /* PLOGI_ACC_RCVD */7171+ CSIO_LNE_NONE, /* PLOGI_RJT_RCVD */7272+ CSIO_LNE_NONE, /* PLOGI_RCVD */7373+ CSIO_LNE_NONE, /* PLOGO_RCVD */7474+ CSIO_LNE_NONE, /* PRLI_ACC_RCVD */7575+ CSIO_LNE_NONE, /* PRLI_RJT_RCVD */7676+ CSIO_LNE_NONE, /* PRLI_RCVD */7777+ CSIO_LNE_NONE, /* PRLO_RCVD */7878+ CSIO_LNE_NONE, /* NPORT_ID_CHGD */7979+ CSIO_LNE_LOGO, /* FLOGO_RCVD */8080+ CSIO_LNE_LOGO, /* CLR_VIRT_LNK_RCVD */8181+ CSIO_LNE_FAB_INIT_DONE,/* FLOGI_ACC_RCVD */8282+ CSIO_LNE_NONE, 
/* FLOGI_RJT_RCVD */8383+ CSIO_LNE_FAB_INIT_DONE,/* FDISC_ACC_RCVD */8484+ CSIO_LNE_NONE, /* FDISC_RJT_RCVD */8585+ CSIO_LNE_NONE, /* FLOGI_TMO_MAX_RETRY */8686+ CSIO_LNE_NONE, /* IMPL_LOGO_ADISC_ACC */8787+ CSIO_LNE_NONE, /* IMPL_LOGO_ADISC_RJT */8888+ CSIO_LNE_NONE, /* IMPL_LOGO_ADISC_CNFLT */8989+ CSIO_LNE_NONE, /* PRLI_TMO */9090+ CSIO_LNE_NONE, /* ADISC_TMO */9191+ CSIO_LNE_NONE, /* RSCN_DEV_LOST */9292+ CSIO_LNE_NONE, /* SCR_ACC_RCVD */9393+ CSIO_LNE_NONE, /* ADISC_RJT_RCVD */9494+ CSIO_LNE_NONE, /* LOGO_SNT */9595+ CSIO_LNE_NONE, /* PROTO_ERR_IMPL_LOGO */9696+};9797+9898+#define CSIO_FWE_TO_LNE(_evt) ((_evt > PROTO_ERR_IMPL_LOGO) ? \9999+ CSIO_LNE_NONE : \100100+ fwevt_to_lnevt[_evt])101101+102102+#define csio_ct_rsp(cp) (((struct fc_ct_hdr *)cp)->ct_cmd)103103+#define csio_ct_reason(cp) (((struct fc_ct_hdr *)cp)->ct_reason)104104+#define csio_ct_expl(cp) (((struct fc_ct_hdr *)cp)->ct_explan)105105+#define csio_ct_get_pld(cp) ((void *)(((uint8_t *)cp) + FC_CT_HDR_LEN))106106+107107+/*108108+ * csio_ln_match_by_portid - lookup lnode using given portid.109109+ * @hw: HW module110110+ * @portid: port-id.111111+ *112112+ * If found, returns lnode matching given portid otherwise returns NULL.113113+ */114114+static struct csio_lnode *115115+csio_ln_lookup_by_portid(struct csio_hw *hw, uint8_t portid)116116+{117117+ struct csio_lnode *ln = hw->rln;118118+ struct list_head *tmp;119119+120120+ /* Match siblings lnode with portid */121121+ list_for_each(tmp, &hw->sln_head) {122122+ ln = (struct csio_lnode *) tmp;123123+ if (ln->portid == portid)124124+ return ln;125125+ }126126+127127+ return NULL;128128+}129129+130130+/*131131+ * csio_ln_lookup_by_vnpi - Lookup lnode using given vnp id.132132+ * @hw - HW module133133+ * @vnpi - vnp index.134134+ * Returns - If found, returns lnode matching given vnp id135135+ * otherwise returns NULL.136136+ */137137+static struct csio_lnode *138138+csio_ln_lookup_by_vnpi(struct csio_hw *hw, uint32_t vnp_id)139139+{140140+ struct 
list_head *tmp1, *tmp2;141141+ struct csio_lnode *sln = NULL, *cln = NULL;142142+143143+ if (list_empty(&hw->sln_head)) {144144+ CSIO_INC_STATS(hw, n_lnlkup_miss);145145+ return NULL;146146+ }147147+ /* Traverse sibling lnodes */148148+ list_for_each(tmp1, &hw->sln_head) {149149+ sln = (struct csio_lnode *) tmp1;150150+151151+ /* Match sibling lnode */152152+ if (sln->vnp_flowid == vnp_id)153153+ return sln;154154+155155+ if (list_empty(&sln->cln_head))156156+ continue;157157+158158+ /* Traverse children lnodes */159159+ list_for_each(tmp2, &sln->cln_head) {160160+ cln = (struct csio_lnode *) tmp2;161161+162162+ if (cln->vnp_flowid == vnp_id)163163+ return cln;164164+ }165165+ }166166+ CSIO_INC_STATS(hw, n_lnlkup_miss);167167+ return NULL;168168+}169169+170170+/**171171+ * csio_lnode_lookup_by_wwpn - Lookup lnode using given wwpn.172172+ * @hw: HW module.173173+ * @wwpn: WWPN.174174+ *175175+ * If found, returns lnode matching given wwpn, returns NULL otherwise.176176+ */177177+struct csio_lnode *178178+csio_lnode_lookup_by_wwpn(struct csio_hw *hw, uint8_t *wwpn)179179+{180180+ struct list_head *tmp1, *tmp2;181181+ struct csio_lnode *sln = NULL, *cln = NULL;182182+183183+ if (list_empty(&hw->sln_head)) {184184+ CSIO_INC_STATS(hw, n_lnlkup_miss);185185+ return NULL;186186+ }187187+ /* Traverse sibling lnodes */188188+ list_for_each(tmp1, &hw->sln_head) {189189+ sln = (struct csio_lnode *) tmp1;190190+191191+ /* Match sibling lnode */192192+ if (!memcmp(csio_ln_wwpn(sln), wwpn, 8))193193+ return sln;194194+195195+ if (list_empty(&sln->cln_head))196196+ continue;197197+198198+ /* Traverse children lnodes */199199+ list_for_each(tmp2, &sln->cln_head) {200200+ cln = (struct csio_lnode *) tmp2;201201+202202+ if (!memcmp(csio_ln_wwpn(cln), wwpn, 8))203203+ return cln;204204+ }205205+ }206206+ return NULL;207207+}208208+209209+/* FDMI */210210+static void211211+csio_fill_ct_iu(void *buf, uint8_t type, uint8_t sub_type, uint16_t op)212212+{213213+ struct fc_ct_hdr *cmd = 
(struct fc_ct_hdr *)buf;214214+ cmd->ct_rev = FC_CT_REV;215215+ cmd->ct_fs_type = type;216216+ cmd->ct_fs_subtype = sub_type;217217+ cmd->ct_cmd = op;218218+}219219+220220+static int221221+csio_hostname(uint8_t *buf, size_t buf_len)222222+{223223+ if (snprintf(buf, buf_len, "%s", init_utsname()->nodename) > 0)224224+ return 0;225225+ return -1;226226+}227227+228228+static int229229+csio_osname(uint8_t *buf, size_t buf_len)230230+{231231+ if (snprintf(buf, buf_len, "%s %s %s",232232+ init_utsname()->sysname,233233+ init_utsname()->release,234234+ init_utsname()->version) > 0)235235+ return 0;236236+237237+ return -1;238238+}239239+240240+static inline void241241+csio_append_attrib(uint8_t **ptr, uint16_t type, uint8_t *val, uint16_t len)242242+{243243+ struct fc_fdmi_attr_entry *ae = (struct fc_fdmi_attr_entry *)*ptr;244244+ ae->type = htons(type);245245+ len += 4; /* includes attribute type and length */246246+ len = (len + 3) & ~3; /* should be multiple of 4 bytes */247247+ ae->len = htons(len);248248+ memset(ae->value, 0, len - 4);249249+ memcpy(ae->value, val, len);250250+ *ptr += len;251251+}252252+253253+/*254254+ * csio_ln_fdmi_done - FDMI registeration completion255255+ * @hw: HW context256256+ * @fdmi_req: fdmi request257257+ */258258+static void259259+csio_ln_fdmi_done(struct csio_hw *hw, struct csio_ioreq *fdmi_req)260260+{261261+ void *cmd;262262+ struct csio_lnode *ln = fdmi_req->lnode;263263+264264+ if (fdmi_req->wr_status != FW_SUCCESS) {265265+ csio_ln_dbg(ln, "WR error:%x in processing fdmi rpa cmd\n",266266+ fdmi_req->wr_status);267267+ CSIO_INC_STATS(ln, n_fdmi_err);268268+ }269269+270270+ cmd = fdmi_req->dma_buf.vaddr;271271+ if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {272272+ csio_ln_dbg(ln, "fdmi rpa cmd rejected reason %x expl %x\n",273273+ csio_ct_reason(cmd), csio_ct_expl(cmd));274274+ }275275+}276276+277277+/*278278+ * csio_ln_fdmi_rhba_cbfn - RHBA completion279279+ * @hw: HW context280280+ * @fdmi_req: fdmi request281281+ */282282+static 
void283283+csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)284284+{285285+ void *cmd;286286+ uint8_t *pld;287287+ uint32_t len = 0;288288+ struct csio_lnode *ln = fdmi_req->lnode;289289+ struct fs_fdmi_attrs *attrib_blk;290290+ struct fc_fdmi_port_name *port_name;291291+ uint8_t buf[64];292292+ uint32_t val;293293+ uint8_t *fc4_type;294294+295295+ if (fdmi_req->wr_status != FW_SUCCESS) {296296+ csio_ln_dbg(ln, "WR error:%x in processing fdmi rhba cmd\n",297297+ fdmi_req->wr_status);298298+ CSIO_INC_STATS(ln, n_fdmi_err);299299+ }300300+301301+ cmd = fdmi_req->dma_buf.vaddr;302302+ if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {303303+ csio_ln_dbg(ln, "fdmi rhba cmd rejected reason %x expl %x\n",304304+ csio_ct_reason(cmd), csio_ct_expl(cmd));305305+ }306306+307307+ if (!csio_is_rnode_ready(fdmi_req->rnode)) {308308+ CSIO_INC_STATS(ln, n_fdmi_err);309309+ return;310310+ }311311+312312+ /* Prepare CT hdr for RPA cmd */313313+ memset(cmd, 0, FC_CT_HDR_LEN);314314+ csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, htons(FC_FDMI_RPA));315315+316316+ /* Prepare RPA payload */317317+ pld = (uint8_t *)csio_ct_get_pld(cmd);318318+ port_name = (struct fc_fdmi_port_name *)pld;319319+ memcpy(&port_name->portname, csio_ln_wwpn(ln), 8);320320+ pld += sizeof(*port_name);321321+322322+ /* Start appending Port attributes */323323+ attrib_blk = (struct fs_fdmi_attrs *)pld;324324+ attrib_blk->numattrs = 0;325325+ len += sizeof(attrib_blk->numattrs);326326+ pld += sizeof(attrib_blk->numattrs);327327+328328+ fc4_type = &buf[0];329329+ memset(fc4_type, 0, FC_FDMI_PORT_ATTR_FC4TYPES_LEN);330330+ fc4_type[2] = 1;331331+ fc4_type[7] = 1;332332+ csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_FC4TYPES,333333+ fc4_type, FC_FDMI_PORT_ATTR_FC4TYPES_LEN);334334+ attrib_blk->numattrs++;335335+ val = htonl(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);336336+ csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_SUPPORTEDSPEED,337337+ (uint8_t *)&val,338338+ 
FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN);339339+ attrib_blk->numattrs++;340340+341341+ if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_1G)342342+ val = htonl(FC_PORTSPEED_1GBIT);343343+ else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_10G)344344+ val = htonl(FC_PORTSPEED_10GBIT);345345+ else346346+ val = htonl(CSIO_HBA_PORTSPEED_UNKNOWN);347347+ csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED,348348+ (uint8_t *)&val,349349+ FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN);350350+ attrib_blk->numattrs++;351351+352352+ val = htonl(ln->ln_sparm.csp.sp_bb_data);353353+ csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_MAXFRAMESIZE,354354+ (uint8_t *)&val, FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN);355355+ attrib_blk->numattrs++;356356+357357+ strcpy(buf, "csiostor");358358+ csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_OSDEVICENAME, buf,359359+ (uint16_t)strlen(buf));360360+ attrib_blk->numattrs++;361361+362362+ if (!csio_hostname(buf, sizeof(buf))) {363363+ csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_HOSTNAME,364364+ buf, (uint16_t)strlen(buf));365365+ attrib_blk->numattrs++;366366+ }367367+ attrib_blk->numattrs = ntohl(attrib_blk->numattrs);368368+ len = (uint32_t)(pld - (uint8_t *)cmd);369369+370370+ /* Submit FDMI RPA request */371371+ spin_lock_irq(&hw->lock);372372+ if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_done,373373+ FCOE_CT, &fdmi_req->dma_buf, len)) {374374+ CSIO_INC_STATS(ln, n_fdmi_err);375375+ csio_ln_dbg(ln, "Failed to issue fdmi rpa req\n");376376+ }377377+ spin_unlock_irq(&hw->lock);378378+}379379+380380+/*381381+ * csio_ln_fdmi_dprt_cbfn - DPRT completion382382+ * @hw: HW context383383+ * @fdmi_req: fdmi request384384+ */385385+static void386386+csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)387387+{388388+ void *cmd;389389+ uint8_t *pld;390390+ uint32_t len = 0;391391+ uint32_t maxpayload = htonl(65536);392392+ struct fc_fdmi_hba_identifier *hbaid;393393+ struct csio_lnode *ln = fdmi_req->lnode;394394+ struct 
fc_fdmi_rpl *reg_pl;395395+ struct fs_fdmi_attrs *attrib_blk;396396+ uint8_t buf[64];397397+398398+ if (fdmi_req->wr_status != FW_SUCCESS) {399399+ csio_ln_dbg(ln, "WR error:%x in processing fdmi dprt cmd\n",400400+ fdmi_req->wr_status);401401+ CSIO_INC_STATS(ln, n_fdmi_err);402402+ }403403+404404+ if (!csio_is_rnode_ready(fdmi_req->rnode)) {405405+ CSIO_INC_STATS(ln, n_fdmi_err);406406+ return;407407+ }408408+ cmd = fdmi_req->dma_buf.vaddr;409409+ if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {410410+ csio_ln_dbg(ln, "fdmi dprt cmd rejected reason %x expl %x\n",411411+ csio_ct_reason(cmd), csio_ct_expl(cmd));412412+ }413413+414414+ /* Prepare CT hdr for RHBA cmd */415415+ memset(cmd, 0, FC_CT_HDR_LEN);416416+ csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, htons(FC_FDMI_RHBA));417417+ len = FC_CT_HDR_LEN;418418+419419+ /* Prepare RHBA payload */420420+ pld = (uint8_t *)csio_ct_get_pld(cmd);421421+ hbaid = (struct fc_fdmi_hba_identifier *)pld;422422+ memcpy(&hbaid->id, csio_ln_wwpn(ln), 8); /* HBA identifer */423423+ pld += sizeof(*hbaid);424424+425425+ /* Register one port per hba */426426+ reg_pl = (struct fc_fdmi_rpl *)pld;427427+ reg_pl->numport = ntohl(1);428428+ memcpy(®_pl->port[0].portname, csio_ln_wwpn(ln), 8);429429+ pld += sizeof(*reg_pl);430430+431431+ /* Start appending HBA attributes hba */432432+ attrib_blk = (struct fs_fdmi_attrs *)pld;433433+ attrib_blk->numattrs = 0;434434+ len += sizeof(attrib_blk->numattrs);435435+ pld += sizeof(attrib_blk->numattrs);436436+437437+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_NODENAME, csio_ln_wwnn(ln),438438+ FC_FDMI_HBA_ATTR_NODENAME_LEN);439439+ attrib_blk->numattrs++;440440+441441+ memset(buf, 0, sizeof(buf));442442+443443+ strcpy(buf, "Chelsio Communications");444444+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MANUFACTURER, buf,445445+ (uint16_t)strlen(buf));446446+ attrib_blk->numattrs++;447447+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_SERIALNUMBER,448448+ hw->vpd.sn, (uint16_t)sizeof(hw->vpd.sn));449449+ 
attrib_blk->numattrs++;450450+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODEL, hw->vpd.id,451451+ (uint16_t)sizeof(hw->vpd.id));452452+ attrib_blk->numattrs++;453453+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODELDESCRIPTION,454454+ hw->model_desc, (uint16_t)strlen(hw->model_desc));455455+ attrib_blk->numattrs++;456456+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_HARDWAREVERSION,457457+ hw->hw_ver, (uint16_t)sizeof(hw->hw_ver));458458+ attrib_blk->numattrs++;459459+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_FIRMWAREVERSION,460460+ hw->fwrev_str, (uint16_t)strlen(hw->fwrev_str));461461+ attrib_blk->numattrs++;462462+463463+ if (!csio_osname(buf, sizeof(buf))) {464464+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_OSNAMEVERSION,465465+ buf, (uint16_t)strlen(buf));466466+ attrib_blk->numattrs++;467467+ }468468+469469+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MAXCTPAYLOAD,470470+ (uint8_t *)&maxpayload,471471+ FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN);472472+ len = (uint32_t)(pld - (uint8_t *)cmd);473473+ attrib_blk->numattrs++;474474+ attrib_blk->numattrs = ntohl(attrib_blk->numattrs);475475+476476+ /* Submit FDMI RHBA request */477477+ spin_lock_irq(&hw->lock);478478+ if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_rhba_cbfn,479479+ FCOE_CT, &fdmi_req->dma_buf, len)) {480480+ CSIO_INC_STATS(ln, n_fdmi_err);481481+ csio_ln_dbg(ln, "Failed to issue fdmi rhba req\n");482482+ }483483+ spin_unlock_irq(&hw->lock);484484+}485485+486486+/*487487+ * csio_ln_fdmi_dhba_cbfn - DHBA completion488488+ * @hw: HW context489489+ * @fdmi_req: fdmi request490490+ */491491+static void492492+csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)493493+{494494+ struct csio_lnode *ln = fdmi_req->lnode;495495+ void *cmd;496496+ struct fc_fdmi_port_name *port_name;497497+ uint32_t len;498498+499499+ if (fdmi_req->wr_status != FW_SUCCESS) {500500+ csio_ln_dbg(ln, "WR error:%x in processing fdmi dhba cmd\n",501501+ fdmi_req->wr_status);502502+ CSIO_INC_STATS(ln, n_fdmi_err);503503+ 
}

	if (!csio_is_rnode_ready(fdmi_req->rnode)) {
		CSIO_INC_STATS(ln, n_fdmi_err);
		return;
	}
	cmd = fdmi_req->dma_buf.vaddr;
	if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
		csio_ln_dbg(ln, "fdmi dhba cmd rejected reason %x expl %x\n",
			    csio_ct_reason(cmd), csio_ct_expl(cmd));
	}

	/* Send FDMI cmd to de-register any Port attributes if registered
	 * before
	 */

	/* Prepare FDMI DPRT cmd — the DMA buffer is reused for each step
	 * of the DHBA -> DPRT -> RHBA -> RPA chain.
	 */
	memset(cmd, 0, FC_CT_HDR_LEN);
	csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, htons(FC_FDMI_DPRT));
	len = FC_CT_HDR_LEN;
	port_name = (struct fc_fdmi_port_name *)csio_ct_get_pld(cmd);
	memcpy(&port_name->portname, csio_ln_wwpn(ln), 8);
	len += sizeof(*port_name);

	/* Submit FDMI request */
	spin_lock_irq(&hw->lock);
	if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dprt_cbfn,
				    FCOE_CT, &fdmi_req->dma_buf, len)) {
		CSIO_INC_STATS(ln, n_fdmi_err);
		csio_ln_dbg(ln, "Failed to issue fdmi dprt req\n");
	}
	spin_unlock_irq(&hw->lock);
}

/**
 * csio_ln_fdmi_start - Start an FDMI request.
 * @ln:		lnode
 * @context:	session context
 *
 * Issued with lock held.  Kicks off the FDMI registration chain by first
 * de-registering any previously registered HBA attributes (DHBA); the
 * remaining steps are driven by the completion callbacks.
 * Returns 0, or -EPROTONOSUPPORT when FDMI is not enabled on this lnode.
 */
int
csio_ln_fdmi_start(struct csio_lnode *ln, void *context)
{
	struct csio_ioreq *fdmi_req;
	struct csio_rnode *fdmi_rn = (struct csio_rnode *)context;
	void *cmd;
	struct fc_fdmi_hba_identifier *hbaid;
	uint32_t len;

	if (!(ln->flags & CSIO_LNF_FDMI_ENABLE))
		return -EPROTONOSUPPORT;

	/* NOTE(review): only counts the error — the request is still
	 * issued even when the management rnode is not ready; confirm
	 * this is intentional.
	 */
	if (!csio_is_rnode_ready(fdmi_rn))
		CSIO_INC_STATS(ln, n_fdmi_err);

	/* Send FDMI cmd to de-register any HBA attributes if registered
	 * before
	 */

	fdmi_req = ln->mgmt_req;
	fdmi_req->lnode = ln;
	fdmi_req->rnode = fdmi_rn;

	/* Prepare FDMI DHBA cmd
*/
	cmd = fdmi_req->dma_buf.vaddr;
	memset(cmd, 0, FC_CT_HDR_LEN);
	csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, htons(FC_FDMI_DHBA));
	len = FC_CT_HDR_LEN;

	hbaid = (struct fc_fdmi_hba_identifier *)csio_ct_get_pld(cmd);
	memcpy(&hbaid->id, csio_ln_wwpn(ln), 8);
	len += sizeof(*hbaid);

	/* Submit FDMI request */
	if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dhba_cbfn,
				    FCOE_CT, &fdmi_req->dma_buf, len)) {
		CSIO_INC_STATS(ln, n_fdmi_err);
		csio_ln_dbg(ln, "Failed to issue fdmi dhba req\n");
	}

	return 0;
}

/*
 * csio_ln_vnp_read_cbfn - vnp read completion handler.
 * @hw: HW lnode
 * @cbfn: Completion handler.
 *
 * Reads vnp response and updates ln parameters.
 */
static void
csio_ln_vnp_read_cbfn(struct csio_hw *hw, struct csio_mb *mbp)
{
	struct csio_lnode *ln = ((struct csio_lnode *)mbp->priv);
	struct fw_fcoe_vnp_cmd *rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb);
	struct fc_els_csp *csp;
	struct fc_els_cssp *clsp;
	enum fw_retval retval;

	retval = FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16));
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FCOE VNP read cmd returned error:0x%x\n", retval);
		mempool_free(mbp, hw->mb_mempool);
		return;
	}

	spin_lock_irq(&hw->lock);

	/* The 24-bit nport id lives in the low 3 bytes of the vnport MAC;
	 * the ntohl + >>8 pair left-justifies those 3 bytes into a host
	 * order 24-bit value.
	 */
	memcpy(ln->mac, rsp->vnport_mac, sizeof(ln->mac));
	memcpy(&ln->nport_id, &rsp->vnport_mac[3],
	       sizeof(uint8_t)*3);
	ln->nport_id = ntohl(ln->nport_id);
	ln->nport_id = ln->nport_id>>8;

	/* Update WWNs */
	/*
	 * This may look like a duplication of what csio_fcoe_enable_link()
	 * does, but is absolutely necessary if the vnpi changes between
	 * a FCOE LINK UP and FCOE LINK DOWN.
	 */
	memcpy(csio_ln_wwnn(ln), rsp->vnport_wwnn, 8);
memcpy(csio_ln_wwpn(ln), rsp->vnport_wwpn, 8);

	/* Copy common sparam */
	csp = (struct fc_els_csp *)rsp->cmn_srv_parms;
	ln->ln_sparm.csp.sp_hi_ver = csp->sp_hi_ver;
	ln->ln_sparm.csp.sp_lo_ver = csp->sp_lo_ver;
	ln->ln_sparm.csp.sp_bb_cred = ntohs(csp->sp_bb_cred);
	ln->ln_sparm.csp.sp_features = ntohs(csp->sp_features);
	ln->ln_sparm.csp.sp_bb_data = ntohs(csp->sp_bb_data);
	ln->ln_sparm.csp.sp_r_a_tov = ntohl(csp->sp_r_a_tov);
	ln->ln_sparm.csp.sp_e_d_tov = ntohl(csp->sp_e_d_tov);

	/* Copy word 0 & word 1 of class sparam.
	 * NOTE(review): index [2] presumably corresponds to FC class 3 —
	 * confirm against the ln_sparm layout.
	 */
	clsp = (struct fc_els_cssp *)rsp->clsp_word_0_1;
	ln->ln_sparm.clsp[2].cp_class = ntohs(clsp->cp_class);
	ln->ln_sparm.clsp[2].cp_init = ntohs(clsp->cp_init);
	ln->ln_sparm.clsp[2].cp_recip = ntohs(clsp->cp_recip);
	ln->ln_sparm.clsp[2].cp_rdfs = ntohs(clsp->cp_rdfs);

	spin_unlock_irq(&hw->lock);

	mempool_free(mbp, hw->mb_mempool);

	/* Send an event to update local attribs */
	csio_lnode_async_event(ln, CSIO_LN_FC_ATTRIB_UPDATE);
}

/*
 * csio_ln_vnp_read - Read vnp params.
 * @ln: lnode
 * @cbfn: Completion handler.
 *
 * Issued with lock held.  Fires an asynchronous FW_FCOE_VNP_CMD mailbox
 * command; @cbfn owns freeing the mailbox on completion.
 * Returns 0 on successful issue, negative errno otherwise.
 */
static int
csio_ln_vnp_read(struct csio_lnode *ln,
		void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct csio_hw *hw = ln->hwp;
	struct csio_mb  *mbp;

	/* Allocate Mbox request */
	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Prepare VNP Command */
	csio_fcoe_vnp_read_init_mb(ln, mbp,
				   CSIO_MB_DEFAULT_TMO,
				   ln->fcf_flowid,
				   ln->vnp_flowid,
				   cbfn);

	/* Issue MBOX cmd */
	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Failed to issue mbox FCoE VNP command\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	return 0;
}

/*
 * csio_fcoe_enable_link - Enable fcoe link.
 * @ln: lnode
 * @enable: enable/disable
 * Issued with lock held.
 * Issues mbox cmd to bring up FCOE link on port associated with given ln.
 * On link-up, caches the vnport WWNN/WWPN and the physical MAC returned
 * by firmware.
 */
static int
csio_fcoe_enable_link(struct csio_lnode *ln, bool enable)
{
	struct csio_hw *hw = ln->hwp;
	struct csio_mb  *mbp;
	enum fw_retval retval;
	uint8_t portid;
	uint8_t sub_op;
	struct fw_fcoe_link_cmd *lcmd;
	int i;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	portid = ln->portid;
	sub_op = enable ? FCOE_LINK_UP : FCOE_LINK_DOWN;

	csio_dbg(hw, "bringing FCOE LINK %s on Port:%d\n",
		 sub_op ? "UP" : "DOWN", portid);

	/* Synchronous mailbox command (NULL callback): result polled below */
	csio_write_fcoe_link_cond_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
					  portid, sub_op, 0, 0, 0, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "failed to issue FCOE LINK cmd on port[%d]\n",
			 portid);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw,
			 "FCOE LINK %s cmd on port[%d] failed with "
			 "ret:x%x\n", sub_op ?
"UP" : "DOWN", portid, retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	if (!enable)
		goto out;

	lcmd = (struct fw_fcoe_link_cmd *)mbp->mb;

	/* Cache the firmware-assigned vnport names on the lnode */
	memcpy(csio_ln_wwnn(ln), lcmd->vnport_wwnn, 8);
	memcpy(csio_ln_wwpn(ln), lcmd->vnport_wwpn, 8);

	for (i = 0; i < CSIO_MAX_PPORTS; i++)
		if (hw->pport[i].portid == portid)
			memcpy(hw->pport[i].mac, lcmd->phy_mac, 6);

out:
	mempool_free(mbp, hw->mb_mempool);
	return 0;
}

/*
 * csio_ln_read_fcf_cbfn - Read fcf parameters
 * @ln: lnode
 *
 * read fcf response and Update ln fcf information.
 */
static void
csio_ln_read_fcf_cbfn(struct csio_hw *hw, struct csio_mb *mbp)
{
	struct csio_lnode *ln = (struct csio_lnode *)mbp->priv;
	struct csio_fcf_info	*fcf_info;
	struct fw_fcoe_fcf_cmd *rsp =
				(struct fw_fcoe_fcf_cmd *)(mbp->mb);
	enum fw_retval retval;

	retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16));
	if (retval != FW_SUCCESS) {
		csio_ln_err(ln, "FCOE FCF cmd failed with ret x%x\n",
				retval);
		mempool_free(mbp, hw->mb_mempool);
		return;
	}

	/* Copy the FCF record out of the mailbox under the hw lock */
	spin_lock_irq(&hw->lock);
	fcf_info = ln->fcfinfo;
	fcf_info->priority = FW_FCOE_FCF_CMD_PRIORITY_GET(
					ntohs(rsp->priority_pkd));
	fcf_info->vf_id = ntohs(rsp->vf_id);
	fcf_info->vlan_id = rsp->vlan_id;
	fcf_info->max_fcoe_size = ntohs(rsp->max_fcoe_size);
	fcf_info->fka_adv = be32_to_cpu(rsp->fka_adv);
	fcf_info->fcfi = FW_FCOE_FCF_CMD_FCFI_GET(ntohl(rsp->op_to_fcfi));
	fcf_info->fpma = FW_FCOE_FCF_CMD_FPMA_GET(rsp->fpma_to_portid);
	fcf_info->spma = FW_FCOE_FCF_CMD_SPMA_GET(rsp->fpma_to_portid);
	fcf_info->login = FW_FCOE_FCF_CMD_LOGIN_GET(rsp->fpma_to_portid);
	fcf_info->portid =
FW_FCOE_FCF_CMD_PORTID_GET(rsp->fpma_to_portid);
	memcpy(fcf_info->fc_map, rsp->fc_map, sizeof(fcf_info->fc_map));
	memcpy(fcf_info->mac, rsp->mac, sizeof(fcf_info->mac));
	memcpy(fcf_info->name_id, rsp->name_id, sizeof(fcf_info->name_id));
	memcpy(fcf_info->fabric, rsp->fabric, sizeof(fcf_info->fabric));
	memcpy(fcf_info->spma_mac, rsp->spma_mac, sizeof(fcf_info->spma_mac));

	spin_unlock_irq(&hw->lock);

	mempool_free(mbp, hw->mb_mempool);
}

/*
 * csio_ln_read_fcf_entry - Read fcf entry.
 * @ln: lnode
 * @cbfn: Completion handler.
 *
 * Issued with lock held.  Asynchronous; @cbfn frees the mailbox.
 * Returns 0 on successful issue, negative errno otherwise.
 */
static int
csio_ln_read_fcf_entry(struct csio_lnode *ln,
			void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct csio_hw *hw = ln->hwp;
	struct csio_mb *mbp;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get FCoE FCF information */
	csio_fcoe_read_fcf_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
				   ln->portid, ln->fcf_flowid, cbfn);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "failed to issue FCOE FCF cmd\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	return 0;
}

/*
 * csio_handle_link_up - Logical Linkup event.
 * @hw - HW module.
 * @portid - Physical port number
 * @fcfi - FCF index.
 * @vnpi - VNP index.
 * Returns - none.
 *
 * This event is received from FW, when virtual link is established between
 * Physical port[ENode] and FCF.
If its new vnpi, then local node object is846846+ * created on this FCF and set to [ONLINE] state.847847+ * Lnode waits for FW_RDEV_CMD event to be received indicating that848848+ * Fabric login is completed and lnode moves to [READY] state.849849+ *850850+ * This called with hw lock held851851+ */852852+static void853853+csio_handle_link_up(struct csio_hw *hw, uint8_t portid, uint32_t fcfi,854854+ uint32_t vnpi)855855+{856856+ struct csio_lnode *ln = NULL;857857+858858+ /* Lookup lnode based on vnpi */859859+ ln = csio_ln_lookup_by_vnpi(hw, vnpi);860860+ if (!ln) {861861+ /* Pick lnode based on portid */862862+ ln = csio_ln_lookup_by_portid(hw, portid);863863+ if (!ln) {864864+ csio_err(hw, "failed to lookup fcoe lnode on port:%d\n",865865+ portid);866866+ CSIO_DB_ASSERT(0);867867+ return;868868+ }869869+870870+ /* Check if lnode has valid vnp flowid */871871+ if (ln->vnp_flowid != CSIO_INVALID_IDX) {872872+ /* New VN-Port */873873+ spin_unlock_irq(&hw->lock);874874+ csio_lnode_alloc(hw);875875+ spin_lock_irq(&hw->lock);876876+ if (!ln) {877877+ csio_err(hw,878878+ "failed to allocate fcoe lnode"879879+ "for port:%d vnpi:x%x\n",880880+ portid, vnpi);881881+ CSIO_DB_ASSERT(0);882882+ return;883883+ }884884+ ln->portid = portid;885885+ }886886+ ln->vnp_flowid = vnpi;887887+ ln->dev_num &= ~0xFFFF;888888+ ln->dev_num |= vnpi;889889+ }890890+891891+ /*Initialize fcfi */892892+ ln->fcf_flowid = fcfi;893893+894894+ csio_info(hw, "Port:%d - FCOE LINK UP\n", portid);895895+896896+ CSIO_INC_STATS(ln, n_link_up);897897+898898+ /* Send LINKUP event to SM */899899+ csio_post_event(&ln->sm, CSIO_LNE_LINKUP);900900+}901901+902902+/*903903+ * csio_post_event_rns904904+ * @ln - FCOE lnode905905+ * @evt - Given rnode event906906+ * Returns - none907907+ *908908+ * Posts given rnode event to all FCOE rnodes connected with given Lnode.909909+ * This routine is invoked when lnode receives LINK_DOWN/DOWN_LINK/CLOSE910910+ * event.911911+ *912912+ * This called with hw lock held913913+ 
*/
static void
csio_post_event_rns(struct csio_lnode *ln, enum csio_rn_ev evt)
{
	/* NOTE(review): casting the list head / list_head pointers to
	 * csio_rnode relies on the SM list linkage being the first member
	 * of struct csio_rnode — confirm against csio_rnode.h.
	 */
	struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
	struct list_head *tmp, *next;
	struct csio_rnode *rn;

	list_for_each_safe(tmp, next, &rnhead->sm.sm_list) {
		rn = (struct csio_rnode *) tmp;
		csio_post_event(&rn->sm, evt);
	}
}

/*
 * csio_cleanup_rns
 * @ln - FCOE lnode
 * Returns - none
 *
 * Frees all FCOE rnodes connected with given Lnode.
 *
 * This called with hw lock held
 */
static void
csio_cleanup_rns(struct csio_lnode *ln)
{
	struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
	struct list_head *tmp, *next_rn;
	struct csio_rnode *rn;

	/* _safe variant: csio_put_rnode may unlink/free the current node */
	list_for_each_safe(tmp, next_rn, &rnhead->sm.sm_list) {
		rn = (struct csio_rnode *) tmp;
		csio_put_rnode(ln, rn);
	}

}

/*
 * csio_post_event_lns
 * @ln - FCOE lnode
 * @evt - Given lnode event
 * Returns - none
 *
 * Posts given lnode event to all FCOE lnodes connected with given Lnode.
 * This routine is invoked when lnode receives LINK_DOWN/DOWN_LINK/CLOSE
 * event.
 *
 * This called with hw lock held
 */
static void
csio_post_event_lns(struct csio_lnode *ln, enum csio_ln_ev evt)
{
	struct list_head *tmp;
	struct csio_lnode *cln, *sln;

	/* If NPIV lnode, send evt only to that and return */
	if (csio_is_npiv_ln(ln)) {
		csio_post_event(&ln->sm, evt);
		return;
	}

	sln = ln;
	/* Traverse children lnodes list and send evt */
	list_for_each(tmp, &sln->cln_head) {
		cln = (struct csio_lnode *) tmp;
		csio_post_event(&cln->sm, evt);
	}

	/* Send evt to parent lnode (after all children have seen it) */
	csio_post_event(&ln->sm,
evt);
}

/*
 * csio_ln_down - Local nport is down
 * @ln - FCOE Lnode
 * Returns - none
 *
 * Sends LINK_DOWN events to Lnode and its associated NPIVs lnodes.
 *
 * This called with hw lock held
 */
static void
csio_ln_down(struct csio_lnode *ln)
{
	csio_post_event_lns(ln, CSIO_LNE_LINK_DOWN);
}

/*
 * csio_handle_link_down - Logical Linkdown event.
 * @hw - HW module.
 * @portid - Physical port number
 * @fcfi - FCF index.
 * @vnpi - VNP index.
 * Returns - none
 *
 * This event is received from FW, when virtual link goes down between
 * Physical port[ENode] and FCF. Lnode and its associated NPIVs lnode hosted on
 * this vnpi[VN-Port] will be de-instantiated.
 *
 * This called with hw lock held
 */
static void
csio_handle_link_down(struct csio_hw *hw, uint8_t portid, uint32_t fcfi,
		      uint32_t vnpi)
{
	struct csio_fcf_info *fp;
	struct csio_lnode *ln;

	/* Lookup lnode based on vnpi */
	ln = csio_ln_lookup_by_vnpi(hw, vnpi);
	if (ln) {
		fp = ln->fcfinfo;
		CSIO_INC_STATS(ln, n_link_down);

		/*Warn if linkdown received if lnode is not in ready state */
		if (!csio_is_lnode_ready(ln)) {
			csio_ln_warn(ln,
				"warn: FCOE link is already in offline "
				"Ignoring Fcoe linkdown event on portid %d\n",
				portid);
			CSIO_INC_STATS(ln, n_evt_drop);
			return;
		}

		/* Verify portid */
		if (fp->portid != portid) {
			csio_ln_warn(ln,
				"warn: FCOE linkdown recv with "
				"invalid port %d\n", portid);
			CSIO_INC_STATS(ln, n_evt_drop);
			return;
		}

		/* verify fcfi */
		if (ln->fcf_flowid != fcfi) {
csio_ln_warn(ln,10491049+ "warn: FCOE linkdown recv with "10501050+ "invalid fcfi x%x\n", fcfi);10511051+ CSIO_INC_STATS(ln, n_evt_drop);10521052+ return;10531053+ }10541054+10551055+ csio_info(hw, "Port:%d - FCOE LINK DOWN\n", portid);10561056+10571057+ /* Send LINK_DOWN event to lnode s/m */10581058+ csio_ln_down(ln);10591059+10601060+ return;10611061+ } else {10621062+ csio_warn(hw,10631063+ "warn: FCOE linkdown recv with invalid vnpi x%x\n",10641064+ vnpi);10651065+ CSIO_INC_STATS(hw, n_evt_drop);10661066+ }10671067+}10681068+10691069+/*10701070+ * csio_is_lnode_ready - Checks FCOE lnode is in ready state.10711071+ * @ln: Lnode module10721072+ *10731073+ * Returns True if FCOE lnode is in ready state.10741074+ */10751075+int10761076+csio_is_lnode_ready(struct csio_lnode *ln)10771077+{10781078+ return (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready));10791079+}10801080+10811081+/*****************************************************************************/10821082+/* START: Lnode SM */10831083+/*****************************************************************************/10841084+/*10851085+ * csio_lns_uninit - The request in uninit state.10861086+ * @ln - FCOE lnode.10871087+ * @evt - Event to be processed.10881088+ *10891089+ * Process the given lnode event which is currently in "uninit" state.10901090+ * Invoked with HW lock held.10911091+ * Return - none.10921092+ */10931093+static void10941094+csio_lns_uninit(struct csio_lnode *ln, enum csio_ln_ev evt)10951095+{10961096+ struct csio_hw *hw = csio_lnode_to_hw(ln);10971097+ struct csio_lnode *rln = hw->rln;10981098+ int rv;10991099+11001100+ CSIO_INC_STATS(ln, n_evt_sm[evt]);11011101+ switch (evt) {11021102+ case CSIO_LNE_LINKUP:11031103+ csio_set_state(&ln->sm, csio_lns_online);11041104+ /* Read FCF only for physical lnode */11051105+ if (csio_is_phys_ln(ln)) {11061106+ rv = csio_ln_read_fcf_entry(ln,11071107+ csio_ln_read_fcf_cbfn);11081108+ if (rv != 0) {11091109+ /* TODO: Send HW RESET event 
*/11101110+ CSIO_INC_STATS(ln, n_err);11111111+ break;11121112+ }11131113+11141114+ /* Add FCF record */11151115+ list_add_tail(&ln->fcfinfo->list, &rln->fcf_lsthead);11161116+ }11171117+11181118+ rv = csio_ln_vnp_read(ln, csio_ln_vnp_read_cbfn);11191119+ if (rv != 0) {11201120+ /* TODO: Send HW RESET event */11211121+ CSIO_INC_STATS(ln, n_err);11221122+ }11231123+ break;11241124+11251125+ case CSIO_LNE_DOWN_LINK:11261126+ break;11271127+11281128+ default:11291129+ csio_ln_dbg(ln,11301130+ "unexp ln event %d recv from did:x%x in "11311131+ "ln state[uninit].\n", evt, ln->nport_id);11321132+ CSIO_INC_STATS(ln, n_evt_unexp);11331133+ break;11341134+ } /* switch event */11351135+}11361136+11371137+/*11381138+ * csio_lns_online - The request in online state.11391139+ * @ln - FCOE lnode.11401140+ * @evt - Event to be processed.11411141+ *11421142+ * Process the given lnode event which is currently in "online" state.11431143+ * Invoked with HW lock held.11441144+ * Return - none.11451145+ */11461146+static void11471147+csio_lns_online(struct csio_lnode *ln, enum csio_ln_ev evt)11481148+{11491149+ struct csio_hw *hw = csio_lnode_to_hw(ln);11501150+11511151+ CSIO_INC_STATS(ln, n_evt_sm[evt]);11521152+ switch (evt) {11531153+ case CSIO_LNE_LINKUP:11541154+ csio_ln_warn(ln,11551155+ "warn: FCOE link is up already "11561156+ "Ignoring linkup on port:%d\n", ln->portid);11571157+ CSIO_INC_STATS(ln, n_evt_drop);11581158+ break;11591159+11601160+ case CSIO_LNE_FAB_INIT_DONE:11611161+ csio_set_state(&ln->sm, csio_lns_ready);11621162+11631163+ spin_unlock_irq(&hw->lock);11641164+ csio_lnode_async_event(ln, CSIO_LN_FC_LINKUP);11651165+ spin_lock_irq(&hw->lock);11661166+11671167+ break;11681168+11691169+ case CSIO_LNE_LINK_DOWN:11701170+ /* Fall through */11711171+ case CSIO_LNE_DOWN_LINK:11721172+ csio_set_state(&ln->sm, csio_lns_uninit);11731173+ if (csio_is_phys_ln(ln)) {11741174+ /* Remove FCF entry */11751175+ list_del_init(&ln->fcfinfo->list);11761176+ }11771177+ 
break;11781178+11791179+ default:11801180+ csio_ln_dbg(ln,11811181+ "unexp ln event %d recv from did:x%x in "11821182+ "ln state[uninit].\n", evt, ln->nport_id);11831183+ CSIO_INC_STATS(ln, n_evt_unexp);11841184+11851185+ break;11861186+ } /* switch event */11871187+}11881188+11891189+/*11901190+ * csio_lns_ready - The request in ready state.11911191+ * @ln - FCOE lnode.11921192+ * @evt - Event to be processed.11931193+ *11941194+ * Process the given lnode event which is currently in "ready" state.11951195+ * Invoked with HW lock held.11961196+ * Return - none.11971197+ */11981198+static void11991199+csio_lns_ready(struct csio_lnode *ln, enum csio_ln_ev evt)12001200+{12011201+ struct csio_hw *hw = csio_lnode_to_hw(ln);12021202+12031203+ CSIO_INC_STATS(ln, n_evt_sm[evt]);12041204+ switch (evt) {12051205+ case CSIO_LNE_FAB_INIT_DONE:12061206+ csio_ln_dbg(ln,12071207+ "ignoring event %d recv from did x%x"12081208+ "in ln state[ready].\n", evt, ln->nport_id);12091209+ CSIO_INC_STATS(ln, n_evt_drop);12101210+ break;12111211+12121212+ case CSIO_LNE_LINK_DOWN:12131213+ csio_set_state(&ln->sm, csio_lns_offline);12141214+ csio_post_event_rns(ln, CSIO_RNFE_DOWN);12151215+12161216+ spin_unlock_irq(&hw->lock);12171217+ csio_lnode_async_event(ln, CSIO_LN_FC_LINKDOWN);12181218+ spin_lock_irq(&hw->lock);12191219+12201220+ if (csio_is_phys_ln(ln)) {12211221+ /* Remove FCF entry */12221222+ list_del_init(&ln->fcfinfo->list);12231223+ }12241224+ break;12251225+12261226+ case CSIO_LNE_DOWN_LINK:12271227+ csio_set_state(&ln->sm, csio_lns_offline);12281228+ csio_post_event_rns(ln, CSIO_RNFE_DOWN);12291229+12301230+ /* Host need to issue aborts in case if FW has not returned12311231+ * WRs with status "ABORTED"12321232+ */12331233+ spin_unlock_irq(&hw->lock);12341234+ csio_lnode_async_event(ln, CSIO_LN_FC_LINKDOWN);12351235+ spin_lock_irq(&hw->lock);12361236+12371237+ if (csio_is_phys_ln(ln)) {12381238+ /* Remove FCF entry */12391239+ list_del_init(&ln->fcfinfo->list);12401240+ 
}12411241+ break;12421242+12431243+ case CSIO_LNE_CLOSE:12441244+ csio_set_state(&ln->sm, csio_lns_uninit);12451245+ csio_post_event_rns(ln, CSIO_RNFE_CLOSE);12461246+ break;12471247+12481248+ case CSIO_LNE_LOGO:12491249+ csio_set_state(&ln->sm, csio_lns_offline);12501250+ csio_post_event_rns(ln, CSIO_RNFE_DOWN);12511251+ break;12521252+12531253+ default:12541254+ csio_ln_dbg(ln,12551255+ "unexp ln event %d recv from did:x%x in "12561256+ "ln state[uninit].\n", evt, ln->nport_id);12571257+ CSIO_INC_STATS(ln, n_evt_unexp);12581258+ CSIO_DB_ASSERT(0);12591259+ break;12601260+ } /* switch event */12611261+}12621262+12631263+/*12641264+ * csio_lns_offline - The request in offline state.12651265+ * @ln - FCOE lnode.12661266+ * @evt - Event to be processed.12671267+ *12681268+ * Process the given lnode event which is currently in "offline" state.12691269+ * Invoked with HW lock held.12701270+ * Return - none.12711271+ */12721272+static void12731273+csio_lns_offline(struct csio_lnode *ln, enum csio_ln_ev evt)12741274+{12751275+ struct csio_hw *hw = csio_lnode_to_hw(ln);12761276+ struct csio_lnode *rln = hw->rln;12771277+ int rv;12781278+12791279+ CSIO_INC_STATS(ln, n_evt_sm[evt]);12801280+ switch (evt) {12811281+ case CSIO_LNE_LINKUP:12821282+ csio_set_state(&ln->sm, csio_lns_online);12831283+ /* Read FCF only for physical lnode */12841284+ if (csio_is_phys_ln(ln)) {12851285+ rv = csio_ln_read_fcf_entry(ln,12861286+ csio_ln_read_fcf_cbfn);12871287+ if (rv != 0) {12881288+ /* TODO: Send HW RESET event */12891289+ CSIO_INC_STATS(ln, n_err);12901290+ break;12911291+ }12921292+12931293+ /* Add FCF record */12941294+ list_add_tail(&ln->fcfinfo->list, &rln->fcf_lsthead);12951295+ }12961296+12971297+ rv = csio_ln_vnp_read(ln, csio_ln_vnp_read_cbfn);12981298+ if (rv != 0) {12991299+ /* TODO: Send HW RESET event */13001300+ CSIO_INC_STATS(ln, n_err);13011301+ }13021302+ break;13031303+13041304+ case CSIO_LNE_LINK_DOWN:13051305+ case CSIO_LNE_DOWN_LINK:13061306+ case 
CSIO_LNE_LOGO:13071307+ csio_ln_dbg(ln,13081308+ "ignoring event %d recv from did x%x"13091309+ "in ln state[offline].\n", evt, ln->nport_id);13101310+ CSIO_INC_STATS(ln, n_evt_drop);13111311+ break;13121312+13131313+ case CSIO_LNE_CLOSE:13141314+ csio_set_state(&ln->sm, csio_lns_uninit);13151315+ csio_post_event_rns(ln, CSIO_RNFE_CLOSE);13161316+ break;13171317+13181318+ default:13191319+ csio_ln_dbg(ln,13201320+ "unexp ln event %d recv from did:x%x in "13211321+ "ln state[offline]\n", evt, ln->nport_id);13221322+ CSIO_INC_STATS(ln, n_evt_unexp);13231323+ CSIO_DB_ASSERT(0);13241324+ break;13251325+ } /* switch event */13261326+}13271327+13281328+/*****************************************************************************/13291329+/* END: Lnode SM */13301330+/*****************************************************************************/13311331+13321332+static void13331333+csio_free_fcfinfo(struct kref *kref)13341334+{13351335+ struct csio_fcf_info *fcfinfo = container_of(kref,13361336+ struct csio_fcf_info, kref);13371337+ kfree(fcfinfo);13381338+}13391339+13401340+/* Helper routines for attributes */13411341+/*13421342+ * csio_lnode_state_to_str - Get current state of FCOE lnode.13431343+ * @ln - lnode13441344+ * @str - state of lnode.13451345+ *13461346+ */13471347+void13481348+csio_lnode_state_to_str(struct csio_lnode *ln, int8_t *str)13491349+{13501350+ if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_uninit)) {13511351+ strcpy(str, "UNINIT");13521352+ return;13531353+ }13541354+ if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready)) {13551355+ strcpy(str, "READY");13561356+ return;13571357+ }13581358+ if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_offline)) {13591359+ strcpy(str, "OFFLINE");13601360+ return;13611361+ }13621362+ strcpy(str, "UNKNOWN");13631363+} /* csio_lnode_state_to_str */13641364+13651365+13661366+int13671367+csio_get_phy_port_stats(struct csio_hw *hw, uint8_t portid,13681368+ struct fw_fcoe_port_stats 
*port_stats)13691369+{13701370+ struct csio_mb *mbp;13711371+ struct fw_fcoe_port_cmd_params portparams;13721372+ enum fw_retval retval;13731373+ int idx;13741374+13751375+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);13761376+ if (!mbp) {13771377+ csio_err(hw, "FCoE FCF PARAMS command out of memory!\n");13781378+ return -EINVAL;13791379+ }13801380+ portparams.portid = portid;13811381+13821382+ for (idx = 1; idx <= 3; idx++) {13831383+ portparams.idx = (idx-1)*6 + 1;13841384+ portparams.nstats = 6;13851385+ if (idx == 3)13861386+ portparams.nstats = 4;13871387+ csio_fcoe_read_portparams_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO,13881388+ &portparams, NULL);13891389+ if (csio_mb_issue(hw, mbp)) {13901390+ csio_err(hw, "Issue of FCoE port params failed!\n");13911391+ mempool_free(mbp, hw->mb_mempool);13921392+ return -EINVAL;13931393+ }13941394+ csio_mb_process_portparams_rsp(hw, mbp, &retval,13951395+ &portparams, port_stats);13961396+ }13971397+13981398+ mempool_free(mbp, hw->mb_mempool);13991399+ return 0;14001400+}14011401+14021402+/*14031403+ * csio_ln_mgmt_wr_handler -Mgmt Work Request handler.14041404+ * @wr - WR.14051405+ * @len - WR len.14061406+ * This handler is invoked when an outstanding mgmt WR is completed.14071407+ * Its invoked in the context of FW event worker thread for every14081408+ * mgmt event received.14091409+ * Return - none.14101410+ */14111411+14121412+static void14131413+csio_ln_mgmt_wr_handler(struct csio_hw *hw, void *wr, uint32_t len)14141414+{14151415+ struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);14161416+ struct csio_ioreq *io_req = NULL;14171417+ struct fw_fcoe_els_ct_wr *wr_cmd;14181418+14191419+14201420+ wr_cmd = (struct fw_fcoe_els_ct_wr *) wr;14211421+14221422+ if (len < sizeof(struct fw_fcoe_els_ct_wr)) {14231423+ csio_err(mgmtm->hw,14241424+ "Invalid ELS CT WR length recvd, len:%x\n", len);14251425+ mgmtm->stats.n_err++;14261426+ return;14271427+ }14281428+14291429+ io_req = (struct csio_ioreq *) ((uintptr_t) 
wr_cmd->cookie);14301430+ io_req->wr_status = csio_wr_status(wr_cmd);14311431+14321432+ /* lookup ioreq exists in our active Q */14331433+ spin_lock_irq(&hw->lock);14341434+ if (csio_mgmt_req_lookup(mgmtm, io_req) != 0) {14351435+ csio_err(mgmtm->hw,14361436+ "Error- Invalid IO handle recv in WR. handle: %p\n",14371437+ io_req);14381438+ mgmtm->stats.n_err++;14391439+ spin_unlock_irq(&hw->lock);14401440+ return;14411441+ }14421442+14431443+ mgmtm = csio_hw_to_mgmtm(hw);14441444+14451445+ /* Dequeue from active queue */14461446+ list_del_init(&io_req->sm.sm_list);14471447+ mgmtm->stats.n_active--;14481448+ spin_unlock_irq(&hw->lock);14491449+14501450+ /* io_req will be freed by completion handler */14511451+ if (io_req->io_cbfn)14521452+ io_req->io_cbfn(hw, io_req);14531453+}14541454+14551455+/**14561456+ * csio_fcoe_fwevt_handler - Event handler for Firmware FCoE events.14571457+ * @hw: HW module14581458+ * @cpl_op: CPL opcode14591459+ * @cmd: FW cmd/WR.14601460+ *14611461+ * Process received FCoE cmd/WR event from FW.14621462+ */14631463+void14641464+csio_fcoe_fwevt_handler(struct csio_hw *hw, __u8 cpl_op, __be64 *cmd)14651465+{14661466+ struct csio_lnode *ln;14671467+ struct csio_rnode *rn;14681468+ uint8_t portid, opcode = *(uint8_t *)cmd;14691469+ struct fw_fcoe_link_cmd *lcmd;14701470+ struct fw_wr_hdr *wr;14711471+ struct fw_rdev_wr *rdev_wr;14721472+ enum fw_fcoe_link_status lstatus;14731473+ uint32_t fcfi, rdev_flowid, vnpi;14741474+ enum csio_ln_ev evt;14751475+14761476+ if (cpl_op == CPL_FW6_MSG && opcode == FW_FCOE_LINK_CMD) {14771477+14781478+ lcmd = (struct fw_fcoe_link_cmd *)cmd;14791479+ lstatus = lcmd->lstatus;14801480+ portid = FW_FCOE_LINK_CMD_PORTID_GET(14811481+ ntohl(lcmd->op_to_portid));14821482+ fcfi = FW_FCOE_LINK_CMD_FCFI_GET(ntohl(lcmd->sub_opcode_fcfi));14831483+ vnpi = FW_FCOE_LINK_CMD_VNPI_GET(ntohl(lcmd->vnpi_pkd));14841484+14851485+ if (lstatus == FCOE_LINKUP) {14861486+14871487+ /* HW lock here */14881488+ 
spin_lock_irq(&hw->lock);14891489+ csio_handle_link_up(hw, portid, fcfi, vnpi);14901490+ spin_unlock_irq(&hw->lock);14911491+ /* HW un lock here */14921492+14931493+ } else if (lstatus == FCOE_LINKDOWN) {14941494+14951495+ /* HW lock here */14961496+ spin_lock_irq(&hw->lock);14971497+ csio_handle_link_down(hw, portid, fcfi, vnpi);14981498+ spin_unlock_irq(&hw->lock);14991499+ /* HW un lock here */15001500+ } else {15011501+ csio_warn(hw, "Unexpected FCOE LINK status:0x%x\n",15021502+ ntohl(lcmd->lstatus));15031503+ CSIO_INC_STATS(hw, n_cpl_unexp);15041504+ }15051505+ } else if (cpl_op == CPL_FW6_PLD) {15061506+ wr = (struct fw_wr_hdr *) (cmd + 4);15071507+ if (FW_WR_OP_GET(be32_to_cpu(wr->hi))15081508+ == FW_RDEV_WR) {15091509+15101510+ rdev_wr = (struct fw_rdev_wr *) (cmd + 4);15111511+15121512+ rdev_flowid = FW_RDEV_WR_FLOWID_GET(15131513+ ntohl(rdev_wr->alloc_to_len16));15141514+ vnpi = FW_RDEV_WR_ASSOC_FLOWID_GET(15151515+ ntohl(rdev_wr->flags_to_assoc_flowid));15161516+15171517+ csio_dbg(hw,15181518+ "FW_RDEV_WR: flowid:x%x ev_cause:x%x "15191519+ "vnpi:0x%x\n", rdev_flowid,15201520+ rdev_wr->event_cause, vnpi);15211521+15221522+ if (rdev_wr->protocol != PROT_FCOE) {15231523+ csio_err(hw,15241524+ "FW_RDEV_WR: invalid proto:x%x "15251525+ "received with flowid:x%x\n",15261526+ rdev_wr->protocol,15271527+ rdev_flowid);15281528+ CSIO_INC_STATS(hw, n_evt_drop);15291529+ return;15301530+ }15311531+15321532+ /* HW lock here */15331533+ spin_lock_irq(&hw->lock);15341534+ ln = csio_ln_lookup_by_vnpi(hw, vnpi);15351535+ if (!ln) {15361536+ csio_err(hw,15371537+ "FW_DEV_WR: invalid vnpi:x%x received "15381538+ "with flowid:x%x\n", vnpi, rdev_flowid);15391539+ CSIO_INC_STATS(hw, n_evt_drop);15401540+ goto out_pld;15411541+ }15421542+15431543+ rn = csio_confirm_rnode(ln, rdev_flowid,15441544+ &rdev_wr->u.fcoe_rdev);15451545+ if (!rn) {15461546+ csio_ln_dbg(ln,15471547+ "Failed to confirm rnode "15481548+ "for flowid:x%x\n", rdev_flowid);15491549+ CSIO_INC_STATS(hw, 
n_evt_drop);15501550+ goto out_pld;15511551+ }15521552+15531553+ /* save previous event for debugging */15541554+ ln->prev_evt = ln->cur_evt;15551555+ ln->cur_evt = rdev_wr->event_cause;15561556+ CSIO_INC_STATS(ln, n_evt_fw[rdev_wr->event_cause]);15571557+15581558+ /* Translate all the fabric events to lnode SM events */15591559+ evt = CSIO_FWE_TO_LNE(rdev_wr->event_cause);15601560+ if (evt) {15611561+ csio_ln_dbg(ln,15621562+ "Posting event to lnode event:%d "15631563+ "cause:%d flowid:x%x\n", evt,15641564+ rdev_wr->event_cause, rdev_flowid);15651565+ csio_post_event(&ln->sm, evt);15661566+ }15671567+15681568+ /* Handover event to rn SM here. */15691569+ csio_rnode_fwevt_handler(rn, rdev_wr->event_cause);15701570+out_pld:15711571+ spin_unlock_irq(&hw->lock);15721572+ return;15731573+ } else {15741574+ csio_warn(hw, "unexpected WR op(0x%x) recv\n",15751575+ FW_WR_OP_GET(be32_to_cpu((wr->hi))));15761576+ CSIO_INC_STATS(hw, n_cpl_unexp);15771577+ }15781578+ } else if (cpl_op == CPL_FW6_MSG) {15791579+ wr = (struct fw_wr_hdr *) (cmd);15801580+ if (FW_WR_OP_GET(be32_to_cpu(wr->hi)) == FW_FCOE_ELS_CT_WR) {15811581+ csio_ln_mgmt_wr_handler(hw, wr,15821582+ sizeof(struct fw_fcoe_els_ct_wr));15831583+ } else {15841584+ csio_warn(hw, "unexpected WR op(0x%x) recv\n",15851585+ FW_WR_OP_GET(be32_to_cpu((wr->hi))));15861586+ CSIO_INC_STATS(hw, n_cpl_unexp);15871587+ }15881588+ } else {15891589+ csio_warn(hw, "unexpected CPL op(0x%x) recv\n", opcode);15901590+ CSIO_INC_STATS(hw, n_cpl_unexp);15911591+ }15921592+}15931593+15941594+/**15951595+ * csio_lnode_start - Kickstart lnode discovery.15961596+ * @ln: lnode15971597+ *15981598+ * This routine kickstarts the discovery by issuing an FCOE_LINK (up) command.15991599+ */16001600+int16011601+csio_lnode_start(struct csio_lnode *ln)16021602+{16031603+ int rv = 0;16041604+ if (csio_is_phys_ln(ln) && !(ln->flags & CSIO_LNF_LINK_ENABLE)) {16051605+ rv = csio_fcoe_enable_link(ln, 1);16061606+ ln->flags |= CSIO_LNF_LINK_ENABLE;16071607+ 
	}

	return rv;
}

/**
 * csio_lnode_stop - Stop the lnode.
 * @ln:		lnode
 *
 * This routine is invoked by HW module to stop lnode and its associated NPIV
 * lnodes.
 */
void
csio_lnode_stop(struct csio_lnode *ln)
{
	csio_post_event_lns(ln, CSIO_LNE_DOWN_LINK);
	/* Only the physical lnode issues the FCOE_LINK (down) command */
	if (csio_is_phys_ln(ln) && (ln->flags & CSIO_LNF_LINK_ENABLE)) {
		csio_fcoe_enable_link(ln, 0);
		ln->flags &= ~CSIO_LNF_LINK_ENABLE;
	}
	csio_ln_dbg(ln, "stopping ln :%p\n", ln);
}

/**
 * csio_lnode_close - Close an lnode.
 * @ln:		lnode
 *
 * This routine is invoked by HW module to close an lnode and its
 * associated NPIV lnodes. Lnode and its associated NPIV lnodes are
 * set to uninitialized state.
 */
void
csio_lnode_close(struct csio_lnode *ln)
{
	csio_post_event_lns(ln, CSIO_LNE_CLOSE);
	if (csio_is_phys_ln(ln))
		ln->vnp_flowid = CSIO_INVALID_IDX;

	csio_ln_dbg(ln, "closed ln :%p\n", ln);
}

/*
 * csio_ln_prep_ecwr - Prepare ELS/CT WR.
 * @io_req - IO request.
 * @wr_len - WR len
 * @immd_len - WR immediate data
 * @sub_op - Sub opcode
 * @sid - source portid.
 * @did - destination portid
 * @flow_id - flowid
 * @fw_wr - ELS/CT WR to be prepared.
 * Returns: 0 - on success
 */
static int
csio_ln_prep_ecwr(struct csio_ioreq *io_req, uint32_t wr_len,
		      uint32_t immd_len, uint8_t sub_op, uint32_t sid,
		      uint32_t did, uint32_t flow_id, uint8_t *fw_wr)
{
	struct fw_fcoe_els_ct_wr *wr;
	uint32_t port_id;

	wr  = (struct fw_fcoe_els_ct_wr *)fw_wr;
	wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_FCOE_ELS_CT_WR) |
				     FW_FCOE_ELS_CT_WR_IMMDLEN(immd_len));

	/* WR length field is expressed in 16-byte units */
	wr_len =  DIV_ROUND_UP(wr_len, 16);
	wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(flow_id) |
				       FW_WR_LEN16(wr_len));
	wr->els_ct_type = sub_op;
	wr->ctl_pri = 0;
	wr->cp_en_class = 0;
	wr->cookie = io_req->fw_handle;
	wr->iqid = (uint16_t)cpu_to_be16(csio_q_physiqid(
					io_req->lnode->hwp, io_req->iq_idx));
	wr->fl_to_sp =  FW_FCOE_ELS_CT_WR_SP(1);
	wr->tmo_val = (uint8_t) io_req->tmo;
	/* FC port IDs are 24-bit: copy only the low 3 bytes of the
	 * big-endian representation into the WR's l_id/r_id fields.
	 */
	port_id = htonl(sid);
	memcpy(wr->l_id, PORT_ID_PTR(port_id), 3);
	port_id = htonl(did);
	memcpy(wr->r_id, PORT_ID_PTR(port_id), 3);

	/* Prepare RSP SGL */
	wr->rsp_dmalen = cpu_to_be32(io_req->dma_buf.len);
	wr->rsp_dmaaddr = cpu_to_be64(io_req->dma_buf.paddr);
	return 0;
}

/*
 * csio_ln_mgmt_submit_wr - Post elsct work request.
 * @mgmtm - mgmtm
 * @io_req - io request.
 * @sub_op - ELS or CT request type
 * @pld - Dma Payload buffer
 * @pld_len - Payload len
 * Prepares ELSCT Work request and sents it to FW.
 * Returns: 0 - on success
 */
static int
csio_ln_mgmt_submit_wr(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req,
		uint8_t sub_op, struct csio_dma_buf *pld,
		uint32_t pld_len)
{
	struct csio_wr_pair wrp;
	struct csio_lnode *ln = io_req->lnode;
	struct csio_rnode *rn = io_req->rnode;
	struct csio_hw *hw = mgmtm->hw;
	/* Staging buffer for the WR header before copying into the queue;
	 * sized to hold a fw_fcoe_els_ct_wr.
	 */
	uint8_t fw_wr[64];
	struct ulptx_sgl dsgl;
	uint32_t wr_size = 0;
	uint8_t im_len = 0;
	uint32_t wr_off = 0;

	int ret = 0;

	/* Calculate WR Size for this ELS REQ */
	wr_size = sizeof(struct fw_fcoe_els_ct_wr);

	/* Send as immediate data if pld < 256 */
	if (pld_len < 256) {
		wr_size += ALIGN(pld_len, 8);
		im_len = (uint8_t)pld_len;
	} else
		wr_size += sizeof(struct ulptx_sgl);

	/* Roundup WR size in units of 16 bytes */
	wr_size = ALIGN(wr_size, 16);

	/* Get WR to send ELS REQ */
	ret = csio_wr_get(hw, mgmtm->eq_idx, wr_size, &wrp);
	if (ret != 0) {
		csio_err(hw, "Failed to get WR for ec_req %p ret:%d\n",
			io_req, ret);
		return ret;
	}

	/* Prepare Generic WR used by all ELS/CT cmd */
	csio_ln_prep_ecwr(io_req, wr_size, im_len, sub_op,
				ln->nport_id, rn->nport_id,
				csio_rn_flowid(rn),
				&fw_wr[0]);

	/* Copy ELS/CT WR CMD */
	csio_wr_copy_to_wrp(&fw_wr[0], &wrp, wr_off,
			    sizeof(struct fw_fcoe_els_ct_wr));
	wr_off += sizeof(struct fw_fcoe_els_ct_wr);

	/* Copy payload to Immediate section of WR */
	if (im_len)
		csio_wr_copy_to_wrp(pld->vaddr, &wrp, wr_off, im_len);
	else {
		/* Program DSGL to dma payload */
		dsgl.cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) |
					ULPTX_MORE | ULPTX_NSGE(1));
		dsgl.len0 = cpu_to_be32(pld_len);
		dsgl.addr0 = cpu_to_be64(pld->paddr);
		/* DSGL must land on an 8-byte boundary within the WR */
		csio_wr_copy_to_wrp(&dsgl, &wrp, ALIGN(wr_off, 8),
				   sizeof(struct ulptx_sgl));
	}

	/* Issue work request to xmit ELS/CT req to FW */
	csio_wr_issue(mgmtm->hw, mgmtm->eq_idx, false);
	return ret;
}

/*
 * csio_ln_mgmt_submit_req - Submit FCOE Mgmt request.
 * @io_req - IO Request
 * @io_cbfn - Completion handler.
 * @req_type - ELS or CT request type
 * @pld - Dma Payload buffer
 * @pld_len - Payload len
 *
 *
 * This API used submit management ELS/CT request.
 * This called with hw lock held
 * Returns: 0 - on success
 *	    -ENOMEM	- on
error.17841784+ */17851785+static int17861786+csio_ln_mgmt_submit_req(struct csio_ioreq *io_req,17871787+ void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *),17881788+ enum fcoe_cmn_type req_type, struct csio_dma_buf *pld,17891789+ uint32_t pld_len)17901790+{17911791+ struct csio_hw *hw = csio_lnode_to_hw(io_req->lnode);17921792+ struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);17931793+ int rv;17941794+17951795+ io_req->io_cbfn = io_cbfn; /* Upper layer callback handler */17961796+ io_req->fw_handle = (uintptr_t) (io_req);17971797+ io_req->eq_idx = mgmtm->eq_idx;17981798+ io_req->iq_idx = mgmtm->iq_idx;17991799+18001800+ rv = csio_ln_mgmt_submit_wr(mgmtm, io_req, req_type, pld, pld_len);18011801+ if (rv == 0) {18021802+ list_add_tail(&io_req->sm.sm_list, &mgmtm->active_q);18031803+ mgmtm->stats.n_active++;18041804+ }18051805+ return rv;18061806+}18071807+18081808+/*18091809+ * csio_ln_fdmi_init - FDMI Init entry point.18101810+ * @ln: lnode18111811+ */18121812+static int18131813+csio_ln_fdmi_init(struct csio_lnode *ln)18141814+{18151815+ struct csio_hw *hw = csio_lnode_to_hw(ln);18161816+ struct csio_dma_buf *dma_buf;18171817+18181818+ /* Allocate MGMT request required for FDMI */18191819+ ln->mgmt_req = kzalloc(sizeof(struct csio_ioreq), GFP_KERNEL);18201820+ if (!ln->mgmt_req) {18211821+ csio_ln_err(ln, "Failed to alloc ioreq for FDMI\n");18221822+ CSIO_INC_STATS(hw, n_err_nomem);18231823+ return -ENOMEM;18241824+ }18251825+18261826+ /* Allocate Dma buffers for FDMI response Payload */18271827+ dma_buf = &ln->mgmt_req->dma_buf;18281828+ dma_buf->len = 2048;18291829+ dma_buf->vaddr = pci_alloc_consistent(hw->pdev, dma_buf->len,18301830+ &dma_buf->paddr);18311831+ if (!dma_buf->vaddr) {18321832+ csio_err(hw, "Failed to alloc DMA buffer for FDMI!\n");18331833+ kfree(ln->mgmt_req);18341834+ ln->mgmt_req = NULL;18351835+ return -ENOMEM;18361836+ }18371837+18381838+ ln->flags |= CSIO_LNF_FDMI_ENABLE;18391839+ return 0;18401840+}18411841+18421842+/*18431843+ * 
csio_ln_fdmi_exit - FDMI exit entry point.18441844+ * @ln: lnode18451845+ */18461846+static int18471847+csio_ln_fdmi_exit(struct csio_lnode *ln)18481848+{18491849+ struct csio_dma_buf *dma_buf;18501850+ struct csio_hw *hw = csio_lnode_to_hw(ln);18511851+18521852+ if (!ln->mgmt_req)18531853+ return 0;18541854+18551855+ dma_buf = &ln->mgmt_req->dma_buf;18561856+ if (dma_buf->vaddr)18571857+ pci_free_consistent(hw->pdev, dma_buf->len, dma_buf->vaddr,18581858+ dma_buf->paddr);18591859+18601860+ kfree(ln->mgmt_req);18611861+ return 0;18621862+}18631863+18641864+int18651865+csio_scan_done(struct csio_lnode *ln, unsigned long ticks,18661866+ unsigned long time, unsigned long max_scan_ticks,18671867+ unsigned long delta_scan_ticks)18681868+{18691869+ int rv = 0;18701870+18711871+ if (time >= max_scan_ticks)18721872+ return 1;18731873+18741874+ if (!ln->tgt_scan_tick)18751875+ ln->tgt_scan_tick = ticks;18761876+18771877+ if (((ticks - ln->tgt_scan_tick) >= delta_scan_ticks)) {18781878+ if (!ln->last_scan_ntgts)18791879+ ln->last_scan_ntgts = ln->n_scsi_tgts;18801880+ else {18811881+ if (ln->last_scan_ntgts == ln->n_scsi_tgts)18821882+ return 1;18831883+18841884+ ln->last_scan_ntgts = ln->n_scsi_tgts;18851885+ }18861886+ ln->tgt_scan_tick = ticks;18871887+ }18881888+ return rv;18891889+}18901890+18911891+/*18921892+ * csio_notify_lnodes:18931893+ * @hw: HW module18941894+ * @note: Notification18951895+ *18961896+ * Called from the HW SM to fan out notifications to the18971897+ * Lnode SM. 
Since the HW SM is entered with lock held,18981898+ * there is no need to hold locks here.18991899+ *19001900+ */19011901+void19021902+csio_notify_lnodes(struct csio_hw *hw, enum csio_ln_notify note)19031903+{19041904+ struct list_head *tmp;19051905+ struct csio_lnode *ln;19061906+19071907+ csio_dbg(hw, "Notifying all nodes of event %d\n", note);19081908+19091909+ /* Traverse children lnodes list and send evt */19101910+ list_for_each(tmp, &hw->sln_head) {19111911+ ln = (struct csio_lnode *) tmp;19121912+19131913+ switch (note) {19141914+ case CSIO_LN_NOTIFY_HWREADY:19151915+ csio_lnode_start(ln);19161916+ break;19171917+19181918+ case CSIO_LN_NOTIFY_HWRESET:19191919+ case CSIO_LN_NOTIFY_HWREMOVE:19201920+ csio_lnode_close(ln);19211921+ break;19221922+19231923+ case CSIO_LN_NOTIFY_HWSTOP:19241924+ csio_lnode_stop(ln);19251925+ break;19261926+19271927+ default:19281928+ break;19291929+19301930+ }19311931+ }19321932+}19331933+19341934+/*19351935+ * csio_disable_lnodes:19361936+ * @hw: HW module19371937+ * @portid:port id19381938+ * @disable: disable/enable flag.19391939+ * If disable=1, disables all lnode hosted on given physical port.19401940+ * otherwise enables all the lnodes on given phsysical port.19411941+ * This routine need to called with hw lock held.19421942+ */19431943+void19441944+csio_disable_lnodes(struct csio_hw *hw, uint8_t portid, bool disable)19451945+{19461946+ struct list_head *tmp;19471947+ struct csio_lnode *ln;19481948+19491949+ csio_dbg(hw, "Notifying event to all nodes of port:%d\n", portid);19501950+19511951+ /* Traverse sibling lnodes list and send evt */19521952+ list_for_each(tmp, &hw->sln_head) {19531953+ ln = (struct csio_lnode *) tmp;19541954+ if (ln->portid != portid)19551955+ continue;19561956+19571957+ if (disable)19581958+ csio_lnode_stop(ln);19591959+ else19601960+ csio_lnode_start(ln);19611961+ }19621962+}19631963+19641964+/*19651965+ * csio_ln_init - Initialize an lnode.19661966+ * @ln: lnode19671967+ *19681968+ 
*/19691969+static int19701970+csio_ln_init(struct csio_lnode *ln)19711971+{19721972+ int rv = -EINVAL;19731973+ struct csio_lnode *rln, *pln;19741974+ struct csio_hw *hw = csio_lnode_to_hw(ln);19751975+19761976+ csio_init_state(&ln->sm, csio_lns_uninit);19771977+ ln->vnp_flowid = CSIO_INVALID_IDX;19781978+ ln->fcf_flowid = CSIO_INVALID_IDX;19791979+19801980+ if (csio_is_root_ln(ln)) {19811981+19821982+ /* This is the lnode used during initialization */19831983+19841984+ ln->fcfinfo = kzalloc(sizeof(struct csio_fcf_info), GFP_KERNEL);19851985+ if (!ln->fcfinfo) {19861986+ csio_ln_err(ln, "Failed to alloc FCF record\n");19871987+ CSIO_INC_STATS(hw, n_err_nomem);19881988+ goto err;19891989+ }19901990+19911991+ INIT_LIST_HEAD(&ln->fcf_lsthead);19921992+ kref_init(&ln->fcfinfo->kref);19931993+19941994+ if (csio_fdmi_enable && csio_ln_fdmi_init(ln))19951995+ goto err;19961996+19971997+ } else { /* Either a non-root physical or a virtual lnode */19981998+19991999+ /*20002000+ * THe rest is common for non-root physical and NPIV lnodes.20012001+ * Just get references to all other modules20022002+ */20032003+ rln = csio_root_lnode(ln);20042004+20052005+ if (csio_is_npiv_ln(ln)) {20062006+ /* NPIV */20072007+ pln = csio_parent_lnode(ln);20082008+ kref_get(&pln->fcfinfo->kref);20092009+ ln->fcfinfo = pln->fcfinfo;20102010+ } else {20112011+ /* Another non-root physical lnode (FCF) */20122012+ ln->fcfinfo = kzalloc(sizeof(struct csio_fcf_info),20132013+ GFP_KERNEL);20142014+ if (!ln->fcfinfo) {20152015+ csio_ln_err(ln, "Failed to alloc FCF info\n");20162016+ CSIO_INC_STATS(hw, n_err_nomem);20172017+ goto err;20182018+ }20192019+20202020+ kref_init(&ln->fcfinfo->kref);20212021+20222022+ if (csio_fdmi_enable && csio_ln_fdmi_init(ln))20232023+ goto err;20242024+ }20252025+20262026+ } /* if (!csio_is_root_ln(ln)) */20272027+20282028+ return 0;20292029+err:20302030+ return rv;20312031+}20322032+20332033+static void20342034+csio_ln_exit(struct csio_lnode *ln)20352035+{20362036+ 
struct csio_lnode *pln;20372037+20382038+ csio_cleanup_rns(ln);20392039+ if (csio_is_npiv_ln(ln)) {20402040+ pln = csio_parent_lnode(ln);20412041+ kref_put(&pln->fcfinfo->kref, csio_free_fcfinfo);20422042+ } else {20432043+ kref_put(&ln->fcfinfo->kref, csio_free_fcfinfo);20442044+ if (csio_fdmi_enable)20452045+ csio_ln_fdmi_exit(ln);20462046+ }20472047+ ln->fcfinfo = NULL;20482048+}20492049+20502050+/**20512051+ * csio_lnode_init - Initialize the members of an lnode.20522052+ * @ln: lnode20532053+ *20542054+ */20552055+int20562056+csio_lnode_init(struct csio_lnode *ln, struct csio_hw *hw,20572057+ struct csio_lnode *pln)20582058+{20592059+ int rv = -EINVAL;20602060+20612061+ /* Link this lnode to hw */20622062+ csio_lnode_to_hw(ln) = hw;20632063+20642064+ /* Link child to parent if child lnode */20652065+ if (pln)20662066+ ln->pln = pln;20672067+ else20682068+ ln->pln = NULL;20692069+20702070+ /* Initialize scsi_tgt and timers to zero */20712071+ ln->n_scsi_tgts = 0;20722072+ ln->last_scan_ntgts = 0;20732073+ ln->tgt_scan_tick = 0;20742074+20752075+ /* Initialize rnode list */20762076+ INIT_LIST_HEAD(&ln->rnhead);20772077+ INIT_LIST_HEAD(&ln->cln_head);20782078+20792079+ /* Initialize log level for debug */20802080+ ln->params.log_level = hw->params.log_level;20812081+20822082+ if (csio_ln_init(ln))20832083+ goto err;20842084+20852085+ /* Add lnode to list of sibling or children lnodes */20862086+ spin_lock_irq(&hw->lock);20872087+ list_add_tail(&ln->sm.sm_list, pln ? 
&pln->cln_head : &hw->sln_head);20882088+ if (pln)20892089+ pln->num_vports++;20902090+ spin_unlock_irq(&hw->lock);20912091+20922092+ hw->num_lns++;20932093+20942094+ return 0;20952095+err:20962096+ csio_lnode_to_hw(ln) = NULL;20972097+ return rv;20982098+}20992099+21002100+/**21012101+ * csio_lnode_exit - De-instantiate an lnode.21022102+ * @ln: lnode21032103+ *21042104+ */21052105+void21062106+csio_lnode_exit(struct csio_lnode *ln)21072107+{21082108+ struct csio_hw *hw = csio_lnode_to_hw(ln);21092109+21102110+ csio_ln_exit(ln);21112111+21122112+ /* Remove this lnode from hw->sln_head */21132113+ spin_lock_irq(&hw->lock);21142114+21152115+ list_del_init(&ln->sm.sm_list);21162116+21172117+ /* If it is children lnode, decrement the21182118+ * counter in its parent lnode21192119+ */21202120+ if (ln->pln)21212121+ ln->pln->num_vports--;21222122+21232123+ /* Update root lnode pointer */21242124+ if (list_empty(&hw->sln_head))21252125+ hw->rln = NULL;21262126+ else21272127+ hw->rln = (struct csio_lnode *)csio_list_next(&hw->sln_head);21282128+21292129+ spin_unlock_irq(&hw->lock);21302130+21312131+ csio_lnode_to_hw(ln) = NULL;21322132+ hw->num_lns--;21332133+}
+255
drivers/scsi/csiostor/csio_lnode.h
/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef	__CSIO_LNODE_H__
#define	__CSIO_LNODE_H__

#include <linux/kref.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <scsi/fc/fc_els.h>


#include "csio_defs.h"
#include "csio_hw.h"

#define CSIO_FCOE_MAX_NPIV	128
#define CSIO_FCOE_MAX_RNODES	2048

/* FDMI port attribute unknown speed */
#define CSIO_HBA_PORTSPEED_UNKNOWN	0x8000

extern int csio_fcoe_rnodes;
extern int csio_fdmi_enable;

/* State machine events (fixed typo: was "evets") */
enum csio_ln_ev {
	CSIO_LNE_NONE = (uint32_t)0,
	CSIO_LNE_LINKUP,
	CSIO_LNE_FAB_INIT_DONE,
	CSIO_LNE_LINK_DOWN,
	CSIO_LNE_DOWN_LINK,
	CSIO_LNE_LOGO,
	CSIO_LNE_CLOSE,
	CSIO_LNE_MAX_EVENT,
};


/* FCF (FCoE Forwarder) record discovered from firmware; refcounted
 * because NPIV lnodes share their parent's record.
 */
struct csio_fcf_info {
	struct list_head	list;
	uint8_t			priority;
	uint8_t			mac[6];
	uint8_t			name_id[8];
	uint8_t			fabric[8];
	uint16_t		vf_id;
	uint8_t			vlan_id;
	uint16_t		max_fcoe_size;
	uint8_t			fc_map[3];
	uint32_t		fka_adv;
	uint32_t		fcfi;
	uint8_t			get_next:1;
	uint8_t			link_aff:1;
	uint8_t			fpma:1;
	uint8_t			spma:1;
	uint8_t			login:1;
	uint8_t			portid;
	uint8_t			spma_mac[6];
	struct kref		kref;
};

/* Defines for flags */
#define	CSIO_LNF_FIPSUPP		0x00000001	/* Fip Supported */
#define	CSIO_LNF_NPIVSUPP		0x00000002	/* NPIV supported */
#define CSIO_LNF_LINK_ENABLE		0x00000004	/* Link enabled */
#define	CSIO_LNF_FDMI_ENABLE		0x00000008	/* FDMI support */

/* Transport events */
enum csio_ln_fc_evt {
	CSIO_LN_FC_LINKUP = 1,
	CSIO_LN_FC_LINKDOWN,
	CSIO_LN_FC_RSCN,
	CSIO_LN_FC_ATTRIB_UPDATE,
};

/* Lnode stats */
struct csio_lnode_stats {
	uint32_t	n_link_up;	/* Link up */
					/* fixed: comments on n_link_up and
					 * n_link_down were swapped */
	uint32_t	n_link_down;	/* Link down */
	uint32_t	n_err;		/* error */
	uint32_t	n_err_nomem;	/* memory not available */
	uint32_t	n_inval_parm;   /* Invalid parameters */
	uint32_t	n_evt_unexp;	/* unexpected event */
	uint32_t	n_evt_drop;	/* dropped event */
	uint32_t	n_rnode_match;  /* matched rnode */
	uint32_t	n_dev_loss_tmo; /* Device loss timeout */
	uint32_t	n_fdmi_err;	/* fdmi err */
	uint32_t	n_evt_fw[RSCN_DEV_LOST];	/* fw events */
	enum csio_ln_ev	n_evt_sm[CSIO_LNE_MAX_EVENT];	/* State m/c events */
	uint32_t	n_rnode_alloc;	/* rnode allocated */
	uint32_t	n_rnode_free;	/* rnode freed */
	uint32_t	n_rnode_nomem;	/* rnode alloc failure */
	uint32_t        n_input_requests; /* Input Requests */
	uint32_t        n_output_requests; /* Output Requests */
	uint32_t        n_control_requests; /* Control Requests */
	uint32_t        n_input_bytes; /* Input Bytes */
	uint32_t        n_output_bytes; /* Output Bytes */
	uint32_t	rsvd1;
};

/* Common Lnode params */
struct csio_lnode_params {
	uint32_t	ra_tov;
	uint32_t	fcfi;
	uint32_t	log_level;	/* Module level for debugging */
};

struct csio_service_parms {
	struct fc_els_csp	csp;		/* Common service parms */
	uint8_t			wwpn[8];	/* WWPN */
	uint8_t			wwnn[8];	/* WWNN */
	struct fc_els_cssp	clsp[4];	/* Class service params */
	uint8_t			vvl[16];	/* Vendor version level */
};

/* Lnode */
struct csio_lnode {
	struct csio_sm		sm;		/* State machine + sibling
						 * lnode list.
						 */
	struct csio_hw		*hwp;		/* Pointer to the HW module */
	uint8_t			portid;		/* Port ID */
	uint8_t			rsvd1;
	uint16_t		rsvd2;
	uint32_t		dev_num;	/* Device number */
	uint32_t		flags;		/* 
Flags */156156+ struct list_head fcf_lsthead; /* FCF entries */157157+ struct csio_fcf_info *fcfinfo; /* FCF in use */158158+ struct csio_ioreq *mgmt_req; /* MGMT request */159159+160160+ /* FCoE identifiers */161161+ uint8_t mac[6];162162+ uint32_t nport_id;163163+ struct csio_service_parms ln_sparm; /* Service parms */164164+165165+ /* Firmware identifiers */166166+ uint32_t fcf_flowid; /*fcf flowid */167167+ uint32_t vnp_flowid;168168+ uint16_t ssn_cnt; /* Registered Session */169169+ uint8_t cur_evt; /* Current event */170170+ uint8_t prev_evt; /* Previous event */171171+172172+ /* Children */173173+ struct list_head cln_head; /* Head of the children lnode174174+ * list.175175+ */176176+ uint32_t num_vports; /* Total NPIV/children LNodes*/177177+ struct csio_lnode *pln; /* Parent lnode of child178178+ * lnodes.179179+ */180180+ struct list_head cmpl_q; /* Pending I/Os on this lnode */181181+182182+ /* Remote node information */183183+ struct list_head rnhead; /* Head of rnode list */184184+ uint32_t num_reg_rnodes; /* Number of rnodes registered185185+ * with the host.186186+ */187187+ uint32_t n_scsi_tgts; /* Number of scsi targets188188+ * found189189+ */190190+ uint32_t last_scan_ntgts;/* Number of scsi targets191191+ * found per last scan.192192+ */193193+ uint32_t tgt_scan_tick; /* timer started after194194+ * new tgt found195195+ */196196+ /* FC transport data */197197+ struct fc_vport *fc_vport;198198+ struct fc_host_statistics fch_stats;199199+200200+ struct csio_lnode_stats stats; /* Common lnode stats */201201+ struct csio_lnode_params params; /* Common lnode params */202202+};203203+204204+#define csio_lnode_to_hw(ln) ((ln)->hwp)205205+#define csio_root_lnode(ln) (csio_lnode_to_hw((ln))->rln)206206+#define csio_parent_lnode(ln) ((ln)->pln)207207+#define csio_ln_flowid(ln) ((ln)->vnp_flowid)208208+#define csio_ln_wwpn(ln) ((ln)->ln_sparm.wwpn)209209+#define csio_ln_wwnn(ln) ((ln)->ln_sparm.wwnn)210210+211211+#define csio_is_root_ln(ln) (((ln) == 
csio_root_lnode((ln))) ? 1 : 0)212212+#define csio_is_phys_ln(ln) (((ln)->pln == NULL) ? 1 : 0)213213+#define csio_is_npiv_ln(ln) (((ln)->pln != NULL) ? 1 : 0)214214+215215+216216+#define csio_ln_dbg(_ln, _fmt, ...) \217217+ csio_dbg(_ln->hwp, "%x:%x "_fmt, CSIO_DEVID_HI(_ln), \218218+ CSIO_DEVID_LO(_ln), ##__VA_ARGS__);219219+220220+#define csio_ln_err(_ln, _fmt, ...) \221221+ csio_err(_ln->hwp, "%x:%x "_fmt, CSIO_DEVID_HI(_ln), \222222+ CSIO_DEVID_LO(_ln), ##__VA_ARGS__);223223+224224+#define csio_ln_warn(_ln, _fmt, ...) \225225+ csio_warn(_ln->hwp, "%x:%x "_fmt, CSIO_DEVID_HI(_ln), \226226+ CSIO_DEVID_LO(_ln), ##__VA_ARGS__);227227+228228+/* HW->Lnode notifications */229229+enum csio_ln_notify {230230+ CSIO_LN_NOTIFY_HWREADY = 1,231231+ CSIO_LN_NOTIFY_HWSTOP,232232+ CSIO_LN_NOTIFY_HWREMOVE,233233+ CSIO_LN_NOTIFY_HWRESET,234234+};235235+236236+void csio_fcoe_fwevt_handler(struct csio_hw *, __u8 cpl_op, __be64 *);237237+int csio_is_lnode_ready(struct csio_lnode *);238238+void csio_lnode_state_to_str(struct csio_lnode *ln, int8_t *str);239239+struct csio_lnode *csio_lnode_lookup_by_wwpn(struct csio_hw *, uint8_t *);240240+int csio_get_phy_port_stats(struct csio_hw *, uint8_t ,241241+ struct fw_fcoe_port_stats *);242242+int csio_scan_done(struct csio_lnode *, unsigned long, unsigned long,243243+ unsigned long, unsigned long);244244+void csio_notify_lnodes(struct csio_hw *, enum csio_ln_notify);245245+void csio_disable_lnodes(struct csio_hw *, uint8_t, bool);246246+void csio_lnode_async_event(struct csio_lnode *, enum csio_ln_fc_evt);247247+int csio_ln_fdmi_start(struct csio_lnode *, void *);248248+int csio_lnode_start(struct csio_lnode *);249249+void csio_lnode_stop(struct csio_lnode *);250250+void csio_lnode_close(struct csio_lnode *);251251+int csio_lnode_init(struct csio_lnode *, struct csio_hw *,252252+ struct csio_lnode *);253253+void csio_lnode_exit(struct csio_lnode *);254254+255255+#endif /* ifndef __CSIO_LNODE_H__ */
+1770
drivers/scsi/csiostor/csio_mb.c
···11+/*22+ * This file is part of the Chelsio FCoE driver for Linux.33+ *44+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.55+ *66+ * This software is available to you under a choice of one of two77+ * licenses. You may choose to be licensed under the terms of the GNU88+ * General Public License (GPL) Version 2, available from the file99+ * COPYING in the main directory of this source tree, or the1010+ * OpenIB.org BSD license below:1111+ *1212+ * Redistribution and use in source and binary forms, with or1313+ * without modification, are permitted provided that the following1414+ * conditions are met:1515+ *1616+ * - Redistributions of source code must retain the above1717+ * copyright notice, this list of conditions and the following1818+ * disclaimer.1919+ *2020+ * - Redistributions in binary form must reproduce the above2121+ * copyright notice, this list of conditions and the following2222+ * disclaimer in the documentation and/or other materials2323+ * provided with the distribution.2424+ *2525+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,2626+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF2727+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND2828+ * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS2929+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN3030+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN3131+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE3232+ * SOFTWARE.3333+ */3434+3535+#include <linux/delay.h>3636+#include <linux/jiffies.h>3737+#include <linux/string.h>3838+#include <scsi/scsi_device.h>3939+#include <scsi/scsi_transport_fc.h>4040+4141+#include "csio_hw.h"4242+#include "csio_lnode.h"4343+#include "csio_rnode.h"4444+#include "csio_mb.h"4545+#include "csio_wr.h"4646+4747+#define csio_mb_is_host_owner(__owner) ((__owner) == CSIO_MBOWNER_PL)4848+4949+/* MB Command/Response Helpers */5050+/*5151+ * csio_mb_fw_retval - FW return value from a mailbox response.5252+ * @mbp: Mailbox structure5353+ *5454+ */5555+enum fw_retval5656+csio_mb_fw_retval(struct csio_mb *mbp)5757+{5858+ struct fw_cmd_hdr *hdr;5959+6060+ hdr = (struct fw_cmd_hdr *)(mbp->mb);6161+6262+ return FW_CMD_RETVAL_GET(ntohl(hdr->lo));6363+}6464+6565+/*6666+ * csio_mb_hello - FW HELLO command helper6767+ * @hw: The HW structure6868+ * @mbp: Mailbox structure6969+ * @m_mbox: Master mailbox number, if any.7070+ * @a_mbox: Mailbox number for asycn notifications.7171+ * @master: Device mastership.7272+ * @cbfn: Callback, if any.7373+ *7474+ */7575+void7676+csio_mb_hello(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,7777+ uint32_t m_mbox, uint32_t a_mbox, enum csio_dev_master master,7878+ void (*cbfn) (struct csio_hw *, struct csio_mb *))7979+{8080+ struct fw_hello_cmd *cmdp = (struct fw_hello_cmd *)(mbp->mb);8181+8282+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);8383+8484+ cmdp->op_to_write = htonl(FW_CMD_OP(FW_HELLO_CMD) |8585+ FW_CMD_REQUEST | FW_CMD_WRITE);8686+ cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));8787+ cmdp->err_to_clearinit = htonl(8888+ FW_HELLO_CMD_MASTERDIS(master == CSIO_MASTER_CANT) |8989+ FW_HELLO_CMD_MASTERFORCE(master == 
CSIO_MASTER_MUST) |9090+ FW_HELLO_CMD_MBMASTER(master == CSIO_MASTER_MUST ?9191+ m_mbox : FW_HELLO_CMD_MBMASTER_MASK) |9292+ FW_HELLO_CMD_MBASYNCNOT(a_mbox) |9393+ FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) |9494+ FW_HELLO_CMD_CLEARINIT);9595+9696+}9797+9898+/*9999+ * csio_mb_process_hello_rsp - FW HELLO response processing helper100100+ * @hw: The HW structure101101+ * @mbp: Mailbox structure102102+ * @retval: Mailbox return value from Firmware103103+ * @state: State that the function is in.104104+ * @mpfn: Master pfn105105+ *106106+ */107107+void108108+csio_mb_process_hello_rsp(struct csio_hw *hw, struct csio_mb *mbp,109109+ enum fw_retval *retval, enum csio_dev_state *state,110110+ uint8_t *mpfn)111111+{112112+ struct fw_hello_cmd *rsp = (struct fw_hello_cmd *)(mbp->mb);113113+ uint32_t value;114114+115115+ *retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16));116116+117117+ if (*retval == FW_SUCCESS) {118118+ hw->fwrev = ntohl(rsp->fwrev);119119+120120+ value = ntohl(rsp->err_to_clearinit);121121+ *mpfn = FW_HELLO_CMD_MBMASTER_GET(value);122122+123123+ if (value & FW_HELLO_CMD_INIT)124124+ *state = CSIO_DEV_STATE_INIT;125125+ else if (value & FW_HELLO_CMD_ERR)126126+ *state = CSIO_DEV_STATE_ERR;127127+ else128128+ *state = CSIO_DEV_STATE_UNINIT;129129+ }130130+}131131+132132+/*133133+ * csio_mb_bye - FW BYE command helper134134+ * @hw: The HW structure135135+ * @mbp: Mailbox structure136136+ * @cbfn: Callback, if any.137137+ *138138+ */139139+void140140+csio_mb_bye(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,141141+ void (*cbfn) (struct csio_hw *, struct csio_mb *))142142+{143143+ struct fw_bye_cmd *cmdp = (struct fw_bye_cmd *)(mbp->mb);144144+145145+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);146146+147147+ cmdp->op_to_write = htonl(FW_CMD_OP(FW_BYE_CMD) |148148+ FW_CMD_REQUEST | FW_CMD_WRITE);149149+ cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));150150+151151+}152152+153153+/*154154+ * csio_mb_reset - FW RESET command helper155155+ * 
@hw: The HW structure156156+ * @mbp: Mailbox structure157157+ * @reset: Type of reset.158158+ * @cbfn: Callback, if any.159159+ *160160+ */161161+void162162+csio_mb_reset(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,163163+ int reset, int halt,164164+ void (*cbfn) (struct csio_hw *, struct csio_mb *))165165+{166166+ struct fw_reset_cmd *cmdp = (struct fw_reset_cmd *)(mbp->mb);167167+168168+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);169169+170170+ cmdp->op_to_write = htonl(FW_CMD_OP(FW_RESET_CMD) |171171+ FW_CMD_REQUEST | FW_CMD_WRITE);172172+ cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));173173+ cmdp->val = htonl(reset);174174+ cmdp->halt_pkd = htonl(halt);175175+176176+}177177+178178+/*179179+ * csio_mb_params - FW PARAMS command helper180180+ * @hw: The HW structure181181+ * @mbp: Mailbox structure182182+ * @tmo: Command timeout.183183+ * @pf: PF number.184184+ * @vf: VF number.185185+ * @nparams: Number of paramters186186+ * @params: Parameter mnemonic array.187187+ * @val: Parameter value array.188188+ * @wr: Write/Read PARAMS.189189+ * @cbfn: Callback, if any.190190+ *191191+ */192192+void193193+csio_mb_params(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,194194+ unsigned int pf, unsigned int vf, unsigned int nparams,195195+ const u32 *params, u32 *val, bool wr,196196+ void (*cbfn)(struct csio_hw *, struct csio_mb *))197197+{198198+ uint32_t i;199199+ uint32_t temp_params = 0, temp_val = 0;200200+ struct fw_params_cmd *cmdp = (struct fw_params_cmd *)(mbp->mb);201201+ __be32 *p = &cmdp->param[0].mnem;202202+203203+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);204204+205205+ cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) |206206+ FW_CMD_REQUEST |207207+ (wr ? 
FW_CMD_WRITE : FW_CMD_READ) |208208+ FW_PARAMS_CMD_PFN(pf) |209209+ FW_PARAMS_CMD_VFN(vf));210210+ cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));211211+212212+ /* Write Params */213213+ if (wr) {214214+ while (nparams--) {215215+ temp_params = *params++;216216+ temp_val = *val++;217217+218218+ *p++ = htonl(temp_params);219219+ *p++ = htonl(temp_val);220220+ }221221+ } else {222222+ for (i = 0; i < nparams; i++, p += 2) {223223+ temp_params = *params++;224224+ *p = htonl(temp_params);225225+ }226226+ }227227+228228+}229229+230230+/*231231+ * csio_mb_process_read_params_rsp - FW PARAMS response processing helper232232+ * @hw: The HW structure233233+ * @mbp: Mailbox structure234234+ * @retval: Mailbox return value from Firmware235235+ * @nparams: Number of parameters236236+ * @val: Parameter value array.237237+ *238238+ */239239+void240240+csio_mb_process_read_params_rsp(struct csio_hw *hw, struct csio_mb *mbp,241241+ enum fw_retval *retval, unsigned int nparams,242242+ u32 *val)243243+{244244+ struct fw_params_cmd *rsp = (struct fw_params_cmd *)(mbp->mb);245245+ uint32_t i;246246+ __be32 *p = &rsp->param[0].val;247247+248248+ *retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16));249249+250250+ if (*retval == FW_SUCCESS)251251+ for (i = 0; i < nparams; i++, p += 2)252252+ *val++ = ntohl(*p);253253+}254254+255255+/*256256+ * csio_mb_ldst - FW LDST command257257+ * @hw: The HW structure258258+ * @mbp: Mailbox structure259259+ * @tmo: timeout260260+ * @reg: register261261+ *262262+ */263263+void264264+csio_mb_ldst(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, int reg)265265+{266266+ struct fw_ldst_cmd *ldst_cmd = (struct fw_ldst_cmd *)(mbp->mb);267267+ CSIO_INIT_MBP(mbp, ldst_cmd, tmo, hw, NULL, 1);268268+269269+ /*270270+ * Construct and send the Firmware LDST Command to retrieve the271271+ * specified PCI-E Configuration Space register.272272+ */273273+ ldst_cmd->op_to_addrspace =274274+ htonl(FW_CMD_OP(FW_LDST_CMD) |275275+ FW_CMD_REQUEST 
|276276+ FW_CMD_READ |277277+ FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));278278+ ldst_cmd->cycles_to_len16 = htonl(FW_LEN16(struct fw_ldst_cmd));279279+ ldst_cmd->u.pcie.select_naccess = FW_LDST_CMD_NACCESS(1);280280+ ldst_cmd->u.pcie.ctrl_to_fn =281281+ (FW_LDST_CMD_LC | FW_LDST_CMD_FN(hw->pfn));282282+ ldst_cmd->u.pcie.r = (uint8_t)reg;283283+}284284+285285+/*286286+ *287287+ * csio_mb_caps_config - FW Read/Write Capabilities command helper288288+ * @hw: The HW structure289289+ * @mbp: Mailbox structure290290+ * @wr: Write if 1, Read if 0291291+ * @init: Turn on initiator mode.292292+ * @tgt: Turn on target mode.293293+ * @cofld: If 1, Control Offload for FCoE294294+ * @cbfn: Callback, if any.295295+ *296296+ * This helper assumes that cmdp has MB payload from a previous CAPS297297+ * read command.298298+ */299299+void300300+csio_mb_caps_config(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,301301+ bool wr, bool init, bool tgt, bool cofld,302302+ void (*cbfn) (struct csio_hw *, struct csio_mb *))303303+{304304+ struct fw_caps_config_cmd *cmdp =305305+ (struct fw_caps_config_cmd *)(mbp->mb);306306+307307+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, wr ? 0 : 1);308308+309309+ cmdp->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |310310+ FW_CMD_REQUEST |311311+ (wr ? 
FW_CMD_WRITE : FW_CMD_READ));312312+ cmdp->cfvalid_to_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));313313+314314+ /* Read config */315315+ if (!wr)316316+ return;317317+318318+ /* Write config */319319+ cmdp->fcoecaps = 0;320320+321321+ if (cofld)322322+ cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_CTRL_OFLD);323323+ if (init)324324+ cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_INITIATOR);325325+ if (tgt)326326+ cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_TARGET);327327+}328328+329329+void330330+csio_rss_glb_config(struct csio_hw *hw, struct csio_mb *mbp,331331+ uint32_t tmo, uint8_t mode, unsigned int flags,332332+ void (*cbfn)(struct csio_hw *, struct csio_mb *))333333+{334334+ struct fw_rss_glb_config_cmd *cmdp =335335+ (struct fw_rss_glb_config_cmd *)(mbp->mb);336336+337337+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);338338+339339+ cmdp->op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |340340+ FW_CMD_REQUEST | FW_CMD_WRITE);341341+ cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));342342+343343+ if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {344344+ cmdp->u.manual.mode_pkd =345345+ htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));346346+ } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {347347+ cmdp->u.basicvirtual.mode_pkd =348348+ htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));349349+ cmdp->u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);350350+ }351351+}352352+353353+354354+/*355355+ * csio_mb_pfvf - FW Write PF/VF capabilities command helper.356356+ * @hw: The HW structure357357+ * @mbp: Mailbox structure358358+ * @pf:359359+ * @vf:360360+ * @txq:361361+ * @txq_eht_ctrl:362362+ * @rxqi:363363+ * @rxq:364364+ * @tc:365365+ * @vi:366366+ * @pmask:367367+ * @rcaps:368368+ * @wxcaps:369369+ * @cbfn: Callback, if any.370370+ *371371+ */372372+void373373+csio_mb_pfvf(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,374374+ unsigned int pf, unsigned int vf, unsigned int txq,375375+ unsigned int txq_eth_ctrl, unsigned int rxqi,376376+ 
unsigned int rxq, unsigned int tc, unsigned int vi,377377+ unsigned int cmask, unsigned int pmask, unsigned int nexactf,378378+ unsigned int rcaps, unsigned int wxcaps,379379+ void (*cbfn) (struct csio_hw *, struct csio_mb *))380380+{381381+ struct fw_pfvf_cmd *cmdp = (struct fw_pfvf_cmd *)(mbp->mb);382382+383383+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);384384+385385+ cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) |386386+ FW_CMD_REQUEST |387387+ FW_CMD_WRITE |388388+ FW_PFVF_CMD_PFN(pf) |389389+ FW_PFVF_CMD_VFN(vf));390390+ cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));391391+ cmdp->niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |392392+ FW_PFVF_CMD_NIQ(rxq));393393+394394+ cmdp->type_to_neq = htonl(FW_PFVF_CMD_TYPE |395395+ FW_PFVF_CMD_CMASK(cmask) |396396+ FW_PFVF_CMD_PMASK(pmask) |397397+ FW_PFVF_CMD_NEQ(txq));398398+ cmdp->tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) |399399+ FW_PFVF_CMD_NVI(vi) |400400+ FW_PFVF_CMD_NEXACTF(nexactf));401401+ cmdp->r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |402402+ FW_PFVF_CMD_WX_CAPS(wxcaps) |403403+ FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));404404+}405405+406406+#define CSIO_ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\407407+ FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)408408+409409+/*410410+ * csio_mb_port- FW PORT command helper411411+ * @hw: The HW structure412412+ * @mbp: Mailbox structure413413+ * @tmo: COmmand timeout414414+ * @portid: Port ID to get/set info415415+ * @wr: Write/Read PORT information.416416+ * @fc: Flow control417417+ * @caps: Port capabilites to set.418418+ * @cbfn: Callback, if any.419419+ *420420+ */421421+void422422+csio_mb_port(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,423423+ uint8_t portid, bool wr, uint32_t fc, uint16_t caps,424424+ void (*cbfn) (struct csio_hw *, struct csio_mb *))425425+{426426+ struct fw_port_cmd *cmdp = (struct fw_port_cmd *)(mbp->mb);427427+ unsigned int lfc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);428428+429429+ 
CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);430430+431431+ cmdp->op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |432432+ FW_CMD_REQUEST |433433+ (wr ? FW_CMD_EXEC : FW_CMD_READ) |434434+ FW_PORT_CMD_PORTID(portid));435435+ if (!wr) {436436+ cmdp->action_to_len16 = htonl(437437+ FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |438438+ FW_CMD_LEN16(sizeof(*cmdp) / 16));439439+ return;440440+ }441441+442442+ /* Set port */443443+ cmdp->action_to_len16 = htonl(444444+ FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |445445+ FW_CMD_LEN16(sizeof(*cmdp) / 16));446446+447447+ if (fc & PAUSE_RX)448448+ lfc |= FW_PORT_CAP_FC_RX;449449+ if (fc & PAUSE_TX)450450+ lfc |= FW_PORT_CAP_FC_TX;451451+452452+ if (!(caps & FW_PORT_CAP_ANEG))453453+ cmdp->u.l1cfg.rcap = htonl((caps & CSIO_ADVERT_MASK) | lfc);454454+ else455455+ cmdp->u.l1cfg.rcap = htonl((caps & CSIO_ADVERT_MASK) |456456+ lfc | mdi);457457+}458458+459459+/*460460+ * csio_mb_process_read_port_rsp - FW PORT command response processing helper461461+ * @hw: The HW structure462462+ * @mbp: Mailbox structure463463+ * @retval: Mailbox return value from Firmware464464+ * @caps: port capabilities465465+ *466466+ */467467+void468468+csio_mb_process_read_port_rsp(struct csio_hw *hw, struct csio_mb *mbp,469469+ enum fw_retval *retval, uint16_t *caps)470470+{471471+ struct fw_port_cmd *rsp = (struct fw_port_cmd *)(mbp->mb);472472+473473+ *retval = FW_CMD_RETVAL_GET(ntohl(rsp->action_to_len16));474474+475475+ if (*retval == FW_SUCCESS)476476+ *caps = ntohs(rsp->u.info.pcap);477477+}478478+479479+/*480480+ * csio_mb_initialize - FW INITIALIZE command helper481481+ * @hw: The HW structure482482+ * @mbp: Mailbox structure483483+ * @tmo: COmmand timeout484484+ * @cbfn: Callback, if any.485485+ *486486+ */487487+void488488+csio_mb_initialize(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,489489+ void (*cbfn) (struct csio_hw *, struct csio_mb *))490490+{491491+ struct fw_initialize_cmd *cmdp = (struct fw_initialize_cmd 
*)(mbp->mb);492492+493493+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);494494+495495+ cmdp->op_to_write = htonl(FW_CMD_OP(FW_INITIALIZE_CMD) |496496+ FW_CMD_REQUEST | FW_CMD_WRITE);497497+ cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));498498+499499+}500500+501501+/*502502+ * csio_mb_iq_alloc - Initializes the mailbox to allocate an503503+ * Ingress DMA queue in the firmware.504504+ *505505+ * @hw: The hw structure506506+ * @mbp: Mailbox structure to initialize507507+ * @priv: Private object508508+ * @mb_tmo: Mailbox time-out period (in ms).509509+ * @iq_params: Ingress queue params needed for allocation.510510+ * @cbfn: The call-back function511511+ *512512+ *513513+ */514514+static void515515+csio_mb_iq_alloc(struct csio_hw *hw, struct csio_mb *mbp, void *priv,516516+ uint32_t mb_tmo, struct csio_iq_params *iq_params,517517+ void (*cbfn) (struct csio_hw *, struct csio_mb *))518518+{519519+ struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb);520520+521521+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);522522+523523+ cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) |524524+ FW_CMD_REQUEST | FW_CMD_EXEC |525525+ FW_IQ_CMD_PFN(iq_params->pfn) |526526+ FW_IQ_CMD_VFN(iq_params->vfn));527527+528528+ cmdp->alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC |529529+ FW_CMD_LEN16(sizeof(*cmdp) / 16));530530+531531+ cmdp->type_to_iqandstindex = htonl(532532+ FW_IQ_CMD_VIID(iq_params->viid) |533533+ FW_IQ_CMD_TYPE(iq_params->type) |534534+ FW_IQ_CMD_IQASYNCH(iq_params->iqasynch));535535+536536+ cmdp->fl0size = htons(iq_params->fl0size);537537+ cmdp->fl0size = htons(iq_params->fl1size);538538+539539+} /* csio_mb_iq_alloc */540540+541541+/*542542+ * csio_mb_iq_write - Initializes the mailbox for writing into an543543+ * Ingress DMA Queue.544544+ *545545+ * @hw: The HW structure546546+ * @mbp: Mailbox structure to initialize547547+ * @priv: Private object548548+ * @mb_tmo: Mailbox time-out period (in ms).549549+ * @cascaded_req: TRUE - if this request is cascased with 
iq-alloc request.550550+ * @iq_params: Ingress queue params needed for writing.551551+ * @cbfn: The call-back function552552+ *553553+ * NOTE: We OR relevant bits with cmdp->XXX, instead of just equating,554554+ * because this IQ write request can be cascaded with a previous555555+ * IQ alloc request, and we dont want to over-write the bits set by556556+ * that request. This logic will work even in a non-cascaded case, since the557557+ * cmdp structure is zeroed out by CSIO_INIT_MBP.558558+ */559559+static void560560+csio_mb_iq_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,561561+ uint32_t mb_tmo, bool cascaded_req,562562+ struct csio_iq_params *iq_params,563563+ void (*cbfn) (struct csio_hw *, struct csio_mb *))564564+{565565+ struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb);566566+567567+ uint32_t iq_start_stop = (iq_params->iq_start) ?568568+ FW_IQ_CMD_IQSTART(1) :569569+ FW_IQ_CMD_IQSTOP(1);570570+571571+ /*572572+ * If this IQ write is cascaded with IQ alloc request, do not573573+ * re-initialize with 0's.574574+ *575575+ */576576+ if (!cascaded_req)577577+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);578578+579579+ cmdp->op_to_vfn |= htonl(FW_CMD_OP(FW_IQ_CMD) |580580+ FW_CMD_REQUEST | FW_CMD_WRITE |581581+ FW_IQ_CMD_PFN(iq_params->pfn) |582582+ FW_IQ_CMD_VFN(iq_params->vfn));583583+ cmdp->alloc_to_len16 |= htonl(iq_start_stop |584584+ FW_CMD_LEN16(sizeof(*cmdp) / 16));585585+ cmdp->iqid |= htons(iq_params->iqid);586586+ cmdp->fl0id |= htons(iq_params->fl0id);587587+ cmdp->fl1id |= htons(iq_params->fl1id);588588+ cmdp->type_to_iqandstindex |= htonl(589589+ FW_IQ_CMD_IQANDST(iq_params->iqandst) |590590+ FW_IQ_CMD_IQANUS(iq_params->iqanus) |591591+ FW_IQ_CMD_IQANUD(iq_params->iqanud) |592592+ FW_IQ_CMD_IQANDSTINDEX(iq_params->iqandstindex));593593+ cmdp->iqdroprss_to_iqesize |= htons(594594+ FW_IQ_CMD_IQPCIECH(iq_params->iqpciech) |595595+ FW_IQ_CMD_IQDCAEN(iq_params->iqdcaen) |596596+ FW_IQ_CMD_IQDCACPU(iq_params->iqdcacpu) |597597+ 
FW_IQ_CMD_IQINTCNTTHRESH(iq_params->iqintcntthresh) |598598+ FW_IQ_CMD_IQCPRIO(iq_params->iqcprio) |599599+ FW_IQ_CMD_IQESIZE(iq_params->iqesize));600600+601601+ cmdp->iqsize |= htons(iq_params->iqsize);602602+ cmdp->iqaddr |= cpu_to_be64(iq_params->iqaddr);603603+604604+ if (iq_params->type == 0) {605605+ cmdp->iqns_to_fl0congen |= htonl(606606+ FW_IQ_CMD_IQFLINTIQHSEN(iq_params->iqflintiqhsen)|607607+ FW_IQ_CMD_IQFLINTCONGEN(iq_params->iqflintcongen));608608+ }609609+610610+ if (iq_params->fl0size && iq_params->fl0addr &&611611+ (iq_params->fl0id != 0xFFFF)) {612612+613613+ cmdp->iqns_to_fl0congen |= htonl(614614+ FW_IQ_CMD_FL0HOSTFCMODE(iq_params->fl0hostfcmode)|615615+ FW_IQ_CMD_FL0CPRIO(iq_params->fl0cprio) |616616+ FW_IQ_CMD_FL0PADEN(iq_params->fl0paden) |617617+ FW_IQ_CMD_FL0PACKEN(iq_params->fl0packen));618618+ cmdp->fl0dcaen_to_fl0cidxfthresh |= htons(619619+ FW_IQ_CMD_FL0DCAEN(iq_params->fl0dcaen) |620620+ FW_IQ_CMD_FL0DCACPU(iq_params->fl0dcacpu) |621621+ FW_IQ_CMD_FL0FBMIN(iq_params->fl0fbmin) |622622+ FW_IQ_CMD_FL0FBMAX(iq_params->fl0fbmax) |623623+ FW_IQ_CMD_FL0CIDXFTHRESH(iq_params->fl0cidxfthresh));624624+ cmdp->fl0size |= htons(iq_params->fl0size);625625+ cmdp->fl0addr |= cpu_to_be64(iq_params->fl0addr);626626+ }627627+} /* csio_mb_iq_write */628628+629629+/*630630+ * csio_mb_iq_alloc_write - Initializes the mailbox for allocating an631631+ * Ingress DMA Queue.632632+ *633633+ * @hw: The HW structure634634+ * @mbp: Mailbox structure to initialize635635+ * @priv: Private data.636636+ * @mb_tmo: Mailbox time-out period (in ms).637637+ * @iq_params: Ingress queue params needed for allocation & writing.638638+ * @cbfn: The call-back function639639+ *640640+ *641641+ */642642+void643643+csio_mb_iq_alloc_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,644644+ uint32_t mb_tmo, struct csio_iq_params *iq_params,645645+ void (*cbfn) (struct csio_hw *, struct csio_mb *))646646+{647647+ csio_mb_iq_alloc(hw, mbp, priv, mb_tmo, iq_params, cbfn);648648+ 
	csio_mb_iq_write(hw, mbp, priv, mb_tmo, true, iq_params, cbfn);
} /* csio_mb_iq_alloc_write */

/*
 * csio_mb_iq_alloc_write_rsp - Process the allocation & writing
 *				of ingress DMA queue mailbox's response.
 *
 * @hw: The HW structure.
 * @mbp: Mailbox structure to initialize.
 * @retval: Firmware return value.
 * @iq_params: Ingress queue parameters, after allocation and write.
 *
 * On success the firmware-assigned queue ids are copied out of the
 * response; on failure all ids are zeroed so stale values cannot leak
 * into a subsequent free/write request.
 */
void
csio_mb_iq_alloc_write_rsp(struct csio_hw *hw, struct csio_mb *mbp,
			   enum fw_retval *ret_val,
			   struct csio_iq_params *iq_params)
{
	struct fw_iq_cmd *rsp = (struct fw_iq_cmd *)(mbp->mb);

	*ret_val = FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16));
	if (*ret_val == FW_SUCCESS) {
		iq_params->physiqid = ntohs(rsp->physiqid);
		iq_params->iqid = ntohs(rsp->iqid);
		iq_params->fl0id = ntohs(rsp->fl0id);
		iq_params->fl1id = ntohs(rsp->fl1id);
	} else {
		iq_params->physiqid = iq_params->iqid =
			iq_params->fl0id = iq_params->fl1id = 0;
	}
} /* csio_mb_iq_alloc_write_rsp */

/*
 * csio_mb_iq_free - Initializes the mailbox for freeing a
 * specified Ingress DMA Queue.
 *
 * @hw: The HW structure
 * @mbp: Mailbox structure to initialize
 * @priv: Private data
 * @mb_tmo: Mailbox time-out period (in ms).
 * @iq_params: Parameters of ingress queue, that is to be freed.
 * @cbfn: The call-back function
 *
 *
 */
void
csio_mb_iq_free(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
		uint32_t mb_tmo, struct csio_iq_params *iq_params,
		void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);

	cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD)		|
				FW_CMD_REQUEST | FW_CMD_EXEC	|
				FW_IQ_CMD_PFN(iq_params->pfn)	|
				FW_IQ_CMD_VFN(iq_params->vfn));
	cmdp->alloc_to_len16 = htonl(FW_IQ_CMD_FREE |
				     FW_CMD_LEN16(sizeof(*cmdp) / 16));
	cmdp->type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iq_params->type));

	cmdp->iqid = htons(iq_params->iqid);
	cmdp->fl0id = htons(iq_params->fl0id);
	cmdp->fl1id = htons(iq_params->fl1id);

} /* csio_mb_iq_free */

/*
 * csio_mb_eq_ofld_alloc - Initializes the mailbox for allocating
 * an offload-egress queue.
 *
 * @hw: The HW structure
 * @mbp: Mailbox structure to initialize
 * @priv: Private data
 * @mb_tmo: Mailbox time-out period (in ms).
 * @eq_ofld_params: (Offload) Egress queue parameters.
 * @cbfn: The call-back function
 *
 *
 */
static void
csio_mb_eq_ofld_alloc(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
		uint32_t mb_tmo, struct csio_eq_params *eq_ofld_params,
		void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
	cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD)		|
				FW_CMD_REQUEST | FW_CMD_EXEC		|
				FW_EQ_OFLD_CMD_PFN(eq_ofld_params->pfn)	|
				FW_EQ_OFLD_CMD_VFN(eq_ofld_params->vfn));
	cmdp->alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC |
				     FW_CMD_LEN16(sizeof(*cmdp) / 16));

} /* csio_mb_eq_ofld_alloc */

/*
 * csio_mb_eq_ofld_write - Initializes the mailbox for writing
 * an allocated offload-egress queue.
 *
 * @hw: The HW structure
 * @mbp: Mailbox structure to initialize
 * @priv: Private data
 * @mb_tmo: Mailbox time-out period (in ms).
 * @cascaded_req: TRUE - if this request is cascaded with Eq-alloc request.
 * @eq_ofld_params: (Offload) Egress queue parameters.
 * @cbfn: The call-back function
 *
 *
 * NOTE: We OR relevant bits with cmdp->XXX, instead of just equating,
 * because this EQ write request can be cascaded with a previous
 * EQ alloc request, and we dont want to over-write the bits set by
 * that request. This logic will work even in a non-cascaded case, since the
 * cmdp structure is zeroed out by CSIO_INIT_MBP.
 */
static void
csio_mb_eq_ofld_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
		      uint32_t mb_tmo, bool cascaded_req,
		      struct csio_eq_params *eq_ofld_params,
		      void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb);

	uint32_t eq_start_stop = (eq_ofld_params->eqstart)	?
				FW_EQ_OFLD_CMD_EQSTART	: FW_EQ_OFLD_CMD_EQSTOP;

	/*
	 * If this EQ write is cascaded with EQ alloc request, do not
	 * re-initialize with 0's.
	 *
	 */
	if (!cascaded_req)
		CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);

	cmdp->op_to_vfn |= htonl(FW_CMD_OP(FW_EQ_OFLD_CMD)	|
				FW_CMD_REQUEST | FW_CMD_WRITE	|
				FW_EQ_OFLD_CMD_PFN(eq_ofld_params->pfn) |
				FW_EQ_OFLD_CMD_VFN(eq_ofld_params->vfn));
	cmdp->alloc_to_len16 |= htonl(eq_start_stop |
				      FW_CMD_LEN16(sizeof(*cmdp) / 16));

	cmdp->eqid_pkd |= htonl(FW_EQ_OFLD_CMD_EQID(eq_ofld_params->eqid));

	cmdp->fetchszm_to_iqid |= htonl(
		FW_EQ_OFLD_CMD_HOSTFCMODE(eq_ofld_params->hostfcmode)	|
		FW_EQ_OFLD_CMD_CPRIO(eq_ofld_params->cprio)		|
		FW_EQ_OFLD_CMD_PCIECHN(eq_ofld_params->pciechn)		|
		FW_EQ_OFLD_CMD_IQID(eq_ofld_params->iqid));

	cmdp->dcaen_to_eqsize |= htonl(
		FW_EQ_OFLD_CMD_DCAEN(eq_ofld_params->dcaen)		|
		FW_EQ_OFLD_CMD_DCACPU(eq_ofld_params->dcacpu)		|
		FW_EQ_OFLD_CMD_FBMIN(eq_ofld_params->fbmin)		|
		FW_EQ_OFLD_CMD_FBMAX(eq_ofld_params->fbmax)		|
		FW_EQ_OFLD_CMD_CIDXFTHRESHO(eq_ofld_params->cidxfthresho) |
		FW_EQ_OFLD_CMD_CIDXFTHRESH(eq_ofld_params->cidxfthresh)	|
		FW_EQ_OFLD_CMD_EQSIZE(eq_ofld_params->eqsize));

	cmdp->eqaddr |= cpu_to_be64(eq_ofld_params->eqaddr);

} /* csio_mb_eq_ofld_write */

/*
 * csio_mb_eq_ofld_alloc_write - Initializes the mailbox for allocation
 * writing into an Egress DMA Queue.
 *
 * @hw: The HW structure
 * @mbp: Mailbox structure to initialize
 * @priv: Private data.
 * @mb_tmo: Mailbox time-out period (in ms).
 * @eq_ofld_params: (Offload) Egress queue parameters.
 * @cbfn: The call-back function
 *
 * Issues a cascaded alloc+write: the write half ORs its bits on top of
 * the alloc request built by csio_mb_eq_ofld_alloc().
 */
void
csio_mb_eq_ofld_alloc_write(struct csio_hw *hw, struct csio_mb *mbp,
			    void *priv, uint32_t mb_tmo,
			    struct csio_eq_params *eq_ofld_params,
			    void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	csio_mb_eq_ofld_alloc(hw, mbp, priv, mb_tmo, eq_ofld_params, cbfn);
	csio_mb_eq_ofld_write(hw, mbp, priv, mb_tmo, true,
			      eq_ofld_params, cbfn);
} /* csio_mb_eq_ofld_alloc_write */

/*
 * csio_mb_eq_ofld_alloc_write_rsp - Process the allocation
 * & write egress DMA queue mailbox's response.
 *
 * @hw: The HW structure.
 * @mbp: Mailbox structure to initialize.
 * @retval: Firmware return value.
 * @eq_ofld_params: (Offload) Egress queue parameters.
 *
 */
void
csio_mb_eq_ofld_alloc_write_rsp(struct csio_hw *hw,
				struct csio_mb *mbp, enum fw_retval *ret_val,
				struct csio_eq_params *eq_ofld_params)
{
	struct fw_eq_ofld_cmd *rsp = (struct fw_eq_ofld_cmd *)(mbp->mb);

	*ret_val = FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16));

	if (*ret_val == FW_SUCCESS) {
		eq_ofld_params->eqid = FW_EQ_OFLD_CMD_EQID_GET(
						ntohl(rsp->eqid_pkd));
		eq_ofld_params->physeqid = FW_EQ_OFLD_CMD_PHYSEQID_GET(
						ntohl(rsp->physeqid_pkd));
	} else
		eq_ofld_params->eqid = 0;

} /* csio_mb_eq_ofld_alloc_write_rsp */

/*
 * csio_mb_eq_ofld_free - Initializes the mailbox for freeing a
 * specified Egress DMA Queue.
 *
 * @hw: The HW structure
 * @mbp: Mailbox structure to initialize
 * @priv: Private data area.
 * @mb_tmo: Mailbox time-out period (in ms).
 * @eq_ofld_params: (Offload) Egress queue parameters, that is to be freed.
 * @cbfn: The call-back function
 *
 *
 */
void
csio_mb_eq_ofld_free(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
		     uint32_t mb_tmo, struct csio_eq_params *eq_ofld_params,
		     void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);

	cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD)	|
				FW_CMD_REQUEST | FW_CMD_EXEC	|
				FW_EQ_OFLD_CMD_PFN(eq_ofld_params->pfn) |
				FW_EQ_OFLD_CMD_VFN(eq_ofld_params->vfn));
	cmdp->alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE |
				     FW_CMD_LEN16(sizeof(*cmdp) / 16));
	cmdp->eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eq_ofld_params->eqid));

} /* csio_mb_eq_ofld_free */

/*
 * csio_write_fcoe_link_cond_init_mb - Initialize Mailbox to write FCoE link
 *				 condition.
 *
 * @ln: The Lnode structure
 * @mbp: Mailbox structure to initialize
 * @mb_tmo: Mailbox time-out period (in ms).
 * @cbfn: The call back function.
 *
 *
 */
void
csio_write_fcoe_link_cond_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
			uint32_t mb_tmo, uint8_t port_id, uint32_t sub_opcode,
			uint8_t cos, bool link_status, uint32_t fcfi,
			void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_fcoe_link_cmd *cmdp =
				(struct fw_fcoe_link_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);

	cmdp->op_to_portid = htonl((
			FW_CMD_OP(FW_FCOE_LINK_CMD)		|
			FW_CMD_REQUEST				|
			FW_CMD_WRITE				|
			FW_FCOE_LINK_CMD_PORTID(port_id)));
	cmdp->sub_opcode_fcfi = htonl(
			FW_FCOE_LINK_CMD_SUB_OPCODE(sub_opcode)	|
			FW_FCOE_LINK_CMD_FCFI(fcfi));
	cmdp->lstatus = link_status;
	cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));

} /* csio_write_fcoe_link_cond_init_mb */

/*
 * csio_fcoe_read_res_info_init_mb - Initializes the mailbox for reading FCoE
 *				resource information(FW_GET_RES_INFO_CMD).
 *
 * @hw: The HW structure
 * @mbp: Mailbox structure to initialize
 * @mb_tmo: Mailbox time-out period (in ms).
 * @cbfn: The call-back function
 *
 *
 */
void
csio_fcoe_read_res_info_init_mb(struct csio_hw *hw, struct csio_mb *mbp,
			uint32_t mb_tmo,
			void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_fcoe_res_info_cmd *cmdp =
			(struct fw_fcoe_res_info_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, mb_tmo, hw, cbfn, 1);

	cmdp->op_to_read = htonl((FW_CMD_OP(FW_FCOE_RES_INFO_CMD)	|
				  FW_CMD_REQUEST			|
				  FW_CMD_READ));

	cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));

} /* csio_fcoe_read_res_info_init_mb */

/*
 * csio_fcoe_vnp_alloc_init_mb - Initializes the mailbox for allocating VNP
 *				in the firmware (FW_FCOE_VNP_CMD).
 *
 * @ln: The Lnode structure.
 * @mbp: Mailbox structure to initialize.
 * @mb_tmo: Mailbox time-out period (in ms).
 * @fcfi: FCF Index.
 * @vnpi: vnpi
 * @iqid: iqid
 * @vnport_wwnn: vnport WWNN
 * @vnport_wwpn: vnport WWPN
 * @cbfn: The call-back function.
 *
 * If both WWNN and WWPN are zero, the firmware is asked to generate
 * the world-wide names (FW_FCOE_VNP_CMD_GEN_WWN).
 */
void
csio_fcoe_vnp_alloc_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
		uint32_t mb_tmo, uint32_t fcfi, uint32_t vnpi, uint16_t iqid,
		uint8_t vnport_wwnn[8],	uint8_t vnport_wwpn[8],
		void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_fcoe_vnp_cmd *cmdp =
			(struct fw_fcoe_vnp_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);

	cmdp->op_to_fcfi = htonl((FW_CMD_OP(FW_FCOE_VNP_CMD)		|
				  FW_CMD_REQUEST			|
				  FW_CMD_EXEC				|
				  FW_FCOE_VNP_CMD_FCFI(fcfi)));

	cmdp->alloc_to_len16 = htonl(FW_FCOE_VNP_CMD_ALLOC		|
				     FW_CMD_LEN16(sizeof(*cmdp) / 16));

	cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi));

	cmdp->iqid = htons(iqid);

	/*
	 * NOTE(review): wwn_to_u64() is called before the NULL checks
	 * below — confirm callers never pass NULL name buffers here.
	 */
	if (!wwn_to_u64(vnport_wwnn) && !wwn_to_u64(vnport_wwpn))
		cmdp->gen_wwn_to_vnpi |= htonl(FW_FCOE_VNP_CMD_GEN_WWN);

	if (vnport_wwnn)
		memcpy(cmdp->vnport_wwnn, vnport_wwnn, 8);
	if (vnport_wwpn)
		memcpy(cmdp->vnport_wwpn, vnport_wwpn, 8);

} /* csio_fcoe_vnp_alloc_init_mb */

/*
 * csio_fcoe_vnp_read_init_mb - Prepares VNP read cmd.
 * @ln: The Lnode structure.
 * @mbp: Mailbox structure to initialize.
 * @mb_tmo: Mailbox time-out period (in ms).
 * @fcfi: FCF Index.
 * @vnpi: vnpi
 * @cbfn: The call-back handler.
 */
void
csio_fcoe_vnp_read_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
		uint32_t mb_tmo, uint32_t fcfi, uint32_t vnpi,
		void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_fcoe_vnp_cmd *cmdp =
			(struct fw_fcoe_vnp_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
	cmdp->op_to_fcfi = htonl(FW_CMD_OP(FW_FCOE_VNP_CMD)	|
				 FW_CMD_REQUEST			|
				 FW_CMD_READ			|
				 FW_FCOE_VNP_CMD_FCFI(fcfi));
	cmdp->alloc_to_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
	cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi));
}

/*
 * csio_fcoe_vnp_free_init_mb - Initializes the mailbox for freeing an
 *			allocated VNP in the firmware (FW_FCOE_VNP_CMD).
 *
 * @ln: The Lnode structure.
 * @mbp: Mailbox structure to initialize.
 * @mb_tmo: Mailbox time-out period (in ms).
 * @fcfi: FCF flow id
 * @vnpi: VNP flow id
 * @cbfn: The call-back function.
 * Return: None
 */
void
csio_fcoe_vnp_free_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
		uint32_t mb_tmo, uint32_t fcfi, uint32_t vnpi,
		void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_fcoe_vnp_cmd *cmdp =
			(struct fw_fcoe_vnp_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);

	cmdp->op_to_fcfi = htonl(FW_CMD_OP(FW_FCOE_VNP_CMD)	|
				 FW_CMD_REQUEST			|
				 FW_CMD_EXEC			|
				 FW_FCOE_VNP_CMD_FCFI(fcfi));
	cmdp->alloc_to_len16 = htonl(FW_FCOE_VNP_CMD_FREE |
				     FW_CMD_LEN16(sizeof(*cmdp) / 16));
	cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi));
}

/*
 * csio_fcoe_read_fcf_init_mb - Initializes the mailbox to read the
 *				FCF records.
 *
 * @ln: The Lnode structure
 * @mbp: Mailbox structure to initialize
 * @mb_tmo: Mailbox time-out period (in ms).
 * @fcf_params: FC-Forwarder parameters.
 * @cbfn: The call-back function
 *
 *
 */
void
csio_fcoe_read_fcf_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
		uint32_t mb_tmo, uint32_t portid, uint32_t fcfi,
		void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_fcoe_fcf_cmd *cmdp =
			(struct fw_fcoe_fcf_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);

	cmdp->op_to_fcfi = htonl(FW_CMD_OP(FW_FCOE_FCF_CMD)	|
				 FW_CMD_REQUEST			|
				 FW_CMD_READ			|
				 FW_FCOE_FCF_CMD_FCFI(fcfi));
	cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));

} /* csio_fcoe_read_fcf_init_mb */

/*
 * csio_fcoe_read_portparams_init_mb - Initializes the mailbox to read
 * per-port FCoE statistics (FW_FCOE_STATS_CMD).
 *
 * @hw: The HW structure
 * @mbp: Mailbox structure to initialize
 * @mb_tmo: Mailbox time-out period (in ms).
 * @portparams: Port parameters (port id, number of stats, starting index).
 * @cbfn: The call-back function
 */
void
csio_fcoe_read_portparams_init_mb(struct csio_hw *hw, struct csio_mb *mbp,
				uint32_t mb_tmo,
				struct fw_fcoe_port_cmd_params *portparams,
				void (*cbfn)(struct csio_hw *,
					     struct csio_mb *))
{
	struct fw_fcoe_stats_cmd *cmdp = (struct fw_fcoe_stats_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, mb_tmo, hw, cbfn, 1);
	/* Response carries stats flits beyond the command; use full size. */
	mbp->mb_size = 64;

	cmdp->op_to_flowid = htonl(FW_CMD_OP(FW_FCOE_STATS_CMD)         |
				   FW_CMD_REQUEST | FW_CMD_READ);
	cmdp->free_to_len16 = htonl(FW_CMD_LEN16(CSIO_MAX_MB_SIZE/16));

	cmdp->u.ctl.nstats_port = FW_FCOE_STATS_CMD_NSTATS(portparams->nstats) |
				  FW_FCOE_STATS_CMD_PORT(portparams->portid);

	cmdp->u.ctl.port_valid_ix = FW_FCOE_STATS_CMD_IX(portparams->idx)      |
				    FW_FCOE_STATS_CMD_PORT_VALID;

} /* csio_fcoe_read_portparams_init_mb */

/*
 * csio_mb_process_portparams_rsp - Process the FCoE port stats response.
 *
 * @hw: The HW structure.
 * @mbp: Completed mailbox.
 * @retval: Firmware return value (out).
 * @portparams: The request parameters (idx selects which stats window
 *		this response covers).
 * @portstats: Accumulated port statistics (out).
 *
 * The full stats set does not fit in one mailbox, so it is read in
 * windows of up to 6 flits (8 bytes each). idx values 1, 7 and 13 are
 * the starting indices of the three windows; each branch below copies
 * its window's flits into the corresponding portstats fields.
 */
void
csio_mb_process_portparams_rsp(
				struct csio_hw *hw,
				struct csio_mb *mbp,
				enum fw_retval *retval,
				struct fw_fcoe_port_cmd_params *portparams,
				struct fw_fcoe_port_stats *portstats
			     )
{
	struct fw_fcoe_stats_cmd *rsp = (struct fw_fcoe_stats_cmd *)(mbp->mb);
	struct fw_fcoe_port_stats stats;
	uint8_t *src;
	uint8_t *dst;

	*retval = FW_CMD_RETVAL_GET(ntohl(rsp->free_to_len16));

	memset(&stats, 0, sizeof(struct fw_fcoe_port_stats));

	if (*retval == FW_SUCCESS) {
		/* Place this window's flits at their natural offset
		 * within a zeroed local copy, then convert endianness
		 * field by field below.
		 */
		dst = (uint8_t *)(&stats) + ((portparams->idx - 1) * 8);
		src = (uint8_t *)rsp + (CSIO_STATS_OFFSET * 8);
		memcpy(dst, src, (portparams->nstats * 8));
		if (portparams->idx == 1) {
			/* Get the first 6 flits from the Mailbox */
			portstats->tx_bcast_bytes =
				be64_to_cpu(stats.tx_bcast_bytes);
			portstats->tx_bcast_frames =
				be64_to_cpu(stats.tx_bcast_frames);
			portstats->tx_mcast_bytes =
				be64_to_cpu(stats.tx_mcast_bytes);
			portstats->tx_mcast_frames =
				be64_to_cpu(stats.tx_mcast_frames);
			portstats->tx_ucast_bytes =
				be64_to_cpu(stats.tx_ucast_bytes);
			portstats->tx_ucast_frames =
				be64_to_cpu(stats.tx_ucast_frames);
		}
		if (portparams->idx == 7) {
			/* Get the second 6 flits from the Mailbox */
			portstats->tx_drop_frames =
				be64_to_cpu(stats.tx_drop_frames);
			portstats->tx_offload_bytes =
				be64_to_cpu(stats.tx_offload_bytes);
			portstats->tx_offload_frames =
				be64_to_cpu(stats.tx_offload_frames);
#if 0
			portstats->rx_pf_bytes =
				be64_to_cpu(stats.rx_pf_bytes);
			portstats->rx_pf_frames	=
				be64_to_cpu(stats.rx_pf_frames);
#endif
			portstats->rx_bcast_bytes =
				be64_to_cpu(stats.rx_bcast_bytes);
			portstats->rx_bcast_frames =
				be64_to_cpu(stats.rx_bcast_frames);
			portstats->rx_mcast_bytes =
				be64_to_cpu(stats.rx_mcast_bytes);
		}
		if (portparams->idx == 13) {
			/* Get the last 4 flits from the Mailbox */
			portstats->rx_mcast_frames =
				be64_to_cpu(stats.rx_mcast_frames);
			portstats->rx_ucast_bytes =
				be64_to_cpu(stats.rx_ucast_bytes);
			portstats->rx_ucast_frames =
				be64_to_cpu(stats.rx_ucast_frames);
			portstats->rx_err_frames =
				be64_to_cpu(stats.rx_err_frames);
		}
	}
}

/* Entry points/APIs for MB module					     */
/*
 * csio_mb_intr_enable - Enable Interrupts from mailboxes.
 * @hw: The HW structure
 *
 * Enables CIM interrupt bit in appropriate INT_ENABLE registers.
 */
void
csio_mb_intr_enable(struct csio_hw *hw)
{
	csio_wr_reg32(hw, MBMSGRDYINTEN(1), MYPF_REG(CIM_PF_HOST_INT_ENABLE));
	/* Read back to flush the posted write. */
	csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE));
}

/*
 * csio_mb_intr_disable - Disable Interrupts from mailboxes.
 * @hw: The HW structure
 *
 * Disable bit in HostInterruptEnable CIM register.
 */
void
csio_mb_intr_disable(struct csio_hw *hw)
{
	csio_wr_reg32(hw, MBMSGRDYINTEN(0), MYPF_REG(CIM_PF_HOST_INT_ENABLE));
	/* Read back to flush the posted write. */
	csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE));
}

/*
 * csio_mb_dump_fw_dbg - Dump a firmware FW_DEBUG_CMD message.
 * @hw: The HW structure
 * @cmd: The raw mailbox command (big-endian flits).
 *
 * Type 1 is a firmware printf-style message; anything else is treated
 * as a firmware assertion and logged as fatal.
 */
static void
csio_mb_dump_fw_dbg(struct csio_hw *hw, __be64 *cmd)
{
	struct fw_debug_cmd *dbg = (struct fw_debug_cmd *)cmd;

	if ((FW_DEBUG_CMD_TYPE_GET(ntohl(dbg->op_type))) == 1) {
		csio_info(hw, "FW print message:\n");
		csio_info(hw, "\tdebug->dprtstridx = %d\n",
			    ntohs(dbg->u.prt.dprtstridx));
		csio_info(hw, "\tdebug->dprtstrparam0 = 0x%x\n",
			    ntohl(dbg->u.prt.dprtstrparam0));
		csio_info(hw, "\tdebug->dprtstrparam1 = 0x%x\n",
			    ntohl(dbg->u.prt.dprtstrparam1));
		csio_info(hw, "\tdebug->dprtstrparam2 = 0x%x\n",
			    ntohl(dbg->u.prt.dprtstrparam2));
		csio_info(hw, "\tdebug->dprtstrparam3 = 0x%x\n",
			    ntohl(dbg->u.prt.dprtstrparam3));
	} else {
		/* This is a FW assertion */
		csio_fatal(hw, "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
			    dbg->u.assert.filename_0_7,
			    ntohl(dbg->u.assert.line),
			    ntohl(dbg->u.assert.x),
			    ntohl(dbg->u.assert.y));
	}
}

/*
 * csio_mb_debug_cmd_handler - Handle a FW_DEBUG_CMD found in the mailbox.
 * @hw: The HW structure
 *
 * Copies the debug command out of the mailbox data registers, dumps it,
 * and hands mailbox ownership back to the firmware.
 */
static void
csio_mb_debug_cmd_handler(struct csio_hw *hw)
{
	int i;
	__be64 cmd[CSIO_MB_MAX_REGS];
	uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL);
	uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA);
	int size = sizeof(struct fw_debug_cmd);

	/* Copy mailbox data */
	for (i = 0; i < size; i += 8)
		cmd[i / 8] = cpu_to_be64(csio_rd_reg64(hw, data_reg + i));

	csio_mb_dump_fw_dbg(hw, cmd);

	/* Notify FW of mailbox by setting owner as UP */
	csio_wr_reg32(hw, MBMSGVALID | MBINTREQ | MBOWNER(CSIO_MBOWNER_FW),
		      ctl_reg);

	csio_rd_reg32(hw, ctl_reg);
	wmb();
}

/*
 * csio_mb_issue - generic routine for issuing Mailbox commands.
 * @hw: The HW structure
 * @mbp: Mailbox command to issue
 *
 * Caller should hold hw lock across this call.
 *
 * Two modes: with a callback (mb_cbfn set) the command is issued (or
 * queued behind the currently-active one) and completion arrives via
 * interrupt; without a callback the routine polls for the response in
 * place (immediate mode).
 */
int
csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
{
	uint32_t owner, ctl;
	int i;
	uint32_t ii;
	__be64 *cmd = mbp->mb;
	__be64 hdr;
	struct csio_mbm *mbm = &hw->mbm;
	uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL);
	uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA);
	int size = mbp->mb_size;
	int rv = -EINVAL;
	struct fw_cmd_hdr *fw_hdr;

	/* Determine mode */
	if (mbp->mb_cbfn == NULL) {
		/* Need to issue/get results in the same context */
		if (mbp->tmo < CSIO_MB_POLL_FREQ) {
			csio_err(hw, "Invalid tmo: 0x%x\n", mbp->tmo);
			goto error_out;
		}
	} else if (!csio_is_host_intr_enabled(hw) ||
		   !csio_is_hw_intr_enabled(hw)) {
		csio_err(hw, "Cannot issue mailbox in interrupt mode 0x%x\n",
			 *((uint8_t *)mbp->mb));
			goto error_out;
	}

	if (mbm->mcurrent != NULL) {
		/* Queue mbox cmd, if another mbox cmd is active */
		if (mbp->mb_cbfn == NULL) {
			rv = -EBUSY;
			csio_dbg(hw, "Couldnt own Mailbox %x op:0x%x\n",
				    hw->pfn, *((uint8_t *)mbp->mb));

			goto error_out;
		} else {
			list_add_tail(&mbp->list, &mbm->req_q);
			CSIO_INC_STATS(mbm, n_activeq);

			return 0;
		}
	}

	/* Now get ownership of mailbox */
	owner = MBOWNER_GET(csio_rd_reg32(hw, ctl_reg));

	if (!csio_mb_is_host_owner(owner)) {

		/* Retry briefly while ownership is still unassigned. */
		for (i = 0; (owner == CSIO_MBOWNER_NONE) && (i < 3); i++)
			owner = MBOWNER_GET(csio_rd_reg32(hw, ctl_reg));
		/*
		 * Mailbox unavailable. In immediate mode, fail the command.
		 * In other modes, enqueue the request.
		 */
		if (!csio_mb_is_host_owner(owner)) {
			if (mbp->mb_cbfn == NULL) {
				rv = owner ? -EBUSY : -ETIMEDOUT;

				csio_dbg(hw,
					 "Couldnt own Mailbox %x op:0x%x "
					 "owner:%x\n",
					 hw->pfn, *((uint8_t *)mbp->mb), owner);
				goto error_out;
			} else {
				if (mbm->mcurrent == NULL) {
					csio_err(hw,
						 "Couldnt own Mailbox %x "
						 "op:0x%x owner:%x\n",
						 hw->pfn, *((uint8_t *)mbp->mb),
						 owner);
					csio_err(hw,
						 "No outstanding driver"
						 " mailbox as well\n");
					goto error_out;
				}
			}
		}
	}

	/* Mailbox is available, copy mailbox data into it */
	for (i = 0; i < size; i += 8) {
		csio_wr_reg64(hw, be64_to_cpu(*cmd), data_reg + i);
		cmd++;
	}

	CSIO_DUMP_MB(hw, hw->pfn, data_reg);

	/* Start completion timers in non-immediate modes and notify FW */
	if (mbp->mb_cbfn != NULL) {
		mbm->mcurrent = mbp;
		mod_timer(&mbm->timer, jiffies + msecs_to_jiffies(mbp->tmo));
		csio_wr_reg32(hw, MBMSGVALID | MBINTREQ |
			      MBOWNER(CSIO_MBOWNER_FW), ctl_reg);
	} else
		csio_wr_reg32(hw, MBMSGVALID | MBOWNER(CSIO_MBOWNER_FW),
			      ctl_reg);

	/* Flush posted writes */
	csio_rd_reg32(hw, ctl_reg);
	wmb();

	CSIO_INC_STATS(mbm, n_req);

	if (mbp->mb_cbfn)
		return 0;

	/* Poll for completion in immediate mode */
	cmd = mbp->mb;

	for (ii = 0; ii < mbp->tmo; ii += CSIO_MB_POLL_FREQ) {
		mdelay(CSIO_MB_POLL_FREQ);

		/* Check for response */
		ctl = csio_rd_reg32(hw, ctl_reg);
		if (csio_mb_is_host_owner(MBOWNER_GET(ctl))) {

			if (!(ctl & MBMSGVALID)) {
				csio_wr_reg32(hw, 0, ctl_reg);
				continue;
			}

			CSIO_DUMP_MB(hw, hw->pfn, data_reg);

			hdr = cpu_to_be64(csio_rd_reg64(hw, data_reg));
			fw_hdr = (struct fw_cmd_hdr *)&hdr;

			switch (FW_CMD_OP_GET(ntohl(fw_hdr->hi))) {
			case FW_DEBUG_CMD:
				/* FW debug message, not our response —
				 * consume it and keep polling.
				 */
				csio_mb_debug_cmd_handler(hw);
				continue;
			}

			/* Copy response */
			for (i = 0; i < size; i += 8)
				*cmd++ = cpu_to_be64(csio_rd_reg64
							  (hw, data_reg + i));
			csio_wr_reg32(hw, 0, ctl_reg);

			if (FW_CMD_RETVAL_GET(*(mbp->mb)))
				CSIO_INC_STATS(mbm, n_err);

			CSIO_INC_STATS(mbm, n_rsp);
			return 0;
		}
	}

	CSIO_INC_STATS(mbm, n_tmo);

	csio_err(hw, "Mailbox %x op:0x%x timed out!\n",
		 hw->pfn, *((uint8_t *)cmd));

	return -ETIMEDOUT;

error_out:
	CSIO_INC_STATS(mbm, n_err);
	return rv;
}

/*
 * csio_mb_completions - Completion handler for Mailbox commands
 * @hw: The HW structure
 * @cbfn_q: Completion queue.
 *
 * Drains cbfn_q, updating stats and invoking each mailbox's callback.
 */
void
csio_mb_completions(struct csio_hw *hw, struct list_head *cbfn_q)
{
	struct csio_mb *mbp;
	struct csio_mbm *mbm = &hw->mbm;
	enum fw_retval rv;

	while (!list_empty(cbfn_q)) {
		mbp = list_first_entry(cbfn_q, struct csio_mb, list);
		list_del_init(&mbp->list);

		rv = csio_mb_fw_retval(mbp);
		if ((rv != FW_SUCCESS) && (rv != FW_HOSTERROR))
			CSIO_INC_STATS(mbm, n_err);
		else if (rv != FW_HOSTERROR)
			CSIO_INC_STATS(mbm, n_rsp);

		if (mbp->mb_cbfn)
			mbp->mb_cbfn(hw, mbp);
	}
}

/*
 * csio_mb_portmod_changed - Log a port-module (transceiver) change.
 * @hw: The HW structure
 * @port_id: Port whose module state changed.
 */
static void
csio_mb_portmod_changed(struct csio_hw *hw, uint8_t port_id)
{
	static char *mod_str[] = {
		NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
	};

	struct csio_pport *port = &hw->pport[port_id];

	if (port->mod_type == FW_PORT_MOD_TYPE_NONE)
		csio_info(hw, "Port:%d - port module unplugged\n", port_id);
	else if (port->mod_type < ARRAY_SIZE(mod_str))
		csio_info(hw, "Port:%d - %s port module inserted\n", port_id,
			  mod_str[port->mod_type]);
	else if (port->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		csio_info(hw,
			  "Port:%d - unsupported optical port module "
			  "inserted\n", port_id);
	else if (port->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		csio_info(hw,
			  "Port:%d - unknown port module inserted, forcing "
			  "TWINAX\n", port_id);
	else if (port->mod_type == FW_PORT_MOD_TYPE_ERROR)
		csio_info(hw, "Port:%d - transceiver module error\n", port_id);
	else
		csio_info(hw, "Port:%d - unknown module type %d inserted\n",
			  port_id, port->mod_type);
}

/*
 * csio_mb_fwevt_handler - Handle a firmware event-queue mailbox message.
 * @hw: The HW structure
 * @cmd: The raw command flits.
 *
 * Handles FW_PORT_CMD link/module updates and FW_DEBUG_CMD dumps.
 * Returns 0 on success, -EINVAL for unhandled opcodes/actions.
 */
int
csio_mb_fwevt_handler(struct csio_hw *hw, __be64 *cmd)
{
	uint8_t opcode = *(uint8_t *)cmd;
	struct fw_port_cmd *pcmd;
	uint8_t port_id;
	uint32_t link_status;
	uint16_t action;
	uint8_t mod_type;

	if (opcode == FW_PORT_CMD) {
		pcmd = (struct fw_port_cmd *)cmd;
		port_id = FW_PORT_CMD_PORTID_GET(
				ntohl(pcmd->op_to_portid));
		action = FW_PORT_CMD_ACTION_GET(
				ntohl(pcmd->action_to_len16));
		if (action != FW_PORT_ACTION_GET_PORT_INFO) {
			csio_err(hw, "Unhandled FW_PORT_CMD action: %u\n",
				action);
			return -EINVAL;
		}

		link_status = ntohl(pcmd->u.info.lstatus_to_modtype);
		mod_type = FW_PORT_CMD_MODTYPE_GET(link_status);

		hw->pport[port_id].link_status =
			FW_PORT_CMD_LSTATUS_GET(link_status);
		hw->pport[port_id].link_speed =
			FW_PORT_CMD_LSPEED_GET(link_status);

		csio_info(hw, "Port:%x - LINK %s\n", port_id,
			FW_PORT_CMD_LSTATUS_GET(link_status) ? "UP" : "DOWN");

		if (mod_type != hw->pport[port_id].mod_type) {
			hw->pport[port_id].mod_type = mod_type;
			csio_mb_portmod_changed(hw, port_id);
		}
	} else if (opcode == FW_DEBUG_CMD) {
		csio_mb_dump_fw_dbg(hw, cmd);
	} else {
		csio_dbg(hw, "Gen MB can't handle op:0x%x on evtq.\n", opcode);
		return -EINVAL;
	}

	return 0;
}

/*
 * csio_mb_isr_handler - Handle mailboxes related interrupts.
 * @hw: The HW structure
 *
 * Called from the ISR to handle Mailbox related interrupts.
 * HW Lock should be held across this call.
 */
int
csio_mb_isr_handler(struct csio_hw *hw)
{
	struct csio_mbm		*mbm = &hw->mbm;
	struct csio_mb		*mbp =  mbm->mcurrent;
	__be64			*cmd;
	uint32_t		ctl, cim_cause, pl_cause;
	int			i;
	uint32_t		ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL);
	uint32_t		data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA);
	int			size;
	__be64			hdr;
	struct fw_cmd_hdr	*fw_hdr;

	pl_cause = csio_rd_reg32(hw, MYPF_REG(PL_PF_INT_CAUSE));
	cim_cause = csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_CAUSE));

	if (!(pl_cause & PFCIM) || !(cim_cause & MBMSGRDYINT)) {
		CSIO_INC_STATS(hw, n_mbint_unexp);
		return -EINVAL;
	}

	/*
	 * The cause registers below HAVE to be cleared in the SAME
	 * order as below: The low level cause register followed by
	 * the upper level cause register. In other words, CIM-cause
	 * first followed by PL-Cause next.
	 */
	csio_wr_reg32(hw, MBMSGRDYINT, MYPF_REG(CIM_PF_HOST_INT_CAUSE));
	csio_wr_reg32(hw, PFCIM, MYPF_REG(PL_PF_INT_CAUSE));

	ctl = csio_rd_reg32(hw, ctl_reg);

	if (csio_mb_is_host_owner(MBOWNER_GET(ctl))) {

		CSIO_DUMP_MB(hw, hw->pfn, data_reg);

		if (!(ctl & MBMSGVALID)) {
			csio_warn(hw,
				  "Stray mailbox interrupt recvd,"
				  " mailbox data not valid\n");
			csio_wr_reg32(hw, 0, ctl_reg);
			/* Flush */
			csio_rd_reg32(hw, ctl_reg);
			return -EINVAL;
		}

		hdr = cpu_to_be64(csio_rd_reg64(hw, data_reg));
		fw_hdr = (struct fw_cmd_hdr *)&hdr;

		switch (FW_CMD_OP_GET(ntohl(fw_hdr->hi))) {
		case FW_DEBUG_CMD:
			csio_mb_debug_cmd_handler(hw);
			return -EINVAL;
#if 0
		case FW_ERROR_CMD:
		case FW_INITIALIZE_CMD: /* When we are not master */
#endif
		}

		CSIO_ASSERT(mbp != NULL);

		cmd = mbp->mb;
		size = mbp->mb_size;
		/* Get response */
		for (i = 0; i < size; i += 8)
			*cmd++ = cpu_to_be64(csio_rd_reg64
						  (hw, data_reg + i));

		csio_wr_reg32(hw, 0, ctl_reg);
		/* Flush */
		csio_rd_reg32(hw, ctl_reg);

		mbm->mcurrent = NULL;

		/* Add completion to tail of cbfn queue */
		list_add_tail(&mbp->list, &mbm->cbfn_q);
		CSIO_INC_STATS(mbm, n_cbfnq);

		/*
		 * Enqueue event to EventQ. Events processing happens
		 * in Event worker thread context
		 */
		/* NOTE(review): sizeof(mbp) is the pointer size — confirm
		 * csio_enqueue_evt and its consumer agree that the event
		 * payload is the mailbox pointer, not the mailbox itself.
		 */
		if (csio_enqueue_evt(hw, CSIO_EVT_MBX, mbp, sizeof(mbp)))
			CSIO_INC_STATS(hw, n_evt_drop);

		return 0;

	} else {
		/*
		 * We can get here if mailbox MSIX vector is shared,
		 * or in INTx case. Or a stray interrupt.
		 */
		csio_dbg(hw, "Host not owner, no mailbox interrupt\n");
		CSIO_INC_STATS(hw, n_int_stray);
		return -EINVAL;
	}
}

/*
 * csio_mb_tmo_handler - Timeout handler
 * @hw: The HW structure
 *
 * Detaches the timed-out mailbox from mbm->mcurrent and fakes an
 * FW_ETIMEDOUT retval in its header so the caller's completion path
 * sees a failure. Returns the mailbox, or NULL if the completion
 * handler raced with the timer and won.
 */
struct csio_mb *
csio_mb_tmo_handler(struct csio_hw *hw)
{
	struct csio_mbm *mbm = &hw->mbm;
	struct csio_mb *mbp = mbm->mcurrent;
	struct fw_cmd_hdr *fw_hdr;

	/*
	 * Could be a race b/w the completion handler and the timer
	 * and the completion handler won that race.
	 */
	if (mbp == NULL) {
		CSIO_DB_ASSERT(0);
		return NULL;
	}

	fw_hdr = (struct fw_cmd_hdr *)(mbp->mb);

	csio_dbg(hw, "Mailbox num:%x op:0x%x timed out\n", hw->pfn,
		    FW_CMD_OP_GET(ntohl(fw_hdr->hi)));

	mbm->mcurrent = NULL;
	CSIO_INC_STATS(mbm, n_tmo);
	fw_hdr->lo = htonl(FW_CMD_RETVAL(FW_ETIMEDOUT));

	return mbp;
}

/*
 * csio_mb_cancel_all - Cancel all waiting commands.
 * @hw: The HW structure
 * @cbfn_q: The callback queue.
 *
 * Caller should hold hw lock across this call.
 * Moves the active, requested and completed mailboxes onto cbfn_q and
 * marks each with an FW_HOSTERROR retval.
 */
void
csio_mb_cancel_all(struct csio_hw *hw, struct list_head *cbfn_q)
{
	struct csio_mb *mbp;
	struct csio_mbm *mbm = &hw->mbm;
	struct fw_cmd_hdr *hdr;
	struct list_head *tmp;

	if (mbm->mcurrent) {
		mbp = mbm->mcurrent;

		/* Stop mailbox completion timer */
		del_timer_sync(&mbm->timer);

		/* Add completion to tail of cbfn queue */
		list_add_tail(&mbp->list, cbfn_q);
		mbm->mcurrent = NULL;
	}

	if (!list_empty(&mbm->req_q)) {
		list_splice_tail_init(&mbm->req_q, cbfn_q);
		mbm->stats.n_activeq = 0;
	}

	if (!list_empty(&mbm->cbfn_q)) {
		list_splice_tail_init(&mbm->cbfn_q, cbfn_q);
		mbm->stats.n_cbfnq = 0;
	}

	if (list_empty(cbfn_q))
		return;

	list_for_each(tmp, cbfn_q) {
		/* Relies on 'list' being the first member of csio_mb. */
		mbp = (struct csio_mb *)tmp;
		hdr = (struct fw_cmd_hdr *)(mbp->mb);

		csio_dbg(hw, "Cancelling pending mailbox num %x op:%x\n",
			    hw->pfn, FW_CMD_OP_GET(ntohl(hdr->hi)));

		CSIO_INC_STATS(mbm, n_cancel);
		hdr->lo = htonl(FW_CMD_RETVAL(FW_HOSTERROR));
	}
}

/*
 * csio_mbm_init - Initialize Mailbox module
 * @mbm: Mailbox module
 * @hw: The HW structure
 * @timer: Timing function for interrupting mailboxes
 *
 * Initialize timer and the request/response queues.
 */
int
csio_mbm_init(struct csio_mbm *mbm, struct csio_hw *hw,
	      void (*timer_fn)(uintptr_t))
{
	struct timer_list *timer = &mbm->timer;

	init_timer(timer);
	timer->function = timer_fn;
	timer->data = (unsigned long)hw;

	INIT_LIST_HEAD(&mbm->req_q);
	INIT_LIST_HEAD(&mbm->cbfn_q);
	csio_set_mb_intr_idx(mbm, -1);

	return 0;
}

/*
 * csio_mbm_exit - Uninitialize mailbox module
 * @mbm: Mailbox module
 *
 * Stop timer.
 */
void
csio_mbm_exit(struct csio_mbm *mbm)
{
	del_timer_sync(&mbm->timer);

	CSIO_DB_ASSERT(mbm->mcurrent == NULL);
	CSIO_DB_ASSERT(list_empty(&mbm->req_q));
	CSIO_DB_ASSERT(list_empty(&mbm->cbfn_q));
}
+278
drivers/scsi/csiostor/csio_mb.h
···11+/*22+ * This file is part of the Chelsio FCoE driver for Linux.33+ *44+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.55+ *66+ * This software is available to you under a choice of one of two77+ * licenses. You may choose to be licensed under the terms of the GNU88+ * General Public License (GPL) Version 2, available from the file99+ * COPYING in the main directory of this source tree, or the1010+ * OpenIB.org BSD license below:1111+ *1212+ * Redistribution and use in source and binary forms, with or1313+ * without modification, are permitted provided that the following1414+ * conditions are met:1515+ *1616+ * - Redistributions of source code must retain the above1717+ * copyright notice, this list of conditions and the following1818+ * disclaimer.1919+ *2020+ * - Redistributions in binary form must reproduce the above2121+ * copyright notice, this list of conditions and the following2222+ * disclaimer in the documentation and/or other materials2323+ * provided with the distribution.2424+ *2525+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,2626+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF2727+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND2828+ * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS2929+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN3030+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN3131+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE3232+ * SOFTWARE.3333+ */3434+3535+#ifndef __CSIO_MB_H__3636+#define __CSIO_MB_H__3737+3838+#include <linux/timer.h>3939+#include <linux/completion.h>4040+4141+#include "t4fw_api.h"4242+#include "t4fw_api_stor.h"4343+#include "csio_defs.h"4444+4545+#define CSIO_STATS_OFFSET (2)4646+#define CSIO_NUM_STATS_PER_MB (6)4747+4848+struct fw_fcoe_port_cmd_params {4949+ uint8_t portid;5050+ uint8_t idx;5151+ uint8_t nstats;5252+};5353+5454+#define CSIO_DUMP_MB(__hw, __num, __mb) \5555+ csio_dbg(__hw, "\t%llx %llx %llx %llx %llx %llx %llx %llx\n", \5656+ (unsigned long long)csio_rd_reg64(__hw, __mb), \5757+ (unsigned long long)csio_rd_reg64(__hw, __mb + 8), \5858+ (unsigned long long)csio_rd_reg64(__hw, __mb + 16), \5959+ (unsigned long long)csio_rd_reg64(__hw, __mb + 24), \6060+ (unsigned long long)csio_rd_reg64(__hw, __mb + 32), \6161+ (unsigned long long)csio_rd_reg64(__hw, __mb + 40), \6262+ (unsigned long long)csio_rd_reg64(__hw, __mb + 48), \6363+ (unsigned long long)csio_rd_reg64(__hw, __mb + 56))6464+6565+#define CSIO_MB_MAX_REGS 86666+#define CSIO_MAX_MB_SIZE 646767+#define CSIO_MB_POLL_FREQ 5 /* 5 ms */6868+#define CSIO_MB_DEFAULT_TMO FW_CMD_MAX_TIMEOUT6969+7070+/* Device master in HELLO command */7171+enum csio_dev_master { CSIO_MASTER_CANT, CSIO_MASTER_MAY, CSIO_MASTER_MUST };7272+7373+enum csio_mb_owner { CSIO_MBOWNER_NONE, CSIO_MBOWNER_FW, CSIO_MBOWNER_PL };7474+7575+enum csio_dev_state {7676+ CSIO_DEV_STATE_UNINIT,7777+ CSIO_DEV_STATE_INIT,7878+ CSIO_DEV_STATE_ERR7979+};8080+8181+#define FW_PARAM_DEV(param) \8282+ (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \8383+ FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))8484+8585+#define FW_PARAM_PFVF(param) \8686+ (FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \8787+ 
FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \8888+ FW_PARAMS_PARAM_Y(0) | \8989+ FW_PARAMS_PARAM_Z(0))9090+9191+enum {9292+ PAUSE_RX = 1 << 0,9393+ PAUSE_TX = 1 << 1,9494+ PAUSE_AUTONEG = 1 << 29595+};9696+9797+#define CSIO_INIT_MBP(__mbp, __cp, __tmo, __priv, __fn, __clear) \9898+do { \9999+ if (__clear) \100100+ memset((__cp), 0, \101101+ CSIO_MB_MAX_REGS * sizeof(__be64)); \102102+ INIT_LIST_HEAD(&(__mbp)->list); \103103+ (__mbp)->tmo = (__tmo); \104104+ (__mbp)->priv = (void *)(__priv); \105105+ (__mbp)->mb_cbfn = (__fn); \106106+ (__mbp)->mb_size = sizeof(*(__cp)); \107107+} while (0)108108+109109+struct csio_mbm_stats {110110+ uint32_t n_req; /* number of mbox req */111111+ uint32_t n_rsp; /* number of mbox rsp */112112+ uint32_t n_activeq; /* number of mbox req active Q */113113+ uint32_t n_cbfnq; /* number of mbox req cbfn Q */114114+ uint32_t n_tmo; /* number of mbox timeout */115115+ uint32_t n_cancel; /* number of mbox cancel */116116+ uint32_t n_err; /* number of mbox error */117117+};118118+119119+/* Driver version of Mailbox */120120+struct csio_mb {121121+ struct list_head list; /* for req/resp */122122+ /* queue in driver */123123+ __be64 mb[CSIO_MB_MAX_REGS]; /* MB in HW format */124124+ int mb_size; /* Size of this125125+ * mailbox.126126+ */127127+ uint32_t tmo; /* Timeout */128128+ struct completion cmplobj; /* MB Completion129129+ * object130130+ */131131+ void (*mb_cbfn) (struct csio_hw *, struct csio_mb *);132132+ /* Callback fn */133133+ void *priv; /* Owner private ptr */134134+};135135+136136+struct csio_mbm {137137+ uint32_t a_mbox; /* Async mbox num */138138+ uint32_t intr_idx; /* Interrupt index */139139+ struct timer_list timer; /* Mbox timer */140140+ struct list_head req_q; /* Mbox request queue */141141+ struct list_head cbfn_q; /* Mbox completion q */142142+ struct csio_mb *mcurrent; /* Current mailbox */143143+ uint32_t req_q_cnt; /* Outstanding mbox144144+ * cmds145145+ */146146+ struct csio_mbm_stats stats; /* Statistics 
*/147147+};148148+149149+#define csio_set_mb_intr_idx(_m, _i) ((_m)->intr_idx = (_i))150150+#define csio_get_mb_intr_idx(_m) ((_m)->intr_idx)151151+152152+struct csio_iq_params;153153+struct csio_eq_params;154154+155155+enum fw_retval csio_mb_fw_retval(struct csio_mb *);156156+157157+/* MB helpers */158158+void csio_mb_hello(struct csio_hw *, struct csio_mb *, uint32_t,159159+ uint32_t, uint32_t, enum csio_dev_master,160160+ void (*)(struct csio_hw *, struct csio_mb *));161161+162162+void csio_mb_process_hello_rsp(struct csio_hw *, struct csio_mb *,163163+ enum fw_retval *, enum csio_dev_state *,164164+ uint8_t *);165165+166166+void csio_mb_bye(struct csio_hw *, struct csio_mb *, uint32_t,167167+ void (*)(struct csio_hw *, struct csio_mb *));168168+169169+void csio_mb_reset(struct csio_hw *, struct csio_mb *, uint32_t, int, int,170170+ void (*)(struct csio_hw *, struct csio_mb *));171171+172172+void csio_mb_params(struct csio_hw *, struct csio_mb *, uint32_t, unsigned int,173173+ unsigned int, unsigned int, const u32 *, u32 *, bool,174174+ void (*)(struct csio_hw *, struct csio_mb *));175175+176176+void csio_mb_process_read_params_rsp(struct csio_hw *, struct csio_mb *,177177+ enum fw_retval *, unsigned int , u32 *);178178+179179+void csio_mb_ldst(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,180180+ int reg);181181+182182+void csio_mb_caps_config(struct csio_hw *, struct csio_mb *, uint32_t,183183+ bool, bool, bool, bool,184184+ void (*)(struct csio_hw *, struct csio_mb *));185185+186186+void csio_rss_glb_config(struct csio_hw *, struct csio_mb *,187187+ uint32_t, uint8_t, unsigned int,188188+ void (*)(struct csio_hw *, struct csio_mb *));189189+190190+void csio_mb_pfvf(struct csio_hw *, struct csio_mb *, uint32_t,191191+ unsigned int, unsigned int, unsigned int,192192+ unsigned int, unsigned int, unsigned int,193193+ unsigned int, unsigned int, unsigned int,194194+ unsigned int, unsigned int, unsigned int,195195+ unsigned int, void (*) (struct csio_hw *, 
struct csio_mb *));196196+197197+void csio_mb_port(struct csio_hw *, struct csio_mb *, uint32_t,198198+ uint8_t, bool, uint32_t, uint16_t,199199+ void (*) (struct csio_hw *, struct csio_mb *));200200+201201+void csio_mb_process_read_port_rsp(struct csio_hw *, struct csio_mb *,202202+ enum fw_retval *, uint16_t *);203203+204204+void csio_mb_initialize(struct csio_hw *, struct csio_mb *, uint32_t,205205+ void (*)(struct csio_hw *, struct csio_mb *));206206+207207+void csio_mb_iq_alloc_write(struct csio_hw *, struct csio_mb *, void *,208208+ uint32_t, struct csio_iq_params *,209209+ void (*) (struct csio_hw *, struct csio_mb *));210210+211211+void csio_mb_iq_alloc_write_rsp(struct csio_hw *, struct csio_mb *,212212+ enum fw_retval *, struct csio_iq_params *);213213+214214+void csio_mb_iq_free(struct csio_hw *, struct csio_mb *, void *,215215+ uint32_t, struct csio_iq_params *,216216+ void (*) (struct csio_hw *, struct csio_mb *));217217+218218+void csio_mb_eq_ofld_alloc_write(struct csio_hw *, struct csio_mb *, void *,219219+ uint32_t, struct csio_eq_params *,220220+ void (*) (struct csio_hw *, struct csio_mb *));221221+222222+void csio_mb_eq_ofld_alloc_write_rsp(struct csio_hw *, struct csio_mb *,223223+ enum fw_retval *, struct csio_eq_params *);224224+225225+void csio_mb_eq_ofld_free(struct csio_hw *, struct csio_mb *, void *,226226+ uint32_t , struct csio_eq_params *,227227+ void (*) (struct csio_hw *, struct csio_mb *));228228+229229+void csio_fcoe_read_res_info_init_mb(struct csio_hw *, struct csio_mb *,230230+ uint32_t,231231+ void (*) (struct csio_hw *, struct csio_mb *));232232+233233+void csio_write_fcoe_link_cond_init_mb(struct csio_lnode *, struct csio_mb *,234234+ uint32_t, uint8_t, uint32_t, uint8_t, bool, uint32_t,235235+ void (*) (struct csio_hw *, struct csio_mb *));236236+237237+void csio_fcoe_vnp_alloc_init_mb(struct csio_lnode *, struct csio_mb *,238238+ uint32_t, uint32_t , uint32_t , uint16_t,239239+ uint8_t [8], uint8_t [8],240240+ void (*) 
(struct csio_hw *, struct csio_mb *));241241+242242+void csio_fcoe_vnp_read_init_mb(struct csio_lnode *, struct csio_mb *,243243+ uint32_t, uint32_t , uint32_t ,244244+ void (*) (struct csio_hw *, struct csio_mb *));245245+246246+void csio_fcoe_vnp_free_init_mb(struct csio_lnode *, struct csio_mb *,247247+ uint32_t , uint32_t, uint32_t ,248248+ void (*) (struct csio_hw *, struct csio_mb *));249249+250250+void csio_fcoe_read_fcf_init_mb(struct csio_lnode *, struct csio_mb *,251251+ uint32_t, uint32_t, uint32_t,252252+ void (*cbfn) (struct csio_hw *, struct csio_mb *));253253+254254+void csio_fcoe_read_portparams_init_mb(struct csio_hw *hw,255255+ struct csio_mb *mbp, uint32_t mb_tmo,256256+ struct fw_fcoe_port_cmd_params *portparams,257257+ void (*cbfn)(struct csio_hw *, struct csio_mb *));258258+259259+void csio_mb_process_portparams_rsp(struct csio_hw *hw, struct csio_mb *mbp,260260+ enum fw_retval *retval,261261+ struct fw_fcoe_port_cmd_params *portparams,262262+ struct fw_fcoe_port_stats *portstats);263263+264264+/* MB module functions */265265+int csio_mbm_init(struct csio_mbm *, struct csio_hw *,266266+ void (*)(uintptr_t));267267+void csio_mbm_exit(struct csio_mbm *);268268+void csio_mb_intr_enable(struct csio_hw *);269269+void csio_mb_intr_disable(struct csio_hw *);270270+271271+int csio_mb_issue(struct csio_hw *, struct csio_mb *);272272+void csio_mb_completions(struct csio_hw *, struct list_head *);273273+int csio_mb_fwevt_handler(struct csio_hw *, __be64 *);274274+int csio_mb_isr_handler(struct csio_hw *);275275+struct csio_mb *csio_mb_tmo_handler(struct csio_hw *);276276+void csio_mb_cancel_all(struct csio_hw *, struct list_head *);277277+278278+#endif /* ifndef __CSIO_MB_H__ */
+912
drivers/scsi/csiostor/csio_rnode.c
···11+/*22+ * This file is part of the Chelsio FCoE driver for Linux.33+ *44+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.55+ *66+ * This software is available to you under a choice of one of two77+ * licenses. You may choose to be licensed under the terms of the GNU88+ * General Public License (GPL) Version 2, available from the file99+ * COPYING in the main directory of this source tree, or the1010+ * OpenIB.org BSD license below:1111+ *1212+ * Redistribution and use in source and binary forms, with or1313+ * without modification, are permitted provided that the following1414+ * conditions are met:1515+ *1616+ * - Redistributions of source code must retain the above1717+ * copyright notice, this list of conditions and the following1818+ * disclaimer.1919+ *2020+ * - Redistributions in binary form must reproduce the above2121+ * copyright notice, this list of conditions and the following2222+ * disclaimer in the documentation and/or other materials2323+ * provided with the distribution.2424+ *2525+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,2626+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF2727+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND2828+ * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS2929+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN3030+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN3131+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE3232+ * SOFTWARE.3333+ */3434+3535+#include <linux/string.h>3636+#include <scsi/scsi_device.h>3737+#include <scsi/scsi_transport_fc.h>3838+#include <scsi/fc/fc_els.h>3939+#include <scsi/fc/fc_fs.h>4040+4141+#include "csio_hw.h"4242+#include "csio_lnode.h"4343+#include "csio_rnode.h"4444+4545+static int csio_rnode_init(struct csio_rnode *, struct csio_lnode *);4646+static void csio_rnode_exit(struct csio_rnode *);4747+4848+/* Static machine forward declarations */4949+static void csio_rns_uninit(struct csio_rnode *, enum csio_rn_ev);5050+static void csio_rns_ready(struct csio_rnode *, enum csio_rn_ev);5151+static void csio_rns_offline(struct csio_rnode *, enum csio_rn_ev);5252+static void csio_rns_disappeared(struct csio_rnode *, enum csio_rn_ev);5353+5454+/* RNF event mapping */5555+static enum csio_rn_ev fwevt_to_rnevt[] = {5656+ CSIO_RNFE_NONE, /* None */5757+ CSIO_RNFE_LOGGED_IN, /* PLOGI_ACC_RCVD */5858+ CSIO_RNFE_NONE, /* PLOGI_RJT_RCVD */5959+ CSIO_RNFE_PLOGI_RECV, /* PLOGI_RCVD */6060+ CSIO_RNFE_LOGO_RECV, /* PLOGO_RCVD */6161+ CSIO_RNFE_PRLI_DONE, /* PRLI_ACC_RCVD */6262+ CSIO_RNFE_NONE, /* PRLI_RJT_RCVD */6363+ CSIO_RNFE_PRLI_RECV, /* PRLI_RCVD */6464+ CSIO_RNFE_PRLO_RECV, /* PRLO_RCVD */6565+ CSIO_RNFE_NONE, /* NPORT_ID_CHGD */6666+ CSIO_RNFE_LOGO_RECV, /* FLOGO_RCVD */6767+ CSIO_RNFE_NONE, /* CLR_VIRT_LNK_RCVD */6868+ CSIO_RNFE_LOGGED_IN, /* FLOGI_ACC_RCVD */6969+ CSIO_RNFE_NONE, /* FLOGI_RJT_RCVD */7070+ CSIO_RNFE_LOGGED_IN, /* FDISC_ACC_RCVD */7171+ CSIO_RNFE_NONE, /* FDISC_RJT_RCVD */7272+ CSIO_RNFE_NONE, /* FLOGI_TMO_MAX_RETRY */7373+ CSIO_RNFE_NONE, /* IMPL_LOGO_ADISC_ACC */7474+ CSIO_RNFE_NONE, /* IMPL_LOGO_ADISC_RJT */7575+ CSIO_RNFE_NONE, /* IMPL_LOGO_ADISC_CNFLT */7676+ 
CSIO_RNFE_NONE, /* PRLI_TMO */7777+ CSIO_RNFE_NONE, /* ADISC_TMO */7878+ CSIO_RNFE_NAME_MISSING, /* RSCN_DEV_LOST */7979+ CSIO_RNFE_NONE, /* SCR_ACC_RCVD */8080+ CSIO_RNFE_NONE, /* ADISC_RJT_RCVD */8181+ CSIO_RNFE_NONE, /* LOGO_SNT */8282+ CSIO_RNFE_LOGO_RECV, /* PROTO_ERR_IMPL_LOGO */8383+};8484+8585+#define CSIO_FWE_TO_RNFE(_evt) ((_evt > PROTO_ERR_IMPL_LOGO) ? \8686+ CSIO_RNFE_NONE : \8787+ fwevt_to_rnevt[_evt])8888+int8989+csio_is_rnode_ready(struct csio_rnode *rn)9090+{9191+ return csio_match_state(rn, csio_rns_ready);9292+}9393+9494+static int9595+csio_is_rnode_uninit(struct csio_rnode *rn)9696+{9797+ return csio_match_state(rn, csio_rns_uninit);9898+}9999+100100+static int101101+csio_is_rnode_wka(uint8_t rport_type)102102+{103103+ if ((rport_type == FLOGI_VFPORT) ||104104+ (rport_type == FDISC_VFPORT) ||105105+ (rport_type == NS_VNPORT) ||106106+ (rport_type == FDMI_VNPORT))107107+ return 1;108108+109109+ return 0;110110+}111111+112112+/*113113+ * csio_rn_lookup - Finds the rnode with the given flowid114114+ * @ln - lnode115115+ * @flowid - flowid.116116+ *117117+ * Does the rnode lookup on the given lnode and flowid.If no matching entry118118+ * found, NULL is returned.119119+ */120120+static struct csio_rnode *121121+csio_rn_lookup(struct csio_lnode *ln, uint32_t flowid)122122+{123123+ struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;124124+ struct list_head *tmp;125125+ struct csio_rnode *rn;126126+127127+ list_for_each(tmp, &rnhead->sm.sm_list) {128128+ rn = (struct csio_rnode *) tmp;129129+ if (rn->flowid == flowid)130130+ return rn;131131+ }132132+133133+ return NULL;134134+}135135+136136+/*137137+ * csio_rn_lookup_wwpn - Finds the rnode with the given wwpn138138+ * @ln: lnode139139+ * @wwpn: wwpn140140+ *141141+ * Does the rnode lookup on the given lnode and wwpn. 
If no matching entry142142+ * found, NULL is returned.143143+ */144144+static struct csio_rnode *145145+csio_rn_lookup_wwpn(struct csio_lnode *ln, uint8_t *wwpn)146146+{147147+ struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;148148+ struct list_head *tmp;149149+ struct csio_rnode *rn;150150+151151+ list_for_each(tmp, &rnhead->sm.sm_list) {152152+ rn = (struct csio_rnode *) tmp;153153+ if (!memcmp(csio_rn_wwpn(rn), wwpn, 8))154154+ return rn;155155+ }156156+157157+ return NULL;158158+}159159+160160+/**161161+ * csio_rnode_lookup_portid - Finds the rnode with the given portid162162+ * @ln: lnode163163+ * @portid: port id164164+ *165165+ * Lookup the rnode list for a given portid. If no matching entry166166+ * found, NULL is returned.167167+ */168168+struct csio_rnode *169169+csio_rnode_lookup_portid(struct csio_lnode *ln, uint32_t portid)170170+{171171+ struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;172172+ struct list_head *tmp;173173+ struct csio_rnode *rn;174174+175175+ list_for_each(tmp, &rnhead->sm.sm_list) {176176+ rn = (struct csio_rnode *) tmp;177177+ if (rn->nport_id == portid)178178+ return rn;179179+ }180180+181181+ return NULL;182182+}183183+184184+static int185185+csio_rn_dup_flowid(struct csio_lnode *ln, uint32_t rdev_flowid,186186+ uint32_t *vnp_flowid)187187+{188188+ struct csio_rnode *rnhead;189189+ struct list_head *tmp, *tmp1;190190+ struct csio_rnode *rn;191191+ struct csio_lnode *ln_tmp;192192+ struct csio_hw *hw = csio_lnode_to_hw(ln);193193+194194+ list_for_each(tmp1, &hw->sln_head) {195195+ ln_tmp = (struct csio_lnode *) tmp1;196196+ if (ln_tmp == ln)197197+ continue;198198+199199+ rnhead = (struct csio_rnode *)&ln_tmp->rnhead;200200+ list_for_each(tmp, &rnhead->sm.sm_list) {201201+202202+ rn = (struct csio_rnode *) tmp;203203+ if (csio_is_rnode_ready(rn)) {204204+ if (rn->flowid == rdev_flowid) {205205+ *vnp_flowid = csio_ln_flowid(ln_tmp);206206+ return 1;207207+ }208208+ }209209+ }210210+ }211211+212212+ return 
0;213213+}214214+215215+static struct csio_rnode *216216+csio_alloc_rnode(struct csio_lnode *ln)217217+{218218+ struct csio_hw *hw = csio_lnode_to_hw(ln);219219+220220+ struct csio_rnode *rn = mempool_alloc(hw->rnode_mempool, GFP_ATOMIC);221221+ if (!rn)222222+ goto err;223223+224224+ memset(rn, 0, sizeof(struct csio_rnode));225225+ if (csio_rnode_init(rn, ln))226226+ goto err_free;227227+228228+ CSIO_INC_STATS(ln, n_rnode_alloc);229229+230230+ return rn;231231+232232+err_free:233233+ mempool_free(rn, hw->rnode_mempool);234234+err:235235+ CSIO_INC_STATS(ln, n_rnode_nomem);236236+ return NULL;237237+}238238+239239+static void240240+csio_free_rnode(struct csio_rnode *rn)241241+{242242+ struct csio_hw *hw = csio_lnode_to_hw(csio_rnode_to_lnode(rn));243243+244244+ csio_rnode_exit(rn);245245+ CSIO_INC_STATS(rn->lnp, n_rnode_free);246246+ mempool_free(rn, hw->rnode_mempool);247247+}248248+249249+/*250250+ * csio_get_rnode - Gets rnode with the given flowid251251+ * @ln - lnode252252+ * @flowid - flow id.253253+ *254254+ * Does the rnode lookup on the given lnode and flowid. If no matching255255+ * rnode found, then new rnode with given npid is allocated and returned.256256+ */257257+static struct csio_rnode *258258+csio_get_rnode(struct csio_lnode *ln, uint32_t flowid)259259+{260260+ struct csio_rnode *rn;261261+262262+ rn = csio_rn_lookup(ln, flowid);263263+ if (!rn) {264264+ rn = csio_alloc_rnode(ln);265265+ if (!rn)266266+ return NULL;267267+268268+ rn->flowid = flowid;269269+ }270270+271271+ return rn;272272+}273273+274274+/*275275+ * csio_put_rnode - Frees the given rnode276276+ * @ln - lnode277277+ * @flowid - flow id.278278+ *279279+ * Does the rnode lookup on the given lnode and flowid. 
If no matching280280+ * rnode found, then new rnode with given npid is allocated and returned.281281+ */282282+void283283+csio_put_rnode(struct csio_lnode *ln, struct csio_rnode *rn)284284+{285285+ CSIO_DB_ASSERT(csio_is_rnode_uninit(rn) != 0);286286+ csio_free_rnode(rn);287287+}288288+289289+/*290290+ * csio_confirm_rnode - confirms rnode based on wwpn.291291+ * @ln: lnode292292+ * @rdev_flowid: remote device flowid293293+ * @rdevp: remote device params294294+ * This routines searches other rnode in list having same wwpn of new rnode.295295+ * If there is a match, then matched rnode is returned and otherwise new rnode296296+ * is returned.297297+ * returns rnode.298298+ */299299+struct csio_rnode *300300+csio_confirm_rnode(struct csio_lnode *ln, uint32_t rdev_flowid,301301+ struct fcoe_rdev_entry *rdevp)302302+{303303+ uint8_t rport_type;304304+ struct csio_rnode *rn, *match_rn;305305+ uint32_t vnp_flowid;306306+ uint32_t *port_id;307307+308308+ port_id = (uint32_t *)&rdevp->r_id[0];309309+ rport_type =310310+ FW_RDEV_WR_RPORT_TYPE_GET(rdevp->rd_xfer_rdy_to_rport_type);311311+312312+ /* Drop rdev event for cntrl port */313313+ if (rport_type == FAB_CTLR_VNPORT) {314314+ csio_ln_dbg(ln,315315+ "Unhandled rport_type:%d recv in rdev evt "316316+ "ssni:x%x\n", rport_type, rdev_flowid);317317+ return NULL;318318+ }319319+320320+ /* Lookup on flowid */321321+ rn = csio_rn_lookup(ln, rdev_flowid);322322+ if (!rn) {323323+324324+ /* Drop events with duplicate flowid */325325+ if (csio_rn_dup_flowid(ln, rdev_flowid, &vnp_flowid)) {326326+ csio_ln_warn(ln,327327+ "ssni:%x already active on vnpi:%x",328328+ rdev_flowid, vnp_flowid);329329+ return NULL;330330+ }331331+332332+ /* Lookup on wwpn for NPORTs */333333+ rn = csio_rn_lookup_wwpn(ln, rdevp->wwpn);334334+ if (!rn)335335+ goto alloc_rnode;336336+337337+ } else {338338+ /* Lookup well-known ports with nport id */339339+ if (csio_is_rnode_wka(rport_type)) {340340+ match_rn = csio_rnode_lookup_portid(ln,341341+ 
((ntohl(*port_id) >> 8) & CSIO_DID_MASK));342342+ if (match_rn == NULL) {343343+ csio_rn_flowid(rn) = CSIO_INVALID_IDX;344344+ goto alloc_rnode;345345+ }346346+347347+ /*348348+ * Now compare the wwpn to confirm that349349+ * same port relogged in. If so update the matched rn.350350+ * Else, go ahead and alloc a new rnode.351351+ */352352+ if (!memcmp(csio_rn_wwpn(match_rn), rdevp->wwpn, 8)) {353353+ if (csio_is_rnode_ready(rn)) {354354+ csio_ln_warn(ln,355355+ "rnode is already"356356+ "active ssni:x%x\n",357357+ rdev_flowid);358358+ CSIO_ASSERT(0);359359+ }360360+ csio_rn_flowid(rn) = CSIO_INVALID_IDX;361361+ rn = match_rn;362362+363363+ /* Update rn */364364+ goto found_rnode;365365+ }366366+ csio_rn_flowid(rn) = CSIO_INVALID_IDX;367367+ goto alloc_rnode;368368+ }369369+370370+ /* wwpn match */371371+ if (!memcmp(csio_rn_wwpn(rn), rdevp->wwpn, 8))372372+ goto found_rnode;373373+374374+ /* Search for rnode that have same wwpn */375375+ match_rn = csio_rn_lookup_wwpn(ln, rdevp->wwpn);376376+ if (match_rn != NULL) {377377+ csio_ln_dbg(ln,378378+ "ssni:x%x changed for rport name(wwpn):%llx "379379+ "did:x%x\n", rdev_flowid,380380+ wwn_to_u64(rdevp->wwpn),381381+ match_rn->nport_id);382382+ csio_rn_flowid(rn) = CSIO_INVALID_IDX;383383+ rn = match_rn;384384+ } else {385385+ csio_ln_dbg(ln,386386+ "rnode wwpn mismatch found ssni:x%x "387387+ "name(wwpn):%llx\n",388388+ rdev_flowid,389389+ wwn_to_u64(csio_rn_wwpn(rn)));390390+ if (csio_is_rnode_ready(rn)) {391391+ csio_ln_warn(ln,392392+ "rnode is already active "393393+ "wwpn:%llx ssni:x%x\n",394394+ wwn_to_u64(csio_rn_wwpn(rn)),395395+ rdev_flowid);396396+ CSIO_ASSERT(0);397397+ }398398+ csio_rn_flowid(rn) = CSIO_INVALID_IDX;399399+ goto alloc_rnode;400400+ }401401+ }402402+403403+found_rnode:404404+ csio_ln_dbg(ln, "found rnode:%p ssni:x%x name(wwpn):%llx\n",405405+ rn, rdev_flowid, wwn_to_u64(rdevp->wwpn));406406+407407+ /* Update flowid */408408+ csio_rn_flowid(rn) = rdev_flowid;409409+410410+ /* update rdev entry 
*/411411+ rn->rdev_entry = rdevp;412412+ CSIO_INC_STATS(ln, n_rnode_match);413413+ return rn;414414+415415+alloc_rnode:416416+ rn = csio_get_rnode(ln, rdev_flowid);417417+ if (!rn)418418+ return NULL;419419+420420+ csio_ln_dbg(ln, "alloc rnode:%p ssni:x%x name(wwpn):%llx\n",421421+ rn, rdev_flowid, wwn_to_u64(rdevp->wwpn));422422+423423+ /* update rdev entry */424424+ rn->rdev_entry = rdevp;425425+ return rn;426426+}427427+428428+/*429429+ * csio_rn_verify_rparams - verify rparams.430430+ * @ln: lnode431431+ * @rn: rnode432432+ * @rdevp: remote device params433433+ * returns success if rparams are verified.434434+ */435435+static int436436+csio_rn_verify_rparams(struct csio_lnode *ln, struct csio_rnode *rn,437437+ struct fcoe_rdev_entry *rdevp)438438+{439439+ uint8_t null[8];440440+ uint8_t rport_type;441441+ uint8_t fc_class;442442+ uint32_t *did;443443+444444+ did = (uint32_t *) &rdevp->r_id[0];445445+ rport_type =446446+ FW_RDEV_WR_RPORT_TYPE_GET(rdevp->rd_xfer_rdy_to_rport_type);447447+ switch (rport_type) {448448+ case FLOGI_VFPORT:449449+ rn->role = CSIO_RNFR_FABRIC;450450+ if (((ntohl(*did) >> 8) & CSIO_DID_MASK) != FC_FID_FLOGI) {451451+ csio_ln_err(ln, "ssni:x%x invalid fabric portid\n",452452+ csio_rn_flowid(rn));453453+ return -EINVAL;454454+ }455455+ /* NPIV support */456456+ if (FW_RDEV_WR_NPIV_GET(rdevp->vft_to_qos))457457+ ln->flags |= CSIO_LNF_NPIVSUPP;458458+459459+ break;460460+461461+ case NS_VNPORT:462462+ rn->role = CSIO_RNFR_NS;463463+ if (((ntohl(*did) >> 8) & CSIO_DID_MASK) != FC_FID_DIR_SERV) {464464+ csio_ln_err(ln, "ssni:x%x invalid fabric portid\n",465465+ csio_rn_flowid(rn));466466+ return -EINVAL;467467+ }468468+ break;469469+470470+ case REG_FC4_VNPORT:471471+ case REG_VNPORT:472472+ rn->role = CSIO_RNFR_NPORT;473473+ if (rdevp->event_cause == PRLI_ACC_RCVD ||474474+ rdevp->event_cause == PRLI_RCVD) {475475+ if (FW_RDEV_WR_TASK_RETRY_ID_GET(476476+ rdevp->enh_disc_to_tgt))477477+ rn->fcp_flags |= FCP_SPPF_OVLY_ALLOW;478478+479479+ if 
(FW_RDEV_WR_RETRY_GET(rdevp->enh_disc_to_tgt))480480+ rn->fcp_flags |= FCP_SPPF_RETRY;481481+482482+ if (FW_RDEV_WR_CONF_CMPL_GET(rdevp->enh_disc_to_tgt))483483+ rn->fcp_flags |= FCP_SPPF_CONF_COMPL;484484+485485+ if (FW_RDEV_WR_TGT_GET(rdevp->enh_disc_to_tgt))486486+ rn->role |= CSIO_RNFR_TARGET;487487+488488+ if (FW_RDEV_WR_INI_GET(rdevp->enh_disc_to_tgt))489489+ rn->role |= CSIO_RNFR_INITIATOR;490490+ }491491+492492+ break;493493+494494+ case FDMI_VNPORT:495495+ case FAB_CTLR_VNPORT:496496+ rn->role = 0;497497+ break;498498+499499+ default:500500+ csio_ln_err(ln, "ssni:x%x invalid rport type recv x%x\n",501501+ csio_rn_flowid(rn), rport_type);502502+ return -EINVAL;503503+ }504504+505505+ /* validate wwpn/wwnn for Name server/remote port */506506+ if (rport_type == REG_VNPORT || rport_type == NS_VNPORT) {507507+ memset(null, 0, 8);508508+ if (!memcmp(rdevp->wwnn, null, 8)) {509509+ csio_ln_err(ln,510510+ "ssni:x%x invalid wwnn received from"511511+ " rport did:x%x\n",512512+ csio_rn_flowid(rn),513513+ (ntohl(*did) & CSIO_DID_MASK));514514+ return -EINVAL;515515+ }516516+517517+ if (!memcmp(rdevp->wwpn, null, 8)) {518518+ csio_ln_err(ln,519519+ "ssni:x%x invalid wwpn received from"520520+ " rport did:x%x\n",521521+ csio_rn_flowid(rn),522522+ (ntohl(*did) & CSIO_DID_MASK));523523+ return -EINVAL;524524+ }525525+526526+ }527527+528528+ /* Copy wwnn, wwpn and nport id */529529+ rn->nport_id = (ntohl(*did) >> 8) & CSIO_DID_MASK;530530+ memcpy(csio_rn_wwnn(rn), rdevp->wwnn, 8);531531+ memcpy(csio_rn_wwpn(rn), rdevp->wwpn, 8);532532+ rn->rn_sparm.csp.sp_bb_data = ntohs(rdevp->rcv_fr_sz);533533+ fc_class = FW_RDEV_WR_CLASS_GET(rdevp->vft_to_qos);534534+ rn->rn_sparm.clsp[fc_class - 1].cp_class = htons(FC_CPC_VALID);535535+ return 0;536536+}537537+538538+static void539539+__csio_reg_rnode(struct csio_rnode *rn)540540+{541541+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);542542+ struct csio_hw *hw = csio_lnode_to_hw(ln);543543+544544+ spin_unlock_irq(&hw->lock);545545+ 
csio_reg_rnode(rn);546546+ spin_lock_irq(&hw->lock);547547+548548+ if (rn->role & CSIO_RNFR_TARGET)549549+ ln->n_scsi_tgts++;550550+551551+ if (rn->nport_id == FC_FID_MGMT_SERV)552552+ csio_ln_fdmi_start(ln, (void *) rn);553553+}554554+555555+static void556556+__csio_unreg_rnode(struct csio_rnode *rn)557557+{558558+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);559559+ struct csio_hw *hw = csio_lnode_to_hw(ln);560560+ LIST_HEAD(tmp_q);561561+ int cmpl = 0;562562+563563+ if (!list_empty(&rn->host_cmpl_q)) {564564+ csio_dbg(hw, "Returning completion queue I/Os\n");565565+ list_splice_tail_init(&rn->host_cmpl_q, &tmp_q);566566+ cmpl = 1;567567+ }568568+569569+ if (rn->role & CSIO_RNFR_TARGET) {570570+ ln->n_scsi_tgts--;571571+ ln->last_scan_ntgts--;572572+ }573573+574574+ spin_unlock_irq(&hw->lock);575575+ csio_unreg_rnode(rn);576576+ spin_lock_irq(&hw->lock);577577+578578+ /* Cleanup I/Os that were waiting for rnode to unregister */579579+ if (cmpl)580580+ csio_scsi_cleanup_io_q(csio_hw_to_scsim(hw), &tmp_q);581581+582582+}583583+584584+/*****************************************************************************/585585+/* START: Rnode SM */586586+/*****************************************************************************/587587+588588+/*589589+ * csio_rns_uninit -590590+ * @rn - rnode591591+ * @evt - SM event.592592+ *593593+ */594594+static void595595+csio_rns_uninit(struct csio_rnode *rn, enum csio_rn_ev evt)596596+{597597+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);598598+ int ret = 0;599599+600600+ CSIO_INC_STATS(rn, n_evt_sm[evt]);601601+602602+ switch (evt) {603603+ case CSIO_RNFE_LOGGED_IN:604604+ case CSIO_RNFE_PLOGI_RECV:605605+ ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);606606+ if (!ret) {607607+ csio_set_state(&rn->sm, csio_rns_ready);608608+ __csio_reg_rnode(rn);609609+ } else {610610+ CSIO_INC_STATS(rn, n_err_inval);611611+ }612612+ break;613613+ case CSIO_RNFE_LOGO_RECV:614614+ csio_ln_dbg(ln,615615+ "ssni:x%x Ignoring event %d 
recv "616616+ "in rn state[uninit]\n", csio_rn_flowid(rn), evt);617617+ CSIO_INC_STATS(rn, n_evt_drop);618618+ break;619619+ default:620620+ csio_ln_dbg(ln,621621+ "ssni:x%x unexp event %d recv "622622+ "in rn state[uninit]\n", csio_rn_flowid(rn), evt);623623+ CSIO_INC_STATS(rn, n_evt_unexp);624624+ break;625625+ }626626+}627627+628628+/*629629+ * csio_rns_ready -630630+ * @rn - rnode631631+ * @evt - SM event.632632+ *633633+ */634634+static void635635+csio_rns_ready(struct csio_rnode *rn, enum csio_rn_ev evt)636636+{637637+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);638638+ int ret = 0;639639+640640+ CSIO_INC_STATS(rn, n_evt_sm[evt]);641641+642642+ switch (evt) {643643+ case CSIO_RNFE_LOGGED_IN:644644+ case CSIO_RNFE_PLOGI_RECV:645645+ csio_ln_dbg(ln,646646+ "ssni:x%x Ignoring event %d recv from did:x%x "647647+ "in rn state[ready]\n", csio_rn_flowid(rn), evt,648648+ rn->nport_id);649649+ CSIO_INC_STATS(rn, n_evt_drop);650650+ break;651651+652652+ case CSIO_RNFE_PRLI_DONE:653653+ case CSIO_RNFE_PRLI_RECV:654654+ ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);655655+ if (!ret)656656+ __csio_reg_rnode(rn);657657+ else658658+ CSIO_INC_STATS(rn, n_err_inval);659659+660660+ break;661661+ case CSIO_RNFE_DOWN:662662+ csio_set_state(&rn->sm, csio_rns_offline);663663+ __csio_unreg_rnode(rn);664664+665665+ /* FW expected to internally aborted outstanding SCSI WRs666666+ * and return all SCSI WRs to host with status "ABORTED".667667+ */668668+ break;669669+670670+ case CSIO_RNFE_LOGO_RECV:671671+ csio_set_state(&rn->sm, csio_rns_offline);672672+673673+ __csio_unreg_rnode(rn);674674+675675+ /* FW expected to internally aborted outstanding SCSI WRs676676+ * and return all SCSI WRs to host with status "ABORTED".677677+ */678678+ break;679679+680680+ case CSIO_RNFE_CLOSE:681681+ /*682682+ * Each rnode receives CLOSE event when driver is removed or683683+ * device is reset684684+ * Note: All outstanding IOs on remote port need to returned685685+ * to uppper layer with 
appropriate error before sending686686+ * CLOSE event687687+ */688688+ csio_set_state(&rn->sm, csio_rns_uninit);689689+ __csio_unreg_rnode(rn);690690+ break;691691+692692+ case CSIO_RNFE_NAME_MISSING:693693+ csio_set_state(&rn->sm, csio_rns_disappeared);694694+ __csio_unreg_rnode(rn);695695+696696+ /*697697+ * FW expected to internally aborted outstanding SCSI WRs698698+ * and return all SCSI WRs to host with status "ABORTED".699699+ */700700+701701+ break;702702+703703+ default:704704+ csio_ln_dbg(ln,705705+ "ssni:x%x unexp event %d recv from did:x%x "706706+ "in rn state[uninit]\n", csio_rn_flowid(rn), evt,707707+ rn->nport_id);708708+ CSIO_INC_STATS(rn, n_evt_unexp);709709+ break;710710+ }711711+}712712+713713+/*714714+ * csio_rns_offline -715715+ * @rn - rnode716716+ * @evt - SM event.717717+ *718718+ */719719+static void720720+csio_rns_offline(struct csio_rnode *rn, enum csio_rn_ev evt)721721+{722722+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);723723+ int ret = 0;724724+725725+ CSIO_INC_STATS(rn, n_evt_sm[evt]);726726+727727+ switch (evt) {728728+ case CSIO_RNFE_LOGGED_IN:729729+ case CSIO_RNFE_PLOGI_RECV:730730+ ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);731731+ if (!ret) {732732+ csio_set_state(&rn->sm, csio_rns_ready);733733+ __csio_reg_rnode(rn);734734+ } else {735735+ CSIO_INC_STATS(rn, n_err_inval);736736+ csio_post_event(&rn->sm, CSIO_RNFE_CLOSE);737737+ }738738+ break;739739+740740+ case CSIO_RNFE_DOWN:741741+ csio_ln_dbg(ln,742742+ "ssni:x%x Ignoring event %d recv from did:x%x "743743+ "in rn state[offline]\n", csio_rn_flowid(rn), evt,744744+ rn->nport_id);745745+ CSIO_INC_STATS(rn, n_evt_drop);746746+ break;747747+748748+ case CSIO_RNFE_CLOSE:749749+ /* Each rnode receives CLOSE event when driver is removed or750750+ * device is reset751751+ * Note: All outstanding IOs on remote port need to returned752752+ * to uppper layer with appropriate error before sending753753+ * CLOSE event754754+ */755755+ csio_set_state(&rn->sm, 
csio_rns_uninit);756756+ break;757757+758758+ case CSIO_RNFE_NAME_MISSING:759759+ csio_set_state(&rn->sm, csio_rns_disappeared);760760+ break;761761+762762+ default:763763+ csio_ln_dbg(ln,764764+ "ssni:x%x unexp event %d recv from did:x%x "765765+ "in rn state[offline]\n", csio_rn_flowid(rn), evt,766766+ rn->nport_id);767767+ CSIO_INC_STATS(rn, n_evt_unexp);768768+ break;769769+ }770770+}771771+772772+/*773773+ * csio_rns_disappeared -774774+ * @rn - rnode775775+ * @evt - SM event.776776+ *777777+ */778778+static void779779+csio_rns_disappeared(struct csio_rnode *rn, enum csio_rn_ev evt)780780+{781781+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);782782+ int ret = 0;783783+784784+ CSIO_INC_STATS(rn, n_evt_sm[evt]);785785+786786+ switch (evt) {787787+ case CSIO_RNFE_LOGGED_IN:788788+ case CSIO_RNFE_PLOGI_RECV:789789+ ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);790790+ if (!ret) {791791+ csio_set_state(&rn->sm, csio_rns_ready);792792+ __csio_reg_rnode(rn);793793+ } else {794794+ CSIO_INC_STATS(rn, n_err_inval);795795+ csio_post_event(&rn->sm, CSIO_RNFE_CLOSE);796796+ }797797+ break;798798+799799+ case CSIO_RNFE_CLOSE:800800+ /* Each rnode receives CLOSE event when driver is removed or801801+ * device is reset.802802+ * Note: All outstanding IOs on remote port need to returned803803+ * to uppper layer with appropriate error before sending804804+ * CLOSE event805805+ */806806+ csio_set_state(&rn->sm, csio_rns_uninit);807807+ break;808808+809809+ case CSIO_RNFE_DOWN:810810+ case CSIO_RNFE_NAME_MISSING:811811+ csio_ln_dbg(ln,812812+ "ssni:x%x Ignoring event %d recv from did x%x"813813+ "in rn state[disappeared]\n", csio_rn_flowid(rn),814814+ evt, rn->nport_id);815815+ break;816816+817817+ default:818818+ csio_ln_dbg(ln,819819+ "ssni:x%x unexp event %d recv from did x%x"820820+ "in rn state[disappeared]\n", csio_rn_flowid(rn),821821+ evt, rn->nport_id);822822+ CSIO_INC_STATS(rn, n_evt_unexp);823823+ break;824824+ 
}825825+}826826+827827+/*****************************************************************************/828828+/* END: Rnode SM */829829+/*****************************************************************************/830830+831831+/*832832+ * csio_rnode_devloss_handler - Device loss event handler833833+ * @rn: rnode834834+ *835835+ * Post event to close rnode SM and free rnode.836836+ */837837+void838838+csio_rnode_devloss_handler(struct csio_rnode *rn)839839+{840840+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);841841+842842+ /* ignore if same rnode came back as online */843843+ if (csio_is_rnode_ready(rn))844844+ return;845845+846846+ csio_post_event(&rn->sm, CSIO_RNFE_CLOSE);847847+848848+ /* Free rn if in uninit state */849849+ if (csio_is_rnode_uninit(rn))850850+ csio_put_rnode(ln, rn);851851+}852852+853853+/**854854+ * csio_rnode_fwevt_handler - Event handler for firmware rnode events.855855+ * @rn: rnode856856+ *857857+ */858858+void859859+csio_rnode_fwevt_handler(struct csio_rnode *rn, uint8_t fwevt)860860+{861861+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);862862+ enum csio_rn_ev evt;863863+864864+ evt = CSIO_FWE_TO_RNFE(fwevt);865865+ if (!evt) {866866+ csio_ln_err(ln, "ssni:x%x Unhandled FW Rdev event: %d\n",867867+ csio_rn_flowid(rn), fwevt);868868+ CSIO_INC_STATS(rn, n_evt_unexp);869869+ return;870870+ }871871+ CSIO_INC_STATS(rn, n_evt_fw[fwevt]);872872+873873+ /* Track previous & current events for debugging */874874+ rn->prev_evt = rn->cur_evt;875875+ rn->cur_evt = fwevt;876876+877877+ /* Post event to rnode SM */878878+ csio_post_event(&rn->sm, evt);879879+880880+ /* Free rn if in uninit state */881881+ if (csio_is_rnode_uninit(rn))882882+ csio_put_rnode(ln, rn);883883+}884884+885885+/*886886+ * csio_rnode_init - Initialize rnode.887887+ * @rn: RNode888888+ * @ln: Associated lnode889889+ *890890+ * Caller is responsible for holding the lock. 
The lock is required891891+ * to be held for inserting the rnode in ln->rnhead list.892892+ */893893+static int894894+csio_rnode_init(struct csio_rnode *rn, struct csio_lnode *ln)895895+{896896+ csio_rnode_to_lnode(rn) = ln;897897+ csio_init_state(&rn->sm, csio_rns_uninit);898898+ INIT_LIST_HEAD(&rn->host_cmpl_q);899899+ csio_rn_flowid(rn) = CSIO_INVALID_IDX;900900+901901+ /* Add rnode to list of lnodes->rnhead */902902+ list_add_tail(&rn->sm.sm_list, &ln->rnhead);903903+904904+ return 0;905905+}906906+907907+static void908908+csio_rnode_exit(struct csio_rnode *rn)909909+{910910+ list_del_init(&rn->sm.sm_list);911911+ CSIO_DB_ASSERT(list_empty(&rn->host_cmpl_q));912912+}
+141
drivers/scsi/csiostor/csio_rnode.h
···11+/*22+ * This file is part of the Chelsio FCoE driver for Linux.33+ *44+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.55+ *66+ * This software is available to you under a choice of one of two77+ * licenses. You may choose to be licensed under the terms of the GNU88+ * General Public License (GPL) Version 2, available from the file99+ * COPYING in the main directory of this source tree, or the1010+ * OpenIB.org BSD license below:1111+ *1212+ * Redistribution and use in source and binary forms, with or1313+ * without modification, are permitted provided that the following1414+ * conditions are met:1515+ *1616+ * - Redistributions of source code must retain the above1717+ * copyright notice, this list of conditions and the following1818+ * disclaimer.1919+ *2020+ * - Redistributions in binary form must reproduce the above2121+ * copyright notice, this list of conditions and the following2222+ * disclaimer in the documentation and/or other materials2323+ * provided with the distribution.2424+ *2525+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,2626+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF2727+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND2828+ * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS2929+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN3030+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN3131+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE3232+ * SOFTWARE.3333+ */3434+3535+#ifndef __CSIO_RNODE_H__3636+#define __CSIO_RNODE_H__3737+3838+#include "csio_defs.h"3939+4040+/* State machine evets */4141+enum csio_rn_ev {4242+ CSIO_RNFE_NONE = (uint32_t)0, /* None */4343+ CSIO_RNFE_LOGGED_IN, /* [N/F]Port login4444+ * complete.4545+ */4646+ CSIO_RNFE_PRLI_DONE, /* PRLI completed */4747+ CSIO_RNFE_PLOGI_RECV, /* Received PLOGI */4848+ CSIO_RNFE_PRLI_RECV, /* Received PLOGI */4949+ CSIO_RNFE_LOGO_RECV, /* Received LOGO */5050+ CSIO_RNFE_PRLO_RECV, /* Received PRLO */5151+ CSIO_RNFE_DOWN, /* Rnode is down */5252+ CSIO_RNFE_CLOSE, /* Close rnode */5353+ CSIO_RNFE_NAME_MISSING, /* Rnode name missing5454+ * in name server.5555+ */5656+ CSIO_RNFE_MAX_EVENT,5757+};5858+5959+/* rnode stats */6060+struct csio_rnode_stats {6161+ uint32_t n_err; /* error */6262+ uint32_t n_err_inval; /* invalid parameter */6363+ uint32_t n_err_nomem; /* error nomem */6464+ uint32_t n_evt_unexp; /* unexpected event */6565+ uint32_t n_evt_drop; /* unexpected event */6666+ uint32_t n_evt_fw[RSCN_DEV_LOST]; /* fw events */6767+ enum csio_rn_ev n_evt_sm[CSIO_RNFE_MAX_EVENT]; /* State m/c events */6868+ uint32_t n_lun_rst; /* Number of resets of6969+ * of LUNs under this7070+ * target7171+ */7272+ uint32_t n_lun_rst_fail; /* Number of LUN reset7373+ * failures.7474+ */7575+ uint32_t n_tgt_rst; /* Number of target resets */7676+ uint32_t n_tgt_rst_fail; /* Number of target reset7777+ * failures.7878+ */7979+};8080+8181+/* Defines for rnode role */8282+#define CSIO_RNFR_INITIATOR 0x18383+#define CSIO_RNFR_TARGET 0x28484+#define CSIO_RNFR_FABRIC 0x48585+#define CSIO_RNFR_NS 0x88686+#define CSIO_RNFR_NPORT 0x108787+8888+struct csio_rnode {8989+ struct csio_sm sm; /* State machine 
-9090+ * should be the9191+ * 1st member9292+ */9393+ struct csio_lnode *lnp; /* Pointer to owning9494+ * Lnode */9595+ uint32_t flowid; /* Firmware ID */9696+ struct list_head host_cmpl_q; /* SCSI IOs9797+ * pending to completed9898+ * to Mid-layer.9999+ */100100+ /* FC identifiers for remote node */101101+ uint32_t nport_id;102102+ uint16_t fcp_flags; /* FCP Flags */103103+ uint8_t cur_evt; /* Current event */104104+ uint8_t prev_evt; /* Previous event */105105+ uint32_t role; /* Fabric/Target/106106+ * Initiator/NS107107+ */108108+ struct fcoe_rdev_entry *rdev_entry; /* Rdev entry */109109+ struct csio_service_parms rn_sparm;110110+111111+ /* FC transport attributes */112112+ struct fc_rport *rport; /* FC transport rport */113113+ uint32_t supp_classes; /* Supported FC classes */114114+ uint32_t maxframe_size; /* Max Frame size */115115+ uint32_t scsi_id; /* Transport given SCSI id */116116+117117+ struct csio_rnode_stats stats; /* Common rnode stats */118118+};119119+120120+#define csio_rn_flowid(rn) ((rn)->flowid)121121+#define csio_rn_wwpn(rn) ((rn)->rn_sparm.wwpn)122122+#define csio_rn_wwnn(rn) ((rn)->rn_sparm.wwnn)123123+#define csio_rnode_to_lnode(rn) ((rn)->lnp)124124+125125+int csio_is_rnode_ready(struct csio_rnode *rn);126126+void csio_rnode_state_to_str(struct csio_rnode *rn, int8_t *str);127127+128128+struct csio_rnode *csio_rnode_lookup_portid(struct csio_lnode *, uint32_t);129129+struct csio_rnode *csio_confirm_rnode(struct csio_lnode *,130130+ uint32_t, struct fcoe_rdev_entry *);131131+132132+void csio_rnode_fwevt_handler(struct csio_rnode *rn, uint8_t fwevt);133133+134134+void csio_put_rnode(struct csio_lnode *ln, struct csio_rnode *rn);135135+136136+void csio_reg_rnode(struct csio_rnode *);137137+void csio_unreg_rnode(struct csio_rnode *);138138+139139+void csio_rnode_devloss_handler(struct csio_rnode *);140140+141141+#endif /* ifndef __CSIO_RNODE_H__ */
+2555
drivers/scsi/csiostor/csio_scsi.c
···11+/*22+ * This file is part of the Chelsio FCoE driver for Linux.33+ *44+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.55+ *66+ * This software is available to you under a choice of one of two77+ * licenses. You may choose to be licensed under the terms of the GNU88+ * General Public License (GPL) Version 2, available from the file99+ * COPYING in the main directory of this source tree, or the1010+ * OpenIB.org BSD license below:1111+ *1212+ * Redistribution and use in source and binary forms, with or1313+ * without modification, are permitted provided that the following1414+ * conditions are met:1515+ *1616+ * - Redistributions of source code must retain the above1717+ * copyright notice, this list of conditions and the following1818+ * disclaimer.1919+ *2020+ * - Redistributions in binary form must reproduce the above2121+ * copyright notice, this list of conditions and the following2222+ * disclaimer in the documentation and/or other materials2323+ * provided with the distribution.2424+ *2525+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,2626+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF2727+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND2828+ * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS2929+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN3030+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN3131+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE3232+ * SOFTWARE.3333+ */3434+3535+#include <linux/device.h>3636+#include <linux/delay.h>3737+#include <linux/ctype.h>3838+#include <linux/kernel.h>3939+#include <linux/slab.h>4040+#include <linux/string.h>4141+#include <linux/compiler.h>4242+#include <linux/export.h>4343+#include <linux/module.h>4444+#include <asm/unaligned.h>4545+#include <asm/page.h>4646+#include <scsi/scsi.h>4747+#include <scsi/scsi_device.h>4848+#include <scsi/scsi_transport_fc.h>4949+5050+#include "csio_hw.h"5151+#include "csio_lnode.h"5252+#include "csio_rnode.h"5353+#include "csio_scsi.h"5454+#include "csio_init.h"5555+5656+int csio_scsi_eqsize = 65536;5757+int csio_scsi_iqlen = 128;5858+int csio_scsi_ioreqs = 2048;5959+uint32_t csio_max_scan_tmo;6060+uint32_t csio_delta_scan_tmo = 5;6161+int csio_lun_qdepth = 32;6262+6363+static int csio_ddp_descs = 128;6464+6565+static int csio_do_abrt_cls(struct csio_hw *,6666+ struct csio_ioreq *, bool);6767+6868+static void csio_scsis_uninit(struct csio_ioreq *, enum csio_scsi_ev);6969+static void csio_scsis_io_active(struct csio_ioreq *, enum csio_scsi_ev);7070+static void csio_scsis_tm_active(struct csio_ioreq *, enum csio_scsi_ev);7171+static void csio_scsis_aborting(struct csio_ioreq *, enum csio_scsi_ev);7272+static void csio_scsis_closing(struct csio_ioreq *, enum csio_scsi_ev);7373+static void csio_scsis_shost_cmpl_await(struct csio_ioreq *, enum csio_scsi_ev);7474+7575+/*7676+ * csio_scsi_match_io - Match an ioreq with the given SCSI level data.7777+ * @ioreq: The I/O request7878+ * @sld: Level information7979+ *8080+ * Should be called with lock held.8181+ *8282+ */8383+static bool8484+csio_scsi_match_io(struct csio_ioreq *ioreq, struct csio_scsi_level_data *sld)8585+{8686+ 
struct scsi_cmnd *scmnd = csio_scsi_cmnd(ioreq);8787+8888+ switch (sld->level) {8989+ case CSIO_LEV_LUN:9090+ if (scmnd == NULL)9191+ return false;9292+9393+ return ((ioreq->lnode == sld->lnode) &&9494+ (ioreq->rnode == sld->rnode) &&9595+ ((uint64_t)scmnd->device->lun == sld->oslun));9696+9797+ case CSIO_LEV_RNODE:9898+ return ((ioreq->lnode == sld->lnode) &&9999+ (ioreq->rnode == sld->rnode));100100+ case CSIO_LEV_LNODE:101101+ return (ioreq->lnode == sld->lnode);102102+ case CSIO_LEV_ALL:103103+ return true;104104+ default:105105+ return false;106106+ }107107+}108108+109109+/*110110+ * csio_scsi_gather_active_ios - Gather active I/Os based on level111111+ * @scm: SCSI module112112+ * @sld: Level information113113+ * @dest: The queue where these I/Os have to be gathered.114114+ *115115+ * Should be called with lock held.116116+ */117117+static void118118+csio_scsi_gather_active_ios(struct csio_scsim *scm,119119+ struct csio_scsi_level_data *sld,120120+ struct list_head *dest)121121+{122122+ struct list_head *tmp, *next;123123+124124+ if (list_empty(&scm->active_q))125125+ return;126126+127127+ /* Just splice the entire active_q into dest */128128+ if (sld->level == CSIO_LEV_ALL) {129129+ list_splice_tail_init(&scm->active_q, dest);130130+ return;131131+ }132132+133133+ list_for_each_safe(tmp, next, &scm->active_q) {134134+ if (csio_scsi_match_io((struct csio_ioreq *)tmp, sld)) {135135+ list_del_init(tmp);136136+ list_add_tail(tmp, dest);137137+ }138138+ }139139+}140140+141141+static inline bool142142+csio_scsi_itnexus_loss_error(uint16_t error)143143+{144144+ switch (error) {145145+ case FW_ERR_LINK_DOWN:146146+ case FW_RDEV_NOT_READY:147147+ case FW_ERR_RDEV_LOST:148148+ case FW_ERR_RDEV_LOGO:149149+ case FW_ERR_RDEV_IMPL_LOGO:150150+ return 1;151151+ }152152+ return 0;153153+}154154+155155+static inline void156156+csio_scsi_tag(struct scsi_cmnd *scmnd, uint8_t *tag, uint8_t hq,157157+ uint8_t oq, uint8_t sq)158158+{159159+ char stag[2];160160+161161+ if 
(scsi_populate_tag_msg(scmnd, stag)) {162162+ switch (stag[0]) {163163+ case HEAD_OF_QUEUE_TAG:164164+ *tag = hq;165165+ break;166166+ case ORDERED_QUEUE_TAG:167167+ *tag = oq;168168+ break;169169+ default:170170+ *tag = sq;171171+ break;172172+ }173173+ } else174174+ *tag = 0;175175+}176176+177177+/*178178+ * csio_scsi_fcp_cmnd - Frame the SCSI FCP command paylod.179179+ * @req: IO req structure.180180+ * @addr: DMA location to place the payload.181181+ *182182+ * This routine is shared between FCP_WRITE, FCP_READ and FCP_CMD requests.183183+ */184184+static inline void185185+csio_scsi_fcp_cmnd(struct csio_ioreq *req, void *addr)186186+{187187+ struct fcp_cmnd *fcp_cmnd = (struct fcp_cmnd *)addr;188188+ struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);189189+190190+ /* Check for Task Management */191191+ if (likely(scmnd->SCp.Message == 0)) {192192+ int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun);193193+ fcp_cmnd->fc_tm_flags = 0;194194+ fcp_cmnd->fc_cmdref = 0;195195+ fcp_cmnd->fc_pri_ta = 0;196196+197197+ memcpy(fcp_cmnd->fc_cdb, scmnd->cmnd, 16);198198+ csio_scsi_tag(scmnd, &fcp_cmnd->fc_pri_ta,199199+ FCP_PTA_HEADQ, FCP_PTA_ORDERED, FCP_PTA_SIMPLE);200200+ fcp_cmnd->fc_dl = cpu_to_be32(scsi_bufflen(scmnd));201201+202202+ if (req->nsge)203203+ if (req->datadir == DMA_TO_DEVICE)204204+ fcp_cmnd->fc_flags = FCP_CFL_WRDATA;205205+ else206206+ fcp_cmnd->fc_flags = FCP_CFL_RDDATA;207207+ else208208+ fcp_cmnd->fc_flags = 0;209209+ } else {210210+ memset(fcp_cmnd, 0, sizeof(*fcp_cmnd));211211+ int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun);212212+ fcp_cmnd->fc_tm_flags = (uint8_t)scmnd->SCp.Message;213213+ }214214+}215215+216216+/*217217+ * csio_scsi_init_cmd_wr - Initialize the SCSI CMD WR.218218+ * @req: IO req structure.219219+ * @addr: DMA location to place the payload.220220+ * @size: Size of WR (including FW WR + immed data + rsp SG entry221221+ *222222+ * Wrapper for populating fw_scsi_cmd_wr.223223+ */224224+static inline 
void225225+csio_scsi_init_cmd_wr(struct csio_ioreq *req, void *addr, uint32_t size)226226+{227227+ struct csio_hw *hw = req->lnode->hwp;228228+ struct csio_rnode *rn = req->rnode;229229+ struct fw_scsi_cmd_wr *wr = (struct fw_scsi_cmd_wr *)addr;230230+ struct csio_dma_buf *dma_buf;231231+ uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;232232+233233+ wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_CMD_WR) |234234+ FW_SCSI_CMD_WR_IMMDLEN(imm));235235+ wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) |236236+ FW_WR_LEN16(237237+ DIV_ROUND_UP(size, 16)));238238+239239+ wr->cookie = (uintptr_t) req;240240+ wr->iqid = (uint16_t)cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));241241+ wr->tmo_val = (uint8_t) req->tmo;242242+ wr->r3 = 0;243243+ memset(&wr->r5, 0, 8);244244+245245+ /* Get RSP DMA buffer */246246+ dma_buf = &req->dma_buf;247247+248248+ /* Prepare RSP SGL */249249+ wr->rsp_dmalen = cpu_to_be32(dma_buf->len);250250+ wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);251251+252252+ wr->r6 = 0;253253+254254+ wr->u.fcoe.ctl_pri = 0;255255+ wr->u.fcoe.cp_en_class = 0;256256+ wr->u.fcoe.r4_lo[0] = 0;257257+ wr->u.fcoe.r4_lo[1] = 0;258258+259259+ /* Frame a FCP command */260260+ csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)addr +261261+ sizeof(struct fw_scsi_cmd_wr)));262262+}263263+264264+#define CSIO_SCSI_CMD_WR_SZ(_imm) \265265+ (sizeof(struct fw_scsi_cmd_wr) + /* WR size */ \266266+ ALIGN((_imm), 16)) /* Immed data */267267+268268+#define CSIO_SCSI_CMD_WR_SZ_16(_imm) \269269+ (ALIGN(CSIO_SCSI_CMD_WR_SZ((_imm)), 16))270270+271271+/*272272+ * csio_scsi_cmd - Create a SCSI CMD WR.273273+ * @req: IO req structure.274274+ *275275+ * Gets a WR slot in the ingress queue and initializes it with SCSI CMD WR.276276+ *277277+ */278278+static inline void279279+csio_scsi_cmd(struct csio_ioreq *req)280280+{281281+ struct csio_wr_pair wrp;282282+ struct csio_hw *hw = req->lnode->hwp;283283+ struct csio_scsim *scsim = csio_hw_to_scsim(hw);284284+ uint32_t size = 
CSIO_SCSI_CMD_WR_SZ_16(scsim->proto_cmd_len);285285+286286+ req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);287287+ if (unlikely(req->drv_status != 0))288288+ return;289289+290290+ if (wrp.size1 >= size) {291291+ /* Initialize WR in one shot */292292+ csio_scsi_init_cmd_wr(req, wrp.addr1, size);293293+ } else {294294+ uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);295295+296296+ /*297297+ * Make a temporary copy of the WR and write back298298+ * the copy into the WR pair.299299+ */300300+ csio_scsi_init_cmd_wr(req, (void *)tmpwr, size);301301+ memcpy(wrp.addr1, tmpwr, wrp.size1);302302+ memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);303303+ }304304+}305305+306306+/*307307+ * csio_scsi_init_ulptx_dsgl - Fill in a ULP_TX_SC_DSGL308308+ * @hw: HW module309309+ * @req: IO request310310+ * @sgl: ULP TX SGL pointer.311311+ *312312+ */313313+static inline void314314+csio_scsi_init_ultptx_dsgl(struct csio_hw *hw, struct csio_ioreq *req,315315+ struct ulptx_sgl *sgl)316316+{317317+ struct ulptx_sge_pair *sge_pair = NULL;318318+ struct scatterlist *sgel;319319+ uint32_t i = 0;320320+ uint32_t xfer_len;321321+ struct list_head *tmp;322322+ struct csio_dma_buf *dma_buf;323323+ struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);324324+325325+ sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_MORE |326326+ ULPTX_NSGE(req->nsge));327327+ /* Now add the data SGLs */328328+ if (likely(!req->dcopy)) {329329+ scsi_for_each_sg(scmnd, sgel, req->nsge, i) {330330+ if (i == 0) {331331+ sgl->addr0 = cpu_to_be64(sg_dma_address(sgel));332332+ sgl->len0 = cpu_to_be32(sg_dma_len(sgel));333333+ sge_pair = (struct ulptx_sge_pair *)(sgl + 1);334334+ continue;335335+ }336336+ if ((i - 1) & 0x1) {337337+ sge_pair->addr[1] = cpu_to_be64(338338+ sg_dma_address(sgel));339339+ sge_pair->len[1] = cpu_to_be32(340340+ sg_dma_len(sgel));341341+ sge_pair++;342342+ } else {343343+ sge_pair->addr[0] = cpu_to_be64(344344+ sg_dma_address(sgel));345345+ sge_pair->len[0] = 
cpu_to_be32(346346+ sg_dma_len(sgel));347347+ }348348+ }349349+ } else {350350+ /* Program sg elements with driver's DDP buffer */351351+ xfer_len = scsi_bufflen(scmnd);352352+ list_for_each(tmp, &req->gen_list) {353353+ dma_buf = (struct csio_dma_buf *)tmp;354354+ if (i == 0) {355355+ sgl->addr0 = cpu_to_be64(dma_buf->paddr);356356+ sgl->len0 = cpu_to_be32(357357+ min(xfer_len, dma_buf->len));358358+ sge_pair = (struct ulptx_sge_pair *)(sgl + 1);359359+ } else if ((i - 1) & 0x1) {360360+ sge_pair->addr[1] = cpu_to_be64(dma_buf->paddr);361361+ sge_pair->len[1] = cpu_to_be32(362362+ min(xfer_len, dma_buf->len));363363+ sge_pair++;364364+ } else {365365+ sge_pair->addr[0] = cpu_to_be64(dma_buf->paddr);366366+ sge_pair->len[0] = cpu_to_be32(367367+ min(xfer_len, dma_buf->len));368368+ }369369+ xfer_len -= min(xfer_len, dma_buf->len);370370+ i++;371371+ }372372+ }373373+}374374+375375+/*376376+ * csio_scsi_init_read_wr - Initialize the READ SCSI WR.377377+ * @req: IO req structure.378378+ * @wrp: DMA location to place the payload.379379+ * @size: Size of WR (including FW WR + immed data + rsp SG entry + data SGL380380+ *381381+ * Wrapper for populating fw_scsi_read_wr.382382+ */383383+static inline void384384+csio_scsi_init_read_wr(struct csio_ioreq *req, void *wrp, uint32_t size)385385+{386386+ struct csio_hw *hw = req->lnode->hwp;387387+ struct csio_rnode *rn = req->rnode;388388+ struct fw_scsi_read_wr *wr = (struct fw_scsi_read_wr *)wrp;389389+ struct ulptx_sgl *sgl;390390+ struct csio_dma_buf *dma_buf;391391+ uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;392392+ struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);393393+394394+ wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_READ_WR) |395395+ FW_SCSI_READ_WR_IMMDLEN(imm));396396+ wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) |397397+ FW_WR_LEN16(DIV_ROUND_UP(size, 16)));398398+ wr->cookie = (uintptr_t)req;399399+ wr->iqid = (uint16_t)cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));400400+ wr->tmo_val = 
(uint8_t)(req->tmo);401401+ wr->use_xfer_cnt = 1;402402+ wr->xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));403403+ wr->ini_xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));404404+ /* Get RSP DMA buffer */405405+ dma_buf = &req->dma_buf;406406+407407+ /* Prepare RSP SGL */408408+ wr->rsp_dmalen = cpu_to_be32(dma_buf->len);409409+ wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);410410+411411+ wr->r4 = 0;412412+413413+ wr->u.fcoe.ctl_pri = 0;414414+ wr->u.fcoe.cp_en_class = 0;415415+ wr->u.fcoe.r3_lo[0] = 0;416416+ wr->u.fcoe.r3_lo[1] = 0;417417+ csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)wrp +418418+ sizeof(struct fw_scsi_read_wr)));419419+420420+ /* Move WR pointer past command and immediate data */421421+ sgl = (struct ulptx_sgl *)((uintptr_t)wrp +422422+ sizeof(struct fw_scsi_read_wr) + ALIGN(imm, 16));423423+424424+ /* Fill in the DSGL */425425+ csio_scsi_init_ultptx_dsgl(hw, req, sgl);426426+}427427+428428+/*429429+ * csio_scsi_init_write_wr - Initialize the WRITE SCSI WR.430430+ * @req: IO req structure.431431+ * @wrp: DMA location to place the payload.432432+ * @size: Size of WR (including FW WR + immed data + rsp SG entry + data SGL433433+ *434434+ * Wrapper for populating fw_scsi_write_wr.435435+ */436436+static inline void437437+csio_scsi_init_write_wr(struct csio_ioreq *req, void *wrp, uint32_t size)438438+{439439+ struct csio_hw *hw = req->lnode->hwp;440440+ struct csio_rnode *rn = req->rnode;441441+ struct fw_scsi_write_wr *wr = (struct fw_scsi_write_wr *)wrp;442442+ struct ulptx_sgl *sgl;443443+ struct csio_dma_buf *dma_buf;444444+ uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;445445+ struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);446446+447447+ wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_WRITE_WR) |448448+ FW_SCSI_WRITE_WR_IMMDLEN(imm));449449+ wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) |450450+ FW_WR_LEN16(DIV_ROUND_UP(size, 16)));451451+ wr->cookie = (uintptr_t)req;452452+ wr->iqid = (uint16_t)cpu_to_be16(csio_q_physiqid(hw, 
req->iq_idx));453453+ wr->tmo_val = (uint8_t)(req->tmo);454454+ wr->use_xfer_cnt = 1;455455+ wr->xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));456456+ wr->ini_xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));457457+ /* Get RSP DMA buffer */458458+ dma_buf = &req->dma_buf;459459+460460+ /* Prepare RSP SGL */461461+ wr->rsp_dmalen = cpu_to_be32(dma_buf->len);462462+ wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);463463+464464+ wr->r4 = 0;465465+466466+ wr->u.fcoe.ctl_pri = 0;467467+ wr->u.fcoe.cp_en_class = 0;468468+ wr->u.fcoe.r3_lo[0] = 0;469469+ wr->u.fcoe.r3_lo[1] = 0;470470+ csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)wrp +471471+ sizeof(struct fw_scsi_write_wr)));472472+473473+ /* Move WR pointer past command and immediate data */474474+ sgl = (struct ulptx_sgl *)((uintptr_t)wrp +475475+ sizeof(struct fw_scsi_write_wr) + ALIGN(imm, 16));476476+477477+ /* Fill in the DSGL */478478+ csio_scsi_init_ultptx_dsgl(hw, req, sgl);479479+}480480+481481+/* Calculate WR size needed for fw_scsi_read_wr/fw_scsi_write_wr */482482+#define CSIO_SCSI_DATA_WRSZ(req, oper, sz, imm) \483483+do { \484484+ (sz) = sizeof(struct fw_scsi_##oper##_wr) + /* WR size */ \485485+ ALIGN((imm), 16) + /* Immed data */ \486486+ sizeof(struct ulptx_sgl); /* ulptx_sgl */ \487487+ \488488+ if (unlikely((req)->nsge > 1)) \489489+ (sz) += (sizeof(struct ulptx_sge_pair) * \490490+ (ALIGN(((req)->nsge - 1), 2) / 2)); \491491+ /* Data SGE */ \492492+} while (0)493493+494494+/*495495+ * csio_scsi_read - Create a SCSI READ WR.496496+ * @req: IO req structure.497497+ *498498+ * Gets a WR slot in the ingress queue and initializes it with499499+ * SCSI READ WR.500500+ *501501+ */502502+static inline void503503+csio_scsi_read(struct csio_ioreq *req)504504+{505505+ struct csio_wr_pair wrp;506506+ uint32_t size;507507+ struct csio_hw *hw = req->lnode->hwp;508508+ struct csio_scsim *scsim = csio_hw_to_scsim(hw);509509+510510+ CSIO_SCSI_DATA_WRSZ(req, read, size, scsim->proto_cmd_len);511511+ size = ALIGN(size, 
16);512512+513513+ req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);514514+ if (likely(req->drv_status == 0)) {515515+ if (likely(wrp.size1 >= size)) {516516+ /* Initialize WR in one shot */517517+ csio_scsi_init_read_wr(req, wrp.addr1, size);518518+ } else {519519+ uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);520520+ /*521521+ * Make a temporary copy of the WR and write back522522+ * the copy into the WR pair.523523+ */524524+ csio_scsi_init_read_wr(req, (void *)tmpwr, size);525525+ memcpy(wrp.addr1, tmpwr, wrp.size1);526526+ memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);527527+ }528528+ }529529+}530530+531531+/*532532+ * csio_scsi_write - Create a SCSI WRITE WR.533533+ * @req: IO req structure.534534+ *535535+ * Gets a WR slot in the ingress queue and initializes it with536536+ * SCSI WRITE WR.537537+ *538538+ */539539+static inline void540540+csio_scsi_write(struct csio_ioreq *req)541541+{542542+ struct csio_wr_pair wrp;543543+ uint32_t size;544544+ struct csio_hw *hw = req->lnode->hwp;545545+ struct csio_scsim *scsim = csio_hw_to_scsim(hw);546546+547547+ CSIO_SCSI_DATA_WRSZ(req, write, size, scsim->proto_cmd_len);548548+ size = ALIGN(size, 16);549549+550550+ req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);551551+ if (likely(req->drv_status == 0)) {552552+ if (likely(wrp.size1 >= size)) {553553+ /* Initialize WR in one shot */554554+ csio_scsi_init_write_wr(req, wrp.addr1, size);555555+ } else {556556+ uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);557557+ /*558558+ * Make a temporary copy of the WR and write back559559+ * the copy into the WR pair.560560+ */561561+ csio_scsi_init_write_wr(req, (void *)tmpwr, size);562562+ memcpy(wrp.addr1, tmpwr, wrp.size1);563563+ memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);564564+ }565565+ }566566+}567567+568568+/*569569+ * csio_setup_ddp - Setup DDP buffers for Read request.570570+ * @req: IO req structure.571571+ *572572+ * Checks SGLs/Data buffers are virtually contiguous 
required for DDP.573573+ * If contiguous,driver posts SGLs in the WR otherwise post internal574574+ * buffers for such request for DDP.575575+ */576576+static inline void577577+csio_setup_ddp(struct csio_scsim *scsim, struct csio_ioreq *req)578578+{579579+#ifdef __CSIO_DEBUG__580580+ struct csio_hw *hw = req->lnode->hwp;581581+#endif582582+ struct scatterlist *sgel = NULL;583583+ struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);584584+ uint64_t sg_addr = 0;585585+ uint32_t ddp_pagesz = 4096;586586+ uint32_t buf_off;587587+ struct csio_dma_buf *dma_buf = NULL;588588+ uint32_t alloc_len = 0;589589+ uint32_t xfer_len = 0;590590+ uint32_t sg_len = 0;591591+ uint32_t i;592592+593593+ scsi_for_each_sg(scmnd, sgel, req->nsge, i) {594594+ sg_addr = sg_dma_address(sgel);595595+ sg_len = sg_dma_len(sgel);596596+597597+ buf_off = sg_addr & (ddp_pagesz - 1);598598+599599+ /* Except 1st buffer,all buffer addr have to be Page aligned */600600+ if (i != 0 && buf_off) {601601+ csio_dbg(hw, "SGL addr not DDP aligned (%llx:%d)\n",602602+ sg_addr, sg_len);603603+ goto unaligned;604604+ }605605+606606+ /* Except last buffer,all buffer must end on page boundary */607607+ if ((i != (req->nsge - 1)) &&608608+ ((buf_off + sg_len) & (ddp_pagesz - 1))) {609609+ csio_dbg(hw,610610+ "SGL addr not ending on page boundary"611611+ "(%llx:%d)\n", sg_addr, sg_len);612612+ goto unaligned;613613+ }614614+ }615615+616616+ /* SGL's are virtually contiguous. 
HW will DDP to SGLs */617617+ req->dcopy = 0;618618+ csio_scsi_read(req);619619+620620+ return;621621+622622+unaligned:623623+ CSIO_INC_STATS(scsim, n_unaligned);624624+ /*625625+ * For unaligned SGLs, driver will allocate internal DDP buffer.626626+ * Once command is completed data from DDP buffer copied to SGLs627627+ */628628+ req->dcopy = 1;629629+630630+ /* Use gen_list to store the DDP buffers */631631+ INIT_LIST_HEAD(&req->gen_list);632632+ xfer_len = scsi_bufflen(scmnd);633633+634634+ i = 0;635635+ /* Allocate ddp buffers for this request */636636+ while (alloc_len < xfer_len) {637637+ dma_buf = csio_get_scsi_ddp(scsim);638638+ if (dma_buf == NULL || i > scsim->max_sge) {639639+ req->drv_status = -EBUSY;640640+ break;641641+ }642642+ alloc_len += dma_buf->len;643643+ /* Added to IO req */644644+ list_add_tail(&dma_buf->list, &req->gen_list);645645+ i++;646646+ }647647+648648+ if (!req->drv_status) {649649+ /* set number of ddp bufs used */650650+ req->nsge = i;651651+ csio_scsi_read(req);652652+ return;653653+ }654654+655655+ /* release dma descs */656656+ if (i > 0)657657+ csio_put_scsi_ddp_list(scsim, &req->gen_list, i);658658+}659659+660660+/*661661+ * csio_scsi_init_abrt_cls_wr - Initialize an ABORT/CLOSE WR.662662+ * @req: IO req structure.663663+ * @addr: DMA location to place the payload.664664+ * @size: Size of WR665665+ * @abort: abort OR close666666+ *667667+ * Wrapper for populating fw_scsi_cmd_wr.668668+ */669669+static inline void670670+csio_scsi_init_abrt_cls_wr(struct csio_ioreq *req, void *addr, uint32_t size,671671+ bool abort)672672+{673673+ struct csio_hw *hw = req->lnode->hwp;674674+ struct csio_rnode *rn = req->rnode;675675+ struct fw_scsi_abrt_cls_wr *wr = (struct fw_scsi_abrt_cls_wr *)addr;676676+677677+ wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_ABRT_CLS_WR));678678+ wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) |679679+ FW_WR_LEN16(680680+ DIV_ROUND_UP(size, 16)));681681+682682+ wr->cookie = (uintptr_t) req;683683+ 
wr->iqid = (uint16_t)cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));684684+ wr->tmo_val = (uint8_t) req->tmo;685685+ /* 0 for CHK_ALL_IO tells FW to look up t_cookie */686686+ wr->sub_opcode_to_chk_all_io =687687+ (FW_SCSI_ABRT_CLS_WR_SUB_OPCODE(abort) |688688+ FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO(0));689689+ wr->r3[0] = 0;690690+ wr->r3[1] = 0;691691+ wr->r3[2] = 0;692692+ wr->r3[3] = 0;693693+ /* Since we re-use the same ioreq for abort as well */694694+ wr->t_cookie = (uintptr_t) req;695695+}696696+697697+static inline void698698+csio_scsi_abrt_cls(struct csio_ioreq *req, bool abort)699699+{700700+ struct csio_wr_pair wrp;701701+ struct csio_hw *hw = req->lnode->hwp;702702+ uint32_t size = ALIGN(sizeof(struct fw_scsi_abrt_cls_wr), 16);703703+704704+ req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);705705+ if (req->drv_status != 0)706706+ return;707707+708708+ if (wrp.size1 >= size) {709709+ /* Initialize WR in one shot */710710+ csio_scsi_init_abrt_cls_wr(req, wrp.addr1, size, abort);711711+ } else {712712+ uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);713713+ /*714714+ * Make a temporary copy of the WR and write back715715+ * the copy into the WR pair.716716+ */717717+ csio_scsi_init_abrt_cls_wr(req, (void *)tmpwr, size, abort);718718+ memcpy(wrp.addr1, tmpwr, wrp.size1);719719+ memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);720720+ }721721+}722722+723723+/*****************************************************************************/724724+/* START: SCSI SM */725725+/*****************************************************************************/726726+static void727727+csio_scsis_uninit(struct csio_ioreq *req, enum csio_scsi_ev evt)728728+{729729+ struct csio_hw *hw = req->lnode->hwp;730730+ struct csio_scsim *scsim = csio_hw_to_scsim(hw);731731+732732+ switch (evt) {733733+ case CSIO_SCSIE_START_IO:734734+735735+ if (req->nsge) {736736+ if (req->datadir == DMA_TO_DEVICE) {737737+ req->dcopy = 0;738738+ csio_scsi_write(req);739739+ } else740740+ 
csio_setup_ddp(scsim, req);741741+ } else {742742+ csio_scsi_cmd(req);743743+ }744744+745745+ if (likely(req->drv_status == 0)) {746746+ /* change state and enqueue on active_q */747747+ csio_set_state(&req->sm, csio_scsis_io_active);748748+ list_add_tail(&req->sm.sm_list, &scsim->active_q);749749+ csio_wr_issue(hw, req->eq_idx, false);750750+ CSIO_INC_STATS(scsim, n_active);751751+752752+ return;753753+ }754754+ break;755755+756756+ case CSIO_SCSIE_START_TM:757757+ csio_scsi_cmd(req);758758+ if (req->drv_status == 0) {759759+ /*760760+ * NOTE: We collect the affected I/Os prior to issuing761761+ * LUN reset, and not after it. This is to prevent762762+ * aborting I/Os that get issued after the LUN reset,763763+ * but prior to LUN reset completion (in the event that764764+ * the host stack has not blocked I/Os to a LUN that is765765+ * being reset.766766+ */767767+ csio_set_state(&req->sm, csio_scsis_tm_active);768768+ list_add_tail(&req->sm.sm_list, &scsim->active_q);769769+ csio_wr_issue(hw, req->eq_idx, false);770770+ CSIO_INC_STATS(scsim, n_tm_active);771771+ }772772+ return;773773+774774+ case CSIO_SCSIE_ABORT:775775+ case CSIO_SCSIE_CLOSE:776776+ /*777777+ * NOTE:778778+ * We could get here due to :779779+ * - a window in the cleanup path of the SCSI module780780+ * (csio_scsi_abort_io()). 
Please see NOTE in this function.781781+ * - a window in the time we tried to issue an abort/close782782+ * of a request to FW, and the FW completed the request783783+ * itself.784784+ * Print a message for now, and return INVAL either way.785785+ */786786+ req->drv_status = -EINVAL;787787+ csio_warn(hw, "Trying to abort/close completed IO:%p!\n", req);788788+ break;789789+790790+ default:791791+ csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);792792+ CSIO_DB_ASSERT(0);793793+ }794794+}795795+796796+static void797797+csio_scsis_io_active(struct csio_ioreq *req, enum csio_scsi_ev evt)798798+{799799+ struct csio_hw *hw = req->lnode->hwp;800800+ struct csio_scsim *scm = csio_hw_to_scsim(hw);801801+ struct csio_rnode *rn;802802+803803+ switch (evt) {804804+ case CSIO_SCSIE_COMPLETED:805805+ CSIO_DEC_STATS(scm, n_active);806806+ list_del_init(&req->sm.sm_list);807807+ csio_set_state(&req->sm, csio_scsis_uninit);808808+ /*809809+ * In MSIX mode, with multiple queues, the SCSI compeltions810810+ * could reach us sooner than the FW events sent to indicate811811+ * I-T nexus loss (link down, remote device logo etc). We812812+ * dont want to be returning such I/Os to the upper layer813813+ * immediately, since we wouldnt have reported the I-T nexus814814+ * loss itself. This forces us to serialize such completions815815+ * with the reporting of the I-T nexus loss. 
Therefore, we816816+ * internally queue up such up such completions in the rnode.817817+ * The reporting of I-T nexus loss to the upper layer is then818818+ * followed by the returning of I/Os in this internal queue.819819+ * Having another state alongwith another queue helps us take820820+ * actions for events such as ABORT received while we are821821+ * in this rnode queue.822822+ */823823+ if (unlikely(req->wr_status != FW_SUCCESS)) {824824+ rn = req->rnode;825825+ /*826826+ * FW says remote device is lost, but rnode827827+ * doesnt reflect it.828828+ */829829+ if (csio_scsi_itnexus_loss_error(req->wr_status) &&830830+ csio_is_rnode_ready(rn)) {831831+ csio_set_state(&req->sm,832832+ csio_scsis_shost_cmpl_await);833833+ list_add_tail(&req->sm.sm_list,834834+ &rn->host_cmpl_q);835835+ }836836+ }837837+838838+ break;839839+840840+ case CSIO_SCSIE_ABORT:841841+ csio_scsi_abrt_cls(req, SCSI_ABORT);842842+ if (req->drv_status == 0) {843843+ csio_wr_issue(hw, req->eq_idx, false);844844+ csio_set_state(&req->sm, csio_scsis_aborting);845845+ }846846+ break;847847+848848+ case CSIO_SCSIE_CLOSE:849849+ csio_scsi_abrt_cls(req, SCSI_CLOSE);850850+ if (req->drv_status == 0) {851851+ csio_wr_issue(hw, req->eq_idx, false);852852+ csio_set_state(&req->sm, csio_scsis_closing);853853+ }854854+ break;855855+856856+ case CSIO_SCSIE_DRVCLEANUP:857857+ req->wr_status = FW_HOSTERROR;858858+ CSIO_DEC_STATS(scm, n_active);859859+ csio_set_state(&req->sm, csio_scsis_uninit);860860+ break;861861+862862+ default:863863+ csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);864864+ CSIO_DB_ASSERT(0);865865+ }866866+}867867+868868+static void869869+csio_scsis_tm_active(struct csio_ioreq *req, enum csio_scsi_ev evt)870870+{871871+ struct csio_hw *hw = req->lnode->hwp;872872+ struct csio_scsim *scm = csio_hw_to_scsim(hw);873873+874874+ switch (evt) {875875+ case CSIO_SCSIE_COMPLETED:876876+ CSIO_DEC_STATS(scm, n_tm_active);877877+ list_del_init(&req->sm.sm_list);878878+ 
csio_set_state(&req->sm, csio_scsis_uninit);879879+880880+ break;881881+882882+ case CSIO_SCSIE_ABORT:883883+ csio_scsi_abrt_cls(req, SCSI_ABORT);884884+ if (req->drv_status == 0) {885885+ csio_wr_issue(hw, req->eq_idx, false);886886+ csio_set_state(&req->sm, csio_scsis_aborting);887887+ }888888+ break;889889+890890+891891+ case CSIO_SCSIE_CLOSE:892892+ csio_scsi_abrt_cls(req, SCSI_CLOSE);893893+ if (req->drv_status == 0) {894894+ csio_wr_issue(hw, req->eq_idx, false);895895+ csio_set_state(&req->sm, csio_scsis_closing);896896+ }897897+ break;898898+899899+ case CSIO_SCSIE_DRVCLEANUP:900900+ req->wr_status = FW_HOSTERROR;901901+ CSIO_DEC_STATS(scm, n_tm_active);902902+ csio_set_state(&req->sm, csio_scsis_uninit);903903+ break;904904+905905+ default:906906+ csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);907907+ CSIO_DB_ASSERT(0);908908+ }909909+}910910+911911+static void912912+csio_scsis_aborting(struct csio_ioreq *req, enum csio_scsi_ev evt)913913+{914914+ struct csio_hw *hw = req->lnode->hwp;915915+ struct csio_scsim *scm = csio_hw_to_scsim(hw);916916+917917+ switch (evt) {918918+ case CSIO_SCSIE_COMPLETED:919919+ csio_dbg(hw,920920+ "ioreq %p recvd cmpltd (wr_status:%d) "921921+ "in aborting st\n", req, req->wr_status);922922+ /*923923+ * Use -ECANCELED to explicitly tell the ABORTED event that924924+ * the original I/O was returned to driver by FW.925925+ * We dont really care if the I/O was returned with success by926926+ * FW (because the ABORT and completion of the I/O crossed each927927+ * other), or any other return value. 
Once we are in aborting928928+ * state, the success or failure of the I/O is unimportant to929929+ * us.930930+ */931931+ req->drv_status = -ECANCELED;932932+ break;933933+934934+ case CSIO_SCSIE_ABORT:935935+ CSIO_INC_STATS(scm, n_abrt_dups);936936+ break;937937+938938+ case CSIO_SCSIE_ABORTED:939939+940940+ csio_dbg(hw, "abort of %p return status:0x%x drv_status:%x\n",941941+ req, req->wr_status, req->drv_status);942942+ /*943943+ * Check if original I/O WR completed before the Abort944944+ * completion.945945+ */946946+ if (req->drv_status != -ECANCELED) {947947+ csio_warn(hw,948948+ "Abort completed before original I/O,"949949+ " req:%p\n", req);950950+ CSIO_DB_ASSERT(0);951951+ }952952+953953+ /*954954+ * There are the following possible scenarios:955955+ * 1. The abort completed successfully, FW returned FW_SUCCESS.956956+ * 2. The completion of an I/O and the receipt of957957+ * abort for that I/O by the FW crossed each other.958958+ * The FW returned FW_EINVAL. The original I/O would have959959+ * returned with FW_SUCCESS or any other SCSI error.960960+ * 3. The FW couldnt sent the abort out on the wire, as there961961+ * was an I-T nexus loss (link down, remote device logged962962+ * out etc). FW sent back an appropriate IT nexus loss status963963+ * for the abort.964964+ * 4. FW sent an abort, but abort timed out (remote device965965+ * didnt respond). FW replied back with966966+ * FW_SCSI_ABORT_TIMEDOUT.967967+ * 5. FW couldnt genuinely abort the request for some reason,968968+ * and sent us an error.969969+ *970970+ * The first 3 scenarios are treated as succesful abort971971+ * operations by the host, while the last 2 are failed attempts972972+ * to abort. 
Manipulate the return value of the request973973+ * appropriately, so that host can convey these results974974+ * back to the upper layer.975975+ */976976+ if ((req->wr_status == FW_SUCCESS) ||977977+ (req->wr_status == FW_EINVAL) ||978978+ csio_scsi_itnexus_loss_error(req->wr_status))979979+ req->wr_status = FW_SCSI_ABORT_REQUESTED;980980+981981+ CSIO_DEC_STATS(scm, n_active);982982+ list_del_init(&req->sm.sm_list);983983+ csio_set_state(&req->sm, csio_scsis_uninit);984984+ break;985985+986986+ case CSIO_SCSIE_DRVCLEANUP:987987+ req->wr_status = FW_HOSTERROR;988988+ CSIO_DEC_STATS(scm, n_active);989989+ csio_set_state(&req->sm, csio_scsis_uninit);990990+ break;991991+992992+ case CSIO_SCSIE_CLOSE:993993+ /*994994+ * We can receive this event from the module995995+ * cleanup paths, if the FW forgot to reply to the ABORT WR996996+ * and left this ioreq in this state. For now, just ignore997997+ * the event. The CLOSE event is sent to this state, as998998+ * the LINK may have already gone down.999999+ */10001000+ break;10011001+10021002+ default:10031003+ csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);10041004+ CSIO_DB_ASSERT(0);10051005+ }10061006+}10071007+10081008+static void10091009+csio_scsis_closing(struct csio_ioreq *req, enum csio_scsi_ev evt)10101010+{10111011+ struct csio_hw *hw = req->lnode->hwp;10121012+ struct csio_scsim *scm = csio_hw_to_scsim(hw);10131013+10141014+ switch (evt) {10151015+ case CSIO_SCSIE_COMPLETED:10161016+ csio_dbg(hw,10171017+ "ioreq %p recvd cmpltd (wr_status:%d) "10181018+ "in closing st\n", req, req->wr_status);10191019+ /*10201020+ * Use -ECANCELED to explicitly tell the CLOSED event that10211021+ * the original I/O was returned to driver by FW.10221022+ * We dont really care if the I/O was returned with success by10231023+ * FW (because the CLOSE and completion of the I/O crossed each10241024+ * other), or any other return value. 
Once we are in aborting10251025+ * state, the success or failure of the I/O is unimportant to10261026+ * us.10271027+ */10281028+ req->drv_status = -ECANCELED;10291029+ break;10301030+10311031+ case CSIO_SCSIE_CLOSED:10321032+ /*10331033+ * Check if original I/O WR completed before the Close10341034+ * completion.10351035+ */10361036+ if (req->drv_status != -ECANCELED) {10371037+ csio_fatal(hw,10381038+ "Close completed before original I/O,"10391039+ " req:%p\n", req);10401040+ CSIO_DB_ASSERT(0);10411041+ }10421042+10431043+ /*10441044+ * Either close succeeded, or we issued close to FW at the10451045+ * same time FW compelted it to us. Either way, the I/O10461046+ * is closed.10471047+ */10481048+ CSIO_DB_ASSERT((req->wr_status == FW_SUCCESS) ||10491049+ (req->wr_status == FW_EINVAL));10501050+ req->wr_status = FW_SCSI_CLOSE_REQUESTED;10511051+10521052+ CSIO_DEC_STATS(scm, n_active);10531053+ list_del_init(&req->sm.sm_list);10541054+ csio_set_state(&req->sm, csio_scsis_uninit);10551055+ break;10561056+10571057+ case CSIO_SCSIE_CLOSE:10581058+ break;10591059+10601060+ case CSIO_SCSIE_DRVCLEANUP:10611061+ req->wr_status = FW_HOSTERROR;10621062+ CSIO_DEC_STATS(scm, n_active);10631063+ csio_set_state(&req->sm, csio_scsis_uninit);10641064+ break;10651065+10661066+ default:10671067+ csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);10681068+ CSIO_DB_ASSERT(0);10691069+ }10701070+}10711071+10721072+static void10731073+csio_scsis_shost_cmpl_await(struct csio_ioreq *req, enum csio_scsi_ev evt)10741074+{10751075+ switch (evt) {10761076+ case CSIO_SCSIE_ABORT:10771077+ case CSIO_SCSIE_CLOSE:10781078+ /*10791079+ * Just succeed the abort request, and hope that10801080+ * the remote device unregister path will cleanup10811081+ * this I/O to the upper layer within a sane10821082+ * amount of time.10831083+ */10841084+ /*10851085+ * A close can come in during a LINK DOWN. 
The FW would have10861086+ * returned us the I/O back, but not the remote device lost10871087+ * FW event. In this interval, if the I/O times out at the upper10881088+ * layer, a close can come in. Take the same action as abort:10891089+ * return success, and hope that the remote device unregister10901090+ * path will cleanup this I/O. If the FW still doesnt send10911091+ * the msg, the close times out, and the upper layer resorts10921092+ * to the next level of error recovery.10931093+ */10941094+ req->drv_status = 0;10951095+ break;10961096+ case CSIO_SCSIE_DRVCLEANUP:10971097+ csio_set_state(&req->sm, csio_scsis_uninit);10981098+ break;10991099+ default:11001100+ csio_dbg(req->lnode->hwp, "Unhandled event:%d sent to req:%p\n",11011101+ evt, req);11021102+ CSIO_DB_ASSERT(0);11031103+ }11041104+}11051105+11061106+/*11071107+ * csio_scsi_cmpl_handler - WR completion handler for SCSI.11081108+ * @hw: HW module.11091109+ * @wr: The completed WR from the ingress queue.11101110+ * @len: Length of the WR.11111111+ * @flb: Freelist buffer array.11121112+ * @priv: Private object11131113+ * @scsiwr: Pointer to SCSI WR.11141114+ *11151115+ * This is the WR completion handler called per completion from the11161116+ * ISR. It is called with lock held. It walks past the RSS and CPL message11171117+ * header where the actual WR is present.11181118+ * It then gets the status, WR handle (ioreq pointer) and the len of11191119+ * the WR, based on WR opcode. 
Only on a non-good status is the entire11201120+ * WR copied into the WR cache (ioreq->fw_wr).11211121+ * The ioreq corresponding to the WR is returned to the caller.11221122+ * NOTE: The SCSI queue doesnt allocate a freelist today, hence11231123+ * no freelist buffer is expected.11241124+ */11251125+struct csio_ioreq *11261126+csio_scsi_cmpl_handler(struct csio_hw *hw, void *wr, uint32_t len,11271127+ struct csio_fl_dma_buf *flb, void *priv, uint8_t **scsiwr)11281128+{11291129+ struct csio_ioreq *ioreq = NULL;11301130+ struct cpl_fw6_msg *cpl;11311131+ uint8_t *tempwr;11321132+ uint8_t status;11331133+ struct csio_scsim *scm = csio_hw_to_scsim(hw);11341134+11351135+ /* skip RSS header */11361136+ cpl = (struct cpl_fw6_msg *)((uintptr_t)wr + sizeof(__be64));11371137+11381138+ if (unlikely(cpl->opcode != CPL_FW6_MSG)) {11391139+ csio_warn(hw, "Error: Invalid CPL msg %x recvd on SCSI q\n",11401140+ cpl->opcode);11411141+ CSIO_INC_STATS(scm, n_inval_cplop);11421142+ return NULL;11431143+ }11441144+11451145+ tempwr = (uint8_t *)(cpl->data);11461146+ status = csio_wr_status(tempwr);11471147+ *scsiwr = tempwr;11481148+11491149+ if (likely((*tempwr == FW_SCSI_READ_WR) ||11501150+ (*tempwr == FW_SCSI_WRITE_WR) ||11511151+ (*tempwr == FW_SCSI_CMD_WR))) {11521152+ ioreq = (struct csio_ioreq *)((uintptr_t)11531153+ (((struct fw_scsi_read_wr *)tempwr)->cookie));11541154+ CSIO_DB_ASSERT(virt_addr_valid(ioreq));11551155+11561156+ ioreq->wr_status = status;11571157+11581158+ return ioreq;11591159+ }11601160+11611161+ if (*tempwr == FW_SCSI_ABRT_CLS_WR) {11621162+ ioreq = (struct csio_ioreq *)((uintptr_t)11631163+ (((struct fw_scsi_abrt_cls_wr *)tempwr)->cookie));11641164+ CSIO_DB_ASSERT(virt_addr_valid(ioreq));11651165+11661166+ ioreq->wr_status = status;11671167+ return ioreq;11681168+ }11691169+11701170+ csio_warn(hw, "WR with invalid opcode in SCSI IQ: %x\n", *tempwr);11711171+ CSIO_INC_STATS(scm, n_inval_scsiop);11721172+ return NULL;11731173+}11741174+11751175+/*11761176+ * 
csio_scsi_cleanup_io_q - Cleanup the given queue.11771177+ * @scm: SCSI module.11781178+ * @q: Queue to be cleaned up.11791179+ *11801180+ * Called with lock held. Has to exit with lock held.11811181+ */11821182+void11831183+csio_scsi_cleanup_io_q(struct csio_scsim *scm, struct list_head *q)11841184+{11851185+ struct csio_hw *hw = scm->hw;11861186+ struct csio_ioreq *ioreq;11871187+ struct list_head *tmp, *next;11881188+ struct scsi_cmnd *scmnd;11891189+11901190+ /* Call back the completion routines of the active_q */11911191+ list_for_each_safe(tmp, next, q) {11921192+ ioreq = (struct csio_ioreq *)tmp;11931193+ csio_scsi_drvcleanup(ioreq);11941194+ list_del_init(&ioreq->sm.sm_list);11951195+ scmnd = csio_scsi_cmnd(ioreq);11961196+ spin_unlock_irq(&hw->lock);11971197+11981198+ /*11991199+ * Upper layers may have cleared this command, hence this12001200+ * check to avoid accessing stale references.12011201+ */12021202+ if (scmnd != NULL)12031203+ ioreq->io_cbfn(hw, ioreq);12041204+12051205+ spin_lock_irq(&scm->freelist_lock);12061206+ csio_put_scsi_ioreq(scm, ioreq);12071207+ spin_unlock_irq(&scm->freelist_lock);12081208+12091209+ spin_lock_irq(&hw->lock);12101210+ }12111211+}12121212+12131213+#define CSIO_SCSI_ABORT_Q_POLL_MS 200012141214+12151215+static void12161216+csio_abrt_cls(struct csio_ioreq *ioreq, struct scsi_cmnd *scmnd)12171217+{12181218+ struct csio_lnode *ln = ioreq->lnode;12191219+ struct csio_hw *hw = ln->hwp;12201220+ int ready = 0;12211221+ struct csio_scsim *scsim = csio_hw_to_scsim(hw);12221222+ int rv;12231223+12241224+ if (csio_scsi_cmnd(ioreq) != scmnd) {12251225+ CSIO_INC_STATS(scsim, n_abrt_race_comp);12261226+ return;12271227+ }12281228+12291229+ ready = csio_is_lnode_ready(ln);12301230+12311231+ rv = csio_do_abrt_cls(hw, ioreq, (ready ? 
SCSI_ABORT : SCSI_CLOSE));12321232+ if (rv != 0) {12331233+ if (ready)12341234+ CSIO_INC_STATS(scsim, n_abrt_busy_error);12351235+ else12361236+ CSIO_INC_STATS(scsim, n_cls_busy_error);12371237+ }12381238+}12391239+12401240+/*12411241+ * csio_scsi_abort_io_q - Abort all I/Os on given queue12421242+ * @scm: SCSI module.12431243+ * @q: Queue to abort.12441244+ * @tmo: Timeout in ms12451245+ *12461246+ * Attempt to abort all I/Os on given queue, and wait for a max12471247+ * of tmo milliseconds for them to complete. Returns success12481248+ * if all I/Os are aborted. Else returns -ETIMEDOUT.12491249+ * Should be entered with lock held. Exits with lock held.12501250+ * NOTE:12511251+ * Lock has to be held across the loop that aborts I/Os, since dropping the lock12521252+ * in between can cause the list to be corrupted. As a result, the caller12531253+ * of this function has to ensure that the number of I/os to be aborted12541254+ * is finite enough to not cause lock-held-for-too-long issues.12551255+ */12561256+static int12571257+csio_scsi_abort_io_q(struct csio_scsim *scm, struct list_head *q, uint32_t tmo)12581258+{12591259+ struct csio_hw *hw = scm->hw;12601260+ struct list_head *tmp, *next;12611261+ int count = DIV_ROUND_UP(tmo, CSIO_SCSI_ABORT_Q_POLL_MS);12621262+ struct scsi_cmnd *scmnd;12631263+12641264+ if (list_empty(q))12651265+ return 0;12661266+12671267+ csio_dbg(hw, "Aborting SCSI I/Os\n");12681268+12691269+ /* Now abort/close I/Os in the queue passed */12701270+ list_for_each_safe(tmp, next, q) {12711271+ scmnd = csio_scsi_cmnd((struct csio_ioreq *)tmp);12721272+ csio_abrt_cls((struct csio_ioreq *)tmp, scmnd);12731273+ }12741274+12751275+ /* Wait till all active I/Os are completed/aborted/closed */12761276+ while (!list_empty(q) && count--) {12771277+ spin_unlock_irq(&hw->lock);12781278+ msleep(CSIO_SCSI_ABORT_Q_POLL_MS);12791279+ spin_lock_irq(&hw->lock);12801280+ }12811281+12821282+ /* all aborts completed */12831283+ if (list_empty(q))12841284+ return 
0;12851285+12861286+ return -ETIMEDOUT;12871287+}12881288+12891289+/*12901290+ * csio_scsim_cleanup_io - Cleanup all I/Os in SCSI module.12911291+ * @scm: SCSI module.12921292+ * @abort: abort required.12931293+ * Called with lock held, should exit with lock held.12941294+ * Can sleep when waiting for I/Os to complete.12951295+ */12961296+int12971297+csio_scsim_cleanup_io(struct csio_scsim *scm, bool abort)12981298+{12991299+ struct csio_hw *hw = scm->hw;13001300+ int rv = 0;13011301+ int count = DIV_ROUND_UP(60 * 1000, CSIO_SCSI_ABORT_Q_POLL_MS);13021302+13031303+ /* No I/Os pending */13041304+ if (list_empty(&scm->active_q))13051305+ return 0;13061306+13071307+ /* Wait until all active I/Os are completed */13081308+ while (!list_empty(&scm->active_q) && count--) {13091309+ spin_unlock_irq(&hw->lock);13101310+ msleep(CSIO_SCSI_ABORT_Q_POLL_MS);13111311+ spin_lock_irq(&hw->lock);13121312+ }13131313+13141314+ /* all I/Os completed */13151315+ if (list_empty(&scm->active_q))13161316+ return 0;13171317+13181318+ /* Else abort */13191319+ if (abort) {13201320+ rv = csio_scsi_abort_io_q(scm, &scm->active_q, 30000);13211321+ if (rv == 0)13221322+ return rv;13231323+ csio_dbg(hw, "Some I/O aborts timed out, cleaning up..\n");13241324+ }13251325+13261326+ csio_scsi_cleanup_io_q(scm, &scm->active_q);13271327+13281328+ CSIO_DB_ASSERT(list_empty(&scm->active_q));13291329+13301330+ return rv;13311331+}13321332+13331333+/*13341334+ * csio_scsim_cleanup_io_lnode - Cleanup all I/Os of given lnode.13351335+ * @scm: SCSI module.13361336+ * @lnode: lnode13371337+ *13381338+ * Called with lock held, should exit with lock held.13391339+ * Can sleep (with dropped lock) when waiting for I/Os to complete.13401340+ */13411341+int13421342+csio_scsim_cleanup_io_lnode(struct csio_scsim *scm, struct csio_lnode *ln)13431343+{13441344+ struct csio_hw *hw = scm->hw;13451345+ struct csio_scsi_level_data sld;13461346+ int rv;13471347+ int count = DIV_ROUND_UP(60 * 1000, 
CSIO_SCSI_ABORT_Q_POLL_MS);13481348+13491349+ csio_dbg(hw, "Gathering all SCSI I/Os on lnode %p\n", ln);13501350+13511351+ sld.level = CSIO_LEV_LNODE;13521352+ sld.lnode = ln;13531353+ INIT_LIST_HEAD(&ln->cmpl_q);13541354+ csio_scsi_gather_active_ios(scm, &sld, &ln->cmpl_q);13551355+13561356+ /* No I/Os pending on this lnode */13571357+ if (list_empty(&ln->cmpl_q))13581358+ return 0;13591359+13601360+ /* Wait until all active I/Os on this lnode are completed */13611361+ while (!list_empty(&ln->cmpl_q) && count--) {13621362+ spin_unlock_irq(&hw->lock);13631363+ msleep(CSIO_SCSI_ABORT_Q_POLL_MS);13641364+ spin_lock_irq(&hw->lock);13651365+ }13661366+13671367+ /* all I/Os completed */13681368+ if (list_empty(&ln->cmpl_q))13691369+ return 0;13701370+13711371+ csio_dbg(hw, "Some I/Os pending on ln:%p, aborting them..\n", ln);13721372+13731373+ /* I/Os are pending, abort them */13741374+ rv = csio_scsi_abort_io_q(scm, &ln->cmpl_q, 30000);13751375+ if (rv != 0) {13761376+ csio_dbg(hw, "Some I/O aborts timed out, cleaning up..\n");13771377+ csio_scsi_cleanup_io_q(scm, &ln->cmpl_q);13781378+ }13791379+13801380+ CSIO_DB_ASSERT(list_empty(&ln->cmpl_q));13811381+13821382+ return rv;13831383+}13841384+13851385+static ssize_t13861386+csio_show_hw_state(struct device *dev,13871387+ struct device_attribute *attr, char *buf)13881388+{13891389+ struct csio_lnode *ln = shost_priv(class_to_shost(dev));13901390+ struct csio_hw *hw = csio_lnode_to_hw(ln);13911391+13921392+ if (csio_is_hw_ready(hw))13931393+ return snprintf(buf, PAGE_SIZE, "ready\n");13941394+ else13951395+ return snprintf(buf, PAGE_SIZE, "not ready\n");13961396+}13971397+13981398+/* Device reset */13991399+static ssize_t14001400+csio_device_reset(struct device *dev,14011401+ struct device_attribute *attr, const char *buf, size_t count)14021402+{14031403+ struct csio_lnode *ln = shost_priv(class_to_shost(dev));14041404+ struct csio_hw *hw = csio_lnode_to_hw(ln);14051405+14061406+ if (*buf != '1')14071407+ return 
-EINVAL;14081408+14091409+ /* Delete NPIV lnodes */14101410+ csio_lnodes_exit(hw, 1);14111411+14121412+ /* Block upper IOs */14131413+ csio_lnodes_block_request(hw);14141414+14151415+ spin_lock_irq(&hw->lock);14161416+ csio_hw_reset(hw);14171417+ spin_unlock_irq(&hw->lock);14181418+14191419+ /* Unblock upper IOs */14201420+ csio_lnodes_unblock_request(hw);14211421+ return count;14221422+}14231423+14241424+/* disable port */14251425+static ssize_t14261426+csio_disable_port(struct device *dev,14271427+ struct device_attribute *attr, const char *buf, size_t count)14281428+{14291429+ struct csio_lnode *ln = shost_priv(class_to_shost(dev));14301430+ struct csio_hw *hw = csio_lnode_to_hw(ln);14311431+ bool disable;14321432+14331433+ if (*buf == '1' || *buf == '0')14341434+ disable = (*buf == '1') ? true : false;14351435+ else14361436+ return -EINVAL;14371437+14381438+ /* Block upper IOs */14391439+ csio_lnodes_block_by_port(hw, ln->portid);14401440+14411441+ spin_lock_irq(&hw->lock);14421442+ csio_disable_lnodes(hw, ln->portid, disable);14431443+ spin_unlock_irq(&hw->lock);14441444+14451445+ /* Unblock upper IOs */14461446+ csio_lnodes_unblock_by_port(hw, ln->portid);14471447+ return count;14481448+}14491449+14501450+/* Show debug level */14511451+static ssize_t14521452+csio_show_dbg_level(struct device *dev,14531453+ struct device_attribute *attr, char *buf)14541454+{14551455+ struct csio_lnode *ln = shost_priv(class_to_shost(dev));14561456+14571457+ return snprintf(buf, PAGE_SIZE, "%x\n", ln->params.log_level);14581458+}14591459+14601460+/* Store debug level */14611461+static ssize_t14621462+csio_store_dbg_level(struct device *dev,14631463+ struct device_attribute *attr, const char *buf, size_t count)14641464+{14651465+ struct csio_lnode *ln = shost_priv(class_to_shost(dev));14661466+ struct csio_hw *hw = csio_lnode_to_hw(ln);14671467+ uint32_t dbg_level = 0;14681468+14691469+ if (!isdigit(buf[0]))14701470+ return -EINVAL;14711471+14721472+ if (sscanf(buf, "%i", 
&dbg_level))14731473+ return -EINVAL;14741474+14751475+ ln->params.log_level = dbg_level;14761476+ hw->params.log_level = dbg_level;14771477+14781478+ return 0;14791479+}14801480+14811481+static DEVICE_ATTR(hw_state, S_IRUGO, csio_show_hw_state, NULL);14821482+static DEVICE_ATTR(device_reset, S_IRUGO | S_IWUSR, NULL, csio_device_reset);14831483+static DEVICE_ATTR(disable_port, S_IRUGO | S_IWUSR, NULL, csio_disable_port);14841484+static DEVICE_ATTR(dbg_level, S_IRUGO | S_IWUSR, csio_show_dbg_level,14851485+ csio_store_dbg_level);14861486+14871487+static struct device_attribute *csio_fcoe_lport_attrs[] = {14881488+ &dev_attr_hw_state,14891489+ &dev_attr_device_reset,14901490+ &dev_attr_disable_port,14911491+ &dev_attr_dbg_level,14921492+ NULL,14931493+};14941494+14951495+static ssize_t14961496+csio_show_num_reg_rnodes(struct device *dev,14971497+ struct device_attribute *attr, char *buf)14981498+{14991499+ struct csio_lnode *ln = shost_priv(class_to_shost(dev));15001500+15011501+ return snprintf(buf, PAGE_SIZE, "%d\n", ln->num_reg_rnodes);15021502+}15031503+15041504+static DEVICE_ATTR(num_reg_rnodes, S_IRUGO, csio_show_num_reg_rnodes, NULL);15051505+15061506+static struct device_attribute *csio_fcoe_vport_attrs[] = {15071507+ &dev_attr_num_reg_rnodes,15081508+ &dev_attr_dbg_level,15091509+ NULL,15101510+};15111511+15121512+static inline uint32_t15131513+csio_scsi_copy_to_sgl(struct csio_hw *hw, struct csio_ioreq *req)15141514+{15151515+ struct scsi_cmnd *scmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req);15161516+ struct scatterlist *sg;15171517+ uint32_t bytes_left;15181518+ uint32_t bytes_copy;15191519+ uint32_t buf_off = 0;15201520+ uint32_t start_off = 0;15211521+ uint32_t sg_off = 0;15221522+ void *sg_addr;15231523+ void *buf_addr;15241524+ struct csio_dma_buf *dma_buf;15251525+15261526+ bytes_left = scsi_bufflen(scmnd);15271527+ sg = scsi_sglist(scmnd);15281528+ dma_buf = (struct csio_dma_buf *)csio_list_next(&req->gen_list);15291529+15301530+ /* Copy data from 
driver buffer to SGs of SCSI CMD */15311531+ while (bytes_left > 0 && sg && dma_buf) {15321532+ if (buf_off >= dma_buf->len) {15331533+ buf_off = 0;15341534+ dma_buf = (struct csio_dma_buf *)15351535+ csio_list_next(dma_buf);15361536+ continue;15371537+ }15381538+15391539+ if (start_off >= sg->length) {15401540+ start_off -= sg->length;15411541+ sg = sg_next(sg);15421542+ continue;15431543+ }15441544+15451545+ buf_addr = dma_buf->vaddr + buf_off;15461546+ sg_off = sg->offset + start_off;15471547+ bytes_copy = min((dma_buf->len - buf_off),15481548+ sg->length - start_off);15491549+ bytes_copy = min((uint32_t)(PAGE_SIZE - (sg_off & ~PAGE_MASK)),15501550+ bytes_copy);15511551+15521552+ sg_addr = kmap_atomic(sg_page(sg) + (sg_off >> PAGE_SHIFT));15531553+ if (!sg_addr) {15541554+ csio_err(hw, "failed to kmap sg:%p of ioreq:%p\n",15551555+ sg, req);15561556+ break;15571557+ }15581558+15591559+ csio_dbg(hw, "copy_to_sgl:sg_addr %p sg_off %d buf %p len %d\n",15601560+ sg_addr, sg_off, buf_addr, bytes_copy);15611561+ memcpy(sg_addr + (sg_off & ~PAGE_MASK), buf_addr, bytes_copy);15621562+ kunmap_atomic(sg_addr);15631563+15641564+ start_off += bytes_copy;15651565+ buf_off += bytes_copy;15661566+ bytes_left -= bytes_copy;15671567+ }15681568+15691569+ if (bytes_left > 0)15701570+ return DID_ERROR;15711571+ else15721572+ return DID_OK;15731573+}15741574+15751575+/*15761576+ * csio_scsi_err_handler - SCSI error handler.15771577+ * @hw: HW module.15781578+ * @req: IO request.15791579+ *15801580+ */15811581+static inline void15821582+csio_scsi_err_handler(struct csio_hw *hw, struct csio_ioreq *req)15831583+{15841584+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req);15851585+ struct csio_scsim *scm = csio_hw_to_scsim(hw);15861586+ struct fcp_resp_with_ext *fcp_resp;15871587+ struct fcp_resp_rsp_info *rsp_info;15881588+ struct csio_dma_buf *dma_buf;15891589+ uint8_t flags, scsi_status = 0;15901590+ uint32_t host_status = DID_OK;15911591+ uint32_t rsp_len = 0, sns_len 
= 0;15921592+ struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);15931593+15941594+15951595+ switch (req->wr_status) {15961596+ case FW_HOSTERROR:15971597+ if (unlikely(!csio_is_hw_ready(hw)))15981598+ return;15991599+16001600+ host_status = DID_ERROR;16011601+ CSIO_INC_STATS(scm, n_hosterror);16021602+16031603+ break;16041604+ case FW_SCSI_RSP_ERR:16051605+ dma_buf = &req->dma_buf;16061606+ fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr;16071607+ rsp_info = (struct fcp_resp_rsp_info *)(fcp_resp + 1);16081608+ flags = fcp_resp->resp.fr_flags;16091609+ scsi_status = fcp_resp->resp.fr_status;16101610+16111611+ if (flags & FCP_RSP_LEN_VAL) {16121612+ rsp_len = be32_to_cpu(fcp_resp->ext.fr_rsp_len);16131613+ if ((rsp_len != 0 && rsp_len != 4 && rsp_len != 8) ||16141614+ (rsp_info->rsp_code != FCP_TMF_CMPL)) {16151615+ host_status = DID_ERROR;16161616+ goto out;16171617+ }16181618+ }16191619+16201620+ if ((flags & FCP_SNS_LEN_VAL) && fcp_resp->ext.fr_sns_len) {16211621+ sns_len = be32_to_cpu(fcp_resp->ext.fr_sns_len);16221622+ if (sns_len > SCSI_SENSE_BUFFERSIZE)16231623+ sns_len = SCSI_SENSE_BUFFERSIZE;16241624+16251625+ memcpy(cmnd->sense_buffer,16261626+ &rsp_info->_fr_resvd[0] + rsp_len, sns_len);16271627+ CSIO_INC_STATS(scm, n_autosense);16281628+ }16291629+16301630+ scsi_set_resid(cmnd, 0);16311631+16321632+ /* Under run */16331633+ if (flags & FCP_RESID_UNDER) {16341634+ scsi_set_resid(cmnd,16351635+ be32_to_cpu(fcp_resp->ext.fr_resid));16361636+16371637+ if (!(flags & FCP_SNS_LEN_VAL) &&16381638+ (scsi_status == SAM_STAT_GOOD) &&16391639+ ((scsi_bufflen(cmnd) - scsi_get_resid(cmnd))16401640+ < cmnd->underflow))16411641+ host_status = DID_ERROR;16421642+ } else if (flags & FCP_RESID_OVER)16431643+ host_status = DID_ERROR;16441644+16451645+ CSIO_INC_STATS(scm, n_rsperror);16461646+ break;16471647+16481648+ case FW_SCSI_OVER_FLOW_ERR:16491649+ csio_warn(hw,16501650+ "Over-flow error,cmnd:0x%x expected len:0x%x"16511651+ " resid:0x%x\n", 
cmnd->cmnd[0],16521652+ scsi_bufflen(cmnd), scsi_get_resid(cmnd));16531653+ host_status = DID_ERROR;16541654+ CSIO_INC_STATS(scm, n_ovflerror);16551655+ break;16561656+16571657+ case FW_SCSI_UNDER_FLOW_ERR:16581658+ csio_warn(hw,16591659+ "Under-flow error,cmnd:0x%x expected"16601660+ " len:0x%x resid:0x%x lun:0x%x ssn:0x%x\n",16611661+ cmnd->cmnd[0], scsi_bufflen(cmnd),16621662+ scsi_get_resid(cmnd), cmnd->device->lun,16631663+ rn->flowid);16641664+ host_status = DID_ERROR;16651665+ CSIO_INC_STATS(scm, n_unflerror);16661666+ break;16671667+16681668+ case FW_SCSI_ABORT_REQUESTED:16691669+ case FW_SCSI_ABORTED:16701670+ case FW_SCSI_CLOSE_REQUESTED:16711671+ csio_dbg(hw, "Req %p cmd:%p op:%x %s\n", req, cmnd,16721672+ cmnd->cmnd[0],16731673+ (req->wr_status == FW_SCSI_CLOSE_REQUESTED) ?16741674+ "closed" : "aborted");16751675+ /*16761676+ * csio_eh_abort_handler checks this value to16771677+ * succeed or fail the abort request.16781678+ */16791679+ host_status = DID_REQUEUE;16801680+ if (req->wr_status == FW_SCSI_CLOSE_REQUESTED)16811681+ CSIO_INC_STATS(scm, n_closed);16821682+ else16831683+ CSIO_INC_STATS(scm, n_aborted);16841684+ break;16851685+16861686+ case FW_SCSI_ABORT_TIMEDOUT:16871687+ /* FW timed out the abort itself */16881688+ csio_dbg(hw, "FW timed out abort req:%p cmnd:%p status:%x\n",16891689+ req, cmnd, req->wr_status);16901690+ host_status = DID_ERROR;16911691+ CSIO_INC_STATS(scm, n_abrt_timedout);16921692+ break;16931693+16941694+ case FW_RDEV_NOT_READY:16951695+ /*16961696+ * In firmware, a RDEV can get into this state16971697+ * temporarily, before moving into dissapeared/lost16981698+ * state. 
So, the driver should complete the request equivalent16991699+ * to device-disappeared!17001700+ */17011701+ CSIO_INC_STATS(scm, n_rdev_nr_error);17021702+ host_status = DID_ERROR;17031703+ break;17041704+17051705+ case FW_ERR_RDEV_LOST:17061706+ CSIO_INC_STATS(scm, n_rdev_lost_error);17071707+ host_status = DID_ERROR;17081708+ break;17091709+17101710+ case FW_ERR_RDEV_LOGO:17111711+ CSIO_INC_STATS(scm, n_rdev_logo_error);17121712+ host_status = DID_ERROR;17131713+ break;17141714+17151715+ case FW_ERR_RDEV_IMPL_LOGO:17161716+ host_status = DID_ERROR;17171717+ break;17181718+17191719+ case FW_ERR_LINK_DOWN:17201720+ CSIO_INC_STATS(scm, n_link_down_error);17211721+ host_status = DID_ERROR;17221722+ break;17231723+17241724+ case FW_FCOE_NO_XCHG:17251725+ CSIO_INC_STATS(scm, n_no_xchg_error);17261726+ host_status = DID_ERROR;17271727+ break;17281728+17291729+ default:17301730+ csio_err(hw, "Unknown SCSI FW WR status:%d req:%p cmnd:%p\n",17311731+ req->wr_status, req, cmnd);17321732+ CSIO_DB_ASSERT(0);17331733+17341734+ CSIO_INC_STATS(scm, n_unknown_error);17351735+ host_status = DID_ERROR;17361736+ break;17371737+ }17381738+17391739+out:17401740+ if (req->nsge > 0)17411741+ scsi_dma_unmap(cmnd);17421742+17431743+ cmnd->result = (((host_status) << 16) | scsi_status);17441744+ cmnd->scsi_done(cmnd);17451745+17461746+ /* Wake up waiting threads */17471747+ csio_scsi_cmnd(req) = NULL;17481748+ complete_all(&req->cmplobj);17491749+}17501750+17511751+/*17521752+ * csio_scsi_cbfn - SCSI callback function.17531753+ * @hw: HW module.17541754+ * @req: IO request.17551755+ *17561756+ */17571757+static void17581758+csio_scsi_cbfn(struct csio_hw *hw, struct csio_ioreq *req)17591759+{17601760+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req);17611761+ uint8_t scsi_status = SAM_STAT_GOOD;17621762+ uint32_t host_status = DID_OK;17631763+17641764+ if (likely(req->wr_status == FW_SUCCESS)) {17651765+ if (req->nsge > 0) {17661766+ scsi_dma_unmap(cmnd);17671767+ if 
(req->dcopy)17681768+ host_status = csio_scsi_copy_to_sgl(hw, req);17691769+ }17701770+17711771+ cmnd->result = (((host_status) << 16) | scsi_status);17721772+ cmnd->scsi_done(cmnd);17731773+ csio_scsi_cmnd(req) = NULL;17741774+ CSIO_INC_STATS(csio_hw_to_scsim(hw), n_tot_success);17751775+ } else {17761776+ /* Error handling */17771777+ csio_scsi_err_handler(hw, req);17781778+ }17791779+}17801780+17811781+/**17821782+ * csio_queuecommand - Entry point to kickstart an I/O request.17831783+ * @host: The scsi_host pointer.17841784+ * @cmnd: The I/O request from ML.17851785+ *17861786+ * This routine does the following:17871787+ * - Checks for HW and Rnode module readiness.17881788+ * - Gets a free ioreq structure (which is already initialized17891789+ * to uninit during its allocation).17901790+ * - Maps SG elements.17911791+ * - Initializes ioreq members.17921792+ * - Kicks off the SCSI state machine for this IO.17931793+ * - Returns busy status on error.17941794+ */17951795+static int17961796+csio_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmnd)17971797+{17981798+ struct csio_lnode *ln = shost_priv(host);17991799+ struct csio_hw *hw = csio_lnode_to_hw(ln);18001800+ struct csio_scsim *scsim = csio_hw_to_scsim(hw);18011801+ struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);18021802+ struct csio_ioreq *ioreq = NULL;18031803+ unsigned long flags;18041804+ int nsge = 0;18051805+ int rv = SCSI_MLQUEUE_HOST_BUSY, nr;18061806+ int retval;18071807+ int cpu;18081808+ struct csio_scsi_qset *sqset;18091809+ struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));18101810+18111811+ if (!blk_rq_cpu_valid(cmnd->request))18121812+ cpu = smp_processor_id();18131813+ else18141814+ cpu = cmnd->request->cpu;18151815+18161816+ sqset = &hw->sqset[ln->portid][cpu];18171817+18181818+ nr = fc_remote_port_chkready(rport);18191819+ if (nr) {18201820+ cmnd->result = nr;18211821+ CSIO_INC_STATS(scsim, n_rn_nr_error);18221822+ goto err_done;18231823+ 
}18241824+18251825+ if (unlikely(!csio_is_hw_ready(hw))) {18261826+ cmnd->result = (DID_REQUEUE << 16);18271827+ CSIO_INC_STATS(scsim, n_hw_nr_error);18281828+ goto err_done;18291829+ }18301830+18311831+ /* Get req->nsge, if there are SG elements to be mapped */18321832+ nsge = scsi_dma_map(cmnd);18331833+ if (unlikely(nsge < 0)) {18341834+ CSIO_INC_STATS(scsim, n_dmamap_error);18351835+ goto err;18361836+ }18371837+18381838+ /* Do we support so many mappings? */18391839+ if (unlikely(nsge > scsim->max_sge)) {18401840+ csio_warn(hw,18411841+ "More SGEs than can be supported."18421842+ " SGEs: %d, Max SGEs: %d\n", nsge, scsim->max_sge);18431843+ CSIO_INC_STATS(scsim, n_unsupp_sge_error);18441844+ goto err_dma_unmap;18451845+ }18461846+18471847+ /* Get a free ioreq structure - SM is already set to uninit */18481848+ ioreq = csio_get_scsi_ioreq_lock(hw, scsim);18491849+ if (!ioreq) {18501850+ csio_err(hw, "Out of I/O request elements. Active #:%d\n",18511851+ scsim->stats.n_active);18521852+ CSIO_INC_STATS(scsim, n_no_req_error);18531853+ goto err_dma_unmap;18541854+ }18551855+18561856+ ioreq->nsge = nsge;18571857+ ioreq->lnode = ln;18581858+ ioreq->rnode = rn;18591859+ ioreq->iq_idx = sqset->iq_idx;18601860+ ioreq->eq_idx = sqset->eq_idx;18611861+ ioreq->wr_status = 0;18621862+ ioreq->drv_status = 0;18631863+ csio_scsi_cmnd(ioreq) = (void *)cmnd;18641864+ ioreq->tmo = 0;18651865+ ioreq->datadir = cmnd->sc_data_direction;18661866+18671867+ if (cmnd->sc_data_direction == DMA_TO_DEVICE) {18681868+ CSIO_INC_STATS(ln, n_output_requests);18691869+ ln->stats.n_output_bytes += scsi_bufflen(cmnd);18701870+ } else if (cmnd->sc_data_direction == DMA_FROM_DEVICE) {18711871+ CSIO_INC_STATS(ln, n_input_requests);18721872+ ln->stats.n_input_bytes += scsi_bufflen(cmnd);18731873+ } else18741874+ CSIO_INC_STATS(ln, n_control_requests);18751875+18761876+ /* Set cbfn */18771877+ ioreq->io_cbfn = csio_scsi_cbfn;18781878+18791879+ /* Needed during abort */18801880+ cmnd->host_scribble = 
(unsigned char *)ioreq;18811881+ cmnd->SCp.Message = 0;18821882+18831883+ /* Kick off SCSI IO SM on the ioreq */18841884+ spin_lock_irqsave(&hw->lock, flags);18851885+ retval = csio_scsi_start_io(ioreq);18861886+ spin_unlock_irqrestore(&hw->lock, flags);18871887+18881888+ if (retval != 0) {18891889+ csio_err(hw, "ioreq: %p couldnt be started, status:%d\n",18901890+ ioreq, retval);18911891+ CSIO_INC_STATS(scsim, n_busy_error);18921892+ goto err_put_req;18931893+ }18941894+18951895+ return 0;18961896+18971897+err_put_req:18981898+ csio_put_scsi_ioreq_lock(hw, scsim, ioreq);18991899+err_dma_unmap:19001900+ if (nsge > 0)19011901+ scsi_dma_unmap(cmnd);19021902+err:19031903+ return rv;19041904+19051905+err_done:19061906+ cmnd->scsi_done(cmnd);19071907+ return 0;19081908+}19091909+19101910+static int19111911+csio_do_abrt_cls(struct csio_hw *hw, struct csio_ioreq *ioreq, bool abort)19121912+{19131913+ int rv;19141914+ int cpu = smp_processor_id();19151915+ struct csio_lnode *ln = ioreq->lnode;19161916+ struct csio_scsi_qset *sqset = &hw->sqset[ln->portid][cpu];19171917+19181918+ ioreq->tmo = CSIO_SCSI_ABRT_TMO_MS;19191919+ /*19201920+ * Use current processor queue for posting the abort/close, but retain19211921+ * the ingress queue ID of the original I/O being aborted/closed - we19221922+ * need the abort/close completion to be received on the same queue19231923+ * as the original I/O.19241924+ */19251925+ ioreq->eq_idx = sqset->eq_idx;19261926+19271927+ if (abort == SCSI_ABORT)19281928+ rv = csio_scsi_abort(ioreq);19291929+ else19301930+ rv = csio_scsi_close(ioreq);19311931+19321932+ return rv;19331933+}19341934+19351935+static int19361936+csio_eh_abort_handler(struct scsi_cmnd *cmnd)19371937+{19381938+ struct csio_ioreq *ioreq;19391939+ struct csio_lnode *ln = shost_priv(cmnd->device->host);19401940+ struct csio_hw *hw = csio_lnode_to_hw(ln);19411941+ struct csio_scsim *scsim = csio_hw_to_scsim(hw);19421942+ int ready = 0, ret;19431943+ unsigned long tmo = 0;19441944+ 
int rv;19451945+ struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);19461946+19471947+ ret = fc_block_scsi_eh(cmnd);19481948+ if (ret)19491949+ return ret;19501950+19511951+ ioreq = (struct csio_ioreq *)cmnd->host_scribble;19521952+ if (!ioreq)19531953+ return SUCCESS;19541954+19551955+ if (!rn)19561956+ return FAILED;19571957+19581958+ csio_dbg(hw,19591959+ "Request to abort ioreq:%p cmd:%p cdb:%08llx"19601960+ " ssni:0x%x lun:%d iq:0x%x\n",19611961+ ioreq, cmnd, *((uint64_t *)cmnd->cmnd), rn->flowid,19621962+ cmnd->device->lun, csio_q_physiqid(hw, ioreq->iq_idx));19631963+19641964+ if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) != cmnd) {19651965+ CSIO_INC_STATS(scsim, n_abrt_race_comp);19661966+ return SUCCESS;19671967+ }19681968+19691969+ ready = csio_is_lnode_ready(ln);19701970+ tmo = CSIO_SCSI_ABRT_TMO_MS;19711971+19721972+ spin_lock_irq(&hw->lock);19731973+ rv = csio_do_abrt_cls(hw, ioreq, (ready ? SCSI_ABORT : SCSI_CLOSE));19741974+ spin_unlock_irq(&hw->lock);19751975+19761976+ if (rv != 0) {19771977+ if (rv == -EINVAL) {19781978+ /* Return success, if abort/close request issued on19791979+ * already completed IO19801980+ */19811981+ return SUCCESS;19821982+ }19831983+ if (ready)19841984+ CSIO_INC_STATS(scsim, n_abrt_busy_error);19851985+ else19861986+ CSIO_INC_STATS(scsim, n_cls_busy_error);19871987+19881988+ goto inval_scmnd;19891989+ }19901990+19911991+ /* Wait for completion */19921992+ init_completion(&ioreq->cmplobj);19931993+ wait_for_completion_timeout(&ioreq->cmplobj, msecs_to_jiffies(tmo));19941994+19951995+ /* FW didnt respond to abort within our timeout */19961996+ if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) {19971997+19981998+ csio_err(hw, "Abort timed out -- req: %p\n", ioreq);19991999+ CSIO_INC_STATS(scsim, n_abrt_timedout);20002000+20012001+inval_scmnd:20022002+ if (ioreq->nsge > 0)20032003+ scsi_dma_unmap(cmnd);20042004+20052005+ spin_lock_irq(&hw->lock);20062006+ csio_scsi_cmnd(ioreq) = NULL;20072007+ 
spin_unlock_irq(&hw->lock);20082008+20092009+ cmnd->result = (DID_ERROR << 16);20102010+ cmnd->scsi_done(cmnd);20112011+20122012+ return FAILED;20132013+ }20142014+20152015+ /* FW successfully aborted the request */20162016+ if (host_byte(cmnd->result) == DID_REQUEUE) {20172017+ csio_info(hw,20182018+ "Aborted SCSI command to (%d:%d) serial#:0x%lx\n",20192019+ cmnd->device->id, cmnd->device->lun,20202020+ cmnd->serial_number);20212021+ return SUCCESS;20222022+ } else {20232023+ csio_info(hw,20242024+ "Failed to abort SCSI command, (%d:%d) serial#:0x%lx\n",20252025+ cmnd->device->id, cmnd->device->lun,20262026+ cmnd->serial_number);20272027+ return FAILED;20282028+ }20292029+}20302030+20312031+/*20322032+ * csio_tm_cbfn - TM callback function.20332033+ * @hw: HW module.20342034+ * @req: IO request.20352035+ *20362036+ * Cache the result in 'cmnd', since ioreq will be freed soon20372037+ * after we return from here, and the waiting thread shouldnt trust20382038+ * the ioreq contents.20392039+ */20402040+static void20412041+csio_tm_cbfn(struct csio_hw *hw, struct csio_ioreq *req)20422042+{20432043+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req);20442044+ struct csio_dma_buf *dma_buf;20452045+ uint8_t flags = 0;20462046+ struct fcp_resp_with_ext *fcp_resp;20472047+ struct fcp_resp_rsp_info *rsp_info;20482048+20492049+ csio_dbg(hw, "req: %p in csio_tm_cbfn status: %d\n",20502050+ req, req->wr_status);20512051+20522052+ /* Cache FW return status */20532053+ cmnd->SCp.Status = req->wr_status;20542054+20552055+ /* Special handling based on FCP response */20562056+20572057+ /*20582058+ * FW returns us this error, if flags were set. FCP4 says20592059+ * FCP_RSP_LEN_VAL in flags shall be set for TM completions.20602060+ * So if a target were to set this bit, we expect that the20612061+ * rsp_code is set to FCP_TMF_CMPL for a successful TM20622062+ * completion. 
Any other rsp_code means TM operation failed.20632063+ * If a target were to just ignore setting flags, we treat20642064+ * the TM operation as success, and FW returns FW_SUCCESS.20652065+ */20662066+ if (req->wr_status == FW_SCSI_RSP_ERR) {20672067+ dma_buf = &req->dma_buf;20682068+ fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr;20692069+ rsp_info = (struct fcp_resp_rsp_info *)(fcp_resp + 1);20702070+20712071+ flags = fcp_resp->resp.fr_flags;20722072+20732073+ /* Modify return status if flags indicate success */20742074+ if (flags & FCP_RSP_LEN_VAL)20752075+ if (rsp_info->rsp_code == FCP_TMF_CMPL)20762076+ cmnd->SCp.Status = FW_SUCCESS;20772077+20782078+ csio_dbg(hw, "TM FCP rsp code: %d\n", rsp_info->rsp_code);20792079+ }20802080+20812081+ /* Wake up the TM handler thread */20822082+ csio_scsi_cmnd(req) = NULL;20832083+}20842084+20852085+static int20862086+csio_eh_lun_reset_handler(struct scsi_cmnd *cmnd)20872087+{20882088+ struct csio_lnode *ln = shost_priv(cmnd->device->host);20892089+ struct csio_hw *hw = csio_lnode_to_hw(ln);20902090+ struct csio_scsim *scsim = csio_hw_to_scsim(hw);20912091+ struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);20922092+ struct csio_ioreq *ioreq = NULL;20932093+ struct csio_scsi_qset *sqset;20942094+ unsigned long flags;20952095+ int retval;20962096+ int count, ret;20972097+ LIST_HEAD(local_q);20982098+ struct csio_scsi_level_data sld;20992099+21002100+ if (!rn)21012101+ goto fail;21022102+21032103+ csio_dbg(hw, "Request to reset LUN:%d (ssni:0x%x tgtid:%d)\n",21042104+ cmnd->device->lun, rn->flowid, rn->scsi_id);21052105+21062106+ if (!csio_is_lnode_ready(ln)) {21072107+ csio_err(hw,21082108+ "LUN reset cannot be issued on non-ready"21092109+ " local node vnpi:0x%x (LUN:%d)\n",21102110+ ln->vnp_flowid, cmnd->device->lun);21112111+ goto fail;21122112+ }21132113+21142114+ /* Lnode is ready, now wait on rport node readiness */21152115+ ret = fc_block_scsi_eh(cmnd);21162116+ if (ret)21172117+ return 
ret;21182118+21192119+ /*21202120+ * If we have blocked in the previous call, at this point, either the21212121+ * remote node has come back online, or device loss timer has fired21222122+ * and the remote node is destroyed. Allow the LUN reset only for21232123+ * the former case, since LUN reset is a TMF I/O on the wire, and we21242124+ * need a valid session to issue it.21252125+ */21262126+ if (fc_remote_port_chkready(rn->rport)) {21272127+ csio_err(hw,21282128+ "LUN reset cannot be issued on non-ready"21292129+ " remote node ssni:0x%x (LUN:%d)\n",21302130+ rn->flowid, cmnd->device->lun);21312131+ goto fail;21322132+ }21332133+21342134+ /* Get a free ioreq structure - SM is already set to uninit */21352135+ ioreq = csio_get_scsi_ioreq_lock(hw, scsim);21362136+21372137+ if (!ioreq) {21382138+ csio_err(hw, "Out of IO request elements. Active # :%d\n",21392139+ scsim->stats.n_active);21402140+ goto fail;21412141+ }21422142+21432143+ sqset = &hw->sqset[ln->portid][smp_processor_id()];21442144+ ioreq->nsge = 0;21452145+ ioreq->lnode = ln;21462146+ ioreq->rnode = rn;21472147+ ioreq->iq_idx = sqset->iq_idx;21482148+ ioreq->eq_idx = sqset->eq_idx;21492149+21502150+ csio_scsi_cmnd(ioreq) = cmnd;21512151+ cmnd->host_scribble = (unsigned char *)ioreq;21522152+ cmnd->SCp.Status = 0;21532153+21542154+ cmnd->SCp.Message = FCP_TMF_LUN_RESET;21552155+ ioreq->tmo = CSIO_SCSI_LUNRST_TMO_MS / 1000;21562156+21572157+ /*21582158+ * FW times the LUN reset for ioreq->tmo, so we got to wait a little21592159+ * longer (10s for now) than that to allow FW to return the timed21602160+ * out command.21612161+ */21622162+ count = DIV_ROUND_UP((ioreq->tmo + 10) * 1000, CSIO_SCSI_TM_POLL_MS);21632163+21642164+ /* Set cbfn */21652165+ ioreq->io_cbfn = csio_tm_cbfn;21662166+21672167+ /* Save of the ioreq info for later use */21682168+ sld.level = CSIO_LEV_LUN;21692169+ sld.lnode = ioreq->lnode;21702170+ sld.rnode = ioreq->rnode;21712171+ sld.oslun = (uint64_t)cmnd->device->lun;21722172+21732173+ 
spin_lock_irqsave(&hw->lock, flags);21742174+ /* Kick off TM SM on the ioreq */21752175+ retval = csio_scsi_start_tm(ioreq);21762176+ spin_unlock_irqrestore(&hw->lock, flags);21772177+21782178+ if (retval != 0) {21792179+ csio_err(hw, "Failed to issue LUN reset, req:%p, status:%d\n",21802180+ ioreq, retval);21812181+ goto fail_ret_ioreq;21822182+ }21832183+21842184+ csio_dbg(hw, "Waiting max %d secs for LUN reset completion\n",21852185+ count * (CSIO_SCSI_TM_POLL_MS / 1000));21862186+ /* Wait for completion */21872187+ while ((((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd)21882188+ && count--)21892189+ msleep(CSIO_SCSI_TM_POLL_MS);21902190+21912191+ /* LUN reset timed-out */21922192+ if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) {21932193+ csio_err(hw, "LUN reset (%d:%d) timed out\n",21942194+ cmnd->device->id, cmnd->device->lun);21952195+21962196+ spin_lock_irq(&hw->lock);21972197+ csio_scsi_drvcleanup(ioreq);21982198+ list_del_init(&ioreq->sm.sm_list);21992199+ spin_unlock_irq(&hw->lock);22002200+22012201+ goto fail_ret_ioreq;22022202+ }22032203+22042204+ /* LUN reset returned, check cached status */22052205+ if (cmnd->SCp.Status != FW_SUCCESS) {22062206+ csio_err(hw, "LUN reset failed (%d:%d), status: %d\n",22072207+ cmnd->device->id, cmnd->device->lun, cmnd->SCp.Status);22082208+ goto fail;22092209+ }22102210+22112211+ /* LUN reset succeeded, Start aborting affected I/Os */22122212+ /*22132213+ * Since the host guarantees during LUN reset that there22142214+ * will not be any more I/Os to that LUN, until the LUN reset22152215+ * completes, we gather pending I/Os after the LUN reset.22162216+ */22172217+ spin_lock_irq(&hw->lock);22182218+ csio_scsi_gather_active_ios(scsim, &sld, &local_q);22192219+22202220+ retval = csio_scsi_abort_io_q(scsim, &local_q, 30000);22212221+ spin_unlock_irq(&hw->lock);22222222+22232223+ /* Aborts may have timed out */22242224+ if (retval != 0) {22252225+ csio_err(hw,22262226+ "Attempt to abort I/Os during LUN reset 
of %d"22272227+ " returned %d\n", cmnd->device->lun, retval);22282228+ /* Return I/Os back to active_q */22292229+ spin_lock_irq(&hw->lock);22302230+ list_splice_tail_init(&local_q, &scsim->active_q);22312231+ spin_unlock_irq(&hw->lock);22322232+ goto fail;22332233+ }22342234+22352235+ CSIO_INC_STATS(rn, n_lun_rst);22362236+22372237+ csio_info(hw, "LUN reset occurred (%d:%d)\n",22382238+ cmnd->device->id, cmnd->device->lun);22392239+22402240+ return SUCCESS;22412241+22422242+fail_ret_ioreq:22432243+ csio_put_scsi_ioreq_lock(hw, scsim, ioreq);22442244+fail:22452245+ CSIO_INC_STATS(rn, n_lun_rst_fail);22462246+ return FAILED;22472247+}22482248+22492249+static int22502250+csio_slave_alloc(struct scsi_device *sdev)22512251+{22522252+ struct fc_rport *rport = starget_to_rport(scsi_target(sdev));22532253+22542254+ if (!rport || fc_remote_port_chkready(rport))22552255+ return -ENXIO;22562256+22572257+ sdev->hostdata = *((struct csio_lnode **)(rport->dd_data));22582258+22592259+ return 0;22602260+}22612261+22622262+static int22632263+csio_slave_configure(struct scsi_device *sdev)22642264+{22652265+ if (sdev->tagged_supported)22662266+ scsi_activate_tcq(sdev, csio_lun_qdepth);22672267+ else22682268+ scsi_deactivate_tcq(sdev, csio_lun_qdepth);22692269+22702270+ return 0;22712271+}22722272+22732273+static void22742274+csio_slave_destroy(struct scsi_device *sdev)22752275+{22762276+ sdev->hostdata = NULL;22772277+}22782278+22792279+static int22802280+csio_scan_finished(struct Scsi_Host *shost, unsigned long time)22812281+{22822282+ struct csio_lnode *ln = shost_priv(shost);22832283+ int rv = 1;22842284+22852285+ spin_lock_irq(shost->host_lock);22862286+ if (!ln->hwp || csio_list_deleted(&ln->sm.sm_list))22872287+ goto out;22882288+22892289+ rv = csio_scan_done(ln, jiffies, time, csio_max_scan_tmo * HZ,22902290+ csio_delta_scan_tmo * HZ);22912291+out:22922292+ spin_unlock_irq(shost->host_lock);22932293+22942294+ return rv;22952295+}22962296+22972297+struct scsi_host_template 
csio_fcoe_shost_template = {22982298+ .module = THIS_MODULE,22992299+ .name = CSIO_DRV_DESC,23002300+ .proc_name = KBUILD_MODNAME,23012301+ .queuecommand = csio_queuecommand,23022302+ .eh_abort_handler = csio_eh_abort_handler,23032303+ .eh_device_reset_handler = csio_eh_lun_reset_handler,23042304+ .slave_alloc = csio_slave_alloc,23052305+ .slave_configure = csio_slave_configure,23062306+ .slave_destroy = csio_slave_destroy,23072307+ .scan_finished = csio_scan_finished,23082308+ .this_id = -1,23092309+ .sg_tablesize = CSIO_SCSI_MAX_SGE,23102310+ .cmd_per_lun = CSIO_MAX_CMD_PER_LUN,23112311+ .use_clustering = ENABLE_CLUSTERING,23122312+ .shost_attrs = csio_fcoe_lport_attrs,23132313+ .max_sectors = CSIO_MAX_SECTOR_SIZE,23142314+};23152315+23162316+struct scsi_host_template csio_fcoe_shost_vport_template = {23172317+ .module = THIS_MODULE,23182318+ .name = CSIO_DRV_DESC,23192319+ .proc_name = KBUILD_MODNAME,23202320+ .queuecommand = csio_queuecommand,23212321+ .eh_abort_handler = csio_eh_abort_handler,23222322+ .eh_device_reset_handler = csio_eh_lun_reset_handler,23232323+ .slave_alloc = csio_slave_alloc,23242324+ .slave_configure = csio_slave_configure,23252325+ .slave_destroy = csio_slave_destroy,23262326+ .scan_finished = csio_scan_finished,23272327+ .this_id = -1,23282328+ .sg_tablesize = CSIO_SCSI_MAX_SGE,23292329+ .cmd_per_lun = CSIO_MAX_CMD_PER_LUN,23302330+ .use_clustering = ENABLE_CLUSTERING,23312331+ .shost_attrs = csio_fcoe_vport_attrs,23322332+ .max_sectors = CSIO_MAX_SECTOR_SIZE,23332333+};23342334+23352335+/*23362336+ * csio_scsi_alloc_ddp_bufs - Allocate buffers for DDP of unaligned SGLs.23372337+ * @scm: SCSI Module23382338+ * @hw: HW device.23392339+ * @buf_size: buffer size23402340+ * @num_buf : Number of buffers.23412341+ *23422342+ * This routine allocates DMA buffers required for SCSI Data xfer, if23432343+ * each SGL buffer for a SCSI Read request posted by SCSI midlayer are23442344+ * not virtually contiguous.23452345+ */23462346+static 
int23472347+csio_scsi_alloc_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw,23482348+ int buf_size, int num_buf)23492349+{23502350+ int n = 0;23512351+ struct list_head *tmp;23522352+ struct csio_dma_buf *ddp_desc = NULL;23532353+ uint32_t unit_size = 0;23542354+23552355+ if (!num_buf)23562356+ return 0;23572357+23582358+ if (!buf_size)23592359+ return -EINVAL;23602360+23612361+ INIT_LIST_HEAD(&scm->ddp_freelist);23622362+23632363+ /* Align buf size to page size */23642364+ buf_size = (buf_size + PAGE_SIZE - 1) & PAGE_MASK;23652365+ /* Initialize dma descriptors */23662366+ for (n = 0; n < num_buf; n++) {23672367+ /* Set unit size to request size */23682368+ unit_size = buf_size;23692369+ ddp_desc = kzalloc(sizeof(struct csio_dma_buf), GFP_KERNEL);23702370+ if (!ddp_desc) {23712371+ csio_err(hw,23722372+ "Failed to allocate ddp descriptors,"23732373+ " Num allocated = %d.\n",23742374+ scm->stats.n_free_ddp);23752375+ goto no_mem;23762376+ }23772377+23782378+ /* Allocate Dma buffers for DDP */23792379+ ddp_desc->vaddr = pci_alloc_consistent(hw->pdev, unit_size,23802380+ &ddp_desc->paddr);23812381+ if (!ddp_desc->vaddr) {23822382+ csio_err(hw,23832383+ "SCSI response DMA buffer (ddp) allocation"23842384+ " failed!\n");23852385+ kfree(ddp_desc);23862386+ goto no_mem;23872387+ }23882388+23892389+ ddp_desc->len = unit_size;23902390+23912391+ /* Added it to scsi ddp freelist */23922392+ list_add_tail(&ddp_desc->list, &scm->ddp_freelist);23932393+ CSIO_INC_STATS(scm, n_free_ddp);23942394+ }23952395+23962396+ return 0;23972397+no_mem:23982398+ /* release dma descs back to freelist and free dma memory */23992399+ list_for_each(tmp, &scm->ddp_freelist) {24002400+ ddp_desc = (struct csio_dma_buf *) tmp;24012401+ tmp = csio_list_prev(tmp);24022402+ pci_free_consistent(hw->pdev, ddp_desc->len, ddp_desc->vaddr,24032403+ ddp_desc->paddr);24042404+ list_del_init(&ddp_desc->list);24052405+ kfree(ddp_desc);24062406+ }24072407+ scm->stats.n_free_ddp = 0;24082408+24092409+ return 
-ENOMEM;24102410+}24112411+24122412+/*24132413+ * csio_scsi_free_ddp_bufs - free DDP buffers of unaligned SGLs.24142414+ * @scm: SCSI Module24152415+ * @hw: HW device.24162416+ *24172417+ * This routine frees ddp buffers.24182418+ */24192419+static void24202420+csio_scsi_free_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw)24212421+{24222422+ struct list_head *tmp;24232423+ struct csio_dma_buf *ddp_desc;24242424+24252425+ /* release dma descs back to freelist and free dma memory */24262426+ list_for_each(tmp, &scm->ddp_freelist) {24272427+ ddp_desc = (struct csio_dma_buf *) tmp;24282428+ tmp = csio_list_prev(tmp);24292429+ pci_free_consistent(hw->pdev, ddp_desc->len, ddp_desc->vaddr,24302430+ ddp_desc->paddr);24312431+ list_del_init(&ddp_desc->list);24322432+ kfree(ddp_desc);24332433+ }24342434+ scm->stats.n_free_ddp = 0;24352435+}24362436+24372437+/**24382438+ * csio_scsim_init - Initialize SCSI Module24392439+ * @scm: SCSI Module24402440+ * @hw: HW module24412441+ *24422442+ */24432443+int24442444+csio_scsim_init(struct csio_scsim *scm, struct csio_hw *hw)24452445+{24462446+ int i;24472447+ struct csio_ioreq *ioreq;24482448+ struct csio_dma_buf *dma_buf;24492449+24502450+ INIT_LIST_HEAD(&scm->active_q);24512451+ scm->hw = hw;24522452+24532453+ scm->proto_cmd_len = sizeof(struct fcp_cmnd);24542454+ scm->proto_rsp_len = CSIO_SCSI_RSP_LEN;24552455+ scm->max_sge = CSIO_SCSI_MAX_SGE;24562456+24572457+ spin_lock_init(&scm->freelist_lock);24582458+24592459+ /* Pre-allocate ioreqs and initialize them */24602460+ INIT_LIST_HEAD(&scm->ioreq_freelist);24612461+ for (i = 0; i < csio_scsi_ioreqs; i++) {24622462+24632463+ ioreq = kzalloc(sizeof(struct csio_ioreq), GFP_KERNEL);24642464+ if (!ioreq) {24652465+ csio_err(hw,24662466+ "I/O request element allocation failed, "24672467+ " Num allocated = %d.\n",24682468+ scm->stats.n_free_ioreq);24692469+24702470+ goto free_ioreq;24712471+ }24722472+24732473+ /* Allocate Dma buffers for Response Payload */24742474+ dma_buf = 
&ioreq->dma_buf;24752475+ dma_buf->vaddr = pci_pool_alloc(hw->scsi_pci_pool, GFP_KERNEL,24762476+ &dma_buf->paddr);24772477+ if (!dma_buf->vaddr) {24782478+ csio_err(hw,24792479+ "SCSI response DMA buffer allocation"24802480+ " failed!\n");24812481+ kfree(ioreq);24822482+ goto free_ioreq;24832483+ }24842484+24852485+ dma_buf->len = scm->proto_rsp_len;24862486+24872487+ /* Set state to uninit */24882488+ csio_init_state(&ioreq->sm, csio_scsis_uninit);24892489+ INIT_LIST_HEAD(&ioreq->gen_list);24902490+ init_completion(&ioreq->cmplobj);24912491+24922492+ list_add_tail(&ioreq->sm.sm_list, &scm->ioreq_freelist);24932493+ CSIO_INC_STATS(scm, n_free_ioreq);24942494+ }24952495+24962496+ if (csio_scsi_alloc_ddp_bufs(scm, hw, PAGE_SIZE, csio_ddp_descs))24972497+ goto free_ioreq;24982498+24992499+ return 0;25002500+25012501+free_ioreq:25022502+ /*25032503+ * Free up existing allocations, since an error25042504+ * from here means we are returning for good25052505+ */25062506+ while (!list_empty(&scm->ioreq_freelist)) {25072507+ struct csio_sm *tmp;25082508+25092509+ tmp = list_first_entry(&scm->ioreq_freelist,25102510+ struct csio_sm, sm_list);25112511+ list_del_init(&tmp->sm_list);25122512+ ioreq = (struct csio_ioreq *)tmp;25132513+25142514+ dma_buf = &ioreq->dma_buf;25152515+ pci_pool_free(hw->scsi_pci_pool, dma_buf->vaddr,25162516+ dma_buf->paddr);25172517+25182518+ kfree(ioreq);25192519+ }25202520+25212521+ scm->stats.n_free_ioreq = 0;25222522+25232523+ return -ENOMEM;25242524+}25252525+25262526+/**25272527+ * csio_scsim_exit: Uninitialize SCSI Module25282528+ * @scm: SCSI Module25292529+ *25302530+ */25312531+void25322532+csio_scsim_exit(struct csio_scsim *scm)25332533+{25342534+ struct csio_ioreq *ioreq;25352535+ struct csio_dma_buf *dma_buf;25362536+25372537+ while (!list_empty(&scm->ioreq_freelist)) {25382538+ struct csio_sm *tmp;25392539+25402540+ tmp = list_first_entry(&scm->ioreq_freelist,25412541+ struct csio_sm, sm_list);25422542+ 
list_del_init(&tmp->sm_list);25432543+ ioreq = (struct csio_ioreq *)tmp;25442544+25452545+ dma_buf = &ioreq->dma_buf;25462546+ pci_pool_free(scm->hw->scsi_pci_pool, dma_buf->vaddr,25472547+ dma_buf->paddr);25482548+25492549+ kfree(ioreq);25502550+ }25512551+25522552+ scm->stats.n_free_ioreq = 0;25532553+25542554+ csio_scsi_free_ddp_bufs(scm, scm->hw);25552555+}
+342
drivers/scsi/csiostor/csio_scsi.h
/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __CSIO_SCSI_H__
#define __CSIO_SCSI_H__

#include <linux/spinlock_types.h>
#include <linux/completion.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_fcp.h>

#include "csio_defs.h"
#include "csio_wr.h"

extern struct scsi_host_template csio_fcoe_shost_template;
extern struct scsi_host_template csio_fcoe_shost_vport_template;

extern int csio_scsi_eqsize;
extern int csio_scsi_iqlen;
extern int csio_scsi_ioreqs;
extern uint32_t csio_max_scan_tmo;
extern uint32_t csio_delta_scan_tmo;
extern int csio_lun_qdepth;

/*
 **************************** NOTE *******************************
 * How do we calculate MAX FCoE SCSI SGEs? Here is the math:
 * Max Egress WR size = 512 bytes
 * One SCSI egress WR has the following fixed no of bytes:
 *      48 (sizeof(struct fw_scsi_write[read]_wr)) - FW WR
 *    + 32 (sizeof(struct fc_fcp_cmnd))            - Immediate FCP_CMD
 *    ------
 *      80
 *    ------
 * That leaves us with 512 - 80 = 432 bytes for data SGE. Using
 * struct ulptx_sgl header for the SGE consumes:
 * - 4 bytes for cmnd_sge.
 * - 12 bytes for the first SGL.
 * That leaves us with 416 bytes for the remaining SGE pairs. Which is
 * is 416 / 24 (size(struct ulptx_sge_pair)) = 17 SGE pairs,
 * or 34 SGEs. Adding the first SGE fetches us 35 SGEs.
 */
#define CSIO_SCSI_MAX_SGE		35
#define CSIO_SCSI_ABRT_TMO_MS		60000
#define CSIO_SCSI_LUNRST_TMO_MS		60000
#define CSIO_SCSI_TM_POLL_MS		2000	/* should be less than
						 * all TM timeouts.
						 */
#define CSIO_SCSI_IQ_WRSZ	128
#define CSIO_SCSI_IQSIZE	(csio_scsi_iqlen * CSIO_SCSI_IQ_WRSZ)

#define	CSIO_MAX_SNS_LEN	128
#define	CSIO_SCSI_RSP_LEN	(FCP_RESP_WITH_EXT + 4 + CSIO_MAX_SNS_LEN)

/* Reference to scsi_cmnd */
#define csio_scsi_cmnd(req)		((req)->scratch1)

struct csio_scsi_stats {
	uint64_t		n_tot_success;	/* Total number of good I/Os */
	uint32_t		n_rn_nr_error;	/* No. of remote-node-not-
						 * ready errors
						 */
	uint32_t		n_hw_nr_error;	/* No. of hw-module-not-
						 * ready errors
						 */
	uint32_t		n_dmamap_error;	/* No. of DMA map errors */
	uint32_t		n_unsupp_sge_error; /* No. of too-many-SGes
						     * errors.
						     */
	uint32_t		n_no_req_error;	/* No. of Out-of-ioreqs error */
	uint32_t		n_busy_error;	/* No. of -EBUSY errors */
	uint32_t		n_hosterror;	/* No. of FW_HOSTERROR I/O */
	uint32_t		n_rsperror;	/* No. of response errors */
	uint32_t		n_autosense;	/* No. of auto sense replies */
	uint32_t		n_ovflerror;	/* No. of overflow errors */
	uint32_t		n_unflerror;	/* No. of underflow errors */
	uint32_t		n_rdev_nr_error;/* No. of rdev not
						 * ready errors
						 */
	uint32_t		n_rdev_lost_error;/* No. of rdev lost errors */
	uint32_t		n_rdev_logo_error;/* No. of rdev logo errors */
	uint32_t		n_link_down_error;/* No. of link down errors */
	uint32_t		n_no_xchg_error; /* No. of no-exchange errors */
	uint32_t		n_unknown_error;/* No. of unhandled errors */
	uint32_t		n_aborted;	/* No. of aborted I/Os */
	uint32_t		n_abrt_timedout; /* No. of abort timedouts */
	uint32_t		n_abrt_fail;	/* No. of abort failures */
	uint32_t		n_abrt_dups;	/* No. of duplicate aborts */
	uint32_t		n_abrt_race_comp; /* No. of aborts that raced
						   * with completions.
						   */
	uint32_t		n_abrt_busy_error;/* No. of abort failures
						   * due to -EBUSY.
						   */
	uint32_t		n_closed;	/* No. of closed I/Os */
	uint32_t		n_cls_busy_error; /* No. of close failures
						   * due to -EBUSY.
						   */
	uint32_t		n_active;	/* No. of IOs in active_q */
	uint32_t		n_tm_active;	/* No. of TMs in active_q */
	uint32_t		n_wcbfn;	/* No. of I/Os in worker
						 * cbfn q
						 */
	uint32_t		n_free_ioreq;	/* No. of freelist entries */
	uint32_t		n_free_ddp;	/* No. of DDP freelist */
	uint32_t		n_unaligned;	/* No. of Unaligned SGls */
	uint32_t		n_inval_cplop;	/* No. invalid CPL op's in IQ */
	uint32_t		n_inval_scsiop;	/* No. invalid scsi op's in IQ*/
};

struct csio_scsim {
	struct csio_hw		*hw;		/* Pointer to HW module */
	uint8_t			max_sge;	/* Max SGE */
	uint8_t			proto_cmd_len;	/* Proto specific SCSI
						 * cmd length
						 */
	uint16_t		proto_rsp_len;	/* Proto specific SCSI
						 * response length
						 */
	spinlock_t		freelist_lock;	/* Lock for ioreq freelist */
	struct list_head	active_q;	/* Outstanding SCSI I/Os */
	struct list_head	ioreq_freelist;	/* Free list of ioreq's */
	struct list_head	ddp_freelist;	/* DDP descriptor freelist */
	struct csio_scsi_stats	stats;		/* This module's statistics */
};

/* State machine defines */
enum csio_scsi_ev {
	CSIO_SCSIE_START_IO = 1,	/* Start a regular SCSI IO */
	CSIO_SCSIE_START_TM,		/* Start a TM IO */
	CSIO_SCSIE_COMPLETED,		/* IO Completed */
	CSIO_SCSIE_ABORT,		/* Abort IO */
	CSIO_SCSIE_ABORTED,		/* IO Aborted */
	CSIO_SCSIE_CLOSE,		/* Close exchange */
	CSIO_SCSIE_CLOSED,		/* Exchange closed */
	CSIO_SCSIE_DRVCLEANUP,		/* Driver wants to manually
					 * cleanup this I/O.
					 */
};

enum csio_scsi_lev {
	CSIO_LEV_ALL = 1,
	CSIO_LEV_LNODE,
	CSIO_LEV_RNODE,
	CSIO_LEV_LUN,
};

struct csio_scsi_level_data {
	enum csio_scsi_lev	level;
	struct csio_rnode	*rnode;
	struct csio_lnode	*lnode;
	uint64_t		oslun;
};

/* Pop an ioreq off the freelist, or NULL if the list is exhausted. */
static inline struct csio_ioreq *
csio_get_scsi_ioreq(struct csio_scsim *scm)
{
	struct csio_sm *req;

	if (likely(!list_empty(&scm->ioreq_freelist))) {
		req = list_first_entry(&scm->ioreq_freelist,
				       struct csio_sm, sm_list);
		list_del_init(&req->sm_list);
		CSIO_DEC_STATS(scm, n_free_ioreq);
		return (struct csio_ioreq *)req;
	} else
		return NULL;
}

/* Return a single ioreq to the freelist. */
static inline void
csio_put_scsi_ioreq(struct csio_scsim *scm, struct csio_ioreq *ioreq)
{
	list_add_tail(&ioreq->sm.sm_list, &scm->ioreq_freelist);
	CSIO_INC_STATS(scm, n_free_ioreq);
}

/* Return a whole list of n ioreqs to the freelist in one splice. */
static inline void
csio_put_scsi_ioreq_list(struct csio_scsim *scm, struct list_head *reqlist,
			 int n)
{
	list_splice_init(reqlist, &scm->ioreq_freelist);
	scm->stats.n_free_ioreq += n;
}

/* Pop a DDP descriptor off the freelist, or NULL if exhausted. */
static inline struct csio_dma_buf *
csio_get_scsi_ddp(struct csio_scsim *scm)
{
	struct csio_dma_buf *ddp;

	if (likely(!list_empty(&scm->ddp_freelist))) {
		ddp = list_first_entry(&scm->ddp_freelist,
				       struct csio_dma_buf, list);
		list_del_init(&ddp->list);
		CSIO_DEC_STATS(scm, n_free_ddp);
		return ddp;
	} else
		return NULL;
}

/* Return a single DDP descriptor to the freelist. */
static inline void
csio_put_scsi_ddp(struct csio_scsim *scm, struct csio_dma_buf *ddp)
{
	list_add_tail(&ddp->list, &scm->ddp_freelist);
	CSIO_INC_STATS(scm, n_free_ddp);
}

/* Return a whole list of n DDP descriptors to the freelist. */
static inline void
csio_put_scsi_ddp_list(struct csio_scsim *scm, struct list_head *reqlist,
		       int n)
{
	list_splice_tail_init(reqlist, &scm->ddp_freelist);
	scm->stats.n_free_ddp += n;
}

/*
 * Post COMPLETED to the ioreq SM; queue it on cbfn_q only if the SM
 * left it unlinked (i.e. it is not already on some other queue).
 */
static inline void
csio_scsi_completed(struct csio_ioreq *ioreq, struct list_head *cbfn_q)
{
	csio_post_event(&ioreq->sm, CSIO_SCSIE_COMPLETED);
	if (csio_list_deleted(&ioreq->sm.sm_list))
		list_add_tail(&ioreq->sm.sm_list, cbfn_q);
}

/* Post ABORTED to the ioreq SM and queue it on cbfn_q. */
static inline void
csio_scsi_aborted(struct csio_ioreq *ioreq, struct list_head *cbfn_q)
{
	csio_post_event(&ioreq->sm, CSIO_SCSIE_ABORTED);
	list_add_tail(&ioreq->sm.sm_list, cbfn_q);
}

/* Post CLOSED to the ioreq SM and queue it on cbfn_q. */
static inline void
csio_scsi_closed(struct csio_ioreq *ioreq, struct list_head *cbfn_q)
{
	csio_post_event(&ioreq->sm, CSIO_SCSIE_CLOSED);
	list_add_tail(&ioreq->sm.sm_list, cbfn_q);
}

/* Post DRVCLEANUP to the ioreq SM (manual cleanup of this I/O). */
static inline void
csio_scsi_drvcleanup(struct csio_ioreq *ioreq)
{
	csio_post_event(&ioreq->sm, CSIO_SCSIE_DRVCLEANUP);
}

/*
 * csio_scsi_start_io - Kick starts the IO SM.
 * @req: io request SM.
 *
 * needs to be called with lock held.
 */
static inline int
csio_scsi_start_io(struct csio_ioreq *ioreq)
{
	csio_post_event(&ioreq->sm, CSIO_SCSIE_START_IO);
	return ioreq->drv_status;
}

/*
 * csio_scsi_start_tm - Kicks off the Task management IO SM.
 * @req: io request SM.
 *
 * needs to be called with lock held.
 */
static inline int
csio_scsi_start_tm(struct csio_ioreq *ioreq)
{
	csio_post_event(&ioreq->sm, CSIO_SCSIE_START_TM);
	return ioreq->drv_status;
}

/*
 * csio_scsi_abort - Abort an IO request
 * @req: io request SM.
 *
 * needs to be called with lock held.
 */
static inline int
csio_scsi_abort(struct csio_ioreq *ioreq)
{
	csio_post_event(&ioreq->sm, CSIO_SCSIE_ABORT);
	return ioreq->drv_status;
}

/*
 * csio_scsi_close - Close an IO request
 * @req: io request SM.
 *
 * needs to be called with lock held.
 */
static inline int
csio_scsi_close(struct csio_ioreq *ioreq)
{
	csio_post_event(&ioreq->sm, CSIO_SCSIE_CLOSE);
	return ioreq->drv_status;
}

void csio_scsi_cleanup_io_q(struct csio_scsim *, struct list_head *);
int csio_scsim_cleanup_io(struct csio_scsim *, bool abort);
int csio_scsim_cleanup_io_lnode(struct csio_scsim *,
					  struct csio_lnode *);
struct csio_ioreq *csio_scsi_cmpl_handler(struct csio_hw *, void *, uint32_t,
					  struct csio_fl_dma_buf *,
					  void *, uint8_t **);
int csio_scsi_qconfig(struct csio_hw *);
int csio_scsim_init(struct csio_scsim *, struct csio_hw *);
void csio_scsim_exit(struct csio_scsim *);

#endif /* __CSIO_SCSI_H__ */
+1632
drivers/scsi/csiostor/csio_wr.c
···11+/*22+ * This file is part of the Chelsio FCoE driver for Linux.33+ *44+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.55+ *66+ * This software is available to you under a choice of one of two77+ * licenses. You may choose to be licensed under the terms of the GNU88+ * General Public License (GPL) Version 2, available from the file99+ * COPYING in the main directory of this source tree, or the1010+ * OpenIB.org BSD license below:1111+ *1212+ * Redistribution and use in source and binary forms, with or1313+ * without modification, are permitted provided that the following1414+ * conditions are met:1515+ *1616+ * - Redistributions of source code must retain the above1717+ * copyright notice, this list of conditions and the following1818+ * disclaimer.1919+ *2020+ * - Redistributions in binary form must reproduce the above2121+ * copyright notice, this list of conditions and the following2222+ * disclaimer in the documentation and/or other materials2323+ * provided with the distribution.2424+ *2525+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,2626+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF2727+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND2828+ * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS2929+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN3030+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN3131+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE3232+ * SOFTWARE.3333+ */3434+3535+#include <linux/kernel.h>3636+#include <linux/string.h>3737+#include <linux/compiler.h>3838+#include <linux/slab.h>3939+#include <asm/page.h>4040+#include <linux/cache.h>4141+4242+#include "csio_hw.h"4343+#include "csio_wr.h"4444+#include "csio_mb.h"4545+#include "csio_defs.h"4646+4747+int csio_intr_coalesce_cnt; /* value:SGE_INGRESS_RX_THRESHOLD[0] */4848+static int csio_sge_thresh_reg; /* SGE_INGRESS_RX_THRESHOLD[0] */4949+5050+int csio_intr_coalesce_time = 10; /* value:SGE_TIMER_VALUE_1 */5151+static int csio_sge_timer_reg = 1;5252+5353+#define CSIO_SET_FLBUF_SIZE(_hw, _reg, _val) \5454+ csio_wr_reg32((_hw), (_val), SGE_FL_BUFFER_SIZE##_reg)5555+5656+static void5757+csio_get_flbuf_size(struct csio_hw *hw, struct csio_sge *sge, uint32_t reg)5858+{5959+ sge->sge_fl_buf_size[reg] = csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE0 +6060+ reg * sizeof(uint32_t));6161+}6262+6363+/* Free list buffer size */6464+static inline uint32_t6565+csio_wr_fl_bufsz(struct csio_sge *sge, struct csio_dma_buf *buf)6666+{6767+ return sge->sge_fl_buf_size[buf->paddr & 0xF];6868+}6969+7070+/* Size of the egress queue status page */7171+static inline uint32_t7272+csio_wr_qstat_pgsz(struct csio_hw *hw)7373+{7474+ return (hw->wrm.sge.sge_control & EGRSTATUSPAGESIZE(1)) ? 128 : 64;7575+}7676+7777+/* Ring freelist doorbell */7878+static inline void7979+csio_wr_ring_fldb(struct csio_hw *hw, struct csio_q *flq)8080+{8181+ /*8282+ * Ring the doorbell only when we have atleast CSIO_QCREDIT_SZ8383+ * number of bytes in the freelist queue. 
This translates to atleast8484+ * 8 freelist buffer pointers (since each pointer is 8 bytes).8585+ */8686+ if (flq->inc_idx >= 8) {8787+ csio_wr_reg32(hw, DBPRIO(1) | QID(flq->un.fl.flid) |8888+ PIDX(flq->inc_idx / 8),8989+ MYPF_REG(SGE_PF_KDOORBELL));9090+ flq->inc_idx &= 7;9191+ }9292+}9393+9494+/* Write a 0 cidx increment value to enable SGE interrupts for this queue */9595+static void9696+csio_wr_sge_intr_enable(struct csio_hw *hw, uint16_t iqid)9797+{9898+ csio_wr_reg32(hw, CIDXINC(0) |9999+ INGRESSQID(iqid) |100100+ TIMERREG(X_TIMERREG_RESTART_COUNTER),101101+ MYPF_REG(SGE_PF_GTS));102102+}103103+104104+/*105105+ * csio_wr_fill_fl - Populate the FL buffers of a FL queue.106106+ * @hw: HW module.107107+ * @flq: Freelist queue.108108+ *109109+ * Fill up freelist buffer entries with buffers of size specified110110+ * in the size register.111111+ *112112+ */113113+static int114114+csio_wr_fill_fl(struct csio_hw *hw, struct csio_q *flq)115115+{116116+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);117117+ struct csio_sge *sge = &wrm->sge;118118+ __be64 *d = (__be64 *)(flq->vstart);119119+ struct csio_dma_buf *buf = &flq->un.fl.bufs[0];120120+ uint64_t paddr;121121+ int sreg = flq->un.fl.sreg;122122+ int n = flq->credits;123123+124124+ while (n--) {125125+ buf->len = sge->sge_fl_buf_size[sreg];126126+ buf->vaddr = pci_alloc_consistent(hw->pdev, buf->len,127127+ &buf->paddr);128128+ if (!buf->vaddr) {129129+ csio_err(hw, "Could only fill %d buffers!\n", n + 1);130130+ return -ENOMEM;131131+ }132132+133133+ paddr = buf->paddr | (sreg & 0xF);134134+135135+ *d++ = cpu_to_be64(paddr);136136+ buf++;137137+ }138138+139139+ return 0;140140+}141141+142142+/*143143+ * csio_wr_update_fl -144144+ * @hw: HW module.145145+ * @flq: Freelist queue.146146+ *147147+ *148148+ */149149+static inline void150150+csio_wr_update_fl(struct csio_hw *hw, struct csio_q *flq, uint16_t n)151151+{152152+153153+ flq->inc_idx += n;154154+ flq->pidx += n;155155+ if (unlikely(flq->pidx >= 
flq->credits))156156+ flq->pidx -= (uint16_t)flq->credits;157157+158158+ CSIO_INC_STATS(flq, n_flq_refill);159159+}160160+161161+/*162162+ * csio_wr_alloc_q - Allocate a WR queue and initialize it.163163+ * @hw: HW module164164+ * @qsize: Size of the queue in bytes165165+ * @wrsize: Since of WR in this queue, if fixed.166166+ * @type: Type of queue (Ingress/Egress/Freelist)167167+ * @owner: Module that owns this queue.168168+ * @nflb: Number of freelist buffers for FL.169169+ * @sreg: What is the FL buffer size register?170170+ * @iq_int_handler: Ingress queue handler in INTx mode.171171+ *172172+ * This function allocates and sets up a queue for the caller173173+ * of size qsize, aligned at the required boundary. This is subject to174174+ * be free entries being available in the queue array. If one is found,175175+ * it is initialized with the allocated queue, marked as being used (owner),176176+ * and a handle returned to the caller in form of the queue's index177177+ * into the q_arr array.178178+ * If user has indicated a freelist (by specifying nflb > 0), create179179+ * another queue (with its own index into q_arr) for the freelist. Allocate180180+ * memory for DMA buffer metadata (vaddr, len etc). Save off the freelist181181+ * idx in the ingress queue's flq.idx. 
This is how a Freelist is associated182182+ * with its owning ingress queue.183183+ */184184+int185185+csio_wr_alloc_q(struct csio_hw *hw, uint32_t qsize, uint32_t wrsize,186186+ uint16_t type, void *owner, uint32_t nflb, int sreg,187187+ iq_handler_t iq_intx_handler)188188+{189189+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);190190+ struct csio_q *q, *flq;191191+ int free_idx = wrm->free_qidx;192192+ int ret_idx = free_idx;193193+ uint32_t qsz;194194+ int flq_idx;195195+196196+ if (free_idx >= wrm->num_q) {197197+ csio_err(hw, "No more free queues.\n");198198+ return -1;199199+ }200200+201201+ switch (type) {202202+ case CSIO_EGRESS:203203+ qsz = ALIGN(qsize, CSIO_QCREDIT_SZ) + csio_wr_qstat_pgsz(hw);204204+ break;205205+ case CSIO_INGRESS:206206+ switch (wrsize) {207207+ case 16:208208+ case 32:209209+ case 64:210210+ case 128:211211+ break;212212+ default:213213+ csio_err(hw, "Invalid Ingress queue WR size:%d\n",214214+ wrsize);215215+ return -1;216216+ }217217+218218+ /*219219+ * Number of elements must be a multiple of 16220220+ * So this includes status page size221221+ */222222+ qsz = ALIGN(qsize/wrsize, 16) * wrsize;223223+224224+ break;225225+ case CSIO_FREELIST:226226+ qsz = ALIGN(qsize/wrsize, 8) * wrsize + csio_wr_qstat_pgsz(hw);227227+ break;228228+ default:229229+ csio_err(hw, "Invalid queue type: 0x%x\n", type);230230+ return -1;231231+ }232232+233233+ q = wrm->q_arr[free_idx];234234+235235+ q->vstart = pci_alloc_consistent(hw->pdev, qsz, &q->pstart);236236+ if (!q->vstart) {237237+ csio_err(hw,238238+ "Failed to allocate DMA memory for "239239+ "queue at id: %d size: %d\n", free_idx, qsize);240240+ return -1;241241+ }242242+243243+ /*244244+ * We need to zero out the contents, importantly for ingress,245245+ * since we start with a generatiom bit of 1 for ingress.246246+ */247247+ memset(q->vstart, 0, qsz);248248+249249+ q->type = type;250250+ q->owner = owner;251251+ q->pidx = q->cidx = q->inc_idx = 0;252252+ q->size = qsz;253253+ q->wr_sz = wrsize; 
/* If using fixed size WRs */254254+255255+ wrm->free_qidx++;256256+257257+ if (type == CSIO_INGRESS) {258258+ /* Since queue area is set to zero */259259+ q->un.iq.genbit = 1;260260+261261+ /*262262+ * Ingress queue status page size is always the size of263263+ * the ingress queue entry.264264+ */265265+ q->credits = (qsz - q->wr_sz) / q->wr_sz;266266+ q->vwrap = (void *)((uintptr_t)(q->vstart) + qsz267267+ - q->wr_sz);268268+269269+ /* Allocate memory for FL if requested */270270+ if (nflb > 0) {271271+ flq_idx = csio_wr_alloc_q(hw, nflb * sizeof(__be64),272272+ sizeof(__be64), CSIO_FREELIST,273273+ owner, 0, sreg, NULL);274274+ if (flq_idx == -1) {275275+ csio_err(hw,276276+ "Failed to allocate FL queue"277277+ " for IQ idx:%d\n", free_idx);278278+ return -1;279279+ }280280+281281+ /* Associate the new FL with the Ingress quue */282282+ q->un.iq.flq_idx = flq_idx;283283+284284+ flq = wrm->q_arr[q->un.iq.flq_idx];285285+ flq->un.fl.bufs = kzalloc(flq->credits *286286+ sizeof(struct csio_dma_buf),287287+ GFP_KERNEL);288288+ if (!flq->un.fl.bufs) {289289+ csio_err(hw,290290+ "Failed to allocate FL queue bufs"291291+ " for IQ idx:%d\n", free_idx);292292+ return -1;293293+ }294294+295295+ flq->un.fl.packen = 0;296296+ flq->un.fl.offset = 0;297297+ flq->un.fl.sreg = sreg;298298+299299+ /* Fill up the free list buffers */300300+ if (csio_wr_fill_fl(hw, flq))301301+ return -1;302302+303303+ /*304304+ * Make sure in a FLQ, atleast 1 credit (8 FL buffers)305305+ * remains unpopulated,otherwise HW thinks306306+ * FLQ is empty.307307+ */308308+ flq->pidx = flq->inc_idx = flq->credits - 8;309309+ } else {310310+ q->un.iq.flq_idx = -1;311311+ }312312+313313+ /* Associate the IQ INTx handler. 
*/314314+ q->un.iq.iq_intx_handler = iq_intx_handler;315315+316316+ csio_q_iqid(hw, ret_idx) = CSIO_MAX_QID;317317+318318+ } else if (type == CSIO_EGRESS) {319319+ q->credits = (qsz - csio_wr_qstat_pgsz(hw)) / CSIO_QCREDIT_SZ;320320+ q->vwrap = (void *)((uintptr_t)(q->vstart) + qsz321321+ - csio_wr_qstat_pgsz(hw));322322+ csio_q_eqid(hw, ret_idx) = CSIO_MAX_QID;323323+ } else { /* Freelist */324324+ q->credits = (qsz - csio_wr_qstat_pgsz(hw)) / sizeof(__be64);325325+ q->vwrap = (void *)((uintptr_t)(q->vstart) + qsz326326+ - csio_wr_qstat_pgsz(hw));327327+ csio_q_flid(hw, ret_idx) = CSIO_MAX_QID;328328+ }329329+330330+ return ret_idx;331331+}332332+333333+/*334334+ * csio_wr_iq_create_rsp - Response handler for IQ creation.335335+ * @hw: The HW module.336336+ * @mbp: Mailbox.337337+ * @iq_idx: Ingress queue that got created.338338+ *339339+ * Handle FW_IQ_CMD mailbox completion. Save off the assigned IQ/FL ids.340340+ */341341+static int342342+csio_wr_iq_create_rsp(struct csio_hw *hw, struct csio_mb *mbp, int iq_idx)343343+{344344+ struct csio_iq_params iqp;345345+ enum fw_retval retval;346346+ uint32_t iq_id;347347+ int flq_idx;348348+349349+ memset(&iqp, 0, sizeof(struct csio_iq_params));350350+351351+ csio_mb_iq_alloc_write_rsp(hw, mbp, &retval, &iqp);352352+353353+ if (retval != FW_SUCCESS) {354354+ csio_err(hw, "IQ cmd returned 0x%x!\n", retval);355355+ mempool_free(mbp, hw->mb_mempool);356356+ return -EINVAL;357357+ }358358+359359+ csio_q_iqid(hw, iq_idx) = iqp.iqid;360360+ csio_q_physiqid(hw, iq_idx) = iqp.physiqid;361361+ csio_q_pidx(hw, iq_idx) = csio_q_cidx(hw, iq_idx) = 0;362362+ csio_q_inc_idx(hw, iq_idx) = 0;363363+364364+ /* Actual iq-id. */365365+ iq_id = iqp.iqid - hw->wrm.fw_iq_start;366366+367367+ /* Set the iq-id to iq map table. 
 */
	/* Defensive: the FW-relative IQ id must fit the driver's intr_map[]. */
	if (iq_id >= CSIO_MAX_IQ) {
		csio_err(hw,
			 "Exceeding MAX_IQ(%d) supported!"
			 " iqid:%d rel_iqid:%d FW iq_start:%d\n",
			 CSIO_MAX_IQ, iq_id, iqp.iqid, hw->wrm.fw_iq_start);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}
	csio_q_set_intr_map(hw, iq_idx, iq_id);

	/*
	 * During FW_IQ_CMD, FW sets interrupt_sent bit to 1 in the SGE
	 * ingress context of this queue. This will block interrupts to
	 * this queue until the next GTS write. Therefore, we do a
	 * 0-cidx increment GTS write for this queue just to clear the
	 * interrupt_sent bit. This will re-enable interrupts to this
	 * queue.
	 */
	csio_wr_sge_intr_enable(hw, iqp.physiqid);

	/* If a free list is attached, record its FW id and pre-populate it. */
	flq_idx = csio_q_iq_flq_idx(hw, iq_idx);
	if (flq_idx != -1) {
		struct csio_q *flq = hw->wrm.q_arr[flq_idx];

		csio_q_flid(hw, flq_idx) = iqp.fl0id;
		csio_q_cidx(hw, flq_idx) = 0;
		/*
		 * Populate all but 8 credits - see the replenish comment in
		 * csio_wr_process_iq(): at least 1 credit (8 FL buffers) must
		 * stay unpopulated or HW thinks the FL is empty.
		 */
		csio_q_pidx(hw, flq_idx) = csio_q_credits(hw, flq_idx) - 8;
		csio_q_inc_idx(hw, flq_idx) = csio_q_credits(hw, flq_idx) - 8;

		/* Now update SGE about the buffers allocated during init */
		csio_wr_ring_fldb(hw, flq);
	}

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}

/*
 * csio_wr_iq_create - Configure an Ingress queue with FW.
 * @hw: The HW module.
 * @priv: Private data object.
 * @iq_idx: Ingress queue index in the WR module.
 * @vec: MSIX vector.
 * @portid: PCIE Channel to be associated with this queue.
 * @async: Is this a FW asynchronous message handling queue?
 * @cbfn: Completion callback.
 *
 * This API configures an ingress queue with FW by issuing a FW_IQ_CMD mailbox
 * with alloc/write bits set.  When @cbfn is NULL the call is synchronous and
 * the response is handled in-line by csio_wr_iq_create_rsp(); otherwise @cbfn
 * owns the mailbox completion.
 */
int
csio_wr_iq_create(struct csio_hw *hw, void *priv, int iq_idx,
		  uint32_t vec, uint8_t portid, bool async,
		  void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct csio_mb *mbp;
	struct csio_iq_params iqp;
	int flq_idx;

	memset(&iqp, 0, sizeof(struct csio_iq_params));
	csio_q_portid(hw, iq_idx) = portid;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		csio_err(hw, "IQ command out of memory!\n");
		return -ENOMEM;
	}

	/* Pick the interrupt destination based on the HW interrupt mode. */
	switch (hw->intr_mode) {
	case CSIO_IM_INTX:
	case CSIO_IM_MSI:
		/* For interrupt forwarding queue only */
		if (hw->intr_iq_idx == iq_idx)
			iqp.iqandst = X_INTERRUPTDESTINATION_PCIE;
		else
			iqp.iqandst = X_INTERRUPTDESTINATION_IQ;
		iqp.iqandstindex =
			csio_q_physiqid(hw, hw->intr_iq_idx);
		break;
	case CSIO_IM_MSIX:
		iqp.iqandst = X_INTERRUPTDESTINATION_PCIE;
		iqp.iqandstindex = (uint16_t)vec;
		break;
	case CSIO_IM_NONE:
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	/* Pass in the ingress queue cmd parameters */
	iqp.pfn = hw->pfn;
	iqp.vfn = 0;
	iqp.iq_start = 1;
	iqp.viid = 0;
	iqp.type = FW_IQ_TYPE_FL_INT_CAP;
	iqp.iqasynch = async;
	if (csio_intr_coalesce_cnt)
		iqp.iqanus = X_UPDATESCHEDULING_COUNTER_OPTTIMER;
	else
		iqp.iqanus = X_UPDATESCHEDULING_TIMER;
	iqp.iqanud = X_UPDATEDELIVERY_INTERRUPT;
	iqp.iqpciech = portid;
	iqp.iqintcntthresh = (uint8_t)csio_sge_thresh_reg;

	/* Encode the per-entry WR size (16/32/64/128B) for the IQ context. */
	switch (csio_q_wr_sz(hw, iq_idx)) {
	case 16:
		iqp.iqesize = 0; break;
	case 32:
		iqp.iqesize = 1; break;
	case 64:
		iqp.iqesize = 2; break;
	case 128:
		iqp.iqesize = 3; break;
	}

	/* IQ size is expressed in number of entries, not bytes. */
	iqp.iqsize = csio_q_size(hw, iq_idx) /
			csio_q_wr_sz(hw, iq_idx);
	iqp.iqaddr = csio_q_pstart(hw, iq_idx);

	/* If this IQ has a free list attached, fill in its parameters too. */
	flq_idx = csio_q_iq_flq_idx(hw, iq_idx);
	if (flq_idx != -1) {
		struct csio_q *flq = hw->wrm.q_arr[flq_idx];

		iqp.fl0paden = 1;
		iqp.fl0packen = flq->un.fl.packen ? 1 : 0;
		iqp.fl0fbmin = X_FETCHBURSTMIN_64B;
		iqp.fl0fbmax = X_FETCHBURSTMAX_512B;
		iqp.fl0size = csio_q_size(hw, flq_idx) / CSIO_QCREDIT_SZ;
		iqp.fl0addr = csio_q_pstart(hw, flq_idx);
	}

	csio_mb_iq_alloc_write(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &iqp, cbfn);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of IQ cmd failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	/* Asynchronous: @cbfn owns the mailbox and its completion. */
	if (cbfn != NULL)
		return 0;

	return csio_wr_iq_create_rsp(hw, mbp, iq_idx);
}

/*
 * csio_wr_eq_cfg_rsp - Response handler for EQ creation.
 * @hw: The HW module.
 * @mbp: Mailbox.
 * @eq_idx: Egress queue that got created.
 *
 * Handle FW_EQ_OFLD_CMD mailbox completion. Save off the assigned EQ ids.
 * Frees @mbp back to the mailbox mempool on all paths.
 */
static int
csio_wr_eq_cfg_rsp(struct csio_hw *hw, struct csio_mb *mbp, int eq_idx)
{
	struct csio_eq_params eqp;
	enum fw_retval retval;

	memset(&eqp, 0, sizeof(struct csio_eq_params));

	csio_mb_eq_ofld_alloc_write_rsp(hw, mbp, &retval, &eqp);

	if (retval != FW_SUCCESS) {
		csio_err(hw, "EQ OFLD cmd returned 0x%x!\n", retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	/* Save the FW-assigned EQ ids and reset the queue indices. */
	csio_q_eqid(hw, eq_idx) = (uint16_t)eqp.eqid;
	csio_q_physeqid(hw, eq_idx) = (uint16_t)eqp.physeqid;
	csio_q_pidx(hw, eq_idx) = csio_q_cidx(hw, eq_idx) = 0;
	csio_q_inc_idx(hw, eq_idx) = 0;

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}

/*
 * csio_wr_eq_create - Configure an Egress queue with FW.
 * @hw: HW module.
 * @priv: Private data.
 * @eq_idx: Egress queue index in the WR module.
 * @iq_idx: Associated
 ingress queue index.
 * @cbfn: Completion callback.
 *
 * This API configures a offload egress queue with FW by issuing a
 * FW_EQ_OFLD_CMD (with alloc + write ) mailbox.  When @cbfn is NULL the
 * call is synchronous and the response is handled by csio_wr_eq_cfg_rsp().
 */
int
csio_wr_eq_create(struct csio_hw *hw, void *priv, int eq_idx,
		  int iq_idx, uint8_t portid,
		  void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct csio_mb *mbp;
	struct csio_eq_params eqp;

	memset(&eqp, 0, sizeof(struct csio_eq_params));

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		csio_err(hw, "EQ command out of memory!\n");
		return -ENOMEM;
	}

	/* EQ parameters: completions for this EQ go to the associated IQ. */
	eqp.pfn = hw->pfn;
	eqp.vfn = 0;
	eqp.eqstart = 1;
	eqp.hostfcmode = X_HOSTFCMODE_STATUS_PAGE;
	eqp.iqid = csio_q_iqid(hw, iq_idx);
	eqp.fbmin = X_FETCHBURSTMIN_64B;
	eqp.fbmax = X_FETCHBURSTMAX_512B;
	eqp.cidxfthresh = 0;
	eqp.pciechn = portid;
	/* EQ size is in credits (CSIO_QCREDIT_SZ units), not bytes. */
	eqp.eqsize = csio_q_size(hw, eq_idx) / CSIO_QCREDIT_SZ;
	eqp.eqaddr = csio_q_pstart(hw, eq_idx);

	csio_mb_eq_ofld_alloc_write(hw, mbp, priv, CSIO_MB_DEFAULT_TMO,
				    &eqp, cbfn);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of EQ OFLD cmd failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	/* Asynchronous: @cbfn owns the mailbox and its completion. */
	if (cbfn != NULL)
		return 0;

	return csio_wr_eq_cfg_rsp(hw, mbp, eq_idx);
}

/*
 * csio_wr_iq_destroy_rsp - Response handler for IQ removal.
 * @hw: The HW module.
 * @mbp: Mailbox.
 * @iq_idx: Ingress queue that was freed.
 *
 * Handle FW_IQ_CMD (free) mailbox completion.  Frees @mbp on all paths.
 */
static int
csio_wr_iq_destroy_rsp(struct csio_hw *hw, struct csio_mb *mbp, int iq_idx)
{
	enum fw_retval retval = csio_mb_fw_retval(mbp);
	int rv = 0;

	if (retval != FW_SUCCESS)
		rv = -EINVAL;

	mempool_free(mbp, hw->mb_mempool);

	return rv;
}

/*
 * csio_wr_iq_destroy - Free an ingress queue.
 * @hw: The HW module.
 * @priv: Private data object.
 * @iq_idx: Ingress queue index to destroy
 * @cbfn: Completion callback.
 *
 * This API frees an ingress queue by issuing the FW_IQ_CMD
 * with the free bit set.
 */
static int
csio_wr_iq_destroy(struct csio_hw *hw, void *priv, int iq_idx,
		   void (*cbfn)(struct csio_hw *, struct csio_mb *))
{
	int rv = 0;
	struct csio_mb *mbp;
	struct csio_iq_params iqp;
	int flq_idx;

	memset(&iqp, 0, sizeof(struct csio_iq_params));

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp)
		return -ENOMEM;

	iqp.pfn = hw->pfn;
	iqp.vfn = 0;
	iqp.iqid = csio_q_iqid(hw, iq_idx);
	iqp.type = FW_IQ_TYPE_FL_INT_CAP;

	/*
	 * Pass the FL id if one is attached; 0xFFFF otherwise — presumably
	 * the "no free list" sentinel for FW_IQ_CMD (fl1id is never used
	 * by this driver, hence always 0xFFFF).  NOTE(review): confirm
	 * against the T4 FW interface definition.
	 */
	flq_idx = csio_q_iq_flq_idx(hw, iq_idx);
	if (flq_idx != -1)
		iqp.fl0id = csio_q_flid(hw, flq_idx);
	else
		iqp.fl0id = 0xFFFF;

	iqp.fl1id = 0xFFFF;

	csio_mb_iq_free(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &iqp, cbfn);

	rv = csio_mb_issue(hw, mbp);
	if (rv != 0) {
		mempool_free(mbp, hw->mb_mempool);
		return rv;
	}

	/* Asynchronous: @cbfn owns the mailbox and its completion. */
	if (cbfn != NULL)
		return 0;

	return csio_wr_iq_destroy_rsp(hw, mbp, iq_idx);
}

/*
 * csio_wr_eq_destroy_rsp - Response handler for OFLD EQ removal.
 * @hw: The HW module.
 * @mbp: Mailbox.
 * @eq_idx: Egress queue that was freed.
 *
 * Handle FW_OFLD_EQ_CMD (free) mailbox completion.  Frees @mbp on all paths.
 */
static int
csio_wr_eq_destroy_rsp(struct csio_hw *hw, struct csio_mb *mbp, int eq_idx)
{
	enum fw_retval retval = csio_mb_fw_retval(mbp);
	int rv = 0;

	if (retval != FW_SUCCESS)
		rv =
 -EINVAL;

	mempool_free(mbp, hw->mb_mempool);

	return rv;
}

/*
 * csio_wr_eq_destroy - Free an Egress queue.
 * @hw: The HW module.
 * @priv: Private data object.
 * @eq_idx: Egress queue index to destroy
 * @cbfn: Completion callback.
 *
 * This API frees an Egress queue by issuing the FW_EQ_OFLD_CMD
 * with the free bit set.
 */
static int
csio_wr_eq_destroy(struct csio_hw *hw, void *priv, int eq_idx,
		   void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	int rv = 0;
	struct csio_mb *mbp;
	struct csio_eq_params eqp;

	memset(&eqp, 0, sizeof(struct csio_eq_params));

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp)
		return -ENOMEM;

	eqp.pfn = hw->pfn;
	eqp.vfn = 0;
	eqp.eqid = csio_q_eqid(hw, eq_idx);

	csio_mb_eq_ofld_free(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &eqp, cbfn);

	rv = csio_mb_issue(hw, mbp);
	if (rv != 0) {
		mempool_free(mbp, hw->mb_mempool);
		return rv;
	}

	/* Asynchronous: @cbfn owns the mailbox and its completion. */
	if (cbfn != NULL)
		return 0;

	return csio_wr_eq_destroy_rsp(hw, mbp, eq_idx);
}

/*
 * csio_wr_cleanup_eq_stpg - Cleanup Egress queue status page
 * @hw: HW module
 * @qidx: Egress queue index
 *
 * Cleanup the Egress queue status page.
 */
static void
csio_wr_cleanup_eq_stpg(struct csio_hw *hw, int qidx)
{
	struct csio_q *q = csio_hw_to_wrm(hw)->q_arr[qidx];
	/* The status page lives at the wrap point of the EQ. */
	struct csio_qstatus_page *stp = (struct csio_qstatus_page *)q->vwrap;

	memset(stp, 0, sizeof(*stp));
}

/*
 * csio_wr_cleanup_iq_ftr - Cleanup Footer entries in IQ
 * @hw: HW module
 * @qidx: Ingress queue index
 *
 * Cleanup the footer entries in the given ingress queue,
 * set to 1 the internal copy of genbit.
 */
static void
csio_wr_cleanup_iq_ftr(struct csio_hw *hw, int qidx)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_q *q = wrm->q_arr[qidx];
	void *wr;
	struct csio_iqwr_footer *ftr;
	uint32_t i = 0;

	/* set to 1 since we are just about zero out genbit */
	q->un.iq.genbit = 1;

	for (i = 0; i < q->credits; i++) {
		/* Get the WR */
		wr = (void *)((uintptr_t)q->vstart +
			(i * q->wr_sz));
		/* Get the footer */
		ftr = (struct csio_iqwr_footer *)((uintptr_t)wr +
			(q->wr_sz - sizeof(*ftr)));
		/* Zero out footer */
		memset(ftr, 0, sizeof(*ftr));
	}
}

/*
 * csio_wr_destroy_queues - Free all queues with FW (or just locally).
 * @hw: HW module.
 * @cmd: When true, issue FW free commands; when false (or after a FW
 *       command times out / is busy) only invalidate the driver state.
 */
int
csio_wr_destroy_queues(struct csio_hw *hw, bool cmd)
{
	int i, flq_idx;
	struct csio_q *q;
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	int rv;

	for (i = 0; i < wrm->free_qidx; i++) {
		q = wrm->q_arr[i];

		switch (q->type) {
		case CSIO_EGRESS:
			if (csio_q_eqid(hw, i) != CSIO_MAX_QID) {
				csio_wr_cleanup_eq_stpg(hw, i);
				if (!cmd) {
					csio_q_eqid(hw, i) = CSIO_MAX_QID;
					continue;
				}

				rv = csio_wr_eq_destroy(hw, NULL, i, NULL);
				if ((rv == -EBUSY) || (rv == -ETIMEDOUT))
					cmd = false;

				csio_q_eqid(hw, i) = CSIO_MAX_QID;
			}
			/* fall through - NOTE(review): no break in original;
			 * marking the implicit fallthrough; confirm it is
			 * intentional.
			 */
		case CSIO_INGRESS:
			if (csio_q_iqid(hw, i) != CSIO_MAX_QID) {
				csio_wr_cleanup_iq_ftr(hw, i);
				if (!cmd) {
					csio_q_iqid(hw, i) = CSIO_MAX_QID;
					flq_idx = csio_q_iq_flq_idx(hw, i);
					if (flq_idx != -1)
						csio_q_flid(hw, flq_idx) =
								CSIO_MAX_QID;
					continue;
				}

				rv = csio_wr_iq_destroy(hw, NULL, i, NULL);
				if ((rv == -EBUSY) || (rv == -ETIMEDOUT))
					cmd = false;

				csio_q_iqid(hw, i) = CSIO_MAX_QID;
				flq_idx = csio_q_iq_flq_idx(hw, i);
				if (flq_idx != -1)
					csio_q_flid(hw, flq_idx) = CSIO_MAX_QID;
			}
			/* fall through - harmless: default only breaks. */
		default:
			break;
		}
	}

	hw->flags &= ~CSIO_HWF_Q_FW_ALLOCED;

	return 0;
}

/*
 * csio_wr_get - Get requested size of WR entry/entries from queue.
 * @hw: HW module.
 * @qidx: Index of queue.
 * @size: Cumulative size of Work request(s).
 * @wrp: Work request pair.
 *
 * If requested credits are available, return the start address of the
 * work request in the work request pair. Set pidx accordingly and
 * return.
 *
 * NOTE about WR pair:
 * ==================
 * A WR can start towards the end of a queue, and then continue at the
 * beginning, since the queue is considered to be circular. This will
 * require a pair of address/size to be passed back to the caller -
 * hence Work request pair format.
 */
int
csio_wr_get(struct csio_hw *hw, int qidx, uint32_t size,
	    struct csio_wr_pair *wrp)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_q *q = wrm->q_arr[qidx];
	void *cwr = (void *)((uintptr_t)(q->vstart) +
					(q->pidx * CSIO_QCREDIT_SZ));
	struct csio_qstatus_page *stp = (struct csio_qstatus_page *)q->vwrap;
	/* Refresh cidx from the HW-updated status page (big-endian). */
	uint16_t cidx = q->cidx = ntohs(stp->cidx);
	uint16_t pidx = q->pidx;
	uint32_t req_sz = ALIGN(size, CSIO_QCREDIT_SZ);
	int req_credits = req_sz / CSIO_QCREDIT_SZ;
	int credits;

	CSIO_DB_ASSERT(q->owner != NULL);
	CSIO_DB_ASSERT((qidx >= 0) && (qidx < wrm->free_qidx));
	CSIO_DB_ASSERT(cidx <= q->credits);

	/* Calculate credits */
	if (pidx > cidx) {
		credits = q->credits - (pidx - cidx) - 1;
	} else if (cidx > pidx) {
		credits = cidx - pidx - 1;
	} else {
		/* cidx == pidx, empty queue */
		credits = q->credits;
		CSIO_INC_STATS(q, n_qempty);
	}

	/*
	 * Check if we have enough credits.
	 * credits = 1 implies queue is full.
	 */
	if (!credits || (req_credits > credits)) {
		CSIO_INC_STATS(q, n_qfull);
		return -EBUSY;
	}

	/*
	 * If we are here, we have enough credits to satisfy the
	 * request. Check if we are near the end of q, and if WR spills over.
	 * If it does, use the first addr/size to cover the queue until
	 * the end. Fit the remainder portion of the request at the top
	 * of queue and return it in the second addr/len. Set pidx
	 * accordingly.
	 */
	if (unlikely(((uintptr_t)cwr + req_sz) > (uintptr_t)(q->vwrap))) {
		wrp->addr1 = cwr;
		wrp->size1 = (uint32_t)((uintptr_t)q->vwrap - (uintptr_t)cwr);
		wrp->addr2 = q->vstart;
		wrp->size2 = req_sz - wrp->size1;
		q->pidx = (uint16_t)(ALIGN(wrp->size2, CSIO_QCREDIT_SZ) /
							CSIO_QCREDIT_SZ);
		CSIO_INC_STATS(q, n_qwrap);
		CSIO_INC_STATS(q, n_eq_wr_split);
	} else {
		wrp->addr1 = cwr;
		wrp->size1 = req_sz;
		wrp->addr2 = NULL;
		wrp->size2 = 0;
		q->pidx += (uint16_t)req_credits;

		/* We are the end of queue, roll back pidx to top of queue */
		if (unlikely(q->pidx == q->credits)) {
			q->pidx = 0;
			CSIO_INC_STATS(q, n_qwrap);
		}
	}

	/* Remember how many credits to report in the next doorbell ring. */
	q->inc_idx = (uint16_t)req_credits;

	CSIO_INC_STATS(q, n_tot_reqs);

	return 0;
}

/*
 * csio_wr_copy_to_wrp - Copies given data into WR.
 * @data_buf - Data buffer
 * @wrp - Work request pair.
 * @wr_off - Work request offset.
 * @data_len - Data length.
 *
 * Copies the given data in Work Request. Work request pair(wrp) specifies
 * address information of Work request.
 * Returns: none
 */
void
csio_wr_copy_to_wrp(void *data_buf, struct csio_wr_pair *wrp,
		    uint32_t wr_off, uint32_t data_len)
{
	uint32_t nbytes;

	/* Number of space available in buffer addr1 of WRP */
	nbytes = ((wrp->size1 - wr_off) >= data_len) ?
					data_len : (wrp->size1 - wr_off);

	memcpy((uint8_t *) wrp->addr1 + wr_off, data_buf, nbytes);
	data_len -= nbytes;

	/* Write the remaining data from the begining of circular buffer */
	if (data_len) {
		CSIO_DB_ASSERT(data_len <= wrp->size2);
		CSIO_DB_ASSERT(wrp->addr2 != NULL);
		memcpy(wrp->addr2, (uint8_t *) data_buf + nbytes, data_len);
	}
}

/*
 * csio_wr_issue - Notify chip of Work request.
 * @hw: HW module.
 * @qidx: Index of queue.
 * @prio: 0: Low priority, 1: High priority
 *
 * Rings the SGE Doorbell by writing the current producer index of the passed
 * in queue into the register.
 *
 */
int
csio_wr_issue(struct csio_hw *hw, int qidx, bool prio)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_q *q = wrm->q_arr[qidx];

	CSIO_DB_ASSERT((qidx >= 0) && (qidx < wrm->free_qidx));

	/* Ensure the WR contents are visible before the doorbell write. */
	wmb();
	/* Ring SGE Doorbell writing q->pidx into it */
	csio_wr_reg32(hw, DBPRIO(prio) | QID(q->un.eq.physeqid) |
			  PIDX(q->inc_idx), MYPF_REG(SGE_PF_KDOORBELL));
	q->inc_idx = 0;

	return 0;
}

/*
 * Number of credits pending (produced but not yet consumed) in a
 * circular queue; 0 when the queue is empty.
 */
static inline uint32_t
csio_wr_avail_qcredits(struct csio_q *q)
{
	if (q->pidx > q->cidx)
		return q->pidx - q->cidx;
	else if (q->cidx > q->pidx)
		return q->credits - (q->cidx - q->pidx);
	else
		return 0;	/* cidx == pidx, empty queue */
}

/*
 * csio_wr_inval_flq_buf - Invalidate a free list buffer entry.
 * @hw: HW module.
 * @flq: The freelist queue.
 *
 * Invalidate the driver's version of a freelist buffer entry,
 * without freeing the associated the DMA memory. The entry
 * to be invalidated is picked up from the current Free list
 * queue cidx.
 *
 */
static inline void
csio_wr_inval_flq_buf(struct csio_hw *hw, struct csio_q *flq)
{
	flq->cidx++;
	if (flq->cidx == flq->credits) {
		flq->cidx = 0;
		CSIO_INC_STATS(flq, n_qwrap);
	}
}

/*
 * csio_wr_process_fl - Process a freelist completion.
 * @hw: HW module.
 * @q: The ingress queue attached to the Freelist.
 * @wr: The freelist completion WR in the ingress queue.
 * @len_to_qid: The lower 32-bits of the first flit of the RSP footer
 * @iq_handler: Caller's handler for this completion.
 * @priv: Private pointer of caller
 *
 * Walks the free list consuming as many buffers as the completion's
 * payload length requires, assembles them into @flb, and hands the
 * whole thing to @iq_handler.
 */
static inline void
csio_wr_process_fl(struct csio_hw *hw, struct csio_q *q,
		   void *wr, uint32_t len_to_qid,
		   void (*iq_handler)(struct csio_hw *, void *,
				      uint32_t, struct csio_fl_dma_buf *,
				      void *),
		   void *priv)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_sge *sge = &wrm->sge;
	struct csio_fl_dma_buf flb;
	struct csio_dma_buf *buf, *fbuf;
	uint32_t bufsz, len, lastlen = 0;
	int n;
	struct csio_q *flq = hw->wrm.q_arr[q->un.iq.flq_idx];

	CSIO_DB_ASSERT(flq != NULL);

	len = len_to_qid;

	/*
	 * NEWBUF: HW started this packet in a fresh FL buffer; retire any
	 * partially consumed previous buffer first, then extract the real
	 * payload length from the flit.
	 */
	if (len & IQWRF_NEWBUF) {
		if (flq->un.fl.offset > 0) {
			csio_wr_inval_flq_buf(hw, flq);
			flq->un.fl.offset = 0;
		}
		len = IQWRF_LEN_GET(len);
	}

	CSIO_DB_ASSERT(len != 0);

	flb.totlen = len;

	/* Consume all freelist buffers used for len bytes */
	for (n = 0, fbuf = flb.flbufs; ; n++, fbuf++) {
		buf = &flq->un.fl.bufs[flq->cidx];
		bufsz = csio_wr_fl_bufsz(sge, buf);

		fbuf->paddr = buf->paddr;
		fbuf->vaddr = buf->vaddr;

		flb.offset = flq->un.fl.offset;
		lastlen = min(bufsz, len);
		fbuf->len = lastlen;

		len -= lastlen;
		if (!len)
			break;
		/* More data follows in the next FL buffer. */
		csio_wr_inval_flq_buf(hw, flq);
	}

	flb.defer_free = flq->un.fl.packen ? 0 : 1;

	/* WR length excludes the footer. */
	iq_handler(hw, wr, q->wr_sz - sizeof(struct csio_iqwr_footer),
		   &flb, priv);

	/*
	 * Packing mode: advance the offset within the current buffer;
	 * otherwise the buffer is fully consumed and is retired.
	 */
	if (flq->un.fl.packen)
		flq->un.fl.offset += ALIGN(lastlen, sge->csio_fl_align);
	else
		csio_wr_inval_flq_buf(hw, flq);

}

/*
 * csio_is_new_iqwr - Is this a new Ingress queue entry ?
 * @q: Ingress queue.
 * @ftr: Ingress queue WR SGE footer.
 *
 * The entry is new if our generation bit matches the corresponding
 * bit in the footer of the current WR.
 */
static inline bool
csio_is_new_iqwr(struct csio_q *q, struct csio_iqwr_footer *ftr)
{
	return (q->un.iq.genbit == (ftr->u.type_gen >> IQWRF_GEN_SHIFT));
}

/*
 * csio_wr_process_iq - Process elements in Ingress queue.
 * @hw: HW pointer
 * @qidx: Index of queue
 * @iq_handler: Handler for this queue
 * @priv: Caller's private pointer
 *
 * This routine walks through every entry of the ingress queue, calling
 * the provided iq_handler with the entry, until the generation bit
 * flips.
 */
int
csio_wr_process_iq(struct csio_hw *hw, struct csio_q *q,
		   void (*iq_handler)(struct csio_hw *, void *,
				      uint32_t, struct csio_fl_dma_buf *,
				      void *),
		   void *priv)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	void *wr = (void *)((uintptr_t)q->vstart + (q->cidx * q->wr_sz));
	struct csio_iqwr_footer *ftr;
	uint32_t wr_type, fw_qid, qid;
	struct csio_q *q_completed;
	struct csio_q *flq = csio_iq_has_fl(q) ?
					wrm->q_arr[q->un.iq.flq_idx] : NULL;
	int rv = 0;

	/* Get the footer */
	ftr = (struct csio_iqwr_footer *)((uintptr_t)wr +
					  (q->wr_sz - sizeof(*ftr)));

	/*
	 * When q wrapped around last time, driver should have inverted
	 * ic.genbit as well.
	 */
	while (csio_is_new_iqwr(q, ftr)) {

		CSIO_DB_ASSERT(((uintptr_t)wr + q->wr_sz) <=
						(uintptr_t)q->vwrap);
		/* Read the WR body only after the genbit check saw it valid. */
		rmb();
		wr_type = IQWRF_TYPE_GET(ftr->u.type_gen);

		switch (wr_type) {
		case X_RSPD_TYPE_CPL:
			/* Subtract footer from WR len */
			iq_handler(hw, wr, q->wr_sz - sizeof(*ftr), NULL, priv);
			break;
		case X_RSPD_TYPE_FLBUF:
			csio_wr_process_fl(hw, q, wr,
					   ntohl(ftr->pldbuflen_qid),
					   iq_handler, priv);
			break;
		case X_RSPD_TYPE_INTR:
			/*
			 * Forwarded interrupt: the footer names the IQ that
			 * actually completed; service that queue.
			 */
			fw_qid = ntohl(ftr->pldbuflen_qid);
			qid = fw_qid - wrm->fw_iq_start;
			q_completed = hw->wrm.intr_map[qid];

			if (unlikely(qid ==
					csio_q_physiqid(hw, hw->intr_iq_idx))) {
				/*
				 * We are already in the Forward Interrupt
				 * Interrupt Queue Service! Do-not service
				 * again!
				 *
				 */
			} else {
				CSIO_DB_ASSERT(q_completed);
				CSIO_DB_ASSERT(
					q_completed->un.iq.iq_intx_handler);

				/* Call the queue handler. */
				q_completed->un.iq.iq_intx_handler(hw, NULL,
						0, NULL, (void *)q_completed);
			}
			break;
		default:
			csio_warn(hw, "Unknown resp type 0x%x received\n",
				  wr_type);
			CSIO_INC_STATS(q, n_rsp_unknown);
			break;
		}

		/*
		 * Ingress *always* has fixed size WR entries. Therefore,
		 * there should always be complete WRs towards the end of
		 * queue.
		 */
		if (((uintptr_t)wr + q->wr_sz) == (uintptr_t)q->vwrap) {

			/* Roll over to start of queue */
			q->cidx = 0;
			wr = q->vstart;

			/* Toggle genbit */
			q->un.iq.genbit ^= 0x1;

			CSIO_INC_STATS(q, n_qwrap);
		} else {
			q->cidx++;
			wr = (void *)((uintptr_t)(q->vstart) +
					(q->cidx * q->wr_sz));
		}

		ftr = (struct csio_iqwr_footer *)((uintptr_t)wr +
						  (q->wr_sz - sizeof(*ftr)));
		q->inc_idx++;

	} /* while (q->un.iq.genbit == hdr->genbit) */

	/*
	 * We need to re-arm SGE interrupts in case we got a stray interrupt,
	 * especially in msix mode. With INTx, this may be a common occurence.
	 */
	if (unlikely(!q->inc_idx)) {
		CSIO_INC_STATS(q, n_stray_comp);
		rv = -EINVAL;
		goto restart;
	}

	/* Replenish free list buffers if pending falls below low water mark */
	if (flq) {
		uint32_t avail  = csio_wr_avail_qcredits(flq);
		if (avail <= 16) {
			/* Make sure in FLQ, atleast 1 credit (8 FL buffers)
			 * remains unpopulated otherwise HW thinks
			 * FLQ is empty.
			 */
			csio_wr_update_fl(hw, flq, (flq->credits - 8) - avail);
			csio_wr_ring_fldb(hw, flq);
		}
	}

restart:
	/* Now inform SGE about our incremental index value */
	csio_wr_reg32(hw, CIDXINC(q->inc_idx)		|
			  INGRESSQID(q->un.iq.physiqid)	|
			  TIMERREG(csio_sge_timer_reg),
			  MYPF_REG(SGE_PF_GTS));
	q->stats.n_tot_rsps += q->inc_idx;

	q->inc_idx = 0;

	return rv;
}

/* Convenience wrapper: look up the IQ by WR-module index and process it. */
int
csio_wr_process_iq_idx(struct csio_hw *hw, int qidx,
		       void (*iq_handler)(struct csio_hw *, void *,
					  uint32_t, struct csio_fl_dma_buf *,
					  void *),
		       void *priv)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_q *iq = wrm->q_arr[qidx];

	return csio_wr_process_iq(hw, iq, iq_handler, priv);
}

/* Index of the SGE timer value closest (by absolute delta) to 'time'. */
static int
csio_closest_timer(struct csio_sge *s, int time)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		delta = time - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

/* Index of the SGE counter value closest (by absolute delta) to 'cnt'. */
static int
csio_closest_thresh(struct csio_sge *s, int cnt)
{
	int i, delta,
 match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = cnt - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

/*
 * csio_wr_fixup_host_params - Program host-dependent SGE registers.
 * @hw: HW module.
 *
 * Sets host page size, ingress padding boundary / free-list alignment
 * and the FL buffer size registers based on this host's page size and
 * cache line size.
 */
static void
csio_wr_fixup_host_params(struct csio_hw *hw)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_sge *sge = &wrm->sge;
	uint32_t clsz = L1_CACHE_BYTES;
	/* Host page size is encoded as log2(PAGE_SIZE) - 10. */
	uint32_t s_hps = PAGE_SHIFT - 10;
	uint32_t ingpad = 0;
	uint32_t stat_len = clsz > 64 ? 128 : 64;

	csio_wr_reg32(hw, HOSTPAGESIZEPF0(s_hps) | HOSTPAGESIZEPF1(s_hps) |
		      HOSTPAGESIZEPF2(s_hps) | HOSTPAGESIZEPF3(s_hps) |
		      HOSTPAGESIZEPF4(s_hps) | HOSTPAGESIZEPF5(s_hps) |
		      HOSTPAGESIZEPF6(s_hps) | HOSTPAGESIZEPF7(s_hps),
		      SGE_HOST_PAGE_SIZE);

	/* FL alignment follows the cache line size, minimum 32 bytes. */
	sge->csio_fl_align = clsz < 32 ? 32 : clsz;
	/* INGPADBOUNDARY encodes log2(align) - 5. */
	ingpad = ilog2(sge->csio_fl_align) - 5;

	csio_set_reg_field(hw, SGE_CONTROL, INGPADBOUNDARY_MASK |
					    EGRSTATUSPAGESIZE(1),
			   INGPADBOUNDARY(ingpad) |
			   EGRSTATUSPAGESIZE(stat_len != 64));

	/* FL BUFFER SIZE#0 is Page size i,e already aligned to cache line */
	csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0);
	/* Round FL buffer sizes 2 and 3 up to the FL alignment. */
	csio_wr_reg32(hw,
		      (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2) +
		      sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
		      SGE_FL_BUFFER_SIZE2);
	csio_wr_reg32(hw,
		      (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3) +
		      sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
		      SGE_FL_BUFFER_SIZE3);

	csio_wr_reg32(hw, HPZ0(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ);

	/* default value of rx_dma_offset of the NIC driver */
	csio_set_reg_field(hw, SGE_CONTROL, PKTSHIFT_MASK,
			   PKTSHIFT(CSIO_SGE_RX_DMA_OFFSET));
}

/*
 * csio_init_intr_coalesce_parms - Select interrupt coalescing registers.
 * @hw: HW module.
 *
 * Derives the global SGE threshold/timer register indices from the
 * module's coalescing parameters.
 */
static void
csio_init_intr_coalesce_parms(struct csio_hw *hw)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_sge *sge = &wrm->sge;

	/*
	 * NOTE(review): this closest-threshold result is immediately
	 * discarded when csio_intr_coalesce_cnt is set - looks odd but is
	 * how the code reads; confirm intent.
	 */
	csio_sge_thresh_reg = csio_closest_thresh(sge, csio_intr_coalesce_cnt);
	if (csio_intr_coalesce_cnt) {
		/* Count-based coalescing: restart the counter, no timer. */
		csio_sge_thresh_reg = 0;
		csio_sge_timer_reg = X_TIMERREG_RESTART_COUNTER;
		return;
	}

	csio_sge_timer_reg = csio_closest_timer(sge, csio_intr_coalesce_time);
}

/*
 * csio_wr_get_sge - Get SGE register values.
 * @hw: HW module.
 *
 * Used by non-master functions and by master-functions relying on config file.
 */
static void
csio_wr_get_sge(struct csio_hw *hw)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_sge *sge = &wrm->sge;
	uint32_t ingpad;
	int i;
	u32 timer_value_0_and_1,
 timer_value_2_and_3, timer_value_4_and_5;
	u32 ingress_rx_threshold;

	sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL);

	/* Decode the programmed ingress padding boundary into bytes. */
	ingpad = INGPADBOUNDARY_GET(sge->sge_control);

	switch (ingpad) {
	case X_INGPCIEBOUNDARY_32B:
		sge->csio_fl_align = 32; break;
	case X_INGPCIEBOUNDARY_64B:
		sge->csio_fl_align = 64; break;
	case X_INGPCIEBOUNDARY_128B:
		sge->csio_fl_align = 128; break;
	case X_INGPCIEBOUNDARY_256B:
		sge->csio_fl_align = 256; break;
	case X_INGPCIEBOUNDARY_512B:
		sge->csio_fl_align = 512; break;
	case X_INGPCIEBOUNDARY_1024B:
		sge->csio_fl_align = 1024; break;
	case X_INGPCIEBOUNDARY_2048B:
		sge->csio_fl_align = 2048; break;
	case X_INGPCIEBOUNDARY_4096B:
		sge->csio_fl_align = 4096; break;
	}

	for (i = 0; i < CSIO_SGE_FL_SIZE_REGS; i++)
		csio_get_flbuf_size(hw, sge, i);

	/* Read the timer registers and convert core ticks to microseconds. */
	timer_value_0_and_1 = csio_rd_reg32(hw, SGE_TIMER_VALUE_0_AND_1);
	timer_value_2_and_3 = csio_rd_reg32(hw, SGE_TIMER_VALUE_2_AND_3);
	timer_value_4_and_5 = csio_rd_reg32(hw, SGE_TIMER_VALUE_4_AND_5);

	sge->timer_val[0] = (uint16_t)csio_core_ticks_to_us(hw,
					TIMERVALUE0_GET(timer_value_0_and_1));
	sge->timer_val[1] = (uint16_t)csio_core_ticks_to_us(hw,
					TIMERVALUE1_GET(timer_value_0_and_1));
	sge->timer_val[2] = (uint16_t)csio_core_ticks_to_us(hw,
					TIMERVALUE2_GET(timer_value_2_and_3));
	sge->timer_val[3] = (uint16_t)csio_core_ticks_to_us(hw,
					TIMERVALUE3_GET(timer_value_2_and_3));
	sge->timer_val[4] = (uint16_t)csio_core_ticks_to_us(hw,
					TIMERVALUE4_GET(timer_value_4_and_5));
	sge->timer_val[5] = (uint16_t)csio_core_ticks_to_us(hw,
					TIMERVALUE5_GET(timer_value_4_and_5));

	/* Read back the interrupt coalescing counter thresholds. */
	ingress_rx_threshold = csio_rd_reg32(hw, SGE_INGRESS_RX_THRESHOLD);
	sge->counter_val[0] = THRESHOLD_0_GET(ingress_rx_threshold);
	sge->counter_val[1] = THRESHOLD_1_GET(ingress_rx_threshold);
	sge->counter_val[2] = THRESHOLD_2_GET(ingress_rx_threshold);
	sge->counter_val[3] = THRESHOLD_3_GET(ingress_rx_threshold);

	csio_init_intr_coalesce_parms(hw);
}

/*
 * csio_wr_set_sge - Initialize SGE registers
 * @hw: HW module.
 *
 * Used by Master function to initialize SGE registers in the absence
 * of a config file.
 */
static void
csio_wr_set_sge(struct csio_hw *hw)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_sge *sge = &wrm->sge;
	int i;

	/*
	 * Set up our basic SGE mode to deliver CPL messages to our Ingress
	 * Queue and Packet Data to the Free List.
	 */
	csio_set_reg_field(hw, SGE_CONTROL, RXPKTCPLMODE(1), RXPKTCPLMODE(1));

	sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL);

	/* sge->csio_fl_align is set up by csio_wr_fixup_host_params(). */

	/*
	 * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
	 * and generate an interrupt when this occurs so we can recover.
	 */
	csio_set_reg_field(hw, SGE_DBFIFO_STATUS,
			   HP_INT_THRESH(HP_INT_THRESH_MASK) |
			   LP_INT_THRESH(LP_INT_THRESH_MASK),
			   HP_INT_THRESH(CSIO_SGE_DBFIFO_INT_THRESH) |
			   LP_INT_THRESH(CSIO_SGE_DBFIFO_INT_THRESH));
	csio_set_reg_field(hw, SGE_DOORBELL_CONTROL, ENABLE_DROP,
			   ENABLE_DROP);

	/* SGE_FL_BUFFER_SIZE0 is set up by csio_wr_fixup_host_params().
 */

	CSIO_SET_FLBUF_SIZE(hw, 1, CSIO_SGE_FLBUF_SIZE1);
	CSIO_SET_FLBUF_SIZE(hw, 2, CSIO_SGE_FLBUF_SIZE2);
	CSIO_SET_FLBUF_SIZE(hw, 3, CSIO_SGE_FLBUF_SIZE3);
	CSIO_SET_FLBUF_SIZE(hw, 4, CSIO_SGE_FLBUF_SIZE4);
	CSIO_SET_FLBUF_SIZE(hw, 5, CSIO_SGE_FLBUF_SIZE5);
	CSIO_SET_FLBUF_SIZE(hw, 6, CSIO_SGE_FLBUF_SIZE6);
	CSIO_SET_FLBUF_SIZE(hw, 7, CSIO_SGE_FLBUF_SIZE7);
	CSIO_SET_FLBUF_SIZE(hw, 8, CSIO_SGE_FLBUF_SIZE8);

	/* Read back the FL buffer sizes just programmed. */
	for (i = 0; i < CSIO_SGE_FL_SIZE_REGS; i++)
		csio_get_flbuf_size(hw, sge, i);

	/* Initialize interrupt coalescing attributes */
	sge->timer_val[0] = CSIO_SGE_TIMER_VAL_0;
	sge->timer_val[1] = CSIO_SGE_TIMER_VAL_1;
	sge->timer_val[2] = CSIO_SGE_TIMER_VAL_2;
	sge->timer_val[3] = CSIO_SGE_TIMER_VAL_3;
	sge->timer_val[4] = CSIO_SGE_TIMER_VAL_4;
	sge->timer_val[5] = CSIO_SGE_TIMER_VAL_5;

	sge->counter_val[0] = CSIO_SGE_INT_CNT_VAL_0;
	sge->counter_val[1] = CSIO_SGE_INT_CNT_VAL_1;
	sge->counter_val[2] = CSIO_SGE_INT_CNT_VAL_2;
	sge->counter_val[3] = CSIO_SGE_INT_CNT_VAL_3;

	csio_wr_reg32(hw, THRESHOLD_0(sge->counter_val[0]) |
		      THRESHOLD_1(sge->counter_val[1]) |
		      THRESHOLD_2(sge->counter_val[2]) |
		      THRESHOLD_3(sge->counter_val[3]),
		      SGE_INGRESS_RX_THRESHOLD);

	/* Timer registers take core ticks, not microseconds. */
	csio_wr_reg32(hw,
		   TIMERVALUE0(csio_us_to_core_ticks(hw, sge->timer_val[0])) |
		   TIMERVALUE1(csio_us_to_core_ticks(hw, sge->timer_val[1])),
		   SGE_TIMER_VALUE_0_AND_1);

	csio_wr_reg32(hw,
		   TIMERVALUE2(csio_us_to_core_ticks(hw, sge->timer_val[2])) |
		   TIMERVALUE3(csio_us_to_core_ticks(hw, sge->timer_val[3])),
		   SGE_TIMER_VALUE_2_AND_3);

	csio_wr_reg32(hw,
		   TIMERVALUE4(csio_us_to_core_ticks(hw, sge->timer_val[4])) |
		   TIMERVALUE5(csio_us_to_core_ticks(hw, sge->timer_val[5])),
		   SGE_TIMER_VALUE_4_AND_5);

	csio_init_intr_coalesce_parms(hw);
}

void
csio_wr_sge_init(struct csio_hw *hw)
{
	/*
	 * If we are master:
	 * - If we plan to use the config file, we need to fixup some
	 *   host specific registers, and read the rest of the SGE
	 *   configuration.
	 * - If we dont plan to use the config file, we need to initialize
	 *   SGE entirely, including fixing the host specific registers.
	 * If we arent the master, we are only allowed to read and work off of
	 *   the already initialized SGE values.
	 *
	 * Therefore, before calling this function, we assume that the master-
	 * ship of the card, and whether to use config file or not, have
	 * already been decided. In other words, CSIO_HWF_USING_SOFT_PARAMS and
	 * CSIO_HWF_MASTER should be set/unset.
	 */
	if (csio_is_hw_master(hw)) {
		csio_wr_fixup_host_params(hw);

		if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS)
			csio_wr_get_sge(hw);
		else
			csio_wr_set_sge(hw);
	} else
		csio_wr_get_sge(hw);
}

/*
 * csio_wrm_init - Initialize Work request module.
 * @wrm: WR module
 * @hw: HW pointer
 *
 * Allocates memory for an array of queue pointers starting at q_arr.
 */
int
csio_wrm_init(struct csio_wrm *wrm, struct csio_hw *hw)
{
	int i;

	if (!wrm->num_q) {
		csio_err(hw, "Num queues is not set\n");
		return -EINVAL;
	}

	wrm->q_arr = kzalloc(sizeof(struct csio_q *) * wrm->num_q, GFP_KERNEL);
	if (!wrm->q_arr)
		goto err;

	for (i = 0; i < wrm->num_q; i++) {
		wrm->q_arr[i] = kzalloc(sizeof(struct csio_q), GFP_KERNEL);
		if (!wrm->q_arr[i]) {
while (--i >= 0)15751575+ kfree(wrm->q_arr[i]);15761576+ goto err_free_arr;15771577+ }15781578+ }15791579+ wrm->free_qidx = 0;15801580+15811581+ return 0;15821582+15831583+err_free_arr:15841584+ kfree(wrm->q_arr);15851585+err:15861586+ return -ENOMEM;15871587+}15881588+15891589+/*15901590+ * csio_wrm_exit - Initialize Work request module.15911591+ * @wrm: WR module15921592+ * @hw: HW module15931593+ *15941594+ * Uninitialize WR module. Free q_arr and pointers in it.15951595+ * We have the additional job of freeing the DMA memory associated15961596+ * with the queues.15971597+ */15981598+void15991599+csio_wrm_exit(struct csio_wrm *wrm, struct csio_hw *hw)16001600+{16011601+ int i;16021602+ uint32_t j;16031603+ struct csio_q *q;16041604+ struct csio_dma_buf *buf;16051605+16061606+ for (i = 0; i < wrm->num_q; i++) {16071607+ q = wrm->q_arr[i];16081608+16091609+ if (wrm->free_qidx && (i < wrm->free_qidx)) {16101610+ if (q->type == CSIO_FREELIST) {16111611+ if (!q->un.fl.bufs)16121612+ continue;16131613+ for (j = 0; j < q->credits; j++) {16141614+ buf = &q->un.fl.bufs[j];16151615+ if (!buf->vaddr)16161616+ continue;16171617+ pci_free_consistent(hw->pdev, buf->len,16181618+ buf->vaddr,16191619+ buf->paddr);16201620+ }16211621+ kfree(q->un.fl.bufs);16221622+ }16231623+ pci_free_consistent(hw->pdev, q->size,16241624+ q->vstart, q->pstart);16251625+ }16261626+ kfree(q);16271627+ }16281628+16291629+ hw->flags &= ~CSIO_HWF_Q_MEM_ALLOCED;16301630+16311631+ kfree(wrm->q_arr);16321632+}
+512
drivers/scsi/csiostor/csio_wr.h
···11+/*22+ * This file is part of the Chelsio FCoE driver for Linux.33+ *44+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.55+ *66+ * This software is available to you under a choice of one of two77+ * licenses. You may choose to be licensed under the terms of the GNU88+ * General Public License (GPL) Version 2, available from the file99+ * COPYING in the main directory of this source tree, or the1010+ * OpenIB.org BSD license below:1111+ *1212+ * Redistribution and use in source and binary forms, with or1313+ * without modification, are permitted provided that the following1414+ * conditions are met:1515+ *1616+ * - Redistributions of source code must retain the above1717+ * copyright notice, this list of conditions and the following1818+ * disclaimer.1919+ *2020+ * - Redistributions in binary form must reproduce the above2121+ * copyright notice, this list of conditions and the following2222+ * disclaimer in the documentation and/or other materials2323+ * provided with the distribution.2424+ *2525+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,2626+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF2727+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND2828+ * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS2929+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN3030+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN3131+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE3232+ * SOFTWARE.3333+ */3434+3535+#ifndef __CSIO_WR_H__3636+#define __CSIO_WR_H__3737+3838+#include <linux/cache.h>3939+4040+#include "csio_defs.h"4141+#include "t4fw_api.h"4242+#include "t4fw_api_stor.h"4343+4444+/*4545+ * SGE register field values.4646+ */4747+#define X_INGPCIEBOUNDARY_32B 04848+#define X_INGPCIEBOUNDARY_64B 14949+#define X_INGPCIEBOUNDARY_128B 25050+#define X_INGPCIEBOUNDARY_256B 35151+#define X_INGPCIEBOUNDARY_512B 45252+#define X_INGPCIEBOUNDARY_1024B 55353+#define X_INGPCIEBOUNDARY_2048B 65454+#define X_INGPCIEBOUNDARY_4096B 75555+5656+/* GTS register */5757+#define X_TIMERREG_COUNTER0 05858+#define X_TIMERREG_COUNTER1 15959+#define X_TIMERREG_COUNTER2 26060+#define X_TIMERREG_COUNTER3 36161+#define X_TIMERREG_COUNTER4 46262+#define X_TIMERREG_COUNTER5 56363+#define X_TIMERREG_RESTART_COUNTER 66464+#define X_TIMERREG_UPDATE_CIDX 76565+6666+/*6767+ * Egress Context field values6868+ */6969+#define X_FETCHBURSTMIN_16B 07070+#define X_FETCHBURSTMIN_32B 17171+#define X_FETCHBURSTMIN_64B 27272+#define X_FETCHBURSTMIN_128B 37373+7474+#define X_FETCHBURSTMAX_64B 07575+#define X_FETCHBURSTMAX_128B 17676+#define X_FETCHBURSTMAX_256B 27777+#define X_FETCHBURSTMAX_512B 37878+7979+#define X_HOSTFCMODE_NONE 08080+#define X_HOSTFCMODE_INGRESS_QUEUE 18181+#define X_HOSTFCMODE_STATUS_PAGE 28282+#define X_HOSTFCMODE_BOTH 38383+8484+/*8585+ * Ingress Context field values8686+ */8787+#define X_UPDATESCHEDULING_TIMER 08888+#define X_UPDATESCHEDULING_COUNTER_OPTTIMER 18989+9090+#define X_UPDATEDELIVERY_NONE 09191+#define X_UPDATEDELIVERY_INTERRUPT 19292+#define X_UPDATEDELIVERY_STATUS_PAGE 29393+#define X_UPDATEDELIVERY_BOTH 39494+9595+#define X_INTERRUPTDESTINATION_PCIE 09696+#define 
X_INTERRUPTDESTINATION_IQ 19797+9898+#define X_RSPD_TYPE_FLBUF 09999+#define X_RSPD_TYPE_CPL 1100100+#define X_RSPD_TYPE_INTR 2101101+102102+/* WR status is at the same position as retval in a CMD header */103103+#define csio_wr_status(_wr) \104104+ (FW_CMD_RETVAL_GET(ntohl(((struct fw_cmd_hdr *)(_wr))->lo)))105105+106106+struct csio_hw;107107+108108+extern int csio_intr_coalesce_cnt;109109+extern int csio_intr_coalesce_time;110110+111111+/* Ingress queue params */112112+struct csio_iq_params {113113+114114+ uint8_t iq_start:1;115115+ uint8_t iq_stop:1;116116+ uint8_t pfn:3;117117+118118+ uint8_t vfn;119119+120120+ uint16_t physiqid;121121+ uint16_t iqid;122122+123123+ uint16_t fl0id;124124+ uint16_t fl1id;125125+126126+ uint8_t viid;127127+128128+ uint8_t type;129129+ uint8_t iqasynch;130130+ uint8_t reserved4;131131+132132+ uint8_t iqandst;133133+ uint8_t iqanus;134134+ uint8_t iqanud;135135+136136+ uint16_t iqandstindex;137137+138138+ uint8_t iqdroprss;139139+ uint8_t iqpciech;140140+ uint8_t iqdcaen;141141+142142+ uint8_t iqdcacpu;143143+ uint8_t iqintcntthresh;144144+ uint8_t iqo;145145+146146+ uint8_t iqcprio;147147+ uint8_t iqesize;148148+149149+ uint16_t iqsize;150150+151151+ uint64_t iqaddr;152152+153153+ uint8_t iqflintiqhsen;154154+ uint8_t reserved5;155155+ uint8_t iqflintcongen;156156+ uint8_t iqflintcngchmap;157157+158158+ uint32_t reserved6;159159+160160+ uint8_t fl0hostfcmode;161161+ uint8_t fl0cprio;162162+ uint8_t fl0paden;163163+ uint8_t fl0packen;164164+ uint8_t fl0congen;165165+ uint8_t fl0dcaen;166166+167167+ uint8_t fl0dcacpu;168168+ uint8_t fl0fbmin;169169+170170+ uint8_t fl0fbmax;171171+ uint8_t fl0cidxfthresho;172172+ uint8_t fl0cidxfthresh;173173+174174+ uint16_t fl0size;175175+176176+ uint64_t fl0addr;177177+178178+ uint64_t reserved7;179179+180180+ uint8_t fl1hostfcmode;181181+ uint8_t fl1cprio;182182+ uint8_t fl1paden;183183+ uint8_t fl1packen;184184+ uint8_t fl1congen;185185+ uint8_t fl1dcaen;186186+187187+ uint8_t fl1dcacpu;188188+ 
uint8_t fl1fbmin;189189+190190+ uint8_t fl1fbmax;191191+ uint8_t fl1cidxfthresho;192192+ uint8_t fl1cidxfthresh;193193+194194+ uint16_t fl1size;195195+196196+ uint64_t fl1addr;197197+};198198+199199+/* Egress queue params */200200+struct csio_eq_params {201201+202202+ uint8_t pfn;203203+ uint8_t vfn;204204+205205+ uint8_t eqstart:1;206206+ uint8_t eqstop:1;207207+208208+ uint16_t physeqid;209209+ uint32_t eqid;210210+211211+ uint8_t hostfcmode:2;212212+ uint8_t cprio:1;213213+ uint8_t pciechn:3;214214+215215+ uint16_t iqid;216216+217217+ uint8_t dcaen:1;218218+ uint8_t dcacpu:5;219219+220220+ uint8_t fbmin:3;221221+ uint8_t fbmax:3;222222+223223+ uint8_t cidxfthresho:1;224224+ uint8_t cidxfthresh:3;225225+226226+ uint16_t eqsize;227227+228228+ uint64_t eqaddr;229229+};230230+231231+struct csio_dma_buf {232232+ struct list_head list;233233+ void *vaddr; /* Virtual address */234234+ dma_addr_t paddr; /* Physical address */235235+ uint32_t len; /* Buffer size */236236+};237237+238238+/* Generic I/O request structure */239239+struct csio_ioreq {240240+ struct csio_sm sm; /* SM, List241241+ * should be the first member242242+ */243243+ int iq_idx; /* Ingress queue index */244244+ int eq_idx; /* Egress queue index */245245+ uint32_t nsge; /* Number of SG elements */246246+ uint32_t tmo; /* Driver timeout */247247+ uint32_t datadir; /* Data direction */248248+ struct csio_dma_buf dma_buf; /* Req/resp DMA buffers */249249+ uint16_t wr_status; /* WR completion status */250250+ int16_t drv_status; /* Driver internal status */251251+ struct csio_lnode *lnode; /* Owner lnode */252252+ struct csio_rnode *rnode; /* Src/destination rnode */253253+ void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *);254254+ /* completion callback */255255+ void *scratch1; /* Scratch area 1.256256+ */257257+ void *scratch2; /* Scratch area 2. 
*/258258+ struct list_head gen_list; /* Any list associated with259259+ * this ioreq.260260+ */261261+ uint64_t fw_handle; /* Unique handle passed262262+ * to FW263263+ */264264+ uint8_t dcopy; /* Data copy required */265265+ uint8_t reserved1;266266+ uint16_t reserved2;267267+ struct completion cmplobj; /* ioreq completion object */268268+} ____cacheline_aligned_in_smp;269269+270270+/*271271+ * Egress status page for egress cidx updates272272+ */273273+struct csio_qstatus_page {274274+ __be32 qid;275275+ __be16 cidx;276276+ __be16 pidx;277277+};278278+279279+280280+enum {281281+ CSIO_MAX_FLBUF_PER_IQWR = 4,282282+ CSIO_QCREDIT_SZ = 64, /* pidx/cidx increments283283+ * in bytes284284+ */285285+ CSIO_MAX_QID = 0xFFFF,286286+ CSIO_MAX_IQ = 128,287287+288288+ CSIO_SGE_NTIMERS = 6,289289+ CSIO_SGE_NCOUNTERS = 4,290290+ CSIO_SGE_FL_SIZE_REGS = 16,291291+};292292+293293+/* Defines for type */294294+enum {295295+ CSIO_EGRESS = 1,296296+ CSIO_INGRESS = 2,297297+ CSIO_FREELIST = 3,298298+};299299+300300+/*301301+ * Structure for footer (last 2 flits) of Ingress Queue Entry.302302+ */303303+struct csio_iqwr_footer {304304+ __be32 hdrbuflen_pidx;305305+ __be32 pldbuflen_qid;306306+ union {307307+ u8 type_gen;308308+ __be64 last_flit;309309+ } u;310310+};311311+312312+#define IQWRF_NEWBUF (1 << 31)313313+#define IQWRF_LEN_GET(x) (((x) >> 0) & 0x7fffffffU)314314+#define IQWRF_GEN_SHIFT 7315315+#define IQWRF_TYPE_GET(x) (((x) >> 4) & 0x3U)316316+317317+318318+/*319319+ * WR pair:320320+ * ========321321+ * A WR can start towards the end of a queue, and then continue at the322322+ * beginning, since the queue is considered to be circular. 
This will323323+ * require a pair of address/len to be passed back to the caller -324324+ * hence the Work request pair structure.325325+ */326326+struct csio_wr_pair {327327+ void *addr1;328328+ uint32_t size1;329329+ void *addr2;330330+ uint32_t size2;331331+};332332+333333+/*334334+ * The following structure is used by ingress processing to return the335335+ * free list buffers to consumers.336336+ */337337+struct csio_fl_dma_buf {338338+ struct csio_dma_buf flbufs[CSIO_MAX_FLBUF_PER_IQWR];339339+ /* Freelist DMA buffers */340340+ int offset; /* Offset within the341341+ * first FL buf.342342+ */343343+ uint32_t totlen; /* Total length */344344+ uint8_t defer_free; /* Free of buffer can345345+ * deferred346346+ */347347+};348348+349349+/* Data-types */350350+typedef void (*iq_handler_t)(struct csio_hw *, void *, uint32_t,351351+ struct csio_fl_dma_buf *, void *);352352+353353+struct csio_iq {354354+ uint16_t iqid; /* Queue ID */355355+ uint16_t physiqid; /* Physical Queue ID */356356+ uint16_t genbit; /* Generation bit,357357+ * initially set to 1358358+ */359359+ int flq_idx; /* Freelist queue index */360360+ iq_handler_t iq_intx_handler; /* IQ INTx handler routine */361361+};362362+363363+struct csio_eq {364364+ uint16_t eqid; /* Qid */365365+ uint16_t physeqid; /* Physical Queue ID */366366+ uint8_t wrap[512]; /* Temp area for q-wrap around*/367367+};368368+369369+struct csio_fl {370370+ uint16_t flid; /* Qid */371371+ uint16_t packen; /* Packing enabled? */372372+ int offset; /* Offset within FL buf */373373+ int sreg; /* Size register */374374+ struct csio_dma_buf *bufs; /* Free list buffer ptr array375375+ * indexed using flq->cidx/pidx376376+ */377377+};378378+379379+struct csio_qstats {380380+ uint32_t n_tot_reqs; /* Total no. of Requests */381381+ uint32_t n_tot_rsps; /* Total no. 
of responses */382382+ uint32_t n_qwrap; /* Queue wraps */383383+ uint32_t n_eq_wr_split; /* Number of split EQ WRs */384384+ uint32_t n_qentry; /* Queue entry */385385+ uint32_t n_qempty; /* Queue empty */386386+ uint32_t n_qfull; /* Queue fulls */387387+ uint32_t n_rsp_unknown; /* Unknown response type */388388+ uint32_t n_stray_comp; /* Stray completion intr */389389+ uint32_t n_flq_refill; /* Number of FL refills */390390+};391391+392392+/* Queue metadata */393393+struct csio_q {394394+ uint16_t type; /* Type: Ingress/Egress/FL */395395+ uint16_t pidx; /* producer index */396396+ uint16_t cidx; /* consumer index */397397+ uint16_t inc_idx; /* Incremental index */398398+ uint32_t wr_sz; /* Size of all WRs in this q399399+ * if fixed400400+ */401401+ void *vstart; /* Base virtual address402402+ * of queue403403+ */404404+ void *vwrap; /* Virtual end address to405405+ * wrap around at406406+ */407407+ uint32_t credits; /* Size of queue in credits */408408+ void *owner; /* Owner */409409+ union { /* Queue contexts */410410+ struct csio_iq iq;411411+ struct csio_eq eq;412412+ struct csio_fl fl;413413+ } un;414414+415415+ dma_addr_t pstart; /* Base physical address of416416+ * queue417417+ */418418+ uint32_t portid; /* PCIE Channel */419419+ uint32_t size; /* Size of queue in bytes */420420+ struct csio_qstats stats; /* Statistics */421421+} ____cacheline_aligned_in_smp;422422+423423+struct csio_sge {424424+ uint32_t csio_fl_align; /* Calculated and cached425425+ * for fast path426426+ */427427+ uint32_t sge_control; /* padding, boundaries,428428+ * lengths, etc.429429+ */430430+ uint32_t sge_host_page_size; /* Host page size */431431+ uint32_t sge_fl_buf_size[CSIO_SGE_FL_SIZE_REGS];432432+ /* free list buffer sizes */433433+ uint16_t timer_val[CSIO_SGE_NTIMERS];434434+ uint8_t counter_val[CSIO_SGE_NCOUNTERS];435435+};436436+437437+/* Work request module */438438+struct csio_wrm {439439+ int num_q; /* Number of queues */440440+ struct csio_q **q_arr; /* Array of 
queue pointers441441+ * allocated dynamically442442+ * based on configured values443443+ */444444+ uint32_t fw_iq_start; /* Start ID of IQ for this fn*/445445+ uint32_t fw_eq_start; /* Start ID of EQ for this fn*/446446+ struct csio_q *intr_map[CSIO_MAX_IQ];447447+ /* IQ-id to IQ map table. */448448+ int free_qidx; /* queue idx of free queue */449449+ struct csio_sge sge; /* SGE params */450450+};451451+452452+#define csio_get_q(__hw, __idx) ((__hw)->wrm.q_arr[__idx])453453+#define csio_q_type(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->type)454454+#define csio_q_pidx(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->pidx)455455+#define csio_q_cidx(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->cidx)456456+#define csio_q_inc_idx(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->inc_idx)457457+#define csio_q_vstart(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->vstart)458458+#define csio_q_pstart(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->pstart)459459+#define csio_q_size(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->size)460460+#define csio_q_credits(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->credits)461461+#define csio_q_portid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->portid)462462+#define csio_q_wr_sz(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->wr_sz)463463+#define csio_q_iqid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.iq.iqid)464464+#define csio_q_physiqid(__hw, __idx) \465465+ ((__hw)->wrm.q_arr[(__idx)]->un.iq.physiqid)466466+#define csio_q_iq_flq_idx(__hw, __idx) \467467+ ((__hw)->wrm.q_arr[(__idx)]->un.iq.flq_idx)468468+#define csio_q_eqid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.eq.eqid)469469+#define csio_q_flid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.fl.flid)470470+471471+#define csio_q_physeqid(__hw, __idx) \472472+ ((__hw)->wrm.q_arr[(__idx)]->un.eq.physeqid)473473+#define csio_iq_has_fl(__iq) ((__iq)->un.iq.flq_idx != -1)474474+475475+#define csio_q_iq_to_flid(__hw, __iq_idx) \476476+ csio_q_flid((__hw), (__hw)->wrm.q_arr[(__iq_qidx)]->un.iq.flq_idx)477477+#define 
csio_q_set_intr_map(__hw, __iq_idx, __rel_iq_id) \478478+ (__hw)->wrm.intr_map[__rel_iq_id] = csio_get_q(__hw, __iq_idx)479479+#define csio_q_eq_wrap(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.eq.wrap)480480+481481+struct csio_mb;482482+483483+int csio_wr_alloc_q(struct csio_hw *, uint32_t, uint32_t,484484+ uint16_t, void *, uint32_t, int, iq_handler_t);485485+int csio_wr_iq_create(struct csio_hw *, void *, int,486486+ uint32_t, uint8_t, bool,487487+ void (*)(struct csio_hw *, struct csio_mb *));488488+int csio_wr_eq_create(struct csio_hw *, void *, int, int, uint8_t,489489+ void (*)(struct csio_hw *, struct csio_mb *));490490+int csio_wr_destroy_queues(struct csio_hw *, bool cmd);491491+492492+493493+int csio_wr_get(struct csio_hw *, int, uint32_t,494494+ struct csio_wr_pair *);495495+void csio_wr_copy_to_wrp(void *, struct csio_wr_pair *, uint32_t, uint32_t);496496+int csio_wr_issue(struct csio_hw *, int, bool);497497+int csio_wr_process_iq(struct csio_hw *, struct csio_q *,498498+ void (*)(struct csio_hw *, void *,499499+ uint32_t, struct csio_fl_dma_buf *,500500+ void *),501501+ void *);502502+int csio_wr_process_iq_idx(struct csio_hw *, int,503503+ void (*)(struct csio_hw *, void *,504504+ uint32_t, struct csio_fl_dma_buf *,505505+ void *),506506+ void *);507507+508508+void csio_wr_sge_init(struct csio_hw *);509509+int csio_wrm_init(struct csio_wrm *, struct csio_hw *);510510+void csio_wrm_exit(struct csio_wrm *, struct csio_hw *);511511+512512+#endif /* ifndef __CSIO_WR_H__ */