Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (69 commits)
[SCSI] scsi_transport_fc: Fix synchronization issue while deleting vport
[SCSI] bfa: Update the driver version to 2.1.2.1.
[SCSI] bfa: Remove unused header files and did some cleanup.
[SCSI] bfa: Handle SCSI IO underrun case.
[SCSI] bfa: FCS and include file changes.
[SCSI] bfa: Modified the portstats get/clear logic
[SCSI] bfa: Replace bfa_get_attr() with specific APIs
[SCSI] bfa: New portlog entries for events (FIP/FLOGI/FDISC/LOGO).
[SCSI] bfa: Rename pport to fcport in BFA FCS.
[SCSI] bfa: IOC fixes, check for IOC down condition.
[SCSI] bfa: In MSIX mode, ignore spurious RME interrupts when FCoE ports are in FW mismatch state.
[SCSI] bfa: Fix Command Queue (CPE) full condition check and ack CPE interrupt.
[SCSI] bfa: IOC recovery fix in fcmode.
[SCSI] bfa: AEN and byte alignment fixes.
[SCSI] bfa: Introduce a link notification state machine.
[SCSI] bfa: Added firmware save clear feature for BFA driver.
[SCSI] bfa: FCS authentication related changes.
[SCSI] bfa: PCI VPD, FIP and include file changes.
[SCSI] bfa: Fix to copy fpma MAC when requested by user space application.
[SCSI] bfa: RPORT state machine: direct attach mode fix.
...

+7217 -3538
+1 -1
drivers/infiniband/ulp/iser/iscsi_iser.c
···
 	.cmd_per_lun = ISER_DEF_CMD_PER_LUN,
 	.eh_abort_handler = iscsi_eh_abort,
 	.eh_device_reset_handler= iscsi_eh_device_reset,
-	.eh_target_reset_handler= iscsi_eh_target_reset,
+	.eh_target_reset_handler = iscsi_eh_recover_target,
 	.target_alloc = iscsi_target_alloc,
 	.use_clustering = DISABLE_CLUSTERING,
 	.proc_name = "iscsi_iser",
+6
drivers/scsi/Kconfig
···
 menu "SCSI device support"
 
+config SCSI_MOD
+	tristate
+	default y if SCSI=n || SCSI=y
+	default m if SCSI=m
+
 config RAID_ATTRS
 	tristate "RAID Transport Class"
 	default n
 	depends on BLOCK
+	depends on SCSI_MOD
 	---help---
 	  Provides RAID
 
-7
drivers/scsi/be2iscsi/be_cmds.c
···
 unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
 {
 	unsigned int tag = 0;
-	unsigned int num = 0;
 
-mcc_tag_rdy:
 	if (phba->ctrl.mcc_tag_available) {
 		tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index];
 		phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
 		phba->ctrl.mcc_numtag[tag] = 0;
-	} else {
-		udelay(100);
-		num++;
-		if (num < mcc_timeout)
-			goto mcc_tag_rdy;
 	}
 	if (tag) {
 		phba->ctrl.mcc_tag_available--;
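The effect of this hunk is that alloc_mcc_tag() no longer busy-waits (udelay() plus a retry jump) for a free MCC tag: it returns 0 immediately when the pool is exhausted, and callers treat a zero tag as failure, as the "if (!tag)" checks in be_iscsi.c and be_main.c below show. A minimal user-space model of that caller-must-check-zero contract, with a made-up pool size and names purely for illustration (not the driver code itself):

#include <stdio.h>

#define TOY_MCC_TAGS 4	/* made-up pool size, not a driver constant */

static unsigned int tag_pool[TOY_MCC_TAGS] = { 1, 2, 3, 4 };
static unsigned int tags_available = TOY_MCC_TAGS;
static unsigned int alloc_index;

/* Non-blocking allocation: returns 0 as soon as the pool is empty, mirroring
 * the reworked alloc_mcc_tag() above (the udelay()/goto retry loop is gone). */
static unsigned int toy_alloc_tag(void)
{
	unsigned int tag = 0;

	if (tags_available) {
		tag = tag_pool[alloc_index];
		tag_pool[alloc_index] = 0;
		tags_available--;
		alloc_index = (alloc_index + 1) % TOY_MCC_TAGS;
	}
	return tag;
}

int main(void)
{
	for (int i = 0; i < TOY_MCC_TAGS + 1; i++) {
		unsigned int tag = toy_alloc_tag();

		if (!tag)	/* callers must now handle exhaustion themselves */
			printf("request %d: no tag available\n", i);
		else
			printf("request %d: got tag %u\n", i, tag);
	}
	return 0;
}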
+2 -2
drivers/scsi/be2iscsi/be_iscsi.c
···
 	tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep);
 	if (!tag) {
 		SE_DEBUG(DBG_LVL_1,
-			 "mgmt_invalidate_connection Failed for cid=%d \n",
+			 "mgmt_open_connection Failed for cid=%d \n",
 			 beiscsi_ep->ep_cid);
 	} else {
 		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
···
 	if (!tag) {
 		SE_DEBUG(DBG_LVL_1,
 			 "mgmt_invalidate_connection Failed for cid=%d \n",
-			beiscsi_ep->ep_cid);
+			 beiscsi_ep->ep_cid);
 	} else {
 		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
 					 phba->ctrl.mcc_numtag[tag]);
+142 -59
drivers/scsi/be2iscsi/be_main.c
···
 	return 0;
 }
 
+static int beiscsi_eh_abort(struct scsi_cmnd *sc)
+{
+	struct iscsi_cls_session *cls_session;
+	struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr;
+	struct beiscsi_io_task *aborted_io_task;
+	struct iscsi_conn *conn;
+	struct beiscsi_conn *beiscsi_conn;
+	struct beiscsi_hba *phba;
+	struct iscsi_session *session;
+	struct invalidate_command_table *inv_tbl;
+	unsigned int cid, tag, num_invalidate;
+
+	cls_session = starget_to_session(scsi_target(sc->device));
+	session = cls_session->dd_data;
+
+	spin_lock_bh(&session->lock);
+	if (!aborted_task || !aborted_task->sc) {
+		/* we raced */
+		spin_unlock_bh(&session->lock);
+		return SUCCESS;
+	}
+
+	aborted_io_task = aborted_task->dd_data;
+	if (!aborted_io_task->scsi_cmnd) {
+		/* raced or invalid command */
+		spin_unlock_bh(&session->lock);
+		return SUCCESS;
+	}
+	spin_unlock_bh(&session->lock);
+	conn = aborted_task->conn;
+	beiscsi_conn = conn->dd_data;
+	phba = beiscsi_conn->phba;
+
+	/* invalidate iocb */
+	cid = beiscsi_conn->beiscsi_conn_cid;
+	inv_tbl = phba->inv_tbl;
+	memset(inv_tbl, 0x0, sizeof(*inv_tbl));
+	inv_tbl->cid = cid;
+	inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index;
+	num_invalidate = 1;
+	tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, cid);
+	if (!tag) {
+		shost_printk(KERN_WARNING, phba->shost,
+			     "mgmt_invalidate_icds could not be"
+			     " submitted\n");
+		return FAILED;
+	} else {
+		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
+					 phba->ctrl.mcc_numtag[tag]);
+		free_mcc_tag(&phba->ctrl, tag);
+	}
+
+	return iscsi_eh_abort(sc);
+}
+
+static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
+{
+	struct iscsi_task *abrt_task;
+	struct beiscsi_io_task *abrt_io_task;
+	struct iscsi_conn *conn;
+	struct beiscsi_conn *beiscsi_conn;
+	struct beiscsi_hba *phba;
+	struct iscsi_session *session;
+	struct iscsi_cls_session *cls_session;
+	struct invalidate_command_table *inv_tbl;
+	unsigned int cid, tag, i, num_invalidate;
+	int rc = FAILED;
+
+	/* invalidate iocbs */
+	cls_session = starget_to_session(scsi_target(sc->device));
+	session = cls_session->dd_data;
+	spin_lock_bh(&session->lock);
+	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
+		goto unlock;
+
+	conn = session->leadconn;
+	beiscsi_conn = conn->dd_data;
+	phba = beiscsi_conn->phba;
+	cid = beiscsi_conn->beiscsi_conn_cid;
+	inv_tbl = phba->inv_tbl;
+	memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN);
+	num_invalidate = 0;
+	for (i = 0; i < conn->session->cmds_max; i++) {
+		abrt_task = conn->session->cmds[i];
+		abrt_io_task = abrt_task->dd_data;
+		if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
+			continue;
+
+		if (abrt_task->sc->device->lun != abrt_task->sc->device->lun)
+			continue;
+
+		inv_tbl->cid = cid;
+		inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index;
+		num_invalidate++;
+		inv_tbl++;
+	}
+	spin_unlock_bh(&session->lock);
+	inv_tbl = phba->inv_tbl;
+
+	tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, cid);
+	if (!tag) {
+		shost_printk(KERN_WARNING, phba->shost,
+			     "mgmt_invalidate_icds could not be"
+			     " submitted\n");
+		return FAILED;
+	} else {
+		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
+					 phba->ctrl.mcc_numtag[tag]);
+		free_mcc_tag(&phba->ctrl, tag);
+	}
+
+	return iscsi_eh_device_reset(sc);
+unlock:
+	spin_unlock_bh(&session->lock);
+	return rc;
+}
+
 /*------------------- PCI Driver operations and data ----------------- */
 static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
 	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
···
 	.name = "ServerEngines 10Gbe open-iscsi Initiator Driver",
 	.proc_name = DRV_NAME,
 	.queuecommand = iscsi_queuecommand,
-	.eh_abort_handler = iscsi_eh_abort,
 	.change_queue_depth = iscsi_change_queue_depth,
 	.slave_configure = beiscsi_slave_configure,
 	.target_alloc = iscsi_target_alloc,
-	.eh_device_reset_handler = iscsi_eh_device_reset,
-	.eh_target_reset_handler = iscsi_eh_target_reset,
+	.eh_abort_handler = beiscsi_eh_abort,
+	.eh_device_reset_handler = beiscsi_eh_device_reset,
+	.eh_target_reset_handler = iscsi_eh_session_reset,
 	.sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
 	.can_queue = BE2_IO_DEPTH,
 	.this_id = -1,
···
 				    + BE2_TMFS
 				    + BE2_NOPOUT_REQ));
 	phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
-	phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count;;
+	phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count * 2;
 	phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;;
 	phba->params.num_sge_per_io = BE2_SGE;
 	phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
···
 	case HWH_TYPE_IO:
 	case HWH_TYPE_IO_RD:
 		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
-		     ISCSI_OP_NOOP_OUT) {
+		     ISCSI_OP_NOOP_OUT)
 			be_complete_nopin_resp(beiscsi_conn, task, psol);
-		} else
+		else
 			be_complete_io(beiscsi_conn, task, psol);
 		break;
 
 	case HWH_TYPE_LOGOUT:
-		be_complete_logout(beiscsi_conn, task, psol);
+		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
+			be_complete_logout(beiscsi_conn, task, psol);
+		else
+			be_complete_tmf(beiscsi_conn, task, psol);
+
 		break;
 
 	case HWH_TYPE_LOGIN:
 		SE_DEBUG(DBG_LVL_1,
 			 "\t\t No HWH_TYPE_LOGIN Expected in hwi_complete_cmd"
 			 "- Solicited path \n");
-		break;
-
-	case HWH_TYPE_TMF:
-		be_complete_tmf(beiscsi_conn, task, psol);
 		break;
 
 	case HWH_TYPE_NOP:
···
 	num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
 		      ((sizeof(struct iscsi_wrb) *
 			phba->params.wrbs_per_cxn));
-	for (index = 0; index < phba->params.cxns_per_ctrl; index += 2) {
+	for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
 		pwrb_context = &phwi_ctrlr->wrb_context[index];
 		if (num_cxn_wrb) {
 			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
···
 		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
 		SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p \n", reg, addr);
 		iowrite32(reg, addr);
-		for (i = 0; i <= phba->num_cpus; i++) {
-			eq = &phwi_context->be_eq[i].q;
+		if (!phba->msix_enabled) {
+			eq = &phwi_context->be_eq[0].q;
 			SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id);
 			hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
+		} else {
+			for (i = 0; i <= phba->num_cpus; i++) {
+				eq = &phwi_context->be_eq[i].q;
+				SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id);
+				hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
+			}
 		}
-	} else
-		shost_printk(KERN_WARNING, phba->shost,
-			     "In hwi_enable_intr, Not Enabled \n");
+	}
 	return true;
 }
···
 
 static int beiscsi_mtask(struct iscsi_task *task)
 {
-	struct beiscsi_io_task *aborted_io_task, *io_task = task->dd_data;
+	struct beiscsi_io_task *io_task = task->dd_data;
 	struct iscsi_conn *conn = task->conn;
 	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
 	struct beiscsi_hba *phba = beiscsi_conn->phba;
-	struct iscsi_session *session;
 	struct iscsi_wrb *pwrb = NULL;
-	struct hwi_controller *phwi_ctrlr;
-	struct hwi_wrb_context *pwrb_context;
-	struct wrb_handle *pwrb_handle;
 	unsigned int doorbell = 0;
-	unsigned int i, cid;
-	struct iscsi_task *aborted_task;
-	unsigned int tag;
+	unsigned int cid;
 
 	cid = beiscsi_conn->beiscsi_conn_cid;
 	pwrb = io_task->pwrb_handle->pwrb;
···
 		      io_task->pwrb_handle->wrb_index);
 	AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
 		      io_task->psgl_handle->sgl_index);
+
 	switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
 	case ISCSI_OP_LOGIN:
 		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
···
 		hwi_write_buffer(pwrb, task);
 		break;
 	case ISCSI_OP_SCSI_TMFUNC:
-		session = conn->session;
-		i = ((struct iscsi_tm *)task->hdr)->rtt;
-		phwi_ctrlr = phba->phwi_ctrlr;
-		pwrb_context = &phwi_ctrlr->wrb_context[cid -
-					phba->fw_config.iscsi_cid_start];
-		pwrb_handle = pwrb_context->pwrb_handle_basestd[be32_to_cpu(i)
-								>> 16];
-		aborted_task = pwrb_handle->pio_handle;
-		if (!aborted_task)
-			return 0;
-
-		aborted_io_task = aborted_task->dd_data;
-		if (!aborted_io_task->scsi_cmnd)
-			return 0;
-
-		tag = mgmt_invalidate_icds(phba,
-				aborted_io_task->psgl_handle->sgl_index,
-				cid);
-		if (!tag) {
-			shost_printk(KERN_WARNING, phba->shost,
-				     "mgmt_invalidate_icds could not be"
-				     " submitted\n");
-		} else {
-			wait_event_interruptible(phba->ctrl.mcc_wait[tag],
-						 phba->ctrl.mcc_numtag[tag]);
-			free_mcc_tag(&phba->ctrl, tag);
-		}
 		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
 			      INI_TMF_CMD);
 		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
···
 	case ISCSI_OP_LOGOUT:
 		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
 		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
-				HWH_TYPE_LOGOUT);
+			      HWH_TYPE_LOGOUT);
 		hwi_write_buffer(pwrb, task);
 		break;
 
···
 
 static int beiscsi_task_xmit(struct iscsi_task *task)
 {
-	struct iscsi_conn *conn = task->conn;
 	struct beiscsi_io_task *io_task = task->dd_data;
 	struct scsi_cmnd *sc = task->sc;
-	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
 	struct scatterlist *sg;
 	int num_sg;
 	unsigned int writedir = 0, xferlen = 0;
 
-	SE_DEBUG(DBG_LVL_4, "\n cid=%d In beiscsi_task_xmit task=%p conn=%p \t"
-		 "beiscsi_conn=%p \n", beiscsi_conn->beiscsi_conn_cid,
-		 task, conn, beiscsi_conn);
 	if (!sc)
 		return beiscsi_mtask(task);
 
···
 			    " Failed in beiscsi_hba_alloc \n");
 		goto disable_pci;
 	}
-	SE_DEBUG(DBG_LVL_8, " phba = %p \n", phba);
 
 	switch (pcidev->device) {
 	case BE_DEVICE_ID1:
+8 -3
drivers/scsi/be2iscsi/be_main.h
···
 	unsigned int num_sge;
 };
 
+struct invalidate_command_table {
+	unsigned short icd;
+	unsigned short cid;
+} __packed;
+
 struct beiscsi_hba {
 	struct hba_parameters params;
 	struct hwi_controller *phwi_ctrlr;
···
 	struct work_struct work_cqs;	/* The work being queued */
 	struct be_ctrl_info ctrl;
 	unsigned int generation;
+	struct invalidate_command_table inv_tbl[128];
+
 };
 
 struct beiscsi_session {
···
 	struct list_head data_busy_list;
 };
 
-#define BE_MIN_ASYNC_ENTRIES 128
-
 struct hwi_async_pdu_context {
 	struct {
 		struct be_bus_address pa_base;
···
 	 * This is a varying size list! Do not add anything
 	 * after this entry!!
 	 */
-	struct hwi_async_entry async_entry[BE_MIN_ASYNC_ENTRIES];
+	struct hwi_async_entry async_entry[BE2_MAX_SESSIONS * 2];
 };
 
 #define PDUCQE_CODE_MASK 0x0000003F
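Taken together with the beiscsi_eh_device_reset() hunk in be_main.c above and the reworked mgmt_invalidate_icds() in be_mgmt.c below, the new per-HBA inv_tbl[] is filled with one (cid, icd) pair per command to be invalidated and the whole batch is then submitted in a single management request. A small self-contained sketch of that fill-then-submit pattern, using stand-in values and a stubbed consumer rather than the real driver structures:

#include <stdio.h>

/* Mirrors the new struct in be_main.h: one (icd, cid) pair per command. */
struct invalidate_command_table {
	unsigned short icd;
	unsigned short cid;
};

/* Stand-in for mgmt_invalidate_icds(): the real one copies each entry into
 * the firmware request's table[] and bumps icd_count (see be_mgmt.c below). */
static unsigned int stub_invalidate_icds(struct invalidate_command_table *inv_tbl,
					 unsigned int num_invalidate,
					 unsigned int cid)
{
	unsigned int i;

	(void)cid;	/* the real call also carries the connection id */
	for (i = 0; i < num_invalidate; i++, inv_tbl++)
		printf("invalidate cid=%u icd=%u\n",
		       (unsigned int)inv_tbl->cid, (unsigned int)inv_tbl->icd);
	return num_invalidate ? 1 : 0;	/* pretend MCC tag; 0 means failure */
}

int main(void)
{
	struct invalidate_command_table inv_tbl[128] = { { 0, 0 } };
	unsigned short icds[] = { 10, 11, 13 };	/* stand-in SGL indexes */
	unsigned int num_invalidate = 0;
	unsigned int cid = 7;			/* stand-in connection id */
	unsigned int i;

	/* Fill one entry per outstanding command, as the reset handler does. */
	for (i = 0; i < sizeof(icds) / sizeof(icds[0]); i++) {
		inv_tbl[num_invalidate].cid = cid;
		inv_tbl[num_invalidate].icd = icds[i];
		num_invalidate++;
	}

	if (!stub_invalidate_icds(inv_tbl, num_invalidate, cid))
		printf("submission failed\n");
	return 0;
}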
+9 -5
drivers/scsi/be2iscsi/be_mgmt.c
···
 }
 
 unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
-				unsigned int icd, unsigned int cid)
+				struct invalidate_command_table *inv_tbl,
+				unsigned int num_invalidate, unsigned int cid)
 {
 	struct be_dma_mem nonemb_cmd;
 	struct be_ctrl_info *ctrl = &phba->ctrl;
 	struct be_mcc_wrb *wrb;
 	struct be_sge *sge;
 	struct invalidate_commands_params_in *req;
-	unsigned int tag = 0;
+	unsigned int i, tag = 0;
 
 	spin_lock(&ctrl->mbox_lock);
 	tag = alloc_mcc_tag(phba);
···
 			sizeof(*req));
 	req->ref_handle = 0;
 	req->cleanup_type = CMD_ISCSI_COMMAND_INVALIDATE;
-	req->icd_count = 0;
-	req->table[req->icd_count].icd = icd;
-	req->table[req->icd_count].cid = cid;
+	for (i = 0; i < num_invalidate; i++) {
+		req->table[i].icd = inv_tbl->icd;
+		req->table[i].cid = inv_tbl->cid;
+		req->icd_count++;
+		inv_tbl++;
+	}
 	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma));
 	sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF);
 	sge->len = cpu_to_le32(nonemb_cmd.size);
+2 -6
drivers/scsi/be2iscsi/be_mgmt.h
···
 				unsigned short cid,
 				unsigned int upload_flag);
 unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
-				unsigned int icd, unsigned int cid);
+				struct invalidate_command_table *inv_tbl,
+				unsigned int num_invalidate, unsigned int cid);
 
 struct iscsi_invalidate_connection_params_in {
 	struct be_cmd_req_hdr hdr;
···
 union iscsi_invalidate_connection_params {
 	struct iscsi_invalidate_connection_params_in request;
 	struct iscsi_invalidate_connection_params_out response;
-} __packed;
-
-struct invalidate_command_table {
-	unsigned short icd;
-	unsigned short cid;
 } __packed;
 
 struct invalidate_commands_params_in {
+4 -4
drivers/scsi/bfa/Makefile
···
 
 bfa-y := bfad.o bfad_intr.o bfad_os.o bfad_im.o bfad_attr.o bfad_fwimg.o
 
-bfa-y += bfa_core.o bfa_ioc.o bfa_iocfc.o bfa_fcxp.o bfa_lps.o
-bfa-y += bfa_hw_cb.o bfa_hw_ct.o bfa_intr.o bfa_timer.o bfa_rport.o
+bfa-y += bfa_core.o bfa_ioc.o bfa_ioc_ct.o bfa_ioc_cb.o bfa_iocfc.o bfa_fcxp.o
+bfa-y += bfa_lps.o bfa_hw_cb.o bfa_hw_ct.o bfa_intr.o bfa_timer.o bfa_rport.o
 bfa-y += bfa_fcport.o bfa_port.o bfa_uf.o bfa_sgpg.o bfa_module.o bfa_ioim.o
 bfa-y += bfa_itnim.o bfa_fcpim.o bfa_tskim.o bfa_log.o bfa_log_module.o
 bfa-y += bfa_csdebug.o bfa_sm.o plog.o
 
-bfa-y += fcbuild.o fabric.o fcpim.o vfapi.o fcptm.o bfa_fcs.o bfa_fcs_port.o
+bfa-y += fcbuild.o fabric.o fcpim.o vfapi.o fcptm.o bfa_fcs.o bfa_fcs_port.o
 bfa-y += bfa_fcs_uf.o bfa_fcs_lport.o fab.o fdmi.o ms.o ns.o scn.o loop.o
 bfa-y += lport_api.o n2n.o rport.o rport_api.o rport_ftrs.o vport.o
 
-ccflags-y := -I$(obj) -I$(obj)/include -I$(obj)/include/cna
+ccflags-y := -I$(obj) -I$(obj)/include -I$(obj)/include/cna -DBFA_PERF_BUILD
+19
drivers/scsi/bfa/bfa_core.c
···
 }
 
 /**
+ * Clear the saved firmware trace information of an IOC.
+ */
+void
+bfa_debug_fwsave_clear(struct bfa_s *bfa)
+{
+	bfa_ioc_debug_fwsave_clear(&bfa->ioc);
+}
+
+/**
  * Fetch firmware trace data.
  *
  * @param[in]	bfa	BFA instance
···
 bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen)
 {
 	return bfa_ioc_debug_fwtrc(&bfa->ioc, trcdata, trclen);
+}
+
+/**
+ * Reset hw semaphore & usage cnt regs and initialize.
+ */
+void
+bfa_chip_reset(struct bfa_s *bfa)
+{
+	bfa_ioc_ownership_reset(&bfa->ioc);
+	bfa_ioc_pll_init(&bfa->ioc);
 }
 #endif
+976 -755
drivers/scsi/bfa/bfa_fcport.c
··· 23 23 #include <cs/bfa_plog.h> 24 24 #include <aen/bfa_aen_port.h> 25 25 26 - BFA_TRC_FILE(HAL, PPORT); 27 - BFA_MODULE(pport); 28 - 29 - #define bfa_pport_callback(__pport, __event) do { \ 30 - if ((__pport)->bfa->fcs) { \ 31 - (__pport)->event_cbfn((__pport)->event_cbarg, (__event)); \ 32 - } else { \ 33 - (__pport)->hcb_event = (__event); \ 34 - bfa_cb_queue((__pport)->bfa, &(__pport)->hcb_qe, \ 35 - __bfa_cb_port_event, (__pport)); \ 36 - } \ 37 - } while (0) 26 + BFA_TRC_FILE(HAL, FCPORT); 27 + BFA_MODULE(fcport); 38 28 39 29 /* 40 30 * The port is considered disabled if corresponding physical port or IOC are 41 31 * disabled explicitly 42 32 */ 43 33 #define BFA_PORT_IS_DISABLED(bfa) \ 44 - ((bfa_pport_is_disabled(bfa) == BFA_TRUE) || \ 34 + ((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \ 45 35 (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE)) 46 36 47 37 /* 48 38 * forward declarations 49 39 */ 50 - static bfa_boolean_t bfa_pport_send_enable(struct bfa_pport_s *port); 51 - static bfa_boolean_t bfa_pport_send_disable(struct bfa_pport_s *port); 52 - static void bfa_pport_update_linkinfo(struct bfa_pport_s *pport); 53 - static void bfa_pport_reset_linkinfo(struct bfa_pport_s *pport); 54 - static void bfa_pport_set_wwns(struct bfa_pport_s *port); 55 - static void __bfa_cb_port_event(void *cbarg, bfa_boolean_t complete); 56 - static void __bfa_cb_port_stats(void *cbarg, bfa_boolean_t complete); 57 - static void __bfa_cb_port_stats_clr(void *cbarg, bfa_boolean_t complete); 58 - static void bfa_port_stats_timeout(void *cbarg); 59 - static void bfa_port_stats_clr_timeout(void *cbarg); 40 + static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport); 41 + static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport); 42 + static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport); 43 + static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport); 44 + static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport); 45 + static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete); 46 + static void bfa_fcport_callback(struct bfa_fcport_s *fcport, 47 + enum bfa_pport_linkstate event); 48 + static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, 49 + enum bfa_pport_linkstate event); 50 + static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete); 51 + static void bfa_fcport_stats_get_timeout(void *cbarg); 52 + static void bfa_fcport_stats_clr_timeout(void *cbarg); 60 53 61 54 /** 62 55 * bfa_pport_private ··· 58 65 /** 59 66 * BFA port state machine events 60 67 */ 61 - enum bfa_pport_sm_event { 62 - BFA_PPORT_SM_START = 1, /* start port state machine */ 63 - BFA_PPORT_SM_STOP = 2, /* stop port state machine */ 64 - BFA_PPORT_SM_ENABLE = 3, /* enable port */ 65 - BFA_PPORT_SM_DISABLE = 4, /* disable port state machine */ 66 - BFA_PPORT_SM_FWRSP = 5, /* firmware enable/disable rsp */ 67 - BFA_PPORT_SM_LINKUP = 6, /* firmware linkup event */ 68 - BFA_PPORT_SM_LINKDOWN = 7, /* firmware linkup down */ 69 - BFA_PPORT_SM_QRESUME = 8, /* CQ space available */ 70 - BFA_PPORT_SM_HWFAIL = 9, /* IOC h/w failure */ 68 + enum bfa_fcport_sm_event { 69 + BFA_FCPORT_SM_START = 1, /* start port state machine */ 70 + BFA_FCPORT_SM_STOP = 2, /* stop port state machine */ 71 + BFA_FCPORT_SM_ENABLE = 3, /* enable port */ 72 + BFA_FCPORT_SM_DISABLE = 4, /* disable port state machine */ 73 + BFA_FCPORT_SM_FWRSP = 5, /* firmware enable/disable rsp */ 74 + BFA_FCPORT_SM_LINKUP = 6, /* firmware linkup event */ 75 + BFA_FCPORT_SM_LINKDOWN = 7, /* firmware 
linkup down */ 76 + BFA_FCPORT_SM_QRESUME = 8, /* CQ space available */ 77 + BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */ 71 78 }; 72 79 73 - static void bfa_pport_sm_uninit(struct bfa_pport_s *pport, 74 - enum bfa_pport_sm_event event); 75 - static void bfa_pport_sm_enabling_qwait(struct bfa_pport_s *pport, 76 - enum bfa_pport_sm_event event); 77 - static void bfa_pport_sm_enabling(struct bfa_pport_s *pport, 78 - enum bfa_pport_sm_event event); 79 - static void bfa_pport_sm_linkdown(struct bfa_pport_s *pport, 80 - enum bfa_pport_sm_event event); 81 - static void bfa_pport_sm_linkup(struct bfa_pport_s *pport, 82 - enum bfa_pport_sm_event event); 83 - static void bfa_pport_sm_disabling(struct bfa_pport_s *pport, 84 - enum bfa_pport_sm_event event); 85 - static void bfa_pport_sm_disabling_qwait(struct bfa_pport_s *pport, 86 - enum bfa_pport_sm_event event); 87 - static void bfa_pport_sm_disabled(struct bfa_pport_s *pport, 88 - enum bfa_pport_sm_event event); 89 - static void bfa_pport_sm_stopped(struct bfa_pport_s *pport, 90 - enum bfa_pport_sm_event event); 91 - static void bfa_pport_sm_iocdown(struct bfa_pport_s *pport, 92 - enum bfa_pport_sm_event event); 93 - static void bfa_pport_sm_iocfail(struct bfa_pport_s *pport, 94 - enum bfa_pport_sm_event event); 80 + /** 81 + * BFA port link notification state machine events 82 + */ 83 + 84 + enum bfa_fcport_ln_sm_event { 85 + BFA_FCPORT_LN_SM_LINKUP = 1, /* linkup event */ 86 + BFA_FCPORT_LN_SM_LINKDOWN = 2, /* linkdown event */ 87 + BFA_FCPORT_LN_SM_NOTIFICATION = 3 /* done notification */ 88 + }; 89 + 90 + static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport, 91 + enum bfa_fcport_sm_event event); 92 + static void bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport, 93 + enum bfa_fcport_sm_event event); 94 + static void bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport, 95 + enum bfa_fcport_sm_event event); 96 + static void bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport, 97 + enum bfa_fcport_sm_event event); 98 + static void bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport, 99 + enum bfa_fcport_sm_event event); 100 + static void bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport, 101 + enum bfa_fcport_sm_event event); 102 + static void bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport, 103 + enum bfa_fcport_sm_event event); 104 + static void bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport, 105 + enum bfa_fcport_sm_event event); 106 + static void bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport, 107 + enum bfa_fcport_sm_event event); 108 + static void bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport, 109 + enum bfa_fcport_sm_event event); 110 + static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport, 111 + enum bfa_fcport_sm_event event); 112 + 113 + static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln, 114 + enum bfa_fcport_ln_sm_event event); 115 + static void bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln, 116 + enum bfa_fcport_ln_sm_event event); 117 + static void bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln, 118 + enum bfa_fcport_ln_sm_event event); 119 + static void bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln, 120 + enum bfa_fcport_ln_sm_event event); 121 + static void bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln, 122 + enum bfa_fcport_ln_sm_event event); 123 + static void bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln, 124 + enum bfa_fcport_ln_sm_event event); 125 + static void bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln, 126 + enum bfa_fcport_ln_sm_event 
event); 95 127 96 128 static struct bfa_sm_table_s hal_pport_sm_table[] = { 97 - {BFA_SM(bfa_pport_sm_uninit), BFA_PPORT_ST_UNINIT}, 98 - {BFA_SM(bfa_pport_sm_enabling_qwait), BFA_PPORT_ST_ENABLING_QWAIT}, 99 - {BFA_SM(bfa_pport_sm_enabling), BFA_PPORT_ST_ENABLING}, 100 - {BFA_SM(bfa_pport_sm_linkdown), BFA_PPORT_ST_LINKDOWN}, 101 - {BFA_SM(bfa_pport_sm_linkup), BFA_PPORT_ST_LINKUP}, 102 - {BFA_SM(bfa_pport_sm_disabling_qwait), 103 - BFA_PPORT_ST_DISABLING_QWAIT}, 104 - {BFA_SM(bfa_pport_sm_disabling), BFA_PPORT_ST_DISABLING}, 105 - {BFA_SM(bfa_pport_sm_disabled), BFA_PPORT_ST_DISABLED}, 106 - {BFA_SM(bfa_pport_sm_stopped), BFA_PPORT_ST_STOPPED}, 107 - {BFA_SM(bfa_pport_sm_iocdown), BFA_PPORT_ST_IOCDOWN}, 108 - {BFA_SM(bfa_pport_sm_iocfail), BFA_PPORT_ST_IOCDOWN}, 129 + {BFA_SM(bfa_fcport_sm_uninit), BFA_PPORT_ST_UNINIT}, 130 + {BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PPORT_ST_ENABLING_QWAIT}, 131 + {BFA_SM(bfa_fcport_sm_enabling), BFA_PPORT_ST_ENABLING}, 132 + {BFA_SM(bfa_fcport_sm_linkdown), BFA_PPORT_ST_LINKDOWN}, 133 + {BFA_SM(bfa_fcport_sm_linkup), BFA_PPORT_ST_LINKUP}, 134 + {BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PPORT_ST_DISABLING_QWAIT}, 135 + {BFA_SM(bfa_fcport_sm_disabling), BFA_PPORT_ST_DISABLING}, 136 + {BFA_SM(bfa_fcport_sm_disabled), BFA_PPORT_ST_DISABLED}, 137 + {BFA_SM(bfa_fcport_sm_stopped), BFA_PPORT_ST_STOPPED}, 138 + {BFA_SM(bfa_fcport_sm_iocdown), BFA_PPORT_ST_IOCDOWN}, 139 + {BFA_SM(bfa_fcport_sm_iocfail), BFA_PPORT_ST_IOCDOWN}, 109 140 }; 110 141 111 142 static void 112 - bfa_pport_aen_post(struct bfa_pport_s *pport, enum bfa_port_aen_event event) 143 + bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event) 113 144 { 114 145 union bfa_aen_data_u aen_data; 115 - struct bfa_log_mod_s *logmod = pport->bfa->logm; 116 - wwn_t pwwn = pport->pwwn; 146 + struct bfa_log_mod_s *logmod = fcport->bfa->logm; 147 + wwn_t pwwn = fcport->pwwn; 117 148 char pwwn_ptr[BFA_STRING_32]; 118 - struct bfa_ioc_attr_s ioc_attr; 119 149 150 + memset(&aen_data, 0, sizeof(aen_data)); 120 151 wwn2str(pwwn_ptr, pwwn); 121 - switch (event) { 122 - case BFA_PORT_AEN_ONLINE: 123 - bfa_log(logmod, BFA_AEN_PORT_ONLINE, pwwn_ptr); 124 - break; 125 - case BFA_PORT_AEN_OFFLINE: 126 - bfa_log(logmod, BFA_AEN_PORT_OFFLINE, pwwn_ptr); 127 - break; 128 - case BFA_PORT_AEN_ENABLE: 129 - bfa_log(logmod, BFA_AEN_PORT_ENABLE, pwwn_ptr); 130 - break; 131 - case BFA_PORT_AEN_DISABLE: 132 - bfa_log(logmod, BFA_AEN_PORT_DISABLE, pwwn_ptr); 133 - break; 134 - case BFA_PORT_AEN_DISCONNECT: 135 - bfa_log(logmod, BFA_AEN_PORT_DISCONNECT, pwwn_ptr); 136 - break; 137 - case BFA_PORT_AEN_QOS_NEG: 138 - bfa_log(logmod, BFA_AEN_PORT_QOS_NEG, pwwn_ptr); 139 - break; 140 - default: 141 - break; 142 - } 152 + bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, event), pwwn_ptr); 143 153 144 - bfa_ioc_get_attr(&pport->bfa->ioc, &ioc_attr); 145 - aen_data.port.ioc_type = ioc_attr.ioc_type; 154 + aen_data.port.ioc_type = bfa_get_type(fcport->bfa); 146 155 aen_data.port.pwwn = pwwn; 147 156 } 148 157 149 158 static void 150 - bfa_pport_sm_uninit(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) 159 + bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport, 160 + enum bfa_fcport_sm_event event) 151 161 { 152 - bfa_trc(pport->bfa, event); 162 + bfa_trc(fcport->bfa, event); 153 163 154 164 switch (event) { 155 - case BFA_PPORT_SM_START: 165 + case BFA_FCPORT_SM_START: 156 166 /** 157 167 * Start event after IOC is configured and BFA is started. 
158 168 */ 159 - if (bfa_pport_send_enable(pport)) 160 - bfa_sm_set_state(pport, bfa_pport_sm_enabling); 169 + if (bfa_fcport_send_enable(fcport)) 170 + bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); 161 171 else 162 - bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait); 172 + bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait); 163 173 break; 164 174 165 - case BFA_PPORT_SM_ENABLE: 175 + case BFA_FCPORT_SM_ENABLE: 166 176 /** 167 177 * Port is persistently configured to be in enabled state. Do 168 178 * not change state. Port enabling is done when START event is ··· 173 177 */ 174 178 break; 175 179 176 - case BFA_PPORT_SM_DISABLE: 180 + case BFA_FCPORT_SM_DISABLE: 177 181 /** 178 182 * If a port is persistently configured to be disabled, the 179 183 * first event will a port disable request. 180 184 */ 181 - bfa_sm_set_state(pport, bfa_pport_sm_disabled); 185 + bfa_sm_set_state(fcport, bfa_fcport_sm_disabled); 182 186 break; 183 187 184 - case BFA_PPORT_SM_HWFAIL: 185 - bfa_sm_set_state(pport, bfa_pport_sm_iocdown); 188 + case BFA_FCPORT_SM_HWFAIL: 189 + bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); 186 190 break; 187 191 188 192 default: 189 - bfa_sm_fault(pport->bfa, event); 193 + bfa_sm_fault(fcport->bfa, event); 190 194 } 191 195 } 192 196 193 197 static void 194 - bfa_pport_sm_enabling_qwait(struct bfa_pport_s *pport, 195 - enum bfa_pport_sm_event event) 198 + bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport, 199 + enum bfa_fcport_sm_event event) 196 200 { 197 - bfa_trc(pport->bfa, event); 201 + bfa_trc(fcport->bfa, event); 198 202 199 203 switch (event) { 200 - case BFA_PPORT_SM_QRESUME: 201 - bfa_sm_set_state(pport, bfa_pport_sm_enabling); 202 - bfa_pport_send_enable(pport); 204 + case BFA_FCPORT_SM_QRESUME: 205 + bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); 206 + bfa_fcport_send_enable(fcport); 203 207 break; 204 208 205 - case BFA_PPORT_SM_STOP: 206 - bfa_reqq_wcancel(&pport->reqq_wait); 207 - bfa_sm_set_state(pport, bfa_pport_sm_stopped); 209 + case BFA_FCPORT_SM_STOP: 210 + bfa_reqq_wcancel(&fcport->reqq_wait); 211 + bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); 208 212 break; 209 213 210 - case BFA_PPORT_SM_ENABLE: 214 + case BFA_FCPORT_SM_ENABLE: 211 215 /** 212 216 * Already enable is in progress. 213 217 */ 214 218 break; 215 219 216 - case BFA_PPORT_SM_DISABLE: 220 + case BFA_FCPORT_SM_DISABLE: 217 221 /** 218 222 * Just send disable request to firmware when room becomes 219 223 * available in request queue. 220 224 */ 221 - bfa_sm_set_state(pport, bfa_pport_sm_disabled); 222 - bfa_reqq_wcancel(&pport->reqq_wait); 223 - bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL, 225 + bfa_sm_set_state(fcport, bfa_fcport_sm_disabled); 226 + bfa_reqq_wcancel(&fcport->reqq_wait); 227 + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, 224 228 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); 225 - bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE); 229 + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE); 226 230 break; 227 231 228 - case BFA_PPORT_SM_LINKUP: 229 - case BFA_PPORT_SM_LINKDOWN: 232 + case BFA_FCPORT_SM_LINKUP: 233 + case BFA_FCPORT_SM_LINKDOWN: 230 234 /** 231 235 * Possible to get link events when doing back-to-back 232 236 * enable/disables. 
233 237 */ 234 238 break; 235 239 236 - case BFA_PPORT_SM_HWFAIL: 237 - bfa_reqq_wcancel(&pport->reqq_wait); 238 - bfa_sm_set_state(pport, bfa_pport_sm_iocdown); 240 + case BFA_FCPORT_SM_HWFAIL: 241 + bfa_reqq_wcancel(&fcport->reqq_wait); 242 + bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); 239 243 break; 240 244 241 245 default: 242 - bfa_sm_fault(pport->bfa, event); 246 + bfa_sm_fault(fcport->bfa, event); 243 247 } 244 248 } 245 249 246 250 static void 247 - bfa_pport_sm_enabling(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) 251 + bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport, 252 + enum bfa_fcport_sm_event event) 248 253 { 249 - bfa_trc(pport->bfa, event); 254 + bfa_trc(fcport->bfa, event); 250 255 251 256 switch (event) { 252 - case BFA_PPORT_SM_FWRSP: 253 - case BFA_PPORT_SM_LINKDOWN: 254 - bfa_sm_set_state(pport, bfa_pport_sm_linkdown); 257 + case BFA_FCPORT_SM_FWRSP: 258 + case BFA_FCPORT_SM_LINKDOWN: 259 + bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown); 255 260 break; 256 261 257 - case BFA_PPORT_SM_LINKUP: 258 - bfa_pport_update_linkinfo(pport); 259 - bfa_sm_set_state(pport, bfa_pport_sm_linkup); 262 + case BFA_FCPORT_SM_LINKUP: 263 + bfa_fcport_update_linkinfo(fcport); 264 + bfa_sm_set_state(fcport, bfa_fcport_sm_linkup); 260 265 261 - bfa_assert(pport->event_cbfn); 262 - bfa_pport_callback(pport, BFA_PPORT_LINKUP); 266 + bfa_assert(fcport->event_cbfn); 267 + bfa_fcport_callback(fcport, BFA_PPORT_LINKUP); 263 268 break; 264 269 265 - case BFA_PPORT_SM_ENABLE: 270 + case BFA_FCPORT_SM_ENABLE: 266 271 /** 267 272 * Already being enabled. 268 273 */ 269 274 break; 270 275 271 - case BFA_PPORT_SM_DISABLE: 272 - if (bfa_pport_send_disable(pport)) 273 - bfa_sm_set_state(pport, bfa_pport_sm_disabling); 276 + case BFA_FCPORT_SM_DISABLE: 277 + if (bfa_fcport_send_disable(fcport)) 278 + bfa_sm_set_state(fcport, bfa_fcport_sm_disabling); 274 279 else 275 - bfa_sm_set_state(pport, bfa_pport_sm_disabling_qwait); 280 + bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait); 276 281 277 - bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL, 282 + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, 278 283 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); 279 - bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE); 284 + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE); 280 285 break; 281 286 282 - case BFA_PPORT_SM_STOP: 283 - bfa_sm_set_state(pport, bfa_pport_sm_stopped); 287 + case BFA_FCPORT_SM_STOP: 288 + bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); 284 289 break; 285 290 286 - case BFA_PPORT_SM_HWFAIL: 287 - bfa_sm_set_state(pport, bfa_pport_sm_iocdown); 291 + case BFA_FCPORT_SM_HWFAIL: 292 + bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); 288 293 break; 289 294 290 295 default: 291 - bfa_sm_fault(pport->bfa, event); 296 + bfa_sm_fault(fcport->bfa, event); 292 297 } 293 298 } 294 299 295 300 static void 296 - bfa_pport_sm_linkdown(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) 301 + bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport, 302 + enum bfa_fcport_sm_event event) 297 303 { 298 - bfa_trc(pport->bfa, event); 304 + struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event; 305 + bfa_trc(fcport->bfa, event); 299 306 300 307 switch (event) { 301 - case BFA_PPORT_SM_LINKUP: 302 - bfa_pport_update_linkinfo(pport); 303 - bfa_sm_set_state(pport, bfa_pport_sm_linkup); 304 - bfa_assert(pport->event_cbfn); 305 - bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL, 308 + case BFA_FCPORT_SM_LINKUP: 309 + bfa_fcport_update_linkinfo(fcport); 310 + bfa_sm_set_state(fcport, 
bfa_fcport_sm_linkup); 311 + bfa_assert(fcport->event_cbfn); 312 + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, 306 313 BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup"); 307 - bfa_pport_callback(pport, BFA_PPORT_LINKUP); 308 - bfa_pport_aen_post(pport, BFA_PORT_AEN_ONLINE); 314 + 315 + if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) { 316 + 317 + bfa_trc(fcport->bfa, pevent->link_state.fcf.fipenabled); 318 + bfa_trc(fcport->bfa, pevent->link_state.fcf.fipfailed); 319 + 320 + if (pevent->link_state.fcf.fipfailed) 321 + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, 322 + BFA_PL_EID_FIP_FCF_DISC, 0, 323 + "FIP FCF Discovery Failed"); 324 + else 325 + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, 326 + BFA_PL_EID_FIP_FCF_DISC, 0, 327 + "FIP FCF Discovered"); 328 + } 329 + 330 + bfa_fcport_callback(fcport, BFA_PPORT_LINKUP); 331 + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE); 309 332 /** 310 333 * If QoS is enabled and it is not online, 311 334 * Send a separate event. 312 335 */ 313 - if ((pport->cfg.qos_enabled) 314 - && (bfa_os_ntohl(pport->qos_attr.state) != BFA_QOS_ONLINE)) 315 - bfa_pport_aen_post(pport, BFA_PORT_AEN_QOS_NEG); 336 + if ((fcport->cfg.qos_enabled) 337 + && (bfa_os_ntohl(fcport->qos_attr.state) != BFA_QOS_ONLINE)) 338 + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_QOS_NEG); 316 339 317 340 break; 318 341 319 - case BFA_PPORT_SM_LINKDOWN: 342 + case BFA_FCPORT_SM_LINKDOWN: 320 343 /** 321 344 * Possible to get link down event. 322 345 */ 323 346 break; 324 347 325 - case BFA_PPORT_SM_ENABLE: 348 + case BFA_FCPORT_SM_ENABLE: 326 349 /** 327 350 * Already enabled. 328 351 */ 329 352 break; 330 353 331 - case BFA_PPORT_SM_DISABLE: 332 - if (bfa_pport_send_disable(pport)) 333 - bfa_sm_set_state(pport, bfa_pport_sm_disabling); 354 + case BFA_FCPORT_SM_DISABLE: 355 + if (bfa_fcport_send_disable(fcport)) 356 + bfa_sm_set_state(fcport, bfa_fcport_sm_disabling); 334 357 else 335 - bfa_sm_set_state(pport, bfa_pport_sm_disabling_qwait); 358 + bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait); 336 359 337 - bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL, 360 + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, 338 361 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); 339 - bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE); 362 + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE); 340 363 break; 341 364 342 - case BFA_PPORT_SM_STOP: 343 - bfa_sm_set_state(pport, bfa_pport_sm_stopped); 365 + case BFA_FCPORT_SM_STOP: 366 + bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); 344 367 break; 345 368 346 - case BFA_PPORT_SM_HWFAIL: 347 - bfa_sm_set_state(pport, bfa_pport_sm_iocdown); 369 + case BFA_FCPORT_SM_HWFAIL: 370 + bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); 348 371 break; 349 372 350 373 default: 351 - bfa_sm_fault(pport->bfa, event); 374 + bfa_sm_fault(fcport->bfa, event); 352 375 } 353 376 } 354 377 355 378 static void 356 - bfa_pport_sm_linkup(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) 379 + bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport, 380 + enum bfa_fcport_sm_event event) 357 381 { 358 - bfa_trc(pport->bfa, event); 382 + bfa_trc(fcport->bfa, event); 359 383 360 384 switch (event) { 361 - case BFA_PPORT_SM_ENABLE: 385 + case BFA_FCPORT_SM_ENABLE: 362 386 /** 363 387 * Already enabled. 
364 388 */ 365 389 break; 366 390 367 - case BFA_PPORT_SM_DISABLE: 368 - if (bfa_pport_send_disable(pport)) 369 - bfa_sm_set_state(pport, bfa_pport_sm_disabling); 391 + case BFA_FCPORT_SM_DISABLE: 392 + if (bfa_fcport_send_disable(fcport)) 393 + bfa_sm_set_state(fcport, bfa_fcport_sm_disabling); 370 394 else 371 - bfa_sm_set_state(pport, bfa_pport_sm_disabling_qwait); 395 + bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait); 372 396 373 - bfa_pport_reset_linkinfo(pport); 374 - bfa_pport_callback(pport, BFA_PPORT_LINKDOWN); 375 - bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL, 397 + bfa_fcport_reset_linkinfo(fcport); 398 + bfa_fcport_callback(fcport, BFA_PPORT_LINKDOWN); 399 + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, 376 400 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); 377 - bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE); 378 - bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE); 401 + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE); 402 + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE); 379 403 break; 380 404 381 - case BFA_PPORT_SM_LINKDOWN: 382 - bfa_sm_set_state(pport, bfa_pport_sm_linkdown); 383 - bfa_pport_reset_linkinfo(pport); 384 - bfa_pport_callback(pport, BFA_PPORT_LINKDOWN); 385 - bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL, 405 + case BFA_FCPORT_SM_LINKDOWN: 406 + bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown); 407 + bfa_fcport_reset_linkinfo(fcport); 408 + bfa_fcport_callback(fcport, BFA_PPORT_LINKDOWN); 409 + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, 386 410 BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown"); 387 - if (BFA_PORT_IS_DISABLED(pport->bfa)) 388 - bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE); 411 + if (BFA_PORT_IS_DISABLED(fcport->bfa)) 412 + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE); 389 413 else 390 - bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT); 414 + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT); 391 415 break; 392 416 393 - case BFA_PPORT_SM_STOP: 394 - bfa_sm_set_state(pport, bfa_pport_sm_stopped); 395 - bfa_pport_reset_linkinfo(pport); 396 - if (BFA_PORT_IS_DISABLED(pport->bfa)) 397 - bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE); 417 + case BFA_FCPORT_SM_STOP: 418 + bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); 419 + bfa_fcport_reset_linkinfo(fcport); 420 + if (BFA_PORT_IS_DISABLED(fcport->bfa)) 421 + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE); 398 422 else 399 - bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT); 423 + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT); 400 424 break; 401 425 402 - case BFA_PPORT_SM_HWFAIL: 403 - bfa_sm_set_state(pport, bfa_pport_sm_iocdown); 404 - bfa_pport_reset_linkinfo(pport); 405 - bfa_pport_callback(pport, BFA_PPORT_LINKDOWN); 406 - if (BFA_PORT_IS_DISABLED(pport->bfa)) 407 - bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE); 426 + case BFA_FCPORT_SM_HWFAIL: 427 + bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); 428 + bfa_fcport_reset_linkinfo(fcport); 429 + bfa_fcport_callback(fcport, BFA_PPORT_LINKDOWN); 430 + if (BFA_PORT_IS_DISABLED(fcport->bfa)) 431 + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE); 408 432 else 409 - bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT); 433 + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT); 410 434 break; 411 435 412 436 default: 413 - bfa_sm_fault(pport->bfa, event); 437 + bfa_sm_fault(fcport->bfa, event); 414 438 } 415 439 } 416 440 417 441 static void 418 - bfa_pport_sm_disabling_qwait(struct bfa_pport_s *pport, 419 - enum bfa_pport_sm_event event) 442 + bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport, 443 
+ enum bfa_fcport_sm_event event) 420 444 { 421 - bfa_trc(pport->bfa, event); 445 + bfa_trc(fcport->bfa, event); 422 446 423 447 switch (event) { 424 - case BFA_PPORT_SM_QRESUME: 425 - bfa_sm_set_state(pport, bfa_pport_sm_disabling); 426 - bfa_pport_send_disable(pport); 448 + case BFA_FCPORT_SM_QRESUME: 449 + bfa_sm_set_state(fcport, bfa_fcport_sm_disabling); 450 + bfa_fcport_send_disable(fcport); 427 451 break; 428 452 429 - case BFA_PPORT_SM_STOP: 430 - bfa_sm_set_state(pport, bfa_pport_sm_stopped); 431 - bfa_reqq_wcancel(&pport->reqq_wait); 453 + case BFA_FCPORT_SM_STOP: 454 + bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); 455 + bfa_reqq_wcancel(&fcport->reqq_wait); 432 456 break; 433 457 434 - case BFA_PPORT_SM_DISABLE: 458 + case BFA_FCPORT_SM_DISABLE: 435 459 /** 436 460 * Already being disabled. 437 461 */ 438 462 break; 439 463 440 - case BFA_PPORT_SM_LINKUP: 441 - case BFA_PPORT_SM_LINKDOWN: 464 + case BFA_FCPORT_SM_LINKUP: 465 + case BFA_FCPORT_SM_LINKDOWN: 442 466 /** 443 467 * Possible to get link events when doing back-to-back 444 468 * enable/disables. 445 469 */ 446 470 break; 447 471 448 - case BFA_PPORT_SM_HWFAIL: 449 - bfa_sm_set_state(pport, bfa_pport_sm_iocfail); 450 - bfa_reqq_wcancel(&pport->reqq_wait); 472 + case BFA_FCPORT_SM_HWFAIL: 473 + bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); 474 + bfa_reqq_wcancel(&fcport->reqq_wait); 451 475 break; 452 476 453 477 default: 454 - bfa_sm_fault(pport->bfa, event); 478 + bfa_sm_fault(fcport->bfa, event); 455 479 } 456 480 } 457 481 458 482 static void 459 - bfa_pport_sm_disabling(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) 483 + bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport, 484 + enum bfa_fcport_sm_event event) 460 485 { 461 - bfa_trc(pport->bfa, event); 486 + bfa_trc(fcport->bfa, event); 462 487 463 488 switch (event) { 464 - case BFA_PPORT_SM_FWRSP: 465 - bfa_sm_set_state(pport, bfa_pport_sm_disabled); 489 + case BFA_FCPORT_SM_FWRSP: 490 + bfa_sm_set_state(fcport, bfa_fcport_sm_disabled); 466 491 break; 467 492 468 - case BFA_PPORT_SM_DISABLE: 493 + case BFA_FCPORT_SM_DISABLE: 469 494 /** 470 495 * Already being disabled. 471 496 */ 472 497 break; 473 498 474 - case BFA_PPORT_SM_ENABLE: 475 - if (bfa_pport_send_enable(pport)) 476 - bfa_sm_set_state(pport, bfa_pport_sm_enabling); 499 + case BFA_FCPORT_SM_ENABLE: 500 + if (bfa_fcport_send_enable(fcport)) 501 + bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); 477 502 else 478 - bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait); 503 + bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait); 479 504 480 - bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL, 505 + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, 481 506 BFA_PL_EID_PORT_ENABLE, 0, "Port Enable"); 482 - bfa_pport_aen_post(pport, BFA_PORT_AEN_ENABLE); 507 + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE); 483 508 break; 484 509 485 - case BFA_PPORT_SM_STOP: 486 - bfa_sm_set_state(pport, bfa_pport_sm_stopped); 510 + case BFA_FCPORT_SM_STOP: 511 + bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); 487 512 break; 488 513 489 - case BFA_PPORT_SM_LINKUP: 490 - case BFA_PPORT_SM_LINKDOWN: 514 + case BFA_FCPORT_SM_LINKUP: 515 + case BFA_FCPORT_SM_LINKDOWN: 491 516 /** 492 517 * Possible to get link events when doing back-to-back 493 518 * enable/disables. 
494 519 */ 495 520 break; 496 521 497 - case BFA_PPORT_SM_HWFAIL: 498 - bfa_sm_set_state(pport, bfa_pport_sm_iocfail); 522 + case BFA_FCPORT_SM_HWFAIL: 523 + bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); 499 524 break; 500 525 501 526 default: 502 - bfa_sm_fault(pport->bfa, event); 527 + bfa_sm_fault(fcport->bfa, event); 503 528 } 504 529 } 505 530 506 531 static void 507 - bfa_pport_sm_disabled(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) 532 + bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport, 533 + enum bfa_fcport_sm_event event) 508 534 { 509 - bfa_trc(pport->bfa, event); 535 + bfa_trc(fcport->bfa, event); 510 536 511 537 switch (event) { 512 - case BFA_PPORT_SM_START: 538 + case BFA_FCPORT_SM_START: 513 539 /** 514 540 * Ignore start event for a port that is disabled. 515 541 */ 516 542 break; 517 543 518 - case BFA_PPORT_SM_STOP: 519 - bfa_sm_set_state(pport, bfa_pport_sm_stopped); 544 + case BFA_FCPORT_SM_STOP: 545 + bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); 520 546 break; 521 547 522 - case BFA_PPORT_SM_ENABLE: 523 - if (bfa_pport_send_enable(pport)) 524 - bfa_sm_set_state(pport, bfa_pport_sm_enabling); 548 + case BFA_FCPORT_SM_ENABLE: 549 + if (bfa_fcport_send_enable(fcport)) 550 + bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); 525 551 else 526 - bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait); 552 + bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait); 527 553 528 - bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL, 554 + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, 529 555 BFA_PL_EID_PORT_ENABLE, 0, "Port Enable"); 530 - bfa_pport_aen_post(pport, BFA_PORT_AEN_ENABLE); 556 + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE); 531 557 break; 532 558 533 - case BFA_PPORT_SM_DISABLE: 559 + case BFA_FCPORT_SM_DISABLE: 534 560 /** 535 561 * Already disabled. 536 562 */ 537 563 break; 538 564 539 - case BFA_PPORT_SM_HWFAIL: 540 - bfa_sm_set_state(pport, bfa_pport_sm_iocfail); 565 + case BFA_FCPORT_SM_HWFAIL: 566 + bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); 541 567 break; 542 568 543 569 default: 544 - bfa_sm_fault(pport->bfa, event); 570 + bfa_sm_fault(fcport->bfa, event); 545 571 } 546 572 } 547 573 548 574 static void 549 - bfa_pport_sm_stopped(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) 575 + bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport, 576 + enum bfa_fcport_sm_event event) 550 577 { 551 - bfa_trc(pport->bfa, event); 578 + bfa_trc(fcport->bfa, event); 552 579 553 580 switch (event) { 554 - case BFA_PPORT_SM_START: 555 - if (bfa_pport_send_enable(pport)) 556 - bfa_sm_set_state(pport, bfa_pport_sm_enabling); 581 + case BFA_FCPORT_SM_START: 582 + if (bfa_fcport_send_enable(fcport)) 583 + bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); 557 584 else 558 - bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait); 585 + bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait); 559 586 break; 560 587 561 588 default: ··· 593 574 * Port is enabled. IOC is down/failed. 
594 575 */ 595 576 static void 596 - bfa_pport_sm_iocdown(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) 577 + bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport, 578 + enum bfa_fcport_sm_event event) 597 579 { 598 - bfa_trc(pport->bfa, event); 580 + bfa_trc(fcport->bfa, event); 599 581 600 582 switch (event) { 601 - case BFA_PPORT_SM_START: 602 - if (bfa_pport_send_enable(pport)) 603 - bfa_sm_set_state(pport, bfa_pport_sm_enabling); 583 + case BFA_FCPORT_SM_START: 584 + if (bfa_fcport_send_enable(fcport)) 585 + bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); 604 586 else 605 - bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait); 587 + bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait); 606 588 break; 607 589 608 590 default: ··· 618 598 * Port is disabled. IOC is down/failed. 619 599 */ 620 600 static void 621 - bfa_pport_sm_iocfail(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) 601 + bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport, 602 + enum bfa_fcport_sm_event event) 622 603 { 623 - bfa_trc(pport->bfa, event); 604 + bfa_trc(fcport->bfa, event); 624 605 625 606 switch (event) { 626 - case BFA_PPORT_SM_START: 627 - bfa_sm_set_state(pport, bfa_pport_sm_disabled); 607 + case BFA_FCPORT_SM_START: 608 + bfa_sm_set_state(fcport, bfa_fcport_sm_disabled); 628 609 break; 629 610 630 - case BFA_PPORT_SM_ENABLE: 631 - bfa_sm_set_state(pport, bfa_pport_sm_iocdown); 611 + case BFA_FCPORT_SM_ENABLE: 612 + bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); 632 613 break; 633 614 634 615 default: ··· 640 619 } 641 620 } 642 621 622 + /** 623 + * Link state is down 624 + */ 625 + static void 626 + bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln, 627 + enum bfa_fcport_ln_sm_event event) 628 + { 629 + bfa_trc(ln->fcport->bfa, event); 643 630 631 + switch (event) { 632 + case BFA_FCPORT_LN_SM_LINKUP: 633 + bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf); 634 + bfa_fcport_queue_cb(ln, BFA_PPORT_LINKUP); 635 + break; 636 + 637 + default: 638 + bfa_sm_fault(ln->fcport->bfa, event); 639 + } 640 + } 641 + 642 + /** 643 + * Link state is waiting for down notification 644 + */ 645 + static void 646 + bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln, 647 + enum bfa_fcport_ln_sm_event event) 648 + { 649 + bfa_trc(ln->fcport->bfa, event); 650 + 651 + switch (event) { 652 + case BFA_FCPORT_LN_SM_LINKUP: 653 + bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf); 654 + break; 655 + 656 + case BFA_FCPORT_LN_SM_NOTIFICATION: 657 + bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn); 658 + break; 659 + 660 + default: 661 + bfa_sm_fault(ln->fcport->bfa, event); 662 + } 663 + } 664 + 665 + /** 666 + * Link state is waiting for down notification and there is a pending up 667 + */ 668 + static void 669 + bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln, 670 + enum bfa_fcport_ln_sm_event event) 671 + { 672 + bfa_trc(ln->fcport->bfa, event); 673 + 674 + switch (event) { 675 + case BFA_FCPORT_LN_SM_LINKDOWN: 676 + bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf); 677 + break; 678 + 679 + case BFA_FCPORT_LN_SM_NOTIFICATION: 680 + bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf); 681 + bfa_fcport_queue_cb(ln, BFA_PPORT_LINKUP); 682 + break; 683 + 684 + default: 685 + bfa_sm_fault(ln->fcport->bfa, event); 686 + } 687 + } 688 + 689 + /** 690 + * Link state is up 691 + */ 692 + static void 693 + bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln, 694 + enum bfa_fcport_ln_sm_event event) 695 + { 696 + bfa_trc(ln->fcport->bfa, event); 697 + 698 + switch (event) { 699 + case BFA_FCPORT_LN_SM_LINKDOWN: 700 + bfa_sm_set_state(ln, 
bfa_fcport_ln_sm_dn_nf); 701 + bfa_fcport_queue_cb(ln, BFA_PPORT_LINKDOWN); 702 + break; 703 + 704 + default: 705 + bfa_sm_fault(ln->fcport->bfa, event); 706 + } 707 + } 708 + 709 + /** 710 + * Link state is waiting for up notification 711 + */ 712 + static void 713 + bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln, 714 + enum bfa_fcport_ln_sm_event event) 715 + { 716 + bfa_trc(ln->fcport->bfa, event); 717 + 718 + switch (event) { 719 + case BFA_FCPORT_LN_SM_LINKDOWN: 720 + bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf); 721 + break; 722 + 723 + case BFA_FCPORT_LN_SM_NOTIFICATION: 724 + bfa_sm_set_state(ln, bfa_fcport_ln_sm_up); 725 + break; 726 + 727 + default: 728 + bfa_sm_fault(ln->fcport->bfa, event); 729 + } 730 + } 731 + 732 + /** 733 + * Link state is waiting for up notification and there is a pending down 734 + */ 735 + static void 736 + bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln, 737 + enum bfa_fcport_ln_sm_event event) 738 + { 739 + bfa_trc(ln->fcport->bfa, event); 740 + 741 + switch (event) { 742 + case BFA_FCPORT_LN_SM_LINKUP: 743 + bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf); 744 + break; 745 + 746 + case BFA_FCPORT_LN_SM_NOTIFICATION: 747 + bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf); 748 + bfa_fcport_queue_cb(ln, BFA_PPORT_LINKDOWN); 749 + break; 750 + 751 + default: 752 + bfa_sm_fault(ln->fcport->bfa, event); 753 + } 754 + } 755 + 756 + /** 757 + * Link state is waiting for up notification and there are pending down and up 758 + */ 759 + static void 760 + bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln, 761 + enum bfa_fcport_ln_sm_event event) 762 + { 763 + bfa_trc(ln->fcport->bfa, event); 764 + 765 + switch (event) { 766 + case BFA_FCPORT_LN_SM_LINKDOWN: 767 + bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf); 768 + break; 769 + 770 + case BFA_FCPORT_LN_SM_NOTIFICATION: 771 + bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf); 772 + bfa_fcport_queue_cb(ln, BFA_PPORT_LINKDOWN); 773 + break; 774 + 775 + default: 776 + bfa_sm_fault(ln->fcport->bfa, event); 777 + } 778 + } 644 779 645 780 /** 646 781 * bfa_pport_private 647 782 */ 648 783 649 784 static void 650 - __bfa_cb_port_event(void *cbarg, bfa_boolean_t complete) 785 + __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete) 651 786 { 652 - struct bfa_pport_s *pport = cbarg; 787 + struct bfa_fcport_ln_s *ln = cbarg; 653 788 654 789 if (complete) 655 - pport->event_cbfn(pport->event_cbarg, pport->hcb_event); 790 + ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event); 791 + else 792 + bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION); 656 793 } 657 794 658 - #define PPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_pport_stats_u), \ 795 + static void 796 + bfa_fcport_callback(struct bfa_fcport_s *fcport, enum bfa_pport_linkstate event) 797 + { 798 + if (fcport->bfa->fcs) { 799 + fcport->event_cbfn(fcport->event_cbarg, event); 800 + return; 801 + } 802 + 803 + switch (event) { 804 + case BFA_PPORT_LINKUP: 805 + bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP); 806 + break; 807 + case BFA_PPORT_LINKDOWN: 808 + bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN); 809 + break; 810 + default: 811 + bfa_assert(0); 812 + } 813 + } 814 + 815 + static void 816 + bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_pport_linkstate event) 817 + { 818 + ln->ln_event = event; 819 + bfa_cb_queue(ln->fcport->bfa, &ln->ln_qe, __bfa_cb_fcport_event, ln); 820 + } 821 + 822 + #define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \ 659 823 BFA_CACHELINE_SZ)) 660 824 
661 825 static void 662 - bfa_pport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, 826 + bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, 663 827 u32 *dm_len) 664 828 { 665 - *dm_len += PPORT_STATS_DMA_SZ; 829 + *dm_len += FCPORT_STATS_DMA_SZ; 666 830 } 667 831 668 832 static void 669 - bfa_pport_qresume(void *cbarg) 833 + bfa_fcport_qresume(void *cbarg) 670 834 { 671 - struct bfa_pport_s *port = cbarg; 835 + struct bfa_fcport_s *fcport = cbarg; 672 836 673 - bfa_sm_send_event(port, BFA_PPORT_SM_QRESUME); 837 + bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME); 674 838 } 675 839 676 840 static void 677 - bfa_pport_mem_claim(struct bfa_pport_s *pport, struct bfa_meminfo_s *meminfo) 841 + bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo) 678 842 { 679 843 u8 *dm_kva; 680 844 u64 dm_pa; ··· 867 661 dm_kva = bfa_meminfo_dma_virt(meminfo); 868 662 dm_pa = bfa_meminfo_dma_phys(meminfo); 869 663 870 - pport->stats_kva = dm_kva; 871 - pport->stats_pa = dm_pa; 872 - pport->stats = (union bfa_pport_stats_u *)dm_kva; 664 + fcport->stats_kva = dm_kva; 665 + fcport->stats_pa = dm_pa; 666 + fcport->stats = (union bfa_fcport_stats_u *)dm_kva; 873 667 874 - dm_kva += PPORT_STATS_DMA_SZ; 875 - dm_pa += PPORT_STATS_DMA_SZ; 668 + dm_kva += FCPORT_STATS_DMA_SZ; 669 + dm_pa += FCPORT_STATS_DMA_SZ; 876 670 877 671 bfa_meminfo_dma_virt(meminfo) = dm_kva; 878 672 bfa_meminfo_dma_phys(meminfo) = dm_pa; ··· 882 676 * Memory initialization. 883 677 */ 884 678 static void 885 - bfa_pport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, 679 + bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, 886 680 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) 887 681 { 888 - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 889 - struct bfa_pport_cfg_s *port_cfg = &pport->cfg; 682 + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 683 + struct bfa_pport_cfg_s *port_cfg = &fcport->cfg; 684 + struct bfa_fcport_ln_s *ln = &fcport->ln; 890 685 891 - bfa_os_memset(pport, 0, sizeof(struct bfa_pport_s)); 892 - pport->bfa = bfa; 686 + bfa_os_memset(fcport, 0, sizeof(struct bfa_fcport_s)); 687 + fcport->bfa = bfa; 688 + ln->fcport = fcport; 893 689 894 - bfa_pport_mem_claim(pport, meminfo); 690 + bfa_fcport_mem_claim(fcport, meminfo); 895 691 896 - bfa_sm_set_state(pport, bfa_pport_sm_uninit); 692 + bfa_sm_set_state(fcport, bfa_fcport_sm_uninit); 693 + bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn); 897 694 898 695 /** 899 696 * initialize and set default configuration ··· 908 699 909 700 port_cfg->trl_def_speed = BFA_PPORT_SPEED_1GBPS; 910 701 911 - bfa_reqq_winit(&pport->reqq_wait, bfa_pport_qresume, pport); 702 + bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport); 912 703 } 913 704 914 705 static void 915 - bfa_pport_initdone(struct bfa_s *bfa) 706 + bfa_fcport_initdone(struct bfa_s *bfa) 916 707 { 917 - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 708 + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 918 709 919 710 /** 920 711 * Initialize port attributes from IOC hardware data. 
921 712 */ 922 - bfa_pport_set_wwns(pport); 923 - if (pport->cfg.maxfrsize == 0) 924 - pport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc); 925 - pport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc); 926 - pport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc); 713 + bfa_fcport_set_wwns(fcport); 714 + if (fcport->cfg.maxfrsize == 0) 715 + fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc); 716 + fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc); 717 + fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc); 927 718 928 - bfa_assert(pport->cfg.maxfrsize); 929 - bfa_assert(pport->cfg.rx_bbcredit); 930 - bfa_assert(pport->speed_sup); 719 + bfa_assert(fcport->cfg.maxfrsize); 720 + bfa_assert(fcport->cfg.rx_bbcredit); 721 + bfa_assert(fcport->speed_sup); 931 722 } 932 723 933 724 static void 934 - bfa_pport_detach(struct bfa_s *bfa) 725 + bfa_fcport_detach(struct bfa_s *bfa) 935 726 { 936 727 } 937 728 ··· 939 730 * Called when IOC is ready. 940 731 */ 941 732 static void 942 - bfa_pport_start(struct bfa_s *bfa) 733 + bfa_fcport_start(struct bfa_s *bfa) 943 734 { 944 - bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_START); 735 + bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START); 945 736 } 946 737 947 738 /** 948 739 * Called before IOC is stopped. 949 740 */ 950 741 static void 951 - bfa_pport_stop(struct bfa_s *bfa) 742 + bfa_fcport_stop(struct bfa_s *bfa) 952 743 { 953 - bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_STOP); 744 + bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP); 954 745 } 955 746 956 747 /** 957 748 * Called when IOC failure is detected. 958 749 */ 959 750 static void 960 - bfa_pport_iocdisable(struct bfa_s *bfa) 751 + bfa_fcport_iocdisable(struct bfa_s *bfa) 961 752 { 962 - bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_HWFAIL); 753 + bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_HWFAIL); 963 754 } 964 755 965 756 static void 966 - bfa_pport_update_linkinfo(struct bfa_pport_s *pport) 757 + bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport) 967 758 { 968 - struct bfi_pport_event_s *pevent = pport->event_arg.i2hmsg.event; 759 + struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event; 969 760 970 - pport->speed = pevent->link_state.speed; 971 - pport->topology = pevent->link_state.topology; 761 + fcport->speed = pevent->link_state.speed; 762 + fcport->topology = pevent->link_state.topology; 972 763 973 - if (pport->topology == BFA_PPORT_TOPOLOGY_LOOP) 974 - pport->myalpa = pevent->link_state.tl.loop_info.myalpa; 764 + if (fcport->topology == BFA_PPORT_TOPOLOGY_LOOP) 765 + fcport->myalpa = 766 + pevent->link_state.tl.loop_info.myalpa; 975 767 976 768 /* 977 769 * QoS Details 978 770 */ 979 - bfa_os_assign(pport->qos_attr, pevent->link_state.qos_attr); 980 - bfa_os_assign(pport->qos_vc_attr, pevent->link_state.qos_vc_attr); 771 + bfa_os_assign(fcport->qos_attr, pevent->link_state.qos_attr); 772 + bfa_os_assign(fcport->qos_vc_attr, pevent->link_state.qos_vc_attr); 981 773 982 - bfa_trc(pport->bfa, pport->speed); 983 - bfa_trc(pport->bfa, pport->topology); 774 + bfa_trc(fcport->bfa, fcport->speed); 775 + bfa_trc(fcport->bfa, fcport->topology); 984 776 } 985 777 986 778 static void 987 - bfa_pport_reset_linkinfo(struct bfa_pport_s *pport) 779 + bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport) 988 780 { 989 - pport->speed = BFA_PPORT_SPEED_UNKNOWN; 990 - pport->topology = BFA_PPORT_TOPOLOGY_NONE; 781 + fcport->speed = BFA_PPORT_SPEED_UNKNOWN; 782 + fcport->topology = BFA_PPORT_TOPOLOGY_NONE; 991 783 } 992 784 993 785 /** 994 786 
* Send port enable message to firmware. 995 787 */ 996 788 static bfa_boolean_t 997 - bfa_pport_send_enable(struct bfa_pport_s *port) 789 + bfa_fcport_send_enable(struct bfa_fcport_s *fcport) 998 790 { 999 - struct bfi_pport_enable_req_s *m; 791 + struct bfi_fcport_enable_req_s *m; 1000 792 1001 793 /** 1002 794 * Increment message tag before queue check, so that responses to old 1003 795 * requests are discarded. 1004 796 */ 1005 - port->msgtag++; 797 + fcport->msgtag++; 1006 798 1007 799 /** 1008 800 * check for room in queue to send request now 1009 801 */ 1010 - m = bfa_reqq_next(port->bfa, BFA_REQQ_PORT); 802 + m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); 1011 803 if (!m) { 1012 - bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->reqq_wait); 804 + bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT, 805 + &fcport->reqq_wait); 1013 806 return BFA_FALSE; 1014 807 } 1015 808 1016 - bfi_h2i_set(m->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_ENABLE_REQ, 1017 - bfa_lpuid(port->bfa)); 1018 - m->nwwn = port->nwwn; 1019 - m->pwwn = port->pwwn; 1020 - m->port_cfg = port->cfg; 1021 - m->msgtag = port->msgtag; 1022 - m->port_cfg.maxfrsize = bfa_os_htons(port->cfg.maxfrsize); 1023 - bfa_dma_be_addr_set(m->stats_dma_addr, port->stats_pa); 1024 - bfa_trc(port->bfa, m->stats_dma_addr.a32.addr_lo); 1025 - bfa_trc(port->bfa, m->stats_dma_addr.a32.addr_hi); 809 + bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ, 810 + bfa_lpuid(fcport->bfa)); 811 + m->nwwn = fcport->nwwn; 812 + m->pwwn = fcport->pwwn; 813 + m->port_cfg = fcport->cfg; 814 + m->msgtag = fcport->msgtag; 815 + m->port_cfg.maxfrsize = bfa_os_htons(fcport->cfg.maxfrsize); 816 + bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa); 817 + bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo); 818 + bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi); 1026 819 1027 820 /** 1028 821 * queue I/O message to firmware 1029 822 */ 1030 - bfa_reqq_produce(port->bfa, BFA_REQQ_PORT); 823 + bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); 1031 824 return BFA_TRUE; 1032 825 } 1033 826 ··· 1037 826 * Send port disable message to firmware. 1038 827 */ 1039 828 static bfa_boolean_t 1040 - bfa_pport_send_disable(struct bfa_pport_s *port) 829 + bfa_fcport_send_disable(struct bfa_fcport_s *fcport) 1041 830 { 1042 - bfi_pport_disable_req_t *m; 831 + struct bfi_fcport_req_s *m; 1043 832 1044 833 /** 1045 834 * Increment message tag before queue check, so that responses to old 1046 835 * requests are discarded. 
1047 836 */ 1048 - port->msgtag++; 837 + fcport->msgtag++; 1049 838 1050 839 /** 1051 840 * check for room in queue to send request now 1052 841 */ 1053 - m = bfa_reqq_next(port->bfa, BFA_REQQ_PORT); 842 + m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); 1054 843 if (!m) { 1055 - bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->reqq_wait); 844 + bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT, 845 + &fcport->reqq_wait); 1056 846 return BFA_FALSE; 1057 847 } 1058 848 1059 - bfi_h2i_set(m->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_DISABLE_REQ, 1060 - bfa_lpuid(port->bfa)); 1061 - m->msgtag = port->msgtag; 849 + bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ, 850 + bfa_lpuid(fcport->bfa)); 851 + m->msgtag = fcport->msgtag; 1062 852 1063 853 /** 1064 854 * queue I/O message to firmware 1065 855 */ 1066 - bfa_reqq_produce(port->bfa, BFA_REQQ_PORT); 856 + bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); 1067 857 1068 858 return BFA_TRUE; 1069 859 } 1070 860 1071 861 static void 1072 - bfa_pport_set_wwns(struct bfa_pport_s *port) 862 + bfa_fcport_set_wwns(struct bfa_fcport_s *fcport) 1073 863 { 1074 - port->pwwn = bfa_ioc_get_pwwn(&port->bfa->ioc); 1075 - port->nwwn = bfa_ioc_get_nwwn(&port->bfa->ioc); 864 + fcport->pwwn = bfa_ioc_get_pwwn(&fcport->bfa->ioc); 865 + fcport->nwwn = bfa_ioc_get_nwwn(&fcport->bfa->ioc); 1076 866 1077 - bfa_trc(port->bfa, port->pwwn); 1078 - bfa_trc(port->bfa, port->nwwn); 867 + bfa_trc(fcport->bfa, fcport->pwwn); 868 + bfa_trc(fcport->bfa, fcport->nwwn); 1079 869 } 1080 870 1081 871 static void 1082 - bfa_port_send_txcredit(void *port_cbarg) 872 + bfa_fcport_send_txcredit(void *port_cbarg) 1083 873 { 1084 874 1085 - struct bfa_pport_s *port = port_cbarg; 1086 - struct bfi_pport_set_svc_params_req_s *m; 875 + struct bfa_fcport_s *fcport = port_cbarg; 876 + struct bfi_fcport_set_svc_params_req_s *m; 1087 877 1088 878 /** 1089 879 * check for room in queue to send request now 1090 880 */ 1091 - m = bfa_reqq_next(port->bfa, BFA_REQQ_PORT); 881 + m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); 1092 882 if (!m) { 1093 - bfa_trc(port->bfa, port->cfg.tx_bbcredit); 883 + bfa_trc(fcport->bfa, fcport->cfg.tx_bbcredit); 1094 884 return; 1095 885 } 1096 886 1097 - bfi_h2i_set(m->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_SET_SVC_PARAMS_REQ, 1098 - bfa_lpuid(port->bfa)); 1099 - m->tx_bbcredit = bfa_os_htons((u16) port->cfg.tx_bbcredit); 887 + bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ, 888 + bfa_lpuid(fcport->bfa)); 889 + m->tx_bbcredit = bfa_os_htons((u16) fcport->cfg.tx_bbcredit); 1100 890 1101 891 /** 1102 892 * queue I/O message to firmware 1103 893 */ 1104 - bfa_reqq_produce(port->bfa, BFA_REQQ_PORT); 894 + bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); 1105 895 } 1106 896 897 + static void 898 + bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d, 899 + struct bfa_qos_stats_s *s) 900 + { 901 + u32 *dip = (u32 *) d; 902 + u32 *sip = (u32 *) s; 903 + int i; 1107 904 905 + /* Now swap the 32 bit fields */ 906 + for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i) 907 + dip[i] = bfa_os_ntohl(sip[i]); 908 + } 909 + 910 + static void 911 + bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d, 912 + struct bfa_fcoe_stats_s *s) 913 + { 914 + u32 *dip = (u32 *) d; 915 + u32 *sip = (u32 *) s; 916 + int i; 917 + 918 + for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32)); 919 + i = i + 2) { 920 + #ifdef __BIGENDIAN 921 + dip[i] = bfa_os_ntohl(sip[i]); 922 + dip[i + 1] = bfa_os_ntohl(sip[i + 1]); 923 + #else 924 + dip[i] = bfa_os_ntohl(sip[i + 1]); 925 + 
dip[i + 1] = bfa_os_ntohl(sip[i]); 926 + #endif 927 + } 928 + } 929 + 930 + static void 931 + __bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete) 932 + { 933 + struct bfa_fcport_s *fcport = cbarg; 934 + 935 + if (complete) { 936 + if (fcport->stats_status == BFA_STATUS_OK) { 937 + 938 + /* Swap FC QoS or FCoE stats */ 939 + if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) 940 + bfa_fcport_qos_stats_swap( 941 + &fcport->stats_ret->fcqos, 942 + &fcport->stats->fcqos); 943 + else 944 + bfa_fcport_fcoe_stats_swap( 945 + &fcport->stats_ret->fcoe, 946 + &fcport->stats->fcoe); 947 + } 948 + fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status); 949 + } else { 950 + fcport->stats_busy = BFA_FALSE; 951 + fcport->stats_status = BFA_STATUS_OK; 952 + } 953 + } 954 + 955 + static void 956 + bfa_fcport_stats_get_timeout(void *cbarg) 957 + { 958 + struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg; 959 + 960 + bfa_trc(fcport->bfa, fcport->stats_qfull); 961 + 962 + if (fcport->stats_qfull) { 963 + bfa_reqq_wcancel(&fcport->stats_reqq_wait); 964 + fcport->stats_qfull = BFA_FALSE; 965 + } 966 + 967 + fcport->stats_status = BFA_STATUS_ETIMER; 968 + bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats_get, 969 + fcport); 970 + } 971 + 972 + static void 973 + bfa_fcport_send_stats_get(void *cbarg) 974 + { 975 + struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg; 976 + struct bfi_fcport_req_s *msg; 977 + 978 + msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); 979 + 980 + if (!msg) { 981 + fcport->stats_qfull = BFA_TRUE; 982 + bfa_reqq_winit(&fcport->stats_reqq_wait, 983 + bfa_fcport_send_stats_get, fcport); 984 + bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT, 985 + &fcport->stats_reqq_wait); 986 + return; 987 + } 988 + fcport->stats_qfull = BFA_FALSE; 989 + 990 + bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s)); 991 + bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ, 992 + bfa_lpuid(fcport->bfa)); 993 + bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); 994 + } 995 + 996 + static void 997 + __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete) 998 + { 999 + struct bfa_fcport_s *fcport = cbarg; 1000 + 1001 + if (complete) { 1002 + fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status); 1003 + } else { 1004 + fcport->stats_busy = BFA_FALSE; 1005 + fcport->stats_status = BFA_STATUS_OK; 1006 + } 1007 + } 1008 + 1009 + static void 1010 + bfa_fcport_stats_clr_timeout(void *cbarg) 1011 + { 1012 + struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg; 1013 + 1014 + bfa_trc(fcport->bfa, fcport->stats_qfull); 1015 + 1016 + if (fcport->stats_qfull) { 1017 + bfa_reqq_wcancel(&fcport->stats_reqq_wait); 1018 + fcport->stats_qfull = BFA_FALSE; 1019 + } 1020 + 1021 + fcport->stats_status = BFA_STATUS_ETIMER; 1022 + bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, 1023 + __bfa_cb_fcport_stats_clr, fcport); 1024 + } 1025 + 1026 + static void 1027 + bfa_fcport_send_stats_clear(void *cbarg) 1028 + { 1029 + struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg; 1030 + struct bfi_fcport_req_s *msg; 1031 + 1032 + msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); 1033 + 1034 + if (!msg) { 1035 + fcport->stats_qfull = BFA_TRUE; 1036 + bfa_reqq_winit(&fcport->stats_reqq_wait, 1037 + bfa_fcport_send_stats_clear, fcport); 1038 + bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT, 1039 + &fcport->stats_reqq_wait); 1040 + return; 1041 + } 1042 + fcport->stats_qfull = BFA_FALSE; 1043 + 1044 + bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s)); 1045 + bfi_h2i_set(msg->mh, 
BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ, 1046 + bfa_lpuid(fcport->bfa)); 1047 + bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); 1048 + } 1108 1049 1109 1050 /** 1110 1051 * bfa_pport_public ··· 1266 903 * Firmware message handler. 1267 904 */ 1268 905 void 1269 - bfa_pport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) 906 + bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) 1270 907 { 1271 - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1272 - union bfi_pport_i2h_msg_u i2hmsg; 908 + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 909 + union bfi_fcport_i2h_msg_u i2hmsg; 1273 910 1274 911 i2hmsg.msg = msg; 1275 - pport->event_arg.i2hmsg = i2hmsg; 912 + fcport->event_arg.i2hmsg = i2hmsg; 1276 913 1277 914 switch (msg->mhdr.msg_id) { 1278 - case BFI_PPORT_I2H_ENABLE_RSP: 1279 - if (pport->msgtag == i2hmsg.enable_rsp->msgtag) 1280 - bfa_sm_send_event(pport, BFA_PPORT_SM_FWRSP); 915 + case BFI_FCPORT_I2H_ENABLE_RSP: 916 + if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) 917 + bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP); 1281 918 break; 1282 919 1283 - case BFI_PPORT_I2H_DISABLE_RSP: 1284 - if (pport->msgtag == i2hmsg.enable_rsp->msgtag) 1285 - bfa_sm_send_event(pport, BFA_PPORT_SM_FWRSP); 920 + case BFI_FCPORT_I2H_DISABLE_RSP: 921 + if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) 922 + bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP); 1286 923 break; 1287 924 1288 - case BFI_PPORT_I2H_EVENT: 925 + case BFI_FCPORT_I2H_EVENT: 1289 926 switch (i2hmsg.event->link_state.linkstate) { 1290 927 case BFA_PPORT_LINKUP: 1291 - bfa_sm_send_event(pport, BFA_PPORT_SM_LINKUP); 928 + bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP); 1292 929 break; 1293 930 case BFA_PPORT_LINKDOWN: 1294 - bfa_sm_send_event(pport, BFA_PPORT_SM_LINKDOWN); 931 + bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN); 1295 932 break; 1296 933 case BFA_PPORT_TRUNK_LINKDOWN: 1297 934 /** todo: event notification */ ··· 1299 936 } 1300 937 break; 1301 938 1302 - case BFI_PPORT_I2H_GET_STATS_RSP: 1303 - case BFI_PPORT_I2H_GET_QOS_STATS_RSP: 939 + case BFI_FCPORT_I2H_STATS_GET_RSP: 1304 940 /* 1305 941 * check for timer pop before processing the rsp 1306 942 */ 1307 - if (pport->stats_busy == BFA_FALSE 1308 - || pport->stats_status == BFA_STATUS_ETIMER) 943 + if (fcport->stats_busy == BFA_FALSE || 944 + fcport->stats_status == BFA_STATUS_ETIMER) 1309 945 break; 1310 946 1311 - bfa_timer_stop(&pport->timer); 1312 - pport->stats_status = i2hmsg.getstats_rsp->status; 1313 - bfa_cb_queue(pport->bfa, &pport->hcb_qe, __bfa_cb_port_stats, 1314 - pport); 947 + bfa_timer_stop(&fcport->timer); 948 + fcport->stats_status = i2hmsg.pstatsget_rsp->status; 949 + bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, 950 + __bfa_cb_fcport_stats_get, fcport); 1315 951 break; 1316 - case BFI_PPORT_I2H_CLEAR_STATS_RSP: 1317 - case BFI_PPORT_I2H_CLEAR_QOS_STATS_RSP: 952 + 953 + case BFI_FCPORT_I2H_STATS_CLEAR_RSP: 1318 954 /* 1319 955 * check for timer pop before processing the rsp 1320 956 */ 1321 - if (pport->stats_busy == BFA_FALSE 1322 - || pport->stats_status == BFA_STATUS_ETIMER) 957 + if (fcport->stats_busy == BFA_FALSE || 958 + fcport->stats_status == BFA_STATUS_ETIMER) 1323 959 break; 1324 960 1325 - bfa_timer_stop(&pport->timer); 1326 - pport->stats_status = BFA_STATUS_OK; 1327 - bfa_cb_queue(pport->bfa, &pport->hcb_qe, 1328 - __bfa_cb_port_stats_clr, pport); 961 + bfa_timer_stop(&fcport->timer); 962 + fcport->stats_status = BFA_STATUS_OK; 963 + bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, 964 + __bfa_cb_fcport_stats_clr, fcport); 1329 965 break; 
1330 966 1331 967 default: 1332 968 bfa_assert(0); 969 + break; 1333 970 } 1334 971 } 1335 - 1336 - 1337 972 1338 973 /** 1339 974 * bfa_pport_api ··· 1341 980 * Registered callback for port events. 1342 981 */ 1343 982 void 1344 - bfa_pport_event_register(struct bfa_s *bfa, 983 + bfa_fcport_event_register(struct bfa_s *bfa, 1345 984 void (*cbfn) (void *cbarg, bfa_pport_event_t event), 1346 985 void *cbarg) 1347 986 { 1348 - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 987 + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 1349 988 1350 - pport->event_cbfn = cbfn; 1351 - pport->event_cbarg = cbarg; 989 + fcport->event_cbfn = cbfn; 990 + fcport->event_cbarg = cbarg; 1352 991 } 1353 992 1354 993 bfa_status_t 1355 - bfa_pport_enable(struct bfa_s *bfa) 994 + bfa_fcport_enable(struct bfa_s *bfa) 1356 995 { 1357 - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 996 + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 1358 997 1359 - if (pport->diag_busy) 998 + if (fcport->diag_busy) 1360 999 return BFA_STATUS_DIAG_BUSY; 1361 1000 else if (bfa_sm_cmp_state 1362 - (BFA_PORT_MOD(bfa), bfa_pport_sm_disabling_qwait)) 1001 + (BFA_FCPORT_MOD(bfa), bfa_fcport_sm_disabling_qwait)) 1363 1002 return BFA_STATUS_DEVBUSY; 1364 1003 1365 - bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_ENABLE); 1004 + bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE); 1366 1005 return BFA_STATUS_OK; 1367 1006 } 1368 1007 1369 1008 bfa_status_t 1370 - bfa_pport_disable(struct bfa_s *bfa) 1009 + bfa_fcport_disable(struct bfa_s *bfa) 1371 1010 { 1372 - bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_DISABLE); 1011 + bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE); 1373 1012 return BFA_STATUS_OK; 1374 1013 } 1375 1014 ··· 1377 1016 * Configure port speed. 1378 1017 */ 1379 1018 bfa_status_t 1380 - bfa_pport_cfg_speed(struct bfa_s *bfa, enum bfa_pport_speed speed) 1019 + bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_pport_speed speed) 1381 1020 { 1382 - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1021 + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 1383 1022 1384 1023 bfa_trc(bfa, speed); 1385 1024 1386 - if ((speed != BFA_PPORT_SPEED_AUTO) && (speed > pport->speed_sup)) { 1387 - bfa_trc(bfa, pport->speed_sup); 1025 + if ((speed != BFA_PPORT_SPEED_AUTO) && (speed > fcport->speed_sup)) { 1026 + bfa_trc(bfa, fcport->speed_sup); 1388 1027 return BFA_STATUS_UNSUPP_SPEED; 1389 1028 } 1390 1029 1391 - pport->cfg.speed = speed; 1030 + fcport->cfg.speed = speed; 1392 1031 1393 1032 return BFA_STATUS_OK; 1394 1033 } ··· 1397 1036 * Get current speed. 1398 1037 */ 1399 1038 enum bfa_pport_speed 1400 - bfa_pport_get_speed(struct bfa_s *bfa) 1039 + bfa_fcport_get_speed(struct bfa_s *bfa) 1401 1040 { 1402 - struct bfa_pport_s *port = BFA_PORT_MOD(bfa); 1041 + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 1403 1042 1404 - return port->speed; 1043 + return fcport->speed; 1405 1044 } 1406 1045 1407 1046 /** 1408 1047 * Configure port topology. 
1409 1048 */ 1410 1049 bfa_status_t 1411 - bfa_pport_cfg_topology(struct bfa_s *bfa, enum bfa_pport_topology topology) 1050 + bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_pport_topology topology) 1412 1051 { 1413 - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1052 + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 1414 1053 1415 1054 bfa_trc(bfa, topology); 1416 - bfa_trc(bfa, pport->cfg.topology); 1055 + bfa_trc(bfa, fcport->cfg.topology); 1417 1056 1418 1057 switch (topology) { 1419 1058 case BFA_PPORT_TOPOLOGY_P2P: ··· 1425 1064 return BFA_STATUS_EINVAL; 1426 1065 } 1427 1066 1428 - pport->cfg.topology = topology; 1067 + fcport->cfg.topology = topology; 1429 1068 return BFA_STATUS_OK; 1430 1069 } 1431 1070 ··· 1433 1072 * Get current topology. 1434 1073 */ 1435 1074 enum bfa_pport_topology 1436 - bfa_pport_get_topology(struct bfa_s *bfa) 1075 + bfa_fcport_get_topology(struct bfa_s *bfa) 1437 1076 { 1438 - struct bfa_pport_s *port = BFA_PORT_MOD(bfa); 1077 + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 1439 1078 1440 - return port->topology; 1079 + return fcport->topology; 1441 1080 } 1442 1081 1443 1082 bfa_status_t 1444 - bfa_pport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa) 1083 + bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa) 1445 1084 { 1446 - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1085 + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 1447 1086 1448 1087 bfa_trc(bfa, alpa); 1449 - bfa_trc(bfa, pport->cfg.cfg_hardalpa); 1450 - bfa_trc(bfa, pport->cfg.hardalpa); 1088 + bfa_trc(bfa, fcport->cfg.cfg_hardalpa); 1089 + bfa_trc(bfa, fcport->cfg.hardalpa); 1451 1090 1452 - pport->cfg.cfg_hardalpa = BFA_TRUE; 1453 - pport->cfg.hardalpa = alpa; 1091 + fcport->cfg.cfg_hardalpa = BFA_TRUE; 1092 + fcport->cfg.hardalpa = alpa; 1454 1093 1455 1094 return BFA_STATUS_OK; 1456 1095 } 1457 1096 1458 1097 bfa_status_t 1459 - bfa_pport_clr_hardalpa(struct bfa_s *bfa) 1098 + bfa_fcport_clr_hardalpa(struct bfa_s *bfa) 1460 1099 { 1461 - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1100 + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 1462 1101 1463 - bfa_trc(bfa, pport->cfg.cfg_hardalpa); 1464 - bfa_trc(bfa, pport->cfg.hardalpa); 1102 + bfa_trc(bfa, fcport->cfg.cfg_hardalpa); 1103 + bfa_trc(bfa, fcport->cfg.hardalpa); 1465 1104 1466 - pport->cfg.cfg_hardalpa = BFA_FALSE; 1105 + fcport->cfg.cfg_hardalpa = BFA_FALSE; 1467 1106 return BFA_STATUS_OK; 1468 1107 } 1469 1108 1470 1109 bfa_boolean_t 1471 - bfa_pport_get_hardalpa(struct bfa_s *bfa, u8 *alpa) 1110 + bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa) 1472 1111 { 1473 - struct bfa_pport_s *port = BFA_PORT_MOD(bfa); 1112 + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 1474 1113 1475 - *alpa = port->cfg.hardalpa; 1476 - return port->cfg.cfg_hardalpa; 1114 + *alpa = fcport->cfg.hardalpa; 1115 + return fcport->cfg.cfg_hardalpa; 1477 1116 } 1478 1117 1479 1118 u8 1480 - bfa_pport_get_myalpa(struct bfa_s *bfa) 1119 + bfa_fcport_get_myalpa(struct bfa_s *bfa) 1481 1120 { 1482 - struct bfa_pport_s *port = BFA_PORT_MOD(bfa); 1121 + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 1483 1122 1484 - return port->myalpa; 1123 + return fcport->myalpa; 1485 1124 } 1486 1125 1487 1126 bfa_status_t 1488 - bfa_pport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize) 1127 + bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize) 1489 1128 { 1490 - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1129 + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 1491 1130 1492 1131 bfa_trc(bfa, maxfrsize); 1493 - bfa_trc(bfa, 
pport->cfg.maxfrsize); 1132 + bfa_trc(bfa, fcport->cfg.maxfrsize); 1494 1133 1495 1134 /* 1496 1135 * with in range ··· 1504 1143 if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1))) 1505 1144 return BFA_STATUS_INVLD_DFSZ; 1506 1145 1507 - pport->cfg.maxfrsize = maxfrsize; 1146 + fcport->cfg.maxfrsize = maxfrsize; 1508 1147 return BFA_STATUS_OK; 1509 1148 } 1510 1149 1511 1150 u16 1512 - bfa_pport_get_maxfrsize(struct bfa_s *bfa) 1151 + bfa_fcport_get_maxfrsize(struct bfa_s *bfa) 1513 1152 { 1514 - struct bfa_pport_s *port = BFA_PORT_MOD(bfa); 1153 + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 1515 1154 1516 - return port->cfg.maxfrsize; 1155 + return fcport->cfg.maxfrsize; 1517 1156 } 1518 1157 1519 1158 u32 1520 - bfa_pport_mypid(struct bfa_s *bfa) 1159 + bfa_fcport_mypid(struct bfa_s *bfa) 1521 1160 { 1522 - struct bfa_pport_s *port = BFA_PORT_MOD(bfa); 1161 + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 1523 1162 1524 - return port->mypid; 1163 + return fcport->mypid; 1525 1164 } 1526 1165 1527 1166 u8 1528 - bfa_pport_get_rx_bbcredit(struct bfa_s *bfa) 1167 + bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa) 1529 1168 { 1530 - struct bfa_pport_s *port = BFA_PORT_MOD(bfa); 1169 + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 1531 1170 1532 - return port->cfg.rx_bbcredit; 1171 + return fcport->cfg.rx_bbcredit; 1533 1172 } 1534 1173 1535 1174 void 1536 - bfa_pport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit) 1175 + bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit) 1537 1176 { 1538 - struct bfa_pport_s *port = BFA_PORT_MOD(bfa); 1177 + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 1539 1178 1540 - port->cfg.tx_bbcredit = (u8) tx_bbcredit; 1541 - bfa_port_send_txcredit(port); 1179 + fcport->cfg.tx_bbcredit = (u8) tx_bbcredit; 1180 + bfa_fcport_send_txcredit(fcport); 1542 1181 } 1543 1182 1544 1183 /** ··· 1546 1185 */ 1547 1186 1548 1187 wwn_t 1549 - bfa_pport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node) 1188 + bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node) 1550 1189 { 1551 - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1190 + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 1552 1191 if (node) 1553 - return pport->nwwn; 1192 + return fcport->nwwn; 1554 1193 else 1555 - return pport->pwwn; 1194 + return fcport->pwwn; 1556 1195 } 1557 1196 1558 1197 void 1559 - bfa_pport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr) 1198 + bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr) 1560 1199 { 1561 - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1200 + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 1562 1201 1563 1202 bfa_os_memset(attr, 0, sizeof(struct bfa_pport_attr_s)); 1564 1203 1565 - attr->nwwn = pport->nwwn; 1566 - attr->pwwn = pport->pwwn; 1204 + attr->nwwn = fcport->nwwn; 1205 + attr->pwwn = fcport->pwwn; 1567 1206 1568 - bfa_os_memcpy(&attr->pport_cfg, &pport->cfg, 1207 + bfa_os_memcpy(&attr->pport_cfg, &fcport->cfg, 1569 1208 sizeof(struct bfa_pport_cfg_s)); 1570 1209 /* 1571 1210 * speed attributes 1572 1211 */ 1573 - attr->pport_cfg.speed = pport->cfg.speed; 1574 - attr->speed_supported = pport->speed_sup; 1575 - attr->speed = pport->speed; 1212 + attr->pport_cfg.speed = fcport->cfg.speed; 1213 + attr->speed_supported = fcport->speed_sup; 1214 + attr->speed = fcport->speed; 1576 1215 attr->cos_supported = FC_CLASS_3; 1577 1216 1578 1217 /* 1579 1218 * topology attributes 1580 1219 */ 1581 - attr->pport_cfg.topology = pport->cfg.topology; 1582 - attr->topology = pport->topology; 
1220 + attr->pport_cfg.topology = fcport->cfg.topology; 1221 + attr->topology = fcport->topology; 1583 1222 1584 1223 /* 1585 1224 * beacon attributes 1586 1225 */ 1587 - attr->beacon = pport->beacon; 1588 - attr->link_e2e_beacon = pport->link_e2e_beacon; 1589 - attr->plog_enabled = bfa_plog_get_setting(pport->bfa->plog); 1226 + attr->beacon = fcport->beacon; 1227 + attr->link_e2e_beacon = fcport->link_e2e_beacon; 1228 + attr->plog_enabled = bfa_plog_get_setting(fcport->bfa->plog); 1590 1229 1591 1230 attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa); 1592 1231 attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa); 1593 - attr->port_state = bfa_sm_to_state(hal_pport_sm_table, pport->sm); 1594 - if (bfa_ioc_is_disabled(&pport->bfa->ioc)) 1232 + attr->port_state = bfa_sm_to_state(hal_pport_sm_table, fcport->sm); 1233 + if (bfa_ioc_is_disabled(&fcport->bfa->ioc)) 1595 1234 attr->port_state = BFA_PPORT_ST_IOCDIS; 1596 - else if (bfa_ioc_fw_mismatch(&pport->bfa->ioc)) 1235 + else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc)) 1597 1236 attr->port_state = BFA_PPORT_ST_FWMISMATCH; 1598 1237 } 1599 1238 1600 - static void 1601 - bfa_port_stats_query(void *cbarg) 1602 - { 1603 - struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg; 1604 - bfi_pport_get_stats_req_t *msg; 1605 - 1606 - msg = bfa_reqq_next(port->bfa, BFA_REQQ_PORT); 1607 - 1608 - if (!msg) { 1609 - port->stats_qfull = BFA_TRUE; 1610 - bfa_reqq_winit(&port->stats_reqq_wait, bfa_port_stats_query, 1611 - port); 1612 - bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->stats_reqq_wait); 1613 - return; 1614 - } 1615 - port->stats_qfull = BFA_FALSE; 1616 - 1617 - bfa_os_memset(msg, 0, sizeof(bfi_pport_get_stats_req_t)); 1618 - bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_GET_STATS_REQ, 1619 - bfa_lpuid(port->bfa)); 1620 - bfa_reqq_produce(port->bfa, BFA_REQQ_PORT); 1621 - 1622 - return; 1623 - } 1624 - 1625 - static void 1626 - bfa_port_stats_clear(void *cbarg) 1627 - { 1628 - struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg; 1629 - bfi_pport_clear_stats_req_t *msg; 1630 - 1631 - msg = bfa_reqq_next(port->bfa, BFA_REQQ_PORT); 1632 - 1633 - if (!msg) { 1634 - port->stats_qfull = BFA_TRUE; 1635 - bfa_reqq_winit(&port->stats_reqq_wait, bfa_port_stats_clear, 1636 - port); 1637 - bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->stats_reqq_wait); 1638 - return; 1639 - } 1640 - port->stats_qfull = BFA_FALSE; 1641 - 1642 - bfa_os_memset(msg, 0, sizeof(bfi_pport_clear_stats_req_t)); 1643 - bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_CLEAR_STATS_REQ, 1644 - bfa_lpuid(port->bfa)); 1645 - bfa_reqq_produce(port->bfa, BFA_REQQ_PORT); 1646 - return; 1647 - } 1648 - 1649 - static void 1650 - bfa_port_qos_stats_clear(void *cbarg) 1651 - { 1652 - struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg; 1653 - bfi_pport_clear_qos_stats_req_t *msg; 1654 - 1655 - msg = bfa_reqq_next(port->bfa, BFA_REQQ_PORT); 1656 - 1657 - if (!msg) { 1658 - port->stats_qfull = BFA_TRUE; 1659 - bfa_reqq_winit(&port->stats_reqq_wait, bfa_port_qos_stats_clear, 1660 - port); 1661 - bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->stats_reqq_wait); 1662 - return; 1663 - } 1664 - port->stats_qfull = BFA_FALSE; 1665 - 1666 - bfa_os_memset(msg, 0, sizeof(bfi_pport_clear_qos_stats_req_t)); 1667 - bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_CLEAR_QOS_STATS_REQ, 1668 - bfa_lpuid(port->bfa)); 1669 - bfa_reqq_produce(port->bfa, BFA_REQQ_PORT); 1670 - return; 1671 - } 1672 - 1673 - static void 1674 - bfa_pport_stats_swap(union bfa_pport_stats_u *d, union bfa_pport_stats_u 
*s) 1675 - { 1676 - u32 *dip = (u32 *) d; 1677 - u32 *sip = (u32 *) s; 1678 - int i; 1679 - 1680 - /* 1681 - * Do 64 bit fields swap first 1682 - */ 1683 - for (i = 0; 1684 - i < 1685 - ((sizeof(union bfa_pport_stats_u) - 1686 - sizeof(struct bfa_qos_stats_s)) / sizeof(u32)); i = i + 2) { 1687 - #ifdef __BIGENDIAN 1688 - dip[i] = bfa_os_ntohl(sip[i]); 1689 - dip[i + 1] = bfa_os_ntohl(sip[i + 1]); 1690 - #else 1691 - dip[i] = bfa_os_ntohl(sip[i + 1]); 1692 - dip[i + 1] = bfa_os_ntohl(sip[i]); 1693 - #endif 1694 - } 1695 - 1696 - /* 1697 - * Now swap the 32 bit fields 1698 - */ 1699 - for (; i < (sizeof(union bfa_pport_stats_u) / sizeof(u32)); ++i) 1700 - dip[i] = bfa_os_ntohl(sip[i]); 1701 - } 1702 - 1703 - static void 1704 - __bfa_cb_port_stats_clr(void *cbarg, bfa_boolean_t complete) 1705 - { 1706 - struct bfa_pport_s *port = cbarg; 1707 - 1708 - if (complete) { 1709 - port->stats_cbfn(port->stats_cbarg, port->stats_status); 1710 - } else { 1711 - port->stats_busy = BFA_FALSE; 1712 - port->stats_status = BFA_STATUS_OK; 1713 - } 1714 - } 1715 - 1716 - static void 1717 - bfa_port_stats_clr_timeout(void *cbarg) 1718 - { 1719 - struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg; 1720 - 1721 - bfa_trc(port->bfa, port->stats_qfull); 1722 - 1723 - if (port->stats_qfull) { 1724 - bfa_reqq_wcancel(&port->stats_reqq_wait); 1725 - port->stats_qfull = BFA_FALSE; 1726 - } 1727 - 1728 - port->stats_status = BFA_STATUS_ETIMER; 1729 - bfa_cb_queue(port->bfa, &port->hcb_qe, __bfa_cb_port_stats_clr, port); 1730 - } 1731 - 1732 - static void 1733 - __bfa_cb_port_stats(void *cbarg, bfa_boolean_t complete) 1734 - { 1735 - struct bfa_pport_s *port = cbarg; 1736 - 1737 - if (complete) { 1738 - if (port->stats_status == BFA_STATUS_OK) 1739 - bfa_pport_stats_swap(port->stats_ret, port->stats); 1740 - port->stats_cbfn(port->stats_cbarg, port->stats_status); 1741 - } else { 1742 - port->stats_busy = BFA_FALSE; 1743 - port->stats_status = BFA_STATUS_OK; 1744 - } 1745 - } 1746 - 1747 - static void 1748 - bfa_port_stats_timeout(void *cbarg) 1749 - { 1750 - struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg; 1751 - 1752 - bfa_trc(port->bfa, port->stats_qfull); 1753 - 1754 - if (port->stats_qfull) { 1755 - bfa_reqq_wcancel(&port->stats_reqq_wait); 1756 - port->stats_qfull = BFA_FALSE; 1757 - } 1758 - 1759 - port->stats_status = BFA_STATUS_ETIMER; 1760 - bfa_cb_queue(port->bfa, &port->hcb_qe, __bfa_cb_port_stats, port); 1761 - } 1762 - 1763 - #define BFA_PORT_STATS_TOV 1000 1239 + #define BFA_FCPORT_STATS_TOV 1000 1764 1240 1765 1241 /** 1766 - * Fetch port attributes. 1242 + * Fetch port attributes (FCQoS or FCoE). 
1767 1243 */ 1768 1244 bfa_status_t 1769 - bfa_pport_get_stats(struct bfa_s *bfa, union bfa_pport_stats_u *stats, 1245 + bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats, 1770 1246 bfa_cb_pport_t cbfn, void *cbarg) 1771 1247 { 1772 - struct bfa_pport_s *port = BFA_PORT_MOD(bfa); 1248 + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 1773 1249 1774 - if (port->stats_busy) { 1775 - bfa_trc(bfa, port->stats_busy); 1250 + if (fcport->stats_busy) { 1251 + bfa_trc(bfa, fcport->stats_busy); 1776 1252 return BFA_STATUS_DEVBUSY; 1777 1253 } 1778 1254 1779 - port->stats_busy = BFA_TRUE; 1780 - port->stats_ret = stats; 1781 - port->stats_cbfn = cbfn; 1782 - port->stats_cbarg = cbarg; 1255 + fcport->stats_busy = BFA_TRUE; 1256 + fcport->stats_ret = stats; 1257 + fcport->stats_cbfn = cbfn; 1258 + fcport->stats_cbarg = cbarg; 1783 1259 1784 - bfa_port_stats_query(port); 1260 + bfa_fcport_send_stats_get(fcport); 1785 1261 1786 - bfa_timer_start(bfa, &port->timer, bfa_port_stats_timeout, port, 1787 - BFA_PORT_STATS_TOV); 1262 + bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_get_timeout, 1263 + fcport, BFA_FCPORT_STATS_TOV); 1788 1264 return BFA_STATUS_OK; 1789 1265 } 1790 1266 1267 + /** 1268 + * Reset port statistics (FCQoS or FCoE). 1269 + */ 1791 1270 bfa_status_t 1792 - bfa_pport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg) 1271 + bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg) 1793 1272 { 1794 - struct bfa_pport_s *port = BFA_PORT_MOD(bfa); 1273 + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 1795 1274 1796 - if (port->stats_busy) { 1797 - bfa_trc(bfa, port->stats_busy); 1275 + if (fcport->stats_busy) { 1276 + bfa_trc(bfa, fcport->stats_busy); 1798 1277 return BFA_STATUS_DEVBUSY; 1799 1278 } 1800 1279 1801 - port->stats_busy = BFA_TRUE; 1802 - port->stats_cbfn = cbfn; 1803 - port->stats_cbarg = cbarg; 1280 + fcport->stats_busy = BFA_TRUE; 1281 + fcport->stats_cbfn = cbfn; 1282 + fcport->stats_cbarg = cbarg; 1804 1283 1805 - bfa_port_stats_clear(port); 1284 + bfa_fcport_send_stats_clear(fcport); 1806 1285 1807 - bfa_timer_start(bfa, &port->timer, bfa_port_stats_clr_timeout, port, 1808 - BFA_PORT_STATS_TOV); 1286 + bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout, 1287 + fcport, BFA_FCPORT_STATS_TOV); 1809 1288 return BFA_STATUS_OK; 1810 1289 } 1811 1290 1291 + /** 1292 + * Fetch FCQoS port statistics 1293 + */ 1812 1294 bfa_status_t 1813 - bfa_pport_trunk_enable(struct bfa_s *bfa, u8 bitmap) 1295 + bfa_fcport_get_qos_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats, 1296 + bfa_cb_pport_t cbfn, void *cbarg) 1814 1297 { 1815 - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1298 + /* Meaningful only for FC mode */ 1299 + bfa_assert(bfa_ioc_get_fcmode(&bfa->ioc)); 1300 + 1301 + return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg); 1302 + } 1303 + 1304 + /** 1305 + * Reset FCoE port statistics 1306 + */ 1307 + bfa_status_t 1308 + bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg) 1309 + { 1310 + /* Meaningful only for FC mode */ 1311 + bfa_assert(bfa_ioc_get_fcmode(&bfa->ioc)); 1312 + 1313 + return bfa_fcport_clear_stats(bfa, cbfn, cbarg); 1314 + } 1315 + 1316 + /** 1317 + * Fetch FCQoS port statistics 1318 + */ 1319 + bfa_status_t 1320 + bfa_fcport_get_fcoe_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats, 1321 + bfa_cb_pport_t cbfn, void *cbarg) 1322 + { 1323 + /* Meaningful only for FCoE mode */ 1324 + bfa_assert(!bfa_ioc_get_fcmode(&bfa->ioc)); 1325 + 1326 
+ return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg); 1327 + } 1328 + 1329 + /** 1330 + * Reset FCoE port statistics 1331 + */ 1332 + bfa_status_t 1333 + bfa_fcport_clear_fcoe_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg) 1334 + { 1335 + /* Meaningful only for FCoE mode */ 1336 + bfa_assert(!bfa_ioc_get_fcmode(&bfa->ioc)); 1337 + 1338 + return bfa_fcport_clear_stats(bfa, cbfn, cbarg); 1339 + } 1340 + 1341 + bfa_status_t 1342 + bfa_fcport_trunk_enable(struct bfa_s *bfa, u8 bitmap) 1343 + { 1344 + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 1816 1345 1817 1346 bfa_trc(bfa, bitmap); 1818 - bfa_trc(bfa, pport->cfg.trunked); 1819 - bfa_trc(bfa, pport->cfg.trunk_ports); 1347 + bfa_trc(bfa, fcport->cfg.trunked); 1348 + bfa_trc(bfa, fcport->cfg.trunk_ports); 1820 1349 1821 1350 if (!bitmap || (bitmap & (bitmap - 1))) 1822 1351 return BFA_STATUS_EINVAL; 1823 1352 1824 - pport->cfg.trunked = BFA_TRUE; 1825 - pport->cfg.trunk_ports = bitmap; 1353 + fcport->cfg.trunked = BFA_TRUE; 1354 + fcport->cfg.trunk_ports = bitmap; 1826 1355 1827 1356 return BFA_STATUS_OK; 1828 1357 } 1829 1358 1830 1359 void 1831 - bfa_pport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr) 1360 + bfa_fcport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr) 1832 1361 { 1833 - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1362 + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 1834 1363 1835 - qos_attr->state = bfa_os_ntohl(pport->qos_attr.state); 1836 - qos_attr->total_bb_cr = bfa_os_ntohl(pport->qos_attr.total_bb_cr); 1364 + qos_attr->state = bfa_os_ntohl(fcport->qos_attr.state); 1365 + qos_attr->total_bb_cr = bfa_os_ntohl(fcport->qos_attr.total_bb_cr); 1837 1366 } 1838 1367 1839 1368 void 1840 - bfa_pport_qos_get_vc_attr(struct bfa_s *bfa, 1369 + bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa, 1841 1370 struct bfa_qos_vc_attr_s *qos_vc_attr) 1842 1371 { 1843 - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1844 - struct bfa_qos_vc_attr_s *bfa_vc_attr = &pport->qos_vc_attr; 1372 + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 1373 + struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr; 1845 1374 u32 i = 0; 1846 1375 1847 1376 qos_vc_attr->total_vc_count = bfa_os_ntohs(bfa_vc_attr->total_vc_count); ··· 1754 1503 } 1755 1504 1756 1505 /** 1757 - * Fetch QoS Stats. 1758 - */ 1759 - bfa_status_t 1760 - bfa_pport_get_qos_stats(struct bfa_s *bfa, union bfa_pport_stats_u *stats, 1761 - bfa_cb_pport_t cbfn, void *cbarg) 1762 - { 1763 - /* 1764 - * QoS stats is embedded in port stats 1765 - */ 1766 - return bfa_pport_get_stats(bfa, stats, cbfn, cbarg); 1767 - } 1768 - 1769 - bfa_status_t 1770 - bfa_pport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg) 1771 - { 1772 - struct bfa_pport_s *port = BFA_PORT_MOD(bfa); 1773 - 1774 - if (port->stats_busy) { 1775 - bfa_trc(bfa, port->stats_busy); 1776 - return BFA_STATUS_DEVBUSY; 1777 - } 1778 - 1779 - port->stats_busy = BFA_TRUE; 1780 - port->stats_cbfn = cbfn; 1781 - port->stats_cbarg = cbarg; 1782 - 1783 - bfa_port_qos_stats_clear(port); 1784 - 1785 - bfa_timer_start(bfa, &port->timer, bfa_port_stats_clr_timeout, port, 1786 - BFA_PORT_STATS_TOV); 1787 - return BFA_STATUS_OK; 1788 - } 1789 - 1790 - /** 1791 1506 * Fetch port attributes. 
1792 1507 */ 1793 1508 bfa_status_t 1794 - bfa_pport_trunk_disable(struct bfa_s *bfa) 1509 + bfa_fcport_trunk_disable(struct bfa_s *bfa) 1795 1510 { 1796 1511 return BFA_STATUS_OK; 1797 1512 } 1798 1513 1799 1514 bfa_boolean_t 1800 - bfa_pport_trunk_query(struct bfa_s *bfa, u32 *bitmap) 1515 + bfa_fcport_trunk_query(struct bfa_s *bfa, u32 *bitmap) 1801 1516 { 1802 - struct bfa_pport_s *port = BFA_PORT_MOD(bfa); 1517 + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 1803 1518 1804 - *bitmap = port->cfg.trunk_ports; 1805 - return port->cfg.trunked; 1519 + *bitmap = fcport->cfg.trunk_ports; 1520 + return fcport->cfg.trunked; 1806 1521 } 1807 1522 1808 1523 bfa_boolean_t 1809 - bfa_pport_is_disabled(struct bfa_s *bfa) 1524 + bfa_fcport_is_disabled(struct bfa_s *bfa) 1810 1525 { 1811 - struct bfa_pport_s *port = BFA_PORT_MOD(bfa); 1526 + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 1812 1527 1813 - return bfa_sm_to_state(hal_pport_sm_table, port->sm) == 1528 + return bfa_sm_to_state(hal_pport_sm_table, fcport->sm) == 1814 1529 BFA_PPORT_ST_DISABLED; 1815 1530 1816 1531 } 1817 1532 1818 1533 bfa_boolean_t 1819 - bfa_pport_is_ratelim(struct bfa_s *bfa) 1534 + bfa_fcport_is_ratelim(struct bfa_s *bfa) 1820 1535 { 1821 - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1536 + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 1822 1537 1823 - return pport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE; 1538 + return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE; 1824 1539 1825 1540 } 1826 1541 1827 1542 void 1828 - bfa_pport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off) 1543 + bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off) 1829 1544 { 1830 - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1545 + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 1546 + enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa); 1831 1547 1832 1548 bfa_trc(bfa, on_off); 1833 - bfa_trc(bfa, pport->cfg.qos_enabled); 1549 + bfa_trc(bfa, fcport->cfg.qos_enabled); 1834 1550 1835 - pport->cfg.qos_enabled = on_off; 1551 + bfa_trc(bfa, ioc_type); 1552 + 1553 + if (ioc_type == BFA_IOC_TYPE_FC) 1554 + fcport->cfg.qos_enabled = on_off; 1836 1555 } 1837 1556 1838 1557 void 1839 - bfa_pport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off) 1558 + bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off) 1840 1559 { 1841 - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1560 + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 1842 1561 1843 1562 bfa_trc(bfa, on_off); 1844 - bfa_trc(bfa, pport->cfg.ratelimit); 1563 + bfa_trc(bfa, fcport->cfg.ratelimit); 1845 1564 1846 - pport->cfg.ratelimit = on_off; 1847 - if (pport->cfg.trl_def_speed == BFA_PPORT_SPEED_UNKNOWN) 1848 - pport->cfg.trl_def_speed = BFA_PPORT_SPEED_1GBPS; 1565 + fcport->cfg.ratelimit = on_off; 1566 + if (fcport->cfg.trl_def_speed == BFA_PPORT_SPEED_UNKNOWN) 1567 + fcport->cfg.trl_def_speed = BFA_PPORT_SPEED_1GBPS; 1849 1568 } 1850 1569 1851 1570 /** 1852 1571 * Configure default minimum ratelim speed 1853 1572 */ 1854 1573 bfa_status_t 1855 - bfa_pport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_pport_speed speed) 1574 + bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_pport_speed speed) 1856 1575 { 1857 - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1576 + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 1858 1577 1859 1578 bfa_trc(bfa, speed); 1860 1579 1861 1580 /* 1862 1581 * Auto and speeds greater than the supported speed, are invalid 1863 1582 */ 1864 - if ((speed == BFA_PPORT_SPEED_AUTO) || (speed > pport->speed_sup)) { 1865 - 
bfa_trc(bfa, pport->speed_sup); 1583 + if ((speed == BFA_PPORT_SPEED_AUTO) || (speed > fcport->speed_sup)) { 1584 + bfa_trc(bfa, fcport->speed_sup); 1866 1585 return BFA_STATUS_UNSUPP_SPEED; 1867 1586 } 1868 1587 1869 - pport->cfg.trl_def_speed = speed; 1588 + fcport->cfg.trl_def_speed = speed; 1870 1589 1871 1590 return BFA_STATUS_OK; 1872 1591 } ··· 1845 1624 * Get default minimum ratelim speed 1846 1625 */ 1847 1626 enum bfa_pport_speed 1848 - bfa_pport_get_ratelim_speed(struct bfa_s *bfa) 1627 + bfa_fcport_get_ratelim_speed(struct bfa_s *bfa) 1849 1628 { 1850 - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1629 + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 1851 1630 1852 - bfa_trc(bfa, pport->cfg.trl_def_speed); 1853 - return pport->cfg.trl_def_speed; 1631 + bfa_trc(bfa, fcport->cfg.trl_def_speed); 1632 + return fcport->cfg.trl_def_speed; 1854 1633 1855 1634 } 1856 1635 1857 1636 void 1858 - bfa_pport_busy(struct bfa_s *bfa, bfa_boolean_t status) 1637 + bfa_fcport_busy(struct bfa_s *bfa, bfa_boolean_t status) 1859 1638 { 1860 - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1639 + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 1861 1640 1862 1641 bfa_trc(bfa, status); 1863 - bfa_trc(bfa, pport->diag_busy); 1642 + bfa_trc(bfa, fcport->diag_busy); 1864 1643 1865 - pport->diag_busy = status; 1644 + fcport->diag_busy = status; 1866 1645 } 1867 1646 1868 1647 void 1869 - bfa_pport_beacon(struct bfa_s *bfa, bfa_boolean_t beacon, 1648 + bfa_fcport_beacon(struct bfa_s *bfa, bfa_boolean_t beacon, 1870 1649 bfa_boolean_t link_e2e_beacon) 1871 1650 { 1872 - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1651 + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 1873 1652 1874 1653 bfa_trc(bfa, beacon); 1875 1654 bfa_trc(bfa, link_e2e_beacon); 1876 - bfa_trc(bfa, pport->beacon); 1877 - bfa_trc(bfa, pport->link_e2e_beacon); 1655 + bfa_trc(bfa, fcport->beacon); 1656 + bfa_trc(bfa, fcport->link_e2e_beacon); 1878 1657 1879 - pport->beacon = beacon; 1880 - pport->link_e2e_beacon = link_e2e_beacon; 1658 + fcport->beacon = beacon; 1659 + fcport->link_e2e_beacon = link_e2e_beacon; 1881 1660 } 1882 1661 1883 1662 bfa_boolean_t 1884 - bfa_pport_is_linkup(struct bfa_s *bfa) 1663 + bfa_fcport_is_linkup(struct bfa_s *bfa) 1885 1664 { 1886 - return bfa_sm_cmp_state(BFA_PORT_MOD(bfa), bfa_pport_sm_linkup); 1665 + return bfa_sm_cmp_state(BFA_FCPORT_MOD(bfa), bfa_fcport_sm_linkup); 1887 1666 } 1888 1667 1889 1668
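The bfa_fcport_fcoe_stats_swap() helper earlier in this file's diff converts the firmware's big-endian 64-bit counters by treating each counter as two 32-bit words: both words go through ntohl(), and on little-endian hosts the two words additionally trade places. A standalone sketch of the same conversion follows, using a plain uint64_t array instead of the driver's stats union and the GCC/Clang __BYTE_ORDER__ macro in place of the kernel's __BIGENDIAN; names are illustrative.

#include <arpa/inet.h>
#include <stddef.h>
#include <stdint.h>

/* Convert an array of big-endian 64-bit counters (as produced by the
 * firmware) into host order, handling each counter as two 32-bit words. */
static void swap_be64_counters(uint64_t *dst, const uint64_t *src, size_t n)
{
        uint32_t *d = (uint32_t *)dst;
        const uint32_t *s = (const uint32_t *)src;
        size_t i;

        for (i = 0; i < 2 * n; i += 2) {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
                d[i]     = ntohl(s[i]);         /* words already in place */
                d[i + 1] = ntohl(s[i + 1]);
#else
                d[i]     = ntohl(s[i + 1]);     /* low half of the value  */
                d[i + 1] = ntohl(s[i]);         /* high half of the value */
#endif
        }
}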
+46 -17
drivers/scsi/bfa/bfa_fcs.c
··· 36 36 * FCS sub-modules 37 37 */ 38 38 struct bfa_fcs_mod_s { 39 + void (*attach) (struct bfa_fcs_s *fcs); 39 40 void (*modinit) (struct bfa_fcs_s *fcs); 40 41 void (*modexit) (struct bfa_fcs_s *fcs); 41 42 }; ··· 44 43 #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit } 45 44 46 45 static struct bfa_fcs_mod_s fcs_modules[] = { 47 - BFA_FCS_MODULE(bfa_fcs_pport), 48 - BFA_FCS_MODULE(bfa_fcs_uf), 49 - BFA_FCS_MODULE(bfa_fcs_fabric), 50 - BFA_FCS_MODULE(bfa_fcs_vport), 51 - BFA_FCS_MODULE(bfa_fcs_rport), 52 - BFA_FCS_MODULE(bfa_fcs_fcpim), 46 + { bfa_fcs_pport_attach, NULL, NULL }, 47 + { bfa_fcs_uf_attach, NULL, NULL }, 48 + { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit, 49 + bfa_fcs_fabric_modexit }, 53 50 }; 54 51 55 52 /** ··· 70 71 */ 71 72 72 73 /** 73 - * FCS instance initialization. 74 - * 75 - * param[in] fcs FCS instance 76 - * param[in] bfa BFA instance 77 - * param[in] bfad BFA driver instance 78 - * 79 - * return None 74 + * fcs attach -- called once to initialize data structures at driver attach time 80 75 */ 81 76 void 82 - bfa_fcs_init(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad, 77 + bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad, 83 78 bfa_boolean_t min_cfg) 84 79 { 85 80 int i; ··· 88 95 89 96 for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) { 90 97 mod = &fcs_modules[i]; 91 - mod->modinit(fcs); 98 + if (mod->attach) 99 + mod->attach(fcs); 100 + } 101 + } 102 + 103 + /** 104 + * fcs initialization, called once after bfa initialization is complete 105 + */ 106 + void 107 + bfa_fcs_init(struct bfa_fcs_s *fcs) 108 + { 109 + int i; 110 + struct bfa_fcs_mod_s *mod; 111 + 112 + for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) { 113 + mod = &fcs_modules[i]; 114 + if (mod->modinit) 115 + mod->modinit(fcs); 92 116 } 93 117 } 94 118 ··· 137 127 } 138 128 139 129 /** 130 + * @brief 131 + * FCS FDMI Driver Parameter Initialization 132 + * 133 + * @param[in] fcs FCS instance 134 + * @param[in] fdmi_enable TRUE/FALSE 135 + * 136 + * @return None 137 + */ 138 + void 139 + bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable) 140 + { 141 + 142 + fcs->fdmi_enabled = fdmi_enable; 143 + 144 + } 145 + 146 + /** 140 147 * FCS instance cleanup and exit. 141 148 * 142 149 * param[in] fcs FCS instance ··· 170 143 nmods = sizeof(fcs_modules) / sizeof(fcs_modules[0]); 171 144 172 145 for (i = 0; i < nmods; i++) { 173 - bfa_wc_up(&fcs->wc); 174 146 175 147 mod = &fcs_modules[i]; 176 - mod->modexit(fcs); 148 + if (mod->modexit) { 149 + bfa_wc_up(&fcs->wc); 150 + mod->modexit(fcs); 151 + } 177 152 } 178 153 179 154 bfa_wc_wait(&fcs->wc);
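The bfa_fcs.c change above splits sub-module bring-up into an attach step (data structures, done once at driver attach time) and an init step (done after BFA initialization completes), with every hook in the module table optional. A small sketch of that optional-hook table; the module names and context type here are illustrative, not the driver's own.

#include <stddef.h>
#include <stdio.h>

struct fcs;                                     /* opaque core context */

struct fcs_mod {
        void (*attach)(struct fcs *fcs);        /* any hook may be NULL */
        void (*init)(struct fcs *fcs);
        void (*exit)(struct fcs *fcs);
};

static void fabric_attach(struct fcs *fcs) { (void)fcs; puts("fabric attach"); }
static void fabric_init(struct fcs *fcs)   { (void)fcs; puts("fabric init"); }
static void fabric_exit(struct fcs *fcs)   { (void)fcs; puts("fabric exit"); }
static void uf_attach(struct fcs *fcs)     { (void)fcs; puts("uf attach"); }

static const struct fcs_mod mods[] = {
        { uf_attach,     NULL,        NULL },           /* attach only  */
        { fabric_attach, fabric_init, fabric_exit },    /* full set     */
};

static void fcs_attach(struct fcs *fcs)
{
        size_t i;

        for (i = 0; i < sizeof(mods) / sizeof(mods[0]); i++)
                if (mods[i].attach)
                        mods[i].attach(fcs);
}

static void fcs_init(struct fcs *fcs)
{
        size_t i;

        for (i = 0; i < sizeof(mods) / sizeof(mods[0]); i++)
                if (mods[i].init)
                        mods[i].init(fcs);
}

int main(void)
{
        struct fcs *fcs = NULL;                 /* placeholder context */

        fcs_attach(fcs);
        fcs_init(fcs);
        return 0;
}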
+35 -40
drivers/scsi/bfa/bfa_fcs_lport.c
··· 114 114 break; 115 115 116 116 default: 117 - bfa_assert(0); 117 + bfa_sm_fault(port->fcs, event); 118 118 } 119 119 } 120 120 ··· 136 136 break; 137 137 138 138 default: 139 - bfa_assert(0); 139 + bfa_sm_fault(port->fcs, event); 140 140 } 141 141 } 142 142 ··· 176 176 break; 177 177 178 178 default: 179 - bfa_assert(0); 179 + bfa_sm_fault(port->fcs, event); 180 180 } 181 181 } 182 182 ··· 214 214 break; 215 215 216 216 default: 217 - bfa_assert(0); 217 + bfa_sm_fault(port->fcs, event); 218 218 } 219 219 } 220 220 ··· 234 234 break; 235 235 236 236 default: 237 - bfa_assert(0); 237 + bfa_sm_fault(port->fcs, event); 238 238 } 239 239 } 240 240 ··· 263 263 264 264 bfa_assert(role <= BFA_PORT_ROLE_FCP_MAX); 265 265 266 - switch (event) { 267 - case BFA_LPORT_AEN_ONLINE: 268 - bfa_log(logmod, BFA_AEN_LPORT_ONLINE, lpwwn_ptr, 269 - role_str[role / 2]); 270 - break; 271 - case BFA_LPORT_AEN_OFFLINE: 272 - bfa_log(logmod, BFA_AEN_LPORT_OFFLINE, lpwwn_ptr, 273 - role_str[role / 2]); 274 - break; 275 - case BFA_LPORT_AEN_NEW: 276 - bfa_log(logmod, BFA_AEN_LPORT_NEW, lpwwn_ptr, 277 - role_str[role / 2]); 278 - break; 279 - case BFA_LPORT_AEN_DELETE: 280 - bfa_log(logmod, BFA_AEN_LPORT_DELETE, lpwwn_ptr, 281 - role_str[role / 2]); 282 - break; 283 - case BFA_LPORT_AEN_DISCONNECT: 284 - bfa_log(logmod, BFA_AEN_LPORT_DISCONNECT, lpwwn_ptr, 285 - role_str[role / 2]); 286 - break; 287 - default: 288 - break; 289 - } 266 + bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, event), lpwwn_ptr, 267 + role_str[role/2]); 290 268 291 269 aen_data.lport.vf_id = port->fabric->vf_id; 292 270 aen_data.lport.roles = role; ··· 851 873 } 852 874 853 875 /** 854 - * Logical port initialization of base or virtual port. 855 - * Called by fabric for base port or by vport for virtual ports. 876 + * Attach time initialization of logical ports. 856 877 */ 857 878 void 858 - bfa_fcs_lport_init(struct bfa_fcs_port_s *lport, struct bfa_fcs_s *fcs, 859 - u16 vf_id, struct bfa_port_cfg_s *port_cfg, 860 - struct bfa_fcs_vport_s *vport) 879 + bfa_fcs_lport_attach(struct bfa_fcs_port_s *lport, struct bfa_fcs_s *fcs, 880 + uint16_t vf_id, struct bfa_fcs_vport_s *vport) 861 881 { 862 882 lport->fcs = fcs; 863 883 lport->fabric = bfa_fcs_vf_lookup(fcs, vf_id); 864 - bfa_os_assign(lport->port_cfg, *port_cfg); 865 884 lport->vport = vport; 866 885 lport->lp_tag = (vport) ? bfa_lps_get_tag(vport->lps) : 867 886 bfa_lps_get_tag(lport->fabric->lps); 868 887 869 888 INIT_LIST_HEAD(&lport->rport_q); 870 889 lport->num_rports = 0; 890 + } 871 891 872 - lport->bfad_port = 873 - bfa_fcb_port_new(fcs->bfad, lport, lport->port_cfg.roles, 892 + /** 893 + * Logical port initialization of base or virtual port. 894 + * Called by fabric for base port or by vport for virtual ports. 895 + */ 896 + 897 + void 898 + bfa_fcs_lport_init(struct bfa_fcs_port_s *lport, 899 + struct bfa_port_cfg_s *port_cfg) 900 + { 901 + struct bfa_fcs_vport_s *vport = lport->vport; 902 + 903 + bfa_os_assign(lport->port_cfg, *port_cfg); 904 + 905 + lport->bfad_port = bfa_fcb_port_new(lport->fcs->bfad, lport, 906 + lport->port_cfg.roles, 874 907 lport->fabric->vf_drv, 875 908 vport ? 
vport->vport_drv : NULL); 909 + 876 910 bfa_fcs_port_aen_post(lport, BFA_LPORT_AEN_NEW); 877 911 878 912 bfa_sm_set_state(lport, bfa_fcs_port_sm_uninit); 879 913 bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE); 880 914 } 881 - 882 - 883 915 884 916 /** 885 917 * fcs_lport_api ··· 909 921 if (port->fabric) { 910 922 port_attr->port_type = bfa_fcs_fabric_port_type(port->fabric); 911 923 port_attr->loopback = bfa_fcs_fabric_is_loopback(port->fabric); 924 + port_attr->authfail = 925 + bfa_fcs_fabric_is_auth_failed(port->fabric); 912 926 port_attr->fabric_name = bfa_fcs_port_get_fabric_name(port); 913 927 memcpy(port_attr->fabric_ip_addr, 914 928 bfa_fcs_port_get_fabric_ipaddr(port), 915 929 BFA_FCS_FABRIC_IPADDR_SZ); 916 930 917 - if (port->vport != NULL) 931 + if (port->vport != NULL) { 918 932 port_attr->port_type = BFA_PPORT_TYPE_VPORT; 933 + port_attr->fpma_mac = 934 + bfa_lps_get_lp_mac(port->vport->lps); 935 + } else 936 + port_attr->fpma_mac = 937 + bfa_lps_get_lp_mac(port->fabric->lps); 919 938 920 939 } else { 921 940 port_attr->port_type = BFA_PPORT_TYPE_UNKNOWN;
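In the bfa_fcs_lport.c hunk above, the per-event switch in the AEN post routine is replaced by a single bfa_log() call whose message ID is composed from the LPORT category and the event code via BFA_LOG_CREATE_ID(). That only works because the event values line up with the message IDs. The sketch below shows the idea; the shift width, category and event values, and the example WWN string are all made up for illustration.

#include <stdio.h>

/* compose a message ID from a category and an event code, in the spirit
 * of BFA_LOG_CREATE_ID(); layout here is illustrative */
#define LOG_CREATE_ID(cat, msg) (((cat) << 16) | (msg))

enum aen_cat { AEN_CAT_LPORT = 3 };
enum lport_aen_event {
        LPORT_AEN_NEW = 1,
        LPORT_AEN_DELETE = 2,
        LPORT_AEN_ONLINE = 3,
        LPORT_AEN_OFFLINE = 4,
};

static void log_msg(unsigned int msg_id, const char *lpwwn, const char *role)
{
        printf("log 0x%x: lport %s role %s\n", msg_id, lpwwn, role);
}

/* one call replaces one switch case per event */
static void lport_aen_post(enum lport_aen_event event,
                           const char *lpwwn, const char *role)
{
        log_msg(LOG_CREATE_ID(AEN_CAT_LPORT, event), lpwwn, role);
}

int main(void)
{
        lport_aen_post(LPORT_AEN_ONLINE, "20:00:00:00:c9:00:00:01", "Initiator");
        return 0;
}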
+2 -9
drivers/scsi/bfa/bfa_fcs_port.c
··· 55 55 } 56 56 57 57 void 58 - bfa_fcs_pport_modinit(struct bfa_fcs_s *fcs) 58 + bfa_fcs_pport_attach(struct bfa_fcs_s *fcs) 59 59 { 60 - bfa_pport_event_register(fcs->bfa, bfa_fcs_pport_event_handler, 61 - fcs); 62 - } 63 - 64 - void 65 - bfa_fcs_pport_modexit(struct bfa_fcs_s *fcs) 66 - { 67 - bfa_fcs_modexit_comp(fcs); 60 + bfa_fcport_event_register(fcs->bfa, bfa_fcs_pport_event_handler, fcs); 68 61 }
+1 -7
drivers/scsi/bfa/bfa_fcs_uf.c
··· 93 93 } 94 94 95 95 void 96 - bfa_fcs_uf_modinit(struct bfa_fcs_s *fcs) 96 + bfa_fcs_uf_attach(struct bfa_fcs_s *fcs) 97 97 { 98 98 bfa_uf_recv_register(fcs->bfa, bfa_fcs_uf_recv, fcs); 99 - } 100 - 101 - void 102 - bfa_fcs_uf_modexit(struct bfa_fcs_s *fcs) 103 - { 104 - bfa_fcs_modexit_comp(fcs); 105 99 }
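Both bfa_fcs_port.c and bfa_fcs_uf.c above now do their only real work at attach time, registering a callback plus an opaque context pointer with the BFA layer (bfa_fcport_event_register() and bfa_uf_recv_register()), and their empty modexit handlers are dropped. A minimal sketch of that registration pattern with illustrative types:

#include <stdio.h>

enum port_event { PORT_LINKUP, PORT_LINKDOWN };

/* lower layer keeps the callback and the opaque context it was given */
struct bfa_like {
        void (*event_cbfn)(void *cbarg, enum port_event event);
        void *event_cbarg;
};

static void event_register(struct bfa_like *bfa,
                           void (*cbfn)(void *cbarg, enum port_event event),
                           void *cbarg)
{
        bfa->event_cbfn = cbfn;
        bfa->event_cbarg = cbarg;
}

/* upper layer (FCS in the driver) receives events through the pair */
static void fcs_event_handler(void *cbarg, enum port_event event)
{
        printf("fcs %p got event %d\n", cbarg, event);
}

int main(void)
{
        struct bfa_like bfa = { 0 };
        int fcs_ctx = 0;

        event_register(&bfa, fcs_event_handler, &fcs_ctx);
        if (bfa.event_cbfn)
                bfa.event_cbfn(bfa.event_cbarg, PORT_LINKUP);
        return 0;
}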
+13
drivers/scsi/bfa/bfa_hw_cb.c
··· 53 53 } 54 54 55 55 void 56 + bfa_hwcb_reqq_ack(struct bfa_s *bfa, int reqq) 57 + { 58 + } 59 + 60 + static void 61 + bfa_hwcb_reqq_ack_msix(struct bfa_s *bfa, int reqq) 62 + { 63 + bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, 64 + __HFN_INT_CPE_Q0 << CPE_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), reqq)); 65 + } 66 + 67 + void 56 68 bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq) 57 69 { 58 70 } ··· 148 136 void 149 137 bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix) 150 138 { 139 + bfa->iocfc.hwif.hw_reqq_ack = bfa_hwcb_reqq_ack_msix; 151 140 bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix; 152 141 } 153 142
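bfa_hw_cb.c above gives this hardware flavour a request-queue acknowledge hook: a no-op in INTx mode and an interrupt-status write in MSI-X mode, selected by repointing hw_reqq_ack in bfa_hwcb_isr_mode_set(). A reduced sketch of that mode-dependent hook swap follows; the names are illustrative and the register write is stubbed out.

#include <stdbool.h>
#include <stdio.h>

struct hw;                                      /* opaque hardware context */

struct hwif {
        void (*reqq_ack)(struct hw *hw, int reqq);
};

static void reqq_ack_intx(struct hw *hw, int reqq)
{
        (void)hw; (void)reqq;                   /* nothing to acknowledge */
}

static void reqq_ack_msix(struct hw *hw, int reqq)
{
        (void)hw;
        /* a real implementation would write the queue's bit to the
         * interrupt status register here */
        printf("ack CPE queue %d\n", reqq);
}

static void isr_mode_set(struct hwif *hwif, bool msix)
{
        hwif->reqq_ack = msix ? reqq_ack_msix : reqq_ack_intx;
}

int main(void)
{
        struct hwif hwif = { .reqq_ack = reqq_ack_intx };

        isr_mode_set(&hwif, true);              /* switch to MSI-X handling */
        hwif.reqq_ack(NULL, 0);
        return 0;
}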
+9
drivers/scsi/bfa/bfa_hw_ct.c
··· 85 85 } 86 86 87 87 void 88 + bfa_hwct_reqq_ack(struct bfa_s *bfa, int reqq) 89 + { 90 + u32 r32; 91 + 92 + r32 = bfa_reg_read(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]); 93 + bfa_reg_write(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq], r32); 94 + } 95 + 96 + void 88 97 bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq) 89 98 { 90 99 u32 r32;
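In bfa_hwct_reqq_ack() above, the request-queue interrupt is acknowledged by reading the queue control register and writing the same value back. A stubbed sketch of that read/write-back acknowledge, with a plain memory word standing in for the device register and illustrative names:

#include <stdint.h>

static uint32_t reg_read(volatile uint32_t *addr)
{
        return *addr;
}

static void reg_write(volatile uint32_t *addr, uint32_t val)
{
        *addr = val;
}

/* read the queue control register and write the value straight back;
 * the write-back is what clears the interrupt condition */
static void reqq_ack(volatile uint32_t *cpe_q_ctrl)
{
        uint32_t r32 = reg_read(cpe_q_ctrl);

        reg_write(cpe_q_ctrl, r32);
}

int main(void)
{
        uint32_t fake_reg = 0x5a;               /* stand-in for the device register */

        reqq_ack(&fake_reg);
        return 0;
}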
+81 -30
drivers/scsi/bfa/bfa_intr.c
··· 15 15 * General Public License for more details. 16 16 */ 17 17 #include <bfa.h> 18 - #include <bfi/bfi_cbreg.h> 18 + #include <bfi/bfi_ctreg.h> 19 19 #include <bfa_port_priv.h> 20 20 #include <bfa_intr_priv.h> 21 21 #include <cs/bfa_debug.h> ··· 32 32 bfa_msix_lpu(struct bfa_s *bfa) 33 33 { 34 34 bfa_ioc_mbox_isr(&bfa->ioc); 35 + } 36 + 37 + static void 38 + bfa_reqq_resume(struct bfa_s *bfa, int qid) 39 + { 40 + struct list_head *waitq, *qe, *qen; 41 + struct bfa_reqq_wait_s *wqe; 42 + 43 + waitq = bfa_reqq(bfa, qid); 44 + list_for_each_safe(qe, qen, waitq) { 45 + /** 46 + * Callback only as long as there is room in request queue 47 + */ 48 + if (bfa_reqq_full(bfa, qid)) 49 + break; 50 + 51 + list_del(qe); 52 + wqe = (struct bfa_reqq_wait_s *) qe; 53 + wqe->qresume(wqe->cbarg); 54 + } 35 55 } 36 56 37 57 void ··· 116 96 117 97 bfa_msix_install(bfa); 118 98 intr_unmask = (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 | 119 - __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS); 99 + __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | 100 + __HFN_INT_LL_HALT); 120 101 121 102 if (pci_func == 0) 122 103 intr_unmask |= (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | ··· 148 127 void 149 128 bfa_msix_reqq(struct bfa_s *bfa, int qid) 150 129 { 151 - struct list_head *waitq, *qe, *qen; 152 - struct bfa_reqq_wait_s *wqe; 130 + struct list_head *waitq; 153 131 154 132 qid &= (BFI_IOC_MAX_CQS - 1); 155 133 156 - waitq = bfa_reqq(bfa, qid); 157 - list_for_each_safe(qe, qen, waitq) { 158 - /** 159 - * Callback only as long as there is room in request queue 160 - */ 161 - if (bfa_reqq_full(bfa, qid)) 162 - break; 134 + bfa->iocfc.hwif.hw_reqq_ack(bfa, qid); 163 135 164 - list_del(qe); 165 - wqe = (struct bfa_reqq_wait_s *) qe; 166 - wqe->qresume(wqe->cbarg); 167 - } 136 + /** 137 + * Resume any pending requests in the corresponding reqq. 138 + */ 139 + waitq = bfa_reqq(bfa, qid); 140 + if (!list_empty(waitq)) 141 + bfa_reqq_resume(bfa, qid); 168 142 } 169 143 170 144 void ··· 173 157 } 174 158 175 159 void 176 - bfa_msix_rspq(struct bfa_s *bfa, int rsp_qid) 160 + bfa_msix_rspq(struct bfa_s *bfa, int qid) 177 161 { 178 - struct bfi_msg_s *m; 179 - u32 pi, ci; 162 + struct bfi_msg_s *m; 163 + u32 pi, ci; 164 + struct list_head *waitq; 180 165 181 - bfa_trc_fp(bfa, rsp_qid); 166 + bfa_trc_fp(bfa, qid); 182 167 183 - rsp_qid &= (BFI_IOC_MAX_CQS - 1); 168 + qid &= (BFI_IOC_MAX_CQS - 1); 184 169 185 - bfa->iocfc.hwif.hw_rspq_ack(bfa, rsp_qid); 170 + bfa->iocfc.hwif.hw_rspq_ack(bfa, qid); 186 171 187 - ci = bfa_rspq_ci(bfa, rsp_qid); 188 - pi = bfa_rspq_pi(bfa, rsp_qid); 172 + ci = bfa_rspq_ci(bfa, qid); 173 + pi = bfa_rspq_pi(bfa, qid); 189 174 190 175 bfa_trc_fp(bfa, ci); 191 176 bfa_trc_fp(bfa, pi); 192 177 193 178 if (bfa->rme_process) { 194 179 while (ci != pi) { 195 - m = bfa_rspq_elem(bfa, rsp_qid, ci); 180 + m = bfa_rspq_elem(bfa, qid, ci); 196 181 bfa_assert_fp(m->mhdr.msg_class < BFI_MC_MAX); 197 182 198 183 bfa_isrs[m->mhdr.msg_class] (bfa, m); ··· 205 188 /** 206 189 * update CI 207 190 */ 208 - bfa_rspq_ci(bfa, rsp_qid) = pi; 209 - bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ci[rsp_qid], pi); 191 + bfa_rspq_ci(bfa, qid) = pi; 192 + bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ci[qid], pi); 210 193 bfa_os_mmiowb(); 194 + 195 + /** 196 + * Resume any pending requests in the corresponding reqq. 
197 + */ 198 + waitq = bfa_reqq(bfa, qid); 199 + if (!list_empty(waitq)) 200 + bfa_reqq_resume(bfa, qid); 211 201 } 212 202 213 203 void 214 204 bfa_msix_lpu_err(struct bfa_s *bfa, int vec) 215 205 { 216 - u32 intr; 206 + u32 intr, curr_value; 217 207 218 208 intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status); 219 209 220 210 if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1)) 221 211 bfa_msix_lpu(bfa); 222 212 223 - if (intr & (__HFN_INT_ERR_EMC | 224 - __HFN_INT_ERR_LPU0 | __HFN_INT_ERR_LPU1 | 225 - __HFN_INT_ERR_PSS)) 213 + intr &= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 | 214 + __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT); 215 + 216 + if (intr) { 217 + if (intr & __HFN_INT_LL_HALT) { 218 + /** 219 + * If LL_HALT bit is set then FW Init Halt LL Port 220 + * Register needs to be cleared as well so Interrupt 221 + * Status Register will be cleared. 222 + */ 223 + curr_value = bfa_reg_read(bfa->ioc.ioc_regs.ll_halt); 224 + curr_value &= ~__FW_INIT_HALT_P; 225 + bfa_reg_write(bfa->ioc.ioc_regs.ll_halt, curr_value); 226 + } 227 + 228 + if (intr & __HFN_INT_ERR_PSS) { 229 + /** 230 + * ERR_PSS bit needs to be cleared as well in case 231 + * interrupts are shared so driver's interrupt handler is 232 + * still called even though it is already masked out. 233 + */ 234 + curr_value = bfa_reg_read( 235 + bfa->ioc.ioc_regs.pss_err_status_reg); 236 + curr_value &= __PSS_ERR_STATUS_SET; 237 + bfa_reg_write(bfa->ioc.ioc_regs.pss_err_status_reg, 238 + curr_value); 239 + } 240 + 241 + bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr); 226 242 bfa_msix_errint(bfa, intr); 243 + } 227 244 } 228 245 229 246 void
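[Editor's note] The bfa_intr.c hunk above factors the waiter-drain loop out of bfa_msix_reqq() into bfa_reqq_resume() and also calls it after response-queue processing, so deferred requests are retried as soon as the request queue has room. The following is a minimal userspace sketch of that wait-queue/resume pattern; the list type, the queue-full check and the callback names are simplified stand-ins for illustration, not the driver's actual API.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins: a bounded request queue and a FIFO of waiters. */
struct req_queue {
	int depth;		/* slots currently in use */
	int max_depth;		/* capacity of the hardware queue */
};

struct reqq_waiter {
	void (*qresume)(void *cbarg);	/* callback that retries the deferred work */
	void *cbarg;
	struct reqq_waiter *next;
};

static int reqq_full(const struct req_queue *q)
{
	return q->depth >= q->max_depth;
}

/*
 * Mirror of the factored-out resume loop: pop waiters only while the
 * request queue still has room, then hand control back via qresume().
 */
static void reqq_resume(struct req_queue *q, struct reqq_waiter **waitq)
{
	while (*waitq && !reqq_full(q)) {
		struct reqq_waiter *w = *waitq;

		*waitq = w->next;
		q->depth++;		/* the retried request takes a slot */
		w->qresume(w->cbarg);
		free(w);
	}
}

static void retry_io(void *cbarg)
{
	printf("resumed request %ld\n", (long)cbarg);
}

int main(void)
{
	struct req_queue q = { .depth = 2, .max_depth = 4 };
	struct reqq_waiter *waitq = NULL, **tail = &waitq;
	long i;

	/* Three requests were deferred while the queue was full. */
	for (i = 0; i < 3; i++) {
		struct reqq_waiter *w = calloc(1, sizeof(*w));

		if (!w)
			return 1;
		w->qresume = retry_io;
		w->cbarg = (void *)i;
		*tail = w;
		tail = &w->next;
	}

	/* Interrupt handler acked the queue; two slots opened up. */
	reqq_resume(&q, &waitq);	/* resumes requests 0 and 1; request 2 stays queued */
	return 0;
}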
+233 -527
drivers/scsi/bfa/bfa_ioc.c
··· 18 18 #include <bfa.h> 19 19 #include <bfa_ioc.h> 20 20 #include <bfa_fwimg_priv.h> 21 - #include <bfa_trcmod_priv.h> 21 + #include <cna/bfa_cna_trcmod.h> 22 22 #include <cs/bfa_debug.h> 23 23 #include <bfi/bfi_ioc.h> 24 24 #include <bfi/bfi_ctreg.h> ··· 27 27 #include <log/bfa_log_hal.h> 28 28 #include <defs/bfa_defs_pci.h> 29 29 30 - BFA_TRC_FILE(HAL, IOC); 30 + BFA_TRC_FILE(CNA, IOC); 31 31 32 32 /** 33 33 * IOC local definitions 34 34 */ 35 35 #define BFA_IOC_TOV 2000 /* msecs */ 36 - #define BFA_IOC_HB_TOV 1000 /* msecs */ 37 - #define BFA_IOC_HB_FAIL_MAX 4 38 - #define BFA_IOC_HWINIT_MAX 2 36 + #define BFA_IOC_HWSEM_TOV 500 /* msecs */ 37 + #define BFA_IOC_HB_TOV 500 /* msecs */ 38 + #define BFA_IOC_HWINIT_MAX 2 39 39 #define BFA_IOC_FWIMG_MINSZ (16 * 1024) 40 - #define BFA_IOC_TOV_RECOVER (BFA_IOC_HB_FAIL_MAX * BFA_IOC_HB_TOV \ 41 - + BFA_IOC_TOV) 40 + #define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV 42 41 43 42 #define bfa_ioc_timer_start(__ioc) \ 44 43 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \ ··· 50 51 (sizeof(struct bfa_trc_mod_s) - \ 51 52 BFA_TRC_MAX * sizeof(struct bfa_trc_s))) 52 53 #define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn)) 53 - #define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++) 54 54 55 - #define BFA_FLASH_CHUNK_NO(off) (off / BFI_FLASH_CHUNK_SZ_WORDS) 56 - #define BFA_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS) 57 - #define BFA_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS) 58 - bfa_boolean_t bfa_auto_recover = BFA_FALSE; 55 + /** 56 + * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. 57 + */ 58 + 59 + #define bfa_ioc_firmware_lock(__ioc) \ 60 + ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc)) 61 + #define bfa_ioc_firmware_unlock(__ioc) \ 62 + ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc)) 63 + #define bfa_ioc_fwimg_get_chunk(__ioc, __off) \ 64 + ((__ioc)->ioc_hwif->ioc_fwimg_get_chunk(__ioc, __off)) 65 + #define bfa_ioc_fwimg_get_size(__ioc) \ 66 + ((__ioc)->ioc_hwif->ioc_fwimg_get_size(__ioc)) 67 + #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc)) 68 + #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc)) 69 + #define bfa_ioc_notify_hbfail(__ioc) \ 70 + ((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc)) 71 + 72 + bfa_boolean_t bfa_auto_recover = BFA_TRUE; 59 73 60 74 /* 61 75 * forward declarations ··· 76 64 static void bfa_ioc_aen_post(struct bfa_ioc_s *bfa, 77 65 enum bfa_ioc_aen_event event); 78 66 static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc); 79 - static void bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc); 80 67 static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc); 81 68 static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force); 82 69 static void bfa_ioc_timeout(void *ioc); ··· 88 77 static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc); 89 78 static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc); 90 79 static void bfa_ioc_recover(struct bfa_ioc_s *ioc); 91 - static bfa_boolean_t bfa_ioc_firmware_lock(struct bfa_ioc_s *ioc); 92 - static void bfa_ioc_firmware_unlock(struct bfa_ioc_s *ioc); 93 80 static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc); 94 81 static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc); 95 82 ··· 517 508 bfa_trc(ioc, event); 518 509 519 510 switch (event) { 520 - case IOC_E_HWERROR: 521 511 case IOC_E_FWRSP_DISABLE: 512 + bfa_ioc_timer_stop(ioc); 513 + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 514 + break; 515 + 516 + case IOC_E_HWERROR: 522 517 bfa_ioc_timer_stop(ioc); 
523 518 /* 524 519 * !!! fall through !!! 525 520 */ 526 521 527 522 case IOC_E_TIMEOUT: 523 + bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL); 528 524 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 529 525 break; 530 526 ··· 622 608 * Mark IOC as failed in hardware and stop firmware. 623 609 */ 624 610 bfa_ioc_lpu_stop(ioc); 625 - bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_HBFAIL); 611 + bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL); 626 612 627 - if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT) { 628 - bfa_reg_write(ioc->ioc_regs.ll_halt, __FW_INIT_HALT_P); 629 - /* 630 - * Wait for halt to take effect 631 - */ 632 - bfa_reg_read(ioc->ioc_regs.ll_halt); 633 - } 613 + /** 614 + * Notify other functions on HB failure. 615 + */ 616 + bfa_ioc_notify_hbfail(ioc); 634 617 635 618 /** 636 619 * Notify driver and common modules registered for notification. ··· 683 672 */ 684 673 break; 685 674 675 + case IOC_E_HWERROR: 676 + /* 677 + * HB failure notification, ignore. 678 + */ 679 + break; 680 + 686 681 default: 687 682 bfa_sm_fault(ioc, event); 688 683 } ··· 717 700 } 718 701 } 719 702 720 - static void 703 + void 721 704 bfa_ioc_sem_timeout(void *ioc_arg) 722 705 { 723 706 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg; ··· 725 708 bfa_ioc_hw_sem_get(ioc); 726 709 } 727 710 728 - static void 729 - bfa_ioc_usage_sem_get(struct bfa_ioc_s *ioc) 711 + bfa_boolean_t 712 + bfa_ioc_sem_get(bfa_os_addr_t sem_reg) 730 713 { 731 - u32 r32; 732 - int cnt = 0; 733 - #define BFA_SEM_SPINCNT 1000 714 + u32 r32; 715 + int cnt = 0; 716 + #define BFA_SEM_SPINCNT 3000 734 717 735 - do { 736 - r32 = bfa_reg_read(ioc->ioc_regs.ioc_usage_sem_reg); 718 + r32 = bfa_reg_read(sem_reg); 719 + 720 + while (r32 && (cnt < BFA_SEM_SPINCNT)) { 737 721 cnt++; 738 - if (cnt > BFA_SEM_SPINCNT) 739 - break; 740 - } while (r32 != 0); 722 + bfa_os_udelay(2); 723 + r32 = bfa_reg_read(sem_reg); 724 + } 725 + 726 + if (r32 == 0) 727 + return BFA_TRUE; 728 + 741 729 bfa_assert(cnt < BFA_SEM_SPINCNT); 730 + return BFA_FALSE; 742 731 } 743 732 744 - static void 745 - bfa_ioc_usage_sem_release(struct bfa_ioc_s *ioc) 733 + void 734 + bfa_ioc_sem_release(bfa_os_addr_t sem_reg) 746 735 { 747 - bfa_reg_write(ioc->ioc_regs.ioc_usage_sem_reg, 1); 736 + bfa_reg_write(sem_reg, 1); 748 737 } 749 738 750 739 static void ··· 760 737 761 738 /** 762 739 * First read to the semaphore register will return 0, subsequent reads 763 - * will return 1. Semaphore is released by writing 0 to the register 740 + * will return 1. Semaphore is released by writing 1 to the register 764 741 */ 765 742 r32 = bfa_reg_read(ioc->ioc_regs.ioc_sem_reg); 766 743 if (r32 == 0) { ··· 769 746 } 770 747 771 748 bfa_timer_begin(ioc->timer_mod, &ioc->sem_timer, bfa_ioc_sem_timeout, 772 - ioc, BFA_IOC_TOV); 749 + ioc, BFA_IOC_HWSEM_TOV); 773 750 } 774 751 775 - static void 752 + void 776 753 bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc) 777 754 { 778 755 bfa_reg_write(ioc->ioc_regs.ioc_sem_reg, 1); ··· 851 828 /** 852 829 * Get driver and firmware versions. 
853 830 */ 854 - static void 831 + void 855 832 bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) 856 833 { 857 834 u32 pgnum, pgoff; ··· 870 847 } 871 848 } 872 849 873 - static u32 * 874 - bfa_ioc_fwimg_get_chunk(struct bfa_ioc_s *ioc, u32 off) 875 - { 876 - if (ioc->ctdev) 877 - return bfi_image_ct_get_chunk(off); 878 - return bfi_image_cb_get_chunk(off); 879 - } 880 - 881 - static u32 882 - bfa_ioc_fwimg_get_size(struct bfa_ioc_s *ioc) 883 - { 884 - return (ioc->ctdev) ? bfi_image_ct_size : bfi_image_cb_size; 885 - } 886 - 887 850 /** 888 851 * Returns TRUE if same. 889 852 */ 890 - static bfa_boolean_t 853 + bfa_boolean_t 891 854 bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) 892 855 { 893 856 struct bfi_ioc_image_hdr_s *drv_fwhdr; ··· 927 918 } 928 919 929 920 return bfa_ioc_fwver_cmp(ioc, &fwhdr); 930 - } 931 - 932 - /** 933 - * Return true if firmware of current driver matches the running firmware. 934 - */ 935 - static bfa_boolean_t 936 - bfa_ioc_firmware_lock(struct bfa_ioc_s *ioc) 937 - { 938 - enum bfi_ioc_state ioc_fwstate; 939 - u32 usecnt; 940 - struct bfi_ioc_image_hdr_s fwhdr; 941 - 942 - /** 943 - * Firmware match check is relevant only for CNA. 944 - */ 945 - if (!ioc->cna) 946 - return BFA_TRUE; 947 - 948 - /** 949 - * If bios boot (flash based) -- do not increment usage count 950 - */ 951 - if (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ) 952 - return BFA_TRUE; 953 - 954 - bfa_ioc_usage_sem_get(ioc); 955 - usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg); 956 - 957 - /** 958 - * If usage count is 0, always return TRUE. 959 - */ 960 - if (usecnt == 0) { 961 - bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 1); 962 - bfa_ioc_usage_sem_release(ioc); 963 - bfa_trc(ioc, usecnt); 964 - return BFA_TRUE; 965 - } 966 - 967 - ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate); 968 - bfa_trc(ioc, ioc_fwstate); 969 - 970 - /** 971 - * Use count cannot be non-zero and chip in uninitialized state. 972 - */ 973 - bfa_assert(ioc_fwstate != BFI_IOC_UNINIT); 974 - 975 - /** 976 - * Check if another driver with a different firmware is active 977 - */ 978 - bfa_ioc_fwver_get(ioc, &fwhdr); 979 - if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) { 980 - bfa_ioc_usage_sem_release(ioc); 981 - bfa_trc(ioc, usecnt); 982 - return BFA_FALSE; 983 - } 984 - 985 - /** 986 - * Same firmware version. Increment the reference count. 987 - */ 988 - usecnt++; 989 - bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt); 990 - bfa_ioc_usage_sem_release(ioc); 991 - bfa_trc(ioc, usecnt); 992 - return BFA_TRUE; 993 - } 994 - 995 - static void 996 - bfa_ioc_firmware_unlock(struct bfa_ioc_s *ioc) 997 - { 998 - u32 usecnt; 999 - 1000 - /** 1001 - * Firmware lock is relevant only for CNA. 
1002 - * If bios boot (flash based) -- do not decrement usage count 1003 - */ 1004 - if (!ioc->cna || (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ)) 1005 - return; 1006 - 1007 - /** 1008 - * decrement usage count 1009 - */ 1010 - bfa_ioc_usage_sem_get(ioc); 1011 - usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg); 1012 - bfa_assert(usecnt > 0); 1013 - 1014 - usecnt--; 1015 - bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt); 1016 - bfa_trc(ioc, usecnt); 1017 - 1018 - bfa_ioc_usage_sem_release(ioc); 1019 921 } 1020 922 1021 923 /** ··· 1072 1152 static void 1073 1153 bfa_ioc_hb_check(void *cbarg) 1074 1154 { 1075 - struct bfa_ioc_s *ioc = cbarg; 1076 - u32 hb_count; 1155 + struct bfa_ioc_s *ioc = cbarg; 1156 + u32 hb_count; 1077 1157 1078 1158 hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat); 1079 1159 if (ioc->hb_count == hb_count) { 1080 - ioc->hb_fail++; 1081 - } else { 1082 - ioc->hb_count = hb_count; 1083 - ioc->hb_fail = 0; 1084 - } 1085 - 1086 - if (ioc->hb_fail >= BFA_IOC_HB_FAIL_MAX) { 1087 - bfa_log(ioc->logm, BFA_LOG_HAL_HEARTBEAT_FAILURE, hb_count); 1088 - ioc->hb_fail = 0; 1160 + bfa_log(ioc->logm, BFA_LOG_HAL_HEARTBEAT_FAILURE, 1161 + hb_count); 1089 1162 bfa_ioc_recover(ioc); 1090 1163 return; 1164 + } else { 1165 + ioc->hb_count = hb_count; 1091 1166 } 1092 1167 1093 1168 bfa_ioc_mbox_poll(ioc); 1094 - bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, ioc, 1095 - BFA_IOC_HB_TOV); 1169 + bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, 1170 + ioc, BFA_IOC_HB_TOV); 1096 1171 } 1097 1172 1098 1173 static void 1099 1174 bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc) 1100 1175 { 1101 - ioc->hb_fail = 0; 1102 1176 ioc->hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat); 1103 1177 bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, ioc, 1104 1178 BFA_IOC_HB_TOV); ··· 1102 1188 bfa_ioc_hb_stop(struct bfa_ioc_s *ioc) 1103 1189 { 1104 1190 bfa_timer_stop(&ioc->ioc_timer); 1105 - } 1106 - 1107 - /** 1108 - * Host to LPU mailbox message addresses 1109 - */ 1110 - static struct { 1111 - u32 hfn_mbox, lpu_mbox, hfn_pgn; 1112 - } iocreg_fnreg[] = { 1113 - { 1114 - HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0}, { 1115 - HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1}, { 1116 - HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2}, { 1117 - HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3} 1118 - }; 1119 - 1120 - /** 1121 - * Host <-> LPU mailbox command/status registers - port 0 1122 - */ 1123 - static struct { 1124 - u32 hfn, lpu; 1125 - } iocreg_mbcmd_p0[] = { 1126 - { 1127 - HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT}, { 1128 - HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT}, { 1129 - HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT}, { 1130 - HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT} 1131 - }; 1132 - 1133 - /** 1134 - * Host <-> LPU mailbox command/status registers - port 1 1135 - */ 1136 - static struct { 1137 - u32 hfn, lpu; 1138 - } iocreg_mbcmd_p1[] = { 1139 - { 1140 - HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT}, { 1141 - HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT}, { 1142 - HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT}, { 1143 - HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT} 1144 - }; 1145 - 1146 - /** 1147 - * Shared IRQ handling in INTX mode 1148 - */ 1149 - static struct { 1150 - u32 isr, msk; 1151 - } iocreg_shirq_next[] = { 1152 - { 1153 - HOSTFN1_INT_STATUS, HOSTFN1_INT_MSK}, { 1154 
- HOSTFN2_INT_STATUS, HOSTFN2_INT_MSK}, { 1155 - HOSTFN3_INT_STATUS, HOSTFN3_INT_MSK}, { 1156 - HOSTFN0_INT_STATUS, HOSTFN0_INT_MSK},}; 1157 - 1158 - static void 1159 - bfa_ioc_reg_init(struct bfa_ioc_s *ioc) 1160 - { 1161 - bfa_os_addr_t rb; 1162 - int pcifn = bfa_ioc_pcifn(ioc); 1163 - 1164 - rb = bfa_ioc_bar0(ioc); 1165 - 1166 - ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox; 1167 - ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox; 1168 - ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn; 1169 - 1170 - if (ioc->port_id == 0) { 1171 - ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG; 1172 - ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG; 1173 - ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn; 1174 - ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu; 1175 - ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; 1176 - } else { 1177 - ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG); 1178 - ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG); 1179 - ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn; 1180 - ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu; 1181 - ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1; 1182 - } 1183 - 1184 - /** 1185 - * Shared IRQ handling in INTX mode 1186 - */ 1187 - ioc->ioc_regs.shirq_isr_next = rb + iocreg_shirq_next[pcifn].isr; 1188 - ioc->ioc_regs.shirq_msk_next = rb + iocreg_shirq_next[pcifn].msk; 1189 - 1190 - /* 1191 - * PSS control registers 1192 - */ 1193 - ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG); 1194 - ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG); 1195 - ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG); 1196 - 1197 - /* 1198 - * IOC semaphore registers and serialization 1199 - */ 1200 - ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG); 1201 - ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG); 1202 - ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT); 1203 - 1204 - /** 1205 - * sram memory access 1206 - */ 1207 - ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START); 1208 - ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CB; 1209 - if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT) 1210 - ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT; 1211 1191 } 1212 1192 1213 1193 /** ··· 1129 1321 if (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ) 1130 1322 boot_type = BFI_BOOT_TYPE_FLASH; 1131 1323 fwimg = bfa_ioc_fwimg_get_chunk(ioc, chunkno); 1132 - fwimg[BFI_BOOT_TYPE_OFF / sizeof(u32)] = bfa_os_swap32(boot_type); 1133 - fwimg[BFI_BOOT_PARAM_OFF / sizeof(u32)] = 1134 - bfa_os_swap32(boot_param); 1135 1324 1136 1325 pgnum = bfa_ioc_smem_pgnum(ioc, loff); 1137 1326 pgoff = bfa_ioc_smem_pgoff(ioc, loff); ··· 1137 1332 1138 1333 for (i = 0; i < bfa_ioc_fwimg_get_size(ioc); i++) { 1139 1334 1140 - if (BFA_FLASH_CHUNK_NO(i) != chunkno) { 1141 - chunkno = BFA_FLASH_CHUNK_NO(i); 1335 + if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) { 1336 + chunkno = BFA_IOC_FLASH_CHUNK_NO(i); 1142 1337 fwimg = bfa_ioc_fwimg_get_chunk(ioc, 1143 - BFA_FLASH_CHUNK_ADDR(chunkno)); 1338 + BFA_IOC_FLASH_CHUNK_ADDR(chunkno)); 1144 1339 } 1145 1340 1146 1341 /** 1147 1342 * write smem 1148 1343 */ 1149 1344 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 1150 - fwimg[BFA_FLASH_OFFSET_IN_CHUNK(i)]); 1345 + fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]); 1151 1346 1152 1347 loff += sizeof(u32); 1153 1348 ··· 1163 1358 1164 1359 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, 1165 1360 bfa_ioc_smem_pgnum(ioc, 0)); 1361 + 1362 + /* 1363 + * Set boot type and boot param at 
the end. 1364 + */ 1365 + bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF, 1366 + bfa_os_swap32(boot_type)); 1367 + bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_PARAM_OFF, 1368 + bfa_os_swap32(boot_param)); 1166 1369 } 1167 1370 1168 1371 static void ··· 1253 1440 } 1254 1441 1255 1442 /** 1256 - * Initialize IOC to port mapping. 1257 - */ 1258 - 1259 - #define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8) 1260 - static void 1261 - bfa_ioc_map_port(struct bfa_ioc_s *ioc) 1262 - { 1263 - bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva; 1264 - u32 r32; 1265 - 1266 - /** 1267 - * For crossbow, port id is same as pci function. 1268 - */ 1269 - if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_CT) { 1270 - ioc->port_id = bfa_ioc_pcifn(ioc); 1271 - return; 1272 - } 1273 - 1274 - /** 1275 - * For catapult, base port id on personality register and IOC type 1276 - */ 1277 - r32 = bfa_reg_read(rb + FNC_PERS_REG); 1278 - r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)); 1279 - ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH; 1280 - 1281 - bfa_trc(ioc, bfa_ioc_pcifn(ioc)); 1282 - bfa_trc(ioc, ioc->port_id); 1283 - } 1284 - 1285 - 1286 - 1287 - /** 1288 1443 * bfa_ioc_public 1289 1444 */ 1290 - 1291 - /** 1292 - * Set interrupt mode for a function: INTX or MSIX 1293 - */ 1294 - void 1295 - bfa_ioc_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix) 1296 - { 1297 - bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva; 1298 - u32 r32, mode; 1299 - 1300 - r32 = bfa_reg_read(rb + FNC_PERS_REG); 1301 - bfa_trc(ioc, r32); 1302 - 1303 - mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) & 1304 - __F0_INTX_STATUS; 1305 - 1306 - /** 1307 - * If already in desired mode, do not change anything 1308 - */ 1309 - if (!msix && mode) 1310 - return; 1311 - 1312 - if (msix) 1313 - mode = __F0_INTX_STATUS_MSIX; 1314 - else 1315 - mode = __F0_INTX_STATUS_INTA; 1316 - 1317 - r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))); 1318 - r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))); 1319 - bfa_trc(ioc, r32); 1320 - 1321 - bfa_reg_write(rb + FNC_PERS_REG, r32); 1322 - } 1323 - 1324 - bfa_status_t 1325 - bfa_ioc_pll_init(struct bfa_ioc_s *ioc) 1326 - { 1327 - bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva; 1328 - u32 pll_sclk, pll_fclk, r32; 1329 - 1330 - if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT) { 1331 - pll_sclk = 1332 - __APP_PLL_312_ENABLE | __APP_PLL_312_LRESETN | 1333 - __APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(0U) | 1334 - __APP_PLL_312_JITLMT0_1(3U) | 1335 - __APP_PLL_312_CNTLMT0_1(1U); 1336 - pll_fclk = 1337 - __APP_PLL_425_ENABLE | __APP_PLL_425_LRESETN | 1338 - __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(0U) | 1339 - __APP_PLL_425_JITLMT0_1(3U) | 1340 - __APP_PLL_425_CNTLMT0_1(1U); 1341 - 1342 - /** 1343 - * For catapult, choose operational mode FC/FCoE 1344 - */ 1345 - if (ioc->fcmode) { 1346 - bfa_reg_write((rb + OP_MODE), 0); 1347 - bfa_reg_write((rb + ETH_MAC_SER_REG), 1348 - __APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2 1349 - | __APP_EMS_CHANNEL_SEL); 1350 - } else { 1351 - ioc->pllinit = BFA_TRUE; 1352 - bfa_reg_write((rb + OP_MODE), __GLOBAL_FCOE_MODE); 1353 - bfa_reg_write((rb + ETH_MAC_SER_REG), 1354 - __APP_EMS_REFCKBUFEN1); 1355 - } 1356 - } else { 1357 - pll_sclk = 1358 - __APP_PLL_312_ENABLE | __APP_PLL_312_LRESETN | 1359 - __APP_PLL_312_P0_1(3U) | __APP_PLL_312_JITLMT0_1(3U) | 1360 - __APP_PLL_312_CNTLMT0_1(3U); 1361 - pll_fclk = 1362 - __APP_PLL_425_ENABLE | __APP_PLL_425_LRESETN | 1363 - __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) | 1364 - 
__APP_PLL_425_JITLMT0_1(3U) | 1365 - __APP_PLL_425_CNTLMT0_1(3U); 1366 - } 1367 - 1368 - bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT); 1369 - bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT); 1370 - 1371 - bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU); 1372 - bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU); 1373 - bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU); 1374 - bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU); 1375 - bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU); 1376 - bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU); 1377 - 1378 - bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, 1379 - __APP_PLL_312_LOGIC_SOFT_RESET); 1380 - bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, 1381 - __APP_PLL_312_BYPASS | __APP_PLL_312_LOGIC_SOFT_RESET); 1382 - bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, 1383 - __APP_PLL_425_LOGIC_SOFT_RESET); 1384 - bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, 1385 - __APP_PLL_425_BYPASS | __APP_PLL_425_LOGIC_SOFT_RESET); 1386 - bfa_os_udelay(2); 1387 - bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, 1388 - __APP_PLL_312_LOGIC_SOFT_RESET); 1389 - bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, 1390 - __APP_PLL_425_LOGIC_SOFT_RESET); 1391 - 1392 - bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, 1393 - pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET); 1394 - bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, 1395 - pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET); 1396 - 1397 - /** 1398 - * Wait for PLLs to lock. 1399 - */ 1400 - bfa_os_udelay(2000); 1401 - bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU); 1402 - bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU); 1403 - 1404 - bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk); 1405 - bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk); 1406 - 1407 - if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT) { 1408 - bfa_reg_write((rb + MBIST_CTL_REG), __EDRAM_BISTR_START); 1409 - bfa_os_udelay(1000); 1410 - r32 = bfa_reg_read((rb + MBIST_STAT_REG)); 1411 - bfa_trc(ioc, r32); 1412 - } 1413 - 1414 - return BFA_STATUS_OK; 1415 - } 1416 1445 1417 1446 /** 1418 1447 * Interface used by diag module to do firmware boot with memory test ··· 1297 1642 void 1298 1643 bfa_ioc_auto_recover(bfa_boolean_t auto_recover) 1299 1644 { 1300 - bfa_auto_recover = BFA_FALSE; 1645 + bfa_auto_recover = auto_recover; 1301 1646 } 1302 1647 1303 1648 ··· 1419 1764 ioc->ctdev = (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT); 1420 1765 ioc->cna = ioc->ctdev && !ioc->fcmode; 1421 1766 1767 + /** 1768 + * Set asic specific interfaces. 
See bfa_ioc_cb.c and bfa_ioc_ct.c 1769 + */ 1770 + if (ioc->ctdev) 1771 + bfa_ioc_set_ct_hwif(ioc); 1772 + else 1773 + bfa_ioc_set_cb_hwif(ioc); 1774 + 1422 1775 bfa_ioc_map_port(ioc); 1423 1776 bfa_ioc_reg_init(ioc); 1424 1777 } ··· 1493 1830 void 1494 1831 bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave) 1495 1832 { 1496 - bfa_assert(ioc->auto_recover); 1497 1833 ioc->dbg_fwsave = dbg_fwsave; 1498 1834 ioc->dbg_fwsave_len = bfa_ioc_debug_trcsz(ioc->auto_recover); 1499 1835 } ··· 1635 1973 ((__sm) == BFI_IOC_INITING) || \ 1636 1974 ((__sm) == BFI_IOC_HWINIT) || \ 1637 1975 ((__sm) == BFI_IOC_DISABLED) || \ 1638 - ((__sm) == BFI_IOC_HBFAIL) || \ 1976 + ((__sm) == BFI_IOC_FAIL) || \ 1639 1977 ((__sm) == BFI_IOC_CFG_DISABLED)) 1640 1978 1641 1979 /** ··· 1679 2017 struct bfa_adapter_attr_s *ad_attr) 1680 2018 { 1681 2019 struct bfi_ioc_attr_s *ioc_attr; 1682 - char model[BFA_ADAPTER_MODEL_NAME_LEN]; 1683 2020 1684 2021 ioc_attr = ioc->attr; 1685 - bfa_os_memcpy((void *)&ad_attr->serial_num, 1686 - (void *)ioc_attr->brcd_serialnum, 1687 - BFA_ADAPTER_SERIAL_NUM_LEN); 1688 2022 1689 - bfa_os_memcpy(&ad_attr->fw_ver, ioc_attr->fw_version, BFA_VERSION_LEN); 1690 - bfa_os_memcpy(&ad_attr->optrom_ver, ioc_attr->optrom_version, 1691 - BFA_VERSION_LEN); 1692 - bfa_os_memcpy(&ad_attr->manufacturer, BFA_MFG_NAME, 1693 - BFA_ADAPTER_MFG_NAME_LEN); 2023 + bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num); 2024 + bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver); 2025 + bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver); 2026 + bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer); 1694 2027 bfa_os_memcpy(&ad_attr->vpd, &ioc_attr->vpd, 1695 2028 sizeof(struct bfa_mfg_vpd_s)); 1696 2029 1697 - ad_attr->nports = BFI_ADAPTER_GETP(NPORTS, ioc_attr->adapter_prop); 1698 - ad_attr->max_speed = BFI_ADAPTER_GETP(SPEED, ioc_attr->adapter_prop); 2030 + ad_attr->nports = bfa_ioc_get_nports(ioc); 2031 + ad_attr->max_speed = bfa_ioc_speed_sup(ioc); 1699 2032 1700 - /** 1701 - * model name 1702 - */ 1703 - if (BFI_ADAPTER_GETP(SPEED, ioc_attr->adapter_prop) == 10) { 1704 - strcpy(model, "BR-10?0"); 1705 - model[5] = '0' + ad_attr->nports; 1706 - } else { 1707 - strcpy(model, "Brocade-??5"); 1708 - model[8] = 1709 - '0' + BFI_ADAPTER_GETP(SPEED, ioc_attr->adapter_prop); 1710 - model[9] = '0' + ad_attr->nports; 1711 - } 2033 + bfa_ioc_get_adapter_model(ioc, ad_attr->model); 2034 + /* For now, model descr uses same model string */ 2035 + bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr); 1712 2036 1713 2037 if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop)) 1714 2038 ad_attr->prototype = 1; 1715 2039 else 1716 2040 ad_attr->prototype = 0; 1717 - 1718 - bfa_os_memcpy(&ad_attr->model, model, BFA_ADAPTER_MODEL_NAME_LEN); 1719 - bfa_os_memcpy(&ad_attr->model_descr, &ad_attr->model, 1720 - BFA_ADAPTER_MODEL_NAME_LEN); 1721 2041 1722 2042 ad_attr->pwwn = bfa_ioc_get_pwwn(ioc); 1723 2043 ad_attr->mac = bfa_ioc_get_mac(ioc); ··· 1708 2064 ad_attr->pcie_lanes = ioc_attr->pcie_lanes; 1709 2065 ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig; 1710 2066 ad_attr->asic_rev = ioc_attr->asic_rev; 1711 - ad_attr->hw_ver[0] = 'R'; 1712 - ad_attr->hw_ver[1] = 'e'; 1713 - ad_attr->hw_ver[2] = 'v'; 1714 - ad_attr->hw_ver[3] = '-'; 1715 - ad_attr->hw_ver[4] = ioc_attr->asic_rev; 1716 - ad_attr->hw_ver[5] = '\0'; 2067 + 2068 + bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver); 1717 2069 1718 2070 ad_attr->cna_capable = ioc->cna; 2071 + } 2072 + 2073 + enum bfa_ioc_type_e 2074 + bfa_ioc_get_type(struct 
bfa_ioc_s *ioc) 2075 + { 2076 + if (!ioc->ctdev || ioc->fcmode) 2077 + return BFA_IOC_TYPE_FC; 2078 + else if (ioc->ioc_mc == BFI_MC_IOCFC) 2079 + return BFA_IOC_TYPE_FCoE; 2080 + else if (ioc->ioc_mc == BFI_MC_LL) 2081 + return BFA_IOC_TYPE_LL; 2082 + else { 2083 + bfa_assert(ioc->ioc_mc == BFI_MC_LL); 2084 + return BFA_IOC_TYPE_LL; 2085 + } 2086 + } 2087 + 2088 + void 2089 + bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num) 2090 + { 2091 + bfa_os_memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN); 2092 + bfa_os_memcpy((void *)serial_num, 2093 + (void *)ioc->attr->brcd_serialnum, 2094 + BFA_ADAPTER_SERIAL_NUM_LEN); 2095 + } 2096 + 2097 + void 2098 + bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver) 2099 + { 2100 + bfa_os_memset((void *)fw_ver, 0, BFA_VERSION_LEN); 2101 + bfa_os_memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN); 2102 + } 2103 + 2104 + void 2105 + bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev) 2106 + { 2107 + bfa_assert(chip_rev); 2108 + 2109 + bfa_os_memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN); 2110 + 2111 + chip_rev[0] = 'R'; 2112 + chip_rev[1] = 'e'; 2113 + chip_rev[2] = 'v'; 2114 + chip_rev[3] = '-'; 2115 + chip_rev[4] = ioc->attr->asic_rev; 2116 + chip_rev[5] = '\0'; 2117 + } 2118 + 2119 + void 2120 + bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver) 2121 + { 2122 + bfa_os_memset((void *)optrom_ver, 0, BFA_VERSION_LEN); 2123 + bfa_os_memcpy(optrom_ver, ioc->attr->optrom_version, 2124 + BFA_VERSION_LEN); 2125 + } 2126 + 2127 + void 2128 + bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer) 2129 + { 2130 + bfa_os_memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN); 2131 + bfa_os_memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN); 2132 + } 2133 + 2134 + void 2135 + bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model) 2136 + { 2137 + struct bfi_ioc_attr_s *ioc_attr; 2138 + u8 nports; 2139 + u8 max_speed; 2140 + 2141 + bfa_assert(model); 2142 + bfa_os_memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN); 2143 + 2144 + ioc_attr = ioc->attr; 2145 + 2146 + nports = bfa_ioc_get_nports(ioc); 2147 + max_speed = bfa_ioc_speed_sup(ioc); 2148 + 2149 + /** 2150 + * model name 2151 + */ 2152 + if (max_speed == 10) { 2153 + strcpy(model, "BR-10?0"); 2154 + model[5] = '0' + nports; 2155 + } else { 2156 + strcpy(model, "Brocade-??5"); 2157 + model[8] = '0' + max_speed; 2158 + model[9] = '0' + nports; 2159 + } 2160 + } 2161 + 2162 + enum bfa_ioc_state 2163 + bfa_ioc_get_state(struct bfa_ioc_s *ioc) 2164 + { 2165 + return bfa_sm_to_state(ioc_sm_table, ioc->fsm); 1719 2166 } 1720 2167 1721 2168 void ··· 1814 2079 { 1815 2080 bfa_os_memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s)); 1816 2081 1817 - ioc_attr->state = bfa_sm_to_state(ioc_sm_table, ioc->fsm); 2082 + ioc_attr->state = bfa_ioc_get_state(ioc); 1818 2083 ioc_attr->port_id = ioc->port_id; 1819 2084 1820 - if (!ioc->ctdev) 1821 - ioc_attr->ioc_type = BFA_IOC_TYPE_FC; 1822 - else if (ioc->ioc_mc == BFI_MC_IOCFC) 1823 - ioc_attr->ioc_type = BFA_IOC_TYPE_FCoE; 1824 - else if (ioc->ioc_mc == BFI_MC_LL) 1825 - ioc_attr->ioc_type = BFA_IOC_TYPE_LL; 2085 + ioc_attr->ioc_type = bfa_ioc_get_type(ioc); 1826 2086 1827 2087 bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr); 1828 2088 1829 2089 ioc_attr->pci_attr.device_id = ioc->pcidev.device_id; 1830 2090 ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func; 1831 - ioc_attr->pci_attr.chip_rev[0] = 'R'; 1832 - ioc_attr->pci_attr.chip_rev[1] = 
'e'; 1833 - ioc_attr->pci_attr.chip_rev[2] = 'v'; 1834 - ioc_attr->pci_attr.chip_rev[3] = '-'; 1835 - ioc_attr->pci_attr.chip_rev[4] = ioc_attr->adapter_attr.asic_rev; 1836 - ioc_attr->pci_attr.chip_rev[5] = '\0'; 2091 + bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev); 1837 2092 } 1838 2093 1839 2094 /** ··· 1920 2195 } 1921 2196 1922 2197 /** 1923 - * Return true if interrupt should be claimed. 1924 - */ 1925 - bfa_boolean_t 1926 - bfa_ioc_intx_claim(struct bfa_ioc_s *ioc) 1927 - { 1928 - u32 isr, msk; 1929 - 1930 - /** 1931 - * Always claim if not catapult. 1932 - */ 1933 - if (!ioc->ctdev) 1934 - return BFA_TRUE; 1935 - 1936 - /** 1937 - * FALSE if next device is claiming interrupt. 1938 - * TRUE if next device is not interrupting or not present. 1939 - */ 1940 - msk = bfa_reg_read(ioc->ioc_regs.shirq_msk_next); 1941 - isr = bfa_reg_read(ioc->ioc_regs.shirq_isr_next); 1942 - return !(isr & ~msk); 1943 - } 1944 - 1945 - /** 1946 2198 * Send AEN notification 1947 2199 */ 1948 2200 static void ··· 1928 2226 union bfa_aen_data_u aen_data; 1929 2227 struct bfa_log_mod_s *logmod = ioc->logm; 1930 2228 s32 inst_num = 0; 1931 - struct bfa_ioc_attr_s ioc_attr; 2229 + enum bfa_ioc_type_e ioc_type; 1932 2230 1933 - switch (event) { 1934 - case BFA_IOC_AEN_HBGOOD: 1935 - bfa_log(logmod, BFA_AEN_IOC_HBGOOD, inst_num); 1936 - break; 1937 - case BFA_IOC_AEN_HBFAIL: 1938 - bfa_log(logmod, BFA_AEN_IOC_HBFAIL, inst_num); 1939 - break; 1940 - case BFA_IOC_AEN_ENABLE: 1941 - bfa_log(logmod, BFA_AEN_IOC_ENABLE, inst_num); 1942 - break; 1943 - case BFA_IOC_AEN_DISABLE: 1944 - bfa_log(logmod, BFA_AEN_IOC_DISABLE, inst_num); 1945 - break; 1946 - case BFA_IOC_AEN_FWMISMATCH: 1947 - bfa_log(logmod, BFA_AEN_IOC_FWMISMATCH, inst_num); 1948 - break; 1949 - default: 1950 - break; 1951 - } 2231 + bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, event), inst_num); 1952 2232 1953 2233 memset(&aen_data.ioc.pwwn, 0, sizeof(aen_data.ioc.pwwn)); 1954 2234 memset(&aen_data.ioc.mac, 0, sizeof(aen_data.ioc.mac)); 1955 - bfa_ioc_get_attr(ioc, &ioc_attr); 1956 - switch (ioc_attr.ioc_type) { 2235 + ioc_type = bfa_ioc_get_type(ioc); 2236 + switch (ioc_type) { 1957 2237 case BFA_IOC_TYPE_FC: 1958 2238 aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc); 1959 2239 break; ··· 1947 2263 aen_data.ioc.mac = bfa_ioc_get_mac(ioc); 1948 2264 break; 1949 2265 default: 1950 - bfa_assert(ioc_attr.ioc_type == BFA_IOC_TYPE_FC); 2266 + bfa_assert(ioc_type == BFA_IOC_TYPE_FC); 1951 2267 break; 1952 2268 } 1953 - aen_data.ioc.ioc_type = ioc_attr.ioc_type; 2269 + aen_data.ioc.ioc_type = ioc_type; 1954 2270 } 1955 2271 1956 2272 /** ··· 1974 2290 } 1975 2291 1976 2292 /** 2293 + * Clear saved firmware trace 2294 + */ 2295 + void 2296 + bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc) 2297 + { 2298 + ioc->dbg_fwsave_once = BFA_TRUE; 2299 + } 2300 + 2301 + /** 1977 2302 * Retrieve saved firmware trace from a prior IOC failure. 1978 2303 */ 1979 2304 bfa_status_t ··· 1997 2304 1998 2305 pgnum = bfa_ioc_smem_pgnum(ioc, loff); 1999 2306 loff = bfa_ioc_smem_pgoff(ioc, loff); 2307 + 2308 + /* 2309 + * Hold semaphore to serialize pll init and fwtrc. 2310 + */ 2311 + if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) 2312 + return BFA_STATUS_FAILED; 2313 + 2000 2314 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); 2001 2315 2002 2316 tlen = *trclen; ··· 2029 2329 } 2030 2330 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, 2031 2331 bfa_ioc_smem_pgnum(ioc, 0)); 2332 + 2333 + /* 2334 + * release semaphore. 
2335 + */ 2336 + bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg); 2337 + 2032 2338 bfa_trc(ioc, pgnum); 2033 2339 2034 2340 *trclen = tlen * sizeof(u32);
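[Editor's note] Among the bfa_ioc.c changes above, the heartbeat monitor no longer tolerates BFA_IOC_HB_FAIL_MAX consecutive misses: a single heartbeat counter that fails to advance within the (shortened) BFA_IOC_HB_TOV now triggers recovery. Below is a minimal sketch of that single-miss polling policy, with a plain loop and a fake counter standing in for the driver's timer callback and heartbeat register.

#include <stdio.h>

/*
 * Illustrative only: a polled heartbeat counter that the "firmware"
 * stops advancing after a few ticks.
 */
static unsigned int fake_heartbeat_reg(int tick)
{
	return tick < 3 ? (unsigned int)tick : 3;	/* stalls at tick 3 */
}

int main(void)
{
	unsigned int last = fake_heartbeat_reg(0);
	int tick;

	for (tick = 1; tick <= 6; tick++) {
		unsigned int hb = fake_heartbeat_reg(tick);

		if (hb == last) {
			/* New policy: one missed beat within the timeout is enough. */
			printf("tick %d: heartbeat stalled at %u, recover IOC\n",
			       tick, hb);
			break;
		}
		last = hb;
		printf("tick %d: heartbeat ok (%u)\n", tick, hb);
	}
	return 0;
}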
+54 -3
drivers/scsi/bfa/bfa_ioc.h
··· 74 74 bfa_os_addr_t lpu_mbox_cmd; 75 75 bfa_os_addr_t lpu_mbox; 76 76 bfa_os_addr_t pss_ctl_reg; 77 + bfa_os_addr_t pss_err_status_reg; 77 78 bfa_os_addr_t app_pll_fast_ctl_reg; 78 79 bfa_os_addr_t app_pll_slow_ctl_reg; 79 80 bfa_os_addr_t ioc_sem_reg; 80 81 bfa_os_addr_t ioc_usage_sem_reg; 82 + bfa_os_addr_t ioc_init_sem_reg; 81 83 bfa_os_addr_t ioc_usage_reg; 82 84 bfa_os_addr_t host_page_num_fn; 83 85 bfa_os_addr_t heartbeat; 84 86 bfa_os_addr_t ioc_fwstate; 85 87 bfa_os_addr_t ll_halt; 88 + bfa_os_addr_t err_set; 86 89 bfa_os_addr_t shirq_isr_next; 87 90 bfa_os_addr_t shirq_msk_next; 88 91 bfa_os_addr_t smem_page_start; ··· 157 154 struct bfa_timer_s ioc_timer; 158 155 struct bfa_timer_s sem_timer; 159 156 u32 hb_count; 160 - u32 hb_fail; 161 157 u32 retry_count; 162 158 struct list_head hb_notify_q; 163 159 void *dbg_fwsave; ··· 179 177 struct bfi_ioc_attr_s *attr; 180 178 struct bfa_ioc_cbfn_s *cbfn; 181 179 struct bfa_ioc_mbox_mod_s mbox_mod; 180 + struct bfa_ioc_hwif_s *ioc_hwif; 181 + }; 182 + 183 + struct bfa_ioc_hwif_s { 184 + bfa_status_t (*ioc_pll_init) (struct bfa_ioc_s *ioc); 185 + bfa_boolean_t (*ioc_firmware_lock) (struct bfa_ioc_s *ioc); 186 + void (*ioc_firmware_unlock) (struct bfa_ioc_s *ioc); 187 + u32 * (*ioc_fwimg_get_chunk) (struct bfa_ioc_s *ioc, 188 + u32 off); 189 + u32 (*ioc_fwimg_get_size) (struct bfa_ioc_s *ioc); 190 + void (*ioc_reg_init) (struct bfa_ioc_s *ioc); 191 + void (*ioc_map_port) (struct bfa_ioc_s *ioc); 192 + void (*ioc_isr_mode_set) (struct bfa_ioc_s *ioc, 193 + bfa_boolean_t msix); 194 + void (*ioc_notify_hbfail) (struct bfa_ioc_s *ioc); 195 + void (*ioc_ownership_reset) (struct bfa_ioc_s *ioc); 182 196 }; 183 197 184 198 #define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func) ··· 209 191 #define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit) 210 192 #define bfa_ioc_speed_sup(__ioc) \ 211 193 BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop) 194 + #define bfa_ioc_get_nports(__ioc) \ 195 + BFI_ADAPTER_GETP(NPORTS, (__ioc)->attr->adapter_prop) 196 + 197 + #define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++) 198 + #define BFA_IOC_FWIMG_MINSZ (16 * 1024) 199 + 200 + #define BFA_IOC_FLASH_CHUNK_NO(off) (off / BFI_FLASH_CHUNK_SZ_WORDS) 201 + #define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS) 202 + #define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS) 212 203 213 204 /** 214 205 * IOC mailbox interface ··· 234 207 /** 235 208 * IOC interfaces 236 209 */ 210 + #define bfa_ioc_pll_init(__ioc) ((__ioc)->ioc_hwif->ioc_pll_init(__ioc)) 211 + #define bfa_ioc_isr_mode_set(__ioc, __msix) \ 212 + ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix)) 213 + #define bfa_ioc_ownership_reset(__ioc) \ 214 + ((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc)) 215 + 216 + void bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc); 217 + void bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc); 237 218 void bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, 238 219 struct bfa_ioc_cbfn_s *cbfn, struct bfa_timer_mod_s *timer_mod, 239 220 struct bfa_trc_mod_s *trcmod, ··· 258 223 void bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param); 259 224 void bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *msg); 260 225 void bfa_ioc_error_isr(struct bfa_ioc_s *ioc); 261 - void bfa_ioc_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t intx); 262 - bfa_status_t bfa_ioc_pll_init(struct bfa_ioc_s *ioc); 263 226 bfa_boolean_t bfa_ioc_is_operational(struct bfa_ioc_s *ioc); 264 227 bfa_boolean_t bfa_ioc_is_disabled(struct 
bfa_ioc_s *ioc); 265 228 bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc); 266 229 bfa_boolean_t bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc); 267 230 void bfa_ioc_cfg_complete(struct bfa_ioc_s *ioc); 231 + enum bfa_ioc_type_e bfa_ioc_get_type(struct bfa_ioc_s *ioc); 232 + void bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num); 233 + void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver); 234 + void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver); 235 + void bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model); 236 + void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, 237 + char *manufacturer); 238 + void bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev); 239 + enum bfa_ioc_state bfa_ioc_get_state(struct bfa_ioc_s *ioc); 240 + 268 241 void bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr); 269 242 void bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc, 270 243 struct bfa_adapter_attr_s *ad_attr); ··· 280 237 void bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave); 281 238 bfa_status_t bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, 282 239 int *trclen); 240 + void bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc); 283 241 bfa_status_t bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, 284 242 int *trclen); 285 243 u32 bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr); ··· 289 245 bfa_boolean_t bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc); 290 246 void bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc, 291 247 struct bfa_ioc_hbfail_notify_s *notify); 248 + bfa_boolean_t bfa_ioc_sem_get(bfa_os_addr_t sem_reg); 249 + void bfa_ioc_sem_release(bfa_os_addr_t sem_reg); 250 + void bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc); 251 + void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, 252 + struct bfi_ioc_image_hdr_s *fwhdr); 253 + bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, 254 + struct bfi_ioc_image_hdr_s *fwhdr); 292 255 293 256 /* 294 257 * bfa mfg wwn API functions
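[Editor's note] The new struct bfa_ioc_hwif_s in bfa_ioc.h turns the ASIC-specific operations into a per-chip function-pointer table that the common IOC code reaches only through macros such as bfa_ioc_pll_init() and bfa_ioc_reg_init(), with bfa_ioc_set_cb_hwif()/bfa_ioc_set_ct_hwif() installing the right table at attach time. The fragment below sketches that dispatch pattern in plain C; the two-entry ops structure and the chip labels are reduced stand-ins, not the real interface.

#include <stdio.h>

/* Reduced stand-in for struct bfa_ioc_hwif_s: per-ASIC operations. */
struct ioc_hwif {
	int  (*pll_init)(void);
	void (*reg_init)(void);
};

static int  cb_pll_init(void) { puts("crossbow pll init"); return 0; }
static void cb_reg_init(void) { puts("crossbow reg init"); }
static int  ct_pll_init(void) { puts("catapult pll init"); return 0; }
static void ct_reg_init(void) { puts("catapult reg init"); }

static const struct ioc_hwif hwif_cb = { cb_pll_init, cb_reg_init };
static const struct ioc_hwif hwif_ct = { ct_pll_init, ct_reg_init };

struct ioc {
	const struct ioc_hwif *hwif;
};

/*
 * Equivalent of the bfa_ioc_pll_init()/ioc_reg_init() style macros:
 * common code never names the chip, it only calls through the table.
 */
#define ioc_pll_init(ioc)	((ioc)->hwif->pll_init())
#define ioc_reg_init(ioc)	((ioc)->hwif->reg_init())

int main(void)
{
	struct ioc ioc;
	int is_catapult = 1;	/* would come from the PCI device id */

	ioc.hwif = is_catapult ? &hwif_ct : &hwif_cb;
	ioc_reg_init(&ioc);
	ioc_pll_init(&ioc);
	return 0;
}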
+274
drivers/scsi/bfa/bfa_ioc_cb.c
··· 1 + /* 2 + * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 3 + * All rights reserved 4 + * www.brocade.com 5 + * 6 + * Linux driver for Brocade Fibre Channel Host Bus Adapter. 7 + * 8 + * This program is free software; you can redistribute it and/or modify it 9 + * under the terms of the GNU General Public License (GPL) Version 2 as 10 + * published by the Free Software Foundation 11 + * 12 + * This program is distributed in the hope that it will be useful, but 13 + * WITHOUT ANY WARRANTY; without even the implied warranty of 14 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 + * General Public License for more details. 16 + */ 17 + 18 + #include <bfa.h> 19 + #include <bfa_ioc.h> 20 + #include <bfa_fwimg_priv.h> 21 + #include <cna/bfa_cna_trcmod.h> 22 + #include <cs/bfa_debug.h> 23 + #include <bfi/bfi_ioc.h> 24 + #include <bfi/bfi_cbreg.h> 25 + #include <log/bfa_log_hal.h> 26 + #include <defs/bfa_defs_pci.h> 27 + 28 + BFA_TRC_FILE(CNA, IOC_CB); 29 + 30 + /* 31 + * forward declarations 32 + */ 33 + static bfa_status_t bfa_ioc_cb_pll_init(struct bfa_ioc_s *ioc); 34 + static bfa_boolean_t bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc); 35 + static void bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc); 36 + static u32 *bfa_ioc_cb_fwimg_get_chunk(struct bfa_ioc_s *ioc, u32 off); 37 + static u32 bfa_ioc_cb_fwimg_get_size(struct bfa_ioc_s *ioc); 38 + static void bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc); 39 + static void bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc); 40 + static void bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix); 41 + static void bfa_ioc_cb_notify_hbfail(struct bfa_ioc_s *ioc); 42 + static void bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc); 43 + 44 + struct bfa_ioc_hwif_s hwif_cb = { 45 + bfa_ioc_cb_pll_init, 46 + bfa_ioc_cb_firmware_lock, 47 + bfa_ioc_cb_firmware_unlock, 48 + bfa_ioc_cb_fwimg_get_chunk, 49 + bfa_ioc_cb_fwimg_get_size, 50 + bfa_ioc_cb_reg_init, 51 + bfa_ioc_cb_map_port, 52 + bfa_ioc_cb_isr_mode_set, 53 + bfa_ioc_cb_notify_hbfail, 54 + bfa_ioc_cb_ownership_reset, 55 + }; 56 + 57 + /** 58 + * Called from bfa_ioc_attach() to map asic specific calls. 59 + */ 60 + void 61 + bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc) 62 + { 63 + ioc->ioc_hwif = &hwif_cb; 64 + } 65 + 66 + static u32 * 67 + bfa_ioc_cb_fwimg_get_chunk(struct bfa_ioc_s *ioc, u32 off) 68 + { 69 + return bfi_image_cb_get_chunk(off); 70 + } 71 + 72 + static u32 73 + bfa_ioc_cb_fwimg_get_size(struct bfa_ioc_s *ioc) 74 + { 75 + return bfi_image_cb_size; 76 + } 77 + 78 + /** 79 + * Return true if firmware of current driver matches the running firmware. 80 + */ 81 + static bfa_boolean_t 82 + bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc) 83 + { 84 + return BFA_TRUE; 85 + } 86 + 87 + static void 88 + bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc) 89 + { 90 + } 91 + 92 + /** 93 + * Notify other functions on HB failure. 
94 + */ 95 + static void 96 + bfa_ioc_cb_notify_hbfail(struct bfa_ioc_s *ioc) 97 + { 98 + bfa_reg_write(ioc->ioc_regs.err_set, __PSS_ERR_STATUS_SET); 99 + bfa_reg_read(ioc->ioc_regs.err_set); 100 + } 101 + 102 + /** 103 + * Host to LPU mailbox message addresses 104 + */ 105 + static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = { 106 + { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 }, 107 + { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 } 108 + }; 109 + 110 + /** 111 + * Host <-> LPU mailbox command/status registers 112 + */ 113 + static struct { u32 hfn, lpu; } iocreg_mbcmd[] = { 114 + { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT }, 115 + { HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT } 116 + }; 117 + 118 + static void 119 + bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc) 120 + { 121 + bfa_os_addr_t rb; 122 + int pcifn = bfa_ioc_pcifn(ioc); 123 + 124 + rb = bfa_ioc_bar0(ioc); 125 + 126 + ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox; 127 + ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox; 128 + ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn; 129 + 130 + if (ioc->port_id == 0) { 131 + ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG; 132 + ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG; 133 + } else { 134 + ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG); 135 + ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG); 136 + } 137 + 138 + /** 139 + * Host <-> LPU mailbox command/status registers 140 + */ 141 + ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd[pcifn].hfn; 142 + ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd[pcifn].lpu; 143 + 144 + /* 145 + * PSS control registers 146 + */ 147 + ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG); 148 + ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG); 149 + ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_400_CTL_REG); 150 + ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_212_CTL_REG); 151 + 152 + /* 153 + * IOC semaphore registers and serialization 154 + */ 155 + ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG); 156 + ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG); 157 + 158 + /** 159 + * sram memory access 160 + */ 161 + ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START); 162 + ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CB; 163 + 164 + /* 165 + * err set reg : for notification of hb failure 166 + */ 167 + ioc->ioc_regs.err_set = (rb + ERR_SET_REG); 168 + } 169 + 170 + /** 171 + * Initialize IOC to port mapping. 172 + */ 173 + static void 174 + bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc) 175 + { 176 + /** 177 + * For crossbow, port id is same as pci function. 178 + */ 179 + ioc->port_id = bfa_ioc_pcifn(ioc); 180 + bfa_trc(ioc, ioc->port_id); 181 + } 182 + 183 + /** 184 + * Set interrupt mode for a function: INTX or MSIX 185 + */ 186 + static void 187 + bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix) 188 + { 189 + } 190 + 191 + static bfa_status_t 192 + bfa_ioc_cb_pll_init(struct bfa_ioc_s *ioc) 193 + { 194 + bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva; 195 + u32 pll_sclk, pll_fclk; 196 + 197 + /* 198 + * Hold semaphore so that nobody can access the chip during init. 
199 + */ 200 + bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg); 201 + 202 + pll_sclk = __APP_PLL_212_ENABLE | __APP_PLL_212_LRESETN | 203 + __APP_PLL_212_P0_1(3U) | 204 + __APP_PLL_212_JITLMT0_1(3U) | 205 + __APP_PLL_212_CNTLMT0_1(3U); 206 + pll_fclk = __APP_PLL_400_ENABLE | __APP_PLL_400_LRESETN | 207 + __APP_PLL_400_RSEL200500 | __APP_PLL_400_P0_1(3U) | 208 + __APP_PLL_400_JITLMT0_1(3U) | 209 + __APP_PLL_400_CNTLMT0_1(3U); 210 + 211 + bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT); 212 + bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT); 213 + 214 + bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU); 215 + bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU); 216 + bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU); 217 + bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU); 218 + bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU); 219 + bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU); 220 + 221 + bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, 222 + __APP_PLL_212_LOGIC_SOFT_RESET); 223 + bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, 224 + __APP_PLL_212_BYPASS | 225 + __APP_PLL_212_LOGIC_SOFT_RESET); 226 + bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, 227 + __APP_PLL_400_LOGIC_SOFT_RESET); 228 + bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, 229 + __APP_PLL_400_BYPASS | 230 + __APP_PLL_400_LOGIC_SOFT_RESET); 231 + bfa_os_udelay(2); 232 + bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, 233 + __APP_PLL_212_LOGIC_SOFT_RESET); 234 + bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, 235 + __APP_PLL_400_LOGIC_SOFT_RESET); 236 + 237 + bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, 238 + pll_sclk | __APP_PLL_212_LOGIC_SOFT_RESET); 239 + bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, 240 + pll_fclk | __APP_PLL_400_LOGIC_SOFT_RESET); 241 + 242 + /** 243 + * Wait for PLLs to lock. 244 + */ 245 + bfa_os_udelay(2000); 246 + bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU); 247 + bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU); 248 + 249 + bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk); 250 + bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk); 251 + 252 + /* 253 + * release semaphore. 254 + */ 255 + bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg); 256 + 257 + return BFA_STATUS_OK; 258 + } 259 + 260 + /** 261 + * Cleanup hw semaphore and usecnt registers 262 + */ 263 + static void 264 + bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc) 265 + { 266 + 267 + /* 268 + * Read the hw sem reg to make sure that it is locked 269 + * before we clear it. If it is not locked, writing 1 270 + * will lock it instead of clearing it. 271 + */ 272 + bfa_reg_read(ioc->ioc_regs.ioc_sem_reg); 273 + bfa_ioc_hw_sem_release(ioc); 274 + }
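[Editor's note] Both new PLL-init routines bracket the register programming with bfa_ioc_sem_get()/bfa_ioc_sem_release() on the HOST_SEM2 init semaphore, so only one PCI function touches the PLLs or the firmware trace area at a time. A rough model of that read-to-acquire / write-1-to-release semaphore, with an ordinary variable in place of the hardware register and a bounded spin like BFA_SEM_SPINCNT, is sketched below; it is illustrative only.

#include <stdio.h>

/*
 * Model of the hardware semaphore register: the first read returns 0
 * (the caller now owns it), later reads return 1 until 1 is written back.
 */
static int sem_reg;

static unsigned int sem_reg_read(void)
{
	unsigned int r = (unsigned int)sem_reg;

	if (sem_reg == 0)
		sem_reg = 1;		/* read side effect: mark as taken */
	return r;
}

static void sem_reg_write(unsigned int v)
{
	if (v == 1)
		sem_reg = 0;		/* writing 1 releases the semaphore */
}

/* Spin a bounded number of times, as bfa_ioc_sem_get() does. */
static int sem_get(int spin_limit)
{
	int cnt;

	for (cnt = 0; cnt < spin_limit; cnt++) {
		if (sem_reg_read() == 0)
			return 1;	/* acquired */
		/* the real code delays here (bfa_os_udelay) before retrying */
	}
	return 0;			/* never acquired within the limit */
}

int main(void)
{
	if (sem_get(3000)) {
		puts("init semaphore held: safe to program PLLs");

		/* A second caller would now spin: the register reads as busy. */
		if (sem_reg_read() == 1)
			puts("second reader sees the semaphore taken");

		sem_reg_write(1);	/* release by writing 1 */
		puts("init semaphore released");
	}
	return 0;
}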
+423
drivers/scsi/bfa/bfa_ioc_ct.c
··· 1 + /* 2 + * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 3 + * All rights reserved 4 + * www.brocade.com 5 + * 6 + * Linux driver for Brocade Fibre Channel Host Bus Adapter. 7 + * 8 + * This program is free software; you can redistribute it and/or modify it 9 + * under the terms of the GNU General Public License (GPL) Version 2 as 10 + * published by the Free Software Foundation 11 + * 12 + * This program is distributed in the hope that it will be useful, but 13 + * WITHOUT ANY WARRANTY; without even the implied warranty of 14 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 + * General Public License for more details. 16 + */ 17 + 18 + #include <bfa.h> 19 + #include <bfa_ioc.h> 20 + #include <bfa_fwimg_priv.h> 21 + #include <cna/bfa_cna_trcmod.h> 22 + #include <cs/bfa_debug.h> 23 + #include <bfi/bfi_ioc.h> 24 + #include <bfi/bfi_ctreg.h> 25 + #include <log/bfa_log_hal.h> 26 + #include <defs/bfa_defs_pci.h> 27 + 28 + BFA_TRC_FILE(CNA, IOC_CT); 29 + 30 + /* 31 + * forward declarations 32 + */ 33 + static bfa_status_t bfa_ioc_ct_pll_init(struct bfa_ioc_s *ioc); 34 + static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc); 35 + static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc); 36 + static u32* bfa_ioc_ct_fwimg_get_chunk(struct bfa_ioc_s *ioc, 37 + u32 off); 38 + static u32 bfa_ioc_ct_fwimg_get_size(struct bfa_ioc_s *ioc); 39 + static void bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc); 40 + static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc); 41 + static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix); 42 + static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc); 43 + static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc); 44 + 45 + struct bfa_ioc_hwif_s hwif_ct = { 46 + bfa_ioc_ct_pll_init, 47 + bfa_ioc_ct_firmware_lock, 48 + bfa_ioc_ct_firmware_unlock, 49 + bfa_ioc_ct_fwimg_get_chunk, 50 + bfa_ioc_ct_fwimg_get_size, 51 + bfa_ioc_ct_reg_init, 52 + bfa_ioc_ct_map_port, 53 + bfa_ioc_ct_isr_mode_set, 54 + bfa_ioc_ct_notify_hbfail, 55 + bfa_ioc_ct_ownership_reset, 56 + }; 57 + 58 + /** 59 + * Called from bfa_ioc_attach() to map asic specific calls. 60 + */ 61 + void 62 + bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc) 63 + { 64 + ioc->ioc_hwif = &hwif_ct; 65 + } 66 + 67 + static u32* 68 + bfa_ioc_ct_fwimg_get_chunk(struct bfa_ioc_s *ioc, u32 off) 69 + { 70 + return bfi_image_ct_get_chunk(off); 71 + } 72 + 73 + static u32 74 + bfa_ioc_ct_fwimg_get_size(struct bfa_ioc_s *ioc) 75 + { 76 + return bfi_image_ct_size; 77 + } 78 + 79 + /** 80 + * Return true if firmware of current driver matches the running firmware. 81 + */ 82 + static bfa_boolean_t 83 + bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc) 84 + { 85 + enum bfi_ioc_state ioc_fwstate; 86 + u32 usecnt; 87 + struct bfi_ioc_image_hdr_s fwhdr; 88 + 89 + /** 90 + * Firmware match check is relevant only for CNA. 91 + */ 92 + if (!ioc->cna) 93 + return BFA_TRUE; 94 + 95 + /** 96 + * If bios boot (flash based) -- do not increment usage count 97 + */ 98 + if (bfa_ioc_ct_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ) 99 + return BFA_TRUE; 100 + 101 + bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); 102 + usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg); 103 + 104 + /** 105 + * If usage count is 0, always return TRUE. 
106 + */ 107 + if (usecnt == 0) { 108 + bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 1); 109 + bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); 110 + bfa_trc(ioc, usecnt); 111 + return BFA_TRUE; 112 + } 113 + 114 + ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate); 115 + bfa_trc(ioc, ioc_fwstate); 116 + 117 + /** 118 + * Use count cannot be non-zero and chip in uninitialized state. 119 + */ 120 + bfa_assert(ioc_fwstate != BFI_IOC_UNINIT); 121 + 122 + /** 123 + * Check if another driver with a different firmware is active 124 + */ 125 + bfa_ioc_fwver_get(ioc, &fwhdr); 126 + if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) { 127 + bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); 128 + bfa_trc(ioc, usecnt); 129 + return BFA_FALSE; 130 + } 131 + 132 + /** 133 + * Same firmware version. Increment the reference count. 134 + */ 135 + usecnt++; 136 + bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt); 137 + bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); 138 + bfa_trc(ioc, usecnt); 139 + return BFA_TRUE; 140 + } 141 + 142 + static void 143 + bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc) 144 + { 145 + u32 usecnt; 146 + 147 + /** 148 + * Firmware lock is relevant only for CNA. 149 + * If bios boot (flash based) -- do not decrement usage count 150 + */ 151 + if (!ioc->cna || bfa_ioc_ct_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ) 152 + return; 153 + 154 + /** 155 + * decrement usage count 156 + */ 157 + bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); 158 + usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg); 159 + bfa_assert(usecnt > 0); 160 + 161 + usecnt--; 162 + bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt); 163 + bfa_trc(ioc, usecnt); 164 + 165 + bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); 166 + } 167 + 168 + /** 169 + * Notify other functions on HB failure. 
170 + */ 171 + static void 172 + bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc) 173 + { 174 + if (ioc->cna) { 175 + bfa_reg_write(ioc->ioc_regs.ll_halt, __FW_INIT_HALT_P); 176 + /* Wait for halt to take effect */ 177 + bfa_reg_read(ioc->ioc_regs.ll_halt); 178 + } else { 179 + bfa_reg_write(ioc->ioc_regs.err_set, __PSS_ERR_STATUS_SET); 180 + bfa_reg_read(ioc->ioc_regs.err_set); 181 + } 182 + } 183 + 184 + /** 185 + * Host to LPU mailbox message addresses 186 + */ 187 + static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = { 188 + { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 }, 189 + { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 }, 190 + { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 }, 191 + { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 } 192 + }; 193 + 194 + /** 195 + * Host <-> LPU mailbox command/status registers - port 0 196 + */ 197 + static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = { 198 + { HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT }, 199 + { HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT }, 200 + { HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT }, 201 + { HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT } 202 + }; 203 + 204 + /** 205 + * Host <-> LPU mailbox command/status registers - port 1 206 + */ 207 + static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = { 208 + { HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT }, 209 + { HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT }, 210 + { HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT }, 211 + { HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT } 212 + }; 213 + 214 + static void 215 + bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc) 216 + { 217 + bfa_os_addr_t rb; 218 + int pcifn = bfa_ioc_pcifn(ioc); 219 + 220 + rb = bfa_ioc_bar0(ioc); 221 + 222 + ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox; 223 + ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox; 224 + ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn; 225 + 226 + if (ioc->port_id == 0) { 227 + ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG; 228 + ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG; 229 + ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn; 230 + ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu; 231 + ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; 232 + } else { 233 + ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG); 234 + ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG); 235 + ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn; 236 + ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu; 237 + ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1; 238 + } 239 + 240 + /* 241 + * PSS control registers 242 + */ 243 + ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG); 244 + ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG); 245 + ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG); 246 + ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG); 247 + 248 + /* 249 + * IOC semaphore registers and serialization 250 + */ 251 + ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG); 252 + ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG); 253 + ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG); 254 + ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT); 255 + 256 + /** 257 + * sram memory access 258 + */ 259 + ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START); 260 + ioc->ioc_regs.smem_pg0 = 
BFI_IOC_SMEM_PG0_CT; 261 + 262 + /* 263 + * err set reg : for notification of hb failure in fcmode 264 + */ 265 + ioc->ioc_regs.err_set = (rb + ERR_SET_REG); 266 + } 267 + 268 + /** 269 + * Initialize IOC to port mapping. 270 + */ 271 + 272 + #define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8) 273 + static void 274 + bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc) 275 + { 276 + bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva; 277 + u32 r32; 278 + 279 + /** 280 + * For catapult, base port id on personality register and IOC type 281 + */ 282 + r32 = bfa_reg_read(rb + FNC_PERS_REG); 283 + r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)); 284 + ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH; 285 + 286 + bfa_trc(ioc, bfa_ioc_pcifn(ioc)); 287 + bfa_trc(ioc, ioc->port_id); 288 + } 289 + 290 + /** 291 + * Set interrupt mode for a function: INTX or MSIX 292 + */ 293 + static void 294 + bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix) 295 + { 296 + bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva; 297 + u32 r32, mode; 298 + 299 + r32 = bfa_reg_read(rb + FNC_PERS_REG); 300 + bfa_trc(ioc, r32); 301 + 302 + mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) & 303 + __F0_INTX_STATUS; 304 + 305 + /** 306 + * If already in desired mode, do not change anything 307 + */ 308 + if (!msix && mode) 309 + return; 310 + 311 + if (msix) 312 + mode = __F0_INTX_STATUS_MSIX; 313 + else 314 + mode = __F0_INTX_STATUS_INTA; 315 + 316 + r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))); 317 + r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))); 318 + bfa_trc(ioc, r32); 319 + 320 + bfa_reg_write(rb + FNC_PERS_REG, r32); 321 + } 322 + 323 + static bfa_status_t 324 + bfa_ioc_ct_pll_init(struct bfa_ioc_s *ioc) 325 + { 326 + bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva; 327 + u32 pll_sclk, pll_fclk, r32; 328 + 329 + /* 330 + * Hold semaphore so that nobody can access the chip during init. 
331 + */ 332 + bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg); 333 + 334 + pll_sclk = __APP_PLL_312_LRESETN | __APP_PLL_312_ENARST | 335 + __APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(3U) | 336 + __APP_PLL_312_JITLMT0_1(3U) | 337 + __APP_PLL_312_CNTLMT0_1(1U); 338 + pll_fclk = __APP_PLL_425_LRESETN | __APP_PLL_425_ENARST | 339 + __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) | 340 + __APP_PLL_425_JITLMT0_1(3U) | 341 + __APP_PLL_425_CNTLMT0_1(1U); 342 + 343 + /** 344 + * For catapult, choose operational mode FC/FCoE 345 + */ 346 + if (ioc->fcmode) { 347 + bfa_reg_write((rb + OP_MODE), 0); 348 + bfa_reg_write((rb + ETH_MAC_SER_REG), 349 + __APP_EMS_CMLCKSEL | 350 + __APP_EMS_REFCKBUFEN2 | 351 + __APP_EMS_CHANNEL_SEL); 352 + } else { 353 + ioc->pllinit = BFA_TRUE; 354 + bfa_reg_write((rb + OP_MODE), __GLOBAL_FCOE_MODE); 355 + bfa_reg_write((rb + ETH_MAC_SER_REG), 356 + __APP_EMS_REFCKBUFEN1); 357 + } 358 + 359 + bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT); 360 + bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT); 361 + 362 + bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU); 363 + bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU); 364 + bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU); 365 + bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU); 366 + bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU); 367 + bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU); 368 + 369 + bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk | 370 + __APP_PLL_312_LOGIC_SOFT_RESET); 371 + bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk | 372 + __APP_PLL_425_LOGIC_SOFT_RESET); 373 + bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk | 374 + __APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE); 375 + bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk | 376 + __APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE); 377 + 378 + /** 379 + * Wait for PLLs to lock. 380 + */ 381 + bfa_reg_read(rb + HOSTFN0_INT_MSK); 382 + bfa_os_udelay(2000); 383 + bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU); 384 + bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU); 385 + 386 + bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk | 387 + __APP_PLL_312_ENABLE); 388 + bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk | 389 + __APP_PLL_425_ENABLE); 390 + 391 + bfa_reg_write((rb + MBIST_CTL_REG), __EDRAM_BISTR_START); 392 + bfa_os_udelay(1000); 393 + r32 = bfa_reg_read((rb + MBIST_STAT_REG)); 394 + bfa_trc(ioc, r32); 395 + /* 396 + * release semaphore. 397 + */ 398 + bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg); 399 + 400 + return BFA_STATUS_OK; 401 + } 402 + 403 + /** 404 + * Cleanup hw semaphore and usecnt registers 405 + */ 406 + static void 407 + bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc) 408 + { 409 + 410 + if (ioc->cna) { 411 + bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); 412 + bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 0); 413 + bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); 414 + } 415 + 416 + /* 417 + * Read the hw sem reg to make sure that it is locked 418 + * before we clear it. If it is not locked, writing 1 419 + * will lock it instead of clearing it. 420 + */ 421 + bfa_reg_read(ioc->ioc_regs.ioc_sem_reg); 422 + bfa_ioc_hw_sem_release(ioc); 423 + }
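Note on the hunk above: on a CNA both PCI functions share one firmware image, so the Catapult hwif keeps a usage count in ioc_usage_reg, serialized by ioc_usage_sem_reg. The first locker (usecnt == 0) owns firmware boot, later lockers with a matching firmware version only bump the count, and unlock decrements it. The callers sit in the common IOC state machine rather than in this hunk; the lock-side counterpart of bfa_ioc_ct_firmware_unlock() is referred to below as bfa_ioc_ct_firmware_lock(), and the wrapper names are hypothetical. A hedged sketch of how the pair is meant to bracket bring-up and teardown:

/*
 * Illustrative pairing only -- the wrappers are hypothetical; the real
 * callers reach these routines through the per-ASIC hwif hooks.
 */
static void
example_ct_ioc_bringup(struct bfa_ioc_s *ioc)
{
	if (!bfa_ioc_ct_firmware_lock(ioc)) {
		/* another function is running incompatible firmware: bail */
		return;
	}
	/* first or compatible user: boot/reuse firmware, then enable IOC */
}

static void
example_ct_ioc_teardown(struct bfa_ioc_s *ioc)
{
	/* disable the IOC first, then drop the shared-firmware reference */
	bfa_ioc_ct_firmware_unlock(ioc);
}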
+19 -5
drivers/scsi/bfa/bfa_iocfc.c
··· 172 172 */ 173 173 if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT) { 174 174 iocfc->hwif.hw_reginit = bfa_hwct_reginit; 175 + iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack; 175 176 iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack; 176 177 iocfc->hwif.hw_msix_init = bfa_hwct_msix_init; 177 178 iocfc->hwif.hw_msix_install = bfa_hwct_msix_install; ··· 181 180 iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs; 182 181 } else { 183 182 iocfc->hwif.hw_reginit = bfa_hwcb_reginit; 183 + iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack; 184 184 iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack; 185 185 iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init; 186 186 iocfc->hwif.hw_msix_install = bfa_hwcb_msix_install; ··· 338 336 bfa_cb_init(bfa->bfad, BFA_STATUS_OK); 339 337 else 340 338 bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED); 341 - } else 342 - bfa->iocfc.action = BFA_IOCFC_ACT_NONE; 339 + } else { 340 + if (bfa->iocfc.cfgdone) 341 + bfa->iocfc.action = BFA_IOCFC_ACT_NONE; 342 + } 343 343 } 344 344 345 345 static void ··· 623 619 624 620 bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod, 625 621 bfa->trcmod, bfa->aen, bfa->logm); 626 - bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC); 627 - bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs); 628 622 629 623 /** 630 624 * Choose FC (ssid: 0x1C) v/s FCoE (ssid: 0x14) mode. 631 625 */ 632 626 if (0) 633 627 bfa_ioc_set_fcmode(&bfa->ioc); 628 + 629 + bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC); 630 + bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs); 634 631 635 632 bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev); 636 633 bfa_iocfc_mem_claim(bfa, cfg, meminfo); ··· 659 654 { 660 655 bfa->iocfc.action = BFA_IOCFC_ACT_INIT; 661 656 bfa_ioc_enable(&bfa->ioc); 662 - bfa_msix_install(bfa); 663 657 } 664 658 665 659 /** ··· 801 797 return BFA_STATUS_DEVBUSY; 802 798 } 803 799 800 + if (!bfa_iocfc_is_operational(bfa)) { 801 + bfa_trc(bfa, 0); 802 + return BFA_STATUS_IOC_NON_OP; 803 + } 804 + 804 805 iocfc->stats_busy = BFA_TRUE; 805 806 iocfc->stats_ret = stats; 806 807 iocfc->stats_cbfn = cbfn; ··· 824 815 if (iocfc->stats_busy) { 825 816 bfa_trc(bfa, iocfc->stats_busy); 826 817 return BFA_STATUS_DEVBUSY; 818 + } 819 + 820 + if (!bfa_iocfc_is_operational(bfa)) { 821 + bfa_trc(bfa, 0); 822 + return BFA_STATUS_IOC_NON_OP; 827 823 } 828 824 829 825 iocfc->stats_busy = BFA_TRUE;
+3
drivers/scsi/bfa/bfa_iocfc.h
··· 54 54 */ 55 55 struct bfa_hwif_s { 56 56 void (*hw_reginit)(struct bfa_s *bfa); 57 + void (*hw_reqq_ack)(struct bfa_s *bfa, int reqq); 57 58 void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq); 58 59 void (*hw_msix_init)(struct bfa_s *bfa, int nvecs); 59 60 void (*hw_msix_install)(struct bfa_s *bfa); ··· 144 143 void bfa_msix_lpu_err(struct bfa_s *bfa, int vec); 145 144 146 145 void bfa_hwcb_reginit(struct bfa_s *bfa); 146 + void bfa_hwcb_reqq_ack(struct bfa_s *bfa, int rspq); 147 147 void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq); 148 148 void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs); 149 149 void bfa_hwcb_msix_install(struct bfa_s *bfa); ··· 153 151 void bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, 154 152 u32 *nvecs, u32 *maxvec); 155 153 void bfa_hwct_reginit(struct bfa_s *bfa); 154 + void bfa_hwct_reqq_ack(struct bfa_s *bfa, int rspq); 156 155 void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq); 157 156 void bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs); 158 157 void bfa_hwct_msix_install(struct bfa_s *bfa);
+11 -11
drivers/scsi/bfa/bfa_ioim.c
··· 149 149 break; 150 150 151 151 default: 152 - bfa_assert(0); 152 + bfa_sm_fault(ioim->bfa, event); 153 153 } 154 154 } 155 155 ··· 194 194 break; 195 195 196 196 default: 197 - bfa_assert(0); 197 + bfa_sm_fault(ioim->bfa, event); 198 198 } 199 199 } 200 200 ··· 259 259 break; 260 260 261 261 default: 262 - bfa_assert(0); 262 + bfa_sm_fault(ioim->bfa, event); 263 263 } 264 264 } 265 265 ··· 317 317 break; 318 318 319 319 default: 320 - bfa_assert(0); 320 + bfa_sm_fault(ioim->bfa, event); 321 321 } 322 322 } 323 323 ··· 377 377 break; 378 378 379 379 default: 380 - bfa_assert(0); 380 + bfa_sm_fault(ioim->bfa, event); 381 381 } 382 382 } 383 383 ··· 419 419 break; 420 420 421 421 default: 422 - bfa_assert(0); 422 + bfa_sm_fault(ioim->bfa, event); 423 423 } 424 424 } 425 425 ··· 467 467 break; 468 468 469 469 default: 470 - bfa_assert(0); 470 + bfa_sm_fault(ioim->bfa, event); 471 471 } 472 472 } 473 473 ··· 516 516 break; 517 517 518 518 default: 519 - bfa_assert(0); 519 + bfa_sm_fault(ioim->bfa, event); 520 520 } 521 521 } 522 522 ··· 544 544 break; 545 545 546 546 default: 547 - bfa_assert(0); 547 + bfa_sm_fault(ioim->bfa, event); 548 548 } 549 549 } 550 550 ··· 577 577 break; 578 578 579 579 default: 580 - bfa_assert(0); 580 + bfa_sm_fault(ioim->bfa, event); 581 581 } 582 582 } 583 583 ··· 605 605 break; 606 606 607 607 default: 608 - bfa_assert(0); 608 + bfa_sm_fault(ioim->bfa, event); 609 609 } 610 610 } 611 611
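The pattern above (repeated in bfa_itnim.c, bfa_rport.c, bfa_tskim.c, bfa_lps.c and fcpim.c in this same merge) replaces the bare bfa_assert(0) in each state machine's default: arm with bfa_sm_fault(bfa, event), so the unexpected event value is recorded instead of being lost at the assert. The macro body itself is not part of this hunk; a plausible minimal definition, sketched here for illustration only, would be:

/*
 * Sketch only -- the real bfa_sm_fault() lives in the BFA common headers.
 * The idea is to trace the offending event before asserting, so the value
 * survives into the trace buffer / crash dump.
 */
#define bfa_sm_fault(__mod, __event)	do {				\
	bfa_trc(__mod, (__event));					\
	bfa_assert(0);							\
} while (0)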
+15 -15
drivers/scsi/bfa/bfa_itnim.c
··· 144 144 break; 145 145 146 146 default: 147 - bfa_assert(0); 147 + bfa_sm_fault(itnim->bfa, event); 148 148 } 149 149 } 150 150 ··· 175 175 break; 176 176 177 177 default: 178 - bfa_assert(0); 178 + bfa_sm_fault(itnim->bfa, event); 179 179 } 180 180 } 181 181 ··· 212 212 break; 213 213 214 214 default: 215 - bfa_assert(0); 215 + bfa_sm_fault(itnim->bfa, event); 216 216 } 217 217 } 218 218 ··· 247 247 break; 248 248 249 249 default: 250 - bfa_assert(0); 250 + bfa_sm_fault(itnim->bfa, event); 251 251 } 252 252 } 253 253 ··· 275 275 break; 276 276 277 277 default: 278 - bfa_assert(0); 278 + bfa_sm_fault(itnim->bfa, event); 279 279 } 280 280 } 281 281 ··· 317 317 break; 318 318 319 319 default: 320 - bfa_assert(0); 320 + bfa_sm_fault(itnim->bfa, event); 321 321 } 322 322 } 323 323 ··· 348 348 break; 349 349 350 350 default: 351 - bfa_assert(0); 351 + bfa_sm_fault(itnim->bfa, event); 352 352 } 353 353 } 354 354 ··· 385 385 break; 386 386 387 387 default: 388 - bfa_assert(0); 388 + bfa_sm_fault(itnim->bfa, event); 389 389 } 390 390 } 391 391 ··· 413 413 break; 414 414 415 415 default: 416 - bfa_assert(0); 416 + bfa_sm_fault(itnim->bfa, event); 417 417 } 418 418 } 419 419 ··· 442 442 break; 443 443 444 444 default: 445 - bfa_assert(0); 445 + bfa_sm_fault(itnim->bfa, event); 446 446 } 447 447 } 448 448 ··· 470 470 break; 471 471 472 472 default: 473 - bfa_assert(0); 473 + bfa_sm_fault(itnim->bfa, event); 474 474 } 475 475 } 476 476 ··· 502 502 break; 503 503 504 504 default: 505 - bfa_assert(0); 505 + bfa_sm_fault(itnim->bfa, event); 506 506 } 507 507 } 508 508 ··· 538 538 break; 539 539 540 540 default: 541 - bfa_assert(0); 541 + bfa_sm_fault(itnim->bfa, event); 542 542 } 543 543 } 544 544 ··· 559 559 break; 560 560 561 561 default: 562 - bfa_assert(0); 562 + bfa_sm_fault(itnim->bfa, event); 563 563 } 564 564 } 565 565 ··· 583 583 break; 584 584 585 585 default: 586 - bfa_assert(0); 586 + bfa_sm_fault(itnim->bfa, event); 587 587 } 588 588 } 589 589
+125 -9
drivers/scsi/bfa/bfa_lps.c
··· 18 18 #include <bfa.h> 19 19 #include <bfi/bfi_lps.h> 20 20 #include <cs/bfa_debug.h> 21 + #include <defs/bfa_defs_pci.h> 21 22 22 23 BFA_TRC_FILE(HAL, LPS); 23 24 BFA_MODULE(lps); 24 25 25 26 #define BFA_LPS_MIN_LPORTS (1) 26 27 #define BFA_LPS_MAX_LPORTS (256) 28 + 29 + /* 30 + * Maximum Vports supported per physical port or vf. 31 + */ 32 + #define BFA_LPS_MAX_VPORTS_SUPP_CB 255 33 + #define BFA_LPS_MAX_VPORTS_SUPP_CT 190 27 34 28 35 /** 29 36 * forward declarations ··· 56 49 static void bfa_lps_send_logout(struct bfa_lps_s *lps); 57 50 static void bfa_lps_login_comp(struct bfa_lps_s *lps); 58 51 static void bfa_lps_logout_comp(struct bfa_lps_s *lps); 59 - 52 + static void bfa_lps_cvl_event(struct bfa_lps_s *lps); 60 53 61 54 /** 62 55 * lps_pvt BFA LPS private functions ··· 69 62 BFA_LPS_SM_RESUME = 4, /* space present in reqq queue */ 70 63 BFA_LPS_SM_DELETE = 5, /* lps delete from user */ 71 64 BFA_LPS_SM_OFFLINE = 6, /* Link is offline */ 65 + BFA_LPS_SM_RX_CVL = 7, /* Rx clear virtual link */ 72 66 }; 73 67 74 68 static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event); ··· 99 91 bfa_sm_set_state(lps, bfa_lps_sm_login); 100 92 bfa_lps_send_login(lps); 101 93 } 94 + if (lps->fdisc) 95 + bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, 96 + BFA_PL_EID_LOGIN, 0, "FDISC Request"); 97 + else 98 + bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, 99 + BFA_PL_EID_LOGIN, 0, "FLOGI Request"); 102 100 break; 103 101 104 102 case BFA_LPS_SM_LOGOUT: ··· 115 101 bfa_lps_free(lps); 116 102 break; 117 103 104 + case BFA_LPS_SM_RX_CVL: 118 105 case BFA_LPS_SM_OFFLINE: 119 106 break; 120 107 ··· 127 112 break; 128 113 129 114 default: 130 - bfa_assert(0); 115 + bfa_sm_fault(lps->bfa, event); 131 116 } 132 117 } 133 118 ··· 142 127 143 128 switch (event) { 144 129 case BFA_LPS_SM_FWRSP: 145 - if (lps->status == BFA_STATUS_OK) 130 + if (lps->status == BFA_STATUS_OK) { 146 131 bfa_sm_set_state(lps, bfa_lps_sm_online); 147 - else 132 + if (lps->fdisc) 133 + bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, 134 + BFA_PL_EID_LOGIN, 0, "FDISC Accept"); 135 + else 136 + bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, 137 + BFA_PL_EID_LOGIN, 0, "FLOGI Accept"); 138 + } else { 148 139 bfa_sm_set_state(lps, bfa_lps_sm_init); 140 + if (lps->fdisc) 141 + bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, 142 + BFA_PL_EID_LOGIN, 0, 143 + "FDISC Fail (RJT or timeout)"); 144 + else 145 + bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, 146 + BFA_PL_EID_LOGIN, 0, 147 + "FLOGI Fail (RJT or timeout)"); 148 + } 149 149 bfa_lps_login_comp(lps); 150 150 break; 151 151 ··· 169 139 break; 170 140 171 141 default: 172 - bfa_assert(0); 142 + bfa_sm_fault(lps->bfa, event); 173 143 } 174 144 } 175 145 ··· 192 162 bfa_reqq_wcancel(&lps->wqe); 193 163 break; 194 164 165 + case BFA_LPS_SM_RX_CVL: 166 + /* 167 + * Login was not even sent out; so when getting out 168 + * of this state, it will appear like a login retry 169 + * after Clear virtual link 170 + */ 171 + break; 172 + 195 173 default: 196 - bfa_assert(0); 174 + bfa_sm_fault(lps->bfa, event); 197 175 } 198 176 } 199 177 ··· 223 185 bfa_sm_set_state(lps, bfa_lps_sm_logout); 224 186 bfa_lps_send_logout(lps); 225 187 } 188 + bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, 189 + BFA_PL_EID_LOGO, 0, "Logout"); 190 + break; 191 + 192 + case BFA_LPS_SM_RX_CVL: 193 + bfa_sm_set_state(lps, bfa_lps_sm_init); 194 + 195 + /* Let the vport module know about this event */ 196 + bfa_lps_cvl_event(lps); 197 + bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, 198 + BFA_PL_EID_FIP_FCF_CVL, 0, "FCF 
Clear Virt. Link Rx"); 226 199 break; 227 200 228 201 case BFA_LPS_SM_OFFLINE: ··· 242 193 break; 243 194 244 195 default: 245 - bfa_assert(0); 196 + bfa_sm_fault(lps->bfa, event); 246 197 } 247 198 } 248 199 ··· 266 217 break; 267 218 268 219 default: 269 - bfa_assert(0); 220 + bfa_sm_fault(lps->bfa, event); 270 221 } 271 222 } 272 223 ··· 291 242 break; 292 243 293 244 default: 294 - bfa_assert(0); 245 + bfa_sm_fault(lps->bfa, event); 295 246 } 296 247 } 297 248 ··· 445 396 } 446 397 447 398 /** 399 + * Firmware received a Clear virtual link request (for FCoE) 400 + */ 401 + static void 402 + bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl) 403 + { 404 + struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); 405 + struct bfa_lps_s *lps; 406 + 407 + lps = BFA_LPS_FROM_TAG(mod, cvl->lp_tag); 408 + 409 + bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL); 410 + } 411 + 412 + /** 448 413 * Space is available in request queue, resume queueing request to firmware. 449 414 */ 450 415 static void ··· 594 531 bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg); 595 532 } 596 533 534 + /** 535 + * Clear virtual link completion handler for non-fcs 536 + */ 537 + static void 538 + bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete) 539 + { 540 + struct bfa_lps_s *lps = arg; 597 541 542 + if (!complete) 543 + return; 544 + 545 + /* Clear virtual link to base port will result in link down */ 546 + if (lps->fdisc) 547 + bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg); 548 + } 549 + 550 + /** 551 + * Received Clear virtual link event --direct call for fcs, 552 + * queue for others 553 + */ 554 + static void 555 + bfa_lps_cvl_event(struct bfa_lps_s *lps) 556 + { 557 + if (!lps->bfa->fcs) { 558 + bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb, 559 + lps); 560 + return; 561 + } 562 + 563 + /* Clear virtual link to base port will result in link down */ 564 + if (lps->fdisc) 565 + bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg); 566 + } 567 + 568 + u32 569 + bfa_lps_get_max_vport(struct bfa_s *bfa) 570 + { 571 + if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT) 572 + return BFA_LPS_MAX_VPORTS_SUPP_CT; 573 + else 574 + return BFA_LPS_MAX_VPORTS_SUPP_CB; 575 + } 598 576 599 577 /** 600 578 * lps_public BFA LPS public functions ··· 856 752 return lps->lsrjt_expl; 857 753 } 858 754 755 + /** 756 + * Return fpma/spma MAC for lport 757 + */ 758 + struct mac_s 759 + bfa_lps_get_lp_mac(struct bfa_lps_s *lps) 760 + { 761 + return lps->lp_mac; 762 + } 859 763 860 764 /** 861 765 * LPS firmware message class handler. ··· 883 771 884 772 case BFI_LPS_H2I_LOGOUT_RSP: 885 773 bfa_lps_logout_rsp(bfa, msg.logout_rsp); 774 + break; 775 + 776 + case BFI_LPS_H2I_CVL_EVENT: 777 + bfa_lps_rx_cvl_event(bfa, msg.cvl_event); 886 778 break; 887 779 888 780 default:
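Besides the Clear Virtual Link plumbing, the bfa_lps.c hunk above adds bfa_lps_get_max_vport(), which caps vports at 255 on Crossbow (CB) and 190 on Catapult (CT) ASICs. A hedged sketch of the kind of pre-check a vport create path could make with it (the caller below is illustrative, not taken from this diff):

/*
 * Illustrative caller only -- the real consumer is the FCS vport code.
 */
static bfa_status_t
example_vport_precheck(struct bfa_s *bfa, u32 num_vports)
{
	if (num_vports >= bfa_lps_get_max_vport(bfa))
		return BFA_STATUS_FAILED;	/* illustrative failure code */

	return BFA_STATUS_OK;
}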
+2 -2
drivers/scsi/bfa/bfa_module.c
··· 24 24 */ 25 25 struct bfa_module_s *hal_mods[] = { 26 26 &hal_mod_sgpg, 27 - &hal_mod_pport, 27 + &hal_mod_fcport, 28 28 &hal_mod_fcxp, 29 29 &hal_mod_lps, 30 30 &hal_mod_uf, ··· 45 45 bfa_isr_unhandled, /* BFI_MC_DIAG */ 46 46 bfa_isr_unhandled, /* BFI_MC_FLASH */ 47 47 bfa_isr_unhandled, /* BFI_MC_CEE */ 48 - bfa_pport_isr, /* BFI_MC_PORT */ 48 + bfa_fcport_isr, /* BFI_MC_FCPORT */ 49 49 bfa_isr_unhandled, /* BFI_MC_IOCFC */ 50 50 bfa_isr_unhandled, /* BFI_MC_LL */ 51 51 bfa_uf_isr, /* BFI_MC_UF */
+1 -1
drivers/scsi/bfa/bfa_modules_priv.h
··· 29 29 30 30 31 31 struct bfa_modules_s { 32 - struct bfa_pport_s pport; /* physical port module */ 32 + struct bfa_fcport_s fcport; /* fc port module */ 33 33 struct bfa_fcxp_mod_s fcxp_mod; /* fcxp module */ 34 34 struct bfa_lps_mod_s lps_mod; /* fcxp module */ 35 35 struct bfa_uf_mod_s uf_mod; /* unsolicited frame module */
+29 -28
drivers/scsi/bfa/bfa_port_priv.h
··· 23 23 #include "bfa_intr_priv.h" 24 24 25 25 /** 26 - * BFA physical port data structure 26 + * Link notification data structure 27 27 */ 28 - struct bfa_pport_s { 28 + struct bfa_fcport_ln_s { 29 + struct bfa_fcport_s *fcport; 30 + bfa_sm_t sm; 31 + struct bfa_cb_qe_s ln_qe; /* BFA callback queue elem for ln */ 32 + enum bfa_pport_linkstate ln_event; /* ln event for callback */ 33 + }; 34 + 35 + /** 36 + * BFA FC port data structure 37 + */ 38 + struct bfa_fcport_s { 29 39 struct bfa_s *bfa; /* parent BFA instance */ 30 40 bfa_sm_t sm; /* port state machine */ 31 41 wwn_t nwwn; /* node wwn of physical port */ ··· 46 36 enum bfa_pport_topology topology; /* current topology */ 47 37 u8 myalpa; /* my ALPA in LOOP topology */ 48 38 u8 rsvd[3]; 39 + u32 mypid:24; 40 + u32 rsvd_b:8; 49 41 struct bfa_pport_cfg_s cfg; /* current port configuration */ 50 42 struct bfa_qos_attr_s qos_attr; /* QoS Attributes */ 51 43 struct bfa_qos_vc_attr_s qos_vc_attr; /* VC info from ELP */ ··· 61 49 void (*event_cbfn) (void *cbarg, 62 50 bfa_pport_event_t event); 63 51 union { 64 - union bfi_pport_i2h_msg_u i2hmsg; 52 + union bfi_fcport_i2h_msg_u i2hmsg; 65 53 } event_arg; 66 54 void *bfad; /* BFA driver handle */ 55 + struct bfa_fcport_ln_s ln; /* Link Notification */ 67 56 struct bfa_cb_qe_s hcb_qe; /* BFA callback queue elem */ 68 - enum bfa_pport_linkstate hcb_event; 69 - /* link event for callback */ 57 + struct bfa_timer_s timer; /* timer */ 70 58 u32 msgtag; /* fimrware msg tag for reply */ 71 59 u8 *stats_kva; 72 60 u64 stats_pa; 73 - union bfa_pport_stats_u *stats; /* pport stats */ 74 - u32 mypid:24; 75 - u32 rsvd_b:8; 76 - struct bfa_timer_s timer; /* timer */ 77 - union bfa_pport_stats_u *stats_ret; 78 - /* driver stats location */ 79 - bfa_status_t stats_status; 80 - /* stats/statsclr status */ 81 - bfa_boolean_t stats_busy; 82 - /* outstanding stats/statsclr */ 83 - bfa_boolean_t stats_qfull; 84 - bfa_boolean_t diag_busy; 85 - /* diag busy status */ 86 - bfa_boolean_t beacon; 87 - /* port beacon status */ 88 - bfa_boolean_t link_e2e_beacon; 89 - /* link beacon status */ 90 - bfa_cb_pport_t stats_cbfn; 91 - /* driver callback function */ 92 - void *stats_cbarg; 93 - /* *!< user callback arg */ 61 + union bfa_fcport_stats_u *stats; 62 + union bfa_fcport_stats_u *stats_ret; /* driver stats location */ 63 + bfa_status_t stats_status; /* stats/statsclr status */ 64 + bfa_boolean_t stats_busy; /* outstanding stats/statsclr */ 65 + bfa_boolean_t stats_qfull; 66 + bfa_cb_pport_t stats_cbfn; /* driver callback function */ 67 + void *stats_cbarg; /* *!< user callback arg */ 68 + bfa_boolean_t diag_busy; /* diag busy status */ 69 + bfa_boolean_t beacon; /* port beacon status */ 70 + bfa_boolean_t link_e2e_beacon; /* link beacon status */ 94 71 }; 95 72 96 - #define BFA_PORT_MOD(__bfa) (&(__bfa)->modules.pport) 73 + #define BFA_FCPORT_MOD(__bfa) (&(__bfa)->modules.fcport) 97 74 98 75 /* 99 76 * public functions 100 77 */ 101 - void bfa_pport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); 78 + void bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); 102 79 #endif /* __BFA_PORT_PRIV_H__ */
+1 -1
drivers/scsi/bfa/bfa_priv.h
··· 101 101 extern struct bfa_module_s hal_mod_flash; 102 102 extern struct bfa_module_s hal_mod_fcdiag; 103 103 extern struct bfa_module_s hal_mod_sgpg; 104 - extern struct bfa_module_s hal_mod_pport; 104 + extern struct bfa_module_s hal_mod_fcport; 105 105 extern struct bfa_module_s hal_mod_fcxp; 106 106 extern struct bfa_module_s hal_mod_lps; 107 107 extern struct bfa_module_s hal_mod_uf;
+13 -13
drivers/scsi/bfa/bfa_rport.c
··· 114 114 115 115 default: 116 116 bfa_stats(rp, sm_un_unexp); 117 - bfa_assert(0); 117 + bfa_sm_fault(rp->bfa, event); 118 118 } 119 119 } 120 120 ··· 146 146 147 147 default: 148 148 bfa_stats(rp, sm_cr_unexp); 149 - bfa_assert(0); 149 + bfa_sm_fault(rp->bfa, event); 150 150 } 151 151 } 152 152 ··· 183 183 184 184 default: 185 185 bfa_stats(rp, sm_fwc_unexp); 186 - bfa_assert(0); 186 + bfa_sm_fault(rp->bfa, event); 187 187 } 188 188 } 189 189 ··· 224 224 225 225 default: 226 226 bfa_stats(rp, sm_fwc_unexp); 227 - bfa_assert(0); 227 + bfa_sm_fault(rp->bfa, event); 228 228 } 229 229 } 230 230 ··· 296 296 297 297 default: 298 298 bfa_stats(rp, sm_on_unexp); 299 - bfa_assert(0); 299 + bfa_sm_fault(rp->bfa, event); 300 300 } 301 301 } 302 302 ··· 329 329 330 330 default: 331 331 bfa_stats(rp, sm_fwd_unexp); 332 - bfa_assert(0); 332 + bfa_sm_fault(rp->bfa, event); 333 333 } 334 334 } 335 335 ··· 359 359 360 360 default: 361 361 bfa_stats(rp, sm_fwd_unexp); 362 - bfa_assert(0); 362 + bfa_sm_fault(rp->bfa, event); 363 363 } 364 364 } 365 365 ··· 394 394 395 395 default: 396 396 bfa_stats(rp, sm_off_unexp); 397 - bfa_assert(0); 397 + bfa_sm_fault(rp->bfa, event); 398 398 } 399 399 } 400 400 ··· 421 421 break; 422 422 423 423 default: 424 - bfa_assert(0); 424 + bfa_sm_fault(rp->bfa, event); 425 425 } 426 426 } 427 427 ··· 446 446 break; 447 447 448 448 default: 449 - bfa_assert(0); 449 + bfa_sm_fault(rp->bfa, event); 450 450 } 451 451 } 452 452 ··· 477 477 478 478 default: 479 479 bfa_stats(rp, sm_delp_unexp); 480 - bfa_assert(0); 480 + bfa_sm_fault(rp->bfa, event); 481 481 } 482 482 } 483 483 ··· 512 512 513 513 default: 514 514 bfa_stats(rp, sm_offp_unexp); 515 - bfa_assert(0); 515 + bfa_sm_fault(rp->bfa, event); 516 516 } 517 517 } 518 518 ··· 550 550 551 551 default: 552 552 bfa_stats(rp, sm_iocd_unexp); 553 - bfa_assert(0); 553 + bfa_sm_fault(rp->bfa, event); 554 554 } 555 555 } 556 556
+30 -32
drivers/scsi/bfa/bfa_trcmod_priv.h
··· 29 29 * !!! needed between trace utility and driver version 30 30 */ 31 31 enum { 32 - BFA_TRC_HAL_IOC = 1, 33 - BFA_TRC_HAL_INTR = 2, 34 - BFA_TRC_HAL_FCXP = 3, 35 - BFA_TRC_HAL_UF = 4, 36 - BFA_TRC_HAL_DIAG = 5, 37 - BFA_TRC_HAL_RPORT = 6, 38 - BFA_TRC_HAL_FCPIM = 7, 39 - BFA_TRC_HAL_IOIM = 8, 40 - BFA_TRC_HAL_TSKIM = 9, 41 - BFA_TRC_HAL_ITNIM = 10, 42 - BFA_TRC_HAL_PPORT = 11, 43 - BFA_TRC_HAL_SGPG = 12, 44 - BFA_TRC_HAL_FLASH = 13, 45 - BFA_TRC_HAL_DEBUG = 14, 46 - BFA_TRC_HAL_WWN = 15, 47 - BFA_TRC_HAL_FLASH_RAW = 16, 48 - BFA_TRC_HAL_SBOOT = 17, 49 - BFA_TRC_HAL_SBOOT_IO = 18, 50 - BFA_TRC_HAL_SBOOT_INTR = 19, 51 - BFA_TRC_HAL_SBTEST = 20, 52 - BFA_TRC_HAL_IPFC = 21, 53 - BFA_TRC_HAL_IOCFC = 22, 54 - BFA_TRC_HAL_FCPTM = 23, 55 - BFA_TRC_HAL_IOTM = 24, 56 - BFA_TRC_HAL_TSKTM = 25, 57 - BFA_TRC_HAL_TIN = 26, 58 - BFA_TRC_HAL_LPS = 27, 59 - BFA_TRC_HAL_FCDIAG = 28, 60 - BFA_TRC_HAL_PBIND = 29, 61 - BFA_TRC_HAL_IOCFC_CT = 30, 62 - BFA_TRC_HAL_IOCFC_CB = 31, 63 - BFA_TRC_HAL_IOCFC_Q = 32, 32 + BFA_TRC_HAL_INTR = 1, 33 + BFA_TRC_HAL_FCXP = 2, 34 + BFA_TRC_HAL_UF = 3, 35 + BFA_TRC_HAL_RPORT = 4, 36 + BFA_TRC_HAL_FCPIM = 5, 37 + BFA_TRC_HAL_IOIM = 6, 38 + BFA_TRC_HAL_TSKIM = 7, 39 + BFA_TRC_HAL_ITNIM = 8, 40 + BFA_TRC_HAL_FCPORT = 9, 41 + BFA_TRC_HAL_SGPG = 10, 42 + BFA_TRC_HAL_FLASH = 11, 43 + BFA_TRC_HAL_DEBUG = 12, 44 + BFA_TRC_HAL_WWN = 13, 45 + BFA_TRC_HAL_FLASH_RAW = 14, 46 + BFA_TRC_HAL_SBOOT = 15, 47 + BFA_TRC_HAL_SBOOT_IO = 16, 48 + BFA_TRC_HAL_SBOOT_INTR = 17, 49 + BFA_TRC_HAL_SBTEST = 18, 50 + BFA_TRC_HAL_IPFC = 19, 51 + BFA_TRC_HAL_IOCFC = 20, 52 + BFA_TRC_HAL_FCPTM = 21, 53 + BFA_TRC_HAL_IOTM = 22, 54 + BFA_TRC_HAL_TSKTM = 23, 55 + BFA_TRC_HAL_TIN = 24, 56 + BFA_TRC_HAL_LPS = 25, 57 + BFA_TRC_HAL_FCDIAG = 26, 58 + BFA_TRC_HAL_PBIND = 27, 59 + BFA_TRC_HAL_IOCFC_CT = 28, 60 + BFA_TRC_HAL_IOCFC_CB = 29, 61 + BFA_TRC_HAL_IOCFC_Q = 30, 64 62 }; 65 63 66 64 #endif /* __BFA_TRCMOD_PRIV_H__ */
+7 -7
drivers/scsi/bfa/bfa_tskim.c
··· 110 110 break; 111 111 112 112 default: 113 - bfa_assert(0); 113 + bfa_sm_fault(tskim->bfa, event); 114 114 } 115 115 } 116 116 ··· 146 146 break; 147 147 148 148 default: 149 - bfa_assert(0); 149 + bfa_sm_fault(tskim->bfa, event); 150 150 } 151 151 } 152 152 ··· 178 178 break; 179 179 180 180 default: 181 - bfa_assert(0); 181 + bfa_sm_fault(tskim->bfa, event); 182 182 } 183 183 } 184 184 ··· 207 207 break; 208 208 209 209 default: 210 - bfa_assert(0); 210 + bfa_sm_fault(tskim->bfa, event); 211 211 } 212 212 } 213 213 ··· 242 242 break; 243 243 244 244 default: 245 - bfa_assert(0); 245 + bfa_sm_fault(tskim->bfa, event); 246 246 } 247 247 } 248 248 ··· 277 277 break; 278 278 279 279 default: 280 - bfa_assert(0); 280 + bfa_sm_fault(tskim->bfa, event); 281 281 } 282 282 } 283 283 ··· 303 303 break; 304 304 305 305 default: 306 - bfa_assert(0); 306 + bfa_sm_fault(tskim->bfa, event); 307 307 } 308 308 } 309 309
+156 -52
drivers/scsi/bfa/bfad.c
··· 20 20 */ 21 21 22 22 #include <linux/module.h> 23 + #include <linux/kthread.h> 23 24 #include "bfad_drv.h" 24 25 #include "bfad_im.h" 25 26 #include "bfad_tm.h" ··· 54 53 static int ioc_auto_recover = BFA_TRUE; 55 54 static int ipfc_enable = BFA_FALSE; 56 55 static int ipfc_mtu = -1; 56 + static int fdmi_enable = BFA_TRUE; 57 57 int bfa_lun_queue_depth = BFAD_LUN_QUEUE_DEPTH; 58 58 int bfa_linkup_delay = -1; 59 59 ··· 76 74 module_param(ioc_auto_recover, int, S_IRUGO | S_IWUSR); 77 75 module_param(ipfc_enable, int, S_IRUGO | S_IWUSR); 78 76 module_param(ipfc_mtu, int, S_IRUGO | S_IWUSR); 77 + module_param(fdmi_enable, int, S_IRUGO | S_IWUSR); 79 78 module_param(bfa_linkup_delay, int, S_IRUGO | S_IWUSR); 80 79 81 80 /* ··· 98 95 99 96 if (ipfc_enable) 100 97 bfad_ipfc_probe(bfad); 98 + 99 + bfad->bfad_flags |= BFAD_FC4_PROBE_DONE; 101 100 ext: 102 101 return rc; 103 102 } ··· 111 106 bfad_tm_probe_undo(bfad); 112 107 if (ipfc_enable) 113 108 bfad_ipfc_probe_undo(bfad); 109 + bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE; 114 110 } 115 111 116 112 static void ··· 179 173 { 180 174 struct bfad_s *bfad = drv; 181 175 182 - if (init_status == BFA_STATUS_OK) 176 + if (init_status == BFA_STATUS_OK) { 183 177 bfad->bfad_flags |= BFAD_HAL_INIT_DONE; 178 + 179 + /* If BFAD_HAL_INIT_FAIL flag is set: 180 + * Wake up the kernel thread to start 181 + * the bfad operations after HAL init done 182 + */ 183 + if ((bfad->bfad_flags & BFAD_HAL_INIT_FAIL)) { 184 + bfad->bfad_flags &= ~BFAD_HAL_INIT_FAIL; 185 + wake_up_process(bfad->bfad_tsk); 186 + } 187 + } 184 188 185 189 complete(&bfad->comp); 186 190 } ··· 664 648 665 649 sprintf(symname, "%s-%d", BFAD_DRIVER_NAME, bfad->inst_no); 666 650 memcpy(port_cfg.sym_name.symname, symname, strlen(symname)); 667 - bfa_pport_get_attr(&bfad->bfa, &attr); 651 + bfa_fcport_get_attr(&bfad->bfa, &attr); 668 652 port_cfg.nwwn = attr.nwwn; 669 653 port_cfg.pwwn = attr.pwwn; 670 654 ··· 677 661 bfa_status_t rc; 678 662 unsigned long flags; 679 663 struct bfa_fcs_driver_info_s driver_info; 680 - int i; 681 664 682 665 bfad->cfg_data.rport_del_timeout = rport_del_timeout; 683 666 bfad->cfg_data.lun_queue_depth = bfa_lun_queue_depth; ··· 696 681 bfa_init_log(&bfad->bfa, bfad->logmod); 697 682 bfa_init_trc(&bfad->bfa, bfad->trcmod); 698 683 bfa_init_aen(&bfad->bfa, bfad->aen); 699 - INIT_LIST_HEAD(&bfad->file_q); 700 - INIT_LIST_HEAD(&bfad->file_free_q); 701 - for (i = 0; i < BFAD_AEN_MAX_APPS; i++) { 702 - bfa_q_qe_init(&bfad->file_buf[i].qe); 703 - list_add_tail(&bfad->file_buf[i].qe, &bfad->file_free_q); 704 - } 684 + memset(bfad->file_map, 0, sizeof(bfad->file_map)); 705 685 bfa_init_plog(&bfad->bfa, &bfad->plog_buf); 706 686 bfa_plog_init(&bfad->plog_buf); 707 687 bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START, ··· 756 746 bfa_fcs_log_init(&bfad->bfa_fcs, bfad->logmod); 757 747 bfa_fcs_trc_init(&bfad->bfa_fcs, bfad->trcmod); 758 748 bfa_fcs_aen_init(&bfad->bfa_fcs, bfad->aen); 759 - bfa_fcs_init(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE); 749 + bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE); 750 + 751 + /* Do FCS init only when HAL init is done */ 752 + if ((bfad->bfad_flags & BFAD_HAL_INIT_DONE)) { 753 + bfa_fcs_init(&bfad->bfa_fcs); 754 + bfad->bfad_flags |= BFAD_FCS_INIT_DONE; 755 + } 756 + 760 757 bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info); 758 + bfa_fcs_set_fdmi_param(&bfad->bfa_fcs, fdmi_enable); 761 759 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 762 760 763 761 bfad->bfad_flags |= BFAD_DRV_INIT_DONE; ··· 781 
763 void 782 764 bfad_drv_uninit(struct bfad_s *bfad) 783 765 { 766 + unsigned long flags; 767 + 768 + spin_lock_irqsave(&bfad->bfad_lock, flags); 769 + init_completion(&bfad->comp); 770 + bfa_stop(&bfad->bfa); 771 + spin_unlock_irqrestore(&bfad->bfad_lock, flags); 772 + wait_for_completion(&bfad->comp); 773 + 784 774 del_timer_sync(&bfad->hal_tmo); 785 775 bfa_isr_disable(&bfad->bfa); 786 776 bfa_detach(&bfad->bfa); 787 777 bfad_remove_intr(bfad); 788 - bfa_assert(list_empty(&bfad->file_q)); 789 778 bfad_hal_mem_release(bfad); 779 + 780 + bfad->bfad_flags &= ~BFAD_DRV_INIT_DONE; 790 781 } 791 782 792 783 void ··· 886 859 bfa_log_set_level_all(&bfad->log_data, log_level); 887 860 } 888 861 862 + bfa_status_t 863 + bfad_start_ops(struct bfad_s *bfad) 864 + { 865 + int retval; 866 + 867 + /* PPORT FCS config */ 868 + bfad_fcs_port_cfg(bfad); 869 + 870 + retval = bfad_cfg_pport(bfad, BFA_PORT_ROLE_FCP_IM); 871 + if (retval != BFA_STATUS_OK) 872 + goto out_cfg_pport_failure; 873 + 874 + /* BFAD level FC4 (IM/TM/IPFC) specific resource allocation */ 875 + retval = bfad_fc4_probe(bfad); 876 + if (retval != BFA_STATUS_OK) { 877 + printk(KERN_WARNING "bfad_fc4_probe failed\n"); 878 + goto out_fc4_probe_failure; 879 + } 880 + 881 + bfad_drv_start(bfad); 882 + 883 + /* 884 + * If bfa_linkup_delay is set to -1 default; try to retrive the 885 + * value using the bfad_os_get_linkup_delay(); else use the 886 + * passed in module param value as the bfa_linkup_delay. 887 + */ 888 + if (bfa_linkup_delay < 0) { 889 + 890 + bfa_linkup_delay = bfad_os_get_linkup_delay(bfad); 891 + bfad_os_rport_online_wait(bfad); 892 + bfa_linkup_delay = -1; 893 + 894 + } else { 895 + bfad_os_rport_online_wait(bfad); 896 + } 897 + 898 + bfa_log(bfad->logmod, BFA_LOG_LINUX_DEVICE_CLAIMED, bfad->pci_name); 899 + 900 + return BFA_STATUS_OK; 901 + 902 + out_fc4_probe_failure: 903 + bfad_fc4_probe_undo(bfad); 904 + bfad_uncfg_pport(bfad); 905 + out_cfg_pport_failure: 906 + return BFA_STATUS_FAILED; 907 + } 908 + 909 + int 910 + bfad_worker (void *ptr) 911 + { 912 + struct bfad_s *bfad; 913 + unsigned long flags; 914 + 915 + bfad = (struct bfad_s *)ptr; 916 + 917 + while (!kthread_should_stop()) { 918 + 919 + /* Check if the FCS init is done from bfad_drv_init; 920 + * if not done do FCS init and set the flag. 
921 + */ 922 + if (!(bfad->bfad_flags & BFAD_FCS_INIT_DONE)) { 923 + spin_lock_irqsave(&bfad->bfad_lock, flags); 924 + bfa_fcs_init(&bfad->bfa_fcs); 925 + bfad->bfad_flags |= BFAD_FCS_INIT_DONE; 926 + spin_unlock_irqrestore(&bfad->bfad_lock, flags); 927 + } 928 + 929 + /* Start the bfad operations after HAL init done */ 930 + bfad_start_ops(bfad); 931 + 932 + spin_lock_irqsave(&bfad->bfad_lock, flags); 933 + bfad->bfad_tsk = NULL; 934 + spin_unlock_irqrestore(&bfad->bfad_lock, flags); 935 + 936 + break; 937 + } 938 + 939 + return 0; 940 + } 941 + 889 942 /* 890 943 * PCI_entry PCI driver entries * { 891 944 */ ··· 978 871 { 979 872 struct bfad_s *bfad; 980 873 int error = -ENODEV, retval; 981 - char buf[16]; 982 874 983 875 /* 984 876 * For single port cards - only claim function 0 ··· 1008 902 bfa_trc(bfad, bfad_inst); 1009 903 1010 904 bfad->logmod = &bfad->log_data; 1011 - sprintf(buf, "%d", bfad_inst); 1012 - bfa_log_init(bfad->logmod, buf, bfa_os_printf); 905 + bfa_log_init(bfad->logmod, (char *)pci_name(pdev), bfa_os_printf); 1013 906 1014 907 bfad_drv_log_level_set(bfad); 1015 908 ··· 1038 933 bfad->ref_count = 0; 1039 934 bfad->pport.bfad = bfad; 1040 935 936 + bfad->bfad_tsk = kthread_create(bfad_worker, (void *) bfad, "%s", 937 + "bfad_worker"); 938 + if (IS_ERR(bfad->bfad_tsk)) { 939 + printk(KERN_INFO "bfad[%d]: Kernel thread" 940 + " creation failed!\n", 941 + bfad->inst_no); 942 + goto out_kthread_create_failure; 943 + } 944 + 1041 945 retval = bfad_drv_init(bfad); 1042 946 if (retval != BFA_STATUS_OK) 1043 947 goto out_drv_init_failure; 1044 948 if (!(bfad->bfad_flags & BFAD_HAL_INIT_DONE)) { 949 + bfad->bfad_flags |= BFAD_HAL_INIT_FAIL; 1045 950 printk(KERN_WARNING "bfad%d: hal init failed\n", bfad->inst_no); 1046 951 goto ok; 1047 952 } 1048 953 1049 - /* 1050 - * PPORT FCS config 1051 - */ 1052 - bfad_fcs_port_cfg(bfad); 1053 - 1054 - retval = bfad_cfg_pport(bfad, BFA_PORT_ROLE_FCP_IM); 954 + retval = bfad_start_ops(bfad); 1055 955 if (retval != BFA_STATUS_OK) 1056 - goto out_cfg_pport_failure; 956 + goto out_start_ops_failure; 1057 957 1058 - /* 1059 - * BFAD level FC4 (IM/TM/IPFC) specific resource allocation 1060 - */ 1061 - retval = bfad_fc4_probe(bfad); 1062 - if (retval != BFA_STATUS_OK) { 1063 - printk(KERN_WARNING "bfad_fc4_probe failed\n"); 1064 - goto out_fc4_probe_failure; 1065 - } 958 + kthread_stop(bfad->bfad_tsk); 959 + bfad->bfad_tsk = NULL; 1066 960 1067 - bfad_drv_start(bfad); 1068 - 1069 - /* 1070 - * If bfa_linkup_delay is set to -1 default; try to retrive the 1071 - * value using the bfad_os_get_linkup_delay(); else use the 1072 - * passed in module param value as the bfa_linkup_delay. 
1073 - */ 1074 - if (bfa_linkup_delay < 0) { 1075 - bfa_linkup_delay = bfad_os_get_linkup_delay(bfad); 1076 - bfad_os_rport_online_wait(bfad); 1077 - bfa_linkup_delay = -1; 1078 - } else { 1079 - bfad_os_rport_online_wait(bfad); 1080 - } 1081 - 1082 - bfa_log(bfad->logmod, BFA_LOG_LINUX_DEVICE_CLAIMED, bfad->pci_name); 1083 961 ok: 1084 962 return 0; 1085 963 1086 - out_fc4_probe_failure: 1087 - bfad_fc4_probe_undo(bfad); 1088 - bfad_uncfg_pport(bfad); 1089 - out_cfg_pport_failure: 964 + out_start_ops_failure: 1090 965 bfad_drv_uninit(bfad); 1091 966 out_drv_init_failure: 967 + kthread_stop(bfad->bfad_tsk); 968 + out_kthread_create_failure: 1092 969 mutex_lock(&bfad_mutex); 1093 970 bfad_inst--; 1094 971 list_del(&bfad->list_entry); ··· 1095 1008 1096 1009 bfa_trc(bfad, bfad->inst_no); 1097 1010 1011 + spin_lock_irqsave(&bfad->bfad_lock, flags); 1012 + if (bfad->bfad_tsk != NULL) 1013 + kthread_stop(bfad->bfad_tsk); 1014 + spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1015 + 1098 1016 if ((bfad->bfad_flags & BFAD_DRV_INIT_DONE) 1099 1017 && !(bfad->bfad_flags & BFAD_HAL_INIT_DONE)) { 1100 1018 ··· 1116 1024 goto remove_sysfs; 1117 1025 } 1118 1026 1119 - if (bfad->bfad_flags & BFAD_HAL_START_DONE) 1027 + if (bfad->bfad_flags & BFAD_HAL_START_DONE) { 1120 1028 bfad_drv_stop(bfad); 1029 + } else if (bfad->bfad_flags & BFAD_DRV_INIT_DONE) { 1030 + /* Invoking bfa_stop() before bfa_detach 1031 + * when HAL and DRV init are success 1032 + * but HAL start did not occur. 1033 + */ 1034 + spin_lock_irqsave(&bfad->bfad_lock, flags); 1035 + init_completion(&bfad->comp); 1036 + bfa_stop(&bfad->bfa); 1037 + spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1038 + wait_for_completion(&bfad->comp); 1039 + } 1121 1040 1122 1041 bfad_remove_intr(bfad); 1123 - 1124 1042 del_timer_sync(&bfad->hal_tmo); 1125 - bfad_fc4_probe_undo(bfad); 1043 + 1044 + if (bfad->bfad_flags & BFAD_FC4_PROBE_DONE) 1045 + bfad_fc4_probe_undo(bfad); 1126 1046 1127 1047 if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE) 1128 1048 bfad_uncfg_pport(bfad);
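The bfad.c changes above move most of bring-up into bfad_start_ops() and spawn a bfad_worker kernel thread at probe time: if HAL init has not completed, BFAD_HAL_INIT_FAIL is set and the init-done callback later calls wake_up_process() so FCS init and bfad_start_ops() run from the thread instead. A stripped-down sketch of that defer-to-kthread pattern (all names below are hypothetical, not the driver's):

/*
 * Generic sketch of the pattern, with hypothetical names.  The thread is
 * created stopped at probe time and only woken once hardware init
 * completes, so the remaining bring-up runs in process context.
 */
#include <linux/kthread.h>
#include <linux/sched.h>

static struct task_struct *example_tsk;

static int example_worker(void *arg)
{
	/* finish the deferred bring-up here */
	return 0;
}

static void example_hw_init_done(void)
{
	/* called from the init-completion callback */
	wake_up_process(example_tsk);
}

/* at probe: example_tsk = kthread_create(example_worker, NULL, "example"); */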
+34 -42
drivers/scsi/bfa/bfad_attr.c
··· 141 141 struct bfad_s *bfad = im_port->bfad; 142 142 struct bfa_pport_attr_s attr; 143 143 144 - bfa_pport_get_attr(&bfad->bfa, &attr); 144 + bfa_fcport_get_attr(&bfad->bfa, &attr); 145 145 146 146 switch (attr.port_type) { 147 147 case BFA_PPORT_TYPE_NPORT: ··· 173 173 struct bfad_s *bfad = im_port->bfad; 174 174 struct bfa_pport_attr_s attr; 175 175 176 - bfa_pport_get_attr(&bfad->bfa, &attr); 176 + bfa_fcport_get_attr(&bfad->bfa, &attr); 177 177 178 178 switch (attr.port_state) { 179 179 case BFA_PPORT_ST_LINKDOWN: ··· 229 229 (struct bfad_im_port_s *) shost->hostdata[0]; 230 230 struct bfad_s *bfad = im_port->bfad; 231 231 struct bfa_pport_attr_s attr; 232 + unsigned long flags; 232 233 233 - bfa_pport_get_attr(&bfad->bfa, &attr); 234 + spin_lock_irqsave(shost->host_lock, flags); 235 + bfa_fcport_get_attr(&bfad->bfa, &attr); 234 236 switch (attr.speed) { 235 237 case BFA_PPORT_SPEED_8GBPS: 236 238 fc_host_speed(shost) = FC_PORTSPEED_8GBIT; ··· 250 248 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; 251 249 break; 252 250 } 251 + spin_unlock_irqrestore(shost->host_lock, flags); 253 252 } 254 253 255 254 /** ··· 288 285 init_completion(&fcomp.comp); 289 286 spin_lock_irqsave(&bfad->bfad_lock, flags); 290 287 memset(hstats, 0, sizeof(struct fc_host_statistics)); 291 - rc = bfa_pport_get_stats(&bfad->bfa, 288 + rc = bfa_port_get_stats(BFA_FCPORT(&bfad->bfa), 292 289 (union bfa_pport_stats_u *) hstats, 293 290 bfad_hcb_comp, &fcomp); 294 291 spin_unlock_irqrestore(&bfad->bfad_lock, flags); ··· 315 312 316 313 init_completion(&fcomp.comp); 317 314 spin_lock_irqsave(&bfad->bfad_lock, flags); 318 - rc = bfa_pport_clear_stats(&bfad->bfa, bfad_hcb_comp, &fcomp); 315 + rc = bfa_port_clear_stats(BFA_FCPORT(&bfad->bfa), bfad_hcb_comp, 316 + &fcomp); 319 317 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 320 318 321 319 if (rc != BFA_STATUS_OK) ··· 425 421 struct bfad_im_port_s *im_port = 426 422 (struct bfad_im_port_s *) shost->hostdata[0]; 427 423 struct bfad_s *bfad = im_port->bfad; 428 - struct bfa_ioc_attr_s ioc_attr; 424 + char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN]; 429 425 430 - memset(&ioc_attr, 0, sizeof(ioc_attr)); 431 - bfa_get_attr(&bfad->bfa, &ioc_attr); 432 - return snprintf(buf, PAGE_SIZE, "%s\n", 433 - ioc_attr.adapter_attr.serial_num); 426 + bfa_get_adapter_serial_num(&bfad->bfa, serial_num); 427 + return snprintf(buf, PAGE_SIZE, "%s\n", serial_num); 434 428 } 435 429 436 430 static ssize_t ··· 439 437 struct bfad_im_port_s *im_port = 440 438 (struct bfad_im_port_s *) shost->hostdata[0]; 441 439 struct bfad_s *bfad = im_port->bfad; 442 - struct bfa_ioc_attr_s ioc_attr; 440 + char model[BFA_ADAPTER_MODEL_NAME_LEN]; 443 441 444 - memset(&ioc_attr, 0, sizeof(ioc_attr)); 445 - bfa_get_attr(&bfad->bfa, &ioc_attr); 446 - return snprintf(buf, PAGE_SIZE, "%s\n", ioc_attr.adapter_attr.model); 442 + bfa_get_adapter_model(&bfad->bfa, model); 443 + return snprintf(buf, PAGE_SIZE, "%s\n", model); 447 444 } 448 445 449 446 static ssize_t ··· 453 452 struct bfad_im_port_s *im_port = 454 453 (struct bfad_im_port_s *) shost->hostdata[0]; 455 454 struct bfad_s *bfad = im_port->bfad; 456 - struct bfa_ioc_attr_s ioc_attr; 455 + char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN]; 457 456 458 - memset(&ioc_attr, 0, sizeof(ioc_attr)); 459 - bfa_get_attr(&bfad->bfa, &ioc_attr); 460 - return snprintf(buf, PAGE_SIZE, "%s\n", 461 - ioc_attr.adapter_attr.model_descr); 457 + bfa_get_adapter_model(&bfad->bfa, model_descr); 458 + return snprintf(buf, PAGE_SIZE, "%s\n", model_descr); 462 459 } 463 460 464 461 
static ssize_t ··· 481 482 struct bfad_im_port_s *im_port = 482 483 (struct bfad_im_port_s *) shost->hostdata[0]; 483 484 struct bfad_s *bfad = im_port->bfad; 484 - struct bfa_ioc_attr_s ioc_attr; 485 + char model[BFA_ADAPTER_MODEL_NAME_LEN]; 486 + char fw_ver[BFA_VERSION_LEN]; 485 487 486 - memset(&ioc_attr, 0, sizeof(ioc_attr)); 487 - bfa_get_attr(&bfad->bfa, &ioc_attr); 488 - 488 + bfa_get_adapter_model(&bfad->bfa, model); 489 + bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver); 489 490 return snprintf(buf, PAGE_SIZE, "Brocade %s FV%s DV%s\n", 490 - ioc_attr.adapter_attr.model, 491 - ioc_attr.adapter_attr.fw_ver, BFAD_DRIVER_VERSION); 491 + model, fw_ver, BFAD_DRIVER_VERSION); 492 492 } 493 493 494 494 static ssize_t ··· 498 500 struct bfad_im_port_s *im_port = 499 501 (struct bfad_im_port_s *) shost->hostdata[0]; 500 502 struct bfad_s *bfad = im_port->bfad; 501 - struct bfa_ioc_attr_s ioc_attr; 503 + char hw_ver[BFA_VERSION_LEN]; 502 504 503 - memset(&ioc_attr, 0, sizeof(ioc_attr)); 504 - bfa_get_attr(&bfad->bfa, &ioc_attr); 505 - return snprintf(buf, PAGE_SIZE, "%s\n", ioc_attr.adapter_attr.hw_ver); 505 + bfa_get_pci_chip_rev(&bfad->bfa, hw_ver); 506 + return snprintf(buf, PAGE_SIZE, "%s\n", hw_ver); 506 507 } 507 508 508 509 static ssize_t ··· 519 522 struct bfad_im_port_s *im_port = 520 523 (struct bfad_im_port_s *) shost->hostdata[0]; 521 524 struct bfad_s *bfad = im_port->bfad; 522 - struct bfa_ioc_attr_s ioc_attr; 525 + char optrom_ver[BFA_VERSION_LEN]; 523 526 524 - memset(&ioc_attr, 0, sizeof(ioc_attr)); 525 - bfa_get_attr(&bfad->bfa, &ioc_attr); 526 - return snprintf(buf, PAGE_SIZE, "%s\n", 527 - ioc_attr.adapter_attr.optrom_ver); 527 + bfa_get_adapter_optrom_ver(&bfad->bfa, optrom_ver); 528 + return snprintf(buf, PAGE_SIZE, "%s\n", optrom_ver); 528 529 } 529 530 530 531 static ssize_t ··· 533 538 struct bfad_im_port_s *im_port = 534 539 (struct bfad_im_port_s *) shost->hostdata[0]; 535 540 struct bfad_s *bfad = im_port->bfad; 536 - struct bfa_ioc_attr_s ioc_attr; 541 + char fw_ver[BFA_VERSION_LEN]; 537 542 538 - memset(&ioc_attr, 0, sizeof(ioc_attr)); 539 - bfa_get_attr(&bfad->bfa, &ioc_attr); 540 - return snprintf(buf, PAGE_SIZE, "%s\n", ioc_attr.adapter_attr.fw_ver); 543 + bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver); 544 + return snprintf(buf, PAGE_SIZE, "%s\n", fw_ver); 541 545 } 542 546 543 547 static ssize_t ··· 547 553 struct bfad_im_port_s *im_port = 548 554 (struct bfad_im_port_s *) shost->hostdata[0]; 549 555 struct bfad_s *bfad = im_port->bfad; 550 - struct bfa_ioc_attr_s ioc_attr; 551 556 552 - memset(&ioc_attr, 0, sizeof(ioc_attr)); 553 - bfa_get_attr(&bfad->bfa, &ioc_attr); 554 - return snprintf(buf, PAGE_SIZE, "%d\n", ioc_attr.adapter_attr.nports); 557 + return snprintf(buf, PAGE_SIZE, "%d\n", 558 + bfa_get_nports(&bfad->bfa)); 555 559 } 556 560 557 561 static ssize_t
-9
drivers/scsi/bfa/bfad_attr.h
··· 17 17 18 18 #ifndef __BFAD_ATTR_H__ 19 19 #define __BFAD_ATTR_H__ 20 - /** 21 - * bfad_attr.h VMware driver configuration interface module. 22 - */ 23 20 24 21 /** 25 22 * FC_transport_template FC transport template ··· 48 51 */ 49 52 void 50 53 bfad_im_get_host_port_id(struct Scsi_Host *shost); 51 - 52 - /** 53 - * FC transport template entry, issue a LIP. 54 - */ 55 - int 56 - bfad_im_issue_fc_host_lip(struct Scsi_Host *shost); 57 54 58 55 struct Scsi_Host* 59 56 bfad_os_starget_to_shost(struct scsi_target *starget);
+19 -16
drivers/scsi/bfa/bfad_drv.h
··· 46 46 #ifdef BFA_DRIVER_VERSION 47 47 #define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION 48 48 #else 49 - #define BFAD_DRIVER_VERSION "2.0.0.0" 49 + #define BFAD_DRIVER_VERSION "2.1.2.1" 50 50 #endif 51 51 52 52 ··· 62 62 #define BFAD_HAL_START_DONE 0x00000010 63 63 #define BFAD_PORT_ONLINE 0x00000020 64 64 #define BFAD_RPORT_ONLINE 0x00000040 65 - 65 + #define BFAD_FCS_INIT_DONE 0x00000080 66 + #define BFAD_HAL_INIT_FAIL 0x00000100 67 + #define BFAD_FC4_PROBE_DONE 0x00000200 66 68 #define BFAD_PORT_DELETE 0x00000001 67 69 68 70 /* ··· 139 137 u32 binding_method; 140 138 }; 141 139 142 - #define BFAD_AEN_MAX_APPS 8 143 - struct bfad_aen_file_s { 144 - struct list_head qe; 145 - struct bfad_s *bfad; 146 - s32 ri; 147 - s32 app_id; 140 + union bfad_tmp_buf { 141 + /* From struct bfa_adapter_attr_s */ 142 + char manufacturer[BFA_ADAPTER_MFG_NAME_LEN]; 143 + char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN]; 144 + char model[BFA_ADAPTER_MODEL_NAME_LEN]; 145 + char fw_ver[BFA_VERSION_LEN]; 146 + char optrom_ver[BFA_VERSION_LEN]; 147 + 148 + /* From struct bfa_ioc_pci_attr_s */ 149 + u8 chip_rev[BFA_IOC_CHIP_REV_LEN]; /* chip revision */ 148 150 }; 149 151 150 152 /* ··· 174 168 u32 inst_no; /* BFAD instance number */ 175 169 u32 bfad_flags; 176 170 spinlock_t bfad_lock; 171 + struct task_struct *bfad_tsk; 177 172 struct bfad_cfg_param_s cfg_data; 178 173 struct bfad_msix_s msix_tab[MAX_MSIX_ENTRY]; 179 174 int nvec; ··· 190 183 struct bfa_log_mod_s *logmod; 191 184 struct bfa_aen_s *aen; 192 185 struct bfa_aen_s aen_buf; 193 - struct bfad_aen_file_s file_buf[BFAD_AEN_MAX_APPS]; 194 - struct list_head file_q; 195 - struct list_head file_free_q; 186 + void *file_map[BFA_AEN_MAX_APP]; 196 187 struct bfa_plog_s plog_buf; 197 188 int ref_count; 198 189 bfa_boolean_t ipfc_enabled; 190 + union bfad_tmp_buf tmp_buf; 199 191 struct fc_host_statistics link_stats; 200 - 201 - struct kobject *bfa_kobj; 202 - struct kobject *ioc_kobj; 203 - struct kobject *pport_kobj; 204 - struct kobject *lport_kobj; 205 192 }; 206 193 207 194 /* ··· 259 258 struct bfa_port_cfg_s *port_cfg); 260 259 bfa_status_t bfad_cfg_pport(struct bfad_s *bfad, enum bfa_port_role role); 261 260 bfa_status_t bfad_drv_init(struct bfad_s *bfad); 261 + bfa_status_t bfad_start_ops(struct bfad_s *bfad); 262 262 void bfad_drv_start(struct bfad_s *bfad); 263 263 void bfad_uncfg_pport(struct bfad_s *bfad); 264 264 void bfad_drv_stop(struct bfad_s *bfad); ··· 281 279 void bfad_drv_log_level_set(struct bfad_s *bfad); 282 280 bfa_status_t bfad_fc4_module_init(void); 283 281 void bfad_fc4_module_exit(void); 282 + int bfad_worker (void *ptr); 284 283 285 284 void bfad_pci_remove(struct pci_dev *pdev); 286 285 int bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid);
+24 -29
drivers/scsi/bfa/bfad_im.c
··· 43 43 struct bfad_s *bfad = drv; 44 44 struct bfad_itnim_data_s *itnim_data; 45 45 struct bfad_itnim_s *itnim; 46 + u8 host_status = DID_OK; 46 47 47 48 switch (io_status) { 48 49 case BFI_IOIM_STS_OK: 49 50 bfa_trc(bfad, scsi_status); 50 - cmnd->result = ScsiResult(DID_OK, scsi_status); 51 51 scsi_set_resid(cmnd, 0); 52 52 53 53 if (sns_len > 0) { ··· 56 56 sns_len = SCSI_SENSE_BUFFERSIZE; 57 57 memcpy(cmnd->sense_buffer, sns_info, sns_len); 58 58 } 59 - if (residue > 0) 59 + if (residue > 0) { 60 + bfa_trc(bfad, residue); 60 61 scsi_set_resid(cmnd, residue); 62 + if (!sns_len && (scsi_status == SAM_STAT_GOOD) && 63 + (scsi_bufflen(cmnd) - residue) < 64 + cmnd->underflow) { 65 + bfa_trc(bfad, 0); 66 + host_status = DID_ERROR; 67 + } 68 + } 69 + cmnd->result = ScsiResult(host_status, scsi_status); 70 + 61 71 break; 62 72 63 73 case BFI_IOIM_STS_ABORTED: ··· 177 167 static char bfa_buf[256]; 178 168 struct bfad_im_port_s *im_port = 179 169 (struct bfad_im_port_s *) shost->hostdata[0]; 180 - struct bfa_ioc_attr_s ioc_attr; 181 170 struct bfad_s *bfad = im_port->bfad; 171 + char model[BFA_ADAPTER_MODEL_NAME_LEN]; 182 172 183 - memset(&ioc_attr, 0, sizeof(ioc_attr)); 184 - bfa_get_attr(&bfad->bfa, &ioc_attr); 173 + bfa_get_adapter_model(&bfad->bfa, model); 185 174 186 175 memset(bfa_buf, 0, sizeof(bfa_buf)); 187 176 snprintf(bfa_buf, sizeof(bfa_buf), 188 - "Brocade FC/FCOE Adapter, " "model: %s hwpath: %s driver: %s", 189 - ioc_attr.adapter_attr.model, bfad->pci_name, 190 - BFAD_DRIVER_VERSION); 177 + "Brocade FC/FCOE Adapter, " "model: %s hwpath: %s driver: %s", 178 + model, bfad->pci_name, BFAD_DRIVER_VERSION); 191 179 return bfa_buf; 192 180 } 193 181 ··· 507 499 { 508 500 itnim->state = ITNIM_STATE_TIMEOUT; 509 501 } 510 - 511 - /** 512 - * Path TOV processing begin notification -- dummy for linux 513 - */ 514 - void 515 - bfa_fcb_itnim_tov_begin(struct bfad_itnim_s *itnim) 516 - { 517 - } 518 - 519 - 520 502 521 503 /** 522 504 * Allocate a Scsi_Host for a port. ··· 929 931 struct Scsi_Host *host = im_port->shost; 930 932 struct bfad_s *bfad = im_port->bfad; 931 933 struct bfad_port_s *port = im_port->port; 932 - union attr { 933 - struct bfa_pport_attr_s pattr; 934 - struct bfa_ioc_attr_s ioc_attr; 935 - } attr; 934 + struct bfa_pport_attr_s pattr; 935 + char model[BFA_ADAPTER_MODEL_NAME_LEN]; 936 + char fw_ver[BFA_VERSION_LEN]; 936 937 937 938 fc_host_node_name(host) = 938 939 bfa_os_htonll((bfa_fcs_port_get_nwwn(port->fcs_port))); ··· 951 954 /* For fibre channel services type 0x20 */ 952 955 fc_host_supported_fc4s(host)[7] = 1; 953 956 954 - memset(&attr.ioc_attr, 0, sizeof(attr.ioc_attr)); 955 - bfa_get_attr(&bfad->bfa, &attr.ioc_attr); 957 + bfa_get_adapter_model(&bfad->bfa, model); 958 + bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver); 956 959 sprintf(fc_host_symbolic_name(host), "Brocade %s FV%s DV%s", 957 - attr.ioc_attr.adapter_attr.model, 958 - attr.ioc_attr.adapter_attr.fw_ver, BFAD_DRIVER_VERSION); 960 + model, fw_ver, BFAD_DRIVER_VERSION); 959 961 960 962 fc_host_supported_speeds(host) = 0; 961 963 fc_host_supported_speeds(host) |= 962 964 FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT | 963 965 FC_PORTSPEED_1GBIT; 964 966 965 - memset(&attr.pattr, 0, sizeof(attr.pattr)); 966 - bfa_pport_get_attr(&bfad->bfa, &attr.pattr); 967 - fc_host_maxframe_size(host) = attr.pattr.pport_cfg.maxfrsize; 967 + bfa_fcport_get_attr(&bfad->bfa, &pattr); 968 + fc_host_maxframe_size(host) = pattr.pport_cfg.maxfrsize; 968 969 } 969 970 970 971 static void
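One behavioural change worth calling out in the bfad_im.c hunk above: a command that completes with SAM_STAT_GOOD, no sense data, and fewer bytes transferred than cmnd->underflow is now reported with host status DID_ERROR rather than DID_OK. Reduced to a standalone predicate (a simplified restatement of the logic in the hunk, for illustration only):

/*
 * Simplified restatement of the new underrun check; xfered corresponds to
 * scsi_bufflen(cmnd) - residue in the real completion handler.
 */
static int
example_underrun_is_error(unsigned int sns_len, unsigned int scsi_status,
			  unsigned int xfered, unsigned int underflow)
{
	return sns_len == 0 && scsi_status == SAM_STAT_GOOD &&
	       xfered < underflow;
}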
-5
drivers/scsi/bfa/bfad_im.h
··· 23 23 24 24 #define FCPI_NAME " fcpim" 25 25 26 - void bfad_flags_set(struct bfad_s *bfad, u32 flags); 27 26 bfa_status_t bfad_im_module_init(void); 28 27 void bfad_im_module_exit(void); 29 28 bfa_status_t bfad_im_probe(struct bfad_s *bfad); ··· 125 126 void bfad_os_destroy_workq(struct bfad_im_s *im); 126 127 void bfad_os_itnim_process(struct bfad_itnim_s *itnim_drv); 127 128 void bfad_os_fc_host_init(struct bfad_im_port_s *im_port); 128 - void bfad_os_init_work(struct bfad_im_port_s *im_port); 129 129 void bfad_os_scsi_host_free(struct bfad_s *bfad, 130 130 struct bfad_im_port_s *im_port); 131 131 void bfad_os_ramp_up_qdepth(struct bfad_itnim_s *itnim, ··· 134 136 int bfad_os_scsi_add_host(struct Scsi_Host *shost, 135 137 struct bfad_im_port_s *im_port, struct bfad_s *bfad); 136 138 137 - /* 138 - * scsi_host_template entries 139 - */ 140 139 void bfad_im_itnim_unmap(struct bfad_im_port_s *im_port, 141 140 struct bfad_itnim_s *itnim); 142 141
+8 -3
drivers/scsi/bfa/bfad_intr.c
··· 23 23 /** 24 24 * bfa_isr BFA driver interrupt functions 25 25 */ 26 - static int msix_disable; 27 - module_param(msix_disable, int, S_IRUGO | S_IWUSR); 26 + static int msix_disable_cb; 27 + static int msix_disable_ct; 28 + module_param(msix_disable_cb, int, S_IRUGO | S_IWUSR); 29 + module_param(msix_disable_ct, int, S_IRUGO | S_IWUSR); 28 30 /** 29 31 * Line based interrupt handler. 30 32 */ ··· 143 141 int error = 0; 144 142 u32 mask = 0, i, num_bit = 0, max_bit = 0; 145 143 struct msix_entry msix_entries[MAX_MSIX_ENTRY]; 144 + struct pci_dev *pdev = bfad->pcidev; 146 145 147 146 /* Call BFA to get the msix map for this PCI function. */ 148 147 bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit); ··· 151 148 /* Set up the msix entry table */ 152 149 bfad_init_msix_entry(bfad, msix_entries, mask, max_bit); 153 150 154 - if (!msix_disable) { 151 + if ((pdev->device == BFA_PCI_DEVICE_ID_CT && !msix_disable_ct) || 152 + (pdev->device != BFA_PCI_DEVICE_ID_CT && !msix_disable_cb)) { 153 + 155 154 error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec); 156 155 if (error) { 157 156 /*
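The bfad_intr.c hunk above splits the old msix_disable module parameter into msix_disable_cb and msix_disable_ct, so MSI-X can be disabled independently for Crossbow (CB) and Catapult (CT) adapters. The device selection reduces to the following (a simplified restatement; the helper name is illustrative):

/*
 * Simplified restatement of the new MSI-X opt-in test.
 */
static int
example_want_msix(unsigned short pci_device, int msix_disable_cb,
		  int msix_disable_ct)
{
	if (pci_device == BFA_PCI_DEVICE_ID_CT)
		return !msix_disable_ct;
	return !msix_disable_cb;	/* Crossbow and anything else */
}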
+31 -28
drivers/scsi/bfa/fabric.c
··· 37 37 #define BFA_FCS_FABRIC_CLEANUP_DELAY (10000) /* Milliseconds */ 38 38 39 39 #define bfa_fcs_fabric_set_opertype(__fabric) do { \ 40 - if (bfa_pport_get_topology((__fabric)->fcs->bfa) \ 40 + if (bfa_fcport_get_topology((__fabric)->fcs->bfa) \ 41 41 == BFA_PPORT_TOPOLOGY_P2P) \ 42 42 (__fabric)->oper_type = BFA_PPORT_TYPE_NPORT; \ 43 43 else \ ··· 136 136 case BFA_FCS_FABRIC_SM_CREATE: 137 137 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created); 138 138 bfa_fcs_fabric_init(fabric); 139 - bfa_fcs_lport_init(&fabric->bport, fabric->fcs, FC_VF_ID_NULL, 140 - &fabric->bport.port_cfg, NULL); 139 + bfa_fcs_lport_init(&fabric->bport, &fabric->bport.port_cfg); 141 140 break; 142 141 143 142 case BFA_FCS_FABRIC_SM_LINK_UP: ··· 160 161 161 162 switch (event) { 162 163 case BFA_FCS_FABRIC_SM_START: 163 - if (bfa_pport_is_linkup(fabric->fcs->bfa)) { 164 + if (bfa_fcport_is_linkup(fabric->fcs->bfa)) { 164 165 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi); 165 166 bfa_fcs_fabric_login(fabric); 166 167 } else ··· 224 225 switch (event) { 225 226 case BFA_FCS_FABRIC_SM_CONT_OP: 226 227 227 - bfa_pport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit); 228 + bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit); 228 229 fabric->fab_type = BFA_FCS_FABRIC_SWITCHED; 229 230 230 231 if (fabric->auth_reqd && fabric->is_auth) { ··· 251 252 252 253 case BFA_FCS_FABRIC_SM_NO_FABRIC: 253 254 fabric->fab_type = BFA_FCS_FABRIC_N2N; 254 - bfa_pport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit); 255 + bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit); 255 256 bfa_fcs_fabric_notify_online(fabric); 256 257 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_nofabric); 257 258 break; ··· 418 419 419 420 case BFA_FCS_FABRIC_SM_NO_FABRIC: 420 421 bfa_trc(fabric->fcs, fabric->bb_credit); 421 - bfa_pport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit); 422 + bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit); 422 423 break; 423 424 424 425 default: ··· 562 563 bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric) 563 564 { 564 565 struct bfa_port_cfg_s *port_cfg = &fabric->bport.port_cfg; 565 - struct bfa_adapter_attr_s adapter_attr; 566 + char model[BFA_ADAPTER_MODEL_NAME_LEN] = {0}; 566 567 struct bfa_fcs_driver_info_s *driver_info = &fabric->fcs->driver_info; 567 568 568 - bfa_os_memset((void *)&adapter_attr, 0, 569 - sizeof(struct bfa_adapter_attr_s)); 570 - bfa_ioc_get_adapter_attr(&fabric->fcs->bfa->ioc, &adapter_attr); 569 + bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model); 571 570 572 571 /* 573 572 * Model name/number 574 573 */ 575 - strncpy((char *)&port_cfg->sym_name, adapter_attr.model, 574 + strncpy((char *)&port_cfg->sym_name, model, 576 575 BFA_FCS_PORT_SYMBNAME_MODEL_SZ); 577 576 strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR, 578 577 sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR)); ··· 716 719 struct bfa_port_cfg_s *pcfg = &fabric->bport.port_cfg; 717 720 u8 alpa = 0; 718 721 719 - if (bfa_pport_get_topology(bfa) == BFA_PPORT_TOPOLOGY_LOOP) 720 - alpa = bfa_pport_get_myalpa(bfa); 722 + if (bfa_fcport_get_topology(bfa) == BFA_PPORT_TOPOLOGY_LOOP) 723 + alpa = bfa_fcport_get_myalpa(bfa); 721 724 722 - bfa_lps_flogi(fabric->lps, fabric, alpa, bfa_pport_get_maxfrsize(bfa), 725 + bfa_lps_flogi(fabric->lps, fabric, alpa, bfa_fcport_get_maxfrsize(bfa), 723 726 pcfg->pwwn, pcfg->nwwn, fabric->auth_reqd); 724 727 725 728 fabric->stats.flogi_sent++; ··· 811 814 */ 812 815 813 816 /** 814 - * Module initialization 817 + * Attach time 
initialization 815 818 */ 816 819 void 817 - bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs) 820 + bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs) 818 821 { 819 822 struct bfa_fcs_fabric_s *fabric; 820 823 ··· 838 841 bfa_wc_up(&fabric->wc); /* For the base port */ 839 842 840 843 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit); 841 - bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_CREATE); 844 + bfa_fcs_lport_attach(&fabric->bport, fabric->fcs, FC_VF_ID_NULL, NULL); 845 + } 846 + 847 + void 848 + bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs) 849 + { 850 + bfa_sm_send_event(&fcs->fabric, BFA_FCS_FABRIC_SM_CREATE); 842 851 bfa_trc(fcs, 0); 843 852 } 844 853 ··· 891 888 bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric) 892 889 { 893 890 return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_loopback); 891 + } 892 + 893 + bfa_boolean_t 894 + bfa_fcs_fabric_is_auth_failed(struct bfa_fcs_fabric_s *fabric) 895 + { 896 + return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_auth_failed); 894 897 } 895 898 896 899 enum bfa_pport_type ··· 1174 1165 reqlen = fc_flogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), 1175 1166 bfa_os_hton3b(FC_FABRIC_PORT), 1176 1167 n2n_port->reply_oxid, pcfg->pwwn, 1177 - pcfg->nwwn, bfa_pport_get_maxfrsize(bfa), 1178 - bfa_pport_get_rx_bbcredit(bfa)); 1168 + pcfg->nwwn, bfa_fcport_get_maxfrsize(bfa), 1169 + bfa_fcport_get_rx_bbcredit(bfa)); 1179 1170 1180 1171 bfa_fcxp_send(fcxp, NULL, fabric->vf_id, bfa_lps_get_tag(fabric->lps), 1181 1172 BFA_FALSE, FC_CLASS_3, reqlen, &fchs, ··· 1233 1224 wwn2str(pwwn_ptr, pwwn); 1234 1225 wwn2str(fwwn_ptr, fwwn); 1235 1226 1236 - switch (event) { 1237 - case BFA_PORT_AEN_FABRIC_NAME_CHANGE: 1238 - bfa_log(logmod, BFA_AEN_PORT_FABRIC_NAME_CHANGE, pwwn_ptr, 1239 - fwwn_ptr); 1240 - break; 1241 - default: 1242 - break; 1243 - } 1227 + bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, event), 1228 + pwwn_ptr, fwwn_ptr); 1244 1229 1245 1230 aen_data.port.pwwn = pwwn; 1246 1231 aen_data.port.fwwn = fwwn;
+6
drivers/scsi/bfa/fcbuild.h
··· 72 72 case RPSC_OP_SPEED_8G: 73 73 return BFA_PPORT_SPEED_8GBPS; 74 74 75 + case RPSC_OP_SPEED_10G: 76 + return BFA_PPORT_SPEED_10GBPS; 77 + 75 78 default: 76 79 return BFA_PPORT_SPEED_UNKNOWN; 77 80 } ··· 99 96 100 97 case BFA_PPORT_SPEED_8GBPS: 101 98 return RPSC_OP_SPEED_8G; 99 + 100 + case BFA_PPORT_SPEED_10GBPS: 101 + return RPSC_OP_SPEED_10G; 102 102 103 103 default: 104 104 return RPSC_OP_SPEED_NOT_EST;
+10 -41
drivers/scsi/bfa/fcpim.c
··· 126 126 break; 127 127 128 128 default: 129 - bfa_assert(0); 129 + bfa_sm_fault(itnim->fcs, event); 130 130 } 131 131 132 132 } ··· 161 161 break; 162 162 163 163 default: 164 - bfa_assert(0); 164 + bfa_sm_fault(itnim->fcs, event); 165 165 } 166 166 } 167 167 ··· 205 205 break; 206 206 207 207 default: 208 - bfa_assert(0); 208 + bfa_sm_fault(itnim->fcs, event); 209 209 } 210 210 } 211 211 ··· 240 240 break; 241 241 242 242 default: 243 - bfa_assert(0); 243 + bfa_sm_fault(itnim->fcs, event); 244 244 } 245 245 } 246 246 ··· 270 270 break; 271 271 272 272 default: 273 - bfa_assert(0); 273 + bfa_sm_fault(itnim->fcs, event); 274 274 } 275 275 } 276 276 ··· 298 298 break; 299 299 300 300 default: 301 - bfa_assert(0); 301 + bfa_sm_fault(itnim->fcs, event); 302 302 } 303 303 } 304 304 ··· 321 321 break; 322 322 323 323 default: 324 - bfa_assert(0); 324 + bfa_sm_fault(itnim->fcs, event); 325 325 } 326 326 } 327 327 ··· 354 354 break; 355 355 356 356 default: 357 - bfa_assert(0); 357 + bfa_sm_fault(itnim->fcs, event); 358 358 } 359 359 } 360 360 ··· 385 385 wwn2str(lpwwn_ptr, lpwwn); 386 386 wwn2str(rpwwn_ptr, rpwwn); 387 387 388 - switch (event) { 389 - case BFA_ITNIM_AEN_ONLINE: 390 - bfa_log(logmod, BFA_AEN_ITNIM_ONLINE, rpwwn_ptr, lpwwn_ptr); 391 - break; 392 - case BFA_ITNIM_AEN_OFFLINE: 393 - bfa_log(logmod, BFA_AEN_ITNIM_OFFLINE, rpwwn_ptr, lpwwn_ptr); 394 - break; 395 - case BFA_ITNIM_AEN_DISCONNECT: 396 - bfa_log(logmod, BFA_AEN_ITNIM_DISCONNECT, rpwwn_ptr, lpwwn_ptr); 397 - break; 398 - default: 399 - break; 400 - } 388 + bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_ITNIM, event), 389 + rpwwn_ptr, lpwwn_ptr); 401 390 402 391 aen_data.itnim.vf_id = rport->port->fabric->vf_id; 403 392 aen_data.itnim.ppwwn = ··· 678 689 struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cb_arg; 679 690 680 691 bfa_trc(itnim->fcs, itnim->rport->pwwn); 681 - bfa_fcb_itnim_tov_begin(itnim->itnim_drv); 682 692 } 683 693 684 694 /** ··· 810 822 bfa_fcs_itnim_resume(struct bfa_fcs_itnim_s *itnim) 811 823 { 812 824 } 813 - 814 - /** 815 - * Module initialization 816 - */ 817 - void 818 - bfa_fcs_fcpim_modinit(struct bfa_fcs_s *fcs) 819 - { 820 - } 821 - 822 - /** 823 - * Module cleanup 824 - */ 825 - void 826 - bfa_fcs_fcpim_modexit(struct bfa_fcs_s *fcs) 827 - { 828 - bfa_fcs_modexit_comp(fcs); 829 - } 830 - 831 -
+2
drivers/scsi/bfa/fcs_fabric.h
··· 29 29 /* 30 30 * fcs friend functions: only between fcs modules 31 31 */ 32 + void bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs); 32 33 void bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs); 33 34 void bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs); 34 35 void bfa_fcs_fabric_modsusp(struct bfa_fcs_s *fcs); ··· 47 46 struct fchs_s *fchs, u16 len); 48 47 u16 bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric); 49 48 bfa_boolean_t bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric); 49 + bfa_boolean_t bfa_fcs_fabric_is_auth_failed(struct bfa_fcs_fabric_s *fabric); 50 50 enum bfa_pport_type bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric); 51 51 void bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric); 52 52 void bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric);
-5
drivers/scsi/bfa/fcs_fcpim.h
··· 34 34 void bfa_fcs_itnim_pause(struct bfa_fcs_itnim_s *itnim); 35 35 void bfa_fcs_itnim_resume(struct bfa_fcs_itnim_s *itnim); 36 36 37 - /* 38 - * Modudle init/cleanup routines. 39 - */ 40 - void bfa_fcs_fcpim_modinit(struct bfa_fcs_s *fcs); 41 - void bfa_fcs_fcpim_modexit(struct bfa_fcs_s *fcs); 42 37 void bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim, struct fchs_s *fchs, 43 38 u16 len); 44 39 #endif /* __FCS_FCPIM_H__ */
+4 -3
drivers/scsi/bfa/fcs_lport.h
··· 84 84 * Following routines will be called by Fabric to indicate port 85 85 * online/offline to vport. 86 86 */ 87 - void bfa_fcs_lport_init(struct bfa_fcs_port_s *lport, struct bfa_fcs_s *fcs, 88 - u16 vf_id, struct bfa_port_cfg_s *port_cfg, 89 - struct bfa_fcs_vport_s *vport); 87 + void bfa_fcs_lport_attach(struct bfa_fcs_port_s *lport, struct bfa_fcs_s *fcs, 88 + uint16_t vf_id, struct bfa_fcs_vport_s *vport); 89 + void bfa_fcs_lport_init(struct bfa_fcs_port_s *lport, 90 + struct bfa_port_cfg_s *port_cfg); 90 91 void bfa_fcs_port_online(struct bfa_fcs_port_s *port); 91 92 void bfa_fcs_port_offline(struct bfa_fcs_port_s *port); 92 93 void bfa_fcs_port_delete(struct bfa_fcs_port_s *port);
+1 -2
drivers/scsi/bfa/fcs_port.h
··· 26 26 /* 27 27 * fcs friend functions: only between fcs modules 28 28 */ 29 - void bfa_fcs_pport_modinit(struct bfa_fcs_s *fcs); 30 - void bfa_fcs_pport_modexit(struct bfa_fcs_s *fcs); 29 + void bfa_fcs_pport_attach(struct bfa_fcs_s *fcs); 31 30 32 31 #endif /* __FCS_PPORT_H__ */
-3
drivers/scsi/bfa/fcs_rport.h
··· 24 24 25 25 #include <fcs/bfa_fcs_rport.h> 26 26 27 - void bfa_fcs_rport_modinit(struct bfa_fcs_s *fcs); 28 - void bfa_fcs_rport_modexit(struct bfa_fcs_s *fcs); 29 - 30 27 void bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs, 31 28 u16 len); 32 29 void bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport);
+1 -2
drivers/scsi/bfa/fcs_uf.h
··· 26 26 /* 27 27 * fcs friend functions: only between fcs modules 28 28 */ 29 - void bfa_fcs_uf_modinit(struct bfa_fcs_s *fcs); 30 - void bfa_fcs_uf_modexit(struct bfa_fcs_s *fcs); 29 + void bfa_fcs_uf_attach(struct bfa_fcs_s *fcs); 31 30 32 31 #endif /* __FCS_UF_H__ */
-8
drivers/scsi/bfa/fcs_vport.h
··· 22 22 #include <fcs/bfa_fcs_vport.h> 23 23 #include <defs/bfa_defs_pci.h> 24 24 25 - /* 26 - * Modudle init/cleanup routines. 27 - */ 28 - 29 - void bfa_fcs_vport_modinit(struct bfa_fcs_s *fcs); 30 - void bfa_fcs_vport_modexit(struct bfa_fcs_s *fcs); 31 - 32 25 void bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport); 33 26 void bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport); 34 27 void bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport); 35 28 void bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport); 36 - u32 bfa_fcs_vport_get_max(struct bfa_fcs_s *fcs); 37 29 38 30 #endif /* __FCS_VPORT_H__ */ 39 31
+43 -36
drivers/scsi/bfa/fdmi.c
··· 116 116 enum port_fdmi_event event); 117 117 static void bfa_fcs_port_fdmi_sm_online(struct bfa_fcs_port_fdmi_s *fdmi, 118 118 enum port_fdmi_event event); 119 + static void bfa_fcs_port_fdmi_sm_disabled(struct bfa_fcs_port_fdmi_s *fdmi, 120 + enum port_fdmi_event event); 121 + 119 122 /** 120 123 * Start in offline state - awaiting MS to send start. 121 124 */ ··· 158 155 break; 159 156 160 157 default: 161 - bfa_assert(0); 158 + bfa_sm_fault(port->fcs, event); 162 159 } 163 160 } 164 161 ··· 183 180 break; 184 181 185 182 default: 186 - bfa_assert(0); 183 + bfa_sm_fault(port->fcs, event); 187 184 } 188 185 } 189 186 ··· 230 227 break; 231 228 232 229 default: 233 - bfa_assert(0); 230 + bfa_sm_fault(port->fcs, event); 234 231 } 235 232 } 236 233 ··· 258 255 break; 259 256 260 257 default: 261 - bfa_assert(0); 258 + bfa_sm_fault(port->fcs, event); 262 259 } 263 260 } 264 261 ··· 286 283 break; 287 284 288 285 default: 289 - bfa_assert(0); 286 + bfa_sm_fault(port->fcs, event); 290 287 } 291 288 } 292 289 ··· 331 328 break; 332 329 333 330 default: 334 - bfa_assert(0); 331 + bfa_sm_fault(port->fcs, event); 335 332 } 336 333 } 337 334 ··· 359 356 break; 360 357 361 358 default: 362 - bfa_assert(0); 359 + bfa_sm_fault(port->fcs, event); 363 360 } 364 361 } 365 362 ··· 387 384 break; 388 385 389 386 default: 390 - bfa_assert(0); 387 + bfa_sm_fault(port->fcs, event); 391 388 } 392 389 } 393 390 ··· 431 428 break; 432 429 433 430 default: 434 - bfa_assert(0); 431 + bfa_sm_fault(port->fcs, event); 435 432 } 436 433 } 437 434 ··· 459 456 break; 460 457 461 458 default: 462 - bfa_assert(0); 459 + bfa_sm_fault(port->fcs, event); 463 460 } 464 461 } 465 462 ··· 478 475 break; 479 476 480 477 default: 481 - bfa_assert(0); 478 + bfa_sm_fault(port->fcs, event); 482 479 } 483 480 } 484 481 482 + /** 483 + * FDMI is disabled state. 484 + */ 485 + static void 486 + bfa_fcs_port_fdmi_sm_disabled(struct bfa_fcs_port_fdmi_s *fdmi, 487 + enum port_fdmi_event event) 488 + { 489 + struct bfa_fcs_port_s *port = fdmi->ms->port; 490 + 491 + bfa_trc(port->fcs, port->port_cfg.pwwn); 492 + bfa_trc(port->fcs, event); 493 + 494 + /* No op State. It can only be enabled at Driver Init. */ 495 + } 485 496 486 497 /** 487 498 * RHBA : Register HBA Attributes. 
··· 1114 1097 { 1115 1098 struct bfa_fcs_port_s *port = fdmi->ms->port; 1116 1099 struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info; 1117 - struct bfa_adapter_attr_s adapter_attr; 1118 1100 1119 1101 bfa_os_memset(hba_attr, 0, sizeof(struct bfa_fcs_fdmi_hba_attr_s)); 1120 - bfa_os_memset(&adapter_attr, 0, sizeof(struct bfa_adapter_attr_s)); 1121 1102 1122 - bfa_ioc_get_adapter_attr(&port->fcs->bfa->ioc, &adapter_attr); 1123 - 1124 - strncpy(hba_attr->manufacturer, adapter_attr.manufacturer, 1125 - sizeof(adapter_attr.manufacturer)); 1126 - 1127 - strncpy(hba_attr->serial_num, adapter_attr.serial_num, 1128 - sizeof(adapter_attr.serial_num)); 1129 - 1130 - strncpy(hba_attr->model, adapter_attr.model, sizeof(hba_attr->model)); 1131 - 1132 - strncpy(hba_attr->model_desc, adapter_attr.model_descr, 1133 - sizeof(hba_attr->model_desc)); 1134 - 1135 - strncpy(hba_attr->hw_version, adapter_attr.hw_ver, 1136 - sizeof(hba_attr->hw_version)); 1103 + bfa_ioc_get_adapter_manufacturer(&port->fcs->bfa->ioc, 1104 + hba_attr->manufacturer); 1105 + bfa_ioc_get_adapter_serial_num(&port->fcs->bfa->ioc, 1106 + hba_attr->serial_num); 1107 + bfa_ioc_get_adapter_model(&port->fcs->bfa->ioc, hba_attr->model); 1108 + bfa_ioc_get_adapter_model(&port->fcs->bfa->ioc, hba_attr->model_desc); 1109 + bfa_ioc_get_pci_chip_rev(&port->fcs->bfa->ioc, hba_attr->hw_version); 1110 + bfa_ioc_get_adapter_optrom_ver(&port->fcs->bfa->ioc, 1111 + hba_attr->option_rom_ver); 1112 + bfa_ioc_get_adapter_fw_ver(&port->fcs->bfa->ioc, hba_attr->fw_version); 1137 1113 1138 1114 strncpy(hba_attr->driver_version, (char *)driver_info->version, 1139 1115 sizeof(hba_attr->driver_version)); 1140 - 1141 - strncpy(hba_attr->option_rom_ver, adapter_attr.optrom_ver, 1142 - sizeof(hba_attr->option_rom_ver)); 1143 - 1144 - strncpy(hba_attr->fw_version, adapter_attr.fw_ver, 1145 - sizeof(hba_attr->fw_version)); 1146 1116 1147 1117 strncpy(hba_attr->os_name, driver_info->host_os_name, 1148 1118 sizeof(hba_attr->os_name)); ··· 1162 1158 /* 1163 1159 * get pport attributes from hal 1164 1160 */ 1165 - bfa_pport_get_attr(port->fcs->bfa, &pport_attr); 1161 + bfa_fcport_get_attr(port->fcs->bfa, &pport_attr); 1166 1162 1167 1163 /* 1168 1164 * get FC4 type Bitmask ··· 1205 1201 struct bfa_fcs_port_fdmi_s *fdmi = &ms->fdmi; 1206 1202 1207 1203 fdmi->ms = ms; 1208 - bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline); 1204 + if (ms->port->fcs->fdmi_enabled) 1205 + bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline); 1206 + else 1207 + bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_disabled); 1209 1208 } 1210 1209 1211 1210 void
+27 -23
drivers/scsi/bfa/include/aen/bfa_aen.h
··· 18 18 #define __BFA_AEN_H__ 19 19 20 20 #include "defs/bfa_defs_aen.h" 21 + #include "defs/bfa_defs_status.h" 22 + #include "cs/bfa_debug.h" 21 23 22 - #define BFA_AEN_MAX_ENTRY 512 24 + #define BFA_AEN_MAX_ENTRY 512 23 25 24 - extern s32 bfa_aen_max_cfg_entry; 26 + extern int bfa_aen_max_cfg_entry; 25 27 struct bfa_aen_s { 26 28 void *bfad; 27 - s32 max_entry; 28 - s32 write_index; 29 - s32 read_index; 30 - u32 bfad_num; 31 - u32 seq_num; 29 + int max_entry; 30 + int write_index; 31 + int read_index; 32 + int bfad_num; 33 + int seq_num; 32 34 void (*aen_cb_notify)(void *bfad); 33 35 void (*gettimeofday)(struct bfa_timeval_s *tv); 34 - struct bfa_trc_mod_s *trcmod; 35 - struct bfa_aen_entry_s list[BFA_AEN_MAX_ENTRY]; /* Must be the last */ 36 + struct bfa_trc_mod_s *trcmod; 37 + int app_ri[BFA_AEN_MAX_APP]; /* For multiclient support */ 38 + struct bfa_aen_entry_s list[BFA_AEN_MAX_ENTRY]; /* Must be the last */ 36 39 }; 37 40 38 41 ··· 48 45 bfa_aen_max_cfg_entry = max_entry; 49 46 } 50 47 51 - static inline s32 48 + static inline int 52 49 bfa_aen_get_max_cfg_entry(void) 53 50 { 54 51 return bfa_aen_max_cfg_entry; 55 52 } 56 53 57 - static inline s32 54 + static inline int 58 55 bfa_aen_get_meminfo(void) 59 56 { 60 57 return sizeof(struct bfa_aen_entry_s) * bfa_aen_get_max_cfg_entry(); 61 58 } 62 59 63 - static inline s32 60 + static inline int 64 61 bfa_aen_get_wi(struct bfa_aen_s *aen) 65 62 { 66 63 return aen->write_index; 67 64 } 68 65 69 - static inline s32 66 + static inline int 70 67 bfa_aen_get_ri(struct bfa_aen_s *aen) 71 68 { 72 69 return aen->read_index; 73 70 } 74 71 75 - static inline s32 76 - bfa_aen_fetch_count(struct bfa_aen_s *aen, s32 read_index) 72 + static inline int 73 + bfa_aen_fetch_count(struct bfa_aen_s *aen, enum bfa_aen_app app_id) 77 74 { 78 - return ((aen->write_index + aen->max_entry) - read_index) 75 + bfa_assert((app_id < BFA_AEN_MAX_APP) && (app_id >= bfa_aen_app_bcu)); 76 + return ((aen->write_index + aen->max_entry) - aen->app_ri[app_id]) 79 77 % aen->max_entry; 80 78 } 81 79 82 - s32 bfa_aen_init(struct bfa_aen_s *aen, struct bfa_trc_mod_s *trcmod, 83 - void *bfad, u32 inst_id, void (*aen_cb_notify)(void *), 80 + int bfa_aen_init(struct bfa_aen_s *aen, struct bfa_trc_mod_s *trcmod, 81 + void *bfad, int bfad_num, void (*aen_cb_notify)(void *), 84 82 void (*gettimeofday)(struct bfa_timeval_s *)); 85 83 86 - s32 bfa_aen_post(struct bfa_aen_s *aen, enum bfa_aen_category aen_category, 84 + void bfa_aen_post(struct bfa_aen_s *aen, enum bfa_aen_category aen_category, 87 85 int aen_type, union bfa_aen_data_u *aen_data); 88 86 89 - s32 bfa_aen_fetch(struct bfa_aen_s *aen, struct bfa_aen_entry_s *aen_entry, 90 - s32 entry_space, s32 rii, s32 *ri_arr, 91 - s32 ri_arr_cnt); 87 + bfa_status_t bfa_aen_fetch(struct bfa_aen_s *aen, 88 + struct bfa_aen_entry_s *aen_entry, 89 + int entry_req, enum bfa_aen_app app_id, int *entry_ret); 92 90 93 - s32 bfa_aen_get_inst(struct bfa_aen_s *aen); 91 + int bfa_aen_get_inst(struct bfa_aen_s *aen); 94 92 95 93 #endif /* __BFA_AEN_H__ */
+22
drivers/scsi/bfa/include/bfa.h
··· 106 106 bfa_ioc_fetch_stats(&(__bfa)->ioc, __ioc_stats) 107 107 #define bfa_ioc_clear_stats(__bfa) \ 108 108 bfa_ioc_clr_stats(&(__bfa)->ioc) 109 + #define bfa_get_nports(__bfa) \ 110 + bfa_ioc_get_nports(&(__bfa)->ioc) 111 + #define bfa_get_adapter_manufacturer(__bfa, __manufacturer) \ 112 + bfa_ioc_get_adapter_manufacturer(&(__bfa)->ioc, __manufacturer) 113 + #define bfa_get_adapter_model(__bfa, __model) \ 114 + bfa_ioc_get_adapter_model(&(__bfa)->ioc, __model) 115 + #define bfa_get_adapter_serial_num(__bfa, __serial_num) \ 116 + bfa_ioc_get_adapter_serial_num(&(__bfa)->ioc, __serial_num) 117 + #define bfa_get_adapter_fw_ver(__bfa, __fw_ver) \ 118 + bfa_ioc_get_adapter_fw_ver(&(__bfa)->ioc, __fw_ver) 119 + #define bfa_get_adapter_optrom_ver(__bfa, __optrom_ver) \ 120 + bfa_ioc_get_adapter_optrom_ver(&(__bfa)->ioc, __optrom_ver) 121 + #define bfa_get_pci_chip_rev(__bfa, __chip_rev) \ 122 + bfa_ioc_get_pci_chip_rev(&(__bfa)->ioc, __chip_rev) 123 + #define bfa_get_ioc_state(__bfa) \ 124 + bfa_ioc_get_state(&(__bfa)->ioc) 125 + #define bfa_get_type(__bfa) \ 126 + bfa_ioc_get_type(&(__bfa)->ioc) 127 + #define bfa_get_mac(__bfa) \ 128 + bfa_ioc_get_mac(&(__bfa)->ioc) 109 129 110 130 /* 111 131 * bfa API functions ··· 181 161 void bfa_iocfc_enable(struct bfa_s *bfa); 182 162 void bfa_iocfc_disable(struct bfa_s *bfa); 183 163 void bfa_ioc_auto_recover(bfa_boolean_t auto_recover); 164 + void bfa_chip_reset(struct bfa_s *bfa); 184 165 void bfa_cb_ioc_disable(void *bfad); 185 166 void bfa_timer_tick(struct bfa_s *bfa); 186 167 #define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout) \ ··· 192 171 */ 193 172 bfa_status_t bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen); 194 173 bfa_status_t bfa_debug_fwsave(struct bfa_s *bfa, void *trcdata, int *trclen); 174 + void bfa_debug_fwsave_clear(struct bfa_s *bfa); 195 175 196 176 #include "bfa_priv.h" 197 177
+57 -44
drivers/scsi/bfa/include/bfa_svc.h
··· 26 26 #include <defs/bfa_defs_pport.h> 27 27 #include <defs/bfa_defs_rport.h> 28 28 #include <defs/bfa_defs_qos.h> 29 + #include <defs/bfa_defs_fcport.h> 29 30 #include <cs/bfa_sm.h> 30 31 #include <bfa.h> 31 32 ··· 36 35 struct bfa_rport_info_s { 37 36 u16 max_frmsz; /* max rcv pdu size */ 38 37 u32 pid:24, /* remote port ID */ 39 - lp_tag:8; 38 + lp_tag:8; /* tag */ 40 39 u32 local_pid:24, /* local port ID */ 41 40 cisc:8; /* CIRO supported */ 42 41 u8 fc_class; /* supported FC classes. enum fc_cos */ ··· 55 54 void *rport_drv; /* fcs/driver rport object */ 56 55 u16 fw_handle; /* firmware rport handle */ 57 56 u16 rport_tag; /* BFA rport tag */ 58 - struct bfa_rport_info_s rport_info; /* rport info from *fcs/driver */ 57 + struct bfa_rport_info_s rport_info; /* rport info from fcs/driver */ 59 58 struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */ 60 59 struct bfa_cb_qe_s hcb_qe; /* BFA callback qelem */ 61 60 struct bfa_rport_hal_stats_s stats; /* BFA rport statistics */ ··· 102 101 struct bfa_uf_s { 103 102 struct list_head qe; /* queue element */ 104 103 struct bfa_s *bfa; /* bfa instance */ 105 - u16 uf_tag; /* identifying tag f/w messages */ 104 + u16 uf_tag; /* identifying tag fw msgs */ 106 105 u16 vf_id; 107 106 u16 src_rport_handle; 108 107 u16 rsvd; ··· 128 127 u8 reqq; /* lport request queue */ 129 128 u8 alpa; /* ALPA for loop topologies */ 130 129 u32 lp_pid; /* lport port ID */ 131 - bfa_boolean_t fdisc; /* send FDISC instead of FLOGI*/ 130 + bfa_boolean_t fdisc; /* send FDISC instead of FLOGI */ 132 131 bfa_boolean_t auth_en; /* enable authentication */ 133 132 bfa_boolean_t auth_req; /* authentication required */ 134 133 bfa_boolean_t npiv_en; /* NPIV is allowed by peer */ ··· 152 151 bfa_eproto_status_t ext_status; 153 152 }; 154 153 154 + #define BFA_FCPORT(_bfa) (&((_bfa)->modules.port)) 155 + 155 156 /* 156 157 * bfa pport API functions 157 158 */ 158 - bfa_status_t bfa_pport_enable(struct bfa_s *bfa); 159 - bfa_status_t bfa_pport_disable(struct bfa_s *bfa); 160 - bfa_status_t bfa_pport_cfg_speed(struct bfa_s *bfa, 159 + bfa_status_t bfa_fcport_enable(struct bfa_s *bfa); 160 + bfa_status_t bfa_fcport_disable(struct bfa_s *bfa); 161 + bfa_status_t bfa_fcport_cfg_speed(struct bfa_s *bfa, 161 162 enum bfa_pport_speed speed); 162 - enum bfa_pport_speed bfa_pport_get_speed(struct bfa_s *bfa); 163 - bfa_status_t bfa_pport_cfg_topology(struct bfa_s *bfa, 163 + enum bfa_pport_speed bfa_fcport_get_speed(struct bfa_s *bfa); 164 + bfa_status_t bfa_fcport_cfg_topology(struct bfa_s *bfa, 164 165 enum bfa_pport_topology topo); 165 - enum bfa_pport_topology bfa_pport_get_topology(struct bfa_s *bfa); 166 - bfa_status_t bfa_pport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa); 167 - bfa_boolean_t bfa_pport_get_hardalpa(struct bfa_s *bfa, u8 *alpa); 168 - u8 bfa_pport_get_myalpa(struct bfa_s *bfa); 169 - bfa_status_t bfa_pport_clr_hardalpa(struct bfa_s *bfa); 170 - bfa_status_t bfa_pport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxsize); 171 - u16 bfa_pport_get_maxfrsize(struct bfa_s *bfa); 172 - u32 bfa_pport_mypid(struct bfa_s *bfa); 173 - u8 bfa_pport_get_rx_bbcredit(struct bfa_s *bfa); 174 - bfa_status_t bfa_pport_trunk_enable(struct bfa_s *bfa, u8 bitmap); 175 - bfa_status_t bfa_pport_trunk_disable(struct bfa_s *bfa); 176 - bfa_boolean_t bfa_pport_trunk_query(struct bfa_s *bfa, u32 *bitmap); 177 - void bfa_pport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr); 178 - wwn_t bfa_pport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node); 179 - bfa_status_t 
bfa_pport_get_stats(struct bfa_s *bfa, 180 - union bfa_pport_stats_u *stats, 181 - bfa_cb_pport_t cbfn, void *cbarg); 182 - bfa_status_t bfa_pport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, 183 - void *cbarg); 184 - void bfa_pport_event_register(struct bfa_s *bfa, 166 + enum bfa_pport_topology bfa_fcport_get_topology(struct bfa_s *bfa); 167 + bfa_status_t bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa); 168 + bfa_boolean_t bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa); 169 + u8 bfa_fcport_get_myalpa(struct bfa_s *bfa); 170 + bfa_status_t bfa_fcport_clr_hardalpa(struct bfa_s *bfa); 171 + bfa_status_t bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxsize); 172 + u16 bfa_fcport_get_maxfrsize(struct bfa_s *bfa); 173 + u32 bfa_fcport_mypid(struct bfa_s *bfa); 174 + u8 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa); 175 + bfa_status_t bfa_fcport_trunk_enable(struct bfa_s *bfa, u8 bitmap); 176 + bfa_status_t bfa_fcport_trunk_disable(struct bfa_s *bfa); 177 + bfa_boolean_t bfa_fcport_trunk_query(struct bfa_s *bfa, u32 *bitmap); 178 + void bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr); 179 + wwn_t bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node); 180 + void bfa_fcport_event_register(struct bfa_s *bfa, 185 181 void (*event_cbfn) (void *cbarg, 186 182 bfa_pport_event_t event), void *event_cbarg); 187 - bfa_boolean_t bfa_pport_is_disabled(struct bfa_s *bfa); 188 - void bfa_pport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off); 189 - void bfa_pport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off); 190 - bfa_status_t bfa_pport_cfg_ratelim_speed(struct bfa_s *bfa, 183 + bfa_boolean_t bfa_fcport_is_disabled(struct bfa_s *bfa); 184 + void bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off); 185 + void bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off); 186 + bfa_status_t bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa, 191 187 enum bfa_pport_speed speed); 192 - enum bfa_pport_speed bfa_pport_get_ratelim_speed(struct bfa_s *bfa); 188 + enum bfa_pport_speed bfa_fcport_get_ratelim_speed(struct bfa_s *bfa); 193 189 194 - void bfa_pport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit); 195 - void bfa_pport_busy(struct bfa_s *bfa, bfa_boolean_t status); 196 - void bfa_pport_beacon(struct bfa_s *bfa, bfa_boolean_t beacon, 190 + void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit); 191 + void bfa_fcport_busy(struct bfa_s *bfa, bfa_boolean_t status); 192 + void bfa_fcport_beacon(struct bfa_s *bfa, bfa_boolean_t beacon, 197 193 bfa_boolean_t link_e2e_beacon); 198 194 void bfa_cb_pport_event(void *cbarg, bfa_pport_event_t event); 199 - void bfa_pport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr); 200 - void bfa_pport_qos_get_vc_attr(struct bfa_s *bfa, 195 + void bfa_fcport_qos_get_attr(struct bfa_s *bfa, 196 + struct bfa_qos_attr_s *qos_attr); 197 + void bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa, 201 198 struct bfa_qos_vc_attr_s *qos_vc_attr); 202 - bfa_status_t bfa_pport_get_qos_stats(struct bfa_s *bfa, 203 - union bfa_pport_stats_u *stats, 199 + bfa_status_t bfa_fcport_get_qos_stats(struct bfa_s *bfa, 200 + union bfa_fcport_stats_u *stats, 204 201 bfa_cb_pport_t cbfn, void *cbarg); 205 - bfa_status_t bfa_pport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, 202 + bfa_status_t bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, 206 203 void *cbarg); 207 - bfa_boolean_t bfa_pport_is_ratelim(struct bfa_s *bfa); 208 - bfa_boolean_t bfa_pport_is_linkup(struct bfa_s *bfa); 204 + 
bfa_status_t bfa_fcport_get_fcoe_stats(struct bfa_s *bfa, 205 + union bfa_fcport_stats_u *stats, 206 + bfa_cb_pport_t cbfn, void *cbarg); 207 + bfa_status_t bfa_fcport_clear_fcoe_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, 208 + void *cbarg); 209 + 210 + bfa_boolean_t bfa_fcport_is_ratelim(struct bfa_s *bfa); 211 + bfa_boolean_t bfa_fcport_is_linkup(struct bfa_s *bfa); 212 + bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa, 213 + union bfa_fcport_stats_u *stats, 214 + bfa_cb_pport_t cbfn, void *cbarg); 215 + bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, 216 + void *cbarg); 209 217 210 218 /* 211 219 * bfa rport API functions ··· 303 293 * bfa lport service api 304 294 */ 305 295 296 + u32 bfa_lps_get_max_vport(struct bfa_s *bfa); 306 297 struct bfa_lps_s *bfa_lps_alloc(struct bfa_s *bfa); 307 298 void bfa_lps_delete(struct bfa_lps_s *lps); 308 299 void bfa_lps_discard(struct bfa_lps_s *lps); ··· 326 315 wwn_t bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps); 327 316 u8 bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps); 328 317 u8 bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps); 318 + mac_t bfa_lps_get_lp_mac(struct bfa_lps_s *lps); 329 319 void bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status); 330 320 void bfa_cb_lps_flogo_comp(void *bfad, void *uarg); 331 321 void bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status); 332 322 void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg); 323 + void bfa_cb_lps_cvl_event(void *bfad, void *uarg); 333 324 334 325 #endif /* __BFA_SVC_H__ */ 335 326
+1 -1
drivers/scsi/bfa/include/bfa_timer.h
··· 41 41 struct list_head timer_q; 42 42 }; 43 43 44 - #define BFA_TIMER_FREQ 500 /**< specified in millisecs */ 44 + #define BFA_TIMER_FREQ 200 /**< specified in millisecs */ 45 45 46 46 void bfa_timer_beat(struct bfa_timer_mod_s *mod); 47 47 void bfa_timer_init(struct bfa_timer_mod_s *mod);
+2 -2
drivers/scsi/bfa/include/bfi/bfi.h
··· 143 143 BFI_MC_IOC = 1, /* IO Controller (IOC) */ 144 144 BFI_MC_DIAG = 2, /* Diagnostic Msgs */ 145 145 BFI_MC_FLASH = 3, /* Flash message class */ 146 - BFI_MC_CEE = 4, 147 - BFI_MC_FC_PORT = 5, /* FC port */ 146 + BFI_MC_CEE = 4, /* CEE */ 147 + BFI_MC_FCPORT = 5, /* FC port */ 148 148 BFI_MC_IOCFC = 6, /* FC - IO Controller (IOC) */ 149 149 BFI_MC_LL = 7, /* Link Layer */ 150 150 BFI_MC_UF = 8, /* Unsolicited frame receive */
+15 -1
drivers/scsi/bfa/include/bfi/bfi_cbreg.h
··· 177 177 #define __PSS_LMEM_INIT_EN 0x00000100 178 178 #define __PSS_LPU1_RESET 0x00000002 179 179 #define __PSS_LPU0_RESET 0x00000001 180 - 180 + #define PSS_ERR_STATUS_REG 0x00018810 181 + #define __PSS_LMEM1_CORR_ERR 0x00000800 182 + #define __PSS_LMEM0_CORR_ERR 0x00000400 183 + #define __PSS_LMEM1_UNCORR_ERR 0x00000200 184 + #define __PSS_LMEM0_UNCORR_ERR 0x00000100 185 + #define __PSS_BAL_PERR 0x00000080 186 + #define __PSS_DIP_IF_ERR 0x00000040 187 + #define __PSS_IOH_IF_ERR 0x00000020 188 + #define __PSS_TDS_IF_ERR 0x00000010 189 + #define __PSS_RDS_IF_ERR 0x00000008 190 + #define __PSS_SGM_IF_ERR 0x00000004 191 + #define __PSS_LPU1_RAM_ERR 0x00000002 192 + #define __PSS_LPU0_RAM_ERR 0x00000001 193 + #define ERR_SET_REG 0x00018818 194 + #define __PSS_ERR_STATUS_SET 0x00000fff 181 195 182 196 /* 183 197 * These definitions are either in error/missing in spec. Its auto-generated
+26
drivers/scsi/bfa/include/bfi/bfi_ctreg.h
··· 430 430 #define __PSS_LMEM_INIT_EN 0x00000100 431 431 #define __PSS_LPU1_RESET 0x00000002 432 432 #define __PSS_LPU0_RESET 0x00000001 433 + #define PSS_ERR_STATUS_REG 0x00018810 434 + #define __PSS_LPU1_TCM_READ_ERR 0x00200000 435 + #define __PSS_LPU0_TCM_READ_ERR 0x00100000 436 + #define __PSS_LMEM5_CORR_ERR 0x00080000 437 + #define __PSS_LMEM4_CORR_ERR 0x00040000 438 + #define __PSS_LMEM3_CORR_ERR 0x00020000 439 + #define __PSS_LMEM2_CORR_ERR 0x00010000 440 + #define __PSS_LMEM1_CORR_ERR 0x00008000 441 + #define __PSS_LMEM0_CORR_ERR 0x00004000 442 + #define __PSS_LMEM5_UNCORR_ERR 0x00002000 443 + #define __PSS_LMEM4_UNCORR_ERR 0x00001000 444 + #define __PSS_LMEM3_UNCORR_ERR 0x00000800 445 + #define __PSS_LMEM2_UNCORR_ERR 0x00000400 446 + #define __PSS_LMEM1_UNCORR_ERR 0x00000200 447 + #define __PSS_LMEM0_UNCORR_ERR 0x00000100 448 + #define __PSS_BAL_PERR 0x00000080 449 + #define __PSS_DIP_IF_ERR 0x00000040 450 + #define __PSS_IOH_IF_ERR 0x00000020 451 + #define __PSS_TDS_IF_ERR 0x00000010 452 + #define __PSS_RDS_IF_ERR 0x00000008 453 + #define __PSS_SGM_IF_ERR 0x00000004 454 + #define __PSS_LPU1_RAM_ERR 0x00000002 455 + #define __PSS_LPU0_RAM_ERR 0x00000001 456 + #define ERR_SET_REG 0x00018818 457 + #define __PSS_ERR_STATUS_SET 0x003fffff 433 458 #define HQM_QSET0_RXQ_DRBL_P0 0x00038000 434 459 #define __RXQ0_ADD_VECTORS_P 0x80000000 435 460 #define __RXQ0_STOP_P 0x40000000 ··· 614 589 #define __HFN_INT_MBOX_LPU1 0x00200000U 615 590 #define __HFN_INT_MBOX1_LPU0 0x00400000U 616 591 #define __HFN_INT_MBOX1_LPU1 0x00800000U 592 + #define __HFN_INT_LL_HALT 0x01000000U 617 593 #define __HFN_INT_CPE_MASK 0x000000ffU 618 594 #define __HFN_INT_RME_MASK 0x0000ff00U 619 595
+1 -1
drivers/scsi/bfa/include/bfi/bfi_ioc.h
··· 123 123 BFI_IOC_DISABLING = 5, /* IOC is being disabled */ 124 124 BFI_IOC_DISABLED = 6, /* IOC is disabled */ 125 125 BFI_IOC_CFG_DISABLED = 7, /* IOC is being disabled;transient */ 126 - BFI_IOC_HBFAIL = 8, /* IOC heart-beat failure */ 126 + BFI_IOC_FAIL = 8, /* IOC heart-beat failure */ 127 127 BFI_IOC_MEMTEST = 9, /* IOC is doing memtest */ 128 128 }; 129 129
+8
drivers/scsi/bfa/include/bfi/bfi_lps.h
··· 30 30 enum bfi_lps_i2h_msgs { 31 31 BFI_LPS_H2I_LOGIN_RSP = BFA_I2HM(1), 32 32 BFI_LPS_H2I_LOGOUT_RSP = BFA_I2HM(2), 33 + BFI_LPS_H2I_CVL_EVENT = BFA_I2HM(3), 33 34 }; 34 35 35 36 struct bfi_lps_login_req_s { ··· 78 77 u8 rsvd[2]; 79 78 }; 80 79 80 + struct bfi_lps_cvl_event_s { 81 + struct bfi_mhdr_s mh; /* common msg header */ 82 + u8 lp_tag; 83 + u8 rsvd[3]; 84 + }; 85 + 81 86 union bfi_lps_h2i_msg_u { 82 87 struct bfi_mhdr_s *msg; 83 88 struct bfi_lps_login_req_s *login_req; ··· 94 87 struct bfi_msg_s *msg; 95 88 struct bfi_lps_login_rsp_s *login_rsp; 96 89 struct bfi_lps_logout_rsp_s *logout_rsp; 90 + struct bfi_lps_cvl_event_s *cvl_event; 97 91 }; 98 92 99 93 #pragma pack()
+53 -119
drivers/scsi/bfa/include/bfi/bfi_pport.h
··· 22 22 23 23 #pragma pack(1) 24 24 25 - enum bfi_pport_h2i { 26 - BFI_PPORT_H2I_ENABLE_REQ = (1), 27 - BFI_PPORT_H2I_DISABLE_REQ = (2), 28 - BFI_PPORT_H2I_GET_STATS_REQ = (3), 29 - BFI_PPORT_H2I_CLEAR_STATS_REQ = (4), 30 - BFI_PPORT_H2I_SET_SVC_PARAMS_REQ = (5), 31 - BFI_PPORT_H2I_ENABLE_RX_VF_TAG_REQ = (6), 32 - BFI_PPORT_H2I_ENABLE_TX_VF_TAG_REQ = (7), 33 - BFI_PPORT_H2I_GET_QOS_STATS_REQ = (8), 34 - BFI_PPORT_H2I_CLEAR_QOS_STATS_REQ = (9), 25 + enum bfi_fcport_h2i { 26 + BFI_FCPORT_H2I_ENABLE_REQ = (1), 27 + BFI_FCPORT_H2I_DISABLE_REQ = (2), 28 + BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ = (3), 29 + BFI_FCPORT_H2I_STATS_GET_REQ = (4), 30 + BFI_FCPORT_H2I_STATS_CLEAR_REQ = (5), 35 31 }; 36 32 37 - enum bfi_pport_i2h { 38 - BFI_PPORT_I2H_ENABLE_RSP = BFA_I2HM(1), 39 - BFI_PPORT_I2H_DISABLE_RSP = BFA_I2HM(2), 40 - BFI_PPORT_I2H_GET_STATS_RSP = BFA_I2HM(3), 41 - BFI_PPORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(4), 42 - BFI_PPORT_I2H_SET_SVC_PARAMS_RSP = BFA_I2HM(5), 43 - BFI_PPORT_I2H_ENABLE_RX_VF_TAG_RSP = BFA_I2HM(6), 44 - BFI_PPORT_I2H_ENABLE_TX_VF_TAG_RSP = BFA_I2HM(7), 45 - BFI_PPORT_I2H_EVENT = BFA_I2HM(8), 46 - BFI_PPORT_I2H_GET_QOS_STATS_RSP = BFA_I2HM(9), 47 - BFI_PPORT_I2H_CLEAR_QOS_STATS_RSP = BFA_I2HM(10), 33 + enum bfi_fcport_i2h { 34 + BFI_FCPORT_I2H_ENABLE_RSP = BFA_I2HM(1), 35 + BFI_FCPORT_I2H_DISABLE_RSP = BFA_I2HM(2), 36 + BFI_FCPORT_I2H_SET_SVC_PARAMS_RSP = BFA_I2HM(3), 37 + BFI_FCPORT_I2H_STATS_GET_RSP = BFA_I2HM(4), 38 + BFI_FCPORT_I2H_STATS_CLEAR_RSP = BFA_I2HM(5), 39 + BFI_FCPORT_I2H_EVENT = BFA_I2HM(6), 48 40 }; 49 41 50 42 /** 51 43 * Generic REQ type 52 44 */ 53 - struct bfi_pport_generic_req_s { 45 + struct bfi_fcport_req_s { 54 46 struct bfi_mhdr_s mh; /* msg header */ 55 - u32 msgtag; /* msgtag for reply */ 47 + u32 msgtag; /* msgtag for reply */ 56 48 }; 57 49 58 50 /** 59 51 * Generic RSP type 60 52 */ 61 - struct bfi_pport_generic_rsp_s { 53 + struct bfi_fcport_rsp_s { 62 54 struct bfi_mhdr_s mh; /* common msg header */ 63 - u8 status; /* port enable status */ 64 - u8 rsvd[3]; 65 - u32 msgtag; /* msgtag for reply */ 55 + u8 status; /* port enable status */ 56 + u8 rsvd[3]; 57 + u32 msgtag; /* msgtag for reply */ 66 58 }; 67 59 68 60 /** 69 - * BFI_PPORT_H2I_ENABLE_REQ 61 + * BFI_FCPORT_H2I_ENABLE_REQ 70 62 */ 71 - struct bfi_pport_enable_req_s { 63 + struct bfi_fcport_enable_req_s { 72 64 struct bfi_mhdr_s mh; /* msg header */ 73 - u32 rsvd1; 74 - wwn_t nwwn; /* node wwn of physical port */ 75 - wwn_t pwwn; /* port wwn of physical port */ 76 - struct bfa_pport_cfg_s port_cfg; /* port configuration */ 77 - union bfi_addr_u stats_dma_addr; /* DMA address for stats */ 78 - u32 msgtag; /* msgtag for reply */ 79 - u32 rsvd2; 65 + u32 rsvd1; 66 + wwn_t nwwn; /* node wwn of physical port */ 67 + wwn_t pwwn; /* port wwn of physical port */ 68 + struct bfa_pport_cfg_s port_cfg; /* port configuration */ 69 + union bfi_addr_u stats_dma_addr; /* DMA address for stats */ 70 + u32 msgtag; /* msgtag for reply */ 71 + u32 rsvd2; 80 72 }; 81 73 82 74 /** 83 - * BFI_PPORT_I2H_ENABLE_RSP 75 + * BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ 84 76 */ 85 - #define bfi_pport_enable_rsp_t struct bfi_pport_generic_rsp_s 86 - 87 - /** 88 - * BFI_PPORT_H2I_DISABLE_REQ 89 - */ 90 - #define bfi_pport_disable_req_t struct bfi_pport_generic_req_s 91 - 92 - /** 93 - * BFI_PPORT_I2H_DISABLE_RSP 94 - */ 95 - #define bfi_pport_disable_rsp_t struct bfi_pport_generic_rsp_s 96 - 97 - /** 98 - * BFI_PPORT_H2I_GET_STATS_REQ 99 - */ 100 - #define bfi_pport_get_stats_req_t struct bfi_pport_generic_req_s 101 - 102 - /** 103 - 
* BFI_PPORT_I2H_GET_STATS_RSP 104 - */ 105 - #define bfi_pport_get_stats_rsp_t struct bfi_pport_generic_rsp_s 106 - 107 - /** 108 - * BFI_PPORT_H2I_CLEAR_STATS_REQ 109 - */ 110 - #define bfi_pport_clear_stats_req_t struct bfi_pport_generic_req_s 111 - 112 - /** 113 - * BFI_PPORT_I2H_CLEAR_STATS_RSP 114 - */ 115 - #define bfi_pport_clear_stats_rsp_t struct bfi_pport_generic_rsp_s 116 - 117 - /** 118 - * BFI_PPORT_H2I_GET_QOS_STATS_REQ 119 - */ 120 - #define bfi_pport_get_qos_stats_req_t struct bfi_pport_generic_req_s 121 - 122 - /** 123 - * BFI_PPORT_H2I_GET_QOS_STATS_RSP 124 - */ 125 - #define bfi_pport_get_qos_stats_rsp_t struct bfi_pport_generic_rsp_s 126 - 127 - /** 128 - * BFI_PPORT_H2I_CLEAR_QOS_STATS_REQ 129 - */ 130 - #define bfi_pport_clear_qos_stats_req_t struct bfi_pport_generic_req_s 131 - 132 - /** 133 - * BFI_PPORT_H2I_CLEAR_QOS_STATS_RSP 134 - */ 135 - #define bfi_pport_clear_qos_stats_rsp_t struct bfi_pport_generic_rsp_s 136 - 137 - /** 138 - * BFI_PPORT_H2I_SET_SVC_PARAMS_REQ 139 - */ 140 - struct bfi_pport_set_svc_params_req_s { 77 + struct bfi_fcport_set_svc_params_req_s { 141 78 struct bfi_mhdr_s mh; /* msg header */ 142 - u16 tx_bbcredit; /* Tx credits */ 143 - u16 rsvd; 79 + u16 tx_bbcredit; /* Tx credits */ 80 + u16 rsvd; 144 81 }; 145 82 146 83 /** 147 - * BFI_PPORT_I2H_SET_SVC_PARAMS_RSP 84 + * BFI_FCPORT_I2H_EVENT 148 85 */ 149 - 150 - /** 151 - * BFI_PPORT_I2H_EVENT 152 - */ 153 - struct bfi_pport_event_s { 86 + struct bfi_fcport_event_s { 154 87 struct bfi_mhdr_s mh; /* common msg header */ 155 88 struct bfa_pport_link_s link_state; 156 89 }; 157 90 158 - union bfi_pport_h2i_msg_u { 91 + /** 92 + * fcport H2I message 93 + */ 94 + union bfi_fcport_h2i_msg_u { 159 95 struct bfi_mhdr_s *mhdr; 160 - struct bfi_pport_enable_req_s *penable; 161 - struct bfi_pport_generic_req_s *pdisable; 162 - struct bfi_pport_generic_req_s *pgetstats; 163 - struct bfi_pport_generic_req_s *pclearstats; 164 - struct bfi_pport_set_svc_params_req_s *psetsvcparams; 165 - struct bfi_pport_get_qos_stats_req_s *pgetqosstats; 166 - struct bfi_pport_generic_req_s *pclearqosstats; 96 + struct bfi_fcport_enable_req_s *penable; 97 + struct bfi_fcport_req_s *pdisable; 98 + struct bfi_fcport_set_svc_params_req_s *psetsvcparams; 99 + struct bfi_fcport_req_s *pstatsget; 100 + struct bfi_fcport_req_s *pstatsclear; 167 101 }; 168 102 169 - union bfi_pport_i2h_msg_u { 103 + /** 104 + * fcport I2H message 105 + */ 106 + union bfi_fcport_i2h_msg_u { 170 107 struct bfi_msg_s *msg; 171 - struct bfi_pport_generic_rsp_s *enable_rsp; 172 - struct bfi_pport_disable_rsp_s *disable_rsp; 173 - struct bfi_pport_generic_rsp_s *getstats_rsp; 174 - struct bfi_pport_clear_stats_rsp_s *clearstats_rsp; 175 - struct bfi_pport_set_svc_params_rsp_s *setsvcparasm_rsp; 176 - struct bfi_pport_get_qos_stats_rsp_s *getqosstats_rsp; 177 - struct bfi_pport_clear_qos_stats_rsp_s *clearqosstats_rsp; 178 - struct bfi_pport_event_s *event; 108 + struct bfi_fcport_rsp_s *penable_rsp; 109 + struct bfi_fcport_rsp_s *pdisable_rsp; 110 + struct bfi_fcport_rsp_s *psetsvcparams_rsp; 111 + struct bfi_fcport_rsp_s *pstatsget_rsp; 112 + struct bfi_fcport_rsp_s *pstatsclear_rsp; 113 + struct bfi_fcport_event_s *event; 179 114 }; 180 115 181 116 #pragma pack() 182 117 183 118 #endif /* __BFI_PPORT_H__ */ 184 -
+4
drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h
··· 31 31 enum { 32 32 BFA_TRC_CNA_CEE = 1, 33 33 BFA_TRC_CNA_PORT = 2, 34 + BFA_TRC_CNA_IOC = 3, 35 + BFA_TRC_CNA_DIAG = 4, 36 + BFA_TRC_CNA_IOC_CB = 5, 37 + BFA_TRC_CNA_IOC_CT = 6, 34 38 }; 35 39 36 40 #endif /* __BFA_CNA_TRCMOD_H__ */
+1 -1
drivers/scsi/bfa/include/cs/bfa_log.h
··· 157 157 158 158 159 159 struct bfa_log_mod_s { 160 - char instance_info[16]; /* instance info */ 160 + char instance_info[BFA_STRING_32]; /* instance info */ 161 161 int log_level[BFA_LOG_MODULE_ID_MAX + 1]; 162 162 /* log level for modules */ 163 163 bfa_log_cb_t cbfn; /* callback function */
+7 -2
drivers/scsi/bfa/include/cs/bfa_plog.h
··· 80 80 BFA_PL_MID_HAL_FCXP = 4, 81 81 BFA_PL_MID_HAL_UF = 5, 82 82 BFA_PL_MID_FCS = 6, 83 - BFA_PL_MID_MAX = 7 83 + BFA_PL_MID_LPS = 7, 84 + BFA_PL_MID_MAX = 8 84 85 }; 85 86 86 87 #define BFA_PL_MID_STRLEN 8 ··· 119 118 BFA_PL_EID_RSCN = 17, 120 119 BFA_PL_EID_DEBUG = 18, 121 120 BFA_PL_EID_MISC = 19, 122 - BFA_PL_EID_MAX = 20 121 + BFA_PL_EID_FIP_FCF_DISC = 20, 122 + BFA_PL_EID_FIP_FCF_CVL = 21, 123 + BFA_PL_EID_LOGIN = 22, 124 + BFA_PL_EID_LOGO = 23, 125 + BFA_PL_EID_MAX = 24 123 126 }; 124 127 125 128 #define BFA_PL_ENAME_STRLEN 8
+8
drivers/scsi/bfa/include/cs/bfa_sm.h
··· 23 23 #define __BFA_SM_H__ 24 24 25 25 typedef void (*bfa_sm_t)(void *sm, int event); 26 + /** 27 + * oc - object class eg. bfa_ioc 28 + * st - state, eg. reset 29 + * otype - object type, eg. struct bfa_ioc_s 30 + * etype - object type, eg. enum ioc_event 31 + */ 32 + #define bfa_sm_state_decl(oc, st, otype, etype) \ 33 + static void oc ## _sm_ ## st(otype * fsm, etype event) 26 34 27 35 #define bfa_sm_set_state(_sm, _state) ((_sm)->sm = (bfa_sm_t)(_state)) 28 36 #define bfa_sm_send_event(_sm, _event) ((_sm)->sm((_sm), (_event)))
+10
drivers/scsi/bfa/include/defs/bfa_defs_aen.h
··· 30 30 #include <defs/bfa_defs_audit.h> 31 31 #include <defs/bfa_defs_ethport.h> 32 32 33 + #define BFA_AEN_MAX_APP 5 34 + 35 + enum bfa_aen_app { 36 + bfa_aen_app_bcu = 0, /* No thread for bcu */ 37 + bfa_aen_app_hcm = 1, 38 + bfa_aen_app_cim = 2, 39 + bfa_aen_app_snia = 3, 40 + bfa_aen_app_test = 4, /* To be removed after unit test */ 41 + }; 42 + 33 43 enum bfa_aen_category { 34 44 BFA_AEN_CAT_ADAPTER = 1, 35 45 BFA_AEN_CAT_PORT = 2,
+22
drivers/scsi/bfa/include/defs/bfa_defs_auth.h
··· 23 23 #define PRIVATE_KEY 19009 24 24 #define KEY_LEN 32399 25 25 #define BFA_AUTH_SECRET_STRING_LEN 256 26 + #define BFA_AUTH_FAIL_NO_PASSWORD 0xFE 26 27 #define BFA_AUTH_FAIL_TIMEOUT 0xFF 27 28 28 29 /** ··· 40 39 BFA_AUTH_SUCCESS = 7, /* state - success */ 41 40 BFA_AUTH_FAILED = 8, /* state - failed */ 42 41 BFA_AUTH_STATUS_UNKNOWN = 9, /* authentication status unknown */ 42 + }; 43 + 44 + enum bfa_auth_rej_code { 45 + BFA_AUTH_RJT_CODE_AUTH_FAILURE = 1, /* auth failure */ 46 + BFA_AUTH_RJT_CODE_LOGICAL_ERR = 2, /* logical error */ 47 + }; 48 + 49 + /** 50 + * Authentication reject codes 51 + */ 52 + enum bfa_auth_rej_code_exp { 53 + BFA_AUTH_MECH_NOT_USABLE = 1, /* auth. mechanism not usable */ 54 + BFA_AUTH_DH_GROUP_NOT_USABLE = 2, /* DH Group not usable */ 55 + BFA_AUTH_HASH_FUNC_NOT_USABLE = 3, /* hash Function not usable */ 56 + BFA_AUTH_AUTH_XACT_STARTED = 4, /* auth xact started */ 57 + BFA_AUTH_AUTH_FAILED = 5, /* auth failed */ 58 + BFA_AUTH_INCORRECT_PLD = 6, /* incorrect payload */ 59 + BFA_AUTH_INCORRECT_PROTO_MSG = 7, /* incorrect proto msg */ 60 + BFA_AUTH_RESTART_AUTH_PROTO = 8, /* restart auth protocol */ 61 + BFA_AUTH_AUTH_CONCAT_NOT_SUPP = 9, /* auth concat not supported */ 62 + BFA_AUTH_PROTO_VER_NOT_SUPP = 10,/* proto version not supported */ 43 63 }; 44 64 45 65 struct auth_proto_stats_s {
+6 -8
drivers/scsi/bfa/include/defs/bfa_defs_cee.h
··· 28 28 29 29 #define BFA_CEE_LLDP_MAX_STRING_LEN (128) 30 30 31 - 32 - /* FIXME: this is coming from the protocol spec. Can the host & apps share the 33 - protocol .h files ? 34 - */ 35 31 #define BFA_CEE_LLDP_SYS_CAP_OTHER 0x0001 36 32 #define BFA_CEE_LLDP_SYS_CAP_REPEATER 0x0002 37 33 #define BFA_CEE_LLDP_SYS_CAP_MAC_BRIDGE 0x0004 ··· 90 94 /* CEE status */ 91 95 /* Making this to tri-state for the benefit of port list command */ 92 96 enum bfa_cee_status_e { 93 - CEE_PHY_DOWN = 0, 94 - CEE_PHY_UP = 1, 95 - CEE_UP = 2, 97 + CEE_UP = 0, 98 + CEE_PHY_UP = 1, 99 + CEE_LOOPBACK = 2, 100 + CEE_PHY_DOWN = 3, 96 101 }; 97 102 98 103 /* CEE Query */ ··· 104 107 struct bfa_cee_dcbx_cfg_s dcbx_remote; 105 108 mac_t src_mac; 106 109 u8 link_speed; 107 - u8 filler[3]; 110 + u8 nw_priority; 111 + u8 filler[2]; 108 112 }; 109 113 110 114
+2 -1
drivers/scsi/bfa/include/defs/bfa_defs_driver.h
··· 21 21 /** 22 22 * Driver statistics 23 23 */ 24 + struct bfa_driver_stats_s { 24 25 u16 tm_io_abort; 25 26 u16 tm_io_abort_comp; 26 27 u16 tm_lun_reset; ··· 35 34 u64 output_req; 36 35 u64 input_words; 37 36 u64 output_words; 38 - } bfa_driver_stats_t; 37 + }; 39 38 40 39 41 40 #endif /* __BFA_DEFS_DRIVER_H__ */
+1
drivers/scsi/bfa/include/defs/bfa_defs_ethport.h
··· 19 19 #define __BFA_DEFS_ETHPORT_H__ 20 20 21 21 #include <defs/bfa_defs_status.h> 22 + #include <defs/bfa_defs_port.h> 22 23 #include <protocol/types.h> 23 24 #include <cna/pstats/phyport_defs.h> 24 25 #include <cna/pstats/ethport_defs.h>
+94
drivers/scsi/bfa/include/defs/bfa_defs_fcport.h
··· 1 + /* 2 + * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 3 + * All rights reserved 4 + * www.brocade.com 5 + * 6 + * bfa_defs_fcport.h 7 + * 8 + * Linux driver for Brocade Fibre Channel Host Bus Adapter. 9 + * 10 + * This program is free software; you can redistribute it and/or modify it 11 + * under the terms of the GNU General Public License (GPL) Version 2 as 12 + * published by the Free Software Foundation 13 + * 14 + * This program is distributed in the hope that it will be useful, but 15 + * WITHOUT ANY WARRANTY; without even the implied warranty of 16 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 17 + * General Public License for more details. 18 + */ 19 + #ifndef __BFA_DEFS_FCPORT_H__ 20 + #define __BFA_DEFS_FCPORT_H__ 21 + 22 + #include <defs/bfa_defs_types.h> 23 + #include <protocol/types.h> 24 + 25 + #pragma pack(1) 26 + 27 + /** 28 + * FCoE statistics 29 + */ 30 + struct bfa_fcoe_stats_s { 31 + u64 secs_reset; /* Seconds since stats reset */ 32 + u64 cee_linkups; /* CEE link up */ 33 + u64 cee_linkdns; /* CEE link down */ 34 + u64 fip_linkups; /* FIP link up */ 35 + u64 fip_linkdns; /* FIP link down */ 36 + u64 fip_fails; /* FIP failures */ 37 + u64 mac_invalids; /* Invalid mac assignments */ 38 + u64 vlan_req; /* Vlan requests */ 39 + u64 vlan_notify; /* Vlan notifications */ 40 + u64 vlan_err; /* Vlan notification errors */ 41 + u64 vlan_timeouts; /* Vlan request timeouts */ 42 + u64 vlan_invalids; /* Vlan invalids */ 43 + u64 disc_req; /* Discovery requests */ 44 + u64 disc_rsp; /* Discovery responses */ 45 + u64 disc_err; /* Discovery error frames */ 46 + u64 disc_unsol; /* Discovery unsolicited */ 47 + u64 disc_timeouts; /* Discovery timeouts */ 48 + u64 disc_fcf_unavail; /* Discovery FCF not avail */ 49 + u64 linksvc_unsupp; /* FIP link service req unsupp. */ 50 + u64 linksvc_err; /* FIP link service req errors */ 51 + u64 logo_req; /* FIP logo */ 52 + u64 clrvlink_req; /* Clear virtual link requests */ 53 + u64 op_unsupp; /* FIP operation unsupp. 
*/ 54 + u64 untagged; /* FIP untagged frames */ 55 + u64 txf_ucast; /* Tx FCoE unicast frames */ 56 + u64 txf_ucast_vlan; /* Tx FCoE unicast vlan frames */ 57 + u64 txf_ucast_octets; /* Tx FCoE unicast octets */ 58 + u64 txf_mcast; /* Tx FCoE mutlicast frames */ 59 + u64 txf_mcast_vlan; /* Tx FCoE mutlicast vlan frames */ 60 + u64 txf_mcast_octets; /* Tx FCoE multicast octets */ 61 + u64 txf_bcast; /* Tx FCoE broadcast frames */ 62 + u64 txf_bcast_vlan; /* Tx FCoE broadcast vlan frames */ 63 + u64 txf_bcast_octets; /* Tx FCoE broadcast octets */ 64 + u64 txf_timeout; /* Tx timeouts */ 65 + u64 txf_parity_errors; /* Transmit parity err */ 66 + u64 txf_fid_parity_errors; /* Transmit FID parity err */ 67 + u64 tx_pause; /* Tx pause frames */ 68 + u64 tx_zero_pause; /* Tx zero pause frames */ 69 + u64 tx_first_pause; /* Tx first pause frames */ 70 + u64 rx_pause; /* Rx pause frames */ 71 + u64 rx_zero_pause; /* Rx zero pause frames */ 72 + u64 rx_first_pause; /* Rx first pause frames */ 73 + u64 rxf_ucast_octets; /* Rx unicast octets */ 74 + u64 rxf_ucast; /* Rx unicast frames */ 75 + u64 rxf_ucast_vlan; /* Rx unicast vlan frames */ 76 + u64 rxf_mcast_octets; /* Rx multicast octets */ 77 + u64 rxf_mcast; /* Rx multicast frames */ 78 + u64 rxf_mcast_vlan; /* Rx multicast vlan frames */ 79 + u64 rxf_bcast_octets; /* Rx broadcast octests */ 80 + u64 rxf_bcast; /* Rx broadcast frames */ 81 + u64 rxf_bcast_vlan; /* Rx broadcast vlan frames */ 82 + }; 83 + 84 + /** 85 + * QoS or FCoE stats (fcport stats excluding physical FC port stats) 86 + */ 87 + union bfa_fcport_stats_u { 88 + struct bfa_qos_stats_s fcqos; 89 + struct bfa_fcoe_stats_s fcoe; 90 + }; 91 + 92 + #pragma pack() 93 + 94 + #endif /* __BFA_DEFS_FCPORT_H__ */
-32
drivers/scsi/bfa/include/defs/bfa_defs_im_common.h
··· 1 - /* 2 - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 3 - * All rights reserved 4 - * www.brocade.com 5 - * 6 - * Linux driver for Brocade Fibre Channel Host Bus Adapter. 7 - * 8 - * This program is free software; you can redistribute it and/or modify it 9 - * under the terms of the GNU General Public License (GPL) Version 2 as 10 - * published by the Free Software Foundation 11 - * 12 - * This program is distributed in the hope that it will be useful, but 13 - * WITHOUT ANY WARRANTY; without even the implied warranty of 14 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 - * General Public License for more details. 16 - */ 17 - 18 - #ifndef __BFA_DEFS_IM_COMMON_H__ 19 - #define __BFA_DEFS_IM_COMMON_H__ 20 - 21 - #define BFA_ADAPTER_NAME_LEN 256 22 - #define BFA_ADAPTER_GUID_LEN 256 23 - #define RESERVED_VLAN_NAME L"PORT VLAN" 24 - #define PASSTHRU_VLAN_NAME L"PASSTHRU VLAN" 25 - 26 - u64 tx_pkt_cnt; 27 - u64 rx_pkt_cnt; 28 - u32 duration; 29 - u8 status; 30 - } bfa_im_stats_t, *pbfa_im_stats_t; 31 - 32 - #endif /* __BFA_DEFS_IM_COMMON_H__ */
-72
drivers/scsi/bfa/include/defs/bfa_defs_im_team.h
··· 1 - /* 2 - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 3 - * All rights reserved 4 - * www.brocade.com 5 - * 6 - * Linux driver for Brocade Fibre Channel Host Bus Adapter. 7 - * 8 - * This program is free software; you can redistribute it and/or modify it 9 - * under the terms of the GNU General Public License (GPL) Version 2 as 10 - * published by the Free Software Foundation 11 - * 12 - * This program is distributed in the hope that it will be useful, but 13 - * WITHOUT ANY WARRANTY; without even the implied warranty of 14 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 - * General Public License for more details. 16 - */ 17 - 18 - #ifndef __BFA_DEFS_IM_TEAM_H__ 19 - #define __BFA_DEFS_IM_TEAM_H__ 20 - 21 - #include <protocol/types.h> 22 - 23 - #define BFA_TEAM_MAX_PORTS 8 24 - #define BFA_TEAM_NAME_LEN 256 25 - #define BFA_MAX_NUM_TEAMS 16 26 - #define BFA_TEAM_INVALID_DELAY -1 27 - 28 - BFA_LACP_RATE_SLOW = 1, 29 - BFA_LACP_RATE_FAST 30 - } bfa_im_lacp_rate_t; 31 - 32 - BFA_TEAM_MODE_FAIL_OVER = 1, 33 - BFA_TEAM_MODE_FAIL_BACK, 34 - BFA_TEAM_MODE_LACP, 35 - BFA_TEAM_MODE_NONE 36 - } bfa_im_team_mode_t; 37 - 38 - BFA_XMIT_POLICY_L2 = 1, 39 - BFA_XMIT_POLICY_L3_L4 40 - } bfa_im_xmit_policy_t; 41 - 42 - bfa_im_team_mode_t team_mode; 43 - bfa_im_lacp_rate_t lacp_rate; 44 - bfa_im_xmit_policy_t xmit_policy; 45 - int delay; 46 - wchar_t primary[BFA_ADAPTER_NAME_LEN]; 47 - wchar_t preferred_primary[BFA_ADAPTER_NAME_LEN]; 48 - mac_t mac; 49 - u16 num_ports; 50 - u16 num_vlans; 51 - u16 vlan_list[BFA_MAX_VLANS_PER_PORT]; 52 - wchar_t team_guid_list[BFA_TEAM_MAX_PORTS][BFA_ADAPTER_GUID_LEN]; 53 - wchar_t ioc_name_list[BFA_TEAM_MAX_PORTS][BFA_ADAPTER_NAME_LEN]; 54 - } bfa_im_team_attr_t; 55 - 56 - wchar_t team_name[BFA_TEAM_NAME_LEN]; 57 - bfa_im_xmit_policy_t xmit_policy; 58 - int delay; 59 - wchar_t primary[BFA_ADAPTER_NAME_LEN]; 60 - wchar_t preferred_primary[BFA_ADAPTER_NAME_LEN]; 61 - } bfa_im_team_edit_t, *pbfa_im_team_edit_t; 62 - 63 - wchar_t team_name[BFA_TEAM_NAME_LEN]; 64 - bfa_im_team_mode_t team_mode; 65 - mac_t mac; 66 - } bfa_im_team_info_t; 67 - 68 - bfa_im_team_info_t team_info[BFA_MAX_NUM_TEAMS]; 69 - u16 num_teams; 70 - } bfa_im_team_list_t, *pbfa_im_team_list_t; 71 - 72 - #endif /* __BFA_DEFS_IM_TEAM_H__ */
+2 -1
drivers/scsi/bfa/include/defs/bfa_defs_ioc.h
··· 126 126 struct bfa_ioc_driver_attr_s driver_attr; /* driver attr */ 127 127 struct bfa_ioc_pci_attr_s pci_attr; 128 128 u8 port_id; /* port number */ 129 + u8 rsvd[7]; /*!< 64bit align */ 129 130 }; 130 131 131 132 /** ··· 144 143 * BFA IOC level event data, now just a place holder 145 144 */ 146 145 struct bfa_ioc_aen_data_s { 147 - enum bfa_ioc_type_e ioc_type; 148 146 wwn_t pwwn; 147 + s16 ioc_type; 149 148 mac_t mac; 150 149 }; 151 150
+10 -2
drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h
··· 26 26 27 27 #define BFA_IOCFC_INTR_DELAY 1125 28 28 #define BFA_IOCFC_INTR_LATENCY 225 29 + #define BFA_IOCFCOE_INTR_DELAY 25 30 + #define BFA_IOCFCOE_INTR_LATENCY 5 29 31 30 32 /** 31 33 * Interrupt coalescing configuration. ··· 52 50 u16 num_fcxp_reqs; /* unassisted FC exchanges */ 53 51 u16 num_uf_bufs; /* unsolicited recv buffers */ 54 52 u8 num_cqs; 55 - u8 rsvd; 53 + u8 rsvd[5]; 56 54 }; 57 55 58 56 struct bfa_iocfc_drvcfg_s { ··· 226 224 227 225 228 226 struct bfa_fw_fip_stats_s { 227 + u32 vlan_req; /* vlan discovery requests */ 228 + u32 vlan_notify; /* vlan notifications */ 229 + u32 vlan_err; /* vlan response error */ 230 + u32 vlan_timeouts; /* vlan disvoery timeouts */ 231 + u32 vlan_invalids; /* invalid vlan in discovery advert. */ 229 232 u32 disc_req; /* Discovery solicit requests */ 230 233 u32 disc_rsp; /* Discovery solicit response */ 231 234 u32 disc_err; /* Discovery advt. parse errors */ 232 235 u32 disc_unsol; /* Discovery unsolicited */ 233 236 u32 disc_timeouts; /* Discovery timeouts */ 237 + u32 disc_fcf_unavail; /* Discovery FCF Not Avail. */ 234 238 u32 linksvc_unsupp; /* Unsupported link service req */ 235 239 u32 linksvc_err; /* Parse error in link service req */ 236 240 u32 logo_req; /* Number of FIP logos received */ 237 241 u32 clrvlink_req; /* Clear virtual link req */ 238 242 u32 op_unsupp; /* Unsupported FIP operation */ 239 243 u32 untagged; /* Untagged frames (ignored) */ 240 - u32 rsvd; 244 + u32 invalid_version; /*!< Invalid FIP version */ 241 245 }; 242 246 243 247
+2 -2
drivers/scsi/bfa/include/defs/bfa_defs_lport.h
··· 59 59 */ 60 60 struct bfa_lport_aen_data_s { 61 61 u16 vf_id; /* vf_id of this logical port */ 62 - u16 rsvd; 63 - enum bfa_port_role roles; /* Logical port mode,IM/TM/IP etc */ 62 + s16 roles; /* Logical port mode,IM/TM/IP etc */ 63 + u32 rsvd; 64 64 wwn_t ppwwn; /* WWN of its physical port */ 65 65 wwn_t lpwwn; /* WWN of this logical port */ 66 66 };
+98 -13
drivers/scsi/bfa/include/defs/bfa_defs_mfg.h
··· 22 22 /** 23 23 * Manufacturing block version 24 24 */ 25 - #define BFA_MFG_VERSION 1 25 + #define BFA_MFG_VERSION 2 26 + 27 + /** 28 + * Manufacturing block encrypted version 29 + */ 30 + #define BFA_MFG_ENC_VER 2 31 + 32 + /** 33 + * Manufacturing block version 1 length 34 + */ 35 + #define BFA_MFG_VER1_LEN 128 36 + 37 + /** 38 + * Manufacturing block header length 39 + */ 40 + #define BFA_MFG_HDR_LEN 4 41 + 42 + /** 43 + * Checksum size 44 + */ 45 + #define BFA_MFG_CHKSUM_SIZE 16 46 + 47 + /** 48 + * Manufacturing block encrypted version 49 + */ 50 + #define BFA_MFG_ENC_VER 2 51 + 52 + /** 53 + * Manufacturing block version 1 length 54 + */ 55 + #define BFA_MFG_VER1_LEN 128 56 + 57 + /** 58 + * Manufacturing block header length 59 + */ 60 + #define BFA_MFG_HDR_LEN 4 61 + 62 + /** 63 + * Checksum size 64 + */ 65 + #define BFA_MFG_CHKSUM_SIZE 16 26 66 27 67 /** 28 68 * Manufacturing block format ··· 70 30 #define BFA_MFG_SERIALNUM_SIZE 11 71 31 #define BFA_MFG_PARTNUM_SIZE 14 72 32 #define BFA_MFG_SUPPLIER_ID_SIZE 10 73 - #define BFA_MFG_SUPPLIER_PARTNUM_SIZE 20 74 - #define BFA_MFG_SUPPLIER_SERIALNUM_SIZE 20 75 - #define BFA_MFG_SUPPLIER_REVISION_SIZE 4 33 + #define BFA_MFG_SUPPLIER_PARTNUM_SIZE 20 34 + #define BFA_MFG_SUPPLIER_SERIALNUM_SIZE 20 35 + #define BFA_MFG_SUPPLIER_REVISION_SIZE 4 76 36 #define STRSZ(_n) (((_n) + 4) & ~3) 37 + 38 + /** 39 + * Manufacturing card type 40 + */ 41 + enum { 42 + BFA_MFG_TYPE_CB_MAX = 825, /* Crossbow card type max */ 43 + BFA_MFG_TYPE_FC8P2 = 825, /* 8G 2port FC card */ 44 + BFA_MFG_TYPE_FC8P1 = 815, /* 8G 1port FC card */ 45 + BFA_MFG_TYPE_FC4P2 = 425, /* 4G 2port FC card */ 46 + BFA_MFG_TYPE_FC4P1 = 415, /* 4G 1port FC card */ 47 + BFA_MFG_TYPE_CNA10P2 = 1020, /* 10G 2port CNA card */ 48 + BFA_MFG_TYPE_CNA10P1 = 1010, /* 10G 1port CNA card */ 49 + }; 50 + 51 + #pragma pack(1) 52 + 53 + /** 54 + * Card type to port number conversion 55 + */ 56 + #define bfa_mfg_type2port_num(card_type) (((card_type) / 10) % 10) 57 + 58 + 59 + /** 60 + * All numerical fields are in big-endian format. 61 + */ 62 + struct bfa_mfg_block_s { 63 + }; 77 64 78 65 /** 79 66 * VPD data length 80 67 */ 81 - #define BFA_MFG_VPD_LEN 256 68 + #define BFA_MFG_VPD_LEN 512 69 + 70 + #define BFA_MFG_VPD_PCI_HDR_OFF 137 71 + #define BFA_MFG_VPD_PCI_VER_MASK 0x07 /* version mask 3 bits */ 72 + #define BFA_MFG_VPD_PCI_VDR_MASK 0xf8 /* vendor mask 5 bits */ 73 + 74 + /** 75 + * VPD vendor tag 76 + */ 77 + enum { 78 + BFA_MFG_VPD_UNKNOWN = 0, /* vendor unknown */ 79 + BFA_MFG_VPD_IBM = 1, /* vendor IBM */ 80 + BFA_MFG_VPD_HP = 2, /* vendor HP */ 81 + BFA_MFG_VPD_DELL = 3, /* vendor DELL */ 82 + BFA_MFG_VPD_PCI_IBM = 0x08, /* PCI VPD IBM */ 83 + BFA_MFG_VPD_PCI_HP = 0x10, /* PCI VPD HP */ 84 + BFA_MFG_VPD_PCI_DELL = 0x20, /* PCI VPD DELL */ 85 + BFA_MFG_VPD_PCI_BRCD = 0xf8, /* PCI VPD Brocade */ 86 + }; 82 87 83 88 /** 84 89 * All numerical fields are in big-endian format. 
85 90 */ 86 91 struct bfa_mfg_vpd_s { 87 - u8 version; /* vpd data version */ 88 - u8 vpd_sig[3]; /* characters 'V', 'P', 'D' */ 89 - u8 chksum; /* u8 checksum */ 90 - u8 vendor; /* vendor */ 91 - u8 len; /* vpd data length excluding header */ 92 - u8 rsv; 93 - u8 data[BFA_MFG_VPD_LEN]; /* vpd data */ 92 + u8 version; /* vpd data version */ 93 + u8 vpd_sig[3]; /* characters 'V', 'P', 'D' */ 94 + u8 chksum; /* u8 checksum */ 95 + u8 vendor; /* vendor */ 96 + u8 len; /* vpd data length excluding header */ 97 + u8 rsv; 98 + u8 data[BFA_MFG_VPD_LEN]; /* vpd data */ 94 99 }; 95 100 96 - #pragma pack(1) 101 + #pragma pack() 97 102 98 103 #endif /* __BFA_DEFS_MFG_H__ */
+11 -8
drivers/scsi/bfa/include/defs/bfa_defs_port.h
··· 185 185 wwn_t fabric_name; /* attached switch's nwwn */ 186 186 u8 fabric_ip_addr[BFA_FCS_FABRIC_IPADDR_SZ]; /* attached 187 187 * fabric's ip addr */ 188 + struct mac_s fpma_mac; /* Lport's FPMA Mac address */ 189 + u16 authfail; /* auth failed state */ 188 190 }; 189 191 190 192 /** ··· 234 232 }; 235 233 236 234 struct bfa_port_aen_data_s { 237 - enum bfa_ioc_type_e ioc_type; 238 - wwn_t pwwn; /* WWN of the physical port */ 239 - wwn_t fwwn; /* WWN of the fabric port */ 240 - mac_t mac; /* MAC addres of the ethernet port, 241 - * applicable to CNA port only */ 242 - int phy_port_num; /*! For SFP related events */ 243 - enum bfa_port_aen_sfp_pom level; /* Only transitions will 244 - * be informed */ 235 + wwn_t pwwn; /* WWN of the physical port */ 236 + wwn_t fwwn; /* WWN of the fabric port */ 237 + s32 phy_port_num; /*! For SFP related events */ 238 + s16 ioc_type; 239 + s16 level; /* Only transitions will 240 + * be informed */ 241 + struct mac_s mac; /* MAC address of the ethernet port, 242 + * applicable to CNA port only */ 243 + s16 rsvd; 245 244 }; 246 245 247 246 #endif /* __BFA_DEFS_PORT_H__ */
+87 -64
drivers/scsi/bfa/include/defs/bfa_defs_pport.h
··· 232 232 u32 pid; /* port ID */ 233 233 enum bfa_pport_type port_type; /* current topology */ 234 234 u32 loopback; /* external loopback */ 235 - u32 rsvd1; 235 + u32 authfail; /* auth fail state */ 236 236 u32 rsvd2; /* padding for 64 bit */ 237 237 }; 238 238 ··· 240 240 * FC Port statistics. 241 241 */ 242 242 struct bfa_pport_fc_stats_s { 243 - u64 secs_reset; /* seconds since stats is reset */ 244 - u64 tx_frames; /* transmitted frames */ 245 - u64 tx_words; /* transmitted words */ 246 - u64 rx_frames; /* received frames */ 247 - u64 rx_words; /* received words */ 248 - u64 lip_count; /* LIPs seen */ 249 - u64 nos_count; /* NOS count */ 250 - u64 error_frames; /* errored frames (sent?) */ 251 - u64 dropped_frames; /* dropped frames */ 252 - u64 link_failures; /* link failure count */ 253 - u64 loss_of_syncs; /* loss of sync count */ 254 - u64 loss_of_signals;/* loss of signal count */ 255 - u64 primseq_errs; /* primitive sequence protocol */ 256 - u64 bad_os_count; /* invalid ordered set */ 257 - u64 err_enc_out; /* Encoding error outside frame */ 258 - u64 invalid_crcs; /* frames received with invalid CRC*/ 259 - u64 undersized_frm; /* undersized frames */ 260 - u64 oversized_frm; /* oversized frames */ 261 - u64 bad_eof_frm; /* frames with bad EOF */ 262 - struct bfa_qos_stats_s qos_stats; /* QoS statistics */ 243 + u64 secs_reset; /* Seconds since stats is reset */ 244 + u64 tx_frames; /* Tx frames */ 245 + u64 tx_words; /* Tx words */ 246 + u64 tx_lip; /* TX LIP */ 247 + u64 tx_nos; /* Tx NOS */ 248 + u64 tx_ols; /* Tx OLS */ 249 + u64 tx_lr; /* Tx LR */ 250 + u64 tx_lrr; /* Tx LRR */ 251 + u64 rx_frames; /* Rx frames */ 252 + u64 rx_words; /* Rx words */ 253 + u64 lip_count; /* Rx LIP */ 254 + u64 nos_count; /* Rx NOS */ 255 + u64 ols_count; /* Rx OLS */ 256 + u64 lr_count; /* Rx LR */ 257 + u64 lrr_count; /* Rx LRR */ 258 + u64 invalid_crcs; /* Rx CRC err frames */ 259 + u64 invalid_crc_gd_eof; /* Rx CRC err good EOF frames */ 260 + u64 undersized_frm; /* Rx undersized frames */ 261 + u64 oversized_frm; /* Rx oversized frames */ 262 + u64 bad_eof_frm; /* Rx frames with bad EOF */ 263 + u64 error_frames; /* Errored frames */ 264 + u64 dropped_frames; /* Dropped frames */ 265 + u64 link_failures; /* Link Failure (LF) count */ 266 + u64 loss_of_syncs; /* Loss of sync count */ 267 + u64 loss_of_signals;/* Loss of signal count */ 268 + u64 primseq_errs; /* Primitive sequence protocol err. */ 269 + u64 bad_os_count; /* Invalid ordered sets */ 270 + u64 err_enc_out; /* Encoding err nonframe_8b10b */ 271 + u64 err_enc; /* Encoding err frame_8b10b */ 263 272 }; 264 273 265 274 /** 266 275 * Eth Port statistics. 
267 276 */ 268 277 struct bfa_pport_eth_stats_s { 269 - u64 secs_reset; /* seconds since stats is reset */ 270 - u64 frame_64; /* both rx and tx counter */ 271 - u64 frame_65_127; /* both rx and tx counter */ 272 - u64 frame_128_255; /* both rx and tx counter */ 273 - u64 frame_256_511; /* both rx and tx counter */ 274 - u64 frame_512_1023; /* both rx and tx counter */ 275 - u64 frame_1024_1518; /* both rx and tx counter */ 276 - u64 frame_1519_1522; /* both rx and tx counter */ 277 - 278 - u64 tx_bytes; 279 - u64 tx_packets; 280 - u64 tx_mcast_packets; 281 - u64 tx_bcast_packets; 282 - u64 tx_control_frame; 283 - u64 tx_drop; 284 - u64 tx_jabber; 285 - u64 tx_fcs_error; 286 - u64 tx_fragments; 287 - 288 - u64 rx_bytes; 289 - u64 rx_packets; 290 - u64 rx_mcast_packets; 291 - u64 rx_bcast_packets; 292 - u64 rx_control_frames; 293 - u64 rx_unknown_opcode; 294 - u64 rx_drop; 295 - u64 rx_jabber; 296 - u64 rx_fcs_error; 297 - u64 rx_alignment_error; 298 - u64 rx_frame_length_error; 299 - u64 rx_code_error; 300 - u64 rx_fragments; 301 - 302 - u64 rx_pause; /* BPC */ 303 - u64 rx_zero_pause; /* BPC Pause cancellation */ 304 - u64 tx_pause; /* BPC */ 305 - u64 tx_zero_pause; /* BPC Pause cancellation */ 306 - u64 rx_fcoe_pause; /* BPC */ 307 - u64 rx_fcoe_zero_pause; /* BPC Pause cancellation */ 308 - u64 tx_fcoe_pause; /* BPC */ 309 - u64 tx_fcoe_zero_pause; /* BPC Pause cancellation */ 278 + u64 secs_reset; /* Seconds since stats is reset */ 279 + u64 frame_64; /* Frames 64 bytes */ 280 + u64 frame_65_127; /* Frames 65-127 bytes */ 281 + u64 frame_128_255; /* Frames 128-255 bytes */ 282 + u64 frame_256_511; /* Frames 256-511 bytes */ 283 + u64 frame_512_1023; /* Frames 512-1023 bytes */ 284 + u64 frame_1024_1518; /* Frames 1024-1518 bytes */ 285 + u64 frame_1519_1522; /* Frames 1519-1522 bytes */ 286 + u64 tx_bytes; /* Tx bytes */ 287 + u64 tx_packets; /* Tx packets */ 288 + u64 tx_mcast_packets; /* Tx multicast packets */ 289 + u64 tx_bcast_packets; /* Tx broadcast packets */ 290 + u64 tx_control_frame; /* Tx control frame */ 291 + u64 tx_drop; /* Tx drops */ 292 + u64 tx_jabber; /* Tx jabber */ 293 + u64 tx_fcs_error; /* Tx FCS error */ 294 + u64 tx_fragments; /* Tx fragments */ 295 + u64 rx_bytes; /* Rx bytes */ 296 + u64 rx_packets; /* Rx packets */ 297 + u64 rx_mcast_packets; /* Rx multicast packets */ 298 + u64 rx_bcast_packets; /* Rx broadcast packets */ 299 + u64 rx_control_frames; /* Rx control frames */ 300 + u64 rx_unknown_opcode; /* Rx unknown opcode */ 301 + u64 rx_drop; /* Rx drops */ 302 + u64 rx_jabber; /* Rx jabber */ 303 + u64 rx_fcs_error; /* Rx FCS errors */ 304 + u64 rx_alignment_error; /* Rx alignment errors */ 305 + u64 rx_frame_length_error; /* Rx frame len errors */ 306 + u64 rx_code_error; /* Rx code errors */ 307 + u64 rx_fragments; /* Rx fragments */ 308 + u64 rx_pause; /* Rx pause */ 309 + u64 rx_zero_pause; /* Rx zero pause */ 310 + u64 tx_pause; /* Tx pause */ 311 + u64 tx_zero_pause; /* Tx zero pause */ 312 + u64 rx_fcoe_pause; /* Rx fcoe pause */ 313 + u64 rx_fcoe_zero_pause; /* Rx FCoE zero pause */ 314 + u64 tx_fcoe_pause; /* Tx FCoE pause */ 315 + u64 tx_fcoe_zero_pause; /* Tx FCoE zero pause */ 310 316 }; 311 317 312 318 /** ··· 339 333 }; 340 334 341 335 /** 342 - * Port RNID info. 
343 - */ 336 + * Port RNID info. */ 344 337 struct bfa_pport_rnid_s { 345 338 wwn_t wwn; 346 339 u32 unittype; ··· 350 345 u8 ipaddr[16]; 351 346 u16 rsvd; 352 347 u16 topologydiscoveryflags; 348 + }; 349 + 350 + struct bfa_fcport_fcf_s { 351 + wwn_t name; /* FCF name */ 352 + wwn_t fabric_name; /* Fabric Name */ 353 + u8 fipenabled; /* FIP enabled or not */ 354 + u8 fipfailed; /* FIP failed or not */ 355 + u8 resv[2]; 356 + u8 pri; /* FCF priority */ 357 + u8 version; /* FIP version used */ 358 + u8 available; /* Available for login */ 359 + u8 fka_disabled; /* FKA is disabled */ 360 + u8 maxsz_verified; /* FCoE max size verified */ 361 + u8 fc_map[3]; /* FC map */ 362 + u16 vlan; /* FCoE vlan tag/priority */ 363 + u32 fka_adv_per; /* FIP ka advert. period */ 364 + struct mac_s mac; /* FCF mac */ 353 365 }; 354 366 355 367 /** ··· 400 378 struct fc_alpabm_s alpabm; /* alpa bitmap */ 401 379 } loop_info; 402 380 } tl; 381 + struct bfa_fcport_fcf_s fcf; /*!< FCF information (for FCoE) */ 403 382 }; 404 383 405 384 #endif /* __BFA_DEFS_PPORT_H__ */
+12 -5
drivers/scsi/bfa/include/defs/bfa_defs_status.h
··· 180 180 BFA_STATUS_IM_ADAPT_ALREADY_IN_TEAM = 114, /* Given adapter is part 181 181 * of another team */ 182 182 BFA_STATUS_IM_ADAPT_HAS_VLANS = 115, /* Adapter has VLANs configured. 183 - * Delete all VLANs before 184 - * creating team */ 183 + * Delete all VLANs to become 184 + * part of the team */ 185 185 BFA_STATUS_IM_PVID_MISMATCH = 116, /* Mismatching PVIDs configured 186 186 * for adapters */ 187 187 BFA_STATUS_IM_LINK_SPEED_MISMATCH = 117, /* Mismatching link speeds ··· 213 213 * loaded */ 214 214 BFA_STATUS_CARD_TYPE_MISMATCH = 131, /* Card type mismatch */ 215 215 BFA_STATUS_BAD_ASICBLK = 132, /* Bad ASIC block */ 216 - BFA_STATUS_NO_DRIVER = 133, /* Storage/Ethernet driver not loaded */ 216 + BFA_STATUS_NO_DRIVER = 133, /* Brocade adapter/driver not installed or loaded */ 217 217 BFA_STATUS_INVALID_MAC = 134, /* Invalid mac address */ 218 218 BFA_STATUS_IM_NO_VLAN = 135, /* No VLANs configured on the adapter */ 219 219 BFA_STATUS_IM_ETH_LB_FAILED = 136, /* Ethernet loopback test failed */ ··· 228 228 BFA_STATUS_IM_GET_INETCFG_FAILED = 142, /* Acquiring Network Subsytem 229 229 * handle Failed. Please try 230 230 * after some time */ 231 - BFA_STATUS_IM_NOT_BOUND = 143, /* Brocade 10G Ethernet Service is not 232 - * Enabled on this port */ 231 + BFA_STATUS_IM_NOT_BOUND = 143, /* IM driver is not active */ 233 232 BFA_STATUS_INSUFFICIENT_PERMS = 144, /* User doesn't have sufficient 234 233 * permissions to execute the BCU 235 234 * application */ ··· 241 242 * failed */ 242 243 BFA_STATUS_IM_UNBIND_FAILED = 149, /* ! < IM Driver unbind operation 243 244 * failed */ 245 + BFA_STATUS_IM_PORT_IN_TEAM = 150, /* Port is already part of the 246 + * team */ 247 + BFA_STATUS_IM_VLAN_NOT_FOUND = 151, /* VLAN ID doesn't exists */ 248 + BFA_STATUS_IM_TEAM_NOT_FOUND = 152, /* Teaming configuration doesn't 249 + * exists */ 250 + BFA_STATUS_IM_TEAM_CFG_NOT_ALLOWED = 153, /* Given settings are not 251 + * allowed for the current 252 + * Teaming mode */ 244 253 BFA_STATUS_MAX_VAL /* Unknown error code */ 245 254 }; 246 255 #define bfa_status_t enum bfa_status
-1
drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h
··· 70 70 */ 71 71 void bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv); 72 72 73 - void bfa_fcb_itnim_tov_begin(struct bfad_itnim_s *itnim_drv); 74 73 void bfa_fcb_itnim_tov(struct bfad_itnim_s *itnim_drv); 75 74 76 75 #endif /* __BFAD_FCB_FCPIM_H__ */
+4 -1
drivers/scsi/bfa/include/fcs/bfa_fcs.h
··· 49 49 struct bfa_trc_mod_s *trcmod; /* tracing module */ 50 50 struct bfa_aen_s *aen; /* aen component */ 51 51 bfa_boolean_t vf_enabled; /* VF mode is enabled */ 52 + bfa_boolean_t fdmi_enabled; /*!< FDMI is enabled */ 52 53 bfa_boolean_t min_cfg; /* min cfg enabled/disabled */ 53 54 u16 port_vfid; /* port default VF ID */ 54 55 struct bfa_fcs_driver_info_s driver_info; ··· 61 60 /* 62 61 * bfa fcs API functions 63 62 */ 64 - void bfa_fcs_init(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad, 63 + void bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad, 65 64 bfa_boolean_t min_cfg); 65 + void bfa_fcs_init(struct bfa_fcs_s *fcs); 66 66 void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs, 67 67 struct bfa_fcs_driver_info_s *driver_info); 68 + void bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable); 68 69 void bfa_fcs_exit(struct bfa_fcs_s *fcs); 69 70 void bfa_fcs_trc_init(struct bfa_fcs_s *fcs, struct bfa_trc_mod_s *trcmod); 70 71 void bfa_fcs_log_init(struct bfa_fcs_s *fcs, struct bfa_log_mod_s *logmod);
-8
drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h
··· 34 34 struct bfa_fcs_fabric_s; 35 35 36 36 /* 37 - * @todo : need to move to a global config file. 38 - * Maximum Vports supported per physical port or vf. 39 - */ 40 - #define BFA_FCS_MAX_VPORTS_SUPP_CB 255 41 - #define BFA_FCS_MAX_VPORTS_SUPP_CT 191 42 - 43 - /* 44 - * @todo : need to move to a global config file. 45 37 * Maximum Rports supported per port (physical/logical). 46 38 */ 47 39 #define BFA_FCS_MAX_RPORTS_SUPP 256 /* @todo : tentative value */
+6
drivers/scsi/bfa/include/log/bfa_log_hal.h
··· 27 27 (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 3) 28 28 #define BFA_LOG_HAL_SM_ASSERT \ 29 29 (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 4) 30 + #define BFA_LOG_HAL_DRIVER_ERROR \ 31 + (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 5) 32 + #define BFA_LOG_HAL_DRIVER_CONFIG_ERROR \ 33 + (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 6) 34 + #define BFA_LOG_HAL_MBOX_ERROR \ 35 + (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 7) 30 36 #endif
+16
drivers/scsi/bfa/include/log/bfa_log_linux.h
··· 41 41 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 10) 42 42 #define BFA_LOG_LINUX_SCSI_ABORT_COMP \ 43 43 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 11) 44 + #define BFA_LOG_LINUX_DRIVER_CONFIG_ERROR \ 45 + (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 12) 46 + #define BFA_LOG_LINUX_BNA_STATE_MACHINE \ 47 + (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 13) 48 + #define BFA_LOG_LINUX_IOC_ERROR \ 49 + (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 14) 50 + #define BFA_LOG_LINUX_RESOURCE_ALLOC_ERROR \ 51 + (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 15) 52 + #define BFA_LOG_LINUX_RING_BUFFER_ERROR \ 53 + (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 16) 54 + #define BFA_LOG_LINUX_DRIVER_ERROR \ 55 + (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 17) 56 + #define BFA_LOG_LINUX_DRIVER_DIAG \ 57 + (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 18) 58 + #define BFA_LOG_LINUX_DRIVER_AEN \ 59 + (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 19) 44 60 #endif
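The new bfa_log_hal.h and bfa_log_linux.h message IDs above are all built the same way: a module ID shifted left by BFA_LOG_MODID_OFFSET, OR'd with a per-module message index. The standalone sketch below shows how such an ID packs and unpacks; the BFA_LOG_MODID_OFFSET and BFA_LOG_LINUX_ID values are assumed placeholders for illustration only, since their real definitions are not part of these hunks:

/*
 * Standalone sketch of the BFA log message id composition.
 * The two values below are assumptions, not the driver's definitions.
 */
#include <stdio.h>

#define BFA_LOG_MODID_OFFSET    16      /* assumed for illustration */
#define BFA_LOG_LINUX_ID        5       /* assumed for illustration */

#define BFA_LOG_LINUX_DRIVER_AEN \
        (((unsigned int) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 19)

int main(void)
{
        unsigned int msgid = BFA_LOG_LINUX_DRIVER_AEN;

        printf("msgid 0x%08x = module %u, message %u\n", msgid,
               msgid >> BFA_LOG_MODID_OFFSET,
               msgid & ((1u << BFA_LOG_MODID_OFFSET) - 1));
        return 0;
}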
+5
drivers/scsi/bfa/include/protocol/fc.h
··· 50 50 51 51 u32 ro; /* relative offset */ 52 52 }; 53 + 54 + #define FC_SOF_LEN 4 55 + #define FC_EOF_LEN 4 56 + #define FC_CRC_LEN 4 57 + 53 58 /* 54 59 * Fibre Channel BB_E Header Structure 55 60 */
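FC_SOF_LEN, FC_EOF_LEN and FC_CRC_LEN added to protocol/fc.h above are per-frame overhead constants. A rough standalone calculation of the overhead and the largest on-the-wire frame they imply follows; the 24-byte frame header and 2112-byte maximum data field are standard Fibre Channel sizes assumed here, not definitions from this patch:

/*
 * Standalone arithmetic sketch using the new FC_*_LEN constants.
 * FC_FRAME_HDR_LEN and FC_MAX_DATA_FIELD are assumed standard values.
 */
#include <stdio.h>

#define FC_SOF_LEN              4
#define FC_EOF_LEN              4
#define FC_CRC_LEN              4
#define FC_FRAME_HDR_LEN        24      /* assumed: standard FC frame header */
#define FC_MAX_DATA_FIELD       2112    /* assumed: maximum FC payload */

int main(void)
{
        unsigned int overhead = FC_SOF_LEN + FC_FRAME_HDR_LEN +
                                FC_CRC_LEN + FC_EOF_LEN;

        printf("per-frame overhead: %u bytes\n", overhead);
        printf("largest frame on the wire: %u bytes\n",
               overhead + FC_MAX_DATA_FIELD);
        return 0;
}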
-75
drivers/scsi/bfa/include/protocol/pcifw.h
··· 1 - /* 2 - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 3 - * All rights reserved 4 - * www.brocade.com 5 - * 6 - * Linux driver for Brocade Fibre Channel Host Bus Adapter. 7 - * 8 - * This program is free software; you can redistribute it and/or modify it 9 - * under the terms of the GNU General Public License (GPL) Version 2 as 10 - * published by the Free Software Foundation 11 - * 12 - * This program is distributed in the hope that it will be useful, but 13 - * WITHOUT ANY WARRANTY; without even the implied warranty of 14 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 - * General Public License for more details. 16 - */ 17 - 18 - /** 19 - * pcifw.h PCI FW related headers 20 - */ 21 - 22 - #ifndef __PCIFW_H__ 23 - #define __PCIFW_H__ 24 - 25 - #pragma pack(1) 26 - 27 - struct pnp_hdr_s{ 28 - u32 signature; /* "$PnP" */ 29 - u8 rev; /* Struct revision */ 30 - u8 len; /* Header structure len in multiples 31 - * of 16 bytes */ 32 - u16 off; /* Offset to next header 00 if none */ 33 - u8 rsvd; /* Reserved byte */ 34 - u8 cksum; /* 8-bit checksum for this header */ 35 - u32 pnp_dev_id; /* PnP Device Id */ 36 - u16 mfstr; /* Pointer to manufacturer string */ 37 - u16 prstr; /* Pointer to product string */ 38 - u8 devtype[3]; /* Device Type Code */ 39 - u8 devind; /* Device Indicator */ 40 - u16 bcventr; /* Bootstrap entry vector */ 41 - u16 rsvd2; /* Reserved */ 42 - u16 sriv; /* Static resource information vector */ 43 - }; 44 - 45 - struct pci_3_0_ds_s{ 46 - u32 sig; /* Signature "PCIR" */ 47 - u16 vendid; /* Vendor ID */ 48 - u16 devid; /* Device ID */ 49 - u16 devlistoff; /* Device List Offset */ 50 - u16 len; /* PCI Data Structure Length */ 51 - u8 rev; /* PCI Data Structure Revision */ 52 - u8 clcode[3]; /* Class Code */ 53 - u16 imglen; /* Code image length in multiples of 54 - * 512 bytes */ 55 - u16 coderev; /* Revision level of code/data */ 56 - u8 codetype; /* Code type 0x00 - BIOS */ 57 - u8 indr; /* Last image indicator */ 58 - u16 mrtimglen; /* Max Run Time Image Length */ 59 - u16 cuoff; /* Config Utility Code Header Offset */ 60 - u16 dmtfclp; /* DMTF CLP entry point offset */ 61 - }; 62 - 63 - struct pci_optrom_hdr_s{ 64 - u16 sig; /* Signature 0x55AA */ 65 - u8 len; /* Option ROM length in units of 512 bytes */ 66 - u8 inivec[3]; /* Initialization vector */ 67 - u8 rsvd[16]; /* Reserved field */ 68 - u16 verptr; /* Pointer to version string - private */ 69 - u16 pcids; /* Pointer to PCI data structure */ 70 - u16 pnphdr; /* Pointer to PnP expansion header */ 71 - }; 72 - 73 - #pragma pack() 74 - 75 - #endif
+1 -1
drivers/scsi/bfa/loop.c
··· 162 162 len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), alpa, 163 163 bfa_fcs_port_get_fcid(port), 0, 164 164 port->port_cfg.pwwn, port->port_cfg.nwwn, 165 - bfa_pport_get_maxfrsize(port->fcs->bfa)); 165 + bfa_fcport_get_maxfrsize(port->fcs->bfa)); 166 166 167 167 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 168 168 FC_CLASS_3, len, &fchs,
+3 -2
drivers/scsi/bfa/lport_api.c
··· 156 156 /* 157 157 * Get Physical port's current speed 158 158 */ 159 - bfa_pport_get_attr(port->fcs->bfa, &pport_attr); 159 + bfa_fcport_get_attr(port->fcs->bfa, &pport_attr); 160 160 pport_speed = pport_attr.speed; 161 161 bfa_trc(fcs, pport_speed); 162 162 ··· 235 235 port_info->port_wwn = bfa_fcs_port_get_pwwn(port); 236 236 port_info->node_wwn = bfa_fcs_port_get_nwwn(port); 237 237 238 - port_info->max_vports_supp = bfa_fcs_vport_get_max(port->fcs); 238 + port_info->max_vports_supp = 239 + bfa_lps_get_max_vport(port->fcs->bfa); 239 240 port_info->num_vports_inuse = 240 241 bfa_fcs_fabric_vport_count(port->fabric); 241 242 port_info->max_rports_supp = BFA_FCS_MAX_RPORTS_SUPP;
+13 -16
drivers/scsi/bfa/ms.c
··· 118 118 break; 119 119 120 120 default: 121 - bfa_assert(0); 121 + bfa_sm_fault(ms->port->fcs, event); 122 122 } 123 123 } 124 124 ··· 141 141 break; 142 142 143 143 default: 144 - bfa_assert(0); 144 + bfa_sm_fault(ms->port->fcs, event); 145 145 } 146 146 } 147 147 ··· 190 190 break; 191 191 192 192 default: 193 - bfa_assert(0); 193 + bfa_sm_fault(ms->port->fcs, event); 194 194 } 195 195 } 196 196 ··· 216 216 break; 217 217 218 218 default: 219 - bfa_assert(0); 219 + bfa_sm_fault(ms->port->fcs, event); 220 220 } 221 221 } 222 222 ··· 230 230 switch (event) { 231 231 case MSSM_EVENT_PORT_OFFLINE: 232 232 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline); 233 - /* 234 - * now invoke MS related sub-modules 235 - */ 236 - bfa_fcs_port_fdmi_offline(ms); 237 233 break; 238 234 239 235 case MSSM_EVENT_PORT_FABRIC_RSCN: ··· 239 243 break; 240 244 241 245 default: 242 - bfa_assert(0); 246 + bfa_sm_fault(ms->port->fcs, event); 243 247 } 244 248 } 245 249 ··· 262 266 break; 263 267 264 268 default: 265 - bfa_assert(0); 269 + bfa_sm_fault(ms->port->fcs, event); 266 270 } 267 271 } 268 272 ··· 300 304 break; 301 305 302 306 default: 303 - bfa_assert(0); 307 + bfa_sm_fault(ms->port->fcs, event); 304 308 } 305 309 } 306 310 ··· 326 330 break; 327 331 328 332 default: 329 - bfa_assert(0); 333 + bfa_sm_fault(ms->port->fcs, event); 330 334 } 331 335 } 332 336 ··· 462 466 break; 463 467 464 468 default: 465 - bfa_assert(0); 469 + bfa_sm_fault(ms->port->fcs, event); 466 470 } 467 471 } 468 472 ··· 498 502 break; 499 503 500 504 default: 501 - bfa_assert(0); 505 + bfa_sm_fault(ms->port->fcs, event); 502 506 } 503 507 } 504 508 ··· 524 528 break; 525 529 526 530 default: 527 - bfa_assert(0); 531 + bfa_sm_fault(ms->port->fcs, event); 528 532 } 529 533 } 530 534 ··· 633 637 bfa_os_hton3b(FC_MGMT_SERVER), 634 638 bfa_fcs_port_get_fcid(port), 0, 635 639 port->port_cfg.pwwn, port->port_cfg.nwwn, 636 - bfa_pport_get_maxfrsize(port->fcs->bfa)); 640 + bfa_fcport_get_maxfrsize(port->fcs->bfa)); 637 641 638 642 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 639 643 FC_CLASS_3, len, &fchs, bfa_fcs_port_ms_plogi_response, ··· 731 735 732 736 ms->port = port; 733 737 bfa_sm_send_event(ms, MSSM_EVENT_PORT_OFFLINE); 738 + bfa_fcs_port_fdmi_offline(ms); 734 739 } 735 740 736 741 void
+18 -18
drivers/scsi/bfa/ns.c
··· 164 164 break; 165 165 166 166 default: 167 - bfa_assert(0); 167 + bfa_sm_fault(ns->port->fcs, event); 168 168 } 169 169 } 170 170 ··· 187 187 break; 188 188 189 189 default: 190 - bfa_assert(0); 190 + bfa_sm_fault(ns->port->fcs, event); 191 191 } 192 192 } 193 193 ··· 221 221 break; 222 222 223 223 default: 224 - bfa_assert(0); 224 + bfa_sm_fault(ns->port->fcs, event); 225 225 } 226 226 } 227 227 ··· 247 247 break; 248 248 249 249 default: 250 - bfa_assert(0); 250 + bfa_sm_fault(ns->port->fcs, event); 251 251 } 252 252 } 253 253 ··· 270 270 break; 271 271 272 272 default: 273 - bfa_assert(0); 273 + bfa_sm_fault(ns->port->fcs, event); 274 274 } 275 275 } 276 276 ··· 304 304 break; 305 305 306 306 default: 307 - bfa_assert(0); 307 + bfa_sm_fault(ns->port->fcs, event); 308 308 } 309 309 } 310 310 ··· 330 330 break; 331 331 332 332 default: 333 - bfa_assert(0); 333 + bfa_sm_fault(ns->port->fcs, event); 334 334 } 335 335 } 336 336 ··· 353 353 break; 354 354 355 355 default: 356 - bfa_assert(0); 356 + bfa_sm_fault(ns->port->fcs, event); 357 357 } 358 358 } 359 359 ··· 390 390 break; 391 391 392 392 default: 393 - bfa_assert(0); 393 + bfa_sm_fault(ns->port->fcs, event); 394 394 } 395 395 } 396 396 ··· 413 413 break; 414 414 415 415 default: 416 - bfa_assert(0); 416 + bfa_sm_fault(ns->port->fcs, event); 417 417 } 418 418 } 419 419 ··· 436 436 break; 437 437 438 438 default: 439 - bfa_assert(0); 439 + bfa_sm_fault(ns->port->fcs, event); 440 440 } 441 441 } 442 442 ··· 494 494 break; 495 495 496 496 default: 497 - bfa_assert(0); 497 + bfa_sm_fault(ns->port->fcs, event); 498 498 } 499 499 } 500 500 ··· 517 517 break; 518 518 519 519 default: 520 - bfa_assert(0); 520 + bfa_sm_fault(ns->port->fcs, event); 521 521 } 522 522 } 523 523 static void ··· 539 539 break; 540 540 541 541 default: 542 - bfa_assert(0); 542 + bfa_sm_fault(ns->port->fcs, event); 543 543 } 544 544 } 545 545 ··· 575 575 break; 576 576 577 577 default: 578 - bfa_assert(0); 578 + bfa_sm_fault(ns->port->fcs, event); 579 579 } 580 580 } 581 581 ··· 598 598 break; 599 599 600 600 default: 601 - bfa_assert(0); 601 + bfa_sm_fault(ns->port->fcs, event); 602 602 } 603 603 } 604 604 ··· 626 626 break; 627 627 628 628 default: 629 - bfa_assert(0); 629 + bfa_sm_fault(ns->port->fcs, event); 630 630 } 631 631 } 632 632 ··· 660 660 bfa_os_hton3b(FC_NAME_SERVER), 661 661 bfa_fcs_port_get_fcid(port), 0, 662 662 port->port_cfg.pwwn, port->port_cfg.nwwn, 663 - bfa_pport_get_maxfrsize(port->fcs->bfa)); 663 + bfa_fcport_get_maxfrsize(port->fcs->bfa)); 664 664 665 665 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 666 666 FC_CLASS_3, len, &fchs, bfa_fcs_port_ns_plogi_response,
+39 -52
drivers/scsi/bfa/rport.c
··· 224 224 break; 225 225 226 226 default: 227 - bfa_assert(0); 227 + bfa_sm_fault(rport->fcs, event); 228 228 } 229 229 } 230 230 ··· 276 276 break; 277 277 278 278 default: 279 - bfa_assert(0); 279 + bfa_sm_fault(rport->fcs, event); 280 280 } 281 281 } 282 282 ··· 332 332 break; 333 333 334 334 default: 335 - bfa_assert(0); 335 + bfa_sm_fault(rport->fcs, event); 336 336 } 337 337 } 338 338 ··· 406 406 break; 407 407 408 408 default: 409 - bfa_assert(0); 409 + bfa_sm_fault(rport->fcs, event); 410 410 } 411 411 } 412 412 ··· 481 481 break; 482 482 483 483 default: 484 - bfa_assert(0); 484 + bfa_sm_fault(rport->fcs, event); 485 485 } 486 486 } 487 487 ··· 534 534 break; 535 535 536 536 default: 537 - bfa_assert(0); 537 + bfa_sm_fault(rport->fcs, event); 538 538 } 539 539 } 540 540 ··· 589 589 break; 590 590 591 591 default: 592 - bfa_assert(0); 592 + bfa_sm_fault(rport->fcs, event); 593 593 } 594 594 } 595 595 ··· 646 646 break; 647 647 648 648 default: 649 - bfa_assert(0); 649 + bfa_sm_fault(rport->fcs, event); 650 650 } 651 651 } 652 652 ··· 704 704 break; 705 705 706 706 default: 707 - bfa_assert(0); 707 + bfa_sm_fault(rport->fcs, event); 708 708 } 709 709 } 710 710 ··· 754 754 break; 755 755 756 756 default: 757 - bfa_assert(0); 757 + bfa_sm_fault(rport->fcs, event); 758 758 } 759 759 } 760 760 ··· 816 816 break; 817 817 818 818 default: 819 - bfa_assert(0); 819 + bfa_sm_fault(rport->fcs, event); 820 820 } 821 821 } 822 822 ··· 846 846 break; 847 847 848 848 default: 849 - bfa_assert(0); 849 + bfa_sm_fault(rport->fcs, event); 850 850 } 851 851 } 852 852 ··· 869 869 break; 870 870 871 871 default: 872 - bfa_assert(0); 872 + bfa_sm_fault(rport->fcs, event); 873 873 } 874 874 } 875 875 ··· 905 905 break; 906 906 907 907 default: 908 - bfa_assert(0); 908 + bfa_sm_fault(rport->fcs, event); 909 909 } 910 910 } 911 911 ··· 925 925 case RPSM_EVENT_HCB_OFFLINE: 926 926 case RPSM_EVENT_ADDRESS_CHANGE: 927 927 if (bfa_fcs_port_is_online(rport->port)) { 928 - bfa_sm_set_state(rport, 929 - bfa_fcs_rport_sm_nsdisc_sending); 930 - rport->ns_retries = 0; 931 - bfa_fcs_rport_send_gidpn(rport, NULL); 928 + if (bfa_fcs_fabric_is_switched(rport->port->fabric)) { 929 + bfa_sm_set_state(rport, 930 + bfa_fcs_rport_sm_nsdisc_sending); 931 + rport->ns_retries = 0; 932 + bfa_fcs_rport_send_gidpn(rport, NULL); 933 + } else { 934 + bfa_sm_set_state(rport, 935 + bfa_fcs_rport_sm_plogi_sending); 936 + rport->plogi_retries = 0; 937 + bfa_fcs_rport_send_plogi(rport, NULL); 938 + } 932 939 } else { 933 940 rport->pid = 0; 934 941 bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); ··· 958 951 break; 959 952 960 953 default: 961 - bfa_assert(0); 954 + bfa_sm_fault(rport->fcs, event); 962 955 } 963 956 } 964 957 ··· 1018 1011 break; 1019 1012 1020 1013 default: 1021 - bfa_assert(0); 1014 + bfa_sm_fault(rport->fcs, event); 1022 1015 } 1023 1016 } 1024 1017 ··· 1045 1038 break; 1046 1039 1047 1040 default: 1048 - bfa_assert(0); 1041 + bfa_sm_fault(rport->fcs, event); 1049 1042 } 1050 1043 } 1051 1044 ··· 1080 1073 break; 1081 1074 1082 1075 default: 1083 - bfa_assert(0); 1076 + bfa_sm_fault(rport->fcs, event); 1084 1077 } 1085 1078 } 1086 1079 ··· 1139 1132 break; 1140 1133 1141 1134 default: 1142 - bfa_assert(0); 1135 + bfa_sm_fault(rport->fcs, event); 1143 1136 } 1144 1137 } 1145 1138 ··· 1195 1188 break; 1196 1189 1197 1190 default: 1198 - bfa_assert(0); 1191 + bfa_sm_fault(rport->fcs, event); 1199 1192 } 1200 1193 } 1201 1194 ··· 1256 1249 break; 1257 1250 1258 1251 default: 1259 - bfa_assert(0); 1252 + 
bfa_sm_fault(rport->fcs, event); 1260 1253 } 1261 1254 } 1262 1255 ··· 1341 1334 break; 1342 1335 1343 1336 default: 1344 - bfa_assert(0); 1337 + bfa_sm_fault(rport->fcs, event); 1345 1338 } 1346 1339 } 1347 1340 ··· 1373 1366 len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid, 1374 1367 bfa_fcs_port_get_fcid(port), 0, 1375 1368 port->port_cfg.pwwn, port->port_cfg.nwwn, 1376 - bfa_pport_get_maxfrsize(port->fcs->bfa)); 1369 + bfa_fcport_get_maxfrsize(port->fcs->bfa)); 1377 1370 1378 1371 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 1379 1372 FC_CLASS_3, len, &fchs, bfa_fcs_rport_plogi_response, ··· 1485 1478 len = fc_plogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid, 1486 1479 bfa_fcs_port_get_fcid(port), rport->reply_oxid, 1487 1480 port->port_cfg.pwwn, port->port_cfg.nwwn, 1488 - bfa_pport_get_maxfrsize(port->fcs->bfa)); 1481 + bfa_fcport_get_maxfrsize(port->fcs->bfa)); 1489 1482 1490 1483 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 1491 1484 FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); ··· 1820 1813 /* 1821 1814 * get curent speed from pport attributes from BFA 1822 1815 */ 1823 - bfa_pport_get_attr(port->fcs->bfa, &pport_attr); 1816 + bfa_fcport_get_attr(port->fcs->bfa, &pport_attr); 1824 1817 1825 1818 speeds.port_op_speed = fc_bfa_speed_to_rpsc_operspeed(pport_attr.speed); 1826 1819 ··· 2039 2032 2040 2033 switch (event) { 2041 2034 case BFA_RPORT_AEN_ONLINE: 2042 - bfa_log(logmod, BFA_AEN_RPORT_ONLINE, rpwwn_ptr, lpwwn_ptr); 2043 - break; 2044 2035 case BFA_RPORT_AEN_OFFLINE: 2045 - bfa_log(logmod, BFA_AEN_RPORT_OFFLINE, rpwwn_ptr, lpwwn_ptr); 2046 - break; 2047 2036 case BFA_RPORT_AEN_DISCONNECT: 2048 - bfa_log(logmod, BFA_AEN_RPORT_DISCONNECT, rpwwn_ptr, lpwwn_ptr); 2037 + bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_RPORT, event), 2038 + rpwwn_ptr, lpwwn_ptr); 2049 2039 break; 2050 2040 case BFA_RPORT_AEN_QOS_PRIO: 2051 2041 aen_data.rport.priv.qos = data->priv.qos; ··· 2168 2164 bfa_trc(port->fcs, port->fabric->bb_credit); 2169 2165 2170 2166 port->fabric->bb_credit = bfa_os_ntohs(plogi->csp.bbcred); 2171 - bfa_pport_set_tx_bbcredit(port->fcs->bfa, 2167 + bfa_fcport_set_tx_bbcredit(port->fcs->bfa, 2172 2168 port->fabric->bb_credit); 2173 2169 } 2174 2170 ··· 2576 2572 2577 2573 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 2578 2574 FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); 2579 - } 2580 - 2581 - /** 2582 - * Module initialization 2583 - */ 2584 - void 2585 - bfa_fcs_rport_modinit(struct bfa_fcs_s *fcs) 2586 - { 2587 - } 2588 - 2589 - /** 2590 - * Module cleanup 2591 - */ 2592 - void 2593 - bfa_fcs_rport_modexit(struct bfa_fcs_s *fcs) 2594 - { 2595 - bfa_fcs_modexit_comp(fcs); 2596 2575 } 2597 2576 2598 2577 /**
+1 -1
drivers/scsi/bfa/rport_api.c
··· 102 102 rport_attr->qos_attr = qos_attr; 103 103 104 104 rport_attr->trl_enforced = BFA_FALSE; 105 - if (bfa_pport_is_ratelim(port->fcs->bfa)) { 105 + if (bfa_fcport_is_ratelim(port->fcs->bfa)) { 106 106 if ((rport->rpf.rpsc_speed == BFA_PPORT_SPEED_UNKNOWN) || 107 107 (rport->rpf.rpsc_speed < 108 108 bfa_fcs_port_get_rport_max_speed(port)))
+6 -6
drivers/scsi/bfa/rport_ftrs.c
··· 91 91 break; 92 92 93 93 default: 94 - bfa_assert(0); 94 + bfa_sm_fault(rport->fcs, event); 95 95 } 96 96 } 97 97 ··· 114 114 break; 115 115 116 116 default: 117 - bfa_assert(0); 117 + bfa_sm_fault(rport->fcs, event); 118 118 } 119 119 } 120 120 ··· 160 160 break; 161 161 162 162 default: 163 - bfa_assert(0); 163 + bfa_sm_fault(rport->fcs, event); 164 164 } 165 165 } 166 166 ··· 186 186 break; 187 187 188 188 default: 189 - bfa_assert(0); 189 + bfa_sm_fault(rport->fcs, event); 190 190 } 191 191 } 192 192 ··· 206 206 break; 207 207 208 208 default: 209 - bfa_assert(0); 209 + bfa_sm_fault(rport->fcs, event); 210 210 } 211 211 } 212 212 ··· 229 229 break; 230 230 231 231 default: 232 - bfa_assert(0); 232 + bfa_sm_fault(rport->fcs, event); 233 233 } 234 234 } 235 235 /**
+5 -5
drivers/scsi/bfa/scn.c
··· 90 90 break; 91 91 92 92 default: 93 - bfa_assert(0); 93 + bfa_sm_fault(scn->port->fcs, event); 94 94 } 95 95 } 96 96 ··· 109 109 break; 110 110 111 111 default: 112 - bfa_assert(0); 112 + bfa_sm_fault(scn->port->fcs, event); 113 113 } 114 114 } 115 115 ··· 137 137 break; 138 138 139 139 default: 140 - bfa_assert(0); 140 + bfa_sm_fault(scn->port->fcs, event); 141 141 } 142 142 } 143 143 ··· 157 157 break; 158 158 159 159 default: 160 - bfa_assert(0); 160 + bfa_sm_fault(scn->port->fcs, event); 161 161 } 162 162 } 163 163 ··· 171 171 break; 172 172 173 173 default: 174 - bfa_assert(0); 174 + bfa_sm_fault(scn->port->fcs, event); 175 175 } 176 176 } 177 177
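The ms.c, ns.c, rport.c, rport_ftrs.c and scn.c hunks above all make the same change: an unexpected state-machine event now goes through bfa_sm_fault() instead of bfa_assert(0), so the driver can report the bad event rather than assert. The standalone sketch below illustrates the behavioural difference; the state machine, event names and fault handler are simplified stand-ins, not the driver's own:

/*
 * Standalone sketch: report-and-continue fault handling for unexpected
 * state-machine events, in place of an assert.
 */
#include <stdio.h>

enum scn_event { SCN_EVENT_ONLINE, SCN_EVENT_OFFLINE, SCN_EVENT_RSP_OK };

static void sm_fault(const char *sm_name, int event)
{
        /* previously this path would have been bfa_assert(0) */
        fprintf(stderr, "%s: unexpected event %d, ignoring\n", sm_name, event);
}

static void scn_sm_offline(enum scn_event event)
{
        switch (event) {
        case SCN_EVENT_ONLINE:
                printf("scn: registering for RSCN\n");
                break;
        case SCN_EVENT_OFFLINE:
                break;
        default:
                sm_fault("scn_sm_offline", event);
        }
}

int main(void)
{
        scn_sm_offline(SCN_EVENT_RSP_OK);       /* unexpected here: logged, not fatal */
        return 0;
}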
+26 -60
drivers/scsi/bfa/vport.c
··· 122 122 break; 123 123 124 124 default: 125 - bfa_assert(0); 125 + bfa_sm_fault(__vport_fcs(vport), event); 126 126 } 127 127 } 128 128 ··· 165 165 break; 166 166 167 167 default: 168 - bfa_assert(0); 168 + bfa_sm_fault(__vport_fcs(vport), event); 169 169 } 170 170 } 171 171 ··· 202 202 break; 203 203 204 204 default: 205 - bfa_assert(0); 205 + bfa_sm_fault(__vport_fcs(vport), event); 206 206 } 207 207 } 208 208 ··· 249 249 break; 250 250 251 251 default: 252 - bfa_assert(0); 252 + bfa_sm_fault(__vport_fcs(vport), event); 253 253 } 254 254 } 255 255 ··· 283 283 break; 284 284 285 285 default: 286 - bfa_assert(0); 286 + bfa_sm_fault(__vport_fcs(vport), event); 287 287 } 288 288 } 289 289 ··· 310 310 break; 311 311 312 312 default: 313 - bfa_assert(0); 313 + bfa_sm_fault(__vport_fcs(vport), event); 314 314 } 315 315 } 316 316 ··· 339 339 break; 340 340 341 341 default: 342 - bfa_assert(0); 342 + bfa_sm_fault(__vport_fcs(vport), event); 343 343 } 344 344 } 345 345 ··· 387 387 break; 388 388 389 389 default: 390 - bfa_assert(0); 390 + bfa_sm_fault(__vport_fcs(vport), event); 391 391 } 392 392 } 393 393 ··· 419 419 break; 420 420 421 421 default: 422 - bfa_assert(0); 422 + bfa_sm_fault(__vport_fcs(vport), event); 423 423 } 424 424 } 425 425 ··· 447 447 448 448 bfa_assert(role <= BFA_PORT_ROLE_FCP_MAX); 449 449 450 - switch (event) { 451 - case BFA_LPORT_AEN_NPIV_DUP_WWN: 452 - bfa_log(logmod, BFA_AEN_LPORT_NPIV_DUP_WWN, lpwwn_ptr, 453 - role_str[role / 2]); 454 - break; 455 - case BFA_LPORT_AEN_NPIV_FABRIC_MAX: 456 - bfa_log(logmod, BFA_AEN_LPORT_NPIV_FABRIC_MAX, lpwwn_ptr, 457 - role_str[role / 2]); 458 - break; 459 - case BFA_LPORT_AEN_NPIV_UNKNOWN: 460 - bfa_log(logmod, BFA_AEN_LPORT_NPIV_UNKNOWN, lpwwn_ptr, 461 - role_str[role / 2]); 462 - break; 463 - default: 464 - break; 465 - } 450 + bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, event), lpwwn_ptr, 451 + role_str[role/2]); 466 452 467 453 aen_data.lport.vf_id = port->fabric->vf_id; 468 454 aen_data.lport.roles = role; ··· 464 478 bfa_fcs_vport_do_fdisc(struct bfa_fcs_vport_s *vport) 465 479 { 466 480 bfa_lps_fdisc(vport->lps, vport, 467 - bfa_pport_get_maxfrsize(__vport_bfa(vport)), 481 + bfa_fcport_get_maxfrsize(__vport_bfa(vport)), 468 482 __vport_pwwn(vport), __vport_nwwn(vport)); 469 483 vport->vport_stats.fdisc_sent++; 470 484 } ··· 603 617 } 604 618 605 619 /** 606 - * Module initialization 607 - */ 608 - void 609 - bfa_fcs_vport_modinit(struct bfa_fcs_s *fcs) 610 - { 611 - } 612 - 613 - /** 614 - * Module cleanup 615 - */ 616 - void 617 - bfa_fcs_vport_modexit(struct bfa_fcs_s *fcs) 618 - { 619 - bfa_fcs_modexit_comp(fcs); 620 - } 621 - 622 - u32 623 - bfa_fcs_vport_get_max(struct bfa_fcs_s *fcs) 624 - { 625 - struct bfa_ioc_attr_s ioc_attr; 626 - 627 - bfa_get_attr(fcs->bfa, &ioc_attr); 628 - 629 - if (ioc_attr.pci_attr.device_id == BFA_PCI_DEVICE_ID_CT) 630 - return BFA_FCS_MAX_VPORTS_SUPP_CT; 631 - else 632 - return BFA_FCS_MAX_VPORTS_SUPP_CB; 633 - } 634 - 635 - 636 - 637 - /** 638 620 * fcs_vport_api Virtual port API 639 621 */ 640 622 ··· 638 684 return BFA_STATUS_VPORT_EXISTS; 639 685 640 686 if (bfa_fcs_fabric_vport_count(&fcs->fabric) == 641 - bfa_fcs_vport_get_max(fcs)) 687 + bfa_lps_get_max_vport(fcs->bfa)) 642 688 return BFA_STATUS_VPORT_MAX; 643 689 644 690 vport->lps = bfa_lps_alloc(fcs->bfa); ··· 648 694 vport->vport_drv = vport_drv; 649 695 bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit); 650 696 651 - bfa_fcs_lport_init(&vport->lport, fcs, vf_id, vport_cfg, vport); 697 + 
bfa_fcs_lport_attach(&vport->lport, fcs, vf_id, vport); 698 + bfa_fcs_lport_init(&vport->lport, vport_cfg); 652 699 653 700 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_CREATE); 654 701 ··· 843 888 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK); 844 889 } 845 890 891 + /** 892 + * Received clear virtual link 893 + */ 894 + void 895 + bfa_cb_lps_cvl_event(void *bfad, void *uarg) 896 + { 897 + struct bfa_fcs_vport_s *vport = uarg; 846 898 899 + /* Send an Offline followed by an ONLINE */ 900 + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_OFFLINE); 901 + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE); 902 + }
+1 -1
drivers/scsi/bnx2i/bnx2i_iscsi.c
··· 1989 1989 .queuecommand = iscsi_queuecommand, 1990 1990 .eh_abort_handler = iscsi_eh_abort, 1991 1991 .eh_device_reset_handler = iscsi_eh_device_reset, 1992 - .eh_target_reset_handler = iscsi_eh_target_reset, 1992 + .eh_target_reset_handler = iscsi_eh_recover_target, 1993 1993 .change_queue_depth = iscsi_change_queue_depth, 1994 1994 .can_queue = 1024, 1995 1995 .max_sectors = 127,
+1 -1
drivers/scsi/cxgb3i/cxgb3i_iscsi.c
··· 915 915 .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, 916 916 .eh_abort_handler = iscsi_eh_abort, 917 917 .eh_device_reset_handler = iscsi_eh_device_reset, 918 - .eh_target_reset_handler = iscsi_eh_target_reset, 918 + .eh_target_reset_handler = iscsi_eh_recover_target, 919 919 .target_alloc = iscsi_target_alloc, 920 920 .use_clustering = DISABLE_CLUSTERING, 921 921 .this_id = -1,
+4 -2
drivers/scsi/device_handler/scsi_dh_emc.c
··· 272 272 int len = 0; 273 273 274 274 rq = blk_get_request(sdev->request_queue, 275 - (cmd == MODE_SELECT) ? WRITE : READ, GFP_NOIO); 275 + (cmd != INQUIRY) ? WRITE : READ, GFP_NOIO); 276 276 if (!rq) { 277 277 sdev_printk(KERN_INFO, sdev, "get_req: blk_get_request failed"); 278 278 return NULL; ··· 286 286 len = sizeof(short_trespass); 287 287 rq->cmd_flags |= REQ_RW; 288 288 rq->cmd[1] = 0x10; 289 + rq->cmd[4] = len; 289 290 break; 290 291 case MODE_SELECT_10: 291 292 len = sizeof(long_trespass); 292 293 rq->cmd_flags |= REQ_RW; 293 294 rq->cmd[1] = 0x10; 295 + rq->cmd[8] = len; 294 296 break; 295 297 case INQUIRY: 296 298 len = CLARIION_BUFFER_SIZE; 299 + rq->cmd[4] = len; 297 300 memset(buffer, 0, len); 298 301 break; 299 302 default: ··· 304 301 break; 305 302 } 306 303 307 - rq->cmd[4] = len; 308 304 rq->cmd_type = REQ_TYPE_BLOCK_PC; 309 305 rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | 310 306 REQ_FAILFAST_DRIVER;
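The scsi_dh_emc.c hunk above fixes where the parameter list length lands in the trespass CDBs: the 6-byte MODE SELECT carries it in byte 4, while the 10-byte MODE SELECT(10) carries it in bytes 7-8, so the old unconditional rq->cmd[4] = len wrote a reserved byte and left the length zero for the long form. A standalone sketch of the two layouts (illustration only, not the device handler's code):

/*
 * Standalone sketch: parameter list length placement in 6-byte vs 10-byte
 * MODE SELECT CDBs.
 */
#include <stdio.h>
#include <string.h>

#define MODE_SELECT     0x15
#define MODE_SELECT_10  0x55

static void build_trespass_cdb(unsigned char *cdb, int use_10_byte,
                               unsigned int len)
{
        memset(cdb, 0, 10);
        if (use_10_byte) {
                cdb[0] = MODE_SELECT_10;
                cdb[1] = 0x10;                  /* PF bit */
                cdb[7] = (len >> 8) & 0xff;     /* parameter list length, MSB */
                cdb[8] = len & 0xff;            /* parameter list length, LSB */
        } else {
                cdb[0] = MODE_SELECT;
                cdb[1] = 0x10;                  /* PF bit */
                cdb[4] = len & 0xff;            /* parameter list length */
        }
}

int main(void)
{
        unsigned char cdb[10];

        build_trespass_cdb(cdb, 1, 24);
        printf("MODE SELECT(10): length lives in bytes 7-8 -> %u\n",
               (cdb[7] << 8) | cdb[8]);
        return 0;
}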
+152 -178
drivers/scsi/hpsa.c
··· 43 43 #include <scsi/scsi_cmnd.h> 44 44 #include <scsi/scsi_device.h> 45 45 #include <scsi/scsi_host.h> 46 + #include <scsi/scsi_tcq.h> 46 47 #include <linux/cciss_ioctl.h> 47 48 #include <linux/string.h> 48 49 #include <linux/bitmap.h> ··· 53 52 #include "hpsa.h" 54 53 55 54 /* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */ 56 - #define HPSA_DRIVER_VERSION "2.0.1-3" 55 + #define HPSA_DRIVER_VERSION "2.0.2-1" 57 56 #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")" 58 57 59 58 /* How long to wait (in milliseconds) for board to go into simple mode */ ··· 135 134 static void hpsa_scan_start(struct Scsi_Host *); 136 135 static int hpsa_scan_finished(struct Scsi_Host *sh, 137 136 unsigned long elapsed_time); 137 + static int hpsa_change_queue_depth(struct scsi_device *sdev, 138 + int qdepth, int reason); 138 139 139 140 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd); 140 141 static int hpsa_slave_alloc(struct scsi_device *sdev); ··· 185 182 .queuecommand = hpsa_scsi_queue_command, 186 183 .scan_start = hpsa_scan_start, 187 184 .scan_finished = hpsa_scan_finished, 185 + .change_queue_depth = hpsa_change_queue_depth, 188 186 .this_id = -1, 189 - .sg_tablesize = MAXSGENTRIES, 190 187 .use_clustering = ENABLE_CLUSTERING, 191 188 .eh_device_reset_handler = hpsa_eh_device_reset_handler, 192 189 .ioctl = hpsa_ioctl, ··· 211 208 return (struct ctlr_info *) *priv; 212 209 } 213 210 214 - static struct task_struct *hpsa_scan_thread; 215 - static DEFINE_MUTEX(hpsa_scan_mutex); 216 - static LIST_HEAD(hpsa_scan_q); 217 - static int hpsa_scan_func(void *data); 218 - 219 - /** 220 - * add_to_scan_list() - add controller to rescan queue 221 - * @h: Pointer to the controller. 222 - * 223 - * Adds the controller to the rescan queue if not already on the queue. 224 - * 225 - * returns 1 if added to the queue, 0 if skipped (could be on the 226 - * queue already, or the controller could be initializing or shutting 227 - * down). 228 - **/ 229 - static int add_to_scan_list(struct ctlr_info *h) 230 - { 231 - struct ctlr_info *test_h; 232 - int found = 0; 233 - int ret = 0; 234 - 235 - if (h->busy_initializing) 236 - return 0; 237 - 238 - /* 239 - * If we don't get the lock, it means the driver is unloading 240 - * and there's no point in scheduling a new scan. 241 - */ 242 - if (!mutex_trylock(&h->busy_shutting_down)) 243 - return 0; 244 - 245 - mutex_lock(&hpsa_scan_mutex); 246 - list_for_each_entry(test_h, &hpsa_scan_q, scan_list) { 247 - if (test_h == h) { 248 - found = 1; 249 - break; 250 - } 251 - } 252 - if (!found && !h->busy_scanning) { 253 - INIT_COMPLETION(h->scan_wait); 254 - list_add_tail(&h->scan_list, &hpsa_scan_q); 255 - ret = 1; 256 - } 257 - mutex_unlock(&hpsa_scan_mutex); 258 - mutex_unlock(&h->busy_shutting_down); 259 - 260 - return ret; 261 - } 262 - 263 - /** 264 - * remove_from_scan_list() - remove controller from rescan queue 265 - * @h: Pointer to the controller. 266 - * 267 - * Removes the controller from the rescan queue if present. Blocks if 268 - * the controller is currently conducting a rescan. The controller 269 - * can be in one of three states: 270 - * 1. Doesn't need a scan 271 - * 2. On the scan list, but not scanning yet (we remove it) 272 - * 3. Busy scanning (and not on the list). In this case we want to wait for 273 - * the scan to complete to make sure the scanning thread for this 274 - * controller is completely idle. 
275 - **/ 276 - static void remove_from_scan_list(struct ctlr_info *h) 277 - { 278 - struct ctlr_info *test_h, *tmp_h; 279 - 280 - mutex_lock(&hpsa_scan_mutex); 281 - list_for_each_entry_safe(test_h, tmp_h, &hpsa_scan_q, scan_list) { 282 - if (test_h == h) { /* state 2. */ 283 - list_del(&h->scan_list); 284 - complete_all(&h->scan_wait); 285 - mutex_unlock(&hpsa_scan_mutex); 286 - return; 287 - } 288 - } 289 - if (h->busy_scanning) { /* state 3. */ 290 - mutex_unlock(&hpsa_scan_mutex); 291 - wait_for_completion(&h->scan_wait); 292 - } else { /* state 1, nothing to do. */ 293 - mutex_unlock(&hpsa_scan_mutex); 294 - } 295 - } 296 - 297 - /* hpsa_scan_func() - kernel thread used to rescan controllers 298 - * @data: Ignored. 299 - * 300 - * A kernel thread used scan for drive topology changes on 301 - * controllers. The thread processes only one controller at a time 302 - * using a queue. Controllers are added to the queue using 303 - * add_to_scan_list() and removed from the queue either after done 304 - * processing or using remove_from_scan_list(). 305 - * 306 - * returns 0. 307 - **/ 308 - static int hpsa_scan_func(__attribute__((unused)) void *data) 309 - { 310 - struct ctlr_info *h; 311 - int host_no; 312 - 313 - while (1) { 314 - set_current_state(TASK_INTERRUPTIBLE); 315 - schedule(); 316 - if (kthread_should_stop()) 317 - break; 318 - 319 - while (1) { 320 - mutex_lock(&hpsa_scan_mutex); 321 - if (list_empty(&hpsa_scan_q)) { 322 - mutex_unlock(&hpsa_scan_mutex); 323 - break; 324 - } 325 - h = list_entry(hpsa_scan_q.next, struct ctlr_info, 326 - scan_list); 327 - list_del(&h->scan_list); 328 - h->busy_scanning = 1; 329 - mutex_unlock(&hpsa_scan_mutex); 330 - host_no = h->scsi_host ? h->scsi_host->host_no : -1; 331 - hpsa_scan_start(h->scsi_host); 332 - complete_all(&h->scan_wait); 333 - mutex_lock(&hpsa_scan_mutex); 334 - h->busy_scanning = 0; 335 - mutex_unlock(&hpsa_scan_mutex); 336 - } 337 - } 338 - return 0; 339 - } 340 - 341 211 static int check_for_unit_attention(struct ctlr_info *h, 342 212 struct CommandList *c) 343 213 { ··· 228 352 break; 229 353 case REPORT_LUNS_CHANGED: 230 354 dev_warn(&h->pdev->dev, "hpsa%d: report LUN data " 231 - "changed\n", h->ctlr); 355 + "changed, action required\n", h->ctlr); 232 356 /* 233 - * Here, we could call add_to_scan_list and wake up the scan thread, 234 - * except that it's quite likely that we will get more than one 235 - * REPORT_LUNS_CHANGED condition in quick succession, which means 236 - * that those which occur after the first one will likely happen 237 - * *during* the hpsa_scan_thread's rescan. And the rescan code is not 238 - * robust enough to restart in the middle, undoing what it has already 239 - * done, and it's not clear that it's even possible to do this, since 240 - * part of what it does is notify the SCSI mid layer, which starts 241 - * doing it's own i/o to read partition tables and so on, and the 242 - * driver doesn't have visibility to know what might need undoing. 243 - * In any event, if possible, it is horribly complicated to get right 244 - * so we just don't do it for now. 245 - * 246 357 * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012. 
247 358 */ 248 359 break; ··· 256 393 struct ctlr_info *h; 257 394 struct Scsi_Host *shost = class_to_shost(dev); 258 395 h = shost_to_hba(shost); 259 - if (add_to_scan_list(h)) { 260 - wake_up_process(hpsa_scan_thread); 261 - wait_for_completion_interruptible(&h->scan_wait); 262 - } 396 + hpsa_scan_start(h->scsi_host); 263 397 return count; 264 398 } 265 399 ··· 843 983 spin_lock_init(&h->devlock); 844 984 } 845 985 986 + static void hpsa_free_sg_chain_blocks(struct ctlr_info *h) 987 + { 988 + int i; 989 + 990 + if (!h->cmd_sg_list) 991 + return; 992 + for (i = 0; i < h->nr_cmds; i++) { 993 + kfree(h->cmd_sg_list[i]); 994 + h->cmd_sg_list[i] = NULL; 995 + } 996 + kfree(h->cmd_sg_list); 997 + h->cmd_sg_list = NULL; 998 + } 999 + 1000 + static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h) 1001 + { 1002 + int i; 1003 + 1004 + if (h->chainsize <= 0) 1005 + return 0; 1006 + 1007 + h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds, 1008 + GFP_KERNEL); 1009 + if (!h->cmd_sg_list) 1010 + return -ENOMEM; 1011 + for (i = 0; i < h->nr_cmds; i++) { 1012 + h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) * 1013 + h->chainsize, GFP_KERNEL); 1014 + if (!h->cmd_sg_list[i]) 1015 + goto clean; 1016 + } 1017 + return 0; 1018 + 1019 + clean: 1020 + hpsa_free_sg_chain_blocks(h); 1021 + return -ENOMEM; 1022 + } 1023 + 1024 + static void hpsa_map_sg_chain_block(struct ctlr_info *h, 1025 + struct CommandList *c) 1026 + { 1027 + struct SGDescriptor *chain_sg, *chain_block; 1028 + u64 temp64; 1029 + 1030 + chain_sg = &c->SG[h->max_cmd_sg_entries - 1]; 1031 + chain_block = h->cmd_sg_list[c->cmdindex]; 1032 + chain_sg->Ext = HPSA_SG_CHAIN; 1033 + chain_sg->Len = sizeof(*chain_sg) * 1034 + (c->Header.SGTotal - h->max_cmd_sg_entries); 1035 + temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len, 1036 + PCI_DMA_TODEVICE); 1037 + chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL); 1038 + chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL); 1039 + } 1040 + 1041 + static void hpsa_unmap_sg_chain_block(struct ctlr_info *h, 1042 + struct CommandList *c) 1043 + { 1044 + struct SGDescriptor *chain_sg; 1045 + union u64bit temp64; 1046 + 1047 + if (c->Header.SGTotal <= h->max_cmd_sg_entries) 1048 + return; 1049 + 1050 + chain_sg = &c->SG[h->max_cmd_sg_entries - 1]; 1051 + temp64.val32.lower = chain_sg->Addr.lower; 1052 + temp64.val32.upper = chain_sg->Addr.upper; 1053 + pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE); 1054 + } 1055 + 846 1056 static void complete_scsi_command(struct CommandList *cp, 847 1057 int timeout, u32 tag) 848 1058 { ··· 929 999 h = cp->h; 930 1000 931 1001 scsi_dma_unmap(cmd); /* undo the DMA mappings */ 1002 + if (cp->Header.SGTotal > h->max_cmd_sg_entries) 1003 + hpsa_unmap_sg_chain_block(h, cp); 932 1004 933 1005 cmd->result = (DID_OK << 16); /* host byte */ 934 1006 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */ 935 - cmd->result |= (ei->ScsiStatus << 1); 1007 + cmd->result |= ei->ScsiStatus; 936 1008 937 1009 /* copy the sense data whether we need to or not. */ 938 1010 memcpy(cmd->sense_buffer, ei->SenseInfo, ··· 1135 1203 sh->max_id = HPSA_MAX_LUN; 1136 1204 sh->can_queue = h->nr_cmds; 1137 1205 sh->cmd_per_lun = h->nr_cmds; 1206 + sh->sg_tablesize = h->maxsgentries; 1138 1207 h->scsi_host = sh; 1139 1208 sh->hostdata[0] = (unsigned long) h; 1140 1209 sh->irq = h->intr[PERF_MODE_INT]; ··· 1315 1382 1316 1383 if (c == NULL) { /* trouble... 
*/ 1317 1384 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); 1318 - return -1; 1385 + return -ENOMEM; 1319 1386 } 1320 1387 1321 1388 fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG); ··· 1837 1904 * dma mapping and fills in the scatter gather entries of the 1838 1905 * hpsa command, cp. 1839 1906 */ 1840 - static int hpsa_scatter_gather(struct pci_dev *pdev, 1907 + static int hpsa_scatter_gather(struct ctlr_info *h, 1841 1908 struct CommandList *cp, 1842 1909 struct scsi_cmnd *cmd) 1843 1910 { 1844 1911 unsigned int len; 1845 1912 struct scatterlist *sg; 1846 1913 u64 addr64; 1847 - int use_sg, i; 1914 + int use_sg, i, sg_index, chained; 1915 + struct SGDescriptor *curr_sg; 1848 1916 1849 - BUG_ON(scsi_sg_count(cmd) > MAXSGENTRIES); 1917 + BUG_ON(scsi_sg_count(cmd) > h->maxsgentries); 1850 1918 1851 1919 use_sg = scsi_dma_map(cmd); 1852 1920 if (use_sg < 0) ··· 1856 1922 if (!use_sg) 1857 1923 goto sglist_finished; 1858 1924 1925 + curr_sg = cp->SG; 1926 + chained = 0; 1927 + sg_index = 0; 1859 1928 scsi_for_each_sg(cmd, sg, use_sg, i) { 1929 + if (i == h->max_cmd_sg_entries - 1 && 1930 + use_sg > h->max_cmd_sg_entries) { 1931 + chained = 1; 1932 + curr_sg = h->cmd_sg_list[cp->cmdindex]; 1933 + sg_index = 0; 1934 + } 1860 1935 addr64 = (u64) sg_dma_address(sg); 1861 1936 len = sg_dma_len(sg); 1862 - cp->SG[i].Addr.lower = 1863 - (u32) (addr64 & (u64) 0x00000000FFFFFFFF); 1864 - cp->SG[i].Addr.upper = 1865 - (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF); 1866 - cp->SG[i].Len = len; 1867 - cp->SG[i].Ext = 0; /* we are not chaining */ 1937 + curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL); 1938 + curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL); 1939 + curr_sg->Len = len; 1940 + curr_sg->Ext = 0; /* we are not chaining */ 1941 + curr_sg++; 1942 + } 1943 + 1944 + if (use_sg + chained > h->maxSG) 1945 + h->maxSG = use_sg + chained; 1946 + 1947 + if (chained) { 1948 + cp->Header.SGList = h->max_cmd_sg_entries; 1949 + cp->Header.SGTotal = (u16) (use_sg + 1); 1950 + hpsa_map_sg_chain_block(h, cp); 1951 + return 0; 1868 1952 } 1869 1953 1870 1954 sglist_finished: ··· 1978 2026 break; 1979 2027 } 1980 2028 1981 - if (hpsa_scatter_gather(h->pdev, c, cmd) < 0) { /* Fill SG list */ 2029 + if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */ 1982 2030 cmd_free(h, c); 1983 2031 return SCSI_MLQUEUE_HOST_BUSY; 1984 2032 } ··· 2027 2075 finished = h->scan_finished; 2028 2076 spin_unlock_irqrestore(&h->scan_lock, flags); 2029 2077 return finished; 2078 + } 2079 + 2080 + static int hpsa_change_queue_depth(struct scsi_device *sdev, 2081 + int qdepth, int reason) 2082 + { 2083 + struct ctlr_info *h = sdev_to_hba(sdev); 2084 + 2085 + if (reason != SCSI_QDEPTH_DEFAULT) 2086 + return -ENOTSUPP; 2087 + 2088 + if (qdepth < 1) 2089 + qdepth = 1; 2090 + else 2091 + if (qdepth > h->nr_cmds) 2092 + qdepth = h->nr_cmds; 2093 + scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); 2094 + return sdev->queue_depth; 2030 2095 } 2031 2096 2032 2097 static void hpsa_unregister_scsi(struct ctlr_info *h) ··· 2930 2961 return IRQ_HANDLED; 2931 2962 } 2932 2963 2933 - /* Send a message CDB to the firmwart. */ 2964 + /* Send a message CDB to the firmware. 
*/ 2934 2965 static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode, 2935 2966 unsigned char type) 2936 2967 { ··· 3265 3296 h->intr[PERF_MODE_INT] = pdev->irq; 3266 3297 } 3267 3298 3268 - static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev) 3299 + static int __devinit hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev) 3269 3300 { 3270 3301 ushort subsystem_vendor_id, subsystem_device_id, command; 3271 3302 u32 board_id, scratchpad = 0; ··· 3374 3405 3375 3406 h->board_id = board_id; 3376 3407 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands)); 3408 + h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements)); 3409 + 3410 + /* 3411 + * Limit in-command s/g elements to 32 save dma'able memory. 3412 + * Howvever spec says if 0, use 31 3413 + */ 3414 + 3415 + h->max_cmd_sg_entries = 31; 3416 + if (h->maxsgentries > 512) { 3417 + h->max_cmd_sg_entries = 32; 3418 + h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1; 3419 + h->maxsgentries--; /* save one for chain pointer */ 3420 + } else { 3421 + h->maxsgentries = 31; /* default to traditional values */ 3422 + h->chainsize = 0; 3423 + } 3424 + 3377 3425 h->product_name = products[prod_index].product_name; 3378 3426 h->access = *(products[prod_index].access); 3379 3427 /* Allow room for some ioctls */ ··· 3518 3532 h->busy_initializing = 1; 3519 3533 INIT_HLIST_HEAD(&h->cmpQ); 3520 3534 INIT_HLIST_HEAD(&h->reqQ); 3521 - mutex_init(&h->busy_shutting_down); 3522 - init_completion(&h->scan_wait); 3523 3535 rc = hpsa_pci_init(h, pdev); 3524 3536 if (rc != 0) 3525 3537 goto clean1; ··· 3571 3587 rc = -ENOMEM; 3572 3588 goto clean4; 3573 3589 } 3590 + if (hpsa_allocate_sg_chain_blocks(h)) 3591 + goto clean4; 3574 3592 spin_lock_init(&h->lock); 3575 3593 spin_lock_init(&h->scan_lock); 3576 3594 init_waitqueue_head(&h->scan_wait_queue); ··· 3595 3609 return 1; 3596 3610 3597 3611 clean4: 3612 + hpsa_free_sg_chain_blocks(h); 3598 3613 kfree(h->cmd_pool_bits); 3599 3614 if (h->cmd_pool) 3600 3615 pci_free_consistent(h->pdev, ··· 3668 3681 return; 3669 3682 } 3670 3683 h = pci_get_drvdata(pdev); 3671 - mutex_lock(&h->busy_shutting_down); 3672 - remove_from_scan_list(h); 3673 3684 hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */ 3674 3685 hpsa_shutdown(pdev); 3675 3686 iounmap(h->vaddr); 3687 + hpsa_free_sg_chain_blocks(h); 3676 3688 pci_free_consistent(h->pdev, 3677 3689 h->nr_cmds * sizeof(struct CommandList), 3678 3690 h->cmd_pool, h->cmd_pool_dhandle); ··· 3689 3703 */ 3690 3704 pci_release_regions(pdev); 3691 3705 pci_set_drvdata(pdev, NULL); 3692 - mutex_unlock(&h->busy_shutting_down); 3693 3706 kfree(h); 3694 3707 } 3695 3708 ··· 3842 3857 */ 3843 3858 static int __init hpsa_init(void) 3844 3859 { 3845 - int err; 3846 - /* Start the scan thread */ 3847 - hpsa_scan_thread = kthread_run(hpsa_scan_func, NULL, "hpsa_scan"); 3848 - if (IS_ERR(hpsa_scan_thread)) { 3849 - err = PTR_ERR(hpsa_scan_thread); 3850 - return -ENODEV; 3851 - } 3852 - err = pci_register_driver(&hpsa_pci_driver); 3853 - if (err) 3854 - kthread_stop(hpsa_scan_thread); 3855 - return err; 3860 + return pci_register_driver(&hpsa_pci_driver); 3856 3861 } 3857 3862 3858 3863 static void __exit hpsa_cleanup(void) 3859 3864 { 3860 3865 pci_unregister_driver(&hpsa_pci_driver); 3861 - kthread_stop(hpsa_scan_thread); 3862 3866 } 3863 3867 3864 3868 module_init(hpsa_init);
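The largest piece of the hpsa.c hunk above is scatter-gather chaining: when a request needs more SG entries than fit in the command itself (h->max_cmd_sg_entries), the last embedded slot is turned into a pointer to an externally allocated chain block (Ext = HPSA_SG_CHAIN in the driver). The standalone sketch below mirrors that spill-over logic with simplified types and limits; it is an illustration, not the driver's structures:

/*
 * Conceptual sketch of SG chaining: entries beyond the embedded limit spill
 * into a chain block, and the last embedded slot describes that block.
 */
#include <stdio.h>

#define MAX_EMBEDDED_SG 4               /* stand-in for h->max_cmd_sg_entries */
#define SG_CHAIN        0x80000000u     /* stand-in for HPSA_SG_CHAIN */

struct sg_entry {
        unsigned long long addr;
        unsigned int len;
        unsigned int ext;               /* nonzero marks a chain pointer */
};

static void fill_sg(struct sg_entry *embedded, struct sg_entry *chain, int nents)
{
        struct sg_entry *cur = embedded;
        int i;

        for (i = 0; i < nents; i++) {
                if (i == MAX_EMBEDDED_SG - 1 && nents > MAX_EMBEDDED_SG)
                        cur = chain;            /* spill into the chain block */
                cur->addr = 0x1000ull * i;      /* fake DMA address */
                cur->len = 512;
                cur->ext = 0;
                cur++;
        }
        if (nents > MAX_EMBEDDED_SG) {
                /* last embedded slot now describes the chain block itself */
                embedded[MAX_EMBEDDED_SG - 1].ext = SG_CHAIN;
                embedded[MAX_EMBEDDED_SG - 1].len =
                        (nents - (MAX_EMBEDDED_SG - 1)) * sizeof(struct sg_entry);
        }
}

int main(void)
{
        struct sg_entry embedded[MAX_EMBEDDED_SG] = { { 0 } };
        struct sg_entry chain[16] = { { 0 } };

        fill_sg(embedded, chain, 7);
        printf("7 entries: last embedded slot is %s\n",
               embedded[MAX_EMBEDDED_SG - 1].ext ? "a chain pointer" : "data");
        return 0;
}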
+4 -3
drivers/scsi/hpsa.h
··· 83 83 unsigned int maxQsinceinit; 84 84 unsigned int maxSG; 85 85 spinlock_t lock; 86 + int maxsgentries; 87 + u8 max_cmd_sg_entries; 88 + int chainsize; 89 + struct SGDescriptor **cmd_sg_list; 86 90 87 91 /* pointers to command and error info pool */ 88 92 struct CommandList *cmd_pool; ··· 101 97 int scan_finished; 102 98 spinlock_t scan_lock; 103 99 wait_queue_head_t scan_wait_queue; 104 - struct mutex busy_shutting_down; 105 - struct list_head scan_list; 106 - struct completion scan_wait; 107 100 108 101 struct Scsi_Host *scsi_host; 109 102 spinlock_t devlock; /* to protect hba[ctlr]->dev[]; */
+12 -8
drivers/scsi/hpsa_cmd.h
··· 23 23 24 24 /* general boundary defintions */ 25 25 #define SENSEINFOBYTES 32 /* may vary between hbas */ 26 - #define MAXSGENTRIES 31 26 + #define MAXSGENTRIES 32 27 + #define HPSA_SG_CHAIN 0x80000000 27 28 #define MAXREPLYQS 256 28 29 29 30 /* Command Status value */ ··· 306 305 int cmd_type; 307 306 long cmdindex; 308 307 struct hlist_node list; 309 - struct CommandList *prev; 310 - struct CommandList *next; 311 308 struct request *rq; 312 309 struct completion *waiting; 313 - int retry_count; 314 310 void *scsi_cmd; 315 311 316 312 /* on 64 bit architectures, to get this to be 32-byte-aligned 317 - * it so happens we need no padding, on 32 bit systems, 318 - * we need 8 bytes of padding. This does that. 313 + * it so happens we need PAD_64 bytes of padding, on 32 bit systems, 314 + * we need PAD_32 bytes of padding (see below). This does that. 315 + * If it happens that 64 bit and 32 bit systems need different 316 + * padding, PAD_32 and PAD_64 can be set independently, and. 317 + * the code below will do the right thing. 319 318 */ 320 - #define COMMANDLIST_PAD ((8 - sizeof(long))/4 * 8) 319 + #define IS_32_BIT ((8 - sizeof(long))/4) 320 + #define IS_64_BIT (!IS_32_BIT) 321 + #define PAD_32 (4) 322 + #define PAD_64 (4) 323 + #define COMMANDLIST_PAD (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64) 321 324 u8 pad[COMMANDLIST_PAD]; 322 - 323 325 }; 324 326 325 327 /* Configuration Table Structure */
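The COMMANDLIST_PAD rework in hpsa_cmd.h above is pure preprocessor arithmetic: IS_32_BIT evaluates to 1 when sizeof(long) is 4 and to 0 when it is 8, so exactly one of PAD_32/PAD_64 contributes to the padding. A standalone check of that arithmetic, reusing the same macros:

/*
 * Standalone check of the COMMANDLIST_PAD selection between 32- and 64-bit.
 */
#include <stdio.h>

#define IS_32_BIT ((8 - sizeof(long))/4)
#define IS_64_BIT (!IS_32_BIT)
#define PAD_32 (4)
#define PAD_64 (4)
#define COMMANDLIST_PAD (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64)

int main(void)
{
        printf("sizeof(long) = %zu -> COMMANDLIST_PAD = %zu bytes\n",
               sizeof(long), (size_t) COMMANDLIST_PAD);
        return 0;
}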
+27
drivers/scsi/ibmvscsi/ibmvfc.c
··· 29 29 #include <linux/interrupt.h> 30 30 #include <linux/kthread.h> 31 31 #include <linux/of.h> 32 + #include <linux/pm.h> 32 33 #include <linux/stringify.h> 33 34 #include <asm/firmware.h> 34 35 #include <asm/irq.h> ··· 4737 4736 } 4738 4737 4739 4738 /** 4739 + * ibmvfc_resume - Resume from suspend 4740 + * @dev: device struct 4741 + * 4742 + * We may have lost an interrupt across suspend/resume, so kick the 4743 + * interrupt handler 4744 + * 4745 + */ 4746 + static int ibmvfc_resume(struct device *dev) 4747 + { 4748 + unsigned long flags; 4749 + struct ibmvfc_host *vhost = dev_get_drvdata(dev); 4750 + struct vio_dev *vdev = to_vio_dev(dev); 4751 + 4752 + spin_lock_irqsave(vhost->host->host_lock, flags); 4753 + vio_disable_interrupts(vdev); 4754 + tasklet_schedule(&vhost->tasklet); 4755 + spin_unlock_irqrestore(vhost->host->host_lock, flags); 4756 + return 0; 4757 + } 4758 + 4759 + /** 4740 4760 * ibmvfc_get_desired_dma - Calculate DMA resources needed by the driver 4741 4761 * @vdev: vio device struct 4742 4762 * ··· 4777 4755 }; 4778 4756 MODULE_DEVICE_TABLE(vio, ibmvfc_device_table); 4779 4757 4758 + static struct dev_pm_ops ibmvfc_pm_ops = { 4759 + .resume = ibmvfc_resume 4760 + }; 4761 + 4780 4762 static struct vio_driver ibmvfc_driver = { 4781 4763 .id_table = ibmvfc_device_table, 4782 4764 .probe = ibmvfc_probe, ··· 4789 4763 .driver = { 4790 4764 .name = IBMVFC_NAME, 4791 4765 .owner = THIS_MODULE, 4766 + .pm = &ibmvfc_pm_ops, 4792 4767 } 4793 4768 }; 4794 4769
+19
drivers/scsi/ibmvscsi/ibmvscsi.c
··· 71 71 #include <linux/dma-mapping.h> 72 72 #include <linux/delay.h> 73 73 #include <linux/of.h> 74 + #include <linux/pm.h> 74 75 #include <asm/firmware.h> 75 76 #include <asm/vio.h> 76 77 #include <scsi/scsi.h> ··· 1992 1991 } 1993 1992 1994 1993 /** 1994 + * ibmvscsi_resume: Resume from suspend 1995 + * @dev: device struct 1996 + * 1997 + * We may have lost an interrupt across suspend/resume, so kick the 1998 + * interrupt handler 1999 + */ 2000 + static int ibmvscsi_resume(struct device *dev) 2001 + { 2002 + struct ibmvscsi_host_data *hostdata = dev_get_drvdata(dev); 2003 + return ibmvscsi_ops->resume(hostdata); 2004 + } 2005 + 2006 + /** 1995 2007 * ibmvscsi_device_table: Used by vio.c to match devices in the device tree we 1996 2008 * support. 1997 2009 */ ··· 2014 2000 }; 2015 2001 MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table); 2016 2002 2003 + static struct dev_pm_ops ibmvscsi_pm_ops = { 2004 + .resume = ibmvscsi_resume 2005 + }; 2006 + 2017 2007 static struct vio_driver ibmvscsi_driver = { 2018 2008 .id_table = ibmvscsi_device_table, 2019 2009 .probe = ibmvscsi_probe, ··· 2026 2008 .driver = { 2027 2009 .name = "ibmvscsi", 2028 2010 .owner = THIS_MODULE, 2011 + .pm = &ibmvscsi_pm_ops, 2029 2012 } 2030 2013 }; 2031 2014
+1
drivers/scsi/ibmvscsi/ibmvscsi.h
··· 120 120 struct ibmvscsi_host_data *hostdata); 121 121 int (*send_crq)(struct ibmvscsi_host_data *hostdata, 122 122 u64 word1, u64 word2); 123 + int (*resume) (struct ibmvscsi_host_data *hostdata); 123 124 }; 124 125 125 126 extern struct ibmvscsi_ops iseriesvscsi_ops;
+6
drivers/scsi/ibmvscsi/iseries_vscsi.c
··· 158 158 0); 159 159 } 160 160 161 + static int iseriesvscsi_resume(struct ibmvscsi_host_data *hostdata) 162 + { 163 + return 0; 164 + } 165 + 161 166 struct ibmvscsi_ops iseriesvscsi_ops = { 162 167 .init_crq_queue = iseriesvscsi_init_crq_queue, 163 168 .release_crq_queue = iseriesvscsi_release_crq_queue, 164 169 .reset_crq_queue = iseriesvscsi_reset_crq_queue, 165 170 .reenable_crq_queue = iseriesvscsi_reenable_crq_queue, 166 171 .send_crq = iseriesvscsi_send_crq, 172 + .resume = iseriesvscsi_resume, 167 173 };
+13
drivers/scsi/ibmvscsi/rpa_vscsi.c
··· 334 334 return rc; 335 335 } 336 336 337 + /** 338 + * rpavscsi_resume: - resume after suspend 339 + * @hostdata: ibmvscsi_host_data of host 340 + * 341 + */ 342 + static int rpavscsi_resume(struct ibmvscsi_host_data *hostdata) 343 + { 344 + vio_disable_interrupts(to_vio_dev(hostdata->dev)); 345 + tasklet_schedule(&hostdata->srp_task); 346 + return 0; 347 + } 348 + 337 349 struct ibmvscsi_ops rpavscsi_ops = { 338 350 .init_crq_queue = rpavscsi_init_crq_queue, 339 351 .release_crq_queue = rpavscsi_release_crq_queue, 340 352 .reset_crq_queue = rpavscsi_reset_crq_queue, 341 353 .reenable_crq_queue = rpavscsi_reenable_crq_queue, 342 354 .send_crq = rpavscsi_send_crq, 355 + .resume = rpavscsi_resume, 343 356 };
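Note: taken together, the four hunks above (ibmvscsi.c, ibmvscsi.h, iseries_vscsi.c, rpa_vscsi.c) add a .resume operation to the ibmvscsi_ops vtable so the common resume path can defer to whichever backend is bound: a no-op on iSeries, "disable the VIO interrupt and schedule the srp tasklet" on RPA. A hedged userspace sketch of that dispatch shape; the names echo the diff but the types are simplified stand-ins.

#include <stdio.h>

/* Simplified stand-in for ibmvscsi_host_data: just enough state to
 * show the backend split. */
struct host_data_model {
	const char *backend;
};

/* The vtable gains a .resume member, as in the ibmvscsi.h hunk. */
struct vscsi_ops_model {
	int (*resume)(struct host_data_model *hostdata);
};

/* iSeries backend: nothing to do on resume. */
static int iseries_resume_model(struct host_data_model *hostdata)
{
	(void)hostdata;
	return 0;
}

/* RPA backend: the real code disables the VIO interrupt and schedules
 * the srp tasklet so a lost interrupt is re-driven; modelled as a print. */
static int rpa_resume_model(struct host_data_model *hostdata)
{
	printf("%s: disable interrupts, schedule srp tasklet\n",
	       hostdata->backend);
	return 0;
}

static const struct vscsi_ops_model iseries_ops = { .resume = iseries_resume_model };
static const struct vscsi_ops_model rpa_ops     = { .resume = rpa_resume_model };

/* Common resume path (ibmvscsi_resume in the diff) simply forwards. */
static int common_resume(const struct vscsi_ops_model *ops,
			 struct host_data_model *hostdata)
{
	return ops->resume(hostdata);
}

int main(void)
{
	struct host_data_model rpa_host = { .backend = "rpa" };
	struct host_data_model iseries_host = { .backend = "iseries" };

	common_resume(&rpa_ops, &rpa_host);
	common_resume(&iseries_ops, &iseries_host);
	return 0;
}

Keeping the suspend/resume behaviour behind the ops table keeps the shared ibmvscsi code backend-agnostic, which is consistent with how the existing CRQ operations are already dispatched.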
+1349 -413
drivers/scsi/ipr.c
··· 72 72 #include <linux/moduleparam.h> 73 73 #include <linux/libata.h> 74 74 #include <linux/hdreg.h> 75 + #include <linux/reboot.h> 76 + #include <linux/stringify.h> 75 77 #include <asm/io.h> 76 78 #include <asm/irq.h> 77 79 #include <asm/processor.h> ··· 93 91 static int ipr_testmode = 0; 94 92 static unsigned int ipr_fastfail = 0; 95 93 static unsigned int ipr_transop_timeout = 0; 96 - static unsigned int ipr_enable_cache = 1; 97 94 static unsigned int ipr_debug = 0; 95 + static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS; 98 96 static unsigned int ipr_dual_ioa_raid = 1; 99 97 static DEFINE_SPINLOCK(ipr_driver_lock); 100 98 ··· 106 104 { 107 105 .set_interrupt_mask_reg = 0x0022C, 108 106 .clr_interrupt_mask_reg = 0x00230, 107 + .clr_interrupt_mask_reg32 = 0x00230, 109 108 .sense_interrupt_mask_reg = 0x0022C, 109 + .sense_interrupt_mask_reg32 = 0x0022C, 110 110 .clr_interrupt_reg = 0x00228, 111 + .clr_interrupt_reg32 = 0x00228, 111 112 .sense_interrupt_reg = 0x00224, 113 + .sense_interrupt_reg32 = 0x00224, 112 114 .ioarrin_reg = 0x00404, 113 115 .sense_uproc_interrupt_reg = 0x00214, 116 + .sense_uproc_interrupt_reg32 = 0x00214, 114 117 .set_uproc_interrupt_reg = 0x00214, 115 - .clr_uproc_interrupt_reg = 0x00218 118 + .set_uproc_interrupt_reg32 = 0x00214, 119 + .clr_uproc_interrupt_reg = 0x00218, 120 + .clr_uproc_interrupt_reg32 = 0x00218 116 121 } 117 122 }, 118 123 { /* Snipe and Scamp */ ··· 128 119 { 129 120 .set_interrupt_mask_reg = 0x00288, 130 121 .clr_interrupt_mask_reg = 0x0028C, 122 + .clr_interrupt_mask_reg32 = 0x0028C, 131 123 .sense_interrupt_mask_reg = 0x00288, 124 + .sense_interrupt_mask_reg32 = 0x00288, 132 125 .clr_interrupt_reg = 0x00284, 126 + .clr_interrupt_reg32 = 0x00284, 133 127 .sense_interrupt_reg = 0x00280, 128 + .sense_interrupt_reg32 = 0x00280, 134 129 .ioarrin_reg = 0x00504, 135 130 .sense_uproc_interrupt_reg = 0x00290, 131 + .sense_uproc_interrupt_reg32 = 0x00290, 136 132 .set_uproc_interrupt_reg = 0x00290, 137 - .clr_uproc_interrupt_reg = 0x00294 133 + .set_uproc_interrupt_reg32 = 0x00290, 134 + .clr_uproc_interrupt_reg = 0x00294, 135 + .clr_uproc_interrupt_reg32 = 0x00294 136 + } 137 + }, 138 + { /* CRoC */ 139 + .mailbox = 0x00040, 140 + .cache_line_size = 0x20, 141 + { 142 + .set_interrupt_mask_reg = 0x00010, 143 + .clr_interrupt_mask_reg = 0x00018, 144 + .clr_interrupt_mask_reg32 = 0x0001C, 145 + .sense_interrupt_mask_reg = 0x00010, 146 + .sense_interrupt_mask_reg32 = 0x00014, 147 + .clr_interrupt_reg = 0x00008, 148 + .clr_interrupt_reg32 = 0x0000C, 149 + .sense_interrupt_reg = 0x00000, 150 + .sense_interrupt_reg32 = 0x00004, 151 + .ioarrin_reg = 0x00070, 152 + .sense_uproc_interrupt_reg = 0x00020, 153 + .sense_uproc_interrupt_reg32 = 0x00024, 154 + .set_uproc_interrupt_reg = 0x00020, 155 + .set_uproc_interrupt_reg32 = 0x00024, 156 + .clr_uproc_interrupt_reg = 0x00028, 157 + .clr_uproc_interrupt_reg32 = 0x0002C, 158 + .init_feedback_reg = 0x0005C, 159 + .dump_addr_reg = 0x00064, 160 + .dump_data_reg = 0x00068 138 161 } 139 162 }, 140 163 }; 141 164 142 165 static const struct ipr_chip_t ipr_chip[] = { 143 - { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, &ipr_chip_cfg[0] }, 144 - { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, &ipr_chip_cfg[0] }, 145 - { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, &ipr_chip_cfg[0] }, 146 - { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, &ipr_chip_cfg[0] }, 147 - { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, &ipr_chip_cfg[0] }, 
148 - { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, &ipr_chip_cfg[1] }, 149 - { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, &ipr_chip_cfg[1] } 166 + { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] }, 167 + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] }, 168 + { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] }, 169 + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] }, 170 + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, &ipr_chip_cfg[0] }, 171 + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] }, 172 + { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] }, 173 + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, &ipr_chip_cfg[2] }, 174 + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, IPR_USE_MSI, IPR_SIS64, &ipr_chip_cfg[2] } 150 175 }; 151 176 152 177 static int ipr_max_bus_speeds [] = { ··· 199 156 MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries"); 200 157 module_param_named(transop_timeout, ipr_transop_timeout, int, 0); 201 158 MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)"); 202 - module_param_named(enable_cache, ipr_enable_cache, int, 0); 203 - MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)"); 204 159 module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR); 205 160 MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)"); 206 161 module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0); 207 162 MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)"); 163 + module_param_named(max_devs, ipr_max_devs, int, 0); 164 + MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. 
" 165 + "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]"); 208 166 MODULE_LICENSE("GPL"); 209 167 MODULE_VERSION(IPR_DRIVER_VERSION); 210 168 ··· 224 180 "FFFE: Soft device bus error recovered by the IOA"}, 225 181 {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL, 226 182 "4101: Soft device bus fabric error"}, 183 + {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL, 184 + "FFFC: Logical block guard error recovered by the device"}, 185 + {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL, 186 + "FFFC: Logical block reference tag error recovered by the device"}, 187 + {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL, 188 + "4171: Recovered scatter list tag / sequence number error"}, 189 + {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL, 190 + "FF3D: Recovered logical block CRC error on IOA to Host transfer"}, 191 + {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL, 192 + "4171: Recovered logical block sequence number error on IOA to Host transfer"}, 193 + {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL, 194 + "FFFD: Recovered logical block reference tag error detected by the IOA"}, 195 + {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL, 196 + "FFFD: Logical block guard error recovered by the IOA"}, 227 197 {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL, 228 198 "FFF9: Device sector reassign successful"}, 229 199 {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL, ··· 294 236 "3120: SCSI bus is not operational"}, 295 237 {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL, 296 238 "4100: Hard device bus fabric error"}, 239 + {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL, 240 + "310C: Logical block guard error detected by the device"}, 241 + {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL, 242 + "310C: Logical block reference tag error detected by the device"}, 243 + {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL, 244 + "4170: Scatter list tag / sequence number error"}, 245 + {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL, 246 + "8150: Logical block CRC error on IOA to Host transfer"}, 247 + {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL, 248 + "4170: Logical block sequence number error on IOA to Host transfer"}, 249 + {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL, 250 + "310D: Logical block reference tag error detected by the IOA"}, 251 + {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL, 252 + "310D: Logical block guard error detected by the IOA"}, 297 253 {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL, 298 254 "9000: IOA reserved area data check"}, 299 255 {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL, 300 256 "9001: IOA reserved area invalid data pattern"}, 301 257 {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL, 302 258 "9002: IOA reserved area LRC error"}, 259 + {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL, 260 + "Hardware Error, IOA metadata access error"}, 303 261 {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL, 304 262 "102E: Out of alternate sectors for disk storage"}, 305 263 {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL, ··· 380 306 "Illegal request, commands not allowed to this device"}, 381 307 {0x05258100, 0, 0, 382 308 "Illegal request, command not allowed to a secondary adapter"}, 309 + {0x05258200, 0, 0, 310 + "Illegal request, command not allowed to a non-optimized resource"}, 383 311 {0x05260000, 0, 0, 384 312 "Illegal request, invalid field in parameter list"}, 385 313 {0x05260100, 0, 0, ··· 544 468 trace_entry->time = jiffies; 545 469 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0]; 546 470 trace_entry->type = type; 547 - trace_entry->ata_op_code = ipr_cmd->ioarcb.add_data.u.regs.command; 471 + if (ipr_cmd->ioa_cfg->sis64) 472 + trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command; 473 + else 474 + trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command; 548 475 
trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff; 549 476 trace_entry->res_handle = ipr_cmd->ioarcb.res_handle; 550 477 trace_entry->u.add_data = add_data; ··· 567 488 { 568 489 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 569 490 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; 570 - dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr); 491 + dma_addr_t dma_addr = ipr_cmd->dma_addr; 571 492 572 493 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); 573 - ioarcb->write_data_transfer_length = 0; 494 + ioarcb->data_transfer_length = 0; 574 495 ioarcb->read_data_transfer_length = 0; 575 - ioarcb->write_ioadl_len = 0; 496 + ioarcb->ioadl_len = 0; 576 497 ioarcb->read_ioadl_len = 0; 577 - ioarcb->write_ioadl_addr = 578 - cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl)); 579 - ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; 498 + 499 + if (ipr_cmd->ioa_cfg->sis64) 500 + ioarcb->u.sis64_addr_data.data_ioadl_addr = 501 + cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); 502 + else { 503 + ioarcb->write_ioadl_addr = 504 + cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); 505 + ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; 506 + } 507 + 580 508 ioasa->ioasc = 0; 581 509 ioasa->residual_data_len = 0; 582 510 ioasa->u.gata.status = 0; ··· 648 562 ioa_cfg->allow_interrupts = 0; 649 563 650 564 /* Set interrupt mask to stop all new interrupts */ 651 - writel(~0, ioa_cfg->regs.set_interrupt_mask_reg); 565 + if (ioa_cfg->sis64) 566 + writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg); 567 + else 568 + writel(~0, ioa_cfg->regs.set_interrupt_mask_reg); 652 569 653 570 /* Clear any pending interrupts */ 654 - writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg); 571 + if (ioa_cfg->sis64) 572 + writel(~0, ioa_cfg->regs.clr_interrupt_reg); 573 + writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32); 655 574 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); 656 575 } 657 576 ··· 784 693 } 785 694 786 695 /** 696 + * ipr_send_command - Send driver initiated requests. 697 + * @ipr_cmd: ipr command struct 698 + * 699 + * This function sends a command to the adapter using the correct write call. 700 + * In the case of sis64, calculate the ioarcb size required. Then or in the 701 + * appropriate bits. 702 + * 703 + * Return value: 704 + * none 705 + **/ 706 + static void ipr_send_command(struct ipr_cmnd *ipr_cmd) 707 + { 708 + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 709 + dma_addr_t send_dma_addr = ipr_cmd->dma_addr; 710 + 711 + if (ioa_cfg->sis64) { 712 + /* The default size is 256 bytes */ 713 + send_dma_addr |= 0x1; 714 + 715 + /* If the number of ioadls * size of ioadl > 128 bytes, 716 + then use a 512 byte ioarcb */ 717 + if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 ) 718 + send_dma_addr |= 0x4; 719 + writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg); 720 + } else 721 + writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg); 722 + } 723 + 724 + /** 787 725 * ipr_do_req - Send driver initiated requests. 
788 726 * @ipr_cmd: ipr command struct 789 727 * @done: done function ··· 844 724 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0); 845 725 846 726 mb(); 847 - writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr), 848 - ioa_cfg->regs.ioarrin_reg); 727 + 728 + ipr_send_command(ipr_cmd); 849 729 } 850 730 851 731 /** ··· 864 744 ipr_cmd->sibling = NULL; 865 745 else 866 746 complete(&ipr_cmd->completion); 747 + } 748 + 749 + /** 750 + * ipr_init_ioadl - initialize the ioadl for the correct SIS type 751 + * @ipr_cmd: ipr command struct 752 + * @dma_addr: dma address 753 + * @len: transfer length 754 + * @flags: ioadl flag value 755 + * 756 + * This function initializes an ioadl in the case where there is only a single 757 + * descriptor. 758 + * 759 + * Return value: 760 + * nothing 761 + **/ 762 + static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr, 763 + u32 len, int flags) 764 + { 765 + struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; 766 + struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; 767 + 768 + ipr_cmd->dma_use_sg = 1; 769 + 770 + if (ipr_cmd->ioa_cfg->sis64) { 771 + ioadl64->flags = cpu_to_be32(flags); 772 + ioadl64->data_len = cpu_to_be32(len); 773 + ioadl64->address = cpu_to_be64(dma_addr); 774 + 775 + ipr_cmd->ioarcb.ioadl_len = 776 + cpu_to_be32(sizeof(struct ipr_ioadl64_desc)); 777 + ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len); 778 + } else { 779 + ioadl->flags_and_data_len = cpu_to_be32(flags | len); 780 + ioadl->address = cpu_to_be32(dma_addr); 781 + 782 + if (flags == IPR_IOADL_FLAGS_READ_LAST) { 783 + ipr_cmd->ioarcb.read_ioadl_len = 784 + cpu_to_be32(sizeof(struct ipr_ioadl_desc)); 785 + ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len); 786 + } else { 787 + ipr_cmd->ioarcb.ioadl_len = 788 + cpu_to_be32(sizeof(struct ipr_ioadl_desc)); 789 + ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len); 790 + } 791 + } 867 792 } 868 793 869 794 /** ··· 968 803 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff; 969 804 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff; 970 805 971 - ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam)); 972 - ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); 973 - ipr_cmd->ioadl[0].flags_and_data_len = 974 - cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam)); 975 - ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma); 806 + ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma, 807 + sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST); 976 808 977 809 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE) 978 810 ipr_cmd->done = ipr_process_ccn; ··· 979 817 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR); 980 818 981 819 mb(); 982 - writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr), 983 - ioa_cfg->regs.ioarrin_reg); 820 + 821 + ipr_send_command(ipr_cmd); 984 822 } else { 985 823 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); 986 824 } 987 825 } 988 826 989 827 /** 990 - * ipr_init_res_entry - Initialize a resource entry struct. 
828 + * ipr_update_ata_class - Update the ata class in the resource entry 991 829 * @res: resource entry struct 830 + * @proto: cfgte device bus protocol value 992 831 * 993 832 * Return value: 994 833 * none 995 834 **/ 996 - static void ipr_init_res_entry(struct ipr_resource_entry *res) 835 + static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto) 997 836 { 837 + switch(proto) { 838 + case IPR_PROTO_SATA: 839 + case IPR_PROTO_SAS_STP: 840 + res->ata_class = ATA_DEV_ATA; 841 + break; 842 + case IPR_PROTO_SATA_ATAPI: 843 + case IPR_PROTO_SAS_STP_ATAPI: 844 + res->ata_class = ATA_DEV_ATAPI; 845 + break; 846 + default: 847 + res->ata_class = ATA_DEV_UNKNOWN; 848 + break; 849 + }; 850 + } 851 + 852 + /** 853 + * ipr_init_res_entry - Initialize a resource entry struct. 854 + * @res: resource entry struct 855 + * @cfgtew: config table entry wrapper struct 856 + * 857 + * Return value: 858 + * none 859 + **/ 860 + static void ipr_init_res_entry(struct ipr_resource_entry *res, 861 + struct ipr_config_table_entry_wrapper *cfgtew) 862 + { 863 + int found = 0; 864 + unsigned int proto; 865 + struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg; 866 + struct ipr_resource_entry *gscsi_res = NULL; 867 + 998 868 res->needs_sync_complete = 0; 999 869 res->in_erp = 0; 1000 870 res->add_to_ml = 0; ··· 1034 840 res->resetting_device = 0; 1035 841 res->sdev = NULL; 1036 842 res->sata_port = NULL; 843 + 844 + if (ioa_cfg->sis64) { 845 + proto = cfgtew->u.cfgte64->proto; 846 + res->res_flags = cfgtew->u.cfgte64->res_flags; 847 + res->qmodel = IPR_QUEUEING_MODEL64(res); 848 + res->type = cfgtew->u.cfgte64->res_type & 0x0f; 849 + 850 + memcpy(res->res_path, &cfgtew->u.cfgte64->res_path, 851 + sizeof(res->res_path)); 852 + 853 + res->bus = 0; 854 + res->lun = scsilun_to_int(&res->dev_lun); 855 + 856 + if (res->type == IPR_RES_TYPE_GENERIC_SCSI) { 857 + list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) { 858 + if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) { 859 + found = 1; 860 + res->target = gscsi_res->target; 861 + break; 862 + } 863 + } 864 + if (!found) { 865 + res->target = find_first_zero_bit(ioa_cfg->target_ids, 866 + ioa_cfg->max_devs_supported); 867 + set_bit(res->target, ioa_cfg->target_ids); 868 + } 869 + 870 + memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun, 871 + sizeof(res->dev_lun.scsi_lun)); 872 + } else if (res->type == IPR_RES_TYPE_IOAFP) { 873 + res->bus = IPR_IOAFP_VIRTUAL_BUS; 874 + res->target = 0; 875 + } else if (res->type == IPR_RES_TYPE_ARRAY) { 876 + res->bus = IPR_ARRAY_VIRTUAL_BUS; 877 + res->target = find_first_zero_bit(ioa_cfg->array_ids, 878 + ioa_cfg->max_devs_supported); 879 + set_bit(res->target, ioa_cfg->array_ids); 880 + } else if (res->type == IPR_RES_TYPE_VOLUME_SET) { 881 + res->bus = IPR_VSET_VIRTUAL_BUS; 882 + res->target = find_first_zero_bit(ioa_cfg->vset_ids, 883 + ioa_cfg->max_devs_supported); 884 + set_bit(res->target, ioa_cfg->vset_ids); 885 + } else { 886 + res->target = find_first_zero_bit(ioa_cfg->target_ids, 887 + ioa_cfg->max_devs_supported); 888 + set_bit(res->target, ioa_cfg->target_ids); 889 + } 890 + } else { 891 + proto = cfgtew->u.cfgte->proto; 892 + res->qmodel = IPR_QUEUEING_MODEL(res); 893 + res->flags = cfgtew->u.cfgte->flags; 894 + if (res->flags & IPR_IS_IOA_RESOURCE) 895 + res->type = IPR_RES_TYPE_IOAFP; 896 + else 897 + res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f; 898 + 899 + res->bus = cfgtew->u.cfgte->res_addr.bus; 900 + res->target = cfgtew->u.cfgte->res_addr.target; 901 + res->lun = 
cfgtew->u.cfgte->res_addr.lun; 902 + } 903 + 904 + ipr_update_ata_class(res, proto); 905 + } 906 + 907 + /** 908 + * ipr_is_same_device - Determine if two devices are the same. 909 + * @res: resource entry struct 910 + * @cfgtew: config table entry wrapper struct 911 + * 912 + * Return value: 913 + * 1 if the devices are the same / 0 otherwise 914 + **/ 915 + static int ipr_is_same_device(struct ipr_resource_entry *res, 916 + struct ipr_config_table_entry_wrapper *cfgtew) 917 + { 918 + if (res->ioa_cfg->sis64) { 919 + if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id, 920 + sizeof(cfgtew->u.cfgte64->dev_id)) && 921 + !memcmp(&res->lun, &cfgtew->u.cfgte64->lun, 922 + sizeof(cfgtew->u.cfgte64->lun))) { 923 + return 1; 924 + } 925 + } else { 926 + if (res->bus == cfgtew->u.cfgte->res_addr.bus && 927 + res->target == cfgtew->u.cfgte->res_addr.target && 928 + res->lun == cfgtew->u.cfgte->res_addr.lun) 929 + return 1; 930 + } 931 + 932 + return 0; 933 + } 934 + 935 + /** 936 + * ipr_format_resource_path - Format the resource path for printing. 937 + * @res_path: resource path 938 + * @buf: buffer 939 + * 940 + * Return value: 941 + * pointer to buffer 942 + **/ 943 + static char *ipr_format_resource_path(u8 *res_path, char *buffer) 944 + { 945 + int i; 946 + 947 + sprintf(buffer, "%02X", res_path[0]); 948 + for (i=1; res_path[i] != 0xff; i++) 949 + sprintf(buffer, "%s-%02X", buffer, res_path[i]); 950 + 951 + return buffer; 952 + } 953 + 954 + /** 955 + * ipr_update_res_entry - Update the resource entry. 956 + * @res: resource entry struct 957 + * @cfgtew: config table entry wrapper struct 958 + * 959 + * Return value: 960 + * none 961 + **/ 962 + static void ipr_update_res_entry(struct ipr_resource_entry *res, 963 + struct ipr_config_table_entry_wrapper *cfgtew) 964 + { 965 + char buffer[IPR_MAX_RES_PATH_LENGTH]; 966 + unsigned int proto; 967 + int new_path = 0; 968 + 969 + if (res->ioa_cfg->sis64) { 970 + res->flags = cfgtew->u.cfgte64->flags; 971 + res->res_flags = cfgtew->u.cfgte64->res_flags; 972 + res->type = cfgtew->u.cfgte64->res_type & 0x0f; 973 + 974 + memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data, 975 + sizeof(struct ipr_std_inq_data)); 976 + 977 + res->qmodel = IPR_QUEUEING_MODEL64(res); 978 + proto = cfgtew->u.cfgte64->proto; 979 + res->res_handle = cfgtew->u.cfgte64->res_handle; 980 + res->dev_id = cfgtew->u.cfgte64->dev_id; 981 + 982 + memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun, 983 + sizeof(res->dev_lun.scsi_lun)); 984 + 985 + if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path, 986 + sizeof(res->res_path))) { 987 + memcpy(res->res_path, &cfgtew->u.cfgte64->res_path, 988 + sizeof(res->res_path)); 989 + new_path = 1; 990 + } 991 + 992 + if (res->sdev && new_path) 993 + sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n", 994 + ipr_format_resource_path(&res->res_path[0], &buffer[0])); 995 + } else { 996 + res->flags = cfgtew->u.cfgte->flags; 997 + if (res->flags & IPR_IS_IOA_RESOURCE) 998 + res->type = IPR_RES_TYPE_IOAFP; 999 + else 1000 + res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f; 1001 + 1002 + memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data, 1003 + sizeof(struct ipr_std_inq_data)); 1004 + 1005 + res->qmodel = IPR_QUEUEING_MODEL(res); 1006 + proto = cfgtew->u.cfgte->proto; 1007 + res->res_handle = cfgtew->u.cfgte->res_handle; 1008 + } 1009 + 1010 + ipr_update_ata_class(res, proto); 1011 + } 1012 + 1013 + /** 1014 + * ipr_clear_res_target - Clear the bit in the bit map representing the target 1015 + * for the resource. 
1016 + * @res: resource entry struct 1017 + * @cfgtew: config table entry wrapper struct 1018 + * 1019 + * Return value: 1020 + * none 1021 + **/ 1022 + static void ipr_clear_res_target(struct ipr_resource_entry *res) 1023 + { 1024 + struct ipr_resource_entry *gscsi_res = NULL; 1025 + struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg; 1026 + 1027 + if (!ioa_cfg->sis64) 1028 + return; 1029 + 1030 + if (res->bus == IPR_ARRAY_VIRTUAL_BUS) 1031 + clear_bit(res->target, ioa_cfg->array_ids); 1032 + else if (res->bus == IPR_VSET_VIRTUAL_BUS) 1033 + clear_bit(res->target, ioa_cfg->vset_ids); 1034 + else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) { 1035 + list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) 1036 + if (gscsi_res->dev_id == res->dev_id && gscsi_res != res) 1037 + return; 1038 + clear_bit(res->target, ioa_cfg->target_ids); 1039 + 1040 + } else if (res->bus == 0) 1041 + clear_bit(res->target, ioa_cfg->target_ids); 1037 1042 } 1038 1043 1039 1044 /** ··· 1244 851 * none 1245 852 **/ 1246 853 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg, 1247 - struct ipr_hostrcb *hostrcb) 854 + struct ipr_hostrcb *hostrcb) 1248 855 { 1249 856 struct ipr_resource_entry *res = NULL; 1250 - struct ipr_config_table_entry *cfgte; 857 + struct ipr_config_table_entry_wrapper cfgtew; 858 + __be32 cc_res_handle; 859 + 1251 860 u32 is_ndn = 1; 1252 861 1253 - cfgte = &hostrcb->hcam.u.ccn.cfgte; 862 + if (ioa_cfg->sis64) { 863 + cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64; 864 + cc_res_handle = cfgtew.u.cfgte64->res_handle; 865 + } else { 866 + cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte; 867 + cc_res_handle = cfgtew.u.cfgte->res_handle; 868 + } 1254 869 1255 870 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 1256 - if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr, 1257 - sizeof(cfgte->res_addr))) { 871 + if (res->res_handle == cc_res_handle) { 1258 872 is_ndn = 0; 1259 873 break; 1260 874 } ··· 1279 879 struct ipr_resource_entry, queue); 1280 880 1281 881 list_del(&res->queue); 1282 - ipr_init_res_entry(res); 882 + ipr_init_res_entry(res, &cfgtew); 1283 883 list_add_tail(&res->queue, &ioa_cfg->used_res_q); 1284 884 } 1285 885 1286 - memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry)); 886 + ipr_update_res_entry(res, &cfgtew); 1287 887 1288 888 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) { 1289 889 if (res->sdev) { 1290 890 res->del_from_ml = 1; 1291 - res->cfgte.res_handle = IPR_INVALID_RES_HANDLE; 891 + res->res_handle = IPR_INVALID_RES_HANDLE; 1292 892 if (ioa_cfg->allow_ml_add_del) 1293 893 schedule_work(&ioa_cfg->work_q); 1294 - } else 894 + } else { 895 + ipr_clear_res_target(res); 1295 896 list_move_tail(&res->queue, &ioa_cfg->free_res_q); 897 + } 1296 898 } else if (!res->sdev) { 1297 899 res->add_to_ml = 1; 1298 900 if (ioa_cfg->allow_ml_add_del) ··· 1446 1044 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg, 1447 1045 struct ipr_hostrcb *hostrcb) 1448 1046 { 1449 - struct ipr_hostrcb_type_12_error *error = 1450 - &hostrcb->hcam.u.error.u.type_12_error; 1047 + struct ipr_hostrcb_type_12_error *error; 1048 + 1049 + if (ioa_cfg->sis64) 1050 + error = &hostrcb->hcam.u.error64.u.type_12_error; 1051 + else 1052 + error = &hostrcb->hcam.u.error.u.type_12_error; 1451 1053 1452 1054 ipr_err("-----Current Configuration-----\n"); 1453 1055 ipr_err("Cache Directory Card Information:\n"); ··· 1530 1124 ipr_err_separator; 1531 1125 1532 1126 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", 
i + 1); 1127 + ipr_log_ext_vpd(&dev_entry->vpd); 1128 + 1129 + ipr_err("-----New Device Information-----\n"); 1130 + ipr_log_ext_vpd(&dev_entry->new_vpd); 1131 + 1132 + ipr_err("Cache Directory Card Information:\n"); 1133 + ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd); 1134 + 1135 + ipr_err("Adapter Card Information:\n"); 1136 + ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd); 1137 + } 1138 + } 1139 + 1140 + /** 1141 + * ipr_log_sis64_config_error - Log a device error. 1142 + * @ioa_cfg: ioa config struct 1143 + * @hostrcb: hostrcb struct 1144 + * 1145 + * Return value: 1146 + * none 1147 + **/ 1148 + static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg, 1149 + struct ipr_hostrcb *hostrcb) 1150 + { 1151 + int errors_logged, i; 1152 + struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry; 1153 + struct ipr_hostrcb_type_23_error *error; 1154 + char buffer[IPR_MAX_RES_PATH_LENGTH]; 1155 + 1156 + error = &hostrcb->hcam.u.error64.u.type_23_error; 1157 + errors_logged = be32_to_cpu(error->errors_logged); 1158 + 1159 + ipr_err("Device Errors Detected/Logged: %d/%d\n", 1160 + be32_to_cpu(error->errors_detected), errors_logged); 1161 + 1162 + dev_entry = error->dev; 1163 + 1164 + for (i = 0; i < errors_logged; i++, dev_entry++) { 1165 + ipr_err_separator; 1166 + 1167 + ipr_err("Device %d : %s", i + 1, 1168 + ipr_format_resource_path(&dev_entry->res_path[0], &buffer[0])); 1533 1169 ipr_log_ext_vpd(&dev_entry->vpd); 1534 1170 1535 1171 ipr_err("-----New Device Information-----\n"); ··· 1779 1331 { 1780 1332 struct ipr_hostrcb_type_17_error *error; 1781 1333 1782 - error = &hostrcb->hcam.u.error.u.type_17_error; 1334 + if (ioa_cfg->sis64) 1335 + error = &hostrcb->hcam.u.error64.u.type_17_error; 1336 + else 1337 + error = &hostrcb->hcam.u.error.u.type_17_error; 1338 + 1783 1339 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; 1784 1340 strim(error->failure_reason); 1785 1341 ··· 1888 1436 1889 1437 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state, 1890 1438 fabric->ioa_port, fabric->cascaded_expander, fabric->phy); 1439 + } 1440 + 1441 + /** 1442 + * ipr_log64_fabric_path - Log a fabric path error 1443 + * @hostrcb: hostrcb struct 1444 + * @fabric: fabric descriptor 1445 + * 1446 + * Return value: 1447 + * none 1448 + **/ 1449 + static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb, 1450 + struct ipr_hostrcb64_fabric_desc *fabric) 1451 + { 1452 + int i, j; 1453 + u8 path_state = fabric->path_state; 1454 + u8 active = path_state & IPR_PATH_ACTIVE_MASK; 1455 + u8 state = path_state & IPR_PATH_STATE_MASK; 1456 + char buffer[IPR_MAX_RES_PATH_LENGTH]; 1457 + 1458 + for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) { 1459 + if (path_active_desc[i].active != active) 1460 + continue; 1461 + 1462 + for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) { 1463 + if (path_state_desc[j].state != state) 1464 + continue; 1465 + 1466 + ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n", 1467 + path_active_desc[i].desc, path_state_desc[j].desc, 1468 + ipr_format_resource_path(&fabric->res_path[0], &buffer[0])); 1469 + return; 1470 + } 1471 + } 1472 + 1473 + ipr_err("Path state=%02X Resource Path=%s\n", path_state, 1474 + ipr_format_resource_path(&fabric->res_path[0], &buffer[0])); 1891 1475 } 1892 1476 1893 1477 static const struct { ··· 2035 1547 } 2036 1548 2037 1549 /** 1550 + * ipr_log64_path_elem - Log a fabric path element. 
1551 + * @hostrcb: hostrcb struct 1552 + * @cfg: fabric path element struct 1553 + * 1554 + * Return value: 1555 + * none 1556 + **/ 1557 + static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb, 1558 + struct ipr_hostrcb64_config_element *cfg) 1559 + { 1560 + int i, j; 1561 + u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK; 1562 + u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK; 1563 + u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK; 1564 + char buffer[IPR_MAX_RES_PATH_LENGTH]; 1565 + 1566 + if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64) 1567 + return; 1568 + 1569 + for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) { 1570 + if (path_type_desc[i].type != type) 1571 + continue; 1572 + 1573 + for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) { 1574 + if (path_status_desc[j].status != status) 1575 + continue; 1576 + 1577 + ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n", 1578 + path_status_desc[j].desc, path_type_desc[i].desc, 1579 + ipr_format_resource_path(&cfg->res_path[0], &buffer[0]), 1580 + link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], 1581 + be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); 1582 + return; 1583 + } 1584 + } 1585 + ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s " 1586 + "WWN=%08X%08X\n", cfg->type_status, 1587 + ipr_format_resource_path(&cfg->res_path[0], &buffer[0]), 1588 + link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], 1589 + be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); 1590 + } 1591 + 1592 + /** 2038 1593 * ipr_log_fabric_error - Log a fabric error. 2039 1594 * @ioa_cfg: ioa config struct 2040 1595 * @hostrcb: hostrcb struct ··· 2108 1577 2109 1578 add_len -= be16_to_cpu(fabric->length); 2110 1579 fabric = (struct ipr_hostrcb_fabric_desc *) 1580 + ((unsigned long)fabric + be16_to_cpu(fabric->length)); 1581 + } 1582 + 1583 + ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len); 1584 + } 1585 + 1586 + /** 1587 + * ipr_log_sis64_array_error - Log a sis64 array error. 1588 + * @ioa_cfg: ioa config struct 1589 + * @hostrcb: hostrcb struct 1590 + * 1591 + * Return value: 1592 + * none 1593 + **/ 1594 + static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg, 1595 + struct ipr_hostrcb *hostrcb) 1596 + { 1597 + int i, num_entries; 1598 + struct ipr_hostrcb_type_24_error *error; 1599 + struct ipr_hostrcb64_array_data_entry *array_entry; 1600 + char buffer[IPR_MAX_RES_PATH_LENGTH]; 1601 + const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... 
IPR_SERIAL_NUM_LEN-1] = '0' }; 1602 + 1603 + error = &hostrcb->hcam.u.error64.u.type_24_error; 1604 + 1605 + ipr_err_separator; 1606 + 1607 + ipr_err("RAID %s Array Configuration: %s\n", 1608 + error->protection_level, 1609 + ipr_format_resource_path(&error->last_res_path[0], &buffer[0])); 1610 + 1611 + ipr_err_separator; 1612 + 1613 + array_entry = error->array_member; 1614 + num_entries = min_t(u32, be32_to_cpu(error->num_entries), 1615 + sizeof(error->array_member)); 1616 + 1617 + for (i = 0; i < num_entries; i++, array_entry++) { 1618 + 1619 + if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN)) 1620 + continue; 1621 + 1622 + if (error->exposed_mode_adn == i) 1623 + ipr_err("Exposed Array Member %d:\n", i); 1624 + else 1625 + ipr_err("Array Member %d:\n", i); 1626 + 1627 + ipr_err("Array Member %d:\n", i); 1628 + ipr_log_ext_vpd(&array_entry->vpd); 1629 + ipr_err("Current Location: %s", 1630 + ipr_format_resource_path(&array_entry->res_path[0], &buffer[0])); 1631 + ipr_err("Expected Location: %s", 1632 + ipr_format_resource_path(&array_entry->expected_res_path[0], &buffer[0])); 1633 + 1634 + ipr_err_separator; 1635 + } 1636 + } 1637 + 1638 + /** 1639 + * ipr_log_sis64_fabric_error - Log a sis64 fabric error. 1640 + * @ioa_cfg: ioa config struct 1641 + * @hostrcb: hostrcb struct 1642 + * 1643 + * Return value: 1644 + * none 1645 + **/ 1646 + static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg, 1647 + struct ipr_hostrcb *hostrcb) 1648 + { 1649 + struct ipr_hostrcb_type_30_error *error; 1650 + struct ipr_hostrcb64_fabric_desc *fabric; 1651 + struct ipr_hostrcb64_config_element *cfg; 1652 + int i, add_len; 1653 + 1654 + error = &hostrcb->hcam.u.error64.u.type_30_error; 1655 + 1656 + error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; 1657 + ipr_hcam_err(hostrcb, "%s\n", error->failure_reason); 1658 + 1659 + add_len = be32_to_cpu(hostrcb->hcam.length) - 1660 + (offsetof(struct ipr_hostrcb64_error, u) + 1661 + offsetof(struct ipr_hostrcb_type_30_error, desc)); 1662 + 1663 + for (i = 0, fabric = error->desc; i < error->num_entries; i++) { 1664 + ipr_log64_fabric_path(hostrcb, fabric); 1665 + for_each_fabric_cfg(fabric, cfg) 1666 + ipr_log64_path_elem(hostrcb, cfg); 1667 + 1668 + add_len -= be16_to_cpu(fabric->length); 1669 + fabric = (struct ipr_hostrcb64_fabric_desc *) 2111 1670 ((unsigned long)fabric + be16_to_cpu(fabric->length)); 2112 1671 } 2113 1672 ··· 2263 1642 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST) 2264 1643 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n"); 2265 1644 2266 - ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc); 1645 + if (ioa_cfg->sis64) 1646 + ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc); 1647 + else 1648 + ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc); 2267 1649 2268 - if (ioasc == IPR_IOASC_BUS_WAS_RESET || 2269 - ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) { 1650 + if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET || 1651 + ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) { 2270 1652 /* Tell the midlayer we had a bus reset so it will handle the UA properly */ 2271 1653 scsi_report_bus_reset(ioa_cfg->host, 2272 - hostrcb->hcam.u.error.failing_dev_res_addr.bus); 1654 + hostrcb->hcam.u.error.fd_res_addr.bus); 2273 1655 } 2274 1656 2275 1657 error_index = ipr_get_error(ioasc); ··· 2320 1696 case IPR_HOST_RCB_OVERLAY_ID_20: 2321 1697 ipr_log_fabric_error(ioa_cfg, hostrcb); 2322 1698 break; 1699 + case IPR_HOST_RCB_OVERLAY_ID_23: 1700 + 
ipr_log_sis64_config_error(ioa_cfg, hostrcb); 1701 + break; 1702 + case IPR_HOST_RCB_OVERLAY_ID_24: 1703 + case IPR_HOST_RCB_OVERLAY_ID_26: 1704 + ipr_log_sis64_array_error(ioa_cfg, hostrcb); 1705 + break; 1706 + case IPR_HOST_RCB_OVERLAY_ID_30: 1707 + ipr_log_sis64_fabric_error(ioa_cfg, hostrcb); 1708 + break; 2323 1709 case IPR_HOST_RCB_OVERLAY_ID_1: 2324 1710 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT: 2325 1711 default: ··· 2354 1720 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 2355 1721 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; 2356 1722 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); 2357 - u32 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc); 1723 + u32 fd_ioasc; 1724 + 1725 + if (ioa_cfg->sis64) 1726 + fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc); 1727 + else 1728 + fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc); 2358 1729 2359 1730 list_del(&hostrcb->queue); 2360 1731 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); ··· 2484 1845 ipr_find_ses_entry(struct ipr_resource_entry *res) 2485 1846 { 2486 1847 int i, j, matches; 1848 + struct ipr_std_inq_vpids *vpids; 2487 1849 const struct ipr_ses_table_entry *ste = ipr_ses_table; 2488 1850 2489 1851 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) { 2490 1852 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) { 2491 1853 if (ste->compare_product_id_byte[j] == 'X') { 2492 - if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j]) 1854 + vpids = &res->std_inq_data.vpids; 1855 + if (vpids->product_id[j] == ste->product_id[j]) 2493 1856 matches++; 2494 1857 else 2495 1858 break; ··· 2526 1885 2527 1886 /* Loop through each config table entry in the config table buffer */ 2528 1887 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 2529 - if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data))) 1888 + if (!(IPR_IS_SES_DEVICE(res->std_inq_data))) 2530 1889 continue; 2531 1890 2532 - if (bus != res->cfgte.res_addr.bus) 1891 + if (bus != res->bus) 2533 1892 continue; 2534 1893 2535 1894 if (!(ste = ipr_find_ses_entry(res))) ··· 2575 1934 } 2576 1935 2577 1936 /** 1937 + * ipr_get_sis64_dump_data_section - Dump IOA memory 1938 + * @ioa_cfg: ioa config struct 1939 + * @start_addr: adapter address to dump 1940 + * @dest: destination kernel buffer 1941 + * @length_in_words: length to dump in 4 byte words 1942 + * 1943 + * Return value: 1944 + * 0 on success 1945 + **/ 1946 + static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg, 1947 + u32 start_addr, 1948 + __be32 *dest, u32 length_in_words) 1949 + { 1950 + int i; 1951 + 1952 + for (i = 0; i < length_in_words; i++) { 1953 + writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg); 1954 + *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg)); 1955 + dest++; 1956 + } 1957 + 1958 + return 0; 1959 + } 1960 + 1961 + /** 2578 1962 * ipr_get_ldump_data_section - Dump IOA memory 2579 1963 * @ioa_cfg: ioa config struct 2580 1964 * @start_addr: adapter address to dump ··· 2616 1950 volatile u32 temp_pcii_reg; 2617 1951 int i, delay = 0; 2618 1952 1953 + if (ioa_cfg->sis64) 1954 + return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr, 1955 + dest, length_in_words); 1956 + 2619 1957 /* Write IOA interrupt reg starting LDUMP state */ 2620 1958 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT), 2621 - ioa_cfg->regs.set_uproc_interrupt_reg); 1959 + ioa_cfg->regs.set_uproc_interrupt_reg32); 2622 1960 2623 1961 /* Wait for IO debug acknowledge */ 2624 1962 if (ipr_wait_iodbg_ack(ioa_cfg, ··· 2641 1971 2642 1972 /* Signal 
address valid - clear IOA Reset alert */ 2643 1973 writel(IPR_UPROCI_RESET_ALERT, 2644 - ioa_cfg->regs.clr_uproc_interrupt_reg); 1974 + ioa_cfg->regs.clr_uproc_interrupt_reg32); 2645 1975 2646 1976 for (i = 0; i < length_in_words; i++) { 2647 1977 /* Wait for IO debug acknowledge */ ··· 2666 1996 2667 1997 /* Signal end of block transfer. Set reset alert then clear IO debug ack */ 2668 1998 writel(IPR_UPROCI_RESET_ALERT, 2669 - ioa_cfg->regs.set_uproc_interrupt_reg); 1999 + ioa_cfg->regs.set_uproc_interrupt_reg32); 2670 2000 2671 2001 writel(IPR_UPROCI_IO_DEBUG_ALERT, 2672 - ioa_cfg->regs.clr_uproc_interrupt_reg); 2002 + ioa_cfg->regs.clr_uproc_interrupt_reg32); 2673 2003 2674 2004 /* Signal dump data received - Clear IO debug Ack */ 2675 2005 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ··· 2678 2008 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */ 2679 2009 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) { 2680 2010 temp_pcii_reg = 2681 - readl(ioa_cfg->regs.sense_uproc_interrupt_reg); 2011 + readl(ioa_cfg->regs.sense_uproc_interrupt_reg32); 2682 2012 2683 2013 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT)) 2684 2014 return 0; ··· 2877 2207 u32 num_entries, start_off, end_off; 2878 2208 u32 bytes_to_copy, bytes_copied, rc; 2879 2209 struct ipr_sdt *sdt; 2210 + int valid = 1; 2880 2211 int i; 2881 2212 2882 2213 ENTER; ··· 2891 2220 2892 2221 start_addr = readl(ioa_cfg->ioa_mailbox); 2893 2222 2894 - if (!ipr_sdt_is_fmt2(start_addr)) { 2223 + if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) { 2895 2224 dev_err(&ioa_cfg->pdev->dev, 2896 2225 "Invalid dump table format: %lx\n", start_addr); 2897 2226 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); ··· 2920 2249 2921 2250 /* IOA Dump entry */ 2922 2251 ipr_init_dump_entry_hdr(&ioa_dump->hdr); 2923 - ioa_dump->format = IPR_SDT_FMT2; 2924 2252 ioa_dump->hdr.len = 0; 2925 2253 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY; 2926 2254 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID; ··· 2934 2264 sizeof(struct ipr_sdt) / sizeof(__be32)); 2935 2265 2936 2266 /* Smart Dump table is ready to use and the first entry is valid */ 2937 - if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) { 2267 + if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) && 2268 + (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) { 2938 2269 dev_err(&ioa_cfg->pdev->dev, 2939 2270 "Dump of IOA failed. 
Dump table not valid: %d, %X.\n", 2940 2271 rc, be32_to_cpu(sdt->hdr.state)); ··· 2959 2288 } 2960 2289 2961 2290 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) { 2962 - sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset); 2963 - start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK; 2964 - end_off = be32_to_cpu(sdt->entry[i].end_offset); 2291 + sdt_word = be32_to_cpu(sdt->entry[i].start_token); 2292 + if (ioa_cfg->sis64) 2293 + bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token); 2294 + else { 2295 + start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK; 2296 + end_off = be32_to_cpu(sdt->entry[i].end_token); 2965 2297 2966 - if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) { 2967 - bytes_to_copy = end_off - start_off; 2298 + if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) 2299 + bytes_to_copy = end_off - start_off; 2300 + else 2301 + valid = 0; 2302 + } 2303 + if (valid) { 2968 2304 if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) { 2969 2305 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY; 2970 2306 continue; ··· 3100 2422 3101 2423 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 3102 2424 if (res->add_to_ml) { 3103 - bus = res->cfgte.res_addr.bus; 3104 - target = res->cfgte.res_addr.target; 3105 - lun = res->cfgte.res_addr.lun; 2425 + bus = res->bus; 2426 + target = res->target; 2427 + lun = res->lun; 3106 2428 res->add_to_ml = 0; 3107 2429 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3108 2430 scsi_add_device(ioa_cfg->host, bus, target, lun); ··· 3155 2477 .read = ipr_read_trace, 3156 2478 }; 3157 2479 #endif 3158 - 3159 - static const struct { 3160 - enum ipr_cache_state state; 3161 - char *name; 3162 - } cache_state [] = { 3163 - { CACHE_NONE, "none" }, 3164 - { CACHE_DISABLED, "disabled" }, 3165 - { CACHE_ENABLED, "enabled" } 3166 - }; 3167 - 3168 - /** 3169 - * ipr_show_write_caching - Show the write caching attribute 3170 - * @dev: device struct 3171 - * @buf: buffer 3172 - * 3173 - * Return value: 3174 - * number of bytes printed to buffer 3175 - **/ 3176 - static ssize_t ipr_show_write_caching(struct device *dev, 3177 - struct device_attribute *attr, char *buf) 3178 - { 3179 - struct Scsi_Host *shost = class_to_shost(dev); 3180 - struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3181 - unsigned long lock_flags = 0; 3182 - int i, len = 0; 3183 - 3184 - spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3185 - for (i = 0; i < ARRAY_SIZE(cache_state); i++) { 3186 - if (cache_state[i].state == ioa_cfg->cache_state) { 3187 - len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name); 3188 - break; 3189 - } 3190 - } 3191 - spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3192 - return len; 3193 - } 3194 - 3195 - 3196 - /** 3197 - * ipr_store_write_caching - Enable/disable adapter write cache 3198 - * @dev: device struct 3199 - * @buf: buffer 3200 - * @count: buffer size 3201 - * 3202 - * This function will enable/disable adapter write cache. 
3203 - * 3204 - * Return value: 3205 - * count on success / other on failure 3206 - **/ 3207 - static ssize_t ipr_store_write_caching(struct device *dev, 3208 - struct device_attribute *attr, 3209 - const char *buf, size_t count) 3210 - { 3211 - struct Scsi_Host *shost = class_to_shost(dev); 3212 - struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3213 - unsigned long lock_flags = 0; 3214 - enum ipr_cache_state new_state = CACHE_INVALID; 3215 - int i; 3216 - 3217 - if (!capable(CAP_SYS_ADMIN)) 3218 - return -EACCES; 3219 - if (ioa_cfg->cache_state == CACHE_NONE) 3220 - return -EINVAL; 3221 - 3222 - for (i = 0; i < ARRAY_SIZE(cache_state); i++) { 3223 - if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) { 3224 - new_state = cache_state[i].state; 3225 - break; 3226 - } 3227 - } 3228 - 3229 - if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED) 3230 - return -EINVAL; 3231 - 3232 - spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3233 - if (ioa_cfg->cache_state == new_state) { 3234 - spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3235 - return count; 3236 - } 3237 - 3238 - ioa_cfg->cache_state = new_state; 3239 - dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n", 3240 - new_state == CACHE_ENABLED ? "Enabling" : "Disabling"); 3241 - if (!ioa_cfg->in_reset_reload) 3242 - ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL); 3243 - spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3244 - wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 3245 - 3246 - return count; 3247 - } 3248 - 3249 - static struct device_attribute ipr_ioa_cache_attr = { 3250 - .attr = { 3251 - .name = "write_cache", 3252 - .mode = S_IRUGO | S_IWUSR, 3253 - }, 3254 - .show = ipr_show_write_caching, 3255 - .store = ipr_store_write_caching 3256 - }; 3257 2480 3258 2481 /** 3259 2482 * ipr_show_fw_version - Show the firmware version ··· 3555 2976 } 3556 2977 3557 2978 /** 2979 + * ipr_build_ucode_ioadl64 - Build a microcode download IOADL 2980 + * @ipr_cmd: ipr command struct 2981 + * @sglist: scatter/gather list 2982 + * 2983 + * Builds a microcode download IOA data list (IOADL). 
2984 + * 2985 + **/ 2986 + static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd, 2987 + struct ipr_sglist *sglist) 2988 + { 2989 + struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 2990 + struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; 2991 + struct scatterlist *scatterlist = sglist->scatterlist; 2992 + int i; 2993 + 2994 + ipr_cmd->dma_use_sg = sglist->num_dma_sg; 2995 + ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 2996 + ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len); 2997 + 2998 + ioarcb->ioadl_len = 2999 + cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); 3000 + for (i = 0; i < ipr_cmd->dma_use_sg; i++) { 3001 + ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE); 3002 + ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i])); 3003 + ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i])); 3004 + } 3005 + 3006 + ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); 3007 + } 3008 + 3009 + /** 3558 3010 * ipr_build_ucode_ioadl - Build a microcode download IOADL 3559 3011 * @ipr_cmd: ipr command struct 3560 3012 * @sglist: scatter/gather list ··· 3597 2987 struct ipr_sglist *sglist) 3598 2988 { 3599 2989 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 3600 - struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; 2990 + struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; 3601 2991 struct scatterlist *scatterlist = sglist->scatterlist; 3602 2992 int i; 3603 2993 3604 2994 ipr_cmd->dma_use_sg = sglist->num_dma_sg; 3605 2995 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 3606 - ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len); 3607 - ioarcb->write_ioadl_len = 2996 + ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len); 2997 + 2998 + ioarcb->ioadl_len = 3608 2999 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); 3609 3000 3610 3001 for (i = 0; i < ipr_cmd->dma_use_sg; i++) { ··· 3757 3146 &ipr_ioa_state_attr, 3758 3147 &ipr_ioa_reset_attr, 3759 3148 &ipr_update_fw_attr, 3760 - &ipr_ioa_cache_attr, 3761 3149 NULL, 3762 3150 }; 3763 3151 ··· 4060 3450 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4061 3451 res = (struct ipr_resource_entry *)sdev->hostdata; 4062 3452 if (res) 4063 - len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle); 3453 + len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle); 4064 3454 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4065 3455 return len; 4066 3456 } ··· 4073 3463 .show = ipr_show_adapter_handle 4074 3464 }; 4075 3465 3466 + /** 3467 + * ipr_show_resource_path - Show the resource path for this device. 
3468 + * @dev: device struct 3469 + * @buf: buffer 3470 + * 3471 + * Return value: 3472 + * number of bytes printed to buffer 3473 + **/ 3474 + static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf) 3475 + { 3476 + struct scsi_device *sdev = to_scsi_device(dev); 3477 + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; 3478 + struct ipr_resource_entry *res; 3479 + unsigned long lock_flags = 0; 3480 + ssize_t len = -ENXIO; 3481 + char buffer[IPR_MAX_RES_PATH_LENGTH]; 3482 + 3483 + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3484 + res = (struct ipr_resource_entry *)sdev->hostdata; 3485 + if (res) 3486 + len = snprintf(buf, PAGE_SIZE, "%s\n", 3487 + ipr_format_resource_path(&res->res_path[0], &buffer[0])); 3488 + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3489 + return len; 3490 + } 3491 + 3492 + static struct device_attribute ipr_resource_path_attr = { 3493 + .attr = { 3494 + .name = "resource_path", 3495 + .mode = S_IRUSR, 3496 + }, 3497 + .show = ipr_show_resource_path 3498 + }; 3499 + 4076 3500 static struct device_attribute *ipr_dev_attrs[] = { 4077 3501 &ipr_adapter_handle_attr, 3502 + &ipr_resource_path_attr, 4078 3503 NULL, 4079 3504 }; 4080 3505 ··· 4162 3517 struct ipr_resource_entry *res; 4163 3518 4164 3519 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 4165 - if ((res->cfgte.res_addr.bus == starget->channel) && 4166 - (res->cfgte.res_addr.target == starget->id) && 4167 - (res->cfgte.res_addr.lun == 0)) { 3520 + if ((res->bus == starget->channel) && 3521 + (res->target == starget->id) && 3522 + (res->lun == 0)) { 4168 3523 return res; 4169 3524 } 4170 3525 } ··· 4234 3589 static void ipr_target_destroy(struct scsi_target *starget) 4235 3590 { 4236 3591 struct ipr_sata_port *sata_port = starget->hostdata; 3592 + struct Scsi_Host *shost = dev_to_shost(&starget->dev); 3593 + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; 3594 + 3595 + if (ioa_cfg->sis64) { 3596 + if (starget->channel == IPR_ARRAY_VIRTUAL_BUS) 3597 + clear_bit(starget->id, ioa_cfg->array_ids); 3598 + else if (starget->channel == IPR_VSET_VIRTUAL_BUS) 3599 + clear_bit(starget->id, ioa_cfg->vset_ids); 3600 + else if (starget->channel == 0) 3601 + clear_bit(starget->id, ioa_cfg->target_ids); 3602 + } 4237 3603 4238 3604 if (sata_port) { 4239 3605 starget->hostdata = NULL; ··· 4266 3610 struct ipr_resource_entry *res; 4267 3611 4268 3612 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 4269 - if ((res->cfgte.res_addr.bus == sdev->channel) && 4270 - (res->cfgte.res_addr.target == sdev->id) && 4271 - (res->cfgte.res_addr.lun == sdev->lun)) 3613 + if ((res->bus == sdev->channel) && 3614 + (res->target == sdev->id) && 3615 + (res->lun == sdev->lun)) 4272 3616 return res; 4273 3617 } 4274 3618 ··· 4317 3661 struct ipr_resource_entry *res; 4318 3662 struct ata_port *ap = NULL; 4319 3663 unsigned long lock_flags = 0; 3664 + char buffer[IPR_MAX_RES_PATH_LENGTH]; 4320 3665 4321 3666 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4322 3667 res = sdev->hostdata; ··· 4344 3687 ata_sas_slave_configure(sdev, ap); 4345 3688 } else 4346 3689 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun); 3690 + if (ioa_cfg->sis64) 3691 + sdev_printk(KERN_INFO, sdev, "Resource path: %s\n", 3692 + ipr_format_resource_path(&res->res_path[0], &buffer[0])); 4347 3693 return 0; 4348 3694 } 4349 3695 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); ··· 4488 3828 ipr_cmd = 
ipr_get_free_ipr_cmnd(ioa_cfg); 4489 3829 ioarcb = &ipr_cmd->ioarcb; 4490 3830 cmd_pkt = &ioarcb->cmd_pkt; 4491 - regs = &ioarcb->add_data.u.regs; 4492 3831 4493 - ioarcb->res_handle = res->cfgte.res_handle; 3832 + if (ipr_cmd->ioa_cfg->sis64) { 3833 + regs = &ipr_cmd->i.ata_ioadl.regs; 3834 + ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb)); 3835 + } else 3836 + regs = &ioarcb->u.add_data.u.regs; 3837 + 3838 + ioarcb->res_handle = res->res_handle; 4494 3839 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; 4495 3840 cmd_pkt->cdb[0] = IPR_RESET_DEVICE; 4496 3841 if (ipr_is_gata(res)) { 4497 3842 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET; 4498 - ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(regs->flags)); 3843 + ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags)); 4499 3844 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION; 4500 3845 } 4501 3846 ··· 4545 3880 res = sata_port->res; 4546 3881 if (res) { 4547 3882 rc = ipr_device_reset(ioa_cfg, res); 4548 - switch(res->cfgte.proto) { 4549 - case IPR_PROTO_SATA: 4550 - case IPR_PROTO_SAS_STP: 4551 - *classes = ATA_DEV_ATA; 4552 - break; 4553 - case IPR_PROTO_SATA_ATAPI: 4554 - case IPR_PROTO_SAS_STP_ATAPI: 4555 - *classes = ATA_DEV_ATAPI; 4556 - break; 4557 - default: 4558 - *classes = ATA_DEV_UNKNOWN; 4559 - break; 4560 - }; 3883 + *classes = res->ata_class; 4561 3884 } 4562 3885 4563 3886 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); ··· 4590 3937 return FAILED; 4591 3938 4592 3939 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) { 4593 - if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) { 3940 + if (ipr_cmd->ioarcb.res_handle == res->res_handle) { 4594 3941 if (ipr_cmd->scsi_cmd) 4595 3942 ipr_cmd->done = ipr_scsi_eh_done; 4596 3943 if (ipr_cmd->qc) ··· 4612 3959 spin_lock_irq(scsi_cmd->device->host->host_lock); 4613 3960 4614 3961 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) { 4615 - if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) { 3962 + if (ipr_cmd->ioarcb.res_handle == res->res_handle) { 4616 3963 rc = -EIO; 4617 3964 break; 4618 3965 } ··· 4651 3998 struct ipr_resource_entry *res; 4652 3999 4653 4000 ENTER; 4654 - list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 4655 - if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle, 4656 - sizeof(res->cfgte.res_handle))) { 4657 - scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus); 4658 - break; 4001 + if (!ioa_cfg->sis64) 4002 + list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 4003 + if (res->res_handle == ipr_cmd->ioarcb.res_handle) { 4004 + scsi_report_bus_reset(ioa_cfg->host, res->bus); 4005 + break; 4006 + } 4659 4007 } 4660 - } 4661 4008 4662 4009 /* 4663 4010 * If abort has not completed, indicate the reset has, else call the ··· 4755 4102 return SUCCESS; 4756 4103 4757 4104 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); 4758 - ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle; 4105 + ipr_cmd->ioarcb.res_handle = res->res_handle; 4759 4106 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; 4760 4107 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; 4761 4108 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS; ··· 4892 4239 return IRQ_NONE; 4893 4240 } 4894 4241 4895 - int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 4896 - int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; 4242 + int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32); 4243 + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg; 4897 4244 4898 - /* If an interrupt on the adapter did not occur, ignore it */ 
4245 + /* If an interrupt on the adapter did not occur, ignore it. 4246 + * Or in the case of SIS 64, check for a stage change interrupt. 4247 + */ 4899 4248 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) { 4249 + if (ioa_cfg->sis64) { 4250 + int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 4251 + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; 4252 + if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) { 4253 + 4254 + /* clear stage change */ 4255 + writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg); 4256 + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; 4257 + list_del(&ioa_cfg->reset_cmd->queue); 4258 + del_timer(&ioa_cfg->reset_cmd->timer); 4259 + ipr_reset_ioa_job(ioa_cfg->reset_cmd); 4260 + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4261 + return IRQ_HANDLED; 4262 + } 4263 + } 4264 + 4900 4265 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4901 4266 return IRQ_NONE; 4902 4267 } ··· 4957 4286 if (ipr_cmd != NULL) { 4958 4287 /* Clear the PCI interrupt */ 4959 4288 do { 4960 - writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg); 4961 - int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; 4289 + writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32); 4290 + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg; 4962 4291 } while (int_reg & IPR_PCII_HRRQ_UPDATED && 4963 4292 num_hrrq++ < IPR_MAX_HRRQ_RETRIES); 4964 4293 ··· 4980 4309 } 4981 4310 4982 4311 /** 4983 - * ipr_build_ioadl - Build a scatter/gather list and map the buffer 4312 + * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer 4984 4313 * @ioa_cfg: ioa config struct 4985 4314 * @ipr_cmd: ipr command struct 4986 4315 * 4987 4316 * Return value: 4988 4317 * 0 on success / -1 on failure 4989 4318 **/ 4990 - static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg, 4991 - struct ipr_cmnd *ipr_cmd) 4319 + static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg, 4320 + struct ipr_cmnd *ipr_cmd) 4992 4321 { 4993 4322 int i, nseg; 4994 4323 struct scatterlist *sg; ··· 4996 4325 u32 ioadl_flags = 0; 4997 4326 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 4998 4327 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 4999 - struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; 4328 + struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; 5000 4329 5001 4330 length = scsi_bufflen(scsi_cmd); 5002 4331 if (!length) ··· 5013 4342 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { 5014 4343 ioadl_flags = IPR_IOADL_FLAGS_WRITE; 5015 4344 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 5016 - ioarcb->write_data_transfer_length = cpu_to_be32(length); 5017 - ioarcb->write_ioadl_len = 4345 + } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) 4346 + ioadl_flags = IPR_IOADL_FLAGS_READ; 4347 + 4348 + scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) { 4349 + ioadl64[i].flags = cpu_to_be32(ioadl_flags); 4350 + ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg)); 4351 + ioadl64[i].address = cpu_to_be64(sg_dma_address(sg)); 4352 + } 4353 + 4354 + ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); 4355 + return 0; 4356 + } 4357 + 4358 + /** 4359 + * ipr_build_ioadl - Build a scatter/gather list and map the buffer 4360 + * @ioa_cfg: ioa config struct 4361 + * @ipr_cmd: ipr command struct 4362 + * 4363 + * Return value: 4364 + * 0 on success / -1 on failure 4365 + **/ 4366 + static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg, 4367 + struct ipr_cmnd *ipr_cmd) 4368 + { 
4369 + int i, nseg; 4370 + struct scatterlist *sg; 4371 + u32 length; 4372 + u32 ioadl_flags = 0; 4373 + struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 4374 + struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 4375 + struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; 4376 + 4377 + length = scsi_bufflen(scsi_cmd); 4378 + if (!length) 4379 + return 0; 4380 + 4381 + nseg = scsi_dma_map(scsi_cmd); 4382 + if (nseg < 0) { 4383 + dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n"); 4384 + return -1; 4385 + } 4386 + 4387 + ipr_cmd->dma_use_sg = nseg; 4388 + 4389 + if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { 4390 + ioadl_flags = IPR_IOADL_FLAGS_WRITE; 4391 + ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 4392 + ioarcb->data_transfer_length = cpu_to_be32(length); 4393 + ioarcb->ioadl_len = 5018 4394 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); 5019 4395 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) { 5020 4396 ioadl_flags = IPR_IOADL_FLAGS_READ; ··· 5070 4352 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); 5071 4353 } 5072 4354 5073 - if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) { 5074 - ioadl = ioarcb->add_data.u.ioadl; 5075 - ioarcb->write_ioadl_addr = 5076 - cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) + 5077 - offsetof(struct ipr_ioarcb, add_data)); 4355 + if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) { 4356 + ioadl = ioarcb->u.add_data.u.ioadl; 4357 + ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) + 4358 + offsetof(struct ipr_ioarcb, u.add_data)); 5078 4359 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; 5079 4360 } 5080 4361 ··· 5163 4446 { 5164 4447 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 5165 4448 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; 5166 - dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr); 4449 + dma_addr_t dma_addr = ipr_cmd->dma_addr; 5167 4450 5168 4451 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); 5169 - ioarcb->write_data_transfer_length = 0; 4452 + ioarcb->data_transfer_length = 0; 5170 4453 ioarcb->read_data_transfer_length = 0; 5171 - ioarcb->write_ioadl_len = 0; 4454 + ioarcb->ioadl_len = 0; 5172 4455 ioarcb->read_ioadl_len = 0; 5173 4456 ioasa->ioasc = 0; 5174 4457 ioasa->residual_data_len = 0; 5175 - ioarcb->write_ioadl_addr = 5176 - cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl)); 5177 - ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; 4458 + 4459 + if (ipr_cmd->ioa_cfg->sis64) 4460 + ioarcb->u.sis64_addr_data.data_ioadl_addr = 4461 + cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); 4462 + else { 4463 + ioarcb->write_ioadl_addr = 4464 + cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); 4465 + ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; 4466 + } 5178 4467 } 5179 4468 5180 4469 /** ··· 5212 4489 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; 5213 4490 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ); 5214 4491 5215 - ipr_cmd->ioadl[0].flags_and_data_len = 5216 - cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE); 5217 - ipr_cmd->ioadl[0].address = 5218 - cpu_to_be32(ipr_cmd->sense_buffer_dma); 5219 - 5220 - ipr_cmd->ioarcb.read_ioadl_len = 5221 - cpu_to_be32(sizeof(struct ipr_ioadl_desc)); 5222 - ipr_cmd->ioarcb.read_data_transfer_length = 5223 - cpu_to_be32(SCSI_SENSE_BUFFERSIZE); 4492 + ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma, 4493 + SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST); 5224 4494 5225 4495 ipr_do_req(ipr_cmd, ipr_erp_done, 
ipr_timeout, 5226 4496 IPR_REQUEST_SENSE_TIMEOUT * 2); ··· 5609 4893 5610 4894 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len); 5611 4895 ipr_cmd->scsi_cmd = scsi_cmd; 5612 - ioarcb->res_handle = res->cfgte.res_handle; 4896 + ioarcb->res_handle = res->res_handle; 5613 4897 ipr_cmd->done = ipr_scsi_done; 5614 - ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr)); 4898 + ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res)); 5615 4899 5616 4900 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) { 5617 4901 if (scsi_cmd->underflow == 0) ··· 5632 4916 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) 5633 4917 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 5634 4918 5635 - if (likely(rc == 0)) 5636 - rc = ipr_build_ioadl(ioa_cfg, ipr_cmd); 4919 + if (likely(rc == 0)) { 4920 + if (ioa_cfg->sis64) 4921 + rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd); 4922 + else 4923 + rc = ipr_build_ioadl(ioa_cfg, ipr_cmd); 4924 + } 5637 4925 5638 4926 if (likely(rc == 0)) { 5639 4927 mb(); 5640 - writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr), 5641 - ioa_cfg->regs.ioarrin_reg); 4928 + ipr_send_command(ipr_cmd); 5642 4929 } else { 5643 4930 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q); 5644 4931 return SCSI_MLQUEUE_HOST_BUSY; ··· 5754 5035 goto out_unlock; 5755 5036 } 5756 5037 5757 - switch(res->cfgte.proto) { 5758 - case IPR_PROTO_SATA: 5759 - case IPR_PROTO_SAS_STP: 5760 - ap->link.device[0].class = ATA_DEV_ATA; 5761 - break; 5762 - case IPR_PROTO_SATA_ATAPI: 5763 - case IPR_PROTO_SAS_STP_ATAPI: 5764 - ap->link.device[0].class = ATA_DEV_ATAPI; 5765 - break; 5766 - default: 5767 - ap->link.device[0].class = ATA_DEV_UNKNOWN; 5038 + ap->link.device[0].class = res->ata_class; 5039 + if (ap->link.device[0].class == ATA_DEV_UNKNOWN) 5768 5040 ata_port_disable(ap); 5769 - break; 5770 - }; 5771 5041 5772 5042 out_unlock: 5773 5043 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); ··· 5842 5134 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res); 5843 5135 5844 5136 if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET) 5845 - scsi_report_device_reset(ioa_cfg->host, res->cfgte.res_addr.bus, 5846 - res->cfgte.res_addr.target); 5137 + scsi_report_device_reset(ioa_cfg->host, res->bus, res->target); 5847 5138 5848 5139 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR) 5849 5140 qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status); ··· 5850 5143 qc->err_mask |= ac_err_mask(ipr_cmd->ioasa.u.gata.status); 5851 5144 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); 5852 5145 ata_qc_complete(qc); 5146 + } 5147 + 5148 + /** 5149 + * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list 5150 + * @ipr_cmd: ipr command struct 5151 + * @qc: ATA queued command 5152 + * 5153 + **/ 5154 + static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd, 5155 + struct ata_queued_cmd *qc) 5156 + { 5157 + u32 ioadl_flags = 0; 5158 + struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 5159 + struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; 5160 + struct ipr_ioadl64_desc *last_ioadl64 = NULL; 5161 + int len = qc->nbytes; 5162 + struct scatterlist *sg; 5163 + unsigned int si; 5164 + dma_addr_t dma_addr = ipr_cmd->dma_addr; 5165 + 5166 + if (len == 0) 5167 + return; 5168 + 5169 + if (qc->dma_dir == DMA_TO_DEVICE) { 5170 + ioadl_flags = IPR_IOADL_FLAGS_WRITE; 5171 + ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 5172 + } else if (qc->dma_dir == DMA_FROM_DEVICE) 5173 + ioadl_flags = IPR_IOADL_FLAGS_READ; 5174 + 5175 + 
ioarcb->data_transfer_length = cpu_to_be32(len); 5176 + ioarcb->ioadl_len = 5177 + cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); 5178 + ioarcb->u.sis64_addr_data.data_ioadl_addr = 5179 + cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl)); 5180 + 5181 + for_each_sg(qc->sg, sg, qc->n_elem, si) { 5182 + ioadl64->flags = cpu_to_be32(ioadl_flags); 5183 + ioadl64->data_len = cpu_to_be32(sg_dma_len(sg)); 5184 + ioadl64->address = cpu_to_be64(sg_dma_address(sg)); 5185 + 5186 + last_ioadl64 = ioadl64; 5187 + ioadl64++; 5188 + } 5189 + 5190 + if (likely(last_ioadl64)) 5191 + last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); 5853 5192 } 5854 5193 5855 5194 /** ··· 5909 5156 { 5910 5157 u32 ioadl_flags = 0; 5911 5158 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 5912 - struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; 5159 + struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; 5913 5160 struct ipr_ioadl_desc *last_ioadl = NULL; 5914 5161 int len = qc->nbytes; 5915 5162 struct scatterlist *sg; ··· 5921 5168 if (qc->dma_dir == DMA_TO_DEVICE) { 5922 5169 ioadl_flags = IPR_IOADL_FLAGS_WRITE; 5923 5170 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 5924 - ioarcb->write_data_transfer_length = cpu_to_be32(len); 5925 - ioarcb->write_ioadl_len = 5171 + ioarcb->data_transfer_length = cpu_to_be32(len); 5172 + ioarcb->ioadl_len = 5926 5173 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); 5927 5174 } else if (qc->dma_dir == DMA_FROM_DEVICE) { 5928 5175 ioadl_flags = IPR_IOADL_FLAGS_READ; ··· 5965 5212 5966 5213 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); 5967 5214 ioarcb = &ipr_cmd->ioarcb; 5968 - regs = &ioarcb->add_data.u.regs; 5969 5215 5970 - memset(&ioarcb->add_data, 0, sizeof(ioarcb->add_data)); 5971 - ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(ioarcb->add_data.u.regs)); 5216 + if (ioa_cfg->sis64) { 5217 + regs = &ipr_cmd->i.ata_ioadl.regs; 5218 + ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb)); 5219 + } else 5220 + regs = &ioarcb->u.add_data.u.regs; 5221 + 5222 + memset(regs, 0, sizeof(*regs)); 5223 + ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs)); 5972 5224 5973 5225 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q); 5974 5226 ipr_cmd->qc = qc; 5975 5227 ipr_cmd->done = ipr_sata_done; 5976 - ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle; 5228 + ipr_cmd->ioarcb.res_handle = res->res_handle; 5977 5229 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU; 5978 5230 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC; 5979 5231 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; 5980 5232 ipr_cmd->dma_use_sg = qc->n_elem; 5981 5233 5982 - ipr_build_ata_ioadl(ipr_cmd, qc); 5234 + if (ioa_cfg->sis64) 5235 + ipr_build_ata_ioadl64(ipr_cmd, qc); 5236 + else 5237 + ipr_build_ata_ioadl(ipr_cmd, qc); 5238 + 5983 5239 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION; 5984 5240 ipr_copy_sata_tf(regs, &qc->tf); 5985 5241 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN); 5986 - ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr)); 5242 + ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res)); 5987 5243 5988 5244 switch (qc->tf.protocol) { 5989 5245 case ATA_PROT_NODATA: ··· 6019 5257 } 6020 5258 6021 5259 mb(); 6022 - writel(be32_to_cpu(ioarcb->ioarcb_host_pci_addr), 6023 - ioa_cfg->regs.ioarrin_reg); 5260 + 5261 + ipr_send_command(ipr_cmd); 5262 + 6024 5263 return 0; 6025 5264 } 6026 5265 ··· 6222 5459 * ipr_set_supported_devs - Send Set Supported Devices for a device 6223 5460 * 
@ipr_cmd: ipr command struct 6224 5461 * 6225 - * This function send a Set Supported Devices to the adapter 5462 + * This function sends a Set Supported Devices to the adapter 6226 5463 * 6227 5464 * Return value: 6228 5465 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN ··· 6231 5468 { 6232 5469 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6233 5470 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev; 6234 - struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; 6235 5471 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 6236 5472 struct ipr_resource_entry *res = ipr_cmd->u.res; 6237 5473 ··· 6241 5479 continue; 6242 5480 6243 5481 ipr_cmd->u.res = res; 6244 - ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids); 5482 + ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids); 6245 5483 6246 5484 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 6247 5485 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 6248 5486 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 6249 5487 6250 5488 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES; 5489 + ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES; 6251 5490 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff; 6252 5491 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff; 6253 5492 6254 - ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | 6255 - sizeof(struct ipr_supported_device)); 6256 - ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma + 6257 - offsetof(struct ipr_misc_cbs, supp_dev)); 6258 - ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); 6259 - ioarcb->write_data_transfer_length = 6260 - cpu_to_be32(sizeof(struct ipr_supported_device)); 5493 + ipr_init_ioadl(ipr_cmd, 5494 + ioa_cfg->vpd_cbs_dma + 5495 + offsetof(struct ipr_misc_cbs, supp_dev), 5496 + sizeof(struct ipr_supported_device), 5497 + IPR_IOADL_FLAGS_WRITE_LAST); 6261 5498 6262 5499 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, 6263 5500 IPR_SET_SUP_DEVICE_TIMEOUT); 6264 5501 6265 - ipr_cmd->job_step = ipr_set_supported_devs; 5502 + if (!ioa_cfg->sis64) 5503 + ipr_cmd->job_step = ipr_set_supported_devs; 6266 5504 return IPR_RC_JOB_RETURN; 6267 5505 } 6268 5506 6269 5507 return IPR_RC_JOB_CONTINUE; 6270 - } 6271 - 6272 - /** 6273 - * ipr_setup_write_cache - Disable write cache if needed 6274 - * @ipr_cmd: ipr command struct 6275 - * 6276 - * This function sets up adapters write cache to desired setting 6277 - * 6278 - * Return value: 6279 - * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 6280 - **/ 6281 - static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd) 6282 - { 6283 - struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6284 - 6285 - ipr_cmd->job_step = ipr_set_supported_devs; 6286 - ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next, 6287 - struct ipr_resource_entry, queue); 6288 - 6289 - if (ioa_cfg->cache_state != CACHE_DISABLED) 6290 - return IPR_RC_JOB_CONTINUE; 6291 - 6292 - ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 6293 - ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 6294 - ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN; 6295 - ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL; 6296 - 6297 - ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); 6298 - 6299 - return IPR_RC_JOB_RETURN; 6300 5508 } 6301 5509 6302 5510 /** ··· 6427 5695 * none 6428 5696 **/ 6429 5697 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd, 6430 - __be32 res_handle, u8 parm, u32 dma_addr, 6431 - u8 xfer_len) 5698 
+ __be32 res_handle, u8 parm, 5699 + dma_addr_t dma_addr, u8 xfer_len) 6432 5700 { 6433 - struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; 6434 5701 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 6435 5702 6436 5703 ioarcb->res_handle = res_handle; ··· 6439 5708 ioarcb->cmd_pkt.cdb[1] = parm; 6440 5709 ioarcb->cmd_pkt.cdb[4] = xfer_len; 6441 5710 6442 - ioadl->flags_and_data_len = 6443 - cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len); 6444 - ioadl->address = cpu_to_be32(dma_addr); 6445 - ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); 6446 - ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len); 5711 + ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST); 6447 5712 } 6448 5713 6449 5714 /** ··· 6469 5742 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), 6470 5743 length); 6471 5744 6472 - ipr_cmd->job_step = ipr_setup_write_cache; 5745 + ipr_cmd->job_step = ipr_set_supported_devs; 5746 + ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next, 5747 + struct ipr_resource_entry, queue); 6473 5748 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); 6474 5749 6475 5750 LEAVE; ··· 6491 5762 **/ 6492 5763 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd, 6493 5764 __be32 res_handle, 6494 - u8 parm, u32 dma_addr, u8 xfer_len) 5765 + u8 parm, dma_addr_t dma_addr, u8 xfer_len) 6495 5766 { 6496 - struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; 6497 5767 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 6498 5768 6499 5769 ioarcb->res_handle = res_handle; ··· 6501 5773 ioarcb->cmd_pkt.cdb[4] = xfer_len; 6502 5774 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; 6503 5775 6504 - ioadl->flags_and_data_len = 6505 - cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len); 6506 - ioadl->address = cpu_to_be32(dma_addr); 6507 - ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); 6508 - ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len); 5776 + ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST); 6509 5777 } 6510 5778 6511 5779 /** ··· 6539 5815 **/ 6540 5816 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd) 6541 5817 { 5818 + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6542 5819 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); 6543 5820 6544 5821 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) { 6545 - ipr_cmd->job_step = ipr_setup_write_cache; 5822 + ipr_cmd->job_step = ipr_set_supported_devs; 5823 + ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next, 5824 + struct ipr_resource_entry, queue); 6546 5825 return IPR_RC_JOB_CONTINUE; 6547 5826 } 6548 5827 ··· 6685 5958 { 6686 5959 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6687 5960 struct ipr_resource_entry *res, *temp; 6688 - struct ipr_config_table_entry *cfgte; 6689 - int found, i; 5961 + struct ipr_config_table_entry_wrapper cfgtew; 5962 + int entries, found, flag, i; 6690 5963 LIST_HEAD(old_res); 6691 5964 6692 5965 ENTER; 6693 - if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ) 5966 + if (ioa_cfg->sis64) 5967 + flag = ioa_cfg->u.cfg_table64->hdr64.flags; 5968 + else 5969 + flag = ioa_cfg->u.cfg_table->hdr.flags; 5970 + 5971 + if (flag & IPR_UCODE_DOWNLOAD_REQ) 6694 5972 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n"); 6695 5973 6696 5974 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue) 6697 5975 list_move_tail(&res->queue, &old_res); 6698 5976 6699 - for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) { 6700 - cfgte = &ioa_cfg->cfg_table->dev[i]; 5977 + if 
(ioa_cfg->sis64) 5978 + entries = ioa_cfg->u.cfg_table64->hdr64.num_entries; 5979 + else 5980 + entries = ioa_cfg->u.cfg_table->hdr.num_entries; 5981 + 5982 + for (i = 0; i < entries; i++) { 5983 + if (ioa_cfg->sis64) 5984 + cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i]; 5985 + else 5986 + cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i]; 6701 5987 found = 0; 6702 5988 6703 5989 list_for_each_entry_safe(res, temp, &old_res, queue) { 6704 - if (!memcmp(&res->cfgte.res_addr, 6705 - &cfgte->res_addr, sizeof(cfgte->res_addr))) { 5990 + if (ipr_is_same_device(res, &cfgtew)) { 6706 5991 list_move_tail(&res->queue, &ioa_cfg->used_res_q); 6707 5992 found = 1; 6708 5993 break; ··· 6731 5992 res = list_entry(ioa_cfg->free_res_q.next, 6732 5993 struct ipr_resource_entry, queue); 6733 5994 list_move_tail(&res->queue, &ioa_cfg->used_res_q); 6734 - ipr_init_res_entry(res); 5995 + ipr_init_res_entry(res, &cfgtew); 6735 5996 res->add_to_ml = 1; 6736 5997 } 6737 5998 6738 5999 if (found) 6739 - memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry)); 6000 + ipr_update_res_entry(res, &cfgtew); 6740 6001 } 6741 6002 6742 6003 list_for_each_entry_safe(res, temp, &old_res, queue) { 6743 6004 if (res->sdev) { 6744 6005 res->del_from_ml = 1; 6745 - res->cfgte.res_handle = IPR_INVALID_RES_HANDLE; 6006 + res->res_handle = IPR_INVALID_RES_HANDLE; 6746 6007 list_move_tail(&res->queue, &ioa_cfg->used_res_q); 6747 - } else { 6748 - list_move_tail(&res->queue, &ioa_cfg->free_res_q); 6749 6008 } 6009 + } 6010 + 6011 + list_for_each_entry_safe(res, temp, &old_res, queue) { 6012 + ipr_clear_res_target(res); 6013 + list_move_tail(&res->queue, &ioa_cfg->free_res_q); 6750 6014 } 6751 6015 6752 6016 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid) ··· 6775 6033 { 6776 6034 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6777 6035 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 6778 - struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; 6779 6036 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; 6780 6037 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap; 6781 6038 ··· 6788 6047 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 6789 6048 6790 6049 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG; 6791 - ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff; 6792 - ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff; 6050 + ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff; 6051 + ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff; 6793 6052 6794 - ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); 6795 - ioarcb->read_data_transfer_length = 6796 - cpu_to_be32(sizeof(struct ipr_config_table)); 6797 - 6798 - ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma); 6799 - ioadl->flags_and_data_len = 6800 - cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table)); 6053 + ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size, 6054 + IPR_IOADL_FLAGS_READ_LAST); 6801 6055 6802 6056 ipr_cmd->job_step = ipr_init_res_table; 6803 6057 ··· 6812 6076 * none 6813 6077 **/ 6814 6078 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page, 6815 - u32 dma_addr, u8 xfer_len) 6079 + dma_addr_t dma_addr, u8 xfer_len) 6816 6080 { 6817 6081 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 6818 - struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; 6819 6082 6820 6083 ENTER; 6821 6084 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; ··· 6825 6090 ioarcb->cmd_pkt.cdb[2] = page; 6826 6091 ioarcb->cmd_pkt.cdb[4] = xfer_len; 
6827 6092 6828 - ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); 6829 - ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len); 6830 - 6831 - ioadl->address = cpu_to_be32(dma_addr); 6832 - ioadl->flags_and_data_len = 6833 - cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len); 6093 + ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST); 6834 6094 6835 6095 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); 6836 6096 LEAVE; ··· 6896 6166 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd) 6897 6167 { 6898 6168 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6899 - struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data; 6900 6169 6901 6170 ENTER; 6902 - 6903 - if (!ipr_inquiry_page_supported(page0, 1)) 6904 - ioa_cfg->cache_state = CACHE_NONE; 6905 6171 6906 6172 ipr_cmd->job_step = ipr_ioafp_cap_inquiry; 6907 6173 ··· 6966 6240 } 6967 6241 6968 6242 /** 6969 - * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ. 6243 + * ipr_ioafp_identify_hrrq - Send Identify Host RRQ. 6970 6244 * @ipr_cmd: ipr command struct 6971 6245 * 6972 6246 * This function send an Identify Host Request Response Queue ··· 6975 6249 * Return value: 6976 6250 * IPR_RC_JOB_RETURN 6977 6251 **/ 6978 - static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd) 6252 + static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd) 6979 6253 { 6980 6254 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6981 6255 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; ··· 6987 6261 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 6988 6262 6989 6263 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 6264 + if (ioa_cfg->sis64) 6265 + ioarcb->cmd_pkt.cdb[1] = 0x1; 6990 6266 ioarcb->cmd_pkt.cdb[2] = 6991 - ((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff; 6267 + ((u64) ioa_cfg->host_rrq_dma >> 24) & 0xff; 6992 6268 ioarcb->cmd_pkt.cdb[3] = 6993 - ((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff; 6269 + ((u64) ioa_cfg->host_rrq_dma >> 16) & 0xff; 6994 6270 ioarcb->cmd_pkt.cdb[4] = 6995 - ((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff; 6271 + ((u64) ioa_cfg->host_rrq_dma >> 8) & 0xff; 6996 6272 ioarcb->cmd_pkt.cdb[5] = 6997 - ((u32) ioa_cfg->host_rrq_dma) & 0xff; 6273 + ((u64) ioa_cfg->host_rrq_dma) & 0xff; 6998 6274 ioarcb->cmd_pkt.cdb[7] = 6999 6275 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff; 7000 6276 ioarcb->cmd_pkt.cdb[8] = 7001 6277 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff; 6278 + 6279 + if (ioa_cfg->sis64) { 6280 + ioarcb->cmd_pkt.cdb[10] = 6281 + ((u64) ioa_cfg->host_rrq_dma >> 56) & 0xff; 6282 + ioarcb->cmd_pkt.cdb[11] = 6283 + ((u64) ioa_cfg->host_rrq_dma >> 48) & 0xff; 6284 + ioarcb->cmd_pkt.cdb[12] = 6285 + ((u64) ioa_cfg->host_rrq_dma >> 40) & 0xff; 6286 + ioarcb->cmd_pkt.cdb[13] = 6287 + ((u64) ioa_cfg->host_rrq_dma >> 32) & 0xff; 6288 + } 7002 6289 7003 6290 ipr_cmd->job_step = ipr_ioafp_std_inquiry; 7004 6291 ··· 7093 6354 ioa_cfg->toggle_bit = 1; 7094 6355 7095 6356 /* Zero out config table */ 7096 - memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table)); 6357 + memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size); 6358 + } 6359 + 6360 + /** 6361 + * ipr_reset_next_stage - Process IPL stage change based on feedback register. 
6362 + * @ipr_cmd: ipr command struct 6363 + * 6364 + * Return value: 6365 + * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 6366 + **/ 6367 + static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd) 6368 + { 6369 + unsigned long stage, stage_time; 6370 + u32 feedback; 6371 + volatile u32 int_reg; 6372 + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6373 + u64 maskval = 0; 6374 + 6375 + feedback = readl(ioa_cfg->regs.init_feedback_reg); 6376 + stage = feedback & IPR_IPL_INIT_STAGE_MASK; 6377 + stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK; 6378 + 6379 + ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time); 6380 + 6381 + /* sanity check the stage_time value */ 6382 + if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME) 6383 + stage_time = IPR_IPL_INIT_MIN_STAGE_TIME; 6384 + else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT) 6385 + stage_time = IPR_LONG_OPERATIONAL_TIMEOUT; 6386 + 6387 + if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) { 6388 + writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg); 6389 + int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 6390 + stage_time = ioa_cfg->transop_timeout; 6391 + ipr_cmd->job_step = ipr_ioafp_identify_hrrq; 6392 + } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) { 6393 + ipr_cmd->job_step = ipr_ioafp_identify_hrrq; 6394 + maskval = IPR_PCII_IPL_STAGE_CHANGE; 6395 + maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER; 6396 + writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg); 6397 + int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 6398 + return IPR_RC_JOB_CONTINUE; 6399 + } 6400 + 6401 + ipr_cmd->timer.data = (unsigned long) ipr_cmd; 6402 + ipr_cmd->timer.expires = jiffies + stage_time * HZ; 6403 + ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout; 6404 + ipr_cmd->done = ipr_reset_ioa_job; 6405 + add_timer(&ipr_cmd->timer); 6406 + list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q); 6407 + 6408 + return IPR_RC_JOB_RETURN; 7097 6409 } 7098 6410 7099 6411 /** ··· 7163 6373 volatile u32 int_reg; 7164 6374 7165 6375 ENTER; 7166 - ipr_cmd->job_step = ipr_ioafp_indentify_hrrq; 6376 + ipr_cmd->job_step = ipr_ioafp_identify_hrrq; 7167 6377 ipr_init_ioa_mem(ioa_cfg); 7168 6378 7169 6379 ioa_cfg->allow_interrupts = 1; ··· 7171 6381 7172 6382 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { 7173 6383 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED), 7174 - ioa_cfg->regs.clr_interrupt_mask_reg); 6384 + ioa_cfg->regs.clr_interrupt_mask_reg32); 7175 6385 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 7176 6386 return IPR_RC_JOB_CONTINUE; 7177 6387 } 7178 6388 7179 6389 /* Enable destructive diagnostics on IOA */ 7180 - writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg); 6390 + writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32); 7181 6391 7182 - writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg); 6392 + writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32); 6393 + if (ioa_cfg->sis64) 6394 + writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_mask_reg); 6395 + 7183 6396 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 7184 6397 7185 6398 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n"); 6399 + 6400 + if (ioa_cfg->sis64) { 6401 + ipr_cmd->job_step = ipr_reset_next_stage; 6402 + return IPR_RC_JOB_CONTINUE; 6403 + } 7186 6404 7187 6405 ipr_cmd->timer.data = (unsigned long) ipr_cmd; 7188 6406 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ); ··· 7261 6463 7262 6464 mailbox = 
readl(ioa_cfg->ioa_mailbox); 7263 6465 7264 - if (!ipr_sdt_is_fmt2(mailbox)) { 6466 + if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) { 7265 6467 ipr_unit_check_no_data(ioa_cfg); 7266 6468 return; 7267 6469 } ··· 7270 6472 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt, 7271 6473 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32)); 7272 6474 7273 - if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) || 7274 - !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) { 6475 + if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) || 6476 + ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) && 6477 + (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) { 7275 6478 ipr_unit_check_no_data(ioa_cfg); 7276 6479 return; 7277 6480 } 7278 6481 7279 6482 /* Find length of the first sdt entry (UC buffer) */ 7280 - length = (be32_to_cpu(sdt.entry[0].end_offset) - 7281 - be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK; 6483 + if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE) 6484 + length = be32_to_cpu(sdt.entry[0].end_token); 6485 + else 6486 + length = (be32_to_cpu(sdt.entry[0].end_token) - 6487 + be32_to_cpu(sdt.entry[0].start_token)) & 6488 + IPR_FMT2_MBX_ADDR_MASK; 7282 6489 7283 6490 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next, 7284 6491 struct ipr_hostrcb, queue); ··· 7291 6488 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam)); 7292 6489 7293 6490 rc = ipr_get_ldump_data_section(ioa_cfg, 7294 - be32_to_cpu(sdt.entry[0].bar_str_offset), 6491 + be32_to_cpu(sdt.entry[0].start_token), 7295 6492 (__be32 *)&hostrcb->hcam, 7296 6493 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32)); 7297 6494 7298 6495 if (!rc) { 7299 6496 ipr_handle_log_data(ioa_cfg, hostrcb); 7300 - ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc); 6497 + ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc); 7301 6498 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED && 7302 6499 ioa_cfg->sdt_state == GET_DUMP) 7303 6500 ioa_cfg->sdt_state = WAIT_FOR_DUMP; ··· 7525 6722 7526 6723 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) { 7527 6724 ipr_mask_and_clear_interrupts(ioa_cfg, ~0); 7528 - writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg); 6725 + writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32); 7529 6726 ipr_cmd->job_step = ipr_reset_wait_to_start_bist; 7530 6727 } else { 7531 6728 ipr_cmd->job_step = ioa_cfg->reset; ··· 7588 6785 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8; 7589 6786 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff; 7590 6787 7591 - ipr_build_ucode_ioadl(ipr_cmd, sglist); 6788 + if (ioa_cfg->sis64) 6789 + ipr_build_ucode_ioadl64(ipr_cmd, sglist); 6790 + else 6791 + ipr_build_ucode_ioadl(ipr_cmd, sglist); 7592 6792 ipr_cmd->job_step = ipr_reset_ucode_download_done; 7593 6793 7594 6794 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, ··· 7960 7154 ipr_free_cmd_blks(ioa_cfg); 7961 7155 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS, 7962 7156 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma); 7963 - pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table), 7964 - ioa_cfg->cfg_table, 7157 + pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size, 7158 + ioa_cfg->u.cfg_table, 7965 7159 ioa_cfg->cfg_table_dma); 7966 7160 7967 7161 for (i = 0; i < IPR_NUM_HCAMS; i++) { ··· 8015 7209 int i; 8016 7210 8017 7211 ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev, 8018 - sizeof(struct ipr_cmnd), 8, 0); 7212 + 
sizeof(struct ipr_cmnd), 16, 0); 8019 7213 8020 7214 if (!ioa_cfg->ipr_cmd_pool) 8021 7215 return -ENOMEM; ··· 8033 7227 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr; 8034 7228 8035 7229 ioarcb = &ipr_cmd->ioarcb; 8036 - ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr); 7230 + ipr_cmd->dma_addr = dma_addr; 7231 + if (ioa_cfg->sis64) 7232 + ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr); 7233 + else 7234 + ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr); 7235 + 8037 7236 ioarcb->host_response_handle = cpu_to_be32(i << 2); 8038 - ioarcb->write_ioadl_addr = 8039 - cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl)); 8040 - ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; 8041 - ioarcb->ioasa_host_pci_addr = 8042 - cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa)); 7237 + if (ioa_cfg->sis64) { 7238 + ioarcb->u.sis64_addr_data.data_ioadl_addr = 7239 + cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); 7240 + ioarcb->u.sis64_addr_data.ioasa_host_pci_addr = 7241 + cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, ioasa)); 7242 + } else { 7243 + ioarcb->write_ioadl_addr = 7244 + cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); 7245 + ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; 7246 + ioarcb->ioasa_host_pci_addr = 7247 + cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa)); 7248 + } 8043 7249 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa)); 8044 7250 ipr_cmd->cmd_index = i; 8045 7251 ipr_cmd->ioa_cfg = ioa_cfg; ··· 8078 7260 8079 7261 ENTER; 8080 7262 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) * 8081 - IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL); 7263 + ioa_cfg->max_devs_supported, GFP_KERNEL); 8082 7264 8083 7265 if (!ioa_cfg->res_entries) 8084 7266 goto out; 8085 7267 8086 - for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++) 7268 + if (ioa_cfg->sis64) { 7269 + ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) * 7270 + BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL); 7271 + ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) * 7272 + BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL); 7273 + ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) * 7274 + BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL); 7275 + } 7276 + 7277 + for (i = 0; i < ioa_cfg->max_devs_supported; i++) { 8087 7278 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q); 7279 + ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg; 7280 + } 8088 7281 8089 7282 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev, 8090 7283 sizeof(struct ipr_misc_cbs), ··· 8114 7285 if (!ioa_cfg->host_rrq) 8115 7286 goto out_ipr_free_cmd_blocks; 8116 7287 8117 - ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev, 8118 - sizeof(struct ipr_config_table), 8119 - &ioa_cfg->cfg_table_dma); 7288 + ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev, 7289 + ioa_cfg->cfg_table_size, 7290 + &ioa_cfg->cfg_table_dma); 8120 7291 8121 - if (!ioa_cfg->cfg_table) 7292 + if (!ioa_cfg->u.cfg_table) 8122 7293 goto out_free_host_rrq; 8123 7294 8124 7295 for (i = 0; i < IPR_NUM_HCAMS; i++) { ··· 8152 7323 ioa_cfg->hostrcb[i], 8153 7324 ioa_cfg->hostrcb_dma[i]); 8154 7325 } 8155 - pci_free_consistent(pdev, sizeof(struct ipr_config_table), 8156 - ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma); 7326 + pci_free_consistent(pdev, ioa_cfg->cfg_table_size, 7327 + ioa_cfg->u.cfg_table, 7328 + ioa_cfg->cfg_table_dma); 8157 7329 out_free_host_rrq: 8158 7330 pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS, 8159 7331 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma); ··· 
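[Editor's note] The SIS-64 allocations above size the resource table from max_devs_supported and add three per-bus ID bitmaps (target_ids, array_ids, vset_ids), sized with BITS_TO_LONGS() and cleared again with clear_bit() in ipr_target_destroy(). For readers less familiar with the kernel bitmap helpers, here is a minimal sketch of that allocate/claim/release pattern; the names (MAX_DEVS, target_id_map, the helper functions) are illustrative only and not part of the driver.

    #include <linux/bitops.h>
    #include <linux/errno.h>
    #include <linux/slab.h>

    #define MAX_DEVS 1024                   /* illustrative limit, not ipr's */

    static unsigned long *target_id_map;    /* one bit per possible target ID */

    static int alloc_target_id_map(void)
    {
            /* BITS_TO_LONGS() rounds the bit count up to whole unsigned longs,
             * the same sizing used for ioa_cfg->target_ids above. */
            target_id_map = kzalloc(sizeof(unsigned long) *
                                    BITS_TO_LONGS(MAX_DEVS), GFP_KERNEL);
            return target_id_map ? 0 : -ENOMEM;
    }

    static int claim_target_id(void)
    {
            unsigned int id = find_first_zero_bit(target_id_map, MAX_DEVS);

            if (id >= MAX_DEVS)
                    return -ENOSPC;
            set_bit(id, target_id_map);     /* mark the ID as in use */
            return id;
    }

    static void release_target_id(unsigned int id)
    {
            clear_bit(id, target_id_map);   /* mirrors ipr_target_destroy() */
    }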
8229 7399 init_waitqueue_head(&ioa_cfg->reset_wait_q); 8230 7400 init_waitqueue_head(&ioa_cfg->msi_wait_q); 8231 7401 ioa_cfg->sdt_state = INACTIVE; 8232 - if (ipr_enable_cache) 8233 - ioa_cfg->cache_state = CACHE_ENABLED; 8234 - else 8235 - ioa_cfg->cache_state = CACHE_DISABLED; 8236 7402 8237 7403 ipr_initialize_bus_attr(ioa_cfg); 7404 + ioa_cfg->max_devs_supported = ipr_max_devs; 8238 7405 8239 - host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS; 8240 - host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET; 7406 + if (ioa_cfg->sis64) { 7407 + host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS; 7408 + host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET; 7409 + if (ipr_max_devs > IPR_MAX_SIS64_DEVS) 7410 + ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS; 7411 + } else { 7412 + host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS; 7413 + host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET; 7414 + if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS) 7415 + ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS; 7416 + } 8241 7417 host->max_channel = IPR_MAX_BUS_TO_SCAN; 8242 7418 host->unique_id = host->host_no; 8243 7419 host->max_cmd_len = IPR_MAX_CDB_LEN; ··· 8255 7419 8256 7420 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg; 8257 7421 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg; 7422 + t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32; 8258 7423 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg; 7424 + t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32; 8259 7425 t->clr_interrupt_reg = base + p->clr_interrupt_reg; 7426 + t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32; 8260 7427 t->sense_interrupt_reg = base + p->sense_interrupt_reg; 7428 + t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32; 8261 7429 t->ioarrin_reg = base + p->ioarrin_reg; 8262 7430 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg; 7431 + t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32; 8263 7432 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg; 7433 + t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32; 8264 7434 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg; 7435 + t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32; 7436 + 7437 + if (ioa_cfg->sis64) { 7438 + t->init_feedback_reg = base + p->init_feedback_reg; 7439 + t->dump_addr_reg = base + p->dump_addr_reg; 7440 + t->dump_data_reg = base + p->dump_data_reg; 7441 + } 8265 7442 } 8266 7443 8267 7444 /** ··· 8346 7497 init_waitqueue_head(&ioa_cfg->msi_wait_q); 8347 7498 ioa_cfg->msi_received = 0; 8348 7499 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); 8349 - writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg); 7500 + writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32); 8350 7501 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 8351 7502 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 8352 7503 ··· 8357 7508 } else if (ipr_debug) 8358 7509 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq); 8359 7510 8360 - writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg); 7511 + writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32); 8361 7512 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); 8362 7513 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ); 8363 7514 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); ··· 8427 7578 goto out_scsi_host_put; 8428 7579 } 8429 7580 
7581 + /* set SIS 32 or SIS 64 */ 7582 + ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0; 8430 7583 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg; 8431 7584 8432 7585 if (ipr_transop_timeout) ··· 8466 7615 8467 7616 pci_set_master(pdev); 8468 7617 8469 - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 7618 + if (ioa_cfg->sis64) { 7619 + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 7620 + if (rc < 0) { 7621 + dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n"); 7622 + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 7623 + } 7624 + 7625 + } else 7626 + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 7627 + 8470 7628 if (rc < 0) { 8471 7629 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n"); 8472 7630 goto cleanup_nomem; ··· 8517 7657 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg))) 8518 7658 goto cleanup_nomem; 8519 7659 7660 + if (ioa_cfg->sis64) 7661 + ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64) 7662 + + ((sizeof(struct ipr_config_table_entry64) 7663 + * ioa_cfg->max_devs_supported))); 7664 + else 7665 + ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr) 7666 + + ((sizeof(struct ipr_config_table_entry) 7667 + * ioa_cfg->max_devs_supported))); 7668 + 8520 7669 rc = ipr_alloc_mem(ioa_cfg); 8521 7670 if (rc < 0) { 8522 7671 dev_err(&pdev->dev, ··· 8537 7668 * If HRRQ updated interrupt is not masked, or reset alert is set, 8538 7669 * the card is in an unknown state and needs a hard reset 8539 7670 */ 8540 - mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 8541 - interrupts = readl(ioa_cfg->regs.sense_interrupt_reg); 8542 - uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg); 7671 + mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32); 7672 + interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32); 7673 + uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32); 8543 7674 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT)) 8544 7675 ioa_cfg->needs_hard_reset = 1; 8545 7676 if (interrupts & IPR_PCII_ERROR_INTERRUPTS) ··· 8827 7958 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0, 8828 7959 IPR_USE_LONG_TRANSOP_TIMEOUT }, 8829 7960 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, 8830 - PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0, 8831 - IPR_USE_LONG_TRANSOP_TIMEOUT }, 8832 - { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, 8833 7961 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 }, 8834 7962 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, 8835 7963 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0, ··· 8841 7975 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, 8842 7976 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0, 8843 7977 IPR_USE_LONG_TRANSOP_TIMEOUT }, 8844 - { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SCAMP_E, 8845 - PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 8846 - IPR_USE_LONG_TRANSOP_TIMEOUT }, 7978 + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, 7979 + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 }, 7980 + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, 7981 + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 }, 7982 + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, 7983 + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 }, 7984 + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, 7985 + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 }, 7986 + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, 7987 + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 }, 7988 + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, 7989 + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 }, 
7990 + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, 7991 + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0, 0 }, 7992 + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, 7993 + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 }, 8847 7994 { } 8848 7995 }; 8849 7996 MODULE_DEVICE_TABLE(pci, ipr_pci_table); ··· 8876 7997 }; 8877 7998 8878 7999 /** 8000 + * ipr_halt_done - Shutdown prepare completion 8001 + * 8002 + * Return value: 8003 + * none 8004 + **/ 8005 + static void ipr_halt_done(struct ipr_cmnd *ipr_cmd) 8006 + { 8007 + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8008 + 8009 + list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); 8010 + } 8011 + 8012 + /** 8013 + * ipr_halt - Issue shutdown prepare to all adapters 8014 + * 8015 + * Return value: 8016 + * NOTIFY_OK on success / NOTIFY_DONE on failure 8017 + **/ 8018 + static int ipr_halt(struct notifier_block *nb, ulong event, void *buf) 8019 + { 8020 + struct ipr_cmnd *ipr_cmd; 8021 + struct ipr_ioa_cfg *ioa_cfg; 8022 + unsigned long flags = 0; 8023 + 8024 + if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF) 8025 + return NOTIFY_DONE; 8026 + 8027 + spin_lock(&ipr_driver_lock); 8028 + 8029 + list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) { 8030 + spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 8031 + if (!ioa_cfg->allow_cmds) { 8032 + spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 8033 + continue; 8034 + } 8035 + 8036 + ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); 8037 + ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 8038 + ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 8039 + ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN; 8040 + ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL; 8041 + 8042 + ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT); 8043 + spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 8044 + } 8045 + spin_unlock(&ipr_driver_lock); 8046 + 8047 + return NOTIFY_OK; 8048 + } 8049 + 8050 + static struct notifier_block ipr_notifier = { 8051 + ipr_halt, NULL, 0 8052 + }; 8053 + 8054 + /** 8879 8055 * ipr_init - Module entry point 8880 8056 * 8881 8057 * Return value: ··· 8941 8007 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n", 8942 8008 IPR_DRIVER_VERSION, IPR_DRIVER_DATE); 8943 8009 8010 + register_reboot_notifier(&ipr_notifier); 8944 8011 return pci_register_driver(&ipr_driver); 8945 8012 } 8946 8013 ··· 8955 8020 **/ 8956 8021 static void __exit ipr_exit(void) 8957 8022 { 8023 + unregister_reboot_notifier(&ipr_notifier); 8958 8024 pci_unregister_driver(&ipr_driver); 8959 8025 } 8960 8026
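[Editor's note] The ipr.c changes end by adding a reboot notifier: ipr_halt() walks the adapter list and issues an IPR_IOA_SHUTDOWN (prepare for normal) to each IOA on restart, halt, or power-off, and ipr_init()/ipr_exit() register and unregister ipr_notifier. For reference, the bare kernel pattern is sketched below; my_halt, my_notifier, and the quiesce_adapters() helper are made-up names standing in for the driver's own code, not part of it.

    #include <linux/module.h>
    #include <linux/notifier.h>
    #include <linux/reboot.h>

    /* Stand-in for the per-adapter shutdown-prepare loop done by ipr_halt(). */
    static void quiesce_adapters(void)
    {
            /* walk the adapter list and send a shutdown-prepare command */
    }

    static int my_halt(struct notifier_block *nb, unsigned long event, void *buf)
    {
            /* Only act when the system is really going down. */
            if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
                    return NOTIFY_DONE;

            quiesce_adapters();
            return NOTIFY_OK;
    }

    static struct notifier_block my_notifier = {
            .notifier_call = my_halt,
    };

    static int __init my_init(void)
    {
            register_reboot_notifier(&my_notifier);
            return 0;
    }

    static void __exit my_exit(void)
    {
            unregister_reboot_notifier(&my_notifier);
    }

    module_init(my_init);
    module_exit(my_exit);
    MODULE_LICENSE("GPL");

The event check mirrors ipr_halt(): only restart, halt, and power-off are treated as real shutdowns; anything else is passed through with NOTIFY_DONE.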
+363 -104
drivers/scsi/ipr.h
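[Editor's note] In the header below, the fixed-size struct ipr_config_table (dev[IPR_MAX_PHYSICAL_DEVS]) becomes a header followed by a zero-length dev[0] array, which is what lets the allocation code above size the table at runtime as sizeof(header) plus max_devs_supported entries instead of always reserving IPR_MAX_PHYSICAL_DEVS slots. A generic sketch of that sizing idiom follows, with invented names (cfg_table, cfg_entry) and the standard C99 flexible-array spelling dev[] in place of the older GNU dev[0] form used in the header.

    #include <linux/slab.h>
    #include <linux/types.h>

    struct cfg_entry {
            __be32 res_handle;
            u8 data[28];                    /* placeholder payload */
    };

    struct cfg_table {
            u16 num_entries;
            u16 flags;
            struct cfg_entry dev[];         /* sized at allocation time */
    };

    static struct cfg_table *alloc_cfg_table(unsigned int max_devs)
    {
            size_t size = sizeof(struct cfg_table) +
                          max_devs * sizeof(struct cfg_entry);

            /* One allocation covers the header and all entries. */
            return kzalloc(size, GFP_KERNEL);
    }

Keeping header and entries in one contiguous allocation matters here because the adapter fills the config table by DMA; the driver allocates the real table with pci_alloc_consistent() using the same size computation.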
··· 37 37 /* 38 38 * Literals 39 39 */ 40 - #define IPR_DRIVER_VERSION "2.4.3" 41 - #define IPR_DRIVER_DATE "(June 10, 2009)" 40 + #define IPR_DRIVER_VERSION "2.5.0" 41 + #define IPR_DRIVER_DATE "(February 11, 2010)" 42 42 43 43 /* 44 44 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding ··· 55 55 #define IPR_NUM_BASE_CMD_BLKS 100 56 56 57 57 #define PCI_DEVICE_ID_IBM_OBSIDIAN_E 0x0339 58 - #define PCI_DEVICE_ID_IBM_SCAMP_E 0x034A 58 + 59 + #define PCI_DEVICE_ID_IBM_CROC_FPGA_E2 0x033D 60 + #define PCI_DEVICE_ID_IBM_CROC_ASIC_E2 0x034A 59 61 60 62 #define IPR_SUBS_DEV_ID_2780 0x0264 61 63 #define IPR_SUBS_DEV_ID_5702 0x0266 ··· 72 70 #define IPR_SUBS_DEV_ID_572A 0x02C1 73 71 #define IPR_SUBS_DEV_ID_572B 0x02C2 74 72 #define IPR_SUBS_DEV_ID_572F 0x02C3 75 - #define IPR_SUBS_DEV_ID_574D 0x030B 76 73 #define IPR_SUBS_DEV_ID_574E 0x030A 77 74 #define IPR_SUBS_DEV_ID_575B 0x030D 78 75 #define IPR_SUBS_DEV_ID_575C 0x0338 79 - #define IPR_SUBS_DEV_ID_575D 0x033E 80 76 #define IPR_SUBS_DEV_ID_57B3 0x033A 81 77 #define IPR_SUBS_DEV_ID_57B7 0x0360 82 78 #define IPR_SUBS_DEV_ID_57B8 0x02C2 79 + 80 + #define IPR_SUBS_DEV_ID_57B4 0x033B 81 + #define IPR_SUBS_DEV_ID_57B2 0x035F 82 + #define IPR_SUBS_DEV_ID_57C6 0x0357 83 + 84 + #define IPR_SUBS_DEV_ID_57B5 0x033C 85 + #define IPR_SUBS_DEV_ID_57CE 0x035E 86 + #define IPR_SUBS_DEV_ID_57B1 0x0355 87 + 88 + #define IPR_SUBS_DEV_ID_574D 0x0356 89 + #define IPR_SUBS_DEV_ID_575D 0x035D 83 90 84 91 #define IPR_NAME "ipr" 85 92 ··· 129 118 #define IPR_NUM_LOG_HCAMS 2 130 119 #define IPR_NUM_CFG_CHG_HCAMS 2 131 120 #define IPR_NUM_HCAMS (IPR_NUM_LOG_HCAMS + IPR_NUM_CFG_CHG_HCAMS) 121 + 122 + #define IPR_MAX_SIS64_TARGETS_PER_BUS 1024 123 + #define IPR_MAX_SIS64_LUNS_PER_TARGET 0xffffffff 124 + 132 125 #define IPR_MAX_NUM_TARGETS_PER_BUS 256 133 126 #define IPR_MAX_NUM_LUNS_PER_TARGET 256 134 127 #define IPR_MAX_NUM_VSET_LUNS_PER_TARGET 8 ··· 147 132 148 133 /* We need resources for HCAMS, IOA reset, IOA bringdown, and ERP */ 149 134 #define IPR_NUM_INTERNAL_CMD_BLKS (IPR_NUM_HCAMS + \ 150 - ((IPR_NUM_RESET_RELOAD_RETRIES + 1) * 2) + 3) 135 + ((IPR_NUM_RESET_RELOAD_RETRIES + 1) * 2) + 4) 151 136 152 137 #define IPR_MAX_COMMANDS IPR_NUM_BASE_CMD_BLKS 153 138 #define IPR_NUM_CMD_BLKS (IPR_NUM_BASE_CMD_BLKS + \ 154 139 IPR_NUM_INTERNAL_CMD_BLKS) 155 140 156 141 #define IPR_MAX_PHYSICAL_DEVS 192 142 + #define IPR_DEFAULT_SIS64_DEVS 1024 143 + #define IPR_MAX_SIS64_DEVS 4096 157 144 158 145 #define IPR_MAX_SGLIST 64 159 146 #define IPR_IOA_MAX_SECTORS 32767 ··· 190 173 #define IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE 0x01 191 174 #define IPR_HCAM_CDB_OP_CODE_LOG_DATA 0x02 192 175 #define IPR_SET_SUPPORTED_DEVICES 0xFB 176 + #define IPR_SET_ALL_SUPPORTED_DEVICES 0x80 193 177 #define IPR_IOA_SHUTDOWN 0xF7 194 178 #define IPR_WR_BUF_DOWNLOAD_AND_SAVE 0x05 195 179 ··· 239 221 #define IPR_SDT_FMT2_BAR5_SEL 0x5 240 222 #define IPR_SDT_FMT2_EXP_ROM_SEL 0x8 241 223 #define IPR_FMT2_SDT_READY_TO_USE 0xC4D4E3F2 224 + #define IPR_FMT3_SDT_READY_TO_USE 0xC4D4E3F3 242 225 #define IPR_DOORBELL 0x82800000 243 226 #define IPR_RUNTIME_RESET 0x40000000 227 + 228 + #define IPR_IPL_INIT_MIN_STAGE_TIME 5 229 + #define IPR_IPL_INIT_STAGE_UNKNOWN 0x0 230 + #define IPR_IPL_INIT_STAGE_TRANSOP 0xB0000000 231 + #define IPR_IPL_INIT_STAGE_MASK 0xff000000 232 + #define IPR_IPL_INIT_STAGE_TIME_MASK 0x0000ffff 233 + #define IPR_PCII_IPL_STAGE_CHANGE (0x80000000 >> 0) 244 234 245 235 #define IPR_PCII_IOA_TRANS_TO_OPER (0x80000000 >> 0) 246 236 #define IPR_PCII_IOARCB_XFER_FAILED 
(0x80000000 >> 3) ··· 344 318 u8 serial_num[IPR_SERIAL_NUM_LEN]; 345 319 }__attribute__ ((packed)); 346 320 321 + #define IPR_RES_TYPE_AF_DASD 0x00 322 + #define IPR_RES_TYPE_GENERIC_SCSI 0x01 323 + #define IPR_RES_TYPE_VOLUME_SET 0x02 324 + #define IPR_RES_TYPE_REMOTE_AF_DASD 0x03 325 + #define IPR_RES_TYPE_GENERIC_ATA 0x04 326 + #define IPR_RES_TYPE_ARRAY 0x05 327 + #define IPR_RES_TYPE_IOAFP 0xff 328 + 347 329 struct ipr_config_table_entry { 348 330 u8 proto; 349 331 #define IPR_PROTO_SATA 0x02 350 332 #define IPR_PROTO_SATA_ATAPI 0x03 351 333 #define IPR_PROTO_SAS_STP 0x06 352 - #define IPR_PROTO_SAS_STP_ATAPI 0x07 334 + #define IPR_PROTO_SAS_STP_ATAPI 0x07 353 335 u8 array_id; 354 336 u8 flags; 355 - #define IPR_IS_IOA_RESOURCE 0x80 356 - #define IPR_IS_ARRAY_MEMBER 0x20 357 - #define IPR_IS_HOT_SPARE 0x10 358 - 337 + #define IPR_IS_IOA_RESOURCE 0x80 359 338 u8 rsvd_subtype; 360 - #define IPR_RES_SUBTYPE(res) (((res)->cfgte.rsvd_subtype) & 0x0f) 361 - #define IPR_SUBTYPE_AF_DASD 0 362 - #define IPR_SUBTYPE_GENERIC_SCSI 1 363 - #define IPR_SUBTYPE_VOLUME_SET 2 364 - #define IPR_SUBTYPE_GENERIC_ATA 4 365 339 366 - #define IPR_QUEUEING_MODEL(res) ((((res)->cfgte.flags) & 0x70) >> 4) 367 - #define IPR_QUEUE_FROZEN_MODEL 0 340 + #define IPR_QUEUEING_MODEL(res) ((((res)->flags) & 0x70) >> 4) 341 + #define IPR_QUEUE_FROZEN_MODEL 0 368 342 #define IPR_QUEUE_NACA_MODEL 1 369 343 370 344 struct ipr_res_addr res_addr; ··· 373 347 struct ipr_std_inq_data std_inq_data; 374 348 }__attribute__ ((packed, aligned (4))); 375 349 350 + struct ipr_config_table_entry64 { 351 + u8 res_type; 352 + u8 proto; 353 + u8 vset_num; 354 + u8 array_id; 355 + __be16 flags; 356 + __be16 res_flags; 357 + #define IPR_QUEUEING_MODEL64(res) ((((res)->res_flags) & 0x7000) >> 12) 358 + __be32 res_handle; 359 + u8 dev_id_type; 360 + u8 reserved[3]; 361 + __be64 dev_id; 362 + __be64 lun; 363 + __be64 lun_wwn[2]; 364 + #define IPR_MAX_RES_PATH_LENGTH 24 365 + __be64 res_path; 366 + struct ipr_std_inq_data std_inq_data; 367 + u8 reserved2[4]; 368 + __be64 reserved3[2]; // description text 369 + u8 reserved4[8]; 370 + }__attribute__ ((packed, aligned (8))); 371 + 376 372 struct ipr_config_table_hdr { 377 373 u8 num_entries; 378 374 u8 flags; ··· 402 354 __be16 reserved; 403 355 }__attribute__((packed, aligned (4))); 404 356 405 - struct ipr_config_table { 406 - struct ipr_config_table_hdr hdr; 407 - struct ipr_config_table_entry dev[IPR_MAX_PHYSICAL_DEVS]; 357 + struct ipr_config_table_hdr64 { 358 + __be16 num_entries; 359 + __be16 reserved; 360 + u8 flags; 361 + u8 reserved2[11]; 408 362 }__attribute__((packed, aligned (4))); 409 363 364 + struct ipr_config_table { 365 + struct ipr_config_table_hdr hdr; 366 + struct ipr_config_table_entry dev[0]; 367 + }__attribute__((packed, aligned (4))); 368 + 369 + struct ipr_config_table64 { 370 + struct ipr_config_table_hdr64 hdr64; 371 + struct ipr_config_table_entry64 dev[0]; 372 + }__attribute__((packed, aligned (8))); 373 + 374 + struct ipr_config_table_entry_wrapper { 375 + union { 376 + struct ipr_config_table_entry *cfgte; 377 + struct ipr_config_table_entry64 *cfgte64; 378 + } u; 379 + }; 380 + 410 381 struct ipr_hostrcb_cfg_ch_not { 411 - struct ipr_config_table_entry cfgte; 382 + union { 383 + struct ipr_config_table_entry cfgte; 384 + struct ipr_config_table_entry64 cfgte64; 385 + } u; 412 386 u8 reserved[936]; 413 387 }__attribute__((packed, aligned (4))); 414 388 ··· 451 381 #define IPR_RQTYPE_HCAM 0x02 452 382 #define IPR_RQTYPE_ATA_PASSTHRU 0x04 453 383 454 - u8 
luntar_luntrn; 384 + u8 reserved2; 455 385 456 386 u8 flags_hi; 457 387 #define IPR_FLAGS_HI_WRITE_NOT_READ 0x80 ··· 473 403 __be16 timeout; 474 404 }__attribute__ ((packed, aligned(4))); 475 405 476 - struct ipr_ioarcb_ata_regs { 406 + struct ipr_ioarcb_ata_regs { /* 22 bytes */ 477 407 u8 flags; 478 408 #define IPR_ATA_FLAG_PACKET_CMD 0x80 479 409 #define IPR_ATA_FLAG_XFER_TYPE_DMA 0x40 ··· 512 442 __be32 address; 513 443 }__attribute__((packed, aligned (8))); 514 444 445 + struct ipr_ioadl64_desc { 446 + __be32 flags; 447 + __be32 data_len; 448 + __be64 address; 449 + }__attribute__((packed, aligned (16))); 450 + 451 + struct ipr_ata64_ioadl { 452 + struct ipr_ioarcb_ata_regs regs; 453 + u16 reserved[5]; 454 + struct ipr_ioadl64_desc ioadl64[IPR_NUM_IOADL_ENTRIES]; 455 + }__attribute__((packed, aligned (16))); 456 + 515 457 struct ipr_ioarcb_add_data { 516 458 union { 517 459 struct ipr_ioarcb_ata_regs regs; 518 460 struct ipr_ioadl_desc ioadl[5]; 519 461 __be32 add_cmd_parms[10]; 520 - }u; 521 - }__attribute__ ((packed, aligned(4))); 462 + } u; 463 + }__attribute__ ((packed, aligned (4))); 464 + 465 + struct ipr_ioarcb_sis64_add_addr_ecb { 466 + __be64 ioasa_host_pci_addr; 467 + __be64 data_ioadl_addr; 468 + __be64 reserved; 469 + __be32 ext_control_buf[4]; 470 + }__attribute__((packed, aligned (8))); 522 471 523 472 /* IOA Request Control Block 128 bytes */ 524 473 struct ipr_ioarcb { 525 - __be32 ioarcb_host_pci_addr; 526 - __be32 reserved; 474 + union { 475 + __be32 ioarcb_host_pci_addr; 476 + __be64 ioarcb_host_pci_addr64; 477 + } a; 527 478 __be32 res_handle; 528 479 __be32 host_response_handle; 529 480 __be32 reserved1; 530 481 __be32 reserved2; 531 482 __be32 reserved3; 532 483 533 - __be32 write_data_transfer_length; 484 + __be32 data_transfer_length; 534 485 __be32 read_data_transfer_length; 535 486 __be32 write_ioadl_addr; 536 - __be32 write_ioadl_len; 487 + __be32 ioadl_len; 537 488 __be32 read_ioadl_addr; 538 489 __be32 read_ioadl_len; 539 490 ··· 564 473 565 474 struct ipr_cmd_pkt cmd_pkt; 566 475 567 - __be32 add_cmd_parms_len; 568 - struct ipr_ioarcb_add_data add_data; 476 + __be16 add_cmd_parms_offset; 477 + __be16 add_cmd_parms_len; 478 + 479 + union { 480 + struct ipr_ioarcb_add_data add_data; 481 + struct ipr_ioarcb_sis64_add_addr_ecb sis64_addr_data; 482 + } u; 483 + 569 484 }__attribute__((packed, aligned (4))); 570 485 571 486 struct ipr_ioasa_vset { ··· 773 676 struct ipr_ext_vpd cfc_last_with_dev_vpd; 774 677 }__attribute__((packed, aligned (4))); 775 678 679 + struct ipr_hostrcb64_device_data_entry_enhanced { 680 + struct ipr_ext_vpd vpd; 681 + u8 ccin[4]; 682 + u8 res_path[8]; 683 + struct ipr_ext_vpd new_vpd; 684 + u8 new_ccin[4]; 685 + struct ipr_ext_vpd ioa_last_with_dev_vpd; 686 + struct ipr_ext_vpd cfc_last_with_dev_vpd; 687 + }__attribute__((packed, aligned (4))); 688 + 776 689 struct ipr_hostrcb_array_data_entry { 777 690 struct ipr_vpd vpd; 778 691 struct ipr_res_addr expected_dev_res_addr; 779 692 struct ipr_res_addr dev_res_addr; 693 + }__attribute__((packed, aligned (4))); 694 + 695 + struct ipr_hostrcb64_array_data_entry { 696 + struct ipr_ext_vpd vpd; 697 + u8 ccin[4]; 698 + u8 expected_res_path[8]; 699 + u8 res_path[8]; 780 700 }__attribute__((packed, aligned (4))); 781 701 782 702 struct ipr_hostrcb_array_data_entry_enhanced { ··· 847 733 struct ipr_hostrcb_device_data_entry_enhanced dev[3]; 848 734 }__attribute__((packed, aligned (4))); 849 735 736 + struct ipr_hostrcb_type_23_error { 737 + struct ipr_ext_vpd ioa_vpd; 738 + struct ipr_ext_vpd 
cfc_vpd; 739 + __be32 errors_detected; 740 + __be32 errors_logged; 741 + struct ipr_hostrcb64_device_data_entry_enhanced dev[3]; 742 + }__attribute__((packed, aligned (4))); 743 + 850 744 struct ipr_hostrcb_type_04_error { 851 745 struct ipr_vpd ioa_vpd; 852 746 struct ipr_vpd cfc_vpd; ··· 880 758 u8 protection_level[8]; 881 759 __be32 num_entries; 882 760 struct ipr_hostrcb_array_data_entry_enhanced array_member[18]; 761 + }__attribute__((packed, aligned (4))); 762 + 763 + struct ipr_hostrcb_type_24_error { 764 + struct ipr_ext_vpd ioa_vpd; 765 + struct ipr_ext_vpd cfc_vpd; 766 + u8 reserved[2]; 767 + u8 exposed_mode_adn; 768 + #define IPR_INVALID_ARRAY_DEV_NUM 0xff 769 + u8 array_id; 770 + u8 last_res_path[8]; 771 + u8 protection_level[8]; 772 + struct ipr_ext_vpd array_vpd; 773 + u8 description[16]; 774 + u8 reserved2[3]; 775 + u8 num_entries; 776 + struct ipr_hostrcb64_array_data_entry array_member[32]; 883 777 }__attribute__((packed, aligned (4))); 884 778 885 779 struct ipr_hostrcb_type_07_error { ··· 935 797 __be32 wwid[2]; 936 798 }__attribute__((packed, aligned (4))); 937 799 800 + struct ipr_hostrcb64_config_element { 801 + __be16 length; 802 + u8 descriptor_id; 803 + #define IPR_DESCRIPTOR_MASK 0xC0 804 + #define IPR_DESCRIPTOR_SIS64 0x00 805 + 806 + u8 reserved; 807 + u8 type_status; 808 + 809 + u8 reserved2[2]; 810 + u8 link_rate; 811 + 812 + u8 res_path[8]; 813 + __be32 wwid[2]; 814 + }__attribute__((packed, aligned (8))); 815 + 938 816 struct ipr_hostrcb_fabric_desc { 939 817 __be16 length; 940 818 u8 ioa_port; ··· 972 818 struct ipr_hostrcb_config_element elem[1]; 973 819 }__attribute__((packed, aligned (4))); 974 820 821 + struct ipr_hostrcb64_fabric_desc { 822 + __be16 length; 823 + u8 descriptor_id; 824 + 825 + u8 reserved; 826 + u8 path_state; 827 + 828 + u8 reserved2[2]; 829 + u8 res_path[8]; 830 + u8 reserved3[6]; 831 + __be16 num_entries; 832 + struct ipr_hostrcb64_config_element elem[1]; 833 + }__attribute__((packed, aligned (8))); 834 + 975 835 #define for_each_fabric_cfg(fabric, cfg) \ 976 836 for (cfg = (fabric)->elem; \ 977 837 cfg < ((fabric)->elem + be16_to_cpu((fabric)->num_entries)); \ ··· 998 830 struct ipr_hostrcb_fabric_desc desc[1]; 999 831 }__attribute__((packed, aligned (4))); 1000 832 833 + struct ipr_hostrcb_type_30_error { 834 + u8 failure_reason[64]; 835 + u8 reserved[3]; 836 + u8 num_entries; 837 + struct ipr_hostrcb64_fabric_desc desc[1]; 838 + }__attribute__((packed, aligned (4))); 839 + 1001 840 struct ipr_hostrcb_error { 1002 - __be32 failing_dev_ioasc; 1003 - struct ipr_res_addr failing_dev_res_addr; 1004 - __be32 failing_dev_res_handle; 841 + __be32 fd_ioasc; 842 + struct ipr_res_addr fd_res_addr; 843 + __be32 fd_res_handle; 1005 844 __be32 prc; 1006 845 union { 1007 846 struct ipr_hostrcb_type_ff_error type_ff_error; ··· 1024 849 struct ipr_hostrcb_type_20_error type_20_error; 1025 850 } u; 1026 851 }__attribute__((packed, aligned (4))); 852 + 853 + struct ipr_hostrcb64_error { 854 + __be32 fd_ioasc; 855 + __be32 ioa_fw_level; 856 + __be32 fd_res_handle; 857 + __be32 prc; 858 + __be64 fd_dev_id; 859 + __be64 fd_lun; 860 + u8 fd_res_path[8]; 861 + __be64 time_stamp; 862 + u8 reserved[2]; 863 + union { 864 + struct ipr_hostrcb_type_ff_error type_ff_error; 865 + struct ipr_hostrcb_type_12_error type_12_error; 866 + struct ipr_hostrcb_type_17_error type_17_error; 867 + struct ipr_hostrcb_type_23_error type_23_error; 868 + struct ipr_hostrcb_type_24_error type_24_error; 869 + struct ipr_hostrcb_type_30_error type_30_error; 870 + } u; 871 + 
}__attribute__((packed, aligned (8))); 1027 872 1028 873 struct ipr_hostrcb_raw { 1029 874 __be32 data[sizeof(struct ipr_hostrcb_error)/sizeof(__be32)]; ··· 1082 887 #define IPR_HOST_RCB_OVERLAY_ID_16 0x16 1083 888 #define IPR_HOST_RCB_OVERLAY_ID_17 0x17 1084 889 #define IPR_HOST_RCB_OVERLAY_ID_20 0x20 1085 - #define IPR_HOST_RCB_OVERLAY_ID_DEFAULT 0xFF 890 + #define IPR_HOST_RCB_OVERLAY_ID_23 0x23 891 + #define IPR_HOST_RCB_OVERLAY_ID_24 0x24 892 + #define IPR_HOST_RCB_OVERLAY_ID_26 0x26 893 + #define IPR_HOST_RCB_OVERLAY_ID_30 0x30 894 + #define IPR_HOST_RCB_OVERLAY_ID_DEFAULT 0xFF 1086 895 1087 896 u8 reserved1[3]; 1088 897 __be32 ilid; ··· 1096 897 1097 898 union { 1098 899 struct ipr_hostrcb_error error; 900 + struct ipr_hostrcb64_error error64; 1099 901 struct ipr_hostrcb_cfg_ch_not ccn; 1100 902 struct ipr_hostrcb_raw raw; 1101 903 } u; ··· 1107 907 dma_addr_t hostrcb_dma; 1108 908 struct list_head queue; 1109 909 struct ipr_ioa_cfg *ioa_cfg; 910 + char rp_buffer[IPR_MAX_RES_PATH_LENGTH]; 1110 911 }; 1111 912 1112 913 /* IPR smart dump table structures */ 1113 914 struct ipr_sdt_entry { 1114 - __be32 bar_str_offset; 1115 - __be32 end_offset; 1116 - u8 entry_byte; 1117 - u8 reserved[3]; 915 + __be32 start_token; 916 + __be32 end_token; 917 + u8 reserved[4]; 1118 918 1119 919 u8 flags; 1120 920 #define IPR_SDT_ENDIAN 0x80 ··· 1160 960 }; 1161 961 1162 962 struct ipr_resource_entry { 1163 - struct ipr_config_table_entry cfgte; 1164 963 u8 needs_sync_complete:1; 1165 964 u8 in_erp:1; 1166 965 u8 add_to_ml:1; 1167 966 u8 del_from_ml:1; 1168 967 u8 resetting_device:1; 1169 968 969 + u32 bus; /* AKA channel */ 970 + u32 target; /* AKA id */ 971 + u32 lun; 972 + #define IPR_ARRAY_VIRTUAL_BUS 0x1 973 + #define IPR_VSET_VIRTUAL_BUS 0x2 974 + #define IPR_IOAFP_VIRTUAL_BUS 0x3 975 + 976 + #define IPR_GET_RES_PHYS_LOC(res) \ 977 + (((res)->bus << 24) | ((res)->target << 8) | (res)->lun) 978 + 979 + u8 ata_class; 980 + 981 + u8 flags; 982 + __be16 res_flags; 983 + 984 + __be32 type; 985 + 986 + u8 qmodel; 987 + struct ipr_std_inq_data std_inq_data; 988 + 989 + __be32 res_handle; 990 + __be64 dev_id; 991 + struct scsi_lun dev_lun; 992 + u8 res_path[8]; 993 + 994 + struct ipr_ioa_cfg *ioa_cfg; 1170 995 struct scsi_device *sdev; 1171 996 struct ipr_sata_port *sata_port; 1172 997 struct list_head queue; 1173 - }; 998 + }; /* struct ipr_resource_entry */ 1174 999 1175 1000 struct ipr_resource_hdr { 1176 1001 u16 num_entries; 1177 1002 u16 reserved; 1178 - }; 1179 - 1180 - struct ipr_resource_table { 1181 - struct ipr_resource_hdr hdr; 1182 - struct ipr_resource_entry dev[IPR_MAX_PHYSICAL_DEVS]; 1183 1003 }; 1184 1004 1185 1005 struct ipr_misc_cbs { ··· 1214 994 struct ipr_interrupt_offsets { 1215 995 unsigned long set_interrupt_mask_reg; 1216 996 unsigned long clr_interrupt_mask_reg; 997 + unsigned long clr_interrupt_mask_reg32; 1217 998 unsigned long sense_interrupt_mask_reg; 999 + unsigned long sense_interrupt_mask_reg32; 1218 1000 unsigned long clr_interrupt_reg; 1001 + unsigned long clr_interrupt_reg32; 1219 1002 1220 1003 unsigned long sense_interrupt_reg; 1004 + unsigned long sense_interrupt_reg32; 1221 1005 unsigned long ioarrin_reg; 1222 1006 unsigned long sense_uproc_interrupt_reg; 1007 + unsigned long sense_uproc_interrupt_reg32; 1223 1008 unsigned long set_uproc_interrupt_reg; 1009 + unsigned long set_uproc_interrupt_reg32; 1224 1010 unsigned long clr_uproc_interrupt_reg; 1011 + unsigned long clr_uproc_interrupt_reg32; 1012 + 1013 + unsigned long init_feedback_reg; 1014 + 1015 + unsigned 
long dump_addr_reg; 1016 + unsigned long dump_data_reg; 1225 1017 }; 1226 1018 1227 1019 struct ipr_interrupts { 1228 1020 void __iomem *set_interrupt_mask_reg; 1229 1021 void __iomem *clr_interrupt_mask_reg; 1022 + void __iomem *clr_interrupt_mask_reg32; 1230 1023 void __iomem *sense_interrupt_mask_reg; 1024 + void __iomem *sense_interrupt_mask_reg32; 1231 1025 void __iomem *clr_interrupt_reg; 1026 + void __iomem *clr_interrupt_reg32; 1232 1027 1233 1028 void __iomem *sense_interrupt_reg; 1029 + void __iomem *sense_interrupt_reg32; 1234 1030 void __iomem *ioarrin_reg; 1235 1031 void __iomem *sense_uproc_interrupt_reg; 1032 + void __iomem *sense_uproc_interrupt_reg32; 1236 1033 void __iomem *set_uproc_interrupt_reg; 1034 + void __iomem *set_uproc_interrupt_reg32; 1237 1035 void __iomem *clr_uproc_interrupt_reg; 1036 + void __iomem *clr_uproc_interrupt_reg32; 1037 + 1038 + void __iomem *init_feedback_reg; 1039 + 1040 + void __iomem *dump_addr_reg; 1041 + void __iomem *dump_data_reg; 1238 1042 }; 1239 1043 1240 1044 struct ipr_chip_cfg_t { ··· 1273 1029 u16 intr_type; 1274 1030 #define IPR_USE_LSI 0x00 1275 1031 #define IPR_USE_MSI 0x01 1032 + u16 sis_type; 1033 + #define IPR_SIS32 0x00 1034 + #define IPR_SIS64 0x01 1276 1035 const struct ipr_chip_cfg_t *cfg; 1277 1036 }; 1278 1037 ··· 1320 1073 DUMP_OBTAINED 1321 1074 }; 1322 1075 1323 - enum ipr_cache_state { 1324 - CACHE_NONE, 1325 - CACHE_DISABLED, 1326 - CACHE_ENABLED, 1327 - CACHE_INVALID 1328 - }; 1329 - 1330 1076 /* Per-controller data */ 1331 1077 struct ipr_ioa_cfg { 1332 1078 char eye_catcher[8]; ··· 1339 1099 u8 dual_raid:1; 1340 1100 u8 needs_warm_reset:1; 1341 1101 u8 msi_received:1; 1102 + u8 sis64:1; 1342 1103 1343 1104 u8 revid; 1344 1105 1345 - enum ipr_cache_state cache_state; 1106 + /* 1107 + * Bitmaps for SIS64 generated target values 1108 + */ 1109 + unsigned long *target_ids; 1110 + unsigned long *array_ids; 1111 + unsigned long *vset_ids; 1112 + 1346 1113 u16 type; /* CCIN of the card */ 1347 1114 1348 1115 u8 log_level; ··· 1380 1133 1381 1134 char cfg_table_start[8]; 1382 1135 #define IPR_CFG_TBL_START "cfg" 1383 - struct ipr_config_table *cfg_table; 1136 + union { 1137 + struct ipr_config_table *cfg_table; 1138 + struct ipr_config_table64 *cfg_table64; 1139 + } u; 1384 1140 dma_addr_t cfg_table_dma; 1141 + u32 cfg_table_size; 1142 + u32 max_devs_supported; 1385 1143 1386 1144 char resource_table_label[8]; 1387 1145 #define IPR_RES_TABLE_LABEL "res_tbl" ··· 1454 1202 char ipr_cmd_label[8]; 1455 1203 #define IPR_CMD_LABEL "ipr_cmd" 1456 1204 struct ipr_cmnd *ipr_cmnd_list[IPR_NUM_CMD_BLKS]; 1457 - u32 ipr_cmnd_list_dma[IPR_NUM_CMD_BLKS]; 1458 - }; 1205 + dma_addr_t ipr_cmnd_list_dma[IPR_NUM_CMD_BLKS]; 1206 + }; /* struct ipr_ioa_cfg */ 1459 1207 1460 1208 struct ipr_cmnd { 1461 1209 struct ipr_ioarcb ioarcb; 1210 + union { 1211 + struct ipr_ioadl_desc ioadl[IPR_NUM_IOADL_ENTRIES]; 1212 + struct ipr_ioadl64_desc ioadl64[IPR_NUM_IOADL_ENTRIES]; 1213 + struct ipr_ata64_ioadl ata_ioadl; 1214 + } i; 1462 1215 struct ipr_ioasa ioasa; 1463 - struct ipr_ioadl_desc ioadl[IPR_NUM_IOADL_ENTRIES]; 1464 1216 struct list_head queue; 1465 1217 struct scsi_cmnd *scsi_cmd; 1466 1218 struct ata_queued_cmd *qc; ··· 1477 1221 u8 sense_buffer[SCSI_SENSE_BUFFERSIZE]; 1478 1222 dma_addr_t sense_buffer_dma; 1479 1223 unsigned short dma_use_sg; 1480 - dma_addr_t dma_handle; 1224 + dma_addr_t dma_addr; 1481 1225 struct ipr_cmnd *sibling; 1482 1226 union { 1483 1227 enum ipr_shutdown_type shutdown_type; ··· 1570 1314 u32 next_page_index; 1571 
1315 u32 page_offset; 1572 1316 u32 format; 1573 - #define IPR_SDT_FMT2 2 1574 - #define IPR_SDT_UNKNOWN 3 1575 1317 }__attribute__((packed, aligned (4))); 1576 1318 1577 1319 struct ipr_dump { ··· 1631 1377 #define ipr_info(...) printk(KERN_INFO IPR_NAME ": "__VA_ARGS__) 1632 1378 #define ipr_dbg(...) IPR_DBG_CMD(printk(KERN_INFO IPR_NAME ": "__VA_ARGS__)) 1633 1379 1380 + #define ipr_res_printk(level, ioa_cfg, bus, target, lun, fmt, ...) \ 1381 + printk(level IPR_NAME ": %d:%d:%d:%d: " fmt, (ioa_cfg)->host->host_no, \ 1382 + bus, target, lun, ##__VA_ARGS__) 1383 + 1384 + #define ipr_res_err(ioa_cfg, res, fmt, ...) \ 1385 + ipr_res_printk(KERN_ERR, ioa_cfg, (res)->bus, (res)->target, (res)->lun, fmt, ##__VA_ARGS__) 1386 + 1634 1387 #define ipr_ra_printk(level, ioa_cfg, ra, fmt, ...) \ 1635 1388 printk(level IPR_NAME ": %d:%d:%d:%d: " fmt, (ioa_cfg)->host->host_no, \ 1636 1389 (ra).bus, (ra).target, (ra).lun, ##__VA_ARGS__) 1637 1390 1638 1391 #define ipr_ra_err(ioa_cfg, ra, fmt, ...) \ 1639 1392 ipr_ra_printk(KERN_ERR, ioa_cfg, ra, fmt, ##__VA_ARGS__) 1640 - 1641 - #define ipr_res_err(ioa_cfg, res, fmt, ...) \ 1642 - ipr_ra_err(ioa_cfg, (res)->cfgte.res_addr, fmt, ##__VA_ARGS__) 1643 1393 1644 1394 #define ipr_phys_res_err(ioa_cfg, res, fmt, ...) \ 1645 1395 { \ ··· 1657 1399 } 1658 1400 1659 1401 #define ipr_hcam_err(hostrcb, fmt, ...) \ 1660 - { \ 1661 - if (ipr_is_device(&(hostrcb)->hcam.u.error.failing_dev_res_addr)) { \ 1662 - ipr_ra_err((hostrcb)->ioa_cfg, \ 1663 - (hostrcb)->hcam.u.error.failing_dev_res_addr, \ 1664 - fmt, ##__VA_ARGS__); \ 1665 - } else { \ 1666 - dev_err(&(hostrcb)->ioa_cfg->pdev->dev, fmt, ##__VA_ARGS__); \ 1667 - } \ 1402 + { \ 1403 + if (ipr_is_device(hostrcb)) { \ 1404 + if ((hostrcb)->ioa_cfg->sis64) { \ 1405 + printk(KERN_ERR IPR_NAME ": %s: " fmt, \ 1406 + ipr_format_resource_path(&hostrcb->hcam.u.error64.fd_res_path[0], \ 1407 + &hostrcb->rp_buffer[0]), \ 1408 + __VA_ARGS__); \ 1409 + } else { \ 1410 + ipr_ra_err((hostrcb)->ioa_cfg, \ 1411 + (hostrcb)->hcam.u.error.fd_res_addr, \ 1412 + fmt, __VA_ARGS__); \ 1413 + } \ 1414 + } else { \ 1415 + dev_err(&(hostrcb)->ioa_cfg->pdev->dev, fmt, __VA_ARGS__); \ 1416 + } \ 1668 1417 } 1669 1418 1670 1419 #define ipr_trace ipr_dbg("%s: %s: Line: %d\n",\ ··· 1697 1432 **/ 1698 1433 static inline int ipr_is_ioa_resource(struct ipr_resource_entry *res) 1699 1434 { 1700 - return (res->cfgte.flags & IPR_IS_IOA_RESOURCE) ? 
1 : 0; 1435 + return res->type == IPR_RES_TYPE_IOAFP; 1701 1436 } 1702 1437 1703 1438 /** ··· 1709 1444 **/ 1710 1445 static inline int ipr_is_af_dasd_device(struct ipr_resource_entry *res) 1711 1446 { 1712 - if (IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data) && 1713 - !ipr_is_ioa_resource(res) && 1714 - IPR_RES_SUBTYPE(res) == IPR_SUBTYPE_AF_DASD) 1715 - return 1; 1716 - else 1717 - return 0; 1447 + return res->type == IPR_RES_TYPE_AF_DASD || 1448 + res->type == IPR_RES_TYPE_REMOTE_AF_DASD; 1718 1449 } 1719 1450 1720 1451 /** ··· 1722 1461 **/ 1723 1462 static inline int ipr_is_vset_device(struct ipr_resource_entry *res) 1724 1463 { 1725 - if (IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data) && 1726 - !ipr_is_ioa_resource(res) && 1727 - IPR_RES_SUBTYPE(res) == IPR_SUBTYPE_VOLUME_SET) 1728 - return 1; 1729 - else 1730 - return 0; 1464 + return res->type == IPR_RES_TYPE_VOLUME_SET; 1731 1465 } 1732 1466 1733 1467 /** ··· 1734 1478 **/ 1735 1479 static inline int ipr_is_gscsi(struct ipr_resource_entry *res) 1736 1480 { 1737 - if (!ipr_is_ioa_resource(res) && 1738 - IPR_RES_SUBTYPE(res) == IPR_SUBTYPE_GENERIC_SCSI) 1739 - return 1; 1740 - else 1741 - return 0; 1481 + return res->type == IPR_RES_TYPE_GENERIC_SCSI; 1742 1482 } 1743 1483 1744 1484 /** ··· 1747 1495 static inline int ipr_is_scsi_disk(struct ipr_resource_entry *res) 1748 1496 { 1749 1497 if (ipr_is_af_dasd_device(res) || 1750 - (ipr_is_gscsi(res) && IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data))) 1498 + (ipr_is_gscsi(res) && IPR_IS_DASD_DEVICE(res->std_inq_data))) 1751 1499 return 1; 1752 1500 else 1753 1501 return 0; ··· 1762 1510 **/ 1763 1511 static inline int ipr_is_gata(struct ipr_resource_entry *res) 1764 1512 { 1765 - if (!ipr_is_ioa_resource(res) && 1766 - IPR_RES_SUBTYPE(res) == IPR_SUBTYPE_GENERIC_ATA) 1767 - return 1; 1768 - else 1769 - return 0; 1513 + return res->type == IPR_RES_TYPE_GENERIC_ATA; 1770 1514 } 1771 1515 1772 1516 /** ··· 1774 1526 **/ 1775 1527 static inline int ipr_is_naca_model(struct ipr_resource_entry *res) 1776 1528 { 1777 - if (ipr_is_gscsi(res) && IPR_QUEUEING_MODEL(res) == IPR_QUEUE_NACA_MODEL) 1529 + if (ipr_is_gscsi(res) && res->qmodel == IPR_QUEUE_NACA_MODEL) 1778 1530 return 1; 1779 1531 return 0; 1780 1532 } 1781 1533 1782 1534 /** 1783 - * ipr_is_device - Determine if resource address is that of a device 1784 - * @res_addr: resource address struct 1535 + * ipr_is_device - Determine if the hostrcb structure is related to a device 1536 + * @hostrcb: host resource control blocks struct 1785 1537 * 1786 1538 * Return value: 1787 1539 * 1 if AF / 0 if not AF 1788 1540 **/ 1789 - static inline int ipr_is_device(struct ipr_res_addr *res_addr) 1541 + static inline int ipr_is_device(struct ipr_hostrcb *hostrcb) 1790 1542 { 1791 - if ((res_addr->bus < IPR_MAX_NUM_BUSES) && 1792 - (res_addr->target < (IPR_MAX_NUM_TARGETS_PER_BUS - 1))) 1793 - return 1; 1543 + struct ipr_res_addr *res_addr; 1544 + u8 *res_path; 1794 1545 1546 + if (hostrcb->ioa_cfg->sis64) { 1547 + res_path = &hostrcb->hcam.u.error64.fd_res_path[0]; 1548 + if ((res_path[0] == 0x00 || res_path[0] == 0x80 || 1549 + res_path[0] == 0x81) && res_path[2] != 0xFF) 1550 + return 1; 1551 + } else { 1552 + res_addr = &hostrcb->hcam.u.error.fd_res_addr; 1553 + 1554 + if ((res_addr->bus < IPR_MAX_NUM_BUSES) && 1555 + (res_addr->target < (IPR_MAX_NUM_TARGETS_PER_BUS - 1))) 1556 + return 1; 1557 + } 1795 1558 return 0; 1796 1559 } 1797 1560
+1 -1
drivers/scsi/iscsi_tcp.c
··· 874 874 .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, 875 875 .eh_abort_handler = iscsi_eh_abort, 876 876 .eh_device_reset_handler= iscsi_eh_device_reset, 877 - .eh_target_reset_handler= iscsi_eh_target_reset, 877 + .eh_target_reset_handler = iscsi_eh_recover_target, 878 878 .use_clustering = DISABLE_CLUSTERING, 879 879 .slave_alloc = iscsi_sw_tcp_slave_alloc, 880 880 .slave_configure = iscsi_sw_tcp_slave_configure,
+19 -4
drivers/scsi/libiscsi.c
··· 2338 2338 * This function will wait for a relogin, session termination from 2339 2339 * userspace, or a recovery/replacement timeout. 2340 2340 */ 2341 - static int iscsi_eh_session_reset(struct scsi_cmnd *sc) 2341 + int iscsi_eh_session_reset(struct scsi_cmnd *sc) 2342 2342 { 2343 2343 struct iscsi_cls_session *cls_session; 2344 2344 struct iscsi_session *session; ··· 2389 2389 mutex_unlock(&session->eh_mutex); 2390 2390 return SUCCESS; 2391 2391 } 2392 + EXPORT_SYMBOL_GPL(iscsi_eh_session_reset); 2392 2393 2393 2394 static void iscsi_prep_tgt_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr) 2394 2395 { ··· 2404 2403 * iscsi_eh_target_reset - reset target 2405 2404 * @sc: scsi command 2406 2405 * 2407 - * This will attempt to send a warm target reset. If that fails 2408 - * then we will drop the session and attempt ERL0 recovery. 2406 + * This will attempt to send a warm target reset. 2409 2407 */ 2410 2408 int iscsi_eh_target_reset(struct scsi_cmnd *sc) 2411 2409 { ··· 2476 2476 ISCSI_DBG_EH(session, "tgt %s reset result = %s\n", session->targetname, 2477 2477 rc == SUCCESS ? "SUCCESS" : "FAILED"); 2478 2478 mutex_unlock(&session->eh_mutex); 2479 + return rc; 2480 + } 2481 + EXPORT_SYMBOL_GPL(iscsi_eh_target_reset); 2479 2482 2483 + /** 2484 + * iscsi_eh_recover_target - reset target and possibly the session 2485 + * @sc: scsi command 2486 + * 2487 + * This will attempt to send a warm target reset. If that fails, 2488 + * we will escalate to ERL0 session recovery. 2489 + */ 2490 + int iscsi_eh_recover_target(struct scsi_cmnd *sc) 2491 + { 2492 + int rc; 2493 + 2494 + rc = iscsi_eh_target_reset(sc); 2480 2495 if (rc == FAILED) 2481 2496 rc = iscsi_eh_session_reset(sc); 2482 2497 return rc; 2483 2498 } 2484 - EXPORT_SYMBOL_GPL(iscsi_eh_target_reset); 2499 + EXPORT_SYMBOL_GPL(iscsi_eh_recover_target); 2485 2500 2486 2501 /* 2487 2502 * Pre-allocate a pool of @max items of @item_size. By default, the pool
+9 -1
drivers/scsi/lpfc/lpfc.h
··· 37 37 the NameServer before giving up. */ 38 38 #define LPFC_CMD_PER_LUN 3 /* max outstanding cmds per lun */ 39 39 #define LPFC_DEFAULT_SG_SEG_CNT 64 /* sg element count per scsi cmnd */ 40 + #define LPFC_DEFAULT_MENLO_SG_SEG_CNT 128 /* sg element count per scsi 41 + cmnd for menlo needs nearly twice as for firmware 42 + downloads using bsg */ 40 43 #define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */ 41 44 #define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */ 42 45 #define LPFC_MAX_PROT_SG_SEG_CNT 4096 /* prot sg element count per scsi cmd*/ ··· 512 509 int (*lpfc_hba_down_link) 513 510 (struct lpfc_hba *); 514 511 515 - 516 512 /* SLI4 specific HBA data structure */ 517 513 struct lpfc_sli4_hba sli4_hba; 518 514 ··· 625 623 uint32_t cfg_log_verbose; 626 624 uint32_t cfg_aer_support; 627 625 uint32_t cfg_suppress_link_up; 626 + #define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */ 627 + #define LPFC_DELAY_INIT_LINK 1 /* layered driver hold off */ 628 + #define LPFC_DELAY_INIT_LINK_INDEFINITELY 2 /* wait, manual intervention */ 628 629 629 630 lpfc_vpd_t vpd; /* vital product data */ 630 631 ··· 809 804 struct list_head ct_ev_waiters; 810 805 struct unsol_rcv_ct_ctx ct_ctx[64]; 811 806 uint32_t ctx_idx; 807 + 808 + uint8_t menlo_flag; /* menlo generic flags */ 809 + #define HBA_MENLO_SUPPORT 0x1 /* HBA supports menlo commands */ 812 810 }; 813 811 814 812 static inline struct Scsi_Host *
+4 -3
drivers/scsi/lpfc/lpfc_attr.c
··· 1939 1939 # 0x2 = never bring up link 1940 1940 # Default value is 0. 1941 1941 */ 1942 - LPFC_ATTR_R(suppress_link_up, 0, 0, 2, "Suppress Link Up at initialization"); 1942 + LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK, 1943 + LPFC_DELAY_INIT_LINK_INDEFINITELY, 1944 + "Suppress Link Up at initialization"); 1943 1945 1944 1946 /* 1945 1947 # lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear ··· 1968 1966 { 1969 1967 struct Scsi_Host *shost = class_to_shost(dev); 1970 1968 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 1971 - int val = 0; 1972 - val = vport->cfg_devloss_tmo; 1969 + 1973 1970 return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo); 1974 1971 } 1975 1972
+332
drivers/scsi/lpfc/lpfc_bsg.c
··· 83 83 struct fc_bsg_job *set_job; 84 84 }; 85 85 86 + #define MENLO_DID 0x0000FC0E 87 + 88 + struct lpfc_bsg_menlo { 89 + struct lpfc_iocbq *cmdiocbq; 90 + struct lpfc_iocbq *rspiocbq; 91 + struct lpfc_dmabuf *bmp; 92 + 93 + /* job waiting for this iocb to finish */ 94 + struct fc_bsg_job *set_job; 95 + }; 96 + 86 97 #define TYPE_EVT 1 87 98 #define TYPE_IOCB 2 88 99 #define TYPE_MBOX 3 100 + #define TYPE_MENLO 4 89 101 struct bsg_job_data { 90 102 uint32_t type; 91 103 union { 92 104 struct lpfc_bsg_event *evt; 93 105 struct lpfc_bsg_iocb iocb; 94 106 struct lpfc_bsg_mbox mbox; 107 + struct lpfc_bsg_menlo menlo; 95 108 } context_un; 96 109 }; 97 110 ··· 2469 2456 case MBX_PORT_IOV_CONTROL: 2470 2457 break; 2471 2458 case MBX_SET_VARIABLE: 2459 + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2460 + "1226 mbox: set_variable 0x%x, 0x%x\n", 2461 + mb->un.varWords[0], 2462 + mb->un.varWords[1]); 2463 + if ((mb->un.varWords[0] == SETVAR_MLOMNT) 2464 + && (mb->un.varWords[1] == 1)) { 2465 + phba->wait_4_mlo_maint_flg = 1; 2466 + } else if (mb->un.varWords[0] == SETVAR_MLORST) { 2467 + phba->link_flag &= ~LS_LOOPBACK_MODE; 2468 + phba->fc_topology = TOPOLOGY_PT_PT; 2469 + } 2470 + break; 2472 2471 case MBX_RUN_BIU_DIAG64: 2473 2472 case MBX_READ_EVENT_LOG: 2474 2473 case MBX_READ_SPARM64: ··· 2663 2638 } 2664 2639 2665 2640 /** 2641 + * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler 2642 + * @phba: Pointer to HBA context object. 2643 + * @cmdiocbq: Pointer to command iocb. 2644 + * @rspiocbq: Pointer to response iocb. 2645 + * 2646 + * This function is the completion handler for iocbs issued using 2647 + * lpfc_menlo_cmd function. This function is called by the 2648 + * ring event handler function without any lock held. This function 2649 + * can be called from both worker thread context and interrupt 2650 + * context. This function also can be called from another thread which 2651 + * cleans up the SLI layer objects. 2652 + * This function copies the contents of the response iocb to the 2653 + * response iocb memory object provided by the caller of 2654 + * lpfc_sli_issue_iocb_wait and then wakes up the thread which 2655 + * sleeps for the iocb completion. 
2656 + **/ 2657 + static void 2658 + lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba, 2659 + struct lpfc_iocbq *cmdiocbq, 2660 + struct lpfc_iocbq *rspiocbq) 2661 + { 2662 + struct bsg_job_data *dd_data; 2663 + struct fc_bsg_job *job; 2664 + IOCB_t *rsp; 2665 + struct lpfc_dmabuf *bmp; 2666 + struct lpfc_bsg_menlo *menlo; 2667 + unsigned long flags; 2668 + struct menlo_response *menlo_resp; 2669 + int rc = 0; 2670 + 2671 + spin_lock_irqsave(&phba->ct_ev_lock, flags); 2672 + dd_data = cmdiocbq->context1; 2673 + if (!dd_data) { 2674 + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 2675 + return; 2676 + } 2677 + 2678 + menlo = &dd_data->context_un.menlo; 2679 + job = menlo->set_job; 2680 + job->dd_data = NULL; /* so timeout handler does not reply */ 2681 + 2682 + spin_lock_irqsave(&phba->hbalock, flags); 2683 + cmdiocbq->iocb_flag |= LPFC_IO_WAKE; 2684 + if (cmdiocbq->context2 && rspiocbq) 2685 + memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, 2686 + &rspiocbq->iocb, sizeof(IOCB_t)); 2687 + spin_unlock_irqrestore(&phba->hbalock, flags); 2688 + 2689 + bmp = menlo->bmp; 2690 + rspiocbq = menlo->rspiocbq; 2691 + rsp = &rspiocbq->iocb; 2692 + 2693 + pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, 2694 + job->request_payload.sg_cnt, DMA_TO_DEVICE); 2695 + pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, 2696 + job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 2697 + 2698 + /* always return the xri, this would be used in the case 2699 + * of a menlo download to allow the data to be sent as a continuation 2700 + * of the exchange. 2701 + */ 2702 + menlo_resp = (struct menlo_response *) 2703 + job->reply->reply_data.vendor_reply.vendor_rsp; 2704 + menlo_resp->xri = rsp->ulpContext; 2705 + if (rsp->ulpStatus) { 2706 + if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { 2707 + switch (rsp->un.ulpWord[4] & 0xff) { 2708 + case IOERR_SEQUENCE_TIMEOUT: 2709 + rc = -ETIMEDOUT; 2710 + break; 2711 + case IOERR_INVALID_RPI: 2712 + rc = -EFAULT; 2713 + break; 2714 + default: 2715 + rc = -EACCES; 2716 + break; 2717 + } 2718 + } else 2719 + rc = -EACCES; 2720 + } else 2721 + job->reply->reply_payload_rcv_len = 2722 + rsp->un.genreq64.bdl.bdeSize; 2723 + 2724 + lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 2725 + lpfc_sli_release_iocbq(phba, rspiocbq); 2726 + lpfc_sli_release_iocbq(phba, cmdiocbq); 2727 + kfree(bmp); 2728 + kfree(dd_data); 2729 + /* make error code available to userspace */ 2730 + job->reply->result = rc; 2731 + /* complete the job back to userspace */ 2732 + job->job_done(job); 2733 + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 2734 + return; 2735 + } 2736 + 2737 + /** 2738 + * lpfc_menlo_cmd - send an ioctl for menlo hardware 2739 + * @job: fc_bsg_job to handle 2740 + * 2741 + * This function issues a gen request 64 CR ioctl for all menlo cmd requests, 2742 + * all the command completions will return the xri for the command. 2743 + * For menlo data requests a gen request 64 CX is used to continue the exchange 2744 + * supplied in the menlo request header xri field. 
2745 + **/ 2746 + static int 2747 + lpfc_menlo_cmd(struct fc_bsg_job *job) 2748 + { 2749 + struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 2750 + struct lpfc_hba *phba = vport->phba; 2751 + struct lpfc_iocbq *cmdiocbq, *rspiocbq; 2752 + IOCB_t *cmd, *rsp; 2753 + int rc = 0; 2754 + struct menlo_command *menlo_cmd; 2755 + struct menlo_response *menlo_resp; 2756 + struct lpfc_dmabuf *bmp = NULL; 2757 + int request_nseg; 2758 + int reply_nseg; 2759 + struct scatterlist *sgel = NULL; 2760 + int numbde; 2761 + dma_addr_t busaddr; 2762 + struct bsg_job_data *dd_data; 2763 + struct ulp_bde64 *bpl = NULL; 2764 + 2765 + /* in case no data is returned return just the return code */ 2766 + job->reply->reply_payload_rcv_len = 0; 2767 + 2768 + if (job->request_len < 2769 + sizeof(struct fc_bsg_request) + 2770 + sizeof(struct menlo_command)) { 2771 + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 2772 + "2784 Received MENLO_CMD request below " 2773 + "minimum size\n"); 2774 + rc = -ERANGE; 2775 + goto no_dd_data; 2776 + } 2777 + 2778 + if (job->reply_len < 2779 + sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) { 2780 + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 2781 + "2785 Received MENLO_CMD reply below " 2782 + "minimum size\n"); 2783 + rc = -ERANGE; 2784 + goto no_dd_data; 2785 + } 2786 + 2787 + if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) { 2788 + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 2789 + "2786 Adapter does not support menlo " 2790 + "commands\n"); 2791 + rc = -EPERM; 2792 + goto no_dd_data; 2793 + } 2794 + 2795 + menlo_cmd = (struct menlo_command *) 2796 + job->request->rqst_data.h_vendor.vendor_cmd; 2797 + 2798 + menlo_resp = (struct menlo_response *) 2799 + job->reply->reply_data.vendor_reply.vendor_rsp; 2800 + 2801 + /* allocate our bsg tracking structure */ 2802 + dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); 2803 + if (!dd_data) { 2804 + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 2805 + "2787 Failed allocation of dd_data\n"); 2806 + rc = -ENOMEM; 2807 + goto no_dd_data; 2808 + } 2809 + 2810 + bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 2811 + if (!bmp) { 2812 + rc = -ENOMEM; 2813 + goto free_dd; 2814 + } 2815 + 2816 + cmdiocbq = lpfc_sli_get_iocbq(phba); 2817 + if (!cmdiocbq) { 2818 + rc = -ENOMEM; 2819 + goto free_bmp; 2820 + } 2821 + 2822 + rspiocbq = lpfc_sli_get_iocbq(phba); 2823 + if (!rspiocbq) { 2824 + rc = -ENOMEM; 2825 + goto free_cmdiocbq; 2826 + } 2827 + 2828 + rsp = &rspiocbq->iocb; 2829 + 2830 + bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); 2831 + if (!bmp->virt) { 2832 + rc = -ENOMEM; 2833 + goto free_rspiocbq; 2834 + } 2835 + 2836 + INIT_LIST_HEAD(&bmp->list); 2837 + bpl = (struct ulp_bde64 *) bmp->virt; 2838 + request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list, 2839 + job->request_payload.sg_cnt, DMA_TO_DEVICE); 2840 + for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) { 2841 + busaddr = sg_dma_address(sgel); 2842 + bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 2843 + bpl->tus.f.bdeSize = sg_dma_len(sgel); 2844 + bpl->tus.w = cpu_to_le32(bpl->tus.w); 2845 + bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr)); 2846 + bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); 2847 + bpl++; 2848 + } 2849 + 2850 + reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list, 2851 + job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 2852 + for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) { 2853 + busaddr = sg_dma_address(sgel); 2854 + bpl->tus.f.bdeFlags = 
BUFF_TYPE_BDE_64I; 2855 + bpl->tus.f.bdeSize = sg_dma_len(sgel); 2856 + bpl->tus.w = cpu_to_le32(bpl->tus.w); 2857 + bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr)); 2858 + bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); 2859 + bpl++; 2860 + } 2861 + 2862 + cmd = &cmdiocbq->iocb; 2863 + cmd->un.genreq64.bdl.ulpIoTag32 = 0; 2864 + cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys); 2865 + cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys); 2866 + cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; 2867 + cmd->un.genreq64.bdl.bdeSize = 2868 + (request_nseg + reply_nseg) * sizeof(struct ulp_bde64); 2869 + cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA); 2870 + cmd->un.genreq64.w5.hcsw.Dfctl = 0; 2871 + cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD; 2872 + cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */ 2873 + cmd->ulpBdeCount = 1; 2874 + cmd->ulpClass = CLASS3; 2875 + cmd->ulpOwner = OWN_CHIP; 2876 + cmd->ulpLe = 1; /* Limited Edition */ 2877 + cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; 2878 + cmdiocbq->vport = phba->pport; 2879 + /* We want the firmware to timeout before we do */ 2880 + cmd->ulpTimeout = MENLO_TIMEOUT - 5; 2881 + cmdiocbq->context3 = bmp; 2882 + cmdiocbq->context2 = rspiocbq; 2883 + cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp; 2884 + cmdiocbq->context1 = dd_data; 2885 + cmdiocbq->context2 = rspiocbq; 2886 + if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) { 2887 + cmd->ulpCommand = CMD_GEN_REQUEST64_CR; 2888 + cmd->ulpPU = MENLO_PU; /* 3 */ 2889 + cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */ 2890 + cmd->ulpContext = MENLO_CONTEXT; /* 0 */ 2891 + } else { 2892 + cmd->ulpCommand = CMD_GEN_REQUEST64_CX; 2893 + cmd->ulpPU = 1; 2894 + cmd->un.ulpWord[4] = 0; 2895 + cmd->ulpContext = menlo_cmd->xri; 2896 + } 2897 + 2898 + dd_data->type = TYPE_MENLO; 2899 + dd_data->context_un.menlo.cmdiocbq = cmdiocbq; 2900 + dd_data->context_un.menlo.rspiocbq = rspiocbq; 2901 + dd_data->context_un.menlo.set_job = job; 2902 + dd_data->context_un.menlo.bmp = bmp; 2903 + 2904 + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 2905 + MENLO_TIMEOUT - 5); 2906 + if (rc == IOCB_SUCCESS) 2907 + return 0; /* done for now */ 2908 + 2909 + /* iocb failed so cleanup */ 2910 + pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, 2911 + job->request_payload.sg_cnt, DMA_TO_DEVICE); 2912 + pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, 2913 + job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 2914 + 2915 + lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 2916 + 2917 + free_rspiocbq: 2918 + lpfc_sli_release_iocbq(phba, rspiocbq); 2919 + free_cmdiocbq: 2920 + lpfc_sli_release_iocbq(phba, cmdiocbq); 2921 + free_bmp: 2922 + kfree(bmp); 2923 + free_dd: 2924 + kfree(dd_data); 2925 + no_dd_data: 2926 + /* make error code available to userspace */ 2927 + job->reply->result = rc; 2928 + job->dd_data = NULL; 2929 + return rc; 2930 + } 2931 + /** 2666 2932 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job 2667 2933 * @job: fc_bsg_job to handle 2668 2934 **/ ··· 2984 2668 break; 2985 2669 case LPFC_BSG_VENDOR_MBOX: 2986 2670 rc = lpfc_bsg_mbox_cmd(job); 2671 + break; 2672 + case LPFC_BSG_VENDOR_MENLO_CMD: 2673 + case LPFC_BSG_VENDOR_MENLO_DATA: 2674 + rc = lpfc_menlo_cmd(job); 2987 2675 break; 2988 2676 default: 2989 2677 rc = -EINVAL; ··· 3048 2728 struct lpfc_bsg_event *evt; 3049 2729 struct lpfc_bsg_iocb *iocb; 3050 2730 struct lpfc_bsg_mbox *mbox; 2731 + struct lpfc_bsg_menlo *menlo; 3051 2732 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 3052 2733 struct 
bsg_job_data *dd_data; 3053 2734 unsigned long flags; ··· 3095 2774 job->reply->result = -EAGAIN; 3096 2775 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3097 2776 job->job_done(job); 2777 + break; 2778 + case TYPE_MENLO: 2779 + menlo = &dd_data->context_un.menlo; 2780 + cmdiocb = menlo->cmdiocbq; 2781 + /* hint to completion handler that the job timed out */ 2782 + job->reply->result = -EAGAIN; 2783 + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 2784 + /* this will call our completion handler */ 2785 + spin_lock_irq(&phba->hbalock); 2786 + lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); 2787 + spin_unlock_irq(&phba->hbalock); 3098 2788 break; 3099 2789 default: 3100 2790 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+12
drivers/scsi/lpfc/lpfc_bsg.h
··· 31 31 #define LPFC_BSG_VENDOR_DIAG_TEST 5 32 32 #define LPFC_BSG_VENDOR_GET_MGMT_REV 6 33 33 #define LPFC_BSG_VENDOR_MBOX 7 34 + #define LPFC_BSG_VENDOR_MENLO_CMD 8 35 + #define LPFC_BSG_VENDOR_MENLO_DATA 9 34 36 35 37 struct set_ct_event { 36 38 uint32_t command; ··· 96 94 uint32_t inExtWLen; 97 95 uint32_t outExtWLen; 98 96 uint8_t mbOffset; 97 + }; 98 + 99 + /* Used for menlo command or menlo data. The xri is only used for menlo data */ 100 + struct menlo_command { 101 + uint32_t cmd; 102 + uint32_t xri; 103 + }; 104 + 105 + struct menlo_response { 106 + uint32_t xri; /* return the xri of the iocb exchange */ 99 107 }; 100 108
+6 -1
drivers/scsi/lpfc/lpfc_crtn.h
··· 63 63 void lpfc_port_link_failure(struct lpfc_vport *); 64 64 void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *); 65 65 void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); 66 + void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *); 66 67 void lpfc_retry_pport_discovery(struct lpfc_hba *); 67 68 68 69 void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); ··· 222 221 void lpfc_unregister_unused_fcf(struct lpfc_hba *); 223 222 int lpfc_sli4_redisc_fcf_table(struct lpfc_hba *); 224 223 void lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *); 224 + void lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *); 225 + uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *); 226 + int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t); 227 + void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t); 225 228 226 229 int lpfc_mem_alloc(struct lpfc_hba *, int align); 227 230 void lpfc_mem_free(struct lpfc_hba *); ··· 390 385 int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int); 391 386 void lpfc_start_fdiscs(struct lpfc_hba *phba); 392 387 struct lpfc_vport *lpfc_find_vport_by_vpid(struct lpfc_hba *, uint16_t); 393 - 388 + struct lpfc_sglq *__lpfc_get_active_sglq(struct lpfc_hba *, uint16_t); 394 389 #define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code) 395 390 #define HBA_EVENT_RSCN 5 396 391 #define HBA_EVENT_LINK_UP 2
+121 -21
drivers/scsi/lpfc/lpfc_els.c
··· 771 771 struct lpfc_nodelist *ndlp = cmdiocb->context1; 772 772 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp; 773 773 struct serv_parm *sp; 774 + uint16_t fcf_index; 774 775 int rc; 775 776 776 777 /* Check to see if link went down during discovery */ ··· 789 788 vport->port_state); 790 789 791 790 if (irsp->ulpStatus) { 791 + /* 792 + * In case of FIP mode, perform round robin FCF failover 793 + * due to new FCF discovery 794 + */ 795 + if ((phba->hba_flag & HBA_FIP_SUPPORT) && 796 + (phba->fcf.fcf_flag & FCF_DISCOVERY)) { 797 + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, 798 + "2611 FLOGI failed on registered " 799 + "FCF record fcf_index:%d, trying " 800 + "to perform round robin failover\n", 801 + phba->fcf.current_rec.fcf_indx); 802 + fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); 803 + if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) { 804 + /* 805 + * Exhausted the eligible FCF record list, 806 + * fail through to retry FLOGI on current 807 + * FCF record. 808 + */ 809 + lpfc_printf_log(phba, KERN_WARNING, 810 + LOG_FIP | LOG_ELS, 811 + "2760 FLOGI exhausted FCF " 812 + "round robin failover list, " 813 + "retry FLOGI on the current " 814 + "registered FCF index:%d\n", 815 + phba->fcf.current_rec.fcf_indx); 816 + spin_lock_irq(&phba->hbalock); 817 + phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 818 + spin_unlock_irq(&phba->hbalock); 819 + } else { 820 + rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, 821 + fcf_index); 822 + if (rc) { 823 + lpfc_printf_log(phba, KERN_WARNING, 824 + LOG_FIP | LOG_ELS, 825 + "2761 FLOGI round " 826 + "robin FCF failover " 827 + "read FCF failed " 828 + "rc:x%x, fcf_index:" 829 + "%d\n", rc, 830 + phba->fcf.current_rec.fcf_indx); 831 + spin_lock_irq(&phba->hbalock); 832 + phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 833 + spin_unlock_irq(&phba->hbalock); 834 + } else 835 + goto out; 836 + } 837 + } 838 + 792 839 /* Check for retry */ 793 840 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 794 841 goto out; ··· 855 806 } 856 807 857 808 /* FLOGI failure */ 858 - lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 859 - "0100 FLOGI failure Data: x%x x%x " 860 - "x%x\n", 809 + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 810 + "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n", 861 811 irsp->ulpStatus, irsp->un.ulpWord[4], 862 812 irsp->ulpTimeout); 863 813 goto flogifail; ··· 890 842 else 891 843 rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp); 892 844 893 - if (!rc) 845 + if (!rc) { 846 + /* Mark the FCF discovery process done */ 847 + lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP | LOG_ELS, 848 + "2769 FLOGI successful on FCF record: " 849 + "current_fcf_index:x%x, terminate FCF " 850 + "round robin failover process\n", 851 + phba->fcf.current_rec.fcf_indx); 852 + spin_lock_irq(&phba->hbalock); 853 + phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 854 + spin_unlock_irq(&phba->hbalock); 894 855 goto out; 856 + } 895 857 } 896 858 897 859 flogifail: ··· 1467 1409 goto out; 1468 1410 } 1469 1411 /* PLOGI failed */ 1412 + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1413 + "2753 PLOGI failure DID:%06X Status:x%x/x%x\n", 1414 + ndlp->nlp_DID, irsp->ulpStatus, 1415 + irsp->un.ulpWord[4]); 1470 1416 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 1471 1417 if (lpfc_error_lost_link(irsp)) 1472 1418 rc = NLP_STE_FREED_NODE; ··· 1639 1577 goto out; 1640 1578 } 1641 1579 /* PRLI failed */ 1580 + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1581 + "2754 PRLI failure DID:%06X Status:x%x/x%x\n", 1582 + ndlp->nlp_DID, irsp->ulpStatus, 1583 + irsp->un.ulpWord[4]); 1642 1584 /* Do not call DSM for 
lpfc_els_abort'ed ELS cmds */ 1643 1585 if (lpfc_error_lost_link(irsp)) 1644 1586 goto out; ··· 1926 1860 goto out; 1927 1861 } 1928 1862 /* ADISC failed */ 1863 + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1864 + "2755 ADISC failure DID:%06X Status:x%x/x%x\n", 1865 + ndlp->nlp_DID, irsp->ulpStatus, 1866 + irsp->un.ulpWord[4]); 1929 1867 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 1930 1868 if (!lpfc_error_lost_link(irsp)) 1931 1869 lpfc_disc_state_machine(vport, ndlp, cmdiocb, ··· 2079 2009 /* ELS command is being retried */ 2080 2010 goto out; 2081 2011 /* LOGO failed */ 2012 + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 2013 + "2756 LOGO failure DID:%06X Status:x%x/x%x\n", 2014 + ndlp->nlp_DID, irsp->ulpStatus, 2015 + irsp->un.ulpWord[4]); 2082 2016 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2083 2017 if (lpfc_error_lost_link(irsp)) 2084 2018 goto out; ··· 6063 5989 if (phba->sli_rev < LPFC_SLI_REV4) 6064 5990 lpfc_issue_fabric_reglogin(vport); 6065 5991 else { 6066 - lpfc_start_fdiscs(phba); 5992 + /* 5993 + * If the physical port is instantiated using 5994 + * FDISC, do not start vport discovery. 5995 + */ 5996 + if (vport->port_state != LPFC_FDISC) 5997 + lpfc_start_fdiscs(phba); 6067 5998 lpfc_do_scr_ns_plogi(phba, vport); 6068 5999 } 6069 6000 } else ··· 6134 6055 } 6135 6056 6136 6057 /** 6137 - * lpfc_retry_pport_discovery - Start timer to retry FLOGI. 6058 + * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer 6138 6059 * @phba: pointer to lpfc hba data structure. 6139 6060 * 6140 - * This routine abort all pending discovery commands and 6141 - * start a timer to retry FLOGI for the physical port 6142 - * discovery. 6061 + * This routine cancels the retry delay timers to all the vports. 6143 6062 **/ 6144 6063 void 6145 - lpfc_retry_pport_discovery(struct lpfc_hba *phba) 6064 + lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba) 6146 6065 { 6147 6066 struct lpfc_vport **vports; 6148 6067 struct lpfc_nodelist *ndlp; 6149 - struct Scsi_Host *shost; 6150 - int i; 6151 6068 uint32_t link_state; 6069 + int i; 6152 6070 6153 6071 /* Treat this failure as linkdown for all vports */ 6154 6072 link_state = phba->link_state; ··· 6163 6087 } 6164 6088 lpfc_destroy_vport_work_array(phba, vports); 6165 6089 } 6090 + } 6091 + 6092 + /** 6093 + * lpfc_retry_pport_discovery - Start timer to retry FLOGI. 6094 + * @phba: pointer to lpfc hba data structure. 6095 + * 6096 + * This routine abort all pending discovery commands and 6097 + * start a timer to retry FLOGI for the physical port 6098 + * discovery. 
6099 + **/ 6100 + void 6101 + lpfc_retry_pport_discovery(struct lpfc_hba *phba) 6102 + { 6103 + struct lpfc_nodelist *ndlp; 6104 + struct Scsi_Host *shost; 6105 + 6106 + /* Cancel the all vports retry delay retry timers */ 6107 + lpfc_cancel_all_vport_retry_delay_timer(phba); 6166 6108 6167 6109 /* If fabric require FLOGI, then re-instantiate physical login */ 6168 6110 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); 6169 6111 if (!ndlp) 6170 6112 return; 6171 - 6172 6113 6173 6114 shost = lpfc_shost_from_vport(phba->pport); 6174 6115 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); ··· 6312 6219 lpfc_mbx_unreg_vpi(vport); 6313 6220 spin_lock_irq(shost->host_lock); 6314 6221 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 6315 - vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 6222 + if (phba->sli_rev == LPFC_SLI_REV4) 6223 + vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 6316 6224 spin_unlock_irq(shost->host_lock); 6317 6225 } 6318 6226 ··· 6891 6797 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 6892 6798 unsigned long iflag = 0; 6893 6799 6894 - spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, iflag); 6800 + spin_lock_irqsave(&phba->hbalock, iflag); 6801 + spin_lock(&phba->sli4_hba.abts_sgl_list_lock); 6895 6802 list_for_each_entry_safe(sglq_entry, sglq_next, 6896 6803 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 6897 6804 if (sglq_entry->sli4_xritag == xri) { 6898 6805 list_del(&sglq_entry->list); 6899 - spin_unlock_irqrestore( 6900 - &phba->sli4_hba.abts_sgl_list_lock, 6901 - iflag); 6902 - spin_lock_irqsave(&phba->hbalock, iflag); 6903 - 6904 6806 list_add_tail(&sglq_entry->list, 6905 6807 &phba->sli4_hba.lpfc_sgl_list); 6808 + sglq_entry->state = SGL_FREED; 6809 + spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); 6906 6810 spin_unlock_irqrestore(&phba->hbalock, iflag); 6907 6811 return; 6908 6812 } 6909 6813 } 6910 - spin_unlock_irqrestore(&phba->sli4_hba.abts_sgl_list_lock, iflag); 6814 + spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); 6815 + sglq_entry = __lpfc_get_active_sglq(phba, xri); 6816 + if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) { 6817 + spin_unlock_irqrestore(&phba->hbalock, iflag); 6818 + return; 6819 + } 6820 + sglq_entry->state = SGL_XRI_ABORTED; 6821 + spin_unlock_irqrestore(&phba->hbalock, iflag); 6822 + return; 6911 6823 }
+411 -116
drivers/scsi/lpfc/lpfc_hbadisc.c
··· 1481 1481 int 1482 1482 lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf) 1483 1483 { 1484 - LPFC_MBOXQ_t *mbox; 1485 - int rc; 1486 1484 /* 1487 1485 * If the Link is up and no FCoE events while in the 1488 1486 * FCF discovery, no need to restart FCF discovery. ··· 1489 1491 (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan)) 1490 1492 return 0; 1491 1493 1494 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 1495 + "2768 Pending link or FCF event during current " 1496 + "handling of the previous event: link_state:x%x, " 1497 + "evt_tag_at_scan:x%x, evt_tag_current:x%x\n", 1498 + phba->link_state, phba->fcoe_eventtag_at_fcf_scan, 1499 + phba->fcoe_eventtag); 1500 + 1492 1501 spin_lock_irq(&phba->hbalock); 1493 1502 phba->fcf.fcf_flag &= ~FCF_AVAILABLE; 1494 1503 spin_unlock_irq(&phba->hbalock); 1495 1504 1496 - if (phba->link_state >= LPFC_LINK_UP) 1497 - lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); 1498 - else { 1505 + if (phba->link_state >= LPFC_LINK_UP) { 1506 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 1507 + "2780 Restart FCF table scan due to " 1508 + "pending FCF event:evt_tag_at_scan:x%x, " 1509 + "evt_tag_current:x%x\n", 1510 + phba->fcoe_eventtag_at_fcf_scan, 1511 + phba->fcoe_eventtag); 1512 + lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 1513 + } else { 1499 1514 /* 1500 1515 * Do not continue FCF discovery and clear FCF_DISC_INPROGRESS 1501 1516 * flag 1502 1517 */ 1503 1518 spin_lock_irq(&phba->hbalock); 1504 1519 phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1505 - phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; 1520 + phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY); 1506 1521 spin_unlock_irq(&phba->hbalock); 1507 1522 } 1508 1523 1524 + /* Unregister the currently registered FCF if required */ 1509 1525 if (unreg_fcf) { 1510 1526 spin_lock_irq(&phba->hbalock); 1511 1527 phba->fcf.fcf_flag &= ~FCF_REGISTERED; 1512 1528 spin_unlock_irq(&phba->hbalock); 1513 - mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1514 - if (!mbox) { 1515 - lpfc_printf_log(phba, KERN_ERR, 1516 - LOG_DISCOVERY|LOG_MBOX, 1517 - "2610 UNREG_FCFI mbox allocation failed\n"); 1518 - return 1; 1519 - } 1520 - lpfc_unreg_fcfi(mbox, phba->fcf.fcfi); 1521 - mbox->vport = phba->pport; 1522 - mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl; 1523 - rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 1524 - if (rc == MBX_NOT_FINISHED) { 1525 - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, 1526 - "2611 UNREG_FCFI issue mbox failed\n"); 1527 - mempool_free(mbox, phba->mbox_mem_pool); 1528 - } 1529 + lpfc_sli4_unregister_fcf(phba); 1529 1530 } 1530 - 1531 1531 return 1; 1532 1532 } 1533 1533 1534 1534 /** 1535 - * lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox. 1535 + * lpfc_sli4_fcf_rec_mbox_parse - parse non-embedded fcf record mailbox command 1536 1536 * @phba: pointer to lpfc hba data structure. 1537 1537 * @mboxq: pointer to mailbox object. 1538 + * @next_fcf_index: pointer to holder of next fcf index. 1538 1539 * 1539 - * This function iterate through all the fcf records available in 1540 - * HBA and choose the optimal FCF record for discovery. After finding 1541 - * the FCF for discovery it register the FCF record and kick start 1542 - * discovery. 1543 - * If FCF_IN_USE flag is set in currently used FCF, the routine try to 1544 - * use a FCF record which match fabric name and mac address of the 1545 - * currently used FCF record. 
1546 - * If the driver support only one FCF, it will try to use the FCF record 1547 - * used by BOOT_BIOS. 1540 + * This routine parses the non-embedded fcf mailbox command by performing the 1541 + * necessarily error checking, non-embedded read FCF record mailbox command 1542 + * SGE parsing, and endianness swapping. 1543 + * 1544 + * Returns the pointer to the new FCF record in the non-embedded mailbox 1545 + * command DMA memory if successfully, other NULL. 1548 1546 */ 1549 - void 1550 - lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 1547 + static struct fcf_record * 1548 + lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 1549 + uint16_t *next_fcf_index) 1551 1550 { 1552 1551 void *virt_addr; 1553 1552 dma_addr_t phys_addr; 1554 - uint8_t *bytep; 1555 1553 struct lpfc_mbx_sge sge; 1556 1554 struct lpfc_mbx_read_fcf_tbl *read_fcf; 1557 1555 uint32_t shdr_status, shdr_add_status; 1558 1556 union lpfc_sli4_cfg_shdr *shdr; 1559 1557 struct fcf_record *new_fcf_record; 1560 - uint32_t boot_flag, addr_mode; 1561 - uint32_t next_fcf_index; 1562 - struct lpfc_fcf_rec *fcf_rec = NULL; 1563 - unsigned long iflags; 1564 - uint16_t vlan_id; 1565 - int rc; 1566 - 1567 - /* If there is pending FCoE event restart FCF table scan */ 1568 - if (lpfc_check_pending_fcoe_event(phba, 0)) { 1569 - lpfc_sli4_mbox_cmd_free(phba, mboxq); 1570 - return; 1571 - } 1572 1558 1573 1559 /* Get the first SGE entry from the non-embedded DMA memory. This 1574 1560 * routine only uses a single SGE. ··· 1563 1581 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 1564 1582 "2524 Failed to get the non-embedded SGE " 1565 1583 "virtual address\n"); 1566 - goto out; 1584 + return NULL; 1567 1585 } 1568 1586 virt_addr = mboxq->sge_array->addr[0]; 1569 1587 1570 1588 shdr = (union lpfc_sli4_cfg_shdr *)virt_addr; 1571 1589 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 1572 - shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 1573 - &shdr->response); 1574 - /* 1575 - * The FCF Record was read and there is no reason for the driver 1576 - * to maintain the FCF record data or memory. Instead, just need 1577 - * to book keeping the FCFIs can be used. 
1578 - */ 1590 + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 1579 1591 if (shdr_status || shdr_add_status) { 1580 - if (shdr_status == STATUS_FCF_TABLE_EMPTY) { 1581 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1592 + if (shdr_status == STATUS_FCF_TABLE_EMPTY) 1593 + lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 1582 1594 "2726 READ_FCF_RECORD Indicates empty " 1583 1595 "FCF table.\n"); 1584 - } else { 1585 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1596 + else 1597 + lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 1586 1598 "2521 READ_FCF_RECORD mailbox failed " 1587 - "with status x%x add_status x%x, mbx\n", 1588 - shdr_status, shdr_add_status); 1589 - } 1590 - goto out; 1599 + "with status x%x add_status x%x, " 1600 + "mbx\n", shdr_status, shdr_add_status); 1601 + return NULL; 1591 1602 } 1592 - /* Interpreting the returned information of FCF records */ 1603 + 1604 + /* Interpreting the returned information of the FCF record */ 1593 1605 read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr; 1594 1606 lpfc_sli_pcimem_bcopy(read_fcf, read_fcf, 1595 1607 sizeof(struct lpfc_mbx_read_fcf_tbl)); 1596 - next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf); 1597 - 1608 + *next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf); 1598 1609 new_fcf_record = (struct fcf_record *)(virt_addr + 1599 1610 sizeof(struct lpfc_mbx_read_fcf_tbl)); 1600 1611 lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record, 1601 1612 sizeof(struct fcf_record)); 1602 - bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); 1603 1613 1614 + return new_fcf_record; 1615 + } 1616 + 1617 + /** 1618 + * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record 1619 + * @phba: pointer to lpfc hba data structure. 1620 + * @fcf_record: pointer to the fcf record. 1621 + * @vlan_id: the lowest vlan identifier associated to this fcf record. 1622 + * @next_fcf_index: the index to the next fcf record in hba's fcf table. 1623 + * 1624 + * This routine logs the detailed FCF record if the LOG_FIP loggin is 1625 + * enabled. 
1626 + **/ 1627 + static void 1628 + lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba, 1629 + struct fcf_record *fcf_record, 1630 + uint16_t vlan_id, 1631 + uint16_t next_fcf_index) 1632 + { 1633 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 1634 + "2764 READ_FCF_RECORD:\n" 1635 + "\tFCF_Index : x%x\n" 1636 + "\tFCF_Avail : x%x\n" 1637 + "\tFCF_Valid : x%x\n" 1638 + "\tFIP_Priority : x%x\n" 1639 + "\tMAC_Provider : x%x\n" 1640 + "\tLowest VLANID : x%x\n" 1641 + "\tFCF_MAC Addr : x%x:%x:%x:%x:%x:%x\n" 1642 + "\tFabric_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n" 1643 + "\tSwitch_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n" 1644 + "\tNext_FCF_Index: x%x\n", 1645 + bf_get(lpfc_fcf_record_fcf_index, fcf_record), 1646 + bf_get(lpfc_fcf_record_fcf_avail, fcf_record), 1647 + bf_get(lpfc_fcf_record_fcf_valid, fcf_record), 1648 + fcf_record->fip_priority, 1649 + bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record), 1650 + vlan_id, 1651 + bf_get(lpfc_fcf_record_mac_0, fcf_record), 1652 + bf_get(lpfc_fcf_record_mac_1, fcf_record), 1653 + bf_get(lpfc_fcf_record_mac_2, fcf_record), 1654 + bf_get(lpfc_fcf_record_mac_3, fcf_record), 1655 + bf_get(lpfc_fcf_record_mac_4, fcf_record), 1656 + bf_get(lpfc_fcf_record_mac_5, fcf_record), 1657 + bf_get(lpfc_fcf_record_fab_name_0, fcf_record), 1658 + bf_get(lpfc_fcf_record_fab_name_1, fcf_record), 1659 + bf_get(lpfc_fcf_record_fab_name_2, fcf_record), 1660 + bf_get(lpfc_fcf_record_fab_name_3, fcf_record), 1661 + bf_get(lpfc_fcf_record_fab_name_4, fcf_record), 1662 + bf_get(lpfc_fcf_record_fab_name_5, fcf_record), 1663 + bf_get(lpfc_fcf_record_fab_name_6, fcf_record), 1664 + bf_get(lpfc_fcf_record_fab_name_7, fcf_record), 1665 + bf_get(lpfc_fcf_record_switch_name_0, fcf_record), 1666 + bf_get(lpfc_fcf_record_switch_name_1, fcf_record), 1667 + bf_get(lpfc_fcf_record_switch_name_2, fcf_record), 1668 + bf_get(lpfc_fcf_record_switch_name_3, fcf_record), 1669 + bf_get(lpfc_fcf_record_switch_name_4, fcf_record), 1670 + bf_get(lpfc_fcf_record_switch_name_5, fcf_record), 1671 + bf_get(lpfc_fcf_record_switch_name_6, fcf_record), 1672 + bf_get(lpfc_fcf_record_switch_name_7, fcf_record), 1673 + next_fcf_index); 1674 + } 1675 + 1676 + /** 1677 + * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler. 1678 + * @phba: pointer to lpfc hba data structure. 1679 + * @mboxq: pointer to mailbox object. 1680 + * 1681 + * This function iterates through all the fcf records available in 1682 + * HBA and chooses the optimal FCF record for discovery. After finding 1683 + * the FCF for discovery it registers the FCF record and kicks start 1684 + * discovery. 1685 + * If FCF_IN_USE flag is set in currently used FCF, the routine tries to 1686 + * use an FCF record which matches fabric name and mac address of the 1687 + * currently used FCF record. 1688 + * If the driver supports only one FCF, it will try to use the FCF record 1689 + * used by BOOT_BIOS. 
1690 + */ 1691 + void 1692 + lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 1693 + { 1694 + struct fcf_record *new_fcf_record; 1695 + uint32_t boot_flag, addr_mode; 1696 + uint16_t fcf_index, next_fcf_index; 1697 + struct lpfc_fcf_rec *fcf_rec = NULL; 1698 + uint16_t vlan_id; 1699 + int rc; 1700 + 1701 + /* If there is pending FCoE event restart FCF table scan */ 1702 + if (lpfc_check_pending_fcoe_event(phba, 0)) { 1703 + lpfc_sli4_mbox_cmd_free(phba, mboxq); 1704 + return; 1705 + } 1706 + 1707 + /* Parse the FCF record from the non-embedded mailbox command */ 1708 + new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, 1709 + &next_fcf_index); 1710 + if (!new_fcf_record) { 1711 + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 1712 + "2765 Mailbox command READ_FCF_RECORD " 1713 + "failed to retrieve a FCF record.\n"); 1714 + /* Let next new FCF event trigger fast failover */ 1715 + spin_lock_irq(&phba->hbalock); 1716 + phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1717 + spin_unlock_irq(&phba->hbalock); 1718 + lpfc_sli4_mbox_cmd_free(phba, mboxq); 1719 + return; 1720 + } 1721 + 1722 + /* Check the FCF record against the connection list */ 1604 1723 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, 1605 1724 &addr_mode, &vlan_id); 1725 + 1726 + /* Log the FCF record information if turned on */ 1727 + lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, 1728 + next_fcf_index); 1729 + 1606 1730 /* 1607 1731 * If the fcf record does not match with connect list entries 1608 - * read the next entry. 1732 + * read the next entry; otherwise, this is an eligible FCF 1733 + * record for round robin FCF failover. 1609 1734 */ 1610 - if (!rc) 1735 + if (!rc) { 1736 + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 1737 + "2781 FCF record fcf_index:x%x failed FCF " 1738 + "connection list check, fcf_avail:x%x, " 1739 + "fcf_valid:x%x\n", 1740 + bf_get(lpfc_fcf_record_fcf_index, 1741 + new_fcf_record), 1742 + bf_get(lpfc_fcf_record_fcf_avail, 1743 + new_fcf_record), 1744 + bf_get(lpfc_fcf_record_fcf_valid, 1745 + new_fcf_record)); 1611 1746 goto read_next_fcf; 1747 + } else { 1748 + fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); 1749 + rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index); 1750 + if (rc) 1751 + goto read_next_fcf; 1752 + } 1753 + 1612 1754 /* 1613 1755 * If this is not the first FCF discovery of the HBA, use last 1614 1756 * FCF record for the discovery. The condition that a rescan 1615 1757 * matches the in-use FCF record: fabric name, switch name, mac 1616 1758 * address, and vlan_id. 1617 1759 */ 1618 - spin_lock_irqsave(&phba->hbalock, iflags); 1760 + spin_lock_irq(&phba->hbalock); 1619 1761 if (phba->fcf.fcf_flag & FCF_IN_USE) { 1620 1762 if (lpfc_fab_name_match(phba->fcf.current_rec.fabric_name, 1621 1763 new_fcf_record) && ··· 1755 1649 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 1756 1650 else if (phba->fcf.fcf_flag & FCF_REDISC_FOV) 1757 1651 /* If in fast failover, mark it's completed */ 1758 - phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; 1759 - spin_unlock_irqrestore(&phba->hbalock, iflags); 1652 + phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | 1653 + FCF_DISCOVERY); 1654 + spin_unlock_irq(&phba->hbalock); 1760 1655 goto out; 1761 1656 } 1762 1657 /* ··· 1768 1661 * next candidate. 
1769 1662 */ 1770 1663 if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) { 1771 - spin_unlock_irqrestore(&phba->hbalock, iflags); 1664 + spin_unlock_irq(&phba->hbalock); 1772 1665 goto read_next_fcf; 1773 1666 } 1774 1667 } ··· 1776 1669 * Update on failover FCF record only if it's in FCF fast-failover 1777 1670 * period; otherwise, update on current FCF record. 1778 1671 */ 1779 - if (phba->fcf.fcf_flag & FCF_REDISC_FOV) { 1780 - /* Fast FCF failover only to the same fabric name */ 1781 - if (lpfc_fab_name_match(phba->fcf.current_rec.fabric_name, 1782 - new_fcf_record)) 1783 - fcf_rec = &phba->fcf.failover_rec; 1784 - else 1785 - goto read_next_fcf; 1786 - } else 1672 + if (phba->fcf.fcf_flag & FCF_REDISC_FOV) 1673 + fcf_rec = &phba->fcf.failover_rec; 1674 + else 1787 1675 fcf_rec = &phba->fcf.current_rec; 1788 1676 1789 1677 if (phba->fcf.fcf_flag & FCF_AVAILABLE) { ··· 1791 1689 /* Choose this FCF record */ 1792 1690 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, 1793 1691 addr_mode, vlan_id, BOOT_ENABLE); 1794 - spin_unlock_irqrestore(&phba->hbalock, iflags); 1692 + spin_unlock_irq(&phba->hbalock); 1795 1693 goto read_next_fcf; 1796 1694 } 1797 1695 /* ··· 1800 1698 * the next FCF record. 1801 1699 */ 1802 1700 if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) { 1803 - spin_unlock_irqrestore(&phba->hbalock, iflags); 1701 + spin_unlock_irq(&phba->hbalock); 1804 1702 goto read_next_fcf; 1805 1703 } 1806 1704 /* 1807 1705 * If the new hba FCF record has lower priority value 1808 1706 * than the driver FCF record, use the new record. 1809 1707 */ 1810 - if (lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record) && 1811 - (new_fcf_record->fip_priority < fcf_rec->priority)) { 1708 + if (new_fcf_record->fip_priority < fcf_rec->priority) { 1812 1709 /* Choose this FCF record */ 1813 1710 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, 1814 1711 addr_mode, vlan_id, 0); 1815 1712 } 1816 - spin_unlock_irqrestore(&phba->hbalock, iflags); 1713 + spin_unlock_irq(&phba->hbalock); 1817 1714 goto read_next_fcf; 1818 1715 } 1819 1716 /* ··· 1825 1724 BOOT_ENABLE : 0)); 1826 1725 phba->fcf.fcf_flag |= FCF_AVAILABLE; 1827 1726 } 1828 - spin_unlock_irqrestore(&phba->hbalock, iflags); 1727 + spin_unlock_irq(&phba->hbalock); 1829 1728 goto read_next_fcf; 1830 1729 1831 1730 read_next_fcf: ··· 1841 1740 * FCF scan inprogress, and do nothing 1842 1741 */ 1843 1742 if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) { 1844 - spin_lock_irqsave(&phba->hbalock, iflags); 1743 + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 1744 + "2782 No suitable FCF record " 1745 + "found during this round of " 1746 + "post FCF rediscovery scan: " 1747 + "fcf_evt_tag:x%x, fcf_index: " 1748 + "x%x\n", 1749 + phba->fcoe_eventtag_at_fcf_scan, 1750 + bf_get(lpfc_fcf_record_fcf_index, 1751 + new_fcf_record)); 1752 + /* 1753 + * Let next new FCF event trigger fast 1754 + * failover 1755 + */ 1756 + spin_lock_irq(&phba->hbalock); 1845 1757 phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1846 - spin_unlock_irqrestore(&phba->hbalock, iflags); 1758 + spin_unlock_irq(&phba->hbalock); 1847 1759 return; 1848 1760 } 1849 1761 /* ··· 1868 1754 * record. 
1869 1755 */ 1870 1756 1871 - /* unregister the current in-use FCF record */ 1757 + /* Unregister the current in-use FCF record */ 1872 1758 lpfc_unregister_fcf(phba); 1873 - /* replace in-use record with the new record */ 1759 + 1760 + /* Replace in-use record with the new record */ 1874 1761 memcpy(&phba->fcf.current_rec, 1875 1762 &phba->fcf.failover_rec, 1876 1763 sizeof(struct lpfc_fcf_rec)); 1877 1764 /* mark the FCF fast failover completed */ 1878 - spin_lock_irqsave(&phba->hbalock, iflags); 1765 + spin_lock_irq(&phba->hbalock); 1879 1766 phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; 1880 - spin_unlock_irqrestore(&phba->hbalock, iflags); 1767 + spin_unlock_irq(&phba->hbalock); 1768 + /* 1769 + * Set up the initial registered FCF index for FLOGI 1770 + * round robin FCF failover. 1771 + */ 1772 + phba->fcf.fcf_rr_init_indx = 1773 + phba->fcf.failover_rec.fcf_indx; 1881 1774 /* Register to the new FCF record */ 1882 1775 lpfc_register_fcf(phba); 1883 1776 } else { ··· 1897 1776 return; 1898 1777 /* 1899 1778 * Otherwise, initial scan or post linkdown rescan, 1900 - * register with the best fit FCF record found so 1901 - * far through the scanning process. 1779 + * register with the best FCF record found so far 1780 + * through the FCF scanning process. 1902 1781 */ 1782 + 1783 + /* mark the initial FCF discovery completed */ 1784 + spin_lock_irq(&phba->hbalock); 1785 + phba->fcf.fcf_flag &= ~FCF_INIT_DISC; 1786 + spin_unlock_irq(&phba->hbalock); 1787 + /* 1788 + * Set up the initial registered FCF index for FLOGI 1789 + * round robin FCF failover 1790 + */ 1791 + phba->fcf.fcf_rr_init_indx = 1792 + phba->fcf.current_rec.fcf_indx; 1793 + /* Register to the new FCF record */ 1903 1794 lpfc_register_fcf(phba); 1904 1795 } 1905 1796 } else 1906 - lpfc_sli4_read_fcf_record(phba, next_fcf_index); 1797 + lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index); 1907 1798 return; 1908 1799 1909 1800 out: ··· 1923 1790 lpfc_register_fcf(phba); 1924 1791 1925 1792 return; 1793 + } 1794 + 1795 + /** 1796 + * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf round robin read_fcf mbox cmpl hdler 1797 + * @phba: pointer to lpfc hba data structure. 1798 + * @mboxq: pointer to mailbox object. 1799 + * 1800 + * This is the callback function for FLOGI failure round robin FCF failover 1801 + * read FCF record mailbox command from the eligible FCF record bmask for 1802 + * performing the failover. If the FCF read back is not valid/available, it 1803 + * fails through to retrying FLOGI to the currently registered FCF again. 1804 + * Otherwise, if the FCF read back is valid and available, it will set the 1805 + * newly read FCF record to the failover FCF record, unregister currently 1806 + * registered FCF record, copy the failover FCF record to the current 1807 + * FCF record, and then register the current FCF record before proceeding 1808 + * to trying FLOGI on the new failover FCF. 
1809 + */ 1810 + void 1811 + lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 1812 + { 1813 + struct fcf_record *new_fcf_record; 1814 + uint32_t boot_flag, addr_mode; 1815 + uint16_t next_fcf_index; 1816 + uint16_t current_fcf_index; 1817 + uint16_t vlan_id; 1818 + 1819 + /* If link state is not up, stop the round robin failover process */ 1820 + if (phba->link_state < LPFC_LINK_UP) { 1821 + spin_lock_irq(&phba->hbalock); 1822 + phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 1823 + spin_unlock_irq(&phba->hbalock); 1824 + lpfc_sli4_mbox_cmd_free(phba, mboxq); 1825 + return; 1826 + } 1827 + 1828 + /* Parse the FCF record from the non-embedded mailbox command */ 1829 + new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, 1830 + &next_fcf_index); 1831 + if (!new_fcf_record) { 1832 + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 1833 + "2766 Mailbox command READ_FCF_RECORD " 1834 + "failed to retrieve a FCF record.\n"); 1835 + goto out; 1836 + } 1837 + 1838 + /* Get the needed parameters from FCF record */ 1839 + lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, 1840 + &addr_mode, &vlan_id); 1841 + 1842 + /* Log the FCF record information if turned on */ 1843 + lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, 1844 + next_fcf_index); 1845 + 1846 + /* Upload new FCF record to the failover FCF record */ 1847 + spin_lock_irq(&phba->hbalock); 1848 + __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec, 1849 + new_fcf_record, addr_mode, vlan_id, 1850 + (boot_flag ? BOOT_ENABLE : 0)); 1851 + spin_unlock_irq(&phba->hbalock); 1852 + 1853 + current_fcf_index = phba->fcf.current_rec.fcf_indx; 1854 + 1855 + /* Unregister the current in-use FCF record */ 1856 + lpfc_unregister_fcf(phba); 1857 + 1858 + /* Replace in-use record with the new record */ 1859 + memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec, 1860 + sizeof(struct lpfc_fcf_rec)); 1861 + 1862 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 1863 + "2783 FLOGI round robin FCF failover from FCF " 1864 + "(index:x%x) to FCF (index:x%x).\n", 1865 + current_fcf_index, 1866 + bf_get(lpfc_fcf_record_fcf_index, new_fcf_record)); 1867 + 1868 + out: 1869 + lpfc_sli4_mbox_cmd_free(phba, mboxq); 1870 + lpfc_register_fcf(phba); 1871 + } 1872 + 1873 + /** 1874 + * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler. 1875 + * @phba: pointer to lpfc hba data structure. 1876 + * @mboxq: pointer to mailbox object. 1877 + * 1878 + * This is the callback function of read FCF record mailbox command for 1879 + * updating the eligible FCF bmask for FLOGI failure round robin FCF 1880 + * failover when a new FCF event happened. If the FCF read back is 1881 + * valid/available and it passes the connection list check, it updates 1882 + * the bmask for the eligible FCF record for round robin failover. 
1883 + */ 1884 + void 1885 + lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 1886 + { 1887 + struct fcf_record *new_fcf_record; 1888 + uint32_t boot_flag, addr_mode; 1889 + uint16_t fcf_index, next_fcf_index; 1890 + uint16_t vlan_id; 1891 + int rc; 1892 + 1893 + /* If link state is not up, no need to proceed */ 1894 + if (phba->link_state < LPFC_LINK_UP) 1895 + goto out; 1896 + 1897 + /* If FCF discovery period is over, no need to proceed */ 1898 + if (phba->fcf.fcf_flag & FCF_DISCOVERY) 1899 + goto out; 1900 + 1901 + /* Parse the FCF record from the non-embedded mailbox command */ 1902 + new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, 1903 + &next_fcf_index); 1904 + if (!new_fcf_record) { 1905 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 1906 + "2767 Mailbox command READ_FCF_RECORD " 1907 + "failed to retrieve a FCF record.\n"); 1908 + goto out; 1909 + } 1910 + 1911 + /* Check the connection list for eligibility */ 1912 + rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, 1913 + &addr_mode, &vlan_id); 1914 + 1915 + /* Log the FCF record information if turned on */ 1916 + lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, 1917 + next_fcf_index); 1918 + 1919 + if (!rc) 1920 + goto out; 1921 + 1922 + /* Update the eligible FCF record index bmask */ 1923 + fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); 1924 + rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index); 1925 + 1926 + out: 1927 + lpfc_sli4_mbox_cmd_free(phba, mboxq); 1926 1928 } 1927 1929 1928 1930 /** ··· 2292 2024 int rc; 2293 2025 struct fcf_record *fcf_record; 2294 2026 2295 - sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2296 - 2297 2027 spin_lock_irq(&phba->hbalock); 2298 2028 switch (la->UlnkSpeed) { 2299 2029 case LA_1GHZ_LINK: ··· 2383 2117 spin_unlock_irq(&phba->hbalock); 2384 2118 2385 2119 lpfc_linkup(phba); 2386 - if (sparam_mbox) { 2387 - lpfc_read_sparam(phba, sparam_mbox, 0); 2388 - sparam_mbox->vport = vport; 2389 - sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam; 2390 - rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT); 2391 - if (rc == MBX_NOT_FINISHED) { 2392 - mp = (struct lpfc_dmabuf *) sparam_mbox->context1; 2393 - lpfc_mbuf_free(phba, mp->virt, mp->phys); 2394 - kfree(mp); 2395 - mempool_free(sparam_mbox, phba->mbox_mem_pool); 2396 - goto out; 2397 - } 2120 + sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2121 + if (!sparam_mbox) 2122 + goto out; 2123 + 2124 + rc = lpfc_read_sparam(phba, sparam_mbox, 0); 2125 + if (rc) { 2126 + mempool_free(sparam_mbox, phba->mbox_mem_pool); 2127 + goto out; 2128 + } 2129 + sparam_mbox->vport = vport; 2130 + sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam; 2131 + rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT); 2132 + if (rc == MBX_NOT_FINISHED) { 2133 + mp = (struct lpfc_dmabuf *) sparam_mbox->context1; 2134 + lpfc_mbuf_free(phba, mp->virt, mp->phys); 2135 + kfree(mp); 2136 + mempool_free(sparam_mbox, phba->mbox_mem_pool); 2137 + goto out; 2398 2138 } 2399 2139 2400 2140 if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) { ··· 2458 2186 spin_unlock_irq(&phba->hbalock); 2459 2187 return; 2460 2188 } 2189 + /* This is the initial FCF discovery scan */ 2190 + phba->fcf.fcf_flag |= FCF_INIT_DISC; 2461 2191 spin_unlock_irq(&phba->hbalock); 2462 - rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); 2463 - if (rc) 2192 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 2193 + "2778 Start FCF table scan at linkup\n"); 2194 + 2195 + rc = 
lpfc_sli4_fcf_scan_read_fcf_rec(phba, 2196 + LPFC_FCOE_FCF_GET_FIRST); 2197 + if (rc) { 2198 + spin_lock_irq(&phba->hbalock); 2199 + phba->fcf.fcf_flag &= ~FCF_INIT_DISC; 2200 + spin_unlock_irq(&phba->hbalock); 2464 2201 goto out; 2202 + } 2465 2203 } 2466 2204 2467 2205 return; ··· 3661 3379 shost = lpfc_shost_from_vport(vports[i]); 3662 3380 spin_lock_irq(shost->host_lock); 3663 3381 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) { 3664 - if (ndlp->nlp_flag & NLP_RPI_VALID) 3382 + if (ndlp->nlp_flag & NLP_RPI_VALID) { 3383 + /* The mempool_alloc might sleep */ 3384 + spin_unlock_irq(shost->host_lock); 3665 3385 lpfc_unreg_rpi(vports[i], ndlp); 3386 + spin_lock_irq(shost->host_lock); 3387 + } 3666 3388 } 3667 3389 spin_unlock_irq(shost->host_lock); 3668 3390 } ··· 5042 4756 return; 5043 4757 /* Reset HBA FCF states after successful unregister FCF */ 5044 4758 phba->fcf.fcf_flag = 0; 4759 + phba->fcf.current_rec.flag = 0; 5045 4760 5046 4761 /* 5047 4762 * If driver is not unloading, check if there is any other ··· 5052 4765 (phba->link_state < LPFC_LINK_UP)) 5053 4766 return; 5054 4767 5055 - rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); 4768 + /* This is considered as the initial FCF discovery scan */ 4769 + spin_lock_irq(&phba->hbalock); 4770 + phba->fcf.fcf_flag |= FCF_INIT_DISC; 4771 + spin_unlock_irq(&phba->hbalock); 4772 + rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 5056 4773 5057 - if (rc) 4774 + if (rc) { 4775 + spin_lock_irq(&phba->hbalock); 4776 + phba->fcf.fcf_flag &= ~FCF_INIT_DISC; 4777 + spin_unlock_irq(&phba->hbalock); 5058 4778 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, 5059 4779 "2553 lpfc_unregister_unused_fcf failed " 5060 4780 "to read FCF record HBA state x%x\n", 5061 4781 phba->pport->port_state); 4782 + } 5062 4783 } 5063 4784 5064 4785 /**
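Note on the lpfc_hbadisc.c changes: the new scan completion handler (lpfc_mbx_cmpl_fcf_scan_read_fcf_rec) reads FCF records one index at a time, drops records that fail the connection-list check, marks eligible ones in the round-robin bmask, and keeps the record with the lowest fip_priority as the discovery candidate. A minimal userspace sketch of that selection policy follows; the struct layout, field names and table size are illustrative stand-ins for the driver's types, and the driver performs the walk asynchronously via READ_FCF_RECORD mailbox completions rather than a loop.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define FCF_TBL_MAX 32           /* mirrors LPFC_SLI4_FCF_TBL_INDX_MAX */

struct fcf_rec_model {           /* simplified stand-in for struct fcf_record */
	bool     valid;          /* passes the connection-list check */
	uint8_t  fip_priority;   /* lower value is preferred */
};

/*
 * Scan the table and pick the best eligible record, mimicking the
 * "skip on connection-list failure, prefer lower FIP priority" policy
 * of the scan completion handler.  Returns the chosen index or -1.
 */
static int pick_fcf(const struct fcf_rec_model *tbl, int n)
{
	int best = -1;

	for (int i = 0; i < n; i++) {
		if (!tbl[i].valid)
			continue;               /* "read the next entry" */
		if (best < 0 || tbl[i].fip_priority < tbl[best].fip_priority)
			best = i;               /* "choose this FCF record" */
	}
	return best;
}

int main(void)
{
	struct fcf_rec_model tbl[FCF_TBL_MAX] = {
		[2] = { .valid = true,  .fip_priority = 128 },
		[5] = { .valid = true,  .fip_priority = 1 },   /* best candidate */
		[9] = { .valid = false, .fip_priority = 0 },   /* fails conn list */
	};

	printf("selected FCF index: %d\n", pick_fcf(tbl, FCF_TBL_MAX));
	return 0;
}

In the driver the eligible indexes are additionally recorded via lpfc_sli4_fcf_rr_index_set so a later FLOGI failure can fail over round robin instead of rescanning from scratch.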
+228 -49
drivers/scsi/lpfc/lpfc_init.c
··· 350 350 mb = &pmb->u.mb; 351 351 352 352 /* Get login parameters for NID. */ 353 - lpfc_read_sparam(phba, pmb, 0); 353 + rc = lpfc_read_sparam(phba, pmb, 0); 354 + if (rc) { 355 + mempool_free(pmb, phba->mbox_mem_pool); 356 + return -ENOMEM; 357 + } 358 + 354 359 pmb->vport = vport; 355 360 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 356 361 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, ··· 364 359 mb->mbxCommand, mb->mbxStatus); 365 360 phba->link_state = LPFC_HBA_ERROR; 366 361 mp = (struct lpfc_dmabuf *) pmb->context1; 367 - mempool_free( pmb, phba->mbox_mem_pool); 362 + mempool_free(pmb, phba->mbox_mem_pool); 368 363 lpfc_mbuf_free(phba, mp->virt, mp->phys); 369 364 kfree(mp); 370 365 return -EIO; ··· 549 544 mempool_free(pmb, phba->mbox_mem_pool); 550 545 return -EIO; 551 546 } 552 - } else if (phba->cfg_suppress_link_up == 0) { 547 + } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { 553 548 lpfc_init_link(phba, pmb, phba->cfg_topology, 554 549 phba->cfg_link_speed); 555 550 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; ··· 576 571 } 577 572 /* MBOX buffer will be freed in mbox compl */ 578 573 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 574 + if (!pmb) { 575 + phba->link_state = LPFC_HBA_ERROR; 576 + return -ENOMEM; 577 + } 578 + 579 579 lpfc_config_async(phba, pmb, LPFC_ELS_RING); 580 580 pmb->mbox_cmpl = lpfc_config_async_cmpl; 581 581 pmb->vport = phba->pport; ··· 598 588 599 589 /* Get Option rom version */ 600 590 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 591 + if (!pmb) { 592 + phba->link_state = LPFC_HBA_ERROR; 593 + return -ENOMEM; 594 + } 595 + 601 596 lpfc_dump_wakeup_param(phba, pmb); 602 597 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl; 603 598 pmb->vport = phba->pport; ··· 667 652 mempool_free(pmb, phba->mbox_mem_pool); 668 653 return -EIO; 669 654 } 670 - phba->cfg_suppress_link_up = 0; 655 + phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK; 671 656 672 657 return 0; 673 658 } ··· 822 807 LIST_HEAD(aborts); 823 808 int ret; 824 809 unsigned long iflag = 0; 810 + struct lpfc_sglq *sglq_entry = NULL; 811 + 825 812 ret = lpfc_hba_down_post_s3(phba); 826 813 if (ret) 827 814 return ret; ··· 839 822 * list. 
840 823 */ 841 824 spin_lock(&phba->sli4_hba.abts_sgl_list_lock); 825 + list_for_each_entry(sglq_entry, 826 + &phba->sli4_hba.lpfc_abts_els_sgl_list, list) 827 + sglq_entry->state = SGL_FREED; 828 + 842 829 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list, 843 830 &phba->sli4_hba.lpfc_sgl_list); 844 831 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); ··· 2199 2178 void 2200 2179 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2201 2180 { 2202 - /* Clear pending FCF rediscovery wait timer */ 2203 - phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 2181 + /* Clear pending FCF rediscovery wait and failover in progress flags */ 2182 + phba->fcf.fcf_flag &= ~(FCF_REDISC_PEND | 2183 + FCF_DEAD_DISC | 2184 + FCF_ACVL_DISC); 2204 2185 /* Now, try to stop the timer */ 2205 2186 del_timer(&phba->fcf.redisc_wait); 2206 2187 } ··· 2599 2576 init_timer(&vport->els_tmofunc); 2600 2577 vport->els_tmofunc.function = lpfc_els_timeout; 2601 2578 vport->els_tmofunc.data = (unsigned long)vport; 2579 + if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) { 2580 + phba->menlo_flag |= HBA_MENLO_SUPPORT; 2581 + /* check for menlo minimum sg count */ 2582 + if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) { 2583 + phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT; 2584 + shost->sg_tablesize = phba->cfg_sg_seg_cnt; 2585 + } 2586 + } 2602 2587 2603 2588 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 2604 2589 if (error) ··· 2943 2912 /* FCF rediscovery event to worker thread */ 2944 2913 phba->fcf.fcf_flag |= FCF_REDISC_EVT; 2945 2914 spin_unlock_irq(&phba->hbalock); 2915 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2916 + "2776 FCF rediscover wait timer expired, post " 2917 + "a worker thread event for FCF table scan\n"); 2946 2918 /* wake up worker thread */ 2947 2919 lpfc_worker_wake_up(phba); 2948 2920 } ··· 3217 3183 } 3218 3184 3219 3185 /** 3186 + * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport 3187 + * @vport: pointer to vport data structure. 3188 + * 3189 + * This routine is to perform Clear Virtual Link (CVL) on a vport in 3190 + * response to a CVL event. 3191 + * 3192 + * Return the pointer to the ndlp with the vport if successful, otherwise 3193 + * return NULL. 3194 + **/ 3195 + static struct lpfc_nodelist * 3196 + lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport) 3197 + { 3198 + struct lpfc_nodelist *ndlp; 3199 + struct Scsi_Host *shost; 3200 + struct lpfc_hba *phba; 3201 + 3202 + if (!vport) 3203 + return NULL; 3204 + ndlp = lpfc_findnode_did(vport, Fabric_DID); 3205 + if (!ndlp) 3206 + return NULL; 3207 + phba = vport->phba; 3208 + if (!phba) 3209 + return NULL; 3210 + if (phba->pport->port_state <= LPFC_FLOGI) 3211 + return NULL; 3212 + /* If virtual link is not yet instantiated ignore CVL */ 3213 + if (vport->port_state <= LPFC_FDISC) 3214 + return NULL; 3215 + shost = lpfc_shost_from_vport(vport); 3216 + if (!shost) 3217 + return NULL; 3218 + lpfc_linkdown_port(vport); 3219 + lpfc_cleanup_pending_mbox(vport); 3220 + spin_lock_irq(shost->host_lock); 3221 + vport->fc_flag |= FC_VPORT_CVL_RCVD; 3222 + spin_unlock_irq(shost->host_lock); 3223 + 3224 + return ndlp; 3225 + } 3226 + 3227 + /** 3228 + * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports 3229 + * @vport: pointer to lpfc hba data structure. 3230 + * 3231 + * This routine is to perform Clear Virtual Link (CVL) on all vports in 3232 + * response to a FCF dead event. 
3233 + **/ 3234 + static void 3235 + lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba) 3236 + { 3237 + struct lpfc_vport **vports; 3238 + int i; 3239 + 3240 + vports = lpfc_create_vport_work_array(phba); 3241 + if (vports) 3242 + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 3243 + lpfc_sli4_perform_vport_cvl(vports[i]); 3244 + lpfc_destroy_vport_work_array(phba, vports); 3245 + } 3246 + 3247 + /** 3220 3248 * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event 3221 3249 * @phba: pointer to lpfc hba data structure. 3222 3250 * @acqe_link: pointer to the async fcoe completion queue entry. ··· 3294 3198 struct lpfc_vport *vport; 3295 3199 struct lpfc_nodelist *ndlp; 3296 3200 struct Scsi_Host *shost; 3297 - uint32_t link_state; 3298 3201 int active_vlink_present; 3299 3202 struct lpfc_vport **vports; 3300 3203 int i; ··· 3303 3208 switch (event_type) { 3304 3209 case LPFC_FCOE_EVENT_TYPE_NEW_FCF: 3305 3210 case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD: 3306 - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 3307 - "2546 New FCF found index 0x%x tag 0x%x\n", 3308 - acqe_fcoe->index, 3309 - acqe_fcoe->event_tag); 3211 + lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3212 + "2546 New FCF found/FCF parameter modified event: " 3213 + "evt_tag:x%x, fcf_index:x%x\n", 3214 + acqe_fcoe->event_tag, acqe_fcoe->index); 3215 + 3310 3216 spin_lock_irq(&phba->hbalock); 3311 3217 if ((phba->fcf.fcf_flag & FCF_SCAN_DONE) || 3312 3218 (phba->hba_flag & FCF_DISC_INPROGRESS)) { ··· 3318 3222 spin_unlock_irq(&phba->hbalock); 3319 3223 break; 3320 3224 } 3225 + 3321 3226 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) { 3322 3227 /* 3323 3228 * If fast FCF failover rescan event is pending, ··· 3329 3232 } 3330 3233 spin_unlock_irq(&phba->hbalock); 3331 3234 3332 - /* Read the FCF table and re-discover SAN. */ 3333 - rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); 3235 + if ((phba->fcf.fcf_flag & FCF_DISCOVERY) && 3236 + !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) { 3237 + /* 3238 + * During period of FCF discovery, read the FCF 3239 + * table record indexed by the event to update 3240 + * FCF round robin failover eligible FCF bmask. 
3241 + */ 3242 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 3243 + LOG_DISCOVERY, 3244 + "2779 Read new FCF record with " 3245 + "fcf_index:x%x for updating FCF " 3246 + "round robin failover bmask\n", 3247 + acqe_fcoe->index); 3248 + rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index); 3249 + } 3250 + 3251 + /* Otherwise, scan the entire FCF table and re-discover SAN */ 3252 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3253 + "2770 Start FCF table scan due to new FCF " 3254 + "event: evt_tag:x%x, fcf_index:x%x\n", 3255 + acqe_fcoe->event_tag, acqe_fcoe->index); 3256 + rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 3257 + LPFC_FCOE_FCF_GET_FIRST); 3334 3258 if (rc) 3335 - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 3336 - "2547 Read FCF record failed 0x%x\n", 3337 - rc); 3259 + lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3260 + "2547 Issue FCF scan read FCF mailbox " 3261 + "command failed 0x%x\n", rc); 3338 3262 break; 3339 3263 3340 3264 case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL: ··· 3366 3248 break; 3367 3249 3368 3250 case LPFC_FCOE_EVENT_TYPE_FCF_DEAD: 3369 - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 3251 + lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3370 3252 "2549 FCF disconnected from network index 0x%x" 3371 3253 " tag 0x%x\n", acqe_fcoe->index, 3372 3254 acqe_fcoe->event_tag); 3373 3255 /* If the event is not for currently used fcf do nothing */ 3374 3256 if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index) 3375 3257 break; 3376 - /* 3377 - * Currently, driver support only one FCF - so treat this as 3378 - * a link down, but save the link state because we don't want 3379 - * it to be changed to Link Down unless it is already down. 3258 + /* We request port to rediscover the entire FCF table for 3259 + * a fast recovery from case that the current FCF record 3260 + * is no longer valid if we are not in the middle of FCF 3261 + * failover process already. 3380 3262 */ 3381 - link_state = phba->link_state; 3382 - lpfc_linkdown(phba); 3383 - phba->link_state = link_state; 3384 - /* Unregister FCF if no devices connected to it */ 3385 - lpfc_unregister_unused_fcf(phba); 3263 + spin_lock_irq(&phba->hbalock); 3264 + if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 3265 + spin_unlock_irq(&phba->hbalock); 3266 + /* Update FLOGI FCF failover eligible FCF bmask */ 3267 + lpfc_sli4_fcf_rr_index_clear(phba, acqe_fcoe->index); 3268 + break; 3269 + } 3270 + /* Mark the fast failover process in progress */ 3271 + phba->fcf.fcf_flag |= FCF_DEAD_DISC; 3272 + spin_unlock_irq(&phba->hbalock); 3273 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3274 + "2771 Start FCF fast failover process due to " 3275 + "FCF DEAD event: evt_tag:x%x, fcf_index:x%x " 3276 + "\n", acqe_fcoe->event_tag, acqe_fcoe->index); 3277 + rc = lpfc_sli4_redisc_fcf_table(phba); 3278 + if (rc) { 3279 + lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 3280 + LOG_DISCOVERY, 3281 + "2772 Issue FCF rediscover mabilbox " 3282 + "command failed, fail through to FCF " 3283 + "dead event\n"); 3284 + spin_lock_irq(&phba->hbalock); 3285 + phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 3286 + spin_unlock_irq(&phba->hbalock); 3287 + /* 3288 + * Last resort will fail over by treating this 3289 + * as a link down to FCF registration. 3290 + */ 3291 + lpfc_sli4_fcf_dead_failthrough(phba); 3292 + } else 3293 + /* Handling fast FCF failover to a DEAD FCF event 3294 + * is considered equalivant to receiving CVL to all 3295 + * vports. 
3296 + */ 3297 + lpfc_sli4_perform_all_vport_cvl(phba); 3386 3298 break; 3387 3299 case LPFC_FCOE_EVENT_TYPE_CVL: 3388 - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 3300 + lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3389 3301 "2718 Clear Virtual Link Received for VPI 0x%x" 3390 3302 " tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag); 3391 3303 vport = lpfc_find_vport_by_vpid(phba, 3392 3304 acqe_fcoe->index - phba->vpi_base); 3393 - if (!vport) 3394 - break; 3395 - ndlp = lpfc_findnode_did(vport, Fabric_DID); 3305 + ndlp = lpfc_sli4_perform_vport_cvl(vport); 3396 3306 if (!ndlp) 3397 3307 break; 3398 - shost = lpfc_shost_from_vport(vport); 3399 - if (phba->pport->port_state <= LPFC_FLOGI) 3400 - break; 3401 - /* If virtual link is not yet instantiated ignore CVL */ 3402 - if (vport->port_state <= LPFC_FDISC) 3403 - break; 3404 - 3405 - lpfc_linkdown_port(vport); 3406 - lpfc_cleanup_pending_mbox(vport); 3407 - spin_lock_irq(shost->host_lock); 3408 - vport->fc_flag |= FC_VPORT_CVL_RCVD; 3409 - spin_unlock_irq(shost->host_lock); 3410 3308 active_vlink_present = 0; 3411 3309 3412 3310 vports = lpfc_create_vport_work_array(phba); ··· 3445 3311 * re-instantiate the Vlink using FDISC. 3446 3312 */ 3447 3313 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); 3314 + shost = lpfc_shost_from_vport(vport); 3448 3315 spin_lock_irq(shost->host_lock); 3449 3316 ndlp->nlp_flag |= NLP_DELAY_TMO; 3450 3317 spin_unlock_irq(shost->host_lock); ··· 3456 3321 * Otherwise, we request port to rediscover 3457 3322 * the entire FCF table for a fast recovery 3458 3323 * from possible case that the current FCF 3459 - * is no longer valid. 3324 + * is no longer valid if we are not already 3325 + * in the FCF failover process. 3460 3326 */ 3327 + spin_lock_irq(&phba->hbalock); 3328 + if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 3329 + spin_unlock_irq(&phba->hbalock); 3330 + break; 3331 + } 3332 + /* Mark the fast failover process in progress */ 3333 + phba->fcf.fcf_flag |= FCF_ACVL_DISC; 3334 + spin_unlock_irq(&phba->hbalock); 3335 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 3336 + LOG_DISCOVERY, 3337 + "2773 Start FCF fast failover due " 3338 + "to CVL event: evt_tag:x%x\n", 3339 + acqe_fcoe->event_tag); 3461 3340 rc = lpfc_sli4_redisc_fcf_table(phba); 3462 - if (rc) 3341 + if (rc) { 3342 + lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 3343 + LOG_DISCOVERY, 3344 + "2774 Issue FCF rediscover " 3345 + "mabilbox command failed, " 3346 + "through to CVL event\n"); 3347 + spin_lock_irq(&phba->hbalock); 3348 + phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 3349 + spin_unlock_irq(&phba->hbalock); 3463 3350 /* 3464 3351 * Last resort will be re-try on the 3465 3352 * the current registered FCF entry. 
3466 3353 */ 3467 3354 lpfc_retry_pport_discovery(phba); 3355 + } 3468 3356 } 3469 3357 break; 3470 3358 default: ··· 3584 3426 spin_unlock_irq(&phba->hbalock); 3585 3427 3586 3428 /* Scan FCF table from the first entry to re-discover SAN */ 3587 - rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); 3429 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3430 + "2777 Start FCF table scan after FCF " 3431 + "rediscovery quiescent period over\n"); 3432 + rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 3588 3433 if (rc) 3589 - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 3590 - "2747 Post FCF rediscovery read FCF record " 3591 - "failed 0x%x\n", rc); 3434 + lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3435 + "2747 Issue FCF scan read FCF mailbox " 3436 + "command failed 0x%x\n", rc); 3592 3437 } 3593 3438 3594 3439 /** ··· 3883 3722 int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size; 3884 3723 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; 3885 3724 struct lpfc_mqe *mqe; 3725 + int longs; 3886 3726 3887 3727 /* Before proceed, wait for POST done and device ready */ 3888 3728 rc = lpfc_sli4_post_status_check(phba); ··· 4060 3898 goto out_free_active_sgl; 4061 3899 } 4062 3900 3901 + /* Allocate eligible FCF bmask memory for FCF round robin failover */ 3902 + longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; 3903 + phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long), 3904 + GFP_KERNEL); 3905 + if (!phba->fcf.fcf_rr_bmask) { 3906 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3907 + "2759 Failed allocate memory for FCF round " 3908 + "robin failover bmask\n"); 3909 + goto out_remove_rpi_hdrs; 3910 + } 3911 + 4063 3912 phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * 4064 3913 phba->cfg_fcp_eq_count), GFP_KERNEL); 4065 3914 if (!phba->sli4_hba.fcp_eq_hdl) { 4066 3915 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4067 3916 "2572 Failed allocate memory for fast-path " 4068 3917 "per-EQ handle array\n"); 4069 - goto out_remove_rpi_hdrs; 3918 + goto out_free_fcf_rr_bmask; 4070 3919 } 4071 3920 4072 3921 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * ··· 4130 3957 4131 3958 out_free_fcp_eq_hdl: 4132 3959 kfree(phba->sli4_hba.fcp_eq_hdl); 3960 + out_free_fcf_rr_bmask: 3961 + kfree(phba->fcf.fcf_rr_bmask); 4133 3962 out_remove_rpi_hdrs: 4134 3963 lpfc_sli4_remove_rpi_hdrs(phba); 4135 3964 out_free_active_sgl: ··· 4176 4001 /* Free the allocated rpi headers. */ 4177 4002 lpfc_sli4_remove_rpi_hdrs(phba); 4178 4003 lpfc_sli4_remove_rpis(phba); 4004 + 4005 + /* Free eligible FCF index bmask */ 4006 + kfree(phba->fcf.fcf_rr_bmask); 4179 4007 4180 4008 /* Free the ELS sgl list */ 4181 4009 lpfc_free_active_sgl(phba); ··· 4575 4397 4576 4398 /* The list order is used by later block SGL registraton */ 4577 4399 spin_lock_irq(&phba->hbalock); 4400 + sglq_entry->state = SGL_FREED; 4578 4401 list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list); 4579 4402 phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry; 4580 4403 phba->sli4_hba.total_sglq_bufs++;
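Note on the lpfc_init.c changes: the eligible-FCF bitmap is sized by rounding LPFC_SLI4_FCF_TBL_INDX_MAX up to whole unsigned longs before kzalloc'ing it. A small standalone sketch of the same sizing arithmetic, with calloc standing in for kzalloc and macro names local to the example:

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>   /* CHAR_BIT */

#define FCF_TBL_INDX_MAX 32                       /* LPFC_SLI4_FCF_TBL_INDX_MAX */
#define BITS_PER_WORD (CHAR_BIT * sizeof(unsigned long))

int main(void)
{
	/* Round the bit count up to whole unsigned longs, as the driver does. */
	size_t words = (FCF_TBL_INDX_MAX + BITS_PER_WORD - 1) / BITS_PER_WORD;

	/* calloc stands in for kzalloc: the bitmap starts zeroed (no FCF eligible). */
	unsigned long *bmask = calloc(words, sizeof(unsigned long));
	if (!bmask) {
		perror("calloc");
		return 1;
	}

	printf("%d index bits -> %zu word(s), %zu bytes\n",
	       FCF_TBL_INDX_MAX, words, words * sizeof(unsigned long));
	free(bmask);
	return 0;
}

On an LP64 host the 32-entry table works out to a single 8-byte word.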
+1
drivers/scsi/lpfc/lpfc_logmsg.h
··· 35 35 #define LOG_VPORT 0x00004000 /* NPIV events */ 36 36 #define LOF_SECURITY 0x00008000 /* Security events */ 37 37 #define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */ 38 + #define LOG_FIP 0x00020000 /* FIP events */ 38 39 #define LOG_ALL_MSG 0xffffffff /* LOG all messages */ 39 40 40 41 #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
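Note on the lpfc_logmsg.h change: LOG_FIP adds one more bit to the lpfc_printf_log verbosity mask so FIP/FCF messages can be enabled independently of the discovery class. A simplified userspace model of how such mask gating behaves is sketched below; only the LOG_EVENT and LOG_FIP values come from the header hunk, the helper and the global are illustrative.

#include <stdio.h>
#include <stdarg.h>
#include <stdint.h>

#define LOG_EVENT 0x00010000u   /* value from the header hunk above */
#define LOG_FIP   0x00020000u   /* new mask bit added by this change */

static uint32_t log_verbose = LOG_FIP;          /* classes currently enabled */

/* Emit the message only if at least one requested class is enabled. */
static void log_msg(uint32_t mask, const char *fmt, ...)
{
	va_list ap;

	if (!(mask & log_verbose))
		return;
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

int main(void)
{
	log_msg(LOG_FIP | LOG_EVENT, "2778 Start FCF table scan at linkup\n");
	log_msg(LOG_EVENT, "suppressed: event-only message\n");
	return 0;
}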
+4 -4
drivers/scsi/lpfc/lpfc_mbox.c
··· 1748 1748 } 1749 1749 1750 1750 /** 1751 - * lpfc_sli4_mbx_read_fcf_record - Allocate and construct read fcf mbox cmd 1751 + * lpfc_sli4_mbx_read_fcf_rec - Allocate and construct read fcf mbox cmd 1752 1752 * @phba: pointer to lpfc hba data structure. 1753 1753 * @fcf_index: index to fcf table. 1754 1754 * ··· 1759 1759 * NULL. 1760 1760 **/ 1761 1761 int 1762 - lpfc_sli4_mbx_read_fcf_record(struct lpfc_hba *phba, 1763 - struct lpfcMboxq *mboxq, 1764 - uint16_t fcf_index) 1762 + lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *phba, 1763 + struct lpfcMboxq *mboxq, 1764 + uint16_t fcf_index) 1765 1765 { 1766 1766 void *virt_addr; 1767 1767 dma_addr_t phys_addr;
+40 -9
drivers/scsi/lpfc/lpfc_scsi.c
··· 620 620 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 621 621 struct lpfc_scsi_buf *psb, *next_psb; 622 622 unsigned long iflag = 0; 623 + struct lpfc_iocbq *iocbq; 624 + int i; 623 625 624 - spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag); 626 + spin_lock_irqsave(&phba->hbalock, iflag); 627 + spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock); 625 628 list_for_each_entry_safe(psb, next_psb, 626 629 &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) { 627 630 if (psb->cur_iocbq.sli4_xritag == xri) { 628 631 list_del(&psb->list); 629 632 psb->exch_busy = 0; 630 633 psb->status = IOSTAT_SUCCESS; 631 - spin_unlock_irqrestore( 632 - &phba->sli4_hba.abts_scsi_buf_list_lock, 633 - iflag); 634 + spin_unlock( 635 + &phba->sli4_hba.abts_scsi_buf_list_lock); 636 + spin_unlock_irqrestore(&phba->hbalock, iflag); 634 637 lpfc_release_scsi_buf_s4(phba, psb); 635 638 return; 636 639 } 637 640 } 638 - spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock, 639 - iflag); 641 + spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock); 642 + for (i = 1; i <= phba->sli.last_iotag; i++) { 643 + iocbq = phba->sli.iocbq_lookup[i]; 644 + 645 + if (!(iocbq->iocb_flag & LPFC_IO_FCP) || 646 + (iocbq->iocb_flag & LPFC_IO_LIBDFC)) 647 + continue; 648 + if (iocbq->sli4_xritag != xri) 649 + continue; 650 + psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); 651 + psb->exch_busy = 0; 652 + spin_unlock_irqrestore(&phba->hbalock, iflag); 653 + return; 654 + 655 + } 656 + spin_unlock_irqrestore(&phba->hbalock, iflag); 640 657 } 641 658 642 659 /** ··· 1023 1006 struct scatterlist *sgel = NULL; 1024 1007 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 1025 1008 struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl; 1009 + struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq; 1026 1010 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 1027 1011 struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde; 1028 1012 dma_addr_t physaddr; ··· 1074 1056 physaddr = sg_dma_address(sgel); 1075 1057 if (phba->sli_rev == 3 && 1076 1058 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && 1059 + !(iocbq->iocb_flag & DSS_SECURITY_OP) && 1077 1060 nseg <= LPFC_EXT_DATA_BDE_COUNT) { 1078 1061 data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 1079 1062 data_bde->tus.f.bdeSize = sg_dma_len(sgel); ··· 1101 1082 * explicitly reinitialized since all iocb memory resources are reused. 1102 1083 */ 1103 1084 if (phba->sli_rev == 3 && 1104 - !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) { 1085 + !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && 1086 + !(iocbq->iocb_flag & DSS_SECURITY_OP)) { 1105 1087 if (num_bde > LPFC_EXT_DATA_BDE_COUNT) { 1106 1088 /* 1107 1089 * The extended IOCB format can only fit 3 BDE or a BPL. 
··· 1127 1107 } else { 1128 1108 iocb_cmd->un.fcpi64.bdl.bdeSize = 1129 1109 ((num_bde + 2) * sizeof(struct ulp_bde64)); 1110 + iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1); 1130 1111 } 1131 1112 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); 1132 1113 ··· 2100 2079 2101 2080 if (resp_info & RSP_LEN_VALID) { 2102 2081 rsplen = be32_to_cpu(fcprsp->rspRspLen); 2103 - if ((rsplen != 0 && rsplen != 4 && rsplen != 8) || 2104 - (fcprsp->rspInfo3 != RSP_NO_FAILURE)) { 2082 + if (rsplen != 0 && rsplen != 4 && rsplen != 8) { 2105 2083 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 2106 2084 "2719 Invalid response length: " 2107 2085 "tgt x%x lun x%x cmnd x%x rsplen x%x\n", 2108 2086 cmnd->device->id, 2109 2087 cmnd->device->lun, cmnd->cmnd[0], 2110 2088 rsplen); 2089 + host_status = DID_ERROR; 2090 + goto out; 2091 + } 2092 + if (fcprsp->rspInfo3 != RSP_NO_FAILURE) { 2093 + lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 2094 + "2757 Protocol failure detected during " 2095 + "processing of FCP I/O op: " 2096 + "tgt x%x lun x%x cmnd x%x rspInfo3 x%x\n", 2097 + cmnd->device->id, 2098 + cmnd->device->lun, cmnd->cmnd[0], 2099 + fcprsp->rspInfo3); 2111 2100 host_status = DID_ERROR; 2112 2101 goto out; 2113 2102 }
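Note on the lpfc_scsi.c changes: the FCP_RSP sanity check is split so a bad rspRspLen and a protocol failure in rspInfo3 are reported separately (messages 2719 and 2757) while both still resolve to DID_ERROR. A self-contained sketch of that two-stage validation; the struct is a simplified model of the fcp_rsp fields used in the hunk, the driver also byte-swaps rspRspLen with be32_to_cpu, and the numeric flag values here are illustrative.

#include <stdio.h>
#include <stdint.h>

#define RSP_LEN_VALID  0x01      /* illustrative flag value */
#define RSP_NO_FAILURE 0x00
#define DID_OK    0
#define DID_ERROR 7

struct fcp_rsp_model {
	uint8_t  resp_info;      /* validity flags */
	uint32_t rsp_len;        /* FCP_RSP_INFO length */
	uint8_t  rsp_info3;      /* RSP_CODE */
};

/* Mirror the split check: length sanity first, then protocol failure. */
static int check_fcp_rsp(const struct fcp_rsp_model *rsp)
{
	if (!(rsp->resp_info & RSP_LEN_VALID))
		return DID_OK;

	if (rsp->rsp_len != 0 && rsp->rsp_len != 4 && rsp->rsp_len != 8) {
		fprintf(stderr, "invalid response length: %u\n", rsp->rsp_len);
		return DID_ERROR;
	}
	if (rsp->rsp_info3 != RSP_NO_FAILURE) {
		fprintf(stderr, "protocol failure, rspInfo3 0x%x\n", rsp->rsp_info3);
		return DID_ERROR;
	}
	return DID_OK;
}

int main(void)
{
	struct fcp_rsp_model bad_len  = { RSP_LEN_VALID, 6, RSP_NO_FAILURE };
	struct fcp_rsp_model bad_code = { RSP_LEN_VALID, 8, 0x01 };

	printf("bad length   -> %d\n", check_fcp_rsp(&bad_len));
	printf("bad rspInfo3 -> %d\n", check_fcp_rsp(&bad_code));
	return 0;
}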
+349 -64
drivers/scsi/lpfc/lpfc_sli.c
··· 494 494 * 495 495 * Returns sglq ponter = success, NULL = Failure. 496 496 **/ 497 - static struct lpfc_sglq * 497 + struct lpfc_sglq * 498 498 __lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag) 499 499 { 500 500 uint16_t adj_xri; ··· 526 526 return NULL; 527 527 adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base; 528 528 phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq; 529 + sglq->state = SGL_ALLOCATED; 529 530 return sglq; 530 531 } 531 532 ··· 581 580 else 582 581 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag); 583 582 if (sglq) { 584 - if (iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) { 583 + if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) && 584 + (sglq->state != SGL_XRI_ABORTED)) { 585 585 spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, 586 586 iflag); 587 587 list_add(&sglq->list, 588 588 &phba->sli4_hba.lpfc_abts_els_sgl_list); 589 589 spin_unlock_irqrestore( 590 590 &phba->sli4_hba.abts_sgl_list_lock, iflag); 591 - } else 591 + } else { 592 + sglq->state = SGL_FREED; 592 593 list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list); 594 + } 593 595 } 594 596 595 597 ··· 2262 2258 spin_unlock_irqrestore(&phba->hbalock, 2263 2259 iflag); 2264 2260 } 2265 - if ((phba->sli_rev == LPFC_SLI_REV4) && 2266 - (saveq->iocb_flag & LPFC_EXCHANGE_BUSY)) { 2267 - /* Set cmdiocb flag for the exchange 2268 - * busy so sgl (xri) will not be 2269 - * released until the abort xri is 2270 - * received from hba, clear the 2271 - * LPFC_DRIVER_ABORTED bit in case 2272 - * it was driver initiated abort. 2273 - */ 2274 - spin_lock_irqsave(&phba->hbalock, 2275 - iflag); 2276 - cmdiocbp->iocb_flag &= 2277 - ~LPFC_DRIVER_ABORTED; 2278 - cmdiocbp->iocb_flag |= 2279 - LPFC_EXCHANGE_BUSY; 2280 - spin_unlock_irqrestore(&phba->hbalock, 2281 - iflag); 2282 - cmdiocbp->iocb.ulpStatus = 2283 - IOSTAT_LOCAL_REJECT; 2284 - cmdiocbp->iocb.un.ulpWord[4] = 2285 - IOERR_ABORT_REQUESTED; 2286 - /* 2287 - * For SLI4, irsiocb contains NO_XRI 2288 - * in sli_xritag, it shall not affect 2289 - * releasing sgl (xri) process. 2290 - */ 2291 - saveq->iocb.ulpStatus = 2292 - IOSTAT_LOCAL_REJECT; 2293 - saveq->iocb.un.ulpWord[4] = 2294 - IOERR_SLI_ABORTED; 2295 - spin_lock_irqsave(&phba->hbalock, 2296 - iflag); 2297 - saveq->iocb_flag |= LPFC_DELAY_MEM_FREE; 2298 - spin_unlock_irqrestore(&phba->hbalock, 2299 - iflag); 2261 + if (phba->sli_rev == LPFC_SLI_REV4) { 2262 + if (saveq->iocb_flag & 2263 + LPFC_EXCHANGE_BUSY) { 2264 + /* Set cmdiocb flag for the 2265 + * exchange busy so sgl (xri) 2266 + * will not be released until 2267 + * the abort xri is received 2268 + * from hba. 2269 + */ 2270 + spin_lock_irqsave( 2271 + &phba->hbalock, iflag); 2272 + cmdiocbp->iocb_flag |= 2273 + LPFC_EXCHANGE_BUSY; 2274 + spin_unlock_irqrestore( 2275 + &phba->hbalock, iflag); 2276 + } 2277 + if (cmdiocbp->iocb_flag & 2278 + LPFC_DRIVER_ABORTED) { 2279 + /* 2280 + * Clear LPFC_DRIVER_ABORTED 2281 + * bit in case it was driver 2282 + * initiated abort. 2283 + */ 2284 + spin_lock_irqsave( 2285 + &phba->hbalock, iflag); 2286 + cmdiocbp->iocb_flag &= 2287 + ~LPFC_DRIVER_ABORTED; 2288 + spin_unlock_irqrestore( 2289 + &phba->hbalock, iflag); 2290 + cmdiocbp->iocb.ulpStatus = 2291 + IOSTAT_LOCAL_REJECT; 2292 + cmdiocbp->iocb.un.ulpWord[4] = 2293 + IOERR_ABORT_REQUESTED; 2294 + /* 2295 + * For SLI4, irsiocb contains 2296 + * NO_XRI in sli_xritag, it 2297 + * shall not affect releasing 2298 + * sgl (xri) process. 
2299 + */ 2300 + saveq->iocb.ulpStatus = 2301 + IOSTAT_LOCAL_REJECT; 2302 + saveq->iocb.un.ulpWord[4] = 2303 + IOERR_SLI_ABORTED; 2304 + spin_lock_irqsave( 2305 + &phba->hbalock, iflag); 2306 + saveq->iocb_flag |= 2307 + LPFC_DELAY_MEM_FREE; 2308 + spin_unlock_irqrestore( 2309 + &phba->hbalock, iflag); 2310 + } 2300 2311 } 2301 2312 } 2302 2313 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); ··· 2534 2515 2535 2516 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, 2536 2517 &rspiocbq); 2537 - if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) { 2538 - spin_unlock_irqrestore(&phba->hbalock, 2539 - iflag); 2540 - (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 2541 - &rspiocbq); 2542 - spin_lock_irqsave(&phba->hbalock, 2543 - iflag); 2544 - } 2518 + if (unlikely(!cmdiocbq)) 2519 + break; 2520 + if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) 2521 + cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 2522 + if (cmdiocbq->iocb_cmpl) { 2523 + spin_unlock_irqrestore(&phba->hbalock, iflag); 2524 + (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 2525 + &rspiocbq); 2526 + spin_lock_irqsave(&phba->hbalock, iflag); 2527 + } 2545 2528 break; 2546 2529 case LPFC_UNSOL_IOCB: 2547 2530 spin_unlock_irqrestore(&phba->hbalock, iflag); ··· 3112 3091 3113 3092 /* Check to see if any errors occurred during init */ 3114 3093 if ((status & HS_FFERM) || (i >= 20)) { 3094 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3095 + "2751 Adapter failed to restart, " 3096 + "status reg x%x, FW Data: A8 x%x AC x%x\n", 3097 + status, 3098 + readl(phba->MBslimaddr + 0xa8), 3099 + readl(phba->MBslimaddr + 0xac)); 3115 3100 phba->link_state = LPFC_HBA_ERROR; 3116 3101 retval = 1; 3117 3102 } ··· 3305 3278 if (retval != MBX_SUCCESS) { 3306 3279 if (retval != MBX_BUSY) 3307 3280 mempool_free(pmb, phba->mbox_mem_pool); 3281 + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3282 + "2752 KILL_BOARD command failed retval %d\n", 3283 + retval); 3308 3284 spin_lock_irq(&phba->hbalock); 3309 3285 phba->link_flag &= ~LS_IGNORE_ERATT; 3310 3286 spin_unlock_irq(&phba->hbalock); ··· 4065 4035 4066 4036 lpfc_sli_hba_setup_error: 4067 4037 phba->link_state = LPFC_HBA_ERROR; 4068 - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4038 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4069 4039 "0445 Firmware initialization failed\n"); 4070 4040 return rc; 4071 4041 } ··· 4418 4388 spin_unlock_irq(&phba->hbalock); 4419 4389 4420 4390 /* Read the port's service parameters. 
*/ 4421 - lpfc_read_sparam(phba, mboxq, vport->vpi); 4391 + rc = lpfc_read_sparam(phba, mboxq, vport->vpi); 4392 + if (rc) { 4393 + phba->link_state = LPFC_HBA_ERROR; 4394 + rc = -ENOMEM; 4395 + goto out_free_vpd; 4396 + } 4397 + 4422 4398 mboxq->vport = vport; 4423 4399 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4424 4400 mp = (struct lpfc_dmabuf *) mboxq->context1; ··· 4518 4482 4519 4483 /* Post receive buffers to the device */ 4520 4484 lpfc_sli4_rb_setup(phba); 4485 + 4486 + /* Reset HBA FCF states after HBA reset */ 4487 + phba->fcf.fcf_flag = 0; 4488 + phba->fcf.current_rec.flag = 0; 4521 4489 4522 4490 /* Start the ELS watchdog timer */ 4523 4491 mod_timer(&vport->els_tmofunc, ··· 7476 7436 { 7477 7437 wait_queue_head_t *pdone_q; 7478 7438 unsigned long iflags; 7439 + struct lpfc_scsi_buf *lpfc_cmd; 7479 7440 7480 7441 spin_lock_irqsave(&phba->hbalock, iflags); 7481 7442 cmdiocbq->iocb_flag |= LPFC_IO_WAKE; 7482 7443 if (cmdiocbq->context2 && rspiocbq) 7483 7444 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, 7484 7445 &rspiocbq->iocb, sizeof(IOCB_t)); 7446 + 7447 + /* Set the exchange busy flag for task management commands */ 7448 + if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) && 7449 + !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) { 7450 + lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf, 7451 + cur_iocbq); 7452 + lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY; 7453 + } 7485 7454 7486 7455 pdone_q = cmdiocbq->context_un.wait_queue; 7487 7456 if (pdone_q) ··· 9109 9060 9110 9061 /* Fake the irspiocb and copy necessary response information */ 9111 9062 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe); 9063 + 9064 + if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) { 9065 + spin_lock_irqsave(&phba->hbalock, iflags); 9066 + cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 9067 + spin_unlock_irqrestore(&phba->hbalock, iflags); 9068 + } 9112 9069 9113 9070 /* Pass the cmd_iocb and the rsp state to the upper layer */ 9114 9071 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq); ··· 11996 11941 } 11997 11942 11998 11943 /** 11999 - * lpfc_sli4_read_fcf_record - Read the driver's default FCF Record. 11944 + * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan. 12000 11945 * @phba: pointer to lpfc hba data structure. 12001 11946 * @fcf_index: FCF table entry offset. 12002 11947 * 12003 - * This routine is invoked to read up to @fcf_num of FCF record from the 12004 - * device starting with the given @fcf_index. 11948 + * This routine is invoked to scan the entire FCF table by reading FCF 11949 + * record and processing it one at a time starting from the @fcf_index 11950 + * for initial FCF discovery or fast FCF failover rediscovery. 11951 + * 11952 + * Return 0 if the mailbox command is submitted sucessfully, none 0 11953 + * otherwise. 
12005 11954 **/ 12006 11955 int 12007 - lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index) 11956 + lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 12008 11957 { 12009 11958 int rc = 0, error; 12010 11959 LPFC_MBOXQ_t *mboxq; ··· 12020 11961 "2000 Failed to allocate mbox for " 12021 11962 "READ_FCF cmd\n"); 12022 11963 error = -ENOMEM; 12023 - goto fail_fcfscan; 11964 + goto fail_fcf_scan; 12024 11965 } 12025 11966 /* Construct the read FCF record mailbox command */ 12026 - rc = lpfc_sli4_mbx_read_fcf_record(phba, mboxq, fcf_index); 11967 + rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 12027 11968 if (rc) { 12028 11969 error = -EINVAL; 12029 - goto fail_fcfscan; 11970 + goto fail_fcf_scan; 12030 11971 } 12031 11972 /* Issue the mailbox command asynchronously */ 12032 11973 mboxq->vport = phba->pport; 12033 - mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record; 11974 + mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec; 12034 11975 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 12035 11976 if (rc == MBX_NOT_FINISHED) 12036 11977 error = -EIO; ··· 12038 11979 spin_lock_irq(&phba->hbalock); 12039 11980 phba->hba_flag |= FCF_DISC_INPROGRESS; 12040 11981 spin_unlock_irq(&phba->hbalock); 11982 + /* Reset FCF round robin index bmask for new scan */ 11983 + if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) 11984 + memset(phba->fcf.fcf_rr_bmask, 0, 11985 + sizeof(*phba->fcf.fcf_rr_bmask)); 12041 11986 error = 0; 12042 11987 } 12043 - fail_fcfscan: 11988 + fail_fcf_scan: 12044 11989 if (error) { 12045 11990 if (mboxq) 12046 11991 lpfc_sli4_mbox_cmd_free(phba, mboxq); ··· 12054 11991 spin_unlock_irq(&phba->hbalock); 12055 11992 } 12056 11993 return error; 11994 + } 11995 + 11996 + /** 11997 + * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for round robin fcf. 11998 + * @phba: pointer to lpfc hba data structure. 11999 + * @fcf_index: FCF table entry offset. 12000 + * 12001 + * This routine is invoked to read an FCF record indicated by @fcf_index 12002 + * and to use it for FLOGI round robin FCF failover. 12003 + * 12004 + * Return 0 if the mailbox command is submitted sucessfully, none 0 12005 + * otherwise. 12006 + **/ 12007 + int 12008 + lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 12009 + { 12010 + int rc = 0, error; 12011 + LPFC_MBOXQ_t *mboxq; 12012 + 12013 + mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12014 + if (!mboxq) { 12015 + lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, 12016 + "2763 Failed to allocate mbox for " 12017 + "READ_FCF cmd\n"); 12018 + error = -ENOMEM; 12019 + goto fail_fcf_read; 12020 + } 12021 + /* Construct the read FCF record mailbox command */ 12022 + rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 12023 + if (rc) { 12024 + error = -EINVAL; 12025 + goto fail_fcf_read; 12026 + } 12027 + /* Issue the mailbox command asynchronously */ 12028 + mboxq->vport = phba->pport; 12029 + mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec; 12030 + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 12031 + if (rc == MBX_NOT_FINISHED) 12032 + error = -EIO; 12033 + else 12034 + error = 0; 12035 + 12036 + fail_fcf_read: 12037 + if (error && mboxq) 12038 + lpfc_sli4_mbox_cmd_free(phba, mboxq); 12039 + return error; 12040 + } 12041 + 12042 + /** 12043 + * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask. 12044 + * @phba: pointer to lpfc hba data structure. 12045 + * @fcf_index: FCF table entry offset. 
12046 + * 12047 + * This routine is invoked to read an FCF record indicated by @fcf_index to 12048 + * determine whether it's eligible for FLOGI round robin failover list. 12049 + * 12050 + * Return 0 if the mailbox command is submitted sucessfully, none 0 12051 + * otherwise. 12052 + **/ 12053 + int 12054 + lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 12055 + { 12056 + int rc = 0, error; 12057 + LPFC_MBOXQ_t *mboxq; 12058 + 12059 + mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12060 + if (!mboxq) { 12061 + lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, 12062 + "2758 Failed to allocate mbox for " 12063 + "READ_FCF cmd\n"); 12064 + error = -ENOMEM; 12065 + goto fail_fcf_read; 12066 + } 12067 + /* Construct the read FCF record mailbox command */ 12068 + rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 12069 + if (rc) { 12070 + error = -EINVAL; 12071 + goto fail_fcf_read; 12072 + } 12073 + /* Issue the mailbox command asynchronously */ 12074 + mboxq->vport = phba->pport; 12075 + mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec; 12076 + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 12077 + if (rc == MBX_NOT_FINISHED) 12078 + error = -EIO; 12079 + else 12080 + error = 0; 12081 + 12082 + fail_fcf_read: 12083 + if (error && mboxq) 12084 + lpfc_sli4_mbox_cmd_free(phba, mboxq); 12085 + return error; 12086 + } 12087 + 12088 + /** 12089 + * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index 12090 + * @phba: pointer to lpfc hba data structure. 12091 + * 12092 + * This routine is to get the next eligible FCF record index in a round 12093 + * robin fashion. If the next eligible FCF record index equals to the 12094 + * initial round robin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) 12095 + * shall be returned, otherwise, the next eligible FCF record's index 12096 + * shall be returned. 12097 + **/ 12098 + uint16_t 12099 + lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba) 12100 + { 12101 + uint16_t next_fcf_index; 12102 + 12103 + /* Search from the currently registered FCF index */ 12104 + next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 12105 + LPFC_SLI4_FCF_TBL_INDX_MAX, 12106 + phba->fcf.current_rec.fcf_indx); 12107 + /* Wrap around condition on phba->fcf.fcf_rr_bmask */ 12108 + if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) 12109 + next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 12110 + LPFC_SLI4_FCF_TBL_INDX_MAX, 0); 12111 + /* Round robin failover stop condition */ 12112 + if (next_fcf_index == phba->fcf.fcf_rr_init_indx) 12113 + return LPFC_FCOE_FCF_NEXT_NONE; 12114 + 12115 + return next_fcf_index; 12116 + } 12117 + 12118 + /** 12119 + * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index 12120 + * @phba: pointer to lpfc hba data structure. 12121 + * 12122 + * This routine sets the FCF record index in to the eligible bmask for 12123 + * round robin failover search. It checks to make sure that the index 12124 + * does not go beyond the range of the driver allocated bmask dimension 12125 + * before setting the bit. 12126 + * 12127 + * Returns 0 if the index bit successfully set, otherwise, it returns 12128 + * -EINVAL. 
12129 + **/ 12130 + int 12131 + lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index) 12132 + { 12133 + if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 12134 + lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 12135 + "2610 HBA FCF index reached driver's " 12136 + "book keeping dimension: fcf_index:%d, " 12137 + "driver_bmask_max:%d\n", 12138 + fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 12139 + return -EINVAL; 12140 + } 12141 + /* Set the eligible FCF record index bmask */ 12142 + set_bit(fcf_index, phba->fcf.fcf_rr_bmask); 12143 + 12144 + return 0; 12145 + } 12146 + 12147 + /** 12148 + * lpfc_sli4_fcf_rr_index_set - Clear bmask from eligible fcf record index 12149 + * @phba: pointer to lpfc hba data structure. 12150 + * 12151 + * This routine clears the FCF record index from the eligible bmask for 12152 + * round robin failover search. It checks to make sure that the index 12153 + * does not go beyond the range of the driver allocated bmask dimension 12154 + * before clearing the bit. 12155 + **/ 12156 + void 12157 + lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) 12158 + { 12159 + if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 12160 + lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 12161 + "2762 HBA FCF index goes beyond driver's " 12162 + "book keeping dimension: fcf_index:%d, " 12163 + "driver_bmask_max:%d\n", 12164 + fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 12165 + return; 12166 + } 12167 + /* Clear the eligible FCF record index bmask */ 12168 + clear_bit(fcf_index, phba->fcf.fcf_rr_bmask); 12057 12169 } 12058 12170 12059 12171 /** ··· 12252 12014 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 12253 12015 &redisc_fcf->header.cfg_shdr.response); 12254 12016 if (shdr_status || shdr_add_status) { 12255 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12017 + lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 12256 12018 "2746 Requesting for FCF rediscovery failed " 12257 12019 "status x%x add_status x%x\n", 12258 12020 shdr_status, shdr_add_status); 12259 - /* 12260 - * Request failed, last resort to re-try current 12261 - * registered FCF entry 12262 - */ 12263 - lpfc_retry_pport_discovery(phba); 12264 - } else 12021 + if (phba->fcf.fcf_flag & FCF_ACVL_DISC) { 12022 + spin_lock_irq(&phba->hbalock); 12023 + phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 12024 + spin_unlock_irq(&phba->hbalock); 12025 + /* 12026 + * CVL event triggered FCF rediscover request failed, 12027 + * last resort to re-try current registered FCF entry. 12028 + */ 12029 + lpfc_retry_pport_discovery(phba); 12030 + } else { 12031 + spin_lock_irq(&phba->hbalock); 12032 + phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 12033 + spin_unlock_irq(&phba->hbalock); 12034 + /* 12035 + * DEAD FCF event triggered FCF rediscover request 12036 + * failed, last resort to fail over as a link down 12037 + * to FCF registration. 12038 + */ 12039 + lpfc_sli4_fcf_dead_failthrough(phba); 12040 + } 12041 + } else { 12042 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 12043 + "2775 Start FCF rediscovery quiescent period " 12044 + "wait timer before scaning FCF table\n"); 12265 12045 /* 12266 12046 * Start FCF rediscovery wait timer for pending FCF 12267 12047 * before rescan FCF record table. 
12268 12048 */ 12269 12049 lpfc_fcf_redisc_wait_start_timer(phba); 12050 + } 12270 12051 12271 12052 mempool_free(mbox, phba->mbox_mem_pool); 12272 12053 } ··· 12303 12046 LPFC_MBOXQ_t *mbox; 12304 12047 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 12305 12048 int rc, length; 12049 + 12050 + /* Cancel retry delay timers to all vports before FCF rediscover */ 12051 + lpfc_cancel_all_vport_retry_delay_timer(phba); 12306 12052 12307 12053 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12308 12054 if (!mbox) { ··· 12335 12075 return -EIO; 12336 12076 } 12337 12077 return 0; 12078 + } 12079 + 12080 + /** 12081 + * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event 12082 + * @phba: pointer to lpfc hba data structure. 12083 + * 12084 + * This function is the failover routine as a last resort to the FCF DEAD 12085 + * event when driver failed to perform fast FCF failover. 12086 + **/ 12087 + void 12088 + lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba) 12089 + { 12090 + uint32_t link_state; 12091 + 12092 + /* 12093 + * Last resort as FCF DEAD event failover will treat this as 12094 + * a link down, but save the link state because we don't want 12095 + * it to be changed to Link Down unless it is already down. 12096 + */ 12097 + link_state = phba->link_state; 12098 + lpfc_linkdown(phba); 12099 + phba->link_state = link_state; 12100 + 12101 + /* Unregister FCF if no devices connected to it */ 12102 + lpfc_unregister_unused_fcf(phba); 12338 12103 } 12339 12104 12340 12105 /**
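Note on the lpfc_sli.c changes: of the new routines, lpfc_sli4_fcf_rr_next_index_get carries the round-robin policy: search the eligible bmask from the currently registered index, wrap to bit 0, and return LPFC_FCOE_FCF_NEXT_NONE once the search comes back to fcf_rr_init_indx. A self-contained sketch of that wrap-around search, with a plain loop standing in for the kernel's find_next_bit and a bool array for fcf_rr_bmask:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define FCF_TBL_MAX   32          /* LPFC_SLI4_FCF_TBL_INDX_MAX */
#define FCF_NEXT_NONE 0xFFFF      /* LPFC_FCOE_FCF_NEXT_NONE */

static bool eligible[FCF_TBL_MAX];   /* stands in for fcf_rr_bmask */

/* First eligible index at or after 'from'; FCF_TBL_MAX if none remain. */
static int next_set(int from)
{
	for (int i = from; i < FCF_TBL_MAX; i++)
		if (eligible[i])
			return i;
	return FCF_TBL_MAX;
}

/*
 * Round-robin selection: search from the given start index, wrap to 0,
 * and report "none" once the search returns to the initial index.
 */
static uint16_t rr_next_index(uint16_t start_idx, uint16_t init_idx)
{
	int next = next_set(start_idx);

	if (next >= FCF_TBL_MAX)
		next = next_set(0);              /* wrap around */
	if (next >= FCF_TBL_MAX || next == init_idx)
		return FCF_NEXT_NONE;            /* failover candidates exhausted */
	return (uint16_t)next;
}

int main(void)
{
	eligible[3] = eligible[7] = eligible[20] = true;

	printf("after index 7  -> %d\n", rr_next_index(8, 3));   /* 20 */
	printf("after index 20 -> %d\n", rr_next_index(21, 3));  /* 65535 = none */
	return 0;
}

lpfc_sli4_fcf_rr_index_set and lpfc_sli4_fcf_rr_index_clear in the hunk are the set_bit/clear_bit wrappers that keep this bmask in step with connection-list eligibility, with a bounds check against LPFC_SLI4_FCF_TBL_INDX_MAX.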
+1
drivers/scsi/lpfc/lpfc_sli.h
··· 62 62 #define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */ 63 63 #define LPFC_EXCHANGE_BUSY 0x40 /* SLI4 hba reported XB in response */ 64 64 #define LPFC_USE_FCPWQIDX 0x80 /* Submit to specified FCPWQ index */ 65 + #define DSS_SECURITY_OP 0x100 /* security IO */ 65 66 66 67 #define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */ 67 68 #define LPFC_FIP_ELS_ID_SHIFT 14
+31 -7
drivers/scsi/lpfc/lpfc_sli4.h
··· 153 153 #define FCF_REGISTERED 0x02 /* FCF registered with FW */ 154 154 #define FCF_SCAN_DONE 0x04 /* FCF table scan done */ 155 155 #define FCF_IN_USE 0x08 /* Atleast one discovery completed */ 156 - #define FCF_REDISC_PEND 0x10 /* FCF rediscovery pending */ 157 - #define FCF_REDISC_EVT 0x20 /* FCF rediscovery event to worker thread */ 158 - #define FCF_REDISC_FOV 0x40 /* Post FCF rediscovery fast failover */ 156 + #define FCF_INIT_DISC 0x10 /* Initial FCF discovery */ 157 + #define FCF_DEAD_DISC 0x20 /* FCF DEAD fast FCF failover discovery */ 158 + #define FCF_ACVL_DISC 0x40 /* All CVL fast FCF failover discovery */ 159 + #define FCF_DISCOVERY (FCF_INIT_DISC | FCF_DEAD_DISC | FCF_ACVL_DISC) 160 + #define FCF_REDISC_PEND 0x80 /* FCF rediscovery pending */ 161 + #define FCF_REDISC_EVT 0x100 /* FCF rediscovery event to worker thread */ 162 + #define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */ 159 163 uint32_t addr_mode; 164 + uint16_t fcf_rr_init_indx; 160 165 struct lpfc_fcf_rec current_rec; 161 166 struct lpfc_fcf_rec failover_rec; 162 167 struct timer_list redisc_wait; 168 + unsigned long *fcf_rr_bmask; /* Eligible FCF indexes for RR failover */ 163 169 }; 170 + 171 + /* 172 + * Maximum FCF table index, it is for driver internal book keeping, it 173 + * just needs to be no less than the supported HBA's FCF table size. 174 + */ 175 + #define LPFC_SLI4_FCF_TBL_INDX_MAX 32 164 176 165 177 #define LPFC_REGION23_SIGNATURE "RG23" 166 178 #define LPFC_REGION23_VERSION 1 ··· 443 431 SCSI_BUFF_TYPE 444 432 }; 445 433 434 + enum lpfc_sgl_state { 435 + SGL_FREED, 436 + SGL_ALLOCATED, 437 + SGL_XRI_ABORTED 438 + }; 439 + 446 440 struct lpfc_sglq { 447 441 /* lpfc_sglqs are used in double linked lists */ 448 442 struct list_head list; 449 443 struct list_head clist; 450 444 enum lpfc_sge_type buff_type; /* is this a scsi sgl */ 445 + enum lpfc_sgl_state state; 451 446 uint16_t iotag; /* pre-assigned IO tag */ 452 447 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. 
*/ 453 448 struct sli4_sge *sgl; /* pre-assigned SGL */ ··· 482 463 void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t); 483 464 void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t, 484 465 struct lpfc_mbx_sge *); 485 - int lpfc_sli4_mbx_read_fcf_record(struct lpfc_hba *, struct lpfcMboxq *, 486 - uint16_t); 466 + int lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *, struct lpfcMboxq *, 467 + uint16_t); 487 468 488 469 void lpfc_sli4_hba_reset(struct lpfc_hba *); 489 470 struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t, ··· 542 523 uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool); 543 524 uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool); 544 525 void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t); 545 - int lpfc_sli4_read_fcf_record(struct lpfc_hba *, uint16_t); 546 - void lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *, LPFC_MBOXQ_t *); 526 + int lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *, uint16_t); 527 + int lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *, uint16_t); 528 + int lpfc_sli4_read_fcf_rec(struct lpfc_hba *, uint16_t); 529 + void lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *); 530 + void lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *); 531 + void lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *); 532 + int lpfc_sli4_unregister_fcf(struct lpfc_hba *); 547 533 int lpfc_sli4_post_status_check(struct lpfc_hba *); 548 534 uint8_t lpfc_sli4_mbox_opcode_get(struct lpfc_hba *, struct lpfcMboxq *); 549 535
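The new FCF_*_DISC flags together with fcf_rr_init_indx and the fcf_rr_bmask pointer give the driver enough state to walk eligible FCF records in round-robin order during fast failover. A hedged sketch of such a walk follows (userspace C; next_rr_fcf_index() is a hypothetical name, and only LPFC_SLI4_FCF_TBL_INDX_MAX and the bitmask idea are taken from the diff; the real driver uses its own read_fcf_rec helpers and the kernel bitmap API).

/*
 * Hedged sketch: pick the next eligible FCF index after the last one
 * tried, wrapping at the table size.
 */
#include <stdint.h>
#include <stdio.h>

#define FCF_TBL_INDX_MAX 32                      /* mirrors LPFC_SLI4_FCF_TBL_INDX_MAX */

static int next_rr_fcf_index(const uint32_t *bmask, uint16_t last_index)
{
        uint16_t i, idx;

        /* start searching just after the last index that was tried */
        for (i = 1; i <= FCF_TBL_INDX_MAX; i++) {
                idx = (uint16_t)((last_index + i) % FCF_TBL_INDX_MAX);
                if (bmask[idx / 32] & (1u << (idx % 32)))
                        return idx;              /* next eligible FCF record */
        }
        return -1;                               /* no eligible FCF left */
}

int main(void)
{
        uint32_t bmask[1] = { 0 };

        bmask[0] |= 1u << 5;
        bmask[0] |= 1u << 20;
        printf("after 5 -> %d, after 20 -> %d\n",
               next_rr_fcf_index(bmask, 5), next_rr_fcf_index(bmask, 20));
        return 0;
}

The wrap-around is what makes the failover "round robin": once the highest eligible index has been tried, the search continues from index 0.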
+1 -1
drivers/scsi/lpfc/lpfc_version.h
··· 18 18 * included with this package. * 19 19 *******************************************************************/ 20 20 21 - #define LPFC_DRIVER_VERSION "8.3.9" 21 + #define LPFC_DRIVER_VERSION "8.3.10" 22 22 #define LPFC_DRIVER_NAME "lpfc" 23 23 #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 24 24 #define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
+6 -1
drivers/scsi/lpfc/lpfc_vport.c
··· 123 123 } 124 124 mb = &pmb->u.mb; 125 125 126 - lpfc_read_sparam(phba, pmb, vport->vpi); 126 + rc = lpfc_read_sparam(phba, pmb, vport->vpi); 127 + if (rc) { 128 + mempool_free(pmb, phba->mbox_mem_pool); 129 + return -ENOMEM; 130 + } 131 + 127 132 /* 128 133 * Grab buffer pointer and clear context1 so we can use 129 134 * lpfc_sli_issue_box_wait
+4
drivers/scsi/osd/osd_initiator.c
··· 1433 1433 cdbh->command_specific_options |= or->attributes_mode; 1434 1434 if (or->attributes_mode == OSD_CDB_GET_ATTR_PAGE_SET_ONE) { 1435 1435 ret = _osd_req_finalize_attr_page(or); 1436 + if (ret) { 1437 + OSD_DEBUG("_osd_req_finalize_attr_page failed\n"); 1438 + return ret; 1439 + } 1436 1440 } else { 1437 1441 /* TODO: I think that for the GET_ATTR command these 2 should 1438 1442 * be reversed to keep them in execution order (for embeded
+2
drivers/scsi/raid_class.c
··· 63 63 * emulated RAID devices, so start with SCSI */ 64 64 struct raid_internal *i = ac_to_raid_internal(cont); 65 65 66 + #if defined(CONFIG_SCSI) || defined(CONFIG_SCSI_MODULE) 66 67 if (scsi_is_sdev_device(dev)) { 67 68 struct scsi_device *sdev = to_scsi_device(dev); 68 69 ··· 72 71 73 72 return i->f->is_raid(dev); 74 73 } 74 + #endif 75 75 /* FIXME: look at other subsystems too */ 76 76 return 0; 77 77 }
+12 -12
drivers/scsi/scsi_transport_fc.c
··· 1232 1232 { 1233 1233 struct fc_vport *vport = transport_class_to_vport(dev); 1234 1234 struct Scsi_Host *shost = vport_to_shost(vport); 1235 + unsigned long flags; 1236 + 1237 + spin_lock_irqsave(shost->host_lock, flags); 1238 + if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) { 1239 + spin_unlock_irqrestore(shost->host_lock, flags); 1240 + return -EBUSY; 1241 + } 1242 + vport->flags |= FC_VPORT_DELETING; 1243 + spin_unlock_irqrestore(shost->host_lock, flags); 1235 1244 1236 1245 fc_queue_work(shost, &vport->vport_delete_work); 1237 1246 return count; ··· 1830 1821 list_for_each_entry(vport, &fc_host->vports, peers) { 1831 1822 if ((vport->channel == 0) && 1832 1823 (vport->port_name == wwpn) && (vport->node_name == wwnn)) { 1824 + if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) 1825 + break; 1826 + vport->flags |= FC_VPORT_DELETING; 1833 1827 match = 1; 1834 1828 break; 1835 1829 } ··· 3381 3369 struct device *dev = &vport->dev; 3382 3370 unsigned long flags; 3383 3371 int stat; 3384 - 3385 - spin_lock_irqsave(shost->host_lock, flags); 3386 - if (vport->flags & FC_VPORT_CREATING) { 3387 - spin_unlock_irqrestore(shost->host_lock, flags); 3388 - return -EBUSY; 3389 - } 3390 - if (vport->flags & (FC_VPORT_DEL)) { 3391 - spin_unlock_irqrestore(shost->host_lock, flags); 3392 - return -EALREADY; 3393 - } 3394 - vport->flags |= FC_VPORT_DELETING; 3395 - spin_unlock_irqrestore(shost->host_lock, flags); 3396 3372 3397 3373 if (i->f->vport_delete) 3398 3374 stat = i->f->vport_delete(vport);
+2 -2
drivers/scsi/sd.c
··· 1948 1948 { 1949 1949 struct request_queue *q = sdkp->disk->queue; 1950 1950 unsigned int sector_sz = sdkp->device->sector_size; 1951 - const int vpd_len = 32; 1951 + const int vpd_len = 64; 1952 1952 unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL); 1953 1953 1954 1954 if (!buffer || ··· 1998 1998 { 1999 1999 unsigned char *buffer; 2000 2000 u16 rot; 2001 - const int vpd_len = 32; 2001 + const int vpd_len = 64; 2002 2002 2003 2003 buffer = kmalloc(vpd_len, GFP_KERNEL); 2004 2004
+2 -1
include/scsi/libiscsi.h
··· 338 338 extern int iscsi_change_queue_depth(struct scsi_device *sdev, int depth, 339 339 int reason); 340 340 extern int iscsi_eh_abort(struct scsi_cmnd *sc); 341 - extern int iscsi_eh_target_reset(struct scsi_cmnd *sc); 341 + extern int iscsi_eh_recover_target(struct scsi_cmnd *sc); 342 + extern int iscsi_eh_session_reset(struct scsi_cmnd *sc); 342 343 extern int iscsi_eh_device_reset(struct scsi_cmnd *sc); 343 344 extern int iscsi_queuecommand(struct scsi_cmnd *sc, 344 345 void (*done)(struct scsi_cmnd *));