Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'scsi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull first round of SCSI updates from James Bottomley:
"This patch set is driver updates for qla4xxx, scsi_debug, pm80xx,
fcoe/libfc, esas2r, lpfc, be2iscsi and megaraid_sas plus some assorted
bug fixes and cleanups"

* tag 'scsi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (106 commits)
[SCSI] scsi_error: Escalate to LUN reset if abort fails
[SCSI] Add 'eh_deadline' to limit SCSI EH runtime
[SCSI] remove check for 'resetting'
[SCSI] dc395: Move 'last_reset' into internal host structure
[SCSI] tmscsim: Move 'last_reset' into host structure
[SCSI] advansys: Remove 'last_reset' references
[SCSI] dpt_i2o: return SCSI_MLQUEUE_HOST_BUSY when in reset
[SCSI] dpt_i2o: Remove DPTI_STATE_IOCTL
[SCSI] megaraid_sas: Fix synchronization problem between sysPD IO path and AEN path
[SCSI] lpfc: Fix typo on NULL assignment
[SCSI] scsi_dh_alua: ALUA handler attach should succeed while TPG is transitioning
[SCSI] scsi_dh_alua: ALUA check sense should retry device internal reset unit attention
[SCSI] esas2r: Cleanup snprintf formatting of firmware version
[SCSI] esas2r: Remove superfluous mask of pcie_cap_reg
[SCSI] esas2r: Fixes for big-endian platforms
[SCSI] esas2r: Directly call kernel functions for atomic bit operations
[SCSI] lpfc 8.3.43: Update lpfc version to driver version 8.3.43
[SCSI] lpfc 8.3.43: Fixed not processing task management IOCB response status
[SCSI] lpfc 8.3.43: Fixed spinlock hang.
[SCSI] lpfc 8.3.43: Fixed invalid Total_Data_Placed value received for els and ct command responses
...

+5605 -1434
+1 -1
MAINTAINERS
··· 1867 1867 F: drivers/net/wireless/brcm80211/ 1868 1868 1869 1869 BROADCOM BNX2FC 10 GIGABIT FCOE DRIVER 1870 - M: Bhanu Prakash Gollapudi <bprakash@broadcom.com> 1870 + M: Eddie Wai <eddie.wai@broadcom.com> 1871 1871 L: linux-scsi@vger.kernel.org 1872 1872 S: Supported 1873 1873 F: drivers/scsi/bnx2fc/
+23 -13
drivers/scsi/BusLogic.c
··· 26 26 27 27 */ 28 28 29 - #define blogic_drvr_version "2.1.16" 30 - #define blogic_drvr_date "18 July 2002" 29 + #define blogic_drvr_version "2.1.17" 30 + #define blogic_drvr_date "12 September 2013" 31 31 32 32 #include <linux/module.h> 33 33 #include <linux/init.h> ··· 311 311 caller. 312 312 */ 313 313 314 - static void blogic_dealloc_ccb(struct blogic_ccb *ccb) 314 + static void blogic_dealloc_ccb(struct blogic_ccb *ccb, int dma_unmap) 315 315 { 316 316 struct blogic_adapter *adapter = ccb->adapter; 317 317 318 - scsi_dma_unmap(ccb->command); 319 - pci_unmap_single(adapter->pci_device, ccb->sensedata, 318 + if (ccb->command != NULL) 319 + scsi_dma_unmap(ccb->command); 320 + if (dma_unmap) 321 + pci_unmap_single(adapter->pci_device, ccb->sensedata, 320 322 ccb->sense_datalen, PCI_DMA_FROMDEVICE); 321 323 322 324 ccb->command = NULL; ··· 2764 2762 /* 2765 2763 Place CCB back on the Host Adapter's free list. 2766 2764 */ 2767 - blogic_dealloc_ccb(ccb); 2768 - #if 0 /* this needs to be redone different for new EH */ 2765 + blogic_dealloc_ccb(ccb, 1); 2766 + #if 0 /* this needs to be redone different for new EH */ 2769 2767 /* 2770 2768 Bus Device Reset CCBs have the command field 2771 2769 non-NULL only when a Bus Device Reset was requested ··· 2793 2791 if (ccb->status == BLOGIC_CCB_RESET && 2794 2792 ccb->tgt_id == tgt_id) { 2795 2793 command = ccb->command; 2796 - blogic_dealloc_ccb(ccb); 2794 + blogic_dealloc_ccb(ccb, 1); 2797 2795 adapter->active_cmds[tgt_id]--; 2798 2796 command->result = DID_RESET << 16; 2799 2797 command->scsi_done(command); ··· 2864 2862 /* 2865 2863 Place CCB back on the Host Adapter's free list. 2866 2864 */ 2867 - blogic_dealloc_ccb(ccb); 2865 + blogic_dealloc_ccb(ccb, 1); 2868 2866 /* 2869 2867 Call the SCSI Command Completion Routine. 
2870 2868 */ ··· 3036 3034 int buflen = scsi_bufflen(command); 3037 3035 int count; 3038 3036 struct blogic_ccb *ccb; 3037 + dma_addr_t sense_buf; 3039 3038 3040 3039 /* 3041 3040 SCSI REQUEST_SENSE commands will be executed automatically by the ··· 3182 3179 } 3183 3180 memcpy(ccb->cdb, cdb, cdblen); 3184 3181 ccb->sense_datalen = SCSI_SENSE_BUFFERSIZE; 3185 - ccb->sensedata = pci_map_single(adapter->pci_device, 3182 + ccb->command = command; 3183 + sense_buf = pci_map_single(adapter->pci_device, 3186 3184 command->sense_buffer, ccb->sense_datalen, 3187 3185 PCI_DMA_FROMDEVICE); 3188 - ccb->command = command; 3186 + if (dma_mapping_error(&adapter->pci_device->dev, sense_buf)) { 3187 + blogic_err("DMA mapping for sense data buffer failed\n", 3188 + adapter); 3189 + blogic_dealloc_ccb(ccb, 0); 3190 + return SCSI_MLQUEUE_HOST_BUSY; 3191 + } 3192 + ccb->sensedata = sense_buf; 3189 3193 command->scsi_done = comp_cb; 3190 3194 if (blogic_multimaster_type(adapter)) { 3191 3195 /* ··· 3213 3203 if (!blogic_write_outbox(adapter, BLOGIC_MBOX_START, 3214 3204 ccb)) { 3215 3205 blogic_warn("Still unable to write Outgoing Mailbox - " "Host Adapter Dead?\n", adapter); 3216 - blogic_dealloc_ccb(ccb); 3206 + blogic_dealloc_ccb(ccb, 1); 3217 3207 command->result = DID_ERROR << 16; 3218 3208 command->scsi_done(command); 3219 3209 } ··· 3347 3337 3348 3338 for (ccb = adapter->all_ccbs; ccb != NULL; ccb = ccb->next_all) 3349 3339 if (ccb->status == BLOGIC_CCB_ACTIVE) 3350 - blogic_dealloc_ccb(ccb); 3340 + blogic_dealloc_ccb(ccb, 1); 3351 3341 /* 3352 3342 * Wait a few seconds between the Host Adapter Hard Reset which 3353 3343 * initiates a SCSI Bus Reset and issuing any SCSI Commands. Some
+4 -4
drivers/scsi/advansys.c
··· 2511 2511 struct asc_board *boardp = shost_priv(s); 2512 2512 2513 2513 printk("Scsi_Host at addr 0x%p, device %s\n", s, dev_name(boardp->dev)); 2514 - printk(" host_busy %u, host_no %d, last_reset %d,\n", 2515 - s->host_busy, s->host_no, (unsigned)s->last_reset); 2514 + printk(" host_busy %u, host_no %d,\n", 2515 + s->host_busy, s->host_no); 2516 2516 2517 2517 printk(" base 0x%lx, io_port 0x%lx, irq %d,\n", 2518 2518 (ulong)s->base, (ulong)s->io_port, boardp->irq); ··· 3345 3345 shost->host_no); 3346 3346 3347 3347 seq_printf(m, 3348 - " host_busy %u, last_reset %lu, max_id %u, max_lun %u, max_channel %u\n", 3349 - shost->host_busy, shost->last_reset, shost->max_id, 3348 + " host_busy %u, max_id %u, max_lun %u, max_channel %u\n", 3349 + shost->host_busy, shost->max_id, 3350 3350 shost->max_lun, shost->max_channel); 3351 3351 3352 3352 seq_printf(m,
+1 -1
drivers/scsi/be2iscsi/be.h
··· 128 128 129 129 #define PAGE_SHIFT_4K 12 130 130 #define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K) 131 - #define mcc_timeout 120000 /* 5s timeout */ 131 + #define mcc_timeout 120000 /* 12s timeout */ 132 132 133 133 /* Returns number of pages spanned by the data starting at the given addr */ 134 134 #define PAGES_4K_SPANNED(_address, size) \
+175 -31
drivers/scsi/be2iscsi/be_cmds.c
··· 17 17 18 18 #include <scsi/iscsi_proto.h> 19 19 20 + #include "be_main.h" 20 21 #include "be.h" 21 22 #include "be_mgmt.h" 22 - #include "be_main.h" 23 23 24 24 int beiscsi_pci_soft_reset(struct beiscsi_hba *phba) 25 25 { ··· 158 158 struct be_cmd_resp_hdr *ioctl_resp_hdr; 159 159 struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; 160 160 161 - if (beiscsi_error(phba)) 161 + if (beiscsi_error(phba)) { 162 + free_mcc_tag(&phba->ctrl, tag); 162 163 return -EIO; 164 + } 163 165 164 166 /* wait for the mccq completion */ 165 167 rc = wait_event_interruptible_timeout( ··· 175 173 BEISCSI_LOG_INIT | BEISCSI_LOG_EH | 176 174 BEISCSI_LOG_CONFIG, 177 175 "BC_%d : MBX Cmd Completion timed out\n"); 178 - rc = -EAGAIN; 176 + rc = -EBUSY; 177 + 178 + /* decrement the mccq used count */ 179 + atomic_dec(&phba->ctrl.mcc_obj.q.used); 180 + 179 181 goto release_mcc_tag; 180 182 } else 181 183 rc = 0; ··· 214 208 215 209 if (status == MCC_STATUS_INSUFFICIENT_BUFFER) { 216 210 ioctl_resp_hdr = (struct be_cmd_resp_hdr *) ioctl_hdr; 217 - if (ioctl_resp_hdr->response_length) 218 - goto release_mcc_tag; 211 + beiscsi_log(phba, KERN_WARNING, 212 + BEISCSI_LOG_INIT | BEISCSI_LOG_EH | 213 + BEISCSI_LOG_CONFIG, 214 + "BC_%d : Insufficent Buffer Error " 215 + "Resp_Len : %d Actual_Resp_Len : %d\n", 216 + ioctl_resp_hdr->response_length, 217 + ioctl_resp_hdr->actual_resp_len); 218 + 219 + rc = -EAGAIN; 220 + goto release_mcc_tag; 219 221 } 220 - rc = -EAGAIN; 222 + rc = -EIO; 221 223 } 222 224 223 225 release_mcc_tag: ··· 377 363 } else if ((evt->port_link_status & ASYNC_EVENT_LINK_UP) || 378 364 ((evt->port_link_status & ASYNC_EVENT_LOGICAL) && 379 365 (evt->port_fault == BEISCSI_PHY_LINK_FAULT_NONE))) { 380 - phba->state = BE_ADAPTER_UP; 366 + phba->state = BE_ADAPTER_LINK_UP; 381 367 382 368 beiscsi_log(phba, KERN_ERR, 383 369 BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT, ··· 500 486 **/ 501 487 static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl) 502 488 { 489 + #define 
BEISCSI_MBX_RDY_BIT_TIMEOUT 4000 /* 4sec */ 503 490 void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET; 504 491 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); 505 - uint32_t wait = 0; 492 + unsigned long timeout; 493 + bool read_flag = false; 494 + int ret = 0, i; 506 495 u32 ready; 496 + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(rdybit_check_q); 497 + 498 + if (beiscsi_error(phba)) 499 + return -EIO; 500 + 501 + timeout = jiffies + (HZ * 110); 507 502 508 503 do { 509 - 510 - if (beiscsi_error(phba)) 511 - return -EIO; 512 - 513 - ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK; 514 - if (ready) 515 - break; 516 - 517 - if (wait > BEISCSI_HOST_MBX_TIMEOUT) { 518 - beiscsi_log(phba, KERN_ERR, 519 - BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, 520 - "BC_%d : FW Timed Out\n"); 521 - phba->fw_timeout = true; 522 - beiscsi_ue_detect(phba); 523 - return -EBUSY; 504 + for (i = 0; i < BEISCSI_MBX_RDY_BIT_TIMEOUT; i++) { 505 + ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK; 506 + if (ready) { 507 + read_flag = true; 508 + break; 509 + } 510 + mdelay(1); 524 511 } 525 512 526 - mdelay(1); 527 - wait++; 528 - } while (true); 529 - return 0; 513 + if (!read_flag) { 514 + wait_event_timeout(rdybit_check_q, 515 + (read_flag != true), 516 + HZ * 5); 517 + } 518 + } while ((time_before(jiffies, timeout)) && !read_flag); 519 + 520 + if (!read_flag) { 521 + beiscsi_log(phba, KERN_ERR, 522 + BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, 523 + "BC_%d : FW Timed Out\n"); 524 + phba->fw_timeout = true; 525 + beiscsi_ue_detect(phba); 526 + ret = -EBUSY; 527 + } 528 + 529 + return ret; 530 530 } 531 531 532 532 /* ··· 727 699 struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; 728 700 struct be_mcc_wrb *wrb; 729 701 730 - BUG_ON(atomic_read(&mccq->used) >= mccq->len); 702 + WARN_ON(atomic_read(&mccq->used) >= mccq->len); 731 703 wrb = queue_head_node(mccq); 732 704 memset(wrb, 0, sizeof(*wrb)); 733 705 wrb->tag0 = (mccq->head & 0x000000FF) << 16; ··· 1037 1009 return status; 1038 1010 } 1039 1011 1012 
+ /** 1013 + * be_cmd_create_default_pdu_queue()- Create DEFQ for the adapter 1014 + * @ctrl: ptr to ctrl_info 1015 + * @cq: Completion Queue 1016 + * @dq: Default Queue 1017 + * @lenght: ring size 1018 + * @entry_size: size of each entry in DEFQ 1019 + * @is_header: Header or Data DEFQ 1020 + * @ulp_num: Bind to which ULP 1021 + * 1022 + * Create HDR/Data DEFQ for the passed ULP. Unsol PDU are posted 1023 + * on this queue by the FW 1024 + * 1025 + * return 1026 + * Success: 0 1027 + * Failure: Non-Zero Value 1028 + * 1029 + **/ 1040 1030 int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl, 1041 1031 struct be_queue_info *cq, 1042 1032 struct be_queue_info *dq, int length, 1043 - int entry_size) 1033 + int entry_size, uint8_t is_header, 1034 + uint8_t ulp_num) 1044 1035 { 1045 1036 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); 1046 1037 struct be_defq_create_req *req = embedded_payload(wrb); ··· 1077 1030 OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req)); 1078 1031 1079 1032 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); 1033 + if (phba->fw_config.dual_ulp_aware) { 1034 + req->ulp_num = ulp_num; 1035 + req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT); 1036 + req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT); 1037 + } 1080 1038 1081 1039 if (is_chip_be2_be3r(phba)) { 1082 1040 AMAP_SET_BITS(struct amap_be_default_pdu_context, ··· 1119 1067 1120 1068 status = be_mbox_notify(ctrl); 1121 1069 if (!status) { 1070 + struct be_ring *defq_ring; 1122 1071 struct be_defq_create_resp *resp = embedded_payload(wrb); 1123 1072 1124 1073 dq->id = le16_to_cpu(resp->id); 1125 1074 dq->created = true; 1075 + if (is_header) 1076 + defq_ring = &phba->phwi_ctrlr->default_pdu_hdr[ulp_num]; 1077 + else 1078 + defq_ring = &phba->phwi_ctrlr-> 1079 + default_pdu_data[ulp_num]; 1080 + 1081 + defq_ring->id = dq->id; 1082 + 1083 + if (!phba->fw_config.dual_ulp_aware) { 1084 + defq_ring->ulp_num = BEISCSI_ULP0; 1085 + defq_ring->doorbell_offset = 
DB_RXULP0_OFFSET; 1086 + } else { 1087 + defq_ring->ulp_num = resp->ulp_num; 1088 + defq_ring->doorbell_offset = resp->doorbell_offset; 1089 + } 1126 1090 } 1127 1091 spin_unlock(&ctrl->mbox_lock); 1128 1092 1129 1093 return status; 1130 1094 } 1131 1095 1132 - int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem, 1133 - struct be_queue_info *wrbq) 1096 + /** 1097 + * be_cmd_wrbq_create()- Create WRBQ 1098 + * @ctrl: ptr to ctrl_info 1099 + * @q_mem: memory details for the queue 1100 + * @wrbq: queue info 1101 + * @pwrb_context: ptr to wrb_context 1102 + * @ulp_num: ULP on which the WRBQ is to be created 1103 + * 1104 + * Create WRBQ on the passed ULP_NUM. 1105 + * 1106 + **/ 1107 + int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, 1108 + struct be_dma_mem *q_mem, 1109 + struct be_queue_info *wrbq, 1110 + struct hwi_wrb_context *pwrb_context, 1111 + uint8_t ulp_num) 1134 1112 { 1135 1113 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); 1136 1114 struct be_wrbq_create_req *req = embedded_payload(wrb); 1137 1115 struct be_wrbq_create_resp *resp = embedded_payload(wrb); 1116 + struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); 1138 1117 int status; 1139 1118 1140 1119 spin_lock(&ctrl->mbox_lock); ··· 1176 1093 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, 1177 1094 OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req)); 1178 1095 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); 1096 + 1097 + if (phba->fw_config.dual_ulp_aware) { 1098 + req->ulp_num = ulp_num; 1099 + req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT); 1100 + req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT); 1101 + } 1102 + 1179 1103 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 1180 1104 1181 1105 status = be_mbox_notify(ctrl); 1182 1106 if (!status) { 1183 1107 wrbq->id = le16_to_cpu(resp->cid); 1184 1108 wrbq->created = true; 1109 + 1110 + pwrb_context->cid = wrbq->id; 1111 + if (!phba->fw_config.dual_ulp_aware) { 1112 + 
pwrb_context->doorbell_offset = DB_TXULP0_OFFSET; 1113 + pwrb_context->ulp_num = BEISCSI_ULP0; 1114 + } else { 1115 + pwrb_context->ulp_num = resp->ulp_num; 1116 + pwrb_context->doorbell_offset = resp->doorbell_offset; 1117 + } 1185 1118 } 1119 + spin_unlock(&ctrl->mbox_lock); 1120 + return status; 1121 + } 1122 + 1123 + int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl, 1124 + struct be_dma_mem *q_mem) 1125 + { 1126 + struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); 1127 + struct be_post_template_pages_req *req = embedded_payload(wrb); 1128 + int status; 1129 + 1130 + spin_lock(&ctrl->mbox_lock); 1131 + 1132 + memset(wrb, 0, sizeof(*wrb)); 1133 + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 1134 + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1135 + OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS, 1136 + sizeof(*req)); 1137 + 1138 + req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); 1139 + req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI; 1140 + be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 1141 + 1142 + status = be_mbox_notify(ctrl); 1143 + spin_unlock(&ctrl->mbox_lock); 1144 + return status; 1145 + } 1146 + 1147 + int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl) 1148 + { 1149 + struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); 1150 + struct be_remove_template_pages_req *req = embedded_payload(wrb); 1151 + int status; 1152 + 1153 + spin_lock(&ctrl->mbox_lock); 1154 + 1155 + memset(wrb, 0, sizeof(*wrb)); 1156 + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 1157 + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1158 + OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS, 1159 + sizeof(*req)); 1160 + 1161 + req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI; 1162 + 1163 + status = be_mbox_notify(ctrl); 1186 1164 spin_unlock(&ctrl->mbox_lock); 1187 1165 return status; 1188 1166 }
+60 -5
drivers/scsi/be2iscsi/be_cmds.h
··· 40 40 u32 tag1; /* dword 3 */ 41 41 u32 rsvd; /* dword 4 */ 42 42 union { 43 + #define EMBED_MBX_MAX_PAYLOAD_SIZE 220 43 44 u8 embedded_payload[236]; /* used by embedded cmds */ 44 45 struct be_sge sgl[19]; /* used by non-embedded cmds */ 45 46 } payload; ··· 163 162 #define OPCODE_COMMON_CQ_CREATE 12 164 163 #define OPCODE_COMMON_EQ_CREATE 13 165 164 #define OPCODE_COMMON_MCC_CREATE 21 165 + #define OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS 24 166 + #define OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS 25 166 167 #define OPCODE_COMMON_GET_CNTL_ATTRIBUTES 32 167 168 #define OPCODE_COMMON_GET_FW_VERSION 35 168 169 #define OPCODE_COMMON_MODIFY_EQ_DELAY 41 ··· 220 217 u32 hi; 221 218 }; 222 219 220 + struct virt_addr { 221 + u32 lo; 222 + u32 hi; 223 + }; 223 224 /************************** 224 225 * BE Command definitions * 225 226 **************************/ ··· 729 722 int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl, 730 723 struct be_queue_info *cq, 731 724 struct be_queue_info *dq, int length, 732 - int entry_size); 725 + int entry_size, uint8_t is_header, 726 + uint8_t ulp_num); 727 + 728 + int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl, 729 + struct be_dma_mem *q_mem); 730 + 731 + int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl); 733 732 734 733 int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl, 735 734 struct be_dma_mem *q_mem, u32 page_offset, ··· 744 731 int beiscsi_cmd_reset_function(struct beiscsi_hba *phba); 745 732 746 733 int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem, 747 - struct be_queue_info *wrbq); 734 + struct be_queue_info *wrbq, 735 + struct hwi_wrb_context *pwrb_context, 736 + uint8_t ulp_num); 748 737 749 738 bool is_link_state_evt(u32 trailer); 750 739 ··· 791 776 struct be_cmd_req_hdr hdr; 792 777 u16 num_pages; 793 778 u8 ulp_num; 794 - u8 rsvd0; 779 + #define BEISCSI_DUAL_ULP_AWARE_BIT 0 /* Byte 3 - Bit 0 */ 780 + #define BEISCSI_BIND_Q_TO_ULP_BIT 1 /* Byte 3 
- Bit 1 */ 781 + u8 dua_feature; 795 782 struct be_default_pdu_context context; 796 783 struct phys_addr pages[8]; 797 784 } __packed; ··· 801 784 struct be_defq_create_resp { 802 785 struct be_cmd_req_hdr hdr; 803 786 u16 id; 787 + u8 rsvd0; 788 + u8 ulp_num; 789 + u32 doorbell_offset; 790 + u16 register_set; 791 + u16 doorbell_format; 792 + } __packed; 793 + 794 + struct be_post_template_pages_req { 795 + struct be_cmd_req_hdr hdr; 796 + u16 num_pages; 797 + #define BEISCSI_TEMPLATE_HDR_TYPE_ISCSI 0x1 798 + u16 type; 799 + struct phys_addr scratch_pa; 800 + struct virt_addr scratch_va; 801 + struct virt_addr pages_va; 802 + struct phys_addr pages[16]; 803 + } __packed; 804 + 805 + struct be_remove_template_pages_req { 806 + struct be_cmd_req_hdr hdr; 807 + u16 type; 804 808 u16 rsvd0; 805 809 } __packed; 806 810 ··· 838 800 struct be_cmd_req_hdr hdr; 839 801 u16 num_pages; 840 802 u8 ulp_num; 841 - u8 rsvd0; 803 + u8 dua_feature; 842 804 struct phys_addr pages[8]; 843 805 } __packed; 844 806 845 807 struct be_wrbq_create_resp { 846 808 struct be_cmd_resp_hdr resp_hdr; 847 809 u16 cid; 848 - u16 rsvd0; 810 + u8 rsvd0; 811 + u8 ulp_num; 812 + u32 doorbell_offset; 813 + u16 register_set; 814 + u16 doorbell_format; 849 815 } __packed; 850 816 851 817 #define SOL_CID_MASK 0x0000FFC0 ··· 1044 1002 } __packed; 1045 1003 1046 1004 struct be_ulp_fw_cfg { 1005 + #define BEISCSI_ULP_ISCSI_INI_MODE 0x10 1047 1006 u32 ulp_mode; 1048 1007 u32 etx_base; 1049 1008 u32 etx_count; ··· 1060 1017 u32 icd_count; 1061 1018 }; 1062 1019 1020 + struct be_ulp_chain_icd { 1021 + u32 chain_base; 1022 + u32 chain_count; 1023 + }; 1024 + 1063 1025 struct be_fw_cfg { 1064 1026 struct be_cmd_req_hdr hdr; 1065 1027 u32 be_config_number; 1066 1028 u32 asic_revision; 1067 1029 u32 phys_port; 1030 + #define BEISCSI_FUNC_ISCSI_INI_MODE 0x10 1031 + #define BEISCSI_FUNC_DUA_MODE 0x800 1068 1032 u32 function_mode; 1069 1033 struct be_ulp_fw_cfg ulp[2]; 1070 1034 u32 function_caps; 1035 + u32 
cqid_base; 1036 + u32 cqid_count; 1037 + u32 eqid_base; 1038 + u32 eqid_count; 1039 + struct be_ulp_chain_icd chain_icd[2]; 1071 1040 } __packed; 1072 1041 1073 1042 struct be_cmd_get_all_if_id_req {
+130 -41
drivers/scsi/be2iscsi/be_iscsi.c
··· 58 58 } 59 59 beiscsi_ep = ep->dd_data; 60 60 phba = beiscsi_ep->phba; 61 - shost = phba->shost; 62 61 63 - beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, 64 - "BS_%d : In beiscsi_session_create\n"); 62 + if (phba->state & BE_ADAPTER_PCI_ERR) { 63 + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 64 + "BS_%d : PCI_ERROR Recovery\n"); 65 + return NULL; 66 + } else { 67 + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, 68 + "BS_%d : In beiscsi_session_create\n"); 69 + } 65 70 66 71 if (cmds_max > beiscsi_ep->phba->params.wrbs_per_cxn) { 67 72 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, ··· 79 74 cmds_max = beiscsi_ep->phba->params.wrbs_per_cxn; 80 75 } 81 76 77 + shost = phba->shost; 82 78 cls_session = iscsi_session_setup(&beiscsi_iscsi_transport, 83 79 shost, cmds_max, 84 80 sizeof(*beiscsi_sess), ··· 200 194 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 201 195 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); 202 196 struct beiscsi_hba *phba = iscsi_host_priv(shost); 197 + struct hwi_controller *phwi_ctrlr = phba->phwi_ctrlr; 198 + struct hwi_wrb_context *pwrb_context; 203 199 struct beiscsi_endpoint *beiscsi_ep; 204 200 struct iscsi_endpoint *ep; 205 201 ··· 222 214 return -EEXIST; 223 215 } 224 216 217 + pwrb_context = &phwi_ctrlr->wrb_context[BE_GET_CRI_FROM_CID( 218 + beiscsi_ep->ep_cid)]; 219 + 225 220 beiscsi_conn->beiscsi_conn_cid = beiscsi_ep->ep_cid; 226 221 beiscsi_conn->ep = beiscsi_ep; 227 222 beiscsi_ep->conn = beiscsi_conn; 223 + beiscsi_conn->doorbell_offset = pwrb_context->doorbell_offset; 228 224 229 225 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, 230 226 "BS_%d : beiscsi_conn=%p conn=%p ep_cid=%d\n", ··· 277 265 278 266 void beiscsi_create_def_ifaces(struct beiscsi_hba *phba) 279 267 { 280 - struct be_cmd_get_if_info_resp if_info; 268 + struct be_cmd_get_if_info_resp *if_info; 281 269 282 - if (!mgmt_get_if_info(phba, BE2_IPV4, &if_info)) 270 + if (!mgmt_get_if_info(phba, BE2_IPV4, &if_info)) { 283 271 
beiscsi_create_ipv4_iface(phba); 272 + kfree(if_info); 273 + } 284 274 285 - if (!mgmt_get_if_info(phba, BE2_IPV6, &if_info)) 275 + if (!mgmt_get_if_info(phba, BE2_IPV6, &if_info)) { 286 276 beiscsi_create_ipv6_iface(phba); 277 + kfree(if_info); 278 + } 287 279 } 288 280 289 281 void beiscsi_destroy_def_ifaces(struct beiscsi_hba *phba) ··· 483 467 uint32_t rm_len = dt_len; 484 468 int ret = 0 ; 485 469 470 + if (phba->state & BE_ADAPTER_PCI_ERR) { 471 + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 472 + "BS_%d : In PCI_ERROR Recovery\n"); 473 + return -EBUSY; 474 + } 475 + 486 476 nla_for_each_attr(attrib, data, dt_len, rm_len) { 487 477 iface_param = nla_data(attrib); 488 478 ··· 534 512 struct iscsi_iface *iface, int param, 535 513 char *buf) 536 514 { 537 - struct be_cmd_get_if_info_resp if_info; 515 + struct be_cmd_get_if_info_resp *if_info; 538 516 int len, ip_type = BE2_IPV4; 539 - 540 - memset(&if_info, 0, sizeof(if_info)); 541 517 542 518 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6) 543 519 ip_type = BE2_IPV6; 544 520 545 521 len = mgmt_get_if_info(phba, ip_type, &if_info); 546 - if (len) 522 + if (len) { 523 + kfree(if_info); 547 524 return len; 525 + } 548 526 549 527 switch (param) { 550 528 case ISCSI_NET_PARAM_IPV4_ADDR: 551 - len = sprintf(buf, "%pI4\n", &if_info.ip_addr.addr); 529 + len = sprintf(buf, "%pI4\n", if_info->ip_addr.addr); 552 530 break; 553 531 case ISCSI_NET_PARAM_IPV6_ADDR: 554 - len = sprintf(buf, "%pI6\n", &if_info.ip_addr.addr); 532 + len = sprintf(buf, "%pI6\n", if_info->ip_addr.addr); 555 533 break; 556 534 case ISCSI_NET_PARAM_IPV4_BOOTPROTO: 557 - if (!if_info.dhcp_state) 535 + if (!if_info->dhcp_state) 558 536 len = sprintf(buf, "static\n"); 559 537 else 560 538 len = sprintf(buf, "dhcp\n"); 561 539 break; 562 540 case ISCSI_NET_PARAM_IPV4_SUBNET: 563 - len = sprintf(buf, "%pI4\n", &if_info.ip_addr.subnet_mask); 541 + len = sprintf(buf, "%pI4\n", if_info->ip_addr.subnet_mask); 564 542 break; 565 543 case 
ISCSI_NET_PARAM_VLAN_ENABLED: 566 544 len = sprintf(buf, "%s\n", 567 - (if_info.vlan_priority == BEISCSI_VLAN_DISABLE) 545 + (if_info->vlan_priority == BEISCSI_VLAN_DISABLE) 568 546 ? "Disabled\n" : "Enabled\n"); 569 547 break; 570 548 case ISCSI_NET_PARAM_VLAN_ID: 571 - if (if_info.vlan_priority == BEISCSI_VLAN_DISABLE) 549 + if (if_info->vlan_priority == BEISCSI_VLAN_DISABLE) 572 550 return -EINVAL; 573 551 else 574 552 len = sprintf(buf, "%d\n", 575 - (if_info.vlan_priority & 553 + (if_info->vlan_priority & 576 554 ISCSI_MAX_VLAN_ID)); 577 555 break; 578 556 case ISCSI_NET_PARAM_VLAN_PRIORITY: 579 - if (if_info.vlan_priority == BEISCSI_VLAN_DISABLE) 557 + if (if_info->vlan_priority == BEISCSI_VLAN_DISABLE) 580 558 return -EINVAL; 581 559 else 582 560 len = sprintf(buf, "%d\n", 583 - ((if_info.vlan_priority >> 13) & 561 + ((if_info->vlan_priority >> 13) & 584 562 ISCSI_MAX_VLAN_PRIORITY)); 585 563 break; 586 564 default: 587 565 WARN_ON(1); 588 566 } 589 567 568 + kfree(if_info); 590 569 return len; 591 570 } 592 571 ··· 599 576 struct beiscsi_hba *phba = iscsi_host_priv(shost); 600 577 struct be_cmd_get_def_gateway_resp gateway; 601 578 int len = -ENOSYS; 579 + 580 + if (phba->state & BE_ADAPTER_PCI_ERR) { 581 + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 582 + "BS_%d : In PCI_ERROR Recovery\n"); 583 + return -EBUSY; 584 + } 602 585 603 586 switch (param) { 604 587 case ISCSI_NET_PARAM_IPV4_ADDR: ··· 701 672 session->max_burst = 262144; 702 673 break; 703 674 case ISCSI_PARAM_MAX_XMIT_DLENGTH: 704 - if ((conn->max_xmit_dlength > 65536) || 705 - (conn->max_xmit_dlength == 0)) 675 + if (conn->max_xmit_dlength > 65536) 706 676 conn->max_xmit_dlength = 65536; 707 677 default: 708 678 return 0; ··· 755 727 struct beiscsi_hba *phba = iscsi_host_priv(shost); 756 728 struct iscsi_cls_host *ihost = shost->shost_data; 757 729 758 - ihost->port_state = (phba->state == BE_ADAPTER_UP) ? 730 + ihost->port_state = (phba->state == BE_ADAPTER_LINK_UP) ? 
759 731 ISCSI_PORT_STATE_UP : ISCSI_PORT_STATE_DOWN; 760 732 } 761 733 ··· 823 795 struct beiscsi_hba *phba = iscsi_host_priv(shost); 824 796 int status = 0; 825 797 826 - beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, 827 - "BS_%d : In beiscsi_get_host_param," 828 - " param= %d\n", param); 798 + 799 + if (phba->state & BE_ADAPTER_PCI_ERR) { 800 + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 801 + "BS_%d : In PCI_ERROR Recovery\n"); 802 + return -EBUSY; 803 + } else { 804 + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, 805 + "BS_%d : In beiscsi_get_host_param," 806 + " param = %d\n", param); 807 + } 829 808 830 809 switch (param) { 831 810 case ISCSI_HOST_PARAM_HWADDRESS: ··· 875 840 struct be_cmd_get_nic_conf_resp resp; 876 841 int rc; 877 842 878 - if (strlen(phba->mac_address)) 843 + if (phba->mac_addr_set) 879 844 return sysfs_format_mac(buf, phba->mac_address, ETH_ALEN); 880 845 881 846 memset(&resp, 0, sizeof(resp)); ··· 883 848 if (rc) 884 849 return rc; 885 850 851 + phba->mac_addr_set = true; 886 852 memcpy(phba->mac_address, resp.mac_address, ETH_ALEN); 887 853 return sysfs_format_mac(buf, phba->mac_address, ETH_ALEN); 888 854 } ··· 959 923 session->max_r2t); 960 924 AMAP_SET_BITS(struct amap_beiscsi_offload_params, exp_statsn, params, 961 925 (conn->exp_statsn - 1)); 926 + AMAP_SET_BITS(struct amap_beiscsi_offload_params, 927 + max_recv_data_segment_length, params, 928 + conn->max_recv_dlength); 929 + 962 930 } 963 931 964 932 /** ··· 975 935 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 976 936 struct beiscsi_endpoint *beiscsi_ep; 977 937 struct beiscsi_offload_params params; 938 + struct beiscsi_hba *phba; 978 939 979 - beiscsi_log(beiscsi_conn->phba, KERN_INFO, 980 - BEISCSI_LOG_CONFIG, 981 - "BS_%d : In beiscsi_conn_start\n"); 940 + phba = ((struct beiscsi_conn *)conn->dd_data)->phba; 941 + 942 + if (phba->state & BE_ADAPTER_PCI_ERR) { 943 + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 944 + "BS_%d : In PCI_ERROR Recovery\n"); 945 + 
return -EBUSY; 946 + } else { 947 + beiscsi_log(beiscsi_conn->phba, KERN_INFO, 948 + BEISCSI_LOG_CONFIG, 949 + "BS_%d : In beiscsi_conn_start\n"); 950 + } 982 951 983 952 memset(&params, 0, sizeof(struct beiscsi_offload_params)); 984 953 beiscsi_ep = beiscsi_conn->ep; ··· 1009 960 */ 1010 961 static int beiscsi_get_cid(struct beiscsi_hba *phba) 1011 962 { 1012 - unsigned short cid = 0xFFFF; 963 + unsigned short cid = 0xFFFF, cid_from_ulp; 964 + struct ulp_cid_info *cid_info = NULL; 965 + uint16_t cid_avlbl_ulp0, cid_avlbl_ulp1; 1013 966 1014 - if (!phba->avlbl_cids) 1015 - return cid; 967 + /* Find the ULP which has more CID available */ 968 + cid_avlbl_ulp0 = (phba->cid_array_info[BEISCSI_ULP0]) ? 969 + BEISCSI_ULP0_AVLBL_CID(phba) : 0; 970 + cid_avlbl_ulp1 = (phba->cid_array_info[BEISCSI_ULP1]) ? 971 + BEISCSI_ULP1_AVLBL_CID(phba) : 0; 972 + cid_from_ulp = (cid_avlbl_ulp0 > cid_avlbl_ulp1) ? 973 + BEISCSI_ULP0 : BEISCSI_ULP1; 1016 974 1017 - cid = phba->cid_array[phba->cid_alloc++]; 1018 - if (phba->cid_alloc == phba->params.cxns_per_ctrl) 1019 - phba->cid_alloc = 0; 1020 - phba->avlbl_cids--; 975 + if (test_bit(cid_from_ulp, (void *)&phba->fw_config.ulp_supported)) { 976 + cid_info = phba->cid_array_info[cid_from_ulp]; 977 + if (!cid_info->avlbl_cids) 978 + return cid; 979 + 980 + cid = cid_info->cid_array[cid_info->cid_alloc++]; 981 + 982 + if (cid_info->cid_alloc == BEISCSI_GET_CID_COUNT( 983 + phba, cid_from_ulp)) 984 + cid_info->cid_alloc = 0; 985 + 986 + cid_info->avlbl_cids--; 987 + } 1021 988 return cid; 1022 989 } 1023 990 ··· 1044 979 */ 1045 980 static void beiscsi_put_cid(struct beiscsi_hba *phba, unsigned short cid) 1046 981 { 1047 - phba->avlbl_cids++; 1048 - phba->cid_array[phba->cid_free++] = cid; 1049 - if (phba->cid_free == phba->params.cxns_per_ctrl) 1050 - phba->cid_free = 0; 982 + uint16_t cid_post_ulp; 983 + struct hwi_controller *phwi_ctrlr; 984 + struct hwi_wrb_context *pwrb_context; 985 + struct ulp_cid_info *cid_info = NULL; 986 + 
uint16_t cri_index = BE_GET_CRI_FROM_CID(cid); 987 + 988 + phwi_ctrlr = phba->phwi_ctrlr; 989 + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 990 + cid_post_ulp = pwrb_context->ulp_num; 991 + 992 + cid_info = phba->cid_array_info[cid_post_ulp]; 993 + cid_info->avlbl_cids++; 994 + 995 + cid_info->cid_array[cid_info->cid_free++] = cid; 996 + if (cid_info->cid_free == BEISCSI_GET_CID_COUNT(phba, cid_post_ulp)) 997 + cid_info->cid_free = 0; 1051 998 } 1052 999 1053 1000 /** ··· 1212 1135 return ERR_PTR(ret); 1213 1136 } 1214 1137 1215 - if (phba->state != BE_ADAPTER_UP) { 1138 + if (phba->state & BE_ADAPTER_PCI_ERR) { 1139 + ret = -EBUSY; 1140 + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 1141 + "BS_%d : In PCI_ERROR Recovery\n"); 1142 + return ERR_PTR(ret); 1143 + } else if (phba->state & BE_ADAPTER_LINK_DOWN) { 1216 1144 ret = -EBUSY; 1217 1145 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, 1218 1146 "BS_%d : The Adapter Port state is Down!!!\n"); ··· 1342 1260 tcp_upload_flag = CONNECTION_UPLOAD_ABORT; 1343 1261 } 1344 1262 1263 + if (phba->state & BE_ADAPTER_PCI_ERR) { 1264 + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 1265 + "BS_%d : PCI_ERROR Recovery\n"); 1266 + goto free_ep; 1267 + } 1268 + 1345 1269 tag = mgmt_invalidate_connection(phba, beiscsi_ep, 1346 1270 beiscsi_ep->ep_cid, 1347 1271 mgmt_invalidate_flag, ··· 1360 1272 1361 1273 beiscsi_mccq_compl(phba, tag, NULL, NULL); 1362 1274 beiscsi_close_conn(beiscsi_ep, tcp_upload_flag); 1275 + free_ep: 1363 1276 beiscsi_free_ep(beiscsi_ep); 1364 1277 beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid); 1365 1278 iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep);
+977 -366
drivers/scsi/be2iscsi/be_main.c
··· 149 149 "\t\t\t\tMiscellaneous Events : 0x04\n" 150 150 "\t\t\t\tError Handling : 0x08\n" 151 151 "\t\t\t\tIO Path Events : 0x10\n" 152 - "\t\t\t\tConfiguration Path : 0x20\n"); 152 + "\t\t\t\tConfiguration Path : 0x20\n" 153 + "\t\t\t\tiSCSI Protocol : 0x40\n"); 153 154 154 155 DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL); 155 156 DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL); 156 157 DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL); 157 - DEVICE_ATTR(beiscsi_active_cid_count, S_IRUGO, beiscsi_active_cid_disp, NULL); 158 + DEVICE_ATTR(beiscsi_phys_port, S_IRUGO, beiscsi_phys_port_disp, NULL); 159 + DEVICE_ATTR(beiscsi_active_session_count, S_IRUGO, 160 + beiscsi_active_session_disp, NULL); 161 + DEVICE_ATTR(beiscsi_free_session_count, S_IRUGO, 162 + beiscsi_free_session_disp, NULL); 158 163 struct device_attribute *beiscsi_attrs[] = { 159 164 &dev_attr_beiscsi_log_enable, 160 165 &dev_attr_beiscsi_drvr_ver, 161 166 &dev_attr_beiscsi_adapter_family, 162 167 &dev_attr_beiscsi_fw_ver, 163 - &dev_attr_beiscsi_active_cid_count, 168 + &dev_attr_beiscsi_active_session_count, 169 + &dev_attr_beiscsi_free_session_count, 170 + &dev_attr_beiscsi_phys_port, 164 171 NULL, 165 172 }; 166 173 ··· 246 239 return SUCCESS; 247 240 } 248 241 spin_unlock_bh(&session->lock); 242 + /* Invalidate WRB Posted for this Task */ 243 + AMAP_SET_BITS(struct amap_iscsi_wrb, invld, 244 + aborted_io_task->pwrb_handle->pwrb, 245 + 1); 246 + 249 247 conn = aborted_task->conn; 250 248 beiscsi_conn = conn->dd_data; 251 249 phba = beiscsi_conn->phba; ··· 327 315 328 316 if (abrt_task->sc->device->lun != abrt_task->sc->device->lun) 329 317 continue; 318 + 319 + /* Invalidate WRB Posted for this Task */ 320 + AMAP_SET_BITS(struct amap_iscsi_wrb, invld, 321 + abrt_io_task->pwrb_handle->pwrb, 322 + 1); 330 323 331 324 inv_tbl->cid = cid; 332 325 inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index; ··· 716 699 return status; 717 700 } 
718 701 702 + /** 703 + * beiscsi_get_params()- Set the config paramters 704 + * @phba: ptr device priv structure 705 + **/ 719 706 static void beiscsi_get_params(struct beiscsi_hba *phba) 720 707 { 721 - phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count 722 - - (phba->fw_config.iscsi_cid_count 723 - + BE2_TMFS 724 - + BE2_NOPOUT_REQ)); 725 - phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count; 726 - phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count; 727 - phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count; 708 + uint32_t total_cid_count = 0; 709 + uint32_t total_icd_count = 0; 710 + uint8_t ulp_num = 0; 711 + 712 + total_cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) + 713 + BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1); 714 + 715 + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 716 + uint32_t align_mask = 0; 717 + uint32_t icd_post_per_page = 0; 718 + uint32_t icd_count_unavailable = 0; 719 + uint32_t icd_start = 0, icd_count = 0; 720 + uint32_t icd_start_align = 0, icd_count_align = 0; 721 + 722 + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 723 + icd_start = phba->fw_config.iscsi_icd_start[ulp_num]; 724 + icd_count = phba->fw_config.iscsi_icd_count[ulp_num]; 725 + 726 + /* Get ICD count that can be posted on each page */ 727 + icd_post_per_page = (PAGE_SIZE / (BE2_SGE * 728 + sizeof(struct iscsi_sge))); 729 + align_mask = (icd_post_per_page - 1); 730 + 731 + /* Check if icd_start is aligned ICD per page posting */ 732 + if (icd_start % icd_post_per_page) { 733 + icd_start_align = ((icd_start + 734 + icd_post_per_page) & 735 + ~(align_mask)); 736 + phba->fw_config. 
737 + iscsi_icd_start[ulp_num] = 738 + icd_start_align; 739 + } 740 + 741 + icd_count_align = (icd_count & ~align_mask); 742 + 743 + /* ICD discarded in the process of alignment */ 744 + if (icd_start_align) 745 + icd_count_unavailable = ((icd_start_align - 746 + icd_start) + 747 + (icd_count - 748 + icd_count_align)); 749 + 750 + /* Updated ICD count available */ 751 + phba->fw_config.iscsi_icd_count[ulp_num] = (icd_count - 752 + icd_count_unavailable); 753 + 754 + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 755 + "BM_%d : Aligned ICD values\n" 756 + "\t ICD Start : %d\n" 757 + "\t ICD Count : %d\n" 758 + "\t ICD Discarded : %d\n", 759 + phba->fw_config. 760 + iscsi_icd_start[ulp_num], 761 + phba->fw_config. 762 + iscsi_icd_count[ulp_num], 763 + icd_count_unavailable); 764 + break; 765 + } 766 + } 767 + 768 + total_icd_count = phba->fw_config.iscsi_icd_count[ulp_num]; 769 + phba->params.ios_per_ctrl = (total_icd_count - 770 + (total_cid_count + 771 + BE2_TMFS + BE2_NOPOUT_REQ)); 772 + phba->params.cxns_per_ctrl = total_cid_count; 773 + phba->params.asyncpdus_per_ctrl = total_cid_count; 774 + phba->params.icds_per_ctrl = total_icd_count; 728 775 phba->params.num_sge_per_io = BE2_SGE; 729 776 phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ; 730 777 phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ; 731 778 phba->params.eq_timer = 64; 732 - phba->params.num_eq_entries = 733 - (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2 734 - + BE2_TMFS) / 512) + 1) * 512; 735 - phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024) 736 - ? 
1024 : phba->params.num_eq_entries; 737 - beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 738 - "BM_%d : phba->params.num_eq_entries=%d\n", 739 - phba->params.num_eq_entries); 740 - phba->params.num_cq_entries = 741 - (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2 742 - + BE2_TMFS) / 512) + 1) * 512; 779 + phba->params.num_eq_entries = 1024; 780 + phba->params.num_cq_entries = 1024; 743 781 phba->params.wrbs_per_cxn = 256; 744 782 } 745 783 ··· 1685 1613 1686 1614 WARN_ON(!pasync_handle); 1687 1615 1688 - pasync_handle->cri = 1689 - BE_GET_CRI_FROM_CID(beiscsi_conn->beiscsi_conn_cid); 1616 + pasync_handle->cri = BE_GET_ASYNC_CRI_FROM_CID( 1617 + beiscsi_conn->beiscsi_conn_cid); 1690 1618 pasync_handle->is_header = is_header; 1691 1619 pasync_handle->buffer_len = dpl; 1692 1620 *pcq_index = index; ··· 1746 1674 } 1747 1675 1748 1676 static void hwi_free_async_msg(struct beiscsi_hba *phba, 1749 - unsigned int cri) 1677 + struct hwi_async_pdu_context *pasync_ctx, 1678 + unsigned int cri) 1750 1679 { 1751 - struct hwi_controller *phwi_ctrlr; 1752 - struct hwi_async_pdu_context *pasync_ctx; 1753 1680 struct async_pdu_handle *pasync_handle, *tmp_handle; 1754 1681 struct list_head *plist; 1755 1682 1756 - phwi_ctrlr = phba->phwi_ctrlr; 1757 - pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr); 1758 - 1759 1683 plist = &pasync_ctx->async_entry[cri].wait_queue.list; 1760 - 1761 1684 list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) { 1762 1685 list_del(&pasync_handle->link); 1763 1686 ··· 1787 1720 } 1788 1721 1789 1722 static void hwi_post_async_buffers(struct beiscsi_hba *phba, 1790 - unsigned int is_header) 1723 + unsigned int is_header, uint8_t ulp_num) 1791 1724 { 1792 1725 struct hwi_controller *phwi_ctrlr; 1793 1726 struct hwi_async_pdu_context *pasync_ctx; ··· 1795 1728 struct list_head *pfree_link, *pbusy_list; 1796 1729 struct phys_addr *pasync_sge; 1797 1730 unsigned int ring_id, num_entries; 1798 - unsigned int host_write_num; 1731 + 
unsigned int host_write_num, doorbell_offset; 1799 1732 unsigned int writables; 1800 1733 unsigned int i = 0; 1801 1734 u32 doorbell = 0; 1802 1735 1803 1736 phwi_ctrlr = phba->phwi_ctrlr; 1804 - pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr); 1737 + pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num); 1805 1738 num_entries = pasync_ctx->num_entries; 1806 1739 1807 1740 if (is_header) { ··· 1809 1742 pasync_ctx->async_header.free_entries); 1810 1743 pfree_link = pasync_ctx->async_header.free_list.next; 1811 1744 host_write_num = pasync_ctx->async_header.host_write_ptr; 1812 - ring_id = phwi_ctrlr->default_pdu_hdr.id; 1745 + ring_id = phwi_ctrlr->default_pdu_hdr[ulp_num].id; 1746 + doorbell_offset = phwi_ctrlr->default_pdu_hdr[ulp_num]. 1747 + doorbell_offset; 1813 1748 } else { 1814 1749 writables = min(pasync_ctx->async_data.writables, 1815 1750 pasync_ctx->async_data.free_entries); 1816 1751 pfree_link = pasync_ctx->async_data.free_list.next; 1817 1752 host_write_num = pasync_ctx->async_data.host_write_ptr; 1818 - ring_id = phwi_ctrlr->default_pdu_data.id; 1753 + ring_id = phwi_ctrlr->default_pdu_data[ulp_num].id; 1754 + doorbell_offset = phwi_ctrlr->default_pdu_data[ulp_num]. 
1755 + doorbell_offset; 1819 1756 } 1820 1757 1821 1758 writables = (writables / 8) * 8; ··· 1867 1796 doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK) 1868 1797 << DB_DEF_PDU_CQPROC_SHIFT; 1869 1798 1870 - iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET); 1799 + iowrite32(doorbell, phba->db_va + doorbell_offset); 1871 1800 } 1872 1801 } 1873 1802 ··· 1879 1808 struct hwi_async_pdu_context *pasync_ctx; 1880 1809 struct async_pdu_handle *pasync_handle = NULL; 1881 1810 unsigned int cq_index = -1; 1811 + uint16_t cri_index = BE_GET_CRI_FROM_CID( 1812 + beiscsi_conn->beiscsi_conn_cid); 1882 1813 1883 1814 phwi_ctrlr = phba->phwi_ctrlr; 1884 - pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr); 1815 + pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, 1816 + BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, 1817 + cri_index)); 1885 1818 1886 1819 pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx, 1887 1820 pdpdu_cqe, &cq_index); ··· 1894 1819 hwi_update_async_writables(phba, pasync_ctx, 1895 1820 pasync_handle->is_header, cq_index); 1896 1821 1897 - hwi_free_async_msg(phba, pasync_handle->cri); 1898 - hwi_post_async_buffers(phba, pasync_handle->is_header); 1822 + hwi_free_async_msg(phba, pasync_ctx, pasync_handle->cri); 1823 + hwi_post_async_buffers(phba, pasync_handle->is_header, 1824 + BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, 1825 + cri_index)); 1899 1826 } 1900 1827 1901 1828 static unsigned int ··· 1936 1859 phdr, hdr_len, pfirst_buffer, 1937 1860 offset); 1938 1861 1939 - hwi_free_async_msg(phba, cri); 1862 + hwi_free_async_msg(phba, pasync_ctx, cri); 1940 1863 return 0; 1941 1864 } 1942 1865 ··· 1952 1875 struct pdu_base *ppdu; 1953 1876 1954 1877 phwi_ctrlr = phba->phwi_ctrlr; 1955 - pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr); 1878 + pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, 1879 + BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, 1880 + BE_GET_CRI_FROM_CID(beiscsi_conn-> 1881 + beiscsi_conn_cid))); 1956 1882 1957 1883 list_del(&pasync_handle->link); 1958 1884 if 
(pasync_handle->is_header) { 1959 1885 pasync_ctx->async_header.busy_entries--; 1960 1886 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) { 1961 - hwi_free_async_msg(phba, cri); 1887 + hwi_free_async_msg(phba, pasync_ctx, cri); 1962 1888 BUG(); 1963 1889 } 1964 1890 ··· 2016 1936 struct hwi_async_pdu_context *pasync_ctx; 2017 1937 struct async_pdu_handle *pasync_handle = NULL; 2018 1938 unsigned int cq_index = -1; 1939 + uint16_t cri_index = BE_GET_CRI_FROM_CID( 1940 + beiscsi_conn->beiscsi_conn_cid); 2019 1941 2020 1942 phwi_ctrlr = phba->phwi_ctrlr; 2021 - pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr); 1943 + pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, 1944 + BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, 1945 + cri_index)); 1946 + 2022 1947 pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx, 2023 1948 pdpdu_cqe, &cq_index); 2024 1949 ··· 2032 1947 pasync_handle->is_header, cq_index); 2033 1948 2034 1949 hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle); 2035 - hwi_post_async_buffers(phba, pasync_handle->is_header); 1950 + hwi_post_async_buffers(phba, pasync_handle->is_header, 1951 + BEISCSI_GET_ULP_FROM_CRI( 1952 + phwi_ctrlr, cri_index)); 2036 1953 } 2037 1954 2038 1955 static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba) ··· 2159 2072 "BM_%d : Received %s[%d] on CID : %d\n", 2160 2073 cqe_desc[code], code, cid); 2161 2074 2075 + spin_lock_bh(&phba->async_pdu_lock); 2162 2076 hwi_process_default_pdu_ring(beiscsi_conn, phba, 2163 2077 (struct i_t_dpdu_cqe *)sol); 2078 + spin_unlock_bh(&phba->async_pdu_lock); 2164 2079 break; 2165 2080 case UNSOL_DATA_NOTIFY: 2166 2081 beiscsi_log(phba, KERN_INFO, ··· 2170 2081 "BM_%d : Received %s[%d] on CID : %d\n", 2171 2082 cqe_desc[code], code, cid); 2172 2083 2084 + spin_lock_bh(&phba->async_pdu_lock); 2173 2085 hwi_process_default_pdu_ring(beiscsi_conn, phba, 2174 2086 (struct i_t_dpdu_cqe *)sol); 2087 + spin_unlock_bh(&phba->async_pdu_lock); 2175 2088 break; 2176 2089 case 
CXN_INVALIDATE_INDEX_NOTIFY: 2177 2090 case CMD_INVALIDATED_NOTIFY: ··· 2201 2110 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 2202 2111 "BM_%d : Dropping %s[%d] on DPDU ring on CID : %d\n", 2203 2112 cqe_desc[code], code, cid); 2113 + spin_lock_bh(&phba->async_pdu_lock); 2204 2114 hwi_flush_default_pdu_buffer(phba, beiscsi_conn, 2205 2115 (struct i_t_dpdu_cqe *) sol); 2116 + spin_unlock_bh(&phba->async_pdu_lock); 2206 2117 break; 2207 2118 case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL: 2208 2119 case CXN_KILLED_BURST_LEN_MISMATCH: ··· 2569 2476 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); 2570 2477 } 2571 2478 2479 + /** 2480 + * beiscsi_find_mem_req()- Find mem needed 2481 + * @phba: ptr to HBA struct 2482 + **/ 2572 2483 static void beiscsi_find_mem_req(struct beiscsi_hba *phba) 2573 2484 { 2485 + uint8_t mem_descr_index, ulp_num; 2574 2486 unsigned int num_cq_pages, num_async_pdu_buf_pages; 2575 2487 unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn; 2576 2488 unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages; 2577 2489 2578 2490 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \ 2579 2491 sizeof(struct sol_cqe)); 2580 - num_async_pdu_buf_pages = 2581 - PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \ 2582 - phba->params.defpdu_hdr_sz); 2583 - num_async_pdu_buf_sgl_pages = 2584 - PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \ 2585 - sizeof(struct phys_addr)); 2586 - num_async_pdu_data_pages = 2587 - PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \ 2588 - phba->params.defpdu_data_sz); 2589 - num_async_pdu_data_sgl_pages = 2590 - PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \ 2591 - sizeof(struct phys_addr)); 2592 2492 2593 2493 phba->params.hwi_ws_sz = sizeof(struct hwi_controller); 2594 2494 ··· 2603 2517 phba->params.icds_per_ctrl; 2604 2518 phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) * 2605 2519 phba->params.num_sge_per_io * phba->params.icds_per_ctrl; 2520 + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; 
ulp_num++) { 2521 + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 2606 2522 2607 - phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] = 2608 - num_async_pdu_buf_pages * PAGE_SIZE; 2609 - phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] = 2610 - num_async_pdu_data_pages * PAGE_SIZE; 2611 - phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] = 2612 - num_async_pdu_buf_sgl_pages * PAGE_SIZE; 2613 - phba->mem_req[HWI_MEM_ASYNC_DATA_RING] = 2614 - num_async_pdu_data_sgl_pages * PAGE_SIZE; 2615 - phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] = 2616 - phba->params.asyncpdus_per_ctrl * 2617 - sizeof(struct async_pdu_handle); 2618 - phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] = 2619 - phba->params.asyncpdus_per_ctrl * 2620 - sizeof(struct async_pdu_handle); 2621 - phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] = 2622 - sizeof(struct hwi_async_pdu_context) + 2623 - (phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry)); 2523 + num_async_pdu_buf_sgl_pages = 2524 + PAGES_REQUIRED(BEISCSI_GET_CID_COUNT( 2525 + phba, ulp_num) * 2526 + sizeof(struct phys_addr)); 2527 + 2528 + num_async_pdu_buf_pages = 2529 + PAGES_REQUIRED(BEISCSI_GET_CID_COUNT( 2530 + phba, ulp_num) * 2531 + phba->params.defpdu_hdr_sz); 2532 + 2533 + num_async_pdu_data_pages = 2534 + PAGES_REQUIRED(BEISCSI_GET_CID_COUNT( 2535 + phba, ulp_num) * 2536 + phba->params.defpdu_data_sz); 2537 + 2538 + num_async_pdu_data_sgl_pages = 2539 + PAGES_REQUIRED(BEISCSI_GET_CID_COUNT( 2540 + phba, ulp_num) * 2541 + sizeof(struct phys_addr)); 2542 + 2543 + mem_descr_index = (HWI_MEM_TEMPLATE_HDR_ULP0 + 2544 + (ulp_num * MEM_DESCR_OFFSET)); 2545 + phba->mem_req[mem_descr_index] = 2546 + BEISCSI_GET_CID_COUNT(phba, ulp_num) * 2547 + BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE; 2548 + 2549 + mem_descr_index = (HWI_MEM_ASYNC_HEADER_BUF_ULP0 + 2550 + (ulp_num * MEM_DESCR_OFFSET)); 2551 + phba->mem_req[mem_descr_index] = 2552 + num_async_pdu_buf_pages * 2553 + PAGE_SIZE; 2554 + 2555 + mem_descr_index = (HWI_MEM_ASYNC_DATA_BUF_ULP0 + 2556 + (ulp_num * MEM_DESCR_OFFSET)); 
2557 + phba->mem_req[mem_descr_index] = 2558 + num_async_pdu_data_pages * 2559 + PAGE_SIZE; 2560 + 2561 + mem_descr_index = (HWI_MEM_ASYNC_HEADER_RING_ULP0 + 2562 + (ulp_num * MEM_DESCR_OFFSET)); 2563 + phba->mem_req[mem_descr_index] = 2564 + num_async_pdu_buf_sgl_pages * 2565 + PAGE_SIZE; 2566 + 2567 + mem_descr_index = (HWI_MEM_ASYNC_DATA_RING_ULP0 + 2568 + (ulp_num * MEM_DESCR_OFFSET)); 2569 + phba->mem_req[mem_descr_index] = 2570 + num_async_pdu_data_sgl_pages * 2571 + PAGE_SIZE; 2572 + 2573 + mem_descr_index = (HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 + 2574 + (ulp_num * MEM_DESCR_OFFSET)); 2575 + phba->mem_req[mem_descr_index] = 2576 + BEISCSI_GET_CID_COUNT(phba, ulp_num) * 2577 + sizeof(struct async_pdu_handle); 2578 + 2579 + mem_descr_index = (HWI_MEM_ASYNC_DATA_HANDLE_ULP0 + 2580 + (ulp_num * MEM_DESCR_OFFSET)); 2581 + phba->mem_req[mem_descr_index] = 2582 + BEISCSI_GET_CID_COUNT(phba, ulp_num) * 2583 + sizeof(struct async_pdu_handle); 2584 + 2585 + mem_descr_index = (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 + 2586 + (ulp_num * MEM_DESCR_OFFSET)); 2587 + phba->mem_req[mem_descr_index] = 2588 + sizeof(struct hwi_async_pdu_context) + 2589 + (BEISCSI_GET_CID_COUNT(phba, ulp_num) * 2590 + sizeof(struct hwi_async_entry)); 2591 + } 2592 + } 2624 2593 } 2625 2594 2626 2595 static int beiscsi_alloc_mem(struct beiscsi_hba *phba) ··· 2717 2576 2718 2577 mem_descr = phba->init_mem; 2719 2578 for (i = 0; i < SE_MEM_MAX; i++) { 2579 + if (!phba->mem_req[i]) { 2580 + mem_descr->mem_array = NULL; 2581 + mem_descr++; 2582 + continue; 2583 + } 2584 + 2720 2585 j = 0; 2721 2586 mem_arr = mem_arr_orig; 2722 2587 alloc_size = phba->mem_req[i]; ··· 2844 2697 /* Allocate memory for WRBQ */ 2845 2698 phwi_ctxt = phwi_ctrlr->phwi_ctxt; 2846 2699 phwi_ctxt->be_wrbq = kzalloc(sizeof(struct be_queue_info) * 2847 - phba->fw_config.iscsi_cid_count, 2700 + phba->params.cxns_per_ctrl, 2848 2701 GFP_KERNEL); 2849 2702 if (!phwi_ctxt->be_wrbq) { 2850 2703 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, ··· 
2926 2779 2927 2780 static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) 2928 2781 { 2782 + uint8_t ulp_num; 2929 2783 struct hwi_controller *phwi_ctrlr; 2930 2784 struct hba_parameters *p = &phba->params; 2931 2785 struct hwi_async_pdu_context *pasync_ctx; ··· 2934 2786 unsigned int index, idx, num_per_mem, num_async_data; 2935 2787 struct be_mem_descriptor *mem_descr; 2936 2788 2937 - mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2938 - mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT; 2789 + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 2790 + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 2939 2791 2940 - phwi_ctrlr = phba->phwi_ctrlr; 2941 - phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *) 2792 + mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2793 + mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 + 2794 + (ulp_num * MEM_DESCR_OFFSET)); 2795 + 2796 + phwi_ctrlr = phba->phwi_ctrlr; 2797 + phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num] = 2798 + (struct hwi_async_pdu_context *) 2799 + mem_descr->mem_array[0].virtual_address; 2800 + 2801 + pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num]; 2802 + memset(pasync_ctx, 0, sizeof(*pasync_ctx)); 2803 + 2804 + pasync_ctx->async_entry = 2805 + (struct hwi_async_entry *) 2806 + ((long unsigned int)pasync_ctx + 2807 + sizeof(struct hwi_async_pdu_context)); 2808 + 2809 + pasync_ctx->num_entries = BEISCSI_GET_CID_COUNT(phba, 2810 + ulp_num); 2811 + pasync_ctx->buffer_size = p->defpdu_hdr_sz; 2812 + 2813 + mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2814 + mem_descr += HWI_MEM_ASYNC_HEADER_BUF_ULP0 + 2815 + (ulp_num * MEM_DESCR_OFFSET); 2816 + if (mem_descr->mem_array[0].virtual_address) { 2817 + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2818 + "BM_%d : hwi_init_async_pdu_ctx" 2819 + " HWI_MEM_ASYNC_HEADER_BUF_ULP%d va=%p\n", 2820 + ulp_num, 2821 + mem_descr->mem_array[0]. 
2822 + virtual_address); 2823 + } else 2824 + beiscsi_log(phba, KERN_WARNING, 2825 + BEISCSI_LOG_INIT, 2826 + "BM_%d : No Virtual address for ULP : %d\n", 2827 + ulp_num); 2828 + 2829 + pasync_ctx->async_header.va_base = 2942 2830 mem_descr->mem_array[0].virtual_address; 2943 - pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx; 2944 - memset(pasync_ctx, 0, sizeof(*pasync_ctx)); 2945 2831 2946 - pasync_ctx->async_entry = kzalloc(sizeof(struct hwi_async_entry) * 2947 - phba->fw_config.iscsi_cid_count, 2948 - GFP_KERNEL); 2949 - if (!pasync_ctx->async_entry) { 2950 - beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2951 - "BM_%d : hwi_init_async_pdu_ctx Mem Alloc Failed\n"); 2952 - return -ENOMEM; 2953 - } 2832 + pasync_ctx->async_header.pa_base.u.a64.address = 2833 + mem_descr->mem_array[0]. 2834 + bus_address.u.a64.address; 2954 2835 2955 - pasync_ctx->num_entries = p->asyncpdus_per_ctrl; 2956 - pasync_ctx->buffer_size = p->defpdu_hdr_sz; 2836 + mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2837 + mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 + 2838 + (ulp_num * MEM_DESCR_OFFSET); 2839 + if (mem_descr->mem_array[0].virtual_address) { 2840 + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2841 + "BM_%d : hwi_init_async_pdu_ctx" 2842 + " HWI_MEM_ASYNC_HEADER_RING_ULP%d va=%p\n", 2843 + ulp_num, 2844 + mem_descr->mem_array[0]. 
2845 + virtual_address); 2846 + } else 2847 + beiscsi_log(phba, KERN_WARNING, 2848 + BEISCSI_LOG_INIT, 2849 + "BM_%d : No Virtual address for ULP : %d\n", 2850 + ulp_num); 2957 2851 2958 - mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2959 - mem_descr += HWI_MEM_ASYNC_HEADER_BUF; 2960 - if (mem_descr->mem_array[0].virtual_address) { 2961 - beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2962 - "BM_%d : hwi_init_async_pdu_ctx" 2963 - " HWI_MEM_ASYNC_HEADER_BUF va=%p\n", 2964 - mem_descr->mem_array[0].virtual_address); 2965 - } else 2966 - beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 2967 - "BM_%d : No Virtual address\n"); 2852 + pasync_ctx->async_header.ring_base = 2853 + mem_descr->mem_array[0].virtual_address; 2968 2854 2969 - pasync_ctx->async_header.va_base = 2970 - mem_descr->mem_array[0].virtual_address; 2855 + mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2856 + mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 + 2857 + (ulp_num * MEM_DESCR_OFFSET); 2858 + if (mem_descr->mem_array[0].virtual_address) { 2859 + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2860 + "BM_%d : hwi_init_async_pdu_ctx" 2861 + " HWI_MEM_ASYNC_HEADER_HANDLE_ULP%d va=%p\n", 2862 + ulp_num, 2863 + mem_descr->mem_array[0]. 
2864 + virtual_address); 2865 + } else 2866 + beiscsi_log(phba, KERN_WARNING, 2867 + BEISCSI_LOG_INIT, 2868 + "BM_%d : No Virtual address for ULP : %d\n", 2869 + ulp_num); 2971 2870 2972 - pasync_ctx->async_header.pa_base.u.a64.address = 2973 - mem_descr->mem_array[0].bus_address.u.a64.address; 2871 + pasync_ctx->async_header.handle_base = 2872 + mem_descr->mem_array[0].virtual_address; 2873 + pasync_ctx->async_header.writables = 0; 2874 + INIT_LIST_HEAD(&pasync_ctx->async_header.free_list); 2974 2875 2975 - mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2976 - mem_descr += HWI_MEM_ASYNC_HEADER_RING; 2977 - if (mem_descr->mem_array[0].virtual_address) { 2978 - beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2979 - "BM_%d : hwi_init_async_pdu_ctx" 2980 - " HWI_MEM_ASYNC_HEADER_RING va=%p\n", 2981 - mem_descr->mem_array[0].virtual_address); 2982 - } else 2983 - beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 2984 - "BM_%d : No Virtual address\n"); 2876 + mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2877 + mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 + 2878 + (ulp_num * MEM_DESCR_OFFSET); 2879 + if (mem_descr->mem_array[0].virtual_address) { 2880 + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2881 + "BM_%d : hwi_init_async_pdu_ctx" 2882 + " HWI_MEM_ASYNC_DATA_RING_ULP%d va=%p\n", 2883 + ulp_num, 2884 + mem_descr->mem_array[0]. 
2885 + virtual_address); 2886 + } else 2887 + beiscsi_log(phba, KERN_WARNING, 2888 + BEISCSI_LOG_INIT, 2889 + "BM_%d : No Virtual address for ULP : %d\n", 2890 + ulp_num); 2985 2891 2986 - pasync_ctx->async_header.ring_base = 2987 - mem_descr->mem_array[0].virtual_address; 2892 + pasync_ctx->async_data.ring_base = 2893 + mem_descr->mem_array[0].virtual_address; 2988 2894 2989 - mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2990 - mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE; 2991 - if (mem_descr->mem_array[0].virtual_address) { 2992 - beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2993 - "BM_%d : hwi_init_async_pdu_ctx" 2994 - " HWI_MEM_ASYNC_HEADER_HANDLE va=%p\n", 2995 - mem_descr->mem_array[0].virtual_address); 2996 - } else 2997 - beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 2998 - "BM_%d : No Virtual address\n"); 2895 + mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2896 + mem_descr += HWI_MEM_ASYNC_DATA_HANDLE_ULP0 + 2897 + (ulp_num * MEM_DESCR_OFFSET); 2898 + if (!mem_descr->mem_array[0].virtual_address) 2899 + beiscsi_log(phba, KERN_WARNING, 2900 + BEISCSI_LOG_INIT, 2901 + "BM_%d : No Virtual address for ULP : %d\n", 2902 + ulp_num); 2999 2903 3000 - pasync_ctx->async_header.handle_base = 3001 - mem_descr->mem_array[0].virtual_address; 3002 - pasync_ctx->async_header.writables = 0; 3003 - INIT_LIST_HEAD(&pasync_ctx->async_header.free_list); 2904 + pasync_ctx->async_data.handle_base = 2905 + mem_descr->mem_array[0].virtual_address; 2906 + pasync_ctx->async_data.writables = 0; 2907 + INIT_LIST_HEAD(&pasync_ctx->async_data.free_list); 3004 2908 2909 + pasync_header_h = 2910 + (struct async_pdu_handle *) 2911 + pasync_ctx->async_header.handle_base; 2912 + pasync_data_h = 2913 + (struct async_pdu_handle *) 2914 + pasync_ctx->async_data.handle_base; 3005 2915 3006 - mem_descr = (struct be_mem_descriptor *)phba->init_mem; 3007 - mem_descr += HWI_MEM_ASYNC_DATA_RING; 3008 - if (mem_descr->mem_array[0].virtual_address) { 3009 - beiscsi_log(phba, 
KERN_INFO, BEISCSI_LOG_INIT, 3010 - "BM_%d : hwi_init_async_pdu_ctx" 3011 - " HWI_MEM_ASYNC_DATA_RING va=%p\n", 3012 - mem_descr->mem_array[0].virtual_address); 3013 - } else 3014 - beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 3015 - "BM_%d : No Virtual address\n"); 2916 + mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2917 + mem_descr += HWI_MEM_ASYNC_DATA_BUF_ULP0 + 2918 + (ulp_num * MEM_DESCR_OFFSET); 2919 + if (mem_descr->mem_array[0].virtual_address) { 2920 + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2921 + "BM_%d : hwi_init_async_pdu_ctx" 2922 + " HWI_MEM_ASYNC_DATA_BUF_ULP%d va=%p\n", 2923 + ulp_num, 2924 + mem_descr->mem_array[0]. 2925 + virtual_address); 2926 + } else 2927 + beiscsi_log(phba, KERN_WARNING, 2928 + BEISCSI_LOG_INIT, 2929 + "BM_%d : No Virtual address for ULP : %d\n", 2930 + ulp_num); 3016 2931 3017 - pasync_ctx->async_data.ring_base = 3018 - mem_descr->mem_array[0].virtual_address; 3019 - 3020 - mem_descr = (struct be_mem_descriptor *)phba->init_mem; 3021 - mem_descr += HWI_MEM_ASYNC_DATA_HANDLE; 3022 - if (!mem_descr->mem_array[0].virtual_address) 3023 - beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 3024 - "BM_%d : No Virtual address\n"); 3025 - 3026 - pasync_ctx->async_data.handle_base = 3027 - mem_descr->mem_array[0].virtual_address; 3028 - pasync_ctx->async_data.writables = 0; 3029 - INIT_LIST_HEAD(&pasync_ctx->async_data.free_list); 3030 - 3031 - pasync_header_h = 3032 - (struct async_pdu_handle *)pasync_ctx->async_header.handle_base; 3033 - pasync_data_h = 3034 - (struct async_pdu_handle *)pasync_ctx->async_data.handle_base; 3035 - 3036 - mem_descr = (struct be_mem_descriptor *)phba->init_mem; 3037 - mem_descr += HWI_MEM_ASYNC_DATA_BUF; 3038 - if (mem_descr->mem_array[0].virtual_address) { 3039 - beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3040 - "BM_%d : hwi_init_async_pdu_ctx" 3041 - " HWI_MEM_ASYNC_DATA_BUF va=%p\n", 3042 - mem_descr->mem_array[0].virtual_address); 3043 - } else 3044 - beiscsi_log(phba, 
KERN_WARNING, BEISCSI_LOG_INIT, 3045 - "BM_%d : No Virtual address\n"); 3046 - 3047 - idx = 0; 3048 - pasync_ctx->async_data.va_base = 3049 - mem_descr->mem_array[idx].virtual_address; 3050 - pasync_ctx->async_data.pa_base.u.a64.address = 3051 - mem_descr->mem_array[idx].bus_address.u.a64.address; 3052 - 3053 - num_async_data = ((mem_descr->mem_array[idx].size) / 3054 - phba->params.defpdu_data_sz); 3055 - num_per_mem = 0; 3056 - 3057 - for (index = 0; index < p->asyncpdus_per_ctrl; index++) { 3058 - pasync_header_h->cri = -1; 3059 - pasync_header_h->index = (char)index; 3060 - INIT_LIST_HEAD(&pasync_header_h->link); 3061 - pasync_header_h->pbuffer = 3062 - (void *)((unsigned long) 3063 - (pasync_ctx->async_header.va_base) + 3064 - (p->defpdu_hdr_sz * index)); 3065 - 3066 - pasync_header_h->pa.u.a64.address = 3067 - pasync_ctx->async_header.pa_base.u.a64.address + 3068 - (p->defpdu_hdr_sz * index); 3069 - 3070 - list_add_tail(&pasync_header_h->link, 3071 - &pasync_ctx->async_header.free_list); 3072 - pasync_header_h++; 3073 - pasync_ctx->async_header.free_entries++; 3074 - pasync_ctx->async_header.writables++; 3075 - 3076 - INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list); 3077 - INIT_LIST_HEAD(&pasync_ctx->async_entry[index]. 
3078 - header_busy_list); 3079 - pasync_data_h->cri = -1; 3080 - pasync_data_h->index = (char)index; 3081 - INIT_LIST_HEAD(&pasync_data_h->link); 3082 - 3083 - if (!num_async_data) { 3084 - num_per_mem = 0; 3085 - idx++; 2932 + idx = 0; 3086 2933 pasync_ctx->async_data.va_base = 3087 2934 mem_descr->mem_array[idx].virtual_address; 3088 2935 pasync_ctx->async_data.pa_base.u.a64.address = ··· 3086 2943 3087 2944 num_async_data = ((mem_descr->mem_array[idx].size) / 3088 2945 phba->params.defpdu_data_sz); 2946 + num_per_mem = 0; 2947 + 2948 + for (index = 0; index < BEISCSI_GET_CID_COUNT 2949 + (phba, ulp_num); index++) { 2950 + pasync_header_h->cri = -1; 2951 + pasync_header_h->index = (char)index; 2952 + INIT_LIST_HEAD(&pasync_header_h->link); 2953 + pasync_header_h->pbuffer = 2954 + (void *)((unsigned long) 2955 + (pasync_ctx-> 2956 + async_header.va_base) + 2957 + (p->defpdu_hdr_sz * index)); 2958 + 2959 + pasync_header_h->pa.u.a64.address = 2960 + pasync_ctx->async_header.pa_base.u.a64. 2961 + address + (p->defpdu_hdr_sz * index); 2962 + 2963 + list_add_tail(&pasync_header_h->link, 2964 + &pasync_ctx->async_header. 2965 + free_list); 2966 + pasync_header_h++; 2967 + pasync_ctx->async_header.free_entries++; 2968 + pasync_ctx->async_header.writables++; 2969 + 2970 + INIT_LIST_HEAD(&pasync_ctx->async_entry[index]. 2971 + wait_queue.list); 2972 + INIT_LIST_HEAD(&pasync_ctx->async_entry[index]. 2973 + header_busy_list); 2974 + pasync_data_h->cri = -1; 2975 + pasync_data_h->index = (char)index; 2976 + INIT_LIST_HEAD(&pasync_data_h->link); 2977 + 2978 + if (!num_async_data) { 2979 + num_per_mem = 0; 2980 + idx++; 2981 + pasync_ctx->async_data.va_base = 2982 + mem_descr->mem_array[idx]. 2983 + virtual_address; 2984 + pasync_ctx->async_data.pa_base.u. 2985 + a64.address = 2986 + mem_descr->mem_array[idx]. 2987 + bus_address.u.a64.address; 2988 + num_async_data = 2989 + ((mem_descr->mem_array[idx]. 
2990 + size) / 2991 + phba->params.defpdu_data_sz); 2992 + } 2993 + pasync_data_h->pbuffer = 2994 + (void *)((unsigned long) 2995 + (pasync_ctx->async_data.va_base) + 2996 + (p->defpdu_data_sz * num_per_mem)); 2997 + 2998 + pasync_data_h->pa.u.a64.address = 2999 + pasync_ctx->async_data.pa_base.u.a64. 3000 + address + (p->defpdu_data_sz * 3001 + num_per_mem); 3002 + num_per_mem++; 3003 + num_async_data--; 3004 + 3005 + list_add_tail(&pasync_data_h->link, 3006 + &pasync_ctx->async_data. 3007 + free_list); 3008 + pasync_data_h++; 3009 + pasync_ctx->async_data.free_entries++; 3010 + pasync_ctx->async_data.writables++; 3011 + 3012 + INIT_LIST_HEAD(&pasync_ctx->async_entry[index]. 3013 + data_busy_list); 3014 + } 3015 + 3016 + pasync_ctx->async_header.host_write_ptr = 0; 3017 + pasync_ctx->async_header.ep_read_ptr = -1; 3018 + pasync_ctx->async_data.host_write_ptr = 0; 3019 + pasync_ctx->async_data.ep_read_ptr = -1; 3089 3020 } 3090 - pasync_data_h->pbuffer = 3091 - (void *)((unsigned long) 3092 - (pasync_ctx->async_data.va_base) + 3093 - (p->defpdu_data_sz * num_per_mem)); 3094 - 3095 - pasync_data_h->pa.u.a64.address = 3096 - pasync_ctx->async_data.pa_base.u.a64.address + 3097 - (p->defpdu_data_sz * num_per_mem); 3098 - num_per_mem++; 3099 - num_async_data--; 3100 - 3101 - list_add_tail(&pasync_data_h->link, 3102 - &pasync_ctx->async_data.free_list); 3103 - pasync_data_h++; 3104 - pasync_ctx->async_data.free_entries++; 3105 - pasync_ctx->async_data.writables++; 3106 - 3107 - INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list); 3108 3021 } 3109 - 3110 - pasync_ctx->async_header.host_write_ptr = 0; 3111 - pasync_ctx->async_header.ep_read_ptr = -1; 3112 - pasync_ctx->async_data.host_write_ptr = 0; 3113 - pasync_ctx->async_data.ep_read_ptr = -1; 3114 3022 3115 3023 return 0; 3116 3024 } ··· 3358 3164 beiscsi_create_def_hdr(struct beiscsi_hba *phba, 3359 3165 struct hwi_context_memory *phwi_context, 3360 3166 struct hwi_controller *phwi_ctrlr, 3361 - unsigned 
int def_pdu_ring_sz) 3167 + unsigned int def_pdu_ring_sz, uint8_t ulp_num) 3362 3168 { 3363 3169 unsigned int idx; 3364 3170 int ret; ··· 3368 3174 void *dq_vaddress; 3369 3175 3370 3176 idx = 0; 3371 - dq = &phwi_context->be_def_hdrq; 3177 + dq = &phwi_context->be_def_hdrq[ulp_num]; 3372 3178 cq = &phwi_context->be_cq[0]; 3373 3179 mem = &dq->dma_mem; 3374 3180 mem_descr = phba->init_mem; 3375 - mem_descr += HWI_MEM_ASYNC_HEADER_RING; 3181 + mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 + 3182 + (ulp_num * MEM_DESCR_OFFSET); 3376 3183 dq_vaddress = mem_descr->mem_array[idx].virtual_address; 3377 3184 ret = be_fill_queue(dq, mem_descr->mem_array[0].size / 3378 3185 sizeof(struct phys_addr), 3379 3186 sizeof(struct phys_addr), dq_vaddress); 3380 3187 if (ret) { 3381 3188 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3382 - "BM_%d : be_fill_queue Failed for DEF PDU HDR\n"); 3189 + "BM_%d : be_fill_queue Failed for DEF PDU HDR on ULP : %d\n", 3190 + ulp_num); 3191 + 3383 3192 return ret; 3384 3193 } 3385 3194 mem->dma = (unsigned long)mem_descr->mem_array[idx]. 
3386 3195 bus_address.u.a64.address; 3387 3196 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq, 3388 3197 def_pdu_ring_sz, 3389 - phba->params.defpdu_hdr_sz); 3198 + phba->params.defpdu_hdr_sz, 3199 + BEISCSI_DEFQ_HDR, ulp_num); 3390 3200 if (ret) { 3391 3201 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3392 - "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR\n"); 3202 + "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR on ULP : %d\n", 3203 + ulp_num); 3204 + 3393 3205 return ret; 3394 3206 } 3395 - phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id; 3396 - beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3397 - "BM_%d : iscsi def pdu id is %d\n", 3398 - phwi_context->be_def_hdrq.id); 3399 3207 3400 - hwi_post_async_buffers(phba, 1); 3208 + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3209 + "BM_%d : iscsi hdr def pdu id for ULP : %d is %d\n", 3210 + ulp_num, 3211 + phwi_context->be_def_hdrq[ulp_num].id); 3212 + hwi_post_async_buffers(phba, BEISCSI_DEFQ_HDR, ulp_num); 3401 3213 return 0; 3402 3214 } 3403 3215 ··· 3411 3211 beiscsi_create_def_data(struct beiscsi_hba *phba, 3412 3212 struct hwi_context_memory *phwi_context, 3413 3213 struct hwi_controller *phwi_ctrlr, 3414 - unsigned int def_pdu_ring_sz) 3214 + unsigned int def_pdu_ring_sz, uint8_t ulp_num) 3415 3215 { 3416 3216 unsigned int idx; 3417 3217 int ret; ··· 3421 3221 void *dq_vaddress; 3422 3222 3423 3223 idx = 0; 3424 - dataq = &phwi_context->be_def_dataq; 3224 + dataq = &phwi_context->be_def_dataq[ulp_num]; 3425 3225 cq = &phwi_context->be_cq[0]; 3426 3226 mem = &dataq->dma_mem; 3427 3227 mem_descr = phba->init_mem; 3428 - mem_descr += HWI_MEM_ASYNC_DATA_RING; 3228 + mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 + 3229 + (ulp_num * MEM_DESCR_OFFSET); 3429 3230 dq_vaddress = mem_descr->mem_array[idx].virtual_address; 3430 3231 ret = be_fill_queue(dataq, mem_descr->mem_array[0].size / 3431 3232 sizeof(struct phys_addr), 3432 3233 sizeof(struct phys_addr), dq_vaddress); 3433 
3234 if (ret) { 3434 3235 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3435 - "BM_%d : be_fill_queue Failed for DEF PDU DATA\n"); 3236 + "BM_%d : be_fill_queue Failed for DEF PDU " 3237 + "DATA on ULP : %d\n", 3238 + ulp_num); 3239 + 3436 3240 return ret; 3437 3241 } 3438 3242 mem->dma = (unsigned long)mem_descr->mem_array[idx]. 3439 3243 bus_address.u.a64.address; 3440 3244 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq, 3441 3245 def_pdu_ring_sz, 3442 - phba->params.defpdu_data_sz); 3246 + phba->params.defpdu_data_sz, 3247 + BEISCSI_DEFQ_DATA, ulp_num); 3443 3248 if (ret) { 3444 3249 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3445 3250 "BM_%d be_cmd_create_default_pdu_queue" 3446 - " Failed for DEF PDU DATA\n"); 3251 + " Failed for DEF PDU DATA on ULP : %d\n", 3252 + ulp_num); 3447 3253 return ret; 3448 3254 } 3449 - phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id; 3450 - beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3451 - "BM_%d : iscsi def data id is %d\n", 3452 - phwi_context->be_def_dataq.id); 3453 3255 3454 - hwi_post_async_buffers(phba, 0); 3455 3256 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3456 - "BM_%d : DEFAULT PDU DATA RING CREATED\n"); 3257 + "BM_%d : iscsi def data id on ULP : %d is %d\n", 3258 + ulp_num, 3259 + phwi_context->be_def_dataq[ulp_num].id); 3457 3260 3261 + hwi_post_async_buffers(phba, BEISCSI_DEFQ_DATA, ulp_num); 3262 + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3263 + "BM_%d : DEFAULT PDU DATA RING CREATED" 3264 + "on ULP : %d\n", ulp_num); 3265 + 3266 + return 0; 3267 + } 3268 + 3269 + 3270 + static int 3271 + beiscsi_post_template_hdr(struct beiscsi_hba *phba) 3272 + { 3273 + struct be_mem_descriptor *mem_descr; 3274 + struct mem_array *pm_arr; 3275 + struct be_dma_mem sgl; 3276 + int status, ulp_num; 3277 + 3278 + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3279 + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3280 + mem_descr = (struct be_mem_descriptor 
*)phba->init_mem; 3281 + mem_descr += HWI_MEM_TEMPLATE_HDR_ULP0 + 3282 + (ulp_num * MEM_DESCR_OFFSET); 3283 + pm_arr = mem_descr->mem_array; 3284 + 3285 + hwi_build_be_sgl_arr(phba, pm_arr, &sgl); 3286 + status = be_cmd_iscsi_post_template_hdr( 3287 + &phba->ctrl, &sgl); 3288 + 3289 + if (status != 0) { 3290 + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3291 + "BM_%d : Post Template HDR Failed for" 3292 + "ULP_%d\n", ulp_num); 3293 + return status; 3294 + } 3295 + 3296 + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3297 + "BM_%d : Template HDR Pages Posted for" 3298 + "ULP_%d\n", ulp_num); 3299 + } 3300 + } 3458 3301 return 0; 3459 3302 } 3460 3303 ··· 3508 3265 struct mem_array *pm_arr; 3509 3266 unsigned int page_offset, i; 3510 3267 struct be_dma_mem sgl; 3511 - int status; 3268 + int status, ulp_num = 0; 3512 3269 3513 3270 mem_descr = phba->init_mem; 3514 3271 mem_descr += HWI_MEM_SGE; 3515 3272 pm_arr = mem_descr->mem_array; 3516 3273 3274 + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) 3275 + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) 3276 + break; 3277 + 3517 3278 page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io * 3518 - phba->fw_config.iscsi_icd_start) / PAGE_SIZE; 3279 + phba->fw_config.iscsi_icd_start[ulp_num]) / PAGE_SIZE; 3519 3280 for (i = 0; i < mem_descr->num_elements; i++) { 3520 3281 hwi_build_be_sgl_arr(phba, pm_arr, &sgl); 3521 3282 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl, ··· 3571 3324 { 3572 3325 unsigned int wrb_mem_index, offset, size, num_wrb_rings; 3573 3326 u64 pa_addr_lo; 3574 - unsigned int idx, num, i; 3327 + unsigned int idx, num, i, ulp_num; 3575 3328 struct mem_array *pwrb_arr; 3576 3329 void *wrb_vaddr; 3577 3330 struct be_dma_mem sgl; 3578 3331 struct be_mem_descriptor *mem_descr; 3579 3332 struct hwi_wrb_context *pwrb_context; 3580 3333 int status; 3334 + uint8_t ulp_count = 0, ulp_base_num = 0; 3335 + uint16_t cid_count_ulp[BEISCSI_ULP_COUNT] = { 0 }; 3581 3336 3582 
3337 idx = 0; 3583 3338 mem_descr = phba->init_mem; ··· 3623 3374 num_wrb_rings--; 3624 3375 } 3625 3376 } 3377 + 3378 + /* Get the ULP Count */ 3379 + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) 3380 + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3381 + ulp_count++; 3382 + ulp_base_num = ulp_num; 3383 + cid_count_ulp[ulp_num] = 3384 + BEISCSI_GET_CID_COUNT(phba, ulp_num); 3385 + } 3386 + 3626 3387 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 3627 3388 wrb_mem_index = 0; 3628 3389 offset = 0; 3629 3390 size = 0; 3630 3391 3392 + if (ulp_count > 1) { 3393 + ulp_base_num = (ulp_base_num + 1) % BEISCSI_ULP_COUNT; 3394 + 3395 + if (!cid_count_ulp[ulp_base_num]) 3396 + ulp_base_num = (ulp_base_num + 1) % 3397 + BEISCSI_ULP_COUNT; 3398 + 3399 + cid_count_ulp[ulp_base_num]--; 3400 + } 3401 + 3402 + 3631 3403 hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl); 3632 3404 status = be_cmd_wrbq_create(&phba->ctrl, &sgl, 3633 - &phwi_context->be_wrbq[i]); 3405 + &phwi_context->be_wrbq[i], 3406 + &phwi_ctrlr->wrb_context[i], 3407 + ulp_base_num); 3634 3408 if (status != 0) { 3635 3409 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3636 3410 "BM_%d : wrbq create failed."); ··· 3661 3389 return status; 3662 3390 } 3663 3391 pwrb_context = &phwi_ctrlr->wrb_context[i]; 3664 - pwrb_context->cid = phwi_context->be_wrbq[i].id; 3665 3392 BE_SET_CID_TO_CRI(i, pwrb_context->cid); 3666 3393 } 3667 3394 kfree(pwrb_arr); ··· 3704 3433 struct hwi_controller *phwi_ctrlr; 3705 3434 struct hwi_context_memory *phwi_context; 3706 3435 struct hwi_async_pdu_context *pasync_ctx; 3707 - int i, eq_num; 3436 + int i, eq_num, ulp_num; 3708 3437 3709 3438 phwi_ctrlr = phba->phwi_ctrlr; 3710 3439 phwi_context = phwi_ctrlr->phwi_ctxt; 3440 + 3441 + be_cmd_iscsi_remove_template_hdr(ctrl); 3442 + 3711 3443 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 3712 3444 q = &phwi_context->be_wrbq[i]; 3713 3445 if (q->created) ··· 3719 3445 kfree(phwi_context->be_wrbq); 3720 3446 
free_wrb_handles(phba); 3721 3447 3722 - q = &phwi_context->be_def_hdrq; 3723 - if (q->created) 3724 - beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); 3448 + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3449 + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3725 3450 3726 - q = &phwi_context->be_def_dataq; 3727 - if (q->created) 3728 - beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); 3451 + q = &phwi_context->be_def_hdrq[ulp_num]; 3452 + if (q->created) 3453 + beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); 3454 + 3455 + q = &phwi_context->be_def_dataq[ulp_num]; 3456 + if (q->created) 3457 + beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); 3458 + 3459 + pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num]; 3460 + } 3461 + } 3729 3462 3730 3463 beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL); 3731 3464 ··· 3751 3470 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ); 3752 3471 } 3753 3472 be_mcc_queues_destroy(phba); 3754 - 3755 - pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx; 3756 - kfree(pasync_ctx->async_entry); 3757 3473 be_cmd_fw_uninit(ctrl); 3758 3474 } 3759 3475 ··· 3816 3538 BEISCSI_MAX_NUM_CPUS : num_cpus; 3817 3539 break; 3818 3540 case BE_GEN4: 3819 - phba->num_cpus = (num_cpus > OC_SKH_MAX_NUM_CPUS) ? 3820 - OC_SKH_MAX_NUM_CPUS : num_cpus; 3541 + /* 3542 + * If eqid_count == 1 fall back to 3543 + * INTX mechanism 3544 + **/ 3545 + if (phba->fw_config.eqid_count == 1) { 3546 + enable_msix = 0; 3547 + phba->num_cpus = 1; 3548 + return; 3549 + } 3550 + 3551 + phba->num_cpus = 3552 + (num_cpus > (phba->fw_config.eqid_count - 1)) ? 
3553 + (phba->fw_config.eqid_count - 1) : num_cpus; 3821 3554 break; 3822 3555 default: 3823 3556 phba->num_cpus = 1; ··· 3841 3552 struct hwi_context_memory *phwi_context; 3842 3553 unsigned int def_pdu_ring_sz; 3843 3554 struct be_ctrl_info *ctrl = &phba->ctrl; 3844 - int status; 3555 + int status, ulp_num; 3845 3556 3846 - def_pdu_ring_sz = 3847 - phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr); 3848 3557 phwi_ctrlr = phba->phwi_ctrlr; 3849 3558 phwi_context = phwi_ctrlr->phwi_ctxt; 3850 3559 phwi_context->max_eqd = 0; ··· 3875 3588 goto error; 3876 3589 } 3877 3590 3878 - status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr, 3879 - def_pdu_ring_sz); 3880 - if (status != 0) { 3881 - beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3882 - "BM_%d : Default Header not created\n"); 3883 - goto error; 3884 - } 3591 + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3592 + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3885 3593 3886 - status = beiscsi_create_def_data(phba, phwi_context, 3887 - phwi_ctrlr, def_pdu_ring_sz); 3888 - if (status != 0) { 3889 - beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3890 - "BM_%d : Default Data not created\n"); 3891 - goto error; 3594 + def_pdu_ring_sz = 3595 + BEISCSI_GET_CID_COUNT(phba, ulp_num) * 3596 + sizeof(struct phys_addr); 3597 + 3598 + status = beiscsi_create_def_hdr(phba, phwi_context, 3599 + phwi_ctrlr, 3600 + def_pdu_ring_sz, 3601 + ulp_num); 3602 + if (status != 0) { 3603 + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3604 + "BM_%d : Default Header not created for ULP : %d\n", 3605 + ulp_num); 3606 + goto error; 3607 + } 3608 + 3609 + status = beiscsi_create_def_data(phba, phwi_context, 3610 + phwi_ctrlr, 3611 + def_pdu_ring_sz, 3612 + ulp_num); 3613 + if (status != 0) { 3614 + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3615 + "BM_%d : Default Data not created for ULP : %d\n", 3616 + ulp_num); 3617 + goto error; 3618 + } 3619 + } 3892 3620 } 3893 3621 3894 3622 status = 
beiscsi_post_pages(phba); ··· 3913 3611 goto error; 3914 3612 } 3915 3613 3614 + status = beiscsi_post_template_hdr(phba); 3615 + if (status != 0) { 3616 + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3617 + "BM_%d : Template HDR Posting for CXN Failed\n"); 3618 + } 3619 + 3916 3620 status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr); 3917 3621 if (status != 0) { 3918 3622 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3919 3623 "BM_%d : WRB Rings not created\n"); 3920 3624 goto error; 3625 + } 3626 + 3627 + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3628 + uint16_t async_arr_idx = 0; 3629 + 3630 + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3631 + uint16_t cri = 0; 3632 + struct hwi_async_pdu_context *pasync_ctx; 3633 + 3634 + pasync_ctx = HWI_GET_ASYNC_PDU_CTX( 3635 + phwi_ctrlr, ulp_num); 3636 + for (cri = 0; cri < 3637 + phba->params.cxns_per_ctrl; cri++) { 3638 + if (ulp_num == BEISCSI_GET_ULP_FROM_CRI 3639 + (phwi_ctrlr, cri)) 3640 + pasync_ctx->cid_to_async_cri_map[ 3641 + phwi_ctrlr->wrb_context[cri].cid] = 3642 + async_arr_idx++; 3643 + } 3644 + } 3921 3645 } 3922 3646 3923 3647 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, ··· 4010 3682 (unsigned long)mem_descr->mem_array[j - 1]. 
4011 3683 bus_address.u.a64.address); 4012 3684 } 3685 + 4013 3686 kfree(mem_descr->mem_array); 4014 3687 mem_descr++; 4015 3688 } ··· 4050 3721 struct sgl_handle *psgl_handle; 4051 3722 struct iscsi_sge *pfrag; 4052 3723 unsigned int arr_index, i, idx; 3724 + unsigned int ulp_icd_start, ulp_num = 0; 4053 3725 4054 3726 phba->io_sgl_hndl_avbl = 0; 4055 3727 phba->eh_sgl_hndl_avbl = 0; ··· 4117 3787 "\n BM_%d : mem_descr_sg->num_elements=%d\n", 4118 3788 mem_descr_sg->num_elements); 4119 3789 3790 + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) 3791 + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) 3792 + break; 3793 + 3794 + ulp_icd_start = phba->fw_config.iscsi_icd_start[ulp_num]; 3795 + 4120 3796 arr_index = 0; 4121 3797 idx = 0; 4122 3798 while (idx < mem_descr_sg->num_elements) { ··· 4141 3805 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0); 4142 3806 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0); 4143 3807 pfrag += phba->params.num_sge_per_io; 4144 - psgl_handle->sgl_index = 4145 - phba->fw_config.iscsi_icd_start + arr_index++; 3808 + psgl_handle->sgl_index = ulp_icd_start + arr_index++; 4146 3809 } 4147 3810 idx++; 4148 3811 } ··· 4154 3819 4155 3820 static int hba_setup_cid_tbls(struct beiscsi_hba *phba) 4156 3821 { 4157 - int i; 3822 + int ret; 3823 + uint16_t i, ulp_num; 3824 + struct ulp_cid_info *ptr_cid_info = NULL; 4158 3825 4159 - phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl, 4160 - GFP_KERNEL); 4161 - if (!phba->cid_array) { 4162 - beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4163 - "BM_%d : Failed to allocate memory in " 4164 - "hba_setup_cid_tbls\n"); 4165 - return -ENOMEM; 3826 + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3827 + if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { 3828 + ptr_cid_info = kzalloc(sizeof(struct ulp_cid_info), 3829 + GFP_KERNEL); 3830 + 3831 + if (!ptr_cid_info) { 3832 + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3833 + 
"BM_%d : Failed to allocate memory" 3834 + "for ULP_CID_INFO for ULP : %d\n", 3835 + ulp_num); 3836 + ret = -ENOMEM; 3837 + goto free_memory; 3838 + 3839 + } 3840 + 3841 + /* Allocate memory for CID array */ 3842 + ptr_cid_info->cid_array = kzalloc(sizeof(void *) * 3843 + BEISCSI_GET_CID_COUNT(phba, 3844 + ulp_num), GFP_KERNEL); 3845 + if (!ptr_cid_info->cid_array) { 3846 + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3847 + "BM_%d : Failed to allocate memory" 3848 + "for CID_ARRAY for ULP : %d\n", 3849 + ulp_num); 3850 + kfree(ptr_cid_info); 3851 + ptr_cid_info = NULL; 3852 + ret = -ENOMEM; 3853 + 3854 + goto free_memory; 3855 + } 3856 + ptr_cid_info->avlbl_cids = BEISCSI_GET_CID_COUNT( 3857 + phba, ulp_num); 3858 + 3859 + /* Save the cid_info_array ptr */ 3860 + phba->cid_array_info[ulp_num] = ptr_cid_info; 3861 + } 4166 3862 } 4167 3863 phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) * 4168 3864 phba->params.cxns_per_ctrl, GFP_KERNEL); ··· 4201 3835 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4202 3836 "BM_%d : Failed to allocate memory in " 4203 3837 "hba_setup_cid_tbls\n"); 4204 - kfree(phba->cid_array); 4205 - phba->cid_array = NULL; 4206 - return -ENOMEM; 3838 + ret = -ENOMEM; 3839 + 3840 + goto free_memory; 4207 3841 } 4208 3842 4209 3843 phba->conn_table = kzalloc(sizeof(struct beiscsi_conn *) * ··· 4213 3847 "BM_%d : Failed to allocate memory in" 4214 3848 "hba_setup_cid_tbls\n"); 4215 3849 4216 - kfree(phba->cid_array); 4217 3850 kfree(phba->ep_array); 4218 - phba->cid_array = NULL; 4219 3851 phba->ep_array = NULL; 4220 - return -ENOMEM; 3852 + ret = -ENOMEM; 4221 3853 } 4222 3854 4223 - for (i = 0; i < phba->params.cxns_per_ctrl; i++) 4224 - phba->cid_array[i] = phba->phwi_ctrlr->wrb_context[i].cid; 3855 + for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 3856 + ulp_num = phba->phwi_ctrlr->wrb_context[i].ulp_num; 4225 3857 4226 - phba->avlbl_cids = phba->params.cxns_per_ctrl; 3858 + ptr_cid_info = phba->cid_array_info[ulp_num]; 3859 + 
ptr_cid_info->cid_array[ptr_cid_info->cid_alloc++] = 3860 + phba->phwi_ctrlr->wrb_context[i].cid; 3861 + 3862 + } 3863 + 3864 + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3865 + if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { 3866 + ptr_cid_info = phba->cid_array_info[ulp_num]; 3867 + 3868 + ptr_cid_info->cid_alloc = 0; 3869 + ptr_cid_info->cid_free = 0; 3870 + } 3871 + } 4227 3872 return 0; 3873 + 3874 + free_memory: 3875 + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3876 + if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { 3877 + ptr_cid_info = phba->cid_array_info[ulp_num]; 3878 + 3879 + if (ptr_cid_info) { 3880 + kfree(ptr_cid_info->cid_array); 3881 + kfree(ptr_cid_info); 3882 + phba->cid_array_info[ulp_num] = NULL; 3883 + } 3884 + } 3885 + } 3886 + 3887 + return ret; 4228 3888 } 4229 3889 4230 3890 static void hwi_enable_intr(struct beiscsi_hba *phba) ··· 4505 4113 4506 4114 static void beiscsi_clean_port(struct beiscsi_hba *phba) 4507 4115 { 4508 - int mgmt_status; 4116 + int mgmt_status, ulp_num; 4117 + struct ulp_cid_info *ptr_cid_info = NULL; 4509 4118 4510 - mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0); 4511 - if (mgmt_status) 4512 - beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 4513 - "BM_%d : mgmt_epfw_cleanup FAILED\n"); 4119 + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 4120 + if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { 4121 + mgmt_status = mgmt_epfw_cleanup(phba, ulp_num); 4122 + if (mgmt_status) 4123 + beiscsi_log(phba, KERN_WARNING, 4124 + BEISCSI_LOG_INIT, 4125 + "BM_%d : mgmt_epfw_cleanup FAILED" 4126 + " for ULP_%d\n", ulp_num); 4127 + } 4128 + } 4514 4129 4515 4130 hwi_purge_eq(phba); 4516 4131 hwi_cleanup(phba); 4517 4132 kfree(phba->io_sgl_hndl_base); 4518 4133 kfree(phba->eh_sgl_hndl_base); 4519 - kfree(phba->cid_array); 4520 4134 kfree(phba->ep_array); 4521 4135 kfree(phba->conn_table); 4136 + 4137 + for (ulp_num = 0; 
ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 4138 + if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { 4139 + ptr_cid_info = phba->cid_array_info[ulp_num]; 4140 + 4141 + if (ptr_cid_info) { 4142 + kfree(ptr_cid_info->cid_array); 4143 + kfree(ptr_cid_info); 4144 + phba->cid_array_info[ulp_num] = NULL; 4145 + } 4146 + } 4147 + } 4148 + 4522 4149 } 4523 4150 4524 4151 /** ··· 4666 4255 doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) 4667 4256 << DB_DEF_PDU_WRB_INDEX_SHIFT; 4668 4257 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4669 - 4670 - iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET); 4258 + iowrite32(doorbell, phba->db_va + 4259 + beiscsi_conn->doorbell_offset); 4671 4260 } 4672 4261 4673 4262 static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt, ··· 4892 4481 DB_DEF_PDU_WRB_INDEX_MASK) << 4893 4482 DB_DEF_PDU_WRB_INDEX_SHIFT; 4894 4483 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4895 - iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET); 4484 + iowrite32(doorbell, phba->db_va + 4485 + beiscsi_conn->doorbell_offset); 4896 4486 return 0; 4897 4487 } 4898 4488 ··· 4948 4536 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; 4949 4537 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4950 4538 4951 - iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET); 4539 + iowrite32(doorbell, phba->db_va + 4540 + beiscsi_conn->doorbell_offset); 4952 4541 return 0; 4953 4542 } 4954 4543 ··· 5051 4638 doorbell |= (io_task->pwrb_handle->wrb_index & 5052 4639 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; 5053 4640 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 5054 - iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET); 4641 + iowrite32(doorbell, phba->db_va + 4642 + beiscsi_conn->doorbell_offset); 5055 4643 return 0; 5056 4644 } 5057 4645 ··· 5077 4663 struct beiscsi_hba *phba = NULL; 5078 4664 5079 4665 phba = ((struct beiscsi_conn *)conn->dd_data)->phba; 5080 - beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO, 5081 - "BM_%d : 
scsi_dma_map Failed\n"); 4666 + beiscsi_log(phba, KERN_ERR, 4667 + BEISCSI_LOG_IO | BEISCSI_LOG_ISCSI, 4668 + "BM_%d : scsi_dma_map Failed " 4669 + "Driver_ITT : 0x%x ITT : 0x%x Xferlen : 0x%x\n", 4670 + be32_to_cpu(io_task->cmd_bhs->iscsi_hdr.itt), 4671 + io_task->libiscsi_itt, scsi_bufflen(sc)); 5082 4672 5083 4673 return num_sg; 5084 4674 } ··· 5187 4769 /* 5188 4770 * beiscsi_quiesce()- Cleanup Driver resources 5189 4771 * @phba: Instance Priv structure 4772 + * @unload_state:i Clean or EEH unload state 5190 4773 * 5191 4774 * Free the OS and HW resources held by the driver 5192 4775 **/ 5193 - static void beiscsi_quiesce(struct beiscsi_hba *phba) 4776 + static void beiscsi_quiesce(struct beiscsi_hba *phba, 4777 + uint32_t unload_state) 5194 4778 { 5195 4779 struct hwi_controller *phwi_ctrlr; 5196 4780 struct hwi_context_memory *phwi_context; ··· 5205 4785 if (phba->msix_enabled) { 5206 4786 for (i = 0; i <= phba->num_cpus; i++) { 5207 4787 msix_vec = phba->msix_entries[i].vector; 4788 + synchronize_irq(msix_vec); 5208 4789 free_irq(msix_vec, &phwi_context->be_eq[i]); 5209 4790 kfree(phba->msi_name[i]); 5210 4791 } 5211 4792 } else 5212 - if (phba->pcidev->irq) 4793 + if (phba->pcidev->irq) { 4794 + synchronize_irq(phba->pcidev->irq); 5213 4795 free_irq(phba->pcidev->irq, phba); 4796 + } 5214 4797 pci_disable_msix(phba->pcidev); 5215 - destroy_workqueue(phba->wq); 4798 + 5216 4799 if (blk_iopoll_enabled) 5217 4800 for (i = 0; i < phba->num_cpus; i++) { 5218 4801 pbe_eq = &phwi_context->be_eq[i]; 5219 4802 blk_iopoll_disable(&pbe_eq->iopoll); 5220 4803 } 5221 4804 5222 - beiscsi_clean_port(phba); 5223 - beiscsi_free_mem(phba); 4805 + if (unload_state == BEISCSI_CLEAN_UNLOAD) { 4806 + destroy_workqueue(phba->wq); 4807 + beiscsi_clean_port(phba); 4808 + beiscsi_free_mem(phba); 5224 4809 5225 - beiscsi_unmap_pci_function(phba); 5226 - pci_free_consistent(phba->pcidev, 5227 - phba->ctrl.mbox_mem_alloced.size, 5228 - phba->ctrl.mbox_mem_alloced.va, 5229 - 
phba->ctrl.mbox_mem_alloced.dma); 4810 + beiscsi_unmap_pci_function(phba); 4811 + pci_free_consistent(phba->pcidev, 4812 + phba->ctrl.mbox_mem_alloced.size, 4813 + phba->ctrl.mbox_mem_alloced.va, 4814 + phba->ctrl.mbox_mem_alloced.dma); 4815 + } else { 4816 + hwi_purge_eq(phba); 4817 + hwi_cleanup(phba); 4818 + } 5230 4819 5231 4820 cancel_delayed_work_sync(&phba->beiscsi_hw_check_task); 5232 4821 } ··· 5252 4823 } 5253 4824 5254 4825 beiscsi_destroy_def_ifaces(phba); 5255 - beiscsi_quiesce(phba); 4826 + beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD); 5256 4827 iscsi_boot_destroy_kset(phba->boot_kset); 5257 4828 iscsi_host_remove(phba->shost); 5258 4829 pci_dev_put(phba->pcidev); 5259 4830 iscsi_host_free(phba->shost); 4831 + pci_disable_pcie_error_reporting(pcidev); 4832 + pci_set_drvdata(pcidev, NULL); 5260 4833 pci_disable_device(pcidev); 5261 4834 } 5262 4835 ··· 5273 4842 return; 5274 4843 } 5275 4844 5276 - beiscsi_quiesce(phba); 4845 + beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD); 5277 4846 pci_disable_device(pcidev); 5278 4847 } 5279 4848 ··· 5311 4880 msecs_to_jiffies(1000)); 5312 4881 } 5313 4882 4883 + 4884 + static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev, 4885 + pci_channel_state_t state) 4886 + { 4887 + struct beiscsi_hba *phba = NULL; 4888 + 4889 + phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); 4890 + phba->state |= BE_ADAPTER_PCI_ERR; 4891 + 4892 + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4893 + "BM_%d : EEH error detected\n"); 4894 + 4895 + beiscsi_quiesce(phba, BEISCSI_EEH_UNLOAD); 4896 + 4897 + if (state == pci_channel_io_perm_failure) { 4898 + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4899 + "BM_%d : EEH : State PERM Failure"); 4900 + return PCI_ERS_RESULT_DISCONNECT; 4901 + } 4902 + 4903 + pci_disable_device(pdev); 4904 + 4905 + /* The error could cause the FW to trigger a flash debug dump. 4906 + * Resetting the card while flash dump is in progress 4907 + * can cause it not to recover; wait for it to finish. 
4908 + * Wait only for first function as it is needed only once per 4909 + * adapter. 4910 + **/ 4911 + if (pdev->devfn == 0) 4912 + ssleep(30); 4913 + 4914 + return PCI_ERS_RESULT_NEED_RESET; 4915 + } 4916 + 4917 + static pci_ers_result_t beiscsi_eeh_reset(struct pci_dev *pdev) 4918 + { 4919 + struct beiscsi_hba *phba = NULL; 4920 + int status = 0; 4921 + 4922 + phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); 4923 + 4924 + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4925 + "BM_%d : EEH Reset\n"); 4926 + 4927 + status = pci_enable_device(pdev); 4928 + if (status) 4929 + return PCI_ERS_RESULT_DISCONNECT; 4930 + 4931 + pci_set_master(pdev); 4932 + pci_set_power_state(pdev, PCI_D0); 4933 + pci_restore_state(pdev); 4934 + 4935 + /* Wait for the CHIP Reset to complete */ 4936 + status = be_chk_reset_complete(phba); 4937 + if (!status) { 4938 + beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 4939 + "BM_%d : EEH Reset Completed\n"); 4940 + } else { 4941 + beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 4942 + "BM_%d : EEH Reset Completion Failure\n"); 4943 + return PCI_ERS_RESULT_DISCONNECT; 4944 + } 4945 + 4946 + pci_cleanup_aer_uncorrect_error_status(pdev); 4947 + return PCI_ERS_RESULT_RECOVERED; 4948 + } 4949 + 4950 + static void beiscsi_eeh_resume(struct pci_dev *pdev) 4951 + { 4952 + int ret = 0, i; 4953 + struct be_eq_obj *pbe_eq; 4954 + struct beiscsi_hba *phba = NULL; 4955 + struct hwi_controller *phwi_ctrlr; 4956 + struct hwi_context_memory *phwi_context; 4957 + 4958 + phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); 4959 + pci_save_state(pdev); 4960 + 4961 + if (enable_msix) 4962 + find_num_cpus(phba); 4963 + else 4964 + phba->num_cpus = 1; 4965 + 4966 + if (enable_msix) { 4967 + beiscsi_msix_enable(phba); 4968 + if (!phba->msix_enabled) 4969 + phba->num_cpus = 1; 4970 + } 4971 + 4972 + ret = beiscsi_cmd_reset_function(phba); 4973 + if (ret) { 4974 + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4975 + "BM_%d : Reset Failed\n"); 4976 + goto ret_err; 
4977 + } 4978 + 4979 + ret = be_chk_reset_complete(phba); 4980 + if (ret) { 4981 + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4982 + "BM_%d : Failed to get out of reset.\n"); 4983 + goto ret_err; 4984 + } 4985 + 4986 + beiscsi_get_params(phba); 4987 + phba->shost->max_id = phba->params.cxns_per_ctrl; 4988 + phba->shost->can_queue = phba->params.ios_per_ctrl; 4989 + ret = hwi_init_controller(phba); 4990 + 4991 + for (i = 0; i < MAX_MCC_CMD; i++) { 4992 + init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]); 4993 + phba->ctrl.mcc_tag[i] = i + 1; 4994 + phba->ctrl.mcc_numtag[i + 1] = 0; 4995 + phba->ctrl.mcc_tag_available++; 4996 + } 4997 + 4998 + phwi_ctrlr = phba->phwi_ctrlr; 4999 + phwi_context = phwi_ctrlr->phwi_ctxt; 5000 + 5001 + if (blk_iopoll_enabled) { 5002 + for (i = 0; i < phba->num_cpus; i++) { 5003 + pbe_eq = &phwi_context->be_eq[i]; 5004 + blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget, 5005 + be_iopoll); 5006 + blk_iopoll_enable(&pbe_eq->iopoll); 5007 + } 5008 + 5009 + i = (phba->msix_enabled) ? 
i : 0; 5010 + /* Work item for MCC handling */ 5011 + pbe_eq = &phwi_context->be_eq[i]; 5012 + INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs); 5013 + } else { 5014 + if (phba->msix_enabled) { 5015 + for (i = 0; i <= phba->num_cpus; i++) { 5016 + pbe_eq = &phwi_context->be_eq[i]; 5017 + INIT_WORK(&pbe_eq->work_cqs, 5018 + beiscsi_process_all_cqs); 5019 + } 5020 + } else { 5021 + pbe_eq = &phwi_context->be_eq[0]; 5022 + INIT_WORK(&pbe_eq->work_cqs, 5023 + beiscsi_process_all_cqs); 5024 + } 5025 + } 5026 + 5027 + ret = beiscsi_init_irqs(phba); 5028 + if (ret < 0) { 5029 + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5030 + "BM_%d : beiscsi_eeh_resume - " 5031 + "Failed to beiscsi_init_irqs\n"); 5032 + goto ret_err; 5033 + } 5034 + 5035 + hwi_enable_intr(phba); 5036 + phba->state &= ~BE_ADAPTER_PCI_ERR; 5037 + 5038 + return; 5039 + ret_err: 5040 + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5041 + "BM_%d : AER EEH Resume Failed\n"); 5042 + } 5043 + 5314 5044 static int beiscsi_dev_probe(struct pci_dev *pcidev, 5315 5045 const struct pci_device_id *id) 5316 5046 { ··· 5479 4887 struct hwi_controller *phwi_ctrlr; 5480 4888 struct hwi_context_memory *phwi_context; 5481 4889 struct be_eq_obj *pbe_eq; 5482 - int ret, i; 4890 + int ret = 0, i; 5483 4891 5484 4892 ret = beiscsi_enable_pci(pcidev); 5485 4893 if (ret < 0) { ··· 5495 4903 goto disable_pci; 5496 4904 } 5497 4905 4906 + /* Enable EEH reporting */ 4907 + ret = pci_enable_pcie_error_reporting(pcidev); 4908 + if (ret) 4909 + beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 4910 + "BM_%d : PCIe Error Reporting " 4911 + "Enabling Failed\n"); 4912 + 4913 + pci_save_state(pcidev); 4914 + 5498 4915 /* Initialize Driver configuration Paramters */ 5499 4916 beiscsi_hba_attrs_init(phba); 5500 4917 5501 4918 phba->fw_timeout = false; 4919 + phba->mac_addr_set = false; 5502 4920 5503 4921 5504 4922 switch (pcidev->device) { ··· 5531 4929 phba->generation = 0; 5532 4930 } 5533 4931 4932 + ret = be_ctrl_init(phba, 
pcidev); 4933 + if (ret) { 4934 + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4935 + "BM_%d : beiscsi_dev_probe-" 4936 + "Failed in be_ctrl_init\n"); 4937 + goto hba_free; 4938 + } 4939 + 4940 + ret = beiscsi_cmd_reset_function(phba); 4941 + if (ret) { 4942 + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4943 + "BM_%d : Reset Failed\n"); 4944 + goto hba_free; 4945 + } 4946 + ret = be_chk_reset_complete(phba); 4947 + if (ret) { 4948 + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4949 + "BM_%d : Failed to get out of reset.\n"); 4950 + goto hba_free; 4951 + } 4952 + 4953 + spin_lock_init(&phba->io_sgl_lock); 4954 + spin_lock_init(&phba->mgmt_sgl_lock); 4955 + spin_lock_init(&phba->isr_lock); 4956 + spin_lock_init(&phba->async_pdu_lock); 4957 + ret = mgmt_get_fw_config(&phba->ctrl, phba); 4958 + if (ret != 0) { 4959 + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4960 + "BM_%d : Error getting fw config\n"); 4961 + goto free_port; 4962 + } 4963 + 5534 4964 if (enable_msix) 5535 4965 find_num_cpus(phba); 5536 4966 else ··· 5577 4943 if (!phba->msix_enabled) 5578 4944 phba->num_cpus = 1; 5579 4945 } 5580 - ret = be_ctrl_init(phba, pcidev); 5581 - if (ret) { 5582 - beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5583 - "BM_%d : beiscsi_dev_probe-" 5584 - "Failed in be_ctrl_init\n"); 5585 - goto hba_free; 5586 - } 5587 4946 5588 - ret = beiscsi_cmd_reset_function(phba); 5589 - if (ret) { 5590 - beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5591 - "BM_%d : Reset Failed. Aborting Crashdump\n"); 5592 - goto hba_free; 5593 - } 5594 - ret = be_chk_reset_complete(phba); 5595 - if (ret) { 5596 - beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5597 - "BM_%d : Failed to get out of reset." 
5598 - "Aborting Crashdump\n"); 5599 - goto hba_free; 5600 - } 5601 - 5602 - spin_lock_init(&phba->io_sgl_lock); 5603 - spin_lock_init(&phba->mgmt_sgl_lock); 5604 - spin_lock_init(&phba->isr_lock); 5605 - ret = mgmt_get_fw_config(&phba->ctrl, phba); 5606 - if (ret != 0) { 5607 - beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5608 - "BM_%d : Error getting fw config\n"); 5609 - goto free_port; 5610 - } 5611 - phba->shost->max_id = phba->fw_config.iscsi_cid_count; 4947 + phba->shost->max_id = phba->params.cxns_per_ctrl; 5612 4948 beiscsi_get_params(phba); 5613 4949 phba->shost->can_queue = phba->params.ios_per_ctrl; 5614 4950 ret = beiscsi_init_port(phba); ··· 5589 4985 goto free_port; 5590 4986 } 5591 4987 5592 - for (i = 0; i < MAX_MCC_CMD ; i++) { 4988 + for (i = 0; i < MAX_MCC_CMD; i++) { 5593 4989 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]); 5594 4990 phba->ctrl.mcc_tag[i] = i + 1; 5595 4991 phba->ctrl.mcc_numtag[i + 1] = 0; ··· 5693 5089 return ret; 5694 5090 } 5695 5091 5092 + static struct pci_error_handlers beiscsi_eeh_handlers = { 5093 + .error_detected = beiscsi_eeh_err_detected, 5094 + .slot_reset = beiscsi_eeh_reset, 5095 + .resume = beiscsi_eeh_resume, 5096 + }; 5097 + 5696 5098 struct iscsi_transport beiscsi_iscsi_transport = { 5697 5099 .owner = THIS_MODULE, 5698 5100 .name = DRV_NAME, ··· 5737 5127 .probe = beiscsi_dev_probe, 5738 5128 .remove = beiscsi_remove, 5739 5129 .shutdown = beiscsi_shutdown, 5740 - .id_table = beiscsi_pci_id_table 5130 + .id_table = beiscsi_pci_id_table, 5131 + .err_handler = &beiscsi_eeh_handlers 5741 5132 }; 5742 5133 5743 5134
+118 -64
drivers/scsi/be2iscsi/be_main.h
··· 26 26 #include <linux/in.h> 27 27 #include <linux/ctype.h> 28 28 #include <linux/module.h> 29 + #include <linux/aer.h> 29 30 #include <scsi/scsi.h> 30 31 #include <scsi/scsi_cmnd.h> 31 32 #include <scsi/scsi_device.h> ··· 35 34 #include <scsi/libiscsi.h> 36 35 #include <scsi/scsi_transport_iscsi.h> 37 36 38 - #include "be.h" 39 37 #define DRV_NAME "be2iscsi" 40 - #define BUILD_STR "10.0.467.0" 38 + #define BUILD_STR "10.0.659.0" 41 39 #define BE_NAME "Emulex OneConnect" \ 42 40 "Open-iSCSI Driver version" BUILD_STR 43 41 #define DRV_DESC BE_NAME " " "Driver" ··· 66 66 67 67 #define MAX_CPUS 64 68 68 #define BEISCSI_MAX_NUM_CPUS 7 69 - #define OC_SKH_MAX_NUM_CPUS 31 70 69 71 70 #define BEISCSI_VER_STRLEN 32 72 71 ··· 73 74 74 75 #define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */ 75 76 #define BEISCSI_MAX_SECTORS 2048 /* scsi_host->max_sectors */ 77 + #define BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE 128 /* Template size per cxn */ 76 78 77 79 #define BEISCSI_MAX_CMD_LEN 16 /* scsi_host->max_cmd_len */ 78 80 #define BEISCSI_NUM_MAX_LUN 256 /* scsi_host->max_lun */ ··· 97 97 98 98 #define INVALID_SESS_HANDLE 0xFFFFFFFF 99 99 100 - #define BE_ADAPTER_UP 0x00000000 101 - #define BE_ADAPTER_LINK_DOWN 0x00000001 100 + #define BE_ADAPTER_LINK_UP 0x001 101 + #define BE_ADAPTER_LINK_DOWN 0x002 102 + #define BE_ADAPTER_PCI_ERR 0x004 103 + 104 + #define BEISCSI_CLEAN_UNLOAD 0x01 105 + #define BEISCSI_EEH_UNLOAD 0x02 102 106 /** 103 107 * hardware needs the async PDU buffers to be posted in multiples of 8 104 108 * So have atleast 8 of them by default 105 109 */ 106 110 107 - #define HWI_GET_ASYNC_PDU_CTX(phwi) (phwi->phwi_ctxt->pasync_ctx) 111 + #define HWI_GET_ASYNC_PDU_CTX(phwi, ulp_num) \ 112 + (phwi->phwi_ctxt->pasync_ctx[ulp_num]) 108 113 109 114 /********* Memory BAR register ************/ 110 115 #define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc ··· 154 149 #define DB_CQ_REARM_SHIFT (29) /* bit 29 */ 155 150 156 151 #define GET_HWI_CONTROLLER_WS(pc) (pc->phwi_ctrlr) 
157 - #define HWI_GET_DEF_BUFQ_ID(pc) (((struct hwi_controller *)\ 158 - (GET_HWI_CONTROLLER_WS(pc)))->default_pdu_data.id) 159 - #define HWI_GET_DEF_HDRQ_ID(pc) (((struct hwi_controller *)\ 160 - (GET_HWI_CONTROLLER_WS(pc)))->default_pdu_hdr.id) 152 + #define HWI_GET_DEF_BUFQ_ID(pc, ulp_num) (((struct hwi_controller *)\ 153 + (GET_HWI_CONTROLLER_WS(pc)))->default_pdu_data[ulp_num].id) 154 + #define HWI_GET_DEF_HDRQ_ID(pc, ulp_num) (((struct hwi_controller *)\ 155 + (GET_HWI_CONTROLLER_WS(pc)))->default_pdu_hdr[ulp_num].id) 161 156 162 157 #define PAGES_REQUIRED(x) \ 163 158 ((x < PAGE_SIZE) ? 1 : ((x + PAGE_SIZE - 1) / PAGE_SIZE)) 164 159 165 160 #define BEISCSI_MSI_NAME 20 /* size of msi_name string */ 166 161 162 + #define MEM_DESCR_OFFSET 8 163 + #define BEISCSI_DEFQ_HDR 1 164 + #define BEISCSI_DEFQ_DATA 0 167 165 enum be_mem_enum { 168 166 HWI_MEM_ADDN_CONTEXT, 169 167 HWI_MEM_WRB, 170 168 HWI_MEM_WRBH, 171 169 HWI_MEM_SGLH, 172 170 HWI_MEM_SGE, 173 - HWI_MEM_ASYNC_HEADER_BUF, /* 5 */ 174 - HWI_MEM_ASYNC_DATA_BUF, 175 - HWI_MEM_ASYNC_HEADER_RING, 176 - HWI_MEM_ASYNC_DATA_RING, 177 - HWI_MEM_ASYNC_HEADER_HANDLE, 178 - HWI_MEM_ASYNC_DATA_HANDLE, /* 10 */ 179 - HWI_MEM_ASYNC_PDU_CONTEXT, 171 + HWI_MEM_TEMPLATE_HDR_ULP0, 172 + HWI_MEM_ASYNC_HEADER_BUF_ULP0, /* 6 */ 173 + HWI_MEM_ASYNC_DATA_BUF_ULP0, 174 + HWI_MEM_ASYNC_HEADER_RING_ULP0, 175 + HWI_MEM_ASYNC_DATA_RING_ULP0, 176 + HWI_MEM_ASYNC_HEADER_HANDLE_ULP0, 177 + HWI_MEM_ASYNC_DATA_HANDLE_ULP0, /* 11 */ 178 + HWI_MEM_ASYNC_PDU_CONTEXT_ULP0, 179 + HWI_MEM_TEMPLATE_HDR_ULP1, 180 + HWI_MEM_ASYNC_HEADER_BUF_ULP1, /* 14 */ 181 + HWI_MEM_ASYNC_DATA_BUF_ULP1, 182 + HWI_MEM_ASYNC_HEADER_RING_ULP1, 183 + HWI_MEM_ASYNC_DATA_RING_ULP1, 184 + HWI_MEM_ASYNC_HEADER_HANDLE_ULP1, 185 + HWI_MEM_ASYNC_DATA_HANDLE_ULP1, /* 19 */ 186 + HWI_MEM_ASYNC_PDU_CONTEXT_ULP1, 180 187 ISCSI_MEM_GLOBAL_HEADER, 181 188 SE_MEM_MAX 182 189 }; ··· 283 266 unsigned short cid; 284 267 } __packed; 285 268 269 + #define 
BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cri) \ 270 + (phwi_ctrlr->wrb_context[cri].ulp_num) 271 + struct hwi_wrb_context { 272 + struct list_head wrb_handle_list; 273 + struct list_head wrb_handle_drvr_list; 274 + struct wrb_handle **pwrb_handle_base; 275 + struct wrb_handle **pwrb_handle_basestd; 276 + struct iscsi_wrb *plast_wrb; 277 + unsigned short alloc_index; 278 + unsigned short free_index; 279 + unsigned short wrb_handles_available; 280 + unsigned short cid; 281 + uint8_t ulp_num; /* ULP to which CID binded */ 282 + uint16_t register_set; 283 + uint16_t doorbell_format; 284 + uint32_t doorbell_offset; 285 + }; 286 + 287 + struct ulp_cid_info { 288 + unsigned short *cid_array; 289 + unsigned short avlbl_cids; 290 + unsigned short cid_alloc; 291 + unsigned short cid_free; 292 + }; 293 + 294 + #include "be.h" 286 295 #define chip_be2(phba) (phba->generation == BE_GEN2) 287 296 #define chip_be3_r(phba) (phba->generation == BE_GEN3) 288 297 #define is_chip_be2_be3r(phba) (chip_be3_r(phba) || (chip_be2(phba))) 298 + 299 + #define BEISCSI_ULP0 0 300 + #define BEISCSI_ULP1 1 301 + #define BEISCSI_ULP_COUNT 2 302 + #define BEISCSI_ULP0_LOADED 0x01 303 + #define BEISCSI_ULP1_LOADED 0x02 304 + 305 + #define BEISCSI_ULP_AVLBL_CID(phba, ulp_num) \ 306 + (((struct ulp_cid_info *)phba->cid_array_info[ulp_num])->avlbl_cids) 307 + #define BEISCSI_ULP0_AVLBL_CID(phba) \ 308 + BEISCSI_ULP_AVLBL_CID(phba, BEISCSI_ULP0) 309 + #define BEISCSI_ULP1_AVLBL_CID(phba) \ 310 + BEISCSI_ULP_AVLBL_CID(phba, BEISCSI_ULP1) 311 + 289 312 struct beiscsi_hba { 290 313 struct hba_parameters params; 291 314 struct hwi_controller *phwi_ctrlr; ··· 360 303 spinlock_t io_sgl_lock; 361 304 spinlock_t mgmt_sgl_lock; 362 305 spinlock_t isr_lock; 306 + spinlock_t async_pdu_lock; 363 307 unsigned int age; 364 - unsigned short avlbl_cids; 365 - unsigned short cid_alloc; 366 - unsigned short cid_free; 367 308 struct list_head hba_queue; 368 309 #define BE_MAX_SESSION 2048 369 310 #define 
BE_SET_CID_TO_CRI(cri_index, cid) \ 370 311 (phba->cid_to_cri_map[cid] = cri_index) 371 312 #define BE_GET_CRI_FROM_CID(cid) (phba->cid_to_cri_map[cid]) 372 313 unsigned short cid_to_cri_map[BE_MAX_SESSION]; 373 - unsigned short *cid_array; 314 + struct ulp_cid_info *cid_array_info[BEISCSI_ULP_COUNT]; 374 315 struct iscsi_endpoint **ep_array; 375 316 struct beiscsi_conn **conn_table; 376 317 struct iscsi_boot_kset *boot_kset; ··· 380 325 * group together since they are used most frequently 381 326 * for cid to cri conversion 382 327 */ 383 - unsigned int iscsi_cid_start; 384 328 unsigned int phys_port; 329 + unsigned int eqid_count; 330 + unsigned int cqid_count; 331 + unsigned int iscsi_cid_start[BEISCSI_ULP_COUNT]; 332 + #define BEISCSI_GET_CID_COUNT(phba, ulp_num) \ 333 + (phba->fw_config.iscsi_cid_count[ulp_num]) 334 + unsigned int iscsi_cid_count[BEISCSI_ULP_COUNT]; 335 + unsigned int iscsi_icd_count[BEISCSI_ULP_COUNT]; 336 + unsigned int iscsi_icd_start[BEISCSI_ULP_COUNT]; 337 + unsigned int iscsi_chain_start[BEISCSI_ULP_COUNT]; 338 + unsigned int iscsi_chain_count[BEISCSI_ULP_COUNT]; 385 339 386 - unsigned int isr_offset; 387 - unsigned int iscsi_icd_start; 388 - unsigned int iscsi_cid_count; 389 - unsigned int iscsi_icd_count; 390 - unsigned int pci_function; 391 - 392 - unsigned short cid_alloc; 393 - unsigned short cid_free; 394 - unsigned short avlbl_cids; 395 340 unsigned short iscsi_features; 396 - spinlock_t cid_lock; 341 + uint16_t dual_ulp_aware; 342 + unsigned long ulp_supported; 397 343 } fw_config; 398 344 399 345 unsigned int state; ··· 402 346 bool ue_detected; 403 347 struct delayed_work beiscsi_hw_check_task; 404 348 349 + bool mac_addr_set; 405 350 u8 mac_address[ETH_ALEN]; 406 351 char fw_ver_str[BEISCSI_VER_STRLEN]; 407 352 char wq_name[20]; ··· 431 374 struct iscsi_conn *conn; 432 375 struct beiscsi_hba *phba; 433 376 u32 exp_statsn; 377 + u32 doorbell_offset; 434 378 u32 beiscsi_conn_cid; 435 379 struct beiscsi_endpoint *ep; 436 380 
unsigned short login_in_progress; ··· 532 474 }; 533 475 534 476 struct beiscsi_offload_params { 535 - u32 dw[5]; 477 + u32 dw[6]; 536 478 }; 537 479 538 480 #define OFFLD_PARAMS_ERL 0x00000003 ··· 562 504 u8 max_r2t[16]; 563 505 u8 pad[8]; 564 506 u8 exp_statsn[32]; 507 + u8 max_recv_data_segment_length[32]; 565 508 }; 566 509 567 510 /* void hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn, ··· 626 567 627 568 unsigned int buffer_size; 628 569 unsigned int num_entries; 629 - 570 + #define BE_GET_ASYNC_CRI_FROM_CID(cid) (pasync_ctx->cid_to_async_cri_map[cid]) 571 + unsigned short cid_to_async_cri_map[BE_MAX_SESSION]; 630 572 /** 631 573 * This is a varying size list! Do not add anything 632 574 * after this entry!! ··· 945 885 u8 first_burst_length[24]; /* DWORD 3 */ 946 886 u8 rsvd3[8]; /* DOWRD 3 */ 947 887 u8 max_r2t[16]; /* DWORD 4 */ 948 - u8 rsvd4[10]; /* DWORD 4 */ 888 + u8 rsvd4; /* DWORD 4 */ 949 889 u8 hde; /* DWORD 4 */ 950 890 u8 dde; /* DWORD 4 */ 951 891 u8 erl[2]; /* DWORD 4 */ 892 + u8 rsvd5[6]; /* DWORD 4 */ 952 893 u8 imd; /* DWORD 4 */ 953 894 u8 ir2t; /* DWORD 4 */ 895 + u8 rsvd6[3]; /* DWORD 4 */ 954 896 u8 stat_sn[32]; /* DWORD 5 */ 955 - u8 rsvd5[32]; /* DWORD 6 */ 956 - u8 rsvd6[32]; /* DWORD 7 */ 897 + u8 rsvd7[32]; /* DWORD 6 */ 898 + u8 rsvd8[32]; /* DWORD 7 */ 957 899 u8 max_recv_dataseg_len[24]; /* DWORD 8 */ 958 - u8 rsvd7[8]; /* DWORD 8 */ 959 - u8 rsvd8[32]; /* DWORD 9 */ 960 - u8 rsvd9[32]; /* DWORD 10 */ 900 + u8 rsvd9[8]; /* DWORD 8 */ 901 + u8 rsvd10[32]; /* DWORD 9 */ 902 + u8 rsvd11[32]; /* DWORD 10 */ 961 903 u8 max_cxns[16]; /* DWORD 11 */ 962 - u8 rsvd10[11]; /* DWORD 11*/ 904 + u8 rsvd12[11]; /* DWORD 11*/ 963 905 u8 invld; /* DWORD 11 */ 964 - u8 rsvd11;/* DWORD 11*/ 906 + u8 rsvd13;/* DWORD 11*/ 965 907 u8 dmsg; /* DWORD 11 */ 966 908 u8 data_seq_inorder; /* DWORD 11 */ 967 909 u8 pdu_seq_inorder; /* DWORD 11 */ 968 - u8 rsvd12[32]; /*DWORD 12 */ 969 - u8 rsvd13[32]; /* DWORD 13 */ 970 - u8 rsvd14[32]; /* DWORD 
14 */ 971 - u8 rsvd15[32]; /* DWORD 15 */ 910 + u8 rsvd14[32]; /*DWORD 12 */ 911 + u8 rsvd15[32]; /* DWORD 13 */ 912 + u8 rsvd16[32]; /* DWORD 14 */ 913 + u8 rsvd17[32]; /* DWORD 15 */ 972 914 } __packed; 973 915 974 916 ··· 981 919 u32 cidx; /* consumer index */ 982 920 u32 pidx; /* producer index -- not used by most rings */ 983 921 u32 item_size; /* size in bytes of one object */ 922 + u8 ulp_num; /* ULP to which CID binded */ 923 + u16 register_set; 924 + u16 doorbell_format; 925 + u32 doorbell_offset; 984 926 985 927 void *va; /* The virtual address of the ring. This 986 928 * should be last to allow 32 & 64 bit debugger 987 929 * extensions to work. 988 930 */ 989 - }; 990 - 991 - struct hwi_wrb_context { 992 - struct list_head wrb_handle_list; 993 - struct list_head wrb_handle_drvr_list; 994 - struct wrb_handle **pwrb_handle_base; 995 - struct wrb_handle **pwrb_handle_basestd; 996 - struct iscsi_wrb *plast_wrb; 997 - unsigned short alloc_index; 998 - unsigned short free_index; 999 - unsigned short wrb_handles_available; 1000 - unsigned short cid; 1001 931 }; 1002 932 1003 933 struct hwi_controller { ··· 1000 946 1001 947 struct hwi_wrb_context *wrb_context; 1002 948 struct mcc_wrb *pmcc_wrb_base; 1003 - struct be_ring default_pdu_hdr; 1004 - struct be_ring default_pdu_data; 949 + struct be_ring default_pdu_hdr[BEISCSI_ULP_COUNT]; 950 + struct be_ring default_pdu_data[BEISCSI_ULP_COUNT]; 1005 951 struct hwi_context_memory *phwi_ctxt; 1006 952 }; 1007 953 ··· 1032 978 struct be_eq_obj be_eq[MAX_CPUS]; 1033 979 struct be_queue_info be_cq[MAX_CPUS - 1]; 1034 980 1035 - struct be_queue_info be_def_hdrq; 1036 - struct be_queue_info be_def_dataq; 1037 - 1038 981 struct be_queue_info *be_wrbq; 1039 - struct hwi_async_pdu_context *pasync_ctx; 982 + struct be_queue_info be_def_hdrq[BEISCSI_ULP_COUNT]; 983 + struct be_queue_info be_def_dataq[BEISCSI_ULP_COUNT]; 984 + struct hwi_async_pdu_context *pasync_ctx[BEISCSI_ULP_COUNT]; 1040 985 }; 1041 986 1042 987 /* Logging 
related definitions */ ··· 1045 992 #define BEISCSI_LOG_EH 0x0008 /* Error Handler */ 1046 993 #define BEISCSI_LOG_IO 0x0010 /* IO Code Path */ 1047 994 #define BEISCSI_LOG_CONFIG 0x0020 /* CONFIG Code Path */ 995 + #define BEISCSI_LOG_ISCSI 0x0040 /* SCSI/iSCSI Protocol related Logs */ 1048 996 1049 997 #define beiscsi_log(phba, level, mask, fmt, arg...) \ 1050 998 do { \
+237 -55
drivers/scsi/be2iscsi/be_mgmt.c
··· 278 278 return tag; 279 279 } 280 280 281 + /** 282 + * mgmt_get_fw_config()- Get the FW config for the function 283 + * @ctrl: ptr to Ctrl Info 284 + * @phba: ptr to the dev priv structure 285 + * 286 + * Get the FW config and resources available for the function. 287 + * The resources are created based on the count received here. 288 + * 289 + * return 290 + * Success: 0 291 + * Failure: Non-Zero Value 292 + **/ 281 293 int mgmt_get_fw_config(struct be_ctrl_info *ctrl, 282 294 struct beiscsi_hba *phba) 283 295 { ··· 303 291 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 304 292 305 293 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 306 - OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req)); 294 + OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, 295 + EMBED_MBX_MAX_PAYLOAD_SIZE); 307 296 status = be_mbox_notify(ctrl); 308 297 if (!status) { 298 + uint8_t ulp_num = 0; 309 299 struct be_fw_cfg *pfw_cfg; 310 300 pfw_cfg = req; 311 - phba->fw_config.phys_port = pfw_cfg->phys_port; 312 - phba->fw_config.iscsi_icd_start = 313 - pfw_cfg->ulp[0].icd_base; 314 - phba->fw_config.iscsi_icd_count = 315 - pfw_cfg->ulp[0].icd_count; 316 - phba->fw_config.iscsi_cid_start = 317 - pfw_cfg->ulp[0].sq_base; 318 - phba->fw_config.iscsi_cid_count = 319 - pfw_cfg->ulp[0].sq_count; 320 - if (phba->fw_config.iscsi_cid_count > (BE2_MAX_SESSIONS / 2)) { 321 - beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 322 - "BG_%d : FW reported MAX CXNS as %d\t" 323 - "Max Supported = %d.\n", 324 - phba->fw_config.iscsi_cid_count, 325 - BE2_MAX_SESSIONS); 326 - phba->fw_config.iscsi_cid_count = BE2_MAX_SESSIONS / 2; 301 + 302 + if (!is_chip_be2_be3r(phba)) { 303 + phba->fw_config.eqid_count = pfw_cfg->eqid_count; 304 + phba->fw_config.cqid_count = pfw_cfg->cqid_count; 305 + 306 + beiscsi_log(phba, KERN_INFO, 307 + BEISCSI_LOG_INIT, 308 + "BG_%d : EQ_Count : %d CQ_Count : %d\n", 309 + phba->fw_config.eqid_count, 310 + phba->fw_config.cqid_count); 327 311 } 312 + 313 + for (ulp_num = 0; ulp_num < 
BEISCSI_ULP_COUNT; ulp_num++) 314 + if (pfw_cfg->ulp[ulp_num].ulp_mode & 315 + BEISCSI_ULP_ISCSI_INI_MODE) 316 + set_bit(ulp_num, 317 + &phba->fw_config.ulp_supported); 318 + 319 + phba->fw_config.phys_port = pfw_cfg->phys_port; 320 + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 321 + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 322 + 323 + phba->fw_config.iscsi_cid_start[ulp_num] = 324 + pfw_cfg->ulp[ulp_num].sq_base; 325 + phba->fw_config.iscsi_cid_count[ulp_num] = 326 + pfw_cfg->ulp[ulp_num].sq_count; 327 + 328 + phba->fw_config.iscsi_icd_start[ulp_num] = 329 + pfw_cfg->ulp[ulp_num].icd_base; 330 + phba->fw_config.iscsi_icd_count[ulp_num] = 331 + pfw_cfg->ulp[ulp_num].icd_count; 332 + 333 + phba->fw_config.iscsi_chain_start[ulp_num] = 334 + pfw_cfg->chain_icd[ulp_num].chain_base; 335 + phba->fw_config.iscsi_chain_count[ulp_num] = 336 + pfw_cfg->chain_icd[ulp_num].chain_count; 337 + 338 + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 339 + "BG_%d : Function loaded on ULP : %d\n" 340 + "\tiscsi_cid_count : %d\n" 341 + "\tiscsi_cid_start : %d\n" 342 + "\t iscsi_icd_count : %d\n" 343 + "\t iscsi_icd_start : %d\n", 344 + ulp_num, 345 + phba->fw_config. 346 + iscsi_cid_count[ulp_num], 347 + phba->fw_config. 348 + iscsi_cid_start[ulp_num], 349 + phba->fw_config. 350 + iscsi_icd_count[ulp_num], 351 + phba->fw_config. 
352 + iscsi_icd_start[ulp_num]); 353 + } 354 + } 355 + 356 + phba->fw_config.dual_ulp_aware = (pfw_cfg->function_mode & 357 + BEISCSI_FUNC_DUA_MODE); 358 + 359 + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 360 + "BG_%d : DUA Mode : 0x%x\n", 361 + phba->fw_config.dual_ulp_aware); 362 + 328 363 } else { 329 - beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 364 + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 330 365 "BG_%d : Failed in mgmt_get_fw_config\n"); 366 + status = -EINVAL; 331 367 } 332 368 333 369 spin_unlock(&ctrl->mbox_lock); ··· 508 448 return tag; 509 449 } 510 450 511 - int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute) 451 + /** 452 + * mgmt_epfw_cleanup()- Inform FW to cleanup data structures. 453 + * @phba: pointer to dev priv structure 454 + * @ulp_num: ULP number. 455 + * 456 + * return 457 + * Success: 0 458 + * Failure: Non-Zero Value 459 + **/ 460 + int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short ulp_num) 512 461 { 513 462 struct be_ctrl_info *ctrl = &phba->ctrl; 514 463 struct be_mcc_wrb *wrb = wrb_from_mccq(phba); ··· 531 462 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, 532 463 OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req)); 533 464 534 - req->chute = chute; 535 - req->hdr_ring_id = cpu_to_le16(HWI_GET_DEF_HDRQ_ID(phba)); 536 - req->data_ring_id = cpu_to_le16(HWI_GET_DEF_BUFQ_ID(phba)); 465 + req->chute = (1 << ulp_num); 466 + req->hdr_ring_id = cpu_to_le16(HWI_GET_DEF_HDRQ_ID(phba, ulp_num)); 467 + req->data_ring_id = cpu_to_le16(HWI_GET_DEF_BUFQ_ID(phba, ulp_num)); 537 468 538 469 status = be_mcc_notify_wait(phba); 539 470 if (status) ··· 654 585 return tag; 655 586 } 656 587 588 + /** 589 + * mgmt_open_connection()- Establish a TCP CXN 590 + * @dst_addr: Destination Address 591 + * @beiscsi_ep: ptr to device endpoint struct 592 + * @nonemb_cmd: ptr to memory allocated for command 593 + * 594 + * return 595 + * Success: Tag number of the MBX Command issued 596 + * Failure: Error code 597 + **/ 657 598 
int mgmt_open_connection(struct beiscsi_hba *phba, 658 599 struct sockaddr *dst_addr, 659 600 struct beiscsi_endpoint *beiscsi_ep, ··· 681 602 struct phys_addr template_address = { 0, 0 }; 682 603 struct phys_addr *ptemplate_address; 683 604 unsigned int tag = 0; 684 - unsigned int i; 605 + unsigned int i, ulp_num; 685 606 unsigned short cid = beiscsi_ep->ep_cid; 686 607 struct be_sge *sge; 687 608 688 609 phwi_ctrlr = phba->phwi_ctrlr; 689 610 phwi_context = phwi_ctrlr->phwi_ctxt; 690 - def_hdr_id = (unsigned short)HWI_GET_DEF_HDRQ_ID(phba); 691 - def_data_id = (unsigned short)HWI_GET_DEF_BUFQ_ID(phba); 611 + 612 + ulp_num = phwi_ctrlr->wrb_context[BE_GET_CRI_FROM_CID(cid)].ulp_num; 613 + 614 + def_hdr_id = (unsigned short)HWI_GET_DEF_HDRQ_ID(phba, ulp_num); 615 + def_data_id = (unsigned short)HWI_GET_DEF_BUFQ_ID(phba, ulp_num); 692 616 693 617 ptemplate_address = &template_address; 694 618 ISCSI_GET_PDU_TEMPLATE_ADDRESS(phba, ptemplate_address); ··· 830 748 831 749 rc = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd->va); 832 750 if (rc) { 751 + /* Check if the IOCTL needs to be re-issued */ 752 + if (rc == -EAGAIN) 753 + return rc; 754 + 833 755 beiscsi_log(phba, KERN_ERR, 834 756 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, 835 757 "BG_%d : mgmt_exec_nonemb_cmd Failed status\n"); 836 758 837 - rc = -EIO; 838 759 goto free_cmd; 839 760 } 840 761 ··· 946 861 uint32_t boot_proto) 947 862 { 948 863 struct be_cmd_get_def_gateway_resp gtway_addr_set; 949 - struct be_cmd_get_if_info_resp if_info; 864 + struct be_cmd_get_if_info_resp *if_info; 950 865 struct be_cmd_set_dhcp_req *dhcpreq; 951 866 struct be_cmd_rel_dhcp_req *reldhcp; 952 867 struct be_dma_mem nonemb_cmd; ··· 957 872 if (mgmt_get_all_if_id(phba)) 958 873 return -EIO; 959 874 960 - memset(&if_info, 0, sizeof(if_info)); 961 875 ip_type = (ip_param->param == ISCSI_NET_PARAM_IPV6_ADDR) ? 
962 876 BE2_IPV6 : BE2_IPV4 ; 963 877 964 878 rc = mgmt_get_if_info(phba, ip_type, &if_info); 965 - if (rc) 879 + if (rc) { 880 + kfree(if_info); 966 881 return rc; 882 + } 967 883 968 884 if (boot_proto == ISCSI_BOOTPROTO_DHCP) { 969 - if (if_info.dhcp_state) { 885 + if (if_info->dhcp_state) { 970 886 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, 971 887 "BG_%d : DHCP Already Enabled\n"); 972 888 return 0; ··· 980 894 IP_V6_LEN : IP_V4_LEN; 981 895 982 896 } else { 983 - if (if_info.dhcp_state) { 897 + if (if_info->dhcp_state) { 984 898 985 - memset(&if_info, 0, sizeof(if_info)); 899 + memset(if_info, 0, sizeof(*if_info)); 986 900 rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd, 987 901 OPCODE_COMMON_ISCSI_NTWK_REL_STATELESS_IP_ADDR, 988 902 sizeof(*reldhcp)); ··· 1005 919 } 1006 920 1007 921 /* Delete the Static IP Set */ 1008 - if (if_info.ip_addr.addr[0]) { 1009 - rc = mgmt_static_ip_modify(phba, &if_info, ip_param, NULL, 922 + if (if_info->ip_addr.addr[0]) { 923 + rc = mgmt_static_ip_modify(phba, if_info, ip_param, NULL, 1010 924 IP_ACTION_DEL); 1011 925 if (rc) 1012 926 return rc; ··· 1052 966 1053 967 return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0); 1054 968 } else { 1055 - return mgmt_static_ip_modify(phba, &if_info, ip_param, 969 + return mgmt_static_ip_modify(phba, if_info, ip_param, 1056 970 subnet_param, IP_ACTION_ADD); 1057 971 } 1058 972 ··· 1117 1031 } 1118 1032 1119 1033 int mgmt_get_if_info(struct beiscsi_hba *phba, int ip_type, 1120 - struct be_cmd_get_if_info_resp *if_info) 1034 + struct be_cmd_get_if_info_resp **if_info) 1121 1035 { 1122 1036 struct be_cmd_get_if_info_req *req; 1123 1037 struct be_dma_mem nonemb_cmd; 1038 + uint32_t ioctl_size = sizeof(struct be_cmd_get_if_info_resp); 1124 1039 int rc; 1125 1040 1126 1041 if (mgmt_get_all_if_id(phba)) 1127 1042 return -EIO; 1128 1043 1129 - rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd, 1130 - OPCODE_COMMON_ISCSI_NTWK_GET_IF_INFO, 1131 - sizeof(*if_info)); 1132 - if (rc) 1133 - return rc; 
1044 + do { 1045 + rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd, 1046 + OPCODE_COMMON_ISCSI_NTWK_GET_IF_INFO, 1047 + ioctl_size); 1048 + if (rc) 1049 + return rc; 1134 1050 1135 - req = nonemb_cmd.va; 1136 - req->interface_hndl = phba->interface_handle; 1137 - req->ip_type = ip_type; 1051 + req = nonemb_cmd.va; 1052 + req->interface_hndl = phba->interface_handle; 1053 + req->ip_type = ip_type; 1138 1054 1139 - return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, if_info, 1140 - sizeof(*if_info)); 1055 + /* Allocate memory for if_info */ 1056 + *if_info = kzalloc(ioctl_size, GFP_KERNEL); 1057 + if (!*if_info) { 1058 + beiscsi_log(phba, KERN_ERR, 1059 + BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, 1060 + "BG_%d : Memory Allocation Failure\n"); 1061 + 1062 + /* Free the DMA memory for the IOCTL issuing */ 1063 + pci_free_consistent(phba->ctrl.pdev, 1064 + nonemb_cmd.size, 1065 + nonemb_cmd.va, 1066 + nonemb_cmd.dma); 1067 + return -ENOMEM; 1068 + } 1069 + 1070 + rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, *if_info, 1071 + ioctl_size); 1072 + 1073 + /* Check if the error is because of Insufficent_Buffer */ 1074 + if (rc == -EAGAIN) { 1075 + 1076 + /* Get the new memory size */ 1077 + ioctl_size = ((struct be_cmd_resp_hdr *) 1078 + nonemb_cmd.va)->actual_resp_len; 1079 + ioctl_size += sizeof(struct be_cmd_req_hdr); 1080 + 1081 + /* Free the previous allocated DMA memory */ 1082 + pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 1083 + nonemb_cmd.va, 1084 + nonemb_cmd.dma); 1085 + 1086 + /* Free the virtual memory */ 1087 + kfree(*if_info); 1088 + } else 1089 + break; 1090 + } while (true); 1091 + return rc; 1141 1092 } 1142 1093 1143 1094 int mgmt_get_nic_conf(struct beiscsi_hba *phba, ··· 1404 1281 } 1405 1282 1406 1283 /** 1407 - * beiscsi_active_cid_disp()- Display Sessions Active 1284 + * beiscsi_active_session_disp()- Display Sessions Active 1408 1285 * @dev: ptr to device not used. 1409 1286 * @attr: device attribute, not used. 
1410 1287 * @buf: contains formatted text Session Count ··· 1413 1290 * size of the formatted string 1414 1291 **/ 1415 1292 ssize_t 1416 - beiscsi_active_cid_disp(struct device *dev, struct device_attribute *attr, 1293 + beiscsi_active_session_disp(struct device *dev, struct device_attribute *attr, 1417 1294 char *buf) 1418 1295 { 1419 1296 struct Scsi_Host *shost = class_to_shost(dev); 1420 1297 struct beiscsi_hba *phba = iscsi_host_priv(shost); 1298 + uint16_t avlbl_cids = 0, ulp_num, len = 0, total_cids = 0; 1421 1299 1422 - return snprintf(buf, PAGE_SIZE, "%d\n", 1423 - (phba->params.cxns_per_ctrl - phba->avlbl_cids)); 1300 + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 1301 + if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { 1302 + avlbl_cids = BEISCSI_ULP_AVLBL_CID(phba, ulp_num); 1303 + total_cids = BEISCSI_GET_CID_COUNT(phba, ulp_num); 1304 + len += snprintf(buf+len, PAGE_SIZE - len, 1305 + "ULP%d : %d\n", ulp_num, 1306 + (total_cids - avlbl_cids)); 1307 + } else 1308 + len += snprintf(buf+len, PAGE_SIZE - len, 1309 + "ULP%d : %d\n", ulp_num, 0); 1310 + } 1311 + 1312 + return len; 1313 + } 1314 + 1315 + /** 1316 + * beiscsi_free_session_disp()- Display Avaliable Session 1317 + * @dev: ptr to device not used. 1318 + * @attr: device attribute, not used. 
1319 + * @buf: contains formatted text Session Count 1320 + * 1321 + * return 1322 + * size of the formatted string 1323 + **/ 1324 + ssize_t 1325 + beiscsi_free_session_disp(struct device *dev, struct device_attribute *attr, 1326 + char *buf) 1327 + { 1328 + struct Scsi_Host *shost = class_to_shost(dev); 1329 + struct beiscsi_hba *phba = iscsi_host_priv(shost); 1330 + uint16_t ulp_num, len = 0; 1331 + 1332 + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 1333 + if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) 1334 + len += snprintf(buf+len, PAGE_SIZE - len, 1335 + "ULP%d : %d\n", ulp_num, 1336 + BEISCSI_ULP_AVLBL_CID(phba, ulp_num)); 1337 + else 1338 + len += snprintf(buf+len, PAGE_SIZE - len, 1339 + "ULP%d : %d\n", ulp_num, 0); 1340 + } 1341 + 1342 + return len; 1424 1343 } 1425 1344 1426 1345 /** ··· 1503 1338 } 1504 1339 } 1505 1340 1341 + /** 1342 + * beiscsi_phys_port()- Display Physical Port Identifier 1343 + * @dev: ptr to device not used. 1344 + * @attr: device attribute, not used. 
1345 + * @buf: contains formatted text port identifier 1346 + * 1347 + * return 1348 + * size of the formatted string 1349 + **/ 1350 + ssize_t 1351 + beiscsi_phys_port_disp(struct device *dev, struct device_attribute *attr, 1352 + char *buf) 1353 + { 1354 + struct Scsi_Host *shost = class_to_shost(dev); 1355 + struct beiscsi_hba *phba = iscsi_host_priv(shost); 1356 + 1357 + return snprintf(buf, PAGE_SIZE, "Port Identifier : %d\n", 1358 + phba->fw_config.phys_port); 1359 + } 1506 1360 1507 1361 void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params, 1508 1362 struct wrb_handle *pwrb_handle, ··· 1595 1411 1596 1412 memset(pwrb, 0, sizeof(*pwrb)); 1597 1413 1598 - AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, 1599 - max_burst_length, pwrb, params->dw[offsetof 1600 - (struct amap_beiscsi_offload_params, 1601 - max_burst_length) / 32]); 1602 1414 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, 1603 1415 max_burst_length, pwrb, params->dw[offsetof 1604 1416 (struct amap_beiscsi_offload_params, ··· 1616 1436 params->dw[offsetof(struct amap_beiscsi_offload_params, 1617 1437 first_burst_length) / 32]); 1618 1438 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, 1619 - max_recv_dataseg_len, pwrb, BEISCSI_MAX_RECV_DATASEG_LEN); 1439 + max_recv_dataseg_len, pwrb, 1440 + params->dw[offsetof(struct amap_beiscsi_offload_params, 1441 + max_recv_data_segment_length) / 32]); 1620 1442 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, 1621 1443 max_cxns, pwrb, BEISCSI_MAX_CXNS); 1622 1444 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, erl, pwrb,
+10 -3
drivers/scsi/be2iscsi/be_mgmt.h
··· 294 294 struct be_cmd_get_nic_conf_resp *mac); 295 295 296 296 int mgmt_get_if_info(struct beiscsi_hba *phba, int ip_type, 297 - struct be_cmd_get_if_info_resp *if_info); 297 + struct be_cmd_get_if_info_resp **if_info); 298 298 299 299 int mgmt_get_gateway(struct beiscsi_hba *phba, int ip_type, 300 300 struct be_cmd_get_def_gateway_resp *gateway); ··· 315 315 ssize_t beiscsi_fw_ver_disp(struct device *dev, 316 316 struct device_attribute *attr, char *buf); 317 317 318 - ssize_t beiscsi_active_cid_disp(struct device *dev, 319 - struct device_attribute *attr, char *buf); 318 + ssize_t beiscsi_active_session_disp(struct device *dev, 319 + struct device_attribute *attr, char *buf); 320 320 321 321 ssize_t beiscsi_adap_family_disp(struct device *dev, 322 322 struct device_attribute *attr, char *buf); 323 + 324 + 325 + ssize_t beiscsi_free_session_disp(struct device *dev, 326 + struct device_attribute *attr, char *buf); 327 + 328 + ssize_t beiscsi_phys_port_disp(struct device *dev, 329 + struct device_attribute *attr, char *buf); 323 330 324 331 void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params, 325 332 struct wrb_handle *pwrb_handle,
+1 -1
drivers/scsi/bnx2fc/bnx2fc.h
··· 64 64 #include "bnx2fc_constants.h" 65 65 66 66 #define BNX2FC_NAME "bnx2fc" 67 - #define BNX2FC_VERSION "1.0.14" 67 + #define BNX2FC_VERSION "2.4.1" 68 68 69 69 #define PFX "bnx2fc: " 70 70
+49 -20
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
··· 22 22 23 23 #define DRV_MODULE_NAME "bnx2fc" 24 24 #define DRV_MODULE_VERSION BNX2FC_VERSION 25 - #define DRV_MODULE_RELDATE "Mar 08, 2013" 25 + #define DRV_MODULE_RELDATE "Sep 17, 2013" 26 26 27 27 28 28 static char version[] = ··· 542 542 vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id)); 543 543 if (vn_port) { 544 544 port = lport_priv(vn_port); 545 - if (compare_ether_addr(port->data_src_addr, dest_mac) 546 - != 0) { 545 + if (!ether_addr_equal(port->data_src_addr, dest_mac)) { 547 546 BNX2FC_HBA_DBG(lport, "fpma mismatch\n"); 548 547 put_cpu(); 549 548 kfree_skb(skb); ··· 1380 1381 return NULL; 1381 1382 } 1382 1383 ctlr = fcoe_ctlr_device_priv(ctlr_dev); 1384 + ctlr->cdev = ctlr_dev; 1383 1385 interface = fcoe_ctlr_priv(ctlr); 1384 1386 dev_hold(netdev); 1385 1387 kref_init(&interface->kref); ··· 2004 2004 set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic); 2005 2005 } 2006 2006 2007 + /* Assumes rtnl_lock and the bnx2fc_dev_lock are already taken */ 2008 + static int __bnx2fc_disable(struct fcoe_ctlr *ctlr) 2009 + { 2010 + struct bnx2fc_interface *interface = fcoe_ctlr_priv(ctlr); 2011 + 2012 + if (interface->enabled == true) { 2013 + if (!ctlr->lp) { 2014 + pr_err(PFX "__bnx2fc_disable: lport not found\n"); 2015 + return -ENODEV; 2016 + } else { 2017 + interface->enabled = false; 2018 + fcoe_ctlr_link_down(ctlr); 2019 + fcoe_clean_pending_queue(ctlr->lp); 2020 + } 2021 + } 2022 + return 0; 2023 + } 2024 + 2007 2025 /** 2008 2026 * Deperecated: Use bnx2fc_enabled() 2009 2027 */ ··· 2036 2018 2037 2019 interface = bnx2fc_interface_lookup(netdev); 2038 2020 ctlr = bnx2fc_to_ctlr(interface); 2039 - if (!interface || !ctlr->lp) { 2040 - rc = -ENODEV; 2041 - printk(KERN_ERR PFX "bnx2fc_disable: interface or lport not found\n"); 2042 - } else { 2043 - interface->enabled = false; 2044 - fcoe_ctlr_link_down(ctlr); 2045 - fcoe_clean_pending_queue(ctlr->lp); 2046 - } 2047 2021 2022 + if (!interface) { 2023 + rc = -ENODEV; 2024 + pr_err(PFX 
"bnx2fc_disable: interface not found\n"); 2025 + } else { 2026 + rc = __bnx2fc_disable(ctlr); 2027 + } 2048 2028 mutex_unlock(&bnx2fc_dev_lock); 2049 2029 rtnl_unlock(); 2050 2030 return rc; 2031 + } 2032 + 2033 + static int __bnx2fc_enable(struct fcoe_ctlr *ctlr) 2034 + { 2035 + struct bnx2fc_interface *interface = fcoe_ctlr_priv(ctlr); 2036 + 2037 + if (interface->enabled == false) { 2038 + if (!ctlr->lp) { 2039 + pr_err(PFX "__bnx2fc_enable: lport not found\n"); 2040 + return -ENODEV; 2041 + } else if (!bnx2fc_link_ok(ctlr->lp)) { 2042 + fcoe_ctlr_link_up(ctlr); 2043 + interface->enabled = true; 2044 + } 2045 + } 2046 + return 0; 2051 2047 } 2052 2048 2053 2049 /** ··· 2078 2046 2079 2047 interface = bnx2fc_interface_lookup(netdev); 2080 2048 ctlr = bnx2fc_to_ctlr(interface); 2081 - if (!interface || !ctlr->lp) { 2049 + if (!interface) { 2082 2050 rc = -ENODEV; 2083 - printk(KERN_ERR PFX "bnx2fc_enable: interface or lport not found\n"); 2084 - } else if (!bnx2fc_link_ok(ctlr->lp)) { 2085 - fcoe_ctlr_link_up(ctlr); 2086 - interface->enabled = true; 2051 + pr_err(PFX "bnx2fc_enable: interface not found\n"); 2052 + } else { 2053 + rc = __bnx2fc_enable(ctlr); 2087 2054 } 2088 2055 2089 2056 mutex_unlock(&bnx2fc_dev_lock); ··· 2103 2072 static int bnx2fc_ctlr_enabled(struct fcoe_ctlr_device *cdev) 2104 2073 { 2105 2074 struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(cdev); 2106 - struct fc_lport *lport = ctlr->lp; 2107 - struct net_device *netdev = bnx2fc_netdev(lport); 2108 2075 2109 2076 switch (cdev->enabled) { 2110 2077 case FCOE_CTLR_ENABLED: 2111 - return bnx2fc_enable(netdev); 2078 + return __bnx2fc_enable(ctlr); 2112 2079 case FCOE_CTLR_DISABLED: 2113 - return bnx2fc_disable(netdev); 2080 + return __bnx2fc_disable(ctlr); 2114 2081 case FCOE_CTLR_UNUSED: 2115 2082 default: 2116 2083 return -ENOTSUPP;
+6
drivers/scsi/bnx2fc/bnx2fc_io.c
··· 1246 1246 kref_put(&io_req->refcount, 1247 1247 bnx2fc_cmd_release); /* drop timer hold */ 1248 1248 rc = bnx2fc_expl_logo(lport, io_req); 1249 + /* This only occurs when an task abort was requested while ABTS 1250 + is in progress. Setting the IO_CLEANUP flag will skip the 1251 + RRQ process in the case when the fw generated SCSI_CMD cmpl 1252 + was a result from the ABTS request rather than the CLEANUP 1253 + request */ 1254 + set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags); 1249 1255 goto out; 1250 1256 } 1251 1257
+13 -11
drivers/scsi/dc395x.c
··· 308 308 struct timer_list waiting_timer; 309 309 struct timer_list selto_timer; 310 310 311 + unsigned long last_reset; 312 + 311 313 u16 srb_count; 312 314 313 315 u8 sel_timeout; ··· 862 860 init_timer(&acb->waiting_timer); 863 861 acb->waiting_timer.function = waiting_timeout; 864 862 acb->waiting_timer.data = (unsigned long) acb; 865 - if (time_before(jiffies + to, acb->scsi_host->last_reset - HZ / 2)) 863 + if (time_before(jiffies + to, acb->last_reset - HZ / 2)) 866 864 acb->waiting_timer.expires = 867 - acb->scsi_host->last_reset - HZ / 2 + 1; 865 + acb->last_reset - HZ / 2 + 1; 868 866 else 869 867 acb->waiting_timer.expires = jiffies + to + 1; 870 868 add_timer(&acb->waiting_timer); ··· 1321 1319 udelay(500); 1322 1320 1323 1321 /* We may be in serious trouble. Wait some seconds */ 1324 - acb->scsi_host->last_reset = 1322 + acb->last_reset = 1325 1323 jiffies + 3 * HZ / 2 + 1326 1324 HZ * acb->eeprom.delay_time; 1327 1325 ··· 1464 1462 acb->selto_timer.function = selection_timeout_missed; 1465 1463 acb->selto_timer.data = (unsigned long) acb; 1466 1464 if (time_before 1467 - (jiffies + HZ, acb->scsi_host->last_reset + HZ / 2)) 1465 + (jiffies + HZ, acb->last_reset + HZ / 2)) 1468 1466 acb->selto_timer.expires = 1469 - acb->scsi_host->last_reset + HZ / 2 + 1; 1467 + acb->last_reset + HZ / 2 + 1; 1470 1468 else 1471 1469 acb->selto_timer.expires = jiffies + HZ + 1; 1472 1470 add_timer(&acb->selto_timer); ··· 1537 1535 } 1538 1536 /* Allow starting of SCSI commands half a second before we allow the mid-level 1539 1537 * to queue them again after a reset */ 1540 - if (time_before(jiffies, acb->scsi_host->last_reset - HZ / 2)) { 1538 + if (time_before(jiffies, acb->last_reset - HZ / 2)) { 1541 1539 dprintkdbg(DBG_KG, "start_scsi: Refuse cmds (reset wait)\n"); 1542 1540 return 1; 1543 1541 } ··· 3033 3031 dprintkl(KERN_ERR, "disconnect: No such device\n"); 3034 3032 udelay(500); 3035 3033 /* Suspend queue for a while */ 3036 - acb->scsi_host->last_reset = 
3034 + acb->last_reset = 3037 3035 jiffies + HZ / 2 + 3038 3036 HZ * acb->eeprom.delay_time; 3039 3037 clear_fifo(acb, "disconnectEx"); ··· 3055 3053 waiting_process_next(acb); 3056 3054 } else if (srb->state & SRB_ABORT_SENT) { 3057 3055 dcb->flag &= ~ABORT_DEV_; 3058 - acb->scsi_host->last_reset = jiffies + HZ / 2 + 1; 3056 + acb->last_reset = jiffies + HZ / 2 + 1; 3059 3057 dprintkl(KERN_ERR, "disconnect: SRB_ABORT_SENT\n"); 3060 3058 doing_srb_done(acb, DID_ABORT, srb->cmd, 1); 3061 3059 waiting_process_next(acb); ··· 3651 3649 /*DC395x_write8(acb, TRM_S1040_DMA_CONTROL,STOPDMAXFER); */ 3652 3650 udelay(500); 3653 3651 /* Maybe we locked up the bus? Then lets wait even longer ... */ 3654 - acb->scsi_host->last_reset = 3652 + acb->last_reset = 3655 3653 jiffies + 5 * HZ / 2 + 3656 3654 HZ * acb->eeprom.delay_time; 3657 3655 ··· 4428 4426 host->dma_channel = -1; 4429 4427 host->unique_id = acb->io_port_base; 4430 4428 host->irq = acb->irq_level; 4431 - host->last_reset = jiffies; 4429 + acb->last_reset = jiffies; 4432 4430 4433 4431 host->max_id = 16; 4434 4432 if (host->max_id - 1 == eeprom->scsi_id) ··· 4486 4484 /*spin_unlock_irq (&io_request_lock); */ 4487 4485 udelay(500); 4488 4486 4489 - acb->scsi_host->last_reset = 4487 + acb->last_reset = 4490 4488 jiffies + HZ / 2 + 4491 4489 HZ * acb->eeprom.delay_time; 4492 4490
+21 -10
drivers/scsi/device_handler/scsi_dh_alua.c
··· 481 481 * Power On, Reset, or Bus Device Reset, just retry. 482 482 */ 483 483 return ADD_TO_MLQUEUE; 484 + if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x04) 485 + /* 486 + * Device internal reset 487 + */ 488 + return ADD_TO_MLQUEUE; 484 489 if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x01) 485 490 /* 486 491 * Mode Parameters Changed ··· 522 517 /* 523 518 * alua_rtpg - Evaluate REPORT TARGET GROUP STATES 524 519 * @sdev: the device to be evaluated. 520 + * @wait_for_transition: if nonzero, wait ALUA_FAILOVER_TIMEOUT seconds for device to exit transitioning state 525 521 * 526 522 * Evaluate the Target Port Group State. 527 523 * Returns SCSI_DH_DEV_OFFLINED if the path is 528 524 * found to be unusable. 529 525 */ 530 - static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h) 526 + static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h, int wait_for_transition) 531 527 { 532 528 struct scsi_sense_hdr sense_hdr; 533 529 int len, k, off, valid_states = 0; ··· 600 594 else 601 595 h->transition_tmo = ALUA_FAILOVER_TIMEOUT; 602 596 603 - if (orig_transition_tmo != h->transition_tmo) { 597 + if (wait_for_transition && (orig_transition_tmo != h->transition_tmo)) { 604 598 sdev_printk(KERN_INFO, sdev, 605 599 "%s: transition timeout set to %d seconds\n", 606 600 ALUA_DH_NAME, h->transition_tmo); ··· 638 632 639 633 switch (h->state) { 640 634 case TPGS_STATE_TRANSITIONING: 641 - if (time_before(jiffies, expiry)) { 642 - /* State transition, retry */ 643 - interval += 2000; 644 - msleep(interval); 645 - goto retry; 635 + if (wait_for_transition) { 636 + if (time_before(jiffies, expiry)) { 637 + /* State transition, retry */ 638 + interval += 2000; 639 + msleep(interval); 640 + goto retry; 641 + } 642 + err = SCSI_DH_RETRY; 643 + } else { 644 + err = SCSI_DH_OK; 646 645 } 646 + 647 647 /* Transitioning time exceeded, set port to standby */ 648 - err = SCSI_DH_RETRY; 649 648 h->state = TPGS_STATE_STANDBY; 650 649 break; 651 650 
case TPGS_STATE_OFFLINE: ··· 684 673 if (err != SCSI_DH_OK) 685 674 goto out; 686 675 687 - err = alua_rtpg(sdev, h); 676 + err = alua_rtpg(sdev, h, 0); 688 677 if (err != SCSI_DH_OK) 689 678 goto out; 690 679 ··· 744 733 int err = SCSI_DH_OK; 745 734 int stpg = 0; 746 735 747 - err = alua_rtpg(sdev, h); 736 + err = alua_rtpg(sdev, h, 1); 748 737 if (err != SCSI_DH_OK) 749 738 goto out; 750 739
+1
drivers/scsi/device_handler/scsi_dh_rdac.c
··· 786 786 {"IBM", "1742"}, 787 787 {"IBM", "1745"}, 788 788 {"IBM", "1746"}, 789 + {"IBM", "1813"}, 789 790 {"IBM", "1814"}, 790 791 {"IBM", "1815"}, 791 792 {"IBM", "1818"},
+13 -22
drivers/scsi/dpt_i2o.c
··· 448 448 } 449 449 450 450 rmb(); 451 - /* 452 - * TODO: I need to block here if I am processing ioctl cmds 453 - * but if the outstanding cmds all finish before the ioctl, 454 - * the scsi-core will not know to start sending cmds to me again. 455 - * I need to a way to restart the scsi-cores queues or should I block 456 - * calling scsi_done on the outstanding cmds instead 457 - * for now we don't set the IOCTL state 458 - */ 459 - if(((pHba->state) & DPTI_STATE_IOCTL) || ((pHba->state) & DPTI_STATE_RESET)) { 460 - pHba->host->last_reset = jiffies; 461 - pHba->host->resetting = 1; 462 - return 1; 463 - } 451 + if ((pHba->state) & DPTI_STATE_RESET) 452 + return SCSI_MLQUEUE_HOST_BUSY; 464 453 465 454 // TODO if the cmd->device if offline then I may need to issue a bus rescan 466 455 // followed by a get_lct to see if the device is there anymore ··· 1800 1811 } 1801 1812 1802 1813 do { 1803 - if(pHba->host) 1814 + /* 1815 + * Stop any new commands from enterring the 1816 + * controller while processing the ioctl 1817 + */ 1818 + if (pHba->host) { 1819 + scsi_block_requests(pHba->host); 1804 1820 spin_lock_irqsave(pHba->host->host_lock, flags); 1805 - // This state stops any new commands from enterring the 1806 - // controller while processing the ioctl 1807 - // pHba->state |= DPTI_STATE_IOCTL; 1808 - // We can't set this now - The scsi subsystem sets host_blocked and 1809 - // the queue empties and stops. We need a way to restart the queue 1821 + } 1810 1822 rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER); 1811 1823 if (rcode != 0) 1812 1824 printk("adpt_i2o_passthru: post wait failed %d %p\n", 1813 1825 rcode, reply); 1814 - // pHba->state &= ~DPTI_STATE_IOCTL; 1815 - if(pHba->host) 1826 + if (pHba->host) { 1816 1827 spin_unlock_irqrestore(pHba->host->host_lock, flags); 1817 - } while(rcode == -ETIMEDOUT); 1828 + scsi_unblock_requests(pHba->host); 1829 + } 1830 + } while (rcode == -ETIMEDOUT); 1818 1831 1819 1832 if(rcode){ 1820 1833 goto cleanup;
-1
drivers/scsi/dpti.h
··· 202 202 203 203 // HBA state flags 204 204 #define DPTI_STATE_RESET (0x01) 205 - #define DPTI_STATE_IOCTL (0x02) 206 205 207 206 typedef struct _adpt_hba { 208 207 struct _adpt_hba *next;
+61 -71
drivers/scsi/esas2r/esas2r.h
··· 799 799 struct esas2r_target *targetdb_end; 800 800 unsigned char *regs; 801 801 unsigned char *data_window; 802 - u32 volatile flags; 803 - #define AF_PORT_CHANGE (u32)(0x00000001) 804 - #define AF_CHPRST_NEEDED (u32)(0x00000004) 805 - #define AF_CHPRST_PENDING (u32)(0x00000008) 806 - #define AF_CHPRST_DETECTED (u32)(0x00000010) 807 - #define AF_BUSRST_NEEDED (u32)(0x00000020) 808 - #define AF_BUSRST_PENDING (u32)(0x00000040) 809 - #define AF_BUSRST_DETECTED (u32)(0x00000080) 810 - #define AF_DISABLED (u32)(0x00000100) 811 - #define AF_FLASH_LOCK (u32)(0x00000200) 812 - #define AF_OS_RESET (u32)(0x00002000) 813 - #define AF_FLASHING (u32)(0x00004000) 814 - #define AF_POWER_MGT (u32)(0x00008000) 815 - #define AF_NVR_VALID (u32)(0x00010000) 816 - #define AF_DEGRADED_MODE (u32)(0x00020000) 817 - #define AF_DISC_PENDING (u32)(0x00040000) 818 - #define AF_TASKLET_SCHEDULED (u32)(0x00080000) 819 - #define AF_HEARTBEAT (u32)(0x00200000) 820 - #define AF_HEARTBEAT_ENB (u32)(0x00400000) 821 - #define AF_NOT_PRESENT (u32)(0x00800000) 822 - #define AF_CHPRST_STARTED (u32)(0x01000000) 823 - #define AF_FIRST_INIT (u32)(0x02000000) 824 - #define AF_POWER_DOWN (u32)(0x04000000) 825 - #define AF_DISC_IN_PROG (u32)(0x08000000) 826 - #define AF_COMM_LIST_TOGGLE (u32)(0x10000000) 827 - #define AF_LEGACY_SGE_MODE (u32)(0x20000000) 828 - #define AF_DISC_POLLED (u32)(0x40000000) 829 - u32 volatile flags2; 830 - #define AF2_SERIAL_FLASH (u32)(0x00000001) 831 - #define AF2_DEV_SCAN (u32)(0x00000002) 832 - #define AF2_DEV_CNT_OK (u32)(0x00000004) 833 - #define AF2_COREDUMP_AVAIL (u32)(0x00000008) 834 - #define AF2_COREDUMP_SAVED (u32)(0x00000010) 835 - #define AF2_VDA_POWER_DOWN (u32)(0x00000100) 836 - #define AF2_THUNDERLINK (u32)(0x00000200) 837 - #define AF2_THUNDERBOLT (u32)(0x00000400) 838 - #define AF2_INIT_DONE (u32)(0x00000800) 839 - #define AF2_INT_PENDING (u32)(0x00001000) 840 - #define AF2_TIMER_TICK (u32)(0x00002000) 841 - #define AF2_IRQ_CLAIMED (u32)(0x00004000) 842 - 
#define AF2_MSI_ENABLED (u32)(0x00008000) 802 + long flags; 803 + #define AF_PORT_CHANGE 0 804 + #define AF_CHPRST_NEEDED 1 805 + #define AF_CHPRST_PENDING 2 806 + #define AF_CHPRST_DETECTED 3 807 + #define AF_BUSRST_NEEDED 4 808 + #define AF_BUSRST_PENDING 5 809 + #define AF_BUSRST_DETECTED 6 810 + #define AF_DISABLED 7 811 + #define AF_FLASH_LOCK 8 812 + #define AF_OS_RESET 9 813 + #define AF_FLASHING 10 814 + #define AF_POWER_MGT 11 815 + #define AF_NVR_VALID 12 816 + #define AF_DEGRADED_MODE 13 817 + #define AF_DISC_PENDING 14 818 + #define AF_TASKLET_SCHEDULED 15 819 + #define AF_HEARTBEAT 16 820 + #define AF_HEARTBEAT_ENB 17 821 + #define AF_NOT_PRESENT 18 822 + #define AF_CHPRST_STARTED 19 823 + #define AF_FIRST_INIT 20 824 + #define AF_POWER_DOWN 21 825 + #define AF_DISC_IN_PROG 22 826 + #define AF_COMM_LIST_TOGGLE 23 827 + #define AF_LEGACY_SGE_MODE 24 828 + #define AF_DISC_POLLED 25 829 + long flags2; 830 + #define AF2_SERIAL_FLASH 0 831 + #define AF2_DEV_SCAN 1 832 + #define AF2_DEV_CNT_OK 2 833 + #define AF2_COREDUMP_AVAIL 3 834 + #define AF2_COREDUMP_SAVED 4 835 + #define AF2_VDA_POWER_DOWN 5 836 + #define AF2_THUNDERLINK 6 837 + #define AF2_THUNDERBOLT 7 838 + #define AF2_INIT_DONE 8 839 + #define AF2_INT_PENDING 9 840 + #define AF2_TIMER_TICK 10 841 + #define AF2_IRQ_CLAIMED 11 842 + #define AF2_MSI_ENABLED 12 843 843 atomic_t disable_cnt; 844 844 atomic_t dis_ints_cnt; 845 845 u32 int_stat; ··· 1150 1150 int data_sz); 1151 1151 1152 1152 /* Inline functions */ 1153 - static inline u32 esas2r_lock_set_flags(volatile u32 *flags, u32 bits) 1154 - { 1155 - return test_and_set_bit(ilog2(bits), (volatile unsigned long *)flags); 1156 - } 1157 - 1158 - static inline u32 esas2r_lock_clear_flags(volatile u32 *flags, u32 bits) 1159 - { 1160 - return test_and_clear_bit(ilog2(bits), 1161 - (volatile unsigned long *)flags); 1162 - } 1163 1153 1164 1154 /* Allocate a chip scatter/gather list entry */ 1165 1155 static inline struct esas2r_mem_desc 
*esas2r_alloc_sgl(struct esas2r_adapter *a) ··· 1207 1217 struct esas2r_adapter *a) 1208 1218 { 1209 1219 union atto_vda_req *vrq = rq->vrq; 1210 - u32 handle; 1211 1220 1212 1221 INIT_LIST_HEAD(&rq->sg_table_head); 1213 1222 rq->data_buf = (void *)(vrq + 1); ··· 1242 1253 1243 1254 /* 1244 1255 * add a reference number to the handle to make it unique (until it 1245 - * wraps of course) while preserving the upper word 1256 + * wraps of course) while preserving the least significant word 1246 1257 */ 1247 - 1248 - handle = be32_to_cpu(vrq->scsi.handle) & 0xFFFF0000; 1249 - vrq->scsi.handle = cpu_to_be32(handle + a->cmd_ref_no++); 1258 + vrq->scsi.handle = (a->cmd_ref_no++ << 16) | (u16)vrq->scsi.handle; 1250 1259 1251 1260 /* 1252 1261 * the following formats a SCSI request. the caller can override as ··· 1290 1303 1291 1304 static inline bool esas2r_is_tasklet_pending(struct esas2r_adapter *a) 1292 1305 { 1293 - return (a->flags & (AF_BUSRST_NEEDED | AF_BUSRST_DETECTED 1294 - | AF_CHPRST_NEEDED | AF_CHPRST_DETECTED 1295 - | AF_PORT_CHANGE)) 1296 - ? 
true : false; 1306 + 1307 + return test_bit(AF_BUSRST_NEEDED, &a->flags) || 1308 + test_bit(AF_BUSRST_DETECTED, &a->flags) || 1309 + test_bit(AF_CHPRST_NEEDED, &a->flags) || 1310 + test_bit(AF_CHPRST_DETECTED, &a->flags) || 1311 + test_bit(AF_PORT_CHANGE, &a->flags); 1312 + 1297 1313 } 1298 1314 1299 1315 /* ··· 1335 1345 static inline void esas2r_schedule_tasklet(struct esas2r_adapter *a) 1336 1346 { 1337 1347 /* make sure we don't schedule twice */ 1338 - if (!(esas2r_lock_set_flags(&a->flags, AF_TASKLET_SCHEDULED) & 1339 - ilog2(AF_TASKLET_SCHEDULED))) 1348 + if (!test_and_set_bit(AF_TASKLET_SCHEDULED, &a->flags)) 1340 1349 tasklet_hi_schedule(&a->tasklet); 1341 1350 } 1342 1351 1343 1352 static inline void esas2r_enable_heartbeat(struct esas2r_adapter *a) 1344 1353 { 1345 - if (!(a->flags & (AF_DEGRADED_MODE | AF_CHPRST_PENDING)) 1346 - && (a->nvram->options2 & SASNVR2_HEARTBEAT)) 1347 - esas2r_lock_set_flags(&a->flags, AF_HEARTBEAT_ENB); 1354 + if (!test_bit(AF_DEGRADED_MODE, &a->flags) && 1355 + !test_bit(AF_CHPRST_PENDING, &a->flags) && 1356 + (a->nvram->options2 & SASNVR2_HEARTBEAT)) 1357 + set_bit(AF_HEARTBEAT_ENB, &a->flags); 1348 1358 else 1349 - esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT_ENB); 1359 + clear_bit(AF_HEARTBEAT_ENB, &a->flags); 1350 1360 } 1351 1361 1352 1362 static inline void esas2r_disable_heartbeat(struct esas2r_adapter *a) 1353 1363 { 1354 - esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT_ENB); 1355 - esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT); 1364 + clear_bit(AF_HEARTBEAT_ENB, &a->flags); 1365 + clear_bit(AF_HEARTBEAT, &a->flags); 1356 1366 } 1357 1367 1358 1368 /* Set the initial state for resetting the adapter on the next pass through ··· 1362 1372 { 1363 1373 esas2r_disable_heartbeat(a); 1364 1374 1365 - esas2r_lock_set_flags(&a->flags, AF_CHPRST_NEEDED); 1366 - esas2r_lock_set_flags(&a->flags, AF_CHPRST_PENDING); 1367 - esas2r_lock_set_flags(&a->flags, AF_DISC_PENDING); 1375 + set_bit(AF_CHPRST_NEEDED, &a->flags); 1376 
+ set_bit(AF_CHPRST_PENDING, &a->flags); 1377 + set_bit(AF_DISC_PENDING, &a->flags); 1368 1378 } 1369 1379 1370 1380 /* See if an interrupt is pending on the adapter. */
+25 -30
drivers/scsi/esas2r/esas2r_disc.c
··· 86 86 87 87 esas2r_trace_enter(); 88 88 89 - esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG); 90 - esas2r_lock_clear_flags(&a->flags2, AF2_DEV_SCAN); 91 - esas2r_lock_clear_flags(&a->flags2, AF2_DEV_CNT_OK); 89 + clear_bit(AF_DISC_IN_PROG, &a->flags); 90 + clear_bit(AF2_DEV_SCAN, &a->flags2); 91 + clear_bit(AF2_DEV_CNT_OK, &a->flags2); 92 92 93 93 a->disc_start_time = jiffies_to_msecs(jiffies); 94 94 a->disc_wait_time = nvr->dev_wait_time * 1000; ··· 107 107 108 108 a->general_req.interrupt_cx = NULL; 109 109 110 - if (a->flags & (AF_CHPRST_DETECTED | AF_POWER_MGT)) { 110 + if (test_bit(AF_CHPRST_DETECTED, &a->flags) || 111 + test_bit(AF_POWER_MGT, &a->flags)) { 111 112 if (a->prev_dev_cnt == 0) { 112 113 /* Don't bother waiting if there is nothing to wait 113 114 * for. ··· 213 212 || a->disc_wait_cnt == 0)) { 214 213 /* After three seconds of waiting, schedule a scan. */ 215 214 if (time >= 3000 216 - && !(esas2r_lock_set_flags(&a->flags2, 217 - AF2_DEV_SCAN) & 218 - ilog2(AF2_DEV_SCAN))) { 215 + && !test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) { 219 216 spin_lock_irqsave(&a->mem_lock, flags); 220 217 esas2r_disc_queue_event(a, DCDE_DEV_SCAN); 221 218 spin_unlock_irqrestore(&a->mem_lock, flags); ··· 227 228 * We are done waiting...we think. Adjust the wait time to 228 229 * consume events after the count is met. 229 230 */ 230 - if (!(esas2r_lock_set_flags(&a->flags2, AF2_DEV_CNT_OK) 231 - & ilog2(AF2_DEV_CNT_OK))) 231 + if (!test_and_set_bit(AF2_DEV_CNT_OK, &a->flags2)) 232 232 a->disc_wait_time = time + 3000; 233 233 234 234 /* If we haven't done a full scan yet, do it now. 
*/ 235 - if (!(esas2r_lock_set_flags(&a->flags2, 236 - AF2_DEV_SCAN) & 237 - ilog2(AF2_DEV_SCAN))) { 235 + if (!test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) { 238 236 spin_lock_irqsave(&a->mem_lock, flags); 239 237 esas2r_disc_queue_event(a, DCDE_DEV_SCAN); 240 238 spin_unlock_irqrestore(&a->mem_lock, flags); 241 - 242 239 esas2r_trace_exit(); 243 240 return; 244 241 } ··· 248 253 return; 249 254 } 250 255 } else { 251 - if (!(esas2r_lock_set_flags(&a->flags2, 252 - AF2_DEV_SCAN) & 253 - ilog2(AF2_DEV_SCAN))) { 256 + if (!test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) { 254 257 spin_lock_irqsave(&a->mem_lock, flags); 255 258 esas2r_disc_queue_event(a, DCDE_DEV_SCAN); 256 259 spin_unlock_irqrestore(&a->mem_lock, flags); ··· 258 265 /* We want to stop waiting for devices. */ 259 266 a->disc_wait_time = 0; 260 267 261 - if ((a->flags & AF_DISC_POLLED) 262 - && (a->flags & AF_DISC_IN_PROG)) { 268 + if (test_bit(AF_DISC_POLLED, &a->flags) && 269 + test_bit(AF_DISC_IN_PROG, &a->flags)) { 263 270 /* 264 271 * Polled discovery is still pending so continue the active 265 272 * discovery until it is done. At that point, we will stop ··· 273 280 * driven; i.e. There is no transition. 274 281 */ 275 282 esas2r_disc_fix_curr_requests(a); 276 - esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING); 283 + clear_bit(AF_DISC_PENDING, &a->flags); 277 284 278 285 /* 279 286 * We have deferred target state changes until now because we 280 287 * don't want to report any removals (due to the first arrival) 281 288 * until the device wait time expires. 282 289 */ 283 - esas2r_lock_set_flags(&a->flags, AF_PORT_CHANGE); 290 + set_bit(AF_PORT_CHANGE, &a->flags); 284 291 } 285 292 286 293 esas2r_trace_exit(); ··· 301 308 * Don't start discovery before or during polled discovery. if we did, 302 309 * we would have a deadlock if we are in the ISR already. 
303 310 */ 304 - if (!(a->flags & (AF_CHPRST_PENDING | AF_DISC_POLLED))) 311 + if (!test_bit(AF_CHPRST_PENDING, &a->flags) && 312 + !test_bit(AF_DISC_POLLED, &a->flags)) 305 313 esas2r_disc_start_port(a); 306 314 307 315 esas2r_trace_exit(); ··· 316 322 317 323 esas2r_trace_enter(); 318 324 319 - if (a->flags & AF_DISC_IN_PROG) { 325 + if (test_bit(AF_DISC_IN_PROG, &a->flags)) { 320 326 esas2r_trace_exit(); 321 327 322 328 return false; ··· 324 330 325 331 /* If there is a discovery waiting, process it. */ 326 332 if (dc->disc_evt) { 327 - if ((a->flags & AF_DISC_POLLED) 333 + if (test_bit(AF_DISC_POLLED, &a->flags) 328 334 && a->disc_wait_time == 0) { 329 335 /* 330 336 * We are doing polled discovery, but we no longer want ··· 341 347 342 348 esas2r_hdebug("disc done"); 343 349 344 - esas2r_lock_set_flags(&a->flags, AF_PORT_CHANGE); 350 + set_bit(AF_PORT_CHANGE, &a->flags); 345 351 346 352 esas2r_trace_exit(); 347 353 ··· 350 356 351 357 /* Handle the discovery context */ 352 358 esas2r_trace("disc_evt: %d", dc->disc_evt); 353 - esas2r_lock_set_flags(&a->flags, AF_DISC_IN_PROG); 359 + set_bit(AF_DISC_IN_PROG, &a->flags); 354 360 dc->flags = 0; 355 361 356 - if (a->flags & AF_DISC_POLLED) 362 + if (test_bit(AF_DISC_POLLED, &a->flags)) 357 363 dc->flags |= DCF_POLLED; 358 364 359 365 rq->interrupt_cx = dc; ··· 373 379 } 374 380 375 381 /* Continue interrupt driven discovery */ 376 - if (!(a->flags & AF_DISC_POLLED)) 382 + if (!test_bit(AF_DISC_POLLED, &a->flags)) 377 383 ret = esas2r_disc_continue(a, rq); 378 384 else 379 385 ret = true; ··· 447 453 /* Discovery is done...for now. */ 448 454 rq->interrupt_cx = NULL; 449 455 450 - if (!(a->flags & AF_DISC_PENDING)) 456 + if (!test_bit(AF_DISC_PENDING, &a->flags)) 451 457 esas2r_disc_fix_curr_requests(a); 452 458 453 - esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG); 459 + clear_bit(AF_DISC_IN_PROG, &a->flags); 454 460 455 461 /* Start the next discovery. 
*/ 456 462 return esas2r_disc_start_port(a); ··· 474 480 475 481 spin_lock_irqsave(&a->queue_lock, flags); 476 482 477 - if (!(a->flags & (AF_CHPRST_PENDING | AF_FLASHING))) 483 + if (!test_bit(AF_CHPRST_PENDING, &a->flags) && 484 + !test_bit(AF_FLASHING, &a->flags)) 478 485 esas2r_disc_local_start_request(a, rq); 479 486 else 480 487 list_add_tail(&rq->req_list, &a->defer_list);
+19 -15
drivers/scsi/esas2r/esas2r_flash.c
··· 231 231 * RS_PENDING, FM API tasks will continue. 232 232 */ 233 233 rq->req_stat = RS_PENDING; 234 - if (a->flags & AF_DEGRADED_MODE) 234 + if (test_bit(AF_DEGRADED_MODE, &a->flags)) 235 235 /* not suppported for now */; 236 236 else 237 237 build_flash_msg(a, rq); ··· 315 315 memset(fc->scratch, 0, FM_BUF_SZ); 316 316 317 317 esas2r_enable_heartbeat(a); 318 - esas2r_lock_clear_flags(&a->flags, AF_FLASH_LOCK); 318 + clear_bit(AF_FLASH_LOCK, &a->flags); 319 319 return false; 320 320 } 321 321 ··· 526 526 * The download is complete. If in degraded mode, 527 527 * attempt a chip reset. 528 528 */ 529 - if (a->flags & AF_DEGRADED_MODE) 529 + if (test_bit(AF_DEGRADED_MODE, &a->flags)) 530 530 esas2r_local_reset_adapter(a); 531 531 532 532 a->flash_ver = fi->cmp_hdr[CH_IT_BIOS].version; ··· 890 890 } 891 891 } 892 892 893 - if (a->flags & AF_DEGRADED_MODE) { 893 + if (test_bit(AF_DEGRADED_MODE, &a->flags)) { 894 894 fs->status = ATTO_STS_DEGRADED; 895 895 return false; 896 896 } ··· 945 945 946 946 /* Now wait for the firmware to process it */ 947 947 starttime = jiffies_to_msecs(jiffies); 948 - timeout = a->flags & 949 - (AF_CHPRST_PENDING | AF_DISC_PENDING) ? 
40000 : 5000; 948 + 949 + if (test_bit(AF_CHPRST_PENDING, &a->flags) || 950 + test_bit(AF_DISC_PENDING, &a->flags)) 951 + timeout = 40000; 952 + else 953 + timeout = 5000; 950 954 951 955 while (true) { 952 956 intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT); ··· 1012 1008 u32 offset; 1013 1009 u32 iatvr; 1014 1010 1015 - if (a->flags2 & AF2_SERIAL_FLASH) 1011 + if (test_bit(AF2_SERIAL_FLASH, &a->flags2)) 1016 1012 iatvr = MW_DATA_ADDR_SER_FLASH + (from & -WINDOW_SIZE); 1017 1013 else 1018 1014 iatvr = MW_DATA_ADDR_PAR_FLASH + (from & -WINDOW_SIZE); ··· 1240 1236 if (rq->req_stat != RS_PENDING) { 1241 1237 /* update the NVRAM state */ 1242 1238 if (rq->req_stat == RS_SUCCESS) 1243 - esas2r_lock_set_flags(&a->flags, AF_NVR_VALID); 1239 + set_bit(AF_NVR_VALID, &a->flags); 1244 1240 else 1245 - esas2r_lock_clear_flags(&a->flags, AF_NVR_VALID); 1241 + clear_bit(AF_NVR_VALID, &a->flags); 1246 1242 1247 1243 esas2r_enable_heartbeat(a); 1248 1244 ··· 1262 1258 u32 *sas_address_dwords = (u32 *)&sas_address_bytes[0]; 1263 1259 struct atto_vda_flash_req *vrq = &rq->vrq->flash; 1264 1260 1265 - if (a->flags & AF_DEGRADED_MODE) 1261 + if (test_bit(AF_DEGRADED_MODE, &a->flags)) 1266 1262 return false; 1267 1263 1268 1264 if (down_interruptible(&a->nvram_semaphore)) ··· 1306 1302 FLS_OFFSET_NVR, 1307 1303 sizeof(struct esas2r_sas_nvram)); 1308 1304 1309 - if (a->flags & AF_LEGACY_SGE_MODE) { 1305 + if (test_bit(AF_LEGACY_SGE_MODE, &a->flags)) { 1310 1306 1311 1307 vrq->data.sge[0].length = 1312 1308 cpu_to_le32(SGE_LAST | ··· 1341 1337 } else if (n->version > SASNVR_VERSION) { 1342 1338 esas2r_hdebug("invalid NVRAM version"); 1343 1339 } else { 1344 - esas2r_lock_set_flags(&a->flags, AF_NVR_VALID); 1340 + set_bit(AF_NVR_VALID, &a->flags); 1345 1341 rslt = true; 1346 1342 } 1347 1343 ··· 1363 1359 struct esas2r_sas_nvram *n = a->nvram; 1364 1360 u32 time = jiffies_to_msecs(jiffies); 1365 1361 1366 - esas2r_lock_clear_flags(&a->flags, AF_NVR_VALID); 1362 + 
clear_bit(AF_NVR_VALID, &a->flags); 1367 1363 *n = default_sas_nvram; 1368 1364 n->sas_addr[3] |= 0x0F; 1369 1365 n->sas_addr[4] = HIBYTE(LOWORD(time)); ··· 1393 1389 u8 j; 1394 1390 struct esas2r_component_header *ch; 1395 1391 1396 - if (esas2r_lock_set_flags(&a->flags, AF_FLASH_LOCK) & AF_FLASH_LOCK) { 1392 + if (test_and_set_bit(AF_FLASH_LOCK, &a->flags)) { 1397 1393 /* flag was already set */ 1398 1394 fi->status = FI_STAT_BUSY; 1399 1395 return false; ··· 1417 1413 return complete_fmapi_req(a, rq, FI_STAT_IMG_VER); 1418 1414 } 1419 1415 1420 - if (a->flags & AF_DEGRADED_MODE) 1416 + if (test_bit(AF_DEGRADED_MODE, &a->flags)) 1421 1417 return complete_fmapi_req(a, rq, FI_STAT_DEGRADED); 1422 1418 1423 1419 switch (fi->action) {
+77 -79
drivers/scsi/esas2r/esas2r_init.c
··· 216 216 goto use_legacy_interrupts; 217 217 } 218 218 a->intr_mode = INTR_MODE_MSI; 219 - esas2r_lock_set_flags(&a->flags2, AF2_MSI_ENABLED); 219 + set_bit(AF2_MSI_ENABLED, &a->flags2); 220 220 break; 221 221 222 222 ··· 252 252 return; 253 253 } 254 254 255 - esas2r_lock_set_flags(&a->flags2, AF2_IRQ_CLAIMED); 255 + set_bit(AF2_IRQ_CLAIMED, &a->flags2); 256 256 esas2r_log(ESAS2R_LOG_INFO, 257 257 "claimed IRQ %d flags: 0x%lx", 258 258 a->pcid->irq, flags); ··· 380 380 /* interrupts will be disabled until we are done with init */ 381 381 atomic_inc(&a->dis_ints_cnt); 382 382 atomic_inc(&a->disable_cnt); 383 - a->flags |= AF_CHPRST_PENDING 384 - | AF_DISC_PENDING 385 - | AF_FIRST_INIT 386 - | AF_LEGACY_SGE_MODE; 383 + set_bit(AF_CHPRST_PENDING, &a->flags); 384 + set_bit(AF_DISC_PENDING, &a->flags); 385 + set_bit(AF_FIRST_INIT, &a->flags); 386 + set_bit(AF_LEGACY_SGE_MODE, &a->flags); 387 387 388 388 a->init_msg = ESAS2R_INIT_MSG_START; 389 389 a->max_vdareq_size = 128; ··· 440 440 441 441 esas2r_claim_interrupts(a); 442 442 443 - if (a->flags2 & AF2_IRQ_CLAIMED) 443 + if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) 444 444 esas2r_enable_chip_interrupts(a); 445 445 446 - esas2r_lock_set_flags(&a->flags2, AF2_INIT_DONE); 447 - if (!(a->flags & AF_DEGRADED_MODE)) 446 + set_bit(AF2_INIT_DONE, &a->flags2); 447 + if (!test_bit(AF_DEGRADED_MODE, &a->flags)) 448 448 esas2r_kickoff_timer(a); 449 449 esas2r_debug("esas2r_init_adapter done for %p (%d)", 450 450 a, a->disable_cnt); ··· 457 457 { 458 458 struct esas2r_mem_desc *memdesc, *next; 459 459 460 - if ((a->flags2 & AF2_INIT_DONE) 461 - && (!(a->flags & AF_DEGRADED_MODE))) { 460 + if ((test_bit(AF2_INIT_DONE, &a->flags2)) 461 + && (!test_bit(AF_DEGRADED_MODE, &a->flags))) { 462 462 if (!power_management) { 463 463 del_timer_sync(&a->timer); 464 464 tasklet_kill(&a->tasklet); ··· 508 508 } 509 509 510 510 /* Clean up interrupts */ 511 - if (a->flags2 & AF2_IRQ_CLAIMED) { 511 + if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) { 
512 512 esas2r_log_dev(ESAS2R_LOG_INFO, 513 513 &(a->pcid->dev), 514 514 "free_irq(%d) called", a->pcid->irq); 515 515 516 516 free_irq(a->pcid->irq, a); 517 517 esas2r_debug("IRQ released"); 518 - esas2r_lock_clear_flags(&a->flags2, AF2_IRQ_CLAIMED); 518 + clear_bit(AF2_IRQ_CLAIMED, &a->flags2); 519 519 } 520 520 521 - if (a->flags2 & AF2_MSI_ENABLED) { 521 + if (test_bit(AF2_MSI_ENABLED, &a->flags2)) { 522 522 pci_disable_msi(a->pcid); 523 - esas2r_lock_clear_flags(&a->flags2, AF2_MSI_ENABLED); 523 + clear_bit(AF2_MSI_ENABLED, &a->flags2); 524 524 esas2r_debug("MSI disabled"); 525 525 } 526 526 ··· 641 641 pci_set_drvdata(a->pcid, NULL); 642 642 esas2r_adapters[i] = NULL; 643 643 644 - if (a->flags2 & AF2_INIT_DONE) { 645 - esas2r_lock_clear_flags(&a->flags2, 646 - AF2_INIT_DONE); 644 + if (test_bit(AF2_INIT_DONE, &a->flags2)) { 645 + clear_bit(AF2_INIT_DONE, &a->flags2); 647 646 648 - esas2r_lock_set_flags(&a->flags, 649 - AF_DEGRADED_MODE); 647 + set_bit(AF_DEGRADED_MODE, &a->flags); 650 648 651 649 esas2r_log_dev(ESAS2R_LOG_INFO, 652 650 &(a->host->shost_gendev), ··· 757 759 758 760 esas2r_claim_interrupts(a); 759 761 760 - if (a->flags2 & AF2_IRQ_CLAIMED) { 762 + if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) { 761 763 /* 762 764 * Now that system interrupt(s) are claimed, we can enable 763 765 * chip interrupts. 
··· 779 781 780 782 bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str) 781 783 { 782 - esas2r_lock_set_flags(&a->flags, AF_DEGRADED_MODE); 784 + set_bit(AF_DEGRADED_MODE, &a->flags); 783 785 esas2r_log(ESAS2R_LOG_CRIT, 784 786 "setting adapter to degraded mode: %s\n", error_str); 785 787 return false; ··· 807 809 int pcie_cap_reg; 808 810 809 811 pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP); 810 - if (0xffff & pcie_cap_reg) { 812 + if (pcie_cap_reg) { 811 813 u16 devcontrol; 812 814 813 815 pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL, ··· 894 896 && (a->pcid->subsystem_device & ATTO_SSDID_TBT)) 895 897 a->flags2 |= AF2_THUNDERBOLT; 896 898 897 - if (a->flags2 & AF2_THUNDERBOLT) 899 + if (test_bit(AF2_THUNDERBOLT, &a->flags2)) 898 900 a->flags2 |= AF2_SERIAL_FLASH; 899 901 900 902 if (a->pcid->subsystem_device == ATTO_TLSH_1068) ··· 954 956 a->outbound_copy = (u32 volatile *)high; 955 957 high += sizeof(u32); 956 958 957 - if (!(a->flags & AF_NVR_VALID)) 959 + if (!test_bit(AF_NVR_VALID, &a->flags)) 958 960 esas2r_nvram_set_defaults(a); 959 961 960 962 /* update the caller's uncached memory area pointer */ 961 963 *uncached_area = (void *)high; 962 964 963 965 /* initialize the allocated memory */ 964 - if (a->flags & AF_FIRST_INIT) { 966 + if (test_bit(AF_FIRST_INIT, &a->flags)) { 965 967 memset(a->req_table, 0, 966 968 (num_requests + num_ae_requests + 967 969 1) * sizeof(struct esas2r_request *)); ··· 1017 1019 * if the chip reset detected flag is set, we can bypass a bunch of 1018 1020 * stuff. 
1019 1021 */ 1020 - if (a->flags & AF_CHPRST_DETECTED) 1022 + if (test_bit(AF_CHPRST_DETECTED, &a->flags)) 1021 1023 goto skip_chip_reset; 1022 1024 1023 1025 /* ··· 1055 1057 doorbell); 1056 1058 1057 1059 if (ver == DRBL_FW_VER_0) { 1058 - esas2r_lock_set_flags(&a->flags, 1059 - AF_LEGACY_SGE_MODE); 1060 + set_bit(AF_LEGACY_SGE_MODE, &a->flags); 1060 1061 1061 1062 a->max_vdareq_size = 128; 1062 1063 a->build_sgl = esas2r_build_sg_list_sge; 1063 1064 } else if (ver == DRBL_FW_VER_1) { 1064 - esas2r_lock_clear_flags(&a->flags, 1065 - AF_LEGACY_SGE_MODE); 1065 + clear_bit(AF_LEGACY_SGE_MODE, &a->flags); 1066 1066 1067 1067 a->max_vdareq_size = 1024; 1068 1068 a->build_sgl = esas2r_build_sg_list_prd; ··· 1135 1139 *a->outbound_copy = 1136 1140 a->last_write = 1137 1141 a->last_read = a->list_size - 1; 1138 - esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE); 1142 + set_bit(AF_COMM_LIST_TOGGLE, &a->flags); 1139 1143 esas2r_write_register_dword(a, MU_IN_LIST_WRITE, MU_ILW_TOGGLE | 1140 1144 a->last_write); 1141 1145 esas2r_write_register_dword(a, MU_OUT_LIST_COPY, MU_OLC_TOGGLE | ··· 1200 1204 */ 1201 1205 doorbell = esas2r_read_register_dword(a, MU_DOORBELL_IN_ENB); 1202 1206 if (doorbell & DRBL_POWER_DOWN) 1203 - esas2r_lock_set_flags(&a->flags2, AF2_VDA_POWER_DOWN); 1207 + set_bit(AF2_VDA_POWER_DOWN, &a->flags2); 1204 1208 else 1205 - esas2r_lock_clear_flags(&a->flags2, AF2_VDA_POWER_DOWN); 1209 + clear_bit(AF2_VDA_POWER_DOWN, &a->flags2); 1206 1210 1207 1211 /* 1208 1212 * enable assertion of outbound queue and doorbell interrupts in the ··· 1235 1239 0, 1236 1240 NULL); 1237 1241 ci = (struct atto_vda_cfg_init *)&rq->vrq->cfg.data.init; 1238 - ci->sgl_page_size = sgl_page_size; 1239 - ci->epoch_time = now.tv_sec; 1242 + ci->sgl_page_size = cpu_to_le32(sgl_page_size); 1243 + ci->epoch_time = cpu_to_le32(now.tv_sec); 1240 1244 rq->flags |= RF_FAILURE_OK; 1241 1245 a->init_msg = ESAS2R_INIT_MSG_INIT; 1242 1246 break; ··· 1246 1250 if (rq->req_stat == RS_SUCCESS) 
{ 1247 1251 u32 major; 1248 1252 u32 minor; 1253 + u16 fw_release; 1249 1254 1250 1255 a->fw_version = le16_to_cpu( 1251 1256 rq->func_rsp.cfg_rsp.vda_version); 1252 1257 a->fw_build = rq->func_rsp.cfg_rsp.fw_build; 1253 - major = LOBYTE(rq->func_rsp.cfg_rsp.fw_release); 1254 - minor = HIBYTE(rq->func_rsp.cfg_rsp.fw_release); 1258 + fw_release = le16_to_cpu( 1259 + rq->func_rsp.cfg_rsp.fw_release); 1260 + major = LOBYTE(fw_release); 1261 + minor = HIBYTE(fw_release); 1255 1262 a->fw_version += (major << 16) + (minor << 24); 1256 1263 } else { 1257 1264 esas2r_hdebug("FAILED"); ··· 1265 1266 * unsupported config requests correctly. 1266 1267 */ 1267 1268 1268 - if ((a->flags2 & AF2_THUNDERBOLT) 1269 - || (be32_to_cpu(a->fw_version) > 1270 - be32_to_cpu(0x47020052))) { 1269 + if ((test_bit(AF2_THUNDERBOLT, &a->flags2)) 1270 + || (be32_to_cpu(a->fw_version) > 0x00524702)) { 1271 1271 esas2r_hdebug("CFG get init"); 1272 1272 esas2r_build_cfg_req(a, 1273 1273 rq, ··· 1359 1361 struct esas2r_request *rq; 1360 1362 u32 i; 1361 1363 1362 - if (a->flags & AF_DEGRADED_MODE) 1364 + if (test_bit(AF_DEGRADED_MODE, &a->flags)) 1363 1365 goto exit; 1364 1366 1365 - if (!(a->flags & AF_NVR_VALID)) { 1367 + if (!test_bit(AF_NVR_VALID, &a->flags)) { 1366 1368 if (!esas2r_nvram_read_direct(a)) 1367 1369 esas2r_log(ESAS2R_LOG_WARN, 1368 1370 "invalid/missing NVRAM parameters"); ··· 1374 1376 } 1375 1377 1376 1378 /* The firmware is ready. 
*/ 1377 - esas2r_lock_clear_flags(&a->flags, AF_DEGRADED_MODE); 1378 - esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING); 1379 + clear_bit(AF_DEGRADED_MODE, &a->flags); 1380 + clear_bit(AF_CHPRST_PENDING, &a->flags); 1379 1381 1380 1382 /* Post all the async event requests */ 1381 1383 for (i = 0, rq = a->first_ae_req; i < num_ae_requests; i++, rq++) ··· 1396 1398 1397 1399 esas2r_hdebug("firmware revision: %s", a->fw_rev); 1398 1400 1399 - if ((a->flags & AF_CHPRST_DETECTED) 1400 - && (a->flags & AF_FIRST_INIT)) { 1401 + if (test_bit(AF_CHPRST_DETECTED, &a->flags) 1402 + && (test_bit(AF_FIRST_INIT, &a->flags))) { 1401 1403 esas2r_enable_chip_interrupts(a); 1402 1404 return true; 1403 1405 } ··· 1421 1423 * Block Tasklets from getting scheduled and indicate this is 1422 1424 * polled discovery. 1423 1425 */ 1424 - esas2r_lock_set_flags(&a->flags, AF_TASKLET_SCHEDULED); 1425 - esas2r_lock_set_flags(&a->flags, AF_DISC_POLLED); 1426 + set_bit(AF_TASKLET_SCHEDULED, &a->flags); 1427 + set_bit(AF_DISC_POLLED, &a->flags); 1426 1428 1427 1429 /* 1428 1430 * Temporarily bring the disable count to zero to enable 1429 1431 * deferred processing. Note that the count is already zero 1430 1432 * after the first initialization. 1431 1433 */ 1432 - if (a->flags & AF_FIRST_INIT) 1434 + if (test_bit(AF_FIRST_INIT, &a->flags)) 1433 1435 atomic_dec(&a->disable_cnt); 1434 1436 1435 - while (a->flags & AF_DISC_PENDING) { 1437 + while (test_bit(AF_DISC_PENDING, &a->flags)) { 1436 1438 schedule_timeout_interruptible(msecs_to_jiffies(100)); 1437 1439 1438 1440 /* ··· 1451 1453 * we have to make sure the timer tick processes the 1452 1454 * doorbell indicating the firmware is ready. 1453 1455 */ 1454 - if (!(a->flags & AF_CHPRST_PENDING)) 1456 + if (!test_bit(AF_CHPRST_PENDING, &a->flags)) 1455 1457 esas2r_disc_check_for_work(a); 1456 1458 1457 1459 /* Simulate a timer tick. 
*/ ··· 1471 1473 1472 1474 } 1473 1475 1474 - if (a->flags & AF_FIRST_INIT) 1476 + if (test_bit(AF_FIRST_INIT, &a->flags)) 1475 1477 atomic_inc(&a->disable_cnt); 1476 1478 1477 - esas2r_lock_clear_flags(&a->flags, AF_DISC_POLLED); 1478 - esas2r_lock_clear_flags(&a->flags, AF_TASKLET_SCHEDULED); 1479 + clear_bit(AF_DISC_POLLED, &a->flags); 1480 + clear_bit(AF_TASKLET_SCHEDULED, &a->flags); 1479 1481 } 1480 1482 1481 1483 ··· 1502 1504 * need to get done before we exit. 1503 1505 */ 1504 1506 1505 - if ((a->flags & AF_CHPRST_DETECTED) 1506 - && (a->flags & AF_FIRST_INIT)) { 1507 + if (test_bit(AF_CHPRST_DETECTED, &a->flags) && 1508 + test_bit(AF_FIRST_INIT, &a->flags)) { 1507 1509 /* 1508 1510 * Reinitialization was performed during the first 1509 1511 * initialization. Only clear the chip reset flag so the 1510 1512 * original device polling is not cancelled. 1511 1513 */ 1512 1514 if (!rslt) 1513 - esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING); 1515 + clear_bit(AF_CHPRST_PENDING, &a->flags); 1514 1516 } else { 1515 1517 /* First initialization or a subsequent re-init is complete. */ 1516 1518 if (!rslt) { 1517 - esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING); 1518 - esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING); 1519 + clear_bit(AF_CHPRST_PENDING, &a->flags); 1520 + clear_bit(AF_DISC_PENDING, &a->flags); 1519 1521 } 1520 1522 1521 1523 1522 1524 /* Enable deferred processing after the first initialization. 
*/ 1523 - if (a->flags & AF_FIRST_INIT) { 1524 - esas2r_lock_clear_flags(&a->flags, AF_FIRST_INIT); 1525 + if (test_bit(AF_FIRST_INIT, &a->flags)) { 1526 + clear_bit(AF_FIRST_INIT, &a->flags); 1525 1527 1526 1528 if (atomic_dec_return(&a->disable_cnt) == 0) 1527 1529 esas2r_do_deferred_processes(a); ··· 1533 1535 1534 1536 void esas2r_reset_adapter(struct esas2r_adapter *a) 1535 1537 { 1536 - esas2r_lock_set_flags(&a->flags, AF_OS_RESET); 1538 + set_bit(AF_OS_RESET, &a->flags); 1537 1539 esas2r_local_reset_adapter(a); 1538 1540 esas2r_schedule_tasklet(a); 1539 1541 } ··· 1548 1550 * dump is located in the upper 512KB of the onchip SRAM. Make sure 1549 1551 * to not overwrite a previous crash that was saved. 1550 1552 */ 1551 - if ((a->flags2 & AF2_COREDUMP_AVAIL) 1552 - && !(a->flags2 & AF2_COREDUMP_SAVED)) { 1553 + if (test_bit(AF2_COREDUMP_AVAIL, &a->flags2) && 1554 + !test_bit(AF2_COREDUMP_SAVED, &a->flags2)) { 1553 1555 esas2r_read_mem_block(a, 1554 1556 a->fw_coredump_buff, 1555 1557 MW_DATA_ADDR_SRAM + 0x80000, 1556 1558 ESAS2R_FWCOREDUMP_SZ); 1557 1559 1558 - esas2r_lock_set_flags(&a->flags2, AF2_COREDUMP_SAVED); 1560 + set_bit(AF2_COREDUMP_SAVED, &a->flags2); 1559 1561 } 1560 1562 1561 - esas2r_lock_clear_flags(&a->flags2, AF2_COREDUMP_AVAIL); 1563 + clear_bit(AF2_COREDUMP_AVAIL, &a->flags2); 1562 1564 1563 1565 /* Reset the chip */ 1564 1566 if (a->pcid->revision == MVR_FREY_B2) ··· 1604 1606 */ 1605 1607 void esas2r_power_down(struct esas2r_adapter *a) 1606 1608 { 1607 - esas2r_lock_set_flags(&a->flags, AF_POWER_MGT); 1608 - esas2r_lock_set_flags(&a->flags, AF_POWER_DOWN); 1609 + set_bit(AF_POWER_MGT, &a->flags); 1610 + set_bit(AF_POWER_DOWN, &a->flags); 1609 1611 1610 - if (!(a->flags & AF_DEGRADED_MODE)) { 1612 + if (!test_bit(AF_DEGRADED_MODE, &a->flags)) { 1611 1613 u32 starttime; 1612 1614 u32 doorbell; 1613 1615 ··· 1647 1649 * For versions of firmware that support it tell them the driver 1648 1650 * is powering down. 
1649 1651 */ 1650 - if (a->flags2 & AF2_VDA_POWER_DOWN) 1652 + if (test_bit(AF2_VDA_POWER_DOWN, &a->flags2)) 1651 1653 esas2r_power_down_notify_firmware(a); 1652 1654 } 1653 1655 1654 1656 /* Suspend I/O processing. */ 1655 - esas2r_lock_set_flags(&a->flags, AF_OS_RESET); 1656 - esas2r_lock_set_flags(&a->flags, AF_DISC_PENDING); 1657 - esas2r_lock_set_flags(&a->flags, AF_CHPRST_PENDING); 1657 + set_bit(AF_OS_RESET, &a->flags); 1658 + set_bit(AF_DISC_PENDING, &a->flags); 1659 + set_bit(AF_CHPRST_PENDING, &a->flags); 1658 1660 1659 1661 esas2r_process_adapter_reset(a); 1660 1662 ··· 1671 1673 { 1672 1674 bool ret; 1673 1675 1674 - esas2r_lock_clear_flags(&a->flags, AF_POWER_DOWN); 1676 + clear_bit(AF_POWER_DOWN, &a->flags); 1675 1677 esas2r_init_pci_cfg_space(a); 1676 - esas2r_lock_set_flags(&a->flags, AF_FIRST_INIT); 1678 + set_bit(AF_FIRST_INIT, &a->flags); 1677 1679 atomic_inc(&a->disable_cnt); 1678 1680 1679 1681 /* reinitialize the adapter */ ··· 1685 1687 esas2r_send_reset_ae(a, true); 1686 1688 1687 1689 /* clear this flag after initialization. */ 1688 - esas2r_lock_clear_flags(&a->flags, AF_POWER_MGT); 1690 + clear_bit(AF_POWER_MGT, &a->flags); 1689 1691 return ret; 1690 1692 } 1691 1693 1692 1694 bool esas2r_is_adapter_present(struct esas2r_adapter *a) 1693 1695 { 1694 - if (a->flags & AF_NOT_PRESENT) 1696 + if (test_bit(AF_NOT_PRESENT, &a->flags)) 1695 1697 return false; 1696 1698 1697 1699 if (esas2r_read_register_dword(a, MU_DOORBELL_OUT) == 0xFFFFFFFF) { 1698 - esas2r_lock_set_flags(&a->flags, AF_NOT_PRESENT); 1700 + set_bit(AF_NOT_PRESENT, &a->flags); 1699 1701 1700 1702 return false; 1701 1703 }
+49 -48
drivers/scsi/esas2r/esas2r_int.c
··· 96 96 if (!esas2r_adapter_interrupt_pending(a)) 97 97 return IRQ_NONE; 98 98 99 - esas2r_lock_set_flags(&a->flags2, AF2_INT_PENDING); 99 + set_bit(AF2_INT_PENDING, &a->flags2); 100 100 esas2r_schedule_tasklet(a); 101 101 102 102 return IRQ_HANDLED; ··· 317 317 * = 2 - can start any request 318 318 */ 319 319 320 - if (a->flags & (AF_CHPRST_PENDING | AF_FLASHING)) 320 + if (test_bit(AF_CHPRST_PENDING, &a->flags) || 321 + test_bit(AF_FLASHING, &a->flags)) 321 322 startreqs = 0; 322 - else if (a->flags & AF_DISC_PENDING) 323 + else if (test_bit(AF_DISC_PENDING, &a->flags)) 323 324 startreqs = 1; 324 325 325 326 atomic_inc(&a->disable_cnt); ··· 368 367 * Flashing could have been set by last local 369 368 * start 370 369 */ 371 - if (a->flags & AF_FLASHING) 370 + if (test_bit(AF_FLASHING, &a->flags)) 372 371 break; 373 372 } 374 373 } ··· 405 404 406 405 dc->disc_evt = 0; 407 406 408 - esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG); 407 + clear_bit(AF_DISC_IN_PROG, &a->flags); 409 408 } 410 409 411 410 /* ··· 426 425 a->last_write = 427 426 a->last_read = a->list_size - 1; 428 427 429 - esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE); 428 + set_bit(AF_COMM_LIST_TOGGLE, &a->flags); 430 429 431 430 /* Kill all the requests on the active list */ 432 431 list_for_each(element, &a->defer_list) { ··· 471 470 if (atomic_read(&a->disable_cnt) == 0) 472 471 esas2r_do_deferred_processes(a); 473 472 474 - esas2r_lock_clear_flags(&a->flags, AF_OS_RESET); 473 + clear_bit(AF_OS_RESET, &a->flags); 475 474 476 475 esas2r_trace_exit(); 477 476 } ··· 479 478 static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a) 480 479 { 481 480 482 - esas2r_lock_clear_flags(&a->flags, AF_CHPRST_NEEDED); 483 - esas2r_lock_clear_flags(&a->flags, AF_BUSRST_NEEDED); 484 - esas2r_lock_clear_flags(&a->flags, AF_BUSRST_DETECTED); 485 - esas2r_lock_clear_flags(&a->flags, AF_BUSRST_PENDING); 481 + clear_bit(AF_CHPRST_NEEDED, &a->flags); 482 + clear_bit(AF_BUSRST_NEEDED, 
&a->flags); 483 + clear_bit(AF_BUSRST_DETECTED, &a->flags); 484 + clear_bit(AF_BUSRST_PENDING, &a->flags); 486 485 /* 487 486 * Make sure we don't get attempt more than 3 resets 488 487 * when the uptime between resets does not exceed one ··· 508 507 * prevent the heartbeat from trying to recover. 509 508 */ 510 509 511 - esas2r_lock_set_flags(&a->flags, AF_DEGRADED_MODE); 512 - esas2r_lock_set_flags(&a->flags, AF_DISABLED); 513 - esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING); 514 - esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING); 510 + set_bit(AF_DEGRADED_MODE, &a->flags); 511 + set_bit(AF_DISABLED, &a->flags); 512 + clear_bit(AF_CHPRST_PENDING, &a->flags); 513 + clear_bit(AF_DISC_PENDING, &a->flags); 515 514 516 515 esas2r_disable_chip_interrupts(a); 517 516 a->int_mask = 0; ··· 520 519 esas2r_log(ESAS2R_LOG_CRIT, 521 520 "Adapter disabled because of hardware failure"); 522 521 } else { 523 - u32 flags = 524 - esas2r_lock_set_flags(&a->flags, AF_CHPRST_STARTED); 522 + bool alrdyrst = test_and_set_bit(AF_CHPRST_STARTED, &a->flags); 525 523 526 - if (!(flags & AF_CHPRST_STARTED)) 524 + if (!alrdyrst) 527 525 /* 528 526 * Only disable interrupts if this is 529 527 * the first reset attempt. 530 528 */ 531 529 esas2r_disable_chip_interrupts(a); 532 530 533 - if ((a->flags & AF_POWER_MGT) && !(a->flags & AF_FIRST_INIT) && 534 - !(flags & AF_CHPRST_STARTED)) { 531 + if ((test_bit(AF_POWER_MGT, &a->flags)) && 532 + !test_bit(AF_FIRST_INIT, &a->flags) && !alrdyrst) { 535 533 /* 536 534 * Don't reset the chip on the first 537 535 * deferred power up attempt. ··· 543 543 /* Kick off the reinitialization */ 544 544 a->chip_uptime += ESAS2R_CHP_UPTIME_CNT; 545 545 a->chip_init_time = jiffies_to_msecs(jiffies); 546 - if (!(a->flags & AF_POWER_MGT)) { 546 + if (!test_bit(AF_POWER_MGT, &a->flags)) { 547 547 esas2r_process_adapter_reset(a); 548 548 549 - if (!(flags & AF_CHPRST_STARTED)) { 549 + if (!alrdyrst) { 550 550 /* Remove devices now that I/O is cleaned up. 
*/ 551 551 a->prev_dev_cnt = 552 552 esas2r_targ_db_get_tgt_cnt(a); ··· 560 560 561 561 static void esas2r_handle_chip_rst_during_tasklet(struct esas2r_adapter *a) 562 562 { 563 - while (a->flags & AF_CHPRST_DETECTED) { 563 + while (test_bit(AF_CHPRST_DETECTED, &a->flags)) { 564 564 /* 565 565 * Balance the enable in esas2r_initadapter_hw. 566 566 * Esas2r_power_down already took care of it for power 567 567 * management. 568 568 */ 569 - if (!(a->flags & AF_DEGRADED_MODE) && !(a->flags & 570 - AF_POWER_MGT)) 569 + if (!test_bit(AF_DEGRADED_MODE, &a->flags) && 570 + !test_bit(AF_POWER_MGT, &a->flags)) 571 571 esas2r_disable_chip_interrupts(a); 572 572 573 573 /* Reinitialize the chip. */ 574 574 esas2r_check_adapter(a); 575 575 esas2r_init_adapter_hw(a, 0); 576 576 577 - if (a->flags & AF_CHPRST_NEEDED) 577 + if (test_bit(AF_CHPRST_NEEDED, &a->flags)) 578 578 break; 579 579 580 - if (a->flags & AF_POWER_MGT) { 580 + if (test_bit(AF_POWER_MGT, &a->flags)) { 581 581 /* Recovery from power management. */ 582 - if (a->flags & AF_FIRST_INIT) { 582 + if (test_bit(AF_FIRST_INIT, &a->flags)) { 583 583 /* Chip reset during normal power up */ 584 584 esas2r_log(ESAS2R_LOG_CRIT, 585 585 "The firmware was reset during a normal power-up sequence"); 586 586 } else { 587 587 /* Deferred power up complete. */ 588 - esas2r_lock_clear_flags(&a->flags, 589 - AF_POWER_MGT); 588 + clear_bit(AF_POWER_MGT, &a->flags); 590 589 esas2r_send_reset_ae(a, true); 591 590 } 592 591 } else { 593 592 /* Recovery from online chip reset. 
*/ 594 - if (a->flags & AF_FIRST_INIT) { 593 + if (test_bit(AF_FIRST_INIT, &a->flags)) { 595 594 /* Chip reset during driver load */ 596 595 } else { 597 596 /* Chip reset after driver load */ ··· 601 602 "Recovering from a chip reset while the chip was online"); 602 603 } 603 604 604 - esas2r_lock_clear_flags(&a->flags, AF_CHPRST_STARTED); 605 + clear_bit(AF_CHPRST_STARTED, &a->flags); 605 606 esas2r_enable_chip_interrupts(a); 606 607 607 608 /* 608 609 * Clear this flag last! this indicates that the chip has been 609 610 * reset already during initialization. 610 611 */ 611 - esas2r_lock_clear_flags(&a->flags, AF_CHPRST_DETECTED); 612 + clear_bit(AF_CHPRST_DETECTED, &a->flags); 612 613 } 613 614 } 614 615 ··· 616 617 /* Perform deferred tasks when chip interrupts are disabled */ 617 618 void esas2r_do_tasklet_tasks(struct esas2r_adapter *a) 618 619 { 619 - if (a->flags & (AF_CHPRST_NEEDED | AF_CHPRST_DETECTED)) { 620 - if (a->flags & AF_CHPRST_NEEDED) 620 + 621 + if (test_bit(AF_CHPRST_NEEDED, &a->flags) || 622 + test_bit(AF_CHPRST_DETECTED, &a->flags)) { 623 + if (test_bit(AF_CHPRST_NEEDED, &a->flags)) 621 624 esas2r_chip_rst_needed_during_tasklet(a); 622 625 623 626 esas2r_handle_chip_rst_during_tasklet(a); 624 627 } 625 628 626 - if (a->flags & AF_BUSRST_NEEDED) { 629 + if (test_bit(AF_BUSRST_NEEDED, &a->flags)) { 627 630 esas2r_hdebug("hard resetting bus"); 628 631 629 - esas2r_lock_clear_flags(&a->flags, AF_BUSRST_NEEDED); 632 + clear_bit(AF_BUSRST_NEEDED, &a->flags); 630 633 631 - if (a->flags & AF_FLASHING) 632 - esas2r_lock_set_flags(&a->flags, AF_BUSRST_DETECTED); 634 + if (test_bit(AF_FLASHING, &a->flags)) 635 + set_bit(AF_BUSRST_DETECTED, &a->flags); 633 636 else 634 637 esas2r_write_register_dword(a, MU_DOORBELL_IN, 635 638 DRBL_RESET_BUS); 636 639 } 637 640 638 - if (a->flags & AF_BUSRST_DETECTED) { 641 + if (test_bit(AF_BUSRST_DETECTED, &a->flags)) { 639 642 esas2r_process_bus_reset(a); 640 643 641 644 esas2r_log_dev(ESAS2R_LOG_WARN, ··· 646 645 647 
646 scsi_report_bus_reset(a->host, 0); 648 647 649 - esas2r_lock_clear_flags(&a->flags, AF_BUSRST_DETECTED); 650 - esas2r_lock_clear_flags(&a->flags, AF_BUSRST_PENDING); 648 + clear_bit(AF_BUSRST_DETECTED, &a->flags); 649 + clear_bit(AF_BUSRST_PENDING, &a->flags); 651 650 652 651 esas2r_log(ESAS2R_LOG_WARN, "Bus reset complete"); 653 652 } 654 653 655 - if (a->flags & AF_PORT_CHANGE) { 656 - esas2r_lock_clear_flags(&a->flags, AF_PORT_CHANGE); 654 + if (test_bit(AF_PORT_CHANGE, &a->flags)) { 655 + clear_bit(AF_PORT_CHANGE, &a->flags); 657 656 658 657 esas2r_targ_db_report_changes(a); 659 658 } ··· 673 672 esas2r_write_register_dword(a, MU_DOORBELL_OUT, doorbell); 674 673 675 674 if (doorbell & DRBL_RESET_BUS) 676 - esas2r_lock_set_flags(&a->flags, AF_BUSRST_DETECTED); 675 + set_bit(AF_BUSRST_DETECTED, &a->flags); 677 676 678 677 if (doorbell & DRBL_FORCE_INT) 679 - esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT); 678 + clear_bit(AF_HEARTBEAT, &a->flags); 680 679 681 680 if (doorbell & DRBL_PANIC_REASON_MASK) { 682 681 esas2r_hdebug("*** Firmware Panic ***"); ··· 684 683 } 685 684 686 685 if (doorbell & DRBL_FW_RESET) { 687 - esas2r_lock_set_flags(&a->flags2, AF2_COREDUMP_AVAIL); 686 + set_bit(AF2_COREDUMP_AVAIL, &a->flags2); 688 687 esas2r_local_reset_adapter(a); 689 688 } 690 689 ··· 919 918 { 920 919 if (rq->vrq->scsi.function == VDA_FUNC_FLASH 921 920 && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT) 922 - esas2r_lock_clear_flags(&a->flags, AF_FLASHING); 921 + clear_bit(AF_FLASHING, &a->flags); 923 922 924 923 /* See if we setup a callback to do special processing */ 925 924
+35 -38
drivers/scsi/esas2r/esas2r_io.c
··· 49 49 struct esas2r_request *startrq = rq; 50 50 unsigned long flags; 51 51 52 - if (unlikely(a->flags & (AF_DEGRADED_MODE | AF_POWER_DOWN))) { 52 + if (unlikely(test_bit(AF_DEGRADED_MODE, &a->flags) || 53 + test_bit(AF_POWER_DOWN, &a->flags))) { 53 54 if (rq->vrq->scsi.function == VDA_FUNC_SCSI) 54 55 rq->req_stat = RS_SEL2; 55 56 else ··· 70 69 * Note that if AF_DISC_PENDING is set than this will 71 70 * go on the defer queue. 72 71 */ 73 - if (unlikely(t->target_state != TS_PRESENT 74 - && !(a->flags & AF_DISC_PENDING))) 72 + if (unlikely(t->target_state != TS_PRESENT && 73 + !test_bit(AF_DISC_PENDING, &a->flags))) 75 74 rq->req_stat = RS_SEL; 76 75 } 77 76 } ··· 92 91 spin_lock_irqsave(&a->queue_lock, flags); 93 92 94 93 if (likely(list_empty(&a->defer_list) && 95 - !(a->flags & 96 - (AF_CHPRST_PENDING | AF_FLASHING | AF_DISC_PENDING)))) 94 + !test_bit(AF_CHPRST_PENDING, &a->flags) && 95 + !test_bit(AF_FLASHING, &a->flags) && 96 + !test_bit(AF_DISC_PENDING, &a->flags))) 97 97 esas2r_local_start_request(a, startrq); 98 98 else 99 99 list_add_tail(&startrq->req_list, &a->defer_list); ··· 126 124 127 125 if (unlikely(rq->vrq->scsi.function == VDA_FUNC_FLASH 128 126 && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT)) 129 - esas2r_lock_set_flags(&a->flags, AF_FLASHING); 127 + set_bit(AF_FLASHING, &a->flags); 130 128 131 129 list_add_tail(&rq->req_list, &a->active_list); 132 130 esas2r_start_vda_request(a, rq); ··· 149 147 if (a->last_write >= a->list_size) { 150 148 a->last_write = 0; 151 149 /* update the toggle bit */ 152 - if (a->flags & AF_COMM_LIST_TOGGLE) 153 - esas2r_lock_clear_flags(&a->flags, 154 - AF_COMM_LIST_TOGGLE); 150 + if (test_bit(AF_COMM_LIST_TOGGLE, &a->flags)) 151 + clear_bit(AF_COMM_LIST_TOGGLE, &a->flags); 155 152 else 156 - esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE); 153 + set_bit(AF_COMM_LIST_TOGGLE, &a->flags); 157 154 } 158 155 159 156 element = ··· 170 169 /* Update the write pointer */ 171 170 dw = a->last_write; 172 171 173 - 
if (a->flags & AF_COMM_LIST_TOGGLE) 172 + if (test_bit(AF_COMM_LIST_TOGGLE, &a->flags)) 174 173 dw |= MU_ILW_TOGGLE; 175 174 176 175 esas2r_trace("rq->vrq->scsi.handle:%x", rq->vrq->scsi.handle); ··· 688 687 esas2r_write_register_dword(a, MU_DOORBELL_OUT, 689 688 doorbell); 690 689 if (ver == DRBL_FW_VER_0) { 691 - esas2r_lock_set_flags(&a->flags, 692 - AF_CHPRST_DETECTED); 693 - esas2r_lock_set_flags(&a->flags, 694 - AF_LEGACY_SGE_MODE); 690 + set_bit(AF_CHPRST_DETECTED, &a->flags); 691 + set_bit(AF_LEGACY_SGE_MODE, &a->flags); 695 692 696 693 a->max_vdareq_size = 128; 697 694 a->build_sgl = esas2r_build_sg_list_sge; 698 695 } else if (ver == DRBL_FW_VER_1) { 699 - esas2r_lock_set_flags(&a->flags, 700 - AF_CHPRST_DETECTED); 701 - esas2r_lock_clear_flags(&a->flags, 702 - AF_LEGACY_SGE_MODE); 696 + set_bit(AF_CHPRST_DETECTED, &a->flags); 697 + clear_bit(AF_LEGACY_SGE_MODE, &a->flags); 703 698 704 699 a->max_vdareq_size = 1024; 705 700 a->build_sgl = esas2r_build_sg_list_prd; ··· 716 719 a->last_tick_time = currtime; 717 720 718 721 /* count down the uptime */ 719 - if (a->chip_uptime 720 - && !(a->flags & (AF_CHPRST_PENDING | AF_DISC_PENDING))) { 722 + if (a->chip_uptime && 723 + !test_bit(AF_CHPRST_PENDING, &a->flags) && 724 + !test_bit(AF_DISC_PENDING, &a->flags)) { 721 725 if (deltatime >= a->chip_uptime) 722 726 a->chip_uptime = 0; 723 727 else 724 728 a->chip_uptime -= deltatime; 725 729 } 726 730 727 - if (a->flags & AF_CHPRST_PENDING) { 728 - if (!(a->flags & AF_CHPRST_NEEDED) 729 - && !(a->flags & AF_CHPRST_DETECTED)) 731 + if (test_bit(AF_CHPRST_PENDING, &a->flags)) { 732 + if (!test_bit(AF_CHPRST_NEEDED, &a->flags) && 733 + !test_bit(AF_CHPRST_DETECTED, &a->flags)) 730 734 esas2r_handle_pending_reset(a, currtime); 731 735 } else { 732 - if (a->flags & AF_DISC_PENDING) 736 + if (test_bit(AF_DISC_PENDING, &a->flags)) 733 737 esas2r_disc_check_complete(a); 734 - 735 - if (a->flags & AF_HEARTBEAT_ENB) { 736 - if (a->flags & AF_HEARTBEAT) { 738 + if 
(test_bit(AF_HEARTBEAT_ENB, &a->flags)) { 739 + if (test_bit(AF_HEARTBEAT, &a->flags)) { 737 740 if ((currtime - a->heartbeat_time) >= 738 741 ESAS2R_HEARTBEAT_TIME) { 739 - esas2r_lock_clear_flags(&a->flags, 740 - AF_HEARTBEAT); 742 + clear_bit(AF_HEARTBEAT, &a->flags); 741 743 esas2r_hdebug("heartbeat failed"); 742 744 esas2r_log(ESAS2R_LOG_CRIT, 743 745 "heartbeat failed"); ··· 744 748 esas2r_local_reset_adapter(a); 745 749 } 746 750 } else { 747 - esas2r_lock_set_flags(&a->flags, AF_HEARTBEAT); 751 + set_bit(AF_HEARTBEAT, &a->flags); 748 752 a->heartbeat_time = currtime; 749 753 esas2r_force_interrupt(a); 750 754 } ··· 808 812 rqaux->vrq->scsi.flags |= 809 813 cpu_to_le16(task_mgt_func * LOBIT(FCP_CMND_TM_MASK)); 810 814 811 - if (a->flags & AF_FLASHING) { 815 + if (test_bit(AF_FLASHING, &a->flags)) { 812 816 /* Assume success. if there are active requests, return busy */ 813 817 rqaux->req_stat = RS_SUCCESS; 814 818 ··· 827 831 828 832 spin_unlock_irqrestore(&a->queue_lock, flags); 829 833 830 - if (!(a->flags & AF_FLASHING)) 834 + if (!test_bit(AF_FLASHING, &a->flags)) 831 835 esas2r_start_request(a, rqaux); 832 836 833 837 esas2r_comp_list_drain(a, &comp_list); ··· 844 848 { 845 849 esas2r_log(ESAS2R_LOG_INFO, "performing a bus reset"); 846 850 847 - if (!(a->flags & AF_DEGRADED_MODE) 848 - && !(a->flags & (AF_CHPRST_PENDING | AF_DISC_PENDING))) { 849 - esas2r_lock_set_flags(&a->flags, AF_BUSRST_NEEDED); 850 - esas2r_lock_set_flags(&a->flags, AF_BUSRST_PENDING); 851 - esas2r_lock_set_flags(&a->flags, AF_OS_RESET); 851 + if (!test_bit(AF_DEGRADED_MODE, &a->flags) && 852 + !test_bit(AF_CHPRST_PENDING, &a->flags) && 853 + !test_bit(AF_DISC_PENDING, &a->flags)) { 854 + set_bit(AF_BUSRST_NEEDED, &a->flags); 855 + set_bit(AF_BUSRST_PENDING, &a->flags); 856 + set_bit(AF_OS_RESET, &a->flags); 852 857 853 858 esas2r_schedule_tasklet(a); 854 859 }
+14 -14
drivers/scsi/esas2r/esas2r_ioctl.c
··· 347 347 { 348 348 struct atto_vda_ioctl_req *ioctl = &rq->vrq->ioctl; 349 349 350 - if (a->flags & AF_DEGRADED_MODE) 350 + if (test_bit(AF_DEGRADED_MODE, &a->flags)) 351 351 return false; 352 352 353 353 esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge); ··· 463 463 gcc->bios_minor_rev = LOBYTE(HIWORD(a->flash_ver)); 464 464 gcc->bios_build_rev = LOWORD(a->flash_ver); 465 465 466 - if (a->flags2 & AF2_THUNDERLINK) 466 + if (test_bit(AF2_THUNDERLINK, &a->flags2)) 467 467 gcc->cntlr_flags = CSMI_CNTLRF_SAS_HBA 468 468 | CSMI_CNTLRF_SATA_HBA; 469 469 else ··· 485 485 { 486 486 struct atto_csmi_get_cntlr_sts *gcs = &ioctl_csmi->cntlr_sts; 487 487 488 - if (a->flags & AF_DEGRADED_MODE) 488 + if (test_bit(AF_DEGRADED_MODE, &a->flags)) 489 489 gcs->status = CSMI_CNTLR_STS_FAILED; 490 490 else 491 491 gcs->status = CSMI_CNTLR_STS_GOOD; ··· 819 819 820 820 gai->adap_type = ATTO_GAI_AT_ESASRAID2; 821 821 822 - if (a->flags2 & AF2_THUNDERLINK) 822 + if (test_bit(AF2_THUNDERLINK, &a->flags2)) 823 823 gai->adap_type = ATTO_GAI_AT_TLSASHBA; 824 824 825 - if (a->flags & AF_DEGRADED_MODE) 825 + if (test_bit(AF_DEGRADED_MODE, &a->flags)) 826 826 gai->adap_flags |= ATTO_GAI_AF_DEGRADED; 827 827 828 828 gai->adap_flags |= ATTO_GAI_AF_SPT_SUPP | ··· 938 938 u32 total_len = ESAS2R_FWCOREDUMP_SZ; 939 939 940 940 /* Size is zero if a core dump isn't present */ 941 - if (!(a->flags2 & AF2_COREDUMP_SAVED)) 941 + if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2)) 942 942 total_len = 0; 943 943 944 944 if (len > total_len) ··· 960 960 memset(a->fw_coredump_buff, 0, 961 961 ESAS2R_FWCOREDUMP_SZ); 962 962 963 - esas2r_lock_clear_flags(&a->flags2, 964 - AF2_COREDUMP_SAVED); 963 + clear_bit(AF2_COREDUMP_SAVED, &a->flags2); 965 964 } else if (trc->trace_func != ATTO_TRC_TF_GET_INFO) { 966 965 hi->status = ATTO_STS_UNSUPPORTED; 967 966 break; ··· 972 973 trc->total_length = ESAS2R_FWCOREDUMP_SZ; 973 974 974 975 /* Return zero length buffer if core dump not present */ 975 - if (!(a->flags2 & 
AF2_COREDUMP_SAVED)) 976 + if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2)) 976 977 trc->total_length = 0; 977 978 } else { 978 979 hi->status = ATTO_STS_UNSUPPORTED; ··· 1046 1047 cpu_to_le32(FCP_CMND_TA_ORDRD_Q); 1047 1048 else if (spt->flags & ATTO_SPTF_HEAD_OF_Q) 1048 1049 rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_TA_HEAD_Q); 1050 + 1049 1051 1050 1052 if (!esas2r_build_sg_list(a, rq, sgc)) { 1051 1053 hi->status = ATTO_STS_OUT_OF_RSRC; ··· 1139 1139 break; 1140 1140 } 1141 1141 1142 - if (a->flags & AF_CHPRST_NEEDED) 1142 + if (test_bit(AF_CHPRST_NEEDED, &a->flags)) 1143 1143 ac->adap_state = ATTO_AC_AS_RST_SCHED; 1144 - else if (a->flags & AF_CHPRST_PENDING) 1144 + else if (test_bit(AF_CHPRST_PENDING, &a->flags)) 1145 1145 ac->adap_state = ATTO_AC_AS_RST_IN_PROG; 1146 - else if (a->flags & AF_DISC_PENDING) 1146 + else if (test_bit(AF_DISC_PENDING, &a->flags)) 1147 1147 ac->adap_state = ATTO_AC_AS_RST_DISC; 1148 - else if (a->flags & AF_DISABLED) 1148 + else if (test_bit(AF_DISABLED, &a->flags)) 1149 1149 ac->adap_state = ATTO_AC_AS_DISABLED; 1150 - else if (a->flags & AF_DEGRADED_MODE) 1150 + else if (test_bit(AF_DEGRADED_MODE, &a->flags)) 1151 1151 ac->adap_state = ATTO_AC_AS_DEGRADED; 1152 1152 else 1153 1153 ac->adap_state = ATTO_AC_AS_OK;
+17 -17
drivers/scsi/esas2r/esas2r_main.c
··· 889 889 /* Assume success, if it fails we will fix the result later. */ 890 890 cmd->result = DID_OK << 16; 891 891 892 - if (unlikely(a->flags & AF_DEGRADED_MODE)) { 892 + if (unlikely(test_bit(AF_DEGRADED_MODE, &a->flags))) { 893 893 cmd->result = DID_NO_CONNECT << 16; 894 894 cmd->scsi_done(cmd); 895 895 return 0; ··· 1050 1050 1051 1051 esas2r_log(ESAS2R_LOG_INFO, "eh_abort (%p)", cmd); 1052 1052 1053 - if (a->flags & AF_DEGRADED_MODE) { 1053 + if (test_bit(AF_DEGRADED_MODE, &a->flags)) { 1054 1054 cmd->result = DID_ABORT << 16; 1055 1055 1056 1056 scsi_set_resid(cmd, 0); ··· 1131 1131 struct esas2r_adapter *a = 1132 1132 (struct esas2r_adapter *)cmd->device->host->hostdata; 1133 1133 1134 - if (a->flags & AF_DEGRADED_MODE) 1134 + if (test_bit(AF_DEGRADED_MODE, &a->flags)) 1135 1135 return FAILED; 1136 1136 1137 1137 if (host_reset) ··· 1141 1141 1142 1142 /* above call sets the AF_OS_RESET flag. wait for it to clear. */ 1143 1143 1144 - while (a->flags & AF_OS_RESET) { 1144 + while (test_bit(AF_OS_RESET, &a->flags)) { 1145 1145 msleep(10); 1146 1146 1147 - if (a->flags & AF_DEGRADED_MODE) 1147 + if (test_bit(AF_DEGRADED_MODE, &a->flags)) 1148 1148 return FAILED; 1149 1149 } 1150 1150 1151 - if (a->flags & AF_DEGRADED_MODE) 1151 + if (test_bit(AF_DEGRADED_MODE, &a->flags)) 1152 1152 return FAILED; 1153 1153 1154 1154 return SUCCESS; ··· 1176 1176 u8 task_management_status = RS_PENDING; 1177 1177 bool completed; 1178 1178 1179 - if (a->flags & AF_DEGRADED_MODE) 1179 + if (test_bit(AF_DEGRADED_MODE, &a->flags)) 1180 1180 return FAILED; 1181 1181 1182 1182 retry: ··· 1229 1229 msleep(10); 1230 1230 } 1231 1231 1232 - if (a->flags & AF_DEGRADED_MODE) 1232 + if (test_bit(AF_DEGRADED_MODE, &a->flags)) 1233 1233 return FAILED; 1234 1234 1235 1235 if (task_management_status == RS_BUSY) { ··· 1666 1666 { 1667 1667 struct esas2r_adapter *a = (struct esas2r_adapter *)context; 1668 1668 1669 - if (unlikely(a->flags2 & AF2_TIMER_TICK)) { 1670 - 
esas2r_lock_clear_flags(&a->flags2, AF2_TIMER_TICK); 1669 + if (unlikely(test_bit(AF2_TIMER_TICK, &a->flags2))) { 1670 + clear_bit(AF2_TIMER_TICK, &a->flags2); 1671 1671 esas2r_timer_tick(a); 1672 1672 } 1673 1673 1674 - if (likely(a->flags2 & AF2_INT_PENDING)) { 1675 - esas2r_lock_clear_flags(&a->flags2, AF2_INT_PENDING); 1674 + if (likely(test_bit(AF2_INT_PENDING, &a->flags2))) { 1675 + clear_bit(AF2_INT_PENDING, &a->flags2); 1676 1676 esas2r_adapter_interrupt(a); 1677 1677 } 1678 1678 ··· 1680 1680 esas2r_do_tasklet_tasks(a); 1681 1681 1682 1682 if (esas2r_is_tasklet_pending(a) 1683 - || (a->flags2 & AF2_INT_PENDING) 1684 - || (a->flags2 & AF2_TIMER_TICK)) { 1685 - esas2r_lock_clear_flags(&a->flags, AF_TASKLET_SCHEDULED); 1683 + || (test_bit(AF2_INT_PENDING, &a->flags2)) 1684 + || (test_bit(AF2_TIMER_TICK, &a->flags2))) { 1685 + clear_bit(AF_TASKLET_SCHEDULED, &a->flags); 1686 1686 esas2r_schedule_tasklet(a); 1687 1687 } else { 1688 - esas2r_lock_clear_flags(&a->flags, AF_TASKLET_SCHEDULED); 1688 + clear_bit(AF_TASKLET_SCHEDULED, &a->flags); 1689 1689 } 1690 1690 } 1691 1691 ··· 1707 1707 { 1708 1708 struct esas2r_adapter *a = (struct esas2r_adapter *)context; 1709 1709 1710 - esas2r_lock_set_flags(&a->flags2, AF2_TIMER_TICK); 1710 + set_bit(AF2_TIMER_TICK, &a->flags2); 1711 1711 1712 1712 esas2r_schedule_tasklet(a); 1713 1713
+1 -1
drivers/scsi/esas2r/esas2r_targdb.c
··· 86 86 87 87 esas2r_trace_enter(); 88 88 89 - if (a->flags & AF_DISC_PENDING) { 89 + if (test_bit(AF_DISC_PENDING, &a->flags)) { 90 90 esas2r_trace_exit(); 91 91 return; 92 92 }
+6 -6
drivers/scsi/esas2r/esas2r_vda.c
··· 84 84 return false; 85 85 } 86 86 87 - if (a->flags & AF_DEGRADED_MODE) { 87 + if (test_bit(AF_DEGRADED_MODE, &a->flags)) { 88 88 vi->status = ATTO_STS_DEGRADED; 89 89 return false; 90 90 } ··· 310 310 le32_to_cpu(rsp->vda_version); 311 311 cfg->data.init.fw_build = rsp->fw_build; 312 312 313 - snprintf(buf, sizeof(buf), "%1d.%02d", 314 - (int)LOBYTE(le16_to_cpu(rsp->fw_release)), 315 - (int)HIBYTE(le16_to_cpu(rsp->fw_release))); 313 + snprintf(buf, sizeof(buf), "%1.1u.%2.2u", 314 + (int)LOBYTE(le16_to_cpu(rsp->fw_release)), 315 + (int)HIBYTE(le16_to_cpu(rsp->fw_release))); 316 316 317 317 memcpy(&cfg->data.init.fw_release, buf, 318 318 sizeof(cfg->data.init.fw_release)); ··· 389 389 vrq->length = cpu_to_le32(length); 390 390 391 391 if (vrq->length) { 392 - if (a->flags & AF_LEGACY_SGE_MODE) { 392 + if (test_bit(AF_LEGACY_SGE_MODE, &a->flags)) { 393 393 vrq->sg_list_offset = (u8)offsetof( 394 394 struct atto_vda_mgmt_req, sge); 395 395 ··· 427 427 428 428 vrq->length = cpu_to_le32(sizeof(struct atto_vda_ae_data)); 429 429 430 - if (a->flags & AF_LEGACY_SGE_MODE) { 430 + if (test_bit(AF_LEGACY_SGE_MODE, &a->flags)) { 431 431 vrq->sg_list_offset = 432 432 (u8)offsetof(struct atto_vda_ae_req, sge); 433 433 vrq->sge[0].length = cpu_to_le32(SGE_LAST | vrq->length);
+16 -9
drivers/scsi/fcoe/fcoe.c
··· 408 408 } 409 409 410 410 ctlr = fcoe_ctlr_device_priv(ctlr_dev); 411 + ctlr->cdev = ctlr_dev; 411 412 fcoe = fcoe_ctlr_priv(ctlr); 412 413 413 414 dev_hold(netdev); ··· 1441 1440 ctlr = fcoe_to_ctlr(fcoe); 1442 1441 lport = ctlr->lp; 1443 1442 if (unlikely(!lport)) { 1444 - FCOE_NETDEV_DBG(netdev, "Cannot find hba structure"); 1443 + FCOE_NETDEV_DBG(netdev, "Cannot find hba structure\n"); 1445 1444 goto err2; 1446 1445 } 1447 1446 if (!lport->link_up) 1448 1447 goto err2; 1449 1448 1450 - FCOE_NETDEV_DBG(netdev, "skb_info: len:%d data_len:%d head:%p " 1451 - "data:%p tail:%p end:%p sum:%d dev:%s", 1449 + FCOE_NETDEV_DBG(netdev, 1450 + "skb_info: len:%d data_len:%d head:%p data:%p tail:%p end:%p sum:%d dev:%s\n", 1452 1451 skb->len, skb->data_len, skb->head, skb->data, 1453 1452 skb_tail_pointer(skb), skb_end_pointer(skb), 1454 1453 skb->csum, skb->dev ? skb->dev->name : "<NULL>"); 1455 1454 1455 + 1456 + skb = skb_share_check(skb, GFP_ATOMIC); 1457 + 1458 + if (skb == NULL) 1459 + return NET_RX_DROP; 1460 + 1456 1461 eh = eth_hdr(skb); 1457 1462 1458 1463 if (is_fip_mode(ctlr) && 1459 - compare_ether_addr(eh->h_source, ctlr->dest_addr)) { 1464 + !ether_addr_equal(eh->h_source, ctlr->dest_addr)) { 1460 1465 FCOE_NETDEV_DBG(netdev, "wrong source mac address:%pM\n", 1461 1466 eh->h_source); 1462 1467 goto err; ··· 1547 1540 wake_up_process(fps->thread); 1548 1541 spin_unlock(&fps->fcoe_rx_list.lock); 1549 1542 1550 - return 0; 1543 + return NET_RX_SUCCESS; 1551 1544 err: 1552 1545 per_cpu_ptr(lport->stats, get_cpu())->ErrorFrames++; 1553 1546 put_cpu(); 1554 1547 err2: 1555 1548 kfree_skb(skb); 1556 - return -1; 1549 + return NET_RX_DROP; 1557 1550 } 1558 1551 1559 1552 /** ··· 1795 1788 lport = fr->fr_dev; 1796 1789 if (unlikely(!lport)) { 1797 1790 if (skb->destructor != fcoe_percpu_flush_done) 1798 - FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb"); 1791 + FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb\n"); 1799 1792 kfree_skb(skb); 1800 1793 return; 1801 1794 } 
1802 1795 1803 - FCOE_NETDEV_DBG(skb->dev, "skb_info: len:%d data_len:%d " 1804 - "head:%p data:%p tail:%p end:%p sum:%d dev:%s", 1796 + FCOE_NETDEV_DBG(skb->dev, 1797 + "skb_info: len:%d data_len:%d head:%p data:%p tail:%p end:%p sum:%d dev:%s\n", 1805 1798 skb->len, skb->data_len, 1806 1799 skb->head, skb->data, skb_tail_pointer(skb), 1807 1800 skb_end_pointer(skb), skb->csum,
+95 -53
drivers/scsi/fcoe/fcoe_ctlr.c
··· 160 160 } 161 161 EXPORT_SYMBOL(fcoe_ctlr_init); 162 162 163 + /** 164 + * fcoe_sysfs_fcf_add() - Add a fcoe_fcf{,_device} to a fcoe_ctlr{,_device} 165 + * @new: The newly discovered FCF 166 + * 167 + * Called with fip->ctlr_mutex held 168 + */ 163 169 static int fcoe_sysfs_fcf_add(struct fcoe_fcf *new) 164 170 { 165 171 struct fcoe_ctlr *fip = new->fip; 166 - struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip); 167 - struct fcoe_fcf_device temp, *fcf_dev; 168 - int rc = 0; 172 + struct fcoe_ctlr_device *ctlr_dev; 173 + struct fcoe_fcf_device *temp, *fcf_dev; 174 + int rc = -ENOMEM; 169 175 170 176 LIBFCOE_FIP_DBG(fip, "New FCF fab %16.16llx mac %pM\n", 171 177 new->fabric_name, new->fcf_mac); 172 178 173 - mutex_lock(&ctlr_dev->lock); 174 - 175 - temp.fabric_name = new->fabric_name; 176 - temp.switch_name = new->switch_name; 177 - temp.fc_map = new->fc_map; 178 - temp.vfid = new->vfid; 179 - memcpy(temp.mac, new->fcf_mac, ETH_ALEN); 180 - temp.priority = new->pri; 181 - temp.fka_period = new->fka_period; 182 - temp.selected = 0; /* default to unselected */ 183 - 184 - fcf_dev = fcoe_fcf_device_add(ctlr_dev, &temp); 185 - if (unlikely(!fcf_dev)) { 186 - rc = -ENOMEM; 179 + temp = kzalloc(sizeof(*temp), GFP_KERNEL); 180 + if (!temp) 187 181 goto out; 188 - } 182 + 183 + temp->fabric_name = new->fabric_name; 184 + temp->switch_name = new->switch_name; 185 + temp->fc_map = new->fc_map; 186 + temp->vfid = new->vfid; 187 + memcpy(temp->mac, new->fcf_mac, ETH_ALEN); 188 + temp->priority = new->pri; 189 + temp->fka_period = new->fka_period; 190 + temp->selected = 0; /* default to unselected */ 189 191 190 192 /* 191 - * The fcoe_sysfs layer can return a CONNECTED fcf that 192 - * has a priv (fcf was never deleted) or a CONNECTED fcf 193 - * that doesn't have a priv (fcf was deleted). However, 194 - * libfcoe will always delete FCFs before trying to add 195 - * them. 
This is ensured because both recv_adv and 196 - * age_fcfs are protected by the the fcoe_ctlr's mutex. 197 - * This means that we should never get a FCF with a 198 - * non-NULL priv pointer. 193 + * If ctlr_dev doesn't exist then it means we're a libfcoe user 194 + * who doesn't use fcoe_syfs and didn't allocate a fcoe_ctlr_device. 195 + * fnic would be an example of a driver with this behavior. In this 196 + * case we want to add the fcoe_fcf to the fcoe_ctlr list, but we 197 + * don't want to make sysfs changes. 199 198 */ 200 - BUG_ON(fcf_dev->priv); 201 199 202 - fcf_dev->priv = new; 203 - new->fcf_dev = fcf_dev; 200 + ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip); 201 + if (ctlr_dev) { 202 + mutex_lock(&ctlr_dev->lock); 203 + fcf_dev = fcoe_fcf_device_add(ctlr_dev, temp); 204 + if (unlikely(!fcf_dev)) { 205 + rc = -ENOMEM; 206 + mutex_unlock(&ctlr_dev->lock); 207 + goto out; 208 + } 209 + 210 + /* 211 + * The fcoe_sysfs layer can return a CONNECTED fcf that 212 + * has a priv (fcf was never deleted) or a CONNECTED fcf 213 + * that doesn't have a priv (fcf was deleted). However, 214 + * libfcoe will always delete FCFs before trying to add 215 + * them. This is ensured because both recv_adv and 216 + * age_fcfs are protected by the the fcoe_ctlr's mutex. 217 + * This means that we should never get a FCF with a 218 + * non-NULL priv pointer. 
219 + */ 220 + BUG_ON(fcf_dev->priv); 221 + 222 + fcf_dev->priv = new; 223 + new->fcf_dev = fcf_dev; 224 + mutex_unlock(&ctlr_dev->lock); 225 + } 204 226 205 227 list_add(&new->list, &fip->fcfs); 206 228 fip->fcf_count++; 229 + rc = 0; 207 230 208 231 out: 209 - mutex_unlock(&ctlr_dev->lock); 232 + kfree(temp); 210 233 return rc; 211 234 } 212 235 236 + /** 237 + * fcoe_sysfs_fcf_del() - Remove a fcoe_fcf{,_device} to a fcoe_ctlr{,_device} 238 + * @new: The FCF to be removed 239 + * 240 + * Called with fip->ctlr_mutex held 241 + */ 213 242 static void fcoe_sysfs_fcf_del(struct fcoe_fcf *new) 214 243 { 215 244 struct fcoe_ctlr *fip = new->fip; 216 - struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip); 245 + struct fcoe_ctlr_device *cdev; 217 246 struct fcoe_fcf_device *fcf_dev; 218 247 219 248 list_del(&new->list); 220 249 fip->fcf_count--; 221 250 222 - mutex_lock(&ctlr_dev->lock); 223 - 224 - fcf_dev = fcoe_fcf_to_fcf_dev(new); 225 - WARN_ON(!fcf_dev); 226 - new->fcf_dev = NULL; 227 - fcoe_fcf_device_delete(fcf_dev); 228 - kfree(new); 229 - 230 - mutex_unlock(&ctlr_dev->lock); 251 + /* 252 + * If ctlr_dev doesn't exist then it means we're a libfcoe user 253 + * who doesn't use fcoe_syfs and didn't allocate a fcoe_ctlr_device 254 + * or a fcoe_fcf_device. 255 + * 256 + * fnic would be an example of a driver with this behavior. In this 257 + * case we want to remove the fcoe_fcf from the fcoe_ctlr list (above), 258 + * but we don't want to make sysfs changes. 
259 + */ 260 + cdev = fcoe_ctlr_to_ctlr_dev(fip); 261 + if (cdev) { 262 + mutex_lock(&cdev->lock); 263 + fcf_dev = fcoe_fcf_to_fcf_dev(new); 264 + WARN_ON(!fcf_dev); 265 + new->fcf_dev = NULL; 266 + fcoe_fcf_device_delete(fcf_dev); 267 + kfree(new); 268 + mutex_unlock(&cdev->lock); 269 + } 231 270 } 232 271 233 272 /** ··· 339 300 spin_unlock_bh(&fip->ctlr_lock); 340 301 sel = fip->sel_fcf; 341 302 342 - if (sel && !compare_ether_addr(sel->fcf_mac, fip->dest_addr)) 303 + if (sel && ether_addr_equal(sel->fcf_mac, fip->dest_addr)) 343 304 goto unlock; 344 305 if (!is_zero_ether_addr(fip->dest_addr)) { 345 306 printk(KERN_NOTICE "libfcoe: host%d: " ··· 1039 1000 if (fcf->switch_name == new.switch_name && 1040 1001 fcf->fabric_name == new.fabric_name && 1041 1002 fcf->fc_map == new.fc_map && 1042 - compare_ether_addr(fcf->fcf_mac, new.fcf_mac) == 0) { 1003 + ether_addr_equal(fcf->fcf_mac, new.fcf_mac)) { 1043 1004 found = 1; 1044 1005 break; 1045 1006 } ··· 1379 1340 mp = (struct fip_mac_desc *)desc; 1380 1341 if (dlen < sizeof(*mp)) 1381 1342 goto err; 1382 - if (compare_ether_addr(mp->fd_mac, fcf->fcf_mac)) 1343 + if (!ether_addr_equal(mp->fd_mac, fcf->fcf_mac)) 1383 1344 goto err; 1384 1345 desc_mask &= ~BIT(FIP_DT_MAC); 1385 1346 break; ··· 1457 1418 * 'port_id' is already validated, check MAC address and 1458 1419 * wwpn 1459 1420 */ 1460 - if (compare_ether_addr(fip->get_src_addr(vn_port), 1461 - vp->fd_mac) != 0 || 1421 + if (!ether_addr_equal(fip->get_src_addr(vn_port), 1422 + vp->fd_mac) || 1462 1423 get_unaligned_be64(&vp->fd_wwpn) != 1463 1424 vn_port->wwpn) 1464 1425 continue; ··· 1492 1453 */ 1493 1454 void fcoe_ctlr_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) 1494 1455 { 1456 + skb = skb_share_check(skb, GFP_ATOMIC); 1457 + if (!skb) 1458 + return; 1495 1459 skb_queue_tail(&fip->fip_recv_list, skb); 1496 1460 schedule_work(&fip->recv_work); 1497 1461 } ··· 1521 1479 goto drop; 1522 1480 eh = eth_hdr(skb); 1523 1481 if (fip->mode == FIP_MODE_VN2VN) { 
1524 - if (compare_ether_addr(eh->h_dest, fip->ctl_src_addr) && 1525 - compare_ether_addr(eh->h_dest, fcoe_all_vn2vn) && 1526 - compare_ether_addr(eh->h_dest, fcoe_all_p2p)) 1482 + if (!ether_addr_equal(eh->h_dest, fip->ctl_src_addr) && 1483 + !ether_addr_equal(eh->h_dest, fcoe_all_vn2vn) && 1484 + !ether_addr_equal(eh->h_dest, fcoe_all_p2p)) 1527 1485 goto drop; 1528 - } else if (compare_ether_addr(eh->h_dest, fip->ctl_src_addr) && 1529 - compare_ether_addr(eh->h_dest, fcoe_all_enode)) 1486 + } else if (!ether_addr_equal(eh->h_dest, fip->ctl_src_addr) && 1487 + !ether_addr_equal(eh->h_dest, fcoe_all_enode)) 1530 1488 goto drop; 1531 1489 fiph = (struct fip_header *)skb->data; 1532 1490 op = ntohs(fiph->fip_op); ··· 1898 1856 * address_mode flag to use FC_OUI-based Ethernet DA. 1899 1857 * Otherwise we use the FCoE gateway addr 1900 1858 */ 1901 - if (!compare_ether_addr(sa, (u8[6])FC_FCOE_FLOGI_MAC)) { 1859 + if (ether_addr_equal(sa, (u8[6])FC_FCOE_FLOGI_MAC)) { 1902 1860 fcoe_ctlr_map_dest(fip); 1903 1861 } else { 1904 1862 memcpy(fip->dest_addr, sa, ETH_ALEN); ··· 2867 2825 * disabled, so that should ensure that this routine is only called 2868 2826 * when nothing is happening. 2869 2827 */ 2870 - void fcoe_ctlr_mode_set(struct fc_lport *lport, struct fcoe_ctlr *fip, 2871 - enum fip_state fip_mode) 2828 + static void fcoe_ctlr_mode_set(struct fc_lport *lport, struct fcoe_ctlr *fip, 2829 + enum fip_state fip_mode) 2872 2830 { 2873 2831 void *priv; 2874 2832
+7 -7
drivers/scsi/fcoe/fcoe_sysfs.c
··· 300 300 301 301 switch (ctlr->enabled) { 302 302 case FCOE_CTLR_ENABLED: 303 - LIBFCOE_SYSFS_DBG(ctlr, "Cannot change mode when enabled."); 303 + LIBFCOE_SYSFS_DBG(ctlr, "Cannot change mode when enabled.\n"); 304 304 return -EBUSY; 305 305 case FCOE_CTLR_DISABLED: 306 306 if (!ctlr->f->set_fcoe_ctlr_mode) { 307 307 LIBFCOE_SYSFS_DBG(ctlr, 308 - "Mode change not supported by LLD."); 308 + "Mode change not supported by LLD.\n"); 309 309 return -ENOTSUPP; 310 310 } 311 311 312 312 ctlr->mode = fcoe_parse_mode(mode); 313 313 if (ctlr->mode == FIP_CONN_TYPE_UNKNOWN) { 314 - LIBFCOE_SYSFS_DBG(ctlr, 315 - "Unknown mode %s provided.", buf); 314 + LIBFCOE_SYSFS_DBG(ctlr, "Unknown mode %s provided.\n", 315 + buf); 316 316 return -EINVAL; 317 317 } 318 318 319 319 ctlr->f->set_fcoe_ctlr_mode(ctlr); 320 - LIBFCOE_SYSFS_DBG(ctlr, "Mode changed to %s.", buf); 320 + LIBFCOE_SYSFS_DBG(ctlr, "Mode changed to %s.\n", buf); 321 321 322 322 return count; 323 323 case FCOE_CTLR_UNUSED: 324 324 default: 325 - LIBFCOE_SYSFS_DBG(ctlr, "Mode change not supported."); 325 + LIBFCOE_SYSFS_DBG(ctlr, "Mode change not supported.\n"); 326 326 return -ENOTSUPP; 327 327 }; 328 328 } ··· 657 657 if (new->switch_name == old->switch_name && 658 658 new->fabric_name == old->fabric_name && 659 659 new->fc_map == old->fc_map && 660 - compare_ether_addr(new->mac, old->mac) == 0) 660 + ether_addr_equal(new->mac, old->mac)) 661 661 return 1; 662 662 return 0; 663 663 }
+9 -1
drivers/scsi/fnic/fnic.h
··· 27 27 #include "fnic_io.h" 28 28 #include "fnic_res.h" 29 29 #include "fnic_trace.h" 30 + #include "fnic_stats.h" 30 31 #include "vnic_dev.h" 31 32 #include "vnic_wq.h" 32 33 #include "vnic_rq.h" ··· 39 38 40 39 #define DRV_NAME "fnic" 41 40 #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" 42 - #define DRV_VERSION "1.5.0.23" 41 + #define DRV_VERSION "1.5.0.45" 43 42 #define PFX DRV_NAME ": " 44 43 #define DFX DRV_NAME "%d: " 45 44 ··· 232 231 233 232 unsigned int wq_count; 234 233 unsigned int cq_count; 234 + 235 + struct dentry *fnic_stats_debugfs_host; 236 + struct dentry *fnic_stats_debugfs_file; 237 + struct dentry *fnic_reset_debugfs_file; 238 + unsigned int reset_stats; 239 + atomic64_t io_cmpl_skip; 240 + struct fnic_stats fnic_stats; 235 241 236 242 u32 vlan_hw_insert:1; /* let hw insert the tag */ 237 243 u32 in_remove:1; /* fnic device in removal */
+379 -11
drivers/scsi/fnic/fnic_debugfs.c
··· 23 23 static struct dentry *fnic_trace_debugfs_root; 24 24 static struct dentry *fnic_trace_debugfs_file; 25 25 static struct dentry *fnic_trace_enable; 26 + static struct dentry *fnic_stats_debugfs_root; 27 + 28 + /* 29 + * fnic_debugfs_init - Initialize debugfs for fnic debug logging 30 + * 31 + * Description: 32 + * When Debugfs is configured this routine sets up the fnic debugfs 33 + * file system. If not already created, this routine will create the 34 + * fnic directory and statistics directory for trace buffer and 35 + * stats logging. 36 + */ 37 + int fnic_debugfs_init(void) 38 + { 39 + int rc = -1; 40 + fnic_trace_debugfs_root = debugfs_create_dir("fnic", NULL); 41 + if (!fnic_trace_debugfs_root) { 42 + printk(KERN_DEBUG "Cannot create debugfs root\n"); 43 + return rc; 44 + } 45 + 46 + if (!fnic_trace_debugfs_root) { 47 + printk(KERN_DEBUG 48 + "fnic root directory doesn't exist in debugfs\n"); 49 + return rc; 50 + } 51 + 52 + fnic_stats_debugfs_root = debugfs_create_dir("statistics", 53 + fnic_trace_debugfs_root); 54 + if (!fnic_stats_debugfs_root) { 55 + printk(KERN_DEBUG "Cannot create Statistics directory\n"); 56 + return rc; 57 + } 58 + 59 + rc = 0; 60 + return rc; 61 + } 62 + 63 + /* 64 + * fnic_debugfs_terminate - Tear down debugfs infrastructure 65 + * 66 + * Description: 67 + * When Debugfs is configured this routine removes debugfs file system 68 + * elements that are specific to fnic. 69 + */ 70 + void fnic_debugfs_terminate(void) 71 + { 72 + debugfs_remove(fnic_stats_debugfs_root); 73 + fnic_stats_debugfs_root = NULL; 74 + 75 + debugfs_remove(fnic_trace_debugfs_root); 76 + fnic_trace_debugfs_root = NULL; 77 + } 26 78 27 79 /* 28 80 * fnic_trace_ctrl_open - Open the trace_enable file ··· 293 241 * Description: 294 242 * When Debugfs is configured this routine sets up the fnic debugfs 295 243 * file system. If not already created, this routine will create the 296 - * fnic directory. 
It will create file trace to log fnic trace buffer 297 - * output into debugfs and it will also create file trace_enable to 298 - * control enable/disable of trace logging into trace buffer. 244 + * create file trace to log fnic trace buffer output into debugfs and 245 + * it will also create file trace_enable to control enable/disable of 246 + * trace logging into trace buffer. 299 247 */ 300 248 int fnic_trace_debugfs_init(void) 301 249 { 302 250 int rc = -1; 303 - fnic_trace_debugfs_root = debugfs_create_dir("fnic", NULL); 304 251 if (!fnic_trace_debugfs_root) { 305 - printk(KERN_DEBUG "Cannot create debugfs root\n"); 252 + printk(KERN_DEBUG 253 + "FNIC Debugfs root directory doesn't exist\n"); 306 254 return rc; 307 255 } 308 256 fnic_trace_enable = debugfs_create_file("tracing_enable", ··· 311 259 NULL, &fnic_trace_ctrl_fops); 312 260 313 261 if (!fnic_trace_enable) { 314 - printk(KERN_DEBUG "Cannot create trace_enable file" 315 - " under debugfs"); 262 + printk(KERN_DEBUG 263 + "Cannot create trace_enable file under debugfs\n"); 316 264 return rc; 317 265 } 318 266 ··· 323 271 &fnic_trace_debugfs_fops); 324 272 325 273 if (!fnic_trace_debugfs_file) { 326 - printk(KERN_DEBUG "Cannot create trace file under debugfs"); 274 + printk(KERN_DEBUG 275 + "Cannot create trace file under debugfs\n"); 327 276 return rc; 328 277 } 329 278 rc = 0; ··· 348 295 debugfs_remove(fnic_trace_enable); 349 296 fnic_trace_enable = NULL; 350 297 } 351 - if (fnic_trace_debugfs_root) { 352 - debugfs_remove(fnic_trace_debugfs_root); 353 - fnic_trace_debugfs_root = NULL; 298 + } 299 + 300 + /* 301 + * fnic_reset_stats_open - Open the reset_stats file 302 + * @inode: The inode pointer. 303 + * @file: The file pointer to attach the stats reset flag. 304 + * 305 + * Description: 306 + * This routine opens a debugsfs file reset_stats and stores i_private data 307 + * to debug structure to retrieve later for while performing other 308 + * file oprations. 
309 + * 310 + * Returns: 311 + * This function returns zero if successful. 312 + */ 313 + static int fnic_reset_stats_open(struct inode *inode, struct file *file) 314 + { 315 + struct stats_debug_info *debug; 316 + 317 + debug = kzalloc(sizeof(struct stats_debug_info), GFP_KERNEL); 318 + if (!debug) 319 + return -ENOMEM; 320 + 321 + debug->i_private = inode->i_private; 322 + 323 + file->private_data = debug; 324 + 325 + return 0; 326 + } 327 + 328 + /* 329 + * fnic_reset_stats_read - Read a reset_stats debugfs file 330 + * @filp: The file pointer to read from. 331 + * @ubuf: The buffer to copy the data to. 332 + * @cnt: The number of bytes to read. 333 + * @ppos: The position in the file to start reading from. 334 + * 335 + * Description: 336 + * This routine reads value of variable reset_stats 337 + * and stores into local @buf. It will start reading file at @ppos and 338 + * copy up to @cnt of data to @ubuf from @buf. 339 + * 340 + * Returns: 341 + * This function returns the amount of data that was read. 342 + */ 343 + static ssize_t fnic_reset_stats_read(struct file *file, 344 + char __user *ubuf, 345 + size_t cnt, loff_t *ppos) 346 + { 347 + struct stats_debug_info *debug = file->private_data; 348 + struct fnic *fnic = (struct fnic *)debug->i_private; 349 + char buf[64]; 350 + int len; 351 + 352 + len = sprintf(buf, "%u\n", fnic->reset_stats); 353 + 354 + return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); 355 + } 356 + 357 + /* 358 + * fnic_reset_stats_write - Write to reset_stats debugfs file 359 + * @filp: The file pointer to write from. 360 + * @ubuf: The buffer to copy the data from. 361 + * @cnt: The number of bytes to write. 362 + * @ppos: The position in the file to start writing to. 363 + * 364 + * Description: 365 + * This routine writes data from user buffer @ubuf to buffer @buf and 366 + * resets cumulative stats of fnic. 367 + * 368 + * Returns: 369 + * This function returns the amount of data that was written. 
370 + */ 371 + static ssize_t fnic_reset_stats_write(struct file *file, 372 + const char __user *ubuf, 373 + size_t cnt, loff_t *ppos) 374 + { 375 + struct stats_debug_info *debug = file->private_data; 376 + struct fnic *fnic = (struct fnic *)debug->i_private; 377 + struct fnic_stats *stats = &fnic->fnic_stats; 378 + u64 *io_stats_p = (u64 *)&stats->io_stats; 379 + u64 *fw_stats_p = (u64 *)&stats->fw_stats; 380 + char buf[64]; 381 + unsigned long val; 382 + int ret; 383 + 384 + if (cnt >= sizeof(buf)) 385 + return -EINVAL; 386 + 387 + if (copy_from_user(&buf, ubuf, cnt)) 388 + return -EFAULT; 389 + 390 + buf[cnt] = 0; 391 + 392 + ret = kstrtoul(buf, 10, &val); 393 + if (ret < 0) 394 + return ret; 395 + 396 + fnic->reset_stats = val; 397 + 398 + if (fnic->reset_stats) { 399 + /* Skip variable is used to avoid descrepancies to Num IOs 400 + * and IO Completions stats. Skip incrementing No IO Compls 401 + * for pending active IOs after reset stats 402 + */ 403 + atomic64_set(&fnic->io_cmpl_skip, 404 + atomic64_read(&stats->io_stats.active_ios)); 405 + memset(&stats->abts_stats, 0, sizeof(struct abort_stats)); 406 + memset(&stats->term_stats, 0, 407 + sizeof(struct terminate_stats)); 408 + memset(&stats->reset_stats, 0, sizeof(struct reset_stats)); 409 + memset(&stats->misc_stats, 0, sizeof(struct misc_stats)); 410 + memset(&stats->vlan_stats, 0, sizeof(struct vlan_stats)); 411 + memset(io_stats_p+1, 0, 412 + sizeof(struct io_path_stats) - sizeof(u64)); 413 + memset(fw_stats_p+1, 0, 414 + sizeof(struct fw_stats) - sizeof(u64)); 354 415 } 416 + 417 + (*ppos)++; 418 + return cnt; 419 + } 420 + 421 + /* 422 + * fnic_reset_stats_release - Release the buffer used to store 423 + * debugfs file data 424 + * @inode: The inode pointer 425 + * @file: The file pointer that contains the buffer to release 426 + * 427 + * Description: 428 + * This routine frees the buffer that was allocated when the debugfs 429 + * file was opened. 
430 + * 431 + * Returns: 432 + * This function returns zero. 433 + */ 434 + static int fnic_reset_stats_release(struct inode *inode, 435 + struct file *file) 436 + { 437 + struct stats_debug_info *debug = file->private_data; 438 + kfree(debug); 439 + return 0; 440 + } 441 + 442 + /* 443 + * fnic_stats_debugfs_open - Open the stats file for specific host 444 + * and get fnic stats. 445 + * @inode: The inode pointer. 446 + * @file: The file pointer to attach the specific host statistics. 447 + * 448 + * Description: 449 + * This routine opens a debugsfs file stats of specific host and print 450 + * fnic stats. 451 + * 452 + * Returns: 453 + * This function returns zero if successful. 454 + */ 455 + static int fnic_stats_debugfs_open(struct inode *inode, 456 + struct file *file) 457 + { 458 + struct fnic *fnic = inode->i_private; 459 + struct fnic_stats *fnic_stats = &fnic->fnic_stats; 460 + struct stats_debug_info *debug; 461 + int buf_size = 2 * PAGE_SIZE; 462 + 463 + debug = kzalloc(sizeof(struct stats_debug_info), GFP_KERNEL); 464 + if (!debug) 465 + return -ENOMEM; 466 + 467 + debug->debug_buffer = vmalloc(buf_size); 468 + if (!debug->debug_buffer) { 469 + kfree(debug); 470 + return -ENOMEM; 471 + } 472 + 473 + debug->buf_size = buf_size; 474 + memset((void *)debug->debug_buffer, 0, buf_size); 475 + debug->buffer_len = fnic_get_stats_data(debug, fnic_stats); 476 + 477 + file->private_data = debug; 478 + 479 + return 0; 480 + } 481 + 482 + /* 483 + * fnic_stats_debugfs_read - Read a debugfs file 484 + * @file: The file pointer to read from. 485 + * @ubuf: The buffer to copy the data to. 486 + * @nbytes: The number of bytes to read. 487 + * @pos: The position in the file to start reading from. 488 + * 489 + * Description: 490 + * This routine reads data from the buffer indicated in the private_data 491 + * field of @file. It will start reading at @pos and copy up to @nbytes of 492 + * data to @ubuf. 
493 + * 494 + * Returns: 495 + * This function returns the amount of data that was read (this could be 496 + * less than @nbytes if the end of the file was reached). 497 + */ 498 + static ssize_t fnic_stats_debugfs_read(struct file *file, 499 + char __user *ubuf, 500 + size_t nbytes, 501 + loff_t *pos) 502 + { 503 + struct stats_debug_info *debug = file->private_data; 504 + int rc = 0; 505 + rc = simple_read_from_buffer(ubuf, nbytes, pos, 506 + debug->debug_buffer, 507 + debug->buffer_len); 508 + return rc; 509 + } 510 + 511 + /* 512 + * fnic_stats_stats_release - Release the buffer used to store 513 + * debugfs file data 514 + * @inode: The inode pointer 515 + * @file: The file pointer that contains the buffer to release 516 + * 517 + * Description: 518 + * This routine frees the buffer that was allocated when the debugfs 519 + * file was opened. 520 + * 521 + * Returns: 522 + * This function returns zero. 523 + */ 524 + static int fnic_stats_debugfs_release(struct inode *inode, 525 + struct file *file) 526 + { 527 + struct stats_debug_info *debug = file->private_data; 528 + vfree(debug->debug_buffer); 529 + kfree(debug); 530 + return 0; 531 + } 532 + 533 + static const struct file_operations fnic_stats_debugfs_fops = { 534 + .owner = THIS_MODULE, 535 + .open = fnic_stats_debugfs_open, 536 + .read = fnic_stats_debugfs_read, 537 + .release = fnic_stats_debugfs_release, 538 + }; 539 + 540 + static const struct file_operations fnic_reset_debugfs_fops = { 541 + .owner = THIS_MODULE, 542 + .open = fnic_reset_stats_open, 543 + .read = fnic_reset_stats_read, 544 + .write = fnic_reset_stats_write, 545 + .release = fnic_reset_stats_release, 546 + }; 547 + 548 + /* 549 + * fnic_stats_init - Initialize stats struct and create stats file per fnic 550 + * 551 + * Description: 552 + * When Debugfs is configured this routine sets up the stats file per fnic 553 + * It will create file stats and reset_stats under statistics/host# directory 554 + * to log per fnic stats. 
555 + */ 556 + int fnic_stats_debugfs_init(struct fnic *fnic) 557 + { 558 + int rc = -1; 559 + char name[16]; 560 + 561 + snprintf(name, sizeof(name), "host%d", fnic->lport->host->host_no); 562 + 563 + if (!fnic_stats_debugfs_root) { 564 + printk(KERN_DEBUG "fnic_stats root doesn't exist\n"); 565 + return rc; 566 + } 567 + fnic->fnic_stats_debugfs_host = debugfs_create_dir(name, 568 + fnic_stats_debugfs_root); 569 + if (!fnic->fnic_stats_debugfs_host) { 570 + printk(KERN_DEBUG "Cannot create host directory\n"); 571 + return rc; 572 + } 573 + 574 + fnic->fnic_stats_debugfs_file = debugfs_create_file("stats", 575 + S_IFREG|S_IRUGO|S_IWUSR, 576 + fnic->fnic_stats_debugfs_host, 577 + fnic, 578 + &fnic_stats_debugfs_fops); 579 + if (!fnic->fnic_stats_debugfs_file) { 580 + printk(KERN_DEBUG "Cannot create host stats file\n"); 581 + return rc; 582 + } 583 + 584 + fnic->fnic_reset_debugfs_file = debugfs_create_file("reset_stats", 585 + S_IFREG|S_IRUGO|S_IWUSR, 586 + fnic->fnic_stats_debugfs_host, 587 + fnic, 588 + &fnic_reset_debugfs_fops); 589 + if (!fnic->fnic_reset_debugfs_file) { 590 + printk(KERN_DEBUG "Cannot create host stats file\n"); 591 + return rc; 592 + } 593 + rc = 0; 594 + return rc; 595 + } 596 + 597 + /* 598 + * fnic_stats_debugfs_remove - Tear down debugfs infrastructure of stats 599 + * 600 + * Description: 601 + * When Debugfs is configured this routine removes debugfs file system 602 + * elements that are specific to fnic stats. 603 + */ 604 + void fnic_stats_debugfs_remove(struct fnic *fnic) 605 + { 606 + if (!fnic) 607 + return; 608 + 609 + debugfs_remove(fnic->fnic_stats_debugfs_file); 610 + fnic->fnic_stats_debugfs_file = NULL; 611 + 612 + debugfs_remove(fnic->fnic_reset_debugfs_file); 613 + fnic->fnic_reset_debugfs_file = NULL; 614 + 615 + debugfs_remove(fnic->fnic_stats_debugfs_host); 616 + fnic->fnic_stats_debugfs_host = NULL; 355 617 }
+15 -3
drivers/scsi/fnic/fnic_fcs.c
··· 302 302 static void fnic_fcoe_send_vlan_req(struct fnic *fnic) 303 303 { 304 304 struct fcoe_ctlr *fip = &fnic->ctlr; 305 + struct fnic_stats *fnic_stats = &fnic->fnic_stats; 305 306 struct sk_buff *skb; 306 307 char *eth_fr; 307 308 int fr_len; ··· 338 337 vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME; 339 338 vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW; 340 339 put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn); 340 + atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs); 341 341 342 342 skb_put(skb, sizeof(*vlan)); 343 343 skb->protocol = htons(ETH_P_FIP); ··· 356 354 struct fcoe_ctlr *fip = &fnic->ctlr; 357 355 struct fip_header *fiph; 358 356 struct fip_desc *desc; 357 + struct fnic_stats *fnic_stats = &fnic->fnic_stats; 359 358 u16 vid; 360 359 size_t rlen; 361 360 size_t dlen; ··· 405 402 /* any VLAN descriptors present ? */ 406 403 if (list_empty(&fnic->vlans)) { 407 404 /* retry from timer */ 405 + atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID); 408 406 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, 409 407 "No VLAN descriptors in FIP VLAN response\n"); 410 408 spin_unlock_irqrestore(&fnic->vlans_lock, flags); ··· 537 533 void fnic_handle_fip_frame(struct work_struct *work) 538 534 { 539 535 struct fnic *fnic = container_of(work, struct fnic, fip_frame_work); 536 + struct fnic_stats *fnic_stats = &fnic->fnic_stats; 540 537 unsigned long flags; 541 538 struct sk_buff *skb; 542 539 struct ethhdr *eh; ··· 572 567 * fcf's & restart from scratch 573 568 */ 574 569 if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) { 570 + atomic64_inc( 571 + &fnic_stats->vlan_stats.flogi_rejects); 575 572 shost_printk(KERN_INFO, fnic->lport->host, 576 573 "Trigger a Link down - VLAN Disc\n"); 577 574 fcoe_ctlr_link_down(&fnic->ctlr); ··· 658 651 659 652 if (is_zero_ether_addr(new)) 660 653 new = ctl; 661 - if (!compare_ether_addr(data, new)) 654 + if (ether_addr_equal(data, new)) 662 655 return; 663 656 FNIC_FCS_DBG(KERN_DEBUG, 
fnic->lport->host, "update_mac %pM\n", new); 664 - if (!is_zero_ether_addr(data) && compare_ether_addr(data, ctl)) 657 + if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl)) 665 658 vnic_dev_del_addr(fnic->vdev, data); 666 659 memcpy(data, new, ETH_ALEN); 667 - if (compare_ether_addr(new, ctl)) 660 + if (!ether_addr_equal(new, ctl)) 668 661 vnic_dev_add_addr(fnic->vdev, new); 669 662 } 670 663 ··· 760 753 struct fnic *fnic = vnic_dev_priv(rq->vdev); 761 754 struct sk_buff *skb; 762 755 struct fc_frame *fp; 756 + struct fnic_stats *fnic_stats = &fnic->fnic_stats; 763 757 unsigned int eth_hdrs_stripped; 764 758 u8 type, color, eop, sop, ingress_port, vlan_stripped; 765 759 u8 fcoe = 0, fcoe_sof, fcoe_eof; ··· 811 803 eth_hdrs_stripped = 0; 812 804 skb_trim(skb, bytes_written); 813 805 if (!fcs_ok) { 806 + atomic64_inc(&fnic_stats->misc_stats.frame_errors); 814 807 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, 815 808 "fcs error. dropping packet.\n"); 816 809 goto drop; ··· 827 818 } 828 819 829 820 if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) { 821 + atomic64_inc(&fnic_stats->misc_stats.frame_errors); 830 822 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, 831 823 "fnic rq_cmpl fcoe x%x fcsok x%x" 832 824 " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err" ··· 1215 1205 { 1216 1206 unsigned long flags; 1217 1207 struct fcoe_vlan *vlan; 1208 + struct fnic_stats *fnic_stats = &fnic->fnic_stats; 1218 1209 u64 sol_time; 1219 1210 1220 1211 spin_lock_irqsave(&fnic->fnic_lock, flags); ··· 1284 1273 vlan->state = FIP_VLAN_SENT; /* sent now */ 1285 1274 } 1286 1275 spin_unlock_irqrestore(&fnic->vlans_lock, flags); 1276 + atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count); 1287 1277 vlan->sol_count++; 1288 1278 sol_time = jiffies + msecs_to_jiffies 1289 1279 (FCOE_CTLR_START_DELAY);
+18
drivers/scsi/fnic/fnic_isr.c
··· 37 37 if (!pba) 38 38 return IRQ_NONE; 39 39 40 + fnic->fnic_stats.misc_stats.last_isr_time = jiffies; 41 + atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count); 42 + 40 43 if (pba & (1 << FNIC_INTX_NOTIFY)) { 41 44 vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_NOTIFY]); 42 45 fnic_handle_link_event(fnic); ··· 69 66 struct fnic *fnic = data; 70 67 unsigned long work_done = 0; 71 68 69 + fnic->fnic_stats.misc_stats.last_isr_time = jiffies; 70 + atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count); 71 + 72 72 work_done += fnic_wq_copy_cmpl_handler(fnic, -1); 73 73 work_done += fnic_wq_cmpl_handler(fnic, -1); 74 74 work_done += fnic_rq_cmpl_handler(fnic, -1); ··· 89 83 struct fnic *fnic = data; 90 84 unsigned long rq_work_done = 0; 91 85 86 + fnic->fnic_stats.misc_stats.last_isr_time = jiffies; 87 + atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count); 88 + 92 89 rq_work_done = fnic_rq_cmpl_handler(fnic, -1); 93 90 vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_RQ], 94 91 rq_work_done, ··· 106 97 struct fnic *fnic = data; 107 98 unsigned long wq_work_done = 0; 108 99 100 + fnic->fnic_stats.misc_stats.last_isr_time = jiffies; 101 + atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count); 102 + 109 103 wq_work_done = fnic_wq_cmpl_handler(fnic, -1); 110 104 vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ], 111 105 wq_work_done, ··· 122 110 struct fnic *fnic = data; 123 111 unsigned long wq_copy_work_done = 0; 124 112 113 + fnic->fnic_stats.misc_stats.last_isr_time = jiffies; 114 + atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count); 115 + 125 116 wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, -1); 126 117 vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ_COPY], 127 118 wq_copy_work_done, ··· 136 121 static irqreturn_t fnic_isr_msix_err_notify(int irq, void *data) 137 122 { 138 123 struct fnic *fnic = data; 124 + 125 + fnic->fnic_stats.misc_stats.last_isr_time = jiffies; 126 + atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count); 139 127 140 128 
vnic_intr_return_all_credits(&fnic->intr[FNIC_MSIX_ERR_NOTIFY]); 141 129 fnic_log_q_error(fnic);
+19
drivers/scsi/fnic/fnic_main.c
··· 556 556 557 557 host->transportt = fnic_fc_transport; 558 558 559 + err = fnic_stats_debugfs_init(fnic); 560 + if (err) { 561 + shost_printk(KERN_ERR, fnic->lport->host, 562 + "Failed to initialize debugfs for stats\n"); 563 + fnic_stats_debugfs_remove(fnic); 564 + } 565 + 559 566 /* Setup PCI resources */ 560 567 pci_set_drvdata(pdev, fnic); 561 568 ··· 924 917 err_out_disable_device: 925 918 pci_disable_device(pdev); 926 919 err_out_free_hba: 920 + fnic_stats_debugfs_remove(fnic); 927 921 scsi_host_put(lp->host); 928 922 err_out: 929 923 return err; ··· 977 969 978 970 fcoe_ctlr_destroy(&fnic->ctlr); 979 971 fc_lport_destroy(lp); 972 + fnic_stats_debugfs_remove(fnic); 980 973 981 974 /* 982 975 * This stops the fnic device, masks all interrupts. Completed ··· 1022 1013 int err = 0; 1023 1014 1024 1015 printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION); 1016 + 1017 + /* Create debugfs entries for fnic */ 1018 + err = fnic_debugfs_init(); 1019 + if (err < 0) { 1020 + printk(KERN_ERR PFX "Failed to create fnic directory " 1021 + "for tracing and stats logging\n"); 1022 + fnic_debugfs_terminate(); 1023 + } 1025 1024 1026 1025 /* Allocate memory for trace buffer */ 1027 1026 err = fnic_trace_buf_init(); ··· 1119 1102 kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); 1120 1103 err_create_fnic_sgl_slab_dflt: 1121 1104 fnic_trace_free(); 1105 + fnic_debugfs_terminate(); 1122 1106 return err; 1123 1107 } 1124 1108 ··· 1136 1118 kmem_cache_destroy(fnic_io_req_cache); 1137 1119 fc_release_transport(fnic_fc_transport); 1138 1120 fnic_trace_free(); 1121 + fnic_debugfs_terminate(); 1139 1122 } 1140 1123 1141 1124 module_init(fnic_init_module);
+236 -17
drivers/scsi/fnic/fnic_scsi.c
··· 226 226 227 227 if (!vnic_wq_copy_desc_avail(wq)) 228 228 ret = -EAGAIN; 229 - else 229 + else { 230 230 fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG); 231 + atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); 232 + if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > 233 + atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs)) 234 + atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, 235 + atomic64_read( 236 + &fnic->fnic_stats.fw_stats.active_fw_reqs)); 237 + } 231 238 232 239 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); 233 240 234 - if (!ret) 241 + if (!ret) { 242 + atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets); 235 243 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, 236 244 "Issued fw reset\n"); 237 - else { 245 + } else { 238 246 fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET); 239 247 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, 240 248 "Failed to issue fw reset\n"); ··· 299 291 fc_id, fnic->ctlr.map_dest, gw_mac); 300 292 } 301 293 294 + atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); 295 + if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > 296 + atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs)) 297 + atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, 298 + atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs)); 299 + 302 300 flogi_reg_ioreq_end: 303 301 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); 304 302 return ret; ··· 324 310 struct fc_rport *rport = starget_to_rport(scsi_target(sc->device)); 325 311 struct fc_rport_libfc_priv *rp = rport->dd_data; 326 312 struct host_sg_desc *desc; 313 + struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; 327 314 u8 pri_tag = 0; 328 315 unsigned int i; 329 316 unsigned long intr_flags; ··· 373 358 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags); 374 359 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, 375 360 "fnic_queue_wq_copy_desc failure - no descriptors\n"); 361 + atomic64_inc(&misc_stats->io_cpwq_alloc_failures); 376 
362 return SCSI_MLQUEUE_HOST_BUSY; 377 363 } 378 364 ··· 402 386 rport->maxframe_size, rp->r_a_tov, 403 387 rp->e_d_tov); 404 388 389 + atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); 390 + if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > 391 + atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs)) 392 + atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, 393 + atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs)); 394 + 405 395 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags); 406 396 return 0; 407 397 } ··· 423 401 struct fc_rport *rport; 424 402 struct fnic_io_req *io_req = NULL; 425 403 struct fnic *fnic = lport_priv(lp); 404 + struct fnic_stats *fnic_stats = &fnic->fnic_stats; 426 405 struct vnic_wq_copy *wq; 427 406 int ret; 428 407 u64 cmd_trace; ··· 437 414 rport = starget_to_rport(scsi_target(sc->device)); 438 415 ret = fc_remote_port_chkready(rport); 439 416 if (ret) { 417 + atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); 440 418 sc->result = ret; 441 419 done(sc); 442 420 return 0; ··· 460 436 /* Get a new io_req for this SCSI IO */ 461 437 io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC); 462 438 if (!io_req) { 439 + atomic64_inc(&fnic_stats->io_stats.alloc_failures); 463 440 ret = SCSI_MLQUEUE_HOST_BUSY; 464 441 goto out; 465 442 } ··· 487 462 mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type], 488 463 GFP_ATOMIC); 489 464 if (!io_req->sgl_list) { 465 + atomic64_inc(&fnic_stats->io_stats.alloc_failures); 490 466 ret = SCSI_MLQUEUE_HOST_BUSY; 491 467 scsi_dma_unmap(sc); 492 468 mempool_free(io_req, fnic->io_req_pool); ··· 535 509 mempool_free(io_req, fnic->io_req_pool); 536 510 } 537 511 } else { 512 + atomic64_inc(&fnic_stats->io_stats.active_ios); 513 + atomic64_inc(&fnic_stats->io_stats.num_ios); 514 + if (atomic64_read(&fnic_stats->io_stats.active_ios) > 515 + atomic64_read(&fnic_stats->io_stats.max_active_ios)) 516 + atomic64_set(&fnic_stats->io_stats.max_active_ios, 517 + 
atomic64_read(&fnic_stats->io_stats.active_ios)); 518 + 538 519 /* REVISIT: Use per IO lock in the final code */ 539 520 CMD_FLAGS(sc) |= FNIC_IO_ISSUED; 540 521 } ··· 575 542 struct fcpio_tag tag; 576 543 int ret = 0; 577 544 unsigned long flags; 545 + struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats; 578 546 579 547 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); 580 548 549 + atomic64_inc(&reset_stats->fw_reset_completions); 550 + 581 551 /* Clean up all outstanding io requests */ 582 552 fnic_cleanup_io(fnic, SCSI_NO_TAG); 553 + 554 + atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0); 555 + atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0); 583 556 584 557 spin_lock_irqsave(&fnic->fnic_lock, flags); 585 558 ··· 610 571 * reset the firmware. Free the cached flogi 611 572 */ 612 573 fnic->state = FNIC_IN_FC_MODE; 574 + atomic64_inc(&reset_stats->fw_reset_failures); 613 575 ret = -1; 614 576 } 615 577 } else { ··· 618 578 fnic->lport->host, 619 579 "Unexpected state %s while processing" 620 580 " reset cmpl\n", fnic_state_to_str(fnic->state)); 581 + atomic64_inc(&reset_stats->fw_reset_failures); 621 582 ret = -1; 622 583 } 623 584 ··· 742 701 wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count]; 743 702 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); 744 703 704 + fnic->fnic_stats.misc_stats.last_ack_time = jiffies; 745 705 if (is_ack_index_in_range(wq, request_out)) { 746 706 fnic->fw_ack_index[0] = request_out; 747 707 fnic->fw_ack_recd[0] = 1; 748 - } 708 + } else 709 + atomic64_inc( 710 + &fnic->fnic_stats.misc_stats.ack_index_out_of_range); 711 + 749 712 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); 750 713 FNIC_TRACE(fnic_fcpio_ack_handler, 751 714 fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3], ··· 771 726 struct fcpio_icmnd_cmpl *icmnd_cmpl; 772 727 struct fnic_io_req *io_req; 773 728 struct scsi_cmnd *sc; 729 + struct fnic_stats *fnic_stats = &fnic->fnic_stats; 774 730 unsigned 
long flags; 775 731 spinlock_t *io_lock; 776 732 u64 cmd_trace; ··· 792 746 sc = scsi_host_find_tag(fnic->lport->host, id); 793 747 WARN_ON_ONCE(!sc); 794 748 if (!sc) { 749 + atomic64_inc(&fnic_stats->io_stats.sc_null); 795 750 shost_printk(KERN_ERR, fnic->lport->host, 796 751 "icmnd_cmpl sc is null - " 797 752 "hdr status = %s tag = 0x%x desc = 0x%p\n", ··· 813 766 io_req = (struct fnic_io_req *)CMD_SP(sc); 814 767 WARN_ON_ONCE(!io_req); 815 768 if (!io_req) { 769 + atomic64_inc(&fnic_stats->io_stats.ioreq_null); 816 770 CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL; 817 771 spin_unlock_irqrestore(io_lock, flags); 818 772 shost_printk(KERN_ERR, fnic->lport->host, ··· 872 824 if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER) 873 825 xfer_len -= icmnd_cmpl->residual; 874 826 827 + if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL) 828 + atomic64_inc(&fnic_stats->misc_stats.queue_fulls); 875 829 break; 876 830 877 831 case FCPIO_TIMEOUT: /* request was timed out */ 832 + atomic64_inc(&fnic_stats->misc_stats.fcpio_timeout); 878 833 sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status; 879 834 break; 880 835 881 836 case FCPIO_ABORTED: /* request was aborted */ 837 + atomic64_inc(&fnic_stats->misc_stats.fcpio_aborted); 882 838 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; 883 839 break; 884 840 885 841 case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. 
*/ 842 + atomic64_inc(&fnic_stats->misc_stats.data_count_mismatch); 886 843 scsi_set_resid(sc, icmnd_cmpl->residual); 887 844 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; 888 845 break; 889 846 890 847 case FCPIO_OUT_OF_RESOURCE: /* out of resources to complete request */ 848 + atomic64_inc(&fnic_stats->fw_stats.fw_out_of_resources); 891 849 sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status; 892 850 break; 851 + 852 + case FCPIO_IO_NOT_FOUND: /* requested I/O was not found */ 853 + atomic64_inc(&fnic_stats->io_stats.io_not_found); 854 + sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; 855 + break; 856 + 857 + case FCPIO_SGL_INVALID: /* request was aborted due to sgl error */ 858 + atomic64_inc(&fnic_stats->misc_stats.sgl_invalid); 859 + sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; 860 + break; 861 + 862 + case FCPIO_FW_ERR: /* request was terminated due fw error */ 863 + atomic64_inc(&fnic_stats->fw_stats.io_fw_errs); 864 + sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; 865 + break; 866 + 867 + case FCPIO_MSS_INVALID: /* request was aborted due to mss error */ 868 + atomic64_inc(&fnic_stats->misc_stats.mss_invalid); 869 + sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; 870 + break; 871 + 893 872 case FCPIO_INVALID_HEADER: /* header contains invalid data */ 894 873 case FCPIO_INVALID_PARAM: /* some parameter in request invalid */ 895 874 case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */ 896 - case FCPIO_IO_NOT_FOUND: /* requested I/O was not found */ 897 - case FCPIO_SGL_INVALID: /* request was aborted due to sgl error */ 898 - case FCPIO_MSS_INVALID: /* request was aborted due to mss error */ 899 - case FCPIO_FW_ERR: /* request was terminated due fw error */ 900 875 default: 901 876 shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n", 902 877 fnic_fcpio_status_to_str(hdr_status)); ··· 927 856 break; 928 857 } 929 858 859 + if (hdr_status != FCPIO_SUCCESS) { 860 + 
atomic64_inc(&fnic_stats->io_stats.io_failures); 861 + shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n", 862 + fnic_fcpio_status_to_str(hdr_status)); 863 + } 930 864 /* Break link with the SCSI command */ 931 865 CMD_SP(sc) = NULL; 932 866 CMD_FLAGS(sc) |= FNIC_IO_DONE; ··· 965 889 } else 966 890 fnic->lport->host_stats.fcp_control_requests++; 967 891 892 + atomic64_dec(&fnic_stats->io_stats.active_ios); 893 + if (atomic64_read(&fnic->io_cmpl_skip)) 894 + atomic64_dec(&fnic->io_cmpl_skip); 895 + else 896 + atomic64_inc(&fnic_stats->io_stats.io_completions); 897 + 968 898 /* Call SCSI completion function to complete the IO */ 969 899 if (sc->scsi_done) 970 900 sc->scsi_done(sc); ··· 988 906 u32 id; 989 907 struct scsi_cmnd *sc; 990 908 struct fnic_io_req *io_req; 909 + struct fnic_stats *fnic_stats = &fnic->fnic_stats; 910 + struct abort_stats *abts_stats = &fnic->fnic_stats.abts_stats; 911 + struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats; 912 + struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; 991 913 unsigned long flags; 992 914 spinlock_t *io_lock; 993 915 unsigned long start_time; ··· 1009 923 sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK); 1010 924 WARN_ON_ONCE(!sc); 1011 925 if (!sc) { 926 + atomic64_inc(&fnic_stats->io_stats.sc_null); 1012 927 shost_printk(KERN_ERR, fnic->lport->host, 1013 928 "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n", 1014 929 fnic_fcpio_status_to_str(hdr_status), id); ··· 1020 933 io_req = (struct fnic_io_req *)CMD_SP(sc); 1021 934 WARN_ON_ONCE(!io_req); 1022 935 if (!io_req) { 936 + atomic64_inc(&fnic_stats->io_stats.ioreq_null); 1023 937 spin_unlock_irqrestore(io_lock, flags); 1024 938 CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL; 1025 939 shost_printk(KERN_ERR, fnic->lport->host, ··· 1045 957 spin_unlock_irqrestore(io_lock, flags); 1046 958 } else if (id & FNIC_TAG_ABORT) { 1047 959 /* Completion of abort cmd */ 960 + switch (hdr_status) { 961 + case FCPIO_SUCCESS: 962 
+ break; 963 + case FCPIO_TIMEOUT: 964 + if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED) 965 + atomic64_inc(&abts_stats->abort_fw_timeouts); 966 + else 967 + atomic64_inc( 968 + &term_stats->terminate_fw_timeouts); 969 + break; 970 + case FCPIO_IO_NOT_FOUND: 971 + if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED) 972 + atomic64_inc(&abts_stats->abort_io_not_found); 973 + else 974 + atomic64_inc( 975 + &term_stats->terminate_io_not_found); 976 + break; 977 + default: 978 + if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED) 979 + atomic64_inc(&abts_stats->abort_failures); 980 + else 981 + atomic64_inc( 982 + &term_stats->terminate_failures); 983 + break; 984 + } 1048 985 if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) { 1049 986 /* This is a late completion. Ignore it */ 1050 987 spin_unlock_irqrestore(io_lock, flags); ··· 1077 964 } 1078 965 CMD_ABTS_STATUS(sc) = hdr_status; 1079 966 CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE; 967 + 968 + atomic64_dec(&fnic_stats->io_stats.active_ios); 969 + if (atomic64_read(&fnic->io_cmpl_skip)) 970 + atomic64_dec(&fnic->io_cmpl_skip); 971 + else 972 + atomic64_inc(&fnic_stats->io_stats.io_completions); 973 + 974 + if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE))) 975 + atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls); 976 + 1080 977 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, 1081 978 "abts cmpl recd. 
id %d status %s\n", 1082 979 (int)(id & FNIC_TAG_MASK), ··· 1190 1067 struct fnic *fnic = vnic_dev_priv(vdev); 1191 1068 1192 1069 switch (desc->hdr.type) { 1070 + case FCPIO_ICMND_CMPL: /* fw completed a command */ 1071 + case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/ 1072 + case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */ 1073 + case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */ 1074 + case FCPIO_RESET_CMPL: /* fw completed reset */ 1075 + atomic64_dec(&fnic->fnic_stats.fw_stats.active_fw_reqs); 1076 + break; 1077 + default: 1078 + break; 1079 + } 1080 + 1081 + switch (desc->hdr.type) { 1193 1082 case FCPIO_ACK: /* fw copied copy wq desc to its queue */ 1194 1083 fnic_fcpio_ack_handler(fnic, cq_index, desc); 1195 1084 break; ··· 1261 1126 struct scsi_cmnd *sc; 1262 1127 spinlock_t *io_lock; 1263 1128 unsigned long start_time = 0; 1129 + struct fnic_stats *fnic_stats = &fnic->fnic_stats; 1264 1130 1265 1131 for (i = 0; i < fnic->fnic_max_tag_id; i++) { 1266 1132 if (i == exclude_id) ··· 1314 1178 sc->result = DID_TRANSPORT_DISRUPTED << 16; 1315 1179 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_cleanup_io:" 1316 1180 " DID_TRANSPORT_DISRUPTED\n"); 1181 + 1182 + if (atomic64_read(&fnic->io_cmpl_skip)) 1183 + atomic64_dec(&fnic->io_cmpl_skip); 1184 + else 1185 + atomic64_inc(&fnic_stats->io_stats.io_completions); 1317 1186 1318 1187 /* Complete the command to SCSI */ 1319 1188 if (sc->scsi_done) { ··· 1403 1262 { 1404 1263 struct vnic_wq_copy *wq = &fnic->wq_copy[0]; 1405 1264 struct Scsi_Host *host = fnic->lport->host; 1265 + struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; 1406 1266 unsigned long flags; 1407 1267 1408 1268 spin_lock_irqsave(host->host_lock, flags); ··· 1425 1283 atomic_dec(&fnic->in_flight); 1426 1284 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, 1427 1285 "fnic_queue_abort_io_req: failure: no descriptors\n"); 1286 + atomic64_inc(&misc_stats->abts_cpwq_alloc_failures); 1428 1287 return 1; 
1429 1288 } 1430 1289 fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT, 1431 1290 0, task_req, tag, fc_lun, io_req->port_id, 1432 1291 fnic->config.ra_tov, fnic->config.ed_tov); 1292 + 1293 + atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); 1294 + if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > 1295 + atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs)) 1296 + atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, 1297 + atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs)); 1433 1298 1434 1299 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); 1435 1300 atomic_dec(&fnic->in_flight); ··· 1448 1299 { 1449 1300 int tag; 1450 1301 int abt_tag; 1302 + int term_cnt = 0; 1451 1303 struct fnic_io_req *io_req; 1452 1304 spinlock_t *io_lock; 1453 1305 unsigned long flags; 1454 1306 struct scsi_cmnd *sc; 1307 + struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats; 1308 + struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats; 1455 1309 struct scsi_lun fc_lun; 1456 1310 enum fnic_ioreq_state old_ioreq_state; 1457 1311 ··· 1518 1366 CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; 1519 1367 CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; 1520 1368 if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) { 1369 + atomic64_inc(&reset_stats->device_reset_terminates); 1521 1370 abt_tag = (tag | FNIC_TAG_DEV_RST); 1522 1371 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, 1523 1372 "fnic_rport_exch_reset dev rst sc 0x%p\n", ··· 1555 1402 else 1556 1403 CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED; 1557 1404 spin_unlock_irqrestore(io_lock, flags); 1405 + atomic64_inc(&term_stats->terminates); 1406 + term_cnt++; 1558 1407 } 1559 1408 } 1409 + if (term_cnt > atomic64_read(&term_stats->max_terminates)) 1410 + atomic64_set(&term_stats->max_terminates, term_cnt); 1560 1411 1561 1412 } 1562 1413 ··· 1568 1411 { 1569 1412 int tag; 1570 1413 int abt_tag; 1414 + int term_cnt = 0; 1571 1415 struct fnic_io_req *io_req; 1572 1416 spinlock_t *io_lock; 1573 1417 unsigned 
long flags; ··· 1578 1420 struct fc_lport *lport; 1579 1421 struct fnic *fnic; 1580 1422 struct fc_rport *cmd_rport; 1423 + struct reset_stats *reset_stats; 1424 + struct terminate_stats *term_stats; 1581 1425 enum fnic_ioreq_state old_ioreq_state; 1582 1426 1583 1427 if (!rport) { ··· 1607 1447 1608 1448 if (fnic->in_remove) 1609 1449 return; 1450 + 1451 + reset_stats = &fnic->fnic_stats.reset_stats; 1452 + term_stats = &fnic->fnic_stats.term_stats; 1610 1453 1611 1454 for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) { 1612 1455 abt_tag = tag; ··· 1667 1504 CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; 1668 1505 CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; 1669 1506 if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) { 1507 + atomic64_inc(&reset_stats->device_reset_terminates); 1670 1508 abt_tag = (tag | FNIC_TAG_DEV_RST); 1671 1509 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, 1672 1510 "fnic_terminate_rport_io dev rst sc 0x%p\n", sc); ··· 1704 1540 else 1705 1541 CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED; 1706 1542 spin_unlock_irqrestore(io_lock, flags); 1543 + atomic64_inc(&term_stats->terminates); 1544 + term_cnt++; 1707 1545 } 1708 1546 } 1547 + if (term_cnt > atomic64_read(&term_stats->max_terminates)) 1548 + atomic64_set(&term_stats->max_terminates, term_cnt); 1709 1549 1710 1550 } 1711 1551 ··· 1730 1562 int ret = SUCCESS; 1731 1563 u32 task_req = 0; 1732 1564 struct scsi_lun fc_lun; 1565 + struct fnic_stats *fnic_stats; 1566 + struct abort_stats *abts_stats; 1567 + struct terminate_stats *term_stats; 1733 1568 int tag; 1734 1569 DECLARE_COMPLETION_ONSTACK(tm_done); 1735 1570 ··· 1743 1572 lp = shost_priv(sc->device->host); 1744 1573 1745 1574 fnic = lport_priv(lp); 1575 + fnic_stats = &fnic->fnic_stats; 1576 + abts_stats = &fnic->fnic_stats.abts_stats; 1577 + term_stats = &fnic->fnic_stats.term_stats; 1578 + 1746 1579 rport = starget_to_rport(scsi_target(sc->device)); 1747 1580 tag = sc->request->tag; 1748 1581 FNIC_SCSI_DBG(KERN_DEBUG, ··· 1805 1630 */ 1806 1631 if 
(fc_remote_port_chkready(rport) == 0) 1807 1632 task_req = FCPIO_ITMF_ABT_TASK; 1808 - else 1633 + else { 1634 + atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); 1809 1635 task_req = FCPIO_ITMF_ABT_TASK_TERM; 1636 + } 1810 1637 1811 1638 /* Now queue the abort command to firmware */ 1812 1639 int_to_scsilun(sc->device->lun, &fc_lun); ··· 1823 1646 ret = FAILED; 1824 1647 goto fnic_abort_cmd_end; 1825 1648 } 1826 - if (task_req == FCPIO_ITMF_ABT_TASK) 1649 + if (task_req == FCPIO_ITMF_ABT_TASK) { 1827 1650 CMD_FLAGS(sc) |= FNIC_IO_ABTS_ISSUED; 1828 - else 1651 + atomic64_inc(&fnic_stats->abts_stats.aborts); 1652 + } else { 1829 1653 CMD_FLAGS(sc) |= FNIC_IO_TERM_ISSUED; 1654 + atomic64_inc(&fnic_stats->term_stats.terminates); 1655 + } 1830 1656 1831 1657 /* 1832 1658 * We queued an abort IO, wait for its completion. ··· 1847 1667 1848 1668 io_req = (struct fnic_io_req *)CMD_SP(sc); 1849 1669 if (!io_req) { 1670 + atomic64_inc(&fnic_stats->io_stats.ioreq_null); 1850 1671 spin_unlock_irqrestore(io_lock, flags); 1851 1672 CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL; 1852 1673 ret = FAILED; ··· 1858 1677 /* fw did not complete abort, timed out */ 1859 1678 if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) { 1860 1679 spin_unlock_irqrestore(io_lock, flags); 1680 + if (task_req == FCPIO_ITMF_ABT_TASK) { 1681 + FNIC_SCSI_DBG(KERN_INFO, 1682 + fnic->lport->host, "Abort Driver Timeout\n"); 1683 + atomic64_inc(&abts_stats->abort_drv_timeouts); 1684 + } else { 1685 + FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, 1686 + "Terminate Driver Timeout\n"); 1687 + atomic64_inc(&term_stats->terminate_drv_timeouts); 1688 + } 1861 1689 CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT; 1862 1690 ret = FAILED; 1863 1691 goto fnic_abort_cmd_end; ··· 1911 1721 { 1912 1722 struct vnic_wq_copy *wq = &fnic->wq_copy[0]; 1913 1723 struct Scsi_Host *host = fnic->lport->host; 1724 + struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; 1914 1725 struct scsi_lun fc_lun; 1915 1726 int ret = 0; 
1916 1727 unsigned long intr_flags; ··· 1933 1742 if (!vnic_wq_copy_desc_avail(wq)) { 1934 1743 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, 1935 1744 "queue_dr_io_req failure - no descriptors\n"); 1745 + atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures); 1936 1746 ret = -EAGAIN; 1937 1747 goto lr_io_req_end; 1938 1748 } ··· 1945 1753 0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG, 1946 1754 fc_lun.scsi_lun, io_req->port_id, 1947 1755 fnic->config.ra_tov, fnic->config.ed_tov); 1756 + 1757 + atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); 1758 + if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > 1759 + atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs)) 1760 + atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, 1761 + atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs)); 1948 1762 1949 1763 lr_io_req_end: 1950 1764 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags); ··· 2186 1988 unsigned long flags; 2187 1989 unsigned long start_time = 0; 2188 1990 struct scsi_lun fc_lun; 1991 + struct fnic_stats *fnic_stats; 1992 + struct reset_stats *reset_stats; 2189 1993 int tag = 0; 2190 1994 DECLARE_COMPLETION_ONSTACK(tm_done); 2191 1995 int tag_gen_flag = 0; /*to track tags allocated by fnic driver*/ ··· 2199 1999 lp = shost_priv(sc->device->host); 2200 2000 2201 2001 fnic = lport_priv(lp); 2002 + fnic_stats = &fnic->fnic_stats; 2003 + reset_stats = &fnic->fnic_stats.reset_stats; 2004 + 2005 + atomic64_inc(&reset_stats->device_resets); 2202 2006 2203 2007 rport = starget_to_rport(scsi_target(sc->device)); 2204 2008 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, ··· 2213 2009 goto fnic_device_reset_end; 2214 2010 2215 2011 /* Check if remote port up */ 2216 - if (fc_remote_port_chkready(rport)) 2012 + if (fc_remote_port_chkready(rport)) { 2013 + atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); 2217 2014 goto fnic_device_reset_end; 2015 + } 2218 2016 2219 2017 CMD_FLAGS(sc) = FNIC_DEVICE_RESET; 2220 2018 /* Allocate tag if not present */ ··· 
2292 2086 * gets cleaned up during higher levels of EH 2293 2087 */ 2294 2088 if (status == FCPIO_INVALID_CODE) { 2089 + atomic64_inc(&reset_stats->device_reset_timeouts); 2295 2090 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, 2296 2091 "Device reset timed out\n"); 2297 2092 CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT; ··· 2406 2199 "Returning from device reset %s\n", 2407 2200 (ret == SUCCESS) ? 2408 2201 "SUCCESS" : "FAILED"); 2202 + 2203 + if (ret == FAILED) 2204 + atomic64_inc(&reset_stats->device_reset_failures); 2205 + 2409 2206 return ret; 2410 2207 } 2411 2208 ··· 2418 2207 { 2419 2208 struct fc_lport *lp; 2420 2209 struct fnic *fnic; 2421 - int ret = SUCCESS; 2210 + int ret = 0; 2211 + struct reset_stats *reset_stats; 2422 2212 2423 2213 lp = shost_priv(shost); 2424 2214 fnic = lport_priv(lp); 2215 + reset_stats = &fnic->fnic_stats.reset_stats; 2425 2216 2426 2217 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, 2427 2218 "fnic_reset called\n"); 2219 + 2220 + atomic64_inc(&reset_stats->fnic_resets); 2428 2221 2429 2222 /* 2430 2223 * Reset local port, this will clean up libFC exchanges, 2431 2224 * reset remote port sessions, and if link is up, begin flogi 2432 2225 */ 2433 - if (lp->tt.lport_reset(lp)) 2434 - ret = FAILED; 2226 + ret = lp->tt.lport_reset(lp); 2435 2227 2436 2228 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, 2437 2229 "Returning from fnic reset %s\n", 2438 - (ret == SUCCESS) ? 2230 + (ret == 0) ? 2439 2231 "SUCCESS" : "FAILED"); 2232 + 2233 + if (ret == 0) 2234 + atomic64_inc(&reset_stats->fnic_reset_completions); 2235 + else 2236 + atomic64_inc(&reset_stats->fnic_reset_failures); 2440 2237 2441 2238 return ret; 2442 2239 } ··· 2470 2251 * scsi-ml tries to send a TUR to every device if host reset is 2471 2252 * successful, so before returning to scsi, fabric should be up 2472 2253 */ 2473 - ret = fnic_reset(shost); 2254 + ret = (fnic_reset(shost) == 0) ? 
SUCCESS : FAILED; 2474 2255 if (ret == SUCCESS) { 2475 2256 wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ; 2476 2257 ret = FAILED;
+116
drivers/scsi/fnic/fnic_stats.h
··· 1 + /* 2 + * Copyright 2013 Cisco Systems, Inc. All rights reserved. 3 + * 4 + * This program is free software; you may redistribute it and/or modify 5 + * it under the terms of the GNU General Public License as published by 6 + * the Free Software Foundation; version 2 of the License. 7 + * 8 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 9 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 10 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 11 + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 12 + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 13 + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 14 + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 15 + * SOFTWARE. 16 + */ 17 + #ifndef _FNIC_STATS_H_ 18 + #define _FNIC_STATS_H_ 19 + struct io_path_stats { 20 + atomic64_t active_ios; 21 + atomic64_t max_active_ios; 22 + atomic64_t io_completions; 23 + atomic64_t io_failures; 24 + atomic64_t ioreq_null; 25 + atomic64_t alloc_failures; 26 + atomic64_t sc_null; 27 + atomic64_t io_not_found; 28 + atomic64_t num_ios; 29 + }; 30 + 31 + struct abort_stats { 32 + atomic64_t aborts; 33 + atomic64_t abort_failures; 34 + atomic64_t abort_drv_timeouts; 35 + atomic64_t abort_fw_timeouts; 36 + atomic64_t abort_io_not_found; 37 + }; 38 + 39 + struct terminate_stats { 40 + atomic64_t terminates; 41 + atomic64_t max_terminates; 42 + atomic64_t terminate_drv_timeouts; 43 + atomic64_t terminate_fw_timeouts; 44 + atomic64_t terminate_io_not_found; 45 + atomic64_t terminate_failures; 46 + }; 47 + 48 + struct reset_stats { 49 + atomic64_t device_resets; 50 + atomic64_t device_reset_failures; 51 + atomic64_t device_reset_aborts; 52 + atomic64_t device_reset_timeouts; 53 + atomic64_t device_reset_terminates; 54 + atomic64_t fw_resets; 55 + atomic64_t fw_reset_completions; 56 + atomic64_t fw_reset_failures; 57 + atomic64_t fnic_resets; 58 + atomic64_t 
fnic_reset_completions; 59 + atomic64_t fnic_reset_failures; 60 + }; 61 + 62 + struct fw_stats { 63 + atomic64_t active_fw_reqs; 64 + atomic64_t max_fw_reqs; 65 + atomic64_t fw_out_of_resources; 66 + atomic64_t io_fw_errs; 67 + }; 68 + 69 + struct vlan_stats { 70 + atomic64_t vlan_disc_reqs; 71 + atomic64_t resp_withno_vlanID; 72 + atomic64_t sol_expiry_count; 73 + atomic64_t flogi_rejects; 74 + }; 75 + 76 + struct misc_stats { 77 + u64 last_isr_time; 78 + u64 last_ack_time; 79 + atomic64_t isr_count; 80 + atomic64_t max_cq_entries; 81 + atomic64_t ack_index_out_of_range; 82 + atomic64_t data_count_mismatch; 83 + atomic64_t fcpio_timeout; 84 + atomic64_t fcpio_aborted; 85 + atomic64_t sgl_invalid; 86 + atomic64_t mss_invalid; 87 + atomic64_t abts_cpwq_alloc_failures; 88 + atomic64_t devrst_cpwq_alloc_failures; 89 + atomic64_t io_cpwq_alloc_failures; 90 + atomic64_t no_icmnd_itmf_cmpls; 91 + atomic64_t queue_fulls; 92 + atomic64_t rport_not_ready; 93 + atomic64_t frame_errors; 94 + }; 95 + 96 + struct fnic_stats { 97 + struct io_path_stats io_stats; 98 + struct abort_stats abts_stats; 99 + struct terminate_stats term_stats; 100 + struct reset_stats reset_stats; 101 + struct fw_stats fw_stats; 102 + struct vlan_stats vlan_stats; 103 + struct misc_stats misc_stats; 104 + }; 105 + 106 + struct stats_debug_info { 107 + char *debug_buffer; 108 + void *i_private; 109 + int buf_size; 110 + int buffer_len; 111 + }; 112 + 113 + int fnic_get_stats_data(struct stats_debug_info *, struct fnic_stats *); 114 + int fnic_stats_debugfs_init(struct fnic *); 115 + void fnic_stats_debugfs_remove(struct fnic *); 116 + #endif /* _FNIC_STATS_H_ */
+185
drivers/scsi/fnic/fnic_trace.c
··· 189 189 } 190 190 191 191 /* 192 + * fnic_get_stats_data - Copy fnic stats buffer to a memory file 193 + * @fnic_dbgfs_t: pointer to debugfs fnic stats buffer 194 + * 195 + * Description: 196 + * This routine gathers the fnic stats debugfs data from the fnic_stats struct 197 + * and dumps it to stats_debug_info. 198 + * 199 + * Return Value: 200 + * This routine returns the amount of bytes that were dumped into 201 + * stats_debug_info 202 + */ 203 + int fnic_get_stats_data(struct stats_debug_info *debug, 204 + struct fnic_stats *stats) 205 + { 206 + int len = 0; 207 + int buf_size = debug->buf_size; 208 + struct timespec val1, val2; 209 + 210 + len = snprintf(debug->debug_buffer + len, buf_size - len, 211 + "------------------------------------------\n" 212 + "\t\tIO Statistics\n" 213 + "------------------------------------------\n"); 214 + len += snprintf(debug->debug_buffer + len, buf_size - len, 215 + "Number of Active IOs: %lld\nMaximum Active IOs: %lld\n" 216 + "Number of IOs: %lld\nNumber of IO Completions: %lld\n" 217 + "Number of IO Failures: %lld\nNumber of IO NOT Found: %lld\n" 218 + "Number of Memory alloc Failures: %lld\n" 219 + "Number of IOREQ Null: %lld\n" 220 + "Number of SCSI cmd pointer Null: %lld\n", 221 + (u64)atomic64_read(&stats->io_stats.active_ios), 222 + (u64)atomic64_read(&stats->io_stats.max_active_ios), 223 + (u64)atomic64_read(&stats->io_stats.num_ios), 224 + (u64)atomic64_read(&stats->io_stats.io_completions), 225 + (u64)atomic64_read(&stats->io_stats.io_failures), 226 + (u64)atomic64_read(&stats->io_stats.io_not_found), 227 + (u64)atomic64_read(&stats->io_stats.alloc_failures), 228 + (u64)atomic64_read(&stats->io_stats.ioreq_null), 229 + (u64)atomic64_read(&stats->io_stats.sc_null)); 230 + 231 + len += snprintf(debug->debug_buffer + len, buf_size - len, 232 + "\n------------------------------------------\n" 233 + "\t\tAbort Statistics\n" 234 + "------------------------------------------\n"); 235 + len += 
snprintf(debug->debug_buffer + len, buf_size - len, 236 + "Number of Aborts: %lld\n" 237 + "Number of Abort Failures: %lld\n" 238 + "Number of Abort Driver Timeouts: %lld\n" 239 + "Number of Abort FW Timeouts: %lld\n" 240 + "Number of Abort IO NOT Found: %lld\n", 241 + (u64)atomic64_read(&stats->abts_stats.aborts), 242 + (u64)atomic64_read(&stats->abts_stats.abort_failures), 243 + (u64)atomic64_read(&stats->abts_stats.abort_drv_timeouts), 244 + (u64)atomic64_read(&stats->abts_stats.abort_fw_timeouts), 245 + (u64)atomic64_read(&stats->abts_stats.abort_io_not_found)); 246 + 247 + len += snprintf(debug->debug_buffer + len, buf_size - len, 248 + "\n------------------------------------------\n" 249 + "\t\tTerminate Statistics\n" 250 + "------------------------------------------\n"); 251 + len += snprintf(debug->debug_buffer + len, buf_size - len, 252 + "Number of Terminates: %lld\n" 253 + "Maximum Terminates: %lld\n" 254 + "Number of Terminate Driver Timeouts: %lld\n" 255 + "Number of Terminate FW Timeouts: %lld\n" 256 + "Number of Terminate IO NOT Found: %lld\n" 257 + "Number of Terminate Failures: %lld\n", 258 + (u64)atomic64_read(&stats->term_stats.terminates), 259 + (u64)atomic64_read(&stats->term_stats.max_terminates), 260 + (u64)atomic64_read(&stats->term_stats.terminate_drv_timeouts), 261 + (u64)atomic64_read(&stats->term_stats.terminate_fw_timeouts), 262 + (u64)atomic64_read(&stats->term_stats.terminate_io_not_found), 263 + (u64)atomic64_read(&stats->term_stats.terminate_failures)); 264 + 265 + len += snprintf(debug->debug_buffer + len, buf_size - len, 266 + "\n------------------------------------------\n" 267 + "\t\tReset Statistics\n" 268 + "------------------------------------------\n"); 269 + 270 + len += snprintf(debug->debug_buffer + len, buf_size - len, 271 + "Number of Device Resets: %lld\n" 272 + "Number of Device Reset Failures: %lld\n" 273 + "Number of Device Reset Aborts: %lld\n" 274 + "Number of Device Reset Timeouts: %lld\n" 275 + "Number of Device 
Reset Terminates: %lld\n" 276 + "Number of FW Resets: %lld\n" 277 + "Number of FW Reset Completions: %lld\n" 278 + "Number of FW Reset Failures: %lld\n" 279 + "Number of Fnic Reset: %lld\n" 280 + "Number of Fnic Reset Completions: %lld\n" 281 + "Number of Fnic Reset Failures: %lld\n", 282 + (u64)atomic64_read(&stats->reset_stats.device_resets), 283 + (u64)atomic64_read(&stats->reset_stats.device_reset_failures), 284 + (u64)atomic64_read(&stats->reset_stats.device_reset_aborts), 285 + (u64)atomic64_read(&stats->reset_stats.device_reset_timeouts), 286 + (u64)atomic64_read( 287 + &stats->reset_stats.device_reset_terminates), 288 + (u64)atomic64_read(&stats->reset_stats.fw_resets), 289 + (u64)atomic64_read(&stats->reset_stats.fw_reset_completions), 290 + (u64)atomic64_read(&stats->reset_stats.fw_reset_failures), 291 + (u64)atomic64_read(&stats->reset_stats.fnic_resets), 292 + (u64)atomic64_read( 293 + &stats->reset_stats.fnic_reset_completions), 294 + (u64)atomic64_read(&stats->reset_stats.fnic_reset_failures)); 295 + 296 + len += snprintf(debug->debug_buffer + len, buf_size - len, 297 + "\n------------------------------------------\n" 298 + "\t\tFirmware Statistics\n" 299 + "------------------------------------------\n"); 300 + 301 + len += snprintf(debug->debug_buffer + len, buf_size - len, 302 + "Number of Active FW Requests %lld\n" 303 + "Maximum FW Requests: %lld\n" 304 + "Number of FW out of resources: %lld\n" 305 + "Number of FW IO errors: %lld\n", 306 + (u64)atomic64_read(&stats->fw_stats.active_fw_reqs), 307 + (u64)atomic64_read(&stats->fw_stats.max_fw_reqs), 308 + (u64)atomic64_read(&stats->fw_stats.fw_out_of_resources), 309 + (u64)atomic64_read(&stats->fw_stats.io_fw_errs)); 310 + 311 + len += snprintf(debug->debug_buffer + len, buf_size - len, 312 + "\n------------------------------------------\n" 313 + "\t\tVlan Discovery Statistics\n" 314 + "------------------------------------------\n"); 315 + 316 + len += snprintf(debug->debug_buffer + len, buf_size - 
len, 317 + "Number of Vlan Discovery Requests Sent %lld\n" 318 + "Vlan Response Received with no FCF VLAN ID: %lld\n" 319 + "No solicitations recvd after vlan set, expiry count: %lld\n" 320 + "Flogi rejects count: %lld\n", 321 + (u64)atomic64_read(&stats->vlan_stats.vlan_disc_reqs), 322 + (u64)atomic64_read(&stats->vlan_stats.resp_withno_vlanID), 323 + (u64)atomic64_read(&stats->vlan_stats.sol_expiry_count), 324 + (u64)atomic64_read(&stats->vlan_stats.flogi_rejects)); 325 + 326 + len += snprintf(debug->debug_buffer + len, buf_size - len, 327 + "\n------------------------------------------\n" 328 + "\t\tOther Important Statistics\n" 329 + "------------------------------------------\n"); 330 + 331 + jiffies_to_timespec(stats->misc_stats.last_isr_time, &val1); 332 + jiffies_to_timespec(stats->misc_stats.last_ack_time, &val2); 333 + 334 + len += snprintf(debug->debug_buffer + len, buf_size - len, 335 + "Last ISR time: %llu (%8lu.%8lu)\n" 336 + "Last ACK time: %llu (%8lu.%8lu)\n" 337 + "Number of ISRs: %lld\n" 338 + "Maximum CQ Entries: %lld\n" 339 + "Number of ACK index out of range: %lld\n" 340 + "Number of data count mismatch: %lld\n" 341 + "Number of FCPIO Timeouts: %lld\n" 342 + "Number of FCPIO Aborted: %lld\n" 343 + "Number of SGL Invalid: %lld\n" 344 + "Number of Copy WQ Alloc Failures for ABTs: %lld\n" 345 + "Number of Copy WQ Alloc Failures for Device Reset: %lld\n" 346 + "Number of Copy WQ Alloc Failures for IOs: %lld\n" 347 + "Number of no icmnd itmf Completions: %lld\n" 348 + "Number of QUEUE Fulls: %lld\n" 349 + "Number of rport not ready: %lld\n" 350 + "Number of receive frame errors: %lld\n", 351 + (u64)stats->misc_stats.last_isr_time, 352 + val1.tv_sec, val1.tv_nsec, 353 + (u64)stats->misc_stats.last_ack_time, 354 + val2.tv_sec, val2.tv_nsec, 355 + (u64)atomic64_read(&stats->misc_stats.isr_count), 356 + (u64)atomic64_read(&stats->misc_stats.max_cq_entries), 357 + (u64)atomic64_read(&stats->misc_stats.ack_index_out_of_range), 358 + 
(u64)atomic64_read(&stats->misc_stats.data_count_mismatch), 359 + (u64)atomic64_read(&stats->misc_stats.fcpio_timeout), 360 + (u64)atomic64_read(&stats->misc_stats.fcpio_aborted), 361 + (u64)atomic64_read(&stats->misc_stats.sgl_invalid), 362 + (u64)atomic64_read( 363 + &stats->misc_stats.abts_cpwq_alloc_failures), 364 + (u64)atomic64_read( 365 + &stats->misc_stats.devrst_cpwq_alloc_failures), 366 + (u64)atomic64_read(&stats->misc_stats.io_cpwq_alloc_failures), 367 + (u64)atomic64_read(&stats->misc_stats.no_icmnd_itmf_cmpls), 368 + (u64)atomic64_read(&stats->misc_stats.queue_fulls), 369 + (u64)atomic64_read(&stats->misc_stats.rport_not_ready), 370 + (u64)atomic64_read(&stats->misc_stats.frame_errors)); 371 + 372 + return len; 373 + 374 + } 375 + 376 + /* 192 377 * fnic_trace_buf_init - Initialize fnic trace buffer logging facility 193 378 * 194 379 * Description:
+2 -1
drivers/scsi/fnic/fnic_trace.h
··· 84 84 int fnic_get_trace_data(fnic_dbgfs_t *); 85 85 int fnic_trace_buf_init(void); 86 86 void fnic_trace_free(void); 87 + int fnic_debugfs_init(void); 88 + void fnic_debugfs_terminate(void); 87 89 int fnic_trace_debugfs_init(void); 88 90 void fnic_trace_debugfs_terminate(void); 89 - 90 91 #endif
+7
drivers/scsi/hosts.c
··· 316 316 kfree(shost); 317 317 } 318 318 319 + static unsigned int shost_eh_deadline; 320 + 321 + module_param_named(eh_deadline, shost_eh_deadline, uint, S_IRUGO|S_IWUSR); 322 + MODULE_PARM_DESC(eh_deadline, 323 + "SCSI EH timeout in seconds (should be between 1 and 2^32-1)"); 324 + 319 325 static struct device_type scsi_host_type = { 320 326 .name = "scsi_host", 321 327 .release = scsi_host_dev_release, ··· 394 388 shost->unchecked_isa_dma = sht->unchecked_isa_dma; 395 389 shost->use_clustering = sht->use_clustering; 396 390 shost->ordered_tag = sht->ordered_tag; 391 + shost->eh_deadline = shost_eh_deadline * HZ; 397 392 398 393 if (sht->supported_mode == MODE_UNKNOWN) 399 394 /* means we didn't set it ... default to INITIATOR */
-1
drivers/scsi/hpsa.c
··· 100 100 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354}, 101 101 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355}, 102 102 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356}, 103 - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1920}, 104 103 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921}, 105 104 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922}, 106 105 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
+1
drivers/scsi/iscsi_tcp.c
··· 116 116 struct iscsi_conn *conn = sk->sk_user_data; 117 117 118 118 if ((sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) && 119 + (conn->session->state != ISCSI_STATE_LOGGING_OUT) && 119 120 !atomic_read(&sk->sk_rmem_alloc)) { 120 121 ISCSI_SW_TCP_DBG(conn, "TCP_CLOSE|TCP_CLOSE_WAIT\n"); 121 122 iscsi_conn_failure(conn, ISCSI_ERR_TCP_CONN_CLOSE);
+161 -90
drivers/scsi/libfc/fc_exch.c
··· 27 27 #include <linux/slab.h> 28 28 #include <linux/err.h> 29 29 #include <linux/export.h> 30 + #include <linux/log2.h> 30 31 31 32 #include <scsi/fc/fc_fc2.h> 32 33 ··· 304 303 fr_eof(fp) = FC_EOF_N; 305 304 } 306 305 307 - /* 308 - * Initialize remainig fh fields 309 - * from fc_fill_fc_hdr 310 - */ 306 + /* Initialize remaining fh fields from fc_fill_fc_hdr */ 311 307 fh->fh_ox_id = htons(ep->oxid); 312 308 fh->fh_rx_id = htons(ep->rxid); 313 309 fh->fh_seq_id = ep->seq.id; ··· 360 362 361 363 FC_EXCH_DBG(ep, "Exchange timer armed : %d msecs\n", timer_msec); 362 364 363 - if (queue_delayed_work(fc_exch_workqueue, &ep->timeout_work, 364 - msecs_to_jiffies(timer_msec))) 365 - fc_exch_hold(ep); /* hold for timer */ 365 + fc_exch_hold(ep); /* hold for timer */ 366 + if (!queue_delayed_work(fc_exch_workqueue, &ep->timeout_work, 367 + msecs_to_jiffies(timer_msec))) 368 + fc_exch_release(ep); 366 369 } 367 370 368 371 /** ··· 381 382 /** 382 383 * fc_exch_done_locked() - Complete an exchange with the exchange lock held 383 384 * @ep: The exchange that is complete 385 + * 386 + * Note: May sleep if invoked from outside a response handler. 384 387 */ 385 388 static int fc_exch_done_locked(struct fc_exch *ep) 386 389 { ··· 394 393 * ep, and in that case we only clear the resp and set it as 395 394 * complete, so it can be reused by the timer to send the rrq. 
396 395 */ 397 - ep->resp = NULL; 398 396 if (ep->state & FC_EX_DONE) 399 397 return rc; 400 398 ep->esb_stat |= ESB_ST_COMPLETE; ··· 464 464 } 465 465 466 466 static int fc_seq_send_locked(struct fc_lport *lport, struct fc_seq *sp, 467 - struct fc_frame *fp) 467 + struct fc_frame *fp) 468 468 { 469 469 struct fc_exch *ep; 470 470 struct fc_frame_header *fh = fc_frame_header_get(fp); 471 - int error; 471 + int error = -ENXIO; 472 472 u32 f_ctl; 473 473 u8 fh_type = fh->fh_type; 474 474 475 475 ep = fc_seq_exch(sp); 476 + 477 + if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL)) { 478 + fc_frame_free(fp); 479 + goto out; 480 + } 481 + 476 482 WARN_ON(!(ep->esb_stat & ESB_ST_SEQ_INIT)); 477 483 478 484 f_ctl = ntoh24(fh->fh_f_ctl); ··· 521 515 * @lport: The local port that the exchange will be sent on 522 516 * @sp: The sequence to be sent 523 517 * @fp: The frame to be sent on the exchange 518 + * 519 + * Note: The frame will be freed either by a direct call to fc_frame_free(fp) 520 + * or indirectly by calling libfc_function_template.frame_send(). 524 521 */ 525 522 static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp, 526 523 struct fc_frame *fp) ··· 590 581 591 582 /* 592 583 * Set the response handler for the exchange associated with a sequence. 584 + * 585 + * Note: May sleep if invoked from outside a response handler. 
593 586 */ 594 587 static void fc_seq_set_resp(struct fc_seq *sp, 595 588 void (*resp)(struct fc_seq *, struct fc_frame *, ··· 599 588 void *arg) 600 589 { 601 590 struct fc_exch *ep = fc_seq_exch(sp); 591 + DEFINE_WAIT(wait); 602 592 603 593 spin_lock_bh(&ep->ex_lock); 594 + while (ep->resp_active && ep->resp_task != current) { 595 + prepare_to_wait(&ep->resp_wq, &wait, TASK_UNINTERRUPTIBLE); 596 + spin_unlock_bh(&ep->ex_lock); 597 + 598 + schedule(); 599 + 600 + spin_lock_bh(&ep->ex_lock); 601 + } 602 + finish_wait(&ep->resp_wq, &wait); 604 603 ep->resp = resp; 605 604 ep->arg = arg; 606 605 spin_unlock_bh(&ep->ex_lock); ··· 643 622 if (!sp) 644 623 return -ENOMEM; 645 624 646 - ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL; 647 625 if (timer_msec) 648 626 fc_exch_timer_set_locked(ep, timer_msec); 649 627 650 - /* 651 - * If not logged into the fabric, don't send ABTS but leave 652 - * sequence active until next timeout. 653 - */ 654 - if (!ep->sid) 655 - return 0; 656 - 657 - /* 658 - * Send an abort for the sequence that timed out. 659 - */ 660 - fp = fc_frame_alloc(ep->lp, 0); 661 - if (fp) { 662 - fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid, 663 - FC_TYPE_BLS, FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); 664 - error = fc_seq_send_locked(ep->lp, sp, fp); 665 - } else 666 - error = -ENOBUFS; 628 + if (ep->sid) { 629 + /* 630 + * Send an abort for the sequence that timed out. 631 + */ 632 + fp = fc_frame_alloc(ep->lp, 0); 633 + if (fp) { 634 + ep->esb_stat |= ESB_ST_SEQ_INIT; 635 + fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid, 636 + FC_TYPE_BLS, FC_FC_END_SEQ | 637 + FC_FC_SEQ_INIT, 0); 638 + error = fc_seq_send_locked(ep->lp, sp, fp); 639 + } else { 640 + error = -ENOBUFS; 641 + } 642 + } else { 643 + /* 644 + * If not logged into the fabric, don't send ABTS but leave 645 + * sequence active until next timeout. 
646 + */ 647 + error = 0; 648 + } 649 + ep->esb_stat |= ESB_ST_ABNORMAL; 667 650 return error; 668 651 } 669 652 ··· 694 669 } 695 670 696 671 /** 672 + * fc_invoke_resp() - invoke ep->resp() 673 + * 674 + * Notes: 675 + * It is assumed that after initialization finished (this means the 676 + * first unlock of ex_lock after fc_exch_alloc()) ep->resp and ep->arg are 677 + * modified only via fc_seq_set_resp(). This guarantees that none of these 678 + * two variables changes if ep->resp_active > 0. 679 + * 680 + * If an fc_seq_set_resp() call is busy modifying ep->resp and ep->arg when 681 + * this function is invoked, the first spin_lock_bh() call in this function 682 + * will wait until fc_seq_set_resp() has finished modifying these variables. 683 + * 684 + * Since fc_exch_done() invokes fc_seq_set_resp() it is guaranteed that that 685 + * ep->resp() won't be invoked after fc_exch_done() has returned. 686 + * 687 + * The response handler itself may invoke fc_exch_done(), which will clear the 688 + * ep->resp pointer. 689 + * 690 + * Return value: 691 + * Returns true if and only if ep->resp has been invoked. 692 + */ 693 + static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp, 694 + struct fc_frame *fp) 695 + { 696 + void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg); 697 + void *arg; 698 + bool res = false; 699 + 700 + spin_lock_bh(&ep->ex_lock); 701 + ep->resp_active++; 702 + if (ep->resp_task != current) 703 + ep->resp_task = !ep->resp_task ? 
current : NULL; 704 + resp = ep->resp; 705 + arg = ep->arg; 706 + spin_unlock_bh(&ep->ex_lock); 707 + 708 + if (resp) { 709 + resp(sp, fp, arg); 710 + res = true; 711 + } else if (!IS_ERR(fp)) { 712 + fc_frame_free(fp); 713 + } 714 + 715 + spin_lock_bh(&ep->ex_lock); 716 + if (--ep->resp_active == 0) 717 + ep->resp_task = NULL; 718 + spin_unlock_bh(&ep->ex_lock); 719 + 720 + if (ep->resp_active == 0) 721 + wake_up(&ep->resp_wq); 722 + 723 + return res; 724 + } 725 + 726 + /** 697 727 * fc_exch_timeout() - Handle exchange timer expiration 698 728 * @work: The work_struct identifying the exchange that timed out 699 729 */ ··· 757 677 struct fc_exch *ep = container_of(work, struct fc_exch, 758 678 timeout_work.work); 759 679 struct fc_seq *sp = &ep->seq; 760 - void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg); 761 - void *arg; 762 680 u32 e_stat; 763 681 int rc = 1; 764 682 ··· 774 696 fc_exch_rrq(ep); 775 697 goto done; 776 698 } else { 777 - resp = ep->resp; 778 - arg = ep->arg; 779 - ep->resp = NULL; 780 699 if (e_stat & ESB_ST_ABNORMAL) 781 700 rc = fc_exch_done_locked(ep); 782 701 spin_unlock_bh(&ep->ex_lock); 783 702 if (!rc) 784 703 fc_exch_delete(ep); 785 - if (resp) 786 - resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg); 704 + fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_TIMEOUT)); 705 + fc_seq_set_resp(sp, NULL, ep->arg); 787 706 fc_seq_exch_abort(sp, 2 * ep->r_a_tov); 788 707 goto done; 789 708 } ··· 867 792 ep->f_ctl = FC_FC_FIRST_SEQ; /* next seq is first seq */ 868 793 ep->rxid = FC_XID_UNKNOWN; 869 794 ep->class = mp->class; 795 + ep->resp_active = 0; 796 + init_waitqueue_head(&ep->resp_wq); 870 797 INIT_DELAYED_WORK(&ep->timeout_work, fc_exch_timeout); 871 798 out: 872 799 return ep; ··· 915 838 pool = per_cpu_ptr(mp->pool, xid & fc_cpu_mask); 916 839 spin_lock_bh(&pool->lock); 917 840 ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order); 918 - if (ep && ep->xid == xid) 841 + if (ep) { 842 + WARN_ON(ep->xid != xid); 919 843 fc_exch_hold(ep); 844 
+ } 920 845 spin_unlock_bh(&pool->lock); 921 846 } 922 847 return ep; ··· 929 850 * fc_exch_done() - Indicate that an exchange/sequence tuple is complete and 930 851 * the memory allocated for the related objects may be freed. 931 852 * @sp: The sequence that has completed 853 + * 854 + * Note: May sleep if invoked from outside a response handler. 932 855 */ 933 856 static void fc_exch_done(struct fc_seq *sp) 934 857 { ··· 940 859 spin_lock_bh(&ep->ex_lock); 941 860 rc = fc_exch_done_locked(ep); 942 861 spin_unlock_bh(&ep->ex_lock); 862 + 863 + fc_seq_set_resp(sp, NULL, ep->arg); 943 864 if (!rc) 944 865 fc_exch_delete(ep); 945 866 } ··· 1070 987 } 1071 988 } 1072 989 990 + spin_lock_bh(&ep->ex_lock); 1073 991 /* 1074 992 * At this point, we have the exchange held. 1075 993 * Find or create the sequence. ··· 1098 1014 * sending RSP, hence write request on other 1099 1015 * end never finishes. 1100 1016 */ 1101 - spin_lock_bh(&ep->ex_lock); 1102 1017 sp->ssb_stat |= SSB_ST_RESP; 1103 1018 sp->id = fh->fh_seq_id; 1104 - spin_unlock_bh(&ep->ex_lock); 1105 1019 } else { 1020 + spin_unlock_bh(&ep->ex_lock); 1021 + 1106 1022 /* sequence/exch should exist */ 1107 1023 reject = FC_RJT_SEQ_ID; 1108 1024 goto rel; ··· 1113 1029 1114 1030 if (f_ctl & FC_FC_SEQ_INIT) 1115 1031 ep->esb_stat |= ESB_ST_SEQ_INIT; 1032 + spin_unlock_bh(&ep->ex_lock); 1116 1033 1117 1034 fr_seq(fp) = sp; 1118 1035 out: ··· 1376 1291 1377 1292 if (!ep) 1378 1293 goto reject; 1294 + 1295 + fp = fc_frame_alloc(ep->lp, sizeof(*ap)); 1296 + if (!fp) 1297 + goto free; 1298 + 1379 1299 spin_lock_bh(&ep->ex_lock); 1380 1300 if (ep->esb_stat & ESB_ST_COMPLETE) { 1381 1301 spin_unlock_bh(&ep->ex_lock); 1302 + 1303 + fc_frame_free(fp); 1382 1304 goto reject; 1383 1305 } 1384 - if (!(ep->esb_stat & ESB_ST_REC_QUAL)) 1306 + if (!(ep->esb_stat & ESB_ST_REC_QUAL)) { 1307 + ep->esb_stat |= ESB_ST_REC_QUAL; 1385 1308 fc_exch_hold(ep); /* hold for REC_QUAL */ 1386 - ep->esb_stat |= ESB_ST_ABNORMAL | ESB_ST_REC_QUAL; 
1387 - fc_exch_timer_set_locked(ep, ep->r_a_tov); 1388 - 1389 - fp = fc_frame_alloc(ep->lp, sizeof(*ap)); 1390 - if (!fp) { 1391 - spin_unlock_bh(&ep->ex_lock); 1392 - goto free; 1393 1309 } 1310 + fc_exch_timer_set_locked(ep, ep->r_a_tov); 1394 1311 fh = fc_frame_header_get(fp); 1395 1312 ap = fc_frame_payload_get(fp, sizeof(*ap)); 1396 1313 memset(ap, 0, sizeof(*ap)); ··· 1406 1319 } 1407 1320 sp = fc_seq_start_next_locked(sp); 1408 1321 fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS); 1322 + ep->esb_stat |= ESB_ST_ABNORMAL; 1409 1323 spin_unlock_bh(&ep->ex_lock); 1324 + 1325 + free: 1410 1326 fc_frame_free(rx_fp); 1411 1327 return; 1412 1328 1413 1329 reject: 1414 1330 fc_exch_send_ba_rjt(rx_fp, FC_BA_RJT_UNABLE, FC_BA_RJT_INV_XID); 1415 - free: 1416 - fc_frame_free(rx_fp); 1331 + goto free; 1417 1332 } 1418 1333 1419 1334 /** ··· 1505 1416 * If new exch resp handler is valid then call that 1506 1417 * first. 1507 1418 */ 1508 - if (ep->resp) 1509 - ep->resp(sp, fp, ep->arg); 1510 - else 1419 + if (!fc_invoke_resp(ep, sp, fp)) 1511 1420 lport->tt.lport_recv(lport, fp); 1512 1421 fc_exch_release(ep); /* release from lookup */ 1513 1422 } else { ··· 1529 1442 struct fc_exch *ep; 1530 1443 enum fc_sof sof; 1531 1444 u32 f_ctl; 1532 - void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg); 1533 - void *ex_resp_arg; 1534 1445 int rc; 1535 1446 1536 1447 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id)); ··· 1563 1478 1564 1479 f_ctl = ntoh24(fh->fh_f_ctl); 1565 1480 fr_seq(fp) = sp; 1481 + 1482 + spin_lock_bh(&ep->ex_lock); 1566 1483 if (f_ctl & FC_FC_SEQ_INIT) 1567 1484 ep->esb_stat |= ESB_ST_SEQ_INIT; 1485 + spin_unlock_bh(&ep->ex_lock); 1568 1486 1569 1487 if (fc_sof_needs_ack(sof)) 1570 1488 fc_seq_send_ack(sp, fp); 1571 - resp = ep->resp; 1572 - ex_resp_arg = ep->arg; 1573 1489 1574 1490 if (fh->fh_type != FC_TYPE_FCP && fr_eof(fp) == FC_EOF_T && 1575 1491 (f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) == 1576 1492 (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) { 1577 1493 
spin_lock_bh(&ep->ex_lock); 1578 - resp = ep->resp; 1579 1494 rc = fc_exch_done_locked(ep); 1580 1495 WARN_ON(fc_seq_exch(sp) != ep); 1581 1496 spin_unlock_bh(&ep->ex_lock); ··· 1596 1511 * If new exch resp handler is valid then call that 1597 1512 * first. 1598 1513 */ 1599 - if (resp) 1600 - resp(sp, fp, ex_resp_arg); 1601 - else 1602 - fc_frame_free(fp); 1514 + fc_invoke_resp(ep, sp, fp); 1515 + 1603 1516 fc_exch_release(ep); 1604 1517 return; 1605 1518 rel: ··· 1636 1553 */ 1637 1554 static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp) 1638 1555 { 1639 - void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg); 1640 - void *ex_resp_arg; 1641 1556 struct fc_frame_header *fh; 1642 1557 struct fc_ba_acc *ap; 1643 1558 struct fc_seq *sp; ··· 1680 1599 break; 1681 1600 } 1682 1601 1683 - resp = ep->resp; 1684 - ex_resp_arg = ep->arg; 1685 - 1686 1602 /* do we need to do some other checks here. Can we reuse more of 1687 1603 * fc_exch_recv_seq_resp 1688 1604 */ ··· 1691 1613 ntoh24(fh->fh_f_ctl) & FC_FC_LAST_SEQ) 1692 1614 rc = fc_exch_done_locked(ep); 1693 1615 spin_unlock_bh(&ep->ex_lock); 1616 + 1617 + fc_exch_hold(ep); 1694 1618 if (!rc) 1695 1619 fc_exch_delete(ep); 1696 - 1697 - if (resp) 1698 - resp(sp, fp, ex_resp_arg); 1699 - else 1700 - fc_frame_free(fp); 1701 - 1620 + fc_invoke_resp(ep, sp, fp); 1702 1621 if (has_rec) 1703 1622 fc_exch_timer_set(ep, ep->r_a_tov); 1704 - 1623 + fc_exch_release(ep); 1705 1624 } 1706 1625 1707 1626 /** ··· 1737 1662 break; 1738 1663 default: 1739 1664 if (ep) 1740 - FC_EXCH_DBG(ep, "BLS rctl %x - %s received", 1665 + FC_EXCH_DBG(ep, "BLS rctl %x - %s received\n", 1741 1666 fh->fh_r_ctl, 1742 1667 fc_exch_rctl_name(fh->fh_r_ctl)); 1743 1668 break; ··· 1820 1745 /** 1821 1746 * fc_exch_reset() - Reset an exchange 1822 1747 * @ep: The exchange to be reset 1748 + * 1749 + * Note: May sleep if invoked from outside a response handler. 
1823 1750 */ 1824 1751 static void fc_exch_reset(struct fc_exch *ep) 1825 1752 { 1826 1753 struct fc_seq *sp; 1827 - void (*resp)(struct fc_seq *, struct fc_frame *, void *); 1828 - void *arg; 1829 1754 int rc = 1; 1830 1755 1831 1756 spin_lock_bh(&ep->ex_lock); 1832 1757 fc_exch_abort_locked(ep, 0); 1833 1758 ep->state |= FC_EX_RST_CLEANUP; 1834 1759 fc_exch_timer_cancel(ep); 1835 - resp = ep->resp; 1836 - ep->resp = NULL; 1837 1760 if (ep->esb_stat & ESB_ST_REC_QUAL) 1838 1761 atomic_dec(&ep->ex_refcnt); /* drop hold for rec_qual */ 1839 1762 ep->esb_stat &= ~ESB_ST_REC_QUAL; 1840 - arg = ep->arg; 1841 1763 sp = &ep->seq; 1842 1764 rc = fc_exch_done_locked(ep); 1843 1765 spin_unlock_bh(&ep->ex_lock); 1766 + 1767 + fc_exch_hold(ep); 1768 + 1844 1769 if (!rc) 1845 1770 fc_exch_delete(ep); 1846 1771 1847 - if (resp) 1848 - resp(sp, ERR_PTR(-FC_EX_CLOSED), arg); 1772 + fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED)); 1773 + fc_seq_set_resp(sp, NULL, ep->arg); 1774 + fc_exch_release(ep); 1849 1775 } 1850 1776 1851 1777 /** ··· 2032 1956 2033 1957 switch (op) { 2034 1958 case ELS_LS_RJT: 2035 - FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ"); 1959 + FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ\n"); 2036 1960 /* fall through */ 2037 1961 case ELS_LS_ACC: 2038 1962 goto cleanup; 2039 1963 default: 2040 - FC_EXCH_DBG(aborted_ep, "unexpected response op %x " 2041 - "for RRQ", op); 1964 + FC_EXCH_DBG(aborted_ep, "unexpected response op %x for RRQ\n", 1965 + op); 2042 1966 return; 2043 1967 } 2044 1968 ··· 2609 2533 * cpu on which exchange originated by simple bitwise 2610 2534 * AND operation between fc_cpu_mask and exchange id. 
2611 2535 */ 2612 - fc_cpu_mask = 1; 2613 - fc_cpu_order = 0; 2614 - while (fc_cpu_mask < nr_cpu_ids) { 2615 - fc_cpu_mask <<= 1; 2616 - fc_cpu_order++; 2617 - } 2618 - fc_cpu_mask--; 2536 + fc_cpu_order = ilog2(roundup_pow_of_two(nr_cpu_ids)); 2537 + fc_cpu_mask = (1 << fc_cpu_order) - 1; 2619 2538 2620 2539 fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue"); 2621 2540 if (!fc_exch_workqueue)
+5 -5
drivers/scsi/libfc/fc_fcp.c
··· 902 902 /* 903 903 * Check for missing or extra data frames. 904 904 */ 905 - if (unlikely(fsp->xfer_len != expected_len)) { 905 + if (unlikely(fsp->cdb_status == SAM_STAT_GOOD && 906 + fsp->xfer_len != expected_len)) { 906 907 if (fsp->xfer_len < expected_len) { 907 908 /* 908 909 * Some data may be queued locally, ··· 956 955 * Test for transport underrun, independent of response 957 956 * underrun status. 958 957 */ 959 - if (fsp->xfer_len < fsp->data_len && !fsp->io_status && 958 + if (fsp->cdb_status == SAM_STAT_GOOD && 959 + fsp->xfer_len < fsp->data_len && !fsp->io_status && 960 960 (!(fsp->scsi_comp_flags & FCP_RESID_UNDER) || 961 - fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) { 961 + fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) 962 962 fsp->status_code = FC_DATA_UNDRUN; 963 - fsp->io_status = 0; 964 - } 965 963 } 966 964 967 965 seq = fsp->seq_ptr;
+2 -2
drivers/scsi/libfc/fc_lport.c
··· 516 516 * @lport: The local port receiving the LOGO 517 517 * @fp: The LOGO request frame 518 518 * 519 - * Locking Note: The lport lock is exected to be held before calling 519 + * Locking Note: The lport lock is expected to be held before calling 520 520 * this function. 521 521 */ 522 522 static void fc_lport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp) ··· 1088 1088 { 1089 1089 unsigned long delay = 0; 1090 1090 FC_LPORT_DBG(lport, "Error %ld in state %s, retries %d\n", 1091 - PTR_ERR(fp), fc_lport_state(lport), 1091 + IS_ERR(fp) ? -PTR_ERR(fp) : 0, fc_lport_state(lport), 1092 1092 lport->retry_count); 1093 1093 1094 1094 if (PTR_ERR(fp) == -FC_EX_CLOSED)
+3 -3
drivers/scsi/libfc/fc_rport.c
··· 1705 1705 * @rdata: The remote port that sent the PRLI request 1706 1706 * @rx_fp: The PRLI request frame 1707 1707 * 1708 - * Locking Note: The rport lock is exected to be held before calling 1708 + * Locking Note: The rport lock is expected to be held before calling 1709 1709 * this function. 1710 1710 */ 1711 1711 static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, ··· 1824 1824 * @rdata: The remote port that sent the PRLO request 1825 1825 * @rx_fp: The PRLO request frame 1826 1826 * 1827 - * Locking Note: The rport lock is exected to be held before calling 1827 + * Locking Note: The rport lock is expected to be held before calling 1828 1828 * this function. 1829 1829 */ 1830 1830 static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata, ··· 1895 1895 * @lport: The local port that received the LOGO request 1896 1896 * @fp: The LOGO request frame 1897 1897 * 1898 - * Locking Note: The rport lock is exected to be held before calling 1898 + * Locking Note: The rport lock is expected to be held before calling 1899 1899 * this function. 1900 1900 */ 1901 1901 static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
+4 -3
drivers/scsi/lpfc/lpfc_bsg.c
··· 2629 2629 rspiocbq, 2630 2630 (phba->fc_ratov * 2) 2631 2631 + LPFC_DRVR_TIMEOUT); 2632 - if (iocb_stat) { 2632 + if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOSTAT_SUCCESS)) { 2633 2633 ret_val = -EIO; 2634 2634 goto err_get_xri_exit; 2635 2635 } ··· 3204 3204 rspiocbq, (phba->fc_ratov * 2) + 3205 3205 LPFC_DRVR_TIMEOUT); 3206 3206 3207 - if ((iocb_stat != IOCB_SUCCESS) || ((phba->sli_rev < LPFC_SLI_REV4) && 3208 - (rsp->ulpStatus != IOCB_SUCCESS))) { 3207 + if ((iocb_stat != IOCB_SUCCESS) || 3208 + ((phba->sli_rev < LPFC_SLI_REV4) && 3209 + (rsp->ulpStatus != IOSTAT_SUCCESS))) { 3209 3210 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3210 3211 "3126 Failed loopback test issue iocb: " 3211 3212 "iocb_stat:x%x\n", iocb_stat);
+1 -1
drivers/scsi/lpfc/lpfc_ct.c
··· 280 280 buf_ptr = (struct lpfc_dmabuf *) ctiocb->context3; 281 281 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 282 282 kfree(buf_ptr); 283 - ctiocb->context1 = NULL; 283 + ctiocb->context3 = NULL; 284 284 } 285 285 lpfc_sli_release_iocbq(phba, ctiocb); 286 286 return 0;
+6 -2
drivers/scsi/lpfc/lpfc_hbadisc.c
··· 4171 4171 NLP_INT_NODE_ACT(ndlp); 4172 4172 atomic_set(&ndlp->cmd_pending, 0); 4173 4173 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth; 4174 - if (vport->phba->sli_rev == LPFC_SLI_REV4) 4175 - ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba); 4176 4174 } 4177 4175 4178 4176 struct lpfc_nodelist * ··· 4215 4217 lpfc_initialize_node(vport, ndlp, did); 4216 4218 4217 4219 spin_unlock_irqrestore(&phba->ndlp_lock, flags); 4220 + if (vport->phba->sli_rev == LPFC_SLI_REV4) 4221 + ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba); 4222 + 4218 4223 4219 4224 if (state != NLP_STE_UNUSED_NODE) 4220 4225 lpfc_nlp_set_state(vport, ndlp, state); ··· 5618 5617 5619 5618 lpfc_initialize_node(vport, ndlp, did); 5620 5619 INIT_LIST_HEAD(&ndlp->nlp_listp); 5620 + if (vport->phba->sli_rev == LPFC_SLI_REV4) 5621 + ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba); 5622 + 5621 5623 5622 5624 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, 5623 5625 "node init: did:x%x",
+25 -5
drivers/scsi/lpfc/lpfc_hw4.h
··· 3439 3439 #define els_req64_hopcnt_SHIFT 24 3440 3440 #define els_req64_hopcnt_MASK 0x000000ff 3441 3441 #define els_req64_hopcnt_WORD word13 3442 - uint32_t reserved[2]; 3442 + uint32_t word14; 3443 + uint32_t max_response_payload_len; 3443 3444 }; 3444 3445 3445 3446 struct xmit_els_rsp64_wqe { ··· 3555 3554 uint32_t relative_offset; 3556 3555 struct wqe_rctl_dfctl wge_ctl; /* word 5 */ 3557 3556 struct wqe_common wqe_com; /* words 6-11 */ 3558 - uint32_t rsvd_12_15[4]; 3557 + uint32_t rsvd_12_14[3]; 3558 + uint32_t max_response_payload_len; 3559 3559 }; 3560 3560 3561 3561 struct create_xri_wqe { ··· 3586 3584 3587 3585 struct fcp_iwrite64_wqe { 3588 3586 struct ulp_bde64 bde; 3589 - uint32_t payload_offset_len; 3587 + uint32_t word3; 3588 + #define cmd_buff_len_SHIFT 16 3589 + #define cmd_buff_len_MASK 0x00000ffff 3590 + #define cmd_buff_len_WORD word3 3591 + #define payload_offset_len_SHIFT 0 3592 + #define payload_offset_len_MASK 0x0000ffff 3593 + #define payload_offset_len_WORD word3 3590 3594 uint32_t total_xfer_len; 3591 3595 uint32_t initial_xfer_len; 3592 3596 struct wqe_common wqe_com; /* words 6-11 */ ··· 3602 3594 3603 3595 struct fcp_iread64_wqe { 3604 3596 struct ulp_bde64 bde; 3605 - uint32_t payload_offset_len; /* word 3 */ 3597 + uint32_t word3; 3598 + #define cmd_buff_len_SHIFT 16 3599 + #define cmd_buff_len_MASK 0x00000ffff 3600 + #define cmd_buff_len_WORD word3 3601 + #define payload_offset_len_SHIFT 0 3602 + #define payload_offset_len_MASK 0x0000ffff 3603 + #define payload_offset_len_WORD word3 3606 3604 uint32_t total_xfer_len; /* word 4 */ 3607 3605 uint32_t rsrvd5; /* word 5 */ 3608 3606 struct wqe_common wqe_com; /* words 6-11 */ ··· 3618 3604 3619 3605 struct fcp_icmnd64_wqe { 3620 3606 struct ulp_bde64 bde; /* words 0-2 */ 3621 - uint32_t rsrvd3; /* word 3 */ 3607 + uint32_t word3; 3608 + #define cmd_buff_len_SHIFT 16 3609 + #define cmd_buff_len_MASK 0x00000ffff 3610 + #define cmd_buff_len_WORD word3 3611 + #define 
payload_offset_len_SHIFT 0 3612 + #define payload_offset_len_MASK 0x0000ffff 3613 + #define payload_offset_len_WORD word3 3622 3614 uint32_t rsrvd4; /* word 4 */ 3623 3615 uint32_t rsrvd5; /* word 5 */ 3624 3616 struct wqe_common wqe_com; /* words 6-11 */
+1 -1
drivers/scsi/lpfc/lpfc_init.c
··· 4545 4545 pci_save_state(pdev); 4546 4546 4547 4547 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */ 4548 - if (pci_find_capability(pdev, PCI_CAP_ID_EXP)) 4548 + if (pci_is_pcie(pdev)) 4549 4549 pdev->needs_freset = 1; 4550 4550 4551 4551 return 0;
+107 -26
drivers/scsi/lpfc/lpfc_scsi.c
··· 1012 1012 break; 1013 1013 } 1014 1014 1015 - /* Allocate iotag for psb->cur_iocbq. */ 1016 - iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); 1017 - if (iotag == 0) { 1018 - pci_pool_free(phba->lpfc_scsi_dma_buf_pool, 1019 - psb->data, psb->dma_handle); 1020 - kfree(psb); 1021 - break; 1022 - } 1023 1015 1024 1016 lxri = lpfc_sli4_next_xritag(phba); 1025 1017 if (lxri == NO_XRI) { 1026 1018 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, 1027 1019 psb->data, psb->dma_handle); 1028 1020 kfree(psb); 1021 + break; 1022 + } 1023 + 1024 + /* Allocate iotag for psb->cur_iocbq. */ 1025 + iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); 1026 + if (iotag == 0) { 1027 + pci_pool_free(phba->lpfc_scsi_dma_buf_pool, 1028 + psb->data, psb->dma_handle); 1029 + kfree(psb); 1030 + lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1031 + "3368 Failed to allocated IOTAG for" 1032 + " XRI:0x%x\n", lxri); 1033 + lpfc_sli4_free_xri(phba, lxri); 1029 1034 break; 1030 1035 } 1031 1036 psb->cur_iocbq.sli4_lxritag = lxri; ··· 4490 4485 piocb->ulpContext = 4491 4486 vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; 4492 4487 } 4493 - if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) { 4494 - piocb->ulpFCP2Rcvy = 1; 4495 - } 4488 + piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0; 4496 4489 piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f); 4497 4490 4498 4491 /* ulpTimeout is only one byte */ ··· 4984 4981 } 4985 4982 } 4986 4983 4984 + 4985 + /** 4986 + * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed 4987 + * @vport: The virtual port for which this call is being executed. 4988 + * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. 
4989 + * 4990 + * This routine checks the FCP RSP INFO to see if the tsk mgmt command succeded 4991 + * 4992 + * Return code : 4993 + * 0x2003 - Error 4994 + * 0x2002 - Success 4995 + **/ 4996 + static int 4997 + lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd) 4998 + { 4999 + struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; 5000 + uint32_t rsp_info; 5001 + uint32_t rsp_len; 5002 + uint8_t rsp_info_code; 5003 + int ret = FAILED; 5004 + 5005 + 5006 + if (fcprsp == NULL) 5007 + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 5008 + "0703 fcp_rsp is missing\n"); 5009 + else { 5010 + rsp_info = fcprsp->rspStatus2; 5011 + rsp_len = be32_to_cpu(fcprsp->rspRspLen); 5012 + rsp_info_code = fcprsp->rspInfo3; 5013 + 5014 + 5015 + lpfc_printf_vlog(vport, KERN_INFO, 5016 + LOG_FCP, 5017 + "0706 fcp_rsp valid 0x%x," 5018 + " rsp len=%d code 0x%x\n", 5019 + rsp_info, 5020 + rsp_len, rsp_info_code); 5021 + 5022 + if ((fcprsp->rspStatus2&RSP_LEN_VALID) && (rsp_len == 8)) { 5023 + switch (rsp_info_code) { 5024 + case RSP_NO_FAILURE: 5025 + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 5026 + "0715 Task Mgmt No Failure\n"); 5027 + ret = SUCCESS; 5028 + break; 5029 + case RSP_TM_NOT_SUPPORTED: /* TM rejected */ 5030 + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 5031 + "0716 Task Mgmt Target " 5032 + "reject\n"); 5033 + break; 5034 + case RSP_TM_NOT_COMPLETED: /* TM failed */ 5035 + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 5036 + "0717 Task Mgmt Target " 5037 + "failed TM\n"); 5038 + break; 5039 + case RSP_TM_INVALID_LU: /* TM to invalid LU! */ 5040 + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 5041 + "0718 Task Mgmt to invalid " 5042 + "LUN\n"); 5043 + break; 5044 + } 5045 + } 5046 + } 5047 + return ret; 5048 + } 5049 + 5050 + 4987 5051 /** 4988 5052 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler 4989 5053 * @vport: The virtual port for which this call is being executed. 
··· 5112 5042 5113 5043 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING, 5114 5044 iocbq, iocbqrsp, lpfc_cmd->timeout); 5115 - if (status != IOCB_SUCCESS) { 5116 - if (status == IOCB_TIMEDOUT) { 5117 - ret = TIMEOUT_ERROR; 5118 - } else 5119 - ret = FAILED; 5120 - lpfc_cmd->status = IOSTAT_DRIVER_REJECT; 5045 + if ((status != IOCB_SUCCESS) || 5046 + (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) { 5121 5047 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 5122 5048 "0727 TMF %s to TGT %d LUN %d failed (%d, %d) " 5123 5049 "iocb_flag x%x\n", ··· 5121 5055 tgt_id, lun_id, iocbqrsp->iocb.ulpStatus, 5122 5056 iocbqrsp->iocb.un.ulpWord[4], 5123 5057 iocbq->iocb_flag); 5124 - } else if (status == IOCB_BUSY) 5125 - ret = FAILED; 5126 - else 5058 + /* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */ 5059 + if (status == IOCB_SUCCESS) { 5060 + if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR) 5061 + /* Something in the FCP_RSP was invalid. 5062 + * Check conditions */ 5063 + ret = lpfc_check_fcp_rsp(vport, lpfc_cmd); 5064 + else 5065 + ret = FAILED; 5066 + } else if (status == IOCB_TIMEDOUT) { 5067 + ret = TIMEOUT_ERROR; 5068 + } else { 5069 + ret = FAILED; 5070 + } 5071 + lpfc_cmd->status = IOSTAT_DRIVER_REJECT; 5072 + } else 5127 5073 ret = SUCCESS; 5128 5074 5129 5075 lpfc_sli_release_iocbq(phba, iocbqrsp); ··· 5259 5181 unsigned tgt_id = cmnd->device->id; 5260 5182 unsigned int lun_id = cmnd->device->lun; 5261 5183 struct lpfc_scsi_event_header scsi_event; 5262 - int status, ret = SUCCESS; 5184 + int status; 5263 5185 5264 5186 if (!rdata) { 5265 5187 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, ··· 5300 5222 * So, continue on. 5301 5223 * We will report success if all the i/o aborts successfully. 
5302 5224 */ 5303 - ret = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, 5225 + if (status == SUCCESS) 5226 + status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, 5304 5227 LPFC_CTX_LUN); 5305 - return ret; 5228 + 5229 + return status; 5306 5230 } 5307 5231 5308 5232 /** ··· 5328 5248 unsigned tgt_id = cmnd->device->id; 5329 5249 unsigned int lun_id = cmnd->device->lun; 5330 5250 struct lpfc_scsi_event_header scsi_event; 5331 - int status, ret = SUCCESS; 5251 + int status; 5332 5252 5333 5253 if (!rdata) { 5334 5254 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, ··· 5369 5289 * So, continue on. 5370 5290 * We will report success if all the i/o aborts successfully. 5371 5291 */ 5372 - ret = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, 5292 + if (status == SUCCESS) 5293 + status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, 5373 5294 LPFC_CTX_TGT); 5374 - return ret; 5295 + return status; 5375 5296 } 5376 5297 5377 5298 /**
+1
drivers/scsi/lpfc/lpfc_scsi.h
··· 73 73 #define RSP_RO_MISMATCH_ERR 0x03 74 74 #define RSP_TM_NOT_SUPPORTED 0x04 /* Task mgmt function not supported */ 75 75 #define RSP_TM_NOT_COMPLETED 0x05 /* Task mgmt function not performed */ 76 + #define RSP_TM_INVALID_LU 0x09 /* Task mgmt function to invalid LU */ 76 77 77 78 uint32_t rspInfoRsvd; /* FCP_RSP_INFO bytes 4-7 (reserved) */ 78 79
+172 -11
drivers/scsi/lpfc/lpfc_sli.c
··· 71 71 int); 72 72 static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *, 73 73 uint32_t); 74 + static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba); 75 + static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba); 74 76 75 77 static IOCB_t * 76 78 lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq) ··· 6568 6566 return; 6569 6567 } 6570 6568 6569 + /** 6570 + * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions 6571 + * are pending 6572 + * @phba: Pointer to HBA context object. 6573 + * 6574 + * This function checks if any mailbox completions are present on the mailbox 6575 + * completion queue. 6576 + **/ 6577 + bool 6578 + lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba) 6579 + { 6580 + 6581 + uint32_t idx; 6582 + struct lpfc_queue *mcq; 6583 + struct lpfc_mcqe *mcqe; 6584 + bool pending_completions = false; 6585 + 6586 + if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) 6587 + return false; 6588 + 6589 + /* Check for completions on mailbox completion queue */ 6590 + 6591 + mcq = phba->sli4_hba.mbx_cq; 6592 + idx = mcq->hba_index; 6593 + while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe)) { 6594 + mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe; 6595 + if (bf_get_le32(lpfc_trailer_completed, mcqe) && 6596 + (!bf_get_le32(lpfc_trailer_async, mcqe))) { 6597 + pending_completions = true; 6598 + break; 6599 + } 6600 + idx = (idx + 1) % mcq->entry_count; 6601 + if (mcq->hba_index == idx) 6602 + break; 6603 + } 6604 + return pending_completions; 6605 + 6606 + } 6607 + 6608 + /** 6609 + * lpfc_sli4_process_missed_mbox_completions - process mbox completions 6610 + * that were missed. 6611 + * @phba: Pointer to HBA context object. 6612 + * 6613 + * For sli4, it is possible to miss an interrupt. As such mbox completions 6614 + * maybe missed causing erroneous mailbox timeouts to occur. 
This function 6615 + * checks to see if mbox completions are on the mailbox completion queue 6616 + * and will process all the completions associated with the eq for the 6617 + * mailbox completion queue. 6618 + **/ 6619 + bool 6620 + lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba) 6621 + { 6622 + 6623 + uint32_t eqidx; 6624 + struct lpfc_queue *fpeq = NULL; 6625 + struct lpfc_eqe *eqe; 6626 + bool mbox_pending; 6627 + 6628 + if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) 6629 + return false; 6630 + 6631 + /* Find the eq associated with the mcq */ 6632 + 6633 + if (phba->sli4_hba.hba_eq) 6634 + for (eqidx = 0; eqidx < phba->cfg_fcp_io_channel; eqidx++) 6635 + if (phba->sli4_hba.hba_eq[eqidx]->queue_id == 6636 + phba->sli4_hba.mbx_cq->assoc_qid) { 6637 + fpeq = phba->sli4_hba.hba_eq[eqidx]; 6638 + break; 6639 + } 6640 + if (!fpeq) 6641 + return false; 6642 + 6643 + /* Turn off interrupts from this EQ */ 6644 + 6645 + lpfc_sli4_eq_clr_intr(fpeq); 6646 + 6647 + /* Check to see if a mbox completion is pending */ 6648 + 6649 + mbox_pending = lpfc_sli4_mbox_completions_pending(phba); 6650 + 6651 + /* 6652 + * If a mbox completion is pending, process all the events on EQ 6653 + * associated with the mbox completion queue (this could include 6654 + * mailbox commands, async events, els commands, receive queue data 6655 + * and fcp commands) 6656 + */ 6657 + 6658 + if (mbox_pending) 6659 + while ((eqe = lpfc_sli4_eq_get(fpeq))) { 6660 + lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx); 6661 + fpeq->EQ_processed++; 6662 + } 6663 + 6664 + /* Always clear and re-arm the EQ */ 6665 + 6666 + lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM); 6667 + 6668 + return mbox_pending; 6669 + 6670 + } 6571 6671 6572 6672 /** 6573 6673 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout ··· 6686 6582 MAILBOX_t *mb = &pmbox->u.mb; 6687 6583 struct lpfc_sli *psli = &phba->sli; 6688 6584 struct lpfc_sli_ring *pring; 6585 + 6586 + /* If the mailbox 
completed, process the completion and return */ 6587 + if (lpfc_sli4_process_missed_mbox_completions(phba)) 6588 + return; 6689 6589 6690 6590 /* Check the pmbox pointer first. There is a race condition 6691 6591 * between the mbox timeout handler getting executed in the ··· 7184 7076 phba->sli.mbox_active) * 7185 7077 1000) + jiffies; 7186 7078 spin_unlock_irq(&phba->hbalock); 7079 + 7080 + /* Make sure the mailbox is really active */ 7081 + if (timeout) 7082 + lpfc_sli4_process_missed_mbox_completions(phba); 7187 7083 7188 7084 /* Wait for the outstnading mailbox command to complete */ 7189 7085 while (phba->sli.mbox_active) { ··· 8188 8076 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1); 8189 8077 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE); 8190 8078 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0); 8079 + wqe->els_req.max_response_payload_len = total_len - xmit_len; 8191 8080 break; 8192 8081 case CMD_XMIT_SEQUENCE64_CX: 8193 8082 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, ··· 8233 8120 command_type = FCP_COMMAND_DATA_OUT; 8234 8121 /* word3 iocb=iotag wqe=payload_offset_len */ 8235 8122 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 8236 - wqe->fcp_iwrite.payload_offset_len = 8237 - xmit_len + sizeof(struct fcp_rsp); 8123 + bf_set(payload_offset_len, &wqe->fcp_iwrite, 8124 + xmit_len + sizeof(struct fcp_rsp)); 8125 + bf_set(cmd_buff_len, &wqe->fcp_iwrite, 8126 + 0); 8238 8127 /* word4 iocb=parameter wqe=total_xfer_length memcpy */ 8239 8128 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ 8240 8129 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com, ··· 8254 8139 case CMD_FCP_IREAD64_CR: 8255 8140 /* word3 iocb=iotag wqe=payload_offset_len */ 8256 8141 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 8257 - wqe->fcp_iread.payload_offset_len = 8258 - xmit_len + sizeof(struct fcp_rsp); 8142 + bf_set(payload_offset_len, &wqe->fcp_iread, 8143 + xmit_len + sizeof(struct fcp_rsp)); 8144 + bf_set(cmd_buff_len, 
&wqe->fcp_iread, 8145 + 0); 8259 8146 /* word4 iocb=parameter wqe=total_xfer_length memcpy */ 8260 8147 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ 8261 8148 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com, ··· 8273 8156 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1); 8274 8157 break; 8275 8158 case CMD_FCP_ICMND64_CR: 8159 + /* word3 iocb=iotag wqe=payload_offset_len */ 8160 + /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 8161 + bf_set(payload_offset_len, &wqe->fcp_icmd, 8162 + xmit_len + sizeof(struct fcp_rsp)); 8163 + bf_set(cmd_buff_len, &wqe->fcp_icmd, 8164 + 0); 8276 8165 /* word3 iocb=IO_TAG wqe=reserved */ 8277 - wqe->fcp_icmd.rsrvd3 = 0; 8278 8166 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0); 8279 8167 /* Always open the exchange */ 8280 8168 bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0); ··· 8325 8203 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1); 8326 8204 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); 8327 8205 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); 8206 + wqe->gen_req.max_response_payload_len = total_len - xmit_len; 8328 8207 command_type = OTHER_COMMAND; 8329 8208 break; 8330 8209 case CMD_XMIT_ELS_RSP64_CX: ··· 10196 10073 if (iocb_completed) { 10197 10074 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 10198 10075 "0331 IOCB wake signaled\n"); 10076 + /* Note: we are not indicating if the IOCB has a success 10077 + * status or not - that's for the caller to check. 10078 + * IOCB_SUCCESS means just that the command was sent and 10079 + * completed. Not that it completed successfully. 
10080 + * */ 10199 10081 } else if (timeleft == 0) { 10200 10082 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10201 10083 "0338 IOCB wait timeout error - no " ··· 11202 11074 struct lpfc_iocbq *pIocbOut, 11203 11075 struct lpfc_wcqe_complete *wcqe) 11204 11076 { 11077 + int numBdes, i; 11205 11078 unsigned long iflags; 11206 - uint32_t status; 11079 + uint32_t status, max_response; 11080 + struct lpfc_dmabuf *dmabuf; 11081 + struct ulp_bde64 *bpl, bde; 11207 11082 size_t offset = offsetof(struct lpfc_iocbq, iocb); 11208 11083 11209 11084 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset, ··· 11223 11092 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 11224 11093 else { 11225 11094 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 11226 - pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed; 11095 + switch (pIocbOut->iocb.ulpCommand) { 11096 + case CMD_ELS_REQUEST64_CR: 11097 + dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3; 11098 + bpl = (struct ulp_bde64 *)dmabuf->virt; 11099 + bde.tus.w = le32_to_cpu(bpl[1].tus.w); 11100 + max_response = bde.tus.f.bdeSize; 11101 + break; 11102 + case CMD_GEN_REQUEST64_CR: 11103 + max_response = 0; 11104 + if (!pIocbOut->context3) 11105 + break; 11106 + numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/ 11107 + sizeof(struct ulp_bde64); 11108 + dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3; 11109 + bpl = (struct ulp_bde64 *)dmabuf->virt; 11110 + for (i = 0; i < numBdes; i++) { 11111 + bde.tus.w = le32_to_cpu(bpl[i].tus.w); 11112 + if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) 11113 + max_response += bde.tus.f.bdeSize; 11114 + } 11115 + break; 11116 + default: 11117 + max_response = wcqe->total_data_placed; 11118 + break; 11119 + } 11120 + if (max_response < wcqe->total_data_placed) 11121 + pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response; 11122 + else 11123 + pIocbIn->iocb.un.genreq64.bdl.bdeSize = 11124 + wcqe->total_data_placed; 11227 11125 } 11228 11126 11229 11127 /* Convert BG errors for completion status 
*/ ··· 15258 15098 uint16_t max_rpi, rpi_limit; 15259 15099 uint16_t rpi_remaining, lrpi = 0; 15260 15100 struct lpfc_rpi_hdr *rpi_hdr; 15101 + unsigned long iflag; 15261 15102 15262 15103 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi; 15263 15104 rpi_limit = phba->sli4_hba.next_rpi; ··· 15267 15106 * Fetch the next logical rpi. Because this index is logical, 15268 15107 * the driver starts at 0 each time. 15269 15108 */ 15270 - spin_lock_irq(&phba->hbalock); 15109 + spin_lock_irqsave(&phba->hbalock, iflag); 15271 15110 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0); 15272 15111 if (rpi >= rpi_limit) 15273 15112 rpi = LPFC_RPI_ALLOC_ERROR; ··· 15283 15122 */ 15284 15123 if ((rpi == LPFC_RPI_ALLOC_ERROR) && 15285 15124 (phba->sli4_hba.rpi_count >= max_rpi)) { 15286 - spin_unlock_irq(&phba->hbalock); 15125 + spin_unlock_irqrestore(&phba->hbalock, iflag); 15287 15126 return rpi; 15288 15127 } 15289 15128 ··· 15292 15131 * extents. 15293 15132 */ 15294 15133 if (!phba->sli4_hba.rpi_hdrs_in_use) { 15295 - spin_unlock_irq(&phba->hbalock); 15134 + spin_unlock_irqrestore(&phba->hbalock, iflag); 15296 15135 return rpi; 15297 15136 } 15298 15137 ··· 15303 15142 * how many are supported max by the device. 15304 15143 */ 15305 15144 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count; 15306 - spin_unlock_irq(&phba->hbalock); 15145 + spin_unlock_irqrestore(&phba->hbalock, iflag); 15307 15146 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) { 15308 15147 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 15309 15148 if (!rpi_hdr) {
+1
drivers/scsi/lpfc/lpfc_sli4.h
··· 673 673 int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t); 674 674 int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *); 675 675 uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *); 676 + void lpfc_sli4_free_xri(struct lpfc_hba *, int); 676 677 int lpfc_sli4_post_async_mbox(struct lpfc_hba *); 677 678 int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int); 678 679 struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
+1 -1
drivers/scsi/lpfc/lpfc_version.h
··· 18 18 * included with this package. * 19 19 *******************************************************************/ 20 20 21 - #define LPFC_DRIVER_VERSION "8.3.42" 21 + #define LPFC_DRIVER_VERSION "8.3.43" 22 22 #define LPFC_DRIVER_NAME "lpfc" 23 23 24 24 /* Used for SLI 2/3 */
+1
drivers/scsi/megaraid/megaraid_sas.h
··· 1531 1531 struct megasas_register_set __iomem *reg_set; 1532 1532 u32 *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY]; 1533 1533 struct megasas_pd_list pd_list[MEGASAS_MAX_PD]; 1534 + struct megasas_pd_list local_pd_list[MEGASAS_MAX_PD]; 1534 1535 u8 ld_ids[MEGASAS_MAX_LD_IDS]; 1535 1536 s8 init_id; 1536 1537
+9 -7
drivers/scsi/megaraid/megaraid_sas_base.c
··· 3194 3194 (le32_to_cpu(ci->count) < 3195 3195 (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) { 3196 3196 3197 - memset(instance->pd_list, 0, 3197 + memset(instance->local_pd_list, 0, 3198 3198 MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)); 3199 3199 3200 3200 for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) { 3201 3201 3202 - instance->pd_list[pd_addr->deviceId].tid = 3202 + instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid = 3203 3203 le16_to_cpu(pd_addr->deviceId); 3204 - instance->pd_list[pd_addr->deviceId].driveType = 3204 + instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType = 3205 3205 pd_addr->scsiDevType; 3206 - instance->pd_list[pd_addr->deviceId].driveState = 3206 + instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState = 3207 3207 MR_PD_STATE_SYSTEM; 3208 3208 pd_addr++; 3209 3209 } 3210 + memcpy(instance->pd_list, instance->local_pd_list, 3211 + sizeof(instance->pd_list)); 3210 3212 } 3211 3213 3212 3214 pci_free_consistent(instance->pdev, ··· 4000 3998 * values 4001 3999 */ 4002 4000 if ((prev_aen.members.class <= curr_aen.members.class) && 4003 - !((le16_to_cpu(prev_aen.members.locale) & curr_aen.members.locale) ^ 4001 + !((prev_aen.members.locale & curr_aen.members.locale) ^ 4004 4002 curr_aen.members.locale)) { 4005 4003 /* 4006 4004 * Previously issued event registration includes ··· 4008 4006 */ 4009 4007 return 0; 4010 4008 } else { 4011 - curr_aen.members.locale |= le16_to_cpu(prev_aen.members.locale); 4009 + curr_aen.members.locale |= prev_aen.members.locale; 4012 4010 4013 4011 if (prev_aen.members.class < curr_aen.members.class) 4014 4012 curr_aen.members.class = prev_aen.members.class; ··· 4099 4097 class_locale.members.class = MR_EVT_CLASS_DEBUG; 4100 4098 4101 4099 return megasas_register_aen(instance, 4102 - le32_to_cpu(eli.newest_seq_num) + 1, 4100 + eli.newest_seq_num + 1, 4103 4101 class_locale.word); 4104 4102 } 4105 4103
+153
drivers/scsi/pm8001/pm8001_ctl.c
··· 309 309 } 310 310 static DEVICE_ATTR(aap_log, S_IRUGO, pm8001_ctl_aap_log_show, NULL); 311 311 /** 312 + * pm8001_ctl_ib_queue_log_show - Out bound Queue log 313 + * @cdev:pointer to embedded class device 314 + * @buf: the buffer returned 315 + * A sysfs 'read-only' shost attribute. 316 + */ 317 + static ssize_t pm8001_ctl_ib_queue_log_show(struct device *cdev, 318 + struct device_attribute *attr, char *buf) 319 + { 320 + struct Scsi_Host *shost = class_to_shost(cdev); 321 + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); 322 + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; 323 + int offset; 324 + char *str = buf; 325 + int start = 0; 326 + #define IB_MEMMAP(c) \ 327 + (*(u32 *)((u8 *)pm8001_ha-> \ 328 + memoryMap.region[IB].virt_ptr + \ 329 + pm8001_ha->evtlog_ib_offset + (c))) 330 + 331 + for (offset = 0; offset < IB_OB_READ_TIMES; offset++) { 332 + if (pm8001_ha->chip_id != chip_8001) 333 + str += sprintf(str, "0x%08x\n", IB_MEMMAP(start)); 334 + else 335 + str += sprintf(str, "0x%08x\n", IB_MEMMAP(start)); 336 + start = start + 4; 337 + } 338 + pm8001_ha->evtlog_ib_offset += SYSFS_OFFSET; 339 + if ((((pm8001_ha->evtlog_ib_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0) 340 + && (pm8001_ha->chip_id != chip_8001)) 341 + pm8001_ha->evtlog_ib_offset = 0; 342 + if ((((pm8001_ha->evtlog_ib_offset) % (PM8001_IB_OB_QUEUE_SIZE)) == 0) 343 + && (pm8001_ha->chip_id == chip_8001)) 344 + pm8001_ha->evtlog_ib_offset = 0; 345 + 346 + return str - buf; 347 + } 348 + 349 + static DEVICE_ATTR(ib_log, S_IRUGO, pm8001_ctl_ib_queue_log_show, NULL); 350 + /** 351 + * pm8001_ctl_ob_queue_log_show - Out bound Queue log 352 + * @cdev:pointer to embedded class device 353 + * @buf: the buffer returned 354 + * A sysfs 'read-only' shost attribute. 
355 + */ 356 + 357 + static ssize_t pm8001_ctl_ob_queue_log_show(struct device *cdev, 358 + struct device_attribute *attr, char *buf) 359 + { 360 + struct Scsi_Host *shost = class_to_shost(cdev); 361 + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); 362 + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; 363 + int offset; 364 + char *str = buf; 365 + int start = 0; 366 + #define OB_MEMMAP(c) \ 367 + (*(u32 *)((u8 *)pm8001_ha-> \ 368 + memoryMap.region[OB].virt_ptr + \ 369 + pm8001_ha->evtlog_ob_offset + (c))) 370 + 371 + for (offset = 0; offset < IB_OB_READ_TIMES; offset++) { 372 + if (pm8001_ha->chip_id != chip_8001) 373 + str += sprintf(str, "0x%08x\n", OB_MEMMAP(start)); 374 + else 375 + str += sprintf(str, "0x%08x\n", OB_MEMMAP(start)); 376 + start = start + 4; 377 + } 378 + pm8001_ha->evtlog_ob_offset += SYSFS_OFFSET; 379 + if ((((pm8001_ha->evtlog_ob_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0) 380 + && (pm8001_ha->chip_id != chip_8001)) 381 + pm8001_ha->evtlog_ob_offset = 0; 382 + if ((((pm8001_ha->evtlog_ob_offset) % (PM8001_IB_OB_QUEUE_SIZE)) == 0) 383 + && (pm8001_ha->chip_id == chip_8001)) 384 + pm8001_ha->evtlog_ob_offset = 0; 385 + 386 + return str - buf; 387 + } 388 + static DEVICE_ATTR(ob_log, S_IRUGO, pm8001_ctl_ob_queue_log_show, NULL); 389 + /** 390 + * pm8001_ctl_bios_version_show - Bios version Display 391 + * @cdev:pointer to embedded class device 392 + * @buf:the buffer returned 393 + * A sysfs 'read-only' shost attribute. 
394 + */ 395 + static ssize_t pm8001_ctl_bios_version_show(struct device *cdev, 396 + struct device_attribute *attr, char *buf) 397 + { 398 + struct Scsi_Host *shost = class_to_shost(cdev); 399 + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); 400 + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; 401 + char *str = buf; 402 + void *virt_addr; 403 + int bios_index; 404 + DECLARE_COMPLETION_ONSTACK(completion); 405 + struct pm8001_ioctl_payload payload; 406 + 407 + pm8001_ha->nvmd_completion = &completion; 408 + payload.minor_function = 7; 409 + payload.offset = 0; 410 + payload.length = 4096; 411 + payload.func_specific = kzalloc(4096, GFP_KERNEL); 412 + PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload); 413 + wait_for_completion(&completion); 414 + virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr; 415 + for (bios_index = BIOSOFFSET; bios_index < BIOS_OFFSET_LIMIT; 416 + bios_index++) 417 + str += sprintf(str, "%c", 418 + *((u8 *)((u8 *)virt_addr+bios_index))); 419 + return str - buf; 420 + } 421 + static DEVICE_ATTR(bios_version, S_IRUGO, pm8001_ctl_bios_version_show, NULL); 422 + /** 312 423 * pm8001_ctl_aap_log_show - IOP event log 313 424 * @cdev: pointer to embedded class device 314 425 * @buf: the buffer returned ··· 454 343 return str - buf; 455 344 } 456 345 static DEVICE_ATTR(iop_log, S_IRUGO, pm8001_ctl_iop_log_show, NULL); 346 + 347 + /** 348 + ** pm8001_ctl_fatal_log_show - fatal error logging 349 + ** @cdev:pointer to embedded class device 350 + ** @buf: the buffer returned 351 + ** 352 + ** A sysfs 'read-only' shost attribute. 
353 + **/ 354 + 355 + static ssize_t pm8001_ctl_fatal_log_show(struct device *cdev, 356 + struct device_attribute *attr, char *buf) 357 + { 358 + u32 count; 359 + 360 + count = pm80xx_get_fatal_dump(cdev, attr, buf); 361 + return count; 362 + } 363 + 364 + static DEVICE_ATTR(fatal_log, S_IRUGO, pm8001_ctl_fatal_log_show, NULL); 365 + 366 + 367 + /** 368 + ** pm8001_ctl_gsm_log_show - gsm dump collection 369 + ** @cdev:pointer to embedded class device 370 + ** @buf: the buffer returned 371 + **A sysfs 'read-only' shost attribute. 372 + **/ 373 + static ssize_t pm8001_ctl_gsm_log_show(struct device *cdev, 374 + struct device_attribute *attr, char *buf) 375 + { 376 + u32 count; 377 + 378 + count = pm8001_get_gsm_dump(cdev, SYSFS_OFFSET, buf); 379 + return count; 380 + } 381 + 382 + static DEVICE_ATTR(gsm_log, S_IRUGO, pm8001_ctl_gsm_log_show, NULL); 457 383 458 384 #define FLASH_CMD_NONE 0x00 459 385 #define FLASH_CMD_UPDATE 0x01 ··· 751 603 &dev_attr_update_fw, 752 604 &dev_attr_aap_log, 753 605 &dev_attr_iop_log, 606 + &dev_attr_fatal_log, 607 + &dev_attr_gsm_log, 754 608 &dev_attr_max_out_io, 755 609 &dev_attr_max_devices, 756 610 &dev_attr_max_sg_list, 757 611 &dev_attr_sas_spec_support, 758 612 &dev_attr_logging_level, 759 613 &dev_attr_host_sas_address, 614 + &dev_attr_bios_version, 615 + &dev_attr_ib_log, 616 + &dev_attr_ob_log, 760 617 NULL, 761 618 }; 762 619
+6
drivers/scsi/pm8001/pm8001_ctl.h
··· 45 45 #define HEADER_LEN 28 46 46 #define SIZE_OFFSET 16 47 47 48 + #define BIOSOFFSET 56 49 + #define BIOS_OFFSET_LIMIT 61 48 50 49 51 #define FLASH_OK 0x000000 50 52 #define FAIL_OPEN_BIOS_FILE 0x000100 ··· 55 53 #define FAIL_OUT_MEMORY 0x000c00 56 54 #define FLASH_IN_PROGRESS 0x001000 57 55 56 + #define IB_OB_READ_TIMES 256 57 + #define SYSFS_OFFSET 1024 58 + #define PM80XX_IB_OB_QUEUE_SIZE (32 * 1024) 59 + #define PM8001_IB_OB_QUEUE_SIZE (16 * 1024) 58 60 #endif /* PM8001_CTL_H_INCLUDED */ 59 61
+6 -2
drivers/scsi/pm8001/pm8001_defs.h
··· 46 46 chip_8008, 47 47 chip_8009, 48 48 chip_8018, 49 - chip_8019 49 + chip_8019, 50 + chip_8074, 51 + chip_8076, 52 + chip_8077 50 53 }; 51 54 52 55 enum phy_speed { ··· 102 99 NVMD, /* NVM device */ 103 100 DEV_MEM, /* memory for devices */ 104 101 CCB_MEM, /* memory for command control block */ 105 - FW_FLASH /* memory for fw flash update */ 102 + FW_FLASH, /* memory for fw flash update */ 103 + FORENSIC_MEM /* memory for fw forensic data */ 106 104 }; 107 105 #define PM8001_EVENT_LOG_SIZE (128 * 1024) 108 106
+149 -3
drivers/scsi/pm8001/pm8001_hwi.c
··· 1868 1868 if (unlikely(!t || !t->lldd_task || !t->dev)) 1869 1869 return; 1870 1870 ts = &t->task_status; 1871 + /* Print sas address of IO failed device */ 1872 + if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) && 1873 + (status != IO_UNDERFLOW)) 1874 + PM8001_FAIL_DBG(pm8001_ha, 1875 + pm8001_printk("SAS Address of IO Failure Drive:" 1876 + "%016llx", SAS_ADDR(t->dev->sas_addr))); 1877 + 1871 1878 switch (status) { 1872 1879 case IO_SUCCESS: 1873 1880 PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS" ··· 2283 2276 u32 param; 2284 2277 u32 status; 2285 2278 u32 tag; 2279 + int i, j; 2280 + u8 sata_addr_low[4]; 2281 + u32 temp_sata_addr_low; 2282 + u8 sata_addr_hi[4]; 2283 + u32 temp_sata_addr_hi; 2286 2284 struct sata_completion_resp *psataPayload; 2287 2285 struct task_status_struct *ts; 2288 2286 struct ata_task_resp *resp ; ··· 2337 2325 pm8001_printk("ts null\n")); 2338 2326 return; 2339 2327 } 2340 - 2328 + /* Print sas address of IO failed device */ 2329 + if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) && 2330 + (status != IO_UNDERFLOW)) { 2331 + if (!((t->dev->parent) && 2332 + (DEV_IS_EXPANDER(t->dev->parent->dev_type)))) { 2333 + for (i = 0 , j = 4; j <= 7 && i <= 3; i++ , j++) 2334 + sata_addr_low[i] = pm8001_ha->sas_addr[j]; 2335 + for (i = 0 , j = 0; j <= 3 && i <= 3; i++ , j++) 2336 + sata_addr_hi[i] = pm8001_ha->sas_addr[j]; 2337 + memcpy(&temp_sata_addr_low, sata_addr_low, 2338 + sizeof(sata_addr_low)); 2339 + memcpy(&temp_sata_addr_hi, sata_addr_hi, 2340 + sizeof(sata_addr_hi)); 2341 + temp_sata_addr_hi = (((temp_sata_addr_hi >> 24) & 0xff) 2342 + |((temp_sata_addr_hi << 8) & 2343 + 0xff0000) | 2344 + ((temp_sata_addr_hi >> 8) 2345 + & 0xff00) | 2346 + ((temp_sata_addr_hi << 24) & 2347 + 0xff000000)); 2348 + temp_sata_addr_low = ((((temp_sata_addr_low >> 24) 2349 + & 0xff) | 2350 + ((temp_sata_addr_low << 8) 2351 + & 0xff0000) | 2352 + ((temp_sata_addr_low >> 8) 2353 + & 0xff00) | 2354 + ((temp_sata_addr_low << 24) 2355 + & 
0xff000000)) + 2356 + pm8001_dev->attached_phy + 2357 + 0x10); 2358 + PM8001_FAIL_DBG(pm8001_ha, 2359 + pm8001_printk("SAS Address of IO Failure Drive:" 2360 + "%08x%08x", temp_sata_addr_hi, 2361 + temp_sata_addr_low)); 2362 + } else { 2363 + PM8001_FAIL_DBG(pm8001_ha, 2364 + pm8001_printk("SAS Address of IO Failure Drive:" 2365 + "%016llx", SAS_ADDR(t->dev->sas_addr))); 2366 + } 2367 + } 2341 2368 switch (status) { 2342 2369 case IO_SUCCESS: 2343 2370 PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n")); ··· 3138 3087 struct pm8001_device *pm8001_dev = ccb->device; 3139 3088 u32 status = le32_to_cpu(pPayload->status); 3140 3089 u32 device_id = le32_to_cpu(pPayload->device_id); 3141 - u8 pds = le32_to_cpu(pPayload->pds_nds) | PDS_BITS; 3142 - u8 nds = le32_to_cpu(pPayload->pds_nds) | NDS_BITS; 3090 + u8 pds = le32_to_cpu(pPayload->pds_nds) & PDS_BITS; 3091 + u8 nds = le32_to_cpu(pPayload->pds_nds) & NDS_BITS; 3143 3092 PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Set device id = 0x%x state " 3144 3093 "from 0x%x to 0x%x status = 0x%x!\n", 3145 3094 device_id, pds, nds, status)); ··· 4751 4700 sspTMCmd.tmf = cpu_to_le32(tmf->tmf); 4752 4701 memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8); 4753 4702 sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag); 4703 + if (pm8001_ha->chip_id != chip_8001) 4704 + sspTMCmd.ds_ads_m = 0x08; 4754 4705 circularQ = &pm8001_ha->inbnd_q_tbl[0]; 4755 4706 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd, 0); 4756 4707 return ret; ··· 4829 4776 cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); 4830 4777 nvmd_req.resp_addr_lo = 4831 4778 cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); 4779 + break; 4780 + } 4781 + case IOP_RDUMP: { 4782 + nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | IOP_RDUMP); 4783 + nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length); 4784 + nvmd_req.vpd_offset = cpu_to_le32(ioctl_payload->offset); 4785 + nvmd_req.resp_addr_hi = 4786 + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); 
4787 + nvmd_req.resp_addr_lo = 4788 + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); 4832 4789 break; 4833 4790 } 4834 4791 default: ··· 4999 4936 rc = pm8001_chip_fw_flash_update_build(pm8001_ha, &flash_update_info, 5000 4937 tag); 5001 4938 return rc; 4939 + } 4940 + 4941 + ssize_t 4942 + pm8001_get_gsm_dump(struct device *cdev, u32 length, char *buf) 4943 + { 4944 + u32 value, rem, offset = 0, bar = 0; 4945 + u32 index, work_offset, dw_length; 4946 + u32 shift_value, gsm_base, gsm_dump_offset; 4947 + char *direct_data; 4948 + struct Scsi_Host *shost = class_to_shost(cdev); 4949 + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); 4950 + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; 4951 + 4952 + direct_data = buf; 4953 + gsm_dump_offset = pm8001_ha->fatal_forensic_shift_offset; 4954 + 4955 + /* check max is 1 Mbytes */ 4956 + if ((length > 0x100000) || (gsm_dump_offset & 3) || 4957 + ((gsm_dump_offset + length) > 0x1000000)) 4958 + return 1; 4959 + 4960 + if (pm8001_ha->chip_id == chip_8001) 4961 + bar = 2; 4962 + else 4963 + bar = 1; 4964 + 4965 + work_offset = gsm_dump_offset & 0xFFFF0000; 4966 + offset = gsm_dump_offset & 0x0000FFFF; 4967 + gsm_dump_offset = work_offset; 4968 + /* adjust length to dword boundary */ 4969 + rem = length & 3; 4970 + dw_length = length >> 2; 4971 + 4972 + for (index = 0; index < dw_length; index++) { 4973 + if ((work_offset + offset) & 0xFFFF0000) { 4974 + if (pm8001_ha->chip_id == chip_8001) 4975 + shift_value = ((gsm_dump_offset + offset) & 4976 + SHIFT_REG_64K_MASK); 4977 + else 4978 + shift_value = (((gsm_dump_offset + offset) & 4979 + SHIFT_REG_64K_MASK) >> 4980 + SHIFT_REG_BIT_SHIFT); 4981 + 4982 + if (pm8001_ha->chip_id == chip_8001) { 4983 + gsm_base = GSM_BASE; 4984 + if (-1 == pm8001_bar4_shift(pm8001_ha, 4985 + (gsm_base + shift_value))) 4986 + return 1; 4987 + } else { 4988 + gsm_base = 0; 4989 + if (-1 == pm80xx_bar4_shift(pm8001_ha, 4990 + (gsm_base + shift_value))) 4991 + return 1; 4992 + } 4993 
+ gsm_dump_offset = (gsm_dump_offset + offset) & 4994 + 0xFFFF0000; 4995 + work_offset = 0; 4996 + offset = offset & 0x0000FFFF; 4997 + } 4998 + value = pm8001_cr32(pm8001_ha, bar, (work_offset + offset) & 4999 + 0x0000FFFF); 5000 + direct_data += sprintf(direct_data, "%08x ", value); 5001 + offset += 4; 5002 + } 5003 + if (rem != 0) { 5004 + value = pm8001_cr32(pm8001_ha, bar, (work_offset + offset) & 5005 + 0x0000FFFF); 5006 + /* xfr for non_dw */ 5007 + direct_data += sprintf(direct_data, "%08x ", value); 5008 + } 5009 + /* Shift back to BAR4 original address */ 5010 + if (pm8001_ha->chip_id == chip_8001) { 5011 + if (-1 == pm8001_bar4_shift(pm8001_ha, 0)) 5012 + return 1; 5013 + } else { 5014 + if (-1 == pm80xx_bar4_shift(pm8001_ha, 0)) 5015 + return 1; 5016 + } 5017 + pm8001_ha->fatal_forensic_shift_offset += 1024; 5018 + 5019 + if (pm8001_ha->fatal_forensic_shift_offset >= 0x100000) 5020 + pm8001_ha->fatal_forensic_shift_offset = 0; 5021 + return direct_data - buf; 5002 5022 } 5003 5023 5004 5024 int
+3
drivers/scsi/pm8001/pm8001_hwi.h
··· 1027 1027 #define DEVREG_FAILURE_PORT_NOT_VALID_STATE 0x06 1028 1028 #define DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID 0x07 1029 1029 1030 + #define GSM_BASE 0x4F0000 1031 + #define SHIFT_REG_64K_MASK 0xffff0000 1032 + #define SHIFT_REG_BIT_SHIFT 8 1030 1033 #endif 1031 1034
+64 -1
drivers/scsi/pm8001/pm8001_init.c
··· 54 54 [chip_8009] = {1, 8, &pm8001_80xx_dispatch,}, 55 55 [chip_8018] = {0, 16, &pm8001_80xx_dispatch,}, 56 56 [chip_8019] = {1, 16, &pm8001_80xx_dispatch,}, 57 + [chip_8074] = {0, 8, &pm8001_80xx_dispatch,}, 58 + [chip_8076] = {0, 16, &pm8001_80xx_dispatch,}, 59 + [chip_8077] = {0, 16, &pm8001_80xx_dispatch,}, 57 60 }; 58 61 static int pm8001_id; 59 62 ··· 347 344 /* Memory region for fw flash */ 348 345 pm8001_ha->memoryMap.region[FW_FLASH].total_len = 4096; 349 346 347 + pm8001_ha->memoryMap.region[FORENSIC_MEM].num_elements = 1; 348 + pm8001_ha->memoryMap.region[FORENSIC_MEM].total_len = 0x10000; 349 + pm8001_ha->memoryMap.region[FORENSIC_MEM].element_size = 0x10000; 350 + pm8001_ha->memoryMap.region[FORENSIC_MEM].alignment = 0x10000; 350 351 for (i = 0; i < USI_MAX_MEMCNT; i++) { 351 352 if (pm8001_mem_alloc(pm8001_ha->pdev, 352 353 &pm8001_ha->memoryMap.region[i].virt_ptr, ··· 671 664 #endif 672 665 } 673 666 667 + /* 668 + * pm8001_get_phy_settings_info : Read phy setting values. 669 + * @pm8001_ha : our hba. 
670 + */ 671 + void pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha) 672 + { 673 + 674 + #ifdef PM8001_READ_VPD 675 + /*OPTION ROM FLASH read for the SPC cards */ 676 + DECLARE_COMPLETION_ONSTACK(completion); 677 + struct pm8001_ioctl_payload payload; 678 + 679 + pm8001_ha->nvmd_completion = &completion; 680 + /* SAS ADDRESS read from flash / EEPROM */ 681 + payload.minor_function = 6; 682 + payload.offset = 0; 683 + payload.length = 4096; 684 + payload.func_specific = kzalloc(4096, GFP_KERNEL); 685 + /* Read phy setting values from flash */ 686 + PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload); 687 + wait_for_completion(&completion); 688 + pm8001_set_phy_profile(pm8001_ha, sizeof(u8), payload.func_specific); 689 + #endif 690 + } 691 + 674 692 #ifdef PM8001_USE_MSIX 675 693 /** 676 694 * pm8001_setup_msix - enable MSI-X interrupt ··· 876 844 } 877 845 878 846 pm8001_init_sas_add(pm8001_ha); 847 + /* phy setting support for motherboard controller */ 848 + if (pdev->subsystem_vendor != PCI_VENDOR_ID_ADAPTEC2 && 849 + pdev->subsystem_vendor != 0) 850 + pm8001_get_phy_settings_info(pm8001_ha); 879 851 pm8001_post_sas_ha_init(shost, chip); 880 852 rc = sas_register_ha(SHOST_TO_SAS_HA(shost)); 881 853 if (rc) ··· 1073 1037 { PCI_VDEVICE(ADAPTEC2, 0x8009), chip_8009 }, 1074 1038 { PCI_VDEVICE(PMC_Sierra, 0x8019), chip_8019 }, 1075 1039 { PCI_VDEVICE(ADAPTEC2, 0x8019), chip_8019 }, 1040 + { PCI_VDEVICE(PMC_Sierra, 0x8074), chip_8074 }, 1041 + { PCI_VDEVICE(ADAPTEC2, 0x8074), chip_8074 }, 1042 + { PCI_VDEVICE(PMC_Sierra, 0x8076), chip_8076 }, 1043 + { PCI_VDEVICE(ADAPTEC2, 0x8076), chip_8076 }, 1044 + { PCI_VDEVICE(PMC_Sierra, 0x8077), chip_8077 }, 1045 + { PCI_VDEVICE(ADAPTEC2, 0x8077), chip_8077 }, 1076 1046 { PCI_VENDOR_ID_ADAPTEC2, 0x8081, 1077 1047 PCI_VENDOR_ID_ADAPTEC2, 0x0400, 0, 0, chip_8001 }, 1078 1048 { PCI_VENDOR_ID_ADAPTEC2, 0x8081, ··· 1099 1057 PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8019 }, 1100 1058 { PCI_VENDOR_ID_ADAPTEC2, 0x8089, 
1101 1059 PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8019 }, 1060 + { PCI_VENDOR_ID_ADAPTEC2, 0x8074, 1061 + PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8074 }, 1062 + { PCI_VENDOR_ID_ADAPTEC2, 0x8076, 1063 + PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8076 }, 1064 + { PCI_VENDOR_ID_ADAPTEC2, 0x8077, 1065 + PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8077 }, 1066 + { PCI_VENDOR_ID_ADAPTEC2, 0x8074, 1067 + PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8074 }, 1068 + { PCI_VENDOR_ID_ADAPTEC2, 0x8076, 1069 + PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8076 }, 1070 + { PCI_VENDOR_ID_ADAPTEC2, 0x8077, 1071 + PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8077 }, 1072 + { PCI_VENDOR_ID_ADAPTEC2, 0x8076, 1073 + PCI_VENDOR_ID_ADAPTEC2, 0x0808, 0, 0, chip_8076 }, 1074 + { PCI_VENDOR_ID_ADAPTEC2, 0x8077, 1075 + PCI_VENDOR_ID_ADAPTEC2, 0x0808, 0, 0, chip_8077 }, 1076 + { PCI_VENDOR_ID_ADAPTEC2, 0x8074, 1077 + PCI_VENDOR_ID_ADAPTEC2, 0x0404, 0, 0, chip_8074 }, 1102 1078 {} /* terminate list */ 1103 1079 }; 1104 1080 ··· 1168 1108 module_exit(pm8001_exit); 1169 1109 1170 1110 MODULE_AUTHOR("Jack Wang <jack_wang@usish.com>"); 1111 + MODULE_AUTHOR("Anand Kumar Santhanam <AnandKumar.Santhanam@pmcs.com>"); 1112 + MODULE_AUTHOR("Sangeetha Gnanasekaran <Sangeetha.Gnanasekaran@pmcs.com>"); 1171 1113 MODULE_DESCRIPTION( 1172 - "PMC-Sierra PM8001/8081/8088/8089 SAS/SATA controller driver"); 1114 + "PMC-Sierra PM8001/8081/8088/8089/8074/8076/8077 " 1115 + "SAS/SATA controller driver"); 1173 1116 MODULE_VERSION(DRV_VERSION); 1174 1117 MODULE_LICENSE("GPL"); 1175 1118 MODULE_DEVICE_TABLE(pci, pm8001_pci_table);
+8 -1
drivers/scsi/pm8001/pm8001_sas.c
··· 447 447 break; 448 448 case SAS_PROTOCOL_SATA: 449 449 case SAS_PROTOCOL_STP: 450 - case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: 451 450 rc = pm8001_task_prep_ata(pm8001_ha, ccb); 452 451 break; 453 452 default: ··· 703 704 int res, retry; 704 705 struct sas_task *task = NULL; 705 706 struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev); 707 + struct pm8001_device *pm8001_dev = dev->lldd_dev; 708 + DECLARE_COMPLETION_ONSTACK(completion_setstate); 706 709 707 710 for (retry = 0; retry < 3; retry++) { 708 711 task = sas_alloc_slow_task(GFP_KERNEL); ··· 730 729 goto ex_err; 731 730 } 732 731 wait_for_completion(&task->slow_task->completion); 732 + if (pm8001_ha->chip_id != chip_8001) { 733 + pm8001_dev->setds_completion = &completion_setstate; 734 + PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha, 735 + pm8001_dev, 0x01); 736 + wait_for_completion(&completion_setstate); 737 + } 733 738 res = -TMF_RESP_FUNC_FAILED; 734 739 /* Even TMF timed out, return direct. */ 735 740 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+73 -1
drivers/scsi/pm8001/pm8001_sas.h
··· 104 104 105 105 106 106 #define DEV_IS_EXPANDER(type) ((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE)) 107 + #define IS_SPCV_12G(dev) ((dev->device == 0X8074) \ 108 + || (dev->device == 0X8076) \ 109 + || (dev->device == 0X8077)) 107 110 108 111 #define PM8001_NAME_LENGTH 32/* generic length of strings */ 109 112 extern struct list_head hba_list; ··· 131 128 u16 id; 132 129 u8 *func_specific; 133 130 }; 131 + 132 + #define MPI_FATAL_ERROR_TABLE_OFFSET_MASK 0xFFFFFF 133 + #define MPI_FATAL_ERROR_TABLE_SIZE(value) ((0xFF000000 & value) >> SHIFT24) 134 + #define MPI_FATAL_EDUMP_TABLE_LO_OFFSET 0x00 /* HNFBUFL */ 135 + #define MPI_FATAL_EDUMP_TABLE_HI_OFFSET 0x04 /* HNFBUFH */ 136 + #define MPI_FATAL_EDUMP_TABLE_LENGTH 0x08 /* HNFBLEN */ 137 + #define MPI_FATAL_EDUMP_TABLE_HANDSHAKE 0x0C /* FDDHSHK */ 138 + #define MPI_FATAL_EDUMP_TABLE_STATUS 0x10 /* FDDTSTAT */ 139 + #define MPI_FATAL_EDUMP_TABLE_ACCUM_LEN 0x14 /* ACCDDLEN */ 140 + #define MPI_FATAL_EDUMP_HANDSHAKE_RDY 0x1 141 + #define MPI_FATAL_EDUMP_HANDSHAKE_BUSY 0x0 142 + #define MPI_FATAL_EDUMP_TABLE_STAT_RSVD 0x0 143 + #define MPI_FATAL_EDUMP_TABLE_STAT_DMA_FAILED 0x1 144 + #define MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_MORE_DATA 0x2 145 + #define MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE 0x3 146 + #define TYPE_GSM_SPACE 1 147 + #define TYPE_QUEUE 2 148 + #define TYPE_FATAL 3 149 + #define TYPE_NON_FATAL 4 150 + #define TYPE_INBOUND 1 151 + #define TYPE_OUTBOUND 2 152 + struct forensic_data { 153 + u32 data_type; 154 + union { 155 + struct { 156 + u32 direct_len; 157 + u32 direct_offset; 158 + void *direct_data; 159 + } gsm_buf; 160 + struct { 161 + u16 queue_type; 162 + u16 queue_index; 163 + u32 direct_len; 164 + void *direct_data; 165 + } queue_buf; 166 + struct { 167 + u32 direct_len; 168 + u32 direct_offset; 169 + u32 read_len; 170 + void *direct_data; 171 + } data_buf; 172 + }; 173 + }; 174 + 175 + /* bit31-26 - mask bar */ 176 + #define SCRATCH_PAD0_BAR_MASK 0xFC000000 
177 + /* bit25-0 - offset mask */ 178 + #define SCRATCH_PAD0_OFFSET_MASK 0x03FFFFFF 179 + /* if AAP error state */ 180 + #define SCRATCH_PAD0_AAPERR_MASK 0xFFFFFFFF 181 + /* Inbound doorbell bit7 */ 182 + #define SPCv_MSGU_CFG_TABLE_NONFATAL_DUMP 0x80 183 + /* Inbound doorbell bit7 SPCV */ 184 + #define SPCV_MSGU_CFG_TABLE_TRANSFER_DEBUG_INFO 0x80 185 + #define MAIN_MERRDCTO_MERRDCES 0xA0/* DWORD 0x28) */ 134 186 135 187 struct pm8001_dispatch { 136 188 char *name; ··· 401 343 u32 phy_attr_table_offset; 402 344 u32 port_recovery_timer; 403 345 u32 interrupt_reassertion_delay; 346 + u32 fatal_n_non_fatal_dump; /* 0x28 */ 404 347 } pm80xx_tbl; 405 348 }; 406 349 ··· 476 417 struct pm8001_hba_memspace io_mem[6]; 477 418 struct mpi_mem_req memoryMap; 478 419 struct encrypt encrypt_info; /* support encryption */ 420 + struct forensic_data forensic_info; 421 + u32 fatal_bar_loc; 422 + u32 forensic_last_offset; 423 + u32 fatal_forensic_shift_offset; 424 + u32 forensic_fatal_step; 425 + u32 evtlog_ib_offset; 426 + u32 evtlog_ob_offset; 479 427 void __iomem *msg_unit_tbl_addr;/*Message Unit Table Addr*/ 480 428 void __iomem *main_cfg_tbl_addr;/*Main Config Table Addr*/ 481 429 void __iomem *general_stat_tbl_addr;/*General Status Table Addr*/ ··· 491 425 void __iomem *pspa_q_tbl_addr; 492 426 /*MPI SAS PHY attributes Queue Config Table Addr*/ 493 427 void __iomem *ivt_tbl_addr; /*MPI IVT Table Addr */ 428 + void __iomem *fatal_tbl_addr; /*MPI IVT Table Addr */ 494 429 union main_cfg_table main_cfg_tbl; 495 430 union general_status_table gs_tbl; 496 431 struct inbound_queue_table inbnd_q_tbl[PM8001_MAX_SPCV_INB_NUM]; ··· 696 629 int pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha); 697 630 698 631 int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue); 699 - 632 + void pm8001_set_phy_profile(struct pm8001_hba_info *pm8001_ha, 633 + u32 length, u8 *buf); 634 + int pm80xx_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue); 635 + ssize_t 
pm80xx_get_fatal_dump(struct device *cdev, 636 + struct device_attribute *attr, char *buf); 637 + ssize_t pm8001_get_gsm_dump(struct device *cdev, u32, char *buf); 700 638 /* ctl shared API */ 701 639 extern struct device_attribute *pm8001_host_attrs[]; 702 640
+492 -31
drivers/scsi/pm8001/pm80xx_hwi.c
··· 45 45 46 46 #define SMP_DIRECT 1 47 47 #define SMP_INDIRECT 2 48 + 49 + 50 + int pm80xx_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shift_value) 51 + { 52 + u32 reg_val; 53 + unsigned long start; 54 + pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER, shift_value); 55 + /* confirm the setting is written */ 56 + start = jiffies + HZ; /* 1 sec */ 57 + do { 58 + reg_val = pm8001_cr32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER); 59 + } while ((reg_val != shift_value) && time_before(jiffies, start)); 60 + if (reg_val != shift_value) { 61 + PM8001_FAIL_DBG(pm8001_ha, 62 + pm8001_printk("TIMEOUT:MEMBASE_II_SHIFT_REGISTER" 63 + " = 0x%x\n", reg_val)); 64 + return -1; 65 + } 66 + return 0; 67 + } 68 + 69 + void pm80xx_pci_mem_copy(struct pm8001_hba_info *pm8001_ha, u32 soffset, 70 + const void *destination, 71 + u32 dw_count, u32 bus_base_number) 72 + { 73 + u32 index, value, offset; 74 + u32 *destination1; 75 + destination1 = (u32 *)destination; 76 + 77 + for (index = 0; index < dw_count; index += 4, destination1++) { 78 + offset = (soffset + index / 4); 79 + if (offset < (64 * 1024)) { 80 + value = pm8001_cr32(pm8001_ha, bus_base_number, offset); 81 + *destination1 = cpu_to_le32(value); 82 + } 83 + } 84 + return; 85 + } 86 + 87 + ssize_t pm80xx_get_fatal_dump(struct device *cdev, 88 + struct device_attribute *attr, char *buf) 89 + { 90 + struct Scsi_Host *shost = class_to_shost(cdev); 91 + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); 92 + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; 93 + void __iomem *fatal_table_address = pm8001_ha->fatal_tbl_addr; 94 + u32 status = 1; 95 + u32 accum_len , reg_val, index, *temp; 96 + unsigned long start; 97 + u8 *direct_data; 98 + char *fatal_error_data = buf; 99 + 100 + pm8001_ha->forensic_info.data_buf.direct_data = buf; 101 + if (pm8001_ha->chip_id == chip_8001) { 102 + pm8001_ha->forensic_info.data_buf.direct_data += 103 + sprintf(pm8001_ha->forensic_info.data_buf.direct_data, 104 + "Not supported for SPC 
controller"); 105 + return (char *)pm8001_ha->forensic_info.data_buf.direct_data - 106 + (char *)buf; 107 + } 108 + if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) { 109 + PM8001_IO_DBG(pm8001_ha, 110 + pm8001_printk("forensic_info TYPE_NON_FATAL..............\n")); 111 + direct_data = (u8 *)fatal_error_data; 112 + pm8001_ha->forensic_info.data_type = TYPE_NON_FATAL; 113 + pm8001_ha->forensic_info.data_buf.direct_len = SYSFS_OFFSET; 114 + pm8001_ha->forensic_info.data_buf.direct_offset = 0; 115 + pm8001_ha->forensic_info.data_buf.read_len = 0; 116 + 117 + pm8001_ha->forensic_info.data_buf.direct_data = direct_data; 118 + } 119 + 120 + if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) { 121 + /* start to get data */ 122 + /* Program the MEMBASE II Shifting Register with 0x00.*/ 123 + pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER, 124 + pm8001_ha->fatal_forensic_shift_offset); 125 + pm8001_ha->forensic_last_offset = 0; 126 + pm8001_ha->forensic_fatal_step = 0; 127 + pm8001_ha->fatal_bar_loc = 0; 128 + } 129 + /* Read until accum_len is retrived */ 130 + accum_len = pm8001_mr32(fatal_table_address, 131 + MPI_FATAL_EDUMP_TABLE_ACCUM_LEN); 132 + PM8001_IO_DBG(pm8001_ha, pm8001_printk("accum_len 0x%x\n", 133 + accum_len)); 134 + if (accum_len == 0xFFFFFFFF) { 135 + PM8001_IO_DBG(pm8001_ha, 136 + pm8001_printk("Possible PCI issue 0x%x not expected\n", 137 + accum_len)); 138 + return status; 139 + } 140 + if (accum_len == 0 || accum_len >= 0x100000) { 141 + pm8001_ha->forensic_info.data_buf.direct_data += 142 + sprintf(pm8001_ha->forensic_info.data_buf.direct_data, 143 + "%08x ", 0xFFFFFFFF); 144 + return (char *)pm8001_ha->forensic_info.data_buf.direct_data - 145 + (char *)buf; 146 + } 147 + temp = (u32 *)pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr; 148 + if (pm8001_ha->forensic_fatal_step == 0) { 149 + moreData: 150 + if (pm8001_ha->forensic_info.data_buf.direct_data) { 151 + /* Data is in bar, copy to host memory */ 152 + 
pm80xx_pci_mem_copy(pm8001_ha, pm8001_ha->fatal_bar_loc, 153 + pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr, 154 + pm8001_ha->forensic_info.data_buf.direct_len , 155 + 1); 156 + } 157 + pm8001_ha->fatal_bar_loc += 158 + pm8001_ha->forensic_info.data_buf.direct_len; 159 + pm8001_ha->forensic_info.data_buf.direct_offset += 160 + pm8001_ha->forensic_info.data_buf.direct_len; 161 + pm8001_ha->forensic_last_offset += 162 + pm8001_ha->forensic_info.data_buf.direct_len; 163 + pm8001_ha->forensic_info.data_buf.read_len = 164 + pm8001_ha->forensic_info.data_buf.direct_len; 165 + 166 + if (pm8001_ha->forensic_last_offset >= accum_len) { 167 + pm8001_ha->forensic_info.data_buf.direct_data += 168 + sprintf(pm8001_ha->forensic_info.data_buf.direct_data, 169 + "%08x ", 3); 170 + for (index = 0; index < (SYSFS_OFFSET / 4); index++) { 171 + pm8001_ha->forensic_info.data_buf.direct_data += 172 + sprintf(pm8001_ha-> 173 + forensic_info.data_buf.direct_data, 174 + "%08x ", *(temp + index)); 175 + } 176 + 177 + pm8001_ha->fatal_bar_loc = 0; 178 + pm8001_ha->forensic_fatal_step = 1; 179 + pm8001_ha->fatal_forensic_shift_offset = 0; 180 + pm8001_ha->forensic_last_offset = 0; 181 + status = 0; 182 + return (char *)pm8001_ha-> 183 + forensic_info.data_buf.direct_data - 184 + (char *)buf; 185 + } 186 + if (pm8001_ha->fatal_bar_loc < (64 * 1024)) { 187 + pm8001_ha->forensic_info.data_buf.direct_data += 188 + sprintf(pm8001_ha-> 189 + forensic_info.data_buf.direct_data, 190 + "%08x ", 2); 191 + for (index = 0; index < (SYSFS_OFFSET / 4); index++) { 192 + pm8001_ha->forensic_info.data_buf.direct_data += 193 + sprintf(pm8001_ha-> 194 + forensic_info.data_buf.direct_data, 195 + "%08x ", *(temp + index)); 196 + } 197 + status = 0; 198 + return (char *)pm8001_ha-> 199 + forensic_info.data_buf.direct_data - 200 + (char *)buf; 201 + } 202 + 203 + /* Increment the MEMBASE II Shifting Register value by 0x100.*/ 204 + pm8001_ha->forensic_info.data_buf.direct_data += 205 + 
sprintf(pm8001_ha->forensic_info.data_buf.direct_data, 206 + "%08x ", 2); 207 + for (index = 0; index < 256; index++) { 208 + pm8001_ha->forensic_info.data_buf.direct_data += 209 + sprintf(pm8001_ha-> 210 + forensic_info.data_buf.direct_data, 211 + "%08x ", *(temp + index)); 212 + } 213 + pm8001_ha->fatal_forensic_shift_offset += 0x100; 214 + pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER, 215 + pm8001_ha->fatal_forensic_shift_offset); 216 + pm8001_ha->fatal_bar_loc = 0; 217 + status = 0; 218 + return (char *)pm8001_ha->forensic_info.data_buf.direct_data - 219 + (char *)buf; 220 + } 221 + if (pm8001_ha->forensic_fatal_step == 1) { 222 + pm8001_ha->fatal_forensic_shift_offset = 0; 223 + /* Read 64K of the debug data. */ 224 + pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER, 225 + pm8001_ha->fatal_forensic_shift_offset); 226 + pm8001_mw32(fatal_table_address, 227 + MPI_FATAL_EDUMP_TABLE_HANDSHAKE, 228 + MPI_FATAL_EDUMP_HANDSHAKE_RDY); 229 + 230 + /* Poll FDDHSHK until clear */ 231 + start = jiffies + (2 * HZ); /* 2 sec */ 232 + 233 + do { 234 + reg_val = pm8001_mr32(fatal_table_address, 235 + MPI_FATAL_EDUMP_TABLE_HANDSHAKE); 236 + } while ((reg_val) && time_before(jiffies, start)); 237 + 238 + if (reg_val != 0) { 239 + PM8001_FAIL_DBG(pm8001_ha, 240 + pm8001_printk("TIMEOUT:MEMBASE_II_SHIFT_REGISTER" 241 + " = 0x%x\n", reg_val)); 242 + return -1; 243 + } 244 + 245 + /* Read the next 64K of the debug data. 
*/ 246 + pm8001_ha->forensic_fatal_step = 0; 247 + if (pm8001_mr32(fatal_table_address, 248 + MPI_FATAL_EDUMP_TABLE_STATUS) != 249 + MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE) { 250 + pm8001_mw32(fatal_table_address, 251 + MPI_FATAL_EDUMP_TABLE_HANDSHAKE, 0); 252 + goto moreData; 253 + } else { 254 + pm8001_ha->forensic_info.data_buf.direct_data += 255 + sprintf(pm8001_ha-> 256 + forensic_info.data_buf.direct_data, 257 + "%08x ", 4); 258 + pm8001_ha->forensic_info.data_buf.read_len = 0xFFFFFFFF; 259 + pm8001_ha->forensic_info.data_buf.direct_len = 0; 260 + pm8001_ha->forensic_info.data_buf.direct_offset = 0; 261 + pm8001_ha->forensic_info.data_buf.read_len = 0; 262 + status = 0; 263 + } 264 + } 265 + 266 + return (char *)pm8001_ha->forensic_info.data_buf.direct_data - 267 + (char *)buf; 268 + } 269 + 48 270 /** 49 271 * read_main_config_table - read the configure table and save it. 50 272 * @pm8001_ha: our hba card information ··· 652 430 table is updated */ 653 431 pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_UPDATE); 654 432 /* wait until Inbound DoorBell Clear Register toggled */ 655 - max_wait_count = 2 * 1000 * 1000;/* 2 sec for spcv/ve */ 433 + if (IS_SPCV_12G(pm8001_ha->pdev)) { 434 + max_wait_count = 4 * 1000 * 1000;/* 4 sec */ 435 + } else { 436 + max_wait_count = 2 * 1000 * 1000;/* 2 sec */ 437 + } 656 438 do { 657 439 udelay(1); 658 440 value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET); ··· 804 578 0xFFFFFF); 805 579 pm8001_ha->pspa_q_tbl_addr = 806 580 base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x90) & 581 + 0xFFFFFF); 582 + pm8001_ha->fatal_tbl_addr = 583 + base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0xA0) & 807 584 0xFFFFFF); 808 585 809 586 PM8001_INIT_DBG(pm8001_ha, ··· 1142 913 pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_RESET); 1143 914 1144 915 /* wait until Inbound DoorBell Clear Register toggled */ 1145 - max_wait_count = 2 * 1000 * 1000; /* 2 sec for spcv/ve */ 916 + if 
(IS_SPCV_12G(pm8001_ha->pdev)) { 917 + max_wait_count = 4 * 1000 * 1000;/* 4 sec */ 918 + } else { 919 + max_wait_count = 2 * 1000 * 1000;/* 2 sec */ 920 + } 1146 921 do { 1147 922 udelay(1); 1148 923 value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET); ··· 1192 959 { 1193 960 u32 regval; 1194 961 u32 bootloader_state; 962 + u32 ibutton0, ibutton1; 1195 963 1196 964 /* Check if MPI is in ready state to reset */ 1197 965 if (mpi_uninit_check(pm8001_ha) != 0) { ··· 1251 1017 if (-1 == check_fw_ready(pm8001_ha)) { 1252 1018 PM8001_FAIL_DBG(pm8001_ha, 1253 1019 pm8001_printk("Firmware is not ready!\n")); 1254 - return -EBUSY; 1020 + /* check iButton feature support for motherboard controller */ 1021 + if (pm8001_ha->pdev->subsystem_vendor != 1022 + PCI_VENDOR_ID_ADAPTEC2 && 1023 + pm8001_ha->pdev->subsystem_vendor != 0) { 1024 + ibutton0 = pm8001_cr32(pm8001_ha, 0, 1025 + MSGU_HOST_SCRATCH_PAD_6); 1026 + ibutton1 = pm8001_cr32(pm8001_ha, 0, 1027 + MSGU_HOST_SCRATCH_PAD_7); 1028 + if (!ibutton0 && !ibutton1) { 1029 + PM8001_FAIL_DBG(pm8001_ha, 1030 + pm8001_printk("iButton Feature is" 1031 + " not Available!!!\n")); 1032 + return -EBUSY; 1033 + } 1034 + if (ibutton0 == 0xdeadbeef && ibutton1 == 0xdeadbeef) { 1035 + PM8001_FAIL_DBG(pm8001_ha, 1036 + pm8001_printk("CRC Check for iButton" 1037 + " Feature Failed!!!\n")); 1038 + return -EBUSY; 1039 + } 1040 + } 1255 1041 } 1256 1042 PM8001_INIT_DBG(pm8001_ha, 1257 1043 pm8001_printk("SPCv soft reset Complete\n")); ··· 1522 1268 if (unlikely(!t || !t->lldd_task || !t->dev)) 1523 1269 return; 1524 1270 ts = &t->task_status; 1271 + /* Print sas address of IO failed device */ 1272 + if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) && 1273 + (status != IO_UNDERFLOW)) 1274 + PM8001_FAIL_DBG(pm8001_ha, 1275 + pm8001_printk("SAS Address of IO Failure Drive" 1276 + ":%016llx", SAS_ADDR(t->dev->sas_addr))); 1277 + 1525 1278 switch (status) { 1526 1279 case IO_SUCCESS: 1527 1280 PM8001_IO_DBG(pm8001_ha, ··· 1952 1691 u32 param; 1953 
1692 u32 status; 1954 1693 u32 tag; 1694 + int i, j; 1695 + u8 sata_addr_low[4]; 1696 + u32 temp_sata_addr_low, temp_sata_addr_hi; 1697 + u8 sata_addr_hi[4]; 1955 1698 struct sata_completion_resp *psataPayload; 1956 1699 struct task_status_struct *ts; 1957 1700 struct ata_task_resp *resp ; ··· 2005 1740 pm8001_printk("ts null\n")); 2006 1741 return; 2007 1742 } 1743 + /* Print sas address of IO failed device */ 1744 + if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) && 1745 + (status != IO_UNDERFLOW)) { 1746 + if (!((t->dev->parent) && 1747 + (DEV_IS_EXPANDER(t->dev->parent->dev_type)))) { 1748 + for (i = 0 , j = 4; i <= 3 && j <= 7; i++ , j++) 1749 + sata_addr_low[i] = pm8001_ha->sas_addr[j]; 1750 + for (i = 0 , j = 0; i <= 3 && j <= 3; i++ , j++) 1751 + sata_addr_hi[i] = pm8001_ha->sas_addr[j]; 1752 + memcpy(&temp_sata_addr_low, sata_addr_low, 1753 + sizeof(sata_addr_low)); 1754 + memcpy(&temp_sata_addr_hi, sata_addr_hi, 1755 + sizeof(sata_addr_hi)); 1756 + temp_sata_addr_hi = (((temp_sata_addr_hi >> 24) & 0xff) 1757 + |((temp_sata_addr_hi << 8) & 1758 + 0xff0000) | 1759 + ((temp_sata_addr_hi >> 8) 1760 + & 0xff00) | 1761 + ((temp_sata_addr_hi << 24) & 1762 + 0xff000000)); 1763 + temp_sata_addr_low = ((((temp_sata_addr_low >> 24) 1764 + & 0xff) | 1765 + ((temp_sata_addr_low << 8) 1766 + & 0xff0000) | 1767 + ((temp_sata_addr_low >> 8) 1768 + & 0xff00) | 1769 + ((temp_sata_addr_low << 24) 1770 + & 0xff000000)) + 1771 + pm8001_dev->attached_phy + 1772 + 0x10); 1773 + PM8001_FAIL_DBG(pm8001_ha, 1774 + pm8001_printk("SAS Address of IO Failure Drive:" 1775 + "%08x%08x", temp_sata_addr_hi, 1776 + temp_sata_addr_low)); 2008 1777 1778 + } else { 1779 + PM8001_FAIL_DBG(pm8001_ha, 1780 + pm8001_printk("SAS Address of IO Failure Drive:" 1781 + "%016llx", SAS_ADDR(t->dev->sas_addr))); 1782 + } 1783 + } 2009 1784 switch (status) { 2010 1785 case IO_SUCCESS: 2011 1786 PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n")); ··· 3408 3103 static int 
mpi_set_phy_profile_resp(struct pm8001_hba_info *pm8001_ha, 3409 3104 void *piomb) 3410 3105 { 3411 - PM8001_MSG_DBG(pm8001_ha, 3412 - pm8001_printk(" pm80xx_addition_functionality\n")); 3106 + u8 page_code; 3107 + struct set_phy_profile_resp *pPayload = 3108 + (struct set_phy_profile_resp *)(piomb + 4); 3109 + u32 ppc_phyid = le32_to_cpu(pPayload->ppc_phyid); 3110 + u32 status = le32_to_cpu(pPayload->status); 3413 3111 3112 + page_code = (u8)((ppc_phyid & 0xFF00) >> 8); 3113 + if (status) { 3114 + /* status is FAILED */ 3115 + PM8001_FAIL_DBG(pm8001_ha, 3116 + pm8001_printk("PhyProfile command failed with status " 3117 + "0x%08X \n", status)); 3118 + return -1; 3119 + } else { 3120 + if (page_code != SAS_PHY_ANALOG_SETTINGS_PAGE) { 3121 + PM8001_FAIL_DBG(pm8001_ha, 3122 + pm8001_printk("Invalid page code 0x%X\n", 3123 + page_code)); 3124 + return -1; 3125 + } 3126 + } 3414 3127 return 0; 3415 3128 } 3416 3129 ··· 3807 3484 else 3808 3485 pm8001_ha->smp_exp_mode = SMP_INDIRECT; 3809 3486 3810 - /* DIRECT MODE support only in spcv/ve */ 3811 - pm8001_ha->smp_exp_mode = SMP_DIRECT; 3812 3487 3813 3488 tmp_addr = cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req)); 3814 3489 preq_dma_addr = (char *)phys_to_virt(tmp_addr); ··· 3822 3501 /* exclude top 4 bytes for SMP req header */ 3823 3502 smp_cmd.long_smp_req.long_req_addr = 3824 3503 cpu_to_le64((u64)sg_dma_address 3825 - (&task->smp_task.smp_req) - 4); 3504 + (&task->smp_task.smp_req) + 4); 3826 3505 /* exclude 4 bytes for SMP req header and CRC */ 3827 3506 smp_cmd.long_smp_req.long_req_size = 3828 3507 cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-8); ··· 3925 3604 struct ssp_ini_io_start_req ssp_cmd; 3926 3605 u32 tag = ccb->ccb_tag; 3927 3606 int ret; 3928 - u64 phys_addr; 3607 + u64 phys_addr, start_addr, end_addr; 3608 + u32 end_addr_high, end_addr_low; 3929 3609 struct inbound_queue_table *circularQ; 3930 - static u32 inb; 3931 - static u32 outb; 3610 + u32 q_index; 3932 3611 u32 opc = 
OPC_INB_SSPINIIOSTART; 3933 3612 memset(&ssp_cmd, 0, sizeof(ssp_cmd)); 3934 3613 memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8); ··· 3947 3626 ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7); 3948 3627 memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd, 3949 3628 task->ssp_task.cmd->cmd_len); 3950 - circularQ = &pm8001_ha->inbnd_q_tbl[0]; 3629 + q_index = (u32) (pm8001_dev->id & 0x00ffffff) % PM8001_MAX_INB_NUM; 3630 + circularQ = &pm8001_ha->inbnd_q_tbl[q_index]; 3951 3631 3952 3632 /* Check if encryption is set */ 3953 3633 if (pm8001_ha->chip->encrypt && ··· 3980 3658 cpu_to_le32(upper_32_bits(dma_addr)); 3981 3659 ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len); 3982 3660 ssp_cmd.enc_esgl = 0; 3661 + /* Check 4G Boundary */ 3662 + start_addr = cpu_to_le64(dma_addr); 3663 + end_addr = (start_addr + ssp_cmd.enc_len) - 1; 3664 + end_addr_low = cpu_to_le32(lower_32_bits(end_addr)); 3665 + end_addr_high = cpu_to_le32(upper_32_bits(end_addr)); 3666 + if (end_addr_high != ssp_cmd.enc_addr_high) { 3667 + PM8001_FAIL_DBG(pm8001_ha, 3668 + pm8001_printk("The sg list address " 3669 + "start_addr=0x%016llx data_len=0x%x " 3670 + "end_addr_high=0x%08x end_addr_low=" 3671 + "0x%08x has crossed 4G boundary\n", 3672 + start_addr, ssp_cmd.enc_len, 3673 + end_addr_high, end_addr_low)); 3674 + pm8001_chip_make_sg(task->scatter, 1, 3675 + ccb->buf_prd); 3676 + phys_addr = ccb->ccb_dma_handle + 3677 + offsetof(struct pm8001_ccb_info, 3678 + buf_prd[0]); 3679 + ssp_cmd.enc_addr_low = 3680 + cpu_to_le32(lower_32_bits(phys_addr)); 3681 + ssp_cmd.enc_addr_high = 3682 + cpu_to_le32(upper_32_bits(phys_addr)); 3683 + ssp_cmd.enc_esgl = cpu_to_le32(1<<31); 3684 + } 3983 3685 } else if (task->num_scatter == 0) { 3984 3686 ssp_cmd.enc_addr_low = 0; 3985 3687 ssp_cmd.enc_addr_high = 0; ··· 4020 3674 } else { 4021 3675 PM8001_IO_DBG(pm8001_ha, pm8001_printk( 4022 3676 "Sending Normal SAS command 0x%x inb q %x\n", 4023 - task->ssp_task.cmd->cmnd[0], inb)); 3677 + 
task->ssp_task.cmd->cmnd[0], q_index)); 4024 3678 /* fill in PRD (scatter/gather) table, if any */ 4025 3679 if (task->num_scatter > 1) { 4026 3680 pm8001_chip_make_sg(task->scatter, ccb->n_elem, ··· 4039 3693 cpu_to_le32(upper_32_bits(dma_addr)); 4040 3694 ssp_cmd.len = cpu_to_le32(task->total_xfer_len); 4041 3695 ssp_cmd.esgl = 0; 3696 + /* Check 4G Boundary */ 3697 + start_addr = cpu_to_le64(dma_addr); 3698 + end_addr = (start_addr + ssp_cmd.len) - 1; 3699 + end_addr_low = cpu_to_le32(lower_32_bits(end_addr)); 3700 + end_addr_high = cpu_to_le32(upper_32_bits(end_addr)); 3701 + if (end_addr_high != ssp_cmd.addr_high) { 3702 + PM8001_FAIL_DBG(pm8001_ha, 3703 + pm8001_printk("The sg list address " 3704 + "start_addr=0x%016llx data_len=0x%x " 3705 + "end_addr_high=0x%08x end_addr_low=" 3706 + "0x%08x has crossed 4G boundary\n", 3707 + start_addr, ssp_cmd.len, 3708 + end_addr_high, end_addr_low)); 3709 + pm8001_chip_make_sg(task->scatter, 1, 3710 + ccb->buf_prd); 3711 + phys_addr = ccb->ccb_dma_handle + 3712 + offsetof(struct pm8001_ccb_info, 3713 + buf_prd[0]); 3714 + ssp_cmd.addr_low = 3715 + cpu_to_le32(lower_32_bits(phys_addr)); 3716 + ssp_cmd.addr_high = 3717 + cpu_to_le32(upper_32_bits(phys_addr)); 3718 + ssp_cmd.esgl = cpu_to_le32(1<<31); 3719 + } 4042 3720 } else if (task->num_scatter == 0) { 4043 3721 ssp_cmd.addr_low = 0; 4044 3722 ssp_cmd.addr_high = 0; ··· 4070 3700 ssp_cmd.esgl = 0; 4071 3701 } 4072 3702 } 4073 - ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd, outb++); 4074 - 4075 - /* rotate the outb queue */ 4076 - outb = outb%PM8001_MAX_SPCV_OUTB_NUM; 4077 - 3703 + q_index = (u32) (pm8001_dev->id & 0x00ffffff) % PM8001_MAX_OUTB_NUM; 3704 + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, 3705 + &ssp_cmd, q_index); 4078 3706 return ret; 4079 3707 } 4080 3708 ··· 4084 3716 struct pm8001_device *pm8001_ha_dev = dev->lldd_dev; 4085 3717 u32 tag = ccb->ccb_tag; 4086 3718 int ret; 4087 - static u32 inb; 4088 - static u32 outb; 3719 + 
u32 q_index; 4089 3720 struct sata_start_req sata_cmd; 4090 3721 u32 hdr_tag, ncg_tag = 0; 4091 - u64 phys_addr; 3722 + u64 phys_addr, start_addr, end_addr; 3723 + u32 end_addr_high, end_addr_low; 4092 3724 u32 ATAP = 0x0; 4093 3725 u32 dir; 4094 3726 struct inbound_queue_table *circularQ; 4095 3727 unsigned long flags; 4096 3728 u32 opc = OPC_INB_SATA_HOST_OPSTART; 4097 3729 memset(&sata_cmd, 0, sizeof(sata_cmd)); 4098 - circularQ = &pm8001_ha->inbnd_q_tbl[0]; 3730 + q_index = (u32) (pm8001_ha_dev->id & 0x00ffffff) % PM8001_MAX_INB_NUM; 3731 + circularQ = &pm8001_ha->inbnd_q_tbl[q_index]; 4099 3732 4100 3733 if (task->data_dir == PCI_DMA_NONE) { 4101 3734 ATAP = 0x04; /* no data*/ ··· 4157 3788 sata_cmd.enc_addr_high = upper_32_bits(dma_addr); 4158 3789 sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len); 4159 3790 sata_cmd.enc_esgl = 0; 3791 + /* Check 4G Boundary */ 3792 + start_addr = cpu_to_le64(dma_addr); 3793 + end_addr = (start_addr + sata_cmd.enc_len) - 1; 3794 + end_addr_low = cpu_to_le32(lower_32_bits(end_addr)); 3795 + end_addr_high = cpu_to_le32(upper_32_bits(end_addr)); 3796 + if (end_addr_high != sata_cmd.enc_addr_high) { 3797 + PM8001_FAIL_DBG(pm8001_ha, 3798 + pm8001_printk("The sg list address " 3799 + "start_addr=0x%016llx data_len=0x%x " 3800 + "end_addr_high=0x%08x end_addr_low" 3801 + "=0x%08x has crossed 4G boundary\n", 3802 + start_addr, sata_cmd.enc_len, 3803 + end_addr_high, end_addr_low)); 3804 + pm8001_chip_make_sg(task->scatter, 1, 3805 + ccb->buf_prd); 3806 + phys_addr = ccb->ccb_dma_handle + 3807 + offsetof(struct pm8001_ccb_info, 3808 + buf_prd[0]); 3809 + sata_cmd.enc_addr_low = 3810 + lower_32_bits(phys_addr); 3811 + sata_cmd.enc_addr_high = 3812 + upper_32_bits(phys_addr); 3813 + sata_cmd.enc_esgl = 3814 + cpu_to_le32(1 << 31); 3815 + } 4160 3816 } else if (task->num_scatter == 0) { 4161 3817 sata_cmd.enc_addr_low = 0; 4162 3818 sata_cmd.enc_addr_high = 0; ··· 4202 3808 } else { 4203 3809 PM8001_IO_DBG(pm8001_ha, pm8001_printk( 
4204 3810 "Sending Normal SATA command 0x%x inb %x\n", 4205 - sata_cmd.sata_fis.command, inb)); 3811 + sata_cmd.sata_fis.command, q_index)); 4206 3812 /* dad (bit 0-1) is 0 */ 4207 3813 sata_cmd.ncqtag_atap_dir_m_dad = 4208 3814 cpu_to_le32(((ncg_tag & 0xff)<<16) | ··· 4223 3829 sata_cmd.addr_high = upper_32_bits(dma_addr); 4224 3830 sata_cmd.len = cpu_to_le32(task->total_xfer_len); 4225 3831 sata_cmd.esgl = 0; 3832 + /* Check 4G Boundary */ 3833 + start_addr = cpu_to_le64(dma_addr); 3834 + end_addr = (start_addr + sata_cmd.len) - 1; 3835 + end_addr_low = cpu_to_le32(lower_32_bits(end_addr)); 3836 + end_addr_high = cpu_to_le32(upper_32_bits(end_addr)); 3837 + if (end_addr_high != sata_cmd.addr_high) { 3838 + PM8001_FAIL_DBG(pm8001_ha, 3839 + pm8001_printk("The sg list address " 3840 + "start_addr=0x%016llx data_len=0x%x" 3841 + "end_addr_high=0x%08x end_addr_low=" 3842 + "0x%08x has crossed 4G boundary\n", 3843 + start_addr, sata_cmd.len, 3844 + end_addr_high, end_addr_low)); 3845 + pm8001_chip_make_sg(task->scatter, 1, 3846 + ccb->buf_prd); 3847 + phys_addr = ccb->ccb_dma_handle + 3848 + offsetof(struct pm8001_ccb_info, 3849 + buf_prd[0]); 3850 + sata_cmd.addr_low = 3851 + lower_32_bits(phys_addr); 3852 + sata_cmd.addr_high = 3853 + upper_32_bits(phys_addr); 3854 + sata_cmd.esgl = cpu_to_le32(1 << 31); 3855 + } 4226 3856 } else if (task->num_scatter == 0) { 4227 3857 sata_cmd.addr_low = 0; 4228 3858 sata_cmd.addr_high = 0; ··· 4323 3905 } 4324 3906 } 4325 3907 } 4326 - 3908 + q_index = (u32) (pm8001_ha_dev->id & 0x00ffffff) % PM8001_MAX_OUTB_NUM; 4327 3909 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, 4328 - &sata_cmd, outb++); 4329 - 4330 - /* rotate the outb queue */ 4331 - outb = outb%PM8001_MAX_SPCV_OUTB_NUM; 3910 + &sata_cmd, q_index); 4332 3911 return ret; 4333 3912 } 4334 3913 ··· 4356 3941 ** [14] 0b disable spin up hold; 1b enable spin up hold 4357 3942 ** [15] ob no change in current PHY analig setup 1b enable using SPAST 4358 3943 */ 4359 - 
payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE | 4360 - LINKMODE_AUTO | LINKRATE_15 | 4361 - LINKRATE_30 | LINKRATE_60 | phy_id); 3944 + if (!IS_SPCV_12G(pm8001_ha->pdev)) 3945 + payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE | 3946 + LINKMODE_AUTO | LINKRATE_15 | 3947 + LINKRATE_30 | LINKRATE_60 | phy_id); 3948 + else 3949 + payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE | 3950 + LINKMODE_AUTO | LINKRATE_15 | 3951 + LINKRATE_30 | LINKRATE_60 | LINKRATE_120 | 3952 + phy_id); 3953 + 4362 3954 /* SSC Disable and SAS Analog ST configuration */ 4363 3955 /** 4364 3956 payload.ase_sh_lm_slr_phyid = ··· 4524 4102 return IRQ_HANDLED; 4525 4103 } 4526 4104 4105 + void mpi_set_phy_profile_req(struct pm8001_hba_info *pm8001_ha, 4106 + u32 operation, u32 phyid, u32 length, u32 *buf) 4107 + { 4108 + u32 tag , i, j = 0; 4109 + int rc; 4110 + struct set_phy_profile_req payload; 4111 + struct inbound_queue_table *circularQ; 4112 + u32 opc = OPC_INB_SET_PHY_PROFILE; 4113 + 4114 + memset(&payload, 0, sizeof(payload)); 4115 + rc = pm8001_tag_alloc(pm8001_ha, &tag); 4116 + if (rc) 4117 + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("Invalid tag\n")); 4118 + circularQ = &pm8001_ha->inbnd_q_tbl[0]; 4119 + payload.tag = cpu_to_le32(tag); 4120 + payload.ppc_phyid = (((operation & 0xF) << 8) | (phyid & 0xFF)); 4121 + PM8001_INIT_DBG(pm8001_ha, 4122 + pm8001_printk(" phy profile command for phy %x ,length is %d\n", 4123 + payload.ppc_phyid, length)); 4124 + for (i = length; i < (length + PHY_DWORD_LENGTH - 1); i++) { 4125 + payload.reserved[j] = cpu_to_le32(*((u32 *)buf + i)); 4126 + j++; 4127 + } 4128 + pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); 4129 + } 4130 + 4131 + void pm8001_set_phy_profile(struct pm8001_hba_info *pm8001_ha, 4132 + u32 length, u8 *buf) 4133 + { 4134 + u32 page_code, i; 4135 + 4136 + page_code = SAS_PHY_ANALOG_SETTINGS_PAGE; 4137 + for (i = 0; i < pm8001_ha->chip->n_phy; i++) { 4138 + 
mpi_set_phy_profile_req(pm8001_ha, 4139 + SAS_PHY_ANALOG_SETTINGS_PAGE, i, length, (u32 *)buf); 4140 + length = length + PHY_DWORD_LENGTH; 4141 + } 4142 + PM8001_INIT_DBG(pm8001_ha, pm8001_printk("phy settings completed\n")); 4143 + } 4527 4144 const struct pm8001_dispatch pm8001_80xx_dispatch = { 4528 4145 .name = "pmc80xx", 4529 4146 .chip_init = pm80xx_chip_init,
+11 -4
drivers/scsi/pm8001/pm80xx_hwi.h
··· 168 168 #define LINKRATE_15 (0x01 << 8) 169 169 #define LINKRATE_30 (0x02 << 8) 170 170 #define LINKRATE_60 (0x06 << 8) 171 + #define LINKRATE_120 (0x08 << 8) 172 + 173 + /* phy_profile */ 174 + #define SAS_PHY_ANALOG_SETTINGS_PAGE 0x04 175 + #define PHY_DWORD_LENGTH 0xC 171 176 172 177 /* Thermal related */ 173 178 #define THERMAL_ENABLE 0x1 ··· 1228 1223 1229 1224 /* MSGU CONFIGURATION TABLE*/ 1230 1225 1231 - #define SPCv_MSGU_CFG_TABLE_UPDATE 0x01 1232 - #define SPCv_MSGU_CFG_TABLE_RESET 0x02 1233 - #define SPCv_MSGU_CFG_TABLE_FREEZE 0x04 1234 - #define SPCv_MSGU_CFG_TABLE_UNFREEZE 0x08 1226 + #define SPCv_MSGU_CFG_TABLE_UPDATE 0x001 1227 + #define SPCv_MSGU_CFG_TABLE_RESET 0x002 1228 + #define SPCv_MSGU_CFG_TABLE_FREEZE 0x004 1229 + #define SPCv_MSGU_CFG_TABLE_UNFREEZE 0x008 1235 1230 #define MSGU_IBDB_SET 0x00 1236 1231 #define MSGU_HOST_INT_STATUS 0x08 1237 1232 #define MSGU_HOST_INT_MASK 0x0C ··· 1525 1520 #define DEVREG_FAILURE_PORT_NOT_VALID_STATE 0x06 1526 1521 #define DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID 0x07 1527 1522 1523 + 1524 + #define MEMBASE_II_SHIFT_REGISTER 0x1010 1528 1525 #endif
+1
drivers/scsi/qla4xxx/ql4_def.h
··· 306 306 struct qla_ddb_index { 307 307 struct list_head list; 308 308 uint16_t fw_ddb_idx; 309 + uint16_t flash_ddb_idx; 309 310 struct dev_db_entry fw_ddb; 310 311 uint8_t flash_isid[6]; 311 312 };
+4
drivers/scsi/qla4xxx/ql4_fw.h
··· 539 539 #define ENABLE_INTERNAL_LOOPBACK 0x04 540 540 #define ENABLE_EXTERNAL_LOOPBACK 0x08 541 541 542 + /* generic defines to enable/disable params */ 543 + #define QL4_PARAM_DISABLE 0 544 + #define QL4_PARAM_ENABLE 1 545 + 542 546 /*************************************************************************/ 543 547 544 548 /* Host Adapter Initialization Control Block (from host) */
+2
drivers/scsi/qla4xxx/ql4_glbl.h
··· 83 83 uint8_t outCount, uint32_t *mbx_cmd, uint32_t *mbx_sts); 84 84 int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username, 85 85 char *password, int bidi, uint16_t *chap_index); 86 + int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password, 87 + uint16_t idx, int bidi); 86 88 87 89 void qla4xxx_queue_iocb(struct scsi_qla_host *ha); 88 90 void qla4xxx_complete_iocb(struct scsi_qla_host *ha);
+12
drivers/scsi/qla4xxx/ql4_inline.h
··· 82 82 __qla4xxx_disable_intrs(ha); 83 83 spin_unlock_irqrestore(&ha->hardware_lock, flags); 84 84 } 85 + 86 + static inline int qla4xxx_get_chap_type(struct ql4_chap_table *chap_entry) 87 + { 88 + int type; 89 + 90 + if (chap_entry->flags & BIT_7) 91 + type = LOCAL_CHAP; 92 + else 93 + type = BIDI_CHAP; 94 + 95 + return type; 96 + }
+30 -4
drivers/scsi/qla4xxx/ql4_mbx.c
··· 1530 1530 return ret; 1531 1531 } 1532 1532 1533 - static int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, 1534 - char *password, uint16_t idx, int bidi) 1533 + /** 1534 + * qla4xxx_set_chap - Make a chap entry at the given index 1535 + * @ha: pointer to adapter structure 1536 + * @username: CHAP username to set 1537 + * @password: CHAP password to set 1538 + * @idx: CHAP index at which to make the entry 1539 + * @bidi: type of chap entry (chap_in or chap_out) 1540 + * 1541 + * Create chap entry at the given index with the information provided. 1542 + * 1543 + * Note: Caller should acquire the chap lock before getting here. 1544 + **/ 1545 + int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password, 1546 + uint16_t idx, int bidi) 1535 1547 { 1536 1548 int ret = 0; 1537 1549 int rval = QLA_ERROR; 1538 1550 uint32_t offset = 0; 1539 1551 struct ql4_chap_table *chap_table; 1552 + uint32_t chap_size = 0; 1540 1553 dma_addr_t chap_dma; 1541 1554 1542 1555 chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma); ··· 1567 1554 strncpy(chap_table->secret, password, MAX_CHAP_SECRET_LEN); 1568 1555 strncpy(chap_table->name, username, MAX_CHAP_NAME_LEN); 1569 1556 chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE); 1570 - offset = FLASH_CHAP_OFFSET | (idx * sizeof(struct ql4_chap_table)); 1557 + 1558 + if (is_qla40XX(ha)) { 1559 + chap_size = MAX_CHAP_ENTRIES_40XX * sizeof(*chap_table); 1560 + offset = FLASH_CHAP_OFFSET; 1561 + } else { /* Single region contains CHAP info for both ports which is 1562 + * divided into half for each port. 
1563 + */ 1564 + chap_size = ha->hw.flt_chap_size / 2; 1565 + offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2); 1566 + if (ha->port_num == 1) 1567 + offset += chap_size; 1568 + } 1569 + 1570 + offset += (idx * sizeof(struct ql4_chap_table)); 1571 1571 rval = qla4xxx_set_flash(ha, chap_dma, offset, 1572 1572 sizeof(struct ql4_chap_table), 1573 1573 FLASH_OPT_RMW_COMMIT); ··· 1637 1611 goto exit_unlock_uni_chap; 1638 1612 } 1639 1613 1640 - if (!(chap_table->flags & BIT_6)) { 1614 + if (!(chap_table->flags & BIT_7)) { 1641 1615 ql4_printk(KERN_ERR, ha, "Unidirectional entry not set\n"); 1642 1616 rval = QLA_ERROR; 1643 1617 goto exit_unlock_uni_chap;
+450 -23
drivers/scsi/qla4xxx/ql4_os.c
··· 149 149 static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx, 150 150 uint32_t *num_entries, char *buf); 151 151 static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx); 152 + static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, 153 + int len); 152 154 153 155 /* 154 156 * SCSI host template entry points ··· 254 252 .send_ping = qla4xxx_send_ping, 255 253 .get_chap = qla4xxx_get_chap_list, 256 254 .delete_chap = qla4xxx_delete_chap, 255 + .set_chap = qla4xxx_set_chap_entry, 257 256 .get_flashnode_param = qla4xxx_sysfs_ddb_get_param, 258 257 .set_flashnode_param = qla4xxx_sysfs_ddb_set_param, 259 258 .new_flashnode = qla4xxx_sysfs_ddb_add, ··· 511 508 return 0; 512 509 } 513 510 511 + static int qla4xxx_get_chap_by_index(struct scsi_qla_host *ha, 512 + int16_t chap_index, 513 + struct ql4_chap_table **chap_entry) 514 + { 515 + int rval = QLA_ERROR; 516 + int max_chap_entries; 517 + 518 + if (!ha->chap_list) { 519 + ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n"); 520 + rval = QLA_ERROR; 521 + goto exit_get_chap; 522 + } 523 + 524 + if (is_qla80XX(ha)) 525 + max_chap_entries = (ha->hw.flt_chap_size / 2) / 526 + sizeof(struct ql4_chap_table); 527 + else 528 + max_chap_entries = MAX_CHAP_ENTRIES_40XX; 529 + 530 + if (chap_index > max_chap_entries) { 531 + ql4_printk(KERN_ERR, ha, "Invalid Chap index\n"); 532 + rval = QLA_ERROR; 533 + goto exit_get_chap; 534 + } 535 + 536 + *chap_entry = (struct ql4_chap_table *)ha->chap_list + chap_index; 537 + if ((*chap_entry)->cookie != 538 + __constant_cpu_to_le16(CHAP_VALID_COOKIE)) { 539 + rval = QLA_ERROR; 540 + *chap_entry = NULL; 541 + } else { 542 + rval = QLA_SUCCESS; 543 + } 544 + 545 + exit_get_chap: 546 + return rval; 547 + } 548 + 549 + /** 550 + * qla4xxx_find_free_chap_index - Find the first free chap index 551 + * @ha: pointer to adapter structure 552 + * @chap_index: CHAP index to be returned 553 + * 554 + * Find the first free chap 
index available in the chap table 555 + * 556 + * Note: Caller should acquire the chap lock before getting here. 557 + **/ 558 + static int qla4xxx_find_free_chap_index(struct scsi_qla_host *ha, 559 + uint16_t *chap_index) 560 + { 561 + int i, rval; 562 + int free_index = -1; 563 + int max_chap_entries = 0; 564 + struct ql4_chap_table *chap_table; 565 + 566 + if (is_qla80XX(ha)) 567 + max_chap_entries = (ha->hw.flt_chap_size / 2) / 568 + sizeof(struct ql4_chap_table); 569 + else 570 + max_chap_entries = MAX_CHAP_ENTRIES_40XX; 571 + 572 + if (!ha->chap_list) { 573 + ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n"); 574 + rval = QLA_ERROR; 575 + goto exit_find_chap; 576 + } 577 + 578 + for (i = 0; i < max_chap_entries; i++) { 579 + chap_table = (struct ql4_chap_table *)ha->chap_list + i; 580 + 581 + if ((chap_table->cookie != 582 + __constant_cpu_to_le16(CHAP_VALID_COOKIE)) && 583 + (i > MAX_RESRV_CHAP_IDX)) { 584 + free_index = i; 585 + break; 586 + } 587 + } 588 + 589 + if (free_index != -1) { 590 + *chap_index = free_index; 591 + rval = QLA_SUCCESS; 592 + } else { 593 + rval = QLA_ERROR; 594 + } 595 + 596 + exit_find_chap: 597 + return rval; 598 + } 599 + 514 600 static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx, 515 601 uint32_t *num_entries, char *buf) 516 602 { ··· 781 689 exit_delete_chap: 782 690 dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma); 783 691 return ret; 692 + } 693 + 694 + /** 695 + * qla4xxx_set_chap_entry - Make chap entry with given information 696 + * @shost: pointer to host 697 + * @data: chap info - credentials, index and type to make chap entry 698 + * @len: length of data 699 + * 700 + * Add or update chap entry with the given information 701 + **/ 702 + static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, int len) 703 + { 704 + struct scsi_qla_host *ha = to_qla_host(shost); 705 + struct iscsi_chap_rec chap_rec; 706 + struct ql4_chap_table *chap_entry = NULL; 707 + struct 
iscsi_param_info *param_info; 708 + struct nlattr *attr; 709 + int max_chap_entries = 0; 710 + int type; 711 + int rem = len; 712 + int rc = 0; 713 + 714 + memset(&chap_rec, 0, sizeof(chap_rec)); 715 + 716 + nla_for_each_attr(attr, data, len, rem) { 717 + param_info = nla_data(attr); 718 + 719 + switch (param_info->param) { 720 + case ISCSI_CHAP_PARAM_INDEX: 721 + chap_rec.chap_tbl_idx = *(uint16_t *)param_info->value; 722 + break; 723 + case ISCSI_CHAP_PARAM_CHAP_TYPE: 724 + chap_rec.chap_type = param_info->value[0]; 725 + break; 726 + case ISCSI_CHAP_PARAM_USERNAME: 727 + memcpy(chap_rec.username, param_info->value, 728 + param_info->len); 729 + break; 730 + case ISCSI_CHAP_PARAM_PASSWORD: 731 + memcpy(chap_rec.password, param_info->value, 732 + param_info->len); 733 + break; 734 + case ISCSI_CHAP_PARAM_PASSWORD_LEN: 735 + chap_rec.password_length = param_info->value[0]; 736 + break; 737 + default: 738 + ql4_printk(KERN_ERR, ha, 739 + "%s: No such sysfs attribute\n", __func__); 740 + rc = -ENOSYS; 741 + goto exit_set_chap; 742 + }; 743 + } 744 + 745 + if (chap_rec.chap_type == CHAP_TYPE_IN) 746 + type = BIDI_CHAP; 747 + else 748 + type = LOCAL_CHAP; 749 + 750 + if (is_qla80XX(ha)) 751 + max_chap_entries = (ha->hw.flt_chap_size / 2) / 752 + sizeof(struct ql4_chap_table); 753 + else 754 + max_chap_entries = MAX_CHAP_ENTRIES_40XX; 755 + 756 + mutex_lock(&ha->chap_sem); 757 + if (chap_rec.chap_tbl_idx < max_chap_entries) { 758 + rc = qla4xxx_get_chap_by_index(ha, chap_rec.chap_tbl_idx, 759 + &chap_entry); 760 + if (!rc) { 761 + if (!(type == qla4xxx_get_chap_type(chap_entry))) { 762 + ql4_printk(KERN_INFO, ha, 763 + "Type mismatch for CHAP entry %d\n", 764 + chap_rec.chap_tbl_idx); 765 + rc = -EINVAL; 766 + goto exit_unlock_chap; 767 + } 768 + 769 + /* If chap index is in use then don't modify it */ 770 + rc = qla4xxx_is_chap_active(shost, 771 + chap_rec.chap_tbl_idx); 772 + if (rc) { 773 + ql4_printk(KERN_INFO, ha, 774 + "CHAP entry %d is in use\n", 775 + 
chap_rec.chap_tbl_idx); 776 + rc = -EBUSY; 777 + goto exit_unlock_chap; 778 + } 779 + } 780 + } else { 781 + rc = qla4xxx_find_free_chap_index(ha, &chap_rec.chap_tbl_idx); 782 + if (rc) { 783 + ql4_printk(KERN_INFO, ha, "CHAP entry not available\n"); 784 + rc = -EBUSY; 785 + goto exit_unlock_chap; 786 + } 787 + } 788 + 789 + rc = qla4xxx_set_chap(ha, chap_rec.username, chap_rec.password, 790 + chap_rec.chap_tbl_idx, type); 791 + 792 + exit_unlock_chap: 793 + mutex_unlock(&ha->chap_sem); 794 + 795 + exit_set_chap: 796 + return rc; 784 797 } 785 798 786 799 static int qla4xxx_get_iface_param(struct iscsi_iface *iface, ··· 1652 1455 struct iscsi_session *sess = cls_sess->dd_data; 1653 1456 struct ddb_entry *ddb_entry = sess->dd_data; 1654 1457 struct scsi_qla_host *ha = ddb_entry->ha; 1458 + struct iscsi_cls_conn *cls_conn = ddb_entry->conn; 1459 + struct ql4_chap_table chap_tbl; 1655 1460 int rval, len; 1656 1461 uint16_t idx; 1657 1462 1463 + memset(&chap_tbl, 0, sizeof(chap_tbl)); 1658 1464 switch (param) { 1659 1465 case ISCSI_PARAM_CHAP_IN_IDX: 1660 1466 rval = qla4xxx_get_chap_index(ha, sess->username_in, ··· 1669 1469 len = sprintf(buf, "%hu\n", idx); 1670 1470 break; 1671 1471 case ISCSI_PARAM_CHAP_OUT_IDX: 1672 - rval = qla4xxx_get_chap_index(ha, sess->username, 1673 - sess->password, LOCAL_CHAP, 1674 - &idx); 1472 + if (ddb_entry->ddb_type == FLASH_DDB) { 1473 + if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) { 1474 + idx = ddb_entry->chap_tbl_idx; 1475 + rval = QLA_SUCCESS; 1476 + } else { 1477 + rval = QLA_ERROR; 1478 + } 1479 + } else { 1480 + rval = qla4xxx_get_chap_index(ha, sess->username, 1481 + sess->password, 1482 + LOCAL_CHAP, &idx); 1483 + } 1675 1484 if (rval) 1676 1485 len = sprintf(buf, "\n"); 1677 1486 else 1678 1487 len = sprintf(buf, "%hu\n", idx); 1679 1488 break; 1489 + case ISCSI_PARAM_USERNAME: 1490 + case ISCSI_PARAM_PASSWORD: 1491 + /* First, populate session username and password for FLASH DDB, 1492 + * if not already done. 
This happens when session login fails 1493 + * for a FLASH DDB. 1494 + */ 1495 + if (ddb_entry->ddb_type == FLASH_DDB && 1496 + ddb_entry->chap_tbl_idx != INVALID_ENTRY && 1497 + !sess->username && !sess->password) { 1498 + idx = ddb_entry->chap_tbl_idx; 1499 + rval = qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name, 1500 + chap_tbl.secret, 1501 + idx); 1502 + if (!rval) { 1503 + iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME, 1504 + (char *)chap_tbl.name, 1505 + strlen((char *)chap_tbl.name)); 1506 + iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD, 1507 + (char *)chap_tbl.secret, 1508 + chap_tbl.secret_len); 1509 + } 1510 + } 1511 + /* allow fall-through */ 1680 1512 default: 1681 1513 return iscsi_session_get_param(cls_sess, param, buf); 1682 1514 } ··· 2605 2373 COPY_ISID(sess->isid, fw_ddb_entry->isid); 2606 2374 2607 2375 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); 2608 - if (ddb_link < MAX_DDB_ENTRIES) 2609 - sess->discovery_parent_idx = ddb_link; 2610 - else 2611 - sess->discovery_parent_idx = DDB_NO_LINK; 2612 - 2613 2376 if (ddb_link == DDB_ISNS) 2614 2377 disc_parent = ISCSI_DISC_PARENT_ISNS; 2615 2378 else if (ddb_link == DDB_NO_LINK) ··· 2629 2402 int buflen = 0; 2630 2403 struct iscsi_session *sess; 2631 2404 struct ddb_entry *ddb_entry; 2405 + struct ql4_chap_table chap_tbl; 2632 2406 struct iscsi_conn *conn; 2633 2407 char ip_addr[DDB_IPADDR_LEN]; 2634 2408 uint16_t options = 0; ··· 2637 2409 sess = cls_sess->dd_data; 2638 2410 ddb_entry = sess->dd_data; 2639 2411 conn = cls_conn->dd_data; 2412 + memset(&chap_tbl, 0, sizeof(chap_tbl)); 2640 2413 2641 2414 ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx); 2642 2415 ··· 2664 2435 (char *)fw_ddb_entry->iscsi_name, buflen); 2665 2436 iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME, 2666 2437 (char *)ha->name_string, buflen); 2438 + 2439 + if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) { 2440 + if (!qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name, 2441 + chap_tbl.secret, 2442 + 
ddb_entry->chap_tbl_idx)) { 2443 + iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME, 2444 + (char *)chap_tbl.name, 2445 + strlen((char *)chap_tbl.name)); 2446 + iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD, 2447 + (char *)chap_tbl.secret, 2448 + chap_tbl.secret_len); 2449 + } 2450 + } 2667 2451 } 2668 2452 2669 2453 void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha, ··· 5179 4937 } 5180 4938 5181 4939 static int qla4xxx_is_session_exists(struct scsi_qla_host *ha, 5182 - struct dev_db_entry *fw_ddb_entry) 4940 + struct dev_db_entry *fw_ddb_entry, 4941 + uint32_t *index) 5183 4942 { 5184 4943 struct ddb_entry *ddb_entry; 5185 4944 struct ql4_tuple_ddb *fw_tddb = NULL; ··· 5214 4971 qla4xxx_get_param_ddb(ddb_entry, tmp_tddb); 5215 4972 if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) { 5216 4973 ret = QLA_SUCCESS; /* found */ 4974 + if (index != NULL) 4975 + *index = idx; 5217 4976 goto exit_check; 5218 4977 } 5219 4978 } ··· 5451 5206 ddb_entry->ha = ha; 5452 5207 ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb; 5453 5208 ddb_entry->ddb_change = qla4xxx_flash_ddb_change; 5209 + ddb_entry->chap_tbl_idx = INVALID_ENTRY; 5454 5210 5455 5211 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY); 5456 5212 atomic_set(&ddb_entry->relogin_timer, 0); ··· 5513 5267 } while (time_after(wtime, jiffies)); 5514 5268 } 5515 5269 5270 + static int qla4xxx_cmp_fw_stentry(struct dev_db_entry *fw_ddb_entry, 5271 + struct dev_db_entry *flash_ddb_entry) 5272 + { 5273 + uint16_t options = 0; 5274 + size_t ip_len = IP_ADDR_LEN; 5275 + 5276 + options = le16_to_cpu(fw_ddb_entry->options); 5277 + if (options & DDB_OPT_IPV6_DEVICE) 5278 + ip_len = IPv6_ADDR_LEN; 5279 + 5280 + if (memcmp(fw_ddb_entry->ip_addr, flash_ddb_entry->ip_addr, ip_len)) 5281 + return QLA_ERROR; 5282 + 5283 + if (memcmp(&fw_ddb_entry->isid[0], &flash_ddb_entry->isid[0], 5284 + sizeof(fw_ddb_entry->isid))) 5285 + return QLA_ERROR; 5286 + 5287 + if (memcmp(&fw_ddb_entry->port, 
&flash_ddb_entry->port, 5288 + sizeof(fw_ddb_entry->port))) 5289 + return QLA_ERROR; 5290 + 5291 + return QLA_SUCCESS; 5292 + } 5293 + 5294 + static int qla4xxx_find_flash_st_idx(struct scsi_qla_host *ha, 5295 + struct dev_db_entry *fw_ddb_entry, 5296 + uint32_t fw_idx, uint32_t *flash_index) 5297 + { 5298 + struct dev_db_entry *flash_ddb_entry; 5299 + dma_addr_t flash_ddb_entry_dma; 5300 + uint32_t idx = 0; 5301 + int max_ddbs; 5302 + int ret = QLA_ERROR, status; 5303 + 5304 + max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX : 5305 + MAX_DEV_DB_ENTRIES; 5306 + 5307 + flash_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 5308 + &flash_ddb_entry_dma); 5309 + if (flash_ddb_entry == NULL || fw_ddb_entry == NULL) { 5310 + ql4_printk(KERN_ERR, ha, "Out of memory\n"); 5311 + goto exit_find_st_idx; 5312 + } 5313 + 5314 + status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry, 5315 + flash_ddb_entry_dma, fw_idx); 5316 + if (status == QLA_SUCCESS) { 5317 + status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry); 5318 + if (status == QLA_SUCCESS) { 5319 + *flash_index = fw_idx; 5320 + ret = QLA_SUCCESS; 5321 + goto exit_find_st_idx; 5322 + } 5323 + } 5324 + 5325 + for (idx = 0; idx < max_ddbs; idx++) { 5326 + status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry, 5327 + flash_ddb_entry_dma, idx); 5328 + if (status == QLA_ERROR) 5329 + continue; 5330 + 5331 + status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry); 5332 + if (status == QLA_SUCCESS) { 5333 + *flash_index = idx; 5334 + ret = QLA_SUCCESS; 5335 + goto exit_find_st_idx; 5336 + } 5337 + } 5338 + 5339 + if (idx == max_ddbs) 5340 + ql4_printk(KERN_ERR, ha, "Failed to find ST [%d] in flash\n", 5341 + fw_idx); 5342 + 5343 + exit_find_st_idx: 5344 + if (flash_ddb_entry) 5345 + dma_pool_free(ha->fw_ddb_dma_pool, flash_ddb_entry, 5346 + flash_ddb_entry_dma); 5347 + 5348 + return ret; 5349 + } 5350 + 5516 5351 static void qla4xxx_build_st_list(struct scsi_qla_host *ha, 5517 5352 struct list_head 
*list_st) 5518 5353 { ··· 5605 5278 int ret; 5606 5279 uint32_t idx = 0, next_idx = 0; 5607 5280 uint32_t state = 0, conn_err = 0; 5281 + uint32_t flash_index = -1; 5608 5282 uint16_t conn_id = 0; 5609 5283 5610 5284 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, ··· 5637 5309 st_ddb_idx = vzalloc(fw_idx_size); 5638 5310 if (!st_ddb_idx) 5639 5311 break; 5312 + 5313 + ret = qla4xxx_find_flash_st_idx(ha, fw_ddb_entry, idx, 5314 + &flash_index); 5315 + if (ret == QLA_ERROR) { 5316 + ql4_printk(KERN_ERR, ha, 5317 + "No flash entry for ST at idx [%d]\n", idx); 5318 + st_ddb_idx->flash_ddb_idx = idx; 5319 + } else { 5320 + ql4_printk(KERN_INFO, ha, 5321 + "ST at idx [%d] is stored at flash [%d]\n", 5322 + idx, flash_index); 5323 + st_ddb_idx->flash_ddb_idx = flash_index; 5324 + } 5640 5325 5641 5326 st_ddb_idx->fw_ddb_idx = idx; 5642 5327 ··· 5693 5352 vfree(ddb_idx); 5694 5353 } 5695 5354 } 5355 + } 5356 + 5357 + static void qla4xxx_update_sess_disc_idx(struct scsi_qla_host *ha, 5358 + struct ddb_entry *ddb_entry, 5359 + struct dev_db_entry *fw_ddb_entry) 5360 + { 5361 + struct iscsi_cls_session *cls_sess; 5362 + struct iscsi_session *sess; 5363 + uint32_t max_ddbs = 0; 5364 + uint16_t ddb_link = -1; 5365 + 5366 + max_ddbs = is_qla40XX(ha) ? 
MAX_DEV_DB_ENTRIES_40XX : 5367 + MAX_DEV_DB_ENTRIES; 5368 + 5369 + cls_sess = ddb_entry->sess; 5370 + sess = cls_sess->dd_data; 5371 + 5372 + ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); 5373 + if (ddb_link < max_ddbs) 5374 + sess->discovery_parent_idx = ddb_link; 5375 + else 5376 + sess->discovery_parent_idx = DDB_NO_LINK; 5696 5377 } 5697 5378 5698 5379 static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha, ··· 5781 5418 5782 5419 /* Update sess/conn params */ 5783 5420 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn); 5421 + qla4xxx_update_sess_disc_idx(ha, ddb_entry, fw_ddb_entry); 5784 5422 5785 5423 if (is_reset == RESET_ADAPTER) { 5786 5424 iscsi_block_session(cls_sess); ··· 5798 5434 return ret; 5799 5435 } 5800 5436 5437 + static void qla4xxx_update_fw_ddb_link(struct scsi_qla_host *ha, 5438 + struct list_head *list_ddb, 5439 + struct dev_db_entry *fw_ddb_entry) 5440 + { 5441 + struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; 5442 + uint16_t ddb_link; 5443 + 5444 + ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); 5445 + 5446 + list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) { 5447 + if (ddb_idx->fw_ddb_idx == ddb_link) { 5448 + DEBUG2(ql4_printk(KERN_INFO, ha, 5449 + "Updating NT parent idx from [%d] to [%d]\n", 5450 + ddb_link, ddb_idx->flash_ddb_idx)); 5451 + fw_ddb_entry->ddb_link = 5452 + cpu_to_le16(ddb_idx->flash_ddb_idx); 5453 + return; 5454 + } 5455 + } 5456 + } 5457 + 5801 5458 static void qla4xxx_build_nt_list(struct scsi_qla_host *ha, 5802 - struct list_head *list_nt, int is_reset) 5459 + struct list_head *list_nt, 5460 + struct list_head *list_st, 5461 + int is_reset) 5803 5462 { 5804 5463 struct dev_db_entry *fw_ddb_entry; 5464 + struct ddb_entry *ddb_entry = NULL; 5805 5465 dma_addr_t fw_ddb_dma; 5806 5466 int max_ddbs; 5807 5467 int fw_idx_size; 5808 5468 int ret; 5809 5469 uint32_t idx = 0, next_idx = 0; 5810 5470 uint32_t state = 0, conn_err = 0; 5471 + uint32_t ddb_idx = -1; 5811 5472 uint16_t conn_id = 0; 
5473 + uint16_t ddb_link = -1; 5812 5474 struct qla_ddb_index *nt_ddb_idx; 5813 5475 5814 5476 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, ··· 5861 5471 if (strlen((char *) fw_ddb_entry->iscsi_name) == 0) 5862 5472 goto continue_next_nt; 5863 5473 5474 + ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); 5475 + if (ddb_link < max_ddbs) 5476 + qla4xxx_update_fw_ddb_link(ha, list_st, fw_ddb_entry); 5477 + 5864 5478 if (!(state == DDB_DS_NO_CONNECTION_ACTIVE || 5865 - state == DDB_DS_SESSION_FAILED)) 5479 + state == DDB_DS_SESSION_FAILED) && 5480 + (is_reset == INIT_ADAPTER)) 5866 5481 goto continue_next_nt; 5867 5482 5868 5483 DEBUG2(ql4_printk(KERN_INFO, ha, 5869 5484 "Adding DDB to session = 0x%x\n", idx)); 5485 + 5870 5486 if (is_reset == INIT_ADAPTER) { 5871 5487 nt_ddb_idx = vmalloc(fw_idx_size); 5872 5488 if (!nt_ddb_idx) ··· 5902 5506 5903 5507 list_add_tail(&nt_ddb_idx->list, list_nt); 5904 5508 } else if (is_reset == RESET_ADAPTER) { 5905 - if (qla4xxx_is_session_exists(ha, fw_ddb_entry) == 5906 - QLA_SUCCESS) 5509 + ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, 5510 + &ddb_idx); 5511 + if (ret == QLA_SUCCESS) { 5512 + ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, 5513 + ddb_idx); 5514 + if (ddb_entry != NULL) 5515 + qla4xxx_update_sess_disc_idx(ha, 5516 + ddb_entry, 5517 + fw_ddb_entry); 5907 5518 goto continue_next_nt; 5519 + } 5908 5520 } 5909 5521 5910 5522 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset, idx); ··· 5930 5526 } 5931 5527 5932 5528 static void qla4xxx_build_new_nt_list(struct scsi_qla_host *ha, 5933 - struct list_head *list_nt) 5529 + struct list_head *list_nt, 5530 + uint16_t target_id) 5934 5531 { 5935 5532 struct dev_db_entry *fw_ddb_entry; 5936 5533 dma_addr_t fw_ddb_dma; ··· 5976 5571 5977 5572 nt_ddb_idx->fw_ddb_idx = idx; 5978 5573 5979 - ret = qla4xxx_is_session_exists(ha, fw_ddb_entry); 5574 + ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL); 5980 5575 if (ret == QLA_SUCCESS) { 5981 5576 /* 
free nt_ddb_idx and do not add to list_nt */ 5982 5577 vfree(nt_ddb_idx); 5983 5578 goto continue_next_new_nt; 5984 5579 } 5580 + 5581 + if (target_id < max_ddbs) 5582 + fw_ddb_entry->ddb_link = cpu_to_le16(target_id); 5985 5583 5986 5584 list_add_tail(&nt_ddb_idx->list, list_nt); 5987 5585 ··· 6302 5894 } 6303 5895 6304 5896 static int qla4xxx_ddb_login_st(struct scsi_qla_host *ha, 6305 - struct dev_db_entry *fw_ddb_entry) 5897 + struct dev_db_entry *fw_ddb_entry, 5898 + uint16_t target_id) 6306 5899 { 6307 5900 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; 6308 5901 struct list_head list_nt; ··· 6328 5919 if (ret == QLA_ERROR) 6329 5920 goto exit_login_st; 6330 5921 6331 - qla4xxx_build_new_nt_list(ha, &list_nt); 5922 + qla4xxx_build_new_nt_list(ha, &list_nt, target_id); 6332 5923 6333 5924 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, &list_nt, list) { 6334 5925 list_del_init(&ddb_idx->list); ··· 6355 5946 { 6356 5947 int ret = QLA_ERROR; 6357 5948 6358 - ret = qla4xxx_is_session_exists(ha, fw_ddb_entry); 5949 + ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL); 6359 5950 if (ret != QLA_SUCCESS) 6360 5951 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER, 6361 5952 idx); ··· 6410 6001 fw_ddb_entry->cookie = DDB_VALID_COOKIE; 6411 6002 6412 6003 if (strlen((char *)fw_ddb_entry->iscsi_name) == 0) 6413 - ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry); 6004 + ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry, 6005 + fnode_sess->target_id); 6414 6006 else 6415 6007 ret = qla4xxx_ddb_login_nt(ha, fw_ddb_entry, 6416 6008 fnode_sess->target_id); ··· 6932 6522 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 6933 6523 struct scsi_qla_host *ha = to_qla_host(shost); 6934 6524 struct iscsi_flashnode_param_info *fnode_param; 6525 + struct ql4_chap_table chap_tbl; 6935 6526 struct nlattr *attr; 6527 + uint16_t chap_out_idx = INVALID_ENTRY; 6936 6528 int rc = QLA_ERROR; 6937 6529 uint32_t rem = len; 6938 6530 6531 + memset((void *)&chap_tbl, 0, 
sizeof(chap_tbl)); 6939 6532 nla_for_each_attr(attr, data, len, rem) { 6940 6533 fnode_param = nla_data(attr); 6941 6534 ··· 6980 6567 break; 6981 6568 case ISCSI_FLASHNODE_CHAP_AUTH_EN: 6982 6569 fnode_sess->chap_auth_en = fnode_param->value[0]; 6570 + /* Invalidate chap index if chap auth is disabled */ 6571 + if (!fnode_sess->chap_auth_en) 6572 + fnode_sess->chap_out_idx = INVALID_ENTRY; 6573 + 6983 6574 break; 6984 6575 case ISCSI_FLASHNODE_SNACK_REQ_EN: 6985 6576 fnode_conn->snack_req_en = fnode_param->value[0]; ··· 7121 6704 case ISCSI_FLASHNODE_EXP_STATSN: 7122 6705 fnode_conn->exp_statsn = 7123 6706 *(uint32_t *)fnode_param->value; 6707 + break; 6708 + case ISCSI_FLASHNODE_CHAP_OUT_IDX: 6709 + chap_out_idx = *(uint16_t *)fnode_param->value; 6710 + if (!qla4xxx_get_uni_chap_at_index(ha, 6711 + chap_tbl.name, 6712 + chap_tbl.secret, 6713 + chap_out_idx)) { 6714 + fnode_sess->chap_out_idx = chap_out_idx; 6715 + /* Enable chap auth if chap index is valid */ 6716 + fnode_sess->chap_auth_en = QL4_PARAM_ENABLE; 6717 + } 7124 6718 break; 7125 6719 default: 7126 6720 ql4_printk(KERN_ERR, ha, ··· 7354 6926 schedule_timeout_uninterruptible(HZ / 10); 7355 6927 } while (time_after(wtime, jiffies)); 7356 6928 7357 - /* Free up the sendtargets list */ 6929 + 6930 + qla4xxx_build_nt_list(ha, &list_nt, &list_st, is_reset); 6931 + 7358 6932 qla4xxx_free_ddb_list(&list_st); 7359 - 7360 - qla4xxx_build_nt_list(ha, &list_nt, is_reset); 7361 - 7362 6933 qla4xxx_free_ddb_list(&list_nt); 7363 6934 7364 6935 qla4xxx_free_ddb_index(ha);
-28
drivers/scsi/scsi.c
··· 78 78 * Definitions and constants. 79 79 */ 80 80 81 - #define MIN_RESET_DELAY (2*HZ) 82 - 83 - /* Do not call reset on error if we just did a reset within 15 sec. */ 84 - #define MIN_RESET_PERIOD (15*HZ) 85 - 86 81 /* 87 82 * Note - the initial logging level can be set here to log events at boot time. 88 83 * After the system is up, you may enable logging via the /proc interface. ··· 653 658 int scsi_dispatch_cmd(struct scsi_cmnd *cmd) 654 659 { 655 660 struct Scsi_Host *host = cmd->device->host; 656 - unsigned long timeout; 657 661 int rtn = 0; 658 662 659 663 atomic_inc(&cmd->device->iorequest_cnt); ··· 696 702 cmd->device->scsi_level != SCSI_UNKNOWN) { 697 703 cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) | 698 704 (cmd->device->lun << 5 & 0xe0); 699 - } 700 - 701 - /* 702 - * We will wait MIN_RESET_DELAY clock ticks after the last reset so 703 - * we can avoid the drive not being ready. 704 - */ 705 - timeout = host->last_reset + MIN_RESET_DELAY; 706 - 707 - if (host->resetting && time_before(jiffies, timeout)) { 708 - int ticks_remaining = timeout - jiffies; 709 - /* 710 - * NOTE: This may be executed from within an interrupt 711 - * handler! This is bad, but for now, it'll do. The irq 712 - * level of the interrupt handler has been masked out by the 713 - * platform dependent interrupt handling code already, so the 714 - * sti() here will not cause another call to the SCSI host's 715 - * interrupt handler (assuming there is one irq-level per 716 - * host). 717 - */ 718 - while (--ticks_remaining >= 0) 719 - mdelay(1 + 999 / HZ); 720 - host->resetting = 0; 721 705 } 722 706 723 707 scsi_log_send(cmd);
+75 -66
drivers/scsi/scsi_debug.c
··· 169 169 static int scsi_debug_dsense = DEF_D_SENSE; 170 170 static int scsi_debug_every_nth = DEF_EVERY_NTH; 171 171 static int scsi_debug_fake_rw = DEF_FAKE_RW; 172 - static int scsi_debug_guard = DEF_GUARD; 172 + static unsigned int scsi_debug_guard = DEF_GUARD; 173 173 static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED; 174 174 static int scsi_debug_max_luns = DEF_MAX_LUNS; 175 175 static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE; ··· 292 292 0, 0, 0x2, 0x4b}; 293 293 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0, 294 294 0, 0, 0x0, 0x0}; 295 + 296 + static void *fake_store(unsigned long long lba) 297 + { 298 + lba = do_div(lba, sdebug_store_sectors); 299 + 300 + return fake_storep + lba * scsi_debug_sector_size; 301 + } 302 + 303 + static struct sd_dif_tuple *dif_store(sector_t sector) 304 + { 305 + sector = do_div(sector, sdebug_store_sectors); 306 + 307 + return dif_storep + sector; 308 + } 295 309 296 310 static int sdebug_add_adapter(void); 297 311 static void sdebug_remove_adapter(void); ··· 1745 1731 return ret; 1746 1732 } 1747 1733 1748 - static u16 dif_compute_csum(const void *buf, int len) 1734 + static __be16 dif_compute_csum(const void *buf, int len) 1749 1735 { 1750 - u16 csum; 1736 + __be16 csum; 1751 1737 1752 - switch (scsi_debug_guard) { 1753 - case 1: 1754 - csum = ip_compute_csum(buf, len); 1755 - break; 1756 - case 0: 1738 + if (scsi_debug_guard) 1739 + csum = (__force __be16)ip_compute_csum(buf, len); 1740 + else 1757 1741 csum = cpu_to_be16(crc_t10dif(buf, len)); 1758 - break; 1759 - } 1742 + 1760 1743 return csum; 1761 1744 } 1762 1745 1763 1746 static int dif_verify(struct sd_dif_tuple *sdt, const void *data, 1764 1747 sector_t sector, u32 ei_lba) 1765 1748 { 1766 - u16 csum = dif_compute_csum(data, scsi_debug_sector_size); 1749 + __be16 csum = dif_compute_csum(data, scsi_debug_sector_size); 1767 1750 1768 1751 if (sdt->guard_tag != csum) { 1769 1752 pr_err("%s: GUARD check failed on sector %lu rcvd 
0x%04x, data 0x%04x\n", ··· 1786 1775 return 0; 1787 1776 } 1788 1777 1789 - static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec, 1790 - unsigned int sectors, u32 ei_lba) 1778 + static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector, 1779 + unsigned int sectors, bool read) 1791 1780 { 1792 1781 unsigned int i, resid; 1793 1782 struct scatterlist *psgl; 1783 + void *paddr; 1784 + const void *dif_store_end = dif_storep + sdebug_store_sectors; 1785 + 1786 + /* Bytes of protection data to copy into sgl */ 1787 + resid = sectors * sizeof(*dif_storep); 1788 + 1789 + scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) { 1790 + int len = min(psgl->length, resid); 1791 + void *start = dif_store(sector); 1792 + int rest = 0; 1793 + 1794 + if (dif_store_end < start + len) 1795 + rest = start + len - dif_store_end; 1796 + 1797 + paddr = kmap_atomic(sg_page(psgl)) + psgl->offset; 1798 + 1799 + if (read) 1800 + memcpy(paddr, start, len - rest); 1801 + else 1802 + memcpy(start, paddr, len - rest); 1803 + 1804 + if (rest) { 1805 + if (read) 1806 + memcpy(paddr + len - rest, dif_storep, rest); 1807 + else 1808 + memcpy(dif_storep, paddr + len - rest, rest); 1809 + } 1810 + 1811 + sector += len / sizeof(*dif_storep); 1812 + resid -= len; 1813 + kunmap_atomic(paddr); 1814 + } 1815 + } 1816 + 1817 + static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec, 1818 + unsigned int sectors, u32 ei_lba) 1819 + { 1820 + unsigned int i; 1794 1821 struct sd_dif_tuple *sdt; 1795 1822 sector_t sector; 1796 - sector_t tmp_sec = start_sec; 1797 - void *paddr; 1798 1823 1799 - start_sec = do_div(tmp_sec, sdebug_store_sectors); 1800 - 1801 - sdt = dif_storep + start_sec; 1802 - 1803 - for (i = 0 ; i < sectors ; i++) { 1824 + for (i = 0; i < sectors; i++) { 1804 1825 int ret; 1805 1826 1806 - if (sdt[i].app_tag == 0xffff) 1827 + sector = start_sec + i; 1828 + sdt = dif_store(sector); 1829 + 1830 + if (sdt->app_tag == cpu_to_be16(0xffff)) 1807 
1831 continue; 1808 1832 1809 - sector = start_sec + i; 1810 - 1811 - ret = dif_verify(&sdt[i], 1812 - fake_storep + sector * scsi_debug_sector_size, 1813 - sector, ei_lba); 1833 + ret = dif_verify(sdt, fake_store(sector), sector, ei_lba); 1814 1834 if (ret) { 1815 1835 dif_errors++; 1816 1836 return ret; ··· 1850 1808 ei_lba++; 1851 1809 } 1852 1810 1853 - /* Bytes of protection data to copy into sgl */ 1854 - resid = sectors * sizeof(*dif_storep); 1855 - sector = start_sec; 1856 - 1857 - scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) { 1858 - int len = min(psgl->length, resid); 1859 - 1860 - paddr = kmap_atomic(sg_page(psgl)) + psgl->offset; 1861 - memcpy(paddr, dif_storep + sector, len); 1862 - 1863 - sector += len / sizeof(*dif_storep); 1864 - if (sector >= sdebug_store_sectors) { 1865 - /* Force wrap */ 1866 - tmp_sec = sector; 1867 - sector = do_div(tmp_sec, sdebug_store_sectors); 1868 - } 1869 - resid -= len; 1870 - kunmap_atomic(paddr); 1871 - } 1872 - 1811 + dif_copy_prot(SCpnt, start_sec, sectors, true); 1873 1812 dix_reads++; 1874 1813 1875 1814 return 0; ··· 1933 1910 { 1934 1911 int i, j, ret; 1935 1912 struct sd_dif_tuple *sdt; 1936 - struct scatterlist *dsgl = scsi_sglist(SCpnt); 1913 + struct scatterlist *dsgl; 1937 1914 struct scatterlist *psgl = scsi_prot_sglist(SCpnt); 1938 1915 void *daddr, *paddr; 1939 - sector_t tmp_sec = start_sec; 1940 - sector_t sector; 1916 + sector_t sector = start_sec; 1941 1917 int ppage_offset; 1942 - 1943 - sector = do_div(tmp_sec, sdebug_store_sectors); 1944 1918 1945 1919 BUG_ON(scsi_sg_count(SCpnt) == 0); 1946 1920 BUG_ON(scsi_prot_sg_count(SCpnt) == 0); ··· 1966 1946 1967 1947 sdt = paddr + ppage_offset; 1968 1948 1969 - ret = dif_verify(sdt, daddr + j, start_sec, ei_lba); 1949 + ret = dif_verify(sdt, daddr + j, sector, ei_lba); 1970 1950 if (ret) { 1971 1951 dump_sector(daddr + j, scsi_debug_sector_size); 1972 1952 goto out; 1973 1953 } 1974 1954 1975 - /* Would be great to copy this in bigger 
1976 - * chunks. However, for the sake of 1977 - * correctness we need to verify each sector 1978 - * before writing it to "stable" storage 1979 - */ 1980 - memcpy(dif_storep + sector, sdt, sizeof(*sdt)); 1981 - 1982 1955 sector++; 1983 - 1984 - if (sector == sdebug_store_sectors) 1985 - sector = 0; /* Force wrap */ 1986 - 1987 - start_sec++; 1988 1956 ei_lba++; 1989 1957 ppage_offset += sizeof(struct sd_dif_tuple); 1990 1958 } ··· 1981 1973 kunmap_atomic(daddr); 1982 1974 } 1983 1975 1976 + dif_copy_prot(SCpnt, start_sec, sectors, false); 1984 1977 dix_writes++; 1985 1978 1986 1979 return 0; ··· 2751 2742 module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR); 2752 2743 module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR); 2753 2744 module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR); 2754 - module_param_named(guard, scsi_debug_guard, int, S_IRUGO); 2745 + module_param_named(guard, scsi_debug_guard, uint, S_IRUGO); 2755 2746 module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO); 2756 2747 module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO); 2757 2748 module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO); ··· 3181 3172 3182 3173 static ssize_t sdebug_guard_show(struct device_driver *ddp, char *buf) 3183 3174 { 3184 - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_guard); 3175 + return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_guard); 3185 3176 } 3186 3177 DRIVER_ATTR(guard, S_IRUGO, sdebug_guard_show, NULL); 3187 3178
+133 -13
drivers/scsi/scsi_error.c
··· 87 87 } 88 88 EXPORT_SYMBOL_GPL(scsi_schedule_eh); 89 89 90 + static int scsi_host_eh_past_deadline(struct Scsi_Host *shost) 91 + { 92 + if (!shost->last_reset || !shost->eh_deadline) 93 + return 0; 94 + 95 + if (time_before(jiffies, 96 + shost->last_reset + shost->eh_deadline)) 97 + return 0; 98 + 99 + return 1; 100 + } 101 + 90 102 /** 91 103 * scsi_eh_scmd_add - add scsi cmd to error handling. 92 104 * @scmd: scmd to run eh on. ··· 120 108 if (scsi_host_set_state(shost, SHOST_RECOVERY)) 121 109 if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY)) 122 110 goto out_unlock; 111 + 112 + if (shost->eh_deadline && !shost->last_reset) 113 + shost->last_reset = jiffies; 123 114 124 115 ret = 1; 125 116 scmd->eh_eflags |= eh_flag; ··· 152 137 153 138 trace_scsi_dispatch_cmd_timeout(scmd); 154 139 scsi_log_completion(scmd, TIMEOUT_ERROR); 140 + 141 + if (host->eh_deadline && !host->last_reset) 142 + host->last_reset = jiffies; 155 143 156 144 if (host->transportt->eh_timed_out) 157 145 rtn = host->transportt->eh_timed_out(scmd); ··· 1008 990 struct list_head *done_q) 1009 991 { 1010 992 struct scsi_cmnd *scmd, *next; 993 + struct Scsi_Host *shost; 1011 994 int rtn; 995 + unsigned long flags; 1012 996 1013 997 list_for_each_entry_safe(scmd, next, work_q, eh_entry) { 1014 998 if ((scmd->eh_eflags & SCSI_EH_CANCEL_CMD) || 1015 999 SCSI_SENSE_VALID(scmd)) 1016 1000 continue; 1017 1001 1002 + shost = scmd->device->host; 1003 + spin_lock_irqsave(shost->host_lock, flags); 1004 + if (scsi_host_eh_past_deadline(shost)) { 1005 + spin_unlock_irqrestore(shost->host_lock, flags); 1006 + SCSI_LOG_ERROR_RECOVERY(3, 1007 + shost_printk(KERN_INFO, shost, 1008 + "skip %s, past eh deadline\n", 1009 + __func__)); 1010 + break; 1011 + } 1012 + spin_unlock_irqrestore(shost->host_lock, flags); 1018 1013 SCSI_LOG_ERROR_RECOVERY(2, scmd_printk(KERN_INFO, scmd, 1019 1014 "%s: requesting sense\n", 1020 1015 current->comm)); ··· 1113 1082 struct scsi_cmnd *scmd, *next; 1114 1083 struct 
scsi_device *sdev; 1115 1084 int finish_cmds; 1085 + unsigned long flags; 1116 1086 1117 1087 while (!list_empty(cmd_list)) { 1118 1088 scmd = list_entry(cmd_list->next, struct scsi_cmnd, eh_entry); 1119 1089 sdev = scmd->device; 1090 + 1091 + if (!try_stu) { 1092 + spin_lock_irqsave(sdev->host->host_lock, flags); 1093 + if (scsi_host_eh_past_deadline(sdev->host)) { 1094 + /* Push items back onto work_q */ 1095 + list_splice_init(cmd_list, work_q); 1096 + spin_unlock_irqrestore(sdev->host->host_lock, 1097 + flags); 1098 + SCSI_LOG_ERROR_RECOVERY(3, 1099 + shost_printk(KERN_INFO, sdev->host, 1100 + "skip %s, past eh deadline", 1101 + __func__)); 1102 + break; 1103 + } 1104 + spin_unlock_irqrestore(sdev->host->host_lock, flags); 1105 + } 1120 1106 1121 1107 finish_cmds = !scsi_device_online(scmd->device) || 1122 1108 (try_stu && !scsi_eh_try_stu(scmd) && ··· 1170 1122 struct scsi_cmnd *scmd, *next; 1171 1123 LIST_HEAD(check_list); 1172 1124 int rtn; 1125 + struct Scsi_Host *shost; 1126 + unsigned long flags; 1173 1127 1174 1128 list_for_each_entry_safe(scmd, next, work_q, eh_entry) { 1175 1129 if (!(scmd->eh_eflags & SCSI_EH_CANCEL_CMD)) 1176 1130 continue; 1131 + shost = scmd->device->host; 1132 + spin_lock_irqsave(shost->host_lock, flags); 1133 + if (scsi_host_eh_past_deadline(shost)) { 1134 + spin_unlock_irqrestore(shost->host_lock, flags); 1135 + list_splice_init(&check_list, work_q); 1136 + SCSI_LOG_ERROR_RECOVERY(3, 1137 + shost_printk(KERN_INFO, shost, 1138 + "skip %s, past eh deadline\n", 1139 + __func__)); 1140 + return list_empty(work_q); 1141 + } 1142 + spin_unlock_irqrestore(shost->host_lock, flags); 1177 1143 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting cmd:" 1178 1144 "0x%p\n", current->comm, 1179 1145 scmd)); 1180 - rtn = scsi_try_to_abort_cmd(scmd->device->host->hostt, scmd); 1181 - if (rtn == SUCCESS || rtn == FAST_IO_FAIL) { 1182 - scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD; 1183 - if (rtn == FAST_IO_FAIL) 1184 - scsi_eh_finish_cmd(scmd, done_q); 
1185 - else 1186 - list_move_tail(&scmd->eh_entry, &check_list); 1187 - } else 1146 + rtn = scsi_try_to_abort_cmd(shost->hostt, scmd); 1147 + if (rtn == FAILED) { 1188 1148 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting" 1189 1149 " cmd failed:" 1190 1150 "0x%p\n", 1191 1151 current->comm, 1192 1152 scmd)); 1153 + list_splice_init(&check_list, work_q); 1154 + return list_empty(work_q); 1155 + } 1156 + scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD; 1157 + if (rtn == FAST_IO_FAIL) 1158 + scsi_eh_finish_cmd(scmd, done_q); 1159 + else 1160 + list_move_tail(&scmd->eh_entry, &check_list); 1193 1161 } 1194 1162 1195 1163 return scsi_eh_test_devices(&check_list, work_q, done_q, 0); ··· 1251 1187 { 1252 1188 struct scsi_cmnd *scmd, *stu_scmd, *next; 1253 1189 struct scsi_device *sdev; 1190 + unsigned long flags; 1254 1191 1255 1192 shost_for_each_device(sdev, shost) { 1193 + spin_lock_irqsave(shost->host_lock, flags); 1194 + if (scsi_host_eh_past_deadline(shost)) { 1195 + spin_unlock_irqrestore(shost->host_lock, flags); 1196 + SCSI_LOG_ERROR_RECOVERY(3, 1197 + shost_printk(KERN_INFO, shost, 1198 + "skip %s, past eh deadline\n", 1199 + __func__)); 1200 + break; 1201 + } 1202 + spin_unlock_irqrestore(shost->host_lock, flags); 1256 1203 stu_scmd = NULL; 1257 1204 list_for_each_entry(scmd, work_q, eh_entry) 1258 1205 if (scmd->device == sdev && SCSI_SENSE_VALID(scmd) && ··· 1316 1241 { 1317 1242 struct scsi_cmnd *scmd, *bdr_scmd, *next; 1318 1243 struct scsi_device *sdev; 1244 + unsigned long flags; 1319 1245 int rtn; 1320 1246 1321 1247 shost_for_each_device(sdev, shost) { 1248 + spin_lock_irqsave(shost->host_lock, flags); 1249 + if (scsi_host_eh_past_deadline(shost)) { 1250 + spin_unlock_irqrestore(shost->host_lock, flags); 1251 + SCSI_LOG_ERROR_RECOVERY(3, 1252 + shost_printk(KERN_INFO, shost, 1253 + "skip %s, past eh deadline\n", 1254 + __func__)); 1255 + break; 1256 + } 1257 + spin_unlock_irqrestore(shost->host_lock, flags); 1322 1258 bdr_scmd = NULL; 1323 1259 
list_for_each_entry(scmd, work_q, eh_entry) 1324 1260 if (scmd->device == sdev) { ··· 1389 1303 struct scsi_cmnd *next, *scmd; 1390 1304 int rtn; 1391 1305 unsigned int id; 1306 + unsigned long flags; 1307 + 1308 + spin_lock_irqsave(shost->host_lock, flags); 1309 + if (scsi_host_eh_past_deadline(shost)) { 1310 + spin_unlock_irqrestore(shost->host_lock, flags); 1311 + /* push back on work queue for further processing */ 1312 + list_splice_init(&check_list, work_q); 1313 + list_splice_init(&tmp_list, work_q); 1314 + SCSI_LOG_ERROR_RECOVERY(3, 1315 + shost_printk(KERN_INFO, shost, 1316 + "skip %s, past eh deadline\n", 1317 + __func__)); 1318 + return list_empty(work_q); 1319 + } 1320 + spin_unlock_irqrestore(shost->host_lock, flags); 1392 1321 1393 1322 scmd = list_entry(tmp_list.next, struct scsi_cmnd, eh_entry); 1394 1323 id = scmd_id(scmd); ··· 1448 1347 LIST_HEAD(check_list); 1449 1348 unsigned int channel; 1450 1349 int rtn; 1350 + unsigned long flags; 1451 1351 1452 1352 /* 1453 1353 * we really want to loop over the various channels, and do this on ··· 1458 1356 */ 1459 1357 1460 1358 for (channel = 0; channel <= shost->max_channel; channel++) { 1359 + spin_lock_irqsave(shost->host_lock, flags); 1360 + if (scsi_host_eh_past_deadline(shost)) { 1361 + spin_unlock_irqrestore(shost->host_lock, flags); 1362 + list_splice_init(&check_list, work_q); 1363 + SCSI_LOG_ERROR_RECOVERY(3, 1364 + shost_printk(KERN_INFO, shost, 1365 + "skip %s, past eh deadline\n", 1366 + __func__)); 1367 + return list_empty(work_q); 1368 + } 1369 + spin_unlock_irqrestore(shost->host_lock, flags); 1370 + 1461 1371 chan_scmd = NULL; 1462 1372 list_for_each_entry(scmd, work_q, eh_entry) { 1463 1373 if (channel == scmd_channel(scmd)) { ··· 1869 1755 * will be requests for character device operations, and also for 1870 1756 * ioctls to queued block devices. 
1871 1757 */ 1872 - SCSI_LOG_ERROR_RECOVERY(3, printk("%s: waking up host to restart\n", 1873 - __func__)); 1758 + SCSI_LOG_ERROR_RECOVERY(3, 1759 + printk("scsi_eh_%d waking up host to restart\n", 1760 + shost->host_no)); 1874 1761 1875 1762 spin_lock_irqsave(shost->host_lock, flags); 1876 1763 if (scsi_host_set_state(shost, SHOST_RUNNING)) ··· 1998 1883 if (!scsi_eh_abort_cmds(&eh_work_q, &eh_done_q)) 1999 1884 scsi_eh_ready_devs(shost, &eh_work_q, &eh_done_q); 2000 1885 1886 + spin_lock_irqsave(shost->host_lock, flags); 1887 + if (shost->eh_deadline) 1888 + shost->last_reset = 0; 1889 + spin_unlock_irqrestore(shost->host_lock, flags); 2001 1890 scsi_eh_flush_done_q(&eh_done_q); 2002 1891 } 2003 1892 ··· 2028 1909 if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) || 2029 1910 shost->host_failed != shost->host_busy) { 2030 1911 SCSI_LOG_ERROR_RECOVERY(1, 2031 - printk("Error handler scsi_eh_%d sleeping\n", 1912 + printk("scsi_eh_%d: sleeping\n", 2032 1913 shost->host_no)); 2033 1914 schedule(); 2034 1915 continue; ··· 2036 1917 2037 1918 __set_current_state(TASK_RUNNING); 2038 1919 SCSI_LOG_ERROR_RECOVERY(1, 2039 - printk("Error handler scsi_eh_%d waking up\n", 2040 - shost->host_no)); 1920 + printk("scsi_eh_%d: waking up %d/%d/%d\n", 1921 + shost->host_no, shost->host_eh_scheduled, 1922 + shost->host_failed, shost->host_busy)); 2041 1923 2042 1924 /* 2043 1925 * We have a host that is failing for some reason. Figure out
+2 -1
drivers/scsi/scsi_pm.c
··· 54 54 /* 55 55 * All the high-level SCSI drivers that implement runtime 56 56 * PM treat runtime suspend, system suspend, and system 57 - * hibernate identically. 57 + * hibernate nearly identically. In all cases the requirements 58 + * for runtime suspension are stricter. 58 59 */ 59 60 if (pm_runtime_suspended(dev)) 60 61 return 0;
+39
drivers/scsi/scsi_sysfs.c
··· 281 281 282 282 static DEVICE_ATTR(host_reset, S_IWUSR, NULL, store_host_reset); 283 283 284 + static ssize_t 285 + show_shost_eh_deadline(struct device *dev, 286 + struct device_attribute *attr, char *buf) 287 + { 288 + struct Scsi_Host *shost = class_to_shost(dev); 289 + 290 + return sprintf(buf, "%d\n", shost->eh_deadline / HZ); 291 + } 292 + 293 + static ssize_t 294 + store_shost_eh_deadline(struct device *dev, struct device_attribute *attr, 295 + const char *buf, size_t count) 296 + { 297 + struct Scsi_Host *shost = class_to_shost(dev); 298 + int ret = -EINVAL; 299 + int deadline; 300 + unsigned long flags; 301 + 302 + if (shost->transportt && shost->transportt->eh_strategy_handler) 303 + return ret; 304 + 305 + if (sscanf(buf, "%d\n", &deadline) == 1) { 306 + spin_lock_irqsave(shost->host_lock, flags); 307 + if (scsi_host_in_recovery(shost)) 308 + ret = -EBUSY; 309 + else { 310 + shost->eh_deadline = deadline * HZ; 311 + ret = count; 312 + } 313 + spin_unlock_irqrestore(shost->host_lock, flags); 314 + } 315 + return ret; 316 + } 317 + 318 + static DEVICE_ATTR(eh_deadline, S_IRUGO | S_IWUSR, show_shost_eh_deadline, store_shost_eh_deadline); 319 + 284 320 shost_rd_attr(unique_id, "%u\n"); 285 321 shost_rd_attr(host_busy, "%hu\n"); 286 322 shost_rd_attr(cmd_per_lun, "%hd\n"); ··· 344 308 &dev_attr_prot_capabilities.attr, 345 309 &dev_attr_prot_guard_type.attr, 346 310 &dev_attr_host_reset.attr, 311 + &dev_attr_eh_deadline.attr, 347 312 NULL 348 313 }; 349 314 ··· 566 529 */ 567 530 sdev_rd_attr (device_blocked, "%d\n"); 568 531 sdev_rd_attr (queue_depth, "%d\n"); 532 + sdev_rd_attr (device_busy, "%d\n"); 569 533 sdev_rd_attr (type, "%d\n"); 570 534 sdev_rd_attr (scsi_level, "%d\n"); 571 535 sdev_rd_attr (vendor, "%.8s\n"); ··· 788 750 &dev_attr_device_blocked.attr, 789 751 &dev_attr_type.attr, 790 752 &dev_attr_scsi_level.attr, 753 + &dev_attr_device_busy.attr, 791 754 &dev_attr_vendor.attr, 792 755 &dev_attr_model.attr, 793 756 &dev_attr_rev.attr,
+26
drivers/scsi/scsi_transport_iscsi.c
··· 2744 2744 return err; 2745 2745 } 2746 2746 2747 + static int iscsi_set_chap(struct iscsi_transport *transport, 2748 + struct iscsi_uevent *ev, uint32_t len) 2749 + { 2750 + char *data = (char *)ev + sizeof(*ev); 2751 + struct Scsi_Host *shost; 2752 + int err = 0; 2753 + 2754 + if (!transport->set_chap) 2755 + return -ENOSYS; 2756 + 2757 + shost = scsi_host_lookup(ev->u.set_path.host_no); 2758 + if (!shost) { 2759 + pr_err("%s could not find host no %u\n", 2760 + __func__, ev->u.set_path.host_no); 2761 + return -ENODEV; 2762 + } 2763 + 2764 + err = transport->set_chap(shost, data, len); 2765 + scsi_host_put(shost); 2766 + return err; 2767 + } 2768 + 2747 2769 static int iscsi_delete_chap(struct iscsi_transport *transport, 2748 2770 struct iscsi_uevent *ev) 2749 2771 { ··· 3255 3233 break; 3256 3234 case ISCSI_UEVENT_LOGOUT_FLASHNODE_SID: 3257 3235 err = iscsi_logout_flashnode_sid(transport, ev); 3236 + break; 3237 + case ISCSI_UEVENT_SET_CHAP: 3238 + err = iscsi_set_chap(transport, ev, 3239 + nlmsg_attrlen(nlh, sizeof(*ev))); 3258 3240 break; 3259 3241 default: 3260 3242 err = -ENOSYS;
+66 -17
drivers/scsi/sd.c
··· 105 105 static int sd_probe(struct device *); 106 106 static int sd_remove(struct device *); 107 107 static void sd_shutdown(struct device *); 108 - static int sd_suspend(struct device *); 108 + static int sd_suspend_system(struct device *); 109 + static int sd_suspend_runtime(struct device *); 109 110 static int sd_resume(struct device *); 110 111 static void sd_rescan(struct device *); 111 112 static int sd_done(struct scsi_cmnd *); ··· 485 484 }; 486 485 487 486 static const struct dev_pm_ops sd_pm_ops = { 488 - .suspend = sd_suspend, 487 + .suspend = sd_suspend_system, 489 488 .resume = sd_resume, 490 - .poweroff = sd_suspend, 489 + .poweroff = sd_suspend_system, 491 490 .restore = sd_resume, 492 - .runtime_suspend = sd_suspend, 491 + .runtime_suspend = sd_suspend_runtime, 493 492 .runtime_resume = sd_resume, 494 493 }; 495 494 ··· 830 829 831 830 static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq) 832 831 { 833 - rq->timeout = SD_FLUSH_TIMEOUT; 832 + rq->timeout *= SD_FLUSH_TIMEOUT_MULTIPLIER; 834 833 rq->retries = SD_MAX_RETRIES; 835 834 rq->cmd[0] = SYNCHRONIZE_CACHE; 836 835 rq->cmd_len = 10; ··· 1434 1433 { 1435 1434 int retries, res; 1436 1435 struct scsi_device *sdp = sdkp->device; 1436 + const int timeout = sdp->request_queue->rq_timeout 1437 + * SD_FLUSH_TIMEOUT_MULTIPLIER; 1437 1438 struct scsi_sense_hdr sshdr; 1438 1439 1439 1440 if (!scsi_device_online(sdp)) 1440 1441 return -ENODEV; 1441 - 1442 1442 1443 1443 for (retries = 3; retries > 0; --retries) { 1444 1444 unsigned char cmd[10] = { 0 }; ··· 1450 1448 * flush everything. 
1451 1449 */ 1452 1450 res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, 1453 - &sshdr, SD_FLUSH_TIMEOUT, 1454 - SD_MAX_RETRIES, NULL, REQ_PM); 1451 + &sshdr, timeout, SD_MAX_RETRIES, 1452 + NULL, REQ_PM); 1455 1453 if (res == 0) 1456 1454 break; 1457 1455 } 1458 1456 1459 1457 if (res) { 1460 1458 sd_print_result(sdkp, res); 1459 + 1461 1460 if (driver_byte(res) & DRIVER_SENSE) 1462 1461 sd_print_sense_hdr(sdkp, &sshdr); 1463 - } 1462 + /* we need to evaluate the error return */ 1463 + if (scsi_sense_valid(&sshdr) && 1464 + /* 0x3a is medium not present */ 1465 + sshdr.asc == 0x3a) 1466 + /* this is no error here */ 1467 + return 0; 1464 1468 1465 - if (res) 1466 - return -EIO; 1469 + switch (host_byte(res)) { 1470 + /* ignore errors due to racing a disconnection */ 1471 + case DID_BAD_TARGET: 1472 + case DID_NO_CONNECT: 1473 + return 0; 1474 + /* signal the upper layer it might try again */ 1475 + case DID_BUS_BUSY: 1476 + case DID_IMM_RETRY: 1477 + case DID_REQUEUE: 1478 + case DID_SOFT_ERROR: 1479 + return -EBUSY; 1480 + default: 1481 + return -EIO; 1482 + } 1483 + } 1467 1484 return 0; 1468 1485 } 1469 1486 ··· 2660 2639 struct scsi_device *sdev = sdkp->device; 2661 2640 2662 2641 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) { 2642 + /* too large values might cause issues with arcmsr */ 2643 + int vpd_buf_len = 64; 2644 + 2663 2645 sdev->no_report_opcodes = 1; 2664 2646 2665 2647 /* Disable WRITE SAME if REPORT SUPPORTED OPERATION 2666 2648 * CODES is unsupported and the device has an ATA 2667 2649 * Information VPD page (SAT). 
2668 2650 */ 2669 - if (!scsi_get_vpd_page(sdev, 0x89, buffer, SD_BUF_SIZE)) 2651 + if (!scsi_get_vpd_page(sdev, 0x89, buffer, vpd_buf_len)) 2670 2652 sdev->no_write_same = 1; 2671 2653 } 2672 2654 ··· 3082 3058 sd_print_result(sdkp, res); 3083 3059 if (driver_byte(res) & DRIVER_SENSE) 3084 3060 sd_print_sense_hdr(sdkp, &sshdr); 3061 + if (scsi_sense_valid(&sshdr) && 3062 + /* 0x3a is medium not present */ 3063 + sshdr.asc == 0x3a) 3064 + res = 0; 3085 3065 } 3086 3066 3087 - return res; 3067 + /* SCSI error codes must not go to the generic layer */ 3068 + if (res) 3069 + return -EIO; 3070 + 3071 + return 0; 3088 3072 } 3089 3073 3090 3074 /* ··· 3110 3078 if (pm_runtime_suspended(dev)) 3111 3079 goto exit; 3112 3080 3113 - if (sdkp->WCE) { 3081 + if (sdkp->WCE && sdkp->media_present) { 3114 3082 sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); 3115 3083 sd_sync_cache(sdkp); 3116 3084 } ··· 3124 3092 scsi_disk_put(sdkp); 3125 3093 } 3126 3094 3127 - static int sd_suspend(struct device *dev) 3095 + static int sd_suspend_common(struct device *dev, bool ignore_stop_errors) 3128 3096 { 3129 3097 struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev); 3130 3098 int ret = 0; ··· 3132 3100 if (!sdkp) 3133 3101 return 0; /* this can happen */ 3134 3102 3135 - if (sdkp->WCE) { 3103 + if (sdkp->WCE && sdkp->media_present) { 3136 3104 sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); 3137 3105 ret = sd_sync_cache(sdkp); 3138 - if (ret) 3106 + if (ret) { 3107 + /* ignore OFFLINE device */ 3108 + if (ret == -ENODEV) 3109 + ret = 0; 3139 3110 goto done; 3111 + } 3140 3112 } 3141 3113 3142 3114 if (sdkp->device->manage_start_stop) { 3143 3115 sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n"); 3116 + /* an error is not worth aborting a system sleep */ 3144 3117 ret = sd_start_stop_device(sdkp, 0); 3118 + if (ignore_stop_errors) 3119 + ret = 0; 3145 3120 } 3146 3121 3147 3122 done: 3148 3123 scsi_disk_put(sdkp); 3149 3124 return ret; 3125 + } 3126 + 3127 + static 
int sd_suspend_system(struct device *dev) 3128 + { 3129 + return sd_suspend_common(dev, true); 3130 + } 3131 + 3132 + static int sd_suspend_runtime(struct device *dev) 3133 + { 3134 + return sd_suspend_common(dev, false); 3150 3135 } 3151 3136 3152 3137 static int sd_resume(struct device *dev)
+5 -1
drivers/scsi/sd.h
··· 13 13 */ 14 14 #define SD_TIMEOUT (30 * HZ) 15 15 #define SD_MOD_TIMEOUT (75 * HZ) 16 - #define SD_FLUSH_TIMEOUT (60 * HZ) 16 + /* 17 + * Flush timeout is a multiplier over the standard device timeout which is 18 + * user modifiable via sysfs but initially set to SD_TIMEOUT 19 + */ 20 + #define SD_FLUSH_TIMEOUT_MULTIPLIER 2 17 21 #define SD_WRITE_SAME_TIMEOUT (120 * HZ) 18 22 19 23 /*
+7 -7
drivers/scsi/tmscsim.c
··· 521 521 pACB->SelConn++; 522 522 return 1; 523 523 } 524 - if (time_before (jiffies, pACB->pScsiHost->last_reset)) 524 + if (time_before (jiffies, pACB->last_reset)) 525 525 { 526 526 DEBUG0(printk ("DC390: We were just reset and don't accept commands yet!\n")); 527 527 return 1; ··· 1863 1863 /* delay half a second */ 1864 1864 udelay (1000); 1865 1865 DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD); 1866 - pACB->pScsiHost->last_reset = jiffies + 5*HZ/2 1866 + pACB->last_reset = jiffies + 5*HZ/2 1867 1867 + HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY]; 1868 1868 pACB->Connected = 0; 1869 1869 ··· 2048 2048 2049 2049 dc390_ResetDevParam(pACB); 2050 2050 mdelay(1); 2051 - pACB->pScsiHost->last_reset = jiffies + 3*HZ/2 2051 + pACB->last_reset = jiffies + 3*HZ/2 2052 2052 + HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY]; 2053 - 2053 + 2054 2054 DC390_write8(ScsiCmd, CLEAR_FIFO_CMD); 2055 2055 DC390_read8(INT_Status); /* Reset Pending INT */ 2056 2056 ··· 2383 2383 if (pACB->Gmode2 & RST_SCSI_BUS) { 2384 2384 dc390_ResetSCSIBus(pACB); 2385 2385 udelay(1000); 2386 - shost->last_reset = jiffies + HZ/2 + 2386 + pACB->last_reset = jiffies + HZ/2 + 2387 2387 HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY]; 2388 2388 } 2389 2389 ··· 2455 2455 shost->irq = pdev->irq; 2456 2456 shost->base = io_port; 2457 2457 shost->unique_id = io_port; 2458 - shost->last_reset = jiffies; 2459 - 2458 + 2459 + pACB->last_reset = jiffies; 2460 2460 pACB->pScsiHost = shost; 2461 2461 pACB->IOPortBase = (u16) io_port; 2462 2462 pACB->IRQLevel = pdev->irq;
+1
drivers/scsi/tmscsim.h
··· 143 143 144 144 struct pci_dev *pdev; 145 145 146 + unsigned long last_reset; 146 147 unsigned long Cmds; 147 148 u32 SelLost; 148 149 u32 SelConn;
+1 -1
include/scsi/fc/fc_fc2.h
··· 104 104 * esb_e_stat - flags from FC-FS-2 T11/1619-D Rev 0.90. 105 105 */ 106 106 #define ESB_ST_RESP (1 << 31) /* responder to exchange */ 107 - #define ESB_ST_SEQ_INIT (1 << 30) /* port holds sequence initiaive */ 107 + #define ESB_ST_SEQ_INIT (1 << 30) /* port holds sequence initiative */ 108 108 #define ESB_ST_COMPLETE (1 << 29) /* exchange is complete */ 109 109 #define ESB_ST_ABNORMAL (1 << 28) /* abnormal ending condition */ 110 110 #define ESB_ST_REC_QUAL (1 << 26) /* recovery qualifier active */
+17
include/scsi/iscsi_if.h
··· 69 69 ISCSI_UEVENT_LOGIN_FLASHNODE = UEVENT_BASE + 28, 70 70 ISCSI_UEVENT_LOGOUT_FLASHNODE = UEVENT_BASE + 29, 71 71 ISCSI_UEVENT_LOGOUT_FLASHNODE_SID = UEVENT_BASE + 30, 72 + ISCSI_UEVENT_SET_CHAP = UEVENT_BASE + 31, 72 73 73 74 /* up events */ 74 75 ISCSI_KEVENT_RECV_PDU = KEVENT_BASE + 1, ··· 310 309 ISCSI_HOST_PARAM, /* iscsi_host_param */ 311 310 ISCSI_NET_PARAM, /* iscsi_net_param */ 312 311 ISCSI_FLASHNODE_PARAM, /* iscsi_flashnode_param */ 312 + ISCSI_CHAP_PARAM, /* iscsi_chap_param */ 313 313 }; 314 + 315 + /* structure for minimalist usecase */ 316 + struct iscsi_param_info { 317 + uint32_t len; /* Actual length of the param value */ 318 + uint16_t param; /* iscsi param */ 319 + uint8_t value[0]; /* length sized value follows */ 320 + } __packed; 314 321 315 322 struct iscsi_iface_param_info { 316 323 uint32_t iface_num; /* iface number, 0 - n */ ··· 746 737 enum chap_type_e { 747 738 CHAP_TYPE_OUT, 748 739 CHAP_TYPE_IN, 740 + }; 741 + 742 + enum iscsi_chap_param { 743 + ISCSI_CHAP_PARAM_INDEX, 744 + ISCSI_CHAP_PARAM_CHAP_TYPE, 745 + ISCSI_CHAP_PARAM_USERNAME, 746 + ISCSI_CHAP_PARAM_PASSWORD, 747 + ISCSI_CHAP_PARAM_PASSWORD_LEN 749 748 }; 750 749 751 750 #define ISCSI_CHAP_AUTH_NAME_MAX_LEN 256
+9
include/scsi/libfc.h
··· 410 410 * @fh_type: The frame type 411 411 * @class: The class of service 412 412 * @seq: The sequence in use on this exchange 413 + * @resp_active: Number of tasks that are concurrently executing @resp(). 414 + * @resp_task: If @resp_active > 0, either the task executing @resp(), the 415 + * task that has been interrupted to execute the soft-IRQ 416 + * executing @resp() or NULL if more than one task is executing 417 + * @resp concurrently. 418 + * @resp_wq: Waitqueue for the tasks waiting on @resp_active. 413 419 * @resp: Callback for responses on this exchange 414 420 * @destructor: Called when destroying the exchange 415 421 * @arg: Passed as a void pointer to the resp() callback ··· 447 441 u32 r_a_tov; 448 442 u32 f_ctl; 449 443 struct fc_seq seq; 444 + int resp_active; 445 + struct task_struct *resp_task; 446 + wait_queue_head_t resp_wq; 450 447 void (*resp)(struct fc_seq *, struct fc_frame *, void *); 451 448 void *arg; 452 449 void (*destructor)(struct fc_seq *, void *);
+6 -1
include/scsi/libfcoe.h
··· 90 90 * @lp: &fc_lport: libfc local port. 91 91 * @sel_fcf: currently selected FCF, or NULL. 92 92 * @fcfs: list of discovered FCFs. 93 + * @cdev: (Optional) pointer to sysfs fcoe_ctlr_device. 93 94 * @fcf_count: number of discovered FCF entries. 94 95 * @sol_time: time when a multicast solicitation was last sent. 95 96 * @sel_time: time after which to select an FCF. ··· 128 127 struct fc_lport *lp; 129 128 struct fcoe_fcf *sel_fcf; 130 129 struct list_head fcfs; 130 + struct fcoe_ctlr_device *cdev; 131 131 u16 fcf_count; 132 132 unsigned long sol_time; 133 133 unsigned long sel_time; ··· 170 168 return (void *)(ctlr + 1); 171 169 } 172 170 171 + /* 172 + * This assumes that the fcoe_ctlr (x) is allocated with the fcoe_ctlr_device. 173 + */ 173 174 #define fcoe_ctlr_to_ctlr_dev(x) \ 174 - (struct fcoe_ctlr_device *)(((struct fcoe_ctlr_device *)(x)) - 1) 175 + (x)->cdev 175 176 176 177 /** 177 178 * struct fcoe_fcf - Fibre-Channel Forwarder
+4 -1
include/scsi/scsi_host.h
··· 598 598 unsigned int host_eh_scheduled; /* EH scheduled without command */ 599 599 600 600 unsigned int host_no; /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */ 601 - int resetting; /* if set, it means that last_reset is a valid value */ 601 + 602 + /* next two fields are used to bound the time spent in error handling */ 603 + int eh_deadline; 602 604 unsigned long last_reset; 605 + 603 606 604 607 /* 605 608 * These three parameters can be used to allow for wide scsi,
+1
include/scsi/scsi_transport_iscsi.h
··· 152 152 int (*get_chap) (struct Scsi_Host *shost, uint16_t chap_tbl_idx, 153 153 uint32_t *num_entries, char *buf); 154 154 int (*delete_chap) (struct Scsi_Host *shost, uint16_t chap_tbl_idx); 155 + int (*set_chap) (struct Scsi_Host *shost, void *data, int len); 155 156 int (*get_flashnode_param) (struct iscsi_bus_flash_session *fnode_sess, 156 157 int param, char *buf); 157 158 int (*set_flashnode_param) (struct iscsi_bus_flash_session *fnode_sess,