Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[SCSI] bnx2i: Add bnx2i iSCSI driver.

New iSCSI driver for Broadcom BNX2 devices. The driver interfaces with
the CNIC driver to access the hardware.

Signed-off-by: Anil Veerabhadrappa <anilgv@broadcom.com>
Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>

Authored by Michael Chan and committed by James Bottomley (commit cf4e6363, parent a4636960).

+7496
+1
drivers/scsi/Kconfig
··· 354 354 http://open-iscsi.org 355 355 356 356 source "drivers/scsi/cxgb3i/Kconfig" 357 + source "drivers/scsi/bnx2i/Kconfig" 357 358 358 359 config SGIWD93_SCSI 359 360 tristate "SGI WD93C93 SCSI Driver"
+1
drivers/scsi/Makefile
··· 129 129 obj-$(CONFIG_SCSI_MVSAS) += mvsas/ 130 130 obj-$(CONFIG_PS3_ROM) += ps3rom.o 131 131 obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/ 132 + obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/ 132 133 133 134 obj-$(CONFIG_ARM) += arm/ 134 135
+155
drivers/scsi/bnx2i/57xx_iscsi_constants.h
/* 57xx_iscsi_constants.h: Broadcom NetXtreme II iSCSI HSI
 *
 * Copyright (c) 2006 - 2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
 */
#ifndef __57XX_ISCSI_CONSTANTS_H_
#define __57XX_ISCSI_CONSTANTS_H_

/*
 * This file defines HSI (hardware/software interface) constants for the
 * iSCSI flows.  All values are fixed by the NetXtreme II firmware; gaps
 * in the numbering are intentional and must not be "filled in".
 */

/* iSCSI request op codes */
#define ISCSI_OPCODE_CLEANUP_REQUEST (7)

/* iSCSI response/messages op codes */
#define ISCSI_OPCODE_CLEANUP_RESPONSE (0x27)
#define ISCSI_OPCODE_NOPOUT_LOCAL_COMPLETION (0)

/* iSCSI task types */
#define ISCSI_TASK_TYPE_READ (0)
#define ISCSI_TASK_TYPE_WRITE (1)
#define ISCSI_TASK_TYPE_MPATH (2)

/* initial CQ sequence numbers */
#define ISCSI_INITIAL_SN (1)

/* KWQ (kernel work queue) layer codes */
#define ISCSI_KWQE_LAYER_CODE (6)

/* KWQ (kernel work queue) request op codes */
#define ISCSI_KWQE_OPCODE_OFFLOAD_CONN1 (0)
#define ISCSI_KWQE_OPCODE_OFFLOAD_CONN2 (1)
#define ISCSI_KWQE_OPCODE_UPDATE_CONN (2)
#define ISCSI_KWQE_OPCODE_DESTROY_CONN (3)
#define ISCSI_KWQE_OPCODE_INIT1 (4)
#define ISCSI_KWQE_OPCODE_INIT2 (5)

/* KCQ (kernel completion queue) response op codes
 * (0x11 is unassigned by the firmware interface)
 */
#define ISCSI_KCQE_OPCODE_OFFLOAD_CONN (0x10)
#define ISCSI_KCQE_OPCODE_UPDATE_CONN (0x12)
#define ISCSI_KCQE_OPCODE_DESTROY_CONN (0x13)
#define ISCSI_KCQE_OPCODE_INIT (0x14)
#define ISCSI_KCQE_OPCODE_FW_CLEAN_TASK (0x15)
#define ISCSI_KCQE_OPCODE_TCP_RESET (0x16)
#define ISCSI_KCQE_OPCODE_TCP_SYN (0x17)
#define ISCSI_KCQE_OPCODE_TCP_FIN (0x18)	/* was "0X18": lowercase prefix for consistency; value unchanged */
#define ISCSI_KCQE_OPCODE_TCP_ERROR (0x19)
#define ISCSI_KCQE_OPCODE_CQ_EVENT_NOTIFICATION (0x20)
#define ISCSI_KCQE_OPCODE_ISCSI_ERROR (0x21)

/* KCQ (kernel completion queue) completion status */
#define ISCSI_KCQE_COMPLETION_STATUS_SUCCESS (0x0)
#define ISCSI_KCQE_COMPLETION_STATUS_INVALID_OPCODE (0x1)
#define ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE (0x2)
#define ISCSI_KCQE_COMPLETION_STATUS_CTX_FREE_FAILURE (0x3)
#define ISCSI_KCQE_COMPLETION_STATUS_NIC_ERROR (0x4)

#define ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR (0x5)
#define ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR (0x6)

#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_UNEXPECTED_OPCODE (0xa)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE (0xb)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN (0xc)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT (0xd)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN (0xe)

/* Response */
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN (0xf)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T (0x10)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_IS_ZERO (0x2c)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_TOO_BIG (0x2d)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0 (0x11)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1 (0x12)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2 (0x13)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3 (0x14)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4 (0x15)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5 (0x16)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6 (0x17)

/* Data-In */
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN (0x18)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN (0x19)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO (0x1a)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV (0x1b)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN (0x1c)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN (0x1d)

/* R2T (0x1e is unassigned by the firmware interface) */
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF (0x1f)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN (0x20)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN (0x21)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0 (0x22)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1 (0x23)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED (0x24)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV (0x25)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN (0x26)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO (0x27)

/* TMF */
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN (0x28)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN (0x29)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN (0x2a)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP (0x2b)

/* IP/TCP processing errors:
 * NOTE(review): the "ISCI_" spelling below (missing 'S') is a typo, kept
 * verbatim so existing users keep compiling.  Correctly spelled aliases
 * are provided after each original; new code should use the aliases.
 */
#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT (0x40)
#define ISCSI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT \
	ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT
#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS (0x41)
#define ISCSI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS \
	ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS
#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG (0x42)
#define ISCSI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG \
	ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG
#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_MAX_RTRANS (0x43)
#define ISCSI_KCQE_COMPLETION_STATUS_TCP_ERROR_MAX_RTRANS \
	ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_MAX_RTRANS

/* iSCSI licensing errors */
/* general iSCSI license not installed */
#define ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED (0x50)
/* additional LOM specific iSCSI license not installed */
#define ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED (0x51)

/* SQ/RQ/CQ DB structure sizes */
#define ISCSI_SQ_DB_SIZE (16)
#define ISCSI_RQ_DB_SIZE (16)
#define ISCSI_CQ_DB_SIZE (80)

#define ISCSI_SQN_TO_NOTIFY_NOT_VALID 0xFFFF

/* Page size codes (for flags field in connection offload request) */
#define ISCSI_PAGE_SIZE_256 (0)
#define ISCSI_PAGE_SIZE_512 (1)
#define ISCSI_PAGE_SIZE_1K (2)
#define ISCSI_PAGE_SIZE_2K (3)
#define ISCSI_PAGE_SIZE_4K (4)
#define ISCSI_PAGE_SIZE_8K (5)
#define ISCSI_PAGE_SIZE_16K (6)
#define ISCSI_PAGE_SIZE_32K (7)
#define ISCSI_PAGE_SIZE_64K (8)
#define ISCSI_PAGE_SIZE_128K (9)
#define ISCSI_PAGE_SIZE_256K (10)
#define ISCSI_PAGE_SIZE_512K (11)
#define ISCSI_PAGE_SIZE_1M (12)
#define ISCSI_PAGE_SIZE_2M (13)
#define ISCSI_PAGE_SIZE_4M (14)
#define ISCSI_PAGE_SIZE_8M (15)

/* iSCSI PDU related defines */
#define ISCSI_HEADER_SIZE (48)
#define ISCSI_DIGEST_SHIFT (2)
#define ISCSI_DIGEST_SIZE (4)

#define B577XX_ISCSI_CONNECTION_TYPE 3

#endif /*__57XX_ISCSI_CONSTANTS_H_ */
+1509
drivers/scsi/bnx2i/57xx_iscsi_hsi.h
··· 1 + /* 57xx_iscsi_hsi.h: Broadcom NetXtreme II iSCSI HSI. 2 + * 3 + * Copyright (c) 2006 - 2009 Broadcom Corporation 4 + * 5 + * This program is free software; you can redistribute it and/or modify 6 + * it under the terms of the GNU General Public License as published by 7 + * the Free Software Foundation. 8 + * 9 + * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) 10 + */ 11 + #ifndef __57XX_ISCSI_HSI_LINUX_LE__ 12 + #define __57XX_ISCSI_HSI_LINUX_LE__ 13 + 14 + /* 15 + * iSCSI Async CQE 16 + */ 17 + struct bnx2i_async_msg { 18 + #if defined(__BIG_ENDIAN) 19 + u8 op_code; 20 + u8 reserved1; 21 + u16 reserved0; 22 + #elif defined(__LITTLE_ENDIAN) 23 + u16 reserved0; 24 + u8 reserved1; 25 + u8 op_code; 26 + #endif 27 + u32 reserved2; 28 + u32 exp_cmd_sn; 29 + u32 max_cmd_sn; 30 + u32 reserved3[2]; 31 + #if defined(__BIG_ENDIAN) 32 + u16 reserved5; 33 + u8 err_code; 34 + u8 reserved4; 35 + #elif defined(__LITTLE_ENDIAN) 36 + u8 reserved4; 37 + u8 err_code; 38 + u16 reserved5; 39 + #endif 40 + u32 reserved6; 41 + u32 lun[2]; 42 + #if defined(__BIG_ENDIAN) 43 + u8 async_event; 44 + u8 async_vcode; 45 + u16 param1; 46 + #elif defined(__LITTLE_ENDIAN) 47 + u16 param1; 48 + u8 async_vcode; 49 + u8 async_event; 50 + #endif 51 + #if defined(__BIG_ENDIAN) 52 + u16 param2; 53 + u16 param3; 54 + #elif defined(__LITTLE_ENDIAN) 55 + u16 param3; 56 + u16 param2; 57 + #endif 58 + u32 reserved7[3]; 59 + u32 cq_req_sn; 60 + }; 61 + 62 + 63 + /* 64 + * iSCSI Buffer Descriptor (BD) 65 + */ 66 + struct iscsi_bd { 67 + u32 buffer_addr_hi; 68 + u32 buffer_addr_lo; 69 + #if defined(__BIG_ENDIAN) 70 + u16 reserved0; 71 + u16 buffer_length; 72 + #elif defined(__LITTLE_ENDIAN) 73 + u16 buffer_length; 74 + u16 reserved0; 75 + #endif 76 + #if defined(__BIG_ENDIAN) 77 + u16 reserved3; 78 + u16 flags; 79 + #define ISCSI_BD_RESERVED1 (0x3F<<0) 80 + #define ISCSI_BD_RESERVED1_SHIFT 0 81 + #define ISCSI_BD_LAST_IN_BD_CHAIN (0x1<<6) 82 + #define ISCSI_BD_LAST_IN_BD_CHAIN_SHIFT 6 83 + 
#define ISCSI_BD_FIRST_IN_BD_CHAIN (0x1<<7) 84 + #define ISCSI_BD_FIRST_IN_BD_CHAIN_SHIFT 7 85 + #define ISCSI_BD_RESERVED2 (0xFF<<8) 86 + #define ISCSI_BD_RESERVED2_SHIFT 8 87 + #elif defined(__LITTLE_ENDIAN) 88 + u16 flags; 89 + #define ISCSI_BD_RESERVED1 (0x3F<<0) 90 + #define ISCSI_BD_RESERVED1_SHIFT 0 91 + #define ISCSI_BD_LAST_IN_BD_CHAIN (0x1<<6) 92 + #define ISCSI_BD_LAST_IN_BD_CHAIN_SHIFT 6 93 + #define ISCSI_BD_FIRST_IN_BD_CHAIN (0x1<<7) 94 + #define ISCSI_BD_FIRST_IN_BD_CHAIN_SHIFT 7 95 + #define ISCSI_BD_RESERVED2 (0xFF<<8) 96 + #define ISCSI_BD_RESERVED2_SHIFT 8 97 + u16 reserved3; 98 + #endif 99 + }; 100 + 101 + 102 + /* 103 + * iSCSI Cleanup SQ WQE 104 + */ 105 + struct bnx2i_cleanup_request { 106 + #if defined(__BIG_ENDIAN) 107 + u8 op_code; 108 + u8 reserved1; 109 + u16 reserved0; 110 + #elif defined(__LITTLE_ENDIAN) 111 + u16 reserved0; 112 + u8 reserved1; 113 + u8 op_code; 114 + #endif 115 + u32 reserved2[3]; 116 + #if defined(__BIG_ENDIAN) 117 + u16 reserved3; 118 + u16 itt; 119 + #define ISCSI_CLEANUP_REQUEST_INDEX (0x3FFF<<0) 120 + #define ISCSI_CLEANUP_REQUEST_INDEX_SHIFT 0 121 + #define ISCSI_CLEANUP_REQUEST_TYPE (0x3<<14) 122 + #define ISCSI_CLEANUP_REQUEST_TYPE_SHIFT 14 123 + #elif defined(__LITTLE_ENDIAN) 124 + u16 itt; 125 + #define ISCSI_CLEANUP_REQUEST_INDEX (0x3FFF<<0) 126 + #define ISCSI_CLEANUP_REQUEST_INDEX_SHIFT 0 127 + #define ISCSI_CLEANUP_REQUEST_TYPE (0x3<<14) 128 + #define ISCSI_CLEANUP_REQUEST_TYPE_SHIFT 14 129 + u16 reserved3; 130 + #endif 131 + u32 reserved4[10]; 132 + #if defined(__BIG_ENDIAN) 133 + u8 cq_index; 134 + u8 reserved6; 135 + u16 reserved5; 136 + #elif defined(__LITTLE_ENDIAN) 137 + u16 reserved5; 138 + u8 reserved6; 139 + u8 cq_index; 140 + #endif 141 + }; 142 + 143 + 144 + /* 145 + * iSCSI Cleanup CQE 146 + */ 147 + struct bnx2i_cleanup_response { 148 + #if defined(__BIG_ENDIAN) 149 + u8 op_code; 150 + u8 status; 151 + u16 reserved0; 152 + #elif defined(__LITTLE_ENDIAN) 153 + u16 reserved0; 154 + u8 status; 
155 + u8 op_code; 156 + #endif 157 + u32 reserved1[3]; 158 + u32 reserved2[2]; 159 + #if defined(__BIG_ENDIAN) 160 + u16 reserved4; 161 + u8 err_code; 162 + u8 reserved3; 163 + #elif defined(__LITTLE_ENDIAN) 164 + u8 reserved3; 165 + u8 err_code; 166 + u16 reserved4; 167 + #endif 168 + u32 reserved5[7]; 169 + #if defined(__BIG_ENDIAN) 170 + u16 reserved6; 171 + u16 itt; 172 + #define ISCSI_CLEANUP_RESPONSE_INDEX (0x3FFF<<0) 173 + #define ISCSI_CLEANUP_RESPONSE_INDEX_SHIFT 0 174 + #define ISCSI_CLEANUP_RESPONSE_TYPE (0x3<<14) 175 + #define ISCSI_CLEANUP_RESPONSE_TYPE_SHIFT 14 176 + #elif defined(__LITTLE_ENDIAN) 177 + u16 itt; 178 + #define ISCSI_CLEANUP_RESPONSE_INDEX (0x3FFF<<0) 179 + #define ISCSI_CLEANUP_RESPONSE_INDEX_SHIFT 0 180 + #define ISCSI_CLEANUP_RESPONSE_TYPE (0x3<<14) 181 + #define ISCSI_CLEANUP_RESPONSE_TYPE_SHIFT 14 182 + u16 reserved6; 183 + #endif 184 + u32 cq_req_sn; 185 + }; 186 + 187 + 188 + /* 189 + * SCSI read/write SQ WQE 190 + */ 191 + struct bnx2i_cmd_request { 192 + #if defined(__BIG_ENDIAN) 193 + u8 op_code; 194 + u8 op_attr; 195 + #define ISCSI_CMD_REQUEST_TASK_ATTR (0x7<<0) 196 + #define ISCSI_CMD_REQUEST_TASK_ATTR_SHIFT 0 197 + #define ISCSI_CMD_REQUEST_RESERVED1 (0x3<<3) 198 + #define ISCSI_CMD_REQUEST_RESERVED1_SHIFT 3 199 + #define ISCSI_CMD_REQUEST_WRITE (0x1<<5) 200 + #define ISCSI_CMD_REQUEST_WRITE_SHIFT 5 201 + #define ISCSI_CMD_REQUEST_READ (0x1<<6) 202 + #define ISCSI_CMD_REQUEST_READ_SHIFT 6 203 + #define ISCSI_CMD_REQUEST_FINAL (0x1<<7) 204 + #define ISCSI_CMD_REQUEST_FINAL_SHIFT 7 205 + u16 reserved0; 206 + #elif defined(__LITTLE_ENDIAN) 207 + u16 reserved0; 208 + u8 op_attr; 209 + #define ISCSI_CMD_REQUEST_TASK_ATTR (0x7<<0) 210 + #define ISCSI_CMD_REQUEST_TASK_ATTR_SHIFT 0 211 + #define ISCSI_CMD_REQUEST_RESERVED1 (0x3<<3) 212 + #define ISCSI_CMD_REQUEST_RESERVED1_SHIFT 3 213 + #define ISCSI_CMD_REQUEST_WRITE (0x1<<5) 214 + #define ISCSI_CMD_REQUEST_WRITE_SHIFT 5 215 + #define ISCSI_CMD_REQUEST_READ (0x1<<6) 216 + #define 
ISCSI_CMD_REQUEST_READ_SHIFT 6 217 + #define ISCSI_CMD_REQUEST_FINAL (0x1<<7) 218 + #define ISCSI_CMD_REQUEST_FINAL_SHIFT 7 219 + u8 op_code; 220 + #endif 221 + #if defined(__BIG_ENDIAN) 222 + u16 ud_buffer_offset; 223 + u16 sd_buffer_offset; 224 + #elif defined(__LITTLE_ENDIAN) 225 + u16 sd_buffer_offset; 226 + u16 ud_buffer_offset; 227 + #endif 228 + u32 lun[2]; 229 + #if defined(__BIG_ENDIAN) 230 + u16 reserved2; 231 + u16 itt; 232 + #define ISCSI_CMD_REQUEST_INDEX (0x3FFF<<0) 233 + #define ISCSI_CMD_REQUEST_INDEX_SHIFT 0 234 + #define ISCSI_CMD_REQUEST_TYPE (0x3<<14) 235 + #define ISCSI_CMD_REQUEST_TYPE_SHIFT 14 236 + #elif defined(__LITTLE_ENDIAN) 237 + u16 itt; 238 + #define ISCSI_CMD_REQUEST_INDEX (0x3FFF<<0) 239 + #define ISCSI_CMD_REQUEST_INDEX_SHIFT 0 240 + #define ISCSI_CMD_REQUEST_TYPE (0x3<<14) 241 + #define ISCSI_CMD_REQUEST_TYPE_SHIFT 14 242 + u16 reserved2; 243 + #endif 244 + u32 total_data_transfer_length; 245 + u32 cmd_sn; 246 + u32 reserved3; 247 + u32 cdb[4]; 248 + u32 zero_fill; 249 + u32 bd_list_addr_lo; 250 + u32 bd_list_addr_hi; 251 + #if defined(__BIG_ENDIAN) 252 + u8 cq_index; 253 + u8 sd_start_bd_index; 254 + u8 ud_start_bd_index; 255 + u8 num_bds; 256 + #elif defined(__LITTLE_ENDIAN) 257 + u8 num_bds; 258 + u8 ud_start_bd_index; 259 + u8 sd_start_bd_index; 260 + u8 cq_index; 261 + #endif 262 + }; 263 + 264 + 265 + /* 266 + * task statistics for write response 267 + */ 268 + struct bnx2i_write_resp_task_stat { 269 + u32 num_data_ins; 270 + }; 271 + 272 + /* 273 + * task statistics for read response 274 + */ 275 + struct bnx2i_read_resp_task_stat { 276 + #if defined(__BIG_ENDIAN) 277 + u16 num_data_outs; 278 + u16 num_r2ts; 279 + #elif defined(__LITTLE_ENDIAN) 280 + u16 num_r2ts; 281 + u16 num_data_outs; 282 + #endif 283 + }; 284 + 285 + /* 286 + * task statistics for iSCSI cmd response 287 + */ 288 + union bnx2i_cmd_resp_task_stat { 289 + struct bnx2i_write_resp_task_stat write_stat; 290 + struct bnx2i_read_resp_task_stat read_stat; 291 + 
}; 292 + 293 + /* 294 + * SCSI Command CQE 295 + */ 296 + struct bnx2i_cmd_response { 297 + #if defined(__BIG_ENDIAN) 298 + u8 op_code; 299 + u8 response_flags; 300 + #define ISCSI_CMD_RESPONSE_RESERVED0 (0x1<<0) 301 + #define ISCSI_CMD_RESPONSE_RESERVED0_SHIFT 0 302 + #define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW (0x1<<1) 303 + #define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW_SHIFT 1 304 + #define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW (0x1<<2) 305 + #define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW_SHIFT 2 306 + #define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW (0x1<<3) 307 + #define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW_SHIFT 3 308 + #define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW (0x1<<4) 309 + #define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW_SHIFT 4 310 + #define ISCSI_CMD_RESPONSE_RESERVED1 (0x7<<5) 311 + #define ISCSI_CMD_RESPONSE_RESERVED1_SHIFT 5 312 + u8 response; 313 + u8 status; 314 + #elif defined(__LITTLE_ENDIAN) 315 + u8 status; 316 + u8 response; 317 + u8 response_flags; 318 + #define ISCSI_CMD_RESPONSE_RESERVED0 (0x1<<0) 319 + #define ISCSI_CMD_RESPONSE_RESERVED0_SHIFT 0 320 + #define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW (0x1<<1) 321 + #define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW_SHIFT 1 322 + #define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW (0x1<<2) 323 + #define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW_SHIFT 2 324 + #define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW (0x1<<3) 325 + #define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW_SHIFT 3 326 + #define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW (0x1<<4) 327 + #define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW_SHIFT 4 328 + #define ISCSI_CMD_RESPONSE_RESERVED1 (0x7<<5) 329 + #define ISCSI_CMD_RESPONSE_RESERVED1_SHIFT 5 330 + u8 op_code; 331 + #endif 332 + u32 data_length; 333 + u32 exp_cmd_sn; 334 + u32 max_cmd_sn; 335 + u32 reserved2; 336 + u32 residual_count; 337 + #if defined(__BIG_ENDIAN) 338 + u16 reserved4; 339 + u8 err_code; 340 + u8 reserved3; 341 + #elif defined(__LITTLE_ENDIAN) 342 + u8 reserved3; 343 + u8 
err_code; 344 + u16 reserved4; 345 + #endif 346 + u32 reserved5[5]; 347 + union bnx2i_cmd_resp_task_stat task_stat; 348 + u32 reserved6; 349 + #if defined(__BIG_ENDIAN) 350 + u16 reserved7; 351 + u16 itt; 352 + #define ISCSI_CMD_RESPONSE_INDEX (0x3FFF<<0) 353 + #define ISCSI_CMD_RESPONSE_INDEX_SHIFT 0 354 + #define ISCSI_CMD_RESPONSE_TYPE (0x3<<14) 355 + #define ISCSI_CMD_RESPONSE_TYPE_SHIFT 14 356 + #elif defined(__LITTLE_ENDIAN) 357 + u16 itt; 358 + #define ISCSI_CMD_RESPONSE_INDEX (0x3FFF<<0) 359 + #define ISCSI_CMD_RESPONSE_INDEX_SHIFT 0 360 + #define ISCSI_CMD_RESPONSE_TYPE (0x3<<14) 361 + #define ISCSI_CMD_RESPONSE_TYPE_SHIFT 14 362 + u16 reserved7; 363 + #endif 364 + u32 cq_req_sn; 365 + }; 366 + 367 + 368 + 369 + /* 370 + * firmware middle-path request SQ WQE 371 + */ 372 + struct bnx2i_fw_mp_request { 373 + #if defined(__BIG_ENDIAN) 374 + u8 op_code; 375 + u8 op_attr; 376 + u16 hdr_opaque1; 377 + #elif defined(__LITTLE_ENDIAN) 378 + u16 hdr_opaque1; 379 + u8 op_attr; 380 + u8 op_code; 381 + #endif 382 + u32 data_length; 383 + u32 hdr_opaque2[2]; 384 + #if defined(__BIG_ENDIAN) 385 + u16 reserved0; 386 + u16 itt; 387 + #define ISCSI_FW_MP_REQUEST_INDEX (0x3FFF<<0) 388 + #define ISCSI_FW_MP_REQUEST_INDEX_SHIFT 0 389 + #define ISCSI_FW_MP_REQUEST_TYPE (0x3<<14) 390 + #define ISCSI_FW_MP_REQUEST_TYPE_SHIFT 14 391 + #elif defined(__LITTLE_ENDIAN) 392 + u16 itt; 393 + #define ISCSI_FW_MP_REQUEST_INDEX (0x3FFF<<0) 394 + #define ISCSI_FW_MP_REQUEST_INDEX_SHIFT 0 395 + #define ISCSI_FW_MP_REQUEST_TYPE (0x3<<14) 396 + #define ISCSI_FW_MP_REQUEST_TYPE_SHIFT 14 397 + u16 reserved0; 398 + #endif 399 + u32 hdr_opaque3[4]; 400 + u32 resp_bd_list_addr_lo; 401 + u32 resp_bd_list_addr_hi; 402 + u32 resp_buffer; 403 + #define ISCSI_FW_MP_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0) 404 + #define ISCSI_FW_MP_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0 405 + #define ISCSI_FW_MP_REQUEST_NUM_RESP_BDS (0xFF<<24) 406 + #define ISCSI_FW_MP_REQUEST_NUM_RESP_BDS_SHIFT 24 407 + #if 
defined(__BIG_ENDIAN) 408 + u16 reserved4; 409 + u8 reserved3; 410 + u8 flags; 411 + #define ISCSI_FW_MP_REQUEST_RESERVED1 (0x1<<0) 412 + #define ISCSI_FW_MP_REQUEST_RESERVED1_SHIFT 0 413 + #define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION (0x1<<1) 414 + #define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION_SHIFT 1 415 + #define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2) 416 + #define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2 417 + #define ISCSI_FW_MP_REQUEST_RESERVED2 (0x1F<<3) 418 + #define ISCSI_FW_MP_REQUEST_RESERVED2_SHIFT 3 419 + #elif defined(__LITTLE_ENDIAN) 420 + u8 flags; 421 + #define ISCSI_FW_MP_REQUEST_RESERVED1 (0x1<<0) 422 + #define ISCSI_FW_MP_REQUEST_RESERVED1_SHIFT 0 423 + #define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION (0x1<<1) 424 + #define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION_SHIFT 1 425 + #define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2) 426 + #define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2 427 + #define ISCSI_FW_MP_REQUEST_RESERVED2 (0x1F<<3) 428 + #define ISCSI_FW_MP_REQUEST_RESERVED2_SHIFT 3 429 + u8 reserved3; 430 + u16 reserved4; 431 + #endif 432 + u32 bd_list_addr_lo; 433 + u32 bd_list_addr_hi; 434 + #if defined(__BIG_ENDIAN) 435 + u8 cq_index; 436 + u8 reserved6; 437 + u8 reserved5; 438 + u8 num_bds; 439 + #elif defined(__LITTLE_ENDIAN) 440 + u8 num_bds; 441 + u8 reserved5; 442 + u8 reserved6; 443 + u8 cq_index; 444 + #endif 445 + }; 446 + 447 + 448 + /* 449 + * firmware response - CQE: used only by firmware 450 + */ 451 + struct bnx2i_fw_response { 452 + u32 hdr_dword1[2]; 453 + u32 hdr_exp_cmd_sn; 454 + u32 hdr_max_cmd_sn; 455 + u32 hdr_ttt; 456 + u32 hdr_res_cnt; 457 + u32 cqe_flags; 458 + #define ISCSI_FW_RESPONSE_RESERVED2 (0xFF<<0) 459 + #define ISCSI_FW_RESPONSE_RESERVED2_SHIFT 0 460 + #define ISCSI_FW_RESPONSE_ERR_CODE (0xFF<<8) 461 + #define ISCSI_FW_RESPONSE_ERR_CODE_SHIFT 8 462 + #define ISCSI_FW_RESPONSE_RESERVED3 (0xFFFF<<16) 463 + #define ISCSI_FW_RESPONSE_RESERVED3_SHIFT 16 464 + u32 stat_sn; 465 + u32 
hdr_dword2[2]; 466 + u32 hdr_dword3[2]; 467 + u32 task_stat; 468 + u32 reserved0; 469 + u32 hdr_itt; 470 + u32 cq_req_sn; 471 + }; 472 + 473 + 474 + /* 475 + * iSCSI KCQ CQE parameters 476 + */ 477 + union iscsi_kcqe_params { 478 + u32 reserved0[4]; 479 + }; 480 + 481 + /* 482 + * iSCSI KCQ CQE 483 + */ 484 + struct iscsi_kcqe { 485 + u32 iscsi_conn_id; 486 + u32 completion_status; 487 + u32 iscsi_conn_context_id; 488 + union iscsi_kcqe_params params; 489 + #if defined(__BIG_ENDIAN) 490 + u8 flags; 491 + #define ISCSI_KCQE_RESERVED0 (0xF<<0) 492 + #define ISCSI_KCQE_RESERVED0_SHIFT 0 493 + #define ISCSI_KCQE_LAYER_CODE (0x7<<4) 494 + #define ISCSI_KCQE_LAYER_CODE_SHIFT 4 495 + #define ISCSI_KCQE_RESERVED1 (0x1<<7) 496 + #define ISCSI_KCQE_RESERVED1_SHIFT 7 497 + u8 op_code; 498 + u16 qe_self_seq; 499 + #elif defined(__LITTLE_ENDIAN) 500 + u16 qe_self_seq; 501 + u8 op_code; 502 + u8 flags; 503 + #define ISCSI_KCQE_RESERVED0 (0xF<<0) 504 + #define ISCSI_KCQE_RESERVED0_SHIFT 0 505 + #define ISCSI_KCQE_LAYER_CODE (0x7<<4) 506 + #define ISCSI_KCQE_LAYER_CODE_SHIFT 4 507 + #define ISCSI_KCQE_RESERVED1 (0x1<<7) 508 + #define ISCSI_KCQE_RESERVED1_SHIFT 7 509 + #endif 510 + }; 511 + 512 + 513 + 514 + /* 515 + * iSCSI KWQE header 516 + */ 517 + struct iscsi_kwqe_header { 518 + #if defined(__BIG_ENDIAN) 519 + u8 flags; 520 + #define ISCSI_KWQE_HEADER_RESERVED0 (0xF<<0) 521 + #define ISCSI_KWQE_HEADER_RESERVED0_SHIFT 0 522 + #define ISCSI_KWQE_HEADER_LAYER_CODE (0x7<<4) 523 + #define ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT 4 524 + #define ISCSI_KWQE_HEADER_RESERVED1 (0x1<<7) 525 + #define ISCSI_KWQE_HEADER_RESERVED1_SHIFT 7 526 + u8 op_code; 527 + #elif defined(__LITTLE_ENDIAN) 528 + u8 op_code; 529 + u8 flags; 530 + #define ISCSI_KWQE_HEADER_RESERVED0 (0xF<<0) 531 + #define ISCSI_KWQE_HEADER_RESERVED0_SHIFT 0 532 + #define ISCSI_KWQE_HEADER_LAYER_CODE (0x7<<4) 533 + #define ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT 4 534 + #define ISCSI_KWQE_HEADER_RESERVED1 (0x1<<7) 535 + #define 
ISCSI_KWQE_HEADER_RESERVED1_SHIFT 7 536 + #endif 537 + }; 538 + 539 + /* 540 + * iSCSI firmware init request 1 541 + */ 542 + struct iscsi_kwqe_init1 { 543 + #if defined(__BIG_ENDIAN) 544 + struct iscsi_kwqe_header hdr; 545 + u8 reserved0; 546 + u8 num_cqs; 547 + #elif defined(__LITTLE_ENDIAN) 548 + u8 num_cqs; 549 + u8 reserved0; 550 + struct iscsi_kwqe_header hdr; 551 + #endif 552 + u32 dummy_buffer_addr_lo; 553 + u32 dummy_buffer_addr_hi; 554 + #if defined(__BIG_ENDIAN) 555 + u16 num_ccells_per_conn; 556 + u16 num_tasks_per_conn; 557 + #elif defined(__LITTLE_ENDIAN) 558 + u16 num_tasks_per_conn; 559 + u16 num_ccells_per_conn; 560 + #endif 561 + #if defined(__BIG_ENDIAN) 562 + u16 sq_wqes_per_page; 563 + u16 sq_num_wqes; 564 + #elif defined(__LITTLE_ENDIAN) 565 + u16 sq_num_wqes; 566 + u16 sq_wqes_per_page; 567 + #endif 568 + #if defined(__BIG_ENDIAN) 569 + u8 cq_log_wqes_per_page; 570 + u8 flags; 571 + #define ISCSI_KWQE_INIT1_PAGE_SIZE (0xF<<0) 572 + #define ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT 0 573 + #define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE (0x1<<4) 574 + #define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4 575 + #define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5) 576 + #define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5 577 + #define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6) 578 + #define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6 579 + u16 cq_num_wqes; 580 + #elif defined(__LITTLE_ENDIAN) 581 + u16 cq_num_wqes; 582 + u8 flags; 583 + #define ISCSI_KWQE_INIT1_PAGE_SIZE (0xF<<0) 584 + #define ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT 0 585 + #define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE (0x1<<4) 586 + #define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4 587 + #define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5) 588 + #define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5 589 + #define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6) 590 + #define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6 591 + u8 cq_log_wqes_per_page; 592 + #endif 593 + #if defined(__BIG_ENDIAN) 594 + u16 cq_num_pages; 595 + u16 sq_num_pages; 596 
+ #elif defined(__LITTLE_ENDIAN) 597 + u16 sq_num_pages; 598 + u16 cq_num_pages; 599 + #endif 600 + #if defined(__BIG_ENDIAN) 601 + u16 rq_buffer_size; 602 + u16 rq_num_wqes; 603 + #elif defined(__LITTLE_ENDIAN) 604 + u16 rq_num_wqes; 605 + u16 rq_buffer_size; 606 + #endif 607 + }; 608 + 609 + /* 610 + * iSCSI firmware init request 2 611 + */ 612 + struct iscsi_kwqe_init2 { 613 + #if defined(__BIG_ENDIAN) 614 + struct iscsi_kwqe_header hdr; 615 + u16 max_cq_sqn; 616 + #elif defined(__LITTLE_ENDIAN) 617 + u16 max_cq_sqn; 618 + struct iscsi_kwqe_header hdr; 619 + #endif 620 + u32 error_bit_map[2]; 621 + u32 reserved1[5]; 622 + }; 623 + 624 + /* 625 + * Initial iSCSI connection offload request 1 626 + */ 627 + struct iscsi_kwqe_conn_offload1 { 628 + #if defined(__BIG_ENDIAN) 629 + struct iscsi_kwqe_header hdr; 630 + u16 iscsi_conn_id; 631 + #elif defined(__LITTLE_ENDIAN) 632 + u16 iscsi_conn_id; 633 + struct iscsi_kwqe_header hdr; 634 + #endif 635 + u32 sq_page_table_addr_lo; 636 + u32 sq_page_table_addr_hi; 637 + u32 cq_page_table_addr_lo; 638 + u32 cq_page_table_addr_hi; 639 + u32 reserved0[3]; 640 + }; 641 + 642 + /* 643 + * iSCSI Page Table Entry (PTE) 644 + */ 645 + struct iscsi_pte { 646 + u32 hi; 647 + u32 lo; 648 + }; 649 + 650 + /* 651 + * Initial iSCSI connection offload request 2 652 + */ 653 + struct iscsi_kwqe_conn_offload2 { 654 + #if defined(__BIG_ENDIAN) 655 + struct iscsi_kwqe_header hdr; 656 + u16 reserved0; 657 + #elif defined(__LITTLE_ENDIAN) 658 + u16 reserved0; 659 + struct iscsi_kwqe_header hdr; 660 + #endif 661 + u32 rq_page_table_addr_lo; 662 + u32 rq_page_table_addr_hi; 663 + struct iscsi_pte sq_first_pte; 664 + struct iscsi_pte cq_first_pte; 665 + u32 num_additional_wqes; 666 + }; 667 + 668 + 669 + /* 670 + * Initial iSCSI connection offload request 3 671 + */ 672 + struct iscsi_kwqe_conn_offload3 { 673 + #if defined(__BIG_ENDIAN) 674 + struct iscsi_kwqe_header hdr; 675 + u16 reserved0; 676 + #elif defined(__LITTLE_ENDIAN) 677 + u16 
reserved0; 678 + struct iscsi_kwqe_header hdr; 679 + #endif 680 + u32 reserved1; 681 + struct iscsi_pte qp_first_pte[3]; 682 + }; 683 + 684 + 685 + /* 686 + * iSCSI connection update request 687 + */ 688 + struct iscsi_kwqe_conn_update { 689 + #if defined(__BIG_ENDIAN) 690 + struct iscsi_kwqe_header hdr; 691 + u16 reserved0; 692 + #elif defined(__LITTLE_ENDIAN) 693 + u16 reserved0; 694 + struct iscsi_kwqe_header hdr; 695 + #endif 696 + #if defined(__BIG_ENDIAN) 697 + u8 session_error_recovery_level; 698 + u8 max_outstanding_r2ts; 699 + u8 reserved2; 700 + u8 conn_flags; 701 + #define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0) 702 + #define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST_SHIFT 0 703 + #define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST (0x1<<1) 704 + #define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST_SHIFT 1 705 + #define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T (0x1<<2) 706 + #define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2 707 + #define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3) 708 + #define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3 709 + #define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4) 710 + #define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4 711 + #elif defined(__LITTLE_ENDIAN) 712 + u8 conn_flags; 713 + #define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0) 714 + #define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST_SHIFT 0 715 + #define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST (0x1<<1) 716 + #define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST_SHIFT 1 717 + #define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T (0x1<<2) 718 + #define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2 719 + #define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3) 720 + #define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3 721 + #define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4) 722 + #define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4 723 + u8 reserved2; 724 + u8 max_outstanding_r2ts; 725 + u8 session_error_recovery_level; 726 + #endif 727 + u32 context_id; 728 + u32 max_send_pdu_length; 729 + u32 max_recv_pdu_length; 730 + u32 
first_burst_length; 731 + u32 max_burst_length; 732 + u32 exp_stat_sn; 733 + }; 734 + 735 + /* 736 + * iSCSI destroy connection request 737 + */ 738 + struct iscsi_kwqe_conn_destroy { 739 + #if defined(__BIG_ENDIAN) 740 + struct iscsi_kwqe_header hdr; 741 + u16 reserved0; 742 + #elif defined(__LITTLE_ENDIAN) 743 + u16 reserved0; 744 + struct iscsi_kwqe_header hdr; 745 + #endif 746 + u32 context_id; 747 + u32 reserved1[6]; 748 + }; 749 + 750 + /* 751 + * iSCSI KWQ WQE 752 + */ 753 + union iscsi_kwqe { 754 + struct iscsi_kwqe_init1 init1; 755 + struct iscsi_kwqe_init2 init2; 756 + struct iscsi_kwqe_conn_offload1 conn_offload1; 757 + struct iscsi_kwqe_conn_offload2 conn_offload2; 758 + struct iscsi_kwqe_conn_update conn_update; 759 + struct iscsi_kwqe_conn_destroy conn_destroy; 760 + }; 761 + 762 + /* 763 + * iSCSI Login SQ WQE 764 + */ 765 + struct bnx2i_login_request { 766 + #if defined(__BIG_ENDIAN) 767 + u8 op_code; 768 + u8 op_attr; 769 + #define ISCSI_LOGIN_REQUEST_NEXT_STAGE (0x3<<0) 770 + #define ISCSI_LOGIN_REQUEST_NEXT_STAGE_SHIFT 0 771 + #define ISCSI_LOGIN_REQUEST_CURRENT_STAGE (0x3<<2) 772 + #define ISCSI_LOGIN_REQUEST_CURRENT_STAGE_SHIFT 2 773 + #define ISCSI_LOGIN_REQUEST_RESERVED0 (0x3<<4) 774 + #define ISCSI_LOGIN_REQUEST_RESERVED0_SHIFT 4 775 + #define ISCSI_LOGIN_REQUEST_CONT (0x1<<6) 776 + #define ISCSI_LOGIN_REQUEST_CONT_SHIFT 6 777 + #define ISCSI_LOGIN_REQUEST_TRANSIT (0x1<<7) 778 + #define ISCSI_LOGIN_REQUEST_TRANSIT_SHIFT 7 779 + u8 version_max; 780 + u8 version_min; 781 + #elif defined(__LITTLE_ENDIAN) 782 + u8 version_min; 783 + u8 version_max; 784 + u8 op_attr; 785 + #define ISCSI_LOGIN_REQUEST_NEXT_STAGE (0x3<<0) 786 + #define ISCSI_LOGIN_REQUEST_NEXT_STAGE_SHIFT 0 787 + #define ISCSI_LOGIN_REQUEST_CURRENT_STAGE (0x3<<2) 788 + #define ISCSI_LOGIN_REQUEST_CURRENT_STAGE_SHIFT 2 789 + #define ISCSI_LOGIN_REQUEST_RESERVED0 (0x3<<4) 790 + #define ISCSI_LOGIN_REQUEST_RESERVED0_SHIFT 4 791 + #define ISCSI_LOGIN_REQUEST_CONT (0x1<<6) 792 + #define 
ISCSI_LOGIN_REQUEST_CONT_SHIFT 6 793 + #define ISCSI_LOGIN_REQUEST_TRANSIT (0x1<<7) 794 + #define ISCSI_LOGIN_REQUEST_TRANSIT_SHIFT 7 795 + u8 op_code; 796 + #endif 797 + u32 data_length; 798 + u32 isid_lo; 799 + #if defined(__BIG_ENDIAN) 800 + u16 isid_hi; 801 + u16 tsih; 802 + #elif defined(__LITTLE_ENDIAN) 803 + u16 tsih; 804 + u16 isid_hi; 805 + #endif 806 + #if defined(__BIG_ENDIAN) 807 + u16 reserved2; 808 + u16 itt; 809 + #define ISCSI_LOGIN_REQUEST_INDEX (0x3FFF<<0) 810 + #define ISCSI_LOGIN_REQUEST_INDEX_SHIFT 0 811 + #define ISCSI_LOGIN_REQUEST_TYPE (0x3<<14) 812 + #define ISCSI_LOGIN_REQUEST_TYPE_SHIFT 14 813 + #elif defined(__LITTLE_ENDIAN) 814 + u16 itt; 815 + #define ISCSI_LOGIN_REQUEST_INDEX (0x3FFF<<0) 816 + #define ISCSI_LOGIN_REQUEST_INDEX_SHIFT 0 817 + #define ISCSI_LOGIN_REQUEST_TYPE (0x3<<14) 818 + #define ISCSI_LOGIN_REQUEST_TYPE_SHIFT 14 819 + u16 reserved2; 820 + #endif 821 + #if defined(__BIG_ENDIAN) 822 + u16 cid; 823 + u16 reserved3; 824 + #elif defined(__LITTLE_ENDIAN) 825 + u16 reserved3; 826 + u16 cid; 827 + #endif 828 + u32 cmd_sn; 829 + u32 exp_stat_sn; 830 + u32 reserved4; 831 + u32 resp_bd_list_addr_lo; 832 + u32 resp_bd_list_addr_hi; 833 + u32 resp_buffer; 834 + #define ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0) 835 + #define ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0 836 + #define ISCSI_LOGIN_REQUEST_NUM_RESP_BDS (0xFF<<24) 837 + #define ISCSI_LOGIN_REQUEST_NUM_RESP_BDS_SHIFT 24 838 + #if defined(__BIG_ENDIAN) 839 + u16 reserved8; 840 + u8 reserved7; 841 + u8 flags; 842 + #define ISCSI_LOGIN_REQUEST_RESERVED5 (0x3<<0) 843 + #define ISCSI_LOGIN_REQUEST_RESERVED5_SHIFT 0 844 + #define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2) 845 + #define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2 846 + #define ISCSI_LOGIN_REQUEST_RESERVED6 (0x1F<<3) 847 + #define ISCSI_LOGIN_REQUEST_RESERVED6_SHIFT 3 848 + #elif defined(__LITTLE_ENDIAN) 849 + u8 flags; 850 + #define ISCSI_LOGIN_REQUEST_RESERVED5 (0x3<<0) 851 + #define 
ISCSI_LOGIN_REQUEST_RESERVED5_SHIFT 0 852 + #define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2) 853 + #define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2 854 + #define ISCSI_LOGIN_REQUEST_RESERVED6 (0x1F<<3) 855 + #define ISCSI_LOGIN_REQUEST_RESERVED6_SHIFT 3 856 + u8 reserved7; 857 + u16 reserved8; 858 + #endif 859 + u32 bd_list_addr_lo; 860 + u32 bd_list_addr_hi; 861 + #if defined(__BIG_ENDIAN) 862 + u8 cq_index; 863 + u8 reserved10; 864 + u8 reserved9; 865 + u8 num_bds; 866 + #elif defined(__LITTLE_ENDIAN) 867 + u8 num_bds; 868 + u8 reserved9; 869 + u8 reserved10; 870 + u8 cq_index; 871 + #endif 872 + }; 873 + 874 + 875 + /* 876 + * iSCSI Login CQE 877 + */ 878 + struct bnx2i_login_response { 879 + #if defined(__BIG_ENDIAN) 880 + u8 op_code; 881 + u8 response_flags; 882 + #define ISCSI_LOGIN_RESPONSE_NEXT_STAGE (0x3<<0) 883 + #define ISCSI_LOGIN_RESPONSE_NEXT_STAGE_SHIFT 0 884 + #define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE (0x3<<2) 885 + #define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE_SHIFT 2 886 + #define ISCSI_LOGIN_RESPONSE_RESERVED0 (0x3<<4) 887 + #define ISCSI_LOGIN_RESPONSE_RESERVED0_SHIFT 4 888 + #define ISCSI_LOGIN_RESPONSE_CONT (0x1<<6) 889 + #define ISCSI_LOGIN_RESPONSE_CONT_SHIFT 6 890 + #define ISCSI_LOGIN_RESPONSE_TRANSIT (0x1<<7) 891 + #define ISCSI_LOGIN_RESPONSE_TRANSIT_SHIFT 7 892 + u8 version_max; 893 + u8 version_active; 894 + #elif defined(__LITTLE_ENDIAN) 895 + u8 version_active; 896 + u8 version_max; 897 + u8 response_flags; 898 + #define ISCSI_LOGIN_RESPONSE_NEXT_STAGE (0x3<<0) 899 + #define ISCSI_LOGIN_RESPONSE_NEXT_STAGE_SHIFT 0 900 + #define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE (0x3<<2) 901 + #define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE_SHIFT 2 902 + #define ISCSI_LOGIN_RESPONSE_RESERVED0 (0x3<<4) 903 + #define ISCSI_LOGIN_RESPONSE_RESERVED0_SHIFT 4 904 + #define ISCSI_LOGIN_RESPONSE_CONT (0x1<<6) 905 + #define ISCSI_LOGIN_RESPONSE_CONT_SHIFT 6 906 + #define ISCSI_LOGIN_RESPONSE_TRANSIT (0x1<<7) 907 + #define 
ISCSI_LOGIN_RESPONSE_TRANSIT_SHIFT 7 908 + u8 op_code; 909 + #endif 910 + u32 data_length; 911 + u32 exp_cmd_sn; 912 + u32 max_cmd_sn; 913 + u32 reserved1[2]; 914 + #if defined(__BIG_ENDIAN) 915 + u16 reserved3; 916 + u8 err_code; 917 + u8 reserved2; 918 + #elif defined(__LITTLE_ENDIAN) 919 + u8 reserved2; 920 + u8 err_code; 921 + u16 reserved3; 922 + #endif 923 + u32 stat_sn; 924 + u32 isid_lo; 925 + #if defined(__BIG_ENDIAN) 926 + u16 isid_hi; 927 + u16 tsih; 928 + #elif defined(__LITTLE_ENDIAN) 929 + u16 tsih; 930 + u16 isid_hi; 931 + #endif 932 + #if defined(__BIG_ENDIAN) 933 + u8 status_class; 934 + u8 status_detail; 935 + u16 reserved4; 936 + #elif defined(__LITTLE_ENDIAN) 937 + u16 reserved4; 938 + u8 status_detail; 939 + u8 status_class; 940 + #endif 941 + u32 reserved5[3]; 942 + #if defined(__BIG_ENDIAN) 943 + u16 reserved6; 944 + u16 itt; 945 + #define ISCSI_LOGIN_RESPONSE_INDEX (0x3FFF<<0) 946 + #define ISCSI_LOGIN_RESPONSE_INDEX_SHIFT 0 947 + #define ISCSI_LOGIN_RESPONSE_TYPE (0x3<<14) 948 + #define ISCSI_LOGIN_RESPONSE_TYPE_SHIFT 14 949 + #elif defined(__LITTLE_ENDIAN) 950 + u16 itt; 951 + #define ISCSI_LOGIN_RESPONSE_INDEX (0x3FFF<<0) 952 + #define ISCSI_LOGIN_RESPONSE_INDEX_SHIFT 0 953 + #define ISCSI_LOGIN_RESPONSE_TYPE (0x3<<14) 954 + #define ISCSI_LOGIN_RESPONSE_TYPE_SHIFT 14 955 + u16 reserved6; 956 + #endif 957 + u32 cq_req_sn; 958 + }; 959 + 960 + 961 + /* 962 + * iSCSI Logout SQ WQE 963 + */ 964 + struct bnx2i_logout_request { 965 + #if defined(__BIG_ENDIAN) 966 + u8 op_code; 967 + u8 op_attr; 968 + #define ISCSI_LOGOUT_REQUEST_REASON (0x7F<<0) 969 + #define ISCSI_LOGOUT_REQUEST_REASON_SHIFT 0 970 + #define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE (0x1<<7) 971 + #define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE_SHIFT 7 972 + u16 reserved0; 973 + #elif defined(__LITTLE_ENDIAN) 974 + u16 reserved0; 975 + u8 op_attr; 976 + #define ISCSI_LOGOUT_REQUEST_REASON (0x7F<<0) 977 + #define ISCSI_LOGOUT_REQUEST_REASON_SHIFT 0 978 + #define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE 
(0x1<<7) 979 + #define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE_SHIFT 7 980 + u8 op_code; 981 + #endif 982 + u32 data_length; 983 + u32 reserved1[2]; 984 + #if defined(__BIG_ENDIAN) 985 + u16 reserved2; 986 + u16 itt; 987 + #define ISCSI_LOGOUT_REQUEST_INDEX (0x3FFF<<0) 988 + #define ISCSI_LOGOUT_REQUEST_INDEX_SHIFT 0 989 + #define ISCSI_LOGOUT_REQUEST_TYPE (0x3<<14) 990 + #define ISCSI_LOGOUT_REQUEST_TYPE_SHIFT 14 991 + #elif defined(__LITTLE_ENDIAN) 992 + u16 itt; 993 + #define ISCSI_LOGOUT_REQUEST_INDEX (0x3FFF<<0) 994 + #define ISCSI_LOGOUT_REQUEST_INDEX_SHIFT 0 995 + #define ISCSI_LOGOUT_REQUEST_TYPE (0x3<<14) 996 + #define ISCSI_LOGOUT_REQUEST_TYPE_SHIFT 14 997 + u16 reserved2; 998 + #endif 999 + #if defined(__BIG_ENDIAN) 1000 + u16 cid; 1001 + u16 reserved3; 1002 + #elif defined(__LITTLE_ENDIAN) 1003 + u16 reserved3; 1004 + u16 cid; 1005 + #endif 1006 + u32 cmd_sn; 1007 + u32 reserved4[5]; 1008 + u32 zero_fill; 1009 + u32 bd_list_addr_lo; 1010 + u32 bd_list_addr_hi; 1011 + #if defined(__BIG_ENDIAN) 1012 + u8 cq_index; 1013 + u8 reserved6; 1014 + u8 reserved5; 1015 + u8 num_bds; 1016 + #elif defined(__LITTLE_ENDIAN) 1017 + u8 num_bds; 1018 + u8 reserved5; 1019 + u8 reserved6; 1020 + u8 cq_index; 1021 + #endif 1022 + }; 1023 + 1024 + 1025 + /* 1026 + * iSCSI Logout CQE 1027 + */ 1028 + struct bnx2i_logout_response { 1029 + #if defined(__BIG_ENDIAN) 1030 + u8 op_code; 1031 + u8 reserved1; 1032 + u8 response; 1033 + u8 reserved0; 1034 + #elif defined(__LITTLE_ENDIAN) 1035 + u8 reserved0; 1036 + u8 response; 1037 + u8 reserved1; 1038 + u8 op_code; 1039 + #endif 1040 + u32 reserved2; 1041 + u32 exp_cmd_sn; 1042 + u32 max_cmd_sn; 1043 + u32 reserved3[2]; 1044 + #if defined(__BIG_ENDIAN) 1045 + u16 reserved5; 1046 + u8 err_code; 1047 + u8 reserved4; 1048 + #elif defined(__LITTLE_ENDIAN) 1049 + u8 reserved4; 1050 + u8 err_code; 1051 + u16 reserved5; 1052 + #endif 1053 + u32 reserved6[3]; 1054 + #if defined(__BIG_ENDIAN) 1055 + u16 time_to_wait; 1056 + u16 time_to_retain; 1057 
+ #elif defined(__LITTLE_ENDIAN) 1058 + u16 time_to_retain; 1059 + u16 time_to_wait; 1060 + #endif 1061 + u32 reserved7[3]; 1062 + #if defined(__BIG_ENDIAN) 1063 + u16 reserved8; 1064 + u16 itt; 1065 + #define ISCSI_LOGOUT_RESPONSE_INDEX (0x3FFF<<0) 1066 + #define ISCSI_LOGOUT_RESPONSE_INDEX_SHIFT 0 1067 + #define ISCSI_LOGOUT_RESPONSE_TYPE (0x3<<14) 1068 + #define ISCSI_LOGOUT_RESPONSE_TYPE_SHIFT 14 1069 + #elif defined(__LITTLE_ENDIAN) 1070 + u16 itt; 1071 + #define ISCSI_LOGOUT_RESPONSE_INDEX (0x3FFF<<0) 1072 + #define ISCSI_LOGOUT_RESPONSE_INDEX_SHIFT 0 1073 + #define ISCSI_LOGOUT_RESPONSE_TYPE (0x3<<14) 1074 + #define ISCSI_LOGOUT_RESPONSE_TYPE_SHIFT 14 1075 + u16 reserved8; 1076 + #endif 1077 + u32 cq_req_sn; 1078 + }; 1079 + 1080 + 1081 + /* 1082 + * iSCSI Nop-In CQE 1083 + */ 1084 + struct bnx2i_nop_in_msg { 1085 + #if defined(__BIG_ENDIAN) 1086 + u8 op_code; 1087 + u8 reserved1; 1088 + u16 reserved0; 1089 + #elif defined(__LITTLE_ENDIAN) 1090 + u16 reserved0; 1091 + u8 reserved1; 1092 + u8 op_code; 1093 + #endif 1094 + u32 data_length; 1095 + u32 exp_cmd_sn; 1096 + u32 max_cmd_sn; 1097 + u32 ttt; 1098 + u32 reserved2; 1099 + #if defined(__BIG_ENDIAN) 1100 + u16 reserved4; 1101 + u8 err_code; 1102 + u8 reserved3; 1103 + #elif defined(__LITTLE_ENDIAN) 1104 + u8 reserved3; 1105 + u8 err_code; 1106 + u16 reserved4; 1107 + #endif 1108 + u32 reserved5; 1109 + u32 lun[2]; 1110 + u32 reserved6[4]; 1111 + #if defined(__BIG_ENDIAN) 1112 + u16 reserved7; 1113 + u16 itt; 1114 + #define ISCSI_NOP_IN_MSG_INDEX (0x3FFF<<0) 1115 + #define ISCSI_NOP_IN_MSG_INDEX_SHIFT 0 1116 + #define ISCSI_NOP_IN_MSG_TYPE (0x3<<14) 1117 + #define ISCSI_NOP_IN_MSG_TYPE_SHIFT 14 1118 + #elif defined(__LITTLE_ENDIAN) 1119 + u16 itt; 1120 + #define ISCSI_NOP_IN_MSG_INDEX (0x3FFF<<0) 1121 + #define ISCSI_NOP_IN_MSG_INDEX_SHIFT 0 1122 + #define ISCSI_NOP_IN_MSG_TYPE (0x3<<14) 1123 + #define ISCSI_NOP_IN_MSG_TYPE_SHIFT 14 1124 + u16 reserved7; 1125 + #endif 1126 + u32 cq_req_sn; 1127 + }; 1128 + 
1129 + 1130 + /* 1131 + * iSCSI NOP-OUT SQ WQE 1132 + */ 1133 + struct bnx2i_nop_out_request { 1134 + #if defined(__BIG_ENDIAN) 1135 + u8 op_code; 1136 + u8 op_attr; 1137 + #define ISCSI_NOP_OUT_REQUEST_RESERVED1 (0x7F<<0) 1138 + #define ISCSI_NOP_OUT_REQUEST_RESERVED1_SHIFT 0 1139 + #define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE (0x1<<7) 1140 + #define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE_SHIFT 7 1141 + u16 reserved0; 1142 + #elif defined(__LITTLE_ENDIAN) 1143 + u16 reserved0; 1144 + u8 op_attr; 1145 + #define ISCSI_NOP_OUT_REQUEST_RESERVED1 (0x7F<<0) 1146 + #define ISCSI_NOP_OUT_REQUEST_RESERVED1_SHIFT 0 1147 + #define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE (0x1<<7) 1148 + #define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE_SHIFT 7 1149 + u8 op_code; 1150 + #endif 1151 + u32 data_length; 1152 + u32 lun[2]; 1153 + #if defined(__BIG_ENDIAN) 1154 + u16 reserved2; 1155 + u16 itt; 1156 + #define ISCSI_NOP_OUT_REQUEST_INDEX (0x3FFF<<0) 1157 + #define ISCSI_NOP_OUT_REQUEST_INDEX_SHIFT 0 1158 + #define ISCSI_NOP_OUT_REQUEST_TYPE (0x3<<14) 1159 + #define ISCSI_NOP_OUT_REQUEST_TYPE_SHIFT 14 1160 + #elif defined(__LITTLE_ENDIAN) 1161 + u16 itt; 1162 + #define ISCSI_NOP_OUT_REQUEST_INDEX (0x3FFF<<0) 1163 + #define ISCSI_NOP_OUT_REQUEST_INDEX_SHIFT 0 1164 + #define ISCSI_NOP_OUT_REQUEST_TYPE (0x3<<14) 1165 + #define ISCSI_NOP_OUT_REQUEST_TYPE_SHIFT 14 1166 + u16 reserved2; 1167 + #endif 1168 + u32 ttt; 1169 + u32 cmd_sn; 1170 + u32 reserved3[2]; 1171 + u32 resp_bd_list_addr_lo; 1172 + u32 resp_bd_list_addr_hi; 1173 + u32 resp_buffer; 1174 + #define ISCSI_NOP_OUT_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0) 1175 + #define ISCSI_NOP_OUT_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0 1176 + #define ISCSI_NOP_OUT_REQUEST_NUM_RESP_BDS (0xFF<<24) 1177 + #define ISCSI_NOP_OUT_REQUEST_NUM_RESP_BDS_SHIFT 24 1178 + #if defined(__BIG_ENDIAN) 1179 + u16 reserved7; 1180 + u8 reserved6; 1181 + u8 flags; 1182 + #define ISCSI_NOP_OUT_REQUEST_RESERVED4 (0x1<<0) 1183 + #define ISCSI_NOP_OUT_REQUEST_RESERVED4_SHIFT 0 1184 + #define 
ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION (0x1<<1) 1185 + #define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION_SHIFT 1 1186 + #define ISCSI_NOP_OUT_REQUEST_ZERO_FILL (0x3F<<2) 1187 + #define ISCSI_NOP_OUT_REQUEST_ZERO_FILL_SHIFT 2 1188 + #elif defined(__LITTLE_ENDIAN) 1189 + u8 flags; 1190 + #define ISCSI_NOP_OUT_REQUEST_RESERVED4 (0x1<<0) 1191 + #define ISCSI_NOP_OUT_REQUEST_RESERVED4_SHIFT 0 1192 + #define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION (0x1<<1) 1193 + #define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION_SHIFT 1 1194 + #define ISCSI_NOP_OUT_REQUEST_ZERO_FILL (0x3F<<2) 1195 + #define ISCSI_NOP_OUT_REQUEST_ZERO_FILL_SHIFT 2 1196 + u8 reserved6; 1197 + u16 reserved7; 1198 + #endif 1199 + u32 bd_list_addr_lo; 1200 + u32 bd_list_addr_hi; 1201 + #if defined(__BIG_ENDIAN) 1202 + u8 cq_index; 1203 + u8 reserved9; 1204 + u8 reserved8; 1205 + u8 num_bds; 1206 + #elif defined(__LITTLE_ENDIAN) 1207 + u8 num_bds; 1208 + u8 reserved8; 1209 + u8 reserved9; 1210 + u8 cq_index; 1211 + #endif 1212 + }; 1213 + 1214 + /* 1215 + * iSCSI Reject CQE 1216 + */ 1217 + struct bnx2i_reject_msg { 1218 + #if defined(__BIG_ENDIAN) 1219 + u8 op_code; 1220 + u8 reserved1; 1221 + u8 reason; 1222 + u8 reserved0; 1223 + #elif defined(__LITTLE_ENDIAN) 1224 + u8 reserved0; 1225 + u8 reason; 1226 + u8 reserved1; 1227 + u8 op_code; 1228 + #endif 1229 + u32 data_length; 1230 + u32 exp_cmd_sn; 1231 + u32 max_cmd_sn; 1232 + u32 reserved2[2]; 1233 + #if defined(__BIG_ENDIAN) 1234 + u16 reserved4; 1235 + u8 err_code; 1236 + u8 reserved3; 1237 + #elif defined(__LITTLE_ENDIAN) 1238 + u8 reserved3; 1239 + u8 err_code; 1240 + u16 reserved4; 1241 + #endif 1242 + u32 reserved5[8]; 1243 + u32 cq_req_sn; 1244 + }; 1245 + 1246 + /* 1247 + * bnx2i iSCSI TMF SQ WQE 1248 + */ 1249 + struct bnx2i_tmf_request { 1250 + #if defined(__BIG_ENDIAN) 1251 + u8 op_code; 1252 + u8 op_attr; 1253 + #define ISCSI_TMF_REQUEST_FUNCTION (0x7F<<0) 1254 + #define ISCSI_TMF_REQUEST_FUNCTION_SHIFT 0 1255 + #define ISCSI_TMF_REQUEST_ALWAYS_ONE 
(0x1<<7) 1256 + #define ISCSI_TMF_REQUEST_ALWAYS_ONE_SHIFT 7 1257 + u16 reserved0; 1258 + #elif defined(__LITTLE_ENDIAN) 1259 + u16 reserved0; 1260 + u8 op_attr; 1261 + #define ISCSI_TMF_REQUEST_FUNCTION (0x7F<<0) 1262 + #define ISCSI_TMF_REQUEST_FUNCTION_SHIFT 0 1263 + #define ISCSI_TMF_REQUEST_ALWAYS_ONE (0x1<<7) 1264 + #define ISCSI_TMF_REQUEST_ALWAYS_ONE_SHIFT 7 1265 + u8 op_code; 1266 + #endif 1267 + u32 data_length; 1268 + u32 lun[2]; 1269 + #if defined(__BIG_ENDIAN) 1270 + u16 reserved1; 1271 + u16 itt; 1272 + #define ISCSI_TMF_REQUEST_INDEX (0x3FFF<<0) 1273 + #define ISCSI_TMF_REQUEST_INDEX_SHIFT 0 1274 + #define ISCSI_TMF_REQUEST_TYPE (0x3<<14) 1275 + #define ISCSI_TMF_REQUEST_TYPE_SHIFT 14 1276 + #elif defined(__LITTLE_ENDIAN) 1277 + u16 itt; 1278 + #define ISCSI_TMF_REQUEST_INDEX (0x3FFF<<0) 1279 + #define ISCSI_TMF_REQUEST_INDEX_SHIFT 0 1280 + #define ISCSI_TMF_REQUEST_TYPE (0x3<<14) 1281 + #define ISCSI_TMF_REQUEST_TYPE_SHIFT 14 1282 + u16 reserved1; 1283 + #endif 1284 + u32 ref_itt; 1285 + u32 cmd_sn; 1286 + u32 reserved2; 1287 + u32 ref_cmd_sn; 1288 + u32 reserved3[3]; 1289 + u32 zero_fill; 1290 + u32 bd_list_addr_lo; 1291 + u32 bd_list_addr_hi; 1292 + #if defined(__BIG_ENDIAN) 1293 + u8 cq_index; 1294 + u8 reserved5; 1295 + u8 reserved4; 1296 + u8 num_bds; 1297 + #elif defined(__LITTLE_ENDIAN) 1298 + u8 num_bds; 1299 + u8 reserved4; 1300 + u8 reserved5; 1301 + u8 cq_index; 1302 + #endif 1303 + }; 1304 + 1305 + /* 1306 + * iSCSI Text SQ WQE 1307 + */ 1308 + struct bnx2i_text_request { 1309 + #if defined(__BIG_ENDIAN) 1310 + u8 op_code; 1311 + u8 op_attr; 1312 + #define ISCSI_TEXT_REQUEST_RESERVED1 (0x3F<<0) 1313 + #define ISCSI_TEXT_REQUEST_RESERVED1_SHIFT 0 1314 + #define ISCSI_TEXT_REQUEST_CONT (0x1<<6) 1315 + #define ISCSI_TEXT_REQUEST_CONT_SHIFT 6 1316 + #define ISCSI_TEXT_REQUEST_FINAL (0x1<<7) 1317 + #define ISCSI_TEXT_REQUEST_FINAL_SHIFT 7 1318 + u16 reserved0; 1319 + #elif defined(__LITTLE_ENDIAN) 1320 + u16 reserved0; 1321 + u8 op_attr; 1322 
+ #define ISCSI_TEXT_REQUEST_RESERVED1 (0x3F<<0) 1323 + #define ISCSI_TEXT_REQUEST_RESERVED1_SHIFT 0 1324 + #define ISCSI_TEXT_REQUEST_CONT (0x1<<6) 1325 + #define ISCSI_TEXT_REQUEST_CONT_SHIFT 6 1326 + #define ISCSI_TEXT_REQUEST_FINAL (0x1<<7) 1327 + #define ISCSI_TEXT_REQUEST_FINAL_SHIFT 7 1328 + u8 op_code; 1329 + #endif 1330 + u32 data_length; 1331 + u32 lun[2]; 1332 + #if defined(__BIG_ENDIAN) 1333 + u16 reserved3; 1334 + u16 itt; 1335 + #define ISCSI_TEXT_REQUEST_INDEX (0x3FFF<<0) 1336 + #define ISCSI_TEXT_REQUEST_INDEX_SHIFT 0 1337 + #define ISCSI_TEXT_REQUEST_TYPE (0x3<<14) 1338 + #define ISCSI_TEXT_REQUEST_TYPE_SHIFT 14 1339 + #elif defined(__LITTLE_ENDIAN) 1340 + u16 itt; 1341 + #define ISCSI_TEXT_REQUEST_INDEX (0x3FFF<<0) 1342 + #define ISCSI_TEXT_REQUEST_INDEX_SHIFT 0 1343 + #define ISCSI_TEXT_REQUEST_TYPE (0x3<<14) 1344 + #define ISCSI_TEXT_REQUEST_TYPE_SHIFT 14 1345 + u16 reserved3; 1346 + #endif 1347 + u32 ttt; 1348 + u32 cmd_sn; 1349 + u32 reserved4[2]; 1350 + u32 resp_bd_list_addr_lo; 1351 + u32 resp_bd_list_addr_hi; 1352 + u32 resp_buffer; 1353 + #define ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0) 1354 + #define ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0 1355 + #define ISCSI_TEXT_REQUEST_NUM_RESP_BDS (0xFF<<24) 1356 + #define ISCSI_TEXT_REQUEST_NUM_RESP_BDS_SHIFT 24 1357 + u32 zero_fill; 1358 + u32 bd_list_addr_lo; 1359 + u32 bd_list_addr_hi; 1360 + #if defined(__BIG_ENDIAN) 1361 + u8 cq_index; 1362 + u8 reserved7; 1363 + u8 reserved6; 1364 + u8 num_bds; 1365 + #elif defined(__LITTLE_ENDIAN) 1366 + u8 num_bds; 1367 + u8 reserved6; 1368 + u8 reserved7; 1369 + u8 cq_index; 1370 + #endif 1371 + }; 1372 + 1373 + /* 1374 + * iSCSI SQ WQE 1375 + */ 1376 + union iscsi_request { 1377 + struct bnx2i_cmd_request cmd; 1378 + struct bnx2i_tmf_request tmf; 1379 + struct bnx2i_nop_out_request nop_out; 1380 + struct bnx2i_login_request login_req; 1381 + struct bnx2i_text_request text; 1382 + struct bnx2i_logout_request logout_req; 1383 + struct 
bnx2i_cleanup_request cleanup; 1384 + }; 1385 + 1386 + 1387 + /* 1388 + * iSCSI TMF CQE 1389 + */ 1390 + struct bnx2i_tmf_response { 1391 + #if defined(__BIG_ENDIAN) 1392 + u8 op_code; 1393 + u8 reserved1; 1394 + u8 response; 1395 + u8 reserved0; 1396 + #elif defined(__LITTLE_ENDIAN) 1397 + u8 reserved0; 1398 + u8 response; 1399 + u8 reserved1; 1400 + u8 op_code; 1401 + #endif 1402 + u32 reserved2; 1403 + u32 exp_cmd_sn; 1404 + u32 max_cmd_sn; 1405 + u32 reserved3[2]; 1406 + #if defined(__BIG_ENDIAN) 1407 + u16 reserved5; 1408 + u8 err_code; 1409 + u8 reserved4; 1410 + #elif defined(__LITTLE_ENDIAN) 1411 + u8 reserved4; 1412 + u8 err_code; 1413 + u16 reserved5; 1414 + #endif 1415 + u32 reserved6[7]; 1416 + #if defined(__BIG_ENDIAN) 1417 + u16 reserved7; 1418 + u16 itt; 1419 + #define ISCSI_TMF_RESPONSE_INDEX (0x3FFF<<0) 1420 + #define ISCSI_TMF_RESPONSE_INDEX_SHIFT 0 1421 + #define ISCSI_TMF_RESPONSE_TYPE (0x3<<14) 1422 + #define ISCSI_TMF_RESPONSE_TYPE_SHIFT 14 1423 + #elif defined(__LITTLE_ENDIAN) 1424 + u16 itt; 1425 + #define ISCSI_TMF_RESPONSE_INDEX (0x3FFF<<0) 1426 + #define ISCSI_TMF_RESPONSE_INDEX_SHIFT 0 1427 + #define ISCSI_TMF_RESPONSE_TYPE (0x3<<14) 1428 + #define ISCSI_TMF_RESPONSE_TYPE_SHIFT 14 1429 + u16 reserved7; 1430 + #endif 1431 + u32 cq_req_sn; 1432 + }; 1433 + 1434 + /* 1435 + * iSCSI Text CQE 1436 + */ 1437 + struct bnx2i_text_response { 1438 + #if defined(__BIG_ENDIAN) 1439 + u8 op_code; 1440 + u8 response_flags; 1441 + #define ISCSI_TEXT_RESPONSE_RESERVED1 (0x3F<<0) 1442 + #define ISCSI_TEXT_RESPONSE_RESERVED1_SHIFT 0 1443 + #define ISCSI_TEXT_RESPONSE_CONT (0x1<<6) 1444 + #define ISCSI_TEXT_RESPONSE_CONT_SHIFT 6 1445 + #define ISCSI_TEXT_RESPONSE_FINAL (0x1<<7) 1446 + #define ISCSI_TEXT_RESPONSE_FINAL_SHIFT 7 1447 + u16 reserved0; 1448 + #elif defined(__LITTLE_ENDIAN) 1449 + u16 reserved0; 1450 + u8 response_flags; 1451 + #define ISCSI_TEXT_RESPONSE_RESERVED1 (0x3F<<0) 1452 + #define ISCSI_TEXT_RESPONSE_RESERVED1_SHIFT 0 1453 + #define 
ISCSI_TEXT_RESPONSE_CONT (0x1<<6) 1454 + #define ISCSI_TEXT_RESPONSE_CONT_SHIFT 6 1455 + #define ISCSI_TEXT_RESPONSE_FINAL (0x1<<7) 1456 + #define ISCSI_TEXT_RESPONSE_FINAL_SHIFT 7 1457 + u8 op_code; 1458 + #endif 1459 + u32 data_length; 1460 + u32 exp_cmd_sn; 1461 + u32 max_cmd_sn; 1462 + u32 ttt; 1463 + u32 reserved2; 1464 + #if defined(__BIG_ENDIAN) 1465 + u16 reserved4; 1466 + u8 err_code; 1467 + u8 reserved3; 1468 + #elif defined(__LITTLE_ENDIAN) 1469 + u8 reserved3; 1470 + u8 err_code; 1471 + u16 reserved4; 1472 + #endif 1473 + u32 reserved5; 1474 + u32 lun[2]; 1475 + u32 reserved6[4]; 1476 + #if defined(__BIG_ENDIAN) 1477 + u16 reserved7; 1478 + u16 itt; 1479 + #define ISCSI_TEXT_RESPONSE_INDEX (0x3FFF<<0) 1480 + #define ISCSI_TEXT_RESPONSE_INDEX_SHIFT 0 1481 + #define ISCSI_TEXT_RESPONSE_TYPE (0x3<<14) 1482 + #define ISCSI_TEXT_RESPONSE_TYPE_SHIFT 14 1483 + #elif defined(__LITTLE_ENDIAN) 1484 + u16 itt; 1485 + #define ISCSI_TEXT_RESPONSE_INDEX (0x3FFF<<0) 1486 + #define ISCSI_TEXT_RESPONSE_INDEX_SHIFT 0 1487 + #define ISCSI_TEXT_RESPONSE_TYPE (0x3<<14) 1488 + #define ISCSI_TEXT_RESPONSE_TYPE_SHIFT 14 1489 + u16 reserved7; 1490 + #endif 1491 + u32 cq_req_sn; 1492 + }; 1493 + 1494 + /* 1495 + * iSCSI CQE 1496 + */ 1497 + union iscsi_response { 1498 + struct bnx2i_cmd_response cmd; 1499 + struct bnx2i_tmf_response tmf; 1500 + struct bnx2i_login_response login_resp; 1501 + struct bnx2i_text_response text; 1502 + struct bnx2i_logout_response logout_resp; 1503 + struct bnx2i_cleanup_response cleanup; 1504 + struct bnx2i_reject_msg reject; 1505 + struct bnx2i_async_msg async; 1506 + struct bnx2i_nop_in_msg nop_in; 1507 + }; 1508 + 1509 + #endif /* __57XX_ISCSI_HSI_LINUX_LE__ */
+7
drivers/scsi/bnx2i/Kconfig
··· 1 + config SCSI_BNX2_ISCSI 2 + tristate "Broadcom NetXtreme II iSCSI support" 3 + select SCSI_ISCSI_ATTRS 4 + select CNIC 5 + ---help--- 6 + This driver supports iSCSI offload for the Broadcom NetXtreme II 7 + devices.
+3
drivers/scsi/bnx2i/Makefile
··· 1 + bnx2i-y := bnx2i_init.o bnx2i_hwi.o bnx2i_iscsi.o bnx2i_sysfs.o 2 + 3 + obj-$(CONFIG_SCSI_BNX2_ISCSI) += bnx2i.o
+771
drivers/scsi/bnx2i/bnx2i.h
··· 1 + /* bnx2i.h: Broadcom NetXtreme II iSCSI driver. 2 + * 3 + * Copyright (c) 2006 - 2009 Broadcom Corporation 4 + * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. 5 + * Copyright (c) 2007, 2008 Mike Christie 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License as published by 9 + * the Free Software Foundation. 10 + * 11 + * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) 12 + */ 13 + 14 + #ifndef _BNX2I_H_ 15 + #define _BNX2I_H_ 16 + 17 + #include <linux/module.h> 18 + #include <linux/moduleparam.h> 19 + 20 + #include <linux/errno.h> 21 + #include <linux/pci.h> 22 + #include <linux/spinlock.h> 23 + #include <linux/interrupt.h> 24 + #include <linux/sched.h> 25 + #include <linux/in.h> 26 + #include <linux/kfifo.h> 27 + #include <linux/netdevice.h> 28 + #include <linux/completion.h> 29 + 30 + #include <scsi/scsi_cmnd.h> 31 + #include <scsi/scsi_device.h> 32 + #include <scsi/scsi_eh.h> 33 + #include <scsi/scsi_host.h> 34 + #include <scsi/scsi.h> 35 + #include <scsi/iscsi_proto.h> 36 + #include <scsi/libiscsi.h> 37 + #include <scsi/scsi_transport_iscsi.h> 38 + 39 + #include "../../net/cnic_if.h" 40 + #include "57xx_iscsi_hsi.h" 41 + #include "57xx_iscsi_constants.h" 42 + 43 + #define BNX2_ISCSI_DRIVER_NAME "bnx2i" 44 + 45 + #define BNX2I_MAX_ADAPTERS 8 46 + 47 + #define ISCSI_MAX_CONNS_PER_HBA 128 48 + #define ISCSI_MAX_SESS_PER_HBA ISCSI_MAX_CONNS_PER_HBA 49 + #define ISCSI_MAX_CMDS_PER_SESS 128 50 + 51 + /* Total active commands across all connections supported by devices */ 52 + #define ISCSI_MAX_CMDS_PER_HBA_5708 (28 * (ISCSI_MAX_CMDS_PER_SESS - 1)) 53 + #define ISCSI_MAX_CMDS_PER_HBA_5709 (128 * (ISCSI_MAX_CMDS_PER_SESS - 1)) 54 + #define ISCSI_MAX_CMDS_PER_HBA_57710 (256 * (ISCSI_MAX_CMDS_PER_SESS - 1)) 55 + 56 + #define ISCSI_MAX_BDS_PER_CMD 32 57 + 58 + #define MAX_PAGES_PER_CTRL_STRUCT_POOL 8 59 + #define BNX2I_RESERVED_SLOW_PATH_CMD_SLOTS 4 60 + 61 + 
/* 5706/08 hardware has limit on maximum buffer size per BD it can handle */ 62 + #define MAX_BD_LENGTH 65535 63 + #define BD_SPLIT_SIZE 32768 64 + 65 + /* min, max & default values for SQ/RQ/CQ size, configurable via' modparam */ 66 + #define BNX2I_SQ_WQES_MIN 16 67 + #define BNX2I_570X_SQ_WQES_MAX 128 68 + #define BNX2I_5770X_SQ_WQES_MAX 512 69 + #define BNX2I_570X_SQ_WQES_DEFAULT 128 70 + #define BNX2I_5770X_SQ_WQES_DEFAULT 256 71 + 72 + #define BNX2I_570X_CQ_WQES_MAX 128 73 + #define BNX2I_5770X_CQ_WQES_MAX 512 74 + 75 + #define BNX2I_RQ_WQES_MIN 16 76 + #define BNX2I_RQ_WQES_MAX 32 77 + #define BNX2I_RQ_WQES_DEFAULT 16 78 + 79 + /* CCELLs per conn */ 80 + #define BNX2I_CCELLS_MIN 16 81 + #define BNX2I_CCELLS_MAX 96 82 + #define BNX2I_CCELLS_DEFAULT 64 83 + 84 + #define ITT_INVALID_SIGNATURE 0xFFFF 85 + 86 + #define ISCSI_CMD_CLEANUP_TIMEOUT 100 87 + 88 + #define BNX2I_CONN_CTX_BUF_SIZE 16384 89 + 90 + #define BNX2I_SQ_WQE_SIZE 64 91 + #define BNX2I_RQ_WQE_SIZE 256 92 + #define BNX2I_CQE_SIZE 64 93 + 94 + #define MB_KERNEL_CTX_SHIFT 8 95 + #define MB_KERNEL_CTX_SIZE (1 << MB_KERNEL_CTX_SHIFT) 96 + 97 + #define CTX_SHIFT 7 98 + #define GET_CID_NUM(cid_addr) ((cid_addr) >> CTX_SHIFT) 99 + 100 + #define CTX_OFFSET 0x10000 101 + #define MAX_CID_CNT 0x4000 102 + 103 + /* 5709 context registers */ 104 + #define BNX2_MQ_CONFIG2 0x00003d00 105 + #define BNX2_MQ_CONFIG2_CONT_SZ (0x7L<<4) 106 + #define BNX2_MQ_CONFIG2_FIRST_L4L5 (0x1fL<<8) 107 + 108 + /* 57710's BAR2 is mapped to doorbell registers */ 109 + #define BNX2X_DOORBELL_PCI_BAR 2 110 + #define BNX2X_MAX_CQS 8 111 + 112 + #define CNIC_ARM_CQE 1 113 + #define CNIC_DISARM_CQE 0 114 + 115 + #define REG_RD(__hba, offset) \ 116 + readl(__hba->regview + offset) 117 + #define REG_WR(__hba, offset, val) \ 118 + writel(val, __hba->regview + offset) 119 + 120 + 121 + /** 122 + * struct generic_pdu_resc - login pdu resource structure 123 + * 124 + * @req_buf: driver buffer used to stage payload associated with 125 + * the 
login request 126 + * @req_dma_addr: dma address for iscsi login request payload buffer 127 + * @req_buf_size: actual login request payload length 128 + * @req_wr_ptr: pointer into login request buffer when next data is 129 + * to be written 130 + * @resp_hdr: iscsi header where iscsi login response header is to 131 + * be recreated 132 + * @resp_buf: buffer to stage login response payload 133 + * @resp_dma_addr: login response payload buffer dma address 134 + * @resp_buf_size: login response payload length 135 + * @resp_wr_ptr: pointer into login response buffer when next data is 136 + * to be written 137 + * @req_bd_tbl: iscsi login request payload BD table 138 + * @req_bd_dma: login request BD table dma address 139 + * @resp_bd_tbl: iscsi login response payload BD table 140 + * @resp_bd_dma: login response BD table dma address 141 + * 142 + * following structure defines buffer info for generic pdus such as iSCSI Login, 143 + * Logout and NOP 144 + */ 145 + struct generic_pdu_resc { 146 + char *req_buf; 147 + dma_addr_t req_dma_addr; 148 + u32 req_buf_size; 149 + char *req_wr_ptr; 150 + struct iscsi_hdr resp_hdr; 151 + char *resp_buf; 152 + dma_addr_t resp_dma_addr; 153 + u32 resp_buf_size; 154 + char *resp_wr_ptr; 155 + char *req_bd_tbl; 156 + dma_addr_t req_bd_dma; 157 + char *resp_bd_tbl; 158 + dma_addr_t resp_bd_dma; 159 + }; 160 + 161 + 162 + /** 163 + * struct bd_resc_page - tracks DMA'able memory allocated for BD tables 164 + * 165 + * @link: list head to link elements 166 + * @max_ptrs: maximum pointers that can be stored in this page 167 + * @num_valid: number of pointers valid in this page 168 + * @page: base address for page pointer array 169 + * 170 + * structure to track DMA'able memory allocated for command BD tables 171 + */ 172 + struct bd_resc_page { 173 + struct list_head link; 174 + u32 max_ptrs; 175 + u32 num_valid; 176 + void *page[1]; 177 + }; 178 + 179 + 180 + /** 181 + * struct io_bdt - I/O buffer descriptor table 182 + * 183 + * @bd_tbl: BD 
table's virtual address 184 + * @bd_tbl_dma: BD table's dma address 185 + * @bd_valid: num valid BD entries 186 + * 187 + * IO BD table 188 + */ 189 + struct io_bdt { 190 + struct iscsi_bd *bd_tbl; 191 + dma_addr_t bd_tbl_dma; 192 + u16 bd_valid; 193 + }; 194 + 195 + 196 + /** 197 + * bnx2i_cmd - iscsi command structure 198 + * 199 + * @scsi_cmd: SCSI-ML task pointer corresponding to this iscsi cmd 200 + * @sg: SG list 201 + * @io_tbl: buffer descriptor (BD) table 202 + * @bd_tbl_dma: buffer descriptor (BD) table's dma address 203 + */ 204 + struct bnx2i_cmd { 205 + struct iscsi_hdr hdr; 206 + struct bnx2i_conn *conn; 207 + struct scsi_cmnd *scsi_cmd; 208 + struct scatterlist *sg; 209 + struct io_bdt io_tbl; 210 + dma_addr_t bd_tbl_dma; 211 + struct bnx2i_cmd_request req; 212 + }; 213 + 214 + 215 + /** 216 + * struct bnx2i_conn - iscsi connection structure 217 + * 218 + * @cls_conn: pointer to iscsi cls conn 219 + * @hba: adapter structure pointer 220 + * @iscsi_conn_cid: iscsi conn id 221 + * @fw_cid: firmware iscsi context id 222 + * @ep: endpoint structure pointer 223 + * @gen_pdu: login/nopout/logout pdu resources 224 + * @violation_notified: bit mask used to track iscsi error/warning messages 225 + * already printed out 226 + * 227 + * iSCSI connection structure 228 + */ 229 + struct bnx2i_conn { 230 + struct iscsi_cls_conn *cls_conn; 231 + struct bnx2i_hba *hba; 232 + struct completion cmd_cleanup_cmpl; 233 + int is_bound; 234 + 235 + u32 iscsi_conn_cid; 236 + #define BNX2I_CID_RESERVED 0x5AFF 237 + u32 fw_cid; 238 + 239 + struct timer_list poll_timer; 240 + /* 241 + * Queue Pair (QP) related structure elements. 
242 + */ 243 + struct bnx2i_endpoint *ep; 244 + 245 + /* 246 + * Buffer for login negotiation process 247 + */ 248 + struct generic_pdu_resc gen_pdu; 249 + u64 violation_notified; 250 + }; 251 + 252 + 253 + 254 + /** 255 + * struct iscsi_cid_queue - Per adapter iscsi cid queue 256 + * 257 + * @cid_que_base: queue base memory 258 + * @cid_que: queue memory pointer 259 + * @cid_q_prod_idx: producer index 260 + * @cid_q_cons_idx: consumer index 261 + * @cid_q_max_idx: max index. used to detect wrap around condition 262 + * @cid_free_cnt: queue size 263 + * @conn_cid_tbl: iscsi cid to conn structure mapping table 264 + * 265 + * Per adapter iSCSI CID Queue 266 + */ 267 + struct iscsi_cid_queue { 268 + void *cid_que_base; 269 + u32 *cid_que; 270 + u32 cid_q_prod_idx; 271 + u32 cid_q_cons_idx; 272 + u32 cid_q_max_idx; 273 + u32 cid_free_cnt; 274 + struct bnx2i_conn **conn_cid_tbl; 275 + }; 276 + 277 + /** 278 + * struct bnx2i_hba - bnx2i adapter structure 279 + * 280 + * @link: list head to link elements 281 + * @cnic: pointer to cnic device 282 + * @pcidev: pointer to pci dev 283 + * @netdev: pointer to netdev structure 284 + * @regview: mapped PCI register space 285 + * @age: age, incremented by every recovery 286 + * @cnic_dev_type: cnic device type, 5706/5708/5709/57710 287 + * @mail_queue_access: mailbox queue access mode, applicable to 5709 only 288 + * @reg_with_cnic: indicates whether the device is registered with CNIC 289 + * @adapter_state: adapter state, UP, GOING_DOWN, LINK_DOWN 290 + * @mtu_supported: Ethernet MTU supported 291 + * @shost: scsi host pointer 292 + * @max_sqes: SQ size 293 + * @max_rqes: RQ size 294 + * @max_cqes: CQ size 295 + * @num_ccell: number of command cells per connection 296 + * @ofld_conns_active: active connection list 297 + * @max_active_conns: max offload connections supported by this device 298 + * @cid_que: iscsi cid queue 299 + * @ep_rdwr_lock: read / write lock to synchronize various ep lists 300 + * @ep_ofld_list: connection 
list for pending offload completion 301 + * @ep_destroy_list: connection list for pending offload completion 302 + * @mp_bd_tbl: BD table to be used with middle path requests 303 + * @mp_bd_dma: DMA address of 'mp_bd_tbl' memory buffer 304 + * @dummy_buffer: Dummy buffer to be used with zero length scsicmd reqs 305 + * @dummy_buf_dma: DMA address of 'dummy_buffer' memory buffer 306 + * @lock: lock to synchonize access to hba structure 307 + * @pci_did: PCI device ID 308 + * @pci_vid: PCI vendor ID 309 + * @pci_sdid: PCI subsystem device ID 310 + * @pci_svid: PCI subsystem vendor ID 311 + * @pci_func: PCI function number in system pci tree 312 + * @pci_devno: PCI device number in system pci tree 313 + * @num_wqe_sent: statistic counter, total wqe's sent 314 + * @num_cqe_rcvd: statistic counter, total cqe's received 315 + * @num_intr_claimed: statistic counter, total interrupts claimed 316 + * @link_changed_count: statistic counter, num of link change notifications 317 + * received 318 + * @ipaddr_changed_count: statistic counter, num times IP address changed while 319 + * at least one connection is offloaded 320 + * @num_sess_opened: statistic counter, total num sessions opened 321 + * @num_conn_opened: statistic counter, total num conns opened on this hba 322 + * @ctx_ccell_tasks: captures number of ccells and tasks supported by 323 + * currently offloaded connection, used to decode 324 + * context memory 325 + * 326 + * Adapter Data Structure 327 + */ 328 + struct bnx2i_hba { 329 + struct list_head link; 330 + struct cnic_dev *cnic; 331 + struct pci_dev *pcidev; 332 + struct net_device *netdev; 333 + void __iomem *regview; 334 + 335 + u32 age; 336 + unsigned long cnic_dev_type; 337 + #define BNX2I_NX2_DEV_5706 0x0 338 + #define BNX2I_NX2_DEV_5708 0x1 339 + #define BNX2I_NX2_DEV_5709 0x2 340 + #define BNX2I_NX2_DEV_57710 0x3 341 + u32 mail_queue_access; 342 + #define BNX2I_MQ_KERNEL_MODE 0x0 343 + #define BNX2I_MQ_KERNEL_BYPASS_MODE 0x1 344 + #define 
BNX2I_MQ_BIN_MODE 0x2 345 + unsigned long reg_with_cnic; 346 + #define BNX2I_CNIC_REGISTERED 1 347 + 348 + unsigned long adapter_state; 349 + #define ADAPTER_STATE_UP 0 350 + #define ADAPTER_STATE_GOING_DOWN 1 351 + #define ADAPTER_STATE_LINK_DOWN 2 352 + #define ADAPTER_STATE_INIT_FAILED 31 353 + unsigned int mtu_supported; 354 + #define BNX2I_MAX_MTU_SUPPORTED 1500 355 + 356 + struct Scsi_Host *shost; 357 + 358 + u32 max_sqes; 359 + u32 max_rqes; 360 + u32 max_cqes; 361 + u32 num_ccell; 362 + 363 + int ofld_conns_active; 364 + 365 + int max_active_conns; 366 + struct iscsi_cid_queue cid_que; 367 + 368 + rwlock_t ep_rdwr_lock; 369 + struct list_head ep_ofld_list; 370 + struct list_head ep_destroy_list; 371 + 372 + /* 373 + * BD table to be used with MP (Middle Path requests. 374 + */ 375 + char *mp_bd_tbl; 376 + dma_addr_t mp_bd_dma; 377 + char *dummy_buffer; 378 + dma_addr_t dummy_buf_dma; 379 + 380 + spinlock_t lock; /* protects hba structure access */ 381 + struct mutex net_dev_lock;/* sync net device access */ 382 + 383 + /* 384 + * PCI related info. 385 + */ 386 + u16 pci_did; 387 + u16 pci_vid; 388 + u16 pci_sdid; 389 + u16 pci_svid; 390 + u16 pci_func; 391 + u16 pci_devno; 392 + 393 + /* 394 + * Following are a bunch of statistics useful during development 395 + * and later stage for score boarding. 396 + */ 397 + u32 num_wqe_sent; 398 + u32 num_cqe_rcvd; 399 + u32 num_intr_claimed; 400 + u32 link_changed_count; 401 + u32 ipaddr_changed_count; 402 + u32 num_sess_opened; 403 + u32 num_conn_opened; 404 + unsigned int ctx_ccell_tasks; 405 + }; 406 + 407 + 408 + /******************************************************************************* 409 + * QP [ SQ / RQ / CQ ] info. 
410 + ******************************************************************************/ 411 + 412 + /* 413 + * SQ/RQ/CQ generic structure definition 414 + */ 415 + struct sqe { 416 + u8 sqe_byte[BNX2I_SQ_WQE_SIZE]; 417 + }; 418 + 419 + struct rqe { 420 + u8 rqe_byte[BNX2I_RQ_WQE_SIZE]; 421 + }; 422 + 423 + struct cqe { 424 + u8 cqe_byte[BNX2I_CQE_SIZE]; 425 + }; 426 + 427 + 428 + enum { 429 + #if defined(__LITTLE_ENDIAN) 430 + CNIC_EVENT_COAL_INDEX = 0x0, 431 + CNIC_SEND_DOORBELL = 0x4, 432 + CNIC_EVENT_CQ_ARM = 0x7, 433 + CNIC_RECV_DOORBELL = 0x8 434 + #elif defined(__BIG_ENDIAN) 435 + CNIC_EVENT_COAL_INDEX = 0x2, 436 + CNIC_SEND_DOORBELL = 0x6, 437 + CNIC_EVENT_CQ_ARM = 0x4, 438 + CNIC_RECV_DOORBELL = 0xa 439 + #endif 440 + }; 441 + 442 + 443 + /* 444 + * CQ DB 445 + */ 446 + struct bnx2x_iscsi_cq_pend_cmpl { 447 + /* CQ producer, updated by Ustorm */ 448 + u16 ustrom_prod; 449 + /* CQ pending completion counter */ 450 + u16 pend_cntr; 451 + }; 452 + 453 + 454 + struct bnx2i_5771x_cq_db { 455 + struct bnx2x_iscsi_cq_pend_cmpl qp_pend_cmpl[BNX2X_MAX_CQS]; 456 + /* CQ pending completion ITT array */ 457 + u16 itt[BNX2X_MAX_CQS]; 458 + /* Cstorm CQ sequence to notify array, updated by driver */; 459 + u16 sqn[BNX2X_MAX_CQS]; 460 + u32 reserved[4] /* 16 byte allignment */; 461 + }; 462 + 463 + 464 + struct bnx2i_5771x_sq_rq_db { 465 + u16 prod_idx; 466 + u8 reserved0[14]; /* Pad structure size to 16 bytes */ 467 + }; 468 + 469 + 470 + struct bnx2i_5771x_dbell_hdr { 471 + u8 header; 472 + /* 1 for rx doorbell, 0 for tx doorbell */ 473 + #define B577XX_DOORBELL_HDR_RX (0x1<<0) 474 + #define B577XX_DOORBELL_HDR_RX_SHIFT 0 475 + /* 0 for normal doorbell, 1 for advertise wnd doorbell */ 476 + #define B577XX_DOORBELL_HDR_DB_TYPE (0x1<<1) 477 + #define B577XX_DOORBELL_HDR_DB_TYPE_SHIFT 1 478 + /* rdma tx only: DPM transaction size specifier (64/128/256/512B) */ 479 + #define B577XX_DOORBELL_HDR_DPM_SIZE (0x3<<2) 480 + #define B577XX_DOORBELL_HDR_DPM_SIZE_SHIFT 2 481 + /* 
connection type */ 482 + #define B577XX_DOORBELL_HDR_CONN_TYPE (0xF<<4) 483 + #define B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT 4 484 + }; 485 + 486 + struct bnx2i_5771x_dbell { 487 + struct bnx2i_5771x_dbell_hdr dbell; 488 + u8 pad[3]; 489 + 490 + }; 491 + 492 + /** 493 + * struct qp_info - QP (share queue region) atrributes structure 494 + * 495 + * @ctx_base: ioremapped pci register base to access doorbell register 496 + * pertaining to this offloaded connection 497 + * @sq_virt: virtual address of send queue (SQ) region 498 + * @sq_phys: DMA address of SQ memory region 499 + * @sq_mem_size: SQ size 500 + * @sq_prod_qe: SQ producer entry pointer 501 + * @sq_cons_qe: SQ consumer entry pointer 502 + * @sq_first_qe: virtaul address of first entry in SQ 503 + * @sq_last_qe: virtaul address of last entry in SQ 504 + * @sq_prod_idx: SQ producer index 505 + * @sq_cons_idx: SQ consumer index 506 + * @sqe_left: number sq entry left 507 + * @sq_pgtbl_virt: page table describing buffer consituting SQ region 508 + * @sq_pgtbl_phys: dma address of 'sq_pgtbl_virt' 509 + * @sq_pgtbl_size: SQ page table size 510 + * @cq_virt: virtual address of completion queue (CQ) region 511 + * @cq_phys: DMA address of RQ memory region 512 + * @cq_mem_size: CQ size 513 + * @cq_prod_qe: CQ producer entry pointer 514 + * @cq_cons_qe: CQ consumer entry pointer 515 + * @cq_first_qe: virtaul address of first entry in CQ 516 + * @cq_last_qe: virtaul address of last entry in CQ 517 + * @cq_prod_idx: CQ producer index 518 + * @cq_cons_idx: CQ consumer index 519 + * @cqe_left: number cq entry left 520 + * @cqe_size: size of each CQ entry 521 + * @cqe_exp_seq_sn: next expected CQE sequence number 522 + * @cq_pgtbl_virt: page table describing buffer consituting CQ region 523 + * @cq_pgtbl_phys: dma address of 'cq_pgtbl_virt' 524 + * @cq_pgtbl_size: CQ page table size 525 + * @rq_virt: virtual address of receive queue (RQ) region 526 + * @rq_phys: DMA address of RQ memory region 527 + * @rq_mem_size: RQ size 
528 + * @rq_prod_qe: RQ producer entry pointer 529 + * @rq_cons_qe: RQ consumer entry pointer 530 + * @rq_first_qe: virtaul address of first entry in RQ 531 + * @rq_last_qe: virtaul address of last entry in RQ 532 + * @rq_prod_idx: RQ producer index 533 + * @rq_cons_idx: RQ consumer index 534 + * @rqe_left: number rq entry left 535 + * @rq_pgtbl_virt: page table describing buffer consituting RQ region 536 + * @rq_pgtbl_phys: dma address of 'rq_pgtbl_virt' 537 + * @rq_pgtbl_size: RQ page table size 538 + * 539 + * queue pair (QP) is a per connection shared data structure which is used 540 + * to send work requests (SQ), receive completion notifications (CQ) 541 + * and receive asynchoronous / scsi sense info (RQ). 'qp_info' structure 542 + * below holds queue memory, consumer/producer indexes and page table 543 + * information 544 + */ 545 + struct qp_info { 546 + void __iomem *ctx_base; 547 + #define DPM_TRIGER_TYPE 0x40 548 + 549 + #define BNX2I_570x_QUE_DB_SIZE 0 550 + #define BNX2I_5771x_QUE_DB_SIZE 16 551 + struct sqe *sq_virt; 552 + dma_addr_t sq_phys; 553 + u32 sq_mem_size; 554 + 555 + struct sqe *sq_prod_qe; 556 + struct sqe *sq_cons_qe; 557 + struct sqe *sq_first_qe; 558 + struct sqe *sq_last_qe; 559 + u16 sq_prod_idx; 560 + u16 sq_cons_idx; 561 + u32 sqe_left; 562 + 563 + void *sq_pgtbl_virt; 564 + dma_addr_t sq_pgtbl_phys; 565 + u32 sq_pgtbl_size; /* set to PAGE_SIZE for 5708 & 5709 */ 566 + 567 + struct cqe *cq_virt; 568 + dma_addr_t cq_phys; 569 + u32 cq_mem_size; 570 + 571 + struct cqe *cq_prod_qe; 572 + struct cqe *cq_cons_qe; 573 + struct cqe *cq_first_qe; 574 + struct cqe *cq_last_qe; 575 + u16 cq_prod_idx; 576 + u16 cq_cons_idx; 577 + u32 cqe_left; 578 + u32 cqe_size; 579 + u32 cqe_exp_seq_sn; 580 + 581 + void *cq_pgtbl_virt; 582 + dma_addr_t cq_pgtbl_phys; 583 + u32 cq_pgtbl_size; /* set to PAGE_SIZE for 5708 & 5709 */ 584 + 585 + struct rqe *rq_virt; 586 + dma_addr_t rq_phys; 587 + u32 rq_mem_size; 588 + 589 + struct rqe *rq_prod_qe; 590 + struct 
rqe *rq_cons_qe; 591 + struct rqe *rq_first_qe; 592 + struct rqe *rq_last_qe; 593 + u16 rq_prod_idx; 594 + u16 rq_cons_idx; 595 + u32 rqe_left; 596 + 597 + void *rq_pgtbl_virt; 598 + dma_addr_t rq_pgtbl_phys; 599 + u32 rq_pgtbl_size; /* set to PAGE_SIZE for 5708 & 5709 */ 600 + }; 601 + 602 + 603 + 604 + /* 605 + * CID handles 606 + */ 607 + struct ep_handles { 608 + u32 fw_cid; 609 + u32 drv_iscsi_cid; 610 + u16 pg_cid; 611 + u16 rsvd; 612 + }; 613 + 614 + 615 + enum { 616 + EP_STATE_IDLE = 0x0, 617 + EP_STATE_PG_OFLD_START = 0x1, 618 + EP_STATE_PG_OFLD_COMPL = 0x2, 619 + EP_STATE_OFLD_START = 0x4, 620 + EP_STATE_OFLD_COMPL = 0x8, 621 + EP_STATE_CONNECT_START = 0x10, 622 + EP_STATE_CONNECT_COMPL = 0x20, 623 + EP_STATE_ULP_UPDATE_START = 0x40, 624 + EP_STATE_ULP_UPDATE_COMPL = 0x80, 625 + EP_STATE_DISCONN_START = 0x100, 626 + EP_STATE_DISCONN_COMPL = 0x200, 627 + EP_STATE_CLEANUP_START = 0x400, 628 + EP_STATE_CLEANUP_CMPL = 0x800, 629 + EP_STATE_TCP_FIN_RCVD = 0x1000, 630 + EP_STATE_TCP_RST_RCVD = 0x2000, 631 + EP_STATE_PG_OFLD_FAILED = 0x1000000, 632 + EP_STATE_ULP_UPDATE_FAILED = 0x2000000, 633 + EP_STATE_CLEANUP_FAILED = 0x4000000, 634 + EP_STATE_OFLD_FAILED = 0x8000000, 635 + EP_STATE_CONNECT_FAILED = 0x10000000, 636 + EP_STATE_DISCONN_TIMEDOUT = 0x20000000, 637 + }; 638 + 639 + /** 640 + * struct bnx2i_endpoint - representation of tcp connection in NX2 world 641 + * 642 + * @link: list head to link elements 643 + * @hba: adapter to which this connection belongs 644 + * @conn: iscsi connection this EP is linked to 645 + * @sess: iscsi session this EP is linked to 646 + * @cm_sk: cnic sock struct 647 + * @hba_age: age to detect if 'iscsid' issues ep_disconnect() 648 + * after HBA reset is completed by bnx2i/cnic/bnx2 649 + * modules 650 + * @state: tracks offload connection state machine 651 + * @teardown_mode: indicates if conn teardown is abortive or orderly 652 + * @qp: QP information 653 + * @ids: contains chip allocated *context id* & driver assigned 654 + 
* *iscsi cid* 655 + * @ofld_timer: offload timer to detect timeout 656 + * @ofld_wait: wait queue 657 + * 658 + * Endpoint Structure - equivalent of tcp socket structure 659 + */ 660 + struct bnx2i_endpoint { 661 + struct list_head link; 662 + struct bnx2i_hba *hba; 663 + struct bnx2i_conn *conn; 664 + struct cnic_sock *cm_sk; 665 + u32 hba_age; 666 + u32 state; 667 + unsigned long timestamp; 668 + int num_active_cmds; 669 + 670 + struct qp_info qp; 671 + struct ep_handles ids; 672 + #define ep_iscsi_cid ids.drv_iscsi_cid 673 + #define ep_cid ids.fw_cid 674 + #define ep_pg_cid ids.pg_cid 675 + struct timer_list ofld_timer; 676 + wait_queue_head_t ofld_wait; 677 + }; 678 + 679 + 680 + 681 + /* Global variables */ 682 + extern unsigned int error_mask1, error_mask2; 683 + extern u64 iscsi_error_mask; 684 + extern unsigned int en_tcp_dack; 685 + extern unsigned int event_coal_div; 686 + 687 + extern struct scsi_transport_template *bnx2i_scsi_xport_template; 688 + extern struct iscsi_transport bnx2i_iscsi_transport; 689 + extern struct cnic_ulp_ops bnx2i_cnic_cb; 690 + 691 + extern unsigned int sq_size; 692 + extern unsigned int rq_size; 693 + 694 + extern struct device_attribute *bnx2i_dev_attributes[]; 695 + 696 + 697 + 698 + /* 699 + * Function Prototypes 700 + */ 701 + extern void bnx2i_identify_device(struct bnx2i_hba *hba); 702 + extern void bnx2i_register_device(struct bnx2i_hba *hba); 703 + 704 + extern void bnx2i_ulp_init(struct cnic_dev *dev); 705 + extern void bnx2i_ulp_exit(struct cnic_dev *dev); 706 + extern void bnx2i_start(void *handle); 707 + extern void bnx2i_stop(void *handle); 708 + extern void bnx2i_reg_dev_all(void); 709 + extern void bnx2i_unreg_dev_all(void); 710 + extern struct bnx2i_hba *get_adapter_list_head(void); 711 + 712 + struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba, 713 + u16 iscsi_cid); 714 + 715 + int bnx2i_alloc_ep_pool(void); 716 + void bnx2i_release_ep_pool(void); 717 + struct bnx2i_endpoint 
*bnx2i_ep_ofld_list_next(struct bnx2i_hba *hba); 718 + struct bnx2i_endpoint *bnx2i_ep_destroy_list_next(struct bnx2i_hba *hba); 719 + 720 + struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic); 721 + 722 + struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic); 723 + void bnx2i_free_hba(struct bnx2i_hba *hba); 724 + 725 + void bnx2i_get_rq_buf(struct bnx2i_conn *conn, char *ptr, int len); 726 + void bnx2i_put_rq_buf(struct bnx2i_conn *conn, int count); 727 + 728 + void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd); 729 + 730 + void bnx2i_drop_session(struct iscsi_cls_session *session); 731 + 732 + extern int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba); 733 + extern int bnx2i_send_iscsi_login(struct bnx2i_conn *conn, 734 + struct iscsi_task *mtask); 735 + extern int bnx2i_send_iscsi_tmf(struct bnx2i_conn *conn, 736 + struct iscsi_task *mtask); 737 + extern int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *conn, 738 + struct bnx2i_cmd *cmnd); 739 + extern int bnx2i_send_iscsi_nopout(struct bnx2i_conn *conn, 740 + struct iscsi_task *mtask, u32 ttt, 741 + char *datap, int data_len, int unsol); 742 + extern int bnx2i_send_iscsi_logout(struct bnx2i_conn *conn, 743 + struct iscsi_task *mtask); 744 + extern void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba, 745 + struct bnx2i_cmd *cmd); 746 + extern void bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, 747 + struct bnx2i_endpoint *ep); 748 + extern void bnx2i_update_iscsi_conn(struct iscsi_conn *conn); 749 + extern void bnx2i_send_conn_destroy(struct bnx2i_hba *hba, 750 + struct bnx2i_endpoint *ep); 751 + 752 + extern int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, 753 + struct bnx2i_endpoint *ep); 754 + extern void bnx2i_free_qp_resc(struct bnx2i_hba *hba, 755 + struct bnx2i_endpoint *ep); 756 + extern void bnx2i_ep_ofld_timer(unsigned long data); 757 + extern struct bnx2i_endpoint *bnx2i_find_ep_in_ofld_list( 758 + struct bnx2i_hba *hba, u32 iscsi_cid); 759 + extern struct bnx2i_endpoint 
*bnx2i_find_ep_in_destroy_list( 760 + struct bnx2i_hba *hba, u32 iscsi_cid); 761 + 762 + extern int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep); 763 + extern void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action); 764 + 765 + /* Debug related function prototypes */ 766 + extern void bnx2i_print_pend_cmd_queue(struct bnx2i_conn *conn); 767 + extern void bnx2i_print_active_cmd_queue(struct bnx2i_conn *conn); 768 + extern void bnx2i_print_xmit_pdu_queue(struct bnx2i_conn *conn); 769 + extern void bnx2i_print_recv_state(struct bnx2i_conn *conn); 770 + 771 + #endif
+2405
drivers/scsi/bnx2i/bnx2i_hwi.c
··· 1 + /* bnx2i_hwi.c: Broadcom NetXtreme II iSCSI driver. 2 + * 3 + * Copyright (c) 2006 - 2009 Broadcom Corporation 4 + * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. 5 + * Copyright (c) 2007, 2008 Mike Christie 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License as published by 9 + * the Free Software Foundation. 10 + * 11 + * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) 12 + */ 13 + 14 + #include <scsi/scsi_tcq.h> 15 + #include <scsi/libiscsi.h> 16 + #include "bnx2i.h" 17 + 18 + /** 19 + * bnx2i_get_cid_num - get cid from ep 20 + * @ep: endpoint pointer 21 + * 22 + * Only applicable to 57710 family of devices 23 + */ 24 + static u32 bnx2i_get_cid_num(struct bnx2i_endpoint *ep) 25 + { 26 + u32 cid; 27 + 28 + if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) 29 + cid = ep->ep_cid; 30 + else 31 + cid = GET_CID_NUM(ep->ep_cid); 32 + return cid; 33 + } 34 + 35 + 36 + /** 37 + * bnx2i_adjust_qp_size - Adjust SQ/RQ/CQ size for 57710 device type 38 + * @hba: Adapter for which adjustments is to be made 39 + * 40 + * Only applicable to 57710 family of devices 41 + */ 42 + static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba) 43 + { 44 + u32 num_elements_per_pg; 45 + 46 + if (test_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type) || 47 + test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type) || 48 + test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) { 49 + if (!is_power_of_2(hba->max_sqes)) 50 + hba->max_sqes = rounddown_pow_of_two(hba->max_sqes); 51 + 52 + if (!is_power_of_2(hba->max_rqes)) 53 + hba->max_rqes = rounddown_pow_of_two(hba->max_rqes); 54 + } 55 + 56 + /* Adjust each queue size if the user selection does not 57 + * yield integral num of page buffers 58 + */ 59 + /* adjust SQ */ 60 + num_elements_per_pg = PAGE_SIZE / BNX2I_SQ_WQE_SIZE; 61 + if (hba->max_sqes < num_elements_per_pg) 62 + hba->max_sqes = num_elements_per_pg; 63 + else if 
(hba->max_sqes % num_elements_per_pg) 64 + hba->max_sqes = (hba->max_sqes + num_elements_per_pg - 1) & 65 + ~(num_elements_per_pg - 1); 66 + 67 + /* adjust CQ */ 68 + num_elements_per_pg = PAGE_SIZE / BNX2I_CQE_SIZE; 69 + if (hba->max_cqes < num_elements_per_pg) 70 + hba->max_cqes = num_elements_per_pg; 71 + else if (hba->max_cqes % num_elements_per_pg) 72 + hba->max_cqes = (hba->max_cqes + num_elements_per_pg - 1) & 73 + ~(num_elements_per_pg - 1); 74 + 75 + /* adjust RQ */ 76 + num_elements_per_pg = PAGE_SIZE / BNX2I_RQ_WQE_SIZE; 77 + if (hba->max_rqes < num_elements_per_pg) 78 + hba->max_rqes = num_elements_per_pg; 79 + else if (hba->max_rqes % num_elements_per_pg) 80 + hba->max_rqes = (hba->max_rqes + num_elements_per_pg - 1) & 81 + ~(num_elements_per_pg - 1); 82 + } 83 + 84 + 85 + /** 86 + * bnx2i_get_link_state - get network interface link state 87 + * @hba: adapter instance pointer 88 + * 89 + * updates adapter structure flag based on netdev state 90 + */ 91 + static void bnx2i_get_link_state(struct bnx2i_hba *hba) 92 + { 93 + if (test_bit(__LINK_STATE_NOCARRIER, &hba->netdev->state)) 94 + set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); 95 + else 96 + clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); 97 + } 98 + 99 + 100 + /** 101 + * bnx2i_iscsi_license_error - displays iscsi license related error message 102 + * @hba: adapter instance pointer 103 + * @error_code: error classification 104 + * 105 + * Puts out an error log when driver is unable to offload iscsi connection 106 + * due to license restrictions 107 + */ 108 + static void bnx2i_iscsi_license_error(struct bnx2i_hba *hba, u32 error_code) 109 + { 110 + if (error_code == ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED) 111 + /* iSCSI offload not supported on this device */ 112 + printk(KERN_ERR "bnx2i: iSCSI not supported, dev=%s\n", 113 + hba->netdev->name); 114 + if (error_code == ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED) 115 + /* iSCSI offload not supported on this LOM 
device */ 116 + printk(KERN_ERR "bnx2i: LOM is not enable to " 117 + "offload iSCSI connections, dev=%s\n", 118 + hba->netdev->name); 119 + set_bit(ADAPTER_STATE_INIT_FAILED, &hba->adapter_state); 120 + } 121 + 122 + 123 + /** 124 + * bnx2i_arm_cq_event_coalescing - arms CQ to enable EQ notification 125 + * @ep: endpoint (transport indentifier) structure 126 + * @action: action, ARM or DISARM. For now only ARM_CQE is used 127 + * 128 + * Arm'ing CQ will enable chip to generate global EQ events inorder to interrupt 129 + * the driver. EQ event is generated CQ index is hit or at least 1 CQ is 130 + * outstanding and on chip timer expires 131 + */ 132 + void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action) 133 + { 134 + struct bnx2i_5771x_cq_db *cq_db; 135 + u16 cq_index; 136 + 137 + if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) 138 + return; 139 + 140 + if (action == CNIC_ARM_CQE) { 141 + cq_index = ep->qp.cqe_exp_seq_sn + 142 + ep->num_active_cmds / event_coal_div; 143 + cq_index %= (ep->qp.cqe_size * 2 + 1); 144 + if (!cq_index) { 145 + cq_index = 1; 146 + cq_db = (struct bnx2i_5771x_cq_db *) 147 + ep->qp.cq_pgtbl_virt; 148 + cq_db->sqn[0] = cq_index; 149 + } 150 + } 151 + } 152 + 153 + 154 + /** 155 + * bnx2i_get_rq_buf - copy RQ buffer contents to driver buffer 156 + * @conn: iscsi connection on which RQ event occured 157 + * @ptr: driver buffer to which RQ buffer contents is to 158 + * be copied 159 + * @len: length of valid data inside RQ buf 160 + * 161 + * Copies RQ buffer contents from shared (DMA'able) memory region to 162 + * driver buffer. 
RQ is used to DMA unsolicitated iscsi pdu's and 163 + * scsi sense info 164 + */ 165 + void bnx2i_get_rq_buf(struct bnx2i_conn *bnx2i_conn, char *ptr, int len) 166 + { 167 + if (!bnx2i_conn->ep->qp.rqe_left) 168 + return; 169 + 170 + bnx2i_conn->ep->qp.rqe_left--; 171 + memcpy(ptr, (u8 *) bnx2i_conn->ep->qp.rq_cons_qe, len); 172 + if (bnx2i_conn->ep->qp.rq_cons_qe == bnx2i_conn->ep->qp.rq_last_qe) { 173 + bnx2i_conn->ep->qp.rq_cons_qe = bnx2i_conn->ep->qp.rq_first_qe; 174 + bnx2i_conn->ep->qp.rq_cons_idx = 0; 175 + } else { 176 + bnx2i_conn->ep->qp.rq_cons_qe++; 177 + bnx2i_conn->ep->qp.rq_cons_idx++; 178 + } 179 + } 180 + 181 + 182 + static void bnx2i_ring_577xx_doorbell(struct bnx2i_conn *conn) 183 + { 184 + struct bnx2i_5771x_dbell dbell; 185 + u32 msg; 186 + 187 + memset(&dbell, 0, sizeof(dbell)); 188 + dbell.dbell.header = (B577XX_ISCSI_CONNECTION_TYPE << 189 + B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT); 190 + msg = *((u32 *)&dbell); 191 + /* TODO : get doorbell register mapping */ 192 + writel(cpu_to_le32(msg), conn->ep->qp.ctx_base); 193 + } 194 + 195 + 196 + /** 197 + * bnx2i_put_rq_buf - Replenish RQ buffer, if required ring on chip doorbell 198 + * @conn: iscsi connection on which event to post 199 + * @count: number of RQ buffer being posted to chip 200 + * 201 + * No need to ring hardware doorbell for 57710 family of devices 202 + */ 203 + void bnx2i_put_rq_buf(struct bnx2i_conn *bnx2i_conn, int count) 204 + { 205 + struct bnx2i_5771x_sq_rq_db *rq_db; 206 + u16 hi_bit = (bnx2i_conn->ep->qp.rq_prod_idx & 0x8000); 207 + struct bnx2i_endpoint *ep = bnx2i_conn->ep; 208 + 209 + ep->qp.rqe_left += count; 210 + ep->qp.rq_prod_idx &= 0x7FFF; 211 + ep->qp.rq_prod_idx += count; 212 + 213 + if (ep->qp.rq_prod_idx > bnx2i_conn->hba->max_rqes) { 214 + ep->qp.rq_prod_idx %= bnx2i_conn->hba->max_rqes; 215 + if (!hi_bit) 216 + ep->qp.rq_prod_idx |= 0x8000; 217 + } else 218 + ep->qp.rq_prod_idx |= hi_bit; 219 + 220 + if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) 
{ 221 + rq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.rq_pgtbl_virt; 222 + rq_db->prod_idx = ep->qp.rq_prod_idx; 223 + /* no need to ring hardware doorbell for 57710 */ 224 + } else { 225 + writew(ep->qp.rq_prod_idx, 226 + ep->qp.ctx_base + CNIC_RECV_DOORBELL); 227 + } 228 + mmiowb(); 229 + } 230 + 231 + 232 + /** 233 + * bnx2i_ring_sq_dbell - Ring SQ doorbell to wake-up the processing engine 234 + * @conn: iscsi connection to which new SQ entries belong 235 + * @count: number of SQ WQEs to post 236 + * 237 + * SQ DB is updated in host memory and TX Doorbell is rung for 57710 family 238 + * of devices. For 5706/5708/5709 new SQ WQE count is written into the 239 + * doorbell register 240 + */ 241 + static void bnx2i_ring_sq_dbell(struct bnx2i_conn *bnx2i_conn, int count) 242 + { 243 + struct bnx2i_5771x_sq_rq_db *sq_db; 244 + struct bnx2i_endpoint *ep = bnx2i_conn->ep; 245 + 246 + ep->num_active_cmds++; 247 + wmb(); /* flush SQ WQE memory before the doorbell is rung */ 248 + if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) { 249 + sq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.sq_pgtbl_virt; 250 + sq_db->prod_idx = ep->qp.sq_prod_idx; 251 + bnx2i_ring_577xx_doorbell(bnx2i_conn); 252 + } else 253 + writew(count, ep->qp.ctx_base + CNIC_SEND_DOORBELL); 254 + 255 + mmiowb(); /* flush posted PCI writes */ 256 + } 257 + 258 + 259 + /** 260 + * bnx2i_ring_dbell_update_sq_params - update SQ driver parameters 261 + * @conn: iscsi connection to which new SQ entries belong 262 + * @count: number of SQ WQEs to post 263 + * 264 + * this routine will update SQ driver parameters and ring the doorbell 265 + */ 266 + static void bnx2i_ring_dbell_update_sq_params(struct bnx2i_conn *bnx2i_conn, 267 + int count) 268 + { 269 + int tmp_cnt; 270 + 271 + if (count == 1) { 272 + if (bnx2i_conn->ep->qp.sq_prod_qe == 273 + bnx2i_conn->ep->qp.sq_last_qe) 274 + bnx2i_conn->ep->qp.sq_prod_qe = 275 + bnx2i_conn->ep->qp.sq_first_qe; 276 + else 277 + bnx2i_conn->ep->qp.sq_prod_qe++; 278 
+ } else { 279 + if ((bnx2i_conn->ep->qp.sq_prod_qe + count) <= 280 + bnx2i_conn->ep->qp.sq_last_qe) 281 + bnx2i_conn->ep->qp.sq_prod_qe += count; 282 + else { 283 + tmp_cnt = bnx2i_conn->ep->qp.sq_last_qe - 284 + bnx2i_conn->ep->qp.sq_prod_qe; 285 + bnx2i_conn->ep->qp.sq_prod_qe = 286 + &bnx2i_conn->ep->qp.sq_first_qe[count - 287 + (tmp_cnt + 1)]; 288 + } 289 + } 290 + bnx2i_conn->ep->qp.sq_prod_idx += count; 291 + /* Ring the doorbell */ 292 + bnx2i_ring_sq_dbell(bnx2i_conn, bnx2i_conn->ep->qp.sq_prod_idx); 293 + } 294 + 295 + 296 + /** 297 + * bnx2i_send_iscsi_login - post iSCSI login request MP WQE to hardware 298 + * @conn: iscsi connection 299 + * @cmd: driver command structure which is requesting 300 + * a WQE to sent to chip for further processing 301 + * 302 + * prepare and post an iSCSI Login request WQE to CNIC firmware 303 + */ 304 + int bnx2i_send_iscsi_login(struct bnx2i_conn *bnx2i_conn, 305 + struct iscsi_task *task) 306 + { 307 + struct bnx2i_cmd *bnx2i_cmd; 308 + struct bnx2i_login_request *login_wqe; 309 + struct iscsi_login *login_hdr; 310 + u32 dword; 311 + 312 + bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data; 313 + login_hdr = (struct iscsi_login *)task->hdr; 314 + login_wqe = (struct bnx2i_login_request *) 315 + bnx2i_conn->ep->qp.sq_prod_qe; 316 + 317 + login_wqe->op_code = login_hdr->opcode; 318 + login_wqe->op_attr = login_hdr->flags; 319 + login_wqe->version_max = login_hdr->max_version; 320 + login_wqe->version_min = login_hdr->min_version; 321 + login_wqe->data_length = ntoh24(login_hdr->dlength); 322 + login_wqe->isid_lo = *((u32 *) login_hdr->isid); 323 + login_wqe->isid_hi = *((u16 *) login_hdr->isid + 2); 324 + login_wqe->tsih = login_hdr->tsih; 325 + login_wqe->itt = task->itt | 326 + (ISCSI_TASK_TYPE_MPATH << ISCSI_LOGIN_REQUEST_TYPE_SHIFT); 327 + login_wqe->cid = login_hdr->cid; 328 + 329 + login_wqe->cmd_sn = be32_to_cpu(login_hdr->cmdsn); 330 + login_wqe->exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn); 331 + 332 + 
login_wqe->resp_bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_bd_dma; 333 + login_wqe->resp_bd_list_addr_hi = 334 + (u32) ((u64) bnx2i_conn->gen_pdu.resp_bd_dma >> 32); 335 + 336 + dword = ((1 << ISCSI_LOGIN_REQUEST_NUM_RESP_BDS_SHIFT) | 337 + (bnx2i_conn->gen_pdu.resp_buf_size << 338 + ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT)); 339 + login_wqe->resp_buffer = dword; 340 + login_wqe->flags = 0; 341 + login_wqe->bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.req_bd_dma; 342 + login_wqe->bd_list_addr_hi = 343 + (u32) ((u64) bnx2i_conn->gen_pdu.req_bd_dma >> 32); 344 + login_wqe->num_bds = 1; 345 + login_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */ 346 + 347 + bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1); 348 + return 0; 349 + } 350 + 351 + /** 352 + * bnx2i_send_iscsi_tmf - post iSCSI task management request MP WQE to hardware 353 + * @conn: iscsi connection 354 + * @mtask: driver command structure which is requesting 355 + * a WQE to sent to chip for further processing 356 + * 357 + * prepare and post an iSCSI Login request WQE to CNIC firmware 358 + */ 359 + int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn, 360 + struct iscsi_task *mtask) 361 + { 362 + struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; 363 + struct iscsi_tm *tmfabort_hdr; 364 + struct scsi_cmnd *ref_sc; 365 + struct iscsi_task *ctask; 366 + struct bnx2i_cmd *bnx2i_cmd; 367 + struct bnx2i_tmf_request *tmfabort_wqe; 368 + u32 dword; 369 + 370 + bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data; 371 + tmfabort_hdr = (struct iscsi_tm *)mtask->hdr; 372 + tmfabort_wqe = (struct bnx2i_tmf_request *) 373 + bnx2i_conn->ep->qp.sq_prod_qe; 374 + 375 + tmfabort_wqe->op_code = tmfabort_hdr->opcode; 376 + tmfabort_wqe->op_attr = 0; 377 + tmfabort_wqe->op_attr = 378 + ISCSI_TMF_REQUEST_ALWAYS_ONE | ISCSI_TM_FUNC_ABORT_TASK; 379 + tmfabort_wqe->lun[0] = be32_to_cpu(tmfabort_hdr->lun[0]); 380 + tmfabort_wqe->lun[1] = be32_to_cpu(tmfabort_hdr->lun[1]); 381 + 382 + tmfabort_wqe->itt = 
(mtask->itt | (ISCSI_TASK_TYPE_MPATH << 14)); 383 + tmfabort_wqe->reserved2 = 0; 384 + tmfabort_wqe->cmd_sn = be32_to_cpu(tmfabort_hdr->cmdsn); 385 + 386 + ctask = iscsi_itt_to_task(conn, tmfabort_hdr->rtt); 387 + if (!ctask || ctask->sc) 388 + /* 389 + * the iscsi layer must have completed the cmd while this 390 + * was starting up. 391 + */ 392 + return 0; 393 + ref_sc = ctask->sc; 394 + 395 + if (ref_sc->sc_data_direction == DMA_TO_DEVICE) 396 + dword = (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT); 397 + else 398 + dword = (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT); 399 + tmfabort_wqe->ref_itt = (dword | tmfabort_hdr->rtt); 400 + tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn); 401 + 402 + tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma; 403 + tmfabort_wqe->bd_list_addr_hi = (u32) 404 + ((u64) bnx2i_conn->hba->mp_bd_dma >> 32); 405 + tmfabort_wqe->num_bds = 1; 406 + tmfabort_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */ 407 + 408 + bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1); 409 + return 0; 410 + } 411 + 412 + /** 413 + * bnx2i_send_iscsi_scsicmd - post iSCSI scsicmd request WQE to hardware 414 + * @conn: iscsi connection 415 + * @cmd: driver command structure which is requesting 416 + * a WQE to sent to chip for further processing 417 + * 418 + * prepare and post an iSCSI SCSI-CMD request WQE to CNIC firmware 419 + */ 420 + int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *bnx2i_conn, 421 + struct bnx2i_cmd *cmd) 422 + { 423 + struct bnx2i_cmd_request *scsi_cmd_wqe; 424 + 425 + scsi_cmd_wqe = (struct bnx2i_cmd_request *) 426 + bnx2i_conn->ep->qp.sq_prod_qe; 427 + memcpy(scsi_cmd_wqe, &cmd->req, sizeof(struct bnx2i_cmd_request)); 428 + scsi_cmd_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */ 429 + 430 + bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1); 431 + return 0; 432 + } 433 + 434 + /** 435 + * bnx2i_send_iscsi_nopout - post iSCSI NOPOUT request WQE to hardware 436 + * 
@conn: iscsi connection 437 + * @cmd: driver command structure which is requesting 438 + * a WQE to sent to chip for further processing 439 + * @ttt: TTT to be used when building pdu header 440 + * @datap: payload buffer pointer 441 + * @data_len: payload data length 442 + * @unsol: indicated whether nopout pdu is unsolicited pdu or 443 + * in response to target's NOPIN w/ TTT != FFFFFFFF 444 + * 445 + * prepare and post a nopout request WQE to CNIC firmware 446 + */ 447 + int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn, 448 + struct iscsi_task *task, u32 ttt, 449 + char *datap, int data_len, int unsol) 450 + { 451 + struct bnx2i_endpoint *ep = bnx2i_conn->ep; 452 + struct bnx2i_cmd *bnx2i_cmd; 453 + struct bnx2i_nop_out_request *nopout_wqe; 454 + struct iscsi_nopout *nopout_hdr; 455 + 456 + bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data; 457 + nopout_hdr = (struct iscsi_nopout *)task->hdr; 458 + nopout_wqe = (struct bnx2i_nop_out_request *)ep->qp.sq_prod_qe; 459 + nopout_wqe->op_code = nopout_hdr->opcode; 460 + nopout_wqe->op_attr = ISCSI_FLAG_CMD_FINAL; 461 + memcpy(nopout_wqe->lun, nopout_hdr->lun, 8); 462 + 463 + if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) { 464 + u32 tmp = nopout_hdr->lun[0]; 465 + /* 57710 requires LUN field to be swapped */ 466 + nopout_hdr->lun[0] = nopout_hdr->lun[1]; 467 + nopout_hdr->lun[1] = tmp; 468 + } 469 + 470 + nopout_wqe->itt = ((u16)task->itt | 471 + (ISCSI_TASK_TYPE_MPATH << 472 + ISCSI_TMF_REQUEST_TYPE_SHIFT)); 473 + nopout_wqe->ttt = ttt; 474 + nopout_wqe->flags = 0; 475 + if (!unsol) 476 + nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION; 477 + else if (nopout_hdr->itt == RESERVED_ITT) 478 + nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION; 479 + 480 + nopout_wqe->cmd_sn = be32_to_cpu(nopout_hdr->cmdsn); 481 + nopout_wqe->data_length = data_len; 482 + if (data_len) { 483 + /* handle payload data, not required in first release */ 484 + printk(KERN_ALERT "NOPOUT: WARNING!! 
payload len != 0\n"); 485 + } else { 486 + nopout_wqe->bd_list_addr_lo = (u32) 487 + bnx2i_conn->hba->mp_bd_dma; 488 + nopout_wqe->bd_list_addr_hi = 489 + (u32) ((u64) bnx2i_conn->hba->mp_bd_dma >> 32); 490 + nopout_wqe->num_bds = 1; 491 + } 492 + nopout_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */ 493 + 494 + bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1); 495 + return 0; 496 + } 497 + 498 + 499 + /** 500 + * bnx2i_send_iscsi_logout - post iSCSI logout request WQE to hardware 501 + * @conn: iscsi connection 502 + * @cmd: driver command structure which is requesting 503 + * a WQE to sent to chip for further processing 504 + * 505 + * prepare and post logout request WQE to CNIC firmware 506 + */ 507 + int bnx2i_send_iscsi_logout(struct bnx2i_conn *bnx2i_conn, 508 + struct iscsi_task *task) 509 + { 510 + struct bnx2i_cmd *bnx2i_cmd; 511 + struct bnx2i_logout_request *logout_wqe; 512 + struct iscsi_logout *logout_hdr; 513 + 514 + bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data; 515 + logout_hdr = (struct iscsi_logout *)task->hdr; 516 + 517 + logout_wqe = (struct bnx2i_logout_request *) 518 + bnx2i_conn->ep->qp.sq_prod_qe; 519 + memset(logout_wqe, 0x00, sizeof(struct bnx2i_logout_request)); 520 + 521 + logout_wqe->op_code = logout_hdr->opcode; 522 + logout_wqe->cmd_sn = be32_to_cpu(logout_hdr->cmdsn); 523 + logout_wqe->op_attr = 524 + logout_hdr->flags | ISCSI_LOGOUT_REQUEST_ALWAYS_ONE; 525 + logout_wqe->itt = ((u16)task->itt | 526 + (ISCSI_TASK_TYPE_MPATH << 527 + ISCSI_LOGOUT_REQUEST_TYPE_SHIFT)); 528 + logout_wqe->data_length = 0; 529 + logout_wqe->cid = 0; 530 + 531 + logout_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma; 532 + logout_wqe->bd_list_addr_hi = (u32) 533 + ((u64) bnx2i_conn->hba->mp_bd_dma >> 32); 534 + logout_wqe->num_bds = 1; 535 + logout_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */ 536 + 537 + bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1); 538 + return 0; 539 + } 540 + 541 + 542 + /** 543 + * 
bnx2i_update_iscsi_conn - post iSCSI logout request WQE to hardware 544 + * @conn: iscsi connection which requires iscsi parameter update 545 + * 546 + * sends down iSCSI Conn Update request to move iSCSI conn to FFP 547 + */ 548 + void bnx2i_update_iscsi_conn(struct iscsi_conn *conn) 549 + { 550 + struct bnx2i_conn *bnx2i_conn = conn->dd_data; 551 + struct bnx2i_hba *hba = bnx2i_conn->hba; 552 + struct kwqe *kwqe_arr[2]; 553 + struct iscsi_kwqe_conn_update *update_wqe; 554 + struct iscsi_kwqe_conn_update conn_update_kwqe; 555 + 556 + update_wqe = &conn_update_kwqe; 557 + 558 + update_wqe->hdr.op_code = ISCSI_KWQE_OPCODE_UPDATE_CONN; 559 + update_wqe->hdr.flags = 560 + (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT); 561 + 562 + /* 5771x requires conn context id to be passed as is */ 563 + if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_conn->ep->hba->cnic_dev_type)) 564 + update_wqe->context_id = bnx2i_conn->ep->ep_cid; 565 + else 566 + update_wqe->context_id = (bnx2i_conn->ep->ep_cid >> 7); 567 + update_wqe->conn_flags = 0; 568 + if (conn->hdrdgst_en) 569 + update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST; 570 + if (conn->datadgst_en) 571 + update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST; 572 + if (conn->session->initial_r2t_en) 573 + update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T; 574 + if (conn->session->imm_data_en) 575 + update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA; 576 + 577 + update_wqe->max_send_pdu_length = conn->max_xmit_dlength; 578 + update_wqe->max_recv_pdu_length = conn->max_recv_dlength; 579 + update_wqe->first_burst_length = conn->session->first_burst; 580 + update_wqe->max_burst_length = conn->session->max_burst; 581 + update_wqe->exp_stat_sn = conn->exp_statsn; 582 + update_wqe->max_outstanding_r2ts = conn->session->max_r2t; 583 + update_wqe->session_error_recovery_level = conn->session->erl; 584 + iscsi_conn_printk(KERN_ALERT, conn, 585 + "bnx2i: conn update - MBL 0x%x FBL 0x%x" 
586 + "MRDSL_I 0x%x MRDSL_T 0x%x \n", 587 + update_wqe->max_burst_length, 588 + update_wqe->first_burst_length, 589 + update_wqe->max_recv_pdu_length, 590 + update_wqe->max_send_pdu_length); 591 + 592 + kwqe_arr[0] = (struct kwqe *) update_wqe; 593 + if (hba->cnic && hba->cnic->submit_kwqes) 594 + hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1); 595 + } 596 + 597 + 598 + /** 599 + * bnx2i_ep_ofld_timer - post iSCSI logout request WQE to hardware 600 + * @data: endpoint (transport handle) structure pointer 601 + * 602 + * routine to handle connection offload/destroy request timeout 603 + */ 604 + void bnx2i_ep_ofld_timer(unsigned long data) 605 + { 606 + struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) data; 607 + 608 + if (ep->state == EP_STATE_OFLD_START) { 609 + printk(KERN_ALERT "ofld_timer: CONN_OFLD timeout\n"); 610 + ep->state = EP_STATE_OFLD_FAILED; 611 + } else if (ep->state == EP_STATE_DISCONN_START) { 612 + printk(KERN_ALERT "ofld_timer: CONN_DISCON timeout\n"); 613 + ep->state = EP_STATE_DISCONN_TIMEDOUT; 614 + } else if (ep->state == EP_STATE_CLEANUP_START) { 615 + printk(KERN_ALERT "ofld_timer: CONN_CLEANUP timeout\n"); 616 + ep->state = EP_STATE_CLEANUP_FAILED; 617 + } 618 + 619 + wake_up_interruptible(&ep->ofld_wait); 620 + } 621 + 622 + 623 + static int bnx2i_power_of2(u32 val) 624 + { 625 + u32 power = 0; 626 + if (val & (val - 1)) 627 + return power; 628 + val--; 629 + while (val) { 630 + val = val >> 1; 631 + power++; 632 + } 633 + return power; 634 + } 635 + 636 + 637 + /** 638 + * bnx2i_send_cmd_cleanup_req - send iscsi cmd context clean-up request 639 + * @hba: adapter structure pointer 640 + * @cmd: driver command structure which is requesting 641 + * a WQE to sent to chip for further processing 642 + * 643 + * prepares and posts CONN_OFLD_REQ1/2 KWQE 644 + */ 645 + void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd) 646 + { 647 + struct bnx2i_cleanup_request *cmd_cleanup; 648 + 649 + cmd_cleanup = 650 + (struct 
bnx2i_cleanup_request *)cmd->conn->ep->qp.sq_prod_qe; 651 + memset(cmd_cleanup, 0x00, sizeof(struct bnx2i_cleanup_request)); 652 + 653 + cmd_cleanup->op_code = ISCSI_OPCODE_CLEANUP_REQUEST; 654 + cmd_cleanup->itt = cmd->req.itt; 655 + cmd_cleanup->cq_index = 0; /* CQ# used for completion, 5771x only */ 656 + 657 + bnx2i_ring_dbell_update_sq_params(cmd->conn, 1); 658 + } 659 + 660 + 661 + /** 662 + * bnx2i_send_conn_destroy - initiates iscsi connection teardown process 663 + * @hba: adapter structure pointer 664 + * @ep: endpoint (transport indentifier) structure 665 + * 666 + * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE to initiate 667 + * iscsi connection context clean-up process 668 + */ 669 + void bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) 670 + { 671 + struct kwqe *kwqe_arr[2]; 672 + struct iscsi_kwqe_conn_destroy conn_cleanup; 673 + 674 + memset(&conn_cleanup, 0x00, sizeof(struct iscsi_kwqe_conn_destroy)); 675 + 676 + conn_cleanup.hdr.op_code = ISCSI_KWQE_OPCODE_DESTROY_CONN; 677 + conn_cleanup.hdr.flags = 678 + (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT); 679 + /* 5771x requires conn context id to be passed as is */ 680 + if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) 681 + conn_cleanup.context_id = ep->ep_cid; 682 + else 683 + conn_cleanup.context_id = (ep->ep_cid >> 7); 684 + 685 + conn_cleanup.reserved0 = (u16)ep->ep_iscsi_cid; 686 + 687 + kwqe_arr[0] = (struct kwqe *) &conn_cleanup; 688 + if (hba->cnic && hba->cnic->submit_kwqes) 689 + hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1); 690 + } 691 + 692 + 693 + /** 694 + * bnx2i_570x_send_conn_ofld_req - initiates iscsi conn context setup process 695 + * @hba: adapter structure pointer 696 + * @ep: endpoint (transport indentifier) structure 697 + * 698 + * 5706/5708/5709 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE 699 + */ 700 + static void bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba, 701 + struct bnx2i_endpoint *ep) 
{
	struct kwqe *kwqe_arr[2];
	struct iscsi_kwqe_conn_offload1 ofld_req1;
	struct iscsi_kwqe_conn_offload2 ofld_req2;
	dma_addr_t dma_addr;
	int num_kwqes = 2;
	u32 *ptbl;

	ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1;
	ofld_req1.hdr.flags =
		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);

	ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid;

	/* OFLD_REQ1 carries the SQ and CQ page table base addresses */
	dma_addr = ep->qp.sq_pgtbl_phys;
	ofld_req1.sq_page_table_addr_lo = (u32) dma_addr;
	ofld_req1.sq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);

	dma_addr = ep->qp.cq_pgtbl_phys;
	ofld_req1.cq_page_table_addr_lo = (u32) dma_addr;
	ofld_req1.cq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);

	ofld_req2.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN2;
	ofld_req2.hdr.flags =
		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);

	dma_addr = ep->qp.rq_pgtbl_phys;
	ofld_req2.rq_page_table_addr_lo = (u32) dma_addr;
	ofld_req2.rq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);

	/* copy the first PTE of each table; on 570x the page tables were
	 * written big-endian (hi word first), hence hi = first word */
	ptbl = (u32 *) ep->qp.sq_pgtbl_virt;

	ofld_req2.sq_first_pte.hi = *ptbl++;
	ofld_req2.sq_first_pte.lo = *ptbl;

	ptbl = (u32 *) ep->qp.cq_pgtbl_virt;
	ofld_req2.cq_first_pte.hi = *ptbl++;
	ofld_req2.cq_first_pte.lo = *ptbl;

	kwqe_arr[0] = (struct kwqe *) &ofld_req1;
	kwqe_arr[1] = (struct kwqe *) &ofld_req2;
	ofld_req2.num_additional_wqes = 0;

	/* NOTE(review): ofld_req1/ofld_req2 are not memset, so any HSI
	 * reserved fields are sent with stack garbage - confirm the
	 * firmware ignores them */
	if (hba->cnic && hba->cnic->submit_kwqes)
		hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
}


/**
 * bnx2i_5771x_send_conn_ofld_req - initiates iscsi connection context creation
 * @hba:	adapter structure pointer
 * @ep:		endpoint (transport indentifier) structure
 *
 * 57710 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
 */
static void bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba,
					   struct bnx2i_endpoint *ep)
{
	struct kwqe *kwqe_arr[5];
	struct iscsi_kwqe_conn_offload1 ofld_req1;
	struct iscsi_kwqe_conn_offload2 ofld_req2;
	struct iscsi_kwqe_conn_offload3 ofld_req3[1];
	dma_addr_t dma_addr;
	int num_kwqes = 2;
	u32 *ptbl;

	ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1;
	ofld_req1.hdr.flags =
		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);

	ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid;

	/* on 57710 the page tables start after a per-queue doorbell area,
	 * so the addresses passed to firmware are offset by the DB size */
	dma_addr = ep->qp.sq_pgtbl_phys + ISCSI_SQ_DB_SIZE;
	ofld_req1.sq_page_table_addr_lo = (u32) dma_addr;
	ofld_req1.sq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);

	dma_addr = ep->qp.cq_pgtbl_phys + ISCSI_CQ_DB_SIZE;
	ofld_req1.cq_page_table_addr_lo = (u32) dma_addr;
	ofld_req1.cq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);

	ofld_req2.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN2;
	ofld_req2.hdr.flags =
		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);

	dma_addr = ep->qp.rq_pgtbl_phys + ISCSI_RQ_DB_SIZE;
	ofld_req2.rq_page_table_addr_lo = (u32) dma_addr;
	ofld_req2.rq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);

	ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE);
	ofld_req2.sq_first_pte.hi = *ptbl++;
	ofld_req2.sq_first_pte.lo = *ptbl;

	ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE);
	ofld_req2.cq_first_pte.hi = *ptbl++;
	ofld_req2.cq_first_pte.lo = *ptbl;

	kwqe_arr[0] = (struct kwqe *) &ofld_req1;
	kwqe_arr[1] = (struct kwqe *) &ofld_req2;

	/* 57710 needs a third KWQE carrying the RQ's first PTE */
	ofld_req2.num_additional_wqes = 1;
	memset(ofld_req3, 0x00, sizeof(ofld_req3[0]));
	ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE);
	ofld_req3[0].qp_first_pte[0].hi = *ptbl++;
	ofld_req3[0].qp_first_pte[0].lo = *ptbl;

	kwqe_arr[2] = (struct kwqe *) ofld_req3;
	/* need if we decide to go with multiple KCQE's per conn */
	num_kwqes += 1;

	if (hba->cnic && hba->cnic->submit_kwqes)
		hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
}

/**
 * bnx2i_send_conn_ofld_req - initiates iscsi connection context setup process
 *
 * @hba:	adapter structure pointer
 * @ep:		endpoint (transport indentifier) structure
 *
 * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE, dispatching to
 * the 57710 or 5706/5708/5709 specific variant based on chip type
 */
void bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
{
	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
		bnx2i_5771x_send_conn_ofld_req(hba, ep);
	else
		bnx2i_570x_send_conn_ofld_req(hba, ep);
}


/**
 * setup_qp_page_tables - iscsi QP page table setup function
 * @ep:		endpoint (transport indentifier) structure
 *
 * Sets up page tables for SQ/RQ/CQ, 1G/sec (5706/5708/5709) devices requires
 * 64-bit address in big endian format.
Whereas 10G/sec (57710) requires 838 + * PT in little endian format 839 + */ 840 + static void setup_qp_page_tables(struct bnx2i_endpoint *ep) 841 + { 842 + int num_pages; 843 + u32 *ptbl; 844 + dma_addr_t page; 845 + int cnic_dev_10g; 846 + 847 + if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) 848 + cnic_dev_10g = 1; 849 + else 850 + cnic_dev_10g = 0; 851 + 852 + /* SQ page table */ 853 + memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size); 854 + num_pages = ep->qp.sq_mem_size / PAGE_SIZE; 855 + page = ep->qp.sq_phys; 856 + 857 + if (cnic_dev_10g) 858 + ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE); 859 + else 860 + ptbl = (u32 *) ep->qp.sq_pgtbl_virt; 861 + while (num_pages--) { 862 + if (cnic_dev_10g) { 863 + /* PTE is written in little endian format for 57710 */ 864 + *ptbl = (u32) page; 865 + ptbl++; 866 + *ptbl = (u32) ((u64) page >> 32); 867 + ptbl++; 868 + page += PAGE_SIZE; 869 + } else { 870 + /* PTE is written in big endian format for 871 + * 5706/5708/5709 devices */ 872 + *ptbl = (u32) ((u64) page >> 32); 873 + ptbl++; 874 + *ptbl = (u32) page; 875 + ptbl++; 876 + page += PAGE_SIZE; 877 + } 878 + } 879 + 880 + /* RQ page table */ 881 + memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size); 882 + num_pages = ep->qp.rq_mem_size / PAGE_SIZE; 883 + page = ep->qp.rq_phys; 884 + 885 + if (cnic_dev_10g) 886 + ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE); 887 + else 888 + ptbl = (u32 *) ep->qp.rq_pgtbl_virt; 889 + while (num_pages--) { 890 + if (cnic_dev_10g) { 891 + /* PTE is written in little endian format for 57710 */ 892 + *ptbl = (u32) page; 893 + ptbl++; 894 + *ptbl = (u32) ((u64) page >> 32); 895 + ptbl++; 896 + page += PAGE_SIZE; 897 + } else { 898 + /* PTE is written in big endian format for 899 + * 5706/5708/5709 devices */ 900 + *ptbl = (u32) ((u64) page >> 32); 901 + ptbl++; 902 + *ptbl = (u32) page; 903 + ptbl++; 904 + page += PAGE_SIZE; 905 + } 906 + } 907 + 908 + /* CQ page table */ 909 + 
memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size); 910 + num_pages = ep->qp.cq_mem_size / PAGE_SIZE; 911 + page = ep->qp.cq_phys; 912 + 913 + if (cnic_dev_10g) 914 + ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE); 915 + else 916 + ptbl = (u32 *) ep->qp.cq_pgtbl_virt; 917 + while (num_pages--) { 918 + if (cnic_dev_10g) { 919 + /* PTE is written in little endian format for 57710 */ 920 + *ptbl = (u32) page; 921 + ptbl++; 922 + *ptbl = (u32) ((u64) page >> 32); 923 + ptbl++; 924 + page += PAGE_SIZE; 925 + } else { 926 + /* PTE is written in big endian format for 927 + * 5706/5708/5709 devices */ 928 + *ptbl = (u32) ((u64) page >> 32); 929 + ptbl++; 930 + *ptbl = (u32) page; 931 + ptbl++; 932 + page += PAGE_SIZE; 933 + } 934 + } 935 + } 936 + 937 + 938 + /** 939 + * bnx2i_alloc_qp_resc - allocates required resources for QP. 940 + * @hba: adapter structure pointer 941 + * @ep: endpoint (transport indentifier) structure 942 + * 943 + * Allocate QP (transport layer for iSCSI connection) resources, DMA'able 944 + * memory for SQ/RQ/CQ and page tables. 
EP structure elements such
 * as producer/consumer indexes/pointers, queue sizes and page table
 * contents are setup
 */
int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
{
	struct bnx2i_5771x_cq_db *cq_db;

	ep->hba = hba;
	ep->conn = NULL;
	ep->ep_cid = ep->ep_iscsi_cid = ep->ep_pg_cid = 0;

	/* Allocate page table memory for SQ which is page aligned */
	ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE;
	/* round queue and page-table sizes up to a whole page */
	ep->qp.sq_mem_size =
		(ep->qp.sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
	ep->qp.sq_pgtbl_size =
		(ep->qp.sq_mem_size / PAGE_SIZE) * sizeof(void *);
	ep->qp.sq_pgtbl_size =
		(ep->qp.sq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;

	ep->qp.sq_pgtbl_virt =
		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size,
				   &ep->qp.sq_pgtbl_phys, GFP_KERNEL);
	if (!ep->qp.sq_pgtbl_virt) {
		printk(KERN_ALERT "bnx2i: unable to alloc SQ PT mem (%d)\n",
		       ep->qp.sq_pgtbl_size);
		goto mem_alloc_err;
	}

	/* Allocate memory area for actual SQ element */
	ep->qp.sq_virt =
		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
				   &ep->qp.sq_phys, GFP_KERNEL);
	if (!ep->qp.sq_virt) {
		printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n",
		       ep->qp.sq_mem_size);
		goto mem_alloc_err;
	}

	memset(ep->qp.sq_virt, 0x00, ep->qp.sq_mem_size);
	/* initialize SQ producer/consumer bookkeeping to an empty queue */
	ep->qp.sq_first_qe = ep->qp.sq_virt;
	ep->qp.sq_prod_qe = ep->qp.sq_first_qe;
	ep->qp.sq_cons_qe = ep->qp.sq_first_qe;
	ep->qp.sq_last_qe = &ep->qp.sq_first_qe[hba->max_sqes - 1];
	ep->qp.sq_prod_idx = 0;
	ep->qp.sq_cons_idx = 0;
	ep->qp.sqe_left = hba->max_sqes;

	/* Allocate page table memory for CQ which is page aligned */
	ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE;
	ep->qp.cq_mem_size =
		(ep->qp.cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
	ep->qp.cq_pgtbl_size =
		(ep->qp.cq_mem_size / PAGE_SIZE) * sizeof(void *);
	ep->qp.cq_pgtbl_size =
		(ep->qp.cq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;

	ep->qp.cq_pgtbl_virt =
		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size,
				   &ep->qp.cq_pgtbl_phys, GFP_KERNEL);
	if (!ep->qp.cq_pgtbl_virt) {
		printk(KERN_ALERT "bnx2i: unable to alloc CQ PT memory %d\n",
		       ep->qp.cq_pgtbl_size);
		goto mem_alloc_err;
	}

	/* Allocate memory area for actual CQ element */
	ep->qp.cq_virt =
		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
				   &ep->qp.cq_phys, GFP_KERNEL);
	if (!ep->qp.cq_virt) {
		printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n",
		       ep->qp.cq_mem_size);
		goto mem_alloc_err;
	}
	memset(ep->qp.cq_virt, 0x00, ep->qp.cq_mem_size);

	ep->qp.cq_first_qe = ep->qp.cq_virt;
	ep->qp.cq_prod_qe = ep->qp.cq_first_qe;
	ep->qp.cq_cons_qe = ep->qp.cq_first_qe;
	ep->qp.cq_last_qe = &ep->qp.cq_first_qe[hba->max_cqes - 1];
	ep->qp.cq_prod_idx = 0;
	ep->qp.cq_cons_idx = 0;
	ep->qp.cqe_left = hba->max_cqes;
	ep->qp.cqe_exp_seq_sn = ISCSI_INITIAL_SN;
	ep->qp.cqe_size = hba->max_cqes;

	/* Invalidate all EQ CQE index, req only for 57710.
	 * NOTE(review): setup_qp_page_tables() below memsets the whole of
	 * cq_pgtbl_virt to zero, which overwrites this 0xFF pattern -
	 * confirm the intended ordering. */
	cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt;
	memset(cq_db->sqn, 0xFF, sizeof(cq_db->sqn[0]) * BNX2X_MAX_CQS);

	/* Allocate page table memory for RQ which is page aligned */
	ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE;
	ep->qp.rq_mem_size =
		(ep->qp.rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
	ep->qp.rq_pgtbl_size =
		(ep->qp.rq_mem_size / PAGE_SIZE) * sizeof(void *);
	ep->qp.rq_pgtbl_size =
		(ep->qp.rq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;

	ep->qp.rq_pgtbl_virt =
		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size,
				   &ep->qp.rq_pgtbl_phys, GFP_KERNEL);
	if (!ep->qp.rq_pgtbl_virt) {
		printk(KERN_ALERT "bnx2i: unable to alloc RQ PT mem %d\n",
		       ep->qp.rq_pgtbl_size);
		goto mem_alloc_err;
	}

	/* Allocate memory area for actual RQ element */
	ep->qp.rq_virt =
		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size,
				   &ep->qp.rq_phys, GFP_KERNEL);
	if (!ep->qp.rq_virt) {
		printk(KERN_ALERT "bnx2i: unable to alloc RQ BD memory %d\n",
		       ep->qp.rq_mem_size);
		goto mem_alloc_err;
	}

	ep->qp.rq_first_qe = ep->qp.rq_virt;
	ep->qp.rq_prod_qe = ep->qp.rq_first_qe;
	ep->qp.rq_cons_qe = ep->qp.rq_first_qe;
	ep->qp.rq_last_qe = &ep->qp.rq_first_qe[hba->max_rqes - 1];
	/* 0x8000 is presumably the doorbell 'valid' bit of the RQ producer
	 * index - TODO confirm against the HSI definition */
	ep->qp.rq_prod_idx = 0x8000;
	ep->qp.rq_cons_idx = 0;
	ep->qp.rqe_left = hba->max_rqes;

	setup_qp_page_tables(ep);

	return 0;

mem_alloc_err:
	/* partial allocations are released by the free routine, which
	 * checks each pointer before freeing */
	bnx2i_free_qp_resc(hba, ep);
	return -ENOMEM;
}



/**
 * bnx2i_free_qp_resc - free memory resources held by QP
 * @hba: adapter structure pointer
 * @ep: endpoint (transport indentifier) structure
 *
 * Free QP resources - SQ/RQ/CQ memory and page tables.
1089 + */ 1090 + void bnx2i_free_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) 1091 + { 1092 + if (ep->qp.ctx_base) { 1093 + iounmap(ep->qp.ctx_base); 1094 + ep->qp.ctx_base = NULL; 1095 + } 1096 + /* Free SQ mem */ 1097 + if (ep->qp.sq_pgtbl_virt) { 1098 + dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size, 1099 + ep->qp.sq_pgtbl_virt, ep->qp.sq_pgtbl_phys); 1100 + ep->qp.sq_pgtbl_virt = NULL; 1101 + ep->qp.sq_pgtbl_phys = 0; 1102 + } 1103 + if (ep->qp.sq_virt) { 1104 + dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size, 1105 + ep->qp.sq_virt, ep->qp.sq_phys); 1106 + ep->qp.sq_virt = NULL; 1107 + ep->qp.sq_phys = 0; 1108 + } 1109 + 1110 + /* Free RQ mem */ 1111 + if (ep->qp.rq_pgtbl_virt) { 1112 + dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size, 1113 + ep->qp.rq_pgtbl_virt, ep->qp.rq_pgtbl_phys); 1114 + ep->qp.rq_pgtbl_virt = NULL; 1115 + ep->qp.rq_pgtbl_phys = 0; 1116 + } 1117 + if (ep->qp.rq_virt) { 1118 + dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size, 1119 + ep->qp.rq_virt, ep->qp.rq_phys); 1120 + ep->qp.rq_virt = NULL; 1121 + ep->qp.rq_phys = 0; 1122 + } 1123 + 1124 + /* Free CQ mem */ 1125 + if (ep->qp.cq_pgtbl_virt) { 1126 + dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size, 1127 + ep->qp.cq_pgtbl_virt, ep->qp.cq_pgtbl_phys); 1128 + ep->qp.cq_pgtbl_virt = NULL; 1129 + ep->qp.cq_pgtbl_phys = 0; 1130 + } 1131 + if (ep->qp.cq_virt) { 1132 + dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size, 1133 + ep->qp.cq_virt, ep->qp.cq_phys); 1134 + ep->qp.cq_virt = NULL; 1135 + ep->qp.cq_phys = 0; 1136 + } 1137 + } 1138 + 1139 + 1140 + /** 1141 + * bnx2i_send_fw_iscsi_init_msg - initiates initial handshake with iscsi f/w 1142 + * @hba: adapter structure pointer 1143 + * 1144 + * Send down iscsi_init KWQEs which initiates the initial handshake with the f/w 1145 + * This results in iSCSi support validation and on-chip context manager 1146 + * initialization. 
Firmware completes this handshake with a CQE carrying 1147 + * the result of iscsi support validation. Parameter carried by 1148 + * iscsi init request determines the number of offloaded connection and 1149 + * tolerance level for iscsi protocol violation this hba/chip can support 1150 + */ 1151 + int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba) 1152 + { 1153 + struct kwqe *kwqe_arr[3]; 1154 + struct iscsi_kwqe_init1 iscsi_init; 1155 + struct iscsi_kwqe_init2 iscsi_init2; 1156 + int rc = 0; 1157 + u64 mask64; 1158 + 1159 + bnx2i_adjust_qp_size(hba); 1160 + 1161 + iscsi_init.flags = 1162 + ISCSI_PAGE_SIZE_4K << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT; 1163 + if (en_tcp_dack) 1164 + iscsi_init.flags |= ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE; 1165 + iscsi_init.reserved0 = 0; 1166 + iscsi_init.num_cqs = 1; 1167 + iscsi_init.hdr.op_code = ISCSI_KWQE_OPCODE_INIT1; 1168 + iscsi_init.hdr.flags = 1169 + (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT); 1170 + 1171 + iscsi_init.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma; 1172 + iscsi_init.dummy_buffer_addr_hi = 1173 + (u32) ((u64) hba->dummy_buf_dma >> 32); 1174 + 1175 + hba->ctx_ccell_tasks = 1176 + ((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16)); 1177 + iscsi_init.num_ccells_per_conn = hba->num_ccell; 1178 + iscsi_init.num_tasks_per_conn = hba->max_sqes; 1179 + iscsi_init.sq_wqes_per_page = PAGE_SIZE / BNX2I_SQ_WQE_SIZE; 1180 + iscsi_init.sq_num_wqes = hba->max_sqes; 1181 + iscsi_init.cq_log_wqes_per_page = 1182 + (u8) bnx2i_power_of2(PAGE_SIZE / BNX2I_CQE_SIZE); 1183 + iscsi_init.cq_num_wqes = hba->max_cqes; 1184 + iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE + 1185 + (PAGE_SIZE - 1)) / PAGE_SIZE; 1186 + iscsi_init.sq_num_pages = (hba->max_sqes * BNX2I_SQ_WQE_SIZE + 1187 + (PAGE_SIZE - 1)) / PAGE_SIZE; 1188 + iscsi_init.rq_buffer_size = BNX2I_RQ_WQE_SIZE; 1189 + iscsi_init.rq_num_wqes = hba->max_rqes; 1190 + 1191 + 1192 + iscsi_init2.hdr.op_code = ISCSI_KWQE_OPCODE_INIT2; 1193 + 
iscsi_init2.hdr.flags = 1194 + (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT); 1195 + iscsi_init2.max_cq_sqn = hba->max_cqes * 2 + 1; 1196 + mask64 = 0x0ULL; 1197 + mask64 |= ( 1198 + /* CISCO MDS */ 1199 + (1UL << 1200 + ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV) | 1201 + /* HP MSA1510i */ 1202 + (1UL << 1203 + ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN) | 1204 + /* EMC */ 1205 + (1ULL << ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN)); 1206 + if (error_mask1) 1207 + iscsi_init2.error_bit_map[0] = error_mask1; 1208 + else 1209 + iscsi_init2.error_bit_map[0] = (u32) mask64; 1210 + 1211 + if (error_mask2) 1212 + iscsi_init2.error_bit_map[1] = error_mask2; 1213 + else 1214 + iscsi_init2.error_bit_map[1] = (u32) (mask64 >> 32); 1215 + 1216 + iscsi_error_mask = mask64; 1217 + 1218 + kwqe_arr[0] = (struct kwqe *) &iscsi_init; 1219 + kwqe_arr[1] = (struct kwqe *) &iscsi_init2; 1220 + 1221 + if (hba->cnic && hba->cnic->submit_kwqes) 1222 + rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 2); 1223 + return rc; 1224 + } 1225 + 1226 + 1227 + /** 1228 + * bnx2i_process_scsi_cmd_resp - this function handles scsi cmd completion. 
1229 + * @conn: iscsi connection 1230 + * @cqe: pointer to newly DMA'ed CQE entry for processing 1231 + * 1232 + * process SCSI CMD Response CQE & complete the request to SCSI-ML 1233 + */ 1234 + static int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session, 1235 + struct bnx2i_conn *bnx2i_conn, 1236 + struct cqe *cqe) 1237 + { 1238 + struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; 1239 + struct bnx2i_cmd_response *resp_cqe; 1240 + struct bnx2i_cmd *bnx2i_cmd; 1241 + struct iscsi_task *task; 1242 + struct iscsi_cmd_rsp *hdr; 1243 + u32 datalen = 0; 1244 + 1245 + resp_cqe = (struct bnx2i_cmd_response *)cqe; 1246 + spin_lock(&session->lock); 1247 + task = iscsi_itt_to_task(conn, 1248 + resp_cqe->itt & ISCSI_CMD_RESPONSE_INDEX); 1249 + if (!task) 1250 + goto fail; 1251 + 1252 + bnx2i_cmd = task->dd_data; 1253 + 1254 + if (bnx2i_cmd->req.op_attr & ISCSI_CMD_REQUEST_READ) { 1255 + conn->datain_pdus_cnt += 1256 + resp_cqe->task_stat.read_stat.num_data_outs; 1257 + conn->rxdata_octets += 1258 + bnx2i_cmd->req.total_data_transfer_length; 1259 + } else { 1260 + conn->dataout_pdus_cnt += 1261 + resp_cqe->task_stat.read_stat.num_data_outs; 1262 + conn->r2t_pdus_cnt += 1263 + resp_cqe->task_stat.read_stat.num_r2ts; 1264 + conn->txdata_octets += 1265 + bnx2i_cmd->req.total_data_transfer_length; 1266 + } 1267 + bnx2i_iscsi_unmap_sg_list(bnx2i_cmd); 1268 + 1269 + hdr = (struct iscsi_cmd_rsp *)task->hdr; 1270 + resp_cqe = (struct bnx2i_cmd_response *)cqe; 1271 + hdr->opcode = resp_cqe->op_code; 1272 + hdr->max_cmdsn = cpu_to_be32(resp_cqe->max_cmd_sn); 1273 + hdr->exp_cmdsn = cpu_to_be32(resp_cqe->exp_cmd_sn); 1274 + hdr->response = resp_cqe->response; 1275 + hdr->cmd_status = resp_cqe->status; 1276 + hdr->flags = resp_cqe->response_flags; 1277 + hdr->residual_count = cpu_to_be32(resp_cqe->residual_count); 1278 + 1279 + if (resp_cqe->op_code == ISCSI_OP_SCSI_DATA_IN) 1280 + goto done; 1281 + 1282 + if (resp_cqe->status == SAM_STAT_CHECK_CONDITION) { 1283 + datalen 
= resp_cqe->data_length; 1284 + if (datalen < 2) 1285 + goto done; 1286 + 1287 + if (datalen > BNX2I_RQ_WQE_SIZE) { 1288 + iscsi_conn_printk(KERN_ERR, conn, 1289 + "sense data len %d > RQ sz\n", 1290 + datalen); 1291 + datalen = BNX2I_RQ_WQE_SIZE; 1292 + } else if (datalen > ISCSI_DEF_MAX_RECV_SEG_LEN) { 1293 + iscsi_conn_printk(KERN_ERR, conn, 1294 + "sense data len %d > conn data\n", 1295 + datalen); 1296 + datalen = ISCSI_DEF_MAX_RECV_SEG_LEN; 1297 + } 1298 + 1299 + bnx2i_get_rq_buf(bnx2i_cmd->conn, conn->data, datalen); 1300 + bnx2i_put_rq_buf(bnx2i_cmd->conn, 1); 1301 + } 1302 + 1303 + done: 1304 + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, 1305 + conn->data, datalen); 1306 + fail: 1307 + spin_unlock(&session->lock); 1308 + return 0; 1309 + } 1310 + 1311 + 1312 + /** 1313 + * bnx2i_process_login_resp - this function handles iscsi login response 1314 + * @session: iscsi session pointer 1315 + * @bnx2i_conn: iscsi connection pointer 1316 + * @cqe: pointer to newly DMA'ed CQE entry for processing 1317 + * 1318 + * process Login Response CQE & complete it to open-iscsi user daemon 1319 + */ 1320 + static int bnx2i_process_login_resp(struct iscsi_session *session, 1321 + struct bnx2i_conn *bnx2i_conn, 1322 + struct cqe *cqe) 1323 + { 1324 + struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; 1325 + struct iscsi_task *task; 1326 + struct bnx2i_login_response *login; 1327 + struct iscsi_login_rsp *resp_hdr; 1328 + int pld_len; 1329 + int pad_len; 1330 + 1331 + login = (struct bnx2i_login_response *) cqe; 1332 + spin_lock(&session->lock); 1333 + task = iscsi_itt_to_task(conn, 1334 + login->itt & ISCSI_LOGIN_RESPONSE_INDEX); 1335 + if (!task) 1336 + goto done; 1337 + 1338 + resp_hdr = (struct iscsi_login_rsp *) &bnx2i_conn->gen_pdu.resp_hdr; 1339 + memset(resp_hdr, 0, sizeof(struct iscsi_hdr)); 1340 + resp_hdr->opcode = login->op_code; 1341 + resp_hdr->flags = login->response_flags; 1342 + resp_hdr->max_version = login->version_max; 1343 + 
resp_hdr->active_version = login->version_active;; 1344 + resp_hdr->hlength = 0; 1345 + 1346 + hton24(resp_hdr->dlength, login->data_length); 1347 + memcpy(resp_hdr->isid, &login->isid_lo, 6); 1348 + resp_hdr->tsih = cpu_to_be16(login->tsih); 1349 + resp_hdr->itt = task->hdr->itt; 1350 + resp_hdr->statsn = cpu_to_be32(login->stat_sn); 1351 + resp_hdr->exp_cmdsn = cpu_to_be32(login->exp_cmd_sn); 1352 + resp_hdr->max_cmdsn = cpu_to_be32(login->max_cmd_sn); 1353 + resp_hdr->status_class = login->status_class; 1354 + resp_hdr->status_detail = login->status_detail; 1355 + pld_len = login->data_length; 1356 + bnx2i_conn->gen_pdu.resp_wr_ptr = 1357 + bnx2i_conn->gen_pdu.resp_buf + pld_len; 1358 + 1359 + pad_len = 0; 1360 + if (pld_len & 0x3) 1361 + pad_len = 4 - (pld_len % 4); 1362 + 1363 + if (pad_len) { 1364 + int i = 0; 1365 + for (i = 0; i < pad_len; i++) { 1366 + bnx2i_conn->gen_pdu.resp_wr_ptr[0] = 0; 1367 + bnx2i_conn->gen_pdu.resp_wr_ptr++; 1368 + } 1369 + } 1370 + 1371 + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, 1372 + bnx2i_conn->gen_pdu.resp_buf, 1373 + bnx2i_conn->gen_pdu.resp_wr_ptr - bnx2i_conn->gen_pdu.resp_buf); 1374 + done: 1375 + spin_unlock(&session->lock); 1376 + return 0; 1377 + } 1378 + 1379 + /** 1380 + * bnx2i_process_tmf_resp - this function handles iscsi TMF response 1381 + * @session: iscsi session pointer 1382 + * @bnx2i_conn: iscsi connection pointer 1383 + * @cqe: pointer to newly DMA'ed CQE entry for processing 1384 + * 1385 + * process iSCSI TMF Response CQE and wake up the driver eh thread. 
 */
static int bnx2i_process_tmf_resp(struct iscsi_session *session,
				  struct bnx2i_conn *bnx2i_conn,
				  struct cqe *cqe)
{
	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
	struct iscsi_task *task;
	struct bnx2i_tmf_response *tmf_cqe;
	struct iscsi_tm_rsp *resp_hdr;

	tmf_cqe = (struct bnx2i_tmf_response *)cqe;
	spin_lock(&session->lock);
	task = iscsi_itt_to_task(conn,
				 tmf_cqe->itt & ISCSI_TMF_RESPONSE_INDEX);
	if (!task)
		goto done;

	/* rebuild a TMF Response PDU header from the CQE fields and hand
	 * it to libiscsi under the session lock
	 */
	resp_hdr = (struct iscsi_tm_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
	memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
	resp_hdr->opcode = tmf_cqe->op_code;
	resp_hdr->max_cmdsn = cpu_to_be32(tmf_cqe->max_cmd_sn);
	resp_hdr->exp_cmdsn = cpu_to_be32(tmf_cqe->exp_cmd_sn);
	resp_hdr->itt = task->hdr->itt;
	resp_hdr->response = tmf_cqe->response;

	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
done:
	spin_unlock(&session->lock);
	return 0;
}

/**
 * bnx2i_process_logout_resp - this function handles iscsi logout response
 * @session: iscsi session pointer
 * @bnx2i_conn: iscsi connection pointer
 * @cqe: pointer to newly DMA'ed CQE entry for processing
 *
 * process iSCSI Logout Response CQE & make function call to
 * notify the user daemon.
 */
static int bnx2i_process_logout_resp(struct iscsi_session *session,
				     struct bnx2i_conn *bnx2i_conn,
				     struct cqe *cqe)
{
	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
	struct iscsi_task *task;
	struct bnx2i_logout_response *logout;
	struct iscsi_logout_rsp *resp_hdr;

	logout = (struct bnx2i_logout_response *) cqe;
	spin_lock(&session->lock);
	task = iscsi_itt_to_task(conn,
				 logout->itt & ISCSI_LOGOUT_RESPONSE_INDEX);
	if (!task)
		goto done;

	resp_hdr = (struct iscsi_logout_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
	memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
	resp_hdr->opcode = logout->op_code;
	resp_hdr->flags = logout->response;
	resp_hdr->hlength = 0;

	resp_hdr->itt = task->hdr->itt;
	/* the CQE carries no StatSN; echo the task's ExpStatSN instead */
	resp_hdr->statsn = task->hdr->exp_statsn;
	resp_hdr->exp_cmdsn = cpu_to_be32(logout->exp_cmd_sn);
	resp_hdr->max_cmdsn = cpu_to_be32(logout->max_cmd_sn);

	resp_hdr->t2wait = cpu_to_be32(logout->time_to_wait);
	resp_hdr->t2retain = cpu_to_be32(logout->time_to_retain);

	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
done:
	spin_unlock(&session->lock);
	return 0;
}

/**
 * bnx2i_process_nopin_local_cmpl - this function handles iscsi nopin CQE
 * @session: iscsi session pointer
 * @bnx2i_conn: iscsi connection pointer
 * @cqe: pointer to newly DMA'ed CQE entry for processing
 *
 * process iSCSI NOPIN local completion CQE, frees ITT and command structures
 */
static void bnx2i_process_nopin_local_cmpl(struct iscsi_session *session,
					   struct bnx2i_conn *bnx2i_conn,
					   struct cqe *cqe)
{
	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
	struct bnx2i_nop_in_msg *nop_in;
	struct iscsi_task *task;

	nop_in = (struct bnx2i_nop_in_msg *)cqe;
	spin_lock(&session->lock);
	task = iscsi_itt_to_task(conn,
				 nop_in->itt & ISCSI_NOP_IN_MSG_INDEX);
	/* drop the reference taken when the nop-out was issued; no PDU is
	 * delivered to libiscsi for a local completion
	 */
	if (task)
		iscsi_put_task(task);
	spin_unlock(&session->lock);
}

/**
 * bnx2i_unsol_pdu_adjust_rq - makes adjustments to RQ after unsol pdu is recvd
 * @bnx2i_conn: iscsi connection
 *
 * Firmware advances RQ producer index for every unsolicited PDU even if
 * payload data length is '0'. This function makes corresponding
 * adjustments on the driver side to match this f/w behavior
 */
static void bnx2i_unsol_pdu_adjust_rq(struct bnx2i_conn *bnx2i_conn)
{
	char dummy_rq_data[2];
	bnx2i_get_rq_buf(bnx2i_conn, dummy_rq_data, 1);
	bnx2i_put_rq_buf(bnx2i_conn, 1);
}


/**
 * bnx2i_process_nopin_mesg - this function handles iscsi nopin CQE
 * @session: iscsi session pointer
 * @bnx2i_conn: iscsi connection pointer
 * @cqe: pointer to newly DMA'ed CQE entry for processing
 *
 * process iSCSI target's proactive iSCSI NOPIN request
 */
static int bnx2i_process_nopin_mesg(struct iscsi_session *session,
				    struct bnx2i_conn *bnx2i_conn,
				    struct cqe *cqe)
{
	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
	struct iscsi_task *task;
	struct bnx2i_nop_in_msg *nop_in;
	struct iscsi_nopin *hdr;
	u32 itt;
	int tgt_async_nop = 0;

	nop_in = (struct bnx2i_nop_in_msg *)cqe;
	itt = nop_in->itt & ISCSI_NOP_IN_MSG_INDEX;

	spin_lock(&session->lock);
	hdr = (struct iscsi_nopin *)&bnx2i_conn->gen_pdu.resp_hdr;
	memset(hdr, 0, sizeof(struct iscsi_hdr));
	hdr->opcode = nop_in->op_code;
	hdr->max_cmdsn = cpu_to_be32(nop_in->max_cmd_sn);
	hdr->exp_cmdsn = cpu_to_be32(nop_in->exp_cmd_sn);
	hdr->ttt = cpu_to_be32(nop_in->ttt);

	/* reserved ITT means this is a target-initiated (unsolicited)
	 * NOP-IN, which also consumed an RQ slot in the firmware
	 */
	if (itt == (u16) RESERVED_ITT) {
		bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
		hdr->itt = RESERVED_ITT;
		tgt_async_nop = 1;
		goto done;
	}

	/* this is a response to one of our nop-outs */
	task = iscsi_itt_to_task(conn, itt);
	if (task) {
		hdr->flags = ISCSI_FLAG_CMD_FINAL;
		hdr->itt = task->hdr->itt;
		hdr->ttt = cpu_to_be32(nop_in->ttt);
		memcpy(hdr->lun, nop_in->lun, 8);
	}
done:
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
	spin_unlock(&session->lock);

	return tgt_async_nop;
}


/**
 * bnx2i_process_async_mesg - this function handles iscsi async message
 * @session: iscsi session pointer
 * @bnx2i_conn: iscsi connection pointer
 * @cqe: pointer to newly DMA'ed CQE entry for processing
 *
 * process iSCSI ASYNC Message
 */
static void bnx2i_process_async_mesg(struct iscsi_session *session,
				     struct bnx2i_conn *bnx2i_conn,
				     struct cqe *cqe)
{
	struct bnx2i_async_msg *async_cqe;
	struct iscsi_async *resp_hdr;
	u8 async_event;

	bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);

	async_cqe = (struct bnx2i_async_msg *)cqe;
	async_event = async_cqe->async_event;

	if (async_event == ISCSI_ASYNC_MSG_SCSI_EVENT) {
		iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
				  "async: scsi events not supported\n");
		return;
	}

	spin_lock(&session->lock);
	resp_hdr = (struct iscsi_async *) &bnx2i_conn->gen_pdu.resp_hdr;
	memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
	resp_hdr->opcode = async_cqe->op_code;
	/* 0x80: F (final) bit - same value as ISCSI_FLAG_CMD_FINAL used
	 * elsewhere in this file
	 */
	resp_hdr->flags = 0x80;

	memcpy(resp_hdr->lun, async_cqe->lun, 8);
	resp_hdr->exp_cmdsn = cpu_to_be32(async_cqe->exp_cmd_sn);
	resp_hdr->max_cmdsn = cpu_to_be32(async_cqe->max_cmd_sn);

	resp_hdr->async_event = async_cqe->async_event;
	resp_hdr->async_vcode = async_cqe->async_vcode;

	resp_hdr->param1 = cpu_to_be16(async_cqe->param1);
	resp_hdr->param2 = cpu_to_be16(async_cqe->param2);
	resp_hdr->param3 = cpu_to_be16(async_cqe->param3);

	__iscsi_complete_pdu(bnx2i_conn->cls_conn->dd_data,
			     (struct iscsi_hdr *)resp_hdr, NULL, 0);
	spin_unlock(&session->lock);
}


/**
 * bnx2i_process_reject_mesg - process iscsi reject pdu
 * @session: iscsi session pointer
 * @bnx2i_conn: iscsi connection pointer
 * @cqe: pointer to newly DMA'ed CQE entry for processing
 *
 * process iSCSI REJECT message
 */
static void bnx2i_process_reject_mesg(struct iscsi_session *session,
				      struct bnx2i_conn *bnx2i_conn,
				      struct cqe *cqe)
{
	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
	struct bnx2i_reject_msg *reject;
	struct iscsi_reject *hdr;

	reject = (struct bnx2i_reject_msg *) cqe;
	/* the rejected PDU header (if any) arrives via the RQ; either way
	 * the firmware consumed one RQ slot that must be returned
	 */
	if (reject->data_length) {
		bnx2i_get_rq_buf(bnx2i_conn, conn->data, reject->data_length);
		bnx2i_put_rq_buf(bnx2i_conn, 1);
	} else
		bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);

	spin_lock(&session->lock);
	hdr = (struct iscsi_reject *) &bnx2i_conn->gen_pdu.resp_hdr;
	memset(hdr, 0, sizeof(struct iscsi_hdr));
	hdr->opcode = reject->op_code;
	hdr->reason = reject->reason;
	hton24(hdr->dlength, reject->data_length);
	hdr->max_cmdsn = cpu_to_be32(reject->max_cmd_sn);
	hdr->exp_cmdsn = cpu_to_be32(reject->exp_cmd_sn);
	hdr->ffffffff = cpu_to_be32(RESERVED_ITT);
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, conn->data,
			     reject->data_length);
	spin_unlock(&session->lock);
}

/**
 * bnx2i_process_cmd_cleanup_resp - process scsi command clean-up completion
 * @session: iscsi session pointer
 * @bnx2i_conn: iscsi connection pointer
 * @cqe: pointer to newly DMA'ed CQE entry for processing
 *
 * process command cleanup response CQE during conn shutdown or error recovery
 */
static void bnx2i_process_cmd_cleanup_resp(struct iscsi_session *session,
					   struct bnx2i_conn *bnx2i_conn,
					   struct cqe *cqe)
{
	struct bnx2i_cleanup_response *cmd_clean_rsp;
	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
	struct iscsi_task *task;

	cmd_clean_rsp = (struct bnx2i_cleanup_response *)cqe;
	spin_lock(&session->lock);
	task = iscsi_itt_to_task(conn,
			cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
	if (!task)
		printk(KERN_ALERT "bnx2i: cmd clean ITT %x not active\n",
			cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
	spin_unlock(&session->lock);
	/* wake the thread waiting in the conn shutdown / eh path */
	complete(&bnx2i_conn->cmd_cleanup_cmpl);
}



/**
 * bnx2i_process_new_cqes - process newly DMA'ed CQE's
 * @bnx2i_conn: iscsi connection
 *
 * this function is called by generic KCQ handler to process all pending CQE's
 */
static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
{
	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
	struct iscsi_session *session = conn->session;
	struct qp_info *qp = &bnx2i_conn->ep->qp;
	struct bnx2i_nop_in_msg *nopin;
	int tgt_async_msg;

	while (1) {
		nopin = (struct bnx2i_nop_in_msg *) qp->cq_cons_qe;
		/* sequence number mismatch => this entry has not been
		 * DMA'ed by the chip yet; stop consuming
		 */
		if (nopin->cq_req_sn != qp->cqe_exp_seq_sn)
			break;

		if (unlikely(test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx)))
			break;

		tgt_async_msg = 0;

		/* dispatch on the iSCSI opcode stashed in the CQE */
		switch (nopin->op_code) {
		case ISCSI_OP_SCSI_CMD_RSP:
		case ISCSI_OP_SCSI_DATA_IN:
			bnx2i_process_scsi_cmd_resp(session, bnx2i_conn,
						    qp->cq_cons_qe);
			break;
		case ISCSI_OP_LOGIN_RSP:
			bnx2i_process_login_resp(session, bnx2i_conn,
						 qp->cq_cons_qe);
			break;
		case ISCSI_OP_SCSI_TMFUNC_RSP:
			bnx2i_process_tmf_resp(session, bnx2i_conn,
					       qp->cq_cons_qe);
			break;
		case ISCSI_OP_LOGOUT_RSP:
			bnx2i_process_logout_resp(session, bnx2i_conn,
						  qp->cq_cons_qe);
			break;
		case ISCSI_OP_NOOP_IN:
			if (bnx2i_process_nopin_mesg(session, bnx2i_conn,
						     qp->cq_cons_qe))
				tgt_async_msg = 1;
			break;
		case ISCSI_OPCODE_NOPOUT_LOCAL_COMPLETION:
			bnx2i_process_nopin_local_cmpl(session, bnx2i_conn,
						       qp->cq_cons_qe);
			break;
		case ISCSI_OP_ASYNC_EVENT:
			bnx2i_process_async_mesg(session, bnx2i_conn,
						 qp->cq_cons_qe);
			tgt_async_msg = 1;
			break;
		case ISCSI_OP_REJECT:
			bnx2i_process_reject_mesg(session, bnx2i_conn,
						  qp->cq_cons_qe);
			break;
		case ISCSI_OPCODE_CLEANUP_RESPONSE:
			bnx2i_process_cmd_cleanup_resp(session, bnx2i_conn,
						       qp->cq_cons_qe);
			break;
		default:
			printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
			       nopin->op_code);
		}

		/* target-initiated PDUs did not consume a host command slot */
		if (!tgt_async_msg)
			bnx2i_conn->ep->num_active_cmds--;

		/* clear out in production version only, till beta keep opcode
		 * field intact, will be helpful in debugging (context dump)
		 * nopin->op_code = 0;
		 */
		qp->cqe_exp_seq_sn++;
		if (qp->cqe_exp_seq_sn == (qp->cqe_size * 2 + 1))
			qp->cqe_exp_seq_sn = ISCSI_INITIAL_SN;

		/* advance consumer pointer, wrapping at end of the CQ ring */
		if (qp->cq_cons_qe == qp->cq_last_qe) {
			qp->cq_cons_qe = qp->cq_first_qe;
			qp->cq_cons_idx = 0;
		} else {
			qp->cq_cons_qe++;
			qp->cq_cons_idx++;
		}
	}
	bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
}

/**
 * bnx2i_fastpath_notification - process global event queue (KCQ)
 * @hba: adapter structure pointer
 *
@new_cqe_kcqe: pointer to newly DMA'ed KCQE entry 1767 + * 1768 + * Fast path event notification handler, KCQ entry carries context id 1769 + * of the connection that has 1 or more pending CQ entries 1770 + */ 1771 + static void bnx2i_fastpath_notification(struct bnx2i_hba *hba, 1772 + struct iscsi_kcqe *new_cqe_kcqe) 1773 + { 1774 + struct bnx2i_conn *conn; 1775 + u32 iscsi_cid; 1776 + 1777 + iscsi_cid = new_cqe_kcqe->iscsi_conn_id; 1778 + conn = bnx2i_get_conn_from_id(hba, iscsi_cid); 1779 + 1780 + if (!conn) { 1781 + printk(KERN_ALERT "cid #%x not valid\n", iscsi_cid); 1782 + return; 1783 + } 1784 + if (!conn->ep) { 1785 + printk(KERN_ALERT "cid #%x - ep not bound\n", iscsi_cid); 1786 + return; 1787 + } 1788 + 1789 + bnx2i_process_new_cqes(conn); 1790 + } 1791 + 1792 + 1793 + /** 1794 + * bnx2i_process_update_conn_cmpl - process iscsi conn update completion KCQE 1795 + * @hba: adapter structure pointer 1796 + * @update_kcqe: kcqe pointer 1797 + * 1798 + * CONN_UPDATE completion handler, this completes iSCSI connection FFP migration 1799 + */ 1800 + static void bnx2i_process_update_conn_cmpl(struct bnx2i_hba *hba, 1801 + struct iscsi_kcqe *update_kcqe) 1802 + { 1803 + struct bnx2i_conn *conn; 1804 + u32 iscsi_cid; 1805 + 1806 + iscsi_cid = update_kcqe->iscsi_conn_id; 1807 + conn = bnx2i_get_conn_from_id(hba, iscsi_cid); 1808 + 1809 + if (!conn) { 1810 + printk(KERN_ALERT "conn_update: cid %x not valid\n", iscsi_cid); 1811 + return; 1812 + } 1813 + if (!conn->ep) { 1814 + printk(KERN_ALERT "cid %x does not have ep bound\n", iscsi_cid); 1815 + return; 1816 + } 1817 + 1818 + if (update_kcqe->completion_status) { 1819 + printk(KERN_ALERT "request failed cid %x\n", iscsi_cid); 1820 + conn->ep->state = EP_STATE_ULP_UPDATE_FAILED; 1821 + } else 1822 + conn->ep->state = EP_STATE_ULP_UPDATE_COMPL; 1823 + 1824 + wake_up_interruptible(&conn->ep->ofld_wait); 1825 + } 1826 + 1827 + 1828 + /** 1829 + * bnx2i_recovery_que_add_conn - add connection to recovery queue 1830 + * 
@hba: adapter structure pointer 1831 + * @bnx2i_conn: iscsi connection 1832 + * 1833 + * Add connection to recovery queue and schedule adapter eh worker 1834 + */ 1835 + static void bnx2i_recovery_que_add_conn(struct bnx2i_hba *hba, 1836 + struct bnx2i_conn *bnx2i_conn) 1837 + { 1838 + iscsi_conn_failure(bnx2i_conn->cls_conn->dd_data, 1839 + ISCSI_ERR_CONN_FAILED); 1840 + } 1841 + 1842 + 1843 + /** 1844 + * bnx2i_process_tcp_error - process error notification on a given connection 1845 + * 1846 + * @hba: adapter structure pointer 1847 + * @tcp_err: tcp error kcqe pointer 1848 + * 1849 + * handles tcp level error notifications from FW. 1850 + */ 1851 + static void bnx2i_process_tcp_error(struct bnx2i_hba *hba, 1852 + struct iscsi_kcqe *tcp_err) 1853 + { 1854 + struct bnx2i_conn *bnx2i_conn; 1855 + u32 iscsi_cid; 1856 + 1857 + iscsi_cid = tcp_err->iscsi_conn_id; 1858 + bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid); 1859 + 1860 + if (!bnx2i_conn) { 1861 + printk(KERN_ALERT "bnx2i - cid 0x%x not valid\n", iscsi_cid); 1862 + return; 1863 + } 1864 + 1865 + printk(KERN_ALERT "bnx2i - cid 0x%x had TCP errors, error code 0x%x\n", 1866 + iscsi_cid, tcp_err->completion_status); 1867 + bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn); 1868 + } 1869 + 1870 + 1871 + /** 1872 + * bnx2i_process_iscsi_error - process error notification on a given connection 1873 + * @hba: adapter structure pointer 1874 + * @iscsi_err: iscsi error kcqe pointer 1875 + * 1876 + * handles iscsi error notifications from the FW. Firmware based in initial 1877 + * handshake classifies iscsi protocol / TCP rfc violation into either 1878 + * warning or error indications. If indication is of "Error" type, driver 1879 + * will initiate session recovery for that connection/session. 
For 1880 + * "Warning" type indication, driver will put out a system log message 1881 + * (there will be only one message for each type for the life of the 1882 + * session, this is to avoid un-necessarily overloading the system) 1883 + */ 1884 + static void bnx2i_process_iscsi_error(struct bnx2i_hba *hba, 1885 + struct iscsi_kcqe *iscsi_err) 1886 + { 1887 + struct bnx2i_conn *bnx2i_conn; 1888 + u32 iscsi_cid; 1889 + char warn_notice[] = "iscsi_warning"; 1890 + char error_notice[] = "iscsi_error"; 1891 + char additional_notice[64]; 1892 + char *message; 1893 + int need_recovery; 1894 + u64 err_mask64; 1895 + 1896 + iscsi_cid = iscsi_err->iscsi_conn_id; 1897 + bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid); 1898 + if (!bnx2i_conn) { 1899 + printk(KERN_ALERT "bnx2i - cid 0x%x not valid\n", iscsi_cid); 1900 + return; 1901 + } 1902 + 1903 + err_mask64 = (0x1ULL << iscsi_err->completion_status); 1904 + 1905 + if (err_mask64 & iscsi_error_mask) { 1906 + need_recovery = 0; 1907 + message = warn_notice; 1908 + } else { 1909 + need_recovery = 1; 1910 + message = error_notice; 1911 + } 1912 + 1913 + switch (iscsi_err->completion_status) { 1914 + case ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR: 1915 + strcpy(additional_notice, "hdr digest err"); 1916 + break; 1917 + case ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR: 1918 + strcpy(additional_notice, "data digest err"); 1919 + break; 1920 + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE: 1921 + strcpy(additional_notice, "wrong opcode rcvd"); 1922 + break; 1923 + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN: 1924 + strcpy(additional_notice, "AHS len > 0 rcvd"); 1925 + break; 1926 + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT: 1927 + strcpy(additional_notice, "invalid ITT rcvd"); 1928 + break; 1929 + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN: 1930 + strcpy(additional_notice, "wrong StatSN rcvd"); 1931 + break; 1932 + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN: 1933 + 
strcpy(additional_notice, "wrong DataSN rcvd"); 1934 + break; 1935 + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T: 1936 + strcpy(additional_notice, "pend R2T violation"); 1937 + break; 1938 + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0: 1939 + strcpy(additional_notice, "ERL0, UO"); 1940 + break; 1941 + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1: 1942 + strcpy(additional_notice, "ERL0, U1"); 1943 + break; 1944 + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2: 1945 + strcpy(additional_notice, "ERL0, U2"); 1946 + break; 1947 + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3: 1948 + strcpy(additional_notice, "ERL0, U3"); 1949 + break; 1950 + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4: 1951 + strcpy(additional_notice, "ERL0, U4"); 1952 + break; 1953 + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5: 1954 + strcpy(additional_notice, "ERL0, U5"); 1955 + break; 1956 + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6: 1957 + strcpy(additional_notice, "ERL0, U6"); 1958 + break; 1959 + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN: 1960 + strcpy(additional_notice, "invalid resi len"); 1961 + break; 1962 + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN: 1963 + strcpy(additional_notice, "MRDSL violation"); 1964 + break; 1965 + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO: 1966 + strcpy(additional_notice, "F-bit not set"); 1967 + break; 1968 + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV: 1969 + strcpy(additional_notice, "invalid TTT"); 1970 + break; 1971 + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN: 1972 + strcpy(additional_notice, "invalid DataSN"); 1973 + break; 1974 + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN: 1975 + strcpy(additional_notice, "burst len violation"); 1976 + break; 1977 + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF: 1978 + strcpy(additional_notice, "buf offset violation"); 1979 + break; 1980 + 
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN: 1981 + strcpy(additional_notice, "invalid LUN field"); 1982 + break; 1983 + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN: 1984 + strcpy(additional_notice, "invalid R2TSN field"); 1985 + break; 1986 + #define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0 \ 1987 + ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0 1988 + case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0: 1989 + strcpy(additional_notice, "invalid cmd len1"); 1990 + break; 1991 + #define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1 \ 1992 + ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1 1993 + case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1: 1994 + strcpy(additional_notice, "invalid cmd len2"); 1995 + break; 1996 + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED: 1997 + strcpy(additional_notice, 1998 + "pend r2t exceeds MaxOutstandingR2T value"); 1999 + break; 2000 + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV: 2001 + strcpy(additional_notice, "TTT is rsvd"); 2002 + break; 2003 + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN: 2004 + strcpy(additional_notice, "MBL violation"); 2005 + break; 2006 + #define BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO \ 2007 + ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO 2008 + case BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO: 2009 + strcpy(additional_notice, "data seg len != 0"); 2010 + break; 2011 + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN: 2012 + strcpy(additional_notice, "reject pdu len error"); 2013 + break; 2014 + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN: 2015 + strcpy(additional_notice, "async pdu len error"); 2016 + break; 2017 + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN: 2018 + strcpy(additional_notice, "nopin pdu len error"); 2019 + break; 2020 + #define BNX2_ERR_PEND_R2T_IN_CLEANUP \ 2021 + ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP 2022 + case BNX2_ERR_PEND_R2T_IN_CLEANUP: 2023 + 
strcpy(additional_notice, "pend r2t in cleanup"); 2024 + break; 2025 + 2026 + case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT: 2027 + strcpy(additional_notice, "IP fragments rcvd"); 2028 + break; 2029 + case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS: 2030 + strcpy(additional_notice, "IP options error"); 2031 + break; 2032 + case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG: 2033 + strcpy(additional_notice, "urgent flag error"); 2034 + break; 2035 + default: 2036 + printk(KERN_ALERT "iscsi_err - unknown err %x\n", 2037 + iscsi_err->completion_status); 2038 + } 2039 + 2040 + if (need_recovery) { 2041 + iscsi_conn_printk(KERN_ALERT, 2042 + bnx2i_conn->cls_conn->dd_data, 2043 + "bnx2i: %s - %s\n", 2044 + message, additional_notice); 2045 + 2046 + iscsi_conn_printk(KERN_ALERT, 2047 + bnx2i_conn->cls_conn->dd_data, 2048 + "conn_err - hostno %d conn %p, " 2049 + "iscsi_cid %x cid %x\n", 2050 + bnx2i_conn->hba->shost->host_no, 2051 + bnx2i_conn, bnx2i_conn->ep->ep_iscsi_cid, 2052 + bnx2i_conn->ep->ep_cid); 2053 + bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn); 2054 + } else 2055 + if (!test_and_set_bit(iscsi_err->completion_status, 2056 + (void *) &bnx2i_conn->violation_notified)) 2057 + iscsi_conn_printk(KERN_ALERT, 2058 + bnx2i_conn->cls_conn->dd_data, 2059 + "bnx2i: %s - %s\n", 2060 + message, additional_notice); 2061 + } 2062 + 2063 + 2064 + /** 2065 + * bnx2i_process_conn_destroy_cmpl - process iscsi conn destroy completion 2066 + * @hba: adapter structure pointer 2067 + * @conn_destroy: conn destroy kcqe pointer 2068 + * 2069 + * handles connection destroy completion request. 
2070 + */ 2071 + static void bnx2i_process_conn_destroy_cmpl(struct bnx2i_hba *hba, 2072 + struct iscsi_kcqe *conn_destroy) 2073 + { 2074 + struct bnx2i_endpoint *ep; 2075 + 2076 + ep = bnx2i_find_ep_in_destroy_list(hba, conn_destroy->iscsi_conn_id); 2077 + if (!ep) { 2078 + printk(KERN_ALERT "bnx2i_conn_destroy_cmpl: no pending " 2079 + "offload request, unexpected complection\n"); 2080 + return; 2081 + } 2082 + 2083 + if (hba != ep->hba) { 2084 + printk(KERN_ALERT "conn destroy- error hba mis-match\n"); 2085 + return; 2086 + } 2087 + 2088 + if (conn_destroy->completion_status) { 2089 + printk(KERN_ALERT "conn_destroy_cmpl: op failed\n"); 2090 + ep->state = EP_STATE_CLEANUP_FAILED; 2091 + } else 2092 + ep->state = EP_STATE_CLEANUP_CMPL; 2093 + wake_up_interruptible(&ep->ofld_wait); 2094 + } 2095 + 2096 + 2097 + /** 2098 + * bnx2i_process_ofld_cmpl - process initial iscsi conn offload completion 2099 + * @hba: adapter structure pointer 2100 + * @ofld_kcqe: conn offload kcqe pointer 2101 + * 2102 + * handles initial connection offload completion, ep_connect() thread is 2103 + * woken-up to continue with LLP connect process 2104 + */ 2105 + static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba, 2106 + struct iscsi_kcqe *ofld_kcqe) 2107 + { 2108 + u32 cid_addr; 2109 + struct bnx2i_endpoint *ep; 2110 + u32 cid_num; 2111 + 2112 + ep = bnx2i_find_ep_in_ofld_list(hba, ofld_kcqe->iscsi_conn_id); 2113 + if (!ep) { 2114 + printk(KERN_ALERT "ofld_cmpl: no pend offload request\n"); 2115 + return; 2116 + } 2117 + 2118 + if (hba != ep->hba) { 2119 + printk(KERN_ALERT "ofld_cmpl: error hba mis-match\n"); 2120 + return; 2121 + } 2122 + 2123 + if (ofld_kcqe->completion_status) { 2124 + if (ofld_kcqe->completion_status == 2125 + ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) 2126 + printk(KERN_ALERT "bnx2i: unable to allocate" 2127 + " iSCSI context resources\n"); 2128 + ep->state = EP_STATE_OFLD_FAILED; 2129 + } else { 2130 + ep->state = EP_STATE_OFLD_COMPL; 2131 + cid_addr = 
ofld_kcqe->iscsi_conn_context_id; 2132 + cid_num = bnx2i_get_cid_num(ep); 2133 + ep->ep_cid = cid_addr; 2134 + ep->qp.ctx_base = NULL; 2135 + } 2136 + wake_up_interruptible(&ep->ofld_wait); 2137 + } 2138 + 2139 + /** 2140 + * bnx2i_indicate_kcqe - process iscsi conn update completion KCQE 2141 + * @hba: adapter structure pointer 2142 + * @update_kcqe: kcqe pointer 2143 + * 2144 + * Generic KCQ event handler/dispatcher 2145 + */ 2146 + static void bnx2i_indicate_kcqe(void *context, struct kcqe *kcqe[], 2147 + u32 num_cqe) 2148 + { 2149 + struct bnx2i_hba *hba = context; 2150 + int i = 0; 2151 + struct iscsi_kcqe *ikcqe = NULL; 2152 + 2153 + while (i < num_cqe) { 2154 + ikcqe = (struct iscsi_kcqe *) kcqe[i++]; 2155 + 2156 + if (ikcqe->op_code == 2157 + ISCSI_KCQE_OPCODE_CQ_EVENT_NOTIFICATION) 2158 + bnx2i_fastpath_notification(hba, ikcqe); 2159 + else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_OFFLOAD_CONN) 2160 + bnx2i_process_ofld_cmpl(hba, ikcqe); 2161 + else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_UPDATE_CONN) 2162 + bnx2i_process_update_conn_cmpl(hba, ikcqe); 2163 + else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_INIT) { 2164 + if (ikcqe->completion_status != 2165 + ISCSI_KCQE_COMPLETION_STATUS_SUCCESS) 2166 + bnx2i_iscsi_license_error(hba, ikcqe->\ 2167 + completion_status); 2168 + else { 2169 + set_bit(ADAPTER_STATE_UP, &hba->adapter_state); 2170 + bnx2i_get_link_state(hba); 2171 + printk(KERN_INFO "bnx2i [%.2x:%.2x.%.2x]: " 2172 + "ISCSI_INIT passed\n", 2173 + (u8)hba->pcidev->bus->number, 2174 + hba->pci_devno, 2175 + (u8)hba->pci_func); 2176 + 2177 + 2178 + } 2179 + } else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_DESTROY_CONN) 2180 + bnx2i_process_conn_destroy_cmpl(hba, ikcqe); 2181 + else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_ISCSI_ERROR) 2182 + bnx2i_process_iscsi_error(hba, ikcqe); 2183 + else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_TCP_ERROR) 2184 + bnx2i_process_tcp_error(hba, ikcqe); 2185 + else 2186 + printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n", 2187 
+ ikcqe->op_code); 2188 + } 2189 + } 2190 + 2191 + 2192 + /** 2193 + * bnx2i_indicate_netevent - Generic netdev event handler 2194 + * @context: adapter structure pointer 2195 + * @event: event type 2196 + * 2197 + * Handles four netdev events, NETDEV_UP, NETDEV_DOWN, 2198 + * NETDEV_GOING_DOWN and NETDEV_CHANGE 2199 + */ 2200 + static void bnx2i_indicate_netevent(void *context, unsigned long event) 2201 + { 2202 + struct bnx2i_hba *hba = context; 2203 + 2204 + switch (event) { 2205 + case NETDEV_UP: 2206 + if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) 2207 + bnx2i_send_fw_iscsi_init_msg(hba); 2208 + break; 2209 + case NETDEV_DOWN: 2210 + clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state); 2211 + clear_bit(ADAPTER_STATE_UP, &hba->adapter_state); 2212 + break; 2213 + case NETDEV_GOING_DOWN: 2214 + set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state); 2215 + iscsi_host_for_each_session(hba->shost, 2216 + bnx2i_drop_session); 2217 + break; 2218 + case NETDEV_CHANGE: 2219 + bnx2i_get_link_state(hba); 2220 + break; 2221 + default: 2222 + ; 2223 + } 2224 + } 2225 + 2226 + 2227 + /** 2228 + * bnx2i_cm_connect_cmpl - process iscsi conn establishment completion 2229 + * @cm_sk: cnic sock structure pointer 2230 + * 2231 + * function callback exported via bnx2i - cnic driver interface to 2232 + * indicate completion of option-2 TCP connect request. 
2233 + */ 2234 + static void bnx2i_cm_connect_cmpl(struct cnic_sock *cm_sk) 2235 + { 2236 + struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context; 2237 + 2238 + if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state)) 2239 + ep->state = EP_STATE_CONNECT_FAILED; 2240 + else if (test_bit(SK_F_OFFLD_COMPLETE, &cm_sk->flags)) 2241 + ep->state = EP_STATE_CONNECT_COMPL; 2242 + else 2243 + ep->state = EP_STATE_CONNECT_FAILED; 2244 + 2245 + wake_up_interruptible(&ep->ofld_wait); 2246 + } 2247 + 2248 + 2249 + /** 2250 + * bnx2i_cm_close_cmpl - process tcp conn close completion 2251 + * @cm_sk: cnic sock structure pointer 2252 + * 2253 + * function callback exported via bnx2i - cnic driver interface to 2254 + * indicate completion of option-2 graceful TCP connect shutdown 2255 + */ 2256 + static void bnx2i_cm_close_cmpl(struct cnic_sock *cm_sk) 2257 + { 2258 + struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context; 2259 + 2260 + ep->state = EP_STATE_DISCONN_COMPL; 2261 + wake_up_interruptible(&ep->ofld_wait); 2262 + } 2263 + 2264 + 2265 + /** 2266 + * bnx2i_cm_abort_cmpl - process abortive tcp conn teardown completion 2267 + * @cm_sk: cnic sock structure pointer 2268 + * 2269 + * function callback exported via bnx2i - cnic driver interface to 2270 + * indicate completion of option-2 abortive TCP connect termination 2271 + */ 2272 + static void bnx2i_cm_abort_cmpl(struct cnic_sock *cm_sk) 2273 + { 2274 + struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context; 2275 + 2276 + ep->state = EP_STATE_DISCONN_COMPL; 2277 + wake_up_interruptible(&ep->ofld_wait); 2278 + } 2279 + 2280 + 2281 + /** 2282 + * bnx2i_cm_remote_close - process received TCP FIN 2283 + * @hba: adapter structure pointer 2284 + * @update_kcqe: kcqe pointer 2285 + * 2286 + * function callback exported via bnx2i - cnic driver interface to indicate 2287 + * async TCP events such as FIN 2288 + */ 2289 + static void bnx2i_cm_remote_close(struct cnic_sock *cm_sk) 2290 
+ { 2291 + struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context; 2292 + 2293 + ep->state = EP_STATE_TCP_FIN_RCVD; 2294 + if (ep->conn) 2295 + bnx2i_recovery_que_add_conn(ep->hba, ep->conn); 2296 + } 2297 + 2298 + /** 2299 + * bnx2i_cm_remote_abort - process TCP RST and start conn cleanup 2300 + * @hba: adapter structure pointer 2301 + * @update_kcqe: kcqe pointer 2302 + * 2303 + * function callback exported via bnx2i - cnic driver interface to 2304 + * indicate async TCP events (RST) sent by the peer. 2305 + */ 2306 + static void bnx2i_cm_remote_abort(struct cnic_sock *cm_sk) 2307 + { 2308 + struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context; 2309 + 2310 + ep->state = EP_STATE_TCP_RST_RCVD; 2311 + if (ep->conn) 2312 + bnx2i_recovery_que_add_conn(ep->hba, ep->conn); 2313 + } 2314 + 2315 + 2316 + static void bnx2i_send_nl_mesg(struct cnic_dev *dev, u32 msg_type, 2317 + char *buf, u16 buflen) 2318 + { 2319 + struct bnx2i_hba *hba; 2320 + 2321 + hba = bnx2i_find_hba_for_cnic(dev); 2322 + if (!hba) 2323 + return; 2324 + 2325 + if (iscsi_offload_mesg(hba->shost, &bnx2i_iscsi_transport, 2326 + msg_type, buf, buflen)) 2327 + printk(KERN_ALERT "bnx2i: private nl message send error\n"); 2328 + 2329 + } 2330 + 2331 + 2332 + /** 2333 + * bnx2i_cnic_cb - global template of bnx2i - cnic driver interface structure 2334 + * carrying callback function pointers 2335 + * 2336 + */ 2337 + struct cnic_ulp_ops bnx2i_cnic_cb = { 2338 + .cnic_init = bnx2i_ulp_init, 2339 + .cnic_exit = bnx2i_ulp_exit, 2340 + .cnic_start = bnx2i_start, 2341 + .cnic_stop = bnx2i_stop, 2342 + .indicate_kcqes = bnx2i_indicate_kcqe, 2343 + .indicate_netevent = bnx2i_indicate_netevent, 2344 + .cm_connect_complete = bnx2i_cm_connect_cmpl, 2345 + .cm_close_complete = bnx2i_cm_close_cmpl, 2346 + .cm_abort_complete = bnx2i_cm_abort_cmpl, 2347 + .cm_remote_close = bnx2i_cm_remote_close, 2348 + .cm_remote_abort = bnx2i_cm_remote_abort, 2349 + .iscsi_nl_send_msg = bnx2i_send_nl_mesg, 
2350 + .owner = THIS_MODULE 2351 + }; 2352 + 2353 + 2354 + /** 2355 + * bnx2i_map_ep_dbell_regs - map connection doorbell registers 2356 + * @ep: bnx2i endpoint 2357 + * 2358 + * maps connection's SQ and RQ doorbell registers, 5706/5708/5709 hosts these 2359 + * register in BAR #0. Whereas in 57710 these register are accessed by 2360 + * mapping BAR #1 2361 + */ 2362 + int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep) 2363 + { 2364 + u32 cid_num; 2365 + u32 reg_off; 2366 + u32 first_l4l5; 2367 + u32 ctx_sz; 2368 + u32 config2; 2369 + resource_size_t reg_base; 2370 + 2371 + cid_num = bnx2i_get_cid_num(ep); 2372 + 2373 + if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) { 2374 + reg_base = pci_resource_start(ep->hba->pcidev, 2375 + BNX2X_DOORBELL_PCI_BAR); 2376 + reg_off = PAGE_SIZE * (cid_num & 0x1FFFF) + DPM_TRIGER_TYPE; 2377 + ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4); 2378 + goto arm_cq; 2379 + } 2380 + 2381 + reg_base = ep->hba->netdev->base_addr; 2382 + if ((test_bit(BNX2I_NX2_DEV_5709, &ep->hba->cnic_dev_type)) && 2383 + (ep->hba->mail_queue_access == BNX2I_MQ_BIN_MODE)) { 2384 + config2 = REG_RD(ep->hba, BNX2_MQ_CONFIG2); 2385 + first_l4l5 = config2 & BNX2_MQ_CONFIG2_FIRST_L4L5; 2386 + ctx_sz = (config2 & BNX2_MQ_CONFIG2_CONT_SZ) >> 3; 2387 + if (ctx_sz) 2388 + reg_off = CTX_OFFSET + MAX_CID_CNT * MB_KERNEL_CTX_SIZE 2389 + + PAGE_SIZE * 2390 + (((cid_num - first_l4l5) / ctx_sz) + 256); 2391 + else 2392 + reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num); 2393 + } else 2394 + /* 5709 device in normal node and 5706/5708 devices */ 2395 + reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num); 2396 + 2397 + ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 2398 + MB_KERNEL_CTX_SIZE); 2399 + if (!ep->qp.ctx_base) 2400 + return -ENOMEM; 2401 + 2402 + arm_cq: 2403 + bnx2i_arm_cq_event_coalescing(ep, CNIC_ARM_CQE); 2404 + return 0; 2405 + }
+438
drivers/scsi/bnx2i/bnx2i_init.c
··· 1 + /* bnx2i.c: Broadcom NetXtreme II iSCSI driver. 2 + * 3 + * Copyright (c) 2006 - 2009 Broadcom Corporation 4 + * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. 5 + * Copyright (c) 2007, 2008 Mike Christie 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License as published by 9 + * the Free Software Foundation. 10 + * 11 + * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) 12 + */ 13 + 14 + #include "bnx2i.h" 15 + 16 + static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list); 17 + static u32 adapter_count; 18 + static int bnx2i_reg_device; 19 + 20 + #define DRV_MODULE_NAME "bnx2i" 21 + #define DRV_MODULE_VERSION "2.0.1d" 22 + #define DRV_MODULE_RELDATE "Mar 25, 2009" 23 + 24 + static char version[] __devinitdata = 25 + "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \ 26 + " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 27 + 28 + 29 + MODULE_AUTHOR("Anil Veerabhadrappa <anilgv@broadcom.com>"); 30 + MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 iSCSI Driver"); 31 + MODULE_LICENSE("GPL"); 32 + MODULE_VERSION(DRV_MODULE_VERSION); 33 + 34 + static DEFINE_RWLOCK(bnx2i_dev_lock); 35 + 36 + unsigned int event_coal_div = 1; 37 + module_param(event_coal_div, int, 0664); 38 + MODULE_PARM_DESC(event_coal_div, "Event Coalescing Divide Factor"); 39 + 40 + unsigned int en_tcp_dack = 1; 41 + module_param(en_tcp_dack, int, 0664); 42 + MODULE_PARM_DESC(en_tcp_dack, "Enable TCP Delayed ACK"); 43 + 44 + unsigned int error_mask1 = 0x00; 45 + module_param(error_mask1, int, 0664); 46 + MODULE_PARM_DESC(error_mask1, "Config FW iSCSI Error Mask #1"); 47 + 48 + unsigned int error_mask2 = 0x00; 49 + module_param(error_mask2, int, 0664); 50 + MODULE_PARM_DESC(error_mask2, "Config FW iSCSI Error Mask #2"); 51 + 52 + unsigned int sq_size; 53 + module_param(sq_size, int, 0664); 54 + MODULE_PARM_DESC(sq_size, "Configure SQ size"); 55 + 56 + unsigned 
int rq_size = BNX2I_RQ_WQES_DEFAULT; 57 + module_param(rq_size, int, 0664); 58 + MODULE_PARM_DESC(rq_size, "Configure RQ size"); 59 + 60 + u64 iscsi_error_mask = 0x00; 61 + 62 + static void bnx2i_unreg_one_device(struct bnx2i_hba *hba) ; 63 + 64 + 65 + /** 66 + * bnx2i_identify_device - identifies NetXtreme II device type 67 + * @hba: Adapter structure pointer 68 + * 69 + * This function identifies the NX2 device type and sets appropriate 70 + * queue mailbox register access method, 5709 requires driver to 71 + * access MBOX regs using *bin* mode 72 + */ 73 + void bnx2i_identify_device(struct bnx2i_hba *hba) 74 + { 75 + hba->cnic_dev_type = 0; 76 + if ((hba->pci_did == PCI_DEVICE_ID_NX2_5706) || 77 + (hba->pci_did == PCI_DEVICE_ID_NX2_5706S)) 78 + set_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type); 79 + else if ((hba->pci_did == PCI_DEVICE_ID_NX2_5708) || 80 + (hba->pci_did == PCI_DEVICE_ID_NX2_5708S)) 81 + set_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type); 82 + else if ((hba->pci_did == PCI_DEVICE_ID_NX2_5709) || 83 + (hba->pci_did == PCI_DEVICE_ID_NX2_5709S)) { 84 + set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type); 85 + hba->mail_queue_access = BNX2I_MQ_BIN_MODE; 86 + } else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 || 87 + hba->pci_did == PCI_DEVICE_ID_NX2_57711) 88 + set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type); 89 + } 90 + 91 + 92 + /** 93 + * get_adapter_list_head - returns head of adapter list 94 + */ 95 + struct bnx2i_hba *get_adapter_list_head(void) 96 + { 97 + struct bnx2i_hba *hba = NULL; 98 + struct bnx2i_hba *tmp_hba; 99 + 100 + if (!adapter_count) 101 + goto hba_not_found; 102 + 103 + read_lock(&bnx2i_dev_lock); 104 + list_for_each_entry(tmp_hba, &adapter_list, link) { 105 + if (tmp_hba->cnic && tmp_hba->cnic->cm_select_dev) { 106 + hba = tmp_hba; 107 + break; 108 + } 109 + } 110 + read_unlock(&bnx2i_dev_lock); 111 + hba_not_found: 112 + return hba; 113 + } 114 + 115 + 116 + /** 117 + * bnx2i_find_hba_for_cnic - maps cnic device instance to bnx2i 
adapter instance 118 + * @cnic: pointer to cnic device instance 119 + * 120 + */ 121 + struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic) 122 + { 123 + struct bnx2i_hba *hba, *temp; 124 + 125 + read_lock(&bnx2i_dev_lock); 126 + list_for_each_entry_safe(hba, temp, &adapter_list, link) { 127 + if (hba->cnic == cnic) { 128 + read_unlock(&bnx2i_dev_lock); 129 + return hba; 130 + } 131 + } 132 + read_unlock(&bnx2i_dev_lock); 133 + return NULL; 134 + } 135 + 136 + 137 + /** 138 + * bnx2i_start - cnic callback to initialize & start adapter instance 139 + * @handle: transparent handle pointing to adapter structure 140 + * 141 + * This function maps adapter structure to pcidev structure and initiates 142 + * firmware handshake to enable/initialize on chip iscsi components 143 + * This bnx2i - cnic interface api callback is issued after following 144 + * 2 conditions are met - 145 + * a) underlying network interface is up (marked by event 'NETDEV_UP' 146 + * from netdev 147 + * b) bnx2i adapter instance is registered 148 + */ 149 + void bnx2i_start(void *handle) 150 + { 151 + #define BNX2I_INIT_POLL_TIME (1000 / HZ) 152 + struct bnx2i_hba *hba = handle; 153 + int i = HZ; 154 + 155 + bnx2i_send_fw_iscsi_init_msg(hba); 156 + while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--) 157 + msleep(BNX2I_INIT_POLL_TIME); 158 + } 159 + 160 + 161 + /** 162 + * bnx2i_stop - cnic callback to shutdown adapter instance 163 + * @handle: transparent handle pointing to adapter structure 164 + * 165 + * driver checks if adapter is already in shutdown mode, if not start 166 + * the shutdown process 167 + */ 168 + void bnx2i_stop(void *handle) 169 + { 170 + struct bnx2i_hba *hba = handle; 171 + 172 + /* check if cleanup happened in GOING_DOWN context */ 173 + clear_bit(ADAPTER_STATE_UP, &hba->adapter_state); 174 + if (!test_and_clear_bit(ADAPTER_STATE_GOING_DOWN, 175 + &hba->adapter_state)) 176 + iscsi_host_for_each_session(hba->shost, 177 + bnx2i_drop_session); 178 + } 
179 + 180 + /** 181 + * bnx2i_register_device - register bnx2i adapter instance with the cnic driver 182 + * @hba: Adapter instance to register 183 + * 184 + * registers bnx2i adapter instance with the cnic driver while holding the 185 + * adapter structure lock 186 + */ 187 + void bnx2i_register_device(struct bnx2i_hba *hba) 188 + { 189 + if (test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) || 190 + test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { 191 + return; 192 + } 193 + 194 + hba->cnic->register_device(hba->cnic, CNIC_ULP_ISCSI, hba); 195 + 196 + spin_lock(&hba->lock); 197 + bnx2i_reg_device++; 198 + spin_unlock(&hba->lock); 199 + 200 + set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); 201 + } 202 + 203 + 204 + /** 205 + * bnx2i_reg_dev_all - registers all adapter instances with the cnic driver 206 + * 207 + * registers all bnx2i adapter instances with the cnic driver while holding 208 + * the global resource lock 209 + */ 210 + void bnx2i_reg_dev_all(void) 211 + { 212 + struct bnx2i_hba *hba, *temp; 213 + 214 + read_lock(&bnx2i_dev_lock); 215 + list_for_each_entry_safe(hba, temp, &adapter_list, link) 216 + bnx2i_register_device(hba); 217 + read_unlock(&bnx2i_dev_lock); 218 + } 219 + 220 + 221 + /** 222 + * bnx2i_unreg_one_device - unregister adapter instance with the cnic driver 223 + * @hba: Adapter instance to unregister 224 + * 225 + * registers bnx2i adapter instance with the cnic driver while holding 226 + * the adapter structure lock 227 + */ 228 + static void bnx2i_unreg_one_device(struct bnx2i_hba *hba) 229 + { 230 + if (hba->ofld_conns_active || 231 + !test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic) || 232 + test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state)) 233 + return; 234 + 235 + hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI); 236 + 237 + spin_lock(&hba->lock); 238 + bnx2i_reg_device--; 239 + spin_unlock(&hba->lock); 240 + 241 + /* ep_disconnect could come before NETDEV_DOWN, driver won't 242 + * see 
NETDEV_DOWN as it already unregistered itself. 243 + */ 244 + hba->adapter_state = 0; 245 + clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); 246 + } 247 + 248 + /** 249 + * bnx2i_unreg_dev_all - unregisters all bnx2i instances with the cnic driver 250 + * 251 + * unregisters all bnx2i adapter instances with the cnic driver while holding 252 + * the global resource lock 253 + */ 254 + void bnx2i_unreg_dev_all(void) 255 + { 256 + struct bnx2i_hba *hba, *temp; 257 + 258 + read_lock(&bnx2i_dev_lock); 259 + list_for_each_entry_safe(hba, temp, &adapter_list, link) 260 + bnx2i_unreg_one_device(hba); 261 + read_unlock(&bnx2i_dev_lock); 262 + } 263 + 264 + 265 + /** 266 + * bnx2i_init_one - initialize an adapter instance and allocate memory resources 267 + * @hba: bnx2i adapter instance 268 + * @cnic: cnic device handle 269 + * 270 + * Global resource lock and host adapter lock is held during critical sections 271 + * below. This routine is called from cnic_register_driver() context and 272 + * work horse thread which does majority of device specific initialization 273 + */ 274 + static int bnx2i_init_one(struct bnx2i_hba *hba, struct cnic_dev *cnic) 275 + { 276 + int rc; 277 + 278 + read_lock(&bnx2i_dev_lock); 279 + if (bnx2i_reg_device && 280 + !test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { 281 + rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba); 282 + if (rc) /* duplicate registration */ 283 + printk(KERN_ERR "bnx2i- dev reg failed\n"); 284 + 285 + spin_lock(&hba->lock); 286 + bnx2i_reg_device++; 287 + hba->age++; 288 + spin_unlock(&hba->lock); 289 + 290 + set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); 291 + } 292 + read_unlock(&bnx2i_dev_lock); 293 + 294 + write_lock(&bnx2i_dev_lock); 295 + list_add_tail(&hba->link, &adapter_list); 296 + adapter_count++; 297 + write_unlock(&bnx2i_dev_lock); 298 + return 0; 299 + } 300 + 301 + 302 + /** 303 + * bnx2i_ulp_init - initialize an adapter instance 304 + * @dev: cnic device handle 305 + * 306 + * 
Called from cnic_register_driver() context to initialize all enumerated 307 + * cnic devices. This routine allocate adapter structure and other 308 + * device specific resources. 309 + */ 310 + void bnx2i_ulp_init(struct cnic_dev *dev) 311 + { 312 + struct bnx2i_hba *hba; 313 + 314 + /* Allocate a HBA structure for this device */ 315 + hba = bnx2i_alloc_hba(dev); 316 + if (!hba) { 317 + printk(KERN_ERR "bnx2i init: hba initialization failed\n"); 318 + return; 319 + } 320 + 321 + /* Get PCI related information and update hba struct members */ 322 + clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); 323 + if (bnx2i_init_one(hba, dev)) { 324 + printk(KERN_ERR "bnx2i - hba %p init failed\n", hba); 325 + bnx2i_free_hba(hba); 326 + } else 327 + hba->cnic = dev; 328 + } 329 + 330 + 331 + /** 332 + * bnx2i_ulp_exit - shuts down adapter instance and frees all resources 333 + * @dev: cnic device handle 334 + * 335 + */ 336 + void bnx2i_ulp_exit(struct cnic_dev *dev) 337 + { 338 + struct bnx2i_hba *hba; 339 + 340 + hba = bnx2i_find_hba_for_cnic(dev); 341 + if (!hba) { 342 + printk(KERN_INFO "bnx2i_ulp_exit: hba not " 343 + "found, dev 0x%p\n", dev); 344 + return; 345 + } 346 + write_lock(&bnx2i_dev_lock); 347 + list_del_init(&hba->link); 348 + adapter_count--; 349 + 350 + if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { 351 + hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI); 352 + clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); 353 + 354 + spin_lock(&hba->lock); 355 + bnx2i_reg_device--; 356 + spin_unlock(&hba->lock); 357 + } 358 + write_unlock(&bnx2i_dev_lock); 359 + 360 + bnx2i_free_hba(hba); 361 + } 362 + 363 + 364 + /** 365 + * bnx2i_mod_init - module init entry point 366 + * 367 + * initialize any driver wide global data structures such as endpoint pool, 368 + * tcp port manager/queue, sysfs. 
finally driver will register itself 369 + * with the cnic module 370 + */ 371 + static int __init bnx2i_mod_init(void) 372 + { 373 + int err; 374 + 375 + printk(KERN_INFO "%s", version); 376 + 377 + if (!is_power_of_2(sq_size)) 378 + sq_size = roundup_pow_of_two(sq_size); 379 + 380 + bnx2i_scsi_xport_template = 381 + iscsi_register_transport(&bnx2i_iscsi_transport); 382 + if (!bnx2i_scsi_xport_template) { 383 + printk(KERN_ERR "Could not register bnx2i transport.\n"); 384 + err = -ENOMEM; 385 + goto out; 386 + } 387 + 388 + err = cnic_register_driver(CNIC_ULP_ISCSI, &bnx2i_cnic_cb); 389 + if (err) { 390 + printk(KERN_ERR "Could not register bnx2i cnic driver.\n"); 391 + goto unreg_xport; 392 + } 393 + 394 + return 0; 395 + 396 + unreg_xport: 397 + iscsi_unregister_transport(&bnx2i_iscsi_transport); 398 + out: 399 + return err; 400 + } 401 + 402 + 403 + /** 404 + * bnx2i_mod_exit - module cleanup/exit entry point 405 + * 406 + * Global resource lock and host adapter lock is held during critical sections 407 + * in this function. 
Driver will browse through the adapter list, cleans-up 408 + * each instance, unregisters iscsi transport name and finally driver will 409 + * unregister itself with the cnic module 410 + */ 411 + static void __exit bnx2i_mod_exit(void) 412 + { 413 + struct bnx2i_hba *hba; 414 + 415 + write_lock(&bnx2i_dev_lock); 416 + while (!list_empty(&adapter_list)) { 417 + hba = list_entry(adapter_list.next, struct bnx2i_hba, link); 418 + list_del(&hba->link); 419 + adapter_count--; 420 + 421 + if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { 422 + hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI); 423 + clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); 424 + bnx2i_reg_device--; 425 + } 426 + 427 + write_unlock(&bnx2i_dev_lock); 428 + bnx2i_free_hba(hba); 429 + write_lock(&bnx2i_dev_lock); 430 + } 431 + write_unlock(&bnx2i_dev_lock); 432 + 433 + iscsi_unregister_transport(&bnx2i_iscsi_transport); 434 + cnic_unregister_driver(CNIC_ULP_ISCSI); 435 + } 436 + 437 + module_init(bnx2i_mod_init); 438 + module_exit(bnx2i_mod_exit);
+2064
drivers/scsi/bnx2i/bnx2i_iscsi.c
··· 1 + /* 2 + * bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver. 3 + * 4 + * Copyright (c) 2006 - 2009 Broadcom Corporation 5 + * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. 6 + * Copyright (c) 2007, 2008 Mike Christie 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License as published by 10 + * the Free Software Foundation. 11 + * 12 + * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) 13 + */ 14 + 15 + #include <scsi/scsi_tcq.h> 16 + #include <scsi/libiscsi.h> 17 + #include "bnx2i.h" 18 + 19 + struct scsi_transport_template *bnx2i_scsi_xport_template; 20 + struct iscsi_transport bnx2i_iscsi_transport; 21 + static struct scsi_host_template bnx2i_host_template; 22 + 23 + /* 24 + * Global endpoint resource info 25 + */ 26 + static DEFINE_SPINLOCK(bnx2i_resc_lock); /* protects global resources */ 27 + 28 + 29 + static int bnx2i_adapter_ready(struct bnx2i_hba *hba) 30 + { 31 + int retval = 0; 32 + 33 + if (!hba || !test_bit(ADAPTER_STATE_UP, &hba->adapter_state) || 34 + test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) || 35 + test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state)) 36 + retval = -EPERM; 37 + return retval; 38 + } 39 + 40 + /** 41 + * bnx2i_get_write_cmd_bd_idx - identifies various BD bookmarks 42 + * @cmd: iscsi cmd struct pointer 43 + * @buf_off: absolute buffer offset 44 + * @start_bd_off: u32 pointer to return the offset within the BD 45 + * indicated by 'start_bd_idx' on which 'buf_off' falls 46 + * @start_bd_idx: index of the BD on which 'buf_off' falls 47 + * 48 + * identifies & marks various bd info for scsi command's imm data, 49 + * unsolicited data and the first solicited data seq. 
50 + */ 51 + static void bnx2i_get_write_cmd_bd_idx(struct bnx2i_cmd *cmd, u32 buf_off, 52 + u32 *start_bd_off, u32 *start_bd_idx) 53 + { 54 + struct iscsi_bd *bd_tbl = cmd->io_tbl.bd_tbl; 55 + u32 cur_offset = 0; 56 + u32 cur_bd_idx = 0; 57 + 58 + if (buf_off) { 59 + while (buf_off >= (cur_offset + bd_tbl->buffer_length)) { 60 + cur_offset += bd_tbl->buffer_length; 61 + cur_bd_idx++; 62 + bd_tbl++; 63 + } 64 + } 65 + 66 + *start_bd_off = buf_off - cur_offset; 67 + *start_bd_idx = cur_bd_idx; 68 + } 69 + 70 + /** 71 + * bnx2i_setup_write_cmd_bd_info - sets up BD various information 72 + * @task: transport layer's cmd struct pointer 73 + * 74 + * identifies & marks various bd info for scsi command's immediate data, 75 + * unsolicited data and first solicited data seq which includes BD start 76 + * index & BD buf off. his function takes into account iscsi parameter such 77 + * as immediate data and unsolicited data is support on this connection. 78 + */ 79 + static void bnx2i_setup_write_cmd_bd_info(struct iscsi_task *task) 80 + { 81 + struct bnx2i_cmd *cmd = task->dd_data; 82 + u32 start_bd_offset; 83 + u32 start_bd_idx; 84 + u32 buffer_offset = 0; 85 + u32 cmd_len = cmd->req.total_data_transfer_length; 86 + 87 + /* if ImmediateData is turned off & IntialR2T is turned on, 88 + * there will be no immediate or unsolicited data, just return. 
89 + */ 90 + if (!iscsi_task_has_unsol_data(task) && !task->imm_count) 91 + return; 92 + 93 + /* Immediate data */ 94 + buffer_offset += task->imm_count; 95 + if (task->imm_count == cmd_len) 96 + return; 97 + 98 + if (iscsi_task_has_unsol_data(task)) { 99 + bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset, 100 + &start_bd_offset, &start_bd_idx); 101 + cmd->req.ud_buffer_offset = start_bd_offset; 102 + cmd->req.ud_start_bd_index = start_bd_idx; 103 + buffer_offset += task->unsol_r2t.data_length; 104 + } 105 + 106 + if (buffer_offset != cmd_len) { 107 + bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset, 108 + &start_bd_offset, &start_bd_idx); 109 + if ((start_bd_offset > task->conn->session->first_burst) || 110 + (start_bd_idx > scsi_sg_count(cmd->scsi_cmd))) { 111 + int i = 0; 112 + 113 + iscsi_conn_printk(KERN_ALERT, task->conn, 114 + "bnx2i- error, buf offset 0x%x " 115 + "bd_valid %d use_sg %d\n", 116 + buffer_offset, cmd->io_tbl.bd_valid, 117 + scsi_sg_count(cmd->scsi_cmd)); 118 + for (i = 0; i < cmd->io_tbl.bd_valid; i++) 119 + iscsi_conn_printk(KERN_ALERT, task->conn, 120 + "bnx2i err, bd[%d]: len %x\n", 121 + i, cmd->io_tbl.bd_tbl[i].\ 122 + buffer_length); 123 + } 124 + cmd->req.sd_buffer_offset = start_bd_offset; 125 + cmd->req.sd_start_bd_index = start_bd_idx; 126 + } 127 + } 128 + 129 + 130 + 131 + /** 132 + * bnx2i_map_scsi_sg - maps IO buffer and prepares the BD table 133 + * @hba: adapter instance 134 + * @cmd: iscsi cmd struct pointer 135 + * 136 + * map SG list 137 + */ 138 + static int bnx2i_map_scsi_sg(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd) 139 + { 140 + struct scsi_cmnd *sc = cmd->scsi_cmd; 141 + struct iscsi_bd *bd = cmd->io_tbl.bd_tbl; 142 + struct scatterlist *sg; 143 + int byte_count = 0; 144 + int bd_count = 0; 145 + int sg_count; 146 + int sg_len; 147 + u64 addr; 148 + int i; 149 + 150 + BUG_ON(scsi_sg_count(sc) > ISCSI_MAX_BDS_PER_CMD); 151 + 152 + sg_count = scsi_dma_map(sc); 153 + 154 + scsi_for_each_sg(sc, sg, sg_count, i) { 155 + sg_len 
= sg_dma_len(sg); 156 + addr = (u64) sg_dma_address(sg); 157 + bd[bd_count].buffer_addr_lo = addr & 0xffffffff; 158 + bd[bd_count].buffer_addr_hi = addr >> 32; 159 + bd[bd_count].buffer_length = sg_len; 160 + bd[bd_count].flags = 0; 161 + if (bd_count == 0) 162 + bd[bd_count].flags = ISCSI_BD_FIRST_IN_BD_CHAIN; 163 + 164 + byte_count += sg_len; 165 + bd_count++; 166 + } 167 + 168 + if (bd_count) 169 + bd[bd_count - 1].flags |= ISCSI_BD_LAST_IN_BD_CHAIN; 170 + 171 + BUG_ON(byte_count != scsi_bufflen(sc)); 172 + return bd_count; 173 + } 174 + 175 + /** 176 + * bnx2i_iscsi_map_sg_list - maps SG list 177 + * @cmd: iscsi cmd struct pointer 178 + * 179 + * creates BD list table for the command 180 + */ 181 + static void bnx2i_iscsi_map_sg_list(struct bnx2i_cmd *cmd) 182 + { 183 + int bd_count; 184 + 185 + bd_count = bnx2i_map_scsi_sg(cmd->conn->hba, cmd); 186 + if (!bd_count) { 187 + struct iscsi_bd *bd = cmd->io_tbl.bd_tbl; 188 + 189 + bd[0].buffer_addr_lo = bd[0].buffer_addr_hi = 0; 190 + bd[0].buffer_length = bd[0].flags = 0; 191 + } 192 + cmd->io_tbl.bd_valid = bd_count; 193 + } 194 + 195 + 196 + /** 197 + * bnx2i_iscsi_unmap_sg_list - unmaps SG list 198 + * @cmd: iscsi cmd struct pointer 199 + * 200 + * unmap IO buffers and invalidate the BD table 201 + */ 202 + void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd) 203 + { 204 + struct scsi_cmnd *sc = cmd->scsi_cmd; 205 + 206 + if (cmd->io_tbl.bd_valid && sc) { 207 + scsi_dma_unmap(sc); 208 + cmd->io_tbl.bd_valid = 0; 209 + } 210 + } 211 + 212 + static void bnx2i_setup_cmd_wqe_template(struct bnx2i_cmd *cmd) 213 + { 214 + memset(&cmd->req, 0x00, sizeof(cmd->req)); 215 + cmd->req.op_code = 0xFF; 216 + cmd->req.bd_list_addr_lo = (u32) cmd->io_tbl.bd_tbl_dma; 217 + cmd->req.bd_list_addr_hi = 218 + (u32) ((u64) cmd->io_tbl.bd_tbl_dma >> 32); 219 + 220 + } 221 + 222 + 223 + /** 224 + * bnx2i_bind_conn_to_iscsi_cid - bind conn structure to 'iscsi_cid' 225 + * @hba: pointer to adapter instance 226 + * @conn: pointer to 
iscsi connection 227 + * @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1) 228 + * 229 + * update iscsi cid table entry with connection pointer. This enables 230 + * driver to quickly get hold of connection structure pointer in 231 + * completion/interrupt thread using iscsi context ID 232 + */ 233 + static int bnx2i_bind_conn_to_iscsi_cid(struct bnx2i_hba *hba, 234 + struct bnx2i_conn *bnx2i_conn, 235 + u32 iscsi_cid) 236 + { 237 + if (hba && hba->cid_que.conn_cid_tbl[iscsi_cid]) { 238 + iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data, 239 + "conn bind - entry #%d not free\n", iscsi_cid); 240 + return -EBUSY; 241 + } 242 + 243 + hba->cid_que.conn_cid_tbl[iscsi_cid] = bnx2i_conn; 244 + return 0; 245 + } 246 + 247 + 248 + /** 249 + * bnx2i_get_conn_from_id - maps an iscsi cid to corresponding conn ptr 250 + * @hba: pointer to adapter instance 251 + * @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1) 252 + */ 253 + struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba, 254 + u16 iscsi_cid) 255 + { 256 + if (!hba->cid_que.conn_cid_tbl) { 257 + printk(KERN_ERR "bnx2i: ERROR - missing conn<->cid table\n"); 258 + return NULL; 259 + 260 + } else if (iscsi_cid >= hba->max_active_conns) { 261 + printk(KERN_ERR "bnx2i: wrong cid #%d\n", iscsi_cid); 262 + return NULL; 263 + } 264 + return hba->cid_que.conn_cid_tbl[iscsi_cid]; 265 + } 266 + 267 + 268 + /** 269 + * bnx2i_alloc_iscsi_cid - allocates a iscsi_cid from free pool 270 + * @hba: pointer to adapter instance 271 + */ 272 + static u32 bnx2i_alloc_iscsi_cid(struct bnx2i_hba *hba) 273 + { 274 + int idx; 275 + 276 + if (!hba->cid_que.cid_free_cnt) 277 + return -1; 278 + 279 + idx = hba->cid_que.cid_q_cons_idx; 280 + hba->cid_que.cid_q_cons_idx++; 281 + if (hba->cid_que.cid_q_cons_idx == hba->cid_que.cid_q_max_idx) 282 + hba->cid_que.cid_q_cons_idx = 0; 283 + 284 + hba->cid_que.cid_free_cnt--; 285 + return hba->cid_que.cid_que[idx]; 286 + } 287 + 288 + 289 + /** 290 + * bnx2i_free_iscsi_cid - 
returns tcp port to free list 291 + * @hba: pointer to adapter instance 292 + * @iscsi_cid: iscsi context ID to free 293 + */ 294 + static void bnx2i_free_iscsi_cid(struct bnx2i_hba *hba, u16 iscsi_cid) 295 + { 296 + int idx; 297 + 298 + if (iscsi_cid == (u16) -1) 299 + return; 300 + 301 + hba->cid_que.cid_free_cnt++; 302 + 303 + idx = hba->cid_que.cid_q_prod_idx; 304 + hba->cid_que.cid_que[idx] = iscsi_cid; 305 + hba->cid_que.conn_cid_tbl[iscsi_cid] = NULL; 306 + hba->cid_que.cid_q_prod_idx++; 307 + if (hba->cid_que.cid_q_prod_idx == hba->cid_que.cid_q_max_idx) 308 + hba->cid_que.cid_q_prod_idx = 0; 309 + } 310 + 311 + 312 + /** 313 + * bnx2i_setup_free_cid_que - sets up free iscsi cid queue 314 + * @hba: pointer to adapter instance 315 + * 316 + * allocates memory for iscsi cid queue & 'cid - conn ptr' mapping table, 317 + * and initialize table attributes 318 + */ 319 + static int bnx2i_setup_free_cid_que(struct bnx2i_hba *hba) 320 + { 321 + int mem_size; 322 + int i; 323 + 324 + mem_size = hba->max_active_conns * sizeof(u32); 325 + mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; 326 + 327 + hba->cid_que.cid_que_base = kmalloc(mem_size, GFP_KERNEL); 328 + if (!hba->cid_que.cid_que_base) 329 + return -ENOMEM; 330 + 331 + mem_size = hba->max_active_conns * sizeof(struct bnx2i_conn *); 332 + mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; 333 + hba->cid_que.conn_cid_tbl = kmalloc(mem_size, GFP_KERNEL); 334 + if (!hba->cid_que.conn_cid_tbl) { 335 + kfree(hba->cid_que.cid_que_base); 336 + hba->cid_que.cid_que_base = NULL; 337 + return -ENOMEM; 338 + } 339 + 340 + hba->cid_que.cid_que = (u32 *)hba->cid_que.cid_que_base; 341 + hba->cid_que.cid_q_prod_idx = 0; 342 + hba->cid_que.cid_q_cons_idx = 0; 343 + hba->cid_que.cid_q_max_idx = hba->max_active_conns; 344 + hba->cid_que.cid_free_cnt = hba->max_active_conns; 345 + 346 + for (i = 0; i < hba->max_active_conns; i++) { 347 + hba->cid_que.cid_que[i] = i; 348 + hba->cid_que.conn_cid_tbl[i] = NULL; 349 + } 350 + 
return 0; 351 + } 352 + 353 + 354 + /** 355 + * bnx2i_release_free_cid_que - releases 'iscsi_cid' queue resources 356 + * @hba: pointer to adapter instance 357 + */ 358 + static void bnx2i_release_free_cid_que(struct bnx2i_hba *hba) 359 + { 360 + kfree(hba->cid_que.cid_que_base); 361 + hba->cid_que.cid_que_base = NULL; 362 + 363 + kfree(hba->cid_que.conn_cid_tbl); 364 + hba->cid_que.conn_cid_tbl = NULL; 365 + } 366 + 367 + 368 + /** 369 + * bnx2i_alloc_ep - allocates ep structure from global pool 370 + * @hba: pointer to adapter instance 371 + * 372 + * routine allocates a free endpoint structure from global pool and 373 + * a tcp port to be used for this connection. Global resource lock, 374 + * 'bnx2i_resc_lock' is held while accessing shared global data structures 375 + */ 376 + static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba) 377 + { 378 + struct iscsi_endpoint *ep; 379 + struct bnx2i_endpoint *bnx2i_ep; 380 + 381 + ep = iscsi_create_endpoint(sizeof(*bnx2i_ep)); 382 + if (!ep) { 383 + printk(KERN_ERR "bnx2i: Could not allocate ep\n"); 384 + return NULL; 385 + } 386 + 387 + bnx2i_ep = ep->dd_data; 388 + INIT_LIST_HEAD(&bnx2i_ep->link); 389 + bnx2i_ep->state = EP_STATE_IDLE; 390 + bnx2i_ep->hba = hba; 391 + bnx2i_ep->hba_age = hba->age; 392 + hba->ofld_conns_active++; 393 + init_waitqueue_head(&bnx2i_ep->ofld_wait); 394 + return ep; 395 + } 396 + 397 + 398 + /** 399 + * bnx2i_free_ep - free endpoint 400 + * @ep: pointer to iscsi endpoint structure 401 + */ 402 + static void bnx2i_free_ep(struct iscsi_endpoint *ep) 403 + { 404 + struct bnx2i_endpoint *bnx2i_ep = ep->dd_data; 405 + unsigned long flags; 406 + 407 + spin_lock_irqsave(&bnx2i_resc_lock, flags); 408 + bnx2i_ep->state = EP_STATE_IDLE; 409 + bnx2i_ep->hba->ofld_conns_active--; 410 + 411 + bnx2i_free_iscsi_cid(bnx2i_ep->hba, bnx2i_ep->ep_iscsi_cid); 412 + if (bnx2i_ep->conn) { 413 + bnx2i_ep->conn->ep = NULL; 414 + bnx2i_ep->conn = NULL; 415 + } 416 + 417 + bnx2i_ep->hba = NULL; 418 + 
spin_unlock_irqrestore(&bnx2i_resc_lock, flags); 419 + iscsi_destroy_endpoint(ep); 420 + } 421 + 422 + 423 + /** 424 + * bnx2i_alloc_bdt - allocates buffer descriptor (BD) table for the command 425 + * @hba: adapter instance pointer 426 + * @session: iscsi session pointer 427 + * @cmd: iscsi command structure 428 + */ 429 + static int bnx2i_alloc_bdt(struct bnx2i_hba *hba, struct iscsi_session *session, 430 + struct bnx2i_cmd *cmd) 431 + { 432 + struct io_bdt *io = &cmd->io_tbl; 433 + struct iscsi_bd *bd; 434 + 435 + io->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, 436 + ISCSI_MAX_BDS_PER_CMD * sizeof(*bd), 437 + &io->bd_tbl_dma, GFP_KERNEL); 438 + if (!io->bd_tbl) { 439 + iscsi_session_printk(KERN_ERR, session, "Could not " 440 + "allocate bdt.\n"); 441 + return -ENOMEM; 442 + } 443 + io->bd_valid = 0; 444 + return 0; 445 + } 446 + 447 + /** 448 + * bnx2i_destroy_cmd_pool - destroys iscsi command pool and release BD table 449 + * @hba: adapter instance pointer 450 + * @session: iscsi session pointer 451 + * @cmd: iscsi command structure 452 + */ 453 + static void bnx2i_destroy_cmd_pool(struct bnx2i_hba *hba, 454 + struct iscsi_session *session) 455 + { 456 + int i; 457 + 458 + for (i = 0; i < session->cmds_max; i++) { 459 + struct iscsi_task *task = session->cmds[i]; 460 + struct bnx2i_cmd *cmd = task->dd_data; 461 + 462 + if (cmd->io_tbl.bd_tbl) 463 + dma_free_coherent(&hba->pcidev->dev, 464 + ISCSI_MAX_BDS_PER_CMD * 465 + sizeof(struct iscsi_bd), 466 + cmd->io_tbl.bd_tbl, 467 + cmd->io_tbl.bd_tbl_dma); 468 + } 469 + 470 + } 471 + 472 + 473 + /** 474 + * bnx2i_setup_cmd_pool - sets up iscsi command pool for the session 475 + * @hba: adapter instance pointer 476 + * @session: iscsi session pointer 477 + */ 478 + static int bnx2i_setup_cmd_pool(struct bnx2i_hba *hba, 479 + struct iscsi_session *session) 480 + { 481 + int i; 482 + 483 + for (i = 0; i < session->cmds_max; i++) { 484 + struct iscsi_task *task = session->cmds[i]; 485 + struct bnx2i_cmd *cmd = 
task->dd_data; 486 + 487 + /* Anil */ 488 + task->hdr = &cmd->hdr; 489 + task->hdr_max = sizeof(struct iscsi_hdr); 490 + 491 + if (bnx2i_alloc_bdt(hba, session, cmd)) 492 + goto free_bdts; 493 + } 494 + 495 + return 0; 496 + 497 + free_bdts: 498 + bnx2i_destroy_cmd_pool(hba, session); 499 + return -ENOMEM; 500 + } 501 + 502 + 503 + /** 504 + * bnx2i_setup_mp_bdt - allocate BD table resources 505 + * @hba: pointer to adapter structure 506 + * 507 + * Allocate memory for dummy buffer and associated BD 508 + * table to be used by middle path (MP) requests 509 + */ 510 + static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba) 511 + { 512 + int rc = 0; 513 + struct iscsi_bd *mp_bdt; 514 + u64 addr; 515 + 516 + hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, 517 + &hba->mp_bd_dma, GFP_KERNEL); 518 + if (!hba->mp_bd_tbl) { 519 + printk(KERN_ERR "unable to allocate Middle Path BDT\n"); 520 + rc = -1; 521 + goto out; 522 + } 523 + 524 + hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, 525 + &hba->dummy_buf_dma, GFP_KERNEL); 526 + if (!hba->dummy_buffer) { 527 + printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n"); 528 + dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, 529 + hba->mp_bd_tbl, hba->mp_bd_dma); 530 + hba->mp_bd_tbl = NULL; 531 + rc = -1; 532 + goto out; 533 + } 534 + 535 + mp_bdt = (struct iscsi_bd *) hba->mp_bd_tbl; 536 + addr = (unsigned long) hba->dummy_buf_dma; 537 + mp_bdt->buffer_addr_lo = addr & 0xffffffff; 538 + mp_bdt->buffer_addr_hi = addr >> 32; 539 + mp_bdt->buffer_length = PAGE_SIZE; 540 + mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN | 541 + ISCSI_BD_FIRST_IN_BD_CHAIN; 542 + out: 543 + return rc; 544 + } 545 + 546 + 547 + /** 548 + * bnx2i_free_mp_bdt - releases ITT back to free pool 549 + * @hba: pointer to adapter instance 550 + * 551 + * free MP dummy buffer and associated BD table 552 + */ 553 + static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba) 554 + { 555 + if (hba->mp_bd_tbl) { 556 + 
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->mp_bd_tbl, hba->mp_bd_dma);
		hba->mp_bd_tbl = NULL;
	}
	if (hba->dummy_buffer) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->dummy_buffer, hba->dummy_buf_dma);
		hba->dummy_buffer = NULL;
	}
	return;
}

/**
 * bnx2i_drop_session - notifies iscsid of connection error.
 * @cls_session: iscsi cls session pointer
 *
 * This notifies iscsid that there is a error, so it can initiate
 * recovery.
 *
 * This relies on caller using the iscsi class iterator so the object
 * is refcounted and does not disappear from under us.
 */
void bnx2i_drop_session(struct iscsi_cls_session *cls_session)
{
	iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
}

/**
 * bnx2i_ep_destroy_list_add - add an entry to EP destroy list
 * @hba: pointer to adapter instance
 * @ep: pointer to endpoint (transport indentifier) structure
 *
 * EP destroy queue manager; list is protected by hba->ep_rdwr_lock
 */
static int bnx2i_ep_destroy_list_add(struct bnx2i_hba *hba,
				     struct bnx2i_endpoint *ep)
{
	write_lock_bh(&hba->ep_rdwr_lock);
	list_add_tail(&ep->link, &hba->ep_destroy_list);
	write_unlock_bh(&hba->ep_rdwr_lock);
	return 0;
}

/**
 * bnx2i_ep_destroy_list_del - removes an entry from EP destroy list
 *
 * @hba: pointer to adapter instance
 * @ep: pointer to endpoint (transport indentifier) structure
 *
 * EP destroy queue manager
 */
static int bnx2i_ep_destroy_list_del(struct bnx2i_hba *hba,
				     struct bnx2i_endpoint *ep)
{
	write_lock_bh(&hba->ep_rdwr_lock);
	list_del_init(&ep->link);
	write_unlock_bh(&hba->ep_rdwr_lock);

	return 0;
}

/**
 * bnx2i_ep_ofld_list_add - add an entry to ep offload pending list
 *
 * @hba: pointer to adapter instance
 * @ep: pointer to endpoint (transport indentifier) structure
 *
 * pending conn offload completion queue manager
 */
static int bnx2i_ep_ofld_list_add(struct bnx2i_hba *hba,
				  struct bnx2i_endpoint *ep)
{
	write_lock_bh(&hba->ep_rdwr_lock);
	list_add_tail(&ep->link, &hba->ep_ofld_list);
	write_unlock_bh(&hba->ep_rdwr_lock);
	return 0;
}

/**
 * bnx2i_ep_ofld_list_del - removes an entry from ep offload pending list
 * @hba: pointer to adapter instance
 * @ep: pointer to endpoint (transport indentifier) structure
 *
 * pending conn offload completion queue manager
 */
static int bnx2i_ep_ofld_list_del(struct bnx2i_hba *hba,
				  struct bnx2i_endpoint *ep)
{
	write_lock_bh(&hba->ep_rdwr_lock);
	list_del_init(&ep->link);
	write_unlock_bh(&hba->ep_rdwr_lock);
	return 0;
}


/**
 * bnx2i_find_ep_in_ofld_list - find iscsi_cid in pending list of endpoints
 *
 * @hba: pointer to adapter instance
 * @iscsi_cid: iscsi context ID to find
 *
 * Returns the matching endpoint or NULL (after logging) if not found.
 */
struct bnx2i_endpoint *
bnx2i_find_ep_in_ofld_list(struct bnx2i_hba *hba, u32 iscsi_cid)
{
	struct list_head *list;
	struct list_head *tmp;
	struct bnx2i_endpoint *ep;

	read_lock_bh(&hba->ep_rdwr_lock);
	list_for_each_safe(list, tmp, &hba->ep_ofld_list) {
		/* NOTE(review): cast assumes 'link' is the first member of
		 * struct bnx2i_endpoint -- confirm against the header;
		 * list_entry()/container_of() would be the safer idiom. */
		ep = (struct bnx2i_endpoint *)list;

		if (ep->ep_iscsi_cid == iscsi_cid)
			break;
		ep = NULL;
	}
	read_unlock_bh(&hba->ep_rdwr_lock);

	if (!ep)
		printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);
	return ep;
}


/**
 * bnx2i_find_ep_in_destroy_list - find iscsi_cid in destroy list
 * @hba: pointer to adapter instance
 * @iscsi_cid: iscsi context ID to find
 *
 * Returns the matching endpoint or NULL (after logging) if not found.
 */
struct bnx2i_endpoint *
bnx2i_find_ep_in_destroy_list(struct bnx2i_hba *hba, u32 iscsi_cid)
{
	struct list_head *list;
	struct list_head *tmp;
	struct bnx2i_endpoint *ep;

	read_lock_bh(&hba->ep_rdwr_lock);
	list_for_each_safe(list, tmp, &hba->ep_destroy_list) {
		/* NOTE(review): same first-member cast assumption as
		 * bnx2i_find_ep_in_ofld_list -- confirm. */
		ep = (struct bnx2i_endpoint *)list;

		if (ep->ep_iscsi_cid == iscsi_cid)
			break;
		ep = NULL;
	}
	read_unlock_bh(&hba->ep_rdwr_lock);

	if (!ep)
		printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);

	return ep;
}

/**
 * bnx2i_setup_host_queue_size - assigns shost->can_queue param
 * @hba: pointer to adapter instance
 * @shost: scsi host pointer
 *
 * Initializes 'can_queue' parameter based on how many outstanding commands
 * the device can handle. Each device 5708/5709/57710 has different
 * capabilities; unknown chips fall back to the smallest (5708) limit.
 */
static void bnx2i_setup_host_queue_size(struct bnx2i_hba *hba,
					struct Scsi_Host *shost)
{
	if (test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type))
		shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708;
	else if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type))
		shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5709;
	else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
		shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_57710;
	else
		shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708;
}


/**
 * bnx2i_alloc_hba - allocate and init adapter instance
 * @cnic: cnic device pointer
 *
 * allocate & initialize adapter structure and call other
 * support routines to do per adapter initialization
 */
struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
{
	struct Scsi_Host *shost;
	struct bnx2i_hba *hba;

	shost = iscsi_host_alloc(&bnx2i_host_template, sizeof(*hba), 0);
	if (!shost)
		return NULL;
	shost->dma_boundary = cnic->pcidev->dma_mask;
749 + shost->transportt = bnx2i_scsi_xport_template; 750 + shost->max_id = ISCSI_MAX_CONNS_PER_HBA; 751 + shost->max_channel = 0; 752 + shost->max_lun = 512; 753 + shost->max_cmd_len = 16; 754 + 755 + hba = iscsi_host_priv(shost); 756 + hba->shost = shost; 757 + hba->netdev = cnic->netdev; 758 + /* Get PCI related information and update hba struct members */ 759 + hba->pcidev = cnic->pcidev; 760 + pci_dev_get(hba->pcidev); 761 + hba->pci_did = hba->pcidev->device; 762 + hba->pci_vid = hba->pcidev->vendor; 763 + hba->pci_sdid = hba->pcidev->subsystem_device; 764 + hba->pci_svid = hba->pcidev->subsystem_vendor; 765 + hba->pci_func = PCI_FUNC(hba->pcidev->devfn); 766 + hba->pci_devno = PCI_SLOT(hba->pcidev->devfn); 767 + bnx2i_identify_device(hba); 768 + 769 + bnx2i_identify_device(hba); 770 + bnx2i_setup_host_queue_size(hba, shost); 771 + 772 + if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) { 773 + hba->regview = ioremap_nocache(hba->netdev->base_addr, 774 + BNX2_MQ_CONFIG2); 775 + if (!hba->regview) 776 + goto ioreg_map_err; 777 + } else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) { 778 + hba->regview = ioremap_nocache(hba->netdev->base_addr, 4096); 779 + if (!hba->regview) 780 + goto ioreg_map_err; 781 + } 782 + 783 + if (bnx2i_setup_mp_bdt(hba)) 784 + goto mp_bdt_mem_err; 785 + 786 + INIT_LIST_HEAD(&hba->ep_ofld_list); 787 + INIT_LIST_HEAD(&hba->ep_destroy_list); 788 + rwlock_init(&hba->ep_rdwr_lock); 789 + 790 + hba->mtu_supported = BNX2I_MAX_MTU_SUPPORTED; 791 + 792 + /* different values for 5708/5709/57710 */ 793 + hba->max_active_conns = ISCSI_MAX_CONNS_PER_HBA; 794 + 795 + if (bnx2i_setup_free_cid_que(hba)) 796 + goto cid_que_err; 797 + 798 + /* SQ/RQ/CQ size can be changed via sysfx interface */ 799 + if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) { 800 + if (sq_size && sq_size <= BNX2I_5770X_SQ_WQES_MAX) 801 + hba->max_sqes = sq_size; 802 + else 803 + hba->max_sqes = BNX2I_5770X_SQ_WQES_DEFAULT; 804 + } else { /* 5706/5708/5709 
*/ 805 + if (sq_size && sq_size <= BNX2I_570X_SQ_WQES_MAX) 806 + hba->max_sqes = sq_size; 807 + else 808 + hba->max_sqes = BNX2I_570X_SQ_WQES_DEFAULT; 809 + } 810 + 811 + hba->max_rqes = rq_size; 812 + hba->max_cqes = hba->max_sqes + rq_size; 813 + if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) { 814 + if (hba->max_cqes > BNX2I_5770X_CQ_WQES_MAX) 815 + hba->max_cqes = BNX2I_5770X_CQ_WQES_MAX; 816 + } else if (hba->max_cqes > BNX2I_570X_CQ_WQES_MAX) 817 + hba->max_cqes = BNX2I_570X_CQ_WQES_MAX; 818 + 819 + hba->num_ccell = hba->max_sqes / 2; 820 + 821 + spin_lock_init(&hba->lock); 822 + mutex_init(&hba->net_dev_lock); 823 + 824 + if (iscsi_host_add(shost, &hba->pcidev->dev)) 825 + goto free_dump_mem; 826 + return hba; 827 + 828 + free_dump_mem: 829 + bnx2i_release_free_cid_que(hba); 830 + cid_que_err: 831 + bnx2i_free_mp_bdt(hba); 832 + mp_bdt_mem_err: 833 + if (hba->regview) { 834 + iounmap(hba->regview); 835 + hba->regview = NULL; 836 + } 837 + ioreg_map_err: 838 + pci_dev_put(hba->pcidev); 839 + scsi_host_put(shost); 840 + return NULL; 841 + } 842 + 843 + /** 844 + * bnx2i_free_hba- releases hba structure and resources held by the adapter 845 + * @hba: pointer to adapter instance 846 + * 847 + * free adapter structure and call various cleanup routines. 
 */
void bnx2i_free_hba(struct bnx2i_hba *hba)
{
	struct Scsi_Host *shost = hba->shost;

	iscsi_host_remove(shost);
	INIT_LIST_HEAD(&hba->ep_ofld_list);
	INIT_LIST_HEAD(&hba->ep_destroy_list);
	pci_dev_put(hba->pcidev);

	if (hba->regview) {
		iounmap(hba->regview);
		hba->regview = NULL;
	}
	bnx2i_free_mp_bdt(hba);
	bnx2i_release_free_cid_que(hba);
	iscsi_host_free(shost);
}

/**
 * bnx2i_conn_free_login_resources - free DMA resources used for login process
 * @hba: pointer to adapter instance
 * @bnx2i_conn: iscsi connection pointer
 *
 * Login related resources, mostly BDT & payload DMA memory is freed.
 * Each pointer is NULLed after freeing so partial allocations and
 * repeated calls are safe.
 */
static void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba,
					    struct bnx2i_conn *bnx2i_conn)
{
	if (bnx2i_conn->gen_pdu.resp_bd_tbl) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  bnx2i_conn->gen_pdu.resp_bd_tbl,
				  bnx2i_conn->gen_pdu.resp_bd_dma);
		bnx2i_conn->gen_pdu.resp_bd_tbl = NULL;
	}

	if (bnx2i_conn->gen_pdu.req_bd_tbl) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  bnx2i_conn->gen_pdu.req_bd_tbl,
				  bnx2i_conn->gen_pdu.req_bd_dma);
		bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
	}

	if (bnx2i_conn->gen_pdu.resp_buf) {
		dma_free_coherent(&hba->pcidev->dev,
				  ISCSI_DEF_MAX_RECV_SEG_LEN,
				  bnx2i_conn->gen_pdu.resp_buf,
				  bnx2i_conn->gen_pdu.resp_dma_addr);
		bnx2i_conn->gen_pdu.resp_buf = NULL;
	}

	if (bnx2i_conn->gen_pdu.req_buf) {
		dma_free_coherent(&hba->pcidev->dev,
				  ISCSI_DEF_MAX_RECV_SEG_LEN,
				  bnx2i_conn->gen_pdu.req_buf,
				  bnx2i_conn->gen_pdu.req_dma_addr);
		bnx2i_conn->gen_pdu.req_buf = NULL;
	}
}

/**
 * bnx2i_conn_alloc_login_resources - alloc DMA resources for login/nop.
 * @hba: pointer to adapter instance
 * @bnx2i_conn: iscsi connection pointer
 *
 * Mgmt task DMA resources are allocated in this routine: request and
 * response payload buffers plus one BD table page for each direction.
 * On failure the goto chain unwinds everything allocated so far in
 * reverse order.  Returns 0 on success, -ENOMEM on failure.
 */
static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba,
					    struct bnx2i_conn *bnx2i_conn)
{
	/* Allocate memory for login request/response buffers */
	bnx2i_conn->gen_pdu.req_buf =
		dma_alloc_coherent(&hba->pcidev->dev,
				   ISCSI_DEF_MAX_RECV_SEG_LEN,
				   &bnx2i_conn->gen_pdu.req_dma_addr,
				   GFP_KERNEL);
	if (bnx2i_conn->gen_pdu.req_buf == NULL)
		goto login_req_buf_failure;

	bnx2i_conn->gen_pdu.req_buf_size = 0;
	bnx2i_conn->gen_pdu.req_wr_ptr = bnx2i_conn->gen_pdu.req_buf;

	bnx2i_conn->gen_pdu.resp_buf =
		dma_alloc_coherent(&hba->pcidev->dev,
				   ISCSI_DEF_MAX_RECV_SEG_LEN,
				   &bnx2i_conn->gen_pdu.resp_dma_addr,
				   GFP_KERNEL);
	if (bnx2i_conn->gen_pdu.resp_buf == NULL)
		goto login_resp_buf_failure;

	bnx2i_conn->gen_pdu.resp_buf_size = ISCSI_DEF_MAX_RECV_SEG_LEN;
	bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf;

	bnx2i_conn->gen_pdu.req_bd_tbl =
		dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
				   &bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL);
	if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL)
		goto login_req_bd_tbl_failure;

	bnx2i_conn->gen_pdu.resp_bd_tbl =
		dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
				   &bnx2i_conn->gen_pdu.resp_bd_dma,
				   GFP_KERNEL);
	if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL)
		goto login_resp_bd_tbl_failure;

	return 0;

login_resp_bd_tbl_failure:
	dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
			  bnx2i_conn->gen_pdu.req_bd_tbl,
			  bnx2i_conn->gen_pdu.req_bd_dma);
	bnx2i_conn->gen_pdu.req_bd_tbl = NULL;

login_req_bd_tbl_failure:
	dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
			  bnx2i_conn->gen_pdu.resp_buf,
			  bnx2i_conn->gen_pdu.resp_dma_addr);
	bnx2i_conn->gen_pdu.resp_buf = NULL;
login_resp_buf_failure:
	dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
			  bnx2i_conn->gen_pdu.req_buf,
			  bnx2i_conn->gen_pdu.req_dma_addr);
	bnx2i_conn->gen_pdu.req_buf = NULL;
login_req_buf_failure:
	iscsi_conn_printk(KERN_ERR, bnx2i_conn->cls_conn->dd_data,
			  "login resource alloc failed!!\n");
	return -ENOMEM;

}


/**
 * bnx2i_iscsi_prep_generic_pdu_bd - prepares BD table.
 * @bnx2i_conn: iscsi connection pointer
 *
 * Fills the request BD (length = bytes staged between req_buf and
 * req_wr_ptr) and the response BD (full receive segment) before the
 * PDU prepared by 'iscsid' is handed to cnic.
 */
static void bnx2i_iscsi_prep_generic_pdu_bd(struct bnx2i_conn *bnx2i_conn)
{
	struct iscsi_bd *bd_tbl;

	bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.req_bd_tbl;

	bd_tbl->buffer_addr_hi =
		(u32) ((u64) bnx2i_conn->gen_pdu.req_dma_addr >> 32);
	bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.req_dma_addr;
	bd_tbl->buffer_length = bnx2i_conn->gen_pdu.req_wr_ptr -
				bnx2i_conn->gen_pdu.req_buf;
	bd_tbl->reserved0 = 0;
	bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
			ISCSI_BD_FIRST_IN_BD_CHAIN;

	bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.resp_bd_tbl;
	bd_tbl->buffer_addr_hi = (u64) bnx2i_conn->gen_pdu.resp_dma_addr >> 32;
	bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_dma_addr;
	bd_tbl->buffer_length = ISCSI_DEF_MAX_RECV_SEG_LEN;
	bd_tbl->reserved0 = 0;
	bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
			ISCSI_BD_FIRST_IN_BD_CHAIN;
}


/**
 * bnx2i_iscsi_send_generic_request - called to send mgmt tasks.
 * @task: transport layer task pointer
 *
 * called to transmit PDUs prepared by the 'iscsid' daemon.
iSCSI login,
 * Nop-out and Logout requests flow through this path.
 */
static int bnx2i_iscsi_send_generic_request(struct iscsi_task *task)
{
	struct bnx2i_cmd *cmd = task->dd_data;
	struct bnx2i_conn *bnx2i_conn = cmd->conn;
	int rc = 0;
	char *buf;
	int data_len;

	bnx2i_iscsi_prep_generic_pdu_bd(bnx2i_conn);
	switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
	case ISCSI_OP_LOGIN:
		bnx2i_send_iscsi_login(bnx2i_conn, task);
		break;
	case ISCSI_OP_NOOP_OUT:
		data_len = bnx2i_conn->gen_pdu.req_buf_size;
		buf = bnx2i_conn->gen_pdu.req_buf;
		if (data_len)
			rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
						     RESERVED_ITT,
						     buf, data_len, 1);
		else
			rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
						     RESERVED_ITT,
						     NULL, 0, 1);
		break;
	case ISCSI_OP_LOGOUT:
		rc = bnx2i_send_iscsi_logout(bnx2i_conn, task);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		rc = bnx2i_send_iscsi_tmf(bnx2i_conn, task);
		break;
	default:
		/* NOTE(review): unsupported opcodes are only logged; rc
		 * stays 0 so the caller treats this as success -- confirm
		 * that is intended. */
		iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
				  "send_gen: unsupported op 0x%x\n",
				  task->hdr->opcode);
	}
	return rc;
}


/**********************************************************************
 *		SCSI-ML Interface
 **********************************************************************/

/**
 * bnx2i_cpy_scsi_cdb - copies LUN & CDB fields in required format to sq wqe
 * @sc: SCSI-ML command pointer
 * @cmd: iscsi cmd pointer
 *
 * LUN and CDB bytes are byte-swapped into 32-bit big-endian words as the
 * firmware expects.
 */
static void bnx2i_cpy_scsi_cdb(struct scsi_cmnd *sc, struct bnx2i_cmd *cmd)
{
	u32 dword;
	int lpcnt;
	u8 *srcp;
	u32 *dstp;
	u32 scsi_lun[2];

	int_to_scsilun(sc->device->lun, (struct scsi_lun *) scsi_lun);
	cmd->req.lun[0] = be32_to_cpu(scsi_lun[0]);
	cmd->req.lun[1] = be32_to_cpu(scsi_lun[1]);

	/* copy the CDB one 32-bit word at a time */
	lpcnt = cmd->scsi_cmd->cmd_len / sizeof(dword);
	srcp = (u8 *) sc->cmnd;
	dstp = (u32 *) cmd->req.cdb;
	while (lpcnt--) {
		memcpy(&dword, (const void *) srcp, 4);
		*dstp = cpu_to_be32(dword);
		srcp += 4;
		dstp++;
	}
	if (sc->cmd_len & 0x3) {
		/* NOTE(review): only two residual bytes are packed here; a
		 * CDB length of 4n+3 would lose its last byte -- confirm
		 * whether such lengths can reach this driver. */
		dword = (u32) srcp[0] | ((u32) srcp[1] << 8);
		*dstp = cpu_to_be32(dword);
	}
}

/*
 * bnx2i_cleanup_task - cleanup per-task resources on error/abort.
 *
 * Called with the session lock held.  For a TMF-aborted task a firmware
 * cleanup request is issued; the session lock is dropped while waiting
 * for the completion so the CQE handler can run, then re-taken.
 */
static void bnx2i_cleanup_task(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;
	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
	struct bnx2i_hba *hba = bnx2i_conn->hba;

	/*
	 * mgmt task or cmd was never sent to us to transmit.
	 */
	if (!task->sc || task->state == ISCSI_TASK_PENDING)
		return;
	/*
	 * need to clean-up task context to claim dma buffers
	 */
	if (task->state == ISCSI_TASK_ABRT_TMF) {
		bnx2i_send_cmd_cleanup_req(hba, task->dd_data);

		spin_unlock_bh(&conn->session->lock);
		wait_for_completion_timeout(&bnx2i_conn->cmd_cleanup_cmpl,
				msecs_to_jiffies(ISCSI_CMD_CLEANUP_TIMEOUT));
		spin_lock_bh(&conn->session->lock);
	}
	bnx2i_iscsi_unmap_sg_list(task->dd_data);
}

/**
 * bnx2i_mtask_xmit - transmit mtask to chip for further processing
 * @conn: transport layer conn structure pointer
 * @task: transport layer command structure pointer
 *
 * Copies the mgmt PDU payload into the pre-allocated login request
 * buffer and hands it to bnx2i_iscsi_send_generic_request().
 */
static int
bnx2i_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
{
	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
	struct bnx2i_cmd *cmd = task->dd_data;

	memset(bnx2i_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN);

	bnx2i_setup_cmd_wqe_template(cmd);
	bnx2i_conn->gen_pdu.req_buf_size = task->data_count;
	if (task->data_count) {
		memcpy(bnx2i_conn->gen_pdu.req_buf, task->data,
		       task->data_count);
		bnx2i_conn->gen_pdu.req_wr_ptr =
			bnx2i_conn->gen_pdu.req_buf + task->data_count;
	}
	cmd->conn = conn->dd_data;
	cmd->scsi_cmd = NULL;
	return bnx2i_iscsi_send_generic_request(task);
}

/**
 * bnx2i_task_xmit - transmit iscsi command to chip for further processing
 * @task: transport layer command structure pointer
 *
 * maps SG buffers and send request to chip/firmware in the form of SQ WQE
 */
static int bnx2i_task_xmit(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;
	struct iscsi_session *session = conn->session;
	struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
	struct bnx2i_hba *hba = iscsi_host_priv(shost);
	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
	struct scsi_cmnd *sc = task->sc;
	struct bnx2i_cmd *cmd = task->dd_data;
	struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr;

	if (test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state))
		return -ENOTCONN;

	if (!bnx2i_conn->is_bound)
		return -ENOTCONN;

	/*
	 * If there is no scsi_cmnd this must be a mgmt task
	 */
	if (!sc)
		return bnx2i_mtask_xmit(conn, task);

	bnx2i_setup_cmd_wqe_template(cmd);
	cmd->req.op_code = ISCSI_OP_SCSI_CMD;
	cmd->conn = bnx2i_conn;
	cmd->scsi_cmd = sc;
	cmd->req.total_data_transfer_length = scsi_bufflen(sc);
	cmd->req.cmd_sn = be32_to_cpu(hdr->cmdsn);

	bnx2i_iscsi_map_sg_list(cmd);
	bnx2i_cpy_scsi_cdb(sc, cmd);

	cmd->req.op_attr = ISCSI_ATTR_SIMPLE;
	if (sc->sc_data_direction == DMA_TO_DEVICE) {
		cmd->req.op_attr |= ISCSI_CMD_REQUEST_WRITE;
		/* encode the task type into the upper ITT bits for the FW */
		cmd->req.itt = task->itt |
			(ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
		bnx2i_setup_write_cmd_bd_info(task);
	} else {
		if (scsi_bufflen(sc))
			cmd->req.op_attr |= ISCSI_CMD_REQUEST_READ;
		/* encode the task type into the upper ITT bits for the FW */
		cmd->req.itt = task->itt |
			(ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
	}

	cmd->req.num_bds = cmd->io_tbl.bd_valid;
	if (!cmd->io_tbl.bd_valid) {
		/* zero-length command: point the chip at the MP dummy BD */
		cmd->req.bd_list_addr_lo = (u32) hba->mp_bd_dma;
		cmd->req.bd_list_addr_hi = (u32) ((u64) hba->mp_bd_dma >> 32);
		cmd->req.num_bds = 1;
	}

	bnx2i_send_iscsi_scsicmd(bnx2i_conn, cmd);
	return 0;
}

/**
 * bnx2i_session_create - create a new iscsi session
 * @ep: pointer to iscsi endpoint the session is created against
 * @cmds_max: max commands supported
 * @qdepth: scsi queue depth to support
 * @initial_cmdsn: initial iscsi CMDSN to be used for this session
 *
 * Creates a new iSCSI session instance on given device.
 */
static struct iscsi_cls_session *
bnx2i_session_create(struct iscsi_endpoint *ep,
		     uint16_t cmds_max, uint16_t qdepth,
		     uint32_t initial_cmdsn)
{
	struct Scsi_Host *shost;
	struct iscsi_cls_session *cls_session;
	struct bnx2i_hba *hba;
	struct bnx2i_endpoint *bnx2i_ep;

	if (!ep) {
		printk(KERN_ERR "bnx2i: missing ep.\n");
		return NULL;
	}

	bnx2i_ep = ep->dd_data;
	shost = bnx2i_ep->hba->shost;
	hba = iscsi_host_priv(shost);
	if (bnx2i_adapter_ready(hba))
		return NULL;

	/*
	 * user can override hw limit as long as it is within
	 * the min/max.
	 */
	if (cmds_max > hba->max_sqes)
		cmds_max = hba->max_sqes;
	else if (cmds_max < BNX2I_SQ_WQES_MIN)
		cmds_max = BNX2I_SQ_WQES_MIN;

	cls_session = iscsi_session_setup(&bnx2i_iscsi_transport, shost,
					  cmds_max, sizeof(struct bnx2i_cmd),
					  initial_cmdsn, ISCSI_MAX_TARGET);
	if (!cls_session)
		return NULL;

	if (bnx2i_setup_cmd_pool(hba, cls_session->dd_data))
		goto session_teardown;
	return cls_session;

session_teardown:
	iscsi_session_teardown(cls_session);
	return NULL;
}


/**
 * bnx2i_session_destroy - destroys iscsi session
 * @cls_session: pointer to iscsi cls session
 *
 * Destroys previously created iSCSI session instance and releases
 * all resources held by it
 */
static void bnx2i_session_destroy(struct iscsi_cls_session *cls_session)
{
	struct iscsi_session *session = cls_session->dd_data;
	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
	struct bnx2i_hba *hba = iscsi_host_priv(shost);

	bnx2i_destroy_cmd_pool(hba, session);
	iscsi_session_teardown(cls_session);
}


/**
 * bnx2i_conn_create - create iscsi connection instance
 * @cls_session: pointer to iscsi cls session
 * @cid: iscsi cid as per rfc (not NX2's CID terminology)
 *
 * Creates a new iSCSI connection instance for a given session
 */
static struct iscsi_cls_conn *
bnx2i_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid)
{
	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
	struct bnx2i_hba *hba = iscsi_host_priv(shost);
	struct bnx2i_conn *bnx2i_conn;
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_conn *conn;

	cls_conn = iscsi_conn_setup(cls_session, sizeof(*bnx2i_conn),
				    cid);
	if (!cls_conn)
		return NULL;
	conn = cls_conn->dd_data;

	bnx2i_conn = conn->dd_data;
	bnx2i_conn->cls_conn = cls_conn;
	bnx2i_conn->hba = hba;
	/* 'ep' ptr will be assigned in bind() call */
	bnx2i_conn->ep = NULL;
	init_completion(&bnx2i_conn->cmd_cleanup_cmpl);

	if (bnx2i_conn_alloc_login_resources(hba, bnx2i_conn)) {
		iscsi_conn_printk(KERN_ALERT, conn,
				  "conn_new: login resc alloc failed!!\n");
		goto free_conn;
	}

	return cls_conn;

free_conn:
	iscsi_conn_teardown(cls_conn);
	return NULL;
}

/**
 * bnx2i_conn_bind - binds iscsi sess, conn and ep objects together
 * @cls_session: pointer to iscsi cls session
 * @cls_conn: pointer to iscsi cls conn
 * @transport_fd: 64-bit EP handle
 * @is_leading: leading connection on this session?
 *
 * Binds together iSCSI session instance, iSCSI connection instance
 * and the TCP connection. This routine returns error code if
 * TCP connection does not belong on the device iSCSI sess/conn
 * is bound
 */
static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
			   struct iscsi_cls_conn *cls_conn,
			   uint64_t transport_fd, int is_leading)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
	struct bnx2i_hba *hba = iscsi_host_priv(shost);
	struct bnx2i_endpoint *bnx2i_ep;
	struct iscsi_endpoint *ep;
	int ret_code;

	ep = iscsi_lookup_endpoint(transport_fd);
	if (!ep)
		return -EINVAL;

	bnx2i_ep = ep->dd_data;
	if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) ||
	    (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD))
		/* Peer disconnect via' FIN or RST */
		return -EINVAL;

	if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
		return -EINVAL;

	if (bnx2i_ep->hba != hba) {
		/* Error - TCP connection does not belong to this device
		 */
		iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
				  "conn bind, ep=0x%p (%s) does not",
				  bnx2i_ep, bnx2i_ep->hba->netdev->name);
		iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
				  "belong to hba (%s)\n",
				  hba->netdev->name);
		return -EEXIST;
	}

	bnx2i_ep->conn = bnx2i_conn;
	bnx2i_conn->ep = bnx2i_ep;
	bnx2i_conn->iscsi_conn_cid = bnx2i_ep->ep_iscsi_cid;
	bnx2i_conn->fw_cid = bnx2i_ep->ep_cid;
	bnx2i_conn->is_bound = 1;

	ret_code = bnx2i_bind_conn_to_iscsi_cid(hba, bnx2i_conn,
						bnx2i_ep->ep_iscsi_cid);

	/* 5706/5708/5709 FW takes RQ as full when initiated, but for 57710
	 * driver needs to explicitly replenish RQ index during setup.
	 */
	if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
		bnx2i_put_rq_buf(bnx2i_conn, 0);

	bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
	return ret_code;
}


/**
 * bnx2i_conn_destroy - destroy iscsi connection instance & release resources
 * @cls_conn: pointer to iscsi cls conn
 *
 * Destroy an iSCSI connection instance and release memory resources held by
 * this connection
 */
static void bnx2i_conn_destroy(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
	struct Scsi_Host *shost;
	struct bnx2i_hba *hba;

	shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
	hba = iscsi_host_priv(shost);

	bnx2i_conn_free_login_resources(hba, bnx2i_conn);
	iscsi_conn_teardown(cls_conn);
}


/**
 * bnx2i_conn_get_param - return iscsi connection parameter to caller
 * @cls_conn: pointer to iscsi cls conn
 * @param: parameter type identifier
 * @buf: buffer pointer
 *
 * returns iSCSI connection parameters; port/address come from the cm
 * socket and are only available while an endpoint is bound.
 */
static int bnx2i_conn_get_param(struct iscsi_cls_conn *cls_conn,
				enum iscsi_param param, char *buf)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
	int len = 0;

	switch (param) {
	case ISCSI_PARAM_CONN_PORT:
		if (bnx2i_conn->ep)
			len = sprintf(buf, "%hu\n",
				      bnx2i_conn->ep->cm_sk->dst_port);
		break;
	case ISCSI_PARAM_CONN_ADDRESS:
		if (bnx2i_conn->ep)
			len = sprintf(buf, NIPQUAD_FMT "\n",
				      NIPQUAD(bnx2i_conn->ep->cm_sk->dst_ip));
		break;
	default:
		return iscsi_conn_get_param(cls_conn, param, buf);
	}

	return len;
}
1447 + /** 1448 + * bnx2i_host_get_param - returns host (adapter) related parameters 1449 + * @shost: scsi host pointer 1450 + * @param: parameter type identifier 1451 + * @buf: buffer pointer 1452 + */ 1453 + static int bnx2i_host_get_param(struct Scsi_Host *shost, 1454 + enum iscsi_host_param param, char *buf) 1455 + { 1456 + struct bnx2i_hba *hba = iscsi_host_priv(shost); 1457 + int len = 0; 1458 + 1459 + switch (param) { 1460 + case ISCSI_HOST_PARAM_HWADDRESS: 1461 + len = sysfs_format_mac(buf, hba->cnic->mac_addr, 6); 1462 + break; 1463 + case ISCSI_HOST_PARAM_NETDEV_NAME: 1464 + len = sprintf(buf, "%s\n", hba->netdev->name); 1465 + break; 1466 + default: 1467 + return iscsi_host_get_param(shost, param, buf); 1468 + } 1469 + return len; 1470 + } 1471 + 1472 + /** 1473 + * bnx2i_conn_start - completes iscsi connection migration to FFP 1474 + * @cls_conn: pointer to iscsi cls conn 1475 + * 1476 + * last call in FFP migration to handover iscsi conn to the driver 1477 + */ 1478 + static int bnx2i_conn_start(struct iscsi_cls_conn *cls_conn) 1479 + { 1480 + struct iscsi_conn *conn = cls_conn->dd_data; 1481 + struct bnx2i_conn *bnx2i_conn = conn->dd_data; 1482 + 1483 + bnx2i_conn->ep->state = EP_STATE_ULP_UPDATE_START; 1484 + bnx2i_update_iscsi_conn(conn); 1485 + 1486 + /* 1487 + * this should normally not sleep for a long time so it should 1488 + * not disrupt the caller. 
1489 + */ 1490 + bnx2i_conn->ep->ofld_timer.expires = 1 * HZ + jiffies; 1491 + bnx2i_conn->ep->ofld_timer.function = bnx2i_ep_ofld_timer; 1492 + bnx2i_conn->ep->ofld_timer.data = (unsigned long) bnx2i_conn->ep; 1493 + add_timer(&bnx2i_conn->ep->ofld_timer); 1494 + /* update iSCSI context for this conn, wait for CNIC to complete */ 1495 + wait_event_interruptible(bnx2i_conn->ep->ofld_wait, 1496 + bnx2i_conn->ep->state != EP_STATE_ULP_UPDATE_START); 1497 + 1498 + if (signal_pending(current)) 1499 + flush_signals(current); 1500 + del_timer_sync(&bnx2i_conn->ep->ofld_timer); 1501 + 1502 + iscsi_conn_start(cls_conn); 1503 + return 0; 1504 + } 1505 + 1506 + 1507 + /** 1508 + * bnx2i_conn_get_stats - returns iSCSI stats 1509 + * @cls_conn: pointer to iscsi cls conn 1510 + * @stats: pointer to iscsi statistic struct 1511 + */ 1512 + static void bnx2i_conn_get_stats(struct iscsi_cls_conn *cls_conn, 1513 + struct iscsi_stats *stats) 1514 + { 1515 + struct iscsi_conn *conn = cls_conn->dd_data; 1516 + 1517 + stats->txdata_octets = conn->txdata_octets; 1518 + stats->rxdata_octets = conn->rxdata_octets; 1519 + stats->scsicmd_pdus = conn->scsicmd_pdus_cnt; 1520 + stats->dataout_pdus = conn->dataout_pdus_cnt; 1521 + stats->scsirsp_pdus = conn->scsirsp_pdus_cnt; 1522 + stats->datain_pdus = conn->datain_pdus_cnt; 1523 + stats->r2t_pdus = conn->r2t_pdus_cnt; 1524 + stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt; 1525 + stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt; 1526 + stats->custom_length = 3; 1527 + strcpy(stats->custom[2].desc, "eh_abort_cnt"); 1528 + stats->custom[2].value = conn->eh_abort_cnt; 1529 + stats->digest_err = 0; 1530 + stats->timeout_err = 0; 1531 + stats->custom_length = 0; 1532 + } 1533 + 1534 + 1535 + /** 1536 + * bnx2i_check_route - checks if target IP route belongs to one of NX2 devices 1537 + * @dst_addr: target IP address 1538 + * 1539 + * check if route resolves to BNX2 device 1540 + */ 1541 + static struct bnx2i_hba *bnx2i_check_route(struct sockaddr *dst_addr) 
1542 + { 1543 + struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr; 1544 + struct bnx2i_hba *hba; 1545 + struct cnic_dev *cnic = NULL; 1546 + 1547 + bnx2i_reg_dev_all(); 1548 + 1549 + hba = get_adapter_list_head(); 1550 + if (hba && hba->cnic) 1551 + cnic = hba->cnic->cm_select_dev(desti, CNIC_ULP_ISCSI); 1552 + if (!cnic) { 1553 + printk(KERN_ALERT "bnx2i: no route," 1554 + "can't connect using cnic\n"); 1555 + goto no_nx2_route; 1556 + } 1557 + hba = bnx2i_find_hba_for_cnic(cnic); 1558 + if (!hba) 1559 + goto no_nx2_route; 1560 + 1561 + if (bnx2i_adapter_ready(hba)) { 1562 + printk(KERN_ALERT "bnx2i: check route, hba not found\n"); 1563 + goto no_nx2_route; 1564 + } 1565 + if (hba->netdev->mtu > hba->mtu_supported) { 1566 + printk(KERN_ALERT "bnx2i: %s network i/f mtu is set to %d\n", 1567 + hba->netdev->name, hba->netdev->mtu); 1568 + printk(KERN_ALERT "bnx2i: iSCSI HBA can support mtu of %d\n", 1569 + hba->mtu_supported); 1570 + goto no_nx2_route; 1571 + } 1572 + return hba; 1573 + no_nx2_route: 1574 + return NULL; 1575 + } 1576 + 1577 + 1578 + /** 1579 + * bnx2i_tear_down_conn - tear down iscsi/tcp connection and free resources 1580 + * @hba: pointer to adapter instance 1581 + * @ep: endpoint (transport indentifier) structure 1582 + * 1583 + * destroys cm_sock structure and on chip iscsi context 1584 + */ 1585 + static int bnx2i_tear_down_conn(struct bnx2i_hba *hba, 1586 + struct bnx2i_endpoint *ep) 1587 + { 1588 + if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) 1589 + hba->cnic->cm_destroy(ep->cm_sk); 1590 + 1591 + if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state)) 1592 + ep->state = EP_STATE_DISCONN_COMPL; 1593 + 1594 + if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type) && 1595 + ep->state == EP_STATE_DISCONN_TIMEDOUT) { 1596 + printk(KERN_ALERT "bnx2i - ERROR - please submit GRC Dump," 1597 + " NW/PCIe trace, driver msgs to developers" 1598 + " for analysis\n"); 1599 + return 1; 1600 + } 1601 + 1602 + ep->state = 
EP_STATE_CLEANUP_START; 1603 + init_timer(&ep->ofld_timer); 1604 + ep->ofld_timer.expires = 10*HZ + jiffies; 1605 + ep->ofld_timer.function = bnx2i_ep_ofld_timer; 1606 + ep->ofld_timer.data = (unsigned long) ep; 1607 + add_timer(&ep->ofld_timer); 1608 + 1609 + bnx2i_ep_destroy_list_add(hba, ep); 1610 + 1611 + /* destroy iSCSI context, wait for it to complete */ 1612 + bnx2i_send_conn_destroy(hba, ep); 1613 + wait_event_interruptible(ep->ofld_wait, 1614 + (ep->state != EP_STATE_CLEANUP_START)); 1615 + 1616 + if (signal_pending(current)) 1617 + flush_signals(current); 1618 + del_timer_sync(&ep->ofld_timer); 1619 + 1620 + bnx2i_ep_destroy_list_del(hba, ep); 1621 + 1622 + if (ep->state != EP_STATE_CLEANUP_CMPL) 1623 + /* should never happen */ 1624 + printk(KERN_ALERT "bnx2i - conn destroy failed\n"); 1625 + 1626 + return 0; 1627 + } 1628 + 1629 + 1630 + /** 1631 + * bnx2i_ep_connect - establish TCP connection to target portal 1632 + * @shost: scsi host 1633 + * @dst_addr: target IP address 1634 + * @non_blocking: blocking or non-blocking call 1635 + * 1636 + * this routine initiates the TCP/IP connection by invoking Option-2 i/f 1637 + * with l5_core and the CNIC. 
This is a multi-step process of resolving 1638 + * route to target, create a iscsi connection context, handshaking with 1639 + * CNIC module to create/initialize the socket struct and finally 1640 + * sending down option-2 request to complete TCP 3-way handshake 1641 + */ 1642 + static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost, 1643 + struct sockaddr *dst_addr, 1644 + int non_blocking) 1645 + { 1646 + u32 iscsi_cid = BNX2I_CID_RESERVED; 1647 + struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr; 1648 + struct sockaddr_in6 *desti6; 1649 + struct bnx2i_endpoint *bnx2i_ep; 1650 + struct bnx2i_hba *hba; 1651 + struct cnic_dev *cnic; 1652 + struct cnic_sockaddr saddr; 1653 + struct iscsi_endpoint *ep; 1654 + int rc = 0; 1655 + 1656 + if (shost) 1657 + /* driver is given scsi host to work with */ 1658 + hba = iscsi_host_priv(shost); 1659 + else 1660 + /* 1661 + * check if the given destination can be reached through 1662 + * a iscsi capable NetXtreme2 device 1663 + */ 1664 + hba = bnx2i_check_route(dst_addr); 1665 + if (!hba) { 1666 + rc = -ENOMEM; 1667 + goto check_busy; 1668 + } 1669 + 1670 + cnic = hba->cnic; 1671 + ep = bnx2i_alloc_ep(hba); 1672 + if (!ep) { 1673 + rc = -ENOMEM; 1674 + goto check_busy; 1675 + } 1676 + bnx2i_ep = ep->dd_data; 1677 + 1678 + mutex_lock(&hba->net_dev_lock); 1679 + if (bnx2i_adapter_ready(hba)) { 1680 + rc = -EPERM; 1681 + goto net_if_down; 1682 + } 1683 + 1684 + bnx2i_ep->state = EP_STATE_IDLE; 1685 + bnx2i_ep->ep_iscsi_cid = (u16) -1; 1686 + bnx2i_ep->num_active_cmds = 0; 1687 + iscsi_cid = bnx2i_alloc_iscsi_cid(hba); 1688 + if (iscsi_cid == -1) { 1689 + printk(KERN_ALERT "alloc_ep: unable to allocate iscsi cid\n"); 1690 + rc = -ENOMEM; 1691 + goto iscsi_cid_err; 1692 + } 1693 + bnx2i_ep->hba_age = hba->age; 1694 + 1695 + rc = bnx2i_alloc_qp_resc(hba, bnx2i_ep); 1696 + if (rc != 0) { 1697 + printk(KERN_ALERT "bnx2i: ep_conn, alloc QP resc error\n"); 1698 + rc = -ENOMEM; 1699 + goto qp_resc_err; 1700 + } 
1701 + 1702 + bnx2i_ep->ep_iscsi_cid = (u16)iscsi_cid; 1703 + bnx2i_ep->state = EP_STATE_OFLD_START; 1704 + bnx2i_ep_ofld_list_add(hba, bnx2i_ep); 1705 + 1706 + init_timer(&bnx2i_ep->ofld_timer); 1707 + bnx2i_ep->ofld_timer.expires = 2 * HZ + jiffies; 1708 + bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer; 1709 + bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep; 1710 + add_timer(&bnx2i_ep->ofld_timer); 1711 + 1712 + bnx2i_send_conn_ofld_req(hba, bnx2i_ep); 1713 + 1714 + /* Wait for CNIC hardware to setup conn context and return 'cid' */ 1715 + wait_event_interruptible(bnx2i_ep->ofld_wait, 1716 + bnx2i_ep->state != EP_STATE_OFLD_START); 1717 + 1718 + if (signal_pending(current)) 1719 + flush_signals(current); 1720 + del_timer_sync(&bnx2i_ep->ofld_timer); 1721 + 1722 + bnx2i_ep_ofld_list_del(hba, bnx2i_ep); 1723 + 1724 + if (bnx2i_ep->state != EP_STATE_OFLD_COMPL) { 1725 + rc = -ENOSPC; 1726 + goto conn_failed; 1727 + } 1728 + 1729 + rc = cnic->cm_create(cnic, CNIC_ULP_ISCSI, bnx2i_ep->ep_cid, 1730 + iscsi_cid, &bnx2i_ep->cm_sk, bnx2i_ep); 1731 + if (rc) { 1732 + rc = -EINVAL; 1733 + goto conn_failed; 1734 + } 1735 + 1736 + bnx2i_ep->cm_sk->rcv_buf = 256 * 1024; 1737 + bnx2i_ep->cm_sk->snd_buf = 256 * 1024; 1738 + clear_bit(SK_TCP_TIMESTAMP, &bnx2i_ep->cm_sk->tcp_flags); 1739 + 1740 + memset(&saddr, 0, sizeof(saddr)); 1741 + if (dst_addr->sa_family == AF_INET) { 1742 + desti = (struct sockaddr_in *) dst_addr; 1743 + saddr.remote.v4 = *desti; 1744 + saddr.local.v4.sin_family = desti->sin_family; 1745 + } else if (dst_addr->sa_family == AF_INET6) { 1746 + desti6 = (struct sockaddr_in6 *) dst_addr; 1747 + saddr.remote.v6 = *desti6; 1748 + saddr.local.v6.sin6_family = desti6->sin6_family; 1749 + } 1750 + 1751 + bnx2i_ep->timestamp = jiffies; 1752 + bnx2i_ep->state = EP_STATE_CONNECT_START; 1753 + if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { 1754 + rc = -EINVAL; 1755 + goto conn_failed; 1756 + } else 1757 + rc = cnic->cm_connect(bnx2i_ep->cm_sk, 
&saddr); 1758 + 1759 + if (rc) 1760 + goto release_ep; 1761 + 1762 + if (bnx2i_map_ep_dbell_regs(bnx2i_ep)) 1763 + goto release_ep; 1764 + mutex_unlock(&hba->net_dev_lock); 1765 + return ep; 1766 + 1767 + release_ep: 1768 + if (bnx2i_tear_down_conn(hba, bnx2i_ep)) { 1769 + mutex_unlock(&hba->net_dev_lock); 1770 + return ERR_PTR(rc); 1771 + } 1772 + conn_failed: 1773 + net_if_down: 1774 + iscsi_cid_err: 1775 + bnx2i_free_qp_resc(hba, bnx2i_ep); 1776 + qp_resc_err: 1777 + bnx2i_free_ep(ep); 1778 + mutex_unlock(&hba->net_dev_lock); 1779 + check_busy: 1780 + bnx2i_unreg_dev_all(); 1781 + return ERR_PTR(rc); 1782 + } 1783 + 1784 + 1785 + /** 1786 + * bnx2i_ep_poll - polls for TCP connection establishement 1787 + * @ep: TCP connection (endpoint) handle 1788 + * @timeout_ms: timeout value in milli secs 1789 + * 1790 + * polls for TCP connect request to complete 1791 + */ 1792 + static int bnx2i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) 1793 + { 1794 + struct bnx2i_endpoint *bnx2i_ep; 1795 + int rc = 0; 1796 + 1797 + bnx2i_ep = ep->dd_data; 1798 + if ((bnx2i_ep->state == EP_STATE_IDLE) || 1799 + (bnx2i_ep->state == EP_STATE_CONNECT_FAILED) || 1800 + (bnx2i_ep->state == EP_STATE_OFLD_FAILED)) 1801 + return -1; 1802 + if (bnx2i_ep->state == EP_STATE_CONNECT_COMPL) 1803 + return 1; 1804 + 1805 + rc = wait_event_interruptible_timeout(bnx2i_ep->ofld_wait, 1806 + ((bnx2i_ep->state == 1807 + EP_STATE_OFLD_FAILED) || 1808 + (bnx2i_ep->state == 1809 + EP_STATE_CONNECT_FAILED) || 1810 + (bnx2i_ep->state == 1811 + EP_STATE_CONNECT_COMPL)), 1812 + msecs_to_jiffies(timeout_ms)); 1813 + if (!rc || (bnx2i_ep->state == EP_STATE_OFLD_FAILED)) 1814 + rc = -1; 1815 + 1816 + if (rc > 0) 1817 + return 1; 1818 + else if (!rc) 1819 + return 0; /* timeout */ 1820 + else 1821 + return rc; 1822 + } 1823 + 1824 + 1825 + /** 1826 + * bnx2i_ep_tcp_conn_active - check EP state transition 1827 + * @ep: endpoint pointer 1828 + * 1829 + * check if underlying TCP connection is active 1830 + */ 
1831 + static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep) 1832 + { 1833 + int ret; 1834 + int cnic_dev_10g = 0; 1835 + 1836 + if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type)) 1837 + cnic_dev_10g = 1; 1838 + 1839 + switch (bnx2i_ep->state) { 1840 + case EP_STATE_CONNECT_START: 1841 + case EP_STATE_CLEANUP_FAILED: 1842 + case EP_STATE_OFLD_FAILED: 1843 + case EP_STATE_DISCONN_TIMEDOUT: 1844 + ret = 0; 1845 + break; 1846 + case EP_STATE_CONNECT_COMPL: 1847 + case EP_STATE_ULP_UPDATE_START: 1848 + case EP_STATE_ULP_UPDATE_COMPL: 1849 + case EP_STATE_TCP_FIN_RCVD: 1850 + case EP_STATE_ULP_UPDATE_FAILED: 1851 + ret = 1; 1852 + break; 1853 + case EP_STATE_TCP_RST_RCVD: 1854 + ret = 0; 1855 + break; 1856 + case EP_STATE_CONNECT_FAILED: 1857 + if (cnic_dev_10g) 1858 + ret = 1; 1859 + else 1860 + ret = 0; 1861 + break; 1862 + default: 1863 + ret = 0; 1864 + } 1865 + 1866 + return ret; 1867 + } 1868 + 1869 + 1870 + /** 1871 + * bnx2i_ep_disconnect - executes TCP connection teardown process 1872 + * @ep: TCP connection (endpoint) handle 1873 + * 1874 + * executes TCP connection teardown process 1875 + */ 1876 + static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep) 1877 + { 1878 + struct bnx2i_endpoint *bnx2i_ep; 1879 + struct bnx2i_conn *bnx2i_conn = NULL; 1880 + struct iscsi_session *session = NULL; 1881 + struct iscsi_conn *conn; 1882 + struct cnic_dev *cnic; 1883 + struct bnx2i_hba *hba; 1884 + 1885 + bnx2i_ep = ep->dd_data; 1886 + 1887 + /* driver should not attempt connection cleanup untill TCP_CONNECT 1888 + * completes either successfully or fails. 
Timeout is 9-secs, so 1889 + * wait for it to complete 1890 + */ 1891 + while ((bnx2i_ep->state == EP_STATE_CONNECT_START) && 1892 + !time_after(jiffies, bnx2i_ep->timestamp + (12 * HZ))) 1893 + msleep(250); 1894 + 1895 + if (bnx2i_ep->conn) { 1896 + bnx2i_conn = bnx2i_ep->conn; 1897 + conn = bnx2i_conn->cls_conn->dd_data; 1898 + session = conn->session; 1899 + 1900 + spin_lock_bh(&session->lock); 1901 + bnx2i_conn->is_bound = 0; 1902 + spin_unlock_bh(&session->lock); 1903 + } 1904 + 1905 + hba = bnx2i_ep->hba; 1906 + if (bnx2i_ep->state == EP_STATE_IDLE) 1907 + goto return_bnx2i_ep; 1908 + cnic = hba->cnic; 1909 + 1910 + mutex_lock(&hba->net_dev_lock); 1911 + 1912 + if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) 1913 + goto free_resc; 1914 + if (bnx2i_ep->hba_age != hba->age) 1915 + goto free_resc; 1916 + 1917 + if (!bnx2i_ep_tcp_conn_active(bnx2i_ep)) 1918 + goto destory_conn; 1919 + 1920 + bnx2i_ep->state = EP_STATE_DISCONN_START; 1921 + 1922 + init_timer(&bnx2i_ep->ofld_timer); 1923 + bnx2i_ep->ofld_timer.expires = 10*HZ + jiffies; 1924 + bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer; 1925 + bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep; 1926 + add_timer(&bnx2i_ep->ofld_timer); 1927 + 1928 + if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { 1929 + int close = 0; 1930 + 1931 + if (session) { 1932 + spin_lock_bh(&session->lock); 1933 + if (session->state == ISCSI_STATE_LOGGING_OUT) 1934 + close = 1; 1935 + spin_unlock_bh(&session->lock); 1936 + } 1937 + if (close) 1938 + cnic->cm_close(bnx2i_ep->cm_sk); 1939 + else 1940 + cnic->cm_abort(bnx2i_ep->cm_sk); 1941 + } else 1942 + goto free_resc; 1943 + 1944 + /* wait for option-2 conn teardown */ 1945 + wait_event_interruptible(bnx2i_ep->ofld_wait, 1946 + bnx2i_ep->state != EP_STATE_DISCONN_START); 1947 + 1948 + if (signal_pending(current)) 1949 + flush_signals(current); 1950 + del_timer_sync(&bnx2i_ep->ofld_timer); 1951 + 1952 + destory_conn: 1953 + if (bnx2i_tear_down_conn(hba, 
bnx2i_ep)) { 1954 + mutex_unlock(&hba->net_dev_lock); 1955 + return; 1956 + } 1957 + free_resc: 1958 + mutex_unlock(&hba->net_dev_lock); 1959 + bnx2i_free_qp_resc(hba, bnx2i_ep); 1960 + return_bnx2i_ep: 1961 + if (bnx2i_conn) 1962 + bnx2i_conn->ep = NULL; 1963 + 1964 + bnx2i_free_ep(ep); 1965 + 1966 + if (!hba->ofld_conns_active) 1967 + bnx2i_unreg_dev_all(); 1968 + } 1969 + 1970 + 1971 + /** 1972 + * bnx2i_nl_set_path - ISCSI_UEVENT_PATH_UPDATE user message handler 1973 + * @buf: pointer to buffer containing iscsi path message 1974 + * 1975 + */ 1976 + static int bnx2i_nl_set_path(struct Scsi_Host *shost, struct iscsi_path *params) 1977 + { 1978 + struct bnx2i_hba *hba = iscsi_host_priv(shost); 1979 + char *buf = (char *) params; 1980 + u16 len = sizeof(*params); 1981 + 1982 + /* handled by cnic driver */ 1983 + hba->cnic->iscsi_nl_msg_recv(hba->cnic, ISCSI_UEVENT_PATH_UPDATE, buf, 1984 + len); 1985 + 1986 + return 0; 1987 + } 1988 + 1989 + 1990 + /* 1991 + * 'Scsi_Host_Template' structure and 'iscsi_tranport' structure template 1992 + * used while registering with the scsi host and iSCSI transport module. 
1993 + */ 1994 + static struct scsi_host_template bnx2i_host_template = { 1995 + .module = THIS_MODULE, 1996 + .name = "Broadcom Offload iSCSI Initiator", 1997 + .proc_name = "bnx2i", 1998 + .queuecommand = iscsi_queuecommand, 1999 + .eh_abort_handler = iscsi_eh_abort, 2000 + .eh_device_reset_handler = iscsi_eh_device_reset, 2001 + .eh_target_reset_handler = iscsi_eh_target_reset, 2002 + .can_queue = 1024, 2003 + .max_sectors = 127, 2004 + .cmd_per_lun = 32, 2005 + .this_id = -1, 2006 + .use_clustering = ENABLE_CLUSTERING, 2007 + .sg_tablesize = ISCSI_MAX_BDS_PER_CMD, 2008 + .shost_attrs = bnx2i_dev_attributes, 2009 + }; 2010 + 2011 + struct iscsi_transport bnx2i_iscsi_transport = { 2012 + .owner = THIS_MODULE, 2013 + .name = "bnx2i", 2014 + .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | 2015 + CAP_MULTI_R2T | CAP_DATADGST | 2016 + CAP_DATA_PATH_OFFLOAD, 2017 + .param_mask = ISCSI_MAX_RECV_DLENGTH | 2018 + ISCSI_MAX_XMIT_DLENGTH | 2019 + ISCSI_HDRDGST_EN | 2020 + ISCSI_DATADGST_EN | 2021 + ISCSI_INITIAL_R2T_EN | 2022 + ISCSI_MAX_R2T | 2023 + ISCSI_IMM_DATA_EN | 2024 + ISCSI_FIRST_BURST | 2025 + ISCSI_MAX_BURST | 2026 + ISCSI_PDU_INORDER_EN | 2027 + ISCSI_DATASEQ_INORDER_EN | 2028 + ISCSI_ERL | 2029 + ISCSI_CONN_PORT | 2030 + ISCSI_CONN_ADDRESS | 2031 + ISCSI_EXP_STATSN | 2032 + ISCSI_PERSISTENT_PORT | 2033 + ISCSI_PERSISTENT_ADDRESS | 2034 + ISCSI_TARGET_NAME | ISCSI_TPGT | 2035 + ISCSI_USERNAME | ISCSI_PASSWORD | 2036 + ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN | 2037 + ISCSI_FAST_ABORT | ISCSI_ABORT_TMO | 2038 + ISCSI_LU_RESET_TMO | 2039 + ISCSI_PING_TMO | ISCSI_RECV_TMO | 2040 + ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME, 2041 + .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_NETDEV_NAME, 2042 + .create_session = bnx2i_session_create, 2043 + .destroy_session = bnx2i_session_destroy, 2044 + .create_conn = bnx2i_conn_create, 2045 + .bind_conn = bnx2i_conn_bind, 2046 + .destroy_conn = bnx2i_conn_destroy, 2047 + .set_param = iscsi_set_param, 2048 + .get_conn_param = 
bnx2i_conn_get_param, 2049 + .get_session_param = iscsi_session_get_param, 2050 + .get_host_param = bnx2i_host_get_param, 2051 + .start_conn = bnx2i_conn_start, 2052 + .stop_conn = iscsi_conn_stop, 2053 + .send_pdu = iscsi_conn_send_pdu, 2054 + .xmit_task = bnx2i_task_xmit, 2055 + .get_stats = bnx2i_conn_get_stats, 2056 + /* TCP connect - disconnect - option-2 interface calls */ 2057 + .ep_connect = bnx2i_ep_connect, 2058 + .ep_poll = bnx2i_ep_poll, 2059 + .ep_disconnect = bnx2i_ep_disconnect, 2060 + .set_path = bnx2i_nl_set_path, 2061 + /* Error recovery timeout call */ 2062 + .session_recovery_timedout = iscsi_session_recovery_timedout, 2063 + .cleanup_task = bnx2i_cleanup_task, 2064 + };
+142
drivers/scsi/bnx2i/bnx2i_sysfs.c
··· 1 + /* bnx2i_sysfs.c: Broadcom NetXtreme II iSCSI driver. 2 + * 3 + * Copyright (c) 2004 - 2009 Broadcom Corporation 4 + * 5 + * This program is free software; you can redistribute it and/or modify 6 + * it under the terms of the GNU General Public License as published by 7 + * the Free Software Foundation. 8 + * 9 + * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) 10 + */ 11 + 12 + #include "bnx2i.h" 13 + 14 + /** 15 + * bnx2i_dev_to_hba - maps dev pointer to adapter struct 16 + * @dev: device pointer 17 + * 18 + * Map device to hba structure 19 + */ 20 + static inline struct bnx2i_hba *bnx2i_dev_to_hba(struct device *dev) 21 + { 22 + struct Scsi_Host *shost = class_to_shost(dev); 23 + return iscsi_host_priv(shost); 24 + } 25 + 26 + 27 + /** 28 + * bnx2i_show_sq_info - return(s currently configured send queue (SQ) size 29 + * @dev: device pointer 30 + * @buf: buffer to return current SQ size parameter 31 + * 32 + * Returns current SQ size parameter, this paramater determines the number 33 + * outstanding iSCSI commands supported on a connection 34 + */ 35 + static ssize_t bnx2i_show_sq_info(struct device *dev, 36 + struct device_attribute *attr, char *buf) 37 + { 38 + struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev); 39 + 40 + return sprintf(buf, "0x%x\n", hba->max_sqes); 41 + } 42 + 43 + 44 + /** 45 + * bnx2i_set_sq_info - update send queue (SQ) size parameter 46 + * @dev: device pointer 47 + * @buf: buffer to return current SQ size parameter 48 + * @count: parameter buffer size 49 + * 50 + * Interface for user to change shared queue size allocated for each conn 51 + * Must be within SQ limits and a power of 2. For the latter this is needed 52 + * because of how libiscsi preallocates tasks. 
53 + */ 54 + static ssize_t bnx2i_set_sq_info(struct device *dev, 55 + struct device_attribute *attr, 56 + const char *buf, size_t count) 57 + { 58 + struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev); 59 + u32 val; 60 + int max_sq_size; 61 + 62 + if (hba->ofld_conns_active) 63 + goto skip_config; 64 + 65 + if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) 66 + max_sq_size = BNX2I_5770X_SQ_WQES_MAX; 67 + else 68 + max_sq_size = BNX2I_570X_SQ_WQES_MAX; 69 + 70 + if (sscanf(buf, " 0x%x ", &val) > 0) { 71 + if ((val >= BNX2I_SQ_WQES_MIN) && (val <= max_sq_size) && 72 + (is_power_of_2(val))) 73 + hba->max_sqes = val; 74 + } 75 + 76 + return count; 77 + 78 + skip_config: 79 + printk(KERN_ERR "bnx2i: device busy, cannot change SQ size\n"); 80 + return 0; 81 + } 82 + 83 + 84 + /** 85 + * bnx2i_show_ccell_info - returns command cell (HQ) size 86 + * @dev: device pointer 87 + * @buf: buffer to return current SQ size parameter 88 + * 89 + * returns per-connection TCP history queue size parameter 90 + */ 91 + static ssize_t bnx2i_show_ccell_info(struct device *dev, 92 + struct device_attribute *attr, char *buf) 93 + { 94 + struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev); 95 + 96 + return sprintf(buf, "0x%x\n", hba->num_ccell); 97 + } 98 + 99 + 100 + /** 101 + * bnx2i_get_link_state - set command cell (HQ) size 102 + * @dev: device pointer 103 + * @buf: buffer to return current SQ size parameter 104 + * @count: parameter buffer size 105 + * 106 + * updates per-connection TCP history queue size parameter 107 + */ 108 + static ssize_t bnx2i_set_ccell_info(struct device *dev, 109 + struct device_attribute *attr, 110 + const char *buf, size_t count) 111 + { 112 + u32 val; 113 + struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev); 114 + 115 + if (hba->ofld_conns_active) 116 + goto skip_config; 117 + 118 + if (sscanf(buf, " 0x%x ", &val) > 0) { 119 + if ((val >= BNX2I_CCELLS_MIN) && 120 + (val <= BNX2I_CCELLS_MAX)) { 121 + hba->num_ccell = val; 122 + } 123 + } 124 + 125 + return count; 126 
+ 127 + skip_config: 128 + printk(KERN_ERR "bnx2i: device busy, cannot change CCELL size\n"); 129 + return 0; 130 + } 131 + 132 + 133 + static DEVICE_ATTR(sq_size, S_IRUGO | S_IWUSR, 134 + bnx2i_show_sq_info, bnx2i_set_sq_info); 135 + static DEVICE_ATTR(num_ccell, S_IRUGO | S_IWUSR, 136 + bnx2i_show_ccell_info, bnx2i_set_ccell_info); 137 + 138 + struct device_attribute *bnx2i_dev_attributes[] = { 139 + &dev_attr_sq_size, 140 + &dev_attr_num_ccell, 141 + NULL 142 + };