
scsi: elx: libefc_sli: Queue create/destroy/parse routines

Add service routines to create mailbox commands and add APIs to
create/destroy/parse SLI-4 EQ, CQ, RQ and MQ queues.

Link: https://lore.kernel.org/r/20210601235512.20104-5-jsmart2021@gmail.com
Reviewed-by: Daniel Wagner <dwagner@suse.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Co-developed-by: Ram Vegesna <ram.vegesna@broadcom.com>
Signed-off-by: Ram Vegesna <ram.vegesna@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
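
For orientation, a minimal sketch of how the new APIs compose (not taken from the patch, and assuming a struct sli4 context initialized earlier in the series; example_bring_up_eq_cq() is a hypothetical caller). sli_queue_alloc() picks the entry size, DMA-allocates the ring via __sli_queue_init(), writes the type-specific create command into the bootstrap mailbox, and issues it through __sli_create_queue():

/* Sketch only: error handling trimmed to the essentials. */
static int example_bring_up_eq_cq(struct sli4 *sli4,
				  struct sli4_queue *eq,
				  struct sli4_queue *cq)
{
	/* EQ first: the CQ create command must reference its id */
	if (sli_queue_alloc(sli4, SLI4_QTYPE_EQ, eq, 1024, NULL))
		return -EIO;

	/* CQ next, associated with the EQ just created */
	if (sli_queue_alloc(sli4, SLI4_QTYPE_CQ, cq, 1024, eq)) {
		sli_queue_free(sli4, eq, true, true);
		return -EIO;
	}

	/* arm the EQ so the hardware starts posting events */
	sli_queue_eq_arm(sli4, eq, true);
	return 0;
}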

Authored by James Smart and committed by Martin K. Petersen (7c5b7683, 18be69fa)

3 files changed, 1371 insertions(+)

drivers/scsi/elx/include/efc_common.h (+15)
 	struct pci_dev *pdev;
 };
 
+#define efc_log_crit(efc, fmt, args...) \
+	dev_crit(&((efc)->pci)->dev, fmt, ##args)
+
+#define efc_log_err(efc, fmt, args...) \
+	dev_err(&((efc)->pci)->dev, fmt, ##args)
+
+#define efc_log_warn(efc, fmt, args...) \
+	dev_warn(&((efc)->pci)->dev, fmt, ##args)
+
+#define efc_log_info(efc, fmt, args...) \
+	dev_info(&((efc)->pci)->dev, fmt, ##args)
+
+#define efc_log_debug(efc, fmt, args...) \
+	dev_dbg(&((efc)->pci)->dev, fmt, ##args)
+
 #endif /* __EFC_COMMON_H__ */
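
These wrappers route driver messages through the standard dev_*() helpers using the PCI device embedded in the context object. A hedged usage sketch, assuming a struct efc with a struct pci_dev *pci member as used elsewhere in this series; efc_report_fw() is a hypothetical helper, not part of the patch:

/* Hypothetical example: works for any object with a "pci" member. */
static void efc_report_fw(struct efc *efc, const char *fw_name, int err)
{
	if (err)
		efc_log_err(efc, "failed to load %s (%d)\n", fw_name, err);
	else
		efc_log_info(efc, "loaded firmware %s\n", fw_name);
}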
drivers/scsi/elx/libefc_sli/sli4.c (+1347)
 	{ SLI4_ASIC_REV_A1, SLI4_ASIC_GEN_7},
 	{ SLI4_ASIC_REV_A0, SLI4_ASIC_GEN_7},
 };
+
+/* Convert queue type enum (SLI_QTYPE_*) into a string */
+static char *SLI4_QNAME[] = {
+	"Event Queue",
+	"Completion Queue",
+	"Mailbox Queue",
+	"Work Queue",
+	"Receive Queue",
+	"Undefined"
+};
+
+/**
+ * sli_config_cmd_init() - Write a SLI_CONFIG command to the provided buffer.
+ *
+ * @sli4: SLI context pointer.
+ * @buf: Destination buffer for the command.
+ * @length: Length in bytes of attached command.
+ * @dma: DMA buffer for non-embedded commands.
+ * Return: Command payload buffer.
+ */
+static void *
+sli_config_cmd_init(struct sli4 *sli4, void *buf, u32 length,
+		    struct efc_dma *dma)
+{
+	struct sli4_cmd_sli_config *config;
+	u32 flags;
+
+	if (length > sizeof(config->payload.embed) && !dma) {
+		efc_log_err(sli4, "Too big for an embedded cmd with len(%d)\n",
+			    length);
+		return NULL;
+	}
+
+	memset(buf, 0, SLI4_BMBX_SIZE);
+
+	config = buf;
+
+	config->hdr.command = SLI4_MBX_CMD_SLI_CONFIG;
+	if (!dma) {
+		flags = SLI4_SLICONF_EMB;
+		config->dw1_flags = cpu_to_le32(flags);
+		config->payload_len = cpu_to_le32(length);
+		return config->payload.embed;
+	}
+
+	flags = SLI4_SLICONF_PMDCMD_VAL_1;
+	flags &= ~SLI4_SLICONF_EMB;
+	config->dw1_flags = cpu_to_le32(flags);
+
+	config->payload.mem.addr.low = cpu_to_le32(lower_32_bits(dma->phys));
+	config->payload.mem.addr.high = cpu_to_le32(upper_32_bits(dma->phys));
+	config->payload.mem.length =
+		cpu_to_le32(dma->size & SLI4_SLICONF_PMD_LEN);
+	config->payload_len = cpu_to_le32(dma->size);
+	/* save pointer to DMA for BMBX dumping purposes */
+	sli4->bmbx_non_emb_pmd = dma;
+	return dma->virt;
+}
+
+/**
+ * sli_cmd_common_create_cq() - Write a COMMON_CREATE_CQ V2 command.
+ *
+ * @sli4: SLI context pointer.
+ * @buf: Destination buffer for the command.
+ * @qmem: DMA memory for queue.
+ * @eq_id: EQ id associated with this cq.
+ * Return: status -EIO/0.
+ */
+static int
+sli_cmd_common_create_cq(struct sli4 *sli4, void *buf, struct efc_dma *qmem,
+			 u16 eq_id)
+{
+	struct sli4_rqst_cmn_create_cq_v2 *cqv2 = NULL;
+	u32 p;
+	uintptr_t addr;
+	u32 num_pages = 0;
+	size_t cmd_size = 0;
+	u32 page_size = 0;
+	u32 n_cqe = 0;
+	u32 dw5_flags = 0;
+	u16 dw6w1_arm = 0;
+	__le32 len;
+
+	/* First calculate number of pages and the mailbox cmd length */
+	n_cqe = qmem->size / SLI4_CQE_BYTES;
+	switch (n_cqe) {
+	case 256:
+	case 512:
+	case 1024:
+	case 2048:
+		page_size = SZ_4K;
+		break;
+	case 4096:
+		page_size = SZ_8K;
+		break;
+	default:
+		return -EIO;
+	}
+	num_pages = sli_page_count(qmem->size, page_size);
+
+	cmd_size = SLI4_RQST_CMDSZ(cmn_create_cq_v2)
+		   + SZ_DMAADDR * num_pages;
+
+	cqv2 = sli_config_cmd_init(sli4, buf, cmd_size, NULL);
+	if (!cqv2)
+		return -EIO;
+
+	len = SLI4_RQST_PYLD_LEN_VAR(cmn_create_cq_v2, SZ_DMAADDR * num_pages);
+	sli_cmd_fill_hdr(&cqv2->hdr, SLI4_CMN_CREATE_CQ, SLI4_SUBSYSTEM_COMMON,
+			 CMD_V2, len);
+	cqv2->page_size = page_size / SLI_PAGE_SIZE;
+
+	/* valid values for number of pages: 1, 2, 4, 8 (sec 4.4.3) */
+	cqv2->num_pages = cpu_to_le16(num_pages);
+	if (!num_pages || num_pages > SLI4_CREATE_CQV2_MAX_PAGES)
+		return -EIO;
+
+	switch (num_pages) {
+	case 1:
+		dw5_flags |= SLI4_CQ_CNT_VAL(256);
+		break;
+	case 2:
+		dw5_flags |= SLI4_CQ_CNT_VAL(512);
+		break;
+	case 4:
+		dw5_flags |= SLI4_CQ_CNT_VAL(1024);
+		break;
+	case 8:
+		dw5_flags |= SLI4_CQ_CNT_VAL(LARGE);
+		cqv2->cqe_count = cpu_to_le16(n_cqe);
+		break;
+	default:
+		efc_log_err(sli4, "num_pages %d not valid\n", num_pages);
+		return -EIO;
+	}
+
+	if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+		dw5_flags |= SLI4_CREATE_CQV2_AUTOVALID;
+
+	dw5_flags |= SLI4_CREATE_CQV2_EVT;
+	dw5_flags |= SLI4_CREATE_CQV2_VALID;
+
+	cqv2->dw5_flags = cpu_to_le32(dw5_flags);
+	cqv2->dw6w1_arm = cpu_to_le16(dw6w1_arm);
+	cqv2->eq_id = cpu_to_le16(eq_id);
+
+	for (p = 0, addr = qmem->phys; p < num_pages; p++, addr += page_size) {
+		cqv2->page_phys_addr[p].low = cpu_to_le32(lower_32_bits(addr));
+		cqv2->page_phys_addr[p].high = cpu_to_le32(upper_32_bits(addr));
+	}
+
+	return 0;
+}
+
+static int
+sli_cmd_common_create_eq(struct sli4 *sli4, void *buf, struct efc_dma *qmem)
+{
+	struct sli4_rqst_cmn_create_eq *eq;
+	u32 p;
+	uintptr_t addr;
+	u16 num_pages;
+	u32 dw5_flags = 0;
+	u32 dw6_flags = 0, ver;
+
+	eq = sli_config_cmd_init(sli4, buf, SLI4_CFG_PYLD_LENGTH(cmn_create_eq),
+				 NULL);
+	if (!eq)
+		return -EIO;
+
+	if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+		ver = CMD_V2;
+	else
+		ver = CMD_V0;
+
+	sli_cmd_fill_hdr(&eq->hdr, SLI4_CMN_CREATE_EQ, SLI4_SUBSYSTEM_COMMON,
+			 ver, SLI4_RQST_PYLD_LEN(cmn_create_eq));
+
+	/* valid values for number of pages: 1, 2, 4 (sec 4.4.3) */
+	num_pages = qmem->size / SLI_PAGE_SIZE;
+	eq->num_pages = cpu_to_le16(num_pages);
+
+	switch (num_pages) {
+	case 1:
+		dw5_flags |= SLI4_EQE_SIZE_4;
+		dw6_flags |= SLI4_EQ_CNT_VAL(1024);
+		break;
+	case 2:
+		dw5_flags |= SLI4_EQE_SIZE_4;
+		dw6_flags |= SLI4_EQ_CNT_VAL(2048);
+		break;
+	case 4:
+		dw5_flags |= SLI4_EQE_SIZE_4;
+		dw6_flags |= SLI4_EQ_CNT_VAL(4096);
+		break;
+	default:
+		efc_log_err(sli4, "num_pages %d not valid\n", num_pages);
+		return -EIO;
+	}
+
+	if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+		dw5_flags |= SLI4_CREATE_EQ_AUTOVALID;
+
+	dw5_flags |= SLI4_CREATE_EQ_VALID;
+	dw6_flags &= (~SLI4_CREATE_EQ_ARM);
+	eq->dw5_flags = cpu_to_le32(dw5_flags);
+	eq->dw6_flags = cpu_to_le32(dw6_flags);
+	eq->dw7_delaymulti = cpu_to_le32(SLI4_CREATE_EQ_DELAYMULTI);
+
+	for (p = 0, addr = qmem->phys; p < num_pages;
+	     p++, addr += SLI_PAGE_SIZE) {
+		eq->page_address[p].low = cpu_to_le32(lower_32_bits(addr));
+		eq->page_address[p].high = cpu_to_le32(upper_32_bits(addr));
+	}
+
+	return 0;
+}
+
+static int
+sli_cmd_common_create_mq_ext(struct sli4 *sli4, void *buf, struct efc_dma *qmem,
+			     u16 cq_id)
+{
+	struct sli4_rqst_cmn_create_mq_ext *mq;
+	u32 p;
+	uintptr_t addr;
+	u32 num_pages;
+	u16 dw6w1_flags = 0;
+
+	mq = sli_config_cmd_init(sli4, buf,
+				 SLI4_CFG_PYLD_LENGTH(cmn_create_mq_ext), NULL);
+	if (!mq)
+		return -EIO;
+
+	sli_cmd_fill_hdr(&mq->hdr, SLI4_CMN_CREATE_MQ_EXT,
+			 SLI4_SUBSYSTEM_COMMON, CMD_V0,
+			 SLI4_RQST_PYLD_LEN(cmn_create_mq_ext));
+
+	/* valid values for number of pages: 1, 2, 4, 8 (sec 4.4.12) */
+	num_pages = qmem->size / SLI_PAGE_SIZE;
+	mq->num_pages = cpu_to_le16(num_pages);
+	switch (num_pages) {
+	case 1:
+		dw6w1_flags |= SLI4_MQE_SIZE_16;
+		break;
+	case 2:
+		dw6w1_flags |= SLI4_MQE_SIZE_32;
+		break;
+	case 4:
+		dw6w1_flags |= SLI4_MQE_SIZE_64;
+		break;
+	case 8:
+		dw6w1_flags |= SLI4_MQE_SIZE_128;
+		break;
+	default:
+		efc_log_info(sli4, "num_pages %d not valid\n", num_pages);
+		return -EIO;
+	}
+
+	mq->async_event_bitmap = cpu_to_le32(SLI4_ASYNC_EVT_FC_ALL);
+
+	if (sli4->params.mq_create_version) {
+		mq->cq_id_v1 = cpu_to_le16(cq_id);
+		mq->hdr.dw3_version = cpu_to_le32(CMD_V1);
+	} else {
+		dw6w1_flags |= (cq_id << SLI4_CREATE_MQEXT_CQID_SHIFT);
+	}
+	mq->dw7_val = cpu_to_le32(SLI4_CREATE_MQEXT_VAL);
+
+	mq->dw6w1_flags = cpu_to_le16(dw6w1_flags);
+	for (p = 0, addr = qmem->phys; p < num_pages;
+	     p++, addr += SLI_PAGE_SIZE) {
+		mq->page_phys_addr[p].low = cpu_to_le32(lower_32_bits(addr));
+		mq->page_phys_addr[p].high = cpu_to_le32(upper_32_bits(addr));
+	}
+
+	return 0;
+}
+
+int
+sli_cmd_wq_create(struct sli4 *sli4, void *buf, struct efc_dma *qmem, u16 cq_id)
+{
+	struct sli4_rqst_wq_create *wq;
+	u32 p;
+	uintptr_t addr;
+	u32 page_size = 0;
+	u32 n_wqe = 0;
+	u16 num_pages;
+
+	wq = sli_config_cmd_init(sli4, buf, SLI4_CFG_PYLD_LENGTH(wq_create),
+				 NULL);
+	if (!wq)
+		return -EIO;
+
+	sli_cmd_fill_hdr(&wq->hdr, SLI4_OPC_WQ_CREATE, SLI4_SUBSYSTEM_FC,
+			 CMD_V1, SLI4_RQST_PYLD_LEN(wq_create));
+	n_wqe = qmem->size / sli4->wqe_size;
+
+	switch (qmem->size) {
+	case 4096:
+	case 8192:
+	case 16384:
+	case 32768:
+		page_size = SZ_4K;
+		break;
+	case 65536:
+		page_size = SZ_8K;
+		break;
+	case 131072:
+		page_size = SZ_16K;
+		break;
+	case 262144:
+		page_size = SZ_32K;
+		break;
+	case 524288:
+		page_size = SZ_64K;
+		break;
+	default:
+		return -EIO;
+	}
+
+	/* valid values for number of pages(num_pages): 1-8 */
+	num_pages = sli_page_count(qmem->size, page_size);
+	wq->num_pages = cpu_to_le16(num_pages);
+	if (!num_pages || num_pages > SLI4_WQ_CREATE_MAX_PAGES)
+		return -EIO;
+
+	wq->cq_id = cpu_to_le16(cq_id);
+
+	wq->page_size = page_size / SLI_PAGE_SIZE;
+
+	if (sli4->wqe_size == SLI4_WQE_EXT_BYTES)
+		wq->wqe_size_byte |= SLI4_WQE_EXT_SIZE;
+	else
+		wq->wqe_size_byte |= SLI4_WQE_SIZE;
+
+	wq->wqe_count = cpu_to_le16(n_wqe);
+
+	for (p = 0, addr = qmem->phys; p < num_pages; p++, addr += page_size) {
+		wq->page_phys_addr[p].low = cpu_to_le32(lower_32_bits(addr));
+		wq->page_phys_addr[p].high = cpu_to_le32(upper_32_bits(addr));
+	}
+
+	return 0;
+}
+
+static int
+sli_cmd_rq_create_v1(struct sli4 *sli4, void *buf, struct efc_dma *qmem,
+		     u16 cq_id, u16 buffer_size)
+{
+	struct sli4_rqst_rq_create_v1 *rq;
+	u32 p;
+	uintptr_t addr;
+	u32 num_pages;
+
+	rq = sli_config_cmd_init(sli4, buf, SLI4_CFG_PYLD_LENGTH(rq_create_v1),
+				 NULL);
+	if (!rq)
+		return -EIO;
+
+	sli_cmd_fill_hdr(&rq->hdr, SLI4_OPC_RQ_CREATE, SLI4_SUBSYSTEM_FC,
+			 CMD_V1, SLI4_RQST_PYLD_LEN(rq_create_v1));
+	/* Disable "no buffer warnings" to avoid Lancer bug */
+	rq->dim_dfd_dnb |= SLI4_RQ_CREATE_V1_DNB;
+
+	/* valid values for number of pages: 1-8 (sec 4.5.6) */
+	num_pages = sli_page_count(qmem->size, SLI_PAGE_SIZE);
+	rq->num_pages = cpu_to_le16(num_pages);
+	if (!num_pages ||
+	    num_pages > SLI4_RQ_CREATE_V1_MAX_PAGES) {
+		efc_log_info(sli4, "num_pages %d not valid, max %d\n",
+			     num_pages, SLI4_RQ_CREATE_V1_MAX_PAGES);
+		return -EIO;
+	}
+
+	/*
+	 * RQE count is the total number of entries (note not lg2(# entries))
+	 */
+	rq->rqe_count = cpu_to_le16(qmem->size / SLI4_RQE_SIZE);
+
+	rq->rqe_size_byte |= SLI4_RQE_SIZE_8;
+
+	rq->page_size = SLI4_RQ_PAGE_SIZE_4096;
+
+	if (buffer_size < sli4->rq_min_buf_size ||
+	    buffer_size > sli4->rq_max_buf_size) {
+		efc_log_err(sli4, "buffer_size %d out of range (%d-%d)\n",
+			    buffer_size, sli4->rq_min_buf_size,
+			    sli4->rq_max_buf_size);
+		return -EIO;
+	}
+	rq->buffer_size = cpu_to_le32(buffer_size);
+
+	rq->cq_id = cpu_to_le16(cq_id);
+
+	for (p = 0, addr = qmem->phys;
+	     p < num_pages;
+	     p++, addr += SLI_PAGE_SIZE) {
+		rq->page_phys_addr[p].low = cpu_to_le32(lower_32_bits(addr));
+		rq->page_phys_addr[p].high = cpu_to_le32(upper_32_bits(addr));
+	}
+
+	return 0;
+}
+
+static int
+sli_cmd_rq_create_v2(struct sli4 *sli4, u32 num_rqs,
+		     struct sli4_queue *qs[], u32 base_cq_id,
+		     u32 header_buffer_size,
+		     u32 payload_buffer_size, struct efc_dma *dma)
+{
+	struct sli4_rqst_rq_create_v2 *req = NULL;
+	u32 i, p, offset = 0;
+	u32 payload_size, page_count;
+	uintptr_t addr;
+	u32 num_pages;
+	__le32 len;
+
+	page_count = sli_page_count(qs[0]->dma.size, SLI_PAGE_SIZE) * num_rqs;
+
+	/* Payload length must accommodate both request and response */
+	payload_size = max(SLI4_RQST_CMDSZ(rq_create_v2) +
+			   SZ_DMAADDR * page_count,
+			   sizeof(struct sli4_rsp_cmn_create_queue_set));
+
+	dma->size = payload_size;
+	dma->virt = dma_alloc_coherent(&sli4->pci->dev, dma->size,
+				       &dma->phys, GFP_DMA);
+	if (!dma->virt)
+		return -EIO;
+
+	memset(dma->virt, 0, payload_size);
+
+	req = sli_config_cmd_init(sli4, sli4->bmbx.virt, payload_size, dma);
+	if (!req)
+		return -EIO;
+
+	len = SLI4_RQST_PYLD_LEN_VAR(rq_create_v2, SZ_DMAADDR * page_count);
+	sli_cmd_fill_hdr(&req->hdr, SLI4_OPC_RQ_CREATE, SLI4_SUBSYSTEM_FC,
+			 CMD_V2, len);
+	/* Fill Payload fields */
+	req->dim_dfd_dnb |= SLI4_RQCREATEV2_DNB;
+	num_pages = sli_page_count(qs[0]->dma.size, SLI_PAGE_SIZE);
+	req->num_pages = cpu_to_le16(num_pages);
+	req->rqe_count = cpu_to_le16(qs[0]->dma.size / SLI4_RQE_SIZE);
+	req->rqe_size_byte |= SLI4_RQE_SIZE_8;
+	req->page_size = SLI4_RQ_PAGE_SIZE_4096;
+	req->rq_count = num_rqs;
+	req->base_cq_id = cpu_to_le16(base_cq_id);
+	req->hdr_buffer_size = cpu_to_le16(header_buffer_size);
+	req->payload_buffer_size = cpu_to_le16(payload_buffer_size);
+
+	for (i = 0; i < num_rqs; i++) {
+		for (p = 0, addr = qs[i]->dma.phys; p < num_pages;
+		     p++, addr += SLI_PAGE_SIZE) {
+			req->page_phys_addr[offset].low =
+				cpu_to_le32(lower_32_bits(addr));
+			req->page_phys_addr[offset].high =
+				cpu_to_le32(upper_32_bits(addr));
+			offset++;
+		}
+	}
+
+	return 0;
+}
+
+static void
+__sli_queue_destroy(struct sli4 *sli4, struct sli4_queue *q)
+{
+	if (!q->dma.size)
+		return;
+
+	dma_free_coherent(&sli4->pci->dev, q->dma.size,
+			  q->dma.virt, q->dma.phys);
+	memset(&q->dma, 0, sizeof(struct efc_dma));
+}
+
+int
+__sli_queue_init(struct sli4 *sli4, struct sli4_queue *q, u32 qtype,
+		 size_t size, u32 n_entries, u32 align)
+{
+	if (q->dma.virt) {
+		efc_log_err(sli4, "%s failed\n", __func__);
+		return -EIO;
+	}
+
+	memset(q, 0, sizeof(struct sli4_queue));
+
+	q->dma.size = size * n_entries;
+	q->dma.virt = dma_alloc_coherent(&sli4->pci->dev, q->dma.size,
+					 &q->dma.phys, GFP_DMA);
+	if (!q->dma.virt) {
+		memset(&q->dma, 0, sizeof(struct efc_dma));
+		efc_log_err(sli4, "%s allocation failed\n", SLI4_QNAME[qtype]);
+		return -EIO;
+	}
+
+	memset(q->dma.virt, 0, size * n_entries);
+
+	spin_lock_init(&q->lock);
+
+	q->type = qtype;
+	q->size = size;
+	q->length = n_entries;
+
+	if (q->type == SLI4_QTYPE_EQ || q->type == SLI4_QTYPE_CQ) {
+		/* For prism, phase will be flipped after
+		 * a sweep through eq and cq
+		 */
+		q->phase = 1;
+	}
+
+	/* Limit to half the queue size per interrupt */
+	q->proc_limit = n_entries / 2;
+
+	if (q->type == SLI4_QTYPE_EQ)
+		q->posted_limit = q->length / 2;
+	else
+		q->posted_limit = 64;
+
+	return 0;
+}
+
+int
+sli_fc_rq_alloc(struct sli4 *sli4, struct sli4_queue *q,
+		u32 n_entries, u32 buffer_size,
+		struct sli4_queue *cq, bool is_hdr)
+{
+	if (__sli_queue_init(sli4, q, SLI4_QTYPE_RQ, SLI4_RQE_SIZE,
+			     n_entries, SLI_PAGE_SIZE))
+		return -EIO;
+
+	if (sli_cmd_rq_create_v1(sli4, sli4->bmbx.virt, &q->dma, cq->id,
+				 buffer_size))
+		goto error;
+
+	if (__sli_create_queue(sli4, q))
+		goto error;
+
+	if (is_hdr && q->id & 1) {
+		efc_log_info(sli4, "bad header RQ_ID %d\n", q->id);
+		goto error;
+	} else if (!is_hdr && (q->id & 1) == 0) {
+		efc_log_info(sli4, "bad data RQ_ID %d\n", q->id);
+		goto error;
+	}
+
+	if (is_hdr)
+		q->u.flag |= SLI4_QUEUE_FLAG_HDR;
+	else
+		q->u.flag &= ~SLI4_QUEUE_FLAG_HDR;
+
+	return 0;
+
+error:
+	__sli_queue_destroy(sli4, q);
+	return -EIO;
+}
+
+int
+sli_fc_rq_set_alloc(struct sli4 *sli4, u32 num_rq_pairs,
+		    struct sli4_queue *qs[], u32 base_cq_id,
+		    u32 n_entries, u32 header_buffer_size,
+		    u32 payload_buffer_size)
+{
+	u32 i;
+	struct efc_dma dma = {0};
+	struct sli4_rsp_cmn_create_queue_set *rsp = NULL;
+	void __iomem *db_regaddr = NULL;
+	u32 num_rqs = num_rq_pairs * 2;
+
+	for (i = 0; i < num_rqs; i++) {
+		if (__sli_queue_init(sli4, qs[i], SLI4_QTYPE_RQ,
+				     SLI4_RQE_SIZE, n_entries,
+				     SLI_PAGE_SIZE)) {
+			goto error;
+		}
+	}
+
+	if (sli_cmd_rq_create_v2(sli4, num_rqs, qs, base_cq_id,
+				 header_buffer_size, payload_buffer_size,
+				 &dma)) {
+		goto error;
+	}
+
+	if (sli_bmbx_command(sli4)) {
+		efc_log_err(sli4, "bootstrap mailbox write failed RQSet\n");
+		goto error;
+	}
+
+	if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+		db_regaddr = sli4->reg[1] + SLI4_IF6_RQ_DB_REG;
+	else
+		db_regaddr = sli4->reg[0] + SLI4_RQ_DB_REG;
+
+	rsp = dma.virt;
+	if (rsp->hdr.status) {
+		efc_log_err(sli4, "bad create RQSet status=%#x addl=%#x\n",
+			    rsp->hdr.status, rsp->hdr.additional_status);
+		goto error;
+	}
+
+	for (i = 0; i < num_rqs; i++) {
+		qs[i]->id = i + le16_to_cpu(rsp->q_id);
+		if ((qs[i]->id & 1) == 0)
+			qs[i]->u.flag |= SLI4_QUEUE_FLAG_HDR;
+		else
+			qs[i]->u.flag &= ~SLI4_QUEUE_FLAG_HDR;
+
+		qs[i]->db_regaddr = db_regaddr;
+	}
+
+	dma_free_coherent(&sli4->pci->dev, dma.size, dma.virt, dma.phys);
+
+	return 0;
+
+error:
+	for (i = 0; i < num_rqs; i++)
+		__sli_queue_destroy(sli4, qs[i]);
+
+	if (dma.virt)
+		dma_free_coherent(&sli4->pci->dev, dma.size, dma.virt,
+				  dma.phys);
+
+	return -EIO;
+}
+
+static int
+sli_res_sli_config(struct sli4 *sli4, void *buf)
+{
+	struct sli4_cmd_sli_config *sli_config = buf;
+
+	/* sanity check */
+	if (!buf || sli_config->hdr.command !=
+		    SLI4_MBX_CMD_SLI_CONFIG) {
+		efc_log_err(sli4, "bad parameter buf=%p cmd=%#x\n", buf,
+			    buf ? sli_config->hdr.command : -1);
+		return -EIO;
+	}
+
+	if (le16_to_cpu(sli_config->hdr.status))
+		return le16_to_cpu(sli_config->hdr.status);
+
+	if (le32_to_cpu(sli_config->dw1_flags) & SLI4_SLICONF_EMB)
+		return sli_config->payload.embed[4];
+
+	efc_log_info(sli4, "external buffers not supported\n");
+	return -EIO;
+}
+
+int
+__sli_create_queue(struct sli4 *sli4, struct sli4_queue *q)
+{
+	struct sli4_rsp_cmn_create_queue *res_q = NULL;
+
+	if (sli_bmbx_command(sli4)) {
+		efc_log_crit(sli4, "bootstrap mailbox write fail %s\n",
+			     SLI4_QNAME[q->type]);
+		return -EIO;
+	}
+	if (sli_res_sli_config(sli4, sli4->bmbx.virt)) {
+		efc_log_err(sli4, "bad status create %s\n",
+			    SLI4_QNAME[q->type]);
+		return -EIO;
+	}
+	res_q = (void *)((u8 *)sli4->bmbx.virt +
+			 offsetof(struct sli4_cmd_sli_config, payload));
+
+	if (res_q->hdr.status) {
+		efc_log_err(sli4, "bad create %s status=%#x addl=%#x\n",
+			    SLI4_QNAME[q->type], res_q->hdr.status,
+			    res_q->hdr.additional_status);
+		return -EIO;
+	}
+	q->id = le16_to_cpu(res_q->q_id);
+	switch (q->type) {
+	case SLI4_QTYPE_EQ:
+		if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+			q->db_regaddr = sli4->reg[1] + SLI4_IF6_EQ_DB_REG;
+		else
+			q->db_regaddr = sli4->reg[0] + SLI4_EQCQ_DB_REG;
+		break;
+	case SLI4_QTYPE_CQ:
+		if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+			q->db_regaddr = sli4->reg[1] + SLI4_IF6_CQ_DB_REG;
+		else
+			q->db_regaddr = sli4->reg[0] + SLI4_EQCQ_DB_REG;
+		break;
+	case SLI4_QTYPE_MQ:
+		if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+			q->db_regaddr = sli4->reg[1] + SLI4_IF6_MQ_DB_REG;
+		else
+			q->db_regaddr = sli4->reg[0] + SLI4_MQ_DB_REG;
+		break;
+	case SLI4_QTYPE_RQ:
+		if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+			q->db_regaddr = sli4->reg[1] + SLI4_IF6_RQ_DB_REG;
+		else
+			q->db_regaddr = sli4->reg[0] + SLI4_RQ_DB_REG;
+		break;
+	case SLI4_QTYPE_WQ:
+		if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+			q->db_regaddr = sli4->reg[1] + SLI4_IF6_WQ_DB_REG;
+		else
+			q->db_regaddr = sli4->reg[0] + SLI4_IO_WQ_DB_REG;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+int
+sli_get_queue_entry_size(struct sli4 *sli4, u32 qtype)
+{
+	u32 size = 0;
+
+	switch (qtype) {
+	case SLI4_QTYPE_EQ:
+		size = sizeof(u32);
+		break;
+	case SLI4_QTYPE_CQ:
+		size = 16;
+		break;
+	case SLI4_QTYPE_MQ:
+		size = 256;
+		break;
+	case SLI4_QTYPE_WQ:
+		size = sli4->wqe_size;
+		break;
+	case SLI4_QTYPE_RQ:
+		size = SLI4_RQE_SIZE;
+		break;
+	default:
+		efc_log_info(sli4, "unknown queue type %d\n", qtype);
+		return -1;
+	}
+	return size;
+}
+
+int
+sli_queue_alloc(struct sli4 *sli4, u32 qtype,
+		struct sli4_queue *q, u32 n_entries,
+		struct sli4_queue *assoc)
+{
+	int size;
+	u32 align = 0;
+
+	/* get queue size */
+	size = sli_get_queue_entry_size(sli4, qtype);
+	if (size < 0)
+		return -EIO;
+	align = SLI_PAGE_SIZE;
+
+	if (__sli_queue_init(sli4, q, qtype, size, n_entries, align))
+		return -EIO;
+
+	switch (qtype) {
+	case SLI4_QTYPE_EQ:
+		if (!sli_cmd_common_create_eq(sli4, sli4->bmbx.virt, &q->dma) &&
+		    !__sli_create_queue(sli4, q))
+			return 0;
+
+		break;
+	case SLI4_QTYPE_CQ:
+		if (!sli_cmd_common_create_cq(sli4, sli4->bmbx.virt, &q->dma,
+					      assoc ? assoc->id : 0) &&
+		    !__sli_create_queue(sli4, q))
+			return 0;
+
+		break;
+	case SLI4_QTYPE_MQ:
+		assoc->u.flag |= SLI4_QUEUE_FLAG_MQ;
+		if (!sli_cmd_common_create_mq_ext(sli4, sli4->bmbx.virt,
+						  &q->dma, assoc->id) &&
+		    !__sli_create_queue(sli4, q))
+			return 0;
+
+		break;
+	case SLI4_QTYPE_WQ:
+		if (!sli_cmd_wq_create(sli4, sli4->bmbx.virt, &q->dma,
+				       assoc ? assoc->id : 0) &&
+		    !__sli_create_queue(sli4, q))
+			return 0;
+
+		break;
+	default:
+		efc_log_info(sli4, "unknown queue type %d\n", qtype);
+	}
+
+	__sli_queue_destroy(sli4, q);
+	return -EIO;
+}
+
+static int sli_cmd_cq_set_create(struct sli4 *sli4,
+				 struct sli4_queue *qs[], u32 num_cqs,
+				 struct sli4_queue *eqs[],
+				 struct efc_dma *dma)
+{
+	struct sli4_rqst_cmn_create_cq_set_v0 *req = NULL;
+	uintptr_t addr;
+	u32 i, offset = 0, page_bytes = 0, payload_size;
+	u32 p = 0, page_size = 0, n_cqe = 0, num_pages_cq;
+	u32 dw5_flags = 0;
+	u16 dw6w1_flags = 0;
+	__le32 req_len;
+
+	n_cqe = qs[0]->dma.size / SLI4_CQE_BYTES;
+	switch (n_cqe) {
+	case 256:
+	case 512:
+	case 1024:
+	case 2048:
+		page_size = 1;
+		break;
+	case 4096:
+		page_size = 2;
+		break;
+	default:
+		return -EIO;
+	}
+
+	page_bytes = page_size * SLI_PAGE_SIZE;
+	num_pages_cq = sli_page_count(qs[0]->dma.size, page_bytes);
+	payload_size = max(SLI4_RQST_CMDSZ(cmn_create_cq_set_v0) +
+			   (SZ_DMAADDR * num_pages_cq * num_cqs),
+			   sizeof(struct sli4_rsp_cmn_create_queue_set));
+
+	dma->size = payload_size;
+	dma->virt = dma_alloc_coherent(&sli4->pci->dev, dma->size,
+				       &dma->phys, GFP_DMA);
+	if (!dma->virt)
+		return -EIO;
+
+	memset(dma->virt, 0, payload_size);
+
+	req = sli_config_cmd_init(sli4, sli4->bmbx.virt, payload_size, dma);
+	if (!req)
+		return -EIO;
+
+	req_len = SLI4_RQST_PYLD_LEN_VAR(cmn_create_cq_set_v0,
+					 SZ_DMAADDR * num_pages_cq * num_cqs);
+	sli_cmd_fill_hdr(&req->hdr, SLI4_CMN_CREATE_CQ_SET, SLI4_SUBSYSTEM_FC,
+			 CMD_V0, req_len);
+	req->page_size = page_size;
+
+	req->num_pages = cpu_to_le16(num_pages_cq);
+	switch (num_pages_cq) {
+	case 1:
+		dw5_flags |= SLI4_CQ_CNT_VAL(256);
+		break;
+	case 2:
+		dw5_flags |= SLI4_CQ_CNT_VAL(512);
+		break;
+	case 4:
+		dw5_flags |= SLI4_CQ_CNT_VAL(1024);
+		break;
+	case 8:
+		dw5_flags |= SLI4_CQ_CNT_VAL(LARGE);
+		dw6w1_flags |= (n_cqe & SLI4_CREATE_CQSETV0_CQE_COUNT);
+		break;
+	default:
+		efc_log_info(sli4, "num_pages %d not valid\n", num_pages_cq);
+		return -EIO;
+	}
+
+	dw5_flags |= SLI4_CREATE_CQSETV0_EVT;
+	dw5_flags |= SLI4_CREATE_CQSETV0_VALID;
+	if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+		dw5_flags |= SLI4_CREATE_CQSETV0_AUTOVALID;
+
+	dw6w1_flags &= ~SLI4_CREATE_CQSETV0_ARM;
+
+	req->dw5_flags = cpu_to_le32(dw5_flags);
+	req->dw6w1_flags = cpu_to_le16(dw6w1_flags);
+
+	req->num_cq_req = cpu_to_le16(num_cqs);
+
+	/* Fill page addresses of all the CQs. */
+	for (i = 0; i < num_cqs; i++) {
+		req->eq_id[i] = cpu_to_le16(eqs[i]->id);
+		for (p = 0, addr = qs[i]->dma.phys; p < num_pages_cq;
+		     p++, addr += page_bytes) {
+			req->page_phys_addr[offset].low =
+				cpu_to_le32(lower_32_bits(addr));
+			req->page_phys_addr[offset].high =
+				cpu_to_le32(upper_32_bits(addr));
+			offset++;
+		}
+	}
+
+	return 0;
+}
+
+int
+sli_cq_alloc_set(struct sli4 *sli4, struct sli4_queue *qs[],
+		 u32 num_cqs, u32 n_entries, struct sli4_queue *eqs[])
+{
+	u32 i;
+	struct efc_dma dma = {0};
+	struct sli4_rsp_cmn_create_queue_set *res;
+	void __iomem *db_regaddr;
+
+	/* Align the queue DMA memory */
+	for (i = 0; i < num_cqs; i++) {
+		if (__sli_queue_init(sli4, qs[i], SLI4_QTYPE_CQ, SLI4_CQE_BYTES,
+				     n_entries, SLI_PAGE_SIZE))
+			goto error;
+	}
+
+	if (sli_cmd_cq_set_create(sli4, qs, num_cqs, eqs, &dma))
+		goto error;
+
+	if (sli_bmbx_command(sli4))
+		goto error;
+
+	if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+		db_regaddr = sli4->reg[1] + SLI4_IF6_CQ_DB_REG;
+	else
+		db_regaddr = sli4->reg[0] + SLI4_EQCQ_DB_REG;
+
+	res = dma.virt;
+	if (res->hdr.status) {
+		efc_log_err(sli4, "bad create CQSet status=%#x addl=%#x\n",
+			    res->hdr.status, res->hdr.additional_status);
+		goto error;
+	}
+
+	/* Check if we got all requested CQs. */
+	if (le16_to_cpu(res->num_q_allocated) != num_cqs) {
+		efc_log_crit(sli4, "Requested count CQs doesn't match.\n");
+		goto error;
+	}
+	/* Fill the resp cq ids. */
+	for (i = 0; i < num_cqs; i++) {
+		qs[i]->id = le16_to_cpu(res->q_id) + i;
+		qs[i]->db_regaddr = db_regaddr;
+	}
+
+	dma_free_coherent(&sli4->pci->dev, dma.size, dma.virt, dma.phys);
+
+	return 0;
+
+error:
+	for (i = 0; i < num_cqs; i++)
+		__sli_queue_destroy(sli4, qs[i]);
+
+	if (dma.virt)
+		dma_free_coherent(&sli4->pci->dev, dma.size, dma.virt,
+				  dma.phys);
+
+	return -EIO;
+}
+
+static int
+sli_cmd_common_destroy_q(struct sli4 *sli4, u8 opc, u8 subsystem, u16 q_id)
+{
+	struct sli4_rqst_cmn_destroy_q *req;
+
+	/* Payload length must accommodate both request and response */
+	req = sli_config_cmd_init(sli4, sli4->bmbx.virt,
+				  SLI4_CFG_PYLD_LENGTH(cmn_destroy_q), NULL);
+	if (!req)
+		return -EIO;
+
+	sli_cmd_fill_hdr(&req->hdr, opc, subsystem,
+			 CMD_V0, SLI4_RQST_PYLD_LEN(cmn_destroy_q));
+	req->q_id = cpu_to_le16(q_id);
+
+	return 0;
+}
+
+int
+sli_queue_free(struct sli4 *sli4, struct sli4_queue *q,
+	       u32 destroy_queues, u32 free_memory)
+{
+	int rc = 0;
+	u8 opcode, subsystem;
+	struct sli4_rsp_hdr *res;
+
+	if (!q) {
+		efc_log_err(sli4, "bad parameter sli4=%p q=%p\n", sli4, q);
+		return -EIO;
+	}
+
+	if (!destroy_queues)
+		goto free_mem;
+
+	switch (q->type) {
+	case SLI4_QTYPE_EQ:
+		opcode = SLI4_CMN_DESTROY_EQ;
+		subsystem = SLI4_SUBSYSTEM_COMMON;
+		break;
+	case SLI4_QTYPE_CQ:
+		opcode = SLI4_CMN_DESTROY_CQ;
+		subsystem = SLI4_SUBSYSTEM_COMMON;
+		break;
+	case SLI4_QTYPE_MQ:
+		opcode = SLI4_CMN_DESTROY_MQ;
+		subsystem = SLI4_SUBSYSTEM_COMMON;
+		break;
+	case SLI4_QTYPE_WQ:
+		opcode = SLI4_OPC_WQ_DESTROY;
+		subsystem = SLI4_SUBSYSTEM_FC;
+		break;
+	case SLI4_QTYPE_RQ:
+		opcode = SLI4_OPC_RQ_DESTROY;
+		subsystem = SLI4_SUBSYSTEM_FC;
+		break;
+	default:
+		efc_log_info(sli4, "bad queue type %d\n", q->type);
+		rc = -EIO;
+		goto free_mem;
+	}
+
+	rc = sli_cmd_common_destroy_q(sli4, opcode, subsystem, q->id);
+	if (rc)
+		goto free_mem;
+
+	rc = sli_bmbx_command(sli4);
+	if (rc)
+		goto free_mem;
+
+	rc = sli_res_sli_config(sli4, sli4->bmbx.virt);
+	if (rc)
+		goto free_mem;
+
+	res = (void *)((u8 *)sli4->bmbx.virt +
+		       offsetof(struct sli4_cmd_sli_config, payload));
+	if (res->status) {
+		efc_log_err(sli4, "destroy %s st=%#x addl=%#x\n",
+			    SLI4_QNAME[q->type], res->status,
+			    res->additional_status);
+		rc = -EIO;
+		goto free_mem;
+	}
+
+free_mem:
+	if (free_memory)
+		__sli_queue_destroy(sli4, q);
+
+	return rc;
+}
+
+int
+sli_queue_eq_arm(struct sli4 *sli4, struct sli4_queue *q, bool arm)
+{
+	u32 val;
+	unsigned long flags = 0;
+	u32 a = arm ? SLI4_EQCQ_ARM : SLI4_EQCQ_UNARM;
+
+	spin_lock_irqsave(&q->lock, flags);
+	if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+		val = sli_format_if6_eq_db_data(q->n_posted, q->id, a);
+	else
+		val = sli_format_eq_db_data(q->n_posted, q->id, a);
+
+	writel(val, q->db_regaddr);
+	q->n_posted = 0;
+	spin_unlock_irqrestore(&q->lock, flags);
+
+	return 0;
+}
+
+int
+sli_queue_arm(struct sli4 *sli4, struct sli4_queue *q, bool arm)
+{
+	u32 val = 0;
+	unsigned long flags = 0;
+	u32 a = arm ? SLI4_EQCQ_ARM : SLI4_EQCQ_UNARM;
+
+	spin_lock_irqsave(&q->lock, flags);
+
+	switch (q->type) {
+	case SLI4_QTYPE_EQ:
+		if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+			val = sli_format_if6_eq_db_data(q->n_posted, q->id, a);
+		else
+			val = sli_format_eq_db_data(q->n_posted, q->id, a);
+
+		writel(val, q->db_regaddr);
+		q->n_posted = 0;
+		break;
+	case SLI4_QTYPE_CQ:
+		if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+			val = sli_format_if6_cq_db_data(q->n_posted, q->id, a);
+		else
+			val = sli_format_cq_db_data(q->n_posted, q->id, a);
+
+		writel(val, q->db_regaddr);
+		q->n_posted = 0;
+		break;
+	default:
+		efc_log_info(sli4, "should only be used for EQ/CQ, not %s\n",
+			     SLI4_QNAME[q->type]);
+	}
+
+	spin_unlock_irqrestore(&q->lock, flags);
+
+	return 0;
+}
+
+int
+sli_wq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
+{
+	u8 *qe = q->dma.virt;
+	u32 qindex;
+	u32 val = 0;
+
+	qindex = q->index;
+	qe += q->index * q->size;
+
+	if (sli4->params.perf_wq_id_association)
+		sli_set_wq_id_association(entry, q->id);
+
+	memcpy(qe, entry, q->size);
+	val = sli_format_wq_db_data(q->id);
+
+	writel(val, q->db_regaddr);
+	q->index = (q->index + 1) & (q->length - 1);
+
+	return qindex;
+}
+
+int
+sli_mq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
+{
+	u8 *qe = q->dma.virt;
+	u32 qindex;
+	u32 val = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&q->lock, flags);
+	qindex = q->index;
+	qe += q->index * q->size;
+
+	memcpy(qe, entry, q->size);
+	val = sli_format_mq_db_data(q->id);
+	writel(val, q->db_regaddr);
+	q->index = (q->index + 1) & (q->length - 1);
+	spin_unlock_irqrestore(&q->lock, flags);
+
+	return qindex;
+}
+
+int
+sli_rq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
+{
+	u8 *qe = q->dma.virt;
+	u32 qindex;
+	u32 val = 0;
+
+	qindex = q->index;
+	qe += q->index * q->size;
+
+	memcpy(qe, entry, q->size);
+
+	/*
+	 * In RQ-pair, an RQ either contains the FC header
+	 * (i.e. is_hdr == TRUE) or the payload.
+	 *
+	 * Don't ring doorbell for payload RQ
+	 */
+	if (!(q->u.flag & SLI4_QUEUE_FLAG_HDR))
+		goto skip;
+
+	val = sli_format_rq_db_data(q->id);
+	writel(val, q->db_regaddr);
+skip:
+	q->index = (q->index + 1) & (q->length - 1);
+
+	return qindex;
+}
+
+int
+sli_eq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
+{
+	u8 *qe = q->dma.virt;
+	unsigned long flags = 0;
+	u16 wflags = 0;
+
+	spin_lock_irqsave(&q->lock, flags);
+
+	qe += q->index * q->size;
+
+	/* Check if eqe is valid */
+	wflags = le16_to_cpu(((struct sli4_eqe *)qe)->dw0w0_flags);
+
+	if ((wflags & SLI4_EQE_VALID) != q->phase) {
+		spin_unlock_irqrestore(&q->lock, flags);
+		return -EIO;
+	}
+
+	if (sli4->if_type != SLI4_INTF_IF_TYPE_6) {
+		wflags &= ~SLI4_EQE_VALID;
+		((struct sli4_eqe *)qe)->dw0w0_flags = cpu_to_le16(wflags);
+	}
+
+	memcpy(entry, qe, q->size);
+	q->index = (q->index + 1) & (q->length - 1);
+	q->n_posted++;
+	/*
+	 * For prism, the phase value will be used
+	 * to check the validity of eq/cq entries.
+	 * The value toggles after a complete sweep
+	 * through the queue.
+	 */
+
+	if (sli4->if_type == SLI4_INTF_IF_TYPE_6 && q->index == 0)
+		q->phase ^= (u16)0x1;
+
+	spin_unlock_irqrestore(&q->lock, flags);
+
+	return 0;
+}
+
+int
+sli_cq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
+{
+	u8 *qe = q->dma.virt;
+	unsigned long flags = 0;
+	u32 dwflags = 0;
+	bool valid_bit_set;
+
+	spin_lock_irqsave(&q->lock, flags);
+
+	qe += q->index * q->size;
+
+	/* Check if cqe is valid */
+	dwflags = le32_to_cpu(((struct sli4_mcqe *)qe)->dw3_flags);
+	valid_bit_set = (dwflags & SLI4_MCQE_VALID) != 0;
+
+	if (valid_bit_set != q->phase) {
+		spin_unlock_irqrestore(&q->lock, flags);
+		return -EIO;
+	}
+
+	if (sli4->if_type != SLI4_INTF_IF_TYPE_6) {
+		dwflags &= ~SLI4_MCQE_VALID;
+		((struct sli4_mcqe *)qe)->dw3_flags = cpu_to_le32(dwflags);
+	}
+
+	memcpy(entry, qe, q->size);
+	q->index = (q->index + 1) & (q->length - 1);
+	q->n_posted++;
+	/*
+	 * For prism, the phase value will be used
+	 * to check the validity of eq/cq entries.
+	 * The value toggles after a complete sweep
+	 * through the queue.
+	 */
+
+	if (sli4->if_type == SLI4_INTF_IF_TYPE_6 && q->index == 0)
+		q->phase ^= (u16)0x1;
+
+	spin_unlock_irqrestore(&q->lock, flags);
+
+	return 0;
+}
+
+int
+sli_mq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
+{
+	u8 *qe = q->dma.virt;
+	unsigned long flags = 0;
+
+	spin_lock_irqsave(&q->lock, flags);
+
+	qe += q->u.r_idx * q->size;
+
+	/* Check if mqe is valid */
+	if (q->index == q->u.r_idx) {
+		spin_unlock_irqrestore(&q->lock, flags);
+		return -EIO;
+	}
+
+	memcpy(entry, qe, q->size);
+	q->u.r_idx = (q->u.r_idx + 1) & (q->length - 1);
+
+	spin_unlock_irqrestore(&q->lock, flags);
+
+	return 0;
+}
+
+int
+sli_eq_parse(struct sli4 *sli4, u8 *buf, u16 *cq_id)
+{
+	struct sli4_eqe *eqe = (void *)buf;
+	int rc = 0;
+	u16 flags = 0;
+	u16 majorcode;
+	u16 minorcode;
+
+	if (!buf || !cq_id) {
+		efc_log_err(sli4, "bad parameters sli4=%p buf=%p cq_id=%p\n",
+			    sli4, buf, cq_id);
+		return -EIO;
+	}
+
+	flags = le16_to_cpu(eqe->dw0w0_flags);
+	majorcode = (flags & SLI4_EQE_MJCODE) >> 1;
+	minorcode = (flags & SLI4_EQE_MNCODE) >> 4;
+	switch (majorcode) {
+	case SLI4_MAJOR_CODE_STANDARD:
+		*cq_id = le16_to_cpu(eqe->resource_id);
+		break;
+	case SLI4_MAJOR_CODE_SENTINEL:
+		efc_log_info(sli4, "sentinel EQE\n");
+		rc = SLI4_EQE_STATUS_EQ_FULL;
+		break;
+	default:
+		efc_log_info(sli4, "Unsupported EQE: major %x minor %x\n",
+			     majorcode, minorcode);
+		rc = -EIO;
+	}
+
+	return rc;
+}
+
+int
+sli_cq_parse(struct sli4 *sli4, struct sli4_queue *cq, u8 *cqe,
+	     enum sli4_qentry *etype, u16 *q_id)
+{
+	int rc = 0;
+
+	if (!cq || !cqe || !etype) {
+		efc_log_err(sli4, "bad params sli4=%p cq=%p cqe=%p etype=%p q_id=%p\n",
+			    sli4, cq, cqe, etype, q_id);
+		return -EINVAL;
+	}
+
+	/* Parse a CQ entry to retrieve the event type and the queue id */
+	if (cq->u.flag & SLI4_QUEUE_FLAG_MQ) {
+		struct sli4_mcqe *mcqe = (void *)cqe;
+
+		if (le32_to_cpu(mcqe->dw3_flags) & SLI4_MCQE_AE) {
+			*etype = SLI4_QENTRY_ASYNC;
+		} else {
+			*etype = SLI4_QENTRY_MQ;
+			rc = sli_cqe_mq(sli4, mcqe);
+		}
+		*q_id = -1;
+	} else {
+		rc = sli_fc_cqe_parse(sli4, cq, cqe, etype, q_id);
+	}
+
+	return rc;
+}
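
On the consume side, sli_eq_read() validates the next entry against the valid bit (or, on interface type 6 "prism" hardware, the toggling phase bit) and sli_eq_parse() extracts the completion queue id. A minimal sketch of a drain loop built on these routines, not taken from the patch; example_drain_eq() and the CQ-servicing step are placeholders:

/* Sketch only: a real handler would dispatch cq_id to its CQ poller. */
static void example_drain_eq(struct sli4 *sli4, struct sli4_queue *eq)
{
	u8 eqe[sizeof(struct sli4_eqe)];
	u16 cq_id;

	/* sli_eq_read() returns -EIO once no valid entry remains */
	while (!sli_eq_read(sli4, eq, eqe)) {
		if (!sli_eq_parse(sli4, eqe, &cq_id))
			;	/* service the CQ identified by cq_id here */
	}

	/* acknowledge the consumed entries and re-arm the doorbell */
	sli_queue_eq_arm(sli4, eq, true);
}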
drivers/scsi/elx/libefc_sli/sli4.h (+9)
 	struct efc_dma vpd_data;
 };
 
+static inline void
+sli_cmd_fill_hdr(struct sli4_rqst_hdr *hdr, u8 opc, u8 sub, u32 ver, __le32 len)
+{
+	hdr->opcode = opc;
+	hdr->subsystem = sub;
+	hdr->dw3_version = cpu_to_le32(ver);
+	hdr->request_length = len;
+}
+
 #endif /* !_SLI4_H */