Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[SCSI] lpfc 8.3.33: Tie parallel I/O queues into separate MSIX vectors

Add fcp_io_channel module attribute to control amount of parallel I/O queues

Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>

authored by

James Smart and committed by
James Bottomley
67d12733 aa6fbb75

+453 -686
+1
drivers/scsi/lpfc/lpfc.h
··· 695 695 uint32_t cfg_fcp_imax; 696 696 uint32_t cfg_fcp_wq_count; 697 697 uint32_t cfg_fcp_eq_count; 698 + uint32_t cfg_fcp_io_channel; 698 699 uint32_t cfg_sg_seg_cnt; 699 700 uint32_t cfg_prot_sg_seg_cnt; 700 701 uint32_t cfg_sg_dma_buf_size;
+20 -6
drivers/scsi/lpfc/lpfc_attr.c
··· 3654 3654 return -EINVAL; 3655 3655 3656 3656 phba->cfg_fcp_imax = (uint32_t)val; 3657 - for (i = 0; i < phba->cfg_fcp_eq_count; i += LPFC_MAX_EQ_DELAY) 3657 + for (i = 0; i < phba->cfg_fcp_io_channel; i += LPFC_MAX_EQ_DELAY) 3658 3658 lpfc_modify_fcp_eq_delay(phba, i); 3659 3659 3660 3660 return strlen(buf); ··· 3844 3844 3845 3845 /* 3846 3846 # lpfc_fcp_wq_count: Set the number of fast-path FCP work queues 3847 + # This parameter is ignored and will eventually be depricated 3847 3848 # 3848 - # Value range is [1,31]. Default value is 4. 3849 + # Value range is [1,7]. Default value is 4. 3849 3850 */ 3850 - LPFC_ATTR_R(fcp_wq_count, LPFC_FP_WQN_DEF, LPFC_FP_WQN_MIN, LPFC_FP_WQN_MAX, 3851 + LPFC_ATTR_R(fcp_wq_count, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN, 3852 + LPFC_FCP_IO_CHAN_MAX, 3851 3853 "Set the number of fast-path FCP work queues, if possible"); 3852 3854 3853 3855 /* 3854 - # lpfc_fcp_eq_count: Set the number of fast-path FCP event queues 3856 + # lpfc_fcp_eq_count: Set the number of FCP EQ/CQ/WQ IO channels 3855 3857 # 3856 - # Value range is [1,7]. Default value is 1. 3858 + # Value range is [1,7]. Default value is 4. 3857 3859 */ 3858 - LPFC_ATTR_R(fcp_eq_count, LPFC_FP_EQN_DEF, LPFC_FP_EQN_MIN, LPFC_FP_EQN_MAX, 3860 + LPFC_ATTR_R(fcp_eq_count, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN, 3861 + LPFC_FCP_IO_CHAN_MAX, 3859 3862 "Set the number of fast-path FCP event queues, if possible"); 3863 + 3864 + /* 3865 + # lpfc_fcp_io_channel: Set the number of FCP EQ/CQ/WQ IO channels 3866 + # 3867 + # Value range is [1,7]. Default value is 4. 3868 + */ 3869 + LPFC_ATTR_R(fcp_io_channel, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN, 3870 + LPFC_FCP_IO_CHAN_MAX, 3871 + "Set the number of FCP I/O channels"); 3860 3872 3861 3873 /* 3862 3874 # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware. 
··· 4014 4002 &dev_attr_lpfc_fcp_imax, 4015 4003 &dev_attr_lpfc_fcp_wq_count, 4016 4004 &dev_attr_lpfc_fcp_eq_count, 4005 + &dev_attr_lpfc_fcp_io_channel, 4017 4006 &dev_attr_lpfc_enable_bg, 4018 4007 &dev_attr_lpfc_soft_wwnn, 4019 4008 &dev_attr_lpfc_soft_wwpn, ··· 4993 4980 lpfc_fcp_imax_init(phba, lpfc_fcp_imax); 4994 4981 lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count); 4995 4982 lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count); 4983 + lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel); 4996 4984 lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset); 4997 4985 lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); 4998 4986 lpfc_enable_bg_init(phba, lpfc_enable_bg);
+1 -2
drivers/scsi/lpfc/lpfc_crtn.h
··· 196 196 irqreturn_t lpfc_sli_sp_intr_handler(int, void *); 197 197 irqreturn_t lpfc_sli_fp_intr_handler(int, void *); 198 198 irqreturn_t lpfc_sli4_intr_handler(int, void *); 199 - irqreturn_t lpfc_sli4_sp_intr_handler(int, void *); 200 - irqreturn_t lpfc_sli4_fp_intr_handler(int, void *); 199 + irqreturn_t lpfc_sli4_hba_intr_handler(int, void *); 201 200 202 201 void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *); 203 202 void lpfc_sli4_swap_str(struct lpfc_hba *, LPFC_MBOXQ_t *);
+26 -52
drivers/scsi/lpfc/lpfc_debugfs.c
··· 2013 2013 if (*ppos) 2014 2014 return 0; 2015 2015 2016 - /* Get slow-path event queue information */ 2017 - len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2018 - "Slow-path EQ information:\n"); 2019 - if (phba->sli4_hba.sp_eq) { 2020 - len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2021 - "\tEQID[%02d], " 2022 - "QE-COUNT[%04d], QE-SIZE[%04d], " 2023 - "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n", 2024 - phba->sli4_hba.sp_eq->queue_id, 2025 - phba->sli4_hba.sp_eq->entry_count, 2026 - phba->sli4_hba.sp_eq->entry_size, 2027 - phba->sli4_hba.sp_eq->host_index, 2028 - phba->sli4_hba.sp_eq->hba_index); 2029 - } 2030 - 2031 2016 /* Get fast-path event queue information */ 2032 2017 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2033 - "Fast-path EQ information:\n"); 2034 - if (phba->sli4_hba.fp_eq) { 2035 - for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; 2018 + "HBA EQ information:\n"); 2019 + if (phba->sli4_hba.hba_eq) { 2020 + for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel; 2036 2021 fcp_qidx++) { 2037 - if (phba->sli4_hba.fp_eq[fcp_qidx]) { 2022 + if (phba->sli4_hba.hba_eq[fcp_qidx]) { 2038 2023 len += snprintf(pbuffer+len, 2039 2024 LPFC_QUE_INFO_GET_BUF_SIZE-len, 2040 2025 "\tEQID[%02d], " 2041 2026 "QE-COUNT[%04d], QE-SIZE[%04d], " 2042 2027 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n", 2043 - phba->sli4_hba.fp_eq[fcp_qidx]->queue_id, 2044 - phba->sli4_hba.fp_eq[fcp_qidx]->entry_count, 2045 - phba->sli4_hba.fp_eq[fcp_qidx]->entry_size, 2046 - phba->sli4_hba.fp_eq[fcp_qidx]->host_index, 2047 - phba->sli4_hba.fp_eq[fcp_qidx]->hba_index); 2028 + phba->sli4_hba.hba_eq[fcp_qidx]->queue_id, 2029 + phba->sli4_hba.hba_eq[fcp_qidx]->entry_count, 2030 + phba->sli4_hba.hba_eq[fcp_qidx]->entry_size, 2031 + phba->sli4_hba.hba_eq[fcp_qidx]->host_index, 2032 + phba->sli4_hba.hba_eq[fcp_qidx]->hba_index); 2048 2033 } 2049 2034 } 2050 2035 } ··· 2093 2108 phba->sli4_hba.fcp_cq[fcp_qidx]->host_index, 2094 2109 
phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index); 2095 2110 } 2096 - } while (++fcp_qidx < phba->cfg_fcp_eq_count); 2111 + } while (++fcp_qidx < phba->cfg_fcp_io_channel); 2097 2112 len += snprintf(pbuffer+len, 2098 2113 LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); 2099 2114 } ··· 2138 2153 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2139 2154 "Fast-path FCP WQ information:\n"); 2140 2155 if (phba->sli4_hba.fcp_wq) { 2141 - for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; 2156 + for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel; 2142 2157 fcp_qidx++) { 2143 2158 if (!phba->sli4_hba.fcp_wq[fcp_qidx]) 2144 2159 continue; ··· 2395 2410 2396 2411 switch (quetp) { 2397 2412 case LPFC_IDIAG_EQ: 2398 - /* Slow-path event queue */ 2399 - if (phba->sli4_hba.sp_eq && 2400 - phba->sli4_hba.sp_eq->queue_id == queid) { 2401 - /* Sanity check */ 2402 - rc = lpfc_idiag_que_param_check( 2403 - phba->sli4_hba.sp_eq, index, count); 2404 - if (rc) 2405 - goto error_out; 2406 - idiag.ptr_private = phba->sli4_hba.sp_eq; 2407 - goto pass_check; 2408 - } 2409 - /* Fast-path event queue */ 2410 - if (phba->sli4_hba.fp_eq) { 2411 - for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) { 2412 - if (phba->sli4_hba.fp_eq[qidx] && 2413 - phba->sli4_hba.fp_eq[qidx]->queue_id == 2413 + /* HBA event queue */ 2414 + if (phba->sli4_hba.hba_eq) { 2415 + for (qidx = 0; qidx < phba->cfg_fcp_io_channel; 2416 + qidx++) { 2417 + if (phba->sli4_hba.hba_eq[qidx] && 2418 + phba->sli4_hba.hba_eq[qidx]->queue_id == 2414 2419 queid) { 2415 2420 /* Sanity check */ 2416 2421 rc = lpfc_idiag_que_param_check( 2417 - phba->sli4_hba.fp_eq[qidx], 2422 + phba->sli4_hba.hba_eq[qidx], 2418 2423 index, count); 2419 2424 if (rc) 2420 2425 goto error_out; 2421 2426 idiag.ptr_private = 2422 - phba->sli4_hba.fp_eq[qidx]; 2427 + phba->sli4_hba.hba_eq[qidx]; 2423 2428 goto pass_check; 2424 2429 } 2425 2430 } ··· 2456 2481 phba->sli4_hba.fcp_cq[qidx]; 2457 2482 goto pass_check; 2458 2483 } 2459 - } while (++qidx 
< phba->cfg_fcp_eq_count); 2484 + } while (++qidx < phba->cfg_fcp_io_channel); 2460 2485 } 2461 2486 goto error_out; 2462 2487 break; ··· 2488 2513 } 2489 2514 /* FCP work queue */ 2490 2515 if (phba->sli4_hba.fcp_wq) { 2491 - for (qidx = 0; qidx < phba->cfg_fcp_wq_count; qidx++) { 2516 + for (qidx = 0; qidx < phba->cfg_fcp_io_channel; 2517 + qidx++) { 2492 2518 if (!phba->sli4_hba.fcp_wq[qidx]) 2493 2519 continue; 2494 2520 if (phba->sli4_hba.fcp_wq[qidx]->queue_id == ··· 4468 4492 lpfc_debug_dump_mbx_wq(phba); 4469 4493 lpfc_debug_dump_els_wq(phba); 4470 4494 4471 - for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) 4495 + for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) 4472 4496 lpfc_debug_dump_fcp_wq(phba, fcp_wqidx); 4473 4497 4474 4498 lpfc_debug_dump_hdr_rq(phba); ··· 4479 4503 lpfc_debug_dump_mbx_cq(phba); 4480 4504 lpfc_debug_dump_els_cq(phba); 4481 4505 4482 - for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) 4506 + for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) 4483 4507 lpfc_debug_dump_fcp_cq(phba, fcp_wqidx); 4484 4508 4485 4509 /* 4486 4510 * Dump Event Queues (EQs) 4487 4511 */ 4488 - lpfc_debug_dump_sp_eq(phba); 4489 - 4490 - for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) 4491 - lpfc_debug_dump_fcp_eq(phba, fcp_wqidx); 4512 + for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) 4513 + lpfc_debug_dump_hba_eq(phba, fcp_wqidx); 4492 4514 }
+20 -49
drivers/scsi/lpfc/lpfc_debugfs.h
··· 369 369 lpfc_debug_dump_fcp_wq(struct lpfc_hba *phba, int fcp_wqidx) 370 370 { 371 371 /* sanity check */ 372 - if (fcp_wqidx >= phba->cfg_fcp_wq_count) 372 + if (fcp_wqidx >= phba->cfg_fcp_io_channel) 373 373 return; 374 374 375 375 printk(KERN_ERR "FCP WQ: WQ[Idx:%d|Qid:%d]\n", ··· 391 391 int fcp_cqidx, fcp_cqid; 392 392 393 393 /* sanity check */ 394 - if (fcp_wqidx >= phba->cfg_fcp_wq_count) 394 + if (fcp_wqidx >= phba->cfg_fcp_io_channel) 395 395 return; 396 396 397 397 fcp_cqid = phba->sli4_hba.fcp_wq[fcp_wqidx]->assoc_qid; 398 - for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) 398 + for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++) 399 399 if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid) 400 400 break; 401 401 if (phba->intr_type == MSIX) { 402 - if (fcp_cqidx >= phba->cfg_fcp_eq_count) 402 + if (fcp_cqidx >= phba->cfg_fcp_io_channel) 403 403 return; 404 404 } else { 405 405 if (fcp_cqidx > 0) ··· 413 413 } 414 414 415 415 /** 416 - * lpfc_debug_dump_fcp_eq - dump all entries from a fcp work queue's evt queue 416 + * lpfc_debug_dump_hba_eq - dump all entries from a fcp work queue's evt queue 417 417 * @phba: Pointer to HBA context object. 418 418 * @fcp_wqidx: Index to a FCP work queue. 419 419 * ··· 421 421 * associated to the FCP work queue specified by the @fcp_wqidx. 
422 422 **/ 423 423 static inline void 424 - lpfc_debug_dump_fcp_eq(struct lpfc_hba *phba, int fcp_wqidx) 424 + lpfc_debug_dump_hba_eq(struct lpfc_hba *phba, int fcp_wqidx) 425 425 { 426 426 struct lpfc_queue *qdesc; 427 427 int fcp_eqidx, fcp_eqid; 428 428 int fcp_cqidx, fcp_cqid; 429 429 430 430 /* sanity check */ 431 - if (fcp_wqidx >= phba->cfg_fcp_wq_count) 431 + if (fcp_wqidx >= phba->cfg_fcp_io_channel) 432 432 return; 433 433 fcp_cqid = phba->sli4_hba.fcp_wq[fcp_wqidx]->assoc_qid; 434 - for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) 434 + for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++) 435 435 if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid) 436 436 break; 437 437 if (phba->intr_type == MSIX) { 438 - if (fcp_cqidx >= phba->cfg_fcp_eq_count) 438 + if (fcp_cqidx >= phba->cfg_fcp_io_channel) 439 439 return; 440 440 } else { 441 441 if (fcp_cqidx > 0) 442 442 return; 443 443 } 444 444 445 - if (phba->cfg_fcp_eq_count == 0) { 446 - fcp_eqidx = -1; 447 - fcp_eqid = phba->sli4_hba.sp_eq->queue_id; 448 - qdesc = phba->sli4_hba.sp_eq; 449 - } else { 450 - fcp_eqidx = fcp_cqidx; 451 - fcp_eqid = phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id; 452 - qdesc = phba->sli4_hba.fp_eq[fcp_eqidx]; 453 - } 445 + fcp_eqidx = fcp_cqidx; 446 + fcp_eqid = phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id; 447 + qdesc = phba->sli4_hba.hba_eq[fcp_eqidx]; 454 448 455 449 printk(KERN_ERR "FCP EQ: WQ[Idx:%d|Qid:%d]->CQ[Idx:%d|Qid:%d]->" 456 450 "EQ[Idx:%d|Qid:%d]\n", ··· 540 546 } 541 547 542 548 /** 543 - * lpfc_debug_dump_sp_eq - dump all entries from slow-path event queue 544 - * @phba: Pointer to HBA context object. 545 - * 546 - * This function dumps all entries from the slow-path event queue. 
547 - **/ 548 - static inline void 549 - lpfc_debug_dump_sp_eq(struct lpfc_hba *phba) 550 - { 551 - printk(KERN_ERR "SP EQ: WQ[Qid:%d/Qid:%d]->CQ[Qid:%d/Qid:%d]->" 552 - "EQ[Qid:%d]:\n", 553 - phba->sli4_hba.mbx_wq->queue_id, 554 - phba->sli4_hba.els_wq->queue_id, 555 - phba->sli4_hba.mbx_cq->queue_id, 556 - phba->sli4_hba.els_cq->queue_id, 557 - phba->sli4_hba.sp_eq->queue_id); 558 - lpfc_debug_dump_q(phba->sli4_hba.sp_eq); 559 - } 560 - 561 - /** 562 549 * lpfc_debug_dump_wq_by_id - dump all entries from a work queue by queue id 563 550 * @phba: Pointer to HBA context object. 564 551 * @qid: Work queue identifier. ··· 552 577 { 553 578 int wq_idx; 554 579 555 - for (wq_idx = 0; wq_idx < phba->cfg_fcp_wq_count; wq_idx++) 580 + for (wq_idx = 0; wq_idx < phba->cfg_fcp_io_channel; wq_idx++) 556 581 if (phba->sli4_hba.fcp_wq[wq_idx]->queue_id == qid) 557 582 break; 558 - if (wq_idx < phba->cfg_fcp_wq_count) { 583 + if (wq_idx < phba->cfg_fcp_io_channel) { 559 584 printk(KERN_ERR "FCP WQ[Idx:%d|Qid:%d]\n", wq_idx, qid); 560 585 lpfc_debug_dump_q(phba->sli4_hba.fcp_wq[wq_idx]); 561 586 return; ··· 622 647 do { 623 648 if (phba->sli4_hba.fcp_cq[cq_idx]->queue_id == qid) 624 649 break; 625 - } while (++cq_idx < phba->cfg_fcp_eq_count); 650 + } while (++cq_idx < phba->cfg_fcp_io_channel); 626 651 627 - if (cq_idx < phba->cfg_fcp_eq_count) { 652 + if (cq_idx < phba->cfg_fcp_io_channel) { 628 653 printk(KERN_ERR "FCP CQ[Idx:%d|Qid:%d]\n", cq_idx, qid); 629 654 lpfc_debug_dump_q(phba->sli4_hba.fcp_cq[cq_idx]); 630 655 return; ··· 655 680 { 656 681 int eq_idx; 657 682 658 - for (eq_idx = 0; eq_idx < phba->cfg_fcp_eq_count; eq_idx++) { 659 - if (phba->sli4_hba.fp_eq[eq_idx]->queue_id == qid) 683 + for (eq_idx = 0; eq_idx < phba->cfg_fcp_io_channel; eq_idx++) { 684 + if (phba->sli4_hba.hba_eq[eq_idx]->queue_id == qid) 660 685 break; 661 686 } 662 687 663 - if (eq_idx < phba->cfg_fcp_eq_count) { 688 + if (eq_idx < phba->cfg_fcp_io_channel) { 664 689 printk(KERN_ERR "FCP 
EQ[Idx:%d|Qid:%d]\n", eq_idx, qid); 665 - lpfc_debug_dump_q(phba->sli4_hba.fp_eq[eq_idx]); 690 + lpfc_debug_dump_q(phba->sli4_hba.hba_eq[eq_idx]); 666 691 return; 667 692 } 668 693 669 - if (phba->sli4_hba.sp_eq->queue_id == qid) { 670 - printk(KERN_ERR "SP EQ[|Qid:%d]\n", qid); 671 - lpfc_debug_dump_q(phba->sli4_hba.sp_eq); 672 - } 673 694 } 674 695 675 696 void lpfc_debug_dump_all_queues(struct lpfc_hba *);
+328 -422
drivers/scsi/lpfc/lpfc_init.c
··· 4702 4702 /* Get all the module params for configuring this host */ 4703 4703 lpfc_get_cfgparam(phba); 4704 4704 phba->max_vpi = LPFC_MAX_VPI; 4705 + 4706 + /* Eventually cfg_fcp_eq_count / cfg_fcp_wq_count will be depricated */ 4707 + phba->cfg_fcp_io_channel = phba->cfg_fcp_eq_count; 4708 + 4705 4709 /* This will be set to correct value after the read_config mbox */ 4706 4710 phba->max_vports = 0; 4707 4711 ··· 4726 4722 */ 4727 4723 if (!phba->sli.ring) 4728 4724 phba->sli.ring = kzalloc( 4729 - (LPFC_SLI3_MAX_RING + phba->cfg_fcp_eq_count) * 4725 + (LPFC_SLI3_MAX_RING + phba->cfg_fcp_io_channel) * 4730 4726 sizeof(struct lpfc_sli_ring), GFP_KERNEL); 4731 4727 if (!phba->sli.ring) 4732 4728 return -ENOMEM; ··· 4935 4931 goto out_remove_rpi_hdrs; 4936 4932 } 4937 4933 4938 - /* 4939 - * The cfg_fcp_eq_count can be zero whenever there is exactly one 4940 - * interrupt vector. This is not an error 4941 - */ 4942 - if (phba->cfg_fcp_eq_count) { 4943 - phba->sli4_hba.fcp_eq_hdl = 4944 - kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * 4945 - phba->cfg_fcp_eq_count), GFP_KERNEL); 4946 - if (!phba->sli4_hba.fcp_eq_hdl) { 4947 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4948 - "2572 Failed allocate memory for " 4949 - "fast-path per-EQ handle array\n"); 4950 - rc = -ENOMEM; 4951 - goto out_free_fcf_rr_bmask; 4952 - } 4934 + phba->sli4_hba.fcp_eq_hdl = 4935 + kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * 4936 + phba->cfg_fcp_io_channel), GFP_KERNEL); 4937 + if (!phba->sli4_hba.fcp_eq_hdl) { 4938 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4939 + "2572 Failed allocate memory for " 4940 + "fast-path per-EQ handle array\n"); 4941 + rc = -ENOMEM; 4942 + goto out_free_fcf_rr_bmask; 4953 4943 } 4954 4944 4955 4945 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * ··· 6536 6538 static int 6537 6539 lpfc_sli4_queue_verify(struct lpfc_hba *phba) 6538 6540 { 6539 - int cfg_fcp_wq_count; 6540 - int cfg_fcp_eq_count; 6541 + int cfg_fcp_io_channel; 6541 6542 6542 6543 /* 
6543 - * Sanity check for confiugred queue parameters against the run-time 6544 + * Sanity check for configured queue parameters against the run-time 6544 6545 * device parameters 6545 6546 */ 6546 6547 6547 - /* Sanity check on FCP fast-path WQ parameters */ 6548 - cfg_fcp_wq_count = phba->cfg_fcp_wq_count; 6549 - if (cfg_fcp_wq_count > 6550 - (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) { 6551 - cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq - 6552 - LPFC_SP_WQN_DEF; 6553 - if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) { 6554 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6555 - "2581 Not enough WQs (%d) from " 6556 - "the pci function for supporting " 6557 - "FCP WQs (%d)\n", 6558 - phba->sli4_hba.max_cfg_param.max_wq, 6559 - phba->cfg_fcp_wq_count); 6560 - goto out_error; 6561 - } 6562 - lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6563 - "2582 Not enough WQs (%d) from the pci " 6564 - "function for supporting the requested " 6565 - "FCP WQs (%d), the actual FCP WQs can " 6566 - "be supported: %d\n", 6567 - phba->sli4_hba.max_cfg_param.max_wq, 6568 - phba->cfg_fcp_wq_count, cfg_fcp_wq_count); 6569 - } 6570 - /* The actual number of FCP work queues adopted */ 6571 - phba->cfg_fcp_wq_count = cfg_fcp_wq_count; 6548 + /* Sanity check on HBA EQ parameters */ 6549 + cfg_fcp_io_channel = phba->cfg_fcp_io_channel; 6572 6550 6573 - /* Sanity check on FCP fast-path EQ parameters */ 6574 - cfg_fcp_eq_count = phba->cfg_fcp_eq_count; 6575 - if (cfg_fcp_eq_count > 6576 - (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) { 6577 - cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq - 6578 - LPFC_SP_EQN_DEF; 6579 - if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) { 6551 + if (cfg_fcp_io_channel > 6552 + phba->sli4_hba.max_cfg_param.max_eq) { 6553 + cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq; 6554 + if (cfg_fcp_io_channel < LPFC_FCP_IO_CHAN_MIN) { 6580 6555 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6581 6556 "2574 Not enough EQs (%d) from the " 6582 
6557 "pci function for supporting FCP " 6583 6558 "EQs (%d)\n", 6584 6559 phba->sli4_hba.max_cfg_param.max_eq, 6585 - phba->cfg_fcp_eq_count); 6560 + phba->cfg_fcp_io_channel); 6586 6561 goto out_error; 6587 6562 } 6588 6563 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, ··· 6564 6593 "FCP EQs (%d), the actual FCP EQs can " 6565 6594 "be supported: %d\n", 6566 6595 phba->sli4_hba.max_cfg_param.max_eq, 6567 - phba->cfg_fcp_eq_count, cfg_fcp_eq_count); 6596 + phba->cfg_fcp_io_channel, cfg_fcp_io_channel); 6568 6597 } 6569 - /* It does not make sense to have more EQs than WQs */ 6570 - if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) { 6571 - lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6572 - "2593 The FCP EQ count(%d) cannot be greater " 6573 - "than the FCP WQ count(%d), limiting the " 6574 - "FCP EQ count to %d\n", cfg_fcp_eq_count, 6575 - phba->cfg_fcp_wq_count, 6576 - phba->cfg_fcp_wq_count); 6577 - cfg_fcp_eq_count = phba->cfg_fcp_wq_count; 6578 - } 6598 + 6599 + /* Eventually cfg_fcp_eq_count / cfg_fcp_wq_count will be depricated */ 6600 + 6579 6601 /* The actual number of FCP event queues adopted */ 6580 - phba->cfg_fcp_eq_count = cfg_fcp_eq_count; 6581 - /* The overall number of event queues used */ 6582 - phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF; 6602 + phba->cfg_fcp_eq_count = cfg_fcp_io_channel; 6603 + phba->cfg_fcp_wq_count = cfg_fcp_io_channel; 6604 + phba->cfg_fcp_io_channel = cfg_fcp_io_channel; 6605 + phba->sli4_hba.cfg_eqn = cfg_fcp_io_channel; 6583 6606 6584 6607 /* Get EQ depth from module parameter, fake the default for now */ 6585 6608 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; ··· 6606 6641 lpfc_sli4_queue_create(struct lpfc_hba *phba) 6607 6642 { 6608 6643 struct lpfc_queue *qdesc; 6609 - int fcp_eqidx, fcp_cqidx, fcp_wqidx; 6644 + int idx; 6610 6645 6611 6646 /* 6612 - * Create Event Queues (EQs) 6647 + * Create HBA Record arrays. 
6613 6648 */ 6649 + if (!phba->cfg_fcp_io_channel) 6650 + return -ERANGE; 6614 6651 6615 - /* Create slow path event queue */ 6616 - qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, 6617 - phba->sli4_hba.eq_ecount); 6618 - if (!qdesc) { 6652 + phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; 6653 + phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; 6654 + phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; 6655 + phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; 6656 + phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; 6657 + phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; 6658 + 6659 + phba->sli4_hba.hba_eq = kzalloc((sizeof(struct lpfc_queue *) * 6660 + phba->cfg_fcp_io_channel), GFP_KERNEL); 6661 + if (!phba->sli4_hba.hba_eq) { 6619 6662 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6620 - "0496 Failed allocate slow-path EQ\n"); 6663 + "2576 Failed allocate memory for " 6664 + "fast-path EQ record array\n"); 6621 6665 goto out_error; 6622 6666 } 6623 - phba->sli4_hba.sp_eq = qdesc; 6667 + 6668 + phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * 6669 + phba->cfg_fcp_io_channel), GFP_KERNEL); 6670 + if (!phba->sli4_hba.fcp_cq) { 6671 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6672 + "2577 Failed allocate memory for fast-path " 6673 + "CQ record array\n"); 6674 + goto out_error; 6675 + } 6676 + 6677 + phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) * 6678 + phba->cfg_fcp_io_channel), GFP_KERNEL); 6679 + if (!phba->sli4_hba.fcp_wq) { 6680 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6681 + "2578 Failed allocate memory for fast-path " 6682 + "WQ record array\n"); 6683 + goto out_error; 6684 + } 6624 6685 6625 6686 /* 6626 - * Create fast-path FCP Event Queue(s). The cfg_fcp_eq_count can be 6627 - * zero whenever there is exactly one interrupt vector. This is not 6628 - * an error. 6687 + * Since the first EQ can have multiple CQs associated with it, 6688 + * this array is used to quickly see if we have a FCP fast-path 6689 + * CQ match. 
6629 6690 */ 6630 - if (phba->cfg_fcp_eq_count) { 6631 - phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) * 6632 - phba->cfg_fcp_eq_count), GFP_KERNEL); 6633 - if (!phba->sli4_hba.fp_eq) { 6634 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6635 - "2576 Failed allocate memory for " 6636 - "fast-path EQ record array\n"); 6637 - goto out_free_sp_eq; 6638 - } 6691 + phba->sli4_hba.fcp_cq_map = kzalloc((sizeof(uint16_t) * 6692 + phba->cfg_fcp_io_channel), GFP_KERNEL); 6693 + if (!phba->sli4_hba.fcp_cq_map) { 6694 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6695 + "2545 Failed allocate memory for fast-path " 6696 + "CQ map\n"); 6697 + goto out_error; 6639 6698 } 6640 - for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 6699 + 6700 + /* 6701 + * Create HBA Event Queues (EQs). The cfg_fcp_io_channel specifies 6702 + * how many EQs to create. 6703 + */ 6704 + for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) { 6705 + 6706 + /* Create EQs */ 6641 6707 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, 6642 6708 phba->sli4_hba.eq_ecount); 6643 6709 if (!qdesc) { 6644 6710 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6645 - "0497 Failed allocate fast-path EQ\n"); 6646 - goto out_free_fp_eq; 6711 + "0497 Failed allocate EQ (%d)\n", idx); 6712 + goto out_error; 6647 6713 } 6648 - phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc; 6714 + phba->sli4_hba.hba_eq[idx] = qdesc; 6715 + 6716 + /* Create Fast Path FCP CQs */ 6717 + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 6718 + phba->sli4_hba.cq_ecount); 6719 + if (!qdesc) { 6720 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6721 + "0499 Failed allocate fast-path FCP " 6722 + "CQ (%d)\n", idx); 6723 + goto out_error; 6724 + } 6725 + phba->sli4_hba.fcp_cq[idx] = qdesc; 6726 + 6727 + /* Create Fast Path FCP WQs */ 6728 + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 6729 + phba->sli4_hba.wq_ecount); 6730 + if (!qdesc) { 6731 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6732 + 
"0503 Failed allocate fast-path FCP " 6733 + "WQ (%d)\n", idx); 6734 + goto out_error; 6735 + } 6736 + phba->sli4_hba.fcp_wq[idx] = qdesc; 6649 6737 } 6650 6738 6739 + 6651 6740 /* 6652 - * Create Complete Queues (CQs) 6741 + * Create Slow Path Completion Queues (CQs) 6653 6742 */ 6654 6743 6655 6744 /* Create slow-path Mailbox Command Complete Queue */ ··· 6712 6693 if (!qdesc) { 6713 6694 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6714 6695 "0500 Failed allocate slow-path mailbox CQ\n"); 6715 - goto out_free_fp_eq; 6696 + goto out_error; 6716 6697 } 6717 6698 phba->sli4_hba.mbx_cq = qdesc; 6718 6699 ··· 6722 6703 if (!qdesc) { 6723 6704 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6724 6705 "0501 Failed allocate slow-path ELS CQ\n"); 6725 - goto out_free_mbx_cq; 6706 + goto out_error; 6726 6707 } 6727 6708 phba->sli4_hba.els_cq = qdesc; 6728 6709 6729 6710 6730 6711 /* 6731 - * Create fast-path FCP Completion Queue(s), one-to-one with FCP EQs. 6732 - * If there are no FCP EQs then create exactly one FCP CQ. 
6712 + * Create Slow Path Work Queues (WQs) 6733 6713 */ 6734 - if (phba->cfg_fcp_eq_count) 6735 - phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * 6736 - phba->cfg_fcp_eq_count), 6737 - GFP_KERNEL); 6738 - else 6739 - phba->sli4_hba.fcp_cq = kzalloc(sizeof(struct lpfc_queue *), 6740 - GFP_KERNEL); 6741 - if (!phba->sli4_hba.fcp_cq) { 6742 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6743 - "2577 Failed allocate memory for fast-path " 6744 - "CQ record array\n"); 6745 - goto out_free_els_cq; 6746 - } 6747 - fcp_cqidx = 0; 6748 - do { 6749 - qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 6750 - phba->sli4_hba.cq_ecount); 6751 - if (!qdesc) { 6752 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6753 - "0499 Failed allocate fast-path FCP " 6754 - "CQ (%d)\n", fcp_cqidx); 6755 - goto out_free_fcp_cq; 6756 - } 6757 - phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc; 6758 - } while (++fcp_cqidx < phba->cfg_fcp_eq_count); 6759 6714 6760 6715 /* Create Mailbox Command Queue */ 6761 - phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; 6762 - phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; 6763 6716 6764 6717 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize, 6765 6718 phba->sli4_hba.mq_ecount); 6766 6719 if (!qdesc) { 6767 6720 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6768 6721 "0505 Failed allocate slow-path MQ\n"); 6769 - goto out_free_fcp_cq; 6722 + goto out_error; 6770 6723 } 6771 6724 phba->sli4_hba.mbx_wq = qdesc; 6772 6725 6773 6726 /* 6774 - * Create all the Work Queues (WQs) 6727 + * Create ELS Work Queues 6775 6728 */ 6776 - phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; 6777 - phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; 6778 6729 6779 6730 /* Create slow-path ELS Work Queue */ 6780 6731 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, ··· 6752 6763 if (!qdesc) { 6753 6764 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6754 6765 "0504 Failed allocate slow-path ELS WQ\n"); 6755 - goto out_free_mbx_wq; 6766 + goto out_error; 6756 6767 } 6757 6768 
phba->sli4_hba.els_wq = qdesc; 6758 - 6759 - /* Create fast-path FCP Work Queue(s) */ 6760 - phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) * 6761 - phba->cfg_fcp_wq_count), GFP_KERNEL); 6762 - if (!phba->sli4_hba.fcp_wq) { 6763 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6764 - "2578 Failed allocate memory for fast-path " 6765 - "WQ record array\n"); 6766 - goto out_free_els_wq; 6767 - } 6768 - for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { 6769 - qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 6770 - phba->sli4_hba.wq_ecount); 6771 - if (!qdesc) { 6772 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6773 - "0503 Failed allocate fast-path FCP " 6774 - "WQ (%d)\n", fcp_wqidx); 6775 - goto out_free_fcp_wq; 6776 - } 6777 - phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc; 6778 - } 6779 6769 6780 6770 /* 6781 6771 * Create Receive Queue (RQ) 6782 6772 */ 6783 - phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; 6784 - phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; 6785 6773 6786 6774 /* Create Receive Queue for header */ 6787 6775 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, ··· 6766 6800 if (!qdesc) { 6767 6801 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6768 6802 "0506 Failed allocate receive HRQ\n"); 6769 - goto out_free_fcp_wq; 6803 + goto out_error; 6770 6804 } 6771 6805 phba->sli4_hba.hdr_rq = qdesc; 6772 6806 ··· 6776 6810 if (!qdesc) { 6777 6811 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6778 6812 "0507 Failed allocate receive DRQ\n"); 6779 - goto out_free_hdr_rq; 6813 + goto out_error; 6780 6814 } 6781 6815 phba->sli4_hba.dat_rq = qdesc; 6782 6816 6783 6817 return 0; 6784 6818 6785 - out_free_hdr_rq: 6786 - lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); 6787 - phba->sli4_hba.hdr_rq = NULL; 6788 - out_free_fcp_wq: 6789 - for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) { 6790 - lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]); 6791 - phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL; 6792 - } 6793 - 
kfree(phba->sli4_hba.fcp_wq); 6794 - phba->sli4_hba.fcp_wq = NULL; 6795 - out_free_els_wq: 6796 - lpfc_sli4_queue_free(phba->sli4_hba.els_wq); 6797 - phba->sli4_hba.els_wq = NULL; 6798 - out_free_mbx_wq: 6799 - lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); 6800 - phba->sli4_hba.mbx_wq = NULL; 6801 - out_free_fcp_cq: 6802 - for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) { 6803 - lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]); 6804 - phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL; 6805 - } 6806 - kfree(phba->sli4_hba.fcp_cq); 6807 - phba->sli4_hba.fcp_cq = NULL; 6808 - out_free_els_cq: 6809 - lpfc_sli4_queue_free(phba->sli4_hba.els_cq); 6810 - phba->sli4_hba.els_cq = NULL; 6811 - out_free_mbx_cq: 6812 - lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); 6813 - phba->sli4_hba.mbx_cq = NULL; 6814 - out_free_fp_eq: 6815 - for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) { 6816 - lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]); 6817 - phba->sli4_hba.fp_eq[fcp_eqidx] = NULL; 6818 - } 6819 - kfree(phba->sli4_hba.fp_eq); 6820 - phba->sli4_hba.fp_eq = NULL; 6821 - out_free_sp_eq: 6822 - lpfc_sli4_queue_free(phba->sli4_hba.sp_eq); 6823 - phba->sli4_hba.sp_eq = NULL; 6824 6819 out_error: 6820 + lpfc_sli4_queue_destroy(phba); 6825 6821 return -ENOMEM; 6826 6822 } 6827 6823 ··· 6802 6874 void 6803 6875 lpfc_sli4_queue_destroy(struct lpfc_hba *phba) 6804 6876 { 6805 - int fcp_qidx; 6877 + int idx; 6878 + 6879 + if (phba->sli4_hba.hba_eq != NULL) { 6880 + /* Release HBA event queue */ 6881 + for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) { 6882 + if (phba->sli4_hba.hba_eq[idx] != NULL) { 6883 + lpfc_sli4_queue_free( 6884 + phba->sli4_hba.hba_eq[idx]); 6885 + phba->sli4_hba.hba_eq[idx] = NULL; 6886 + } 6887 + } 6888 + kfree(phba->sli4_hba.hba_eq); 6889 + phba->sli4_hba.hba_eq = NULL; 6890 + } 6891 + 6892 + if (phba->sli4_hba.fcp_cq != NULL) { 6893 + /* Release FCP completion queue */ 6894 + for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) { 6895 + if 
(phba->sli4_hba.fcp_cq[idx] != NULL) { 6896 + lpfc_sli4_queue_free( 6897 + phba->sli4_hba.fcp_cq[idx]); 6898 + phba->sli4_hba.fcp_cq[idx] = NULL; 6899 + } 6900 + } 6901 + kfree(phba->sli4_hba.fcp_cq); 6902 + phba->sli4_hba.fcp_cq = NULL; 6903 + } 6904 + 6905 + if (phba->sli4_hba.fcp_wq != NULL) { 6906 + /* Release FCP work queue */ 6907 + for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) { 6908 + if (phba->sli4_hba.fcp_wq[idx] != NULL) { 6909 + lpfc_sli4_queue_free( 6910 + phba->sli4_hba.fcp_wq[idx]); 6911 + phba->sli4_hba.fcp_wq[idx] = NULL; 6912 + } 6913 + } 6914 + kfree(phba->sli4_hba.fcp_wq); 6915 + phba->sli4_hba.fcp_wq = NULL; 6916 + } 6917 + 6918 + /* Release FCP CQ mapping array */ 6919 + if (phba->sli4_hba.fcp_cq_map != NULL) { 6920 + kfree(phba->sli4_hba.fcp_cq_map); 6921 + phba->sli4_hba.fcp_cq_map = NULL; 6922 + } 6806 6923 6807 6924 /* Release mailbox command work queue */ 6808 - lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); 6809 - phba->sli4_hba.mbx_wq = NULL; 6925 + if (phba->sli4_hba.mbx_wq != NULL) { 6926 + lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); 6927 + phba->sli4_hba.mbx_wq = NULL; 6928 + } 6810 6929 6811 6930 /* Release ELS work queue */ 6812 - lpfc_sli4_queue_free(phba->sli4_hba.els_wq); 6813 - phba->sli4_hba.els_wq = NULL; 6814 - 6815 - /* Release FCP work queue */ 6816 - if (phba->sli4_hba.fcp_wq != NULL) 6817 - for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; 6818 - fcp_qidx++) 6819 - lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]); 6820 - kfree(phba->sli4_hba.fcp_wq); 6821 - phba->sli4_hba.fcp_wq = NULL; 6931 + if (phba->sli4_hba.els_wq != NULL) { 6932 + lpfc_sli4_queue_free(phba->sli4_hba.els_wq); 6933 + phba->sli4_hba.els_wq = NULL; 6934 + } 6822 6935 6823 6936 /* Release unsolicited receive queue */ 6824 - lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); 6825 - phba->sli4_hba.hdr_rq = NULL; 6826 - lpfc_sli4_queue_free(phba->sli4_hba.dat_rq); 6827 - phba->sli4_hba.dat_rq = NULL; 6937 + if (phba->sli4_hba.hdr_rq != NULL) { 
6938 + lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); 6939 + phba->sli4_hba.hdr_rq = NULL; 6940 + } 6941 + if (phba->sli4_hba.dat_rq != NULL) { 6942 + lpfc_sli4_queue_free(phba->sli4_hba.dat_rq); 6943 + phba->sli4_hba.dat_rq = NULL; 6944 + } 6828 6945 6829 6946 /* Release ELS complete queue */ 6830 - lpfc_sli4_queue_free(phba->sli4_hba.els_cq); 6831 - phba->sli4_hba.els_cq = NULL; 6947 + if (phba->sli4_hba.els_cq != NULL) { 6948 + lpfc_sli4_queue_free(phba->sli4_hba.els_cq); 6949 + phba->sli4_hba.els_cq = NULL; 6950 + } 6832 6951 6833 6952 /* Release mailbox command complete queue */ 6834 - lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); 6835 - phba->sli4_hba.mbx_cq = NULL; 6836 - 6837 - /* Release FCP response complete queue */ 6838 - fcp_qidx = 0; 6839 - if (phba->sli4_hba.fcp_cq != NULL) 6840 - do 6841 - lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]); 6842 - while (++fcp_qidx < phba->cfg_fcp_eq_count); 6843 - kfree(phba->sli4_hba.fcp_cq); 6844 - phba->sli4_hba.fcp_cq = NULL; 6845 - 6846 - /* Release fast-path event queue */ 6847 - if (phba->sli4_hba.fp_eq != NULL) 6848 - for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; 6849 - fcp_qidx++) 6850 - lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]); 6851 - kfree(phba->sli4_hba.fp_eq); 6852 - phba->sli4_hba.fp_eq = NULL; 6853 - 6854 - /* Release slow-path event queue */ 6855 - lpfc_sli4_queue_free(phba->sli4_hba.sp_eq); 6856 - phba->sli4_hba.sp_eq = NULL; 6953 + if (phba->sli4_hba.mbx_cq != NULL) { 6954 + lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); 6955 + phba->sli4_hba.mbx_cq = NULL; 6956 + } 6857 6957 6858 6958 return; 6859 6959 } ··· 6908 6952 int fcp_cq_index = 0; 6909 6953 6910 6954 /* 6911 - * Set up Event Queues (EQs) 6955 + * Set up HBA Event Queues (EQs) 6912 6956 */ 6913 6957 6914 - /* Set up slow-path event queue */ 6915 - if (!phba->sli4_hba.sp_eq) { 6916 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6917 - "0520 Slow-path EQ not allocated\n"); 6918 - goto out_error; 6919 - } 6920 - rc = 
lpfc_eq_create(phba, phba->sli4_hba.sp_eq, 6921 - LPFC_SP_DEF_IMAX); 6922 - if (rc) { 6923 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6924 - "0521 Failed setup of slow-path EQ: " 6925 - "rc = 0x%x\n", rc); 6926 - goto out_error; 6927 - } 6928 - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6929 - "2583 Slow-path EQ setup: queue-id=%d\n", 6930 - phba->sli4_hba.sp_eq->queue_id); 6931 - 6932 - /* Set up fast-path event queue */ 6933 - if (phba->cfg_fcp_eq_count && !phba->sli4_hba.fp_eq) { 6958 + /* Set up HBA event queue */ 6959 + if (phba->cfg_fcp_io_channel && !phba->sli4_hba.hba_eq) { 6934 6960 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6935 6961 "3147 Fast-path EQs not allocated\n"); 6936 6962 rc = -ENOMEM; 6937 - goto out_destroy_sp_eq; 6963 + goto out_error; 6938 6964 } 6939 - for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 6940 - if (!phba->sli4_hba.fp_eq[fcp_eqidx]) { 6965 + for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) { 6966 + if (!phba->sli4_hba.hba_eq[fcp_eqidx]) { 6941 6967 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6942 6968 "0522 Fast-path EQ (%d) not " 6943 6969 "allocated\n", fcp_eqidx); 6944 6970 rc = -ENOMEM; 6945 - goto out_destroy_fp_eq; 6971 + goto out_destroy_hba_eq; 6946 6972 } 6947 - rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx], 6973 + rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[fcp_eqidx], 6948 6974 phba->cfg_fcp_imax); 6949 6975 if (rc) { 6950 6976 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6951 6977 "0523 Failed setup of fast-path EQ " 6952 6978 "(%d), rc = 0x%x\n", fcp_eqidx, rc); 6953 - goto out_destroy_fp_eq; 6979 + goto out_destroy_hba_eq; 6954 6980 } 6955 6981 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6956 - "2584 Fast-path EQ setup: " 6982 + "2584 HBA EQ setup: " 6957 6983 "queue[%d]-id=%d\n", fcp_eqidx, 6958 - phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id); 6984 + phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id); 6959 6985 } 6960 6986 6987 + /* Set up fast-path FCP Response Complete 
Queue */ 6988 + if (!phba->sli4_hba.fcp_cq) { 6989 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6990 + "3148 Fast-path FCP CQ array not " 6991 + "allocated\n"); 6992 + rc = -ENOMEM; 6993 + goto out_destroy_hba_eq; 6994 + } 6995 + 6996 + for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++) { 6997 + if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { 6998 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6999 + "0526 Fast-path FCP CQ (%d) not " 7000 + "allocated\n", fcp_cqidx); 7001 + rc = -ENOMEM; 7002 + goto out_destroy_fcp_cq; 7003 + } 7004 + rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx], 7005 + phba->sli4_hba.hba_eq[fcp_cqidx], LPFC_WCQ, LPFC_FCP); 7006 + if (rc) { 7007 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7008 + "0527 Failed setup of fast-path FCP " 7009 + "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc); 7010 + goto out_destroy_fcp_cq; 7011 + } 7012 + 7013 + /* Setup fcp_cq_map for fast lookup */ 7014 + phba->sli4_hba.fcp_cq_map[fcp_cqidx] = 7015 + phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id; 7016 + 7017 + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7018 + "2588 FCP CQ setup: cq[%d]-id=%d, " 7019 + "parent seq[%d]-id=%d\n", 7020 + fcp_cqidx, 7021 + phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id, 7022 + fcp_cqidx, 7023 + phba->sli4_hba.hba_eq[fcp_cqidx]->queue_id); 7024 + } 7025 + 7026 + /* Set up fast-path FCP Work Queue */ 7027 + if (!phba->sli4_hba.fcp_wq) { 7028 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7029 + "3149 Fast-path FCP WQ array not " 7030 + "allocated\n"); 7031 + rc = -ENOMEM; 7032 + goto out_destroy_fcp_cq; 7033 + } 7034 + 7035 + for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) { 7036 + if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) { 7037 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7038 + "0534 Fast-path FCP WQ (%d) not " 7039 + "allocated\n", fcp_wqidx); 7040 + rc = -ENOMEM; 7041 + goto out_destroy_fcp_wq; 7042 + } 7043 + rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx], 7044 + phba->sli4_hba.fcp_cq[fcp_wqidx], 
7045 + LPFC_FCP); 7046 + if (rc) { 7047 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7048 + "0535 Failed setup of fast-path FCP " 7049 + "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc); 7050 + goto out_destroy_fcp_wq; 7051 + } 7052 + 7053 + /* Bind this WQ to the next FCP ring */ 7054 + pring = &psli->ring[MAX_SLI3_CONFIGURED_RINGS + fcp_wqidx]; 7055 + pring->sli.sli4.wqp = (void *)phba->sli4_hba.fcp_wq[fcp_wqidx]; 7056 + phba->sli4_hba.fcp_cq[fcp_wqidx]->pring = pring; 7057 + 7058 + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7059 + "2591 FCP WQ setup: wq[%d]-id=%d, " 7060 + "parent cq[%d]-id=%d\n", 7061 + fcp_wqidx, 7062 + phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, 7063 + fcp_cq_index, 7064 + phba->sli4_hba.fcp_cq[fcp_wqidx]->queue_id); 7065 + } 6961 7066 /* 6962 7067 * Set up Complete Queues (CQs) 6963 7068 */ ··· 7028 7011 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7029 7012 "0528 Mailbox CQ not allocated\n"); 7030 7013 rc = -ENOMEM; 7031 - goto out_destroy_fp_eq; 7014 + goto out_destroy_fcp_wq; 7032 7015 } 7033 - rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq, 7034 - LPFC_MCQ, LPFC_MBOX); 7016 + rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, 7017 + phba->sli4_hba.hba_eq[0], LPFC_MCQ, LPFC_MBOX); 7035 7018 if (rc) { 7036 7019 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7037 7020 "0529 Failed setup of slow-path mailbox CQ: " 7038 7021 "rc = 0x%x\n", rc); 7039 - goto out_destroy_fp_eq; 7022 + goto out_destroy_fcp_wq; 7040 7023 } 7041 7024 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7042 7025 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n", 7043 7026 phba->sli4_hba.mbx_cq->queue_id, 7044 - phba->sli4_hba.sp_eq->queue_id); 7027 + phba->sli4_hba.hba_eq[0]->queue_id); 7045 7028 7046 7029 /* Set up slow-path ELS Complete Queue */ 7047 7030 if (!phba->sli4_hba.els_cq) { ··· 7050 7033 rc = -ENOMEM; 7051 7034 goto out_destroy_mbx_cq; 7052 7035 } 7053 - rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq, 7054 - LPFC_WCQ, LPFC_ELS); 7036 + rc 
= lpfc_cq_create(phba, phba->sli4_hba.els_cq, 7037 + phba->sli4_hba.hba_eq[0], LPFC_WCQ, LPFC_ELS); 7055 7038 if (rc) { 7056 7039 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7057 7040 "0531 Failed setup of slow-path ELS CQ: " ··· 7061 7044 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7062 7045 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n", 7063 7046 phba->sli4_hba.els_cq->queue_id, 7064 - phba->sli4_hba.sp_eq->queue_id); 7065 - 7066 - /* Set up fast-path FCP Response Complete Queue */ 7067 - if (!phba->sli4_hba.fcp_cq) { 7068 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7069 - "3148 Fast-path FCP CQ array not " 7070 - "allocated\n"); 7071 - rc = -ENOMEM; 7072 - goto out_destroy_els_cq; 7073 - } 7074 - fcp_cqidx = 0; 7075 - do { 7076 - if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { 7077 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7078 - "0526 Fast-path FCP CQ (%d) not " 7079 - "allocated\n", fcp_cqidx); 7080 - rc = -ENOMEM; 7081 - goto out_destroy_fcp_cq; 7082 - } 7083 - if (phba->cfg_fcp_eq_count) 7084 - rc = lpfc_cq_create(phba, 7085 - phba->sli4_hba.fcp_cq[fcp_cqidx], 7086 - phba->sli4_hba.fp_eq[fcp_cqidx], 7087 - LPFC_WCQ, LPFC_FCP); 7088 - else 7089 - rc = lpfc_cq_create(phba, 7090 - phba->sli4_hba.fcp_cq[fcp_cqidx], 7091 - phba->sli4_hba.sp_eq, 7092 - LPFC_WCQ, LPFC_FCP); 7093 - if (rc) { 7094 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7095 - "0527 Failed setup of fast-path FCP " 7096 - "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc); 7097 - goto out_destroy_fcp_cq; 7098 - } 7099 - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7100 - "2588 FCP CQ setup: cq[%d]-id=%d, " 7101 - "parent %seq[%d]-id=%d\n", 7102 - fcp_cqidx, 7103 - phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id, 7104 - (phba->cfg_fcp_eq_count) ? "" : "sp_", 7105 - fcp_cqidx, 7106 - (phba->cfg_fcp_eq_count) ? 
7107 - phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id : 7108 - phba->sli4_hba.sp_eq->queue_id); 7109 - } while (++fcp_cqidx < phba->cfg_fcp_eq_count); 7047 + phba->sli4_hba.hba_eq[0]->queue_id); 7110 7048 7111 7049 /* 7112 7050 * Set up all the Work Queues (WQs) ··· 7072 7100 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7073 7101 "0538 Slow-path MQ not allocated\n"); 7074 7102 rc = -ENOMEM; 7075 - goto out_destroy_fcp_cq; 7103 + goto out_destroy_els_cq; 7076 7104 } 7077 7105 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq, 7078 7106 phba->sli4_hba.mbx_cq, LPFC_MBOX); ··· 7080 7108 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7081 7109 "0539 Failed setup of slow-path MQ: " 7082 7110 "rc = 0x%x\n", rc); 7083 - goto out_destroy_fcp_cq; 7111 + goto out_destroy_els_cq; 7084 7112 } 7085 7113 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7086 7114 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", ··· 7113 7141 phba->sli4_hba.els_wq->queue_id, 7114 7142 phba->sli4_hba.els_cq->queue_id); 7115 7143 7116 - /* Set up fast-path FCP Work Queue */ 7117 - if (!phba->sli4_hba.fcp_wq) { 7118 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7119 - "3149 Fast-path FCP WQ array not " 7120 - "allocated\n"); 7121 - rc = -ENOMEM; 7122 - goto out_destroy_els_wq; 7123 - } 7124 - for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { 7125 - if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) { 7126 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7127 - "0534 Fast-path FCP WQ (%d) not " 7128 - "allocated\n", fcp_wqidx); 7129 - rc = -ENOMEM; 7130 - goto out_destroy_fcp_wq; 7131 - } 7132 - rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx], 7133 - phba->sli4_hba.fcp_cq[fcp_cq_index], 7134 - LPFC_FCP); 7135 - if (rc) { 7136 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7137 - "0535 Failed setup of fast-path FCP " 7138 - "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc); 7139 - goto out_destroy_fcp_wq; 7140 - } 7141 - 7142 - /* Bind this WQ to the next FCP ring */ 7143 - pring = &psli->ring[MAX_SLI3_CONFIGURED_RINGS + 
fcp_wqidx]; 7144 - pring->sli.sli4.wqp = (void *)phba->sli4_hba.fcp_wq[fcp_wqidx]; 7145 - phba->sli4_hba.fcp_cq[fcp_cq_index]->pring = pring; 7146 - 7147 - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7148 - "2591 FCP WQ setup: wq[%d]-id=%d, " 7149 - "parent cq[%d]-id=%d\n", 7150 - fcp_wqidx, 7151 - phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, 7152 - fcp_cq_index, 7153 - phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id); 7154 - /* Round robin FCP Work Queue's Completion Queue assignment */ 7155 - if (phba->cfg_fcp_eq_count) 7156 - fcp_cq_index = ((fcp_cq_index + 1) % 7157 - phba->cfg_fcp_eq_count); 7158 - } 7159 - 7160 7144 /* 7161 7145 * Create Receive Queue (RQ) 7162 7146 */ ··· 7120 7192 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7121 7193 "0540 Receive Queue not allocated\n"); 7122 7194 rc = -ENOMEM; 7123 - goto out_destroy_fcp_wq; 7195 + goto out_destroy_els_wq; 7124 7196 } 7125 7197 7126 7198 lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ); ··· 7143 7215 phba->sli4_hba.els_cq->queue_id); 7144 7216 return 0; 7145 7217 7146 - out_destroy_fcp_wq: 7147 - for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) 7148 - lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]); 7149 7218 out_destroy_els_wq: 7150 7219 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 7151 7220 out_destroy_mbx_wq: 7152 7221 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 7153 - out_destroy_fcp_cq: 7154 - for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) 7155 - lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]); 7156 7222 out_destroy_els_cq: 7157 7223 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 7158 7224 out_destroy_mbx_cq: 7159 7225 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 7160 - out_destroy_fp_eq: 7226 + out_destroy_fcp_wq: 7227 + for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) 7228 + lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]); 7229 + out_destroy_fcp_cq: 7230 + for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) 7231 + lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]); 
7232 + out_destroy_hba_eq: 7161 7233 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) 7162 - lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]); 7163 - out_destroy_sp_eq: 7164 - lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq); 7234 + lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_eqidx]); 7165 7235 out_error: 7166 7236 return rc; 7167 7237 } ··· 7188 7262 /* Unset unsolicited receive queue */ 7189 7263 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq); 7190 7264 /* Unset FCP work queue */ 7191 - for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) 7192 - lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]); 7265 + if (phba->sli4_hba.fcp_wq) { 7266 + for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel; 7267 + fcp_qidx++) 7268 + lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]); 7269 + } 7193 7270 /* Unset mailbox command complete queue */ 7194 7271 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 7195 7272 /* Unset ELS complete queue */ 7196 7273 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 7197 7274 /* Unset FCP response complete queue */ 7198 7275 if (phba->sli4_hba.fcp_cq) { 7199 - fcp_qidx = 0; 7200 - do { 7276 + for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel; 7277 + fcp_qidx++) 7201 7278 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]); 7202 - } while (++fcp_qidx < phba->cfg_fcp_eq_count); 7203 7279 } 7204 7280 /* Unset fast-path event queue */ 7205 - if (phba->sli4_hba.fp_eq) { 7206 - for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; 7281 + if (phba->sli4_hba.hba_eq) { 7282 + for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel; 7207 7283 fcp_qidx++) 7208 - lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]); 7284 + lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_qidx]); 7209 7285 } 7210 - /* Unset slow-path event queue */ 7211 - lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq); 7212 7286 } 7213 7287 7214 7288 /** ··· 8100 8174 "message=%d\n", index, 8101 8175 phba->sli4_hba.msix_entries[index].vector, 
8102 8176 phba->sli4_hba.msix_entries[index].entry); 8177 + 8103 8178 /* 8104 8179 * Assign MSI-X vectors to interrupt handlers 8105 8180 */ 8106 - if (vectors > 1) 8107 - rc = request_irq(phba->sli4_hba.msix_entries[0].vector, 8108 - &lpfc_sli4_sp_intr_handler, IRQF_SHARED, 8109 - LPFC_SP_DRIVER_HANDLER_NAME, phba); 8110 - else 8111 - /* All Interrupts need to be handled by one EQ */ 8112 - rc = request_irq(phba->sli4_hba.msix_entries[0].vector, 8113 - &lpfc_sli4_intr_handler, IRQF_SHARED, 8114 - LPFC_DRIVER_NAME, phba); 8115 - if (rc) { 8116 - lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 8117 - "0485 MSI-X slow-path request_irq failed " 8118 - "(%d)\n", rc); 8119 - goto msi_fail_out; 8120 - } 8121 - 8122 - /* The rest of the vector(s) are associated to fast-path handler(s) */ 8123 - for (index = 1; index < vectors; index++) { 8124 - phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1; 8125 - phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba; 8181 + for (index = 0; index < vectors; index++) { 8182 + phba->sli4_hba.fcp_eq_hdl[index].idx = index; 8183 + phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 8126 8184 rc = request_irq(phba->sli4_hba.msix_entries[index].vector, 8127 - &lpfc_sli4_fp_intr_handler, IRQF_SHARED, 8185 + &lpfc_sli4_hba_intr_handler, IRQF_SHARED, 8128 8186 LPFC_FP_DRIVER_HANDLER_NAME, 8129 - &phba->sli4_hba.fcp_eq_hdl[index - 1]); 8187 + &phba->sli4_hba.fcp_eq_hdl[index]); 8130 8188 if (rc) { 8131 8189 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 8132 8190 "0486 MSI-X fast-path (%d) " ··· 8124 8214 8125 8215 cfg_fail_out: 8126 8216 /* free the irq already requested */ 8127 - for (--index; index >= 1; index--) 8128 - free_irq(phba->sli4_hba.msix_entries[index - 1].vector, 8129 - &phba->sli4_hba.fcp_eq_hdl[index - 1]); 8130 - 8131 - /* free the irq already requested */ 8132 - free_irq(phba->sli4_hba.msix_entries[0].vector, phba); 8217 + for (--index; index >= 0; index--) 8218 + free_irq(phba->sli4_hba.msix_entries[index].vector, 8219 + 
&phba->sli4_hba.fcp_eq_hdl[index]); 8133 8220 8134 8221 msi_fail_out: 8135 8222 /* Unconfigure MSI-X capability structure */ ··· 8147 8240 int index; 8148 8241 8149 8242 /* Free up MSI-X multi-message vectors */ 8150 - free_irq(phba->sli4_hba.msix_entries[0].vector, phba); 8151 - 8152 - for (index = 1; index < phba->sli4_hba.msix_vec_nr; index++) 8243 + for (index = 0; index < phba->sli4_hba.msix_vec_nr; index++) 8153 8244 free_irq(phba->sli4_hba.msix_entries[index].vector, 8154 - &phba->sli4_hba.fcp_eq_hdl[index - 1]); 8245 + &phba->sli4_hba.fcp_eq_hdl[index]); 8155 8246 8156 8247 /* Disable MSI-X */ 8157 8248 pci_disable_msix(phba->pcidev); ··· 8195 8290 return rc; 8196 8291 } 8197 8292 8198 - for (index = 0; index < phba->cfg_fcp_eq_count; index++) { 8293 + for (index = 0; index < phba->cfg_fcp_io_channel; index++) { 8199 8294 phba->sli4_hba.fcp_eq_hdl[index].idx = index; 8200 8295 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 8201 8296 } ··· 8275 8370 /* Indicate initialization to INTx mode */ 8276 8371 phba->intr_type = INTx; 8277 8372 intr_mode = 0; 8278 - for (index = 0; index < phba->cfg_fcp_eq_count; 8373 + for (index = 0; index < phba->cfg_fcp_io_channel; 8279 8374 index++) { 8280 8375 phba->sli4_hba.fcp_eq_hdl[index].idx = index; 8281 8376 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; ··· 9395 9490 int error; 9396 9491 uint32_t cfg_mode, intr_mode; 9397 9492 int mcnt; 9398 - int adjusted_fcp_eq_count; 9493 + int adjusted_fcp_io_channel; 9399 9494 const struct firmware *fw; 9400 9495 uint8_t file_name[16]; 9401 9496 ··· 9498 9593 } 9499 9594 /* Default to single EQ for non-MSI-X */ 9500 9595 if (phba->intr_type != MSIX) 9501 - adjusted_fcp_eq_count = 0; 9596 + adjusted_fcp_io_channel = 0; 9502 9597 else if (phba->sli4_hba.msix_vec_nr < 9503 - phba->cfg_fcp_eq_count + 1) 9504 - adjusted_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1; 9598 + phba->cfg_fcp_io_channel) 9599 + adjusted_fcp_io_channel = phba->sli4_hba.msix_vec_nr; 9505 9600 else 9506 - 
adjusted_fcp_eq_count = phba->cfg_fcp_eq_count; 9507 - phba->cfg_fcp_eq_count = adjusted_fcp_eq_count; 9601 + adjusted_fcp_io_channel = phba->cfg_fcp_io_channel; 9602 + phba->cfg_fcp_io_channel = adjusted_fcp_io_channel; 9508 9603 /* Set up SLI-4 HBA */ 9509 9604 if (lpfc_sli4_hba_setup(phba)) { 9510 9605 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, ··· 9640 9735 * buffers are released to their corresponding pools here. 9641 9736 */ 9642 9737 lpfc_scsi_free(phba); 9738 + 9643 9739 lpfc_sli4_driver_resource_unset(phba); 9644 9740 9645 9741 /* Unmap adapter Control and Doorbell registers */
+46 -138
drivers/scsi/lpfc/lpfc_sli.c
··· 4921 4921 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM); 4922 4922 fcp_eqidx = 0; 4923 4923 if (phba->sli4_hba.fcp_cq) { 4924 - do 4924 + do { 4925 4925 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx], 4926 4926 LPFC_QUEUE_REARM); 4927 - while (++fcp_eqidx < phba->cfg_fcp_eq_count); 4927 + } while (++fcp_eqidx < phba->cfg_fcp_io_channel); 4928 4928 } 4929 - lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM); 4930 - if (phba->sli4_hba.fp_eq) { 4931 - for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; 4929 + if (phba->sli4_hba.hba_eq) { 4930 + for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; 4932 4931 fcp_eqidx++) 4933 - lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx], 4932 + lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[fcp_eqidx], 4934 4933 LPFC_QUEUE_REARM); 4935 4934 } 4936 4935 } ··· 7817 7818 int i; 7818 7819 7819 7820 i = atomic_add_return(1, &phba->fcp_qidx); 7820 - i = (i % phba->cfg_fcp_wq_count); 7821 + i = (i % phba->cfg_fcp_io_channel); 7821 7822 return i; 7822 7823 } 7823 7824 ··· 8726 8727 8727 8728 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS; 8728 8729 if (phba->sli_rev == LPFC_SLI_REV4) 8729 - psli->num_rings += phba->cfg_fcp_eq_count; 8730 + psli->num_rings += phba->cfg_fcp_io_channel; 8730 8731 psli->sli_flag = 0; 8731 8732 psli->fcp_ring = LPFC_FCP_RING; 8732 8733 psli->next_ring = LPFC_FCP_NEXT_RING; ··· 11467 11468 * 11468 11469 **/ 11469 11470 static void 11470 - lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe) 11471 + lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, 11472 + struct lpfc_queue *speq) 11471 11473 { 11472 - struct lpfc_queue *cq = NULL, *childq, *speq; 11474 + struct lpfc_queue *cq = NULL, *childq; 11473 11475 struct lpfc_cqe *cqe; 11474 11476 bool workposted = false; 11475 11477 int ecount = 0; 11476 11478 uint16_t cqid; 11477 11479 11478 - if (bf_get_le32(lpfc_eqe_major_code, eqe) != 0) { 11479 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 
11480 - "0359 Not a valid slow-path completion " 11481 - "event: majorcode=x%x, minorcode=x%x\n", 11482 - bf_get_le32(lpfc_eqe_major_code, eqe), 11483 - bf_get_le32(lpfc_eqe_minor_code, eqe)); 11484 - return; 11485 - } 11486 - 11487 11480 /* Get the reference to the corresponding CQ */ 11488 11481 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 11489 11482 11490 - /* Search for completion queue pointer matching this cqid */ 11491 - speq = phba->sli4_hba.sp_eq; 11492 - /* sanity check on queue memory */ 11493 - if (unlikely(!speq)) 11494 - return; 11495 11483 list_for_each_entry(childq, &speq->child_list, list) { 11496 11484 if (childq->queue_id == cqid) { 11497 11485 cq = childq; ··· 11697 11711 } 11698 11712 11699 11713 /** 11700 - * lpfc_sli4_fp_handle_eqe - Process a fast-path event queue entry 11714 + * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry 11701 11715 * @phba: Pointer to HBA context object. 11702 11716 * @eqe: Pointer to fast-path event queue entry. 11703 11717 * ··· 11709 11723 * completion queue, and then return. 
11710 11724 **/ 11711 11725 static void 11712 - lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, 11713 - uint32_t fcp_cqidx) 11726 + lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, 11727 + uint32_t qidx) 11714 11728 { 11715 11729 struct lpfc_queue *cq; 11716 11730 struct lpfc_cqe *cqe; ··· 11720 11734 11721 11735 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { 11722 11736 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11723 - "0366 Not a valid fast-path completion " 11737 + "0366 Not a valid completion " 11724 11738 "event: majorcode=x%x, minorcode=x%x\n", 11725 11739 bf_get_le32(lpfc_eqe_major_code, eqe), 11726 11740 bf_get_le32(lpfc_eqe_minor_code, eqe)); 11741 + return; 11742 + } 11743 + 11744 + /* Get the reference to the corresponding CQ */ 11745 + cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 11746 + 11747 + /* Check if this is a Slow path event */ 11748 + if (unlikely(cqid != phba->sli4_hba.fcp_cq_map[qidx])) { 11749 + lpfc_sli4_sp_handle_eqe(phba, eqe, 11750 + phba->sli4_hba.hba_eq[qidx]); 11727 11751 return; 11728 11752 } 11729 11753 ··· 11743 11747 "does not exist\n"); 11744 11748 return; 11745 11749 } 11746 - cq = phba->sli4_hba.fcp_cq[fcp_cqidx]; 11750 + cq = phba->sli4_hba.fcp_cq[qidx]; 11747 11751 if (unlikely(!cq)) { 11748 11752 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 11749 11753 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11750 11754 "0367 Fast-path completion queue " 11751 - "(%d) does not exist\n", fcp_cqidx); 11755 + "(%d) does not exist\n", qidx); 11752 11756 return; 11753 11757 } 11754 11758 11755 - /* Get the reference to the corresponding CQ */ 11756 - cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 11757 11759 if (unlikely(cqid != cq->queue_id)) { 11758 11760 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11759 11761 "0368 Miss-matched fast-path completion " ··· 11799 11805 } 11800 11806 11801 11807 /** 11802 - * lpfc_sli4_sp_intr_handler - Slow-path interrupt handler to SLI-4 device 11803 - * @irq: 
Interrupt number. 11804 - * @dev_id: The device context pointer. 11805 - * 11806 - * This function is directly called from the PCI layer as an interrupt 11807 - * service routine when device with SLI-4 interface spec is enabled with 11808 - * MSI-X multi-message interrupt mode and there are slow-path events in 11809 - * the HBA. However, when the device is enabled with either MSI or Pin-IRQ 11810 - * interrupt mode, this function is called as part of the device-level 11811 - * interrupt handler. When the PCI slot is in error recovery or the HBA is 11812 - * undergoing initialization, the interrupt handler will not process the 11813 - * interrupt. The link attention and ELS ring attention events are handled 11814 - * by the worker thread. The interrupt handler signals the worker thread 11815 - * and returns for these events. This function is called without any lock 11816 - * held. It gets the hbalock to access and update SLI data structures. 11817 - * 11818 - * This function returns IRQ_HANDLED when interrupt is handled else it 11819 - * returns IRQ_NONE. 
11820 - **/ 11821 - irqreturn_t 11822 - lpfc_sli4_sp_intr_handler(int irq, void *dev_id) 11823 - { 11824 - struct lpfc_hba *phba; 11825 - struct lpfc_queue *speq; 11826 - struct lpfc_eqe *eqe; 11827 - unsigned long iflag; 11828 - int ecount = 0; 11829 - 11830 - /* 11831 - * Get the driver's phba structure from the dev_id 11832 - */ 11833 - phba = (struct lpfc_hba *)dev_id; 11834 - 11835 - if (unlikely(!phba)) 11836 - return IRQ_NONE; 11837 - 11838 - /* Get to the EQ struct associated with this vector */ 11839 - speq = phba->sli4_hba.sp_eq; 11840 - if (unlikely(!speq)) 11841 - return IRQ_NONE; 11842 - 11843 - /* Check device state for handling interrupt */ 11844 - if (unlikely(lpfc_intr_state_check(phba))) { 11845 - speq->EQ_badstate++; 11846 - /* Check again for link_state with lock held */ 11847 - spin_lock_irqsave(&phba->hbalock, iflag); 11848 - if (phba->link_state < LPFC_LINK_DOWN) 11849 - /* Flush, clear interrupt, and rearm the EQ */ 11850 - lpfc_sli4_eq_flush(phba, speq); 11851 - spin_unlock_irqrestore(&phba->hbalock, iflag); 11852 - return IRQ_NONE; 11853 - } 11854 - 11855 - /* 11856 - * Process all the event on FCP slow-path EQ 11857 - */ 11858 - while ((eqe = lpfc_sli4_eq_get(speq))) { 11859 - lpfc_sli4_sp_handle_eqe(phba, eqe); 11860 - if (!(++ecount % speq->entry_repost)) 11861 - lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM); 11862 - speq->EQ_processed++; 11863 - } 11864 - 11865 - /* Track the max number of EQEs processed in 1 intr */ 11866 - if (ecount > speq->EQ_max_eqe) 11867 - speq->EQ_max_eqe = ecount; 11868 - 11869 - /* Always clear and re-arm the slow-path EQ */ 11870 - lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM); 11871 - 11872 - /* Catch the no cq entry condition */ 11873 - if (unlikely(ecount == 0)) { 11874 - speq->EQ_no_entry++; 11875 - if (phba->intr_type == MSIX) 11876 - /* MSI-X treated interrupt served as no EQ share INT */ 11877 - lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11878 - "0357 MSI-X interrupt with no EQE\n"); 11879 - else 
11880 - /* Non MSI-X treated on interrupt as EQ share INT */ 11881 - return IRQ_NONE; 11882 - } 11883 - 11884 - return IRQ_HANDLED; 11885 - } /* lpfc_sli4_sp_intr_handler */ 11886 - 11887 - /** 11888 - * lpfc_sli4_fp_intr_handler - Fast-path interrupt handler to SLI-4 device 11808 + * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device 11889 11809 * @irq: Interrupt number. 11890 11810 * @dev_id: The device context pointer. 11891 11811 * ··· 11816 11908 * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is 11817 11909 * equal to that of FCP CQ index. 11818 11910 * 11911 + * The link attention and ELS ring attention events are handled 11912 + * by the worker thread. The interrupt handler signals the worker thread 11913 + * and returns for these events. This function is called without any lock 11914 + * held. It gets the hbalock to access and update SLI data structures. 11915 + * 11819 11916 * This function returns IRQ_HANDLED when interrupt is handled else it 11820 11917 * returns IRQ_NONE. 
11821 11918 **/ 11822 11919 irqreturn_t 11823 - lpfc_sli4_fp_intr_handler(int irq, void *dev_id) 11920 + lpfc_sli4_hba_intr_handler(int irq, void *dev_id) 11824 11921 { 11825 11922 struct lpfc_hba *phba; 11826 11923 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; ··· 11842 11929 11843 11930 if (unlikely(!phba)) 11844 11931 return IRQ_NONE; 11845 - if (unlikely(!phba->sli4_hba.fp_eq)) 11932 + if (unlikely(!phba->sli4_hba.hba_eq)) 11846 11933 return IRQ_NONE; 11847 11934 11848 11935 /* Get to the EQ struct associated with this vector */ 11849 - fpeq = phba->sli4_hba.fp_eq[fcp_eqidx]; 11936 + fpeq = phba->sli4_hba.hba_eq[fcp_eqidx]; 11850 11937 if (unlikely(!fpeq)) 11851 11938 return IRQ_NONE; 11852 11939 ··· 11866 11953 * Process all the event on FCP fast-path EQ 11867 11954 */ 11868 11955 while ((eqe = lpfc_sli4_eq_get(fpeq))) { 11869 - lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx); 11956 + lpfc_sli4_hba_handle_eqe(phba, eqe, fcp_eqidx); 11870 11957 if (!(++ecount % fpeq->entry_repost)) 11871 11958 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM); 11872 11959 fpeq->EQ_processed++; ··· 11914 12001 lpfc_sli4_intr_handler(int irq, void *dev_id) 11915 12002 { 11916 12003 struct lpfc_hba *phba; 11917 - irqreturn_t sp_irq_rc, fp_irq_rc; 11918 - bool fp_handled = false; 12004 + irqreturn_t hba_irq_rc; 12005 + bool hba_handled = false; 11919 12006 uint32_t fcp_eqidx; 11920 12007 11921 12008 /* Get the driver's phba structure from the dev_id */ ··· 11925 12012 return IRQ_NONE; 11926 12013 11927 12014 /* 11928 - * Invokes slow-path host attention interrupt handling as appropriate. 11929 - */ 11930 - sp_irq_rc = lpfc_sli4_sp_intr_handler(irq, dev_id); 11931 - 11932 - /* 11933 12015 * Invoke fast-path host attention interrupt handling as appropriate. 
11934 12016 */ 11935 - for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 11936 - fp_irq_rc = lpfc_sli4_fp_intr_handler(irq, 12017 + for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) { 12018 + hba_irq_rc = lpfc_sli4_hba_intr_handler(irq, 11937 12019 &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]); 11938 - if (fp_irq_rc == IRQ_HANDLED) 11939 - fp_handled |= true; 12020 + if (hba_irq_rc == IRQ_HANDLED) 12021 + hba_handled |= true; 11940 12022 } 11941 12023 11942 - return (fp_handled == true) ? IRQ_HANDLED : sp_irq_rc; 12024 + return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE; 11943 12025 } /* lpfc_sli4_intr_handler */ 11944 12026 11945 12027 /** ··· 12065 12157 union lpfc_sli4_cfg_shdr *shdr; 12066 12158 uint16_t dmult; 12067 12159 12068 - if (startq >= phba->cfg_fcp_eq_count) 12160 + if (startq >= phba->cfg_fcp_io_channel) 12069 12161 return 0; 12070 12162 12071 12163 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); ··· 12082 12174 dmult = LPFC_DMULT_CONST/phba->cfg_fcp_imax - 1; 12083 12175 12084 12176 cnt = 0; 12085 - for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_eq_count; 12177 + for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_io_channel; 12086 12178 fcp_eqidx++) { 12087 - eq = phba->sli4_hba.fp_eq[fcp_eqidx]; 12179 + eq = phba->sli4_hba.hba_eq[fcp_eqidx]; 12088 12180 if (!eq) 12089 12181 continue; 12090 12182 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
+11 -17
drivers/scsi/lpfc/lpfc_sli4.h
··· 34 34 /* Number of SGL entries can be posted in a 4KB nonembedded mbox command */ 35 35 #define LPFC_NEMBED_MBOX_SGL_CNT 254 36 36 37 - /* Multi-queue arrangement for fast-path FCP work queues */ 38 - #define LPFC_FN_EQN_MAX 8 39 - #define LPFC_SP_EQN_DEF 1 40 - #define LPFC_FP_EQN_DEF 4 41 - #define LPFC_FP_EQN_MIN 1 42 - #define LPFC_FP_EQN_MAX (LPFC_FN_EQN_MAX - LPFC_SP_EQN_DEF) 43 - 44 - #define LPFC_FN_WQN_MAX 32 45 - #define LPFC_SP_WQN_DEF 1 46 - #define LPFC_FP_WQN_DEF 4 47 - #define LPFC_FP_WQN_MIN 1 48 - #define LPFC_FP_WQN_MAX (LPFC_FN_WQN_MAX - LPFC_SP_WQN_DEF) 37 + /* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */ 38 + #define LPFC_FCP_IO_CHAN_DEF 4 39 + #define LPFC_FCP_IO_CHAN_MIN 1 40 + #define LPFC_FCP_IO_CHAN_MAX 8 49 41 50 42 /* 51 43 * Provide the default FCF Record attributes used by the driver ··· 489 497 uint32_t cfg_eqn; 490 498 uint32_t msix_vec_nr; 491 499 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */ 500 + 492 501 /* Pointers to the constructed SLI4 queues */ 493 - struct lpfc_queue **fp_eq; /* Fast-path event queue */ 494 - struct lpfc_queue *sp_eq; /* Slow-path event queue */ 502 + struct lpfc_queue **hba_eq;/* Event queues for HBA */ 503 + struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */ 495 504 struct lpfc_queue **fcp_wq;/* Fast-path FCP work queue */ 505 + uint16_t *fcp_cq_map; 506 + 507 + struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */ 508 + struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */ 496 509 struct lpfc_queue *mbx_wq; /* Slow-path MBOX work queue */ 497 510 struct lpfc_queue *els_wq; /* Slow-path ELS work queue */ 498 511 struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */ 499 512 struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */ 500 - struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */ 501 - struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */ 502 - struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
503 513 504 514 /* Setup information for various queue parameters */ 505 515 int eq_esize