[SCSI] bfa: fix comments for c files

This patch addresses the comments from Randy Dunlap (Randy.Dunlap@oracle.com)
regarding comment blocks that begin with "/**". bfa driver comments do not
currently follow the kernel-doc convention, so we replace all /** with /* and
**/ with */.
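In the kernel, the "/**" opener is reserved for kernel-doc comments that document
a specific symbol with @-tags; plain descriptive headers should use an ordinary
"/*" block comment. As a small before/after illustration, taken from one of the
blocks changed below:

    Before:

        /**
         * IOC local definitions
         */

    After:

        /*
         * IOC local definitions
         */

A genuine kernel-doc comment, by contrast, is attached to a symbol and uses
@-tags, for example (hypothetical function name, shown only for comparison):

        /**
         * bfa_iocfc_example() - one-line summary of the function
         * @bfa: BFA instance pointer
         *
         * Return: BFA_STATUS_OK on success.
         */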

Signed-off-by: Jing Huang <huangj@brocade.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>

Authored by Jing Huang and committed by James Bottomley (5fbe25c7, acdc79a6)

+943 -1003
+56 -56
drivers/scsi/bfa/bfa_core.c
··· 21 22 BFA_TRC_FILE(HAL, CORE); 23 24 - /** 25 * BFA IOC FC related definitions 26 */ 27 28 - /** 29 * IOC local definitions 30 */ 31 #define BFA_IOCFC_TOV 5000 /* msecs */ ··· 54 #define DEF_CFG_NUM_SBOOT_TGTS 16 55 #define DEF_CFG_NUM_SBOOT_LUNS 16 56 57 - /** 58 * forward declaration for IOC FC functions 59 */ 60 static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status); ··· 63 static void bfa_iocfc_reset_cbfn(void *bfa_arg); 64 static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn; 65 66 - /** 67 * BFA Interrupt handling functions 68 */ 69 static void ··· 86 87 waitq = bfa_reqq(bfa, qid); 88 list_for_each_safe(qe, qen, waitq) { 89 - /** 90 * Callback only as long as there is room in request queue 91 */ 92 if (bfa_reqq_full(bfa, qid)) ··· 104 bfa_intx(bfa); 105 } 106 107 - /** 108 * hal_intr_api 109 */ 110 bfa_boolean_t ··· 117 if (!intr) 118 return BFA_FALSE; 119 120 - /** 121 * RME completion queue interrupt 122 */ 123 qintr = intr & __HFN_INT_RME_MASK; ··· 131 if (!intr) 132 return BFA_TRUE; 133 134 - /** 135 * CPE completion queue interrupt 136 */ 137 qintr = intr & __HFN_INT_CPE_MASK; ··· 211 212 bfa->iocfc.hwif.hw_reqq_ack(bfa, qid); 213 214 - /** 215 * Resume any pending requests in the corresponding reqq. 216 */ 217 waitq = bfa_reqq(bfa, qid); ··· 259 } 260 } 261 262 - /** 263 * update CI 264 */ 265 bfa_rspq_ci(bfa, qid) = pi; 266 writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]); 267 mmiowb(); 268 269 - /** 270 * Resume any pending requests in the corresponding reqq. 271 */ 272 waitq = bfa_reqq(bfa, qid); ··· 289 290 if (intr) { 291 if (intr & __HFN_INT_LL_HALT) { 292 - /** 293 * If LL_HALT bit is set then FW Init Halt LL Port 294 * Register needs to be cleared as well so Interrupt 295 * Status Register will be cleared. ··· 300 } 301 302 if (intr & __HFN_INT_ERR_PSS) { 303 - /** 304 * ERR_PSS bit needs to be cleared as well in case 305 * interrups are shared so driver's interrupt handler is 306 * still called eventhough it is already masked out. ··· 323 bfa_isrs[mc] = isr_func; 324 } 325 326 - /** 327 * BFA IOC FC related functions 328 */ 329 330 - /** 331 * hal_ioc_pvt BFA IOC private functions 332 */ 333 ··· 366 BFA_CACHELINE_SZ); 367 } 368 369 - /** 370 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ 371 */ 372 static void ··· 384 385 bfa_iocfc_reset_queues(bfa); 386 387 - /** 388 * initialize IOC configuration info 389 */ 390 cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG; 391 cfg_info->num_cqs = cfg->fwcfg.num_cqs; 392 393 bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa); 394 - /** 395 * dma map REQ and RSP circular queues and shadow pointers 396 */ 397 for (i = 0; i < cfg->fwcfg.num_cqs; i++) { ··· 410 cpu_to_be16(cfg->drvcfg.num_rspq_elems); 411 } 412 413 - /** 414 * Enable interrupt coalescing if it is driver init path 415 * and not ioc disable/enable path. 416 */ ··· 419 420 iocfc->cfgdone = BFA_FALSE; 421 422 - /** 423 * dma map IOC configuration itself 424 */ 425 bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ, ··· 442 443 iocfc->cfg = *cfg; 444 445 - /** 446 * Initialize chip specific handlers. 447 */ 448 if (bfa_asic_id_ct(bfa_ioc_devid(&bfa->ioc))) { ··· 559 } 560 } 561 562 - /** 563 * Start BFA submodules. 564 */ 565 static void ··· 573 hal_mods[i]->start(bfa); 574 } 575 576 - /** 577 * Disable BFA submodules. 578 */ 579 static void ··· 623 complete(&bfad->disable_comp); 624 } 625 626 - /** 627 * Update BFA configuration from firmware configuration. 
628 */ 629 static void ··· 642 643 iocfc->cfgdone = BFA_TRUE; 644 645 - /** 646 * Configuration is complete - initialize/start submodules 647 */ 648 bfa_fcport_init(bfa); ··· 665 } 666 } 667 668 - /** 669 * IOC enable request is complete 670 */ 671 static void ··· 684 bfa_iocfc_send_cfg(bfa); 685 } 686 687 - /** 688 * IOC disable request is complete 689 */ 690 static void ··· 705 } 706 } 707 708 - /** 709 * Notify sub-modules of hardware failure. 710 */ 711 static void ··· 723 bfa); 724 } 725 726 - /** 727 * Actions on chip-reset completion. 728 */ 729 static void ··· 735 bfa_isr_enable(bfa); 736 } 737 738 - /** 739 * hal_ioc_public 740 */ 741 742 - /** 743 * Query IOC memory requirement information. 744 */ 745 void ··· 754 *km_len += bfa_ioc_debug_trcsz(bfa_auto_recover); 755 } 756 757 - /** 758 * Query IOC memory requirement information. 759 */ 760 void ··· 772 ioc->trcmod = bfa->trcmod; 773 bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod); 774 775 - /** 776 * Set FC mode for BFA_PCI_DEVICE_ID_CT_FC. 777 */ 778 if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC) ··· 790 INIT_LIST_HEAD(&bfa->reqq_waitq[i]); 791 } 792 793 - /** 794 * Query IOC memory requirement information. 795 */ 796 void ··· 799 bfa_ioc_detach(&bfa->ioc); 800 } 801 802 - /** 803 * Query IOC memory requirement information. 804 */ 805 void ··· 809 bfa_ioc_enable(&bfa->ioc); 810 } 811 812 - /** 813 * IOC start called from bfa_start(). Called to start IOC operations 814 * at driver instantiation for this instance. 815 */ ··· 820 bfa_iocfc_start_submod(bfa); 821 } 822 823 - /** 824 * IOC stop called from bfa_stop(). Called only when driver is unloaded 825 * for this instance. 826 */ ··· 924 iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1); 925 bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa); 926 } 927 - /** 928 * Enable IOC after it is disabled. 929 */ 930 void ··· 953 return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone; 954 } 955 956 - /** 957 * Return boot target port wwns -- read from boot information in flash. 958 */ 959 void ··· 998 return cfgrsp->pbc_cfg.nvports; 999 } 1000 1001 - /** 1002 * hal_api 1003 */ 1004 1005 - /** 1006 * Use this function query the memory requirement of the BFA library. 1007 * This function needs to be called before bfa_attach() to get the 1008 * memory required of the BFA layer for a given driver configuration. ··· 1055 meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len; 1056 } 1057 1058 - /** 1059 * Use this function to do attach the driver instance with the BFA 1060 * library. This function will not trigger any HW initialization 1061 * process (which will be done in bfa_init() call) ··· 1092 1093 bfa_assert((cfg != NULL) && (meminfo != NULL)); 1094 1095 - /** 1096 * initialize all memory pointers for iterative allocation 1097 */ 1098 for (i = 0; i < BFA_MEM_TYPE_MAX; i++) { ··· 1109 bfa_com_port_attach(bfa, meminfo); 1110 } 1111 1112 - /** 1113 * Use this function to delete a BFA IOC. IOC should be stopped (by 1114 * calling bfa_stop()) before this function call. 1115 * ··· 1146 bfa->plog = plog; 1147 } 1148 1149 - /** 1150 * Initialize IOC. 1151 * 1152 * This function will return immediately, when the IOC initialization is ··· 1169 bfa_iocfc_init(bfa); 1170 } 1171 1172 - /** 1173 * Use this function initiate the IOC configuration setup. This function 1174 * will return immediately. 1175 * ··· 1183 bfa_iocfc_start(bfa); 1184 } 1185 1186 - /** 1187 * Use this function quiese the IOC. 
This function will return immediately, 1188 * when the IOC is actually stopped, the bfad->comp will be set. 1189 * ··· 1243 bfa->fcs = BFA_TRUE; 1244 } 1245 1246 - /** 1247 * Periodic timer heart beat from driver 1248 */ 1249 void ··· 1252 bfa_timer_beat(&bfa->timer_mod); 1253 } 1254 1255 - /** 1256 * Return the list of PCI vendor/device id lists supported by this 1257 * BFA instance. 1258 */ ··· 1270 *pciids = __pciids; 1271 } 1272 1273 - /** 1274 * Use this function query the default struct bfa_iocfc_cfg_s value (compiled 1275 * into BFA layer). The OS driver can then turn back and overwrite entries that 1276 * have been configured by the user. ··· 1328 bfa_ioc_get_attr(&bfa->ioc, ioc_attr); 1329 } 1330 1331 - /** 1332 * Retrieve firmware trace information on IOC failure. 1333 */ 1334 bfa_status_t ··· 1337 return bfa_ioc_debug_fwsave(&bfa->ioc, trcdata, trclen); 1338 } 1339 1340 - /** 1341 * Clear the saved firmware trace information of an IOC. 1342 */ 1343 void ··· 1346 bfa_ioc_debug_fwsave_clear(&bfa->ioc); 1347 } 1348 1349 - /** 1350 * Fetch firmware trace data. 1351 * 1352 * @param[in] bfa BFA instance ··· 1362 return bfa_ioc_debug_fwtrc(&bfa->ioc, trcdata, trclen); 1363 } 1364 1365 - /** 1366 * Dump firmware memory. 1367 * 1368 * @param[in] bfa BFA instance ··· 1378 { 1379 return bfa_ioc_debug_fwcore(&bfa->ioc, buf, offset, buflen); 1380 } 1381 - /** 1382 * Reset hw semaphore & usage cnt regs and initialize. 1383 */ 1384 void ··· 1388 bfa_ioc_pll_init(&bfa->ioc); 1389 } 1390 1391 - /** 1392 * Fetch firmware statistics data. 1393 * 1394 * @param[in] bfa BFA instance
··· 21 22 BFA_TRC_FILE(HAL, CORE); 23 24 + /* 25 * BFA IOC FC related definitions 26 */ 27 28 + /* 29 * IOC local definitions 30 */ 31 #define BFA_IOCFC_TOV 5000 /* msecs */ ··· 54 #define DEF_CFG_NUM_SBOOT_TGTS 16 55 #define DEF_CFG_NUM_SBOOT_LUNS 16 56 57 + /* 58 * forward declaration for IOC FC functions 59 */ 60 static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status); ··· 63 static void bfa_iocfc_reset_cbfn(void *bfa_arg); 64 static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn; 65 66 + /* 67 * BFA Interrupt handling functions 68 */ 69 static void ··· 86 87 waitq = bfa_reqq(bfa, qid); 88 list_for_each_safe(qe, qen, waitq) { 89 + /* 90 * Callback only as long as there is room in request queue 91 */ 92 if (bfa_reqq_full(bfa, qid)) ··· 104 bfa_intx(bfa); 105 } 106 107 + /* 108 * hal_intr_api 109 */ 110 bfa_boolean_t ··· 117 if (!intr) 118 return BFA_FALSE; 119 120 + /* 121 * RME completion queue interrupt 122 */ 123 qintr = intr & __HFN_INT_RME_MASK; ··· 131 if (!intr) 132 return BFA_TRUE; 133 134 + /* 135 * CPE completion queue interrupt 136 */ 137 qintr = intr & __HFN_INT_CPE_MASK; ··· 211 212 bfa->iocfc.hwif.hw_reqq_ack(bfa, qid); 213 214 + /* 215 * Resume any pending requests in the corresponding reqq. 216 */ 217 waitq = bfa_reqq(bfa, qid); ··· 259 } 260 } 261 262 + /* 263 * update CI 264 */ 265 bfa_rspq_ci(bfa, qid) = pi; 266 writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]); 267 mmiowb(); 268 269 + /* 270 * Resume any pending requests in the corresponding reqq. 271 */ 272 waitq = bfa_reqq(bfa, qid); ··· 289 290 if (intr) { 291 if (intr & __HFN_INT_LL_HALT) { 292 + /* 293 * If LL_HALT bit is set then FW Init Halt LL Port 294 * Register needs to be cleared as well so Interrupt 295 * Status Register will be cleared. ··· 300 } 301 302 if (intr & __HFN_INT_ERR_PSS) { 303 + /* 304 * ERR_PSS bit needs to be cleared as well in case 305 * interrups are shared so driver's interrupt handler is 306 * still called eventhough it is already masked out. ··· 323 bfa_isrs[mc] = isr_func; 324 } 325 326 + /* 327 * BFA IOC FC related functions 328 */ 329 330 + /* 331 * hal_ioc_pvt BFA IOC private functions 332 */ 333 ··· 366 BFA_CACHELINE_SZ); 367 } 368 369 + /* 370 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ 371 */ 372 static void ··· 384 385 bfa_iocfc_reset_queues(bfa); 386 387 + /* 388 * initialize IOC configuration info 389 */ 390 cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG; 391 cfg_info->num_cqs = cfg->fwcfg.num_cqs; 392 393 bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa); 394 + /* 395 * dma map REQ and RSP circular queues and shadow pointers 396 */ 397 for (i = 0; i < cfg->fwcfg.num_cqs; i++) { ··· 410 cpu_to_be16(cfg->drvcfg.num_rspq_elems); 411 } 412 413 + /* 414 * Enable interrupt coalescing if it is driver init path 415 * and not ioc disable/enable path. 416 */ ··· 419 420 iocfc->cfgdone = BFA_FALSE; 421 422 + /* 423 * dma map IOC configuration itself 424 */ 425 bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ, ··· 442 443 iocfc->cfg = *cfg; 444 445 + /* 446 * Initialize chip specific handlers. 447 */ 448 if (bfa_asic_id_ct(bfa_ioc_devid(&bfa->ioc))) { ··· 559 } 560 } 561 562 + /* 563 * Start BFA submodules. 564 */ 565 static void ··· 573 hal_mods[i]->start(bfa); 574 } 575 576 + /* 577 * Disable BFA submodules. 578 */ 579 static void ··· 623 complete(&bfad->disable_comp); 624 } 625 626 + /* 627 * Update BFA configuration from firmware configuration. 
628 */ 629 static void ··· 642 643 iocfc->cfgdone = BFA_TRUE; 644 645 + /* 646 * Configuration is complete - initialize/start submodules 647 */ 648 bfa_fcport_init(bfa); ··· 665 } 666 } 667 668 + /* 669 * IOC enable request is complete 670 */ 671 static void ··· 684 bfa_iocfc_send_cfg(bfa); 685 } 686 687 + /* 688 * IOC disable request is complete 689 */ 690 static void ··· 705 } 706 } 707 708 + /* 709 * Notify sub-modules of hardware failure. 710 */ 711 static void ··· 723 bfa); 724 } 725 726 + /* 727 * Actions on chip-reset completion. 728 */ 729 static void ··· 735 bfa_isr_enable(bfa); 736 } 737 738 + /* 739 * hal_ioc_public 740 */ 741 742 + /* 743 * Query IOC memory requirement information. 744 */ 745 void ··· 754 *km_len += bfa_ioc_debug_trcsz(bfa_auto_recover); 755 } 756 757 + /* 758 * Query IOC memory requirement information. 759 */ 760 void ··· 772 ioc->trcmod = bfa->trcmod; 773 bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod); 774 775 + /* 776 * Set FC mode for BFA_PCI_DEVICE_ID_CT_FC. 777 */ 778 if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC) ··· 790 INIT_LIST_HEAD(&bfa->reqq_waitq[i]); 791 } 792 793 + /* 794 * Query IOC memory requirement information. 795 */ 796 void ··· 799 bfa_ioc_detach(&bfa->ioc); 800 } 801 802 + /* 803 * Query IOC memory requirement information. 804 */ 805 void ··· 809 bfa_ioc_enable(&bfa->ioc); 810 } 811 812 + /* 813 * IOC start called from bfa_start(). Called to start IOC operations 814 * at driver instantiation for this instance. 815 */ ··· 820 bfa_iocfc_start_submod(bfa); 821 } 822 823 + /* 824 * IOC stop called from bfa_stop(). Called only when driver is unloaded 825 * for this instance. 826 */ ··· 924 iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1); 925 bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa); 926 } 927 + /* 928 * Enable IOC after it is disabled. 929 */ 930 void ··· 953 return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone; 954 } 955 956 + /* 957 * Return boot target port wwns -- read from boot information in flash. 958 */ 959 void ··· 998 return cfgrsp->pbc_cfg.nvports; 999 } 1000 1001 + /* 1002 * hal_api 1003 */ 1004 1005 + /* 1006 * Use this function query the memory requirement of the BFA library. 1007 * This function needs to be called before bfa_attach() to get the 1008 * memory required of the BFA layer for a given driver configuration. ··· 1055 meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len; 1056 } 1057 1058 + /* 1059 * Use this function to do attach the driver instance with the BFA 1060 * library. This function will not trigger any HW initialization 1061 * process (which will be done in bfa_init() call) ··· 1092 1093 bfa_assert((cfg != NULL) && (meminfo != NULL)); 1094 1095 + /* 1096 * initialize all memory pointers for iterative allocation 1097 */ 1098 for (i = 0; i < BFA_MEM_TYPE_MAX; i++) { ··· 1109 bfa_com_port_attach(bfa, meminfo); 1110 } 1111 1112 + /* 1113 * Use this function to delete a BFA IOC. IOC should be stopped (by 1114 * calling bfa_stop()) before this function call. 1115 * ··· 1146 bfa->plog = plog; 1147 } 1148 1149 + /* 1150 * Initialize IOC. 1151 * 1152 * This function will return immediately, when the IOC initialization is ··· 1169 bfa_iocfc_init(bfa); 1170 } 1171 1172 + /* 1173 * Use this function initiate the IOC configuration setup. This function 1174 * will return immediately. 1175 * ··· 1183 bfa_iocfc_start(bfa); 1184 } 1185 1186 + /* 1187 * Use this function quiese the IOC. 
This function will return immediately, 1188 * when the IOC is actually stopped, the bfad->comp will be set. 1189 * ··· 1243 bfa->fcs = BFA_TRUE; 1244 } 1245 1246 + /* 1247 * Periodic timer heart beat from driver 1248 */ 1249 void ··· 1252 bfa_timer_beat(&bfa->timer_mod); 1253 } 1254 1255 + /* 1256 * Return the list of PCI vendor/device id lists supported by this 1257 * BFA instance. 1258 */ ··· 1270 *pciids = __pciids; 1271 } 1272 1273 + /* 1274 * Use this function query the default struct bfa_iocfc_cfg_s value (compiled 1275 * into BFA layer). The OS driver can then turn back and overwrite entries that 1276 * have been configured by the user. ··· 1328 bfa_ioc_get_attr(&bfa->ioc, ioc_attr); 1329 } 1330 1331 + /* 1332 * Retrieve firmware trace information on IOC failure. 1333 */ 1334 bfa_status_t ··· 1337 return bfa_ioc_debug_fwsave(&bfa->ioc, trcdata, trclen); 1338 } 1339 1340 + /* 1341 * Clear the saved firmware trace information of an IOC. 1342 */ 1343 void ··· 1346 bfa_ioc_debug_fwsave_clear(&bfa->ioc); 1347 } 1348 1349 + /* 1350 * Fetch firmware trace data. 1351 * 1352 * @param[in] bfa BFA instance ··· 1362 return bfa_ioc_debug_fwtrc(&bfa->ioc, trcdata, trclen); 1363 } 1364 1365 + /* 1366 * Dump firmware memory. 1367 * 1368 * @param[in] bfa BFA instance ··· 1378 { 1379 return bfa_ioc_debug_fwcore(&bfa->ioc, buf, offset, buflen); 1380 } 1381 + /* 1382 * Reset hw semaphore & usage cnt regs and initialize. 1383 */ 1384 void ··· 1388 bfa_ioc_pll_init(&bfa->ioc); 1389 } 1390 1391 + /* 1392 * Fetch firmware statistics data. 1393 * 1394 * @param[in] bfa BFA instance
+3 -3
drivers/scsi/bfa/bfa_drv.c
··· 17 18 #include "bfa_modules.h" 19 20 - /** 21 * BFA module list terminated by NULL 22 */ 23 struct bfa_module_s *hal_mods[] = { ··· 31 NULL 32 }; 33 34 - /** 35 * Message handlers for various modules. 36 */ 37 bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = { ··· 70 }; 71 72 73 - /** 74 * Message handlers for mailbox command classes 75 */ 76 bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = {
··· 17 18 #include "bfa_modules.h" 19 20 + /* 21 * BFA module list terminated by NULL 22 */ 23 struct bfa_module_s *hal_mods[] = { ··· 31 NULL 32 }; 33 34 + /* 35 * Message handlers for various modules. 36 */ 37 bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = { ··· 70 }; 71 72 73 + /* 74 * Message handlers for mailbox command classes 75 */ 76 bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = {
+1 -1
drivers/scsi/bfa/bfa_fcbuild.c
··· 150 fchs->s_id = (s_id); 151 fchs->ox_id = cpu_to_be16(ox_id); 152 153 - /** 154 * @todo no need to set ox_id for request 155 * no need to set rx_id for response 156 */
··· 150 fchs->s_id = (s_id); 151 fchs->ox_id = cpu_to_be16(ox_id); 152 153 + /* 154 * @todo no need to set ox_id for request 155 * no need to set rx_id for response 156 */
+150 -150
drivers/scsi/bfa/bfa_fcpim.c
··· 26 (__l->__stats += __r->__stats) 27 28 29 - /** 30 * BFA ITNIM Related definitions 31 */ 32 static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim); ··· 72 } \ 73 } while (0) 74 75 - /** 76 * bfa_itnim_sm BFA itnim state machine 77 */ 78 ··· 89 BFA_ITNIM_SM_QRESUME = 9, /* queue space available */ 90 }; 91 92 - /** 93 * BFA IOIM related definitions 94 */ 95 #define bfa_ioim_move_to_comp_q(__ioim) do { \ ··· 107 if ((__fcpim)->profile_start) \ 108 (__fcpim)->profile_start(__ioim); \ 109 } while (0) 110 - /** 111 * hal_ioim_sm 112 */ 113 114 - /** 115 * IO state machine events 116 */ 117 enum bfa_ioim_event { ··· 136 }; 137 138 139 - /** 140 * BFA TSKIM related definitions 141 */ 142 143 - /** 144 * task management completion handling 145 */ 146 #define bfa_tskim_qcomp(__tskim, __cbfn) do { \ ··· 165 BFA_TSKIM_SM_CLEANUP_DONE = 9, /* TM abort completion */ 166 }; 167 168 - /** 169 * forward declaration for BFA ITNIM functions 170 */ 171 static void bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim); ··· 183 static void bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim); 184 static void bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim); 185 186 - /** 187 * forward declaration of ITNIM state machine 188 */ 189 static void bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, ··· 217 static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim, 218 enum bfa_itnim_event event); 219 220 - /** 221 * forward declaration for BFA IOIM functions 222 */ 223 static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim); ··· 233 static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim); 234 235 236 - /** 237 * forward declaration of BFA IO state machine 238 */ 239 static void bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, ··· 261 static void bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, 262 enum bfa_ioim_event event); 263 264 - /** 265 * forward declaration for BFA TSKIM functions 266 */ 267 static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete); ··· 276 static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim); 277 278 279 - /** 280 * forward declaration of BFA TSKIM state machine 281 */ 282 static void bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, ··· 294 static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, 295 enum bfa_tskim_event event); 296 297 - /** 298 * hal_fcpim_mod BFA FCP Initiator Mode module 299 */ 300 301 - /** 302 * Compute and return memory needed by FCP(im) module. 303 */ 304 static void ··· 307 { 308 bfa_itnim_meminfo(cfg, km_len, dm_len); 309 310 - /** 311 * IO memory 312 */ 313 if (cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN) ··· 320 321 *dm_len += cfg->fwcfg.num_ioim_reqs * BFI_IOIM_SNSLEN; 322 323 - /** 324 * task management command memory 325 */ 326 if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN) ··· 604 605 606 607 - /** 608 * BFA ITNIM module state machine functions 609 */ 610 611 - /** 612 * Beginning/unallocated state - no events expected. 613 */ 614 static void ··· 629 } 630 } 631 632 - /** 633 * Beginning state, only online event expected. 634 */ 635 static void ··· 660 } 661 } 662 663 - /** 664 * Waiting for itnim create response from firmware. 665 */ 666 static void ··· 732 } 733 } 734 735 - /** 736 * Waiting for itnim create response from firmware, a delete is pending. 737 */ 738 static void ··· 760 } 761 } 762 763 - /** 764 * Online state - normal parking state. 765 */ 766 static void ··· 802 } 803 } 804 805 - /** 806 * Second level error recovery need. 
807 */ 808 static void ··· 833 } 834 } 835 836 - /** 837 * Going offline. Waiting for active IO cleanup. 838 */ 839 static void ··· 870 } 871 } 872 873 - /** 874 * Deleting itnim. Waiting for active IO cleanup. 875 */ 876 static void ··· 898 } 899 } 900 901 - /** 902 * Rport offline. Fimrware itnim is being deleted - awaiting f/w response. 903 */ 904 static void ··· 955 } 956 } 957 958 - /** 959 * Offline state. 960 */ 961 static void ··· 987 } 988 } 989 990 - /** 991 * IOC h/w failed state. 992 */ 993 static void ··· 1023 } 1024 } 1025 1026 - /** 1027 * Itnim is deleted, waiting for firmware response to delete. 1028 */ 1029 static void ··· 1068 } 1069 } 1070 1071 - /** 1072 * Initiate cleanup of all IOs on an IOC failure. 1073 */ 1074 static void ··· 1088 bfa_ioim_iocdisable(ioim); 1089 } 1090 1091 - /** 1092 * For IO request in pending queue, we pretend an early timeout. 1093 */ 1094 list_for_each_safe(qe, qen, &itnim->pending_q) { ··· 1102 } 1103 } 1104 1105 - /** 1106 * IO cleanup completion 1107 */ 1108 static void ··· 1114 bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP); 1115 } 1116 1117 - /** 1118 * Initiate cleanup of all IOs. 1119 */ 1120 static void ··· 1129 list_for_each_safe(qe, qen, &itnim->io_q) { 1130 ioim = (struct bfa_ioim_s *) qe; 1131 1132 - /** 1133 * Move IO to a cleanup queue from active queue so that a later 1134 * TM will not pickup this IO. 1135 */ ··· 1176 bfa_cb_itnim_sler(itnim->ditn); 1177 } 1178 1179 - /** 1180 * Call to resume any I/O requests waiting for room in request queue. 1181 */ 1182 static void ··· 1190 1191 1192 1193 - /** 1194 * bfa_itnim_public 1195 */ 1196 ··· 1210 bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, 1211 u32 *dm_len) 1212 { 1213 - /** 1214 * ITN memory 1215 */ 1216 *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s); ··· 1264 1265 itnim->msg_no++; 1266 1267 - /** 1268 * check for room in queue to send request now 1269 */ 1270 m = bfa_reqq_next(itnim->bfa, itnim->reqq); ··· 1281 m->msg_no = itnim->msg_no; 1282 bfa_stats(itnim, fw_create); 1283 1284 - /** 1285 * queue I/O message to firmware 1286 */ 1287 bfa_reqq_produce(itnim->bfa, itnim->reqq); ··· 1293 { 1294 struct bfi_itnim_delete_req_s *m; 1295 1296 - /** 1297 * check for room in queue to send request now 1298 */ 1299 m = bfa_reqq_next(itnim->bfa, itnim->reqq); ··· 1307 m->fw_handle = itnim->rport->fw_handle; 1308 bfa_stats(itnim, fw_delete); 1309 1310 - /** 1311 * queue I/O message to firmware 1312 */ 1313 bfa_reqq_produce(itnim->bfa, itnim->reqq); 1314 return BFA_TRUE; 1315 } 1316 1317 - /** 1318 * Cleanup all pending failed inflight requests. 1319 */ 1320 static void ··· 1329 } 1330 } 1331 1332 - /** 1333 * Start all pending IO requests. 1334 */ 1335 static void ··· 1339 1340 bfa_itnim_iotov_stop(itnim); 1341 1342 - /** 1343 * Abort all inflight IO requests in the queue 1344 */ 1345 bfa_itnim_delayed_comp(itnim, BFA_FALSE); 1346 1347 - /** 1348 * Start all pending IO requests. 1349 */ 1350 while (!list_empty(&itnim->pending_q)) { ··· 1354 } 1355 } 1356 1357 - /** 1358 * Fail all pending IO requests 1359 */ 1360 static void ··· 1362 { 1363 struct bfa_ioim_s *ioim; 1364 1365 - /** 1366 * Fail all inflight IO requests in the queue 1367 */ 1368 bfa_itnim_delayed_comp(itnim, BFA_TRUE); 1369 1370 - /** 1371 * Fail any pending IO requests. 1372 */ 1373 while (!list_empty(&itnim->pending_q)) { ··· 1377 } 1378 } 1379 1380 - /** 1381 * IO TOV timer callback. Fail any pending IO requests. 
1382 */ 1383 static void ··· 1392 bfa_cb_itnim_tov(itnim->ditn); 1393 } 1394 1395 - /** 1396 * Start IO TOV timer for failing back pending IO requests in offline state. 1397 */ 1398 static void ··· 1407 } 1408 } 1409 1410 - /** 1411 * Stop IO TOV timer. 1412 */ 1413 static void ··· 1419 } 1420 } 1421 1422 - /** 1423 * Stop IO TOV timer. 1424 */ 1425 static void ··· 1459 1460 1461 1462 - /** 1463 * bfa_itnim_public 1464 */ 1465 1466 - /** 1467 * Itnim interrupt processing. 1468 */ 1469 void ··· 1509 1510 1511 1512 - /** 1513 * bfa_itnim_api 1514 */ 1515 ··· 1552 bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE); 1553 } 1554 1555 - /** 1556 * Return true if itnim is considered offline for holding off IO request. 1557 * IO is not held if itnim is being deleted. 1558 */ ··· 1603 itnim->ioprofile.io_latency.min[j] = ~0; 1604 } 1605 1606 - /** 1607 * BFA IO module state machine functions 1608 */ 1609 1610 - /** 1611 * IO is not started (unallocated). 1612 */ 1613 static void ··· 1657 break; 1658 1659 case BFA_IOIM_SM_ABORT: 1660 - /** 1661 * IO in pending queue can get abort requests. Complete abort 1662 * requests immediately. 1663 */ ··· 1672 } 1673 } 1674 1675 - /** 1676 * IO is waiting for SG pages. 1677 */ 1678 static void ··· 1719 } 1720 } 1721 1722 - /** 1723 * IO is active. 1724 */ 1725 static void ··· 1803 } 1804 } 1805 1806 - /** 1807 * IO is retried with new tag. 1808 */ 1809 static void ··· 1844 break; 1845 1846 case BFA_IOIM_SM_ABORT: 1847 - /** in this state IO abort is done. 1848 * Waiting for IO tag resource free. 1849 */ 1850 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); ··· 1857 } 1858 } 1859 1860 - /** 1861 * IO is being aborted, waiting for completion from firmware. 1862 */ 1863 static void ··· 1919 } 1920 } 1921 1922 - /** 1923 * IO is being cleaned up (implicit abort), waiting for completion from 1924 * firmware. 1925 */ ··· 1937 break; 1938 1939 case BFA_IOIM_SM_ABORT: 1940 - /** 1941 * IO is already being aborted implicitly 1942 */ 1943 ioim->io_cbfn = __bfa_cb_ioim_abort; ··· 1969 break; 1970 1971 case BFA_IOIM_SM_CLEANUP: 1972 - /** 1973 * IO can be in cleanup state already due to TM command. 1974 * 2nd cleanup request comes from ITN offline event. 1975 */ ··· 1980 } 1981 } 1982 1983 - /** 1984 * IO is waiting for room in request CQ 1985 */ 1986 static void ··· 2024 } 2025 } 2026 2027 - /** 2028 * Active IO is being aborted, waiting for room in request CQ. 2029 */ 2030 static void ··· 2075 } 2076 } 2077 2078 - /** 2079 * Active IO is being cleaned up, waiting for room in request CQ. 2080 */ 2081 static void ··· 2091 break; 2092 2093 case BFA_IOIM_SM_ABORT: 2094 - /** 2095 * IO is alraedy being cleaned up implicitly 2096 */ 2097 ioim->io_cbfn = __bfa_cb_ioim_abort; ··· 2125 } 2126 } 2127 2128 - /** 2129 * IO bfa callback is pending. 2130 */ 2131 static void ··· 2152 } 2153 } 2154 2155 - /** 2156 * IO bfa callback is pending. IO resource cannot be freed. 2157 */ 2158 static void ··· 2185 } 2186 } 2187 2188 - /** 2189 * IO is completed, waiting resource free from firmware. 
2190 */ 2191 static void ··· 2214 2215 2216 2217 - /** 2218 * hal_ioim_private 2219 */ 2220 ··· 2247 2248 m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg; 2249 if (m->io_status == BFI_IOIM_STS_OK) { 2250 - /** 2251 * setup sense information, if present 2252 */ 2253 if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) && ··· 2256 snsinfo = ioim->iosp->snsinfo; 2257 } 2258 2259 - /** 2260 * setup residue value correctly for normal completions 2261 */ 2262 if (m->resid_flags == FCP_RESID_UNDER) { ··· 2327 bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED); 2328 } 2329 2330 - /** 2331 * Send I/O request to firmware. 2332 */ 2333 static bfa_boolean_t ··· 2343 struct scatterlist *sg; 2344 struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio; 2345 2346 - /** 2347 * check for room in queue to send request now 2348 */ 2349 m = bfa_reqq_next(ioim->bfa, ioim->reqq); ··· 2354 return BFA_FALSE; 2355 } 2356 2357 - /** 2358 * build i/o request message next 2359 */ 2360 m->io_tag = cpu_to_be16(ioim->iotag); 2361 m->rport_hdl = ioim->itnim->rport->fw_handle; 2362 m->io_timeout = bfa_cb_ioim_get_timeout(ioim->dio); 2363 2364 - /** 2365 * build inline IO SG element here 2366 */ 2367 sge = &m->sges[0]; ··· 2387 sge->flags = BFI_SGE_PGDLEN; 2388 bfa_sge_to_be(sge); 2389 2390 - /** 2391 * set up I/O command parameters 2392 */ 2393 m->cmnd = cmnd_z0; ··· 2397 fcp_dl = bfa_cb_ioim_get_size(ioim->dio); 2398 m->cmnd.fcp_dl = cpu_to_be32(fcp_dl); 2399 2400 - /** 2401 * set up I/O message header 2402 */ 2403 switch (m->cmnd.iodir) { ··· 2426 m->cmnd.priority = bfa_cb_ioim_get_priority(ioim->dio); 2427 m->cmnd.taskattr = bfa_cb_ioim_get_taskattr(ioim->dio); 2428 2429 - /** 2430 * Handle large CDB (>16 bytes). 2431 */ 2432 m->cmnd.addl_cdb_len = (bfa_cb_ioim_get_cdblen(ioim->dio) - ··· 2440 } 2441 #endif 2442 2443 - /** 2444 * queue I/O message to firmware 2445 */ 2446 bfa_reqq_produce(ioim->bfa, ioim->reqq); 2447 return BFA_TRUE; 2448 } 2449 2450 - /** 2451 * Setup any additional SG pages needed.Inline SG element is setup 2452 * at queuing time. 2453 */ ··· 2458 2459 bfa_assert(ioim->nsges > BFI_SGE_INLINE); 2460 2461 - /** 2462 * allocate SG pages needed 2463 */ 2464 nsgpgs = BFA_SGPG_NPAGE(ioim->nsges); ··· 2507 sge->sg_len = sg_dma_len(sg); 2508 pgcumsz += sge->sg_len; 2509 2510 - /** 2511 * set flags 2512 */ 2513 if (i < (nsges - 1)) ··· 2522 2523 sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg); 2524 2525 - /** 2526 * set the link element of each page 2527 */ 2528 if (sgeid == ioim->nsges) { ··· 2539 } while (sgeid < ioim->nsges); 2540 } 2541 2542 - /** 2543 * Send I/O abort request to firmware. 2544 */ 2545 static bfa_boolean_t ··· 2548 struct bfi_ioim_abort_req_s *m; 2549 enum bfi_ioim_h2i msgop; 2550 2551 - /** 2552 * check for room in queue to send request now 2553 */ 2554 m = bfa_reqq_next(ioim->bfa, ioim->reqq); 2555 if (!m) 2556 return BFA_FALSE; 2557 2558 - /** 2559 * build i/o request message next 2560 */ 2561 if (ioim->iosp->abort_explicit) ··· 2567 m->io_tag = cpu_to_be16(ioim->iotag); 2568 m->abort_tag = ++ioim->abort_tag; 2569 2570 - /** 2571 * queue I/O message to firmware 2572 */ 2573 bfa_reqq_produce(ioim->bfa, ioim->reqq); 2574 return BFA_TRUE; 2575 } 2576 2577 - /** 2578 * Call to resume any I/O requests waiting for room in request queue. 2579 */ 2580 static void ··· 2590 static void 2591 bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim) 2592 { 2593 - /** 2594 * Move IO from itnim queue to fcpim global queue since itnim will be 2595 * freed. 
2596 */ ··· 2623 return BFA_TRUE; 2624 } 2625 2626 - /** 2627 * or after the link comes back. 2628 */ 2629 void 2630 bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov) 2631 { 2632 - /** 2633 * If path tov timer expired, failback with PATHTOV status - these 2634 * IO requests are not normally retried by IO stack. 2635 * ··· 2644 } 2645 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim); 2646 2647 - /** 2648 * Move IO to fcpim global queue since itnim will be 2649 * freed. 2650 */ ··· 2654 2655 2656 2657 - /** 2658 * hal_ioim_friend 2659 */ 2660 2661 - /** 2662 * Memory allocation and initialization. 2663 */ 2664 void ··· 2670 u8 *snsinfo; 2671 u32 snsbufsz; 2672 2673 - /** 2674 * claim memory first 2675 */ 2676 ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo); ··· 2681 fcpim->ioim_sp_arr = iosp; 2682 bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs); 2683 2684 - /** 2685 * Claim DMA memory for per IO sense data. 2686 */ 2687 snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN; ··· 2693 snsinfo = fcpim->snsbase.kva; 2694 bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa); 2695 2696 - /** 2697 * Initialize ioim free queues 2698 */ 2699 INIT_LIST_HEAD(&fcpim->ioim_free_q); ··· 2722 } 2723 } 2724 2725 - /** 2726 * Driver detach time call. 2727 */ 2728 void ··· 2858 io_lat->max[index] : val; 2859 io_lat->avg[index] += val; 2860 } 2861 - /** 2862 * Called by itnim to clean up IO while going offline. 2863 */ 2864 void ··· 2881 bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP); 2882 } 2883 2884 - /** 2885 * IOC failure handling. 2886 */ 2887 void ··· 2892 bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL); 2893 } 2894 2895 - /** 2896 * IO offline TOV popped. Fail the pending IO. 2897 */ 2898 void ··· 2904 2905 2906 2907 - /** 2908 * hal_ioim_api 2909 */ 2910 2911 - /** 2912 * Allocate IOIM resource for initiator mode I/O request. 2913 */ 2914 struct bfa_ioim_s * ··· 2918 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 2919 struct bfa_ioim_s *ioim; 2920 2921 - /** 2922 * alocate IOIM resource 2923 */ 2924 bfa_q_deq(&fcpim->ioim_free_q, &ioim); ··· 2969 2970 bfa_ioim_cb_profile_start(ioim->fcpim, ioim); 2971 2972 - /** 2973 * Obtain the queue over which this request has to be issued 2974 */ 2975 ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ? ··· 2979 bfa_sm_send_event(ioim, BFA_IOIM_SM_START); 2980 } 2981 2982 - /** 2983 * Driver I/O abort request. 2984 */ 2985 bfa_status_t ··· 2998 } 2999 3000 3001 - /** 3002 * BFA TSKIM state machine functions 3003 */ 3004 3005 - /** 3006 * Task management command beginning state. 3007 */ 3008 static void ··· 3015 bfa_sm_set_state(tskim, bfa_tskim_sm_active); 3016 bfa_tskim_gather_ios(tskim); 3017 3018 - /** 3019 * If device is offline, do not send TM on wire. Just cleanup 3020 * any pending IO requests and complete TM request. 3021 */ ··· 3039 } 3040 } 3041 3042 - /** 3043 * brief 3044 * TM command is active, awaiting completion from firmware to 3045 * cleanup IO requests in TM scope. ··· 3076 } 3077 } 3078 3079 - /** 3080 * An active TM is being cleaned up since ITN is offline. Awaiting cleanup 3081 * completion event from firmware. 3082 */ ··· 3087 3088 switch (event) { 3089 case BFA_TSKIM_SM_DONE: 3090 - /** 3091 * Ignore and wait for ABORT completion from firmware. 3092 */ 3093 break; ··· 3120 break; 3121 3122 case BFA_TSKIM_SM_CLEANUP: 3123 - /** 3124 * Ignore, TM command completed on wire. 3125 * Notify TM conmpletion on IO cleanup completion. 
3126 */ ··· 3137 } 3138 } 3139 3140 - /** 3141 * Task management command is waiting for room in request CQ 3142 */ 3143 static void ··· 3152 break; 3153 3154 case BFA_TSKIM_SM_CLEANUP: 3155 - /** 3156 * No need to send TM on wire since ITN is offline. 3157 */ 3158 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup); ··· 3172 } 3173 } 3174 3175 - /** 3176 * Task management command is active, awaiting for room in request CQ 3177 * to send clean up request. 3178 */ ··· 3185 switch (event) { 3186 case BFA_TSKIM_SM_DONE: 3187 bfa_reqq_wcancel(&tskim->reqq_wait); 3188 - /** 3189 * 3190 * Fall through !!! 3191 */ ··· 3207 } 3208 } 3209 3210 - /** 3211 * BFA callback is pending 3212 */ 3213 static void ··· 3235 3236 3237 3238 - /** 3239 * hal_tskim_private 3240 */ 3241 ··· 3288 return BFA_FALSE; 3289 } 3290 3291 - /** 3292 * Gather affected IO requests and task management commands. 3293 */ 3294 static void ··· 3300 3301 INIT_LIST_HEAD(&tskim->io_q); 3302 3303 - /** 3304 * Gather any active IO requests first. 3305 */ 3306 list_for_each_safe(qe, qen, &itnim->io_q) { ··· 3312 } 3313 } 3314 3315 - /** 3316 * Failback any pending IO requests immediately. 3317 */ 3318 list_for_each_safe(qe, qen, &itnim->pending_q) { ··· 3326 } 3327 } 3328 3329 - /** 3330 * IO cleanup completion 3331 */ 3332 static void ··· 3338 bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE); 3339 } 3340 3341 - /** 3342 * Gather affected IO requests and task management commands. 3343 */ 3344 static void ··· 3358 bfa_wc_wait(&tskim->wc); 3359 } 3360 3361 - /** 3362 * Send task management request to firmware. 3363 */ 3364 static bfa_boolean_t ··· 3367 struct bfa_itnim_s *itnim = tskim->itnim; 3368 struct bfi_tskim_req_s *m; 3369 3370 - /** 3371 * check for room in queue to send request now 3372 */ 3373 m = bfa_reqq_next(tskim->bfa, itnim->reqq); 3374 if (!m) 3375 return BFA_FALSE; 3376 3377 - /** 3378 * build i/o request message next 3379 */ 3380 bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ, ··· 3386 m->lun = tskim->lun; 3387 m->tm_flags = tskim->tm_cmnd; 3388 3389 - /** 3390 * queue I/O message to firmware 3391 */ 3392 bfa_reqq_produce(tskim->bfa, itnim->reqq); 3393 return BFA_TRUE; 3394 } 3395 3396 - /** 3397 * Send abort request to cleanup an active TM to firmware. 3398 */ 3399 static bfa_boolean_t ··· 3402 struct bfa_itnim_s *itnim = tskim->itnim; 3403 struct bfi_tskim_abortreq_s *m; 3404 3405 - /** 3406 * check for room in queue to send request now 3407 */ 3408 m = bfa_reqq_next(tskim->bfa, itnim->reqq); 3409 if (!m) 3410 return BFA_FALSE; 3411 3412 - /** 3413 * build i/o request message next 3414 */ 3415 bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ, ··· 3417 3418 m->tsk_tag = cpu_to_be16(tskim->tsk_tag); 3419 3420 - /** 3421 * queue I/O message to firmware 3422 */ 3423 bfa_reqq_produce(tskim->bfa, itnim->reqq); 3424 return BFA_TRUE; 3425 } 3426 3427 - /** 3428 * Call to resume task management cmnd waiting for room in request queue. 3429 */ 3430 static void ··· 3436 bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME); 3437 } 3438 3439 - /** 3440 * Cleanup IOs associated with a task mangement command on IOC failures. 3441 */ 3442 static void ··· 3453 3454 3455 3456 - /** 3457 * hal_tskim_friend 3458 */ 3459 3460 - /** 3461 * Notification on completions from related ioim. 3462 */ 3463 void ··· 3466 bfa_wc_down(&tskim->wc); 3467 } 3468 3469 - /** 3470 * Handle IOC h/w failure notification from itnim. 
3471 */ 3472 void ··· 3477 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL); 3478 } 3479 3480 - /** 3481 * Cleanup TM command and associated IOs as part of ITNIM offline. 3482 */ 3483 void ··· 3488 bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP); 3489 } 3490 3491 - /** 3492 * Memory allocation and initialization. 3493 */ 3494 void ··· 3524 void 3525 bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim) 3526 { 3527 - /** 3528 * @todo 3529 */ 3530 } ··· 3542 3543 tskim->tsk_status = rsp->tsk_status; 3544 3545 - /** 3546 * Firmware sends BFI_TSKIM_STS_ABORTED status for abort 3547 * requests. All other statuses are for normal completions. 3548 */ ··· 3557 3558 3559 3560 - /** 3561 * hal_tskim_api 3562 */ 3563 ··· 3584 list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q); 3585 } 3586 3587 - /** 3588 * Start a task management command. 3589 * 3590 * @param[in] tskim BFA task management command instance
··· 26 (__l->__stats += __r->__stats) 27 28 29 + /* 30 * BFA ITNIM Related definitions 31 */ 32 static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim); ··· 72 } \ 73 } while (0) 74 75 + /* 76 * bfa_itnim_sm BFA itnim state machine 77 */ 78 ··· 89 BFA_ITNIM_SM_QRESUME = 9, /* queue space available */ 90 }; 91 92 + /* 93 * BFA IOIM related definitions 94 */ 95 #define bfa_ioim_move_to_comp_q(__ioim) do { \ ··· 107 if ((__fcpim)->profile_start) \ 108 (__fcpim)->profile_start(__ioim); \ 109 } while (0) 110 + /* 111 * hal_ioim_sm 112 */ 113 114 + /* 115 * IO state machine events 116 */ 117 enum bfa_ioim_event { ··· 136 }; 137 138 139 + /* 140 * BFA TSKIM related definitions 141 */ 142 143 + /* 144 * task management completion handling 145 */ 146 #define bfa_tskim_qcomp(__tskim, __cbfn) do { \ ··· 165 BFA_TSKIM_SM_CLEANUP_DONE = 9, /* TM abort completion */ 166 }; 167 168 + /* 169 * forward declaration for BFA ITNIM functions 170 */ 171 static void bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim); ··· 183 static void bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim); 184 static void bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim); 185 186 + /* 187 * forward declaration of ITNIM state machine 188 */ 189 static void bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, ··· 217 static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim, 218 enum bfa_itnim_event event); 219 220 + /* 221 * forward declaration for BFA IOIM functions 222 */ 223 static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim); ··· 233 static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim); 234 235 236 + /* 237 * forward declaration of BFA IO state machine 238 */ 239 static void bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, ··· 261 static void bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, 262 enum bfa_ioim_event event); 263 264 + /* 265 * forward declaration for BFA TSKIM functions 266 */ 267 static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete); ··· 276 static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim); 277 278 279 + /* 280 * forward declaration of BFA TSKIM state machine 281 */ 282 static void bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, ··· 294 static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, 295 enum bfa_tskim_event event); 296 297 + /* 298 * hal_fcpim_mod BFA FCP Initiator Mode module 299 */ 300 301 + /* 302 * Compute and return memory needed by FCP(im) module. 303 */ 304 static void ··· 307 { 308 bfa_itnim_meminfo(cfg, km_len, dm_len); 309 310 + /* 311 * IO memory 312 */ 313 if (cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN) ··· 320 321 *dm_len += cfg->fwcfg.num_ioim_reqs * BFI_IOIM_SNSLEN; 322 323 + /* 324 * task management command memory 325 */ 326 if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN) ··· 604 605 606 607 + /* 608 * BFA ITNIM module state machine functions 609 */ 610 611 + /* 612 * Beginning/unallocated state - no events expected. 613 */ 614 static void ··· 629 } 630 } 631 632 + /* 633 * Beginning state, only online event expected. 634 */ 635 static void ··· 660 } 661 } 662 663 + /* 664 * Waiting for itnim create response from firmware. 665 */ 666 static void ··· 732 } 733 } 734 735 + /* 736 * Waiting for itnim create response from firmware, a delete is pending. 737 */ 738 static void ··· 760 } 761 } 762 763 + /* 764 * Online state - normal parking state. 765 */ 766 static void ··· 802 } 803 } 804 805 + /* 806 * Second level error recovery need. 807 */ 808 static void ··· 833 } 834 } 835 836 + /* 837 * Going offline. 
Waiting for active IO cleanup. 838 */ 839 static void ··· 870 } 871 } 872 873 + /* 874 * Deleting itnim. Waiting for active IO cleanup. 875 */ 876 static void ··· 898 } 899 } 900 901 + /* 902 * Rport offline. Fimrware itnim is being deleted - awaiting f/w response. 903 */ 904 static void ··· 955 } 956 } 957 958 + /* 959 * Offline state. 960 */ 961 static void ··· 987 } 988 } 989 990 + /* 991 * IOC h/w failed state. 992 */ 993 static void ··· 1023 } 1024 } 1025 1026 + /* 1027 * Itnim is deleted, waiting for firmware response to delete. 1028 */ 1029 static void ··· 1068 } 1069 } 1070 1071 + /* 1072 * Initiate cleanup of all IOs on an IOC failure. 1073 */ 1074 static void ··· 1088 bfa_ioim_iocdisable(ioim); 1089 } 1090 1091 + /* 1092 * For IO request in pending queue, we pretend an early timeout. 1093 */ 1094 list_for_each_safe(qe, qen, &itnim->pending_q) { ··· 1102 } 1103 } 1104 1105 + /* 1106 * IO cleanup completion 1107 */ 1108 static void ··· 1114 bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP); 1115 } 1116 1117 + /* 1118 * Initiate cleanup of all IOs. 1119 */ 1120 static void ··· 1129 list_for_each_safe(qe, qen, &itnim->io_q) { 1130 ioim = (struct bfa_ioim_s *) qe; 1131 1132 + /* 1133 * Move IO to a cleanup queue from active queue so that a later 1134 * TM will not pickup this IO. 1135 */ ··· 1176 bfa_cb_itnim_sler(itnim->ditn); 1177 } 1178 1179 + /* 1180 * Call to resume any I/O requests waiting for room in request queue. 1181 */ 1182 static void ··· 1190 1191 1192 1193 + /* 1194 * bfa_itnim_public 1195 */ 1196 ··· 1210 bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, 1211 u32 *dm_len) 1212 { 1213 + /* 1214 * ITN memory 1215 */ 1216 *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s); ··· 1264 1265 itnim->msg_no++; 1266 1267 + /* 1268 * check for room in queue to send request now 1269 */ 1270 m = bfa_reqq_next(itnim->bfa, itnim->reqq); ··· 1281 m->msg_no = itnim->msg_no; 1282 bfa_stats(itnim, fw_create); 1283 1284 + /* 1285 * queue I/O message to firmware 1286 */ 1287 bfa_reqq_produce(itnim->bfa, itnim->reqq); ··· 1293 { 1294 struct bfi_itnim_delete_req_s *m; 1295 1296 + /* 1297 * check for room in queue to send request now 1298 */ 1299 m = bfa_reqq_next(itnim->bfa, itnim->reqq); ··· 1307 m->fw_handle = itnim->rport->fw_handle; 1308 bfa_stats(itnim, fw_delete); 1309 1310 + /* 1311 * queue I/O message to firmware 1312 */ 1313 bfa_reqq_produce(itnim->bfa, itnim->reqq); 1314 return BFA_TRUE; 1315 } 1316 1317 + /* 1318 * Cleanup all pending failed inflight requests. 1319 */ 1320 static void ··· 1329 } 1330 } 1331 1332 + /* 1333 * Start all pending IO requests. 1334 */ 1335 static void ··· 1339 1340 bfa_itnim_iotov_stop(itnim); 1341 1342 + /* 1343 * Abort all inflight IO requests in the queue 1344 */ 1345 bfa_itnim_delayed_comp(itnim, BFA_FALSE); 1346 1347 + /* 1348 * Start all pending IO requests. 1349 */ 1350 while (!list_empty(&itnim->pending_q)) { ··· 1354 } 1355 } 1356 1357 + /* 1358 * Fail all pending IO requests 1359 */ 1360 static void ··· 1362 { 1363 struct bfa_ioim_s *ioim; 1364 1365 + /* 1366 * Fail all inflight IO requests in the queue 1367 */ 1368 bfa_itnim_delayed_comp(itnim, BFA_TRUE); 1369 1370 + /* 1371 * Fail any pending IO requests. 1372 */ 1373 while (!list_empty(&itnim->pending_q)) { ··· 1377 } 1378 } 1379 1380 + /* 1381 * IO TOV timer callback. Fail any pending IO requests. 1382 */ 1383 static void ··· 1392 bfa_cb_itnim_tov(itnim->ditn); 1393 } 1394 1395 + /* 1396 * Start IO TOV timer for failing back pending IO requests in offline state. 
1397 */ 1398 static void ··· 1407 } 1408 } 1409 1410 + /* 1411 * Stop IO TOV timer. 1412 */ 1413 static void ··· 1419 } 1420 } 1421 1422 + /* 1423 * Stop IO TOV timer. 1424 */ 1425 static void ··· 1459 1460 1461 1462 + /* 1463 * bfa_itnim_public 1464 */ 1465 1466 + /* 1467 * Itnim interrupt processing. 1468 */ 1469 void ··· 1509 1510 1511 1512 + /* 1513 * bfa_itnim_api 1514 */ 1515 ··· 1552 bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE); 1553 } 1554 1555 + /* 1556 * Return true if itnim is considered offline for holding off IO request. 1557 * IO is not held if itnim is being deleted. 1558 */ ··· 1603 itnim->ioprofile.io_latency.min[j] = ~0; 1604 } 1605 1606 + /* 1607 * BFA IO module state machine functions 1608 */ 1609 1610 + /* 1611 * IO is not started (unallocated). 1612 */ 1613 static void ··· 1657 break; 1658 1659 case BFA_IOIM_SM_ABORT: 1660 + /* 1661 * IO in pending queue can get abort requests. Complete abort 1662 * requests immediately. 1663 */ ··· 1672 } 1673 } 1674 1675 + /* 1676 * IO is waiting for SG pages. 1677 */ 1678 static void ··· 1719 } 1720 } 1721 1722 + /* 1723 * IO is active. 1724 */ 1725 static void ··· 1803 } 1804 } 1805 1806 + /* 1807 * IO is retried with new tag. 1808 */ 1809 static void ··· 1844 break; 1845 1846 case BFA_IOIM_SM_ABORT: 1847 + /* in this state IO abort is done. 1848 * Waiting for IO tag resource free. 1849 */ 1850 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); ··· 1857 } 1858 } 1859 1860 + /* 1861 * IO is being aborted, waiting for completion from firmware. 1862 */ 1863 static void ··· 1919 } 1920 } 1921 1922 + /* 1923 * IO is being cleaned up (implicit abort), waiting for completion from 1924 * firmware. 1925 */ ··· 1937 break; 1938 1939 case BFA_IOIM_SM_ABORT: 1940 + /* 1941 * IO is already being aborted implicitly 1942 */ 1943 ioim->io_cbfn = __bfa_cb_ioim_abort; ··· 1969 break; 1970 1971 case BFA_IOIM_SM_CLEANUP: 1972 + /* 1973 * IO can be in cleanup state already due to TM command. 1974 * 2nd cleanup request comes from ITN offline event. 1975 */ ··· 1980 } 1981 } 1982 1983 + /* 1984 * IO is waiting for room in request CQ 1985 */ 1986 static void ··· 2024 } 2025 } 2026 2027 + /* 2028 * Active IO is being aborted, waiting for room in request CQ. 2029 */ 2030 static void ··· 2075 } 2076 } 2077 2078 + /* 2079 * Active IO is being cleaned up, waiting for room in request CQ. 2080 */ 2081 static void ··· 2091 break; 2092 2093 case BFA_IOIM_SM_ABORT: 2094 + /* 2095 * IO is alraedy being cleaned up implicitly 2096 */ 2097 ioim->io_cbfn = __bfa_cb_ioim_abort; ··· 2125 } 2126 } 2127 2128 + /* 2129 * IO bfa callback is pending. 2130 */ 2131 static void ··· 2152 } 2153 } 2154 2155 + /* 2156 * IO bfa callback is pending. IO resource cannot be freed. 2157 */ 2158 static void ··· 2185 } 2186 } 2187 2188 + /* 2189 * IO is completed, waiting resource free from firmware. 2190 */ 2191 static void ··· 2214 2215 2216 2217 + /* 2218 * hal_ioim_private 2219 */ 2220 ··· 2247 2248 m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg; 2249 if (m->io_status == BFI_IOIM_STS_OK) { 2250 + /* 2251 * setup sense information, if present 2252 */ 2253 if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) && ··· 2256 snsinfo = ioim->iosp->snsinfo; 2257 } 2258 2259 + /* 2260 * setup residue value correctly for normal completions 2261 */ 2262 if (m->resid_flags == FCP_RESID_UNDER) { ··· 2327 bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED); 2328 } 2329 2330 + /* 2331 * Send I/O request to firmware. 
2332 */ 2333 static bfa_boolean_t ··· 2343 struct scatterlist *sg; 2344 struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio; 2345 2346 + /* 2347 * check for room in queue to send request now 2348 */ 2349 m = bfa_reqq_next(ioim->bfa, ioim->reqq); ··· 2354 return BFA_FALSE; 2355 } 2356 2357 + /* 2358 * build i/o request message next 2359 */ 2360 m->io_tag = cpu_to_be16(ioim->iotag); 2361 m->rport_hdl = ioim->itnim->rport->fw_handle; 2362 m->io_timeout = bfa_cb_ioim_get_timeout(ioim->dio); 2363 2364 + /* 2365 * build inline IO SG element here 2366 */ 2367 sge = &m->sges[0]; ··· 2387 sge->flags = BFI_SGE_PGDLEN; 2388 bfa_sge_to_be(sge); 2389 2390 + /* 2391 * set up I/O command parameters 2392 */ 2393 m->cmnd = cmnd_z0; ··· 2397 fcp_dl = bfa_cb_ioim_get_size(ioim->dio); 2398 m->cmnd.fcp_dl = cpu_to_be32(fcp_dl); 2399 2400 + /* 2401 * set up I/O message header 2402 */ 2403 switch (m->cmnd.iodir) { ··· 2426 m->cmnd.priority = bfa_cb_ioim_get_priority(ioim->dio); 2427 m->cmnd.taskattr = bfa_cb_ioim_get_taskattr(ioim->dio); 2428 2429 + /* 2430 * Handle large CDB (>16 bytes). 2431 */ 2432 m->cmnd.addl_cdb_len = (bfa_cb_ioim_get_cdblen(ioim->dio) - ··· 2440 } 2441 #endif 2442 2443 + /* 2444 * queue I/O message to firmware 2445 */ 2446 bfa_reqq_produce(ioim->bfa, ioim->reqq); 2447 return BFA_TRUE; 2448 } 2449 2450 + /* 2451 * Setup any additional SG pages needed.Inline SG element is setup 2452 * at queuing time. 2453 */ ··· 2458 2459 bfa_assert(ioim->nsges > BFI_SGE_INLINE); 2460 2461 + /* 2462 * allocate SG pages needed 2463 */ 2464 nsgpgs = BFA_SGPG_NPAGE(ioim->nsges); ··· 2507 sge->sg_len = sg_dma_len(sg); 2508 pgcumsz += sge->sg_len; 2509 2510 + /* 2511 * set flags 2512 */ 2513 if (i < (nsges - 1)) ··· 2522 2523 sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg); 2524 2525 + /* 2526 * set the link element of each page 2527 */ 2528 if (sgeid == ioim->nsges) { ··· 2539 } while (sgeid < ioim->nsges); 2540 } 2541 2542 + /* 2543 * Send I/O abort request to firmware. 2544 */ 2545 static bfa_boolean_t ··· 2548 struct bfi_ioim_abort_req_s *m; 2549 enum bfi_ioim_h2i msgop; 2550 2551 + /* 2552 * check for room in queue to send request now 2553 */ 2554 m = bfa_reqq_next(ioim->bfa, ioim->reqq); 2555 if (!m) 2556 return BFA_FALSE; 2557 2558 + /* 2559 * build i/o request message next 2560 */ 2561 if (ioim->iosp->abort_explicit) ··· 2567 m->io_tag = cpu_to_be16(ioim->iotag); 2568 m->abort_tag = ++ioim->abort_tag; 2569 2570 + /* 2571 * queue I/O message to firmware 2572 */ 2573 bfa_reqq_produce(ioim->bfa, ioim->reqq); 2574 return BFA_TRUE; 2575 } 2576 2577 + /* 2578 * Call to resume any I/O requests waiting for room in request queue. 2579 */ 2580 static void ··· 2590 static void 2591 bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim) 2592 { 2593 + /* 2594 * Move IO from itnim queue to fcpim global queue since itnim will be 2595 * freed. 2596 */ ··· 2623 return BFA_TRUE; 2624 } 2625 2626 + /* 2627 * or after the link comes back. 2628 */ 2629 void 2630 bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov) 2631 { 2632 + /* 2633 * If path tov timer expired, failback with PATHTOV status - these 2634 * IO requests are not normally retried by IO stack. 2635 * ··· 2644 } 2645 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim); 2646 2647 + /* 2648 * Move IO to fcpim global queue since itnim will be 2649 * freed. 2650 */ ··· 2654 2655 2656 2657 + /* 2658 * hal_ioim_friend 2659 */ 2660 2661 + /* 2662 * Memory allocation and initialization. 
2663 */ 2664 void ··· 2670 u8 *snsinfo; 2671 u32 snsbufsz; 2672 2673 + /* 2674 * claim memory first 2675 */ 2676 ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo); ··· 2681 fcpim->ioim_sp_arr = iosp; 2682 bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs); 2683 2684 + /* 2685 * Claim DMA memory for per IO sense data. 2686 */ 2687 snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN; ··· 2693 snsinfo = fcpim->snsbase.kva; 2694 bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa); 2695 2696 + /* 2697 * Initialize ioim free queues 2698 */ 2699 INIT_LIST_HEAD(&fcpim->ioim_free_q); ··· 2722 } 2723 } 2724 2725 + /* 2726 * Driver detach time call. 2727 */ 2728 void ··· 2858 io_lat->max[index] : val; 2859 io_lat->avg[index] += val; 2860 } 2861 + /* 2862 * Called by itnim to clean up IO while going offline. 2863 */ 2864 void ··· 2881 bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP); 2882 } 2883 2884 + /* 2885 * IOC failure handling. 2886 */ 2887 void ··· 2892 bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL); 2893 } 2894 2895 + /* 2896 * IO offline TOV popped. Fail the pending IO. 2897 */ 2898 void ··· 2904 2905 2906 2907 + /* 2908 * hal_ioim_api 2909 */ 2910 2911 + /* 2912 * Allocate IOIM resource for initiator mode I/O request. 2913 */ 2914 struct bfa_ioim_s * ··· 2918 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 2919 struct bfa_ioim_s *ioim; 2920 2921 + /* 2922 * alocate IOIM resource 2923 */ 2924 bfa_q_deq(&fcpim->ioim_free_q, &ioim); ··· 2969 2970 bfa_ioim_cb_profile_start(ioim->fcpim, ioim); 2971 2972 + /* 2973 * Obtain the queue over which this request has to be issued 2974 */ 2975 ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ? ··· 2979 bfa_sm_send_event(ioim, BFA_IOIM_SM_START); 2980 } 2981 2982 + /* 2983 * Driver I/O abort request. 2984 */ 2985 bfa_status_t ··· 2998 } 2999 3000 3001 + /* 3002 * BFA TSKIM state machine functions 3003 */ 3004 3005 + /* 3006 * Task management command beginning state. 3007 */ 3008 static void ··· 3015 bfa_sm_set_state(tskim, bfa_tskim_sm_active); 3016 bfa_tskim_gather_ios(tskim); 3017 3018 + /* 3019 * If device is offline, do not send TM on wire. Just cleanup 3020 * any pending IO requests and complete TM request. 3021 */ ··· 3039 } 3040 } 3041 3042 + /* 3043 * brief 3044 * TM command is active, awaiting completion from firmware to 3045 * cleanup IO requests in TM scope. ··· 3076 } 3077 } 3078 3079 + /* 3080 * An active TM is being cleaned up since ITN is offline. Awaiting cleanup 3081 * completion event from firmware. 3082 */ ··· 3087 3088 switch (event) { 3089 case BFA_TSKIM_SM_DONE: 3090 + /* 3091 * Ignore and wait for ABORT completion from firmware. 3092 */ 3093 break; ··· 3120 break; 3121 3122 case BFA_TSKIM_SM_CLEANUP: 3123 + /* 3124 * Ignore, TM command completed on wire. 3125 * Notify TM conmpletion on IO cleanup completion. 3126 */ ··· 3137 } 3138 } 3139 3140 + /* 3141 * Task management command is waiting for room in request CQ 3142 */ 3143 static void ··· 3152 break; 3153 3154 case BFA_TSKIM_SM_CLEANUP: 3155 + /* 3156 * No need to send TM on wire since ITN is offline. 3157 */ 3158 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup); ··· 3172 } 3173 } 3174 3175 + /* 3176 * Task management command is active, awaiting for room in request CQ 3177 * to send clean up request. 3178 */ ··· 3185 switch (event) { 3186 case BFA_TSKIM_SM_DONE: 3187 bfa_reqq_wcancel(&tskim->reqq_wait); 3188 + /* 3189 * 3190 * Fall through !!! 
3191 */ ··· 3207 } 3208 } 3209 3210 + /* 3211 * BFA callback is pending 3212 */ 3213 static void ··· 3235 3236 3237 3238 + /* 3239 * hal_tskim_private 3240 */ 3241 ··· 3288 return BFA_FALSE; 3289 } 3290 3291 + /* 3292 * Gather affected IO requests and task management commands. 3293 */ 3294 static void ··· 3300 3301 INIT_LIST_HEAD(&tskim->io_q); 3302 3303 + /* 3304 * Gather any active IO requests first. 3305 */ 3306 list_for_each_safe(qe, qen, &itnim->io_q) { ··· 3312 } 3313 } 3314 3315 + /* 3316 * Failback any pending IO requests immediately. 3317 */ 3318 list_for_each_safe(qe, qen, &itnim->pending_q) { ··· 3326 } 3327 } 3328 3329 + /* 3330 * IO cleanup completion 3331 */ 3332 static void ··· 3338 bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE); 3339 } 3340 3341 + /* 3342 * Gather affected IO requests and task management commands. 3343 */ 3344 static void ··· 3358 bfa_wc_wait(&tskim->wc); 3359 } 3360 3361 + /* 3362 * Send task management request to firmware. 3363 */ 3364 static bfa_boolean_t ··· 3367 struct bfa_itnim_s *itnim = tskim->itnim; 3368 struct bfi_tskim_req_s *m; 3369 3370 + /* 3371 * check for room in queue to send request now 3372 */ 3373 m = bfa_reqq_next(tskim->bfa, itnim->reqq); 3374 if (!m) 3375 return BFA_FALSE; 3376 3377 + /* 3378 * build i/o request message next 3379 */ 3380 bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ, ··· 3386 m->lun = tskim->lun; 3387 m->tm_flags = tskim->tm_cmnd; 3388 3389 + /* 3390 * queue I/O message to firmware 3391 */ 3392 bfa_reqq_produce(tskim->bfa, itnim->reqq); 3393 return BFA_TRUE; 3394 } 3395 3396 + /* 3397 * Send abort request to cleanup an active TM to firmware. 3398 */ 3399 static bfa_boolean_t ··· 3402 struct bfa_itnim_s *itnim = tskim->itnim; 3403 struct bfi_tskim_abortreq_s *m; 3404 3405 + /* 3406 * check for room in queue to send request now 3407 */ 3408 m = bfa_reqq_next(tskim->bfa, itnim->reqq); 3409 if (!m) 3410 return BFA_FALSE; 3411 3412 + /* 3413 * build i/o request message next 3414 */ 3415 bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ, ··· 3417 3418 m->tsk_tag = cpu_to_be16(tskim->tsk_tag); 3419 3420 + /* 3421 * queue I/O message to firmware 3422 */ 3423 bfa_reqq_produce(tskim->bfa, itnim->reqq); 3424 return BFA_TRUE; 3425 } 3426 3427 + /* 3428 * Call to resume task management cmnd waiting for room in request queue. 3429 */ 3430 static void ··· 3436 bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME); 3437 } 3438 3439 + /* 3440 * Cleanup IOs associated with a task mangement command on IOC failures. 3441 */ 3442 static void ··· 3453 3454 3455 3456 + /* 3457 * hal_tskim_friend 3458 */ 3459 3460 + /* 3461 * Notification on completions from related ioim. 3462 */ 3463 void ··· 3466 bfa_wc_down(&tskim->wc); 3467 } 3468 3469 + /* 3470 * Handle IOC h/w failure notification from itnim. 3471 */ 3472 void ··· 3477 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL); 3478 } 3479 3480 + /* 3481 * Cleanup TM command and associated IOs as part of ITNIM offline. 3482 */ 3483 void ··· 3488 bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP); 3489 } 3490 3491 + /* 3492 * Memory allocation and initialization. 3493 */ 3494 void ··· 3524 void 3525 bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim) 3526 { 3527 + /* 3528 * @todo 3529 */ 3530 } ··· 3542 3543 tskim->tsk_status = rsp->tsk_status; 3544 3545 + /* 3546 * Firmware sends BFI_TSKIM_STS_ABORTED status for abort 3547 * requests. All other statuses are for normal completions. 
3548 */ ··· 3557 3558 3559 3560 + /* 3561 * hal_tskim_api 3562 */ 3563 ··· 3584 list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q); 3585 } 3586 3587 + /* 3588 * Start a task management command. 3589 * 3590 * @param[in] tskim BFA task management command instance
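The comment blocks converted above open with "/**" but do not use the kernel-doc layout, which reserves that opener for comments parsed by scripts/kernel-doc. A minimal illustration of the difference, using a hypothetical helper rather than a real bfa function:

/**
 * bfa_example_sum() - kernel-doc style: summary line, @param and Return: tags
 * @a: first addend
 * @b: second addend
 *
 * Return: the sum of @a and @b.
 */
static inline int bfa_example_sum(int a, int b)
{
	return a + b;
}

/*
 * Plain block comment -- the style these bfa comments are converted to,
 * since they do not follow the kernel-doc layout shown above.
 */
static inline int bfa_example_diff(int a, int b)
{
	return a - b;
}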
+67 -67
drivers/scsi/bfa/bfa_fcs.c
··· 15 * General Public License for more details. 16 */ 17 18 - /** 19 * bfa_fcs.c BFA FCS main 20 */ 21 ··· 25 26 BFA_TRC_FILE(FCS, FCS); 27 28 - /** 29 * FCS sub-modules 30 */ 31 struct bfa_fcs_mod_s { ··· 43 bfa_fcs_fabric_modexit }, 44 }; 45 46 - /** 47 * fcs_api BFA FCS API 48 */ 49 ··· 58 59 60 61 - /** 62 * fcs_api BFA FCS API 63 */ 64 65 - /** 66 * fcs attach -- called once to initialize data structures at driver attach time 67 */ 68 void ··· 86 } 87 } 88 89 - /** 90 * fcs initialization, called once after bfa initialization is complete 91 */ 92 void ··· 110 } 111 } 112 113 - /** 114 * Start FCS operations. 115 */ 116 void ··· 119 bfa_fcs_fabric_modstart(fcs); 120 } 121 122 - /** 123 * brief 124 * FCS driver details initialization. 125 * ··· 138 bfa_fcs_fabric_psymb_init(&fcs->fabric); 139 } 140 141 - /** 142 * brief 143 * FCS FDMI Driver Parameter Initialization 144 * ··· 154 fcs->fdmi_enabled = fdmi_enable; 155 156 } 157 - /** 158 * brief 159 * FCS instance cleanup and exit. 160 * ··· 196 bfa_wc_down(&fcs->wc); 197 } 198 199 - /** 200 * Fabric module implementation. 201 */ 202 ··· 232 u32 rsp_len, 233 u32 resid_len, 234 struct fchs_s *rspfchs); 235 - /** 236 * fcs_fabric_sm fabric state machine functions 237 */ 238 239 - /** 240 * Fabric state machine events 241 */ 242 enum bfa_fcs_fabric_event { ··· 286 enum bfa_fcs_fabric_event event); 287 static void bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric, 288 enum bfa_fcs_fabric_event event); 289 - /** 290 * Beginning state before fabric creation. 291 */ 292 static void ··· 312 } 313 } 314 315 - /** 316 * Beginning state before fabric creation. 317 */ 318 static void ··· 345 } 346 } 347 348 - /** 349 * Link is down, awaiting LINK UP event from port. This is also the 350 * first state at fabric creation. 351 */ ··· 375 } 376 } 377 378 - /** 379 * FLOGI is in progress, awaiting FLOGI reply. 380 */ 381 static void ··· 468 } 469 } 470 471 - /** 472 * Authentication is in progress, awaiting authentication results. 473 */ 474 static void ··· 508 } 509 } 510 511 - /** 512 * Authentication failed 513 */ 514 static void ··· 534 } 535 } 536 537 - /** 538 * Port is in loopback mode. 539 */ 540 static void ··· 560 } 561 } 562 563 - /** 564 * There is no attached fabric - private loop or NPort-to-NPort topology. 565 */ 566 static void ··· 593 } 594 } 595 596 - /** 597 * Fabric is online - normal operating state. 598 */ 599 static void ··· 628 } 629 } 630 631 - /** 632 * Exchanging virtual fabric parameters. 633 */ 634 static void ··· 652 } 653 } 654 655 - /** 656 * EVFP exchange complete and VFT tagging is enabled. 657 */ 658 static void ··· 663 bfa_trc(fabric->fcs, event); 664 } 665 666 - /** 667 * Port is isolated after EVFP exchange due to VF_ID mismatch (N and F). 668 */ 669 static void ··· 684 fabric->event_arg.swp_vfid); 685 } 686 687 - /** 688 * Fabric is being deleted, awaiting vport delete completions. 689 */ 690 static void ··· 714 715 716 717 - /** 718 * fcs_fabric_private fabric private functions 719 */ 720 ··· 728 port_cfg->pwwn = bfa_ioc_get_pwwn(&fabric->fcs->bfa->ioc); 729 } 730 731 - /** 732 * Port Symbolic Name Creation for base port. 733 */ 734 void ··· 789 port_cfg->sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0; 790 } 791 792 - /** 793 * bfa lps login completion callback 794 */ 795 void ··· 867 bfa_trc(fabric->fcs, fabric->is_npiv); 868 bfa_trc(fabric->fcs, fabric->is_auth); 869 } 870 - /** 871 * Allocate and send FLOGI. 
872 */ 873 static void ··· 897 bfa_fcs_fabric_set_opertype(fabric); 898 fabric->stats.fabric_onlines++; 899 900 - /** 901 * notify online event to base and then virtual ports 902 */ 903 bfa_fcs_lport_online(&fabric->bport); ··· 917 bfa_trc(fabric->fcs, fabric->fabric_name); 918 fabric->stats.fabric_offlines++; 919 920 - /** 921 * notify offline event first to vports and then base port. 922 */ 923 list_for_each_safe(qe, qen, &fabric->vport_q) { ··· 939 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELAYED); 940 } 941 942 - /** 943 * Delete all vports and wait for vport delete completions. 944 */ 945 static void ··· 965 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELCOMP); 966 } 967 968 - /** 969 * fcs_fabric_public fabric public functions 970 */ 971 972 - /** 973 * Attach time initialization. 974 */ 975 void ··· 980 fabric = &fcs->fabric; 981 memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s)); 982 983 - /** 984 * Initialize base fabric. 985 */ 986 fabric->fcs = fcs; ··· 989 fabric->lps = bfa_lps_alloc(fcs->bfa); 990 bfa_assert(fabric->lps); 991 992 - /** 993 * Initialize fabric delete completion handler. Fabric deletion is 994 * complete when the last vport delete is complete. 995 */ ··· 1007 bfa_trc(fcs, 0); 1008 } 1009 1010 - /** 1011 * Module cleanup 1012 */ 1013 void ··· 1017 1018 bfa_trc(fcs, 0); 1019 1020 - /** 1021 * Cleanup base fabric. 1022 */ 1023 fabric = &fcs->fabric; ··· 1025 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELETE); 1026 } 1027 1028 - /** 1029 * Fabric module start -- kick starts FCS actions 1030 */ 1031 void ··· 1038 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_START); 1039 } 1040 1041 - /** 1042 * Suspend fabric activity as part of driver suspend. 1043 */ 1044 void ··· 1064 return fabric->oper_type; 1065 } 1066 1067 - /** 1068 * Link up notification from BFA physical port module. 1069 */ 1070 void ··· 1074 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_UP); 1075 } 1076 1077 - /** 1078 * Link down notification from BFA physical port module. 1079 */ 1080 void ··· 1084 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_DOWN); 1085 } 1086 1087 - /** 1088 * A child vport is being created in the fabric. 1089 * 1090 * Call from vport module at vport creation. A list of base port and vports ··· 1099 bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric, 1100 struct bfa_fcs_vport_s *vport) 1101 { 1102 - /** 1103 * - add vport to fabric's vport_q 1104 */ 1105 bfa_trc(fabric->fcs, fabric->vf_id); ··· 1109 bfa_wc_up(&fabric->wc); 1110 } 1111 1112 - /** 1113 * A child vport is being deleted from fabric. 1114 * 1115 * Vport is being deleted. ··· 1123 bfa_wc_down(&fabric->wc); 1124 } 1125 1126 - /** 1127 * Base port is deleted. 1128 */ 1129 void ··· 1133 } 1134 1135 1136 - /** 1137 * Check if fabric is online. 1138 * 1139 * param[in] fabric - Fabric instance. This can be a base fabric or vf. ··· 1146 return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_online); 1147 } 1148 1149 - /** 1150 * brief 1151 * 1152 */ ··· 1158 return BFA_STATUS_OK; 1159 } 1160 1161 - /** 1162 * Lookup for a vport withing a fabric given its pwwn 1163 */ 1164 struct bfa_fcs_vport_s * ··· 1176 return NULL; 1177 } 1178 1179 - /** 1180 * In a given fabric, return the number of lports. 1181 * 1182 * param[in] fabric - Fabric instance. This can be a base fabric or vf. ··· 1214 1215 return oui; 1216 } 1217 - /** 1218 * Unsolicited frame receive handling. 1219 */ 1220 void ··· 1230 bfa_trc(fabric->fcs, len); 1231 bfa_trc(fabric->fcs, pid); 1232 1233 - /** 1234 * Look for our own FLOGI frames being looped back. 
This means an 1235 * external loopback cable is in place. Our own FLOGI frames are 1236 * sometimes looped back when switch port gets temporarily bypassed. ··· 1242 return; 1243 } 1244 1245 - /** 1246 * FLOGI/EVFP exchanges should be consumed by base fabric. 1247 */ 1248 if (fchs->d_id == bfa_os_hton3b(FC_FABRIC_PORT)) { ··· 1252 } 1253 1254 if (fabric->bport.pid == pid) { 1255 - /** 1256 * All authentication frames should be routed to auth 1257 */ 1258 bfa_trc(fabric->fcs, els_cmd->els_code); ··· 1266 return; 1267 } 1268 1269 - /** 1270 * look for a matching local port ID 1271 */ 1272 list_for_each(qe, &fabric->vport_q) { ··· 1280 bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len); 1281 } 1282 1283 - /** 1284 * Unsolicited frames to be processed by fabric. 1285 */ 1286 static void ··· 1304 } 1305 } 1306 1307 - /** 1308 * Process incoming FLOGI 1309 */ 1310 static void ··· 1351 struct fchs_s fchs; 1352 1353 fcxp = bfa_fcs_fcxp_alloc(fabric->fcs); 1354 - /** 1355 * Do not expect this failure -- expect remote node to retry 1356 */ 1357 if (!fcxp) ··· 1370 FC_MAX_PDUSZ, 0); 1371 } 1372 1373 - /** 1374 * Flogi Acc completion callback. 1375 */ 1376 static void ··· 1417 } 1418 } 1419 1420 - /** 1421 * Returns FCS vf structure for a given vf_id. 1422 * 1423 * param[in] vf_id - VF_ID ··· 1435 return NULL; 1436 } 1437 1438 - /** 1439 * BFA FCS PPORT ( physical port) 1440 */ 1441 static void ··· 1465 bfa_fcport_event_register(fcs->bfa, bfa_fcs_port_event_handler, fcs); 1466 } 1467 1468 - /** 1469 * BFA FCS UF ( Unsolicited Frames) 1470 */ 1471 1472 - /** 1473 * BFA callback for unsolicited frame receive handler. 1474 * 1475 * @param[in] cbarg callback arg for receive handler ··· 1486 struct fc_vft_s *vft; 1487 struct bfa_fcs_fabric_s *fabric; 1488 1489 - /** 1490 * check for VFT header 1491 */ 1492 if (fchs->routing == FC_RTG_EXT_HDR && ··· 1498 else 1499 fabric = bfa_fcs_vf_lookup(fcs, (u16) vft->vf_id); 1500 1501 - /** 1502 * drop frame if vfid is unknown 1503 */ 1504 if (!fabric) { ··· 1508 return; 1509 } 1510 1511 - /** 1512 * skip vft header 1513 */ 1514 fchs = (struct fchs_s *) (vft + 1);
··· 15 * General Public License for more details. 16 */ 17 18 + /* 19 * bfa_fcs.c BFA FCS main 20 */ 21 ··· 25 26 BFA_TRC_FILE(FCS, FCS); 27 28 + /* 29 * FCS sub-modules 30 */ 31 struct bfa_fcs_mod_s { ··· 43 bfa_fcs_fabric_modexit }, 44 }; 45 46 + /* 47 * fcs_api BFA FCS API 48 */ 49 ··· 58 59 60 61 + /* 62 * fcs_api BFA FCS API 63 */ 64 65 + /* 66 * fcs attach -- called once to initialize data structures at driver attach time 67 */ 68 void ··· 86 } 87 } 88 89 + /* 90 * fcs initialization, called once after bfa initialization is complete 91 */ 92 void ··· 110 } 111 } 112 113 + /* 114 * Start FCS operations. 115 */ 116 void ··· 119 bfa_fcs_fabric_modstart(fcs); 120 } 121 122 + /* 123 * brief 124 * FCS driver details initialization. 125 * ··· 138 bfa_fcs_fabric_psymb_init(&fcs->fabric); 139 } 140 141 + /* 142 * brief 143 * FCS FDMI Driver Parameter Initialization 144 * ··· 154 fcs->fdmi_enabled = fdmi_enable; 155 156 } 157 + /* 158 * brief 159 * FCS instance cleanup and exit. 160 * ··· 196 bfa_wc_down(&fcs->wc); 197 } 198 199 + /* 200 * Fabric module implementation. 201 */ 202 ··· 232 u32 rsp_len, 233 u32 resid_len, 234 struct fchs_s *rspfchs); 235 + /* 236 * fcs_fabric_sm fabric state machine functions 237 */ 238 239 + /* 240 * Fabric state machine events 241 */ 242 enum bfa_fcs_fabric_event { ··· 286 enum bfa_fcs_fabric_event event); 287 static void bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric, 288 enum bfa_fcs_fabric_event event); 289 + /* 290 * Beginning state before fabric creation. 291 */ 292 static void ··· 312 } 313 } 314 315 + /* 316 * Beginning state before fabric creation. 317 */ 318 static void ··· 345 } 346 } 347 348 + /* 349 * Link is down, awaiting LINK UP event from port. This is also the 350 * first state at fabric creation. 351 */ ··· 375 } 376 } 377 378 + /* 379 * FLOGI is in progress, awaiting FLOGI reply. 380 */ 381 static void ··· 468 } 469 } 470 471 + /* 472 * Authentication is in progress, awaiting authentication results. 473 */ 474 static void ··· 508 } 509 } 510 511 + /* 512 * Authentication failed 513 */ 514 static void ··· 534 } 535 } 536 537 + /* 538 * Port is in loopback mode. 539 */ 540 static void ··· 560 } 561 } 562 563 + /* 564 * There is no attached fabric - private loop or NPort-to-NPort topology. 565 */ 566 static void ··· 593 } 594 } 595 596 + /* 597 * Fabric is online - normal operating state. 598 */ 599 static void ··· 628 } 629 } 630 631 + /* 632 * Exchanging virtual fabric parameters. 633 */ 634 static void ··· 652 } 653 } 654 655 + /* 656 * EVFP exchange complete and VFT tagging is enabled. 657 */ 658 static void ··· 663 bfa_trc(fabric->fcs, event); 664 } 665 666 + /* 667 * Port is isolated after EVFP exchange due to VF_ID mismatch (N and F). 668 */ 669 static void ··· 684 fabric->event_arg.swp_vfid); 685 } 686 687 + /* 688 * Fabric is being deleted, awaiting vport delete completions. 689 */ 690 static void ··· 714 715 716 717 + /* 718 * fcs_fabric_private fabric private functions 719 */ 720 ··· 728 port_cfg->pwwn = bfa_ioc_get_pwwn(&fabric->fcs->bfa->ioc); 729 } 730 731 + /* 732 * Port Symbolic Name Creation for base port. 733 */ 734 void ··· 789 port_cfg->sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0; 790 } 791 792 + /* 793 * bfa lps login completion callback 794 */ 795 void ··· 867 bfa_trc(fabric->fcs, fabric->is_npiv); 868 bfa_trc(fabric->fcs, fabric->is_auth); 869 } 870 + /* 871 * Allocate and send FLOGI. 
872 */ 873 static void ··· 897 bfa_fcs_fabric_set_opertype(fabric); 898 fabric->stats.fabric_onlines++; 899 900 + /* 901 * notify online event to base and then virtual ports 902 */ 903 bfa_fcs_lport_online(&fabric->bport); ··· 917 bfa_trc(fabric->fcs, fabric->fabric_name); 918 fabric->stats.fabric_offlines++; 919 920 + /* 921 * notify offline event first to vports and then base port. 922 */ 923 list_for_each_safe(qe, qen, &fabric->vport_q) { ··· 939 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELAYED); 940 } 941 942 + /* 943 * Delete all vports and wait for vport delete completions. 944 */ 945 static void ··· 965 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELCOMP); 966 } 967 968 + /* 969 * fcs_fabric_public fabric public functions 970 */ 971 972 + /* 973 * Attach time initialization. 974 */ 975 void ··· 980 fabric = &fcs->fabric; 981 memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s)); 982 983 + /* 984 * Initialize base fabric. 985 */ 986 fabric->fcs = fcs; ··· 989 fabric->lps = bfa_lps_alloc(fcs->bfa); 990 bfa_assert(fabric->lps); 991 992 + /* 993 * Initialize fabric delete completion handler. Fabric deletion is 994 * complete when the last vport delete is complete. 995 */ ··· 1007 bfa_trc(fcs, 0); 1008 } 1009 1010 + /* 1011 * Module cleanup 1012 */ 1013 void ··· 1017 1018 bfa_trc(fcs, 0); 1019 1020 + /* 1021 * Cleanup base fabric. 1022 */ 1023 fabric = &fcs->fabric; ··· 1025 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELETE); 1026 } 1027 1028 + /* 1029 * Fabric module start -- kick starts FCS actions 1030 */ 1031 void ··· 1038 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_START); 1039 } 1040 1041 + /* 1042 * Suspend fabric activity as part of driver suspend. 1043 */ 1044 void ··· 1064 return fabric->oper_type; 1065 } 1066 1067 + /* 1068 * Link up notification from BFA physical port module. 1069 */ 1070 void ··· 1074 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_UP); 1075 } 1076 1077 + /* 1078 * Link down notification from BFA physical port module. 1079 */ 1080 void ··· 1084 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_DOWN); 1085 } 1086 1087 + /* 1088 * A child vport is being created in the fabric. 1089 * 1090 * Call from vport module at vport creation. A list of base port and vports ··· 1099 bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric, 1100 struct bfa_fcs_vport_s *vport) 1101 { 1102 + /* 1103 * - add vport to fabric's vport_q 1104 */ 1105 bfa_trc(fabric->fcs, fabric->vf_id); ··· 1109 bfa_wc_up(&fabric->wc); 1110 } 1111 1112 + /* 1113 * A child vport is being deleted from fabric. 1114 * 1115 * Vport is being deleted. ··· 1123 bfa_wc_down(&fabric->wc); 1124 } 1125 1126 + /* 1127 * Base port is deleted. 1128 */ 1129 void ··· 1133 } 1134 1135 1136 + /* 1137 * Check if fabric is online. 1138 * 1139 * param[in] fabric - Fabric instance. This can be a base fabric or vf. ··· 1146 return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_online); 1147 } 1148 1149 + /* 1150 * brief 1151 * 1152 */ ··· 1158 return BFA_STATUS_OK; 1159 } 1160 1161 + /* 1162 * Lookup for a vport withing a fabric given its pwwn 1163 */ 1164 struct bfa_fcs_vport_s * ··· 1176 return NULL; 1177 } 1178 1179 + /* 1180 * In a given fabric, return the number of lports. 1181 * 1182 * param[in] fabric - Fabric instance. This can be a base fabric or vf. ··· 1214 1215 return oui; 1216 } 1217 + /* 1218 * Unsolicited frame receive handling. 1219 */ 1220 void ··· 1230 bfa_trc(fabric->fcs, len); 1231 bfa_trc(fabric->fcs, pid); 1232 1233 + /* 1234 * Look for our own FLOGI frames being looped back. 
This means an 1235 * external loopback cable is in place. Our own FLOGI frames are 1236 * sometimes looped back when switch port gets temporarily bypassed. ··· 1242 return; 1243 } 1244 1245 + /* 1246 * FLOGI/EVFP exchanges should be consumed by base fabric. 1247 */ 1248 if (fchs->d_id == bfa_os_hton3b(FC_FABRIC_PORT)) { ··· 1252 } 1253 1254 if (fabric->bport.pid == pid) { 1255 + /* 1256 * All authentication frames should be routed to auth 1257 */ 1258 bfa_trc(fabric->fcs, els_cmd->els_code); ··· 1266 return; 1267 } 1268 1269 + /* 1270 * look for a matching local port ID 1271 */ 1272 list_for_each(qe, &fabric->vport_q) { ··· 1280 bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len); 1281 } 1282 1283 + /* 1284 * Unsolicited frames to be processed by fabric. 1285 */ 1286 static void ··· 1304 } 1305 } 1306 1307 + /* 1308 * Process incoming FLOGI 1309 */ 1310 static void ··· 1351 struct fchs_s fchs; 1352 1353 fcxp = bfa_fcs_fcxp_alloc(fabric->fcs); 1354 + /* 1355 * Do not expect this failure -- expect remote node to retry 1356 */ 1357 if (!fcxp) ··· 1370 FC_MAX_PDUSZ, 0); 1371 } 1372 1373 + /* 1374 * Flogi Acc completion callback. 1375 */ 1376 static void ··· 1417 } 1418 } 1419 1420 + /* 1421 * Returns FCS vf structure for a given vf_id. 1422 * 1423 * param[in] vf_id - VF_ID ··· 1435 return NULL; 1436 } 1437 1438 + /* 1439 * BFA FCS PPORT ( physical port) 1440 */ 1441 static void ··· 1465 bfa_fcport_event_register(fcs->bfa, bfa_fcs_port_event_handler, fcs); 1466 } 1467 1468 + /* 1469 * BFA FCS UF ( Unsolicited Frames) 1470 */ 1471 1472 + /* 1473 * BFA callback for unsolicited frame receive handler. 1474 * 1475 * @param[in] cbarg callback arg for receive handler ··· 1486 struct fc_vft_s *vft; 1487 struct bfa_fcs_fabric_s *fabric; 1488 1489 + /* 1490 * check for VFT header 1491 */ 1492 if (fchs->routing == FC_RTG_EXT_HDR && ··· 1498 else 1499 fabric = bfa_fcs_vf_lookup(fcs, (u16) vft->vf_id); 1500 1501 + /* 1502 * drop frame if vfid is unknown 1503 */ 1504 if (!fabric) { ··· 1508 return; 1509 } 1510 1511 + /* 1512 * skip vft header 1513 */ 1514 fchs = (struct fchs_s *) (vft + 1);
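The bfa_fcs_fabric_sm_* handlers above implement a function-pointer state machine: the current state is the handler itself, and an event is delivered by calling it. A self-contained sketch of that pattern, with simplified types rather than the driver's bfa_sm macros:

enum fab_event { FAB_EVENT_LINK_UP, FAB_EVENT_LINK_DOWN };

struct fabric;
typedef void (*fab_sm_t)(struct fabric *fab, enum fab_event event);

struct fabric {
	fab_sm_t sm;	/* current state == current handler */
};

static void fab_sm_online(struct fabric *fab, enum fab_event event);

/* link is down; a LINK_UP event moves the fabric to the online state */
static void fab_sm_linkdown(struct fabric *fab, enum fab_event event)
{
	if (event == FAB_EVENT_LINK_UP)
		fab->sm = fab_sm_online;
}

/* fabric is online; a LINK_DOWN event moves it back to linkdown */
static void fab_sm_online(struct fabric *fab, enum fab_event event)
{
	if (event == FAB_EVENT_LINK_DOWN)
		fab->sm = fab_sm_linkdown;
}

/* deliver an event to whatever state the fabric is currently in */
static void fab_send_event(struct fabric *fab, enum fab_event event)
{
	fab->sm(fab, event);
}

int main(void)
{
	struct fabric fab = { .sm = fab_sm_linkdown };

	fab_send_event(&fab, FAB_EVENT_LINK_UP);	/* -> online */
	fab_send_event(&fab, FAB_EVENT_LINK_DOWN);	/* -> linkdown */
	return 0;
}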
+15 -15
drivers/scsi/bfa/bfa_fcs_fcpim.c
··· 15 * General Public License for more details. 16 */ 17 18 - /** 19 * fcpim.c - FCP initiator mode i-t nexus state machine 20 */ 21 ··· 38 bfa_status_t req_status, u32 rsp_len, 39 u32 resid_len, struct fchs_s *rsp_fchs); 40 41 - /** 42 * fcs_itnim_sm FCS itnim state machine events 43 */ 44 ··· 84 {BFA_SM(bfa_fcs_itnim_sm_initiator), BFA_ITNIM_INITIATIOR}, 85 }; 86 87 - /** 88 * fcs_itnim_sm FCS itnim state machine 89 */ 90 ··· 494 495 496 497 - /** 498 * itnim_public FCS ITNIM public interfaces 499 */ 500 501 - /** 502 * Called by rport when a new rport is created. 503 * 504 * @param[in] rport - remote port. ··· 554 return itnim; 555 } 556 557 - /** 558 * Called by rport to delete the instance of FCPIM. 559 * 560 * @param[in] rport - remote port. ··· 566 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_DELETE); 567 } 568 569 - /** 570 * Notification from rport that PLOGI is complete to initiate FC-4 session. 571 */ 572 void ··· 586 } 587 } 588 589 - /** 590 * Called by rport to handle a remote device offline. 591 */ 592 void ··· 596 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_OFFLINE); 597 } 598 599 - /** 600 * Called by rport when remote port is known to be an initiator from 601 * PRLI received. 602 */ ··· 608 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_INITIATOR); 609 } 610 611 - /** 612 * Called by rport to check if the itnim is online. 613 */ 614 bfa_status_t ··· 625 } 626 } 627 628 - /** 629 * BFA completion callback for bfa_itnim_online(). 630 */ 631 void ··· 637 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_ONLINE); 638 } 639 640 - /** 641 * BFA completion callback for bfa_itnim_offline(). 642 */ 643 void ··· 649 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_OFFLINE); 650 } 651 652 - /** 653 * Mark the beginning of PATH TOV handling. IO completion callbacks 654 * are still pending. 655 */ ··· 661 bfa_trc(itnim->fcs, itnim->rport->pwwn); 662 } 663 664 - /** 665 * Mark the end of PATH TOV handling. All pending IOs are already cleaned up. 666 */ 667 void ··· 674 itnim_drv->state = ITNIM_STATE_TIMEOUT; 675 } 676 677 - /** 678 * BFA notification to FCS/driver for second level error recovery. 679 * 680 * Atleast one I/O request has timedout and target is unresponsive to
··· 15 * General Public License for more details. 16 */ 17 18 + /* 19 * fcpim.c - FCP initiator mode i-t nexus state machine 20 */ 21 ··· 38 bfa_status_t req_status, u32 rsp_len, 39 u32 resid_len, struct fchs_s *rsp_fchs); 40 41 + /* 42 * fcs_itnim_sm FCS itnim state machine events 43 */ 44 ··· 84 {BFA_SM(bfa_fcs_itnim_sm_initiator), BFA_ITNIM_INITIATIOR}, 85 }; 86 87 + /* 88 * fcs_itnim_sm FCS itnim state machine 89 */ 90 ··· 494 495 496 497 + /* 498 * itnim_public FCS ITNIM public interfaces 499 */ 500 501 + /* 502 * Called by rport when a new rport is created. 503 * 504 * @param[in] rport - remote port. ··· 554 return itnim; 555 } 556 557 + /* 558 * Called by rport to delete the instance of FCPIM. 559 * 560 * @param[in] rport - remote port. ··· 566 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_DELETE); 567 } 568 569 + /* 570 * Notification from rport that PLOGI is complete to initiate FC-4 session. 571 */ 572 void ··· 586 } 587 } 588 589 + /* 590 * Called by rport to handle a remote device offline. 591 */ 592 void ··· 596 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_OFFLINE); 597 } 598 599 + /* 600 * Called by rport when remote port is known to be an initiator from 601 * PRLI received. 602 */ ··· 608 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_INITIATOR); 609 } 610 611 + /* 612 * Called by rport to check if the itnim is online. 613 */ 614 bfa_status_t ··· 625 } 626 } 627 628 + /* 629 * BFA completion callback for bfa_itnim_online(). 630 */ 631 void ··· 637 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_ONLINE); 638 } 639 640 + /* 641 * BFA completion callback for bfa_itnim_offline(). 642 */ 643 void ··· 649 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_OFFLINE); 650 } 651 652 + /* 653 * Mark the beginning of PATH TOV handling. IO completion callbacks 654 * are still pending. 655 */ ··· 661 bfa_trc(itnim->fcs, itnim->rport->pwwn); 662 } 663 664 + /* 665 * Mark the end of PATH TOV handling. All pending IOs are already cleaned up. 666 */ 667 void ··· 674 itnim_drv->state = ITNIM_STATE_TIMEOUT; 675 } 676 677 + /* 678 * BFA notification to FCS/driver for second level error recovery. 679 * 680 * Atleast one I/O request has timedout and target is unresponsive to
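The I/O and task management request builders earlier in this patch convert fields such as io_tag and fcp_dl with cpu_to_be16()/cpu_to_be32() because the firmware message format is big-endian regardless of host byte order. A userspace sketch of the same idea, serializing into a hypothetical buffer rather than the driver's bfi structures:

#include <stdint.h>
#include <string.h>

/* write a 16-bit value in big-endian byte order, independent of host order */
static void put_be16(uint8_t *p, uint16_t v)
{
	p[0] = (uint8_t)(v >> 8);
	p[1] = (uint8_t)v;
}

/* write a 32-bit value in big-endian byte order */
static void put_be32(uint8_t *p, uint32_t v)
{
	p[0] = (uint8_t)(v >> 24);
	p[1] = (uint8_t)(v >> 16);
	p[2] = (uint8_t)(v >> 8);
	p[3] = (uint8_t)v;
}

/* hypothetical request layout: 2-byte tag, 2 bytes reserved, 4-byte length */
static void build_demo_req(uint8_t msg[8], uint16_t io_tag, uint32_t data_len)
{
	memset(msg, 0, 8);
	put_be16(msg, io_tag);
	put_be32(msg + 4, data_len);
}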
+154 -214
drivers/scsi/bfa/bfa_fcs_lport.c
··· 15 * General Public License for more details. 16 */ 17 18 - /** 19 - * bfa_fcs_lport.c BFA FCS port 20 - */ 21 - 22 #include "bfa_fcs.h" 23 #include "bfa_fcbuild.h" 24 #include "bfa_fc.h" 25 #include "bfad_drv.h" 26 27 BFA_TRC_FILE(FCS, PORT); 28 - 29 - /** 30 - * Forward declarations 31 - */ 32 33 static void bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port, 34 struct fchs_s *rx_fchs, u8 reason_code, ··· 64 bfa_fcs_lport_n2n_offline}, 65 }; 66 67 - /** 68 * fcs_port_sm FCS logical port state machine 69 */ 70 ··· 232 } 233 } 234 235 - /** 236 * fcs_port_pvt 237 */ 238 ··· 264 FC_MAX_PDUSZ, 0); 265 } 266 267 - /** 268 * Process incoming plogi from a remote port. 269 */ 270 static void ··· 295 return; 296 } 297 298 - /** 299 * Direct Attach P2P mode : verify address assigned by the r-port. 300 */ 301 if ((!bfa_fcs_fabric_is_switched(port->fabric)) && ··· 311 port->pid = rx_fchs->d_id; 312 } 313 314 - /** 315 * First, check if we know the device by pwwn. 316 */ 317 rport = bfa_fcs_lport_get_rport_by_pwwn(port, plogi->port_name); 318 if (rport) { 319 - /** 320 * Direct Attach P2P mode : handle address assigned by r-port. 321 */ 322 if ((!bfa_fcs_fabric_is_switched(port->fabric)) && ··· 329 return; 330 } 331 332 - /** 333 * Next, lookup rport by PID. 334 */ 335 rport = bfa_fcs_lport_get_rport_by_pid(port, rx_fchs->s_id); 336 if (!rport) { 337 - /** 338 * Inbound PLOGI from a new device. 339 */ 340 bfa_fcs_rport_plogi_create(port, rx_fchs, plogi); 341 return; 342 } 343 344 - /** 345 * Rport is known only by PID. 346 */ 347 if (rport->pwwn) { 348 - /** 349 * This is a different device with the same pid. Old device 350 * disappeared. Send implicit LOGO to old device. 351 */ 352 bfa_assert(rport->pwwn != plogi->port_name); 353 bfa_fcs_rport_logo_imp(rport); 354 355 - /** 356 * Inbound PLOGI from a new device (with old PID). 357 */ 358 bfa_fcs_rport_plogi_create(port, rx_fchs, plogi); 359 return; 360 } 361 362 - /** 363 * PLOGI crossing each other. 364 */ 365 bfa_assert(rport->pwwn == WWN_NULL); ··· 590 591 592 593 - /** 594 * fcs_lport_api BFA FCS port API 595 */ 596 - /** 597 * Module initialization 598 */ 599 void ··· 602 603 } 604 605 - /** 606 * Module cleanup 607 */ 608 void ··· 611 bfa_fcs_modexit_comp(fcs); 612 } 613 614 - /** 615 * Unsolicited frame receive handling. 616 */ 617 void ··· 629 return; 630 } 631 632 - /** 633 * First, handle ELSs that donot require a login. 634 */ 635 /* ··· 665 bfa_fcs_lport_abts_acc(lport, fchs); 666 return; 667 } 668 - /** 669 * look for a matching remote port ID 670 */ 671 rport = bfa_fcs_lport_get_rport_by_pid(lport, pid); ··· 678 return; 679 } 680 681 - /** 682 * Only handles ELS frames for now. 683 */ 684 if (fchs->type != FC_TYPE_ELS) { ··· 694 } 695 696 if (els_cmd->els_code == FC_ELS_LOGO) { 697 - /** 698 * @todo Handle LOGO frames received. 699 */ 700 return; 701 } 702 703 if (els_cmd->els_code == FC_ELS_PRLI) { 704 - /** 705 * @todo Handle PRLI frames received. 706 */ 707 return; 708 } 709 710 - /** 711 * Unhandled ELS frames. Send a LS_RJT. 
712 */ 713 bfa_fcs_lport_send_ls_rjt(lport, fchs, FC_LS_RJT_RSN_CMD_NOT_SUPP, ··· 715 716 } 717 718 - /** 719 * PID based Lookup for a R-Port in the Port R-Port Queue 720 */ 721 struct bfa_fcs_rport_s * ··· 734 return NULL; 735 } 736 737 - /** 738 * PWWN based Lookup for a R-Port in the Port R-Port Queue 739 */ 740 struct bfa_fcs_rport_s * ··· 753 return NULL; 754 } 755 756 - /** 757 * NWWN based Lookup for a R-Port in the Port R-Port Queue 758 */ 759 struct bfa_fcs_rport_s * ··· 772 return NULL; 773 } 774 775 - /** 776 * Called by rport module when new rports are discovered. 777 */ 778 void ··· 784 port->num_rports++; 785 } 786 787 - /** 788 * Called by rport module to when rports are deleted. 789 */ 790 void ··· 799 bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELRPORT); 800 } 801 802 - /** 803 * Called by fabric for base port when fabric login is complete. 804 * Called by vport for virtual ports when FDISC is complete. 805 */ ··· 809 bfa_sm_send_event(port, BFA_FCS_PORT_SM_ONLINE); 810 } 811 812 - /** 813 * Called by fabric for base port when fabric goes offline. 814 * Called by vport for virtual ports when virtual port becomes offline. 815 */ ··· 819 bfa_sm_send_event(port, BFA_FCS_PORT_SM_OFFLINE); 820 } 821 822 - /** 823 * Called by fabric to delete base lport and associated resources. 824 * 825 * Called by vport to delete lport and associated resources. Should call ··· 831 bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELETE); 832 } 833 834 - /** 835 * Return TRUE if port is online, else return FALSE 836 */ 837 bfa_boolean_t ··· 840 return bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online); 841 } 842 843 - /** 844 * Attach time initialization of logical ports. 845 */ 846 void ··· 857 lport->num_rports = 0; 858 } 859 860 - /** 861 * Logical port initialization of base or virtual port. 862 * Called by fabric for base port or by vport for virtual ports. 863 */ ··· 886 bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE); 887 } 888 889 - /** 890 * fcs_lport_api 891 */ 892 ··· 926 } 927 } 928 929 - /** 930 * bfa_fcs_lport_fab port fab functions 931 */ 932 933 - /** 934 * Called by port to initialize fabric services of the base port. 935 */ 936 static void ··· 941 bfa_fcs_lport_ms_init(port); 942 } 943 944 - /** 945 * Called by port to notify transition to online state. 946 */ 947 static void ··· 951 bfa_fcs_lport_scn_online(port); 952 } 953 954 - /** 955 * Called by port to notify transition to offline state. 956 */ 957 static void ··· 962 bfa_fcs_lport_ms_offline(port); 963 } 964 965 - /** 966 * bfa_fcs_lport_n2n functions 967 */ 968 969 - /** 970 * Called by fcs/port to initialize N2N topology. 971 */ 972 static void ··· 974 { 975 } 976 977 - /** 978 * Called by fcs/port to notify transition to online state. 979 */ 980 static void ··· 998 ((void *)&pcfg->pwwn, (void *)&n2n_port->rem_port_wwn, 999 sizeof(wwn_t)) > 0) { 1000 port->pid = N2N_LOCAL_PID; 1001 - /** 1002 * First, check if we know the device by pwwn. 1003 */ 1004 rport = bfa_fcs_lport_get_rport_by_pwwn(port, ··· 1027 } 1028 } 1029 1030 - /** 1031 * Called by fcs/port to notify transition to offline state. 
1032 */ 1033 static void ··· 1086 struct bfa_fcs_fdmi_hba_attr_s *hba_attr); 1087 static void bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi, 1088 struct bfa_fcs_fdmi_port_attr_s *port_attr); 1089 - /** 1090 * fcs_fdmi_sm FCS FDMI state machine 1091 */ 1092 1093 - /** 1094 * FDMI State Machine events 1095 */ 1096 enum port_fdmi_event { ··· 1135 static void bfa_fcs_lport_fdmi_sm_disabled( 1136 struct bfa_fcs_lport_fdmi_s *fdmi, 1137 enum port_fdmi_event event); 1138 - /** 1139 * Start in offline state - awaiting MS to send start. 1140 */ 1141 static void ··· 1502 bfa_sm_fault(port->fcs, event); 1503 } 1504 } 1505 - /** 1506 * FDMI is disabled state. 1507 */ 1508 static void ··· 1517 /* No op State. It can only be enabled at Driver Init. */ 1518 } 1519 1520 - /** 1521 * RHBA : Register HBA Attributes. 1522 */ 1523 static void ··· 1599 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1600 len += attr->len; 1601 count++; 1602 - attr->len = 1603 - cpu_to_be16(attr->len + sizeof(attr->type) + 1604 sizeof(attr->len)); 1605 1606 /* ··· 1609 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MANUFACTURER); 1610 attr->len = (u16) strlen(fcs_hba_attr->manufacturer); 1611 memcpy(attr->value, fcs_hba_attr->manufacturer, attr->len); 1612 - attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1613 - *fields need 1614 - *to be 4 byte 1615 - *aligned */ 1616 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1617 len += attr->len; 1618 count++; 1619 - attr->len = 1620 - cpu_to_be16(attr->len + sizeof(attr->type) + 1621 sizeof(attr->len)); 1622 1623 /* ··· 1623 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_SERIALNUM); 1624 attr->len = (u16) strlen(fcs_hba_attr->serial_num); 1625 memcpy(attr->value, fcs_hba_attr->serial_num, attr->len); 1626 - attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1627 - *fields need 1628 - *to be 4 byte 1629 - *aligned */ 1630 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1631 len += attr->len; 1632 count++; 1633 - attr->len = 1634 - cpu_to_be16(attr->len + sizeof(attr->type) + 1635 sizeof(attr->len)); 1636 1637 /* ··· 1637 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL); 1638 attr->len = (u16) strlen(fcs_hba_attr->model); 1639 memcpy(attr->value, fcs_hba_attr->model, attr->len); 1640 - attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1641 - *fields need 1642 - *to be 4 byte 1643 - *aligned */ 1644 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1645 len += attr->len; 1646 count++; 1647 - attr->len = 1648 - cpu_to_be16(attr->len + sizeof(attr->type) + 1649 sizeof(attr->len)); 1650 1651 /* ··· 1651 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL_DESC); 1652 attr->len = (u16) strlen(fcs_hba_attr->model_desc); 1653 memcpy(attr->value, fcs_hba_attr->model_desc, attr->len); 1654 - attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1655 - *fields need 1656 - *to be 4 byte 1657 - *aligned */ 1658 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1659 len += attr->len; 1660 count++; 1661 - attr->len = 1662 - cpu_to_be16(attr->len + sizeof(attr->type) + 1663 sizeof(attr->len)); 1664 1665 /* ··· 1666 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_HW_VERSION); 1667 attr->len = (u16) strlen(fcs_hba_attr->hw_version); 1668 memcpy(attr->value, fcs_hba_attr->hw_version, attr->len); 1669 - attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1670 - *fields need 1671 - *to be 4 byte 1672 - *aligned */ 1673 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1674 len += 
attr->len; 1675 count++; 1676 - attr->len = 1677 - cpu_to_be16(attr->len + sizeof(attr->type) + 1678 sizeof(attr->len)); 1679 } 1680 ··· 1681 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_DRIVER_VERSION); 1682 attr->len = (u16) strlen(fcs_hba_attr->driver_version); 1683 memcpy(attr->value, fcs_hba_attr->driver_version, attr->len); 1684 - attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1685 - *fields need 1686 - *to be 4 byte 1687 - *aligned */ 1688 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1689 len += attr->len;; 1690 count++; 1691 - attr->len = 1692 - cpu_to_be16(attr->len + sizeof(attr->type) + 1693 sizeof(attr->len)); 1694 1695 /* ··· 1696 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_ROM_VERSION); 1697 attr->len = (u16) strlen(fcs_hba_attr->option_rom_ver); 1698 memcpy(attr->value, fcs_hba_attr->option_rom_ver, attr->len); 1699 - attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1700 - *fields need 1701 - *to be 4 byte 1702 - *aligned */ 1703 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1704 len += attr->len; 1705 count++; 1706 - attr->len = 1707 - cpu_to_be16(attr->len + sizeof(attr->type) + 1708 sizeof(attr->len)); 1709 } 1710 ··· 1711 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_FW_VERSION); 1712 attr->len = (u16) strlen(fcs_hba_attr->driver_version); 1713 memcpy(attr->value, fcs_hba_attr->driver_version, attr->len); 1714 - attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1715 - *fields need 1716 - *to be 4 byte 1717 - *aligned */ 1718 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1719 len += attr->len; 1720 count++; 1721 - attr->len = 1722 - cpu_to_be16(attr->len + sizeof(attr->type) + 1723 sizeof(attr->len)); 1724 1725 /* ··· 1726 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_OS_NAME); 1727 attr->len = (u16) strlen(fcs_hba_attr->os_name); 1728 memcpy(attr->value, fcs_hba_attr->os_name, attr->len); 1729 - attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1730 - *fields need 1731 - *to be 4 byte 1732 - *aligned */ 1733 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1734 len += attr->len; 1735 count++; 1736 - attr->len = 1737 - cpu_to_be16(attr->len + sizeof(attr->type) + 1738 sizeof(attr->len)); 1739 } 1740 ··· 1743 memcpy(attr->value, &fcs_hba_attr->max_ct_pyld, attr->len); 1744 len += attr->len; 1745 count++; 1746 - attr->len = 1747 - cpu_to_be16(attr->len + sizeof(attr->type) + 1748 sizeof(attr->len)); 1749 1750 /* 1751 * Update size of payload 1752 */ 1753 - len += ((sizeof(attr->type) + 1754 - sizeof(attr->len)) * count); 1755 1756 rhba->hba_attr_blk.attr_count = cpu_to_be32(count); 1757 return len; ··· 1790 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR); 1791 } 1792 1793 - /** 1794 * RPRT : Register Port 1795 */ 1796 static void ··· 1832 bfa_sm_send_event(fdmi, FDMISM_EVENT_RPRT_SENT); 1833 } 1834 1835 - /** 1836 * This routine builds Port Attribute Block that used in RPA, RPRT commands. 
1837 */ 1838 static u16 ··· 1896 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1897 len += attr->len; 1898 ++count; 1899 - attr->len = 1900 - cpu_to_be16(attr->len + sizeof(attr->type) + 1901 sizeof(attr->len)); 1902 1903 /* ··· 1909 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1910 len += attr->len; 1911 ++count; 1912 - attr->len = 1913 - cpu_to_be16(attr->len + sizeof(attr->type) + 1914 sizeof(attr->len)); 1915 1916 /* ··· 1920 attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_DEV_NAME); 1921 attr->len = (u16) strlen(fcs_port_attr.os_device_name); 1922 memcpy(attr->value, fcs_port_attr.os_device_name, attr->len); 1923 - attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1924 - *fields need 1925 - *to be 4 byte 1926 - *aligned */ 1927 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1928 len += attr->len; 1929 ++count; 1930 - attr->len = 1931 - cpu_to_be16(attr->len + sizeof(attr->type) + 1932 sizeof(attr->len)); 1933 } 1934 /* ··· 1935 attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_HOST_NAME); 1936 attr->len = (u16) strlen(fcs_port_attr.host_name); 1937 memcpy(attr->value, fcs_port_attr.host_name, attr->len); 1938 - attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1939 - *fields need 1940 - *to be 4 byte 1941 - *aligned */ 1942 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1943 len += attr->len; 1944 ++count; 1945 - attr->len = 1946 - cpu_to_be16(attr->len + sizeof(attr->type) + 1947 sizeof(attr->len)); 1948 } 1949 ··· 1947 * Update size of payload 1948 */ 1949 port_attrib->attr_count = cpu_to_be32(count); 1950 - len += ((sizeof(attr->type) + 1951 - sizeof(attr->len)) * count); 1952 return len; 1953 } 1954 ··· 2004 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR); 2005 } 2006 2007 - /** 2008 * RPA : Register Port Attributes. 2009 */ 2010 static void ··· 2033 len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port), 2034 FDMI_RPA); 2035 2036 - attr_len = 2037 - bfa_fcs_lport_fdmi_build_rpa_pyld(fdmi, 2038 - (u8 *) ((struct ct_hdr_s *) pyld 2039 - + 1)); 2040 2041 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 2042 FC_CLASS_3, len + attr_len, &fchs, ··· 2261 u32 rsp_len, 2262 u32 resid_len, 2263 struct fchs_s *rsp_fchs); 2264 - /** 2265 * fcs_ms_sm FCS MS state machine 2266 */ 2267 2268 - /** 2269 * MS State Machine events 2270 */ 2271 enum port_ms_event { ··· 2300 enum port_ms_event event); 2301 static void bfa_fcs_lport_ms_sm_online(struct bfa_fcs_lport_ms_s *ms, 2302 enum port_ms_event event); 2303 - /** 2304 * Start in offline state - awaiting NS to send start. 2305 */ 2306 static void ··· 2372 */ 2373 bfa_fcs_lport_fdmi_online(ms); 2374 2375 - /** 2376 * if this is a Vport, go to online state. 
2377 */ 2378 if (ms->port->vport) { ··· 2535 bfa_sm_fault(ms->port->fcs, event); 2536 } 2537 } 2538 - /** 2539 * ms_pvt MS local functions 2540 */ 2541 ··· 2735 bfa_sm_fault(ms->port->fcs, event); 2736 } 2737 } 2738 - /** 2739 * ms_pvt MS local functions 2740 */ 2741 ··· 2811 bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); 2812 } 2813 2814 - /** 2815 * ms_pvt MS local functions 2816 */ 2817 ··· 2957 bfa_sm_send_event(ms, MSSM_EVENT_PORT_FABRIC_RSCN); 2958 } 2959 2960 - /** 2961 * @page ns_sm_info VPORT NS State Machine 2962 * 2963 * @section ns_sm_interactions VPORT NS State Machine Interactions ··· 3020 u32 *pid_buf, u32 n_pids); 3021 3022 static void bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port); 3023 - /** 3024 * fcs_ns_sm FCS nameserver interface state machine 3025 */ 3026 3027 - /** 3028 * VPort NS State Machine events 3029 */ 3030 enum vport_ns_event { ··· 3079 enum vport_ns_event event); 3080 static void bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns, 3081 enum vport_ns_event event); 3082 - /** 3083 * Start in offline state - awaiting linkup 3084 */ 3085 static void ··· 3568 3569 3570 3571 - /** 3572 * ns_pvt Nameserver local functions 3573 */ 3574 ··· 3664 } 3665 } 3666 3667 - /** 3668 * Register the symbolic port name. 3669 */ 3670 static void ··· 3695 * for V-Port, form a Port Symbolic Name 3696 */ 3697 if (port->vport) { 3698 - /** 3699 * For Vports, we append the vport's port symbolic name 3700 * to that of the base port. 3701 */ ··· 3769 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); 3770 } 3771 3772 - /** 3773 * Register FC4-Types 3774 */ 3775 static void ··· 3841 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); 3842 } 3843 3844 - /** 3845 * Register FC4-Features : Should be done after RFT_ID 3846 */ 3847 static void ··· 3922 } else 3923 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); 3924 } 3925 - /** 3926 * Query Fabric for FC4-Types Devices. 3927 * 3928 * TBD : Need to use a local (FCS private) response buffer, since the response ··· 4042 } 4043 } 4044 4045 - /** 4046 * This routine will be called by bfa_timer on timer timeouts. 4047 * 4048 * param[in] port - pointer to bfa_fcs_lport_t. ··· 4106 } 4107 } 4108 4109 - /** 4110 * fcs_ns_public FCS nameserver public interfaces 4111 */ 4112 ··· 4167 } 4168 } 4169 4170 - /** 4171 * FCS SCN 4172 */ 4173 ··· 4190 struct fchs_s *rx_fchs); 4191 static void bfa_fcs_lport_scn_timeout(void *arg); 4192 4193 - /** 4194 * fcs_scm_sm FCS SCN state machine 4195 */ 4196 4197 - /** 4198 * VPort SCN State Machine events 4199 */ 4200 enum port_scn_event { ··· 4218 static void bfa_fcs_lport_scn_sm_online(struct bfa_fcs_lport_scn_s *scn, 4219 enum port_scn_event event); 4220 4221 - /** 4222 * Starting state - awaiting link up. 4223 */ 4224 static void ··· 4322 4323 4324 4325 - /** 4326 * fcs_scn_private FCS SCN private functions 4327 */ 4328 4329 - /** 4330 * This routine will be called to send a SCR command. 4331 */ 4332 static void ··· 4439 FC_MAX_PDUSZ, 0); 4440 } 4441 4442 - /** 4443 * This routine will be called by bfa_timer on timer timeouts. 4444 * 4445 * param[in] vport - pointer to bfa_fcs_lport_t. ··· 4462 4463 4464 4465 - /** 4466 * fcs_scn_public FCS state change notification public interfaces 4467 */ 4468 ··· 4503 4504 bfa_trc(port->fcs, rpid); 4505 4506 - /** 4507 * If this is an unknown device, then it just came online. 4508 * Otherwise let rport handle the RSCN event. 
4509 */ ··· 4519 bfa_fcs_rport_scn(rport); 4520 } 4521 4522 - /** 4523 * rscn format based PID comparison 4524 */ 4525 #define __fc_pid_match(__c0, __c1, __fmt) \ ··· 4631 } 4632 } 4633 4634 - /** 4635 - * If any of area, domain or fabric RSCN is received, do a fresh discovery 4636 - * to find new devices. 4637 */ 4638 if (nsquery) 4639 bfa_fcs_lport_ns_query(port); 4640 } 4641 4642 - /** 4643 * BFA FCS port 4644 */ 4645 - /** 4646 * fcs_port_api BFA FCS port API 4647 */ 4648 struct bfa_fcs_lport_s * ··· 4886 memset(&fcs_port->stats, 0, sizeof(struct bfa_lport_stats_s)); 4887 } 4888 4889 - /** 4890 * FCS virtual port state machine 4891 */ 4892 ··· 4907 static void bfa_fcs_vport_do_logo(struct bfa_fcs_vport_s *vport); 4908 static void bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport); 4909 4910 - /** 4911 * fcs_vport_sm FCS virtual port state machine 4912 */ 4913 4914 - /** 4915 * VPort State Machine events 4916 */ 4917 enum bfa_fcs_vport_event { ··· 4964 {BFA_SM(bfa_fcs_vport_sm_error), BFA_FCS_VPORT_ERROR} 4965 }; 4966 4967 - /** 4968 * Beginning state. 4969 */ 4970 static void ··· 4985 } 4986 } 4987 4988 - /** 4989 * Created state - a start event is required to start up the state machine. 4990 */ 4991 static void ··· 5002 bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc); 5003 bfa_fcs_vport_do_fdisc(vport); 5004 } else { 5005 - /** 5006 * Fabric is offline or not NPIV capable, stay in 5007 * offline state. 5008 */ ··· 5018 5019 case BFA_FCS_VPORT_SM_ONLINE: 5020 case BFA_FCS_VPORT_SM_OFFLINE: 5021 - /** 5022 * Ignore ONLINE/OFFLINE events from fabric 5023 * till vport is started. 5024 */ ··· 5029 } 5030 } 5031 5032 - /** 5033 * Offline state - awaiting ONLINE event from fabric SM. 5034 */ 5035 static void ··· 5067 } 5068 5069 5070 - /** 5071 * FDISC is sent and awaiting reply from fabric. 5072 */ 5073 static void ··· 5114 } 5115 } 5116 5117 - /** 5118 * FDISC attempt failed - a timer is active to retry FDISC. 5119 */ 5120 static void ··· 5148 } 5149 } 5150 5151 - /** 5152 * Vport is online (FDISC is complete). 5153 */ 5154 static void ··· 5175 } 5176 } 5177 5178 - /** 5179 * Vport is being deleted - awaiting lport delete completion to send 5180 * LOGO to fabric. 5181 */ ··· 5204 } 5205 } 5206 5207 - /** 5208 * Error State. 5209 * This state will be set when the Vport Creation fails due 5210 * to errors like Dup WWN. In this state only operation allowed ··· 5228 } 5229 } 5230 5231 - /** 5232 * Lport cleanup is in progress since vport is being deleted. Fabric is 5233 * offline, so no LOGO is needed to complete vport deletion. 5234 */ ··· 5253 } 5254 } 5255 5256 - /** 5257 * LOGO is sent to fabric. Vport delete is in progress. Lport delete cleanup 5258 * is done. 5259 */ ··· 5287 5288 5289 5290 - /** 5291 * fcs_vport_private FCS virtual port private functions 5292 */ 5293 - /** 5294 * This routine will be called to send a FDISC command. 5295 */ 5296 static void ··· 5337 } 5338 } 5339 5340 - /** 5341 * Called to send a logout to the fabric. Used when a V-Port is 5342 * deleted/stopped. 5343 */ ··· 5351 } 5352 5353 5354 - /** 5355 * This routine will be called by bfa_timer on timer timeouts. 5356 * 5357 * param[in] vport - pointer to bfa_fcs_vport_t. ··· 5389 5390 5391 5392 - /** 5393 * fcs_vport_public FCS virtual port public interfaces 5394 */ 5395 5396 - /** 5397 * Online notification from fabric SM. 5398 */ 5399 void ··· 5403 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE); 5404 } 5405 5406 - /** 5407 * Offline notification from fabric SM. 
5408 */ 5409 void ··· 5413 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_OFFLINE); 5414 } 5415 5416 - /** 5417 * Cleanup notification from fabric SM on link timer expiry. 5418 */ 5419 void ··· 5421 { 5422 vport->vport_stats.fab_cleanup++; 5423 } 5424 - /** 5425 * delete notification from fabric SM. To be invoked from within FCS. 5426 */ 5427 void ··· 5430 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELETE); 5431 } 5432 5433 - /** 5434 * Delete completion callback from associated lport 5435 */ 5436 void ··· 5441 5442 5443 5444 - /** 5445 * fcs_vport_api Virtual port API 5446 */ 5447 5448 - /** 5449 * Use this function to instantiate a new FCS vport object. This 5450 * function will not trigger any HW initialization process (which will be 5451 * done in vport_start() call) ··· 5495 return BFA_STATUS_OK; 5496 } 5497 5498 - /** 5499 * Use this function to instantiate a new FCS PBC vport object. This 5500 * function will not trigger any HW initialization process (which will be 5501 * done in vport_start() call) ··· 5525 return rc; 5526 } 5527 5528 - /** 5529 * Use this function to findout if this is a pbc vport or not. 5530 * 5531 * @param[in] vport - pointer to bfa_fcs_vport_t. ··· 5543 5544 } 5545 5546 - /** 5547 * Use this function initialize the vport. 5548 * 5549 * @param[in] vport - pointer to bfa_fcs_vport_t. ··· 5558 return BFA_STATUS_OK; 5559 } 5560 5561 - /** 5562 * Use this function quiese the vport object. This function will return 5563 * immediately, when the vport is actually stopped, the 5564 * bfa_drv_vport_stop_cb() will be called. ··· 5575 return BFA_STATUS_OK; 5576 } 5577 5578 - /** 5579 * Use this function to delete a vport object. Fabric object should 5580 * be stopped before this function call. 5581 * ··· 5597 return BFA_STATUS_OK; 5598 } 5599 5600 - /** 5601 * Use this function to get vport's current status info. 5602 * 5603 * param[in] vport pointer to bfa_fcs_vport_t. ··· 5618 attr->vport_state = bfa_sm_to_state(vport_sm_table, vport->sm); 5619 } 5620 5621 - /** 5622 * Use this function to get vport's statistics. 5623 * 5624 * param[in] vport pointer to bfa_fcs_vport_t. ··· 5633 *stats = vport->vport_stats; 5634 } 5635 5636 - /** 5637 * Use this function to clear vport's statistics. 5638 * 5639 * param[in] vport pointer to bfa_fcs_vport_t. ··· 5646 memset(&vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s)); 5647 } 5648 5649 - /** 5650 * Lookup a virtual port. Excludes base port from lookup. 5651 */ 5652 struct bfa_fcs_vport_s * ··· 5668 return vport; 5669 } 5670 5671 - /** 5672 * FDISC Response 5673 */ 5674 void ··· 5724 } 5725 } 5726 5727 - /** 5728 * LOGO response 5729 */ 5730 void ··· 5734 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK); 5735 } 5736 5737 - /** 5738 * Received clear virtual link 5739 */ 5740 void
··· 15 * General Public License for more details. 16 */ 17 18 #include "bfa_fcs.h" 19 #include "bfa_fcbuild.h" 20 #include "bfa_fc.h" 21 #include "bfad_drv.h" 22 23 BFA_TRC_FILE(FCS, PORT); 24 25 static void bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port, 26 struct fchs_s *rx_fchs, u8 reason_code, ··· 72 bfa_fcs_lport_n2n_offline}, 73 }; 74 75 + /* 76 * fcs_port_sm FCS logical port state machine 77 */ 78 ··· 240 } 241 } 242 243 + /* 244 * fcs_port_pvt 245 */ 246 ··· 272 FC_MAX_PDUSZ, 0); 273 } 274 275 + /* 276 * Process incoming plogi from a remote port. 277 */ 278 static void ··· 303 return; 304 } 305 306 + /* 307 * Direct Attach P2P mode : verify address assigned by the r-port. 308 */ 309 if ((!bfa_fcs_fabric_is_switched(port->fabric)) && ··· 319 port->pid = rx_fchs->d_id; 320 } 321 322 + /* 323 * First, check if we know the device by pwwn. 324 */ 325 rport = bfa_fcs_lport_get_rport_by_pwwn(port, plogi->port_name); 326 if (rport) { 327 + /* 328 * Direct Attach P2P mode : handle address assigned by r-port. 329 */ 330 if ((!bfa_fcs_fabric_is_switched(port->fabric)) && ··· 337 return; 338 } 339 340 + /* 341 * Next, lookup rport by PID. 342 */ 343 rport = bfa_fcs_lport_get_rport_by_pid(port, rx_fchs->s_id); 344 if (!rport) { 345 + /* 346 * Inbound PLOGI from a new device. 347 */ 348 bfa_fcs_rport_plogi_create(port, rx_fchs, plogi); 349 return; 350 } 351 352 + /* 353 * Rport is known only by PID. 354 */ 355 if (rport->pwwn) { 356 + /* 357 * This is a different device with the same pid. Old device 358 * disappeared. Send implicit LOGO to old device. 359 */ 360 bfa_assert(rport->pwwn != plogi->port_name); 361 bfa_fcs_rport_logo_imp(rport); 362 363 + /* 364 * Inbound PLOGI from a new device (with old PID). 365 */ 366 bfa_fcs_rport_plogi_create(port, rx_fchs, plogi); 367 return; 368 } 369 370 + /* 371 * PLOGI crossing each other. 372 */ 373 bfa_assert(rport->pwwn == WWN_NULL); ··· 598 599 600 601 + /* 602 * fcs_lport_api BFA FCS port API 603 */ 604 + /* 605 * Module initialization 606 */ 607 void ··· 610 611 } 612 613 + /* 614 * Module cleanup 615 */ 616 void ··· 619 bfa_fcs_modexit_comp(fcs); 620 } 621 622 + /* 623 * Unsolicited frame receive handling. 624 */ 625 void ··· 637 return; 638 } 639 640 + /* 641 * First, handle ELSs that donot require a login. 642 */ 643 /* ··· 673 bfa_fcs_lport_abts_acc(lport, fchs); 674 return; 675 } 676 + /* 677 * look for a matching remote port ID 678 */ 679 rport = bfa_fcs_lport_get_rport_by_pid(lport, pid); ··· 686 return; 687 } 688 689 + /* 690 * Only handles ELS frames for now. 691 */ 692 if (fchs->type != FC_TYPE_ELS) { ··· 702 } 703 704 if (els_cmd->els_code == FC_ELS_LOGO) { 705 + /* 706 * @todo Handle LOGO frames received. 707 */ 708 return; 709 } 710 711 if (els_cmd->els_code == FC_ELS_PRLI) { 712 + /* 713 * @todo Handle PRLI frames received. 714 */ 715 return; 716 } 717 718 + /* 719 * Unhandled ELS frames. Send a LS_RJT. 720 */ 721 bfa_fcs_lport_send_ls_rjt(lport, fchs, FC_LS_RJT_RSN_CMD_NOT_SUPP, ··· 723 724 } 725 726 + /* 727 * PID based Lookup for a R-Port in the Port R-Port Queue 728 */ 729 struct bfa_fcs_rport_s * ··· 742 return NULL; 743 } 744 745 + /* 746 * PWWN based Lookup for a R-Port in the Port R-Port Queue 747 */ 748 struct bfa_fcs_rport_s * ··· 761 return NULL; 762 } 763 764 + /* 765 * NWWN based Lookup for a R-Port in the Port R-Port Queue 766 */ 767 struct bfa_fcs_rport_s * ··· 780 return NULL; 781 } 782 783 + /* 784 * Called by rport module when new rports are discovered. 
785 */ 786 void ··· 792 port->num_rports++; 793 } 794 795 + /* 796 * Called by rport module to when rports are deleted. 797 */ 798 void ··· 807 bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELRPORT); 808 } 809 810 + /* 811 * Called by fabric for base port when fabric login is complete. 812 * Called by vport for virtual ports when FDISC is complete. 813 */ ··· 817 bfa_sm_send_event(port, BFA_FCS_PORT_SM_ONLINE); 818 } 819 820 + /* 821 * Called by fabric for base port when fabric goes offline. 822 * Called by vport for virtual ports when virtual port becomes offline. 823 */ ··· 827 bfa_sm_send_event(port, BFA_FCS_PORT_SM_OFFLINE); 828 } 829 830 + /* 831 * Called by fabric to delete base lport and associated resources. 832 * 833 * Called by vport to delete lport and associated resources. Should call ··· 839 bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELETE); 840 } 841 842 + /* 843 * Return TRUE if port is online, else return FALSE 844 */ 845 bfa_boolean_t ··· 848 return bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online); 849 } 850 851 + /* 852 * Attach time initialization of logical ports. 853 */ 854 void ··· 865 lport->num_rports = 0; 866 } 867 868 + /* 869 * Logical port initialization of base or virtual port. 870 * Called by fabric for base port or by vport for virtual ports. 871 */ ··· 894 bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE); 895 } 896 897 + /* 898 * fcs_lport_api 899 */ 900 ··· 934 } 935 } 936 937 + /* 938 * bfa_fcs_lport_fab port fab functions 939 */ 940 941 + /* 942 * Called by port to initialize fabric services of the base port. 943 */ 944 static void ··· 949 bfa_fcs_lport_ms_init(port); 950 } 951 952 + /* 953 * Called by port to notify transition to online state. 954 */ 955 static void ··· 959 bfa_fcs_lport_scn_online(port); 960 } 961 962 + /* 963 * Called by port to notify transition to offline state. 964 */ 965 static void ··· 970 bfa_fcs_lport_ms_offline(port); 971 } 972 973 + /* 974 * bfa_fcs_lport_n2n functions 975 */ 976 977 + /* 978 * Called by fcs/port to initialize N2N topology. 979 */ 980 static void ··· 982 { 983 } 984 985 + /* 986 * Called by fcs/port to notify transition to online state. 987 */ 988 static void ··· 1006 ((void *)&pcfg->pwwn, (void *)&n2n_port->rem_port_wwn, 1007 sizeof(wwn_t)) > 0) { 1008 port->pid = N2N_LOCAL_PID; 1009 + /* 1010 * First, check if we know the device by pwwn. 1011 */ 1012 rport = bfa_fcs_lport_get_rport_by_pwwn(port, ··· 1035 } 1036 } 1037 1038 + /* 1039 * Called by fcs/port to notify transition to offline state. 1040 */ 1041 static void ··· 1094 struct bfa_fcs_fdmi_hba_attr_s *hba_attr); 1095 static void bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi, 1096 struct bfa_fcs_fdmi_port_attr_s *port_attr); 1097 + /* 1098 * fcs_fdmi_sm FCS FDMI state machine 1099 */ 1100 1101 + /* 1102 * FDMI State Machine events 1103 */ 1104 enum port_fdmi_event { ··· 1143 static void bfa_fcs_lport_fdmi_sm_disabled( 1144 struct bfa_fcs_lport_fdmi_s *fdmi, 1145 enum port_fdmi_event event); 1146 + /* 1147 * Start in offline state - awaiting MS to send start. 1148 */ 1149 static void ··· 1510 bfa_sm_fault(port->fcs, event); 1511 } 1512 } 1513 + /* 1514 * FDMI is disabled state. 1515 */ 1516 static void ··· 1525 /* No op State. It can only be enabled at Driver Init. */ 1526 } 1527 1528 + /* 1529 * RHBA : Register HBA Attributes. 
1530 */ 1531 static void ··· 1607 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1608 len += attr->len; 1609 count++; 1610 + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1611 sizeof(attr->len)); 1612 1613 /* ··· 1618 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MANUFACTURER); 1619 attr->len = (u16) strlen(fcs_hba_attr->manufacturer); 1620 memcpy(attr->value, fcs_hba_attr->manufacturer, attr->len); 1621 + attr->len = fc_roundup(attr->len, sizeof(u32)); 1622 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1623 len += attr->len; 1624 count++; 1625 + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1626 sizeof(attr->len)); 1627 1628 /* ··· 1636 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_SERIALNUM); 1637 attr->len = (u16) strlen(fcs_hba_attr->serial_num); 1638 memcpy(attr->value, fcs_hba_attr->serial_num, attr->len); 1639 + attr->len = fc_roundup(attr->len, sizeof(u32)); 1640 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1641 len += attr->len; 1642 count++; 1643 + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1644 sizeof(attr->len)); 1645 1646 /* ··· 1654 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL); 1655 attr->len = (u16) strlen(fcs_hba_attr->model); 1656 memcpy(attr->value, fcs_hba_attr->model, attr->len); 1657 + attr->len = fc_roundup(attr->len, sizeof(u32)); 1658 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1659 len += attr->len; 1660 count++; 1661 + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1662 sizeof(attr->len)); 1663 1664 /* ··· 1672 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL_DESC); 1673 attr->len = (u16) strlen(fcs_hba_attr->model_desc); 1674 memcpy(attr->value, fcs_hba_attr->model_desc, attr->len); 1675 + attr->len = fc_roundup(attr->len, sizeof(u32)); 1676 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1677 len += attr->len; 1678 count++; 1679 + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1680 sizeof(attr->len)); 1681 1682 /* ··· 1691 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_HW_VERSION); 1692 attr->len = (u16) strlen(fcs_hba_attr->hw_version); 1693 memcpy(attr->value, fcs_hba_attr->hw_version, attr->len); 1694 + attr->len = fc_roundup(attr->len, sizeof(u32)); 1695 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1696 len += attr->len; 1697 count++; 1698 + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1699 sizeof(attr->len)); 1700 } 1701 ··· 1710 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_DRIVER_VERSION); 1711 attr->len = (u16) strlen(fcs_hba_attr->driver_version); 1712 memcpy(attr->value, fcs_hba_attr->driver_version, attr->len); 1713 + attr->len = fc_roundup(attr->len, sizeof(u32)); 1714 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1715 len += attr->len;; 1716 count++; 1717 + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1718 sizeof(attr->len)); 1719 1720 /* ··· 1729 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_ROM_VERSION); 1730 attr->len = (u16) strlen(fcs_hba_attr->option_rom_ver); 1731 memcpy(attr->value, fcs_hba_attr->option_rom_ver, attr->len); 1732 + attr->len = fc_roundup(attr->len, sizeof(u32)); 1733 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1734 len += attr->len; 1735 count++; 1736 + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1737 sizeof(attr->len)); 1738 } 1739 ··· 1748 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_FW_VERSION); 1749 attr->len = (u16) strlen(fcs_hba_attr->driver_version); 1750 memcpy(attr->value, fcs_hba_attr->driver_version, 
attr->len); 1751 + attr->len = fc_roundup(attr->len, sizeof(u32)); 1752 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1753 len += attr->len; 1754 count++; 1755 + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1756 sizeof(attr->len)); 1757 1758 /* ··· 1767 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_OS_NAME); 1768 attr->len = (u16) strlen(fcs_hba_attr->os_name); 1769 memcpy(attr->value, fcs_hba_attr->os_name, attr->len); 1770 + attr->len = fc_roundup(attr->len, sizeof(u32)); 1771 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1772 len += attr->len; 1773 count++; 1774 + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1775 sizeof(attr->len)); 1776 } 1777 ··· 1788 memcpy(attr->value, &fcs_hba_attr->max_ct_pyld, attr->len); 1789 len += attr->len; 1790 count++; 1791 + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1792 sizeof(attr->len)); 1793 1794 /* 1795 * Update size of payload 1796 */ 1797 + len += ((sizeof(attr->type) + sizeof(attr->len)) * count); 1798 1799 rhba->hba_attr_blk.attr_count = cpu_to_be32(count); 1800 return len; ··· 1837 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR); 1838 } 1839 1840 + /* 1841 * RPRT : Register Port 1842 */ 1843 static void ··· 1879 bfa_sm_send_event(fdmi, FDMISM_EVENT_RPRT_SENT); 1880 } 1881 1882 + /* 1883 * This routine builds Port Attribute Block that used in RPA, RPRT commands. 1884 */ 1885 static u16 ··· 1943 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1944 len += attr->len; 1945 ++count; 1946 + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1947 sizeof(attr->len)); 1948 1949 /* ··· 1957 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1958 len += attr->len; 1959 ++count; 1960 + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1961 sizeof(attr->len)); 1962 1963 /* ··· 1969 attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_DEV_NAME); 1970 attr->len = (u16) strlen(fcs_port_attr.os_device_name); 1971 memcpy(attr->value, fcs_port_attr.os_device_name, attr->len); 1972 + attr->len = fc_roundup(attr->len, sizeof(u32)); 1973 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1974 len += attr->len; 1975 ++count; 1976 + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1977 sizeof(attr->len)); 1978 } 1979 /* ··· 1988 attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_HOST_NAME); 1989 attr->len = (u16) strlen(fcs_port_attr.host_name); 1990 memcpy(attr->value, fcs_port_attr.host_name, attr->len); 1991 + attr->len = fc_roundup(attr->len, sizeof(u32)); 1992 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1993 len += attr->len; 1994 ++count; 1995 + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1996 sizeof(attr->len)); 1997 } 1998 ··· 2004 * Update size of payload 2005 */ 2006 port_attrib->attr_count = cpu_to_be32(count); 2007 + len += ((sizeof(attr->type) + sizeof(attr->len)) * count); 2008 return len; 2009 } 2010 ··· 2062 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR); 2063 } 2064 2065 + /* 2066 * RPA : Register Port Attributes. 
2067 */ 2068 static void ··· 2091 len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port), 2092 FDMI_RPA); 2093 2094 + attr_len = bfa_fcs_lport_fdmi_build_rpa_pyld(fdmi, 2095 + (u8 *) ((struct ct_hdr_s *) pyld + 1)); 2096 2097 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 2098 FC_CLASS_3, len + attr_len, &fchs, ··· 2321 u32 rsp_len, 2322 u32 resid_len, 2323 struct fchs_s *rsp_fchs); 2324 + /* 2325 * fcs_ms_sm FCS MS state machine 2326 */ 2327 2328 + /* 2329 * MS State Machine events 2330 */ 2331 enum port_ms_event { ··· 2360 enum port_ms_event event); 2361 static void bfa_fcs_lport_ms_sm_online(struct bfa_fcs_lport_ms_s *ms, 2362 enum port_ms_event event); 2363 + /* 2364 * Start in offline state - awaiting NS to send start. 2365 */ 2366 static void ··· 2432 */ 2433 bfa_fcs_lport_fdmi_online(ms); 2434 2435 + /* 2436 * if this is a Vport, go to online state. 2437 */ 2438 if (ms->port->vport) { ··· 2595 bfa_sm_fault(ms->port->fcs, event); 2596 } 2597 } 2598 + /* 2599 * ms_pvt MS local functions 2600 */ 2601 ··· 2795 bfa_sm_fault(ms->port->fcs, event); 2796 } 2797 } 2798 + /* 2799 * ms_pvt MS local functions 2800 */ 2801 ··· 2871 bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); 2872 } 2873 2874 + /* 2875 * ms_pvt MS local functions 2876 */ 2877 ··· 3017 bfa_sm_send_event(ms, MSSM_EVENT_PORT_FABRIC_RSCN); 3018 } 3019 3020 + /* 3021 * @page ns_sm_info VPORT NS State Machine 3022 * 3023 * @section ns_sm_interactions VPORT NS State Machine Interactions ··· 3080 u32 *pid_buf, u32 n_pids); 3081 3082 static void bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port); 3083 + /* 3084 * fcs_ns_sm FCS nameserver interface state machine 3085 */ 3086 3087 + /* 3088 * VPort NS State Machine events 3089 */ 3090 enum vport_ns_event { ··· 3139 enum vport_ns_event event); 3140 static void bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns, 3141 enum vport_ns_event event); 3142 + /* 3143 * Start in offline state - awaiting linkup 3144 */ 3145 static void ··· 3628 3629 3630 3631 + /* 3632 * ns_pvt Nameserver local functions 3633 */ 3634 ··· 3724 } 3725 } 3726 3727 + /* 3728 * Register the symbolic port name. 3729 */ 3730 static void ··· 3755 * for V-Port, form a Port Symbolic Name 3756 */ 3757 if (port->vport) { 3758 + /* 3759 * For Vports, we append the vport's port symbolic name 3760 * to that of the base port. 3761 */ ··· 3829 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); 3830 } 3831 3832 + /* 3833 * Register FC4-Types 3834 */ 3835 static void ··· 3901 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); 3902 } 3903 3904 + /* 3905 * Register FC4-Features : Should be done after RFT_ID 3906 */ 3907 static void ··· 3982 } else 3983 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); 3984 } 3985 + /* 3986 * Query Fabric for FC4-Types Devices. 3987 * 3988 * TBD : Need to use a local (FCS private) response buffer, since the response ··· 4102 } 4103 } 4104 4105 + /* 4106 * This routine will be called by bfa_timer on timer timeouts. 4107 * 4108 * param[in] port - pointer to bfa_fcs_lport_t. 
··· 4166 } 4167 } 4168 4169 + /* 4170 * fcs_ns_public FCS nameserver public interfaces 4171 */ 4172 ··· 4227 } 4228 } 4229 4230 + /* 4231 * FCS SCN 4232 */ 4233 ··· 4250 struct fchs_s *rx_fchs); 4251 static void bfa_fcs_lport_scn_timeout(void *arg); 4252 4253 + /* 4254 * fcs_scm_sm FCS SCN state machine 4255 */ 4256 4257 + /* 4258 * VPort SCN State Machine events 4259 */ 4260 enum port_scn_event { ··· 4278 static void bfa_fcs_lport_scn_sm_online(struct bfa_fcs_lport_scn_s *scn, 4279 enum port_scn_event event); 4280 4281 + /* 4282 * Starting state - awaiting link up. 4283 */ 4284 static void ··· 4382 4383 4384 4385 + /* 4386 * fcs_scn_private FCS SCN private functions 4387 */ 4388 4389 + /* 4390 * This routine will be called to send a SCR command. 4391 */ 4392 static void ··· 4499 FC_MAX_PDUSZ, 0); 4500 } 4501 4502 + /* 4503 * This routine will be called by bfa_timer on timer timeouts. 4504 * 4505 * param[in] vport - pointer to bfa_fcs_lport_t. ··· 4522 4523 4524 4525 + /* 4526 * fcs_scn_public FCS state change notification public interfaces 4527 */ 4528 ··· 4563 4564 bfa_trc(port->fcs, rpid); 4565 4566 + /* 4567 * If this is an unknown device, then it just came online. 4568 * Otherwise let rport handle the RSCN event. 4569 */ ··· 4579 bfa_fcs_rport_scn(rport); 4580 } 4581 4582 + /* 4583 * rscn format based PID comparison 4584 */ 4585 #define __fc_pid_match(__c0, __c1, __fmt) \ ··· 4691 } 4692 } 4693 4694 + /* 4695 + * If any of area, domain or fabric RSCN is received, do a fresh 4696 + * discovery to find new devices. 4697 */ 4698 if (nsquery) 4699 bfa_fcs_lport_ns_query(port); 4700 } 4701 4702 + /* 4703 * BFA FCS port 4704 */ 4705 + /* 4706 * fcs_port_api BFA FCS port API 4707 */ 4708 struct bfa_fcs_lport_s * ··· 4946 memset(&fcs_port->stats, 0, sizeof(struct bfa_lport_stats_s)); 4947 } 4948 4949 + /* 4950 * FCS virtual port state machine 4951 */ 4952 ··· 4967 static void bfa_fcs_vport_do_logo(struct bfa_fcs_vport_s *vport); 4968 static void bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport); 4969 4970 + /* 4971 * fcs_vport_sm FCS virtual port state machine 4972 */ 4973 4974 + /* 4975 * VPort State Machine events 4976 */ 4977 enum bfa_fcs_vport_event { ··· 5024 {BFA_SM(bfa_fcs_vport_sm_error), BFA_FCS_VPORT_ERROR} 5025 }; 5026 5027 + /* 5028 * Beginning state. 5029 */ 5030 static void ··· 5045 } 5046 } 5047 5048 + /* 5049 * Created state - a start event is required to start up the state machine. 5050 */ 5051 static void ··· 5062 bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc); 5063 bfa_fcs_vport_do_fdisc(vport); 5064 } else { 5065 + /* 5066 * Fabric is offline or not NPIV capable, stay in 5067 * offline state. 5068 */ ··· 5078 5079 case BFA_FCS_VPORT_SM_ONLINE: 5080 case BFA_FCS_VPORT_SM_OFFLINE: 5081 + /* 5082 * Ignore ONLINE/OFFLINE events from fabric 5083 * till vport is started. 5084 */ ··· 5089 } 5090 } 5091 5092 + /* 5093 * Offline state - awaiting ONLINE event from fabric SM. 5094 */ 5095 static void ··· 5127 } 5128 5129 5130 + /* 5131 * FDISC is sent and awaiting reply from fabric. 5132 */ 5133 static void ··· 5174 } 5175 } 5176 5177 + /* 5178 * FDISC attempt failed - a timer is active to retry FDISC. 5179 */ 5180 static void ··· 5208 } 5209 } 5210 5211 + /* 5212 * Vport is online (FDISC is complete). 5213 */ 5214 static void ··· 5235 } 5236 } 5237 5238 + /* 5239 * Vport is being deleted - awaiting lport delete completion to send 5240 * LOGO to fabric. 5241 */ ··· 5264 } 5265 } 5266 5267 + /* 5268 * Error State. 
5269 * This state will be set when the Vport Creation fails due 5270 * to errors like Dup WWN. In this state only operation allowed ··· 5288 } 5289 } 5290 5291 + /* 5292 * Lport cleanup is in progress since vport is being deleted. Fabric is 5293 * offline, so no LOGO is needed to complete vport deletion. 5294 */ ··· 5313 } 5314 } 5315 5316 + /* 5317 * LOGO is sent to fabric. Vport delete is in progress. Lport delete cleanup 5318 * is done. 5319 */ ··· 5347 5348 5349 5350 + /* 5351 * fcs_vport_private FCS virtual port private functions 5352 */ 5353 + /* 5354 * This routine will be called to send a FDISC command. 5355 */ 5356 static void ··· 5397 } 5398 } 5399 5400 + /* 5401 * Called to send a logout to the fabric. Used when a V-Port is 5402 * deleted/stopped. 5403 */ ··· 5411 } 5412 5413 5414 + /* 5415 * This routine will be called by bfa_timer on timer timeouts. 5416 * 5417 * param[in] vport - pointer to bfa_fcs_vport_t. ··· 5449 5450 5451 5452 + /* 5453 * fcs_vport_public FCS virtual port public interfaces 5454 */ 5455 5456 + /* 5457 * Online notification from fabric SM. 5458 */ 5459 void ··· 5463 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE); 5464 } 5465 5466 + /* 5467 * Offline notification from fabric SM. 5468 */ 5469 void ··· 5473 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_OFFLINE); 5474 } 5475 5476 + /* 5477 * Cleanup notification from fabric SM on link timer expiry. 5478 */ 5479 void ··· 5481 { 5482 vport->vport_stats.fab_cleanup++; 5483 } 5484 + /* 5485 * delete notification from fabric SM. To be invoked from within FCS. 5486 */ 5487 void ··· 5490 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELETE); 5491 } 5492 5493 + /* 5494 * Delete completion callback from associated lport 5495 */ 5496 void ··· 5501 5502 5503 5504 + /* 5505 * fcs_vport_api Virtual port API 5506 */ 5507 5508 + /* 5509 * Use this function to instantiate a new FCS vport object. This 5510 * function will not trigger any HW initialization process (which will be 5511 * done in vport_start() call) ··· 5555 return BFA_STATUS_OK; 5556 } 5557 5558 + /* 5559 * Use this function to instantiate a new FCS PBC vport object. This 5560 * function will not trigger any HW initialization process (which will be 5561 * done in vport_start() call) ··· 5585 return rc; 5586 } 5587 5588 + /* 5589 * Use this function to findout if this is a pbc vport or not. 5590 * 5591 * @param[in] vport - pointer to bfa_fcs_vport_t. ··· 5603 5604 } 5605 5606 + /* 5607 * Use this function initialize the vport. 5608 * 5609 * @param[in] vport - pointer to bfa_fcs_vport_t. ··· 5618 return BFA_STATUS_OK; 5619 } 5620 5621 + /* 5622 * Use this function quiese the vport object. This function will return 5623 * immediately, when the vport is actually stopped, the 5624 * bfa_drv_vport_stop_cb() will be called. ··· 5635 return BFA_STATUS_OK; 5636 } 5637 5638 + /* 5639 * Use this function to delete a vport object. Fabric object should 5640 * be stopped before this function call. 5641 * ··· 5657 return BFA_STATUS_OK; 5658 } 5659 5660 + /* 5661 * Use this function to get vport's current status info. 5662 * 5663 * param[in] vport pointer to bfa_fcs_vport_t. ··· 5678 attr->vport_state = bfa_sm_to_state(vport_sm_table, vport->sm); 5679 } 5680 5681 + /* 5682 * Use this function to get vport's statistics. 5683 * 5684 * param[in] vport pointer to bfa_fcs_vport_t. ··· 5693 *stats = vport->vport_stats; 5694 } 5695 5696 + /* 5697 * Use this function to clear vport's statistics. 5698 * 5699 * param[in] vport pointer to bfa_fcs_vport_t. 
··· 5706 memset(&vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s)); 5707 } 5708 5709 + /* 5710 * Lookup a virtual port. Excludes base port from lookup. 5711 */ 5712 struct bfa_fcs_vport_s * ··· 5728 return vport; 5729 } 5730 5731 + /* 5732 * FDISC Response 5733 */ 5734 void ··· 5784 } 5785 } 5786 5787 + /* 5788 * LOGO response 5789 */ 5790 void ··· 5794 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK); 5795 } 5796 5797 + /* 5798 * Received clear virtual link 5799 */ 5800 void
+85 -85
drivers/scsi/bfa/bfa_fcs_rport.c
··· 15 * General Public License for more details. 16 */ 17 18 - /** 19 * rport.c Remote port implementation. 20 */ 21 ··· 75 static void bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport, 76 struct fchs_s *rx_fchs, u16 len); 77 static void bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport); 78 - /** 79 * fcs_rport_sm FCS rport state machine events 80 */ 81 ··· 172 {BFA_SM(bfa_fcs_rport_sm_nsdisc_sent), BFA_RPORT_NSDISC}, 173 }; 174 175 - /** 176 * Beginning state. 177 */ 178 static void ··· 210 } 211 } 212 213 - /** 214 * PLOGI is being sent. 215 */ 216 static void ··· 262 } 263 } 264 265 - /** 266 * PLOGI is being sent. 267 */ 268 static void ··· 287 288 case RPSM_EVENT_PLOGI_RCVD: 289 case RPSM_EVENT_SCN: 290 - /** 291 * Ignore, SCN is possibly online notification. 292 */ 293 break; ··· 309 break; 310 311 case RPSM_EVENT_HCB_OFFLINE: 312 - /** 313 * Ignore BFA callback, on a PLOGI receive we call bfa offline. 314 */ 315 break; ··· 319 } 320 } 321 322 - /** 323 * PLOGI is sent. 324 */ 325 static void ··· 380 } 381 } 382 383 - /** 384 * PLOGI is sent. 385 */ 386 static void ··· 475 } 476 } 477 478 - /** 479 * PLOGI is complete. Awaiting BFA rport online callback. FC-4s 480 * are offline. 481 */ ··· 519 break; 520 521 case RPSM_EVENT_SCN: 522 - /** 523 * @todo 524 * Ignore SCN - PLOGI just completed, FC-4 login should detect 525 * device failures. ··· 531 } 532 } 533 534 - /** 535 * Rport is ONLINE. FC-4s active. 536 */ 537 static void ··· 580 } 581 } 582 583 - /** 584 * An SCN event is received in ONLINE state. NS query is being sent 585 * prior to ADISC authentication with rport. FC-4s are paused. 586 */ ··· 604 break; 605 606 case RPSM_EVENT_SCN: 607 - /** 608 * ignore SCN, wait for response to query itself 609 */ 610 break; ··· 638 } 639 } 640 641 - /** 642 * An SCN event is received in ONLINE state. NS query is sent to rport. 643 * FC-4s are paused. 644 */ ··· 697 } 698 } 699 700 - /** 701 * An SCN event is received in ONLINE state. ADISC is being sent for 702 * authenticating with rport. FC-4s are paused. 703 */ ··· 748 } 749 } 750 751 - /** 752 * An SCN event is received in ONLINE state. ADISC is to rport. 753 * FC-4s are paused. 754 */ ··· 765 break; 766 767 case RPSM_EVENT_PLOGI_RCVD: 768 - /** 769 * Too complex to cleanup FC-4 & rport and then acc to PLOGI. 770 * At least go offline when a PLOGI is received. 771 */ ··· 787 break; 788 789 case RPSM_EVENT_SCN: 790 - /** 791 * already processing RSCN 792 */ 793 break; ··· 810 } 811 } 812 813 - /** 814 * Rport has sent LOGO. Awaiting FC-4 offline completion callback. 815 */ 816 static void ··· 841 } 842 } 843 844 - /** 845 * LOGO needs to be sent to rport. Awaiting FC-4 offline completion 846 * callback. 847 */ ··· 864 } 865 } 866 867 - /** 868 * Rport is going offline. Awaiting FC-4 offline completion callback. 869 */ 870 static void ··· 886 case RPSM_EVENT_LOGO_RCVD: 887 case RPSM_EVENT_PRLO_RCVD: 888 case RPSM_EVENT_ADDRESS_CHANGE: 889 - /** 890 * rport is already going offline. 891 * SCN - ignore and wait till transitioning to offline state 892 */ ··· 901 } 902 } 903 904 - /** 905 * Rport is offline. FC-4s are offline. Awaiting BFA rport offline 906 * callback. 907 */ ··· 945 case RPSM_EVENT_SCN: 946 case RPSM_EVENT_LOGO_RCVD: 947 case RPSM_EVENT_PRLO_RCVD: 948 - /** 949 * Ignore, already offline. 950 */ 951 break; ··· 955 } 956 } 957 958 - /** 959 * Rport is offline. FC-4s are offline. Awaiting BFA rport offline 960 * callback to send LOGO accept. 
961 */ ··· 1009 1010 case RPSM_EVENT_LOGO_RCVD: 1011 case RPSM_EVENT_PRLO_RCVD: 1012 - /** 1013 * Ignore - already processing a LOGO. 1014 */ 1015 break; ··· 1019 } 1020 } 1021 1022 - /** 1023 * Rport is being deleted. FC-4s are offline. 1024 * Awaiting BFA rport offline 1025 * callback to send LOGO. ··· 1048 } 1049 } 1050 1051 - /** 1052 * Rport is being deleted. FC-4s are offline. LOGO is being sent. 1053 */ 1054 static void ··· 1082 } 1083 } 1084 1085 - /** 1086 * Rport is offline. FC-4s are offline. BFA rport is offline. 1087 * Timer active to delete stale rport. 1088 */ ··· 1142 } 1143 } 1144 1145 - /** 1146 * Rport address has changed. Nameserver discovery request is being sent. 1147 */ 1148 static void ··· 1199 } 1200 } 1201 1202 - /** 1203 * Nameserver discovery failed. Waiting for timeout to retry. 1204 */ 1205 static void ··· 1263 } 1264 } 1265 1266 - /** 1267 * Rport address has changed. Nameserver discovery request is sent. 1268 */ 1269 static void ··· 1329 bfa_fcs_rport_send_prlo_acc(rport); 1330 break; 1331 case RPSM_EVENT_SCN: 1332 - /** 1333 * ignore, wait for NS query response 1334 */ 1335 break; 1336 1337 case RPSM_EVENT_LOGO_RCVD: 1338 - /** 1339 * Not logged-in yet. Accept LOGO. 1340 */ 1341 bfa_fcs_rport_send_logo_acc(rport); ··· 1354 1355 1356 1357 - /** 1358 * fcs_rport_private FCS RPORT provate functions 1359 */ 1360 ··· 1415 1416 plogi_rsp = (struct fc_logi_s *) BFA_FCXP_RSP_PLD(fcxp); 1417 1418 - /** 1419 * Check for failure first. 1420 */ 1421 if (plogi_rsp->els_cmd.els_code != FC_ELS_ACC) { ··· 1436 return; 1437 } 1438 1439 - /** 1440 * PLOGI is complete. Make sure this device is not one of the known 1441 * device with a new FC port address. 1442 */ ··· 1468 } 1469 } 1470 1471 - /** 1472 * Normal login path -- no evil twins. 1473 */ 1474 rport->stats.plogi_accs++; ··· 1722 } 1723 } 1724 1725 - /** 1726 * Called to send a logout to the rport. 1727 */ 1728 static void ··· 1759 bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT); 1760 } 1761 1762 - /** 1763 * Send ACC for a LOGO received. 1764 */ 1765 static void ··· 1788 FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); 1789 } 1790 1791 - /** 1792 * brief 1793 * This routine will be called by bfa_timer on timer timeouts. 1794 * ··· 1961 struct bfa_fcs_rport_s *rport; 1962 struct bfad_rport_s *rport_drv; 1963 1964 - /** 1965 * allocate rport 1966 */ 1967 if (bfa_fcb_rport_alloc(fcs->bfad, &rport, &rport_drv) ··· 1979 rport->pid = rpid; 1980 rport->pwwn = pwwn; 1981 1982 - /** 1983 * allocate BFA rport 1984 */ 1985 rport->bfa_rport = bfa_rport_create(port->fcs->bfa, rport); ··· 1989 return NULL; 1990 } 1991 1992 - /** 1993 * allocate FC-4s 1994 */ 1995 bfa_assert(bfa_fcs_lport_is_initiator(port)); ··· 2021 { 2022 struct bfa_fcs_lport_s *port = rport->port; 2023 2024 - /** 2025 * - delete FC-4s 2026 * - delete BFA rport 2027 * - remove from queue of rports ··· 2093 } 2094 } 2095 2096 - /** 2097 * Update rport parameters from PLOGI or PLOGI accept. 
2098 */ 2099 static void ··· 2101 { 2102 bfa_fcs_lport_t *port = rport->port; 2103 2104 - /** 2105 * - port name 2106 * - node name 2107 */ 2108 rport->pwwn = plogi->port_name; 2109 rport->nwwn = plogi->node_name; 2110 2111 - /** 2112 * - class of service 2113 */ 2114 rport->fc_cos = 0; ··· 2118 if (plogi->class2.class_valid) 2119 rport->fc_cos |= FC_CLASS_2; 2120 2121 - /** 2122 * - CISC 2123 * - MAX receive frame size 2124 */ ··· 2127 2128 bfa_trc(port->fcs, be16_to_cpu(plogi->csp.bbcred)); 2129 bfa_trc(port->fcs, port->fabric->bb_credit); 2130 - /** 2131 * Direct Attach P2P mode : 2132 * This is to handle a bug (233476) in IBM targets in Direct Attach 2133 * Mode. Basically, in FLOGI Accept the target would have ··· 2148 2149 } 2150 2151 - /** 2152 * Called to handle LOGO received from an existing remote port. 2153 */ 2154 static void ··· 2164 2165 2166 2167 - /** 2168 * fcs_rport_public FCS rport public interfaces 2169 */ 2170 2171 - /** 2172 * Called by bport/vport to create a remote port instance for a discovered 2173 * remote device. 2174 * ··· 2191 return rport; 2192 } 2193 2194 - /** 2195 * Called to create a rport for which only the wwn is known. 2196 * 2197 * @param[in] port - base port ··· 2211 bfa_sm_send_event(rport, RPSM_EVENT_ADDRESS_DISC); 2212 return rport; 2213 } 2214 - /** 2215 * Called by bport in private loop topology to indicate that a 2216 * rport has been discovered and plogi has been completed. 2217 * ··· 2233 bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_COMP); 2234 } 2235 2236 - /** 2237 * Called by bport/vport to handle PLOGI received from a new remote port. 2238 * If an existing rport does a plogi, it will be handled separately. 2239 */ ··· 2272 return 0; 2273 } 2274 2275 - /** 2276 * Called by bport/vport to handle PLOGI received from an existing 2277 * remote port. 2278 */ ··· 2280 bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs, 2281 struct fc_logi_s *plogi) 2282 { 2283 - /** 2284 * @todo Handle P2P and initiator-initiator. 2285 */ 2286 ··· 2289 rport->reply_oxid = rx_fchs->ox_id; 2290 bfa_trc(rport->fcs, rport->reply_oxid); 2291 2292 - /** 2293 * In Switched fabric topology, 2294 * PLOGI to each other. If our pwwn is smaller, ignore it, 2295 * if it is not a well known address. ··· 2307 bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD); 2308 } 2309 2310 - /** 2311 * Called by bport/vport to delete a remote port instance. 2312 * 2313 * Rport delete is called under the following conditions: ··· 2321 bfa_sm_send_event(rport, RPSM_EVENT_DELETE); 2322 } 2323 2324 - /** 2325 * Called by bport/vport to when a target goes offline. 2326 * 2327 */ ··· 2331 bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP); 2332 } 2333 2334 - /** 2335 * Called by bport in n2n when a target (attached port) becomes online. 2336 * 2337 */ ··· 2340 { 2341 bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND); 2342 } 2343 - /** 2344 * Called by bport/vport to notify SCN for the remote port 2345 */ 2346 void ··· 2350 bfa_sm_send_event(rport, RPSM_EVENT_SCN); 2351 } 2352 2353 - /** 2354 * Called by fcpim to notify that the ITN cleanup is done. 2355 */ 2356 void ··· 2359 bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE); 2360 } 2361 2362 - /** 2363 * Called by fcptm to notify that the ITN cleanup is done. 2364 */ 2365 void ··· 2368 bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE); 2369 } 2370 2371 - /** 2372 * brief 2373 * This routine BFA callback for bfa_rport_online() call. 
2374 * ··· 2391 bfa_sm_send_event(rport, RPSM_EVENT_HCB_ONLINE); 2392 } 2393 2394 - /** 2395 * brief 2396 * This routine BFA callback for bfa_rport_offline() call. 2397 * ··· 2413 bfa_sm_send_event(rport, RPSM_EVENT_HCB_OFFLINE); 2414 } 2415 2416 - /** 2417 * brief 2418 * This routine is a static BFA callback when there is a QoS flow_id 2419 * change notification ··· 2437 bfa_trc(rport->fcs, rport->pwwn); 2438 } 2439 2440 - /** 2441 * brief 2442 * This routine is a static BFA callback when there is a QoS priority 2443 * change notification ··· 2461 bfa_trc(rport->fcs, rport->pwwn); 2462 } 2463 2464 - /** 2465 * Called to process any unsolicted frames from this remote port 2466 */ 2467 void ··· 2470 bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP); 2471 } 2472 2473 - /** 2474 * Called to process any unsolicted frames from this remote port 2475 */ 2476 void ··· 2577 FC_MAX_PDUSZ, 0); 2578 } 2579 2580 - /** 2581 * Return state of rport. 2582 */ 2583 int ··· 2586 return bfa_sm_to_state(rport_sm_table, rport->sm); 2587 } 2588 2589 - /** 2590 * brief 2591 * Called by the Driver to set rport delete/ageout timeout 2592 * ··· 2613 2614 2615 2616 - /** 2617 * Remote port implementation. 2618 */ 2619 2620 - /** 2621 * fcs_rport_api FCS rport API. 2622 */ 2623 2624 - /** 2625 * Direct API to add a target by port wwn. This interface is used, for 2626 * example, by bios when target pwwn is known from boot lun configuration. 2627 */ ··· 2634 return BFA_STATUS_OK; 2635 } 2636 2637 - /** 2638 * Direct API to remove a target and its associated resources. This 2639 * interface is used, for example, by driver to remove target 2640 * ports from the target list for a VM. ··· 2663 2664 } 2665 2666 - /** 2667 * Remote device status for display/debug. 2668 */ 2669 void ··· 2704 } 2705 } 2706 2707 - /** 2708 * Per remote device statistics. 2709 */ 2710 void ··· 2767 2768 2769 2770 - /** 2771 * Remote port features (RPF) implementation. 2772 */ 2773 ··· 2786 2787 static void bfa_fcs_rpf_timeout(void *arg); 2788 2789 - /** 2790 * fcs_rport_ftrs_sm FCS rport state machine events 2791 */ 2792 ··· 2981 bfa_sm_fault(rport->fcs, event); 2982 } 2983 } 2984 - /** 2985 * Called when Rport is created. 2986 */ 2987 void ··· 2995 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_uninit); 2996 } 2997 2998 - /** 2999 * Called when Rport becomes online 3000 */ 3001 void ··· 3010 bfa_sm_send_event(&rport->rpf, RPFSM_EVENT_RPORT_ONLINE); 3011 } 3012 3013 - /** 3014 * Called when Rport becomes offline 3015 */ 3016 void
··· 15 * General Public License for more details. 16 */ 17 18 + /* 19 * rport.c Remote port implementation. 20 */ 21 ··· 75 static void bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport, 76 struct fchs_s *rx_fchs, u16 len); 77 static void bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport); 78 + /* 79 * fcs_rport_sm FCS rport state machine events 80 */ 81 ··· 172 {BFA_SM(bfa_fcs_rport_sm_nsdisc_sent), BFA_RPORT_NSDISC}, 173 }; 174 175 + /* 176 * Beginning state. 177 */ 178 static void ··· 210 } 211 } 212 213 + /* 214 * PLOGI is being sent. 215 */ 216 static void ··· 262 } 263 } 264 265 + /* 266 * PLOGI is being sent. 267 */ 268 static void ··· 287 288 case RPSM_EVENT_PLOGI_RCVD: 289 case RPSM_EVENT_SCN: 290 + /* 291 * Ignore, SCN is possibly online notification. 292 */ 293 break; ··· 309 break; 310 311 case RPSM_EVENT_HCB_OFFLINE: 312 + /* 313 * Ignore BFA callback, on a PLOGI receive we call bfa offline. 314 */ 315 break; ··· 319 } 320 } 321 322 + /* 323 * PLOGI is sent. 324 */ 325 static void ··· 380 } 381 } 382 383 + /* 384 * PLOGI is sent. 385 */ 386 static void ··· 475 } 476 } 477 478 + /* 479 * PLOGI is complete. Awaiting BFA rport online callback. FC-4s 480 * are offline. 481 */ ··· 519 break; 520 521 case RPSM_EVENT_SCN: 522 + /* 523 * @todo 524 * Ignore SCN - PLOGI just completed, FC-4 login should detect 525 * device failures. ··· 531 } 532 } 533 534 + /* 535 * Rport is ONLINE. FC-4s active. 536 */ 537 static void ··· 580 } 581 } 582 583 + /* 584 * An SCN event is received in ONLINE state. NS query is being sent 585 * prior to ADISC authentication with rport. FC-4s are paused. 586 */ ··· 604 break; 605 606 case RPSM_EVENT_SCN: 607 + /* 608 * ignore SCN, wait for response to query itself 609 */ 610 break; ··· 638 } 639 } 640 641 + /* 642 * An SCN event is received in ONLINE state. NS query is sent to rport. 643 * FC-4s are paused. 644 */ ··· 697 } 698 } 699 700 + /* 701 * An SCN event is received in ONLINE state. ADISC is being sent for 702 * authenticating with rport. FC-4s are paused. 703 */ ··· 748 } 749 } 750 751 + /* 752 * An SCN event is received in ONLINE state. ADISC is to rport. 753 * FC-4s are paused. 754 */ ··· 765 break; 766 767 case RPSM_EVENT_PLOGI_RCVD: 768 + /* 769 * Too complex to cleanup FC-4 & rport and then acc to PLOGI. 770 * At least go offline when a PLOGI is received. 771 */ ··· 787 break; 788 789 case RPSM_EVENT_SCN: 790 + /* 791 * already processing RSCN 792 */ 793 break; ··· 810 } 811 } 812 813 + /* 814 * Rport has sent LOGO. Awaiting FC-4 offline completion callback. 815 */ 816 static void ··· 841 } 842 } 843 844 + /* 845 * LOGO needs to be sent to rport. Awaiting FC-4 offline completion 846 * callback. 847 */ ··· 864 } 865 } 866 867 + /* 868 * Rport is going offline. Awaiting FC-4 offline completion callback. 869 */ 870 static void ··· 886 case RPSM_EVENT_LOGO_RCVD: 887 case RPSM_EVENT_PRLO_RCVD: 888 case RPSM_EVENT_ADDRESS_CHANGE: 889 + /* 890 * rport is already going offline. 891 * SCN - ignore and wait till transitioning to offline state 892 */ ··· 901 } 902 } 903 904 + /* 905 * Rport is offline. FC-4s are offline. Awaiting BFA rport offline 906 * callback. 907 */ ··· 945 case RPSM_EVENT_SCN: 946 case RPSM_EVENT_LOGO_RCVD: 947 case RPSM_EVENT_PRLO_RCVD: 948 + /* 949 * Ignore, already offline. 950 */ 951 break; ··· 955 } 956 } 957 958 + /* 959 * Rport is offline. FC-4s are offline. Awaiting BFA rport offline 960 * callback to send LOGO accept. 
961 */ ··· 1009 1010 case RPSM_EVENT_LOGO_RCVD: 1011 case RPSM_EVENT_PRLO_RCVD: 1012 + /* 1013 * Ignore - already processing a LOGO. 1014 */ 1015 break; ··· 1019 } 1020 } 1021 1022 + /* 1023 * Rport is being deleted. FC-4s are offline. 1024 * Awaiting BFA rport offline 1025 * callback to send LOGO. ··· 1048 } 1049 } 1050 1051 + /* 1052 * Rport is being deleted. FC-4s are offline. LOGO is being sent. 1053 */ 1054 static void ··· 1082 } 1083 } 1084 1085 + /* 1086 * Rport is offline. FC-4s are offline. BFA rport is offline. 1087 * Timer active to delete stale rport. 1088 */ ··· 1142 } 1143 } 1144 1145 + /* 1146 * Rport address has changed. Nameserver discovery request is being sent. 1147 */ 1148 static void ··· 1199 } 1200 } 1201 1202 + /* 1203 * Nameserver discovery failed. Waiting for timeout to retry. 1204 */ 1205 static void ··· 1263 } 1264 } 1265 1266 + /* 1267 * Rport address has changed. Nameserver discovery request is sent. 1268 */ 1269 static void ··· 1329 bfa_fcs_rport_send_prlo_acc(rport); 1330 break; 1331 case RPSM_EVENT_SCN: 1332 + /* 1333 * ignore, wait for NS query response 1334 */ 1335 break; 1336 1337 case RPSM_EVENT_LOGO_RCVD: 1338 + /* 1339 * Not logged-in yet. Accept LOGO. 1340 */ 1341 bfa_fcs_rport_send_logo_acc(rport); ··· 1354 1355 1356 1357 + /* 1358 * fcs_rport_private FCS RPORT provate functions 1359 */ 1360 ··· 1415 1416 plogi_rsp = (struct fc_logi_s *) BFA_FCXP_RSP_PLD(fcxp); 1417 1418 + /* 1419 * Check for failure first. 1420 */ 1421 if (plogi_rsp->els_cmd.els_code != FC_ELS_ACC) { ··· 1436 return; 1437 } 1438 1439 + /* 1440 * PLOGI is complete. Make sure this device is not one of the known 1441 * device with a new FC port address. 1442 */ ··· 1468 } 1469 } 1470 1471 + /* 1472 * Normal login path -- no evil twins. 1473 */ 1474 rport->stats.plogi_accs++; ··· 1722 } 1723 } 1724 1725 + /* 1726 * Called to send a logout to the rport. 1727 */ 1728 static void ··· 1759 bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT); 1760 } 1761 1762 + /* 1763 * Send ACC for a LOGO received. 1764 */ 1765 static void ··· 1788 FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); 1789 } 1790 1791 + /* 1792 * brief 1793 * This routine will be called by bfa_timer on timer timeouts. 1794 * ··· 1961 struct bfa_fcs_rport_s *rport; 1962 struct bfad_rport_s *rport_drv; 1963 1964 + /* 1965 * allocate rport 1966 */ 1967 if (bfa_fcb_rport_alloc(fcs->bfad, &rport, &rport_drv) ··· 1979 rport->pid = rpid; 1980 rport->pwwn = pwwn; 1981 1982 + /* 1983 * allocate BFA rport 1984 */ 1985 rport->bfa_rport = bfa_rport_create(port->fcs->bfa, rport); ··· 1989 return NULL; 1990 } 1991 1992 + /* 1993 * allocate FC-4s 1994 */ 1995 bfa_assert(bfa_fcs_lport_is_initiator(port)); ··· 2021 { 2022 struct bfa_fcs_lport_s *port = rport->port; 2023 2024 + /* 2025 * - delete FC-4s 2026 * - delete BFA rport 2027 * - remove from queue of rports ··· 2093 } 2094 } 2095 2096 + /* 2097 * Update rport parameters from PLOGI or PLOGI accept. 
2098 */ 2099 static void ··· 2101 { 2102 bfa_fcs_lport_t *port = rport->port; 2103 2104 + /* 2105 * - port name 2106 * - node name 2107 */ 2108 rport->pwwn = plogi->port_name; 2109 rport->nwwn = plogi->node_name; 2110 2111 + /* 2112 * - class of service 2113 */ 2114 rport->fc_cos = 0; ··· 2118 if (plogi->class2.class_valid) 2119 rport->fc_cos |= FC_CLASS_2; 2120 2121 + /* 2122 * - CISC 2123 * - MAX receive frame size 2124 */ ··· 2127 2128 bfa_trc(port->fcs, be16_to_cpu(plogi->csp.bbcred)); 2129 bfa_trc(port->fcs, port->fabric->bb_credit); 2130 + /* 2131 * Direct Attach P2P mode : 2132 * This is to handle a bug (233476) in IBM targets in Direct Attach 2133 * Mode. Basically, in FLOGI Accept the target would have ··· 2148 2149 } 2150 2151 + /* 2152 * Called to handle LOGO received from an existing remote port. 2153 */ 2154 static void ··· 2164 2165 2166 2167 + /* 2168 * fcs_rport_public FCS rport public interfaces 2169 */ 2170 2171 + /* 2172 * Called by bport/vport to create a remote port instance for a discovered 2173 * remote device. 2174 * ··· 2191 return rport; 2192 } 2193 2194 + /* 2195 * Called to create a rport for which only the wwn is known. 2196 * 2197 * @param[in] port - base port ··· 2211 bfa_sm_send_event(rport, RPSM_EVENT_ADDRESS_DISC); 2212 return rport; 2213 } 2214 + /* 2215 * Called by bport in private loop topology to indicate that a 2216 * rport has been discovered and plogi has been completed. 2217 * ··· 2233 bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_COMP); 2234 } 2235 2236 + /* 2237 * Called by bport/vport to handle PLOGI received from a new remote port. 2238 * If an existing rport does a plogi, it will be handled separately. 2239 */ ··· 2272 return 0; 2273 } 2274 2275 + /* 2276 * Called by bport/vport to handle PLOGI received from an existing 2277 * remote port. 2278 */ ··· 2280 bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs, 2281 struct fc_logi_s *plogi) 2282 { 2283 + /* 2284 * @todo Handle P2P and initiator-initiator. 2285 */ 2286 ··· 2289 rport->reply_oxid = rx_fchs->ox_id; 2290 bfa_trc(rport->fcs, rport->reply_oxid); 2291 2292 + /* 2293 * In Switched fabric topology, 2294 * PLOGI to each other. If our pwwn is smaller, ignore it, 2295 * if it is not a well known address. ··· 2307 bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD); 2308 } 2309 2310 + /* 2311 * Called by bport/vport to delete a remote port instance. 2312 * 2313 * Rport delete is called under the following conditions: ··· 2321 bfa_sm_send_event(rport, RPSM_EVENT_DELETE); 2322 } 2323 2324 + /* 2325 * Called by bport/vport to when a target goes offline. 2326 * 2327 */ ··· 2331 bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP); 2332 } 2333 2334 + /* 2335 * Called by bport in n2n when a target (attached port) becomes online. 2336 * 2337 */ ··· 2340 { 2341 bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND); 2342 } 2343 + /* 2344 * Called by bport/vport to notify SCN for the remote port 2345 */ 2346 void ··· 2350 bfa_sm_send_event(rport, RPSM_EVENT_SCN); 2351 } 2352 2353 + /* 2354 * Called by fcpim to notify that the ITN cleanup is done. 2355 */ 2356 void ··· 2359 bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE); 2360 } 2361 2362 + /* 2363 * Called by fcptm to notify that the ITN cleanup is done. 2364 */ 2365 void ··· 2368 bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE); 2369 } 2370 2371 + /* 2372 * brief 2373 * This routine BFA callback for bfa_rport_online() call. 
2374 * ··· 2391 bfa_sm_send_event(rport, RPSM_EVENT_HCB_ONLINE); 2392 } 2393 2394 + /* 2395 * brief 2396 * This routine BFA callback for bfa_rport_offline() call. 2397 * ··· 2413 bfa_sm_send_event(rport, RPSM_EVENT_HCB_OFFLINE); 2414 } 2415 2416 + /* 2417 * brief 2418 * This routine is a static BFA callback when there is a QoS flow_id 2419 * change notification ··· 2437 bfa_trc(rport->fcs, rport->pwwn); 2438 } 2439 2440 + /* 2441 * brief 2442 * This routine is a static BFA callback when there is a QoS priority 2443 * change notification ··· 2461 bfa_trc(rport->fcs, rport->pwwn); 2462 } 2463 2464 + /* 2465 * Called to process any unsolicted frames from this remote port 2466 */ 2467 void ··· 2470 bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP); 2471 } 2472 2473 + /* 2474 * Called to process any unsolicted frames from this remote port 2475 */ 2476 void ··· 2577 FC_MAX_PDUSZ, 0); 2578 } 2579 2580 + /* 2581 * Return state of rport. 2582 */ 2583 int ··· 2586 return bfa_sm_to_state(rport_sm_table, rport->sm); 2587 } 2588 2589 + /* 2590 * brief 2591 * Called by the Driver to set rport delete/ageout timeout 2592 * ··· 2613 2614 2615 2616 + /* 2617 * Remote port implementation. 2618 */ 2619 2620 + /* 2621 * fcs_rport_api FCS rport API. 2622 */ 2623 2624 + /* 2625 * Direct API to add a target by port wwn. This interface is used, for 2626 * example, by bios when target pwwn is known from boot lun configuration. 2627 */ ··· 2634 return BFA_STATUS_OK; 2635 } 2636 2637 + /* 2638 * Direct API to remove a target and its associated resources. This 2639 * interface is used, for example, by driver to remove target 2640 * ports from the target list for a VM. ··· 2663 2664 } 2665 2666 + /* 2667 * Remote device status for display/debug. 2668 */ 2669 void ··· 2704 } 2705 } 2706 2707 + /* 2708 * Per remote device statistics. 2709 */ 2710 void ··· 2767 2768 2769 2770 + /* 2771 * Remote port features (RPF) implementation. 2772 */ 2773 ··· 2786 2787 static void bfa_fcs_rpf_timeout(void *arg); 2788 2789 + /* 2790 * fcs_rport_ftrs_sm FCS rport state machine events 2791 */ 2792 ··· 2981 bfa_sm_fault(rport->fcs, event); 2982 } 2983 } 2984 + /* 2985 * Called when Rport is created. 2986 */ 2987 void ··· 2995 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_uninit); 2996 } 2997 2998 + /* 2999 * Called when Rport becomes online 3000 */ 3001 void ··· 3010 bfa_sm_send_event(&rport->rpf, RPFSM_EVENT_RPORT_ONLINE); 3011 } 3012 3013 + /* 3014 * Called when Rport becomes offline 3015 */ 3016 void
+3 -3
drivers/scsi/bfa/bfa_hw_cb.c
··· 102 *num_vecs = __HFN_NUMINTS; 103 } 104 105 - /** 106 * No special setup required for crossbow -- vector assignments are implicit. 107 */ 108 void ··· 129 bfa->msix.handler[i] = bfa_msix_lpu_err; 130 } 131 132 - /** 133 * Crossbow -- dummy, interrupts are masked 134 */ 135 void ··· 142 { 143 } 144 145 - /** 146 * No special enable/disable -- vector assignments are implicit. 147 */ 148 void
··· 102 *num_vecs = __HFN_NUMINTS; 103 } 104 105 + /* 106 * No special setup required for crossbow -- vector assignments are implicit. 107 */ 108 void ··· 129 bfa->msix.handler[i] = bfa_msix_lpu_err; 130 } 131 132 + /* 133 * Crossbow -- dummy, interrupts are masked 134 */ 135 void ··· 142 { 143 } 144 145 + /* 146 * No special enable/disable -- vector assignments are implicit. 147 */ 148 void
+3 -3
drivers/scsi/bfa/bfa_hw_ct.c
··· 39 writel(0, kva + __ct_msix_err_vec_reg[fn]); 40 } 41 42 - /** 43 * Dummy interrupt handler for handling spurious interrupt during chip-reinit. 44 */ 45 static void ··· 110 *num_vecs = BFA_MSIX_CT_MAX; 111 } 112 113 - /** 114 * Setup MSI-X vector for catapult 115 */ 116 void ··· 156 bfa->msix.handler[i] = bfa_hwct_msix_dummy; 157 } 158 159 - /** 160 * Enable MSI-X vectors 161 */ 162 void
··· 39 writel(0, kva + __ct_msix_err_vec_reg[fn]); 40 } 41 42 + /* 43 * Dummy interrupt handler for handling spurious interrupt during chip-reinit. 44 */ 45 static void ··· 110 *num_vecs = BFA_MSIX_CT_MAX; 111 } 112 113 + /* 114 * Setup MSI-X vector for catapult 115 */ 116 void ··· 156 bfa->msix.handler[i] = bfa_hwct_msix_dummy; 157 } 158 159 + /* 160 * Enable MSI-X vectors 161 */ 162 void
+119 -119
drivers/scsi/bfa/bfa_ioc.c
··· 23 24 BFA_TRC_FILE(CNA, IOC); 25 26 - /** 27 * IOC local definitions 28 */ 29 #define BFA_IOC_TOV 3000 /* msecs */ ··· 49 BFA_TRC_MAX * sizeof(struct bfa_trc_s))) 50 #define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn)) 51 52 - /** 53 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. 54 */ 55 ··· 101 static void bfa_ioc_pf_failed(struct bfa_ioc_s *ioc); 102 static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc); 103 104 - /** 105 * hal_ioc_sm 106 */ 107 108 - /** 109 * IOC state machine definitions/declarations 110 */ 111 enum ioc_event { ··· 144 {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED}, 145 }; 146 147 - /** 148 * IOCPF state machine definitions/declarations 149 */ 150 ··· 174 static void bfa_iocpf_timeout(void *ioc_arg); 175 static void bfa_iocpf_sem_timeout(void *ioc_arg); 176 177 - /** 178 * IOCPF state machine events 179 */ 180 enum iocpf_event { ··· 191 IOCPF_E_TIMEOUT = 11, /* f/w response timeout */ 192 }; 193 194 - /** 195 * IOCPF states 196 */ 197 enum bfa_iocpf_state { ··· 232 {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED}, 233 }; 234 235 - /** 236 * IOC State Machine 237 */ 238 239 - /** 240 * Beginning state. IOC uninit state. 241 */ 242 ··· 245 { 246 } 247 248 - /** 249 * IOC is in uninit state. 250 */ 251 static void ··· 262 bfa_sm_fault(ioc, event); 263 } 264 } 265 - /** 266 * Reset entry actions -- initialize state machine 267 */ 268 static void ··· 271 bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset); 272 } 273 274 - /** 275 * IOC is in reset state. 276 */ 277 static void ··· 304 bfa_iocpf_enable(ioc); 305 } 306 307 - /** 308 * Host IOC function is being enabled, awaiting response from firmware. 309 * Semaphore is acquired. 310 */ ··· 352 bfa_ioc_send_getattr(ioc); 353 } 354 355 - /** 356 * IOC configuration in progress. Timer is active. 357 */ 358 static void ··· 447 BFA_LOG(KERN_INFO, bfad, log_level, "IOC disabled\n"); 448 } 449 450 - /** 451 * IOC is being disabled 452 */ 453 static void ··· 474 } 475 } 476 477 - /** 478 * IOC disable completion entry. 479 */ 480 static void ··· 514 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 515 } 516 517 - /** 518 * Hardware initialization failed. 519 */ 520 static void ··· 528 break; 529 530 case IOC_E_FAILED: 531 - /** 532 * Initialization failure during iocpf init retry. 533 */ 534 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); ··· 556 struct bfa_ioc_hbfail_notify_s *notify; 557 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; 558 559 - /** 560 * Notify driver and common modules registered for notification. 561 */ 562 ioc->cbfn->hbfail_cbfn(ioc->bfa); ··· 569 "Heart Beat of IOC has failed\n"); 570 } 571 572 - /** 573 * IOC failure. 574 */ 575 static void ··· 580 switch (event) { 581 582 case IOC_E_FAILED: 583 - /** 584 * Initialization failure during iocpf recovery. 585 * !!! Fall through !!! 586 */ ··· 608 609 610 611 - /** 612 * IOCPF State Machine 613 */ 614 615 616 - /** 617 * Reset entry actions -- initialize state machine 618 */ 619 static void ··· 623 iocpf->auto_recover = bfa_auto_recover; 624 } 625 626 - /** 627 * Beginning state. IOC is in reset state. 628 */ 629 static void ··· 646 } 647 } 648 649 - /** 650 * Semaphore should be acquired for version check. 651 */ 652 static void ··· 655 bfa_ioc_hw_sem_get(iocpf->ioc); 656 } 657 658 - /** 659 * Awaiting h/w semaphore to continue with version check. 660 */ 661 static void ··· 692 } 693 } 694 695 - /** 696 * Notify enable completion callback. 
697 */ 698 static void ··· 708 bfa_iocpf_timer_start(iocpf->ioc); 709 } 710 711 - /** 712 * Awaiting firmware version match. 713 */ 714 static void ··· 739 } 740 } 741 742 - /** 743 * Request for semaphore. 744 */ 745 static void ··· 748 bfa_ioc_hw_sem_get(iocpf->ioc); 749 } 750 751 - /** 752 * Awaiting semaphore for h/w initialzation. 753 */ 754 static void ··· 782 bfa_ioc_reset(iocpf->ioc, BFA_FALSE); 783 } 784 785 - /** 786 * Hardware is being initialized. Interrupts are enabled. 787 * Holding hardware semaphore lock. 788 */ ··· 839 bfa_ioc_send_enable(iocpf->ioc); 840 } 841 842 - /** 843 * Host IOC function is being enabled, awaiting response from firmware. 844 * Semaphore is acquired. 845 */ ··· 943 bfa_ioc_send_disable(iocpf->ioc); 944 } 945 946 - /** 947 * IOC is being disabled 948 */ 949 static void ··· 979 } 980 } 981 982 - /** 983 * IOC disable completion entry. 984 */ 985 static void ··· 1017 bfa_iocpf_timer_start(iocpf->ioc); 1018 } 1019 1020 - /** 1021 * Hardware initialization failed. 1022 */ 1023 static void ··· 1052 static void 1053 bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf) 1054 { 1055 - /** 1056 * Mark IOC as failed in hardware and stop firmware. 1057 */ 1058 bfa_ioc_lpu_stop(iocpf->ioc); 1059 writel(BFI_IOC_FAIL, iocpf->ioc->ioc_regs.ioc_fwstate); 1060 1061 - /** 1062 * Notify other functions on HB failure. 1063 */ 1064 bfa_ioc_notify_hbfail(iocpf->ioc); 1065 1066 - /** 1067 * Flush any queued up mailbox requests. 1068 */ 1069 bfa_ioc_mbox_hbfail(iocpf->ioc); ··· 1072 bfa_iocpf_recovery_timer_start(iocpf->ioc); 1073 } 1074 1075 - /** 1076 * IOC is in failed state. 1077 */ 1078 static void ··· 1100 1101 1102 1103 - /** 1104 * hal_ioc_pvt BFA IOC private functions 1105 */ 1106 ··· 1112 1113 ioc->cbfn->disable_cbfn(ioc->bfa); 1114 1115 - /** 1116 * Notify common modules registered for notification. 1117 */ 1118 list_for_each(qe, &ioc->hb_notify_q) { ··· 1154 { 1155 u32 r32; 1156 1157 - /** 1158 * First read to the semaphore register will return 0, subsequent reads 1159 * will return 1. Semaphore is released by writing 1 to the register 1160 */ ··· 1179 bfa_sem_timer_stop(ioc); 1180 } 1181 1182 - /** 1183 * Initialize LPU local memory (aka secondary memory / SRAM) 1184 */ 1185 static void ··· 1199 pss_ctl |= __PSS_I2C_CLK_DIV(3UL); 1200 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); 1201 1202 - /** 1203 * wait for memory initialization to be complete 1204 */ 1205 i = 0; ··· 1208 i++; 1209 } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME)); 1210 1211 - /** 1212 * If memory initialization is not successful, IOC timeout will catch 1213 * such failures. 1214 */ ··· 1224 { 1225 u32 pss_ctl; 1226 1227 - /** 1228 * Take processor out of reset. 1229 */ 1230 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); ··· 1238 { 1239 u32 pss_ctl; 1240 1241 - /** 1242 * Put processors in reset. 1243 */ 1244 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); ··· 1247 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); 1248 } 1249 1250 - /** 1251 * Get driver and firmware versions. 1252 */ 1253 void ··· 1270 } 1271 } 1272 1273 - /** 1274 * Returns TRUE if same. 1275 */ 1276 bfa_boolean_t ··· 1295 return BFA_TRUE; 1296 } 1297 1298 - /** 1299 * Return true if current running version is valid. Firmware signature and 1300 * execution context (driver/bios) must match. 
1301 */ ··· 1304 { 1305 struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr; 1306 1307 - /** 1308 * If bios/efi boot (flash based) -- return true 1309 */ 1310 if (bfa_ioc_is_bios_optrom(ioc)) ··· 1329 return bfa_ioc_fwver_cmp(ioc, &fwhdr); 1330 } 1331 1332 - /** 1333 * Conditionally flush any pending message from firmware at start. 1334 */ 1335 static void ··· 1361 boot_type = BFI_BOOT_TYPE_NORMAL; 1362 boot_env = BFI_BOOT_LOADER_OS; 1363 1364 - /** 1365 * Flash based firmware boot BIOS env. 1366 */ 1367 if (bfa_ioc_is_bios_optrom(ioc)) { ··· 1369 boot_env = BFI_BOOT_LOADER_BIOS; 1370 } 1371 1372 - /** 1373 * Flash based firmware boot UEFI env. 1374 */ 1375 if (bfa_ioc_is_uefi(ioc)) { ··· 1377 boot_env = BFI_BOOT_LOADER_UEFI; 1378 } 1379 1380 - /** 1381 * check if firmware is valid 1382 */ 1383 fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ? ··· 1388 return; 1389 } 1390 1391 - /** 1392 * If hardware initialization is in progress (initialized by other IOC), 1393 * just wait for an initialization completion interrupt. 1394 */ ··· 1397 return; 1398 } 1399 1400 - /** 1401 * If IOC function is disabled and firmware version is same, 1402 * just re-enable IOC. 1403 * ··· 1408 if (ioc_fwstate == BFI_IOC_DISABLED || 1409 (!bfa_ioc_is_bios_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) { 1410 1411 - /** 1412 * When using MSI-X any pending firmware ready event should 1413 * be flushed. Otherwise MSI-X interrupts are not delivered. 1414 */ ··· 1418 return; 1419 } 1420 1421 - /** 1422 * Initialize the h/w for any other states. 1423 */ 1424 bfa_ioc_boot(ioc, boot_type, boot_env); ··· 1529 } 1530 1531 1532 - /** 1533 * Initiate a full firmware download. 1534 */ 1535 static void ··· 1542 u32 chunkno = 0; 1543 u32 i; 1544 1545 - /** 1546 * Initialize LMEM first before code download 1547 */ 1548 bfa_ioc_lmem_init(ioc); ··· 1563 BFA_IOC_FLASH_CHUNK_ADDR(chunkno)); 1564 } 1565 1566 - /** 1567 * write smem 1568 */ 1569 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, ··· 1571 1572 loff += sizeof(u32); 1573 1574 - /** 1575 * handle page offset wrap around 1576 */ 1577 loff = PSS_SMEM_PGOFF(loff); ··· 1598 bfa_ioc_hwinit(ioc, force); 1599 } 1600 1601 - /** 1602 * Update BFA configuration from firmware configuration. 1603 */ 1604 static void ··· 1613 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR); 1614 } 1615 1616 - /** 1617 * Attach time initialization of mbox logic. 1618 */ 1619 static void ··· 1629 } 1630 } 1631 1632 - /** 1633 * Mbox poll timer -- restarts any pending mailbox requests. 1634 */ 1635 static void ··· 1639 struct bfa_mbox_cmd_s *cmd; 1640 u32 stat; 1641 1642 - /** 1643 * If no command pending, do nothing 1644 */ 1645 if (list_empty(&mod->cmd_q)) 1646 return; 1647 1648 - /** 1649 * If previous command is not yet fetched by firmware, do nothing 1650 */ 1651 stat = readl(ioc->ioc_regs.hfn_mbox_cmd); 1652 if (stat) 1653 return; 1654 1655 - /** 1656 * Enqueue command to firmware. 1657 */ 1658 bfa_q_deq(&mod->cmd_q, &cmd); 1659 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg)); 1660 } 1661 1662 - /** 1663 * Cleanup any pending requests. 
1664 */ 1665 static void ··· 1672 bfa_q_deq(&mod->cmd_q, &cmd); 1673 } 1674 1675 - /** 1676 * Read data from SMEM to host through PCI memmap 1677 * 1678 * @param[in] ioc memory for IOC ··· 1710 buf[i] = be32_to_cpu(r32); 1711 loff += sizeof(u32); 1712 1713 - /** 1714 * handle page offset wrap around 1715 */ 1716 loff = PSS_SMEM_PGOFF(loff); ··· 1729 return BFA_STATUS_OK; 1730 } 1731 1732 - /** 1733 * Clear SMEM data from host through PCI memmap 1734 * 1735 * @param[in] ioc memory for IOC ··· 1764 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0); 1765 loff += sizeof(u32); 1766 1767 - /** 1768 * handle page offset wrap around 1769 */ 1770 loff = PSS_SMEM_PGOFF(loff); ··· 1783 return BFA_STATUS_OK; 1784 } 1785 1786 - /** 1787 * hal iocpf to ioc interface 1788 */ 1789 static void ··· 1808 bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc) 1809 { 1810 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; 1811 - /** 1812 * Provide enable completion callback. 1813 */ 1814 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); ··· 1819 1820 1821 1822 - /** 1823 * hal_ioc_public 1824 */ 1825 ··· 1843 return BFA_STATUS_OK; 1844 } 1845 1846 - /** 1847 * Interface used by diag module to do firmware boot with memory test 1848 * as the entry vector. 1849 */ ··· 1857 if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK) 1858 return; 1859 1860 - /** 1861 * Initialize IOC state of all functions on a chip reset. 1862 */ 1863 rb = ioc->pcidev.pci_bar_kva; ··· 1872 bfa_ioc_msgflush(ioc); 1873 bfa_ioc_download_fw(ioc, boot_type, boot_env); 1874 1875 - /** 1876 * Enable interrupts just before starting LPU 1877 */ 1878 ioc->cbfn->reset_cbfn(ioc->bfa); 1879 bfa_ioc_lpu_start(ioc); 1880 } 1881 1882 - /** 1883 * Enable/disable IOC failure auto recovery. 1884 */ 1885 void ··· 1913 u32 r32; 1914 int i; 1915 1916 - /** 1917 * read the MBOX msg 1918 */ 1919 for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32)); ··· 1923 msgp[i] = cpu_to_be32(r32); 1924 } 1925 1926 - /** 1927 * turn off mailbox interrupt by clearing mailbox status 1928 */ 1929 writel(1, ioc->ioc_regs.lpu_mbox_cmd); ··· 1966 } 1967 } 1968 1969 - /** 1970 * IOC attach time initialization and setup. 1971 * 1972 * @param[in] ioc memory for IOC ··· 1991 bfa_fsm_send_event(ioc, IOC_E_RESET); 1992 } 1993 1994 - /** 1995 * Driver detach time IOC cleanup. 1996 */ 1997 void ··· 2000 bfa_fsm_send_event(ioc, IOC_E_DETACH); 2001 } 2002 2003 - /** 2004 * Setup IOC PCI properties. 2005 * 2006 * @param[in] pcidev PCI device information for this IOC ··· 2014 ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id); 2015 ioc->cna = ioc->ctdev && !ioc->fcmode; 2016 2017 - /** 2018 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c 2019 */ 2020 if (ioc->ctdev) ··· 2026 bfa_ioc_reg_init(ioc); 2027 } 2028 2029 - /** 2030 * Initialize IOC dma memory 2031 * 2032 * @param[in] dm_kva kernel virtual address of IOC dma memory ··· 2035 void 2036 bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa) 2037 { 2038 - /** 2039 * dma memory for firmware attribute 2040 */ 2041 ioc->attr_dma.kva = dm_kva; ··· 2043 ioc->attr = (struct bfi_ioc_attr_s *) dm_kva; 2044 } 2045 2046 - /** 2047 * Return size of dma memory required. 2048 */ 2049 u32 ··· 2068 bfa_fsm_send_event(ioc, IOC_E_DISABLE); 2069 } 2070 2071 - /** 2072 * Returns memory required for saving firmware trace in case of crash. 2073 * Driver must call this interface to allocate memory required for 2074 * automatic saving of firmware trace. Driver should call ··· 2081 return (auto_recover) ? 
BFA_DBG_FWTRC_LEN : 0; 2082 } 2083 2084 - /** 2085 * Initialize memory for saving firmware trace. Driver must initialize 2086 * trace memory before call bfa_ioc_enable(). 2087 */ ··· 2104 return PSS_SMEM_PGOFF(fmaddr); 2105 } 2106 2107 - /** 2108 * Register mailbox message handler functions 2109 * 2110 * @param[in] ioc IOC instance ··· 2120 mod->mbhdlr[mc].cbfn = mcfuncs[mc]; 2121 } 2122 2123 - /** 2124 * Register mailbox message handler function, to be called by common modules 2125 */ 2126 void ··· 2133 mod->mbhdlr[mc].cbarg = cbarg; 2134 } 2135 2136 - /** 2137 * Queue a mailbox command request to firmware. Waits if mailbox is busy. 2138 * Responsibility of caller to serialize 2139 * ··· 2146 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; 2147 u32 stat; 2148 2149 - /** 2150 * If a previous command is pending, queue new command 2151 */ 2152 if (!list_empty(&mod->cmd_q)) { ··· 2154 return; 2155 } 2156 2157 - /** 2158 * If mailbox is busy, queue command for poll timer 2159 */ 2160 stat = readl(ioc->ioc_regs.hfn_mbox_cmd); ··· 2163 return; 2164 } 2165 2166 - /** 2167 * mailbox is free -- queue command to firmware 2168 */ 2169 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg)); 2170 } 2171 2172 - /** 2173 * Handle mailbox interrupts 2174 */ 2175 void ··· 2181 2182 bfa_ioc_msgget(ioc, &m); 2183 2184 - /** 2185 * Treat IOC message class as special. 2186 */ 2187 mc = m.mh.msg_class; ··· 2209 ioc->port_id = bfa_ioc_pcifn(ioc); 2210 } 2211 2212 - /** 2213 * return true if IOC is disabled 2214 */ 2215 bfa_boolean_t ··· 2219 bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled); 2220 } 2221 2222 - /** 2223 * return true if IOC firmware is different. 2224 */ 2225 bfa_boolean_t ··· 2238 ((__sm) == BFI_IOC_FAIL) || \ 2239 ((__sm) == BFI_IOC_CFG_DISABLED)) 2240 2241 - /** 2242 * Check if adapter is disabled -- both IOCs should be in a disabled 2243 * state. 2244 */ ··· 2264 return BFA_TRUE; 2265 } 2266 2267 - /** 2268 * Add to IOC heartbeat failure notification queue. To be used by common 2269 * modules such as cee, port, diag. 2270 */ ··· 2391 2392 ioc_attr = ioc->attr; 2393 2394 - /** 2395 * model name 2396 */ 2397 snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u", ··· 2455 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev); 2456 } 2457 2458 - /** 2459 * hal_wwn_public 2460 */ 2461 wwn_t ··· 2521 return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id); 2522 } 2523 2524 - /** 2525 * Retrieve saved firmware trace from a prior IOC failure. 2526 */ 2527 bfa_status_t ··· 2541 return BFA_STATUS_OK; 2542 } 2543 2544 - /** 2545 * Clear saved firmware trace 2546 */ 2547 void ··· 2550 ioc->dbg_fwsave_once = BFA_TRUE; 2551 } 2552 2553 - /** 2554 * Retrieve saved firmware trace from a prior IOC failure. 2555 */ 2556 bfa_status_t ··· 2590 2591 bfa_ioc_send_fwsync(ioc); 2592 2593 - /** 2594 * After sending a fw sync mbox command wait for it to 2595 * take effect. We will not wait for a response because 2596 * 1. fw_sync mbox cmd doesn't have a response. ··· 2605 fwsync_iter--; 2606 } 2607 2608 - /** 2609 * Dump firmware smem 2610 */ 2611 bfa_status_t ··· 2625 loff = *offset; 2626 dlen = *buflen; 2627 2628 - /** 2629 * First smem read, sync smem before proceeding 2630 * No need to sync before reading every chunk. 2631 */ ··· 2652 return status; 2653 } 2654 2655 - /** 2656 * Firmware statistics 2657 */ 2658 bfa_status_t ··· 2697 return status; 2698 } 2699 2700 - /** 2701 * Save firmware trace if configured. 2702 */ 2703 static void ··· 2711 } 2712 } 2713 2714 - /** 2715 * Firmware failure detected. 
Start recovery actions. 2716 */ 2717 static void ··· 2733 return; 2734 } 2735 2736 - /** 2737 * hal_iocpf_pvt BFA IOC PF private functions 2738 */ 2739 ··· 2790 bfa_ioc_hw_sem_get(ioc); 2791 } 2792 2793 - /** 2794 * bfa timer function 2795 */ 2796 void ··· 2835 } 2836 } 2837 2838 - /** 2839 * Should be called with lock protection 2840 */ 2841 void ··· 2853 list_add_tail(&timer->qe, &mod->timer_q); 2854 } 2855 2856 - /** 2857 * Should be called with lock protection 2858 */ 2859 void
··· 23 24 BFA_TRC_FILE(CNA, IOC); 25 26 + /* 27 * IOC local definitions 28 */ 29 #define BFA_IOC_TOV 3000 /* msecs */ ··· 49 BFA_TRC_MAX * sizeof(struct bfa_trc_s))) 50 #define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn)) 51 52 + /* 53 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. 54 */ 55 ··· 101 static void bfa_ioc_pf_failed(struct bfa_ioc_s *ioc); 102 static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc); 103 104 + /* 105 * hal_ioc_sm 106 */ 107 108 + /* 109 * IOC state machine definitions/declarations 110 */ 111 enum ioc_event { ··· 144 {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED}, 145 }; 146 147 + /* 148 * IOCPF state machine definitions/declarations 149 */ 150 ··· 174 static void bfa_iocpf_timeout(void *ioc_arg); 175 static void bfa_iocpf_sem_timeout(void *ioc_arg); 176 177 + /* 178 * IOCPF state machine events 179 */ 180 enum iocpf_event { ··· 191 IOCPF_E_TIMEOUT = 11, /* f/w response timeout */ 192 }; 193 194 + /* 195 * IOCPF states 196 */ 197 enum bfa_iocpf_state { ··· 232 {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED}, 233 }; 234 235 + /* 236 * IOC State Machine 237 */ 238 239 + /* 240 * Beginning state. IOC uninit state. 241 */ 242 ··· 245 { 246 } 247 248 + /* 249 * IOC is in uninit state. 250 */ 251 static void ··· 262 bfa_sm_fault(ioc, event); 263 } 264 } 265 + /* 266 * Reset entry actions -- initialize state machine 267 */ 268 static void ··· 271 bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset); 272 } 273 274 + /* 275 * IOC is in reset state. 276 */ 277 static void ··· 304 bfa_iocpf_enable(ioc); 305 } 306 307 + /* 308 * Host IOC function is being enabled, awaiting response from firmware. 309 * Semaphore is acquired. 310 */ ··· 352 bfa_ioc_send_getattr(ioc); 353 } 354 355 + /* 356 * IOC configuration in progress. Timer is active. 357 */ 358 static void ··· 447 BFA_LOG(KERN_INFO, bfad, log_level, "IOC disabled\n"); 448 } 449 450 + /* 451 * IOC is being disabled 452 */ 453 static void ··· 474 } 475 } 476 477 + /* 478 * IOC disable completion entry. 479 */ 480 static void ··· 514 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 515 } 516 517 + /* 518 * Hardware initialization failed. 519 */ 520 static void ··· 528 break; 529 530 case IOC_E_FAILED: 531 + /* 532 * Initialization failure during iocpf init retry. 533 */ 534 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); ··· 556 struct bfa_ioc_hbfail_notify_s *notify; 557 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; 558 559 + /* 560 * Notify driver and common modules registered for notification. 561 */ 562 ioc->cbfn->hbfail_cbfn(ioc->bfa); ··· 569 "Heart Beat of IOC has failed\n"); 570 } 571 572 + /* 573 * IOC failure. 574 */ 575 static void ··· 580 switch (event) { 581 582 case IOC_E_FAILED: 583 + /* 584 * Initialization failure during iocpf recovery. 585 * !!! Fall through !!! 586 */ ··· 608 609 610 611 + /* 612 * IOCPF State Machine 613 */ 614 615 616 + /* 617 * Reset entry actions -- initialize state machine 618 */ 619 static void ··· 623 iocpf->auto_recover = bfa_auto_recover; 624 } 625 626 + /* 627 * Beginning state. IOC is in reset state. 628 */ 629 static void ··· 646 } 647 } 648 649 + /* 650 * Semaphore should be acquired for version check. 651 */ 652 static void ··· 655 bfa_ioc_hw_sem_get(iocpf->ioc); 656 } 657 658 + /* 659 * Awaiting h/w semaphore to continue with version check. 660 */ 661 static void ··· 692 } 693 } 694 695 + /* 696 * Notify enable completion callback. 
697 */ 698 static void ··· 708 bfa_iocpf_timer_start(iocpf->ioc); 709 } 710 711 + /* 712 * Awaiting firmware version match. 713 */ 714 static void ··· 739 } 740 } 741 742 + /* 743 * Request for semaphore. 744 */ 745 static void ··· 748 bfa_ioc_hw_sem_get(iocpf->ioc); 749 } 750 751 + /* 752 * Awaiting semaphore for h/w initialzation. 753 */ 754 static void ··· 782 bfa_ioc_reset(iocpf->ioc, BFA_FALSE); 783 } 784 785 + /* 786 * Hardware is being initialized. Interrupts are enabled. 787 * Holding hardware semaphore lock. 788 */ ··· 839 bfa_ioc_send_enable(iocpf->ioc); 840 } 841 842 + /* 843 * Host IOC function is being enabled, awaiting response from firmware. 844 * Semaphore is acquired. 845 */ ··· 943 bfa_ioc_send_disable(iocpf->ioc); 944 } 945 946 + /* 947 * IOC is being disabled 948 */ 949 static void ··· 979 } 980 } 981 982 + /* 983 * IOC disable completion entry. 984 */ 985 static void ··· 1017 bfa_iocpf_timer_start(iocpf->ioc); 1018 } 1019 1020 + /* 1021 * Hardware initialization failed. 1022 */ 1023 static void ··· 1052 static void 1053 bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf) 1054 { 1055 + /* 1056 * Mark IOC as failed in hardware and stop firmware. 1057 */ 1058 bfa_ioc_lpu_stop(iocpf->ioc); 1059 writel(BFI_IOC_FAIL, iocpf->ioc->ioc_regs.ioc_fwstate); 1060 1061 + /* 1062 * Notify other functions on HB failure. 1063 */ 1064 bfa_ioc_notify_hbfail(iocpf->ioc); 1065 1066 + /* 1067 * Flush any queued up mailbox requests. 1068 */ 1069 bfa_ioc_mbox_hbfail(iocpf->ioc); ··· 1072 bfa_iocpf_recovery_timer_start(iocpf->ioc); 1073 } 1074 1075 + /* 1076 * IOC is in failed state. 1077 */ 1078 static void ··· 1100 1101 1102 1103 + /* 1104 * hal_ioc_pvt BFA IOC private functions 1105 */ 1106 ··· 1112 1113 ioc->cbfn->disable_cbfn(ioc->bfa); 1114 1115 + /* 1116 * Notify common modules registered for notification. 1117 */ 1118 list_for_each(qe, &ioc->hb_notify_q) { ··· 1154 { 1155 u32 r32; 1156 1157 + /* 1158 * First read to the semaphore register will return 0, subsequent reads 1159 * will return 1. Semaphore is released by writing 1 to the register 1160 */ ··· 1179 bfa_sem_timer_stop(ioc); 1180 } 1181 1182 + /* 1183 * Initialize LPU local memory (aka secondary memory / SRAM) 1184 */ 1185 static void ··· 1199 pss_ctl |= __PSS_I2C_CLK_DIV(3UL); 1200 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); 1201 1202 + /* 1203 * wait for memory initialization to be complete 1204 */ 1205 i = 0; ··· 1208 i++; 1209 } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME)); 1210 1211 + /* 1212 * If memory initialization is not successful, IOC timeout will catch 1213 * such failures. 1214 */ ··· 1224 { 1225 u32 pss_ctl; 1226 1227 + /* 1228 * Take processor out of reset. 1229 */ 1230 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); ··· 1238 { 1239 u32 pss_ctl; 1240 1241 + /* 1242 * Put processors in reset. 1243 */ 1244 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); ··· 1247 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); 1248 } 1249 1250 + /* 1251 * Get driver and firmware versions. 1252 */ 1253 void ··· 1270 } 1271 } 1272 1273 + /* 1274 * Returns TRUE if same. 1275 */ 1276 bfa_boolean_t ··· 1295 return BFA_TRUE; 1296 } 1297 1298 + /* 1299 * Return true if current running version is valid. Firmware signature and 1300 * execution context (driver/bios) must match. 
1301 */ ··· 1304 { 1305 struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr; 1306 1307 + /* 1308 * If bios/efi boot (flash based) -- return true 1309 */ 1310 if (bfa_ioc_is_bios_optrom(ioc)) ··· 1329 return bfa_ioc_fwver_cmp(ioc, &fwhdr); 1330 } 1331 1332 + /* 1333 * Conditionally flush any pending message from firmware at start. 1334 */ 1335 static void ··· 1361 boot_type = BFI_BOOT_TYPE_NORMAL; 1362 boot_env = BFI_BOOT_LOADER_OS; 1363 1364 + /* 1365 * Flash based firmware boot BIOS env. 1366 */ 1367 if (bfa_ioc_is_bios_optrom(ioc)) { ··· 1369 boot_env = BFI_BOOT_LOADER_BIOS; 1370 } 1371 1372 + /* 1373 * Flash based firmware boot UEFI env. 1374 */ 1375 if (bfa_ioc_is_uefi(ioc)) { ··· 1377 boot_env = BFI_BOOT_LOADER_UEFI; 1378 } 1379 1380 + /* 1381 * check if firmware is valid 1382 */ 1383 fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ? ··· 1388 return; 1389 } 1390 1391 + /* 1392 * If hardware initialization is in progress (initialized by other IOC), 1393 * just wait for an initialization completion interrupt. 1394 */ ··· 1397 return; 1398 } 1399 1400 + /* 1401 * If IOC function is disabled and firmware version is same, 1402 * just re-enable IOC. 1403 * ··· 1408 if (ioc_fwstate == BFI_IOC_DISABLED || 1409 (!bfa_ioc_is_bios_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) { 1410 1411 + /* 1412 * When using MSI-X any pending firmware ready event should 1413 * be flushed. Otherwise MSI-X interrupts are not delivered. 1414 */ ··· 1418 return; 1419 } 1420 1421 + /* 1422 * Initialize the h/w for any other states. 1423 */ 1424 bfa_ioc_boot(ioc, boot_type, boot_env); ··· 1529 } 1530 1531 1532 + /* 1533 * Initiate a full firmware download. 1534 */ 1535 static void ··· 1542 u32 chunkno = 0; 1543 u32 i; 1544 1545 + /* 1546 * Initialize LMEM first before code download 1547 */ 1548 bfa_ioc_lmem_init(ioc); ··· 1563 BFA_IOC_FLASH_CHUNK_ADDR(chunkno)); 1564 } 1565 1566 + /* 1567 * write smem 1568 */ 1569 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, ··· 1571 1572 loff += sizeof(u32); 1573 1574 + /* 1575 * handle page offset wrap around 1576 */ 1577 loff = PSS_SMEM_PGOFF(loff); ··· 1598 bfa_ioc_hwinit(ioc, force); 1599 } 1600 1601 + /* 1602 * Update BFA configuration from firmware configuration. 1603 */ 1604 static void ··· 1613 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR); 1614 } 1615 1616 + /* 1617 * Attach time initialization of mbox logic. 1618 */ 1619 static void ··· 1629 } 1630 } 1631 1632 + /* 1633 * Mbox poll timer -- restarts any pending mailbox requests. 1634 */ 1635 static void ··· 1639 struct bfa_mbox_cmd_s *cmd; 1640 u32 stat; 1641 1642 + /* 1643 * If no command pending, do nothing 1644 */ 1645 if (list_empty(&mod->cmd_q)) 1646 return; 1647 1648 + /* 1649 * If previous command is not yet fetched by firmware, do nothing 1650 */ 1651 stat = readl(ioc->ioc_regs.hfn_mbox_cmd); 1652 if (stat) 1653 return; 1654 1655 + /* 1656 * Enqueue command to firmware. 1657 */ 1658 bfa_q_deq(&mod->cmd_q, &cmd); 1659 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg)); 1660 } 1661 1662 + /* 1663 * Cleanup any pending requests. 
1664 */ 1665 static void ··· 1672 bfa_q_deq(&mod->cmd_q, &cmd); 1673 } 1674 1675 + /* 1676 * Read data from SMEM to host through PCI memmap 1677 * 1678 * @param[in] ioc memory for IOC ··· 1710 buf[i] = be32_to_cpu(r32); 1711 loff += sizeof(u32); 1712 1713 + /* 1714 * handle page offset wrap around 1715 */ 1716 loff = PSS_SMEM_PGOFF(loff); ··· 1729 return BFA_STATUS_OK; 1730 } 1731 1732 + /* 1733 * Clear SMEM data from host through PCI memmap 1734 * 1735 * @param[in] ioc memory for IOC ··· 1764 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0); 1765 loff += sizeof(u32); 1766 1767 + /* 1768 * handle page offset wrap around 1769 */ 1770 loff = PSS_SMEM_PGOFF(loff); ··· 1783 return BFA_STATUS_OK; 1784 } 1785 1786 + /* 1787 * hal iocpf to ioc interface 1788 */ 1789 static void ··· 1808 bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc) 1809 { 1810 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; 1811 + /* 1812 * Provide enable completion callback. 1813 */ 1814 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); ··· 1819 1820 1821 1822 + /* 1823 * hal_ioc_public 1824 */ 1825 ··· 1843 return BFA_STATUS_OK; 1844 } 1845 1846 + /* 1847 * Interface used by diag module to do firmware boot with memory test 1848 * as the entry vector. 1849 */ ··· 1857 if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK) 1858 return; 1859 1860 + /* 1861 * Initialize IOC state of all functions on a chip reset. 1862 */ 1863 rb = ioc->pcidev.pci_bar_kva; ··· 1872 bfa_ioc_msgflush(ioc); 1873 bfa_ioc_download_fw(ioc, boot_type, boot_env); 1874 1875 + /* 1876 * Enable interrupts just before starting LPU 1877 */ 1878 ioc->cbfn->reset_cbfn(ioc->bfa); 1879 bfa_ioc_lpu_start(ioc); 1880 } 1881 1882 + /* 1883 * Enable/disable IOC failure auto recovery. 1884 */ 1885 void ··· 1913 u32 r32; 1914 int i; 1915 1916 + /* 1917 * read the MBOX msg 1918 */ 1919 for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32)); ··· 1923 msgp[i] = cpu_to_be32(r32); 1924 } 1925 1926 + /* 1927 * turn off mailbox interrupt by clearing mailbox status 1928 */ 1929 writel(1, ioc->ioc_regs.lpu_mbox_cmd); ··· 1966 } 1967 } 1968 1969 + /* 1970 * IOC attach time initialization and setup. 1971 * 1972 * @param[in] ioc memory for IOC ··· 1991 bfa_fsm_send_event(ioc, IOC_E_RESET); 1992 } 1993 1994 + /* 1995 * Driver detach time IOC cleanup. 1996 */ 1997 void ··· 2000 bfa_fsm_send_event(ioc, IOC_E_DETACH); 2001 } 2002 2003 + /* 2004 * Setup IOC PCI properties. 2005 * 2006 * @param[in] pcidev PCI device information for this IOC ··· 2014 ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id); 2015 ioc->cna = ioc->ctdev && !ioc->fcmode; 2016 2017 + /* 2018 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c 2019 */ 2020 if (ioc->ctdev) ··· 2026 bfa_ioc_reg_init(ioc); 2027 } 2028 2029 + /* 2030 * Initialize IOC dma memory 2031 * 2032 * @param[in] dm_kva kernel virtual address of IOC dma memory ··· 2035 void 2036 bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa) 2037 { 2038 + /* 2039 * dma memory for firmware attribute 2040 */ 2041 ioc->attr_dma.kva = dm_kva; ··· 2043 ioc->attr = (struct bfi_ioc_attr_s *) dm_kva; 2044 } 2045 2046 + /* 2047 * Return size of dma memory required. 2048 */ 2049 u32 ··· 2068 bfa_fsm_send_event(ioc, IOC_E_DISABLE); 2069 } 2070 2071 + /* 2072 * Returns memory required for saving firmware trace in case of crash. 2073 * Driver must call this interface to allocate memory required for 2074 * automatic saving of firmware trace. Driver should call ··· 2081 return (auto_recover) ? 
BFA_DBG_FWTRC_LEN : 0; 2082 } 2083 2084 + /* 2085 * Initialize memory for saving firmware trace. Driver must initialize 2086 * trace memory before call bfa_ioc_enable(). 2087 */ ··· 2104 return PSS_SMEM_PGOFF(fmaddr); 2105 } 2106 2107 + /* 2108 * Register mailbox message handler functions 2109 * 2110 * @param[in] ioc IOC instance ··· 2120 mod->mbhdlr[mc].cbfn = mcfuncs[mc]; 2121 } 2122 2123 + /* 2124 * Register mailbox message handler function, to be called by common modules 2125 */ 2126 void ··· 2133 mod->mbhdlr[mc].cbarg = cbarg; 2134 } 2135 2136 + /* 2137 * Queue a mailbox command request to firmware. Waits if mailbox is busy. 2138 * Responsibility of caller to serialize 2139 * ··· 2146 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; 2147 u32 stat; 2148 2149 + /* 2150 * If a previous command is pending, queue new command 2151 */ 2152 if (!list_empty(&mod->cmd_q)) { ··· 2154 return; 2155 } 2156 2157 + /* 2158 * If mailbox is busy, queue command for poll timer 2159 */ 2160 stat = readl(ioc->ioc_regs.hfn_mbox_cmd); ··· 2163 return; 2164 } 2165 2166 + /* 2167 * mailbox is free -- queue command to firmware 2168 */ 2169 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg)); 2170 } 2171 2172 + /* 2173 * Handle mailbox interrupts 2174 */ 2175 void ··· 2181 2182 bfa_ioc_msgget(ioc, &m); 2183 2184 + /* 2185 * Treat IOC message class as special. 2186 */ 2187 mc = m.mh.msg_class; ··· 2209 ioc->port_id = bfa_ioc_pcifn(ioc); 2210 } 2211 2212 + /* 2213 * return true if IOC is disabled 2214 */ 2215 bfa_boolean_t ··· 2219 bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled); 2220 } 2221 2222 + /* 2223 * return true if IOC firmware is different. 2224 */ 2225 bfa_boolean_t ··· 2238 ((__sm) == BFI_IOC_FAIL) || \ 2239 ((__sm) == BFI_IOC_CFG_DISABLED)) 2240 2241 + /* 2242 * Check if adapter is disabled -- both IOCs should be in a disabled 2243 * state. 2244 */ ··· 2264 return BFA_TRUE; 2265 } 2266 2267 + /* 2268 * Add to IOC heartbeat failure notification queue. To be used by common 2269 * modules such as cee, port, diag. 2270 */ ··· 2391 2392 ioc_attr = ioc->attr; 2393 2394 + /* 2395 * model name 2396 */ 2397 snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u", ··· 2455 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev); 2456 } 2457 2458 + /* 2459 * hal_wwn_public 2460 */ 2461 wwn_t ··· 2521 return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id); 2522 } 2523 2524 + /* 2525 * Retrieve saved firmware trace from a prior IOC failure. 2526 */ 2527 bfa_status_t ··· 2541 return BFA_STATUS_OK; 2542 } 2543 2544 + /* 2545 * Clear saved firmware trace 2546 */ 2547 void ··· 2550 ioc->dbg_fwsave_once = BFA_TRUE; 2551 } 2552 2553 + /* 2554 * Retrieve saved firmware trace from a prior IOC failure. 2555 */ 2556 bfa_status_t ··· 2590 2591 bfa_ioc_send_fwsync(ioc); 2592 2593 + /* 2594 * After sending a fw sync mbox command wait for it to 2595 * take effect. We will not wait for a response because 2596 * 1. fw_sync mbox cmd doesn't have a response. ··· 2605 fwsync_iter--; 2606 } 2607 2608 + /* 2609 * Dump firmware smem 2610 */ 2611 bfa_status_t ··· 2625 loff = *offset; 2626 dlen = *buflen; 2627 2628 + /* 2629 * First smem read, sync smem before proceeding 2630 * No need to sync before reading every chunk. 2631 */ ··· 2652 return status; 2653 } 2654 2655 + /* 2656 * Firmware statistics 2657 */ 2658 bfa_status_t ··· 2697 return status; 2698 } 2699 2700 + /* 2701 * Save firmware trace if configured. 2702 */ 2703 static void ··· 2711 } 2712 } 2713 2714 + /* 2715 * Firmware failure detected. Start recovery actions. 
2716 */ 2717 static void ··· 2733 return; 2734 } 2735 2736 + /* 2737 * hal_iocpf_pvt BFA IOC PF private functions 2738 */ 2739 ··· 2790 bfa_ioc_hw_sem_get(ioc); 2791 } 2792 2793 + /* 2794 * bfa timer function 2795 */ 2796 void ··· 2835 } 2836 } 2837 2838 + /* 2839 * Should be called with lock protection 2840 */ 2841 void ··· 2853 list_add_tail(&timer->qe, &mod->timer_q); 2854 } 2855 2856 + /* 2857 * Should be called with lock protection 2858 */ 2859 void
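Note on the marker being dropped in this file: scripts/kernel-doc treats any block comment that opens with "/**" as documentation for the definition that follows and expects the name/@parameter layout shown below, so section banners and inline notes written with that opener are parsed as kernel-doc and draw warnings when they do not follow the format. A minimal sketch of the expected layout, using a made-up helper (the function name, the parameter, and the kernel u32 type are illustrative only and are not part of the driver):

	/**
	 * bfa_example_tov_ms() - convert an IOC timeout from seconds to msecs
	 * @tov: timeout, in seconds
	 *
	 * Return: the same timeout expressed in milliseconds.
	 */
	static inline u32 bfa_example_tov_ms(u32 tov)
	{
		return tov * 1000;
	}

Blocks that only head a section, such as the "IOC local definitions" banner converted above, carry none of this structure, so demoting them from "/**" to "/*" -- as every hunk in this file does -- is the correct fix rather than a loss of documentation.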
+11 -11
drivers/scsi/bfa/bfa_ioc_cb.c
··· 34 35 struct bfa_ioc_hwif_s hwif_cb; 36 37 - /** 38 * Called from bfa_ioc_attach() to map asic specific calls. 39 */ 40 void ··· 52 ioc->ioc_hwif = &hwif_cb; 53 } 54 55 - /** 56 * Return true if firmware of current driver matches the running firmware. 57 */ 58 static bfa_boolean_t ··· 66 { 67 } 68 69 - /** 70 * Notify other functions on HB failure. 71 */ 72 static void ··· 76 readl(ioc->ioc_regs.err_set); 77 } 78 79 - /** 80 * Host to LPU mailbox message addresses 81 */ 82 static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = { ··· 84 { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 } 85 }; 86 87 - /** 88 * Host <-> LPU mailbox command/status registers 89 */ 90 static struct { u32 hfn, lpu; } iocreg_mbcmd[] = { ··· 113 ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG); 114 } 115 116 - /** 117 * Host <-> LPU mailbox command/status registers 118 */ 119 ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd[pcifn].hfn; ··· 133 ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG); 134 ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG); 135 136 - /** 137 * sram memory access 138 */ 139 ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START); ··· 145 ioc->ioc_regs.err_set = (rb + ERR_SET_REG); 146 } 147 148 - /** 149 * Initialize IOC to port mapping. 150 */ 151 152 static void 153 bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc) 154 { 155 - /** 156 * For crossbow, port id is same as pci function. 157 */ 158 ioc->port_id = bfa_ioc_pcifn(ioc); ··· 160 bfa_trc(ioc, ioc->port_id); 161 } 162 163 - /** 164 * Set interrupt mode for a function: INTX or MSIX 165 */ 166 static void ··· 168 { 169 } 170 171 - /** 172 * Cleanup hw semaphore and usecnt registers 173 */ 174 static void
··· 34 35 struct bfa_ioc_hwif_s hwif_cb; 36 37 + /* 38 * Called from bfa_ioc_attach() to map asic specific calls. 39 */ 40 void ··· 52 ioc->ioc_hwif = &hwif_cb; 53 } 54 55 + /* 56 * Return true if firmware of current driver matches the running firmware. 57 */ 58 static bfa_boolean_t ··· 66 { 67 } 68 69 + /* 70 * Notify other functions on HB failure. 71 */ 72 static void ··· 76 readl(ioc->ioc_regs.err_set); 77 } 78 79 + /* 80 * Host to LPU mailbox message addresses 81 */ 82 static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = { ··· 84 { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 } 85 }; 86 87 + /* 88 * Host <-> LPU mailbox command/status registers 89 */ 90 static struct { u32 hfn, lpu; } iocreg_mbcmd[] = { ··· 113 ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG); 114 } 115 116 + /* 117 * Host <-> LPU mailbox command/status registers 118 */ 119 ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd[pcifn].hfn; ··· 133 ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG); 134 ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG); 135 136 + /* 137 * sram memory access 138 */ 139 ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START); ··· 145 ioc->ioc_regs.err_set = (rb + ERR_SET_REG); 146 } 147 148 + /* 149 * Initialize IOC to port mapping. 150 */ 151 152 static void 153 bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc) 154 { 155 + /* 156 * For crossbow, port id is same as pci function. 157 */ 158 ioc->port_id = bfa_ioc_pcifn(ioc); ··· 160 bfa_trc(ioc, ioc->port_id); 161 } 162 163 + /* 164 * Set interrupt mode for a function: INTX or MSIX 165 */ 166 static void ··· 168 { 169 } 170 171 + /* 172 * Cleanup hw semaphore and usecnt registers 173 */ 174 static void
+21 -21
drivers/scsi/bfa/bfa_ioc_ct.c
··· 34 35 struct bfa_ioc_hwif_s hwif_ct; 36 37 - /** 38 * Called from bfa_ioc_attach() to map asic specific calls. 39 */ 40 void ··· 52 ioc->ioc_hwif = &hwif_ct; 53 } 54 55 - /** 56 * Return true if firmware of current driver matches the running firmware. 57 */ 58 static bfa_boolean_t ··· 62 u32 usecnt; 63 struct bfi_ioc_image_hdr_s fwhdr; 64 65 - /** 66 * Firmware match check is relevant only for CNA. 67 */ 68 if (!ioc->cna) 69 return BFA_TRUE; 70 71 - /** 72 * If bios boot (flash based) -- do not increment usage count 73 */ 74 if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) < ··· 78 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); 79 usecnt = readl(ioc->ioc_regs.ioc_usage_reg); 80 81 - /** 82 * If usage count is 0, always return TRUE. 83 */ 84 if (usecnt == 0) { ··· 91 ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate); 92 bfa_trc(ioc, ioc_fwstate); 93 94 - /** 95 * Use count cannot be non-zero and chip in uninitialized state. 96 */ 97 bfa_assert(ioc_fwstate != BFI_IOC_UNINIT); 98 99 - /** 100 * Check if another driver with a different firmware is active 101 */ 102 bfa_ioc_fwver_get(ioc, &fwhdr); ··· 106 return BFA_FALSE; 107 } 108 109 - /** 110 * Same firmware version. Increment the reference count. 111 */ 112 usecnt++; ··· 121 { 122 u32 usecnt; 123 124 - /** 125 * Firmware lock is relevant only for CNA. 126 */ 127 if (!ioc->cna) 128 return; 129 130 - /** 131 * If bios boot (flash based) -- do not decrement usage count 132 */ 133 if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) < 134 BFA_IOC_FWIMG_MINSZ) 135 return; 136 137 - /** 138 * decrement usage count 139 */ 140 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); ··· 148 bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); 149 } 150 151 - /** 152 * Notify other functions on HB failure. 153 */ 154 static void ··· 164 } 165 } 166 167 - /** 168 * Host to LPU mailbox message addresses 169 */ 170 static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = { ··· 174 { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 } 175 }; 176 177 - /** 178 * Host <-> LPU mailbox command/status registers - port 0 179 */ 180 static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = { ··· 184 { HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT } 185 }; 186 187 - /** 188 * Host <-> LPU mailbox command/status registers - port 1 189 */ 190 static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = { ··· 236 ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG); 237 ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT); 238 239 - /** 240 * sram memory access 241 */ 242 ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START); ··· 248 ioc->ioc_regs.err_set = (rb + ERR_SET_REG); 249 } 250 251 - /** 252 * Initialize IOC to port mapping. 253 */ 254 ··· 259 void __iomem *rb = ioc->pcidev.pci_bar_kva; 260 u32 r32; 261 262 - /** 263 * For catapult, base port id on personality register and IOC type 264 */ 265 r32 = readl(rb + FNC_PERS_REG); ··· 270 bfa_trc(ioc, ioc->port_id); 271 } 272 273 - /** 274 * Set interrupt mode for a function: INTX or MSIX 275 */ 276 static void ··· 285 mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) & 286 __F0_INTX_STATUS; 287 288 - /** 289 * If already in desired mode, do not change anything 290 */ 291 if (!msix && mode) ··· 303 writel(r32, rb + FNC_PERS_REG); 304 } 305 306 - /** 307 * Cleanup hw semaphore and usecnt registers 308 */ 309 static void
··· 34 35 struct bfa_ioc_hwif_s hwif_ct; 36 37 + /* 38 * Called from bfa_ioc_attach() to map asic specific calls. 39 */ 40 void ··· 52 ioc->ioc_hwif = &hwif_ct; 53 } 54 55 + /* 56 * Return true if firmware of current driver matches the running firmware. 57 */ 58 static bfa_boolean_t ··· 62 u32 usecnt; 63 struct bfi_ioc_image_hdr_s fwhdr; 64 65 + /* 66 * Firmware match check is relevant only for CNA. 67 */ 68 if (!ioc->cna) 69 return BFA_TRUE; 70 71 + /* 72 * If bios boot (flash based) -- do not increment usage count 73 */ 74 if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) < ··· 78 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); 79 usecnt = readl(ioc->ioc_regs.ioc_usage_reg); 80 81 + /* 82 * If usage count is 0, always return TRUE. 83 */ 84 if (usecnt == 0) { ··· 91 ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate); 92 bfa_trc(ioc, ioc_fwstate); 93 94 + /* 95 * Use count cannot be non-zero and chip in uninitialized state. 96 */ 97 bfa_assert(ioc_fwstate != BFI_IOC_UNINIT); 98 99 + /* 100 * Check if another driver with a different firmware is active 101 */ 102 bfa_ioc_fwver_get(ioc, &fwhdr); ··· 106 return BFA_FALSE; 107 } 108 109 + /* 110 * Same firmware version. Increment the reference count. 111 */ 112 usecnt++; ··· 121 { 122 u32 usecnt; 123 124 + /* 125 * Firmware lock is relevant only for CNA. 126 */ 127 if (!ioc->cna) 128 return; 129 130 + /* 131 * If bios boot (flash based) -- do not decrement usage count 132 */ 133 if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) < 134 BFA_IOC_FWIMG_MINSZ) 135 return; 136 137 + /* 138 * decrement usage count 139 */ 140 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); ··· 148 bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); 149 } 150 151 + /* 152 * Notify other functions on HB failure. 153 */ 154 static void ··· 164 } 165 } 166 167 + /* 168 * Host to LPU mailbox message addresses 169 */ 170 static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = { ··· 174 { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 } 175 }; 176 177 + /* 178 * Host <-> LPU mailbox command/status registers - port 0 179 */ 180 static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = { ··· 184 { HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT } 185 }; 186 187 + /* 188 * Host <-> LPU mailbox command/status registers - port 1 189 */ 190 static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = { ··· 236 ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG); 237 ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT); 238 239 + /* 240 * sram memory access 241 */ 242 ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START); ··· 248 ioc->ioc_regs.err_set = (rb + ERR_SET_REG); 249 } 250 251 + /* 252 * Initialize IOC to port mapping. 253 */ 254 ··· 259 void __iomem *rb = ioc->pcidev.pci_bar_kva; 260 u32 r32; 261 262 + /* 263 * For catapult, base port id on personality register and IOC type 264 */ 265 r32 = readl(rb + FNC_PERS_REG); ··· 270 bfa_trc(ioc, ioc->port_id); 271 } 272 273 + /* 274 * Set interrupt mode for a function: INTX or MSIX 275 */ 276 static void ··· 285 mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) & 286 __F0_INTX_STATUS; 287 288 + /* 289 * If already in desired mode, do not change anything 290 */ 291 if (!msix && mode) ··· 303 writel(r32, rb + FNC_PERS_REG); 304 } 305 306 + /* 307 * Cleanup hw semaphore and usecnt registers 308 */ 309 static void
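Because the change is purely textual, it is easy to confirm after applying it that no stray openers remain in the converted sources; something along the lines of grep -n '/\*\*' drivers/scsi/bfa/bfa_ioc_ct.c (a verification step assumed here, not part of the patch) should return nothing for each file listed, consistent with the one-for-one +N/-N counts shown per file.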
+16 -16
drivers/scsi/bfa/bfa_port.c
··· 46 } 47 } 48 49 - /** 50 * bfa_port_enable_isr() 51 * 52 * ··· 63 port->endis_cbfn(port->endis_cbarg, status); 64 } 65 66 - /** 67 * bfa_port_disable_isr() 68 * 69 * ··· 80 port->endis_cbfn(port->endis_cbarg, status); 81 } 82 83 - /** 84 * bfa_port_get_stats_isr() 85 * 86 * ··· 112 } 113 } 114 115 - /** 116 * bfa_port_clear_stats_isr() 117 * 118 * ··· 129 port->stats_status = status; 130 port->stats_busy = BFA_FALSE; 131 132 - /** 133 * re-initialize time stamp for stats reset 134 */ 135 bfa_os_gettimeofday(&tv); ··· 141 } 142 } 143 144 - /** 145 * bfa_port_isr() 146 * 147 * ··· 189 } 190 } 191 192 - /** 193 * bfa_port_meminfo() 194 * 195 * ··· 203 return BFA_ROUNDUP(sizeof(union bfa_port_stats_u), BFA_DMA_ALIGN_SZ); 204 } 205 206 - /** 207 * bfa_port_mem_claim() 208 * 209 * ··· 220 port->stats_dma.pa = dma_pa; 221 } 222 223 - /** 224 * bfa_port_enable() 225 * 226 * Send the Port enable request to the f/w ··· 264 return BFA_STATUS_OK; 265 } 266 267 - /** 268 * bfa_port_disable() 269 * 270 * Send the Port disable request to the f/w ··· 308 return BFA_STATUS_OK; 309 } 310 311 - /** 312 * bfa_port_get_stats() 313 * 314 * Send the request to the f/w to fetch Port statistics. ··· 348 return BFA_STATUS_OK; 349 } 350 351 - /** 352 * bfa_port_clear_stats() 353 * 354 * ··· 385 return BFA_STATUS_OK; 386 } 387 388 - /** 389 * bfa_port_hbfail() 390 * 391 * ··· 415 } 416 } 417 418 - /** 419 * bfa_port_attach() 420 * 421 * ··· 449 bfa_ioc_hbfail_init(&port->hbfail, bfa_port_hbfail, port); 450 bfa_ioc_hbfail_register(port->ioc, &port->hbfail); 451 452 - /** 453 * initialize time stamp for stats reset 454 */ 455 bfa_os_gettimeofday(&tv); ··· 458 bfa_trc(port, 0); 459 } 460 461 - /** 462 * bfa_port_detach() 463 * 464 *
··· 46 } 47 } 48 49 + /* 50 * bfa_port_enable_isr() 51 * 52 * ··· 63 port->endis_cbfn(port->endis_cbarg, status); 64 } 65 66 + /* 67 * bfa_port_disable_isr() 68 * 69 * ··· 80 port->endis_cbfn(port->endis_cbarg, status); 81 } 82 83 + /* 84 * bfa_port_get_stats_isr() 85 * 86 * ··· 112 } 113 } 114 115 + /* 116 * bfa_port_clear_stats_isr() 117 * 118 * ··· 129 port->stats_status = status; 130 port->stats_busy = BFA_FALSE; 131 132 + /* 133 * re-initialize time stamp for stats reset 134 */ 135 bfa_os_gettimeofday(&tv); ··· 141 } 142 } 143 144 + /* 145 * bfa_port_isr() 146 * 147 * ··· 189 } 190 } 191 192 + /* 193 * bfa_port_meminfo() 194 * 195 * ··· 203 return BFA_ROUNDUP(sizeof(union bfa_port_stats_u), BFA_DMA_ALIGN_SZ); 204 } 205 206 + /* 207 * bfa_port_mem_claim() 208 * 209 * ··· 220 port->stats_dma.pa = dma_pa; 221 } 222 223 + /* 224 * bfa_port_enable() 225 * 226 * Send the Port enable request to the f/w ··· 264 return BFA_STATUS_OK; 265 } 266 267 + /* 268 * bfa_port_disable() 269 * 270 * Send the Port disable request to the f/w ··· 308 return BFA_STATUS_OK; 309 } 310 311 + /* 312 * bfa_port_get_stats() 313 * 314 * Send the request to the f/w to fetch Port statistics. ··· 348 return BFA_STATUS_OK; 349 } 350 351 + /* 352 * bfa_port_clear_stats() 353 * 354 * ··· 385 return BFA_STATUS_OK; 386 } 387 388 + /* 389 * bfa_port_hbfail() 390 * 391 * ··· 415 } 416 } 417 418 + /* 419 * bfa_port_attach() 420 * 421 * ··· 449 bfa_ioc_hbfail_init(&port->hbfail, bfa_port_hbfail, port); 450 bfa_ioc_hbfail_register(port->ioc, &port->hbfail); 451 452 + /* 453 * initialize time stamp for stats reset 454 */ 455 bfa_os_gettimeofday(&tv); ··· 458 bfa_trc(port, 0); 459 } 460 461 + /* 462 * bfa_port_detach() 463 * 464 *
+189 -189
drivers/scsi/bfa/bfa_svc.c
··· 29 BFA_MODULE(rport); 30 BFA_MODULE(uf); 31 32 - /** 33 * LPS related definitions 34 */ 35 #define BFA_LPS_MIN_LPORTS (1) ··· 41 #define BFA_LPS_MAX_VPORTS_SUPP_CB 255 42 #define BFA_LPS_MAX_VPORTS_SUPP_CT 190 43 44 - /** 45 * lps_pvt BFA LPS private functions 46 */ 47 ··· 55 BFA_LPS_SM_RX_CVL = 7, /* Rx clear virtual link */ 56 }; 57 58 - /** 59 * FC PORT related definitions 60 */ 61 /* ··· 67 (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE)) 68 69 70 - /** 71 * BFA port state machine events 72 */ 73 enum bfa_fcport_sm_event { ··· 82 BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */ 83 }; 84 85 - /** 86 * BFA port link notification state machine events 87 */ 88 ··· 92 BFA_FCPORT_LN_SM_NOTIFICATION = 3 /* done notification */ 93 }; 94 95 - /** 96 * RPORT related definitions 97 */ 98 #define bfa_rport_offline_cb(__rp) do { \ ··· 126 BFA_RPORT_SM_QRESUME = 9, /* space in requeue queue */ 127 }; 128 129 - /** 130 * forward declarations FCXP related functions 131 */ 132 static void __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete); ··· 138 static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, 139 struct bfi_fcxp_send_req_s *send_req); 140 141 - /** 142 * forward declarations for LPS functions 143 */ 144 static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, ··· 163 static void bfa_lps_logout_comp(struct bfa_lps_s *lps); 164 static void bfa_lps_cvl_event(struct bfa_lps_s *lps); 165 166 - /** 167 * forward declaration for LPS state machine 168 */ 169 static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event); ··· 175 static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event 176 event); 177 178 - /** 179 * forward declaration for FC Port functions 180 */ 181 static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport); ··· 193 static void bfa_fcport_stats_clr_timeout(void *cbarg); 194 static void bfa_trunk_iocdisable(struct bfa_s *bfa); 195 196 - /** 197 * forward declaration for FC PORT state machine 198 */ 199 static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport, ··· 252 }; 253 254 255 - /** 256 * forward declaration for RPORT related functions 257 */ 258 static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod); ··· 265 static void __bfa_cb_rport_offline(void *cbarg, 266 bfa_boolean_t complete); 267 268 - /** 269 * forward declaration for RPORT state machine 270 */ 271 static void bfa_rport_sm_uninit(struct bfa_rport_s *rp, ··· 295 static void bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, 296 enum bfa_rport_event event); 297 298 - /** 299 * PLOG related definitions 300 */ 301 static int ··· 461 return (bfa_boolean_t)plog->plog_enabled; 462 } 463 464 - /** 465 * fcxp_pvt BFA FCXP private functions 466 */ 467 ··· 562 mod->bfa = bfa; 563 mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs; 564 565 - /** 566 * Initialize FCXP request and response payload sizes. 567 */ 568 mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ; ··· 746 747 fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len); 748 749 - /** 750 * @todo f/w should not set residue to non-0 when everything 751 * is received. 752 */ ··· 855 } 856 } 857 858 - /** 859 * Handler to resume sending fcxp when space in available in cpe queue. 860 */ 861 static void ··· 870 bfa_fcxp_queue(fcxp, send_req); 871 } 872 873 - /** 874 * Queue fcxp send request to foimrware. 
875 */ 876 static void ··· 954 bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP)); 955 } 956 957 - /** 958 * hal_fcxp_api BFA FCXP API 959 */ 960 961 - /** 962 * Allocate an FCXP instance to send a response or to send a request 963 * that has a response. Request/response buffers are allocated by caller. 964 * ··· 1004 return fcxp; 1005 } 1006 1007 - /** 1008 * Get the internal request buffer pointer 1009 * 1010 * @param[in] fcxp BFA fcxp pointer ··· 1031 return mod->req_pld_sz; 1032 } 1033 1034 - /** 1035 * Get the internal response buffer pointer 1036 * 1037 * @param[in] fcxp BFA fcxp pointer ··· 1051 return rspbuf; 1052 } 1053 1054 - /** 1055 * Free the BFA FCXP 1056 * 1057 * @param[in] fcxp BFA fcxp pointer ··· 1068 bfa_fcxp_put(fcxp); 1069 } 1070 1071 - /** 1072 * Send a FCXP request 1073 * 1074 * @param[in] fcxp BFA fcxp pointer ··· 1102 1103 bfa_trc(bfa, fcxp->fcxp_tag); 1104 1105 - /** 1106 * setup request/response info 1107 */ 1108 reqi->bfa_rport = rport; ··· 1117 fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp; 1118 fcxp->send_cbarg = cbarg; 1119 1120 - /** 1121 * If no room in CPE queue, wait for space in request queue 1122 */ 1123 send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP); ··· 1131 bfa_fcxp_queue(fcxp, send_req); 1132 } 1133 1134 - /** 1135 * Abort a BFA FCXP 1136 * 1137 * @param[in] fcxp BFA fcxp pointer ··· 1185 void 1186 bfa_fcxp_discard(struct bfa_fcxp_s *fcxp) 1187 { 1188 - /** 1189 * If waiting for room in request queue, cancel reqq wait 1190 * and free fcxp. 1191 */ ··· 1201 1202 1203 1204 - /** 1205 * hal_fcxp_public BFA FCXP public functions 1206 */ 1207 ··· 1228 } 1229 1230 1231 - /** 1232 * BFA LPS state machine functions 1233 */ 1234 1235 - /** 1236 * Init state -- no login 1237 */ 1238 static void ··· 1284 } 1285 } 1286 1287 - /** 1288 * login is in progress -- awaiting response from firmware 1289 */ 1290 static void ··· 1326 } 1327 } 1328 1329 - /** 1330 * login pending - awaiting space in request queue 1331 */ 1332 static void ··· 1358 } 1359 } 1360 1361 - /** 1362 * login complete 1363 */ 1364 static void ··· 1399 } 1400 } 1401 1402 - /** 1403 * logout in progress - awaiting firmware response 1404 */ 1405 static void ··· 1423 } 1424 } 1425 1426 - /** 1427 * logout pending -- awaiting space in request queue 1428 */ 1429 static void ··· 1450 1451 1452 1453 - /** 1454 * lps_pvt BFA LPS private functions 1455 */ 1456 1457 - /** 1458 * return memory requirement 1459 */ 1460 static void ··· 1467 *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS; 1468 } 1469 1470 - /** 1471 * bfa module attach at initialization time 1472 */ 1473 static void ··· 1515 { 1516 } 1517 1518 - /** 1519 * IOC in disabled state -- consider all lps offline 1520 */ 1521 static void ··· 1531 } 1532 } 1533 1534 - /** 1535 * Firmware login response 1536 */ 1537 static void ··· 1578 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP); 1579 } 1580 1581 - /** 1582 * Firmware logout response 1583 */ 1584 static void ··· 1593 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP); 1594 } 1595 1596 - /** 1597 * Firmware received a Clear virtual link request (for FCoE) 1598 */ 1599 static void ··· 1607 bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL); 1608 } 1609 1610 - /** 1611 * Space is available in request queue, resume queueing request to firmware. 
1612 */ 1613 static void ··· 1618 bfa_sm_send_event(lps, BFA_LPS_SM_RESUME); 1619 } 1620 1621 - /** 1622 * lps is freed -- triggered by vport delete 1623 */ 1624 static void ··· 1631 list_add_tail(&lps->qe, &mod->lps_free_q); 1632 } 1633 1634 - /** 1635 * send login request to firmware 1636 */ 1637 static void ··· 1656 bfa_reqq_produce(lps->bfa, lps->reqq); 1657 } 1658 1659 - /** 1660 * send logout request to firmware 1661 */ 1662 static void ··· 1675 bfa_reqq_produce(lps->bfa, lps->reqq); 1676 } 1677 1678 - /** 1679 * Indirect login completion handler for non-fcs 1680 */ 1681 static void ··· 1692 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status); 1693 } 1694 1695 - /** 1696 * Login completion handler -- direct call for fcs, queue for others 1697 */ 1698 static void ··· 1710 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status); 1711 } 1712 1713 - /** 1714 * Indirect logout completion handler for non-fcs 1715 */ 1716 static void ··· 1725 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg); 1726 } 1727 1728 - /** 1729 * Logout completion handler -- direct call for fcs, queue for others 1730 */ 1731 static void ··· 1740 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg); 1741 } 1742 1743 - /** 1744 * Clear virtual link completion handler for non-fcs 1745 */ 1746 static void ··· 1756 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg); 1757 } 1758 1759 - /** 1760 * Received Clear virtual link event --direct call for fcs, 1761 * queue for others 1762 */ ··· 1776 1777 1778 1779 - /** 1780 * lps_public BFA LPS public functions 1781 */ 1782 ··· 1789 return BFA_LPS_MAX_VPORTS_SUPP_CB; 1790 } 1791 1792 - /** 1793 * Allocate a lport srvice tag. 1794 */ 1795 struct bfa_lps_s * ··· 1809 return lps; 1810 } 1811 1812 - /** 1813 * Free lport service tag. This can be called anytime after an alloc. 1814 * No need to wait for any pending login/logout completions. 1815 */ ··· 1819 bfa_sm_send_event(lps, BFA_LPS_SM_DELETE); 1820 } 1821 1822 - /** 1823 * Initiate a lport login. 1824 */ 1825 void ··· 1836 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN); 1837 } 1838 1839 - /** 1840 * Initiate a lport fdisc login. 1841 */ 1842 void ··· 1853 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN); 1854 } 1855 1856 - /** 1857 * Initiate a lport logout (flogi). 1858 */ 1859 void ··· 1862 bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT); 1863 } 1864 1865 - /** 1866 * Initiate a lport FDSIC logout. 1867 */ 1868 void ··· 1871 bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT); 1872 } 1873 1874 - /** 1875 * Discard a pending login request -- should be called only for 1876 * link down handling. 
1877 */ ··· 1881 bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE); 1882 } 1883 1884 - /** 1885 * Return lport services tag 1886 */ 1887 u8 ··· 1890 return lps->lp_tag; 1891 } 1892 1893 - /** 1894 * Return lport services tag given the pid 1895 */ 1896 u8 ··· 1909 return 0; 1910 } 1911 1912 - /** 1913 * return if fabric login indicates support for NPIV 1914 */ 1915 bfa_boolean_t ··· 1918 return lps->npiv_en; 1919 } 1920 1921 - /** 1922 * Return TRUE if attached to F-Port, else return FALSE 1923 */ 1924 bfa_boolean_t ··· 1927 return lps->fport; 1928 } 1929 1930 - /** 1931 * Return TRUE if attached to a Brocade Fabric 1932 */ 1933 bfa_boolean_t ··· 1935 { 1936 return lps->brcd_switch; 1937 } 1938 - /** 1939 * return TRUE if authentication is required 1940 */ 1941 bfa_boolean_t ··· 1950 return lps->ext_status; 1951 } 1952 1953 - /** 1954 * return port id assigned to the lport 1955 */ 1956 u32 ··· 1959 return lps->lp_pid; 1960 } 1961 1962 - /** 1963 * return port id assigned to the base lport 1964 */ 1965 u32 ··· 1970 return BFA_LPS_FROM_TAG(mod, 0)->lp_pid; 1971 } 1972 1973 - /** 1974 * Return bb_credit assigned in FLOGI response 1975 */ 1976 u16 ··· 1979 return lps->pr_bbcred; 1980 } 1981 1982 - /** 1983 * Return peer port name 1984 */ 1985 wwn_t ··· 1988 return lps->pr_pwwn; 1989 } 1990 1991 - /** 1992 * Return peer node name 1993 */ 1994 wwn_t ··· 1997 return lps->pr_nwwn; 1998 } 1999 2000 - /** 2001 * return reason code if login request is rejected 2002 */ 2003 u8 ··· 2006 return lps->lsrjt_rsn; 2007 } 2008 2009 - /** 2010 * return explanation code if login request is rejected 2011 */ 2012 u8 ··· 2015 return lps->lsrjt_expl; 2016 } 2017 2018 - /** 2019 * Return fpma/spma MAC for lport 2020 */ 2021 mac_t ··· 2024 return lps->lp_mac; 2025 } 2026 2027 - /** 2028 * LPS firmware message class handler. 2029 */ 2030 void ··· 2054 } 2055 } 2056 2057 - /** 2058 * FC PORT state machine functions 2059 */ 2060 static void ··· 2065 2066 switch (event) { 2067 case BFA_FCPORT_SM_START: 2068 - /** 2069 * Start event after IOC is configured and BFA is started. 2070 */ 2071 if (bfa_fcport_send_enable(fcport)) { ··· 2079 break; 2080 2081 case BFA_FCPORT_SM_ENABLE: 2082 - /** 2083 * Port is persistently configured to be in enabled state. Do 2084 * not change state. Port enabling is done when START event is 2085 * received. ··· 2087 break; 2088 2089 case BFA_FCPORT_SM_DISABLE: 2090 - /** 2091 * If a port is persistently configured to be disabled, the 2092 * first event will a port disable request. 2093 */ ··· 2123 break; 2124 2125 case BFA_FCPORT_SM_ENABLE: 2126 - /** 2127 * Already enable is in progress. 2128 */ 2129 break; 2130 2131 case BFA_FCPORT_SM_DISABLE: 2132 - /** 2133 * Just send disable request to firmware when room becomes 2134 * available in request queue. 2135 */ ··· 2144 2145 case BFA_FCPORT_SM_LINKUP: 2146 case BFA_FCPORT_SM_LINKDOWN: 2147 - /** 2148 * Possible to get link events when doing back-to-back 2149 * enable/disables. 2150 */ ··· 2183 break; 2184 2185 case BFA_FCPORT_SM_ENABLE: 2186 - /** 2187 * Already being enabled. 2188 */ 2189 break; ··· 2256 break; 2257 2258 case BFA_FCPORT_SM_LINKDOWN: 2259 - /** 2260 * Possible to get link down event. 2261 */ 2262 break; 2263 2264 case BFA_FCPORT_SM_ENABLE: 2265 - /** 2266 * Already enabled. 2267 */ 2268 break; ··· 2305 2306 switch (event) { 2307 case BFA_FCPORT_SM_ENABLE: 2308 - /** 2309 * Already enabled. 2310 */ 2311 break; ··· 2398 break; 2399 2400 case BFA_FCPORT_SM_DISABLE: 2401 - /** 2402 * Already being disabled. 
2403 */ 2404 break; 2405 2406 case BFA_FCPORT_SM_LINKUP: 2407 case BFA_FCPORT_SM_LINKDOWN: 2408 - /** 2409 * Possible to get link events when doing back-to-back 2410 * enable/disables. 2411 */ ··· 2452 2453 case BFA_FCPORT_SM_LINKUP: 2454 case BFA_FCPORT_SM_LINKDOWN: 2455 - /** 2456 * Possible to get link events when doing back-to-back 2457 * enable/disables. 2458 */ ··· 2482 break; 2483 2484 case BFA_FCPORT_SM_DISABLE: 2485 - /** 2486 * Already being disabled. 2487 */ 2488 break; ··· 2507 2508 case BFA_FCPORT_SM_LINKUP: 2509 case BFA_FCPORT_SM_LINKDOWN: 2510 - /** 2511 * Possible to get link events when doing back-to-back 2512 * enable/disables. 2513 */ ··· 2532 2533 switch (event) { 2534 case BFA_FCPORT_SM_START: 2535 - /** 2536 * Ignore start event for a port that is disabled. 2537 */ 2538 break; ··· 2556 break; 2557 2558 case BFA_FCPORT_SM_DISABLE: 2559 - /** 2560 * Already disabled. 2561 */ 2562 break; ··· 2586 break; 2587 2588 default: 2589 - /** 2590 * Ignore all other events. 2591 */ 2592 ; 2593 } 2594 } 2595 2596 - /** 2597 * Port is enabled. IOC is down/failed. 2598 */ 2599 static void ··· 2612 break; 2613 2614 default: 2615 - /** 2616 * Ignore all events. 2617 */ 2618 ; 2619 } 2620 } 2621 2622 - /** 2623 * Port is disabled. IOC is down/failed. 2624 */ 2625 static void ··· 2638 break; 2639 2640 default: 2641 - /** 2642 * Ignore all events. 2643 */ 2644 ; 2645 } 2646 } 2647 2648 - /** 2649 * Link state is down 2650 */ 2651 static void ··· 2665 } 2666 } 2667 2668 - /** 2669 * Link state is waiting for down notification 2670 */ 2671 static void ··· 2688 } 2689 } 2690 2691 - /** 2692 * Link state is waiting for down notification and there is a pending up 2693 */ 2694 static void ··· 2712 } 2713 } 2714 2715 - /** 2716 * Link state is up 2717 */ 2718 static void ··· 2732 } 2733 } 2734 2735 - /** 2736 * Link state is waiting for up notification 2737 */ 2738 static void ··· 2755 } 2756 } 2757 2758 - /** 2759 * Link state is waiting for up notification and there is a pending down 2760 */ 2761 static void ··· 2779 } 2780 } 2781 2782 - /** 2783 * Link state is waiting for up notification and there are pending down and up 2784 */ 2785 static void ··· 2805 2806 2807 2808 - /** 2809 * hal_port_private 2810 */ 2811 ··· 2820 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION); 2821 } 2822 2823 - /** 2824 * Send SCN notification to upper layers. 2825 * trunk - false if caller is fcport to ignore fcport event in trunked mode 2826 */ ··· 2896 bfa_meminfo_dma_phys(meminfo) = dm_pa; 2897 } 2898 2899 - /** 2900 * Memory initialization. 2901 */ 2902 static void ··· 2917 bfa_sm_set_state(fcport, bfa_fcport_sm_uninit); 2918 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn); 2919 2920 - /** 2921 * initialize time stamp for stats reset 2922 */ 2923 bfa_os_gettimeofday(&tv); 2924 fcport->stats_reset_time = tv.tv_sec; 2925 2926 - /** 2927 * initialize and set default configuration 2928 */ 2929 port_cfg->topology = BFA_PORT_TOPOLOGY_P2P; ··· 2941 { 2942 } 2943 2944 - /** 2945 * Called when IOC is ready. 2946 */ 2947 static void ··· 2950 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START); 2951 } 2952 2953 - /** 2954 * Called before IOC is stopped. 2955 */ 2956 static void ··· 2960 bfa_trunk_iocdisable(bfa); 2961 } 2962 2963 - /** 2964 * Called when IOC failure is detected. 
2965 */ 2966 static void ··· 2988 fcport->qos_attr = pevent->link_state.qos_attr; 2989 fcport->qos_vc_attr = pevent->link_state.vc_fcf.qos_vc_attr; 2990 2991 - /** 2992 * update trunk state if applicable 2993 */ 2994 if (!fcport->cfg.trunked) ··· 3008 fcport->topology = BFA_PORT_TOPOLOGY_NONE; 3009 } 3010 3011 - /** 3012 * Send port enable message to firmware. 3013 */ 3014 static bfa_boolean_t ··· 3016 { 3017 struct bfi_fcport_enable_req_s *m; 3018 3019 - /** 3020 * Increment message tag before queue check, so that responses to old 3021 * requests are discarded. 3022 */ 3023 fcport->msgtag++; 3024 3025 - /** 3026 * check for room in queue to send request now 3027 */ 3028 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); ··· 3043 bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo); 3044 bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi); 3045 3046 - /** 3047 * queue I/O message to firmware 3048 */ 3049 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); 3050 return BFA_TRUE; 3051 } 3052 3053 - /** 3054 * Send port disable message to firmware. 3055 */ 3056 static bfa_boolean_t ··· 3058 { 3059 struct bfi_fcport_req_s *m; 3060 3061 - /** 3062 * Increment message tag before queue check, so that responses to old 3063 * requests are discarded. 3064 */ 3065 fcport->msgtag++; 3066 3067 - /** 3068 * check for room in queue to send request now 3069 */ 3070 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); ··· 3078 bfa_lpuid(fcport->bfa)); 3079 m->msgtag = fcport->msgtag; 3080 3081 - /** 3082 * queue I/O message to firmware 3083 */ 3084 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); ··· 3103 struct bfa_fcport_s *fcport = port_cbarg; 3104 struct bfi_fcport_set_svc_params_req_s *m; 3105 3106 - /** 3107 * check for room in queue to send request now 3108 */ 3109 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); ··· 3116 bfa_lpuid(fcport->bfa)); 3117 m->tx_bbcredit = cpu_to_be16((u16)fcport->cfg.tx_bbcredit); 3118 3119 - /** 3120 * queue I/O message to firmware 3121 */ 3122 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); ··· 3235 if (complete) { 3236 struct bfa_timeval_s tv; 3237 3238 - /** 3239 * re-initialize time stamp for stats reset 3240 */ 3241 bfa_os_gettimeofday(&tv); ··· 3289 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); 3290 } 3291 3292 - /** 3293 * Handle trunk SCN event from firmware. 3294 */ 3295 static void ··· 3310 bfa_trc(fcport->bfa, scn->trunk_state); 3311 bfa_trc(fcport->bfa, scn->trunk_speed); 3312 3313 - /** 3314 * Save off new state for trunk attribute query 3315 */ 3316 state_prev = trunk->attr.state; ··· 3358 BFA_PL_EID_TRUNK_SCN, 0, "Trunk down"); 3359 } 3360 3361 - /** 3362 * Notify upper layers if trunk state changed. 3363 */ 3364 if ((state_prev != trunk->attr.state) || ··· 3374 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3375 int i = 0; 3376 3377 - /** 3378 * In trunked mode, notify upper layers that link is down 3379 */ 3380 if (fcport->cfg.trunked) { ··· 3398 3399 3400 3401 - /** 3402 * hal_port_public 3403 */ 3404 3405 - /** 3406 * Called to initialize port attributes 3407 */ 3408 void ··· 3410 { 3411 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3412 3413 - /** 3414 * Initialize port attributes from IOC hardware data. 3415 */ 3416 bfa_fcport_set_wwns(fcport); ··· 3424 bfa_assert(fcport->speed_sup); 3425 } 3426 3427 - /** 3428 * Firmware message handler. 3429 */ 3430 void ··· 3505 3506 3507 3508 - /** 3509 * hal_port_api 3510 */ 3511 3512 - /** 3513 * Registered callback for port events. 
3514 */ 3515 void ··· 3550 return BFA_STATUS_OK; 3551 } 3552 3553 - /** 3554 * Configure port speed. 3555 */ 3556 bfa_status_t ··· 3572 return BFA_STATUS_OK; 3573 } 3574 3575 - /** 3576 * Get current speed. 3577 */ 3578 enum bfa_port_speed ··· 3583 return fcport->speed; 3584 } 3585 3586 - /** 3587 * Configure port topology. 3588 */ 3589 bfa_status_t ··· 3608 return BFA_STATUS_OK; 3609 } 3610 3611 - /** 3612 * Get current topology. 3613 */ 3614 enum bfa_port_topology ··· 3708 bfa_fcport_send_txcredit(fcport); 3709 } 3710 3711 - /** 3712 * Get port attributes. 3713 */ 3714 ··· 3768 3769 #define BFA_FCPORT_STATS_TOV 1000 3770 3771 - /** 3772 * Fetch port statistics (FCQoS or FCoE). 3773 */ 3774 bfa_status_t ··· 3794 return BFA_STATUS_OK; 3795 } 3796 3797 - /** 3798 * Reset port statistics (FCQoS or FCoE). 3799 */ 3800 bfa_status_t ··· 3818 return BFA_STATUS_OK; 3819 } 3820 3821 - /** 3822 * Fetch FCQoS port statistics 3823 */ 3824 bfa_status_t ··· 3831 return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg); 3832 } 3833 3834 - /** 3835 * Reset FCoE port statistics 3836 */ 3837 bfa_status_t ··· 3843 return bfa_fcport_clear_stats(bfa, cbfn, cbarg); 3844 } 3845 3846 - /** 3847 * Fetch FCQoS port statistics 3848 */ 3849 bfa_status_t ··· 3856 return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg); 3857 } 3858 3859 - /** 3860 * Reset FCoE port statistics 3861 */ 3862 bfa_status_t ··· 3902 } 3903 } 3904 3905 - /** 3906 * Fetch port attributes. 3907 */ 3908 bfa_boolean_t ··· 3937 3938 if (ioc_type == BFA_IOC_TYPE_FC) { 3939 fcport->cfg.qos_enabled = on_off; 3940 - /** 3941 * Notify fcpim of the change in QoS state 3942 */ 3943 bfa_fcpim_update_ioredirect(bfa); ··· 3957 fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS; 3958 } 3959 3960 - /** 3961 * Configure default minimum ratelim speed 3962 */ 3963 bfa_status_t ··· 3978 return BFA_STATUS_OK; 3979 } 3980 3981 - /** 3982 * Get default minimum ratelim speed 3983 */ 3984 enum bfa_port_speed ··· 4093 } 4094 4095 4096 - /** 4097 * Rport State machine functions 4098 */ 4099 - /** 4100 * Beginning state, only online event expected. 4101 */ 4102 static void ··· 4149 } 4150 } 4151 4152 - /** 4153 * Waiting for rport create response from firmware. 4154 */ 4155 static void ··· 4186 } 4187 } 4188 4189 - /** 4190 * Request queue is full, awaiting queue resume to send create request. 4191 */ 4192 static void ··· 4227 } 4228 } 4229 4230 - /** 4231 * Online state - normal parking state. 4232 */ 4233 static void ··· 4295 } 4296 } 4297 4298 - /** 4299 * Firmware rport is being deleted - awaiting f/w response. 4300 */ 4301 static void ··· 4358 } 4359 } 4360 4361 - /** 4362 * Offline state. 4363 */ 4364 static void ··· 4393 } 4394 } 4395 4396 - /** 4397 * Rport is deleted, waiting for firmware response to delete. 4398 */ 4399 static void ··· 4445 } 4446 } 4447 4448 - /** 4449 * Waiting for rport create response from firmware. A delete is pending. 4450 */ 4451 static void ··· 4476 } 4477 } 4478 4479 - /** 4480 * Waiting for rport create response from firmware. Rport offline is pending. 4481 */ 4482 static void ··· 4511 } 4512 } 4513 4514 - /** 4515 * IOC h/w failed. 
4516 */ 4517 static void ··· 4551 4552 4553 4554 - /** 4555 * bfa_rport_private BFA rport private functions 4556 */ 4557 ··· 4615 rp->rport_tag = i; 4616 bfa_sm_set_state(rp, bfa_rport_sm_uninit); 4617 4618 - /** 4619 * - is unused 4620 */ 4621 if (i) ··· 4624 bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp); 4625 } 4626 4627 - /** 4628 * consume memory 4629 */ 4630 bfa_meminfo_kva(meminfo) = (u8 *) rp; ··· 4685 { 4686 struct bfi_rport_create_req_s *m; 4687 4688 - /** 4689 * check for room in queue to send request now 4690 */ 4691 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT); ··· 4706 m->vf_id = rp->rport_info.vf_id; 4707 m->cisc = rp->rport_info.cisc; 4708 4709 - /** 4710 * queue I/O message to firmware 4711 */ 4712 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT); ··· 4718 { 4719 struct bfi_rport_delete_req_s *m; 4720 4721 - /** 4722 * check for room in queue to send request now 4723 */ 4724 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT); ··· 4731 bfa_lpuid(rp->bfa)); 4732 m->fw_handle = rp->fw_handle; 4733 4734 - /** 4735 * queue I/O message to firmware 4736 */ 4737 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT); ··· 4743 { 4744 struct bfa_rport_speed_req_s *m; 4745 4746 - /** 4747 * check for room in queue to send request now 4748 */ 4749 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT); ··· 4757 m->fw_handle = rp->fw_handle; 4758 m->speed = (u8)rp->rport_info.speed; 4759 4760 - /** 4761 * queue I/O message to firmware 4762 */ 4763 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT); ··· 4766 4767 4768 4769 - /** 4770 * bfa_rport_public 4771 */ 4772 4773 - /** 4774 * Rport interrupt processing. 4775 */ 4776 void ··· 4812 4813 4814 4815 - /** 4816 * bfa_rport_api 4817 */ 4818 ··· 4847 { 4848 bfa_assert(rport_info->max_frmsz != 0); 4849 4850 - /** 4851 * Some JBODs are seen to be not setting PDU size correctly in PLOGI 4852 * responses. Default to minimum size. 4853 */ ··· 4899 } 4900 4901 4902 - /** 4903 * SGPG related functions 4904 */ 4905 4906 - /** 4907 * Compute and return memory needed by FCP(im) module. 4908 */ 4909 static void ··· 4995 4996 4997 4998 - /** 4999 * hal_sgpg_public BFA SGPG public functions 5000 */ 5001 ··· 5037 if (list_empty(&mod->sgpg_wait_q)) 5038 return; 5039 5040 - /** 5041 * satisfy as many waiting requests as possible 5042 */ 5043 do { ··· 5065 5066 wqe->nsgpg_total = wqe->nsgpg = nsgpg; 5067 5068 - /** 5069 * allocate any left to this one first 5070 */ 5071 if (mod->free_sgpgs) { 5072 - /** 5073 * no one else is waiting for SGPG 5074 */ 5075 bfa_assert(list_empty(&mod->sgpg_wait_q)); ··· 5103 wqe->cbarg = cbarg; 5104 } 5105 5106 - /** 5107 * UF related functions 5108 */ 5109 /* ··· 5171 bfa_sge_to_be(&sge[1]); 5172 } 5173 5174 - /** 5175 * advance pointer beyond consumed memory 5176 */ 5177 bfa_meminfo_kva(mi) = (u8 *) uf_bp_msg; ··· 5201 list_add_tail(&uf->qe, &ufm->uf_free_q); 5202 } 5203 5204 - /** 5205 * advance memory pointer 5206 */ 5207 bfa_meminfo_kva(mi) = (u8 *) uf; ··· 5363 5364 5365 5366 - /** 5367 * hal_uf_api 5368 */ 5369 5370 - /** 5371 * Register handler for all unsolicted recieve frames. 5372 * 5373 * @param[in] bfa BFA instance ··· 5383 ufm->cbarg = cbarg; 5384 } 5385 5386 - /** 5387 * Free an unsolicited frame back to BFA. 5388 * 5389 * @param[in] uf unsolicited frame to be freed ··· 5399 5400 5401 5402 - /** 5403 * uf_pub BFA uf module public functions 5404 */ 5405 void
··· 29 BFA_MODULE(rport); 30 BFA_MODULE(uf); 31 32 + /* 33 * LPS related definitions 34 */ 35 #define BFA_LPS_MIN_LPORTS (1) ··· 41 #define BFA_LPS_MAX_VPORTS_SUPP_CB 255 42 #define BFA_LPS_MAX_VPORTS_SUPP_CT 190 43 44 + /* 45 * lps_pvt BFA LPS private functions 46 */ 47 ··· 55 BFA_LPS_SM_RX_CVL = 7, /* Rx clear virtual link */ 56 }; 57 58 + /* 59 * FC PORT related definitions 60 */ 61 /* ··· 67 (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE)) 68 69 70 + /* 71 * BFA port state machine events 72 */ 73 enum bfa_fcport_sm_event { ··· 82 BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */ 83 }; 84 85 + /* 86 * BFA port link notification state machine events 87 */ 88 ··· 92 BFA_FCPORT_LN_SM_NOTIFICATION = 3 /* done notification */ 93 }; 94 95 + /* 96 * RPORT related definitions 97 */ 98 #define bfa_rport_offline_cb(__rp) do { \ ··· 126 BFA_RPORT_SM_QRESUME = 9, /* space in requeue queue */ 127 }; 128 129 + /* 130 * forward declarations FCXP related functions 131 */ 132 static void __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete); ··· 138 static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, 139 struct bfi_fcxp_send_req_s *send_req); 140 141 + /* 142 * forward declarations for LPS functions 143 */ 144 static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, ··· 163 static void bfa_lps_logout_comp(struct bfa_lps_s *lps); 164 static void bfa_lps_cvl_event(struct bfa_lps_s *lps); 165 166 + /* 167 * forward declaration for LPS state machine 168 */ 169 static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event); ··· 175 static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event 176 event); 177 178 + /* 179 * forward declaration for FC Port functions 180 */ 181 static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport); ··· 193 static void bfa_fcport_stats_clr_timeout(void *cbarg); 194 static void bfa_trunk_iocdisable(struct bfa_s *bfa); 195 196 + /* 197 * forward declaration for FC PORT state machine 198 */ 199 static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport, ··· 252 }; 253 254 255 + /* 256 * forward declaration for RPORT related functions 257 */ 258 static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod); ··· 265 static void __bfa_cb_rport_offline(void *cbarg, 266 bfa_boolean_t complete); 267 268 + /* 269 * forward declaration for RPORT state machine 270 */ 271 static void bfa_rport_sm_uninit(struct bfa_rport_s *rp, ··· 295 static void bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, 296 enum bfa_rport_event event); 297 298 + /* 299 * PLOG related definitions 300 */ 301 static int ··· 461 return (bfa_boolean_t)plog->plog_enabled; 462 } 463 464 + /* 465 * fcxp_pvt BFA FCXP private functions 466 */ 467 ··· 562 mod->bfa = bfa; 563 mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs; 564 565 + /* 566 * Initialize FCXP request and response payload sizes. 567 */ 568 mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ; ··· 746 747 fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len); 748 749 + /* 750 * @todo f/w should not set residue to non-0 when everything 751 * is received. 752 */ ··· 855 } 856 } 857 858 + /* 859 * Handler to resume sending fcxp when space in available in cpe queue. 860 */ 861 static void ··· 870 bfa_fcxp_queue(fcxp, send_req); 871 } 872 873 + /* 874 * Queue fcxp send request to foimrware. 
875 */ 876 static void ··· 954 bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP)); 955 } 956 957 + /* 958 * hal_fcxp_api BFA FCXP API 959 */ 960 961 + /* 962 * Allocate an FCXP instance to send a response or to send a request 963 * that has a response. Request/response buffers are allocated by caller. 964 * ··· 1004 return fcxp; 1005 } 1006 1007 + /* 1008 * Get the internal request buffer pointer 1009 * 1010 * @param[in] fcxp BFA fcxp pointer ··· 1031 return mod->req_pld_sz; 1032 } 1033 1034 + /* 1035 * Get the internal response buffer pointer 1036 * 1037 * @param[in] fcxp BFA fcxp pointer ··· 1051 return rspbuf; 1052 } 1053 1054 + /* 1055 * Free the BFA FCXP 1056 * 1057 * @param[in] fcxp BFA fcxp pointer ··· 1068 bfa_fcxp_put(fcxp); 1069 } 1070 1071 + /* 1072 * Send a FCXP request 1073 * 1074 * @param[in] fcxp BFA fcxp pointer ··· 1102 1103 bfa_trc(bfa, fcxp->fcxp_tag); 1104 1105 + /* 1106 * setup request/response info 1107 */ 1108 reqi->bfa_rport = rport; ··· 1117 fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp; 1118 fcxp->send_cbarg = cbarg; 1119 1120 + /* 1121 * If no room in CPE queue, wait for space in request queue 1122 */ 1123 send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP); ··· 1131 bfa_fcxp_queue(fcxp, send_req); 1132 } 1133 1134 + /* 1135 * Abort a BFA FCXP 1136 * 1137 * @param[in] fcxp BFA fcxp pointer ··· 1185 void 1186 bfa_fcxp_discard(struct bfa_fcxp_s *fcxp) 1187 { 1188 + /* 1189 * If waiting for room in request queue, cancel reqq wait 1190 * and free fcxp. 1191 */ ··· 1201 1202 1203 1204 + /* 1205 * hal_fcxp_public BFA FCXP public functions 1206 */ 1207 ··· 1228 } 1229 1230 1231 + /* 1232 * BFA LPS state machine functions 1233 */ 1234 1235 + /* 1236 * Init state -- no login 1237 */ 1238 static void ··· 1284 } 1285 } 1286 1287 + /* 1288 * login is in progress -- awaiting response from firmware 1289 */ 1290 static void ··· 1326 } 1327 } 1328 1329 + /* 1330 * login pending - awaiting space in request queue 1331 */ 1332 static void ··· 1358 } 1359 } 1360 1361 + /* 1362 * login complete 1363 */ 1364 static void ··· 1399 } 1400 } 1401 1402 + /* 1403 * logout in progress - awaiting firmware response 1404 */ 1405 static void ··· 1423 } 1424 } 1425 1426 + /* 1427 * logout pending -- awaiting space in request queue 1428 */ 1429 static void ··· 1450 1451 1452 1453 + /* 1454 * lps_pvt BFA LPS private functions 1455 */ 1456 1457 + /* 1458 * return memory requirement 1459 */ 1460 static void ··· 1467 *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS; 1468 } 1469 1470 + /* 1471 * bfa module attach at initialization time 1472 */ 1473 static void ··· 1515 { 1516 } 1517 1518 + /* 1519 * IOC in disabled state -- consider all lps offline 1520 */ 1521 static void ··· 1531 } 1532 } 1533 1534 + /* 1535 * Firmware login response 1536 */ 1537 static void ··· 1578 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP); 1579 } 1580 1581 + /* 1582 * Firmware logout response 1583 */ 1584 static void ··· 1593 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP); 1594 } 1595 1596 + /* 1597 * Firmware received a Clear virtual link request (for FCoE) 1598 */ 1599 static void ··· 1607 bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL); 1608 } 1609 1610 + /* 1611 * Space is available in request queue, resume queueing request to firmware. 
1612 */ 1613 static void ··· 1618 bfa_sm_send_event(lps, BFA_LPS_SM_RESUME); 1619 } 1620 1621 + /* 1622 * lps is freed -- triggered by vport delete 1623 */ 1624 static void ··· 1631 list_add_tail(&lps->qe, &mod->lps_free_q); 1632 } 1633 1634 + /* 1635 * send login request to firmware 1636 */ 1637 static void ··· 1656 bfa_reqq_produce(lps->bfa, lps->reqq); 1657 } 1658 1659 + /* 1660 * send logout request to firmware 1661 */ 1662 static void ··· 1675 bfa_reqq_produce(lps->bfa, lps->reqq); 1676 } 1677 1678 + /* 1679 * Indirect login completion handler for non-fcs 1680 */ 1681 static void ··· 1692 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status); 1693 } 1694 1695 + /* 1696 * Login completion handler -- direct call for fcs, queue for others 1697 */ 1698 static void ··· 1710 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status); 1711 } 1712 1713 + /* 1714 * Indirect logout completion handler for non-fcs 1715 */ 1716 static void ··· 1725 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg); 1726 } 1727 1728 + /* 1729 * Logout completion handler -- direct call for fcs, queue for others 1730 */ 1731 static void ··· 1740 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg); 1741 } 1742 1743 + /* 1744 * Clear virtual link completion handler for non-fcs 1745 */ 1746 static void ··· 1756 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg); 1757 } 1758 1759 + /* 1760 * Received Clear virtual link event --direct call for fcs, 1761 * queue for others 1762 */ ··· 1776 1777 1778 1779 + /* 1780 * lps_public BFA LPS public functions 1781 */ 1782 ··· 1789 return BFA_LPS_MAX_VPORTS_SUPP_CB; 1790 } 1791 1792 + /* 1793 * Allocate a lport srvice tag. 1794 */ 1795 struct bfa_lps_s * ··· 1809 return lps; 1810 } 1811 1812 + /* 1813 * Free lport service tag. This can be called anytime after an alloc. 1814 * No need to wait for any pending login/logout completions. 1815 */ ··· 1819 bfa_sm_send_event(lps, BFA_LPS_SM_DELETE); 1820 } 1821 1822 + /* 1823 * Initiate a lport login. 1824 */ 1825 void ··· 1836 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN); 1837 } 1838 1839 + /* 1840 * Initiate a lport fdisc login. 1841 */ 1842 void ··· 1853 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN); 1854 } 1855 1856 + /* 1857 * Initiate a lport logout (flogi). 1858 */ 1859 void ··· 1862 bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT); 1863 } 1864 1865 + /* 1866 * Initiate a lport FDSIC logout. 1867 */ 1868 void ··· 1871 bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT); 1872 } 1873 1874 + /* 1875 * Discard a pending login request -- should be called only for 1876 * link down handling. 
1877 */ ··· 1881 bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE); 1882 } 1883 1884 + /* 1885 * Return lport services tag 1886 */ 1887 u8 ··· 1890 return lps->lp_tag; 1891 } 1892 1893 + /* 1894 * Return lport services tag given the pid 1895 */ 1896 u8 ··· 1909 return 0; 1910 } 1911 1912 + /* 1913 * return if fabric login indicates support for NPIV 1914 */ 1915 bfa_boolean_t ··· 1918 return lps->npiv_en; 1919 } 1920 1921 + /* 1922 * Return TRUE if attached to F-Port, else return FALSE 1923 */ 1924 bfa_boolean_t ··· 1927 return lps->fport; 1928 } 1929 1930 + /* 1931 * Return TRUE if attached to a Brocade Fabric 1932 */ 1933 bfa_boolean_t ··· 1935 { 1936 return lps->brcd_switch; 1937 } 1938 + /* 1939 * return TRUE if authentication is required 1940 */ 1941 bfa_boolean_t ··· 1950 return lps->ext_status; 1951 } 1952 1953 + /* 1954 * return port id assigned to the lport 1955 */ 1956 u32 ··· 1959 return lps->lp_pid; 1960 } 1961 1962 + /* 1963 * return port id assigned to the base lport 1964 */ 1965 u32 ··· 1970 return BFA_LPS_FROM_TAG(mod, 0)->lp_pid; 1971 } 1972 1973 + /* 1974 * Return bb_credit assigned in FLOGI response 1975 */ 1976 u16 ··· 1979 return lps->pr_bbcred; 1980 } 1981 1982 + /* 1983 * Return peer port name 1984 */ 1985 wwn_t ··· 1988 return lps->pr_pwwn; 1989 } 1990 1991 + /* 1992 * Return peer node name 1993 */ 1994 wwn_t ··· 1997 return lps->pr_nwwn; 1998 } 1999 2000 + /* 2001 * return reason code if login request is rejected 2002 */ 2003 u8 ··· 2006 return lps->lsrjt_rsn; 2007 } 2008 2009 + /* 2010 * return explanation code if login request is rejected 2011 */ 2012 u8 ··· 2015 return lps->lsrjt_expl; 2016 } 2017 2018 + /* 2019 * Return fpma/spma MAC for lport 2020 */ 2021 mac_t ··· 2024 return lps->lp_mac; 2025 } 2026 2027 + /* 2028 * LPS firmware message class handler. 2029 */ 2030 void ··· 2054 } 2055 } 2056 2057 + /* 2058 * FC PORT state machine functions 2059 */ 2060 static void ··· 2065 2066 switch (event) { 2067 case BFA_FCPORT_SM_START: 2068 + /* 2069 * Start event after IOC is configured and BFA is started. 2070 */ 2071 if (bfa_fcport_send_enable(fcport)) { ··· 2079 break; 2080 2081 case BFA_FCPORT_SM_ENABLE: 2082 + /* 2083 * Port is persistently configured to be in enabled state. Do 2084 * not change state. Port enabling is done when START event is 2085 * received. ··· 2087 break; 2088 2089 case BFA_FCPORT_SM_DISABLE: 2090 + /* 2091 * If a port is persistently configured to be disabled, the 2092 * first event will a port disable request. 2093 */ ··· 2123 break; 2124 2125 case BFA_FCPORT_SM_ENABLE: 2126 + /* 2127 * Already enable is in progress. 2128 */ 2129 break; 2130 2131 case BFA_FCPORT_SM_DISABLE: 2132 + /* 2133 * Just send disable request to firmware when room becomes 2134 * available in request queue. 2135 */ ··· 2144 2145 case BFA_FCPORT_SM_LINKUP: 2146 case BFA_FCPORT_SM_LINKDOWN: 2147 + /* 2148 * Possible to get link events when doing back-to-back 2149 * enable/disables. 2150 */ ··· 2183 break; 2184 2185 case BFA_FCPORT_SM_ENABLE: 2186 + /* 2187 * Already being enabled. 2188 */ 2189 break; ··· 2256 break; 2257 2258 case BFA_FCPORT_SM_LINKDOWN: 2259 + /* 2260 * Possible to get link down event. 2261 */ 2262 break; 2263 2264 case BFA_FCPORT_SM_ENABLE: 2265 + /* 2266 * Already enabled. 2267 */ 2268 break; ··· 2305 2306 switch (event) { 2307 case BFA_FCPORT_SM_ENABLE: 2308 + /* 2309 * Already enabled. 2310 */ 2311 break; ··· 2398 break; 2399 2400 case BFA_FCPORT_SM_DISABLE: 2401 + /* 2402 * Already being disabled. 
2403 */ 2404 break; 2405 2406 case BFA_FCPORT_SM_LINKUP: 2407 case BFA_FCPORT_SM_LINKDOWN: 2408 + /* 2409 * Possible to get link events when doing back-to-back 2410 * enable/disables. 2411 */ ··· 2452 2453 case BFA_FCPORT_SM_LINKUP: 2454 case BFA_FCPORT_SM_LINKDOWN: 2455 + /* 2456 * Possible to get link events when doing back-to-back 2457 * enable/disables. 2458 */ ··· 2482 break; 2483 2484 case BFA_FCPORT_SM_DISABLE: 2485 + /* 2486 * Already being disabled. 2487 */ 2488 break; ··· 2507 2508 case BFA_FCPORT_SM_LINKUP: 2509 case BFA_FCPORT_SM_LINKDOWN: 2510 + /* 2511 * Possible to get link events when doing back-to-back 2512 * enable/disables. 2513 */ ··· 2532 2533 switch (event) { 2534 case BFA_FCPORT_SM_START: 2535 + /* 2536 * Ignore start event for a port that is disabled. 2537 */ 2538 break; ··· 2556 break; 2557 2558 case BFA_FCPORT_SM_DISABLE: 2559 + /* 2560 * Already disabled. 2561 */ 2562 break; ··· 2586 break; 2587 2588 default: 2589 + /* 2590 * Ignore all other events. 2591 */ 2592 ; 2593 } 2594 } 2595 2596 + /* 2597 * Port is enabled. IOC is down/failed. 2598 */ 2599 static void ··· 2612 break; 2613 2614 default: 2615 + /* 2616 * Ignore all events. 2617 */ 2618 ; 2619 } 2620 } 2621 2622 + /* 2623 * Port is disabled. IOC is down/failed. 2624 */ 2625 static void ··· 2638 break; 2639 2640 default: 2641 + /* 2642 * Ignore all events. 2643 */ 2644 ; 2645 } 2646 } 2647 2648 + /* 2649 * Link state is down 2650 */ 2651 static void ··· 2665 } 2666 } 2667 2668 + /* 2669 * Link state is waiting for down notification 2670 */ 2671 static void ··· 2688 } 2689 } 2690 2691 + /* 2692 * Link state is waiting for down notification and there is a pending up 2693 */ 2694 static void ··· 2712 } 2713 } 2714 2715 + /* 2716 * Link state is up 2717 */ 2718 static void ··· 2732 } 2733 } 2734 2735 + /* 2736 * Link state is waiting for up notification 2737 */ 2738 static void ··· 2755 } 2756 } 2757 2758 + /* 2759 * Link state is waiting for up notification and there is a pending down 2760 */ 2761 static void ··· 2779 } 2780 } 2781 2782 + /* 2783 * Link state is waiting for up notification and there are pending down and up 2784 */ 2785 static void ··· 2805 2806 2807 2808 + /* 2809 * hal_port_private 2810 */ 2811 ··· 2820 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION); 2821 } 2822 2823 + /* 2824 * Send SCN notification to upper layers. 2825 * trunk - false if caller is fcport to ignore fcport event in trunked mode 2826 */ ··· 2896 bfa_meminfo_dma_phys(meminfo) = dm_pa; 2897 } 2898 2899 + /* 2900 * Memory initialization. 2901 */ 2902 static void ··· 2917 bfa_sm_set_state(fcport, bfa_fcport_sm_uninit); 2918 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn); 2919 2920 + /* 2921 * initialize time stamp for stats reset 2922 */ 2923 bfa_os_gettimeofday(&tv); 2924 fcport->stats_reset_time = tv.tv_sec; 2925 2926 + /* 2927 * initialize and set default configuration 2928 */ 2929 port_cfg->topology = BFA_PORT_TOPOLOGY_P2P; ··· 2941 { 2942 } 2943 2944 + /* 2945 * Called when IOC is ready. 2946 */ 2947 static void ··· 2950 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START); 2951 } 2952 2953 + /* 2954 * Called before IOC is stopped. 2955 */ 2956 static void ··· 2960 bfa_trunk_iocdisable(bfa); 2961 } 2962 2963 + /* 2964 * Called when IOC failure is detected. 
2965 */ 2966 static void ··· 2988 fcport->qos_attr = pevent->link_state.qos_attr; 2989 fcport->qos_vc_attr = pevent->link_state.vc_fcf.qos_vc_attr; 2990 2991 + /* 2992 * update trunk state if applicable 2993 */ 2994 if (!fcport->cfg.trunked) ··· 3008 fcport->topology = BFA_PORT_TOPOLOGY_NONE; 3009 } 3010 3011 + /* 3012 * Send port enable message to firmware. 3013 */ 3014 static bfa_boolean_t ··· 3016 { 3017 struct bfi_fcport_enable_req_s *m; 3018 3019 + /* 3020 * Increment message tag before queue check, so that responses to old 3021 * requests are discarded. 3022 */ 3023 fcport->msgtag++; 3024 3025 + /* 3026 * check for room in queue to send request now 3027 */ 3028 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); ··· 3043 bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo); 3044 bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi); 3045 3046 + /* 3047 * queue I/O message to firmware 3048 */ 3049 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); 3050 return BFA_TRUE; 3051 } 3052 3053 + /* 3054 * Send port disable message to firmware. 3055 */ 3056 static bfa_boolean_t ··· 3058 { 3059 struct bfi_fcport_req_s *m; 3060 3061 + /* 3062 * Increment message tag before queue check, so that responses to old 3063 * requests are discarded. 3064 */ 3065 fcport->msgtag++; 3066 3067 + /* 3068 * check for room in queue to send request now 3069 */ 3070 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); ··· 3078 bfa_lpuid(fcport->bfa)); 3079 m->msgtag = fcport->msgtag; 3080 3081 + /* 3082 * queue I/O message to firmware 3083 */ 3084 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); ··· 3103 struct bfa_fcport_s *fcport = port_cbarg; 3104 struct bfi_fcport_set_svc_params_req_s *m; 3105 3106 + /* 3107 * check for room in queue to send request now 3108 */ 3109 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); ··· 3116 bfa_lpuid(fcport->bfa)); 3117 m->tx_bbcredit = cpu_to_be16((u16)fcport->cfg.tx_bbcredit); 3118 3119 + /* 3120 * queue I/O message to firmware 3121 */ 3122 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); ··· 3235 if (complete) { 3236 struct bfa_timeval_s tv; 3237 3238 + /* 3239 * re-initialize time stamp for stats reset 3240 */ 3241 bfa_os_gettimeofday(&tv); ··· 3289 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); 3290 } 3291 3292 + /* 3293 * Handle trunk SCN event from firmware. 3294 */ 3295 static void ··· 3310 bfa_trc(fcport->bfa, scn->trunk_state); 3311 bfa_trc(fcport->bfa, scn->trunk_speed); 3312 3313 + /* 3314 * Save off new state for trunk attribute query 3315 */ 3316 state_prev = trunk->attr.state; ··· 3358 BFA_PL_EID_TRUNK_SCN, 0, "Trunk down"); 3359 } 3360 3361 + /* 3362 * Notify upper layers if trunk state changed. 3363 */ 3364 if ((state_prev != trunk->attr.state) || ··· 3374 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3375 int i = 0; 3376 3377 + /* 3378 * In trunked mode, notify upper layers that link is down 3379 */ 3380 if (fcport->cfg.trunked) { ··· 3398 3399 3400 3401 + /* 3402 * hal_port_public 3403 */ 3404 3405 + /* 3406 * Called to initialize port attributes 3407 */ 3408 void ··· 3410 { 3411 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3412 3413 + /* 3414 * Initialize port attributes from IOC hardware data. 3415 */ 3416 bfa_fcport_set_wwns(fcport); ··· 3424 bfa_assert(fcport->speed_sup); 3425 } 3426 3427 + /* 3428 * Firmware message handler. 3429 */ 3430 void ··· 3505 3506 3507 3508 + /* 3509 * hal_port_api 3510 */ 3511 3512 + /* 3513 * Registered callback for port events. 3514 */ 3515 void ··· 3550 return BFA_STATUS_OK; 3551 } 3552 3553 + /* 3554 * Configure port speed. 
3555 */ 3556 bfa_status_t ··· 3572 return BFA_STATUS_OK; 3573 } 3574 3575 + /* 3576 * Get current speed. 3577 */ 3578 enum bfa_port_speed ··· 3583 return fcport->speed; 3584 } 3585 3586 + /* 3587 * Configure port topology. 3588 */ 3589 bfa_status_t ··· 3608 return BFA_STATUS_OK; 3609 } 3610 3611 + /* 3612 * Get current topology. 3613 */ 3614 enum bfa_port_topology ··· 3708 bfa_fcport_send_txcredit(fcport); 3709 } 3710 3711 + /* 3712 * Get port attributes. 3713 */ 3714 ··· 3768 3769 #define BFA_FCPORT_STATS_TOV 1000 3770 3771 + /* 3772 * Fetch port statistics (FCQoS or FCoE). 3773 */ 3774 bfa_status_t ··· 3794 return BFA_STATUS_OK; 3795 } 3796 3797 + /* 3798 * Reset port statistics (FCQoS or FCoE). 3799 */ 3800 bfa_status_t ··· 3818 return BFA_STATUS_OK; 3819 } 3820 3821 + /* 3822 * Fetch FCQoS port statistics 3823 */ 3824 bfa_status_t ··· 3831 return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg); 3832 } 3833 3834 + /* 3835 * Reset FCoE port statistics 3836 */ 3837 bfa_status_t ··· 3843 return bfa_fcport_clear_stats(bfa, cbfn, cbarg); 3844 } 3845 3846 + /* 3847 * Fetch FCQoS port statistics 3848 */ 3849 bfa_status_t ··· 3856 return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg); 3857 } 3858 3859 + /* 3860 * Reset FCoE port statistics 3861 */ 3862 bfa_status_t ··· 3902 } 3903 } 3904 3905 + /* 3906 * Fetch port attributes. 3907 */ 3908 bfa_boolean_t ··· 3937 3938 if (ioc_type == BFA_IOC_TYPE_FC) { 3939 fcport->cfg.qos_enabled = on_off; 3940 + /* 3941 * Notify fcpim of the change in QoS state 3942 */ 3943 bfa_fcpim_update_ioredirect(bfa); ··· 3957 fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS; 3958 } 3959 3960 + /* 3961 * Configure default minimum ratelim speed 3962 */ 3963 bfa_status_t ··· 3978 return BFA_STATUS_OK; 3979 } 3980 3981 + /* 3982 * Get default minimum ratelim speed 3983 */ 3984 enum bfa_port_speed ··· 4093 } 4094 4095 4096 + /* 4097 * Rport State machine functions 4098 */ 4099 + /* 4100 * Beginning state, only online event expected. 4101 */ 4102 static void ··· 4149 } 4150 } 4151 4152 + /* 4153 * Waiting for rport create response from firmware. 4154 */ 4155 static void ··· 4186 } 4187 } 4188 4189 + /* 4190 * Request queue is full, awaiting queue resume to send create request. 4191 */ 4192 static void ··· 4227 } 4228 } 4229 4230 + /* 4231 * Online state - normal parking state. 4232 */ 4233 static void ··· 4295 } 4296 } 4297 4298 + /* 4299 * Firmware rport is being deleted - awaiting f/w response. 4300 */ 4301 static void ··· 4358 } 4359 } 4360 4361 + /* 4362 * Offline state. 4363 */ 4364 static void ··· 4393 } 4394 } 4395 4396 + /* 4397 * Rport is deleted, waiting for firmware response to delete. 4398 */ 4399 static void ··· 4445 } 4446 } 4447 4448 + /* 4449 * Waiting for rport create response from firmware. A delete is pending. 4450 */ 4451 static void ··· 4476 } 4477 } 4478 4479 + /* 4480 * Waiting for rport create response from firmware. Rport offline is pending. 4481 */ 4482 static void ··· 4511 } 4512 } 4513 4514 + /* 4515 * IOC h/w failed. 
4516 */ 4517 static void ··· 4551 4552 4553 4554 + /* 4555 * bfa_rport_private BFA rport private functions 4556 */ 4557 ··· 4615 rp->rport_tag = i; 4616 bfa_sm_set_state(rp, bfa_rport_sm_uninit); 4617 4618 + /* 4619 * - is unused 4620 */ 4621 if (i) ··· 4624 bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp); 4625 } 4626 4627 + /* 4628 * consume memory 4629 */ 4630 bfa_meminfo_kva(meminfo) = (u8 *) rp; ··· 4685 { 4686 struct bfi_rport_create_req_s *m; 4687 4688 + /* 4689 * check for room in queue to send request now 4690 */ 4691 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT); ··· 4706 m->vf_id = rp->rport_info.vf_id; 4707 m->cisc = rp->rport_info.cisc; 4708 4709 + /* 4710 * queue I/O message to firmware 4711 */ 4712 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT); ··· 4718 { 4719 struct bfi_rport_delete_req_s *m; 4720 4721 + /* 4722 * check for room in queue to send request now 4723 */ 4724 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT); ··· 4731 bfa_lpuid(rp->bfa)); 4732 m->fw_handle = rp->fw_handle; 4733 4734 + /* 4735 * queue I/O message to firmware 4736 */ 4737 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT); ··· 4743 { 4744 struct bfa_rport_speed_req_s *m; 4745 4746 + /* 4747 * check for room in queue to send request now 4748 */ 4749 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT); ··· 4757 m->fw_handle = rp->fw_handle; 4758 m->speed = (u8)rp->rport_info.speed; 4759 4760 + /* 4761 * queue I/O message to firmware 4762 */ 4763 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT); ··· 4766 4767 4768 4769 + /* 4770 * bfa_rport_public 4771 */ 4772 4773 + /* 4774 * Rport interrupt processing. 4775 */ 4776 void ··· 4812 4813 4814 4815 + /* 4816 * bfa_rport_api 4817 */ 4818 ··· 4847 { 4848 bfa_assert(rport_info->max_frmsz != 0); 4849 4850 + /* 4851 * Some JBODs are seen to be not setting PDU size correctly in PLOGI 4852 * responses. Default to minimum size. 4853 */ ··· 4899 } 4900 4901 4902 + /* 4903 * SGPG related functions 4904 */ 4905 4906 + /* 4907 * Compute and return memory needed by FCP(im) module. 4908 */ 4909 static void ··· 4995 4996 4997 4998 + /* 4999 * hal_sgpg_public BFA SGPG public functions 5000 */ 5001 ··· 5037 if (list_empty(&mod->sgpg_wait_q)) 5038 return; 5039 5040 + /* 5041 * satisfy as many waiting requests as possible 5042 */ 5043 do { ··· 5065 5066 wqe->nsgpg_total = wqe->nsgpg = nsgpg; 5067 5068 + /* 5069 * allocate any left to this one first 5070 */ 5071 if (mod->free_sgpgs) { 5072 + /* 5073 * no one else is waiting for SGPG 5074 */ 5075 bfa_assert(list_empty(&mod->sgpg_wait_q)); ··· 5103 wqe->cbarg = cbarg; 5104 } 5105 5106 + /* 5107 * UF related functions 5108 */ 5109 /* ··· 5171 bfa_sge_to_be(&sge[1]); 5172 } 5173 5174 + /* 5175 * advance pointer beyond consumed memory 5176 */ 5177 bfa_meminfo_kva(mi) = (u8 *) uf_bp_msg; ··· 5201 list_add_tail(&uf->qe, &ufm->uf_free_q); 5202 } 5203 5204 + /* 5205 * advance memory pointer 5206 */ 5207 bfa_meminfo_kva(mi) = (u8 *) uf; ··· 5363 5364 5365 5366 + /* 5367 * hal_uf_api 5368 */ 5369 5370 + /* 5371 * Register handler for all unsolicted recieve frames. 5372 * 5373 * @param[in] bfa BFA instance ··· 5383 ufm->cbarg = cbarg; 5384 } 5385 5386 + /* 5387 * Free an unsolicited frame back to BFA. 5388 * 5389 * @param[in] uf unsolicited frame to be freed ··· 5399 5400 5401 5402 + /* 5403 * uf_pub BFA uf module public functions 5404 */ 5405 void
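The port and rport send helpers in the hunks above (port enable/disable, tx-credit, and the rport create/delete/speed requests) all follow the same request-queue pattern, but the branch taken when the queue is full sits inside the collapsed context. The sketch below reconstructs the overall shape around a hypothetical bfa_example_send(); the bfa_reqq_wait() call and the fcport->reqq_wait member are assumptions inferred from the bfa_reqq_winit() setup visible in the rport attach hunk, not lines taken from this patch.

/*
 * Hypothetical reqq send helper -- "example" names are illustrative only.
 */
static bfa_boolean_t
bfa_example_send(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		/*
		 * assumed: park on the reqq wait list so a qresume callback
		 * registered with bfa_reqq_winit() can retry the send once
		 * space frees up
		 */
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT, &fcport->reqq_wait);
		return BFA_FALSE;
	}

	/*
	 * fill in the BFI message header and payload (elided in the hunks)
	 */
	m->msgtag = fcport->msgtag;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
	return BFA_TRUE;
}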
+16 -16
drivers/scsi/bfa/bfad.c
··· 15 * General Public License for more details. 16 */ 17 18 - /** 19 * bfad.c Linux driver PCI interface module. 20 */ 21 #include <linux/module.h> ··· 151 static void 152 bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event); 153 154 - /** 155 * Beginning state for the driver instance, awaiting the pci_probe event 156 */ 157 static void ··· 181 } 182 } 183 184 - /** 185 * Driver Instance is created, awaiting event INIT to initialize the bfad 186 */ 187 static void ··· 364 } 365 } 366 367 - /** 368 * BFA callbacks 369 */ 370 void ··· 376 complete(&fcomp->comp); 377 } 378 379 - /** 380 * bfa_init callback 381 */ 382 void ··· 401 complete(&bfad->comp); 402 } 403 404 - /** 405 * BFA_FCS callbacks 406 */ 407 struct bfad_port_s * ··· 457 } 458 } 459 460 - /** 461 * FCS RPORT alloc callback, after successful PLOGI by FCS 462 */ 463 bfa_status_t ··· 478 return rc; 479 } 480 481 - /** 482 * FCS PBC VPORT Create 483 */ 484 void ··· 663 return rc; 664 } 665 666 - /** 667 * Create a vport under a vf. 668 */ 669 bfa_status_t ··· 1140 return 0; 1141 } 1142 1143 - /** 1144 * BFA driver interrupt functions 1145 */ 1146 irqreturn_t ··· 1199 return IRQ_HANDLED; 1200 } 1201 1202 - /** 1203 * Initialize the MSIX entry table. 1204 */ 1205 static void ··· 1252 return 0; 1253 } 1254 1255 - /** 1256 * Setup MSIX based interrupt. 1257 */ 1258 int ··· 1333 } 1334 } 1335 1336 - /** 1337 * PCI probe entry. 1338 */ 1339 int ··· 1419 return error; 1420 } 1421 1422 - /** 1423 * PCI remove entry. 1424 */ 1425 void ··· 1500 .remove = __devexit_p(bfad_pci_remove), 1501 }; 1502 1503 - /** 1504 * Driver module init. 1505 */ 1506 static int __init ··· 1540 return error; 1541 } 1542 1543 - /** 1544 * Driver module exit. 1545 */ 1546 static void __exit
··· 15 * General Public License for more details. 16 */ 17 18 + /* 19 * bfad.c Linux driver PCI interface module. 20 */ 21 #include <linux/module.h> ··· 151 static void 152 bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event); 153 154 + /* 155 * Beginning state for the driver instance, awaiting the pci_probe event 156 */ 157 static void ··· 181 } 182 } 183 184 + /* 185 * Driver Instance is created, awaiting event INIT to initialize the bfad 186 */ 187 static void ··· 364 } 365 } 366 367 + /* 368 * BFA callbacks 369 */ 370 void ··· 376 complete(&fcomp->comp); 377 } 378 379 + /* 380 * bfa_init callback 381 */ 382 void ··· 401 complete(&bfad->comp); 402 } 403 404 + /* 405 * BFA_FCS callbacks 406 */ 407 struct bfad_port_s * ··· 457 } 458 } 459 460 + /* 461 * FCS RPORT alloc callback, after successful PLOGI by FCS 462 */ 463 bfa_status_t ··· 478 return rc; 479 } 480 481 + /* 482 * FCS PBC VPORT Create 483 */ 484 void ··· 663 return rc; 664 } 665 666 + /* 667 * Create a vport under a vf. 668 */ 669 bfa_status_t ··· 1140 return 0; 1141 } 1142 1143 + /* 1144 * BFA driver interrupt functions 1145 */ 1146 irqreturn_t ··· 1199 return IRQ_HANDLED; 1200 } 1201 1202 + /* 1203 * Initialize the MSIX entry table. 1204 */ 1205 static void ··· 1252 return 0; 1253 } 1254 1255 + /* 1256 * Setup MSIX based interrupt. 1257 */ 1258 int ··· 1333 } 1334 } 1335 1336 + /* 1337 * PCI probe entry. 1338 */ 1339 int ··· 1419 return error; 1420 } 1421 1422 + /* 1423 * PCI remove entry. 1424 */ 1425 void ··· 1500 .remove = __devexit_p(bfad_pci_remove), 1501 }; 1502 1503 + /* 1504 * Driver module init. 1505 */ 1506 static int __init ··· 1540 return error; 1541 } 1542 1543 + /* 1544 * Driver module exit. 1545 */ 1546 static void __exit
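The bfad.c hunks end at the pci_driver definition, of which only the .remove initializer is still visible, and the comment headers above it label the PCI probe/remove entries and the module init/exit routines. As a reference for how those pieces conventionally fit together, here is a minimal, self-contained sketch built from hypothetical example_* names and an illustrative PCI ID; it shows the usual wiring, not the driver's actual code.

#include <linux/module.h>
#include <linux/pci.h>

/* hypothetical ID table; a real driver lists its supported devices here */
static const struct pci_device_id example_pci_ids[] = {
	{ PCI_DEVICE(0x1657, 0x0013) },		/* illustrative IDs only */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, example_pci_ids);

/*
 * PCI probe entry.
 */
static int __devinit
example_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return pci_enable_device(pdev);
}

/*
 * PCI remove entry.
 */
static void __devexit
example_pci_remove(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static struct pci_driver example_pci_driver = {
	.name		= "example",
	.id_table	= example_pci_ids,
	.probe		= example_pci_probe,
	.remove		= __devexit_p(example_pci_remove),
};

/*
 * Driver module init.
 */
static int __init
example_init(void)
{
	return pci_register_driver(&example_pci_driver);
}

/*
 * Driver module exit.
 */
static void __exit
example_exit(void)
{
	pci_unregister_driver(&example_pci_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");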
+15 -15
drivers/scsi/bfa/bfad_attr.c
··· 15 * General Public License for more details. 16 */ 17 18 - /** 19 * bfa_attr.c Linux driver configuration interface module. 20 */ 21 22 #include "bfad_drv.h" 23 #include "bfad_im.h" 24 25 - /** 26 * FC transport template entry, get SCSI target port ID. 27 */ 28 void ··· 48 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 49 } 50 51 - /** 52 * FC transport template entry, get SCSI target nwwn. 53 */ 54 void ··· 74 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 75 } 76 77 - /** 78 * FC transport template entry, get SCSI target pwwn. 79 */ 80 void ··· 100 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 101 } 102 103 - /** 104 * FC transport template entry, get SCSI host port ID. 105 */ 106 void ··· 114 bfa_os_hton3b(bfa_fcs_lport_get_fcid(port->fcs_port)); 115 } 116 117 - /** 118 * FC transport template entry, get SCSI host port type. 119 */ 120 static void ··· 146 } 147 } 148 149 - /** 150 * FC transport template entry, get SCSI host port state. 151 */ 152 static void ··· 183 } 184 } 185 186 - /** 187 * FC transport template entry, get SCSI host active fc4s. 188 */ 189 static void ··· 202 fc_host_active_fc4s(shost)[7] = 1; 203 } 204 205 - /** 206 * FC transport template entry, get SCSI host link speed. 207 */ 208 static void ··· 236 } 237 } 238 239 - /** 240 * FC transport template entry, get SCSI host port type. 241 */ 242 static void ··· 253 254 } 255 256 - /** 257 * FC transport template entry, get BFAD statistics. 258 */ 259 static struct fc_host_statistics * ··· 304 return hstats; 305 } 306 307 - /** 308 * FC transport template entry, reset BFAD statistics. 309 */ 310 static void ··· 331 return; 332 } 333 334 - /** 335 * FC transport template entry, get rport loss timeout. 336 */ 337 static void ··· 347 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 348 } 349 350 - /** 351 * FC transport template entry, set rport loss timeout. 352 */ 353 static void ··· 633 .set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo, 634 }; 635 636 - /** 637 * Scsi_Host_attrs SCSI host attributes 638 */ 639 static ssize_t
··· 15 * General Public License for more details. 16 */ 17 18 + /* 19 * bfa_attr.c Linux driver configuration interface module. 20 */ 21 22 #include "bfad_drv.h" 23 #include "bfad_im.h" 24 25 + /* 26 * FC transport template entry, get SCSI target port ID. 27 */ 28 void ··· 48 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 49 } 50 51 + /* 52 * FC transport template entry, get SCSI target nwwn. 53 */ 54 void ··· 74 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 75 } 76 77 + /* 78 * FC transport template entry, get SCSI target pwwn. 79 */ 80 void ··· 100 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 101 } 102 103 + /* 104 * FC transport template entry, get SCSI host port ID. 105 */ 106 void ··· 114 bfa_os_hton3b(bfa_fcs_lport_get_fcid(port->fcs_port)); 115 } 116 117 + /* 118 * FC transport template entry, get SCSI host port type. 119 */ 120 static void ··· 146 } 147 } 148 149 + /* 150 * FC transport template entry, get SCSI host port state. 151 */ 152 static void ··· 183 } 184 } 185 186 + /* 187 * FC transport template entry, get SCSI host active fc4s. 188 */ 189 static void ··· 202 fc_host_active_fc4s(shost)[7] = 1; 203 } 204 205 + /* 206 * FC transport template entry, get SCSI host link speed. 207 */ 208 static void ··· 236 } 237 } 238 239 + /* 240 * FC transport template entry, get SCSI host port type. 241 */ 242 static void ··· 253 254 } 255 256 + /* 257 * FC transport template entry, get BFAD statistics. 258 */ 259 static struct fc_host_statistics * ··· 304 return hstats; 305 } 306 307 + /* 308 * FC transport template entry, reset BFAD statistics. 309 */ 310 static void ··· 331 return; 332 } 333 334 + /* 335 * FC transport template entry, get rport loss timeout. 336 */ 337 static void ··· 347 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 348 } 349 350 + /* 351 * FC transport template entry, set rport loss timeout. 352 */ 353 static void ··· 633 .set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo, 634 }; 635 636 + /* 637 * Scsi_Host_attrs SCSI host attributes 638 */ 639 static ssize_t
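The bfad_attr.c portion closes with the Scsi_Host_attrs block, cut off right after its static ssize_t opener. A host attribute of this kind is normally a sysfs show routine plus a DEVICE_ATTR declaration, collected into a NULL-terminated array that the host template points at through its shost_attrs field. The sketch below only illustrates that shape; the example_* names and the placeholder output string are hypothetical, not the driver's attributes.

#include <linux/device.h>
#include <scsi/scsi_host.h>

/* hypothetical show routine; a real entry would report adapter state */
static ssize_t
example_attr_show(struct device *dev, struct device_attribute *attr,
		  char *buf)
{
	return snprintf(buf, PAGE_SIZE, "example\n");
}

static DEVICE_ATTR(example_attr, S_IRUGO, example_attr_show, NULL);

/* NULL-terminated list referenced from the host template's .shost_attrs */
struct device_attribute *example_host_attrs[] = {
	&dev_attr_example_attr,
	NULL,
};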
+19 -19
drivers/scsi/bfa/bfad_im.c
··· 15 * General Public License for more details. 16 */ 17 18 - /** 19 * bfad_im.c Linux driver IM module. 20 */ 21 ··· 164 wake_up(wq); 165 } 166 167 - /** 168 * Scsi_Host_template SCSI host template 169 */ 170 - /** 171 * Scsi_Host template entry, returns BFAD PCI info. 172 */ 173 static const char * ··· 196 return bfa_buf; 197 } 198 199 - /** 200 * Scsi_Host template entry, aborts the specified SCSI command. 201 * 202 * Returns: SUCCESS or FAILED. ··· 280 return rc; 281 } 282 283 - /** 284 * Scsi_Host template entry, resets a LUN and abort its all commands. 285 * 286 * Returns: SUCCESS or FAILED. ··· 319 goto out; 320 } 321 322 - /** 323 * Set host_scribble to NULL to avoid aborting a task command 324 * if happens. 325 */ ··· 346 return rc; 347 } 348 349 - /** 350 * Scsi_Host template entry, resets the bus and abort all commands. 351 */ 352 static int ··· 396 return SUCCESS; 397 } 398 399 - /** 400 * Scsi_Host template entry slave_destroy. 401 */ 402 static void ··· 406 return; 407 } 408 409 - /** 410 * BFA FCS itnim callbacks 411 */ 412 413 - /** 414 * BFA FCS itnim alloc callback, after successful PRLI 415 * Context: Interrupt 416 */ ··· 433 bfad->bfad_flags |= BFAD_RPORT_ONLINE; 434 } 435 436 - /** 437 * BFA FCS itnim free callback. 438 * Context: Interrupt. bfad_lock is held 439 */ ··· 471 queue_work(im->drv_workq, &itnim_drv->itnim_work); 472 } 473 474 - /** 475 * BFA FCS itnim online callback. 476 * Context: Interrupt. bfad_lock is held 477 */ ··· 492 queue_work(im->drv_workq, &itnim_drv->itnim_work); 493 } 494 495 - /** 496 * BFA FCS itnim offline callback. 497 * Context: Interrupt. bfad_lock is held 498 */ ··· 519 queue_work(im->drv_workq, &itnim_drv->itnim_work); 520 } 521 522 - /** 523 * Allocate a Scsi_Host for a port. 524 */ 525 int ··· 751 return BFA_STATUS_OK; 752 } 753 754 - /** 755 * Scsi_Host template entry. 756 * 757 * Description: ··· 896 return NULL; 897 } 898 899 - /** 900 * Scsi_Host template entry slave_alloc 901 */ 902 static int ··· 973 sprintf(fc_host_symbolic_name(host), "%s", symname); 974 975 fc_host_supported_speeds(host) = bfad_im_supported_speeds(&bfad->bfa); 976 - fc_host_maxframe_size(host) = fcport->cfg.maxfrsize; 977 } 978 979 static void ··· 1016 return; 1017 } 1018 1019 - /** 1020 * Work queue handler using FC transport service 1021 * Context: kernel 1022 */ ··· 1116 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1117 } 1118 1119 - /** 1120 * Scsi_Host template entry, queue a SCSI command to the BFAD. 1121 */ 1122 static int
··· 15 * General Public License for more details. 16 */ 17 18 + /* 19 * bfad_im.c Linux driver IM module. 20 */ 21 ··· 164 wake_up(wq); 165 } 166 167 + /* 168 * Scsi_Host_template SCSI host template 169 */ 170 + /* 171 * Scsi_Host template entry, returns BFAD PCI info. 172 */ 173 static const char * ··· 196 return bfa_buf; 197 } 198 199 + /* 200 * Scsi_Host template entry, aborts the specified SCSI command. 201 * 202 * Returns: SUCCESS or FAILED. ··· 280 return rc; 281 } 282 283 + /* 284 * Scsi_Host template entry, resets a LUN and abort its all commands. 285 * 286 * Returns: SUCCESS or FAILED. ··· 319 goto out; 320 } 321 322 + /* 323 * Set host_scribble to NULL to avoid aborting a task command 324 * if happens. 325 */ ··· 346 return rc; 347 } 348 349 + /* 350 * Scsi_Host template entry, resets the bus and abort all commands. 351 */ 352 static int ··· 396 return SUCCESS; 397 } 398 399 + /* 400 * Scsi_Host template entry slave_destroy. 401 */ 402 static void ··· 406 return; 407 } 408 409 + /* 410 * BFA FCS itnim callbacks 411 */ 412 413 + /* 414 * BFA FCS itnim alloc callback, after successful PRLI 415 * Context: Interrupt 416 */ ··· 433 bfad->bfad_flags |= BFAD_RPORT_ONLINE; 434 } 435 436 + /* 437 * BFA FCS itnim free callback. 438 * Context: Interrupt. bfad_lock is held 439 */ ··· 471 queue_work(im->drv_workq, &itnim_drv->itnim_work); 472 } 473 474 + /* 475 * BFA FCS itnim online callback. 476 * Context: Interrupt. bfad_lock is held 477 */ ··· 492 queue_work(im->drv_workq, &itnim_drv->itnim_work); 493 } 494 495 + /* 496 * BFA FCS itnim offline callback. 497 * Context: Interrupt. bfad_lock is held 498 */ ··· 519 queue_work(im->drv_workq, &itnim_drv->itnim_work); 520 } 521 522 + /* 523 * Allocate a Scsi_Host for a port. 524 */ 525 int ··· 751 return BFA_STATUS_OK; 752 } 753 754 + /* 755 * Scsi_Host template entry. 756 * 757 * Description: ··· 896 return NULL; 897 } 898 899 + /* 900 * Scsi_Host template entry slave_alloc 901 */ 902 static int ··· 973 sprintf(fc_host_symbolic_name(host), "%s", symname); 974 975 fc_host_supported_speeds(host) = bfad_im_supported_speeds(&bfad->bfa); 976 + fc_host_maxframe_size(host) = fcport->cfg.maxfrsize; 977 } 978 979 static void ··· 1016 return; 1017 } 1018 1019 + /* 1020 * Work queue handler using FC transport service 1021 * Context: kernel 1022 */ ··· 1116 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1117 } 1118 1119 + /* 1120 * Scsi_Host template entry, queue a SCSI command to the BFAD. 1121 */ 1122 static int
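bfad_im.c is organized around the Scsi_Host template whose individual entries (the info string, command abort, LUN and bus reset, slave_alloc/slave_destroy, and queuecommand) are what the comment headers above name. For reference, a compact hypothetical sketch of such a template follows; the example_* callbacks are stubs, the two-argument queuecommand prototype is the form used by kernels of this era, and none of it is the driver's real template. A real driver would pass the template to scsi_host_alloc() when creating each host.

#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

/*
 * Scsi_Host template entry, returns an adapter info string.
 */
static const char *
example_info(struct Scsi_Host *shost)
{
	return "example adapter";
}

/*
 * Scsi_Host template entry, aborts the specified SCSI command.
 */
static int
example_abort(struct scsi_cmnd *cmnd)
{
	return SUCCESS;			/* stub: report the abort as done */
}

/*
 * Scsi_Host template entry slave_alloc.
 */
static int
example_slave_alloc(struct scsi_device *sdev)
{
	return 0;
}

/*
 * Scsi_Host template entry slave_destroy.
 */
static void
example_slave_destroy(struct scsi_device *sdev)
{
}

/*
 * Scsi_Host template entry, queue a SCSI command (two-argument prototype
 * of this kernel vintage). The stub completes every command immediately.
 */
static int
example_queuecommand(struct scsi_cmnd *cmnd, void (*done)(struct scsi_cmnd *))
{
	cmnd->result = DID_NO_CONNECT << 16;
	done(cmnd);
	return 0;
}

static struct scsi_host_template example_sht = {
	.module			= THIS_MODULE,
	.name			= "example",
	.info			= example_info,
	.queuecommand		= example_queuecommand,
	.eh_abort_handler	= example_abort,
	.slave_alloc		= example_slave_alloc,
	.slave_destroy		= example_slave_destroy,
	.this_id		= -1,
	.sg_tablesize		= 32,
	.cmd_per_lun		= 3,
};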