Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[SCSI] bfa: Add diagnostic port (D-Port) support

- Introduced support for D-Port, which is a new port mode during which
link-level diagnostics can be run.
- Provided mechanism to dynamically configure D-Port and initiate diagnostic
tests to isolate any link level issues.
- In D-Port mode, the HBA port does not participate in the fabric, log in to the
remote device, or run data traffic.
- Diagnostic tests include running various loopback tests in conjunction with
the attached device.

Signed-off-by: Krishna Gudipati <kgudipat@brocade.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>

authored by

Krishna Gudipati and committed by
James Bottomley
e353546e 1306e31d

+658 -3
+16
drivers/scsi/bfa/bfa_defs.h
··· 165 165 BFA_STATUS_MEMTEST_FAILED = 90, /* Memory test failed contact support */ 166 166 BFA_STATUS_LEDTEST_OP = 109, /* LED test is operating */ 167 167 BFA_STATUS_INVALID_MAC = 134, /* Invalid MAC address */ 168 + BFA_STATUS_CMD_NOTSUPP_CNA = 146, /* Command not supported for CNA */ 168 169 BFA_STATUS_PBC = 154, /* Operation not allowed for pre-boot 169 170 * configuration */ 170 171 BFA_STATUS_BAD_FWCFG = 156, /* Bad firmware configuration */ ··· 190 189 BFA_STATUS_TOPOLOGY_LOOP = 230, /* Topology is set to Loop */ 191 190 BFA_STATUS_LOOP_UNSUPP_MEZZ = 231, /* Loop topology is not supported 192 191 * on mezz cards */ 192 + BFA_STATUS_DPORT_ENABLED = 235, /* D-port mode is already enabled */ 193 + BFA_STATUS_DPORT_DISABLED = 236, /* D-port mode is already disabled */ 194 + BFA_STATUS_CMD_NOTSUPP_MEZZ = 239, /* Cmd not supported for MEZZ card */ 195 + BFA_STATUS_DPORT_ERR = 245, /* D-port mode is enabled */ 193 196 BFA_STATUS_MAX_VAL /* Unknown error code */ 194 197 }; 195 198 #define bfa_status_t enum bfa_status ··· 510 505 wwn_t pwwn; 511 506 u16 ioc_type; 512 507 mac_t mac; 508 + }; 509 + 510 + /* 511 + * D-port states 512 + * 513 + */ 514 + enum bfa_dport_state { 515 + BFA_DPORT_ST_DISABLED = 0, /* D-port is Disabled */ 516 + BFA_DPORT_ST_DISABLING = 1, /* D-port is Disabling */ 517 + BFA_DPORT_ST_ENABLING = 2, /* D-port is Enabling */ 518 + BFA_DPORT_ST_ENABLED = 3, /* D-port is Enabled */ 513 519 }; 514 520 515 521 /*
+1
drivers/scsi/bfa/bfa_defs_svc.h
··· 722 722 BFA_PORT_ST_PREBOOT_DISABLED = 13, 723 723 BFA_PORT_ST_TOGGLING_QWAIT = 14, 724 724 BFA_PORT_ST_ACQ_ADDR = 15, 725 + BFA_PORT_ST_DPORT = 16, 725 726 BFA_PORT_ST_MAX_STATE, 726 727 }; 727 728
+32
drivers/scsi/bfa/bfa_port.c
··· 250 250 return BFA_STATUS_IOC_FAILURE; 251 251 } 252 252 253 + /* if port is d-port enabled, return error */ 254 + if (port->dport_enabled) { 255 + bfa_trc(port, BFA_STATUS_DPORT_ERR); 256 + return BFA_STATUS_DPORT_ERR; 257 + } 258 + 253 259 if (port->endis_pending) { 254 260 bfa_trc(port, BFA_STATUS_DEVBUSY); 255 261 return BFA_STATUS_DEVBUSY; ··· 304 298 if (!bfa_ioc_is_operational(port->ioc)) { 305 299 bfa_trc(port, BFA_STATUS_IOC_FAILURE); 306 300 return BFA_STATUS_IOC_FAILURE; 301 + } 302 + 303 + /* if port is d-port enabled, return error */ 304 + if (port->dport_enabled) { 305 + bfa_trc(port, BFA_STATUS_DPORT_ERR); 306 + return BFA_STATUS_DPORT_ERR; 307 307 } 308 308 309 309 if (port->endis_pending) { ··· 443 431 port->endis_cbfn = NULL; 444 432 port->endis_pending = BFA_FALSE; 445 433 } 434 + 435 + /* clear D-port mode */ 436 + if (port->dport_enabled) 437 + bfa_port_set_dportenabled(port, BFA_FALSE); 446 438 break; 447 439 default: 448 440 break; ··· 483 467 port->stats_cbfn = NULL; 484 468 port->endis_cbfn = NULL; 485 469 port->pbc_disabled = BFA_FALSE; 470 + port->dport_enabled = BFA_FALSE; 486 471 487 472 bfa_ioc_mbox_regisr(port->ioc, BFI_MC_PORT, bfa_port_isr, port); 488 473 bfa_q_qe_init(&port->ioc_notify); ··· 497 480 port->stats_reset_time = tv.tv_sec; 498 481 499 482 bfa_trc(port, 0); 483 + } 484 + 485 + /* 486 + * bfa_port_set_dportenabled(); 487 + * 488 + * Port module- set pbc disabled flag 489 + * 490 + * @param[in] port - Pointer to the Port module data structure 491 + * 492 + * @return void 493 + */ 494 + void 495 + bfa_port_set_dportenabled(struct bfa_port_s *port, bfa_boolean_t enabled) 496 + { 497 + port->dport_enabled = enabled; 500 498 } 501 499 502 500 /*
+3
drivers/scsi/bfa/bfa_port.h
··· 45 45 bfa_status_t endis_status; 46 46 struct bfa_ioc_notify_s ioc_notify; 47 47 bfa_boolean_t pbc_disabled; 48 + bfa_boolean_t dport_enabled; 48 49 struct bfa_mem_dma_s port_dma; 49 50 }; 50 51 ··· 67 66 u32 bfa_port_meminfo(void); 68 67 void bfa_port_mem_claim(struct bfa_port_s *port, 69 68 u8 *dma_kva, u64 dma_pa); 69 + void bfa_port_set_dportenabled(struct bfa_port_s *port, 70 + bfa_boolean_t enabled); 70 71 71 72 /* 72 73 * CEE declaration
+502 -3
drivers/scsi/bfa/bfa_svc.c
··· 67 67 BFA_FCPORT_SM_LINKDOWN = 7, /* firmware linkup down */ 68 68 BFA_FCPORT_SM_QRESUME = 8, /* CQ space available */ 69 69 BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */ 70 + BFA_FCPORT_SM_DPORTENABLE = 10, /* enable dport */ 71 + BFA_FCPORT_SM_DPORTDISABLE = 11,/* disable dport */ 70 72 }; 71 73 72 74 /* ··· 199 197 enum bfa_fcport_sm_event event); 200 198 static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport, 201 199 enum bfa_fcport_sm_event event); 200 + static void bfa_fcport_sm_dport(struct bfa_fcport_s *fcport, 201 + enum bfa_fcport_sm_event event); 202 202 203 203 static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln, 204 204 enum bfa_fcport_ln_sm_event event); ··· 230 226 {BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED}, 231 227 {BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN}, 232 228 {BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN}, 229 + {BFA_SM(bfa_fcport_sm_dport), BFA_PORT_ST_DPORT}, 233 230 }; 234 231 235 232 ··· 2611 2606 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); 2612 2607 break; 2613 2608 2609 + case BFA_FCPORT_SM_DPORTENABLE: 2610 + bfa_sm_set_state(fcport, bfa_fcport_sm_dport); 2611 + break; 2612 + 2614 2613 default: 2615 2614 bfa_sm_fault(fcport->bfa, event); 2616 2615 } ··· 2692 2683 * Ignore all events. 
2693 2684 */ 2694 2685 ; 2686 + } 2687 + } 2688 + 2689 + static void 2690 + bfa_fcport_sm_dport(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event) 2691 + { 2692 + bfa_trc(fcport->bfa, event); 2693 + 2694 + switch (event) { 2695 + case BFA_FCPORT_SM_DPORTENABLE: 2696 + case BFA_FCPORT_SM_DISABLE: 2697 + case BFA_FCPORT_SM_ENABLE: 2698 + case BFA_FCPORT_SM_START: 2699 + /* 2700 + * Ignore event for a port that is dport 2701 + */ 2702 + break; 2703 + 2704 + case BFA_FCPORT_SM_STOP: 2705 + bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); 2706 + break; 2707 + 2708 + case BFA_FCPORT_SM_HWFAIL: 2709 + bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); 2710 + break; 2711 + 2712 + case BFA_FCPORT_SM_DPORTDISABLE: 2713 + bfa_sm_set_state(fcport, bfa_fcport_sm_disabled); 2714 + break; 2715 + 2716 + default: 2717 + bfa_sm_fault(fcport->bfa, event); 2695 2718 } 2696 2719 } 2697 2720 ··· 3748 3707 return BFA_STATUS_UNSUPP_SPEED; 3749 3708 if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) 3750 3709 return BFA_STATUS_LOOP_UNSUPP_MEZZ; 3710 + if (bfa_fcport_is_dport(bfa) != BFA_FALSE) 3711 + return BFA_STATUS_DPORT_ERR; 3751 3712 break; 3752 3713 3753 3714 case BFA_PORT_TOPOLOGY_AUTO: ··· 4006 3963 } 4007 3964 4008 3965 bfa_boolean_t 3966 + bfa_fcport_is_dport(struct bfa_s *bfa) 3967 + { 3968 + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3969 + 3970 + return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) == 3971 + BFA_PORT_ST_DPORT); 3972 + } 3973 + 3974 + bfa_boolean_t 4009 3975 bfa_fcport_is_ratelim(struct bfa_s *bfa) 4010 3976 { 4011 3977 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); ··· 4089 4037 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 4090 4038 4091 4039 return fcport->cfg.trunked; 4040 + } 4041 + 4042 + void 4043 + bfa_fcport_dportenable(struct bfa_s *bfa) 4044 + { 4045 + /* 4046 + * Assume caller check for port is in disable state 4047 + */ 4048 + bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTENABLE); 4049 + 
bfa_port_set_dportenabled(&bfa->modules.port, BFA_TRUE); 4050 + } 4051 + 4052 + void 4053 + bfa_fcport_dportdisable(struct bfa_s *bfa) 4054 + { 4055 + /* 4056 + * Assume caller check for port is in disable state 4057 + */ 4058 + bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTDISABLE); 4059 + bfa_port_set_dportenabled(&bfa->modules.port, BFA_FALSE); 4092 4060 } 4093 4061 4094 4062 /* ··· 5493 5421 } 5494 5422 5495 5423 /* 5424 + * Dport forward declaration 5425 + */ 5426 + 5427 + /* 5428 + * BFA DPORT state machine events 5429 + */ 5430 + enum bfa_dport_sm_event { 5431 + BFA_DPORT_SM_ENABLE = 1, /* dport enable event */ 5432 + BFA_DPORT_SM_DISABLE = 2, /* dport disable event */ 5433 + BFA_DPORT_SM_FWRSP = 3, /* fw enable/disable rsp */ 5434 + BFA_DPORT_SM_QRESUME = 4, /* CQ space available */ 5435 + BFA_DPORT_SM_HWFAIL = 5, /* IOC h/w failure */ 5436 + }; 5437 + 5438 + static void bfa_dport_sm_disabled(struct bfa_dport_s *dport, 5439 + enum bfa_dport_sm_event event); 5440 + static void bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport, 5441 + enum bfa_dport_sm_event event); 5442 + static void bfa_dport_sm_enabling(struct bfa_dport_s *dport, 5443 + enum bfa_dport_sm_event event); 5444 + static void bfa_dport_sm_enabled(struct bfa_dport_s *dport, 5445 + enum bfa_dport_sm_event event); 5446 + static void bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport, 5447 + enum bfa_dport_sm_event event); 5448 + static void bfa_dport_sm_disabling(struct bfa_dport_s *dport, 5449 + enum bfa_dport_sm_event event); 5450 + static void bfa_dport_qresume(void *cbarg); 5451 + static void bfa_dport_req_comp(struct bfa_dport_s *dport, 5452 + bfi_diag_dport_rsp_t *msg); 5453 + 5454 + /* 5496 5455 * BFA fcdiag module 5497 5456 */ 5498 5457 #define BFA_DIAG_QTEST_TOV 1000 /* msec */ ··· 5553 5450 struct bfa_pcidev_s *pcidev) 5554 5451 { 5555 5452 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); 5453 + struct bfa_dport_s *dport = &fcdiag->dport; 5454 + 5556 5455 fcdiag->bfa 
= bfa; 5557 5456 fcdiag->trcmod = bfa->trcmod; 5558 5457 /* The common DIAG attach bfa_diag_attach() will do all memory claim */ 5458 + dport->bfa = bfa; 5459 + bfa_sm_set_state(dport, bfa_dport_sm_disabled); 5460 + bfa_reqq_winit(&dport->reqq_wait, bfa_dport_qresume, dport); 5461 + dport->cbfn = NULL; 5462 + dport->cbarg = NULL; 5559 5463 } 5560 5464 5561 5465 static void 5562 5466 bfa_fcdiag_iocdisable(struct bfa_s *bfa) 5563 5467 { 5564 5468 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); 5469 + struct bfa_dport_s *dport = &fcdiag->dport; 5470 + 5565 5471 bfa_trc(fcdiag, fcdiag->lb.lock); 5566 5472 if (fcdiag->lb.lock) { 5567 5473 fcdiag->lb.status = BFA_STATUS_IOC_FAILURE; ··· 5578 5466 fcdiag->lb.lock = 0; 5579 5467 bfa_fcdiag_set_busy_status(fcdiag); 5580 5468 } 5469 + 5470 + bfa_sm_send_event(dport, BFA_DPORT_SM_HWFAIL); 5581 5471 } 5582 5472 5583 5473 static void ··· 5764 5650 case BFI_DIAG_I2H_QTEST: 5765 5651 bfa_fcdiag_queuetest_comp(fcdiag, (bfi_diag_qtest_rsp_t *)msg); 5766 5652 break; 5653 + case BFI_DIAG_I2H_DPORT: 5654 + bfa_dport_req_comp(&fcdiag->dport, (bfi_diag_dport_rsp_t *)msg); 5655 + break; 5767 5656 default: 5768 5657 bfa_trc(fcdiag, msg->mhdr.msg_id); 5769 5658 WARN_ON(1); ··· 5836 5719 } 5837 5720 } 5838 5721 5722 + /* 5723 + * For CT2, 1G is not supported 5724 + */ 5725 + if ((speed == BFA_PORT_SPEED_1GBPS) && 5726 + (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id))) { 5727 + bfa_trc(fcdiag, speed); 5728 + return BFA_STATUS_UNSUPP_SPEED; 5729 + } 5730 + 5839 5731 /* For Mezz card, port speed entered needs to be checked */ 5840 5732 if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) { 5841 5733 if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) { 5842 - if ((speed == BFA_PORT_SPEED_1GBPS) && 5843 - (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id))) 5844 - return BFA_STATUS_UNSUPP_SPEED; 5845 5734 if (!(speed == BFA_PORT_SPEED_1GBPS || 5846 5735 speed == BFA_PORT_SPEED_2GBPS || 5847 5736 speed == BFA_PORT_SPEED_4GBPS || ··· 5959 5836 { 5960 5837 
struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); 5961 5838 return fcdiag->lb.lock ? BFA_STATUS_DIAG_BUSY : BFA_STATUS_OK; 5839 + } 5840 + 5841 + /* 5842 + * D-port 5843 + */ 5844 + static bfa_boolean_t bfa_dport_send_req(struct bfa_dport_s *dport, 5845 + enum bfi_dport_req req); 5846 + static void 5847 + bfa_cb_fcdiag_dport(struct bfa_dport_s *dport, bfa_status_t bfa_status) 5848 + { 5849 + if (dport->cbfn != NULL) { 5850 + dport->cbfn(dport->cbarg, bfa_status); 5851 + dport->cbfn = NULL; 5852 + dport->cbarg = NULL; 5853 + } 5854 + } 5855 + 5856 + static void 5857 + bfa_dport_sm_disabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event) 5858 + { 5859 + bfa_trc(dport->bfa, event); 5860 + 5861 + switch (event) { 5862 + case BFA_DPORT_SM_ENABLE: 5863 + bfa_fcport_dportenable(dport->bfa); 5864 + if (bfa_dport_send_req(dport, BFI_DPORT_ENABLE)) 5865 + bfa_sm_set_state(dport, bfa_dport_sm_enabling); 5866 + else 5867 + bfa_sm_set_state(dport, bfa_dport_sm_enabling_qwait); 5868 + break; 5869 + 5870 + case BFA_DPORT_SM_DISABLE: 5871 + /* Already disabled */ 5872 + break; 5873 + 5874 + case BFA_DPORT_SM_HWFAIL: 5875 + /* ignore */ 5876 + break; 5877 + 5878 + default: 5879 + bfa_sm_fault(dport->bfa, event); 5880 + } 5881 + } 5882 + 5883 + static void 5884 + bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport, 5885 + enum bfa_dport_sm_event event) 5886 + { 5887 + bfa_trc(dport->bfa, event); 5888 + 5889 + switch (event) { 5890 + case BFA_DPORT_SM_QRESUME: 5891 + bfa_sm_set_state(dport, bfa_dport_sm_enabling); 5892 + bfa_dport_send_req(dport, BFI_DPORT_ENABLE); 5893 + break; 5894 + 5895 + case BFA_DPORT_SM_HWFAIL: 5896 + bfa_reqq_wcancel(&dport->reqq_wait); 5897 + bfa_sm_set_state(dport, bfa_dport_sm_disabled); 5898 + bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED); 5899 + break; 5900 + 5901 + default: 5902 + bfa_sm_fault(dport->bfa, event); 5903 + } 5904 + } 5905 + 5906 + static void 5907 + bfa_dport_sm_enabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event 
event) 5908 + { 5909 + bfa_trc(dport->bfa, event); 5910 + 5911 + switch (event) { 5912 + case BFA_DPORT_SM_FWRSP: 5913 + bfa_sm_set_state(dport, bfa_dport_sm_enabled); 5914 + break; 5915 + 5916 + case BFA_DPORT_SM_HWFAIL: 5917 + bfa_sm_set_state(dport, bfa_dport_sm_disabled); 5918 + bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED); 5919 + break; 5920 + 5921 + default: 5922 + bfa_sm_fault(dport->bfa, event); 5923 + } 5924 + } 5925 + 5926 + static void 5927 + bfa_dport_sm_enabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event) 5928 + { 5929 + bfa_trc(dport->bfa, event); 5930 + 5931 + switch (event) { 5932 + case BFA_DPORT_SM_ENABLE: 5933 + /* Already enabled */ 5934 + break; 5935 + 5936 + case BFA_DPORT_SM_DISABLE: 5937 + bfa_fcport_dportdisable(dport->bfa); 5938 + if (bfa_dport_send_req(dport, BFI_DPORT_DISABLE)) 5939 + bfa_sm_set_state(dport, bfa_dport_sm_disabling); 5940 + else 5941 + bfa_sm_set_state(dport, bfa_dport_sm_disabling_qwait); 5942 + break; 5943 + 5944 + case BFA_DPORT_SM_HWFAIL: 5945 + bfa_sm_set_state(dport, bfa_dport_sm_disabled); 5946 + break; 5947 + 5948 + default: 5949 + bfa_sm_fault(dport->bfa, event); 5950 + } 5951 + } 5952 + 5953 + static void 5954 + bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport, 5955 + enum bfa_dport_sm_event event) 5956 + { 5957 + bfa_trc(dport->bfa, event); 5958 + 5959 + switch (event) { 5960 + case BFA_DPORT_SM_QRESUME: 5961 + bfa_sm_set_state(dport, bfa_dport_sm_disabling); 5962 + bfa_dport_send_req(dport, BFI_DPORT_DISABLE); 5963 + break; 5964 + 5965 + case BFA_DPORT_SM_HWFAIL: 5966 + bfa_sm_set_state(dport, bfa_dport_sm_disabled); 5967 + bfa_reqq_wcancel(&dport->reqq_wait); 5968 + bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK); 5969 + break; 5970 + 5971 + default: 5972 + bfa_sm_fault(dport->bfa, event); 5973 + } 5974 + } 5975 + 5976 + static void 5977 + bfa_dport_sm_disabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event) 5978 + { 5979 + bfa_trc(dport->bfa, event); 5980 + 5981 + switch (event) { 
5982 + case BFA_DPORT_SM_FWRSP: 5983 + bfa_sm_set_state(dport, bfa_dport_sm_disabled); 5984 + break; 5985 + 5986 + case BFA_DPORT_SM_HWFAIL: 5987 + bfa_sm_set_state(dport, bfa_dport_sm_disabled); 5988 + bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK); 5989 + break; 5990 + 5991 + default: 5992 + bfa_sm_fault(dport->bfa, event); 5993 + } 5994 + } 5995 + 5996 + 5997 + static bfa_boolean_t 5998 + bfa_dport_send_req(struct bfa_dport_s *dport, enum bfi_dport_req req) 5999 + { 6000 + struct bfi_diag_dport_req_s *m; 6001 + 6002 + /* 6003 + * Increment message tag before queue check, so that responses to old 6004 + * requests are discarded. 6005 + */ 6006 + dport->msgtag++; 6007 + 6008 + /* 6009 + * check for room in queue to send request now 6010 + */ 6011 + m = bfa_reqq_next(dport->bfa, BFA_REQQ_DIAG); 6012 + if (!m) { 6013 + bfa_reqq_wait(dport->bfa, BFA_REQQ_PORT, &dport->reqq_wait); 6014 + return BFA_FALSE; 6015 + } 6016 + 6017 + bfi_h2i_set(m->mh, BFI_MC_DIAG, BFI_DIAG_H2I_DPORT, 6018 + bfa_fn_lpu(dport->bfa)); 6019 + m->req = req; 6020 + m->msgtag = dport->msgtag; 6021 + 6022 + /* 6023 + * queue I/O message to firmware 6024 + */ 6025 + bfa_reqq_produce(dport->bfa, BFA_REQQ_DIAG, m->mh); 6026 + 6027 + return BFA_TRUE; 6028 + } 6029 + 6030 + static void 6031 + bfa_dport_qresume(void *cbarg) 6032 + { 6033 + struct bfa_dport_s *dport = cbarg; 6034 + 6035 + bfa_sm_send_event(dport, BFA_DPORT_SM_QRESUME); 6036 + } 6037 + 6038 + static void 6039 + bfa_dport_req_comp(struct bfa_dport_s *dport, bfi_diag_dport_rsp_t *msg) 6040 + { 6041 + bfa_sm_send_event(dport, BFA_DPORT_SM_FWRSP); 6042 + bfa_cb_fcdiag_dport(dport, msg->status); 6043 + } 6044 + 6045 + /* 6046 + * Dport enable 6047 + * 6048 + * @param[in] *bfa - bfa data struct 6049 + */ 6050 + bfa_status_t 6051 + bfa_dport_enable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg) 6052 + { 6053 + struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); 6054 + struct bfa_dport_s *dport = &fcdiag->dport; 6055 + 6056 + /* 6057 + * Dport is 
not support in MEZZ card 6058 + */ 6059 + if (bfa_mfg_is_mezz(dport->bfa->ioc.attr->card_type)) { 6060 + bfa_trc(dport->bfa, BFA_STATUS_PBC); 6061 + return BFA_STATUS_CMD_NOTSUPP_MEZZ; 6062 + } 6063 + 6064 + /* 6065 + * Check to see if IOC is down 6066 + */ 6067 + if (!bfa_iocfc_is_operational(bfa)) 6068 + return BFA_STATUS_IOC_NON_OP; 6069 + 6070 + /* if port is PBC disabled, return error */ 6071 + if (bfa_fcport_is_pbcdisabled(bfa)) { 6072 + bfa_trc(dport->bfa, BFA_STATUS_PBC); 6073 + return BFA_STATUS_PBC; 6074 + } 6075 + 6076 + /* 6077 + * Check if port mode is FC port 6078 + */ 6079 + if (bfa_ioc_get_type(&bfa->ioc) != BFA_IOC_TYPE_FC) { 6080 + bfa_trc(dport->bfa, bfa_ioc_get_type(&bfa->ioc)); 6081 + return BFA_STATUS_CMD_NOTSUPP_CNA; 6082 + } 6083 + 6084 + /* 6085 + * Check if port is in LOOP mode 6086 + */ 6087 + if ((bfa_fcport_get_cfg_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) || 6088 + (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP)) { 6089 + bfa_trc(dport->bfa, 0); 6090 + return BFA_STATUS_TOPOLOGY_LOOP; 6091 + } 6092 + 6093 + /* 6094 + * Check if port is TRUNK mode 6095 + */ 6096 + if (bfa_fcport_is_trunk_enabled(bfa)) { 6097 + bfa_trc(dport->bfa, 0); 6098 + return BFA_STATUS_ERROR_TRUNK_ENABLED; 6099 + } 6100 + 6101 + /* 6102 + * Check to see if port is disable or in dport state 6103 + */ 6104 + if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) && 6105 + (bfa_fcport_is_dport(bfa) == BFA_FALSE)) { 6106 + bfa_trc(dport->bfa, 0); 6107 + return BFA_STATUS_PORT_NOT_DISABLED; 6108 + } 6109 + 6110 + /* 6111 + * Check if dport is busy 6112 + */ 6113 + if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) || 6114 + bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait) || 6115 + bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) || 6116 + bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait)) { 6117 + return BFA_STATUS_DEVBUSY; 6118 + } 6119 + 6120 + /* 6121 + * Check if dport is already enabled 6122 + */ 6123 + if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) { 
6124 + bfa_trc(dport->bfa, 0); 6125 + return BFA_STATUS_DPORT_ENABLED; 6126 + } 6127 + 6128 + dport->cbfn = cbfn; 6129 + dport->cbarg = cbarg; 6130 + 6131 + bfa_sm_send_event(dport, BFA_DPORT_SM_ENABLE); 6132 + return BFA_STATUS_OK; 6133 + } 6134 + 6135 + /* 6136 + * Dport disable 6137 + * 6138 + * @param[in] *bfa - bfa data struct 6139 + */ 6140 + bfa_status_t 6141 + bfa_dport_disable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg) 6142 + { 6143 + struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); 6144 + struct bfa_dport_s *dport = &fcdiag->dport; 6145 + 6146 + if (bfa_ioc_is_disabled(&bfa->ioc)) 6147 + return BFA_STATUS_IOC_DISABLED; 6148 + 6149 + /* if port is PBC disabled, return error */ 6150 + if (bfa_fcport_is_pbcdisabled(bfa)) { 6151 + bfa_trc(dport->bfa, BFA_STATUS_PBC); 6152 + return BFA_STATUS_PBC; 6153 + } 6154 + 6155 + /* 6156 + * Check to see if port is disable or in dport state 6157 + */ 6158 + if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) && 6159 + (bfa_fcport_is_dport(bfa) == BFA_FALSE)) { 6160 + bfa_trc(dport->bfa, 0); 6161 + return BFA_STATUS_PORT_NOT_DISABLED; 6162 + } 6163 + 6164 + /* 6165 + * Check if dport is busy 6166 + */ 6167 + if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) || 6168 + bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait) || 6169 + bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) || 6170 + bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait)) 6171 + return BFA_STATUS_DEVBUSY; 6172 + 6173 + /* 6174 + * Check if dport is already disabled 6175 + */ 6176 + if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabled)) { 6177 + bfa_trc(dport->bfa, 0); 6178 + return BFA_STATUS_DPORT_DISABLED; 6179 + } 6180 + 6181 + dport->cbfn = cbfn; 6182 + dport->cbarg = cbarg; 6183 + 6184 + bfa_sm_send_event(dport, BFA_DPORT_SM_DISABLE); 6185 + return BFA_STATUS_OK; 6186 + } 6187 + 6188 + /* 6189 + * Get D-port state 6190 + * 6191 + * @param[in] *bfa - bfa data struct 6192 + */ 6193 + 6194 + bfa_status_t 6195 + bfa_dport_get_state(struct bfa_s 
*bfa, enum bfa_dport_state *state) 6196 + { 6197 + struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); 6198 + struct bfa_dport_s *dport = &fcdiag->dport; 6199 + 6200 + if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) 6201 + *state = BFA_DPORT_ST_ENABLED; 6202 + else if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) || 6203 + bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait)) 6204 + *state = BFA_DPORT_ST_ENABLING; 6205 + else if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabled)) 6206 + *state = BFA_DPORT_ST_DISABLED; 6207 + else if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) || 6208 + bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait)) 6209 + *state = BFA_DPORT_ST_DISABLING; 6210 + else { 6211 + bfa_trc(dport->bfa, BFA_STATUS_EINVAL); 6212 + return BFA_STATUS_EINVAL; 6213 + } 6214 + return BFA_STATUS_OK; 5962 6215 }
+19
drivers/scsi/bfa/bfa_svc.h
··· 550 550 void (*event_cbfn) (void *cbarg, 551 551 enum bfa_port_linkstate event), void *event_cbarg); 552 552 bfa_boolean_t bfa_fcport_is_disabled(struct bfa_s *bfa); 553 + bfa_boolean_t bfa_fcport_is_dport(struct bfa_s *bfa); 553 554 enum bfa_port_speed bfa_fcport_get_ratelim_speed(struct bfa_s *bfa); 554 555 555 556 void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit, u8 bb_scn); ··· 564 563 struct bfa_cb_pending_q_s *cb); 565 564 bfa_boolean_t bfa_fcport_is_qos_enabled(struct bfa_s *bfa); 566 565 bfa_boolean_t bfa_fcport_is_trunk_enabled(struct bfa_s *bfa); 566 + void bfa_fcport_dportenable(struct bfa_s *bfa); 567 + void bfa_fcport_dportdisable(struct bfa_s *bfa); 567 568 bfa_status_t bfa_fcport_is_pbcdisabled(struct bfa_s *bfa); 568 569 void bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state); 569 570 ··· 706 703 u32 status; 707 704 }; 708 705 706 + struct bfa_dport_s { 707 + struct bfa_s *bfa; /* Back pointer to BFA */ 708 + bfa_sm_t sm; /* finite state machine */ 709 + u32 msgtag; /* firmware msg tag for reply */ 710 + struct bfa_reqq_wait_s reqq_wait; 711 + bfa_cb_diag_t cbfn; 712 + void *cbarg; 713 + }; 714 + 709 715 struct bfa_fcdiag_s { 710 716 struct bfa_s *bfa; /* Back pointer to BFA */ 711 717 struct bfa_trc_mod_s *trcmod; 712 718 struct bfa_fcdiag_lb_s lb; 713 719 struct bfa_fcdiag_qtest_s qtest; 720 + struct bfa_dport_s dport; 714 721 }; 715 722 716 723 #define BFA_FCDIAG_MOD(__bfa) (&(__bfa)->modules.fcdiag) ··· 736 723 u32 queue, struct bfa_diag_qtest_result_s *result, 737 724 bfa_cb_diag_t cbfn, void *cbarg); 738 725 bfa_status_t bfa_fcdiag_lb_is_running(struct bfa_s *bfa); 726 + bfa_status_t bfa_dport_enable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, 727 + void *cbarg); 728 + bfa_status_t bfa_dport_disable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, 729 + void *cbarg); 730 + bfa_status_t bfa_dport_get_state(struct bfa_s *bfa, 731 + enum bfa_dport_state *state); 739 732 740 733 #endif /* __BFA_SVC_H__ */
+56
drivers/scsi/bfa/bfad_bsg.c
··· 1747 1747 } 1748 1748 1749 1749 int 1750 + bfad_iocmd_diag_cfg_dport(struct bfad_s *bfad, unsigned int cmd, void *pcmd) 1751 + { 1752 + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd; 1753 + unsigned long flags; 1754 + struct bfad_hal_comp fcomp; 1755 + 1756 + init_completion(&fcomp.comp); 1757 + spin_lock_irqsave(&bfad->bfad_lock, flags); 1758 + if (cmd == IOCMD_DIAG_DPORT_ENABLE) 1759 + iocmd->status = bfa_dport_enable(&bfad->bfa, 1760 + bfad_hcb_comp, &fcomp); 1761 + else if (cmd == IOCMD_DIAG_DPORT_DISABLE) 1762 + iocmd->status = bfa_dport_disable(&bfad->bfa, 1763 + bfad_hcb_comp, &fcomp); 1764 + else { 1765 + bfa_trc(bfad, 0); 1766 + spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1767 + return -EINVAL; 1768 + } 1769 + spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1770 + 1771 + if (iocmd->status != BFA_STATUS_OK) 1772 + bfa_trc(bfad, iocmd->status); 1773 + else { 1774 + wait_for_completion(&fcomp.comp); 1775 + iocmd->status = fcomp.status; 1776 + } 1777 + 1778 + return 0; 1779 + } 1780 + 1781 + int 1782 + bfad_iocmd_diag_dport_get_state(struct bfad_s *bfad, void *pcmd) 1783 + { 1784 + struct bfa_bsg_diag_dport_get_state_s *iocmd = 1785 + (struct bfa_bsg_diag_dport_get_state_s *)pcmd; 1786 + unsigned long flags; 1787 + 1788 + spin_lock_irqsave(&bfad->bfad_lock, flags); 1789 + iocmd->status = bfa_dport_get_state(&bfad->bfa, &iocmd->state); 1790 + spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1791 + 1792 + return 0; 1793 + } 1794 + 1795 + int 1750 1796 bfad_iocmd_phy_get_attr(struct bfad_s *bfad, void *cmd) 1751 1797 { 1752 1798 struct bfa_bsg_phy_attr_s *iocmd = ··· 2217 2171 unsigned long flags; 2218 2172 2219 2173 spin_lock_irqsave(&bfad->bfad_lock, flags); 2174 + 2175 + if (bfa_fcport_is_dport(&bfad->bfa)) 2176 + return BFA_STATUS_DPORT_ERR; 2220 2177 2221 2178 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) || 2222 2179 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) ··· 2750 2701 break; 2751 2702 case IOCMD_DIAG_LB_STAT: 2752 2703 
rc = bfad_iocmd_diag_lb_stat(bfad, iocmd); 2704 + break; 2705 + case IOCMD_DIAG_DPORT_ENABLE: 2706 + case IOCMD_DIAG_DPORT_DISABLE: 2707 + rc = bfad_iocmd_diag_cfg_dport(bfad, cmd, iocmd); 2708 + break; 2709 + case IOCMD_DIAG_DPORT_GET_STATE: 2710 + rc = bfad_iocmd_diag_dport_get_state(bfad, iocmd); 2753 2711 break; 2754 2712 case IOCMD_PHY_GET_ATTR: 2755 2713 rc = bfad_iocmd_phy_get_attr(bfad, iocmd);
+10
drivers/scsi/bfa/bfad_bsg.h
··· 141 141 IOCMD_FCPIM_LUNMASK_QUERY, 142 142 IOCMD_FCPIM_LUNMASK_ADD, 143 143 IOCMD_FCPIM_LUNMASK_DELETE, 144 + IOCMD_DIAG_DPORT_ENABLE, 145 + IOCMD_DIAG_DPORT_DISABLE, 146 + IOCMD_DIAG_DPORT_GET_STATE, 144 147 }; 145 148 146 149 struct bfa_bsg_gen_s { ··· 614 611 bfa_status_t status; 615 612 u16 bfad_num; 616 613 u16 rsvd; 614 + }; 615 + 616 + struct bfa_bsg_diag_dport_get_state_s { 617 + bfa_status_t status; 618 + u16 bfad_num; 619 + u16 rsvd; 620 + enum bfa_dport_state state; 617 621 }; 618 622 619 623 struct bfa_bsg_phy_attr_s {
+19
drivers/scsi/bfa/bfi.h
··· 960 960 BFI_DIAG_H2I_TEMPSENSOR = 4, 961 961 BFI_DIAG_H2I_LEDTEST = 5, 962 962 BFI_DIAG_H2I_QTEST = 6, 963 + BFI_DIAG_H2I_DPORT = 7, 963 964 }; 964 965 965 966 enum bfi_diag_i2h { ··· 970 969 BFI_DIAG_I2H_TEMPSENSOR = BFA_I2HM(BFI_DIAG_H2I_TEMPSENSOR), 971 970 BFI_DIAG_I2H_LEDTEST = BFA_I2HM(BFI_DIAG_H2I_LEDTEST), 972 971 BFI_DIAG_I2H_QTEST = BFA_I2HM(BFI_DIAG_H2I_QTEST), 972 + BFI_DIAG_I2H_DPORT = BFA_I2HM(BFI_DIAG_H2I_DPORT), 973 973 }; 974 974 975 975 #define BFI_DIAG_MAX_SGES 2 ··· 1055 1053 u32 data[BFI_LMSG_PL_WSZ]; /* fill up tcm prefetch area */ 1056 1054 }; 1057 1055 #define bfi_diag_qtest_rsp_t struct bfi_diag_qtest_req_s 1056 + 1057 + /* 1058 + * D-port test 1059 + */ 1060 + enum bfi_dport_req { 1061 + BFI_DPORT_DISABLE = 0, /* disable dport request */ 1062 + BFI_DPORT_ENABLE = 1, /* enable dport request */ 1063 + }; 1064 + 1065 + struct bfi_diag_dport_req_s { 1066 + struct bfi_mhdr_s mh; /* 4 bytes */ 1067 + u8 req; /* request 1: enable 0: disable */ 1068 + u8 status; /* reply status */ 1069 + u8 rsvd[2]; 1070 + u32 msgtag; /* msgtag for reply */ 1071 + }; 1072 + #define bfi_diag_dport_rsp_t struct bfi_diag_dport_req_s 1058 1073 1059 1074 /* 1060 1075 * PHY module specific