Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

scsi: lpfc: Change lpfc_hba hba_flag member into a bitmask

In an attempt to reduce the amount of unnecessary phba->hbalock acquisitions
in the lpfc driver, change hba_flag into an unsigned long bitmask and use
clear_bit/test_bit bitwise atomic APIs instead of reliance on phba->hbalock
for synchronization.

Signed-off-by: Justin Tee <justin.tee@broadcom.com>
Link: https://lore.kernel.org/r/20240429221547.6842-6-justintee8345@gmail.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>

authored by

Justin Tee and committed by
Martin K. Petersen
e780c942 5f800d72

+281 -333
+32 -29
drivers/scsi/lpfc/lpfc.h
··· 393 393 LPFC_HBA_ERROR = -1 394 394 }; 395 395 396 + enum lpfc_hba_flag { /* hba generic flags */ 397 + HBA_ERATT_HANDLED = 0, /* This flag is set when eratt handled */ 398 + DEFER_ERATT = 1, /* Deferred error attn in progress */ 399 + HBA_FCOE_MODE = 2, /* HBA function in FCoE Mode */ 400 + HBA_SP_QUEUE_EVT = 3, /* Slow-path qevt posted to worker thread*/ 401 + HBA_POST_RECEIVE_BUFFER = 4, /* Rcv buffers need to be posted */ 402 + HBA_PERSISTENT_TOPO = 5, /* Persistent topology support in hba */ 403 + ELS_XRI_ABORT_EVENT = 6, /* ELS_XRI abort event was queued */ 404 + ASYNC_EVENT = 7, 405 + LINK_DISABLED = 8, /* Link disabled by user */ 406 + FCF_TS_INPROG = 9, /* FCF table scan in progress */ 407 + FCF_RR_INPROG = 10, /* FCF roundrobin flogi in progress */ 408 + HBA_FIP_SUPPORT = 11, /* FIP support in HBA */ 409 + HBA_DEVLOSS_TMO = 13, /* HBA in devloss timeout */ 410 + HBA_RRQ_ACTIVE = 14, /* process the rrq active list */ 411 + HBA_IOQ_FLUSH = 15, /* I/O queues being flushed */ 412 + HBA_RECOVERABLE_UE = 17, /* FW supports recoverable UE */ 413 + HBA_FORCED_LINK_SPEED = 18, /* 414 + * Firmware supports Forced Link 415 + * Speed capability 416 + */ 417 + HBA_FLOGI_ISSUED = 20, /* FLOGI was issued */ 418 + HBA_DEFER_FLOGI = 23, /* Defer FLOGI till read_sparm cmpl */ 419 + HBA_SETUP = 24, /* HBA setup completed */ 420 + HBA_NEEDS_CFG_PORT = 25, /* SLI3: CONFIG_PORT mbox needed */ 421 + HBA_HBEAT_INP = 26, /* mbox HBEAT is in progress */ 422 + HBA_HBEAT_TMO = 27, /* HBEAT initiated after timeout */ 423 + HBA_FLOGI_OUTSTANDING = 28, /* FLOGI is outstanding */ 424 + HBA_RHBA_CMPL = 29, /* RHBA FDMI cmd is successful */ 425 + }; 426 + 396 427 struct lpfc_trunk_link_state { 397 428 enum hba_state state; 398 429 uint8_t fault; ··· 1038 1007 #define LS_CT_VEN_RPA 0x20 /* Vendor RPA sent to switch */ 1039 1008 #define LS_EXTERNAL_LOOPBACK 0x40 /* External loopback plug inserted */ 1040 1009 1041 - uint32_t hba_flag; /* hba generic flags */ 1042 - #define 
HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */ 1043 - #define DEFER_ERATT 0x2 /* Deferred error attention in progress */ 1044 - #define HBA_FCOE_MODE 0x4 /* HBA function in FCoE Mode */ 1045 - #define HBA_SP_QUEUE_EVT 0x8 /* Slow-path qevt posted to worker thread*/ 1046 - #define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */ 1047 - #define HBA_PERSISTENT_TOPO 0x20 /* Persistent topology support in hba */ 1048 - #define ELS_XRI_ABORT_EVENT 0x40 /* ELS_XRI abort event was queued */ 1049 - #define ASYNC_EVENT 0x80 1050 - #define LINK_DISABLED 0x100 /* Link disabled by user */ 1051 - #define FCF_TS_INPROG 0x200 /* FCF table scan in progress */ 1052 - #define FCF_RR_INPROG 0x400 /* FCF roundrobin flogi in progress */ 1053 - #define HBA_FIP_SUPPORT 0x800 /* FIP support in HBA */ 1054 - #define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */ 1055 - #define HBA_RRQ_ACTIVE 0x4000 /* process the rrq active list */ 1056 - #define HBA_IOQ_FLUSH 0x8000 /* FCP/NVME I/O queues being flushed */ 1057 - #define HBA_RECOVERABLE_UE 0x20000 /* Firmware supports recoverable UE */ 1058 - #define HBA_FORCED_LINK_SPEED 0x40000 /* 1059 - * Firmware supports Forced Link Speed 1060 - * capability 1061 - */ 1062 - #define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */ 1063 - #define HBA_DEFER_FLOGI 0x800000 /* Defer FLOGI till read_sparm cmpl */ 1064 - #define HBA_SETUP 0x1000000 /* Signifies HBA setup is completed */ 1065 - #define HBA_NEEDS_CFG_PORT 0x2000000 /* SLI3 - needs a CONFIG_PORT mbox */ 1066 - #define HBA_HBEAT_INP 0x4000000 /* mbox HBEAT is in progress */ 1067 - #define HBA_HBEAT_TMO 0x8000000 /* HBEAT initiated after timeout */ 1068 - #define HBA_FLOGI_OUTSTANDING 0x10000000 /* FLOGI is outstanding */ 1069 - #define HBA_RHBA_CMPL 0x20000000 /* RHBA FDMI command is successful */ 1010 + unsigned long hba_flag; /* hba generic flags */ 1070 1011 1071 1012 struct completion *fw_dump_cmpl; /* cmpl event tracker for fw_dump */ 1072 1013 uint32_t 
fcp_ring_in_use; /* When polling test if intr-hndlr active*/
+17 -14
drivers/scsi/lpfc/lpfc_attr.c
··· 322 322 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 323 323 struct lpfc_hba *phba = vport->phba; 324 324 325 - if (phba->hba_flag & HBA_FIP_SUPPORT) 325 + if (test_bit(HBA_FIP_SUPPORT, &phba->hba_flag)) 326 326 return scnprintf(buf, PAGE_SIZE, "1\n"); 327 327 else 328 328 return scnprintf(buf, PAGE_SIZE, "0\n"); ··· 1049 1049 case LPFC_INIT_MBX_CMDS: 1050 1050 case LPFC_LINK_DOWN: 1051 1051 case LPFC_HBA_ERROR: 1052 - if (phba->hba_flag & LINK_DISABLED) 1052 + if (test_bit(LINK_DISABLED, &phba->hba_flag)) 1053 1053 len += scnprintf(buf + len, PAGE_SIZE-len, 1054 1054 "Link Down - User disabled\n"); 1055 1055 else ··· 1292 1292 * it doesn't make any sense to allow issue_lip 1293 1293 */ 1294 1294 if (test_bit(FC_OFFLINE_MODE, &vport->fc_flag) || 1295 - (phba->hba_flag & LINK_DISABLED) || 1295 + test_bit(LINK_DISABLED, &phba->hba_flag) || 1296 1296 (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)) 1297 1297 return -EPERM; 1298 1298 ··· 3635 3635 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 3636 3636 3637 3637 return scnprintf(buf, PAGE_SIZE, "%d\n", 3638 - (phba->hba_flag & HBA_PERSISTENT_TOPO) ? 1 : 0); 3638 + test_bit(HBA_PERSISTENT_TOPO, 3639 + &phba->hba_flag) ? 
1 : 0); 3639 3640 } 3640 3641 static DEVICE_ATTR(pt, 0444, 3641 3642 lpfc_pt_show, NULL); ··· 4206 4205 &phba->sli4_hba.sli_intf); 4207 4206 if_type = bf_get(lpfc_sli_intf_if_type, 4208 4207 &phba->sli4_hba.sli_intf); 4209 - if ((phba->hba_flag & HBA_PERSISTENT_TOPO || 4210 - (!phba->sli4_hba.pc_sli4_params.pls && 4208 + if ((test_bit(HBA_PERSISTENT_TOPO, &phba->hba_flag) || 4209 + (!phba->sli4_hba.pc_sli4_params.pls && 4211 4210 (sli_family == LPFC_SLI_INTF_FAMILY_G6 || 4212 4211 if_type == LPFC_SLI_INTF_IF_TYPE_6))) && 4213 4212 val == 4) { ··· 4310 4309 4311 4310 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 4312 4311 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2 && 4313 - phba->hba_flag & HBA_FORCED_LINK_SPEED) 4312 + test_bit(HBA_FORCED_LINK_SPEED, &phba->hba_flag)) 4314 4313 return -EPERM; 4315 4314 4316 4315 if (!strncmp(buf, "nolip ", strlen("nolip "))) { ··· 6498 6497 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6499 6498 struct lpfc_hba *phba = vport->phba; 6500 6499 6501 - if ((lpfc_is_link_up(phba)) && (!(phba->hba_flag & HBA_FCOE_MODE))) { 6500 + if ((lpfc_is_link_up(phba)) && 6501 + !test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { 6502 6502 switch(phba->fc_linkspeed) { 6503 6503 case LPFC_LINK_SPEED_1GHZ: 6504 6504 fc_host_speed(shost) = FC_PORTSPEED_1GBIT; ··· 6535 6533 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; 6536 6534 break; 6537 6535 } 6538 - } else if (lpfc_is_link_up(phba) && (phba->hba_flag & HBA_FCOE_MODE)) { 6536 + } else if (lpfc_is_link_up(phba) && 6537 + test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { 6539 6538 switch (phba->fc_linkspeed) { 6540 6539 case LPFC_ASYNC_LINK_SPEED_1GBPS: 6541 6540 fc_host_speed(shost) = FC_PORTSPEED_1GBIT; ··· 6721 6718 hs->invalid_crc_count -= lso->invalid_crc_count; 6722 6719 hs->error_frames -= lso->error_frames; 6723 6720 6724 - if (phba->hba_flag & HBA_FCOE_MODE) { 6721 + if (test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { 6725 6722 hs->lip_count = -1; 6726 6723 hs->nos_count = 
(phba->link_events >> 1); 6727 6724 hs->nos_count -= lso->link_events; ··· 6819 6816 lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord; 6820 6817 lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt; 6821 6818 lso->error_frames = pmb->un.varRdLnk.crcCnt; 6822 - if (phba->hba_flag & HBA_FCOE_MODE) 6819 + if (test_bit(HBA_FCOE_MODE, &phba->hba_flag)) 6823 6820 lso->link_events = (phba->link_events >> 1); 6824 6821 else 6825 6822 lso->link_events = (phba->fc_eventTag >> 1); ··· 7164 7161 case PCI_DEVICE_ID_ZEPHYR_DCSP: 7165 7162 case PCI_DEVICE_ID_TIGERSHARK: 7166 7163 case PCI_DEVICE_ID_TOMCAT: 7167 - phba->hba_flag |= HBA_FCOE_MODE; 7164 + set_bit(HBA_FCOE_MODE, &phba->hba_flag); 7168 7165 break; 7169 7166 default: 7170 7167 /* for others, clear the flag */ 7171 - phba->hba_flag &= ~HBA_FCOE_MODE; 7168 + clear_bit(HBA_FCOE_MODE, &phba->hba_flag); 7172 7169 } 7173 7170 } 7174 7171 ··· 7239 7236 lpfc_get_hba_function_mode(phba); 7240 7237 7241 7238 /* BlockGuard allowed for FC only. */ 7242 - if (phba->cfg_enable_bg && phba->hba_flag & HBA_FCOE_MODE) { 7239 + if (phba->cfg_enable_bg && test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { 7243 7240 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7244 7241 "0581 BlockGuard feature not supported\n"); 7245 7242 /* If set, clear the BlockGuard support param */
+2 -1
drivers/scsi/lpfc/lpfc_bsg.c
··· 5002 5002 goto job_error; 5003 5003 } 5004 5004 5005 - forced_reply->supported = (phba->hba_flag & HBA_FORCED_LINK_SPEED) 5005 + forced_reply->supported = test_bit(HBA_FORCED_LINK_SPEED, 5006 + &phba->hba_flag) 5006 5007 ? LPFC_FORCED_LINK_SPEED_SUPPORTED 5007 5008 : LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED; 5008 5009 job_error:
+4 -4
drivers/scsi/lpfc/lpfc_ct.c
··· 2173 2173 struct lpfc_nodelist *ndlp; 2174 2174 int i; 2175 2175 2176 - phba->hba_flag |= HBA_RHBA_CMPL; 2176 + set_bit(HBA_RHBA_CMPL, &phba->hba_flag); 2177 2177 vports = lpfc_create_vport_work_array(phba); 2178 2178 if (vports) { 2179 2179 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { ··· 2368 2368 * for the physical port completes successfully. 2369 2369 * We may have to defer the RPRT accordingly. 2370 2370 */ 2371 - if (phba->hba_flag & HBA_RHBA_CMPL) { 2371 + if (test_bit(HBA_RHBA_CMPL, &phba->hba_flag)) { 2372 2372 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT, 0); 2373 2373 } else { 2374 2374 lpfc_printf_vlog(vport, KERN_INFO, ··· 2785 2785 u32 tcfg; 2786 2786 u8 i, cnt; 2787 2787 2788 - if (!(phba->hba_flag & HBA_FCOE_MODE)) { 2788 + if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { 2789 2789 cnt = 0; 2790 2790 if (phba->sli_rev == LPFC_SLI_REV4) { 2791 2791 tcfg = phba->sli4_hba.conf_trunk; ··· 2859 2859 struct lpfc_hba *phba = vport->phba; 2860 2860 u32 speeds = 0; 2861 2861 2862 - if (!(phba->hba_flag & HBA_FCOE_MODE)) { 2862 + if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { 2863 2863 switch (phba->fc_linkspeed) { 2864 2864 case LPFC_LINK_SPEED_1GHZ: 2865 2865 speeds = HBA_PORTSPEED_1GFC;
+24 -19
drivers/scsi/lpfc/lpfc_els.c
··· 189 189 * If this command is for fabric controller and HBA running 190 190 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames. 191 191 */ 192 - if ((did == Fabric_DID) && 193 - (phba->hba_flag & HBA_FIP_SUPPORT) && 194 - ((elscmd == ELS_CMD_FLOGI) || 195 - (elscmd == ELS_CMD_FDISC) || 196 - (elscmd == ELS_CMD_LOGO))) 192 + if (did == Fabric_DID && 193 + test_bit(HBA_FIP_SUPPORT, &phba->hba_flag) && 194 + (elscmd == ELS_CMD_FLOGI || 195 + elscmd == ELS_CMD_FDISC || 196 + elscmd == ELS_CMD_LOGO)) 197 197 switch (elscmd) { 198 198 case ELS_CMD_FLOGI: 199 199 elsiocb->cmd_flag |= ··· 965 965 * In case of FIP mode, perform roundrobin FCF failover 966 966 * due to new FCF discovery 967 967 */ 968 - if ((phba->hba_flag & HBA_FIP_SUPPORT) && 968 + if (test_bit(HBA_FIP_SUPPORT, &phba->hba_flag) && 969 969 (phba->fcf.fcf_flag & FCF_DISCOVERY)) { 970 970 if (phba->link_state < LPFC_LINK_UP) 971 971 goto stop_rr_fcf_flogi; ··· 999 999 IOERR_LOOP_OPEN_FAILURE))) 1000 1000 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1001 1001 "2858 FLOGI failure Status:x%x/x%x TMO" 1002 - ":x%x Data x%x x%x\n", 1002 + ":x%x Data x%lx x%x\n", 1003 1003 ulp_status, ulp_word4, tmo, 1004 1004 phba->hba_flag, phba->fcf.fcf_flag); 1005 1005 ··· 1119 1119 if (sp->cmn.fPort) 1120 1120 rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, 1121 1121 ulp_word4); 1122 - else if (!(phba->hba_flag & HBA_FCOE_MODE)) 1122 + else if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) 1123 1123 rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp); 1124 1124 else { 1125 1125 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, ··· 1149 1149 lpfc_nlp_put(ndlp); 1150 1150 spin_lock_irq(&phba->hbalock); 1151 1151 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 1152 - phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); 1153 1152 spin_unlock_irq(&phba->hbalock); 1153 + clear_bit(FCF_RR_INPROG, &phba->hba_flag); 1154 + clear_bit(HBA_DEVLOSS_TMO, &phba->hba_flag); 1154 1155 phba->fcf.fcf_redisc_attempted = 0; /* reset */ 1155 1156 
goto out; 1156 1157 } 1157 1158 if (!rc) { 1158 1159 /* Mark the FCF discovery process done */ 1159 - if (phba->hba_flag & HBA_FIP_SUPPORT) 1160 + if (test_bit(HBA_FIP_SUPPORT, &phba->hba_flag)) 1160 1161 lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP | 1161 1162 LOG_ELS, 1162 1163 "2769 FLOGI to FCF (x%x) " ··· 1165 1164 phba->fcf.current_rec.fcf_indx); 1166 1165 spin_lock_irq(&phba->hbalock); 1167 1166 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 1168 - phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); 1169 1167 spin_unlock_irq(&phba->hbalock); 1168 + clear_bit(FCF_RR_INPROG, &phba->hba_flag); 1169 + clear_bit(HBA_DEVLOSS_TMO, &phba->hba_flag); 1170 1170 phba->fcf.fcf_redisc_attempted = 0; /* reset */ 1171 1171 goto out; 1172 1172 } ··· 1204 1202 } 1205 1203 out: 1206 1204 if (!flogi_in_retry) 1207 - phba->hba_flag &= ~HBA_FLOGI_OUTSTANDING; 1205 + clear_bit(HBA_FLOGI_OUTSTANDING, &phba->hba_flag); 1208 1206 1209 1207 lpfc_els_free_iocb(phba, cmdiocb); 1210 1208 lpfc_nlp_put(ndlp); ··· 1374 1372 } 1375 1373 1376 1374 /* Avoid race with FLOGI completion and hba_flags. 
*/ 1377 - phba->hba_flag |= (HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING); 1375 + set_bit(HBA_FLOGI_ISSUED, &phba->hba_flag); 1376 + set_bit(HBA_FLOGI_OUTSTANDING, &phba->hba_flag); 1378 1377 1379 1378 rc = lpfc_issue_fabric_iocb(phba, elsiocb); 1380 1379 if (rc == IOCB_ERROR) { 1381 - phba->hba_flag &= ~(HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING); 1380 + clear_bit(HBA_FLOGI_ISSUED, &phba->hba_flag); 1381 + clear_bit(HBA_FLOGI_OUTSTANDING, &phba->hba_flag); 1382 1382 lpfc_els_free_iocb(phba, elsiocb); 1383 1383 lpfc_nlp_put(ndlp); 1384 1384 return 1; ··· 1417 1413 1418 1414 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1419 1415 "3354 Xmit deferred FLOGI ACC: rx_id: x%x," 1420 - " ox_id: x%x, hba_flag x%x\n", 1416 + " ox_id: x%x, hba_flag x%lx\n", 1421 1417 phba->defer_flogi_acc_rx_id, 1422 1418 phba->defer_flogi_acc_ox_id, phba->hba_flag); 1423 1419 ··· 7419 7415 goto error; 7420 7416 } 7421 7417 7422 - if (phba->sli_rev < LPFC_SLI_REV4 || (phba->hba_flag & HBA_FCOE_MODE)) { 7418 + if (phba->sli_rev < LPFC_SLI_REV4 || 7419 + test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { 7423 7420 rjt_err = LSRJT_UNABLE_TPC; 7424 7421 rjt_expl = LSEXP_REQ_UNSUPPORTED; 7425 7422 goto error; ··· 7743 7738 } 7744 7739 7745 7740 if (phba->sli_rev < LPFC_SLI_REV4 || 7746 - phba->hba_flag & HBA_FCOE_MODE || 7741 + test_bit(HBA_FCOE_MODE, &phba->hba_flag) || 7747 7742 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 7748 7743 LPFC_SLI_INTF_IF_TYPE_2)) { 7749 7744 rjt_err = LSRJT_CMD_UNSUPPORTED; ··· 8448 8443 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); 8449 8444 8450 8445 /* Defer ACC response until AFTER we issue a FLOGI */ 8451 - if (!(phba->hba_flag & HBA_FLOGI_ISSUED)) { 8446 + if (!test_bit(HBA_FLOGI_ISSUED, &phba->hba_flag)) { 8452 8447 phba->defer_flogi_acc_rx_id = bf_get(wqe_ctxt_tag, 8453 8448 &wqe->xmit_els_rsp.wqe_com); 8454 8449 phba->defer_flogi_acc_ox_id = bf_get(wqe_rcvoxid, ··· 8458 8453 8459 8454 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8460 8455 "3344 
Deferring FLOGI ACC: rx_id: x%x," 8461 - " ox_id: x%x, hba_flag x%x\n", 8456 + " ox_id: x%x, hba_flag x%lx\n", 8462 8457 phba->defer_flogi_acc_rx_id, 8463 8458 phba->defer_flogi_acc_ox_id, phba->hba_flag); 8464 8459
+63 -74
drivers/scsi/lpfc/lpfc_hbadisc.c
··· 487 487 recovering = true; 488 488 } else { 489 489 /* Physical port path. */ 490 - if (phba->hba_flag & HBA_FLOGI_OUTSTANDING) 490 + if (test_bit(HBA_FLOGI_OUTSTANDING, 491 + &phba->hba_flag)) 491 492 recovering = true; 492 493 } 493 494 break; ··· 653 652 if (!fcf_inuse) 654 653 return; 655 654 656 - if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) { 655 + if (test_bit(HBA_FIP_SUPPORT, &phba->hba_flag) && 656 + !lpfc_fcf_inuse(phba)) { 657 657 spin_lock_irq(&phba->hbalock); 658 658 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 659 - if (phba->hba_flag & HBA_DEVLOSS_TMO) { 659 + if (test_and_set_bit(HBA_DEVLOSS_TMO, 660 + &phba->hba_flag)) { 660 661 spin_unlock_irq(&phba->hbalock); 661 662 return; 662 663 } 663 - phba->hba_flag |= HBA_DEVLOSS_TMO; 664 664 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 665 665 "2847 Last remote node (x%x) using " 666 666 "FCF devloss tmo\n", nlp_did); ··· 673 671 "in progress\n"); 674 672 return; 675 673 } 676 - if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) { 677 - spin_unlock_irq(&phba->hbalock); 674 + spin_unlock_irq(&phba->hbalock); 675 + if (!test_bit(FCF_TS_INPROG, &phba->hba_flag) && 676 + !test_bit(FCF_RR_INPROG, &phba->hba_flag)) { 678 677 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 679 678 "2869 Devloss tmo to idle FIP engine, " 680 679 "unreg in-use FCF and rescan.\n"); ··· 683 680 lpfc_unregister_fcf_rescan(phba); 684 681 return; 685 682 } 686 - spin_unlock_irq(&phba->hbalock); 687 - if (phba->hba_flag & FCF_TS_INPROG) 683 + if (test_bit(FCF_TS_INPROG, &phba->hba_flag)) 688 684 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 689 685 "2870 FCF table scan in progress\n"); 690 - if (phba->hba_flag & FCF_RR_INPROG) 686 + if (test_bit(FCF_RR_INPROG, &phba->hba_flag)) 691 687 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 692 688 "2871 FLOGI roundrobin FCF failover " 693 689 "in progress\n"); ··· 980 978 981 979 /* Process SLI4 events */ 982 980 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) { 983 - if (phba->hba_flag & 
HBA_RRQ_ACTIVE) 981 + if (test_bit(HBA_RRQ_ACTIVE, &phba->hba_flag)) 984 982 lpfc_handle_rrq_active(phba); 985 - if (phba->hba_flag & ELS_XRI_ABORT_EVENT) 983 + if (test_bit(ELS_XRI_ABORT_EVENT, &phba->hba_flag)) 986 984 lpfc_sli4_els_xri_abort_event_proc(phba); 987 - if (phba->hba_flag & ASYNC_EVENT) 985 + if (test_bit(ASYNC_EVENT, &phba->hba_flag)) 988 986 lpfc_sli4_async_event_proc(phba); 989 - if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) { 990 - spin_lock_irq(&phba->hbalock); 991 - phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER; 992 - spin_unlock_irq(&phba->hbalock); 987 + if (test_and_clear_bit(HBA_POST_RECEIVE_BUFFER, 988 + &phba->hba_flag)) 993 989 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); 994 - } 995 990 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) 996 991 lpfc_sli4_fcf_redisc_event_proc(phba); 997 992 } ··· 1034 1035 status >>= (4*LPFC_ELS_RING); 1035 1036 if (pring && (status & HA_RXMASK || 1036 1037 pring->flag & LPFC_DEFERRED_RING_EVENT || 1037 - phba->hba_flag & HBA_SP_QUEUE_EVT)) { 1038 + test_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag))) { 1038 1039 if (pring->flag & LPFC_STOP_IOCB_EVENT) { 1039 1040 pring->flag |= LPFC_DEFERRED_RING_EVENT; 1040 1041 /* Preserve legacy behavior. 
*/ 1041 - if (!(phba->hba_flag & HBA_SP_QUEUE_EVT)) 1042 + if (!test_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag)) 1042 1043 set_bit(LPFC_DATA_READY, &phba->data_flags); 1043 1044 } else { 1044 1045 /* Driver could have abort request completed in queue ··· 1419 1420 spin_unlock_irq(shost->host_lock); 1420 1421 1421 1422 /* reinitialize initial HBA flag */ 1422 - phba->hba_flag &= ~(HBA_FLOGI_ISSUED | HBA_RHBA_CMPL); 1423 + clear_bit(HBA_FLOGI_ISSUED, &phba->hba_flag); 1424 + clear_bit(HBA_RHBA_CMPL, &phba->hba_flag); 1423 1425 1424 1426 return 0; 1425 1427 } ··· 1505 1505 1506 1506 /* don't perform discovery for SLI4 loopback diagnostic test */ 1507 1507 if ((phba->sli_rev == LPFC_SLI_REV4) && 1508 - !(phba->hba_flag & HBA_FCOE_MODE) && 1508 + !test_bit(HBA_FCOE_MODE, &phba->hba_flag) && 1509 1509 (phba->link_flag & LS_LOOPBACK_MODE)) 1510 1510 return; 1511 1511 ··· 1548 1548 goto sparam_out; 1549 1549 } 1550 1550 1551 - phba->hba_flag |= HBA_DEFER_FLOGI; 1551 + set_bit(HBA_DEFER_FLOGI, &phba->hba_flag); 1552 1552 } else { 1553 1553 lpfc_initial_flogi(vport); 1554 1554 } ··· 1617 1617 spin_unlock_irq(&phba->hbalock); 1618 1618 1619 1619 /* If there is a pending FCoE event, restart FCF table scan. 
*/ 1620 - if ((!(phba->hba_flag & FCF_RR_INPROG)) && 1621 - lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF)) 1620 + if (!test_bit(FCF_RR_INPROG, &phba->hba_flag) && 1621 + lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF)) 1622 1622 goto fail_out; 1623 1623 1624 1624 /* Mark successful completion of FCF table scan */ 1625 1625 spin_lock_irq(&phba->hbalock); 1626 1626 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); 1627 - phba->hba_flag &= ~FCF_TS_INPROG; 1628 - if (vport->port_state != LPFC_FLOGI) { 1629 - phba->hba_flag |= FCF_RR_INPROG; 1630 - spin_unlock_irq(&phba->hbalock); 1631 - lpfc_issue_init_vfi(vport); 1632 - goto out; 1633 - } 1634 1627 spin_unlock_irq(&phba->hbalock); 1628 + clear_bit(FCF_TS_INPROG, &phba->hba_flag); 1629 + if (vport->port_state != LPFC_FLOGI) { 1630 + set_bit(FCF_RR_INPROG, &phba->hba_flag); 1631 + lpfc_issue_init_vfi(vport); 1632 + } 1635 1633 goto out; 1636 1634 1637 1635 fail_out: 1638 - spin_lock_irq(&phba->hbalock); 1639 - phba->hba_flag &= ~FCF_RR_INPROG; 1640 - spin_unlock_irq(&phba->hbalock); 1636 + clear_bit(FCF_RR_INPROG, &phba->hba_flag); 1641 1637 out: 1642 1638 mempool_free(mboxq, phba->mbox_mem_pool); 1643 1639 } ··· 1863 1867 spin_lock_irq(&phba->hbalock); 1864 1868 /* If the FCF is not available do nothing. 
*/ 1865 1869 if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) { 1866 - phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); 1867 1870 spin_unlock_irq(&phba->hbalock); 1871 + clear_bit(FCF_TS_INPROG, &phba->hba_flag); 1872 + clear_bit(FCF_RR_INPROG, &phba->hba_flag); 1868 1873 return; 1869 1874 } 1870 1875 1871 1876 /* The FCF is already registered, start discovery */ 1872 1877 if (phba->fcf.fcf_flag & FCF_REGISTERED) { 1873 1878 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); 1874 - phba->hba_flag &= ~FCF_TS_INPROG; 1879 + spin_unlock_irq(&phba->hbalock); 1880 + clear_bit(FCF_TS_INPROG, &phba->hba_flag); 1875 1881 if (phba->pport->port_state != LPFC_FLOGI && 1876 1882 test_bit(FC_FABRIC, &phba->pport->fc_flag)) { 1877 - phba->hba_flag |= FCF_RR_INPROG; 1878 - spin_unlock_irq(&phba->hbalock); 1883 + set_bit(FCF_RR_INPROG, &phba->hba_flag); 1879 1884 lpfc_initial_flogi(phba->pport); 1880 1885 return; 1881 1886 } 1882 - spin_unlock_irq(&phba->hbalock); 1883 1887 return; 1884 1888 } 1885 1889 spin_unlock_irq(&phba->hbalock); 1886 1890 1887 1891 fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1888 1892 if (!fcf_mbxq) { 1889 - spin_lock_irq(&phba->hbalock); 1890 - phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); 1891 - spin_unlock_irq(&phba->hbalock); 1893 + clear_bit(FCF_TS_INPROG, &phba->hba_flag); 1894 + clear_bit(FCF_RR_INPROG, &phba->hba_flag); 1892 1895 return; 1893 1896 } 1894 1897 ··· 1896 1901 fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi; 1897 1902 rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT); 1898 1903 if (rc == MBX_NOT_FINISHED) { 1899 - spin_lock_irq(&phba->hbalock); 1900 - phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); 1901 - spin_unlock_irq(&phba->hbalock); 1904 + clear_bit(FCF_TS_INPROG, &phba->hba_flag); 1905 + clear_bit(FCF_RR_INPROG, &phba->hba_flag); 1902 1906 mempool_free(fcf_mbxq, phba->mbox_mem_pool); 1903 1907 } 1904 1908 ··· 1950 1956 bf_get(lpfc_fcf_record_fcf_sol, new_fcf_record)) 1951 1957 return 0; 1952 1958 1953 - if 
(!(phba->hba_flag & HBA_FIP_SUPPORT)) { 1959 + if (!test_bit(HBA_FIP_SUPPORT, &phba->hba_flag)) { 1954 1960 *boot_flag = 0; 1955 1961 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, 1956 1962 new_fcf_record); ··· 2145 2151 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 2146 2152 "2833 Stop FCF discovery process due to link " 2147 2153 "state change (x%x)\n", phba->link_state); 2154 + clear_bit(FCF_TS_INPROG, &phba->hba_flag); 2155 + clear_bit(FCF_RR_INPROG, &phba->hba_flag); 2148 2156 spin_lock_irq(&phba->hbalock); 2149 - phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); 2150 2157 phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY); 2151 2158 spin_unlock_irq(&phba->hbalock); 2152 2159 } ··· 2375 2380 int rc; 2376 2381 2377 2382 if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) { 2378 - spin_lock_irq(&phba->hbalock); 2379 - if (phba->hba_flag & HBA_DEVLOSS_TMO) { 2380 - spin_unlock_irq(&phba->hbalock); 2383 + if (test_bit(HBA_DEVLOSS_TMO, &phba->hba_flag)) { 2381 2384 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2382 2385 "2872 Devloss tmo with no eligible " 2383 2386 "FCF, unregister in-use FCF (x%x) " ··· 2385 2392 goto stop_flogi_current_fcf; 2386 2393 } 2387 2394 /* Mark the end to FLOGI roundrobin failover */ 2388 - phba->hba_flag &= ~FCF_RR_INPROG; 2395 + clear_bit(FCF_RR_INPROG, &phba->hba_flag); 2389 2396 /* Allow action to new fcf asynchronous event */ 2397 + spin_lock_irq(&phba->hbalock); 2390 2398 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); 2391 2399 spin_unlock_irq(&phba->hbalock); 2392 2400 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, ··· 2624 2630 "2765 Mailbox command READ_FCF_RECORD " 2625 2631 "failed to retrieve a FCF record.\n"); 2626 2632 /* Let next new FCF event trigger fast failover */ 2627 - spin_lock_irq(&phba->hbalock); 2628 - phba->hba_flag &= ~FCF_TS_INPROG; 2629 - spin_unlock_irq(&phba->hbalock); 2633 + clear_bit(FCF_TS_INPROG, &phba->hba_flag); 2630 2634 lpfc_sli4_mbox_cmd_free(phba, mboxq); 2631 2635 return; 2632 2636 
} ··· 2865 2873 phba->fcoe_eventtag_at_fcf_scan, 2866 2874 bf_get(lpfc_fcf_record_fcf_index, 2867 2875 new_fcf_record)); 2868 - spin_lock_irq(&phba->hbalock); 2869 - if (phba->hba_flag & HBA_DEVLOSS_TMO) { 2870 - phba->hba_flag &= ~FCF_TS_INPROG; 2871 - spin_unlock_irq(&phba->hbalock); 2876 + if (test_bit(HBA_DEVLOSS_TMO, 2877 + &phba->hba_flag)) { 2878 + clear_bit(FCF_TS_INPROG, 2879 + &phba->hba_flag); 2872 2880 /* Unregister in-use FCF and rescan */ 2873 2881 lpfc_printf_log(phba, KERN_INFO, 2874 2882 LOG_FIP, ··· 2881 2889 /* 2882 2890 * Let next new FCF event trigger fast failover 2883 2891 */ 2884 - phba->hba_flag &= ~FCF_TS_INPROG; 2885 - spin_unlock_irq(&phba->hbalock); 2892 + clear_bit(FCF_TS_INPROG, &phba->hba_flag); 2886 2893 return; 2887 2894 } 2888 2895 /* ··· 2987 2996 if (phba->link_state < LPFC_LINK_UP) { 2988 2997 spin_lock_irq(&phba->hbalock); 2989 2998 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 2990 - phba->hba_flag &= ~FCF_RR_INPROG; 2991 2999 spin_unlock_irq(&phba->hbalock); 3000 + clear_bit(FCF_RR_INPROG, &phba->hba_flag); 2992 3001 goto out; 2993 3002 } 2994 3003 ··· 2999 3008 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 3000 3009 "2766 Mailbox command READ_FCF_RECORD " 3001 3010 "failed to retrieve a FCF record. " 3002 - "hba_flg x%x fcf_flg x%x\n", phba->hba_flag, 3011 + "hba_flg x%lx fcf_flg x%x\n", phba->hba_flag, 3003 3012 phba->fcf.fcf_flag); 3004 3013 lpfc_unregister_fcf_rescan(phba); 3005 3014 goto out; ··· 3462 3471 /* Check if sending the FLOGI is being deferred to after we get 3463 3472 * up to date CSPs from MBX_READ_SPARAM. 
3464 3473 */ 3465 - if (phba->hba_flag & HBA_DEFER_FLOGI) { 3474 + if (test_bit(HBA_DEFER_FLOGI, &phba->hba_flag)) { 3466 3475 lpfc_initial_flogi(vport); 3467 - phba->hba_flag &= ~HBA_DEFER_FLOGI; 3476 + clear_bit(HBA_DEFER_FLOGI, &phba->hba_flag); 3468 3477 } 3469 3478 return; 3470 3479 ··· 3486 3495 spin_lock_irqsave(&phba->hbalock, iflags); 3487 3496 phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la); 3488 3497 3489 - if (!(phba->hba_flag & HBA_FCOE_MODE)) { 3498 + if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { 3490 3499 switch (bf_get(lpfc_mbx_read_top_link_spd, la)) { 3491 3500 case LPFC_LINK_SPEED_1GHZ: 3492 3501 case LPFC_LINK_SPEED_2GHZ: ··· 3602 3611 goto out; 3603 3612 } 3604 3613 3605 - if (!(phba->hba_flag & HBA_FCOE_MODE)) { 3614 + if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { 3606 3615 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3607 3616 if (!cfglink_mbox) 3608 3617 goto out; ··· 3622 3631 * is phase 1 implementation that support FCF index 0 and driver 3623 3632 * defaults. 3624 3633 */ 3625 - if (!(phba->hba_flag & HBA_FIP_SUPPORT)) { 3634 + if (!test_bit(HBA_FIP_SUPPORT, &phba->hba_flag)) { 3626 3635 fcf_record = kzalloc(sizeof(struct fcf_record), 3627 3636 GFP_KERNEL); 3628 3637 if (unlikely(!fcf_record)) { ··· 3652 3661 * The driver is expected to do FIP/FCF. Call the port 3653 3662 * and get the FCF Table. 3654 3663 */ 3655 - spin_lock_irqsave(&phba->hbalock, iflags); 3656 - if (phba->hba_flag & FCF_TS_INPROG) { 3657 - spin_unlock_irqrestore(&phba->hbalock, iflags); 3664 + if (test_bit(FCF_TS_INPROG, &phba->hba_flag)) 3658 3665 return; 3659 - } 3660 3666 /* This is the initial FCF discovery scan */ 3667 + spin_lock_irqsave(&phba->hbalock, iflags); 3661 3668 phba->fcf.fcf_flag |= FCF_INIT_DISC; 3662 3669 spin_unlock_irqrestore(&phba->hbalock, iflags); 3663 3670 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, ··· 6986 6997 * registered, do nothing. 
6987 6998 */ 6988 6999 spin_lock_irq(&phba->hbalock); 6989 - if (!(phba->hba_flag & HBA_FCOE_MODE) || 7000 + if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag) || 6990 7001 !(phba->fcf.fcf_flag & FCF_REGISTERED) || 6991 - !(phba->hba_flag & HBA_FIP_SUPPORT) || 7002 + !test_bit(HBA_FIP_SUPPORT, &phba->hba_flag) || 6992 7003 (phba->fcf.fcf_flag & FCF_DISCOVERY) || 6993 - (phba->pport->port_state == LPFC_FLOGI)) { 7004 + phba->pport->port_state == LPFC_FLOGI) { 6994 7005 spin_unlock_irq(&phba->hbalock); 6995 7006 return; 6996 7007 }
+44 -63
drivers/scsi/lpfc/lpfc_init.c
··· 567 567 568 568 spin_lock_irq(&phba->hbalock); 569 569 /* Initialize ERATT handling flag */ 570 - phba->hba_flag &= ~HBA_ERATT_HANDLED; 570 + clear_bit(HBA_ERATT_HANDLED, &phba->hba_flag); 571 571 572 572 /* Enable appropriate host interrupts */ 573 573 if (lpfc_readl(phba->HCregaddr, &status)) { ··· 599 599 /* Set up heart beat (HB) timer */ 600 600 mod_timer(&phba->hb_tmofunc, 601 601 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 602 - phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO); 602 + clear_bit(HBA_HBEAT_INP, &phba->hba_flag); 603 + clear_bit(HBA_HBEAT_TMO, &phba->hba_flag); 603 604 phba->last_completion_time = jiffies; 604 605 /* Set up error attention (ERATT) polling timer */ 605 606 mod_timer(&phba->eratt_poll, 606 607 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); 607 608 608 - if (phba->hba_flag & LINK_DISABLED) { 609 + if (test_bit(LINK_DISABLED, &phba->hba_flag)) { 609 610 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 610 611 "2598 Adapter Link is disabled.\n"); 611 612 lpfc_down_link(phba, pmb); ··· 926 925 struct hbq_dmabuf *dmabuf; 927 926 struct lpfc_cq_event *cq_event; 928 927 929 - spin_lock_irq(&phba->hbalock); 930 - phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 931 - spin_unlock_irq(&phba->hbalock); 928 + clear_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag); 932 929 933 930 while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 934 931 /* Get the response iocb from the head of work queue */ ··· 1227 1228 lpfc_rrq_timeout(struct timer_list *t) 1228 1229 { 1229 1230 struct lpfc_hba *phba; 1230 - unsigned long iflag; 1231 1231 1232 1232 phba = from_timer(phba, t, rrq_tmr); 1233 - spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 1234 - if (!test_bit(FC_UNLOADING, &phba->pport->load_flag)) 1235 - phba->hba_flag |= HBA_RRQ_ACTIVE; 1236 - else 1237 - phba->hba_flag &= ~HBA_RRQ_ACTIVE; 1238 - spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 1233 + if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) { 1234 + 
clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag); 1235 + return; 1236 + } 1239 1237 1240 - if (!test_bit(FC_UNLOADING, &phba->pport->load_flag)) 1241 - lpfc_worker_wake_up(phba); 1238 + set_bit(HBA_RRQ_ACTIVE, &phba->hba_flag); 1239 + lpfc_worker_wake_up(phba); 1242 1240 } 1243 1241 1244 1242 /** ··· 1257 1261 static void 1258 1262 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 1259 1263 { 1260 - unsigned long drvr_flag; 1261 - 1262 - spin_lock_irqsave(&phba->hbalock, drvr_flag); 1263 - phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO); 1264 - spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 1264 + clear_bit(HBA_HBEAT_INP, &phba->hba_flag); 1265 + clear_bit(HBA_HBEAT_TMO, &phba->hba_flag); 1265 1266 1266 1267 /* Check and reset heart-beat timer if necessary */ 1267 1268 mempool_free(pmboxq, phba->mbox_mem_pool); ··· 1450 1457 int retval; 1451 1458 1452 1459 /* Is a Heartbeat mbox already in progress */ 1453 - if (phba->hba_flag & HBA_HBEAT_INP) 1460 + if (test_bit(HBA_HBEAT_INP, &phba->hba_flag)) 1454 1461 return 0; 1455 1462 1456 1463 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); ··· 1466 1473 mempool_free(pmboxq, phba->mbox_mem_pool); 1467 1474 return -ENXIO; 1468 1475 } 1469 - phba->hba_flag |= HBA_HBEAT_INP; 1476 + set_bit(HBA_HBEAT_INP, &phba->hba_flag); 1470 1477 1471 1478 return 0; 1472 1479 } ··· 1486 1493 { 1487 1494 if (phba->cfg_enable_hba_heartbeat) 1488 1495 return; 1489 - phba->hba_flag |= HBA_HBEAT_TMO; 1496 + set_bit(HBA_HBEAT_TMO, &phba->hba_flag); 1490 1497 } 1491 1498 1492 1499 /** ··· 1558 1565 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL), 1559 1566 jiffies)) { 1560 1567 spin_unlock_irq(&phba->pport->work_port_lock); 1561 - if (phba->hba_flag & HBA_HBEAT_INP) 1568 + if (test_bit(HBA_HBEAT_INP, &phba->hba_flag)) 1562 1569 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); 1563 1570 else 1564 1571 tmo = (1000 * LPFC_HB_MBOX_INTERVAL); ··· 1567 1574 spin_unlock_irq(&phba->pport->work_port_lock); 1568 1575 1569 1576 /* Check if a 
MBX_HEARTBEAT is already in progress */ 1570 - if (phba->hba_flag & HBA_HBEAT_INP) { 1577 + if (test_bit(HBA_HBEAT_INP, &phba->hba_flag)) { 1571 1578 /* 1572 1579 * If heart beat timeout called with HBA_HBEAT_INP set 1573 1580 * we need to give the hb mailbox cmd a chance to ··· 1604 1611 } 1605 1612 } else { 1606 1613 /* Check to see if we want to force a MBX_HEARTBEAT */ 1607 - if (phba->hba_flag & HBA_HBEAT_TMO) { 1614 + if (test_bit(HBA_HBEAT_TMO, &phba->hba_flag)) { 1608 1615 retval = lpfc_issue_hb_mbox(phba); 1609 1616 if (retval) 1610 1617 tmo = (1000 * LPFC_HB_MBOX_INTERVAL); ··· 1692 1699 * since we cannot communicate with the pci card anyway. 1693 1700 */ 1694 1701 if (pci_channel_offline(phba->pcidev)) { 1695 - spin_lock_irq(&phba->hbalock); 1696 - phba->hba_flag &= ~DEFER_ERATT; 1697 - spin_unlock_irq(&phba->hbalock); 1702 + clear_bit(DEFER_ERATT, &phba->hba_flag); 1698 1703 return; 1699 1704 } 1700 1705 ··· 1743 1752 if (!phba->work_hs && !test_bit(FC_UNLOADING, &phba->pport->load_flag)) 1744 1753 phba->work_hs = old_host_status & ~HS_FFER1; 1745 1754 1746 - spin_lock_irq(&phba->hbalock); 1747 - phba->hba_flag &= ~DEFER_ERATT; 1748 - spin_unlock_irq(&phba->hbalock); 1755 + clear_bit(DEFER_ERATT, &phba->hba_flag); 1749 1756 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8); 1750 1757 phba->work_status[1] = readl(phba->MBslimaddr + 0xac); 1751 1758 } ··· 1787 1798 * since we cannot communicate with the pci card anyway. 
1788 1799 */ 1789 1800 if (pci_channel_offline(phba->pcidev)) { 1790 - spin_lock_irq(&phba->hbalock); 1791 - phba->hba_flag &= ~DEFER_ERATT; 1792 - spin_unlock_irq(&phba->hbalock); 1801 + clear_bit(DEFER_ERATT, &phba->hba_flag); 1793 1802 return; 1794 1803 } 1795 1804 ··· 1798 1811 /* Send an internal error event to mgmt application */ 1799 1812 lpfc_board_errevt_to_mgmt(phba); 1800 1813 1801 - if (phba->hba_flag & DEFER_ERATT) 1814 + if (test_bit(DEFER_ERATT, &phba->hba_flag)) 1802 1815 lpfc_handle_deferred_eratt(phba); 1803 1816 1804 1817 if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) { ··· 2013 2026 /* consider PCI bus read error as pci_channel_offline */ 2014 2027 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO) 2015 2028 return; 2016 - if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) { 2029 + if (!test_bit(HBA_RECOVERABLE_UE, &phba->hba_flag)) { 2017 2030 lpfc_sli4_offline_eratt(phba); 2018 2031 return; 2019 2032 } ··· 3306 3319 del_timer_sync(&phba->hb_tmofunc); 3307 3320 if (phba->sli_rev == LPFC_SLI_REV4) { 3308 3321 del_timer_sync(&phba->rrq_tmr); 3309 - phba->hba_flag &= ~HBA_RRQ_ACTIVE; 3322 + clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag); 3310 3323 } 3311 - phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO); 3324 + clear_bit(HBA_HBEAT_INP, &phba->hba_flag); 3325 + clear_bit(HBA_HBEAT_TMO, &phba->hba_flag); 3312 3326 3313 3327 switch (phba->pci_dev_grp) { 3314 3328 case LPFC_PCI_DEV_LP: ··· 4964 4976 * Avoid reporting supported link speed for FCoE as it can't be 4965 4977 * controlled via FCoE. 4966 4978 */ 4967 - if (phba->hba_flag & HBA_FCOE_MODE) 4979 + if (test_bit(HBA_FCOE_MODE, &phba->hba_flag)) 4968 4980 return; 4969 4981 4970 4982 if (phba->lmt & LMT_256Gb) ··· 5478 5490 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch 5479 5491 * topology info. Note: Optional for non FC-AL ports. 
5480 5492 */ 5481 - if (!(phba->hba_flag & HBA_FCOE_MODE)) { 5493 + if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { 5482 5494 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 5483 5495 if (rc == MBX_NOT_FINISHED) 5484 5496 goto out_free_pmb; ··· 6013 6025 */ 6014 6026 if (phba->cmf_active_mode == LPFC_CFG_MANAGED && 6015 6027 phba->link_state != LPFC_LINK_DOWN && 6016 - phba->hba_flag & HBA_SETUP) { 6028 + test_bit(HBA_SETUP, &phba->hba_flag)) { 6017 6029 mbpi = phba->cmf_last_sync_bw; 6018 6030 phba->cmf_last_sync_bw = 0; 6019 6031 extra = 0; ··· 6766 6778 } 6767 6779 6768 6780 /* If the FCF discovery is in progress, do nothing. */ 6769 - spin_lock_irq(&phba->hbalock); 6770 - if (phba->hba_flag & FCF_TS_INPROG) { 6771 - spin_unlock_irq(&phba->hbalock); 6781 + if (test_bit(FCF_TS_INPROG, &phba->hba_flag)) 6772 6782 break; 6773 - } 6783 + spin_lock_irq(&phba->hbalock); 6774 6784 /* If fast FCF failover rescan event is pending, do nothing */ 6775 6785 if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) { 6776 6786 spin_unlock_irq(&phba->hbalock); ··· 7307 7321 unsigned long iflags; 7308 7322 7309 7323 /* First, declare the async event has been handled */ 7310 - spin_lock_irqsave(&phba->hbalock, iflags); 7311 - phba->hba_flag &= ~ASYNC_EVENT; 7312 - spin_unlock_irqrestore(&phba->hbalock, iflags); 7324 + clear_bit(ASYNC_EVENT, &phba->hba_flag); 7313 7325 7314 7326 /* Now, handle all the async events */ 7315 7327 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); ··· 9853 9869 return; 9854 9870 } 9855 9871 /* FW supports persistent topology - override module parameter value */ 9856 - phba->hba_flag |= HBA_PERSISTENT_TOPO; 9872 + set_bit(HBA_PERSISTENT_TOPO, &phba->hba_flag); 9857 9873 9858 9874 /* if ASIC_GEN_NUM >= 0xC) */ 9859 9875 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 9860 9876 LPFC_SLI_INTF_IF_TYPE_6) || 9861 9877 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == 9862 9878 LPFC_SLI_INTF_FAMILY_G6)) { 9863 - 
if (!tf) { 9879 + if (!tf) 9864 9880 phba->cfg_topology = ((pt == LINK_FLAGS_LOOP) 9865 9881 ? FLAGS_TOPOLOGY_MODE_LOOP 9866 9882 : FLAGS_TOPOLOGY_MODE_PT_PT); 9867 - } else { 9868 - phba->hba_flag &= ~HBA_PERSISTENT_TOPO; 9869 - } 9883 + else 9884 + clear_bit(HBA_PERSISTENT_TOPO, &phba->hba_flag); 9870 9885 } else { /* G5 */ 9871 - if (tf) { 9886 + if (tf) 9872 9887 /* If topology failover set - pt is '0' or '1' */ 9873 9888 phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP : 9874 9889 FLAGS_TOPOLOGY_MODE_LOOP_PT); 9875 - } else { 9890 + else 9876 9891 phba->cfg_topology = ((pt == LINK_FLAGS_P2P) 9877 9892 ? FLAGS_TOPOLOGY_MODE_PT_PT 9878 9893 : FLAGS_TOPOLOGY_MODE_LOOP); 9879 - } 9880 9894 } 9881 - if (phba->hba_flag & HBA_PERSISTENT_TOPO) { 9895 + if (test_bit(HBA_PERSISTENT_TOPO, &phba->hba_flag)) 9882 9896 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9883 9897 "2020 Using persistent topology value [%s]", 9884 9898 lpfc_topo_to_str[phba->cfg_topology]); 9885 - } else { 9899 + else 9886 9900 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9887 9901 "2021 Invalid topology values from FW " 9888 9902 "Using driver parameter defined value [%s]", 9889 9903 lpfc_topo_to_str[phba->cfg_topology]); 9890 - } 9891 9904 } 9892 9905 9893 9906 /** ··· 10127 10146 forced_link_speed = 10128 10147 bf_get(lpfc_mbx_rd_conf_link_speed, rd_config); 10129 10148 if (forced_link_speed) { 10130 - phba->hba_flag |= HBA_FORCED_LINK_SPEED; 10149 + set_bit(HBA_FORCED_LINK_SPEED, &phba->hba_flag); 10131 10150 10132 10151 switch (forced_link_speed) { 10133 10152 case LINK_SPEED_1G: ··· 12222 12241 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); 12223 12242 if (retval) 12224 12243 return intr_mode; 12225 - phba->hba_flag &= ~HBA_NEEDS_CFG_PORT; 12244 + clear_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag); 12226 12245 12227 12246 if (cfg_mode == 2) { 12228 12247 /* Now, try to enable MSI-X interrupt mode */ ··· 15510 15529 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 15511 15530 15512 15531 if 
(phba->link_state == LPFC_HBA_ERROR && 15513 - phba->hba_flag & HBA_IOQ_FLUSH) 15532 + test_bit(HBA_IOQ_FLUSH, &phba->hba_flag)) 15514 15533 return PCI_ERS_RESULT_NEED_RESET; 15515 15534 15516 15535 switch (phba->pci_dev_grp) {
+1 -1
drivers/scsi/lpfc/lpfc_nportdisc.c
··· 504 504 * must have ACCed the remote NPorts FLOGI to us 505 505 * to make it here. 506 506 */ 507 - if (phba->hba_flag & HBA_FLOGI_OUTSTANDING) 507 + if (test_bit(HBA_FLOGI_OUTSTANDING, &phba->hba_flag)) 508 508 lpfc_els_abort_flogi(phba); 509 509 510 510 ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
+11 -16
drivers/scsi/lpfc/lpfc_nvme.c
··· 95 95 vport = lport->vport; 96 96 97 97 if (!vport || test_bit(FC_UNLOADING, &vport->load_flag) || 98 - vport->phba->hba_flag & HBA_IOQ_FLUSH) 98 + test_bit(HBA_IOQ_FLUSH, &vport->phba->hba_flag)) 99 99 return -ENODEV; 100 100 101 101 qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL); ··· 272 272 273 273 remoteport = lpfc_rport->remoteport; 274 274 if (!vport->localport || 275 - vport->phba->hba_flag & HBA_IOQ_FLUSH) 275 + test_bit(HBA_IOQ_FLUSH, &vport->phba->hba_flag)) 276 276 return -EINVAL; 277 277 278 278 lport = vport->localport->private; ··· 569 569 ndlp->nlp_DID, ntype, nstate); 570 570 return -ENODEV; 571 571 } 572 - if (vport->phba->hba_flag & HBA_IOQ_FLUSH) 572 + if (test_bit(HBA_IOQ_FLUSH, &vport->phba->hba_flag)) 573 573 return -ENODEV; 574 574 575 575 if (!vport->phba->sli4_hba.nvmels_wq) ··· 675 675 676 676 vport = lport->vport; 677 677 if (test_bit(FC_UNLOADING, &vport->load_flag) || 678 - vport->phba->hba_flag & HBA_IOQ_FLUSH) 678 + test_bit(HBA_IOQ_FLUSH, &vport->phba->hba_flag)) 679 679 return -ENODEV; 680 680 681 681 atomic_inc(&lport->fc4NvmeLsRequests); ··· 1568 1568 phba = vport->phba; 1569 1569 1570 1570 if ((unlikely(test_bit(FC_UNLOADING, &vport->load_flag))) || 1571 - phba->hba_flag & HBA_IOQ_FLUSH) { 1571 + test_bit(HBA_IOQ_FLUSH, &phba->hba_flag)) { 1572 1572 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, 1573 1573 "6124 Fail IO, Driver unload\n"); 1574 1574 atomic_inc(&lport->xmt_fcp_err); ··· 1909 1909 return; 1910 1910 } 1911 1911 1912 - /* Guard against IO completion being called at same time */ 1913 - spin_lock_irqsave(&lpfc_nbuf->buf_lock, flags); 1914 - 1915 - /* If the hba is getting reset, this flag is set. It is 1916 - * cleared when the reset is complete and rings reestablished. 
1917 - */ 1918 - spin_lock(&phba->hbalock); 1919 1912 /* driver queued commands are in process of being flushed */ 1920 - if (phba->hba_flag & HBA_IOQ_FLUSH) { 1921 - spin_unlock(&phba->hbalock); 1922 - spin_unlock_irqrestore(&lpfc_nbuf->buf_lock, flags); 1913 + if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag)) { 1923 1914 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1924 1915 "6139 Driver in reset cleanup - flushing " 1925 - "NVME Req now. hba_flag x%x\n", 1916 + "NVME Req now. hba_flag x%lx\n", 1926 1917 phba->hba_flag); 1927 1918 return; 1928 1919 } 1920 + 1921 + /* Guard against IO completion being called at same time */ 1922 + spin_lock_irqsave(&lpfc_nbuf->buf_lock, flags); 1923 + spin_lock(&phba->hbalock); 1929 1924 1930 1925 nvmereq_wqe = &lpfc_nbuf->cur_iocbq; 1931 1926
+3 -4
drivers/scsi/lpfc/lpfc_nvmet.c
··· 3395 3395 /* If the hba is getting reset, this flag is set. It is 3396 3396 * cleared when the reset is complete and rings reestablished. 3397 3397 */ 3398 - spin_lock_irqsave(&phba->hbalock, flags); 3399 3398 /* driver queued commands are in process of being flushed */ 3400 - if (phba->hba_flag & HBA_IOQ_FLUSH) { 3401 - spin_unlock_irqrestore(&phba->hbalock, flags); 3399 + if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag)) { 3402 3400 atomic_inc(&tgtp->xmt_abort_rsp_error); 3403 3401 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3404 3402 "6163 Driver in reset cleanup - flushing " 3405 - "NVME Req now. hba_flag x%x oxid x%x\n", 3403 + "NVME Req now. hba_flag x%lx oxid x%x\n", 3406 3404 phba->hba_flag, ctxp->oxid); 3407 3405 lpfc_sli_release_iocbq(phba, abts_wqeq); 3408 3406 spin_lock_irqsave(&ctxp->ctxlock, flags); ··· 3409 3411 return 0; 3410 3412 } 3411 3413 3414 + spin_lock_irqsave(&phba->hbalock, flags); 3412 3415 /* Outstanding abort is in progress */ 3413 3416 if (abts_wqeq->cmd_flag & LPFC_DRIVER_ABORTED) { 3414 3417 spin_unlock_irqrestore(&phba->hbalock, flags);
+4 -4
drivers/scsi/lpfc/lpfc_scsi.c
··· 3227 3227 */ 3228 3228 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); 3229 3229 /* Set first-burst provided it was successfully negotiated */ 3230 - if (!(phba->hba_flag & HBA_FCOE_MODE) && 3230 + if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag) && 3231 3231 vport->cfg_first_burst_size && 3232 3232 scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) { 3233 3233 u32 init_len, total_len; ··· 3423 3423 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); 3424 3424 3425 3425 /* Set first-burst provided it was successfully negotiated */ 3426 - if (!(phba->hba_flag & HBA_FCOE_MODE) && 3426 + if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag) && 3427 3427 vport->cfg_first_burst_size && 3428 3428 scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) { 3429 3429 u32 init_len, total_len; ··· 5043 5043 5044 5044 /* Check for valid Emulex Device ID */ 5045 5045 if (phba->sli_rev != LPFC_SLI_REV4 || 5046 - phba->hba_flag & HBA_FCOE_MODE) { 5046 + test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { 5047 5047 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5048 5048 "8347 Incapable PCI reset device: " 5049 5049 "0x%04x\n", ptr->device); ··· 5520 5520 5521 5521 spin_lock(&phba->hbalock); 5522 5522 /* driver queued commands are in process of being flushed */ 5523 - if (phba->hba_flag & HBA_IOQ_FLUSH) { 5523 + if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag)) { 5524 5524 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 5525 5525 "3168 SCSI Layer abort requested I/O has been " 5526 5526 "flushed by LLD.\n");
+76 -104
drivers/scsi/lpfc/lpfc_sli.c
··· 1024 1024 unsigned long iflags; 1025 1025 LIST_HEAD(send_rrq); 1026 1026 1027 - spin_lock_irqsave(&phba->hbalock, iflags); 1028 - phba->hba_flag &= ~HBA_RRQ_ACTIVE; 1029 - spin_unlock_irqrestore(&phba->hbalock, iflags); 1030 - 1027 + clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag); 1031 1028 next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1)); 1032 1029 spin_lock_irqsave(&phba->rrq_list_lock, iflags); 1033 1030 list_for_each_entry_safe(rrq, nextrrq, ··· 1179 1182 if (!phba->cfg_enable_rrq) 1180 1183 return -EINVAL; 1181 1184 1182 - spin_lock_irqsave(&phba->hbalock, iflags); 1183 1185 if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) { 1184 - phba->hba_flag &= ~HBA_RRQ_ACTIVE; 1185 - goto out; 1186 + clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag); 1187 + goto outnl; 1186 1188 } 1187 1189 1190 + spin_lock_irqsave(&phba->hbalock, iflags); 1188 1191 if (ndlp->vport && test_bit(FC_UNLOADING, &ndlp->vport->load_flag)) 1189 1192 goto out; 1190 1193 ··· 1218 1221 empty = list_empty(&phba->active_rrq_list); 1219 1222 list_add_tail(&rrq->list, &phba->active_rrq_list); 1220 1223 spin_unlock_irqrestore(&phba->rrq_list_lock, iflags); 1221 - 1222 - spin_lock_irqsave(&phba->hbalock, iflags); 1223 - phba->hba_flag |= HBA_RRQ_ACTIVE; 1224 - spin_unlock_irqrestore(&phba->hbalock, iflags); 1225 - 1224 + set_bit(HBA_RRQ_ACTIVE, &phba->hba_flag); 1226 1225 if (empty) 1227 1226 lpfc_worker_wake_up(phba); 1228 1227 return 0; 1229 1228 out: 1230 1229 spin_unlock_irqrestore(&phba->hbalock, iflags); 1230 + outnl: 1231 1231 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 1232 1232 "2921 Can't set rrq active xri:0x%x rxid:0x%x" 1233 1233 " DID:0x%x Send:%d\n", ··· 3939 3945 uint64_t sli_intr, cnt; 3940 3946 3941 3947 phba = from_timer(phba, t, eratt_poll); 3942 - if (!(phba->hba_flag & HBA_SETUP)) 3948 + if (!test_bit(HBA_SETUP, &phba->hba_flag)) 3943 3949 return; 3944 3950 3945 3951 if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) ··· 4524 4530 unsigned long iflag; 4525 4531 int 
count = 0; 4526 4532 4527 - spin_lock_irqsave(&phba->hbalock, iflag); 4528 - phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 4529 - spin_unlock_irqrestore(&phba->hbalock, iflag); 4533 + clear_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag); 4530 4534 while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 4531 4535 /* Get the response iocb from the head of work queue */ 4532 4536 spin_lock_irqsave(&phba->hbalock, iflag); ··· 4681 4689 uint32_t i; 4682 4690 struct lpfc_iocbq *piocb, *next_iocb; 4683 4691 4684 - spin_lock_irq(&phba->hbalock); 4685 4692 /* Indicate the I/O queues are flushed */ 4686 - phba->hba_flag |= HBA_IOQ_FLUSH; 4687 - spin_unlock_irq(&phba->hbalock); 4693 + set_bit(HBA_IOQ_FLUSH, &phba->hba_flag); 4688 4694 4689 4695 /* Look on all the FCP Rings for the iotag */ 4690 4696 if (phba->sli_rev >= LPFC_SLI_REV4) { ··· 4760 4770 if (lpfc_readl(phba->HSregaddr, &status)) 4761 4771 return 1; 4762 4772 4763 - phba->hba_flag |= HBA_NEEDS_CFG_PORT; 4773 + set_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag); 4764 4774 4765 4775 /* 4766 4776 * Check status register every 100ms for 5 retries, then every ··· 4839 4849 } else 4840 4850 phba->sli4_hba.intr_enable = 0; 4841 4851 4842 - phba->hba_flag &= ~HBA_SETUP; 4852 + clear_bit(HBA_SETUP, &phba->hba_flag); 4843 4853 return retval; 4844 4854 } 4845 4855 ··· 5091 5101 /* perform board reset */ 5092 5102 phba->fc_eventTag = 0; 5093 5103 phba->link_events = 0; 5094 - phba->hba_flag |= HBA_NEEDS_CFG_PORT; 5104 + set_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag); 5095 5105 if (phba->pport) { 5096 5106 phba->pport->fc_myDID = 0; 5097 5107 phba->pport->fc_prevDID = 0; ··· 5151 5161 5152 5162 /* Reset HBA */ 5153 5163 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5154 - "0295 Reset HBA Data: x%x x%x x%x\n", 5164 + "0295 Reset HBA Data: x%x x%x x%lx\n", 5155 5165 phba->pport->port_state, psli->sli_flag, 5156 5166 phba->hba_flag); 5157 5167 ··· 5160 5170 phba->link_events = 0; 5161 5171 phba->pport->fc_myDID = 0; 5162 5172 phba->pport->fc_prevDID = 0; 5163 
- phba->hba_flag &= ~HBA_SETUP; 5173 + clear_bit(HBA_SETUP, &phba->hba_flag); 5164 5174 5165 5175 spin_lock_irq(&phba->hbalock); 5166 5176 psli->sli_flag &= ~(LPFC_PROCESS_LA); ··· 5404 5414 return -EIO; 5405 5415 } 5406 5416 5407 - phba->hba_flag |= HBA_NEEDS_CFG_PORT; 5417 + set_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag); 5408 5418 5409 5419 /* Clear all interrupt enable conditions */ 5410 5420 writel(0, phba->HCregaddr); ··· 5706 5716 int longs; 5707 5717 5708 5718 /* Enable ISR already does config_port because of config_msi mbx */ 5709 - if (phba->hba_flag & HBA_NEEDS_CFG_PORT) { 5719 + if (test_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag)) { 5710 5720 rc = lpfc_sli_config_port(phba, LPFC_SLI_REV3); 5711 5721 if (rc) 5712 5722 return -EIO; 5713 - phba->hba_flag &= ~HBA_NEEDS_CFG_PORT; 5723 + clear_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag); 5714 5724 } 5715 5725 phba->fcp_embed_io = 0; /* SLI4 FC support only */ 5716 5726 ··· 7757 7767 snprintf(mbox->u.mqe.un.set_host_data.un.data, 7758 7768 LPFC_HOST_OS_DRIVER_VERSION_SIZE, 7759 7769 "Linux %s v"LPFC_DRIVER_VERSION, 7760 - (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC"); 7770 + test_bit(HBA_FCOE_MODE, &phba->hba_flag) ? 
"FCoE" : "FC"); 7761 7771 } 7762 7772 7763 7773 int ··· 8485 8495 spin_unlock_irq(&phba->hbalock); 8486 8496 } 8487 8497 } 8488 - phba->hba_flag &= ~HBA_SETUP; 8498 + clear_bit(HBA_SETUP, &phba->hba_flag); 8489 8499 8490 8500 lpfc_sli4_dip(phba); 8491 8501 ··· 8514 8524 mqe = &mboxq->u.mqe; 8515 8525 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); 8516 8526 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) { 8517 - phba->hba_flag |= HBA_FCOE_MODE; 8527 + set_bit(HBA_FCOE_MODE, &phba->hba_flag); 8518 8528 phba->fcp_embed_io = 0; /* SLI4 FC support only */ 8519 8529 } else { 8520 - phba->hba_flag &= ~HBA_FCOE_MODE; 8530 + clear_bit(HBA_FCOE_MODE, &phba->hba_flag); 8521 8531 } 8522 8532 8523 8533 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) == 8524 8534 LPFC_DCBX_CEE_MODE) 8525 - phba->hba_flag |= HBA_FIP_SUPPORT; 8535 + set_bit(HBA_FIP_SUPPORT, &phba->hba_flag); 8526 8536 else 8527 - phba->hba_flag &= ~HBA_FIP_SUPPORT; 8537 + clear_bit(HBA_FIP_SUPPORT, &phba->hba_flag); 8528 8538 8529 - phba->hba_flag &= ~HBA_IOQ_FLUSH; 8539 + clear_bit(HBA_IOQ_FLUSH, &phba->hba_flag); 8530 8540 8531 8541 if (phba->sli_rev != LPFC_SLI_REV4) { 8532 8542 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8533 8543 "0376 READ_REV Error. SLI Level %d " 8534 8544 "FCoE enabled %d\n", 8535 - phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE); 8545 + phba->sli_rev, 8546 + test_bit(HBA_FCOE_MODE, &phba->hba_flag) ? 
1 : 0); 8536 8547 rc = -EIO; 8537 8548 kfree(vpd); 8538 8549 goto out_free_mbox; ··· 8548 8557 * to read FCoE param config regions, only read parameters if the 8549 8558 * board is FCoE 8550 8559 */ 8551 - if (phba->hba_flag & HBA_FCOE_MODE && 8560 + if (test_bit(HBA_FCOE_MODE, &phba->hba_flag) && 8552 8561 lpfc_sli4_read_fcoe_params(phba)) 8553 8562 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT, 8554 8563 "2570 Failed to read FCoE parameters\n"); ··· 8625 8634 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY); 8626 8635 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 8627 8636 if (rc == MBX_SUCCESS) { 8628 - phba->hba_flag |= HBA_RECOVERABLE_UE; 8637 + set_bit(HBA_RECOVERABLE_UE, &phba->hba_flag); 8629 8638 /* Set 1Sec interval to detect UE */ 8630 8639 phba->eratt_poll_interval = 1; 8631 8640 phba->sli4_hba.ue_to_sr = bf_get( ··· 8676 8685 } 8677 8686 8678 8687 /* Performance Hints are ONLY for FCoE */ 8679 - if (phba->hba_flag & HBA_FCOE_MODE) { 8688 + if (test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { 8680 8689 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs)) 8681 8690 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED; 8682 8691 else ··· 8935 8944 } 8936 8945 lpfc_sli4_node_prep(phba); 8937 8946 8938 - if (!(phba->hba_flag & HBA_FCOE_MODE)) { 8947 + if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { 8939 8948 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) { 8940 8949 /* 8941 8950 * The FC Port needs to register FCFI (index 0) ··· 9011 9020 /* Start heart beat timer */ 9012 9021 mod_timer(&phba->hb_tmofunc, 9013 9022 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 9014 - phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO); 9023 + clear_bit(HBA_HBEAT_INP, &phba->hba_flag); 9024 + clear_bit(HBA_HBEAT_TMO, &phba->hba_flag); 9015 9025 phba->last_completion_time = jiffies; 9016 9026 9017 9027 /* start eq_delay heartbeat */ ··· 9054 9062 /* Setup CMF after HBA is initialized */ 9055 9063 lpfc_cmf_setup(phba); 9056 9064 9057 - if 
(!(phba->hba_flag & HBA_FCOE_MODE) && 9058 - (phba->hba_flag & LINK_DISABLED)) { 9065 + if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag) && 9066 + test_bit(LINK_DISABLED, &phba->hba_flag)) { 9059 9067 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9060 9068 "3103 Adapter Link is disabled.\n"); 9061 9069 lpfc_down_link(phba, mboxq); ··· 9079 9087 /* Enable RAS FW log support */ 9080 9088 lpfc_sli4_ras_setup(phba); 9081 9089 9082 - phba->hba_flag |= HBA_SETUP; 9090 + set_bit(HBA_SETUP, &phba->hba_flag); 9083 9091 return rc; 9084 9092 9085 9093 out_io_buff_free: ··· 9383 9391 } 9384 9392 9385 9393 /* If HBA has a deferred error attention, fail the iocb. */ 9386 - if (unlikely(phba->hba_flag & DEFER_ERATT)) { 9394 + if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) { 9387 9395 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 9388 9396 goto out_not_finished; 9389 9397 } ··· 10447 10455 return IOCB_ERROR; 10448 10456 10449 10457 /* If HBA has a deferred error attention, fail the iocb. */ 10450 - if (unlikely(phba->hba_flag & DEFER_ERATT)) 10458 + if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) 10451 10459 return IOCB_ERROR; 10452 10460 10453 10461 /* ··· 12777 12785 int i; 12778 12786 12779 12787 /* all I/Os are in process of being flushed */ 12780 - if (phba->hba_flag & HBA_IOQ_FLUSH) 12788 + if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag)) 12781 12789 return errcnt; 12782 12790 12783 12791 for (i = 1; i <= phba->sli.last_iotag; i++) { ··· 12847 12855 u16 ulp_context, iotag, cqid = LPFC_WQE_CQ_ID_DEFAULT; 12848 12856 bool ia; 12849 12857 12850 - spin_lock_irqsave(&phba->hbalock, iflags); 12851 - 12852 12858 /* all I/Os are in process of being flushed */ 12853 - if (phba->hba_flag & HBA_IOQ_FLUSH) { 12854 - spin_unlock_irqrestore(&phba->hbalock, iflags); 12859 + if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag)) 12855 12860 return 0; 12856 - } 12861 + 12857 12862 sum = 0; 12858 12863 12864 + spin_lock_irqsave(&phba->hbalock, iflags); 12859 12865 for (i = 1; i <= 
phba->sli.last_iotag; i++) { 12860 12866 iocbq = phba->sli.iocbq_lookup[i]; 12861 12867 ··· 13385 13395 if ((HS_FFER1 & phba->work_hs) && 13386 13396 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 13387 13397 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) { 13388 - phba->hba_flag |= DEFER_ERATT; 13398 + set_bit(DEFER_ERATT, &phba->hba_flag); 13389 13399 /* Clear all interrupt enable conditions */ 13390 13400 writel(0, phba->HCregaddr); 13391 13401 readl(phba->HCregaddr); ··· 13394 13404 /* Set the driver HA work bitmap */ 13395 13405 phba->work_ha |= HA_ERATT; 13396 13406 /* Indicate polling handles this ERATT */ 13397 - phba->hba_flag |= HBA_ERATT_HANDLED; 13407 + set_bit(HBA_ERATT_HANDLED, &phba->hba_flag); 13398 13408 return 1; 13399 13409 } 13400 13410 return 0; ··· 13405 13415 /* Set the driver HA work bitmap */ 13406 13416 phba->work_ha |= HA_ERATT; 13407 13417 /* Indicate polling handles this ERATT */ 13408 - phba->hba_flag |= HBA_ERATT_HANDLED; 13418 + set_bit(HBA_ERATT_HANDLED, &phba->hba_flag); 13409 13419 return 1; 13410 13420 } 13411 13421 ··· 13441 13451 &uerr_sta_hi)) { 13442 13452 phba->work_hs |= UNPLUG_ERR; 13443 13453 phba->work_ha |= HA_ERATT; 13444 - phba->hba_flag |= HBA_ERATT_HANDLED; 13454 + set_bit(HBA_ERATT_HANDLED, &phba->hba_flag); 13445 13455 return 1; 13446 13456 } 13447 13457 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || ··· 13457 13467 phba->work_status[0] = uerr_sta_lo; 13458 13468 phba->work_status[1] = uerr_sta_hi; 13459 13469 phba->work_ha |= HA_ERATT; 13460 - phba->hba_flag |= HBA_ERATT_HANDLED; 13470 + set_bit(HBA_ERATT_HANDLED, &phba->hba_flag); 13461 13471 return 1; 13462 13472 } 13463 13473 break; ··· 13469 13479 &portsmphr)){ 13470 13480 phba->work_hs |= UNPLUG_ERR; 13471 13481 phba->work_ha |= HA_ERATT; 13472 - phba->hba_flag |= HBA_ERATT_HANDLED; 13482 + set_bit(HBA_ERATT_HANDLED, &phba->hba_flag); 13473 13483 return 1; 13474 13484 } 13475 13485 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) { ··· 13492 13502 
phba->work_status[0], 13493 13503 phba->work_status[1]); 13494 13504 phba->work_ha |= HA_ERATT; 13495 - phba->hba_flag |= HBA_ERATT_HANDLED; 13505 + set_bit(HBA_ERATT_HANDLED, &phba->hba_flag); 13496 13506 return 1; 13497 13507 } 13498 13508 break; ··· 13529 13539 return 0; 13530 13540 13531 13541 /* Check if interrupt handler handles this ERATT */ 13532 - spin_lock_irq(&phba->hbalock); 13533 - if (phba->hba_flag & HBA_ERATT_HANDLED) { 13542 + if (test_bit(HBA_ERATT_HANDLED, &phba->hba_flag)) 13534 13543 /* Interrupt handler has handled ERATT */ 13535 - spin_unlock_irq(&phba->hbalock); 13536 13544 return 0; 13537 - } 13538 13545 13539 13546 /* 13540 13547 * If there is deferred error attention, do not check for error 13541 13548 * attention 13542 13549 */ 13543 - if (unlikely(phba->hba_flag & DEFER_ERATT)) { 13544 - spin_unlock_irq(&phba->hbalock); 13550 + if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) 13545 13551 return 0; 13546 - } 13547 13552 13553 + spin_lock_irq(&phba->hbalock); 13548 13554 /* If PCI channel is offline, don't process it */ 13549 13555 if (unlikely(pci_channel_offline(phba->pcidev))) { 13550 13556 spin_unlock_irq(&phba->hbalock); ··· 13662 13676 ha_copy &= ~HA_ERATT; 13663 13677 /* Check the need for handling ERATT in interrupt handler */ 13664 13678 if (ha_copy & HA_ERATT) { 13665 - if (phba->hba_flag & HBA_ERATT_HANDLED) 13679 + if (test_and_set_bit(HBA_ERATT_HANDLED, 13680 + &phba->hba_flag)) 13666 13681 /* ERATT polling has handled ERATT */ 13667 13682 ha_copy &= ~HA_ERATT; 13668 - else 13669 - /* Indicate interrupt handler handles ERATT */ 13670 - phba->hba_flag |= HBA_ERATT_HANDLED; 13671 13683 } 13672 13684 13673 13685 /* 13674 13686 * If there is deferred error attention, do not check for any 13675 13687 * interrupt. 
13676 13688 */ 13677 - if (unlikely(phba->hba_flag & DEFER_ERATT)) { 13689 + if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) { 13678 13690 spin_unlock_irqrestore(&phba->hbalock, iflag); 13679 13691 return IRQ_NONE; 13680 13692 } ··· 13768 13784 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 13769 13785 HS_FFER6 | HS_FFER7 | HS_FFER8) & 13770 13786 phba->work_hs)) { 13771 - phba->hba_flag |= DEFER_ERATT; 13787 + set_bit(DEFER_ERATT, &phba->hba_flag); 13772 13788 /* Clear all interrupt enable conditions */ 13773 13789 writel(0, phba->HCregaddr); 13774 13790 readl(phba->HCregaddr); ··· 13955 13971 /* Need to read HA REG for FCP ring and other ring events */ 13956 13972 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 13957 13973 return IRQ_HANDLED; 13958 - /* Clear up only attention source related to fast-path */ 13959 - spin_lock_irqsave(&phba->hbalock, iflag); 13974 + 13960 13975 /* 13961 13976 * If there is deferred error attention, do not check for 13962 13977 * any interrupt. 13963 13978 */ 13964 - if (unlikely(phba->hba_flag & DEFER_ERATT)) { 13965 - spin_unlock_irqrestore(&phba->hbalock, iflag); 13979 + if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) 13966 13980 return IRQ_NONE; 13967 - } 13981 + 13982 + /* Clear up only attention source related to fast-path */ 13983 + spin_lock_irqsave(&phba->hbalock, iflag); 13968 13984 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), 13969 13985 phba->HAregaddr); 13970 13986 readl(phba->HAregaddr); /* flush */ ··· 14047 14063 spin_unlock(&phba->hbalock); 14048 14064 return IRQ_NONE; 14049 14065 } else if (phba->ha_copy & HA_ERATT) { 14050 - if (phba->hba_flag & HBA_ERATT_HANDLED) 14066 + if (test_and_set_bit(HBA_ERATT_HANDLED, &phba->hba_flag)) 14051 14067 /* ERATT polling has handled ERATT */ 14052 14068 phba->ha_copy &= ~HA_ERATT; 14053 - else 14054 - /* Indicate interrupt handler handles ERATT */ 14055 - phba->hba_flag |= HBA_ERATT_HANDLED; 14056 14069 } 14057 14070 14058 14071 /* 14059 14072 * If there is 
deferred error attention, do not check for any interrupt. 14060 14073 */ 14061 - if (unlikely(phba->hba_flag & DEFER_ERATT)) { 14074 + if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) { 14062 14075 spin_unlock(&phba->hbalock); 14063 14076 return IRQ_NONE; 14064 14077 } ··· 14126 14145 unsigned long iflags; 14127 14146 14128 14147 /* First, declare the els xri abort event has been handled */ 14129 - spin_lock_irqsave(&phba->hbalock, iflags); 14130 - phba->hba_flag &= ~ELS_XRI_ABORT_EVENT; 14131 - spin_unlock_irqrestore(&phba->hbalock, iflags); 14148 + clear_bit(ELS_XRI_ABORT_EVENT, &phba->hba_flag); 14132 14149 14133 14150 /* Now, handle all the els xri abort events */ 14134 14151 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); ··· 14252 14273 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags); 14253 14274 14254 14275 /* Set the async event flag */ 14255 - spin_lock_irqsave(&phba->hbalock, iflags); 14256 - phba->hba_flag |= ASYNC_EVENT; 14257 - spin_unlock_irqrestore(&phba->hbalock, iflags); 14276 + set_bit(ASYNC_EVENT, &phba->hba_flag); 14258 14277 14259 14278 return true; 14260 14279 } ··· 14492 14515 spin_lock_irqsave(&phba->hbalock, iflags); 14493 14516 list_add_tail(&irspiocbq->cq_event.list, 14494 14517 &phba->sli4_hba.sp_queue_event); 14495 - phba->hba_flag |= HBA_SP_QUEUE_EVT; 14496 14518 spin_unlock_irqrestore(&phba->hbalock, iflags); 14519 + set_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag); 14497 14520 14498 14521 return true; 14499 14522 } ··· 14567 14590 list_add_tail(&cq_event->list, 14568 14591 &phba->sli4_hba.sp_els_xri_aborted_work_queue); 14569 14592 /* Set the els xri abort event flag */ 14570 - phba->hba_flag |= ELS_XRI_ABORT_EVENT; 14593 + set_bit(ELS_XRI_ABORT_EVENT, &phba->hba_flag); 14571 14594 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, 14572 14595 iflags); 14573 14596 workposted = true; ··· 14654 14677 /* save off the frame for the work thread to process */ 14655 14678 
list_add_tail(&dma_buf->cq_event.list, 14656 14679 &phba->sli4_hba.sp_queue_event); 14657 - /* Frame received */ 14658 - phba->hba_flag |= HBA_SP_QUEUE_EVT; 14659 14680 spin_unlock_irqrestore(&phba->hbalock, iflags); 14681 + /* Frame received */ 14682 + set_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag); 14660 14683 workposted = true; 14661 14684 break; 14662 14685 case FC_STATUS_INSUFF_BUF_FRM_DISC: ··· 14676 14699 case FC_STATUS_INSUFF_BUF_NEED_BUF: 14677 14700 hrq->RQ_no_posted_buf++; 14678 14701 /* Post more buffers if possible */ 14679 - spin_lock_irqsave(&phba->hbalock, iflags); 14680 - phba->hba_flag |= HBA_POST_RECEIVE_BUFFER; 14681 - spin_unlock_irqrestore(&phba->hbalock, iflags); 14702 + set_bit(HBA_POST_RECEIVE_BUFFER, &phba->hba_flag); 14682 14703 workposted = true; 14683 14704 break; 14684 14705 case FC_STATUS_RQ_DMA_FAILURE: ··· 19334 19359 spin_lock_irqsave(&phba->hbalock, iflags); 19335 19360 list_add_tail(&dmabuf->cq_event.list, 19336 19361 &phba->sli4_hba.sp_queue_event); 19337 - phba->hba_flag |= HBA_SP_QUEUE_EVT; 19338 19362 spin_unlock_irqrestore(&phba->hbalock, iflags); 19363 + set_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag); 19339 19364 lpfc_worker_wake_up(phba); 19340 19365 return; 19341 19366 } ··· 20087 20112 mboxq->vport = phba->pport; 20088 20113 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec; 20089 20114 20090 - spin_lock_irq(&phba->hbalock); 20091 - phba->hba_flag |= FCF_TS_INPROG; 20092 - spin_unlock_irq(&phba->hbalock); 20115 + set_bit(FCF_TS_INPROG, &phba->hba_flag); 20093 20116 20094 20117 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 20095 20118 if (rc == MBX_NOT_FINISHED) ··· 20103 20130 if (mboxq) 20104 20131 lpfc_sli4_mbox_cmd_free(phba, mboxq); 20105 20132 /* FCF scan failed, clear FCF_TS_INPROG flag */ 20106 - spin_lock_irq(&phba->hbalock); 20107 - phba->hba_flag &= ~FCF_TS_INPROG; 20108 - spin_unlock_irq(&phba->hbalock); 20133 + clear_bit(FCF_TS_INPROG, &phba->hba_flag); 20109 20134 } 20110 20135 return error; 20111 20136 } 
··· 20760 20789 20761 20790 /* This HBA contains PORT_STE configured */ 20762 20791 if (!rgn23_data[offset + 2]) 20763 - phba->hba_flag |= LINK_DISABLED; 20792 + set_bit(LINK_DISABLED, &phba->hba_flag); 20764 20793 20765 20794 goto out; 20766 20795 } ··· 22574 22603 u8 cmnd; 22575 22604 u32 *pcmd; 22576 22605 u32 if_type = 0; 22577 - u32 fip, abort_tag; 22606 + u32 abort_tag; 22607 + bool fip; 22578 22608 struct lpfc_nodelist *ndlp = NULL; 22579 22609 union lpfc_wqe128 *wqe = &job->wqe; 22580 22610 u8 command_type = ELS_COMMAND_NON_FIP; 22581 22611 22582 - fip = phba->hba_flag & HBA_FIP_SUPPORT; 22612 + fip = test_bit(HBA_FIP_SUPPORT, &phba->hba_flag); 22583 22613 /* The fcp commands will set command type */ 22584 22614 if (job->cmd_flag & LPFC_IO_FCP) 22585 22615 command_type = FCP_COMMAND;