Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

scsi: lpfc: Add an internal trace log buffer

The current logging methods typically end up requesting a reproduction with
a different logging level set to figure out what happened. This was mainly
by design to not clutter the kernel log messages with things that were
typically not interesting and the messages themselves could cause other
issues.

When looking to make a better system, it was seen that in many cases the
point at which more data was wanted was when another message, usually at
KERN_ERR level, was logged. And in most cases, the additional logging that
was then enabled covered the same code areas. Most of these areas fell into
the discovery state machine.

Based on this summary, the following design has been put in place: The
driver will maintain an internal log (256 elements of 256 bytes). The
"additional logging" messages that are usually enabled in a reproduction
will be changed to now log all the time to the internal log. A new logging
level is defined - LOG_TRACE_EVENT. When this level is set (it is not by
default) and a message marked as KERN_ERR is logged, all the messages in
the internal log will be dumped to the kernel log before the KERN_ERR
message is logged.

There is a timestamp on each message added to the internal log. However,
this timestamp is not converted to wall time when logged. The value of the
timestamp is solely to give a crude time reference for the messages.

Link: https://lore.kernel.org/r/20200630215001.70793-14-jsmart2021@gmail.com
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>

authored by

Dick Kennedy and committed by
Martin K. Petersen
372c187b 317aeb83

+928 -792
+12
drivers/scsi/lpfc/lpfc.h
··· 627 627 enum ras_state state; /* RAS logging running state */ 628 628 }; 629 629 630 + #define DBG_LOG_STR_SZ 256 631 + #define DBG_LOG_SZ 256 632 + 633 + struct dbg_log_ent { 634 + char log[DBG_LOG_STR_SZ]; 635 + u64 t_ns; 636 + }; 637 + 630 638 enum lpfc_irq_chann_mode { 631 639 /* Assign IRQs to all possible cpus that have hardware queues */ 632 640 NORMAL_MODE, ··· 1248 1240 struct scsi_host_template port_template; 1249 1241 /* SCSI host template information - for all vports */ 1250 1242 struct scsi_host_template vport_template; 1243 + atomic_t dbg_log_idx; 1244 + atomic_t dbg_log_cnt; 1245 + atomic_t dbg_log_dmping; 1246 + struct dbg_log_ent dbg_log[DBG_LOG_SZ]; 1251 1247 }; 1252 1248 1253 1249 static inline struct Scsi_Host *
+1 -1
drivers/scsi/lpfc/lpfc_crtn.h
··· 386 386 int lpfc_link_reset(struct lpfc_vport *vport); 387 387 388 388 /* Function prototypes. */ 389 - int lpfc_check_pci_resettable(const struct lpfc_hba *phba); 389 + int lpfc_check_pci_resettable(struct lpfc_hba *phba); 390 390 const char* lpfc_info(struct Scsi_Host *); 391 391 int lpfc_scan_finished(struct Scsi_Host *, unsigned long); 392 392
+8 -8
drivers/scsi/lpfc/lpfc_ct.c
··· 750 750 if (vport->fc_flag & FC_RSCN_MODE) 751 751 lpfc_els_flush_rscn(vport); 752 752 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 753 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 753 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 754 754 "0257 GID_FT Query error: 0x%x 0x%x\n", 755 755 irsp->ulpStatus, vport->fc_ns_retry); 756 756 } else { ··· 811 811 812 812 } else { 813 813 /* NameServer Rsp Error */ 814 - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 814 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 815 815 "0241 NameServer Rsp Error " 816 816 "Data: x%x x%x x%x x%x\n", 817 817 CTrsp->CommandResponse.bits.CmdRsp, ··· 951 951 if (vport->fc_flag & FC_RSCN_MODE) 952 952 lpfc_els_flush_rscn(vport); 953 953 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 954 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 954 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 955 955 "4103 GID_FT Query error: 0x%x 0x%x\n", 956 956 irsp->ulpStatus, vport->fc_ns_retry); 957 957 } else { ··· 1012 1012 } 1013 1013 } else { 1014 1014 /* NameServer Rsp Error */ 1015 - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 1015 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1016 1016 "4109 NameServer Rsp Error " 1017 1017 "Data: x%x x%x x%x x%x\n", 1018 1018 CTrsp->CommandResponse.bits.CmdRsp, ··· 1143 1143 } 1144 1144 } 1145 1145 } 1146 - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 1146 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1147 1147 "0267 NameServer GFF Rsp " 1148 1148 "x%x Error (%d %d) Data: x%x x%x\n", 1149 1149 did, irsp->ulpStatus, irsp->un.ulpWord[4], ··· 1271 1271 } 1272 1272 } 1273 1273 } else 1274 - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 1274 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1275 1275 "3065 GFT_ID failed x%08x\n", irsp->ulpStatus); 1276 1276 1277 1277 lpfc_ct_free_iocb(phba, cmdiocb); ··· 1320 1320 irsp->ulpStatus, irsp->un.ulpWord[4], cmdcode); 1321 1321 1322 1322 if (irsp->ulpStatus) { 1323 - 
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 1323 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1324 1324 "0268 NS cmd x%x Error (x%x x%x)\n", 1325 1325 cmdcode, irsp->ulpStatus, irsp->un.ulpWord[4]); 1326 1326 ··· 1843 1843 ns_cmd_free_mp: 1844 1844 kfree(mp); 1845 1845 ns_cmd_exit: 1846 - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 1846 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1847 1847 "0266 Issue NameServer Req x%x err %d Data: x%x x%x\n", 1848 1848 cmdcode, rc, vport->fc_flag, vport->fc_rscn_id_cnt); 1849 1849 return 1;
+69 -62
drivers/scsi/lpfc/lpfc_els.c
··· 100 100 return 0; 101 101 102 102 /* Pending Link Event during Discovery */ 103 - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 103 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 104 104 "0237 Pending Link Event during " 105 105 "Discovery: State x%x\n", 106 106 phba->pport->port_state); ··· 440 440 441 441 fail: 442 442 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 443 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 444 - "0249 Cannot issue Register Fabric login: Err %d\n", err); 443 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 444 + "0249 Cannot issue Register Fabric login: Err %d\n", 445 + err); 445 446 return -ENXIO; 446 447 } 447 448 ··· 525 524 } 526 525 527 526 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 528 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 529 - "0289 Issue Register VFI failed: Err %d\n", rc); 527 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 528 + "0289 Issue Register VFI failed: Err %d\n", rc); 530 529 return rc; 531 530 } 532 531 ··· 551 550 552 551 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 553 552 if (!mboxq) { 554 - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, 553 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 555 554 "2556 UNREG_VFI mbox allocation failed" 556 555 "HBA state x%x\n", phba->pport->port_state); 557 556 return -ENOMEM; ··· 563 562 564 563 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 565 564 if (rc == MBX_NOT_FINISHED) { 566 - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, 565 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 567 566 "2557 UNREG_VFI issue mbox failed rc x%x " 568 567 "HBA state x%x\n", 569 568 rc, phba->pport->port_state); ··· 1042 1041 if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT && 1043 1042 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 1044 1043 IOERR_LOOP_OPEN_FAILURE))) 1045 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1046 - "2858 FLOGI failure Status:x%x/x%x " 1047 - "TMO:x%x Data x%x x%x\n", 1048 - irsp->ulpStatus, 
irsp->un.ulpWord[4], 1049 - irsp->ulpTimeout, phba->hba_flag, 1050 - phba->fcf.fcf_flag); 1044 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1045 + "2858 FLOGI failure Status:x%x/x%x TMO" 1046 + ":x%x Data x%x x%x\n", 1047 + irsp->ulpStatus, irsp->un.ulpWord[4], 1048 + irsp->ulpTimeout, phba->hba_flag, 1049 + phba->fcf.fcf_flag); 1051 1050 1052 1051 /* Check for retry */ 1053 1052 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 1054 1053 goto out; 1055 1054 1056 - lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 1055 + lpfc_printf_vlog(vport, KERN_WARNING, LOG_TRACE_EVENT, 1057 1056 "0150 FLOGI failure Status:x%x/x%x " 1058 1057 "xri x%x TMO:x%x\n", 1059 1058 irsp->ulpStatus, irsp->un.ulpWord[4], ··· 1133 1132 else if (!(phba->hba_flag & HBA_FCOE_MODE)) 1134 1133 rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp); 1135 1134 else { 1136 - lpfc_printf_vlog(vport, KERN_ERR, 1137 - LOG_FIP | LOG_ELS, 1135 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1138 1136 "2831 FLOGI response with cleared Fabric " 1139 1137 "bit fcf_index 0x%x " 1140 1138 "Switch Name %02x%02x%02x%02x%02x%02x%02x%02x " ··· 1934 1934 1935 1935 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID); 1936 1936 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || ndlp != rrq->ndlp) { 1937 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1937 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1938 1938 "2882 RRQ completes to NPort x%x " 1939 1939 "with no ndlp. 
Data: x%x x%x x%x\n", 1940 1940 irsp->un.elsreq64.remoteID, ··· 1957 1957 (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) && 1958 1958 ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) || 1959 1959 (phba)->pport->cfg_log_verbose & LOG_ELS) 1960 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1961 - "2881 RRQ failure DID:%06X Status:x%x/x%x\n", 1962 - ndlp->nlp_DID, irsp->ulpStatus, 1963 - irsp->un.ulpWord[4]); 1960 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1961 + "2881 RRQ failure DID:%06X Status:" 1962 + "x%x/x%x\n", 1963 + ndlp->nlp_DID, irsp->ulpStatus, 1964 + irsp->un.ulpWord[4]); 1964 1965 } 1965 1966 out: 1966 1967 if (rrq) ··· 2011 2010 2012 2011 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID); 2013 2012 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 2014 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 2013 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2015 2014 "0136 PLOGI completes to NPort x%x " 2016 2015 "with no ndlp. Data: x%x x%x x%x\n", 2017 2016 irsp->un.elsreq64.remoteID, ··· 2060 2059 (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) && 2061 2060 ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) || 2062 2061 (phba)->pport->cfg_log_verbose & LOG_ELS) 2063 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 2062 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2064 2063 "2753 PLOGI failure DID:%06X Status:x%x/x%x\n", 2065 2064 ndlp->nlp_DID, irsp->ulpStatus, 2066 2065 irsp->un.ulpWord[4]); ··· 2238 2237 IOCB_t *irsp; 2239 2238 struct lpfc_nodelist *ndlp; 2240 2239 char *mode; 2240 + u32 loglevel; 2241 2241 2242 2242 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2243 2243 cmdiocb->context_un.rsp_iocb = rspiocb; ··· 2280 2278 * could be expected. 
2281 2279 */ 2282 2280 if ((vport->fc_flag & FC_FABRIC) || 2283 - (vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH)) 2281 + (vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH)) { 2284 2282 mode = KERN_ERR; 2285 - else 2283 + loglevel = LOG_TRACE_EVENT; 2284 + } else { 2286 2285 mode = KERN_INFO; 2286 + loglevel = LOG_ELS; 2287 + } 2287 2288 2288 2289 /* PRLI failed */ 2289 - lpfc_printf_vlog(vport, mode, LOG_ELS, 2290 + lpfc_printf_vlog(vport, mode, loglevel, 2290 2291 "2754 PRLI failure DID:%06X Status:x%x/x%x, " 2291 2292 "data: x%x\n", 2292 2293 ndlp->nlp_DID, irsp->ulpStatus, ··· 2700 2695 goto out; 2701 2696 } 2702 2697 /* ADISC failed */ 2703 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 2698 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2704 2699 "2755 ADISC failure DID:%06X Status:x%x/x%x\n", 2705 2700 ndlp->nlp_DID, irsp->ulpStatus, 2706 2701 irsp->un.ulpWord[4]); ··· 2858 2853 */ 2859 2854 if (irsp->ulpStatus) { 2860 2855 /* LOGO failed */ 2861 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 2856 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2862 2857 "2756 LOGO failure, No Retry DID:%06X Status:x%x/x%x\n", 2863 2858 ndlp->nlp_DID, irsp->ulpStatus, 2864 2859 irsp->un.ulpWord[4]); ··· 3739 3734 "2851 Attempt link reset\n"); 3740 3735 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3741 3736 if (!mbox) { 3742 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 3737 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3743 3738 "2852 Failed to allocate mbox memory"); 3744 3739 return 1; 3745 3740 } ··· 3761 3756 mbox->vport = vport; 3762 3757 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 3763 3758 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 3764 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 3759 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3765 3760 "2853 Failed to issue INIT_LINK " 3766 3761 "mbox command, rc:x%x\n", rc); 3767 3762 mempool_free(mbox, phba->mbox_mem_pool); ··· 3865 3860 break; 3866 3861 3867 3862 case IOERR_ILLEGAL_COMMAND: 
3868 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 3863 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3869 3864 "0124 Retry illegal cmd x%x " 3870 3865 "retry:x%x delay:x%x\n", 3871 3866 cmd, cmdiocb->retry, delay); ··· 3975 3970 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 3976 3971 (cmd == ELS_CMD_FDISC) && 3977 3972 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){ 3978 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 3973 + lpfc_printf_vlog(vport, KERN_ERR, 3974 + LOG_TRACE_EVENT, 3979 3975 "0125 FDISC Failed (x%x). " 3980 3976 "Fabric out of resources\n", 3981 3977 stat.un.lsRjtError); ··· 4015 4009 LSEXP_NOTHING_MORE) { 4016 4010 vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf; 4017 4011 retry = 1; 4018 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 4012 + lpfc_printf_vlog(vport, KERN_ERR, 4013 + LOG_TRACE_EVENT, 4019 4014 "0820 FLOGI Failed (x%x). " 4020 4015 "BBCredit Not Supported\n", 4021 4016 stat.un.lsRjtError); ··· 4029 4022 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) || 4030 4023 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID)) 4031 4024 ) { 4032 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 4025 + lpfc_printf_vlog(vport, KERN_ERR, 4026 + LOG_TRACE_EVENT, 4033 4027 "0122 FDISC Failed (x%x). 
" 4034 4028 "Fabric Detected Bad WWN\n", 4035 4029 stat.un.lsRjtError); ··· 4208 4200 } 4209 4201 /* No retry ELS command <elsCmd> to remote NPORT <did> */ 4210 4202 if (logerr) { 4211 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 4203 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4212 4204 "0137 No retry ELS command x%x to remote " 4213 4205 "NPORT x%x: Out of Resources: Error:x%x/%x\n", 4214 4206 cmd, did, irsp->ulpStatus, ··· 4507 4499 irsp = &rspiocb->iocb; 4508 4500 4509 4501 if (!vport) { 4510 - lpfc_printf_log(phba, KERN_ERR, LOG_ELS, 4502 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4511 4503 "3177 ELS response failed\n"); 4512 4504 goto out; 4513 4505 } ··· 4613 4605 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 4614 4606 4615 4607 /* ELS rsp: Cannot issue reg_login for <NPortid> */ 4616 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 4608 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4617 4609 "0138 ELS rsp: Cannot issue reg_login for x%x " 4618 4610 "Data: x%x x%x x%x\n", 4619 4611 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ··· 6419 6411 lcb_context->rx_id = cmdiocb->iocb.ulpContext; 6420 6412 lcb_context->ndlp = lpfc_nlp_get(ndlp); 6421 6413 if (lpfc_sli4_set_beacon(vport, lcb_context, state)) { 6422 - lpfc_printf_vlog(ndlp->vport, KERN_ERR, 6423 - LOG_ELS, "0193 failed to send mail box"); 6414 + lpfc_printf_vlog(ndlp->vport, KERN_ERR, LOG_TRACE_EVENT, 6415 + "0193 failed to send mail box"); 6424 6416 kfree(lcb_context); 6425 6417 lpfc_nlp_put(ndlp); 6426 6418 rjt_err = LSRJT_UNABLE_TPC; ··· 6629 6621 rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) + 6630 6622 payload_len, GFP_KERNEL); 6631 6623 if (!rscn_event_data) { 6632 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 6624 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6633 6625 "0147 Failed to allocate memory for RSCN event\n"); 6634 6626 return; 6635 6627 } ··· 7006 6998 7007 6999 /* An FLOGI ELS command <elsCmd> was received from DID <did> in 7008 7000 Loop Mode */ 
7009 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 7001 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 7010 7002 "0113 An FLOGI ELS command x%x was " 7011 7003 "received from DID x%x in Loop Mode\n", 7012 7004 cmd, did); ··· 7996 7988 7997 7989 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 7998 7990 cmd = &piocb->iocb; 7999 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 7991 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 8000 7992 "0127 ELS timeout Data: x%x x%x x%x " 8001 7993 "x%x\n", els_command, 8002 7994 remote_ID, cmd->ulpCommand, cmd->ulpIoTag); ··· 8106 8098 spin_unlock_irqrestore(&phba->hbalock, iflags); 8107 8099 } 8108 8100 if (!list_empty(&abort_list)) 8109 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 8101 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 8110 8102 "3387 abort list for txq not empty\n"); 8111 8103 INIT_LIST_HEAD(&abort_list); 8112 8104 ··· 8277 8269 if (*payload == ELS_CMD_LOGO) { 8278 8270 logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL); 8279 8271 if (!logo_data) { 8280 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 8272 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 8281 8273 "0148 Failed to allocate memory " 8282 8274 "for LOGO event\n"); 8283 8275 return; ··· 8287 8279 els_data = kmalloc(sizeof(struct lpfc_els_event_header), 8288 8280 GFP_KERNEL); 8289 8281 if (!els_data) { 8290 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 8282 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 8291 8283 "0149 Failed to allocate memory " 8292 8284 "for ELS event\n"); 8293 8285 return; ··· 8404 8396 break; 8405 8397 default: 8406 8398 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 8407 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 8399 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 8408 8400 "4678 skipped FPIN descriptor[%d]: " 8409 8401 "tag x%x (%s)\n", 8410 8402 desc_cnt, dtag, dtag_nm); ··· 8819 8811 rjt_exp = LSEXP_NOTHING_MORE; 8820 8812 8821 8813 /* Unknown ELS command <elsCmd> received from 
NPORT <did> */ 8822 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 8814 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 8823 8815 "0115 Unknown ELS command x%x " 8824 8816 "received from NPORT x%x\n", cmd, did); 8825 8817 if (newnode) ··· 8864 8856 8865 8857 dropit: 8866 8858 if (vport && !(vport->load_flag & FC_UNLOADING)) 8867 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 8859 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 8868 8860 "0111 Dropping received ELS cmd " 8869 8861 "Data: x%x x%x x%x\n", 8870 8862 icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout); ··· 9014 9006 spin_lock_irq(shost->host_lock); 9015 9007 if (vport->fc_flag & FC_DISC_DELAYED) { 9016 9008 spin_unlock_irq(shost->host_lock); 9017 - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 9009 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9018 9010 "3334 Delay fc port discovery for %d seconds\n", 9019 9011 phba->fc_ratov); 9020 9012 mod_timer(&vport->delayed_disc_tmo, ··· 9032 9024 return; 9033 9025 } 9034 9026 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 9035 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 9027 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9036 9028 "0251 NameServer login: no memory\n"); 9037 9029 return; 9038 9030 } ··· 9044 9036 return; 9045 9037 } 9046 9038 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 9047 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 9039 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9048 9040 "0348 NameServer login: node freed\n"); 9049 9041 return; 9050 9042 } ··· 9055 9047 9056 9048 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) { 9057 9049 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 9058 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 9050 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9059 9051 "0252 Cannot issue NameServer login\n"); 9060 9052 return; 9061 9053 } ··· 9092 9084 spin_unlock_irq(shost->host_lock); 9093 9085 9094 9086 if (mb->mbxStatus) { 9095 - lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 9087 + 
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9096 9088 "0915 Register VPI failed : Status: x%x" 9097 9089 " upd bit: x%x \n", mb->mbxStatus, 9098 9090 mb->un.varRegVpi.upd); ··· 9122 9114 rc = lpfc_sli_issue_mbox(phba, pmb, 9123 9115 MBX_NOWAIT); 9124 9116 if (rc == MBX_NOT_FINISHED) { 9125 - lpfc_printf_vlog(vport, 9126 - KERN_ERR, LOG_MBOX, 9117 + lpfc_printf_vlog(vport, KERN_ERR, 9118 + LOG_TRACE_EVENT, 9127 9119 "2732 Failed to issue INIT_VPI" 9128 9120 " mailbox command\n"); 9129 9121 } else { ··· 9211 9203 lpfc_nlp_put(ndlp); 9212 9204 mempool_free(mbox, phba->mbox_mem_pool); 9213 9205 9214 - lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 9206 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9215 9207 "0253 Register VPI: Can't send mbox\n"); 9216 9208 goto mbox_err_exit; 9217 9209 } 9218 9210 } else { 9219 - lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 9211 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9220 9212 "0254 Register VPI: no memory\n"); 9221 9213 goto mbox_err_exit; 9222 9214 } ··· 9378 9370 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 9379 9371 goto out; 9380 9372 /* FDISC failed */ 9381 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 9373 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9382 9374 "0126 FDISC failed. 
(x%x/x%x)\n", 9383 9375 irsp->ulpStatus, irsp->un.ulpWord[4]); 9384 9376 goto fdisc_failed; ··· 9500 9492 ELS_CMD_FDISC); 9501 9493 if (!elsiocb) { 9502 9494 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 9503 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 9495 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9504 9496 "0255 Issue FDISC: no IOCB\n"); 9505 9497 return 1; 9506 9498 } ··· 9554 9546 if (rc == IOCB_ERROR) { 9555 9547 lpfc_els_free_iocb(phba, elsiocb); 9556 9548 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 9557 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 9549 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9558 9550 "0256 Issue FDISC: Cannot send IOCB\n"); 9559 9551 return 1; 9560 9552 } ··· 10135 10127 "rport in state 0x%x\n", ndlp->nlp_state); 10136 10128 return; 10137 10129 } 10138 - lpfc_printf_log(phba, KERN_ERR, 10139 - LOG_ELS | LOG_FCP_ERROR | LOG_NVME_IOERR, 10130 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10140 10131 "3094 Start rport recovery on shost id 0x%x " 10141 10132 "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x " 10142 10133 "flags 0x%x\n",
+114 -102
drivers/scsi/lpfc/lpfc_hbadisc.c
··· 155 155 return; 156 156 157 157 if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn)) 158 - lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 159 - "6789 rport name %llx != node port name %llx", 160 - rport->port_name, 161 - wwn_to_u64(ndlp->nlp_portname.u.wwn)); 158 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 159 + "6789 rport name %llx != node port name %llx", 160 + rport->port_name, 161 + wwn_to_u64(ndlp->nlp_portname.u.wwn)); 162 162 163 163 evtp = &ndlp->dev_loss_evt; 164 164 165 165 if (!list_empty(&evtp->evt_listp)) { 166 - lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 167 - "6790 rport name %llx dev_loss_evt pending", 168 - rport->port_name); 166 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 167 + "6790 rport name %llx dev_loss_evt pending", 168 + rport->port_name); 169 169 return; 170 170 } 171 171 ··· 295 295 } 296 296 297 297 if (warn_on) { 298 - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 298 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 299 299 "0203 Devloss timeout on " 300 300 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x " 301 301 "NPort x%06x Data: x%x x%x x%x\n", ··· 304 304 ndlp->nlp_DID, ndlp->nlp_flag, 305 305 ndlp->nlp_state, ndlp->nlp_rpi); 306 306 } else { 307 - lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 307 + lpfc_printf_vlog(vport, KERN_INFO, LOG_TRACE_EVENT, 308 308 "0204 Devloss timeout on " 309 309 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x " 310 310 "NPort x%06x Data: x%x x%x x%x\n", ··· 755 755 || kthread_should_stop())); 756 756 /* Signal wakeup shall terminate the worker thread */ 757 757 if (rc) { 758 - lpfc_printf_log(phba, KERN_ERR, LOG_ELS, 758 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 759 759 "0433 Wakeup on signal: rc=x%x\n", rc); 760 760 break; 761 761 } ··· 1092 1092 /* Check for error */ 1093 1093 if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) { 1094 1094 /* CLEAR_LA mbox error <mbxStatus> state <hba_state> */ 1095 - lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 1095 + 
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1096 1096 "0320 CLEAR_LA mbxStatus error x%x hba " 1097 1097 "state x%x\n", 1098 1098 mb->mbxStatus, vport->port_state); ··· 1180 1180 return; 1181 1181 1182 1182 out: 1183 - lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 1183 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1184 1184 "0306 CONFIG_LINK mbxStatus error x%x " 1185 1185 "HBA state x%x\n", 1186 1186 pmb->u.mb.mbxStatus, vport->port_state); ··· 1188 1188 1189 1189 lpfc_linkdown(phba); 1190 1190 1191 - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 1191 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1192 1192 "0200 CONFIG_LINK bad hba state x%x\n", 1193 1193 vport->port_state); 1194 1194 ··· 1224 1224 struct lpfc_vport *vport = mboxq->vport; 1225 1225 1226 1226 if (mboxq->u.mb.mbxStatus) { 1227 - lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 1228 - "2017 REG_FCFI mbxStatus error x%x " 1229 - "HBA state x%x\n", 1230 - mboxq->u.mb.mbxStatus, vport->port_state); 1227 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1228 + "2017 REG_FCFI mbxStatus error x%x " 1229 + "HBA state x%x\n", mboxq->u.mb.mbxStatus, 1230 + vport->port_state); 1231 1231 goto fail_out; 1232 1232 } 1233 1233 ··· 1848 1848 */ 1849 1849 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); 1850 1850 if (unlikely(!mboxq->sge_array)) { 1851 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 1851 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1852 1852 "2524 Failed to get the non-embedded SGE " 1853 1853 "virtual address\n"); 1854 1854 return NULL; ··· 1864 1864 if (shdr_status || shdr_add_status) { 1865 1865 if (shdr_status == STATUS_FCF_TABLE_EMPTY || 1866 1866 if_type == LPFC_SLI_INTF_IF_TYPE_2) 1867 - lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 1867 + lpfc_printf_log(phba, KERN_ERR, 1868 + LOG_TRACE_EVENT, 1868 1869 "2726 READ_FCF_RECORD Indicates empty " 1869 1870 "FCF table.\n"); 1870 1871 else 1871 - lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 1872 + lpfc_printf_log(phba, KERN_ERR, 
LOG_TRACE_EVENT, 1872 1873 "2521 READ_FCF_RECORD mailbox failed " 1873 1874 "with status x%x add_status x%x, " 1874 1875 "mbx\n", shdr_status, shdr_add_status); ··· 2247 2246 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, 2248 2247 &next_fcf_index); 2249 2248 if (!new_fcf_record) { 2250 - lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 2249 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2251 2250 "2765 Mailbox command READ_FCF_RECORD " 2252 2251 "failed to retrieve a FCF record.\n"); 2253 2252 /* Let next new FCF event trigger fast failover */ ··· 2291 2290 new_fcf_record, LPFC_FCOE_IGNORE_VID)) { 2292 2291 if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) != 2293 2292 phba->fcf.current_rec.fcf_indx) { 2294 - lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 2293 + lpfc_printf_log(phba, KERN_ERR, 2294 + LOG_TRACE_EVENT, 2295 2295 "2862 FCF (x%x) matches property " 2296 2296 "of in-use FCF (x%x)\n", 2297 2297 bf_get(lpfc_fcf_record_fcf_index, ··· 2362 2360 phba->pport->fc_flag); 2363 2361 goto out; 2364 2362 } else 2365 - lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 2363 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2366 2364 "2863 New FCF (x%x) matches " 2367 2365 "property of in-use FCF (x%x)\n", 2368 2366 bf_get(lpfc_fcf_record_fcf_index, ··· 2776 2774 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 2777 2775 LPFC_SLI_INTF_IF_TYPE_0) && 2778 2776 mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) { 2779 - lpfc_printf_vlog(vport, KERN_ERR, 2780 - LOG_MBOX, 2781 - "2891 Init VFI mailbox failed 0x%x\n", 2782 - mboxq->u.mb.mbxStatus); 2777 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2778 + "2891 Init VFI mailbox failed 0x%x\n", 2779 + mboxq->u.mb.mbxStatus); 2783 2780 mempool_free(mboxq, phba->mbox_mem_pool); 2784 2781 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 2785 2782 return; ··· 2806 2805 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2807 2806 if (!mboxq) { 2808 2807 lpfc_printf_vlog(vport, KERN_ERR, 2809 - LOG_MBOX, "2892 Failed to 
allocate " 2808 + LOG_TRACE_EVENT, "2892 Failed to allocate " 2810 2809 "init_vfi mailbox\n"); 2811 2810 return; 2812 2811 } ··· 2814 2813 mboxq->mbox_cmpl = lpfc_init_vfi_cmpl; 2815 2814 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 2816 2815 if (rc == MBX_NOT_FINISHED) { 2817 - lpfc_printf_vlog(vport, KERN_ERR, 2818 - LOG_MBOX, "2893 Failed to issue init_vfi mailbox\n"); 2816 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2817 + "2893 Failed to issue init_vfi mailbox\n"); 2819 2818 mempool_free(mboxq, vport->phba->mbox_mem_pool); 2820 2819 } 2821 2820 } ··· 2835 2834 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2836 2835 2837 2836 if (mboxq->u.mb.mbxStatus) { 2838 - lpfc_printf_vlog(vport, KERN_ERR, 2839 - LOG_MBOX, 2840 - "2609 Init VPI mailbox failed 0x%x\n", 2841 - mboxq->u.mb.mbxStatus); 2837 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2838 + "2609 Init VPI mailbox failed 0x%x\n", 2839 + mboxq->u.mb.mbxStatus); 2842 2840 mempool_free(mboxq, phba->mbox_mem_pool); 2843 2841 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 2844 2842 return; ··· 2851 2851 ndlp = lpfc_findnode_did(vport, Fabric_DID); 2852 2852 if (!ndlp) 2853 2853 lpfc_printf_vlog(vport, KERN_ERR, 2854 - LOG_DISCOVERY, 2854 + LOG_TRACE_EVENT, 2855 2855 "2731 Cannot find fabric " 2856 2856 "controller node\n"); 2857 2857 else ··· 2864 2864 lpfc_initial_fdisc(vport); 2865 2865 else { 2866 2866 lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP); 2867 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 2867 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2868 2868 "2606 No NPIV Fabric support\n"); 2869 2869 } 2870 2870 mempool_free(mboxq, phba->mbox_mem_pool); ··· 2887 2887 if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) { 2888 2888 vpi = lpfc_alloc_vpi(vport->phba); 2889 2889 if (!vpi) { 2890 - lpfc_printf_vlog(vport, KERN_ERR, 2891 - LOG_MBOX, 2890 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2892 2891 "3303 Failed to obtain vport vpi\n"); 2893 2892 
lpfc_vport_set_state(vport, FC_VPORT_FAILED); 2894 2893 return; ··· 2898 2899 mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL); 2899 2900 if (!mboxq) { 2900 2901 lpfc_printf_vlog(vport, KERN_ERR, 2901 - LOG_MBOX, "2607 Failed to allocate " 2902 + LOG_TRACE_EVENT, "2607 Failed to allocate " 2902 2903 "init_vpi mailbox\n"); 2903 2904 return; 2904 2905 } ··· 2907 2908 mboxq->mbox_cmpl = lpfc_init_vpi_cmpl; 2908 2909 rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT); 2909 2910 if (rc == MBX_NOT_FINISHED) { 2910 - lpfc_printf_vlog(vport, KERN_ERR, 2911 - LOG_MBOX, "2608 Failed to issue init_vpi mailbox\n"); 2911 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2912 + "2608 Failed to issue init_vpi mailbox\n"); 2912 2913 mempool_free(mboxq, vport->phba->mbox_mem_pool); 2913 2914 } 2914 2915 } ··· 2952 2953 lpfc_vport_set_state(vports[i], 2953 2954 FC_VPORT_NO_FABRIC_SUPP); 2954 2955 lpfc_printf_vlog(vports[i], KERN_ERR, 2955 - LOG_ELS, 2956 + LOG_TRACE_EVENT, 2956 2957 "0259 No NPIV " 2957 2958 "Fabric support\n"); 2958 2959 } ··· 2976 2977 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 2977 2978 LPFC_SLI_INTF_IF_TYPE_0) && 2978 2979 mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) { 2979 - lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 2980 - "2018 REG_VFI mbxStatus error x%x " 2981 - "HBA state x%x\n", 2982 - mboxq->u.mb.mbxStatus, vport->port_state); 2980 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2981 + "2018 REG_VFI mbxStatus error x%x " 2982 + "HBA state x%x\n", 2983 + mboxq->u.mb.mbxStatus, vport->port_state); 2983 2984 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 2984 2985 /* FLOGI failed, use loop map to make discovery list */ 2985 2986 lpfc_disc_list_loopmap(vport); ··· 3066 3067 /* Check for error */ 3067 3068 if (mb->mbxStatus) { 3068 3069 /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */ 3069 - lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 3070 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3070 3071 "0319 
READ_SPARAM mbxStatus error x%x " 3071 3072 "hba state x%x>\n", 3072 3073 mb->mbxStatus, vport->port_state); ··· 3285 3286 GFP_KERNEL); 3286 3287 if (unlikely(!fcf_record)) { 3287 3288 lpfc_printf_log(phba, KERN_ERR, 3288 - LOG_MBOX | LOG_SLI, 3289 + LOG_TRACE_EVENT, 3289 3290 "2554 Could not allocate memory for " 3290 3291 "fcf record\n"); 3291 3292 rc = -ENODEV; ··· 3297 3298 rc = lpfc_sli4_add_fcf_record(phba, fcf_record); 3298 3299 if (unlikely(rc)) { 3299 3300 lpfc_printf_log(phba, KERN_ERR, 3300 - LOG_MBOX | LOG_SLI, 3301 + LOG_TRACE_EVENT, 3301 3302 "2013 Could not manually add FCF " 3302 3303 "record 0, status %d\n", rc); 3303 3304 rc = -ENODEV; ··· 3343 3344 return; 3344 3345 out: 3345 3346 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 3346 - lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 3347 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3347 3348 "0263 Discovery Mailbox error: state: 0x%x : x%px x%px\n", 3348 3349 vport->port_state, sparam_mbox, cfglink_mbox); 3349 3350 lpfc_issue_clear_la(phba, vport); ··· 3616 3617 break; 3617 3618 /* If VPI is busy, reset the HBA */ 3618 3619 case 0x9700: 3619 - lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3620 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3620 3621 "2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n", 3621 3622 vport->vpi, mb->mbxStatus); 3622 3623 if (!(phba->pport->load_flag & FC_UNLOADING)) ··· 3654 3655 mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi; 3655 3656 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 3656 3657 if (rc == MBX_NOT_FINISHED) { 3657 - lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT, 3658 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3658 3659 "1800 Could not issue unreg_vpi\n"); 3659 3660 mempool_free(mbox, phba->mbox_mem_pool); 3660 3661 vport->unreg_vpi_cmpl = VPORT_ERROR; ··· 3741 3742 3742 3743 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3743 3744 if (!pmb) { 3744 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3745 + lpfc_printf_log(phba, 
KERN_ERR, LOG_TRACE_EVENT, 3745 3746 "0542 lpfc_create_static_vport failed to" 3746 3747 " allocate mailbox memory\n"); 3747 3748 return; ··· 3751 3752 3752 3753 vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL); 3753 3754 if (!vport_info) { 3754 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3755 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3755 3756 "0543 lpfc_create_static_vport failed to" 3756 3757 " allocate vport_info\n"); 3757 3758 mempool_free(pmb, phba->mbox_mem_pool); ··· 3812 3813 if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) || 3813 3814 ((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK) 3814 3815 != VPORT_INFO_REV)) { 3815 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3816 - "0545 lpfc_create_static_vport bad" 3817 - " information header 0x%x 0x%x\n", 3818 - le32_to_cpu(vport_info->signature), 3819 - le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK); 3816 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3817 + "0545 lpfc_create_static_vport bad" 3818 + " information header 0x%x 0x%x\n", 3819 + le32_to_cpu(vport_info->signature), 3820 + le32_to_cpu(vport_info->rev) & 3821 + VPORT_INFO_REV_MASK); 3820 3822 3821 3823 goto out; 3822 3824 } ··· 3881 3881 pmb->ctx_buf = NULL; 3882 3882 3883 3883 if (mb->mbxStatus) { 3884 - lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 3884 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3885 3885 "0258 Register Fabric login error: 0x%x\n", 3886 3886 mb->mbxStatus); 3887 3887 lpfc_mbuf_free(phba, mp->virt, mp->phys); ··· 3954 3954 /* Cannot issue NameServer FCP Query, so finish up 3955 3955 * discovery 3956 3956 */ 3957 - lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, 3957 + lpfc_printf_vlog(vport, KERN_ERR, 3958 + LOG_TRACE_EVENT, 3958 3959 "0604 %s FC TYPE %x %s\n", 3959 3960 "Failed to issue GID_FT to ", 3960 3961 FC_TYPE_FCP, ··· 3971 3970 /* Cannot issue NameServer NVME Query, so finish up 3972 3971 * discovery 3973 3972 */ 3974 - lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, 3973 + 
lpfc_printf_vlog(vport, KERN_ERR, 3974 + LOG_TRACE_EVENT, 3975 3975 "0605 %s FC_TYPE %x %s %d\n", 3976 3976 "Failed to issue GID_FT to ", 3977 3977 FC_TYPE_NVME, ··· 4004 4002 /* Cannot issue NameServer FCP Query, so finish up 4005 4003 * discovery 4006 4004 */ 4007 - lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, 4005 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4008 4006 "0606 %s Port TYPE %x %s\n", 4009 4007 "Failed to issue GID_PT to ", 4010 4008 GID_PT_N_PORT, ··· 4034 4032 vport->gidft_inp = 0; 4035 4033 4036 4034 if (mb->mbxStatus) { 4037 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 4035 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4038 4036 "0260 Register NameServer error: 0x%x\n", 4039 4037 mb->mbxStatus); 4040 4038 ··· 4346 4344 GFP_KERNEL); 4347 4345 4348 4346 if (!ndlp->lat_data) 4349 - lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 4347 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4350 4348 "0286 lpfc_nlp_state_cleanup failed to " 4351 4349 "allocate statistical data buffer DID " 4352 4350 "0x%x\n", ndlp->nlp_DID); ··· 5015 5013 5016 5014 vports = lpfc_create_vport_work_array(phba); 5017 5015 if (!vports) { 5018 - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 5019 - "2884 Vport array allocation failed \n"); 5016 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5017 + "2884 Vport array allocation failed \n"); 5020 5018 return; 5021 5019 } 5022 5020 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { ··· 5059 5057 mempool_free(mbox, phba->mbox_mem_pool); 5060 5058 5061 5059 if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED)) 5062 - lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT, 5063 - "1836 Could not issue " 5064 - "unreg_login(all_rpis) status %d\n", rc); 5060 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5061 + "1836 Could not issue " 5062 + "unreg_login(all_rpis) status %d\n", 5063 + rc); 5065 5064 } 5066 5065 } 5067 5066 ··· 5089 5086 mempool_free(mbox, phba->mbox_mem_pool); 5090 5087 5091 5088 if ((rc 
== MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED)) 5092 - lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT, 5089 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5093 5090 "1815 Could not issue " 5094 5091 "unreg_did (default rpis) status %d\n", 5095 5092 rc); ··· 5910 5907 case LPFC_FLOGI: 5911 5908 /* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */ 5912 5909 /* Initial FLOGI timeout */ 5913 - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 5910 + lpfc_printf_vlog(vport, KERN_ERR, 5911 + LOG_TRACE_EVENT, 5914 5912 "0222 Initial %s timeout\n", 5915 5913 vport->vpi ? "FDISC" : "FLOGI"); 5916 5914 ··· 5929 5925 case LPFC_FABRIC_CFG_LINK: 5930 5926 /* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for 5931 5927 NameServer login */ 5932 - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 5928 + lpfc_printf_vlog(vport, KERN_ERR, 5929 + LOG_TRACE_EVENT, 5933 5930 "0223 Timeout while waiting for " 5934 5931 "NameServer login\n"); 5935 5932 /* Next look for NameServer ndlp */ ··· 5943 5938 5944 5939 case LPFC_NS_QRY: 5945 5940 /* Check for wait for NameServer Rsp timeout */ 5946 - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 5941 + lpfc_printf_vlog(vport, KERN_ERR, 5942 + LOG_TRACE_EVENT, 5947 5943 "0224 NameServer Query timeout " 5948 5944 "Data: x%x x%x\n", 5949 5945 vport->fc_ns_retry, LPFC_MAX_NS_RETRY); ··· 5977 5971 /* Setup and issue mailbox INITIALIZE LINK command */ 5978 5972 initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5979 5973 if (!initlinkmbox) { 5980 - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 5974 + lpfc_printf_vlog(vport, KERN_ERR, 5975 + LOG_TRACE_EVENT, 5981 5976 "0206 Device Discovery " 5982 5977 "completion error\n"); 5983 5978 phba->link_state = LPFC_HBA_ERROR; ··· 6000 5993 6001 5994 case LPFC_DISC_AUTH: 6002 5995 /* Node Authentication timeout */ 6003 - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 5996 + lpfc_printf_vlog(vport, KERN_ERR, 5997 + LOG_TRACE_EVENT, 6004 5998 "0227 
Node Authentication timeout\n"); 6005 5999 lpfc_disc_flush_list(vport); 6006 6000 ··· 6021 6013 6022 6014 case LPFC_VPORT_READY: 6023 6015 if (vport->fc_flag & FC_RSCN_MODE) { 6024 - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 6016 + lpfc_printf_vlog(vport, KERN_ERR, 6017 + LOG_TRACE_EVENT, 6025 6018 "0231 RSCN timeout Data: x%x " 6026 6019 "x%x\n", 6027 6020 vport->fc_ns_retry, LPFC_MAX_NS_RETRY); ··· 6036 6027 break; 6037 6028 6038 6029 default: 6039 - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 6030 + lpfc_printf_vlog(vport, KERN_ERR, 6031 + LOG_TRACE_EVENT, 6040 6032 "0273 Unexpected discovery timeout, " 6041 6033 "vport State x%x\n", vport->port_state); 6042 6034 break; ··· 6046 6036 switch (phba->link_state) { 6047 6037 case LPFC_CLEAR_LA: 6048 6038 /* CLEAR LA timeout */ 6049 - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 6039 + lpfc_printf_vlog(vport, KERN_ERR, 6040 + LOG_TRACE_EVENT, 6050 6041 "0228 CLEAR LA timeout\n"); 6051 6042 clrlaerr = 1; 6052 6043 break; ··· 6061 6050 case LPFC_INIT_MBX_CMDS: 6062 6051 case LPFC_LINK_DOWN: 6063 6052 case LPFC_HBA_ERROR: 6064 - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 6053 + lpfc_printf_vlog(vport, KERN_ERR, 6054 + LOG_TRACE_EVENT, 6065 6055 "0230 Unexpected timeout, hba link " 6066 6056 "state x%x\n", phba->link_state); 6067 6057 clrlaerr = 1; ··· 6253 6241 } 6254 6242 6255 6243 if (i >= phba->max_vpi) { 6256 - lpfc_printf_log(phba, KERN_ERR, LOG_ELS, 6257 - "2936 Could not find Vport mapped " 6258 - "to vpi %d\n", vpi); 6244 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6245 + "2936 Could not find Vport mapped " 6246 + "to vpi %d\n", vpi); 6259 6247 return NULL; 6260 6248 } 6261 6249 } ··· 6559 6547 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6560 6548 6561 6549 if (mboxq->u.mb.mbxStatus) { 6562 - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, 6563 - "2555 UNREG_VFI mbxStatus error x%x " 6564 - "HBA state x%x\n", 6565 - mboxq->u.mb.mbxStatus, vport->port_state); 6550 
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6551 + "2555 UNREG_VFI mbxStatus error x%x " 6552 + "HBA state x%x\n", 6553 + mboxq->u.mb.mbxStatus, vport->port_state); 6566 6554 } 6567 6555 spin_lock_irq(shost->host_lock); 6568 6556 phba->pport->fc_flag &= ~FC_VFI_REGISTERED; ··· 6584 6572 struct lpfc_vport *vport = mboxq->vport; 6585 6573 6586 6574 if (mboxq->u.mb.mbxStatus) { 6587 - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, 6588 - "2550 UNREG_FCFI mbxStatus error x%x " 6589 - "HBA state x%x\n", 6590 - mboxq->u.mb.mbxStatus, vport->port_state); 6575 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6576 + "2550 UNREG_FCFI mbxStatus error x%x " 6577 + "HBA state x%x\n", 6578 + mboxq->u.mb.mbxStatus, vport->port_state); 6591 6579 } 6592 6580 mempool_free(mboxq, phba->mbox_mem_pool); 6593 6581 return; ··· 6676 6664 6677 6665 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6678 6666 if (!mbox) { 6679 - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, 6667 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6680 6668 "2551 UNREG_FCFI mbox allocation failed" 6681 6669 "HBA state x%x\n", phba->pport->port_state); 6682 6670 return -ENOMEM; ··· 6687 6675 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 6688 6676 6689 6677 if (rc == MBX_NOT_FINISHED) { 6690 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6678 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6691 6679 "2552 Unregister FCFI command failed rc x%x " 6692 6680 "HBA state x%x\n", 6693 6681 rc, phba->pport->port_state); ··· 6711 6699 /* Preparation for unregistering fcf */ 6712 6700 rc = lpfc_unregister_fcf_prep(phba); 6713 6701 if (rc) { 6714 - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 6702 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6715 6703 "2748 Failed to prepare for unregistering " 6716 6704 "HBA's FCF record: rc=%d\n", rc); 6717 6705 return; ··· 6747 6735 spin_lock_irq(&phba->hbalock); 6748 6736 phba->fcf.fcf_flag &= ~FCF_INIT_DISC; 6749 6737 
spin_unlock_irq(&phba->hbalock); 6750 - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, 6738 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6751 6739 "2553 lpfc_unregister_unused_fcf failed " 6752 6740 "to read FCF record HBA state x%x\n", 6753 6741 phba->pport->port_state); ··· 6769 6757 /* Preparation for unregistering fcf */ 6770 6758 rc = lpfc_unregister_fcf_prep(phba); 6771 6759 if (rc) { 6772 - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 6760 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6773 6761 "2749 Failed to prepare for unregistering " 6774 6762 "HBA's FCF record: rc=%d\n", rc); 6775 6763 return; ··· 6856 6844 conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry), 6857 6845 GFP_KERNEL); 6858 6846 if (!conn_entry) { 6859 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6860 - "2566 Failed to allocate connection" 6861 - " table entry\n"); 6847 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6848 + "2566 Failed to allocate connection" 6849 + " table entry\n"); 6862 6850 return; 6863 6851 } 6864 6852 ··· 7002 6990 7003 6991 /* Check the region signature first */ 7004 6992 if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) { 7005 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6993 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7006 6994 "2567 Config region 23 has bad signature\n"); 7007 6995 return; 7008 6996 } ··· 7011 6999 7012 7000 /* Check the data structure version */ 7013 7001 if (buff[offset] != LPFC_REGION23_VERSION) { 7014 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7015 - "2568 Config region 23 has bad version\n"); 7002 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7003 + "2568 Config region 23 has bad version\n"); 7016 7004 return; 7017 7005 } 7018 7006 offset += 4;
+331 -234
drivers/scsi/lpfc/lpfc_init.c
··· 153 153 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 154 154 155 155 if (rc != MBX_SUCCESS) { 156 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 156 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 157 157 "0324 Config Port initialization " 158 158 "error, mbxCmd x%x READ_NVPARM, " 159 159 "mbxStatus x%x\n", ··· 177 177 lpfc_read_rev(phba, pmb); 178 178 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 179 179 if (rc != MBX_SUCCESS) { 180 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 180 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 181 181 "0439 Adapter failed to init, mbxCmd x%x " 182 182 "READ_REV, mbxStatus x%x\n", 183 183 mb->mbxCommand, mb->mbxStatus); ··· 192 192 */ 193 193 if (mb->un.varRdRev.rr == 0) { 194 194 vp->rev.rBit = 0; 195 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 195 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 196 196 "0440 Adapter failed to init, READ_REV has " 197 197 "missing revision information.\n"); 198 198 mempool_free(pmb, phba->mbox_mem_pool); ··· 444 444 445 445 pmb->vport = vport; 446 446 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 447 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 447 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 448 448 "0448 Adapter failed init, mbxCmd x%x " 449 449 "READ_SPARM mbxStatus x%x\n", 450 450 mb->mbxCommand, mb->mbxStatus); ··· 498 498 lpfc_read_config(phba, pmb); 499 499 pmb->vport = vport; 500 500 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 501 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 501 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 502 502 "0453 Adapter failed to init, mbxCmd x%x " 503 503 "READ_CONFIG, mbxStatus x%x\n", 504 504 mb->mbxCommand, mb->mbxStatus); ··· 547 547 } 548 548 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 549 549 if (rc != MBX_SUCCESS) { 550 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 550 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 551 551 "0352 Config MSI mailbox command " 552 552 "failed, mbxCmd x%x, 
mbxStatus x%x\n", 553 553 pmb->u.mb.mbxCommand, ··· 598 598 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); 599 599 600 600 if (phba->hba_flag & LINK_DISABLED) { 601 - lpfc_printf_log(phba, 602 - KERN_ERR, LOG_INIT, 603 - "2598 Adapter Link is disabled.\n"); 601 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 602 + "2598 Adapter Link is disabled.\n"); 604 603 lpfc_down_link(phba, pmb); 605 604 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 606 605 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 607 606 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 608 - lpfc_printf_log(phba, 609 - KERN_ERR, LOG_INIT, 610 - "2599 Adapter failed to issue DOWN_LINK" 611 - " mbox command rc 0x%x\n", rc); 607 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 608 + "2599 Adapter failed to issue DOWN_LINK" 609 + " mbox command rc 0x%x\n", rc); 612 610 613 611 mempool_free(pmb, phba->mbox_mem_pool); 614 612 return -EIO; ··· 630 632 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 631 633 632 634 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 633 - lpfc_printf_log(phba, 634 - KERN_ERR, 635 - LOG_INIT, 635 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 636 636 "0456 Adapter failed to issue " 637 637 "ASYNCEVT_ENABLE mbox status x%x\n", 638 638 rc); ··· 650 654 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 651 655 652 656 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 653 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed " 657 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 658 + "0435 Adapter failed " 654 659 "to get Option ROM version status x%x\n", rc); 655 660 mempool_free(pmb, phba->mbox_mem_pool); 656 661 } ··· 729 732 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) && 730 733 !(phba->lmt & LMT_64Gb))) { 731 734 /* Reset link speed to auto */ 732 - lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 733 - "1302 Invalid speed for this board:%d " 734 - "Reset link speed to auto.\n", 735 - phba->cfg_link_speed); 735 + lpfc_printf_log(phba, KERN_ERR, 
LOG_TRACE_EVENT, 736 + "1302 Invalid speed for this board:%d " 737 + "Reset link speed to auto.\n", 738 + phba->cfg_link_speed); 736 739 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO; 737 740 } 738 741 lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed); ··· 741 744 lpfc_set_loopback_flag(phba); 742 745 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 743 746 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 744 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 745 - "0498 Adapter failed to init, mbxCmd x%x " 746 - "INIT_LINK, mbxStatus x%x\n", 747 - mb->mbxCommand, mb->mbxStatus); 747 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 748 + "0498 Adapter failed to init, mbxCmd x%x " 749 + "INIT_LINK, mbxStatus x%x\n", 750 + mb->mbxCommand, mb->mbxStatus); 748 751 if (phba->sli_rev <= LPFC_SLI_REV3) { 749 752 /* Clear all interrupt enable conditions */ 750 753 writel(0, phba->HCregaddr); ··· 790 793 return -ENOMEM; 791 794 } 792 795 793 - lpfc_printf_log(phba, 794 - KERN_ERR, LOG_INIT, 795 - "0491 Adapter Link is disabled.\n"); 796 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 797 + "0491 Adapter Link is disabled.\n"); 796 798 lpfc_down_link(phba, pmb); 797 799 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 798 800 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 799 801 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 800 - lpfc_printf_log(phba, 801 - KERN_ERR, LOG_INIT, 802 - "2522 Adapter failed to issue DOWN_LINK" 803 - " mbox command rc 0x%x\n", rc); 802 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 803 + "2522 Adapter failed to issue DOWN_LINK" 804 + " mbox command rc 0x%x\n", rc); 804 805 805 806 mempool_free(pmb, phba->mbox_mem_pool); 806 807 return -EIO; ··· 1604 1609 return; 1605 1610 } 1606 1611 1607 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1608 - "0479 Deferred Adapter Hardware Error " 1609 - "Data: x%x x%x x%x\n", 1610 - phba->work_hs, 1611 - phba->work_status[0], phba->work_status[1]); 1612 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1613 + "0479 
Deferred Adapter Hardware Error " 1614 + "Data: x%x x%x x%x\n", 1615 + phba->work_hs, phba->work_status[0], 1616 + phba->work_status[1]); 1612 1617 1613 1618 spin_lock_irq(&phba->hbalock); 1614 1619 psli->sli_flag &= ~LPFC_SLI_ACTIVE; ··· 1759 1764 temp_event_data.event_code = LPFC_CRIT_TEMP; 1760 1765 temp_event_data.data = (uint32_t)temperature; 1761 1766 1762 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1767 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1763 1768 "0406 Adapter maximum temperature exceeded " 1764 1769 "(%ld), taking this port offline " 1765 1770 "Data: x%x x%x x%x\n", ··· 1783 1788 * failure is a value other than FFER6. Do not call the offline 1784 1789 * twice. This is the adapter hardware error path. 1785 1790 */ 1786 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1791 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1787 1792 "0457 Adapter Hardware Error " 1788 1793 "Data: x%x x%x x%x\n", 1789 1794 phba->work_hs, ··· 1831 1836 1832 1837 /* need reset: attempt for port recovery */ 1833 1838 if (en_rn_msg) 1834 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1839 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1835 1840 "2887 Reset Needed: Attempting Port " 1836 1841 "Recovery...\n"); 1837 1842 lpfc_offline_prep(phba, mbx_action); ··· 1841 1846 lpfc_sli4_disable_intr(phba); 1842 1847 rc = lpfc_sli_brdrestart(phba); 1843 1848 if (rc) { 1844 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1849 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1845 1850 "6309 Failed to restart board\n"); 1846 1851 return rc; 1847 1852 } 1848 1853 /* request and enable interrupt */ 1849 1854 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 1850 1855 if (intr_mode == LPFC_INTR_ERROR) { 1851 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1856 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1852 1857 "3175 Failed to enable interrupt\n"); 1853 1858 return -EIO; 1854 1859 } ··· 1887 1892 * we cannot communicate with the pci card anyway. 
1888 1893 */ 1889 1894 if (pci_channel_offline(phba->pcidev)) { 1890 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1895 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1891 1896 "3166 pci channel is offline\n"); 1892 1897 lpfc_sli4_offline_eratt(phba); 1893 1898 return; ··· 1910 1915 lpfc_sli4_offline_eratt(phba); 1911 1916 return; 1912 1917 } 1913 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1918 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1914 1919 "7623 Checking UE recoverable"); 1915 1920 1916 1921 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) { ··· 1927 1932 msleep(1000); 1928 1933 } 1929 1934 1930 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1935 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1931 1936 "4827 smphr_port_status x%x : Waited %dSec", 1932 1937 smphr_port_status, i); 1933 1938 ··· 1945 1950 LPFC_MBX_NO_WAIT, en_rn_msg); 1946 1951 if (rc == 0) 1947 1952 return; 1948 - lpfc_printf_log(phba, 1949 - KERN_ERR, LOG_INIT, 1953 + lpfc_printf_log(phba, KERN_ERR, 1954 + LOG_TRACE_EVENT, 1950 1955 "4215 Failed to recover UE"); 1951 1956 break; 1952 1957 } 1953 1958 } 1954 1959 } 1955 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1960 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1956 1961 "7624 Firmware not ready: Failing UE recovery," 1957 1962 " waited %dSec", i); 1958 1963 phba->link_state = LPFC_HBA_ERROR; ··· 1965 1970 &portstat_reg.word0); 1966 1971 /* consider PCI bus read error as pci_channel_offline */ 1967 1972 if (pci_rd_rc1 == -EIO) { 1968 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1973 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1969 1974 "3151 PCI bus read access failure: x%x\n", 1970 1975 readl(phba->sli4_hba.u.if_type2.STATUSregaddr)); 1971 1976 lpfc_sli4_offline_eratt(phba); ··· 1974 1979 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 1975 1980 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 1976 1981 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) { 1977 - lpfc_printf_log(phba, KERN_ERR, 
LOG_INIT, 1978 - "2889 Port Overtemperature event, " 1979 - "taking port offline Data: x%x x%x\n", 1980 - reg_err1, reg_err2); 1982 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1983 + "2889 Port Overtemperature event, " 1984 + "taking port offline Data: x%x x%x\n", 1985 + reg_err1, reg_err2); 1981 1986 1982 1987 phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 1983 1988 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; ··· 1999 2004 } 2000 2005 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 2001 2006 reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) { 2002 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2007 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2003 2008 "3143 Port Down: Firmware Update " 2004 2009 "Detected\n"); 2005 2010 en_rn_msg = false; 2006 2011 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 2007 2012 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 2008 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2013 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2009 2014 "3144 Port Down: Debug Dump\n"); 2010 2015 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 2011 2016 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON) 2012 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2017 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2013 2018 "3145 Port Down: Provisioning\n"); 2014 2019 2015 2020 /* If resets are disabled then leave the HBA alone and return */ ··· 2028 2033 break; 2029 2034 } 2030 2035 /* fall through for not able to recover */ 2031 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2036 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2032 2037 "3152 Unrecoverable error\n"); 2033 2038 phba->link_state = LPFC_HBA_ERROR; 2034 2039 break; ··· 2146 2151 lpfc_linkdown(phba); 2147 2152 phba->link_state = LPFC_HBA_ERROR; 2148 2153 2149 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 2150 - "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc); 2154 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2155 + "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc); 2151 
2156 2152 2157 return; 2153 2158 } ··· 2896 2901 */ 2897 2902 while (!list_empty(&vport->fc_nodes)) { 2898 2903 if (i++ > 3000) { 2899 - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 2904 + lpfc_printf_vlog(vport, KERN_ERR, 2905 + LOG_TRACE_EVENT, 2900 2906 "0233 Nodelist not empty\n"); 2901 2907 list_for_each_entry_safe(ndlp, next_ndlp, 2902 2908 &vport->fc_nodes, nlp_listp) { 2903 2909 lpfc_printf_vlog(ndlp->vport, KERN_ERR, 2904 - LOG_NODE, 2910 + LOG_TRACE_EVENT, 2905 2911 "0282 did:x%x ndlp:x%px " 2906 2912 "usgmap:x%x refcnt:%d\n", 2907 2913 ndlp->nlp_DID, (void *)ndlp, ··· 3010 3014 lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 3011 3015 break; 3012 3016 default: 3013 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3017 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3014 3018 "0297 Invalid device group (x%x)\n", 3015 3019 phba->pci_dev_grp); 3016 3020 break; ··· 3057 3061 /* Check active mailbox complete status every 2ms */ 3058 3062 msleep(2); 3059 3063 if (time_after(jiffies, timeout)) { 3060 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3061 - "2813 Mgmt IO is Blocked %x " 3062 - "- mbox cmd %x still active\n", 3063 - phba->sli.sli_flag, actcmd); 3064 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3065 + "2813 Mgmt IO is Blocked %x " 3066 + "- mbox cmd %x still active\n", 3067 + phba->sli.sli_flag, actcmd); 3064 3068 break; 3065 3069 } 3066 3070 } ··· 3405 3409 !phba->nvmet_support) { 3406 3410 error = lpfc_nvme_create_localport(phba->pport); 3407 3411 if (error) 3408 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3412 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3409 3413 "6132 NVME restore reg failed " 3410 3414 "on nvmei error x%x\n", error); 3411 3415 } ··· 3745 3749 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), 3746 3750 GFP_KERNEL); 3747 3751 if (sglq_entry == NULL) { 3748 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3752 + lpfc_printf_log(phba, KERN_ERR, 3753 + LOG_TRACE_EVENT, 3749 3754 "2562 Failure to allocate an " 3750 3755 "ELS sgl 
entry:%d\n", i); 3751 3756 rc = -ENOMEM; ··· 3757 3760 &sglq_entry->phys); 3758 3761 if (sglq_entry->virt == NULL) { 3759 3762 kfree(sglq_entry); 3760 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3763 + lpfc_printf_log(phba, KERN_ERR, 3764 + LOG_TRACE_EVENT, 3761 3765 "2563 Failure to allocate an " 3762 3766 "ELS mbuf:%d\n", i); 3763 3767 rc = -ENOMEM; ··· 3813 3815 &phba->sli4_hba.lpfc_els_sgl_list, list) { 3814 3816 lxri = lpfc_sli4_next_xritag(phba); 3815 3817 if (lxri == NO_XRI) { 3816 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3818 + lpfc_printf_log(phba, KERN_ERR, 3819 + LOG_TRACE_EVENT, 3817 3820 "2400 Failed to allocate xri for " 3818 3821 "ELS sgl\n"); 3819 3822 rc = -ENOMEM; ··· 3869 3870 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), 3870 3871 GFP_KERNEL); 3871 3872 if (sglq_entry == NULL) { 3872 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3873 + lpfc_printf_log(phba, KERN_ERR, 3874 + LOG_TRACE_EVENT, 3873 3875 "6303 Failure to allocate an " 3874 3876 "NVMET sgl entry:%d\n", i); 3875 3877 rc = -ENOMEM; ··· 3881 3881 &sglq_entry->phys); 3882 3882 if (sglq_entry->virt == NULL) { 3883 3883 kfree(sglq_entry); 3884 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3884 + lpfc_printf_log(phba, KERN_ERR, 3885 + LOG_TRACE_EVENT, 3885 3886 "6304 Failure to allocate an " 3886 3887 "NVMET buf:%d\n", i); 3887 3888 rc = -ENOMEM; ··· 3938 3937 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) { 3939 3938 lxri = lpfc_sli4_next_xritag(phba); 3940 3939 if (lxri == NO_XRI) { 3941 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3940 + lpfc_printf_log(phba, KERN_ERR, 3941 + LOG_TRACE_EVENT, 3942 3942 "6307 Failed to allocate xri for " 3943 3943 "NVMET sgl\n"); 3944 3944 rc = -ENOMEM; ··· 4113 4111 &io_sgl_list, list) { 4114 4112 lxri = lpfc_sli4_next_xritag(phba); 4115 4113 if (lxri == NO_XRI) { 4116 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4114 + lpfc_printf_log(phba, KERN_ERR, 4115 + LOG_TRACE_EVENT, 4117 4116 "6075 Failed to allocate xri for " 4118 4117 "nvme buffer\n"); 4119 4118 rc = 
-ENOMEM; ··· 4184 4181 if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) && 4185 4182 (((unsigned long)(lpfc_ncmd->data) & 4186 4183 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) { 4187 - lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 4184 + lpfc_printf_log(phba, KERN_ERR, 4185 + LOG_TRACE_EVENT, 4188 4186 "3369 Memory alignment err: " 4189 4187 "addr=%lx\n", 4190 4188 (unsigned long)lpfc_ncmd->data); ··· 4214 4210 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4215 4211 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 4216 4212 kfree(lpfc_ncmd); 4217 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 4213 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4218 4214 "6121 Failed to allocate IOTAG for" 4219 4215 " XRI:0x%x\n", lxri); 4220 4216 lpfc_sli4_free_xri(phba, lxri); ··· 4265 4261 lpfc_read_nv(phba, mboxq); 4266 4262 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4267 4263 if (rc != MBX_SUCCESS) { 4268 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4264 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4269 4265 "6019 Mailbox failed , mbxCmd x%x " 4270 4266 "READ_NV, mbxStatus x%x\n", 4271 4267 bf_get(lpfc_mqe_command, &mboxq->u.mqe), ··· 4325 4321 4326 4322 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) { 4327 4323 if (wwn == lpfc_no_hba_reset[i]) { 4328 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4324 + lpfc_printf_log(phba, KERN_ERR, 4325 + LOG_TRACE_EVENT, 4329 4326 "6020 Setting use_no_reset port=%llx\n", 4330 4327 wwn); 4331 4328 use_no_reset_hba = true; ··· 4771 4766 case LPFC_ASYNC_LINK_FAULT_LR_LRR: 4772 4767 break; 4773 4768 default: 4774 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4769 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4775 4770 "0398 Unknown link fault code: x%x\n", 4776 4771 bf_get(lpfc_acqe_link_fault, acqe_link)); 4777 4772 break; ··· 4807 4802 att_type = LPFC_ATT_LINK_UP; 4808 4803 break; 4809 4804 default: 4810 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4805 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4811 4806 "0399 Invalid link attention type: 
x%x\n", 4812 4807 bf_get(lpfc_acqe_link_status, acqe_link)); 4813 4808 att_type = LPFC_ATT_RESERVED; ··· 4979 4974 phba->fcoe_eventtag = acqe_link->event_tag; 4980 4975 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4981 4976 if (!pmb) { 4982 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4977 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4983 4978 "0395 The mboxq allocation failed\n"); 4984 4979 return; 4985 4980 } 4986 4981 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4987 4982 if (!mp) { 4988 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4983 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4989 4984 "0396 The lpfc_dmabuf allocation failed\n"); 4990 4985 goto out_free_pmb; 4991 4986 } 4992 4987 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 4993 4988 if (!mp->virt) { 4994 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4989 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4995 4990 "0397 The mbuf allocation failed\n"); 4996 4991 goto out_free_dmabuf; 4997 4992 } ··· 5192 5187 phba->trunk_link.link3.fault = port_fault & 0x8 ? 
err : 0; 5193 5188 } 5194 5189 5195 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5190 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5196 5191 "2910 Async FC Trunking Event - Speed:%d\n" 5197 5192 "\tLogical speed:%d " 5198 5193 "port0: %s port1: %s port2: %s port3: %s\n", ··· 5202 5197 trunk_link_status(2), trunk_link_status(3)); 5203 5198 5204 5199 if (port_fault) 5205 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5200 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5206 5201 "3202 trunk error:0x%x (%s) seen on port0:%s " 5207 5202 /* 5208 5203 * SLI-4: We have only 0xA error codes ··· 5236 5231 5237 5232 if (bf_get(lpfc_trailer_type, acqe_fc) != 5238 5233 LPFC_FC_LA_EVENT_TYPE_FC_LINK) { 5239 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5234 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5240 5235 "2895 Non FC link Event detected.(%d)\n", 5241 5236 bf_get(lpfc_trailer_type, acqe_fc)); 5242 5237 return; ··· 5284 5279 phba->sli4_hba.link_state.fault); 5285 5280 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5286 5281 if (!pmb) { 5287 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5282 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5288 5283 "2897 The mboxq allocation failed\n"); 5289 5284 return; 5290 5285 } 5291 5286 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 5292 5287 if (!mp) { 5293 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5288 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5294 5289 "2898 The lpfc_dmabuf allocation failed\n"); 5295 5290 goto out_free_pmb; 5296 5291 } 5297 5292 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 5298 5293 if (!mp->virt) { 5299 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5294 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5300 5295 "2899 The mbuf allocation failed\n"); 5301 5296 goto out_free_dmabuf; 5302 5297 } ··· 5463 5458 &misconfigured->theEvent); 5464 5459 break; 5465 5460 default: 5466 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5461 + lpfc_printf_log(phba, KERN_ERR, 
LOG_TRACE_EVENT, 5467 5462 "3296 " 5468 5463 "LPFC_SLI_EVENT_TYPE_MISCONFIGURED " 5469 5464 "event: Invalid link %d", ··· 5515 5510 rc = lpfc_sli4_read_config(phba); 5516 5511 if (rc) { 5517 5512 phba->lmt = 0; 5518 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5513 + lpfc_printf_log(phba, KERN_ERR, 5514 + LOG_TRACE_EVENT, 5519 5515 "3194 Unable to retrieve supported " 5520 5516 "speeds, rc = 0x%x\n", rc); 5521 5517 } ··· 5668 5662 case LPFC_FIP_EVENT_TYPE_NEW_FCF: 5669 5663 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD: 5670 5664 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF) 5671 - lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 5672 - LOG_DISCOVERY, 5665 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5673 5666 "2546 New FCF event, evt_tag:x%x, " 5674 5667 "index:x%x\n", 5675 5668 acqe_fip->event_tag, ··· 5721 5716 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 5722 5717 LPFC_FCOE_FCF_GET_FIRST); 5723 5718 if (rc) 5724 - lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 5719 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5725 5720 "2547 Issue FCF scan read FCF mailbox " 5726 5721 "command failed (x%x)\n", rc); 5727 5722 break; 5728 5723 5729 5724 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL: 5730 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5731 - "2548 FCF Table full count 0x%x tag 0x%x\n", 5732 - bf_get(lpfc_acqe_fip_fcf_count, acqe_fip), 5733 - acqe_fip->event_tag); 5725 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5726 + "2548 FCF Table full count 0x%x tag 0x%x\n", 5727 + bf_get(lpfc_acqe_fip_fcf_count, acqe_fip), 5728 + acqe_fip->event_tag); 5734 5729 break; 5735 5730 5736 5731 case LPFC_FIP_EVENT_TYPE_FCF_DEAD: 5737 5732 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 5738 - lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 5739 - "2549 FCF (x%x) disconnected from network, " 5740 - "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag); 5733 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5734 + "2549 FCF (x%x) disconnected from network, " 5735 + 
"tag:x%x\n", acqe_fip->index, 5736 + acqe_fip->event_tag); 5741 5737 /* 5742 5738 * If we are in the middle of FCF failover process, clear 5743 5739 * the corresponding FCF bit in the roundrobin bitmap. ··· 5775 5769 rc = lpfc_sli4_redisc_fcf_table(phba); 5776 5770 if (rc) { 5777 5771 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 5778 - LOG_DISCOVERY, 5772 + LOG_TRACE_EVENT, 5779 5773 "2772 Issue FCF rediscover mailbox " 5780 5774 "command failed, fail through to FCF " 5781 5775 "dead event\n"); ··· 5799 5793 break; 5800 5794 case LPFC_FIP_EVENT_TYPE_CVL: 5801 5795 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 5802 - lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 5796 + lpfc_printf_log(phba, KERN_ERR, 5797 + LOG_TRACE_EVENT, 5803 5798 "2718 Clear Virtual Link Received for VPI 0x%x" 5804 5799 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); 5805 5800 ··· 5867 5860 rc = lpfc_sli4_redisc_fcf_table(phba); 5868 5861 if (rc) { 5869 5862 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 5870 - LOG_DISCOVERY, 5863 + LOG_TRACE_EVENT, 5871 5864 "2774 Issue FCF rediscover " 5872 5865 "mailbox command failed, " 5873 5866 "through to CVL event\n"); ··· 5888 5881 } 5889 5882 break; 5890 5883 default: 5891 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5892 - "0288 Unknown FCoE event type 0x%x event tag " 5893 - "0x%x\n", event_type, acqe_fip->event_tag); 5884 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5885 + "0288 Unknown FCoE event type 0x%x event tag " 5886 + "0x%x\n", event_type, acqe_fip->event_tag); 5894 5887 break; 5895 5888 } 5896 5889 } ··· 5907 5900 struct lpfc_acqe_dcbx *acqe_dcbx) 5908 5901 { 5909 5902 phba->fc_eventTag = acqe_dcbx->event_tag; 5910 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5903 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5911 5904 "0290 The SLI4 DCBX asynchronous event is not " 5912 5905 "handled yet\n"); 5913 5906 } ··· 5984 5977 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli); 5985 5978 break; 5986 5979 default: 5987 - 
lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5980 + lpfc_printf_log(phba, KERN_ERR, 5981 + LOG_TRACE_EVENT, 5988 5982 "1804 Invalid asynchronous event code: " 5989 5983 "x%x\n", bf_get(lpfc_trailer_code, 5990 5984 &cq_event->cqe.mcqe_cmpl)); ··· 6021 6013 "2777 Start post-quiescent FCF table scan\n"); 6022 6014 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 6023 6015 if (rc) 6024 - lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 6016 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6025 6017 "2747 Issue FCF scan read FCF mailbox " 6026 6018 "command failed 0x%x\n", rc); 6027 6019 } ··· 6092 6084 "0480 Enabled MSI-X interrupt mode.\n"); 6093 6085 break; 6094 6086 default: 6095 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6087 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6096 6088 "0482 Illegal interrupt mode.\n"); 6097 6089 break; 6098 6090 } ··· 6140 6132 out_disable_device: 6141 6133 pci_disable_device(pdev); 6142 6134 out_error: 6143 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6135 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6144 6136 "1401 Failed to enable pci device\n"); 6145 6137 return -ENODEV; 6146 6138 } ··· 6241 6233 6242 6234 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba); 6243 6235 if (nr_vfn > max_nr_vfn) { 6244 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6236 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6245 6237 "3057 Requested vfs (%d) greater than " 6246 6238 "supported vfs (%d)", nr_vfn, max_nr_vfn); 6247 6239 return -EINVAL; ··· 6280 6272 * Driver resources common to all SLI revisions 6281 6273 */ 6282 6274 atomic_set(&phba->fast_event_count, 0); 6275 + atomic_set(&phba->dbg_log_idx, 0); 6276 + atomic_set(&phba->dbg_log_cnt, 0); 6277 + atomic_set(&phba->dbg_log_dmping, 0); 6283 6278 spin_lock_init(&phba->hbalock); 6284 6279 6285 6280 /* Initialize ndlp management spinlock */ ··· 6710 6699 lpfc_read_nv(phba, mboxq); 6711 6700 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6712 6701 if (rc != 
MBX_SUCCESS) { 6713 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6702 + lpfc_printf_log(phba, KERN_ERR, 6703 + LOG_TRACE_EVENT, 6714 6704 "6016 Mailbox failed , mbxCmd x%x " 6715 6705 "READ_NV, mbxStatus x%x\n", 6716 6706 bf_get(lpfc_mqe_command, &mboxq->u.mqe), ··· 6740 6728 6741 6729 phba->nvmet_support = 1; /* a match */ 6742 6730 6743 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6731 + lpfc_printf_log(phba, KERN_ERR, 6732 + LOG_TRACE_EVENT, 6744 6733 "6017 NVME Target %016llx\n", 6745 6734 wwn); 6746 6735 #else 6747 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6736 + lpfc_printf_log(phba, KERN_ERR, 6737 + LOG_TRACE_EVENT, 6748 6738 "6021 Can't enable NVME Target." 6749 6739 " NVME_TARGET_FC infrastructure" 6750 6740 " is not in kernel\n"); ··· 6806 6792 &phba->sli4_hba.sli_intf); 6807 6793 if (phba->sli4_hba.extents_in_use && 6808 6794 phba->sli4_hba.rpi_hdrs_in_use) { 6809 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6810 - "2999 Unsupported SLI4 Parameters " 6811 - "Extents and RPI headers enabled.\n"); 6795 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6796 + "2999 Unsupported SLI4 Parameters " 6797 + "Extents and RPI headers enabled.\n"); 6812 6798 if (if_type == LPFC_SLI_INTF_IF_TYPE_0 && 6813 6799 if_fam == LPFC_SLI_INTF_FAMILY_BE2) { 6814 6800 mempool_free(mboxq, phba->mbox_mem_pool); ··· 6968 6954 /* Allocate and initialize active sgl array */ 6969 6955 rc = lpfc_init_active_sgl_array(phba); 6970 6956 if (rc) { 6971 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6957 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6972 6958 "1430 Failed to initialize sgl list.\n"); 6973 6959 goto out_destroy_cq_event_pool; 6974 6960 } 6975 6961 rc = lpfc_sli4_init_rpi_hdrs(phba); 6976 6962 if (rc) { 6977 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6963 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6978 6964 "1432 Failed to initialize rpi headers.\n"); 6979 6965 goto out_free_active_sgl; 6980 6966 } ··· 6984 6970 phba->fcf.fcf_rr_bmask = kcalloc(longs, 
sizeof(unsigned long), 6985 6971 GFP_KERNEL); 6986 6972 if (!phba->fcf.fcf_rr_bmask) { 6987 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6973 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6988 6974 "2759 Failed allocate memory for FCF round " 6989 6975 "robin failover bmask\n"); 6990 6976 rc = -ENOMEM; ··· 6995 6981 sizeof(struct lpfc_hba_eq_hdl), 6996 6982 GFP_KERNEL); 6997 6983 if (!phba->sli4_hba.hba_eq_hdl) { 6998 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6984 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6999 6985 "2572 Failed allocate memory for " 7000 6986 "fast-path per-EQ handle array\n"); 7001 6987 rc = -ENOMEM; ··· 7006 6992 sizeof(struct lpfc_vector_map_info), 7007 6993 GFP_KERNEL); 7008 6994 if (!phba->sli4_hba.cpu_map) { 7009 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6995 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7010 6996 "3327 Failed allocate memory for msi-x " 7011 6997 "interrupt vector mapping\n"); 7012 6998 rc = -ENOMEM; ··· 7015 7001 7016 7002 phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info); 7017 7003 if (!phba->sli4_hba.eq_info) { 7018 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7004 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7019 7005 "3321 Failed allocation for per_cpu stats\n"); 7020 7006 rc = -ENOMEM; 7021 7007 goto out_free_hba_cpu_map; ··· 7025 7011 sizeof(*phba->sli4_hba.idle_stat), 7026 7012 GFP_KERNEL); 7027 7013 if (!phba->sli4_hba.idle_stat) { 7028 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7014 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7029 7015 "3390 Failed allocation for idle_stat\n"); 7030 7016 rc = -ENOMEM; 7031 7017 goto out_free_hba_eq_info; ··· 7034 7020 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 7035 7021 phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat); 7036 7022 if (!phba->sli4_hba.c_stat) { 7037 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7023 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7038 7024 "3332 Failed allocating per cpu hdwq stats\n"); 7039 7025 rc = 
-ENOMEM; 7040 7026 goto out_free_hba_idle_stat; ··· 7182 7168 phba->lpfc_stop_port = lpfc_stop_port_s4; 7183 7169 break; 7184 7170 default: 7185 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7171 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7186 7172 "1431 Invalid HBA PCI-device group: 0x%x\n", 7187 7173 dev_grp); 7188 7174 return -ENODEV; ··· 7477 7463 7478 7464 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 7479 7465 if (!rpi_hdr) { 7480 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7466 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7481 7467 "0391 Error during rpi post operation\n"); 7482 7468 lpfc_sli4_remove_rpis(phba); 7483 7469 rc = -ENODEV; ··· 7789 7775 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 7790 7776 if ((old_mask != phba->cfg_prot_mask) || 7791 7777 (old_guard != phba->cfg_prot_guard)) 7792 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7778 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7793 7779 "1475 Registering BlockGuard with the " 7794 7780 "SCSI layer: mask %d guard %d\n", 7795 7781 phba->cfg_prot_mask, ··· 7798 7784 scsi_host_set_prot(shost, phba->cfg_prot_mask); 7799 7785 scsi_host_set_guard(shost, phba->cfg_prot_guard); 7800 7786 } else 7801 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7787 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7802 7788 "1479 Not Registering BlockGuard with the SCSI " 7803 7789 "layer, Bad protection parameters: %d %d\n", 7804 7790 old_mask, old_guard); ··· 8029 8015 * other register reads as the data may not be valid. Just exit. 
8030 8016 */ 8031 8017 if (port_error) { 8032 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8018 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8033 8019 "1408 Port Failed POST - portsmphr=0x%x, " 8034 8020 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, " 8035 8021 "scr2=x%x, hscratch=x%x, pstatus=x%x\n", ··· 8078 8064 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr); 8079 8065 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) || 8080 8066 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) { 8081 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8067 + lpfc_printf_log(phba, KERN_ERR, 8068 + LOG_TRACE_EVENT, 8082 8069 "1422 Unrecoverable Error " 8083 8070 "Detected during POST " 8084 8071 "uerr_lo_reg=0x%x, " ··· 8106 8091 phba->work_status[1] = 8107 8092 readl(phba->sli4_hba.u.if_type2. 8108 8093 ERR2regaddr); 8109 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8094 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8110 8095 "2888 Unrecoverable port error " 8111 8096 "following POST: port status reg " 8112 8097 "0x%x, port_smphr reg 0x%x, " ··· 8500 8485 8501 8486 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 8502 8487 if (!pmb) { 8503 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8488 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8504 8489 "2011 Unable to allocate memory for issuing " 8505 8490 "SLI_CONFIG_SPECIAL mailbox command\n"); 8506 8491 return -ENOMEM; ··· 8510 8495 8511 8496 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 8512 8497 if (rc != MBX_SUCCESS) { 8513 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8514 - "2012 Mailbox failed , mbxCmd x%x " 8515 - "READ_CONFIG, mbxStatus x%x\n", 8516 - bf_get(lpfc_mqe_command, &pmb->u.mqe), 8517 - bf_get(lpfc_mqe_status, &pmb->u.mqe)); 8498 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8499 + "2012 Mailbox failed , mbxCmd x%x " 8500 + "READ_CONFIG, mbxStatus x%x\n", 8501 + bf_get(lpfc_mqe_command, &pmb->u.mqe), 8502 + bf_get(lpfc_mqe_status, &pmb->u.mqe)); 8518 8503 rc = -EIO; 8519 8504 } else 
{ 8520 8505 rd_config = &pmb->u.mqe.un.rd_config; ··· 8624 8609 /* Check to see if there is enough for NVME */ 8625 8610 if ((phba->cfg_irq_chann > qmin) || 8626 8611 (phba->cfg_hdw_queue > qmin)) { 8627 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8612 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8628 8613 "2005 Reducing Queues: " 8629 8614 "WQ %d CQ %d EQ %d: min %d: " 8630 8615 "IRQ %d HDWQ %d\n", ··· 8690 8675 LPFC_USER_LINK_SPEED_AUTO; 8691 8676 break; 8692 8677 default: 8693 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8678 + lpfc_printf_log(phba, KERN_ERR, 8679 + LOG_TRACE_EVENT, 8694 8680 "0047 Unrecognized link " 8695 8681 "speed : %d\n", 8696 8682 forced_link_speed); ··· 8728 8712 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 8729 8713 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 8730 8714 if (rc2 || shdr_status || shdr_add_status) { 8731 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8715 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8732 8716 "3026 Mailbox failed , mbxCmd x%x " 8733 8717 "GET_FUNCTION_CONFIG, mbxStatus x%x\n", 8734 8718 bf_get(lpfc_mqe_command, &pmb->u.mqe), ··· 8765 8749 "vf_number:%d\n", phba->sli4_hba.iov.pf_number, 8766 8750 phba->sli4_hba.iov.vf_number); 8767 8751 else 8768 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8752 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8769 8753 "3028 GET_FUNCTION_CONFIG: failed to find " 8770 8754 "Resource Descriptor:x%x\n", 8771 8755 LPFC_RSRC_DESC_TYPE_FCFCOE); ··· 8802 8786 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 8803 8787 GFP_KERNEL); 8804 8788 if (!mboxq) { 8805 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8789 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8806 8790 "0492 Unable to allocate memory for " 8807 8791 "issuing SLI_CONFIG_SPECIAL mailbox " 8808 8792 "command\n"); ··· 8817 8801 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); 8818 8802 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 8819 8803 if (rc 
!= MBX_SUCCESS) { 8820 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8804 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8821 8805 "0493 SLI_CONFIG_SPECIAL mailbox " 8822 8806 "failed with status x%x\n", 8823 8807 rc); ··· 8897 8881 phba->sli4_hba.cq_esize, 8898 8882 phba->sli4_hba.cq_ecount, cpu); 8899 8883 if (!qdesc) { 8900 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8901 - "0499 Failed allocate fast-path IO CQ (%d)\n", idx); 8884 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8885 + "0499 Failed allocate fast-path IO CQ (%d)\n", 8886 + idx); 8902 8887 return 1; 8903 8888 } 8904 8889 qdesc->qe_valid = 1; ··· 8921 8904 phba->sli4_hba.wq_ecount, cpu); 8922 8905 8923 8906 if (!qdesc) { 8924 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8907 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8925 8908 "0503 Failed allocate fast-path IO WQ (%d)\n", 8926 8909 idx); 8927 8910 return 1; ··· 8977 8960 phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue), 8978 8961 GFP_KERNEL); 8979 8962 if (!phba->sli4_hba.hdwq) { 8980 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8963 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8981 8964 "6427 Failed allocate memory for " 8982 8965 "fast-path Hardware Queue array\n"); 8983 8966 goto out_error; ··· 9009 8992 sizeof(struct lpfc_queue *), 9010 8993 GFP_KERNEL); 9011 8994 if (!phba->sli4_hba.nvmet_cqset) { 9012 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8995 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9013 8996 "3121 Fail allocate memory for " 9014 8997 "fast-path CQ set array\n"); 9015 8998 goto out_error; ··· 9019 9002 sizeof(struct lpfc_queue *), 9020 9003 GFP_KERNEL); 9021 9004 if (!phba->sli4_hba.nvmet_mrq_hdr) { 9022 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9005 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9023 9006 "3122 Fail allocate memory for " 9024 9007 "fast-path RQ set hdr array\n"); 9025 9008 goto out_error; ··· 9029 9012 sizeof(struct lpfc_queue *), 9030 9013 GFP_KERNEL); 9031 9014 if 
(!phba->sli4_hba.nvmet_mrq_data) { 9032 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9015 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9033 9016 "3124 Fail allocate memory for " 9034 9017 "fast-path RQ set data array\n"); 9035 9018 goto out_error; ··· 9057 9040 phba->sli4_hba.eq_esize, 9058 9041 phba->sli4_hba.eq_ecount, cpu); 9059 9042 if (!qdesc) { 9060 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9043 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9061 9044 "0497 Failed allocate EQ (%d)\n", 9062 9045 cpup->hdwq); 9063 9046 goto out_error; ··· 9111 9094 phba->sli4_hba.cq_ecount, 9112 9095 cpu); 9113 9096 if (!qdesc) { 9114 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9097 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9115 9098 "3142 Failed allocate NVME " 9116 9099 "CQ Set (%d)\n", idx); 9117 9100 goto out_error; ··· 9133 9116 phba->sli4_hba.cq_esize, 9134 9117 phba->sli4_hba.cq_ecount, cpu); 9135 9118 if (!qdesc) { 9136 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9119 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9137 9120 "0500 Failed allocate slow-path mailbox CQ\n"); 9138 9121 goto out_error; 9139 9122 } ··· 9145 9128 phba->sli4_hba.cq_esize, 9146 9129 phba->sli4_hba.cq_ecount, cpu); 9147 9130 if (!qdesc) { 9148 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9131 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9149 9132 "0501 Failed allocate slow-path ELS CQ\n"); 9150 9133 goto out_error; 9151 9134 } ··· 9164 9147 phba->sli4_hba.mq_esize, 9165 9148 phba->sli4_hba.mq_ecount, cpu); 9166 9149 if (!qdesc) { 9167 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9150 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9168 9151 "0505 Failed allocate slow-path MQ\n"); 9169 9152 goto out_error; 9170 9153 } ··· 9180 9163 phba->sli4_hba.wq_esize, 9181 9164 phba->sli4_hba.wq_ecount, cpu); 9182 9165 if (!qdesc) { 9183 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9166 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9184 9167 "0504 Failed allocate slow-path 
ELS WQ\n"); 9185 9168 goto out_error; 9186 9169 } ··· 9194 9177 phba->sli4_hba.cq_esize, 9195 9178 phba->sli4_hba.cq_ecount, cpu); 9196 9179 if (!qdesc) { 9197 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9180 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9198 9181 "6079 Failed allocate NVME LS CQ\n"); 9199 9182 goto out_error; 9200 9183 } ··· 9207 9190 phba->sli4_hba.wq_esize, 9208 9191 phba->sli4_hba.wq_ecount, cpu); 9209 9192 if (!qdesc) { 9210 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9193 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9211 9194 "6080 Failed allocate NVME LS WQ\n"); 9212 9195 goto out_error; 9213 9196 } ··· 9225 9208 phba->sli4_hba.rq_esize, 9226 9209 phba->sli4_hba.rq_ecount, cpu); 9227 9210 if (!qdesc) { 9228 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9211 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9229 9212 "0506 Failed allocate receive HRQ\n"); 9230 9213 goto out_error; 9231 9214 } ··· 9236 9219 phba->sli4_hba.rq_esize, 9237 9220 phba->sli4_hba.rq_ecount, cpu); 9238 9221 if (!qdesc) { 9239 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9222 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9240 9223 "0507 Failed allocate receive DRQ\n"); 9241 9224 goto out_error; 9242 9225 } ··· 9254 9237 LPFC_NVMET_RQE_DEF_COUNT, 9255 9238 cpu); 9256 9239 if (!qdesc) { 9257 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9240 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9258 9241 "3146 Failed allocate " 9259 9242 "receive HRQ\n"); 9260 9243 goto out_error; ··· 9267 9250 GFP_KERNEL, 9268 9251 cpu_to_node(cpu)); 9269 9252 if (qdesc->rqbp == NULL) { 9270 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9253 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9271 9254 "6131 Failed allocate " 9272 9255 "Header RQBP\n"); 9273 9256 goto out_error; ··· 9283 9266 LPFC_NVMET_RQE_DEF_COUNT, 9284 9267 cpu); 9285 9268 if (!qdesc) { 9286 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9269 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9287 9270 "3156 
Failed allocate " 9288 9271 "receive DRQ\n"); 9289 9272 goto out_error; ··· 9474 9457 int rc; 9475 9458 9476 9459 if (!eq || !cq || !wq) { 9477 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9460 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9478 9461 "6085 Fast-path %s (%d) not allocated\n", 9479 9462 ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx); 9480 9463 return -ENOMEM; ··· 9484 9467 rc = lpfc_cq_create(phba, cq, eq, 9485 9468 (qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype); 9486 9469 if (rc) { 9487 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9488 - "6086 Failed setup of CQ (%d), rc = 0x%x\n", 9489 - qidx, (uint32_t)rc); 9470 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9471 + "6086 Failed setup of CQ (%d), rc = 0x%x\n", 9472 + qidx, (uint32_t)rc); 9490 9473 return rc; 9491 9474 } 9492 9475 ··· 9502 9485 /* create the wq */ 9503 9486 rc = lpfc_wq_create(phba, wq, cq, qtype); 9504 9487 if (rc) { 9505 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9488 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9506 9489 "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n", 9507 9490 qidx, (uint32_t)rc); 9508 9491 /* no need to tear down cq - caller will do so */ ··· 9520 9503 } else { 9521 9504 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX); 9522 9505 if (rc) { 9523 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9524 - "0539 Failed setup of slow-path MQ: " 9525 - "rc = 0x%x\n", rc); 9506 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9507 + "0539 Failed setup of slow-path MQ: " 9508 + "rc = 0x%x\n", rc); 9526 9509 /* no need to tear down cq - caller will do so */ 9527 9510 return rc; 9528 9511 } ··· 9595 9578 /* Check for dual-ULP support */ 9596 9579 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 9597 9580 if (!mboxq) { 9598 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9581 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9599 9582 "3249 Unable to allocate memory for " 9600 9583 "QUERY_FW_CFG mailbox command\n"); 9601 9584 return -ENOMEM; ··· 
9613 9596 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 9614 9597 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 9615 9598 if (shdr_status || shdr_add_status || rc) { 9616 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9599 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9617 9600 "3250 QUERY_FW_CFG mailbox failed with status " 9618 9601 "x%x add_status x%x, mbx status x%x\n", 9619 9602 shdr_status, shdr_add_status, rc); ··· 9644 9627 9645 9628 /* Set up HBA event queue */ 9646 9629 if (!qp) { 9647 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9630 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9648 9631 "3147 Fast-path EQs not allocated\n"); 9649 9632 rc = -ENOMEM; 9650 9633 goto out_error; ··· 9668 9651 rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq, 9669 9652 phba->cfg_fcp_imax); 9670 9653 if (rc) { 9671 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9654 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9672 9655 "0523 Failed setup of fast-path" 9673 9656 " EQ (%d), rc = 0x%x\n", 9674 9657 cpup->eq, (uint32_t)rc); ··· 9700 9683 qidx, 9701 9684 LPFC_IO); 9702 9685 if (rc) { 9703 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9686 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9704 9687 "0535 Failed to setup fastpath " 9705 9688 "IO WQ/CQ (%d), rc = 0x%x\n", 9706 9689 qidx, (uint32_t)rc); ··· 9715 9698 /* Set up slow-path MBOX CQ/MQ */ 9716 9699 9717 9700 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) { 9718 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9701 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9719 9702 "0528 %s not allocated\n", 9720 9703 phba->sli4_hba.mbx_cq ? 
9721 9704 "Mailbox WQ" : "Mailbox CQ"); ··· 9728 9711 phba->sli4_hba.mbx_wq, 9729 9712 NULL, 0, LPFC_MBOX); 9730 9713 if (rc) { 9731 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9714 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9732 9715 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n", 9733 9716 (uint32_t)rc); 9734 9717 goto out_destroy; 9735 9718 } 9736 9719 if (phba->nvmet_support) { 9737 9720 if (!phba->sli4_hba.nvmet_cqset) { 9738 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9721 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9739 9722 "3165 Fast-path NVME CQ Set " 9740 9723 "array not allocated\n"); 9741 9724 rc = -ENOMEM; ··· 9747 9730 qp, 9748 9731 LPFC_WCQ, LPFC_NVMET); 9749 9732 if (rc) { 9750 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9733 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9751 9734 "3164 Failed setup of NVME CQ " 9752 9735 "Set, rc = 0x%x\n", 9753 9736 (uint32_t)rc); ··· 9759 9742 qp[0].hba_eq, 9760 9743 LPFC_WCQ, LPFC_NVMET); 9761 9744 if (rc) { 9762 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9745 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9763 9746 "6089 Failed setup NVMET CQ: " 9764 9747 "rc = 0x%x\n", (uint32_t)rc); 9765 9748 goto out_destroy; ··· 9776 9759 9777 9760 /* Set up slow-path ELS WQ/CQ */ 9778 9761 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) { 9779 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9762 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9780 9763 "0530 ELS %s not allocated\n", 9781 9764 phba->sli4_hba.els_cq ? 
"WQ" : "CQ"); 9782 9765 rc = -ENOMEM; ··· 9787 9770 phba->sli4_hba.els_wq, 9788 9771 NULL, 0, LPFC_ELS); 9789 9772 if (rc) { 9790 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9773 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9791 9774 "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n", 9792 9775 (uint32_t)rc); 9793 9776 goto out_destroy; ··· 9800 9783 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 9801 9784 /* Set up NVME LS Complete Queue */ 9802 9785 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) { 9803 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9786 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9804 9787 "6091 LS %s not allocated\n", 9805 9788 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ"); 9806 9789 rc = -ENOMEM; ··· 9811 9794 phba->sli4_hba.nvmels_wq, 9812 9795 NULL, 0, LPFC_NVME_LS); 9813 9796 if (rc) { 9814 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9797 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9815 9798 "0526 Failed setup of NVVME LS WQ/CQ: " 9816 9799 "rc = 0x%x\n", (uint32_t)rc); 9817 9800 goto out_destroy; ··· 9831 9814 if ((!phba->sli4_hba.nvmet_cqset) || 9832 9815 (!phba->sli4_hba.nvmet_mrq_hdr) || 9833 9816 (!phba->sli4_hba.nvmet_mrq_data)) { 9834 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9817 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9835 9818 "6130 MRQ CQ Queues not " 9836 9819 "allocated\n"); 9837 9820 rc = -ENOMEM; ··· 9844 9827 phba->sli4_hba.nvmet_cqset, 9845 9828 LPFC_NVMET); 9846 9829 if (rc) { 9847 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9830 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9848 9831 "6098 Failed setup of NVMET " 9849 9832 "MRQ: rc = 0x%x\n", 9850 9833 (uint32_t)rc); ··· 9858 9841 phba->sli4_hba.nvmet_cqset[0], 9859 9842 LPFC_NVMET); 9860 9843 if (rc) { 9861 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9844 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9862 9845 "6057 Failed setup of NVMET " 9863 9846 "Receive Queue: rc = 0x%x\n", 9864 9847 (uint32_t)rc); ··· 9877 9860 } 9878 
9861 9879 9862 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { 9880 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9863 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9881 9864 "0540 Receive Queue not allocated\n"); 9882 9865 rc = -ENOMEM; 9883 9866 goto out_destroy; ··· 9886 9869 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 9887 9870 phba->sli4_hba.els_cq, LPFC_USOL); 9888 9871 if (rc) { 9889 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9872 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9890 9873 "0541 Failed setup of Receive Queue: " 9891 9874 "rc = 0x%x\n", (uint32_t)rc); 9892 9875 goto out_destroy; ··· 9914 9897 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1), 9915 9898 sizeof(struct lpfc_queue *), GFP_KERNEL); 9916 9899 if (!phba->sli4_hba.cq_lookup) { 9917 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9900 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9918 9901 "0549 Failed setup of CQ Lookup table: " 9919 9902 "size 0x%x\n", phba->sli4_hba.cq_max); 9920 9903 rc = -ENOMEM; ··· 10214 10197 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 10215 10198 GFP_KERNEL); 10216 10199 if (!mboxq) { 10217 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10200 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10218 10201 "0494 Unable to allocate memory for " 10219 10202 "issuing SLI_FUNCTION_RESET mailbox " 10220 10203 "command\n"); ··· 10234 10217 if (rc != MBX_TIMEOUT) 10235 10218 mempool_free(mboxq, phba->mbox_mem_pool); 10236 10219 if (shdr_status || shdr_add_status || rc) { 10237 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10220 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10238 10221 "0495 SLI_FUNCTION_RESET mailbox " 10239 10222 "failed with status x%x add_status x%x," 10240 10223 " mbx status x%x\n", ··· 10266 10249 phba->sli4_hba.u.if_type2.ERR1regaddr); 10267 10250 phba->work_status[1] = readl( 10268 10251 phba->sli4_hba.u.if_type2.ERR2regaddr); 10269 - lpfc_printf_log(phba, KERN_ERR, 
LOG_INIT, 10252 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10270 10253 "2890 Port not ready, port status reg " 10271 10254 "0x%x error 1=0x%x, error 2=0x%x\n", 10272 10255 reg_data.word0, ··· 10308 10291 out: 10309 10292 /* Catch the not-ready port failure after a port reset. */ 10310 10293 if (rc) { 10311 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10294 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10312 10295 "3317 HBA not functional: IP Reset Failed " 10313 10296 "try: echo fw_reset > board_mode\n"); 10314 10297 rc = -ENODEV; ··· 10358 10341 /* There is no SLI3 failback for SLI4 devices. */ 10359 10342 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) != 10360 10343 LPFC_SLI_INTF_VALID) { 10361 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10344 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10362 10345 "2894 SLI_INTF reg contents invalid " 10363 10346 "sli_intf reg 0x%x\n", 10364 10347 phba->sli4_hba.sli_intf.word0); ··· 10633 10616 10634 10617 if (!pmb) { 10635 10618 rc = -ENOMEM; 10636 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10619 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10637 10620 "0474 Unable to allocate memory for issuing " 10638 10621 "MBOX_CONFIG_MSI command\n"); 10639 10622 goto mem_fail_out; ··· 11614 11597 } 11615 11598 11616 11599 if (vectors != phba->cfg_irq_chann) { 11617 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11600 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11618 11601 "3238 Reducing IO channels to match number of " 11619 11602 "MSI-X vectors, requested %d got %d\n", 11620 11603 phba->cfg_irq_chann, vectors); ··· 11887 11870 while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) { 11888 11871 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) { 11889 11872 if (!nvmet_xri_cmpl) 11890 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11873 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11891 11874 "6424 NVMET XRI exchange busy " 11892 11875 "wait time: %d seconds.\n", 11893 11876 wait_time/1000); 11894 
11877 if (!io_xri_cmpl) 11895 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11878 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11896 11879 "6100 IO XRI exchange busy " 11897 11880 "wait time: %d seconds.\n", 11898 11881 wait_time/1000); 11899 11882 if (!els_xri_cmpl) 11900 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11883 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11901 11884 "2878 ELS XRI exchange busy " 11902 11885 "wait time: %d seconds.\n", 11903 11886 wait_time/1000); ··· 12391 12374 /* Configure and enable interrupt */ 12392 12375 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); 12393 12376 if (intr_mode == LPFC_INTR_ERROR) { 12394 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12377 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12395 12378 "0431 Failed to enable interrupt.\n"); 12396 12379 error = -ENODEV; 12397 12380 goto out_free_sysfs_attr; 12398 12381 } 12399 12382 /* SLI-3 HBA setup */ 12400 12383 if (lpfc_sli_hba_setup(phba)) { 12401 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12384 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12402 12385 "1477 Failed to set up hba\n"); 12403 12386 error = -ENODEV; 12404 12387 goto out_remove_device; ··· 12656 12639 /* Configure and enable interrupt */ 12657 12640 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 12658 12641 if (intr_mode == LPFC_INTR_ERROR) { 12659 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12642 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12660 12643 "0430 PM resume Failed to enable interrupt\n"); 12661 12644 return -EIO; 12662 12645 } else ··· 12682 12665 static void 12683 12666 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) 12684 12667 { 12685 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12668 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12686 12669 "2723 PCI channel I/O abort preparing for recovery\n"); 12687 12670 12688 12671 /* ··· 12703 12686 static void 12704 12687 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) 12705 12688 { 12706 - 
lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12689 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12707 12690 "2710 PCI channel disable preparing for reset\n"); 12708 12691 12709 12692 /* Block any management I/Os to the device */ ··· 12734 12717 static void 12735 12718 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba) 12736 12719 { 12737 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12720 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12738 12721 "2711 PCI channel permanent disable for failure\n"); 12739 12722 /* Block all SCSI devices' I/Os on the host */ 12740 12723 lpfc_scsi_dev_block(phba); ··· 12785 12768 return PCI_ERS_RESULT_DISCONNECT; 12786 12769 default: 12787 12770 /* Unknown state, prepare and request slot reset */ 12788 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12771 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12789 12772 "0472 Unknown PCI error state: x%x\n", state); 12790 12773 lpfc_sli_prep_dev_for_reset(phba); 12791 12774 return PCI_ERS_RESULT_NEED_RESET; ··· 12843 12826 /* Configure and enable interrupt */ 12844 12827 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 12845 12828 if (intr_mode == LPFC_INTR_ERROR) { 12846 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12829 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12847 12830 "0427 Cannot re-enable interrupt after " 12848 12831 "slot reset.\n"); 12849 12832 return PCI_ERS_RESULT_DISCONNECT; ··· 12946 12929 magic_number != MAGIC_NUMBER_G6) || 12947 12930 (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC && 12948 12931 magic_number != MAGIC_NUMBER_G7)) { 12949 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12932 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12950 12933 "3030 This firmware version is not supported on" 12951 12934 " this HBA model. 
Device:%x Magic:%x Type:%x " 12952 12935 "ID:%x Size %d %zd\n", ··· 12954 12937 fsize, fw->size); 12955 12938 rc = -EINVAL; 12956 12939 } else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) { 12957 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12940 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12958 12941 "3021 Firmware downloads have been prohibited " 12959 12942 "by a system configuration setting on " 12960 12943 "Device:%x Magic:%x Type:%x ID:%x Size %d " ··· 12963 12946 fsize, fw->size); 12964 12947 rc = -EACCES; 12965 12948 } else { 12966 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12949 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12967 12950 "3022 FW Download failed. Add Status x%x " 12968 12951 "Device:%x Magic:%x Type:%x ID:%x Size %d " 12969 12952 "%zd\n", ··· 13008 12991 INIT_LIST_HEAD(&dma_buffer_list); 13009 12992 lpfc_decode_firmware_rev(phba, fwrev, 1); 13010 12993 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) { 13011 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12994 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13012 12995 "3023 Updating Firmware, Current Version:%s " 13013 12996 "New Version:%s\n", 13014 12997 fwrev, image->revision); ··· 13058 13041 } 13059 13042 rc = offset; 13060 13043 } else 13061 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13044 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13062 13045 "3029 Skipped Firmware update, Current " 13063 13046 "Version:%s New Version:%s\n", 13064 13047 fwrev, image->revision); ··· 13073 13056 release_firmware(fw); 13074 13057 out: 13075 13058 if (rc < 0) 13076 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13059 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13077 13060 "3062 Firmware update error, status %d.\n", rc); 13078 13061 else 13079 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13062 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13080 13063 "3024 Firmware update success: size %d.\n", rc); 13081 13064 } 13082 13065 ··· 13205 13188 /* Configure 
and enable interrupt */ 13206 13189 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); 13207 13190 if (intr_mode == LPFC_INTR_ERROR) { 13208 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13191 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13209 13192 "0426 Failed to enable interrupt.\n"); 13210 13193 error = -ENODEV; 13211 13194 goto out_unset_driver_resource; ··· 13240 13223 13241 13224 /* Set up SLI-4 HBA */ 13242 13225 if (lpfc_sli4_hba_setup(phba)) { 13243 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13226 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13244 13227 "1421 Failed to set up hba\n"); 13245 13228 error = -ENODEV; 13246 13229 goto out_free_sysfs_attr; ··· 13265 13248 */ 13266 13249 error = lpfc_nvme_create_localport(vport); 13267 13250 if (error) { 13268 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13251 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13269 13252 "6004 NVME registration " 13270 13253 "failed, error x%x\n", 13271 13254 error); ··· 13499 13482 /* Configure and enable interrupt */ 13500 13483 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 13501 13484 if (intr_mode == LPFC_INTR_ERROR) { 13502 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13485 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13503 13486 "0294 PM resume Failed to enable interrupt\n"); 13504 13487 return -EIO; 13505 13488 } else ··· 13525 13508 static void 13526 13509 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba) 13527 13510 { 13528 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13511 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13529 13512 "2828 PCI channel I/O abort preparing for recovery\n"); 13530 13513 /* 13531 13514 * There may be errored I/Os through HBA, abort all I/Os on txcmplq ··· 13545 13528 static void 13546 13529 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) 13547 13530 { 13548 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13531 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13549 13532 "2826 PCI channel disable preparing 
for reset\n"); 13550 13533 13551 13534 /* Block any management I/Os to the device */ ··· 13577 13560 static void 13578 13561 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba) 13579 13562 { 13580 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13563 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13581 13564 "2827 PCI channel permanent disable for failure\n"); 13582 13565 13583 13566 /* Block all SCSI devices' I/Os on the host */ ··· 13627 13610 return PCI_ERS_RESULT_DISCONNECT; 13628 13611 default: 13629 13612 /* Unknown state, prepare and request slot reset */ 13630 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13613 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13631 13614 "2825 Unknown PCI error state: x%x\n", state); 13632 13615 lpfc_sli4_prep_dev_for_reset(phba); 13633 13616 return PCI_ERS_RESULT_NEED_RESET; ··· 13685 13668 /* Configure and enable interrupt */ 13686 13669 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 13687 13670 if (intr_mode == LPFC_INTR_ERROR) { 13688 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13671 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13689 13672 "2824 Cannot re-enable interrupt after " 13690 13673 "slot reset.\n"); 13691 13674 return PCI_ERS_RESULT_DISCONNECT; ··· 13790 13773 lpfc_pci_remove_one_s4(pdev); 13791 13774 break; 13792 13775 default: 13793 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13776 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13794 13777 "1424 Invalid PCI device group: 0x%x\n", 13795 13778 phba->pci_dev_grp); 13796 13779 break; ··· 13827 13810 rc = lpfc_pci_suspend_one_s4(pdev, msg); 13828 13811 break; 13829 13812 default: 13830 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13813 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13831 13814 "1425 Invalid PCI device group: 0x%x\n", 13832 13815 phba->pci_dev_grp); 13833 13816 break; ··· 13863 13846 rc = lpfc_pci_resume_one_s4(pdev); 13864 13847 break; 13865 13848 default: 13866 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13849 + 
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13867 13850 "1426 Invalid PCI device group: 0x%x\n", 13868 13851 phba->pci_dev_grp); 13869 13852 break; ··· 13901 13884 rc = lpfc_io_error_detected_s4(pdev, state); 13902 13885 break; 13903 13886 default: 13904 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13887 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13905 13888 "1427 Invalid PCI device group: 0x%x\n", 13906 13889 phba->pci_dev_grp); 13907 13890 break; ··· 13938 13921 rc = lpfc_io_slot_reset_s4(pdev); 13939 13922 break; 13940 13923 default: 13941 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13924 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13942 13925 "1428 Invalid PCI device group: 0x%x\n", 13943 13926 phba->pci_dev_grp); 13944 13927 break; ··· 13970 13953 lpfc_io_resume_s4(pdev); 13971 13954 break; 13972 13955 default: 13973 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13956 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13974 13957 "1429 Invalid PCI device group: 0x%x\n", 13975 13958 phba->pci_dev_grp); 13976 13959 break; ··· 14124 14107 fc_release_transport(lpfc_vport_transport_template); 14125 14108 14126 14109 return error; 14110 + } 14111 + 14112 + void lpfc_dmp_dbg(struct lpfc_hba *phba) 14113 + { 14114 + unsigned int start_idx; 14115 + unsigned int dbg_cnt; 14116 + unsigned int temp_idx; 14117 + int i; 14118 + int j = 0; 14119 + unsigned long rem_nsec; 14120 + 14121 + if (phba->cfg_log_verbose) 14122 + return; 14123 + 14124 + if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0) 14125 + return; 14126 + 14127 + start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ; 14128 + dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt); 14129 + temp_idx = start_idx; 14130 + if (dbg_cnt >= DBG_LOG_SZ) { 14131 + dbg_cnt = DBG_LOG_SZ; 14132 + temp_idx -= 1; 14133 + } else { 14134 + if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) { 14135 + temp_idx = (start_idx + dbg_cnt) % DBG_LOG_SZ; 14136 + } else { 14137 + if ((start_idx - dbg_cnt) < 
0) { 14138 + start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx); 14139 + temp_idx = 0; 14140 + } else { 14141 + start_idx -= dbg_cnt; 14142 + } 14143 + } 14144 + } 14145 + dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n", 14146 + start_idx, temp_idx, dbg_cnt); 14147 + 14148 + for (i = 0; i < dbg_cnt; i++) { 14149 + if ((start_idx + i) < DBG_LOG_SZ) 14150 + temp_idx = (start_idx + i) % (DBG_LOG_SZ - 1); 14151 + else 14152 + temp_idx = j++; 14153 + rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC); 14154 + dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s", 14155 + temp_idx, 14156 + (unsigned long)phba->dbg_log[temp_idx].t_ns, 14157 + rem_nsec / 1000, 14158 + phba->dbg_log[temp_idx].log); 14159 + } 14160 + atomic_set(&phba->dbg_log_cnt, 0); 14161 + atomic_set(&phba->dbg_log_dmping, 0); 14162 + } 14163 + 14164 + void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...) 14165 + { 14166 + unsigned int idx; 14167 + va_list args; 14168 + int dbg_dmping = atomic_read(&phba->dbg_log_dmping); 14169 + struct va_format vaf; 14170 + 14171 + 14172 + va_start(args, fmt); 14173 + if (unlikely(dbg_dmping)) { 14174 + vaf.fmt = fmt; 14175 + vaf.va = &args; 14176 + dev_info(&phba->pcidev->dev, "%pV", &vaf); 14177 + va_end(args); 14178 + return; 14179 + } 14180 + idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) % 14181 + DBG_LOG_SZ; 14182 + 14183 + atomic_inc(&phba->dbg_log_cnt); 14184 + 14185 + vscnprintf(phba->dbg_log[idx].log, 14186 + sizeof(phba->dbg_log[idx].log), fmt, args); 14187 + va_end(args); 14188 + 14189 + phba->dbg_log[idx].t_ns = local_clock(); 14127 14190 } 14128 14191 14129 14192 /**
+19 -5
drivers/scsi/lpfc/lpfc_logmsg.h
··· 44 44 #define LOG_NVME_DISC 0x00200000 /* NVME Discovery/Connect events. */ 45 45 #define LOG_NVME_ABTS 0x00400000 /* NVME ABTS events. */ 46 46 #define LOG_NVME_IOERR 0x00800000 /* NVME IO Error events. */ 47 - #define LOG_ALL_MSG 0xffffffff /* LOG all messages */ 47 + #define LOG_TRACE_EVENT 0x80000000 /* Dmp the DBG log on this err */ 48 + #define LOG_ALL_MSG 0x7fffffff /* LOG all messages */ 49 + 50 + void lpfc_dmp_dbg(struct lpfc_hba *phba); 51 + void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...); 48 52 49 53 /* generate message by verbose log setting or severity */ 50 54 #define lpfc_vlog_msg(vport, level, mask, fmt, arg...) \ ··· 69 65 70 66 #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \ 71 67 do { \ 72 - { if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '3')) \ 68 + { if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '3')) { \ 69 + if ((mask) & LOG_TRACE_EVENT) \ 70 + lpfc_dmp_dbg((vport)->phba); \ 73 71 dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \ 74 - fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } \ 72 + fmt, (vport)->phba->brd_no, vport->vpi, ##arg); \ 73 + } else if (!(vport)->cfg_log_verbose) \ 74 + lpfc_dbg_print((vport)->phba, "%d:(%d):" fmt, \ 75 + (vport)->phba->brd_no, (vport)->vpi, ##arg); \ 76 + } \ 75 77 } while (0) 76 78 77 79 #define lpfc_printf_log(phba, level, mask, fmt, arg...) \ ··· 85 75 { uint32_t log_verbose = (phba)->pport ? \ 86 76 (phba)->pport->cfg_log_verbose : \ 87 77 (phba)->cfg_log_verbose; \ 88 - if (((mask) & log_verbose) || (level[1] <= '3')) \ 78 + if (((mask) & log_verbose) || (level[1] <= '3')) { \ 79 + if ((mask) & LOG_TRACE_EVENT) \ 80 + lpfc_dmp_dbg(phba); \ 89 81 dev_printk(level, &((phba)->pcidev)->dev, "%d:" \ 90 - fmt, phba->brd_no, ##arg); \ 82 + fmt, phba->brd_no, ##arg); \ 83 + } else if (!(phba)->cfg_log_verbose)\ 84 + lpfc_dbg_print(phba, "%d:" fmt, phba->brd_no, ##arg); \ 91 85 } \ 92 86 } while (0)
+26 -25
drivers/scsi/lpfc/lpfc_nportdisc.c
··· 152 152 memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name)); 153 153 return 1; 154 154 bad_service_param: 155 - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 155 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 156 156 "0207 Device %x " 157 157 "(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent " 158 158 "invalid service parameters. Ignoring device.\n", ··· 301 301 302 302 /* Check for CONFIG_LINK error */ 303 303 if (mb->mbxStatus) { 304 - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 304 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 305 305 "4575 CONFIG_LINK fails pt2pt discovery: %x\n", 306 306 mb->mbxStatus); 307 307 mempool_free(login_mbox, phba->mbox_mem_pool); ··· 316 316 rc = lpfc_els_rsp_acc(link_mbox->vport, ELS_CMD_PLOGI, 317 317 save_iocb, ndlp, login_mbox); 318 318 if (rc) { 319 - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 319 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 320 320 "4576 PLOGI ACC fails pt2pt discovery: %x\n", 321 321 rc); 322 322 mempool_free(login_mbox, phba->mbox_mem_pool); ··· 361 361 lpfc_sli4_unreg_rpi_cmpl_clr(phba, pmb); 362 362 363 363 if (!piocb) { 364 - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY | LOG_ELS, 364 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 365 365 "4578 PLOGI ACC fail\n"); 366 366 if (mbox) 367 367 mempool_free(mbox, phba->mbox_mem_pool); ··· 370 370 371 371 rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, piocb, ndlp, mbox); 372 372 if (rc) { 373 - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY | LOG_ELS, 373 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 374 374 "4579 PLOGI ACC fail %x\n", rc); 375 375 if (mbox) 376 376 mempool_free(mbox, phba->mbox_mem_pool); ··· 405 405 lp = (uint32_t *) pcmd->virt; 406 406 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t)); 407 407 if (wwn_to_u64(sp->portName.u.wwn) == 0) { 408 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 408 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 409 409 "0140 PLOGI Reject: 
invalid nname\n"); 410 410 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 411 411 stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_PNAME; ··· 414 414 return 0; 415 415 } 416 416 if (wwn_to_u64(sp->nodeName.u.wwn) == 0) { 417 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 417 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 418 418 "0141 PLOGI Reject: invalid pname\n"); 419 419 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 420 420 stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_NNAME; ··· 481 481 } 482 482 if (nlp_portwwn != 0 && 483 483 nlp_portwwn != wwn_to_u64(sp->portName.u.wwn)) 484 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 484 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 485 485 "0143 PLOGI recv'd from DID: x%x " 486 486 "WWPN changed: old %llx new %llx\n", 487 487 ndlp->nlp_DID, ··· 689 689 return 1; 690 690 out: 691 691 if (defer_acc) 692 - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 692 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 693 693 "4577 discovery failure: %p %p %p\n", 694 694 save_iocb, link_mbox, login_mbox); 695 695 kfree(save_iocb); ··· 1097 1097 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 1098 1098 GFP_KERNEL); 1099 1099 if (!pmb) 1100 - lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 1101 - "2796 mailbox memory allocation failed \n"); 1100 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1101 + "2796 mailbox memory allocation failed \n"); 1102 1102 else { 1103 1103 lpfc_unreg_login(phba, vport->vpi, rpi, pmb); 1104 1104 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; ··· 1136 1136 rpi = pmb->u.mb.un.varWords[0]; 1137 1137 lpfc_release_rpi(phba, vport, ndlp, rpi); 1138 1138 } 1139 - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 1139 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1140 1140 "0271 Illegal State Transition: node x%x " 1141 1141 "event x%x, state x%x Data: x%x x%x\n", 1142 1142 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, ··· 1154 1154 * to stop it. 
1155 1155 */ 1156 1156 if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) { 1157 - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 1158 - "0272 Illegal State Transition: node x%x " 1159 - "event x%x, state x%x Data: x%x x%x\n", 1160 - ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, 1161 - ndlp->nlp_flag); 1157 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1158 + "0272 Illegal State Transition: node x%x " 1159 + "event x%x, state x%x Data: x%x x%x\n", 1160 + ndlp->nlp_DID, evt, ndlp->nlp_state, 1161 + ndlp->nlp_rpi, ndlp->nlp_flag); 1162 1162 } 1163 1163 return ndlp->nlp_state; 1164 1164 } ··· 1378 1378 if ((ndlp->nlp_DID != FDMI_DID) && 1379 1379 (wwn_to_u64(sp->portName.u.wwn) == 0 || 1380 1380 wwn_to_u64(sp->nodeName.u.wwn) == 0)) { 1381 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1381 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1382 1382 "0142 PLOGI RSP: Invalid WWN.\n"); 1383 1383 goto out; 1384 1384 } ··· 1440 1440 } else { 1441 1441 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1442 1442 if (!mbox) { 1443 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1443 + lpfc_printf_vlog(vport, KERN_ERR, 1444 + LOG_TRACE_EVENT, 1444 1445 "0133 PLOGI: no memory " 1445 1446 "for config_link " 1446 1447 "Data: x%x x%x x%x x%x\n", ··· 1466 1465 1467 1466 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1468 1467 if (!mbox) { 1469 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1468 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1470 1469 "0018 PLOGI: no memory for reg_login " 1471 1470 "Data: x%x x%x x%x x%x\n", 1472 1471 ndlp->nlp_DID, ndlp->nlp_state, ··· 1506 1505 kfree(mp); 1507 1506 mempool_free(mbox, phba->mbox_mem_pool); 1508 1507 1509 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1508 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1510 1509 "0134 PLOGI: cannot issue reg_login " 1511 1510 "Data: x%x x%x x%x x%x\n", 1512 1511 ndlp->nlp_DID, ndlp->nlp_state, ··· 1514 1513 } else { 1515 1514 mempool_free(mbox, phba->mbox_mem_pool); 1516 1515 
1517 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1516 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1518 1517 "0135 PLOGI: cannot format reg_login " 1519 1518 "Data: x%x x%x x%x x%x\n", 1520 1519 ndlp->nlp_DID, ndlp->nlp_state, ··· 1525 1524 out: 1526 1525 if (ndlp->nlp_DID == NameServer_DID) { 1527 1526 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 1528 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1527 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1529 1528 "0261 Cannot Register NameServer login\n"); 1530 1529 } 1531 1530 ··· 1947 1946 1948 1947 if (mb->mbxStatus) { 1949 1948 /* RegLogin failed */ 1950 - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 1951 - "0246 RegLogin failed Data: x%x x%x x%x x%x " 1949 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1950 + "0246 RegLogin failed Data: x%x x%x x%x x%x " 1952 1951 "x%x\n", 1953 1952 did, mb->mbxStatus, vport->port_state, 1954 1953 mb->un.varRegLogin.vpi,
+29 -38
drivers/scsi/lpfc/lpfc_nvme.c
··· 498 498 if (pnvme_lsreq->done) 499 499 pnvme_lsreq->done(pnvme_lsreq, status); 500 500 else 501 - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, 501 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 502 502 "6046 NVMEx cmpl without done call back? " 503 503 "Data %px DID %x Xri: %x status %x\n", 504 504 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0, ··· 647 647 648 648 rc = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], genwqe); 649 649 if (rc) { 650 - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC | LOG_ELS, 650 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 651 651 "6045 Issue GEN REQ WQE to NPORT x%x " 652 652 "Data: x%x x%x rc x%x\n", 653 653 ndlp->nlp_DID, genwqe->iotag, ··· 693 693 uint16_t ntype, nstate; 694 694 695 695 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 696 - lpfc_printf_vlog(vport, KERN_ERR, 697 - LOG_NVME_DISC | LOG_NODE | LOG_NVME_IOERR, 696 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 698 697 "6051 NVMEx LS REQ: Bad NDLP x%px, Failing " 699 698 "LS Req\n", 700 699 ndlp); ··· 704 705 nstate = ndlp->nlp_state; 705 706 if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) || 706 707 (ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) { 707 - lpfc_printf_vlog(vport, KERN_ERR, 708 - LOG_NVME_DISC | LOG_NODE | LOG_NVME_IOERR, 708 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 709 709 "6088 NVMEx LS REQ: Fail DID x%06x not " 710 710 "ready for IO. 
Type x%x, State x%x\n", 711 711 ndlp->nlp_DID, ntype, nstate); ··· 725 727 726 728 bmp = kmalloc(sizeof(*bmp), GFP_KERNEL); 727 729 if (!bmp) { 728 - 729 - lpfc_printf_vlog(vport, KERN_ERR, 730 - LOG_NVME_DISC | LOG_NVME_IOERR, 730 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 731 731 "6044 NVMEx LS REQ: Could not alloc LS buf " 732 732 "for DID %x\n", 733 733 ndlp->nlp_DID); ··· 734 738 735 739 bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys)); 736 740 if (!bmp->virt) { 737 - lpfc_printf_vlog(vport, KERN_ERR, 738 - LOG_NVME_DISC | LOG_NVME_IOERR, 741 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 739 742 "6042 NVMEx LS REQ: Could not alloc mbuf " 740 743 "for DID %x\n", 741 744 ndlp->nlp_DID); ··· 769 774 pnvme_lsreq, gen_req_cmp, ndlp, 2, 770 775 LPFC_NVME_LS_TIMEOUT, 0); 771 776 if (ret != WQE_SUCCESS) { 772 - lpfc_printf_vlog(vport, KERN_ERR, 773 - LOG_NVME_DISC | LOG_NVME_IOERR, 777 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 774 778 "6052 NVMEx REQ: EXIT. issue ls wqe failed " 775 779 "lsreq x%px Status %x DID %x\n", 776 780 pnvme_lsreq, ret, ndlp->nlp_DID); ··· 847 853 bool foundit = false; 848 854 849 855 if (!ndlp) { 850 - lpfc_printf_log(phba, KERN_ERR, 851 - LOG_NVME_DISC | LOG_NODE | 852 - LOG_NVME_IOERR | LOG_NVME_ABTS, 856 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 853 857 "6049 NVMEx LS REQ Abort: Bad NDLP x%px DID " 854 858 "x%06x, Failing LS Req\n", 855 859 ndlp, ndlp ? ndlp->nlp_DID : 0); ··· 1091 1099 1092 1100 /* Sanity check on return of outstanding command */ 1093 1101 if (!lpfc_ncmd) { 1094 - lpfc_printf_vlog(vport, KERN_ERR, 1095 - LOG_NODE | LOG_NVME_IOERR, 1102 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1096 1103 "6071 Null lpfc_ncmd pointer. 
No " 1097 1104 "release, skip completion\n"); 1098 1105 return; ··· 1102 1111 1103 1112 if (!lpfc_ncmd->nvmeCmd) { 1104 1113 spin_unlock(&lpfc_ncmd->buf_lock); 1105 - lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR, 1114 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1106 1115 "6066 Missing cmpl ptrs: lpfc_ncmd x%px, " 1107 1116 "nvmeCmd x%px\n", 1108 1117 lpfc_ncmd, lpfc_ncmd->nvmeCmd); ··· 1135 1144 */ 1136 1145 ndlp = lpfc_ncmd->ndlp; 1137 1146 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 1138 - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, 1147 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1139 1148 "6062 Ignoring NVME cmpl. No ndlp\n"); 1140 1149 goto out_err; 1141 1150 } ··· 1206 1215 /* Sanity check */ 1207 1216 if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN) 1208 1217 break; 1209 - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, 1218 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1210 1219 "6081 NVME Completion Protocol Error: " 1211 1220 "xri %x status x%x result x%x " 1212 1221 "placed x%x\n", ··· 1450 1459 first_data_sgl = sgl; 1451 1460 lpfc_ncmd->seg_cnt = nCmd->sg_cnt; 1452 1461 if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) { 1453 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1462 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1454 1463 "6058 Too many sg segments from " 1455 1464 "NVME Transport. Max %d, " 1456 1465 "nvmeIO sg_cnt %d\n", ··· 1473 1482 j = 2; 1474 1483 for (i = 0; i < nseg; i++) { 1475 1484 if (data_sg == NULL) { 1476 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1485 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1477 1486 "6059 dptr err %d, nseg %d\n", 1478 1487 i, nseg); 1479 1488 lpfc_ncmd->seg_cnt = 0; ··· 1574 1583 * and sg_cnt must zero. 
1575 1584 */ 1576 1585 if (nCmd->payload_length != 0) { 1577 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1586 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1578 1587 "6063 NVME DMA Prep Err: sg_cnt %d " 1579 1588 "payload_length x%x\n", 1580 1589 nCmd->sg_cnt, nCmd->payload_length); ··· 1937 1946 /* driver queued commands are in process of being flushed */ 1938 1947 if (phba->hba_flag & HBA_IOQ_FLUSH) { 1939 1948 spin_unlock_irqrestore(&phba->hbalock, flags); 1940 - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, 1949 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1941 1950 "6139 Driver in reset cleanup - flushing " 1942 1951 "NVME Req now. hba_flag x%x\n", 1943 1952 phba->hba_flag); ··· 1947 1956 lpfc_nbuf = freqpriv->nvme_buf; 1948 1957 if (!lpfc_nbuf) { 1949 1958 spin_unlock_irqrestore(&phba->hbalock, flags); 1950 - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, 1959 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1951 1960 "6140 NVME IO req has no matching lpfc nvme " 1952 1961 "io buffer. Skipping abort req.\n"); 1953 1962 return; 1954 1963 } else if (!lpfc_nbuf->nvmeCmd) { 1955 1964 spin_unlock_irqrestore(&phba->hbalock, flags); 1956 - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, 1965 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1957 1966 "6141 lpfc NVME IO req has no nvme_fcreq " 1958 1967 "io buffer. Skipping abort req.\n"); 1959 1968 return; ··· 1971 1980 * has not seen it yet. 1972 1981 */ 1973 1982 if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) { 1974 - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, 1983 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1975 1984 "6143 NVME req mismatch: " 1976 1985 "lpfc_nbuf x%px nvmeCmd x%px, " 1977 1986 "pnvme_fcreq x%px. Skipping Abort xri x%x\n", ··· 1982 1991 1983 1992 /* Don't abort IOs no longer on the pending queue. 
*/ 1984 1993 if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) { 1985 - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, 1994 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1986 1995 "6142 NVME IO req x%px not queued - skipping " 1987 1996 "abort req xri x%x\n", 1988 1997 pnvme_fcreq, nvmereq_wqe->sli4_xritag); ··· 1996 2005 1997 2006 /* Outstanding abort is in progress */ 1998 2007 if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) { 1999 - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, 2008 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2000 2009 "6144 Outstanding NVME I/O Abort Request " 2001 2010 "still pending on nvme_fcreq x%px, " 2002 2011 "lpfc_ncmd %px xri x%x\n", ··· 2007 2016 2008 2017 abts_buf = __lpfc_sli_get_iocbq(phba); 2009 2018 if (!abts_buf) { 2010 - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, 2019 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2011 2020 "6136 No available abort wqes. Skipping " 2012 2021 "Abts req for nvme_fcreq x%px xri x%x\n", 2013 2022 pnvme_fcreq, nvmereq_wqe->sli4_xritag); ··· 2028 2037 spin_unlock(&lpfc_nbuf->buf_lock); 2029 2038 spin_unlock_irqrestore(&phba->hbalock, flags); 2030 2039 if (ret_val) { 2031 - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, 2040 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2032 2041 "6137 Failed abts issue_wqe with status x%x " 2033 2042 "for nvme_fcreq x%px.\n", 2034 2043 ret_val, pnvme_fcreq); ··· 2301 2310 if (pring->txcmplq_cnt) 2302 2311 pending += pring->txcmplq_cnt; 2303 2312 } 2304 - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, 2313 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2305 2314 "6176 Lport x%px Localport x%px wait " 2306 2315 "timed out. Pending %d. 
Renewing.\n", 2307 2316 lport, vport->localport, pending); ··· 2519 2528 ndlp, prev_ndlp); 2520 2529 } else { 2521 2530 lpfc_printf_vlog(vport, KERN_ERR, 2522 - LOG_NVME_DISC | LOG_NODE, 2531 + LOG_TRACE_EVENT, 2523 2532 "6031 RemotePort Registration failed " 2524 2533 "err: %d, DID x%06x\n", 2525 2534 ret, ndlp->nlp_DID); ··· 2565 2574 ndlp->nlp_state == NLP_STE_MAPPED_NODE) { 2566 2575 nvme_fc_rescan_remoteport(remoteport); 2567 2576 2568 - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, 2577 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2569 2578 "6172 NVME rescanned DID x%06x " 2570 2579 "port_state x%x\n", 2571 2580 ndlp->nlp_DID, remoteport->port_state); ··· 2648 2657 ret = nvme_fc_unregister_remoteport(remoteport); 2649 2658 if (ret != 0) { 2650 2659 lpfc_nlp_put(ndlp); 2651 - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, 2660 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2652 2661 "6167 NVME unregister failed %d " 2653 2662 "port_state x%x\n", 2654 2663 ret, remoteport->port_state); ··· 2658 2667 2659 2668 input_err: 2660 2669 #endif 2661 - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, 2670 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2662 2671 "6168 State error: lport x%px, rport x%px FCID x%06x\n", 2663 2672 vport->localport, ndlp->rport, ndlp->nlp_DID); 2664 2673 } ··· 2743 2752 * dump a message. Something is wrong. 2744 2753 */ 2745 2754 if ((wait_cnt % 1000) == 0) { 2746 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 2755 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2747 2756 "6178 NVME IO not empty, " 2748 2757 "cnt %d\n", wait_cnt); 2749 2758 }
+47 -49
drivers/scsi/lpfc/lpfc_nvmet.c
··· 303 303 result = wcqe->parameter; 304 304 305 305 if (axchg->state != LPFC_NVME_STE_LS_RSP || axchg->entry_cnt != 2) { 306 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC | LOG_NVME_IOERR, 306 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 307 307 "6410 NVMEx LS cmpl state mismatch IO x%x: " 308 308 "%d %d\n", 309 309 axchg->oxid, axchg->state, axchg->entry_cnt); ··· 395 395 unsigned long iflag; 396 396 397 397 if (ctxp->state == LPFC_NVME_STE_FREE) { 398 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 398 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 399 399 "6411 NVMET free, already free IO x%x: %d %d\n", 400 400 ctxp->oxid, ctxp->state, ctxp->entry_cnt); 401 401 } ··· 474 474 475 475 if (!queue_work(phba->wq, &ctx_buf->defer_work)) { 476 476 atomic_inc(&tgtp->rcv_fcp_cmd_drop); 477 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, 477 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 478 478 "6181 Unable to queue deferred work " 479 479 "for oxid x%x. " 480 480 "FCP Drop IO [x%x x%x x%x]\n", ··· 879 879 "6023 NVMEx LS rsp oxid x%x\n", axchg->oxid); 880 880 881 881 if (axchg->state != LPFC_NVME_STE_LS_RCV || axchg->entry_cnt != 1) { 882 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC | LOG_NVME_IOERR, 882 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 883 883 "6412 NVMEx LS rsp state mismatch " 884 884 "oxid x%x: %d %d\n", 885 885 axchg->oxid, axchg->state, axchg->entry_cnt); ··· 891 891 nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, axchg, ls_rsp->rspdma, 892 892 ls_rsp->rsplen); 893 893 if (nvmewqeq == NULL) { 894 - lpfc_printf_log(phba, KERN_ERR, 895 - LOG_NVME_DISC | LOG_NVME_IOERR | LOG_NVME_ABTS, 894 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 896 895 "6150 NVMEx LS Drop Rsp x%x: Prep\n", 897 896 axchg->oxid); 898 897 rc = -ENOMEM; ··· 935 936 return 0; 936 937 } 937 938 938 - lpfc_printf_log(phba, KERN_ERR, 939 - LOG_NVME_DISC | LOG_NVME_IOERR | LOG_NVME_ABTS, 939 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 940 940 "6151 NVMEx LS RSP 
x%x: failed to transmit %d\n", 941 941 axchg->oxid, rc); 942 942 ··· 1056 1058 if ((ctxp->flag & LPFC_NVME_ABTS_RCV) || 1057 1059 (ctxp->state == LPFC_NVME_STE_ABORT)) { 1058 1060 atomic_inc(&lpfc_nvmep->xmt_fcp_drop); 1059 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1061 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1060 1062 "6102 IO oxid x%x aborted\n", 1061 1063 ctxp->oxid); 1062 1064 rc = -ENXIO; ··· 1066 1068 nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp); 1067 1069 if (nvmewqeq == NULL) { 1068 1070 atomic_inc(&lpfc_nvmep->xmt_fcp_drop); 1069 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1071 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1070 1072 "6152 FCP Drop IO x%x: Prep\n", 1071 1073 ctxp->oxid); 1072 1074 rc = -ENXIO; ··· 1114 1116 1115 1117 /* Give back resources */ 1116 1118 atomic_inc(&lpfc_nvmep->xmt_fcp_drop); 1117 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1119 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1118 1120 "6153 FCP Drop IO x%x: Issue: %d\n", 1119 1121 ctxp->oxid, rc); 1120 1122 ··· 1214 1216 ctxp->flag, ctxp->oxid); 1215 1217 else if (ctxp->state != LPFC_NVME_STE_DONE && 1216 1218 ctxp->state != LPFC_NVME_STE_ABORT) 1217 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1219 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1218 1220 "6413 NVMET release bad state %d %d oxid x%x\n", 1219 1221 ctxp->state, ctxp->entry_cnt, ctxp->oxid); 1220 1222 ··· 1393 1395 phba = tgtp->phba; 1394 1396 1395 1397 rc = lpfc_issue_els_rscn(phba->pport, 0); 1396 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, 1398 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1397 1399 "6420 NVMET subsystem change: Notification %s\n", 1398 1400 (rc) ? 
"Failed" : "Sent"); 1399 1401 } ··· 1491 1493 phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq, 1492 1494 sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL); 1493 1495 if (!phba->sli4_hba.nvmet_ctx_info) { 1494 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1496 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1495 1497 "6419 Failed allocate memory for " 1496 1498 "nvmet context lists\n"); 1497 1499 return -ENOMEM; ··· 1549 1551 for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) { 1550 1552 ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL); 1551 1553 if (!ctx_buf) { 1552 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, 1554 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1553 1555 "6404 Ran out of memory for NVMET\n"); 1554 1556 return -ENOMEM; 1555 1557 } ··· 1558 1560 GFP_KERNEL); 1559 1561 if (!ctx_buf->context) { 1560 1562 kfree(ctx_buf); 1561 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, 1563 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1562 1564 "6405 Ran out of NVMET " 1563 1565 "context memory\n"); 1564 1566 return -ENOMEM; ··· 1570 1572 if (!ctx_buf->iocbq) { 1571 1573 kfree(ctx_buf->context); 1572 1574 kfree(ctx_buf); 1573 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, 1575 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1574 1576 "6406 Ran out of NVMET iocb/WQEs\n"); 1575 1577 return -ENOMEM; 1576 1578 } ··· 1589 1591 lpfc_sli_release_iocbq(phba, ctx_buf->iocbq); 1590 1592 kfree(ctx_buf->context); 1591 1593 kfree(ctx_buf); 1592 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, 1594 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1593 1595 "6407 Ran out of NVMET XRIs\n"); 1594 1596 return -ENOMEM; 1595 1597 } ··· 1668 1670 error = -ENOENT; 1669 1671 #endif 1670 1672 if (error) { 1671 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, 1673 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1672 1674 "6025 Cannot register NVME targetport x%x: " 1673 1675 "portnm %llx nodenm %llx segs %d qs %d\n", 1674 1676 error, ··· 2112 2114 
nvmet_fc_unregister_targetport(phba->targetport); 2113 2115 if (!wait_for_completion_timeout(tgtp->tport_unreg_cmp, 2114 2116 msecs_to_jiffies(LPFC_NVMET_WAIT_TMO))) 2115 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, 2117 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2116 2118 "6179 Unreg targetport x%px timeout " 2117 2119 "reached.\n", phba->targetport); 2118 2120 lpfc_nvmet_cleanup_io_context(phba); ··· 2185 2187 unsigned long iflags; 2186 2188 2187 2189 if (!nvmebuf) { 2188 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 2190 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2189 2191 "6159 process_rcv_fcp_req, nvmebuf is NULL, " 2190 2192 "oxid: x%x flg: x%x state: x%x\n", 2191 2193 ctxp->oxid, ctxp->flag, ctxp->state); ··· 2198 2200 } 2199 2201 2200 2202 if (ctxp->flag & LPFC_NVME_ABTS_RCV) { 2201 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 2203 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2202 2204 "6324 IO oxid x%x aborted\n", 2203 2205 ctxp->oxid); 2204 2206 return; ··· 2262 2264 } 2263 2265 ctxp->flag &= ~LPFC_NVME_TNOTIFY; 2264 2266 atomic_inc(&tgtp->rcv_fcp_cmd_drop); 2265 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 2267 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2266 2268 "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n", 2267 2269 ctxp->oxid, rc, 2268 2270 atomic_read(&tgtp->rcv_fcp_cmd_in), ··· 2381 2383 2382 2384 ctx_buf = NULL; 2383 2385 if (!nvmebuf || !phba->targetport) { 2384 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 2386 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2385 2387 "6157 NVMET FCP Drop IO\n"); 2386 2388 if (nvmebuf) 2387 2389 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); ··· 2454 2456 list_add_tail(&ctxp->list, &phba->sli4_hba.t_active_ctx_list); 2455 2457 spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag); 2456 2458 if (ctxp->state != LPFC_NVME_STE_FREE) { 2457 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 2459 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2458 2460 "6414 
NVMET Context corrupt %d %d oxid x%x\n", 2459 2461 ctxp->state, ctxp->entry_cnt, ctxp->oxid); 2460 2462 } ··· 2496 2498 2497 2499 if (!queue_work(phba->wq, &ctx_buf->defer_work)) { 2498 2500 atomic_inc(&tgtp->rcv_fcp_cmd_drop); 2499 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, 2501 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2500 2502 "6325 Unable to queue work for oxid x%x. " 2501 2503 "FCP Drop IO [x%x x%x x%x]\n", 2502 2504 ctxp->oxid, ··· 2533 2535 uint8_t cqflag) 2534 2536 { 2535 2537 if (!nvmebuf) { 2536 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 2538 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2537 2539 "3167 NVMET FCP Drop IO\n"); 2538 2540 return; 2539 2541 } ··· 2579 2581 union lpfc_wqe128 *wqe; 2580 2582 2581 2583 if (!lpfc_is_link_up(phba)) { 2582 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, 2584 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2583 2585 "6104 NVMET prep LS wqe: link err: " 2584 2586 "NPORT x%x oxid:x%x ste %d\n", 2585 2587 ctxp->sid, ctxp->oxid, ctxp->state); ··· 2589 2591 /* Allocate buffer for command wqe */ 2590 2592 nvmewqe = lpfc_sli_get_iocbq(phba); 2591 2593 if (nvmewqe == NULL) { 2592 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, 2594 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2593 2595 "6105 NVMET prep LS wqe: No WQE: " 2594 2596 "NPORT x%x oxid x%x ste %d\n", 2595 2597 ctxp->sid, ctxp->oxid, ctxp->state); ··· 2600 2602 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || 2601 2603 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 2602 2604 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { 2603 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, 2605 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2604 2606 "6106 NVMET prep LS wqe: No ndlp: " 2605 2607 "NPORT x%x oxid x%x ste %d\n", 2606 2608 ctxp->sid, ctxp->oxid, ctxp->state); ··· 2709 2711 int xc = 1; 2710 2712 2711 2713 if (!lpfc_is_link_up(phba)) { 2712 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 2714 + lpfc_printf_log(phba, KERN_ERR, 
LOG_TRACE_EVENT, 2713 2715 "6107 NVMET prep FCP wqe: link err:" 2714 2716 "NPORT x%x oxid x%x ste %d\n", 2715 2717 ctxp->sid, ctxp->oxid, ctxp->state); ··· 2720 2722 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || 2721 2723 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 2722 2724 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { 2723 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 2725 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2724 2726 "6108 NVMET prep FCP wqe: no ndlp: " 2725 2727 "NPORT x%x oxid x%x ste %d\n", 2726 2728 ctxp->sid, ctxp->oxid, ctxp->state); ··· 2728 2730 } 2729 2731 2730 2732 if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) { 2731 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 2733 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2732 2734 "6109 NVMET prep FCP wqe: seg cnt err: " 2733 2735 "NPORT x%x oxid x%x ste %d cnt %d\n", 2734 2736 ctxp->sid, ctxp->oxid, ctxp->state, ··· 2743 2745 /* Allocate buffer for command wqe */ 2744 2746 nvmewqe = ctxp->ctxbuf->iocbq; 2745 2747 if (nvmewqe == NULL) { 2746 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 2748 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2747 2749 "6110 NVMET prep FCP wqe: No " 2748 2750 "WQE: NPORT x%x oxid x%x ste %d\n", 2749 2751 ctxp->sid, ctxp->oxid, ctxp->state); ··· 2761 2763 (ctxp->state == LPFC_NVME_STE_DATA)) { 2762 2764 wqe = &nvmewqe->wqe; 2763 2765 } else { 2764 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 2766 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2765 2767 "6111 Wrong state NVMET FCP: %d cnt %d\n", 2766 2768 ctxp->state, ctxp->entry_cnt); 2767 2769 return NULL; ··· 3134 3136 3135 3137 /* Sanity check */ 3136 3138 if (ctxp->state != LPFC_NVME_STE_ABORT) { 3137 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, 3139 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3138 3140 "6112 ABTS Wrong state:%d oxid x%x\n", 3139 3141 ctxp->state, ctxp->oxid); 3140 3142 } ··· 3208 3210 result, wcqe->word3); 3209 3211 3210 3212 if (!ctxp) { 3211 - 
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, 3213 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3212 3214 "6415 NVMET LS Abort No ctx: WCQE: " 3213 3215 "%08x %08x %08x %08x\n", 3214 3216 wcqe->word0, wcqe->total_data_placed, ··· 3219 3221 } 3220 3222 3221 3223 if (ctxp->state != LPFC_NVME_STE_LS_ABORT) { 3222 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 3224 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3223 3225 "6416 NVMET LS abort cmpl state mismatch: " 3224 3226 "oxid x%x: %d %d\n", 3225 3227 ctxp->oxid, ctxp->state, ctxp->entry_cnt); ··· 3254 3256 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { 3255 3257 if (tgtp) 3256 3258 atomic_inc(&tgtp->xmt_abort_rsp_error); 3257 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, 3259 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3258 3260 "6134 Drop ABTS - wrong NDLP state x%x.\n", 3259 3261 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE); 3260 3262 ··· 3351 3353 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 3352 3354 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { 3353 3355 atomic_inc(&tgtp->xmt_abort_rsp_error); 3354 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, 3356 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3355 3357 "6160 Drop ABORT - wrong NDLP state x%x.\n", 3356 3358 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE); 3357 3359 ··· 3367 3369 spin_lock_irqsave(&ctxp->ctxlock, flags); 3368 3370 if (!ctxp->abort_wqeq) { 3369 3371 atomic_inc(&tgtp->xmt_abort_rsp_error); 3370 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, 3372 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3371 3373 "6161 ABORT failed: No wqeqs: " 3372 3374 "xri: x%x\n", ctxp->oxid); 3373 3375 /* No failure to an ABTS request. 
*/ ··· 3394 3396 if (phba->hba_flag & HBA_IOQ_FLUSH) { 3395 3397 spin_unlock_irqrestore(&phba->hbalock, flags); 3396 3398 atomic_inc(&tgtp->xmt_abort_rsp_error); 3397 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, 3399 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3398 3400 "6163 Driver in reset cleanup - flushing " 3399 3401 "NVME Req now. hba_flag x%x oxid x%x\n", 3400 3402 phba->hba_flag, ctxp->oxid); ··· 3409 3411 if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) { 3410 3412 spin_unlock_irqrestore(&phba->hbalock, flags); 3411 3413 atomic_inc(&tgtp->xmt_abort_rsp_error); 3412 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, 3414 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3413 3415 "6164 Outstanding NVME I/O Abort Request " 3414 3416 "still pending on oxid x%x\n", 3415 3417 ctxp->oxid); ··· 3447 3449 ctxp->flag &= ~LPFC_NVME_ABORT_OP; 3448 3450 spin_unlock_irqrestore(&ctxp->ctxlock, flags); 3449 3451 lpfc_sli_release_iocbq(phba, abts_wqeq); 3450 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, 3452 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3451 3453 "6166 Failed ABORT issue_wqe with status x%x " 3452 3454 "for oxid x%x.\n", 3453 3455 rc, ctxp->oxid); ··· 3472 3474 } 3473 3475 3474 3476 if (ctxp->state == LPFC_NVME_STE_FREE) { 3475 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 3477 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3476 3478 "6417 NVMET ABORT ctx freed %d %d oxid x%x\n", 3477 3479 ctxp->state, ctxp->entry_cnt, ctxp->oxid); 3478 3480 rc = WQE_BUSY; ··· 3510 3512 spin_unlock_irqrestore(&ctxp->ctxlock, flags); 3511 3513 3512 3514 atomic_inc(&tgtp->xmt_abort_rsp_error); 3513 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, 3515 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3514 3516 "6135 Failed to Issue ABTS for oxid x%x. 
Status x%x " 3515 3517 "(%x)\n", 3516 3518 ctxp->oxid, rc, released); ··· 3542 3544 ctxp->state = LPFC_NVME_STE_LS_ABORT; 3543 3545 ctxp->entry_cnt++; 3544 3546 } else { 3545 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 3547 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3546 3548 "6418 NVMET LS abort state mismatch " 3547 3549 "IO x%x: %d %d\n", 3548 3550 ctxp->oxid, ctxp->state, ctxp->entry_cnt); ··· 3556 3558 /* Issue ABTS for this WQE based on iotag */ 3557 3559 ctxp->wqeq = lpfc_sli_get_iocbq(phba); 3558 3560 if (!ctxp->wqeq) { 3559 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, 3561 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3560 3562 "6068 Abort failed: No wqeqs: " 3561 3563 "xri: x%x\n", xri); 3562 3564 /* No failure to an ABTS request. */ ··· 3588 3590 abts_wqeq->context2 = NULL; 3589 3591 abts_wqeq->context3 = NULL; 3590 3592 lpfc_sli_release_iocbq(phba, abts_wqeq); 3591 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, 3593 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3592 3594 "6056 Failed to Issue ABTS. Status x%x\n", rc); 3593 3595 return 1; 3594 3596 }
+64 -61
drivers/scsi/lpfc/lpfc_scsi.c
··· 867 867 868 868 lpfc_cmd->seg_cnt = nseg; 869 869 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 870 - lpfc_printf_log(phba, KERN_ERR, LOG_BG, 871 - "9064 BLKGRD: %s: Too many sg segments from " 872 - "dma_map_sg. Config %d, seg_cnt %d\n", 873 - __func__, phba->cfg_sg_seg_cnt, 874 - lpfc_cmd->seg_cnt); 870 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 871 + "9064 BLKGRD: %s: Too many sg segments" 872 + " from dma_map_sg. Config %d, seg_cnt" 873 + " %d\n", __func__, phba->cfg_sg_seg_cnt, 874 + lpfc_cmd->seg_cnt); 875 875 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); 876 876 lpfc_cmd->seg_cnt = 0; 877 877 scsi_dma_unmap(scsi_cmnd); ··· 1061 1061 * inserted in middle of the IO. 1062 1062 */ 1063 1063 1064 - lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1064 + lpfc_printf_log(phba, KERN_ERR, 1065 + LOG_TRACE_EVENT, 1065 1066 "9076 BLKGRD: Injecting reftag error: " 1066 1067 "write lba x%lx + x%x oldrefTag x%x\n", 1067 1068 (unsigned long)lba, blockoff, ··· 1112 1111 } 1113 1112 rc = BG_ERR_TGT | BG_ERR_CHECK; 1114 1113 1115 - lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1114 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1116 1115 "9078 BLKGRD: Injecting reftag error: " 1117 1116 "write lba x%lx\n", (unsigned long)lba); 1118 1117 break; ··· 1133 1132 } 1134 1133 rc = BG_ERR_INIT; 1135 1134 1136 - lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1135 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1137 1136 "9077 BLKGRD: Injecting reftag error: " 1138 1137 "write lba x%lx\n", (unsigned long)lba); 1139 1138 break; ··· 1160 1159 } 1161 1160 rc = BG_ERR_INIT; 1162 1161 1163 - lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1162 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1164 1163 "9079 BLKGRD: Injecting reftag error: " 1165 1164 "read lba x%lx\n", (unsigned long)lba); 1166 1165 break; ··· 1182 1181 * inserted in middle of the IO. 
1183 1182 */ 1184 1183 1185 - lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1184 + lpfc_printf_log(phba, KERN_ERR, 1185 + LOG_TRACE_EVENT, 1186 1186 "9080 BLKGRD: Injecting apptag error: " 1187 1187 "write lba x%lx + x%x oldappTag x%x\n", 1188 1188 (unsigned long)lba, blockoff, ··· 1232 1230 } 1233 1231 rc = BG_ERR_TGT | BG_ERR_CHECK; 1234 1232 1235 - lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1233 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1236 1234 "0813 BLKGRD: Injecting apptag error: " 1237 1235 "write lba x%lx\n", (unsigned long)lba); 1238 1236 break; ··· 1253 1251 } 1254 1252 rc = BG_ERR_INIT; 1255 1253 1256 - lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1254 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1257 1255 "0812 BLKGRD: Injecting apptag error: " 1258 1256 "write lba x%lx\n", (unsigned long)lba); 1259 1257 break; ··· 1280 1278 } 1281 1279 rc = BG_ERR_INIT; 1282 1280 1283 - lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1281 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1284 1282 "0814 BLKGRD: Injecting apptag error: " 1285 1283 "read lba x%lx\n", (unsigned long)lba); 1286 1284 break; ··· 1315 1313 rc |= BG_ERR_TGT | BG_ERR_SWAP; 1316 1314 /* Signals the caller to swap CRC->CSUM */ 1317 1315 1318 - lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1316 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1319 1317 "0817 BLKGRD: Injecting guard error: " 1320 1318 "write lba x%lx\n", (unsigned long)lba); 1321 1319 break; ··· 1337 1335 rc = BG_ERR_INIT | BG_ERR_SWAP; 1338 1336 /* Signals the caller to swap CRC->CSUM */ 1339 1337 1340 - lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1338 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1341 1339 "0816 BLKGRD: Injecting guard error: " 1342 1340 "write lba x%lx\n", (unsigned long)lba); 1343 1341 break; ··· 1365 1363 rc = BG_ERR_INIT | BG_ERR_SWAP; 1366 1364 /* Signals the caller to swap CRC->CSUM */ 1367 1365 1368 - lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1366 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1369 1367 "0818 
BLKGRD: Injecting guard error: " 1370 1368 "read lba x%lx\n", (unsigned long)lba); 1371 1369 } ··· 1415 1413 1416 1414 case SCSI_PROT_NORMAL: 1417 1415 default: 1418 - lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1416 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1419 1417 "9063 BLKGRD: Bad op/guard:%d/IP combination\n", 1420 1418 scsi_get_prot_op(sc)); 1421 1419 ret = 1; ··· 1444 1442 1445 1443 case SCSI_PROT_NORMAL: 1446 1444 default: 1447 - lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1445 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1448 1446 "9075 BLKGRD: Bad op/guard:%d/CRC combination\n", 1449 1447 scsi_get_prot_op(sc)); 1450 1448 ret = 1; ··· 1730 1728 sgde = scsi_sglist(sc); 1731 1729 1732 1730 if (!sgpe || !sgde) { 1733 - lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1731 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1734 1732 "9020 Invalid s/g entry: data=x%px prot=x%px\n", 1735 1733 sgpe, sgde); 1736 1734 return 0; ··· 1842 1840 return num_bde + 1; 1843 1841 1844 1842 if (!sgde) { 1845 - lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1843 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1846 1844 "9065 BLKGRD:%s Invalid data segment\n", 1847 1845 __func__); 1848 1846 return 0; ··· 1905 1903 reftag += protgrp_blks; 1906 1904 } else { 1907 1905 /* if we're here, we have a bug */ 1908 - lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1909 - "9054 BLKGRD: bug in %s\n", __func__); 1906 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1907 + "9054 BLKGRD: bug in %s\n", __func__); 1910 1908 } 1911 1909 1912 1910 } while (!alldone); ··· 2156 2154 sgde = scsi_sglist(sc); 2157 2155 2158 2156 if (!sgpe || !sgde) { 2159 - lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 2157 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2160 2158 "9082 Invalid s/g entry: data=x%px prot=x%px\n", 2161 2159 sgpe, sgde); 2162 2160 return 0; ··· 2309 2307 return num_sge + 1; 2310 2308 2311 2309 if (!sgde) { 2312 - lpfc_printf_log(phba, KERN_ERR, LOG_BG, 2310 + lpfc_printf_log(phba, KERN_ERR, 
LOG_TRACE_EVENT, 2313 2311 "9086 BLKGRD:%s Invalid data segment\n", 2314 2312 __func__); 2315 2313 return 0; ··· 2414 2412 reftag += protgrp_blks; 2415 2413 } else { 2416 2414 /* if we're here, we have a bug */ 2417 - lpfc_printf_log(phba, KERN_ERR, LOG_BG, 2418 - "9085 BLKGRD: bug in %s\n", __func__); 2415 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2416 + "9085 BLKGRD: bug in %s\n", __func__); 2419 2417 } 2420 2418 2421 2419 } while (!alldone); ··· 2455 2453 break; 2456 2454 default: 2457 2455 if (phba) 2458 - lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 2456 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2459 2457 "9021 Unsupported protection op:%d\n", 2460 2458 op); 2461 2459 break; ··· 2619 2617 scsi_dma_unmap(scsi_cmnd); 2620 2618 lpfc_cmd->seg_cnt = 0; 2621 2619 2622 - lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 2620 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2623 2621 "9022 Unexpected protection group %i\n", 2624 2622 prot_group_type); 2625 2623 return 2; ··· 2663 2661 scsi_prot_sg_count(scsi_cmnd), 2664 2662 scsi_cmnd->sc_data_direction); 2665 2663 2666 - lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 2664 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2667 2665 "9023 Cannot setup S/G List for HBA" 2668 2666 "IO segs %d/%d BPL %d SCSI %d: %d %d\n", 2669 2667 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt, ··· 3087 3085 lpfc_cmd->seg_cnt = nseg; 3088 3086 if (!phba->cfg_xpsgl && 3089 3087 lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 3090 - lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:" 3091 - " %s: Too many sg segments from " 3092 - "dma_map_sg. Config %d, seg_cnt %d\n", 3093 - __func__, phba->cfg_sg_seg_cnt, 3094 - lpfc_cmd->seg_cnt); 3088 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3089 + "9074 BLKGRD:" 3090 + " %s: Too many sg segments from " 3091 + "dma_map_sg. 
Config %d, seg_cnt %d\n", 3092 + __func__, phba->cfg_sg_seg_cnt, 3093 + lpfc_cmd->seg_cnt); 3095 3094 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); 3096 3095 lpfc_cmd->seg_cnt = 0; 3097 3096 scsi_dma_unmap(scsi_cmnd); ··· 3369 3366 scsi_dma_unmap(scsi_cmnd); 3370 3367 lpfc_cmd->seg_cnt = 0; 3371 3368 3372 - lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 3369 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3373 3370 "9083 Unexpected protection group %i\n", 3374 3371 prot_group_type); 3375 3372 return 2; ··· 3425 3422 scsi_prot_sg_count(scsi_cmnd), 3426 3423 scsi_cmnd->sc_data_direction); 3427 3424 3428 - lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 3425 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3429 3426 "9084 Cannot setup S/G List for HBA" 3430 3427 "IO segs %d/%d SGL %d SCSI %d: %d %d\n", 3431 3428 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt, ··· 3635 3632 if (resp_info & RSP_LEN_VALID) { 3636 3633 rsplen = be32_to_cpu(fcprsp->rspRspLen); 3637 3634 if (rsplen != 0 && rsplen != 4 && rsplen != 8) { 3638 - lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3639 - "2719 Invalid response length: " 3640 - "tgt x%x lun x%llx cmnd x%x rsplen x%x\n", 3641 - cmnd->device->id, 3642 - cmnd->device->lun, cmnd->cmnd[0], 3643 - rsplen); 3635 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3636 + "2719 Invalid response length: " 3637 + "tgt x%x lun x%llx cmnd x%x rsplen " 3638 + "x%x\n", cmnd->device->id, 3639 + cmnd->device->lun, cmnd->cmnd[0], 3640 + rsplen); 3644 3641 host_status = DID_ERROR; 3645 3642 goto out; 3646 3643 } 3647 3644 if (fcprsp->rspInfo3 != RSP_NO_FAILURE) { 3648 - lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3645 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3649 3646 "2757 Protocol failure detected during " 3650 3647 "processing of FCP I/O op: " 3651 3648 "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n", ··· 3815 3812 /* Sanity check on return of outstanding command */ 3816 3813 cmd = lpfc_cmd->pCmd; 3817 3814 if (!cmd || !phba) { 3818 - 
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 3815 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3819 3816 "2621 IO completion: Not an active IO\n"); 3820 3817 spin_unlock(&lpfc_cmd->buf_lock); 3821 3818 return; ··· 4280 4277 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4; 4281 4278 break; 4282 4279 default: 4283 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4280 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4284 4281 "1418 Invalid HBA PCI-device group: 0x%x\n", 4285 4282 dev_grp); 4286 4283 return -ENODEV; ··· 4327 4324 * 0, successful 4328 4325 */ 4329 4326 int 4330 - lpfc_check_pci_resettable(const struct lpfc_hba *phba) 4327 + lpfc_check_pci_resettable(struct lpfc_hba *phba) 4331 4328 { 4332 4329 const struct pci_dev *pdev = phba->pcidev; 4333 4330 struct pci_dev *ptr = NULL; ··· 4531 4528 if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) && 4532 4529 (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) { 4533 4530 4534 - lpfc_printf_log(phba, KERN_ERR, LOG_BG, 4531 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4535 4532 "9058 BLKGRD: ERROR: rcvd protected cmd:%02x" 4536 4533 " op:%02x str=%s without registering for" 4537 4534 " BlockGuard - Rejecting command\n", ··· 4890 4887 4891 4888 if (lpfc_cmd->pCmd == cmnd) { 4892 4889 ret = FAILED; 4893 - lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 4890 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4894 4891 "0748 abort handler timed out waiting " 4895 4892 "for aborting I/O (xri:x%x) to complete: " 4896 4893 "ret %#x, ID %d, LUN %llu\n", ··· 5083 5080 (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) { 5084 5081 if (status != IOCB_SUCCESS || 5085 5082 iocbqrsp->iocb.ulpStatus != IOSTAT_FCP_RSP_ERROR) 5086 - lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 5083 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5087 5084 "0727 TMF %s to TGT %d LUN %llu " 5088 5085 "failed (%d, %d) iocb_flag x%x\n", 5089 5086 lpfc_taskmgmt_name(task_mgmt_cmd), ··· 5198 5195 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context); 
5199 5196 } 5200 5197 if (cnt) { 5201 - lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 5198 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5202 5199 "0724 I/O flush failure for context %s : cnt x%x\n", 5203 5200 ((context == LPFC_CTX_LUN) ? "LUN" : 5204 5201 ((context == LPFC_CTX_TGT) ? "TGT" : ··· 5234 5231 5235 5232 rdata = lpfc_rport_data_from_scsi_device(cmnd->device); 5236 5233 if (!rdata || !rdata->pnode) { 5237 - lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 5234 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5238 5235 "0798 Device Reset rdata failure: rdata x%px\n", 5239 5236 rdata); 5240 5237 return FAILED; ··· 5246 5243 5247 5244 status = lpfc_chk_tgt_mapped(vport, cmnd); 5248 5245 if (status == FAILED) { 5249 - lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 5246 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5250 5247 "0721 Device Reset rport failure: rdata x%px\n", rdata); 5251 5248 return FAILED; 5252 5249 } ··· 5263 5260 status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id, 5264 5261 FCP_LUN_RESET); 5265 5262 5266 - lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 5263 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5267 5264 "0713 SCSI layer issued Device Reset (%d, %llu) " 5268 5265 "return x%x\n", tgt_id, lun_id, status); 5269 5266 ··· 5305 5302 5306 5303 rdata = lpfc_rport_data_from_scsi_device(cmnd->device); 5307 5304 if (!rdata || !rdata->pnode) { 5308 - lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 5305 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5309 5306 "0799 Target Reset rdata failure: rdata x%px\n", 5310 5307 rdata); 5311 5308 return FAILED; ··· 5317 5314 5318 5315 status = lpfc_chk_tgt_mapped(vport, cmnd); 5319 5316 if (status == FAILED) { 5320 - lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 5317 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5321 5318 "0722 Target Reset rport failure: rdata x%px\n", rdata); 5322 5319 if (pnode) { 5323 5320 spin_lock_irq(shost->host_lock); ··· 5342 5339 status = lpfc_send_taskmgmt(vport, 
cmnd, tgt_id, lun_id, 5343 5340 FCP_TARGET_RESET); 5344 5341 5345 - lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 5342 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5346 5343 "0723 SCSI layer issued Target Reset (%d, %llu) " 5347 5344 "return x%x\n", tgt_id, lun_id, status); 5348 5345 ··· 5423 5420 i, 0, FCP_TARGET_RESET); 5424 5421 5425 5422 if (status != SUCCESS) { 5426 - lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 5423 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5427 5424 "0700 Bus Reset on target %d failed\n", 5428 5425 i); 5429 5426 ret = FAILED; ··· 5440 5437 if (status != SUCCESS) 5441 5438 ret = FAILED; 5442 5439 5443 - lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 5440 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5444 5441 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret); 5445 5442 return ret; 5446 5443 } ··· 5469 5466 struct lpfc_hba *phba = vport->phba; 5470 5467 int rc, ret = SUCCESS; 5471 5468 5472 - lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 5469 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5473 5470 "3172 SCSI layer issued Host Reset Data:\n"); 5474 5471 5475 5472 lpfc_offline_prep(phba, LPFC_MBX_WAIT); ··· 5486 5483 5487 5484 return ret; 5488 5485 error: 5489 - lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 5486 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5490 5487 "3323 Failed host reset\n"); 5491 5488 lpfc_unblock_mgmt_io(phba); 5492 5489 return FAILED; ··· 5597 5594 } 5598 5595 num_allocated = lpfc_new_scsi_buf_s3(vport, num_to_alloc); 5599 5596 if (num_to_alloc != num_allocated) { 5600 - lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 5597 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5601 5598 "0708 Allocation request of %d " 5602 5599 "command buffers did not succeed. " 5603 5600 "Allocated %d buffers.\n",
+177 -178
drivers/scsi/lpfc/lpfc_sli.c
··· 1567 1567 lpfc_config_ring(phba, i, pmb); 1568 1568 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 1569 1569 if (rc != MBX_SUCCESS) { 1570 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1570 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1571 1571 "0446 Adapter failed to init (%d), " 1572 1572 "mbxCmd x%x CFG_RING, mbxStatus x%x, " 1573 1573 "ring %d\n", ··· 1676 1676 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); 1677 1677 1678 1678 if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) { 1679 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1679 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1680 1680 "0315 Ring %d issue: portCmdGet %d " 1681 1681 "is bigger than cmd ring %d\n", 1682 1682 pring->ringno, ··· 1962 1962 hbqp->local_hbqGetIdx = getidx; 1963 1963 1964 1964 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) { 1965 - lpfc_printf_log(phba, KERN_ERR, 1966 - LOG_SLI | LOG_VPORT, 1965 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1967 1966 "1802 HBQ %d: local_hbqGetIdx " 1968 1967 "%u is > than hbqp->entry_count %u\n", 1969 1968 hbqno, hbqp->local_hbqGetIdx, ··· 2301 2302 } 2302 2303 } 2303 2304 spin_unlock_irq(&phba->hbalock); 2304 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT, 2305 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2305 2306 "1803 Bad hbq tag. 
Data: x%x x%x\n", 2306 2307 tag, phba->hbqs[tag >> 16].buffer_count); 2307 2308 return NULL; ··· 2555 2556 /* Check security permission status on INIT_LINK mailbox command */ 2556 2557 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) && 2557 2558 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION)) 2558 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 2559 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2559 2560 "2860 SLI authentication is required " 2560 2561 "for INIT_LINK but has not done yet\n"); 2561 2562 ··· 2691 2692 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == 2692 2693 MBX_SHUTDOWN) { 2693 2694 /* Unknown mailbox command compl */ 2694 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 2695 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2695 2696 "(%d):0323 Unknown Mailbox command " 2696 2697 "x%x (x%x/x%x) Cmpl\n", 2697 2698 pmb->vport ? pmb->vport->vpi : ··· 2849 2850 } 2850 2851 2851 2852 if (unlikely(failwhy)) { 2852 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC | LOG_NVME_IOERR, 2853 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2853 2854 "6154 Drop NVME LS: SID %06X OXID x%X: %s\n", 2854 2855 sid, oxid, failwhy); 2855 2856 goto out_fail; ··· 2889 2890 if (!ret) 2890 2891 return; 2891 2892 2892 - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC | LOG_NVME_IOERR, 2893 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2893 2894 "6155 Drop NVME LS from DID %06X: SID %06X OXID x%X " 2894 2895 "NVMe%s handler failed %d\n", 2895 2896 did, sid, oxid, ··· 3172 3173 } 3173 3174 3174 3175 spin_unlock_irqrestore(temp_lock, iflag); 3175 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3176 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3176 3177 "0317 iotag x%x is out of " 3177 3178 "range: max iotag x%x wd0 x%x\n", 3178 3179 iotag, phba->sli.last_iotag, ··· 3219 3220 } 3220 3221 3221 3222 spin_unlock_irqrestore(temp_lock, iflag); 3222 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3223 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3223 3224 
"0372 iotag x%x lookup error: max iotag (x%x) " 3224 3225 "iocb_flag x%x\n", 3225 3226 iotag, phba->sli.last_iotag, ··· 3395 3396 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 3396 3397 * rsp ring <portRspMax> 3397 3398 */ 3398 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3399 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3399 3400 "0312 Ring %d handler: portRspPut %d " 3400 3401 "is bigger than rsp ring %d\n", 3401 3402 pring->ringno, le32_to_cpu(pgp->rspPutInx), ··· 3614 3615 phba->brd_no, adaptermsg); 3615 3616 } else { 3616 3617 /* Unknown IOCB command */ 3617 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3618 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3618 3619 "0334 Unknown IOCB command " 3619 3620 "Data: x%x, x%x x%x x%x x%x\n", 3620 3621 type, irsp->ulpCommand, ··· 3812 3813 phba->brd_no, adaptermsg); 3813 3814 } else { 3814 3815 /* Unknown IOCB command */ 3815 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3816 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3816 3817 "0335 Unknown IOCB " 3817 3818 "command Data: x%x " 3818 3819 "x%x x%x x%x\n", ··· 3892 3893 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 3893 3894 * rsp ring <portRspMax> 3894 3895 */ 3895 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3896 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3896 3897 "0303 Ring %d handler: portRspPut %d " 3897 3898 "is bigger than rsp ring %d\n", 3898 3899 pring->ringno, portRspPut, portRspMax); ··· 4264 4265 4265 4266 /* Check to see if any errors occurred during init */ 4266 4267 if ((status & HS_FFERM) || (i >= 20)) { 4267 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4268 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4268 4269 "2751 Adapter failed to restart, " 4269 4270 "status reg x%x, FW Data: A8 x%x AC x%x\n", 4270 4271 status, ··· 4486 4487 if (retval != MBX_SUCCESS) { 4487 4488 if (retval != MBX_BUSY) 4488 4489 mempool_free(pmb, phba->mbox_mem_pool); 4489 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4490 + 
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4490 4491 "2752 KILL_BOARD command failed retval %d\n", 4491 4492 retval); 4492 4493 spin_lock_irq(&phba->hbalock); ··· 4838 4839 if (i++ >= 200) { 4839 4840 /* Adapter failed to init, timeout, status reg 4840 4841 <status> */ 4841 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4842 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4842 4843 "0436 Adapter failed to init, " 4843 4844 "timeout, status reg x%x, " 4844 4845 "FW Data: A8 x%x AC x%x\n", status, ··· 4853 4854 /* ERROR: During chipset initialization */ 4854 4855 /* Adapter failed to init, chipset, status reg 4855 4856 <status> */ 4856 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4857 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4857 4858 "0437 Adapter failed to init, " 4858 4859 "chipset, status reg x%x, " 4859 4860 "FW Data: A8 x%x AC x%x\n", status, ··· 4884 4885 if (status & HS_FFERM) { 4885 4886 /* ERROR: During chipset initialization */ 4886 4887 /* Adapter failed to init, chipset, status reg <status> */ 4887 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4888 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4888 4889 "0438 Adapter failed to init, chipset, " 4889 4890 "status reg x%x, " 4890 4891 "FW Data: A8 x%x AC x%x\n", status, ··· 5107 5108 LPFC_SLI3_CRP_ENABLED | 5108 5109 LPFC_SLI3_DSS_ENABLED); 5109 5110 if (rc != MBX_SUCCESS) { 5110 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5111 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5111 5112 "0442 Adapter failed to init, mbxCmd x%x " 5112 5113 "CONFIG_PORT, mbxStatus x%x Data: x%x\n", 5113 5114 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0); ··· 5157 5158 if (pmb->u.mb.un.varCfgPort.gbg == 0) { 5158 5159 phba->cfg_enable_bg = 0; 5159 5160 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED; 5160 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5161 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5161 5162 "0443 Adapter did not grant " 5162 5163 "BlockGuard\n"); 5163 5164 } ··· 5196 5197 switch 
(phba->cfg_sli_mode) { 5197 5198 case 2: 5198 5199 if (phba->cfg_enable_npiv) { 5199 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 5200 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5200 5201 "1824 NPIV enabled: Override sli_mode " 5201 5202 "parameter (%d) to auto (0).\n", 5202 5203 phba->cfg_sli_mode); ··· 5208 5209 case 3: 5209 5210 break; 5210 5211 default: 5211 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 5212 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5212 5213 "1819 Unrecognized sli_mode parameter: %d.\n", 5213 5214 phba->cfg_sli_mode); 5214 5215 ··· 5219 5220 rc = lpfc_sli_config_port(phba, mode); 5220 5221 5221 5222 if (rc && phba->cfg_sli_mode == 3) 5222 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 5223 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5223 5224 "1820 Unable to select SLI-3. " 5224 5225 "Not supported by adapter.\n"); 5225 5226 if (rc && mode != 2) ··· 5313 5314 5314 5315 lpfc_sli_hba_setup_error: 5315 5316 phba->link_state = LPFC_HBA_ERROR; 5316 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5317 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5317 5318 "0445 Firmware initialization failed\n"); 5318 5319 return rc; 5319 5320 } ··· 5510 5511 LPFC_SLI4_MBX_NEMBED); 5511 5512 5512 5513 if (alloclen < reqlen) { 5513 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5514 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5514 5515 "3084 Allocated DMA memory size (%d) is " 5515 5516 "less than the requested DMA memory size " 5516 5517 "(%d)\n", alloclen, reqlen); ··· 5770 5771 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info; 5771 5772 if (bf_get(lpfc_mbox_hdr_status, 5772 5773 &rsrc_info->header.cfg_shdr.response)) { 5773 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5774 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5774 5775 "2930 Failed to get resource extents " 5775 5776 "Status 0x%x Add'l Status 0x%x\n", 5776 5777 bf_get(lpfc_mbox_hdr_status, ··· 5908 5909 
LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT, 5909 5910 req_len, *emb); 5910 5911 if (alloc_len < req_len) { 5911 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5912 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5912 5913 "2982 Allocated DMA memory size (x%x) is " 5913 5914 "less than the requested DMA memory " 5914 5915 "size (x%x)\n", alloc_len, req_len); ··· 5964 5965 return -EIO; 5965 5966 5966 5967 if ((rsrc_cnt == 0) || (rsrc_size == 0)) { 5967 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5968 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5968 5969 "3009 No available Resource Extents " 5969 5970 "for resource type 0x%x: Count: 0x%x, " 5970 5971 "Size 0x%x\n", type, rsrc_cnt, ··· 6215 6216 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents; 6216 6217 if (bf_get(lpfc_mbox_hdr_status, 6217 6218 &dealloc_rsrc->header.cfg_shdr.response)) { 6218 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 6219 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6219 6220 "2919 Failed to release resource extents " 6220 6221 "for type %d - Status 0x%x Add'l Status 0x%x. 
" 6221 6222 "Resource memory not released.\n", ··· 6409 6410 &ras_fwlog->lwpd.phys, 6410 6411 GFP_KERNEL); 6411 6412 if (!ras_fwlog->lwpd.virt) { 6412 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6413 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6413 6414 "6185 LWPD Memory Alloc Failed\n"); 6414 6415 6415 6416 return -ENOMEM; ··· 6470 6471 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 6471 6472 6472 6473 if (mb->mbxStatus != MBX_SUCCESS || shdr_status) { 6473 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 6474 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6474 6475 "6188 FW LOG mailbox " 6475 6476 "completed with status x%x add_status x%x," 6476 6477 " mbx status x%x\n", ··· 6538 6539 /* Setup Mailbox command */ 6539 6540 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6540 6541 if (!mbox) { 6541 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6542 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6542 6543 "6190 RAS MBX Alloc Failed"); 6543 6544 rc = -ENOMEM; 6544 6545 goto mem_free; ··· 6586 6587 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 6587 6588 6588 6589 if (rc == MBX_NOT_FINISHED) { 6589 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6590 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6590 6591 "6191 FW-Log Mailbox failed. " 6591 6592 "status %d mbxStatus : x%x", rc, 6592 6593 bf_get(lpfc_mqe_status, &mbox->u.mqe)); ··· 6722 6723 /* RPIs. */ 6723 6724 count = phba->sli4_hba.max_cfg_param.max_rpi; 6724 6725 if (count <= 0) { 6725 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6726 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6726 6727 "3279 Invalid provisioning of " 6727 6728 "rpi:%d\n", count); 6728 6729 rc = -EINVAL; ··· 6750 6751 /* VPIs. 
*/ 6751 6752 count = phba->sli4_hba.max_cfg_param.max_vpi; 6752 6753 if (count <= 0) { 6753 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6754 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6754 6755 "3280 Invalid provisioning of " 6755 6756 "vpi:%d\n", count); 6756 6757 rc = -EINVAL; ··· 6777 6778 /* XRIs. */ 6778 6779 count = phba->sli4_hba.max_cfg_param.max_xri; 6779 6780 if (count <= 0) { 6780 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6781 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6781 6782 "3281 Invalid provisioning of " 6782 6783 "xri:%d\n", count); 6783 6784 rc = -EINVAL; ··· 6806 6807 /* VFIs. */ 6807 6808 count = phba->sli4_hba.max_cfg_param.max_vfi; 6808 6809 if (count <= 0) { 6809 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6810 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6810 6811 "3282 Invalid provisioning of " 6811 6812 "vfi:%d\n", count); 6812 6813 rc = -EINVAL; ··· 6984 6985 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT, 6985 6986 req_len, emb); 6986 6987 if (alloc_len < req_len) { 6987 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6988 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6988 6989 "2983 Allocated DMA memory size (x%x) is " 6989 6990 "less than the requested DMA memory " 6990 6991 "size (x%x)\n", alloc_len, req_len); ··· 7027 7028 } 7028 7029 7029 7030 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) { 7030 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 7031 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7031 7032 "2984 Failed to read allocated resources " 7032 7033 "for type %d - Status 0x%x Add'l Status 0x%x.\n", 7033 7034 type, ··· 7182 7183 spin_unlock(&phba->sli4_hba.sgl_list_lock); 7183 7184 spin_unlock_irq(&phba->hbalock); 7184 7185 } else { 7185 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7186 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7186 7187 "3161 Failure to post sgl to port.\n"); 7187 7188 return -EIO; 7188 7189 } ··· 7279 7280 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys); 7280 7281 
rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe); 7281 7282 if (rc < 0) { 7282 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7283 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7283 7284 "6421 Cannot post to HRQ %d: %x %x %x " 7284 7285 "DRQ %x %x\n", 7285 7286 hrq->queue_id, ··· 7354 7355 return; 7355 7356 7356 7357 if (bf_get(lpfc_sliport_status_dip, &reg_data)) 7357 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7358 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7358 7359 "2904 Firmware Dump Image Present" 7359 7360 " on Adapter"); 7360 7361 } ··· 7440 7441 phba->hba_flag &= ~HBA_IOQ_FLUSH; 7441 7442 7442 7443 if (phba->sli_rev != LPFC_SLI_REV4) { 7443 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7444 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7444 7445 "0376 READ_REV Error. SLI Level %d " 7445 7446 "FCoE enabled %d\n", 7446 7447 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE); ··· 7482 7483 */ 7483 7484 rc = lpfc_parse_vpd(phba, vpd, vpd_size); 7484 7485 if (unlikely(!rc)) { 7485 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7486 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7486 7487 "0377 Error %d parsing vpd. 
" 7487 7488 "Using defaults.\n", rc); 7488 7489 rc = 0; ··· 7621 7622 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7622 7623 dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature); 7623 7624 if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP)) 7624 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_INIT, 7625 + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7625 7626 "6448 Dual Dump is enabled\n"); 7626 7627 else 7627 7628 lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT, ··· 7639 7640 */ 7640 7641 rc = lpfc_sli4_alloc_resource_identifiers(phba); 7641 7642 if (rc) { 7642 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7643 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7643 7644 "2920 Failed to alloc Resource IDs " 7644 7645 "rc = x%x\n", rc); 7645 7646 goto out_free_mbox; ··· 7678 7679 kfree(mp); 7679 7680 mboxq->ctx_buf = NULL; 7680 7681 if (unlikely(rc)) { 7681 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7682 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7682 7683 "0382 READ_SPARAM command failed " 7683 7684 "status %d, mbxStatus x%x\n", 7684 7685 rc, bf_get(lpfc_mqe_status, mqe)); ··· 7696 7697 /* Create all the SLI4 queues */ 7697 7698 rc = lpfc_sli4_queue_create(phba); 7698 7699 if (rc) { 7699 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7700 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7700 7701 "3089 Failed to allocate queues\n"); 7701 7702 rc = -ENODEV; 7702 7703 goto out_free_mbox; ··· 7704 7705 /* Set up all the queues to the device */ 7705 7706 rc = lpfc_sli4_queue_setup(phba); 7706 7707 if (unlikely(rc)) { 7707 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7708 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7708 7709 "0381 Error %d during queue setup.\n ", rc); 7709 7710 goto out_stop_timers; 7710 7711 } ··· 7715 7716 /* update host els xri-sgl sizes and mappings */ 7716 7717 rc = lpfc_sli4_els_sgl_update(phba); 7717 7718 if (unlikely(rc)) { 7718 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 
LOG_SLI, 7719 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7719 7720 "1400 Failed to update xri-sgl size and " 7720 7721 "mapping: %d\n", rc); 7721 7722 goto out_destroy_queue; ··· 7725 7726 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list, 7726 7727 phba->sli4_hba.els_xri_cnt); 7727 7728 if (unlikely(rc < 0)) { 7728 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7729 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7729 7730 "0582 Error %d during els sgl post " 7730 7731 "operation\n", rc); 7731 7732 rc = -ENODEV; ··· 7737 7738 /* update host nvmet xri-sgl sizes and mappings */ 7738 7739 rc = lpfc_sli4_nvmet_sgl_update(phba); 7739 7740 if (unlikely(rc)) { 7740 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7741 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7741 7742 "6308 Failed to update nvmet-sgl size " 7742 7743 "and mapping: %d\n", rc); 7743 7744 goto out_destroy_queue; ··· 7749 7750 &phba->sli4_hba.lpfc_nvmet_sgl_list, 7750 7751 phba->sli4_hba.nvmet_xri_cnt); 7751 7752 if (unlikely(rc < 0)) { 7752 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7753 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7753 7754 "3117 Error %d during nvmet " 7754 7755 "sgl post\n", rc); 7755 7756 rc = -ENODEV; ··· 7766 7767 /* update host common xri-sgl sizes and mappings */ 7767 7768 rc = lpfc_sli4_io_sgl_update(phba); 7768 7769 if (unlikely(rc)) { 7769 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7770 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7770 7771 "6082 Failed to update nvme-sgl size " 7771 7772 "and mapping: %d\n", rc); 7772 7773 goto out_destroy_queue; ··· 7775 7776 /* register the allocated common sgl pool to the port */ 7776 7777 rc = lpfc_sli4_repost_io_sgl_list(phba); 7777 7778 if (unlikely(rc)) { 7778 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7779 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7779 7780 "6116 Error %d during nvme sgl post " 7780 7781 "operation\n", rc); 7781 
7782 /* Some NVME buffers were moved to abort nvme list */ ··· 7796 7797 cnt); 7797 7798 rc = lpfc_init_iocb_list(phba, cnt); 7798 7799 if (rc) { 7799 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7800 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7800 7801 "1413 Failed to init iocb list.\n"); 7801 7802 goto out_destroy_queue; 7802 7803 } ··· 7825 7826 /* Post the rpi header region to the device. */ 7826 7827 rc = lpfc_sli4_post_all_rpi_hdrs(phba); 7827 7828 if (unlikely(rc)) { 7828 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7829 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7829 7830 "0393 Error %d during rpi post operation\n", 7830 7831 rc); 7831 7832 rc = -ENODEV; ··· 7969 7970 7970 7971 if (!(phba->hba_flag & HBA_FCOE_MODE) && 7971 7972 (phba->hba_flag & LINK_DISABLED)) { 7972 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI, 7973 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7973 7974 "3103 Adapter Link is disabled.\n"); 7974 7975 lpfc_down_link(phba, mboxq); 7975 7976 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7976 7977 if (rc != MBX_SUCCESS) { 7977 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI, 7978 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7978 7979 "3104 Adapter failed to issue " 7979 7980 "DOWN_LINK mbox cmd, rc:x%x\n", rc); 7980 7981 goto out_io_buff_free; ··· 8181 8182 } 8182 8183 8183 8184 /* Mbox cmd <mbxCommand> timeout */ 8184 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8185 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8185 8186 "0310 Mailbox command x%x timeout Data: x%x x%x x%px\n", 8186 8187 mb->mbxCommand, 8187 8188 phba->pport->port_state, ··· 8203 8204 8204 8205 lpfc_sli_abort_fcp_rings(phba); 8205 8206 8206 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8207 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8207 8208 "0345 Resetting board due to mailbox timeout\n"); 8208 8209 8209 8210 /* Reset the HBA device */ ··· 8301 8302 spin_unlock_irqrestore(&phba->hbalock, 
drvr_flag); 8302 8303 8303 8304 /* Mbox command <mbxCommand> cannot issue */ 8304 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8305 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8305 8306 "(%d):0311 Mailbox command x%x cannot " 8306 8307 "issue Data: x%x x%x\n", 8307 8308 pmbox->vport ? pmbox->vport->vpi : 0, ··· 8313 8314 if (lpfc_readl(phba->HCregaddr, &hc_copy) || 8314 8315 !(hc_copy & HC_MBINT_ENA)) { 8315 8316 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8316 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8317 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8317 8318 "(%d):2528 Mailbox command x%x cannot " 8318 8319 "issue Data: x%x x%x\n", 8319 8320 pmbox->vport ? pmbox->vport->vpi : 0, ··· 8332 8333 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8333 8334 8334 8335 /* Mbox command <mbxCommand> cannot issue */ 8335 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8336 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8336 8337 "(%d):2529 Mailbox command x%x " 8337 8338 "cannot issue Data: x%x x%x\n", 8338 8339 pmbox->vport ? pmbox->vport->vpi : 0, ··· 8344 8345 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) { 8345 8346 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8346 8347 /* Mbox command <mbxCommand> cannot issue */ 8347 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8348 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8348 8349 "(%d):2530 Mailbox command x%x " 8349 8350 "cannot issue Data: x%x x%x\n", 8350 8351 pmbox->vport ? pmbox->vport->vpi : 0, ··· 8397 8398 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8398 8399 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8399 8400 /* Mbox command <mbxCommand> cannot issue */ 8400 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8401 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8401 8402 "(%d):2531 Mailbox command x%x " 8402 8403 "cannot issue Data: x%x x%x\n", 8403 8404 pmbox->vport ? 
pmbox->vport->vpi : 0, ··· 8788 8789 spin_lock_irqsave(&phba->hbalock, iflag); 8789 8790 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 8790 8791 spin_unlock_irqrestore(&phba->hbalock, iflag); 8791 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8792 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8792 8793 "(%d):2532 Mailbox command x%x (x%x/x%x) " 8793 8794 "cannot issue Data: x%x x%x\n", 8794 8795 mboxq->vport ? mboxq->vport->vpi : 0, ··· 8909 8910 8910 8911 rc = lpfc_mbox_dev_check(phba); 8911 8912 if (unlikely(rc)) { 8912 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8913 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8913 8914 "(%d):2544 Mailbox command x%x (x%x/x%x) " 8914 8915 "cannot issue Data: x%x x%x\n", 8915 8916 mboxq->vport ? mboxq->vport->vpi : 0, ··· 8986 8987 /* Now, interrupt mode asynchronous mailbox command */ 8987 8988 rc = lpfc_mbox_cmd_check(phba, mboxq); 8988 8989 if (rc) { 8989 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8990 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8990 8991 "(%d):2543 Mailbox command x%x (x%x/x%x) " 8991 8992 "cannot issue Data: x%x x%x\n", 8992 8993 mboxq->vport ? mboxq->vport->vpi : 0, ··· 9054 9055 } 9055 9056 if (unlikely(phba->sli.mbox_active)) { 9056 9057 spin_unlock_irqrestore(&phba->hbalock, iflags); 9057 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 9058 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9058 9059 "0384 There is pending active mailbox cmd\n"); 9059 9060 return MBX_NOT_FINISHED; 9060 9061 } ··· 9115 9116 /* Post the mailbox command to the port */ 9116 9117 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe); 9117 9118 if (rc != MBX_SUCCESS) { 9118 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 9119 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9119 9120 "(%d):2533 Mailbox command x%x (x%x/x%x) " 9120 9121 "cannot issue Data: x%x x%x\n", 9121 9122 mboxq->vport ? 
mboxq->vport->vpi : 0, ··· 9191 9192 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4; 9192 9193 break; 9193 9194 default: 9194 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9195 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9195 9196 "1420 Invalid HBA PCI-device group: 0x%x\n", 9196 9197 dev_grp); 9197 9198 return -ENODEV; ··· 9292 9293 if (piocb->iocb_cmpl && (!piocb->vport) && 9293 9294 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 9294 9295 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { 9295 - lpfc_printf_log(phba, KERN_ERR, 9296 - LOG_SLI | LOG_VPORT, 9296 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9297 9297 "1807 IOCB x%x failed. No vport\n", 9298 9298 piocb->iocb.ulpCommand); 9299 9299 dump_stack(); ··· 9587 9589 else 9588 9590 ndlp = (struct lpfc_nodelist *)iocbq->context1; 9589 9591 if (!iocbq->iocb.ulpLe) { 9590 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9592 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9591 9593 "2007 Only Limited Edition cmd Format" 9592 9594 " supported 0x%x\n", 9593 9595 iocbq->iocb.ulpCommand); ··· 9895 9897 /* word6 context tag copied in memcpy */ 9896 9898 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) { 9897 9899 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 9898 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9900 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9899 9901 "2015 Invalid CT %x command 0x%x\n", 9900 9902 ct, iocbq->iocb.ulpCommand); 9901 9903 return IOCB_ERROR; ··· 10074 10076 case CMD_FCP_TRSP64_CX: /* Target mode rcv */ 10075 10077 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */ 10076 10078 default: 10077 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10079 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10078 10080 "2014 Invalid command 0x%x\n", 10079 10081 iocbq->iocb.ulpCommand); 10080 10082 return IOCB_ERROR; ··· 10236 10238 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4; 10237 10239 break; 10238 10240 default: 10239 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10241 + 
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10240 10242 "1419 Invalid HBA PCI-device group: 0x%x\n", 10241 10243 dev_grp); 10242 10244 return -ENODEV; ··· 10496 10498 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 10497 10499 if (evt_code == ASYNC_TEMP_WARN) { 10498 10500 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 10499 - lpfc_printf_log(phba, KERN_ERR, LOG_TEMP, 10501 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10500 10502 "0347 Adapter is very hot, please take " 10501 10503 "corrective action. temperature : %d Celsius\n", 10502 10504 (uint32_t) icmd->ulpContext); 10503 10505 } else { 10504 10506 temp_event_data.event_code = LPFC_NORMAL_TEMP; 10505 - lpfc_printf_log(phba, KERN_ERR, LOG_TEMP, 10507 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10506 10508 "0340 Adapter temperature is OK now. " 10507 10509 "temperature : %d Celsius\n", 10508 10510 (uint32_t) icmd->ulpContext); ··· 10519 10521 break; 10520 10522 default: 10521 10523 iocb_w = (uint32_t *) icmd; 10522 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10524 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10523 10525 "0346 Ring %d handler: unexpected ASYNC_STATUS" 10524 10526 " evt_code 0x%x\n" 10525 10527 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n" ··· 11193 11195 } 11194 11196 11195 11197 spin_unlock_irq(&phba->hbalock); 11196 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11198 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11197 11199 "0402 Cannot find virtual addr for buffer tag on " 11198 11200 "ring %d Data x%lx x%px x%px x%x\n", 11199 11201 pring->ringno, (unsigned long) tag, ··· 11237 11239 } 11238 11240 11239 11241 spin_unlock_irq(&phba->hbalock); 11240 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11242 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11241 11243 "0410 Cannot find virtual addr for mapped buf on " 11242 11244 "ring %d Data x%llx x%px x%px x%x\n", 11243 11245 pring->ringno, (unsigned long long)phys, ··· 12131 12133 * completed. 
Not that it completed successfully. 12132 12134 * */ 12133 12135 } else if (timeleft == 0) { 12134 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12136 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12135 12137 "0338 IOCB wait timeout error - no " 12136 12138 "wake response Data x%x\n", timeout); 12137 12139 retval = IOCB_TIMEDOUT; 12138 12140 } else { 12139 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12141 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12140 12142 "0330 IOCB wake NOT set, " 12141 12143 "Data x%x x%lx\n", 12142 12144 timeout, (timeleft / jiffies)); ··· 12398 12400 } 12399 12401 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || 12400 12402 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { 12401 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12403 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12402 12404 "1423 HBA Unrecoverable error: " 12403 12405 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " 12404 12406 "ue_mask_lo_reg=0x%x, " ··· 12429 12431 readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 12430 12432 phba->work_status[1] = 12431 12433 readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 12432 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12434 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12433 12435 "2885 Port Status Event: " 12434 12436 "port status reg 0x%x, " 12435 12437 "port smphr reg 0x%x, " ··· 12445 12447 break; 12446 12448 case LPFC_SLI_INTF_IF_TYPE_1: 12447 12449 default: 12448 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12450 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12449 12451 "2886 HBA Error Attention on unsupported " 12450 12452 "if type %d.", if_type); 12451 12453 return 1; ··· 12509 12511 ha_copy = lpfc_sli4_eratt_read(phba); 12510 12512 break; 12511 12513 default: 12512 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12514 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12513 12515 "0299 Invalid SLI revision (%d)\n", 12514 12516 phba->sli_rev); 12515 12517 ha_copy = 0; ··· 12742 12744 * Stray Mailbox Interrupt, mbxCommand <cmd> 
12743 12745 * mbxStatus <status> 12744 12746 */ 12745 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 12746 - LOG_SLI, 12747 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12747 12748 "(%d):0304 Stray Mailbox " 12748 12749 "Interrupt mbxCommand x%x " 12749 12750 "mbxStatus x%x\n", ··· 12802 12805 if (rc != MBX_BUSY) 12803 12806 lpfc_printf_log(phba, 12804 12807 KERN_ERR, 12805 - LOG_MBOX | LOG_SLI, 12808 + LOG_TRACE_EVENT, 12806 12809 "0350 rc should have" 12807 12810 "been MBX_BUSY\n"); 12808 12811 if (rc != MBX_NOT_FINISHED) ··· 12831 12834 MBX_NOWAIT); 12832 12835 } while (rc == MBX_NOT_FINISHED); 12833 12836 if (rc != MBX_SUCCESS) 12834 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 12835 - LOG_SLI, "0349 rc should be " 12837 + lpfc_printf_log(phba, KERN_ERR, 12838 + LOG_TRACE_EVENT, 12839 + "0349 rc should be " 12836 12840 "MBX_SUCCESS\n"); 12837 12841 } 12838 12842 ··· 13260 13262 /* Allocate a new internal CQ_EVENT entry */ 13261 13263 cq_event = lpfc_sli4_cq_event_alloc(phba); 13262 13264 if (!cq_event) { 13263 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13265 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13264 13266 "0602 Failed to alloc CQ_EVENT entry\n"); 13265 13267 return NULL; 13266 13268 } ··· 13335 13337 spin_lock_irqsave(&phba->hbalock, iflags); 13336 13338 pmb = phba->sli.mbox_active; 13337 13339 if (unlikely(!pmb)) { 13338 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 13340 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13339 13341 "1832 No pending MBOX command to handle\n"); 13340 13342 spin_unlock_irqrestore(&phba->hbalock, iflags); 13341 13343 goto out_no_mqe_complete; ··· 13384 13386 pmb->vport = vport; 13385 13387 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 13386 13388 if (rc != MBX_BUSY) 13387 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 13388 - LOG_SLI, "0385 rc should " 13389 + lpfc_printf_log(phba, KERN_ERR, 13390 + LOG_TRACE_EVENT, 13391 + "0385 rc should " 13389 13392 "have been MBX_BUSY\n"); 13390 13393 if (rc != 
MBX_NOT_FINISHED) 13391 13394 goto send_current_mbox; ··· 13493 13494 txq_cnt++; 13494 13495 if (!list_empty(&pring->txcmplq)) 13495 13496 txcmplq_cnt++; 13496 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13497 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13497 13498 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d " 13498 13499 "els_txcmplq_cnt=%d\n", 13499 13500 txq_cnt, phba->iocb_cnt, ··· 13584 13585 workposted = true; 13585 13586 break; 13586 13587 default: 13587 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13588 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13588 13589 "0603 Invalid CQ subtype %d: " 13589 13590 "%08x %08x %08x %08x\n", 13590 13591 cq->subtype, wcqe->word0, wcqe->parameter, ··· 13632 13633 status = bf_get(lpfc_rcqe_status, rcqe); 13633 13634 switch (status) { 13634 13635 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 13635 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13636 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13636 13637 "2537 Receive Frame Truncated!!\n"); 13637 13638 /* fall through */ 13638 13639 case FC_STATUS_RQ_SUCCESS: ··· 13669 13670 case FC_STATUS_INSUFF_BUF_FRM_DISC: 13670 13671 if (phba->nvmet_support) { 13671 13672 tgtp = phba->targetport->private; 13672 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME, 13673 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13673 13674 "6402 RQE Error x%x, posted %d err_cnt " 13674 13675 "%d: %x %x %x\n", 13675 13676 status, hrq->RQ_buf_posted, ··· 13741 13742 (struct lpfc_rcqe *)&cqevt); 13742 13743 break; 13743 13744 default: 13744 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13745 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13745 13746 "0388 Not a valid WCQE code: x%x\n", 13746 13747 bf_get(lpfc_cqe_code, &cqevt)); 13747 13748 break; ··· 13781 13782 } 13782 13783 if (unlikely(!cq)) { 13783 13784 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 13784 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13785 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13785 13786 "0365 Slow-path CQ identifier 
" 13786 13787 "(%d) does not exist\n", cqid); 13787 13788 return; ··· 13796 13797 ret = queue_work_on(cq->chann, phba->wq, &cq->spwork); 13797 13798 13798 13799 if (!ret) 13799 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13800 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13800 13801 "0390 Cannot schedule queue work " 13801 13802 "for CQ eqcqid=%d, cqid=%d on CPU %d\n", 13802 13803 cqid, cq->queue_id, raw_smp_processor_id()); ··· 13934 13935 &delay, LPFC_QUEUE_WORK); 13935 13936 break; 13936 13937 default: 13937 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13938 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13938 13939 "0370 Invalid completion queue type (%d)\n", 13939 13940 cq->type); 13940 13941 return; ··· 13948 13949 ret = queue_delayed_work_on(cq->chann, phba->wq, 13949 13950 &cq->sched_spwork, delay); 13950 13951 if (!ret) 13951 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13952 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13952 13953 "0394 Cannot schedule queue work " 13953 13954 "for cqid=%d on CPU %d\n", 13954 13955 cq->queue_id, cq->chann); ··· 14158 14159 status = bf_get(lpfc_rcqe_status, rcqe); 14159 14160 switch (status) { 14160 14161 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 14161 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14162 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14162 14163 "6126 Receive Frame Truncated!!\n"); 14163 14164 /* fall through */ 14164 14165 case FC_STATUS_RQ_SUCCESS: ··· 14198 14199 case FC_STATUS_INSUFF_BUF_FRM_DISC: 14199 14200 if (phba->nvmet_support) { 14200 14201 tgtp = phba->targetport->private; 14201 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME, 14202 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14202 14203 "6401 RQE Error x%x, posted %d err_cnt " 14203 14204 "%d: %x %x %x\n", 14204 14205 status, hrq->RQ_buf_posted, ··· 14272 14273 } 14273 14274 break; 14274 14275 default: 14275 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14276 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14276 14277 "0144 
Not a valid CQE code: x%x\n", 14277 14278 bf_get(lpfc_wcqe_c_code, &wcqe)); 14278 14279 break; ··· 14310 14311 else 14311 14312 ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork); 14312 14313 if (!ret) 14313 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14314 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14314 14315 "0383 Cannot schedule queue work " 14315 14316 "for CQ eqcqid=%d, cqid=%d on CPU %d\n", 14316 14317 cqid, cq->queue_id, ··· 14339 14340 uint16_t cqid, id; 14340 14341 14341 14342 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { 14342 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14343 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14343 14344 "0366 Not a valid completion " 14344 14345 "event: majorcode=x%x, minorcode=x%x\n", 14345 14346 bf_get_le32(lpfc_eqe_major_code, eqe), ··· 14382 14383 14383 14384 process_cq: 14384 14385 if (unlikely(cqid != cq->queue_id)) { 14385 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14386 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14386 14387 "0368 Miss-matched fast-path completion " 14387 14388 "queue identifier: eqcqid=%d, fcpcqid=%d\n", 14388 14389 cqid, cq->queue_id); ··· 14436 14437 ret = queue_delayed_work_on(cq->chann, phba->wq, 14437 14438 &cq->sched_irqwork, delay); 14438 14439 if (!ret) 14439 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14440 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14440 14441 "0367 Cannot schedule queue work " 14441 14442 "for cqid=%d on CPU %d\n", 14442 14443 cq->queue_id, cq->chann); ··· 14967 14968 14968 14969 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14969 14970 if (!mbox) { 14970 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_FCP | LOG_NVME, 14971 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14971 14972 "6428 Failed allocating mailbox cmd buffer." 
14972 14973 " EQ delay was not set.\n"); 14973 14974 return; ··· 15009 15010 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15010 15011 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15011 15012 if (shdr_status || shdr_add_status || rc) { 15012 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15013 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15013 15014 "2512 MODIFY_EQ_DELAY mailbox failed with " 15014 15015 "status x%x add_status x%x, mbx status x%x\n", 15015 15016 shdr_status, shdr_add_status, rc); ··· 15086 15087 dmult); 15087 15088 switch (eq->entry_count) { 15088 15089 default: 15089 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15090 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15090 15091 "0360 Unsupported EQ count. (%d)\n", 15091 15092 eq->entry_count); 15092 15093 if (eq->entry_count < 256) { ··· 15130 15131 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15131 15132 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15132 15133 if (shdr_status || shdr_add_status || rc) { 15133 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15134 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15134 15135 "2500 EQ_CREATE mailbox failed with " 15135 15136 "status x%x add_status x%x, mbx status x%x\n", 15136 15137 shdr_status, shdr_add_status, rc); ··· 15235 15236 } 15236 15237 /* fall through */ 15237 15238 default: 15238 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15239 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15239 15240 "0361 Unsupported CQ count: " 15240 15241 "entry cnt %d sz %d pg cnt %d\n", 15241 15242 cq->entry_count, cq->entry_size, ··· 15271 15272 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15272 15273 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15273 15274 if (shdr_status || shdr_add_status || rc) { 15274 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15275 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15275 15276 "2501 CQ_CREATE mailbox 
failed with " 15276 15277 "status x%x add_status x%x, mbx status x%x\n", 15277 15278 shdr_status, shdr_add_status, rc); ··· 15358 15359 LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length, 15359 15360 LPFC_SLI4_MBX_NEMBED); 15360 15361 if (alloclen < length) { 15361 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15362 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15362 15363 "3098 Allocated DMA memory size (%d) is " 15363 15364 "less than the requested DMA memory size " 15364 15365 "(%d)\n", alloclen, length); ··· 15412 15413 } 15413 15414 /* fall through */ 15414 15415 default: 15415 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15416 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15416 15417 "3118 Bad CQ count. (%d)\n", 15417 15418 cq->entry_count); 15418 15419 if (cq->entry_count < 256) { ··· 15530 15531 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15531 15532 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15532 15533 if (shdr_status || shdr_add_status || rc) { 15533 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15534 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15534 15535 "3119 CQ_CREATE_SET mailbox failed with " 15535 15536 "status x%x add_status x%x, mbx status x%x\n", 15536 15537 shdr_status, shdr_add_status, rc); ··· 15688 15689 cq->queue_id); 15689 15690 switch (mq->entry_count) { 15690 15691 default: 15691 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15692 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15692 15693 "0362 Unsupported MQ count. 
(%d)\n", 15693 15694 mq->entry_count); 15694 15695 if (mq->entry_count < 16) { ··· 15744 15745 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15745 15746 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15746 15747 if (shdr_status || shdr_add_status || rc) { 15747 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15748 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15748 15749 "2502 MQ_CREATE mailbox failed with " 15749 15750 "status x%x add_status x%x, mbx status x%x\n", 15750 15751 shdr_status, shdr_add_status, rc); ··· 15893 15894 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15894 15895 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15895 15896 if (shdr_status || shdr_add_status || rc) { 15896 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15897 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15897 15898 "2503 WQ_CREATE mailbox failed with " 15898 15899 "status x%x add_status x%x, mbx status x%x\n", 15899 15900 shdr_status, shdr_add_status, rc); ··· 15920 15921 &wq_create->u.response); 15921 15922 if ((wq->db_format != LPFC_DB_LIST_FORMAT) && 15922 15923 (wq->db_format != LPFC_DB_RING_FORMAT)) { 15923 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15924 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15924 15925 "3265 WQ[%d] doorbell format " 15925 15926 "not supported: x%x\n", 15926 15927 wq->queue_id, wq->db_format); ··· 15932 15933 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, 15933 15934 pci_barset); 15934 15935 if (!bar_memmap_p) { 15935 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15936 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15936 15937 "3263 WQ[%d] failed to memmap " 15937 15938 "pci barset:x%x\n", 15938 15939 wq->queue_id, pci_barset); ··· 15942 15943 db_offset = wq_create->u.response.doorbell_offset; 15943 15944 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) && 15944 15945 (db_offset != LPFC_ULP1_WQ_DOORBELL)) { 15945 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15946 + 
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15946 15947 "3252 WQ[%d] doorbell offset " 15947 15948 "not supported: x%x\n", 15948 15949 wq->queue_id, db_offset); ··· 15966 15967 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, 15967 15968 pci_barset); 15968 15969 if (!bar_memmap_p) { 15969 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15970 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15970 15971 "3267 WQ[%d] failed to memmap " 15971 15972 "pci barset:x%x\n", 15972 15973 wq->queue_id, pci_barset); ··· 15982 15983 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, 15983 15984 dpp_barset); 15984 15985 if (!bar_memmap_p) { 15985 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15986 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15986 15987 "3268 WQ[%d] failed to memmap " 15987 15988 "pci barset:x%x\n", 15988 15989 wq->queue_id, dpp_barset); ··· 16106 16107 } else { 16107 16108 switch (hrq->entry_count) { 16108 16109 default: 16109 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16110 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 16110 16111 "2535 Unsupported RQ count. 
(%d)\n", 16111 16112 hrq->entry_count); 16112 16113 if (hrq->entry_count < 512) { ··· 16157 16158 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16158 16159 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16159 16160 if (shdr_status || shdr_add_status || rc) { 16160 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16161 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 16161 16162 "2504 RQ_CREATE mailbox failed with " 16162 16163 "status x%x add_status x%x, mbx status x%x\n", 16163 16164 shdr_status, shdr_add_status, rc); ··· 16175 16176 &rq_create->u.response); 16176 16177 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) && 16177 16178 (hrq->db_format != LPFC_DB_RING_FORMAT)) { 16178 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16179 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 16179 16180 "3262 RQ [%d] doorbell format not " 16180 16181 "supported: x%x\n", hrq->queue_id, 16181 16182 hrq->db_format); ··· 16187 16188 &rq_create->u.response); 16188 16189 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset); 16189 16190 if (!bar_memmap_p) { 16190 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16191 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 16191 16192 "3269 RQ[%d] failed to memmap pci " 16192 16193 "barset:x%x\n", hrq->queue_id, 16193 16194 pci_barset); ··· 16198 16199 db_offset = rq_create->u.response.doorbell_offset; 16199 16200 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) && 16200 16201 (db_offset != LPFC_ULP1_RQ_DOORBELL)) { 16201 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16202 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 16202 16203 "3270 RQ[%d] doorbell offset not " 16203 16204 "supported: x%x\n", hrq->queue_id, 16204 16205 db_offset); ··· 16243 16244 } else { 16244 16245 switch (drq->entry_count) { 16245 16246 default: 16246 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16247 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 16247 16248 "2536 Unsupported RQ count. 
(%d)\n", 16248 16249 drq->entry_count); 16249 16250 if (drq->entry_count < 512) { ··· 16380 16381 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length, 16381 16382 LPFC_SLI4_MBX_NEMBED); 16382 16383 if (alloclen < length) { 16383 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16384 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 16384 16385 "3099 Allocated DMA memory size (%d) is " 16385 16386 "less than the requested DMA memory size " 16386 16387 "(%d)\n", alloclen, length); ··· 16490 16491 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16491 16492 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16492 16493 if (shdr_status || shdr_add_status || rc) { 16493 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16494 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 16494 16495 "3120 RQ_CREATE mailbox failed with " 16495 16496 "status x%x add_status x%x, mbx status x%x\n", 16496 16497 shdr_status, shdr_add_status, rc); ··· 16560 16561 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16561 16562 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16562 16563 if (shdr_status || shdr_add_status || rc) { 16563 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16564 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 16564 16565 "2505 EQ_DESTROY mailbox failed with " 16565 16566 "status x%x add_status x%x, mbx status x%x\n", 16566 16567 shdr_status, shdr_add_status, rc); ··· 16615 16616 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16616 16617 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16617 16618 if (shdr_status || shdr_add_status || rc) { 16618 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16619 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 16619 16620 "2506 CQ_DESTROY mailbox failed with " 16620 16621 "status x%x add_status x%x, mbx status x%x\n", 16621 16622 shdr_status, shdr_add_status, rc); ··· 16669 16670 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16670 16671 
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16671 16672 if (shdr_status || shdr_add_status || rc) { 16672 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16673 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 16673 16674 "2507 MQ_DESTROY mailbox failed with " 16674 16675 "status x%x add_status x%x, mbx status x%x\n", 16675 16676 shdr_status, shdr_add_status, rc); ··· 16722 16723 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16723 16724 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16724 16725 if (shdr_status || shdr_add_status || rc) { 16725 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16726 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 16726 16727 "2508 WQ_DESTROY mailbox failed with " 16727 16728 "status x%x add_status x%x, mbx status x%x\n", 16728 16729 shdr_status, shdr_add_status, rc); ··· 16779 16780 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16780 16781 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16781 16782 if (shdr_status || shdr_add_status || rc) { 16782 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16783 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 16783 16784 "2509 RQ_DESTROY mailbox failed with " 16784 16785 "status x%x add_status x%x, mbx status x%x\n", 16785 16786 shdr_status, shdr_add_status, rc); ··· 16795 16796 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16796 16797 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16797 16798 if (shdr_status || shdr_add_status || rc) { 16798 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16799 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 16799 16800 "2510 RQ_DESTROY mailbox failed with " 16800 16801 "status x%x add_status x%x, mbx status x%x\n", 16801 16802 shdr_status, shdr_add_status, rc); ··· 16843 16844 union lpfc_sli4_cfg_shdr *shdr; 16844 16845 16845 16846 if (xritag == NO_XRI) { 16846 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16847 + lpfc_printf_log(phba, 
KERN_ERR, LOG_TRACE_EVENT, 16847 16848 "0364 Invalid param:\n"); 16848 16849 return -EINVAL; 16849 16850 } ··· 16884 16885 if (rc != MBX_TIMEOUT) 16885 16886 mempool_free(mbox, phba->mbox_mem_pool); 16886 16887 if (shdr_status || shdr_add_status || rc) { 16887 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16888 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 16888 16889 "2511 POST_SGL mailbox failed with " 16889 16890 "status x%x add_status x%x, mbx status x%x\n", 16890 16891 shdr_status, shdr_add_status, rc); ··· 17015 17016 reqlen = post_cnt * sizeof(struct sgl_page_pairs) + 17016 17017 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 17017 17018 if (reqlen > SLI4_PAGE_SIZE) { 17018 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 17019 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 17019 17020 "2559 Block sgl registration required DMA " 17020 17021 "size (%d) great than a page\n", reqlen); 17021 17022 return -ENOMEM; ··· 17031 17032 LPFC_SLI4_MBX_NEMBED); 17032 17033 17033 17034 if (alloclen < reqlen) { 17034 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 17035 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 17035 17036 "0285 Allocated DMA memory size (%d) is " 17036 17037 "less than the requested DMA memory " 17037 17038 "size (%d)\n", alloclen, reqlen); ··· 17079 17080 if (rc != MBX_TIMEOUT) 17080 17081 lpfc_sli4_mbox_cmd_free(phba, mbox); 17081 17082 if (shdr_status || shdr_add_status || rc) { 17082 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 17083 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 17083 17084 "2513 POST_SGL_BLOCK mailbox command failed " 17084 17085 "status x%x add_status x%x mbx status x%x\n", 17085 17086 shdr_status, shdr_add_status, rc); ··· 17127 17128 } 17128 17129 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 17129 17130 if (!mbox) { 17130 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 17131 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 17131 17132 "6119 Failed to allocate mbox cmd memory\n"); 17132 17133 return -ENOMEM; 
17133 17134 } ··· 17138 17139 reqlen, LPFC_SLI4_MBX_NEMBED); 17139 17140 17140 17141 if (alloclen < reqlen) { 17141 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 17142 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 17142 17143 "6120 Allocated DMA memory size (%d) is " 17143 17144 "less than the requested DMA memory " 17144 17145 "size (%d)\n", alloclen, reqlen); ··· 17192 17193 if (rc != MBX_TIMEOUT) 17193 17194 lpfc_sli4_mbox_cmd_free(phba, mbox); 17194 17195 if (shdr_status || shdr_add_status || rc) { 17195 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 17196 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 17196 17197 "6125 POST_SGL_BLOCK mailbox command failed " 17197 17198 "status x%x add_status x%x mbx status x%x\n", 17198 17199 shdr_status, shdr_add_status, rc); ··· 17776 17777 17777 17778 /* Failure means BLS ABORT RSP did not get delivered to remote node*/ 17778 17779 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus) 17779 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 17780 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 17780 17781 "3154 BLS ABORT RSP failed, data: x%x/x%x\n", 17781 17782 rsp_iocbq->iocb.ulpStatus, 17782 17783 rsp_iocbq->iocb.un.ulpWord[4]); ··· 17938 17939 17939 17940 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); 17940 17941 if (rc == IOCB_ERROR) { 17941 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 17942 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 17942 17943 "2925 Failed to issue CT ABTS RSP x%x on " 17943 17944 "xri x%x, Data x%x\n", 17944 17945 icmd->un.xseq64.w5.hcsw.Rctl, oxid, ··· 18188 18189 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 18189 18190 iocbq = lpfc_prep_seq(vport, seq_dmabuf); 18190 18191 if (!iocbq) { 18191 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 18192 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 18192 18193 "2707 Ring %d handler: Failed to allocate " 18193 18194 "iocb Rctl x%x Type x%x received\n", 18194 18195 LPFC_ELS_RING, ··· 18199 18200 phba->sli4_hba.els_wq->pring, 18200 
18201 iocbq, fc_hdr->fh_r_ctl, 18201 18202 fc_hdr->fh_type)) 18202 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 18203 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 18203 18204 "2540 Ring %d handler: unexpected Rctl " 18204 18205 "x%x Type x%x received\n", 18205 18206 LPFC_ELS_RING, ··· 18464 18465 18465 18466 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page); 18466 18467 if (rc != MBX_SUCCESS) { 18467 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 18468 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 18468 18469 "2008 Error %d posting all rpi " 18469 18470 "headers\n", rc); 18470 18471 rc = -EIO; ··· 18510 18511 /* The port is notified of the header region via a mailbox command. */ 18511 18512 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18512 18513 if (!mboxq) { 18513 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 18514 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 18514 18515 "2001 Unable to allocate memory for issuing " 18515 18516 "SLI_CONFIG_SPECIAL mailbox command\n"); 18516 18517 return -ENOMEM; ··· 18540 18541 if (rc != MBX_TIMEOUT) 18541 18542 mempool_free(mboxq, phba->mbox_mem_pool); 18542 18543 if (shdr_status || shdr_add_status || rc) { 18543 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18544 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 18544 18545 "2514 POST_RPI_HDR mailbox failed with " 18545 18546 "status x%x add_status x%x, mbx status x%x\n", 18546 18547 shdr_status, shdr_add_status, rc); ··· 18630 18631 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) { 18631 18632 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 18632 18633 if (!rpi_hdr) { 18633 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 18634 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 18634 18635 "2002 Error Could not grow rpi " 18635 18636 "count\n"); 18636 18637 } else { ··· 18732 18733 mboxq->vport = ndlp->vport; 18733 18734 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 18734 18735 if (rc == MBX_NOT_FINISHED) { 18735 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 
18736 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 18736 18737 "2010 Resume RPI Mailbox failed " 18737 18738 "status %d, mbxStatus x%x\n", rc, 18738 18739 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); ··· 18767 18768 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 18768 18769 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 18769 18770 if (rc != MBX_SUCCESS) { 18770 - lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, 18771 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 18771 18772 "2022 INIT VPI Mailbox failed " 18772 18773 "status %d, mbxStatus x%x\n", rc, 18773 18774 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); ··· 18803 18804 18804 18805 if ((shdr_status || shdr_add_status) && 18805 18806 (shdr_status != STATUS_FCF_IN_USE)) 18806 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18807 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 18807 18808 "2558 ADD_FCF_RECORD mailbox failed with " 18808 18809 "status x%x add_status x%x\n", 18809 18810 shdr_status, shdr_add_status); ··· 18833 18834 18834 18835 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18835 18836 if (!mboxq) { 18836 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18837 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 18837 18838 "2009 Failed to allocate mbox for ADD_FCF cmd\n"); 18838 18839 return -ENOMEM; 18839 18840 } ··· 18846 18847 LPFC_MBOX_OPCODE_FCOE_ADD_FCF, 18847 18848 req_len, LPFC_SLI4_MBX_NEMBED); 18848 18849 if (alloc_len < req_len) { 18849 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18850 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 18850 18851 "2523 Allocated DMA memory size (x%x) is " 18851 18852 "less than the requested DMA memory " 18852 18853 "size (x%x)\n", alloc_len, req_len); ··· 18879 18880 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record; 18880 18881 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 18881 18882 if (rc == MBX_NOT_FINISHED) { 18882 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18883 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 18883 18884 "2515 
ADD_FCF_RECORD mailbox failed with " 18884 18885 "status 0x%x\n", rc); 18885 18886 lpfc_sli4_mbox_cmd_free(phba, mboxq); ··· 18952 18953 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag; 18953 18954 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18954 18955 if (!mboxq) { 18955 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18956 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 18956 18957 "2000 Failed to allocate mbox for " 18957 18958 "READ_FCF cmd\n"); 18958 18959 error = -ENOMEM; ··· 19395 19396 19396 19397 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 19397 19398 if (!mbox) { 19398 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 19399 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 19399 19400 "2745 Failed to allocate mbox for " 19400 19401 "requesting FCF rediscover.\n"); 19401 19402 return -ENOMEM; ··· 19470 19471 19471 19472 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 19472 19473 if (!pmb) { 19473 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 19474 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 19474 19475 "2600 failed to allocate mailbox memory\n"); 19475 19476 return 0; 19476 19477 } ··· 19529 19530 19530 19531 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 19531 19532 if (!mboxq) { 19532 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 19533 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 19533 19534 "3105 failed to allocate mailbox memory\n"); 19534 19535 return 0; 19535 19536 } ··· 19593 19594 19594 19595 /* Check the region signature first */ 19595 19596 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) { 19596 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 19597 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 19597 19598 "2619 Config region 23 has bad signature\n"); 19598 19599 goto out; 19599 19600 } ··· 19601 19602 19602 19603 /* Check the data structure version */ 19603 19604 if (rgn23_data[offset] != LPFC_REGION23_VERSION) { 19604 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 19605 + 
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 19605 19606 "2620 Config region 23 has bad version\n"); 19606 19607 goto out; 19607 19608 } ··· 19778 19779 if (rc != MBX_TIMEOUT) 19779 19780 mempool_free(mbox, phba->mbox_mem_pool); 19780 19781 if (shdr_status || shdr_add_status || rc) { 19781 - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 19782 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 19782 19783 "3025 Write Object mailbox failed with " 19783 19784 "status x%x add_status x%x, mbx status x%x\n", 19784 19785 shdr_status, shdr_add_status, rc); ··· 19958 19959 piocbq = lpfc_sli_ringtx_get(phba, pring); 19959 19960 if (!piocbq) { 19960 19961 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19961 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 19962 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 19962 19963 "2823 txq empty and txq_cnt is %d\n ", 19963 19964 txq_cnt); 19964 19965 break; ··· 19987 19988 19988 19989 if (fail_msg) { 19989 19990 /* Failed means we can't issue and need to cancel */ 19990 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 19991 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 19991 19992 "2822 IOCB failed %s iotag 0x%x " 19992 19993 "xri 0x%x\n", 19993 19994 fail_msg,
+31 -29
drivers/scsi/lpfc/lpfc_vport.c
··· 145 145 rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2); 146 146 if (rc != MBX_SUCCESS) { 147 147 if (signal_pending(current)) { 148 - lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT | LOG_VPORT, 148 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 149 149 "1830 Signal aborted mbxCmd x%x\n", 150 150 mb->mbxCommand); 151 151 lpfc_mbuf_free(phba, mp->virt, mp->phys); ··· 154 154 mempool_free(pmb, phba->mbox_mem_pool); 155 155 return -EINTR; 156 156 } else { 157 - lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT | LOG_VPORT, 157 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 158 158 "1818 VPort failed init, mbxCmd x%x " 159 159 "READ_SPARM mbxStatus x%x, rc = x%x\n", 160 160 mb->mbxCommand, mb->mbxStatus, rc); ··· 190 190 ((wwn->u.wwn[0] & 0xf) != 0 || (wwn->u.wwn[1] & 0xf) != 0))) 191 191 return 1; 192 192 193 - lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, 193 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 194 194 "1822 Invalid %s: %02x:%02x:%02x:%02x:" 195 195 "%02x:%02x:%02x:%02x\n", 196 196 name_type, ··· 284 284 } 285 285 286 286 if (time_after(jiffies, wait_time_max)) 287 - lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, 288 - "1835 Vport discovery quiesce failed:" 289 - " state x%x fc_flags x%x wait msecs x%x\n", 290 - vport->port_state, vport->fc_flag, 291 - jiffies_to_msecs(jiffies - start_time)); 287 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 288 + "1835 Vport discovery quiesce failed:" 289 + " state x%x fc_flags x%x wait msecs x%x\n", 290 + vport->port_state, vport->fc_flag, 291 + jiffies_to_msecs(jiffies - start_time)); 292 292 } 293 293 294 294 int ··· 305 305 int status; 306 306 307 307 if ((phba->sli_rev < 3) || !(phba->cfg_enable_npiv)) { 308 - lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, 308 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 309 309 "1808 Create VPORT failed: " 310 310 "NPIV is not enabled: SLImode:%d\n", 311 311 phba->sli_rev); ··· 315 315 316 316 /* NPIV is not supported if HBA has NVME Target enabled */ 317 
317 if (phba->nvmet_support) { 318 - lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, 318 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 319 319 "3189 Create VPORT failed: " 320 320 "NPIV is not supported on NVME Target\n"); 321 321 rc = VPORT_INVAL; ··· 324 324 325 325 vpi = lpfc_alloc_vpi(phba); 326 326 if (vpi == 0) { 327 - lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, 327 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 328 328 "1809 Create VPORT failed: " 329 329 "Max VPORTs (%d) exceeded\n", 330 330 phba->max_vpi); ··· 334 334 335 335 /* Assign an unused board number */ 336 336 if ((instance = lpfc_get_instance()) < 0) { 337 - lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, 337 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 338 338 "1810 Create VPORT failed: Cannot get " 339 339 "instance number\n"); 340 340 lpfc_free_vpi(phba, vpi); ··· 344 344 345 345 vport = lpfc_create_port(phba, instance, &fc_vport->dev); 346 346 if (!vport) { 347 - lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, 347 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 348 348 "1811 Create VPORT failed: vpi x%x\n", vpi); 349 349 lpfc_free_vpi(phba, vpi); 350 350 rc = VPORT_NORESOURCES; ··· 356 356 357 357 if ((status = lpfc_vport_sparm(phba, vport))) { 358 358 if (status == -EINTR) { 359 - lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, 359 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 360 360 "1831 Create VPORT Interrupted.\n"); 361 361 rc = VPORT_ERROR; 362 362 } else { 363 - lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, 363 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 364 364 "1813 Create VPORT failed. " 365 365 "Cannot get sparam\n"); 366 366 rc = VPORT_NORESOURCES; ··· 378 378 379 379 if (!lpfc_valid_wwn_format(phba, &vport->fc_sparam.nodeName, "WWNN") || 380 380 !lpfc_valid_wwn_format(phba, &vport->fc_sparam.portName, "WWPN")) { 381 - lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, 381 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 382 382 "1821 Create VPORT failed. 
" 383 383 "Invalid WWN format\n"); 384 384 lpfc_free_vpi(phba, vpi); ··· 388 388 } 389 389 390 390 if (!lpfc_unique_wwpn(phba, vport)) { 391 - lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, 391 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 392 392 "1823 Create VPORT failed. " 393 393 "Duplicate WWN on HBA\n"); 394 394 lpfc_free_vpi(phba, vpi); ··· 426 426 (pport->fc_flag & FC_VFI_REGISTERED)) { 427 427 rc = lpfc_sli4_init_vpi(vport); 428 428 if (rc) { 429 - lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, 429 + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 430 430 "1838 Failed to INIT_VPI on vpi %d " 431 431 "status %d\n", vpi, rc); 432 432 rc = VPORT_NORESOURCES; ··· 469 469 lpfc_initial_fdisc(vport); 470 470 } else { 471 471 lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP); 472 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 472 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 473 473 "0262 No NPIV Fabric support\n"); 474 474 } 475 475 } else { ··· 478 478 rc = VPORT_OK; 479 479 480 480 out: 481 - lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, 482 - "1825 Vport Created.\n"); 481 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 482 + "1825 Vport Created.\n"); 483 483 lpfc_host_attrib_init(lpfc_shost_from_vport(vport)); 484 484 error_out: 485 485 return rc; ··· 534 534 } 535 535 536 536 lpfc_vport_set_state(vport, FC_VPORT_DISABLED); 537 - lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, 537 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 538 538 "1826 Vport Disabled.\n"); 539 539 return VPORT_OK; 540 540 } ··· 575 575 lpfc_initial_fdisc(vport); 576 576 } else { 577 577 lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP); 578 - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 578 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 579 579 "0264 No NPIV Fabric support\n"); 580 580 } 581 581 } else { ··· 583 583 } 584 584 585 585 out: 586 - lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, 586 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 587 587 "1827 Vport 
Enabled.\n"); 588 588 return VPORT_OK; 589 589 } ··· 609 609 bool ns_ndlp_referenced = false; 610 610 611 611 if (vport->port_type == LPFC_PHYSICAL_PORT) { 612 - lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, 612 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 613 613 "1812 vport_delete failed: Cannot delete " 614 614 "physical host\n"); 615 615 return VPORT_ERROR; ··· 618 618 /* If the vport is a static vport fail the deletion. */ 619 619 if ((vport->vport_flag & STATIC_VPORT) && 620 620 !(phba->pport->load_flag & FC_UNLOADING)) { 621 - lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, 621 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 622 622 "1837 vport_delete failed: Cannot delete " 623 623 "static vport.\n"); 624 624 return VPORT_ERROR; ··· 807 807 spin_lock_irq(&phba->port_list_lock); 808 808 list_del_init(&vport->listentry); 809 809 spin_unlock_irq(&phba->port_list_lock); 810 - lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, 810 + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 811 811 "1828 Vport Deleted.\n"); 812 812 scsi_host_put(shost); 813 813 return VPORT_OK; ··· 828 828 if (port_iterator->load_flag & FC_UNLOADING) 829 829 continue; 830 830 if (!scsi_host_get(lpfc_shost_from_vport(port_iterator))) { 831 - lpfc_printf_vlog(port_iterator, KERN_ERR, LOG_VPORT, 831 + lpfc_printf_vlog(port_iterator, KERN_ERR, 832 + LOG_TRACE_EVENT, 832 833 "1801 Create vport work array FAILED: " 833 834 "cannot do scsi_host_get\n"); 834 835 continue; ··· 899 898 GFP_ATOMIC); 900 899 901 900 if (!ndlp->lat_data) 902 - lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 901 + lpfc_printf_vlog(vport, KERN_ERR, 902 + LOG_TRACE_EVENT, 903 903 "0287 lpfc_alloc_bucket failed to " 904 904 "allocate statistical data buffer DID " 905 905 "0x%x\n", ndlp->nlp_DID);