Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

scsi: lpfc: NVME Target: Add debugfs support

NVME Target: Add debugfs support

Adds debugfs snippets to cover the new NVME target functionality.

Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>

Authored by James Smart; committed by Martin K. Petersen.
2b65e182 d613b6a7

+628 -5
+300 -3
drivers/scsi/lpfc/lpfc_debugfs.c
··· 47 47 #include "lpfc.h" 48 48 #include "lpfc_scsi.h" 49 49 #include "lpfc_nvme.h" 50 + #include "lpfc_nvmet.h" 50 51 #include "lpfc_logmsg.h" 51 52 #include "lpfc_crtn.h" 52 53 #include "lpfc_vport.h" ··· 544 543 int len = 0; 545 544 int cnt; 546 545 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 546 + struct lpfc_hba *phba = vport->phba; 547 547 struct lpfc_nodelist *ndlp; 548 548 unsigned char *statep; 549 549 struct nvme_fc_local_port *localport; 550 550 struct lpfc_nvme_lport *lport; 551 551 struct lpfc_nvme_rport *rport; 552 + struct lpfc_nvmet_tgtport *tgtp; 552 553 struct nvme_fc_remote_port *nrport; 553 554 554 555 cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE); ··· 628 625 len += snprintf(buf+len, size-len, "\n"); 629 626 } 630 627 spin_unlock_irq(shost->host_lock); 628 + 629 + if (phba->nvmet_support && phba->targetport && (vport == phba->pport)) { 630 + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 631 + len += snprintf(buf + len, size - len, 632 + "\nNVME Targetport Entry ...\n"); 633 + 634 + /* Port state is only one of two values for now. 
*/ 635 + if (phba->targetport->port_id) 636 + statep = "REGISTERED"; 637 + else 638 + statep = "INIT"; 639 + len += snprintf(buf + len, size - len, 640 + "TGT WWNN x%llx WWPN x%llx State %s\n", 641 + wwn_to_u64(vport->fc_nodename.u.wwn), 642 + wwn_to_u64(vport->fc_portname.u.wwn), 643 + statep); 644 + len += snprintf(buf + len, size - len, 645 + " Targetport DID x%06x\n", 646 + phba->targetport->port_id); 647 + goto out_exit; 648 + } 631 649 632 650 len += snprintf(buf + len, size - len, 633 651 "\nNVME Lport/Rport Entries ...\n"); ··· 742 718 lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) 743 719 { 744 720 struct lpfc_hba *phba = vport->phba; 721 + struct lpfc_nvmet_tgtport *tgtp; 745 722 int len = 0; 746 723 747 - if (phba->nvmet_support == 0) { 724 + if (phba->nvmet_support) { 725 + if (!phba->targetport) 726 + return len; 727 + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 728 + len += snprintf(buf+len, size-len, 729 + "\nNVME Targetport Statistics\n"); 730 + 731 + len += snprintf(buf+len, size-len, 732 + "LS: Rcv %08x Drop %08x Abort %08x\n", 733 + atomic_read(&tgtp->rcv_ls_req_in), 734 + atomic_read(&tgtp->rcv_ls_req_drop), 735 + atomic_read(&tgtp->xmt_ls_abort)); 736 + if (atomic_read(&tgtp->rcv_ls_req_in) != 737 + atomic_read(&tgtp->rcv_ls_req_out)) { 738 + len += snprintf(buf+len, size-len, 739 + "Rcv LS: in %08x != out %08x\n", 740 + atomic_read(&tgtp->rcv_ls_req_in), 741 + atomic_read(&tgtp->rcv_ls_req_out)); 742 + } 743 + 744 + len += snprintf(buf+len, size-len, 745 + "LS: Xmt %08x Drop %08x Cmpl %08x Err %08x\n", 746 + atomic_read(&tgtp->xmt_ls_rsp), 747 + atomic_read(&tgtp->xmt_ls_drop), 748 + atomic_read(&tgtp->xmt_ls_rsp_cmpl), 749 + atomic_read(&tgtp->xmt_ls_rsp_error)); 750 + 751 + len += snprintf(buf+len, size-len, 752 + "FCP: Rcv %08x Drop %08x\n", 753 + atomic_read(&tgtp->rcv_fcp_cmd_in), 754 + atomic_read(&tgtp->rcv_fcp_cmd_drop)); 755 + 756 + if (atomic_read(&tgtp->rcv_fcp_cmd_in) != 757 + 
atomic_read(&tgtp->rcv_fcp_cmd_out)) { 758 + len += snprintf(buf+len, size-len, 759 + "Rcv FCP: in %08x != out %08x\n", 760 + atomic_read(&tgtp->rcv_fcp_cmd_in), 761 + atomic_read(&tgtp->rcv_fcp_cmd_out)); 762 + } 763 + 764 + len += snprintf(buf+len, size-len, 765 + "FCP Rsp: read %08x readrsp %08x write %08x rsp %08x\n", 766 + atomic_read(&tgtp->xmt_fcp_read), 767 + atomic_read(&tgtp->xmt_fcp_read_rsp), 768 + atomic_read(&tgtp->xmt_fcp_write), 769 + atomic_read(&tgtp->xmt_fcp_rsp)); 770 + 771 + len += snprintf(buf+len, size-len, 772 + "FCP Rsp: abort %08x drop %08x\n", 773 + atomic_read(&tgtp->xmt_fcp_abort), 774 + atomic_read(&tgtp->xmt_fcp_drop)); 775 + 776 + len += snprintf(buf+len, size-len, 777 + "FCP Rsp Cmpl: %08x err %08x drop %08x\n", 778 + atomic_read(&tgtp->xmt_fcp_rsp_cmpl), 779 + atomic_read(&tgtp->xmt_fcp_rsp_error), 780 + atomic_read(&tgtp->xmt_fcp_rsp_drop)); 781 + 782 + len += snprintf(buf+len, size-len, 783 + "ABORT: Xmt %08x Err %08x Cmpl %08x", 784 + atomic_read(&tgtp->xmt_abort_rsp), 785 + atomic_read(&tgtp->xmt_abort_rsp_error), 786 + atomic_read(&tgtp->xmt_abort_cmpl)); 787 + 788 + len += snprintf(buf+len, size-len, "\n"); 789 + } else { 748 790 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) 749 791 return len; 750 792 ··· 918 828 phba->ktime_data_samples)); 919 829 return len; 920 830 } 831 + 832 + /* NVME Target */ 833 + len += snprintf(buf + len, PAGE_SIZE-len, 834 + "ktime %s: Total Samples: %lld %lld\n", 835 + (phba->ktime_on ? 
"Enabled" : "Disabled"), 836 + phba->ktime_data_samples, 837 + phba->ktime_status_samples); 838 + if (phba->ktime_data_samples == 0) 839 + return len; 840 + 841 + len += snprintf(buf + len, PAGE_SIZE-len, 842 + "Segment 1: MSI-X ISR Rcv cmd -to- " 843 + "cmd pass to NVME Layer\n"); 844 + len += snprintf(buf + len, PAGE_SIZE-len, 845 + "avg:%08lld min:%08lld max %08lld\n", 846 + phba->ktime_seg1_total / 847 + phba->ktime_data_samples, 848 + phba->ktime_seg1_min, 849 + phba->ktime_seg1_max); 850 + len += snprintf(buf + len, PAGE_SIZE-len, 851 + "Segment 2: cmd pass to NVME Layer- " 852 + "-to- Driver rcv cmd OP (action)\n"); 853 + len += snprintf(buf + len, PAGE_SIZE-len, 854 + "avg:%08lld min:%08lld max %08lld\n", 855 + phba->ktime_seg2_total / 856 + phba->ktime_data_samples, 857 + phba->ktime_seg2_min, 858 + phba->ktime_seg2_max); 859 + len += snprintf(buf + len, PAGE_SIZE-len, 860 + "Segment 3: Driver rcv cmd OP -to- " 861 + "Firmware WQ doorbell: cmd\n"); 862 + len += snprintf(buf + len, PAGE_SIZE-len, 863 + "avg:%08lld min:%08lld max %08lld\n", 864 + phba->ktime_seg3_total / 865 + phba->ktime_data_samples, 866 + phba->ktime_seg3_min, 867 + phba->ktime_seg3_max); 868 + len += snprintf(buf + len, PAGE_SIZE-len, 869 + "Segment 4: Firmware WQ doorbell: cmd " 870 + "-to- MSI-X ISR for cmd cmpl\n"); 871 + len += snprintf(buf + len, PAGE_SIZE-len, 872 + "avg:%08lld min:%08lld max %08lld\n", 873 + phba->ktime_seg4_total / 874 + phba->ktime_data_samples, 875 + phba->ktime_seg4_min, 876 + phba->ktime_seg4_max); 877 + len += snprintf(buf + len, PAGE_SIZE-len, 878 + "Segment 5: MSI-X ISR for cmd cmpl " 879 + "-to- NVME layer passed cmd done\n"); 880 + len += snprintf(buf + len, PAGE_SIZE-len, 881 + "avg:%08lld min:%08lld max %08lld\n", 882 + phba->ktime_seg5_total / 883 + phba->ktime_data_samples, 884 + phba->ktime_seg5_min, 885 + phba->ktime_seg5_max); 886 + 887 + if (phba->ktime_status_samples == 0) { 888 + len += snprintf(buf + len, PAGE_SIZE-len, 889 + "Total: cmd 
received by MSI-X ISR " 890 + "-to- cmd completed on wire\n"); 891 + len += snprintf(buf + len, PAGE_SIZE-len, 892 + "avg:%08lld min:%08lld " 893 + "max %08lld\n", 894 + phba->ktime_seg10_total / 895 + phba->ktime_data_samples, 896 + phba->ktime_seg10_min, 897 + phba->ktime_seg10_max); 898 + return len; 899 + } 900 + 901 + len += snprintf(buf + len, PAGE_SIZE-len, 902 + "Segment 6: NVME layer passed cmd done " 903 + "-to- Driver rcv rsp status OP\n"); 904 + len += snprintf(buf + len, PAGE_SIZE-len, 905 + "avg:%08lld min:%08lld max %08lld\n", 906 + phba->ktime_seg6_total / 907 + phba->ktime_status_samples, 908 + phba->ktime_seg6_min, 909 + phba->ktime_seg6_max); 910 + len += snprintf(buf + len, PAGE_SIZE-len, 911 + "Segment 7: Driver rcv rsp status OP " 912 + "-to- Firmware WQ doorbell: status\n"); 913 + len += snprintf(buf + len, PAGE_SIZE-len, 914 + "avg:%08lld min:%08lld max %08lld\n", 915 + phba->ktime_seg7_total / 916 + phba->ktime_status_samples, 917 + phba->ktime_seg7_min, 918 + phba->ktime_seg7_max); 919 + len += snprintf(buf + len, PAGE_SIZE-len, 920 + "Segment 8: Firmware WQ doorbell: status" 921 + " -to- MSI-X ISR for status cmpl\n"); 922 + len += snprintf(buf + len, PAGE_SIZE-len, 923 + "avg:%08lld min:%08lld max %08lld\n", 924 + phba->ktime_seg8_total / 925 + phba->ktime_status_samples, 926 + phba->ktime_seg8_min, 927 + phba->ktime_seg8_max); 928 + len += snprintf(buf + len, PAGE_SIZE-len, 929 + "Segment 9: MSI-X ISR for status cmpl " 930 + "-to- NVME layer passed status done\n"); 931 + len += snprintf(buf + len, PAGE_SIZE-len, 932 + "avg:%08lld min:%08lld max %08lld\n", 933 + phba->ktime_seg9_total / 934 + phba->ktime_status_samples, 935 + phba->ktime_seg9_min, 936 + phba->ktime_seg9_max); 937 + len += snprintf(buf + len, PAGE_SIZE-len, 938 + "Total: cmd received by MSI-X ISR -to- " 939 + "cmd completed on wire\n"); 940 + len += snprintf(buf + len, PAGE_SIZE-len, 941 + "avg:%08lld min:%08lld max %08lld\n", 942 + phba->ktime_seg10_total / 943 + 
phba->ktime_status_samples, 944 + phba->ktime_seg10_min, 945 + phba->ktime_seg10_max); 921 946 return len; 922 947 } 923 948 ··· 1158 953 int i; 1159 954 int len = 0; 1160 955 uint32_t tot_xmt = 0; 956 + uint32_t tot_rcv = 0; 1161 957 uint32_t tot_cmpl = 0; 958 + uint32_t tot_ccmpl = 0; 1162 959 1163 960 if (phba->nvmet_support == 0) { 1164 961 /* NVME Initiator */ ··· 1184 977 return len; 1185 978 } 1186 979 980 + /* NVME Target */ 981 + len += snprintf(buf + len, PAGE_SIZE - len, 982 + "CPUcheck %s ", 983 + (phba->cpucheck_on & LPFC_CHECK_NVMET_IO ? 984 + "IO Enabled - " : "IO Disabled - ")); 985 + len += snprintf(buf + len, PAGE_SIZE - len, 986 + "%s\n", 987 + (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV ? 988 + "Rcv Enabled\n" : "Rcv Disabled\n")); 989 + for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 990 + if (i >= LPFC_CHECK_CPU_CNT) 991 + break; 992 + len += snprintf(buf + len, PAGE_SIZE - len, 993 + "%02d: xmit x%08x ccmpl x%08x " 994 + "cmpl x%08x rcv x%08x\n", 995 + i, phba->cpucheck_xmt_io[i], 996 + phba->cpucheck_ccmpl_io[i], 997 + phba->cpucheck_cmpl_io[i], 998 + phba->cpucheck_rcv_io[i]); 999 + tot_xmt += phba->cpucheck_xmt_io[i]; 1000 + tot_rcv += phba->cpucheck_rcv_io[i]; 1001 + tot_cmpl += phba->cpucheck_cmpl_io[i]; 1002 + tot_ccmpl += phba->cpucheck_ccmpl_io[i]; 1003 + } 1004 + len += snprintf(buf + len, PAGE_SIZE - len, 1005 + "tot:xmit x%08x ccmpl x%08x cmpl x%08x rcv x%08x\n", 1006 + tot_xmt, tot_ccmpl, tot_cmpl, tot_rcv); 1187 1007 return len; 1188 1008 } 1189 1009 ··· 1894 1660 return rc; 1895 1661 } 1896 1662 1663 + static ssize_t 1664 + lpfc_debugfs_nvmestat_write(struct file *file, const char __user *buf, 1665 + size_t nbytes, loff_t *ppos) 1666 + { 1667 + struct lpfc_debug *debug = file->private_data; 1668 + struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private; 1669 + struct lpfc_hba *phba = vport->phba; 1670 + struct lpfc_nvmet_tgtport *tgtp; 1671 + char mybuf[64]; 1672 + char *pbuf; 1673 + 1674 + if (!phba->targetport) 
1675 + return -ENXIO; 1676 + 1677 + if (nbytes > 64) 1678 + nbytes = 64; 1679 + 1680 + /* Protect copy from user */ 1681 + if (!access_ok(VERIFY_READ, buf, nbytes)) 1682 + return -EFAULT; 1683 + 1684 + memset(mybuf, 0, sizeof(mybuf)); 1685 + 1686 + if (copy_from_user(mybuf, buf, nbytes)) 1687 + return -EFAULT; 1688 + pbuf = &mybuf[0]; 1689 + 1690 + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 1691 + if ((strncmp(pbuf, "reset", strlen("reset")) == 0) || 1692 + (strncmp(pbuf, "zero", strlen("zero")) == 0)) { 1693 + atomic_set(&tgtp->rcv_ls_req_in, 0); 1694 + atomic_set(&tgtp->rcv_ls_req_out, 0); 1695 + atomic_set(&tgtp->rcv_ls_req_drop, 0); 1696 + atomic_set(&tgtp->xmt_ls_abort, 0); 1697 + atomic_set(&tgtp->xmt_ls_rsp, 0); 1698 + atomic_set(&tgtp->xmt_ls_drop, 0); 1699 + atomic_set(&tgtp->xmt_ls_rsp_error, 0); 1700 + atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0); 1701 + 1702 + atomic_set(&tgtp->rcv_fcp_cmd_in, 0); 1703 + atomic_set(&tgtp->rcv_fcp_cmd_out, 0); 1704 + atomic_set(&tgtp->rcv_fcp_cmd_drop, 0); 1705 + atomic_set(&tgtp->xmt_fcp_abort, 0); 1706 + atomic_set(&tgtp->xmt_fcp_drop, 0); 1707 + atomic_set(&tgtp->xmt_fcp_read_rsp, 0); 1708 + atomic_set(&tgtp->xmt_fcp_read, 0); 1709 + atomic_set(&tgtp->xmt_fcp_write, 0); 1710 + atomic_set(&tgtp->xmt_fcp_rsp, 0); 1711 + atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0); 1712 + atomic_set(&tgtp->xmt_fcp_rsp_error, 0); 1713 + atomic_set(&tgtp->xmt_fcp_rsp_drop, 0); 1714 + 1715 + atomic_set(&tgtp->xmt_abort_rsp, 0); 1716 + atomic_set(&tgtp->xmt_abort_rsp_error, 0); 1717 + atomic_set(&tgtp->xmt_abort_cmpl, 0); 1718 + } 1719 + return nbytes; 1720 + } 1721 + 1897 1722 static int 1898 1723 lpfc_debugfs_nvmektime_open(struct inode *inode, struct file *file) 1899 1724 { ··· 2249 1956 pbuf = &mybuf[0]; 2250 1957 2251 1958 if ((strncmp(pbuf, "on", sizeof("on") - 1) == 0)) { 2252 - phba->cpucheck_on |= LPFC_CHECK_NVME_IO; 1959 + if (phba->nvmet_support) 1960 + phba->cpucheck_on |= LPFC_CHECK_NVMET_IO; 1961 + else 1962 + 
phba->cpucheck_on |= LPFC_CHECK_NVME_IO; 2253 1963 return strlen(pbuf); 2254 1964 } else if ((strncmp(pbuf, "rcv", 2255 1965 sizeof("rcv") - 1) == 0)) { ··· 3176 2880 return 1; 3177 2881 } 3178 2882 3179 - if (phba->cfg_nvmet_mrq > eqidx) { 2883 + if (eqidx < phba->cfg_nvmet_mrq) { 3180 2884 /* NVMET CQset */ 3181 2885 qp = phba->sli4_hba.nvmet_cqset[eqidx]; 3182 2886 *len = __lpfc_idiag_print_cq(qp, "NVMET CQset", pbuffer, *len); ··· 4789 4493 .open = lpfc_debugfs_nvmestat_open, 4790 4494 .llseek = lpfc_debugfs_lseek, 4791 4495 .read = lpfc_debugfs_read, 4496 + .write = lpfc_debugfs_nvmestat_write, 4792 4497 .release = lpfc_debugfs_release, 4793 4498 }; 4794 4499
+315 -2
drivers/scsi/lpfc/lpfc_nvmet.c
··· 51 51 #include "lpfc_logmsg.h" 52 52 #include "lpfc_crtn.h" 53 53 #include "lpfc_vport.h" 54 + #include "lpfc_debugfs.h" 54 55 55 56 static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *, 56 57 struct lpfc_nvmet_rcv_ctx *, ··· 104 103 ctxp = cmdwqe->context2; 105 104 rsp = &ctxp->ctx.ls_req; 106 105 106 + lpfc_nvmeio_data(phba, "NVMET LS CMPL: xri x%x stat x%x result x%x\n", 107 + ctxp->oxid, status, result); 108 + 107 109 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, 108 110 "6038 %s: Entrypoint: ctx %p status %x/%x\n", __func__, 109 111 ctxp, status, result); ··· 148 144 lpfc_rq_buf_free(phba, mp); 149 145 } 150 146 147 + #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 148 + static void 149 + lpfc_nvmet_ktime(struct lpfc_hba *phba, 150 + struct lpfc_nvmet_rcv_ctx *ctxp) 151 + { 152 + uint64_t seg1, seg2, seg3, seg4, seg5; 153 + uint64_t seg6, seg7, seg8, seg9, seg10; 154 + 155 + if (!phba->ktime_on) 156 + return; 157 + 158 + if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme || 159 + !ctxp->ts_nvme_data || !ctxp->ts_data_wqput || 160 + !ctxp->ts_isr_data || !ctxp->ts_data_nvme || 161 + !ctxp->ts_nvme_status || !ctxp->ts_status_wqput || 162 + !ctxp->ts_isr_status || !ctxp->ts_status_nvme) 163 + return; 164 + 165 + if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme) 166 + return; 167 + if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data) 168 + return; 169 + if (ctxp->ts_nvme_data > ctxp->ts_data_wqput) 170 + return; 171 + if (ctxp->ts_data_wqput > ctxp->ts_isr_data) 172 + return; 173 + if (ctxp->ts_isr_data > ctxp->ts_data_nvme) 174 + return; 175 + if (ctxp->ts_data_nvme > ctxp->ts_nvme_status) 176 + return; 177 + if (ctxp->ts_nvme_status > ctxp->ts_status_wqput) 178 + return; 179 + if (ctxp->ts_status_wqput > ctxp->ts_isr_status) 180 + return; 181 + if (ctxp->ts_isr_status > ctxp->ts_status_nvme) 182 + return; 183 + /* 184 + * Segment 1 - Time from FCP command received by MSI-X ISR 185 + * to FCP command is passed to NVME Layer. 
186 + * Segment 2 - Time from FCP command payload handed 187 + * off to NVME Layer to Driver receives a Command op 188 + * from NVME Layer. 189 + * Segment 3 - Time from Driver receives a Command op 190 + * from NVME Layer to Command is put on WQ. 191 + * Segment 4 - Time from Driver WQ put is done 192 + * to MSI-X ISR for Command cmpl. 193 + * Segment 5 - Time from MSI-X ISR for Command cmpl to 194 + * Command cmpl is passed to NVME Layer. 195 + * Segment 6 - Time from Command cmpl is passed to NVME 196 + * Layer to Driver receives a RSP op from NVME Layer. 197 + * Segment 7 - Time from Driver receives a RSP op from 198 + * NVME Layer to WQ put is done on TRSP FCP Status. 199 + * Segment 8 - Time from Driver WQ put is done on TRSP 200 + * FCP Status to MSI-X ISR for TRSP cmpl. 201 + * Segment 9 - Time from MSI-X ISR for TRSP cmpl to 202 + * TRSP cmpl is passed to NVME Layer. 203 + * Segment 10 - Time from FCP command received by 204 + * MSI-X ISR to command is completed on wire. 205 + * (Segments 1 thru 8) for READDATA / WRITEDATA 206 + * (Segments 1 thru 4) for READDATA_RSP 207 + */ 208 + seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd; 209 + seg2 = (ctxp->ts_nvme_data - ctxp->ts_isr_cmd) - seg1; 210 + seg3 = (ctxp->ts_data_wqput - ctxp->ts_isr_cmd) - 211 + seg1 - seg2; 212 + seg4 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd) - 213 + seg1 - seg2 - seg3; 214 + seg5 = (ctxp->ts_data_nvme - ctxp->ts_isr_cmd) - 215 + seg1 - seg2 - seg3 - seg4; 216 + 217 + /* For auto rsp commands seg6 thru seg10 will be 0 */ 218 + if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) { 219 + seg6 = (ctxp->ts_nvme_status - 220 + ctxp->ts_isr_cmd) - 221 + seg1 - seg2 - seg3 - seg4 - seg5; 222 + seg7 = (ctxp->ts_status_wqput - 223 + ctxp->ts_isr_cmd) - 224 + seg1 - seg2 - seg3 - 225 + seg4 - seg5 - seg6; 226 + seg8 = (ctxp->ts_isr_status - 227 + ctxp->ts_isr_cmd) - 228 + seg1 - seg2 - seg3 - seg4 - 229 + seg5 - seg6 - seg7; 230 + seg9 = (ctxp->ts_status_nvme - 231 + ctxp->ts_isr_cmd) - 232 + seg1 - 
seg2 - seg3 - seg4 - 233 + seg5 - seg6 - seg7 - seg8; 234 + seg10 = (ctxp->ts_isr_status - 235 + ctxp->ts_isr_cmd); 236 + } else { 237 + seg6 = 0; 238 + seg7 = 0; 239 + seg8 = 0; 240 + seg9 = 0; 241 + seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd); 242 + } 243 + 244 + phba->ktime_seg1_total += seg1; 245 + if (seg1 < phba->ktime_seg1_min) 246 + phba->ktime_seg1_min = seg1; 247 + else if (seg1 > phba->ktime_seg1_max) 248 + phba->ktime_seg1_max = seg1; 249 + 250 + phba->ktime_seg2_total += seg2; 251 + if (seg2 < phba->ktime_seg2_min) 252 + phba->ktime_seg2_min = seg2; 253 + else if (seg2 > phba->ktime_seg2_max) 254 + phba->ktime_seg2_max = seg2; 255 + 256 + phba->ktime_seg3_total += seg3; 257 + if (seg3 < phba->ktime_seg3_min) 258 + phba->ktime_seg3_min = seg3; 259 + else if (seg3 > phba->ktime_seg3_max) 260 + phba->ktime_seg3_max = seg3; 261 + 262 + phba->ktime_seg4_total += seg4; 263 + if (seg4 < phba->ktime_seg4_min) 264 + phba->ktime_seg4_min = seg4; 265 + else if (seg4 > phba->ktime_seg4_max) 266 + phba->ktime_seg4_max = seg4; 267 + 268 + phba->ktime_seg5_total += seg5; 269 + if (seg5 < phba->ktime_seg5_min) 270 + phba->ktime_seg5_min = seg5; 271 + else if (seg5 > phba->ktime_seg5_max) 272 + phba->ktime_seg5_max = seg5; 273 + 274 + phba->ktime_data_samples++; 275 + if (!seg6) 276 + goto out; 277 + 278 + phba->ktime_seg6_total += seg6; 279 + if (seg6 < phba->ktime_seg6_min) 280 + phba->ktime_seg6_min = seg6; 281 + else if (seg6 > phba->ktime_seg6_max) 282 + phba->ktime_seg6_max = seg6; 283 + 284 + phba->ktime_seg7_total += seg7; 285 + if (seg7 < phba->ktime_seg7_min) 286 + phba->ktime_seg7_min = seg7; 287 + else if (seg7 > phba->ktime_seg7_max) 288 + phba->ktime_seg7_max = seg7; 289 + 290 + phba->ktime_seg8_total += seg8; 291 + if (seg8 < phba->ktime_seg8_min) 292 + phba->ktime_seg8_min = seg8; 293 + else if (seg8 > phba->ktime_seg8_max) 294 + phba->ktime_seg8_max = seg8; 295 + 296 + phba->ktime_seg9_total += seg9; 297 + if (seg9 < phba->ktime_seg9_min) 298 + 
phba->ktime_seg9_min = seg9; 299 + else if (seg9 > phba->ktime_seg9_max) 300 + phba->ktime_seg9_max = seg9; 301 + out: 302 + phba->ktime_seg10_total += seg10; 303 + if (seg10 < phba->ktime_seg10_min) 304 + phba->ktime_seg10_min = seg10; 305 + else if (seg10 > phba->ktime_seg10_max) 306 + phba->ktime_seg10_max = seg10; 307 + phba->ktime_status_samples++; 308 + } 309 + #endif 310 + 151 311 /** 152 312 * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response 153 313 * @phba: Pointer to HBA context object. ··· 330 162 struct nvmefc_tgt_fcp_req *rsp; 331 163 struct lpfc_nvmet_rcv_ctx *ctxp; 332 164 uint32_t status, result, op, start_clean; 165 + #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 166 + uint32_t id; 167 + #endif 333 168 334 169 ctxp = cmdwqe->context2; 335 170 rsp = &ctxp->ctx.fcp_req; ··· 344 173 345 174 if (!phba->targetport) 346 175 goto out; 176 + 177 + lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n", 178 + ctxp->oxid, op, status); 347 179 348 180 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 349 181 if (status) { ··· 368 194 /* Sanity check */ 369 195 ctxp->state = LPFC_NVMET_STE_DONE; 370 196 ctxp->entry_cnt++; 197 + #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 198 + if (phba->ktime_on) { 199 + if (rsp->op == NVMET_FCOP_READDATA_RSP) { 200 + ctxp->ts_isr_data = 201 + cmdwqe->isr_timestamp; 202 + ctxp->ts_data_nvme = 203 + ktime_get_ns(); 204 + ctxp->ts_nvme_status = 205 + ctxp->ts_data_nvme; 206 + ctxp->ts_status_wqput = 207 + ctxp->ts_data_nvme; 208 + ctxp->ts_isr_status = 209 + ctxp->ts_data_nvme; 210 + ctxp->ts_status_nvme = 211 + ctxp->ts_data_nvme; 212 + } else { 213 + ctxp->ts_isr_status = 214 + cmdwqe->isr_timestamp; 215 + ctxp->ts_status_nvme = 216 + ktime_get_ns(); 217 + } 218 + } 219 + if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) { 220 + id = smp_processor_id(); 221 + if (ctxp->cpu != id) 222 + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 223 + "6703 CPU Check cmpl: " 224 + "cpu %d expect %d\n", 225 + id, ctxp->cpu); 
226 + if (ctxp->cpu < LPFC_CHECK_CPU_CNT) 227 + phba->cpucheck_cmpl_io[id]++; 228 + } 229 + #endif 371 230 rsp->done(rsp); 231 + #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 232 + if (phba->ktime_on) 233 + lpfc_nvmet_ktime(phba, ctxp); 234 + #endif 372 235 /* Let Abort cmpl repost the context */ 373 236 if (!(ctxp->flag & LPFC_NVMET_ABORT_OP)) 374 237 lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf); ··· 414 203 start_clean = offsetof(struct lpfc_iocbq, wqe); 415 204 memset(((char *)cmdwqe) + start_clean, 0, 416 205 (sizeof(struct lpfc_iocbq) - start_clean)); 206 + #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 207 + if (phba->ktime_on) { 208 + ctxp->ts_isr_data = cmdwqe->isr_timestamp; 209 + ctxp->ts_data_nvme = ktime_get_ns(); 210 + } 211 + if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) { 212 + id = smp_processor_id(); 213 + if (ctxp->cpu != id) 214 + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 215 + "6704 CPU Check cmdcmpl: " 216 + "cpu %d expect %d\n", 217 + id, ctxp->cpu); 218 + if (ctxp->cpu < LPFC_CHECK_CPU_CNT) 219 + phba->cpucheck_ccmpl_io[id]++; 220 + } 221 + #endif 417 222 rsp->done(rsp); 418 223 } 419 224 } ··· 481 254 nvmewqeq->iocb_cmpl = NULL; 482 255 nvmewqeq->context2 = ctxp; 483 256 257 + lpfc_nvmeio_data(phba, "NVMET LS RESP: xri x%x wqidx x%x len x%x\n", 258 + ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen); 259 + 484 260 rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, nvmewqeq); 485 261 if (rc == WQE_SUCCESS) { 486 262 /* ··· 518 288 struct lpfc_hba *phba = ctxp->phba; 519 289 struct lpfc_iocbq *nvmewqeq; 520 290 unsigned long iflags; 521 - int rc; 291 + int rc, id; 292 + 293 + #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 294 + if (phba->ktime_on) { 295 + if (rsp->op == NVMET_FCOP_RSP) 296 + ctxp->ts_nvme_status = ktime_get_ns(); 297 + else 298 + ctxp->ts_nvme_data = ktime_get_ns(); 299 + } 300 + if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) { 301 + id = smp_processor_id(); 302 + ctxp->cpu = id; 303 + if (id < LPFC_CHECK_CPU_CNT) 304 + phba->cpucheck_xmt_io[id]++; 305 + if 
(rsp->hwqid != id) { 306 + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 307 + "6705 CPU Check OP: " 308 + "cpu %d expect %d\n", 309 + id, rsp->hwqid); 310 + ctxp->cpu = rsp->hwqid; 311 + } 312 + } 313 + #endif 522 314 523 315 if (rsp->op == NVMET_FCOP_ABORT) { 524 316 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 525 317 "6103 Abort op: oxri x%x %d cnt %d\n", 526 318 ctxp->oxid, ctxp->state, ctxp->entry_cnt); 319 + 320 + lpfc_nvmeio_data(phba, "NVMET FCP ABRT: " 321 + "xri x%x state x%x cnt x%x\n", 322 + ctxp->oxid, ctxp->state, ctxp->entry_cnt); 323 + 527 324 atomic_inc(&lpfc_nvmep->xmt_fcp_abort); 528 325 ctxp->entry_cnt++; 529 326 ctxp->flag |= LPFC_NVMET_ABORT_OP; ··· 587 330 nvmewqeq->iocb_flag |= LPFC_IO_NVMET; 588 331 ctxp->wqeq->hba_wqidx = rsp->hwqid; 589 332 333 + lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n", 334 + ctxp->oxid, rsp->op, rsp->rsplen); 335 + 590 336 /* For now we take hbalock */ 591 337 spin_lock_irqsave(&phba->hbalock, iflags); 592 338 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq); 593 339 spin_unlock_irqrestore(&phba->hbalock, iflags); 594 340 if (rc == WQE_SUCCESS) { 595 341 ctxp->flag |= LPFC_NVMET_IO_INP; 342 + #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 343 + if (!phba->ktime_on) 344 + return 0; 345 + if (rsp->op == NVMET_FCOP_RSP) 346 + ctxp->ts_status_wqput = ktime_get_ns(); 347 + else 348 + ctxp->ts_data_wqput = ktime_get_ns(); 349 + #endif 596 350 return 0; 597 351 } 598 352 ··· 771 503 if (!nvmebuf || !phba->targetport) { 772 504 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 773 505 "6154 LS Drop IO\n"); 506 + oxid = 0; 507 + size = 0; 508 + sid = 0; 774 509 goto dropit; 775 510 } 776 511 ··· 791 520 "6155 LS Drop IO x%x: Alloc\n", 792 521 oxid); 793 522 dropit: 523 + lpfc_nvmeio_data(phba, "NVMET LS DROP: " 524 + "xri x%x sz %d from %06x\n", 525 + oxid, size, sid); 794 526 if (nvmebuf) 795 527 lpfc_in_buf_free(phba, &nvmebuf->dbuf); 796 528 return; ··· 805 531 ctxp->wqeq = NULL; 806 532 ctxp->state = 
LPFC_NVMET_STE_RCV; 807 533 ctxp->rqb_buffer = (void *)nvmebuf; 534 + 535 + lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n", 536 + oxid, size, sid); 808 537 /* 809 538 * The calling sequence should be: 810 539 * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp ->_req->done ··· 822 545 "%08x %08x %08x\n", __func__, ctxp, size, rc, 823 546 *payload, *(payload+1), *(payload+2), 824 547 *(payload+3), *(payload+4), *(payload+5)); 548 + 825 549 if (rc == 0) { 826 550 atomic_inc(&tgtp->rcv_ls_req_out); 827 551 return; 828 552 } 553 + 554 + lpfc_nvmeio_data(phba, "NVMET LS DROP: xri x%x sz %d from %06x\n", 555 + oxid, size, sid); 556 + 829 557 atomic_inc(&tgtp->rcv_ls_req_drop); 830 558 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 831 559 "6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n", ··· 868 586 struct fc_frame_header *fc_hdr; 869 587 uint32_t *payload; 870 588 uint32_t size, oxid, sid, rc; 589 + #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 590 + uint32_t id; 591 + #endif 871 592 872 - oxid = 0; 873 593 if (!nvmebuf || !phba->targetport) { 874 594 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 875 595 "6157 FCP Drop IO\n"); 596 + oxid = 0; 597 + size = 0; 598 + sid = 0; 876 599 goto dropit; 877 600 } 878 601 ··· 912 625 ctxp->entry_cnt = 1; 913 626 ctxp->flag = 0; 914 627 628 + #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 629 + if (phba->ktime_on) { 630 + ctxp->ts_isr_cmd = isr_timestamp; 631 + ctxp->ts_cmd_nvme = ktime_get_ns(); 632 + ctxp->ts_nvme_data = 0; 633 + ctxp->ts_data_wqput = 0; 634 + ctxp->ts_isr_data = 0; 635 + ctxp->ts_data_nvme = 0; 636 + ctxp->ts_nvme_status = 0; 637 + ctxp->ts_status_wqput = 0; 638 + ctxp->ts_isr_status = 0; 639 + ctxp->ts_status_nvme = 0; 640 + } 641 + 642 + if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) { 643 + id = smp_processor_id(); 644 + if (id < LPFC_CHECK_CPU_CNT) 645 + phba->cpucheck_rcv_io[id]++; 646 + } 647 + #endif 648 + 649 + lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d from %06x\n", 650 + oxid, size, sid); 651 + 915 
652 atomic_inc(&tgtp->rcv_fcp_cmd_in); 916 653 /* 917 654 * The calling sequence should be: ··· 956 645 "6159 FCP Drop IO x%x: nvmet_fc_rcv_fcp_req x%x\n", 957 646 ctxp->oxid, rc); 958 647 dropit: 648 + lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n", 649 + oxid, size, sid); 959 650 if (oxid) { 960 651 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid); 961 652 return;
+13
drivers/scsi/lpfc/lpfc_nvmet.h
··· 98 98 #define LPFC_NVMET_IO_INP 1 99 99 #define LPFC_NVMET_ABORT_OP 2 100 100 struct rqb_dmabuf *rqb_buffer; 101 + 102 + #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 103 + uint64_t ts_isr_cmd; 104 + uint64_t ts_cmd_nvme; 105 + uint64_t ts_nvme_data; 106 + uint64_t ts_data_wqput; 107 + uint64_t ts_isr_data; 108 + uint64_t ts_data_nvme; 109 + uint64_t ts_nvme_status; 110 + uint64_t ts_status_wqput; 111 + uint64_t ts_isr_status; 112 + uint64_t ts_status_nvme; 113 + #endif 101 114 };