Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

octeon_ep: support Octeon CN10K devices

Add PCI Endpoint NIC support for Octeon CN10K devices.
CN10K devices are part of Octeon 10 family products with
similar PCI NIC characteristics. These include:
- CN10KA
- CNF10KA
- CNF10KB
- CN10KB

Update supported device list in Documentation

Signed-off-by: Shinas Rasheed <srasheed@marvell.com>
Link: https://lore.kernel.org/r/20231117103817.2468176-1-srasheed@marvell.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>

authored by

Shinas Rasheed and committed by
Paolo Abeni
0807dc76 31c54867

+1318 -1
+4
Documentation/networking/device_drivers/ethernet/marvell/octeon_ep.rst
··· 24 24 Currently, this driver support following devices: 25 25 * Network controller: Cavium, Inc. Device b200 26 26 * Network controller: Cavium, Inc. Device b400 27 + * Network controller: Cavium, Inc. Device b900 28 + * Network controller: Cavium, Inc. Device ba00 29 + * Network controller: Cavium, Inc. Device bc00 30 + * Network controller: Cavium, Inc. Device bd00 27 31 28 32 Interface Control 29 33 =================
+2 -1
drivers/net/ethernet/marvell/octeon_ep/Makefile
··· 6 6 obj-$(CONFIG_OCTEON_EP) += octeon_ep.o 7 7 8 8 octeon_ep-y := octep_main.o octep_cn9k_pf.o octep_tx.o octep_rx.o \ 9 - octep_ethtool.o octep_ctrl_mbox.o octep_ctrl_net.o 9 + octep_ethtool.o octep_ctrl_mbox.o octep_ctrl_net.o \ 10 + octep_cnxk_pf.o
+886
drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Marvell Octeon EP (EndPoint) Ethernet Driver 3 + * 4 + * Copyright (C) 2020 Marvell. 5 + * 6 + */ 7 + 8 + #include <linux/pci.h> 9 + #include <linux/netdevice.h> 10 + #include <linux/etherdevice.h> 11 + 12 + #include "octep_config.h" 13 + #include "octep_main.h" 14 + #include "octep_regs_cnxk_pf.h" 15 + 16 + /* We will support 128 pf's in control mbox */ 17 + #define CTRL_MBOX_MAX_PF 128 18 + #define CTRL_MBOX_SZ ((size_t)(0x400000 / CTRL_MBOX_MAX_PF)) 19 + 20 + /* Names of Hardware non-queue generic interrupts */ 21 + static char *cnxk_non_ioq_msix_names[] = { 22 + "epf_ire_rint", 23 + "epf_ore_rint", 24 + "epf_vfire_rint", 25 + "epf_rsvd0", 26 + "epf_vfore_rint", 27 + "epf_rsvd1", 28 + "epf_mbox_rint", 29 + "epf_rsvd2_0", 30 + "epf_rsvd2_1", 31 + "epf_dma_rint", 32 + "epf_dma_vf_rint", 33 + "epf_rsvd3", 34 + "epf_pp_vf_rint", 35 + "epf_rsvd3", 36 + "epf_misc_rint", 37 + "epf_rsvd5", 38 + /* Next 16 are for OEI_RINT */ 39 + "epf_oei_rint0", 40 + "epf_oei_rint1", 41 + "epf_oei_rint2", 42 + "epf_oei_rint3", 43 + "epf_oei_rint4", 44 + "epf_oei_rint5", 45 + "epf_oei_rint6", 46 + "epf_oei_rint7", 47 + "epf_oei_rint8", 48 + "epf_oei_rint9", 49 + "epf_oei_rint10", 50 + "epf_oei_rint11", 51 + "epf_oei_rint12", 52 + "epf_oei_rint13", 53 + "epf_oei_rint14", 54 + "epf_oei_rint15", 55 + /* IOQ interrupt */ 56 + "octeon_ep" 57 + }; 58 + 59 + /* Dump useful hardware CSRs for debug purpose */ 60 + static void cnxk_dump_regs(struct octep_device *oct, int qno) 61 + { 62 + struct device *dev = &oct->pdev->dev; 63 + 64 + dev_info(dev, "IQ-%d register dump\n", qno); 65 + dev_info(dev, "R[%d]_IN_INSTR_DBELL[0x%llx]: 0x%016llx\n", 66 + qno, CNXK_SDP_R_IN_INSTR_DBELL(qno), 67 + octep_read_csr64(oct, CNXK_SDP_R_IN_INSTR_DBELL(qno))); 68 + dev_info(dev, "R[%d]_IN_CONTROL[0x%llx]: 0x%016llx\n", 69 + qno, CNXK_SDP_R_IN_CONTROL(qno), 70 + octep_read_csr64(oct, CNXK_SDP_R_IN_CONTROL(qno))); 71 + dev_info(dev, "R[%d]_IN_ENABLE[0x%llx]: 
0x%016llx\n", 72 + qno, CNXK_SDP_R_IN_ENABLE(qno), 73 + octep_read_csr64(oct, CNXK_SDP_R_IN_ENABLE(qno))); 74 + dev_info(dev, "R[%d]_IN_INSTR_BADDR[0x%llx]: 0x%016llx\n", 75 + qno, CNXK_SDP_R_IN_INSTR_BADDR(qno), 76 + octep_read_csr64(oct, CNXK_SDP_R_IN_INSTR_BADDR(qno))); 77 + dev_info(dev, "R[%d]_IN_INSTR_RSIZE[0x%llx]: 0x%016llx\n", 78 + qno, CNXK_SDP_R_IN_INSTR_RSIZE(qno), 79 + octep_read_csr64(oct, CNXK_SDP_R_IN_INSTR_RSIZE(qno))); 80 + dev_info(dev, "R[%d]_IN_CNTS[0x%llx]: 0x%016llx\n", 81 + qno, CNXK_SDP_R_IN_CNTS(qno), 82 + octep_read_csr64(oct, CNXK_SDP_R_IN_CNTS(qno))); 83 + dev_info(dev, "R[%d]_IN_INT_LEVELS[0x%llx]: 0x%016llx\n", 84 + qno, CNXK_SDP_R_IN_INT_LEVELS(qno), 85 + octep_read_csr64(oct, CNXK_SDP_R_IN_INT_LEVELS(qno))); 86 + dev_info(dev, "R[%d]_IN_PKT_CNT[0x%llx]: 0x%016llx\n", 87 + qno, CNXK_SDP_R_IN_PKT_CNT(qno), 88 + octep_read_csr64(oct, CNXK_SDP_R_IN_PKT_CNT(qno))); 89 + dev_info(dev, "R[%d]_IN_BYTE_CNT[0x%llx]: 0x%016llx\n", 90 + qno, CNXK_SDP_R_IN_BYTE_CNT(qno), 91 + octep_read_csr64(oct, CNXK_SDP_R_IN_BYTE_CNT(qno))); 92 + 93 + dev_info(dev, "OQ-%d register dump\n", qno); 94 + dev_info(dev, "R[%d]_OUT_SLIST_DBELL[0x%llx]: 0x%016llx\n", 95 + qno, CNXK_SDP_R_OUT_SLIST_DBELL(qno), 96 + octep_read_csr64(oct, CNXK_SDP_R_OUT_SLIST_DBELL(qno))); 97 + dev_info(dev, "R[%d]_OUT_CONTROL[0x%llx]: 0x%016llx\n", 98 + qno, CNXK_SDP_R_OUT_CONTROL(qno), 99 + octep_read_csr64(oct, CNXK_SDP_R_OUT_CONTROL(qno))); 100 + dev_info(dev, "R[%d]_OUT_ENABLE[0x%llx]: 0x%016llx\n", 101 + qno, CNXK_SDP_R_OUT_ENABLE(qno), 102 + octep_read_csr64(oct, CNXK_SDP_R_OUT_ENABLE(qno))); 103 + dev_info(dev, "R[%d]_OUT_SLIST_BADDR[0x%llx]: 0x%016llx\n", 104 + qno, CNXK_SDP_R_OUT_SLIST_BADDR(qno), 105 + octep_read_csr64(oct, CNXK_SDP_R_OUT_SLIST_BADDR(qno))); 106 + dev_info(dev, "R[%d]_OUT_SLIST_RSIZE[0x%llx]: 0x%016llx\n", 107 + qno, CNXK_SDP_R_OUT_SLIST_RSIZE(qno), 108 + octep_read_csr64(oct, CNXK_SDP_R_OUT_SLIST_RSIZE(qno))); 109 + dev_info(dev, "R[%d]_OUT_CNTS[0x%llx]: 
0x%016llx\n", 110 + qno, CNXK_SDP_R_OUT_CNTS(qno), 111 + octep_read_csr64(oct, CNXK_SDP_R_OUT_CNTS(qno))); 112 + dev_info(dev, "R[%d]_OUT_INT_LEVELS[0x%llx]: 0x%016llx\n", 113 + qno, CNXK_SDP_R_OUT_INT_LEVELS(qno), 114 + octep_read_csr64(oct, CNXK_SDP_R_OUT_INT_LEVELS(qno))); 115 + dev_info(dev, "R[%d]_OUT_PKT_CNT[0x%llx]: 0x%016llx\n", 116 + qno, CNXK_SDP_R_OUT_PKT_CNT(qno), 117 + octep_read_csr64(oct, CNXK_SDP_R_OUT_PKT_CNT(qno))); 118 + dev_info(dev, "R[%d]_OUT_BYTE_CNT[0x%llx]: 0x%016llx\n", 119 + qno, CNXK_SDP_R_OUT_BYTE_CNT(qno), 120 + octep_read_csr64(oct, CNXK_SDP_R_OUT_BYTE_CNT(qno))); 121 + dev_info(dev, "R[%d]_ERR_TYPE[0x%llx]: 0x%016llx\n", 122 + qno, CNXK_SDP_R_ERR_TYPE(qno), 123 + octep_read_csr64(oct, CNXK_SDP_R_ERR_TYPE(qno))); 124 + } 125 + 126 + /* Reset Hardware Tx queue */ 127 + static int cnxk_reset_iq(struct octep_device *oct, int q_no) 128 + { 129 + struct octep_config *conf = oct->conf; 130 + u64 val = 0ULL; 131 + 132 + dev_dbg(&oct->pdev->dev, "Reset PF IQ-%d\n", q_no); 133 + 134 + /* Get absolute queue number */ 135 + q_no += conf->pf_ring_cfg.srn; 136 + 137 + /* Disable the Tx/Instruction Ring */ 138 + octep_write_csr64(oct, CNXK_SDP_R_IN_ENABLE(q_no), val); 139 + 140 + /* clear the Instruction Ring packet/byte counts and doorbell CSRs */ 141 + octep_write_csr64(oct, CNXK_SDP_R_IN_CNTS(q_no), val); 142 + octep_write_csr64(oct, CNXK_SDP_R_IN_INT_LEVELS(q_no), val); 143 + octep_write_csr64(oct, CNXK_SDP_R_IN_PKT_CNT(q_no), val); 144 + octep_write_csr64(oct, CNXK_SDP_R_IN_BYTE_CNT(q_no), val); 145 + octep_write_csr64(oct, CNXK_SDP_R_IN_INSTR_BADDR(q_no), val); 146 + octep_write_csr64(oct, CNXK_SDP_R_IN_INSTR_RSIZE(q_no), val); 147 + 148 + val = 0xFFFFFFFF; 149 + octep_write_csr64(oct, CNXK_SDP_R_IN_INSTR_DBELL(q_no), val); 150 + 151 + return 0; 152 + } 153 + 154 + /* Reset Hardware Rx queue */ 155 + static void cnxk_reset_oq(struct octep_device *oct, int q_no) 156 + { 157 + u64 val = 0ULL; 158 + 159 + q_no += CFG_GET_PORTS_PF_SRN(oct->conf); 
160 + 161 + /* Disable Output (Rx) Ring */ 162 + octep_write_csr64(oct, CNXK_SDP_R_OUT_ENABLE(q_no), val); 163 + octep_write_csr64(oct, CNXK_SDP_R_OUT_SLIST_BADDR(q_no), val); 164 + octep_write_csr64(oct, CNXK_SDP_R_OUT_SLIST_RSIZE(q_no), val); 165 + octep_write_csr64(oct, CNXK_SDP_R_OUT_INT_LEVELS(q_no), val); 166 + 167 + /* Clear count CSRs */ 168 + val = octep_read_csr(oct, CNXK_SDP_R_OUT_CNTS(q_no)); 169 + octep_write_csr(oct, CNXK_SDP_R_OUT_CNTS(q_no), val); 170 + 171 + octep_write_csr64(oct, CNXK_SDP_R_OUT_PKT_CNT(q_no), 0xFFFFFFFFFULL); 172 + octep_write_csr64(oct, CNXK_SDP_R_OUT_SLIST_DBELL(q_no), 0xFFFFFFFF); 173 + } 174 + 175 + /* Reset all hardware Tx/Rx queues */ 176 + static void octep_reset_io_queues_cnxk_pf(struct octep_device *oct) 177 + { 178 + struct pci_dev *pdev = oct->pdev; 179 + int q; 180 + 181 + dev_dbg(&pdev->dev, "Reset OCTEP_CNXK PF IO Queues\n"); 182 + 183 + for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) { 184 + cnxk_reset_iq(oct, q); 185 + cnxk_reset_oq(oct, q); 186 + } 187 + } 188 + 189 + /* Initialize windowed addresses to access some hardware registers */ 190 + static void octep_setup_pci_window_regs_cnxk_pf(struct octep_device *oct) 191 + { 192 + u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr; 193 + 194 + oct->pci_win_regs.pci_win_wr_addr = (u8 __iomem *)(bar0_pciaddr + CNXK_SDP_WIN_WR_ADDR64); 195 + oct->pci_win_regs.pci_win_rd_addr = (u8 __iomem *)(bar0_pciaddr + CNXK_SDP_WIN_RD_ADDR64); 196 + oct->pci_win_regs.pci_win_wr_data = (u8 __iomem *)(bar0_pciaddr + CNXK_SDP_WIN_WR_DATA64); 197 + oct->pci_win_regs.pci_win_rd_data = (u8 __iomem *)(bar0_pciaddr + CNXK_SDP_WIN_RD_DATA64); 198 + } 199 + 200 + /* Configure Hardware mapping: inform hardware which rings belong to PF. 
*/ 201 + static void octep_configure_ring_mapping_cnxk_pf(struct octep_device *oct) 202 + { 203 + struct octep_config *conf = oct->conf; 204 + struct pci_dev *pdev = oct->pdev; 205 + u64 pf_srn = CFG_GET_PORTS_PF_SRN(oct->conf); 206 + int q; 207 + 208 + for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(conf); q++) { 209 + u64 regval = 0; 210 + 211 + if (oct->pcie_port) 212 + regval = 8 << CNXK_SDP_FUNC_SEL_EPF_BIT_POS; 213 + 214 + octep_write_csr64(oct, CNXK_SDP_EPVF_RING(pf_srn + q), regval); 215 + 216 + regval = octep_read_csr64(oct, CNXK_SDP_EPVF_RING(pf_srn + q)); 217 + dev_dbg(&pdev->dev, "Write SDP_EPVF_RING[0x%llx] = 0x%llx\n", 218 + CNXK_SDP_EPVF_RING(pf_srn + q), regval); 219 + } 220 + } 221 + 222 + /* Initialize configuration limits and initial active config */ 223 + static void octep_init_config_cnxk_pf(struct octep_device *oct) 224 + { 225 + struct octep_config *conf = oct->conf; 226 + struct pci_dev *pdev = oct->pdev; 227 + u8 link = 0; 228 + u64 val; 229 + int pos; 230 + 231 + /* Read ring configuration: 232 + * PF ring count, number of VFs and rings per VF supported 233 + */ 234 + val = octep_read_csr64(oct, CNXK_SDP_EPF_RINFO); 235 + dev_info(&pdev->dev, "SDP_EPF_RINFO[0x%x]:0x%llx\n", CNXK_SDP_EPF_RINFO, val); 236 + conf->sriov_cfg.max_rings_per_vf = CNXK_SDP_EPF_RINFO_RPVF(val); 237 + conf->sriov_cfg.active_rings_per_vf = conf->sriov_cfg.max_rings_per_vf; 238 + conf->sriov_cfg.max_vfs = CNXK_SDP_EPF_RINFO_NVFS(val); 239 + conf->sriov_cfg.active_vfs = conf->sriov_cfg.max_vfs; 240 + conf->sriov_cfg.vf_srn = CNXK_SDP_EPF_RINFO_SRN(val); 241 + 242 + val = octep_read_csr64(oct, CNXK_SDP_MAC_PF_RING_CTL(oct->pcie_port)); 243 + dev_info(&pdev->dev, "SDP_MAC_PF_RING_CTL[%d]:0x%llx\n", oct->pcie_port, val); 244 + conf->pf_ring_cfg.srn = CNXK_SDP_MAC_PF_RING_CTL_SRN(val); 245 + conf->pf_ring_cfg.max_io_rings = CNXK_SDP_MAC_PF_RING_CTL_RPPF(val); 246 + conf->pf_ring_cfg.active_io_rings = conf->pf_ring_cfg.max_io_rings; 247 + dev_info(&pdev->dev, "pf_srn=%u 
rpvf=%u nvfs=%u rppf=%u\n", 248 + conf->pf_ring_cfg.srn, conf->sriov_cfg.active_rings_per_vf, 249 + conf->sriov_cfg.active_vfs, conf->pf_ring_cfg.active_io_rings); 250 + 251 + conf->iq.num_descs = OCTEP_IQ_MAX_DESCRIPTORS; 252 + conf->iq.instr_type = OCTEP_64BYTE_INSTR; 253 + conf->iq.db_min = OCTEP_DB_MIN; 254 + conf->iq.intr_threshold = OCTEP_IQ_INTR_THRESHOLD; 255 + 256 + conf->oq.num_descs = OCTEP_OQ_MAX_DESCRIPTORS; 257 + conf->oq.buf_size = OCTEP_OQ_BUF_SIZE; 258 + conf->oq.refill_threshold = OCTEP_OQ_REFILL_THRESHOLD; 259 + conf->oq.oq_intr_pkt = OCTEP_OQ_INTR_PKT_THRESHOLD; 260 + conf->oq.oq_intr_time = OCTEP_OQ_INTR_TIME_THRESHOLD; 261 + 262 + conf->msix_cfg.non_ioq_msix = CNXK_NUM_NON_IOQ_INTR; 263 + conf->msix_cfg.ioq_msix = conf->pf_ring_cfg.active_io_rings; 264 + conf->msix_cfg.non_ioq_msix_names = cnxk_non_ioq_msix_names; 265 + 266 + pos = pci_find_ext_capability(oct->pdev, PCI_EXT_CAP_ID_SRIOV); 267 + if (pos) { 268 + pci_read_config_byte(oct->pdev, 269 + pos + PCI_SRIOV_FUNC_LINK, 270 + &link); 271 + link = PCI_DEVFN(PCI_SLOT(oct->pdev->devfn), link); 272 + } 273 + conf->ctrl_mbox_cfg.barmem_addr = (void __iomem *)oct->mmio[2].hw_addr + 274 + CNXK_PEM_BAR4_INDEX_OFFSET + 275 + (link * CTRL_MBOX_SZ); 276 + 277 + conf->fw_info.hb_interval = OCTEP_DEFAULT_FW_HB_INTERVAL; 278 + conf->fw_info.hb_miss_count = OCTEP_DEFAULT_FW_HB_MISS_COUNT; 279 + } 280 + 281 + /* Setup registers for a hardware Tx Queue */ 282 + static void octep_setup_iq_regs_cnxk_pf(struct octep_device *oct, int iq_no) 283 + { 284 + struct octep_iq *iq = oct->iq[iq_no]; 285 + u32 reset_instr_cnt; 286 + u64 reg_val; 287 + 288 + iq_no += CFG_GET_PORTS_PF_SRN(oct->conf); 289 + reg_val = octep_read_csr64(oct, CNXK_SDP_R_IN_CONTROL(iq_no)); 290 + 291 + /* wait for IDLE to set to 1 */ 292 + if (!(reg_val & CNXK_R_IN_CTL_IDLE)) { 293 + do { 294 + reg_val = octep_read_csr64(oct, CNXK_SDP_R_IN_CONTROL(iq_no)); 295 + } while (!(reg_val & CNXK_R_IN_CTL_IDLE)); 296 + } 297 + 298 + reg_val |= 
CNXK_R_IN_CTL_RDSIZE; 299 + reg_val |= CNXK_R_IN_CTL_IS_64B; 300 + reg_val |= CNXK_R_IN_CTL_ESR; 301 + octep_write_csr64(oct, CNXK_SDP_R_IN_CONTROL(iq_no), reg_val); 302 + 303 + /* Write the start of the input queue's ring and its size */ 304 + octep_write_csr64(oct, CNXK_SDP_R_IN_INSTR_BADDR(iq_no), 305 + iq->desc_ring_dma); 306 + octep_write_csr64(oct, CNXK_SDP_R_IN_INSTR_RSIZE(iq_no), 307 + iq->max_count); 308 + 309 + /* Remember the doorbell & instruction count register addr 310 + * for this queue 311 + */ 312 + iq->doorbell_reg = oct->mmio[0].hw_addr + 313 + CNXK_SDP_R_IN_INSTR_DBELL(iq_no); 314 + iq->inst_cnt_reg = oct->mmio[0].hw_addr + 315 + CNXK_SDP_R_IN_CNTS(iq_no); 316 + iq->intr_lvl_reg = oct->mmio[0].hw_addr + 317 + CNXK_SDP_R_IN_INT_LEVELS(iq_no); 318 + 319 + /* Store the current instruction counter (used in flush_iq calculation) */ 320 + reset_instr_cnt = readl(iq->inst_cnt_reg); 321 + writel(reset_instr_cnt, iq->inst_cnt_reg); 322 + 323 + /* INTR_THRESHOLD is set to max(FFFFFFFF) to disable the INTR */ 324 + reg_val = CFG_GET_IQ_INTR_THRESHOLD(oct->conf) & 0xffffffff; 325 + octep_write_csr64(oct, CNXK_SDP_R_IN_INT_LEVELS(iq_no), reg_val); 326 + } 327 + 328 + /* Setup registers for a hardware Rx Queue */ 329 + static void octep_setup_oq_regs_cnxk_pf(struct octep_device *oct, int oq_no) 330 + { 331 + u64 reg_val; 332 + u64 oq_ctl = 0ULL; 333 + u32 time_threshold = 0; 334 + struct octep_oq *oq = oct->oq[oq_no]; 335 + 336 + oq_no += CFG_GET_PORTS_PF_SRN(oct->conf); 337 + reg_val = octep_read_csr64(oct, CNXK_SDP_R_OUT_CONTROL(oq_no)); 338 + 339 + /* wait for IDLE to set to 1 */ 340 + if (!(reg_val & CNXK_R_OUT_CTL_IDLE)) { 341 + do { 342 + reg_val = octep_read_csr64(oct, CNXK_SDP_R_OUT_CONTROL(oq_no)); 343 + } while (!(reg_val & CNXK_R_OUT_CTL_IDLE)); 344 + } 345 + 346 + reg_val &= ~(CNXK_R_OUT_CTL_IMODE); 347 + reg_val &= ~(CNXK_R_OUT_CTL_ROR_P); 348 + reg_val &= ~(CNXK_R_OUT_CTL_NSR_P); 349 + reg_val &= ~(CNXK_R_OUT_CTL_ROR_I); 350 + reg_val &= 
~(CNXK_R_OUT_CTL_NSR_I); 351 + reg_val &= ~(CNXK_R_OUT_CTL_ES_I); 352 + reg_val &= ~(CNXK_R_OUT_CTL_ROR_D); 353 + reg_val &= ~(CNXK_R_OUT_CTL_NSR_D); 354 + reg_val &= ~(CNXK_R_OUT_CTL_ES_D); 355 + reg_val |= (CNXK_R_OUT_CTL_ES_P); 356 + 357 + octep_write_csr64(oct, CNXK_SDP_R_OUT_CONTROL(oq_no), reg_val); 358 + octep_write_csr64(oct, CNXK_SDP_R_OUT_SLIST_BADDR(oq_no), 359 + oq->desc_ring_dma); 360 + octep_write_csr64(oct, CNXK_SDP_R_OUT_SLIST_RSIZE(oq_no), 361 + oq->max_count); 362 + 363 + oq_ctl = octep_read_csr64(oct, CNXK_SDP_R_OUT_CONTROL(oq_no)); 364 + 365 + /* Clear the ISIZE and BSIZE (22-0) */ 366 + oq_ctl &= ~0x7fffffULL; 367 + 368 + /* Populate the BSIZE (15-0) */ 369 + oq_ctl |= (oq->buffer_size & 0xffff); 370 + octep_write_csr64(oct, CNXK_SDP_R_OUT_CONTROL(oq_no), oq_ctl); 371 + 372 + /* Get the mapped address of the pkt_sent and pkts_credit regs */ 373 + oq->pkts_sent_reg = oct->mmio[0].hw_addr + CNXK_SDP_R_OUT_CNTS(oq_no); 374 + oq->pkts_credit_reg = oct->mmio[0].hw_addr + 375 + CNXK_SDP_R_OUT_SLIST_DBELL(oq_no); 376 + 377 + time_threshold = CFG_GET_OQ_INTR_TIME(oct->conf); 378 + reg_val = ((u64)time_threshold << 32) | 379 + CFG_GET_OQ_INTR_PKT(oct->conf); 380 + octep_write_csr64(oct, CNXK_SDP_R_OUT_INT_LEVELS(oq_no), reg_val); 381 + } 382 + 383 + /* Setup registers for a PF mailbox */ 384 + static void octep_setup_mbox_regs_cnxk_pf(struct octep_device *oct, int q_no) 385 + { 386 + struct octep_mbox *mbox = oct->mbox[q_no]; 387 + 388 + mbox->q_no = q_no; 389 + 390 + /* PF mbox interrupt reg */ 391 + mbox->mbox_int_reg = oct->mmio[0].hw_addr + CNXK_SDP_EPF_MBOX_RINT(0); 392 + 393 + /* PF to VF DATA reg. PF writes into this reg */ 394 + mbox->mbox_write_reg = oct->mmio[0].hw_addr + CNXK_SDP_R_MBOX_PF_VF_DATA(q_no); 395 + 396 + /* VF to PF DATA reg. 
PF reads from this reg */ 397 + mbox->mbox_read_reg = oct->mmio[0].hw_addr + CNXK_SDP_R_MBOX_VF_PF_DATA(q_no); 398 + } 399 + 400 + /* Poll OEI events like heartbeat */ 401 + static void octep_poll_oei_cnxk_pf(struct octep_device *oct) 402 + { 403 + u64 reg0; 404 + 405 + /* Check for OEI INTR */ 406 + reg0 = octep_read_csr64(oct, CNXK_SDP_EPF_OEI_RINT); 407 + if (reg0) { 408 + octep_write_csr64(oct, CNXK_SDP_EPF_OEI_RINT, reg0); 409 + if (reg0 & CNXK_SDP_EPF_OEI_RINT_DATA_BIT_MBOX) 410 + queue_work(octep_wq, &oct->ctrl_mbox_task); 411 + if (reg0 & CNXK_SDP_EPF_OEI_RINT_DATA_BIT_HBEAT) 412 + atomic_set(&oct->hb_miss_cnt, 0); 413 + } 414 + } 415 + 416 + /* OEI interrupt handler */ 417 + static irqreturn_t octep_oei_intr_handler_cnxk_pf(void *dev) 418 + { 419 + struct octep_device *oct = (struct octep_device *)dev; 420 + 421 + octep_poll_oei_cnxk_pf(oct); 422 + return IRQ_HANDLED; 423 + } 424 + 425 + /* Process non-ioq interrupts required to keep pf interface running. 426 + * OEI_RINT is needed for control mailbox 427 + * MBOX_RINT is needed for pfvf mailbox 428 + */ 429 + static void octep_poll_non_ioq_interrupts_cnxk_pf(struct octep_device *oct) 430 + { 431 + octep_poll_oei_cnxk_pf(oct); 432 + } 433 + 434 + /* Interrupt handler for input ring error interrupts. 
*/ 435 + static irqreturn_t octep_ire_intr_handler_cnxk_pf(void *dev) 436 + { 437 + struct octep_device *oct = (struct octep_device *)dev; 438 + struct pci_dev *pdev = oct->pdev; 439 + u64 reg_val = 0; 440 + int i = 0; 441 + 442 + /* Check for IRERR INTR */ 443 + reg_val = octep_read_csr64(oct, CNXK_SDP_EPF_IRERR_RINT); 444 + if (reg_val) { 445 + dev_info(&pdev->dev, 446 + "received IRERR_RINT intr: 0x%llx\n", reg_val); 447 + octep_write_csr64(oct, CNXK_SDP_EPF_IRERR_RINT, reg_val); 448 + 449 + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) { 450 + reg_val = octep_read_csr64(oct, 451 + CNXK_SDP_R_ERR_TYPE(i)); 452 + if (reg_val) { 453 + dev_info(&pdev->dev, 454 + "Received err type on IQ-%d: 0x%llx\n", 455 + i, reg_val); 456 + octep_write_csr64(oct, CNXK_SDP_R_ERR_TYPE(i), 457 + reg_val); 458 + } 459 + } 460 + } 461 + return IRQ_HANDLED; 462 + } 463 + 464 + /* Interrupt handler for output ring error interrupts. */ 465 + static irqreturn_t octep_ore_intr_handler_cnxk_pf(void *dev) 466 + { 467 + struct octep_device *oct = (struct octep_device *)dev; 468 + struct pci_dev *pdev = oct->pdev; 469 + u64 reg_val = 0; 470 + int i = 0; 471 + 472 + /* Check for ORERR INTR */ 473 + reg_val = octep_read_csr64(oct, CNXK_SDP_EPF_ORERR_RINT); 474 + if (reg_val) { 475 + dev_info(&pdev->dev, 476 + "Received ORERR_RINT intr: 0x%llx\n", reg_val); 477 + octep_write_csr64(oct, CNXK_SDP_EPF_ORERR_RINT, reg_val); 478 + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) { 479 + reg_val = octep_read_csr64(oct, CNXK_SDP_R_ERR_TYPE(i)); 480 + if (reg_val) { 481 + dev_info(&pdev->dev, 482 + "Received err type on OQ-%d: 0x%llx\n", 483 + i, reg_val); 484 + octep_write_csr64(oct, CNXK_SDP_R_ERR_TYPE(i), 485 + reg_val); 486 + } 487 + } 488 + } 489 + return IRQ_HANDLED; 490 + } 491 + 492 + /* Interrupt handler for vf input ring error interrupts. 
*/ 493 + static irqreturn_t octep_vfire_intr_handler_cnxk_pf(void *dev) 494 + { 495 + struct octep_device *oct = (struct octep_device *)dev; 496 + struct pci_dev *pdev = oct->pdev; 497 + u64 reg_val = 0; 498 + 499 + /* Check for VFIRE INTR */ 500 + reg_val = octep_read_csr64(oct, CNXK_SDP_EPF_VFIRE_RINT(0)); 501 + if (reg_val) { 502 + dev_info(&pdev->dev, 503 + "Received VFIRE_RINT intr: 0x%llx\n", reg_val); 504 + octep_write_csr64(oct, CNXK_SDP_EPF_VFIRE_RINT(0), reg_val); 505 + } 506 + return IRQ_HANDLED; 507 + } 508 + 509 + /* Interrupt handler for vf output ring error interrupts. */ 510 + static irqreturn_t octep_vfore_intr_handler_cnxk_pf(void *dev) 511 + { 512 + struct octep_device *oct = (struct octep_device *)dev; 513 + struct pci_dev *pdev = oct->pdev; 514 + u64 reg_val = 0; 515 + 516 + /* Check for VFORE INTR */ 517 + reg_val = octep_read_csr64(oct, CNXK_SDP_EPF_VFORE_RINT(0)); 518 + if (reg_val) { 519 + dev_info(&pdev->dev, 520 + "Received VFORE_RINT intr: 0x%llx\n", reg_val); 521 + octep_write_csr64(oct, CNXK_SDP_EPF_VFORE_RINT(0), reg_val); 522 + } 523 + return IRQ_HANDLED; 524 + } 525 + 526 + /* Interrupt handler for dpi dma related interrupts. 
*/ 527 + static irqreturn_t octep_dma_intr_handler_cnxk_pf(void *dev) 528 + { 529 + struct octep_device *oct = (struct octep_device *)dev; 530 + u64 reg_val = 0; 531 + 532 + /* Check for DMA INTR */ 533 + reg_val = octep_read_csr64(oct, CNXK_SDP_EPF_DMA_RINT); 534 + if (reg_val) 535 + octep_write_csr64(oct, CNXK_SDP_EPF_DMA_RINT, reg_val); 536 + 537 + return IRQ_HANDLED; 538 + } 539 + 540 + /* Interrupt handler for dpi dma transaction error interrupts for VFs */ 541 + static irqreturn_t octep_dma_vf_intr_handler_cnxk_pf(void *dev) 542 + { 543 + struct octep_device *oct = (struct octep_device *)dev; 544 + struct pci_dev *pdev = oct->pdev; 545 + u64 reg_val = 0; 546 + 547 + /* Check for DMA VF INTR */ 548 + reg_val = octep_read_csr64(oct, CNXK_SDP_EPF_DMA_VF_RINT(0)); 549 + if (reg_val) { 550 + dev_info(&pdev->dev, 551 + "Received DMA_VF_RINT intr: 0x%llx\n", reg_val); 552 + octep_write_csr64(oct, CNXK_SDP_EPF_DMA_VF_RINT(0), reg_val); 553 + } 554 + return IRQ_HANDLED; 555 + } 556 + 557 + /* Interrupt handler for pp transaction error interrupts for VFs */ 558 + static irqreturn_t octep_pp_vf_intr_handler_cnxk_pf(void *dev) 559 + { 560 + struct octep_device *oct = (struct octep_device *)dev; 561 + struct pci_dev *pdev = oct->pdev; 562 + u64 reg_val = 0; 563 + 564 + /* Check for PPVF INTR */ 565 + reg_val = octep_read_csr64(oct, CNXK_SDP_EPF_PP_VF_RINT(0)); 566 + if (reg_val) { 567 + dev_info(&pdev->dev, 568 + "Received PP_VF_RINT intr: 0x%llx\n", reg_val); 569 + octep_write_csr64(oct, CNXK_SDP_EPF_PP_VF_RINT(0), reg_val); 570 + } 571 + return IRQ_HANDLED; 572 + } 573 + 574 + /* Interrupt handler for mac related interrupts. 
*/ 575 + static irqreturn_t octep_misc_intr_handler_cnxk_pf(void *dev) 576 + { 577 + struct octep_device *oct = (struct octep_device *)dev; 578 + struct pci_dev *pdev = oct->pdev; 579 + u64 reg_val = 0; 580 + 581 + /* Check for MISC INTR */ 582 + reg_val = octep_read_csr64(oct, CNXK_SDP_EPF_MISC_RINT); 583 + if (reg_val) { 584 + dev_info(&pdev->dev, 585 + "Received MISC_RINT intr: 0x%llx\n", reg_val); 586 + octep_write_csr64(oct, CNXK_SDP_EPF_MISC_RINT, reg_val); 587 + } 588 + return IRQ_HANDLED; 589 + } 590 + 591 + /* Interrupts handler for all reserved interrupts. */ 592 + static irqreturn_t octep_rsvd_intr_handler_cnxk_pf(void *dev) 593 + { 594 + struct octep_device *oct = (struct octep_device *)dev; 595 + struct pci_dev *pdev = oct->pdev; 596 + 597 + dev_info(&pdev->dev, "Reserved interrupts raised; Ignore\n"); 598 + return IRQ_HANDLED; 599 + } 600 + 601 + /* Tx/Rx queue interrupt handler */ 602 + static irqreturn_t octep_ioq_intr_handler_cnxk_pf(void *data) 603 + { 604 + struct octep_ioq_vector *vector = (struct octep_ioq_vector *)data; 605 + struct octep_oq *oq = vector->oq; 606 + 607 + napi_schedule_irqoff(oq->napi); 608 + return IRQ_HANDLED; 609 + } 610 + 611 + /* soft reset */ 612 + static int octep_soft_reset_cnxk_pf(struct octep_device *oct) 613 + { 614 + dev_info(&oct->pdev->dev, "CNXKXX: Doing soft reset\n"); 615 + 616 + octep_write_csr64(oct, CNXK_SDP_WIN_WR_MASK_REG, 0xFF); 617 + 618 + /* Firmware status CSR is supposed to be cleared by 619 + * core domain reset, but due to a hw bug, it is not. 620 + * Set it to RUNNING right before reset so that it is not 621 + * left in READY (1) state after a reset. This is required 622 + * in addition to the early setting to handle the case where 623 + * the OcteonTX is unexpectedly reset, reboots, and then 624 + * the module is removed. 
625 + */ 626 + OCTEP_PCI_WIN_WRITE(oct, CNXK_PEMX_PFX_CSX_PFCFGX(0, 0, CNXK_PCIEEP_VSECST_CTL), 627 + FW_STATUS_RUNNING); 628 + 629 + /* Set chip domain reset bit */ 630 + OCTEP_PCI_WIN_WRITE(oct, CNXK_RST_CHIP_DOMAIN_W1S, 1); 631 + /* Wait till Octeon resets. */ 632 + mdelay(10); 633 + /* restore the reset value */ 634 + octep_write_csr64(oct, CNXK_SDP_WIN_WR_MASK_REG, 0xFF); 635 + 636 + return 0; 637 + } 638 + 639 + /* Re-initialize Octeon hardware registers */ 640 + static void octep_reinit_regs_cnxk_pf(struct octep_device *oct) 641 + { 642 + u32 i; 643 + 644 + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) 645 + oct->hw_ops.setup_iq_regs(oct, i); 646 + 647 + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) 648 + oct->hw_ops.setup_oq_regs(oct, i); 649 + 650 + oct->hw_ops.enable_interrupts(oct); 651 + oct->hw_ops.enable_io_queues(oct); 652 + 653 + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) 654 + writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg); 655 + } 656 + 657 + /* Enable all interrupts */ 658 + static void octep_enable_interrupts_cnxk_pf(struct octep_device *oct) 659 + { 660 + u64 intr_mask = 0ULL; 661 + int srn, num_rings, i; 662 + 663 + srn = CFG_GET_PORTS_PF_SRN(oct->conf); 664 + num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); 665 + 666 + for (i = 0; i < num_rings; i++) 667 + intr_mask |= (0x1ULL << (srn + i)); 668 + 669 + octep_write_csr64(oct, CNXK_SDP_EPF_IRERR_RINT_ENA_W1S, intr_mask); 670 + octep_write_csr64(oct, CNXK_SDP_EPF_ORERR_RINT_ENA_W1S, intr_mask); 671 + octep_write_csr64(oct, CNXK_SDP_EPF_OEI_RINT_ENA_W1S, -1ULL); 672 + 673 + octep_write_csr64(oct, CNXK_SDP_EPF_VFIRE_RINT_ENA_W1S(0), -1ULL); 674 + octep_write_csr64(oct, CNXK_SDP_EPF_VFORE_RINT_ENA_W1S(0), -1ULL); 675 + 676 + octep_write_csr64(oct, CNXK_SDP_EPF_MISC_RINT_ENA_W1S, intr_mask); 677 + octep_write_csr64(oct, CNXK_SDP_EPF_DMA_RINT_ENA_W1S, intr_mask); 678 + 679 + octep_write_csr64(oct, 
CNXK_SDP_EPF_DMA_VF_RINT_ENA_W1S(0), -1ULL); 680 + octep_write_csr64(oct, CNXK_SDP_EPF_PP_VF_RINT_ENA_W1S(0), -1ULL); 681 + } 682 + 683 + /* Disable all interrupts */ 684 + static void octep_disable_interrupts_cnxk_pf(struct octep_device *oct) 685 + { 686 + u64 intr_mask = 0ULL; 687 + int srn, num_rings, i; 688 + 689 + srn = CFG_GET_PORTS_PF_SRN(oct->conf); 690 + num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); 691 + 692 + for (i = 0; i < num_rings; i++) 693 + intr_mask |= (0x1ULL << (srn + i)); 694 + 695 + octep_write_csr64(oct, CNXK_SDP_EPF_IRERR_RINT_ENA_W1C, intr_mask); 696 + octep_write_csr64(oct, CNXK_SDP_EPF_ORERR_RINT_ENA_W1C, intr_mask); 697 + octep_write_csr64(oct, CNXK_SDP_EPF_OEI_RINT_ENA_W1C, -1ULL); 698 + 699 + octep_write_csr64(oct, CNXK_SDP_EPF_VFIRE_RINT_ENA_W1C(0), -1ULL); 700 + octep_write_csr64(oct, CNXK_SDP_EPF_VFORE_RINT_ENA_W1C(0), -1ULL); 701 + 702 + octep_write_csr64(oct, CNXK_SDP_EPF_MISC_RINT_ENA_W1C, intr_mask); 703 + octep_write_csr64(oct, CNXK_SDP_EPF_DMA_RINT_ENA_W1C, intr_mask); 704 + 705 + octep_write_csr64(oct, CNXK_SDP_EPF_DMA_VF_RINT_ENA_W1C(0), -1ULL); 706 + octep_write_csr64(oct, CNXK_SDP_EPF_PP_VF_RINT_ENA_W1C(0), -1ULL); 707 + } 708 + 709 + /* Get new Octeon Read Index: index of descriptor that Octeon reads next. 
*/ 710 + static u32 octep_update_iq_read_index_cnxk_pf(struct octep_iq *iq) 711 + { 712 + u32 pkt_in_done = readl(iq->inst_cnt_reg); 713 + u32 last_done, new_idx; 714 + 715 + last_done = pkt_in_done - iq->pkt_in_done; 716 + iq->pkt_in_done = pkt_in_done; 717 + 718 + new_idx = (iq->octep_read_index + last_done) % iq->max_count; 719 + 720 + return new_idx; 721 + } 722 + 723 + /* Enable a hardware Tx Queue */ 724 + static void octep_enable_iq_cnxk_pf(struct octep_device *oct, int iq_no) 725 + { 726 + u64 loop = HZ; 727 + u64 reg_val; 728 + 729 + iq_no += CFG_GET_PORTS_PF_SRN(oct->conf); 730 + 731 + octep_write_csr64(oct, CNXK_SDP_R_IN_INSTR_DBELL(iq_no), 0xFFFFFFFF); 732 + 733 + while (octep_read_csr64(oct, CNXK_SDP_R_IN_INSTR_DBELL(iq_no)) && 734 + loop--) { 735 + schedule_timeout_interruptible(1); 736 + } 737 + 738 + reg_val = octep_read_csr64(oct, CNXK_SDP_R_IN_INT_LEVELS(iq_no)); 739 + reg_val |= (0x1ULL << 62); 740 + octep_write_csr64(oct, CNXK_SDP_R_IN_INT_LEVELS(iq_no), reg_val); 741 + 742 + reg_val = octep_read_csr64(oct, CNXK_SDP_R_IN_ENABLE(iq_no)); 743 + reg_val |= 0x1ULL; 744 + octep_write_csr64(oct, CNXK_SDP_R_IN_ENABLE(iq_no), reg_val); 745 + } 746 + 747 + /* Enable a hardware Rx Queue */ 748 + static void octep_enable_oq_cnxk_pf(struct octep_device *oct, int oq_no) 749 + { 750 + u64 reg_val = 0ULL; 751 + 752 + oq_no += CFG_GET_PORTS_PF_SRN(oct->conf); 753 + 754 + reg_val = octep_read_csr64(oct, CNXK_SDP_R_OUT_INT_LEVELS(oq_no)); 755 + reg_val |= (0x1ULL << 62); 756 + octep_write_csr64(oct, CNXK_SDP_R_OUT_INT_LEVELS(oq_no), reg_val); 757 + 758 + octep_write_csr64(oct, CNXK_SDP_R_OUT_SLIST_DBELL(oq_no), 0xFFFFFFFF); 759 + 760 + reg_val = octep_read_csr64(oct, CNXK_SDP_R_OUT_ENABLE(oq_no)); 761 + reg_val |= 0x1ULL; 762 + octep_write_csr64(oct, CNXK_SDP_R_OUT_ENABLE(oq_no), reg_val); 763 + } 764 + 765 + /* Enable all hardware Tx/Rx Queues assined to PF */ 766 + static void octep_enable_io_queues_cnxk_pf(struct octep_device *oct) 767 + { 768 + u8 q; 769 + 
770 + for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) { 771 + octep_enable_iq_cnxk_pf(oct, q); 772 + octep_enable_oq_cnxk_pf(oct, q); 773 + } 774 + } 775 + 776 + /* Disable a hardware Tx Queue assined to PF */ 777 + static void octep_disable_iq_cnxk_pf(struct octep_device *oct, int iq_no) 778 + { 779 + u64 reg_val = 0ULL; 780 + 781 + iq_no += CFG_GET_PORTS_PF_SRN(oct->conf); 782 + 783 + reg_val = octep_read_csr64(oct, CNXK_SDP_R_IN_ENABLE(iq_no)); 784 + reg_val &= ~0x1ULL; 785 + octep_write_csr64(oct, CNXK_SDP_R_IN_ENABLE(iq_no), reg_val); 786 + } 787 + 788 + /* Disable a hardware Rx Queue assined to PF */ 789 + static void octep_disable_oq_cnxk_pf(struct octep_device *oct, int oq_no) 790 + { 791 + u64 reg_val = 0ULL; 792 + 793 + oq_no += CFG_GET_PORTS_PF_SRN(oct->conf); 794 + reg_val = octep_read_csr64(oct, CNXK_SDP_R_OUT_ENABLE(oq_no)); 795 + reg_val &= ~0x1ULL; 796 + octep_write_csr64(oct, CNXK_SDP_R_OUT_ENABLE(oq_no), reg_val); 797 + } 798 + 799 + /* Disable all hardware Tx/Rx Queues assined to PF */ 800 + static void octep_disable_io_queues_cnxk_pf(struct octep_device *oct) 801 + { 802 + int q = 0; 803 + 804 + for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) { 805 + octep_disable_iq_cnxk_pf(oct, q); 806 + octep_disable_oq_cnxk_pf(oct, q); 807 + } 808 + } 809 + 810 + /* Dump hardware registers (including Tx/Rx queues) for debugging. */ 811 + static void octep_dump_registers_cnxk_pf(struct octep_device *oct) 812 + { 813 + u8 srn, num_rings, q; 814 + 815 + srn = CFG_GET_PORTS_PF_SRN(oct->conf); 816 + num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); 817 + 818 + for (q = srn; q < srn + num_rings; q++) 819 + cnxk_dump_regs(oct, q); 820 + } 821 + 822 + /** 823 + * octep_device_setup_cnxk_pf() - Setup Octeon device. 824 + * 825 + * @oct: Octeon device private data structure. 826 + * 827 + * - initialize hardware operations. 828 + * - get target side pcie port number for the device. 829 + * - setup window access to hardware registers. 
830 + * - set initial configuration and max limits. 831 + * - setup hardware mapping of rings to the PF device. 832 + */ 833 + void octep_device_setup_cnxk_pf(struct octep_device *oct) 834 + { 835 + oct->hw_ops.setup_iq_regs = octep_setup_iq_regs_cnxk_pf; 836 + oct->hw_ops.setup_oq_regs = octep_setup_oq_regs_cnxk_pf; 837 + oct->hw_ops.setup_mbox_regs = octep_setup_mbox_regs_cnxk_pf; 838 + 839 + oct->hw_ops.oei_intr_handler = octep_oei_intr_handler_cnxk_pf; 840 + oct->hw_ops.ire_intr_handler = octep_ire_intr_handler_cnxk_pf; 841 + oct->hw_ops.ore_intr_handler = octep_ore_intr_handler_cnxk_pf; 842 + oct->hw_ops.vfire_intr_handler = octep_vfire_intr_handler_cnxk_pf; 843 + oct->hw_ops.vfore_intr_handler = octep_vfore_intr_handler_cnxk_pf; 844 + oct->hw_ops.dma_intr_handler = octep_dma_intr_handler_cnxk_pf; 845 + oct->hw_ops.dma_vf_intr_handler = octep_dma_vf_intr_handler_cnxk_pf; 846 + oct->hw_ops.pp_vf_intr_handler = octep_pp_vf_intr_handler_cnxk_pf; 847 + oct->hw_ops.misc_intr_handler = octep_misc_intr_handler_cnxk_pf; 848 + oct->hw_ops.rsvd_intr_handler = octep_rsvd_intr_handler_cnxk_pf; 849 + oct->hw_ops.ioq_intr_handler = octep_ioq_intr_handler_cnxk_pf; 850 + oct->hw_ops.soft_reset = octep_soft_reset_cnxk_pf; 851 + oct->hw_ops.reinit_regs = octep_reinit_regs_cnxk_pf; 852 + 853 + oct->hw_ops.enable_interrupts = octep_enable_interrupts_cnxk_pf; 854 + oct->hw_ops.disable_interrupts = octep_disable_interrupts_cnxk_pf; 855 + oct->hw_ops.poll_non_ioq_interrupts = octep_poll_non_ioq_interrupts_cnxk_pf; 856 + 857 + oct->hw_ops.update_iq_read_idx = octep_update_iq_read_index_cnxk_pf; 858 + 859 + oct->hw_ops.enable_iq = octep_enable_iq_cnxk_pf; 860 + oct->hw_ops.enable_oq = octep_enable_oq_cnxk_pf; 861 + oct->hw_ops.enable_io_queues = octep_enable_io_queues_cnxk_pf; 862 + 863 + oct->hw_ops.disable_iq = octep_disable_iq_cnxk_pf; 864 + oct->hw_ops.disable_oq = octep_disable_oq_cnxk_pf; 865 + oct->hw_ops.disable_io_queues = octep_disable_io_queues_cnxk_pf; 866 + 
oct->hw_ops.reset_io_queues = octep_reset_io_queues_cnxk_pf; 867 + 868 + oct->hw_ops.dump_registers = octep_dump_registers_cnxk_pf; 869 + 870 + octep_setup_pci_window_regs_cnxk_pf(oct); 871 + 872 + oct->pcie_port = octep_read_csr64(oct, CNXK_SDP_MAC_NUMBER) & 0xff; 873 + dev_info(&oct->pdev->dev, 874 + "Octeon device using PCIE Port %d\n", oct->pcie_port); 875 + 876 + octep_init_config_cnxk_pf(oct); 877 + octep_configure_ring_mapping_cnxk_pf(oct); 878 + 879 + /* Firmware status CSR is supposed to be cleared by 880 + * core domain reset, but due to IPBUPEM-38842, it is not. 881 + * Set it to RUNNING early in boot, so that unexpected resets 882 + * leave it in a state that is not READY (1). 883 + */ 884 + OCTEP_PCI_WIN_WRITE(oct, CNXK_PEMX_PFX_CSX_PFCFGX(0, 0, CNXK_PCIEEP_VSECST_CTL), 885 + FW_STATUS_RUNNING); 886 + }
+20
drivers/net/ethernet/marvell/octeon_ep/octep_main.c
··· 24 24 static const struct pci_device_id octep_pci_id_tbl[] = { 25 25 {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN93_PF)}, 26 26 {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF95N_PF)}, 27 + {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN10KA_PF)}, 28 + {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF10KA_PF)}, 29 + {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF10KB_PF)}, 30 + {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN10KB_PF)}, 27 31 {0, }, 28 32 }; 29 33 MODULE_DEVICE_TABLE(pci, octep_pci_id_tbl); ··· 1151 1147 return "CN93XX"; 1152 1148 case OCTEP_PCI_DEVICE_ID_CNF95N_PF: 1153 1149 return "CNF95N"; 1150 + case OCTEP_PCI_DEVICE_ID_CN10KA_PF: 1151 + return "CN10KA"; 1152 + case OCTEP_PCI_DEVICE_ID_CNF10KA_PF: 1153 + return "CNF10KA"; 1154 + case OCTEP_PCI_DEVICE_ID_CNF10KB_PF: 1155 + return "CNF10KB"; 1156 + case OCTEP_PCI_DEVICE_ID_CN10KB_PF: 1157 + return "CN10KB"; 1154 1158 default: 1155 1159 return "Unsupported"; 1156 1160 } ··· 1203 1191 octep_devid_to_str(oct), OCTEP_MAJOR_REV(oct), 1204 1192 OCTEP_MINOR_REV(oct)); 1205 1193 octep_device_setup_cn93_pf(oct); 1194 + break; 1195 + case OCTEP_PCI_DEVICE_ID_CNF10KA_PF: 1196 + case OCTEP_PCI_DEVICE_ID_CN10KA_PF: 1197 + case OCTEP_PCI_DEVICE_ID_CNF10KB_PF: 1198 + case OCTEP_PCI_DEVICE_ID_CN10KB_PF: 1199 + dev_info(&pdev->dev, "Setting up OCTEON %s PF PASS%d.%d\n", 1200 + octep_devid_to_str(oct), OCTEP_MAJOR_REV(oct), OCTEP_MINOR_REV(oct)); 1201 + octep_device_setup_cnxk_pf(oct); 1206 1202 break; 1207 1203 default: 1208 1204 dev_err(&pdev->dev,
+6
drivers/net/ethernet/marvell/octeon_ep/octep_main.h
··· 23 23 24 24 #define OCTEP_PCI_DEVICE_ID_CNF95N_PF 0xB400 //95N PF 25 25 26 + #define OCTEP_PCI_DEVICE_ID_CN10KA_PF 0xB900 //CN10KA PF 27 + #define OCTEP_PCI_DEVICE_ID_CNF10KA_PF 0xBA00 //CNF10KA PF 28 + #define OCTEP_PCI_DEVICE_ID_CNF10KB_PF 0xBC00 //CNF10KB PF 29 + #define OCTEP_PCI_DEVICE_ID_CN10KB_PF 0xBD00 //CN10KB PF 30 + 26 31 #define OCTEP_MAX_QUEUES 63 27 32 #define OCTEP_MAX_IQ OCTEP_MAX_QUEUES 28 33 #define OCTEP_MAX_OQ OCTEP_MAX_QUEUES ··· 391 386 void octep_free_oqs(struct octep_device *oct); 392 387 void octep_oq_dbell_init(struct octep_device *oct); 393 388 void octep_device_setup_cn93_pf(struct octep_device *oct); 389 + void octep_device_setup_cnxk_pf(struct octep_device *oct); 394 390 int octep_iq_process_completions(struct octep_iq *iq, u16 budget); 395 391 int octep_oq_process_rx(struct octep_oq *oq, int budget); 396 392 void octep_set_ethtool_ops(struct net_device *netdev);
+400
drivers/net/ethernet/marvell/octeon_ep/octep_regs_cnxk_pf.h
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell Octeon EP (EndPoint) Ethernet Driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#ifndef _OCTEP_REGS_CNXK_PF_H_
#define _OCTEP_REGS_CNXK_PF_H_

/* ############################ RST ######################### */
#define CNXK_RST_BOOT				0x000087E006001600ULL
#define CNXK_RST_CHIP_DOMAIN_W1S		0x000087E006001810ULL
#define CNXK_RST_CORE_DOMAIN_W1S		0x000087E006001820ULL
#define CNXK_RST_CORE_DOMAIN_W1C		0x000087E006001828ULL

#define CNXK_CONFIG_XPANSION_BAR		0x38
#define CNXK_CONFIG_PCIE_CAP			0x70
#define CNXK_CONFIG_PCIE_DEVCAP			0x74
#define CNXK_CONFIG_PCIE_DEVCTL			0x78
#define CNXK_CONFIG_PCIE_LINKCAP		0x7C
#define CNXK_CONFIG_PCIE_LINKCTL		0x80
#define CNXK_CONFIG_PCIE_SLOTCAP		0x84
#define CNXK_CONFIG_PCIE_SLOTCTL		0x88

#define CNXK_PCIE_SRIOV_FDL			0x188	/* 0x98 */
#define CNXK_PCIE_SRIOV_FDL_BIT_POS		0x10
#define CNXK_PCIE_SRIOV_FDL_MASK		0xFF

#define CNXK_CONFIG_PCIE_FLTMSK			0x720

/* ################# Offsets of RING, EPF, MAC ######################### */
#define CNXK_RING_OFFSET			(0x1ULL << 17)
#define CNXK_EPF_OFFSET				(0x1ULL << 25)
#define CNXK_MAC_OFFSET				(0x1ULL << 4)
#define CNXK_BIT_ARRAY_OFFSET			(0x1ULL << 4)
#define CNXK_EPVF_RING_OFFSET			(0x1ULL << 4)

/* ################# Scratch Registers ######################### */
#define CNXK_SDP_EPF_SCRATCH			0x209E0

/* ################# Window Registers ######################### */
#define CNXK_SDP_WIN_WR_ADDR64			0x20000
#define CNXK_SDP_WIN_RD_ADDR64			0x20010
#define CNXK_SDP_WIN_WR_DATA64			0x20020
#define CNXK_SDP_WIN_WR_MASK_REG		0x20030
#define CNXK_SDP_WIN_RD_DATA64			0x20040

#define CNXK_SDP_MAC_NUMBER			0x2C100

/* ################# Global Privileged registers ######################### */
#define CNXK_SDP_EPF_RINFO			0x209F0

/* Extract start-ring-number / rings-per-VF / number-of-VFs fields of
 * the SDP_EPF_RINFO CSR value.
 */
#define CNXK_SDP_EPF_RINFO_SRN(val)		((val) & 0x7F)
#define CNXK_SDP_EPF_RINFO_RPVF(val)		(((val) >> 32) & 0xF)
#define CNXK_SDP_EPF_RINFO_NVFS(val)		(((val) >> 48) & 0x7F)

/* SDP Function select */
#define CNXK_SDP_FUNC_SEL_EPF_BIT_POS		7
#define CNXK_SDP_FUNC_SEL_FUNC_BIT_POS		0

/* ##### RING IN (Into device from PCI: Tx Ring) REGISTERS #### */
#define CNXK_SDP_R_IN_CONTROL_START		0x10000
#define CNXK_SDP_R_IN_ENABLE_START		0x10010
#define CNXK_SDP_R_IN_INSTR_BADDR_START		0x10020
#define CNXK_SDP_R_IN_INSTR_RSIZE_START		0x10030
#define CNXK_SDP_R_IN_INSTR_DBELL_START		0x10040
#define CNXK_SDP_R_IN_CNTS_START		0x10050
#define CNXK_SDP_R_IN_INT_LEVELS_START		0x10060
#define CNXK_SDP_R_IN_PKT_CNT_START		0x10080
#define CNXK_SDP_R_IN_BYTE_CNT_START		0x10090

#define CNXK_SDP_R_IN_CONTROL(ring)		\
	(CNXK_SDP_R_IN_CONTROL_START + ((ring) * CNXK_RING_OFFSET))

#define CNXK_SDP_R_IN_ENABLE(ring)		\
	(CNXK_SDP_R_IN_ENABLE_START + ((ring) * CNXK_RING_OFFSET))

#define CNXK_SDP_R_IN_INSTR_BADDR(ring)		\
	(CNXK_SDP_R_IN_INSTR_BADDR_START + ((ring) * CNXK_RING_OFFSET))

#define CNXK_SDP_R_IN_INSTR_RSIZE(ring)		\
	(CNXK_SDP_R_IN_INSTR_RSIZE_START + ((ring) * CNXK_RING_OFFSET))

#define CNXK_SDP_R_IN_INSTR_DBELL(ring)		\
	(CNXK_SDP_R_IN_INSTR_DBELL_START + ((ring) * CNXK_RING_OFFSET))

#define CNXK_SDP_R_IN_CNTS(ring)		\
	(CNXK_SDP_R_IN_CNTS_START + ((ring) * CNXK_RING_OFFSET))

#define CNXK_SDP_R_IN_INT_LEVELS(ring)		\
	(CNXK_SDP_R_IN_INT_LEVELS_START + ((ring) * CNXK_RING_OFFSET))

#define CNXK_SDP_R_IN_PKT_CNT(ring)		\
	(CNXK_SDP_R_IN_PKT_CNT_START + ((ring) * CNXK_RING_OFFSET))

#define CNXK_SDP_R_IN_BYTE_CNT(ring)		\
	(CNXK_SDP_R_IN_BYTE_CNT_START + ((ring) * CNXK_RING_OFFSET))

/* Rings per Virtual Function */
#define CNXK_R_IN_CTL_RPVF_MASK			(0xF)
#define CNXK_R_IN_CTL_RPVF_POS			(48)

/* Number of instructions to be read in one MAC read request.
 * setting to Max value(4)
 */
#define CNXK_R_IN_CTL_IDLE			(0x1ULL << 28)
#define CNXK_R_IN_CTL_RDSIZE			(0x3ULL << 25)
#define CNXK_R_IN_CTL_IS_64B			(0x1ULL << 24)
#define CNXK_R_IN_CTL_D_NSR			(0x1ULL << 8)
#define CNXK_R_IN_CTL_D_ESR			(0x1ULL << 6)
#define CNXK_R_IN_CTL_D_ROR			(0x1ULL << 5)
#define CNXK_R_IN_CTL_NSR			(0x1ULL << 3)
#define CNXK_R_IN_CTL_ESR			(0x1ULL << 1)
#define CNXK_R_IN_CTL_ROR			(0x1ULL << 0)

#define CNXK_R_IN_CTL_MASK			(CNXK_R_IN_CTL_RDSIZE | CNXK_R_IN_CTL_IS_64B)

/* ##### RING OUT (out from device to PCI host: Rx Ring) REGISTERS #### */
#define CNXK_SDP_R_OUT_CNTS_START		0x10100
#define CNXK_SDP_R_OUT_INT_LEVELS_START		0x10110
#define CNXK_SDP_R_OUT_SLIST_BADDR_START	0x10120
#define CNXK_SDP_R_OUT_SLIST_RSIZE_START	0x10130
#define CNXK_SDP_R_OUT_SLIST_DBELL_START	0x10140
#define CNXK_SDP_R_OUT_CONTROL_START		0x10150
#define CNXK_SDP_R_OUT_WMARK_START		0x10160
#define CNXK_SDP_R_OUT_ENABLE_START		0x10170
#define CNXK_SDP_R_OUT_PKT_CNT_START		0x10180
#define CNXK_SDP_R_OUT_BYTE_CNT_START		0x10190

#define CNXK_SDP_R_OUT_CONTROL(ring)		\
	(CNXK_SDP_R_OUT_CONTROL_START + ((ring) * CNXK_RING_OFFSET))

#define CNXK_SDP_R_OUT_ENABLE(ring)		\
	(CNXK_SDP_R_OUT_ENABLE_START + ((ring) * CNXK_RING_OFFSET))

#define CNXK_SDP_R_OUT_SLIST_BADDR(ring)	\
	(CNXK_SDP_R_OUT_SLIST_BADDR_START + ((ring) * CNXK_RING_OFFSET))

#define CNXK_SDP_R_OUT_SLIST_RSIZE(ring)	\
	(CNXK_SDP_R_OUT_SLIST_RSIZE_START + ((ring) * CNXK_RING_OFFSET))

#define CNXK_SDP_R_OUT_SLIST_DBELL(ring)	\
	(CNXK_SDP_R_OUT_SLIST_DBELL_START + ((ring) * CNXK_RING_OFFSET))

#define CNXK_SDP_R_OUT_CNTS(ring)		\
	(CNXK_SDP_R_OUT_CNTS_START + ((ring) * CNXK_RING_OFFSET))

#define CNXK_SDP_R_OUT_INT_LEVELS(ring)		\
	(CNXK_SDP_R_OUT_INT_LEVELS_START + ((ring) * CNXK_RING_OFFSET))

#define CNXK_SDP_R_OUT_PKT_CNT(ring)		\
	(CNXK_SDP_R_OUT_PKT_CNT_START + ((ring) * CNXK_RING_OFFSET))

#define CNXK_SDP_R_OUT_BYTE_CNT(ring)		\
	(CNXK_SDP_R_OUT_BYTE_CNT_START + ((ring) * CNXK_RING_OFFSET))

/*------------------ R_OUT Masks ----------------*/
#define CNXK_R_OUT_INT_LEVELS_BMODE		BIT_ULL(63)
#define CNXK_R_OUT_INT_LEVELS_TIMET		(32)

#define CNXK_R_OUT_CTL_IDLE			BIT_ULL(40)
#define CNXK_R_OUT_CTL_ES_I			BIT_ULL(34)
#define CNXK_R_OUT_CTL_NSR_I			BIT_ULL(33)
#define CNXK_R_OUT_CTL_ROR_I			BIT_ULL(32)
#define CNXK_R_OUT_CTL_ES_D			BIT_ULL(30)
#define CNXK_R_OUT_CTL_NSR_D			BIT_ULL(29)
#define CNXK_R_OUT_CTL_ROR_D			BIT_ULL(28)
#define CNXK_R_OUT_CTL_ES_P			BIT_ULL(26)
#define CNXK_R_OUT_CTL_NSR_P			BIT_ULL(25)
#define CNXK_R_OUT_CTL_ROR_P			BIT_ULL(24)
#define CNXK_R_OUT_CTL_IMODE			BIT_ULL(23)

/* ############### Interrupt Moderation Registers ############### */
#define CNXK_SDP_R_IN_INT_MDRT_CTL0_START	0x10280
#define CNXK_SDP_R_IN_INT_MDRT_CTL1_START	0x102A0
#define CNXK_SDP_R_IN_INT_MDRT_DBG_START	0x102C0

#define CNXK_SDP_R_OUT_INT_MDRT_CTL0_START	0x10380
#define CNXK_SDP_R_OUT_INT_MDRT_CTL1_START	0x103A0
#define CNXK_SDP_R_OUT_INT_MDRT_DBG_START	0x103C0

#define CNXK_SDP_R_OUT_CNTS_ISM_START		0x10510
#define CNXK_SDP_R_IN_CNTS_ISM_START		0x10520

#define CNXK_SDP_R_IN_INT_MDRT_CTL0(ring)	\
	(CNXK_SDP_R_IN_INT_MDRT_CTL0_START + ((ring) * CNXK_RING_OFFSET))

#define CNXK_SDP_R_IN_INT_MDRT_CTL1(ring)	\
	(CNXK_SDP_R_IN_INT_MDRT_CTL1_START + ((ring) * CNXK_RING_OFFSET))

#define CNXK_SDP_R_IN_INT_MDRT_DBG(ring)	\
	(CNXK_SDP_R_IN_INT_MDRT_DBG_START + ((ring) * CNXK_RING_OFFSET))

#define CNXK_SDP_R_OUT_INT_MDRT_CTL0(ring)	\
	(CNXK_SDP_R_OUT_INT_MDRT_CTL0_START + ((ring) * CNXK_RING_OFFSET))

#define CNXK_SDP_R_OUT_INT_MDRT_CTL1(ring)	\
	(CNXK_SDP_R_OUT_INT_MDRT_CTL1_START + ((ring) * CNXK_RING_OFFSET))

#define CNXK_SDP_R_OUT_INT_MDRT_DBG(ring)	\
	(CNXK_SDP_R_OUT_INT_MDRT_DBG_START + ((ring) * CNXK_RING_OFFSET))

#define CNXK_SDP_R_OUT_CNTS_ISM(ring)		\
	(CNXK_SDP_R_OUT_CNTS_ISM_START + ((ring) * CNXK_RING_OFFSET))

#define CNXK_SDP_R_IN_CNTS_ISM(ring)		\
	(CNXK_SDP_R_IN_CNTS_ISM_START + ((ring) * CNXK_RING_OFFSET))

/* ##################### Mail Box Registers ########################## */
/* INT register for VF. when a MBOX write from PF happed to a VF,
 * corresponding bit will be set in this register as well as in
 * PF_VF_INT register.
 *
 * This is a RO register, the int can be cleared by writing 1 to PF_VF_INT
 */
/* Basically first 3 are from PF to VF. The last one is data from VF to PF */
#define CNXK_SDP_R_MBOX_PF_VF_DATA_START	0x10210
#define CNXK_SDP_R_MBOX_PF_VF_INT_START		0x10220
#define CNXK_SDP_R_MBOX_VF_PF_DATA_START	0x10230

#define CNXK_SDP_R_MBOX_PF_VF_DATA(ring)	\
	(CNXK_SDP_R_MBOX_PF_VF_DATA_START + ((ring) * CNXK_RING_OFFSET))

#define CNXK_SDP_R_MBOX_PF_VF_INT(ring)		\
	(CNXK_SDP_R_MBOX_PF_VF_INT_START + ((ring) * CNXK_RING_OFFSET))

#define CNXK_SDP_R_MBOX_VF_PF_DATA(ring)	\
	(CNXK_SDP_R_MBOX_VF_PF_DATA_START + ((ring) * CNXK_RING_OFFSET))

/* ##################### Interrupt Registers ########################## */
#define CNXK_SDP_R_ERR_TYPE_START		0x10400

#define CNXK_SDP_R_ERR_TYPE(ring)		\
	(CNXK_SDP_R_ERR_TYPE_START + ((ring) * CNXK_RING_OFFSET))

/* CNXK_SDP_R_OUT_CNTS_ISM_START / CNXK_SDP_R_IN_CNTS_ISM_START and their
 * per-ring accessors are defined once in the interrupt moderation section
 * above; the duplicate definitions that used to live here were removed.
 */
#define CNXK_SDP_R_MBOX_ISM_START		0x10500

#define CNXK_SDP_R_MBOX_ISM(ring)		\
	(CNXK_SDP_R_MBOX_ISM_START + ((ring) * CNXK_RING_OFFSET))

#define CNXK_SDP_EPF_MBOX_RINT_START		0x20100
#define CNXK_SDP_EPF_MBOX_RINT_W1S_START	0x20120
#define CNXK_SDP_EPF_MBOX_RINT_ENA_W1C_START	0x20140
#define CNXK_SDP_EPF_MBOX_RINT_ENA_W1S_START	0x20160

#define CNXK_SDP_EPF_VFIRE_RINT_START		0x20180
#define CNXK_SDP_EPF_VFIRE_RINT_W1S_START	0x201A0
#define CNXK_SDP_EPF_VFIRE_RINT_ENA_W1C_START	0x201C0
#define CNXK_SDP_EPF_VFIRE_RINT_ENA_W1S_START	0x201E0

#define CNXK_SDP_EPF_IRERR_RINT			0x20200
#define CNXK_SDP_EPF_IRERR_RINT_W1S		0x20210
#define CNXK_SDP_EPF_IRERR_RINT_ENA_W1C		0x20220
#define CNXK_SDP_EPF_IRERR_RINT_ENA_W1S		0x20230

#define CNXK_SDP_EPF_VFORE_RINT_START		0x20240
#define CNXK_SDP_EPF_VFORE_RINT_W1S_START	0x20260
#define CNXK_SDP_EPF_VFORE_RINT_ENA_W1C_START	0x20280
#define CNXK_SDP_EPF_VFORE_RINT_ENA_W1S_START	0x202A0

#define CNXK_SDP_EPF_ORERR_RINT			0x20320
#define CNXK_SDP_EPF_ORERR_RINT_W1S		0x20330
#define CNXK_SDP_EPF_ORERR_RINT_ENA_W1C		0x20340
#define CNXK_SDP_EPF_ORERR_RINT_ENA_W1S		0x20350

#define CNXK_SDP_EPF_OEI_RINT			0x20400
#define CNXK_SDP_EPF_OEI_RINT_W1S		0x20500
#define CNXK_SDP_EPF_OEI_RINT_ENA_W1C		0x20600
#define CNXK_SDP_EPF_OEI_RINT_ENA_W1S		0x20700

#define CNXK_SDP_EPF_DMA_RINT			0x20800
#define CNXK_SDP_EPF_DMA_RINT_W1S		0x20810
#define CNXK_SDP_EPF_DMA_RINT_ENA_W1C		0x20820
#define CNXK_SDP_EPF_DMA_RINT_ENA_W1S		0x20830

#define CNXK_SDP_EPF_DMA_INT_LEVEL_START	0x20840
#define CNXK_SDP_EPF_DMA_CNT_START		0x20860
#define CNXK_SDP_EPF_DMA_TIM_START		0x20880

#define CNXK_SDP_EPF_MISC_RINT			0x208A0
#define CNXK_SDP_EPF_MISC_RINT_W1S		0x208B0
#define CNXK_SDP_EPF_MISC_RINT_ENA_W1C		0x208C0
#define CNXK_SDP_EPF_MISC_RINT_ENA_W1S		0x208D0

#define CNXK_SDP_EPF_DMA_VF_RINT_START		0x208E0
#define CNXK_SDP_EPF_DMA_VF_RINT_W1S_START	0x20900
#define CNXK_SDP_EPF_DMA_VF_RINT_ENA_W1C_START	0x20920
#define CNXK_SDP_EPF_DMA_VF_RINT_ENA_W1S_START	0x20940

#define CNXK_SDP_EPF_PP_VF_RINT_START		0x20960
#define CNXK_SDP_EPF_PP_VF_RINT_W1S_START	0x20980
#define CNXK_SDP_EPF_PP_VF_RINT_ENA_W1C_START	0x209A0
#define CNXK_SDP_EPF_PP_VF_RINT_ENA_W1S_START	0x209C0

#define CNXK_SDP_EPF_MBOX_RINT(index)		\
	(CNXK_SDP_EPF_MBOX_RINT_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
#define CNXK_SDP_EPF_MBOX_RINT_W1S(index)	\
	(CNXK_SDP_EPF_MBOX_RINT_W1S_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
#define CNXK_SDP_EPF_MBOX_RINT_ENA_W1C(index)	\
	(CNXK_SDP_EPF_MBOX_RINT_ENA_W1C_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
#define CNXK_SDP_EPF_MBOX_RINT_ENA_W1S(index)	\
	(CNXK_SDP_EPF_MBOX_RINT_ENA_W1S_START + ((index) * CNXK_BIT_ARRAY_OFFSET))

#define CNXK_SDP_EPF_VFIRE_RINT(index)		\
	(CNXK_SDP_EPF_VFIRE_RINT_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
#define CNXK_SDP_EPF_VFIRE_RINT_W1S(index)	\
	(CNXK_SDP_EPF_VFIRE_RINT_W1S_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
#define CNXK_SDP_EPF_VFIRE_RINT_ENA_W1C(index)	\
	(CNXK_SDP_EPF_VFIRE_RINT_ENA_W1C_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
#define CNXK_SDP_EPF_VFIRE_RINT_ENA_W1S(index)	\
	(CNXK_SDP_EPF_VFIRE_RINT_ENA_W1S_START + ((index) * CNXK_BIT_ARRAY_OFFSET))

#define CNXK_SDP_EPF_VFORE_RINT(index)		\
	(CNXK_SDP_EPF_VFORE_RINT_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
#define CNXK_SDP_EPF_VFORE_RINT_W1S(index)	\
	(CNXK_SDP_EPF_VFORE_RINT_W1S_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
#define CNXK_SDP_EPF_VFORE_RINT_ENA_W1C(index)	\
	(CNXK_SDP_EPF_VFORE_RINT_ENA_W1C_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
#define CNXK_SDP_EPF_VFORE_RINT_ENA_W1S(index)	\
	(CNXK_SDP_EPF_VFORE_RINT_ENA_W1S_START + ((index) * CNXK_BIT_ARRAY_OFFSET))

/* NOTE(review): these previously computed (START + ((index) + OFFSET)),
 * unlike every sibling bit-array macro above which multiplies by the
 * offset; that looked like a copy-paste slip, so they now index with
 * '*' — confirm against the CN10K SDP register map.
 */
#define CNXK_SDP_EPF_DMA_VF_RINT(index)		\
	(CNXK_SDP_EPF_DMA_VF_RINT_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
#define CNXK_SDP_EPF_DMA_VF_RINT_W1S(index)	\
	(CNXK_SDP_EPF_DMA_VF_RINT_W1S_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
#define CNXK_SDP_EPF_DMA_VF_RINT_ENA_W1C(index)	\
	(CNXK_SDP_EPF_DMA_VF_RINT_ENA_W1C_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
#define CNXK_SDP_EPF_DMA_VF_RINT_ENA_W1S(index)	\
	(CNXK_SDP_EPF_DMA_VF_RINT_ENA_W1S_START + ((index) * CNXK_BIT_ARRAY_OFFSET))

#define CNXK_SDP_EPF_PP_VF_RINT(index)		\
	(CNXK_SDP_EPF_PP_VF_RINT_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
#define CNXK_SDP_EPF_PP_VF_RINT_W1S(index)	\
	(CNXK_SDP_EPF_PP_VF_RINT_W1S_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
#define CNXK_SDP_EPF_PP_VF_RINT_ENA_W1C(index)	\
	(CNXK_SDP_EPF_PP_VF_RINT_ENA_W1C_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
#define CNXK_SDP_EPF_PP_VF_RINT_ENA_W1S(index)	\
	(CNXK_SDP_EPF_PP_VF_RINT_ENA_W1S_START + ((index) * CNXK_BIT_ARRAY_OFFSET))

/*------------------ Interrupt Masks ----------------*/
#define CNXK_INTR_R_SEND_ISM			BIT_ULL(63)
#define CNXK_INTR_R_OUT_INT			BIT_ULL(62)
#define CNXK_INTR_R_IN_INT			BIT_ULL(61)
#define CNXK_INTR_R_MBOX_INT			BIT_ULL(60)
#define CNXK_INTR_R_RESEND			BIT_ULL(59)
#define CNXK_INTR_R_CLR_TIM			BIT_ULL(58)

/* ####################### Ring Mapping Registers ################################## */
#define CNXK_SDP_EPVF_RING_START		0x26000
#define CNXK_SDP_IN_RING_TB_MAP_START		0x28000
#define CNXK_SDP_IN_RATE_LIMIT_START		0x2A000
#define CNXK_SDP_MAC_PF_RING_CTL_START		0x2C000

#define CNXK_SDP_EPVF_RING(ring)		\
	(CNXK_SDP_EPVF_RING_START + ((ring) * CNXK_EPVF_RING_OFFSET))
/* fixed typo: previously expanded the undefined CNXK_SDP_N_RING_TB_MAP_START */
#define CNXK_SDP_IN_RING_TB_MAP(ring)		\
	(CNXK_SDP_IN_RING_TB_MAP_START + ((ring) * CNXK_EPVF_RING_OFFSET))
#define CNXK_SDP_IN_RATE_LIMIT(ring)		\
	(CNXK_SDP_IN_RATE_LIMIT_START + ((ring) * CNXK_EPVF_RING_OFFSET))
#define CNXK_SDP_MAC_PF_RING_CTL(mac)		\
	(CNXK_SDP_MAC_PF_RING_CTL_START + ((mac) * CNXK_MAC_OFFSET))

#define CNXK_SDP_MAC_PF_RING_CTL_NPFS(val)	((val) & 0x3)
#define CNXK_SDP_MAC_PF_RING_CTL_SRN(val)	(((val) >> 8) & 0x7F)
#define CNXK_SDP_MAC_PF_RING_CTL_RPPF(val)	(((val) >> 16) & 0x3F)

/* Number of non-queue interrupts in CNXKxx */
#define CNXK_NUM_NON_IOQ_INTR			32

/* bit 0 for control mbox interrupt */
#define CNXK_SDP_EPF_OEI_RINT_DATA_BIT_MBOX	BIT_ULL(0)
/* bit 1 for firmware heartbeat interrupt */
#define CNXK_SDP_EPF_OEI_RINT_DATA_BIT_HBEAT	BIT_ULL(1)
#define FW_STATUS_RUNNING			2ULL
/* Build a PEM window address for a PF config-space register */
#define CNXK_PEMX_PFX_CSX_PFCFGX(pem, pf, offset) ({ typeof(offset) _off = (offset); \
						     ((0x8e0000008000 | \
						      (uint64_t)(pem) << 36 \
						      | (pf) << 18 \
						      | ((_off >> 16) & 1) << 16 \
						      | (_off >> 3) << 3) \
						      + (((_off >> 2) & 1) << 2)); \
						   })

/* Register defines for use with CNXK_PEMX_PFX_CSX_PFCFGX */
#define CNXK_PCIEEP_VSECST_CTL			0x418

#define CNXK_PEM_BAR4_INDEX			7
#define CNXK_PEM_BAR4_INDEX_SIZE		0x400000ULL
#define CNXK_PEM_BAR4_INDEX_OFFSET		(CNXK_PEM_BAR4_INDEX * CNXK_PEM_BAR4_INDEX_SIZE)

#endif /* _OCTEP_REGS_CNXK_PF_H_ */