Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'octeon_ep-driver'

Veerasenareddy Burru says:

====================
Add octeon_ep driver

This driver implements networking functionality of Marvell's Octeon
PCI Endpoint NIC.

This driver supports the following devices:
* Network controller: Cavium, Inc. Device b200

V4 -> V5:
- Fix warnings reported by clang.
- Address comments from community reviews.

V3 -> V4:
- Fix warnings and errors reported by "make W=1 C=1".

V2 -> V3:
- Fix warnings and errors reported by kernel test robot:
"Reported-by: kernel test robot <lkp@intel.com>"

V1 -> V2:
- Address review comments on original patch series.
- Divide PATCH 1/4 from the original series into 4 patches in
v2 patch series: PATCH 1/7 to PATCH 4/7.
- Fix clang build errors.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+5633
+1
Documentation/networking/device_drivers/ethernet/index.rst
··· 39 39 intel/iavf 40 40 intel/ice 41 41 marvell/octeontx2 42 + marvell/octeon_ep 42 43 mellanox/mlx5 43 44 microsoft/netvsc 44 45 neterion/s2io
+35
Documentation/networking/device_drivers/ethernet/marvell/octeon_ep.rst
··· 1 + .. SPDX-License-Identifier: GPL-2.0+ 2 + 3 + ==================================================================== 4 + Linux kernel networking driver for Marvell's Octeon PCI Endpoint NIC 5 + ==================================================================== 6 + 7 + Network driver for Marvell's Octeon PCI EndPoint NIC. 8 + Copyright (c) 2020 Marvell International Ltd. 9 + 10 + Contents 11 + ======== 12 + 13 + - `Overview`_ 14 + - `Supported Devices`_ 15 + - `Interface Control`_ 16 + 17 + Overview 18 + ======== 19 + This driver implements networking functionality of Marvell's Octeon PCI 20 + EndPoint NIC. 21 + 22 + Supported Devices 23 + ================= 24 + Currently, this driver supports the following devices: 25 + * Network controller: Cavium, Inc. Device b200 26 + 27 + Interface Control 28 + ================= 29 + Network Interface control like changing mtu, link speed, link down/up are 30 + done by writing command to mailbox command queue, a mailbox interface 31 + implemented through a reserved region in BAR4. 32 + This driver writes the commands into the mailbox and the firmware on the 33 + Octeon device processes them. The firmware also sends unsolicited notifications 34 + to driver for events such as link change, through notification queue 35 + implemented as part of mailbox interface.
+7
MAINTAINERS
··· 11828 11828 F: Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt 11829 11829 F: drivers/mmc/host/sdhci-xenon* 11830 11830 11831 + MARVELL OCTEON ENDPOINT DRIVER 11832 + M: Veerasenareddy Burru <vburru@marvell.com> 11833 + M: Abhijit Ayarekar <aayarekar@marvell.com> 11834 + L: netdev@vger.kernel.org 11835 + S: Supported 11836 + F: drivers/net/ethernet/marvell/octeon_ep 11837 + 11831 11838 MATROX FRAMEBUFFER DRIVER 11832 11839 L: linux-fbdev@vger.kernel.org 11833 11840 S: Orphan
+1
drivers/net/ethernet/marvell/Kconfig
··· 177 177 178 178 179 179 source "drivers/net/ethernet/marvell/octeontx2/Kconfig" 180 + source "drivers/net/ethernet/marvell/octeon_ep/Kconfig" 180 181 source "drivers/net/ethernet/marvell/prestera/Kconfig" 181 182 182 183 endif # NET_VENDOR_MARVELL
+1
drivers/net/ethernet/marvell/Makefile
··· 11 11 obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o 12 12 obj-$(CONFIG_SKGE) += skge.o 13 13 obj-$(CONFIG_SKY2) += sky2.o 14 + obj-y += octeon_ep/ 14 15 obj-y += octeontx2/ 15 16 obj-y += prestera/
+20
drivers/net/ethernet/marvell/octeon_ep/Kconfig
··· 1 + # SPDX-License-Identifier: GPL-2.0-only 2 + # 3 + # Marvell's Octeon PCI Endpoint NIC Driver Configuration 4 + # 5 + 6 + config OCTEON_EP 7 + tristate "Marvell Octeon PCI Endpoint NIC Driver" 8 + depends on 64BIT 9 + depends on PCI 10 + depends on PTP_1588_CLOCK_OPTIONAL 11 + help 12 + This driver supports networking functionality of Marvell's 13 + Octeon PCI Endpoint NIC. 14 + 15 + To know the list of devices supported by this driver, refer to the 16 + documentation in 17 + <file:Documentation/networking/device_drivers/ethernet/marvell/octeon_ep.rst>. 18 + 19 + To compile this driver as a module, choose M here. Name of the 20 + module is octeon_ep.
+9
drivers/net/ethernet/marvell/octeon_ep/Makefile
··· 1 + # SPDX-License-Identifier: GPL-2.0 2 + # 3 + # Network driver for Marvell's Octeon PCI Endpoint NIC 4 + # 5 + 6 + obj-$(CONFIG_OCTEON_EP) += octeon_ep.o 7 + 8 + octeon_ep-y := octep_main.o octep_cn9k_pf.o octep_tx.o octep_rx.o \ 9 + octep_ethtool.o octep_ctrl_mbox.o octep_ctrl_net.o
+737
drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Marvell Octeon EP (EndPoint) Ethernet Driver 3 + * 4 + * Copyright (C) 2020 Marvell. 5 + * 6 + */ 7 + 8 + #include <linux/pci.h> 9 + #include <linux/netdevice.h> 10 + #include <linux/etherdevice.h> 11 + 12 + #include "octep_config.h" 13 + #include "octep_main.h" 14 + #include "octep_regs_cn9k_pf.h" 15 + 16 + /* Names of Hardware non-queue generic interrupts */ 17 + static char *cn93_non_ioq_msix_names[] = { 18 + "epf_ire_rint", 19 + "epf_ore_rint", 20 + "epf_vfire_rint0", 21 + "epf_vfire_rint1", 22 + "epf_vfore_rint0", 23 + "epf_vfore_rint1", 24 + "epf_mbox_rint0", 25 + "epf_mbox_rint1", 26 + "epf_oei_rint", 27 + "epf_dma_rint", 28 + "epf_dma_vf_rint0", 29 + "epf_dma_vf_rint1", 30 + "epf_pp_vf_rint0", 31 + "epf_pp_vf_rint1", 32 + "epf_misc_rint", 33 + "epf_rsvd", 34 + }; 35 + 36 + /* Dump useful hardware CSRs for debug purpose */ 37 + static void cn93_dump_regs(struct octep_device *oct, int qno) 38 + { 39 + struct device *dev = &oct->pdev->dev; 40 + 41 + dev_info(dev, "IQ-%d register dump\n", qno); 42 + dev_info(dev, "R[%d]_IN_INSTR_DBELL[0x%llx]: 0x%016llx\n", 43 + qno, CN93_SDP_R_IN_INSTR_DBELL(qno), 44 + octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(qno))); 45 + dev_info(dev, "R[%d]_IN_CONTROL[0x%llx]: 0x%016llx\n", 46 + qno, CN93_SDP_R_IN_CONTROL(qno), 47 + octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(qno))); 48 + dev_info(dev, "R[%d]_IN_ENABLE[0x%llx]: 0x%016llx\n", 49 + qno, CN93_SDP_R_IN_ENABLE(qno), 50 + octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(qno))); 51 + dev_info(dev, "R[%d]_IN_INSTR_BADDR[0x%llx]: 0x%016llx\n", 52 + qno, CN93_SDP_R_IN_INSTR_BADDR(qno), 53 + octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(qno))); 54 + dev_info(dev, "R[%d]_IN_INSTR_RSIZE[0x%llx]: 0x%016llx\n", 55 + qno, CN93_SDP_R_IN_INSTR_RSIZE(qno), 56 + octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_RSIZE(qno))); 57 + dev_info(dev, "R[%d]_IN_CNTS[0x%llx]: 0x%016llx\n", 58 + qno, CN93_SDP_R_IN_CNTS(qno), 59 + octep_read_csr64(oct, 
CN93_SDP_R_IN_CNTS(qno))); 60 + dev_info(dev, "R[%d]_IN_INT_LEVELS[0x%llx]: 0x%016llx\n", 61 + qno, CN93_SDP_R_IN_INT_LEVELS(qno), 62 + octep_read_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(qno))); 63 + dev_info(dev, "R[%d]_IN_PKT_CNT[0x%llx]: 0x%016llx\n", 64 + qno, CN93_SDP_R_IN_PKT_CNT(qno), 65 + octep_read_csr64(oct, CN93_SDP_R_IN_PKT_CNT(qno))); 66 + dev_info(dev, "R[%d]_IN_BYTE_CNT[0x%llx]: 0x%016llx\n", 67 + qno, CN93_SDP_R_IN_BYTE_CNT(qno), 68 + octep_read_csr64(oct, CN93_SDP_R_IN_BYTE_CNT(qno))); 69 + 70 + dev_info(dev, "OQ-%d register dump\n", qno); 71 + dev_info(dev, "R[%d]_OUT_SLIST_DBELL[0x%llx]: 0x%016llx\n", 72 + qno, CN93_SDP_R_OUT_SLIST_DBELL(qno), 73 + octep_read_csr64(oct, CN93_SDP_R_OUT_SLIST_DBELL(qno))); 74 + dev_info(dev, "R[%d]_OUT_CONTROL[0x%llx]: 0x%016llx\n", 75 + qno, CN93_SDP_R_OUT_CONTROL(qno), 76 + octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(qno))); 77 + dev_info(dev, "R[%d]_OUT_ENABLE[0x%llx]: 0x%016llx\n", 78 + qno, CN93_SDP_R_OUT_ENABLE(qno), 79 + octep_read_csr64(oct, CN93_SDP_R_OUT_ENABLE(qno))); 80 + dev_info(dev, "R[%d]_OUT_SLIST_BADDR[0x%llx]: 0x%016llx\n", 81 + qno, CN93_SDP_R_OUT_SLIST_BADDR(qno), 82 + octep_read_csr64(oct, CN93_SDP_R_OUT_SLIST_BADDR(qno))); 83 + dev_info(dev, "R[%d]_OUT_SLIST_RSIZE[0x%llx]: 0x%016llx\n", 84 + qno, CN93_SDP_R_OUT_SLIST_RSIZE(qno), 85 + octep_read_csr64(oct, CN93_SDP_R_OUT_SLIST_RSIZE(qno))); 86 + dev_info(dev, "R[%d]_OUT_CNTS[0x%llx]: 0x%016llx\n", 87 + qno, CN93_SDP_R_OUT_CNTS(qno), 88 + octep_read_csr64(oct, CN93_SDP_R_OUT_CNTS(qno))); 89 + dev_info(dev, "R[%d]_OUT_INT_LEVELS[0x%llx]: 0x%016llx\n", 90 + qno, CN93_SDP_R_OUT_INT_LEVELS(qno), 91 + octep_read_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(qno))); 92 + dev_info(dev, "R[%d]_OUT_PKT_CNT[0x%llx]: 0x%016llx\n", 93 + qno, CN93_SDP_R_OUT_PKT_CNT(qno), 94 + octep_read_csr64(oct, CN93_SDP_R_OUT_PKT_CNT(qno))); 95 + dev_info(dev, "R[%d]_OUT_BYTE_CNT[0x%llx]: 0x%016llx\n", 96 + qno, CN93_SDP_R_OUT_BYTE_CNT(qno), 97 + octep_read_csr64(oct, 
CN93_SDP_R_OUT_BYTE_CNT(qno))); 98 + dev_info(dev, "R[%d]_ERR_TYPE[0x%llx]: 0x%016llx\n", 99 + qno, CN93_SDP_R_ERR_TYPE(qno), 100 + octep_read_csr64(oct, CN93_SDP_R_ERR_TYPE(qno))); 101 + } 102 + 103 + /* Reset Hardware Tx queue */ 104 + static int cn93_reset_iq(struct octep_device *oct, int q_no) 105 + { 106 + struct octep_config *conf = oct->conf; 107 + u64 val = 0ULL; 108 + 109 + dev_dbg(&oct->pdev->dev, "Reset PF IQ-%d\n", q_no); 110 + 111 + /* Get absolute queue number */ 112 + q_no += conf->pf_ring_cfg.srn; 113 + 114 + /* Disable the Tx/Instruction Ring */ 115 + octep_write_csr64(oct, CN93_SDP_R_IN_ENABLE(q_no), val); 116 + 117 + /* clear the Instruction Ring packet/byte counts and doorbell CSRs */ 118 + octep_write_csr64(oct, CN93_SDP_R_IN_CNTS(q_no), val); 119 + octep_write_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(q_no), val); 120 + octep_write_csr64(oct, CN93_SDP_R_IN_PKT_CNT(q_no), val); 121 + octep_write_csr64(oct, CN93_SDP_R_IN_BYTE_CNT(q_no), val); 122 + octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(q_no), val); 123 + octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_RSIZE(q_no), val); 124 + 125 + val = 0xFFFFFFFF; 126 + octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(q_no), val); 127 + 128 + return 0; 129 + } 130 + 131 + /* Reset Hardware Rx queue */ 132 + static void cn93_reset_oq(struct octep_device *oct, int q_no) 133 + { 134 + u64 val = 0ULL; 135 + 136 + q_no += CFG_GET_PORTS_PF_SRN(oct->conf); 137 + 138 + /* Disable Output (Rx) Ring */ 139 + octep_write_csr64(oct, CN93_SDP_R_OUT_ENABLE(q_no), val); 140 + 141 + /* Clear count CSRs */ 142 + val = octep_read_csr(oct, CN93_SDP_R_OUT_CNTS(q_no)); 143 + octep_write_csr(oct, CN93_SDP_R_OUT_CNTS(q_no), val); 144 + 145 + octep_write_csr64(oct, CN93_SDP_R_OUT_PKT_CNT(q_no), 0xFFFFFFFFFULL); 146 + octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_DBELL(q_no), 0xFFFFFFFF); 147 + } 148 + 149 + /* Reset all hardware Tx/Rx queues */ 150 + static void octep_reset_io_queues_cn93_pf(struct octep_device *oct) 151 + { 152 + 
struct pci_dev *pdev = oct->pdev; 153 + int q; 154 + 155 + dev_dbg(&pdev->dev, "Reset OCTEP_CN93 PF IO Queues\n"); 156 + 157 + for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) { 158 + cn93_reset_iq(oct, q); 159 + cn93_reset_oq(oct, q); 160 + } 161 + } 162 + 163 + /* Initialize windowed addresses to access some hardware registers */ 164 + static void octep_setup_pci_window_regs_cn93_pf(struct octep_device *oct) 165 + { 166 + u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr; 167 + 168 + oct->pci_win_regs.pci_win_wr_addr = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_WR_ADDR64); 169 + oct->pci_win_regs.pci_win_rd_addr = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_RD_ADDR64); 170 + oct->pci_win_regs.pci_win_wr_data = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_WR_DATA64); 171 + oct->pci_win_regs.pci_win_rd_data = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_RD_DATA64); 172 + } 173 + 174 + /* Configure Hardware mapping: inform hardware which rings belong to PF. */ 175 + static void octep_configure_ring_mapping_cn93_pf(struct octep_device *oct) 176 + { 177 + struct octep_config *conf = oct->conf; 178 + struct pci_dev *pdev = oct->pdev; 179 + u64 pf_srn = CFG_GET_PORTS_PF_SRN(oct->conf); 180 + int q; 181 + 182 + for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(conf); q++) { 183 + u64 regval = 0; 184 + 185 + if (oct->pcie_port) 186 + regval = 8 << CN93_SDP_FUNC_SEL_EPF_BIT_POS; 187 + 188 + octep_write_csr64(oct, CN93_SDP_EPVF_RING(pf_srn + q), regval); 189 + 190 + regval = octep_read_csr64(oct, CN93_SDP_EPVF_RING(pf_srn + q)); 191 + dev_dbg(&pdev->dev, "Write SDP_EPVF_RING[0x%llx] = 0x%llx\n", 192 + CN93_SDP_EPVF_RING(pf_srn + q), regval); 193 + } 194 + } 195 + 196 + /* Initialize configuration limits and initial active config 93xx PF. 
*/ 197 + static void octep_init_config_cn93_pf(struct octep_device *oct) 198 + { 199 + struct octep_config *conf = oct->conf; 200 + struct pci_dev *pdev = oct->pdev; 201 + u64 val; 202 + 203 + /* Read ring configuration: 204 + * PF ring count, number of VFs and rings per VF supported 205 + */ 206 + val = octep_read_csr64(oct, CN93_SDP_EPF_RINFO); 207 + conf->sriov_cfg.max_rings_per_vf = CN93_SDP_EPF_RINFO_RPVF(val); 208 + conf->sriov_cfg.active_rings_per_vf = conf->sriov_cfg.max_rings_per_vf; 209 + conf->sriov_cfg.max_vfs = CN93_SDP_EPF_RINFO_NVFS(val); 210 + conf->sriov_cfg.active_vfs = conf->sriov_cfg.max_vfs; 211 + conf->sriov_cfg.vf_srn = CN93_SDP_EPF_RINFO_SRN(val); 212 + 213 + val = octep_read_csr64(oct, CN93_SDP_MAC_PF_RING_CTL(oct->pcie_port)); 214 + conf->pf_ring_cfg.srn = CN93_SDP_MAC_PF_RING_CTL_SRN(val); 215 + conf->pf_ring_cfg.max_io_rings = CN93_SDP_MAC_PF_RING_CTL_RPPF(val); 216 + conf->pf_ring_cfg.active_io_rings = conf->pf_ring_cfg.max_io_rings; 217 + dev_info(&pdev->dev, "pf_srn=%u rpvf=%u nvfs=%u rppf=%u\n", 218 + conf->pf_ring_cfg.srn, conf->sriov_cfg.active_rings_per_vf, 219 + conf->sriov_cfg.active_vfs, conf->pf_ring_cfg.active_io_rings); 220 + 221 + conf->iq.num_descs = OCTEP_IQ_MAX_DESCRIPTORS; 222 + conf->iq.instr_type = OCTEP_64BYTE_INSTR; 223 + conf->iq.pkind = 0; 224 + conf->iq.db_min = OCTEP_DB_MIN; 225 + conf->iq.intr_threshold = OCTEP_IQ_INTR_THRESHOLD; 226 + 227 + conf->oq.num_descs = OCTEP_OQ_MAX_DESCRIPTORS; 228 + conf->oq.buf_size = OCTEP_OQ_BUF_SIZE; 229 + conf->oq.refill_threshold = OCTEP_OQ_REFILL_THRESHOLD; 230 + conf->oq.oq_intr_pkt = OCTEP_OQ_INTR_PKT_THRESHOLD; 231 + conf->oq.oq_intr_time = OCTEP_OQ_INTR_TIME_THRESHOLD; 232 + 233 + conf->msix_cfg.non_ioq_msix = CN93_NUM_NON_IOQ_INTR; 234 + conf->msix_cfg.ioq_msix = conf->pf_ring_cfg.active_io_rings; 235 + conf->msix_cfg.non_ioq_msix_names = cn93_non_ioq_msix_names; 236 + 237 + conf->ctrl_mbox_cfg.barmem_addr = (void __iomem *)oct->mmio[2].hw_addr + (0x400000ull * 7); 238 + 
} 239 + 240 + /* Setup registers for a hardware Tx Queue */ 241 + static void octep_setup_iq_regs_cn93_pf(struct octep_device *oct, int iq_no) 242 + { 243 + struct octep_iq *iq = oct->iq[iq_no]; 244 + u32 reset_instr_cnt; 245 + u64 reg_val; 246 + 247 + iq_no += CFG_GET_PORTS_PF_SRN(oct->conf); 248 + reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(iq_no)); 249 + 250 + /* wait for IDLE to set to 1 */ 251 + if (!(reg_val & CN93_R_IN_CTL_IDLE)) { 252 + do { 253 + reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(iq_no)); 254 + } while (!(reg_val & CN93_R_IN_CTL_IDLE)); 255 + } 256 + 257 + reg_val |= CN93_R_IN_CTL_RDSIZE; 258 + reg_val |= CN93_R_IN_CTL_IS_64B; 259 + reg_val |= CN93_R_IN_CTL_ESR; 260 + octep_write_csr64(oct, CN93_SDP_R_IN_CONTROL(iq_no), reg_val); 261 + 262 + /* Write the start of the input queue's ring and its size */ 263 + octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(iq_no), 264 + iq->desc_ring_dma); 265 + octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_RSIZE(iq_no), 266 + iq->max_count); 267 + 268 + /* Remember the doorbell & instruction count register addr 269 + * for this queue 270 + */ 271 + iq->doorbell_reg = oct->mmio[0].hw_addr + 272 + CN93_SDP_R_IN_INSTR_DBELL(iq_no); 273 + iq->inst_cnt_reg = oct->mmio[0].hw_addr + 274 + CN93_SDP_R_IN_CNTS(iq_no); 275 + iq->intr_lvl_reg = oct->mmio[0].hw_addr + 276 + CN93_SDP_R_IN_INT_LEVELS(iq_no); 277 + 278 + /* Store the current instruction counter (used in flush_iq calculation) */ 279 + reset_instr_cnt = readl(iq->inst_cnt_reg); 280 + writel(reset_instr_cnt, iq->inst_cnt_reg); 281 + 282 + /* INTR_THRESHOLD is set to max(FFFFFFFF) to disable the INTR */ 283 + reg_val = CFG_GET_IQ_INTR_THRESHOLD(oct->conf) & 0xffffffff; 284 + octep_write_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(iq_no), reg_val); 285 + } 286 + 287 + /* Setup registers for a hardware Rx Queue */ 288 + static void octep_setup_oq_regs_cn93_pf(struct octep_device *oct, int oq_no) 289 + { 290 + u64 reg_val; 291 + u64 oq_ctl = 0ULL; 292 + u32 
time_threshold = 0; 293 + struct octep_oq *oq = oct->oq[oq_no]; 294 + 295 + oq_no += CFG_GET_PORTS_PF_SRN(oct->conf); 296 + reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no)); 297 + 298 + /* wait for IDLE to set to 1 */ 299 + if (!(reg_val & CN93_R_OUT_CTL_IDLE)) { 300 + do { 301 + reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no)); 302 + } while (!(reg_val & CN93_R_OUT_CTL_IDLE)); 303 + } 304 + 305 + reg_val &= ~(CN93_R_OUT_CTL_IMODE); 306 + reg_val &= ~(CN93_R_OUT_CTL_ROR_P); 307 + reg_val &= ~(CN93_R_OUT_CTL_NSR_P); 308 + reg_val &= ~(CN93_R_OUT_CTL_ROR_I); 309 + reg_val &= ~(CN93_R_OUT_CTL_NSR_I); 310 + reg_val &= ~(CN93_R_OUT_CTL_ES_I); 311 + reg_val &= ~(CN93_R_OUT_CTL_ROR_D); 312 + reg_val &= ~(CN93_R_OUT_CTL_NSR_D); 313 + reg_val &= ~(CN93_R_OUT_CTL_ES_D); 314 + reg_val |= (CN93_R_OUT_CTL_ES_P); 315 + 316 + octep_write_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no), reg_val); 317 + octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_BADDR(oq_no), 318 + oq->desc_ring_dma); 319 + octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_RSIZE(oq_no), 320 + oq->max_count); 321 + 322 + oq_ctl = octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no)); 323 + oq_ctl &= ~0x7fffffULL; //clear the ISIZE and BSIZE (22-0) 324 + oq_ctl |= (oq->buffer_size & 0xffff); //populate the BSIZE (15-0) 325 + octep_write_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no), oq_ctl); 326 + 327 + /* Get the mapped address of the pkt_sent and pkts_credit regs */ 328 + oq->pkts_sent_reg = oct->mmio[0].hw_addr + CN93_SDP_R_OUT_CNTS(oq_no); 329 + oq->pkts_credit_reg = oct->mmio[0].hw_addr + 330 + CN93_SDP_R_OUT_SLIST_DBELL(oq_no); 331 + 332 + time_threshold = CFG_GET_OQ_INTR_TIME(oct->conf); 333 + reg_val = ((u64)time_threshold << 32) | 334 + CFG_GET_OQ_INTR_PKT(oct->conf); 335 + octep_write_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(oq_no), reg_val); 336 + } 337 + 338 + /* Setup registers for a PF mailbox */ 339 + static void octep_setup_mbox_regs_cn93_pf(struct octep_device *oct, int q_no) 340 + { 341 + 
struct octep_mbox *mbox = oct->mbox[q_no]; 342 + 343 + mbox->q_no = q_no; 344 + 345 + /* PF mbox interrupt reg */ 346 + mbox->mbox_int_reg = oct->mmio[0].hw_addr + CN93_SDP_EPF_MBOX_RINT(0); 347 + 348 + /* PF to VF DATA reg. PF writes into this reg */ 349 + mbox->mbox_write_reg = oct->mmio[0].hw_addr + CN93_SDP_R_MBOX_PF_VF_DATA(q_no); 350 + 351 + /* VF to PF DATA reg. PF reads from this reg */ 352 + mbox->mbox_read_reg = oct->mmio[0].hw_addr + CN93_SDP_R_MBOX_VF_PF_DATA(q_no); 353 + } 354 + 355 + /* Mailbox Interrupt handler */ 356 + static void cn93_handle_pf_mbox_intr(struct octep_device *oct) 357 + { 358 + u64 mbox_int_val = 0ULL, val = 0ULL, qno = 0ULL; 359 + 360 + mbox_int_val = readq(oct->mbox[0]->mbox_int_reg); 361 + for (qno = 0; qno < OCTEP_MAX_VF; qno++) { 362 + val = readq(oct->mbox[qno]->mbox_read_reg); 363 + dev_dbg(&oct->pdev->dev, 364 + "PF MBOX READ: val:%llx from VF:%llx\n", val, qno); 365 + } 366 + 367 + writeq(mbox_int_val, oct->mbox[0]->mbox_int_reg); 368 + } 369 + 370 + /* Interrupts handler for all non-queue generic interrupts. 
*/ 371 + static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev) 372 + { 373 + struct octep_device *oct = (struct octep_device *)dev; 374 + struct pci_dev *pdev = oct->pdev; 375 + u64 reg_val = 0; 376 + int i = 0; 377 + 378 + /* Check for IRERR INTR */ 379 + reg_val = octep_read_csr64(oct, CN93_SDP_EPF_IRERR_RINT); 380 + if (reg_val) { 381 + dev_info(&pdev->dev, 382 + "received IRERR_RINT intr: 0x%llx\n", reg_val); 383 + octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT, reg_val); 384 + 385 + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) { 386 + reg_val = octep_read_csr64(oct, 387 + CN93_SDP_R_ERR_TYPE(i)); 388 + if (reg_val) { 389 + dev_info(&pdev->dev, 390 + "Received err type on IQ-%d: 0x%llx\n", 391 + i, reg_val); 392 + octep_write_csr64(oct, CN93_SDP_R_ERR_TYPE(i), 393 + reg_val); 394 + } 395 + } 396 + goto irq_handled; 397 + } 398 + 399 + /* Check for ORERR INTR */ 400 + reg_val = octep_read_csr64(oct, CN93_SDP_EPF_ORERR_RINT); 401 + if (reg_val) { 402 + dev_info(&pdev->dev, 403 + "Received ORERR_RINT intr: 0x%llx\n", reg_val); 404 + octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT, reg_val); 405 + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) { 406 + reg_val = octep_read_csr64(oct, CN93_SDP_R_ERR_TYPE(i)); 407 + if (reg_val) { 408 + dev_info(&pdev->dev, 409 + "Received err type on OQ-%d: 0x%llx\n", 410 + i, reg_val); 411 + octep_write_csr64(oct, CN93_SDP_R_ERR_TYPE(i), 412 + reg_val); 413 + } 414 + } 415 + 416 + goto irq_handled; 417 + } 418 + 419 + /* Check for VFIRE INTR */ 420 + reg_val = octep_read_csr64(oct, CN93_SDP_EPF_VFIRE_RINT(0)); 421 + if (reg_val) { 422 + dev_info(&pdev->dev, 423 + "Received VFIRE_RINT intr: 0x%llx\n", reg_val); 424 + octep_write_csr64(oct, CN93_SDP_EPF_VFIRE_RINT(0), reg_val); 425 + goto irq_handled; 426 + } 427 + 428 + /* Check for VFORE INTR */ 429 + reg_val = octep_read_csr64(oct, CN93_SDP_EPF_VFORE_RINT(0)); 430 + if (reg_val) { 431 + dev_info(&pdev->dev, 432 + "Received VFORE_RINT 
intr: 0x%llx\n", reg_val); 433 + octep_write_csr64(oct, CN93_SDP_EPF_VFORE_RINT(0), reg_val); 434 + goto irq_handled; 435 + } 436 + 437 + /* Check for MBOX INTR */ 438 + reg_val = octep_read_csr64(oct, CN93_SDP_EPF_MBOX_RINT(0)); 439 + if (reg_val) { 440 + dev_info(&pdev->dev, 441 + "Received MBOX_RINT intr: 0x%llx\n", reg_val); 442 + cn93_handle_pf_mbox_intr(oct); 443 + goto irq_handled; 444 + } 445 + 446 + /* Check for OEI INTR */ 447 + reg_val = octep_read_csr64(oct, CN93_SDP_EPF_OEI_RINT); 448 + if (reg_val) { 449 + dev_info(&pdev->dev, 450 + "Received OEI_EINT intr: 0x%llx\n", reg_val); 451 + octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT, reg_val); 452 + queue_work(octep_wq, &oct->ctrl_mbox_task); 453 + goto irq_handled; 454 + } 455 + 456 + /* Check for DMA INTR */ 457 + reg_val = octep_read_csr64(oct, CN93_SDP_EPF_DMA_RINT); 458 + if (reg_val) { 459 + octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT, reg_val); 460 + goto irq_handled; 461 + } 462 + 463 + /* Check for DMA VF INTR */ 464 + reg_val = octep_read_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT(0)); 465 + if (reg_val) { 466 + dev_info(&pdev->dev, 467 + "Received DMA_VF_RINT intr: 0x%llx\n", reg_val); 468 + octep_write_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT(0), reg_val); 469 + goto irq_handled; 470 + } 471 + 472 + /* Check for PPVF INTR */ 473 + reg_val = octep_read_csr64(oct, CN93_SDP_EPF_PP_VF_RINT(0)); 474 + if (reg_val) { 475 + dev_info(&pdev->dev, 476 + "Received PP_VF_RINT intr: 0x%llx\n", reg_val); 477 + octep_write_csr64(oct, CN93_SDP_EPF_PP_VF_RINT(0), reg_val); 478 + goto irq_handled; 479 + } 480 + 481 + /* Check for MISC INTR */ 482 + reg_val = octep_read_csr64(oct, CN93_SDP_EPF_MISC_RINT); 483 + if (reg_val) { 484 + dev_info(&pdev->dev, 485 + "Received MISC_RINT intr: 0x%llx\n", reg_val); 486 + octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT, reg_val); 487 + goto irq_handled; 488 + } 489 + 490 + dev_info(&pdev->dev, "Reserved inerrupts raised; Ignore\n"); 491 + irq_handled: 492 + return IRQ_HANDLED; 493 + } 
494 + 495 + /* Tx/Rx queue interrupt handler */ 496 + static irqreturn_t octep_ioq_intr_handler_cn93_pf(void *data) 497 + { 498 + struct octep_ioq_vector *vector = (struct octep_ioq_vector *)data; 499 + struct octep_oq *oq = vector->oq; 500 + 501 + napi_schedule_irqoff(oq->napi); 502 + return IRQ_HANDLED; 503 + } 504 + 505 + /* soft reset of 93xx */ 506 + static int octep_soft_reset_cn93_pf(struct octep_device *oct) 507 + { 508 + dev_info(&oct->pdev->dev, "CN93XX: Doing soft reset\n"); 509 + 510 + octep_write_csr64(oct, CN93_SDP_WIN_WR_MASK_REG, 0xFF); 511 + 512 + /* Set core domain reset bit */ 513 + OCTEP_PCI_WIN_WRITE(oct, CN93_RST_CORE_DOMAIN_W1S, 1); 514 + /* Wait for 100ms as Octeon resets. */ 515 + mdelay(100); 516 + /* clear core domain reset bit */ 517 + OCTEP_PCI_WIN_WRITE(oct, CN93_RST_CORE_DOMAIN_W1C, 1); 518 + 519 + return 0; 520 + } 521 + 522 + /* Re-initialize Octeon hardware registers */ 523 + static void octep_reinit_regs_cn93_pf(struct octep_device *oct) 524 + { 525 + u32 i; 526 + 527 + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) 528 + oct->hw_ops.setup_iq_regs(oct, i); 529 + 530 + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) 531 + oct->hw_ops.setup_oq_regs(oct, i); 532 + 533 + oct->hw_ops.enable_interrupts(oct); 534 + oct->hw_ops.enable_io_queues(oct); 535 + 536 + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) 537 + writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg); 538 + } 539 + 540 + /* Enable all interrupts */ 541 + static void octep_enable_interrupts_cn93_pf(struct octep_device *oct) 542 + { 543 + u64 intr_mask = 0ULL; 544 + int srn, num_rings, i; 545 + 546 + srn = CFG_GET_PORTS_PF_SRN(oct->conf); 547 + num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); 548 + 549 + for (i = 0; i < num_rings; i++) 550 + intr_mask |= (0x1ULL << (srn + i)); 551 + 552 + octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT_ENA_W1S, intr_mask); 553 + octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT_ENA_W1S, 
intr_mask); 554 + octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT_ENA_W1S, -1ULL); 555 + octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT_ENA_W1S, intr_mask); 556 + octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT_ENA_W1S, intr_mask); 557 + } 558 + 559 + /* Disable all interrupts */ 560 + static void octep_disable_interrupts_cn93_pf(struct octep_device *oct) 561 + { 562 + u64 intr_mask = 0ULL; 563 + int srn, num_rings, i; 564 + 565 + srn = CFG_GET_PORTS_PF_SRN(oct->conf); 566 + num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); 567 + 568 + for (i = 0; i < num_rings; i++) 569 + intr_mask |= (0x1ULL << (srn + i)); 570 + 571 + octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT_ENA_W1C, intr_mask); 572 + octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT_ENA_W1C, intr_mask); 573 + octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT_ENA_W1C, -1ULL); 574 + octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT_ENA_W1C, intr_mask); 575 + octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT_ENA_W1C, intr_mask); 576 + } 577 + 578 + /* Get new Octeon Read Index: index of descriptor that Octeon reads next. 
*/ 579 + static u32 octep_update_iq_read_index_cn93_pf(struct octep_iq *iq) 580 + { 581 + u32 pkt_in_done = readl(iq->inst_cnt_reg); 582 + u32 last_done, new_idx; 583 + 584 + last_done = pkt_in_done - iq->pkt_in_done; 585 + iq->pkt_in_done = pkt_in_done; 586 + 587 + new_idx = (iq->octep_read_index + last_done) % iq->max_count; 588 + 589 + return new_idx; 590 + } 591 + 592 + /* Enable a hardware Tx Queue */ 593 + static void octep_enable_iq_cn93_pf(struct octep_device *oct, int iq_no) 594 + { 595 + u64 loop = HZ; 596 + u64 reg_val; 597 + 598 + iq_no += CFG_GET_PORTS_PF_SRN(oct->conf); 599 + 600 + octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(iq_no), 0xFFFFFFFF); 601 + 602 + while (octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(iq_no)) && 603 + loop--) { 604 + schedule_timeout_interruptible(1); 605 + } 606 + 607 + reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(iq_no)); 608 + reg_val |= (0x1ULL << 62); 609 + octep_write_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(iq_no), reg_val); 610 + 611 + reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no)); 612 + reg_val |= 0x1ULL; 613 + octep_write_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no), reg_val); 614 + } 615 + 616 + /* Enable a hardware Rx Queue */ 617 + static void octep_enable_oq_cn93_pf(struct octep_device *oct, int oq_no) 618 + { 619 + u64 reg_val = 0ULL; 620 + 621 + oq_no += CFG_GET_PORTS_PF_SRN(oct->conf); 622 + 623 + reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(oq_no)); 624 + reg_val |= (0x1ULL << 62); 625 + octep_write_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(oq_no), reg_val); 626 + 627 + octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_DBELL(oq_no), 0xFFFFFFFF); 628 + 629 + reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no)); 630 + reg_val |= 0x1ULL; 631 + octep_write_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no), reg_val); 632 + } 633 + 634 + /* Enable all hardware Tx/Rx Queues assined to PF */ 635 + static void octep_enable_io_queues_cn93_pf(struct octep_device *oct) 636 + { 637 + u8 q; 638 + 
639 + for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) { 640 + octep_enable_iq_cn93_pf(oct, q); 641 + octep_enable_oq_cn93_pf(oct, q); 642 + } 643 + } 644 + 645 + /* Disable a hardware Tx Queue assined to PF */ 646 + static void octep_disable_iq_cn93_pf(struct octep_device *oct, int iq_no) 647 + { 648 + u64 reg_val = 0ULL; 649 + 650 + iq_no += CFG_GET_PORTS_PF_SRN(oct->conf); 651 + 652 + reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no)); 653 + reg_val &= ~0x1ULL; 654 + octep_write_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no), reg_val); 655 + } 656 + 657 + /* Disable a hardware Rx Queue assined to PF */ 658 + static void octep_disable_oq_cn93_pf(struct octep_device *oct, int oq_no) 659 + { 660 + u64 reg_val = 0ULL; 661 + 662 + oq_no += CFG_GET_PORTS_PF_SRN(oct->conf); 663 + reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no)); 664 + reg_val &= ~0x1ULL; 665 + octep_write_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no), reg_val); 666 + } 667 + 668 + /* Disable all hardware Tx/Rx Queues assined to PF */ 669 + static void octep_disable_io_queues_cn93_pf(struct octep_device *oct) 670 + { 671 + int q = 0; 672 + 673 + for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) { 674 + octep_disable_iq_cn93_pf(oct, q); 675 + octep_disable_oq_cn93_pf(oct, q); 676 + } 677 + } 678 + 679 + /* Dump hardware registers (including Tx/Rx queues) for debugging. */ 680 + static void octep_dump_registers_cn93_pf(struct octep_device *oct) 681 + { 682 + u8 srn, num_rings, q; 683 + 684 + srn = CFG_GET_PORTS_PF_SRN(oct->conf); 685 + num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); 686 + 687 + for (q = srn; q < srn + num_rings; q++) 688 + cn93_dump_regs(oct, q); 689 + } 690 + 691 + /** 692 + * octep_device_setup_cn93_pf() - Setup Octeon device. 693 + * 694 + * @oct: Octeon device private data structure. 695 + * 696 + * - initialize hardware operations. 697 + * - get target side pcie port number for the device. 698 + * - setup window access to hardware registers. 
699 + * - set initial configuration and max limits. 700 + * - setup hardware mapping of rings to the PF device. 701 + */ 702 + void octep_device_setup_cn93_pf(struct octep_device *oct) 703 + { 704 + oct->hw_ops.setup_iq_regs = octep_setup_iq_regs_cn93_pf; 705 + oct->hw_ops.setup_oq_regs = octep_setup_oq_regs_cn93_pf; 706 + oct->hw_ops.setup_mbox_regs = octep_setup_mbox_regs_cn93_pf; 707 + 708 + oct->hw_ops.non_ioq_intr_handler = octep_non_ioq_intr_handler_cn93_pf; 709 + oct->hw_ops.ioq_intr_handler = octep_ioq_intr_handler_cn93_pf; 710 + oct->hw_ops.soft_reset = octep_soft_reset_cn93_pf; 711 + oct->hw_ops.reinit_regs = octep_reinit_regs_cn93_pf; 712 + 713 + oct->hw_ops.enable_interrupts = octep_enable_interrupts_cn93_pf; 714 + oct->hw_ops.disable_interrupts = octep_disable_interrupts_cn93_pf; 715 + 716 + oct->hw_ops.update_iq_read_idx = octep_update_iq_read_index_cn93_pf; 717 + 718 + oct->hw_ops.enable_iq = octep_enable_iq_cn93_pf; 719 + oct->hw_ops.enable_oq = octep_enable_oq_cn93_pf; 720 + oct->hw_ops.enable_io_queues = octep_enable_io_queues_cn93_pf; 721 + 722 + oct->hw_ops.disable_iq = octep_disable_iq_cn93_pf; 723 + oct->hw_ops.disable_oq = octep_disable_oq_cn93_pf; 724 + oct->hw_ops.disable_io_queues = octep_disable_io_queues_cn93_pf; 725 + oct->hw_ops.reset_io_queues = octep_reset_io_queues_cn93_pf; 726 + 727 + oct->hw_ops.dump_registers = octep_dump_registers_cn93_pf; 728 + 729 + octep_setup_pci_window_regs_cn93_pf(oct); 730 + 731 + oct->pcie_port = octep_read_csr64(oct, CN93_SDP_MAC_NUMBER) & 0xff; 732 + dev_info(&oct->pdev->dev, 733 + "Octeon device using PCIE Port %d\n", oct->pcie_port); 734 + 735 + octep_init_config_cn93_pf(oct); 736 + octep_configure_ring_mapping_cn93_pf(oct); 737 + }
+204
drivers/net/ethernet/marvell/octeon_ep/octep_config.h
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell Octeon EP (EndPoint) Ethernet Driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#ifndef _OCTEP_CONFIG_H_
#define _OCTEP_CONFIG_H_

/* Tx instruction types by length */
#define OCTEP_32BYTE_INSTR  32
#define OCTEP_64BYTE_INSTR  64

/* Tx Queue: maximum descriptors per ring */
#define OCTEP_IQ_MAX_DESCRIPTORS    1024
/* Minimum input (Tx) requests to be enqueued to ring doorbell */
#define OCTEP_DB_MIN                1
/* Packet threshold for Tx queue interrupt */
#define OCTEP_IQ_INTR_THRESHOLD     0x0

/* Rx Queue: maximum descriptors per ring */
#define OCTEP_OQ_MAX_DESCRIPTORS   1024

/* Rx buffer size: Use page size buffers.
 * Build skb from allocated page buffer once the packet is received.
 * When a gathered packet is received, make head page as skb head and
 * page buffers in consecutive Rx descriptors as fragments.
 */
#define OCTEP_OQ_BUF_SIZE          (SKB_WITH_OVERHEAD(PAGE_SIZE))
#define OCTEP_OQ_PKTS_PER_INTR     128
#define OCTEP_OQ_REFILL_THRESHOLD  (OCTEP_OQ_MAX_DESCRIPTORS / 4)

#define OCTEP_OQ_INTR_PKT_THRESHOLD   1
#define OCTEP_OQ_INTR_TIME_THRESHOLD  10

#define OCTEP_MSIX_NAME_SIZE      (IFNAMSIZ + 32)

/* Tx Queue wake threshold
 * wakeup a stopped Tx queue if minimum 2 descriptors are available.
 * Even a skb with fragments consume only one Tx queue descriptor entry.
 */
#define OCTEP_WAKE_QUEUE_THRESHOLD 2

/* Minimum MTU supported by Octeon network interface */
#define OCTEP_MIN_MTU        ETH_MIN_MTU
/* Maximum MTU supported by Octeon interface */
#define OCTEP_MAX_MTU        (10000 - (ETH_HLEN + ETH_FCS_LEN))
/* Default MTU */
#define OCTEP_DEFAULT_MTU    1500

/* Macros to get octeon config params */
#define CFG_GET_IQ_CFG(cfg)             ((cfg)->iq)
#define CFG_GET_IQ_NUM_DESC(cfg)        ((cfg)->iq.num_descs)
#define CFG_GET_IQ_INSTR_TYPE(cfg)      ((cfg)->iq.instr_type)
#define CFG_GET_IQ_PKIND(cfg)           ((cfg)->iq.pkind)
/* instruction size is fixed at 64 bytes; cfg argument kept for symmetry */
#define CFG_GET_IQ_INSTR_SIZE(cfg)      (64)
#define CFG_GET_IQ_DB_MIN(cfg)          ((cfg)->iq.db_min)
#define CFG_GET_IQ_INTR_THRESHOLD(cfg)  ((cfg)->iq.intr_threshold)

#define CFG_GET_OQ_NUM_DESC(cfg)          ((cfg)->oq.num_descs)
#define CFG_GET_OQ_BUF_SIZE(cfg)          ((cfg)->oq.buf_size)
#define CFG_GET_OQ_REFILL_THRESHOLD(cfg)  ((cfg)->oq.refill_threshold)
#define CFG_GET_OQ_INTR_PKT(cfg)          ((cfg)->oq.oq_intr_pkt)
#define CFG_GET_OQ_INTR_TIME(cfg)         ((cfg)->oq.oq_intr_time)

#define CFG_GET_PORTS_MAX_IO_RINGS(cfg)     ((cfg)->pf_ring_cfg.max_io_rings)
#define CFG_GET_PORTS_ACTIVE_IO_RINGS(cfg)  ((cfg)->pf_ring_cfg.active_io_rings)
#define CFG_GET_PORTS_PF_SRN(cfg)           ((cfg)->pf_ring_cfg.srn)

#define CFG_GET_DPI_PKIND(cfg)            ((cfg)->core_cfg.dpi_pkind)
#define CFG_GET_CORE_TICS_PER_US(cfg)     ((cfg)->core_cfg.core_tics_per_us)
#define CFG_GET_COPROC_TICS_PER_US(cfg)   ((cfg)->core_cfg.coproc_tics_per_us)

#define CFG_GET_MAX_VFS(cfg)      ((cfg)->sriov_cfg.max_vfs)
#define CFG_GET_ACTIVE_VFS(cfg)   ((cfg)->sriov_cfg.active_vfs)
#define CFG_GET_MAX_RPVF(cfg)     ((cfg)->sriov_cfg.max_rings_per_vf)
#define CFG_GET_ACTIVE_RPVF(cfg)  ((cfg)->sriov_cfg.active_rings_per_vf)
#define CFG_GET_VF_SRN(cfg)       ((cfg)->sriov_cfg.vf_srn)

#define CFG_GET_IOQ_MSIX(cfg)            ((cfg)->msix_cfg.ioq_msix)
#define CFG_GET_NON_IOQ_MSIX(cfg)        ((cfg)->msix_cfg.non_ioq_msix)
#define CFG_GET_NON_IOQ_MSIX_NAMES(cfg)  ((cfg)->msix_cfg.non_ioq_msix_names)

#define CFG_GET_CTRL_MBOX_MEM_ADDR(cfg)  ((cfg)->ctrl_mbox_cfg.barmem_addr)

/* Hardware Tx Queue configuration. */
struct octep_iq_config {
	/* Size of the Input queue (number of commands) */
	u16 num_descs;

	/* Command size - 32 or 64 bytes */
	u16 instr_type;

	/* pkind for packets sent to Octeon */
	u16 pkind;

	/* Minimum number of commands pending to be posted to Octeon before driver
	 * hits the Input queue doorbell.
	 */
	u16 db_min;

	/* Trigger the IQ interrupt when processed cmd count reaches
	 * this level.
	 */
	u32 intr_threshold;
};

/* Hardware Rx Queue configuration. */
struct octep_oq_config {
	/* Size of Output queue (number of descriptors) */
	u16 num_descs;

	/* Size of buffer in this Output queue. */
	u16 buf_size;

	/* The number of buffers that were consumed during packet processing
	 * by the driver on this Output queue before the driver attempts to
	 * replenish the descriptor ring with new buffers.
	 */
	u16 refill_threshold;

	/* Interrupt Coalescing (Packet Count). Octeon will interrupt the host
	 * only if it sent as many packets as specified by this field.
	 * The driver usually does not use packet count interrupt coalescing.
	 */
	u32 oq_intr_pkt;

	/* Interrupt Coalescing (Time Interval). Octeon will interrupt the host
	 * if at least one packet was sent in the time interval specified by
	 * this field. The driver uses time interval interrupt coalescing by
	 * default. The time is specified in microseconds.
	 */
	u32 oq_intr_time;
};

/* Tx/Rx configuration */
struct octep_pf_ring_config {
	/* Max number of IOQs */
	u16 max_io_rings;

	/* Number of active IOQs */
	u16 active_io_rings;

	/* Starting IOQ number: this changes based on which PEM is used */
	u16 srn;
};

/* Octeon Hardware SRIOV config */
struct octep_sriov_config {
	/* Max number of VF devices supported */
	u16 max_vfs;

	/* Number of VF devices enabled */
	u16 active_vfs;

	/* Max number of rings assigned to VF */
	u8 max_rings_per_vf;

	/* Number of rings enabled per VF */
	u8 active_rings_per_vf;

	/* starting ring number of VF's: ring-0 of VF-0 of the PF */
	u16 vf_srn;
};

/* Octeon MSI-x config. */
struct octep_msix_config {
	/* Number of IOQ interrupts */
	u16 ioq_msix;

	/* Number of Non IOQ interrupts */
	u16 non_ioq_msix;

	/* Names of Non IOQ interrupts */
	char **non_ioq_msix_names;
};

struct octep_ctrl_mbox_config {
	/* Barmem address for control mbox */
	void __iomem *barmem_addr;
};

/* Data Structure to hold configuration limits and active config */
struct octep_config {
	/* Input Queue attributes. */
	struct octep_iq_config iq;

	/* Output Queue attributes. */
	struct octep_oq_config oq;

	/* NIC Port Configuration */
	struct octep_pf_ring_config pf_ring_cfg;

	/* SRIOV configuration of the PF */
	struct octep_sriov_config sriov_cfg;

	/* MSI-X interrupt config */
	struct octep_msix_config msix_cfg;

	/* ctrl mbox config */
	struct octep_ctrl_mbox_config ctrl_mbox_cfg;
};
#endif /* _OCTEP_CONFIG_H_ */
+256
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Marvell Octeon EP (EndPoint) Ethernet Driver 3 + * 4 + * Copyright (C) 2020 Marvell. 5 + * 6 + */ 7 + #include <linux/types.h> 8 + #include <linux/errno.h> 9 + #include <linux/string.h> 10 + #include <linux/mutex.h> 11 + #include <linux/jiffies.h> 12 + #include <linux/sched.h> 13 + #include <linux/sched/signal.h> 14 + #include <linux/io.h> 15 + #include <linux/pci.h> 16 + #include <linux/etherdevice.h> 17 + 18 + #include "octep_ctrl_mbox.h" 19 + #include "octep_config.h" 20 + #include "octep_main.h" 21 + 22 + /* Timeout in msecs for message response */ 23 + #define OCTEP_CTRL_MBOX_MSG_TIMEOUT_MS 100 24 + /* Time in msecs to wait for message response */ 25 + #define OCTEP_CTRL_MBOX_MSG_WAIT_MS 10 26 + 27 + #define OCTEP_CTRL_MBOX_INFO_MAGIC_NUM_OFFSET(m) (m) 28 + #define OCTEP_CTRL_MBOX_INFO_BARMEM_SZ_OFFSET(m) ((m) + 8) 29 + #define OCTEP_CTRL_MBOX_INFO_HOST_VERSION_OFFSET(m) ((m) + 16) 30 + #define OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(m) ((m) + 24) 31 + #define OCTEP_CTRL_MBOX_INFO_FW_VERSION_OFFSET(m) ((m) + 136) 32 + #define OCTEP_CTRL_MBOX_INFO_FW_STATUS_OFFSET(m) ((m) + 144) 33 + 34 + #define OCTEP_CTRL_MBOX_H2FQ_INFO_OFFSET(m) ((m) + OCTEP_CTRL_MBOX_INFO_SZ) 35 + #define OCTEP_CTRL_MBOX_H2FQ_PROD_OFFSET(m) (OCTEP_CTRL_MBOX_H2FQ_INFO_OFFSET(m)) 36 + #define OCTEP_CTRL_MBOX_H2FQ_CONS_OFFSET(m) ((OCTEP_CTRL_MBOX_H2FQ_INFO_OFFSET(m)) + 4) 37 + #define OCTEP_CTRL_MBOX_H2FQ_ELEM_SZ_OFFSET(m) ((OCTEP_CTRL_MBOX_H2FQ_INFO_OFFSET(m)) + 8) 38 + #define OCTEP_CTRL_MBOX_H2FQ_ELEM_CNT_OFFSET(m) ((OCTEP_CTRL_MBOX_H2FQ_INFO_OFFSET(m)) + 12) 39 + 40 + #define OCTEP_CTRL_MBOX_F2HQ_INFO_OFFSET(m) ((m) + \ 41 + OCTEP_CTRL_MBOX_INFO_SZ + \ 42 + OCTEP_CTRL_MBOX_H2FQ_INFO_SZ) 43 + #define OCTEP_CTRL_MBOX_F2HQ_PROD_OFFSET(m) (OCTEP_CTRL_MBOX_F2HQ_INFO_OFFSET(m)) 44 + #define OCTEP_CTRL_MBOX_F2HQ_CONS_OFFSET(m) ((OCTEP_CTRL_MBOX_F2HQ_INFO_OFFSET(m)) + 4) 45 + #define OCTEP_CTRL_MBOX_F2HQ_ELEM_SZ_OFFSET(m) 
((OCTEP_CTRL_MBOX_F2HQ_INFO_OFFSET(m)) + 8) 46 + #define OCTEP_CTRL_MBOX_F2HQ_ELEM_CNT_OFFSET(m) ((OCTEP_CTRL_MBOX_F2HQ_INFO_OFFSET(m)) + 12) 47 + 48 + #define OCTEP_CTRL_MBOX_Q_OFFSET(m, i) ((m) + \ 49 + (sizeof(struct octep_ctrl_mbox_msg) * (i))) 50 + 51 + static u32 octep_ctrl_mbox_circq_inc(u32 index, u32 mask) 52 + { 53 + return (index + 1) & mask; 54 + } 55 + 56 + static u32 octep_ctrl_mbox_circq_space(u32 pi, u32 ci, u32 mask) 57 + { 58 + return mask - ((pi - ci) & mask); 59 + } 60 + 61 + static u32 octep_ctrl_mbox_circq_depth(u32 pi, u32 ci, u32 mask) 62 + { 63 + return ((pi - ci) & mask); 64 + } 65 + 66 + int octep_ctrl_mbox_init(struct octep_ctrl_mbox *mbox) 67 + { 68 + u64 version, magic_num, status; 69 + 70 + if (!mbox) 71 + return -EINVAL; 72 + 73 + if (!mbox->barmem) { 74 + pr_info("octep_ctrl_mbox : Invalid barmem %p\n", mbox->barmem); 75 + return -EINVAL; 76 + } 77 + 78 + magic_num = readq(OCTEP_CTRL_MBOX_INFO_MAGIC_NUM_OFFSET(mbox->barmem)); 79 + if (magic_num != OCTEP_CTRL_MBOX_MAGIC_NUMBER) { 80 + pr_info("octep_ctrl_mbox : Invalid magic number %llx\n", magic_num); 81 + return -EINVAL; 82 + } 83 + 84 + version = readq(OCTEP_CTRL_MBOX_INFO_FW_VERSION_OFFSET(mbox->barmem)); 85 + if (version != OCTEP_DRV_VERSION) { 86 + pr_info("octep_ctrl_mbox : Firmware version mismatch %llx != %x\n", 87 + version, OCTEP_DRV_VERSION); 88 + return -EINVAL; 89 + } 90 + 91 + status = readq(OCTEP_CTRL_MBOX_INFO_FW_STATUS_OFFSET(mbox->barmem)); 92 + if (status != OCTEP_CTRL_MBOX_STATUS_READY) { 93 + pr_info("octep_ctrl_mbox : Firmware is not ready.\n"); 94 + return -EINVAL; 95 + } 96 + 97 + mbox->barmem_sz = readl(OCTEP_CTRL_MBOX_INFO_BARMEM_SZ_OFFSET(mbox->barmem)); 98 + 99 + writeq(mbox->version, OCTEP_CTRL_MBOX_INFO_HOST_VERSION_OFFSET(mbox->barmem)); 100 + writeq(OCTEP_CTRL_MBOX_STATUS_INIT, OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(mbox->barmem)); 101 + 102 + mbox->h2fq.elem_cnt = readl(OCTEP_CTRL_MBOX_H2FQ_ELEM_CNT_OFFSET(mbox->barmem)); 103 + mbox->h2fq.elem_sz = 
readl(OCTEP_CTRL_MBOX_H2FQ_ELEM_SZ_OFFSET(mbox->barmem)); 104 + mbox->h2fq.mask = (mbox->h2fq.elem_cnt - 1); 105 + mutex_init(&mbox->h2fq_lock); 106 + 107 + mbox->f2hq.elem_cnt = readl(OCTEP_CTRL_MBOX_F2HQ_ELEM_CNT_OFFSET(mbox->barmem)); 108 + mbox->f2hq.elem_sz = readl(OCTEP_CTRL_MBOX_F2HQ_ELEM_SZ_OFFSET(mbox->barmem)); 109 + mbox->f2hq.mask = (mbox->f2hq.elem_cnt - 1); 110 + mutex_init(&mbox->f2hq_lock); 111 + 112 + mbox->h2fq.hw_prod = OCTEP_CTRL_MBOX_H2FQ_PROD_OFFSET(mbox->barmem); 113 + mbox->h2fq.hw_cons = OCTEP_CTRL_MBOX_H2FQ_CONS_OFFSET(mbox->barmem); 114 + mbox->h2fq.hw_q = mbox->barmem + 115 + OCTEP_CTRL_MBOX_INFO_SZ + 116 + OCTEP_CTRL_MBOX_H2FQ_INFO_SZ + 117 + OCTEP_CTRL_MBOX_F2HQ_INFO_SZ; 118 + 119 + mbox->f2hq.hw_prod = OCTEP_CTRL_MBOX_F2HQ_PROD_OFFSET(mbox->barmem); 120 + mbox->f2hq.hw_cons = OCTEP_CTRL_MBOX_F2HQ_CONS_OFFSET(mbox->barmem); 121 + mbox->f2hq.hw_q = mbox->h2fq.hw_q + 122 + ((mbox->h2fq.elem_sz + sizeof(union octep_ctrl_mbox_msg_hdr)) * 123 + mbox->h2fq.elem_cnt); 124 + 125 + /* ensure ready state is seen after everything is initialized */ 126 + wmb(); 127 + writeq(OCTEP_CTRL_MBOX_STATUS_READY, OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(mbox->barmem)); 128 + 129 + pr_info("Octep ctrl mbox : Init successful.\n"); 130 + 131 + return 0; 132 + } 133 + 134 + int octep_ctrl_mbox_send(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg) 135 + { 136 + unsigned long timeout = msecs_to_jiffies(OCTEP_CTRL_MBOX_MSG_TIMEOUT_MS); 137 + unsigned long period = msecs_to_jiffies(OCTEP_CTRL_MBOX_MSG_WAIT_MS); 138 + struct octep_ctrl_mbox_q *q; 139 + unsigned long expire; 140 + u64 *mbuf, *word0; 141 + u8 __iomem *qidx; 142 + u16 pi, ci; 143 + int i; 144 + 145 + if (!mbox || !msg) 146 + return -EINVAL; 147 + 148 + q = &mbox->h2fq; 149 + pi = readl(q->hw_prod); 150 + ci = readl(q->hw_cons); 151 + 152 + if (!octep_ctrl_mbox_circq_space(pi, ci, q->mask)) 153 + return -ENOMEM; 154 + 155 + qidx = OCTEP_CTRL_MBOX_Q_OFFSET(q->hw_q, pi); 156 + mbuf = (u64 
*)msg->msg; 157 + word0 = &msg->hdr.word0; 158 + 159 + mutex_lock(&mbox->h2fq_lock); 160 + for (i = 1; i <= msg->hdr.sizew; i++) 161 + writeq(*mbuf++, (qidx + (i * 8))); 162 + 163 + writeq(*word0, qidx); 164 + 165 + pi = octep_ctrl_mbox_circq_inc(pi, q->mask); 166 + writel(pi, q->hw_prod); 167 + mutex_unlock(&mbox->h2fq_lock); 168 + 169 + /* don't check for notification response */ 170 + if (msg->hdr.flags & OCTEP_CTRL_MBOX_MSG_HDR_FLAG_NOTIFY) 171 + return 0; 172 + 173 + expire = jiffies + timeout; 174 + while (true) { 175 + *word0 = readq(qidx); 176 + if (msg->hdr.flags == OCTEP_CTRL_MBOX_MSG_HDR_FLAG_RESP) 177 + break; 178 + schedule_timeout_interruptible(period); 179 + if (signal_pending(current) || time_after(jiffies, expire)) { 180 + pr_info("octep_ctrl_mbox: Timed out\n"); 181 + return -EBUSY; 182 + } 183 + } 184 + mbuf = (u64 *)msg->msg; 185 + for (i = 1; i <= msg->hdr.sizew; i++) 186 + *mbuf++ = readq(qidx + (i * 8)); 187 + 188 + return 0; 189 + } 190 + 191 + int octep_ctrl_mbox_recv(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg) 192 + { 193 + struct octep_ctrl_mbox_q *q; 194 + u32 count, pi, ci; 195 + u8 __iomem *qidx; 196 + u64 *mbuf; 197 + int i; 198 + 199 + if (!mbox || !msg) 200 + return -EINVAL; 201 + 202 + q = &mbox->f2hq; 203 + pi = readl(q->hw_prod); 204 + ci = readl(q->hw_cons); 205 + count = octep_ctrl_mbox_circq_depth(pi, ci, q->mask); 206 + if (!count) 207 + return -EAGAIN; 208 + 209 + qidx = OCTEP_CTRL_MBOX_Q_OFFSET(q->hw_q, ci); 210 + mbuf = (u64 *)msg->msg; 211 + 212 + mutex_lock(&mbox->f2hq_lock); 213 + 214 + msg->hdr.word0 = readq(qidx); 215 + for (i = 1; i <= msg->hdr.sizew; i++) 216 + *mbuf++ = readq(qidx + (i * 8)); 217 + 218 + ci = octep_ctrl_mbox_circq_inc(ci, q->mask); 219 + writel(ci, q->hw_cons); 220 + 221 + mutex_unlock(&mbox->f2hq_lock); 222 + 223 + if (msg->hdr.flags != OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ || !mbox->process_req) 224 + return 0; 225 + 226 + mbox->process_req(mbox->user_ctx, msg); 227 + mbuf = (u64 
*)msg->msg; 228 + for (i = 1; i <= msg->hdr.sizew; i++) 229 + writeq(*mbuf++, (qidx + (i * 8))); 230 + 231 + writeq(msg->hdr.word0, qidx); 232 + 233 + return 0; 234 + } 235 + 236 + int octep_ctrl_mbox_uninit(struct octep_ctrl_mbox *mbox) 237 + { 238 + if (!mbox) 239 + return -EINVAL; 240 + 241 + writeq(OCTEP_CTRL_MBOX_STATUS_UNINIT, 242 + OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(mbox->barmem)); 243 + /* ensure uninit state is written before uninitialization */ 244 + wmb(); 245 + 246 + mutex_destroy(&mbox->h2fq_lock); 247 + mutex_destroy(&mbox->f2hq_lock); 248 + 249 + writeq(OCTEP_CTRL_MBOX_STATUS_INVALID, 250 + OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(mbox->barmem)); 251 + writeq(0, OCTEP_CTRL_MBOX_INFO_HOST_VERSION_OFFSET(mbox->barmem)); 252 + 253 + pr_info("Octep ctrl mbox : Uninit successful.\n"); 254 + 255 + return 0; 256 + }
+170
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell Octeon EP (EndPoint) Ethernet Driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */
#ifndef __OCTEP_CTRL_MBOX_H__
#define __OCTEP_CTRL_MBOX_H__

/* barmem structure
 * |===========================================|
 * |Info (16 + 120 + 120 = 256 bytes)          |
 * |-------------------------------------------|
 * |magic number (8 bytes)                     |
 * |bar memory size (4 bytes)                  |
 * |reserved (4 bytes)                         |
 * |-------------------------------------------|
 * |host version (8 bytes)                     |
 * |host status (8 bytes)                      |
 * |host reserved (104 bytes)                  |
 * |-------------------------------------------|
 * |fw version (8 bytes)                       |
 * |fw status (8 bytes)                        |
 * |fw reserved (104 bytes)                    |
 * |===========================================|
 * |Host to Fw Queue info (16 bytes)           |
 * |-------------------------------------------|
 * |producer index (4 bytes)                   |
 * |consumer index (4 bytes)                   |
 * |element size (4 bytes)                     |
 * |element count (4 bytes)                    |
 * |===========================================|
 * |Fw to Host Queue info (16 bytes)           |
 * |-------------------------------------------|
 * |producer index (4 bytes)                   |
 * |consumer index (4 bytes)                   |
 * |element size (4 bytes)                     |
 * |element count (4 bytes)                    |
 * |===========================================|
 * |Host to Fw Queue                           |
 * |-------------------------------------------|
 * |((elem_sz + hdr(8 bytes)) * elem_cnt) bytes|
 * |===========================================|
 * |===========================================|
 * |Fw to Host Queue                           |
 * |-------------------------------------------|
 * |((elem_sz + hdr(8 bytes)) * elem_cnt) bytes|
 * |===========================================|
 */

#define OCTEP_CTRL_MBOX_MAGIC_NUMBER			0xdeaddeadbeefbeefull

/* Size of mbox info in bytes */
#define OCTEP_CTRL_MBOX_INFO_SZ				256
/* Size of mbox host to target queue info in bytes */
#define OCTEP_CTRL_MBOX_H2FQ_INFO_SZ			16
/* Size of mbox target to host queue info in bytes */
#define OCTEP_CTRL_MBOX_F2HQ_INFO_SZ			16
/* Size of mbox queue in bytes */
#define OCTEP_CTRL_MBOX_Q_SZ(sz, cnt)		(((sz) + 8) * (cnt))
/* Size of mbox in bytes */
#define OCTEP_CTRL_MBOX_SZ(hsz, hcnt, fsz, fcnt)	(OCTEP_CTRL_MBOX_INFO_SZ + \
							 OCTEP_CTRL_MBOX_H2FQ_INFO_SZ + \
							 OCTEP_CTRL_MBOX_F2HQ_INFO_SZ + \
							 OCTEP_CTRL_MBOX_Q_SZ(hsz, hcnt) + \
							 OCTEP_CTRL_MBOX_Q_SZ(fsz, fcnt))

/* Valid request message */
#define OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ		BIT(0)
/* Valid response message */
#define OCTEP_CTRL_MBOX_MSG_HDR_FLAG_RESP		BIT(1)
/* Valid notification, no response required */
#define OCTEP_CTRL_MBOX_MSG_HDR_FLAG_NOTIFY		BIT(2)

/* Host/firmware mbox endpoint lifecycle states, written to the
 * status fields of the barmem info block.
 */
enum octep_ctrl_mbox_status {
	OCTEP_CTRL_MBOX_STATUS_INVALID = 0,
	OCTEP_CTRL_MBOX_STATUS_INIT,
	OCTEP_CTRL_MBOX_STATUS_READY,
	OCTEP_CTRL_MBOX_STATUS_UNINIT
};

/* mbox message header, overlaid on the first 8 bytes of a queue slot */
union octep_ctrl_mbox_msg_hdr {
	u64 word0;
	struct {
		/* OCTEP_CTRL_MBOX_MSG_HDR_FLAG_* */
		u32 flags;
		/* size of message in words excluding header */
		u32 sizew;
	};
};

/* mbox message */
struct octep_ctrl_mbox_msg {
	/* mbox transaction header */
	union octep_ctrl_mbox_msg_hdr hdr;
	/* pointer to message buffer */
	void *msg;
};

/* Mbox queue */
struct octep_ctrl_mbox_q {
	/* q element size, should be aligned to unsigned long */
	u16 elem_sz;
	/* q element count, should be power of 2 */
	u16 elem_cnt;
	/* q mask */
	u16 mask;
	/* producer address in bar mem */
	u8 __iomem *hw_prod;
	/* consumer address in bar mem */
	u8 __iomem *hw_cons;
	/* q base address in bar mem */
	u8 __iomem *hw_q;
};

struct octep_ctrl_mbox {
	/* host driver version */
	u64 version;
	/* size of bar memory */
	u32 barmem_sz;
	/* pointer to BAR memory */
	u8 __iomem *barmem;
	/* user context for callback, can be null */
	void *user_ctx;
	/* callback handler for processing request, called from octep_ctrl_mbox_recv */
	int (*process_req)(void *user_ctx, struct octep_ctrl_mbox_msg *msg);
	/* host-to-fw queue */
	struct octep_ctrl_mbox_q h2fq;
	/* fw-to-host queue */
	struct octep_ctrl_mbox_q f2hq;
	/* lock for h2fq */
	struct mutex h2fq_lock;
	/* lock for f2hq */
	struct mutex f2hq_lock;
};

/* Initialize control mbox.
 *
 * @param mbox: non-null pointer to struct octep_ctrl_mbox.
 *
 * return value: 0 on success, -errno on failure.
 */
int octep_ctrl_mbox_init(struct octep_ctrl_mbox *mbox);

/* Send mbox message.
 *
 * @param mbox: non-null pointer to struct octep_ctrl_mbox.
 * @param msg:  non-null pointer to the message to send; for requests the
 *              response is copied back into msg on return.
 *
 * return value: 0 on success, -errno on failure.
 */
int octep_ctrl_mbox_send(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg);

/* Retrieve mbox message.
 *
 * @param mbox: non-null pointer to struct octep_ctrl_mbox.
 * @param msg:  non-null pointer to the buffer to receive the message into.
 *
 * return value: 0 on success, -errno on failure.
 */
int octep_ctrl_mbox_recv(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg);

/* Uninitialize control mbox.
 *
 * @param mbox: non-null pointer to struct octep_ctrl_mbox.
 *
 * return value: 0 on success, -errno on failure.
 */
int octep_ctrl_mbox_uninit(struct octep_ctrl_mbox *mbox);

#endif /* __OCTEP_CTRL_MBOX_H__ */
+194
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Marvell Octeon EP (EndPoint) Ethernet Driver 3 + * 4 + * Copyright (C) 2020 Marvell. 5 + * 6 + */ 7 + #include <linux/string.h> 8 + #include <linux/types.h> 9 + #include <linux/etherdevice.h> 10 + #include <linux/pci.h> 11 + 12 + #include "octep_config.h" 13 + #include "octep_main.h" 14 + #include "octep_ctrl_net.h" 15 + 16 + int octep_get_link_status(struct octep_device *oct) 17 + { 18 + struct octep_ctrl_net_h2f_req req = {}; 19 + struct octep_ctrl_net_h2f_resp *resp; 20 + struct octep_ctrl_mbox_msg msg = {}; 21 + int err; 22 + 23 + req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS; 24 + req.link.cmd = OCTEP_CTRL_NET_CMD_GET; 25 + 26 + msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ; 27 + msg.hdr.sizew = OCTEP_CTRL_NET_H2F_STATE_REQ_SZW; 28 + msg.msg = &req; 29 + err = octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg); 30 + if (err) 31 + return err; 32 + 33 + resp = (struct octep_ctrl_net_h2f_resp *)&req; 34 + return resp->link.state; 35 + } 36 + 37 + void octep_set_link_status(struct octep_device *oct, bool up) 38 + { 39 + struct octep_ctrl_net_h2f_req req = {}; 40 + struct octep_ctrl_mbox_msg msg = {}; 41 + 42 + req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS; 43 + req.link.cmd = OCTEP_CTRL_NET_CMD_SET; 44 + req.link.state = (up) ? OCTEP_CTRL_NET_STATE_UP : OCTEP_CTRL_NET_STATE_DOWN; 45 + 46 + msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ; 47 + msg.hdr.sizew = OCTEP_CTRL_NET_H2F_STATE_REQ_SZW; 48 + msg.msg = &req; 49 + octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg); 50 + } 51 + 52 + void octep_set_rx_state(struct octep_device *oct, bool up) 53 + { 54 + struct octep_ctrl_net_h2f_req req = {}; 55 + struct octep_ctrl_mbox_msg msg = {}; 56 + 57 + req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_RX_STATE; 58 + req.link.cmd = OCTEP_CTRL_NET_CMD_SET; 59 + req.link.state = (up) ? 
OCTEP_CTRL_NET_STATE_UP : OCTEP_CTRL_NET_STATE_DOWN; 60 + 61 + msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ; 62 + msg.hdr.sizew = OCTEP_CTRL_NET_H2F_STATE_REQ_SZW; 63 + msg.msg = &req; 64 + octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg); 65 + } 66 + 67 + int octep_get_mac_addr(struct octep_device *oct, u8 *addr) 68 + { 69 + struct octep_ctrl_net_h2f_req req = {}; 70 + struct octep_ctrl_net_h2f_resp *resp; 71 + struct octep_ctrl_mbox_msg msg = {}; 72 + int err; 73 + 74 + req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_MAC; 75 + req.link.cmd = OCTEP_CTRL_NET_CMD_GET; 76 + 77 + msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ; 78 + msg.hdr.sizew = OCTEP_CTRL_NET_H2F_MAC_REQ_SZW; 79 + msg.msg = &req; 80 + err = octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg); 81 + if (err) 82 + return err; 83 + 84 + resp = (struct octep_ctrl_net_h2f_resp *)&req; 85 + memcpy(addr, resp->mac.addr, ETH_ALEN); 86 + 87 + return err; 88 + } 89 + 90 + int octep_set_mac_addr(struct octep_device *oct, u8 *addr) 91 + { 92 + struct octep_ctrl_net_h2f_req req = {}; 93 + struct octep_ctrl_mbox_msg msg = {}; 94 + 95 + req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_MAC; 96 + req.mac.cmd = OCTEP_CTRL_NET_CMD_SET; 97 + memcpy(&req.mac.addr, addr, ETH_ALEN); 98 + 99 + msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ; 100 + msg.hdr.sizew = OCTEP_CTRL_NET_H2F_MAC_REQ_SZW; 101 + msg.msg = &req; 102 + 103 + return octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg); 104 + } 105 + 106 + int octep_set_mtu(struct octep_device *oct, int mtu) 107 + { 108 + struct octep_ctrl_net_h2f_req req = {}; 109 + struct octep_ctrl_mbox_msg msg = {}; 110 + 111 + req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_MTU; 112 + req.mtu.cmd = OCTEP_CTRL_NET_CMD_SET; 113 + req.mtu.val = mtu; 114 + 115 + msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ; 116 + msg.hdr.sizew = OCTEP_CTRL_NET_H2F_MTU_REQ_SZW; 117 + msg.msg = &req; 118 + 119 + return octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg); 120 + } 121 + 122 + int octep_get_if_stats(struct octep_device *oct) 123 + { 124 + void 
__iomem *iface_rx_stats; 125 + void __iomem *iface_tx_stats; 126 + struct octep_ctrl_net_h2f_req req = {}; 127 + struct octep_ctrl_mbox_msg msg = {}; 128 + int err; 129 + 130 + req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_GET_IF_STATS; 131 + req.mac.cmd = OCTEP_CTRL_NET_CMD_GET; 132 + req.get_stats.offset = oct->ctrl_mbox_ifstats_offset; 133 + 134 + msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ; 135 + msg.hdr.sizew = OCTEP_CTRL_NET_H2F_GET_STATS_REQ_SZW; 136 + msg.msg = &req; 137 + err = octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg); 138 + if (err) 139 + return err; 140 + 141 + iface_rx_stats = oct->ctrl_mbox.barmem + oct->ctrl_mbox_ifstats_offset; 142 + iface_tx_stats = oct->ctrl_mbox.barmem + oct->ctrl_mbox_ifstats_offset + 143 + sizeof(struct octep_iface_rx_stats); 144 + memcpy_fromio(&oct->iface_rx_stats, iface_rx_stats, sizeof(struct octep_iface_rx_stats)); 145 + memcpy_fromio(&oct->iface_tx_stats, iface_tx_stats, sizeof(struct octep_iface_tx_stats)); 146 + 147 + return err; 148 + } 149 + 150 + int octep_get_link_info(struct octep_device *oct) 151 + { 152 + struct octep_ctrl_net_h2f_req req = {}; 153 + struct octep_ctrl_net_h2f_resp *resp; 154 + struct octep_ctrl_mbox_msg msg = {}; 155 + int err; 156 + 157 + req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_INFO; 158 + req.mac.cmd = OCTEP_CTRL_NET_CMD_GET; 159 + 160 + msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ; 161 + msg.hdr.sizew = OCTEP_CTRL_NET_H2F_LINK_INFO_REQ_SZW; 162 + msg.msg = &req; 163 + err = octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg); 164 + if (err) 165 + return err; 166 + 167 + resp = (struct octep_ctrl_net_h2f_resp *)&req; 168 + oct->link_info.supported_modes = resp->link_info.supported_modes; 169 + oct->link_info.advertised_modes = resp->link_info.advertised_modes; 170 + oct->link_info.autoneg = resp->link_info.autoneg; 171 + oct->link_info.pause = resp->link_info.pause; 172 + oct->link_info.speed = resp->link_info.speed; 173 + 174 + return err; 175 + } 176 + 177 + int octep_set_link_info(struct 
octep_device *oct, struct octep_iface_link_info *link_info) 178 + { 179 + struct octep_ctrl_net_h2f_req req = {}; 180 + struct octep_ctrl_mbox_msg msg = {}; 181 + 182 + req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_INFO; 183 + req.link_info.cmd = OCTEP_CTRL_NET_CMD_SET; 184 + req.link_info.info.advertised_modes = link_info->advertised_modes; 185 + req.link_info.info.autoneg = link_info->autoneg; 186 + req.link_info.info.pause = link_info->pause; 187 + req.link_info.info.speed = link_info->speed; 188 + 189 + msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ; 190 + msg.hdr.sizew = OCTEP_CTRL_NET_H2F_LINK_INFO_REQ_SZW; 191 + msg.msg = &req; 192 + 193 + return octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg); 194 + }
+299
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Marvell Octeon EP (EndPoint) Ethernet Driver 3 + * 4 + * Copyright (C) 2020 Marvell. 5 + * 6 + */ 7 + #ifndef __OCTEP_CTRL_NET_H__ 8 + #define __OCTEP_CTRL_NET_H__ 9 + 10 + /* Supported commands */ 11 + enum octep_ctrl_net_cmd { 12 + OCTEP_CTRL_NET_CMD_GET = 0, 13 + OCTEP_CTRL_NET_CMD_SET, 14 + }; 15 + 16 + /* Supported states */ 17 + enum octep_ctrl_net_state { 18 + OCTEP_CTRL_NET_STATE_DOWN = 0, 19 + OCTEP_CTRL_NET_STATE_UP, 20 + }; 21 + 22 + /* Supported replies */ 23 + enum octep_ctrl_net_reply { 24 + OCTEP_CTRL_NET_REPLY_OK = 0, 25 + OCTEP_CTRL_NET_REPLY_GENERIC_FAIL, 26 + OCTEP_CTRL_NET_REPLY_INVALID_PARAM, 27 + }; 28 + 29 + /* Supported host to fw commands */ 30 + enum octep_ctrl_net_h2f_cmd { 31 + OCTEP_CTRL_NET_H2F_CMD_INVALID = 0, 32 + OCTEP_CTRL_NET_H2F_CMD_MTU, 33 + OCTEP_CTRL_NET_H2F_CMD_MAC, 34 + OCTEP_CTRL_NET_H2F_CMD_GET_IF_STATS, 35 + OCTEP_CTRL_NET_H2F_CMD_GET_XSTATS, 36 + OCTEP_CTRL_NET_H2F_CMD_GET_Q_STATS, 37 + OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS, 38 + OCTEP_CTRL_NET_H2F_CMD_RX_STATE, 39 + OCTEP_CTRL_NET_H2F_CMD_LINK_INFO, 40 + }; 41 + 42 + /* Supported fw to host commands */ 43 + enum octep_ctrl_net_f2h_cmd { 44 + OCTEP_CTRL_NET_F2H_CMD_INVALID = 0, 45 + OCTEP_CTRL_NET_F2H_CMD_LINK_STATUS, 46 + }; 47 + 48 + struct octep_ctrl_net_req_hdr { 49 + /* sender id */ 50 + u16 sender; 51 + /* receiver id */ 52 + u16 receiver; 53 + /* octep_ctrl_net_h2t_cmd */ 54 + u16 cmd; 55 + /* reserved */ 56 + u16 rsvd0; 57 + }; 58 + 59 + /* get/set mtu request */ 60 + struct octep_ctrl_net_h2f_req_cmd_mtu { 61 + /* enum octep_ctrl_net_cmd */ 62 + u16 cmd; 63 + /* 0-65535 */ 64 + u16 val; 65 + }; 66 + 67 + /* get/set mac request */ 68 + struct octep_ctrl_net_h2f_req_cmd_mac { 69 + /* enum octep_ctrl_net_cmd */ 70 + u16 cmd; 71 + /* xx:xx:xx:xx:xx:xx */ 72 + u8 addr[ETH_ALEN]; 73 + }; 74 + 75 + /* get if_stats, xstats, q_stats request */ 76 + struct octep_ctrl_net_h2f_req_cmd_get_stats { 77 + /* offset into barmem 
where fw should copy over stats */ 78 + u32 offset; 79 + }; 80 + 81 + /* get/set link state, rx state */ 82 + struct octep_ctrl_net_h2f_req_cmd_state { 83 + /* enum octep_ctrl_net_cmd */ 84 + u16 cmd; 85 + /* enum octep_ctrl_net_state */ 86 + u16 state; 87 + }; 88 + 89 + /* link info */ 90 + struct octep_ctrl_net_link_info { 91 + /* Bitmap of Supported link speeds/modes */ 92 + u64 supported_modes; 93 + /* Bitmap of Advertised link speeds/modes */ 94 + u64 advertised_modes; 95 + /* Autonegotation state; bit 0=disabled; bit 1=enabled */ 96 + u8 autoneg; 97 + /* Pause frames setting. bit 0=disabled; bit 1=enabled */ 98 + u8 pause; 99 + /* Negotiated link speed in Mbps */ 100 + u32 speed; 101 + }; 102 + 103 + /* get/set link info */ 104 + struct octep_ctrl_net_h2f_req_cmd_link_info { 105 + /* enum octep_ctrl_net_cmd */ 106 + u16 cmd; 107 + /* struct octep_ctrl_net_link_info */ 108 + struct octep_ctrl_net_link_info info; 109 + }; 110 + 111 + /* Host to fw request data */ 112 + struct octep_ctrl_net_h2f_req { 113 + struct octep_ctrl_net_req_hdr hdr; 114 + union { 115 + struct octep_ctrl_net_h2f_req_cmd_mtu mtu; 116 + struct octep_ctrl_net_h2f_req_cmd_mac mac; 117 + struct octep_ctrl_net_h2f_req_cmd_get_stats get_stats; 118 + struct octep_ctrl_net_h2f_req_cmd_state link; 119 + struct octep_ctrl_net_h2f_req_cmd_state rx; 120 + struct octep_ctrl_net_h2f_req_cmd_link_info link_info; 121 + }; 122 + } __packed; 123 + 124 + struct octep_ctrl_net_resp_hdr { 125 + /* sender id */ 126 + u16 sender; 127 + /* receiver id */ 128 + u16 receiver; 129 + /* octep_ctrl_net_h2t_cmd */ 130 + u16 cmd; 131 + /* octep_ctrl_net_reply */ 132 + u16 reply; 133 + }; 134 + 135 + /* get mtu response */ 136 + struct octep_ctrl_net_h2f_resp_cmd_mtu { 137 + /* 0-65535 */ 138 + u16 val; 139 + }; 140 + 141 + /* get mac response */ 142 + struct octep_ctrl_net_h2f_resp_cmd_mac { 143 + /* xx:xx:xx:xx:xx:xx */ 144 + u8 addr[ETH_ALEN]; 145 + }; 146 + 147 + /* get link state, rx state response */ 148 + struct 
octep_ctrl_net_h2f_resp_cmd_state { 149 + /* enum octep_ctrl_net_state */ 150 + u16 state; 151 + }; 152 + 153 + /* Host to fw response data */ 154 + struct octep_ctrl_net_h2f_resp { 155 + struct octep_ctrl_net_resp_hdr hdr; 156 + union { 157 + struct octep_ctrl_net_h2f_resp_cmd_mtu mtu; 158 + struct octep_ctrl_net_h2f_resp_cmd_mac mac; 159 + struct octep_ctrl_net_h2f_resp_cmd_state link; 160 + struct octep_ctrl_net_h2f_resp_cmd_state rx; 161 + struct octep_ctrl_net_link_info link_info; 162 + }; 163 + } __packed; 164 + 165 + /* link state notification */ 166 + struct octep_ctrl_net_f2h_req_cmd_state { 167 + /* enum octep_ctrl_net_state */ 168 + u16 state; 169 + }; 170 + 171 + /* Fw to host request data */ 172 + struct octep_ctrl_net_f2h_req { 173 + struct octep_ctrl_net_req_hdr hdr; 174 + union { 175 + struct octep_ctrl_net_f2h_req_cmd_state link; 176 + }; 177 + }; 178 + 179 + /* Fw to host response data */ 180 + struct octep_ctrl_net_f2h_resp { 181 + struct octep_ctrl_net_resp_hdr hdr; 182 + }; 183 + 184 + /* Size of host to fw octep_ctrl_mbox queue element */ 185 + union octep_ctrl_net_h2f_data_sz { 186 + struct octep_ctrl_net_h2f_req h2f_req; 187 + struct octep_ctrl_net_h2f_resp h2f_resp; 188 + }; 189 + 190 + /* Size of fw to host octep_ctrl_mbox queue element */ 191 + union octep_ctrl_net_f2h_data_sz { 192 + struct octep_ctrl_net_f2h_req f2h_req; 193 + struct octep_ctrl_net_f2h_resp f2h_resp; 194 + }; 195 + 196 + /* size of host to fw data in words */ 197 + #define OCTEP_CTRL_NET_H2F_DATA_SZW ((sizeof(union octep_ctrl_net_h2f_data_sz)) / \ 198 + (sizeof(unsigned long))) 199 + 200 + /* size of fw to host data in words */ 201 + #define OCTEP_CTRL_NET_F2H_DATA_SZW ((sizeof(union octep_ctrl_net_f2h_data_sz)) / \ 202 + (sizeof(unsigned long))) 203 + 204 + /* size in words of get/set mtu request */ 205 + #define OCTEP_CTRL_NET_H2F_MTU_REQ_SZW 2 206 + /* size in words of get/set mac request */ 207 + #define OCTEP_CTRL_NET_H2F_MAC_REQ_SZW 2 208 + /* size in words of get 
stats request */ 209 + #define OCTEP_CTRL_NET_H2F_GET_STATS_REQ_SZW 2 210 + /* size in words of get/set state request */ 211 + #define OCTEP_CTRL_NET_H2F_STATE_REQ_SZW 2 212 + /* size in words of get/set link info request */ 213 + #define OCTEP_CTRL_NET_H2F_LINK_INFO_REQ_SZW 4 214 + 215 + /* size in words of get mtu response */ 216 + #define OCTEP_CTRL_NET_H2F_GET_MTU_RESP_SZW 2 217 + /* size in words of set mtu response */ 218 + #define OCTEP_CTRL_NET_H2F_SET_MTU_RESP_SZW 1 219 + /* size in words of get mac response */ 220 + #define OCTEP_CTRL_NET_H2F_GET_MAC_RESP_SZW 2 221 + /* size in words of set mac response */ 222 + #define OCTEP_CTRL_NET_H2F_SET_MAC_RESP_SZW 1 223 + /* size in words of get state response */ 224 + #define OCTEP_CTRL_NET_H2F_GET_STATE_RESP_SZW 2 225 + /* size in words of set state response */ 226 + #define OCTEP_CTRL_NET_H2F_SET_STATE_RESP_SZW 1 227 + /* size in words of get link info response */ 228 + #define OCTEP_CTRL_NET_H2F_GET_LINK_INFO_RESP_SZW 4 229 + /* size in words of set link info response */ 230 + #define OCTEP_CTRL_NET_H2F_SET_LINK_INFO_RESP_SZW 1 231 + 232 + /** Get link status from firmware. 233 + * 234 + * @param oct: non-null pointer to struct octep_device. 235 + * 236 + * return value: link status 0=down, 1=up. 237 + */ 238 + int octep_get_link_status(struct octep_device *oct); 239 + 240 + /** Set link status in firmware. 241 + * 242 + * @param oct: non-null pointer to struct octep_device. 243 + * @param up: boolean status. 244 + */ 245 + void octep_set_link_status(struct octep_device *oct, bool up); 246 + 247 + /** Set rx state in firmware. 248 + * 249 + * @param oct: non-null pointer to struct octep_device. 250 + * @param up: boolean status. 251 + */ 252 + void octep_set_rx_state(struct octep_device *oct, bool up); 253 + 254 + /** Get mac address from firmware. 255 + * 256 + * @param oct: non-null pointer to struct octep_device. 257 + * @param addr: non-null pointer to mac address. 
258 + * 259 + * return value: 0 on success, -errno on failure. 260 + */ 261 + int octep_get_mac_addr(struct octep_device *oct, u8 *addr); 262 + 263 + /** Set mac address in firmware. 264 + * 265 + * @param oct: non-null pointer to struct octep_device. 266 + * @param addr: non-null pointer to mac address. 267 + */ 268 + int octep_set_mac_addr(struct octep_device *oct, u8 *addr); 269 + 270 + /** Set mtu in firmware. 271 + * 272 + * @param oct: non-null pointer to struct octep_device. 273 + * @param mtu: mtu. 274 + */ 275 + int octep_set_mtu(struct octep_device *oct, int mtu); 276 + 277 + /** Get interface statistics from firmware. 278 + * 279 + * @param oct: non-null pointer to struct octep_device. 280 + * 281 + * return value: 0 on success, -errno on failure. 282 + */ 283 + int octep_get_if_stats(struct octep_device *oct); 284 + 285 + /** Get link info from firmware. 286 + * 287 + * @param oct: non-null pointer to struct octep_device. 288 + * 289 + * return value: 0 on success, -errno on failure. 290 + */ 291 + int octep_get_link_info(struct octep_device *oct); 292 + 293 + /** Set link info in firmware. 294 + * 295 + * @param oct: non-null pointer to struct octep_device. 296 + */ 297 + int octep_set_link_info(struct octep_device *oct, struct octep_iface_link_info *link_info); 298 + 299 + #endif /* __OCTEP_CTRL_NET_H__ */
+463
drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Marvell Octeon EP (EndPoint) Ethernet Driver 3 + * 4 + * Copyright (C) 2020 Marvell. 5 + * 6 + */ 7 + 8 + #include <linux/pci.h> 9 + #include <linux/netdevice.h> 10 + #include <linux/ethtool.h> 11 + 12 + #include "octep_config.h" 13 + #include "octep_main.h" 14 + #include "octep_ctrl_net.h" 15 + 16 + static const char octep_gstrings_global_stats[][ETH_GSTRING_LEN] = { 17 + "rx_packets", 18 + "tx_packets", 19 + "rx_bytes", 20 + "tx_bytes", 21 + "rx_alloc_errors", 22 + "tx_busy_errors", 23 + "rx_dropped", 24 + "tx_dropped", 25 + "tx_hw_pkts", 26 + "tx_hw_octs", 27 + "tx_hw_bcast", 28 + "tx_hw_mcast", 29 + "tx_hw_underflow", 30 + "tx_hw_control", 31 + "tx_less_than_64", 32 + "tx_equal_64", 33 + "tx_equal_65_to_127", 34 + "tx_equal_128_to_255", 35 + "tx_equal_256_to_511", 36 + "tx_equal_512_to_1023", 37 + "tx_equal_1024_to_1518", 38 + "tx_greater_than_1518", 39 + "rx_hw_pkts", 40 + "rx_hw_bytes", 41 + "rx_hw_bcast", 42 + "rx_hw_mcast", 43 + "rx_pause_pkts", 44 + "rx_pause_bytes", 45 + "rx_dropped_pkts_fifo_full", 46 + "rx_dropped_bytes_fifo_full", 47 + "rx_err_pkts", 48 + }; 49 + 50 + #define OCTEP_GLOBAL_STATS_CNT (sizeof(octep_gstrings_global_stats) / ETH_GSTRING_LEN) 51 + 52 + static const char octep_gstrings_tx_q_stats[][ETH_GSTRING_LEN] = { 53 + "tx_packets_posted[Q-%u]", 54 + "tx_packets_completed[Q-%u]", 55 + "tx_bytes[Q-%u]", 56 + "tx_busy[Q-%u]", 57 + }; 58 + 59 + #define OCTEP_TX_Q_STATS_CNT (sizeof(octep_gstrings_tx_q_stats) / ETH_GSTRING_LEN) 60 + 61 + static const char octep_gstrings_rx_q_stats[][ETH_GSTRING_LEN] = { 62 + "rx_packets[Q-%u]", 63 + "rx_bytes[Q-%u]", 64 + "rx_alloc_errors[Q-%u]", 65 + }; 66 + 67 + #define OCTEP_RX_Q_STATS_CNT (sizeof(octep_gstrings_rx_q_stats) / ETH_GSTRING_LEN) 68 + 69 + static void octep_get_drvinfo(struct net_device *netdev, 70 + struct ethtool_drvinfo *info) 71 + { 72 + struct octep_device *oct = netdev_priv(netdev); 73 + 74 + strscpy(info->driver, OCTEP_DRV_NAME, 
sizeof(info->driver)); 75 + strscpy(info->bus_info, pci_name(oct->pdev), sizeof(info->bus_info)); 76 + } 77 + 78 + static void octep_get_strings(struct net_device *netdev, 79 + u32 stringset, u8 *data) 80 + { 81 + struct octep_device *oct = netdev_priv(netdev); 82 + u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); 83 + char *strings = (char *)data; 84 + int i, j; 85 + 86 + switch (stringset) { 87 + case ETH_SS_STATS: 88 + for (i = 0; i < OCTEP_GLOBAL_STATS_CNT; i++) { 89 + snprintf(strings, ETH_GSTRING_LEN, 90 + octep_gstrings_global_stats[i]); 91 + strings += ETH_GSTRING_LEN; 92 + } 93 + 94 + for (i = 0; i < num_queues; i++) { 95 + for (j = 0; j < OCTEP_TX_Q_STATS_CNT; j++) { 96 + snprintf(strings, ETH_GSTRING_LEN, 97 + octep_gstrings_tx_q_stats[j], i); 98 + strings += ETH_GSTRING_LEN; 99 + } 100 + } 101 + 102 + for (i = 0; i < num_queues; i++) { 103 + for (j = 0; j < OCTEP_RX_Q_STATS_CNT; j++) { 104 + snprintf(strings, ETH_GSTRING_LEN, 105 + octep_gstrings_rx_q_stats[j], i); 106 + strings += ETH_GSTRING_LEN; 107 + } 108 + } 109 + break; 110 + default: 111 + break; 112 + } 113 + } 114 + 115 + static int octep_get_sset_count(struct net_device *netdev, int sset) 116 + { 117 + struct octep_device *oct = netdev_priv(netdev); 118 + u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); 119 + 120 + switch (sset) { 121 + case ETH_SS_STATS: 122 + return OCTEP_GLOBAL_STATS_CNT + (num_queues * 123 + (OCTEP_TX_Q_STATS_CNT + OCTEP_RX_Q_STATS_CNT)); 124 + break; 125 + default: 126 + return -EOPNOTSUPP; 127 + } 128 + } 129 + 130 + static void 131 + octep_get_ethtool_stats(struct net_device *netdev, 132 + struct ethtool_stats *stats, u64 *data) 133 + { 134 + struct octep_device *oct = netdev_priv(netdev); 135 + struct octep_iface_tx_stats *iface_tx_stats; 136 + struct octep_iface_rx_stats *iface_rx_stats; 137 + u64 rx_packets, rx_bytes; 138 + u64 tx_packets, tx_bytes; 139 + u64 rx_alloc_errors, tx_busy_errors; 140 + int q, i; 141 + 142 + rx_packets = 0; 143 + 
rx_bytes = 0; 144 + tx_packets = 0; 145 + tx_bytes = 0; 146 + rx_alloc_errors = 0; 147 + tx_busy_errors = 0; 148 + tx_packets = 0; 149 + tx_bytes = 0; 150 + rx_packets = 0; 151 + rx_bytes = 0; 152 + 153 + octep_get_if_stats(oct); 154 + iface_tx_stats = &oct->iface_tx_stats; 155 + iface_rx_stats = &oct->iface_rx_stats; 156 + 157 + for (q = 0; q < oct->num_oqs; q++) { 158 + struct octep_iq *iq = oct->iq[q]; 159 + struct octep_oq *oq = oct->oq[q]; 160 + 161 + tx_packets += iq->stats.instr_completed; 162 + tx_bytes += iq->stats.bytes_sent; 163 + tx_busy_errors += iq->stats.tx_busy; 164 + 165 + rx_packets += oq->stats.packets; 166 + rx_bytes += oq->stats.bytes; 167 + rx_alloc_errors += oq->stats.alloc_failures; 168 + } 169 + i = 0; 170 + data[i++] = rx_packets; 171 + data[i++] = tx_packets; 172 + data[i++] = rx_bytes; 173 + data[i++] = tx_bytes; 174 + data[i++] = rx_alloc_errors; 175 + data[i++] = tx_busy_errors; 176 + data[i++] = iface_rx_stats->dropped_pkts_fifo_full + 177 + iface_rx_stats->err_pkts; 178 + data[i++] = iface_tx_stats->xscol + 179 + iface_tx_stats->xsdef; 180 + data[i++] = iface_tx_stats->pkts; 181 + data[i++] = iface_tx_stats->octs; 182 + data[i++] = iface_tx_stats->bcst; 183 + data[i++] = iface_tx_stats->mcst; 184 + data[i++] = iface_tx_stats->undflw; 185 + data[i++] = iface_tx_stats->ctl; 186 + data[i++] = iface_tx_stats->hist_lt64; 187 + data[i++] = iface_tx_stats->hist_eq64; 188 + data[i++] = iface_tx_stats->hist_65to127; 189 + data[i++] = iface_tx_stats->hist_128to255; 190 + data[i++] = iface_tx_stats->hist_256to511; 191 + data[i++] = iface_tx_stats->hist_512to1023; 192 + data[i++] = iface_tx_stats->hist_1024to1518; 193 + data[i++] = iface_tx_stats->hist_gt1518; 194 + data[i++] = iface_rx_stats->pkts; 195 + data[i++] = iface_rx_stats->octets; 196 + data[i++] = iface_rx_stats->mcast_pkts; 197 + data[i++] = iface_rx_stats->bcast_pkts; 198 + data[i++] = iface_rx_stats->pause_pkts; 199 + data[i++] = iface_rx_stats->pause_octets; 200 + data[i++] = 
iface_rx_stats->dropped_pkts_fifo_full; 201 + data[i++] = iface_rx_stats->dropped_octets_fifo_full; 202 + data[i++] = iface_rx_stats->err_pkts; 203 + 204 + /* Per Tx Queue stats */ 205 + for (q = 0; q < oct->num_iqs; q++) { 206 + struct octep_iq *iq = oct->iq[q]; 207 + 208 + data[i++] = iq->stats.instr_posted; 209 + data[i++] = iq->stats.instr_completed; 210 + data[i++] = iq->stats.bytes_sent; 211 + data[i++] = iq->stats.tx_busy; 212 + } 213 + 214 + /* Per Rx Queue stats */ 215 + for (q = 0; q < oct->num_oqs; q++) { 216 + struct octep_oq *oq = oct->oq[q]; 217 + 218 + data[i++] = oq->stats.packets; 219 + data[i++] = oq->stats.bytes; 220 + data[i++] = oq->stats.alloc_failures; 221 + } 222 + } 223 + 224 + #define OCTEP_SET_ETHTOOL_LINK_MODES_BITMAP(octep_speeds, ksettings, name) \ 225 + { \ 226 + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_T)) \ 227 + ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseT_Full); \ 228 + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_R)) \ 229 + ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseR_FEC); \ 230 + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_CR)) \ 231 + ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseCR_Full); \ 232 + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_KR)) \ 233 + ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseKR_Full); \ 234 + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_LR)) \ 235 + ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseLR_Full); \ 236 + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_SR)) \ 237 + ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseSR_Full); \ 238 + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_25GBASE_CR)) \ 239 + ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseCR_Full); \ 240 + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_25GBASE_KR)) \ 241 + ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseKR_Full); \ 242 + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_25GBASE_SR)) \ 
243 + ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseSR_Full); \ 244 + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_CR4)) \ 245 + ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseCR4_Full); \ 246 + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_KR4)) \ 247 + ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseKR4_Full); \ 248 + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_LR4)) \ 249 + ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseLR4_Full); \ 250 + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_SR4)) \ 251 + ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseSR4_Full); \ 252 + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_CR2)) \ 253 + ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseCR2_Full); \ 254 + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_KR2)) \ 255 + ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseKR2_Full); \ 256 + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_SR2)) \ 257 + ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseSR2_Full); \ 258 + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_CR)) \ 259 + ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseCR_Full); \ 260 + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_KR)) \ 261 + ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseKR_Full); \ 262 + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_LR)) \ 263 + ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseLR_ER_FR_Full); \ 264 + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_SR)) \ 265 + ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseSR_Full); \ 266 + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_CR4)) \ 267 + ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseCR4_Full); \ 268 + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_KR4)) \ 269 + ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseKR4_Full); \ 270 + if ((octep_speeds) & 
BIT(OCTEP_LINK_MODE_100GBASE_LR4)) \ 271 + ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseLR4_ER4_Full); \ 272 + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_SR4)) \ 273 + ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseSR4_Full); \ 274 + } 275 + 276 + static int octep_get_link_ksettings(struct net_device *netdev, 277 + struct ethtool_link_ksettings *cmd) 278 + { 279 + struct octep_device *oct = netdev_priv(netdev); 280 + struct octep_iface_link_info *link_info; 281 + u32 advertised_modes, supported_modes; 282 + 283 + ethtool_link_ksettings_zero_link_mode(cmd, supported); 284 + ethtool_link_ksettings_zero_link_mode(cmd, advertising); 285 + 286 + octep_get_link_info(oct); 287 + 288 + advertised_modes = oct->link_info.advertised_modes; 289 + supported_modes = oct->link_info.supported_modes; 290 + link_info = &oct->link_info; 291 + 292 + OCTEP_SET_ETHTOOL_LINK_MODES_BITMAP(supported_modes, cmd, supported); 293 + OCTEP_SET_ETHTOOL_LINK_MODES_BITMAP(advertised_modes, cmd, advertising); 294 + 295 + if (link_info->autoneg) { 296 + if (link_info->autoneg & OCTEP_LINK_MODE_AUTONEG_SUPPORTED) 297 + ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); 298 + if (link_info->autoneg & OCTEP_LINK_MODE_AUTONEG_ADVERTISED) { 299 + ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); 300 + cmd->base.autoneg = AUTONEG_ENABLE; 301 + } else { 302 + cmd->base.autoneg = AUTONEG_DISABLE; 303 + } 304 + } else { 305 + cmd->base.autoneg = AUTONEG_DISABLE; 306 + } 307 + 308 + if (link_info->pause) { 309 + if (link_info->pause & OCTEP_LINK_MODE_PAUSE_SUPPORTED) 310 + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); 311 + if (link_info->pause & OCTEP_LINK_MODE_PAUSE_ADVERTISED) 312 + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); 313 + } 314 + 315 + cmd->base.port = PORT_FIBRE; 316 + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); 317 + ethtool_link_ksettings_add_link_mode(cmd, advertising, 
FIBRE); 318 + 319 + if (netif_carrier_ok(netdev)) { 320 + cmd->base.speed = link_info->speed; 321 + cmd->base.duplex = DUPLEX_FULL; 322 + } else { 323 + cmd->base.speed = SPEED_UNKNOWN; 324 + cmd->base.duplex = DUPLEX_UNKNOWN; 325 + } 326 + return 0; 327 + } 328 + 329 + static int octep_set_link_ksettings(struct net_device *netdev, 330 + const struct ethtool_link_ksettings *cmd) 331 + { 332 + struct octep_device *oct = netdev_priv(netdev); 333 + struct octep_iface_link_info link_info_new; 334 + struct octep_iface_link_info *link_info; 335 + u64 advertised = 0; 336 + u8 autoneg = 0; 337 + int err; 338 + 339 + link_info = &oct->link_info; 340 + memcpy(&link_info_new, link_info, sizeof(struct octep_iface_link_info)); 341 + 342 + /* Only Full duplex is supported; 343 + * Assume full duplex when duplex is unknown. 344 + */ 345 + if (cmd->base.duplex != DUPLEX_FULL && 346 + cmd->base.duplex != DUPLEX_UNKNOWN) 347 + return -EOPNOTSUPP; 348 + 349 + if (cmd->base.autoneg == AUTONEG_ENABLE) { 350 + if (!(link_info->autoneg & OCTEP_LINK_MODE_AUTONEG_SUPPORTED)) 351 + return -EOPNOTSUPP; 352 + autoneg = 1; 353 + } 354 + 355 + if (!bitmap_subset(cmd->link_modes.advertising, 356 + cmd->link_modes.supported, 357 + __ETHTOOL_LINK_MODE_MASK_NBITS)) 358 + return -EINVAL; 359 + 360 + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 361 + 10000baseT_Full)) 362 + advertised |= BIT(OCTEP_LINK_MODE_10GBASE_T); 363 + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 364 + 10000baseR_FEC)) 365 + advertised |= BIT(OCTEP_LINK_MODE_10GBASE_R); 366 + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 367 + 10000baseCR_Full)) 368 + advertised |= BIT(OCTEP_LINK_MODE_10GBASE_CR); 369 + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 370 + 10000baseKR_Full)) 371 + advertised |= BIT(OCTEP_LINK_MODE_10GBASE_KR); 372 + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 373 + 10000baseLR_Full)) 374 + advertised |= BIT(OCTEP_LINK_MODE_10GBASE_LR); 375 
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 376 + 10000baseSR_Full)) 377 + advertised |= BIT(OCTEP_LINK_MODE_10GBASE_SR); 378 + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 379 + 25000baseCR_Full)) 380 + advertised |= BIT(OCTEP_LINK_MODE_25GBASE_CR); 381 + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 382 + 25000baseKR_Full)) 383 + advertised |= BIT(OCTEP_LINK_MODE_25GBASE_KR); 384 + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 385 + 25000baseSR_Full)) 386 + advertised |= BIT(OCTEP_LINK_MODE_25GBASE_SR); 387 + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 388 + 40000baseCR4_Full)) 389 + advertised |= BIT(OCTEP_LINK_MODE_40GBASE_CR4); 390 + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 391 + 40000baseKR4_Full)) 392 + advertised |= BIT(OCTEP_LINK_MODE_40GBASE_KR4); 393 + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 394 + 40000baseLR4_Full)) 395 + advertised |= BIT(OCTEP_LINK_MODE_40GBASE_LR4); 396 + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 397 + 40000baseSR4_Full)) 398 + advertised |= BIT(OCTEP_LINK_MODE_40GBASE_SR4); 399 + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 400 + 50000baseCR2_Full)) 401 + advertised |= BIT(OCTEP_LINK_MODE_50GBASE_CR2); 402 + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 403 + 50000baseKR2_Full)) 404 + advertised |= BIT(OCTEP_LINK_MODE_50GBASE_KR2); 405 + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 406 + 50000baseSR2_Full)) 407 + advertised |= BIT(OCTEP_LINK_MODE_50GBASE_SR2); 408 + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 409 + 50000baseCR_Full)) 410 + advertised |= BIT(OCTEP_LINK_MODE_50GBASE_CR); 411 + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 412 + 50000baseKR_Full)) 413 + advertised |= BIT(OCTEP_LINK_MODE_50GBASE_KR); 414 + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 415 + 50000baseLR_ER_FR_Full)) 416 + advertised |= 
BIT(OCTEP_LINK_MODE_50GBASE_LR); 417 + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 418 + 50000baseSR_Full)) 419 + advertised |= BIT(OCTEP_LINK_MODE_50GBASE_SR); 420 + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 421 + 100000baseCR4_Full)) 422 + advertised |= BIT(OCTEP_LINK_MODE_100GBASE_CR4); 423 + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 424 + 100000baseKR4_Full)) 425 + advertised |= BIT(OCTEP_LINK_MODE_100GBASE_KR4); 426 + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 427 + 100000baseLR4_ER4_Full)) 428 + advertised |= BIT(OCTEP_LINK_MODE_100GBASE_LR4); 429 + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 430 + 100000baseSR4_Full)) 431 + advertised |= BIT(OCTEP_LINK_MODE_100GBASE_SR4); 432 + 433 + if (advertised == link_info->advertised_modes && 434 + cmd->base.speed == link_info->speed && 435 + cmd->base.autoneg == link_info->autoneg) 436 + return 0; 437 + 438 + link_info_new.advertised_modes = advertised; 439 + link_info_new.speed = cmd->base.speed; 440 + link_info_new.autoneg = autoneg; 441 + 442 + err = octep_set_link_info(oct, &link_info_new); 443 + if (err) 444 + return err; 445 + 446 + memcpy(link_info, &link_info_new, sizeof(struct octep_iface_link_info)); 447 + return 0; 448 + } 449 + 450 + static const struct ethtool_ops octep_ethtool_ops = { 451 + .get_drvinfo = octep_get_drvinfo, 452 + .get_link = ethtool_op_get_link, 453 + .get_strings = octep_get_strings, 454 + .get_sset_count = octep_get_sset_count, 455 + .get_ethtool_stats = octep_get_ethtool_stats, 456 + .get_link_ksettings = octep_get_link_ksettings, 457 + .set_link_ksettings = octep_set_link_ksettings, 458 + }; 459 + 460 + void octep_set_ethtool_ops(struct net_device *netdev) 461 + { 462 + netdev->ethtool_ops = &octep_ethtool_ops; 463 + }
+1177
drivers/net/ethernet/marvell/octeon_ep/octep_main.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Marvell Octeon EP (EndPoint) Ethernet Driver 3 + * 4 + * Copyright (C) 2020 Marvell. 5 + * 6 + */ 7 + 8 + #include <linux/types.h> 9 + #include <linux/module.h> 10 + #include <linux/pci.h> 11 + #include <linux/aer.h> 12 + #include <linux/netdevice.h> 13 + #include <linux/etherdevice.h> 14 + #include <linux/rtnetlink.h> 15 + #include <linux/vmalloc.h> 16 + 17 + #include "octep_config.h" 18 + #include "octep_main.h" 19 + #include "octep_ctrl_net.h" 20 + 21 + struct workqueue_struct *octep_wq; 22 + 23 + /* Supported Devices */ 24 + static const struct pci_device_id octep_pci_id_tbl[] = { 25 + {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN93_PF)}, 26 + {0, }, 27 + }; 28 + MODULE_DEVICE_TABLE(pci, octep_pci_id_tbl); 29 + 30 + MODULE_AUTHOR("Veerasenareddy Burru <vburru@marvell.com>"); 31 + MODULE_DESCRIPTION(OCTEP_DRV_STRING); 32 + MODULE_LICENSE("GPL"); 33 + MODULE_VERSION(OCTEP_DRV_VERSION_STR); 34 + 35 + /** 36 + * octep_alloc_ioq_vectors() - Allocate Tx/Rx Queue interrupt info. 37 + * 38 + * @oct: Octeon device private data structure. 39 + * 40 + * Allocate resources to hold per Tx/Rx queue interrupt info. 41 + * This is the information passed to interrupt handler, from which napi poll 42 + * is scheduled and includes quick access to private data of Tx/Rx queue 43 + * corresponding to the interrupt being handled. 44 + * 45 + * Return: 0, on successful allocation of resources for all queue interrupts. 46 + * -1, if failed to allocate any resource. 
47 + */ 48 + static int octep_alloc_ioq_vectors(struct octep_device *oct) 49 + { 50 + int i; 51 + struct octep_ioq_vector *ioq_vector; 52 + 53 + for (i = 0; i < oct->num_oqs; i++) { 54 + oct->ioq_vector[i] = vzalloc(sizeof(*oct->ioq_vector[i])); 55 + if (!oct->ioq_vector[i]) 56 + goto free_ioq_vector; 57 + 58 + ioq_vector = oct->ioq_vector[i]; 59 + ioq_vector->iq = oct->iq[i]; 60 + ioq_vector->oq = oct->oq[i]; 61 + ioq_vector->octep_dev = oct; 62 + } 63 + 64 + dev_info(&oct->pdev->dev, "Allocated %d IOQ vectors\n", oct->num_oqs); 65 + return 0; 66 + 67 + free_ioq_vector: 68 + while (i) { 69 + i--; 70 + vfree(oct->ioq_vector[i]); 71 + oct->ioq_vector[i] = NULL; 72 + } 73 + return -1; 74 + } 75 + 76 + /** 77 + * octep_free_ioq_vectors() - Free Tx/Rx Queue interrupt vector info. 78 + * 79 + * @oct: Octeon device private data structure. 80 + */ 81 + static void octep_free_ioq_vectors(struct octep_device *oct) 82 + { 83 + int i; 84 + 85 + for (i = 0; i < oct->num_oqs; i++) { 86 + if (oct->ioq_vector[i]) { 87 + vfree(oct->ioq_vector[i]); 88 + oct->ioq_vector[i] = NULL; 89 + } 90 + } 91 + netdev_info(oct->netdev, "Freed IOQ Vectors\n"); 92 + } 93 + 94 + /** 95 + * octep_enable_msix_range() - enable MSI-x interrupts. 96 + * 97 + * @oct: Octeon device private data structure. 98 + * 99 + * Allocate and enable all MSI-x interrupts (queue and non-queue interrupts) 100 + * for the Octeon device. 101 + * 102 + * Return: 0, on successfully enabling all MSI-x interrupts. 103 + * -1, if failed to enable any MSI-x interrupt. 
104 + */ 105 + static int octep_enable_msix_range(struct octep_device *oct) 106 + { 107 + int num_msix, msix_allocated; 108 + int i; 109 + 110 + /* Generic interrupts apart from input/output queues */ 111 + num_msix = oct->num_oqs + CFG_GET_NON_IOQ_MSIX(oct->conf); 112 + oct->msix_entries = kcalloc(num_msix, 113 + sizeof(struct msix_entry), GFP_KERNEL); 114 + if (!oct->msix_entries) 115 + goto msix_alloc_err; 116 + 117 + for (i = 0; i < num_msix; i++) 118 + oct->msix_entries[i].entry = i; 119 + 120 + msix_allocated = pci_enable_msix_range(oct->pdev, oct->msix_entries, 121 + num_msix, num_msix); 122 + if (msix_allocated != num_msix) { 123 + dev_err(&oct->pdev->dev, 124 + "Failed to enable %d msix irqs; got only %d\n", 125 + num_msix, msix_allocated); 126 + goto enable_msix_err; 127 + } 128 + oct->num_irqs = msix_allocated; 129 + dev_info(&oct->pdev->dev, "MSI-X enabled successfully\n"); 130 + 131 + return 0; 132 + 133 + enable_msix_err: 134 + if (msix_allocated > 0) 135 + pci_disable_msix(oct->pdev); 136 + kfree(oct->msix_entries); 137 + oct->msix_entries = NULL; 138 + msix_alloc_err: 139 + return -1; 140 + } 141 + 142 + /** 143 + * octep_disable_msix() - disable MSI-x interrupts. 144 + * 145 + * @oct: Octeon device private data structure. 146 + * 147 + * Disable MSI-x on the Octeon device. 148 + */ 149 + static void octep_disable_msix(struct octep_device *oct) 150 + { 151 + pci_disable_msix(oct->pdev); 152 + kfree(oct->msix_entries); 153 + oct->msix_entries = NULL; 154 + dev_info(&oct->pdev->dev, "Disabled MSI-X\n"); 155 + } 156 + 157 + /** 158 + * octep_non_ioq_intr_handler() - common handler for all generic interrupts. 159 + * 160 + * @irq: Interrupt number. 161 + * @data: interrupt data. 162 + * 163 + * this is common handler for all non-queue (generic) interrupts. 
164 + */ 165 + static irqreturn_t octep_non_ioq_intr_handler(int irq, void *data) 166 + { 167 + struct octep_device *oct = data; 168 + 169 + return oct->hw_ops.non_ioq_intr_handler(oct); 170 + } 171 + 172 + /** 173 + * octep_ioq_intr_handler() - handler for all Tx/Rx queue interrupts. 174 + * 175 + * @irq: Interrupt number. 176 + * @data: interrupt data contains pointers to Tx/Rx queue private data 177 + * and corresponding NAPI context. 178 + * 179 + * this is common handler for all Tx/Rx queue interrupts. 180 + */ 181 + static irqreturn_t octep_ioq_intr_handler(int irq, void *data) 182 + { 183 + struct octep_ioq_vector *ioq_vector = data; 184 + struct octep_device *oct = ioq_vector->octep_dev; 185 + 186 + return oct->hw_ops.ioq_intr_handler(ioq_vector); 187 + } 188 + 189 + /** 190 + * octep_request_irqs() - Register interrupt handlers. 191 + * 192 + * @oct: Octeon device private data structure. 193 + * 194 + * Register handlers for all queue and non-queue interrupts. 195 + * 196 + * Return: 0, on successful registration of all interrupt handlers. 197 + * -1, on any error. 
198 + */ 199 + static int octep_request_irqs(struct octep_device *oct) 200 + { 201 + struct net_device *netdev = oct->netdev; 202 + struct octep_ioq_vector *ioq_vector; 203 + struct msix_entry *msix_entry; 204 + char **non_ioq_msix_names; 205 + int num_non_ioq_msix; 206 + int ret, i; 207 + 208 + num_non_ioq_msix = CFG_GET_NON_IOQ_MSIX(oct->conf); 209 + non_ioq_msix_names = CFG_GET_NON_IOQ_MSIX_NAMES(oct->conf); 210 + 211 + oct->non_ioq_irq_names = kcalloc(num_non_ioq_msix, 212 + OCTEP_MSIX_NAME_SIZE, GFP_KERNEL); 213 + if (!oct->non_ioq_irq_names) 214 + goto alloc_err; 215 + 216 + /* First few MSI-X interrupts are non-queue interrupts */ 217 + for (i = 0; i < num_non_ioq_msix; i++) { 218 + char *irq_name; 219 + 220 + irq_name = &oct->non_ioq_irq_names[i * OCTEP_MSIX_NAME_SIZE]; 221 + msix_entry = &oct->msix_entries[i]; 222 + 223 + snprintf(irq_name, OCTEP_MSIX_NAME_SIZE, 224 + "%s-%s", netdev->name, non_ioq_msix_names[i]); 225 + ret = request_irq(msix_entry->vector, 226 + octep_non_ioq_intr_handler, 0, 227 + irq_name, oct); 228 + if (ret) { 229 + netdev_err(netdev, 230 + "request_irq failed for %s; err=%d", 231 + irq_name, ret); 232 + goto non_ioq_irq_err; 233 + } 234 + } 235 + 236 + /* Request IRQs for Tx/Rx queues */ 237 + for (i = 0; i < oct->num_oqs; i++) { 238 + ioq_vector = oct->ioq_vector[i]; 239 + msix_entry = &oct->msix_entries[i + num_non_ioq_msix]; 240 + 241 + snprintf(ioq_vector->name, sizeof(ioq_vector->name), 242 + "%s-q%d", netdev->name, i); 243 + ret = request_irq(msix_entry->vector, 244 + octep_ioq_intr_handler, 0, 245 + ioq_vector->name, ioq_vector); 246 + if (ret) { 247 + netdev_err(netdev, 248 + "request_irq failed for Q-%d; err=%d", 249 + i, ret); 250 + goto ioq_irq_err; 251 + } 252 + 253 + cpumask_set_cpu(i % num_online_cpus(), 254 + &ioq_vector->affinity_mask); 255 + irq_set_affinity_hint(msix_entry->vector, 256 + &ioq_vector->affinity_mask); 257 + } 258 + 259 + return 0; 260 + ioq_irq_err: 261 + while (i > num_non_ioq_msix) { 262 + --i; 263 
+ irq_set_affinity_hint(oct->msix_entries[i].vector, NULL); 264 + free_irq(oct->msix_entries[i].vector, oct->ioq_vector[i]); 265 + } 266 + non_ioq_irq_err: 267 + while (i) { 268 + --i; 269 + free_irq(oct->msix_entries[i].vector, oct); 270 + } 271 + alloc_err: 272 + return -1; 273 + } 274 + 275 + /** 276 + * octep_free_irqs() - free all registered interrupts. 277 + * 278 + * @oct: Octeon device private data structure. 279 + * 280 + * Free all queue and non-queue interrupts of the Octeon device. 281 + */ 282 + static void octep_free_irqs(struct octep_device *oct) 283 + { 284 + int i; 285 + 286 + /* First few MSI-X interrupts are non queue interrupts; free them */ 287 + for (i = 0; i < CFG_GET_NON_IOQ_MSIX(oct->conf); i++) 288 + free_irq(oct->msix_entries[i].vector, oct); 289 + kfree(oct->non_ioq_irq_names); 290 + 291 + /* Free IRQs for Input/Output (Tx/Rx) queues */ 292 + for (i = CFG_GET_NON_IOQ_MSIX(oct->conf); i < oct->num_irqs; i++) { 293 + irq_set_affinity_hint(oct->msix_entries[i].vector, NULL); 294 + free_irq(oct->msix_entries[i].vector, 295 + oct->ioq_vector[i - CFG_GET_NON_IOQ_MSIX(oct->conf)]); 296 + } 297 + netdev_info(oct->netdev, "IRQs freed\n"); 298 + } 299 + 300 + /** 301 + * octep_setup_irqs() - setup interrupts for the Octeon device. 302 + * 303 + * @oct: Octeon device private data structure. 304 + * 305 + * Allocate data structures to hold per interrupt information, allocate/enable 306 + * MSI-x interrupt and register interrupt handlers. 307 + * 308 + * Return: 0, on successful allocation and registration of all interrupts. 309 + * -1, on any error. 
310 + */ 311 + static int octep_setup_irqs(struct octep_device *oct) 312 + { 313 + if (octep_alloc_ioq_vectors(oct)) 314 + goto ioq_vector_err; 315 + 316 + if (octep_enable_msix_range(oct)) 317 + goto enable_msix_err; 318 + 319 + if (octep_request_irqs(oct)) 320 + goto request_irq_err; 321 + 322 + return 0; 323 + 324 + request_irq_err: 325 + octep_disable_msix(oct); 326 + enable_msix_err: 327 + octep_free_ioq_vectors(oct); 328 + ioq_vector_err: 329 + return -1; 330 + } 331 + 332 + /** 333 + * octep_clean_irqs() - free all interrupts and its resources. 334 + * 335 + * @oct: Octeon device private data structure. 336 + */ 337 + static void octep_clean_irqs(struct octep_device *oct) 338 + { 339 + octep_free_irqs(oct); 340 + octep_disable_msix(oct); 341 + octep_free_ioq_vectors(oct); 342 + } 343 + 344 + /** 345 + * octep_enable_ioq_irq() - Enable MSI-x interrupt of a Tx/Rx queue. 346 + * 347 + * @iq: Octeon Tx queue data structure. 348 + * @oq: Octeon Rx queue data structure. 349 + */ 350 + static void octep_enable_ioq_irq(struct octep_iq *iq, struct octep_oq *oq) 351 + { 352 + u32 pkts_pend = oq->pkts_pending; 353 + 354 + netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no); 355 + if (iq->pkts_processed) { 356 + writel(iq->pkts_processed, iq->inst_cnt_reg); 357 + iq->pkt_in_done -= iq->pkts_processed; 358 + iq->pkts_processed = 0; 359 + } 360 + if (oq->last_pkt_count - pkts_pend) { 361 + writel(oq->last_pkt_count - pkts_pend, oq->pkts_sent_reg); 362 + oq->last_pkt_count = pkts_pend; 363 + } 364 + 365 + /* Flush the previous wrties before writing to RESEND bit */ 366 + wmb(); 367 + writeq(1UL << OCTEP_OQ_INTR_RESEND_BIT, oq->pkts_sent_reg); 368 + writeq(1UL << OCTEP_IQ_INTR_RESEND_BIT, iq->inst_cnt_reg); 369 + } 370 + 371 + /** 372 + * octep_napi_poll() - NAPI poll function for Tx/Rx. 373 + * 374 + * @napi: pointer to napi context. 375 + * @budget: max number of packets to be processed in single invocation. 
376 + */ 377 + static int octep_napi_poll(struct napi_struct *napi, int budget) 378 + { 379 + struct octep_ioq_vector *ioq_vector = 380 + container_of(napi, struct octep_ioq_vector, napi); 381 + u32 tx_pending, rx_done; 382 + 383 + tx_pending = octep_iq_process_completions(ioq_vector->iq, budget); 384 + rx_done = octep_oq_process_rx(ioq_vector->oq, budget); 385 + 386 + /* need more polling if tx completion processing is still pending or 387 + * processed at least 'budget' number of rx packets. 388 + */ 389 + if (tx_pending || rx_done >= budget) 390 + return budget; 391 + 392 + napi_complete(napi); 393 + octep_enable_ioq_irq(ioq_vector->iq, ioq_vector->oq); 394 + return rx_done; 395 + } 396 + 397 + /** 398 + * octep_napi_add() - Add NAPI poll for all Tx/Rx queues. 399 + * 400 + * @oct: Octeon device private data structure. 401 + */ 402 + static void octep_napi_add(struct octep_device *oct) 403 + { 404 + int i; 405 + 406 + for (i = 0; i < oct->num_oqs; i++) { 407 + netdev_dbg(oct->netdev, "Adding NAPI on Q-%d\n", i); 408 + netif_napi_add(oct->netdev, &oct->ioq_vector[i]->napi, 409 + octep_napi_poll, 64); 410 + oct->oq[i]->napi = &oct->ioq_vector[i]->napi; 411 + } 412 + } 413 + 414 + /** 415 + * octep_napi_delete() - delete NAPI poll callback for all Tx/Rx queues. 416 + * 417 + * @oct: Octeon device private data structure. 418 + */ 419 + static void octep_napi_delete(struct octep_device *oct) 420 + { 421 + int i; 422 + 423 + for (i = 0; i < oct->num_oqs; i++) { 424 + netdev_dbg(oct->netdev, "Deleting NAPI on Q-%d\n", i); 425 + netif_napi_del(&oct->ioq_vector[i]->napi); 426 + oct->oq[i]->napi = NULL; 427 + } 428 + } 429 + 430 + /** 431 + * octep_napi_enable() - enable NAPI for all Tx/Rx queues. 432 + * 433 + * @oct: Octeon device private data structure. 
434 + */ 435 + static void octep_napi_enable(struct octep_device *oct) 436 + { 437 + int i; 438 + 439 + for (i = 0; i < oct->num_oqs; i++) { 440 + netdev_dbg(oct->netdev, "Enabling NAPI on Q-%d\n", i); 441 + napi_enable(&oct->ioq_vector[i]->napi); 442 + } 443 + } 444 + 445 + /** 446 + * octep_napi_disable() - disable NAPI for all Tx/Rx queues. 447 + * 448 + * @oct: Octeon device private data structure. 449 + */ 450 + static void octep_napi_disable(struct octep_device *oct) 451 + { 452 + int i; 453 + 454 + for (i = 0; i < oct->num_oqs; i++) { 455 + netdev_dbg(oct->netdev, "Disabling NAPI on Q-%d\n", i); 456 + napi_disable(&oct->ioq_vector[i]->napi); 457 + } 458 + } 459 + 460 + static void octep_link_up(struct net_device *netdev) 461 + { 462 + netif_carrier_on(netdev); 463 + netif_tx_start_all_queues(netdev); 464 + } 465 + 466 + /** 467 + * octep_open() - start the octeon network device. 468 + * 469 + * @netdev: pointer to kernel network device. 470 + * 471 + * setup Tx/Rx queues, interrupts and enable hardware operation of Tx/Rx queues 472 + * and interrupts.. 473 + * 474 + * Return: 0, on successfully setting up device and bring it up. 475 + * -1, on any error. 
476 + */ 477 + static int octep_open(struct net_device *netdev) 478 + { 479 + struct octep_device *oct = netdev_priv(netdev); 480 + int err, ret; 481 + 482 + netdev_info(netdev, "Starting netdev ...\n"); 483 + netif_carrier_off(netdev); 484 + 485 + oct->hw_ops.reset_io_queues(oct); 486 + 487 + if (octep_setup_iqs(oct)) 488 + goto setup_iq_err; 489 + if (octep_setup_oqs(oct)) 490 + goto setup_oq_err; 491 + if (octep_setup_irqs(oct)) 492 + goto setup_irq_err; 493 + 494 + err = netif_set_real_num_tx_queues(netdev, oct->num_oqs); 495 + if (err) 496 + goto set_queues_err; 497 + err = netif_set_real_num_rx_queues(netdev, oct->num_iqs); 498 + if (err) 499 + goto set_queues_err; 500 + 501 + octep_napi_add(oct); 502 + octep_napi_enable(oct); 503 + 504 + oct->link_info.admin_up = 1; 505 + octep_set_rx_state(oct, true); 506 + 507 + ret = octep_get_link_status(oct); 508 + if (!ret) 509 + octep_set_link_status(oct, true); 510 + 511 + /* Enable the input and output queues for this Octeon device */ 512 + oct->hw_ops.enable_io_queues(oct); 513 + 514 + /* Enable Octeon device interrupts */ 515 + oct->hw_ops.enable_interrupts(oct); 516 + 517 + octep_oq_dbell_init(oct); 518 + 519 + ret = octep_get_link_status(oct); 520 + if (ret) 521 + octep_link_up(netdev); 522 + 523 + return 0; 524 + 525 + set_queues_err: 526 + octep_napi_disable(oct); 527 + octep_napi_delete(oct); 528 + octep_clean_irqs(oct); 529 + setup_irq_err: 530 + octep_free_oqs(oct); 531 + setup_oq_err: 532 + octep_free_iqs(oct); 533 + setup_iq_err: 534 + return -1; 535 + } 536 + 537 + /** 538 + * octep_stop() - stop the octeon network device. 539 + * 540 + * @netdev: pointer to kernel network device. 541 + * 542 + * stop the device Tx/Rx operations, bring down the link and 543 + * free up all resources allocated for Tx/Rx queues and interrupts. 
544 + */ 545 + static int octep_stop(struct net_device *netdev) 546 + { 547 + struct octep_device *oct = netdev_priv(netdev); 548 + 549 + netdev_info(netdev, "Stopping the device ...\n"); 550 + 551 + /* Stop Tx from stack */ 552 + netif_tx_stop_all_queues(netdev); 553 + netif_carrier_off(netdev); 554 + netif_tx_disable(netdev); 555 + 556 + octep_set_link_status(oct, false); 557 + octep_set_rx_state(oct, false); 558 + 559 + oct->link_info.admin_up = 0; 560 + oct->link_info.oper_up = 0; 561 + 562 + oct->hw_ops.disable_interrupts(oct); 563 + octep_napi_disable(oct); 564 + octep_napi_delete(oct); 565 + 566 + octep_clean_irqs(oct); 567 + octep_clean_iqs(oct); 568 + 569 + oct->hw_ops.disable_io_queues(oct); 570 + oct->hw_ops.reset_io_queues(oct); 571 + octep_free_oqs(oct); 572 + octep_free_iqs(oct); 573 + netdev_info(netdev, "Device stopped !!\n"); 574 + return 0; 575 + } 576 + 577 + /** 578 + * octep_iq_full_check() - check if a Tx queue is full. 579 + * 580 + * @iq: Octeon Tx queue data structure. 581 + * 582 + * Return: 0, if the Tx queue is not full. 583 + * 1, if the Tx queue is full. 584 + */ 585 + static inline int octep_iq_full_check(struct octep_iq *iq) 586 + { 587 + if (likely((iq->max_count - atomic_read(&iq->instr_pending)) >= 588 + OCTEP_WAKE_QUEUE_THRESHOLD)) 589 + return 0; 590 + 591 + /* Stop the queue if unable to send */ 592 + netif_stop_subqueue(iq->netdev, iq->q_no); 593 + 594 + /* check again and restart the queue, in case NAPI has just freed 595 + * enough Tx ring entries. 596 + */ 597 + if (unlikely((iq->max_count - atomic_read(&iq->instr_pending)) >= 598 + OCTEP_WAKE_QUEUE_THRESHOLD)) { 599 + netif_start_subqueue(iq->netdev, iq->q_no); 600 + iq->stats.restart_cnt++; 601 + return 0; 602 + } 603 + 604 + return 1; 605 + } 606 + 607 + /** 608 + * octep_start_xmit() - Enqueue packet to Octoen hardware Tx Queue. 609 + * 610 + * @skb: packet skbuff pointer. 611 + * @netdev: kernel network device. 
612 + * 613 + * Return: NETDEV_TX_BUSY, if Tx Queue is full. 614 + * NETDEV_TX_OK, if successfully enqueued to hardware Tx queue. 615 + */ 616 + static netdev_tx_t octep_start_xmit(struct sk_buff *skb, 617 + struct net_device *netdev) 618 + { 619 + struct octep_device *oct = netdev_priv(netdev); 620 + struct octep_tx_sglist_desc *sglist; 621 + struct octep_tx_buffer *tx_buffer; 622 + struct octep_tx_desc_hw *hw_desc; 623 + struct skb_shared_info *shinfo; 624 + struct octep_instr_hdr *ih; 625 + struct octep_iq *iq; 626 + skb_frag_t *frag; 627 + u16 nr_frags, si; 628 + u16 q_no, wi; 629 + 630 + q_no = skb_get_queue_mapping(skb); 631 + if (q_no >= oct->num_iqs) { 632 + netdev_err(netdev, "Invalid Tx skb->queue_mapping=%d\n", q_no); 633 + q_no = q_no % oct->num_iqs; 634 + } 635 + 636 + iq = oct->iq[q_no]; 637 + if (octep_iq_full_check(iq)) { 638 + iq->stats.tx_busy++; 639 + return NETDEV_TX_BUSY; 640 + } 641 + 642 + shinfo = skb_shinfo(skb); 643 + nr_frags = shinfo->nr_frags; 644 + 645 + wi = iq->host_write_index; 646 + hw_desc = &iq->desc_ring[wi]; 647 + hw_desc->ih64 = 0; 648 + 649 + tx_buffer = iq->buff_info + wi; 650 + tx_buffer->skb = skb; 651 + 652 + ih = &hw_desc->ih; 653 + ih->tlen = skb->len; 654 + ih->pkind = oct->pkind; 655 + 656 + if (!nr_frags) { 657 + tx_buffer->gather = 0; 658 + tx_buffer->dma = dma_map_single(iq->dev, skb->data, 659 + skb->len, DMA_TO_DEVICE); 660 + if (dma_mapping_error(iq->dev, tx_buffer->dma)) 661 + goto dma_map_err; 662 + hw_desc->dptr = tx_buffer->dma; 663 + } else { 664 + /* Scatter/Gather */ 665 + dma_addr_t dma; 666 + u16 len; 667 + 668 + sglist = tx_buffer->sglist; 669 + 670 + ih->gsz = nr_frags + 1; 671 + ih->gather = 1; 672 + tx_buffer->gather = 1; 673 + 674 + len = skb_headlen(skb); 675 + dma = dma_map_single(iq->dev, skb->data, len, DMA_TO_DEVICE); 676 + if (dma_mapping_error(iq->dev, dma)) 677 + goto dma_map_err; 678 + 679 + dma_sync_single_for_cpu(iq->dev, tx_buffer->sglist_dma, 680 + OCTEP_SGLIST_SIZE_PER_PKT, 681 + 
DMA_TO_DEVICE); 682 + memset(sglist, 0, OCTEP_SGLIST_SIZE_PER_PKT); 683 + sglist[0].len[3] = len; 684 + sglist[0].dma_ptr[0] = dma; 685 + 686 + si = 1; /* entry 0 is main skb, mapped above */ 687 + frag = &shinfo->frags[0]; 688 + while (nr_frags--) { 689 + len = skb_frag_size(frag); 690 + dma = skb_frag_dma_map(iq->dev, frag, 0, 691 + len, DMA_TO_DEVICE); 692 + if (dma_mapping_error(iq->dev, dma)) 693 + goto dma_map_sg_err; 694 + 695 + sglist[si >> 2].len[3 - (si & 3)] = len; 696 + sglist[si >> 2].dma_ptr[si & 3] = dma; 697 + 698 + frag++; 699 + si++; 700 + } 701 + dma_sync_single_for_device(iq->dev, tx_buffer->sglist_dma, 702 + OCTEP_SGLIST_SIZE_PER_PKT, 703 + DMA_TO_DEVICE); 704 + 705 + hw_desc->dptr = tx_buffer->sglist_dma; 706 + } 707 + 708 + /* Flush the hw descriptor before writing to doorbell */ 709 + wmb(); 710 + 711 + /* Ring Doorbell to notify the NIC there is a new packet */ 712 + writel(1, iq->doorbell_reg); 713 + atomic_inc(&iq->instr_pending); 714 + wi++; 715 + if (wi == iq->max_count) 716 + wi = 0; 717 + iq->host_write_index = wi; 718 + 719 + netdev_tx_sent_queue(iq->netdev_q, skb->len); 720 + iq->stats.instr_posted++; 721 + skb_tx_timestamp(skb); 722 + return NETDEV_TX_OK; 723 + 724 + dma_map_sg_err: 725 + if (si > 0) { 726 + dma_unmap_single(iq->dev, sglist[0].dma_ptr[0], 727 + sglist[0].len[0], DMA_TO_DEVICE); 728 + sglist[0].len[0] = 0; 729 + } 730 + while (si > 1) { 731 + dma_unmap_page(iq->dev, sglist[si >> 2].dma_ptr[si & 3], 732 + sglist[si >> 2].len[si & 3], DMA_TO_DEVICE); 733 + sglist[si >> 2].len[si & 3] = 0; 734 + si--; 735 + } 736 + tx_buffer->gather = 0; 737 + dma_map_err: 738 + dev_kfree_skb_any(skb); 739 + return NETDEV_TX_OK; 740 + } 741 + 742 + /** 743 + * octep_get_stats64() - Get Octeon network device statistics. 744 + * 745 + * @netdev: kernel network device. 746 + * @stats: pointer to stats structure to be filled in. 
747 + */ 748 + static void octep_get_stats64(struct net_device *netdev, 749 + struct rtnl_link_stats64 *stats) 750 + { 751 + u64 tx_packets, tx_bytes, rx_packets, rx_bytes; 752 + struct octep_device *oct = netdev_priv(netdev); 753 + int q; 754 + 755 + octep_get_if_stats(oct); 756 + tx_packets = 0; 757 + tx_bytes = 0; 758 + rx_packets = 0; 759 + rx_bytes = 0; 760 + for (q = 0; q < oct->num_oqs; q++) { 761 + struct octep_iq *iq = oct->iq[q]; 762 + struct octep_oq *oq = oct->oq[q]; 763 + 764 + tx_packets += iq->stats.instr_completed; 765 + tx_bytes += iq->stats.bytes_sent; 766 + rx_packets += oq->stats.packets; 767 + rx_bytes += oq->stats.bytes; 768 + } 769 + stats->tx_packets = tx_packets; 770 + stats->tx_bytes = tx_bytes; 771 + stats->rx_packets = rx_packets; 772 + stats->rx_bytes = rx_bytes; 773 + stats->multicast = oct->iface_rx_stats.mcast_pkts; 774 + stats->rx_errors = oct->iface_rx_stats.err_pkts; 775 + stats->collisions = oct->iface_tx_stats.xscol; 776 + stats->tx_fifo_errors = oct->iface_tx_stats.undflw; 777 + } 778 + 779 + /** 780 + * octep_tx_timeout_task - work queue task to Handle Tx queue timeout. 781 + * 782 + * @work: pointer to Tx queue timeout work_struct 783 + * 784 + * Stop and start the device so that it frees up all queue resources 785 + * and restarts the queues, that potentially clears a Tx queue timeout 786 + * condition. 787 + **/ 788 + static void octep_tx_timeout_task(struct work_struct *work) 789 + { 790 + struct octep_device *oct = container_of(work, struct octep_device, 791 + tx_timeout_task); 792 + struct net_device *netdev = oct->netdev; 793 + 794 + rtnl_lock(); 795 + if (netif_running(netdev)) { 796 + octep_stop(netdev); 797 + octep_open(netdev); 798 + } 799 + rtnl_unlock(); 800 + } 801 + 802 + /** 803 + * octep_tx_timeout() - Handle Tx Queue timeout. 804 + * 805 + * @netdev: pointer to kernel network device. 806 + * @txqueue: Timed out Tx queue number. 807 + * 808 + * Schedule a work to handle Tx queue timeout. 
809 + */ 810 + static void octep_tx_timeout(struct net_device *netdev, unsigned int txqueue) 811 + { 812 + struct octep_device *oct = netdev_priv(netdev); 813 + 814 + queue_work(octep_wq, &oct->tx_timeout_task); 815 + } 816 + 817 + static int octep_set_mac(struct net_device *netdev, void *p) 818 + { 819 + struct octep_device *oct = netdev_priv(netdev); 820 + struct sockaddr *addr = (struct sockaddr *)p; 821 + int err; 822 + 823 + if (!is_valid_ether_addr(addr->sa_data)) 824 + return -EADDRNOTAVAIL; 825 + 826 + err = octep_set_mac_addr(oct, addr->sa_data); 827 + if (err) 828 + return err; 829 + 830 + memcpy(oct->mac_addr, addr->sa_data, ETH_ALEN); 831 + eth_hw_addr_set(netdev, addr->sa_data); 832 + 833 + return 0; 834 + } 835 + 836 + static int octep_change_mtu(struct net_device *netdev, int new_mtu) 837 + { 838 + struct octep_device *oct = netdev_priv(netdev); 839 + struct octep_iface_link_info *link_info; 840 + int err = 0; 841 + 842 + link_info = &oct->link_info; 843 + if (link_info->mtu == new_mtu) 844 + return 0; 845 + 846 + err = octep_set_mtu(oct, new_mtu); 847 + if (!err) { 848 + oct->link_info.mtu = new_mtu; 849 + netdev->mtu = new_mtu; 850 + } 851 + 852 + return err; 853 + } 854 + 855 + static const struct net_device_ops octep_netdev_ops = { 856 + .ndo_open = octep_open, 857 + .ndo_stop = octep_stop, 858 + .ndo_start_xmit = octep_start_xmit, 859 + .ndo_get_stats64 = octep_get_stats64, 860 + .ndo_tx_timeout = octep_tx_timeout, 861 + .ndo_set_mac_address = octep_set_mac, 862 + .ndo_change_mtu = octep_change_mtu, 863 + }; 864 + 865 + /** 866 + * octep_ctrl_mbox_task - work queue task to handle ctrl mbox messages. 867 + * 868 + * @work: pointer to ctrl mbox work_struct 869 + * 870 + * Poll ctrl mbox message queue and handle control messages from firmware. 
871 + **/ 872 + static void octep_ctrl_mbox_task(struct work_struct *work) 873 + { 874 + struct octep_device *oct = container_of(work, struct octep_device, 875 + ctrl_mbox_task); 876 + struct net_device *netdev = oct->netdev; 877 + struct octep_ctrl_net_f2h_req req = {}; 878 + struct octep_ctrl_mbox_msg msg; 879 + int ret = 0; 880 + 881 + msg.msg = &req; 882 + while (true) { 883 + ret = octep_ctrl_mbox_recv(&oct->ctrl_mbox, &msg); 884 + if (ret) 885 + break; 886 + 887 + switch (req.hdr.cmd) { 888 + case OCTEP_CTRL_NET_F2H_CMD_LINK_STATUS: 889 + if (netif_running(netdev)) { 890 + if (req.link.state) { 891 + dev_info(&oct->pdev->dev, "netif_carrier_on\n"); 892 + netif_carrier_on(netdev); 893 + } else { 894 + dev_info(&oct->pdev->dev, "netif_carrier_off\n"); 895 + netif_carrier_off(netdev); 896 + } 897 + } 898 + break; 899 + default: 900 + pr_info("Unknown mbox req : %u\n", req.hdr.cmd); 901 + break; 902 + } 903 + } 904 + } 905 + 906 + /** 907 + * octep_device_setup() - Setup Octeon Device. 908 + * 909 + * @oct: Octeon device private data structure. 910 + * 911 + * Setup Octeon device hardware operations, configuration, etc ... 
912 + */ 913 + int octep_device_setup(struct octep_device *oct) 914 + { 915 + struct octep_ctrl_mbox *ctrl_mbox; 916 + struct pci_dev *pdev = oct->pdev; 917 + int i, ret; 918 + 919 + /* allocate memory for oct->conf */ 920 + oct->conf = kzalloc(sizeof(*oct->conf), GFP_KERNEL); 921 + if (!oct->conf) 922 + return -ENOMEM; 923 + 924 + /* Map BAR regions */ 925 + for (i = 0; i < OCTEP_MMIO_REGIONS; i++) { 926 + oct->mmio[i].hw_addr = 927 + ioremap(pci_resource_start(oct->pdev, i * 2), 928 + pci_resource_len(oct->pdev, i * 2)); 929 + oct->mmio[i].mapped = 1; 930 + } 931 + 932 + oct->chip_id = pdev->device; 933 + oct->rev_id = pdev->revision; 934 + dev_info(&pdev->dev, "chip_id = 0x%x\n", pdev->device); 935 + 936 + switch (oct->chip_id) { 937 + case OCTEP_PCI_DEVICE_ID_CN93_PF: 938 + dev_info(&pdev->dev, 939 + "Setting up OCTEON CN93XX PF PASS%d.%d\n", 940 + OCTEP_MAJOR_REV(oct), OCTEP_MINOR_REV(oct)); 941 + octep_device_setup_cn93_pf(oct); 942 + break; 943 + default: 944 + dev_err(&pdev->dev, 945 + "%s: unsupported device\n", __func__); 946 + goto unsupported_dev; 947 + } 948 + 949 + oct->pkind = CFG_GET_IQ_PKIND(oct->conf); 950 + 951 + /* Initialize control mbox */ 952 + ctrl_mbox = &oct->ctrl_mbox; 953 + ctrl_mbox->version = OCTEP_DRV_VERSION; 954 + ctrl_mbox->barmem = CFG_GET_CTRL_MBOX_MEM_ADDR(oct->conf); 955 + ret = octep_ctrl_mbox_init(ctrl_mbox); 956 + if (ret) { 957 + dev_err(&pdev->dev, "Failed to initialize control mbox\n"); 958 + return -1; 959 + } 960 + oct->ctrl_mbox_ifstats_offset = OCTEP_CTRL_MBOX_SZ(ctrl_mbox->h2fq.elem_sz, 961 + ctrl_mbox->h2fq.elem_cnt, 962 + ctrl_mbox->f2hq.elem_sz, 963 + ctrl_mbox->f2hq.elem_cnt); 964 + 965 + return 0; 966 + 967 + unsupported_dev: 968 + return -1; 969 + } 970 + 971 + /** 972 + * octep_device_cleanup() - Cleanup Octeon Device. 973 + * 974 + * @oct: Octeon device private data structure. 975 + * 976 + * Cleanup Octeon device allocated resources. 
977 + */ 978 + static void octep_device_cleanup(struct octep_device *oct) 979 + { 980 + int i; 981 + 982 + dev_info(&oct->pdev->dev, "Cleaning up Octeon Device ...\n"); 983 + 984 + for (i = 0; i < OCTEP_MAX_VF; i++) { 985 + if (oct->mbox[i]) 986 + vfree(oct->mbox[i]); 987 + oct->mbox[i] = NULL; 988 + } 989 + 990 + octep_ctrl_mbox_uninit(&oct->ctrl_mbox); 991 + 992 + oct->hw_ops.soft_reset(oct); 993 + for (i = 0; i < OCTEP_MMIO_REGIONS; i++) { 994 + if (oct->mmio[i].mapped) 995 + iounmap(oct->mmio[i].hw_addr); 996 + } 997 + 998 + kfree(oct->conf); 999 + oct->conf = NULL; 1000 + } 1001 + 1002 + /** 1003 + * octep_probe() - Octeon PCI device probe handler. 1004 + * 1005 + * @pdev: PCI device structure. 1006 + * @ent: entry in Octeon PCI device ID table. 1007 + * 1008 + * Initializes and enables the Octeon PCI device for network operations. 1009 + * Initializes Octeon private data structure and registers a network device. 1010 + */ 1011 + static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 1012 + { 1013 + struct octep_device *octep_dev = NULL; 1014 + struct net_device *netdev; 1015 + int err; 1016 + 1017 + err = pci_enable_device(pdev); 1018 + if (err) { 1019 + dev_err(&pdev->dev, "Failed to enable PCI device\n"); 1020 + return err; 1021 + } 1022 + 1023 + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 1024 + if (err) { 1025 + dev_err(&pdev->dev, "Failed to set DMA mask !!\n"); 1026 + goto err_dma_mask; 1027 + } 1028 + 1029 + err = pci_request_mem_regions(pdev, OCTEP_DRV_NAME); 1030 + if (err) { 1031 + dev_err(&pdev->dev, "Failed to map PCI memory regions\n"); 1032 + goto err_pci_regions; 1033 + } 1034 + 1035 + pci_enable_pcie_error_reporting(pdev); 1036 + pci_set_master(pdev); 1037 + 1038 + netdev = alloc_etherdev_mq(sizeof(struct octep_device), 1039 + OCTEP_MAX_QUEUES); 1040 + if (!netdev) { 1041 + dev_err(&pdev->dev, "Failed to allocate netdev\n"); 1042 + err = -ENOMEM; 1043 + goto err_alloc_netdev; 1044 + } 1045 + 
SET_NETDEV_DEV(netdev, &pdev->dev); 1046 + 1047 + octep_dev = netdev_priv(netdev); 1048 + octep_dev->netdev = netdev; 1049 + octep_dev->pdev = pdev; 1050 + octep_dev->dev = &pdev->dev; 1051 + pci_set_drvdata(pdev, octep_dev); 1052 + 1053 + err = octep_device_setup(octep_dev); 1054 + if (err) { 1055 + dev_err(&pdev->dev, "Device setup failed\n"); 1056 + goto err_octep_config; 1057 + } 1058 + INIT_WORK(&octep_dev->tx_timeout_task, octep_tx_timeout_task); 1059 + INIT_WORK(&octep_dev->ctrl_mbox_task, octep_ctrl_mbox_task); 1060 + 1061 + netdev->netdev_ops = &octep_netdev_ops; 1062 + octep_set_ethtool_ops(netdev); 1063 + netif_carrier_off(netdev); 1064 + 1065 + netdev->hw_features = NETIF_F_SG; 1066 + netdev->features |= netdev->hw_features; 1067 + netdev->min_mtu = OCTEP_MIN_MTU; 1068 + netdev->max_mtu = OCTEP_MAX_MTU; 1069 + netdev->mtu = OCTEP_DEFAULT_MTU; 1070 + 1071 + octep_get_mac_addr(octep_dev, octep_dev->mac_addr); 1072 + eth_hw_addr_set(netdev, octep_dev->mac_addr); 1073 + 1074 + if (register_netdev(netdev)) { 1075 + dev_err(&pdev->dev, "Failed to register netdev\n"); 1076 + goto register_dev_err; 1077 + } 1078 + dev_info(&pdev->dev, "Device probe successful\n"); 1079 + return 0; 1080 + 1081 + register_dev_err: 1082 + octep_device_cleanup(octep_dev); 1083 + err_octep_config: 1084 + free_netdev(netdev); 1085 + err_alloc_netdev: 1086 + pci_disable_pcie_error_reporting(pdev); 1087 + pci_release_mem_regions(pdev); 1088 + err_pci_regions: 1089 + err_dma_mask: 1090 + pci_disable_device(pdev); 1091 + return err; 1092 + } 1093 + 1094 + /** 1095 + * octep_remove() - Remove Octeon PCI device from driver control. 1096 + * 1097 + * @pdev: PCI device structure of the Octeon device. 1098 + * 1099 + * Cleanup all resources allocated for the Octeon device. 1100 + * Unregister from network device and disable the PCI device. 
1101 + */ 1102 + static void octep_remove(struct pci_dev *pdev) 1103 + { 1104 + struct octep_device *oct = pci_get_drvdata(pdev); 1105 + struct net_device *netdev; 1106 + 1107 + if (!oct) 1108 + return; 1109 + 1110 + cancel_work_sync(&oct->tx_timeout_task); 1111 + cancel_work_sync(&oct->ctrl_mbox_task); 1112 + netdev = oct->netdev; 1113 + if (netdev->reg_state == NETREG_REGISTERED) 1114 + unregister_netdev(netdev); 1115 + 1116 + octep_device_cleanup(oct); 1117 + pci_release_mem_regions(pdev); 1118 + free_netdev(netdev); 1119 + pci_disable_pcie_error_reporting(pdev); 1120 + pci_disable_device(pdev); 1121 + } 1122 + 1123 + static struct pci_driver octep_driver = { 1124 + .name = OCTEP_DRV_NAME, 1125 + .id_table = octep_pci_id_tbl, 1126 + .probe = octep_probe, 1127 + .remove = octep_remove, 1128 + }; 1129 + 1130 + /** 1131 + * octep_init_module() - Module initialiation. 1132 + * 1133 + * create common resource for the driver and register PCI driver. 1134 + */ 1135 + static int __init octep_init_module(void) 1136 + { 1137 + int ret; 1138 + 1139 + pr_info("%s: Loading %s ...\n", OCTEP_DRV_NAME, OCTEP_DRV_STRING); 1140 + 1141 + /* work queue for all deferred tasks */ 1142 + octep_wq = create_singlethread_workqueue(OCTEP_DRV_NAME); 1143 + if (!octep_wq) { 1144 + pr_err("%s: Failed to create common workqueue\n", 1145 + OCTEP_DRV_NAME); 1146 + return -ENOMEM; 1147 + } 1148 + 1149 + ret = pci_register_driver(&octep_driver); 1150 + if (ret < 0) { 1151 + pr_err("%s: Failed to register PCI driver; err=%d\n", 1152 + OCTEP_DRV_NAME, ret); 1153 + return ret; 1154 + } 1155 + 1156 + pr_info("%s: Loaded successfully !\n", OCTEP_DRV_NAME); 1157 + 1158 + return ret; 1159 + } 1160 + 1161 + /** 1162 + * octep_exit_module() - Module exit routine. 1163 + * 1164 + * unregister the driver with PCI subsystem and cleanup common resources. 
1165 + */ 1166 + static void __exit octep_exit_module(void) 1167 + { 1168 + pr_info("%s: Unloading ...\n", OCTEP_DRV_NAME); 1169 + 1170 + pci_unregister_driver(&octep_driver); 1171 + destroy_workqueue(octep_wq); 1172 + 1173 + pr_info("%s: Unloading complete\n", OCTEP_DRV_NAME); 1174 + } 1175 + 1176 + module_init(octep_init_module); 1177 + module_exit(octep_exit_module);
+366
drivers/net/ethernet/marvell/octeon_ep/octep_main.h
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell Octeon EP (EndPoint) Ethernet Driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#ifndef _OCTEP_MAIN_H_
#define _OCTEP_MAIN_H_

#include "octep_tx.h"
#include "octep_rx.h"
#include "octep_ctrl_mbox.h"

/* Driver version, encoded as 0x00MMmmVV (major/minor/variant) */
#define OCTEP_DRV_VERSION_MAJOR 1
#define OCTEP_DRV_VERSION_MINOR 0
#define OCTEP_DRV_VERSION_VARIANT 0

#define OCTEP_DRV_VERSION ((OCTEP_DRV_VERSION_MAJOR << 16) + \
			   (OCTEP_DRV_VERSION_MINOR << 8) + \
			   OCTEP_DRV_VERSION_VARIANT)

#define OCTEP_DRV_VERSION_STR "1.0.0"
#define OCTEP_DRV_NAME "octeon_ep"
#define OCTEP_DRV_STRING "Marvell Octeon EndPoint NIC Driver"

/* Combined device/vendor IDs (device id in upper 16 bits, 0x177d = Cavium) */
#define OCTEP_PCIID_CN93_PF 0xB200177d
#define OCTEP_PCIID_CN93_VF 0xB203177d

#define OCTEP_PCI_DEVICE_ID_CN93_PF 0xB200
#define OCTEP_PCI_DEVICE_ID_CN93_VF 0xB203

/* Upper bounds on queue counts and SR-IOV virtual functions */
#define OCTEP_MAX_QUEUES 63
#define OCTEP_MAX_IQ OCTEP_MAX_QUEUES
#define OCTEP_MAX_OQ OCTEP_MAX_QUEUES
#define OCTEP_MAX_VF 64

#define OCTEP_MAX_MSIX_VECTORS OCTEP_MAX_OQ

/* Flags to disable and enable Interrupts */
#define OCTEP_INPUT_INTR (1)
#define OCTEP_OUTPUT_INTR (2)
#define OCTEP_MBOX_INTR (4)
#define OCTEP_ALL_INTR 0xff

/* Bit position of the RESEND bit in the IQ/OQ interrupt-count registers */
#define OCTEP_IQ_INTR_RESEND_BIT 59
#define OCTEP_OQ_INTR_RESEND_BIT 59

#define OCTEP_MMIO_REGIONS 3
/* PCI address space mapping information.
 * Each of the 3 address spaces given by BAR0, BAR2 and BAR4 of
 * Octeon gets mapped to different physical address spaces in
 * the kernel.
 */
struct octep_mmio {
	/* The physical address to which the PCI address space is mapped. */
	u8 __iomem *hw_addr;

	/* Flag indicating the mapping was successful. */
	int mapped;
};

/* Indirect register-access window registers (see OCTEP_PCI_WIN_READ). */
struct octep_pci_win_regs {
	u8 __iomem *pci_win_wr_addr;
	u8 __iomem *pci_win_rd_addr;
	u8 __iomem *pci_win_wr_data;
	u8 __iomem *pci_win_rd_data;
};

/* Chip-specific hardware operations; filled in by the per-chip setup
 * routine (e.g. octep_device_setup_cn93_pf()).
 */
struct octep_hw_ops {
	void (*setup_iq_regs)(struct octep_device *oct, int q);
	void (*setup_oq_regs)(struct octep_device *oct, int q);
	void (*setup_mbox_regs)(struct octep_device *oct, int mbox);

	irqreturn_t (*non_ioq_intr_handler)(void *ioq_vector);
	irqreturn_t (*ioq_intr_handler)(void *ioq_vector);
	int (*soft_reset)(struct octep_device *oct);
	void (*reinit_regs)(struct octep_device *oct);
	u32 (*update_iq_read_idx)(struct octep_iq *iq);

	void (*enable_interrupts)(struct octep_device *oct);
	void (*disable_interrupts)(struct octep_device *oct);

	void (*enable_io_queues)(struct octep_device *oct);
	void (*disable_io_queues)(struct octep_device *oct);
	void (*enable_iq)(struct octep_device *oct, int q);
	void (*disable_iq)(struct octep_device *oct, int q);
	void (*enable_oq)(struct octep_device *oct, int q);
	void (*disable_oq)(struct octep_device *oct, int q);
	void (*reset_io_queues)(struct octep_device *oct);
	void (*dump_registers)(struct octep_device *oct);
};

/* Octeon mailbox data */
struct octep_mbox_data {
	u32 cmd;
	u32 total_len;
	u32 recv_len;
	u32 rsvd;
	u64 *data;
};

/* Octeon device mailbox (PF <-> VF) */
struct octep_mbox {
	/* A spinlock to protect access to this q_mbox. */
	spinlock_t lock;

	u32 q_no;
	u32 state;

	/* SLI_MAC_PF_MBOX_INT for PF, SLI_PKT_MBOX_INT for VF. */
	u8 __iomem *mbox_int_reg;

	/* SLI_PKT_PF_VF_MBOX_SIG(0) for PF,
	 * SLI_PKT_PF_VF_MBOX_SIG(1) for VF.
	 */
	u8 __iomem *mbox_write_reg;

	/* SLI_PKT_PF_VF_MBOX_SIG(1) for PF,
	 * SLI_PKT_PF_VF_MBOX_SIG(0) for VF.
	 */
	u8 __iomem *mbox_read_reg;

	struct octep_mbox_data mbox_data;
};

/* Tx/Rx queue vector per interrupt. */
struct octep_ioq_vector {
	char name[OCTEP_MSIX_NAME_SIZE];
	struct napi_struct napi;
	struct octep_device *octep_dev;
	struct octep_iq *iq;
	struct octep_oq *oq;
	cpumask_t affinity_mask;
};

/* Octeon hardware/firmware offload capability flags. */
#define OCTEP_CAP_TX_CHECKSUM BIT(0)
#define OCTEP_CAP_RX_CHECKSUM BIT(1)
#define OCTEP_CAP_TSO         BIT(2)

/* Link modes */
enum octep_link_mode_bit_indices {
	OCTEP_LINK_MODE_10GBASE_T    = 0,
	OCTEP_LINK_MODE_10GBASE_R,
	OCTEP_LINK_MODE_10GBASE_CR,
	OCTEP_LINK_MODE_10GBASE_KR,
	OCTEP_LINK_MODE_10GBASE_LR,
	OCTEP_LINK_MODE_10GBASE_SR,
	OCTEP_LINK_MODE_25GBASE_CR,
	OCTEP_LINK_MODE_25GBASE_KR,
	OCTEP_LINK_MODE_25GBASE_SR,
	OCTEP_LINK_MODE_40GBASE_CR4,
	OCTEP_LINK_MODE_40GBASE_KR4,
	OCTEP_LINK_MODE_40GBASE_LR4,
	OCTEP_LINK_MODE_40GBASE_SR4,
	OCTEP_LINK_MODE_50GBASE_CR2,
	OCTEP_LINK_MODE_50GBASE_KR2,
	OCTEP_LINK_MODE_50GBASE_SR2,
	OCTEP_LINK_MODE_50GBASE_CR,
	OCTEP_LINK_MODE_50GBASE_KR,
	OCTEP_LINK_MODE_50GBASE_LR,
	OCTEP_LINK_MODE_50GBASE_SR,
	OCTEP_LINK_MODE_100GBASE_CR4,
	OCTEP_LINK_MODE_100GBASE_KR4,
	OCTEP_LINK_MODE_100GBASE_LR4,
	OCTEP_LINK_MODE_100GBASE_SR4,
	OCTEP_LINK_MODE_NBITS
};

/* Hardware interface link state information. */
struct octep_iface_link_info {
	/* Bitmap of Supported link speeds/modes. */
	u64 supported_modes;

	/* Bitmap of Advertised link speeds/modes. */
	u64 advertised_modes;

	/* Negotiated link speed in Mbps. */
	u32 speed;

	/* MTU */
	u16 mtu;

	/* Autonegotiation state. */
#define OCTEP_LINK_MODE_AUTONEG_SUPPORTED   BIT(0)
#define OCTEP_LINK_MODE_AUTONEG_ADVERTISED  BIT(1)
	u8 autoneg;

	/* Pause frames setting. */
#define OCTEP_LINK_MODE_PAUSE_SUPPORTED   BIT(0)
#define OCTEP_LINK_MODE_PAUSE_ADVERTISED  BIT(1)
	u8 pause;

	/* Admin state of the link (ifconfig <iface> up/down) */
	u8 admin_up;

	/* Operational state of the link: physical link is up down */
	u8 oper_up;
};

/* The Octeon device specific private data structure.
 * Each Octeon device has this structure to represent all its components.
 */
struct octep_device {
	struct octep_config *conf;

	/* Octeon Chip type. */
	u16 chip_id;
	u16 rev_id;

	/* Device capabilities enabled */
	u64 caps_enabled;
	/* Device capabilities supported */
	u64 caps_supported;

	/* Pointer to basic Linux device */
	struct device *dev;
	/* Linux PCI device pointer */
	struct pci_dev *pdev;
	/* Netdev corresponding to the Octeon device */
	struct net_device *netdev;

	/* memory mapped io range */
	struct octep_mmio mmio[OCTEP_MMIO_REGIONS];

	/* MAC address */
	u8 mac_addr[ETH_ALEN];

	/* Tx queues (IQ: Instruction Queue) */
	u16 num_iqs;
	/* pkind value to be used in every Tx hardware descriptor */
	u8 pkind;
	/* Pointers to Octeon Tx queues */
	struct octep_iq *iq[OCTEP_MAX_IQ];

	/* Rx queues (OQ: Output Queue) */
	u16 num_oqs;
	/* Pointers to Octeon Rx queues */
	struct octep_oq *oq[OCTEP_MAX_OQ];

	/* Hardware port number of the PCIe interface */
	u16 pcie_port;

	/* PCI Window registers to access some hardware CSRs */
	struct octep_pci_win_regs pci_win_regs;
	/* Hardware operations */
	struct octep_hw_ops hw_ops;

	/* IRQ info */
	u16 num_irqs;
	u16 num_non_ioq_irqs;
	char *non_ioq_irq_names;
	struct msix_entry *msix_entries;
	/* IOq information of it's corresponding MSI-X interrupt. */
	struct octep_ioq_vector *ioq_vector[OCTEP_MAX_QUEUES];

	/* Hardware Interface Tx statistics */
	struct octep_iface_tx_stats iface_tx_stats;
	/* Hardware Interface Rx statistics */
	struct octep_iface_rx_stats iface_rx_stats;

	/* Hardware Interface Link info like supported modes, aneg support */
	struct octep_iface_link_info link_info;

	/* Mailbox to talk to VFs */
	struct octep_mbox *mbox[OCTEP_MAX_VF];

	/* Work entry to handle Tx timeout */
	struct work_struct tx_timeout_task;

	/* control mbox over pf */
	struct octep_ctrl_mbox ctrl_mbox;

	/* offset for iface stats */
	u32 ctrl_mbox_ifstats_offset;

	/* Work entry to handle ctrl mbox interrupt */
	struct work_struct ctrl_mbox_task;

};

/* Silicon major revision; pass numbers start at 1, so rev 0 reads as 1. */
static inline u16 OCTEP_MAJOR_REV(struct octep_device *oct)
{
	u16 rev = (oct->rev_id & 0xC) >> 2;

	return (rev == 0) ? 1 : rev;
}

/* Silicon minor revision. */
static inline u16 OCTEP_MINOR_REV(struct octep_device *oct)
{
	return (oct->rev_id & 0x3);
}

/* Octeon CSR read/write access APIs (all CSRs live in BAR0 / mmio[0]) */
#define octep_write_csr(octep_dev, reg_off, value) \
	writel(value, (octep_dev)->mmio[0].hw_addr + (reg_off))

#define octep_write_csr64(octep_dev, reg_off, val64) \
	writeq(val64, (octep_dev)->mmio[0].hw_addr + (reg_off))

#define octep_read_csr(octep_dev, reg_off) \
	readl((octep_dev)->mmio[0].hw_addr + (reg_off))

#define octep_read_csr64(octep_dev, reg_off) \
	readq((octep_dev)->mmio[0].hw_addr + (reg_off))

/* Read windowed register.
 * @param oct - pointer to the Octeon device.
 * @param addr - Address of the register to read.
311 + * 312 + * This routine is called to read from the indirectly accessed 313 + * Octeon registers that are visible through a PCI BAR0 mapped window 314 + * register. 315 + * @return - 64 bit value read from the register. 316 + */ 317 + static inline u64 318 + OCTEP_PCI_WIN_READ(struct octep_device *oct, u64 addr) 319 + { 320 + u64 val64; 321 + 322 + addr |= 1ull << 53; /* read 8 bytes */ 323 + writeq(addr, oct->pci_win_regs.pci_win_rd_addr); 324 + val64 = readq(oct->pci_win_regs.pci_win_rd_data); 325 + 326 + dev_dbg(&oct->pdev->dev, 327 + "%s: reg: 0x%016llx val: 0x%016llx\n", __func__, addr, val64); 328 + 329 + return val64; 330 + } 331 + 332 + /* Write windowed register. 333 + * @param oct - pointer to the Octeon device. 334 + * @param addr - Address of the register to write 335 + * @param val - Value to write 336 + * 337 + * This routine is called to write to the indirectly accessed 338 + * Octeon registers that are visible through a PCI BAR0 mapped window 339 + * register. 340 + * @return Nothing. 
341 + */ 342 + static inline void 343 + OCTEP_PCI_WIN_WRITE(struct octep_device *oct, u64 addr, u64 val) 344 + { 345 + writeq(addr, oct->pci_win_regs.pci_win_wr_addr); 346 + writeq(val, oct->pci_win_regs.pci_win_wr_data); 347 + 348 + dev_dbg(&oct->pdev->dev, 349 + "%s: reg: 0x%016llx val: 0x%016llx\n", __func__, addr, val); 350 + } 351 + 352 + extern struct workqueue_struct *octep_wq; 353 + 354 + int octep_device_setup(struct octep_device *oct); 355 + int octep_setup_iqs(struct octep_device *oct); 356 + void octep_free_iqs(struct octep_device *oct); 357 + void octep_clean_iqs(struct octep_device *oct); 358 + int octep_setup_oqs(struct octep_device *oct); 359 + void octep_free_oqs(struct octep_device *oct); 360 + void octep_oq_dbell_init(struct octep_device *oct); 361 + void octep_device_setup_cn93_pf(struct octep_device *oct); 362 + int octep_iq_process_completions(struct octep_iq *iq, u16 budget); 363 + int octep_oq_process_rx(struct octep_oq *oq, int budget); 364 + void octep_set_ethtool_ops(struct net_device *netdev); 365 + 366 + #endif /* _OCTEP_MAIN_H_ */
+367
drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell Octeon EP (EndPoint) Ethernet Driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#ifndef _OCTEP_REGS_CN9K_PF_H_
#define _OCTEP_REGS_CN9K_PF_H_

/* ############################ RST ######################### */
#define CN93_RST_BOOT				0x000087E006001600ULL
#define CN93_RST_CORE_DOMAIN_W1S		0x000087E006001820ULL
#define CN93_RST_CORE_DOMAIN_W1C		0x000087E006001828ULL

#define CN93_CONFIG_XPANSION_BAR		0x38
#define CN93_CONFIG_PCIE_CAP			0x70
#define CN93_CONFIG_PCIE_DEVCAP			0x74
#define CN93_CONFIG_PCIE_DEVCTL			0x78
#define CN93_CONFIG_PCIE_LINKCAP		0x7C
#define CN93_CONFIG_PCIE_LINKCTL		0x80
#define CN93_CONFIG_PCIE_SLOTCAP		0x84
#define CN93_CONFIG_PCIE_SLOTCTL		0x88

#define CN93_PCIE_SRIOV_FDL			0x188 /* 0x98 */
#define CN93_PCIE_SRIOV_FDL_BIT_POS		0x10
#define CN93_PCIE_SRIOV_FDL_MASK		0xFF

#define CN93_CONFIG_PCIE_FLTMSK			0x720

/* ################# Offsets of RING, EPF, MAC ######################### */
#define CN93_RING_OFFSET			(0x1ULL << 17)
#define CN93_EPF_OFFSET				(0x1ULL << 25)
#define CN93_MAC_OFFSET				(0x1ULL << 4)
#define CN93_BIT_ARRAY_OFFSET			(0x1ULL << 4)
#define CN93_EPVF_RING_OFFSET			(0x1ULL << 4)

/* ################# Scratch Registers ######################### */
#define CN93_SDP_EPF_SCRATCH			0x205E0

/* ################# Window Registers ######################### */
#define CN93_SDP_WIN_WR_ADDR64			0x20000
#define CN93_SDP_WIN_RD_ADDR64			0x20010
#define CN93_SDP_WIN_WR_DATA64			0x20020
#define CN93_SDP_WIN_WR_MASK_REG		0x20030
#define CN93_SDP_WIN_RD_DATA64			0x20040

#define CN93_SDP_MAC_NUMBER			0x2C100

/* ################# Global Privileged registers ######################### */
#define CN93_SDP_EPF_RINFO			0x205F0

#define CN93_SDP_EPF_RINFO_SRN(val)		((val) & 0xFF)
#define CN93_SDP_EPF_RINFO_RPVF(val)		(((val) >> 32) & 0xF)
/* Bitwise AND, not logical: extract the NVFS field from bits 48+.
 * (The original used '&&', which collapses the field to 0/1.)
 */
#define CN93_SDP_EPF_RINFO_NVFS(val)		(((val) >> 48) & 0xFF)

/* SDP Function select */
#define CN93_SDP_FUNC_SEL_EPF_BIT_POS		8
#define CN93_SDP_FUNC_SEL_FUNC_BIT_POS		0

/* ##### RING IN (Into device from PCI: Tx Ring) REGISTERS #### */
#define CN93_SDP_R_IN_CONTROL_START		0x10000
#define CN93_SDP_R_IN_ENABLE_START		0x10010
#define CN93_SDP_R_IN_INSTR_BADDR_START		0x10020
#define CN93_SDP_R_IN_INSTR_RSIZE_START		0x10030
#define CN93_SDP_R_IN_INSTR_DBELL_START		0x10040
#define CN93_SDP_R_IN_CNTS_START		0x10050
#define CN93_SDP_R_IN_INT_LEVELS_START		0x10060
#define CN93_SDP_R_IN_PKT_CNT_START		0x10080
#define CN93_SDP_R_IN_BYTE_CNT_START		0x10090

#define CN93_SDP_R_IN_CONTROL(ring)		\
	(CN93_SDP_R_IN_CONTROL_START + ((ring) * CN93_RING_OFFSET))

#define CN93_SDP_R_IN_ENABLE(ring)		\
	(CN93_SDP_R_IN_ENABLE_START + ((ring) * CN93_RING_OFFSET))

#define CN93_SDP_R_IN_INSTR_BADDR(ring)		\
	(CN93_SDP_R_IN_INSTR_BADDR_START + ((ring) * CN93_RING_OFFSET))

#define CN93_SDP_R_IN_INSTR_RSIZE(ring)		\
	(CN93_SDP_R_IN_INSTR_RSIZE_START + ((ring) * CN93_RING_OFFSET))

#define CN93_SDP_R_IN_INSTR_DBELL(ring)		\
	(CN93_SDP_R_IN_INSTR_DBELL_START + ((ring) * CN93_RING_OFFSET))

#define CN93_SDP_R_IN_CNTS(ring)		\
	(CN93_SDP_R_IN_CNTS_START + ((ring) * CN93_RING_OFFSET))

#define CN93_SDP_R_IN_INT_LEVELS(ring)		\
	(CN93_SDP_R_IN_INT_LEVELS_START + ((ring) * CN93_RING_OFFSET))

#define CN93_SDP_R_IN_PKT_CNT(ring)		\
	(CN93_SDP_R_IN_PKT_CNT_START + ((ring) * CN93_RING_OFFSET))

#define CN93_SDP_R_IN_BYTE_CNT(ring)		\
	(CN93_SDP_R_IN_BYTE_CNT_START + ((ring) * CN93_RING_OFFSET))

/* Rings per Virtual Function */
#define CN93_R_IN_CTL_RPVF_MASK			(0xF)
#define CN93_R_IN_CTL_RPVF_POS			(48)

/* Number of instructions to be read in one MAC read request.
 * setting to Max value(4)
 */
#define CN93_R_IN_CTL_IDLE			(0x1ULL << 28)
#define CN93_R_IN_CTL_RDSIZE			(0x3ULL << 25)
#define CN93_R_IN_CTL_IS_64B			(0x1ULL << 24)
#define CN93_R_IN_CTL_D_NSR			(0x1ULL << 8)
#define CN93_R_IN_CTL_D_ESR			(0x1ULL << 6)
#define CN93_R_IN_CTL_D_ROR			(0x1ULL << 5)
#define CN93_R_IN_CTL_NSR			(0x1ULL << 3)
#define CN93_R_IN_CTL_ESR			(0x1ULL << 1)
#define CN93_R_IN_CTL_ROR			(0x1ULL << 0)

#define CN93_R_IN_CTL_MASK			(CN93_R_IN_CTL_RDSIZE | CN93_R_IN_CTL_IS_64B)

/* ##### RING OUT (out from device to PCI host: Rx Ring) REGISTERS #### */
#define CN93_SDP_R_OUT_CNTS_START		0x10100
#define CN93_SDP_R_OUT_INT_LEVELS_START		0x10110
#define CN93_SDP_R_OUT_SLIST_BADDR_START	0x10120
#define CN93_SDP_R_OUT_SLIST_RSIZE_START	0x10130
#define CN93_SDP_R_OUT_SLIST_DBELL_START	0x10140
#define CN93_SDP_R_OUT_CONTROL_START		0x10150
#define CN93_SDP_R_OUT_ENABLE_START		0x10160
#define CN93_SDP_R_OUT_PKT_CNT_START		0x10180
#define CN93_SDP_R_OUT_BYTE_CNT_START		0x10190

#define CN93_SDP_R_OUT_CONTROL(ring)		\
	(CN93_SDP_R_OUT_CONTROL_START + ((ring) * CN93_RING_OFFSET))

#define CN93_SDP_R_OUT_ENABLE(ring)		\
	(CN93_SDP_R_OUT_ENABLE_START + ((ring) * CN93_RING_OFFSET))

#define CN93_SDP_R_OUT_SLIST_BADDR(ring)	\
	(CN93_SDP_R_OUT_SLIST_BADDR_START + ((ring) * CN93_RING_OFFSET))

#define CN93_SDP_R_OUT_SLIST_RSIZE(ring)	\
	(CN93_SDP_R_OUT_SLIST_RSIZE_START + ((ring) * CN93_RING_OFFSET))

#define CN93_SDP_R_OUT_SLIST_DBELL(ring)	\
	(CN93_SDP_R_OUT_SLIST_DBELL_START + ((ring) * CN93_RING_OFFSET))

#define CN93_SDP_R_OUT_CNTS(ring)		\
	(CN93_SDP_R_OUT_CNTS_START + ((ring) * CN93_RING_OFFSET))

#define CN93_SDP_R_OUT_INT_LEVELS(ring)		\
	(CN93_SDP_R_OUT_INT_LEVELS_START + ((ring) * CN93_RING_OFFSET))

#define CN93_SDP_R_OUT_PKT_CNT(ring)		\
	(CN93_SDP_R_OUT_PKT_CNT_START + ((ring) * CN93_RING_OFFSET))

#define CN93_SDP_R_OUT_BYTE_CNT(ring)		\
	(CN93_SDP_R_OUT_BYTE_CNT_START + ((ring) * CN93_RING_OFFSET))

/*------------------ R_OUT Masks ----------------*/
#define CN93_R_OUT_INT_LEVELS_BMODE		BIT_ULL(63)
#define CN93_R_OUT_INT_LEVELS_TIMET		(32)

#define CN93_R_OUT_CTL_IDLE			BIT_ULL(40)
#define CN93_R_OUT_CTL_ES_I			BIT_ULL(34)
#define CN93_R_OUT_CTL_NSR_I			BIT_ULL(33)
#define CN93_R_OUT_CTL_ROR_I			BIT_ULL(32)
#define CN93_R_OUT_CTL_ES_D			BIT_ULL(30)
#define CN93_R_OUT_CTL_NSR_D			BIT_ULL(29)
#define CN93_R_OUT_CTL_ROR_D			BIT_ULL(28)
#define CN93_R_OUT_CTL_ES_P			BIT_ULL(26)
#define CN93_R_OUT_CTL_NSR_P			BIT_ULL(25)
#define CN93_R_OUT_CTL_ROR_P			BIT_ULL(24)
#define CN93_R_OUT_CTL_IMODE			BIT_ULL(23)

/* ############### Interrupt Moderation Registers ############### */
#define CN93_SDP_R_IN_INT_MDRT_CTL0_START	0x10280
#define CN93_SDP_R_IN_INT_MDRT_CTL1_START	0x102A0
#define CN93_SDP_R_IN_INT_MDRT_DBG_START	0x102C0

#define CN93_SDP_R_OUT_INT_MDRT_CTL0_START	0x10380
#define CN93_SDP_R_OUT_INT_MDRT_CTL1_START	0x103A0
#define CN93_SDP_R_OUT_INT_MDRT_DBG_START	0x103C0

#define CN93_SDP_R_IN_INT_MDRT_CTL0(ring)	\
	(CN93_SDP_R_IN_INT_MDRT_CTL0_START + ((ring) * CN93_RING_OFFSET))

#define CN93_SDP_R_IN_INT_MDRT_CTL1(ring)	\
	(CN93_SDP_R_IN_INT_MDRT_CTL1_START + ((ring) * CN93_RING_OFFSET))

#define CN93_SDP_R_IN_INT_MDRT_DBG(ring)	\
	(CN93_SDP_R_IN_INT_MDRT_DBG_START + ((ring) * CN93_RING_OFFSET))

#define CN93_SDP_R_OUT_INT_MDRT_CTL0(ring)	\
	(CN93_SDP_R_OUT_INT_MDRT_CTL0_START + ((ring) * CN93_RING_OFFSET))

#define CN93_SDP_R_OUT_INT_MDRT_CTL1(ring)	\
	(CN93_SDP_R_OUT_INT_MDRT_CTL1_START + ((ring) * CN93_RING_OFFSET))

#define CN93_SDP_R_OUT_INT_MDRT_DBG(ring)	\
	(CN93_SDP_R_OUT_INT_MDRT_DBG_START + ((ring) * CN93_RING_OFFSET))

/* ##################### Mail Box Registers ########################## */
/* INT register for VF. when a MBOX write from PF happened to a VF,
 * corresponding bit will be set in this register as well as in
 * PF_VF_INT register.
 *
 * This is a RO register, the int can be cleared by writing 1 to PF_VF_INT
 */
/* Basically first 3 are from PF to VF. The last one is data from VF to PF */
#define CN93_SDP_R_MBOX_PF_VF_DATA_START	0x10210
#define CN93_SDP_R_MBOX_PF_VF_INT_START		0x10220
#define CN93_SDP_R_MBOX_VF_PF_DATA_START	0x10230

#define CN93_SDP_R_MBOX_PF_VF_DATA(ring)	\
	(CN93_SDP_R_MBOX_PF_VF_DATA_START + ((ring) * CN93_RING_OFFSET))

#define CN93_SDP_R_MBOX_PF_VF_INT(ring)		\
	(CN93_SDP_R_MBOX_PF_VF_INT_START + ((ring) * CN93_RING_OFFSET))

#define CN93_SDP_R_MBOX_VF_PF_DATA(ring)	\
	(CN93_SDP_R_MBOX_VF_PF_DATA_START + ((ring) * CN93_RING_OFFSET))

/* ##################### Interrupt Registers ########################## */
#define CN93_SDP_R_ERR_TYPE_START		0x10400

#define CN93_SDP_R_ERR_TYPE(ring)		\
	(CN93_SDP_R_ERR_TYPE_START + ((ring) * CN93_RING_OFFSET))

#define CN93_SDP_R_MBOX_ISM_START		0x10500
#define CN93_SDP_R_OUT_CNTS_ISM_START		0x10510
#define CN93_SDP_R_IN_CNTS_ISM_START		0x10520

#define CN93_SDP_R_MBOX_ISM(ring)		\
	(CN93_SDP_R_MBOX_ISM_START + ((ring) * CN93_RING_OFFSET))

#define CN93_SDP_R_OUT_CNTS_ISM(ring)		\
	(CN93_SDP_R_OUT_CNTS_ISM_START + ((ring) * CN93_RING_OFFSET))

#define CN93_SDP_R_IN_CNTS_ISM(ring)		\
	(CN93_SDP_R_IN_CNTS_ISM_START + ((ring) * CN93_RING_OFFSET))

#define CN93_SDP_EPF_MBOX_RINT_START		0x20100
#define CN93_SDP_EPF_MBOX_RINT_W1S_START	0x20120
#define CN93_SDP_EPF_MBOX_RINT_ENA_W1C_START	0x20140
#define CN93_SDP_EPF_MBOX_RINT_ENA_W1S_START	0x20160

#define CN93_SDP_EPF_VFIRE_RINT_START		0x20180
#define CN93_SDP_EPF_VFIRE_RINT_W1S_START	0x201A0
#define CN93_SDP_EPF_VFIRE_RINT_ENA_W1C_START	0x201C0
#define CN93_SDP_EPF_VFIRE_RINT_ENA_W1S_START	0x201E0

#define CN93_SDP_EPF_IRERR_RINT			0x20200
#define CN93_SDP_EPF_IRERR_RINT_W1S		0x20210
#define CN93_SDP_EPF_IRERR_RINT_ENA_W1C		0x20220
#define CN93_SDP_EPF_IRERR_RINT_ENA_W1S		0x20230

#define CN93_SDP_EPF_VFORE_RINT_START		0x20240
#define CN93_SDP_EPF_VFORE_RINT_W1S_START	0x20260
#define CN93_SDP_EPF_VFORE_RINT_ENA_W1C_START	0x20280
#define CN93_SDP_EPF_VFORE_RINT_ENA_W1S_START	0x202A0

#define CN93_SDP_EPF_ORERR_RINT			0x20320
#define CN93_SDP_EPF_ORERR_RINT_W1S		0x20330
#define CN93_SDP_EPF_ORERR_RINT_ENA_W1C		0x20340
#define CN93_SDP_EPF_ORERR_RINT_ENA_W1S		0x20350

#define CN93_SDP_EPF_OEI_RINT			0x20360
#define CN93_SDP_EPF_OEI_RINT_W1S		0x20370
#define CN93_SDP_EPF_OEI_RINT_ENA_W1C		0x20380
#define CN93_SDP_EPF_OEI_RINT_ENA_W1S		0x20390

#define CN93_SDP_EPF_DMA_RINT			0x20400
#define CN93_SDP_EPF_DMA_RINT_W1S		0x20410
#define CN93_SDP_EPF_DMA_RINT_ENA_W1C		0x20420
#define CN93_SDP_EPF_DMA_RINT_ENA_W1S		0x20430

#define CN93_SDP_EPF_DMA_INT_LEVEL_START	0x20440
#define CN93_SDP_EPF_DMA_CNT_START		0x20460
#define CN93_SDP_EPF_DMA_TIM_START		0x20480

#define CN93_SDP_EPF_MISC_RINT			0x204A0
#define CN93_SDP_EPF_MISC_RINT_W1S		0x204B0
#define CN93_SDP_EPF_MISC_RINT_ENA_W1C		0x204C0
#define CN93_SDP_EPF_MISC_RINT_ENA_W1S		0x204D0

#define CN93_SDP_EPF_DMA_VF_RINT_START		0x204E0
#define CN93_SDP_EPF_DMA_VF_RINT_W1S_START	0x20500
#define CN93_SDP_EPF_DMA_VF_RINT_ENA_W1C_START	0x20520
#define CN93_SDP_EPF_DMA_VF_RINT_ENA_W1S_START	0x20540

#define CN93_SDP_EPF_PP_VF_RINT_START		0x20560
#define CN93_SDP_EPF_PP_VF_RINT_W1S_START	0x20580
#define CN93_SDP_EPF_PP_VF_RINT_ENA_W1C_START	0x205A0
#define CN93_SDP_EPF_PP_VF_RINT_ENA_W1S_START	0x205C0

#define CN93_SDP_EPF_MBOX_RINT(index)		\
	(CN93_SDP_EPF_MBOX_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
#define CN93_SDP_EPF_MBOX_RINT_W1S(index)	\
	(CN93_SDP_EPF_MBOX_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
#define CN93_SDP_EPF_MBOX_RINT_ENA_W1C(index)	\
	(CN93_SDP_EPF_MBOX_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
#define CN93_SDP_EPF_MBOX_RINT_ENA_W1S(index)	\
	(CN93_SDP_EPF_MBOX_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))

#define CN93_SDP_EPF_VFIRE_RINT(index)		\
	(CN93_SDP_EPF_VFIRE_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
#define CN93_SDP_EPF_VFIRE_RINT_W1S(index)	\
	(CN93_SDP_EPF_VFIRE_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
#define CN93_SDP_EPF_VFIRE_RINT_ENA_W1C(index)	\
	(CN93_SDP_EPF_VFIRE_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
#define CN93_SDP_EPF_VFIRE_RINT_ENA_W1S(index)	\
	(CN93_SDP_EPF_VFIRE_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))

#define CN93_SDP_EPF_VFORE_RINT(index)		\
	(CN93_SDP_EPF_VFORE_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
#define CN93_SDP_EPF_VFORE_RINT_W1S(index)	\
	(CN93_SDP_EPF_VFORE_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
#define CN93_SDP_EPF_VFORE_RINT_ENA_W1C(index)	\
	(CN93_SDP_EPF_VFORE_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
#define CN93_SDP_EPF_VFORE_RINT_ENA_W1S(index)	\
	(CN93_SDP_EPF_VFORE_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))

/* Multiply the index by the bit-array stride, matching the sibling
 * macros above.  (The original used '+', which indexes wrongly for
 * any index other than 0.)
 */
#define CN93_SDP_EPF_DMA_VF_RINT(index)		\
	(CN93_SDP_EPF_DMA_VF_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
#define CN93_SDP_EPF_DMA_VF_RINT_W1S(index)	\
	(CN93_SDP_EPF_DMA_VF_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
#define CN93_SDP_EPF_DMA_VF_RINT_ENA_W1C(index)	\
	(CN93_SDP_EPF_DMA_VF_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
#define CN93_SDP_EPF_DMA_VF_RINT_ENA_W1S(index)	\
	(CN93_SDP_EPF_DMA_VF_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))

#define CN93_SDP_EPF_PP_VF_RINT(index)		\
	(CN93_SDP_EPF_PP_VF_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
#define CN93_SDP_EPF_PP_VF_RINT_W1S(index)	\
	(CN93_SDP_EPF_PP_VF_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
#define CN93_SDP_EPF_PP_VF_RINT_ENA_W1C(index)	\
	(CN93_SDP_EPF_PP_VF_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
#define CN93_SDP_EPF_PP_VF_RINT_ENA_W1S(index)	\
	(CN93_SDP_EPF_PP_VF_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))

/*------------------ Interrupt Masks ----------------*/
#define CN93_INTR_R_SEND_ISM			BIT_ULL(63)
#define CN93_INTR_R_OUT_INT			BIT_ULL(62)
#define CN93_INTR_R_IN_INT			BIT_ULL(61)
#define CN93_INTR_R_MBOX_INT			BIT_ULL(60)
#define CN93_INTR_R_RESEND			BIT_ULL(59)
#define CN93_INTR_R_CLR_TIM			BIT_ULL(58)

/* ####################### Ring Mapping Registers ################################## */
#define CN93_SDP_EPVF_RING_START		0x26000
#define CN93_SDP_IN_RING_TB_MAP_START		0x28000
#define CN93_SDP_IN_RATE_LIMIT_START		0x2A000
#define CN93_SDP_MAC_PF_RING_CTL_START		0x2C000

#define CN93_SDP_EPVF_RING(ring)		\
	(CN93_SDP_EPVF_RING_START + ((ring) * CN93_EPVF_RING_OFFSET))
/* Base name fixed: the original referenced the undefined
 * CN93_SDP_N_RING_TB_MAP_START (typo for the _IN_ define above).
 */
#define CN93_SDP_IN_RING_TB_MAP(ring)		\
	(CN93_SDP_IN_RING_TB_MAP_START + ((ring) * CN93_EPVF_RING_OFFSET))
#define CN93_SDP_IN_RATE_LIMIT(ring)		\
	(CN93_SDP_IN_RATE_LIMIT_START + ((ring) * CN93_EPVF_RING_OFFSET))
#define CN93_SDP_MAC_PF_RING_CTL(mac)		\
	(CN93_SDP_MAC_PF_RING_CTL_START + ((mac) * CN93_MAC_OFFSET))

#define CN93_SDP_MAC_PF_RING_CTL_NPFS(val)	((val) & 0xF)
#define CN93_SDP_MAC_PF_RING_CTL_SRN(val)	(((val) >> 8) & 0xFF)
#define CN93_SDP_MAC_PF_RING_CTL_RPPF(val)	(((val) >> 16) & 0x3F)

/* Number of non-queue interrupts in CN93xx */
#define CN93_NUM_NON_IOQ_INTR			16
#endif /* _OCTEP_REGS_CN9K_PF_H_ */
+508
drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
// SPDX-License-Identifier: GPL-2.0
/* Marvell Octeon EP (EndPoint) Ethernet Driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>

#include "octep_config.h"
#include "octep_main.h"

/* Reset all software ring indices and counters of an Rx queue. */
static void octep_oq_reset_indices(struct octep_oq *oq)
{
	oq->host_read_idx = 0;
	oq->host_refill_idx = 0;
	oq->refill_count = 0;
	oq->last_pkt_count = 0;
	oq->pkts_pending = 0;
}

/**
 * octep_oq_fill_ring_buffers() - fill initial receive buffers for Rx ring.
 *
 * @oq: Octeon Rx queue data structure.
 *
 * Allocates one page per descriptor and DMA-maps it for device writes.
 *
 * Return: 0, if successfully filled receive buffers for all descriptors.
 *         -1, if failed to allocate a buffer or failed to map for DMA.
 */
static int octep_oq_fill_ring_buffers(struct octep_oq *oq)
{
	struct octep_oq_desc_hw *desc_ring = oq->desc_ring;
	struct page *page;
	u32 i;

	for (i = 0; i < oq->max_count; i++) {
		page = dev_alloc_page();
		if (unlikely(!page)) {
			dev_err(oq->dev, "Rx buffer alloc failed\n");
			goto rx_buf_alloc_err;
		}
		desc_ring[i].buffer_ptr = dma_map_page(oq->dev, page, 0,
						       PAGE_SIZE,
						       DMA_FROM_DEVICE);
		if (dma_mapping_error(oq->dev, desc_ring[i].buffer_ptr)) {
			dev_err(oq->dev,
				"OQ-%d buffer alloc: DMA mapping error!\n",
				oq->q_no);
			put_page(page);
			goto dma_map_err;
		}
		oq->buff_info[i].page = page;
	}

	return 0;

dma_map_err:
rx_buf_alloc_err:
	/* Unwind: unmap and release every buffer set up so far. */
	while (i) {
		i--;
		dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr, PAGE_SIZE, DMA_FROM_DEVICE);
		put_page(oq->buff_info[i].page);
		oq->buff_info[i].page = NULL;
	}

	return -1;
}

/**
 * octep_oq_refill() - refill buffers for used Rx ring descriptors.
 *
 * @oct: Octeon device private data structure.
 * @oq: Octeon Rx queue data structure.
 *
 * On allocation/mapping failure the loop stops early; refill_count keeps
 * the remainder so a later pass can retry those descriptors.
 *
 * Return: number of descriptors successfully refilled with receive buffers.
 */
static int octep_oq_refill(struct octep_device *oct, struct octep_oq *oq)
{
	struct octep_oq_desc_hw *desc_ring = oq->desc_ring;
	struct page *page;
	u32 refill_idx, i;

	refill_idx = oq->host_refill_idx;
	for (i = 0; i < oq->refill_count; i++) {
		page = dev_alloc_page();
		if (unlikely(!page)) {
			dev_err(oq->dev, "refill: rx buffer alloc failed\n");
			oq->stats.alloc_failures++;
			break;
		}

		desc_ring[refill_idx].buffer_ptr = dma_map_page(oq->dev, page, 0,
								PAGE_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(oq->dev, desc_ring[refill_idx].buffer_ptr)) {
			dev_err(oq->dev,
				"OQ-%d buffer refill: DMA mapping error!\n",
				oq->q_no);
			put_page(page);
			oq->stats.alloc_failures++;
			break;
		}
		oq->buff_info[refill_idx].page = page;
		refill_idx++;
		if (refill_idx == oq->max_count)
			refill_idx = 0;
	}
	oq->host_refill_idx = refill_idx;
	oq->refill_count -= i;

	return i;
}

/**
 * octep_setup_oq() - Setup a Rx queue.
 *
 * @oct: Octeon device private data structure.
 * @q_no: Rx queue number to be setup.
 *
 * Allocate resources for a Rx queue.
 *
 * Return: 0 on success; -1 on any allocation/initialization failure
 * (all partially-acquired resources are released via the unwind labels).
 */
static int octep_setup_oq(struct octep_device *oct, int q_no)
{
	struct octep_oq *oq;
	u32 desc_ring_size;

	oq = vzalloc(sizeof(*oq));
	if (!oq)
		goto create_oq_fail;
	oct->oq[q_no] = oq;

	oq->octep_dev = oct;
	oq->netdev = oct->netdev;
	oq->dev = &oct->pdev->dev;
	oq->q_no = q_no;
	oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf);
	/* NOTE(review): ring_size_mask assumes max_count is a power of
	 * two — confirm against the config defaults.
	 */
	oq->ring_size_mask = oq->max_count - 1;
	oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf);
	oq->max_single_buffer_size = oq->buffer_size - OCTEP_OQ_RESP_HW_SIZE;

	/* When the hardware/firmware supports additional capabilities,
	 * additional header is filled-in by Octeon after length field in
	 * Rx packets. this header contains additional packet information.
	 */
	if (oct->caps_enabled)
		oq->max_single_buffer_size -= OCTEP_OQ_RESP_HW_EXT_SIZE;

	oq->refill_threshold = CFG_GET_OQ_REFILL_THRESHOLD(oct->conf);

	desc_ring_size = oq->max_count * OCTEP_OQ_DESC_SIZE;
	oq->desc_ring = dma_alloc_coherent(oq->dev, desc_ring_size,
					   &oq->desc_ring_dma, GFP_KERNEL);

	if (unlikely(!oq->desc_ring)) {
		dev_err(oq->dev,
			"Failed to allocate DMA memory for OQ-%d !!\n", q_no);
		goto desc_dma_alloc_err;
	}

	oq->buff_info = (struct octep_rx_buffer *)
			vzalloc(oq->max_count * OCTEP_OQ_RECVBUF_SIZE);
	if (unlikely(!oq->buff_info)) {
		dev_err(&oct->pdev->dev,
			"Failed to allocate buffer info for OQ-%d\n", q_no);
		goto buf_list_err;
	}

	if (octep_oq_fill_ring_buffers(oq))
		goto oq_fill_buff_err;

	octep_oq_reset_indices(oq);
	oct->hw_ops.setup_oq_regs(oct, q_no);
	oct->num_oqs++;

	return 0;

oq_fill_buff_err:
	vfree(oq->buff_info);
	oq->buff_info = NULL;
buf_list_err:
	dma_free_coherent(oq->dev, desc_ring_size,
			  oq->desc_ring, oq->desc_ring_dma);
	oq->desc_ring = NULL;
desc_dma_alloc_err:
	vfree(oq);
	oct->oq[q_no] = NULL;
create_oq_fail:
	return -1;
}

/**
 * octep_oq_free_ring_buffers() - Free ring buffers.
 *
 * @oq: Octeon Rx queue data structure.
 *
 * Free receive buffers in unused Rx queue descriptors.
 */
static void octep_oq_free_ring_buffers(struct octep_oq *oq)
{
	struct octep_oq_desc_hw *desc_ring = oq->desc_ring;
	int i;

	if (!oq->desc_ring || !oq->buff_info)
		return;

	for (i = 0; i < oq->max_count; i++)  {
		if (oq->buff_info[i].page) {
			dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr,
				       PAGE_SIZE, DMA_FROM_DEVICE);
			put_page(oq->buff_info[i].page);
			oq->buff_info[i].page = NULL;
			desc_ring[i].buffer_ptr = 0;
		}
	}
	octep_oq_reset_indices(oq);
}

/**
 * octep_free_oq() - Free Rx queue resources.
 *
 * @oq: Octeon Rx queue data structure.
 *
 * Free all resources of a Rx queue; clears the device's queue pointer
 * and decrements num_oqs.
 */
static int octep_free_oq(struct octep_oq *oq)
{
	struct octep_device *oct = oq->octep_dev;
	int q_no = oq->q_no;

	octep_oq_free_ring_buffers(oq);

	if (oq->buff_info)
		vfree(oq->buff_info);

	if (oq->desc_ring)
		dma_free_coherent(oq->dev,
				  oq->max_count * OCTEP_OQ_DESC_SIZE,
				  oq->desc_ring, oq->desc_ring_dma);

	vfree(oq);
	oct->oq[q_no] = NULL;
	oct->num_oqs--;
	return 0;
}

/**
 * octep_setup_oqs() - setup resources for all Rx queues.
 *
 * @oct: Octeon device private data structure.
 *
 * Return: 0 on success; -1 if any queue setup fails (queues set up
 * earlier in the loop are freed before returning).
 */
int octep_setup_oqs(struct octep_device *oct)
{
	int i, retval = 0;

	oct->num_oqs = 0;
	for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
		retval = octep_setup_oq(oct, i);
		if (retval) {
			dev_err(&oct->pdev->dev,
				"Failed to setup OQ(RxQ)-%d.\n", i);
			goto oq_setup_err;
		}
		dev_dbg(&oct->pdev->dev, "Successfully setup OQ(RxQ)-%d.\n", i);
	}

	return 0;

oq_setup_err:
	while (i) {
		i--;
		octep_free_oq(oct->oq[i]);
	}
	return -1;
}

/**
 * octep_oq_dbell_init() - Initialize Rx queue doorbell.
 *
 * @oct: Octeon device private data structure.
 *
 * Write number of descriptors to Rx queue doorbell register.
 */
void octep_oq_dbell_init(struct octep_device *oct)
{
	int i;

	for (i = 0; i < oct->num_oqs; i++)
		writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);
}

/**
 * octep_free_oqs() - Free resources of all Rx queues.
 *
 * @oct: Octeon device private data structure.
 */
void octep_free_oqs(struct octep_device *oct)
{
	int i;

	for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
		if (!oct->oq[i])
			continue;
		octep_free_oq(oct->oq[i]);
		dev_dbg(&oct->pdev->dev,
			"Successfully freed OQ(RxQ)-%d.\n", i);
	}
}

/**
 * octep_oq_check_hw_for_pkts() - Check for new Rx packets.
 *
 * @oct: Octeon device private data structure.
 * @oq: Octeon Rx queue data structure.
 *
 * Return: packets received after previous check.
 */
static int octep_oq_check_hw_for_pkts(struct octep_device *oct,
				      struct octep_oq *oq)
{
	u32 pkt_count, new_pkts;

	pkt_count = readl(oq->pkts_sent_reg);
	new_pkts = pkt_count - oq->last_pkt_count;

	/* Clear the hardware packets counter register if the rx queue is
	 * being processed continuously with-in a single interrupt and
	 * reached half its max value.
	 * this counter is not cleared every time read, to save write cycles.
	 */
	if (unlikely(pkt_count > 0xF0000000U)) {
		/* Writing the read value back clears those packets from the
		 * counter; re-read to pick up packets that arrived in between.
		 */
		writel(pkt_count, oq->pkts_sent_reg);
		pkt_count = readl(oq->pkts_sent_reg);
		new_pkts += pkt_count;
	}
	oq->last_pkt_count = pkt_count;
	oq->pkts_pending += new_pkts;
	return new_pkts;
}

/**
 * __octep_oq_process_rx() - Process hardware Rx queue and push to stack.
 *
 * @oct: Octeon device private data structure.
 * @oq: Octeon Rx queue data structure.
 * @pkts_to_process: number of packets to be processed.
 *
 * Process the new packets in Rx queue.
 * Packets larger than single Rx buffer arrive in consecutive descriptors.
 * But, count returned by the API only accounts full packets, not fragments.
 *
 * NOTE(review): build_skb() return value is not checked on either path;
 * a failed allocation would oops in skb_reserve() — confirm and harden.
 *
 * Return: number of packets processed and pushed to stack.
 */
static int __octep_oq_process_rx(struct octep_device *oct,
				 struct octep_oq *oq, u16 pkts_to_process)
{
	struct octep_oq_resp_hw_ext *resp_hw_ext = NULL;
	struct octep_rx_buffer *buff_info;
	struct octep_oq_resp_hw *resp_hw;
	u32 pkt, rx_bytes, desc_used;
	struct sk_buff *skb;
	u16 data_offset;
	u32 read_idx;

	read_idx = oq->host_read_idx;
	rx_bytes = 0;
	desc_used = 0;
	for (pkt = 0; pkt < pkts_to_process; pkt++) {
		buff_info = (struct octep_rx_buffer *)&oq->buff_info[read_idx];
		dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
			       PAGE_SIZE, DMA_FROM_DEVICE);
		resp_hw = page_address(buff_info->page);
		buff_info->page = NULL;

		/* Swap the length field that is in Big-Endian to CPU */
		buff_info->len = be64_to_cpu(resp_hw->length);
		if (oct->caps_enabled & OCTEP_CAP_RX_CHECKSUM) {
			/* Extended response header is immediately after
			 * response header (resp_hw)
			 */
			resp_hw_ext = (struct octep_oq_resp_hw_ext *)
				      (resp_hw + 1);
			buff_info->len -= OCTEP_OQ_RESP_HW_EXT_SIZE;
			/* Packet Data is immediately after
			 * extended response header.
			 */
			data_offset = OCTEP_OQ_RESP_HW_SIZE +
				      OCTEP_OQ_RESP_HW_EXT_SIZE;
		} else {
			/* Data is immediately after
			 * Hardware Rx response header.
			 */
			data_offset = OCTEP_OQ_RESP_HW_SIZE;
		}
		rx_bytes += buff_info->len;

		if (buff_info->len <= oq->max_single_buffer_size) {
			/* Packet fits in one buffer: single-descriptor path. */
			skb = build_skb((void *)resp_hw, PAGE_SIZE);
			skb_reserve(skb, data_offset);
			skb_put(skb, buff_info->len);
			read_idx++;
			desc_used++;
			if (read_idx == oq->max_count)
				read_idx = 0;
		} else {
			struct skb_shared_info *shinfo;
			u16 data_len;

			skb = build_skb((void *)resp_hw, PAGE_SIZE);
			skb_reserve(skb, data_offset);
			/* Head fragment includes response header(s);
			 * subsequent fragments contains only data.
			 */
			skb_put(skb, oq->max_single_buffer_size);
			read_idx++;
			desc_used++;
			if (read_idx == oq->max_count)
				read_idx = 0;

			shinfo = skb_shinfo(skb);
			data_len = buff_info->len - oq->max_single_buffer_size;
			while (data_len) {
				dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
					       PAGE_SIZE, DMA_FROM_DEVICE);
				buff_info = (struct octep_rx_buffer *)
					    &oq->buff_info[read_idx];
				if (data_len < oq->buffer_size) {
					buff_info->len = data_len;
					data_len = 0;
				} else {
					buff_info->len = oq->buffer_size;
					data_len -= oq->buffer_size;
				}

				skb_add_rx_frag(skb, shinfo->nr_frags,
						buff_info->page, 0,
						buff_info->len,
						buff_info->len);
				buff_info->page = NULL;
				read_idx++;
				desc_used++;
				if (read_idx == oq->max_count)
					read_idx = 0;
			}
		}

		skb->dev = oq->netdev;
		skb->protocol =  eth_type_trans(skb, skb->dev);
		if (resp_hw_ext &&
		    resp_hw_ext->csum_verified == OCTEP_CSUM_VERIFIED)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;
		napi_gro_receive(oq->napi, skb);
	}

	oq->host_read_idx = read_idx;
	oq->refill_count += desc_used;
	oq->stats.packets += pkt;
	oq->stats.bytes += rx_bytes;

	return pkt;
}

/**
 * octep_oq_process_rx() - Process Rx queue.
 *
 * @oq: Octeon Rx queue data structure.
 * @budget: max number of packets can be processed in one invocation.
 *
 * Check for newly received packets and process them.
 * Keeps checking for new packets until budget is used or no new packets seen.
 *
 * Return: number of packets processed.
 */
int octep_oq_process_rx(struct octep_oq *oq, int budget)
{
	u32 pkts_available, pkts_processed, total_pkts_processed;
	struct octep_device *oct = oq->octep_dev;

	pkts_available = 0;
	pkts_processed = 0;
	total_pkts_processed = 0;
	while (total_pkts_processed < budget) {
		/* update pending count only when current one exhausted */
		if (oq->pkts_pending == 0)
			octep_oq_check_hw_for_pkts(oct, oq);
		pkts_available = min(budget - total_pkts_processed,
				     oq->pkts_pending);
		if (!pkts_available)
			break;

		pkts_processed = __octep_oq_process_rx(oct, oq,
						       pkts_available);
		oq->pkts_pending -= pkts_processed;
		total_pkts_processed += pkts_processed;
	}

	if (oq->refill_count >= oq->refill_threshold) {
		u32 desc_refilled = octep_oq_refill(oct, oq);

		/* flush pending writes before updating credits */
		wmb();
		writel(desc_refilled, oq->pkts_credit_reg);
	}

	return total_pkts_processed;
}
+199
drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Marvell Octeon EP (EndPoint) Ethernet Driver 3 + * 4 + * Copyright (C) 2020 Marvell. 5 + * 6 + */ 7 + 8 + #ifndef _OCTEP_RX_H_ 9 + #define _OCTEP_RX_H_ 10 + 11 + /* struct octep_oq_desc_hw - Octeon Hardware OQ descriptor format. 12 + * 13 + * The descriptor ring is made of descriptors which have 2 64-bit values: 14 + * 15 + * @buffer_ptr: DMA address of the skb->data 16 + * @info_ptr: DMA address of host memory, used to update pkt count by hw. 17 + * This is currently unused to save pci writes. 18 + */ 19 + struct octep_oq_desc_hw { 20 + dma_addr_t buffer_ptr; 21 + u64 info_ptr; 22 + }; 23 + 24 + #define OCTEP_OQ_DESC_SIZE (sizeof(struct octep_oq_desc_hw)) 25 + 26 + #define OCTEP_CSUM_L4_VERIFIED 0x1 27 + #define OCTEP_CSUM_IP_VERIFIED 0x2 28 + #define OCTEP_CSUM_VERIFIED (OCTEP_CSUM_L4_VERIFIED | OCTEP_CSUM_IP_VERIFIED) 29 + 30 + /* Extended Response Header in packet data received from Hardware. 31 + * Includes metadata like checksum status. 32 + * this is valid only if hardware/firmware published support for this. 33 + * This is at offset 0 of packet data (skb->data). 34 + */ 35 + struct octep_oq_resp_hw_ext { 36 + /* Reserved. */ 37 + u64 reserved:62; 38 + 39 + /* checksum verified. */ 40 + u64 csum_verified:2; 41 + }; 42 + 43 + #define OCTEP_OQ_RESP_HW_EXT_SIZE (sizeof(struct octep_oq_resp_hw_ext)) 44 + 45 + /* Length of Rx packet DMA'ed by Octeon to Host. 46 + * this is in bigendian; so need to be converted to cpu endian. 47 + * Octeon writes this at the beginning of Rx buffer (skb->data). 48 + */ 49 + struct octep_oq_resp_hw { 50 + /* The Length of the packet. */ 51 + __be64 length; 52 + }; 53 + 54 + #define OCTEP_OQ_RESP_HW_SIZE (sizeof(struct octep_oq_resp_hw)) 55 + 56 + /* Pointer to data buffer. 57 + * Driver keeps a pointer to the data buffer that it made available to 58 + * the Octeon device. 
Since the descriptor ring keeps physical (bus) 59 + * addresses, this field is required for the driver to keep track of 60 + * the virtual address pointers. The fields are operated by 61 + * OS-dependent routines. 62 + */ 63 + struct octep_rx_buffer { 64 + struct page *page; 65 + 66 + /* length from rx hardware descriptor after converting to cpu endian */ 67 + u64 len; 68 + }; 69 + 70 + #define OCTEP_OQ_RECVBUF_SIZE (sizeof(struct octep_rx_buffer)) 71 + 72 + /* Output Queue statistics. Each output queue has four stats fields. */ 73 + struct octep_oq_stats { 74 + /* Number of packets received from the Device. */ 75 + u64 packets; 76 + 77 + /* Number of bytes received from the Device. */ 78 + u64 bytes; 79 + 80 + /* Number of times failed to allocate buffers. */ 81 + u64 alloc_failures; 82 + }; 83 + 84 + #define OCTEP_OQ_STATS_SIZE (sizeof(struct octep_oq_stats)) 85 + 86 + /* Hardware interface Rx statistics */ 87 + struct octep_iface_rx_stats { 88 + /* Received packets */ 89 + u64 pkts; 90 + 91 + /* Octets of received packets */ 92 + u64 octets; 93 + 94 + /* Received PAUSE and Control packets */ 95 + u64 pause_pkts; 96 + 97 + /* Received PAUSE and Control octets */ 98 + u64 pause_octets; 99 + 100 + /* Filtered DMAC0 packets */ 101 + u64 dmac0_pkts; 102 + 103 + /* Filtered DMAC0 octets */ 104 + u64 dmac0_octets; 105 + 106 + /* Packets dropped due to RX FIFO full */ 107 + u64 dropped_pkts_fifo_full; 108 + 109 + /* Octets dropped due to RX FIFO full */ 110 + u64 dropped_octets_fifo_full; 111 + 112 + /* Error packets */ 113 + u64 err_pkts; 114 + 115 + /* Filtered DMAC1 packets */ 116 + u64 dmac1_pkts; 117 + 118 + /* Filtered DMAC1 octets */ 119 + u64 dmac1_octets; 120 + 121 + /* NCSI-bound packets dropped */ 122 + u64 ncsi_dropped_pkts; 123 + 124 + /* NCSI-bound octets dropped */ 125 + u64 ncsi_dropped_octets; 126 + 127 + /* Multicast packets received. */ 128 + u64 mcast_pkts; 129 + 130 + /* Broadcast packets received. 
*/ 131 + u64 bcast_pkts; 132 + 133 + }; 134 + 135 + /* The Descriptor Ring Output Queue structure. 136 + * This structure has all the information required to implement a 137 + * Octeon OQ. 138 + */ 139 + struct octep_oq { 140 + u32 q_no; 141 + 142 + struct octep_device *octep_dev; 143 + struct net_device *netdev; 144 + struct device *dev; 145 + 146 + struct napi_struct *napi; 147 + 148 + /* The receive buffer list. This list has the virtual addresses 149 + * of the buffers. 150 + */ 151 + struct octep_rx_buffer *buff_info; 152 + 153 + /* Pointer to the mapped packet credit register. 154 + * Host writes number of info/buffer ptrs available to this register 155 + */ 156 + u8 __iomem *pkts_credit_reg; 157 + 158 + /* Pointer to the mapped packet sent register. 159 + * Octeon writes the number of packets DMA'ed to host memory 160 + * in this register. 161 + */ 162 + u8 __iomem *pkts_sent_reg; 163 + 164 + /* Statistics for this OQ. */ 165 + struct octep_oq_stats stats; 166 + 167 + /* Packets pending to be processed */ 168 + u32 pkts_pending; 169 + u32 last_pkt_count; 170 + 171 + /* Index in the ring where the driver should read the next packet */ 172 + u32 host_read_idx; 173 + 174 + /* Number of descriptors in this ring. */ 175 + u32 max_count; 176 + u32 ring_size_mask; 177 + 178 + /* The number of descriptors pending refill. */ 179 + u32 refill_count; 180 + 181 + /* Index in the ring where the driver will refill the 182 + * descriptor's buffer 183 + */ 184 + u32 host_refill_idx; 185 + u32 refill_threshold; 186 + 187 + /* The size of each buffer pointed by the buffer pointer. */ 188 + u32 buffer_size; 189 + u32 max_single_buffer_size; 190 + 191 + /* The 8B aligned descriptor ring starts at this address. */ 192 + struct octep_oq_desc_hw *desc_ring; 193 + 194 + /* DMA mapped address of the OQ descriptor ring. */ 195 + dma_addr_t desc_ring_dma; 196 + }; 197 + 198 + #define OCTEP_OQ_SIZE (sizeof(struct octep_oq)) 199 + #endif /* _OCTEP_RX_H_ */
+335
drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Marvell Octeon EP (EndPoint) Ethernet Driver 3 + * 4 + * Copyright (C) 2020 Marvell. 5 + * 6 + */ 7 + 8 + #include <linux/pci.h> 9 + #include <linux/etherdevice.h> 10 + #include <linux/vmalloc.h> 11 + 12 + #include "octep_config.h" 13 + #include "octep_main.h" 14 + 15 + /* Reset various index of Tx queue data structure. */ 16 + static void octep_iq_reset_indices(struct octep_iq *iq) 17 + { 18 + iq->fill_cnt = 0; 19 + iq->host_write_index = 0; 20 + iq->octep_read_index = 0; 21 + iq->flush_index = 0; 22 + iq->pkts_processed = 0; 23 + iq->pkt_in_done = 0; 24 + atomic_set(&iq->instr_pending, 0); 25 + } 26 + 27 + /** 28 + * octep_iq_process_completions() - Process Tx queue completions. 29 + * 30 + * @iq: Octeon Tx queue data structure. 31 + * @budget: max number of completions to be processed in one invocation. 32 + */ 33 + int octep_iq_process_completions(struct octep_iq *iq, u16 budget) 34 + { 35 + u32 compl_pkts, compl_bytes, compl_sg; 36 + struct octep_device *oct = iq->octep_dev; 37 + struct octep_tx_buffer *tx_buffer; 38 + struct skb_shared_info *shinfo; 39 + u32 fi = iq->flush_index; 40 + struct sk_buff *skb; 41 + u8 frags, i; 42 + 43 + compl_pkts = 0; 44 + compl_sg = 0; 45 + compl_bytes = 0; 46 + iq->octep_read_index = oct->hw_ops.update_iq_read_idx(iq); 47 + 48 + while (likely(budget && (fi != iq->octep_read_index))) { 49 + tx_buffer = iq->buff_info + fi; 50 + skb = tx_buffer->skb; 51 + 52 + fi++; 53 + if (unlikely(fi == iq->max_count)) 54 + fi = 0; 55 + compl_bytes += skb->len; 56 + compl_pkts++; 57 + budget--; 58 + 59 + if (!tx_buffer->gather) { 60 + dma_unmap_single(iq->dev, tx_buffer->dma, 61 + tx_buffer->skb->len, DMA_TO_DEVICE); 62 + dev_kfree_skb_any(skb); 63 + continue; 64 + } 65 + 66 + /* Scatter/Gather */ 67 + shinfo = skb_shinfo(skb); 68 + frags = shinfo->nr_frags; 69 + compl_sg++; 70 + 71 + dma_unmap_single(iq->dev, tx_buffer->sglist[0].dma_ptr[0], 72 + tx_buffer->sglist[0].len[0], DMA_TO_DEVICE); 
73 + 74 + i = 1; /* entry 0 is main skb, unmapped above */ 75 + while (frags--) { 76 + dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3], 77 + tx_buffer->sglist[i >> 2].len[i & 3], DMA_TO_DEVICE); 78 + i++; 79 + } 80 + 81 + dev_kfree_skb_any(skb); 82 + } 83 + 84 + iq->pkts_processed += compl_pkts; 85 + atomic_sub(compl_pkts, &iq->instr_pending); 86 + iq->stats.instr_completed += compl_pkts; 87 + iq->stats.bytes_sent += compl_bytes; 88 + iq->stats.sgentry_sent += compl_sg; 89 + iq->flush_index = fi; 90 + 91 + netdev_tx_completed_queue(iq->netdev_q, compl_pkts, compl_bytes); 92 + 93 + if (unlikely(__netif_subqueue_stopped(iq->netdev, iq->q_no)) && 94 + ((iq->max_count - atomic_read(&iq->instr_pending)) > 95 + OCTEP_WAKE_QUEUE_THRESHOLD)) 96 + netif_wake_subqueue(iq->netdev, iq->q_no); 97 + return !budget; 98 + } 99 + 100 + /** 101 + * octep_iq_free_pending() - Free Tx buffers for pending completions. 102 + * 103 + * @iq: Octeon Tx queue data structure. 104 + */ 105 + static void octep_iq_free_pending(struct octep_iq *iq) 106 + { 107 + struct octep_tx_buffer *tx_buffer; 108 + struct skb_shared_info *shinfo; 109 + u32 fi = iq->flush_index; 110 + struct sk_buff *skb; 111 + u8 frags, i; 112 + 113 + while (fi != iq->host_write_index) { 114 + tx_buffer = iq->buff_info + fi; 115 + skb = tx_buffer->skb; 116 + 117 + fi++; 118 + if (unlikely(fi == iq->max_count)) 119 + fi = 0; 120 + 121 + if (!tx_buffer->gather) { 122 + dma_unmap_single(iq->dev, tx_buffer->dma, 123 + tx_buffer->skb->len, DMA_TO_DEVICE); 124 + dev_kfree_skb_any(skb); 125 + continue; 126 + } 127 + 128 + /* Scatter/Gather */ 129 + shinfo = skb_shinfo(skb); 130 + frags = shinfo->nr_frags; 131 + 132 + dma_unmap_single(iq->dev, 133 + tx_buffer->sglist[0].dma_ptr[0], 134 + tx_buffer->sglist[0].len[0], 135 + DMA_TO_DEVICE); 136 + 137 + i = 1; /* entry 0 is main skb, unmapped above */ 138 + while (frags--) { 139 + dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3], 140 + tx_buffer->sglist[i 
>> 2].len[i & 3], DMA_TO_DEVICE); 141 + i++; 142 + } 143 + 144 + dev_kfree_skb_any(skb); 145 + } 146 + 147 + atomic_set(&iq->instr_pending, 0); 148 + iq->flush_index = fi; 149 + netdev_tx_reset_queue(netdev_get_tx_queue(iq->netdev, iq->q_no)); 150 + } 151 + 152 + /** 153 + * octep_clean_iqs() - Clean Tx queues to shutdown the device. 154 + * 155 + * @oct: Octeon device private data structure. 156 + * 157 + * Free the buffers in Tx queue descriptors pending completion and 158 + * reset queue indices 159 + */ 160 + void octep_clean_iqs(struct octep_device *oct) 161 + { 162 + int i; 163 + 164 + for (i = 0; i < oct->num_iqs; i++) { 165 + octep_iq_free_pending(oct->iq[i]); 166 + octep_iq_reset_indices(oct->iq[i]); 167 + } 168 + } 169 + 170 + /** 171 + * octep_setup_iq() - Setup a Tx queue. 172 + * 173 + * @oct: Octeon device private data structure. 174 + * @q_no: Tx queue number to be setup. 175 + * 176 + * Allocate resources for a Tx queue. 177 + */ 178 + static int octep_setup_iq(struct octep_device *oct, int q_no) 179 + { 180 + u32 desc_ring_size, buff_info_size, sglist_size; 181 + struct octep_iq *iq; 182 + int i; 183 + 184 + iq = vzalloc(sizeof(*iq)); 185 + if (!iq) 186 + goto iq_alloc_err; 187 + oct->iq[q_no] = iq; 188 + 189 + iq->octep_dev = oct; 190 + iq->netdev = oct->netdev; 191 + iq->dev = &oct->pdev->dev; 192 + iq->q_no = q_no; 193 + iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf); 194 + iq->ring_size_mask = iq->max_count - 1; 195 + iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf); 196 + iq->netdev_q = netdev_get_tx_queue(iq->netdev, q_no); 197 + 198 + /* Allocate memory for hardware queue descriptors */ 199 + desc_ring_size = OCTEP_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf); 200 + iq->desc_ring = dma_alloc_coherent(iq->dev, desc_ring_size, 201 + &iq->desc_ring_dma, GFP_KERNEL); 202 + if (unlikely(!iq->desc_ring)) { 203 + dev_err(iq->dev, 204 + "Failed to allocate DMA memory for IQ-%d\n", q_no); 205 + goto desc_dma_alloc_err; 206 + } 207 + 208 + /* 
Allocate memory for hardware SGLIST descriptors */ 209 + sglist_size = OCTEP_SGLIST_SIZE_PER_PKT * 210 + CFG_GET_IQ_NUM_DESC(oct->conf); 211 + iq->sglist = dma_alloc_coherent(iq->dev, sglist_size, 212 + &iq->sglist_dma, GFP_KERNEL); 213 + if (unlikely(!iq->sglist)) { 214 + dev_err(iq->dev, 215 + "Failed to allocate DMA memory for IQ-%d SGLIST\n", 216 + q_no); 217 + goto sglist_alloc_err; 218 + } 219 + 220 + /* allocate memory to manage Tx packets pending completion */ 221 + buff_info_size = OCTEP_IQ_TXBUFF_INFO_SIZE * iq->max_count; 222 + iq->buff_info = vzalloc(buff_info_size); 223 + if (!iq->buff_info) { 224 + dev_err(iq->dev, 225 + "Failed to allocate buff info for IQ-%d\n", q_no); 226 + goto buff_info_err; 227 + } 228 + 229 + /* Setup sglist addresses in tx_buffer entries */ 230 + for (i = 0; i < CFG_GET_IQ_NUM_DESC(oct->conf); i++) { 231 + struct octep_tx_buffer *tx_buffer; 232 + 233 + tx_buffer = &iq->buff_info[i]; 234 + tx_buffer->sglist = 235 + &iq->sglist[i * OCTEP_SGLIST_ENTRIES_PER_PKT]; 236 + tx_buffer->sglist_dma = 237 + iq->sglist_dma + (i * OCTEP_SGLIST_SIZE_PER_PKT); 238 + } 239 + 240 + octep_iq_reset_indices(iq); 241 + oct->hw_ops.setup_iq_regs(oct, q_no); 242 + 243 + oct->num_iqs++; 244 + return 0; 245 + 246 + buff_info_err: 247 + dma_free_coherent(iq->dev, sglist_size, iq->sglist, iq->sglist_dma); 248 + sglist_alloc_err: 249 + dma_free_coherent(iq->dev, desc_ring_size, 250 + iq->desc_ring, iq->desc_ring_dma); 251 + desc_dma_alloc_err: 252 + vfree(iq); 253 + oct->iq[q_no] = NULL; 254 + iq_alloc_err: 255 + return -1; 256 + } 257 + 258 + /** 259 + * octep_free_iq() - Free Tx queue resources. 260 + * 261 + * @iq: Octeon Tx queue data structure. 262 + * 263 + * Free all the resources allocated for a Tx queue. 
264 + */ 265 + static void octep_free_iq(struct octep_iq *iq) 266 + { 267 + struct octep_device *oct = iq->octep_dev; 268 + u64 desc_ring_size, sglist_size; 269 + int q_no = iq->q_no; 270 + 271 + desc_ring_size = OCTEP_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf); 272 + 273 + if (iq->buff_info) 274 + vfree(iq->buff_info); 275 + 276 + if (iq->desc_ring) 277 + dma_free_coherent(iq->dev, desc_ring_size, 278 + iq->desc_ring, iq->desc_ring_dma); 279 + 280 + sglist_size = OCTEP_SGLIST_SIZE_PER_PKT * 281 + CFG_GET_IQ_NUM_DESC(oct->conf); 282 + if (iq->sglist) 283 + dma_free_coherent(iq->dev, sglist_size, 284 + iq->sglist, iq->sglist_dma); 285 + 286 + vfree(iq); 287 + oct->iq[q_no] = NULL; 288 + oct->num_iqs--; 289 + } 290 + 291 + /** 292 + * octep_setup_iqs() - setup resources for all Tx queues. 293 + * 294 + * @oct: Octeon device private data structure. 295 + */ 296 + int octep_setup_iqs(struct octep_device *oct) 297 + { 298 + int i; 299 + 300 + oct->num_iqs = 0; 301 + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) { 302 + if (octep_setup_iq(oct, i)) { 303 + dev_err(&oct->pdev->dev, 304 + "Failed to setup IQ(TxQ)-%d.\n", i); 305 + goto iq_setup_err; 306 + } 307 + dev_dbg(&oct->pdev->dev, "Successfully setup IQ(TxQ)-%d.\n", i); 308 + } 309 + 310 + return 0; 311 + 312 + iq_setup_err: 313 + while (i) { 314 + i--; 315 + octep_free_iq(oct->iq[i]); 316 + } 317 + return -1; 318 + } 319 + 320 + /** 321 + * octep_free_iqs() - Free resources of all Tx queues. 322 + * 323 + * @oct: Octeon device private data structure. 324 + */ 325 + void octep_free_iqs(struct octep_device *oct) 326 + { 327 + int i; 328 + 329 + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) { 330 + octep_free_iq(oct->iq[i]); 331 + dev_dbg(&oct->pdev->dev, 332 + "Successfully destroyed IQ(TxQ)-%d.\n", i); 333 + } 334 + oct->num_iqs = 0; 335 + }
+284
drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Marvell Octeon EP (EndPoint) Ethernet Driver 3 + * 4 + * Copyright (C) 2020 Marvell. 5 + * 6 + */ 7 + 8 + #ifndef _OCTEP_TX_H_ 9 + #define _OCTEP_TX_H_ 10 + 11 + #define IQ_SEND_OK 0 12 + #define IQ_SEND_STOP 1 13 + #define IQ_SEND_FAILED -1 14 + 15 + #define TX_BUFTYPE_NONE 0 16 + #define TX_BUFTYPE_NET 1 17 + #define TX_BUFTYPE_NET_SG 2 18 + #define NUM_TX_BUFTYPES 3 19 + 20 + /* Hardware format for Scatter/Gather list */ 21 + struct octep_tx_sglist_desc { 22 + u16 len[4]; 23 + dma_addr_t dma_ptr[4]; 24 + }; 25 + 26 + /* Each Scatter/Gather entry sent to hardwar hold four pointers. 27 + * So, number of entries required is (MAX_SKB_FRAGS + 1)/4, where '+1' 28 + * is for main skb which also goes as a gather buffer to Octeon hardware. 29 + * To allocate sufficient SGLIST entries for a packet with max fragments, 30 + * align by adding 3 before calcuating max SGLIST entries per packet. 31 + */ 32 + #define OCTEP_SGLIST_ENTRIES_PER_PKT ((MAX_SKB_FRAGS + 1 + 3) / 4) 33 + #define OCTEP_SGLIST_SIZE_PER_PKT \ 34 + (OCTEP_SGLIST_ENTRIES_PER_PKT * sizeof(struct octep_tx_sglist_desc)) 35 + 36 + struct octep_tx_buffer { 37 + struct sk_buff *skb; 38 + dma_addr_t dma; 39 + struct octep_tx_sglist_desc *sglist; 40 + dma_addr_t sglist_dma; 41 + u8 gather; 42 + }; 43 + 44 + #define OCTEP_IQ_TXBUFF_INFO_SIZE (sizeof(struct octep_tx_buffer)) 45 + 46 + /* Hardware interface Tx statistics */ 47 + struct octep_iface_tx_stats { 48 + /* Packets dropped due to excessive collisions */ 49 + u64 xscol; 50 + 51 + /* Packets dropped due to excessive deferral */ 52 + u64 xsdef; 53 + 54 + /* Packets sent that experienced multiple collisions before successful 55 + * transmission 56 + */ 57 + u64 mcol; 58 + 59 + /* Packets sent that experienced a single collision before successful 60 + * transmission 61 + */ 62 + u64 scol; 63 + 64 + /* Total octets sent on the interface */ 65 + u64 octs; 66 + 67 + /* Total frames sent on the interface */ 68 + u64 
pkts; 69 + 70 + /* Packets sent with an octet count < 64 */ 71 + u64 hist_lt64; 72 + 73 + /* Packets sent with an octet count == 64 */ 74 + u64 hist_eq64; 75 + 76 + /* Packets sent with an octet count of 65–127 */ 77 + u64 hist_65to127; 78 + 79 + /* Packets sent with an octet count of 128–255 */ 80 + u64 hist_128to255; 81 + 82 + /* Packets sent with an octet count of 256–511 */ 83 + u64 hist_256to511; 84 + 85 + /* Packets sent with an octet count of 512–1023 */ 86 + u64 hist_512to1023; 87 + 88 + /* Packets sent with an octet count of 1024-1518 */ 89 + u64 hist_1024to1518; 90 + 91 + /* Packets sent with an octet count of > 1518 */ 92 + u64 hist_gt1518; 93 + 94 + /* Packets sent to a broadcast DMAC */ 95 + u64 bcst; 96 + 97 + /* Packets sent to the multicast DMAC */ 98 + u64 mcst; 99 + 100 + /* Packets sent that experienced a transmit underflow and were 101 + * truncated 102 + */ 103 + u64 undflw; 104 + 105 + /* Control/PAUSE packets sent */ 106 + u64 ctl; 107 + }; 108 + 109 + /* Input Queue statistics. Each input queue has four stats fields. */ 110 + struct octep_iq_stats { 111 + /* Instructions posted to this queue. */ 112 + u64 instr_posted; 113 + 114 + /* Instructions copied by hardware for processing. */ 115 + u64 instr_completed; 116 + 117 + /* Instructions that could not be processed. */ 118 + u64 instr_dropped; 119 + 120 + /* Bytes sent through this queue. */ 121 + u64 bytes_sent; 122 + 123 + /* Gather entries sent through this queue. */ 124 + u64 sgentry_sent; 125 + 126 + /* Number of transmit failures due to TX_BUSY */ 127 + u64 tx_busy; 128 + 129 + /* Number of times the queue is restarted */ 130 + u64 restart_cnt; 131 + }; 132 + 133 + /* The instruction (input) queue. 134 + * The input queue is used to post raw (instruction) mode data or packet 135 + * data to Octeon device from the host. Each input queue (up to 4) for 136 + * a Octeon device has one such structure to represent it. 
137 + */ 138 + struct octep_iq { 139 + u32 q_no; 140 + 141 + struct octep_device *octep_dev; 142 + struct net_device *netdev; 143 + struct device *dev; 144 + struct netdev_queue *netdev_q; 145 + 146 + /* Index in input ring where driver should write the next packet */ 147 + u16 host_write_index; 148 + 149 + /* Index in input ring where Octeon is expected to read next packet */ 150 + u16 octep_read_index; 151 + 152 + /* This index aids in finding the window in the queue where Octeon 153 + * has read the commands. 154 + */ 155 + u16 flush_index; 156 + 157 + /* Statistics for this input queue. */ 158 + struct octep_iq_stats stats; 159 + 160 + /* This field keeps track of the instructions pending in this queue. */ 161 + atomic_t instr_pending; 162 + 163 + /* Pointer to the Virtual Base addr of the input ring. */ 164 + struct octep_tx_desc_hw *desc_ring; 165 + 166 + /* DMA mapped base address of the input descriptor ring. */ 167 + dma_addr_t desc_ring_dma; 168 + 169 + /* Info of Tx buffers pending completion. */ 170 + struct octep_tx_buffer *buff_info; 171 + 172 + /* Base pointer to Scatter/Gather lists for all ring descriptors. */ 173 + struct octep_tx_sglist_desc *sglist; 174 + 175 + /* DMA mapped addr of Scatter Gather Lists */ 176 + dma_addr_t sglist_dma; 177 + 178 + /* Octeon doorbell register for the ring. */ 179 + u8 __iomem *doorbell_reg; 180 + 181 + /* Octeon instruction count register for this ring. */ 182 + u8 __iomem *inst_cnt_reg; 183 + 184 + /* interrupt level register for this ring */ 185 + u8 __iomem *intr_lvl_reg; 186 + 187 + /* Maximum no. of instructions in this queue. */ 188 + u32 max_count; 189 + u32 ring_size_mask; 190 + 191 + u32 pkt_in_done; 192 + u32 pkts_processed; 193 + 194 + u32 status; 195 + 196 + /* Number of instructions pending to be posted to Octeon. */ 197 + u32 fill_cnt; 198 + 199 + /* The max. number of instructions that can be held pending by the 200 + * driver before ringing doorbell. 
201 + */ 202 + u32 fill_threshold; 203 + }; 204 + 205 + /* Hardware Tx Instruction Header */ 206 + struct octep_instr_hdr { 207 + /* Data Len */ 208 + u64 tlen:16; 209 + 210 + /* Reserved */ 211 + u64 rsvd:20; 212 + 213 + /* PKIND for SDP */ 214 + u64 pkind:6; 215 + 216 + /* Front Data size */ 217 + u64 fsz:6; 218 + 219 + /* No. of entries in gather list */ 220 + u64 gsz:14; 221 + 222 + /* Gather indicator 1=gather*/ 223 + u64 gather:1; 224 + 225 + /* Reserved3 */ 226 + u64 reserved3:1; 227 + }; 228 + 229 + /* Hardware Tx completion response header */ 230 + struct octep_instr_resp_hdr { 231 + /* Request ID */ 232 + u64 rid:16; 233 + 234 + /* PCIe port to use for response */ 235 + u64 pcie_port:3; 236 + 237 + /* Scatter indicator 1=scatter */ 238 + u64 scatter:1; 239 + 240 + /* Size of Expected result OR no. of entries in scatter list */ 241 + u64 rlenssz:14; 242 + 243 + /* Desired destination port for result */ 244 + u64 dport:6; 245 + 246 + /* Opcode Specific parameters */ 247 + u64 param:8; 248 + 249 + /* Opcode for the return packet */ 250 + u64 opcode:16; 251 + }; 252 + 253 + /* 64-byte Tx instruction format. 254 + * Format of instruction for a 64-byte mode input queue. 255 + * 256 + * only first 16-bytes (dptr and ih) are mandatory; rest are optional 257 + * and filled by the driver based on firmware/hardware capabilities. 258 + * These optional headers together called Front Data and its size is 259 + * described by ih->fsz. 260 + */ 261 + struct octep_tx_desc_hw { 262 + /* Pointer where the input data is available. */ 263 + u64 dptr; 264 + 265 + /* Instruction Header. */ 266 + union { 267 + struct octep_instr_hdr ih; 268 + u64 ih64; 269 + }; 270 + 271 + /* Pointer where the response for a RAW mode packet will be written 272 + * by Octeon. 273 + */ 274 + u64 rptr; 275 + 276 + /* Input Instruction Response Header. */ 277 + struct octep_instr_resp_hdr irh; 278 + 279 + /* Additional headers available in a 64-byte instruction. 
*/ 280 + u64 exhdr[4]; 281 + }; 282 + 283 + #define OCTEP_IQ_DESC_SIZE (sizeof(struct octep_tx_desc_hw)) 284 + #endif /* _OCTEP_TX_H_ */