Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'octeon_ep_vf-driver'

Shinas Rasheed says:

====================
add octeon_ep_vf driver

This driver implements networking functionality of Marvell's Octeon
PCI Endpoint NIC VF.

This driver supports the following devices:
* Network controller: Cavium, Inc. Device b203
* Network controller: Cavium, Inc. Device b403
* Network controller: Cavium, Inc. Device b103
* Network controller: Cavium, Inc. Device b903
* Network controller: Cavium, Inc. Device ba03
* Network controller: Cavium, Inc. Device bc03
* Network controller: Cavium, Inc. Device bd03

Changes:
V2:
- Removed linux/version.h header file from inclusion in
octep_vf_main.c
- Corrected Makefile entry to include building octep_vf_mbox.c in
[6/8] patch.
- Removed redundant vzalloc pointer cast and vfree pointer check in
[6/8] patch.

V1: https://lore.kernel.org/all/20231221092844.2885872-1-srasheed@marvell.com/
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+5342
+1
Documentation/networking/device_drivers/ethernet/index.rst
··· 42 42 intel/ice 43 43 marvell/octeontx2 44 44 marvell/octeon_ep 45 + marvell/octeon_ep_vf 45 46 mellanox/mlx5/index 46 47 microsoft/netvsc 47 48 neterion/s2io
+24
Documentation/networking/device_drivers/ethernet/marvell/octeon_ep_vf.rst
··· 1 + .. SPDX-License-Identifier: GPL-2.0+ 2 + 3 + ======================================================================= 4 + Linux kernel networking driver for Marvell's Octeon PCI Endpoint NIC VF 5 + ======================================================================= 6 + 7 + Network driver for Marvell's Octeon PCI EndPoint NIC VF. 8 + Copyright (c) 2020 Marvell International Ltd. 9 + 10 + Overview 11 + ======== 12 + This driver implements networking functionality of Marvell's Octeon PCI 13 + EndPoint NIC VF. 14 + 15 + Supported Devices 16 + ================= 17 + Currently, this driver supports the following devices: 18 + * Network controller: Cavium, Inc. Device b203 19 + * Network controller: Cavium, Inc. Device b403 20 + * Network controller: Cavium, Inc. Device b103 21 + * Network controller: Cavium, Inc. Device b903 22 + * Network controller: Cavium, Inc. Device ba03 23 + * Network controller: Cavium, Inc. Device bc03 24 + * Network controller: Cavium, Inc. Device bd03
+9
MAINTAINERS
··· 12861 12861 S: Supported 12862 12862 F: drivers/net/ethernet/marvell/octeon_ep 12863 12863 12864 + MARVELL OCTEON ENDPOINT VF DRIVER 12865 + M: Veerasenareddy Burru <vburru@marvell.com> 12866 + M: Sathesh Edara <sedara@marvell.com> 12867 + M: Shinas Rasheed <srasheed@marvell.com> 12868 + M: Satananda Burla <sburla@marvell.com> 12869 + L: netdev@vger.kernel.org 12870 + S: Supported 12871 + F: drivers/net/ethernet/marvell/octeon_ep_vf 12872 + 12864 12873 MARVELL OCTEONTX2 PHYSICAL FUNCTION DRIVER 12865 12874 M: Sunil Goutham <sgoutham@marvell.com> 12866 12875 M: Geetha sowjanya <gakula@marvell.com>
+1
drivers/net/ethernet/marvell/Kconfig
··· 180 180 181 181 source "drivers/net/ethernet/marvell/octeontx2/Kconfig" 182 182 source "drivers/net/ethernet/marvell/octeon_ep/Kconfig" 183 + source "drivers/net/ethernet/marvell/octeon_ep_vf/Kconfig" 183 184 source "drivers/net/ethernet/marvell/prestera/Kconfig" 184 185 185 186 endif # NET_VENDOR_MARVELL
+1
drivers/net/ethernet/marvell/Makefile
··· 12 12 obj-$(CONFIG_SKGE) += skge.o 13 13 obj-$(CONFIG_SKY2) += sky2.o 14 14 obj-y += octeon_ep/ 15 + obj-y += octeon_ep_vf/ 15 16 obj-y += octeontx2/ 16 17 obj-y += prestera/
+19
drivers/net/ethernet/marvell/octeon_ep_vf/Kconfig
··· 1 + # SPDX-License-Identifier: GPL-2.0-only 2 + # 3 + # Marvell's Octeon PCI Endpoint NIC VF Driver Configuration 4 + # 5 + 6 + config OCTEON_EP_VF 7 + tristate "Marvell Octeon PCI Endpoint NIC VF Driver" 8 + depends on 64BIT 9 + depends on PCI 10 + help 11 + This driver supports networking functionality of Marvell's 12 + Octeon PCI Endpoint NIC VF. 13 + 14 + To know the list of devices supported by this driver, refer 15 + to the documentation in 16 + <file:Documentation/networking/device_drivers/ethernet/marvell/octeon_ep_vf.rst>. 17 + 18 + To compile this driver as a module, choose M here. Name of the 19 + module is octeon_ep_vf.
+10
drivers/net/ethernet/marvell/octeon_ep_vf/Makefile
··· 1 + # SPDX-License-Identifier: GPL-2.0 2 + # 3 + # Network driver for Marvell's Octeon PCI Endpoint NIC VF 4 + # 5 + 6 + obj-$(CONFIG_OCTEON_EP_VF) += octeon_ep_vf.o 7 + 8 + octeon_ep_vf-y := octep_vf_main.o octep_vf_cn9k.o octep_vf_cnxk.o \ 9 + octep_vf_tx.o octep_vf_rx.o octep_vf_mbox.o \ 10 + octep_vf_ethtool.o
+488
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Marvell Octeon EP (EndPoint) VF Ethernet Driver 3 + * 4 + * Copyright (C) 2020 Marvell. 5 + * 6 + */ 7 + 8 + #include <linux/pci.h> 9 + #include <linux/netdevice.h> 10 + #include <linux/etherdevice.h> 11 + 12 + #include "octep_vf_config.h" 13 + #include "octep_vf_main.h" 14 + #include "octep_vf_regs_cn9k.h" 15 + 16 + /* Dump useful hardware IQ/OQ CSRs for debug purpose */ 17 + static void cn93_vf_dump_q_regs(struct octep_vf_device *oct, int qno) 18 + { 19 + struct device *dev = &oct->pdev->dev; 20 + 21 + dev_info(dev, "IQ-%d register dump\n", qno); 22 + dev_info(dev, "R[%d]_IN_INSTR_DBELL[0x%llx]: 0x%016llx\n", 23 + qno, CN93_VF_SDP_R_IN_INSTR_DBELL(qno), 24 + octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INSTR_DBELL(qno))); 25 + dev_info(dev, "R[%d]_IN_CONTROL[0x%llx]: 0x%016llx\n", 26 + qno, CN93_VF_SDP_R_IN_CONTROL(qno), 27 + octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(qno))); 28 + dev_info(dev, "R[%d]_IN_ENABLE[0x%llx]: 0x%016llx\n", 29 + qno, CN93_VF_SDP_R_IN_ENABLE(qno), 30 + octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(qno))); 31 + dev_info(dev, "R[%d]_IN_INSTR_BADDR[0x%llx]: 0x%016llx\n", 32 + qno, CN93_VF_SDP_R_IN_INSTR_BADDR(qno), 33 + octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INSTR_BADDR(qno))); 34 + dev_info(dev, "R[%d]_IN_INSTR_RSIZE[0x%llx]: 0x%016llx\n", 35 + qno, CN93_VF_SDP_R_IN_INSTR_RSIZE(qno), 36 + octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INSTR_RSIZE(qno))); 37 + dev_info(dev, "R[%d]_IN_CNTS[0x%llx]: 0x%016llx\n", 38 + qno, CN93_VF_SDP_R_IN_CNTS(qno), 39 + octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CNTS(qno))); 40 + dev_info(dev, "R[%d]_IN_INT_LEVELS[0x%llx]: 0x%016llx\n", 41 + qno, CN93_VF_SDP_R_IN_INT_LEVELS(qno), 42 + octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(qno))); 43 + dev_info(dev, "R[%d]_IN_PKT_CNT[0x%llx]: 0x%016llx\n", 44 + qno, CN93_VF_SDP_R_IN_PKT_CNT(qno), 45 + octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_PKT_CNT(qno))); 46 + dev_info(dev, "R[%d]_IN_BYTE_CNT[0x%llx]: 
0x%016llx\n", 47 + qno, CN93_VF_SDP_R_IN_BYTE_CNT(qno), 48 + octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_BYTE_CNT(qno))); 49 + 50 + dev_info(dev, "OQ-%d register dump\n", qno); 51 + dev_info(dev, "R[%d]_OUT_SLIST_DBELL[0x%llx]: 0x%016llx\n", 52 + qno, CN93_VF_SDP_R_OUT_SLIST_DBELL(qno), 53 + octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_DBELL(qno))); 54 + dev_info(dev, "R[%d]_OUT_CONTROL[0x%llx]: 0x%016llx\n", 55 + qno, CN93_VF_SDP_R_OUT_CONTROL(qno), 56 + octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(qno))); 57 + dev_info(dev, "R[%d]_OUT_ENABLE[0x%llx]: 0x%016llx\n", 58 + qno, CN93_VF_SDP_R_OUT_ENABLE(qno), 59 + octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(qno))); 60 + dev_info(dev, "R[%d]_OUT_SLIST_BADDR[0x%llx]: 0x%016llx\n", 61 + qno, CN93_VF_SDP_R_OUT_SLIST_BADDR(qno), 62 + octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_BADDR(qno))); 63 + dev_info(dev, "R[%d]_OUT_SLIST_RSIZE[0x%llx]: 0x%016llx\n", 64 + qno, CN93_VF_SDP_R_OUT_SLIST_RSIZE(qno), 65 + octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_RSIZE(qno))); 66 + dev_info(dev, "R[%d]_OUT_CNTS[0x%llx]: 0x%016llx\n", 67 + qno, CN93_VF_SDP_R_OUT_CNTS(qno), 68 + octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CNTS(qno))); 69 + dev_info(dev, "R[%d]_OUT_INT_LEVELS[0x%llx]: 0x%016llx\n", 70 + qno, CN93_VF_SDP_R_OUT_INT_LEVELS(qno), 71 + octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(qno))); 72 + dev_info(dev, "R[%d]_OUT_PKT_CNT[0x%llx]: 0x%016llx\n", 73 + qno, CN93_VF_SDP_R_OUT_PKT_CNT(qno), 74 + octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_PKT_CNT(qno))); 75 + dev_info(dev, "R[%d]_OUT_BYTE_CNT[0x%llx]: 0x%016llx\n", 76 + qno, CN93_VF_SDP_R_OUT_BYTE_CNT(qno), 77 + octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_BYTE_CNT(qno))); 78 + } 79 + 80 + /* Reset Hardware Tx queue */ 81 + static int cn93_vf_reset_iq(struct octep_vf_device *oct, int q_no) 82 + { 83 + u64 val = 0ULL; 84 + 85 + dev_dbg(&oct->pdev->dev, "Reset VF IQ-%d\n", q_no); 86 + 87 + /* Disable the Tx/Instruction Ring */ 88 + 
octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(q_no), val); 89 + 90 + /* clear the Instruction Ring packet/byte counts and doorbell CSRs */ 91 + octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q_no), val); 92 + octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_PKT_CNT(q_no), val); 93 + octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_BYTE_CNT(q_no), val); 94 + octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_BADDR(q_no), val); 95 + octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_RSIZE(q_no), val); 96 + 97 + val = 0xFFFFFFFF; 98 + octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_DBELL(q_no), val); 99 + 100 + val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CNTS(q_no)); 101 + octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_CNTS(q_no), val & 0xFFFFFFFF); 102 + 103 + return 0; 104 + } 105 + 106 + /* Reset Hardware Rx queue */ 107 + static void cn93_vf_reset_oq(struct octep_vf_device *oct, int q_no) 108 + { 109 + u64 val = 0ULL; 110 + 111 + /* Disable Output (Rx) Ring */ 112 + octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(q_no), val); 113 + 114 + /* Clear count CSRs */ 115 + val = octep_vf_read_csr(oct, CN93_VF_SDP_R_OUT_CNTS(q_no)); 116 + octep_vf_write_csr(oct, CN93_VF_SDP_R_OUT_CNTS(q_no), val); 117 + 118 + octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_PKT_CNT(q_no), 0xFFFFFFFFFULL); 119 + octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_DBELL(q_no), 0xFFFFFFFF); 120 + } 121 + 122 + /* Reset all hardware Tx/Rx queues */ 123 + static void octep_vf_reset_io_queues_cn93(struct octep_vf_device *oct) 124 + { 125 + struct pci_dev *pdev = oct->pdev; 126 + int q; 127 + 128 + dev_dbg(&pdev->dev, "Reset OCTEP_CN93 VF IO Queues\n"); 129 + 130 + for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) { 131 + cn93_vf_reset_iq(oct, q); 132 + cn93_vf_reset_oq(oct, q); 133 + } 134 + } 135 + 136 + /* Initialize configuration limits and initial active config */ 137 + static void octep_vf_init_config_cn93_vf(struct octep_vf_device *oct) 138 + { 139 + struct octep_vf_config *conf = 
oct->conf; 140 + u64 reg_val; 141 + 142 + reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(0)); 143 + conf->ring_cfg.max_io_rings = (reg_val >> CN93_VF_R_IN_CTL_RPVF_POS) & 144 + CN93_VF_R_IN_CTL_RPVF_MASK; 145 + conf->ring_cfg.active_io_rings = conf->ring_cfg.max_io_rings; 146 + 147 + conf->iq.num_descs = OCTEP_VF_IQ_MAX_DESCRIPTORS; 148 + conf->iq.instr_type = OCTEP_VF_64BYTE_INSTR; 149 + conf->iq.db_min = OCTEP_VF_DB_MIN; 150 + conf->iq.intr_threshold = OCTEP_VF_IQ_INTR_THRESHOLD; 151 + 152 + conf->oq.num_descs = OCTEP_VF_OQ_MAX_DESCRIPTORS; 153 + conf->oq.buf_size = OCTEP_VF_OQ_BUF_SIZE; 154 + conf->oq.refill_threshold = OCTEP_VF_OQ_REFILL_THRESHOLD; 155 + conf->oq.oq_intr_pkt = OCTEP_VF_OQ_INTR_PKT_THRESHOLD; 156 + conf->oq.oq_intr_time = OCTEP_VF_OQ_INTR_TIME_THRESHOLD; 157 + 158 + conf->msix_cfg.ioq_msix = conf->ring_cfg.active_io_rings; 159 + } 160 + 161 + /* Setup registers for a hardware Tx Queue */ 162 + static void octep_vf_setup_iq_regs_cn93(struct octep_vf_device *oct, int iq_no) 163 + { 164 + struct octep_vf_iq *iq = oct->iq[iq_no]; 165 + u32 reset_instr_cnt; 166 + u64 reg_val; 167 + 168 + reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(iq_no)); 169 + 170 + /* wait for IDLE to set to 1 */ 171 + if (!(reg_val & CN93_VF_R_IN_CTL_IDLE)) { 172 + do { 173 + reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(iq_no)); 174 + } while (!(reg_val & CN93_VF_R_IN_CTL_IDLE)); 175 + } 176 + reg_val |= CN93_VF_R_IN_CTL_RDSIZE; 177 + reg_val |= CN93_VF_R_IN_CTL_IS_64B; 178 + reg_val |= CN93_VF_R_IN_CTL_ESR; 179 + octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(iq_no), reg_val); 180 + 181 + /* Write the start of the input queue's ring and its size */ 182 + octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_BADDR(iq_no), iq->desc_ring_dma); 183 + octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_RSIZE(iq_no), iq->max_count); 184 + 185 + /* Remember the doorbell & instruction count register addr for this queue */ 186 + iq->doorbell_reg = 
oct->mmio.hw_addr + CN93_VF_SDP_R_IN_INSTR_DBELL(iq_no); 187 + iq->inst_cnt_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_IN_CNTS(iq_no); 188 + iq->intr_lvl_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_IN_INT_LEVELS(iq_no); 189 + 190 + /* Store the current instruction counter (used in flush_iq calculation) */ 191 + reset_instr_cnt = readl(iq->inst_cnt_reg); 192 + writel(reset_instr_cnt, iq->inst_cnt_reg); 193 + 194 + /* INTR_THRESHOLD is set to max(FFFFFFFF) to disable the INTR */ 195 + reg_val = CFG_GET_IQ_INTR_THRESHOLD(oct->conf) & 0xffffffff; 196 + octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(iq_no), reg_val); 197 + } 198 + 199 + /* Setup registers for a hardware Rx Queue */ 200 + static void octep_vf_setup_oq_regs_cn93(struct octep_vf_device *oct, int oq_no) 201 + { 202 + struct octep_vf_oq *oq = oct->oq[oq_no]; 203 + u32 time_threshold = 0; 204 + u64 oq_ctl = 0ULL; 205 + u64 reg_val; 206 + 207 + reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no)); 208 + 209 + /* wait for IDLE to set to 1 */ 210 + if (!(reg_val & CN93_VF_R_OUT_CTL_IDLE)) { 211 + do { 212 + reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no)); 213 + } while (!(reg_val & CN93_VF_R_OUT_CTL_IDLE)); 214 + } 215 + 216 + reg_val &= ~(CN93_VF_R_OUT_CTL_IMODE); 217 + reg_val &= ~(CN93_VF_R_OUT_CTL_ROR_P); 218 + reg_val &= ~(CN93_VF_R_OUT_CTL_NSR_P); 219 + reg_val &= ~(CN93_VF_R_OUT_CTL_ROR_I); 220 + reg_val &= ~(CN93_VF_R_OUT_CTL_NSR_I); 221 + reg_val &= ~(CN93_VF_R_OUT_CTL_ES_I); 222 + reg_val &= ~(CN93_VF_R_OUT_CTL_ROR_D); 223 + reg_val &= ~(CN93_VF_R_OUT_CTL_NSR_D); 224 + reg_val &= ~(CN93_VF_R_OUT_CTL_ES_D); 225 + reg_val |= (CN93_VF_R_OUT_CTL_ES_P); 226 + 227 + octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no), reg_val); 228 + octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_BADDR(oq_no), oq->desc_ring_dma); 229 + octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_RSIZE(oq_no), oq->max_count); 230 + 231 + oq_ctl = octep_vf_read_csr64(oct, 
CN93_VF_SDP_R_OUT_CONTROL(oq_no)); 232 + oq_ctl &= ~0x7fffffULL; //clear the ISIZE and BSIZE (22-0) 233 + oq_ctl |= (oq->buffer_size & 0xffff); //populate the BSIZE (15-0) 234 + octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no), oq_ctl); 235 + 236 + /* Get the mapped address of the pkt_sent and pkts_credit regs */ 237 + oq->pkts_sent_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_OUT_CNTS(oq_no); 238 + oq->pkts_credit_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_OUT_SLIST_DBELL(oq_no); 239 + 240 + time_threshold = CFG_GET_OQ_INTR_TIME(oct->conf); 241 + reg_val = ((u64)time_threshold << 32) | CFG_GET_OQ_INTR_PKT(oct->conf); 242 + octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(oq_no), reg_val); 243 + } 244 + 245 + /* Setup registers for a VF mailbox */ 246 + static void octep_vf_setup_mbox_regs_cn93(struct octep_vf_device *oct, int q_no) 247 + { 248 + struct octep_vf_mbox *mbox = oct->mbox; 249 + 250 + /* PF to VF DATA reg. VF reads from this reg */ 251 + mbox->mbox_read_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_MBOX_PF_VF_DATA(q_no); 252 + 253 + /* VF mbox interrupt reg */ 254 + mbox->mbox_int_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_MBOX_PF_VF_INT(q_no); 255 + 256 + /* VF to PF DATA reg. 
VF writes into this reg */ 257 + mbox->mbox_write_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_MBOX_VF_PF_DATA(q_no); 258 + } 259 + 260 + /* Mailbox Interrupt handler */ 261 + static void cn93_handle_vf_mbox_intr(struct octep_vf_device *oct) 262 + { 263 + if (oct->mbox) 264 + schedule_work(&oct->mbox->wk.work); 265 + else 266 + dev_err(&oct->pdev->dev, "cannot schedule work on invalid mbox\n"); 267 + } 268 + 269 + /* Tx/Rx queue interrupt handler */ 270 + static irqreturn_t octep_vf_ioq_intr_handler_cn93(void *data) 271 + { 272 + struct octep_vf_ioq_vector *vector = (struct octep_vf_ioq_vector *)data; 273 + struct octep_vf_oq *oq = vector->oq; 274 + struct octep_vf_device *oct = vector->octep_vf_dev; 275 + u64 reg_val = 0ULL; 276 + 277 + /* Mailbox interrupt arrives along with interrupt of tx/rx ring pair 0 */ 278 + if (oq->q_no == 0) { 279 + reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_MBOX_PF_VF_INT(0)); 280 + if (reg_val & CN93_VF_SDP_R_MBOX_PF_VF_INT_STATUS) { 281 + cn93_handle_vf_mbox_intr(oct); 282 + octep_vf_write_csr64(oct, CN93_VF_SDP_R_MBOX_PF_VF_INT(0), reg_val); 283 + } 284 + } 285 + napi_schedule_irqoff(oq->napi); 286 + return IRQ_HANDLED; 287 + } 288 + 289 + /* Re-initialize Octeon hardware registers */ 290 + static void octep_vf_reinit_regs_cn93(struct octep_vf_device *oct) 291 + { 292 + u32 i; 293 + 294 + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) 295 + oct->hw_ops.setup_iq_regs(oct, i); 296 + 297 + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) 298 + oct->hw_ops.setup_oq_regs(oct, i); 299 + 300 + oct->hw_ops.enable_interrupts(oct); 301 + oct->hw_ops.enable_io_queues(oct); 302 + 303 + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) 304 + writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg); 305 + } 306 + 307 + /* Enable all interrupts */ 308 + static void octep_vf_enable_interrupts_cn93(struct octep_vf_device *oct) 309 + { 310 + int num_rings, q; 311 + u64 reg_val; 312 + 313 + num_rings = 
CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); 314 + for (q = 0; q < num_rings; q++) { 315 + reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q)); 316 + reg_val |= (0x1ULL << 62); 317 + octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q), reg_val); 318 + 319 + reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(q)); 320 + reg_val |= (0x1ULL << 62); 321 + octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(q), reg_val); 322 + } 323 + /* Enable PF to VF mbox interrupt by setting 2nd bit*/ 324 + octep_vf_write_csr64(oct, CN93_VF_SDP_R_MBOX_PF_VF_INT(0), 325 + CN93_VF_SDP_R_MBOX_PF_VF_INT_ENAB); 326 + } 327 + 328 + /* Disable all interrupts */ 329 + static void octep_vf_disable_interrupts_cn93(struct octep_vf_device *oct) 330 + { 331 + int num_rings, q; 332 + u64 reg_val; 333 + 334 + /* Disable PF to VF mbox interrupt by setting 2nd bit*/ 335 + if (oct->mbox) 336 + octep_vf_write_csr64(oct, CN93_VF_SDP_R_MBOX_PF_VF_INT(0), 0x0); 337 + 338 + num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); 339 + for (q = 0; q < num_rings; q++) { 340 + reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q)); 341 + reg_val &= ~(0x1ULL << 62); 342 + octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q), reg_val); 343 + 344 + reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(q)); 345 + reg_val &= ~(0x1ULL << 62); 346 + octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(q), reg_val); 347 + } 348 + } 349 + 350 + /* Get new Octeon Read Index: index of descriptor that Octeon reads next. 
*/ 351 + static u32 octep_vf_update_iq_read_index_cn93(struct octep_vf_iq *iq) 352 + { 353 + u32 pkt_in_done = readl(iq->inst_cnt_reg); 354 + u32 last_done, new_idx; 355 + 356 + last_done = pkt_in_done - iq->pkt_in_done; 357 + iq->pkt_in_done = pkt_in_done; 358 + 359 + new_idx = (iq->octep_vf_read_index + last_done) % iq->max_count; 360 + 361 + return new_idx; 362 + } 363 + 364 + /* Enable a hardware Tx Queue */ 365 + static void octep_vf_enable_iq_cn93(struct octep_vf_device *oct, int iq_no) 366 + { 367 + u64 loop = HZ; 368 + u64 reg_val; 369 + 370 + octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_DBELL(iq_no), 0xFFFFFFFF); 371 + 372 + while (octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INSTR_DBELL(iq_no)) && 373 + loop--) { 374 + schedule_timeout_interruptible(1); 375 + } 376 + 377 + reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(iq_no)); 378 + reg_val |= (0x1ULL << 62); 379 + octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(iq_no), reg_val); 380 + 381 + reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(iq_no)); 382 + reg_val |= 0x1ULL; 383 + octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(iq_no), reg_val); 384 + } 385 + 386 + /* Enable a hardware Rx Queue */ 387 + static void octep_vf_enable_oq_cn93(struct octep_vf_device *oct, int oq_no) 388 + { 389 + u64 reg_val = 0ULL; 390 + 391 + reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(oq_no)); 392 + reg_val |= (0x1ULL << 62); 393 + octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(oq_no), reg_val); 394 + 395 + octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_DBELL(oq_no), 0xFFFFFFFF); 396 + 397 + reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(oq_no)); 398 + reg_val |= 0x1ULL; 399 + octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(oq_no), reg_val); 400 + } 401 + 402 + /* Enable all hardware Tx/Rx Queues assigned to VF */ 403 + static void octep_vf_enable_io_queues_cn93(struct octep_vf_device *oct) 404 + { 405 + u8 q; 406 + 407 + for (q = 0; q < 
CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) { 408 + octep_vf_enable_iq_cn93(oct, q); 409 + octep_vf_enable_oq_cn93(oct, q); 410 + } 411 + } 412 + 413 + /* Disable a hardware Tx Queue assigned to VF */ 414 + static void octep_vf_disable_iq_cn93(struct octep_vf_device *oct, int iq_no) 415 + { 416 + u64 reg_val = 0ULL; 417 + 418 + reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(iq_no)); 419 + reg_val &= ~0x1ULL; 420 + octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(iq_no), reg_val); 421 + } 422 + 423 + /* Disable a hardware Rx Queue assigned to VF */ 424 + static void octep_vf_disable_oq_cn93(struct octep_vf_device *oct, int oq_no) 425 + { 426 + u64 reg_val = 0ULL; 427 + 428 + reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(oq_no)); 429 + reg_val &= ~0x1ULL; 430 + octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(oq_no), reg_val); 431 + } 432 + 433 + /* Disable all hardware Tx/Rx Queues assigned to VF */ 434 + static void octep_vf_disable_io_queues_cn93(struct octep_vf_device *oct) 435 + { 436 + int q = 0; 437 + 438 + for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) { 439 + octep_vf_disable_iq_cn93(oct, q); 440 + octep_vf_disable_oq_cn93(oct, q); 441 + } 442 + } 443 + 444 + /* Dump hardware registers (including Tx/Rx queues) for debugging. */ 445 + static void octep_vf_dump_registers_cn93(struct octep_vf_device *oct) 446 + { 447 + u8 num_rings, q; 448 + 449 + num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); 450 + for (q = 0; q < num_rings; q++) 451 + cn93_vf_dump_q_regs(oct, q); 452 + } 453 + 454 + /** 455 + * octep_vf_device_setup_cn93() - Setup Octeon device. 456 + * 457 + * @oct: Octeon device private data structure. 458 + * 459 + * - initialize hardware operations. 460 + * - get target side pcie port number for the device. 461 + * - set initial configuration and max limits. 
462 + */ 463 + void octep_vf_device_setup_cn93(struct octep_vf_device *oct) 464 + { 465 + oct->hw_ops.setup_iq_regs = octep_vf_setup_iq_regs_cn93; 466 + oct->hw_ops.setup_oq_regs = octep_vf_setup_oq_regs_cn93; 467 + oct->hw_ops.setup_mbox_regs = octep_vf_setup_mbox_regs_cn93; 468 + 469 + oct->hw_ops.ioq_intr_handler = octep_vf_ioq_intr_handler_cn93; 470 + oct->hw_ops.reinit_regs = octep_vf_reinit_regs_cn93; 471 + 472 + oct->hw_ops.enable_interrupts = octep_vf_enable_interrupts_cn93; 473 + oct->hw_ops.disable_interrupts = octep_vf_disable_interrupts_cn93; 474 + 475 + oct->hw_ops.update_iq_read_idx = octep_vf_update_iq_read_index_cn93; 476 + 477 + oct->hw_ops.enable_iq = octep_vf_enable_iq_cn93; 478 + oct->hw_ops.enable_oq = octep_vf_enable_oq_cn93; 479 + oct->hw_ops.enable_io_queues = octep_vf_enable_io_queues_cn93; 480 + 481 + oct->hw_ops.disable_iq = octep_vf_disable_iq_cn93; 482 + oct->hw_ops.disable_oq = octep_vf_disable_oq_cn93; 483 + oct->hw_ops.disable_io_queues = octep_vf_disable_io_queues_cn93; 484 + oct->hw_ops.reset_io_queues = octep_vf_reset_io_queues_cn93; 485 + 486 + oct->hw_ops.dump_registers = octep_vf_dump_registers_cn93; 487 + octep_vf_init_config_cn93_vf(oct); 488 + }
+500
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Marvell Octeon EP (EndPoint) VF Ethernet Driver 3 + * 4 + * Copyright (C) 2020 Marvell. 5 + * 6 + */ 7 + 8 + #include <linux/pci.h> 9 + #include <linux/netdevice.h> 10 + #include <linux/etherdevice.h> 11 + 12 + #include "octep_vf_config.h" 13 + #include "octep_vf_main.h" 14 + #include "octep_vf_regs_cnxk.h" 15 + 16 + /* Dump useful hardware IQ/OQ CSRs for debug purpose */ 17 + static void cnxk_vf_dump_q_regs(struct octep_vf_device *oct, int qno) 18 + { 19 + struct device *dev = &oct->pdev->dev; 20 + 21 + dev_info(dev, "IQ-%d register dump\n", qno); 22 + dev_info(dev, "R[%d]_IN_INSTR_DBELL[0x%llx]: 0x%016llx\n", 23 + qno, CNXK_VF_SDP_R_IN_INSTR_DBELL(qno), 24 + octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_DBELL(qno))); 25 + dev_info(dev, "R[%d]_IN_CONTROL[0x%llx]: 0x%016llx\n", 26 + qno, CNXK_VF_SDP_R_IN_CONTROL(qno), 27 + octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(qno))); 28 + dev_info(dev, "R[%d]_IN_ENABLE[0x%llx]: 0x%016llx\n", 29 + qno, CNXK_VF_SDP_R_IN_ENABLE(qno), 30 + octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(qno))); 31 + dev_info(dev, "R[%d]_IN_INSTR_BADDR[0x%llx]: 0x%016llx\n", 32 + qno, CNXK_VF_SDP_R_IN_INSTR_BADDR(qno), 33 + octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_BADDR(qno))); 34 + dev_info(dev, "R[%d]_IN_INSTR_RSIZE[0x%llx]: 0x%016llx\n", 35 + qno, CNXK_VF_SDP_R_IN_INSTR_RSIZE(qno), 36 + octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_RSIZE(qno))); 37 + dev_info(dev, "R[%d]_IN_CNTS[0x%llx]: 0x%016llx\n", 38 + qno, CNXK_VF_SDP_R_IN_CNTS(qno), 39 + octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CNTS(qno))); 40 + dev_info(dev, "R[%d]_IN_INT_LEVELS[0x%llx]: 0x%016llx\n", 41 + qno, CNXK_VF_SDP_R_IN_INT_LEVELS(qno), 42 + octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(qno))); 43 + dev_info(dev, "R[%d]_IN_PKT_CNT[0x%llx]: 0x%016llx\n", 44 + qno, CNXK_VF_SDP_R_IN_PKT_CNT(qno), 45 + octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_PKT_CNT(qno))); 46 + dev_info(dev, "R[%d]_IN_BYTE_CNT[0x%llx]: 
0x%016llx\n", 47 + qno, CNXK_VF_SDP_R_IN_BYTE_CNT(qno), 48 + octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_BYTE_CNT(qno))); 49 + 50 + dev_info(dev, "OQ-%d register dump\n", qno); 51 + dev_info(dev, "R[%d]_OUT_SLIST_DBELL[0x%llx]: 0x%016llx\n", 52 + qno, CNXK_VF_SDP_R_OUT_SLIST_DBELL(qno), 53 + octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_DBELL(qno))); 54 + dev_info(dev, "R[%d]_OUT_CONTROL[0x%llx]: 0x%016llx\n", 55 + qno, CNXK_VF_SDP_R_OUT_CONTROL(qno), 56 + octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(qno))); 57 + dev_info(dev, "R[%d]_OUT_ENABLE[0x%llx]: 0x%016llx\n", 58 + qno, CNXK_VF_SDP_R_OUT_ENABLE(qno), 59 + octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(qno))); 60 + dev_info(dev, "R[%d]_OUT_SLIST_BADDR[0x%llx]: 0x%016llx\n", 61 + qno, CNXK_VF_SDP_R_OUT_SLIST_BADDR(qno), 62 + octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_BADDR(qno))); 63 + dev_info(dev, "R[%d]_OUT_SLIST_RSIZE[0x%llx]: 0x%016llx\n", 64 + qno, CNXK_VF_SDP_R_OUT_SLIST_RSIZE(qno), 65 + octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_RSIZE(qno))); 66 + dev_info(dev, "R[%d]_OUT_CNTS[0x%llx]: 0x%016llx\n", 67 + qno, CNXK_VF_SDP_R_OUT_CNTS(qno), 68 + octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CNTS(qno))); 69 + dev_info(dev, "R[%d]_OUT_INT_LEVELS[0x%llx]: 0x%016llx\n", 70 + qno, CNXK_VF_SDP_R_OUT_INT_LEVELS(qno), 71 + octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(qno))); 72 + dev_info(dev, "R[%d]_OUT_PKT_CNT[0x%llx]: 0x%016llx\n", 73 + qno, CNXK_VF_SDP_R_OUT_PKT_CNT(qno), 74 + octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_PKT_CNT(qno))); 75 + dev_info(dev, "R[%d]_OUT_BYTE_CNT[0x%llx]: 0x%016llx\n", 76 + qno, CNXK_VF_SDP_R_OUT_BYTE_CNT(qno), 77 + octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_BYTE_CNT(qno))); 78 + dev_info(dev, "R[%d]_ERR_TYPE[0x%llx]: 0x%016llx\n", 79 + qno, CNXK_VF_SDP_R_ERR_TYPE(qno), 80 + octep_vf_read_csr64(oct, CNXK_VF_SDP_R_ERR_TYPE(qno))); 81 + } 82 + 83 + /* Reset Hardware Tx queue */ 84 + static int cnxk_vf_reset_iq(struct octep_vf_device *oct, int q_no) 85 + { 
86 + u64 val = 0ULL; 87 + 88 + dev_dbg(&oct->pdev->dev, "Reset VF IQ-%d\n", q_no); 89 + 90 + /* Disable the Tx/Instruction Ring */ 91 + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(q_no), val); 92 + 93 + /* clear the Instruction Ring packet/byte counts and doorbell CSRs */ 94 + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q_no), val); 95 + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_PKT_CNT(q_no), val); 96 + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_BYTE_CNT(q_no), val); 97 + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_BADDR(q_no), val); 98 + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_RSIZE(q_no), val); 99 + 100 + val = 0xFFFFFFFF; 101 + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_DBELL(q_no), val); 102 + 103 + val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CNTS(q_no)); 104 + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_CNTS(q_no), val & 0xFFFFFFFF); 105 + 106 + return 0; 107 + } 108 + 109 + /* Reset Hardware Rx queue */ 110 + static void cnxk_vf_reset_oq(struct octep_vf_device *oct, int q_no) 111 + { 112 + u64 val = 0ULL; 113 + 114 + /* Disable Output (Rx) Ring */ 115 + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(q_no), val); 116 + 117 + /* Clear count CSRs */ 118 + val = octep_vf_read_csr(oct, CNXK_VF_SDP_R_OUT_CNTS(q_no)); 119 + octep_vf_write_csr(oct, CNXK_VF_SDP_R_OUT_CNTS(q_no), val); 120 + 121 + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_PKT_CNT(q_no), 0xFFFFFFFFFULL); 122 + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_DBELL(q_no), 0xFFFFFFFF); 123 + } 124 + 125 + /* Reset all hardware Tx/Rx queues */ 126 + static void octep_vf_reset_io_queues_cnxk(struct octep_vf_device *oct) 127 + { 128 + struct pci_dev *pdev = oct->pdev; 129 + int q; 130 + 131 + dev_dbg(&pdev->dev, "Reset OCTEP_CNXK VF IO Queues\n"); 132 + 133 + for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) { 134 + cnxk_vf_reset_iq(oct, q); 135 + cnxk_vf_reset_oq(oct, q); 136 + } 137 + } 138 + 139 + /* Initialize configuration limits and initial 
active config */ 140 + static void octep_vf_init_config_cnxk_vf(struct octep_vf_device *oct) 141 + { 142 + struct octep_vf_config *conf = oct->conf; 143 + u64 reg_val; 144 + 145 + reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(0)); 146 + conf->ring_cfg.max_io_rings = (reg_val >> CNXK_VF_R_IN_CTL_RPVF_POS) & 147 + CNXK_VF_R_IN_CTL_RPVF_MASK; 148 + conf->ring_cfg.active_io_rings = conf->ring_cfg.max_io_rings; 149 + 150 + conf->iq.num_descs = OCTEP_VF_IQ_MAX_DESCRIPTORS; 151 + conf->iq.instr_type = OCTEP_VF_64BYTE_INSTR; 152 + conf->iq.db_min = OCTEP_VF_DB_MIN; 153 + conf->iq.intr_threshold = OCTEP_VF_IQ_INTR_THRESHOLD; 154 + 155 + conf->oq.num_descs = OCTEP_VF_OQ_MAX_DESCRIPTORS; 156 + conf->oq.buf_size = OCTEP_VF_OQ_BUF_SIZE; 157 + conf->oq.refill_threshold = OCTEP_VF_OQ_REFILL_THRESHOLD; 158 + conf->oq.oq_intr_pkt = OCTEP_VF_OQ_INTR_PKT_THRESHOLD; 159 + conf->oq.oq_intr_time = OCTEP_VF_OQ_INTR_TIME_THRESHOLD; 160 + conf->oq.wmark = OCTEP_VF_OQ_WMARK_MIN; 161 + 162 + conf->msix_cfg.ioq_msix = conf->ring_cfg.active_io_rings; 163 + } 164 + 165 + /* Setup registers for a hardware Tx Queue */ 166 + static void octep_vf_setup_iq_regs_cnxk(struct octep_vf_device *oct, int iq_no) 167 + { 168 + struct octep_vf_iq *iq = oct->iq[iq_no]; 169 + u32 reset_instr_cnt; 170 + u64 reg_val; 171 + 172 + reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(iq_no)); 173 + 174 + /* wait for IDLE to set to 1 */ 175 + if (!(reg_val & CNXK_VF_R_IN_CTL_IDLE)) { 176 + do { 177 + reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(iq_no)); 178 + } while (!(reg_val & CNXK_VF_R_IN_CTL_IDLE)); 179 + } 180 + reg_val |= CNXK_VF_R_IN_CTL_RDSIZE; 181 + reg_val |= CNXK_VF_R_IN_CTL_IS_64B; 182 + reg_val |= CNXK_VF_R_IN_CTL_ESR; 183 + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(iq_no), reg_val); 184 + 185 + /* Write the start of the input queue's ring and its size */ 186 + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_BADDR(iq_no), iq->desc_ring_dma); 187 + 
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_RSIZE(iq_no), iq->max_count); 188 + 189 + /* Remember the doorbell & instruction count register addr for this queue */ 190 + iq->doorbell_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_IN_INSTR_DBELL(iq_no); 191 + iq->inst_cnt_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_IN_CNTS(iq_no); 192 + iq->intr_lvl_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_IN_INT_LEVELS(iq_no); 193 + 194 + /* Store the current instruction counter (used in flush_iq calculation) */ 195 + reset_instr_cnt = readl(iq->inst_cnt_reg); 196 + writel(reset_instr_cnt, iq->inst_cnt_reg); 197 + 198 + /* INTR_THRESHOLD is set to max(FFFFFFFF) to disable the INTR */ 199 + reg_val = CFG_GET_IQ_INTR_THRESHOLD(oct->conf) & 0xffffffff; 200 + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(iq_no), reg_val); 201 + } 202 + 203 + /* Setup registers for a hardware Rx Queue */ 204 + static void octep_vf_setup_oq_regs_cnxk(struct octep_vf_device *oct, int oq_no) 205 + { 206 + struct octep_vf_oq *oq = oct->oq[oq_no]; 207 + u32 time_threshold = 0; 208 + u64 oq_ctl = 0ULL; 209 + u64 reg_val; 210 + 211 + reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no)); 212 + 213 + /* wait for IDLE to set to 1 */ 214 + if (!(reg_val & CNXK_VF_R_OUT_CTL_IDLE)) { 215 + do { 216 + reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no)); 217 + } while (!(reg_val & CNXK_VF_R_OUT_CTL_IDLE)); 218 + } 219 + 220 + reg_val &= ~(CNXK_VF_R_OUT_CTL_IMODE); 221 + reg_val &= ~(CNXK_VF_R_OUT_CTL_ROR_P); 222 + reg_val &= ~(CNXK_VF_R_OUT_CTL_NSR_P); 223 + reg_val &= ~(CNXK_VF_R_OUT_CTL_ROR_I); 224 + reg_val &= ~(CNXK_VF_R_OUT_CTL_NSR_I); 225 + reg_val &= ~(CNXK_VF_R_OUT_CTL_ES_I); 226 + reg_val &= ~(CNXK_VF_R_OUT_CTL_ROR_D); 227 + reg_val &= ~(CNXK_VF_R_OUT_CTL_NSR_D); 228 + reg_val &= ~(CNXK_VF_R_OUT_CTL_ES_D); 229 + reg_val |= (CNXK_VF_R_OUT_CTL_ES_P); 230 + 231 + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no), reg_val); 232 + octep_vf_write_csr64(oct, 
CNXK_VF_SDP_R_OUT_SLIST_BADDR(oq_no), oq->desc_ring_dma); 233 + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_RSIZE(oq_no), oq->max_count); 234 + 235 + oq_ctl = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no)); 236 + /* Clear the ISIZE and BSIZE (22-0) */ 237 + oq_ctl &= ~0x7fffffULL; 238 + /* Populate the BSIZE (15-0) */ 239 + oq_ctl |= (oq->buffer_size & 0xffff); 240 + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no), oq_ctl); 241 + 242 + /* Get the mapped address of the pkt_sent and pkts_credit regs */ 243 + oq->pkts_sent_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_OUT_CNTS(oq_no); 244 + oq->pkts_credit_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_OUT_SLIST_DBELL(oq_no); 245 + 246 + time_threshold = CFG_GET_OQ_INTR_TIME(oct->conf); 247 + reg_val = ((u64)time_threshold << 32) | CFG_GET_OQ_INTR_PKT(oct->conf); 248 + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(oq_no), reg_val); 249 + 250 + /* set watermark for backpressure */ 251 + reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_WMARK(oq_no)); 252 + reg_val &= ~0xFFFFFFFFULL; 253 + reg_val |= CFG_GET_OQ_WMARK(oct->conf); 254 + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_WMARK(oq_no), reg_val); 255 + } 256 + 257 + /* Setup registers for a VF mailbox */ 258 + static void octep_vf_setup_mbox_regs_cnxk(struct octep_vf_device *oct, int q_no) 259 + { 260 + struct octep_vf_mbox *mbox = oct->mbox; 261 + 262 + /* PF to VF DATA reg. VF reads from this reg */ 263 + mbox->mbox_read_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_MBOX_PF_VF_DATA(q_no); 264 + 265 + /* VF mbox interrupt reg */ 266 + mbox->mbox_int_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_MBOX_PF_VF_INT(q_no); 267 + 268 + /* VF to PF DATA reg. 
VF writes into this reg */ 269 + mbox->mbox_write_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_MBOX_VF_PF_DATA(q_no); 270 + } 271 + 272 + /* Mailbox Interrupt handler */ 273 + static void cnxk_handle_vf_mbox_intr(struct octep_vf_device *oct) 274 + { 275 + if (oct->mbox) 276 + schedule_work(&oct->mbox->wk.work); 277 + else 278 + dev_err(&oct->pdev->dev, "cannot schedule work on invalid mbox\n"); 279 + } 280 + 281 + /* Tx/Rx queue interrupt handler */ 282 + static irqreturn_t octep_vf_ioq_intr_handler_cnxk(void *data) 283 + { 284 + struct octep_vf_ioq_vector *vector = (struct octep_vf_ioq_vector *)data; 285 + struct octep_vf_oq *oq = vector->oq; 286 + struct octep_vf_device *oct = vector->octep_vf_dev; 287 + u64 reg_val = 0ULL; 288 + 289 + /* Mailbox interrupt arrives along with interrupt of tx/rx ring pair 0 */ 290 + if (oq->q_no == 0) { 291 + reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_MBOX_PF_VF_INT(0)); 292 + if (reg_val & CNXK_VF_SDP_R_MBOX_PF_VF_INT_STATUS) { 293 + cnxk_handle_vf_mbox_intr(oct); 294 + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_MBOX_PF_VF_INT(0), reg_val); 295 + } 296 + } 297 + napi_schedule_irqoff(oq->napi); 298 + return IRQ_HANDLED; 299 + } 300 + 301 + /* Re-initialize Octeon hardware registers */ 302 + static void octep_vf_reinit_regs_cnxk(struct octep_vf_device *oct) 303 + { 304 + u32 i; 305 + 306 + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) 307 + oct->hw_ops.setup_iq_regs(oct, i); 308 + 309 + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) 310 + oct->hw_ops.setup_oq_regs(oct, i); 311 + 312 + oct->hw_ops.enable_interrupts(oct); 313 + oct->hw_ops.enable_io_queues(oct); 314 + 315 + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) 316 + writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg); 317 + } 318 + 319 + /* Enable all interrupts */ 320 + static void octep_vf_enable_interrupts_cnxk(struct octep_vf_device *oct) 321 + { 322 + int num_rings, q; 323 + u64 reg_val; 324 + 325 + num_rings = 
CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); 326 + for (q = 0; q < num_rings; q++) { 327 + reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q)); 328 + reg_val |= (0x1ULL << 62); 329 + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q), reg_val); 330 + 331 + reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(q)); 332 + reg_val |= (0x1ULL << 62); 333 + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(q), reg_val); 334 + } 335 + /* Enable PF to VF mbox interrupt by setting 2nd bit*/ 336 + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_MBOX_PF_VF_INT(0), 337 + CNXK_VF_SDP_R_MBOX_PF_VF_INT_ENAB); 338 + } 339 + 340 + /* Disable all interrupts */ 341 + static void octep_vf_disable_interrupts_cnxk(struct octep_vf_device *oct) 342 + { 343 + int num_rings, q; 344 + u64 reg_val; 345 + 346 + /* Disable PF to VF mbox interrupt by setting 2nd bit*/ 347 + if (oct->mbox) 348 + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_MBOX_PF_VF_INT(0), 0x0); 349 + 350 + num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); 351 + for (q = 0; q < num_rings; q++) { 352 + reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q)); 353 + reg_val &= ~(0x1ULL << 62); 354 + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q), reg_val); 355 + 356 + reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(q)); 357 + reg_val &= ~(0x1ULL << 62); 358 + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(q), reg_val); 359 + } 360 + } 361 + 362 + /* Get new Octeon Read Index: index of descriptor that Octeon reads next. 
*/ 363 + static u32 octep_vf_update_iq_read_index_cnxk(struct octep_vf_iq *iq) 364 + { 365 + u32 pkt_in_done = readl(iq->inst_cnt_reg); 366 + u32 last_done, new_idx; 367 + 368 + last_done = pkt_in_done - iq->pkt_in_done; 369 + iq->pkt_in_done = pkt_in_done; 370 + 371 + new_idx = (iq->octep_vf_read_index + last_done) % iq->max_count; 372 + 373 + return new_idx; 374 + } 375 + 376 + /* Enable a hardware Tx Queue */ 377 + static void octep_vf_enable_iq_cnxk(struct octep_vf_device *oct, int iq_no) 378 + { 379 + u64 loop = HZ; 380 + u64 reg_val; 381 + 382 + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_DBELL(iq_no), 0xFFFFFFFF); 383 + 384 + while (octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_DBELL(iq_no)) && 385 + loop--) { 386 + schedule_timeout_interruptible(1); 387 + } 388 + 389 + reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(iq_no)); 390 + reg_val |= (0x1ULL << 62); 391 + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(iq_no), reg_val); 392 + 393 + reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(iq_no)); 394 + reg_val |= 0x1ULL; 395 + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(iq_no), reg_val); 396 + } 397 + 398 + /* Enable a hardware Rx Queue */ 399 + static void octep_vf_enable_oq_cnxk(struct octep_vf_device *oct, int oq_no) 400 + { 401 + u64 reg_val = 0ULL; 402 + 403 + reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(oq_no)); 404 + reg_val |= (0x1ULL << 62); 405 + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(oq_no), reg_val); 406 + 407 + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_DBELL(oq_no), 0xFFFFFFFF); 408 + 409 + reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(oq_no)); 410 + reg_val |= 0x1ULL; 411 + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(oq_no), reg_val); 412 + } 413 + 414 + /* Enable all hardware Tx/Rx Queues assigned to VF */ 415 + static void octep_vf_enable_io_queues_cnxk(struct octep_vf_device *oct) 416 + { 417 + u8 q; 418 + 419 + for (q = 0; q < 
CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) { 420 + octep_vf_enable_iq_cnxk(oct, q); 421 + octep_vf_enable_oq_cnxk(oct, q); 422 + } 423 + } 424 + 425 + /* Disable a hardware Tx Queue assigned to VF */ 426 + static void octep_vf_disable_iq_cnxk(struct octep_vf_device *oct, int iq_no) 427 + { 428 + u64 reg_val = 0ULL; 429 + 430 + reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(iq_no)); 431 + reg_val &= ~0x1ULL; 432 + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(iq_no), reg_val); 433 + } 434 + 435 + /* Disable a hardware Rx Queue assigned to VF */ 436 + static void octep_vf_disable_oq_cnxk(struct octep_vf_device *oct, int oq_no) 437 + { 438 + u64 reg_val = 0ULL; 439 + 440 + reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(oq_no)); 441 + reg_val &= ~0x1ULL; 442 + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(oq_no), reg_val); 443 + } 444 + 445 + /* Disable all hardware Tx/Rx Queues assigned to VF */ 446 + static void octep_vf_disable_io_queues_cnxk(struct octep_vf_device *oct) 447 + { 448 + int q = 0; 449 + 450 + for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) { 451 + octep_vf_disable_iq_cnxk(oct, q); 452 + octep_vf_disable_oq_cnxk(oct, q); 453 + } 454 + } 455 + 456 + /* Dump hardware registers (including Tx/Rx queues) for debugging. */ 457 + static void octep_vf_dump_registers_cnxk(struct octep_vf_device *oct) 458 + { 459 + u8 num_rings, q; 460 + 461 + num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); 462 + for (q = 0; q < num_rings; q++) 463 + cnxk_vf_dump_q_regs(oct, q); 464 + } 465 + 466 + /** 467 + * octep_vf_device_setup_cnxk() - Setup Octeon device. 468 + * 469 + * @oct: Octeon device private data structure. 470 + * 471 + * - initialize hardware operations. 472 + * - get target side pcie port number for the device. 473 + * - set initial configuration and max limits. 
474 + */ 475 + void octep_vf_device_setup_cnxk(struct octep_vf_device *oct) 476 + { 477 + oct->hw_ops.setup_iq_regs = octep_vf_setup_iq_regs_cnxk; 478 + oct->hw_ops.setup_oq_regs = octep_vf_setup_oq_regs_cnxk; 479 + oct->hw_ops.setup_mbox_regs = octep_vf_setup_mbox_regs_cnxk; 480 + 481 + oct->hw_ops.ioq_intr_handler = octep_vf_ioq_intr_handler_cnxk; 482 + oct->hw_ops.reinit_regs = octep_vf_reinit_regs_cnxk; 483 + 484 + oct->hw_ops.enable_interrupts = octep_vf_enable_interrupts_cnxk; 485 + oct->hw_ops.disable_interrupts = octep_vf_disable_interrupts_cnxk; 486 + 487 + oct->hw_ops.update_iq_read_idx = octep_vf_update_iq_read_index_cnxk; 488 + 489 + oct->hw_ops.enable_iq = octep_vf_enable_iq_cnxk; 490 + oct->hw_ops.enable_oq = octep_vf_enable_oq_cnxk; 491 + oct->hw_ops.enable_io_queues = octep_vf_enable_io_queues_cnxk; 492 + 493 + oct->hw_ops.disable_iq = octep_vf_disable_iq_cnxk; 494 + oct->hw_ops.disable_oq = octep_vf_disable_oq_cnxk; 495 + oct->hw_ops.disable_io_queues = octep_vf_disable_io_queues_cnxk; 496 + oct->hw_ops.reset_io_queues = octep_vf_reset_io_queues_cnxk; 497 + 498 + oct->hw_ops.dump_registers = octep_vf_dump_registers_cnxk; 499 + octep_vf_init_config_cnxk_vf(oct); 500 + }
+160
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_config.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Marvell Octeon EP (EndPoint) VF Ethernet Driver 3 + * 4 + * Copyright (C) 2020 Marvell. 5 + * 6 + */ 7 + 8 + #ifndef _OCTEP_VF_CONFIG_H_ 9 + #define _OCTEP_VF_CONFIG_H_ 10 + 11 + /* Tx instruction types by length */ 12 + #define OCTEP_VF_32BYTE_INSTR 32 13 + #define OCTEP_VF_64BYTE_INSTR 64 14 + 15 + /* Tx Queue: maximum descriptors per ring */ 16 + #define OCTEP_VF_IQ_MAX_DESCRIPTORS 1024 17 + /* Minimum input (Tx) requests to be enqueued to ring doorbell */ 18 + #define OCTEP_VF_DB_MIN 8 19 + /* Packet threshold for Tx queue interrupt */ 20 + #define OCTEP_VF_IQ_INTR_THRESHOLD 0x0 21 + 22 + /* Minimum watermark for backpressure */ 23 + #define OCTEP_VF_OQ_WMARK_MIN 256 24 + 25 + /* Rx Queue: maximum descriptors per ring */ 26 + #define OCTEP_VF_OQ_MAX_DESCRIPTORS 1024 27 + 28 + /* Rx buffer size: Use page size buffers. 29 + * Build skb from allocated page buffer once the packet is received. 30 + * When a gathered packet is received, make head page as skb head and 31 + * page buffers in consecutive Rx descriptors as fragments. 32 + */ 33 + #define OCTEP_VF_OQ_BUF_SIZE (SKB_WITH_OVERHEAD(PAGE_SIZE)) 34 + #define OCTEP_VF_OQ_PKTS_PER_INTR 128 35 + #define OCTEP_VF_OQ_REFILL_THRESHOLD (OCTEP_VF_OQ_MAX_DESCRIPTORS / 4) 36 + 37 + #define OCTEP_VF_OQ_INTR_PKT_THRESHOLD 1 38 + #define OCTEP_VF_OQ_INTR_TIME_THRESHOLD 10 39 + 40 + #define OCTEP_VF_MSIX_NAME_SIZE (IFNAMSIZ + 32) 41 + 42 + /* Tx Queue wake threshold 43 + * wakeup a stopped Tx queue if minimum 2 descriptors are available. 44 + * Even a skb with fragments consume only one Tx queue descriptor entry. 
45 + */ 46 + #define OCTEP_VF_WAKE_QUEUE_THRESHOLD 2 47 + 48 + /* Minimum MTU supported by Octeon network interface */ 49 + #define OCTEP_VF_MIN_MTU ETH_MIN_MTU 50 + /* Maximum MTU supported by Octeon interface*/ 51 + #define OCTEP_VF_MAX_MTU (10000 - (ETH_HLEN + ETH_FCS_LEN)) 52 + /* Default MTU */ 53 + #define OCTEP_VF_DEFAULT_MTU 1500 54 + 55 + /* Macros to get octeon config params */ 56 + #define CFG_GET_IQ_CFG(cfg) ((cfg)->iq) 57 + #define CFG_GET_IQ_NUM_DESC(cfg) ((cfg)->iq.num_descs) 58 + #define CFG_GET_IQ_INSTR_TYPE(cfg) ((cfg)->iq.instr_type) 59 + #define CFG_GET_IQ_INSTR_SIZE(cfg) (64) 60 + #define CFG_GET_IQ_DB_MIN(cfg) ((cfg)->iq.db_min) 61 + #define CFG_GET_IQ_INTR_THRESHOLD(cfg) ((cfg)->iq.intr_threshold) 62 + 63 + #define CFG_GET_OQ_NUM_DESC(cfg) ((cfg)->oq.num_descs) 64 + #define CFG_GET_OQ_BUF_SIZE(cfg) ((cfg)->oq.buf_size) 65 + #define CFG_GET_OQ_REFILL_THRESHOLD(cfg) ((cfg)->oq.refill_threshold) 66 + #define CFG_GET_OQ_INTR_PKT(cfg) ((cfg)->oq.oq_intr_pkt) 67 + #define CFG_GET_OQ_INTR_TIME(cfg) ((cfg)->oq.oq_intr_time) 68 + #define CFG_GET_OQ_WMARK(cfg) ((cfg)->oq.wmark) 69 + 70 + #define CFG_GET_PORTS_ACTIVE_IO_RINGS(cfg) ((cfg)->ring_cfg.active_io_rings) 71 + #define CFG_GET_PORTS_MAX_IO_RINGS(cfg) ((cfg)->ring_cfg.max_io_rings) 72 + 73 + #define CFG_GET_CORE_TICS_PER_US(cfg) ((cfg)->core_cfg.core_tics_per_us) 74 + #define CFG_GET_COPROC_TICS_PER_US(cfg) ((cfg)->core_cfg.coproc_tics_per_us) 75 + 76 + #define CFG_GET_IOQ_MSIX(cfg) ((cfg)->msix_cfg.ioq_msix) 77 + 78 + /* Hardware Tx Queue configuration. */ 79 + struct octep_vf_iq_config { 80 + /* Size of the Input queue (number of commands) */ 81 + u16 num_descs; 82 + 83 + /* Command size - 32 or 64 bytes */ 84 + u16 instr_type; 85 + 86 + /* Minimum number of commands pending to be posted to Octeon before driver 87 + * hits the Input queue doorbell. 88 + */ 89 + u16 db_min; 90 + 91 + /* Trigger the IQ interrupt when processed cmd count reaches 92 + * this level. 
93 + */ 94 + u32 intr_threshold; 95 + }; 96 + 97 + /* Hardware Rx Queue configuration. */ 98 + struct octep_vf_oq_config { 99 + /* Size of Output queue (number of descriptors) */ 100 + u16 num_descs; 101 + 102 + /* Size of buffer in this Output queue. */ 103 + u16 buf_size; 104 + 105 + /* The number of buffers that were consumed during packet processing 106 + * by the driver on this Output queue before the driver attempts to 107 + * replenish the descriptor ring with new buffers. 108 + */ 109 + u16 refill_threshold; 110 + 111 + /* Interrupt Coalescing (Packet Count). Octeon will interrupt the host 112 + * only if it sent as many packets as specified by this field. 113 + * The driver usually does not use packet count interrupt coalescing. 114 + */ 115 + u32 oq_intr_pkt; 116 + 117 + /* Interrupt Coalescing (Time Interval). Octeon will interrupt the host 118 + * if at least one packet was sent in the time interval specified by 119 + * this field. The driver uses time interval interrupt coalescing by 120 + * default. The time is specified in microseconds. 121 + */ 122 + u32 oq_intr_time; 123 + 124 + /* Water mark for backpressure. 125 + * Output queue sends backpressure signal to source when 126 + * free buffer count falls below wmark. 127 + */ 128 + u32 wmark; 129 + }; 130 + 131 + /* Tx/Rx configuration */ 132 + struct octep_vf_ring_config { 133 + /* Max number of IOQs */ 134 + u16 max_io_rings; 135 + 136 + /* Number of active IOQs */ 137 + u16 active_io_rings; 138 + }; 139 + 140 + /* Octeon MSI-x config. */ 141 + struct octep_vf_msix_config { 142 + /* Number of IOQ interrupts */ 143 + u16 ioq_msix; 144 + }; 145 + 146 + /* Data Structure to hold configuration limits and active config */ 147 + struct octep_vf_config { 148 + /* Input Queue attributes. */ 149 + struct octep_vf_iq_config iq; 150 + 151 + /* Output Queue attributes. 
*/ 152 + struct octep_vf_oq_config oq; 153 + 154 + /* MSI-X interrupt config */ 155 + struct octep_vf_msix_config msix_cfg; 156 + 157 + /* NIC VF ring Configuration */ 158 + struct octep_vf_ring_config ring_cfg; 159 + }; 160 + #endif /* _OCTEP_VF_CONFIG_H_ */
+307
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Marvell Octeon EP (EndPoint) VF Ethernet Driver 3 + * 4 + * Copyright (C) 2020 Marvell. 5 + * 6 + */ 7 + 8 + #include <linux/pci.h> 9 + #include <linux/netdevice.h> 10 + #include <linux/ethtool.h> 11 + 12 + #include "octep_vf_config.h" 13 + #include "octep_vf_main.h" 14 + 15 + static const char octep_vf_gstrings_global_stats[][ETH_GSTRING_LEN] = { 16 + "rx_packets", 17 + "tx_packets", 18 + "rx_bytes", 19 + "tx_bytes", 20 + "rx_alloc_errors", 21 + "tx_busy_errors", 22 + "rx_dropped", 23 + "tx_dropped", 24 + "tx_hw_pkts", 25 + "tx_hw_octs", 26 + "tx_hw_bcast", 27 + "tx_hw_mcast", 28 + "rx_hw_pkts", 29 + "rx_hw_bytes", 30 + "rx_hw_bcast", 31 + "rx_hw_mcast", 32 + "rx_dropped_pkts_fifo_full", 33 + "rx_dropped_bytes_fifo_full", 34 + "rx_err_pkts", 35 + }; 36 + 37 + #define OCTEP_VF_GLOBAL_STATS_CNT (sizeof(octep_vf_gstrings_global_stats) / ETH_GSTRING_LEN) 38 + 39 + static const char octep_vf_gstrings_tx_q_stats[][ETH_GSTRING_LEN] = { 40 + "tx_packets_posted[Q-%u]", 41 + "tx_packets_completed[Q-%u]", 42 + "tx_bytes[Q-%u]", 43 + "tx_busy[Q-%u]", 44 + }; 45 + 46 + #define OCTEP_VF_TX_Q_STATS_CNT (sizeof(octep_vf_gstrings_tx_q_stats) / ETH_GSTRING_LEN) 47 + 48 + static const char octep_vf_gstrings_rx_q_stats[][ETH_GSTRING_LEN] = { 49 + "rx_packets[Q-%u]", 50 + "rx_bytes[Q-%u]", 51 + "rx_alloc_errors[Q-%u]", 52 + }; 53 + 54 + #define OCTEP_VF_RX_Q_STATS_CNT (sizeof(octep_vf_gstrings_rx_q_stats) / ETH_GSTRING_LEN) 55 + 56 + static void octep_vf_get_drvinfo(struct net_device *netdev, 57 + struct ethtool_drvinfo *info) 58 + { 59 + struct octep_vf_device *oct = netdev_priv(netdev); 60 + 61 + strscpy(info->driver, OCTEP_VF_DRV_NAME, sizeof(info->driver)); 62 + strscpy(info->bus_info, pci_name(oct->pdev), sizeof(info->bus_info)); 63 + } 64 + 65 + static void octep_vf_get_strings(struct net_device *netdev, 66 + u32 stringset, u8 *data) 67 + { 68 + struct octep_vf_device *oct = netdev_priv(netdev); 69 + u16 num_queues = 
CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); 70 + char *strings = (char *)data; 71 + int i, j; 72 + 73 + switch (stringset) { 74 + case ETH_SS_STATS: 75 + for (i = 0; i < OCTEP_VF_GLOBAL_STATS_CNT; i++) { 76 + snprintf(strings, ETH_GSTRING_LEN, 77 + octep_vf_gstrings_global_stats[i]); 78 + strings += ETH_GSTRING_LEN; 79 + } 80 + 81 + for (i = 0; i < num_queues; i++) { 82 + for (j = 0; j < OCTEP_VF_TX_Q_STATS_CNT; j++) { 83 + snprintf(strings, ETH_GSTRING_LEN, 84 + octep_vf_gstrings_tx_q_stats[j], i); 85 + strings += ETH_GSTRING_LEN; 86 + } 87 + } 88 + 89 + for (i = 0; i < num_queues; i++) { 90 + for (j = 0; j < OCTEP_VF_RX_Q_STATS_CNT; j++) { 91 + snprintf(strings, ETH_GSTRING_LEN, 92 + octep_vf_gstrings_rx_q_stats[j], i); 93 + strings += ETH_GSTRING_LEN; 94 + } 95 + } 96 + break; 97 + default: 98 + break; 99 + } 100 + } 101 + 102 + static int octep_vf_get_sset_count(struct net_device *netdev, int sset) 103 + { 104 + struct octep_vf_device *oct = netdev_priv(netdev); 105 + u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); 106 + 107 + switch (sset) { 108 + case ETH_SS_STATS: 109 + return OCTEP_VF_GLOBAL_STATS_CNT + (num_queues * 110 + (OCTEP_VF_TX_Q_STATS_CNT + OCTEP_VF_RX_Q_STATS_CNT)); 111 + break; 112 + default: 113 + return -EOPNOTSUPP; 114 + } 115 + } 116 + 117 + static void octep_vf_get_ethtool_stats(struct net_device *netdev, 118 + struct ethtool_stats *stats, u64 *data) 119 + { 120 + struct octep_vf_device *oct = netdev_priv(netdev); 121 + struct octep_vf_iface_tx_stats *iface_tx_stats; 122 + struct octep_vf_iface_rx_stats *iface_rx_stats; 123 + u64 rx_alloc_errors, tx_busy_errors; 124 + u64 rx_packets, rx_bytes; 125 + u64 tx_packets, tx_bytes; 126 + int q, i; 127 + 128 + rx_packets = 0; 129 + rx_bytes = 0; 130 + tx_packets = 0; 131 + tx_bytes = 0; 132 + rx_alloc_errors = 0; 133 + tx_busy_errors = 0; 134 + tx_packets = 0; 135 + tx_bytes = 0; 136 + rx_packets = 0; 137 + rx_bytes = 0; 138 + 139 + octep_vf_get_if_stats(oct); 140 + iface_tx_stats = 
&oct->iface_tx_stats; 141 + iface_rx_stats = &oct->iface_rx_stats; 142 + 143 + for (q = 0; q < oct->num_oqs; q++) { 144 + struct octep_vf_iq *iq = oct->iq[q]; 145 + struct octep_vf_oq *oq = oct->oq[q]; 146 + 147 + tx_packets += iq->stats.instr_completed; 148 + tx_bytes += iq->stats.bytes_sent; 149 + tx_busy_errors += iq->stats.tx_busy; 150 + 151 + rx_packets += oq->stats.packets; 152 + rx_bytes += oq->stats.bytes; 153 + rx_alloc_errors += oq->stats.alloc_failures; 154 + } 155 + i = 0; 156 + data[i++] = rx_packets; 157 + data[i++] = tx_packets; 158 + data[i++] = rx_bytes; 159 + data[i++] = tx_bytes; 160 + data[i++] = rx_alloc_errors; 161 + data[i++] = tx_busy_errors; 162 + data[i++] = iface_rx_stats->dropped_pkts_fifo_full + 163 + iface_rx_stats->err_pkts; 164 + data[i++] = iface_tx_stats->dropped; 165 + data[i++] = iface_tx_stats->pkts; 166 + data[i++] = iface_tx_stats->octs; 167 + data[i++] = iface_tx_stats->bcst; 168 + data[i++] = iface_tx_stats->mcst; 169 + data[i++] = iface_rx_stats->pkts; 170 + data[i++] = iface_rx_stats->octets; 171 + data[i++] = iface_rx_stats->mcast_pkts; 172 + data[i++] = iface_rx_stats->bcast_pkts; 173 + data[i++] = iface_rx_stats->dropped_pkts_fifo_full; 174 + data[i++] = iface_rx_stats->dropped_octets_fifo_full; 175 + data[i++] = iface_rx_stats->err_pkts; 176 + 177 + /* Per Tx Queue stats */ 178 + for (q = 0; q < oct->num_iqs; q++) { 179 + struct octep_vf_iq *iq = oct->iq[q]; 180 + 181 + data[i++] = iq->stats.instr_posted; 182 + data[i++] = iq->stats.instr_completed; 183 + data[i++] = iq->stats.bytes_sent; 184 + data[i++] = iq->stats.tx_busy; 185 + } 186 + 187 + /* Per Rx Queue stats */ 188 + for (q = 0; q < oct->num_oqs; q++) { 189 + struct octep_vf_oq *oq = oct->oq[q]; 190 + 191 + data[i++] = oq->stats.packets; 192 + data[i++] = oq->stats.bytes; 193 + data[i++] = oq->stats.alloc_failures; 194 + } 195 + } 196 + 197 + #define OCTEP_VF_SET_ETHTOOL_LINK_MODES_BITMAP(octep_vf_speeds, ksettings, name) \ 198 + { \ 199 + if ((octep_vf_speeds) 
& BIT(OCTEP_VF_LINK_MODE_10GBASE_T)) \ 200 + ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseT_Full); \ 201 + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_R)) \ 202 + ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseR_FEC); \ 203 + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_CR)) \ 204 + ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseCR_Full); \ 205 + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_KR)) \ 206 + ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseKR_Full); \ 207 + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_LR)) \ 208 + ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseLR_Full); \ 209 + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_SR)) \ 210 + ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseSR_Full); \ 211 + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_25GBASE_CR)) \ 212 + ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseCR_Full); \ 213 + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_25GBASE_KR)) \ 214 + ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseKR_Full); \ 215 + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_25GBASE_SR)) \ 216 + ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseSR_Full); \ 217 + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_40GBASE_CR4)) \ 218 + ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseCR4_Full); \ 219 + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_40GBASE_KR4)) \ 220 + ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseKR4_Full); \ 221 + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_40GBASE_LR4)) \ 222 + ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseLR4_Full); \ 223 + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_40GBASE_SR4)) \ 224 + ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseSR4_Full); \ 225 + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_CR2)) \ 226 + 
ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseCR2_Full); \ 227 + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_KR2)) \ 228 + ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseKR2_Full); \ 229 + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_SR2)) \ 230 + ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseSR2_Full); \ 231 + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_CR)) \ 232 + ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseCR_Full); \ 233 + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_KR)) \ 234 + ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseKR_Full); \ 235 + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_LR)) \ 236 + ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseLR_ER_FR_Full); \ 237 + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_SR)) \ 238 + ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseSR_Full); \ 239 + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_100GBASE_CR4)) \ 240 + ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseCR4_Full); \ 241 + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_100GBASE_KR4)) \ 242 + ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseKR4_Full); \ 243 + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_100GBASE_LR4)) \ 244 + ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseLR4_ER4_Full); \ 245 + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_100GBASE_SR4)) \ 246 + ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseSR4_Full); \ 247 + } 248 + 249 + static int octep_vf_get_link_ksettings(struct net_device *netdev, 250 + struct ethtool_link_ksettings *cmd) 251 + { 252 + struct octep_vf_device *oct = netdev_priv(netdev); 253 + struct octep_vf_iface_link_info *link_info; 254 + u32 advertised_modes, supported_modes; 255 + 256 + ethtool_link_ksettings_zero_link_mode(cmd, supported); 257 + ethtool_link_ksettings_zero_link_mode(cmd, 
advertising); 258 + 259 + octep_vf_get_link_info(oct); 260 + 261 + advertised_modes = oct->link_info.advertised_modes; 262 + supported_modes = oct->link_info.supported_modes; 263 + link_info = &oct->link_info; 264 + 265 + OCTEP_VF_SET_ETHTOOL_LINK_MODES_BITMAP(supported_modes, cmd, supported); 266 + OCTEP_VF_SET_ETHTOOL_LINK_MODES_BITMAP(advertised_modes, cmd, advertising); 267 + 268 + if (link_info->autoneg) { 269 + if (link_info->autoneg & OCTEP_VF_LINK_MODE_AUTONEG_SUPPORTED) 270 + ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); 271 + if (link_info->autoneg & OCTEP_VF_LINK_MODE_AUTONEG_ADVERTISED) { 272 + ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); 273 + cmd->base.autoneg = AUTONEG_ENABLE; 274 + } else { 275 + cmd->base.autoneg = AUTONEG_DISABLE; 276 + } 277 + } else { 278 + cmd->base.autoneg = AUTONEG_DISABLE; 279 + } 280 + 281 + cmd->base.port = PORT_FIBRE; 282 + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); 283 + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); 284 + 285 + if (netif_carrier_ok(netdev)) { 286 + cmd->base.speed = link_info->speed; 287 + cmd->base.duplex = DUPLEX_FULL; 288 + } else { 289 + cmd->base.speed = SPEED_UNKNOWN; 290 + cmd->base.duplex = DUPLEX_UNKNOWN; 291 + } 292 + return 0; 293 + } 294 + 295 + static const struct ethtool_ops octep_vf_ethtool_ops = { 296 + .get_drvinfo = octep_vf_get_drvinfo, 297 + .get_link = ethtool_op_get_link, 298 + .get_strings = octep_vf_get_strings, 299 + .get_sset_count = octep_vf_get_sset_count, 300 + .get_ethtool_stats = octep_vf_get_ethtool_stats, 301 + .get_link_ksettings = octep_vf_get_link_ksettings, 302 + }; 303 + 304 + void octep_vf_set_ethtool_ops(struct net_device *netdev) 305 + { 306 + netdev->ethtool_ops = &octep_vf_ethtool_ops; 307 + }
+1230
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Marvell Octeon EP (EndPoint) VF Ethernet Driver 3 + * 4 + * Copyright (C) 2020 Marvell. 5 + * 6 + */ 7 + 8 + #include <linux/types.h> 9 + #include <linux/module.h> 10 + #include <linux/pci.h> 11 + #include <linux/aer.h> 12 + #include <linux/netdevice.h> 13 + #include <linux/etherdevice.h> 14 + #include <linux/rtnetlink.h> 15 + #include <linux/vmalloc.h> 16 + 17 + #include "octep_vf_config.h" 18 + #include "octep_vf_main.h" 19 + 20 + struct workqueue_struct *octep_vf_wq; 21 + 22 + /* Supported Devices */ 23 + static const struct pci_device_id octep_vf_pci_id_tbl[] = { 24 + {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN93_VF)}, 25 + {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF95N_VF)}, 26 + {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN98_VF)}, 27 + {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN10KA_VF)}, 28 + {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF10KA_VF)}, 29 + {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF10KB_VF)}, 30 + {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN10KB_VF)}, 31 + {0, }, 32 + }; 33 + MODULE_DEVICE_TABLE(pci, octep_vf_pci_id_tbl); 34 + 35 + MODULE_AUTHOR("Veerasenareddy Burru <vburru@marvell.com>"); 36 + MODULE_DESCRIPTION(OCTEP_VF_DRV_STRING); 37 + MODULE_LICENSE("GPL"); 38 + 39 + /** 40 + * octep_vf_alloc_ioq_vectors() - Allocate Tx/Rx Queue interrupt info. 41 + * 42 + * @oct: Octeon device private data structure. 43 + * 44 + * Allocate resources to hold per Tx/Rx queue interrupt info. 45 + * This is the information passed to interrupt handler, from which napi poll 46 + * is scheduled and includes quick access to private data of Tx/Rx queue 47 + * corresponding to the interrupt being handled. 48 + * 49 + * Return: 0, on successful allocation of resources for all queue interrupts. 50 + * -1, if failed to allocate any resource. 
51 + */ 52 + static int octep_vf_alloc_ioq_vectors(struct octep_vf_device *oct) 53 + { 54 + struct octep_vf_ioq_vector *ioq_vector; 55 + int i; 56 + 57 + for (i = 0; i < oct->num_oqs; i++) { 58 + oct->ioq_vector[i] = vzalloc(sizeof(*oct->ioq_vector[i])); 59 + if (!oct->ioq_vector[i]) 60 + goto free_ioq_vector; 61 + 62 + ioq_vector = oct->ioq_vector[i]; 63 + ioq_vector->iq = oct->iq[i]; 64 + ioq_vector->oq = oct->oq[i]; 65 + ioq_vector->octep_vf_dev = oct; 66 + } 67 + 68 + dev_info(&oct->pdev->dev, "Allocated %d IOQ vectors\n", oct->num_oqs); 69 + return 0; 70 + 71 + free_ioq_vector: 72 + while (i) { 73 + i--; 74 + vfree(oct->ioq_vector[i]); 75 + oct->ioq_vector[i] = NULL; 76 + } 77 + return -1; 78 + } 79 + 80 + /** 81 + * octep_vf_free_ioq_vectors() - Free Tx/Rx Queue interrupt vector info. 82 + * 83 + * @oct: Octeon device private data structure. 84 + */ 85 + static void octep_vf_free_ioq_vectors(struct octep_vf_device *oct) 86 + { 87 + int i; 88 + 89 + for (i = 0; i < oct->num_oqs; i++) { 90 + if (oct->ioq_vector[i]) { 91 + vfree(oct->ioq_vector[i]); 92 + oct->ioq_vector[i] = NULL; 93 + } 94 + } 95 + netdev_info(oct->netdev, "Freed IOQ Vectors\n"); 96 + } 97 + 98 + /** 99 + * octep_vf_enable_msix_range() - enable MSI-x interrupts. 100 + * 101 + * @oct: Octeon device private data structure. 102 + * 103 + * Allocate and enable all MSI-x interrupts (queue and non-queue interrupts) 104 + * for the Octeon device. 105 + * 106 + * Return: 0, on successfully enabling all MSI-x interrupts. 107 + * -1, if failed to enable any MSI-x interrupt. 
 */
static int octep_vf_enable_msix_range(struct octep_vf_device *oct)
{
	int num_msix, msix_allocated;
	int i;

	/* The VF allocates one MSI-X vector per Tx/Rx queue pair only;
	 * unlike the PF it has no extra non-queue (generic) vectors.
	 */
	num_msix = oct->num_oqs;
	oct->msix_entries = kcalloc(num_msix, sizeof(struct msix_entry), GFP_KERNEL);
	if (!oct->msix_entries)
		goto msix_alloc_err;

	for (i = 0; i < num_msix; i++)
		oct->msix_entries[i].entry = i;

	/* min == max: all-or-nothing allocation of the requested vectors */
	msix_allocated = pci_enable_msix_range(oct->pdev, oct->msix_entries,
					       num_msix, num_msix);
	if (msix_allocated != num_msix) {
		dev_err(&oct->pdev->dev,
			"Failed to enable %d msix irqs; got only %d\n",
			num_msix, msix_allocated);
		goto enable_msix_err;
	}
	oct->num_irqs = msix_allocated;
	dev_info(&oct->pdev->dev, "MSI-X enabled successfully\n");

	return 0;

enable_msix_err:
	if (msix_allocated > 0)
		pci_disable_msix(oct->pdev);
	kfree(oct->msix_entries);
	oct->msix_entries = NULL;
msix_alloc_err:
	return -1;
}

/**
 * octep_vf_disable_msix() - disable MSI-x interrupts.
 *
 * @oct: Octeon device private data structure.
 *
 * Disable MSI-x on the Octeon device.
 */
static void octep_vf_disable_msix(struct octep_vf_device *oct)
{
	pci_disable_msix(oct->pdev);
	kfree(oct->msix_entries);
	oct->msix_entries = NULL;
	dev_info(&oct->pdev->dev, "Disabled MSI-X\n");
}

/**
 * octep_vf_ioq_intr_handler() - handler for all Tx/Rx queue interrupts.
 *
 * @irq: Interrupt number.
 * @data: interrupt data contains pointers to Tx/Rx queue private data
 *        and corresponding NAPI context.
 *
 * this is the common handler for all Tx/Rx queue interrupts; it delegates
 * to the hardware-specific handler, which schedules NAPI.
169 + */ 170 + static irqreturn_t octep_vf_ioq_intr_handler(int irq, void *data) 171 + { 172 + struct octep_vf_ioq_vector *ioq_vector = data; 173 + struct octep_vf_device *oct = ioq_vector->octep_vf_dev; 174 + 175 + return oct->hw_ops.ioq_intr_handler(ioq_vector); 176 + } 177 + 178 + /** 179 + * octep_vf_request_irqs() - Register interrupt handlers. 180 + * 181 + * @oct: Octeon device private data structure. 182 + * 183 + * Register handlers for all queue and non-queue interrupts. 184 + * 185 + * Return: 0, on successful registration of all interrupt handlers. 186 + * -1, on any error. 187 + */ 188 + static int octep_vf_request_irqs(struct octep_vf_device *oct) 189 + { 190 + struct net_device *netdev = oct->netdev; 191 + struct octep_vf_ioq_vector *ioq_vector; 192 + struct msix_entry *msix_entry; 193 + int ret, i; 194 + 195 + /* Request IRQs for Tx/Rx queues */ 196 + for (i = 0; i < oct->num_oqs; i++) { 197 + ioq_vector = oct->ioq_vector[i]; 198 + msix_entry = &oct->msix_entries[i]; 199 + 200 + snprintf(ioq_vector->name, sizeof(ioq_vector->name), 201 + "%s-q%d", netdev->name, i); 202 + ret = request_irq(msix_entry->vector, 203 + octep_vf_ioq_intr_handler, 0, 204 + ioq_vector->name, ioq_vector); 205 + if (ret) { 206 + netdev_err(netdev, 207 + "request_irq failed for Q-%d; err=%d", 208 + i, ret); 209 + goto ioq_irq_err; 210 + } 211 + 212 + cpumask_set_cpu(i % num_online_cpus(), 213 + &ioq_vector->affinity_mask); 214 + irq_set_affinity_hint(msix_entry->vector, 215 + &ioq_vector->affinity_mask); 216 + } 217 + 218 + return 0; 219 + ioq_irq_err: 220 + while (i) { 221 + --i; 222 + free_irq(oct->msix_entries[i].vector, oct); 223 + } 224 + return -1; 225 + } 226 + 227 + /** 228 + * octep_vf_free_irqs() - free all registered interrupts. 229 + * 230 + * @oct: Octeon device private data structure. 231 + * 232 + * Free all queue and non-queue interrupts of the Octeon device. 
 */
static void octep_vf_free_irqs(struct octep_vf_device *oct)
{
	int i;

	for (i = 0; i < oct->num_irqs; i++) {
		/* Clear the affinity hint before freeing the IRQ */
		irq_set_affinity_hint(oct->msix_entries[i].vector, NULL);
		free_irq(oct->msix_entries[i].vector, oct->ioq_vector[i]);
	}
	netdev_info(oct->netdev, "IRQs freed\n");
}

/**
 * octep_vf_setup_irqs() - setup interrupts for the Octeon device.
 *
 * @oct: Octeon device private data structure.
 *
 * Allocate data structures to hold per interrupt information, allocate/enable
 * MSI-x interrupt and register interrupt handlers.
 *
 * Return: 0, on successful allocation and registration of all interrupts.
 *         -1, on any error.
 */
static int octep_vf_setup_irqs(struct octep_vf_device *oct)
{
	if (octep_vf_alloc_ioq_vectors(oct))
		goto ioq_vector_err;

	if (octep_vf_enable_msix_range(oct))
		goto enable_msix_err;

	if (octep_vf_request_irqs(oct))
		goto request_irq_err;

	return 0;

request_irq_err:
	octep_vf_disable_msix(oct);
enable_msix_err:
	octep_vf_free_ioq_vectors(oct);
ioq_vector_err:
	return -1;
}

/**
 * octep_vf_clean_irqs() - free all interrupts and its resources.
 *
 * @oct: Octeon device private data structure.
 */
static void octep_vf_clean_irqs(struct octep_vf_device *oct)
{
	/* Reverse order of octep_vf_setup_irqs() */
	octep_vf_free_irqs(oct);
	octep_vf_disable_msix(oct);
	octep_vf_free_ioq_vectors(oct);
}

/**
 * octep_vf_enable_ioq_irq() - Enable MSI-x interrupt of a Tx/Rx queue.
 *
 * @iq: Octeon Tx queue data structure.
 * @oq: Octeon Rx queue data structure.
 */
static void octep_vf_enable_ioq_irq(struct octep_vf_iq *iq, struct octep_vf_oq *oq)
{
	u32 pkts_pend = oq->pkts_pending;

	netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no);
	/* Acknowledge processed Tx completions and Rx packets to hardware
	 * before re-arming the interrupt.
	 */
	if (iq->pkts_processed) {
		writel(iq->pkts_processed, iq->inst_cnt_reg);
		iq->pkt_in_done -= iq->pkts_processed;
		iq->pkts_processed = 0;
	}
	if (oq->last_pkt_count - pkts_pend) {
		writel(oq->last_pkt_count - pkts_pend, oq->pkts_sent_reg);
		oq->last_pkt_count = pkts_pend;
	}

	/* Flush the previous writes before writing to RESEND bit */
	smp_wmb();
	writeq(1UL << OCTEP_VF_OQ_INTR_RESEND_BIT, oq->pkts_sent_reg);
	writeq(1UL << OCTEP_VF_IQ_INTR_RESEND_BIT, iq->inst_cnt_reg);
}

/**
 * octep_vf_napi_poll() - NAPI poll function for Tx/Rx.
 *
 * @napi: pointer to napi context.
 * @budget: max number of packets to be processed in single invocation.
 *
 * Return: @budget if more work remains (Tx completions pending or Rx used
 *         the full budget), otherwise the number of Rx packets processed.
 */
static int octep_vf_napi_poll(struct napi_struct *napi, int budget)
{
	struct octep_vf_ioq_vector *ioq_vector =
		container_of(napi, struct octep_vf_ioq_vector, napi);
	u32 tx_pending, rx_done;

	tx_pending = octep_vf_iq_process_completions(ioq_vector->iq, budget);
	rx_done = octep_vf_oq_process_rx(ioq_vector->oq, budget);

	/* need more polling if tx completion processing is still pending or
	 * processed at least 'budget' number of rx packets.
	 */
	if (tx_pending || rx_done >= budget)
		return budget;

	napi_complete(napi);
	octep_vf_enable_ioq_irq(ioq_vector->iq, ioq_vector->oq);
	return rx_done;
}

/**
 * octep_vf_napi_add() - Add NAPI poll for all Tx/Rx queues.
 *
 * @oct: Octeon device private data structure.
 */
static void octep_vf_napi_add(struct octep_vf_device *oct)
{
	int i;

	for (i = 0; i < oct->num_oqs; i++) {
		netdev_dbg(oct->netdev, "Adding NAPI on Q-%d\n", i);
		netif_napi_add(oct->netdev, &oct->ioq_vector[i]->napi, octep_vf_napi_poll);
		/* Rx queue keeps a back-pointer to its NAPI context */
		oct->oq[i]->napi = &oct->ioq_vector[i]->napi;
	}
}

/**
 * octep_vf_napi_delete() - delete NAPI poll callback for all Tx/Rx queues.
 *
 * @oct: Octeon device private data structure.
 */
static void octep_vf_napi_delete(struct octep_vf_device *oct)
{
	int i;

	for (i = 0; i < oct->num_oqs; i++) {
		netdev_dbg(oct->netdev, "Deleting NAPI on Q-%d\n", i);
		netif_napi_del(&oct->ioq_vector[i]->napi);
		oct->oq[i]->napi = NULL;
	}
}

/**
 * octep_vf_napi_enable() - enable NAPI for all Tx/Rx queues.
 *
 * @oct: Octeon device private data structure.
 */
static void octep_vf_napi_enable(struct octep_vf_device *oct)
{
	int i;

	for (i = 0; i < oct->num_oqs; i++) {
		netdev_dbg(oct->netdev, "Enabling NAPI on Q-%d\n", i);
		napi_enable(&oct->ioq_vector[i]->napi);
	}
}

/**
 * octep_vf_napi_disable() - disable NAPI for all Tx/Rx queues.
 *
 * @oct: Octeon device private data structure.
 */
static void octep_vf_napi_disable(struct octep_vf_device *oct)
{
	int i;

	for (i = 0; i < oct->num_oqs; i++) {
		netdev_dbg(oct->netdev, "Disabling NAPI on Q-%d\n", i);
		napi_disable(&oct->ioq_vector[i]->napi);
	}
}

/* Mark carrier up and start all Tx queues */
static void octep_vf_link_up(struct net_device *netdev)
{
	netif_carrier_on(netdev);
	netif_tx_start_all_queues(netdev);
}

/* Request PF (via mailbox) to enable/disable Rx for this VF */
static void octep_vf_set_rx_state(struct octep_vf_device *oct, bool up)
{
	int err;

	err = octep_vf_mbox_set_rx_state(oct, up);
	if (err)
		netdev_err(oct->netdev, "Set Rx state to %d failed with err:%d\n", up, err);
}

/* Query operational link state from PF; also caches it in oct->link_info */
static int octep_vf_get_link_status(struct octep_vf_device *oct)
{
	int err;

	err = octep_vf_mbox_get_link_status(oct, &oct->link_info.oper_up);
	if (err)
		netdev_err(oct->netdev, "Get link status failed with err:%d\n", err);
	return oct->link_info.oper_up;
}

/* Request PF to change link state; update cached state only on success */
static void octep_vf_set_link_status(struct octep_vf_device *oct, bool up)
{
	int err;

	err = octep_vf_mbox_set_link_status(oct, up);
	if (err) {
		netdev_err(oct->netdev, "Set link status to %d failed with err:%d\n", up, err);
		return;
	}
	oct->link_info.oper_up = up;
}

/**
 * octep_vf_open() - start the octeon network device.
 *
 * @netdev: pointer to kernel network device.
 *
 * setup Tx/Rx queues, interrupts and enable hardware operation of Tx/Rx queues
 * and interrupts.
 *
 * Return: 0, on successfully setting up device and bring it up.
 *         -1, on any error.
451 + */ 452 + static int octep_vf_open(struct net_device *netdev) 453 + { 454 + struct octep_vf_device *oct = netdev_priv(netdev); 455 + int err, ret; 456 + 457 + netdev_info(netdev, "Starting netdev ...\n"); 458 + netif_carrier_off(netdev); 459 + 460 + oct->hw_ops.reset_io_queues(oct); 461 + 462 + if (octep_vf_setup_iqs(oct)) 463 + goto setup_iq_err; 464 + if (octep_vf_setup_oqs(oct)) 465 + goto setup_oq_err; 466 + if (octep_vf_setup_irqs(oct)) 467 + goto setup_irq_err; 468 + 469 + err = netif_set_real_num_tx_queues(netdev, oct->num_oqs); 470 + if (err) 471 + goto set_queues_err; 472 + err = netif_set_real_num_rx_queues(netdev, oct->num_iqs); 473 + if (err) 474 + goto set_queues_err; 475 + 476 + octep_vf_napi_add(oct); 477 + octep_vf_napi_enable(oct); 478 + 479 + oct->link_info.admin_up = 1; 480 + octep_vf_set_rx_state(oct, true); 481 + 482 + ret = octep_vf_get_link_status(oct); 483 + if (!ret) 484 + octep_vf_set_link_status(oct, true); 485 + 486 + /* Enable the input and output queues for this Octeon device */ 487 + oct->hw_ops.enable_io_queues(oct); 488 + 489 + /* Enable Octeon device interrupts */ 490 + oct->hw_ops.enable_interrupts(oct); 491 + 492 + octep_vf_oq_dbell_init(oct); 493 + 494 + ret = octep_vf_get_link_status(oct); 495 + if (ret) 496 + octep_vf_link_up(netdev); 497 + 498 + return 0; 499 + 500 + set_queues_err: 501 + octep_vf_napi_disable(oct); 502 + octep_vf_napi_delete(oct); 503 + octep_vf_clean_irqs(oct); 504 + setup_irq_err: 505 + octep_vf_free_oqs(oct); 506 + setup_oq_err: 507 + octep_vf_free_iqs(oct); 508 + setup_iq_err: 509 + return -1; 510 + } 511 + 512 + /** 513 + * octep_vf_stop() - stop the octeon network device. 514 + * 515 + * @netdev: pointer to kernel network device. 516 + * 517 + * stop the device Tx/Rx operations, bring down the link and 518 + * free up all resources allocated for Tx/Rx queues and interrupts. 
 */
static int octep_vf_stop(struct net_device *netdev)
{
	struct octep_vf_device *oct = netdev_priv(netdev);

	netdev_info(netdev, "Stopping the device ...\n");

	/* Stop Tx from stack */
	netif_tx_stop_all_queues(netdev);
	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	octep_vf_set_link_status(oct, false);
	octep_vf_set_rx_state(oct, false);

	oct->link_info.admin_up = 0;
	oct->link_info.oper_up = 0;

	oct->hw_ops.disable_interrupts(oct);
	octep_vf_napi_disable(oct);
	octep_vf_napi_delete(oct);

	octep_vf_clean_irqs(oct);
	octep_vf_clean_iqs(oct);

	oct->hw_ops.disable_io_queues(oct);
	oct->hw_ops.reset_io_queues(oct);
	octep_vf_free_oqs(oct);
	octep_vf_free_iqs(oct);
	netdev_info(netdev, "Device stopped !!\n");
	return 0;
}

/**
 * octep_vf_iq_full_check() - check if a Tx queue is full.
 *
 * @iq: Octeon Tx queue data structure.
 *
 * Return: 0, if the Tx queue is not full.
 *         1, if the Tx queue is full.
 */
static int octep_vf_iq_full_check(struct octep_vf_iq *iq)
{
	if (likely((IQ_INSTR_SPACE(iq)) >
		   OCTEP_VF_WAKE_QUEUE_THRESHOLD))
		return 0;

	/* Stop the queue if unable to send */
	netif_stop_subqueue(iq->netdev, iq->q_no);

	/* check again and restart the queue, in case NAPI has just freed
	 * enough Tx ring entries.
	 */
	if (unlikely(IQ_INSTR_SPACE(iq) >
		     OCTEP_VF_WAKE_QUEUE_THRESHOLD)) {
		netif_start_subqueue(iq->netdev, iq->q_no);
		iq->stats.restart_cnt++;
		return 0;
	}

	return 1;
}

/**
 * octep_vf_start_xmit() - Enqueue packet to Octeon hardware Tx Queue.
 *
 * @skb: packet skbuff pointer.
 * @netdev: kernel network device.
 *
 * Return: NETDEV_TX_BUSY, if Tx Queue is full.
589 + * NETDEV_TX_OK, if successfully enqueued to hardware Tx queue. 590 + */ 591 + static netdev_tx_t octep_vf_start_xmit(struct sk_buff *skb, 592 + struct net_device *netdev) 593 + { 594 + struct octep_vf_device *oct = netdev_priv(netdev); 595 + netdev_features_t feat = netdev->features; 596 + struct octep_vf_tx_sglist_desc *sglist; 597 + struct octep_vf_tx_buffer *tx_buffer; 598 + struct octep_vf_tx_desc_hw *hw_desc; 599 + struct skb_shared_info *shinfo; 600 + struct octep_vf_instr_hdr *ih; 601 + struct octep_vf_iq *iq; 602 + skb_frag_t *frag; 603 + u16 nr_frags, si; 604 + int xmit_more; 605 + u16 q_no, wi; 606 + 607 + if (skb_put_padto(skb, ETH_ZLEN)) 608 + return NETDEV_TX_OK; 609 + 610 + q_no = skb_get_queue_mapping(skb); 611 + if (q_no >= oct->num_iqs) { 612 + netdev_err(netdev, "Invalid Tx skb->queue_mapping=%d\n", q_no); 613 + q_no = q_no % oct->num_iqs; 614 + } 615 + 616 + iq = oct->iq[q_no]; 617 + if (octep_vf_iq_full_check(iq)) { 618 + iq->stats.tx_busy++; 619 + return NETDEV_TX_BUSY; 620 + } 621 + 622 + shinfo = skb_shinfo(skb); 623 + nr_frags = shinfo->nr_frags; 624 + 625 + wi = iq->host_write_index; 626 + hw_desc = &iq->desc_ring[wi]; 627 + hw_desc->ih64 = 0; 628 + 629 + tx_buffer = iq->buff_info + wi; 630 + tx_buffer->skb = skb; 631 + 632 + ih = &hw_desc->ih; 633 + ih->tlen = skb->len; 634 + ih->pkind = oct->fw_info.pkind; 635 + ih->fsz = oct->fw_info.fsz; 636 + ih->tlen = skb->len + ih->fsz; 637 + 638 + if (!nr_frags) { 639 + tx_buffer->gather = 0; 640 + tx_buffer->dma = dma_map_single(iq->dev, skb->data, 641 + skb->len, DMA_TO_DEVICE); 642 + if (dma_mapping_error(iq->dev, tx_buffer->dma)) 643 + goto dma_map_err; 644 + hw_desc->dptr = tx_buffer->dma; 645 + } else { 646 + /* Scatter/Gather */ 647 + dma_addr_t dma; 648 + u16 len; 649 + 650 + sglist = tx_buffer->sglist; 651 + 652 + ih->gsz = nr_frags + 1; 653 + ih->gather = 1; 654 + tx_buffer->gather = 1; 655 + 656 + len = skb_headlen(skb); 657 + dma = dma_map_single(iq->dev, skb->data, len, 
DMA_TO_DEVICE); 658 + if (dma_mapping_error(iq->dev, dma)) 659 + goto dma_map_err; 660 + 661 + memset(sglist, 0, OCTEP_VF_SGLIST_SIZE_PER_PKT); 662 + sglist[0].len[3] = len; 663 + sglist[0].dma_ptr[0] = dma; 664 + 665 + si = 1; /* entry 0 is main skb, mapped above */ 666 + frag = &shinfo->frags[0]; 667 + while (nr_frags--) { 668 + len = skb_frag_size(frag); 669 + dma = skb_frag_dma_map(iq->dev, frag, 0, 670 + len, DMA_TO_DEVICE); 671 + if (dma_mapping_error(iq->dev, dma)) 672 + goto dma_map_sg_err; 673 + 674 + sglist[si >> 2].len[3 - (si & 3)] = len; 675 + sglist[si >> 2].dma_ptr[si & 3] = dma; 676 + 677 + frag++; 678 + si++; 679 + } 680 + hw_desc->dptr = tx_buffer->sglist_dma; 681 + } 682 + if (oct->fw_info.tx_ol_flags) { 683 + if ((feat & (NETIF_F_TSO)) && (skb_is_gso(skb))) { 684 + hw_desc->txm.ol_flags = OCTEP_VF_TX_OFFLOAD_CKSUM; 685 + hw_desc->txm.ol_flags |= OCTEP_VF_TX_OFFLOAD_TSO; 686 + hw_desc->txm.gso_size = skb_shinfo(skb)->gso_size; 687 + hw_desc->txm.gso_segs = skb_shinfo(skb)->gso_segs; 688 + } else if (feat & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) { 689 + hw_desc->txm.ol_flags = OCTEP_VF_TX_OFFLOAD_CKSUM; 690 + } 691 + /* due to ESR txm will be swappeed by hw */ 692 + hw_desc->txm64[0] = (__force u64)cpu_to_be64(hw_desc->txm64[0]); 693 + } 694 + 695 + netdev_tx_sent_queue(iq->netdev_q, skb->len); 696 + 697 + xmit_more = netdev_xmit_more(); 698 + 699 + skb_tx_timestamp(skb); 700 + iq->fill_cnt++; 701 + wi++; 702 + iq->host_write_index = wi & iq->ring_size_mask; 703 + if (xmit_more && 704 + (IQ_INSTR_PENDING(iq) < 705 + (iq->max_count - OCTEP_VF_WAKE_QUEUE_THRESHOLD)) && 706 + iq->fill_cnt < iq->fill_threshold) 707 + return NETDEV_TX_OK; 708 + 709 + /* Flush the hw descriptors before writing to doorbell */ 710 + smp_wmb(); 711 + writel(iq->fill_cnt, iq->doorbell_reg); 712 + iq->stats.instr_posted += iq->fill_cnt; 713 + iq->fill_cnt = 0; 714 + return NETDEV_TX_OK; 715 + 716 + dma_map_sg_err: 717 + if (si > 0) { 718 + dma_unmap_single(iq->dev, 
sglist[0].dma_ptr[0], 719 + sglist[0].len[0], DMA_TO_DEVICE); 720 + sglist[0].len[0] = 0; 721 + } 722 + while (si > 1) { 723 + dma_unmap_page(iq->dev, sglist[si >> 2].dma_ptr[si & 3], 724 + sglist[si >> 2].len[si & 3], DMA_TO_DEVICE); 725 + sglist[si >> 2].len[si & 3] = 0; 726 + si--; 727 + } 728 + tx_buffer->gather = 0; 729 + dma_map_err: 730 + dev_kfree_skb_any(skb); 731 + return NETDEV_TX_OK; 732 + } 733 + 734 + int octep_vf_get_if_stats(struct octep_vf_device *oct) 735 + { 736 + struct octep_vf_iface_rxtx_stats vf_stats; 737 + int ret, size; 738 + 739 + memset(&vf_stats, 0, sizeof(struct octep_vf_iface_rxtx_stats)); 740 + ret = octep_vf_mbox_bulk_read(oct, OCTEP_PFVF_MBOX_CMD_GET_STATS, 741 + (u8 *)&vf_stats, &size); 742 + if (!ret) { 743 + memcpy(&oct->iface_rx_stats, &vf_stats.iface_rx_stats, 744 + sizeof(struct octep_vf_iface_rx_stats)); 745 + memcpy(&oct->iface_tx_stats, &vf_stats.iface_tx_stats, 746 + sizeof(struct octep_vf_iface_tx_stats)); 747 + } 748 + return ret; 749 + } 750 + 751 + int octep_vf_get_link_info(struct octep_vf_device *oct) 752 + { 753 + int ret, size; 754 + 755 + ret = octep_vf_mbox_bulk_read(oct, OCTEP_PFVF_MBOX_CMD_GET_LINK_INFO, 756 + (u8 *)&oct->link_info, &size); 757 + if (ret) { 758 + dev_err(&oct->pdev->dev, "Get VF link info failed via VF Mbox\n"); 759 + return ret; 760 + } 761 + return 0; 762 + } 763 + 764 + /** 765 + * octep_vf_get_stats64() - Get Octeon network device statistics. 766 + * 767 + * @netdev: kernel network device. 768 + * @stats: pointer to stats structure to be filled in. 
 */
static void octep_vf_get_stats64(struct net_device *netdev,
				 struct rtnl_link_stats64 *stats)
{
	struct octep_vf_device *oct = netdev_priv(netdev);
	u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
	int q;

	tx_packets = 0;
	tx_bytes = 0;
	rx_packets = 0;
	rx_bytes = 0;
	/* NOTE(review): iq[q] is indexed with an oq-count bound; this
	 * assumes num_iqs == num_oqs — confirm against queue setup.
	 */
	for (q = 0; q < oct->num_oqs; q++) {
		struct octep_vf_iq *iq = oct->iq[q];
		struct octep_vf_oq *oq = oct->oq[q];

		tx_packets += iq->stats.instr_completed;
		tx_bytes += iq->stats.bytes_sent;
		rx_packets += oq->stats.packets;
		rx_bytes += oq->stats.bytes;
	}
	stats->tx_packets = tx_packets;
	stats->tx_bytes = tx_bytes;
	stats->rx_packets = rx_packets;
	stats->rx_bytes = rx_bytes;
	/* Multicast/error counters come from the PF via mailbox; fill them
	 * only if the mailbox read succeeds.
	 */
	if (!octep_vf_get_if_stats(oct)) {
		stats->multicast = oct->iface_rx_stats.mcast_pkts;
		stats->rx_errors = oct->iface_rx_stats.err_pkts;
	}
}

/**
 * octep_vf_tx_timeout_task - work queue task to Handle Tx queue timeout.
 *
 * @work: pointer to Tx queue timeout work_struct
 *
 * Stop and start the device so that it frees up all queue resources
 * and restarts the queues, that potentially clears a Tx queue timeout
 * condition.
 **/
static void octep_vf_tx_timeout_task(struct work_struct *work)
{
	struct octep_vf_device *oct = container_of(work, struct octep_vf_device,
						   tx_timeout_task);
	struct net_device *netdev = oct->netdev;

	rtnl_lock();
	if (netif_running(netdev)) {
		octep_vf_stop(netdev);
		octep_vf_open(netdev);
	}
	rtnl_unlock();
}

/**
 * octep_vf_tx_timeout() - Handle Tx Queue timeout.
 *
 * @netdev: pointer to kernel network device.
 * @txqueue: Timed out Tx queue number.
 *
 * Schedule a work to handle Tx queue timeout.
830 + */ 831 + static void octep_vf_tx_timeout(struct net_device *netdev, unsigned int txqueue) 832 + { 833 + struct octep_vf_device *oct = netdev_priv(netdev); 834 + 835 + queue_work(octep_vf_wq, &oct->tx_timeout_task); 836 + } 837 + 838 + static int octep_vf_set_mac(struct net_device *netdev, void *p) 839 + { 840 + struct octep_vf_device *oct = netdev_priv(netdev); 841 + struct sockaddr *addr = (struct sockaddr *)p; 842 + int err; 843 + 844 + if (!is_valid_ether_addr(addr->sa_data)) 845 + return -EADDRNOTAVAIL; 846 + 847 + err = octep_vf_mbox_set_mac_addr(oct, addr->sa_data); 848 + if (err) 849 + return err; 850 + 851 + memcpy(oct->mac_addr, addr->sa_data, ETH_ALEN); 852 + eth_hw_addr_set(netdev, addr->sa_data); 853 + 854 + return 0; 855 + } 856 + 857 + static int octep_vf_change_mtu(struct net_device *netdev, int new_mtu) 858 + { 859 + struct octep_vf_device *oct = netdev_priv(netdev); 860 + struct octep_vf_iface_link_info *link_info; 861 + int err; 862 + 863 + link_info = &oct->link_info; 864 + if (link_info->mtu == new_mtu) 865 + return 0; 866 + 867 + err = octep_vf_mbox_set_mtu(oct, new_mtu); 868 + if (!err) { 869 + oct->link_info.mtu = new_mtu; 870 + netdev->mtu = new_mtu; 871 + } 872 + return err; 873 + } 874 + 875 + static int octep_vf_set_features(struct net_device *netdev, 876 + netdev_features_t features) 877 + { 878 + struct octep_vf_device *oct = netdev_priv(netdev); 879 + u16 rx_offloads = 0, tx_offloads = 0; 880 + int err; 881 + 882 + /* We only support features received from firmware */ 883 + if ((features & netdev->hw_features) != features) 884 + return -EINVAL; 885 + 886 + if (features & NETIF_F_TSO) 887 + tx_offloads |= OCTEP_VF_TX_OFFLOAD_TSO; 888 + 889 + if (features & NETIF_F_TSO6) 890 + tx_offloads |= OCTEP_VF_TX_OFFLOAD_TSO; 891 + 892 + if (features & NETIF_F_IP_CSUM) 893 + tx_offloads |= OCTEP_VF_TX_OFFLOAD_CKSUM; 894 + 895 + if (features & NETIF_F_IPV6_CSUM) 896 + tx_offloads |= OCTEP_VF_TX_OFFLOAD_CKSUM; 897 + 898 + if (features & 
NETIF_F_RXCSUM) 899 + rx_offloads |= OCTEP_VF_RX_OFFLOAD_CKSUM; 900 + 901 + err = octep_vf_mbox_set_offloads(oct, tx_offloads, rx_offloads); 902 + if (!err) 903 + netdev->features = features; 904 + 905 + return err; 906 + } 907 + 908 + static const struct net_device_ops octep_vf_netdev_ops = { 909 + .ndo_open = octep_vf_open, 910 + .ndo_stop = octep_vf_stop, 911 + .ndo_start_xmit = octep_vf_start_xmit, 912 + .ndo_get_stats64 = octep_vf_get_stats64, 913 + .ndo_tx_timeout = octep_vf_tx_timeout, 914 + .ndo_set_mac_address = octep_vf_set_mac, 915 + .ndo_change_mtu = octep_vf_change_mtu, 916 + .ndo_set_features = octep_vf_set_features, 917 + }; 918 + 919 + static const char *octep_vf_devid_to_str(struct octep_vf_device *oct) 920 + { 921 + switch (oct->chip_id) { 922 + case OCTEP_PCI_DEVICE_ID_CN93_VF: 923 + return "CN93XX"; 924 + case OCTEP_PCI_DEVICE_ID_CNF95N_VF: 925 + return "CNF95N"; 926 + case OCTEP_PCI_DEVICE_ID_CN10KA_VF: 927 + return "CN10KA"; 928 + case OCTEP_PCI_DEVICE_ID_CNF10KA_VF: 929 + return "CNF10KA"; 930 + case OCTEP_PCI_DEVICE_ID_CNF10KB_VF: 931 + return "CNF10KB"; 932 + case OCTEP_PCI_DEVICE_ID_CN10KB_VF: 933 + return "CN10KB"; 934 + default: 935 + return "Unsupported"; 936 + } 937 + } 938 + 939 + /** 940 + * octep_vf_device_setup() - Setup Octeon Device. 941 + * 942 + * @oct: Octeon device private data structure. 943 + * 944 + * Setup Octeon device hardware operations, configuration, etc ... 
945 + */ 946 + int octep_vf_device_setup(struct octep_vf_device *oct) 947 + { 948 + struct pci_dev *pdev = oct->pdev; 949 + 950 + /* allocate memory for oct->conf */ 951 + oct->conf = kzalloc(sizeof(*oct->conf), GFP_KERNEL); 952 + if (!oct->conf) 953 + return -ENOMEM; 954 + 955 + /* Map BAR region 0 */ 956 + oct->mmio.hw_addr = ioremap(pci_resource_start(oct->pdev, 0), 957 + pci_resource_len(oct->pdev, 0)); 958 + if (!oct->mmio.hw_addr) { 959 + dev_err(&pdev->dev, 960 + "Failed to remap BAR0; start=0x%llx len=0x%llx\n", 961 + pci_resource_start(oct->pdev, 0), 962 + pci_resource_len(oct->pdev, 0)); 963 + goto ioremap_err; 964 + } 965 + oct->mmio.mapped = 1; 966 + 967 + oct->chip_id = pdev->device; 968 + oct->rev_id = pdev->revision; 969 + dev_info(&pdev->dev, "chip_id = 0x%x\n", pdev->device); 970 + 971 + switch (oct->chip_id) { 972 + case OCTEP_PCI_DEVICE_ID_CN93_VF: 973 + case OCTEP_PCI_DEVICE_ID_CNF95N_VF: 974 + case OCTEP_PCI_DEVICE_ID_CN98_VF: 975 + dev_info(&pdev->dev, "Setting up OCTEON %s VF PASS%d.%d\n", 976 + octep_vf_devid_to_str(oct), OCTEP_VF_MAJOR_REV(oct), 977 + OCTEP_VF_MINOR_REV(oct)); 978 + octep_vf_device_setup_cn93(oct); 979 + break; 980 + case OCTEP_PCI_DEVICE_ID_CNF10KA_VF: 981 + case OCTEP_PCI_DEVICE_ID_CN10KA_VF: 982 + case OCTEP_PCI_DEVICE_ID_CNF10KB_VF: 983 + case OCTEP_PCI_DEVICE_ID_CN10KB_VF: 984 + dev_info(&pdev->dev, "Setting up OCTEON %s VF PASS%d.%d\n", 985 + octep_vf_devid_to_str(oct), OCTEP_VF_MAJOR_REV(oct), 986 + OCTEP_VF_MINOR_REV(oct)); 987 + octep_vf_device_setup_cnxk(oct); 988 + break; 989 + default: 990 + dev_err(&pdev->dev, "Unsupported device\n"); 991 + goto unsupported_dev; 992 + } 993 + 994 + return 0; 995 + 996 + unsupported_dev: 997 + iounmap(oct->mmio.hw_addr); 998 + ioremap_err: 999 + kfree(oct->conf); 1000 + return -EOPNOTSUPP; 1001 + } 1002 + 1003 + /** 1004 + * octep_vf_device_cleanup() - Cleanup Octeon Device. 1005 + * 1006 + * @oct: Octeon device private data structure. 
 *
 * Cleanup Octeon device allocated resources.
 */
static void octep_vf_device_cleanup(struct octep_vf_device *oct)
{
	dev_info(&oct->pdev->dev, "Cleaning up Octeon Device ...\n");

	if (oct->mmio.mapped)
		iounmap(oct->mmio.hw_addr);

	kfree(oct->conf);
	oct->conf = NULL;
}

/* Read this VF's MAC address from the PF via mailbox */
static int octep_vf_get_mac_addr(struct octep_vf_device *oct, u8 *addr)
{
	return octep_vf_mbox_get_mac_addr(oct, addr);
}

/**
 * octep_vf_probe() - Octeon PCI device probe handler.
 *
 * @pdev: PCI device structure.
 * @ent: entry in Octeon PCI device ID table.
 *
 * Initializes and enables the Octeon PCI device for network operations.
 * Initializes Octeon private data structure and registers a network device.
 *
 * Return: 0 on success; negative errno on failure.
 */
static int octep_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct octep_vf_device *octep_vf_dev;
	struct net_device *netdev;
	int err;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Failed to enable PCI device\n");
		return err;
	}

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev, "Failed to set DMA mask !!\n");
		goto err_dma_mask;
	}

	err = pci_request_mem_regions(pdev, OCTEP_VF_DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Failed to map PCI memory regions\n");
		goto err_pci_regions;
	}

	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct octep_vf_device),
				   OCTEP_VF_MAX_QUEUES);
	if (!netdev) {
		dev_err(&pdev->dev, "Failed to allocate netdev\n");
		err = -ENOMEM;
		goto err_alloc_netdev;
	}
	SET_NETDEV_DEV(netdev, &pdev->dev);

	octep_vf_dev = netdev_priv(netdev);
	octep_vf_dev->netdev = netdev;
	octep_vf_dev->pdev = pdev;
	octep_vf_dev->dev = &pdev->dev;
	pci_set_drvdata(pdev, octep_vf_dev);

	err = octep_vf_device_setup(octep_vf_dev);
	if (err) {
		dev_err(&pdev->dev, "Device setup failed\n");
		goto err_octep_vf_config;
	}
	INIT_WORK(&octep_vf_dev->tx_timeout_task, octep_vf_tx_timeout_task);

	netdev->netdev_ops = &octep_vf_netdev_ops;
	octep_vf_set_ethtool_ops(netdev);
	netif_carrier_off(netdev);

	if (octep_vf_setup_mbox(octep_vf_dev)) {
		dev_err(&pdev->dev, "VF Mailbox setup failed\n");
		err = -ENOMEM;
		goto err_setup_mbox;
	}

	if (octep_vf_mbox_version_check(octep_vf_dev)) {
		dev_err(&pdev->dev, "PF VF Mailbox version mismatch\n");
		err = -EINVAL;
		goto err_mbox_version;
	}

	if (octep_vf_mbox_get_fw_info(octep_vf_dev)) {
		dev_err(&pdev->dev, "unable to get fw info\n");
		err = -EINVAL;
		goto err_mbox_version;
	}

	/* Advertise only what firmware reported it can offload */
	netdev->hw_features = NETIF_F_SG;
	if (OCTEP_VF_TX_IP_CSUM(octep_vf_dev->fw_info.tx_ol_flags))
		netdev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);

	if (OCTEP_VF_RX_IP_CSUM(octep_vf_dev->fw_info.rx_ol_flags))
		netdev->hw_features |= NETIF_F_RXCSUM;

	netdev->min_mtu = OCTEP_VF_MIN_MTU;
	netdev->max_mtu = OCTEP_VF_MAX_MTU;
	netdev->mtu = OCTEP_VF_DEFAULT_MTU;

	if (OCTEP_VF_TX_TSO(octep_vf_dev->fw_info.tx_ol_flags)) {
		netdev->hw_features |= NETIF_F_TSO;
		netif_set_tso_max_size(netdev, netdev->max_mtu);
	}

	netdev->features |= netdev->hw_features;
	/* NOTE(review): return value of octep_vf_get_mac_addr() is not
	 * checked; on mailbox failure a zero/garbage MAC would be set —
	 * confirm whether a random MAC fallback is intended here.
	 */
	octep_vf_get_mac_addr(octep_vf_dev, octep_vf_dev->mac_addr);
	eth_hw_addr_set(netdev, octep_vf_dev->mac_addr);
	err = register_netdev(netdev);
	if (err) {
		dev_err(&pdev->dev, "Failed to register netdev\n");
		goto err_register_dev;
	}

	dev_info(&pdev->dev, "Device probe successful\n");
	return 0;

err_register_dev:
err_mbox_version:
	octep_vf_delete_mbox(octep_vf_dev);
err_setup_mbox:
	octep_vf_device_cleanup(octep_vf_dev);
err_octep_vf_config:
	free_netdev(netdev);
err_alloc_netdev:
	pci_release_mem_regions(pdev);
err_pci_regions:
err_dma_mask:
	pci_disable_device(pdev);
	dev_err(&pdev->dev, "Device probe failed\n");
	return err;
}

/**
 * octep_vf_remove() - Remove Octeon PCI device from driver control.
 *
 * @pdev: PCI device structure of the Octeon device.
 *
 * Cleanup all resources allocated for the Octeon device.
 * Unregister from network device and disable the PCI device.
 */
static void octep_vf_remove(struct pci_dev *pdev)
{
	struct octep_vf_device *oct = pci_get_drvdata(pdev);
	struct net_device *netdev;

	if (!oct)
		return;

	octep_vf_mbox_dev_remove(oct);
	cancel_work_sync(&oct->tx_timeout_task);
	netdev = oct->netdev;
	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);
	octep_vf_delete_mbox(oct);
	octep_vf_device_cleanup(oct);
	pci_release_mem_regions(pdev);
	free_netdev(netdev);
	pci_disable_device(pdev);
}

static struct pci_driver octep_vf_driver = {
	.name = OCTEP_VF_DRV_NAME,
	.id_table = octep_vf_pci_id_tbl,
	.probe = octep_vf_probe,
	.remove = octep_vf_remove,
};

/**
 * octep_vf_init_module() - Module initialization.
 *
 * create common resource for the driver and register PCI driver.
1187 + */ 1188 + static int __init octep_vf_init_module(void) 1189 + { 1190 + int ret; 1191 + 1192 + pr_info("%s: Loading %s ...\n", OCTEP_VF_DRV_NAME, OCTEP_VF_DRV_STRING); 1193 + 1194 + /* work queue for all deferred tasks */ 1195 + octep_vf_wq = create_singlethread_workqueue(OCTEP_VF_DRV_NAME); 1196 + if (!octep_vf_wq) { 1197 + pr_err("%s: Failed to create common workqueue\n", 1198 + OCTEP_VF_DRV_NAME); 1199 + return -ENOMEM; 1200 + } 1201 + 1202 + ret = pci_register_driver(&octep_vf_driver); 1203 + if (ret < 0) { 1204 + pr_err("%s: Failed to register PCI driver; err=%d\n", 1205 + OCTEP_VF_DRV_NAME, ret); 1206 + return ret; 1207 + } 1208 + 1209 + pr_info("%s: Loaded successfully !\n", OCTEP_VF_DRV_NAME); 1210 + 1211 + return ret; 1212 + } 1213 + 1214 + /** 1215 + * octep_vf_exit_module() - Module exit routine. 1216 + * 1217 + * unregister the driver with PCI subsystem and cleanup common resources. 1218 + */ 1219 + static void __exit octep_vf_exit_module(void) 1220 + { 1221 + pr_info("%s: Unloading ...\n", OCTEP_VF_DRV_NAME); 1222 + 1223 + pci_unregister_driver(&octep_vf_driver); 1224 + destroy_workqueue(octep_vf_wq); 1225 + 1226 + pr_info("%s: Unloading complete\n", OCTEP_VF_DRV_NAME); 1227 + } 1228 + 1229 + module_init(octep_vf_init_module); 1230 + module_exit(octep_vf_exit_module);
+338
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Marvell Octeon EP (EndPoint) VF Ethernet Driver 3 + * 4 + * Copyright (C) 2020 Marvell. 5 + * 6 + */ 7 + 8 + #ifndef _OCTEP_VF_MAIN_H_ 9 + #define _OCTEP_VF_MAIN_H_ 10 + 11 + #include "octep_vf_tx.h" 12 + #include "octep_vf_rx.h" 13 + #include "octep_vf_mbox.h" 14 + 15 + #define OCTEP_VF_DRV_NAME "octeon_ep_vf" 16 + #define OCTEP_VF_DRV_STRING "Marvell Octeon EndPoint NIC VF Driver" 17 + 18 + #define OCTEP_PCI_DEVICE_ID_CN93_VF 0xB203 //93xx VF 19 + #define OCTEP_PCI_DEVICE_ID_CNF95N_VF 0xB403 //95N VF 20 + #define OCTEP_PCI_DEVICE_ID_CN98_VF 0xB103 21 + #define OCTEP_PCI_DEVICE_ID_CN10KA_VF 0xB903 22 + #define OCTEP_PCI_DEVICE_ID_CNF10KA_VF 0xBA03 23 + #define OCTEP_PCI_DEVICE_ID_CNF10KB_VF 0xBC03 24 + #define OCTEP_PCI_DEVICE_ID_CN10KB_VF 0xBD03 25 + 26 + #define OCTEP_VF_MAX_QUEUES 63 27 + #define OCTEP_VF_MAX_IQ OCTEP_VF_MAX_QUEUES 28 + #define OCTEP_VF_MAX_OQ OCTEP_VF_MAX_QUEUES 29 + 30 + #define OCTEP_VF_MAX_MSIX_VECTORS OCTEP_VF_MAX_OQ 31 + 32 + #define OCTEP_VF_IQ_INTR_RESEND_BIT 59 33 + #define OCTEP_VF_OQ_INTR_RESEND_BIT 59 34 + 35 + #define IQ_INSTR_PENDING(iq) ({ typeof(iq) iq__ = (iq); \ 36 + ((iq__)->host_write_index - (iq__)->flush_index) & \ 37 + (iq__)->ring_size_mask; \ 38 + }) 39 + #define IQ_INSTR_SPACE(iq) ({ typeof(iq) iq_ = (iq); \ 40 + (iq_)->max_count - IQ_INSTR_PENDING(iq_); \ 41 + }) 42 + 43 + #ifndef UINT64_MAX 44 + #define UINT64_MAX ((u64)(~((u64)0))) /* 0xFFFFFFFFFFFFFFFF */ 45 + #endif 46 + 47 + /* PCI address space mapping information. 48 + * Each of the 3 address spaces given by BAR0, BAR2 and BAR4 of 49 + * Octeon gets mapped to different physical address spaces in 50 + * the kernel. 51 + */ 52 + struct octep_vf_mmio { 53 + /* The physical address to which the PCI address space is mapped. */ 54 + u8 __iomem *hw_addr; 55 + 56 + /* Flag indicating the mapping was successful. 
*/ 57 + int mapped; 58 + }; 59 + 60 + struct octep_vf_hw_ops { 61 + void (*setup_iq_regs)(struct octep_vf_device *oct, int q); 62 + void (*setup_oq_regs)(struct octep_vf_device *oct, int q); 63 + void (*setup_mbox_regs)(struct octep_vf_device *oct, int mbox); 64 + 65 + irqreturn_t (*non_ioq_intr_handler)(void *ioq_vector); 66 + irqreturn_t (*ioq_intr_handler)(void *ioq_vector); 67 + void (*reinit_regs)(struct octep_vf_device *oct); 68 + u32 (*update_iq_read_idx)(struct octep_vf_iq *iq); 69 + 70 + void (*enable_interrupts)(struct octep_vf_device *oct); 71 + void (*disable_interrupts)(struct octep_vf_device *oct); 72 + 73 + void (*enable_io_queues)(struct octep_vf_device *oct); 74 + void (*disable_io_queues)(struct octep_vf_device *oct); 75 + void (*enable_iq)(struct octep_vf_device *oct, int q); 76 + void (*disable_iq)(struct octep_vf_device *oct, int q); 77 + void (*enable_oq)(struct octep_vf_device *oct, int q); 78 + void (*disable_oq)(struct octep_vf_device *oct, int q); 79 + void (*reset_io_queues)(struct octep_vf_device *oct); 80 + void (*dump_registers)(struct octep_vf_device *oct); 81 + }; 82 + 83 + /* Octeon mailbox data */ 84 + struct octep_vf_mbox_data { 85 + /* Holds the offset of received data via mailbox. */ 86 + u32 data_index; 87 + 88 + /* Holds the received data via mailbox. */ 89 + u8 recv_data[OCTEP_PFVF_MBOX_MAX_DATA_BUF_SIZE]; 90 + }; 91 + 92 + /* wrappers around work structs */ 93 + struct octep_vf_mbox_wk { 94 + struct work_struct work; 95 + void *ctxptr; 96 + }; 97 + 98 + /* Octeon device mailbox */ 99 + struct octep_vf_mbox { 100 + /* A mutex to protect access to this q_mbox. */ 101 + struct mutex lock; 102 + 103 + u32 state; 104 + 105 + /* SLI_MAC_PF_MBOX_INT for PF, SLI_PKT_MBOX_INT for VF. */ 106 + u8 __iomem *mbox_int_reg; 107 + 108 + /* SLI_PKT_PF_VF_MBOX_SIG(0) for PF, 109 + * SLI_PKT_PF_VF_MBOX_SIG(1) for VF. 
110 + */ 111 + u8 __iomem *mbox_write_reg; 112 + 113 + /* SLI_PKT_PF_VF_MBOX_SIG(1) for PF, 114 + * SLI_PKT_PF_VF_MBOX_SIG(0) for VF. 115 + */ 116 + u8 __iomem *mbox_read_reg; 117 + 118 + /* Octeon mailbox data */ 119 + struct octep_vf_mbox_data mbox_data; 120 + 121 + /* Octeon mailbox work handler to process Mbox messages */ 122 + struct octep_vf_mbox_wk wk; 123 + }; 124 + 125 + /* Tx/Rx queue vector per interrupt. */ 126 + struct octep_vf_ioq_vector { 127 + char name[OCTEP_VF_MSIX_NAME_SIZE]; 128 + struct napi_struct napi; 129 + struct octep_vf_device *octep_vf_dev; 130 + struct octep_vf_iq *iq; 131 + struct octep_vf_oq *oq; 132 + cpumask_t affinity_mask; 133 + }; 134 + 135 + /* Octeon hardware/firmware offload capability flags. */ 136 + #define OCTEP_VF_CAP_TX_CHECKSUM BIT(0) 137 + #define OCTEP_VF_CAP_RX_CHECKSUM BIT(1) 138 + #define OCTEP_VF_CAP_TSO BIT(2) 139 + 140 + /* Link modes */ 141 + enum octep_vf_link_mode_bit_indices { 142 + OCTEP_VF_LINK_MODE_10GBASE_T = 0, 143 + OCTEP_VF_LINK_MODE_10GBASE_R, 144 + OCTEP_VF_LINK_MODE_10GBASE_CR, 145 + OCTEP_VF_LINK_MODE_10GBASE_KR, 146 + OCTEP_VF_LINK_MODE_10GBASE_LR, 147 + OCTEP_VF_LINK_MODE_10GBASE_SR, 148 + OCTEP_VF_LINK_MODE_25GBASE_CR, 149 + OCTEP_VF_LINK_MODE_25GBASE_KR, 150 + OCTEP_VF_LINK_MODE_25GBASE_SR, 151 + OCTEP_VF_LINK_MODE_40GBASE_CR4, 152 + OCTEP_VF_LINK_MODE_40GBASE_KR4, 153 + OCTEP_VF_LINK_MODE_40GBASE_LR4, 154 + OCTEP_VF_LINK_MODE_40GBASE_SR4, 155 + OCTEP_VF_LINK_MODE_50GBASE_CR2, 156 + OCTEP_VF_LINK_MODE_50GBASE_KR2, 157 + OCTEP_VF_LINK_MODE_50GBASE_SR2, 158 + OCTEP_VF_LINK_MODE_50GBASE_CR, 159 + OCTEP_VF_LINK_MODE_50GBASE_KR, 160 + OCTEP_VF_LINK_MODE_50GBASE_LR, 161 + OCTEP_VF_LINK_MODE_50GBASE_SR, 162 + OCTEP_VF_LINK_MODE_100GBASE_CR4, 163 + OCTEP_VF_LINK_MODE_100GBASE_KR4, 164 + OCTEP_VF_LINK_MODE_100GBASE_LR4, 165 + OCTEP_VF_LINK_MODE_100GBASE_SR4, 166 + OCTEP_VF_LINK_MODE_NBITS 167 + }; 168 + 169 + /* Hardware interface link state information. 
*/ 170 + struct octep_vf_iface_link_info { 171 + /* Bitmap of Supported link speeds/modes. */ 172 + u64 supported_modes; 173 + 174 + /* Bitmap of Advertised link speeds/modes. */ 175 + u64 advertised_modes; 176 + 177 + /* Negotiated link speed in Mbps. */ 178 + u32 speed; 179 + 180 + /* MTU */ 181 + u16 mtu; 182 + 183 + /* Autonegotiation state. */ 184 + #define OCTEP_VF_LINK_MODE_AUTONEG_SUPPORTED BIT(0) 185 + #define OCTEP_VF_LINK_MODE_AUTONEG_ADVERTISED BIT(1) 186 + u8 autoneg; 187 + 188 + /* Pause frames setting. */ 189 + #define OCTEP_VF_LINK_MODE_PAUSE_SUPPORTED BIT(0) 190 + #define OCTEP_VF_LINK_MODE_PAUSE_ADVERTISED BIT(1) 191 + u8 pause; 192 + 193 + /* Admin state of the link (ifconfig <iface> up/down */ 194 + u8 admin_up; 195 + 196 + /* Operational state of the link: physical link is up down */ 197 + u8 oper_up; 198 + }; 199 + 200 + /* Hardware interface stats information. */ 201 + struct octep_vf_iface_rxtx_stats { 202 + /* Hardware Interface Rx statistics */ 203 + struct octep_vf_iface_rx_stats iface_rx_stats; 204 + 205 + /* Hardware Interface Tx statistics */ 206 + struct octep_vf_iface_tx_stats iface_tx_stats; 207 + }; 208 + 209 + struct octep_vf_fw_info { 210 + /* pkind value to be used in every Tx hardware descriptor */ 211 + u8 pkind; 212 + /* front size data */ 213 + u8 fsz; 214 + /* supported rx offloads OCTEP_VF_RX_OFFLOAD_* */ 215 + u16 rx_ol_flags; 216 + /* supported tx offloads OCTEP_VF_TX_OFFLOAD_* */ 217 + u16 tx_ol_flags; 218 + }; 219 + 220 + /* The Octeon device specific private data structure. 221 + * Each Octeon device has this structure to represent all its components. 222 + */ 223 + struct octep_vf_device { 224 + struct octep_vf_config *conf; 225 + 226 + /* Octeon Chip type. 
*/ 227 + u16 chip_id; 228 + u16 rev_id; 229 + 230 + /* Device capabilities enabled */ 231 + u64 caps_enabled; 232 + /* Device capabilities supported */ 233 + u64 caps_supported; 234 + 235 + /* Pointer to basic Linux device */ 236 + struct device *dev; 237 + /* Linux PCI device pointer */ 238 + struct pci_dev *pdev; 239 + /* Netdev corresponding to the Octeon device */ 240 + struct net_device *netdev; 241 + 242 + /* memory mapped io range */ 243 + struct octep_vf_mmio mmio; 244 + 245 + /* MAC address */ 246 + u8 mac_addr[ETH_ALEN]; 247 + 248 + /* Tx queues (IQ: Instruction Queue) */ 249 + u16 num_iqs; 250 + /* Pointers to Octeon Tx queues */ 251 + struct octep_vf_iq *iq[OCTEP_VF_MAX_IQ]; 252 + 253 + /* Rx queues (OQ: Output Queue) */ 254 + u16 num_oqs; 255 + /* Pointers to Octeon Rx queues */ 256 + struct octep_vf_oq *oq[OCTEP_VF_MAX_OQ]; 257 + 258 + /* Hardware port number of the PCIe interface */ 259 + u16 pcie_port; 260 + 261 + /* Hardware operations */ 262 + struct octep_vf_hw_ops hw_ops; 263 + 264 + /* IRQ info */ 265 + u16 num_irqs; 266 + u16 num_non_ioq_irqs; 267 + char *non_ioq_irq_names; 268 + struct msix_entry *msix_entries; 269 + /* IOq information of it's corresponding MSI-X interrupt. 
*/ 270 + struct octep_vf_ioq_vector *ioq_vector[OCTEP_VF_MAX_QUEUES]; 271 + 272 + /* Hardware Interface Tx statistics */ 273 + struct octep_vf_iface_tx_stats iface_tx_stats; 274 + /* Hardware Interface Rx statistics */ 275 + struct octep_vf_iface_rx_stats iface_rx_stats; 276 + 277 + /* Hardware Interface Link info like supported modes, aneg support */ 278 + struct octep_vf_iface_link_info link_info; 279 + 280 + /* Mailbox to talk to VFs */ 281 + struct octep_vf_mbox *mbox; 282 + 283 + /* Work entry to handle Tx timeout */ 284 + struct work_struct tx_timeout_task; 285 + 286 + /* offset for iface stats */ 287 + u32 ctrl_mbox_ifstats_offset; 288 + 289 + /* Negotiated Mbox version */ 290 + u32 mbox_neg_ver; 291 + 292 + /* firmware info */ 293 + struct octep_vf_fw_info fw_info; 294 + }; 295 + 296 + static inline u16 OCTEP_VF_MAJOR_REV(struct octep_vf_device *oct) 297 + { 298 + u16 rev = (oct->rev_id & 0xC) >> 2; 299 + 300 + return (rev == 0) ? 1 : rev; 301 + } 302 + 303 + static inline u16 OCTEP_VF_MINOR_REV(struct octep_vf_device *oct) 304 + { 305 + return (oct->rev_id & 0x3); 306 + } 307 + 308 + /* Octeon CSR read/write access APIs */ 309 + #define octep_vf_write_csr(octep_vf_dev, reg_off, value) \ 310 + writel(value, (octep_vf_dev)->mmio.hw_addr + (reg_off)) 311 + 312 + #define octep_vf_write_csr64(octep_vf_dev, reg_off, val64) \ 313 + writeq(val64, (octep_vf_dev)->mmio.hw_addr + (reg_off)) 314 + 315 + #define octep_vf_read_csr(octep_vf_dev, reg_off) \ 316 + readl((octep_vf_dev)->mmio.hw_addr + (reg_off)) 317 + 318 + #define octep_vf_read_csr64(octep_vf_dev, reg_off) \ 319 + readq((octep_vf_dev)->mmio.hw_addr + (reg_off)) 320 + 321 + extern struct workqueue_struct *octep_vf_wq; 322 + 323 + int octep_vf_device_setup(struct octep_vf_device *oct); 324 + int octep_vf_setup_iqs(struct octep_vf_device *oct); 325 + void octep_vf_free_iqs(struct octep_vf_device *oct); 326 + void octep_vf_clean_iqs(struct octep_vf_device *oct); 327 + int octep_vf_setup_oqs(struct 
octep_vf_device *oct); 328 + void octep_vf_free_oqs(struct octep_vf_device *oct); 329 + void octep_vf_oq_dbell_init(struct octep_vf_device *oct); 330 + void octep_vf_device_setup_cn93(struct octep_vf_device *oct); 331 + void octep_vf_device_setup_cnxk(struct octep_vf_device *oct); 332 + int octep_vf_iq_process_completions(struct octep_vf_iq *iq, u16 budget); 333 + int octep_vf_oq_process_rx(struct octep_vf_oq *oq, int budget); 334 + void octep_vf_set_ethtool_ops(struct net_device *netdev); 335 + int octep_vf_get_link_info(struct octep_vf_device *oct); 336 + int octep_vf_get_if_stats(struct octep_vf_device *oct); 337 + void octep_vf_mbox_work(struct work_struct *work); 338 + #endif /* _OCTEP_VF_MAIN_H_ */
+430
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Marvell Octeon EP (EndPoint) VF Ethernet Driver 3 + * 4 + * Copyright (C) 2020 Marvell. 5 + * 6 + */ 7 + #include <linux/types.h> 8 + #include <linux/pci.h> 9 + #include <linux/netdevice.h> 10 + #include "octep_vf_config.h" 11 + #include "octep_vf_main.h" 12 + 13 + /* When a new command is implemented, the below table should be updated 14 + * with new command and it's version info. 15 + */ 16 + static u32 pfvf_cmd_versions[OCTEP_PFVF_MBOX_CMD_MAX] = { 17 + [0 ... OCTEP_PFVF_MBOX_CMD_DEV_REMOVE] = OCTEP_PFVF_MBOX_VERSION_V1, 18 + [OCTEP_PFVF_MBOX_CMD_GET_FW_INFO ... OCTEP_PFVF_MBOX_NOTIF_LINK_STATUS] = 19 + OCTEP_PFVF_MBOX_VERSION_V2 20 + }; 21 + 22 + int octep_vf_setup_mbox(struct octep_vf_device *oct) 23 + { 24 + int ring = 0; 25 + 26 + oct->mbox = vzalloc(sizeof(*oct->mbox)); 27 + if (!oct->mbox) 28 + return -1; 29 + 30 + mutex_init(&oct->mbox->lock); 31 + 32 + oct->hw_ops.setup_mbox_regs(oct, ring); 33 + INIT_WORK(&oct->mbox->wk.work, octep_vf_mbox_work); 34 + oct->mbox->wk.ctxptr = oct; 35 + oct->mbox_neg_ver = OCTEP_PFVF_MBOX_VERSION_CURRENT; 36 + dev_info(&oct->pdev->dev, "setup vf mbox successfully\n"); 37 + return 0; 38 + } 39 + 40 + void octep_vf_delete_mbox(struct octep_vf_device *oct) 41 + { 42 + if (oct->mbox) { 43 + if (work_pending(&oct->mbox->wk.work)) 44 + cancel_work_sync(&oct->mbox->wk.work); 45 + 46 + mutex_destroy(&oct->mbox->lock); 47 + vfree(oct->mbox); 48 + oct->mbox = NULL; 49 + dev_info(&oct->pdev->dev, "Deleted vf mbox successfully\n"); 50 + } 51 + } 52 + 53 + int octep_vf_mbox_version_check(struct octep_vf_device *oct) 54 + { 55 + union octep_pfvf_mbox_word cmd; 56 + union octep_pfvf_mbox_word rsp; 57 + int ret; 58 + 59 + cmd.u64 = 0; 60 + cmd.s_version.opcode = OCTEP_PFVF_MBOX_CMD_VERSION; 61 + cmd.s_version.version = OCTEP_PFVF_MBOX_VERSION_CURRENT; 62 + ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp); 63 + if (ret == OCTEP_PFVF_MBOX_CMD_STATUS_NACK) { 64 + dev_err(&oct->pdev->dev, 65 + 
"VF Mbox version is incompatible with PF\n"); 66 + return -EINVAL; 67 + } 68 + oct->mbox_neg_ver = (u32)rsp.s_version.version; 69 + dev_dbg(&oct->pdev->dev, 70 + "VF Mbox version:%u Negotiated VF version with PF:%u\n", 71 + (u32)cmd.s_version.version, 72 + (u32)rsp.s_version.version); 73 + return 0; 74 + } 75 + 76 + void octep_vf_mbox_work(struct work_struct *work) 77 + { 78 + struct octep_vf_mbox_wk *wk = container_of(work, struct octep_vf_mbox_wk, work); 79 + struct octep_vf_iface_link_info *link_info; 80 + struct octep_vf_device *oct = NULL; 81 + struct octep_vf_mbox *mbox = NULL; 82 + union octep_pfvf_mbox_word *notif; 83 + u64 pf_vf_data; 84 + 85 + oct = (struct octep_vf_device *)wk->ctxptr; 86 + link_info = &oct->link_info; 87 + mbox = oct->mbox; 88 + pf_vf_data = readq(mbox->mbox_read_reg); 89 + 90 + notif = (union octep_pfvf_mbox_word *)&pf_vf_data; 91 + 92 + switch (notif->s.opcode) { 93 + case OCTEP_PFVF_MBOX_NOTIF_LINK_STATUS: 94 + if (notif->s_link_status.status) { 95 + link_info->oper_up = OCTEP_PFVF_LINK_STATUS_UP; 96 + netif_carrier_on(oct->netdev); 97 + dev_info(&oct->pdev->dev, "netif_carrier_on\n"); 98 + } else { 99 + link_info->oper_up = OCTEP_PFVF_LINK_STATUS_DOWN; 100 + netif_carrier_off(oct->netdev); 101 + dev_info(&oct->pdev->dev, "netif_carrier_off\n"); 102 + } 103 + break; 104 + default: 105 + dev_err(&oct->pdev->dev, 106 + "Received unsupported notif %d\n", notif->s.opcode); 107 + break; 108 + } 109 + } 110 + 111 + static int __octep_vf_mbox_send_cmd(struct octep_vf_device *oct, 112 + union octep_pfvf_mbox_word cmd, 113 + union octep_pfvf_mbox_word *rsp) 114 + { 115 + struct octep_vf_mbox *mbox = oct->mbox; 116 + u64 reg_val = 0ull; 117 + int count = 0; 118 + 119 + if (!mbox) 120 + return OCTEP_PFVF_MBOX_CMD_STATUS_NOT_SETUP; 121 + 122 + cmd.s.type = OCTEP_PFVF_MBOX_TYPE_CMD; 123 + writeq(cmd.u64, mbox->mbox_write_reg); 124 + 125 + /* No response for notification messages */ 126 + if (!rsp) 127 + return 0; 128 + 129 + for (count = 0; count 
< OCTEP_PFVF_MBOX_TIMEOUT_WAIT_COUNT; count++) { 130 + usleep_range(1000, 1500); 131 + reg_val = readq(mbox->mbox_write_reg); 132 + if (reg_val != cmd.u64) { 133 + rsp->u64 = reg_val; 134 + break; 135 + } 136 + } 137 + if (count == OCTEP_PFVF_MBOX_TIMEOUT_WAIT_COUNT) { 138 + dev_err(&oct->pdev->dev, "mbox send command timed out\n"); 139 + return OCTEP_PFVF_MBOX_CMD_STATUS_TIMEDOUT; 140 + } 141 + if (rsp->s.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) { 142 + dev_err(&oct->pdev->dev, "mbox_send: Received NACK\n"); 143 + return OCTEP_PFVF_MBOX_CMD_STATUS_NACK; 144 + } 145 + rsp->u64 = reg_val; 146 + return 0; 147 + } 148 + 149 + int octep_vf_mbox_send_cmd(struct octep_vf_device *oct, union octep_pfvf_mbox_word cmd, 150 + union octep_pfvf_mbox_word *rsp) 151 + { 152 + struct octep_vf_mbox *mbox = oct->mbox; 153 + int ret; 154 + 155 + if (!mbox) 156 + return OCTEP_PFVF_MBOX_CMD_STATUS_NOT_SETUP; 157 + mutex_lock(&mbox->lock); 158 + if (pfvf_cmd_versions[cmd.s.opcode] > oct->mbox_neg_ver) { 159 + dev_dbg(&oct->pdev->dev, "CMD:%d not supported in Version:%d\n", 160 + cmd.s.opcode, oct->mbox_neg_ver); 161 + mutex_unlock(&mbox->lock); 162 + return -EOPNOTSUPP; 163 + } 164 + ret = __octep_vf_mbox_send_cmd(oct, cmd, rsp); 165 + mutex_unlock(&mbox->lock); 166 + return ret; 167 + } 168 + 169 + int octep_vf_mbox_bulk_read(struct octep_vf_device *oct, enum octep_pfvf_mbox_opcode opcode, 170 + u8 *data, int *size) 171 + { 172 + struct octep_vf_mbox *mbox = oct->mbox; 173 + union octep_pfvf_mbox_word cmd; 174 + union octep_pfvf_mbox_word rsp; 175 + int data_len = 0, tmp_len = 0; 176 + int read_cnt, i = 0, ret; 177 + 178 + if (!mbox) 179 + return OCTEP_PFVF_MBOX_CMD_STATUS_NOT_SETUP; 180 + 181 + mutex_lock(&mbox->lock); 182 + cmd.u64 = 0; 183 + cmd.s_data.opcode = opcode; 184 + cmd.s_data.frag = 0; 185 + /* Send cmd to read data from PF */ 186 + ret = __octep_vf_mbox_send_cmd(oct, cmd, &rsp); 187 + if (ret) { 188 + dev_err(&oct->pdev->dev, "send mbox cmd fail for data request\n"); 189 + 
mutex_unlock(&mbox->lock); 190 + return ret; 191 + } 192 + /* PF sends the data length of requested CMD 193 + * in ACK 194 + */ 195 + data_len = *((int32_t *)rsp.s_data.data); 196 + tmp_len = data_len; 197 + cmd.u64 = 0; 198 + rsp.u64 = 0; 199 + cmd.s_data.opcode = opcode; 200 + cmd.s_data.frag = 1; 201 + while (data_len) { 202 + ret = __octep_vf_mbox_send_cmd(oct, cmd, &rsp); 203 + if (ret) { 204 + dev_err(&oct->pdev->dev, "send mbox cmd fail for data request\n"); 205 + mutex_unlock(&mbox->lock); 206 + mbox->mbox_data.data_index = 0; 207 + memset(mbox->mbox_data.recv_data, 0, OCTEP_PFVF_MBOX_MAX_DATA_BUF_SIZE); 208 + return ret; 209 + } 210 + if (data_len > OCTEP_PFVF_MBOX_MAX_DATA_SIZE) { 211 + data_len -= OCTEP_PFVF_MBOX_MAX_DATA_SIZE; 212 + read_cnt = OCTEP_PFVF_MBOX_MAX_DATA_SIZE; 213 + } else { 214 + read_cnt = data_len; 215 + data_len = 0; 216 + } 217 + for (i = 0; i < read_cnt; i++) { 218 + mbox->mbox_data.recv_data[mbox->mbox_data.data_index] = 219 + rsp.s_data.data[i]; 220 + mbox->mbox_data.data_index++; 221 + } 222 + cmd.u64 = 0; 223 + rsp.u64 = 0; 224 + cmd.s_data.opcode = opcode; 225 + cmd.s_data.frag = 1; 226 + } 227 + memcpy(data, mbox->mbox_data.recv_data, tmp_len); 228 + *size = tmp_len; 229 + mbox->mbox_data.data_index = 0; 230 + memset(mbox->mbox_data.recv_data, 0, OCTEP_PFVF_MBOX_MAX_DATA_BUF_SIZE); 231 + mutex_unlock(&mbox->lock); 232 + return 0; 233 + } 234 + 235 + int octep_vf_mbox_set_mtu(struct octep_vf_device *oct, int mtu) 236 + { 237 + int frame_size = mtu + ETH_HLEN + ETH_FCS_LEN; 238 + union octep_pfvf_mbox_word cmd; 239 + union octep_pfvf_mbox_word rsp; 240 + int ret = 0; 241 + 242 + if (mtu < ETH_MIN_MTU || frame_size > ETH_MAX_MTU) { 243 + dev_err(&oct->pdev->dev, 244 + "Failed to set MTU to %d MIN MTU:%d MAX MTU:%d\n", 245 + mtu, ETH_MIN_MTU, ETH_MAX_MTU); 246 + return -EINVAL; 247 + } 248 + 249 + cmd.u64 = 0; 250 + cmd.s_set_mtu.opcode = OCTEP_PFVF_MBOX_CMD_SET_MTU; 251 + cmd.s_set_mtu.mtu = mtu; 252 + 253 + ret = 
octep_vf_mbox_send_cmd(oct, cmd, &rsp); 254 + if (ret) { 255 + dev_err(&oct->pdev->dev, "Mbox send failed; err=%d\n", ret); 256 + return ret; 257 + } 258 + if (rsp.s_set_mtu.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) { 259 + dev_err(&oct->pdev->dev, "Received Mbox NACK from PF for MTU:%d\n", mtu); 260 + return -EINVAL; 261 + } 262 + 263 + return 0; 264 + } 265 + 266 + int octep_vf_mbox_set_mac_addr(struct octep_vf_device *oct, char *mac_addr) 267 + { 268 + union octep_pfvf_mbox_word cmd; 269 + union octep_pfvf_mbox_word rsp; 270 + int i, ret; 271 + 272 + cmd.u64 = 0; 273 + cmd.s_set_mac.opcode = OCTEP_PFVF_MBOX_CMD_SET_MAC_ADDR; 274 + for (i = 0; i < ETH_ALEN; i++) 275 + cmd.s_set_mac.mac_addr[i] = mac_addr[i]; 276 + ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp); 277 + if (ret) { 278 + dev_err(&oct->pdev->dev, "Mbox send failed; err = %d\n", ret); 279 + return ret; 280 + } 281 + if (rsp.s_set_mac.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) { 282 + dev_err(&oct->pdev->dev, "received NACK\n"); 283 + return -EINVAL; 284 + } 285 + return 0; 286 + } 287 + 288 + int octep_vf_mbox_get_mac_addr(struct octep_vf_device *oct, char *mac_addr) 289 + { 290 + union octep_pfvf_mbox_word cmd; 291 + union octep_pfvf_mbox_word rsp; 292 + int i, ret; 293 + 294 + cmd.u64 = 0; 295 + cmd.s_set_mac.opcode = OCTEP_PFVF_MBOX_CMD_GET_MAC_ADDR; 296 + ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp); 297 + if (ret) { 298 + dev_err(&oct->pdev->dev, "get_mac: mbox send failed; err = %d\n", ret); 299 + return ret; 300 + } 301 + if (rsp.s_set_mac.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) { 302 + dev_err(&oct->pdev->dev, "get_mac: received NACK\n"); 303 + return -EINVAL; 304 + } 305 + for (i = 0; i < ETH_ALEN; i++) 306 + mac_addr[i] = rsp.s_set_mac.mac_addr[i]; 307 + return 0; 308 + } 309 + 310 + int octep_vf_mbox_set_rx_state(struct octep_vf_device *oct, bool state) 311 + { 312 + union octep_pfvf_mbox_word cmd; 313 + union octep_pfvf_mbox_word rsp; 314 + int ret; 315 + 316 + cmd.u64 = 0; 317 + cmd.s_link_state.opcode 
= OCTEP_PFVF_MBOX_CMD_SET_RX_STATE; 318 + cmd.s_link_state.state = state; 319 + ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp); 320 + if (ret) { 321 + dev_err(&oct->pdev->dev, "Set Rx state via VF Mbox send failed\n"); 322 + return ret; 323 + } 324 + if (rsp.s_link_state.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) { 325 + dev_err(&oct->pdev->dev, "Set Rx state received NACK\n"); 326 + return -EINVAL; 327 + } 328 + return 0; 329 + } 330 + 331 + int octep_vf_mbox_set_link_status(struct octep_vf_device *oct, bool status) 332 + { 333 + union octep_pfvf_mbox_word cmd; 334 + union octep_pfvf_mbox_word rsp; 335 + int ret; 336 + 337 + cmd.u64 = 0; 338 + cmd.s_link_status.opcode = OCTEP_PFVF_MBOX_CMD_SET_LINK_STATUS; 339 + cmd.s_link_status.status = status; 340 + ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp); 341 + if (ret) { 342 + dev_err(&oct->pdev->dev, "Set link status via VF Mbox send failed\n"); 343 + return ret; 344 + } 345 + if (rsp.s_link_status.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) { 346 + dev_err(&oct->pdev->dev, "Set link status received NACK\n"); 347 + return -EINVAL; 348 + } 349 + return 0; 350 + } 351 + 352 + int octep_vf_mbox_get_link_status(struct octep_vf_device *oct, u8 *oper_up) 353 + { 354 + union octep_pfvf_mbox_word cmd; 355 + union octep_pfvf_mbox_word rsp; 356 + int ret; 357 + 358 + cmd.u64 = 0; 359 + cmd.s_link_status.opcode = OCTEP_PFVF_MBOX_CMD_GET_LINK_STATUS; 360 + ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp); 361 + if (ret) { 362 + dev_err(&oct->pdev->dev, "Get link status via VF Mbox send failed\n"); 363 + return ret; 364 + } 365 + if (rsp.s_link_status.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) { 366 + dev_err(&oct->pdev->dev, "Get link status received NACK\n"); 367 + return -EINVAL; 368 + } 369 + *oper_up = rsp.s_link_status.status; 370 + return 0; 371 + } 372 + 373 + int octep_vf_mbox_dev_remove(struct octep_vf_device *oct) 374 + { 375 + union octep_pfvf_mbox_word cmd; 376 + int ret; 377 + 378 + cmd.u64 = 0; 379 + cmd.s.opcode = 
OCTEP_PFVF_MBOX_CMD_DEV_REMOVE; 380 + ret = octep_vf_mbox_send_cmd(oct, cmd, NULL); 381 + return ret; 382 + } 383 + 384 + int octep_vf_mbox_get_fw_info(struct octep_vf_device *oct) 385 + { 386 + union octep_pfvf_mbox_word cmd; 387 + union octep_pfvf_mbox_word rsp; 388 + int ret; 389 + 390 + cmd.u64 = 0; 391 + cmd.s_fw_info.opcode = OCTEP_PFVF_MBOX_CMD_GET_FW_INFO; 392 + ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp); 393 + if (ret) { 394 + dev_err(&oct->pdev->dev, "Get link status via VF Mbox send failed\n"); 395 + return ret; 396 + } 397 + if (rsp.s_fw_info.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) { 398 + dev_err(&oct->pdev->dev, "Get link status received NACK\n"); 399 + return -EINVAL; 400 + } 401 + oct->fw_info.pkind = rsp.s_fw_info.pkind; 402 + oct->fw_info.fsz = rsp.s_fw_info.fsz; 403 + oct->fw_info.rx_ol_flags = rsp.s_fw_info.rx_ol_flags; 404 + oct->fw_info.tx_ol_flags = rsp.s_fw_info.tx_ol_flags; 405 + 406 + return 0; 407 + } 408 + 409 + int octep_vf_mbox_set_offloads(struct octep_vf_device *oct, u16 tx_offloads, 410 + u16 rx_offloads) 411 + { 412 + union octep_pfvf_mbox_word cmd; 413 + union octep_pfvf_mbox_word rsp; 414 + int ret; 415 + 416 + cmd.u64 = 0; 417 + cmd.s_offloads.opcode = OCTEP_PFVF_MBOX_CMD_SET_OFFLOADS; 418 + cmd.s_offloads.rx_ol_flags = rx_offloads; 419 + cmd.s_offloads.tx_ol_flags = tx_offloads; 420 + ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp); 421 + if (ret) { 422 + dev_err(&oct->pdev->dev, "Set offloads via VF Mbox send failed\n"); 423 + return ret; 424 + } 425 + if (rsp.s_link_state.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) { 426 + dev_err(&oct->pdev->dev, "Set offloads received NACK\n"); 427 + return -EINVAL; 428 + } 429 + return 0; 430 + }
+166
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Marvell Octeon EP (EndPoint) Ethernet Driver 3 + * 4 + * Copyright (C) 2020 Marvell. 5 + * 6 + */ 7 + #ifndef _OCTEP_VF_MBOX_H_ 8 + #define _OCTEP_VF_MBOX_H_ 9 + 10 + /* When a new command is implemented, VF Mbox version should be bumped. 11 + */ 12 + enum octep_pfvf_mbox_version { 13 + OCTEP_PFVF_MBOX_VERSION_V0, 14 + OCTEP_PFVF_MBOX_VERSION_V1, 15 + OCTEP_PFVF_MBOX_VERSION_V2 16 + }; 17 + 18 + #define OCTEP_PFVF_MBOX_VERSION_CURRENT OCTEP_PFVF_MBOX_VERSION_V2 19 + 20 + enum octep_pfvf_mbox_opcode { 21 + OCTEP_PFVF_MBOX_CMD_VERSION, 22 + OCTEP_PFVF_MBOX_CMD_SET_MTU, 23 + OCTEP_PFVF_MBOX_CMD_SET_MAC_ADDR, 24 + OCTEP_PFVF_MBOX_CMD_GET_MAC_ADDR, 25 + OCTEP_PFVF_MBOX_CMD_GET_LINK_INFO, 26 + OCTEP_PFVF_MBOX_CMD_GET_STATS, 27 + OCTEP_PFVF_MBOX_CMD_SET_RX_STATE, 28 + OCTEP_PFVF_MBOX_CMD_SET_LINK_STATUS, 29 + OCTEP_PFVF_MBOX_CMD_GET_LINK_STATUS, 30 + OCTEP_PFVF_MBOX_CMD_GET_MTU, 31 + OCTEP_PFVF_MBOX_CMD_DEV_REMOVE, 32 + OCTEP_PFVF_MBOX_CMD_GET_FW_INFO, 33 + OCTEP_PFVF_MBOX_CMD_SET_OFFLOADS, 34 + OCTEP_PFVF_MBOX_NOTIF_LINK_STATUS, 35 + OCTEP_PFVF_MBOX_CMD_MAX, 36 + }; 37 + 38 + enum octep_pfvf_mbox_word_type { 39 + OCTEP_PFVF_MBOX_TYPE_CMD, 40 + OCTEP_PFVF_MBOX_TYPE_RSP_ACK, 41 + OCTEP_PFVF_MBOX_TYPE_RSP_NACK, 42 + }; 43 + 44 + enum octep_pfvf_mbox_cmd_status { 45 + OCTEP_PFVF_MBOX_CMD_STATUS_NOT_SETUP = 1, 46 + OCTEP_PFVF_MBOX_CMD_STATUS_TIMEDOUT = 2, 47 + OCTEP_PFVF_MBOX_CMD_STATUS_NACK = 3, 48 + OCTEP_PFVF_MBOX_CMD_STATUS_BUSY = 4, 49 + OCTEP_PFVF_MBOX_CMD_STATUS_ERR = 5 50 + }; 51 + 52 + enum octep_pfvf_link_status { 53 + OCTEP_PFVF_LINK_STATUS_DOWN, 54 + OCTEP_PFVF_LINK_STATUS_UP, 55 + }; 56 + 57 + enum octep_pfvf_link_speed { 58 + OCTEP_PFVF_LINK_SPEED_NONE, 59 + OCTEP_PFVF_LINK_SPEED_1000, 60 + OCTEP_PFVF_LINK_SPEED_10000, 61 + OCTEP_PFVF_LINK_SPEED_25000, 62 + OCTEP_PFVF_LINK_SPEED_40000, 63 + OCTEP_PFVF_LINK_SPEED_50000, 64 + OCTEP_PFVF_LINK_SPEED_100000, 65 + OCTEP_PFVF_LINK_SPEED_LAST, 66 + }; 67 + 68 + enum 
octep_pfvf_link_duplex { 69 + OCTEP_PFVF_LINK_HALF_DUPLEX, 70 + OCTEP_PFVF_LINK_FULL_DUPLEX, 71 + }; 72 + 73 + enum octep_pfvf_link_autoneg { 74 + OCTEP_PFVF_LINK_AUTONEG, 75 + OCTEP_PFVF_LINK_FIXED, 76 + }; 77 + 78 + #define OCTEP_PFVF_MBOX_TIMEOUT_WAIT_COUNT 8000 79 + #define OCTEP_PFVF_MBOX_TIMEOUT_WAIT_UDELAY 1000 80 + #define OCTEP_PFVF_MBOX_MAX_RETRIES 2 81 + #define OCTEP_PFVF_MBOX_VERSION 0 82 + #define OCTEP_PFVF_MBOX_MAX_DATA_SIZE 6 83 + #define OCTEP_PFVF_MBOX_MAX_DATA_BUF_SIZE 320 84 + #define OCTEP_PFVF_MBOX_MORE_FRAG_FLAG 1 85 + 86 + union octep_pfvf_mbox_word { 87 + u64 u64; 88 + struct { 89 + u64 opcode:8; 90 + u64 type:2; 91 + u64 rsvd:6; 92 + u64 data:48; 93 + } s; 94 + struct { 95 + u64 opcode:8; 96 + u64 type:2; 97 + u64 frag:1; 98 + u64 rsvd:5; 99 + u8 data[6]; 100 + } s_data; 101 + struct { 102 + u64 opcode:8; 103 + u64 type:2; 104 + u64 rsvd:6; 105 + u64 version:48; 106 + } s_version; 107 + struct { 108 + u64 opcode:8; 109 + u64 type:2; 110 + u64 rsvd:6; 111 + u8 mac_addr[6]; 112 + } s_set_mac; 113 + struct { 114 + u64 opcode:8; 115 + u64 type:2; 116 + u64 rsvd:6; 117 + u64 mtu:48; 118 + } s_set_mtu; 119 + struct { 120 + u64 opcode:8; 121 + u64 type:2; 122 + u64 state:1; 123 + u64 rsvd:53; 124 + } s_link_state; 125 + struct { 126 + u64 opcode:8; 127 + u64 type:2; 128 + u64 status:1; 129 + u64 rsvd:53; 130 + } s_link_status; 131 + struct { 132 + u64 opcode:8; 133 + u64 type:2; 134 + u64 pkind:8; 135 + u64 fsz:8; 136 + u64 rx_ol_flags:16; 137 + u64 tx_ol_flags:16; 138 + u64 rsvd:6; 139 + } s_fw_info; 140 + struct { 141 + u64 opcode:8; 142 + u64 type:2; 143 + u64 rsvd:22; 144 + u64 rx_ol_flags:16; 145 + u64 tx_ol_flags:16; 146 + } s_offloads; 147 + } __packed; 148 + 149 + int octep_vf_setup_mbox(struct octep_vf_device *oct); 150 + void octep_vf_delete_mbox(struct octep_vf_device *oct); 151 + int octep_vf_mbox_send_cmd(struct octep_vf_device *oct, union octep_pfvf_mbox_word cmd, 152 + union octep_pfvf_mbox_word *rsp); 153 + int 
octep_vf_mbox_bulk_read(struct octep_vf_device *oct, enum octep_pfvf_mbox_opcode opcode, 154 + u8 *data, int *size); 155 + int octep_vf_mbox_set_mtu(struct octep_vf_device *oct, int mtu); 156 + int octep_vf_mbox_set_mac_addr(struct octep_vf_device *oct, char *mac_addr); 157 + int octep_vf_mbox_get_mac_addr(struct octep_vf_device *oct, char *mac_addr); 158 + int octep_vf_mbox_version_check(struct octep_vf_device *oct); 159 + int octep_vf_mbox_set_rx_state(struct octep_vf_device *oct, bool state); 160 + int octep_vf_mbox_set_link_status(struct octep_vf_device *oct, bool status); 161 + int octep_vf_mbox_get_link_status(struct octep_vf_device *oct, u8 *oper_up); 162 + int octep_vf_mbox_dev_remove(struct octep_vf_device *oct); 163 + int octep_vf_mbox_get_fw_info(struct octep_vf_device *oct); 164 + int octep_vf_mbox_set_offloads(struct octep_vf_device *oct, u16 tx_offloads, u16 rx_offloads); 165 + 166 + #endif
+154
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cn9k.h
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
 *
 * Copyright (C) 2020 Marvell.
 *
 * Register offset definitions for the CN93 (CN9K family) VF:
 * PCI config space offsets, per-ring input (R_IN) and output (R_OUT)
 * queue registers, and PF <-> VF mailbox registers.
 */
#ifndef _OCTEP_VF_REGS_CN9K_H_
#define _OCTEP_VF_REGS_CN9K_H_

/* ############################ RST ######################### */
/* Offsets into the VF's PCI configuration space. */
#define CN93_VF_CONFIG_XPANSION_BAR             0x38
#define CN93_VF_CONFIG_PCIE_CAP                 0x70
#define CN93_VF_CONFIG_PCIE_DEVCAP              0x74
#define CN93_VF_CONFIG_PCIE_DEVCTL              0x78
#define CN93_VF_CONFIG_PCIE_LINKCAP             0x7C
#define CN93_VF_CONFIG_PCIE_LINKCTL             0x80
#define CN93_VF_CONFIG_PCIE_SLOTCAP             0x84
#define CN93_VF_CONFIG_PCIE_SLOTCTL             0x88

/* Stride between consecutive rings' register blocks. */
#define CN93_VF_RING_OFFSET                     BIT_ULL(17)

/* ###################### RING IN REGISTERS ######################### */
/* Base offsets of ring-0 input queue registers; the per-ring macros
 * below add (ring * CN93_VF_RING_OFFSET) to reach ring N.
 */
#define CN93_VF_SDP_R_IN_CONTROL_START          0x10000
#define CN93_VF_SDP_R_IN_ENABLE_START           0x10010
#define CN93_VF_SDP_R_IN_INSTR_BADDR_START      0x10020
#define CN93_VF_SDP_R_IN_INSTR_RSIZE_START      0x10030
#define CN93_VF_SDP_R_IN_INSTR_DBELL_START      0x10040
#define CN93_VF_SDP_R_IN_CNTS_START             0x10050
#define CN93_VF_SDP_R_IN_INT_LEVELS_START       0x10060
#define CN93_VF_SDP_R_IN_PKT_CNT_START          0x10080
#define CN93_VF_SDP_R_IN_BYTE_CNT_START         0x10090

#define CN93_VF_SDP_R_IN_CONTROL(ring)          \
	(CN93_VF_SDP_R_IN_CONTROL_START + ((ring) * CN93_VF_RING_OFFSET))

#define CN93_VF_SDP_R_IN_ENABLE(ring)           \
	(CN93_VF_SDP_R_IN_ENABLE_START + ((ring) * CN93_VF_RING_OFFSET))

#define CN93_VF_SDP_R_IN_INSTR_BADDR(ring)      \
	(CN93_VF_SDP_R_IN_INSTR_BADDR_START + ((ring) * CN93_VF_RING_OFFSET))

#define CN93_VF_SDP_R_IN_INSTR_RSIZE(ring)      \
	(CN93_VF_SDP_R_IN_INSTR_RSIZE_START + ((ring) * CN93_VF_RING_OFFSET))

#define CN93_VF_SDP_R_IN_INSTR_DBELL(ring)      \
	(CN93_VF_SDP_R_IN_INSTR_DBELL_START + ((ring) * CN93_VF_RING_OFFSET))

#define CN93_VF_SDP_R_IN_CNTS(ring)             \
	(CN93_VF_SDP_R_IN_CNTS_START + ((ring) * CN93_VF_RING_OFFSET))

#define CN93_VF_SDP_R_IN_INT_LEVELS(ring)       \
	(CN93_VF_SDP_R_IN_INT_LEVELS_START + ((ring) * CN93_VF_RING_OFFSET))

#define CN93_VF_SDP_R_IN_PKT_CNT(ring)          \
	(CN93_VF_SDP_R_IN_PKT_CNT_START + ((ring) * CN93_VF_RING_OFFSET))

#define CN93_VF_SDP_R_IN_BYTE_CNT(ring)         \
	(CN93_VF_SDP_R_IN_BYTE_CNT_START + ((ring) * CN93_VF_RING_OFFSET))

/*------------------ R_IN Masks ----------------*/

/* Rings per Virtual Function */
#define CN93_VF_R_IN_CTL_RPVF_MASK      (0xF)
#define CN93_VF_R_IN_CTL_RPVF_POS       (48)

/* Number of instructions to be read in one MAC read request;
 * set to max value (4).
 */
#define CN93_VF_R_IN_CTL_IDLE           BIT_ULL(28)
#define CN93_VF_R_IN_CTL_RDSIZE         (0x3ULL << 25)
#define CN93_VF_R_IN_CTL_IS_64B         BIT_ULL(24)
#define CN93_VF_R_IN_CTL_D_NSR          BIT_ULL(8)
#define CN93_VF_R_IN_CTL_D_ESR          BIT_ULL(6)
#define CN93_VF_R_IN_CTL_D_ROR          BIT_ULL(5)
#define CN93_VF_R_IN_CTL_NSR            BIT_ULL(3)
#define CN93_VF_R_IN_CTL_ESR            BIT_ULL(1)
#define CN93_VF_R_IN_CTL_ROR            BIT_ULL(0)

#define CN93_VF_R_IN_CTL_MASK           (CN93_VF_R_IN_CTL_RDSIZE | CN93_VF_R_IN_CTL_IS_64B)

/* ###################### RING OUT REGISTERS ######################### */
/* Base offsets of ring-0 output queue registers. */
#define CN93_VF_SDP_R_OUT_CNTS_START            0x10100
#define CN93_VF_SDP_R_OUT_INT_LEVELS_START      0x10110
#define CN93_VF_SDP_R_OUT_SLIST_BADDR_START     0x10120
#define CN93_VF_SDP_R_OUT_SLIST_RSIZE_START     0x10130
#define CN93_VF_SDP_R_OUT_SLIST_DBELL_START     0x10140
#define CN93_VF_SDP_R_OUT_CONTROL_START         0x10150
#define CN93_VF_SDP_R_OUT_ENABLE_START          0x10160
#define CN93_VF_SDP_R_OUT_PKT_CNT_START         0x10180
#define CN93_VF_SDP_R_OUT_BYTE_CNT_START        0x10190

#define CN93_VF_SDP_R_OUT_CONTROL(ring)         \
	(CN93_VF_SDP_R_OUT_CONTROL_START + ((ring) * CN93_VF_RING_OFFSET))

#define CN93_VF_SDP_R_OUT_ENABLE(ring)          \
	(CN93_VF_SDP_R_OUT_ENABLE_START + ((ring) * CN93_VF_RING_OFFSET))

#define CN93_VF_SDP_R_OUT_SLIST_BADDR(ring)     \
	(CN93_VF_SDP_R_OUT_SLIST_BADDR_START + ((ring) * CN93_VF_RING_OFFSET))

#define CN93_VF_SDP_R_OUT_SLIST_RSIZE(ring)     \
	(CN93_VF_SDP_R_OUT_SLIST_RSIZE_START + ((ring) * CN93_VF_RING_OFFSET))

#define CN93_VF_SDP_R_OUT_SLIST_DBELL(ring)     \
	(CN93_VF_SDP_R_OUT_SLIST_DBELL_START + ((ring) * CN93_VF_RING_OFFSET))

#define CN93_VF_SDP_R_OUT_CNTS(ring)            \
	(CN93_VF_SDP_R_OUT_CNTS_START + ((ring) * CN93_VF_RING_OFFSET))

#define CN93_VF_SDP_R_OUT_INT_LEVELS(ring)      \
	(CN93_VF_SDP_R_OUT_INT_LEVELS_START + ((ring) * CN93_VF_RING_OFFSET))

#define CN93_VF_SDP_R_OUT_PKT_CNT(ring)         \
	(CN93_VF_SDP_R_OUT_PKT_CNT_START + ((ring) * CN93_VF_RING_OFFSET))

#define CN93_VF_SDP_R_OUT_BYTE_CNT(ring)        \
	(CN93_VF_SDP_R_OUT_BYTE_CNT_START + ((ring) * CN93_VF_RING_OFFSET))

/*------------------ R_OUT Masks ----------------*/
#define CN93_VF_R_OUT_INT_LEVELS_BMODE          BIT_ULL(63)
#define CN93_VF_R_OUT_INT_LEVELS_TIMET          (32)

#define CN93_VF_R_OUT_CTL_IDLE                  BIT_ULL(40)
#define CN93_VF_R_OUT_CTL_ES_I                  BIT_ULL(34)
#define CN93_VF_R_OUT_CTL_NSR_I                 BIT_ULL(33)
#define CN93_VF_R_OUT_CTL_ROR_I                 BIT_ULL(32)
#define CN93_VF_R_OUT_CTL_ES_D                  BIT_ULL(30)
#define CN93_VF_R_OUT_CTL_NSR_D                 BIT_ULL(29)
#define CN93_VF_R_OUT_CTL_ROR_D                 BIT_ULL(28)
#define CN93_VF_R_OUT_CTL_ES_P                  BIT_ULL(26)
#define CN93_VF_R_OUT_CTL_NSR_P                 BIT_ULL(25)
#define CN93_VF_R_OUT_CTL_ROR_P                 BIT_ULL(24)
#define CN93_VF_R_OUT_CTL_IMODE                 BIT_ULL(23)

/* ##################### Mail Box Registers ########################## */
/* SDP PF to VF Mailbox Data Register */
#define CN93_VF_SDP_R_MBOX_PF_VF_DATA_START     0x10210
/* SDP Packet PF to VF Mailbox Interrupt Register */
#define CN93_VF_SDP_R_MBOX_PF_VF_INT_START      0x10220
/* SDP VF to PF Mailbox Data Register */
#define CN93_VF_SDP_R_MBOX_VF_PF_DATA_START     0x10230

#define CN93_VF_SDP_R_MBOX_PF_VF_INT_ENAB       BIT_ULL(1)
#define CN93_VF_SDP_R_MBOX_PF_VF_INT_STATUS     BIT_ULL(0)

#define CN93_VF_SDP_R_MBOX_PF_VF_DATA(ring)     \
	(CN93_VF_SDP_R_MBOX_PF_VF_DATA_START + ((ring) * CN93_VF_RING_OFFSET))

#define CN93_VF_SDP_R_MBOX_PF_VF_INT(ring)      \
	(CN93_VF_SDP_R_MBOX_PF_VF_INT_START + ((ring) * CN93_VF_RING_OFFSET))

#define CN93_VF_SDP_R_MBOX_VF_PF_DATA(ring)     \
	(CN93_VF_SDP_R_MBOX_VF_PF_DATA_START + ((ring) * CN93_VF_RING_OFFSET))
#endif /* _OCTEP_VF_REGS_CN9K_H_ */
+162
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cnxk.h
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
 *
 * Copyright (C) 2020 Marvell.
 *
 * Register offset definitions for the CNXK family VF:
 * PCI config space offsets, per-ring input (R_IN) and output (R_OUT)
 * queue registers, and PF <-> VF mailbox registers.
 *
 * Single-bit masks use BIT_ULL() for consistency with the R_OUT/mailbox
 * section below and with the CN9K register header.
 */
#ifndef _OCTEP_VF_REGS_CNXK_H_
#define _OCTEP_VF_REGS_CNXK_H_

/* ############################ RST ######################### */
/* Offsets into the VF's PCI configuration space. */
#define CNXK_VF_CONFIG_XPANSION_BAR             0x38
#define CNXK_VF_CONFIG_PCIE_CAP                 0x70
#define CNXK_VF_CONFIG_PCIE_DEVCAP              0x74
#define CNXK_VF_CONFIG_PCIE_DEVCTL              0x78
#define CNXK_VF_CONFIG_PCIE_LINKCAP             0x7C
#define CNXK_VF_CONFIG_PCIE_LINKCTL             0x80
#define CNXK_VF_CONFIG_PCIE_SLOTCAP             0x84
#define CNXK_VF_CONFIG_PCIE_SLOTCTL             0x88

/* Stride between consecutive rings' register blocks. */
#define CNXK_VF_RING_OFFSET                     BIT_ULL(17)

/* ###################### RING IN REGISTERS ######################### */
/* Base offsets of ring-0 input queue registers; the per-ring macros
 * below add (ring * CNXK_VF_RING_OFFSET) to reach ring N.
 */
#define CNXK_VF_SDP_R_IN_CONTROL_START          0x10000
#define CNXK_VF_SDP_R_IN_ENABLE_START           0x10010
#define CNXK_VF_SDP_R_IN_INSTR_BADDR_START      0x10020
#define CNXK_VF_SDP_R_IN_INSTR_RSIZE_START      0x10030
#define CNXK_VF_SDP_R_IN_INSTR_DBELL_START      0x10040
#define CNXK_VF_SDP_R_IN_CNTS_START             0x10050
#define CNXK_VF_SDP_R_IN_INT_LEVELS_START       0x10060
#define CNXK_VF_SDP_R_IN_PKT_CNT_START          0x10080
#define CNXK_VF_SDP_R_IN_BYTE_CNT_START         0x10090
#define CNXK_VF_SDP_R_ERR_TYPE_START            0x10400

#define CNXK_VF_SDP_R_ERR_TYPE(ring)            \
	(CNXK_VF_SDP_R_ERR_TYPE_START + ((ring) * CNXK_VF_RING_OFFSET))

#define CNXK_VF_SDP_R_IN_CONTROL(ring)          \
	(CNXK_VF_SDP_R_IN_CONTROL_START + ((ring) * CNXK_VF_RING_OFFSET))

#define CNXK_VF_SDP_R_IN_ENABLE(ring)           \
	(CNXK_VF_SDP_R_IN_ENABLE_START + ((ring) * CNXK_VF_RING_OFFSET))

#define CNXK_VF_SDP_R_IN_INSTR_BADDR(ring)      \
	(CNXK_VF_SDP_R_IN_INSTR_BADDR_START + ((ring) * CNXK_VF_RING_OFFSET))

#define CNXK_VF_SDP_R_IN_INSTR_RSIZE(ring)      \
	(CNXK_VF_SDP_R_IN_INSTR_RSIZE_START + ((ring) * CNXK_VF_RING_OFFSET))

#define CNXK_VF_SDP_R_IN_INSTR_DBELL(ring)      \
	(CNXK_VF_SDP_R_IN_INSTR_DBELL_START + ((ring) * CNXK_VF_RING_OFFSET))

#define CNXK_VF_SDP_R_IN_CNTS(ring)             \
	(CNXK_VF_SDP_R_IN_CNTS_START + ((ring) * CNXK_VF_RING_OFFSET))

#define CNXK_VF_SDP_R_IN_INT_LEVELS(ring)       \
	(CNXK_VF_SDP_R_IN_INT_LEVELS_START + ((ring) * CNXK_VF_RING_OFFSET))

#define CNXK_VF_SDP_R_IN_PKT_CNT(ring)          \
	(CNXK_VF_SDP_R_IN_PKT_CNT_START + ((ring) * CNXK_VF_RING_OFFSET))

#define CNXK_VF_SDP_R_IN_BYTE_CNT(ring)         \
	(CNXK_VF_SDP_R_IN_BYTE_CNT_START + ((ring) * CNXK_VF_RING_OFFSET))

/*------------------ R_IN Masks ----------------*/

/* Rings per Virtual Function */
#define CNXK_VF_R_IN_CTL_RPVF_MASK      (0xF)
#define CNXK_VF_R_IN_CTL_RPVF_POS       (48)

/* Number of instructions to be read in one MAC read request;
 * set to max value (4).
 */
#define CNXK_VF_R_IN_CTL_IDLE           BIT_ULL(28)
#define CNXK_VF_R_IN_CTL_RDSIZE         (0x3ULL << 25)
#define CNXK_VF_R_IN_CTL_IS_64B         BIT_ULL(24)
#define CNXK_VF_R_IN_CTL_D_NSR          BIT_ULL(8)
#define CNXK_VF_R_IN_CTL_D_ESR          BIT_ULL(6)
#define CNXK_VF_R_IN_CTL_D_ROR          BIT_ULL(5)
#define CNXK_VF_R_IN_CTL_NSR            BIT_ULL(3)
#define CNXK_VF_R_IN_CTL_ESR            BIT_ULL(1)
#define CNXK_VF_R_IN_CTL_ROR            BIT_ULL(0)

#define CNXK_VF_R_IN_CTL_MASK           (CNXK_VF_R_IN_CTL_RDSIZE | CNXK_VF_R_IN_CTL_IS_64B)

/* ###################### RING OUT REGISTERS ######################### */
/* Base offsets of ring-0 output queue registers. */
#define CNXK_VF_SDP_R_OUT_CNTS_START            0x10100
#define CNXK_VF_SDP_R_OUT_INT_LEVELS_START      0x10110
#define CNXK_VF_SDP_R_OUT_SLIST_BADDR_START     0x10120
#define CNXK_VF_SDP_R_OUT_SLIST_RSIZE_START     0x10130
#define CNXK_VF_SDP_R_OUT_SLIST_DBELL_START     0x10140
#define CNXK_VF_SDP_R_OUT_CONTROL_START         0x10150
#define CNXK_VF_SDP_R_OUT_WMARK_START           0x10160
#define CNXK_VF_SDP_R_OUT_ENABLE_START          0x10170
#define CNXK_VF_SDP_R_OUT_PKT_CNT_START         0x10180
#define CNXK_VF_SDP_R_OUT_BYTE_CNT_START        0x10190

#define CNXK_VF_SDP_R_OUT_CONTROL(ring)         \
	(CNXK_VF_SDP_R_OUT_CONTROL_START + ((ring) * CNXK_VF_RING_OFFSET))

#define CNXK_VF_SDP_R_OUT_ENABLE(ring)          \
	(CNXK_VF_SDP_R_OUT_ENABLE_START + ((ring) * CNXK_VF_RING_OFFSET))

#define CNXK_VF_SDP_R_OUT_SLIST_BADDR(ring)     \
	(CNXK_VF_SDP_R_OUT_SLIST_BADDR_START + ((ring) * CNXK_VF_RING_OFFSET))

#define CNXK_VF_SDP_R_OUT_SLIST_RSIZE(ring)     \
	(CNXK_VF_SDP_R_OUT_SLIST_RSIZE_START + ((ring) * CNXK_VF_RING_OFFSET))

#define CNXK_VF_SDP_R_OUT_SLIST_DBELL(ring)     \
	(CNXK_VF_SDP_R_OUT_SLIST_DBELL_START + ((ring) * CNXK_VF_RING_OFFSET))

#define CNXK_VF_SDP_R_OUT_WMARK(ring)           \
	(CNXK_VF_SDP_R_OUT_WMARK_START + ((ring) * CNXK_VF_RING_OFFSET))

#define CNXK_VF_SDP_R_OUT_CNTS(ring)            \
	(CNXK_VF_SDP_R_OUT_CNTS_START + ((ring) * CNXK_VF_RING_OFFSET))

#define CNXK_VF_SDP_R_OUT_INT_LEVELS(ring)      \
	(CNXK_VF_SDP_R_OUT_INT_LEVELS_START + ((ring) * CNXK_VF_RING_OFFSET))

#define CNXK_VF_SDP_R_OUT_PKT_CNT(ring)         \
	(CNXK_VF_SDP_R_OUT_PKT_CNT_START + ((ring) * CNXK_VF_RING_OFFSET))

#define CNXK_VF_SDP_R_OUT_BYTE_CNT(ring)        \
	(CNXK_VF_SDP_R_OUT_BYTE_CNT_START + ((ring) * CNXK_VF_RING_OFFSET))

/*------------------ R_OUT Masks ----------------*/
#define CNXK_VF_R_OUT_INT_LEVELS_BMODE          BIT_ULL(63)
#define CNXK_VF_R_OUT_INT_LEVELS_TIMET          (32)

#define CNXK_VF_R_OUT_CTL_IDLE                  BIT_ULL(40)
#define CNXK_VF_R_OUT_CTL_ES_I                  BIT_ULL(34)
#define CNXK_VF_R_OUT_CTL_NSR_I                 BIT_ULL(33)
#define CNXK_VF_R_OUT_CTL_ROR_I                 BIT_ULL(32)
#define CNXK_VF_R_OUT_CTL_ES_D                  BIT_ULL(30)
#define CNXK_VF_R_OUT_CTL_NSR_D                 BIT_ULL(29)
#define CNXK_VF_R_OUT_CTL_ROR_D                 BIT_ULL(28)
#define CNXK_VF_R_OUT_CTL_ES_P                  BIT_ULL(26)
#define CNXK_VF_R_OUT_CTL_NSR_P                 BIT_ULL(25)
#define CNXK_VF_R_OUT_CTL_ROR_P                 BIT_ULL(24)
#define CNXK_VF_R_OUT_CTL_IMODE                 BIT_ULL(23)

/* ##################### Mail Box Registers ########################## */
/* SDP PF to VF Mailbox Data Register */
#define CNXK_VF_SDP_R_MBOX_PF_VF_DATA_START     0x10210
/* SDP Packet PF to VF Mailbox Interrupt Register */
#define CNXK_VF_SDP_R_MBOX_PF_VF_INT_START      0x10220
/* SDP VF to PF Mailbox Data Register */
#define CNXK_VF_SDP_R_MBOX_VF_PF_DATA_START     0x10230

#define CNXK_VF_SDP_R_MBOX_PF_VF_INT_ENAB       BIT_ULL(1)
#define CNXK_VF_SDP_R_MBOX_PF_VF_INT_STATUS     BIT_ULL(0)

#define CNXK_VF_SDP_R_MBOX_PF_VF_DATA(ring)     \
	(CNXK_VF_SDP_R_MBOX_PF_VF_DATA_START + ((ring) * CNXK_VF_RING_OFFSET))

#define CNXK_VF_SDP_R_MBOX_PF_VF_INT(ring)      \
	(CNXK_VF_SDP_R_MBOX_PF_VF_INT_START + ((ring) * CNXK_VF_RING_OFFSET))

#define CNXK_VF_SDP_R_MBOX_VF_PF_DATA(ring)     \
	(CNXK_VF_SDP_R_MBOX_VF_PF_DATA_START + ((ring) * CNXK_VF_RING_OFFSET))
#endif /* _OCTEP_VF_REGS_CNXK_H_ */
+511
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Marvell Octeon EP (EndPoint) VF Ethernet Driver 3 + * 4 + * Copyright (C) 2020 Marvell. 5 + * 6 + */ 7 + 8 + #include <linux/pci.h> 9 + #include <linux/etherdevice.h> 10 + #include <linux/vmalloc.h> 11 + 12 + #include "octep_vf_config.h" 13 + #include "octep_vf_main.h" 14 + 15 + static void octep_vf_oq_reset_indices(struct octep_vf_oq *oq) 16 + { 17 + oq->host_read_idx = 0; 18 + oq->host_refill_idx = 0; 19 + oq->refill_count = 0; 20 + oq->last_pkt_count = 0; 21 + oq->pkts_pending = 0; 22 + } 23 + 24 + /** 25 + * octep_vf_oq_fill_ring_buffers() - fill initial receive buffers for Rx ring. 26 + * 27 + * @oq: Octeon Rx queue data structure. 28 + * 29 + * Return: 0, if successfully filled receive buffers for all descriptors. 30 + * -1, if failed to allocate a buffer or failed to map for DMA. 31 + */ 32 + static int octep_vf_oq_fill_ring_buffers(struct octep_vf_oq *oq) 33 + { 34 + struct octep_vf_oq_desc_hw *desc_ring = oq->desc_ring; 35 + struct page *page; 36 + u32 i; 37 + 38 + for (i = 0; i < oq->max_count; i++) { 39 + page = dev_alloc_page(); 40 + if (unlikely(!page)) { 41 + dev_err(oq->dev, "Rx buffer alloc failed\n"); 42 + goto rx_buf_alloc_err; 43 + } 44 + desc_ring[i].buffer_ptr = dma_map_page(oq->dev, page, 0, 45 + PAGE_SIZE, 46 + DMA_FROM_DEVICE); 47 + if (dma_mapping_error(oq->dev, desc_ring[i].buffer_ptr)) { 48 + dev_err(oq->dev, 49 + "OQ-%d buffer alloc: DMA mapping error!\n", 50 + oq->q_no); 51 + put_page(page); 52 + goto dma_map_err; 53 + } 54 + oq->buff_info[i].page = page; 55 + } 56 + 57 + return 0; 58 + 59 + dma_map_err: 60 + rx_buf_alloc_err: 61 + while (i) { 62 + i--; 63 + dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr, PAGE_SIZE, DMA_FROM_DEVICE); 64 + put_page(oq->buff_info[i].page); 65 + oq->buff_info[i].page = NULL; 66 + } 67 + 68 + return -1; 69 + } 70 + 71 + /** 72 + * octep_vf_oq_refill() - refill buffers for used Rx ring descriptors. 73 + * 74 + * @oct: Octeon device private data structure. 
75 + * @oq: Octeon Rx queue data structure. 76 + * 77 + * Return: number of descriptors successfully refilled with receive buffers. 78 + */ 79 + static int octep_vf_oq_refill(struct octep_vf_device *oct, struct octep_vf_oq *oq) 80 + { 81 + struct octep_vf_oq_desc_hw *desc_ring = oq->desc_ring; 82 + struct page *page; 83 + u32 refill_idx, i; 84 + 85 + refill_idx = oq->host_refill_idx; 86 + for (i = 0; i < oq->refill_count; i++) { 87 + page = dev_alloc_page(); 88 + if (unlikely(!page)) { 89 + dev_err(oq->dev, "refill: rx buffer alloc failed\n"); 90 + oq->stats.alloc_failures++; 91 + break; 92 + } 93 + 94 + desc_ring[refill_idx].buffer_ptr = dma_map_page(oq->dev, page, 0, 95 + PAGE_SIZE, DMA_FROM_DEVICE); 96 + if (dma_mapping_error(oq->dev, desc_ring[refill_idx].buffer_ptr)) { 97 + dev_err(oq->dev, 98 + "OQ-%d buffer refill: DMA mapping error!\n", 99 + oq->q_no); 100 + put_page(page); 101 + oq->stats.alloc_failures++; 102 + break; 103 + } 104 + oq->buff_info[refill_idx].page = page; 105 + refill_idx++; 106 + if (refill_idx == oq->max_count) 107 + refill_idx = 0; 108 + } 109 + oq->host_refill_idx = refill_idx; 110 + oq->refill_count -= i; 111 + 112 + return i; 113 + } 114 + 115 + /** 116 + * octep_vf_setup_oq() - Setup a Rx queue. 117 + * 118 + * @oct: Octeon device private data structure. 119 + * @q_no: Rx queue number to be setup. 120 + * 121 + * Allocate resources for a Rx queue. 
122 + */ 123 + static int octep_vf_setup_oq(struct octep_vf_device *oct, int q_no) 124 + { 125 + struct octep_vf_oq *oq; 126 + u32 desc_ring_size; 127 + 128 + oq = vzalloc(sizeof(*oq)); 129 + if (!oq) 130 + goto create_oq_fail; 131 + oct->oq[q_no] = oq; 132 + 133 + oq->octep_vf_dev = oct; 134 + oq->netdev = oct->netdev; 135 + oq->dev = &oct->pdev->dev; 136 + oq->q_no = q_no; 137 + oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf); 138 + oq->ring_size_mask = oq->max_count - 1; 139 + oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf); 140 + oq->max_single_buffer_size = oq->buffer_size - OCTEP_VF_OQ_RESP_HW_SIZE; 141 + 142 + /* When the hardware/firmware supports additional capabilities, 143 + * additional header is filled-in by Octeon after length field in 144 + * Rx packets. this header contains additional packet information. 145 + */ 146 + if (oct->fw_info.rx_ol_flags) 147 + oq->max_single_buffer_size -= OCTEP_VF_OQ_RESP_HW_EXT_SIZE; 148 + 149 + oq->refill_threshold = CFG_GET_OQ_REFILL_THRESHOLD(oct->conf); 150 + 151 + desc_ring_size = oq->max_count * OCTEP_VF_OQ_DESC_SIZE; 152 + oq->desc_ring = dma_alloc_coherent(oq->dev, desc_ring_size, 153 + &oq->desc_ring_dma, GFP_KERNEL); 154 + 155 + if (unlikely(!oq->desc_ring)) { 156 + dev_err(oq->dev, 157 + "Failed to allocate DMA memory for OQ-%d !!\n", q_no); 158 + goto desc_dma_alloc_err; 159 + } 160 + 161 + oq->buff_info = vzalloc(oq->max_count * OCTEP_VF_OQ_RECVBUF_SIZE); 162 + 163 + if (unlikely(!oq->buff_info)) { 164 + dev_err(&oct->pdev->dev, 165 + "Failed to allocate buffer info for OQ-%d\n", q_no); 166 + goto buf_list_err; 167 + } 168 + 169 + if (octep_vf_oq_fill_ring_buffers(oq)) 170 + goto oq_fill_buff_err; 171 + 172 + octep_vf_oq_reset_indices(oq); 173 + oct->hw_ops.setup_oq_regs(oct, q_no); 174 + oct->num_oqs++; 175 + 176 + return 0; 177 + 178 + oq_fill_buff_err: 179 + vfree(oq->buff_info); 180 + oq->buff_info = NULL; 181 + buf_list_err: 182 + dma_free_coherent(oq->dev, desc_ring_size, 183 + oq->desc_ring, 
oq->desc_ring_dma); 184 + oq->desc_ring = NULL; 185 + desc_dma_alloc_err: 186 + vfree(oq); 187 + oct->oq[q_no] = NULL; 188 + create_oq_fail: 189 + return -1; 190 + } 191 + 192 + /** 193 + * octep_vf_oq_free_ring_buffers() - Free ring buffers. 194 + * 195 + * @oq: Octeon Rx queue data structure. 196 + * 197 + * Free receive buffers in unused Rx queue descriptors. 198 + */ 199 + static void octep_vf_oq_free_ring_buffers(struct octep_vf_oq *oq) 200 + { 201 + struct octep_vf_oq_desc_hw *desc_ring = oq->desc_ring; 202 + int i; 203 + 204 + if (!oq->desc_ring || !oq->buff_info) 205 + return; 206 + 207 + for (i = 0; i < oq->max_count; i++) { 208 + if (oq->buff_info[i].page) { 209 + dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr, 210 + PAGE_SIZE, DMA_FROM_DEVICE); 211 + put_page(oq->buff_info[i].page); 212 + oq->buff_info[i].page = NULL; 213 + desc_ring[i].buffer_ptr = 0; 214 + } 215 + } 216 + octep_vf_oq_reset_indices(oq); 217 + } 218 + 219 + /** 220 + * octep_vf_free_oq() - Free Rx queue resources. 221 + * 222 + * @oq: Octeon Rx queue data structure. 223 + * 224 + * Free all resources of a Rx queue. 225 + */ 226 + static int octep_vf_free_oq(struct octep_vf_oq *oq) 227 + { 228 + struct octep_vf_device *oct = oq->octep_vf_dev; 229 + int q_no = oq->q_no; 230 + 231 + octep_vf_oq_free_ring_buffers(oq); 232 + 233 + if (oq->buff_info) 234 + vfree(oq->buff_info); 235 + 236 + if (oq->desc_ring) 237 + dma_free_coherent(oq->dev, 238 + oq->max_count * OCTEP_VF_OQ_DESC_SIZE, 239 + oq->desc_ring, oq->desc_ring_dma); 240 + 241 + vfree(oq); 242 + oct->oq[q_no] = NULL; 243 + oct->num_oqs--; 244 + return 0; 245 + } 246 + 247 + /** 248 + * octep_vf_setup_oqs() - setup resources for all Rx queues. 249 + * 250 + * @oct: Octeon device private data structure. 
251 + */ 252 + int octep_vf_setup_oqs(struct octep_vf_device *oct) 253 + { 254 + int i, retval = 0; 255 + 256 + oct->num_oqs = 0; 257 + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) { 258 + retval = octep_vf_setup_oq(oct, i); 259 + if (retval) { 260 + dev_err(&oct->pdev->dev, 261 + "Failed to setup OQ(RxQ)-%d.\n", i); 262 + goto oq_setup_err; 263 + } 264 + dev_dbg(&oct->pdev->dev, "Successfully setup OQ(RxQ)-%d.\n", i); 265 + } 266 + 267 + return 0; 268 + 269 + oq_setup_err: 270 + while (i) { 271 + i--; 272 + octep_vf_free_oq(oct->oq[i]); 273 + } 274 + return -1; 275 + } 276 + 277 + /** 278 + * octep_vf_oq_dbell_init() - Initialize Rx queue doorbell. 279 + * 280 + * @oct: Octeon device private data structure. 281 + * 282 + * Write number of descriptors to Rx queue doorbell register. 283 + */ 284 + void octep_vf_oq_dbell_init(struct octep_vf_device *oct) 285 + { 286 + int i; 287 + 288 + for (i = 0; i < oct->num_oqs; i++) 289 + writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg); 290 + } 291 + 292 + /** 293 + * octep_vf_free_oqs() - Free resources of all Rx queues. 294 + * 295 + * @oct: Octeon device private data structure. 296 + */ 297 + void octep_vf_free_oqs(struct octep_vf_device *oct) 298 + { 299 + int i; 300 + 301 + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) { 302 + if (!oct->oq[i]) 303 + continue; 304 + octep_vf_free_oq(oct->oq[i]); 305 + dev_dbg(&oct->pdev->dev, 306 + "Successfully freed OQ(RxQ)-%d.\n", i); 307 + } 308 + } 309 + 310 + /** 311 + * octep_vf_oq_check_hw_for_pkts() - Check for new Rx packets. 312 + * 313 + * @oct: Octeon device private data structure. 314 + * @oq: Octeon Rx queue data structure. 315 + * 316 + * Return: packets received after previous check. 
317 + */ 318 + static int octep_vf_oq_check_hw_for_pkts(struct octep_vf_device *oct, 319 + struct octep_vf_oq *oq) 320 + { 321 + u32 pkt_count, new_pkts; 322 + 323 + pkt_count = readl(oq->pkts_sent_reg); 324 + new_pkts = pkt_count - oq->last_pkt_count; 325 + 326 + /* Clear the hardware packets counter register if the rx queue is 327 + * being processed continuously with-in a single interrupt and 328 + * reached half its max value. 329 + * this counter is not cleared every time read, to save write cycles. 330 + */ 331 + if (unlikely(pkt_count > 0xF0000000U)) { 332 + writel(pkt_count, oq->pkts_sent_reg); 333 + pkt_count = readl(oq->pkts_sent_reg); 334 + new_pkts += pkt_count; 335 + } 336 + oq->last_pkt_count = pkt_count; 337 + oq->pkts_pending += new_pkts; 338 + return new_pkts; 339 + } 340 + 341 + /** 342 + * __octep_vf_oq_process_rx() - Process hardware Rx queue and push to stack. 343 + * 344 + * @oct: Octeon device private data structure. 345 + * @oq: Octeon Rx queue data structure. 346 + * @pkts_to_process: number of packets to be processed. 347 + * 348 + * Process the new packets in Rx queue. 349 + * Packets larger than single Rx buffer arrive in consecutive descriptors. 350 + * But, count returned by the API only accounts full packets, not fragments. 351 + * 352 + * Return: number of packets processed and pushed to stack. 
353 + */ 354 + static int __octep_vf_oq_process_rx(struct octep_vf_device *oct, 355 + struct octep_vf_oq *oq, u16 pkts_to_process) 356 + { 357 + struct octep_vf_oq_resp_hw_ext *resp_hw_ext = NULL; 358 + netdev_features_t feat = oq->netdev->features; 359 + struct octep_vf_rx_buffer *buff_info; 360 + struct octep_vf_oq_resp_hw *resp_hw; 361 + u32 pkt, rx_bytes, desc_used; 362 + u16 data_offset, rx_ol_flags; 363 + struct sk_buff *skb; 364 + u32 read_idx; 365 + 366 + read_idx = oq->host_read_idx; 367 + rx_bytes = 0; 368 + desc_used = 0; 369 + for (pkt = 0; pkt < pkts_to_process; pkt++) { 370 + buff_info = (struct octep_vf_rx_buffer *)&oq->buff_info[read_idx]; 371 + dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr, 372 + PAGE_SIZE, DMA_FROM_DEVICE); 373 + resp_hw = page_address(buff_info->page); 374 + buff_info->page = NULL; 375 + 376 + /* Swap the length field that is in Big-Endian to CPU */ 377 + buff_info->len = be64_to_cpu(resp_hw->length); 378 + if (oct->fw_info.rx_ol_flags) { 379 + /* Extended response header is immediately after 380 + * response header (resp_hw) 381 + */ 382 + resp_hw_ext = (struct octep_vf_oq_resp_hw_ext *) 383 + (resp_hw + 1); 384 + buff_info->len -= OCTEP_VF_OQ_RESP_HW_EXT_SIZE; 385 + /* Packet Data is immediately after 386 + * extended response header. 387 + */ 388 + data_offset = OCTEP_VF_OQ_RESP_HW_SIZE + 389 + OCTEP_VF_OQ_RESP_HW_EXT_SIZE; 390 + rx_ol_flags = resp_hw_ext->rx_ol_flags; 391 + } else { 392 + /* Data is immediately after 393 + * Hardware Rx response header. 
394 + */ 395 + data_offset = OCTEP_VF_OQ_RESP_HW_SIZE; 396 + rx_ol_flags = 0; 397 + } 398 + rx_bytes += buff_info->len; 399 + 400 + if (buff_info->len <= oq->max_single_buffer_size) { 401 + skb = build_skb((void *)resp_hw, PAGE_SIZE); 402 + skb_reserve(skb, data_offset); 403 + skb_put(skb, buff_info->len); 404 + read_idx++; 405 + desc_used++; 406 + if (read_idx == oq->max_count) 407 + read_idx = 0; 408 + } else { 409 + struct skb_shared_info *shinfo; 410 + u16 data_len; 411 + 412 + skb = build_skb((void *)resp_hw, PAGE_SIZE); 413 + skb_reserve(skb, data_offset); 414 + /* Head fragment includes response header(s); 415 + * subsequent fragments contains only data. 416 + */ 417 + skb_put(skb, oq->max_single_buffer_size); 418 + read_idx++; 419 + desc_used++; 420 + if (read_idx == oq->max_count) 421 + read_idx = 0; 422 + 423 + shinfo = skb_shinfo(skb); 424 + data_len = buff_info->len - oq->max_single_buffer_size; 425 + while (data_len) { 426 + dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr, 427 + PAGE_SIZE, DMA_FROM_DEVICE); 428 + buff_info = (struct octep_vf_rx_buffer *) 429 + &oq->buff_info[read_idx]; 430 + if (data_len < oq->buffer_size) { 431 + buff_info->len = data_len; 432 + data_len = 0; 433 + } else { 434 + buff_info->len = oq->buffer_size; 435 + data_len -= oq->buffer_size; 436 + } 437 + 438 + skb_add_rx_frag(skb, shinfo->nr_frags, 439 + buff_info->page, 0, 440 + buff_info->len, 441 + buff_info->len); 442 + buff_info->page = NULL; 443 + read_idx++; 444 + desc_used++; 445 + if (read_idx == oq->max_count) 446 + read_idx = 0; 447 + } 448 + } 449 + 450 + skb->dev = oq->netdev; 451 + skb->protocol = eth_type_trans(skb, skb->dev); 452 + if (feat & NETIF_F_RXCSUM && 453 + OCTEP_VF_RX_CSUM_VERIFIED(rx_ol_flags)) 454 + skb->ip_summed = CHECKSUM_UNNECESSARY; 455 + else 456 + skb->ip_summed = CHECKSUM_NONE; 457 + napi_gro_receive(oq->napi, skb); 458 + } 459 + 460 + oq->host_read_idx = read_idx; 461 + oq->refill_count += desc_used; 462 + oq->stats.packets += 
pkt; 463 + oq->stats.bytes += rx_bytes; 464 + 465 + return pkt; 466 + } 467 + 468 + /** 469 + * octep_vf_oq_process_rx() - Process Rx queue. 470 + * 471 + * @oq: Octeon Rx queue data structure. 472 + * @budget: max number of packets can be processed in one invocation. 473 + * 474 + * Check for newly received packets and process them. 475 + * Keeps checking for new packets until budget is used or no new packets seen. 476 + * 477 + * Return: number of packets processed. 478 + */ 479 + int octep_vf_oq_process_rx(struct octep_vf_oq *oq, int budget) 480 + { 481 + u32 pkts_available, pkts_processed, total_pkts_processed; 482 + struct octep_vf_device *oct = oq->octep_vf_dev; 483 + 484 + pkts_available = 0; 485 + pkts_processed = 0; 486 + total_pkts_processed = 0; 487 + while (total_pkts_processed < budget) { 488 + /* update pending count only when current one exhausted */ 489 + if (oq->pkts_pending == 0) 490 + octep_vf_oq_check_hw_for_pkts(oct, oq); 491 + pkts_available = min(budget - total_pkts_processed, 492 + oq->pkts_pending); 493 + if (!pkts_available) 494 + break; 495 + 496 + pkts_processed = __octep_vf_oq_process_rx(oct, oq, 497 + pkts_available); 498 + oq->pkts_pending -= pkts_processed; 499 + total_pkts_processed += pkts_processed; 500 + } 501 + 502 + if (oq->refill_count >= oq->refill_threshold) { 503 + u32 desc_refilled = octep_vf_oq_refill(oct, oq); 504 + 505 + /* flush pending writes before updating credits */ 506 + smp_wmb(); 507 + writel(desc_refilled, oq->pkts_credit_reg); 508 + } 509 + 510 + return total_pkts_processed; 511 + }
+224
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
 *
 * Copyright (C) 2020 Marvell.
 *
 * Rx (output queue) data structures: hardware descriptor and response
 * header layouts, per-queue software bookkeeping and statistics.
 */

#ifndef _OCTEP_VF_RX_H_
#define _OCTEP_VF_RX_H_

/* struct octep_vf_oq_desc_hw - Octeon Hardware OQ descriptor format.
 *
 * The descriptor ring is made of descriptors which have 2 64-bit values:
 *
 * @buffer_ptr: DMA address of the receive buffer (a page mapped by the
 *              driver; the HW response header lands at its start).
 * @info_ptr:   DMA address of host memory, used to update pkt count by hw.
 *              This is currently unused to save pci writes.
 */
struct octep_vf_oq_desc_hw {
	dma_addr_t buffer_ptr;
	u64 info_ptr;
};

/* HW requires a fixed 16-byte descriptor; NOTE(review): this assumes
 * dma_addr_t is 64-bit wide on all supported configurations.
 */
static_assert(sizeof(struct octep_vf_oq_desc_hw) == 16);

#define OCTEP_VF_OQ_DESC_SIZE    (sizeof(struct octep_vf_oq_desc_hw))

/* Rx offload flags */
#define OCTEP_VF_RX_OFFLOAD_VLAN_STRIP   BIT(0)
#define OCTEP_VF_RX_OFFLOAD_IPV4_CKSUM   BIT(1)
#define OCTEP_VF_RX_OFFLOAD_UDP_CKSUM    BIT(2)
#define OCTEP_VF_RX_OFFLOAD_TCP_CKSUM    BIT(3)

#define OCTEP_VF_RX_OFFLOAD_CKSUM        (OCTEP_VF_RX_OFFLOAD_IPV4_CKSUM | \
					  OCTEP_VF_RX_OFFLOAD_UDP_CKSUM | \
					  OCTEP_VF_RX_OFFLOAD_TCP_CKSUM)

#define OCTEP_VF_RX_IP_CSUM(flags)       ((flags) & \
					  (OCTEP_VF_RX_OFFLOAD_IPV4_CKSUM | \
					   OCTEP_VF_RX_OFFLOAD_TCP_CKSUM | \
					   OCTEP_VF_RX_OFFLOAD_UDP_CKSUM))

/* bit 0 is vlan strip */
#define OCTEP_VF_RX_CSUM_IP_VERIFIED     BIT(1)
#define OCTEP_VF_RX_CSUM_L4_VERIFIED     BIT(2)

/* Non-zero when HW reports either the IP or L4 checksum as verified. */
#define OCTEP_VF_RX_CSUM_VERIFIED(flags) ((flags) & \
					  (OCTEP_VF_RX_CSUM_L4_VERIFIED | \
					   OCTEP_VF_RX_CSUM_IP_VERIFIED))

/* Extended Response Header in packet data received from Hardware.
 * Includes metadata like checksum status.
 * this is valid only if hardware/firmware published support for this.
 * This is at offset 0 of packet data (skb->data).
 */
struct octep_vf_oq_resp_hw_ext {
	/* Reserved. */
	u64 rsvd:48;

	/* rx offload flags (OCTEP_VF_RX_CSUM_* bits) */
	u16 rx_ol_flags;
};

static_assert(sizeof(struct octep_vf_oq_resp_hw_ext) == 8);

#define OCTEP_VF_OQ_RESP_HW_EXT_SIZE   (sizeof(struct octep_vf_oq_resp_hw_ext))

/* Length of Rx packet DMA'ed by Octeon to Host.
 * this is in bigendian; so need to be converted to cpu endian.
 * Octeon writes this at the beginning of Rx buffer (skb->data).
 */
struct octep_vf_oq_resp_hw {
	/* The Length of the packet. */
	__be64 length;
};

static_assert(sizeof(struct octep_vf_oq_resp_hw) == 8);

#define OCTEP_VF_OQ_RESP_HW_SIZE   (sizeof(struct octep_vf_oq_resp_hw))

/* Pointer to data buffer.
 * Driver keeps a pointer to the data buffer that it made available to
 * the Octeon device. Since the descriptor ring keeps physical (bus)
 * addresses, this field is required for the driver to keep track of
 * the virtual address pointers. The fields are operated by
 * OS-dependent routines.
 */
struct octep_vf_rx_buffer {
	/* Page backing this ring slot; NULL once handed to the stack. */
	struct page *page;

	/* length from rx hardware descriptor after converting to cpu endian */
	u64 len;
};

#define OCTEP_VF_OQ_RECVBUF_SIZE    (sizeof(struct octep_vf_rx_buffer))

/* Output Queue statistics. Each output queue has four stats fields. */
struct octep_vf_oq_stats {
	/* Number of packets received from the Device. */
	u64 packets;

	/* Number of bytes received from the Device. */
	u64 bytes;

	/* Number of times failed to allocate buffers. */
	u64 alloc_failures;
};

#define OCTEP_VF_OQ_STATS_SIZE   (sizeof(struct octep_vf_oq_stats))

/* Hardware interface Rx statistics */
struct octep_vf_iface_rx_stats {
	/* Received packets */
	u64 pkts;

	/* Octets of received packets */
	u64 octets;

	/* Received PAUSE and Control packets */
	u64 pause_pkts;

	/* Received PAUSE and Control octets */
	u64 pause_octets;

	/* Filtered DMAC0 packets */
	u64 dmac0_pkts;

	/* Filtered DMAC0 octets */
	u64 dmac0_octets;

	/* Packets dropped due to RX FIFO full */
	u64 dropped_pkts_fifo_full;

	/* Octets dropped due to RX FIFO full */
	u64 dropped_octets_fifo_full;

	/* Error packets */
	u64 err_pkts;

	/* Filtered DMAC1 packets */
	u64 dmac1_pkts;

	/* Filtered DMAC1 octets */
	u64 dmac1_octets;

	/* NCSI-bound packets dropped */
	u64 ncsi_dropped_pkts;

	/* NCSI-bound octets dropped */
	u64 ncsi_dropped_octets;

	/* Multicast packets received. */
	u64 mcast_pkts;

	/* Broadcast packets received. */
	u64 bcast_pkts;

};

/* The Descriptor Ring Output Queue structure.
 * This structure has all the information required to implement a
 * Octeon OQ.
 */
struct octep_vf_oq {
	/* Queue index of this OQ */
	u32 q_no;

	/* Back-pointers to owning device/netdev and the DMA device */
	struct octep_vf_device *octep_vf_dev;
	struct net_device *netdev;
	struct device *dev;

	/* NAPI context used for receive processing of this queue */
	struct napi_struct *napi;

	/* The receive buffer list. This list has the virtual addresses
	 * of the buffers.
	 */
	struct octep_vf_rx_buffer *buff_info;

	/* Pointer to the mapped packet credit register.
	 * Host writes number of info/buffer ptrs available to this register
	 */
	u8 __iomem *pkts_credit_reg;

	/* Pointer to the mapped packet sent register.
	 * Octeon writes the number of packets DMA'ed to host memory
	 * in this register.
	 */
	u8 __iomem *pkts_sent_reg;

	/* Statistics for this OQ. */
	struct octep_vf_oq_stats stats;

	/* Packets pending to be processed */
	u32 pkts_pending;
	/* HW counter value at the last check (counter is not cleared on read) */
	u32 last_pkt_count;

	/* Index in the ring where the driver should read the next packet */
	u32 host_read_idx;

	/* Number of descriptors in this ring. */
	u32 max_count;
	/* max_count - 1; valid for power-of-2 ring sizes */
	u32 ring_size_mask;

	/* The number of descriptors pending refill. */
	u32 refill_count;

	/* Index in the ring where the driver will refill the
	 * descriptor's buffer
	 */
	u32 host_refill_idx;
	/* Refill is triggered once refill_count reaches this threshold */
	u32 refill_threshold;

	/* The size of each buffer pointed by the buffer pointer. */
	u32 buffer_size;
	/* buffer_size minus HW response header(s); larger packets span buffers */
	u32 max_single_buffer_size;

	/* The 8B aligned descriptor ring starts at this address. */
	struct octep_vf_oq_desc_hw *desc_ring;

	/* DMA mapped address of the OQ descriptor ring. */
	dma_addr_t desc_ring_dma;
};

#define OCTEP_VF_OQ_SIZE   (sizeof(struct octep_vf_oq))
#endif /* _OCTEP_VF_RX_H_ */
+331
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Marvell Octeon EP (EndPoint) VF Ethernet Driver 3 + * 4 + * Copyright (C) 2020 Marvell. 5 + * 6 + */ 7 + 8 + #include <linux/pci.h> 9 + #include <linux/etherdevice.h> 10 + #include <linux/vmalloc.h> 11 + 12 + #include "octep_vf_config.h" 13 + #include "octep_vf_main.h" 14 + 15 + /* Reset various index of Tx queue data structure. */ 16 + static void octep_vf_iq_reset_indices(struct octep_vf_iq *iq) 17 + { 18 + iq->fill_cnt = 0; 19 + iq->host_write_index = 0; 20 + iq->octep_vf_read_index = 0; 21 + iq->flush_index = 0; 22 + iq->pkts_processed = 0; 23 + iq->pkt_in_done = 0; 24 + } 25 + 26 + /** 27 + * octep_vf_iq_process_completions() - Process Tx queue completions. 28 + * 29 + * @iq: Octeon Tx queue data structure. 30 + * @budget: max number of completions to be processed in one invocation. 31 + */ 32 + int octep_vf_iq_process_completions(struct octep_vf_iq *iq, u16 budget) 33 + { 34 + u32 compl_pkts, compl_bytes, compl_sg; 35 + struct octep_vf_device *oct = iq->octep_vf_dev; 36 + struct octep_vf_tx_buffer *tx_buffer; 37 + struct skb_shared_info *shinfo; 38 + u32 fi = iq->flush_index; 39 + struct sk_buff *skb; 40 + u8 frags, i; 41 + 42 + compl_pkts = 0; 43 + compl_sg = 0; 44 + compl_bytes = 0; 45 + iq->octep_vf_read_index = oct->hw_ops.update_iq_read_idx(iq); 46 + 47 + while (likely(budget && (fi != iq->octep_vf_read_index))) { 48 + tx_buffer = iq->buff_info + fi; 49 + skb = tx_buffer->skb; 50 + 51 + fi++; 52 + if (unlikely(fi == iq->max_count)) 53 + fi = 0; 54 + compl_bytes += skb->len; 55 + compl_pkts++; 56 + budget--; 57 + 58 + if (!tx_buffer->gather) { 59 + dma_unmap_single(iq->dev, tx_buffer->dma, 60 + tx_buffer->skb->len, DMA_TO_DEVICE); 61 + dev_kfree_skb_any(skb); 62 + continue; 63 + } 64 + 65 + /* Scatter/Gather */ 66 + shinfo = skb_shinfo(skb); 67 + frags = shinfo->nr_frags; 68 + compl_sg++; 69 + 70 + dma_unmap_single(iq->dev, tx_buffer->sglist[0].dma_ptr[0], 71 + tx_buffer->sglist[0].len[3], DMA_TO_DEVICE); 
72 + 73 + i = 1; /* entry 0 is main skb, unmapped above */ 74 + while (frags--) { 75 + dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3], 76 + tx_buffer->sglist[i >> 2].len[3 - (i & 3)], DMA_TO_DEVICE); 77 + i++; 78 + } 79 + 80 + dev_kfree_skb_any(skb); 81 + } 82 + 83 + iq->pkts_processed += compl_pkts; 84 + iq->stats.instr_completed += compl_pkts; 85 + iq->stats.bytes_sent += compl_bytes; 86 + iq->stats.sgentry_sent += compl_sg; 87 + iq->flush_index = fi; 88 + 89 + netdev_tx_completed_queue(iq->netdev_q, compl_pkts, compl_bytes); 90 + 91 + if (unlikely(__netif_subqueue_stopped(iq->netdev, iq->q_no)) && 92 + (IQ_INSTR_SPACE(iq) > 93 + OCTEP_VF_WAKE_QUEUE_THRESHOLD)) 94 + netif_wake_subqueue(iq->netdev, iq->q_no); 95 + return !budget; 96 + } 97 + 98 + /** 99 + * octep_vf_iq_free_pending() - Free Tx buffers for pending completions. 100 + * 101 + * @iq: Octeon Tx queue data structure. 102 + */ 103 + static void octep_vf_iq_free_pending(struct octep_vf_iq *iq) 104 + { 105 + struct octep_vf_tx_buffer *tx_buffer; 106 + struct skb_shared_info *shinfo; 107 + u32 fi = iq->flush_index; 108 + struct sk_buff *skb; 109 + u8 frags, i; 110 + 111 + while (fi != iq->host_write_index) { 112 + tx_buffer = iq->buff_info + fi; 113 + skb = tx_buffer->skb; 114 + 115 + fi++; 116 + if (unlikely(fi == iq->max_count)) 117 + fi = 0; 118 + 119 + if (!tx_buffer->gather) { 120 + dma_unmap_single(iq->dev, tx_buffer->dma, 121 + tx_buffer->skb->len, DMA_TO_DEVICE); 122 + dev_kfree_skb_any(skb); 123 + continue; 124 + } 125 + 126 + /* Scatter/Gather */ 127 + shinfo = skb_shinfo(skb); 128 + frags = shinfo->nr_frags; 129 + 130 + dma_unmap_single(iq->dev, 131 + tx_buffer->sglist[0].dma_ptr[0], 132 + tx_buffer->sglist[0].len[0], 133 + DMA_TO_DEVICE); 134 + 135 + i = 1; /* entry 0 is main skb, unmapped above */ 136 + while (frags--) { 137 + dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3], 138 + tx_buffer->sglist[i >> 2].len[i & 3], DMA_TO_DEVICE); 139 + i++; 140 + } 141 + 
142 + dev_kfree_skb_any(skb); 143 + } 144 + 145 + iq->flush_index = fi; 146 + netdev_tx_reset_queue(netdev_get_tx_queue(iq->netdev, iq->q_no)); 147 + } 148 + 149 + /** 150 + * octep_vf_clean_iqs() - Clean Tx queues to shutdown the device. 151 + * 152 + * @oct: Octeon device private data structure. 153 + * 154 + * Free the buffers in Tx queue descriptors pending completion and 155 + * reset queue indices 156 + */ 157 + void octep_vf_clean_iqs(struct octep_vf_device *oct) 158 + { 159 + int i; 160 + 161 + for (i = 0; i < oct->num_iqs; i++) { 162 + octep_vf_iq_free_pending(oct->iq[i]); 163 + octep_vf_iq_reset_indices(oct->iq[i]); 164 + } 165 + } 166 + 167 + /** 168 + * octep_vf_setup_iq() - Setup a Tx queue. 169 + * 170 + * @oct: Octeon device private data structure. 171 + * @q_no: Tx queue number to be setup. 172 + * 173 + * Allocate resources for a Tx queue. 174 + */ 175 + static int octep_vf_setup_iq(struct octep_vf_device *oct, int q_no) 176 + { 177 + u32 desc_ring_size, buff_info_size, sglist_size; 178 + struct octep_vf_iq *iq; 179 + int i; 180 + 181 + iq = vzalloc(sizeof(*iq)); 182 + if (!iq) 183 + goto iq_alloc_err; 184 + oct->iq[q_no] = iq; 185 + 186 + iq->octep_vf_dev = oct; 187 + iq->netdev = oct->netdev; 188 + iq->dev = &oct->pdev->dev; 189 + iq->q_no = q_no; 190 + iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf); 191 + iq->ring_size_mask = iq->max_count - 1; 192 + iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf); 193 + iq->netdev_q = netdev_get_tx_queue(iq->netdev, q_no); 194 + 195 + /* Allocate memory for hardware queue descriptors */ 196 + desc_ring_size = OCTEP_VF_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf); 197 + iq->desc_ring = dma_alloc_coherent(iq->dev, desc_ring_size, 198 + &iq->desc_ring_dma, GFP_KERNEL); 199 + if (unlikely(!iq->desc_ring)) { 200 + dev_err(iq->dev, 201 + "Failed to allocate DMA memory for IQ-%d\n", q_no); 202 + goto desc_dma_alloc_err; 203 + } 204 + 205 + /* Allocate memory for hardware SGLIST descriptors */ 206 + sglist_size = 
OCTEP_VF_SGLIST_SIZE_PER_PKT * 207 + CFG_GET_IQ_NUM_DESC(oct->conf); 208 + iq->sglist = dma_alloc_coherent(iq->dev, sglist_size, 209 + &iq->sglist_dma, GFP_KERNEL); 210 + if (unlikely(!iq->sglist)) { 211 + dev_err(iq->dev, 212 + "Failed to allocate DMA memory for IQ-%d SGLIST\n", 213 + q_no); 214 + goto sglist_alloc_err; 215 + } 216 + 217 + /* allocate memory to manage Tx packets pending completion */ 218 + buff_info_size = OCTEP_VF_IQ_TXBUFF_INFO_SIZE * iq->max_count; 219 + iq->buff_info = vzalloc(buff_info_size); 220 + if (!iq->buff_info) { 221 + dev_err(iq->dev, 222 + "Failed to allocate buff info for IQ-%d\n", q_no); 223 + goto buff_info_err; 224 + } 225 + 226 + /* Setup sglist addresses in tx_buffer entries */ 227 + for (i = 0; i < CFG_GET_IQ_NUM_DESC(oct->conf); i++) { 228 + struct octep_vf_tx_buffer *tx_buffer; 229 + 230 + tx_buffer = &iq->buff_info[i]; 231 + tx_buffer->sglist = 232 + &iq->sglist[i * OCTEP_VF_SGLIST_ENTRIES_PER_PKT]; 233 + tx_buffer->sglist_dma = 234 + iq->sglist_dma + (i * OCTEP_VF_SGLIST_SIZE_PER_PKT); 235 + } 236 + 237 + octep_vf_iq_reset_indices(iq); 238 + oct->hw_ops.setup_iq_regs(oct, q_no); 239 + 240 + oct->num_iqs++; 241 + return 0; 242 + 243 + buff_info_err: 244 + dma_free_coherent(iq->dev, sglist_size, iq->sglist, iq->sglist_dma); 245 + sglist_alloc_err: 246 + dma_free_coherent(iq->dev, desc_ring_size, 247 + iq->desc_ring, iq->desc_ring_dma); 248 + desc_dma_alloc_err: 249 + vfree(iq); 250 + oct->iq[q_no] = NULL; 251 + iq_alloc_err: 252 + return -1; 253 + } 254 + 255 + /** 256 + * octep_vf_free_iq() - Free Tx queue resources. 257 + * 258 + * @iq: Octeon Tx queue data structure. 259 + * 260 + * Free all the resources allocated for a Tx queue. 
261 + */ 262 + static void octep_vf_free_iq(struct octep_vf_iq *iq) 263 + { 264 + struct octep_vf_device *oct = iq->octep_vf_dev; 265 + u64 desc_ring_size, sglist_size; 266 + int q_no = iq->q_no; 267 + 268 + desc_ring_size = OCTEP_VF_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf); 269 + 270 + vfree(iq->buff_info); 271 + 272 + if (iq->desc_ring) 273 + dma_free_coherent(iq->dev, desc_ring_size, 274 + iq->desc_ring, iq->desc_ring_dma); 275 + 276 + sglist_size = OCTEP_VF_SGLIST_SIZE_PER_PKT * 277 + CFG_GET_IQ_NUM_DESC(oct->conf); 278 + if (iq->sglist) 279 + dma_free_coherent(iq->dev, sglist_size, 280 + iq->sglist, iq->sglist_dma); 281 + 282 + vfree(iq); 283 + oct->iq[q_no] = NULL; 284 + oct->num_iqs--; 285 + } 286 + 287 + /** 288 + * octep_vf_setup_iqs() - setup resources for all Tx queues. 289 + * 290 + * @oct: Octeon device private data structure. 291 + */ 292 + int octep_vf_setup_iqs(struct octep_vf_device *oct) 293 + { 294 + int i; 295 + 296 + oct->num_iqs = 0; 297 + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) { 298 + if (octep_vf_setup_iq(oct, i)) { 299 + dev_err(&oct->pdev->dev, 300 + "Failed to setup IQ(TxQ)-%d.\n", i); 301 + goto iq_setup_err; 302 + } 303 + dev_dbg(&oct->pdev->dev, "Successfully setup IQ(TxQ)-%d.\n", i); 304 + } 305 + 306 + return 0; 307 + 308 + iq_setup_err: 309 + while (i) { 310 + i--; 311 + octep_vf_free_iq(oct->iq[i]); 312 + } 313 + return -1; 314 + } 315 + 316 + /** 317 + * octep_vf_free_iqs() - Free resources of all Tx queues. 318 + * 319 + * @oct: Octeon device private data structure. 320 + */ 321 + void octep_vf_free_iqs(struct octep_vf_device *oct) 322 + { 323 + int i; 324 + 325 + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) { 326 + octep_vf_free_iq(oct->iq[i]); 327 + dev_dbg(&oct->pdev->dev, 328 + "Successfully destroyed IQ(TxQ)-%d.\n", i); 329 + } 330 + oct->num_iqs = 0; 331 + }
+276
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#ifndef _OCTEP_VF_TX_H_
#define _OCTEP_VF_TX_H_

/* Transmit status codes returned by the xmit path. */
#define IQ_SEND_OK          0
#define IQ_SEND_STOP        1
#define IQ_SEND_FAILED     -1

/* Tx buffer classification. */
#define TX_BUFTYPE_NONE     0
#define TX_BUFTYPE_NET      1
#define TX_BUFTYPE_NET_SG   2
#define NUM_TX_BUFTYPES     3

/* Hardware format for Scatter/Gather list
 *
 * 63      48|47     32|31     16|15       0
 * -----------------------------------------
 * |  Len 0  |  Len 1  |  Len 2  |  Len 3  |
 * -----------------------------------------
 * |                Ptr 0                  |
 * -----------------------------------------
 * |                Ptr 1                  |
 * -----------------------------------------
 * |                Ptr 2                  |
 * -----------------------------------------
 * |                Ptr 3                  |
 * -----------------------------------------
 *
 * Note: Len 0 occupies the most significant bits of the first 64-bit
 * word, so on a little-endian host buffer k's length is accessed as
 * len[3 - (k & 3)] of entry (k >> 2) — see the dma_unmap calls in
 * octep_vf_tx.c.
 */
struct octep_vf_tx_sglist_desc {
	u16 len[4];
	dma_addr_t dma_ptr[4];
};

static_assert(sizeof(struct octep_vf_tx_sglist_desc) == 40);

/* Each Scatter/Gather entry sent to hardware holds four pointers.
 * So, number of entries required is (MAX_SKB_FRAGS + 1)/4, where '+1'
 * is for main skb which also goes as a gather buffer to Octeon hardware.
 * To allocate sufficient SGLIST entries for a packet with max fragments,
 * align by adding 3 before calculating max SGLIST entries per packet.
 */
#define OCTEP_VF_SGLIST_ENTRIES_PER_PKT ((MAX_SKB_FRAGS + 1 + 3) / 4)
#define OCTEP_VF_SGLIST_SIZE_PER_PKT \
	(OCTEP_VF_SGLIST_ENTRIES_PER_PKT * sizeof(struct octep_vf_tx_sglist_desc))

/* Per-descriptor bookkeeping for a posted Tx packet, used to unmap
 * and free it on completion.
 */
struct octep_vf_tx_buffer {
	/* skb posted on this descriptor. */
	struct sk_buff *skb;

	/* DMA address of the linear skb data (non-gather case). */
	dma_addr_t dma;

	/* Scatter/Gather list for this descriptor (gather case). */
	struct octep_vf_tx_sglist_desc *sglist;

	/* DMA address of the above sglist. */
	dma_addr_t sglist_dma;

	/* Non-zero when the packet was posted as scatter/gather. */
	u8 gather;
};

#define OCTEP_VF_IQ_TXBUFF_INFO_SIZE (sizeof(struct octep_vf_tx_buffer))

/* VF Hardware interface Tx statistics */
struct octep_vf_iface_tx_stats {
	/* Total frames sent on the interface */
	u64 pkts;

	/* Total octets sent on the interface */
	u64 octs;

	/* Packets sent to a broadcast DMAC */
	u64 bcst;

	/* Packets sent to the multicast DMAC */
	u64 mcst;

	/* Packets dropped */
	u64 dropped;

	/* Reserved */
	u64 reserved[13];
};

/* VF Input Queue statistics */
struct octep_vf_iq_stats {
	/* Instructions posted to this queue. */
	u64 instr_posted;

	/* Instructions copied by hardware for processing. */
	u64 instr_completed;

	/* Instructions that could not be processed. */
	u64 instr_dropped;

	/* Bytes sent through this queue. */
	u64 bytes_sent;

	/* Gather entries sent through this queue. */
	u64 sgentry_sent;

	/* Number of transmit failures due to TX_BUSY */
	u64 tx_busy;

	/* Number of times the queue is restarted */
	u64 restart_cnt;
};

/* The instruction (input) queue.
 * The input queue is used to post raw (instruction) mode data or packet
 * data to Octeon device from the host. Each input queue (up to 4) for
 * a Octeon device has one such structure to represent it.
 */
struct octep_vf_iq {
	/* Zero-based index of this queue within the device. */
	u32 q_no;

	struct octep_vf_device *octep_vf_dev;
	struct net_device *netdev;
	struct device *dev;
	struct netdev_queue *netdev_q;

	/* Index in input ring where driver should write the next packet */
	u16 host_write_index;

	/* Index in input ring where Octeon is expected to read next packet */
	u16 octep_vf_read_index;

	/* This index aids in finding the window in the queue where Octeon
	 * has read the commands.
	 */
	u16 flush_index;

	/* Statistics for this input queue. */
	struct octep_vf_iq_stats stats;

	/* Pointer to the Virtual Base addr of the input ring. */
	struct octep_vf_tx_desc_hw *desc_ring;

	/* DMA mapped base address of the input descriptor ring. */
	dma_addr_t desc_ring_dma;

	/* Info of Tx buffers pending completion. */
	struct octep_vf_tx_buffer *buff_info;

	/* Base pointer to Scatter/Gather lists for all ring descriptors. */
	struct octep_vf_tx_sglist_desc *sglist;

	/* DMA mapped addr of Scatter Gather Lists */
	dma_addr_t sglist_dma;

	/* Octeon doorbell register for the ring. */
	u8 __iomem *doorbell_reg;

	/* Octeon instruction count register for this ring. */
	u8 __iomem *inst_cnt_reg;

	/* interrupt level register for this ring */
	u8 __iomem *intr_lvl_reg;

	/* Maximum no. of instructions in this queue. */
	u32 max_count;
	u32 ring_size_mask;

	u32 pkt_in_done;
	u32 pkts_processed;

	u32 status;

	/* Number of instructions pending to be posted to Octeon. */
	u32 fill_cnt;

	/* The max. number of instructions that can be held pending by the
	 * driver before ringing doorbell.
	 */
	u32 fill_threshold;
};

/* Hardware Tx Instruction Header */
struct octep_vf_instr_hdr {
	/* Data Len */
	u64 tlen:16;

	/* Reserved */
	u64 rsvd:20;

	/* PKIND for SDP */
	u64 pkind:6;

	/* Front Data size */
	u64 fsz:6;

	/* No. of entries in gather list */
	u64 gsz:14;

	/* Gather indicator 1=gather*/
	u64 gather:1;

	/* Reserved3 */
	u64 reserved3:1;
};

static_assert(sizeof(struct octep_vf_instr_hdr) == 8);

/* Tx offload flags */
#define OCTEP_VF_TX_OFFLOAD_VLAN_INSERT	BIT(0)
#define OCTEP_VF_TX_OFFLOAD_IPV4_CKSUM	BIT(1)
#define OCTEP_VF_TX_OFFLOAD_UDP_CKSUM	BIT(2)
#define OCTEP_VF_TX_OFFLOAD_TCP_CKSUM	BIT(3)
#define OCTEP_VF_TX_OFFLOAD_SCTP_CKSUM	BIT(4)
#define OCTEP_VF_TX_OFFLOAD_TCP_TSO	BIT(5)
#define OCTEP_VF_TX_OFFLOAD_UDP_TSO	BIT(6)

#define OCTEP_VF_TX_OFFLOAD_CKSUM	(OCTEP_VF_TX_OFFLOAD_IPV4_CKSUM | \
					 OCTEP_VF_TX_OFFLOAD_UDP_CKSUM | \
					 OCTEP_VF_TX_OFFLOAD_TCP_CKSUM)

#define OCTEP_VF_TX_OFFLOAD_TSO		(OCTEP_VF_TX_OFFLOAD_TCP_TSO | \
					 OCTEP_VF_TX_OFFLOAD_UDP_TSO)

#define OCTEP_VF_TX_IP_CSUM(flags)	((flags) & \
					 (OCTEP_VF_TX_OFFLOAD_IPV4_CKSUM | \
					  OCTEP_VF_TX_OFFLOAD_TCP_CKSUM | \
					  OCTEP_VF_TX_OFFLOAD_UDP_CKSUM))

#define OCTEP_VF_TX_TSO(flags)		((flags) & \
					 (OCTEP_VF_TX_OFFLOAD_TCP_TSO | \
					  OCTEP_VF_TX_OFFLOAD_UDP_TSO))

struct tx_mdata {
	/* offload flags */
	u16 ol_flags;

	/* gso size */
	u16 gso_size;

	/* number of gso segments */
	u16 gso_segs;

	/* reserved */
	u16 rsvd1;

	/* reserved */
	u64 rsvd2;
};

static_assert(sizeof(struct tx_mdata) == 16);

/* 64-byte Tx instruction format.
 * Format of instruction for a 64-byte mode input queue.
 *
 * only first 16-bytes (dptr and ih) are mandatory; rest are optional
 * and filled by the driver based on firmware/hardware capabilities.
 * These optional headers together called Front Data and its size is
 * described by ih->fsz.
 */
struct octep_vf_tx_desc_hw {
	/* Pointer where the input data is available. */
	u64 dptr;

	/* Instruction Header. */
	union {
		struct octep_vf_instr_hdr ih;
		u64 ih64;
	};

	union {
		u64 txm64[2];
		struct tx_mdata txm;
	};

	/* Additional headers available in a 64-byte instruction. */
	u64 exhdr[4];
};

static_assert(sizeof(struct octep_vf_tx_desc_hw) == 64);

#define OCTEP_VF_IQ_DESC_SIZE (sizeof(struct octep_vf_tx_desc_hw))
#endif /* _OCTEP_VF_TX_H_ */