/* Linux kernel, tag v4.15 — Marvell PPv2 network controller driver (mvpp2) */
/* Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/regmap.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>

/* Fifo Registers */
#define MVPP2_RX_DATA_FIFO_SIZE_REG(port)	(0x00 + 4 * (port))
#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port)	(0x20 + 4 * (port))
#define MVPP2_RX_MIN_PKT_SIZE_REG		0x60
#define MVPP2_RX_FIFO_INIT_REG			0x64
#define MVPP22_TX_FIFO_SIZE_REG(port)		(0x8860 + 4 * (port))

/* RX DMA Top Registers */
#define MVPP2_RX_CTRL_REG(port)			(0x140 + 4 * (port))
#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s)	(((s) & 0xfff) << 16)
#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK	BIT(31)
#define MVPP2_POOL_BUF_SIZE_REG(pool)		(0x180 + 4 * (pool))
#define MVPP2_POOL_BUF_SIZE_OFFSET		5
#define MVPP2_RXQ_CONFIG_REG(rxq)		(0x800 + 4 * (rxq))
#define MVPP2_SNOOP_PKT_SIZE_MASK		0x1ff
#define MVPP2_SNOOP_BUF_HDR_MASK		BIT(9)
#define MVPP2_RXQ_POOL_SHORT_OFFS		20
#define MVPP21_RXQ_POOL_SHORT_MASK		0x700000
#define MVPP22_RXQ_POOL_SHORT_MASK		0xf00000
#define MVPP2_RXQ_POOL_LONG_OFFS		24
#define MVPP21_RXQ_POOL_LONG_MASK		0x7000000
#define MVPP22_RXQ_POOL_LONG_MASK		0xf000000
#define MVPP2_RXQ_PACKET_OFFSET_OFFS		28
#define MVPP2_RXQ_PACKET_OFFSET_MASK		0x70000000
#define MVPP2_RXQ_DISABLE_MASK			BIT(31)

/* Parser Registers */
#define MVPP2_PRS_INIT_LOOKUP_REG		0x1000
#define MVPP2_PRS_PORT_LU_MAX			0xf
#define MVPP2_PRS_PORT_LU_MASK(port)		(0xff << ((port) * 4))
#define MVPP2_PRS_PORT_LU_VAL(port, val)	((val) << ((port) * 4))
#define MVPP2_PRS_INIT_OFFS_REG(port)		(0x1004 + ((port) & 4))
#define MVPP2_PRS_INIT_OFF_MASK(port)		(0x3f << (((port) % 4) * 8))
#define MVPP2_PRS_INIT_OFF_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_REG(port)		(0x100c + ((port) & 4))
#define MVPP2_PRS_MAX_LOOP_MASK(port)		(0xff << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_TCAM_IDX_REG			0x1100
#define MVPP2_PRS_TCAM_DATA_REG(idx)		(0x1104 + (idx) * 4)
#define MVPP2_PRS_TCAM_INV_MASK			BIT(31)
#define MVPP2_PRS_SRAM_IDX_REG			0x1200
#define MVPP2_PRS_SRAM_DATA_REG(idx)		(0x1204 + (idx) * 4)
#define MVPP2_PRS_TCAM_CTRL_REG			0x1230
#define MVPP2_PRS_TCAM_EN_MASK			BIT(0)

/* RSS Registers */
#define MVPP22_RSS_INDEX			0x1500
#define MVPP22_RSS_INDEX_TABLE_ENTRY(idx)	(idx)
#define MVPP22_RSS_INDEX_TABLE(idx)		((idx) << 8)
#define MVPP22_RSS_INDEX_QUEUE(idx)		((idx) << 16)
#define MVPP22_RSS_TABLE_ENTRY			0x1508
#define MVPP22_RSS_TABLE			0x1510
#define MVPP22_RSS_TABLE_POINTER(p)		(p)
#define MVPP22_RSS_WIDTH			0x150c
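
/* A minimal usage sketch (an editorial illustration, assuming the same
 * indirect-access convention used for the parser TCAM below): the RSS
 * tables are indirect, so to point entry 5 of indirection table 1 at RX
 * queue 3 one would first select the cell via MVPP22_RSS_INDEX, then
 * write the payload:
 *
 *   mvpp2_write(priv, MVPP22_RSS_INDEX,
 *               MVPP22_RSS_INDEX_TABLE(1) | MVPP22_RSS_INDEX_TABLE_ENTRY(5));
 *   mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY, 3);
 */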

/* Classifier Registers */
#define MVPP2_CLS_MODE_REG			0x1800
#define MVPP2_CLS_MODE_ACTIVE_MASK		BIT(0)
#define MVPP2_CLS_PORT_WAY_REG			0x1810
#define MVPP2_CLS_PORT_WAY_MASK(port)		(1 << (port))
#define MVPP2_CLS_LKP_INDEX_REG			0x1814
#define MVPP2_CLS_LKP_INDEX_WAY_OFFS		6
#define MVPP2_CLS_LKP_TBL_REG			0x1818
#define MVPP2_CLS_LKP_TBL_RXQ_MASK		0xff
#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK	BIT(25)
#define MVPP2_CLS_FLOW_INDEX_REG		0x1820
#define MVPP2_CLS_FLOW_TBL0_REG			0x1824
#define MVPP2_CLS_FLOW_TBL1_REG			0x1828
#define MVPP2_CLS_FLOW_TBL2_REG			0x182c
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port)	(0x1980 + ((port) * 4))
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS		3
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK		0x7
#define MVPP2_CLS_SWFWD_P2HQ_REG(port)		(0x19b0 + ((port) * 4))
#define MVPP2_CLS_SWFWD_PCTRL_REG		0x19d0
#define MVPP2_CLS_SWFWD_PCTRL_MASK(port)	(1 << (port))

/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG			0x2040
#define MVPP2_RXQ_DESC_ADDR_REG			0x2044
#define MVPP22_DESC_ADDR_OFFS			8
#define MVPP2_RXQ_DESC_SIZE_REG			0x2048
#define MVPP2_RXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq)	(0x3000 + 4 * (rxq))
#define MVPP2_RXQ_NUM_PROCESSED_OFFSET		0
#define MVPP2_RXQ_NUM_NEW_OFFSET		16
#define MVPP2_RXQ_STATUS_REG(rxq)		(0x3400 + 4 * (rxq))
#define MVPP2_RXQ_OCCUPIED_MASK			0x3fff
#define MVPP2_RXQ_NON_OCCUPIED_OFFSET		16
#define MVPP2_RXQ_NON_OCCUPIED_MASK		0x3fff0000
#define MVPP2_RXQ_THRESH_REG			0x204c
#define MVPP2_OCCUPIED_THRESH_OFFSET		0
#define MVPP2_OCCUPIED_THRESH_MASK		0x3fff
#define MVPP2_RXQ_INDEX_REG			0x2050
#define MVPP2_TXQ_NUM_REG			0x2080
#define MVPP2_TXQ_DESC_ADDR_REG			0x2084
#define MVPP2_TXQ_DESC_SIZE_REG			0x2088
#define MVPP2_TXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_TXQ_THRESH_REG			0x2094
#define MVPP2_TXQ_THRESH_OFFSET			16
#define MVPP2_TXQ_THRESH_MASK			0x3fff
#define MVPP2_AGGR_TXQ_UPDATE_REG		0x2090
#define MVPP2_TXQ_INDEX_REG			0x2098
#define MVPP2_TXQ_PREF_BUF_REG			0x209c
#define MVPP2_PREF_BUF_PTR(desc)		((desc) & 0xfff)
#define MVPP2_PREF_BUF_SIZE_4			(BIT(12) | BIT(13))
#define MVPP2_PREF_BUF_SIZE_16			(BIT(12) | BIT(14))
#define MVPP2_PREF_BUF_THRESH(val)		((val) << 17)
#define MVPP2_TXQ_DRAIN_EN_MASK			BIT(31)
#define MVPP2_TXQ_PENDING_REG			0x20a0
#define MVPP2_TXQ_PENDING_MASK			0x3fff
#define MVPP2_TXQ_INT_STATUS_REG		0x20a4
#define MVPP2_TXQ_SENT_REG(txq)			(0x3c00 + 4 * (txq))
#define MVPP2_TRANSMITTED_COUNT_OFFSET		16
#define MVPP2_TRANSMITTED_COUNT_MASK		0x3fff0000
#define MVPP2_TXQ_RSVD_REQ_REG			0x20b0
#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET		16
#define MVPP2_TXQ_RSVD_RSLT_REG			0x20b4
#define MVPP2_TXQ_RSVD_RSLT_MASK		0x3fff
#define MVPP2_TXQ_RSVD_CLR_REG			0x20b8
#define MVPP2_TXQ_RSVD_CLR_OFFSET		16
#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu)	(0x2100 + 4 * (cpu))
#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS		8
#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu)	(0x2140 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_AGGR_TXQ_STATUS_REG(cpu)		(0x2180 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_PENDING_MASK		0x3fff
#define MVPP2_AGGR_TXQ_INDEX_REG(cpu)		(0x21c0 + 4 * (cpu))
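
/* Many of the registers above are indirect: the queue number is written
 * first to MVPP2_RXQ_NUM_REG (or MVPP2_TXQ_NUM_REG), and subsequent
 * accesses to the DESC_ADDR/DESC_SIZE/INDEX registers then apply to that
 * queue (see the per-CPU accessor comment further down). A minimal
 * sketch, omitting the PPv2.2 high-address handling:
 *
 *   mvpp2_percpu_write(priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
 *   mvpp2_percpu_write(priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
 *   mvpp2_percpu_write(priv, cpu, MVPP2_RXQ_DESC_SIZE_REG,
 *                      rxq->size & MVPP2_RXQ_DESC_SIZE_MASK);
 */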

/* MBUS bridge registers */
#define MVPP2_WIN_BASE(w)			(0x4000 + ((w) << 2))
#define MVPP2_WIN_SIZE(w)			(0x4020 + ((w) << 2))
#define MVPP2_WIN_REMAP(w)			(0x4040 + ((w) << 2))
#define MVPP2_BASE_ADDR_ENABLE			0x4060

/* AXI Bridge Registers */
#define MVPP22_AXI_BM_WR_ATTR_REG		0x4100
#define MVPP22_AXI_BM_RD_ATTR_REG		0x4104
#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG	0x4110
#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG	0x4114
#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG	0x4118
#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG	0x411c
#define MVPP22_AXI_RX_DATA_WR_ATTR_REG		0x4120
#define MVPP22_AXI_TX_DATA_RD_ATTR_REG		0x4130
#define MVPP22_AXI_RD_NORMAL_CODE_REG		0x4150
#define MVPP22_AXI_RD_SNOOP_CODE_REG		0x4154
#define MVPP22_AXI_WR_NORMAL_CODE_REG		0x4160
#define MVPP22_AXI_WR_SNOOP_CODE_REG		0x4164

/* Values for AXI Bridge registers */
#define MVPP22_AXI_ATTR_CACHE_OFFS		0
#define MVPP22_AXI_ATTR_DOMAIN_OFFS		12

#define MVPP22_AXI_CODE_CACHE_OFFS		0
#define MVPP22_AXI_CODE_DOMAIN_OFFS		4

#define MVPP22_AXI_CODE_CACHE_NON_CACHE		0x3
#define MVPP22_AXI_CODE_CACHE_WR_CACHE		0x7
#define MVPP22_AXI_CODE_CACHE_RD_CACHE		0xb

#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM	2
#define MVPP22_AXI_CODE_DOMAIN_SYSTEM		3

/* Interrupt Cause and Mask registers */
#define MVPP2_ISR_TX_THRESHOLD_REG(port)	(0x5140 + 4 * (port))
#define MVPP2_MAX_ISR_TX_THRESHOLD		0xfffff0

#define MVPP2_ISR_RX_THRESHOLD_REG(rxq)		(0x5200 + 4 * (rxq))
#define MVPP2_MAX_ISR_RX_THRESHOLD		0xfffff0
#define MVPP21_ISR_RXQ_GROUP_REG(port)		(0x5400 + 4 * (port))

#define MVPP22_ISR_RXQ_GROUP_INDEX_REG		0x5400
#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK	0x380
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET	7

#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK	0x380

#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG	0x5404
#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK	0x1f
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK	0xf00
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET	8

#define MVPP2_ISR_ENABLE_REG(port)		(0x5420 + 4 * (port))
#define MVPP2_ISR_ENABLE_INTERRUPT(mask)	((mask) & 0xffff)
#define MVPP2_ISR_DISABLE_INTERRUPT(mask)	(((mask) << 16) & 0xffff0000)
#define MVPP2_ISR_RX_TX_CAUSE_REG(port)		(0x5480 + 4 * (port))
#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK	0xff0000
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET	16
#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK	BIT(24)
#define MVPP2_CAUSE_FCS_ERR_MASK		BIT(25)
#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK	BIT(26)
#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK	BIT(29)
#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK	BIT(30)
#define MVPP2_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_RX_TX_MASK_REG(port)		(0x54a0 + 4 * (port))
#define MVPP2_ISR_PON_RX_TX_MASK_REG		0x54bc
#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK	0x3fc00000
#define MVPP2_PON_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_MISC_CAUSE_REG		0x55b0
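
/* MVPP2_ISR_ENABLE_REG packs "enable" bits in its low half-word and
 * "disable" bits in its high half-word, so either operation is a single
 * register write (a sketch; "cpu_mask" is a placeholder):
 *
 *   mvpp2_write(priv, MVPP2_ISR_ENABLE_REG(port),
 *               MVPP2_ISR_ENABLE_INTERRUPT(cpu_mask));  // unmask
 *   mvpp2_write(priv, MVPP2_ISR_ENABLE_REG(port),
 *               MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask)); // mask
 */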

/* Buffer Manager registers */
#define MVPP2_BM_POOL_BASE_REG(pool)		(0x6000 + ((pool) * 4))
#define MVPP2_BM_POOL_BASE_ADDR_MASK		0xfffff80
#define MVPP2_BM_POOL_SIZE_REG(pool)		(0x6040 + ((pool) * 4))
#define MVPP2_BM_POOL_SIZE_MASK			0xfff0
#define MVPP2_BM_POOL_READ_PTR_REG(pool)	(0x6080 + ((pool) * 4))
#define MVPP2_BM_POOL_GET_READ_PTR_MASK		0xfff0
#define MVPP2_BM_POOL_PTRS_NUM_REG(pool)	(0x60c0 + ((pool) * 4))
#define MVPP2_BM_POOL_PTRS_NUM_MASK		0xfff0
#define MVPP2_BM_BPPI_READ_PTR_REG(pool)	(0x6100 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool)	(0x6140 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTR_NUM_MASK		0x7ff
#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK	BIT(16)
#define MVPP2_BM_POOL_CTRL_REG(pool)		(0x6200 + ((pool) * 4))
#define MVPP2_BM_START_MASK			BIT(0)
#define MVPP2_BM_STOP_MASK			BIT(1)
#define MVPP2_BM_STATE_MASK			BIT(4)
#define MVPP2_BM_LOW_THRESH_OFFS		8
#define MVPP2_BM_LOW_THRESH_MASK		0x7f00
#define MVPP2_BM_LOW_THRESH_VALUE(val)		((val) << \
						MVPP2_BM_LOW_THRESH_OFFS)
#define MVPP2_BM_HIGH_THRESH_OFFS		16
#define MVPP2_BM_HIGH_THRESH_MASK		0x7f0000
#define MVPP2_BM_HIGH_THRESH_VALUE(val)		((val) << \
						MVPP2_BM_HIGH_THRESH_OFFS)
#define MVPP2_BM_INTR_CAUSE_REG(pool)		(0x6240 + ((pool) * 4))
#define MVPP2_BM_RELEASED_DELAY_MASK		BIT(0)
#define MVPP2_BM_ALLOC_FAILED_MASK		BIT(1)
#define MVPP2_BM_BPPE_EMPTY_MASK		BIT(2)
#define MVPP2_BM_BPPE_FULL_MASK			BIT(3)
#define MVPP2_BM_AVAILABLE_BP_LOW_MASK		BIT(4)
#define MVPP2_BM_INTR_MASK_REG(pool)		(0x6280 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_REG(pool)		(0x6400 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK		BIT(0)
#define MVPP2_BM_VIRT_ALLOC_REG			0x6440
#define MVPP22_BM_ADDR_HIGH_ALLOC		0x6444
#define MVPP22_BM_ADDR_HIGH_PHYS_MASK		0xff
#define MVPP22_BM_ADDR_HIGH_VIRT_MASK		0xff00
#define MVPP22_BM_ADDR_HIGH_VIRT_SHIFT		8
#define MVPP2_BM_PHY_RLS_REG(pool)		(0x6480 + ((pool) * 4))
#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK		BIT(0)
#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK		BIT(1)
#define MVPP2_BM_PHY_RLS_GRNTD_MASK		BIT(2)
#define MVPP2_BM_VIRT_RLS_REG			0x64c0
#define MVPP22_BM_ADDR_HIGH_RLS_REG		0x64c4
#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK	0xff
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK	0xff00
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT	8
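
/* Buffer release sketch (an editorial illustration for PPv2.2; "high_bits"
 * is a placeholder): the high address bits are staged first, the virtual
 * address/cookie next, and the pool-indexed write of the physical address
 * to MVPP2_BM_PHY_RLS_REG() completes the release:
 *
 *   mvpp2_percpu_write(priv, cpu, MVPP22_BM_ADDR_HIGH_RLS_REG, high_bits);
 *   mvpp2_percpu_write(priv, cpu, MVPP2_BM_VIRT_RLS_REG, buf_virt_addr);
 *   mvpp2_percpu_write(priv, cpu, MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
 */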

/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG		0x8000
#define MVPP2_TXP_SCHED_Q_CMD_REG		0x8004
#define MVPP2_TXP_SCHED_ENQ_MASK		0xff
#define MVPP2_TXP_SCHED_DISQ_OFFSET		8
#define MVPP2_TXP_SCHED_CMD_1_REG		0x8010
#define MVPP2_TXP_SCHED_PERIOD_REG		0x8018
#define MVPP2_TXP_SCHED_MTU_REG			0x801c
#define MVPP2_TXP_MTU_MAX			0x7FFFF
#define MVPP2_TXP_SCHED_REFILL_REG		0x8020
#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXP_REFILL_PERIOD_MASK(v)		((v) << 20)
#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG		0x8024
#define MVPP2_TXP_TOKEN_SIZE_MAX		0xffffffff
#define MVPP2_TXQ_SCHED_REFILL_REG(q)		(0x8040 + ((q) << 2))
#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXQ_REFILL_PERIOD_MASK(v)		((v) << 20)
#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q)	(0x8060 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_SIZE_MAX		0x7fffffff
#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q)	(0x8080 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_CNTR_MAX		0xffffffff

/* TX general registers */
#define MVPP2_TX_SNOOP_REG			0x8800
#define MVPP2_TX_PORT_FLUSH_REG			0x8810
#define MVPP2_TX_PORT_FLUSH_MASK(port)		(1 << (port))

/* LMS registers */
#define MVPP2_SRC_ADDR_MIDDLE			0x24
#define MVPP2_SRC_ADDR_HIGH			0x28
#define MVPP2_PHY_AN_CFG0_REG			0x34
#define MVPP2_PHY_AN_STOP_SMI0_MASK		BIT(7)
#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG	0x305c
#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT		0x27

/* Per-port registers */
#define MVPP2_GMAC_CTRL_0_REG			0x0
#define MVPP2_GMAC_PORT_EN_MASK			BIT(0)
#define MVPP2_GMAC_PORT_TYPE_MASK		BIT(1)
#define MVPP2_GMAC_MAX_RX_SIZE_OFFS		2
#define MVPP2_GMAC_MAX_RX_SIZE_MASK		0x7ffc
#define MVPP2_GMAC_MIB_CNTR_EN_MASK		BIT(15)
#define MVPP2_GMAC_CTRL_1_REG			0x4
#define MVPP2_GMAC_PERIODIC_XON_EN_MASK		BIT(1)
#define MVPP2_GMAC_GMII_LB_EN_MASK		BIT(5)
#define MVPP2_GMAC_PCS_LB_EN_BIT		6
#define MVPP2_GMAC_PCS_LB_EN_MASK		BIT(6)
#define MVPP2_GMAC_SA_LOW_OFFS			7
#define MVPP2_GMAC_CTRL_2_REG			0x8
#define MVPP2_GMAC_INBAND_AN_MASK		BIT(0)
#define MVPP2_GMAC_FLOW_CTRL_MASK		GENMASK(2, 1)
#define MVPP2_GMAC_PCS_ENABLE_MASK		BIT(3)
#define MVPP2_GMAC_INTERNAL_CLK_MASK		BIT(4)
#define MVPP2_GMAC_DISABLE_PADDING		BIT(5)
#define MVPP2_GMAC_PORT_RESET_MASK		BIT(6)
#define MVPP2_GMAC_AUTONEG_CONFIG		0xc
#define MVPP2_GMAC_FORCE_LINK_DOWN		BIT(0)
#define MVPP2_GMAC_FORCE_LINK_PASS		BIT(1)
#define MVPP2_GMAC_IN_BAND_AUTONEG		BIT(2)
#define MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS	BIT(3)
#define MVPP2_GMAC_CONFIG_MII_SPEED		BIT(5)
#define MVPP2_GMAC_CONFIG_GMII_SPEED		BIT(6)
#define MVPP2_GMAC_AN_SPEED_EN			BIT(7)
#define MVPP2_GMAC_FC_ADV_EN			BIT(9)
#define MVPP2_GMAC_FLOW_CTRL_AUTONEG		BIT(11)
#define MVPP2_GMAC_CONFIG_FULL_DUPLEX		BIT(12)
#define MVPP2_GMAC_AN_DUPLEX_EN			BIT(13)
#define MVPP2_GMAC_STATUS0			0x10
#define MVPP2_GMAC_STATUS0_LINK_UP		BIT(0)
#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG		0x1c
#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS		6
#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK	0x1fc0
#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v)	(((v) << 6) & \
					MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
#define MVPP22_GMAC_INT_STAT			0x20
#define MVPP22_GMAC_INT_STAT_LINK		BIT(1)
#define MVPP22_GMAC_INT_MASK			0x24
#define MVPP22_GMAC_INT_MASK_LINK_STAT		BIT(1)
#define MVPP22_GMAC_CTRL_4_REG			0x90
#define MVPP22_CTRL4_EXT_PIN_GMII_SEL		BIT(0)
#define MVPP22_CTRL4_DP_CLK_SEL			BIT(5)
#define MVPP22_CTRL4_SYNC_BYPASS_DIS		BIT(6)
#define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE	BIT(7)
#define MVPP22_GMAC_INT_SUM_MASK		0xa4
#define MVPP22_GMAC_INT_SUM_MASK_LINK_STAT	BIT(1)

/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
 * relative to port->base.
 */
#define MVPP22_XLG_CTRL0_REG			0x100
#define MVPP22_XLG_CTRL0_PORT_EN		BIT(0)
#define MVPP22_XLG_CTRL0_MAC_RESET_DIS		BIT(1)
#define MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN	BIT(7)
#define MVPP22_XLG_CTRL0_MIB_CNT_DIS		BIT(14)
#define MVPP22_XLG_CTRL1_REG			0x104
#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS	0
#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK	0x1fff
#define MVPP22_XLG_STATUS			0x10c
#define MVPP22_XLG_STATUS_LINK_UP		BIT(0)
#define MVPP22_XLG_INT_STAT			0x114
#define MVPP22_XLG_INT_STAT_LINK		BIT(1)
#define MVPP22_XLG_INT_MASK			0x118
#define MVPP22_XLG_INT_MASK_LINK		BIT(1)
#define MVPP22_XLG_CTRL3_REG			0x11c
#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK	(7 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC	(0 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_10G	(1 << 13)
#define MVPP22_XLG_EXT_INT_MASK			0x15c
#define MVPP22_XLG_EXT_INT_MASK_XLG		BIT(1)
#define MVPP22_XLG_EXT_INT_MASK_GIG		BIT(2)
#define MVPP22_XLG_CTRL4_REG			0x184
#define MVPP22_XLG_CTRL4_FWD_FC			BIT(5)
#define MVPP22_XLG_CTRL4_FWD_PFC		BIT(6)
#define MVPP22_XLG_CTRL4_MACMODSELECT_GMAC	BIT(12)

/* SMI registers. PPv2.2 only, relative to priv->iface_base. */
#define MVPP22_SMI_MISC_CFG_REG			0x1204
#define MVPP22_SMI_POLLING_EN			BIT(10)

#define MVPP22_GMAC_BASE(port)		(0x7000 + (port) * 0x1000 + 0xe00)

#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff

/* Descriptor ring Macros */
#define MVPP2_QUEUE_NEXT_DESC(q, index) \
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
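
/* Wraparound example: for a ring where q->last_desc == size - 1,
 * MVPP2_QUEUE_NEXT_DESC(q, size - 1) evaluates to 0, while for any
 * index below last_desc it is simply index + 1.
 */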

/* MPCS registers. PPv2.2 only */
#define MVPP22_MPCS_BASE(port)			(0x7000 + (port) * 0x1000)
#define MVPP22_MPCS_CTRL			0x14
#define MVPP22_MPCS_CTRL_FWD_ERR_CONN		BIT(10)
#define MVPP22_MPCS_CLK_RESET			0x14c
#define MAC_CLK_RESET_SD_TX			BIT(0)
#define MAC_CLK_RESET_SD_RX			BIT(1)
#define MAC_CLK_RESET_MAC			BIT(2)
#define MVPP22_MPCS_CLK_RESET_DIV_RATIO(n)	((n) << 4)
#define MVPP22_MPCS_CLK_RESET_DIV_SET		BIT(11)

/* XPCS registers. PPv2.2 only */
#define MVPP22_XPCS_BASE(port)			(0x7400 + (port) * 0x1000)
#define MVPP22_XPCS_CFG0			0x0
#define MVPP22_XPCS_CFG0_PCS_MODE(n)		((n) << 3)
#define MVPP22_XPCS_CFG0_ACTIVE_LANE(n)		((n) << 5)

/* System controller registers. Accessed through a regmap. */
#define GENCONF_SOFT_RESET1			0x1108
#define GENCONF_SOFT_RESET1_GOP			BIT(6)
#define GENCONF_PORT_CTRL0			0x1110
#define GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT	BIT(1)
#define GENCONF_PORT_CTRL0_RX_DATA_SAMPLE	BIT(29)
#define GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR	BIT(31)
#define GENCONF_PORT_CTRL1			0x1114
#define GENCONF_PORT_CTRL1_EN(p)		BIT(p)
#define GENCONF_PORT_CTRL1_RESET(p)		(BIT(p) << 28)
#define GENCONF_CTRL0				0x1120
#define GENCONF_CTRL0_PORT0_RGMII		BIT(0)
#define GENCONF_CTRL0_PORT1_RGMII_MII		BIT(1)
#define GENCONF_CTRL0_PORT1_RGMII		BIT(2)

/* Various constants */

/* Coalescing */
#define MVPP2_TXDONE_COAL_PKTS_THRESH	15
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS	1000000UL
#define MVPP2_TXDONE_COAL_USEC		1000
#define MVPP2_RX_COAL_PKTS		32
#define MVPP2_RX_COAL_USEC		100

/* The two-byte Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically with zeroes on
 * the RX side. Since those two bytes sit at the front of the Ethernet
 * header, they automatically leave the IP header aligned on a 4-byte
 * boundary: the hardware skips those two bytes on its own.
 */
#define MVPP2_MH_SIZE			2
#define MVPP2_ETH_TYPE_LEN		2
#define MVPP2_PPPOE_HDR_SIZE		8
#define MVPP2_VLAN_TAG_LEN		4

/* Lbtd 802.3 type */
#define MVPP2_IP_LBDT_TYPE		0xfffa

#define MVPP2_TX_CSUM_MAX_SIZE		9800

/* Timeout constants */
#define MVPP2_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVPP2_TX_PENDING_TIMEOUT_MSEC	1000

#define MVPP2_TX_MTU_MAX		0x7ffff

/* Maximum number of T-CONTs of PON port */
#define MVPP2_MAX_TCONT			16

/* Maximum number of supported ports */
#define MVPP2_MAX_PORTS			4

/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ			8

/* MVPP2_MAX_TSO_SEGS is the maximum number of fragments to allow in the GSO
 * skb. As we need a maximum of two descriptors per fragment (1 header,
 * 1 data), multiply this value by two to count the maximum number of skb
 * descs needed.
 */
#define MVPP2_MAX_TSO_SEGS		300
#define MVPP2_MAX_SKB_DESCS		(MVPP2_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

/* Default number of RXQs in use */
#define MVPP2_DEFAULT_RXQ		4

/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD			128

/* Max number of Tx descriptors */
#define MVPP2_MAX_TXD			1024

/* Number of Tx descriptors that can be reserved at once by a CPU */
#define MVPP2_CPU_DESC_CHUNK		64

/* Max number of Tx descriptors in each aggregated queue */
#define MVPP2_AGGR_TXQ_SIZE		256

/* Descriptor aligned size */
#define MVPP2_DESC_ALIGNED_SIZE		32

/* Descriptor alignment mask */
#define MVPP2_TX_DESC_ALIGN		(MVPP2_DESC_ALIGNED_SIZE - 1)

/* RX FIFO constants */
#define MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB	0x8000
#define MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB	0x2000
#define MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB	0x1000
#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB	0x200
#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB	0x80
#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB	0x40
#define MVPP2_RX_FIFO_PORT_MIN_PKT		0x80

/* TX FIFO constants */
#define MVPP22_TX_FIFO_DATA_SIZE_10KB		0xa
#define MVPP22_TX_FIFO_DATA_SIZE_3KB		0x3

/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))

#define MVPP2_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, cache_line_size())

#define MVPP2_RX_BUF_SIZE(pkt_size)	((pkt_size) + NET_SKB_PAD)
#define MVPP2_RX_TOTAL_SIZE(buf_size)	((buf_size) + MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
	((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
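
/* Worked example (assuming a 64-byte cache line): for an MTU of 1500,
 * MVPP2_RX_PKT_SIZE(1500) = ALIGN(1500 + 2 (MH) + 4 (VLAN) + 14 (ETH_HLEN)
 * + 4 (FCS), 64) = ALIGN(1524, 64) = 1536. MVPP2_RX_BUF_SIZE() then adds
 * NET_SKB_PAD, and MVPP2_RX_TOTAL_SIZE() the shared-info area, giving the
 * full allocation size.
 */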

#define MVPP2_BIT_TO_BYTE(bit)		((bit) / 8)

/* IPv6 max L3 address size */
#define MVPP2_MAX_L3_ADDR_SIZE		16

/* Port flags */
#define MVPP2_F_LOOPBACK		BIT(0)

/* Marvell tag types */
enum mvpp2_tag_type {
	MVPP2_TAG_TYPE_NONE = 0,
	MVPP2_TAG_TYPE_MH = 1,
	MVPP2_TAG_TYPE_DSA = 2,
	MVPP2_TAG_TYPE_EDSA = 3,
	MVPP2_TAG_TYPE_VLAN = 4,
	MVPP2_TAG_TYPE_LAST = 5
};

/* Parser constants */
#define MVPP2_PRS_TCAM_SRAM_SIZE	256
#define MVPP2_PRS_TCAM_WORDS		6
#define MVPP2_PRS_SRAM_WORDS		4
#define MVPP2_PRS_FLOW_ID_SIZE		64
#define MVPP2_PRS_FLOW_ID_MASK		0x3f
#define MVPP2_PRS_TCAM_ENTRY_INVALID	1
#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT	BIT(5)
#define MVPP2_PRS_IPV4_HEAD		0x40
#define MVPP2_PRS_IPV4_HEAD_MASK	0xf0
#define MVPP2_PRS_IPV4_MC		0xe0
#define MVPP2_PRS_IPV4_MC_MASK		0xf0
#define MVPP2_PRS_IPV4_BC_MASK		0xff
#define MVPP2_PRS_IPV4_IHL		0x5
#define MVPP2_PRS_IPV4_IHL_MASK		0xf
#define MVPP2_PRS_IPV6_MC		0xff
#define MVPP2_PRS_IPV6_MC_MASK		0xff
#define MVPP2_PRS_IPV6_HOP_MASK		0xff
#define MVPP2_PRS_TCAM_PROTO_MASK	0xff
#define MVPP2_PRS_TCAM_PROTO_MASK_L	0x3f
#define MVPP2_PRS_DBL_VLANS_MAX		100

/* Tcam structure:
 * - lookup ID - 4 bits
 * - port ID - 1 byte
 * - additional information - 1 byte
 * - header data - 8 bytes
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
 */
#define MVPP2_PRS_AI_BITS			8
#define MVPP2_PRS_PORT_MASK			0xff
#define MVPP2_PRS_LU_MASK			0xf
#define MVPP2_PRS_TCAM_DATA_BYTE(offs)		\
	(((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)	\
	(((offs) * 2) - ((offs) % 2) + 2)
#define MVPP2_PRS_TCAM_AI_BYTE			16
#define MVPP2_PRS_TCAM_PORT_BYTE		17
#define MVPP2_PRS_TCAM_LU_BYTE			20
#define MVPP2_PRS_TCAM_EN_OFFS(offs)		((offs) + 2)
#define MVPP2_PRS_TCAM_INV_WORD			5
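
/* Byte-mapping example: MVPP2_PRS_TCAM_DATA_BYTE(0..3) = 0, 1, 4, 5 and
 * MVPP2_PRS_TCAM_DATA_BYTE_EN(0..3) = 2, 3, 6, 7. In other words, each
 * 32-bit TCAM word carries two header-data bytes in its low half and
 * their two enable (mask) bytes in its high half.
 */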

/* Tcam entries ID */
#define MVPP2_PE_DROP_ALL		0
#define MVPP2_PE_FIRST_FREE_TID		1
#define MVPP2_PE_LAST_FREE_TID		(MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_IP6_EXT_PROTO_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_MAC_MC_IP6		(MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP6_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 28)
#define MVPP2_PE_IP4_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 27)
#define MVPP2_PE_LAST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 26)
#define MVPP2_PE_FIRST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 19)
#define MVPP2_PE_EDSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 18)
#define MVPP2_PE_EDSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 17)
#define MVPP2_PE_DSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 16)
#define MVPP2_PE_DSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 15)
#define MVPP2_PE_ETYPE_EDSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 14)
#define MVPP2_PE_ETYPE_EDSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 13)
#define MVPP2_PE_ETYPE_DSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 12)
#define MVPP2_PE_ETYPE_DSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 11)
#define MVPP2_PE_MH_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 10)
#define MVPP2_PE_DSA_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 9)
#define MVPP2_PE_IP6_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 8)
#define MVPP2_PE_IP4_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_ETH_TYPE_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_DBL		(MVPP2_PRS_TCAM_SRAM_SIZE - 5)
#define MVPP2_PE_VLAN_NONE		(MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_ALL		(MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 1)

/* Sram structure
 * The fields are represented by MVPP2_PRS_SRAM_DATA_REG(3)->(0).
 */
#define MVPP2_PRS_SRAM_RI_OFFS			0
#define MVPP2_PRS_SRAM_RI_WORD			0
#define MVPP2_PRS_SRAM_RI_CTRL_OFFS		32
#define MVPP2_PRS_SRAM_RI_CTRL_WORD		1
#define MVPP2_PRS_SRAM_RI_CTRL_BITS		32
#define MVPP2_PRS_SRAM_SHIFT_OFFS		64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT		72
#define MVPP2_PRS_SRAM_UDF_OFFS			73
#define MVPP2_PRS_SRAM_UDF_BITS			8
#define MVPP2_PRS_SRAM_UDF_MASK			0xff
#define MVPP2_PRS_SRAM_UDF_SIGN_BIT		81
#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS		82
#define MVPP2_PRS_SRAM_UDF_TYPE_MASK		0x7
#define MVPP2_PRS_SRAM_UDF_TYPE_L3		1
#define MVPP2_PRS_SRAM_UDF_TYPE_L4		4
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS	85
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK	0x3
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD		1
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS		87
#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS		2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK		0x3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD		0
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS		89
#define MVPP2_PRS_SRAM_AI_OFFS			90
#define MVPP2_PRS_SRAM_AI_CTRL_OFFS		98
#define MVPP2_PRS_SRAM_AI_CTRL_BITS		8
#define MVPP2_PRS_SRAM_AI_MASK			0xff
#define MVPP2_PRS_SRAM_NEXT_LU_OFFS		106
#define MVPP2_PRS_SRAM_NEXT_LU_MASK		0xf
#define MVPP2_PRS_SRAM_LU_DONE_BIT		110
#define MVPP2_PRS_SRAM_LU_GEN_BIT		111
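
/* Layout note: the result-info (RI) bits live in SRAM word 0, and each
 * has a matching control/valid bit at the same position in word 1
 * (offset 32); this is why mvpp2_prs_sram_ri_update() below always sets
 * MVPP2_PRS_SRAM_RI_CTRL_OFFS + i alongside the RI bit itself.
 */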

/* Sram result info bits assignment */
#define MVPP2_PRS_RI_MAC_ME_MASK		0x1
#define MVPP2_PRS_RI_DSA_MASK			0x2
#define MVPP2_PRS_RI_VLAN_MASK			(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_VLAN_NONE			0x0
#define MVPP2_PRS_RI_VLAN_SINGLE		BIT(2)
#define MVPP2_PRS_RI_VLAN_DOUBLE		BIT(3)
#define MVPP2_PRS_RI_VLAN_TRIPLE		(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_CPU_CODE_MASK		0x70
#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC		BIT(4)
#define MVPP2_PRS_RI_L2_CAST_MASK		(BIT(9) | BIT(10))
#define MVPP2_PRS_RI_L2_UCAST			0x0
#define MVPP2_PRS_RI_L2_MCAST			BIT(9)
#define MVPP2_PRS_RI_L2_BCAST			BIT(10)
#define MVPP2_PRS_RI_PPPOE_MASK			0x800
#define MVPP2_PRS_RI_L3_PROTO_MASK		(BIT(12) | BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_UN			0x0
#define MVPP2_PRS_RI_L3_IP4			BIT(12)
#define MVPP2_PRS_RI_L3_IP4_OPT			BIT(13)
#define MVPP2_PRS_RI_L3_IP4_OTHER		(BIT(12) | BIT(13))
#define MVPP2_PRS_RI_L3_IP6			BIT(14)
#define MVPP2_PRS_RI_L3_IP6_EXT			(BIT(12) | BIT(14))
#define MVPP2_PRS_RI_L3_ARP			(BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_ADDR_MASK		(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_L3_UCAST			0x0
#define MVPP2_PRS_RI_L3_MCAST			BIT(15)
#define MVPP2_PRS_RI_L3_BCAST			(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK		0x20000
#define MVPP2_PRS_RI_IP_FRAG_TRUE		BIT(17)
#define MVPP2_PRS_RI_UDF3_MASK			0x300000
#define MVPP2_PRS_RI_UDF3_RX_SPECIAL		BIT(21)
#define MVPP2_PRS_RI_L4_PROTO_MASK		0x1c00000
#define MVPP2_PRS_RI_L4_TCP			BIT(22)
#define MVPP2_PRS_RI_L4_UDP			BIT(23)
#define MVPP2_PRS_RI_L4_OTHER			(BIT(22) | BIT(23))
#define MVPP2_PRS_RI_UDF7_MASK			0x60000000
#define MVPP2_PRS_RI_UDF7_IP6_LITE		BIT(29)
#define MVPP2_PRS_RI_DROP_MASK			0x80000000

/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_EXT_AI_BIT		BIT(1)
#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT		BIT(2)
#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT	BIT(3)
#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT		BIT(4)
#define MVPP2_PRS_SINGLE_VLAN_AI		0
#define MVPP2_PRS_DBL_VLAN_AI_BIT		BIT(7)

/* DSA/EDSA type */
#define MVPP2_PRS_TAGGED		true
#define MVPP2_PRS_UNTAGGED		false
#define MVPP2_PRS_EDSA			true
#define MVPP2_PRS_DSA			false

/* MAC entries, shadow udf */
enum mvpp2_prs_udf {
	MVPP2_PRS_UDF_MAC_DEF,
	MVPP2_PRS_UDF_MAC_RANGE,
	MVPP2_PRS_UDF_L2_DEF,
	MVPP2_PRS_UDF_L2_DEF_COPY,
	MVPP2_PRS_UDF_L2_USER,
};

/* Lookup ID */
enum mvpp2_prs_lookup {
	MVPP2_PRS_LU_MH,
	MVPP2_PRS_LU_MAC,
	MVPP2_PRS_LU_DSA,
	MVPP2_PRS_LU_VLAN,
	MVPP2_PRS_LU_L2,
	MVPP2_PRS_LU_PPPOE,
	MVPP2_PRS_LU_IP4,
	MVPP2_PRS_LU_IP6,
	MVPP2_PRS_LU_FLOWS,
	MVPP2_PRS_LU_LAST,
};

/* L3 cast enum */
enum mvpp2_prs_l3_cast {
	MVPP2_PRS_L3_UNI_CAST,
	MVPP2_PRS_L3_MULTI_CAST,
	MVPP2_PRS_L3_BROAD_CAST
};

/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE	512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS	3
#define MVPP2_CLS_LKP_TBL_SIZE		64
#define MVPP2_CLS_RX_QUEUES		256

/* RSS constants */
#define MVPP22_RSS_TABLE_ENTRIES	32

/* BM constants */
#define MVPP2_BM_POOLS_NUM		8
#define MVPP2_BM_LONG_BUF_NUM		1024
#define MVPP2_BM_SHORT_BUF_NUM		2048
#define MVPP2_BM_POOL_SIZE_MAX		(16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
#define MVPP2_BM_POOL_PTR_ALIGN		128
#define MVPP2_BM_SWF_LONG_POOL(port)	((port > 2) ? 2 : port)
#define MVPP2_BM_SWF_SHORT_POOL		3

/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS	8
#define MVPP2_BM_COOKIE_CPU_OFFS	24

/* BM short pool packet size
 * This value ensures that, for SWF, the total number
 * of bytes allocated for each buffer will be 512
 */
#define MVPP2_BM_SHORT_PKT_SIZE		MVPP2_RX_MAX_PKT_SIZE(512)
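
/* This inverts MVPP2_RX_BUF_SIZE()/MVPP2_RX_TOTAL_SIZE():
 * MVPP2_BM_SHORT_PKT_SIZE = 512 - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE,
 * so packet data, NET_SKB_PAD headroom and the shared-info area together
 * fill exactly 512 bytes per short-pool buffer.
 */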

#define MVPP21_ADDR_SPACE_SZ		0
#define MVPP22_ADDR_SPACE_SZ		SZ_64K

#define MVPP2_MAX_THREADS		8
#define MVPP2_MAX_QVECS			MVPP2_MAX_THREADS

enum mvpp2_bm_type {
	MVPP2_BM_FREE,
	MVPP2_BM_SWF_LONG,
	MVPP2_BM_SWF_SHORT
};

/* GMAC MIB Counters register definitions */
#define MVPP21_MIB_COUNTERS_OFFSET		0x1000
#define MVPP21_MIB_COUNTERS_PORT_SZ		0x400
#define MVPP22_MIB_COUNTERS_OFFSET		0x0
#define MVPP22_MIB_COUNTERS_PORT_SZ		0x100

#define MVPP2_MIB_GOOD_OCTETS_RCVD		0x0
#define MVPP2_MIB_BAD_OCTETS_RCVD		0x8
#define MVPP2_MIB_CRC_ERRORS_SENT		0xc
#define MVPP2_MIB_UNICAST_FRAMES_RCVD		0x10
#define MVPP2_MIB_BROADCAST_FRAMES_RCVD		0x18
#define MVPP2_MIB_MULTICAST_FRAMES_RCVD		0x1c
#define MVPP2_MIB_FRAMES_64_OCTETS		0x20
#define MVPP2_MIB_FRAMES_65_TO_127_OCTETS	0x24
#define MVPP2_MIB_FRAMES_128_TO_255_OCTETS	0x28
#define MVPP2_MIB_FRAMES_256_TO_511_OCTETS	0x2c
#define MVPP2_MIB_FRAMES_512_TO_1023_OCTETS	0x30
#define MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS	0x34
#define MVPP2_MIB_GOOD_OCTETS_SENT		0x38
#define MVPP2_MIB_UNICAST_FRAMES_SENT		0x40
#define MVPP2_MIB_MULTICAST_FRAMES_SENT		0x48
#define MVPP2_MIB_BROADCAST_FRAMES_SENT		0x4c
#define MVPP2_MIB_FC_SENT			0x54
#define MVPP2_MIB_FC_RCVD			0x58
#define MVPP2_MIB_RX_FIFO_OVERRUN		0x5c
#define MVPP2_MIB_UNDERSIZE_RCVD		0x60
#define MVPP2_MIB_FRAGMENTS_RCVD		0x64
#define MVPP2_MIB_OVERSIZE_RCVD			0x68
#define MVPP2_MIB_JABBER_RCVD			0x6c
#define MVPP2_MIB_MAC_RCV_ERROR			0x70
#define MVPP2_MIB_BAD_CRC_EVENT			0x74
#define MVPP2_MIB_COLLISION			0x78
#define MVPP2_MIB_LATE_COLLISION		0x7c

#define MVPP2_MIB_COUNTERS_STATS_DELAY		(1 * HZ)
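
/* Note: the 8-byte stride of the octet counters (e.g. GOOD_OCTETS_RCVD at
 * 0x0, BAD_OCTETS_RCVD at 0x8) reflects that they appear to be 64-bit
 * counters, read as two consecutive 32-bit halves (low word first); the
 * remaining counters are 32-bit.
 */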

/* Definitions */

/* Shared Packet Processor resources */
struct mvpp2 {
	/* Shared registers' base addresses */
	void __iomem *lms_base;
	void __iomem *iface_base;

	/* On PPv2.2, each "software thread" can access the base
	 * register through a separate address space, each 64 KB apart
	 * from each other. Typically, such address spaces will be
	 * used per CPU.
	 */
	void __iomem *swth_base[MVPP2_MAX_THREADS];

	/* On PPv2.2, some port control registers are located in the system
	 * controller space. These registers are accessible through a regmap.
	 */
	struct regmap *sysctrl_base;

	/* Common clocks */
	struct clk *pp_clk;
	struct clk *gop_clk;
	struct clk *mg_clk;
	struct clk *axi_clk;

	/* List of pointers to port structures */
	int port_count;
	struct mvpp2_port **port_list;

	/* Aggregated TXQs */
	struct mvpp2_tx_queue *aggr_txqs;

	/* BM pools */
	struct mvpp2_bm_pool *bm_pools;

	/* PRS shadow table */
	struct mvpp2_prs_shadow *prs_shadow;
	/* PRS auxiliary table for double vlan entries control */
	bool *prs_double_vlans;

	/* Tclk value */
	u32 tclk;

	/* HW version */
	enum { MVPP21, MVPP22 } hw_version;

	/* Maximum number of RXQs per port */
	unsigned int max_port_rxqs;

	/* Workqueue to gather hardware statistics */
	char queue_name[30];
	struct workqueue_struct *stats_queue;
};

struct mvpp2_pcpu_stats {
	struct u64_stats_sync syncp;
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};

/* Per-CPU port control */
struct mvpp2_port_pcpu {
	struct hrtimer tx_done_timer;
	bool timer_scheduled;
	/* Tasklet for egress finalization */
	struct tasklet_struct tx_done_tasklet;
};

struct mvpp2_queue_vector {
	int irq;
	struct napi_struct napi;
	enum { MVPP2_QUEUE_VECTOR_SHARED, MVPP2_QUEUE_VECTOR_PRIVATE } type;
	int sw_thread_id;
	u16 sw_thread_mask;
	int first_rxq;
	int nrxqs;
	u32 pending_cause_rx;
	struct mvpp2_port *port;
};

struct mvpp2_port {
	u8 id;

	/* Index of the port from the "group of ports" complex point
	 * of view
	 */
	int gop_id;

	int link_irq;

	struct mvpp2 *priv;

	/* Per-port registers' base address */
	void __iomem *base;
	void __iomem *stats_base;

	struct mvpp2_rx_queue **rxqs;
	unsigned int nrxqs;
	struct mvpp2_tx_queue **txqs;
	unsigned int ntxqs;
	struct net_device *dev;

	int pkt_size;

	/* Per-CPU port control */
	struct mvpp2_port_pcpu __percpu *pcpu;

	/* Flags */
	unsigned long flags;

	u16 tx_ring_size;
	u16 rx_ring_size;
	struct mvpp2_pcpu_stats __percpu *stats;
	u64 *ethtool_stats;

	/* Per-port work and its lock to gather hardware statistics */
	struct mutex gather_stats_lock;
	struct delayed_work stats_work;

	phy_interface_t phy_interface;
	struct device_node *phy_node;
	struct phy *comphy;
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;

	struct mvpp2_bm_pool *pool_long;
	struct mvpp2_bm_pool *pool_short;

	/* Index of first port's physical RXQ */
	u8 first_rxq;

	struct mvpp2_queue_vector qvecs[MVPP2_MAX_QVECS];
	unsigned int nqvecs;
	bool has_tx_irqs;

	u32 tx_time_coal;
};

/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
 * layout of the transmit and receive DMA descriptors; their
 * layout is therefore defined by the hardware design.
 */
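
/* On PPv2.2 the descriptors below pack 40-bit DMA addresses and cookies
 * into the low bits of 64-bit fields (buf_dma_addr_ptp, buf_cookie_misc,
 * buf_dma_addr_key_hash), with the high bits reused for unrelated data;
 * hence the GENMASK_ULL(40, 0) masking in the accessor helpers further
 * down.
 */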

#define MVPP2_TXD_L3_OFF_SHIFT		0
#define MVPP2_TXD_IP_HLEN_SHIFT		8
#define MVPP2_TXD_L4_CSUM_FRAG		BIT(13)
#define MVPP2_TXD_L4_CSUM_NOT		BIT(14)
#define MVPP2_TXD_IP_CSUM_DISABLE	BIT(15)
#define MVPP2_TXD_PADDING_DISABLE	BIT(23)
#define MVPP2_TXD_L4_UDP		BIT(24)
#define MVPP2_TXD_L3_IP6		BIT(26)
#define MVPP2_TXD_L_DESC		BIT(28)
#define MVPP2_TXD_F_DESC		BIT(29)

#define MVPP2_RXD_ERR_SUMMARY		BIT(15)
#define MVPP2_RXD_ERR_CODE_MASK		(BIT(13) | BIT(14))
#define MVPP2_RXD_ERR_CRC		0x0
#define MVPP2_RXD_ERR_OVERRUN		BIT(13)
#define MVPP2_RXD_ERR_RESOURCE		(BIT(13) | BIT(14))
#define MVPP2_RXD_BM_POOL_ID_OFFS	16
#define MVPP2_RXD_BM_POOL_ID_MASK	(BIT(16) | BIT(17) | BIT(18))
#define MVPP2_RXD_HWF_SYNC		BIT(21)
#define MVPP2_RXD_L4_CSUM_OK		BIT(22)
#define MVPP2_RXD_IP4_HEADER_ERR	BIT(24)
#define MVPP2_RXD_L4_TCP		BIT(25)
#define MVPP2_RXD_L4_UDP		BIT(26)
#define MVPP2_RXD_L3_IP4		BIT(28)
#define MVPP2_RXD_L3_IP6		BIT(30)
#define MVPP2_RXD_BUF_HDR		BIT(31)

/* HW TX descriptor for PPv2.1 */
struct mvpp21_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting.*/
	u8  packet_offset;	/* the offset from the buffer beginning	*/
	u8  phys_txq;		/* destination queue ID			*/
	u16 data_size;		/* data size of transmitted packet in bytes */
	u32 buf_dma_addr;	/* physical addr of transmitted buffer	*/
	u32 buf_cookie;		/* cookie for access to TX buffer in tx path */
	u32 reserved1[3];	/* hw_cmd (for future use, BM, PON, PNC) */
	u32 reserved2;		/* reserved (for future use)		*/
};

/* HW RX descriptor for PPv2.1 */
struct mvpp21_rx_desc {
	u32 status;		/* info about received packet		*/
	u16 reserved1;		/* parser_info (for future use, PnC)	*/
	u16 data_size;		/* size of received packet in bytes	*/
	u32 buf_dma_addr;	/* physical address of the buffer	*/
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved2;		/* gem_port_id (for future use, PON)	*/
	u16 reserved3;		/* csum_l4 (for future use, PnC)	*/
	u8  reserved4;		/* bm_qset (for future use, BM)		*/
	u8  reserved5;
	u16 reserved6;		/* classify_info (for future use, PnC)	*/
	u32 reserved7;		/* flow_id (for future use, PnC)	*/
	u32 reserved8;
};

/* HW TX descriptor for PPv2.2 */
struct mvpp22_tx_desc {
	u32 command;
	u8  packet_offset;
	u8  phys_txq;
	u16 data_size;
	u64 reserved1;
	u64 buf_dma_addr_ptp;
	u64 buf_cookie_misc;
};

/* HW RX descriptor for PPv2.2 */
struct mvpp22_rx_desc {
	u32 status;
	u16 reserved1;
	u16 data_size;
	u32 reserved2;
	u32 reserved3;
	u64 buf_dma_addr_key_hash;
	u64 buf_cookie_misc;
};

/* Opaque type used by the driver to manipulate the HW TX and RX
 * descriptors
 */
struct mvpp2_tx_desc {
	union {
		struct mvpp21_tx_desc pp21;
		struct mvpp22_tx_desc pp22;
	};
};

struct mvpp2_rx_desc {
	union {
		struct mvpp21_rx_desc pp21;
		struct mvpp22_rx_desc pp22;
	};
};

struct mvpp2_txq_pcpu_buf {
	/* Transmitted SKB */
	struct sk_buff *skb;

	/* Physical address of transmitted buffer */
	dma_addr_t dma;

	/* Size transmitted */
	size_t size;
};

/* Per-CPU Tx queue control */
struct mvpp2_txq_pcpu {
	int cpu;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptors in the
	 * descriptor ring
	 */
	int count;

	int wake_threshold;
	int stop_threshold;

	/* Number of Tx DMA descriptors reserved for each CPU */
	int reserved_num;

	/* Info about transmitted buffers */
	struct mvpp2_txq_pcpu_buf *buffs;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;

	/* DMA buffer for TSO headers */
	char *tso_headers;
	dma_addr_t tso_headers_dma;
};

struct mvpp2_tx_queue {
	/* Physical number of this Tx queue */
	u8 id;

	/* Logical number of this Tx queue */
	u8 log_id;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptors in the descriptor ring */
	int count;

	/* Per-CPU control of physical Tx queues */
	struct mvpp2_txq_pcpu __percpu *pcpu;

	u32 done_pkts_coal;

	/* Virtual address of the Tx DMA descriptors array */
	struct mvpp2_tx_desc *descs;

	/* DMA address of the Tx DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last Tx DMA descriptor */
	int last_desc;

	/* Index of the next Tx DMA descriptor to process */
	int next_desc_to_proc;
};

struct mvpp2_rx_queue {
	/* RX queue number, in the range 0-31 for physical RXQs */
	u8 id;

	/* Num of rx descriptors in the rx descriptor ring */
	int size;

	u32 pkts_coal;
	u32 time_coal;

	/* Virtual address of the RX DMA descriptors array */
	struct mvpp2_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* ID of port to which physical RXQ is mapped */
	int port;

	/* Port's logical RXQ number to which physical RXQ is mapped */
	int logic_rxq;
};

union mvpp2_prs_tcam_entry {
	u32 word[MVPP2_PRS_TCAM_WORDS];
	u8  byte[MVPP2_PRS_TCAM_WORDS * 4];
};

union mvpp2_prs_sram_entry {
	u32 word[MVPP2_PRS_SRAM_WORDS];
	u8  byte[MVPP2_PRS_SRAM_WORDS * 4];
};

struct mvpp2_prs_entry {
	u32 index;
	union mvpp2_prs_tcam_entry tcam;
	union mvpp2_prs_sram_entry sram;
};

struct mvpp2_prs_shadow {
	bool valid;
	bool finish;

	/* Lookup ID */
	int lu;

	/* User defined offset */
	int udf;

	/* Result info */
	u32 ri;
	u32 ri_mask;
};

struct mvpp2_cls_flow_entry {
	u32 index;
	u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
};

struct mvpp2_cls_lookup_entry {
	u32 lkpid;
	u32 way;
	u32 data;
};

struct mvpp2_bm_pool {
	/* Pool number in the range 0-7 */
	int id;
	enum mvpp2_bm_type type;

	/* Buffer Pointers Pool External (BPPE) size */
	int size;
	/* BPPE size in bytes */
	int size_bytes;
	/* Number of buffers for this pool */
	int buf_num;
	/* Pool buffer size */
	int buf_size;
	/* Packet size */
	int pkt_size;
	int frag_size;

	/* BPPE virtual base address */
	u32 *virt_addr;
	/* BPPE DMA base address */
	dma_addr_t dma_addr;

	/* Ports using BM pool */
	u32 port_map;
};

#define IS_TSO_HEADER(txq_pcpu, addr) \
	((addr) >= (txq_pcpu)->tso_headers_dma && \
	 (addr) < (txq_pcpu)->tso_headers_dma + \
	 (txq_pcpu)->size * TSO_HEADER_SIZE)

/* Queue modes */
#define MVPP2_QDIST_SINGLE_MODE	0
#define MVPP2_QDIST_MULTI_MODE	1

static int queue_mode = MVPP2_QDIST_SINGLE_MODE;

module_param(queue_mode, int, 0444);
MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");

#define MVPP2_DRIVER_NAME "mvpp2"
#define MVPP2_DRIVER_VERSION "1.0"

/* Utility/helper methods */

static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->swth_base[0] + offset);
}

static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->swth_base[0] + offset);
}

/* These accessors should be used to access:
 *
 * - per-CPU registers, where each CPU has its own copy of the
 *   register.
 *
 *   MVPP2_BM_VIRT_ALLOC_REG
 *   MVPP2_BM_ADDR_HIGH_ALLOC
 *   MVPP22_BM_ADDR_HIGH_RLS_REG
 *   MVPP2_BM_VIRT_RLS_REG
 *   MVPP2_ISR_RX_TX_CAUSE_REG
 *   MVPP2_ISR_RX_TX_MASK_REG
 *   MVPP2_TXQ_NUM_REG
 *   MVPP2_AGGR_TXQ_UPDATE_REG
 *   MVPP2_TXQ_RSVD_REQ_REG
 *   MVPP2_TXQ_RSVD_RSLT_REG
 *   MVPP2_TXQ_SENT_REG
 *   MVPP2_RXQ_NUM_REG
 *
 * - global registers that must be accessed through a specific CPU
 *   window, because they are related to an access to a per-CPU
 *   register
 *
 *   MVPP2_BM_PHY_ALLOC_REG	(related to MVPP2_BM_VIRT_ALLOC_REG)
 *   MVPP2_BM_PHY_RLS_REG	(related to MVPP2_BM_VIRT_RLS_REG)
 *   MVPP2_RXQ_THRESH_REG	(related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_ADDR_REG	(related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_SIZE_REG	(related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_INDEX_REG	(related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_TXQ_PENDING_REG	(related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_ADDR_REG	(related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_SIZE_REG	(related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_INDEX_REG	(related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_PENDING_REG	(related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_PREF_BUF_REG	(related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_PREF_BUF_REG	(related to MVPP2_TXQ_NUM_REG)
 */
static void mvpp2_percpu_write(struct mvpp2 *priv, int cpu,
			       u32 offset, u32 data)
{
	writel(data, priv->swth_base[cpu] + offset);
}

static u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu,
			     u32 offset)
{
	return readl(priv->swth_base[cpu] + offset);
}

static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.buf_dma_addr;
	else
		return tx_desc->pp22.buf_dma_addr_ptp & GENMASK_ULL(40, 0);
}

static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	dma_addr_t addr, offset;

	addr = dma_addr & ~MVPP2_TX_DESC_ALIGN;
	offset = dma_addr & MVPP2_TX_DESC_ALIGN;

	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = addr;
		tx_desc->pp21.packet_offset = offset;
	} else {
		u64 val = (u64)addr;

		tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
		tx_desc->pp22.packet_offset = offset;
	}
}
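
/* Split example: with MVPP2_TX_DESC_ALIGN == 31, a buffer at DMA address
 * 0x10000025 is stored as addr = 0x10000020 plus packet_offset = 0x5, so
 * the descriptor address field always stays 32-byte aligned.
 */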

static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.data_size;
	else
		return tx_desc->pp22.data_size;
}

static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = size;
	else
		tx_desc->pp22.data_size = size;
}

static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}

static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = command;
	else
		tx_desc->pp22.command = command;
}

static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.packet_offset;
	else
		return tx_desc->pp22.packet_offset;
}

static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_dma_addr;
	else
		return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
}

static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_cookie;
	else
		return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
}

static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.data_size;
	else
		return rx_desc->pp22.data_size;
}

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.status;
	else
		return rx_desc->pp22.status;
}

static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

static void mvpp2_txq_inc_put(struct mvpp2_port *port,
			      struct mvpp2_txq_pcpu *txq_pcpu,
			      struct sk_buff *skb,
			      struct mvpp2_tx_desc *tx_desc)
{
	struct mvpp2_txq_pcpu_buf *tx_buf =
		txq_pcpu->buffs + txq_pcpu->txq_put_index;
	tx_buf->skb = skb;
	tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
	tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
		mvpp2_txdesc_offset_get(port, tx_desc);
	txq_pcpu->txq_put_index++;
	if (txq_pcpu->txq_put_index == txq_pcpu->size)
		txq_pcpu->txq_put_index = 0;
}

/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}
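
/* Numbering example: egress ports sit after the MVPP2_MAX_TCONT (16) PON
 * T-CONTs, so port 1 is physical egress port 17 and its TXQ 2 is physical
 * TXQ (16 + 1) * 8 + 2 = 138.
 */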
routines */ 1477 1478/* Update parser tcam and sram hw entries */ 1479static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) 1480{ 1481 int i; 1482 1483 if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1) 1484 return -EINVAL; 1485 1486 /* Clear entry invalidation bit */ 1487 pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK; 1488 1489 /* Write tcam index - indirect access */ 1490 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); 1491 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) 1492 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]); 1493 1494 /* Write sram index - indirect access */ 1495 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index); 1496 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) 1497 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]); 1498 1499 return 0; 1500} 1501 1502/* Read tcam entry from hw */ 1503static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) 1504{ 1505 int i; 1506 1507 if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1) 1508 return -EINVAL; 1509 1510 /* Write tcam index - indirect access */ 1511 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); 1512 1513 pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv, 1514 MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD)); 1515 if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK) 1516 return MVPP2_PRS_TCAM_ENTRY_INVALID; 1517 1518 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) 1519 pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i)); 1520 1521 /* Write sram index - indirect access */ 1522 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index); 1523 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) 1524 pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i)); 1525 1526 return 0; 1527} 1528 1529/* Invalidate tcam hw entry */ 1530static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index) 1531{ 1532 /* Write index - indirect access */ 1533 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index); 1534 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD), 1535 MVPP2_PRS_TCAM_INV_MASK); 1536} 1537 1538/* Enable shadow table entry and set its lookup ID */ 1539static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu) 1540{ 1541 priv->prs_shadow[index].valid = true; 1542 priv->prs_shadow[index].lu = lu; 1543} 1544 1545/* Update ri fields in shadow table entry */ 1546static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index, 1547 unsigned int ri, unsigned int ri_mask) 1548{ 1549 priv->prs_shadow[index].ri_mask = ri_mask; 1550 priv->prs_shadow[index].ri = ri; 1551} 1552 1553/* Update lookup field in tcam sw entry */ 1554static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu) 1555{ 1556 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE); 1557 1558 pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu; 1559 pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK; 1560} 1561 1562/* Update mask for single port in tcam sw entry */ 1563static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe, 1564 unsigned int port, bool add) 1565{ 1566 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); 1567 1568 if (add) 1569 pe->tcam.byte[enable_off] &= ~(1 << port); 1570 else 1571 pe->tcam.byte[enable_off] |= 1 << port; 1572} 1573 1574/* Update port map in tcam sw entry */ 1575static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe, 1576 unsigned int ports) 1577{ 1578 unsigned char port_mask = MVPP2_PRS_PORT_MASK; 1579 int enable_off = 

/* Obtain port map from tcam sw entry */
static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
}

/* Set byte of data and its enable bits in tcam sw entry */
static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char byte,
					 unsigned char enable)
{
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
}

/* Get byte of data and its enable bits from tcam sw entry */
static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char *byte,
					 unsigned char *enable)
{
	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
}

/* Compare tcam data bytes with a pattern */
static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
				    u16 data)
{
	int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
	u16 tcam_data;

	tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
	if (tcam_data != data)
		return false;
	return true;
}

/* Update ai bits in tcam sw entry */
static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int enable)
{
	int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;

	for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
		if (!(enable & BIT(i)))
			continue;

		if (bits & BIT(i))
			pe->tcam.byte[ai_idx] |= 1 << i;
		else
			pe->tcam.byte[ai_idx] &= ~(1 << i);
	}

	pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
}

/* Get ai bits from tcam sw entry */
static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
{
	return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
}

/* Set ethertype in tcam sw entry */
static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
				  unsigned short ethertype)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
}

/* Set bits in sram sw entry */
static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
				    int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
}

/* Clear bits in sram sw entry */
static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
				      int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
}

/* Update ri bits in sram sw entry */
static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
		int ri_off = MVPP2_PRS_SRAM_RI_OFFS;

		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
	}
}
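
/* Example: mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_L3_IP4,
 * MVPP2_PRS_RI_L3_PROTO_MASK) sets RI bit 12, clears RI bits 13 and 14
 * in word 0, and sets control bits 12-14 in word 1 to mark those RI
 * bits as valid.
 */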
MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1); 1691 } 1692} 1693 1694/* Obtain ri bits from sram sw entry */ 1695static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe) 1696{ 1697 return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD]; 1698} 1699 1700/* Update ai bits in sram sw entry */ 1701static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe, 1702 unsigned int bits, unsigned int mask) 1703{ 1704 unsigned int i; 1705 int ai_off = MVPP2_PRS_SRAM_AI_OFFS; 1706 1707 for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) { 1708 1709 if (!(mask & BIT(i))) 1710 continue; 1711 1712 if (bits & BIT(i)) 1713 mvpp2_prs_sram_bits_set(pe, ai_off + i, 1); 1714 else 1715 mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1); 1716 1717 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1); 1718 } 1719} 1720 1721/* Read ai bits from sram sw entry */ 1722static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe) 1723{ 1724 u8 bits; 1725 int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS); 1726 int ai_en_off = ai_off + 1; 1727 int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8; 1728 1729 bits = (pe->sram.byte[ai_off] >> ai_shift) | 1730 (pe->sram.byte[ai_en_off] << (8 - ai_shift)); 1731 1732 return bits; 1733} 1734 1735/* In sram sw entry set lookup ID field of the tcam key to be used in the next 1736 * lookup iteration 1737 */ 1738static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe, 1739 unsigned int lu) 1740{ 1741 int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS; 1742 1743 mvpp2_prs_sram_bits_clear(pe, sram_next_off, 1744 MVPP2_PRS_SRAM_NEXT_LU_MASK); 1745 mvpp2_prs_sram_bits_set(pe, sram_next_off, lu); 1746} 1747 1748/* In the sram sw entry set sign and value of the next lookup offset 1749 * and the offset value generated to the classifier 1750 */ 1751static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift, 1752 unsigned int op) 1753{ 1754 /* Set sign */ 1755 if (shift < 0) { 1756 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1); 1757 shift = 0 - shift; 1758 } else { 1759 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1); 1760 } 1761 1762 /* Set value */ 1763 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] = 1764 (unsigned char)shift; 1765 1766 /* Reset and set operation */ 1767 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, 1768 MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK); 1769 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op); 1770 1771 /* Set base offset as current */ 1772 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1); 1773} 1774 1775/* In the sram sw entry set sign and value of the user defined offset 1776 * generated to the classifier 1777 */ 1778static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe, 1779 unsigned int type, int offset, 1780 unsigned int op) 1781{ 1782 /* Set sign */ 1783 if (offset < 0) { 1784 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1); 1785 offset = 0 - offset; 1786 } else { 1787 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1); 1788 } 1789 1790 /* Set value */ 1791 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS, 1792 MVPP2_PRS_SRAM_UDF_MASK); 1793 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset); 1794 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS + 1795 MVPP2_PRS_SRAM_UDF_BITS)] &= 1796 ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8))); 1797 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS + 1798 MVPP2_PRS_SRAM_UDF_BITS)] |= 1799 (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8))); 1800 1801 
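/* Editor's note (illustrative only, not part of the driver): the UDF
 * offset field does not start on a byte boundary, so the write is split:
 * mvpp2_prs_sram_bits_set() stores the bits that fit into the first byte,
 * and the two statements just above clear and then OR the spill-over bits
 * into the following byte. With hypothetical values where
 * MVPP2_PRS_SRAM_UDF_OFFS % 8 == 1 and offset == 0xa5, this amounts to:
 *
 *	byte[OFFS / 8]     |= (0xa5 << 1) & 0xff;   - low seven bits
 *	byte[OFFS / 8 + 1] |= 0xa5 >> (8 - 1);      - remaining high bit
 */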
/* Set offset type */ 1802 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, 1803 MVPP2_PRS_SRAM_UDF_TYPE_MASK); 1804 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type); 1805 1806 /* Set offset operation */ 1807 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, 1808 MVPP2_PRS_SRAM_OP_SEL_UDF_MASK); 1809 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op); 1810 1811 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS + 1812 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &= 1813 ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >> 1814 (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8))); 1815 1816 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS + 1817 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |= 1818 (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8))); 1819 1820 /* Set base offset as current */ 1821 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1); 1822} 1823 1824/* Find parser flow entry */ 1825static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow) 1826{ 1827 struct mvpp2_prs_entry *pe; 1828 int tid; 1829 1830 pe = kzalloc(sizeof(*pe), GFP_KERNEL); 1831 if (!pe) 1832 return NULL; 1833 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS); 1834 1835 /* Go through all entries with MVPP2_PRS_LU_FLOWS */ 1836 for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) { 1837 u8 bits; 1838 1839 if (!priv->prs_shadow[tid].valid || 1840 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS) 1841 continue; 1842 1843 pe->index = tid; 1844 mvpp2_prs_hw_read(priv, pe); 1845 bits = mvpp2_prs_sram_ai_get(pe); 1846 1847 /* Sram stores the classification lookup ID in AI bits [5:0] */ 1848 if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow) 1849 return pe; 1850 } 1851 kfree(pe); 1852 1853 return NULL; 1854} 1855 1856/* Return first free tcam index, seeking from start to end */ 1857static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start, 1858 unsigned char end) 1859{ 1860 int tid; 1861 1862 if (start > end) 1863 swap(start, end); 1864 1865 if (end >= MVPP2_PRS_TCAM_SRAM_SIZE) 1866 end = MVPP2_PRS_TCAM_SRAM_SIZE - 1; 1867 1868 for (tid = start; tid <= end; tid++) { 1869 if (!priv->prs_shadow[tid].valid) 1870 return tid; 1871 } 1872 1873 return -EINVAL; 1874} 1875 1876/* Enable/disable dropping all mac da's */ 1877static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add) 1878{ 1879 struct mvpp2_prs_entry pe; 1880 1881 if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) { 1882 /* Entry exists - update port only */ 1883 pe.index = MVPP2_PE_DROP_ALL; 1884 mvpp2_prs_hw_read(priv, &pe); 1885 } else { 1886 /* Entry doesn't exist - create new */ 1887 memset(&pe, 0, sizeof(pe)); 1888 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); 1889 pe.index = MVPP2_PE_DROP_ALL; 1890 1891 /* Non-promiscuous mode for all ports - DROP unknown packets */ 1892 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK, 1893 MVPP2_PRS_RI_DROP_MASK); 1894 1895 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); 1896 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); 1897 1898 /* Update shadow table */ 1899 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); 1900 1901 /* Mask all ports */ 1902 mvpp2_prs_tcam_port_map_set(&pe, 0); 1903 } 1904 1905 /* Update port mask */ 1906 mvpp2_prs_tcam_port_set(&pe, port, add); 1907 1908 mvpp2_prs_hw_write(priv, &pe); 1909} 1910 1911/* Set port to promiscuous mode */ 1912static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add) 1913{ 1914 struct mvpp2_prs_entry pe; 1915 1916 /* Promiscuous mode - 
Accept unknown packets */ 1917 1918 if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) { 1919 /* Entry exist - update port only */ 1920 pe.index = MVPP2_PE_MAC_PROMISCUOUS; 1921 mvpp2_prs_hw_read(priv, &pe); 1922 } else { 1923 /* Entry doesn't exist - create new */ 1924 memset(&pe, 0, sizeof(pe)); 1925 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); 1926 pe.index = MVPP2_PE_MAC_PROMISCUOUS; 1927 1928 /* Continue - set next lookup */ 1929 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA); 1930 1931 /* Set result info bits */ 1932 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST, 1933 MVPP2_PRS_RI_L2_CAST_MASK); 1934 1935 /* Shift to ethertype */ 1936 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN, 1937 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 1938 1939 /* Mask all ports */ 1940 mvpp2_prs_tcam_port_map_set(&pe, 0); 1941 1942 /* Update shadow table */ 1943 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); 1944 } 1945 1946 /* Update port mask */ 1947 mvpp2_prs_tcam_port_set(&pe, port, add); 1948 1949 mvpp2_prs_hw_write(priv, &pe); 1950} 1951 1952/* Accept multicast */ 1953static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index, 1954 bool add) 1955{ 1956 struct mvpp2_prs_entry pe; 1957 unsigned char da_mc; 1958 1959 /* Ethernet multicast address first byte is 1960 * 0x01 for IPv4 and 0x33 for IPv6 1961 */ 1962 da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33; 1963 1964 if (priv->prs_shadow[index].valid) { 1965 /* Entry exist - update port only */ 1966 pe.index = index; 1967 mvpp2_prs_hw_read(priv, &pe); 1968 } else { 1969 /* Entry doesn't exist - create new */ 1970 memset(&pe, 0, sizeof(pe)); 1971 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); 1972 pe.index = index; 1973 1974 /* Continue - set next lookup */ 1975 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA); 1976 1977 /* Set result info bits */ 1978 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST, 1979 MVPP2_PRS_RI_L2_CAST_MASK); 1980 1981 /* Update tcam entry data first byte */ 1982 mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff); 1983 1984 /* Shift to ethertype */ 1985 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN, 1986 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 1987 1988 /* Mask all ports */ 1989 mvpp2_prs_tcam_port_map_set(&pe, 0); 1990 1991 /* Update shadow table */ 1992 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); 1993 } 1994 1995 /* Update port mask */ 1996 mvpp2_prs_tcam_port_set(&pe, port, add); 1997 1998 mvpp2_prs_hw_write(priv, &pe); 1999} 2000 2001/* Set entry for dsa packets */ 2002static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add, 2003 bool tagged, bool extend) 2004{ 2005 struct mvpp2_prs_entry pe; 2006 int tid, shift; 2007 2008 if (extend) { 2009 tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED; 2010 shift = 8; 2011 } else { 2012 tid = tagged ? 
MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED; 2013 shift = 4; 2014 } 2015 2016 if (priv->prs_shadow[tid].valid) { 2017 /* Entry exist - update port only */ 2018 pe.index = tid; 2019 mvpp2_prs_hw_read(priv, &pe); 2020 } else { 2021 /* Entry doesn't exist - create new */ 2022 memset(&pe, 0, sizeof(pe)); 2023 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA); 2024 pe.index = tid; 2025 2026 /* Shift 4 bytes if DSA tag or 8 bytes in case of EDSA tag*/ 2027 mvpp2_prs_sram_shift_set(&pe, shift, 2028 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 2029 2030 /* Update shadow table */ 2031 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA); 2032 2033 if (tagged) { 2034 /* Set tagged bit in DSA tag */ 2035 mvpp2_prs_tcam_data_byte_set(&pe, 0, 2036 MVPP2_PRS_TCAM_DSA_TAGGED_BIT, 2037 MVPP2_PRS_TCAM_DSA_TAGGED_BIT); 2038 /* Clear all ai bits for next iteration */ 2039 mvpp2_prs_sram_ai_update(&pe, 0, 2040 MVPP2_PRS_SRAM_AI_MASK); 2041 /* If packet is tagged continue check vlans */ 2042 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN); 2043 } else { 2044 /* Set result info bits to 'no vlans' */ 2045 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE, 2046 MVPP2_PRS_RI_VLAN_MASK); 2047 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); 2048 } 2049 2050 /* Mask all ports */ 2051 mvpp2_prs_tcam_port_map_set(&pe, 0); 2052 } 2053 2054 /* Update port mask */ 2055 mvpp2_prs_tcam_port_set(&pe, port, add); 2056 2057 mvpp2_prs_hw_write(priv, &pe); 2058} 2059 2060/* Set entry for dsa ethertype */ 2061static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port, 2062 bool add, bool tagged, bool extend) 2063{ 2064 struct mvpp2_prs_entry pe; 2065 int tid, shift, port_mask; 2066 2067 if (extend) { 2068 tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED : 2069 MVPP2_PE_ETYPE_EDSA_UNTAGGED; 2070 port_mask = 0; 2071 shift = 8; 2072 } else { 2073 tid = tagged ? 
MVPP2_PE_ETYPE_DSA_TAGGED : 2074 MVPP2_PE_ETYPE_DSA_UNTAGGED; 2075 port_mask = MVPP2_PRS_PORT_MASK; 2076 shift = 4; 2077 } 2078 2079 if (priv->prs_shadow[tid].valid) { 2080 /* Entry exist - update port only */ 2081 pe.index = tid; 2082 mvpp2_prs_hw_read(priv, &pe); 2083 } else { 2084 /* Entry doesn't exist - create new */ 2085 memset(&pe, 0, sizeof(pe)); 2086 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA); 2087 pe.index = tid; 2088 2089 /* Set ethertype */ 2090 mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA); 2091 mvpp2_prs_match_etype(&pe, 2, 0); 2092 2093 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK, 2094 MVPP2_PRS_RI_DSA_MASK); 2095 /* Shift ethertype + 2 byte reserved + tag*/ 2096 mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift, 2097 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 2098 2099 /* Update shadow table */ 2100 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA); 2101 2102 if (tagged) { 2103 /* Set tagged bit in DSA tag */ 2104 mvpp2_prs_tcam_data_byte_set(&pe, 2105 MVPP2_ETH_TYPE_LEN + 2 + 3, 2106 MVPP2_PRS_TCAM_DSA_TAGGED_BIT, 2107 MVPP2_PRS_TCAM_DSA_TAGGED_BIT); 2108 /* Clear all ai bits for next iteration */ 2109 mvpp2_prs_sram_ai_update(&pe, 0, 2110 MVPP2_PRS_SRAM_AI_MASK); 2111 /* If packet is tagged continue check vlans */ 2112 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN); 2113 } else { 2114 /* Set result info bits to 'no vlans' */ 2115 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE, 2116 MVPP2_PRS_RI_VLAN_MASK); 2117 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); 2118 } 2119 /* Mask/unmask all ports, depending on dsa type */ 2120 mvpp2_prs_tcam_port_map_set(&pe, port_mask); 2121 } 2122 2123 /* Update port mask */ 2124 mvpp2_prs_tcam_port_set(&pe, port, add); 2125 2126 mvpp2_prs_hw_write(priv, &pe); 2127} 2128 2129/* Search for existing single/triple vlan entry */ 2130static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv, 2131 unsigned short tpid, int ai) 2132{ 2133 struct mvpp2_prs_entry *pe; 2134 int tid; 2135 2136 pe = kzalloc(sizeof(*pe), GFP_KERNEL); 2137 if (!pe) 2138 return NULL; 2139 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN); 2140 2141 /* Go through the all entries with MVPP2_PRS_LU_VLAN */ 2142 for (tid = MVPP2_PE_FIRST_FREE_TID; 2143 tid <= MVPP2_PE_LAST_FREE_TID; tid++) { 2144 unsigned int ri_bits, ai_bits; 2145 bool match; 2146 2147 if (!priv->prs_shadow[tid].valid || 2148 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN) 2149 continue; 2150 2151 pe->index = tid; 2152 2153 mvpp2_prs_hw_read(priv, pe); 2154 match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid)); 2155 if (!match) 2156 continue; 2157 2158 /* Get vlan type */ 2159 ri_bits = mvpp2_prs_sram_ri_get(pe); 2160 ri_bits &= MVPP2_PRS_RI_VLAN_MASK; 2161 2162 /* Get current ai value from tcam */ 2163 ai_bits = mvpp2_prs_tcam_ai_get(pe); 2164 /* Clear double vlan bit */ 2165 ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT; 2166 2167 if (ai != ai_bits) 2168 continue; 2169 2170 if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE || 2171 ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE) 2172 return pe; 2173 } 2174 kfree(pe); 2175 2176 return NULL; 2177} 2178 2179/* Add/update single/triple vlan entry */ 2180static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai, 2181 unsigned int port_map) 2182{ 2183 struct mvpp2_prs_entry *pe; 2184 int tid_aux, tid; 2185 int ret = 0; 2186 2187 pe = mvpp2_prs_vlan_find(priv, tpid, ai); 2188 2189 if (!pe) { 2190 /* Create new tcam entry */ 2191 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID, 2192 MVPP2_PE_FIRST_FREE_TID); 2193 if (tid < 0) 2194 
return tid; 2195 2196 pe = kzalloc(sizeof(*pe), GFP_KERNEL); 2197 if (!pe) 2198 return -ENOMEM; 2199 2200 /* Get last double vlan tid */ 2201 for (tid_aux = MVPP2_PE_LAST_FREE_TID; 2202 tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) { 2203 unsigned int ri_bits; 2204 2205 if (!priv->prs_shadow[tid_aux].valid || 2206 priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN) 2207 continue; 2208 2209 pe->index = tid_aux; 2210 mvpp2_prs_hw_read(priv, pe); 2211 ri_bits = mvpp2_prs_sram_ri_get(pe); 2212 if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) == 2213 MVPP2_PRS_RI_VLAN_DOUBLE) 2214 break; 2215 } 2216 2217 if (tid <= tid_aux) { 2218 ret = -EINVAL; 2219 goto free_pe; 2220 } 2221 2222 memset(pe, 0, sizeof(*pe)); 2223 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN); 2224 pe->index = tid; 2225 2226 mvpp2_prs_match_etype(pe, 0, tpid); 2227 2228 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2); 2229 /* Shift 4 bytes - skip 1 vlan tag */ 2230 mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN, 2231 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 2232 /* Clear all ai bits for next iteration */ 2233 mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK); 2234 2235 if (ai == MVPP2_PRS_SINGLE_VLAN_AI) { 2236 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE, 2237 MVPP2_PRS_RI_VLAN_MASK); 2238 } else { 2239 ai |= MVPP2_PRS_DBL_VLAN_AI_BIT; 2240 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE, 2241 MVPP2_PRS_RI_VLAN_MASK); 2242 } 2243 mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK); 2244 2245 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN); 2246 } 2247 /* Update ports' mask */ 2248 mvpp2_prs_tcam_port_map_set(pe, port_map); 2249 2250 mvpp2_prs_hw_write(priv, pe); 2251free_pe: 2252 kfree(pe); 2253 2254 return ret; 2255} 2256 2257/* Get first free double vlan ai number */ 2258static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv) 2259{ 2260 int i; 2261 2262 for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) { 2263 if (!priv->prs_double_vlans[i]) 2264 return i; 2265 } 2266 2267 return -EINVAL; 2268} 2269 2270/* Search for existing double vlan entry */ 2271static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv, 2272 unsigned short tpid1, 2273 unsigned short tpid2) 2274{ 2275 struct mvpp2_prs_entry *pe; 2276 int tid; 2277 2278 pe = kzalloc(sizeof(*pe), GFP_KERNEL); 2279 if (!pe) 2280 return NULL; 2281 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN); 2282 2283 /* Go through the all entries with MVPP2_PRS_LU_VLAN */ 2284 for (tid = MVPP2_PE_FIRST_FREE_TID; 2285 tid <= MVPP2_PE_LAST_FREE_TID; tid++) { 2286 unsigned int ri_mask; 2287 bool match; 2288 2289 if (!priv->prs_shadow[tid].valid || 2290 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN) 2291 continue; 2292 2293 pe->index = tid; 2294 mvpp2_prs_hw_read(priv, pe); 2295 2296 match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid1)) 2297 && mvpp2_prs_tcam_data_cmp(pe, 4, swab16(tpid2)); 2298 2299 if (!match) 2300 continue; 2301 2302 ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK; 2303 if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE) 2304 return pe; 2305 } 2306 kfree(pe); 2307 2308 return NULL; 2309} 2310 2311/* Add or update double vlan entry */ 2312static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, 2313 unsigned short tpid2, 2314 unsigned int port_map) 2315{ 2316 struct mvpp2_prs_entry *pe; 2317 int tid_aux, tid, ai, ret = 0; 2318 2319 pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2); 2320 2321 if (!pe) { 2322 /* Create new tcam entry */ 2323 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 2324 
MVPP2_PE_LAST_FREE_TID); 2325 if (tid < 0) 2326 return tid; 2327 2328 pe = kzalloc(sizeof(*pe), GFP_KERNEL); 2329 if (!pe) 2330 return -ENOMEM; 2331 2332 /* Set ai value for new double vlan entry */ 2333 ai = mvpp2_prs_double_vlan_ai_free_get(priv); 2334 if (ai < 0) { 2335 ret = ai; 2336 goto free_pe; 2337 } 2338 2339 /* Get first single/triple vlan tid */ 2340 for (tid_aux = MVPP2_PE_FIRST_FREE_TID; 2341 tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) { 2342 unsigned int ri_bits; 2343 2344 if (!priv->prs_shadow[tid_aux].valid || 2345 priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN) 2346 continue; 2347 2348 pe->index = tid_aux; 2349 mvpp2_prs_hw_read(priv, pe); 2350 ri_bits = mvpp2_prs_sram_ri_get(pe); 2351 ri_bits &= MVPP2_PRS_RI_VLAN_MASK; 2352 if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE || 2353 ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE) 2354 break; 2355 } 2356 2357 if (tid >= tid_aux) { 2358 ret = -ERANGE; 2359 goto free_pe; 2360 } 2361 2362 memset(pe, 0, sizeof(*pe)); 2363 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN); 2364 pe->index = tid; 2365 2366 priv->prs_double_vlans[ai] = true; 2367 2368 mvpp2_prs_match_etype(pe, 0, tpid1); 2369 mvpp2_prs_match_etype(pe, 4, tpid2); 2370 2371 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN); 2372 /* Shift 8 bytes - skip 2 vlan tags */ 2373 mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN, 2374 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 2375 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE, 2376 MVPP2_PRS_RI_VLAN_MASK); 2377 mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT, 2378 MVPP2_PRS_SRAM_AI_MASK); 2379 2380 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN); 2381 } 2382 2383 /* Update ports' mask */ 2384 mvpp2_prs_tcam_port_map_set(pe, port_map); 2385 mvpp2_prs_hw_write(priv, pe); 2386free_pe: 2387 kfree(pe); 2388 return ret; 2389} 2390 2391/* IPv4 header parsing for fragmentation and L4 offset */ 2392static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto, 2393 unsigned int ri, unsigned int ri_mask) 2394{ 2395 struct mvpp2_prs_entry pe; 2396 int tid; 2397 2398 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) && 2399 (proto != IPPROTO_IGMP)) 2400 return -EINVAL; 2401 2402 /* Not fragmented packet */ 2403 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 2404 MVPP2_PE_LAST_FREE_TID); 2405 if (tid < 0) 2406 return tid; 2407 2408 memset(&pe, 0, sizeof(pe)); 2409 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); 2410 pe.index = tid; 2411 2412 /* Set next lu to IPv4 */ 2413 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); 2414 mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 2415 /* Set L4 offset */ 2416 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, 2417 sizeof(struct iphdr) - 4, 2418 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); 2419 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, 2420 MVPP2_PRS_IPV4_DIP_AI_BIT); 2421 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK); 2422 2423 mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 2424 MVPP2_PRS_TCAM_PROTO_MASK_L); 2425 mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 2426 MVPP2_PRS_TCAM_PROTO_MASK); 2427 2428 mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK); 2429 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT); 2430 /* Unmask all ports */ 2431 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); 2432 2433 /* Update shadow table and hw entry */ 2434 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); 2435 mvpp2_prs_hw_write(priv, &pe); 2436 2437 /* Fragmented packet */ 2438 tid = 
mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 2439 MVPP2_PE_LAST_FREE_TID); 2440 if (tid < 0) 2441 return tid; 2442 2443 pe.index = tid; 2444 /* Clear ri before updating */ 2445 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; 2446 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; 2447 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask); 2448 2449 mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE, 2450 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK); 2451 2452 mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0); 2453 mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0); 2454 2455 /* Update shadow table and hw entry */ 2456 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); 2457 mvpp2_prs_hw_write(priv, &pe); 2458 2459 return 0; 2460} 2461 2462/* IPv4 L3 multicast or broadcast */ 2463static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast) 2464{ 2465 struct mvpp2_prs_entry pe; 2466 int mask, tid; 2467 2468 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 2469 MVPP2_PE_LAST_FREE_TID); 2470 if (tid < 0) 2471 return tid; 2472 2473 memset(&pe, 0, sizeof(pe)); 2474 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); 2475 pe.index = tid; 2476 2477 switch (l3_cast) { 2478 case MVPP2_PRS_L3_MULTI_CAST: 2479 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC, 2480 MVPP2_PRS_IPV4_MC_MASK); 2481 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST, 2482 MVPP2_PRS_RI_L3_ADDR_MASK); 2483 break; 2484 case MVPP2_PRS_L3_BROAD_CAST: 2485 mask = MVPP2_PRS_IPV4_BC_MASK; 2486 mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask); 2487 mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask); 2488 mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask); 2489 mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask); 2490 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST, 2491 MVPP2_PRS_RI_L3_ADDR_MASK); 2492 break; 2493 default: 2494 return -EINVAL; 2495 } 2496 2497 /* Finished: go to flowid generation */ 2498 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); 2499 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); 2500 2501 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, 2502 MVPP2_PRS_IPV4_DIP_AI_BIT); 2503 /* Unmask all ports */ 2504 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); 2505 2506 /* Update shadow table and hw entry */ 2507 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); 2508 mvpp2_prs_hw_write(priv, &pe); 2509 2510 return 0; 2511} 2512 2513/* Set entries for protocols over IPv6 */ 2514static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto, 2515 unsigned int ri, unsigned int ri_mask) 2516{ 2517 struct mvpp2_prs_entry pe; 2518 int tid; 2519 2520 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) && 2521 (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP)) 2522 return -EINVAL; 2523 2524 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 2525 MVPP2_PE_LAST_FREE_TID); 2526 if (tid < 0) 2527 return tid; 2528 2529 memset(&pe, 0, sizeof(pe)); 2530 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); 2531 pe.index = tid; 2532 2533 /* Finished: go to flowid generation */ 2534 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); 2535 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); 2536 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask); 2537 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, 2538 sizeof(struct ipv6hdr) - 6, 2539 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); 2540 2541 mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK); 2542 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, 2543 MVPP2_PRS_IPV6_NO_EXT_AI_BIT); 2544 
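/* Editor's note -- usage sketch, mirroring the calls made from
 * mvpp2_prs_ip6_init() below rather than any new behaviour: the TCAM
 * matches the IPv6 next-header value in data byte 0, so the TCP entry
 * is installed with:
 *
 *	err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
 *				  MVPP2_PRS_RI_L4_PROTO_MASK);
 */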
/* Unmask all ports */ 2545 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); 2546 2547 /* Write HW */ 2548 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6); 2549 mvpp2_prs_hw_write(priv, &pe); 2550 2551 return 0; 2552} 2553 2554/* IPv6 L3 multicast entry */ 2555static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast) 2556{ 2557 struct mvpp2_prs_entry pe; 2558 int tid; 2559 2560 if (l3_cast != MVPP2_PRS_L3_MULTI_CAST) 2561 return -EINVAL; 2562 2563 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 2564 MVPP2_PE_LAST_FREE_TID); 2565 if (tid < 0) 2566 return tid; 2567 2568 memset(&pe, 0, sizeof(pe)); 2569 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); 2570 pe.index = tid; 2571 2572 /* Finished: go to flowid generation */ 2573 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); 2574 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST, 2575 MVPP2_PRS_RI_L3_ADDR_MASK); 2576 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, 2577 MVPP2_PRS_IPV6_NO_EXT_AI_BIT); 2578 /* Shift back to IPv6 NH */ 2579 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 2580 2581 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC, 2582 MVPP2_PRS_IPV6_MC_MASK); 2583 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT); 2584 /* Unmask all ports */ 2585 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); 2586 2587 /* Update shadow table and hw entry */ 2588 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6); 2589 mvpp2_prs_hw_write(priv, &pe); 2590 2591 return 0; 2592} 2593 2594/* Parser per-port initialization */ 2595static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first, 2596 int lu_max, int offset) 2597{ 2598 u32 val; 2599 2600 /* Set lookup ID */ 2601 val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG); 2602 val &= ~MVPP2_PRS_PORT_LU_MASK(port); 2603 val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first); 2604 mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val); 2605 2606 /* Set maximum number of loops for packet received from port */ 2607 val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port)); 2608 val &= ~MVPP2_PRS_MAX_LOOP_MASK(port); 2609 val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max); 2610 mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val); 2611 2612 /* Set initial offset for packet header extraction for the first 2613 * searching loop 2614 */ 2615 val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port)); 2616 val &= ~MVPP2_PRS_INIT_OFF_MASK(port); 2617 val |= MVPP2_PRS_INIT_OFF_VAL(port, offset); 2618 mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val); 2619} 2620 2621/* Default flow entries initialization for all ports */ 2622static void mvpp2_prs_def_flow_init(struct mvpp2 *priv) 2623{ 2624 struct mvpp2_prs_entry pe; 2625 int port; 2626 2627 for (port = 0; port < MVPP2_MAX_PORTS; port++) { 2628 memset(&pe, 0, sizeof(pe)); 2629 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS); 2630 pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port; 2631 2632 /* Mask all ports */ 2633 mvpp2_prs_tcam_port_map_set(&pe, 0); 2634 2635 /* Set flow ID*/ 2636 mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK); 2637 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1); 2638 2639 /* Update shadow table and hw entry */ 2640 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS); 2641 mvpp2_prs_hw_write(priv, &pe); 2642 } 2643} 2644 2645/* Set default entry for Marvell Header field */ 2646static void mvpp2_prs_mh_init(struct mvpp2 *priv) 2647{ 2648 struct mvpp2_prs_entry pe; 2649 2650 memset(&pe, 0, sizeof(pe)); 2651 2652 pe.index = 
MVPP2_PE_MH_DEFAULT; 2653 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH); 2654 mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE, 2655 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 2656 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC); 2657 2658 /* Unmask all ports */ 2659 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); 2660 2661 /* Update shadow table and hw entry */ 2662 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH); 2663 mvpp2_prs_hw_write(priv, &pe); 2664} 2665 2666/* Set default entries (placeholder) for promiscuous, non-promiscuous and 2667 * multicast MAC addresses 2668 */ 2669static void mvpp2_prs_mac_init(struct mvpp2 *priv) 2670{ 2671 struct mvpp2_prs_entry pe; 2672 2673 memset(&pe, 0, sizeof(pe)); 2674 2675 /* Non-promiscuous mode for all ports - DROP unknown packets */ 2676 pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS; 2677 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); 2678 2679 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK, 2680 MVPP2_PRS_RI_DROP_MASK); 2681 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); 2682 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); 2683 2684 /* Unmask all ports */ 2685 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); 2686 2687 /* Update shadow table and hw entry */ 2688 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); 2689 mvpp2_prs_hw_write(priv, &pe); 2690 2691 /* placeholders only - no ports */ 2692 mvpp2_prs_mac_drop_all_set(priv, 0, false); 2693 mvpp2_prs_mac_promisc_set(priv, 0, false); 2694 mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_ALL, false); 2695 mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_IP6, false); 2696} 2697 2698/* Set default entries for various types of dsa packets */ 2699static void mvpp2_prs_dsa_init(struct mvpp2 *priv) 2700{ 2701 struct mvpp2_prs_entry pe; 2702 2703 /* Untagged EDSA entry - placeholder */ 2704 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED, 2705 MVPP2_PRS_EDSA); 2706 2707 /* Tagged EDSA entry - placeholder */ 2708 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); 2709 2710 /* Untagged DSA entry - placeholder */ 2711 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED, 2712 MVPP2_PRS_DSA); 2713 2714 /* Tagged DSA entry - placeholder */ 2715 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); 2716 2717 /* Untagged EDSA ethertype entry - placeholder */ 2718 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false, 2719 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); 2720 2721 /* Tagged EDSA ethertype entry - placeholder */ 2722 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false, 2723 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); 2724 2725 /* Untagged DSA ethertype entry */ 2726 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true, 2727 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); 2728 2729 /* Tagged DSA ethertype entry */ 2730 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true, 2731 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); 2732 2733 /* Set default entry, in case DSA or EDSA tag not found */ 2734 memset(&pe, 0, sizeof(pe)); 2735 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA); 2736 pe.index = MVPP2_PE_DSA_DEFAULT; 2737 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN); 2738 2739 /* Shift 0 bytes */ 2740 mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 2741 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); 2742 2743 /* Clear all sram ai bits for next iteration */ 2744 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); 2745 2746 /* Unmask all ports */ 2747 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); 2748 2749 
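/* Editor's note -- illustrative only: the placeholder entries created
 * above start with an empty port map; a port is expected to opt in later
 * through the same helpers with add == true, e.g. a hypothetical call
 * site selecting tagged EDSA for one port:
 *
 *	mvpp2_prs_dsa_tag_set(priv, port->id, true,
 *			      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
 */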
mvpp2_prs_hw_write(priv, &pe); 2750} 2751 2752/* Match basic ethertypes */ 2753static int mvpp2_prs_etype_init(struct mvpp2 *priv) 2754{ 2755 struct mvpp2_prs_entry pe; 2756 int tid; 2757 2758 /* Ethertype: PPPoE */ 2759 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 2760 MVPP2_PE_LAST_FREE_TID); 2761 if (tid < 0) 2762 return tid; 2763 2764 memset(&pe, 0, sizeof(pe)); 2765 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); 2766 pe.index = tid; 2767 2768 mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES); 2769 2770 mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE, 2771 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 2772 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE); 2773 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK, 2774 MVPP2_PRS_RI_PPPOE_MASK); 2775 2776 /* Update shadow table and hw entry */ 2777 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); 2778 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; 2779 priv->prs_shadow[pe.index].finish = false; 2780 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK, 2781 MVPP2_PRS_RI_PPPOE_MASK); 2782 mvpp2_prs_hw_write(priv, &pe); 2783 2784 /* Ethertype: ARP */ 2785 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 2786 MVPP2_PE_LAST_FREE_TID); 2787 if (tid < 0) 2788 return tid; 2789 2790 memset(&pe, 0, sizeof(pe)); 2791 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); 2792 pe.index = tid; 2793 2794 mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP); 2795 2796 /* Generate flow in the next iteration*/ 2797 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); 2798 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); 2799 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP, 2800 MVPP2_PRS_RI_L3_PROTO_MASK); 2801 /* Set L3 offset */ 2802 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, 2803 MVPP2_ETH_TYPE_LEN, 2804 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); 2805 2806 /* Update shadow table and hw entry */ 2807 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); 2808 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; 2809 priv->prs_shadow[pe.index].finish = true; 2810 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP, 2811 MVPP2_PRS_RI_L3_PROTO_MASK); 2812 mvpp2_prs_hw_write(priv, &pe); 2813 2814 /* Ethertype: LBTD */ 2815 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 2816 MVPP2_PE_LAST_FREE_TID); 2817 if (tid < 0) 2818 return tid; 2819 2820 memset(&pe, 0, sizeof(pe)); 2821 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); 2822 pe.index = tid; 2823 2824 mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE); 2825 2826 /* Generate flow in the next iteration*/ 2827 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); 2828 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); 2829 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC | 2830 MVPP2_PRS_RI_UDF3_RX_SPECIAL, 2831 MVPP2_PRS_RI_CPU_CODE_MASK | 2832 MVPP2_PRS_RI_UDF3_MASK); 2833 /* Set L3 offset */ 2834 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, 2835 MVPP2_ETH_TYPE_LEN, 2836 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); 2837 2838 /* Update shadow table and hw entry */ 2839 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); 2840 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; 2841 priv->prs_shadow[pe.index].finish = true; 2842 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC | 2843 MVPP2_PRS_RI_UDF3_RX_SPECIAL, 2844 MVPP2_PRS_RI_CPU_CODE_MASK | 2845 MVPP2_PRS_RI_UDF3_MASK); 2846 mvpp2_prs_hw_write(priv, &pe); 2847 2848 /* Ethertype: IPv4 without options */ 2849 tid = mvpp2_prs_tcam_first_free(priv, 
MVPP2_PE_FIRST_FREE_TID, 2850 MVPP2_PE_LAST_FREE_TID); 2851 if (tid < 0) 2852 return tid; 2853 2854 memset(&pe, 0, sizeof(pe)); 2855 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); 2856 pe.index = tid; 2857 2858 mvpp2_prs_match_etype(&pe, 0, ETH_P_IP); 2859 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, 2860 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL, 2861 MVPP2_PRS_IPV4_HEAD_MASK | 2862 MVPP2_PRS_IPV4_IHL_MASK); 2863 2864 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); 2865 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, 2866 MVPP2_PRS_RI_L3_PROTO_MASK); 2867 /* Skip eth_type + 4 bytes of IP header */ 2868 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4, 2869 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 2870 /* Set L3 offset */ 2871 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, 2872 MVPP2_ETH_TYPE_LEN, 2873 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); 2874 2875 /* Update shadow table and hw entry */ 2876 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); 2877 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; 2878 priv->prs_shadow[pe.index].finish = false; 2879 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4, 2880 MVPP2_PRS_RI_L3_PROTO_MASK); 2881 mvpp2_prs_hw_write(priv, &pe); 2882 2883 /* Ethertype: IPv4 with options */ 2884 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 2885 MVPP2_PE_LAST_FREE_TID); 2886 if (tid < 0) 2887 return tid; 2888 2889 pe.index = tid; 2890 2891 /* Clear tcam data before updating */ 2892 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0; 2893 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0; 2894 2895 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, 2896 MVPP2_PRS_IPV4_HEAD, 2897 MVPP2_PRS_IPV4_HEAD_MASK); 2898 2899 /* Clear ri before updating */ 2900 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; 2901 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; 2902 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT, 2903 MVPP2_PRS_RI_L3_PROTO_MASK); 2904 2905 /* Update shadow table and hw entry */ 2906 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); 2907 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; 2908 priv->prs_shadow[pe.index].finish = false; 2909 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT, 2910 MVPP2_PRS_RI_L3_PROTO_MASK); 2911 mvpp2_prs_hw_write(priv, &pe); 2912 2913 /* Ethertype: IPv6 without options */ 2914 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 2915 MVPP2_PE_LAST_FREE_TID); 2916 if (tid < 0) 2917 return tid; 2918 2919 memset(&pe, 0, sizeof(pe)); 2920 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); 2921 pe.index = tid; 2922 2923 mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6); 2924 2925 /* Skip DIP of IPV6 header */ 2926 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 + 2927 MVPP2_MAX_L3_ADDR_SIZE, 2928 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 2929 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); 2930 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6, 2931 MVPP2_PRS_RI_L3_PROTO_MASK); 2932 /* Set L3 offset */ 2933 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, 2934 MVPP2_ETH_TYPE_LEN, 2935 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); 2936 2937 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); 2938 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; 2939 priv->prs_shadow[pe.index].finish = false; 2940 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6, 2941 MVPP2_PRS_RI_L3_PROTO_MASK); 2942 mvpp2_prs_hw_write(priv, &pe); 2943 2944 /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */ 2945 memset(&pe, 0, sizeof(struct 
mvpp2_prs_entry)); 2946 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); 2947 pe.index = MVPP2_PE_ETH_TYPE_UN; 2948 2949 /* Unmask all ports */ 2950 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); 2951 2952 /* Generate flow in the next iteration*/ 2953 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); 2954 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); 2955 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN, 2956 MVPP2_PRS_RI_L3_PROTO_MASK); 2957 /* Set L3 offset even it's unknown L3 */ 2958 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, 2959 MVPP2_ETH_TYPE_LEN, 2960 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); 2961 2962 /* Update shadow table and hw entry */ 2963 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); 2964 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; 2965 priv->prs_shadow[pe.index].finish = true; 2966 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN, 2967 MVPP2_PRS_RI_L3_PROTO_MASK); 2968 mvpp2_prs_hw_write(priv, &pe); 2969 2970 return 0; 2971} 2972 2973/* Configure vlan entries and detect up to 2 successive VLAN tags. 2974 * Possible options: 2975 * 0x8100, 0x88A8 2976 * 0x8100, 0x8100 2977 * 0x8100 2978 * 0x88A8 2979 */ 2980static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv) 2981{ 2982 struct mvpp2_prs_entry pe; 2983 int err; 2984 2985 priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool), 2986 MVPP2_PRS_DBL_VLANS_MAX, 2987 GFP_KERNEL); 2988 if (!priv->prs_double_vlans) 2989 return -ENOMEM; 2990 2991 /* Double VLAN: 0x8100, 0x88A8 */ 2992 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD, 2993 MVPP2_PRS_PORT_MASK); 2994 if (err) 2995 return err; 2996 2997 /* Double VLAN: 0x8100, 0x8100 */ 2998 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q, 2999 MVPP2_PRS_PORT_MASK); 3000 if (err) 3001 return err; 3002 3003 /* Single VLAN: 0x88a8 */ 3004 err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI, 3005 MVPP2_PRS_PORT_MASK); 3006 if (err) 3007 return err; 3008 3009 /* Single VLAN: 0x8100 */ 3010 err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI, 3011 MVPP2_PRS_PORT_MASK); 3012 if (err) 3013 return err; 3014 3015 /* Set default double vlan entry */ 3016 memset(&pe, 0, sizeof(pe)); 3017 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); 3018 pe.index = MVPP2_PE_VLAN_DBL; 3019 3020 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); 3021 /* Clear ai for next iterations */ 3022 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); 3023 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE, 3024 MVPP2_PRS_RI_VLAN_MASK); 3025 3026 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT, 3027 MVPP2_PRS_DBL_VLAN_AI_BIT); 3028 /* Unmask all ports */ 3029 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); 3030 3031 /* Update shadow table and hw entry */ 3032 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); 3033 mvpp2_prs_hw_write(priv, &pe); 3034 3035 /* Set default vlan none entry */ 3036 memset(&pe, 0, sizeof(pe)); 3037 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); 3038 pe.index = MVPP2_PE_VLAN_NONE; 3039 3040 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); 3041 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE, 3042 MVPP2_PRS_RI_VLAN_MASK); 3043 3044 /* Unmask all ports */ 3045 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); 3046 3047 /* Update shadow table and hw entry */ 3048 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); 3049 mvpp2_prs_hw_write(priv, &pe); 3050 3051 return 0; 3052} 3053 3054/* Set entries for 
PPPoE ethertype */ 3055static int mvpp2_prs_pppoe_init(struct mvpp2 *priv) 3056{ 3057 struct mvpp2_prs_entry pe; 3058 int tid; 3059 3060 /* IPv4 over PPPoE with options */ 3061 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 3062 MVPP2_PE_LAST_FREE_TID); 3063 if (tid < 0) 3064 return tid; 3065 3066 memset(&pe, 0, sizeof(pe)); 3067 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE); 3068 pe.index = tid; 3069 3070 mvpp2_prs_match_etype(&pe, 0, PPP_IP); 3071 3072 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); 3073 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT, 3074 MVPP2_PRS_RI_L3_PROTO_MASK); 3075 /* Skip eth_type + 4 bytes of IP header */ 3076 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4, 3077 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 3078 /* Set L3 offset */ 3079 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, 3080 MVPP2_ETH_TYPE_LEN, 3081 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); 3082 3083 /* Update shadow table and hw entry */ 3084 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); 3085 mvpp2_prs_hw_write(priv, &pe); 3086 3087 /* IPv4 over PPPoE without options */ 3088 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 3089 MVPP2_PE_LAST_FREE_TID); 3090 if (tid < 0) 3091 return tid; 3092 3093 pe.index = tid; 3094 3095 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, 3096 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL, 3097 MVPP2_PRS_IPV4_HEAD_MASK | 3098 MVPP2_PRS_IPV4_IHL_MASK); 3099 3100 /* Clear ri before updating */ 3101 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; 3102 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; 3103 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, 3104 MVPP2_PRS_RI_L3_PROTO_MASK); 3105 3106 /* Update shadow table and hw entry */ 3107 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); 3108 mvpp2_prs_hw_write(priv, &pe); 3109 3110 /* IPv6 over PPPoE */ 3111 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 3112 MVPP2_PE_LAST_FREE_TID); 3113 if (tid < 0) 3114 return tid; 3115 3116 memset(&pe, 0, sizeof(pe)); 3117 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE); 3118 pe.index = tid; 3119 3120 mvpp2_prs_match_etype(&pe, 0, PPP_IPV6); 3121 3122 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); 3123 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6, 3124 MVPP2_PRS_RI_L3_PROTO_MASK); 3125 /* Skip eth_type + 4 bytes of IPv6 header */ 3126 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4, 3127 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 3128 /* Set L3 offset */ 3129 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, 3130 MVPP2_ETH_TYPE_LEN, 3131 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); 3132 3133 /* Update shadow table and hw entry */ 3134 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); 3135 mvpp2_prs_hw_write(priv, &pe); 3136 3137 /* Non-IP over PPPoE */ 3138 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 3139 MVPP2_PE_LAST_FREE_TID); 3140 if (tid < 0) 3141 return tid; 3142 3143 memset(&pe, 0, sizeof(pe)); 3144 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE); 3145 pe.index = tid; 3146 3147 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN, 3148 MVPP2_PRS_RI_L3_PROTO_MASK); 3149 3150 /* Finished: go to flowid generation */ 3151 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); 3152 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); 3153 /* Set L3 offset even if it's unknown L3 */ 3154 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, 3155 MVPP2_ETH_TYPE_LEN, 3156 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); 3157 3158 /* Update shadow table and hw entry */ 3159 
mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); 3160 mvpp2_prs_hw_write(priv, &pe); 3161 3162 return 0; 3163} 3164 3165/* Initialize entries for IPv4 */ 3166static int mvpp2_prs_ip4_init(struct mvpp2 *priv) 3167{ 3168 struct mvpp2_prs_entry pe; 3169 int err; 3170 3171 /* Set entries for TCP, UDP and IGMP over IPv4 */ 3172 err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP, 3173 MVPP2_PRS_RI_L4_PROTO_MASK); 3174 if (err) 3175 return err; 3176 3177 err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP, 3178 MVPP2_PRS_RI_L4_PROTO_MASK); 3179 if (err) 3180 return err; 3181 3182 err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP, 3183 MVPP2_PRS_RI_CPU_CODE_RX_SPEC | 3184 MVPP2_PRS_RI_UDF3_RX_SPECIAL, 3185 MVPP2_PRS_RI_CPU_CODE_MASK | 3186 MVPP2_PRS_RI_UDF3_MASK); 3187 if (err) 3188 return err; 3189 3190 /* IPv4 Broadcast */ 3191 err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST); 3192 if (err) 3193 return err; 3194 3195 /* IPv4 Multicast */ 3196 err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST); 3197 if (err) 3198 return err; 3199 3200 /* Default IPv4 entry for unknown protocols */ 3201 memset(&pe, 0, sizeof(pe)); 3202 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); 3203 pe.index = MVPP2_PE_IP4_PROTO_UN; 3204 3205 /* Set next lu to IPv4 */ 3206 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); 3207 mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 3208 /* Set L4 offset */ 3209 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, 3210 sizeof(struct iphdr) - 4, 3211 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); 3212 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, 3213 MVPP2_PRS_IPV4_DIP_AI_BIT); 3214 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER, 3215 MVPP2_PRS_RI_L4_PROTO_MASK); 3216 3217 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT); 3218 /* Unmask all ports */ 3219 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); 3220 3221 /* Update shadow table and hw entry */ 3222 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); 3223 mvpp2_prs_hw_write(priv, &pe); 3224 3225 /* Default IPv4 entry for unicast address */ 3226 memset(&pe, 0, sizeof(pe)); 3227 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); 3228 pe.index = MVPP2_PE_IP4_ADDR_UN; 3229 3230 /* Finished: go to flowid generation */ 3231 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); 3232 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); 3233 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST, 3234 MVPP2_PRS_RI_L3_ADDR_MASK); 3235 3236 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, 3237 MVPP2_PRS_IPV4_DIP_AI_BIT); 3238 /* Unmask all ports */ 3239 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); 3240 3241 /* Update shadow table and hw entry */ 3242 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); 3243 mvpp2_prs_hw_write(priv, &pe); 3244 3245 return 0; 3246} 3247 3248/* Initialize entries for IPv6 */ 3249static int mvpp2_prs_ip6_init(struct mvpp2 *priv) 3250{ 3251 struct mvpp2_prs_entry pe; 3252 int tid, err; 3253 3254 /* Set entries for TCP, UDP and ICMP over IPv6 */ 3255 err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP, 3256 MVPP2_PRS_RI_L4_TCP, 3257 MVPP2_PRS_RI_L4_PROTO_MASK); 3258 if (err) 3259 return err; 3260 3261 err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP, 3262 MVPP2_PRS_RI_L4_UDP, 3263 MVPP2_PRS_RI_L4_PROTO_MASK); 3264 if (err) 3265 return err; 3266 3267 err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6, 3268 MVPP2_PRS_RI_CPU_CODE_RX_SPEC | 3269 MVPP2_PRS_RI_UDF3_RX_SPECIAL, 3270 MVPP2_PRS_RI_CPU_CODE_MASK | 3271 
MVPP2_PRS_RI_UDF3_MASK); 3272 if (err) 3273 return err; 3274 3275 /* IPv4 is the last header. This is a similar case to 6-TCP or 17-UDP */ 3276 /* Result Info: UDF7=1, DS lite */ 3277 err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP, 3278 MVPP2_PRS_RI_UDF7_IP6_LITE, 3279 MVPP2_PRS_RI_UDF7_MASK); 3280 if (err) 3281 return err; 3282 3283 /* IPv6 multicast */ 3284 err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST); 3285 if (err) 3286 return err; 3287 3288 /* Entry for checking hop limit */ 3289 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 3290 MVPP2_PE_LAST_FREE_TID); 3291 if (tid < 0) 3292 return tid; 3293 3294 memset(&pe, 0, sizeof(pe)); 3295 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); 3296 pe.index = tid; 3297 3298 /* Finished: go to flowid generation */ 3299 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); 3300 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); 3301 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN | 3302 MVPP2_PRS_RI_DROP_MASK, 3303 MVPP2_PRS_RI_L3_PROTO_MASK | 3304 MVPP2_PRS_RI_DROP_MASK); 3305 3306 mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK); 3307 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, 3308 MVPP2_PRS_IPV6_NO_EXT_AI_BIT); 3309 3310 /* Update shadow table and hw entry */ 3311 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); 3312 mvpp2_prs_hw_write(priv, &pe); 3313 3314 /* Default IPv6 entry for unknown protocols */ 3315 memset(&pe, 0, sizeof(pe)); 3316 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); 3317 pe.index = MVPP2_PE_IP6_PROTO_UN; 3318 3319 /* Finished: go to flowid generation */ 3320 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); 3321 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); 3322 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER, 3323 MVPP2_PRS_RI_L4_PROTO_MASK); 3324 /* Set L4 offset relative to our current place */ 3325 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, 3326 sizeof(struct ipv6hdr) - 4, 3327 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); 3328 3329 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, 3330 MVPP2_PRS_IPV6_NO_EXT_AI_BIT); 3331 /* Unmask all ports */ 3332 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); 3333 3334 /* Update shadow table and hw entry */ 3335 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); 3336 mvpp2_prs_hw_write(priv, &pe); 3337 3338 /* Default IPv6 entry for unknown ext protocols */ 3339 memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 3340 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); 3341 pe.index = MVPP2_PE_IP6_EXT_PROTO_UN; 3342 3343 /* Finished: go to flowid generation */ 3344 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); 3345 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); 3346 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER, 3347 MVPP2_PRS_RI_L4_PROTO_MASK); 3348 3349 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT, 3350 MVPP2_PRS_IPV6_EXT_AI_BIT); 3351 /* Unmask all ports */ 3352 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); 3353 3354 /* Update shadow table and hw entry */ 3355 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); 3356 mvpp2_prs_hw_write(priv, &pe); 3357 3358 /* Default IPv6 entry for unicast address */ 3359 memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 3360 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); 3361 pe.index = MVPP2_PE_IP6_ADDR_UN; 3362 3363 /* Finished: go to IPv6 again */ 3364 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); 3365 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST, 3366 MVPP2_PRS_RI_L3_ADDR_MASK); 
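/* Editor's note on the -18 shift below (assuming MVPP2_MAX_L3_ADDR_SIZE
 * is the 16-byte IPv6 address size): the L2 stage skipped
 * MVPP2_ETH_TYPE_LEN + 8 + MVPP2_MAX_L3_ADDR_SIZE bytes ("Skip DIP of
 * IPV6 header" above), which leaves the parser at offset 24 of the IPv6
 * header, i.e. the destination address; 24 - 18 = 6 is the offset of the
 * next-header field, hence "shift back to IPv6 NH".
 */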
3367 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, 3368 MVPP2_PRS_IPV6_NO_EXT_AI_BIT); 3369 /* Shift back to IPv6 NH */ 3370 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 3371 3372 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT); 3373 /* Unmask all ports */ 3374 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); 3375 3376 /* Update shadow table and hw entry */ 3377 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6); 3378 mvpp2_prs_hw_write(priv, &pe); 3379 3380 return 0; 3381} 3382 3383/* Parser default initialization */ 3384static int mvpp2_prs_default_init(struct platform_device *pdev, 3385 struct mvpp2 *priv) 3386{ 3387 int err, index, i; 3388 3389 /* Enable tcam table */ 3390 mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK); 3391 3392 /* Clear all tcam and sram entries */ 3393 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) { 3394 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index); 3395 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) 3396 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0); 3397 3398 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index); 3399 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) 3400 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0); 3401 } 3402 3403 /* Invalidate all tcam entries */ 3404 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) 3405 mvpp2_prs_hw_inv(priv, index); 3406 3407 priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE, 3408 sizeof(*priv->prs_shadow), 3409 GFP_KERNEL); 3410 if (!priv->prs_shadow) 3411 return -ENOMEM; 3412 3413 /* Always start from lookup = 0 */ 3414 for (index = 0; index < MVPP2_MAX_PORTS; index++) 3415 mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH, 3416 MVPP2_PRS_PORT_LU_MAX, 0); 3417 3418 mvpp2_prs_def_flow_init(priv); 3419 3420 mvpp2_prs_mh_init(priv); 3421 3422 mvpp2_prs_mac_init(priv); 3423 3424 mvpp2_prs_dsa_init(priv); 3425 3426 err = mvpp2_prs_etype_init(priv); 3427 if (err) 3428 return err; 3429 3430 err = mvpp2_prs_vlan_init(pdev, priv); 3431 if (err) 3432 return err; 3433 3434 err = mvpp2_prs_pppoe_init(priv); 3435 if (err) 3436 return err; 3437 3438 err = mvpp2_prs_ip6_init(priv); 3439 if (err) 3440 return err; 3441 3442 err = mvpp2_prs_ip4_init(priv); 3443 if (err) 3444 return err; 3445 3446 return 0; 3447} 3448 3449/* Compare MAC DA with tcam entry data */ 3450static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe, 3451 const u8 *da, unsigned char *mask) 3452{ 3453 unsigned char tcam_byte, tcam_mask; 3454 int index; 3455 3456 for (index = 0; index < ETH_ALEN; index++) { 3457 mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask); 3458 if (tcam_mask != mask[index]) 3459 return false; 3460 3461 if ((tcam_mask & tcam_byte) != (da[index] & mask[index])) 3462 return false; 3463 } 3464 3465 return true; 3466} 3467 3468/* Find tcam entry with matched pair <MAC DA, port> */ 3469static struct mvpp2_prs_entry * 3470mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da, 3471 unsigned char *mask, int udf_type) 3472{ 3473 struct mvpp2_prs_entry *pe; 3474 int tid; 3475 3476 pe = kzalloc(sizeof(*pe), GFP_ATOMIC); 3477 if (!pe) 3478 return NULL; 3479 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC); 3480 3481 /* Go through all entries with MVPP2_PRS_LU_MAC */ 3482 for (tid = MVPP2_PE_FIRST_FREE_TID; 3483 tid <= MVPP2_PE_LAST_FREE_TID; tid++) { 3484 unsigned int entry_pmap; 3485 3486 if (!priv->prs_shadow[tid].valid || 3487 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) || 3488 
(priv->prs_shadow[tid].udf != udf_type)) 3489 continue; 3490 3491 pe->index = tid; 3492 mvpp2_prs_hw_read(priv, pe); 3493 entry_pmap = mvpp2_prs_tcam_port_map_get(pe); 3494 3495 if (mvpp2_prs_mac_range_equals(pe, da, mask) && 3496 entry_pmap == pmap) 3497 return pe; 3498 } 3499 kfree(pe); 3500 3501 return NULL; 3502} 3503 3504/* Update parser's mac da entry */ 3505static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port, 3506 const u8 *da, bool add) 3507{ 3508 struct mvpp2_prs_entry *pe; 3509 unsigned int pmap, len, ri; 3510 unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 3511 int tid; 3512 3513 /* Scan TCAM and see if an entry with this <MAC DA, port> already exists */ 3514 pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask, 3515 MVPP2_PRS_UDF_MAC_DEF); 3516 3517 /* No such entry */ 3518 if (!pe) { 3519 if (!add) 3520 return 0; 3521 3522 /* Create new TCAM entry */ 3523 /* Find first range mac entry */ 3524 for (tid = MVPP2_PE_FIRST_FREE_TID; 3525 tid <= MVPP2_PE_LAST_FREE_TID; tid++) 3526 if (priv->prs_shadow[tid].valid && 3527 (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) && 3528 (priv->prs_shadow[tid].udf == 3529 MVPP2_PRS_UDF_MAC_RANGE)) 3530 break; 3531 3532 /* Go through all entries from first to last */ 3533 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 3534 tid - 1); 3535 if (tid < 0) 3536 return tid; 3537 3538 pe = kzalloc(sizeof(*pe), GFP_ATOMIC); 3539 if (!pe) 3540 return -ENOMEM; 3541 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC); 3542 pe->index = tid; 3543 3544 /* Mask all ports */ 3545 mvpp2_prs_tcam_port_map_set(pe, 0); 3546 } 3547 3548 /* Update port mask */ 3549 mvpp2_prs_tcam_port_set(pe, port, add); 3550 3551 /* Invalidate the entry if no ports are left enabled */ 3552 pmap = mvpp2_prs_tcam_port_map_get(pe); 3553 if (pmap == 0) { 3554 if (add) { 3555 kfree(pe); 3556 return -EINVAL; 3557 } 3558 mvpp2_prs_hw_inv(priv, pe->index); 3559 priv->prs_shadow[pe->index].valid = false; 3560 kfree(pe); 3561 return 0; 3562 } 3563 3564 /* Continue - set next lookup */ 3565 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA); 3566 3567 /* Set match on DA */ 3568 len = ETH_ALEN; 3569 while (len--) 3570 mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff); 3571 3572 /* Set result info bits */ 3573 if (is_broadcast_ether_addr(da)) 3574 ri = MVPP2_PRS_RI_L2_BCAST; 3575 else if (is_multicast_ether_addr(da)) 3576 ri = MVPP2_PRS_RI_L2_MCAST; 3577 else 3578 ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK; 3579 3580 mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK | 3581 MVPP2_PRS_RI_MAC_ME_MASK); 3582 mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK | 3583 MVPP2_PRS_RI_MAC_ME_MASK); 3584 3585 /* Shift to ethertype */ 3586 mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN, 3587 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 3588 3589 /* Update shadow table and hw entry */ 3590 priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF; 3591 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC); 3592 mvpp2_prs_hw_write(priv, pe); 3593 3594 kfree(pe); 3595 3596 return 0; 3597} 3598 3599static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da) 3600{ 3601 struct mvpp2_port *port = netdev_priv(dev); 3602 int err; 3603 3604 /* Remove old parser entry */ 3605 err = mvpp2_prs_mac_da_accept(port->priv, port->id, dev->dev_addr, 3606 false); 3607 if (err) 3608 return err; 3609 3610 /* Add new parser entry */ 3611 err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true); 3612 if (err) 3613 return err; 3614 3615 /* Set
addr in the device */ 3616 ether_addr_copy(dev->dev_addr, da); 3617 3618 return 0; 3619} 3620 3621/* Delete all the port's simple (not range) multicast entries */ 3622static void mvpp2_prs_mcast_del_all(struct mvpp2 *priv, int port) 3623{ 3624 struct mvpp2_prs_entry pe; 3625 int index, tid; 3626 3627 for (tid = MVPP2_PE_FIRST_FREE_TID; 3628 tid <= MVPP2_PE_LAST_FREE_TID; tid++) { 3629 unsigned char da[ETH_ALEN], da_mask[ETH_ALEN]; 3630 3631 if (!priv->prs_shadow[tid].valid || 3632 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) || 3633 (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF)) 3634 continue; 3635 3636 /* Only simple mac entries */ 3637 pe.index = tid; 3638 mvpp2_prs_hw_read(priv, &pe); 3639 3640 /* Read mac addr from entry */ 3641 for (index = 0; index < ETH_ALEN; index++) 3642 mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index], 3643 &da_mask[index]); 3644 3645 if (is_multicast_ether_addr(da) && !is_broadcast_ether_addr(da)) 3646 /* Delete this entry */ 3647 mvpp2_prs_mac_da_accept(priv, port, da, false); 3648 } 3649} 3650 3651static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type) 3652{ 3653 switch (type) { 3654 case MVPP2_TAG_TYPE_EDSA: 3655 /* Add port to EDSA entries */ 3656 mvpp2_prs_dsa_tag_set(priv, port, true, 3657 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); 3658 mvpp2_prs_dsa_tag_set(priv, port, true, 3659 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); 3660 /* Remove port from DSA entries */ 3661 mvpp2_prs_dsa_tag_set(priv, port, false, 3662 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); 3663 mvpp2_prs_dsa_tag_set(priv, port, false, 3664 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); 3665 break; 3666 3667 case MVPP2_TAG_TYPE_DSA: 3668 /* Add port to DSA entries */ 3669 mvpp2_prs_dsa_tag_set(priv, port, true, 3670 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); 3671 mvpp2_prs_dsa_tag_set(priv, port, true, 3672 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); 3673 /* Remove port from EDSA entries */ 3674 mvpp2_prs_dsa_tag_set(priv, port, false, 3675 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); 3676 mvpp2_prs_dsa_tag_set(priv, port, false, 3677 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); 3678 break; 3679 3680 case MVPP2_TAG_TYPE_MH: 3681 case MVPP2_TAG_TYPE_NONE: 3682 /* Remove port from EDSA and DSA entries */ 3683 mvpp2_prs_dsa_tag_set(priv, port, false, 3684 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); 3685 mvpp2_prs_dsa_tag_set(priv, port, false, 3686 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); 3687 mvpp2_prs_dsa_tag_set(priv, port, false, 3688 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); 3689 mvpp2_prs_dsa_tag_set(priv, port, false, 3690 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); 3691 break; 3692 3693 default: 3694 if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA)) 3695 return -EINVAL; 3696 } 3697 3698 return 0; 3699} 3700 3701/* Set prs flow for the port */ 3702static int mvpp2_prs_def_flow(struct mvpp2_port *port) 3703{ 3704 struct mvpp2_prs_entry *pe; 3705 int tid; 3706 3707 pe = mvpp2_prs_flow_find(port->priv, port->id); 3708 3709 /* No such entry exists */ 3710 if (!pe) { 3711 /* Go through all entries from last to first */ 3712 tid = mvpp2_prs_tcam_first_free(port->priv, 3713 MVPP2_PE_LAST_FREE_TID, 3714 MVPP2_PE_FIRST_FREE_TID); 3715 if (tid < 0) 3716 return tid; 3717 3718 pe = kzalloc(sizeof(*pe), GFP_KERNEL); 3719 if (!pe) 3720 return -ENOMEM; 3721 3722 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS); 3723 pe->index = tid; 3724 3725 /* Set flow ID */ 3726 mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK); 3727 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1); 3728 3729 /* Update shadow table */ 3730 mvpp2_prs_shadow_set(port->priv, pe->index,
MVPP2_PRS_LU_FLOWS); 3731 } 3732 3733 mvpp2_prs_tcam_port_map_set(pe, (1 << port->id)); 3734 mvpp2_prs_hw_write(port->priv, pe); 3735 kfree(pe); 3736 3737 return 0; 3738} 3739 3740/* Classifier configuration routines */ 3741 3742/* Update classification flow table registers */ 3743static void mvpp2_cls_flow_write(struct mvpp2 *priv, 3744 struct mvpp2_cls_flow_entry *fe) 3745{ 3746 mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index); 3747 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]); 3748 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]); 3749 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]); 3750} 3751 3752/* Update classification lookup table register */ 3753static void mvpp2_cls_lookup_write(struct mvpp2 *priv, 3754 struct mvpp2_cls_lookup_entry *le) 3755{ 3756 u32 val; 3757 3758 val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid; 3759 mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val); 3760 mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data); 3761} 3762 3763/* Classifier default initialization */ 3764static void mvpp2_cls_init(struct mvpp2 *priv) 3765{ 3766 struct mvpp2_cls_lookup_entry le; 3767 struct mvpp2_cls_flow_entry fe; 3768 int index; 3769 3770 /* Enable classifier */ 3771 mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK); 3772 3773 /* Clear classifier flow table */ 3774 memset(&fe.data, 0, sizeof(fe.data)); 3775 for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) { 3776 fe.index = index; 3777 mvpp2_cls_flow_write(priv, &fe); 3778 } 3779 3780 /* Clear classifier lookup table */ 3781 le.data = 0; 3782 for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) { 3783 le.lkpid = index; 3784 le.way = 0; 3785 mvpp2_cls_lookup_write(priv, &le); 3786 3787 le.way = 1; 3788 mvpp2_cls_lookup_write(priv, &le); 3789 } 3790} 3791 3792static void mvpp2_cls_port_config(struct mvpp2_port *port) 3793{ 3794 struct mvpp2_cls_lookup_entry le; 3795 u32 val; 3796 3797 /* Set way for the port */ 3798 val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG); 3799 val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id); 3800 mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val); 3801 3802 /* Pick the entry to be accessed in lookup ID decoding table 3803 * according to the way and lkpid. 
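 * For example, with way = 0 the index value that mvpp2_cls_lookup_write() above composes for MVPP2_CLS_LKP_INDEX_REG reduces to the lkpid alone, which is why le.way = 0 and le.lkpid = port->id below select this port's entry.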
3804 */ 3805 le.lkpid = port->id; 3806 le.way = 0; 3807 le.data = 0; 3808 3809 /* Set initial CPU queue for receiving packets */ 3810 le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK; 3811 le.data |= port->first_rxq; 3812 3813 /* Disable classification engines */ 3814 le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK; 3815 3816 /* Update lookup ID table entry */ 3817 mvpp2_cls_lookup_write(port->priv, &le); 3818} 3819 3820/* Set CPU queue number for oversize packets */ 3821static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port) 3822{ 3823 u32 val; 3824 3825 mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id), 3826 port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK); 3827 3828 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id), 3829 (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS)); 3830 3831 val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG); 3832 val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id); 3833 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val); 3834} 3835 3836static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool) 3837{ 3838 if (likely(pool->frag_size <= PAGE_SIZE)) 3839 return netdev_alloc_frag(pool->frag_size); 3840 else 3841 return kmalloc(pool->frag_size, GFP_ATOMIC); 3842} 3843 3844static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data) 3845{ 3846 if (likely(pool->frag_size <= PAGE_SIZE)) 3847 skb_free_frag(data); 3848 else 3849 kfree(data); 3850} 3851 3852/* Buffer Manager configuration routines */ 3853 3854/* Create pool */ 3855static int mvpp2_bm_pool_create(struct platform_device *pdev, 3856 struct mvpp2 *priv, 3857 struct mvpp2_bm_pool *bm_pool, int size) 3858{ 3859 u32 val; 3860 3861 /* Number of buffer pointers must be a multiple of 16, as per 3862 * hardware constraints 3863 */ 3864 if (!IS_ALIGNED(size, 16)) 3865 return -EINVAL; 3866 3867 /* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16 3868 * bytes per buffer pointer 3869 */ 3870 if (priv->hw_version == MVPP21) 3871 bm_pool->size_bytes = 2 * sizeof(u32) * size; 3872 else 3873 bm_pool->size_bytes = 2 * sizeof(u64) * size; 3874 3875 bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes, 3876 &bm_pool->dma_addr, 3877 GFP_KERNEL); 3878 if (!bm_pool->virt_addr) 3879 return -ENOMEM; 3880 3881 if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr, 3882 MVPP2_BM_POOL_PTR_ALIGN)) { 3883 dma_free_coherent(&pdev->dev, bm_pool->size_bytes, 3884 bm_pool->virt_addr, bm_pool->dma_addr); 3885 dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n", 3886 bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN); 3887 return -ENOMEM; 3888 } 3889 3890 mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id), 3891 lower_32_bits(bm_pool->dma_addr)); 3892 mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size); 3893 3894 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id)); 3895 val |= MVPP2_BM_START_MASK; 3896 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val); 3897 3898 bm_pool->type = MVPP2_BM_FREE; 3899 bm_pool->size = size; 3900 bm_pool->pkt_size = 0; 3901 bm_pool->buf_num = 0; 3902 3903 return 0; 3904} 3905 3906/* Set pool buffer size */ 3907static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv, 3908 struct mvpp2_bm_pool *bm_pool, 3909 int buf_size) 3910{ 3911 u32 val; 3912 3913 bm_pool->buf_size = buf_size; 3914 3915 val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET); 3916 mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val); 3917} 3918 3919static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv, 3920 struct mvpp2_bm_pool 
*bm_pool, 3921 dma_addr_t *dma_addr, 3922 phys_addr_t *phys_addr) 3923{ 3924 int cpu = get_cpu(); 3925 3926 *dma_addr = mvpp2_percpu_read(priv, cpu, 3927 MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); 3928 *phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG); 3929 3930 if (priv->hw_version == MVPP22) { 3931 u32 val; 3932 u32 dma_addr_highbits, phys_addr_highbits; 3933 3934 val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC); 3935 dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK); 3936 phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >> 3937 MVPP22_BM_ADDR_HIGH_VIRT_SHIFT; 3938 3939 if (sizeof(dma_addr_t) == 8) 3940 *dma_addr |= (u64)dma_addr_highbits << 32; 3941 3942 if (sizeof(phys_addr_t) == 8) 3943 *phys_addr |= (u64)phys_addr_highbits << 32; 3944 } 3945 3946 put_cpu(); 3947} 3948 3949/* Free all buffers from the pool */ 3950static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv, 3951 struct mvpp2_bm_pool *bm_pool) 3952{ 3953 int i; 3954 3955 for (i = 0; i < bm_pool->buf_num; i++) { 3956 dma_addr_t buf_dma_addr; 3957 phys_addr_t buf_phys_addr; 3958 void *data; 3959 3960 mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool, 3961 &buf_dma_addr, &buf_phys_addr); 3962 3963 dma_unmap_single(dev, buf_dma_addr, 3964 bm_pool->buf_size, DMA_FROM_DEVICE); 3965 3966 data = (void *)phys_to_virt(buf_phys_addr); 3967 if (!data) 3968 break; 3969 3970 mvpp2_frag_free(bm_pool, data); 3971 } 3972 3973 /* Update BM driver with number of buffers removed from pool */ 3974 bm_pool->buf_num -= i; 3975} 3976 3977/* Cleanup pool */ 3978static int mvpp2_bm_pool_destroy(struct platform_device *pdev, 3979 struct mvpp2 *priv, 3980 struct mvpp2_bm_pool *bm_pool) 3981{ 3982 u32 val; 3983 3984 mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool); 3985 if (bm_pool->buf_num) { 3986 WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id); 3987 return 0; 3988 } 3989 3990 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id)); 3991 val |= MVPP2_BM_STOP_MASK; 3992 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val); 3993 3994 dma_free_coherent(&pdev->dev, bm_pool->size_bytes, 3995 bm_pool->virt_addr, 3996 bm_pool->dma_addr); 3997 return 0; 3998} 3999 4000static int mvpp2_bm_pools_init(struct platform_device *pdev, 4001 struct mvpp2 *priv) 4002{ 4003 int i, err, size; 4004 struct mvpp2_bm_pool *bm_pool; 4005 4006 /* Create all pools with maximum size */ 4007 size = MVPP2_BM_POOL_SIZE_MAX; 4008 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { 4009 bm_pool = &priv->bm_pools[i]; 4010 bm_pool->id = i; 4011 err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size); 4012 if (err) 4013 goto err_unroll_pools; 4014 mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0); 4015 } 4016 return 0; 4017 4018err_unroll_pools: 4019 dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size); 4020 for (i = i - 1; i >= 0; i--) 4021 mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]); 4022 return err; 4023} 4024 4025static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv) 4026{ 4027 int i, err; 4028 4029 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { 4030 /* Mask BM all interrupts */ 4031 mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0); 4032 /* Clear BM cause register */ 4033 mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0); 4034 } 4035 4036 /* Allocate and initialize BM pools */ 4037 priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM, 4038 sizeof(*priv->bm_pools), GFP_KERNEL); 4039 if (!priv->bm_pools) 4040 return -ENOMEM; 4041 4042 err = mvpp2_bm_pools_init(pdev, priv); 4043 if 
(err < 0) 4044 return err; 4045 return 0; 4046} 4047 4048/* Attach long pool to rxq */ 4049static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port, 4050 int lrxq, int long_pool) 4051{ 4052 u32 val, mask; 4053 int prxq; 4054 4055 /* Get queue physical ID */ 4056 prxq = port->rxqs[lrxq]->id; 4057 4058 if (port->priv->hw_version == MVPP21) 4059 mask = MVPP21_RXQ_POOL_LONG_MASK; 4060 else 4061 mask = MVPP22_RXQ_POOL_LONG_MASK; 4062 4063 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); 4064 val &= ~mask; 4065 val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask; 4066 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); 4067} 4068 4069/* Attach short pool to rxq */ 4070static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port, 4071 int lrxq, int short_pool) 4072{ 4073 u32 val, mask; 4074 int prxq; 4075 4076 /* Get queue physical ID */ 4077 prxq = port->rxqs[lrxq]->id; 4078 4079 if (port->priv->hw_version == MVPP21) 4080 mask = MVPP21_RXQ_POOL_SHORT_MASK; 4081 else 4082 mask = MVPP22_RXQ_POOL_SHORT_MASK; 4083 4084 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); 4085 val &= ~mask; 4086 val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask; 4087 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); 4088} 4089 4090static void *mvpp2_buf_alloc(struct mvpp2_port *port, 4091 struct mvpp2_bm_pool *bm_pool, 4092 dma_addr_t *buf_dma_addr, 4093 phys_addr_t *buf_phys_addr, 4094 gfp_t gfp_mask) 4095{ 4096 dma_addr_t dma_addr; 4097 void *data; 4098 4099 data = mvpp2_frag_alloc(bm_pool); 4100 if (!data) 4101 return NULL; 4102 4103 dma_addr = dma_map_single(port->dev->dev.parent, data, 4104 MVPP2_RX_BUF_SIZE(bm_pool->pkt_size), 4105 DMA_FROM_DEVICE); 4106 if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) { 4107 mvpp2_frag_free(bm_pool, data); 4108 return NULL; 4109 } 4110 *buf_dma_addr = dma_addr; 4111 *buf_phys_addr = virt_to_phys(data); 4112 4113 return data; 4114} 4115 4116/* Release buffer to BM */ 4117static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, 4118 dma_addr_t buf_dma_addr, 4119 phys_addr_t buf_phys_addr) 4120{ 4121 int cpu = get_cpu(); 4122 4123 if (port->priv->hw_version == MVPP22) { 4124 u32 val = 0; 4125 4126 if (sizeof(dma_addr_t) == 8) 4127 val |= upper_32_bits(buf_dma_addr) & 4128 MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK; 4129 4130 if (sizeof(phys_addr_t) == 8) 4131 val |= (upper_32_bits(buf_phys_addr) 4132 << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) & 4133 MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK; 4134 4135 mvpp2_percpu_write(port->priv, cpu, 4136 MVPP22_BM_ADDR_HIGH_RLS_REG, val); 4137 } 4138 4139 /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply 4140 * returned in the "cookie" field of the RX 4141 * descriptor. 
Instead of storing the virtual address, we 4142 * store the physical address 4143 */ 4144 mvpp2_percpu_write(port->priv, cpu, 4145 MVPP2_BM_VIRT_RLS_REG, buf_phys_addr); 4146 mvpp2_percpu_write(port->priv, cpu, 4147 MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr); 4148 4149 put_cpu(); 4150} 4151 4152/* Allocate buffers for the pool */ 4153static int mvpp2_bm_bufs_add(struct mvpp2_port *port, 4154 struct mvpp2_bm_pool *bm_pool, int buf_num) 4155{ 4156 int i, buf_size, total_size; 4157 dma_addr_t dma_addr; 4158 phys_addr_t phys_addr; 4159 void *buf; 4160 4161 buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size); 4162 total_size = MVPP2_RX_TOTAL_SIZE(buf_size); 4163 4164 if (buf_num < 0 || 4165 (buf_num + bm_pool->buf_num > bm_pool->size)) { 4166 netdev_err(port->dev, 4167 "cannot allocate %d buffers for pool %d\n", 4168 buf_num, bm_pool->id); 4169 return 0; 4170 } 4171 4172 for (i = 0; i < buf_num; i++) { 4173 buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, 4174 &phys_addr, GFP_KERNEL); 4175 if (!buf) 4176 break; 4177 4178 mvpp2_bm_pool_put(port, bm_pool->id, dma_addr, 4179 phys_addr); 4180 } 4181 4182 /* Update BM driver with number of buffers added to pool */ 4183 bm_pool->buf_num += i; 4184 4185 netdev_dbg(port->dev, 4186 "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n", 4187 bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long", 4188 bm_pool->id, bm_pool->pkt_size, buf_size, total_size); 4189 4190 netdev_dbg(port->dev, 4191 "%s pool %d: %d of %d buffers added\n", 4192 bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long", 4193 bm_pool->id, i, buf_num); 4194 return i; 4195} 4196 4197/* Notify the driver that the BM pool is being used as a specific type and return 4198 * the pool pointer on success 4199 */ 4200static struct mvpp2_bm_pool * 4201mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type, 4202 int pkt_size) 4203{ 4204 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool]; 4205 int num; 4206 4207 if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) { 4208 netdev_err(port->dev, "mixing pool types is forbidden\n"); 4209 return NULL; 4210 } 4211 4212 if (new_pool->type == MVPP2_BM_FREE) 4213 new_pool->type = type; 4214 4215 /* Allocate buffers in case the BM pool is used as a long pool, but the packet 4216 * size doesn't match the MTU or the BM pool hasn't been used yet 4217 */ 4218 if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) || 4219 (new_pool->pkt_size == 0)) { 4220 int pkts_num; 4221 4222 /* Set default buffer number or free all the buffers in case 4223 * the pool is not empty 4224 */ 4225 pkts_num = new_pool->buf_num; 4226 if (pkts_num == 0) 4227 pkts_num = type == MVPP2_BM_SWF_LONG ?
4228 MVPP2_BM_LONG_BUF_NUM : 4229 MVPP2_BM_SHORT_BUF_NUM; 4230 else 4231 mvpp2_bm_bufs_free(port->dev->dev.parent, 4232 port->priv, new_pool); 4233 4234 new_pool->pkt_size = pkt_size; 4235 new_pool->frag_size = 4236 SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) + 4237 MVPP2_SKB_SHINFO_SIZE; 4238 4239 /* Allocate buffers for this pool */ 4240 num = mvpp2_bm_bufs_add(port, new_pool, pkts_num); 4241 if (num != pkts_num) { 4242 WARN(1, "pool %d: %d of %d allocated\n", 4243 new_pool->id, num, pkts_num); 4244 return NULL; 4245 } 4246 } 4247 4248 mvpp2_bm_pool_bufsize_set(port->priv, new_pool, 4249 MVPP2_RX_BUF_SIZE(new_pool->pkt_size)); 4250 4251 return new_pool; 4252} 4253 4254/* Initialize pools for swf */ 4255static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) 4256{ 4257 int rxq; 4258 4259 if (!port->pool_long) { 4260 port->pool_long = 4261 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id), 4262 MVPP2_BM_SWF_LONG, 4263 port->pkt_size); 4264 if (!port->pool_long) 4265 return -ENOMEM; 4266 4267 port->pool_long->port_map |= (1 << port->id); 4268 4269 for (rxq = 0; rxq < port->nrxqs; rxq++) 4270 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id); 4271 } 4272 4273 if (!port->pool_short) { 4274 port->pool_short = 4275 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_SHORT_POOL, 4276 MVPP2_BM_SWF_SHORT, 4277 MVPP2_BM_SHORT_PKT_SIZE); 4278 if (!port->pool_short) 4279 return -ENOMEM; 4280 4281 port->pool_short->port_map |= (1 << port->id); 4282 4283 for (rxq = 0; rxq < port->nrxqs; rxq++) 4284 mvpp2_rxq_short_pool_set(port, rxq, 4285 port->pool_short->id); 4286 } 4287 4288 return 0; 4289} 4290 4291static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu) 4292{ 4293 struct mvpp2_port *port = netdev_priv(dev); 4294 struct mvpp2_bm_pool *port_pool = port->pool_long; 4295 int num, pkts_num = port_pool->buf_num; 4296 int pkt_size = MVPP2_RX_PKT_SIZE(mtu); 4297 4298 /* Update BM pool with new buffer size */ 4299 mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool); 4300 if (port_pool->buf_num) { 4301 WARN(1, "cannot free all buffers in pool %d\n", port_pool->id); 4302 return -EIO; 4303 } 4304 4305 port_pool->pkt_size = pkt_size; 4306 port_pool->frag_size = SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) + 4307 MVPP2_SKB_SHINFO_SIZE; 4308 num = mvpp2_bm_bufs_add(port, port_pool, pkts_num); 4309 if (num != pkts_num) { 4310 WARN(1, "pool %d: %d of %d allocated\n", 4311 port_pool->id, num, pkts_num); 4312 return -EIO; 4313 } 4314 4315 mvpp2_bm_pool_bufsize_set(port->priv, port_pool, 4316 MVPP2_RX_BUF_SIZE(port_pool->pkt_size)); 4317 dev->mtu = mtu; 4318 netdev_update_features(dev); 4319 return 0; 4320} 4321 4322static inline void mvpp2_interrupts_enable(struct mvpp2_port *port) 4323{ 4324 int i, sw_thread_mask = 0; 4325 4326 for (i = 0; i < port->nqvecs; i++) 4327 sw_thread_mask |= port->qvecs[i].sw_thread_mask; 4328 4329 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), 4330 MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask)); 4331} 4332 4333static inline void mvpp2_interrupts_disable(struct mvpp2_port *port) 4334{ 4335 int i, sw_thread_mask = 0; 4336 4337 for (i = 0; i < port->nqvecs; i++) 4338 sw_thread_mask |= port->qvecs[i].sw_thread_mask; 4339 4340 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), 4341 MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask)); 4342} 4343 4344static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec) 4345{ 4346 struct mvpp2_port *port = qvec->port; 4347 4348 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), 4349 
MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask)); 4350} 4351 4352static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec) 4353{ 4354 struct mvpp2_port *port = qvec->port; 4355 4356 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), 4357 MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask)); 4358} 4359 4360/* Mask the current CPU's Rx/Tx interrupts 4361 * Called by on_each_cpu(), guaranteed to run with migration disabled, 4362 * using smp_processor_id() is OK. 4363 */ 4364static void mvpp2_interrupts_mask(void *arg) 4365{ 4366 struct mvpp2_port *port = arg; 4367 4368 mvpp2_percpu_write(port->priv, smp_processor_id(), 4369 MVPP2_ISR_RX_TX_MASK_REG(port->id), 0); 4370} 4371 4372/* Unmask the current CPU's Rx/Tx interrupts. 4373 * Called by on_each_cpu(), guaranteed to run with migration disabled, 4374 * using smp_processor_id() is OK. 4375 */ 4376static void mvpp2_interrupts_unmask(void *arg) 4377{ 4378 struct mvpp2_port *port = arg; 4379 u32 val; 4380 4381 val = MVPP2_CAUSE_MISC_SUM_MASK | 4382 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; 4383 if (port->has_tx_irqs) 4384 val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; 4385 4386 mvpp2_percpu_write(port->priv, smp_processor_id(), 4387 MVPP2_ISR_RX_TX_MASK_REG(port->id), val); 4388} 4389 4390static void 4391mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask) 4392{ 4393 u32 val; 4394 int i; 4395 4396 if (port->priv->hw_version != MVPP22) 4397 return; 4398 4399 if (mask) 4400 val = 0; 4401 else 4402 val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; 4403 4404 for (i = 0; i < port->nqvecs; i++) { 4405 struct mvpp2_queue_vector *v = port->qvecs + i; 4406 4407 if (v->type != MVPP2_QUEUE_VECTOR_SHARED) 4408 continue; 4409 4410 mvpp2_percpu_write(port->priv, v->sw_thread_id, 4411 MVPP2_ISR_RX_TX_MASK_REG(port->id), val); 4412 } 4413} 4414 4415/* Port configuration routines */ 4416 4417static void mvpp22_gop_init_rgmii(struct mvpp2_port *port) 4418{ 4419 struct mvpp2 *priv = port->priv; 4420 u32 val; 4421 4422 regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val); 4423 val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT; 4424 regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val); 4425 4426 regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val); 4427 if (port->gop_id == 2) 4428 val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII; 4429 else if (port->gop_id == 3) 4430 val |= GENCONF_CTRL0_PORT1_RGMII_MII; 4431 regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val); 4432} 4433 4434static void mvpp22_gop_init_sgmii(struct mvpp2_port *port) 4435{ 4436 struct mvpp2 *priv = port->priv; 4437 u32 val; 4438 4439 regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val); 4440 val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT | 4441 GENCONF_PORT_CTRL0_RX_DATA_SAMPLE; 4442 regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val); 4443 4444 if (port->gop_id > 1) { 4445 regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val); 4446 if (port->gop_id == 2) 4447 val &= ~GENCONF_CTRL0_PORT0_RGMII; 4448 else if (port->gop_id == 3) 4449 val &= ~GENCONF_CTRL0_PORT1_RGMII_MII; 4450 regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val); 4451 } 4452} 4453 4454static void mvpp22_gop_init_10gkr(struct mvpp2_port *port) 4455{ 4456 struct mvpp2 *priv = port->priv; 4457 void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id); 4458 void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id); 4459 u32 val; 4460 4461 /* XPCS */ 4462 val = readl(xpcs + MVPP22_XPCS_CFG0); 4463 val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) | 4464 
MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3)); 4465 val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2); 4466 writel(val, xpcs + MVPP22_XPCS_CFG0); 4467 4468 /* MPCS */ 4469 val = readl(mpcs + MVPP22_MPCS_CTRL); 4470 val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN; 4471 writel(val, mpcs + MVPP22_MPCS_CTRL); 4472 4473 val = readl(mpcs + MVPP22_MPCS_CLK_RESET); 4474 val &= ~(MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7) | MAC_CLK_RESET_MAC | 4475 MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX); 4476 val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1); 4477 writel(val, mpcs + MVPP22_MPCS_CLK_RESET); 4478 4479 val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET; 4480 val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX; 4481 writel(val, mpcs + MVPP22_MPCS_CLK_RESET); 4482} 4483 4484static int mvpp22_gop_init(struct mvpp2_port *port) 4485{ 4486 struct mvpp2 *priv = port->priv; 4487 u32 val; 4488 4489 if (!priv->sysctrl_base) 4490 return 0; 4491 4492 switch (port->phy_interface) { 4493 case PHY_INTERFACE_MODE_RGMII: 4494 case PHY_INTERFACE_MODE_RGMII_ID: 4495 case PHY_INTERFACE_MODE_RGMII_RXID: 4496 case PHY_INTERFACE_MODE_RGMII_TXID: 4497 if (port->gop_id == 0) 4498 goto invalid_conf; 4499 mvpp22_gop_init_rgmii(port); 4500 break; 4501 case PHY_INTERFACE_MODE_SGMII: 4502 mvpp22_gop_init_sgmii(port); 4503 break; 4504 case PHY_INTERFACE_MODE_10GKR: 4505 if (port->gop_id != 0) 4506 goto invalid_conf; 4507 mvpp22_gop_init_10gkr(port); 4508 break; 4509 default: 4510 goto unsupported_conf; 4511 } 4512 4513 regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val); 4514 val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) | 4515 GENCONF_PORT_CTRL1_EN(port->gop_id); 4516 regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val); 4517 4518 regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val); 4519 val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR; 4520 regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val); 4521 4522 regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val); 4523 val |= GENCONF_SOFT_RESET1_GOP; 4524 regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val); 4525 4526unsupported_conf: 4527 return 0; 4528 4529invalid_conf: 4530 netdev_err(port->dev, "Invalid port configuration\n"); 4531 return -EINVAL; 4532} 4533 4534static void mvpp22_gop_unmask_irq(struct mvpp2_port *port) 4535{ 4536 u32 val; 4537 4538 if (phy_interface_mode_is_rgmii(port->phy_interface) || 4539 port->phy_interface == PHY_INTERFACE_MODE_SGMII) { 4540 /* Enable the GMAC link status irq for this port */ 4541 val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK); 4542 val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT; 4543 writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK); 4544 } 4545 4546 if (port->gop_id == 0) { 4547 /* Enable the XLG/GIG irqs for this port */ 4548 val = readl(port->base + MVPP22_XLG_EXT_INT_MASK); 4549 if (port->phy_interface == PHY_INTERFACE_MODE_10GKR) 4550 val |= MVPP22_XLG_EXT_INT_MASK_XLG; 4551 else 4552 val |= MVPP22_XLG_EXT_INT_MASK_GIG; 4553 writel(val, port->base + MVPP22_XLG_EXT_INT_MASK); 4554 } 4555} 4556 4557static void mvpp22_gop_mask_irq(struct mvpp2_port *port) 4558{ 4559 u32 val; 4560 4561 if (port->gop_id == 0) { 4562 val = readl(port->base + MVPP22_XLG_EXT_INT_MASK); 4563 val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG | 4564 MVPP22_XLG_EXT_INT_MASK_GIG); 4565 writel(val, port->base + MVPP22_XLG_EXT_INT_MASK); 4566 } 4567 4568 if (phy_interface_mode_is_rgmii(port->phy_interface) || 4569 port->phy_interface == PHY_INTERFACE_MODE_SGMII) { 4570 val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK); 4571 val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT; 4572 writel(val, 
port->base + MVPP22_GMAC_INT_SUM_MASK); 4573 } 4574} 4575 4576static void mvpp22_gop_setup_irq(struct mvpp2_port *port) 4577{ 4578 u32 val; 4579 4580 if (phy_interface_mode_is_rgmii(port->phy_interface) || 4581 port->phy_interface == PHY_INTERFACE_MODE_SGMII) { 4582 val = readl(port->base + MVPP22_GMAC_INT_MASK); 4583 val |= MVPP22_GMAC_INT_MASK_LINK_STAT; 4584 writel(val, port->base + MVPP22_GMAC_INT_MASK); 4585 } 4586 4587 if (port->gop_id == 0) { 4588 val = readl(port->base + MVPP22_XLG_INT_MASK); 4589 val |= MVPP22_XLG_INT_MASK_LINK; 4590 writel(val, port->base + MVPP22_XLG_INT_MASK); 4591 } 4592 4593 mvpp22_gop_unmask_irq(port); 4594} 4595 4596static int mvpp22_comphy_init(struct mvpp2_port *port) 4597{ 4598 enum phy_mode mode; 4599 int ret; 4600 4601 if (!port->comphy) 4602 return 0; 4603 4604 switch (port->phy_interface) { 4605 case PHY_INTERFACE_MODE_SGMII: 4606 mode = PHY_MODE_SGMII; 4607 break; 4608 case PHY_INTERFACE_MODE_10GKR: 4609 mode = PHY_MODE_10GKR; 4610 break; 4611 default: 4612 return -EINVAL; 4613 } 4614 4615 ret = phy_set_mode(port->comphy, mode); 4616 if (ret) 4617 return ret; 4618 4619 return phy_power_on(port->comphy); 4620} 4621 4622static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port) 4623{ 4624 u32 val; 4625 4626 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) { 4627 val = readl(port->base + MVPP22_GMAC_CTRL_4_REG); 4628 val |= MVPP22_CTRL4_SYNC_BYPASS_DIS | MVPP22_CTRL4_DP_CLK_SEL | 4629 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; 4630 val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL; 4631 writel(val, port->base + MVPP22_GMAC_CTRL_4_REG); 4632 } else if (phy_interface_mode_is_rgmii(port->phy_interface)) { 4633 val = readl(port->base + MVPP22_GMAC_CTRL_4_REG); 4634 val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL | 4635 MVPP22_CTRL4_SYNC_BYPASS_DIS | 4636 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; 4637 val &= ~MVPP22_CTRL4_DP_CLK_SEL; 4638 writel(val, port->base + MVPP22_GMAC_CTRL_4_REG); 4639 } 4640 4641 /* The port is connected to a copper PHY */ 4642 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); 4643 val &= ~MVPP2_GMAC_PORT_TYPE_MASK; 4644 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); 4645 4646 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); 4647 val |= MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS | 4648 MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG | 4649 MVPP2_GMAC_AN_DUPLEX_EN; 4650 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) 4651 val |= MVPP2_GMAC_IN_BAND_AUTONEG; 4652 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); 4653} 4654 4655static void mvpp2_port_mii_gmac_configure(struct mvpp2_port *port) 4656{ 4657 u32 val; 4658 4659 /* Force link down */ 4660 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); 4661 val &= ~MVPP2_GMAC_FORCE_LINK_PASS; 4662 val |= MVPP2_GMAC_FORCE_LINK_DOWN; 4663 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); 4664 4665 /* Set the GMAC in a reset state */ 4666 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); 4667 val |= MVPP2_GMAC_PORT_RESET_MASK; 4668 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); 4669 4670 /* Configure the PCS and in-band AN */ 4671 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); 4672 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) { 4673 val |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK; 4674 } else if (phy_interface_mode_is_rgmii(port->phy_interface)) { 4675 val &= ~MVPP2_GMAC_PCS_ENABLE_MASK; 4676 } 4677 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); 4678 4679 mvpp2_port_mii_gmac_configure_mode(port); 4680 4681 /* Unset the GMAC reset state */ 4682 val = readl(port->base + 
MVPP2_GMAC_CTRL_2_REG); 4683 val &= ~MVPP2_GMAC_PORT_RESET_MASK; 4684 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); 4685 4686 /* Stop forcing link down */ 4687 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); 4688 val &= ~MVPP2_GMAC_FORCE_LINK_DOWN; 4689 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); 4690} 4691 4692static void mvpp2_port_mii_xlg_configure(struct mvpp2_port *port) 4693{ 4694 u32 val; 4695 4696 if (port->gop_id != 0) 4697 return; 4698 4699 val = readl(port->base + MVPP22_XLG_CTRL0_REG); 4700 val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN; 4701 writel(val, port->base + MVPP22_XLG_CTRL0_REG); 4702 4703 val = readl(port->base + MVPP22_XLG_CTRL4_REG); 4704 val &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC; 4705 val |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC; 4706 writel(val, port->base + MVPP22_XLG_CTRL4_REG); 4707} 4708 4709static void mvpp22_port_mii_set(struct mvpp2_port *port) 4710{ 4711 u32 val; 4712 4713 /* Only GOP port 0 has an XLG MAC */ 4714 if (port->gop_id == 0) { 4715 val = readl(port->base + MVPP22_XLG_CTRL3_REG); 4716 val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK; 4717 4718 if (port->phy_interface == PHY_INTERFACE_MODE_XAUI || 4719 port->phy_interface == PHY_INTERFACE_MODE_10GKR) 4720 val |= MVPP22_XLG_CTRL3_MACMODESELECT_10G; 4721 else 4722 val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC; 4723 4724 writel(val, port->base + MVPP22_XLG_CTRL3_REG); 4725 } 4726} 4727 4728static void mvpp2_port_mii_set(struct mvpp2_port *port) 4729{ 4730 if (port->priv->hw_version == MVPP22) 4731 mvpp22_port_mii_set(port); 4732 4733 if (phy_interface_mode_is_rgmii(port->phy_interface) || 4734 port->phy_interface == PHY_INTERFACE_MODE_SGMII) 4735 mvpp2_port_mii_gmac_configure(port); 4736 else if (port->phy_interface == PHY_INTERFACE_MODE_10GKR) 4737 mvpp2_port_mii_xlg_configure(port); 4738} 4739 4740static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port) 4741{ 4742 u32 val; 4743 4744 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); 4745 val |= MVPP2_GMAC_FC_ADV_EN; 4746 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); 4747} 4748 4749static void mvpp2_port_enable(struct mvpp2_port *port) 4750{ 4751 u32 val; 4752 4753 /* Only GOP port 0 has an XLG MAC */ 4754 if (port->gop_id == 0 && 4755 (port->phy_interface == PHY_INTERFACE_MODE_XAUI || 4756 port->phy_interface == PHY_INTERFACE_MODE_10GKR)) { 4757 val = readl(port->base + MVPP22_XLG_CTRL0_REG); 4758 val |= MVPP22_XLG_CTRL0_PORT_EN | 4759 MVPP22_XLG_CTRL0_MAC_RESET_DIS; 4760 val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS; 4761 writel(val, port->base + MVPP22_XLG_CTRL0_REG); 4762 } else { 4763 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); 4764 val |= MVPP2_GMAC_PORT_EN_MASK; 4765 val |= MVPP2_GMAC_MIB_CNTR_EN_MASK; 4766 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); 4767 } 4768} 4769 4770static void mvpp2_port_disable(struct mvpp2_port *port) 4771{ 4772 u32 val; 4773 4774 /* Only GOP port 0 has an XLG MAC */ 4775 if (port->gop_id == 0 && 4776 (port->phy_interface == PHY_INTERFACE_MODE_XAUI || 4777 port->phy_interface == PHY_INTERFACE_MODE_10GKR)) { 4778 val = readl(port->base + MVPP22_XLG_CTRL0_REG); 4779 val &= ~(MVPP22_XLG_CTRL0_PORT_EN | 4780 MVPP22_XLG_CTRL0_MAC_RESET_DIS); 4781 writel(val, port->base + MVPP22_XLG_CTRL0_REG); 4782 } else { 4783 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); 4784 val &= ~(MVPP2_GMAC_PORT_EN_MASK); 4785 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); 4786 } 4787} 4788 4789/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */ 4790static void 
mvpp2_port_periodic_xon_disable(struct mvpp2_port *port) 4791 { 4792 u32 val; 4793 4794 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) & 4795 ~MVPP2_GMAC_PERIODIC_XON_EN_MASK; 4796 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG); 4797} 4798 4799/* Configure loopback port */ 4800static void mvpp2_port_loopback_set(struct mvpp2_port *port) 4801{ 4802 u32 val; 4803 4804 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG); 4805 4806 if (port->speed == 1000) 4807 val |= MVPP2_GMAC_GMII_LB_EN_MASK; 4808 else 4809 val &= ~MVPP2_GMAC_GMII_LB_EN_MASK; 4810 4811 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) 4812 val |= MVPP2_GMAC_PCS_LB_EN_MASK; 4813 else 4814 val &= ~MVPP2_GMAC_PCS_LB_EN_MASK; 4815 4816 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG); 4817} 4818 4819struct mvpp2_ethtool_counter { 4820 unsigned int offset; 4821 const char string[ETH_GSTRING_LEN]; 4822 bool reg_is_64b; 4823}; 4824 4825static u64 mvpp2_read_count(struct mvpp2_port *port, 4826 const struct mvpp2_ethtool_counter *counter) 4827{ 4828 u64 val; 4829 4830 val = readl(port->stats_base + counter->offset); 4831 if (counter->reg_is_64b) 4832 val += (u64)readl(port->stats_base + counter->offset + 4) << 32; 4833 4834 return val; 4835} 4836 4837/* Due to the fact that software statistics and hardware statistics are, by 4838 * design, incremented at different moments in the chain of packet processing, 4839 * it is very likely that incoming packets could have been dropped after being 4840 * counted by hardware but before reaching software statistics (most probably 4841 * multicast packets), and in the opposite direction, during transmission, FCS 4842 * bytes are added in between, and TSO skbs are split with header bytes added. 4843 * Hence, statistics gathered from userspace with ifconfig (software) and 4844 * ethtool (hardware) cannot be compared.
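 * As a concrete example, a single TSO skb counted once by the software statistics may appear in the MIB counters as several frames, each carrying its own FCS bytes, so "good_octets_sent" can legitimately exceed the byte count reported by ifconfig.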
4845 */ 4846static const struct mvpp2_ethtool_counter mvpp2_ethtool_regs[] = { 4847 { MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true }, 4848 { MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" }, 4849 { MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" }, 4850 { MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" }, 4851 { MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" }, 4852 { MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" }, 4853 { MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" }, 4854 { MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" }, 4855 { MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" }, 4856 { MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" }, 4857 { MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" }, 4858 { MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" }, 4859 { MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true }, 4860 { MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" }, 4861 { MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" }, 4862 { MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" }, 4863 { MVPP2_MIB_FC_SENT, "fc_sent" }, 4864 { MVPP2_MIB_FC_RCVD, "fc_received" }, 4865 { MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" }, 4866 { MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" }, 4867 { MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" }, 4868 { MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" }, 4869 { MVPP2_MIB_JABBER_RCVD, "jabber_received" }, 4870 { MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" }, 4871 { MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" }, 4872 { MVPP2_MIB_COLLISION, "collision" }, 4873 { MVPP2_MIB_LATE_COLLISION, "late_collision" }, 4874}; 4875 4876static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset, 4877 u8 *data) 4878{ 4879 if (sset == ETH_SS_STATS) { 4880 int i; 4881 4882 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++) 4883 memcpy(data + i * ETH_GSTRING_LEN, 4884 &mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN); 4885 } 4886} 4887 4888static void mvpp2_gather_hw_statistics(struct work_struct *work) 4889{ 4890 struct delayed_work *del_work = to_delayed_work(work); 4891 struct mvpp2_port *port = container_of(del_work, struct mvpp2_port, 4892 stats_work); 4893 u64 *pstats; 4894 int i; 4895 4896 mutex_lock(&port->gather_stats_lock); 4897 4898 pstats = port->ethtool_stats; 4899 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++) 4900 *pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_regs[i]); 4901 4902 /* No need to read the counters again right after this function if it 4903 * was called asynchronously by the user (i.e. via ethtool). 4904 */ 4905 cancel_delayed_work(&port->stats_work); 4906 queue_delayed_work(port->priv->stats_queue, &port->stats_work, 4907 MVPP2_MIB_COUNTERS_STATS_DELAY); 4908 4909 mutex_unlock(&port->gather_stats_lock); 4910} 4911 4912static void mvpp2_ethtool_get_stats(struct net_device *dev, 4913 struct ethtool_stats *stats, u64 *data) 4914{ 4915 struct mvpp2_port *port = netdev_priv(dev); 4916 4917 /* Update statistics for the given port, then take the lock to avoid 4918 * concurrent accesses on the ethtool_stats structure during its copy.
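 * mvpp2_gather_hw_statistics() takes the same gather_stats_lock while accumulating the counters, so this synchronous ethtool request and the periodic stats_work refresh serialize on one mutex.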
4919 */ 4920 mvpp2_gather_hw_statistics(&port->stats_work.work); 4921 4922 mutex_lock(&port->gather_stats_lock); 4923 memcpy(data, port->ethtool_stats, 4924 sizeof(u64) * ARRAY_SIZE(mvpp2_ethtool_regs)); 4925 mutex_unlock(&port->gather_stats_lock); 4926} 4927 4928static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset) 4929{ 4930 if (sset == ETH_SS_STATS) 4931 return ARRAY_SIZE(mvpp2_ethtool_regs); 4932 4933 return -EOPNOTSUPP; 4934} 4935 4936static void mvpp2_port_reset(struct mvpp2_port *port) 4937{ 4938 u32 val; 4939 unsigned int i; 4940 4941 /* Read the GOP statistics to reset the hardware counters */ 4942 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++) 4943 mvpp2_read_count(port, &mvpp2_ethtool_regs[i]); 4944 4945 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) & 4946 ~MVPP2_GMAC_PORT_RESET_MASK; 4947 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); 4948 4949 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) & 4950 MVPP2_GMAC_PORT_RESET_MASK) 4951 continue; 4952} 4953 4954/* Change maximum receive size of the port */ 4955static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port) 4956{ 4957 u32 val; 4958 4959 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); 4960 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK; 4961 val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) << 4962 MVPP2_GMAC_MAX_RX_SIZE_OFFS); 4963 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); 4964} 4965 4966/* Change maximum receive size of the port */ 4967static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port) 4968{ 4969 u32 val; 4970 4971 val = readl(port->base + MVPP22_XLG_CTRL1_REG); 4972 val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK; 4973 val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) << 4974 MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS; 4975 writel(val, port->base + MVPP22_XLG_CTRL1_REG); 4976} 4977 4978/* Set defaults to the MVPP2 port */ 4979static void mvpp2_defaults_set(struct mvpp2_port *port) 4980{ 4981 int tx_port_num, val, queue, ptxq, lrxq; 4982 4983 if (port->priv->hw_version == MVPP21) { 4984 /* Configure port to loopback if needed */ 4985 if (port->flags & MVPP2_F_LOOPBACK) 4986 mvpp2_port_loopback_set(port); 4987 4988 /* Update TX FIFO MIN Threshold */ 4989 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); 4990 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK; 4991 /* Min. 
TX threshold must be less than minimal packet length */ 4992 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2); 4993 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); 4994 } 4995 4996 /* Disable Legacy WRR, Disable EJP, Release from reset */ 4997 tx_port_num = mvpp2_egress_port(port); 4998 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, 4999 tx_port_num); 5000 mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0); 5001 5002 /* Close bandwidth for all queues */ 5003 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) { 5004 ptxq = mvpp2_txq_phys(port->id, queue); 5005 mvpp2_write(port->priv, 5006 MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0); 5007 } 5008 5009 /* Set refill period to 1 usec, refill tokens 5010 * and bucket size to maximum 5011 */ 5012 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG, 5013 port->priv->tclk / USEC_PER_SEC); 5014 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG); 5015 val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK; 5016 val |= MVPP2_TXP_REFILL_PERIOD_MASK(1); 5017 val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK; 5018 mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val); 5019 val = MVPP2_TXP_TOKEN_SIZE_MAX; 5020 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val); 5021 5022 /* Set MaximumLowLatencyPacketSize value to 256 */ 5023 mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id), 5024 MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK | 5025 MVPP2_RX_LOW_LATENCY_PKT_SIZE(256)); 5026 5027 /* Enable Rx cache snoop */ 5028 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { 5029 queue = port->rxqs[lrxq]->id; 5030 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); 5031 val |= MVPP2_SNOOP_PKT_SIZE_MASK | 5032 MVPP2_SNOOP_BUF_HDR_MASK; 5033 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); 5034 } 5035 5036 /* By default, mask all interrupts on all present CPUs */ 5037 mvpp2_interrupts_disable(port); 5038} 5039 5040/* Enable/disable receiving packets */ 5041static void mvpp2_ingress_enable(struct mvpp2_port *port) 5042{ 5043 u32 val; 5044 int lrxq, queue; 5045 5046 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { 5047 queue = port->rxqs[lrxq]->id; 5048 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); 5049 val &= ~MVPP2_RXQ_DISABLE_MASK; 5050 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); 5051 } 5052} 5053 5054static void mvpp2_ingress_disable(struct mvpp2_port *port) 5055{ 5056 u32 val; 5057 int lrxq, queue; 5058 5059 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { 5060 queue = port->rxqs[lrxq]->id; 5061 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); 5062 val |= MVPP2_RXQ_DISABLE_MASK; 5063 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); 5064 } 5065} 5066 5067/* Enable transmit via physical egress queue 5068 * - HW starts taking descriptors from DRAM 5069 */ 5070static void mvpp2_egress_enable(struct mvpp2_port *port) 5071{ 5072 u32 qmap; 5073 int queue; 5074 int tx_port_num = mvpp2_egress_port(port); 5075 5076 /* Enable all initialized TXs. */
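/* For illustration: the loop below builds a bitmap with bit N set for each txq N whose descriptor array has been allocated, and hands the whole map to MVPP2_TXP_SCHED_Q_CMD_REG in a single write. */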
5077 qmap = 0; 5078 for (queue = 0; queue < port->ntxqs; queue++) { 5079 struct mvpp2_tx_queue *txq = port->txqs[queue]; 5080 5081 if (txq->descs) 5082 qmap |= (1 << queue); 5083 } 5084 5085 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); 5086 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap); 5087} 5088 5089/* Disable transmit via physical egress queue 5090 * - HW doesn't take descriptors from DRAM 5091 */ 5092static void mvpp2_egress_disable(struct mvpp2_port *port) 5093{ 5094 u32 reg_data; 5095 int delay; 5096 int tx_port_num = mvpp2_egress_port(port); 5097 5098 /* Issue stop command for active channels only */ 5099 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); 5100 reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) & 5101 MVPP2_TXP_SCHED_ENQ_MASK; 5102 if (reg_data != 0) 5103 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, 5104 (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET)); 5105 5106 /* Wait for all Tx activity to terminate. */ 5107 delay = 0; 5108 do { 5109 if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) { 5110 netdev_warn(port->dev, 5111 "Tx stop timed out, status=0x%08x\n", 5112 reg_data); 5113 break; 5114 } 5115 mdelay(1); 5116 delay++; 5117 5118 /* Check in the port TX Command register that all 5119 * Tx queues are stopped 5120 */ 5121 reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG); 5122 } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK); 5123} 5124 5125/* Rx descriptors helper methods */ 5126 5127/* Get number of Rx descriptors occupied by received packets */ 5128static inline int 5129mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id) 5130{ 5131 u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id)); 5132 5133 return val & MVPP2_RXQ_OCCUPIED_MASK; 5134} 5135 5136/* Update Rx queue status with the number of occupied and available 5137 * Rx descriptor slots. 5138 */ 5139static inline void 5140mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id, 5141 int used_count, int free_count) 5142{ 5143 /* Decrement the number of used descriptors and increment 5144 * the number of free descriptors.
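 * Both updates are applied in one register write: used_count sits in the low bits and free_count is shifted up by MVPP2_RXQ_NUM_NEW_OFFSET, exactly as the computation of val below builds it.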
5145 */ 5146 u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET); 5147 5148 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val); 5149} 5150 5151/* Get pointer to next RX descriptor to be processed by SW */ 5152static inline struct mvpp2_rx_desc * 5153mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq) 5154{ 5155 int rx_desc = rxq->next_desc_to_proc; 5156 5157 rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc); 5158 prefetch(rxq->descs + rxq->next_desc_to_proc); 5159 return rxq->descs + rx_desc; 5160} 5161 5162/* Set rx queue offset */ 5163static void mvpp2_rxq_offset_set(struct mvpp2_port *port, 5164 int prxq, int offset) 5165{ 5166 u32 val; 5167 5168 /* Convert offset from bytes to units of 32 bytes */ 5169 offset = offset >> 5; 5170 5171 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); 5172 val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK; 5173 5174 /* Offset is in units of 32 bytes */ 5175 val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) & 5176 MVPP2_RXQ_PACKET_OFFSET_MASK); 5177 5178 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); 5179} 5180 5181/* Tx descriptors helper methods */ 5182 5183/* Get pointer to next Tx descriptor to be processed (send) by HW */ 5184static struct mvpp2_tx_desc * 5185mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq) 5186{ 5187 int tx_desc = txq->next_desc_to_proc; 5188 5189 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc); 5190 return txq->descs + tx_desc; 5191} 5192 5193/* Update HW with number of aggregated Tx descriptors to be sent 5194 * 5195 * Called only from mvpp2_tx(), so migration is disabled, using 5196 * smp_processor_id() is OK. 5197 */ 5198static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending) 5199{ 5200 /* aggregated access - relevant TXQ number is written in TX desc */ 5201 mvpp2_percpu_write(port->priv, smp_processor_id(), 5202 MVPP2_AGGR_TXQ_UPDATE_REG, pending); 5203} 5204 5205 5206/* Check if there are enough free descriptors in aggregated txq. 5207 * If not, update the number of occupied descriptors and repeat the check. 5208 * 5209 * Called only from mvpp2_tx(), so migration is disabled, using 5210 * smp_processor_id() is OK. 5211 */ 5212static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv, 5213 struct mvpp2_tx_queue *aggr_txq, int num) 5214{ 5215 if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) { 5216 /* Update number of occupied aggregated Tx descriptors */ 5217 int cpu = smp_processor_id(); 5218 u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu)); 5219 5220 aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK; 5221 } 5222 5223 if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) 5224 return -ENOMEM; 5225 5226 return 0; 5227} 5228 5229/* Reserved Tx descriptors allocation request 5230 * 5231 * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called 5232 * only by mvpp2_tx(), so migration is disabled, using 5233 * smp_processor_id() is OK. 5234 */ 5235static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv, 5236 struct mvpp2_tx_queue *txq, int num) 5237{ 5238 u32 val; 5239 int cpu = smp_processor_id(); 5240 5241 val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num; 5242 mvpp2_percpu_write(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val); 5243 5244 val = mvpp2_percpu_read(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG); 5245 5246 return val & MVPP2_TXQ_RSVD_RSLT_MASK; 5247} 5248 5249/* Check if there are enough reserved descriptors for transmission. 5250 * If not, request a chunk of reserved descriptors and check again.
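 * For illustration with assumed numbers: if this CPU needs num = 3 descriptors and currently has reserved_num = 0, it requests req = max(MVPP2_CPU_DESC_CHUNK, 3) descriptors in one go, provided the queue can still leave a full chunk for every present CPU (the desc_count check below).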
5251 */ 5252static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv, 5253 struct mvpp2_tx_queue *txq, 5254 struct mvpp2_txq_pcpu *txq_pcpu, 5255 int num) 5256{ 5257 int req, cpu, desc_count; 5258 5259 if (txq_pcpu->reserved_num >= num) 5260 return 0; 5261 5262 /* Not enough descriptors reserved! Update the reserved descriptor 5263 * count and check again. 5264 */ 5265 5266 desc_count = 0; 5267 /* Compute total of used descriptors */ 5268 for_each_present_cpu(cpu) { 5269 struct mvpp2_txq_pcpu *txq_pcpu_aux; 5270 5271 txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu); 5272 desc_count += txq_pcpu_aux->count; 5273 desc_count += txq_pcpu_aux->reserved_num; 5274 } 5275 5276 req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num); 5277 desc_count += req; 5278 5279 if (desc_count > 5280 (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK))) 5281 return -ENOMEM; 5282 5283 txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req); 5284 5285 /* OK, the descriptor count has been updated: check again. */ 5286 if (txq_pcpu->reserved_num < num) 5287 return -ENOMEM; 5288 return 0; 5289} 5290 5291/* Release the last allocated Tx descriptor. Useful to handle DMA 5292 * mapping failures in the Tx path. 5293 */ 5294static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq) 5295{ 5296 if (txq->next_desc_to_proc == 0) 5297 txq->next_desc_to_proc = txq->last_desc - 1; 5298 else 5299 txq->next_desc_to_proc--; 5300} 5301 5302/* Set Tx descriptors fields relevant for CSUM calculation */ 5303static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto, 5304 int ip_hdr_len, int l4_proto) 5305{ 5306 u32 command; 5307 5308 /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk, 5309 * G_L4_chk, L4_type required only for checksum calculation 5310 */ 5311 command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT); 5312 command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT); 5313 command |= MVPP2_TXD_IP_CSUM_DISABLE; 5314 5315 if (l3_proto == swab16(ETH_P_IP)) { 5316 command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */ 5317 command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */ 5318 } else { 5319 command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */ 5320 } 5321 5322 if (l4_proto == IPPROTO_TCP) { 5323 command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */ 5324 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */ 5325 } else if (l4_proto == IPPROTO_UDP) { 5326 command |= MVPP2_TXD_L4_UDP; /* enable UDP */ 5327 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */ 5328 } else { 5329 command |= MVPP2_TXD_L4_CSUM_NOT; 5330 } 5331 5332 return command; 5333} 5334 5335/* Get number of sent descriptors and decrement counter. 5336 * The number of sent descriptors is returned. 5337 * Per-CPU access 5338 * 5339 * Called only from mvpp2_txq_done(), called from mvpp2_tx() 5340 * (migration disabled) and from the TX completion tasklet (migration 5341 * disabled) so using smp_processor_id() is OK. 5342 */ 5343static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port, 5344 struct mvpp2_tx_queue *txq) 5345{ 5346 u32 val; 5347 5348 /* Reading status reg resets transmitted descriptor counter */ 5349 val = mvpp2_percpu_read(port->priv, smp_processor_id(), 5350 MVPP2_TXQ_SENT_REG(txq->id)); 5351 5352 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >> 5353 MVPP2_TRANSMITTED_COUNT_OFFSET; 5354} 5355 5356/* Called through on_each_cpu(), so runs on all CPUs, with migration 5357 * disabled, therefore using smp_processor_id() is OK.
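 * The read of MVPP2_TXQ_SENT_REG below is done purely for its side effect: reading the status register is what clears the per-CPU transmitted-descriptor counter (see mvpp2_txq_sent_desc_proc() above), so the return value is deliberately discarded.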
5358 */ 5359static void mvpp2_txq_sent_counter_clear(void *arg) 5360{ 5361 struct mvpp2_port *port = arg; 5362 int queue; 5363 5364 for (queue = 0; queue < port->ntxqs; queue++) { 5365 int id = port->txqs[queue]->id; 5366 5367 mvpp2_percpu_read(port->priv, smp_processor_id(), 5368 MVPP2_TXQ_SENT_REG(id)); 5369 } 5370} 5371 5372/* Set max sizes for Tx queues */ 5373static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port) 5374{ 5375 u32 val, size, mtu; 5376 int txq, tx_port_num; 5377 5378 mtu = port->pkt_size * 8; 5379 if (mtu > MVPP2_TXP_MTU_MAX) 5380 mtu = MVPP2_TXP_MTU_MAX; 5381 5382 /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */ 5383 mtu = 3 * mtu; 5384 5385 /* Indirect access to registers */ 5386 tx_port_num = mvpp2_egress_port(port); 5387 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); 5388 5389 /* Set MTU */ 5390 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG); 5391 val &= ~MVPP2_TXP_MTU_MAX; 5392 val |= mtu; 5393 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val); 5394 5395 /* TXP token size and all TXQs token size must be larger than MTU */ 5396 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG); 5397 size = val & MVPP2_TXP_TOKEN_SIZE_MAX; 5398 if (size < mtu) { 5399 size = mtu; 5400 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX; 5401 val |= size; 5402 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val); 5403 } 5404 5405 for (txq = 0; txq < port->ntxqs; txq++) { 5406 val = mvpp2_read(port->priv, 5407 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq)); 5408 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX; 5409 5410 if (size < mtu) { 5411 size = mtu; 5412 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX; 5413 val |= size; 5414 mvpp2_write(port->priv, 5415 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq), 5416 val); 5417 } 5418 } 5419} 5420 5421/* Set the number of packets that will be received before an Rx interrupt 5422 * is generated by HW. 5423 */ 5424static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, 5425 struct mvpp2_rx_queue *rxq) 5426{ 5427 int cpu = get_cpu(); 5428 5429 if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK) 5430 rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK; 5431 5432 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); 5433 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG, 5434 rxq->pkts_coal); 5435 5436 put_cpu(); 5437} 5438 5439/* For some reason in the LSP this is done on each CPU. Why? */ 5440static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port, 5441 struct mvpp2_tx_queue *txq) 5442{ 5443 int cpu = get_cpu(); 5444 u32 val; 5445 5446 if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK) 5447 txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK; 5448 5449 val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET); 5450 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); 5451 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_THRESH_REG, val); 5452 5453 put_cpu(); 5454} 5455 5456static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz) 5457{ 5458 u64 tmp = (u64)clk_hz * usec; 5459 5460 do_div(tmp, USEC_PER_SEC); 5461 5462 return tmp > U32_MAX ? U32_MAX : tmp; 5463} 5464 5465static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz) 5466{ 5467 u64 tmp = (u64)cycles * USEC_PER_SEC; 5468 5469 do_div(tmp, clk_hz); 5470 5471 return tmp > U32_MAX ?
U32_MAX : tmp; 5472} 5473 5474/* Set the time delay in usec before Rx interrupt */ 5475static void mvpp2_rx_time_coal_set(struct mvpp2_port *port, 5476 struct mvpp2_rx_queue *rxq) 5477{ 5478 unsigned long freq = port->priv->tclk; 5479 u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq); 5480 5481 if (val > MVPP2_MAX_ISR_RX_THRESHOLD) { 5482 rxq->time_coal = 5483 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq); 5484 5485 /* re-evaluate to get actual register value */ 5486 val = mvpp2_usec_to_cycles(rxq->time_coal, freq); 5487 } 5488 5489 mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val); 5490} 5491 5492static void mvpp2_tx_time_coal_set(struct mvpp2_port *port) 5493{ 5494 unsigned long freq = port->priv->tclk; 5495 u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq); 5496 5497 if (val > MVPP2_MAX_ISR_TX_THRESHOLD) { 5498 port->tx_time_coal = 5499 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq); 5500 5501 /* re-evaluate to get actual register value */ 5502 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq); 5503 } 5504 5505 mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val); 5506} 5507 5508/* Free Tx queue skbuffs */ 5509static void mvpp2_txq_bufs_free(struct mvpp2_port *port, 5510 struct mvpp2_tx_queue *txq, 5511 struct mvpp2_txq_pcpu *txq_pcpu, int num) 5512{ 5513 int i; 5514 5515 for (i = 0; i < num; i++) { 5516 struct mvpp2_txq_pcpu_buf *tx_buf = 5517 txq_pcpu->buffs + txq_pcpu->txq_get_index; 5518 5519 if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma)) 5520 dma_unmap_single(port->dev->dev.parent, tx_buf->dma, 5521 tx_buf->size, DMA_TO_DEVICE); 5522 if (tx_buf->skb) 5523 dev_kfree_skb_any(tx_buf->skb); 5524 5525 mvpp2_txq_inc_get(txq_pcpu); 5526 } 5527} 5528 5529static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port, 5530 u32 cause) 5531{ 5532 int queue = fls(cause) - 1; 5533 5534 return port->rxqs[queue]; 5535} 5536 5537static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port, 5538 u32 cause) 5539{ 5540 int queue = fls(cause) - 1; 5541 5542 return port->txqs[queue]; 5543} 5544 5545/* Handle end of transmission */ 5546static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, 5547 struct mvpp2_txq_pcpu *txq_pcpu) 5548{ 5549 struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id); 5550 int tx_done; 5551 5552 if (txq_pcpu->cpu != smp_processor_id()) 5553 netdev_err(port->dev, "wrong cpu on the end of Tx processing\n"); 5554 5555 tx_done = mvpp2_txq_sent_desc_proc(port, txq); 5556 if (!tx_done) 5557 return; 5558 mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done); 5559 5560 txq_pcpu->count -= tx_done; 5561 5562 if (netif_tx_queue_stopped(nq)) 5563 if (txq_pcpu->count <= txq_pcpu->wake_threshold) 5564 netif_tx_wake_queue(nq); 5565} 5566 5567static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause, 5568 int cpu) 5569{ 5570 struct mvpp2_tx_queue *txq; 5571 struct mvpp2_txq_pcpu *txq_pcpu; 5572 unsigned int tx_todo = 0; 5573 5574 while (cause) { 5575 txq = mvpp2_get_tx_queue(port, cause); 5576 if (!txq) 5577 break; 5578 5579 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); 5580 5581 if (txq_pcpu->count) { 5582 mvpp2_txq_done(port, txq, txq_pcpu); 5583 tx_todo += txq_pcpu->count; 5584 } 5585 5586 cause &= ~(1 << txq->log_id); 5587 } 5588 return tx_todo; 5589} 5590 5591/* Rx/Tx queue initialization/cleanup methods */ 5592 5593/* Allocate and initialize descriptors for aggr TXQ */ 5594static int mvpp2_aggr_txq_init(struct platform_device *pdev, 5595 struct 
mvpp2_tx_queue *aggr_txq, int cpu, 5596 struct mvpp2 *priv) 5597{ 5598 u32 txq_dma; 5599 5600 /* Allocate memory for TX descriptors */ 5601 aggr_txq->descs = dma_zalloc_coherent(&pdev->dev, 5602 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, 5603 &aggr_txq->descs_dma, GFP_KERNEL); 5604 if (!aggr_txq->descs) 5605 return -ENOMEM; 5606 5607 aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1; 5608 5609 /* Aggr TXQ no reset WA */ 5610 aggr_txq->next_desc_to_proc = mvpp2_read(priv, 5611 MVPP2_AGGR_TXQ_INDEX_REG(cpu)); 5612 5613 /* Set Tx descriptors queue starting address indirect 5614 * access 5615 */ 5616 if (priv->hw_version == MVPP21) 5617 txq_dma = aggr_txq->descs_dma; 5618 else 5619 txq_dma = aggr_txq->descs_dma >> 5620 MVPP22_AGGR_TXQ_DESC_ADDR_OFFS; 5621 5622 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma); 5623 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), 5624 MVPP2_AGGR_TXQ_SIZE); 5625 5626 return 0; 5627} 5628 5629/* Create a specified Rx queue */ 5630static int mvpp2_rxq_init(struct mvpp2_port *port, 5631 struct mvpp2_rx_queue *rxq) 5632 5633{ 5634 u32 rxq_dma; 5635 int cpu; 5636 5637 rxq->size = port->rx_ring_size; 5638 5639 /* Allocate memory for RX descriptors */ 5640 rxq->descs = dma_alloc_coherent(port->dev->dev.parent, 5641 rxq->size * MVPP2_DESC_ALIGNED_SIZE, 5642 &rxq->descs_dma, GFP_KERNEL); 5643 if (!rxq->descs) 5644 return -ENOMEM; 5645 5646 rxq->last_desc = rxq->size - 1; 5647 5648 /* Zero occupied and non-occupied counters - direct access */ 5649 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); 5650 5651 /* Set Rx descriptors queue starting address - indirect access */ 5652 cpu = get_cpu(); 5653 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); 5654 if (port->priv->hw_version == MVPP21) 5655 rxq_dma = rxq->descs_dma; 5656 else 5657 rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS; 5658 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); 5659 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); 5660 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0); 5661 put_cpu(); 5662 5663 /* Set Offset */ 5664 mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD); 5665 5666 /* Set coalescing pkts and time */ 5667 mvpp2_rx_pkts_coal_set(port, rxq); 5668 mvpp2_rx_time_coal_set(port, rxq); 5669 5670 /* Add number of descriptors ready for receiving packets */ 5671 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size); 5672 5673 return 0; 5674} 5675 5676/* Push packets received by the RXQ to BM pool */ 5677static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port, 5678 struct mvpp2_rx_queue *rxq) 5679{ 5680 int rx_received, i; 5681 5682 rx_received = mvpp2_rxq_received(port, rxq->id); 5683 if (!rx_received) 5684 return; 5685 5686 for (i = 0; i < rx_received; i++) { 5687 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); 5688 u32 status = mvpp2_rxdesc_status_get(port, rx_desc); 5689 int pool; 5690 5691 pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >> 5692 MVPP2_RXD_BM_POOL_ID_OFFS; 5693 5694 mvpp2_bm_pool_put(port, pool, 5695 mvpp2_rxdesc_dma_addr_get(port, rx_desc), 5696 mvpp2_rxdesc_cookie_get(port, rx_desc)); 5697 } 5698 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received); 5699} 5700 5701/* Cleanup Rx queue */ 5702static void mvpp2_rxq_deinit(struct mvpp2_port *port, 5703 struct mvpp2_rx_queue *rxq) 5704{ 5705 int cpu; 5706 5707 mvpp2_rxq_drop_pkts(port, rxq); 5708 5709 if (rxq->descs) 5710 dma_free_coherent(port->dev->dev.parent, 5711 rxq->size * MVPP2_DESC_ALIGNED_SIZE, 5712 
rxq->descs, 5713 rxq->descs_dma); 5714 5715 rxq->descs = NULL; 5716 rxq->last_desc = 0; 5717 rxq->next_desc_to_proc = 0; 5718 rxq->descs_dma = 0; 5719 5720 /* Clear Rx descriptors queue starting address and size; 5721 * free descriptor number 5722 */ 5723 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); 5724 cpu = get_cpu(); 5725 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); 5726 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0); 5727 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0); 5728 put_cpu(); 5729} 5730 5731/* Create and initialize a Tx queue */ 5732static int mvpp2_txq_init(struct mvpp2_port *port, 5733 struct mvpp2_tx_queue *txq) 5734{ 5735 u32 val; 5736 int cpu, desc, desc_per_txq, tx_port_num; 5737 struct mvpp2_txq_pcpu *txq_pcpu; 5738 5739 txq->size = port->tx_ring_size; 5740 5741 /* Allocate memory for Tx descriptors */ 5742 txq->descs = dma_alloc_coherent(port->dev->dev.parent, 5743 txq->size * MVPP2_DESC_ALIGNED_SIZE, 5744 &txq->descs_dma, GFP_KERNEL); 5745 if (!txq->descs) 5746 return -ENOMEM; 5747 5748 txq->last_desc = txq->size - 1; 5749 5750 /* Set Tx descriptors queue starting address - indirect access */ 5751 cpu = get_cpu(); 5752 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); 5753 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 5754 txq->descs_dma); 5755 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 5756 txq->size & MVPP2_TXQ_DESC_SIZE_MASK); 5757 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0); 5758 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG, 5759 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET); 5760 val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG); 5761 val &= ~MVPP2_TXQ_PENDING_MASK; 5762 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val); 5763 5764 /* Calculate base address in prefetch buffer. We reserve 16 descriptors 5765 * for each existing TXQ. 
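 * For example (illustrative values): with desc_per_txq = 16, port id 1
 * and txq->log_id 2, the computation below gives
 * desc = (1 * MVPP2_MAX_TXQ * 16) + (2 * 16), i.e. each (port, queue)
 * pair gets its own 16-descriptor window in the prefetch buffer.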
5766 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT 5767 * GBE ports assumed to be continuous from 0 to MVPP2_MAX_PORTS 5768 */ 5769 desc_per_txq = 16; 5770 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) + 5771 (txq->log_id * desc_per_txq); 5772 5773 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, 5774 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | 5775 MVPP2_PREF_BUF_THRESH(desc_per_txq / 2)); 5776 put_cpu(); 5777 5778 /* WRR / EJP configuration - indirect access */ 5779 tx_port_num = mvpp2_egress_port(port); 5780 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); 5781 5782 val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id)); 5783 val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK; 5784 val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1); 5785 val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK; 5786 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val); 5787 5788 val = MVPP2_TXQ_TOKEN_SIZE_MAX; 5789 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id), 5790 val); 5791 5792 for_each_present_cpu(cpu) { 5793 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); 5794 txq_pcpu->size = txq->size; 5795 txq_pcpu->buffs = kmalloc_array(txq_pcpu->size, 5796 sizeof(*txq_pcpu->buffs), 5797 GFP_KERNEL); 5798 if (!txq_pcpu->buffs) 5799 return -ENOMEM; 5800 5801 txq_pcpu->count = 0; 5802 txq_pcpu->reserved_num = 0; 5803 txq_pcpu->txq_put_index = 0; 5804 txq_pcpu->txq_get_index = 0; 5805 5806 txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS; 5807 txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2; 5808 5809 txq_pcpu->tso_headers = 5810 dma_alloc_coherent(port->dev->dev.parent, 5811 txq_pcpu->size * TSO_HEADER_SIZE, 5812 &txq_pcpu->tso_headers_dma, 5813 GFP_KERNEL); 5814 if (!txq_pcpu->tso_headers) 5815 return -ENOMEM; 5816 } 5817 5818 return 0; 5819} 5820 5821/* Free allocated TXQ resources */ 5822static void mvpp2_txq_deinit(struct mvpp2_port *port, 5823 struct mvpp2_tx_queue *txq) 5824{ 5825 struct mvpp2_txq_pcpu *txq_pcpu; 5826 int cpu; 5827 5828 for_each_present_cpu(cpu) { 5829 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); 5830 kfree(txq_pcpu->buffs); 5831 5832 dma_free_coherent(port->dev->dev.parent, 5833 txq_pcpu->size * TSO_HEADER_SIZE, 5834 txq_pcpu->tso_headers, 5835 txq_pcpu->tso_headers_dma); 5836 } 5837 5838 if (txq->descs) 5839 dma_free_coherent(port->dev->dev.parent, 5840 txq->size * MVPP2_DESC_ALIGNED_SIZE, 5841 txq->descs, txq->descs_dma); 5842 5843 txq->descs = NULL; 5844 txq->last_desc = 0; 5845 txq->next_desc_to_proc = 0; 5846 txq->descs_dma = 0; 5847 5848 /* Set minimum bandwidth for disabled TXQs */ 5849 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0); 5850 5851 /* Set Tx descriptors queue starting address and size */ 5852 cpu = get_cpu(); 5853 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); 5854 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0); 5855 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0); 5856 put_cpu(); 5857} 5858 5859/* Cleanup a Tx queue */ 5860static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) 5861{ 5862 struct mvpp2_txq_pcpu *txq_pcpu; 5863 int delay, pending, cpu; 5864 u32 val; 5865 5866 cpu = get_cpu(); 5867 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); 5868 val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG); 5869 val |= MVPP2_TXQ_DRAIN_EN_MASK; 5870 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val); 5871 5872 /* The napi queue has been stopped so wait for
all packets 5873 * to be transmitted. 5874 */ 5875 delay = 0; 5876 do { 5877 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) { 5878 netdev_warn(port->dev, 5879 "port %d: cleaning queue %d timed out\n", 5880 port->id, txq->log_id); 5881 break; 5882 } 5883 mdelay(1); 5884 delay++; 5885 5886 pending = mvpp2_percpu_read(port->priv, cpu, 5887 MVPP2_TXQ_PENDING_REG); 5888 pending &= MVPP2_TXQ_PENDING_MASK; 5889 } while (pending); 5890 5891 val &= ~MVPP2_TXQ_DRAIN_EN_MASK; 5892 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val); 5893 put_cpu(); 5894 5895 for_each_present_cpu(cpu) { 5896 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); 5897 5898 /* Release all packets */ 5899 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count); 5900 5901 /* Reset queue */ 5902 txq_pcpu->count = 0; 5903 txq_pcpu->txq_put_index = 0; 5904 txq_pcpu->txq_get_index = 0; 5905 } 5906} 5907 5908/* Cleanup all Tx queues */ 5909static void mvpp2_cleanup_txqs(struct mvpp2_port *port) 5910{ 5911 struct mvpp2_tx_queue *txq; 5912 int queue; 5913 u32 val; 5914 5915 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG); 5916 5917 /* Reset Tx ports and delete Tx queues */ 5918 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id); 5919 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); 5920 5921 for (queue = 0; queue < port->ntxqs; queue++) { 5922 txq = port->txqs[queue]; 5923 mvpp2_txq_clean(port, txq); 5924 mvpp2_txq_deinit(port, txq); 5925 } 5926 5927 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); 5928 5929 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id); 5930 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); 5931} 5932 5933/* Cleanup all Rx queues */ 5934static void mvpp2_cleanup_rxqs(struct mvpp2_port *port) 5935{ 5936 int queue; 5937 5938 for (queue = 0; queue < port->nrxqs; queue++) 5939 mvpp2_rxq_deinit(port, port->rxqs[queue]); 5940} 5941 5942/* Init all Rx queues for port */ 5943static int mvpp2_setup_rxqs(struct mvpp2_port *port) 5944{ 5945 int queue, err; 5946 5947 for (queue = 0; queue < port->nrxqs; queue++) { 5948 err = mvpp2_rxq_init(port, port->rxqs[queue]); 5949 if (err) 5950 goto err_cleanup; 5951 } 5952 return 0; 5953 5954err_cleanup: 5955 mvpp2_cleanup_rxqs(port); 5956 return err; 5957} 5958 5959/* Init all tx queues for port */ 5960static int mvpp2_setup_txqs(struct mvpp2_port *port) 5961{ 5962 struct mvpp2_tx_queue *txq; 5963 int queue, err; 5964 5965 for (queue = 0; queue < port->ntxqs; queue++) { 5966 txq = port->txqs[queue]; 5967 err = mvpp2_txq_init(port, txq); 5968 if (err) 5969 goto err_cleanup; 5970 } 5971 5972 if (port->has_tx_irqs) { 5973 mvpp2_tx_time_coal_set(port); 5974 for (queue = 0; queue < port->ntxqs; queue++) { 5975 txq = port->txqs[queue]; 5976 mvpp2_tx_pkts_coal_set(port, txq); 5977 } 5978 } 5979 5980 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); 5981 return 0; 5982 5983err_cleanup: 5984 mvpp2_cleanup_txqs(port); 5985 return err; 5986} 5987 5988/* The callback for per-port interrupt */ 5989static irqreturn_t mvpp2_isr(int irq, void *dev_id) 5990{ 5991 struct mvpp2_queue_vector *qv = dev_id; 5992 5993 mvpp2_qvec_interrupt_disable(qv); 5994 5995 napi_schedule(&qv->napi); 5996 5997 return IRQ_HANDLED; 5998} 5999 6000/* Per-port interrupt for link status changes */ 6001static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id) 6002{ 6003 struct mvpp2_port *port = (struct mvpp2_port *)dev_id; 6004 struct net_device *dev = port->dev; 6005 bool event = false, link = false; 6006 u32 val; 6007 6008 mvpp22_gop_mask_irq(port); 6009 6010 if (port->gop_id == 0 && 6011 port->phy_interface 
== PHY_INTERFACE_MODE_10GKR) { 6012 val = readl(port->base + MVPP22_XLG_INT_STAT); 6013 if (val & MVPP22_XLG_INT_STAT_LINK) { 6014 event = true; 6015 val = readl(port->base + MVPP22_XLG_STATUS); 6016 if (val & MVPP22_XLG_STATUS_LINK_UP) 6017 link = true; 6018 } 6019 } else if (phy_interface_mode_is_rgmii(port->phy_interface) || 6020 port->phy_interface == PHY_INTERFACE_MODE_SGMII) { 6021 val = readl(port->base + MVPP22_GMAC_INT_STAT); 6022 if (val & MVPP22_GMAC_INT_STAT_LINK) { 6023 event = true; 6024 val = readl(port->base + MVPP2_GMAC_STATUS0); 6025 if (val & MVPP2_GMAC_STATUS0_LINK_UP) 6026 link = true; 6027 } 6028 } 6029 6030 if (!netif_running(dev) || !event) 6031 goto handled; 6032 6033 if (link) { 6034 mvpp2_interrupts_enable(port); 6035 6036 mvpp2_egress_enable(port); 6037 mvpp2_ingress_enable(port); 6038 netif_carrier_on(dev); 6039 netif_tx_wake_all_queues(dev); 6040 } else { 6041 netif_tx_stop_all_queues(dev); 6042 netif_carrier_off(dev); 6043 mvpp2_ingress_disable(port); 6044 mvpp2_egress_disable(port); 6045 6046 mvpp2_interrupts_disable(port); 6047 } 6048 6049handled: 6050 mvpp22_gop_unmask_irq(port); 6051 return IRQ_HANDLED; 6052} 6053 6054static void mvpp2_gmac_set_autoneg(struct mvpp2_port *port, 6055 struct phy_device *phydev) 6056{ 6057 u32 val; 6058 6059 if (port->phy_interface != PHY_INTERFACE_MODE_RGMII && 6060 port->phy_interface != PHY_INTERFACE_MODE_RGMII_ID && 6061 port->phy_interface != PHY_INTERFACE_MODE_RGMII_RXID && 6062 port->phy_interface != PHY_INTERFACE_MODE_RGMII_TXID && 6063 port->phy_interface != PHY_INTERFACE_MODE_SGMII) 6064 return; 6065 6066 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); 6067 val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED | 6068 MVPP2_GMAC_CONFIG_GMII_SPEED | 6069 MVPP2_GMAC_CONFIG_FULL_DUPLEX | 6070 MVPP2_GMAC_AN_SPEED_EN | 6071 MVPP2_GMAC_AN_DUPLEX_EN); 6072 6073 if (phydev->duplex) 6074 val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX; 6075 6076 if (phydev->speed == SPEED_1000) 6077 val |= MVPP2_GMAC_CONFIG_GMII_SPEED; 6078 else if (phydev->speed == SPEED_100) 6079 val |= MVPP2_GMAC_CONFIG_MII_SPEED; 6080 6081 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); 6082} 6083 6084/* Adjust link */ 6085static void mvpp2_link_event(struct net_device *dev) 6086{ 6087 struct mvpp2_port *port = netdev_priv(dev); 6088 struct phy_device *phydev = dev->phydev; 6089 bool link_reconfigured = false; 6090 u32 val; 6091 6092 if (phydev->link) { 6093 if (port->phy_interface != phydev->interface && port->comphy) { 6094 /* disable current port for reconfiguration */ 6095 mvpp2_interrupts_disable(port); 6096 netif_carrier_off(port->dev); 6097 mvpp2_port_disable(port); 6098 phy_power_off(port->comphy); 6099 6100 /* comphy reconfiguration */ 6101 port->phy_interface = phydev->interface; 6102 mvpp22_comphy_init(port); 6103 6104 /* gop/mac reconfiguration */ 6105 mvpp22_gop_init(port); 6106 mvpp2_port_mii_set(port); 6107 6108 link_reconfigured = true; 6109 } 6110 6111 if ((port->speed != phydev->speed) || 6112 (port->duplex != phydev->duplex)) { 6113 mvpp2_gmac_set_autoneg(port, phydev); 6114 6115 port->duplex = phydev->duplex; 6116 port->speed = phydev->speed; 6117 } 6118 } 6119 6120 if (phydev->link != port->link || link_reconfigured) { 6121 port->link = phydev->link; 6122 6123 if (phydev->link) { 6124 if (port->phy_interface == PHY_INTERFACE_MODE_RGMII || 6125 port->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || 6126 port->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID || 6127 port->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID || 6128 port->phy_interface == 
PHY_INTERFACE_MODE_SGMII) { 6129 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); 6130 val |= (MVPP2_GMAC_FORCE_LINK_PASS | 6131 MVPP2_GMAC_FORCE_LINK_DOWN); 6132 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); 6133 } 6134 6135 mvpp2_interrupts_enable(port); 6136 mvpp2_port_enable(port); 6137 6138 mvpp2_egress_enable(port); 6139 mvpp2_ingress_enable(port); 6140 netif_carrier_on(dev); 6141 netif_tx_wake_all_queues(dev); 6142 } else { 6143 port->duplex = -1; 6144 port->speed = 0; 6145 6146 netif_tx_stop_all_queues(dev); 6147 netif_carrier_off(dev); 6148 mvpp2_ingress_disable(port); 6149 mvpp2_egress_disable(port); 6150 6151 mvpp2_port_disable(port); 6152 mvpp2_interrupts_disable(port); 6153 } 6154 6155 phy_print_status(phydev); 6156 } 6157} 6158 6159static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu) 6160{ 6161 ktime_t interval; 6162 6163 if (!port_pcpu->timer_scheduled) { 6164 port_pcpu->timer_scheduled = true; 6165 interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS; 6166 hrtimer_start(&port_pcpu->tx_done_timer, interval, 6167 HRTIMER_MODE_REL_PINNED); 6168 } 6169} 6170 6171static void mvpp2_tx_proc_cb(unsigned long data) 6172{ 6173 struct net_device *dev = (struct net_device *)data; 6174 struct mvpp2_port *port = netdev_priv(dev); 6175 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu); 6176 unsigned int tx_todo, cause; 6177 6178 if (!netif_running(dev)) 6179 return; 6180 port_pcpu->timer_scheduled = false; 6181 6182 /* Process all the Tx queues */ 6183 cause = (1 << port->ntxqs) - 1; 6184 tx_todo = mvpp2_tx_done(port, cause, smp_processor_id()); 6185 6186 /* Set the timer in case not all the packets were processed */ 6187 if (tx_todo) 6188 mvpp2_timer_set(port_pcpu); 6189} 6190 6191static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer) 6192{ 6193 struct mvpp2_port_pcpu *port_pcpu = container_of(timer, 6194 struct mvpp2_port_pcpu, 6195 tx_done_timer); 6196 6197 tasklet_schedule(&port_pcpu->tx_done_tasklet); 6198 6199 return HRTIMER_NORESTART; 6200} 6201 6202/* Main RX/TX processing routines */ 6203 6204/* Display more error info */ 6205static void mvpp2_rx_error(struct mvpp2_port *port, 6206 struct mvpp2_rx_desc *rx_desc) 6207{ 6208 u32 status = mvpp2_rxdesc_status_get(port, rx_desc); 6209 size_t sz = mvpp2_rxdesc_size_get(port, rx_desc); 6210 6211 switch (status & MVPP2_RXD_ERR_CODE_MASK) { 6212 case MVPP2_RXD_ERR_CRC: 6213 netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n", 6214 status, sz); 6215 break; 6216 case MVPP2_RXD_ERR_OVERRUN: 6217 netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n", 6218 status, sz); 6219 break; 6220 case MVPP2_RXD_ERR_RESOURCE: 6221 netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n", 6222 status, sz); 6223 break; 6224 } 6225} 6226 6227/* Handle RX checksum offload */ 6228static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status, 6229 struct sk_buff *skb) 6230{ 6231 if (((status & MVPP2_RXD_L3_IP4) && 6232 !(status & MVPP2_RXD_IP4_HEADER_ERR)) || 6233 (status & MVPP2_RXD_L3_IP6)) 6234 if (((status & MVPP2_RXD_L4_UDP) || 6235 (status & MVPP2_RXD_L4_TCP)) && 6236 (status & MVPP2_RXD_L4_CSUM_OK)) { 6237 skb->csum = 0; 6238 skb->ip_summed = CHECKSUM_UNNECESSARY; 6239 return; 6240 } 6241 6242 skb->ip_summed = CHECKSUM_NONE; 6243} 6244 6245/* Reuse skb if possible, or allocate a new skb and add it to BM pool */ 6246static int mvpp2_rx_refill(struct mvpp2_port *port, 6247 struct mvpp2_bm_pool *bm_pool, int pool) 6248{ 6249 dma_addr_t dma_addr; 6250 phys_addr_t phys_addr; 
6251 void *buf; 6252 6253 /* No recycle or too many buffers are in use, so allocate a new skb */ 6254 buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr, 6255 GFP_ATOMIC); 6256 if (!buf) 6257 return -ENOMEM; 6258 6259 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); 6260 6261 return 0; 6262} 6263 6264/* Handle tx checksum */ 6265static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb) 6266{ 6267 if (skb->ip_summed == CHECKSUM_PARTIAL) { 6268 int ip_hdr_len = 0; 6269 u8 l4_proto; 6270 6271 if (skb->protocol == htons(ETH_P_IP)) { 6272 struct iphdr *ip4h = ip_hdr(skb); 6273 6274 /* Calculate IPv4 checksum and L4 checksum */ 6275 ip_hdr_len = ip4h->ihl; 6276 l4_proto = ip4h->protocol; 6277 } else if (skb->protocol == htons(ETH_P_IPV6)) { 6278 struct ipv6hdr *ip6h = ipv6_hdr(skb); 6279 6280 /* Read l4_protocol from one of the IPv6 extension headers */ 6281 if (skb_network_header_len(skb) > 0) 6282 ip_hdr_len = (skb_network_header_len(skb) >> 2); 6283 l4_proto = ip6h->nexthdr; 6284 } else { 6285 return MVPP2_TXD_L4_CSUM_NOT; 6286 } 6287 6288 return mvpp2_txq_desc_csum(skb_network_offset(skb), 6289 skb->protocol, ip_hdr_len, l4_proto); 6290 } 6291 6292 return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE; 6293} 6294 6295/* Main rx processing */ 6296static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, 6297 int rx_todo, struct mvpp2_rx_queue *rxq) 6298{ 6299 struct net_device *dev = port->dev; 6300 int rx_received; 6301 int rx_done = 0; 6302 u32 rcvd_pkts = 0; 6303 u32 rcvd_bytes = 0; 6304 6305 /* Get the number of received packets and clamp the to-do count */ 6306 rx_received = mvpp2_rxq_received(port, rxq->id); 6307 if (rx_todo > rx_received) 6308 rx_todo = rx_received; 6309 6310 while (rx_done < rx_todo) { 6311 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); 6312 struct mvpp2_bm_pool *bm_pool; 6313 struct sk_buff *skb; 6314 unsigned int frag_size; 6315 dma_addr_t dma_addr; 6316 phys_addr_t phys_addr; 6317 u32 rx_status; 6318 int pool, rx_bytes, err; 6319 void *data; 6320 6321 rx_done++; 6322 rx_status = mvpp2_rxdesc_status_get(port, rx_desc); 6323 rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc); 6324 rx_bytes -= MVPP2_MH_SIZE; 6325 dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc); 6326 phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc); 6327 data = (void *)phys_to_virt(phys_addr); 6328 6329 pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >> 6330 MVPP2_RXD_BM_POOL_ID_OFFS; 6331 bm_pool = &port->priv->bm_pools[pool]; 6332 6333 /* In case of an error, release the requested buffer pointer 6334 * to the Buffer Manager. This request process is controlled 6335 * by the hardware, and the information about the buffer is 6336 * carried in the RX descriptor.
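 * (The BM pool index is recovered from the descriptor status bits
 * right above, so the buffer is returned to the exact pool it was
 * allocated from.)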
6337 */ 6338 if (rx_status & MVPP2_RXD_ERR_SUMMARY) { 6339err_drop_frame: 6340 dev->stats.rx_errors++; 6341 mvpp2_rx_error(port, rx_desc); 6342 /* Return the buffer to the pool */ 6343 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); 6344 continue; 6345 } 6346 6347 if (bm_pool->frag_size > PAGE_SIZE) 6348 frag_size = 0; 6349 else 6350 frag_size = bm_pool->frag_size; 6351 6352 skb = build_skb(data, frag_size); 6353 if (!skb) { 6354 netdev_warn(port->dev, "skb build failed\n"); 6355 goto err_drop_frame; 6356 } 6357 6358 err = mvpp2_rx_refill(port, bm_pool, pool); 6359 if (err) { 6360 netdev_err(port->dev, "failed to refill BM pools\n"); 6361 goto err_drop_frame; 6362 } 6363 6364 dma_unmap_single(dev->dev.parent, dma_addr, 6365 bm_pool->buf_size, DMA_FROM_DEVICE); 6366 6367 rcvd_pkts++; 6368 rcvd_bytes += rx_bytes; 6369 6370 skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD); 6371 skb_put(skb, rx_bytes); 6372 skb->protocol = eth_type_trans(skb, dev); 6373 mvpp2_rx_csum(port, rx_status, skb); 6374 6375 napi_gro_receive(napi, skb); 6376 } 6377 6378 if (rcvd_pkts) { 6379 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); 6380 6381 u64_stats_update_begin(&stats->syncp); 6382 stats->rx_packets += rcvd_pkts; 6383 stats->rx_bytes += rcvd_bytes; 6384 u64_stats_update_end(&stats->syncp); 6385 } 6386 6387 /* Update Rx queue management counters */ 6388 wmb(); 6389 mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done); 6390 6391 return rx_todo; 6392} 6393 6394static inline void 6395tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, 6396 struct mvpp2_tx_desc *desc) 6397{ 6398 struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu); 6399 6400 dma_addr_t buf_dma_addr = 6401 mvpp2_txdesc_dma_addr_get(port, desc); 6402 size_t buf_sz = 6403 mvpp2_txdesc_size_get(port, desc); 6404 if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr)) 6405 dma_unmap_single(port->dev->dev.parent, buf_dma_addr, 6406 buf_sz, DMA_TO_DEVICE); 6407 mvpp2_txq_desc_put(txq); 6408} 6409 6410/* Handle tx fragmentation processing */ 6411static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb, 6412 struct mvpp2_tx_queue *aggr_txq, 6413 struct mvpp2_tx_queue *txq) 6414{ 6415 struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu); 6416 struct mvpp2_tx_desc *tx_desc; 6417 int i; 6418 dma_addr_t buf_dma_addr; 6419 6420 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 6421 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 6422 void *addr = page_address(frag->page.p) + frag->page_offset; 6423 6424 tx_desc = mvpp2_txq_next_desc_get(aggr_txq); 6425 mvpp2_txdesc_txq_set(port, tx_desc, txq->id); 6426 mvpp2_txdesc_size_set(port, tx_desc, frag->size); 6427 6428 buf_dma_addr = dma_map_single(port->dev->dev.parent, addr, 6429 frag->size, 6430 DMA_TO_DEVICE); 6431 if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) { 6432 mvpp2_txq_desc_put(txq); 6433 goto cleanup; 6434 } 6435 6436 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); 6437 6438 if (i == (skb_shinfo(skb)->nr_frags - 1)) { 6439 /* Last descriptor */ 6440 mvpp2_txdesc_cmd_set(port, tx_desc, 6441 MVPP2_TXD_L_DESC); 6442 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc); 6443 } else { 6444 /* Descriptor in the middle: Not First, Not Last */ 6445 mvpp2_txdesc_cmd_set(port, tx_desc, 0); 6446 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); 6447 } 6448 } 6449 6450 return 0; 6451cleanup: 6452 /* Release all descriptors that were used to map fragments of 6453 * this packet, as well as the corresponding DMA mappings 6454 */ 6455 for (i = i - 1; i 
>= 0; i--) { 6456 tx_desc = txq->descs + i; 6457 tx_desc_unmap_put(port, txq, tx_desc); 6458 } 6459 6460 return -ENOMEM; 6461} 6462 6463static inline void mvpp2_tso_put_hdr(struct sk_buff *skb, 6464 struct net_device *dev, 6465 struct mvpp2_tx_queue *txq, 6466 struct mvpp2_tx_queue *aggr_txq, 6467 struct mvpp2_txq_pcpu *txq_pcpu, 6468 int hdr_sz) 6469{ 6470 struct mvpp2_port *port = netdev_priv(dev); 6471 struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq); 6472 dma_addr_t addr; 6473 6474 mvpp2_txdesc_txq_set(port, tx_desc, txq->id); 6475 mvpp2_txdesc_size_set(port, tx_desc, hdr_sz); 6476 6477 addr = txq_pcpu->tso_headers_dma + 6478 txq_pcpu->txq_put_index * TSO_HEADER_SIZE; 6479 mvpp2_txdesc_dma_addr_set(port, tx_desc, addr); 6480 6481 mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) | 6482 MVPP2_TXD_F_DESC | 6483 MVPP2_TXD_PADDING_DISABLE); 6484 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); 6485} 6486 6487static inline int mvpp2_tso_put_data(struct sk_buff *skb, 6488 struct net_device *dev, struct tso_t *tso, 6489 struct mvpp2_tx_queue *txq, 6490 struct mvpp2_tx_queue *aggr_txq, 6491 struct mvpp2_txq_pcpu *txq_pcpu, 6492 int sz, bool left, bool last) 6493{ 6494 struct mvpp2_port *port = netdev_priv(dev); 6495 struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq); 6496 dma_addr_t buf_dma_addr; 6497 6498 mvpp2_txdesc_txq_set(port, tx_desc, txq->id); 6499 mvpp2_txdesc_size_set(port, tx_desc, sz); 6500 6501 buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz, 6502 DMA_TO_DEVICE); 6503 if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) { 6504 mvpp2_txq_desc_put(txq); 6505 return -ENOMEM; 6506 } 6507 6508 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); 6509 6510 if (!left) { 6511 mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC); 6512 if (last) { 6513 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc); 6514 return 0; 6515 } 6516 } else { 6517 mvpp2_txdesc_cmd_set(port, tx_desc, 0); 6518 } 6519 6520 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); 6521 return 0; 6522} 6523 6524static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev, 6525 struct mvpp2_tx_queue *txq, 6526 struct mvpp2_tx_queue *aggr_txq, 6527 struct mvpp2_txq_pcpu *txq_pcpu) 6528{ 6529 struct mvpp2_port *port = netdev_priv(dev); 6530 struct tso_t tso; 6531 int hdr_sz = skb_transport_offset(skb) + tcp_hdrlen(skb); 6532 int i, len, descs = 0; 6533 6534 /* Check number of available descriptors */ 6535 if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, 6536 tso_count_descs(skb)) || 6537 mvpp2_txq_reserved_desc_num_proc(port->priv, txq, txq_pcpu, 6538 tso_count_descs(skb))) 6539 return 0; 6540 6541 tso_start(skb, &tso); 6542 len = skb->len - hdr_sz; 6543 while (len > 0) { 6544 int left = min_t(int, skb_shinfo(skb)->gso_size, len); 6545 char *hdr = txq_pcpu->tso_headers + 6546 txq_pcpu->txq_put_index * TSO_HEADER_SIZE; 6547 6548 len -= left; 6549 descs++; 6550 6551 tso_build_hdr(skb, hdr, &tso, left, len == 0); 6552 mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz); 6553 6554 while (left > 0) { 6555 int sz = min_t(int, tso.size, left); 6556 left -= sz; 6557 descs++; 6558 6559 if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq, 6560 txq_pcpu, sz, left, len == 0)) 6561 goto release; 6562 tso_build_data(skb, &tso, sz); 6563 } 6564 } 6565 6566 return descs; 6567 6568release: 6569 for (i = descs - 1; i >= 0; i--) { 6570 struct mvpp2_tx_desc *tx_desc = txq->descs + i; 6571 tx_desc_unmap_put(port, txq, tx_desc); 6572 } 6573 return 0; 6574} 
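/* Worked example for the TSO path above (illustrative; the sizes are
 * assumptions): an skb with 7000 bytes of payload, gso_size 1400 and a
 * 54 byte Ethernet/IP/TCP header is cut into 5 segments. Each segment
 * consumes one header descriptor, filled from the per-CPU tso_headers
 * area by mvpp2_tso_put_hdr(), plus one data descriptor per tso.size
 * sized piece from mvpp2_tso_put_data() - the same total that
 * tso_count_descs() pre-accounted for in the availability checks.
 */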
6575 6576/* Main tx processing */ 6577static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev) 6578{ 6579 struct mvpp2_port *port = netdev_priv(dev); 6580 struct mvpp2_tx_queue *txq, *aggr_txq; 6581 struct mvpp2_txq_pcpu *txq_pcpu; 6582 struct mvpp2_tx_desc *tx_desc; 6583 dma_addr_t buf_dma_addr; 6584 int frags = 0; 6585 u16 txq_id; 6586 u32 tx_cmd; 6587 6588 txq_id = skb_get_queue_mapping(skb); 6589 txq = port->txqs[txq_id]; 6590 txq_pcpu = this_cpu_ptr(txq->pcpu); 6591 aggr_txq = &port->priv->aggr_txqs[smp_processor_id()]; 6592 6593 if (skb_is_gso(skb)) { 6594 frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu); 6595 goto out; 6596 } 6597 frags = skb_shinfo(skb)->nr_frags + 1; 6598 6599 /* Check number of available descriptors */ 6600 if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) || 6601 mvpp2_txq_reserved_desc_num_proc(port->priv, txq, 6602 txq_pcpu, frags)) { 6603 frags = 0; 6604 goto out; 6605 } 6606 6607 /* Get a descriptor for the first part of the packet */ 6608 tx_desc = mvpp2_txq_next_desc_get(aggr_txq); 6609 mvpp2_txdesc_txq_set(port, tx_desc, txq->id); 6610 mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb)); 6611 6612 buf_dma_addr = dma_map_single(dev->dev.parent, skb->data, 6613 skb_headlen(skb), DMA_TO_DEVICE); 6614 if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) { 6615 mvpp2_txq_desc_put(txq); 6616 frags = 0; 6617 goto out; 6618 } 6619 6620 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); 6621 6622 tx_cmd = mvpp2_skb_tx_csum(port, skb); 6623 6624 if (frags == 1) { 6625 /* First and Last descriptor */ 6626 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC; 6627 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); 6628 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc); 6629 } else { 6630 /* First but not Last */ 6631 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE; 6632 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); 6633 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); 6634 6635 /* Continue with other skb fragments */ 6636 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) { 6637 tx_desc_unmap_put(port, txq, tx_desc); 6638 frags = 0; 6639 } 6640 } 6641 6642out: 6643 if (frags > 0) { 6644 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); 6645 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id); 6646 6647 txq_pcpu->reserved_num -= frags; 6648 txq_pcpu->count += frags; 6649 aggr_txq->count += frags; 6650 6651 /* Enable transmit */ 6652 wmb(); 6653 mvpp2_aggr_txq_pend_desc_add(port, frags); 6654 6655 if (txq_pcpu->count >= txq_pcpu->stop_threshold) 6656 netif_tx_stop_queue(nq); 6657 6658 u64_stats_update_begin(&stats->syncp); 6659 stats->tx_packets++; 6660 stats->tx_bytes += skb->len; 6661 u64_stats_update_end(&stats->syncp); 6662 } else { 6663 dev->stats.tx_dropped++; 6664 dev_kfree_skb_any(skb); 6665 } 6666 6667 /* Finalize TX processing */ 6668 if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal) 6669 mvpp2_txq_done(port, txq, txq_pcpu); 6670 6671 /* Set the timer in case not all frags were processed */ 6672 if (!port->has_tx_irqs && txq_pcpu->count <= frags && 6673 txq_pcpu->count > 0) { 6674 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu); 6675 6676 mvpp2_timer_set(port_pcpu); 6677 } 6678 6679 return NETDEV_TX_OK; 6680} 6681 6682static inline void mvpp2_cause_error(struct net_device *dev, int cause) 6683{ 6684 if (cause & MVPP2_CAUSE_FCS_ERR_MASK) 6685 netdev_err(dev, "FCS error\n"); 6686 if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK) 6687 netdev_err(dev, "rx fifo overrun error\n"); 6688 
if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK) 6689 netdev_err(dev, "tx fifo underrun error\n"); 6690} 6691 6692static int mvpp2_poll(struct napi_struct *napi, int budget) 6693{ 6694 u32 cause_rx_tx, cause_rx, cause_tx, cause_misc; 6695 int rx_done = 0; 6696 struct mvpp2_port *port = netdev_priv(napi->dev); 6697 struct mvpp2_queue_vector *qv; 6698 int cpu = smp_processor_id(); 6699 6700 qv = container_of(napi, struct mvpp2_queue_vector, napi); 6701 6702 /* Rx/Tx cause register 6703 * 6704 * Bits 0-15: each bit indicates received packets on the Rx queue 6705 * (bit 0 is for Rx queue 0). 6706 * 6707 * Bits 16-23: each bit indicates transmitted packets on the Tx queue 6708 * (bit 16 is for Tx queue 0). 6709 * 6710 * Each CPU has its own Rx/Tx cause register 6711 */ 6712 cause_rx_tx = mvpp2_percpu_read(port->priv, qv->sw_thread_id, 6713 MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); 6714 6715 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; 6716 if (cause_misc) { 6717 mvpp2_cause_error(port->dev, cause_misc); 6718 6719 /* Clear the cause register */ 6720 mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0); 6721 mvpp2_percpu_write(port->priv, cpu, 6722 MVPP2_ISR_RX_TX_CAUSE_REG(port->id), 6723 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); 6724 } 6725 6726 cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; 6727 if (cause_tx) { 6728 cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET; 6729 mvpp2_tx_done(port, cause_tx, qv->sw_thread_id); 6730 } 6731 6732 /* Process RX packets */ 6733 cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; 6734 cause_rx <<= qv->first_rxq; 6735 cause_rx |= qv->pending_cause_rx; 6736 while (cause_rx && budget > 0) { 6737 int count; 6738 struct mvpp2_rx_queue *rxq; 6739 6740 rxq = mvpp2_get_rx_queue(port, cause_rx); 6741 if (!rxq) 6742 break; 6743 6744 count = mvpp2_rx(port, napi, budget, rxq); 6745 rx_done += count; 6746 budget -= count; 6747 if (budget > 0) { 6748 /* Clear the bit associated with this Rx queue 6749 * so that next iteration will continue from 6750 * the next Rx queue.
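 * For instance (illustrative): with cause_rx == 0b101, queue 2 is
 * polled first, since mvpp2_get_rx_queue() picks the highest set
 * bit via fls(); once it is served within budget, clearing bit 2
 * leaves 0b001, so queue 0 is handled on the next loop iteration.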
6751 */ 6752 cause_rx &= ~(1 << rxq->logic_rxq); 6753 } 6754 } 6755 6756 if (budget > 0) { 6757 cause_rx = 0; 6758 napi_complete_done(napi, rx_done); 6759 6760 mvpp2_qvec_interrupt_enable(qv); 6761 } 6762 qv->pending_cause_rx = cause_rx; 6763 return rx_done; 6764} 6765 6766/* Set hw internals when starting port */ 6767static void mvpp2_start_dev(struct mvpp2_port *port) 6768{ 6769 struct net_device *ndev = port->dev; 6770 int i; 6771 6772 if (port->gop_id == 0 && 6773 (port->phy_interface == PHY_INTERFACE_MODE_XAUI || 6774 port->phy_interface == PHY_INTERFACE_MODE_10GKR)) 6775 mvpp2_xlg_max_rx_size_set(port); 6776 else 6777 mvpp2_gmac_max_rx_size_set(port); 6778 6779 mvpp2_txp_max_tx_size_set(port); 6780 6781 for (i = 0; i < port->nqvecs; i++) 6782 napi_enable(&port->qvecs[i].napi); 6783 6784 /* Enable interrupts on all CPUs */ 6785 mvpp2_interrupts_enable(port); 6786 6787 if (port->priv->hw_version == MVPP22) { 6788 mvpp22_comphy_init(port); 6789 mvpp22_gop_init(port); 6790 } 6791 6792 mvpp2_port_mii_set(port); 6793 mvpp2_port_enable(port); 6794 if (ndev->phydev) 6795 phy_start(ndev->phydev); 6796 netif_tx_start_all_queues(port->dev); 6797} 6798 6799/* Set hw internals when stopping port */ 6800static void mvpp2_stop_dev(struct mvpp2_port *port) 6801{ 6802 struct net_device *ndev = port->dev; 6803 int i; 6804 6805 /* Stop new packets from arriving to RXQs */ 6806 mvpp2_ingress_disable(port); 6807 6808 mdelay(10); 6809 6810 /* Disable interrupts on all CPUs */ 6811 mvpp2_interrupts_disable(port); 6812 6813 for (i = 0; i < port->nqvecs; i++) 6814 napi_disable(&port->qvecs[i].napi); 6815 6816 netif_carrier_off(port->dev); 6817 netif_tx_stop_all_queues(port->dev); 6818 6819 mvpp2_egress_disable(port); 6820 mvpp2_port_disable(port); 6821 if (ndev->phydev) 6822 phy_stop(ndev->phydev); 6823 phy_power_off(port->comphy); 6824} 6825 6826static int mvpp2_check_ringparam_valid(struct net_device *dev, 6827 struct ethtool_ringparam *ring) 6828{ 6829 u16 new_rx_pending = ring->rx_pending; 6830 u16 new_tx_pending = ring->tx_pending; 6831 6832 if (ring->rx_pending == 0 || ring->tx_pending == 0) 6833 return -EINVAL; 6834 6835 if (ring->rx_pending > MVPP2_MAX_RXD) 6836 new_rx_pending = MVPP2_MAX_RXD; 6837 else if (!IS_ALIGNED(ring->rx_pending, 16)) 6838 new_rx_pending = ALIGN(ring->rx_pending, 16); 6839 6840 if (ring->tx_pending > MVPP2_MAX_TXD) 6841 new_tx_pending = MVPP2_MAX_TXD; 6842 else if (!IS_ALIGNED(ring->tx_pending, 32)) 6843 new_tx_pending = ALIGN(ring->tx_pending, 32); 6844 6845 /* The Tx ring size cannot be smaller than the minimum number of 6846 * descriptors needed for TSO. 
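 * As a concrete illustration (values assumed): a request of
 * tx_pending = 16 is first rounded up to 32 by the alignment check
 * above, and is then raised to ALIGN(MVPP2_MAX_SKB_DESCS, 32) below
 * if it is still smaller, since a single TSO skb may need up to
 * MVPP2_MAX_SKB_DESCS descriptors at once.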
6847 */ 6848 if (new_tx_pending < MVPP2_MAX_SKB_DESCS) 6849 new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32); 6850 6851 if (ring->rx_pending != new_rx_pending) { 6852 netdev_info(dev, "illegal Rx ring size value %d, round to %d\n", 6853 ring->rx_pending, new_rx_pending); 6854 ring->rx_pending = new_rx_pending; 6855 } 6856 6857 if (ring->tx_pending != new_tx_pending) { 6858 netdev_info(dev, "illegal Tx ring size value %d, round to %d\n", 6859 ring->tx_pending, new_tx_pending); 6860 ring->tx_pending = new_tx_pending; 6861 } 6862 6863 return 0; 6864} 6865 6866static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr) 6867{ 6868 u32 mac_addr_l, mac_addr_m, mac_addr_h; 6869 6870 mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG); 6871 mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE); 6872 mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH); 6873 addr[0] = (mac_addr_h >> 24) & 0xFF; 6874 addr[1] = (mac_addr_h >> 16) & 0xFF; 6875 addr[2] = (mac_addr_h >> 8) & 0xFF; 6876 addr[3] = mac_addr_h & 0xFF; 6877 addr[4] = mac_addr_m & 0xFF; 6878 addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF; 6879} 6880 6881static int mvpp2_phy_connect(struct mvpp2_port *port) 6882{ 6883 struct phy_device *phy_dev; 6884 6885 /* No PHY is attached */ 6886 if (!port->phy_node) 6887 return 0; 6888 6889 phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0, 6890 port->phy_interface); 6891 if (!phy_dev) { 6892 netdev_err(port->dev, "cannot connect to phy\n"); 6893 return -ENODEV; 6894 } 6895 phy_dev->supported &= PHY_GBIT_FEATURES; 6896 phy_dev->advertising = phy_dev->supported; 6897 6898 port->link = 0; 6899 port->duplex = 0; 6900 port->speed = 0; 6901 6902 return 0; 6903} 6904 6905static void mvpp2_phy_disconnect(struct mvpp2_port *port) 6906{ 6907 struct net_device *ndev = port->dev; 6908 6909 if (!ndev->phydev) 6910 return; 6911 6912 phy_disconnect(ndev->phydev); 6913} 6914 6915static int mvpp2_irqs_init(struct mvpp2_port *port) 6916{ 6917 int err, i; 6918 6919 for (i = 0; i < port->nqvecs; i++) { 6920 struct mvpp2_queue_vector *qv = port->qvecs + i; 6921 6922 if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) 6923 irq_set_status_flags(qv->irq, IRQ_NO_BALANCING); 6924 6925 err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv); 6926 if (err) 6927 goto err; 6928 6929 if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) 6930 irq_set_affinity_hint(qv->irq, 6931 cpumask_of(qv->sw_thread_id)); 6932 } 6933 6934 return 0; 6935err: 6936 for (i = 0; i < port->nqvecs; i++) { 6937 struct mvpp2_queue_vector *qv = port->qvecs + i; 6938 6939 irq_set_affinity_hint(qv->irq, NULL); 6940 free_irq(qv->irq, qv); 6941 } 6942 6943 return err; 6944} 6945 6946static void mvpp2_irqs_deinit(struct mvpp2_port *port) 6947{ 6948 int i; 6949 6950 for (i = 0; i < port->nqvecs; i++) { 6951 struct mvpp2_queue_vector *qv = port->qvecs + i; 6952 6953 irq_set_affinity_hint(qv->irq, NULL); 6954 irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING); 6955 free_irq(qv->irq, qv); 6956 } 6957} 6958 6959static void mvpp22_init_rss(struct mvpp2_port *port) 6960{ 6961 struct mvpp2 *priv = port->priv; 6962 int i; 6963 6964 /* Set the table width: replace the whole classifier Rx queue number 6965 * with the ones configured in RSS table entries. 6966 */ 6967 mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(0)); 6968 mvpp2_write(priv, MVPP22_RSS_WIDTH, 8); 6969 6970 /* Loop through the classifier Rx Queues and map them to a RSS table. 6971 * Map them all to the first table (0) by default. 
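 * (Descriptive note: one MVPP22_RSS_INDEX_QUEUE() select plus one
 * MVPP22_RSS_TABLE pointer write is issued per classifier queue in
 * the loop below, so every queue starts out attached to table 0.)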
6972 */ 6973 for (i = 0; i < MVPP2_CLS_RX_QUEUES; i++) { 6974 mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_QUEUE(i)); 6975 mvpp2_write(priv, MVPP22_RSS_TABLE, 6976 MVPP22_RSS_TABLE_POINTER(0)); 6977 } 6978 6979 /* Configure the first table to evenly distribute the packets across 6980 * real Rx Queues. The table entries map a hash to a port Rx Queue. 6981 */ 6982 for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) { 6983 u32 sel = MVPP22_RSS_INDEX_TABLE(0) | 6984 MVPP22_RSS_INDEX_TABLE_ENTRY(i); 6985 mvpp2_write(priv, MVPP22_RSS_INDEX, sel); 6986 6987 mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY, i % port->nrxqs); 6988 } 6989 6990} 6991 6992static int mvpp2_open(struct net_device *dev) 6993{ 6994 struct mvpp2_port *port = netdev_priv(dev); 6995 struct mvpp2 *priv = port->priv; 6996 unsigned char mac_bcast[ETH_ALEN] = { 6997 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 6998 int err; 6999 7000 err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true); 7001 if (err) { 7002 netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n"); 7003 return err; 7004 } 7005 err = mvpp2_prs_mac_da_accept(port->priv, port->id, 7006 dev->dev_addr, true); 7007 if (err) { 7008 netdev_err(dev, "mvpp2_prs_mac_da_accept own address failed\n"); 7009 return err; 7010 } 7011 err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH); 7012 if (err) { 7013 netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n"); 7014 return err; 7015 } 7016 err = mvpp2_prs_def_flow(port); 7017 if (err) { 7018 netdev_err(dev, "mvpp2_prs_def_flow failed\n"); 7019 return err; 7020 } 7021 7022 /* Allocate the Rx/Tx queues */ 7023 err = mvpp2_setup_rxqs(port); 7024 if (err) { 7025 netdev_err(port->dev, "cannot allocate Rx queues\n"); 7026 return err; 7027 } 7028 7029 err = mvpp2_setup_txqs(port); 7030 if (err) { 7031 netdev_err(port->dev, "cannot allocate Tx queues\n"); 7032 goto err_cleanup_rxqs; 7033 } 7034 7035 err = mvpp2_irqs_init(port); 7036 if (err) { 7037 netdev_err(port->dev, "cannot init IRQs\n"); 7038 goto err_cleanup_txqs; 7039 } 7040 7041 if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq) { 7042 err = request_irq(port->link_irq, mvpp2_link_status_isr, 0, 7043 dev->name, port); 7044 if (err) { 7045 netdev_err(port->dev, "cannot request link IRQ %d\n", 7046 port->link_irq); 7047 goto err_free_irq; 7048 } 7049 7050 mvpp22_gop_setup_irq(port); 7051 } 7052 7053 /* By default, the link is down */ 7054 netif_carrier_off(port->dev); 7055 7056 err = mvpp2_phy_connect(port); 7057 if (err < 0) 7058 goto err_free_link_irq; 7059 7060 /* Unmask interrupts on all CPUs */ 7061 on_each_cpu(mvpp2_interrupts_unmask, port, 1); 7062 mvpp2_shared_interrupt_mask_unmask(port, false); 7063 7064 mvpp2_start_dev(port); 7065 7066 if (priv->hw_version == MVPP22) 7067 mvpp22_init_rss(port); 7068 7069 /* Start hardware statistics gathering */ 7070 queue_delayed_work(priv->stats_queue, &port->stats_work, 7071 MVPP2_MIB_COUNTERS_STATS_DELAY); 7072 7073 return 0; 7074 7075err_free_link_irq: 7076 if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq) 7077 free_irq(port->link_irq, port); 7078err_free_irq: 7079 mvpp2_irqs_deinit(port); 7080err_cleanup_txqs: 7081 mvpp2_cleanup_txqs(port); 7082err_cleanup_rxqs: 7083 mvpp2_cleanup_rxqs(port); 7084 return err; 7085} 7086 7087static int mvpp2_stop(struct net_device *dev) 7088{ 7089 struct mvpp2_port *port = netdev_priv(dev); 7090 struct mvpp2_port_pcpu *port_pcpu; 7091 struct mvpp2 *priv = port->priv; 7092 int cpu; 7093 7094 mvpp2_stop_dev(port); 7095 mvpp2_phy_disconnect(port); 7096 7097 /* Mask
interrupts on all CPUs */ 7098 on_each_cpu(mvpp2_interrupts_mask, port, 1); 7099 mvpp2_shared_interrupt_mask_unmask(port, true); 7100 7101 if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq) 7102 free_irq(port->link_irq, port); 7103 7104 mvpp2_irqs_deinit(port); 7105 if (!port->has_tx_irqs) { 7106 for_each_present_cpu(cpu) { 7107 port_pcpu = per_cpu_ptr(port->pcpu, cpu); 7108 7109 hrtimer_cancel(&port_pcpu->tx_done_timer); 7110 port_pcpu->timer_scheduled = false; 7111 tasklet_kill(&port_pcpu->tx_done_tasklet); 7112 } 7113 } 7114 mvpp2_cleanup_rxqs(port); 7115 mvpp2_cleanup_txqs(port); 7116 7117 cancel_delayed_work_sync(&port->stats_work); 7118 7119 return 0; 7120} 7121 7122static void mvpp2_set_rx_mode(struct net_device *dev) 7123{ 7124 struct mvpp2_port *port = netdev_priv(dev); 7125 struct mvpp2 *priv = port->priv; 7126 struct netdev_hw_addr *ha; 7127 int id = port->id; 7128 bool allmulti = dev->flags & IFF_ALLMULTI; 7129 7130 mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC); 7131 mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti); 7132 mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti); 7133 7134 /* Remove all port->id's mcast entries */ 7135 mvpp2_prs_mcast_del_all(priv, id); 7136 7137 if (allmulti && !netdev_mc_empty(dev)) { 7138 netdev_for_each_mc_addr(ha, dev) 7139 mvpp2_prs_mac_da_accept(priv, id, ha->addr, true); 7140 } 7141} 7142 7143static int mvpp2_set_mac_address(struct net_device *dev, void *p) 7144{ 7145 struct mvpp2_port *port = netdev_priv(dev); 7146 const struct sockaddr *addr = p; 7147 int err; 7148 7149 if (!is_valid_ether_addr(addr->sa_data)) { 7150 err = -EADDRNOTAVAIL; 7151 goto log_error; 7152 } 7153 7154 if (!netif_running(dev)) { 7155 err = mvpp2_prs_update_mac_da(dev, addr->sa_data); 7156 if (!err) 7157 return 0; 7158 /* Reconfigure parser to accept the original MAC address */ 7159 err = mvpp2_prs_update_mac_da(dev, dev->dev_addr); 7160 if (err) 7161 goto log_error; 7162 } 7163 7164 mvpp2_stop_dev(port); 7165 7166 err = mvpp2_prs_update_mac_da(dev, addr->sa_data); 7167 if (!err) 7168 goto out_start; 7169 7170 /* Reconfigure parser to accept the original MAC address */ 7171 err = mvpp2_prs_update_mac_da(dev, dev->dev_addr); 7172 if (err) 7173 goto log_error; 7174out_start: 7175 mvpp2_start_dev(port); 7176 mvpp2_egress_enable(port); 7177 mvpp2_ingress_enable(port); 7178 return 0; 7179log_error: 7180 netdev_err(dev, "failed to change MAC address\n"); 7181 return err; 7182} 7183 7184static int mvpp2_change_mtu(struct net_device *dev, int mtu) 7185{ 7186 struct mvpp2_port *port = netdev_priv(dev); 7187 int err; 7188 7189 if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) { 7190 netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu, 7191 ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8)); 7192 mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8); 7193 } 7194 7195 if (!netif_running(dev)) { 7196 err = mvpp2_bm_update_mtu(dev, mtu); 7197 if (!err) { 7198 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu); 7199 return 0; 7200 } 7201 7202 /* Reconfigure BM to the original MTU */ 7203 err = mvpp2_bm_update_mtu(dev, dev->mtu); 7204 if (err) 7205 goto log_error; 7206 } 7207 7208 mvpp2_stop_dev(port); 7209 7210 err = mvpp2_bm_update_mtu(dev, mtu); 7211 if (!err) { 7212 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu); 7213 goto out_start; 7214 } 7215 7216 /* Reconfigure BM to the original MTU */ 7217 err = mvpp2_bm_update_mtu(dev, dev->mtu); 7218 if (err) 7219 goto log_error; 7220 7221out_start: 7222 mvpp2_start_dev(port); 7223 mvpp2_egress_enable(port); 7224
mvpp2_ingress_enable(port); 7225 7226 return 0; 7227log_error: 7228 netdev_err(dev, "failed to change MTU\n"); 7229 return err; 7230} 7231 7232static void 7233mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 7234{ 7235 struct mvpp2_port *port = netdev_priv(dev); 7236 unsigned int start; 7237 int cpu; 7238 7239 for_each_possible_cpu(cpu) { 7240 struct mvpp2_pcpu_stats *cpu_stats; 7241 u64 rx_packets; 7242 u64 rx_bytes; 7243 u64 tx_packets; 7244 u64 tx_bytes; 7245 7246 cpu_stats = per_cpu_ptr(port->stats, cpu); 7247 do { 7248 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); 7249 rx_packets = cpu_stats->rx_packets; 7250 rx_bytes = cpu_stats->rx_bytes; 7251 tx_packets = cpu_stats->tx_packets; 7252 tx_bytes = cpu_stats->tx_bytes; 7253 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); 7254 7255 stats->rx_packets += rx_packets; 7256 stats->rx_bytes += rx_bytes; 7257 stats->tx_packets += tx_packets; 7258 stats->tx_bytes += tx_bytes; 7259 } 7260 7261 stats->rx_errors = dev->stats.rx_errors; 7262 stats->rx_dropped = dev->stats.rx_dropped; 7263 stats->tx_dropped = dev->stats.tx_dropped; 7264} 7265 7266static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 7267{ 7268 int ret; 7269 7270 if (!dev->phydev) 7271 return -ENOTSUPP; 7272 7273 ret = phy_mii_ioctl(dev->phydev, ifr, cmd); 7274 if (!ret) 7275 mvpp2_link_event(dev); 7276 7277 return ret; 7278} 7279 7280/* Ethtool methods */ 7281 7282/* Set interrupt coalescing for ethtool */ 7283static int mvpp2_ethtool_set_coalesce(struct net_device *dev, 7284 struct ethtool_coalesce *c) 7285{ 7286 struct mvpp2_port *port = netdev_priv(dev); 7287 int queue; 7288 7289 for (queue = 0; queue < port->nrxqs; queue++) { 7290 struct mvpp2_rx_queue *rxq = port->rxqs[queue]; 7291 7292 rxq->time_coal = c->rx_coalesce_usecs; 7293 rxq->pkts_coal = c->rx_max_coalesced_frames; 7294 mvpp2_rx_pkts_coal_set(port, rxq); 7295 mvpp2_rx_time_coal_set(port, rxq); 7296 } 7297 7298 if (port->has_tx_irqs) { 7299 port->tx_time_coal = c->tx_coalesce_usecs; 7300 mvpp2_tx_time_coal_set(port); 7301 } 7302 7303 for (queue = 0; queue < port->ntxqs; queue++) { 7304 struct mvpp2_tx_queue *txq = port->txqs[queue]; 7305 7306 txq->done_pkts_coal = c->tx_max_coalesced_frames; 7307 7308 if (port->has_tx_irqs) 7309 mvpp2_tx_pkts_coal_set(port, txq); 7310 } 7311 7312 return 0; 7313} 7314 7315/* Get interrupt coalescing for ethtool */ 7316static int mvpp2_ethtool_get_coalesce(struct net_device *dev, 7317 struct ethtool_coalesce *c) 7318{ 7319 struct mvpp2_port *port = netdev_priv(dev); 7320 7321 c->rx_coalesce_usecs = port->rxqs[0]->time_coal; 7322 c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal; 7323 c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal; 7324 return 0; 7325} 7326 7327static void mvpp2_ethtool_get_drvinfo(struct net_device *dev, 7328 struct ethtool_drvinfo *drvinfo) 7329{ 7330 strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME, 7331 sizeof(drvinfo->driver)); 7332 strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION, 7333 sizeof(drvinfo->version)); 7334 strlcpy(drvinfo->bus_info, dev_name(&dev->dev), 7335 sizeof(drvinfo->bus_info)); 7336} 7337 7338static void mvpp2_ethtool_get_ringparam(struct net_device *dev, 7339 struct ethtool_ringparam *ring) 7340{ 7341 struct mvpp2_port *port = netdev_priv(dev); 7342 7343 ring->rx_max_pending = MVPP2_MAX_RXD; 7344 ring->tx_max_pending = MVPP2_MAX_TXD; 7345 ring->rx_pending = port->rx_ring_size; 7346 ring->tx_pending = port->tx_ring_size; 7347} 7348 7349static int mvpp2_ethtool_set_ringparam(struct
static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
				       struct ethtool_ringparam *ring)
{
	struct mvpp2_port *port = netdev_priv(dev);
	u16 prev_rx_ring_size = port->rx_ring_size;
	u16 prev_tx_ring_size = port->tx_ring_size;
	int err;

	err = mvpp2_check_ringparam_valid(dev, ring);
	if (err)
		return err;

	if (!netif_running(dev)) {
		port->rx_ring_size = ring->rx_pending;
		port->tx_ring_size = ring->tx_pending;
		return 0;
	}

	/* The interface is running, so we have to force a
	 * reallocation of the queues
	 */
	mvpp2_stop_dev(port);
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	port->rx_ring_size = ring->rx_pending;
	port->tx_ring_size = ring->tx_pending;

	err = mvpp2_setup_rxqs(port);
	if (err) {
		/* Reallocate Rx queues with the original ring size */
		port->rx_ring_size = prev_rx_ring_size;
		ring->rx_pending = prev_rx_ring_size;
		err = mvpp2_setup_rxqs(port);
		if (err)
			goto err_out;
	}
	err = mvpp2_setup_txqs(port);
	if (err) {
		/* Reallocate Tx queues with the original ring size */
		port->tx_ring_size = prev_tx_ring_size;
		ring->tx_pending = prev_tx_ring_size;
		err = mvpp2_setup_txqs(port);
		if (err)
			goto err_clean_rxqs;
	}

	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);

	return 0;

err_clean_rxqs:
	mvpp2_cleanup_rxqs(port);
err_out:
	netdev_err(dev, "failed to change ring parameters\n");
	return err;
}

/* Device ops */

static const struct net_device_ops mvpp2_netdev_ops = {
	.ndo_open		= mvpp2_open,
	.ndo_stop		= mvpp2_stop,
	.ndo_start_xmit		= mvpp2_tx,
	.ndo_set_rx_mode	= mvpp2_set_rx_mode,
	.ndo_set_mac_address	= mvpp2_set_mac_address,
	.ndo_change_mtu		= mvpp2_change_mtu,
	.ndo_get_stats64	= mvpp2_get_stats64,
	.ndo_do_ioctl		= mvpp2_ioctl,
};

static const struct ethtool_ops mvpp2_eth_tool_ops = {
	.nway_reset		= phy_ethtool_nway_reset,
	.get_link		= ethtool_op_get_link,
	.set_coalesce		= mvpp2_ethtool_set_coalesce,
	.get_coalesce		= mvpp2_ethtool_get_coalesce,
	.get_drvinfo		= mvpp2_ethtool_get_drvinfo,
	.get_ringparam		= mvpp2_ethtool_get_ringparam,
	.set_ringparam		= mvpp2_ethtool_set_ringparam,
	.get_strings		= mvpp2_ethtool_get_strings,
	.get_ethtool_stats	= mvpp2_ethtool_get_stats,
	.get_sset_count		= mvpp2_ethtool_get_sset_count,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};
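/* Illustrative sketch (editorial; the IRQ numbers are hypothetical) of the
 * two device-tree interrupt layouts the queue-vector code below copes with.
 * Old binding, a single IRQ per port:
 *
 *	ethernet-port@0 {
 *		interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
 *	};
 *
 * New PPv2.2 binding, one Tx-done IRQ per CPU plus a shared Rx IRQ:
 *
 *	interrupt-names = "tx-cpu0", "tx-cpu1", "tx-cpu2",
 *			  "tx-cpu3", "rx-shared";
 */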
/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
 * had a single IRQ defined per-port.
 */
static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
					   struct device_node *port_node)
{
	struct mvpp2_queue_vector *v = &port->qvecs[0];

	v->first_rxq = 0;
	v->nrxqs = port->nrxqs;
	v->type = MVPP2_QUEUE_VECTOR_SHARED;
	v->sw_thread_id = 0;
	v->sw_thread_mask = *cpumask_bits(cpu_online_mask);
	v->port = port;
	v->irq = irq_of_parse_and_map(port_node, 0);
	if (v->irq <= 0)
		return -EINVAL;
	netif_napi_add(port->dev, &v->napi, mvpp2_poll,
		       NAPI_POLL_WEIGHT);

	port->nqvecs = 1;

	return 0;
}

static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
					  struct device_node *port_node)
{
	struct mvpp2_queue_vector *v;
	int i, ret;

	port->nqvecs = num_possible_cpus();
	if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
		port->nqvecs += 1;

	for (i = 0; i < port->nqvecs; i++) {
		char irqname[16];

		v = port->qvecs + i;

		v->port = port;
		v->type = MVPP2_QUEUE_VECTOR_PRIVATE;
		v->sw_thread_id = i;
		v->sw_thread_mask = BIT(i);

		snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);

		if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
			v->first_rxq = i * MVPP2_DEFAULT_RXQ;
			v->nrxqs = MVPP2_DEFAULT_RXQ;
		} else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
			   i == (port->nqvecs - 1)) {
			v->first_rxq = 0;
			v->nrxqs = port->nrxqs;
			v->type = MVPP2_QUEUE_VECTOR_SHARED;
			strncpy(irqname, "rx-shared", sizeof(irqname));
		}

		v->irq = of_irq_get_byname(port_node, irqname);
		if (v->irq <= 0) {
			ret = -EINVAL;
			goto err;
		}

		netif_napi_add(port->dev, &v->napi, mvpp2_poll,
			       NAPI_POLL_WEIGHT);
	}

	return 0;

err:
	for (i = 0; i < port->nqvecs; i++)
		irq_dispose_mapping(port->qvecs[i].irq);
	return ret;
}

static int mvpp2_queue_vectors_init(struct mvpp2_port *port,
				    struct device_node *port_node)
{
	if (port->has_tx_irqs)
		return mvpp2_multi_queue_vectors_init(port, port_node);
	else
		return mvpp2_simple_queue_vectors_init(port, port_node);
}

static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port)
{
	int i;

	for (i = 0; i < port->nqvecs; i++)
		irq_dispose_mapping(port->qvecs[i].irq);
}

/* Configure Rx queue group interrupt for this port */
static void mvpp2_rx_irqs_setup(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;
	int i;

	if (priv->hw_version == MVPP21) {
		mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
			    port->nrxqs);
		return;
	}

	/* Handle the more complicated PPv2.2 case */
	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		if (!qv->nrxqs)
			continue;

		val = qv->sw_thread_id;
		val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET;
		mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);

		val = qv->first_rxq;
		val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET;
		mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
	}
}
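/* Worked example (editorial sketch, assuming a 4-CPU system and
 * MVPP2_DEFAULT_RXQ == 4): in MVPP2_QDIST_MULTI_MODE the loop above yields
 *
 *	qvec 0: "tx-cpu0", private, rxqs 0..3
 *	qvec 1: "tx-cpu1", private, rxqs 4..7
 *	qvec 2: "tx-cpu2", private, rxqs 8..11
 *	qvec 3: "tx-cpu3", private, rxqs 12..15
 *
 * while in MVPP2_QDIST_SINGLE_MODE a fifth, shared "rx-shared" vector owns
 * all of the port's Rx queues and the per-CPU vectors only service Tx-done.
 */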
/* Initialize port HW */
static int mvpp2_port_init(struct mvpp2_port *port)
{
	struct device *dev = port->dev->dev.parent;
	struct mvpp2 *priv = port->priv;
	struct mvpp2_txq_pcpu *txq_pcpu;
	int queue, cpu, err;

	/* Checks for hardware constraints */
	if (port->first_rxq + port->nrxqs >
	    MVPP2_MAX_PORTS * priv->max_port_rxqs)
		return -EINVAL;

	if (port->nrxqs % 4 || (port->nrxqs > priv->max_port_rxqs) ||
	    (port->ntxqs > MVPP2_MAX_TXQ))
		return -EINVAL;

	/* Disable port */
	mvpp2_egress_disable(port);
	mvpp2_port_disable(port);

	port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;

	port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
				  GFP_KERNEL);
	if (!port->txqs)
		return -ENOMEM;

	/* Associate physical Tx queues to this port and initialize.
	 * The mapping is predefined.
	 */
	for (queue = 0; queue < port->ntxqs; queue++) {
		int queue_phy_id = mvpp2_txq_phys(port->id, queue);
		struct mvpp2_tx_queue *txq;

		txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
		if (!txq) {
			err = -ENOMEM;
			goto err_free_percpu;
		}

		txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
		if (!txq->pcpu) {
			err = -ENOMEM;
			goto err_free_percpu;
		}

		txq->id = queue_phy_id;
		txq->log_id = queue;
		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
		for_each_present_cpu(cpu) {
			txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
			txq_pcpu->cpu = cpu;
		}

		port->txqs[queue] = txq;
	}

	port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs),
				  GFP_KERNEL);
	if (!port->rxqs) {
		err = -ENOMEM;
		goto err_free_percpu;
	}

	/* Allocate and initialize Rx queue for this port */
	for (queue = 0; queue < port->nrxqs; queue++) {
		struct mvpp2_rx_queue *rxq;

		/* Map physical Rx queue to port's logical Rx queue */
		rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
		if (!rxq) {
			err = -ENOMEM;
			goto err_free_percpu;
		}
		/* Map this Rx queue to a physical queue */
		rxq->id = port->first_rxq + queue;
		rxq->port = port->id;
		rxq->logic_rxq = queue;

		port->rxqs[queue] = rxq;
	}

	mvpp2_rx_irqs_setup(port);

	/* Create Rx descriptor rings */
	for (queue = 0; queue < port->nrxqs; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->size = port->rx_ring_size;
		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
		rxq->time_coal = MVPP2_RX_COAL_USEC;
	}

	mvpp2_ingress_disable(port);

	/* Port default configuration */
	mvpp2_defaults_set(port);

	/* Port's classifier configuration */
	mvpp2_cls_oversize_rxq_set(port);
	mvpp2_cls_port_config(port);

	/* Provide an initial Rx packet size */
	port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);

	/* Initialize pools for swf */
	err = mvpp2_swf_bm_pool_init(port);
	if (err)
		goto err_free_percpu;

	return 0;

err_free_percpu:
	for (queue = 0; queue < port->ntxqs; queue++) {
		if (!port->txqs[queue])
			continue;
		free_percpu(port->txqs[queue]->pcpu);
	}
	return err;
}
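/* Back-of-envelope check of the constraint above (editorial sketch, using
 * the PPv2.2 values: max_port_rxqs = 32 from mvpp2_probe(), and assuming
 * MVPP2_MAX_PORTS == 4): port->first_rxq = id * 32, so port 2 owns physical
 * rxqs 64..64+nrxqs-1, and the test rejects any layout reaching past
 * 4 * 32 = 128 physical Rx queues.
 */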
/* Checks if the port DT description has the TX interrupts
 * described. On PPv2.1, there are no such interrupts. On PPv2.2,
 * they are available, but we need to keep support for old DTs.
 */
static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv,
				   struct device_node *port_node)
{
	char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1",
			  "tx-cpu2", "tx-cpu3" };
	int ret, i;

	if (priv->hw_version == MVPP21)
		return false;

	for (i = 0; i < 5; i++) {
		ret = of_property_match_string(port_node, "interrupt-names",
					       irqs[i]);
		if (ret < 0)
			return false;
	}

	return true;
}

static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
				     struct device_node *port_node,
				     char **mac_from)
{
	struct mvpp2_port *port = netdev_priv(dev);
	char hw_mac_addr[ETH_ALEN] = {0};
	const char *dt_mac_addr;

	dt_mac_addr = of_get_mac_address(port_node);
	if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
		*mac_from = "device tree";
		ether_addr_copy(dev->dev_addr, dt_mac_addr);
		return;
	}

	if (priv->hw_version == MVPP21) {
		mvpp21_get_mac_address(port, hw_mac_addr);
		if (is_valid_ether_addr(hw_mac_addr)) {
			*mac_from = "hardware";
			ether_addr_copy(dev->dev_addr, hw_mac_addr);
			return;
		}
	}

	*mac_from = "random";
	eth_hw_addr_random(dev);
}

/* Ports initialization */
static int mvpp2_port_probe(struct platform_device *pdev,
			    struct device_node *port_node,
			    struct mvpp2 *priv, int index)
{
	struct device_node *phy_node;
	struct phy *comphy;
	struct mvpp2_port *port;
	struct mvpp2_port_pcpu *port_pcpu;
	struct net_device *dev;
	struct resource *res;
	char *mac_from = "";
	unsigned int ntxqs, nrxqs;
	bool has_tx_irqs;
	u32 id;
	int features;
	int phy_mode;
	int err, i, cpu;

	has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node);

	if (!has_tx_irqs)
		queue_mode = MVPP2_QDIST_SINGLE_MODE;

	ntxqs = MVPP2_MAX_TXQ;
	if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_MULTI_MODE)
		nrxqs = MVPP2_DEFAULT_RXQ * num_possible_cpus();
	else
		nrxqs = MVPP2_DEFAULT_RXQ;

	dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
	if (!dev)
		return -ENOMEM;

	phy_node = of_parse_phandle(port_node, "phy", 0);
	phy_mode = of_get_phy_mode(port_node);
	if (phy_mode < 0) {
		dev_err(&pdev->dev, "incorrect phy mode\n");
		err = phy_mode;
		goto err_free_netdev;
	}

	comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
	if (IS_ERR(comphy)) {
		if (PTR_ERR(comphy) == -EPROBE_DEFER) {
			err = -EPROBE_DEFER;
			goto err_free_netdev;
		}
		comphy = NULL;
	}

	if (of_property_read_u32(port_node, "port-id", &id)) {
		err = -EINVAL;
		dev_err(&pdev->dev, "missing port-id value\n");
		goto err_free_netdev;
	}

	dev->tx_queue_len = MVPP2_MAX_TXD;
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &mvpp2_netdev_ops;
	dev->ethtool_ops = &mvpp2_eth_tool_ops;

	port = netdev_priv(dev);
	port->dev = dev;
	port->ntxqs = ntxqs;
	port->nrxqs = nrxqs;
	port->priv = priv;
	port->has_tx_irqs = has_tx_irqs;

	err = mvpp2_queue_vectors_init(port, port_node);
	if (err)
		goto err_free_netdev;

	port->link_irq = of_irq_get_byname(port_node, "link");
	if (port->link_irq == -EPROBE_DEFER) {
		err = -EPROBE_DEFER;
		goto err_deinit_qvecs;
	}
	if (port->link_irq <= 0)
		/* the link irq is optional */
		port->link_irq = 0;
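	/* Editorial note: of_irq_get_byname() returns a negative errno (or 0)
	 * when the IRQ is absent; everything except -EPROBE_DEFER is treated
	 * above as "no link interrupt" rather than as a probe failure.
	 */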
	if (of_property_read_bool(port_node, "marvell,loopback"))
		port->flags |= MVPP2_F_LOOPBACK;

	port->id = id;
	if (priv->hw_version == MVPP21)
		port->first_rxq = port->id * port->nrxqs;
	else
		port->first_rxq = port->id * priv->max_port_rxqs;

	port->phy_node = phy_node;
	port->phy_interface = phy_mode;
	port->comphy = comphy;

	if (priv->hw_version == MVPP21) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
		port->base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(port->base)) {
			err = PTR_ERR(port->base);
			goto err_free_irq;
		}

		port->stats_base = port->priv->lms_base +
				   MVPP21_MIB_COUNTERS_OFFSET +
				   port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ;
	} else {
		if (of_property_read_u32(port_node, "gop-port-id",
					 &port->gop_id)) {
			err = -EINVAL;
			dev_err(&pdev->dev, "missing gop-port-id value\n");
			goto err_deinit_qvecs;
		}

		port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
		port->stats_base = port->priv->iface_base +
				   MVPP22_MIB_COUNTERS_OFFSET +
				   port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ;
	}

	/* Alloc per-cpu and ethtool stats */
	port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
	if (!port->stats) {
		err = -ENOMEM;
		goto err_free_irq;
	}

	port->ethtool_stats = devm_kcalloc(&pdev->dev,
					   ARRAY_SIZE(mvpp2_ethtool_regs),
					   sizeof(u64), GFP_KERNEL);
	if (!port->ethtool_stats) {
		err = -ENOMEM;
		goto err_free_stats;
	}

	mutex_init(&port->gather_stats_lock);
	INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics);

	mvpp2_port_copy_mac_addr(dev, priv, port_node, &mac_from);

	port->tx_ring_size = MVPP2_MAX_TXD;
	port->rx_ring_size = MVPP2_MAX_RXD;
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = mvpp2_port_init(port);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to init port %d\n", id);
		goto err_free_stats;
	}

	mvpp2_port_periodic_xon_disable(port);

	if (priv->hw_version == MVPP21)
		mvpp2_port_fc_adv_enable(port);

	mvpp2_port_reset(port);

	port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
	if (!port->pcpu) {
		err = -ENOMEM;
		goto err_free_txq_pcpu;
	}

	if (!port->has_tx_irqs) {
		for_each_present_cpu(cpu) {
			port_pcpu = per_cpu_ptr(port->pcpu, cpu);

			hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL_PINNED);
			port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
			port_pcpu->timer_scheduled = false;

			tasklet_init(&port_pcpu->tx_done_tasklet,
				     mvpp2_tx_proc_cb,
				     (unsigned long)dev);
		}
	}

	features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
	dev->features = features | NETIF_F_RXCSUM;
	dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
	dev->vlan_features |= features;
	dev->gso_max_segs = MVPP2_MAX_TSO_SEGS;

	/* MTU range: 68 - 9676 */
	dev->min_mtu = ETH_MIN_MTU;
	/* 9676 == 9700 - 20 and rounding to 8 */
	dev->max_mtu = 9676;

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register netdev\n");
		goto err_free_port_pcpu;
	}
	netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);

	priv->port_list[index] = port;
	return 0;
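	/* Editorial note: the labels below unwind in reverse order of the
	 * allocations above, so each failure path releases exactly what has
	 * already been set up and nothing more.
	 */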
err_free_port_pcpu:
	free_percpu(port->pcpu);
err_free_txq_pcpu:
	for (i = 0; i < port->ntxqs; i++)
		free_percpu(port->txqs[i]->pcpu);
err_free_stats:
	free_percpu(port->stats);
err_free_irq:
	if (port->link_irq)
		irq_dispose_mapping(port->link_irq);
err_deinit_qvecs:
	mvpp2_queue_vectors_deinit(port);
err_free_netdev:
	of_node_put(phy_node);
	free_netdev(dev);
	return err;
}

/* Ports removal routine */
static void mvpp2_port_remove(struct mvpp2_port *port)
{
	int i;

	unregister_netdev(port->dev);
	of_node_put(port->phy_node);
	free_percpu(port->pcpu);
	free_percpu(port->stats);
	for (i = 0; i < port->ntxqs; i++)
		free_percpu(port->txqs[i]->pcpu);
	mvpp2_queue_vectors_deinit(port);
	if (port->link_irq)
		irq_dispose_mapping(port->link_irq);
	free_netdev(port->dev);
}

/* Initialize decoding windows */
static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
				    struct mvpp2 *priv)
{
	u32 win_enable;
	int i;

	for (i = 0; i < 6; i++) {
		mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
		mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);

		if (i < 4)
			mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
	}

	win_enable = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvpp2_write(priv, MVPP2_WIN_BASE(i),
			    (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
			    dram->mbus_dram_target_id);

		mvpp2_write(priv, MVPP2_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable |= (1 << i);
	}

	mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
}

/* Initialize Rx FIFOs */
static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
{
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
		mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}

static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
{
	int port;

	/* The FIFO size parameters are set depending on the maximum speed a
	 * given port can handle:
	 * - Port 0: 10Gbps
	 * - Port 1: 2.5Gbps
	 * - Ports 2 and 3: 1Gbps
	 */

	mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(0),
		    MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
	mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(0),
		    MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB);

	mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(1),
		    MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
	mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(1),
		    MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB);

	for (port = 2; port < MVPP2_MAX_PORTS; port++) {
		mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
		mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
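/* Quick reference (editorial sketch, derived from the writes above): the
 * PPv2.2 Rx data FIFO split is 32 KB for port 0 (10G), 8 KB for port 1
 * (2.5G) and 4 KB each for ports 2 and 3 (1G); judging by the constant
 * names, the attribute FIFOs follow the same per-port tiers.
 */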
/* Initialize Tx FIFOs */
static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
{
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++)
		mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port),
			    MVPP22_TX_FIFO_DATA_SIZE_3KB);
}

static void mvpp2_axi_init(struct mvpp2 *priv)
{
	u32 val, rdval, wrval;

	mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);

	/* AXI Bridge Configuration */

	rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	/* BM */
	mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);

	/* Descriptors */
	mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);

	/* Buffer Data */
	mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);

	val = MVPP22_AXI_CODE_CACHE_NON_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
	mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
	mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
}
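/* Shape of the attribute words built above (editorial sketch; field widths
 * live in the MVPP22_AXI_* definitions earlier in the file): each value
 * packs a cache code and a domain code, e.g.
 *
 *	rdval = (CACHE_RD_CACHE << CACHE_OFFS) | (DOMAIN_OUTER_DOM << DOMAIN_OFFS)
 *
 * so descriptor and buffer traffic is issued as cacheable, outer-shareable
 * AXI transactions, while the "normal" codes fall back to non-cacheable,
 * system-domain accesses.
 */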
/* Initialize network controller common part HW */
static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	const struct mbus_dram_target_info *dram_target_info;
	int err, i;
	u32 val;

	/* MBUS windows configuration */
	dram_target_info = mv_mbus_dram_info();
	if (dram_target_info)
		mvpp2_conf_mbus_windows(dram_target_info, priv);

	if (priv->hw_version == MVPP22)
		mvpp2_axi_init(priv);

	/* Disable HW PHY polling */
	if (priv->hw_version == MVPP21) {
		val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
		val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
		writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
	} else {
		val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
		val &= ~MVPP22_SMI_POLLING_EN;
		writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
	}

	/* Allocate and initialize aggregated TXQs */
	priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
				       sizeof(*priv->aggr_txqs),
				       GFP_KERNEL);
	if (!priv->aggr_txqs)
		return -ENOMEM;

	for_each_present_cpu(i) {
		priv->aggr_txqs[i].id = i;
		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
		err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
		if (err < 0)
			return err;
	}

	/* FIFO init */
	if (priv->hw_version == MVPP21) {
		mvpp2_rx_fifo_init(priv);
	} else {
		mvpp22_rx_fifo_init(priv);
		mvpp22_tx_fifo_init(priv);
	}

	if (priv->hw_version == MVPP21)
		writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
		       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);

	/* Allow cache snoop when transmitting packets */
	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);

	/* Buffer Manager initialization */
	err = mvpp2_bm_init(pdev, priv);
	if (err < 0)
		return err;

	/* Parser default initialization */
	err = mvpp2_prs_default_init(pdev, priv);
	if (err < 0)
		return err;

	/* Classifier default initialization */
	mvpp2_cls_init(priv);

	return 0;
}
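/* Design note (editorial): the aggregated Tx queues initialized above are
 * per-CPU and shared by all ports, so the transmit path on a given CPU first
 * posts descriptors to that CPU's aggregated queue and the hardware then
 * dispatches them to the per-port physical Tx queues.
 */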
static int mvpp2_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *port_node;
	struct mvpp2 *priv;
	struct resource *res;
	void __iomem *base;
	int i;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->hw_version =
		(unsigned long)of_device_get_match_data(&pdev->dev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	if (priv->hw_version == MVPP21) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(priv->lms_base))
			return PTR_ERR(priv->lms_base);
	} else {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(priv->iface_base))
			return PTR_ERR(priv->iface_base);

		priv->sysctrl_base =
			syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							"marvell,system-controller");
		if (IS_ERR(priv->sysctrl_base))
			/* The system controller regmap is optional for dt
			 * compatibility reasons. When not provided, the
			 * configuration of the GoP relies on the
			 * firmware/bootloader.
			 */
			priv->sysctrl_base = NULL;
	}

	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		u32 addr_space_sz;

		addr_space_sz = (priv->hw_version == MVPP21 ?
				 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
		priv->swth_base[i] = base + i * addr_space_sz;
	}

	if (priv->hw_version == MVPP21)
		priv->max_port_rxqs = 8;
	else
		priv->max_port_rxqs = 32;

	priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
	if (IS_ERR(priv->pp_clk))
		return PTR_ERR(priv->pp_clk);
	err = clk_prepare_enable(priv->pp_clk);
	if (err < 0)
		return err;

	priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
	if (IS_ERR(priv->gop_clk)) {
		err = PTR_ERR(priv->gop_clk);
		goto err_pp_clk;
	}
	err = clk_prepare_enable(priv->gop_clk);
	if (err < 0)
		goto err_pp_clk;

	if (priv->hw_version == MVPP22) {
		priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
		if (IS_ERR(priv->mg_clk)) {
			err = PTR_ERR(priv->mg_clk);
			goto err_gop_clk;
		}

		err = clk_prepare_enable(priv->mg_clk);
		if (err < 0)
			goto err_gop_clk;

		priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
		if (IS_ERR(priv->axi_clk)) {
			err = PTR_ERR(priv->axi_clk);
			if (err == -EPROBE_DEFER)
				goto err_gop_clk;
			priv->axi_clk = NULL;
		} else {
			err = clk_prepare_enable(priv->axi_clk);
			if (err < 0)
				goto err_gop_clk;
		}
	}

	/* Get system's tclk rate */
	priv->tclk = clk_get_rate(priv->pp_clk);

	if (priv->hw_version == MVPP22) {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
		if (err)
			goto err_mg_clk;
		/* Sadly, the BM pools all share the same register to
		 * store the high 32 bits of their address. So they
		 * must all have the same high 32 bits, which forces
		 * us to restrict coherent memory to DMA_BIT_MASK(32).
		 */
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			goto err_mg_clk;
	}

	/* Initialize network controller */
	err = mvpp2_init(pdev, priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		goto err_mg_clk;
	}

	priv->port_count = of_get_available_child_count(dn);
	if (priv->port_count == 0) {
		dev_err(&pdev->dev, "no ports enabled\n");
		err = -ENODEV;
		goto err_mg_clk;
	}

	priv->port_list = devm_kcalloc(&pdev->dev, priv->port_count,
				       sizeof(*priv->port_list),
				       GFP_KERNEL);
	if (!priv->port_list) {
		err = -ENOMEM;
		goto err_mg_clk;
	}

	/* Initialize ports */
	i = 0;
	for_each_available_child_of_node(dn, port_node) {
		err = mvpp2_port_probe(pdev, port_node, priv, i);
		if (err < 0)
			goto err_port_probe;
		i++;
	}

	/* Statistics must be gathered regularly because some of them (like
	 * packet counters) are 32-bit registers and could overflow quite
	 * quickly. For instance, a 10Gb link used at full bandwidth with the
	 * smallest packets (64B) will overflow a 32-bit counter in less than
	 * 30 seconds. Hence, use a workqueue to fill 64-bit counters.
	 */
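	/* Back-of-envelope check of the claim above (editorial sketch):
	 * 64B frames on a 10Gb link occupy 84 bytes on the wire each
	 * (preamble and inter-frame gap included), i.e. ~14.88 Mpps. A
	 * 32-bit octet counter then wraps in roughly
	 * 2^32 / (14.88e6 * 64) ~= 4.5 seconds, comfortably "less than 30
	 * seconds"; the packet counter itself takes about
	 * 2^32 / 14.88e6 ~= 290 seconds.
	 */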
"+" : ""); 8342 priv->stats_queue = create_singlethread_workqueue(priv->queue_name); 8343 if (!priv->stats_queue) { 8344 err = -ENOMEM; 8345 goto err_port_probe; 8346 } 8347 8348 platform_set_drvdata(pdev, priv); 8349 return 0; 8350 8351err_port_probe: 8352 i = 0; 8353 for_each_available_child_of_node(dn, port_node) { 8354 if (priv->port_list[i]) 8355 mvpp2_port_remove(priv->port_list[i]); 8356 i++; 8357 } 8358err_mg_clk: 8359 clk_disable_unprepare(priv->axi_clk); 8360 if (priv->hw_version == MVPP22) 8361 clk_disable_unprepare(priv->mg_clk); 8362err_gop_clk: 8363 clk_disable_unprepare(priv->gop_clk); 8364err_pp_clk: 8365 clk_disable_unprepare(priv->pp_clk); 8366 return err; 8367} 8368 8369static int mvpp2_remove(struct platform_device *pdev) 8370{ 8371 struct mvpp2 *priv = platform_get_drvdata(pdev); 8372 struct device_node *dn = pdev->dev.of_node; 8373 struct device_node *port_node; 8374 int i = 0; 8375 8376 flush_workqueue(priv->stats_queue); 8377 destroy_workqueue(priv->stats_queue); 8378 8379 for_each_available_child_of_node(dn, port_node) { 8380 if (priv->port_list[i]) { 8381 mutex_destroy(&priv->port_list[i]->gather_stats_lock); 8382 mvpp2_port_remove(priv->port_list[i]); 8383 } 8384 i++; 8385 } 8386 8387 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { 8388 struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i]; 8389 8390 mvpp2_bm_pool_destroy(pdev, priv, bm_pool); 8391 } 8392 8393 for_each_present_cpu(i) { 8394 struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i]; 8395 8396 dma_free_coherent(&pdev->dev, 8397 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, 8398 aggr_txq->descs, 8399 aggr_txq->descs_dma); 8400 } 8401 8402 clk_disable_unprepare(priv->axi_clk); 8403 clk_disable_unprepare(priv->mg_clk); 8404 clk_disable_unprepare(priv->pp_clk); 8405 clk_disable_unprepare(priv->gop_clk); 8406 8407 return 0; 8408} 8409 8410static const struct of_device_id mvpp2_match[] = { 8411 { 8412 .compatible = "marvell,armada-375-pp2", 8413 .data = (void *)MVPP21, 8414 }, 8415 { 8416 .compatible = "marvell,armada-7k-pp22", 8417 .data = (void *)MVPP22, 8418 }, 8419 { } 8420}; 8421MODULE_DEVICE_TABLE(of, mvpp2_match); 8422 8423static struct platform_driver mvpp2_driver = { 8424 .probe = mvpp2_probe, 8425 .remove = mvpp2_remove, 8426 .driver = { 8427 .name = MVPP2_DRIVER_NAME, 8428 .of_match_table = mvpp2_match, 8429 }, 8430}; 8431 8432module_platform_driver(mvpp2_driver); 8433 8434MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com"); 8435MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>"); 8436MODULE_LICENSE("GPL v2");