Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'stmmac-next'

Jose Abreu says:

====================
net: stmmac: Improvements for -next

A couple of improvements for the -next tree. More info in the commit logs.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+1001 -47
+2
drivers/net/ethernet/stmicro/stmmac/common.h
··· 360 360 unsigned int sphen; 361 361 unsigned int vlins; 362 362 unsigned int dvlan; 363 + unsigned int l3l4fnum; 364 + unsigned int arpoffsel; 363 365 }; 364 366 365 367 /* GMAC TX FIFO is 8K, Rx FIFO is 16K */
+32 -1
drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
··· 44 44 #define XGMAC_CONFIG_CST BIT(2) 45 45 #define XGMAC_CONFIG_ACS BIT(1) 46 46 #define XGMAC_CONFIG_RE BIT(0) 47 - #define XGMAC_CORE_INIT_RX 0 47 + #define XGMAC_CORE_INIT_RX (XGMAC_CONFIG_GPSLCE | XGMAC_CONFIG_WD | \ 48 + (XGMAC_JUMBO_LEN << XGMAC_CONFIG_GPSL_SHIFT)) 48 49 #define XGMAC_PACKET_FILTER 0x00000008 49 50 #define XGMAC_FILTER_RA BIT(31) 51 + #define XGMAC_FILTER_IPFE BIT(20) 50 52 #define XGMAC_FILTER_VTFE BIT(16) 51 53 #define XGMAC_FILTER_HPF BIT(10) 52 54 #define XGMAC_FILTER_PCF BIT(7) ··· 121 119 #define XGMAC_HWFEAT_VLHASH BIT(4) 122 120 #define XGMAC_HWFEAT_GMIISEL BIT(1) 123 121 #define XGMAC_HW_FEATURE1 0x00000120 122 + #define XGMAC_HWFEAT_L3L4FNUM GENMASK(30, 27) 124 123 #define XGMAC_HWFEAT_RSSEN BIT(20) 125 124 #define XGMAC_HWFEAT_TSOEN BIT(18) 126 125 #define XGMAC_HWFEAT_SPHEN BIT(17) ··· 153 150 #define XGMAC_DCS GENMASK(19, 16) 154 151 #define XGMAC_DCS_SHIFT 16 155 152 #define XGMAC_ADDRx_LOW(x) (0x00000304 + (x) * 0x8) 153 + #define XGMAC_L3L4_ADDR_CTRL 0x00000c00 154 + #define XGMAC_IDDR GENMASK(15, 8) 155 + #define XGMAC_IDDR_SHIFT 8 156 + #define XGMAC_IDDR_FNUM 4 157 + #define XGMAC_TT BIT(1) 158 + #define XGMAC_XB BIT(0) 159 + #define XGMAC_L3L4_DATA 0x00000c04 160 + #define XGMAC_L3L4_CTRL 0x0 161 + #define XGMAC_L4DPIM0 BIT(21) 162 + #define XGMAC_L4DPM0 BIT(20) 163 + #define XGMAC_L4SPIM0 BIT(19) 164 + #define XGMAC_L4SPM0 BIT(18) 165 + #define XGMAC_L4PEN0 BIT(16) 166 + #define XGMAC_L3HDBM0 GENMASK(15, 11) 167 + #define XGMAC_L3HSBM0 GENMASK(10, 6) 168 + #define XGMAC_L3DAIM0 BIT(5) 169 + #define XGMAC_L3DAM0 BIT(4) 170 + #define XGMAC_L3SAIM0 BIT(3) 171 + #define XGMAC_L3SAM0 BIT(2) 172 + #define XGMAC_L3PEN0 BIT(0) 173 + #define XGMAC_L4_ADDR 0x1 174 + #define XGMAC_L4DP0 GENMASK(31, 16) 175 + #define XGMAC_L4DP0_SHIFT 16 176 + #define XGMAC_L4SP0 GENMASK(15, 0) 177 + #define XGMAC_L3_ADDR0 0x4 178 + #define XGMAC_L3_ADDR1 0x5 179 + #define XGMAC_L3_ADDR2 0x6 180 + #define XMGAC_L3_ADDR3 0x7 156 181 #define 
XGMAC_ARP_ADDR 0x00000c10 157 182 #define XGMAC_RSS_CTRL 0x00000c80 158 183 #define XGMAC_UDP4TE BIT(3)
+194 -11
drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
··· 15 15 struct net_device *dev) 16 16 { 17 17 void __iomem *ioaddr = hw->pcsr; 18 - int mtu = dev->mtu; 19 18 u32 tx, rx; 20 19 21 20 tx = readl(ioaddr + XGMAC_TX_CONFIG); ··· 22 23 23 24 tx |= XGMAC_CORE_INIT_TX; 24 25 rx |= XGMAC_CORE_INIT_RX; 25 - 26 - if (mtu >= 9000) { 27 - rx |= XGMAC_CONFIG_GPSLCE; 28 - rx |= XGMAC_JUMBO_LEN << XGMAC_CONFIG_GPSL_SHIFT; 29 - rx |= XGMAC_CONFIG_WD; 30 - } else if (mtu > 2000) { 31 - rx |= XGMAC_CONFIG_JE; 32 - } else if (mtu > 1500) { 33 - rx |= XGMAC_CONFIG_S2KP; 34 - } 35 26 36 27 if (hw->ps) { 37 28 tx |= XGMAC_CONFIG_TE; ··· 1152 1163 writel(value, ioaddr + XGMAC_VLAN_INCL); 1153 1164 } 1154 1165 1166 + static int dwxgmac2_filter_wait(struct mac_device_info *hw) 1167 + { 1168 + void __iomem *ioaddr = hw->pcsr; 1169 + u32 value; 1170 + 1171 + if (readl_poll_timeout(ioaddr + XGMAC_L3L4_ADDR_CTRL, value, 1172 + !(value & XGMAC_XB), 100, 10000)) 1173 + return -EBUSY; 1174 + return 0; 1175 + } 1176 + 1177 + static int dwxgmac2_filter_read(struct mac_device_info *hw, u32 filter_no, 1178 + u8 reg, u32 *data) 1179 + { 1180 + void __iomem *ioaddr = hw->pcsr; 1181 + u32 value; 1182 + int ret; 1183 + 1184 + ret = dwxgmac2_filter_wait(hw); 1185 + if (ret) 1186 + return ret; 1187 + 1188 + value = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT; 1189 + value |= XGMAC_TT | XGMAC_XB; 1190 + writel(value, ioaddr + XGMAC_L3L4_ADDR_CTRL); 1191 + 1192 + ret = dwxgmac2_filter_wait(hw); 1193 + if (ret) 1194 + return ret; 1195 + 1196 + *data = readl(ioaddr + XGMAC_L3L4_DATA); 1197 + return 0; 1198 + } 1199 + 1200 + static int dwxgmac2_filter_write(struct mac_device_info *hw, u32 filter_no, 1201 + u8 reg, u32 data) 1202 + { 1203 + void __iomem *ioaddr = hw->pcsr; 1204 + u32 value; 1205 + int ret; 1206 + 1207 + ret = dwxgmac2_filter_wait(hw); 1208 + if (ret) 1209 + return ret; 1210 + 1211 + writel(data, ioaddr + XGMAC_L3L4_DATA); 1212 + 1213 + value = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT; 1214 + value |= 
XGMAC_XB; 1215 + writel(value, ioaddr + XGMAC_L3L4_ADDR_CTRL); 1216 + 1217 + return dwxgmac2_filter_wait(hw); 1218 + } 1219 + 1220 + static int dwxgmac2_config_l3_filter(struct mac_device_info *hw, u32 filter_no, 1221 + bool en, bool ipv6, bool sa, bool inv, 1222 + u32 match) 1223 + { 1224 + void __iomem *ioaddr = hw->pcsr; 1225 + u32 value; 1226 + int ret; 1227 + 1228 + value = readl(ioaddr + XGMAC_PACKET_FILTER); 1229 + value |= XGMAC_FILTER_IPFE; 1230 + writel(value, ioaddr + XGMAC_PACKET_FILTER); 1231 + 1232 + ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value); 1233 + if (ret) 1234 + return ret; 1235 + 1236 + /* For IPv6 not both SA/DA filters can be active */ 1237 + if (ipv6) { 1238 + value |= XGMAC_L3PEN0; 1239 + value &= ~(XGMAC_L3SAM0 | XGMAC_L3SAIM0); 1240 + value &= ~(XGMAC_L3DAM0 | XGMAC_L3DAIM0); 1241 + if (sa) { 1242 + value |= XGMAC_L3SAM0; 1243 + if (inv) 1244 + value |= XGMAC_L3SAIM0; 1245 + } else { 1246 + value |= XGMAC_L3DAM0; 1247 + if (inv) 1248 + value |= XGMAC_L3DAIM0; 1249 + } 1250 + } else { 1251 + value &= ~XGMAC_L3PEN0; 1252 + if (sa) { 1253 + value |= XGMAC_L3SAM0; 1254 + if (inv) 1255 + value |= XGMAC_L3SAIM0; 1256 + } else { 1257 + value |= XGMAC_L3DAM0; 1258 + if (inv) 1259 + value |= XGMAC_L3DAIM0; 1260 + } 1261 + } 1262 + 1263 + ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value); 1264 + if (ret) 1265 + return ret; 1266 + 1267 + if (sa) { 1268 + ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3_ADDR0, match); 1269 + if (ret) 1270 + return ret; 1271 + } else { 1272 + ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3_ADDR1, match); 1273 + if (ret) 1274 + return ret; 1275 + } 1276 + 1277 + if (!en) 1278 + return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0); 1279 + 1280 + return 0; 1281 + } 1282 + 1283 + static int dwxgmac2_config_l4_filter(struct mac_device_info *hw, u32 filter_no, 1284 + bool en, bool udp, bool sa, bool inv, 1285 + u32 match) 1286 + { 1287 + void __iomem *ioaddr = 
hw->pcsr; 1288 + u32 value; 1289 + int ret; 1290 + 1291 + value = readl(ioaddr + XGMAC_PACKET_FILTER); 1292 + value |= XGMAC_FILTER_IPFE; 1293 + writel(value, ioaddr + XGMAC_PACKET_FILTER); 1294 + 1295 + ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value); 1296 + if (ret) 1297 + return ret; 1298 + 1299 + if (udp) { 1300 + value |= XGMAC_L4PEN0; 1301 + } else { 1302 + value &= ~XGMAC_L4PEN0; 1303 + } 1304 + 1305 + value &= ~(XGMAC_L4SPM0 | XGMAC_L4SPIM0); 1306 + value &= ~(XGMAC_L4DPM0 | XGMAC_L4DPIM0); 1307 + if (sa) { 1308 + value |= XGMAC_L4SPM0; 1309 + if (inv) 1310 + value |= XGMAC_L4SPIM0; 1311 + } else { 1312 + value |= XGMAC_L4DPM0; 1313 + if (inv) 1314 + value |= XGMAC_L4DPIM0; 1315 + } 1316 + 1317 + ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value); 1318 + if (ret) 1319 + return ret; 1320 + 1321 + if (sa) { 1322 + value = match & XGMAC_L4SP0; 1323 + 1324 + ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value); 1325 + if (ret) 1326 + return ret; 1327 + } else { 1328 + value = (match << XGMAC_L4DP0_SHIFT) & XGMAC_L4DP0; 1329 + 1330 + ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value); 1331 + if (ret) 1332 + return ret; 1333 + } 1334 + 1335 + if (!en) 1336 + return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0); 1337 + 1338 + return 0; 1339 + } 1340 + 1341 + static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en, 1342 + u32 addr) 1343 + { 1344 + void __iomem *ioaddr = hw->pcsr; 1345 + u32 value; 1346 + 1347 + writel(addr, ioaddr + XGMAC_ARP_ADDR); 1348 + 1349 + value = readl(ioaddr + XGMAC_RX_CONFIG); 1350 + if (en) 1351 + value |= XGMAC_CONFIG_ARPEN; 1352 + else 1353 + value &= ~XGMAC_CONFIG_ARPEN; 1354 + writel(value, ioaddr + XGMAC_RX_CONFIG); 1355 + } 1356 + 1155 1357 const struct stmmac_ops dwxgmac210_ops = { 1156 1358 .core_init = dwxgmac2_core_init, 1157 1359 .set_mac = dwxgmac2_set_mac, ··· 1383 1203 .flex_pps_config = dwxgmac2_flex_pps_config, 1384 1204 
.sarc_configure = dwxgmac2_sarc_configure, 1385 1205 .enable_vlan = dwxgmac2_enable_vlan, 1206 + .config_l3_filter = dwxgmac2_config_l3_filter, 1207 + .config_l4_filter = dwxgmac2_config_l4_filter, 1208 + .set_arp_offload = dwxgmac2_set_arp_offload, 1386 1209 }; 1387 1210 1388 1211 int dwxgmac2_setup(struct stmmac_priv *priv)
+7 -1
drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
··· 322 322 323 323 /* ABNORMAL interrupts */ 324 324 if (unlikely(intr_status & XGMAC_AIS)) { 325 + if (unlikely(intr_status & XGMAC_RBU)) { 326 + x->rx_buf_unav_irq++; 327 + ret |= handle_rx; 328 + } 325 329 if (unlikely(intr_status & XGMAC_TPS)) { 326 330 x->tx_process_stopped_irq++; 327 331 ret |= tx_hard_error; ··· 369 365 dma_cap->eee = (hw_cap & XGMAC_HWFEAT_EEESEL) >> 13; 370 366 dma_cap->atime_stamp = (hw_cap & XGMAC_HWFEAT_TSSEL) >> 12; 371 367 dma_cap->av = (hw_cap & XGMAC_HWFEAT_AVSEL) >> 11; 372 - dma_cap->av &= (hw_cap & XGMAC_HWFEAT_RAVSEL) >> 10; 368 + dma_cap->av &= !(hw_cap & XGMAC_HWFEAT_RAVSEL) >> 10; 369 + dma_cap->arpoffsel = (hw_cap & XGMAC_HWFEAT_ARPOFFSEL) >> 9; 373 370 dma_cap->rmon = (hw_cap & XGMAC_HWFEAT_MMCSEL) >> 8; 374 371 dma_cap->pmt_magic_frame = (hw_cap & XGMAC_HWFEAT_MGKSEL) >> 7; 375 372 dma_cap->pmt_remote_wake_up = (hw_cap & XGMAC_HWFEAT_RWKSEL) >> 6; ··· 379 374 380 375 /* MAC HW feature 1 */ 381 376 hw_cap = readl(ioaddr + XGMAC_HW_FEATURE1); 377 + dma_cap->l3l4fnum = (hw_cap & XGMAC_HWFEAT_L3L4FNUM) >> 27; 382 378 dma_cap->rssen = (hw_cap & XGMAC_HWFEAT_RSSEN) >> 20; 383 379 dma_cap->tsoen = (hw_cap & XGMAC_HWFEAT_TSOEN) >> 18; 384 380 dma_cap->sphen = (hw_cap & XGMAC_HWFEAT_SPHEN) >> 17;
+19
drivers/net/ethernet/stmicro/stmmac/hwif.h
··· 363 363 int (*get_mac_tx_timestamp)(struct mac_device_info *hw, u64 *ts); 364 364 /* Source Address Insertion / Replacement */ 365 365 void (*sarc_configure)(void __iomem *ioaddr, int val); 366 + /* Filtering */ 367 + int (*config_l3_filter)(struct mac_device_info *hw, u32 filter_no, 368 + bool en, bool ipv6, bool sa, bool inv, 369 + u32 match); 370 + int (*config_l4_filter)(struct mac_device_info *hw, u32 filter_no, 371 + bool en, bool udp, bool sa, bool inv, 372 + u32 match); 373 + void (*set_arp_offload)(struct mac_device_info *hw, bool en, u32 addr); 366 374 }; 367 375 368 376 #define stmmac_core_init(__priv, __args...) \ ··· 451 443 stmmac_do_callback(__priv, mac, get_mac_tx_timestamp, __args) 452 444 #define stmmac_sarc_configure(__priv, __args...) \ 453 445 stmmac_do_void_callback(__priv, mac, sarc_configure, __args) 446 + #define stmmac_config_l3_filter(__priv, __args...) \ 447 + stmmac_do_callback(__priv, mac, config_l3_filter, __args) 448 + #define stmmac_config_l4_filter(__priv, __args...) \ 449 + stmmac_do_callback(__priv, mac, config_l4_filter, __args) 450 + #define stmmac_set_arp_offload(__priv, __args...) \ 451 + stmmac_do_void_callback(__priv, mac, set_arp_offload, __args) 454 452 455 453 /* PTP and HW Timer helpers */ 456 454 struct stmmac_hwtimestamp { ··· 513 499 struct stmmac_priv; 514 500 struct tc_cls_u32_offload; 515 501 struct tc_cbs_qopt_offload; 502 + struct flow_cls_offload; 516 503 517 504 struct stmmac_tc_ops { 518 505 int (*init)(struct stmmac_priv *priv); ··· 521 506 struct tc_cls_u32_offload *cls); 522 507 int (*setup_cbs)(struct stmmac_priv *priv, 523 508 struct tc_cbs_qopt_offload *qopt); 509 + int (*setup_cls)(struct stmmac_priv *priv, 510 + struct flow_cls_offload *cls); 524 511 }; 525 512 526 513 #define stmmac_tc_init(__priv, __args...) \ ··· 531 514 stmmac_do_callback(__priv, tc, setup_cls_u32, __args) 532 515 #define stmmac_tc_setup_cbs(__priv, __args...) 
\ 533 516 stmmac_do_callback(__priv, tc, setup_cbs, __args) 517 + #define stmmac_tc_setup_cls(__priv, __args...) \ 518 + stmmac_do_callback(__priv, tc, setup_cls, __args) 534 519 535 520 struct stmmac_counters; 536 521
+12
drivers/net/ethernet/stmicro/stmmac/stmmac.h
··· 128 128 u32 table[STMMAC_RSS_MAX_TABLE_SIZE]; 129 129 }; 130 130 131 + #define STMMAC_FLOW_ACTION_DROP BIT(0) 132 + struct stmmac_flow_entry { 133 + unsigned long cookie; 134 + unsigned long action; 135 + u8 ip_proto; 136 + int in_use; 137 + int idx; 138 + int is_l4; 139 + }; 140 + 131 141 struct stmmac_priv { 132 142 /* Frequently used values are kept adjacent for cache effect */ 133 143 u32 tx_coal_frames; ··· 226 216 unsigned int tc_entries_max; 227 217 unsigned int tc_off_max; 228 218 struct stmmac_tc_entry *tc_entries; 219 + unsigned int flow_entries_max; 220 + struct stmmac_flow_entry *flow_entries; 229 221 230 222 /* Pulse Per Second output */ 231 223 struct stmmac_pps_cfg pps[STMMAC_PPS_MAX];
+9 -12
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
··· 746 746 (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval)) 747 747 return -EOPNOTSUPP; 748 748 749 - if (ec->rx_coalesce_usecs == 0) 750 - return -EINVAL; 749 + if (priv->use_riwt && (ec->rx_coalesce_usecs > 0)) { 750 + rx_riwt = stmmac_usec2riwt(ec->rx_coalesce_usecs, priv); 751 + 752 + if ((rx_riwt > MAX_DMA_RIWT) || (rx_riwt < MIN_DMA_RIWT)) 753 + return -EINVAL; 754 + 755 + priv->rx_riwt = rx_riwt; 756 + stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt); 757 + } 751 758 752 759 if ((ec->tx_coalesce_usecs == 0) && 753 760 (ec->tx_max_coalesced_frames == 0)) ··· 764 757 (ec->tx_max_coalesced_frames > STMMAC_TX_MAX_FRAMES)) 765 758 return -EINVAL; 766 759 767 - rx_riwt = stmmac_usec2riwt(ec->rx_coalesce_usecs, priv); 768 - 769 - if ((rx_riwt > MAX_DMA_RIWT) || (rx_riwt < MIN_DMA_RIWT)) 770 - return -EINVAL; 771 - else if (!priv->use_riwt) 772 - return -EOPNOTSUPP; 773 - 774 760 /* Only copy relevant parameters, ignore all others. */ 775 761 priv->tx_coal_frames = ec->tx_max_coalesced_frames; 776 762 priv->tx_coal_timer = ec->tx_coalesce_usecs; 777 763 priv->rx_coal_frames = ec->rx_max_coalesced_frames; 778 - priv->rx_riwt = rx_riwt; 779 - stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt); 780 - 781 764 return 0; 782 765 } 783 766
+12 -6
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 3511 3511 &priv->xstats, rx_q->dma_erx + entry); 3512 3512 if (unlikely(status == discard_frame)) { 3513 3513 page_pool_recycle_direct(rx_q->page_pool, buf->page); 3514 - priv->dev->stats.rx_errors++; 3515 3514 buf->page = NULL; 3516 3515 error = 1; 3516 + if (!priv->hwts_rx_en) 3517 + priv->dev->stats.rx_errors++; 3517 3518 } 3518 3519 3519 3520 if (unlikely(error && (status & rx_not_ls))) ··· 3932 3931 struct stmmac_priv *priv = cb_priv; 3933 3932 int ret = -EOPNOTSUPP; 3934 3933 3934 + if (!tc_cls_can_offload_and_chain0(priv->dev, type_data)) 3935 + return ret; 3936 + 3935 3937 stmmac_disable_all_queues(priv); 3936 3938 3937 3939 switch (type) { 3938 3940 case TC_SETUP_CLSU32: 3939 - if (tc_cls_can_offload_and_chain0(priv->dev, type_data)) 3940 - ret = stmmac_tc_setup_cls_u32(priv, priv, type_data); 3941 + ret = stmmac_tc_setup_cls_u32(priv, priv, type_data); 3942 + break; 3943 + case TC_SETUP_CLSFLOWER: 3944 + ret = stmmac_tc_setup_cls(priv, priv, type_data); 3941 3945 break; 3942 3946 default: 3943 3947 break; ··· 4542 4536 4543 4537 /* MTU range: 46 - hw-specific max */ 4544 4538 ndev->min_mtu = ETH_ZLEN - ETH_HLEN; 4545 - if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00)) 4546 - ndev->max_mtu = JUMBO_LEN; 4547 - else if (priv->plat->has_xgmac) 4539 + if (priv->plat->has_xgmac) 4548 4540 ndev->max_mtu = XGMAC_JUMBO_LEN; 4541 + else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00)) 4542 + ndev->max_mtu = JUMBO_LEN; 4549 4543 else 4550 4544 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN); 4551 4545 /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
+468 -15
drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
··· 43 43 int dont_wait; 44 44 int timeout; 45 45 int size; 46 + int max_size; 46 47 int remove_sa; 47 48 u8 id; 48 49 int sarc; 50 + u16 queue_mapping; 49 51 }; 50 52 51 53 static u8 stmmac_test_next_id; ··· 75 73 else 76 74 size += sizeof(struct udphdr); 77 75 78 - skb = netdev_alloc_skb(priv->dev, size); 76 + if (attr->max_size && (attr->max_size > size)) 77 + size = attr->max_size; 78 + 79 + skb = netdev_alloc_skb_ip_align(priv->dev, size); 79 80 if (!skb) 80 81 return NULL; 81 82 82 83 prefetchw(skb->data); 83 - skb_reserve(skb, NET_IP_ALIGN); 84 84 85 85 if (attr->vlan > 1) 86 86 ehdr = skb_push(skb, ETH_HLEN + 8); ··· 151 147 uhdr->source = htons(attr->sport); 152 148 uhdr->dest = htons(attr->dport); 153 149 uhdr->len = htons(sizeof(*shdr) + sizeof(*uhdr) + attr->size); 150 + if (attr->max_size) 151 + uhdr->len = htons(attr->max_size - 152 + (sizeof(*ihdr) + sizeof(*ehdr))); 154 153 uhdr->check = 0; 155 154 } 156 155 ··· 169 162 iplen += sizeof(*thdr); 170 163 else 171 164 iplen += sizeof(*uhdr); 165 + 166 + if (attr->max_size) 167 + iplen = attr->max_size - sizeof(*ehdr); 168 + 172 169 ihdr->tot_len = htons(iplen); 173 170 ihdr->frag_off = 0; 174 - ihdr->saddr = 0; 171 + ihdr->saddr = htonl(attr->ip_src); 175 172 ihdr->daddr = htonl(attr->ip_dst); 176 173 ihdr->tos = 0; 177 174 ihdr->id = 0; ··· 189 178 190 179 if (attr->size) 191 180 skb_put(skb, attr->size); 181 + if (attr->max_size && (attr->max_size > skb->len)) 182 + skb_put(skb, attr->max_size - skb->len); 192 183 193 184 skb->csum = 0; 194 185 skb->ip_summed = CHECKSUM_PARTIAL; ··· 203 190 } 204 191 205 192 skb->protocol = htons(ETH_P_IP); 193 + skb->pkt_type = PACKET_HOST; 194 + skb->dev = priv->dev; 195 + 196 + return skb; 197 + } 198 + 199 + static struct sk_buff *stmmac_test_get_arp_skb(struct stmmac_priv *priv, 200 + struct stmmac_packet_attrs *attr) 201 + { 202 + __be32 ip_src = htonl(attr->ip_src); 203 + __be32 ip_dst = htonl(attr->ip_dst); 204 + struct sk_buff *skb = NULL; 205 + 206 + skb = 
arp_create(ARPOP_REQUEST, ETH_P_ARP, ip_dst, priv->dev, ip_src, 207 + NULL, attr->src, attr->dst); 208 + if (!skb) 209 + return NULL; 210 + 206 211 skb->pkt_type = PACKET_HOST; 207 212 skb->dev = priv->dev; 208 213 ··· 337 306 goto cleanup; 338 307 } 339 308 340 - skb_set_queue_mapping(skb, 0); 309 + skb_set_queue_mapping(skb, attr->queue_mapping); 341 310 ret = dev_queue_xmit(skb); 342 311 if (ret) 343 312 goto cleanup; ··· 349 318 attr->timeout = STMMAC_LB_TIMEOUT; 350 319 351 320 wait_for_completion_timeout(&tpriv->comp, attr->timeout); 352 - ret = !tpriv->ok; 321 + ret = tpriv->ok ? 0 : -ETIMEDOUT; 353 322 354 323 cleanup: 355 324 if (!attr->dont_wait) ··· 511 480 512 481 /* Shall NOT receive packet */ 513 482 ret = __stmmac_test_loopback(priv, &attr); 514 - ret = !ret; 483 + ret = ret ? 0 : -EINVAL; 515 484 516 485 cleanup: 517 486 dev_mc_del(priv->dev, gd_addr); ··· 543 512 544 513 /* Shall NOT receive packet */ 545 514 ret = __stmmac_test_loopback(priv, &attr); 546 - ret = !ret; 515 + ret = ret ? 0 : -EINVAL; 547 516 548 517 cleanup: 549 518 dev_uc_del(priv->dev, gd_addr); ··· 593 562 594 563 /* Shall NOT receive packet */ 595 564 ret = __stmmac_test_loopback(priv, &attr); 596 - ret = !ret; 565 + ret = ret ? 0 : -EINVAL; 597 566 598 567 cleanup: 599 568 dev_uc_del(priv->dev, uc_addr); ··· 631 600 632 601 /* Shall NOT receive packet */ 633 602 ret = __stmmac_test_loopback(priv, &attr); 634 - ret = !ret; 603 + ret = ret ? 0 : -EINVAL; 635 604 636 605 cleanup: 637 606 dev_mc_del(priv->dev, mc_addr); ··· 730 699 } 731 700 732 701 wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT); 733 - ret = !tpriv->ok; 702 + ret = tpriv->ok ? 0 : -ETIMEDOUT; 734 703 735 704 cleanup: 736 705 dev_mc_del(priv->dev, paddr); ··· 864 833 goto vlan_del; 865 834 866 835 wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT); 867 - ret = !tpriv->ok; 836 + ret = tpriv->ok ? 
0 : -ETIMEDOUT; 868 837 if (ret && !i) { 869 838 goto vlan_del; 870 839 } else if (!ret && i) { 871 - ret = -1; 840 + ret = -EINVAL; 872 841 goto vlan_del; 873 842 } else { 874 843 ret = 0; ··· 940 909 goto vlan_del; 941 910 942 911 wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT); 943 - ret = !tpriv->ok; 912 + ret = tpriv->ok ? 0 : -ETIMEDOUT; 944 913 if (ret && !i) { 945 914 goto vlan_del; 946 915 } else if (!ret && i) { 947 - ret = -1; 916 + ret = -EINVAL; 948 917 goto vlan_del; 949 918 } else { 950 919 ret = 0; ··· 1029 998 attr.src = addr; 1030 999 1031 1000 ret = __stmmac_test_loopback(priv, &attr); 1032 - ret = !ret; /* Shall NOT receive packet */ 1001 + ret = ret ? 0 : -EINVAL; /* Shall NOT receive packet */ 1033 1002 1034 1003 cls_u32.command = TC_CLSU32_DELETE_KNODE; 1035 1004 stmmac_tc_setup_cls_u32(priv, priv, &cls_u32); ··· 1199 1168 return stmmac_test_vlanoff_common(priv, true); 1200 1169 } 1201 1170 1171 + #ifdef CONFIG_NET_CLS_ACT 1172 + static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src, 1173 + u32 dst_mask, u32 src_mask) 1174 + { 1175 + struct flow_dissector_key_ipv4_addrs key, mask; 1176 + unsigned long dummy_cookie = 0xdeadbeef; 1177 + struct stmmac_packet_attrs attr = { }; 1178 + struct flow_dissector *dissector; 1179 + struct flow_cls_offload *cls; 1180 + struct flow_rule *rule; 1181 + int ret; 1182 + 1183 + if (!tc_can_offload(priv->dev)) 1184 + return -EOPNOTSUPP; 1185 + if (!priv->dma_cap.l3l4fnum) 1186 + return -EOPNOTSUPP; 1187 + if (priv->rss.enable) { 1188 + struct stmmac_rss rss = { .enable = false, }; 1189 + 1190 + stmmac_rss_configure(priv, priv->hw, &rss, 1191 + priv->plat->rx_queues_to_use); 1192 + } 1193 + 1194 + dissector = kzalloc(sizeof(*dissector), GFP_KERNEL); 1195 + if (!dissector) { 1196 + ret = -ENOMEM; 1197 + goto cleanup_rss; 1198 + } 1199 + 1200 + dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_IPV4_ADDRS); 1201 + dissector->offset[FLOW_DISSECTOR_KEY_IPV4_ADDRS] = 0; 1202 + 1203 + 
cls = kzalloc(sizeof(*cls), GFP_KERNEL); 1204 + if (!cls) { 1205 + ret = -ENOMEM; 1206 + goto cleanup_dissector; 1207 + } 1208 + 1209 + cls->common.chain_index = 0; 1210 + cls->command = FLOW_CLS_REPLACE; 1211 + cls->cookie = dummy_cookie; 1212 + 1213 + rule = kzalloc(struct_size(rule, action.entries, 1), GFP_KERNEL); 1214 + if (!rule) { 1215 + ret = -ENOMEM; 1216 + goto cleanup_cls; 1217 + } 1218 + 1219 + rule->match.dissector = dissector; 1220 + rule->match.key = (void *)&key; 1221 + rule->match.mask = (void *)&mask; 1222 + 1223 + key.src = htonl(src); 1224 + key.dst = htonl(dst); 1225 + mask.src = src_mask; 1226 + mask.dst = dst_mask; 1227 + 1228 + cls->rule = rule; 1229 + 1230 + rule->action.entries[0].id = FLOW_ACTION_DROP; 1231 + rule->action.num_entries = 1; 1232 + 1233 + attr.dst = priv->dev->dev_addr; 1234 + attr.ip_dst = dst; 1235 + attr.ip_src = src; 1236 + 1237 + /* Shall receive packet */ 1238 + ret = __stmmac_test_loopback(priv, &attr); 1239 + if (ret) 1240 + goto cleanup_rule; 1241 + 1242 + ret = stmmac_tc_setup_cls(priv, priv, cls); 1243 + if (ret) 1244 + goto cleanup_rule; 1245 + 1246 + /* Shall NOT receive packet */ 1247 + ret = __stmmac_test_loopback(priv, &attr); 1248 + ret = ret ? 
0 : -EINVAL; 1249 + 1250 + cls->command = FLOW_CLS_DESTROY; 1251 + stmmac_tc_setup_cls(priv, priv, cls); 1252 + cleanup_rule: 1253 + kfree(rule); 1254 + cleanup_cls: 1255 + kfree(cls); 1256 + cleanup_dissector: 1257 + kfree(dissector); 1258 + cleanup_rss: 1259 + if (priv->rss.enable) { 1260 + stmmac_rss_configure(priv, priv->hw, &priv->rss, 1261 + priv->plat->rx_queues_to_use); 1262 + } 1263 + 1264 + return ret; 1265 + } 1266 + #else 1267 + static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src, 1268 + u32 dst_mask, u32 src_mask) 1269 + { 1270 + return -EOPNOTSUPP; 1271 + } 1272 + #endif 1273 + 1274 + static int stmmac_test_l3filt_da(struct stmmac_priv *priv) 1275 + { 1276 + u32 addr = 0x10203040; 1277 + 1278 + return __stmmac_test_l3filt(priv, addr, 0, ~0, 0); 1279 + } 1280 + 1281 + static int stmmac_test_l3filt_sa(struct stmmac_priv *priv) 1282 + { 1283 + u32 addr = 0x10203040; 1284 + 1285 + return __stmmac_test_l3filt(priv, 0, addr, 0, ~0); 1286 + } 1287 + 1288 + #ifdef CONFIG_NET_CLS_ACT 1289 + static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src, 1290 + u32 dst_mask, u32 src_mask, bool udp) 1291 + { 1292 + struct { 1293 + struct flow_dissector_key_basic bkey; 1294 + struct flow_dissector_key_ports key; 1295 + } __aligned(BITS_PER_LONG / 8) keys; 1296 + struct { 1297 + struct flow_dissector_key_basic bmask; 1298 + struct flow_dissector_key_ports mask; 1299 + } __aligned(BITS_PER_LONG / 8) masks; 1300 + unsigned long dummy_cookie = 0xdeadbeef; 1301 + struct stmmac_packet_attrs attr = { }; 1302 + struct flow_dissector *dissector; 1303 + struct flow_cls_offload *cls; 1304 + struct flow_rule *rule; 1305 + int ret; 1306 + 1307 + if (!tc_can_offload(priv->dev)) 1308 + return -EOPNOTSUPP; 1309 + if (!priv->dma_cap.l3l4fnum) 1310 + return -EOPNOTSUPP; 1311 + if (priv->rss.enable) { 1312 + struct stmmac_rss rss = { .enable = false, }; 1313 + 1314 + stmmac_rss_configure(priv, priv->hw, &rss, 1315 + priv->plat->rx_queues_to_use); 
1316 + } 1317 + 1318 + dissector = kzalloc(sizeof(*dissector), GFP_KERNEL); 1319 + if (!dissector) { 1320 + ret = -ENOMEM; 1321 + goto cleanup_rss; 1322 + } 1323 + 1324 + dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_BASIC); 1325 + dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_PORTS); 1326 + dissector->offset[FLOW_DISSECTOR_KEY_BASIC] = 0; 1327 + dissector->offset[FLOW_DISSECTOR_KEY_PORTS] = offsetof(typeof(keys), key); 1328 + 1329 + cls = kzalloc(sizeof(*cls), GFP_KERNEL); 1330 + if (!cls) { 1331 + ret = -ENOMEM; 1332 + goto cleanup_dissector; 1333 + } 1334 + 1335 + cls->common.chain_index = 0; 1336 + cls->command = FLOW_CLS_REPLACE; 1337 + cls->cookie = dummy_cookie; 1338 + 1339 + rule = kzalloc(struct_size(rule, action.entries, 1), GFP_KERNEL); 1340 + if (!rule) { 1341 + ret = -ENOMEM; 1342 + goto cleanup_cls; 1343 + } 1344 + 1345 + rule->match.dissector = dissector; 1346 + rule->match.key = (void *)&keys; 1347 + rule->match.mask = (void *)&masks; 1348 + 1349 + keys.bkey.ip_proto = udp ? IPPROTO_UDP : IPPROTO_TCP; 1350 + keys.key.src = htons(src); 1351 + keys.key.dst = htons(dst); 1352 + masks.mask.src = src_mask; 1353 + masks.mask.dst = dst_mask; 1354 + 1355 + cls->rule = rule; 1356 + 1357 + rule->action.entries[0].id = FLOW_ACTION_DROP; 1358 + rule->action.num_entries = 1; 1359 + 1360 + attr.dst = priv->dev->dev_addr; 1361 + attr.tcp = !udp; 1362 + attr.sport = src; 1363 + attr.dport = dst; 1364 + attr.ip_dst = 0; 1365 + 1366 + /* Shall receive packet */ 1367 + ret = __stmmac_test_loopback(priv, &attr); 1368 + if (ret) 1369 + goto cleanup_rule; 1370 + 1371 + ret = stmmac_tc_setup_cls(priv, priv, cls); 1372 + if (ret) 1373 + goto cleanup_rule; 1374 + 1375 + /* Shall NOT receive packet */ 1376 + ret = __stmmac_test_loopback(priv, &attr); 1377 + ret = ret ? 
0 : -EINVAL; 1378 + 1379 + cls->command = FLOW_CLS_DESTROY; 1380 + stmmac_tc_setup_cls(priv, priv, cls); 1381 + cleanup_rule: 1382 + kfree(rule); 1383 + cleanup_cls: 1384 + kfree(cls); 1385 + cleanup_dissector: 1386 + kfree(dissector); 1387 + cleanup_rss: 1388 + if (priv->rss.enable) { 1389 + stmmac_rss_configure(priv, priv->hw, &priv->rss, 1390 + priv->plat->rx_queues_to_use); 1391 + } 1392 + 1393 + return ret; 1394 + } 1395 + #else 1396 + static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src, 1397 + u32 dst_mask, u32 src_mask, bool udp) 1398 + { 1399 + return -EOPNOTSUPP; 1400 + } 1401 + #endif 1402 + 1403 + static int stmmac_test_l4filt_da_tcp(struct stmmac_priv *priv) 1404 + { 1405 + u16 dummy_port = 0x123; 1406 + 1407 + return __stmmac_test_l4filt(priv, dummy_port, 0, ~0, 0, false); 1408 + } 1409 + 1410 + static int stmmac_test_l4filt_sa_tcp(struct stmmac_priv *priv) 1411 + { 1412 + u16 dummy_port = 0x123; 1413 + 1414 + return __stmmac_test_l4filt(priv, 0, dummy_port, 0, ~0, false); 1415 + } 1416 + 1417 + static int stmmac_test_l4filt_da_udp(struct stmmac_priv *priv) 1418 + { 1419 + u16 dummy_port = 0x123; 1420 + 1421 + return __stmmac_test_l4filt(priv, dummy_port, 0, ~0, 0, true); 1422 + } 1423 + 1424 + static int stmmac_test_l4filt_sa_udp(struct stmmac_priv *priv) 1425 + { 1426 + u16 dummy_port = 0x123; 1427 + 1428 + return __stmmac_test_l4filt(priv, 0, dummy_port, 0, ~0, true); 1429 + } 1430 + 1431 + static int stmmac_test_arp_validate(struct sk_buff *skb, 1432 + struct net_device *ndev, 1433 + struct packet_type *pt, 1434 + struct net_device *orig_ndev) 1435 + { 1436 + struct stmmac_test_priv *tpriv = pt->af_packet_priv; 1437 + struct ethhdr *ehdr; 1438 + struct arphdr *ahdr; 1439 + 1440 + ehdr = (struct ethhdr *)skb_mac_header(skb); 1441 + if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->src)) 1442 + goto out; 1443 + 1444 + ahdr = arp_hdr(skb); 1445 + if (ahdr->ar_op != htons(ARPOP_REPLY)) 1446 + goto out; 1447 + 1448 + tpriv->ok = 
	/* NOTE(review): tail of stmmac_test_arp_validate() -- its opening
	 * lines fall outside this view.  It flags success and wakes the
	 * waiter in stmmac_test_arpoffload() below.
	 */
	tpriv->ok = true;
	complete(&tpriv->comp);
out:
	kfree_skb(skb);
	return 0;
}

/* Test the MAC ARP offload engine: program an IP address into the MAC,
 * transmit a broadcast ARP request for it, and wait for
 * stmmac_test_arp_validate() to observe the hardware-generated ARP reply.
 *
 * Returns 0 on success, -EOPNOTSUPP when the core lacks the ARP offload
 * feature, -ETIMEDOUT when no reply arrives within STMMAC_LB_TIMEOUT,
 * or a negative errno from setup.
 */
static int stmmac_test_arpoffload(struct stmmac_priv *priv)
{
	unsigned char src[ETH_ALEN] = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06};
	unsigned char dst[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct stmmac_packet_attrs attr = { };
	struct stmmac_test_priv *tpriv;
	struct sk_buff *skb = NULL;
	u32 ip_addr = 0xdeadcafe;	/* target address the MAC must answer for */
	u32 ip_src = 0xdeadbeef;	/* sender address in the ARP request */
	int ret;

	if (!priv->dma_cap.arpoffsel)
		return -EOPNOTSUPP;

	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
	if (!tpriv)
		return -ENOMEM;

	tpriv->ok = false;
	init_completion(&tpriv->comp);

	/* Snoop incoming ARP packets so the validator can spot the reply */
	tpriv->pt.type = htons(ETH_P_ARP);
	tpriv->pt.func = stmmac_test_arp_validate;
	tpriv->pt.dev = priv->dev;
	tpriv->pt.af_packet_priv = tpriv;
	tpriv->packet = &attr;
	dev_add_pack(&tpriv->pt);

	attr.src = src;
	attr.ip_src = ip_src;
	attr.dst = dst;
	attr.ip_dst = ip_addr;

	skb = stmmac_test_get_arp_skb(priv, &attr);
	if (!skb) {
		ret = -ENOMEM;
		goto cleanup;
	}

	ret = stmmac_set_arp_offload(priv, priv->hw, true, ip_addr);
	if (ret)
		goto cleanup;

	/* Promiscuous mode so the looped-back reply is not filtered out */
	ret = dev_set_promiscuity(priv->dev, 1);
	if (ret)
		goto cleanup;

	skb_set_queue_mapping(skb, 0);
	ret = dev_queue_xmit(skb);	/* consumes skb on success */
	if (ret)
		goto cleanup_promisc;

	wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
	ret = tpriv->ok ? 0 : -ETIMEDOUT;

cleanup_promisc:
	dev_set_promiscuity(priv->dev, -1);
cleanup:
	stmmac_set_arp_offload(priv, priv->hw, false, 0x0);
	dev_remove_pack(&tpriv->pt);
	kfree(tpriv);
	return ret;
}

/* Loopback one maximally-sized frame on @queue.  The frame size is capped
 * at the RX buffer size, except on XGMAC where SW supports one packet
 * spanning multiple RX descriptors so the full MTU can be exercised.
 */
static int __stmmac_test_jumbo(struct stmmac_priv *priv, u16 queue)
{
	struct stmmac_packet_attrs attr = { };
	int size = priv->dma_buf_sz;

	/* Only XGMAC has SW support for multiple RX descs in same packet */
	if (priv->plat->has_xgmac)
		size = priv->dev->max_mtu;

	attr.dst = priv->dev->dev_addr;
	attr.max_size = size - ETH_FCS_LEN;
	attr.queue_mapping = queue;

	return __stmmac_test_loopback(priv, &attr);
}

/* Jumbo-frame loopback on the default queue (0) */
static int stmmac_test_jumbo(struct stmmac_priv *priv)
{
	return __stmmac_test_jumbo(priv, 0);
}

/* Jumbo-frame loopback on every TX queue; only meaningful with >1 queue */
static int stmmac_test_mjumbo(struct stmmac_priv *priv)
{
	u32 chan, tx_cnt = priv->plat->tx_queues_to_use;
	int ret;

	if (tx_cnt <= 1)
		return -EOPNOTSUPP;

	for (chan = 0; chan < tx_cnt; chan++) {
		ret = __stmmac_test_jumbo(priv, chan);
		if (ret)
			return ret;
	}

	return 0;
}

#define STMMAC_LOOPBACK_NONE	0
#define STMMAC_LOOPBACK_MAC	1
#define STMMAC_LOOPBACK_PHY	2

/* ... (declaration and earlier entries of the stmmac_selftests[] table
 * are elided in this view) ...
 */
		.name = "SVLAN TX Insertion ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_svlanoff,
	}, {
		.name = "L3 DA Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_l3filt_da,
	}, {
		.name = "L3 SA Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_l3filt_sa,
	}, {
		.name = "L4 DA TCP Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_l4filt_da_tcp,
	}, {
		.name = "L4 SA TCP Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_l4filt_sa_tcp,
	}, {
		.name = "L4 DA UDP Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_l4filt_da_udp,
	}, {
		.name = "L4 SA UDP Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_l4filt_sa_udp,
	}, {
		.name = "ARP Offload ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_arpoffload,
	}, {
		.name = "Jumbo Frame ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_jumbo,
	}, {
		.name = "Multichannel Jumbo ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_mjumbo,
	},
};
+246 -1
drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
··· 242 242 { 243 243 struct dma_features *dma_cap = &priv->dma_cap; 244 244 unsigned int count; 245 + int i; 245 246 247 + if (dma_cap->l3l4fnum) { 248 + priv->flow_entries_max = dma_cap->l3l4fnum; 249 + priv->flow_entries = devm_kcalloc(priv->device, 250 + dma_cap->l3l4fnum, 251 + sizeof(*priv->flow_entries), 252 + GFP_KERNEL); 253 + if (!priv->flow_entries) 254 + return -ENOMEM; 255 + 256 + for (i = 0; i < priv->flow_entries_max; i++) 257 + priv->flow_entries[i].idx = i; 258 + 259 + dev_info(priv->device, "Enabled Flow TC (entries=%d)\n", 260 + priv->flow_entries_max); 261 + } 262 + 263 + /* Fail silently as we can still use remaining features, e.g. CBS */ 246 264 if (!dma_cap->frpsel) 247 - return -EINVAL; 265 + return 0; 248 266 249 267 switch (dma_cap->frpbs) { 250 268 case 0x0: ··· 367 349 return 0; 368 350 } 369 351 352 + static int tc_parse_flow_actions(struct stmmac_priv *priv, 353 + struct flow_action *action, 354 + struct stmmac_flow_entry *entry) 355 + { 356 + struct flow_action_entry *act; 357 + int i; 358 + 359 + if (!flow_action_has_entries(action)) 360 + return -EINVAL; 361 + 362 + flow_action_for_each(i, act, action) { 363 + switch (act->id) { 364 + case FLOW_ACTION_DROP: 365 + entry->action |= STMMAC_FLOW_ACTION_DROP; 366 + return 0; 367 + default: 368 + break; 369 + } 370 + } 371 + 372 + /* Nothing to do, maybe inverse filter ? 
*/ 373 + return 0; 374 + } 375 + 376 + static int tc_add_basic_flow(struct stmmac_priv *priv, 377 + struct flow_cls_offload *cls, 378 + struct stmmac_flow_entry *entry) 379 + { 380 + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); 381 + struct flow_dissector *dissector = rule->match.dissector; 382 + struct flow_match_basic match; 383 + 384 + /* Nothing to do here */ 385 + if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) 386 + return -EINVAL; 387 + 388 + flow_rule_match_basic(rule, &match); 389 + entry->ip_proto = match.key->ip_proto; 390 + return 0; 391 + } 392 + 393 + static int tc_add_ip4_flow(struct stmmac_priv *priv, 394 + struct flow_cls_offload *cls, 395 + struct stmmac_flow_entry *entry) 396 + { 397 + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); 398 + struct flow_dissector *dissector = rule->match.dissector; 399 + bool inv = entry->action & STMMAC_FLOW_ACTION_DROP; 400 + struct flow_match_ipv4_addrs match; 401 + u32 hw_match; 402 + int ret; 403 + 404 + /* Nothing to do here */ 405 + if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) 406 + return -EINVAL; 407 + 408 + flow_rule_match_ipv4_addrs(rule, &match); 409 + hw_match = ntohl(match.key->src) & ntohl(match.mask->src); 410 + if (hw_match) { 411 + ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, true, 412 + false, true, inv, hw_match); 413 + if (ret) 414 + return ret; 415 + } 416 + 417 + hw_match = ntohl(match.key->dst) & ntohl(match.mask->dst); 418 + if (hw_match) { 419 + ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, true, 420 + false, false, inv, hw_match); 421 + if (ret) 422 + return ret; 423 + } 424 + 425 + return 0; 426 + } 427 + 428 + static int tc_add_ports_flow(struct stmmac_priv *priv, 429 + struct flow_cls_offload *cls, 430 + struct stmmac_flow_entry *entry) 431 + { 432 + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); 433 + struct flow_dissector *dissector = rule->match.dissector; 434 + bool inv = entry->action & 
STMMAC_FLOW_ACTION_DROP; 435 + struct flow_match_ports match; 436 + u32 hw_match; 437 + bool is_udp; 438 + int ret; 439 + 440 + /* Nothing to do here */ 441 + if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS)) 442 + return -EINVAL; 443 + 444 + switch (entry->ip_proto) { 445 + case IPPROTO_TCP: 446 + is_udp = false; 447 + break; 448 + case IPPROTO_UDP: 449 + is_udp = true; 450 + break; 451 + default: 452 + return -EINVAL; 453 + } 454 + 455 + flow_rule_match_ports(rule, &match); 456 + 457 + hw_match = ntohs(match.key->src) & ntohs(match.mask->src); 458 + if (hw_match) { 459 + ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, true, 460 + is_udp, true, inv, hw_match); 461 + if (ret) 462 + return ret; 463 + } 464 + 465 + hw_match = ntohs(match.key->dst) & ntohs(match.mask->dst); 466 + if (hw_match) { 467 + ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, true, 468 + is_udp, false, inv, hw_match); 469 + if (ret) 470 + return ret; 471 + } 472 + 473 + entry->is_l4 = true; 474 + return 0; 475 + } 476 + 477 + static struct stmmac_flow_entry *tc_find_flow(struct stmmac_priv *priv, 478 + struct flow_cls_offload *cls, 479 + bool get_free) 480 + { 481 + int i; 482 + 483 + for (i = 0; i < priv->flow_entries_max; i++) { 484 + struct stmmac_flow_entry *entry = &priv->flow_entries[i]; 485 + 486 + if (entry->cookie == cls->cookie) 487 + return entry; 488 + if (get_free && (entry->in_use == false)) 489 + return entry; 490 + } 491 + 492 + return NULL; 493 + } 494 + 495 + struct { 496 + int (*fn)(struct stmmac_priv *priv, struct flow_cls_offload *cls, 497 + struct stmmac_flow_entry *entry); 498 + } tc_flow_parsers[] = { 499 + { .fn = tc_add_basic_flow }, 500 + { .fn = tc_add_ip4_flow }, 501 + { .fn = tc_add_ports_flow }, 502 + }; 503 + 504 + static int tc_add_flow(struct stmmac_priv *priv, 505 + struct flow_cls_offload *cls) 506 + { 507 + struct stmmac_flow_entry *entry = tc_find_flow(priv, cls, false); 508 + struct flow_rule *rule = 
flow_cls_offload_flow_rule(cls); 509 + int i, ret; 510 + 511 + if (!entry) { 512 + entry = tc_find_flow(priv, cls, true); 513 + if (!entry) 514 + return -ENOENT; 515 + } 516 + 517 + ret = tc_parse_flow_actions(priv, &rule->action, entry); 518 + if (ret) 519 + return ret; 520 + 521 + for (i = 0; i < ARRAY_SIZE(tc_flow_parsers); i++) { 522 + ret = tc_flow_parsers[i].fn(priv, cls, entry); 523 + if (!ret) { 524 + entry->in_use = true; 525 + continue; 526 + } 527 + } 528 + 529 + if (!entry->in_use) 530 + return -EINVAL; 531 + 532 + entry->cookie = cls->cookie; 533 + return 0; 534 + } 535 + 536 + static int tc_del_flow(struct stmmac_priv *priv, 537 + struct flow_cls_offload *cls) 538 + { 539 + struct stmmac_flow_entry *entry = tc_find_flow(priv, cls, false); 540 + int ret; 541 + 542 + if (!entry || !entry->in_use) 543 + return -ENOENT; 544 + 545 + if (entry->is_l4) { 546 + ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, false, 547 + false, false, false, 0); 548 + } else { 549 + ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, false, 550 + false, false, false, 0); 551 + } 552 + 553 + entry->in_use = false; 554 + entry->cookie = 0; 555 + entry->is_l4 = false; 556 + return ret; 557 + } 558 + 559 + static int tc_setup_cls(struct stmmac_priv *priv, 560 + struct flow_cls_offload *cls) 561 + { 562 + int ret = 0; 563 + 564 + switch (cls->command) { 565 + case FLOW_CLS_REPLACE: 566 + ret = tc_add_flow(priv, cls); 567 + break; 568 + case FLOW_CLS_DESTROY: 569 + ret = tc_del_flow(priv, cls); 570 + break; 571 + default: 572 + return -EOPNOTSUPP; 573 + } 574 + 575 + return ret; 576 + } 577 + 370 578 const struct stmmac_tc_ops dwmac510_tc_ops = { 371 579 .init = tc_init, 372 580 .setup_cls_u32 = tc_setup_cls_u32, 373 581 .setup_cbs = tc_setup_cbs, 582 + .setup_cls = tc_setup_cls, 374 583 };