Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'stmmac-EST'

Rohan G says:

====================
net: stmmac: EST conformance support

This patchset enables support for queueMaxSDU and transmission overrun
counters which are required for Qbv conformance.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+114 -4
+2
drivers/net/ethernet/stmicro/stmmac/common.h
··· 202 202 unsigned long mtl_est_hlbf; 203 203 unsigned long mtl_est_btre; 204 204 unsigned long mtl_est_btrlm; 205 + unsigned long max_sdu_txq_drop[MTL_MAX_TX_QUEUES]; 206 + unsigned long mtl_est_txq_hlbf[MTL_MAX_TX_QUEUES]; 205 207 /* per queue statistics */ 206 208 struct stmmac_txq_stats txq_stats[MTL_MAX_TX_QUEUES]; 207 209 struct stmmac_rxq_stats rxq_stats[MTL_MAX_RX_QUEUES];
+6
drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
··· 81 81 u32 status, value, feqn, hbfq, hbfs, btrl, btrl_max; 82 82 void __iomem *est_addr = priv->estaddr; 83 83 u32 txqcnt_mask = BIT(txqcnt) - 1; 84 + int i; 84 85 85 86 status = readl(est_addr + EST_STATUS); 86 87 ··· 125 124 hbfs = value & EST_SZ_CAP_HBFS_MASK; 126 125 127 126 x->mtl_est_hlbf++; 127 + 128 + for (i = 0; i < txqcnt; i++) { 129 + if (feqn & BIT(i)) 130 + x->mtl_est_txq_hlbf[i]++; 131 + } 128 132 129 133 /* Clear Interrupt */ 130 134 writel(feqn, est_addr + EST_FRM_SZ_ERR);
+22
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 2507 2507 if (!xsk_tx_peek_desc(pool, &xdp_desc)) 2508 2508 break; 2509 2509 2510 + if (priv->plat->est && priv->plat->est->enable && 2511 + priv->plat->est->max_sdu[queue] && 2512 + xdp_desc.len > priv->plat->est->max_sdu[queue]) { 2513 + priv->xstats.max_sdu_txq_drop[queue]++; 2514 + continue; 2515 + } 2516 + 2510 2517 if (likely(priv->extend_desc)) 2511 2518 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); 2512 2519 else if (tx_q->tbs & STMMAC_TBS_AVAIL) ··· 4505 4498 return stmmac_tso_xmit(skb, dev); 4506 4499 } 4507 4500 4501 + if (priv->plat->est && priv->plat->est->enable && 4502 + priv->plat->est->max_sdu[queue] && 4503 + skb->len > priv->plat->est->max_sdu[queue]) { 4504 + priv->xstats.max_sdu_txq_drop[queue]++; 4505 + goto max_sdu_err; 4506 + } 4507 + 4508 4508 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { 4509 4509 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { 4510 4510 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, ··· 4729 4715 4730 4716 dma_map_err: 4731 4717 netdev_err(priv->dev, "Tx DMA map failed\n"); 4718 + max_sdu_err: 4732 4719 dev_kfree_skb(skb); 4733 4720 priv->xstats.tx_dropped++; 4734 4721 return NETDEV_TX_OK; ··· 4885 4870 4886 4871 if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv)) 4887 4872 return STMMAC_XDP_CONSUMED; 4873 + 4874 + if (priv->plat->est && priv->plat->est->enable && 4875 + priv->plat->est->max_sdu[queue] && 4876 + xdpf->len > priv->plat->est->max_sdu[queue]) { 4877 + priv->xstats.max_sdu_txq_drop[queue]++; 4878 + return STMMAC_XDP_CONSUMED; 4879 + } 4888 4880 4889 4881 if (likely(priv->extend_desc)) 4890 4882 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
+83 -4
drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
··· 915 915 return time; 916 916 } 917 917 918 - static int tc_setup_taprio(struct stmmac_priv *priv, 919 - struct tc_taprio_qopt_offload *qopt) 918 + static void tc_taprio_map_maxsdu_txq(struct stmmac_priv *priv, 919 + struct tc_taprio_qopt_offload *qopt) 920 + { 921 + struct plat_stmmacenet_data *plat = priv->plat; 922 + u32 num_tc = qopt->mqprio.qopt.num_tc; 923 + u32 offset, count, i, j; 924 + 925 + /* QueueMaxSDU received from the driver corresponds to the Linux traffic 926 + * class. Map queueMaxSDU per Linux traffic class to DWMAC Tx queues. 927 + */ 928 + for (i = 0; i < num_tc; i++) { 929 + if (!qopt->max_sdu[i]) 930 + continue; 931 + 932 + offset = qopt->mqprio.qopt.offset[i]; 933 + count = qopt->mqprio.qopt.count[i]; 934 + 935 + for (j = offset; j < offset + count; j++) 936 + plat->est->max_sdu[j] = qopt->max_sdu[i] + ETH_HLEN - ETH_TLEN; 937 + } 938 + } 939 + 940 + static int tc_taprio_configure(struct stmmac_priv *priv, 941 + struct tc_taprio_qopt_offload *qopt) 920 942 { 921 943 u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep; 922 944 struct plat_stmmacenet_data *plat = priv->plat; ··· 990 968 991 969 if (qopt->cmd == TAPRIO_CMD_DESTROY) 992 970 goto disable; 993 - else if (qopt->cmd != TAPRIO_CMD_REPLACE) 994 - return -EOPNOTSUPP; 995 971 996 972 if (qopt->num_entries >= dep) 997 973 return -EINVAL; ··· 1065 1045 1066 1046 priv->plat->est->ter = qopt->cycle_time_extension; 1067 1047 1048 + tc_taprio_map_maxsdu_txq(priv, qopt); 1049 + 1068 1050 if (fpe && !priv->dma_cap.fpesel) { 1069 1051 mutex_unlock(&priv->plat->est->lock); 1070 1052 return -EOPNOTSUPP; ··· 1100 1078 priv->plat->est->enable = false; 1101 1079 stmmac_est_configure(priv, priv, priv->plat->est, 1102 1080 priv->plat->clk_ptp_rate); 1081 + /* Reset taprio status */ 1082 + for (i = 0; i < priv->plat->tx_queues_to_use; i++) { 1083 + priv->xstats.max_sdu_txq_drop[i] = 0; 1084 + priv->xstats.mtl_est_txq_hlbf[i] = 0; 1085 + } 1103 1086 mutex_unlock(&priv->plat->est->lock); 
1104 1087 } 1105 1088 ··· 1120 1093 netdev_info(priv->dev, "stop FPE handshake\n"); 1121 1094 1122 1095 return ret; 1096 + } 1097 + 1098 + static void tc_taprio_stats(struct stmmac_priv *priv, 1099 + struct tc_taprio_qopt_offload *qopt) 1100 + { 1101 + u64 window_drops = 0; 1102 + int i = 0; 1103 + 1104 + for (i = 0; i < priv->plat->tx_queues_to_use; i++) 1105 + window_drops += priv->xstats.max_sdu_txq_drop[i] + 1106 + priv->xstats.mtl_est_txq_hlbf[i]; 1107 + qopt->stats.window_drops = window_drops; 1108 + 1109 + /* Transmission overrun doesn't happen for stmmac, hence always 0 */ 1110 + qopt->stats.tx_overruns = 0; 1111 + } 1112 + 1113 + static void tc_taprio_queue_stats(struct stmmac_priv *priv, 1114 + struct tc_taprio_qopt_offload *qopt) 1115 + { 1116 + struct tc_taprio_qopt_queue_stats *q_stats = &qopt->queue_stats; 1117 + int queue = qopt->queue_stats.queue; 1118 + 1119 + q_stats->stats.window_drops = priv->xstats.max_sdu_txq_drop[queue] + 1120 + priv->xstats.mtl_est_txq_hlbf[queue]; 1121 + 1122 + /* Transmission overrun doesn't happen for stmmac, hence always 0 */ 1123 + q_stats->stats.tx_overruns = 0; 1124 + } 1125 + 1126 + static int tc_setup_taprio(struct stmmac_priv *priv, 1127 + struct tc_taprio_qopt_offload *qopt) 1128 + { 1129 + int err = 0; 1130 + 1131 + switch (qopt->cmd) { 1132 + case TAPRIO_CMD_REPLACE: 1133 + case TAPRIO_CMD_DESTROY: 1134 + err = tc_taprio_configure(priv, qopt); 1135 + break; 1136 + case TAPRIO_CMD_STATS: 1137 + tc_taprio_stats(priv, qopt); 1138 + break; 1139 + case TAPRIO_CMD_QUEUE_STATS: 1140 + tc_taprio_queue_stats(priv, qopt); 1141 + break; 1142 + default: 1143 + err = -EOPNOTSUPP; 1144 + } 1145 + 1146 + return err; 1123 1147 } 1124 1148 1125 1149 static int tc_setup_etf(struct stmmac_priv *priv, ··· 1204 1126 return -EOPNOTSUPP; 1205 1127 1206 1128 caps->gate_mask_per_txq = true; 1129 + caps->supports_queue_max_sdu = true; 1207 1130 1208 1131 return 0; 1209 1132 }
+1
include/linux/stmmac.h
··· 127 127 u32 gcl_unaligned[EST_GCL]; 128 128 u32 gcl[EST_GCL]; 129 129 u32 gcl_size; 130 + u32 max_sdu[MTL_MAX_TX_QUEUES]; 130 131 }; 131 132 132 133 struct stmmac_rxq_cfg {