Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: stmmac: dwmac: Disable flushing frames on Rx Buffer Unavailable

In Store and Forward mode, flushing frames when the receive buffer is
unavailable can cause the MTL Rx FIFO to go out of sync. This results
in buffering of a few frames in the FIFO without triggering the Rx DMA
to transfer the data to the system memory until another packet
is received. Once the issue happens, for a ping request, the packet is
forwarded to the system memory only after we receive another packet
and hence we observe a latency equivalent to the ping interval.

64 bytes from 192.168.2.100: seq=1 ttl=64 time=1000.344 ms

Also, we can observe constant gmacgrp_debug register value of
0x00000120, which indicates "Reading frame data".

The issue is not reproducible after disabling frame flushing when the
Rx buffer is unavailable. However, in that case, the Rx DMA enters a suspend
state due to buffer unavailability. To resume operation, software
must write to the receive_poll_demand register after adding new
descriptors, which reactivates the Rx DMA.

This issue is observed on the socfpga platforms which have the dwmac1000
IP, such as Arria 10, Cyclone V and Agilex 7. The issue is reproducible
after running an iperf3 server on the DUT with lower UDP packet sizes.

Signed-off-by: Rohan G Thomas <rohan.g.thomas@altera.com>
Reviewed-by: Matthew Gerlach <matthew.gerlach@altera.com>
Tested-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
Reviewed-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
Link: https://patch.msgid.link/20251126-a10_ext_fix-v1-1-d163507f646f@altera.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>

authored by

Rohan G Thomas and committed by
Paolo Abeni
45d100ee 5c9c1e78

+14 -2
+3 -2
drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
··· 135 135 136 136 if (mode == SF_DMA_MODE) { 137 137 pr_debug("GMAC: enable RX store and forward mode\n"); 138 - csr6 |= DMA_CONTROL_RSF; 138 + csr6 |= DMA_CONTROL_RSF | DMA_CONTROL_DFF; 139 139 } else { 140 140 pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode); 141 - csr6 &= ~DMA_CONTROL_RSF; 141 + csr6 &= ~(DMA_CONTROL_RSF | DMA_CONTROL_DFF); 142 142 csr6 &= DMA_CONTROL_TC_RX_MASK; 143 143 if (mode <= 32) 144 144 csr6 |= DMA_CONTROL_RTC_32; ··· 262 262 .dma_rx_mode = dwmac1000_dma_operation_mode_rx, 263 263 .dma_tx_mode = dwmac1000_dma_operation_mode_tx, 264 264 .enable_dma_transmission = dwmac_enable_dma_transmission, 265 + .enable_dma_reception = dwmac_enable_dma_reception, 265 266 .enable_dma_irq = dwmac_enable_dma_irq, 266 267 .disable_dma_irq = dwmac_disable_dma_irq, 267 268 .start_tx = dwmac_dma_start_tx,
+1
drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
··· 169 169 #define NUM_DWMAC4_DMA_REGS 27 170 170 171 171 void dwmac_enable_dma_transmission(void __iomem *ioaddr, u32 chan); 172 + void dwmac_enable_dma_reception(void __iomem *ioaddr, u32 chan); 172 173 void dwmac_enable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr, 173 174 u32 chan, bool rx, bool tx); 174 175 void dwmac_disable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
+5
drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
··· 33 33 writel(1, ioaddr + DMA_CHAN_XMT_POLL_DEMAND(chan)); 34 34 } 35 35 36 + void dwmac_enable_dma_reception(void __iomem *ioaddr, u32 chan) 37 + { 38 + writel(1, ioaddr + DMA_CHAN_RCV_POLL_DEMAND(chan)); 39 + } 40 + 36 41 void dwmac_enable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr, 37 42 u32 chan, bool rx, bool tx) 38 43 {
+3
drivers/net/ethernet/stmicro/stmmac/hwif.h
··· 201 201 void (*dma_diagnostic_fr)(struct stmmac_extra_stats *x, 202 202 void __iomem *ioaddr); 203 203 void (*enable_dma_transmission)(void __iomem *ioaddr, u32 chan); 204 + void (*enable_dma_reception)(void __iomem *ioaddr, u32 chan); 204 205 void (*enable_dma_irq)(struct stmmac_priv *priv, void __iomem *ioaddr, 205 206 u32 chan, bool rx, bool tx); 206 207 void (*disable_dma_irq)(struct stmmac_priv *priv, void __iomem *ioaddr, ··· 262 261 stmmac_do_void_callback(__priv, dma, dma_diagnostic_fr, __args) 263 262 #define stmmac_enable_dma_transmission(__priv, __args...) \ 264 263 stmmac_do_void_callback(__priv, dma, enable_dma_transmission, __args) 264 + #define stmmac_enable_dma_reception(__priv, __args...) \ 265 + stmmac_do_void_callback(__priv, dma, enable_dma_reception, __args) 265 266 #define stmmac_enable_dma_irq(__priv, __args...) \ 266 267 stmmac_do_void_callback(__priv, dma, enable_dma_irq, __priv, __args) 267 268 #define stmmac_disable_dma_irq(__priv, __args...) \
+2
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 4973 4973 rx_q->rx_tail_addr = rx_q->dma_rx_phy + 4974 4974 (rx_q->dirty_rx * sizeof(struct dma_desc)); 4975 4975 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); 4976 + /* Wake up Rx DMA from the suspend state if required */ 4977 + stmmac_enable_dma_reception(priv, priv->ioaddr, queue); 4976 4978 } 4977 4979 4978 4980 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,