ravb: unmap descriptors when freeing rings

"swiotlb buffer is full" errors occur after repeated initialisation of a
device - f.e. suspend/resume or ip link set up/down. This is because memory
mapped using dma_map_single() in ravb_ring_format() and ravb_start_xmit()
is not released. Resolve this problem by unmapping descriptors when
freeing rings.

Fixes: c156633f1353 ("Renesas Ethernet AVB driver proper")
Signed-off-by: Kazuya Mizuguchi <kazuya.mizuguchi.ks@renesas.com>
[simon: reworked]
Signed-off-by: Simon Horman <horms+renesas@verge.net.au>
Acked-by: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
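
For context, the leak follows from the DMA API's pairing rule: every buffer
handed to dma_map_single() stays mapped (and, with swiotlb, keeps its bounce
buffer) until a matching dma_unmap_single() is issued. Below is a minimal
illustrative sketch of that rule, not code from this patch; the helper names
and the rx_buf_sz parameter are invented for the example.

/* Illustrative only -- not ravb driver code. */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

/* Map an RX buffer for DMA; the caller must remember "dma" so the mapping
 * can be undone before the ring memory is freed.
 */
static int example_map_rx_buf(struct device *dev, struct sk_buff *skb,
			      size_t rx_buf_sz, dma_addr_t *dma)
{
	*dma = dma_map_single(dev, skb->data, rx_buf_sz, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma))
		return -ENOMEM;
	return 0;
}

/* Counterpart that must run for every still-mapped buffer before the
 * descriptor memory is released; skipping it leaks one mapping per
 * buffer on every up/down or suspend/resume cycle.
 */
static void example_unmap_rx_buf(struct device *dev, dma_addr_t dma,
				 size_t rx_buf_sz)
{
	if (!dma_mapping_error(dev, dma))
		dma_unmap_single(dev, dma, rx_buf_sz, DMA_FROM_DEVICE);
}

In the driver itself this is what the patch adds: ravb_ring_free() now walks
the RX ring and unmaps each still-mapped buffer before dma_free_coherent(),
and the reworked ravb_tx_free(ndev, q, false) does the same for TX
descriptors, including ones that were mapped but never transmitted.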


1 file changed, 64 insertions(+), 48 deletions(-)
drivers/net/ethernet/renesas/ravb_main.c
···
 	.get_mdio_data = ravb_get_mdio_data,
 };
 
+/* Free TX skb function for AVB-IP */
+static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	struct net_device_stats *stats = &priv->stats[q];
+	struct ravb_tx_desc *desc;
+	int free_num = 0;
+	int entry;
+	u32 size;
+
+	for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
+		bool txed;
+
+		entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
+					     NUM_TX_DESC);
+		desc = &priv->tx_ring[q][entry];
+		txed = desc->die_dt == DT_FEMPTY;
+		if (free_txed_only && !txed)
+			break;
+		/* Descriptor type must be checked before all other reads */
+		dma_rmb();
+		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
+		/* Free the original skb. */
+		if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
+			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
+					 size, DMA_TO_DEVICE);
+			/* Last packet descriptor? */
+			if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
+				entry /= NUM_TX_DESC;
+				dev_kfree_skb_any(priv->tx_skb[q][entry]);
+				priv->tx_skb[q][entry] = NULL;
+				if (txed)
+					stats->tx_packets++;
+			}
+			free_num++;
+		}
+		if (txed)
+			stats->tx_bytes += size;
+		desc->die_dt = DT_EEMPTY;
+	}
+	return free_num;
+}
+
 /* Free skb's and DMA buffers for Ethernet AVB */
 static void ravb_ring_free(struct net_device *ndev, int q)
 {
···
 	kfree(priv->rx_skb[q]);
 	priv->rx_skb[q] = NULL;
 
-	/* Free TX skb ringbuffer */
-	if (priv->tx_skb[q]) {
-		for (i = 0; i < priv->num_tx_ring[q]; i++)
-			dev_kfree_skb(priv->tx_skb[q][i]);
-	}
-	kfree(priv->tx_skb[q]);
-	priv->tx_skb[q] = NULL;
-
 	/* Free aligned TX buffers */
 	kfree(priv->tx_align[q]);
 	priv->tx_align[q] = NULL;
 
 	if (priv->rx_ring[q]) {
+		for (i = 0; i < priv->num_rx_ring[q]; i++) {
+			struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
+
+			if (!dma_mapping_error(ndev->dev.parent,
+					       le32_to_cpu(desc->dptr)))
+				dma_unmap_single(ndev->dev.parent,
+						 le32_to_cpu(desc->dptr),
+						 PKT_BUF_SZ,
+						 DMA_FROM_DEVICE);
+		}
 		ring_size = sizeof(struct ravb_ex_rx_desc) *
 			    (priv->num_rx_ring[q] + 1);
 		dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
···
 	}
 
 	if (priv->tx_ring[q]) {
+		ravb_tx_free(ndev, q, false);
+
 		ring_size = sizeof(struct ravb_tx_desc) *
 			    (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
 		dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
 				  priv->tx_desc_dma[q]);
 		priv->tx_ring[q] = NULL;
 	}
+
+	/* Free TX skb ringbuffer.
+	 * SKBs are freed by ravb_tx_free() call above.
+	 */
+	kfree(priv->tx_skb[q]);
+	priv->tx_skb[q] = NULL;
 }
 
 /* Format skb and descriptor buffer for Ethernet AVB */
···
 	ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_OPERATION);
 
 	return 0;
-}
-
-/* Free TX skb function for AVB-IP */
-static int ravb_tx_free(struct net_device *ndev, int q)
-{
-	struct ravb_private *priv = netdev_priv(ndev);
-	struct net_device_stats *stats = &priv->stats[q];
-	struct ravb_tx_desc *desc;
-	int free_num = 0;
-	int entry;
-	u32 size;
-
-	for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
-		entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
-					     NUM_TX_DESC);
-		desc = &priv->tx_ring[q][entry];
-		if (desc->die_dt != DT_FEMPTY)
-			break;
-		/* Descriptor type must be checked before all other reads */
-		dma_rmb();
-		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
-		/* Free the original skb. */
-		if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
-			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
-					 size, DMA_TO_DEVICE);
-			/* Last packet descriptor? */
-			if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
-				entry /= NUM_TX_DESC;
-				dev_kfree_skb_any(priv->tx_skb[q][entry]);
-				priv->tx_skb[q][entry] = NULL;
-				stats->tx_packets++;
-			}
-			free_num++;
-		}
-		stats->tx_bytes += size;
-		desc->die_dt = DT_EEMPTY;
-	}
-	return free_num;
 }
 
 static void ravb_get_tx_tstamp(struct net_device *ndev)
···
 	spin_lock_irqsave(&priv->lock, flags);
 	/* Clear TX interrupt */
 	ravb_write(ndev, ~mask, TIS);
-	ravb_tx_free(ndev, q);
+	ravb_tx_free(ndev, q, true);
 	netif_wake_subqueue(ndev, q);
 	mmiowb();
 	spin_unlock_irqrestore(&priv->lock, flags);
···
 
 	priv->cur_tx[q] += NUM_TX_DESC;
 	if (priv->cur_tx[q] - priv->dirty_tx[q] >
-	    (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q))
+	    (priv->num_tx_ring[q] - 1) * NUM_TX_DESC &&
+	    !ravb_tx_free(ndev, q, true))
 		netif_stop_subqueue(ndev, q);
 
 exit: