Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: bcmgenet: rewrite bcmgenet_rx_refill()

Currently, bcmgenet_desc_rx() calls bcmgenet_rx_refill() at the end of
Rx packet processing loop, after the current Rx packet has already been
passed to napi_gro_receive(). However, bcmgenet_rx_refill() might fail
to allocate a new Rx skb, thus leaving a hole on the Rx queue where no
valid Rx buffer exists.

To eliminate this situation:
1. Rewrite bcmgenet_rx_refill() to retain the current Rx skb on the Rx
queue if a new replacement Rx skb can't be allocated and DMA-mapped.
In this case, the data on the current Rx skb is effectively dropped.
2. Modify bcmgenet_desc_rx() to call bcmgenet_rx_refill() at the top of
Rx packet processing loop, so that the new replacement Rx skb is
already in place before the current Rx skb is processed.

Signed-off-by: Petri Gynther <pgynther@google.com>
Tested-by: Jaedon Shin <jaedon.shin@gmail.com>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Tested-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Petri Gynther and committed by David S. Miller
d6707bec 5e1459ca

+45 -58
drivers/net/ethernet/broadcom/genet/bcmgenet.c
··· 1336 1336 return ret; 1337 1337 } 1338 1338 1339 - 1340 - static int bcmgenet_rx_refill(struct bcmgenet_priv *priv, struct enet_cb *cb) 1339 + static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv, 1340 + struct enet_cb *cb) 1341 1341 { 1342 1342 struct device *kdev = &priv->pdev->dev; 1343 1343 struct sk_buff *skb; 1344 + struct sk_buff *rx_skb; 1344 1345 dma_addr_t mapping; 1345 - int ret; 1346 1346 1347 + /* Allocate a new Rx skb */ 1347 1348 skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT); 1348 - if (!skb) 1349 - return -ENOMEM; 1350 - 1351 - /* a caller did not release this control block */ 1352 - WARN_ON(cb->skb != NULL); 1353 - cb->skb = skb; 1354 - mapping = dma_map_single(kdev, skb->data, 1355 - priv->rx_buf_len, DMA_FROM_DEVICE); 1356 - ret = dma_mapping_error(kdev, mapping); 1357 - if (ret) { 1358 - priv->mib.rx_dma_failed++; 1359 - bcmgenet_free_cb(cb); 1349 + if (!skb) { 1350 + priv->mib.alloc_rx_buff_failed++; 1360 1351 netif_err(priv, rx_err, priv->dev, 1361 - "%s DMA map failed\n", __func__); 1362 - return ret; 1352 + "%s: Rx skb allocation failed\n", __func__); 1353 + return NULL; 1363 1354 } 1364 1355 1356 + /* DMA-map the new Rx skb */ 1357 + mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len, 1358 + DMA_FROM_DEVICE); 1359 + if (dma_mapping_error(kdev, mapping)) { 1360 + priv->mib.rx_dma_failed++; 1361 + dev_kfree_skb_any(skb); 1362 + netif_err(priv, rx_err, priv->dev, 1363 + "%s: Rx skb DMA mapping failed\n", __func__); 1364 + return NULL; 1365 + } 1366 + 1367 + /* Grab the current Rx skb from the ring and DMA-unmap it */ 1368 + rx_skb = cb->skb; 1369 + if (likely(rx_skb)) 1370 + dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), 1371 + priv->rx_buf_len, DMA_FROM_DEVICE); 1372 + 1373 + /* Put the new Rx skb on the ring */ 1374 + cb->skb = skb; 1365 1375 dma_unmap_addr_set(cb, dma_addr, mapping); 1366 1376 dmadesc_set_addr(priv, cb->bd_addr, mapping); 1367 1377 1368 - return 0; 1378 + /* Return the 
current Rx skb to caller */ 1379 + return rx_skb; 1369 1380 } 1370 1381 1371 1382 /* bcmgenet_desc_rx - descriptor based rx process. ··· 1392 1381 struct sk_buff *skb; 1393 1382 u32 dma_length_status; 1394 1383 unsigned long dma_flag; 1395 - int len, err; 1384 + int len; 1396 1385 unsigned int rxpktprocessed = 0, rxpkttoprocess; 1397 1386 unsigned int p_index; 1398 1387 unsigned int discards; ··· 1430 1419 while ((rxpktprocessed < rxpkttoprocess) && 1431 1420 (rxpktprocessed < budget)) { 1432 1421 cb = &priv->rx_cbs[ring->read_ptr]; 1433 - skb = cb->skb; 1422 + skb = bcmgenet_rx_refill(priv, cb); 1434 1423 1435 - /* We do not have a backing SKB, so we do not have a 1436 - * corresponding DMA mapping for this incoming packet since 1437 - * bcmgenet_rx_refill always either has both skb and mapping or 1438 - * none. 1439 - */ 1440 1424 if (unlikely(!skb)) { 1441 1425 dev->stats.rx_dropped++; 1442 1426 dev->stats.rx_errors++; 1443 - goto refill; 1427 + goto next; 1444 1428 } 1445 - 1446 - /* Unmap the packet contents such that we can use the 1447 - * RSV from the 64 bytes descriptor when enabled and save 1448 - * a 32-bits register read 1449 - */ 1450 - dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr), 1451 - priv->rx_buf_len, DMA_FROM_DEVICE); 1452 1429 1453 1430 if (!priv->desc_64b_en) { 1454 1431 dma_length_status = ··· 1464 1465 "dropping fragmented packet!\n"); 1465 1466 dev->stats.rx_dropped++; 1466 1467 dev->stats.rx_errors++; 1467 - dev_kfree_skb_any(cb->skb); 1468 - cb->skb = NULL; 1469 - goto refill; 1468 + dev_kfree_skb_any(skb); 1469 + goto next; 1470 1470 } 1471 + 1471 1472 /* report errors */ 1472 1473 if (unlikely(dma_flag & (DMA_RX_CRC_ERROR | 1473 1474 DMA_RX_OV | ··· 1486 1487 dev->stats.rx_length_errors++; 1487 1488 dev->stats.rx_dropped++; 1488 1489 dev->stats.rx_errors++; 1489 - 1490 - /* discard the packet and advance consumer index.*/ 1491 - dev_kfree_skb_any(cb->skb); 1492 - cb->skb = NULL; 1493 - goto refill; 1490 + 
dev_kfree_skb_any(skb); 1491 + goto next; 1494 1492 } /* error packet */ 1495 1493 1496 1494 chksum_ok = (dma_flag & priv->dma_rx_chk_bit) && ··· 1520 1524 1521 1525 /* Notify kernel */ 1522 1526 napi_gro_receive(&priv->napi, skb); 1523 - cb->skb = NULL; 1524 1527 netif_dbg(priv, rx_status, dev, "pushed up to kernel\n"); 1525 1528 1526 - /* refill RX path on the current control block */ 1527 - refill: 1528 - err = bcmgenet_rx_refill(priv, cb); 1529 - if (err) { 1530 - priv->mib.alloc_rx_buff_failed++; 1531 - netif_err(priv, rx_err, dev, "Rx refill failed\n"); 1532 - } 1533 - 1529 + next: 1534 1530 rxpktprocessed++; 1535 1531 if (likely(ring->read_ptr < ring->end_ptr)) 1536 1532 ring->read_ptr++; ··· 1541 1553 struct bcmgenet_rx_ring *ring) 1542 1554 { 1543 1555 struct enet_cb *cb; 1544 - int ret = 0; 1556 + struct sk_buff *skb; 1545 1557 int i; 1546 1558 1547 1559 netif_dbg(priv, hw, priv->dev, "%s\n", __func__); ··· 1549 1561 /* loop here for each buffer needing assign */ 1550 1562 for (i = 0; i < ring->size; i++) { 1551 1563 cb = ring->cbs + i; 1552 - if (cb->skb) 1553 - continue; 1554 - 1555 - ret = bcmgenet_rx_refill(priv, cb); 1556 - if (ret) 1557 - break; 1564 + skb = bcmgenet_rx_refill(priv, cb); 1565 + if (skb) 1566 + dev_kfree_skb_any(skb); 1567 + if (!cb->skb) 1568 + return -ENOMEM; 1558 1569 } 1559 1570 1560 - return ret; 1571 + return 0; 1561 1572 } 1562 1573 1563 1574 static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)