Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: systemport: rewrite bcm_sysport_rx_refill

Currently, bcm_sysport_desc_rx() calls bcm_sysport_rx_refill() at the end of the
Rx packet processing loop, after the current Rx packet has already been passed to
napi_gro_receive(). However, bcm_sysport_rx_refill() might fail to allocate a new
Rx skb, thus leaving a hole in the Rx queue where no valid Rx buffer exists.

To eliminate this situation:

1. Rewrite bcm_sysport_rx_refill() to retain the current Rx skb on the
Rx queue if a new replacement Rx skb can't be allocated and DMA-mapped.
In this case, the data on the current Rx skb is effectively dropped.

2. Modify bcm_sysport_desc_rx() to call bcm_sysport_rx_refill() at the
top of the Rx packet processing loop, so that the new replacement Rx skb is
already in place before the current Rx skb is processed.

This is loosely inspired by commit d6707bec5986 ("net: bcmgenet: rewrite
bcmgenet_rx_refill()")

Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Florian Fainelli and committed by David S. Miller
c73b0183 baf387a8

+41 -40
drivers/net/ethernet/broadcom/bcmsysport.c
··· 524 524 dma_unmap_addr_set(cb, dma_addr, 0); 525 525 } 526 526 527 - static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv, 528 - struct bcm_sysport_cb *cb) 527 + static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv, 528 + struct bcm_sysport_cb *cb) 529 529 { 530 530 struct device *kdev = &priv->pdev->dev; 531 531 struct net_device *ndev = priv->netdev; 532 + struct sk_buff *skb, *rx_skb; 532 533 dma_addr_t mapping; 533 - int ret; 534 534 535 - cb->skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH); 536 - if (!cb->skb) { 535 + /* Allocate a new SKB for a new packet */ 536 + skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH); 537 + if (!skb) { 538 + priv->mib.alloc_rx_buff_failed++; 537 539 netif_err(priv, rx_err, ndev, "SKB alloc failed\n"); 538 - return -ENOMEM; 540 + return NULL; 539 541 } 540 542 541 - mapping = dma_map_single(kdev, cb->skb->data, 543 + mapping = dma_map_single(kdev, skb->data, 542 544 RX_BUF_LENGTH, DMA_FROM_DEVICE); 543 - ret = dma_mapping_error(kdev, mapping); 544 - if (ret) { 545 + if (dma_mapping_error(kdev, mapping)) { 545 546 priv->mib.rx_dma_failed++; 546 - bcm_sysport_free_cb(cb); 547 + dev_kfree_skb_any(skb); 547 548 netif_err(priv, rx_err, ndev, "DMA mapping failure\n"); 548 - return ret; 549 + return NULL; 549 550 } 550 551 552 + /* Grab the current SKB on the ring */ 553 + rx_skb = cb->skb; 554 + if (likely(rx_skb)) 555 + dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), 556 + RX_BUF_LENGTH, DMA_FROM_DEVICE); 557 + 558 + /* Put the new SKB on the ring */ 559 + cb->skb = skb; 551 560 dma_unmap_addr_set(cb, dma_addr, mapping); 552 561 dma_desc_set_addr(priv, cb->bd_addr, mapping); 553 562 554 563 netif_dbg(priv, rx_status, ndev, "RX refill\n"); 555 564 556 - return 0; 565 + /* Return the current SKB to the caller */ 566 + return rx_skb; 557 567 } 558 568 559 569 static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv) 560 570 { 561 571 struct bcm_sysport_cb *cb; 562 - int ret = 0; 572 
+ struct sk_buff *skb; 563 573 unsigned int i; 564 574 565 575 for (i = 0; i < priv->num_rx_bds; i++) { 566 576 cb = &priv->rx_cbs[i]; 567 - if (cb->skb) 568 - continue; 569 - 570 - ret = bcm_sysport_rx_refill(priv, cb); 571 - if (ret) 572 - break; 577 + skb = bcm_sysport_rx_refill(priv, cb); 578 + if (skb) 579 + dev_kfree_skb(skb); 580 + if (!cb->skb) 581 + return -ENOMEM; 573 582 } 574 583 575 - return ret; 584 + return 0; 576 585 } 577 586 578 587 /* Poll the hardware for up to budget packets to process */ 579 588 static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, 580 589 unsigned int budget) 581 590 { 582 - struct device *kdev = &priv->pdev->dev; 583 591 struct net_device *ndev = priv->netdev; 584 592 unsigned int processed = 0, to_process; 585 593 struct bcm_sysport_cb *cb; ··· 595 587 unsigned int p_index; 596 588 u16 len, status; 597 589 struct bcm_rsb *rsb; 598 - int ret; 599 590 600 591 /* Determine how much we should process since last call */ 601 592 p_index = rdma_readl(priv, RDMA_PROD_INDEX); ··· 612 605 613 606 while ((processed < to_process) && (processed < budget)) { 614 607 cb = &priv->rx_cbs[priv->rx_read_ptr]; 615 - skb = cb->skb; 608 + skb = bcm_sysport_rx_refill(priv, cb); 616 609 617 - processed++; 618 - priv->rx_read_ptr++; 619 - 620 - if (priv->rx_read_ptr == priv->num_rx_bds) 621 - priv->rx_read_ptr = 0; 622 610 623 611 /* We do not have a backing SKB, so we do not a corresponding 624 612 * DMA mapping for this incoming packet since ··· 624 622 netif_err(priv, rx_err, ndev, "out of memory!\n"); 625 623 ndev->stats.rx_dropped++; 626 624 ndev->stats.rx_errors++; 627 - goto refill; 625 + goto next; 628 626 } 629 - 630 - dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), 631 - RX_BUF_LENGTH, DMA_FROM_DEVICE); 632 627 633 628 /* Extract the Receive Status Block prepended */ 634 629 rsb = (struct bcm_rsb *)skb->data; ··· 642 643 netif_err(priv, rx_status, ndev, "fragmented packet!\n"); 643 644 ndev->stats.rx_dropped++; 644 
645 ndev->stats.rx_errors++; 645 - bcm_sysport_free_cb(cb); 646 - goto refill; 646 + dev_kfree_skb_any(skb); 647 + goto next; 647 648 } 648 649 649 650 if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) { ··· 652 653 ndev->stats.rx_over_errors++; 653 654 ndev->stats.rx_dropped++; 654 655 ndev->stats.rx_errors++; 655 - bcm_sysport_free_cb(cb); 656 - goto refill; 656 + dev_kfree_skb_any(skb); 657 + goto next; 657 658 } 658 659 659 660 skb_put(skb, len); ··· 680 681 ndev->stats.rx_bytes += len; 681 682 682 683 napi_gro_receive(&priv->napi, skb); 683 - refill: 684 - ret = bcm_sysport_rx_refill(priv, cb); 685 - if (ret) 686 - priv->mib.alloc_rx_buff_failed++; 684 + next: 685 + processed++; 686 + priv->rx_read_ptr++; 687 + 688 + if (priv->rx_read_ptr == priv->num_rx_bds) 689 + priv->rx_read_ptr = 0; 687 690 } 688 691 689 692 return processed;