Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

tsnep: Use page pool for RX

Use page pool for RX buffer handling. Makes RX path more efficient and
is required prework for future XDP support.

Signed-off-by: Gerhard Engleder <gerhard@engleder-embedded.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Gerhard Engleder and committed by David S. Miller
bb837a37 308ce142

+99 -67
+1
drivers/net/ethernet/engleder/Kconfig
··· 21 21 depends on HAS_IOMEM && HAS_DMA 22 22 depends on PTP_1588_CLOCK_OPTIONAL 23 23 select PHYLIB 24 + select PAGE_POOL 24 25 help 25 26 Support for the Engleder TSN endpoint Ethernet MAC IP Core. 26 27
+3 -2
drivers/net/ethernet/engleder/tsnep.h
··· 96 96 97 97 u32 properties; 98 98 99 - struct sk_buff *skb; 99 + struct page *page; 100 100 size_t len; 101 - DEFINE_DMA_UNMAP_ADDR(dma); 101 + dma_addr_t dma; 102 102 }; 103 103 104 104 struct tsnep_rx { ··· 113 113 int read; 114 114 u32 owner_counter; 115 115 int increment_owner_counter; 116 + struct page_pool *page_pool; 116 117 117 118 u32 packets; 118 119 u32 bytes;
+95 -65
drivers/net/ethernet/engleder/tsnep_main.c
··· 27 27 #include <linux/phy.h> 28 28 #include <linux/iopoll.h> 29 29 30 - #define RX_SKB_LENGTH (round_up(TSNEP_RX_INLINE_METADATA_SIZE + ETH_HLEN + \ 31 - TSNEP_MAX_FRAME_SIZE + ETH_FCS_LEN, 4)) 32 - #define RX_SKB_RESERVE ((16 - TSNEP_RX_INLINE_METADATA_SIZE) + NET_IP_ALIGN) 33 - #define RX_SKB_ALLOC_LENGTH (RX_SKB_RESERVE + RX_SKB_LENGTH) 30 + #define TSNEP_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) 31 + #define TSNEP_HEADROOM ALIGN(TSNEP_SKB_PAD, 4) 32 + #define TSNEP_MAX_RX_BUF_SIZE (PAGE_SIZE - TSNEP_HEADROOM - \ 33 + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) 34 34 35 35 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 36 36 #define DMA_ADDR_HIGH(dma_addr) ((u32)(((dma_addr) >> 32) & 0xFFFFFFFF)) ··· 587 587 588 588 for (i = 0; i < TSNEP_RING_SIZE; i++) { 589 589 entry = &rx->entry[i]; 590 - if (dma_unmap_addr(entry, dma)) 591 - dma_unmap_single(dmadev, dma_unmap_addr(entry, dma), 592 - dma_unmap_len(entry, len), 593 - DMA_FROM_DEVICE); 594 - if (entry->skb) 595 - dev_kfree_skb(entry->skb); 590 + if (entry->page) 591 + page_pool_put_full_page(rx->page_pool, entry->page, 592 + false); 593 + entry->page = NULL; 596 594 } 595 + 596 + if (rx->page_pool) 597 + page_pool_destroy(rx->page_pool); 597 598 598 599 memset(rx->entry, 0, sizeof(rx->entry)); 599 600 ··· 608 607 } 609 608 } 610 609 611 - static int tsnep_rx_alloc_and_map_skb(struct tsnep_rx *rx, 612 - struct tsnep_rx_entry *entry) 610 + static int tsnep_rx_alloc_buffer(struct tsnep_rx *rx, 611 + struct tsnep_rx_entry *entry) 613 612 { 614 - struct device *dmadev = rx->adapter->dmadev; 615 - struct sk_buff *skb; 616 - dma_addr_t dma; 613 + struct page *page; 617 614 618 - skb = __netdev_alloc_skb(rx->adapter->netdev, RX_SKB_ALLOC_LENGTH, 619 - GFP_ATOMIC | GFP_DMA); 620 - if (!skb) 615 + page = page_pool_dev_alloc_pages(rx->page_pool); 616 + if (unlikely(!page)) 621 617 return -ENOMEM; 622 618 623 - skb_reserve(skb, RX_SKB_RESERVE); 624 - 625 - dma = dma_map_single(dmadev, skb->data, RX_SKB_LENGTH, 626 - 
DMA_FROM_DEVICE); 627 - if (dma_mapping_error(dmadev, dma)) { 628 - dev_kfree_skb(skb); 629 - return -ENOMEM; 630 - } 631 - 632 - entry->skb = skb; 633 - entry->len = RX_SKB_LENGTH; 634 - dma_unmap_addr_set(entry, dma, dma); 635 - entry->desc->rx = __cpu_to_le64(dma); 619 + entry->page = page; 620 + entry->len = TSNEP_MAX_RX_BUF_SIZE; 621 + entry->dma = page_pool_get_dma_addr(entry->page); 622 + entry->desc->rx = __cpu_to_le64(entry->dma + TSNEP_SKB_PAD); 636 623 637 624 return 0; 638 625 } ··· 629 640 { 630 641 struct device *dmadev = rx->adapter->dmadev; 631 642 struct tsnep_rx_entry *entry; 643 + struct page_pool_params pp_params = { 0 }; 632 644 struct tsnep_rx_entry *next_entry; 633 645 int i, j; 634 646 int retval; ··· 651 661 entry->desc_dma = rx->page_dma[i] + TSNEP_DESC_SIZE * j; 652 662 } 653 663 } 664 + 665 + pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; 666 + pp_params.order = 0; 667 + pp_params.pool_size = TSNEP_RING_SIZE; 668 + pp_params.nid = dev_to_node(dmadev); 669 + pp_params.dev = dmadev; 670 + pp_params.dma_dir = DMA_FROM_DEVICE; 671 + pp_params.max_len = TSNEP_MAX_RX_BUF_SIZE; 672 + pp_params.offset = TSNEP_SKB_PAD; 673 + rx->page_pool = page_pool_create(&pp_params); 674 + if (IS_ERR(rx->page_pool)) { 675 + retval = PTR_ERR(rx->page_pool); 676 + rx->page_pool = NULL; 677 + goto failed; 678 + } 679 + 654 680 for (i = 0; i < TSNEP_RING_SIZE; i++) { 655 681 entry = &rx->entry[i]; 656 682 next_entry = &rx->entry[(i + 1) % TSNEP_RING_SIZE]; 657 683 entry->desc->next = __cpu_to_le64(next_entry->desc_dma); 658 684 659 - retval = tsnep_rx_alloc_and_map_skb(rx, entry); 685 + retval = tsnep_rx_alloc_buffer(rx, entry); 660 686 if (retval) 661 687 goto failed; 662 688 } ··· 688 682 { 689 683 struct tsnep_rx_entry *entry = &rx->entry[index]; 690 684 691 - /* RX_SKB_LENGTH is a multiple of 4 */ 685 + /* TSNEP_MAX_RX_BUF_SIZE is a multiple of 4 */ 692 686 entry->properties = entry->len & TSNEP_DESC_LENGTH_MASK; 693 687 entry->properties |= 
TSNEP_DESC_INTERRUPT_FLAG; 694 688 if (index == rx->increment_owner_counter) { ··· 711 705 entry->desc->properties = __cpu_to_le32(entry->properties); 712 706 } 713 707 708 + static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page, 709 + int length) 710 + { 711 + struct sk_buff *skb; 712 + 713 + skb = napi_build_skb(page_address(page), PAGE_SIZE); 714 + if (unlikely(!skb)) 715 + return NULL; 716 + 717 + /* update pointers within the skb to store the data */ 718 + skb_reserve(skb, TSNEP_SKB_PAD + TSNEP_RX_INLINE_METADATA_SIZE); 719 + __skb_put(skb, length - TSNEP_RX_INLINE_METADATA_SIZE - ETH_FCS_LEN); 720 + 721 + if (rx->adapter->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL) { 722 + struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb); 723 + struct tsnep_rx_inline *rx_inline = 724 + (struct tsnep_rx_inline *)(page_address(page) + 725 + TSNEP_SKB_PAD); 726 + 727 + skb_shinfo(skb)->tx_flags |= 728 + SKBTX_HW_TSTAMP_NETDEV; 729 + memset(hwtstamps, 0, sizeof(*hwtstamps)); 730 + hwtstamps->netdev_data = rx_inline; 731 + } 732 + 733 + skb_record_rx_queue(skb, rx->queue_index); 734 + skb->protocol = eth_type_trans(skb, rx->adapter->netdev); 735 + 736 + return skb; 737 + } 738 + 714 739 static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi, 715 740 int budget) 716 741 { 717 742 struct device *dmadev = rx->adapter->dmadev; 718 743 int done = 0; 744 + enum dma_data_direction dma_dir; 719 745 struct tsnep_rx_entry *entry; 746 + struct page *page; 720 747 struct sk_buff *skb; 721 - size_t len; 722 - dma_addr_t dma; 723 748 int length; 724 749 bool enable = false; 725 750 int retval; 751 + 752 + dma_dir = page_pool_get_dma_dir(rx->page_pool); 726 753 727 754 while (likely(done < budget)) { 728 755 entry = &rx->entry[rx->read]; ··· 769 730 */ 770 731 dma_rmb(); 771 732 772 - skb = entry->skb; 773 - len = dma_unmap_len(entry, len); 774 - dma = dma_unmap_addr(entry, dma); 733 + prefetch(page_address(entry->page) + TSNEP_SKB_PAD); 
734 + length = __le32_to_cpu(entry->desc_wb->properties) & 735 + TSNEP_DESC_LENGTH_MASK; 736 + dma_sync_single_range_for_cpu(dmadev, entry->dma, TSNEP_SKB_PAD, 737 + length, dma_dir); 738 + page = entry->page; 775 739 776 740 /* forward skb only if allocation is successful, otherwise 777 - * skb is reused and frame dropped 741 + * page is reused and frame dropped 778 742 */ 779 - retval = tsnep_rx_alloc_and_map_skb(rx, entry); 743 + retval = tsnep_rx_alloc_buffer(rx, entry); 780 744 if (!retval) { 781 - dma_unmap_single(dmadev, dma, len, DMA_FROM_DEVICE); 745 + skb = tsnep_build_skb(rx, page, length); 746 + if (skb) { 747 + page_pool_release_page(rx->page_pool, page); 782 748 783 - length = __le32_to_cpu(entry->desc_wb->properties) & 784 - TSNEP_DESC_LENGTH_MASK; 785 - skb_put(skb, length - ETH_FCS_LEN); 786 - if (rx->adapter->hwtstamp_config.rx_filter == 787 - HWTSTAMP_FILTER_ALL) { 788 - struct skb_shared_hwtstamps *hwtstamps = 789 - skb_hwtstamps(skb); 790 - struct tsnep_rx_inline *rx_inline = 791 - (struct tsnep_rx_inline *)skb->data; 749 + rx->packets++; 750 + rx->bytes += length - 751 + TSNEP_RX_INLINE_METADATA_SIZE; 752 + if (skb->pkt_type == PACKET_MULTICAST) 753 + rx->multicast++; 792 754 793 - skb_shinfo(skb)->tx_flags |= 794 - SKBTX_HW_TSTAMP_NETDEV; 795 - memset(hwtstamps, 0, sizeof(*hwtstamps)); 796 - hwtstamps->netdev_data = rx_inline; 755 + napi_gro_receive(napi, skb); 756 + } else { 757 + page_pool_recycle_direct(rx->page_pool, page); 758 + 759 + rx->dropped++; 797 760 } 798 - skb_pull(skb, TSNEP_RX_INLINE_METADATA_SIZE); 799 - skb_record_rx_queue(skb, rx->queue_index); 800 - skb->protocol = eth_type_trans(skb, 801 - rx->adapter->netdev); 802 - 803 - rx->packets++; 804 - rx->bytes += length - TSNEP_RX_INLINE_METADATA_SIZE; 805 - if (skb->pkt_type == PACKET_MULTICAST) 806 - rx->multicast++; 807 - 808 - napi_gro_receive(napi, skb); 809 761 done++; 810 762 } else { 811 763 rx->dropped++;