
net: mvneta: introduce page pool API for sw buffer manager

Use the page_pool API for allocations and DMA handling instead of
__dev_alloc_page()/dma_map_page() and free_page()/dma_unmap_page().
Pages are unmapped using page_pool_release_page() before they go up
into the network stack.
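
For readers new to the API, here is a minimal sketch of the refill
pattern the driver converts to. The helper name and parameters are
illustrative, not part of the patch; it assumes a pool created with
PP_FLAG_DMA_MAP so that the pool owns the DMA mapping:

#include <net/page_pool.h>

/* Illustrative only: refill one RX buffer from a page_pool created
 * with PP_FLAG_DMA_MAP.
 */
static int example_rx_refill(struct device *dma_dev, struct page_pool *pool,
			     dma_addr_t *phys_addr, gfp_t gfp_mask)
{
	struct page *page;

	/* May return a recycled page; no dma_map_page() or
	 * dma_mapping_error() handling is needed here.
	 */
	page = page_pool_alloc_pages(pool, gfp_mask | __GFP_NOWARN);
	if (!page)
		return -ENOMEM;

	/* Fetch the mapping the pool stored at map time and hand the
	 * buffer back to the device.
	 */
	*phys_addr = page_pool_get_dma_addr(page);
	dma_sync_single_for_device(dma_dev, *phys_addr, PAGE_SIZE,
				   page_pool_get_dma_dir(pool));
	return 0;
}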

The page_pool API offers buffer recycling capabilities for XDP, but
allocates one page per packet unless the driver splits and manages
the allocated page itself.
This is a preliminary patch to add XDP support to the mvneta driver.
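
Note the two exit paths this implies: pages handed to the network
stack are released from the pool (unmapped, not recycled), while
pages the driver still owns are returned for recycling. A hedged
sketch, using the three-argument page_pool_put_page() of this kernel
generation; the helper name is hypothetical:

/* Illustrative only: the two ways a page leaves the pool. */
static void example_page_done(struct page_pool *pool, struct page *page,
			      bool to_stack)
{
	if (to_stack) {
		/* Unmap and drop pool ownership; the skb path frees
		 * the page normally, with no recycling.
		 */
		page_pool_release_page(pool, page);
	} else {
		/* Return the page for recycling, e.g. on queue
		 * teardown; false means the caller is not in the
		 * pool's NAPI context, so no direct-cache recycling.
		 */
		page_pool_put_page(pool, page, false);
	}
}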

Signed-off-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Lorenzo Bianconi, committed by David S. Miller
568a3fa2 ff519e2a

+65 -19, 2 files changed

drivers/net/ethernet/marvell/Kconfig (+1)

···
 	depends on ARCH_MVEBU || COMPILE_TEST
 	select MVMDIO
 	select PHYLINK
+	select PAGE_POOL
 	---help---
 	  This driver supports the network interface units in the
 	  Marvell ARMADA XP, ARMADA 370, ARMADA 38x and
drivers/net/ethernet/marvell/mvneta.c (+64 -19)

···
 #include <net/ip.h>
 #include <net/ipv6.h>
 #include <net/tso.h>
+#include <net/page_pool.h>
 
 /* Registers */
 #define MVNETA_RXQ_CONFIG_REG(q)	(0x1400 + ((q) << 2))
···
 	u32 pkts_coal;
 	u32 time_coal;
+
+	/* page_pool */
+	struct page_pool *page_pool;
+	struct xdp_rxq_info xdp_rxq;
 
 	/* Virtual address of the RX buffer */
 	void **buf_virt_addr;
···
 			    struct mvneta_rx_queue *rxq,
 			    gfp_t gfp_mask)
 {
+	enum dma_data_direction dma_dir;
 	dma_addr_t phys_addr;
 	struct page *page;
 
-	page = __dev_alloc_page(gfp_mask);
+	page = page_pool_alloc_pages(rxq->page_pool,
+				     gfp_mask | __GFP_NOWARN);
 	if (!page)
 		return -ENOMEM;
 
-	/* map page for use */
-	phys_addr = dma_map_page(pp->dev->dev.parent, page, 0, PAGE_SIZE,
-				 DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
-		__free_page(page);
-		return -ENOMEM;
-	}
-
-	phys_addr += pp->rx_offset_correction;
+	phys_addr = page_pool_get_dma_addr(page) + pp->rx_offset_correction;
+	dma_dir = page_pool_get_dma_dir(rxq->page_pool);
+	dma_sync_single_for_device(pp->dev->dev.parent, phys_addr,
+				   PAGE_SIZE, dma_dir);
 	mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq);
+
 	return 0;
 }
···
 		if (!data || !(rx_desc->buf_phys_addr))
 			continue;
 
-		dma_unmap_page(pp->dev->dev.parent, rx_desc->buf_phys_addr,
-			       PAGE_SIZE, DMA_FROM_DEVICE);
-		__free_page(data);
+		page_pool_put_page(rxq->page_pool, data, false);
 	}
+	if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
+		xdp_rxq_info_unreg(&rxq->xdp_rxq);
+	page_pool_destroy(rxq->page_pool);
+	rxq->page_pool = NULL;
 }
···
 			skb_add_rx_frag(rxq->skb, frag_num, page,
 					frag_offset, frag_size,
 					PAGE_SIZE);
-			dma_unmap_page(dev->dev.parent, phys_addr,
-				       PAGE_SIZE, DMA_FROM_DEVICE);
+			page_pool_release_page(rxq->page_pool, page);
 			rxq->left_size -= frag_size;
 		}
 	} else {
···
 					frag_offset, frag_size,
 					PAGE_SIZE);
 
-			dma_unmap_page(dev->dev.parent, phys_addr,
-				       PAGE_SIZE, DMA_FROM_DEVICE);
-
+			page_pool_release_page(rxq->page_pool, page);
 			rxq->left_size -= frag_size;
 		}
 	} /* Middle or Last descriptor */
···
 	return rx_done;
 }
 
+static int mvneta_create_page_pool(struct mvneta_port *pp,
+				   struct mvneta_rx_queue *rxq, int size)
+{
+	struct page_pool_params pp_params = {
+		.order = 0,
+		.flags = PP_FLAG_DMA_MAP,
+		.pool_size = size,
+		.nid = cpu_to_node(0),
+		.dev = pp->dev->dev.parent,
+		.dma_dir = DMA_FROM_DEVICE,
+	};
+	int err;
+
+	rxq->page_pool = page_pool_create(&pp_params);
+	if (IS_ERR(rxq->page_pool)) {
+		err = PTR_ERR(rxq->page_pool);
+		rxq->page_pool = NULL;
+		return err;
+	}
+
+	err = xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id);
+	if (err < 0)
+		goto err_free_pp;
+
+	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
+					 rxq->page_pool);
+	if (err)
+		goto err_unregister_rxq;
+
+	return 0;
+
+err_unregister_rxq:
+	xdp_rxq_info_unreg(&rxq->xdp_rxq);
+err_free_pp:
+	page_pool_destroy(rxq->page_pool);
+	rxq->page_pool = NULL;
+	return err;
+}
+
 /* Handle rxq fill: allocates rxq skbs; called when initializing a port */
 static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 			   int num)
 {
-	int i;
+	int i, err;
+
+	err = mvneta_create_page_pool(pp, rxq, num);
+	if (err < 0)
+		return err;
 
 	for (i = 0; i < num; i++) {
 		memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
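
Teardown mirrors setup in reverse, as the mvneta_rxq_drop_pkts() hunk
above shows: the driver first returns the pages it still owns with
page_pool_put_page(), then unregisters the XDP rxq, and only then
destroys the pool. A minimal sketch of that ordering, with a
hypothetical helper name:

#include <net/xdp.h>
#include <net/page_pool.h>

/* Illustrative only: unwind ordering for a pool tied to an XDP rxq. */
static void example_pool_teardown(struct page_pool *pool,
				  struct xdp_rxq_info *xdp_rxq)
{
	/* Any pages the driver still holds must already have been
	 * returned via page_pool_put_page() at this point.
	 */
	if (xdp_rxq_info_is_reg(xdp_rxq))
		xdp_rxq_info_unreg(xdp_rxq);
	page_pool_destroy(pool);
}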