Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

igb: add xdp frags support to ndo_xdp_xmit

Add the capability to map non-linear xdp frames in XDP_TX and
ndo_xdp_xmit callback.

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Tested-by: Chandan Kumar Rout <chandanx.rout@intel.com> (A Contingent worker at Intel)
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Link: https://lore.kernel.org/r/20220711230751.3124415-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

Authored by Lorenzo Bianconi and committed by Jakub Kicinski
1aea9d87 c9ef2a48

+78 -43
+78 -43
drivers/net/ethernet/intel/igb/igb_main.c
@@ -6260,74 +6260,108 @@
 			      struct igb_ring *tx_ring,
 			      struct xdp_frame *xdpf)
 {
-	union e1000_adv_tx_desc *tx_desc;
-	u32 len, cmd_type, olinfo_status;
-	struct igb_tx_buffer *tx_buffer;
-	dma_addr_t dma;
-	u16 i;
+	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
+	u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
+	u16 count, i, index = tx_ring->next_to_use;
+	struct igb_tx_buffer *tx_head = &tx_ring->tx_buffer_info[index];
+	struct igb_tx_buffer *tx_buffer = tx_head;
+	union e1000_adv_tx_desc *tx_desc = IGB_TX_DESC(tx_ring, index);
+	u32 len = xdpf->len, cmd_type, olinfo_status;
+	void *data = xdpf->data;
 
-	len = xdpf->len;
+	count = TXD_USE_COUNT(len);
+	for (i = 0; i < nr_frags; i++)
+		count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i]));
 
-	if (unlikely(!igb_desc_unused(tx_ring)))
+	if (igb_maybe_stop_tx(tx_ring, count + 3))
 		return IGB_XDP_CONSUMED;
 
-	dma = dma_map_single(tx_ring->dev, xdpf->data, len, DMA_TO_DEVICE);
-	if (dma_mapping_error(tx_ring->dev, dma))
-		return IGB_XDP_CONSUMED;
-
+	i = 0;
 	/* record the location of the first descriptor for this packet */
-	tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
-	tx_buffer->bytecount = len;
-	tx_buffer->gso_segs = 1;
-	tx_buffer->protocol = 0;
+	tx_head->bytecount = xdp_get_frame_len(xdpf);
+	tx_head->type = IGB_TYPE_XDP;
+	tx_head->gso_segs = 1;
+	tx_head->xdpf = xdpf;
 
-	i = tx_ring->next_to_use;
-	tx_desc = IGB_TX_DESC(tx_ring, i);
-
-	dma_unmap_len_set(tx_buffer, len, len);
-	dma_unmap_addr_set(tx_buffer, dma, dma);
-	tx_buffer->type = IGB_TYPE_XDP;
-	tx_buffer->xdpf = xdpf;
-
-	tx_desc->read.buffer_addr = cpu_to_le64(dma);
-
-	/* put descriptor type bits */
-	cmd_type = E1000_ADVTXD_DTYP_DATA |
-		   E1000_ADVTXD_DCMD_DEXT |
-		   E1000_ADVTXD_DCMD_IFCS;
-	cmd_type |= len | IGB_TXD_DCMD;
-	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
-
-	olinfo_status = len << E1000_ADVTXD_PAYLEN_SHIFT;
+	olinfo_status = tx_head->bytecount << E1000_ADVTXD_PAYLEN_SHIFT;
 	/* 82575 requires a unique index per ring */
 	if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
 		olinfo_status |= tx_ring->reg_idx << 4;
-
 	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
 
-	netdev_tx_sent_queue(txring_txq(tx_ring), tx_buffer->bytecount);
+	for (;;) {
+		dma_addr_t dma;
 
+		dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE);
+		if (dma_mapping_error(tx_ring->dev, dma))
+			goto unmap;
+
+		/* record length, and DMA address */
+		dma_unmap_len_set(tx_buffer, len, len);
+		dma_unmap_addr_set(tx_buffer, dma, dma);
+
+		/* put descriptor type bits */
+		cmd_type = E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_DEXT |
+			   E1000_ADVTXD_DCMD_IFCS | len;
+
+		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+		tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
+		tx_buffer->protocol = 0;
+
+		if (++index == tx_ring->count)
+			index = 0;
+
+		if (i == nr_frags)
+			break;
+
+		tx_buffer = &tx_ring->tx_buffer_info[index];
+		tx_desc = IGB_TX_DESC(tx_ring, index);
+		tx_desc->read.olinfo_status = 0;
+
+		data = skb_frag_address(&sinfo->frags[i]);
+		len = skb_frag_size(&sinfo->frags[i]);
+		i++;
+	}
+	tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_TXD_DCMD);
+
+	netdev_tx_sent_queue(txring_txq(tx_ring), tx_head->bytecount);
 	/* set the timestamp */
-	tx_buffer->time_stamp = jiffies;
+	tx_head->time_stamp = jiffies;
 
 	/* Avoid any potential race with xdp_xmit and cleanup */
 	smp_wmb();
 
 	/* set next_to_watch value indicating a packet is present */
-	i++;
-	if (i == tx_ring->count)
-		i = 0;
-
-	tx_buffer->next_to_watch = tx_desc;
-	tx_ring->next_to_use = i;
+	tx_head->next_to_watch = tx_desc;
+	tx_ring->next_to_use = index;
 
 	/* Make sure there is space in the ring for the next send. */
 	igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
-		writel(i, tx_ring->tail);
+		writel(index, tx_ring->tail);
 
 	return IGB_XDP_TX;
+
+unmap:
+	for (;;) {
+		tx_buffer = &tx_ring->tx_buffer_info[index];
+		if (dma_unmap_len(tx_buffer, len))
+			dma_unmap_page(tx_ring->dev,
+				       dma_unmap_addr(tx_buffer, dma),
+				       dma_unmap_len(tx_buffer, len),
+				       DMA_TO_DEVICE);
+		dma_unmap_len_set(tx_buffer, len, 0);
+		if (tx_buffer == tx_head)
+			break;
+
+		if (!index)
+			index += tx_ring->count;
+		index--;
+	}
+
+	return IGB_XDP_CONSUMED;
 }
 
 netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
@@ -8852,6 +8818,7 @@
 		unsigned int offset = pkt_offset + igb_rx_offset(rx_ring);
 
 		xdp_prepare_buff(&xdp, hard_start, offset, size, true);
+		xdp_buff_clear_frags_flag(&xdp);
 #if (PAGE_SIZE > 4096)
 		/* At larger PAGE_SIZE, frame_sz depend on len size */
 		xdp.frame_sz = igb_rx_frame_truesize(rx_ring, size);