Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ice: Move common functions to ice_txrx_lib.c

In preparation for AF_XDP, move functions that will be used by both the skb
and zero-copy paths to a new file called ice_txrx_lib.c. This allows us to
avoid using ifdefs to control the staticness of said functions.

Move other functions (ice_rx_csum, ice_rx_hash and ice_ptype_to_htype)
called only by the moved ones to the new file as well.

Signed-off-by: Krzysztof Kazimierczak <krzysztof.kazimierczak@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>

Authored by Krzysztof Kazimierczak; committed by Jeff Kirsher
0891d6d4 efc2214b

+334 -312
+1
drivers/net/ethernet/intel/ice/Makefile
··· 15 15 ice_sched.o \ 16 16 ice_base.o \ 17 17 ice_lib.o \ 18 + ice_txrx_lib.o \ 18 19 ice_txrx.o \ 19 20 ice_flex_pipe.o \ 20 21 ice_ethtool.o
+1 -302
drivers/net/ethernet/intel/ice/ice_txrx.c
··· 7 7 #include <linux/mm.h> 8 8 #include <linux/bpf_trace.h> 9 9 #include <net/xdp.h> 10 + #include "ice_txrx_lib.h" 10 11 #include "ice_lib.h" 11 12 #include "ice.h" 12 13 #include "ice_dcb_lib.h" ··· 398 397 } 399 398 400 399 /** 401 - * ice_release_rx_desc - Store the new tail and head values 402 - * @rx_ring: ring to bump 403 - * @val: new head index 404 - */ 405 - static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val) 406 - { 407 - u16 prev_ntu = rx_ring->next_to_use; 408 - 409 - rx_ring->next_to_use = val; 410 - 411 - /* update next to alloc since we have filled the ring */ 412 - rx_ring->next_to_alloc = val; 413 - 414 - /* QRX_TAIL will be updated with any tail value, but hardware ignores 415 - * the lower 3 bits. This makes it so we only bump tail on meaningful 416 - * boundaries. Also, this allows us to bump tail on intervals of 8 up to 417 - * the budget depending on the current traffic load. 418 - */ 419 - val &= ~0x7; 420 - if (prev_ntu != val) { 421 - /* Force memory writes to complete before letting h/w 422 - * know there are new descriptors to fetch. (Only 423 - * applicable for weak-ordered memory model archs, 424 - * such as IA-64). 425 - */ 426 - wmb(); 427 - writel(val, rx_ring->tail); 428 - } 429 - } 430 - 431 - /** 432 400 * ice_rx_offset - Return expected offset into page to access data 433 401 * @rx_ring: Ring we are requesting offset of 434 402 * ··· 406 436 static unsigned int ice_rx_offset(struct ice_ring *rx_ring) 407 437 { 408 438 return ice_is_xdp_ena_vsi(rx_ring->vsi) ? XDP_PACKET_HEADROOM : 0; 409 - } 410 - 411 - /** 412 - * ice_xdp_ring_update_tail - Updates the XDP Tx ring tail register 413 - * @xdp_ring: XDP Tx ring 414 - * 415 - * This function updates the XDP Tx ring tail register. 416 - */ 417 - static void ice_xdp_ring_update_tail(struct ice_ring *xdp_ring) 418 - { 419 - /* Force memory writes to complete before letting h/w 420 - * know there are new descriptors to fetch. 
421 - */ 422 - wmb(); 423 - writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail); 424 - } 425 - 426 - /** 427 - * ice_xmit_xdp_ring - submit single packet to XDP ring for transmission 428 - * @data: packet data pointer 429 - * @size: packet data size 430 - * @xdp_ring: XDP ring for transmission 431 - */ 432 - static int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring) 433 - { 434 - u16 i = xdp_ring->next_to_use; 435 - struct ice_tx_desc *tx_desc; 436 - struct ice_tx_buf *tx_buf; 437 - dma_addr_t dma; 438 - 439 - if (!unlikely(ICE_DESC_UNUSED(xdp_ring))) { 440 - xdp_ring->tx_stats.tx_busy++; 441 - return ICE_XDP_CONSUMED; 442 - } 443 - 444 - dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE); 445 - if (dma_mapping_error(xdp_ring->dev, dma)) 446 - return ICE_XDP_CONSUMED; 447 - 448 - tx_buf = &xdp_ring->tx_buf[i]; 449 - tx_buf->bytecount = size; 450 - tx_buf->gso_segs = 1; 451 - tx_buf->raw_buf = data; 452 - 453 - /* record length, and DMA address */ 454 - dma_unmap_len_set(tx_buf, len, size); 455 - dma_unmap_addr_set(tx_buf, dma, dma); 456 - 457 - tx_desc = ICE_TX_DESC(xdp_ring, i); 458 - tx_desc->buf_addr = cpu_to_le64(dma); 459 - tx_desc->cmd_type_offset_bsz = build_ctob(ICE_TXD_LAST_DESC_CMD, 0, 460 - size, 0); 461 - 462 - /* Make certain all of the status bits have been updated 463 - * before next_to_watch is written. 464 - */ 465 - smp_wmb(); 466 - 467 - i++; 468 - if (i == xdp_ring->count) 469 - i = 0; 470 - 471 - tx_buf->next_to_watch = tx_desc; 472 - xdp_ring->next_to_use = i; 473 - 474 - return ICE_XDP_TX; 475 - } 476 - 477 - /** 478 - * ice_xmit_xdp_buff - convert an XDP buffer to an XDP frame and send it 479 - * @xdp: XDP buffer 480 - * @xdp_ring: XDP Tx ring 481 - * 482 - * Returns negative on failure, 0 on success. 
483 - */ 484 - static int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring) 485 - { 486 - struct xdp_frame *xdpf = convert_to_xdp_frame(xdp); 487 - 488 - if (unlikely(!xdpf)) 489 - return ICE_XDP_CONSUMED; 490 - 491 - return ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring); 492 439 } 493 440 494 441 /** ··· 497 610 ice_xdp_ring_update_tail(xdp_ring); 498 611 499 612 return n - drops; 500 - } 501 - 502 - /** 503 - * ice_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map 504 - * @rx_ring: Rx ring 505 - * @xdp_res: Result of the receive batch 506 - * 507 - * This function bumps XDP Tx tail and/or flush redirect map, and 508 - * should be called when a batch of packets has been processed in the 509 - * napi loop. 510 - */ 511 - static void 512 - ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res) 513 - { 514 - if (xdp_res & ICE_XDP_REDIR) 515 - xdp_do_flush_map(); 516 - 517 - if (xdp_res & ICE_XDP_TX) { 518 - struct ice_ring *xdp_ring = 519 - rx_ring->vsi->xdp_rings[rx_ring->q_index]; 520 - 521 - ice_xdp_ring_update_tail(xdp_ring); 522 - } 523 613 } 524 614 525 615 /** ··· 896 1032 } 897 1033 898 1034 /** 899 - * ice_test_staterr - tests bits in Rx descriptor status and error fields 900 - * @rx_desc: pointer to receive descriptor (in le64 format) 901 - * @stat_err_bits: value to mask 902 - * 903 - * This function does some fast chicanery in order to return the 904 - * value of the mask which is really only used for boolean tests. 905 - * The status_error_len doesn't need to be shifted because it begins 906 - * at offset zero. 
907 - */ 908 - static bool 909 - ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits) 910 - { 911 - return !!(rx_desc->wb.status_error0 & 912 - cpu_to_le16(stat_err_bits)); 913 - } 914 - 915 - /** 916 1035 * ice_is_non_eop - process handling of non-EOP buffers 917 1036 * @rx_ring: Rx ring being processed 918 1037 * @rx_desc: Rx descriptor for current buffer ··· 918 1071 rx_ring->rx_stats.non_eop_descs++; 919 1072 920 1073 return true; 921 - } 922 - 923 - /** 924 - * ice_ptype_to_htype - get a hash type 925 - * @ptype: the ptype value from the descriptor 926 - * 927 - * Returns a hash type to be used by skb_set_hash 928 - */ 929 - static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype) 930 - { 931 - return PKT_HASH_TYPE_NONE; 932 - } 933 - 934 - /** 935 - * ice_rx_hash - set the hash value in the skb 936 - * @rx_ring: descriptor ring 937 - * @rx_desc: specific descriptor 938 - * @skb: pointer to current skb 939 - * @rx_ptype: the ptype value from the descriptor 940 - */ 941 - static void 942 - ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, 943 - struct sk_buff *skb, u8 rx_ptype) 944 - { 945 - struct ice_32b_rx_flex_desc_nic *nic_mdid; 946 - u32 hash; 947 - 948 - if (!(rx_ring->netdev->features & NETIF_F_RXHASH)) 949 - return; 950 - 951 - if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC) 952 - return; 953 - 954 - nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc; 955 - hash = le32_to_cpu(nic_mdid->rss_hash); 956 - skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype)); 957 - } 958 - 959 - /** 960 - * ice_rx_csum - Indicate in skb if checksum is good 961 - * @ring: the ring we care about 962 - * @skb: skb currently being received and modified 963 - * @rx_desc: the receive descriptor 964 - * @ptype: the packet type decoded by hardware 965 - * 966 - * skb->protocol must be set before this function is called 967 - */ 968 - static void 969 - ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb, 970 - 
union ice_32b_rx_flex_desc *rx_desc, u8 ptype) 971 - { 972 - struct ice_rx_ptype_decoded decoded; 973 - u32 rx_error, rx_status; 974 - bool ipv4, ipv6; 975 - 976 - rx_status = le16_to_cpu(rx_desc->wb.status_error0); 977 - rx_error = rx_status; 978 - 979 - decoded = ice_decode_rx_desc_ptype(ptype); 980 - 981 - /* Start with CHECKSUM_NONE and by default csum_level = 0 */ 982 - skb->ip_summed = CHECKSUM_NONE; 983 - skb_checksum_none_assert(skb); 984 - 985 - /* check if Rx checksum is enabled */ 986 - if (!(ring->netdev->features & NETIF_F_RXCSUM)) 987 - return; 988 - 989 - /* check if HW has decoded the packet and checksum */ 990 - if (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))) 991 - return; 992 - 993 - if (!(decoded.known && decoded.outer_ip)) 994 - return; 995 - 996 - ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) && 997 - (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4); 998 - ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) && 999 - (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6); 1000 - 1001 - if (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | 1002 - BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))) 1003 - goto checksum_fail; 1004 - else if (ipv6 && (rx_status & 1005 - (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S)))) 1006 - goto checksum_fail; 1007 - 1008 - /* check for L4 errors and handle packets that were not able to be 1009 - * checksummed due to arrival speed 1010 - */ 1011 - if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)) 1012 - goto checksum_fail; 1013 - 1014 - /* Only report checksum unnecessary for TCP, UDP, or SCTP */ 1015 - switch (decoded.inner_prot) { 1016 - case ICE_RX_PTYPE_INNER_PROT_TCP: 1017 - case ICE_RX_PTYPE_INNER_PROT_UDP: 1018 - case ICE_RX_PTYPE_INNER_PROT_SCTP: 1019 - skb->ip_summed = CHECKSUM_UNNECESSARY; 1020 - default: 1021 - break; 1022 - } 1023 - return; 1024 - 1025 - checksum_fail: 1026 - ring->vsi->back->hw_csum_rx_error++; 1027 - } 1028 - 1029 - /** 1030 - * ice_process_skb_fields - Populate 
skb header fields from Rx descriptor 1031 - * @rx_ring: Rx descriptor ring packet is being transacted on 1032 - * @rx_desc: pointer to the EOP Rx descriptor 1033 - * @skb: pointer to current skb being populated 1034 - * @ptype: the packet type decoded by hardware 1035 - * 1036 - * This function checks the ring, descriptor, and packet information in 1037 - * order to populate the hash, checksum, VLAN, protocol, and 1038 - * other fields within the skb. 1039 - */ 1040 - static void 1041 - ice_process_skb_fields(struct ice_ring *rx_ring, 1042 - union ice_32b_rx_flex_desc *rx_desc, 1043 - struct sk_buff *skb, u8 ptype) 1044 - { 1045 - ice_rx_hash(rx_ring, rx_desc, skb, ptype); 1046 - 1047 - /* modifies the skb - consumes the enet header */ 1048 - skb->protocol = eth_type_trans(skb, rx_ring->netdev); 1049 - 1050 - ice_rx_csum(rx_ring, skb, rx_desc, ptype); 1051 - } 1052 - 1053 - /** 1054 - * ice_receive_skb - Send a completed packet up the stack 1055 - * @rx_ring: Rx ring in play 1056 - * @skb: packet to send up 1057 - * @vlan_tag: VLAN tag for packet 1058 - * 1059 - * This function sends the completed packet (via. skb) up the stack using 1060 - * gro receive functions (with/without VLAN tag) 1061 - */ 1062 - static void 1063 - ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag) 1064 - { 1065 - if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && 1066 - (vlan_tag & VLAN_VID_MASK)) 1067 - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); 1068 - napi_gro_receive(&rx_ring->q_vector->napi, skb); 1069 1074 } 1070 1075 1071 1076 /**
-10
drivers/net/ethernet/intel/ice/ice_txrx.h
··· 22 22 #define ICE_RX_BUF_WRITE 16 /* Must be power of 2 */ 23 23 #define ICE_MAX_TXQ_PER_TXQG 128 24 24 25 - static inline __le64 26 - build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag) 27 - { 28 - return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA | 29 - (td_cmd << ICE_TXD_QW1_CMD_S) | 30 - (td_offset << ICE_TXD_QW1_OFFSET_S) | 31 - ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) | 32 - (td_tag << ICE_TXD_QW1_L2TAG1_S)); 33 - } 34 - 35 25 /* We are assuming that the cache line is always 64 Bytes here for ice. 36 26 * In order to make sure that is a correct assumption there is a check in probe 37 27 * to print a warning if the read from GLPCI_CNF2 tells us that the cache line
+273
drivers/net/ethernet/intel/ice/ice_txrx_lib.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2019, Intel Corporation. */ 3 + 4 + #include "ice_txrx_lib.h" 5 + 6 + /** 7 + * ice_release_rx_desc - Store the new tail and head values 8 + * @rx_ring: ring to bump 9 + * @val: new head index 10 + */ 11 + void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val) 12 + { 13 + u16 prev_ntu = rx_ring->next_to_use; 14 + 15 + rx_ring->next_to_use = val; 16 + 17 + /* update next to alloc since we have filled the ring */ 18 + rx_ring->next_to_alloc = val; 19 + 20 + /* QRX_TAIL will be updated with any tail value, but hardware ignores 21 + * the lower 3 bits. This makes it so we only bump tail on meaningful 22 + * boundaries. Also, this allows us to bump tail on intervals of 8 up to 23 + * the budget depending on the current traffic load. 24 + */ 25 + val &= ~0x7; 26 + if (prev_ntu != val) { 27 + /* Force memory writes to complete before letting h/w 28 + * know there are new descriptors to fetch. (Only 29 + * applicable for weak-ordered memory model archs, 30 + * such as IA-64). 
31 + */ 32 + wmb(); 33 + writel(val, rx_ring->tail); 34 + } 35 + } 36 + 37 + /** 38 + * ice_ptype_to_htype - get a hash type 39 + * @ptype: the ptype value from the descriptor 40 + * 41 + * Returns a hash type to be used by skb_set_hash 42 + */ 43 + static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype) 44 + { 45 + return PKT_HASH_TYPE_NONE; 46 + } 47 + 48 + /** 49 + * ice_rx_hash - set the hash value in the skb 50 + * @rx_ring: descriptor ring 51 + * @rx_desc: specific descriptor 52 + * @skb: pointer to current skb 53 + * @rx_ptype: the ptype value from the descriptor 54 + */ 55 + static void 56 + ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, 57 + struct sk_buff *skb, u8 rx_ptype) 58 + { 59 + struct ice_32b_rx_flex_desc_nic *nic_mdid; 60 + u32 hash; 61 + 62 + if (!(rx_ring->netdev->features & NETIF_F_RXHASH)) 63 + return; 64 + 65 + if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC) 66 + return; 67 + 68 + nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc; 69 + hash = le32_to_cpu(nic_mdid->rss_hash); 70 + skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype)); 71 + } 72 + 73 + /** 74 + * ice_rx_csum - Indicate in skb if checksum is good 75 + * @ring: the ring we care about 76 + * @skb: skb currently being received and modified 77 + * @rx_desc: the receive descriptor 78 + * @ptype: the packet type decoded by hardware 79 + * 80 + * skb->protocol must be set before this function is called 81 + */ 82 + static void 83 + ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb, 84 + union ice_32b_rx_flex_desc *rx_desc, u8 ptype) 85 + { 86 + struct ice_rx_ptype_decoded decoded; 87 + u32 rx_error, rx_status; 88 + bool ipv4, ipv6; 89 + 90 + rx_status = le16_to_cpu(rx_desc->wb.status_error0); 91 + rx_error = rx_status; 92 + 93 + decoded = ice_decode_rx_desc_ptype(ptype); 94 + 95 + /* Start with CHECKSUM_NONE and by default csum_level = 0 */ 96 + skb->ip_summed = CHECKSUM_NONE; 97 + skb_checksum_none_assert(skb); 98 + 99 + /* check if 
Rx checksum is enabled */ 100 + if (!(ring->netdev->features & NETIF_F_RXCSUM)) 101 + return; 102 + 103 + /* check if HW has decoded the packet and checksum */ 104 + if (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))) 105 + return; 106 + 107 + if (!(decoded.known && decoded.outer_ip)) 108 + return; 109 + 110 + ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) && 111 + (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4); 112 + ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) && 113 + (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6); 114 + 115 + if (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | 116 + BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))) 117 + goto checksum_fail; 118 + else if (ipv6 && (rx_status & 119 + (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S)))) 120 + goto checksum_fail; 121 + 122 + /* check for L4 errors and handle packets that were not able to be 123 + * checksummed due to arrival speed 124 + */ 125 + if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)) 126 + goto checksum_fail; 127 + 128 + /* Only report checksum unnecessary for TCP, UDP, or SCTP */ 129 + switch (decoded.inner_prot) { 130 + case ICE_RX_PTYPE_INNER_PROT_TCP: 131 + case ICE_RX_PTYPE_INNER_PROT_UDP: 132 + case ICE_RX_PTYPE_INNER_PROT_SCTP: 133 + skb->ip_summed = CHECKSUM_UNNECESSARY; 134 + default: 135 + break; 136 + } 137 + return; 138 + 139 + checksum_fail: 140 + ring->vsi->back->hw_csum_rx_error++; 141 + } 142 + 143 + /** 144 + * ice_process_skb_fields - Populate skb header fields from Rx descriptor 145 + * @rx_ring: Rx descriptor ring packet is being transacted on 146 + * @rx_desc: pointer to the EOP Rx descriptor 147 + * @skb: pointer to current skb being populated 148 + * @ptype: the packet type decoded by hardware 149 + * 150 + * This function checks the ring, descriptor, and packet information in 151 + * order to populate the hash, checksum, VLAN, protocol, and 152 + * other fields within the skb. 
153 + */ 154 + void 155 + ice_process_skb_fields(struct ice_ring *rx_ring, 156 + union ice_32b_rx_flex_desc *rx_desc, 157 + struct sk_buff *skb, u8 ptype) 158 + { 159 + ice_rx_hash(rx_ring, rx_desc, skb, ptype); 160 + 161 + /* modifies the skb - consumes the enet header */ 162 + skb->protocol = eth_type_trans(skb, rx_ring->netdev); 163 + 164 + ice_rx_csum(rx_ring, skb, rx_desc, ptype); 165 + } 166 + 167 + /** 168 + * ice_receive_skb - Send a completed packet up the stack 169 + * @rx_ring: Rx ring in play 170 + * @skb: packet to send up 171 + * @vlan_tag: VLAN tag for packet 172 + * 173 + * This function sends the completed packet (via. skb) up the stack using 174 + * gro receive functions (with/without VLAN tag) 175 + */ 176 + void 177 + ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag) 178 + { 179 + if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && 180 + (vlan_tag & VLAN_VID_MASK)) 181 + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); 182 + napi_gro_receive(&rx_ring->q_vector->napi, skb); 183 + } 184 + 185 + /** 186 + * ice_xmit_xdp_ring - submit single packet to XDP ring for transmission 187 + * @data: packet data pointer 188 + * @size: packet data size 189 + * @xdp_ring: XDP ring for transmission 190 + */ 191 + int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring) 192 + { 193 + u16 i = xdp_ring->next_to_use; 194 + struct ice_tx_desc *tx_desc; 195 + struct ice_tx_buf *tx_buf; 196 + dma_addr_t dma; 197 + 198 + if (!unlikely(ICE_DESC_UNUSED(xdp_ring))) { 199 + xdp_ring->tx_stats.tx_busy++; 200 + return ICE_XDP_CONSUMED; 201 + } 202 + 203 + dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE); 204 + if (dma_mapping_error(xdp_ring->dev, dma)) 205 + return ICE_XDP_CONSUMED; 206 + 207 + tx_buf = &xdp_ring->tx_buf[i]; 208 + tx_buf->bytecount = size; 209 + tx_buf->gso_segs = 1; 210 + tx_buf->raw_buf = data; 211 + 212 + /* record length, and DMA address */ 213 + dma_unmap_len_set(tx_buf, len, 
size); 214 + dma_unmap_addr_set(tx_buf, dma, dma); 215 + 216 + tx_desc = ICE_TX_DESC(xdp_ring, i); 217 + tx_desc->buf_addr = cpu_to_le64(dma); 218 + tx_desc->cmd_type_offset_bsz = build_ctob(ICE_TXD_LAST_DESC_CMD, 0, 219 + size, 0); 220 + 221 + /* Make certain all of the status bits have been updated 222 + * before next_to_watch is written. 223 + */ 224 + smp_wmb(); 225 + 226 + i++; 227 + if (i == xdp_ring->count) 228 + i = 0; 229 + 230 + tx_buf->next_to_watch = tx_desc; 231 + xdp_ring->next_to_use = i; 232 + 233 + return ICE_XDP_TX; 234 + } 235 + 236 + /** 237 + * ice_xmit_xdp_buff - convert an XDP buffer to an XDP frame and send it 238 + * @xdp: XDP buffer 239 + * @xdp_ring: XDP Tx ring 240 + * 241 + * Returns negative on failure, 0 on success. 242 + */ 243 + int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring) 244 + { 245 + struct xdp_frame *xdpf = convert_to_xdp_frame(xdp); 246 + 247 + if (unlikely(!xdpf)) 248 + return ICE_XDP_CONSUMED; 249 + 250 + return ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring); 251 + } 252 + 253 + /** 254 + * ice_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map 255 + * @rx_ring: Rx ring 256 + * @xdp_res: Result of the receive batch 257 + * 258 + * This function bumps XDP Tx tail and/or flush redirect map, and 259 + * should be called when a batch of packets has been processed in the 260 + * napi loop. 261 + */ 262 + void ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res) 263 + { 264 + if (xdp_res & ICE_XDP_REDIR) 265 + xdp_do_flush_map(); 266 + 267 + if (xdp_res & ICE_XDP_TX) { 268 + struct ice_ring *xdp_ring = 269 + rx_ring->vsi->xdp_rings[rx_ring->q_index]; 270 + 271 + ice_xdp_ring_update_tail(xdp_ring); 272 + } 273 + }
+59
drivers/net/ethernet/intel/ice/ice_txrx_lib.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Copyright (c) 2019, Intel Corporation. */ 3 + 4 + #ifndef _ICE_TXRX_LIB_H_ 5 + #define _ICE_TXRX_LIB_H_ 6 + #include "ice.h" 7 + 8 + /** 9 + * ice_test_staterr - tests bits in Rx descriptor status and error fields 10 + * @rx_desc: pointer to receive descriptor (in le64 format) 11 + * @stat_err_bits: value to mask 12 + * 13 + * This function does some fast chicanery in order to return the 14 + * value of the mask which is really only used for boolean tests. 15 + * The status_error_len doesn't need to be shifted because it begins 16 + * at offset zero. 17 + */ 18 + static inline bool 19 + ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits) 20 + { 21 + return !!(rx_desc->wb.status_error0 & cpu_to_le16(stat_err_bits)); 22 + } 23 + 24 + static inline __le64 25 + build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag) 26 + { 27 + return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA | 28 + (td_cmd << ICE_TXD_QW1_CMD_S) | 29 + (td_offset << ICE_TXD_QW1_OFFSET_S) | 30 + ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) | 31 + (td_tag << ICE_TXD_QW1_L2TAG1_S)); 32 + } 33 + 34 + /** 35 + * ice_xdp_ring_update_tail - Updates the XDP Tx ring tail register 36 + * @xdp_ring: XDP Tx ring 37 + * 38 + * This function updates the XDP Tx ring tail register. 39 + */ 40 + static inline void ice_xdp_ring_update_tail(struct ice_ring *xdp_ring) 41 + { 42 + /* Force memory writes to complete before letting h/w 43 + * know there are new descriptors to fetch. 
44 + */ 45 + wmb(); 46 + writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail); 47 + } 48 + 49 + void ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res); 50 + int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring); 51 + int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring); 52 + void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val); 53 + void 54 + ice_process_skb_fields(struct ice_ring *rx_ring, 55 + union ice_32b_rx_flex_desc *rx_desc, 56 + struct sk_buff *skb, u8 ptype); 57 + void 58 + ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag); 59 + #endif /* !_ICE_TXRX_LIB_H_ */