Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'be2net-next'

Sriharsha Basavapatna says:

====================
be2net patch-set

This patch set contains a few code refactoring changes to make it easy to
support new TX WRB formats in future ASICs. Please consider applying it to
net-next tree.

Patch 1: Refactors chip-specific code to set up the TX WRB into a separate routine.
Patch 2: Refactors the TX enqueue function to remove a bit of duplicate code and
improve the WRB setup steps.
Patch 3: Minor refactoring in TX completion handling to limit CQE accesses to one routine.
Patch 4: Adds a few inline functions.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+240 -112
+40
drivers/net/ethernet/emulex/benet/be.h
··· 238 238 struct u64_stats_sync sync_compl; 239 239 }; 240 240 241 + /* Structure to hold some data of interest obtained from a TX CQE */ 242 + struct be_tx_compl_info { 243 + u8 status; /* Completion status */ 244 + u16 end_index; /* Completed TXQ Index */ 245 + }; 246 + 241 247 struct be_tx_obj { 242 248 u32 db_offset; 243 249 struct be_queue_info q; 244 250 struct be_queue_info cq; 251 + struct be_tx_compl_info txcp; 245 252 /* Remember the skbs that were transmitted */ 246 253 struct sk_buff *sent_skb_list[TX_Q_LEN]; 247 254 struct be_tx_stats stats; ··· 422 415 u8 rsstable[RSS_INDIR_TABLE_LEN]; 423 416 u8 rss_queue[RSS_INDIR_TABLE_LEN]; 424 417 u8 rss_hkey[RSS_HASH_KEY_LEN]; 418 + }; 419 + 420 + /* Macros to read/write the 'features' word of be_wrb_params structure. 421 + */ 422 + #define BE_WRB_F_BIT(name) BE_WRB_F_##name##_BIT 423 + #define BE_WRB_F_MASK(name) BIT_MASK(BE_WRB_F_##name##_BIT) 424 + 425 + #define BE_WRB_F_GET(word, name) \ 426 + (((word) & (BE_WRB_F_MASK(name))) >> BE_WRB_F_BIT(name)) 427 + 428 + #define BE_WRB_F_SET(word, name, val) \ 429 + ((word) |= (((val) << BE_WRB_F_BIT(name)) & BE_WRB_F_MASK(name))) 430 + 431 + /* Feature/offload bits */ 432 + enum { 433 + BE_WRB_F_CRC_BIT, /* Ethernet CRC */ 434 + BE_WRB_F_IPCS_BIT, /* IP csum */ 435 + BE_WRB_F_TCPCS_BIT, /* TCP csum */ 436 + BE_WRB_F_UDPCS_BIT, /* UDP csum */ 437 + BE_WRB_F_LSO_BIT, /* LSO */ 438 + BE_WRB_F_LSO6_BIT, /* LSO6 */ 439 + BE_WRB_F_VLAN_BIT, /* VLAN */ 440 + BE_WRB_F_VLAN_SKIP_HW_BIT /* Skip VLAN tag (workaround) */ 441 + }; 442 + 443 + /* The structure below provides a HW-agnostic abstraction of WRB params 444 + * retrieved from a TX skb. This is in turn passed to chip specific routines 445 + * during transmit, to set the corresponding params in the WRB. 446 + */ 447 + struct be_wrb_params { 448 + u32 features; /* Feature bits */ 449 + u16 vlan_tag; /* VLAN tag */ 450 + u16 lso_mss; /* MSS for LSO */ 425 451 }; 426 452 427 453 struct be_adapter {
+200 -112
drivers/net/ethernet/emulex/benet/be_main.c
··· 727 727 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr; 728 728 } 729 729 730 - static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr, 731 - struct sk_buff *skb, u32 wrb_cnt, u32 len, 732 - bool skip_hw_vlan) 730 + static inline bool be_is_txq_full(struct be_tx_obj *txo) 733 731 { 734 - u16 vlan_tag, proto; 732 + return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len; 733 + } 735 734 736 - memset(hdr, 0, sizeof(*hdr)); 735 + static inline bool be_can_txq_wake(struct be_tx_obj *txo) 736 + { 737 + return atomic_read(&txo->q.used) < txo->q.len / 2; 738 + } 737 739 738 - SET_TX_WRB_HDR_BITS(crc, hdr, 1); 740 + static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo) 741 + { 742 + return atomic_read(&txo->q.used) > txo->pend_wrb_cnt; 743 + } 744 + 745 + static void be_get_wrb_params_from_skb(struct be_adapter *adapter, 746 + struct sk_buff *skb, 747 + struct be_wrb_params *wrb_params) 748 + { 749 + u16 proto; 739 750 740 751 if (skb_is_gso(skb)) { 741 - SET_TX_WRB_HDR_BITS(lso, hdr, 1); 742 - SET_TX_WRB_HDR_BITS(lso_mss, hdr, skb_shinfo(skb)->gso_size); 752 + BE_WRB_F_SET(wrb_params->features, LSO, 1); 753 + wrb_params->lso_mss = skb_shinfo(skb)->gso_size; 743 754 if (skb_is_gso_v6(skb) && !lancer_chip(adapter)) 744 - SET_TX_WRB_HDR_BITS(lso6, hdr, 1); 755 + BE_WRB_F_SET(wrb_params->features, LSO6, 1); 745 756 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 746 757 if (skb->encapsulation) { 747 - SET_TX_WRB_HDR_BITS(ipcs, hdr, 1); 758 + BE_WRB_F_SET(wrb_params->features, IPCS, 1); 748 759 proto = skb_inner_ip_proto(skb); 749 760 } else { 750 761 proto = skb_ip_proto(skb); 751 762 } 752 763 if (proto == IPPROTO_TCP) 753 - SET_TX_WRB_HDR_BITS(tcpcs, hdr, 1); 764 + BE_WRB_F_SET(wrb_params->features, TCPCS, 1); 754 765 else if (proto == IPPROTO_UDP) 755 - SET_TX_WRB_HDR_BITS(udpcs, hdr, 1); 766 + BE_WRB_F_SET(wrb_params->features, UDPCS, 1); 756 767 } 757 768 758 769 if (skb_vlan_tag_present(skb)) { 759 - 
SET_TX_WRB_HDR_BITS(vlan, hdr, 1); 760 - vlan_tag = be_get_tx_vlan_tag(adapter, skb); 761 - SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag); 770 + BE_WRB_F_SET(wrb_params->features, VLAN, 1); 771 + wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb); 762 772 } 763 773 764 - SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt); 765 - SET_TX_WRB_HDR_BITS(len, hdr, len); 774 + BE_WRB_F_SET(wrb_params->features, CRC, 1); 775 + } 766 776 767 - /* Hack to skip HW VLAN tagging needs evt = 1, compl = 0 768 - * When this hack is not needed, the evt bit is set while ringing DB 777 + static void wrb_fill_hdr(struct be_adapter *adapter, 778 + struct be_eth_hdr_wrb *hdr, 779 + struct be_wrb_params *wrb_params, 780 + struct sk_buff *skb) 781 + { 782 + memset(hdr, 0, sizeof(*hdr)); 783 + 784 + SET_TX_WRB_HDR_BITS(crc, hdr, 785 + BE_WRB_F_GET(wrb_params->features, CRC)); 786 + SET_TX_WRB_HDR_BITS(ipcs, hdr, 787 + BE_WRB_F_GET(wrb_params->features, IPCS)); 788 + SET_TX_WRB_HDR_BITS(tcpcs, hdr, 789 + BE_WRB_F_GET(wrb_params->features, TCPCS)); 790 + SET_TX_WRB_HDR_BITS(udpcs, hdr, 791 + BE_WRB_F_GET(wrb_params->features, UDPCS)); 792 + 793 + SET_TX_WRB_HDR_BITS(lso, hdr, 794 + BE_WRB_F_GET(wrb_params->features, LSO)); 795 + SET_TX_WRB_HDR_BITS(lso6, hdr, 796 + BE_WRB_F_GET(wrb_params->features, LSO6)); 797 + SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss); 798 + 799 + /* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this 800 + * hack is not needed, the evt bit is set while ringing DB. 
769 801 */ 770 - if (skip_hw_vlan) 771 - SET_TX_WRB_HDR_BITS(event, hdr, 1); 802 + SET_TX_WRB_HDR_BITS(event, hdr, 803 + BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW)); 804 + SET_TX_WRB_HDR_BITS(vlan, hdr, 805 + BE_WRB_F_GET(wrb_params->features, VLAN)); 806 + SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag); 807 + 808 + SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb)); 809 + SET_TX_WRB_HDR_BITS(len, hdr, skb->len); 772 810 } 773 811 774 812 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb, ··· 826 788 } 827 789 } 828 790 829 - /* Returns the number of WRBs used up by the skb */ 830 - static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo, 831 - struct sk_buff *skb, bool skip_hw_vlan) 791 + /* Grab a WRB header for xmit */ 792 + static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo) 832 793 { 833 - u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb); 834 - struct device *dev = &adapter->pdev->dev; 794 + u16 head = txo->q.head; 795 + 796 + queue_head_inc(&txo->q); 797 + return head; 798 + } 799 + 800 + /* Set up the WRB header for xmit */ 801 + static void be_tx_setup_wrb_hdr(struct be_adapter *adapter, 802 + struct be_tx_obj *txo, 803 + struct be_wrb_params *wrb_params, 804 + struct sk_buff *skb, u16 head) 805 + { 806 + u32 num_frags = skb_wrb_cnt(skb); 835 807 struct be_queue_info *txq = &txo->q; 836 - struct be_eth_hdr_wrb *hdr; 837 - bool map_single = false; 838 - struct be_eth_wrb *wrb; 839 - dma_addr_t busaddr; 840 - u16 head = txq->head; 808 + struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head); 841 809 842 - hdr = queue_head_node(txq); 843 - wrb_fill_hdr(adapter, hdr, skb, wrb_cnt, skb->len, skip_hw_vlan); 810 + wrb_fill_hdr(adapter, hdr, wrb_params, skb); 844 811 be_dws_cpu_to_le(hdr, sizeof(*hdr)); 845 - 846 - queue_head_inc(txq); 847 - 848 - if (skb->len > skb->data_len) { 849 - int len = skb_headlen(skb); 850 - 851 - busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE); 852 - if 
(dma_mapping_error(dev, busaddr)) 853 - goto dma_err; 854 - map_single = true; 855 - wrb = queue_head_node(txq); 856 - wrb_fill(wrb, busaddr, len); 857 - queue_head_inc(txq); 858 - copied += len; 859 - } 860 - 861 - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 862 - const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; 863 - 864 - busaddr = skb_frag_dma_map(dev, frag, 0, 865 - skb_frag_size(frag), DMA_TO_DEVICE); 866 - if (dma_mapping_error(dev, busaddr)) 867 - goto dma_err; 868 - wrb = queue_head_node(txq); 869 - wrb_fill(wrb, busaddr, skb_frag_size(frag)); 870 - queue_head_inc(txq); 871 - copied += skb_frag_size(frag); 872 - } 873 812 874 813 BUG_ON(txo->sent_skb_list[head]); 875 814 txo->sent_skb_list[head] = skb; 876 815 txo->last_req_hdr = head; 877 - atomic_add(wrb_cnt, &txq->used); 878 - txo->last_req_wrb_cnt = wrb_cnt; 879 - txo->pend_wrb_cnt += wrb_cnt; 816 + atomic_add(num_frags, &txq->used); 817 + txo->last_req_wrb_cnt = num_frags; 818 + txo->pend_wrb_cnt += num_frags; 819 + } 880 820 881 - be_tx_stats_update(txo, skb); 882 - return wrb_cnt; 821 + /* Setup a WRB fragment (buffer descriptor) for xmit */ 822 + static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr, 823 + int len) 824 + { 825 + struct be_eth_wrb *wrb; 826 + struct be_queue_info *txq = &txo->q; 883 827 884 - dma_err: 885 - /* Bring the queue back to the state it was in before this 886 - * routine was invoked. 887 - */ 828 + wrb = queue_head_node(txq); 829 + wrb_fill(wrb, busaddr, len); 830 + queue_head_inc(txq); 831 + } 832 + 833 + /* Bring the queue back to the state it was in before be_xmit_enqueue() routine 834 + * was invoked. The producer index is restored to the previous packet and the 835 + * WRBs of the current packet are unmapped. Invoked to handle tx setup errors. 
836 + */ 837 + static void be_xmit_restore(struct be_adapter *adapter, 838 + struct be_tx_obj *txo, u16 head, bool map_single, 839 + u32 copied) 840 + { 841 + struct device *dev; 842 + struct be_eth_wrb *wrb; 843 + struct be_queue_info *txq = &txo->q; 844 + 845 + dev = &adapter->pdev->dev; 888 846 txq->head = head; 847 + 889 848 /* skip the first wrb (hdr); it's not mapped */ 890 849 queue_head_inc(txq); 891 850 while (copied) { ··· 890 855 unmap_tx_frag(dev, wrb, map_single); 891 856 map_single = false; 892 857 copied -= le32_to_cpu(wrb->frag_len); 893 - adapter->drv_stats.dma_map_errors++; 894 858 queue_head_inc(txq); 895 859 } 860 + 896 861 txq->head = head; 862 + } 863 + 864 + /* Enqueue the given packet for transmit. This routine allocates WRBs for the 865 + * packet, dma maps the packet buffers and sets up the WRBs. Returns the number 866 + * of WRBs used up by the packet. 867 + */ 868 + static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo, 869 + struct sk_buff *skb, 870 + struct be_wrb_params *wrb_params) 871 + { 872 + u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb); 873 + struct device *dev = &adapter->pdev->dev; 874 + struct be_queue_info *txq = &txo->q; 875 + bool map_single = false; 876 + u16 head = txq->head; 877 + dma_addr_t busaddr; 878 + int len; 879 + 880 + head = be_tx_get_wrb_hdr(txo); 881 + 882 + if (skb->len > skb->data_len) { 883 + len = skb_headlen(skb); 884 + 885 + busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE); 886 + if (dma_mapping_error(dev, busaddr)) 887 + goto dma_err; 888 + map_single = true; 889 + be_tx_setup_wrb_frag(txo, busaddr, len); 890 + copied += len; 891 + } 892 + 893 + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 894 + const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; 895 + len = skb_frag_size(frag); 896 + 897 + busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE); 898 + if (dma_mapping_error(dev, busaddr)) 899 + goto dma_err; 900 + be_tx_setup_wrb_frag(txo, 
busaddr, len); 901 + copied += len; 902 + } 903 + 904 + be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head); 905 + 906 + be_tx_stats_update(txo, skb); 907 + return wrb_cnt; 908 + 909 + dma_err: 910 + adapter->drv_stats.dma_map_errors++; 911 + be_xmit_restore(adapter, txo, head, map_single, copied); 897 912 return 0; 898 913 } 899 914 ··· 954 869 955 870 static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter, 956 871 struct sk_buff *skb, 957 - bool *skip_hw_vlan) 872 + struct be_wrb_params 873 + *wrb_params) 958 874 { 959 875 u16 vlan_tag = 0; 960 876 ··· 972 886 /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to 973 887 * skip VLAN insertion 974 888 */ 975 - if (skip_hw_vlan) 976 - *skip_hw_vlan = true; 889 + BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1); 977 890 } 978 891 979 892 if (vlan_tag) { ··· 990 905 vlan_tag); 991 906 if (unlikely(!skb)) 992 907 return skb; 993 - if (skip_hw_vlan) 994 - *skip_hw_vlan = true; 908 + BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1); 995 909 } 996 910 997 911 return skb; ··· 1030 946 1031 947 static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter, 1032 948 struct sk_buff *skb, 1033 - bool *skip_hw_vlan) 949 + struct be_wrb_params 950 + *wrb_params) 1034 951 { 1035 952 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; 1036 953 unsigned int eth_hdr_len; ··· 1055 970 */ 1056 971 if (be_pvid_tagging_enabled(adapter) && 1057 972 veh->h_vlan_proto == htons(ETH_P_8021Q)) 1058 - *skip_hw_vlan = true; 973 + BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1); 1059 974 1060 975 /* HW has a bug wherein it will calculate CSUM for VLAN 1061 976 * pkts even though it is disabled. 
··· 1063 978 */ 1064 979 if (skb->ip_summed != CHECKSUM_PARTIAL && 1065 980 skb_vlan_tag_present(skb)) { 1066 - skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan); 981 + skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params); 1067 982 if (unlikely(!skb)) 1068 983 goto err; 1069 984 } ··· 1085 1000 */ 1086 1001 if (be_ipv6_tx_stall_chk(adapter, skb) && 1087 1002 be_vlan_tag_tx_chk(adapter, skb)) { 1088 - skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan); 1003 + skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params); 1089 1004 if (unlikely(!skb)) 1090 1005 goto err; 1091 1006 } ··· 1099 1014 1100 1015 static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, 1101 1016 struct sk_buff *skb, 1102 - bool *skip_hw_vlan) 1017 + struct be_wrb_params *wrb_params) 1103 1018 { 1104 1019 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or 1105 1020 * less may cause a transmit stall on that port. So the work-around is ··· 1111 1026 } 1112 1027 1113 1028 if (BEx_chip(adapter) || lancer_chip(adapter)) { 1114 - skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan); 1029 + skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params); 1115 1030 if (!skb) 1116 1031 return NULL; 1117 1032 } ··· 1145 1060 1146 1061 static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev) 1147 1062 { 1148 - bool skip_hw_vlan = false, flush = !skb->xmit_more; 1149 1063 struct be_adapter *adapter = netdev_priv(netdev); 1150 1064 u16 q_idx = skb_get_queue_mapping(skb); 1151 1065 struct be_tx_obj *txo = &adapter->tx_obj[q_idx]; 1152 - struct be_queue_info *txq = &txo->q; 1066 + struct be_wrb_params wrb_params = { 0 }; 1067 + bool flush = !skb->xmit_more; 1153 1068 u16 wrb_cnt; 1154 1069 1155 - skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan); 1070 + skb = be_xmit_workarounds(adapter, skb, &wrb_params); 1156 1071 if (unlikely(!skb)) 1157 1072 goto drop; 1158 1073 1159 - wrb_cnt = be_xmit_enqueue(adapter, txo, skb, skip_hw_vlan); 1074 + 
be_get_wrb_params_from_skb(adapter, skb, &wrb_params); 1075 + 1076 + wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params); 1160 1077 if (unlikely(!wrb_cnt)) { 1161 1078 dev_kfree_skb_any(skb); 1162 1079 goto drop; 1163 1080 } 1164 1081 1165 - if ((atomic_read(&txq->used) + BE_MAX_TX_FRAG_COUNT) >= txq->len) { 1082 + if (be_is_txq_full(txo)) { 1166 1083 netif_stop_subqueue(netdev, q_idx); 1167 1084 tx_stats(txo)->tx_stops++; 1168 1085 } ··· 2078 1991 } 2079 1992 } 2080 1993 2081 - static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq) 1994 + static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo) 2082 1995 { 2083 - struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq); 1996 + struct be_queue_info *tx_cq = &txo->cq; 1997 + struct be_tx_compl_info *txcp = &txo->txcp; 1998 + struct be_eth_tx_compl *compl = queue_tail_node(tx_cq); 2084 1999 2085 - if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0) 2000 + if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0) 2086 2001 return NULL; 2087 2002 2003 + /* Ensure load ordering of valid bit dword and other dwords below */ 2088 2004 rmb(); 2089 - be_dws_le_to_cpu(txcp, sizeof(*txcp)); 2005 + be_dws_le_to_cpu(compl, sizeof(*compl)); 2090 2006 2091 - txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0; 2007 + txcp->status = GET_TX_COMPL_BITS(status, compl); 2008 + txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl); 2092 2009 2010 + compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0; 2093 2011 queue_tail_inc(tx_cq); 2094 2012 return txcp; 2095 2013 } ··· 2215 2123 { 2216 2124 u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0; 2217 2125 struct device *dev = &adapter->pdev->dev; 2218 - struct be_tx_obj *txo; 2126 + struct be_tx_compl_info *txcp; 2219 2127 struct be_queue_info *txq; 2220 - struct be_eth_tx_compl *txcp; 2128 + struct be_tx_obj *txo; 2221 2129 int i, pending_txqs; 2222 2130 2223 2131 /* Stop polling for compls when 
HW has been silent for 10ms */ ··· 2228 2136 cmpl = 0; 2229 2137 num_wrbs = 0; 2230 2138 txq = &txo->q; 2231 - while ((txcp = be_tx_compl_get(&txo->cq))) { 2232 - end_idx = GET_TX_COMPL_BITS(wrb_index, txcp); 2233 - num_wrbs += be_tx_compl_process(adapter, txo, 2234 - end_idx); 2139 + while ((txcp = be_tx_compl_get(txo))) { 2140 + num_wrbs += 2141 + be_tx_compl_process(adapter, txo, 2142 + txcp->end_index); 2235 2143 cmpl++; 2236 2144 } 2237 2145 if (cmpl) { ··· 2239 2147 atomic_sub(num_wrbs, &txq->used); 2240 2148 timeo = 0; 2241 2149 } 2242 - if (atomic_read(&txq->used) == txo->pend_wrb_cnt) 2150 + if (!be_is_tx_compl_pending(txo)) 2243 2151 pending_txqs--; 2244 2152 } 2245 2153 ··· 2590 2498 return work_done; 2591 2499 } 2592 2500 2593 - static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status) 2501 + static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status) 2594 2502 { 2595 2503 switch (status) { 2596 2504 case BE_TX_COMP_HDR_PARSE_ERR: ··· 2605 2513 } 2606 2514 } 2607 2515 2608 - static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status) 2516 + static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status) 2609 2517 { 2610 2518 switch (status) { 2611 2519 case LANCER_TX_COMP_LSO_ERR: ··· 2630 2538 static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo, 2631 2539 int idx) 2632 2540 { 2633 - struct be_eth_tx_compl *txcp; 2634 2541 int num_wrbs = 0, work_done = 0; 2635 - u32 compl_status; 2636 - u16 last_idx; 2542 + struct be_tx_compl_info *txcp; 2637 2543 2638 - while ((txcp = be_tx_compl_get(&txo->cq))) { 2639 - last_idx = GET_TX_COMPL_BITS(wrb_index, txcp); 2640 - num_wrbs += be_tx_compl_process(adapter, txo, last_idx); 2544 + while ((txcp = be_tx_compl_get(txo))) { 2545 + num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index); 2641 2546 work_done++; 2642 2547 2643 - compl_status = GET_TX_COMPL_BITS(status, txcp); 2644 - if (compl_status) { 2548 + if (txcp->status) { 2645 2549 if 
(lancer_chip(adapter)) 2646 - lancer_update_tx_err(txo, compl_status); 2550 + lancer_update_tx_err(txo, txcp->status); 2647 2551 else 2648 - be_update_tx_err(txo, compl_status); 2552 + be_update_tx_err(txo, txcp->status); 2649 2553 } 2650 2554 } 2651 2555 ··· 2652 2564 /* As Tx wrbs have been freed up, wake up netdev queue 2653 2565 * if it was stopped due to lack of tx wrbs. */ 2654 2566 if (__netif_subqueue_stopped(adapter->netdev, idx) && 2655 - atomic_read(&txo->q.used) < txo->q.len / 2) { 2567 + be_can_txq_wake(txo)) { 2656 2568 netif_wake_subqueue(adapter->netdev, idx); 2657 2569 } 2658 2570