Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

be2net: support multiple TX queues

This patch provides support for multiple TX queues.

Signed-off-by: Sathya Perla <sathya.perla@emulex.com>
Signed-off-by: David S. Miller <davem@conan.davemloft.net>

Authored by Sathya Perla; committed by David S. Miller.
3c8def97 15b4d93f

+171 -119
+9 -4
drivers/net/benet/be.h
··· 87 87 88 88 #define MAX_RSS_QS 4 /* BE limit is 4 queues/port */ 89 89 #define MAX_RX_QS (MAX_RSS_QS + 1) /* RSS qs + 1 def Rx */ 90 + #define MAX_TX_QS 8 90 91 #define BE_MAX_MSIX_VECTORS (MAX_RX_QS + 1)/* RX + TX */ 91 92 #define BE_NAPI_WEIGHT 64 92 93 #define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */ ··· 171 170 u32 be_tx_reqs; /* number of TX requests initiated */ 172 171 u32 be_tx_stops; /* number of times TX Q was stopped */ 173 172 u32 be_tx_wrbs; /* number of tx WRBs used */ 174 - u32 be_tx_events; /* number of tx completion events */ 175 173 u32 be_tx_compl; /* number of tx completion entries processed */ 176 174 ulong be_tx_jiffies; 177 175 u64 be_tx_bytes; ··· 184 184 struct be_queue_info cq; 185 185 /* Remember the skbs that were transmitted */ 186 186 struct sk_buff *sent_skb_list[TX_Q_LEN]; 187 + struct be_tx_stats stats; 187 188 }; 188 189 189 190 /* Struct to remember the pages posted for rx frags */ ··· 320 319 321 320 /* TX Rings */ 322 321 struct be_eq_obj tx_eq; 323 - struct be_tx_obj tx_obj; 324 - struct be_tx_stats tx_stats; 322 + struct be_tx_obj tx_obj[MAX_TX_QS]; 323 + u8 num_tx_qs; 325 324 326 325 u32 cache_line_break[8]; 327 326 ··· 392 391 extern const struct ethtool_ops be_ethtool_ops; 393 392 394 393 #define msix_enabled(adapter) (adapter->num_msix_vec > 0) 395 - #define tx_stats(adapter) (&adapter->tx_stats) 394 + #define tx_stats(txo) (&txo->stats) 396 395 #define rx_stats(rxo) (&rxo->stats) 397 396 398 397 #define BE_SET_NETDEV_OPS(netdev, ops) (netdev->netdev_ops = ops) ··· 405 404 #define for_all_rss_queues(adapter, rxo, i) \ 406 405 for (i = 0, rxo = &adapter->rx_obj[i+1]; i < (adapter->num_rx_qs - 1);\ 407 406 i++, rxo++) 407 + 408 + #define for_all_tx_queues(adapter, txo, i) \ 409 + for (i = 0, txo = &adapter->tx_obj[i]; i < adapter->num_tx_qs; \ 410 + i++, txo++) 408 411 409 412 #define PAGE_SHIFT_4K 12 410 413 #define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
+36 -14
drivers/net/benet/be_ethtool.c
··· 52 52 {NETSTAT_INFO(tx_errors)}, 53 53 {NETSTAT_INFO(rx_dropped)}, 54 54 {NETSTAT_INFO(tx_dropped)}, 55 - {DRVSTAT_TX_INFO(be_tx_rate)}, 56 - {DRVSTAT_TX_INFO(be_tx_reqs)}, 57 - {DRVSTAT_TX_INFO(be_tx_wrbs)}, 58 - {DRVSTAT_TX_INFO(be_tx_stops)}, 59 - {DRVSTAT_TX_INFO(be_tx_events)}, 60 - {DRVSTAT_TX_INFO(be_tx_compl)}, 55 + {DRVSTAT_INFO(be_tx_events)}, 61 56 {DRVSTAT_INFO(rx_crc_errors)}, 62 57 {DRVSTAT_INFO(rx_alignment_symbol_errors)}, 63 58 {DRVSTAT_INFO(rx_pause_frames)}, ··· 105 110 {ERXSTAT_INFO(rx_drops_no_fragments)} 106 111 }; 107 112 #define ETHTOOL_RXSTATS_NUM (ARRAY_SIZE(et_rx_stats)) 113 + 114 + /* Stats related to multi TX queues */ 115 + static const struct be_ethtool_stat et_tx_stats[] = { 116 + {DRVSTAT_TX_INFO(be_tx_rate)}, 117 + {DRVSTAT_TX_INFO(be_tx_reqs)}, 118 + {DRVSTAT_TX_INFO(be_tx_wrbs)}, 119 + {DRVSTAT_TX_INFO(be_tx_stops)}, 120 + {DRVSTAT_TX_INFO(be_tx_compl)} 121 + }; 122 + #define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats)) 108 123 109 124 static const char et_self_tests[][ETH_GSTRING_LEN] = { 110 125 "MAC Loopback test", ··· 258 253 { 259 254 struct be_adapter *adapter = netdev_priv(netdev); 260 255 struct be_rx_obj *rxo; 256 + struct be_tx_obj *txo; 261 257 void *p = NULL; 262 - int i, j; 258 + int i, j, base; 263 259 264 260 for (i = 0; i < ETHTOOL_STATS_NUM; i++) { 265 261 switch (et_stats[i].type) { 266 262 case NETSTAT: 267 263 p = &netdev->stats; 268 - break; 269 - case DRVSTAT_TX: 270 - p = &adapter->tx_stats; 271 264 break; 272 265 case DRVSTAT: 273 266 p = &adapter->drv_stats; ··· 277 274 *(u64 *)p: *(u32 *)p; 278 275 } 279 276 277 + base = ETHTOOL_STATS_NUM; 280 278 for_all_rx_queues(adapter, rxo, j) { 281 279 for (i = 0; i < ETHTOOL_RXSTATS_NUM; i++) { 282 280 switch (et_rx_stats[i].type) { ··· 289 285 rxo->q.id; 290 286 break; 291 287 } 292 - data[ETHTOOL_STATS_NUM + j * ETHTOOL_RXSTATS_NUM + i] = 288 + data[base + j * ETHTOOL_RXSTATS_NUM + i] = 293 289 (et_rx_stats[i].size == sizeof(u64)) ? 
290 + *(u64 *)p: *(u32 *)p; 291 + } 292 + } 293 + 294 + base = ETHTOOL_STATS_NUM + adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM; 295 + for_all_tx_queues(adapter, txo, j) { 296 + for (i = 0; i < ETHTOOL_TXSTATS_NUM; i++) { 297 + p = (u8 *)&txo->stats + et_tx_stats[i].offset; 298 + data[base + j * ETHTOOL_TXSTATS_NUM + i] = 299 + (et_tx_stats[i].size == sizeof(u64)) ? 294 300 *(u64 *)p: *(u32 *)p; 295 301 } 296 302 } ··· 326 312 data += ETH_GSTRING_LEN; 327 313 } 328 314 } 315 + for (i = 0; i < adapter->num_tx_qs; i++) { 316 + for (j = 0; j < ETHTOOL_TXSTATS_NUM; j++) { 317 + sprintf(data, "txq%d: %s", i, 318 + et_tx_stats[j].desc); 319 + data += ETH_GSTRING_LEN; 320 + } 321 + } 329 322 break; 330 323 case ETH_SS_TEST: 331 324 for (i = 0; i < ETHTOOL_TESTS_NUM; i++) { ··· 352 331 return ETHTOOL_TESTS_NUM; 353 332 case ETH_SS_STATS: 354 333 return ETHTOOL_STATS_NUM + 355 - adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM; 334 + adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM + 335 + adapter->num_tx_qs * ETHTOOL_TXSTATS_NUM; 356 336 default: 357 337 return -EINVAL; 358 338 } ··· 479 457 struct be_adapter *adapter = netdev_priv(netdev); 480 458 481 459 ring->rx_max_pending = adapter->rx_obj[0].q.len; 482 - ring->tx_max_pending = adapter->tx_obj.q.len; 460 + ring->tx_max_pending = adapter->tx_obj[0].q.len; 483 461 484 462 ring->rx_pending = atomic_read(&adapter->rx_obj[0].q.used); 485 - ring->tx_pending = atomic_read(&adapter->tx_obj.q.used); 463 + ring->tx_pending = atomic_read(&adapter->tx_obj[0].q.used); 486 464 } 487 465 488 466 static void
+126 -101
drivers/net/benet/be_main.c
··· 427 427 struct be_drv_stats *drvs = &adapter->drv_stats; 428 428 struct net_device_stats *dev_stats = &adapter->netdev->stats; 429 429 struct be_rx_obj *rxo; 430 + struct be_tx_obj *txo; 430 431 int i; 431 432 432 433 memset(dev_stats, 0, sizeof(*dev_stats)); ··· 451 450 } 452 451 } 453 452 454 - dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts; 455 - dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes; 453 + for_all_tx_queues(adapter, txo, i) { 454 + dev_stats->tx_packets += tx_stats(txo)->be_tx_pkts; 455 + dev_stats->tx_bytes += tx_stats(txo)->be_tx_bytes; 456 + } 456 457 457 458 /* bad pkts received */ 458 459 dev_stats->rx_errors = drvs->rx_crc_errors + ··· 557 554 return rate; 558 555 } 559 556 560 - static void be_tx_rate_update(struct be_adapter *adapter) 557 + static void be_tx_rate_update(struct be_tx_obj *txo) 561 558 { 562 - struct be_tx_stats *stats = tx_stats(adapter); 559 + struct be_tx_stats *stats = tx_stats(txo); 563 560 ulong now = jiffies; 564 561 565 562 /* Wrapped around? 
*/ ··· 578 575 } 579 576 } 580 577 581 - static void be_tx_stats_update(struct be_adapter *adapter, 578 + static void be_tx_stats_update(struct be_tx_obj *txo, 582 579 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped) 583 580 { 584 - struct be_tx_stats *stats = tx_stats(adapter); 581 + struct be_tx_stats *stats = tx_stats(txo); 582 + 585 583 stats->be_tx_reqs++; 586 584 stats->be_tx_wrbs += wrb_cnt; 587 585 stats->be_tx_bytes += copied; ··· 686 682 } 687 683 } 688 684 689 - static int make_tx_wrbs(struct be_adapter *adapter, 685 + static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq, 690 686 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb) 691 687 { 692 688 dma_addr_t busaddr; 693 689 int i, copied = 0; 694 690 struct device *dev = &adapter->pdev->dev; 695 691 struct sk_buff *first_skb = skb; 696 - struct be_queue_info *txq = &adapter->tx_obj.q; 697 692 struct be_eth_wrb *wrb; 698 693 struct be_eth_hdr_wrb *hdr; 699 694 bool map_single = false; ··· 756 753 struct net_device *netdev) 757 754 { 758 755 struct be_adapter *adapter = netdev_priv(netdev); 759 - struct be_tx_obj *tx_obj = &adapter->tx_obj; 760 - struct be_queue_info *txq = &tx_obj->q; 756 + struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)]; 757 + struct be_queue_info *txq = &txo->q; 761 758 u32 wrb_cnt = 0, copied = 0; 762 759 u32 start = txq->head; 763 760 bool dummy_wrb, stopped = false; 764 761 765 762 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb); 766 763 767 - copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb); 764 + copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb); 768 765 if (copied) { 769 766 /* record the sent skb in the sent_skb table */ 770 - BUG_ON(tx_obj->sent_skb_list[start]); 771 - tx_obj->sent_skb_list[start] = skb; 767 + BUG_ON(txo->sent_skb_list[start]); 768 + txo->sent_skb_list[start] = skb; 772 769 773 770 /* Ensure txq has space for the next skb; Else stop the queue 774 771 * *BEFORE* ringing the tx doorbell, so 
that we serialze the ··· 777 774 atomic_add(wrb_cnt, &txq->used); 778 775 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >= 779 776 txq->len) { 780 - netif_stop_queue(netdev); 777 + netif_stop_subqueue(netdev, skb_get_queue_mapping(skb)); 781 778 stopped = true; 782 779 } 783 780 784 781 be_txq_notify(adapter, txq->id, wrb_cnt); 785 782 786 - be_tx_stats_update(adapter, wrb_cnt, copied, 783 + be_tx_stats_update(txo, wrb_cnt, copied, 787 784 skb_shinfo(skb)->gso_segs, stopped); 788 785 } else { 789 786 txq->head = start; ··· 1462 1459 return txcp; 1463 1460 } 1464 1461 1465 - static u16 be_tx_compl_process(struct be_adapter *adapter, u16 last_index) 1462 + static u16 be_tx_compl_process(struct be_adapter *adapter, 1463 + struct be_tx_obj *txo, u16 last_index) 1466 1464 { 1467 - struct be_queue_info *txq = &adapter->tx_obj.q; 1465 + struct be_queue_info *txq = &txo->q; 1468 1466 struct be_eth_wrb *wrb; 1469 - struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list; 1467 + struct sk_buff **sent_skbs = txo->sent_skb_list; 1470 1468 struct sk_buff *sent_skb; 1471 1469 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */ 1472 1470 bool unmap_skb_hdr = true; ··· 1508 1504 } 1509 1505 1510 1506 static int event_handle(struct be_adapter *adapter, 1511 - struct be_eq_obj *eq_obj) 1507 + struct be_eq_obj *eq_obj, 1508 + bool rearm) 1512 1509 { 1513 1510 struct be_eq_entry *eqe; 1514 1511 u16 num = 0; ··· 1522 1517 /* Deal with any spurious interrupts that come 1523 1518 * without events 1524 1519 */ 1525 - be_eq_notify(adapter, eq_obj->q.id, true, true, num); 1520 + if (!num) 1521 + rearm = true; 1522 + 1523 + be_eq_notify(adapter, eq_obj->q.id, rearm, true, num); 1526 1524 if (num) 1527 1525 napi_schedule(&eq_obj->napi); 1528 1526 ··· 1573 1565 BUG_ON(atomic_read(&rxq->used)); 1574 1566 } 1575 1567 1576 - static void be_tx_compl_clean(struct be_adapter *adapter) 1568 + static void be_tx_compl_clean(struct be_adapter *adapter, 1569 + struct be_tx_obj *txo) 1577 
1570 { 1578 - struct be_queue_info *tx_cq = &adapter->tx_obj.cq; 1579 - struct be_queue_info *txq = &adapter->tx_obj.q; 1571 + struct be_queue_info *tx_cq = &txo->cq; 1572 + struct be_queue_info *txq = &txo->q; 1580 1573 struct be_eth_tx_compl *txcp; 1581 1574 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0; 1582 - struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list; 1575 + struct sk_buff **sent_skbs = txo->sent_skb_list; 1583 1576 struct sk_buff *sent_skb; 1584 1577 bool dummy_wrb; 1585 1578 ··· 1589 1580 while ((txcp = be_tx_compl_get(tx_cq))) { 1590 1581 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl, 1591 1582 wrb_index, txcp); 1592 - num_wrbs += be_tx_compl_process(adapter, end_idx); 1583 + num_wrbs += be_tx_compl_process(adapter, txo, end_idx); 1593 1584 cmpl++; 1594 1585 } 1595 1586 if (cmpl) { ··· 1616 1607 index_adv(&end_idx, 1617 1608 wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1, 1618 1609 txq->len); 1619 - num_wrbs = be_tx_compl_process(adapter, end_idx); 1610 + num_wrbs = be_tx_compl_process(adapter, txo, end_idx); 1620 1611 atomic_sub(num_wrbs, &txq->used); 1621 1612 } 1622 1613 } ··· 1675 1666 static void be_tx_queues_destroy(struct be_adapter *adapter) 1676 1667 { 1677 1668 struct be_queue_info *q; 1669 + struct be_tx_obj *txo; 1670 + u8 i; 1678 1671 1679 - q = &adapter->tx_obj.q; 1680 - if (q->created) 1681 - be_cmd_q_destroy(adapter, q, QTYPE_TXQ); 1682 - be_queue_free(adapter, q); 1672 + for_all_tx_queues(adapter, txo, i) { 1673 + q = &txo->q; 1674 + if (q->created) 1675 + be_cmd_q_destroy(adapter, q, QTYPE_TXQ); 1676 + be_queue_free(adapter, q); 1683 1677 1684 - q = &adapter->tx_obj.cq; 1685 - if (q->created) 1686 - be_cmd_q_destroy(adapter, q, QTYPE_CQ); 1687 - be_queue_free(adapter, q); 1678 + q = &txo->cq; 1679 + if (q->created) 1680 + be_cmd_q_destroy(adapter, q, QTYPE_CQ); 1681 + be_queue_free(adapter, q); 1682 + } 1688 1683 1689 1684 /* Clear any residual events */ 1690 1685 be_eq_clean(adapter, &adapter->tx_eq); ··· 1699 1686 
be_queue_free(adapter, q); 1700 1687 } 1701 1688 1689 + /* One TX event queue is shared by all TX compl qs */ 1702 1690 static int be_tx_queues_create(struct be_adapter *adapter) 1703 1691 { 1704 1692 struct be_queue_info *eq, *q, *cq; 1693 + struct be_tx_obj *txo; 1694 + u8 i; 1705 1695 1706 1696 adapter->tx_eq.max_eqd = 0; 1707 1697 adapter->tx_eq.min_eqd = 0; 1708 1698 adapter->tx_eq.cur_eqd = 96; 1709 1699 adapter->tx_eq.enable_aic = false; 1710 - /* Alloc Tx Event queue */ 1700 + 1711 1701 eq = &adapter->tx_eq.q; 1712 - if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry))) 1702 + if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, 1703 + sizeof(struct be_eq_entry))) 1713 1704 return -1; 1714 1705 1715 - /* Ask BE to create Tx Event queue */ 1716 1706 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd)) 1717 - goto tx_eq_free; 1718 - 1707 + goto err; 1719 1708 adapter->tx_eq.eq_idx = adapter->eq_next_idx++; 1720 1709 1721 - 1722 - /* Alloc TX eth compl queue */ 1723 - cq = &adapter->tx_obj.cq; 1724 - if (be_queue_alloc(adapter, cq, TX_CQ_LEN, 1710 + for_all_tx_queues(adapter, txo, i) { 1711 + cq = &txo->cq; 1712 + if (be_queue_alloc(adapter, cq, TX_CQ_LEN, 1725 1713 sizeof(struct be_eth_tx_compl))) 1726 - goto tx_eq_destroy; 1714 + goto err; 1727 1715 1728 - /* Ask BE to create Tx eth compl queue */ 1729 - if (be_cmd_cq_create(adapter, cq, eq, false, false, 3)) 1730 - goto tx_cq_free; 1716 + if (be_cmd_cq_create(adapter, cq, eq, false, false, 3)) 1717 + goto err; 1731 1718 1732 - /* Alloc TX eth queue */ 1733 - q = &adapter->tx_obj.q; 1734 - if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb))) 1735 - goto tx_cq_destroy; 1719 + q = &txo->q; 1720 + if (be_queue_alloc(adapter, q, TX_Q_LEN, 1721 + sizeof(struct be_eth_wrb))) 1722 + goto err; 1736 1723 1737 - /* Ask BE to create Tx eth queue */ 1738 - if (be_cmd_txq_create(adapter, q, cq)) 1739 - goto tx_q_free; 1724 + if (be_cmd_txq_create(adapter, q, cq)) 1725 + goto err; 1726 + } 
1740 1727 return 0; 1741 1728 1742 - tx_q_free: 1743 - be_queue_free(adapter, q); 1744 - tx_cq_destroy: 1745 - be_cmd_q_destroy(adapter, cq, QTYPE_CQ); 1746 - tx_cq_free: 1747 - be_queue_free(adapter, cq); 1748 - tx_eq_destroy: 1749 - be_cmd_q_destroy(adapter, eq, QTYPE_EQ); 1750 - tx_eq_free: 1751 - be_queue_free(adapter, eq); 1729 + err: 1730 + be_tx_queues_destroy(adapter); 1752 1731 return -1; 1753 1732 } 1754 1733 ··· 1881 1876 1882 1877 if (lancer_chip(adapter)) { 1883 1878 if (event_peek(&adapter->tx_eq)) 1884 - tx = event_handle(adapter, &adapter->tx_eq); 1879 + tx = event_handle(adapter, &adapter->tx_eq, false); 1885 1880 for_all_rx_queues(adapter, rxo, i) { 1886 1881 if (event_peek(&rxo->rx_eq)) 1887 - rx |= event_handle(adapter, &rxo->rx_eq); 1882 + rx |= event_handle(adapter, &rxo->rx_eq, true); 1888 1883 } 1889 1884 1890 1885 if (!(tx || rx)) ··· 1897 1892 return IRQ_NONE; 1898 1893 1899 1894 if ((1 << adapter->tx_eq.eq_idx & isr)) 1900 - event_handle(adapter, &adapter->tx_eq); 1895 + event_handle(adapter, &adapter->tx_eq, false); 1901 1896 1902 1897 for_all_rx_queues(adapter, rxo, i) { 1903 1898 if ((1 << rxo->rx_eq.eq_idx & isr)) 1904 - event_handle(adapter, &rxo->rx_eq); 1899 + event_handle(adapter, &rxo->rx_eq, true); 1905 1900 } 1906 1901 } 1907 1902 ··· 1913 1908 struct be_rx_obj *rxo = dev; 1914 1909 struct be_adapter *adapter = rxo->adapter; 1915 1910 1916 - event_handle(adapter, &rxo->rx_eq); 1911 + event_handle(adapter, &rxo->rx_eq, true); 1917 1912 1918 1913 return IRQ_HANDLED; 1919 1914 } ··· 1922 1917 { 1923 1918 struct be_adapter *adapter = dev; 1924 1919 1925 - event_handle(adapter, &adapter->tx_eq); 1920 + event_handle(adapter, &adapter->tx_eq, false); 1926 1921 1927 1922 return IRQ_HANDLED; 1928 1923 } ··· 1983 1978 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi); 1984 1979 struct be_adapter *adapter = 1985 1980 container_of(tx_eq, struct be_adapter, tx_eq); 1986 - struct be_queue_info *txq = &adapter->tx_obj.q; 
1987 - struct be_queue_info *tx_cq = &adapter->tx_obj.cq; 1981 + struct be_tx_obj *txo; 1988 1982 struct be_eth_tx_compl *txcp; 1989 - int tx_compl = 0, mcc_compl, status = 0; 1990 - u16 end_idx, num_wrbs = 0; 1983 + int tx_compl, mcc_compl, status = 0; 1984 + u8 i; 1985 + u16 num_wrbs; 1991 1986 1992 - while ((txcp = be_tx_compl_get(tx_cq))) { 1993 - end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl, 1994 - wrb_index, txcp); 1995 - num_wrbs += be_tx_compl_process(adapter, end_idx); 1996 - tx_compl++; 1987 + for_all_tx_queues(adapter, txo, i) { 1988 + tx_compl = 0; 1989 + num_wrbs = 0; 1990 + while ((txcp = be_tx_compl_get(&txo->cq))) { 1991 + num_wrbs += be_tx_compl_process(adapter, txo, 1992 + AMAP_GET_BITS(struct amap_eth_tx_compl, 1993 + wrb_index, txcp)); 1994 + tx_compl++; 1995 + } 1996 + if (tx_compl) { 1997 + be_cq_notify(adapter, txo->cq.id, true, tx_compl); 1998 + 1999 + atomic_sub(num_wrbs, &txo->q.used); 2000 + 2001 + /* As Tx wrbs have been freed up, wake up netdev queue 2002 + * if it was stopped due to lack of tx wrbs. */ 2003 + if (__netif_subqueue_stopped(adapter->netdev, i) && 2004 + atomic_read(&txo->q.used) < txo->q.len / 2) { 2005 + netif_wake_subqueue(adapter->netdev, i); 2006 + } 2007 + 2008 + adapter->drv_stats.be_tx_events++; 2009 + txo->stats.be_tx_compl += tx_compl; 2010 + } 1997 2011 } 1998 2012 1999 2013 mcc_compl = be_process_mcc(adapter, &status); 2000 - 2001 - napi_complete(napi); 2002 2014 2003 2015 if (mcc_compl) { 2004 2016 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; 2005 2017 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl); 2006 2018 } 2007 2019 2008 - if (tx_compl) { 2009 - be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl); 2020 + napi_complete(napi); 2010 2021 2011 - atomic_sub(num_wrbs, &txq->used); 2012 - 2013 - /* As Tx wrbs have been freed up, wake up netdev queue if 2014 - * it was stopped due to lack of tx wrbs. 
2015 - */ 2016 - if (netif_queue_stopped(adapter->netdev) && 2017 - atomic_read(&txq->used) < txq->len / 2) { 2018 - netif_wake_queue(adapter->netdev); 2019 - } 2020 - 2021 - tx_stats(adapter)->be_tx_events++; 2022 - tx_stats(adapter)->be_tx_compl += tx_compl; 2023 - } 2024 - 2022 + be_eq_notify(adapter, tx_eq->q.id, true, false, 0); 2025 2023 return 1; 2026 2024 } 2027 2025 ··· 2073 2065 struct be_adapter *adapter = 2074 2066 container_of(work, struct be_adapter, work.work); 2075 2067 struct be_rx_obj *rxo; 2068 + struct be_tx_obj *txo; 2076 2069 int i; 2077 2070 2078 2071 if (!adapter->ue_detected && !lancer_chip(adapter)) ··· 2101 2092 else 2102 2093 be_cmd_get_stats(adapter, &adapter->stats_cmd); 2103 2094 } 2104 - be_tx_rate_update(adapter); 2095 + 2096 + for_all_tx_queues(adapter, txo, i) 2097 + be_tx_rate_update(txo); 2105 2098 2106 2099 for_all_rx_queues(adapter, rxo, i) { 2107 2100 be_rx_rate_update(rxo); ··· 2305 2294 { 2306 2295 struct be_adapter *adapter = netdev_priv(netdev); 2307 2296 struct be_rx_obj *rxo; 2297 + struct be_tx_obj *txo; 2308 2298 struct be_eq_obj *tx_eq = &adapter->tx_eq; 2309 2299 int vec, i; 2310 2300 ··· 2323 2311 napi_disable(&tx_eq->napi); 2324 2312 2325 2313 if (lancer_chip(adapter)) { 2326 - be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0); 2327 2314 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0); 2328 2315 for_all_rx_queues(adapter, rxo, i) 2329 2316 be_cq_notify(adapter, rxo->cq.id, false, 0); 2317 + for_all_tx_queues(adapter, txo, i) 2318 + be_cq_notify(adapter, txo->cq.id, false, 0); 2330 2319 } 2331 2320 2332 2321 if (msix_enabled(adapter)) { ··· 2346 2333 /* Wait for all pending tx completions to arrive so that 2347 2334 * all tx skbs are freed. 
2348 2335 */ 2349 - be_tx_compl_clean(adapter); 2336 + for_all_tx_queues(adapter, txo, i) 2337 + be_tx_compl_clean(adapter, txo); 2350 2338 2351 2339 return 0; 2352 2340 } ··· 3197 3183 return status; 3198 3184 3199 3185 be_cmd_check_native_mode(adapter); 3186 + 3187 + if ((num_vfs && adapter->sriov_enabled) || 3188 + (adapter->function_mode & 0x400) || 3189 + lancer_chip(adapter) || !be_physfn(adapter)) { 3190 + adapter->num_tx_qs = 1; 3191 + netif_set_real_num_tx_queues(adapter->netdev, 3192 + adapter->num_tx_qs); 3193 + } else { 3194 + adapter->num_tx_qs = MAX_TX_QS; 3195 + } 3196 + 3200 3197 return 0; 3201 3198 } 3202 3199 ··· 3310 3285 goto disable_dev; 3311 3286 pci_set_master(pdev); 3312 3287 3313 - netdev = alloc_etherdev(sizeof(struct be_adapter)); 3288 + netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS); 3314 3289 if (netdev == NULL) { 3315 3290 status = -ENOMEM; 3316 3291 goto rel_reg;