Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net/mlx4_en: Force user priority by QP attribute

Instead of relying on HW to change schedule queue by UP, schedule
queue is fixed for a tx_ring, and UP in WQE is ignored in this aspect. This
resolves two issues with untagged traffic:
1. Untagged traffic has no UP in the packet, which is needed for QoS. The change
above allows setting the schedule queue (and thereby the UP) of such a stream.
2. BlueFlame uses the same field used by vlan tag. So forcing UP from QPC
allows using BF for untagged but prioritized traffic.

On old firmware where force UP is not supported, untagged traffic will not be
subject to QoS.

Because UP is set by the QP, we need to always have a tx ring per UP, even if the
pfcrx module parameter is false.

Signed-off-by: Amir Vadai <amirv@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Amir Vadai and committed by
David S. Miller
0e98b523 73a0d907

+19 -17
+1 -1
drivers/net/ethernet/mellanox/mlx4/en_main.c
··· 114 114 params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE; 115 115 params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE; 116 116 params->prof[i].tx_ring_num = MLX4_EN_NUM_TX_RINGS + 117 - (!!pfcrx) * MLX4_EN_NUM_PPP_RINGS; 117 + MLX4_EN_NUM_PPP_RINGS; 118 118 params->prof[i].rss_rings = 0; 119 119 } 120 120
+2 -1
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
··· 650 650 651 651 /* Configure ring */ 652 652 tx_ring = &priv->tx_ring[i]; 653 - err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn); 653 + err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn, 654 + max(0, i - MLX4_EN_NUM_TX_RINGS)); 654 655 if (err) { 655 656 en_err(priv, "Failed allocating Tx ring\n"); 656 657 mlx4_en_deactivate_cq(priv, cq);
+5 -1
drivers/net/ethernet/mellanox/mlx4/en_resources.c
··· 39 39 40 40 void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, 41 41 int is_tx, int rss, int qpn, int cqn, 42 - struct mlx4_qp_context *context) 42 + int user_prio, struct mlx4_qp_context *context) 43 43 { 44 44 struct mlx4_en_dev *mdev = priv->mdev; 45 45 ··· 57 57 context->local_qpn = cpu_to_be32(qpn); 58 58 context->pri_path.ackto = 1 & 0x07; 59 59 context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6; 60 + if (user_prio >= 0) { 61 + context->pri_path.sched_queue |= user_prio << 3; 62 + context->pri_path.feup = 1 << 6; 63 + } 60 64 context->pri_path.counter_index = 0xff; 61 65 context->cqn_send = cpu_to_be32(cqn); 62 66 context->cqn_recv = cpu_to_be32(cqn);
+2 -2
drivers/net/ethernet/mellanox/mlx4/en_rx.c
··· 823 823 824 824 memset(context, 0, sizeof *context); 825 825 mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0, 826 - qpn, ring->cqn, context); 826 + qpn, ring->cqn, -1, context); 827 827 context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma); 828 828 829 829 /* Cancel FCS removal if FW allows */ ··· 890 890 } 891 891 rss_map->indir_qp.event = mlx4_en_sqp_event; 892 892 mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn, 893 - priv->rx_ring[0].cqn, &context); 893 + priv->rx_ring[0].cqn, -1, &context); 894 894 895 895 if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num) 896 896 rss_rings = priv->rx_ring_num;
+4 -8
drivers/net/ethernet/mellanox/mlx4/en_tx.c
··· 156 156 157 157 int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, 158 158 struct mlx4_en_tx_ring *ring, 159 - int cq) 159 + int cq, int user_prio) 160 160 { 161 161 struct mlx4_en_dev *mdev = priv->mdev; 162 162 int err; ··· 174 174 ring->doorbell_qpn = ring->qp.qpn << 8; 175 175 176 176 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, 177 - ring->cqn, &ring->context); 177 + ring->cqn, user_prio, &ring->context); 178 178 if (ring->bf_enabled) 179 179 ring->context.usr_page = cpu_to_be32(ring->bf.uar->index); 180 180 ··· 570 570 571 571 u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb) 572 572 { 573 - struct mlx4_en_priv *priv = netdev_priv(dev); 574 573 u16 vlan_tag = 0; 575 574 576 - /* If we support per priority flow control and the packet contains 577 - * a vlan tag, send the packet to the TX ring assigned to that priority 578 - */ 579 - if (priv->prof->rx_ppp && vlan_tx_tag_present(skb)) { 575 + if (vlan_tx_tag_present(skb)) { 580 576 vlan_tag = vlan_tx_tag_get(skb); 581 577 return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13); 582 578 } 583 579 584 - return skb_tx_hash(dev, skb); 580 + return __skb_tx_hash(dev, skb, MLX4_EN_NUM_TX_RINGS); 585 581 } 586 582 587 583 static void mlx4_bf_copy(void __iomem *dst, unsigned long *src, unsigned bytecnt)
+3 -3
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
··· 521 521 void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring); 522 522 int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, 523 523 struct mlx4_en_tx_ring *ring, 524 - int cq); 524 + int cq, int user_prio); 525 525 void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv, 526 526 struct mlx4_en_tx_ring *ring); 527 527 ··· 539 539 int budget); 540 540 int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget); 541 541 void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, 542 - int is_tx, int rss, int qpn, int cqn, 543 - struct mlx4_qp_context *context); 542 + int is_tx, int rss, int qpn, int cqn, int user_prio, 543 + struct mlx4_qp_context *context); 544 544 void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event); 545 545 int mlx4_en_map_buffer(struct mlx4_buf *buf); 546 546 void mlx4_en_unmap_buffer(struct mlx4_buf *buf);
+2 -1
include/linux/mlx4/qp.h
··· 139 139 u8 rgid[16]; 140 140 u8 sched_queue; 141 141 u8 vlan_index; 142 - u8 reserved3[2]; 142 + u8 feup; 143 + u8 reserved3; 143 144 u8 reserved4[2]; 144 145 u8 dmac[6]; 145 146 };