Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'mlx5-fixes-2019-08-08' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
Mellanox, mlx5 fixes 2019-08-08

This series introduces some fixes to mlx5 driver.

Highlights:
1) From Tariq, Critical mlx5 kTLS fixes to better align with hw specs.
2) From Aya, Fixes to mlx5 tx devlink health reporter.
3) From Maxim, aRFS parsing to use the flow dissector to avoid relying on
invalid skb fields.

Please pull and let me know if there is any problem.

For -stable v4.3
('net/mlx5e: Only support tx/rx pause setting for port owner')
For -stable v4.9
('net/mlx5e: Use flow keys dissector to parse packets for ARFS')
For -stable v5.1
('net/mlx5e: Fix false negative indication on tx reporter CQE recovery')
('net/mlx5e: Remove redundant check in CQE recovery flow of tx reporter')
('net/mlx5e: ethtool, Avoid setting speed to 56GBASE when autoneg off')

Note: when merged with net-next this minor conflict will pop up:
++<<<<<<< (net-next)
+ if (is_eswitch_flow) {
+ flow->esw_attr->match_level = match_level;
+ flow->esw_attr->tunnel_match_level = tunnel_match_level;
++=======
+ if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
+ flow->esw_attr->inner_match_level = inner_match_level;
+ flow->esw_attr->outer_match_level = outer_match_level;
++>>>>>>> (net)

To resolve, use hunks from net (2nd) and replace:
if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
with
if (is_eswitch_flow)
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+101 -109
+7 -2
drivers/net/ethernet/mellanox/mlx5/core/en.h
··· 184 184 185 185 struct mlx5e_tx_wqe { 186 186 struct mlx5_wqe_ctrl_seg ctrl; 187 - struct mlx5_wqe_eth_seg eth; 188 - struct mlx5_wqe_data_seg data[0]; 187 + union { 188 + struct { 189 + struct mlx5_wqe_eth_seg eth; 190 + struct mlx5_wqe_data_seg data[0]; 191 + }; 192 + u8 tls_progress_params_ctx[0]; 193 + }; 189 194 }; 190 195 191 196 struct mlx5e_rx_wqe_ll {
+9 -10
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
··· 76 76 u8 state; 77 77 int err; 78 78 79 - if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) 80 - return 0; 81 - 82 79 err = mlx5_core_query_sq_state(mdev, sq->sqn, &state); 83 80 if (err) { 84 81 netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n", 85 82 sq->sqn, err); 86 - return err; 83 + goto out; 87 84 } 88 85 89 - if (state != MLX5_SQC_STATE_ERR) { 90 - netdev_err(dev, "SQ 0x%x not in ERROR state\n", sq->sqn); 91 - return -EINVAL; 92 - } 86 + if (state != MLX5_SQC_STATE_ERR) 87 + goto out; 93 88 94 89 mlx5e_tx_disable_queue(sq->txq); 95 90 96 91 err = mlx5e_wait_for_sq_flush(sq); 97 92 if (err) 98 - return err; 93 + goto out; 99 94 100 95 /* At this point, no new packets will arrive from the stack as TXQ is 101 96 * marked with QUEUE_STATE_DRV_XOFF. In addition, NAPI cleared all ··· 99 104 100 105 err = mlx5e_sq_to_ready(sq, state); 101 106 if (err) 102 - return err; 107 + goto out; 103 108 104 109 mlx5e_reset_txqsq_cc_pc(sq); 105 110 sq->stats->recover++; 111 + clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state); 106 112 mlx5e_activate_txqsq(sq); 107 113 108 114 return 0; 115 + out: 116 + clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state); 117 + return err; 109 118 } 110 119 111 120 static int mlx5_tx_health_report(struct devlink_health_reporter *tx_reporter,
+4 -2
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
··· 11 11 #include "accel/tls.h" 12 12 13 13 #define MLX5E_KTLS_STATIC_UMR_WQE_SZ \ 14 - (sizeof(struct mlx5e_umr_wqe) + MLX5_ST_SZ_BYTES(tls_static_params)) 14 + (offsetof(struct mlx5e_umr_wqe, tls_static_params_ctx) + \ 15 + MLX5_ST_SZ_BYTES(tls_static_params)) 15 16 #define MLX5E_KTLS_STATIC_WQEBBS \ 16 17 (DIV_ROUND_UP(MLX5E_KTLS_STATIC_UMR_WQE_SZ, MLX5_SEND_WQE_BB)) 17 18 18 19 #define MLX5E_KTLS_PROGRESS_WQE_SZ \ 19 - (sizeof(struct mlx5e_tx_wqe) + MLX5_ST_SZ_BYTES(tls_progress_params)) 20 + (offsetof(struct mlx5e_tx_wqe, tls_progress_params_ctx) + \ 21 + MLX5_ST_SZ_BYTES(tls_progress_params)) 20 22 #define MLX5E_KTLS_PROGRESS_WQEBBS \ 21 23 (DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_BB)) 22 24 #define MLX5E_KTLS_MAX_DUMP_WQEBBS 2
+5 -5
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
··· 69 69 cseg->qpn_ds = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) | 70 70 STATIC_PARAMS_DS_CNT); 71 71 cseg->fm_ce_se = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0; 72 - cseg->imm = cpu_to_be32(priv_tx->tisn); 72 + cseg->tisn = cpu_to_be32(priv_tx->tisn << 8); 73 73 74 74 ucseg->flags = MLX5_UMR_INLINE; 75 75 ucseg->bsf_octowords = cpu_to_be16(MLX5_ST_SZ_BYTES(tls_static_params) / 16); ··· 80 80 static void 81 81 fill_progress_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx) 82 82 { 83 - MLX5_SET(tls_progress_params, ctx, pd, priv_tx->tisn); 83 + MLX5_SET(tls_progress_params, ctx, tisn, priv_tx->tisn); 84 84 MLX5_SET(tls_progress_params, ctx, record_tracker_state, 85 85 MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START); 86 86 MLX5_SET(tls_progress_params, ctx, auth_state, ··· 104 104 PROGRESS_PARAMS_DS_CNT); 105 105 cseg->fm_ce_se = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0; 106 106 107 - fill_progress_params_ctx(wqe->data, priv_tx); 107 + fill_progress_params_ctx(wqe->tls_progress_params_ctx, priv_tx); 108 108 } 109 109 110 110 static void tx_fill_wi(struct mlx5e_txqsq *sq, ··· 278 278 279 279 cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_DUMP); 280 280 cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt); 281 - cseg->imm = cpu_to_be32(tisn); 281 + cseg->tisn = cpu_to_be32(tisn << 8); 282 282 cseg->fm_ce_se = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0; 283 283 284 284 eseg->inline_hdr.sz = cpu_to_be16(ihs); ··· 434 434 priv_tx->expected_seq = seq + datalen; 435 435 436 436 cseg = &(*wqe)->ctrl; 437 - cseg->imm = cpu_to_be32(priv_tx->tisn); 437 + cseg->tisn = cpu_to_be32(priv_tx->tisn << 8); 438 438 439 439 stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1; 440 440 stats->tls_encrypted_bytes += datalen;
+34 -63
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
··· 437 437 return &arfs_t->rules_hash[bucket_idx]; 438 438 } 439 439 440 - static u8 arfs_get_ip_proto(const struct sk_buff *skb) 441 - { 442 - return (skb->protocol == htons(ETH_P_IP)) ? 443 - ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr; 444 - } 445 - 446 440 static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs, 447 441 u8 ip_proto, __be16 etype) 448 442 { ··· 596 602 arfs_may_expire_flow(priv); 597 603 } 598 604 599 - /* return L4 destination port from ip4/6 packets */ 600 - static __be16 arfs_get_dst_port(const struct sk_buff *skb) 601 - { 602 - char *transport_header; 603 - 604 - transport_header = skb_transport_header(skb); 605 - if (arfs_get_ip_proto(skb) == IPPROTO_TCP) 606 - return ((struct tcphdr *)transport_header)->dest; 607 - return ((struct udphdr *)transport_header)->dest; 608 - } 609 - 610 - /* return L4 source port from ip4/6 packets */ 611 - static __be16 arfs_get_src_port(const struct sk_buff *skb) 612 - { 613 - char *transport_header; 614 - 615 - transport_header = skb_transport_header(skb); 616 - if (arfs_get_ip_proto(skb) == IPPROTO_TCP) 617 - return ((struct tcphdr *)transport_header)->source; 618 - return ((struct udphdr *)transport_header)->source; 619 - } 620 - 621 605 static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv, 622 606 struct arfs_table *arfs_t, 623 - const struct sk_buff *skb, 607 + const struct flow_keys *fk, 624 608 u16 rxq, u32 flow_id) 625 609 { 626 610 struct arfs_rule *rule; ··· 613 641 INIT_WORK(&rule->arfs_work, arfs_handle_work); 614 642 615 643 tuple = &rule->tuple; 616 - tuple->etype = skb->protocol; 644 + tuple->etype = fk->basic.n_proto; 645 + tuple->ip_proto = fk->basic.ip_proto; 617 646 if (tuple->etype == htons(ETH_P_IP)) { 618 - tuple->src_ipv4 = ip_hdr(skb)->saddr; 619 - tuple->dst_ipv4 = ip_hdr(skb)->daddr; 647 + tuple->src_ipv4 = fk->addrs.v4addrs.src; 648 + tuple->dst_ipv4 = fk->addrs.v4addrs.dst; 620 649 } else { 621 - memcpy(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr, 650 + 
memcpy(&tuple->src_ipv6, &fk->addrs.v6addrs.src, 622 651 sizeof(struct in6_addr)); 623 - memcpy(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr, 652 + memcpy(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst, 624 653 sizeof(struct in6_addr)); 625 654 } 626 - tuple->ip_proto = arfs_get_ip_proto(skb); 627 - tuple->src_port = arfs_get_src_port(skb); 628 - tuple->dst_port = arfs_get_dst_port(skb); 655 + tuple->src_port = fk->ports.src; 656 + tuple->dst_port = fk->ports.dst; 629 657 630 658 rule->flow_id = flow_id; 631 659 rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER; ··· 636 664 return rule; 637 665 } 638 666 639 - static bool arfs_cmp_ips(struct arfs_tuple *tuple, 640 - const struct sk_buff *skb) 667 + static bool arfs_cmp(const struct arfs_tuple *tuple, const struct flow_keys *fk) 641 668 { 642 - if (tuple->etype == htons(ETH_P_IP) && 643 - tuple->src_ipv4 == ip_hdr(skb)->saddr && 644 - tuple->dst_ipv4 == ip_hdr(skb)->daddr) 645 - return true; 646 - if (tuple->etype == htons(ETH_P_IPV6) && 647 - (!memcmp(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr, 648 - sizeof(struct in6_addr))) && 649 - (!memcmp(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr, 650 - sizeof(struct in6_addr)))) 651 - return true; 669 + if (tuple->src_port != fk->ports.src || tuple->dst_port != fk->ports.dst) 670 + return false; 671 + if (tuple->etype != fk->basic.n_proto) 672 + return false; 673 + if (tuple->etype == htons(ETH_P_IP)) 674 + return tuple->src_ipv4 == fk->addrs.v4addrs.src && 675 + tuple->dst_ipv4 == fk->addrs.v4addrs.dst; 676 + if (tuple->etype == htons(ETH_P_IPV6)) 677 + return !memcmp(&tuple->src_ipv6, &fk->addrs.v6addrs.src, 678 + sizeof(struct in6_addr)) && 679 + !memcmp(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst, 680 + sizeof(struct in6_addr)); 652 681 return false; 653 682 } 654 683 655 684 static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t, 656 - const struct sk_buff *skb) 685 + const struct flow_keys *fk) 657 686 { 658 687 struct arfs_rule *arfs_rule; 659 688 struct 
hlist_head *head; 660 - __be16 src_port = arfs_get_src_port(skb); 661 - __be16 dst_port = arfs_get_dst_port(skb); 662 689 663 - head = arfs_hash_bucket(arfs_t, src_port, dst_port); 690 + head = arfs_hash_bucket(arfs_t, fk->ports.src, fk->ports.dst); 664 691 hlist_for_each_entry(arfs_rule, head, hlist) { 665 - if (arfs_rule->tuple.src_port == src_port && 666 - arfs_rule->tuple.dst_port == dst_port && 667 - arfs_cmp_ips(&arfs_rule->tuple, skb)) { 692 + if (arfs_cmp(&arfs_rule->tuple, fk)) 668 693 return arfs_rule; 669 - } 670 694 } 671 695 672 696 return NULL; ··· 675 707 struct mlx5e_arfs_tables *arfs = &priv->fs.arfs; 676 708 struct arfs_table *arfs_t; 677 709 struct arfs_rule *arfs_rule; 710 + struct flow_keys fk; 678 711 679 - if (skb->protocol != htons(ETH_P_IP) && 680 - skb->protocol != htons(ETH_P_IPV6)) 712 + if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) 713 + return -EPROTONOSUPPORT; 714 + 715 + if (fk.basic.n_proto != htons(ETH_P_IP) && 716 + fk.basic.n_proto != htons(ETH_P_IPV6)) 681 717 return -EPROTONOSUPPORT; 682 718 683 719 if (skb->encapsulation) 684 720 return -EPROTONOSUPPORT; 685 721 686 - arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol); 722 + arfs_t = arfs_get_table(arfs, fk.basic.ip_proto, fk.basic.n_proto); 687 723 if (!arfs_t) 688 724 return -EPROTONOSUPPORT; 689 725 690 726 spin_lock_bh(&arfs->arfs_lock); 691 - arfs_rule = arfs_find_rule(arfs_t, skb); 727 + arfs_rule = arfs_find_rule(arfs_t, &fk); 692 728 if (arfs_rule) { 693 729 if (arfs_rule->rxq == rxq_index) { 694 730 spin_unlock_bh(&arfs->arfs_lock); ··· 700 728 } 701 729 arfs_rule->rxq = rxq_index; 702 730 } else { 703 - arfs_rule = arfs_alloc_rule(priv, arfs_t, skb, 704 - rxq_index, flow_id); 731 + arfs_rule = arfs_alloc_rule(priv, arfs_t, &fk, rxq_index, flow_id); 705 732 if (!arfs_rule) { 706 733 spin_unlock_bh(&arfs->arfs_lock); 707 734 return -ENOMEM;
+11
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
··· 1081 1081 link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) : 1082 1082 mlx5e_port_speed2linkmodes(mdev, speed, !ext); 1083 1083 1084 + if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) && 1085 + autoneg != AUTONEG_ENABLE) { 1086 + netdev_err(priv->netdev, "%s: 56G link speed requires autoneg enabled\n", 1087 + __func__); 1088 + err = -EINVAL; 1089 + goto out; 1090 + } 1091 + 1084 1092 link_modes = link_modes & eproto.cap; 1085 1093 if (!link_modes) { 1086 1094 netdev_err(priv->netdev, "%s: Not supported link mode(s) requested", ··· 1345 1337 { 1346 1338 struct mlx5_core_dev *mdev = priv->mdev; 1347 1339 int err; 1340 + 1341 + if (!MLX5_CAP_GEN(mdev, vport_group_manager)) 1342 + return -EOPNOTSUPP; 1348 1343 1349 1344 if (pauseparam->autoneg) 1350 1345 return -EINVAL;
-1
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 1321 1321 void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq) 1322 1322 { 1323 1323 sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix); 1324 - clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state); 1325 1324 set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); 1326 1325 netdev_tx_reset_queue(sq->txq); 1327 1326 netif_tx_start_queue(sq->txq);
+20 -11
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 1480 1480 struct mlx5_flow_spec *spec, 1481 1481 struct flow_cls_offload *f, 1482 1482 struct net_device *filter_dev, 1483 - u8 *match_level, u8 *tunnel_match_level) 1483 + u8 *inner_match_level, u8 *outer_match_level) 1484 1484 { 1485 1485 struct netlink_ext_ack *extack = f->common.extack; 1486 1486 void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, ··· 1495 1495 struct flow_dissector *dissector = rule->match.dissector; 1496 1496 u16 addr_type = 0; 1497 1497 u8 ip_proto = 0; 1498 + u8 *match_level; 1498 1499 1499 - *match_level = MLX5_MATCH_NONE; 1500 + match_level = outer_match_level; 1500 1501 1501 1502 if (dissector->used_keys & 1502 1503 ~(BIT(FLOW_DISSECTOR_KEY_META) | ··· 1525 1524 } 1526 1525 1527 1526 if (mlx5e_get_tc_tun(filter_dev)) { 1528 - if (parse_tunnel_attr(priv, spec, f, filter_dev, tunnel_match_level)) 1527 + if (parse_tunnel_attr(priv, spec, f, filter_dev, 1528 + outer_match_level)) 1529 1529 return -EOPNOTSUPP; 1530 1530 1531 - /* In decap flow, header pointers should point to the inner 1531 + /* At this point, header pointers should point to the inner 1532 1532 * headers, outer header were already set by parse_tunnel_attr 1533 1533 */ 1534 + match_level = inner_match_level; 1534 1535 headers_c = get_match_headers_criteria(MLX5_FLOW_CONTEXT_ACTION_DECAP, 1535 1536 spec); 1536 1537 headers_v = get_match_headers_value(MLX5_FLOW_CONTEXT_ACTION_DECAP, ··· 1834 1831 struct flow_cls_offload *f, 1835 1832 struct net_device *filter_dev) 1836 1833 { 1834 + u8 inner_match_level, outer_match_level, non_tunnel_match_level; 1837 1835 struct netlink_ext_ack *extack = f->common.extack; 1838 1836 struct mlx5_core_dev *dev = priv->mdev; 1839 1837 struct mlx5_eswitch *esw = dev->priv.eswitch; 1840 1838 struct mlx5e_rep_priv *rpriv = priv->ppriv; 1841 - u8 match_level, tunnel_match_level = MLX5_MATCH_NONE; 1842 1839 struct mlx5_eswitch_rep *rep; 1843 1840 int err; 1844 1841 1845 - err = __parse_cls_flower(priv, spec, f, filter_dev, 
&match_level, &tunnel_match_level); 1842 + inner_match_level = MLX5_MATCH_NONE; 1843 + outer_match_level = MLX5_MATCH_NONE; 1844 + 1845 + err = __parse_cls_flower(priv, spec, f, filter_dev, &inner_match_level, 1846 + &outer_match_level); 1847 + non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ? 1848 + outer_match_level : inner_match_level; 1846 1849 1847 1850 if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) { 1848 1851 rep = rpriv->rep; 1849 1852 if (rep->vport != MLX5_VPORT_UPLINK && 1850 1853 (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE && 1851 - esw->offloads.inline_mode < match_level)) { 1854 + esw->offloads.inline_mode < non_tunnel_match_level)) { 1852 1855 NL_SET_ERR_MSG_MOD(extack, 1853 1856 "Flow is not offloaded due to min inline setting"); 1854 1857 netdev_warn(priv->netdev, 1855 1858 "Flow is not offloaded due to min inline setting, required %d actual %d\n", 1856 - match_level, esw->offloads.inline_mode); 1859 + non_tunnel_match_level, esw->offloads.inline_mode); 1857 1860 return -EOPNOTSUPP; 1858 1861 } 1859 1862 } 1860 1863 1861 1864 if (flow->flags & MLX5E_TC_FLOW_ESWITCH) { 1862 - flow->esw_attr->match_level = match_level; 1863 - flow->esw_attr->tunnel_match_level = tunnel_match_level; 1865 + flow->esw_attr->inner_match_level = inner_match_level; 1866 + flow->esw_attr->outer_match_level = outer_match_level; 1864 1867 } else { 1865 - flow->nic_attr->match_level = match_level; 1868 + flow->nic_attr->match_level = non_tunnel_match_level; 1866 1869 } 1867 1870 1868 1871 return err;
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
··· 377 377 struct mlx5_termtbl_handle *termtbl; 378 378 } dests[MLX5_MAX_FLOW_FWD_VPORTS]; 379 379 u32 mod_hdr_id; 380 - u8 match_level; 381 - u8 tunnel_match_level; 380 + u8 inner_match_level; 381 + u8 outer_match_level; 382 382 struct mlx5_fc *counter; 383 383 u32 chain; 384 384 u16 prio;
+4 -8
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
··· 207 207 208 208 mlx5_eswitch_set_rule_source_port(esw, spec, attr); 209 209 210 - if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) { 211 - if (attr->tunnel_match_level != MLX5_MATCH_NONE) 212 - spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; 213 - if (attr->match_level != MLX5_MATCH_NONE) 214 - spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS; 215 - } else if (attr->match_level != MLX5_MATCH_NONE) { 210 + if (attr->outer_match_level != MLX5_MATCH_NONE) 216 211 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; 217 - } 212 + if (attr->inner_match_level != MLX5_MATCH_NONE) 213 + spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS; 218 214 219 215 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) 220 216 flow_act.modify_id = attr->mod_hdr_id; ··· 286 290 mlx5_eswitch_set_rule_source_port(esw, spec, attr); 287 291 288 292 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS; 289 - if (attr->match_level != MLX5_MATCH_NONE) 293 + if (attr->outer_match_level != MLX5_MATCH_NONE) 290 294 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; 291 295 292 296 rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);
+1
drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
··· 27 27 case 128: 28 28 general_obj_key_size = 29 29 MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128; 30 + key_p += sz_bytes; 30 31 break; 31 32 case 256: 32 33 general_obj_key_size =
+2 -2
include/linux/mlx5/device.h
··· 446 446 }; 447 447 448 448 enum { 449 - MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS = 0x20, 449 + MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS = 0x1, 450 450 }; 451 451 452 452 enum { 453 - MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS = 0x20, 453 + MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS = 0x1, 454 454 }; 455 455 456 456 enum {
+2 -3
include/linux/mlx5/mlx5_ifc.h
··· 10054 10054 }; 10055 10055 10056 10056 struct mlx5_ifc_tls_progress_params_bits { 10057 - u8 valid[0x1]; 10058 - u8 reserved_at_1[0x7]; 10059 - u8 pd[0x18]; 10057 + u8 reserved_at_0[0x8]; 10058 + u8 tisn[0x18]; 10060 10059 10061 10060 u8 next_record_tcp_sn[0x20]; 10062 10061