Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'mlx5-fixes-2019-09-24' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
Mellanox, mlx5 fixes 2019-09-24

This series introduces some fixes to mlx5 driver.
For more information please see tag log below.

Please pull and let me know if there is any problem.

For -stable v4.20:
('net/mlx5e: Fix traffic duplication in ethtool steering')

For -stable v4.19:
('net/mlx5: Add device ID of upcoming BlueField-2')

For -stable v5.3:
('net/mlx5e: Fix matching on tunnel addresses type')
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+117 -77
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
··· 399 399 struct mlx5_flow_table *ft, 400 400 struct ethtool_rx_flow_spec *fs) 401 401 { 402 + struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND }; 402 403 struct mlx5_flow_destination *dst = NULL; 403 - struct mlx5_flow_act flow_act = {0}; 404 - struct mlx5_flow_spec *spec; 405 404 struct mlx5_flow_handle *rule; 405 + struct mlx5_flow_spec *spec; 406 406 int err = 0; 407 407 408 408 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+51 -34
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 1664 1664 return err; 1665 1665 } 1666 1666 1667 - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) { 1668 - struct flow_match_ipv4_addrs match; 1667 + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) { 1668 + struct flow_match_control match; 1669 + u16 addr_type; 1669 1670 1670 - flow_rule_match_enc_ipv4_addrs(rule, &match); 1671 - MLX5_SET(fte_match_set_lyr_2_4, headers_c, 1672 - src_ipv4_src_ipv6.ipv4_layout.ipv4, 1673 - ntohl(match.mask->src)); 1674 - MLX5_SET(fte_match_set_lyr_2_4, headers_v, 1675 - src_ipv4_src_ipv6.ipv4_layout.ipv4, 1676 - ntohl(match.key->src)); 1671 + flow_rule_match_enc_control(rule, &match); 1672 + addr_type = match.key->addr_type; 1677 1673 1678 - MLX5_SET(fte_match_set_lyr_2_4, headers_c, 1679 - dst_ipv4_dst_ipv6.ipv4_layout.ipv4, 1680 - ntohl(match.mask->dst)); 1681 - MLX5_SET(fte_match_set_lyr_2_4, headers_v, 1682 - dst_ipv4_dst_ipv6.ipv4_layout.ipv4, 1683 - ntohl(match.key->dst)); 1674 + /* For tunnel addr_type used same key id`s as for non-tunnel */ 1675 + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { 1676 + struct flow_match_ipv4_addrs match; 1684 1677 1685 - MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype); 1686 - MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP); 1687 - } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) { 1688 - struct flow_match_ipv6_addrs match; 1678 + flow_rule_match_enc_ipv4_addrs(rule, &match); 1679 + MLX5_SET(fte_match_set_lyr_2_4, headers_c, 1680 + src_ipv4_src_ipv6.ipv4_layout.ipv4, 1681 + ntohl(match.mask->src)); 1682 + MLX5_SET(fte_match_set_lyr_2_4, headers_v, 1683 + src_ipv4_src_ipv6.ipv4_layout.ipv4, 1684 + ntohl(match.key->src)); 1689 1685 1690 - flow_rule_match_enc_ipv6_addrs(rule, &match); 1691 - memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 1692 - src_ipv4_src_ipv6.ipv6_layout.ipv6), 1693 - &match.mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); 1694 - memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 1695 - src_ipv4_src_ipv6.ipv6_layout.ipv6), 1696 - &match.key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); 1686 + MLX5_SET(fte_match_set_lyr_2_4, headers_c, 1687 + dst_ipv4_dst_ipv6.ipv4_layout.ipv4, 1688 + ntohl(match.mask->dst)); 1689 + MLX5_SET(fte_match_set_lyr_2_4, headers_v, 1690 + dst_ipv4_dst_ipv6.ipv4_layout.ipv4, 1691 + ntohl(match.key->dst)); 1697 1692 1698 - memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 1699 - dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 1700 - &match.mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); 1701 - memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 1702 - dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 1703 - &match.key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); 1693 + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, 1694 + ethertype); 1695 + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 1696 + ETH_P_IP); 1697 + } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { 1698 + struct flow_match_ipv6_addrs match; 1704 1699 1705 - MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype); 1706 - MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6); 1700 + flow_rule_match_enc_ipv6_addrs(rule, &match); 1701 + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 1702 + src_ipv4_src_ipv6.ipv6_layout.ipv6), 1703 + &match.mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, 1704 + ipv6)); 1705 + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 1706 + src_ipv4_src_ipv6.ipv6_layout.ipv6), 1707 + &match.key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, 1708 + ipv6)); 1709 + 1710 + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 1711 + dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 1712 + &match.mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, 1713 + ipv6)); 1714 + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 1715 + dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 1716 + &match.key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, 1717 + ipv6)); 1718 + 1719 + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, 1720 + ethertype); 1721 + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 1722 + ETH_P_IPV6); 1723 + } 1707 1724 } 1708 1725 1709 1726 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
+1
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 1568 1568 { PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF}, /* ConnectX Family mlx5Gen Virtual Function */ 1569 1569 { PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */ 1570 1570 { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */ 1571 + { PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */ 1571 1572 { 0, } 1572 1573 }; 1573 1574
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
··· 615 615 * that recalculates the CS and forwards to the vport. 616 616 */ 617 617 ret = mlx5dr_domain_cache_get_recalc_cs_ft_addr(dest_action->vport.dmn, 618 - dest_action->vport.num, 618 + dest_action->vport.caps->num, 619 619 final_icm_addr); 620 620 if (ret) { 621 621 mlx5dr_err(dmn, "Failed to get FW cs recalc flow table\n"); ··· 744 744 dest_action = action; 745 745 if (rx_rule) { 746 746 /* Loopback on WIRE vport is not supported */ 747 - if (action->vport.num == WIRE_PORT) 747 + if (action->vport.caps->num == WIRE_PORT) 748 748 goto out_invalid_arg; 749 749 750 750 attr.final_icm_addr = action->vport.caps->icm_address_rx;
+5 -8
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
··· 230 230 (dmn->type == MLX5DR_DOMAIN_TYPE_FDB || 231 231 dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX)) { 232 232 ret = mlx5dr_ste_build_src_gvmi_qpn(&sb[idx++], &mask, 233 - &dmn->info.caps, 234 - inner, rx); 233 + dmn, inner, rx); 235 234 if (ret) 236 235 return ret; 237 236 } ··· 457 458 458 459 prev_matcher = NULL; 459 460 if (next_matcher && !first) 460 - prev_matcher = list_entry(next_matcher->matcher_list.prev, 461 - struct mlx5dr_matcher, 462 - matcher_list); 461 + prev_matcher = list_prev_entry(next_matcher, matcher_list); 463 462 else if (!first) 464 - prev_matcher = list_entry(tbl->matcher_list.prev, 465 - struct mlx5dr_matcher, 466 - matcher_list); 463 + prev_matcher = list_last_entry(&tbl->matcher_list, 464 + struct mlx5dr_matcher, 465 + matcher_list); 467 466 468 467 if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB || 469 468 dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX) {
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
··· 18 18 struct mlx5dr_ste *last_ste; 19 19 20 20 /* The new entry will be inserted after the last */ 21 - last_ste = list_entry(miss_list->prev, struct mlx5dr_ste, miss_list_node); 21 + last_ste = list_last_entry(miss_list, struct mlx5dr_ste, miss_list_node); 22 22 WARN_ON(!last_ste); 23 23 24 24 ste_info_last = kzalloc(sizeof(*ste_info_last), GFP_KERNEL);
+37 -13
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
··· 429 429 struct mlx5dr_ste *prev_ste; 430 430 u64 miss_addr; 431 431 432 - prev_ste = list_entry(mlx5dr_ste_get_miss_list(ste)->prev, struct mlx5dr_ste, 433 - miss_list_node); 434 - if (!prev_ste) { 435 - WARN_ON(true); 432 - prev_ste = list_prev_entry(ste, miss_list_node); 433 - if (WARN_ON(!prev_ste)) 436 434 return; 437 - } 438 435 439 436 miss_addr = mlx5dr_ste_get_miss_addr(ste->hw_ste); 440 437 mlx5dr_ste_set_miss_addr(prev_ste->hw_ste, miss_addr); ··· 458 461 struct mlx5dr_ste_htbl *stats_tbl; 459 462 LIST_HEAD(send_ste_list); 460 463 461 - first_ste = list_entry(mlx5dr_ste_get_miss_list(ste)->next, 462 - struct mlx5dr_ste, miss_list_node); 464 + first_ste = list_first_entry(mlx5dr_ste_get_miss_list(ste), 465 + struct mlx5dr_ste, miss_list_node); 463 466 stats_tbl = first_ste->htbl; 464 467 465 468 /* Two options: ··· 476 479 if (last_ste == first_ste) 477 480 next_ste = NULL; 478 481 else 479 - next_ste = list_entry(ste->miss_list_node.next, 480 - struct mlx5dr_ste, miss_list_node); 482 + next_ste = list_next_entry(ste, miss_list_node); 481 483 482 484 if (!next_ste) { 483 485 /* One and only entry in the list */ ··· 837 841 spec->source_sqn = MLX5_GET(fte_match_set_misc, mask, source_sqn); 838 842 839 843 spec->source_port = MLX5_GET(fte_match_set_misc, mask, source_port); 844 + spec->source_eswitch_owner_vhca_id = MLX5_GET(fte_match_set_misc, mask, 845 + source_eswitch_owner_vhca_id); 840 846 841 847 spec->outer_second_prio = MLX5_GET(fte_match_set_misc, mask, outer_second_prio); 842 848 spec->outer_second_cfi = MLX5_GET(fte_match_set_misc, mask, outer_second_cfi); ··· 2252 2254 { 2253 2255 struct mlx5dr_match_misc *misc_mask = &value->misc; 2254 2256 2255 - if (misc_mask->source_port != 0xffff) 2257 + /* Partial misc source_port is not supported */ 2258 + if (misc_mask->source_port && misc_mask->source_port != 0xffff) 2259 + return -EINVAL; 2260 + 2261 + /* Partial misc source_eswitch_owner_vhca_id is not supported */ 2262 + if (misc_mask->source_eswitch_owner_vhca_id && 2263 + misc_mask->source_eswitch_owner_vhca_id != 0xffff) 2256 2264 return -EINVAL; 2257 2265 2258 2266 DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_gvmi, misc_mask, source_port); 2259 2267 DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_qp, misc_mask, source_sqn); 2268 + misc_mask->source_eswitch_owner_vhca_id = 0; 2260 2269 2261 2270 return 0; 2262 2271 } ··· 2275 2270 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p; 2276 2271 struct mlx5dr_match_misc *misc = &value->misc; 2277 2272 struct mlx5dr_cmd_vport_cap *vport_cap; 2273 + struct mlx5dr_domain *dmn = sb->dmn; 2274 + struct mlx5dr_cmd_caps *caps; 2278 2275 u8 *tag = hw_ste->tag; 2279 2276 2280 2277 DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn); 2281 2278 2282 - vport_cap = mlx5dr_get_vport_cap(sb->caps, misc->source_port); 2279 + if (sb->vhca_id_valid) { 2280 + /* Find port GVMI based on the eswitch_owner_vhca_id */ 2281 + if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi) 2282 + caps = &dmn->info.caps; 2283 + else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id == 2284 + dmn->peer_dmn->info.caps.gvmi)) 2285 + caps = &dmn->peer_dmn->info.caps; 2286 + else 2287 + return -EINVAL; 2288 + } else { 2289 + caps = &dmn->info.caps; 2290 + } 2291 + 2292 + vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port); 2283 2293 if (!vport_cap) 2284 2294 return -EINVAL; 2285 2295 2286 2296 if (vport_cap->vport_gvmi) 2287 2297 MLX5_SET(ste_src_gvmi_qp, tag, source_gvmi, vport_cap->vport_gvmi); 2288 2298 2299 + misc->source_eswitch_owner_vhca_id = 0; 2289 2300 misc->source_port = 0; 2290 2301 2291 2302 return 0; ··· 2309 2288 2310 2289 int mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb, 2311 2290 struct mlx5dr_match_param *mask, 2312 - struct mlx5dr_cmd_caps *caps, 2291 + struct mlx5dr_domain *dmn, 2313 2292 bool inner, bool rx) 2314 2293 { 2315 2294 int ret; 2295 + 2296 + /* Set vhca_id_valid before we reset source_eswitch_owner_vhca_id */ 2297 + sb->vhca_id_valid = mask->misc.source_eswitch_owner_vhca_id; 2316 2298 2317 2299 ret = dr_ste_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask); 2318 2300 if (ret) 2319 2301 return ret; 2320 2302 2321 2303 sb->rx = rx; 2322 - sb->caps = caps; 2304 + sb->dmn = dmn; 2323 2305 sb->inner = inner; 2324 2306 sb->lu_type = MLX5DR_STE_LU_TYPE_SRC_GVMI_AND_QP; 2325 2307 sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+4 -3
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
··· 180 180 struct mlx5dr_ste_build { 181 181 u8 inner:1; 182 182 u8 rx:1; 183 + u8 vhca_id_valid:1; 184 + struct mlx5dr_domain *dmn; 183 185 struct mlx5dr_cmd_caps *caps; 184 186 u8 lu_type; 185 187 u16 byte_mask; ··· 333 331 bool inner, bool rx); 334 332 int mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb, 335 333 struct mlx5dr_match_param *mask, 336 - struct mlx5dr_cmd_caps *caps, 334 + struct mlx5dr_domain *dmn, 337 335 bool inner, bool rx); 338 336 void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx); 339 337 ··· 455 453 u32 gre_c_present:1; 456 454 /* Source port.;0xffff determines wire port */ 457 455 u32 source_port:16; 458 - u32 reserved_auto2:16; 456 + u32 source_eswitch_owner_vhca_id:16; 459 457 /* VLAN ID of first VLAN tag the inner header of the incoming packet. 460 458 * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1 461 459 */ ··· 747 745 struct { 748 746 struct mlx5dr_domain *dmn; 749 747 struct mlx5dr_cmd_vport_cap *caps; 750 - u32 num; 751 748 } vport; 752 749 struct { 753 750 u32 vlan_hdr; /* tpid_pcp_dei_vid */
+14 -14
include/linux/mlx5/mlx5_ifc.h
··· 282 282 MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT = 0x940, 283 283 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT = 0x941, 284 284 MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT = 0x942, 285 - MLX5_CMD_OP_SYNC_STEERING = 0xb00, 286 285 MLX5_CMD_OP_FPGA_CREATE_QP = 0x960, 287 286 MLX5_CMD_OP_FPGA_MODIFY_QP = 0x961, 288 287 MLX5_CMD_OP_FPGA_QUERY_QP = 0x962, ··· 295 296 MLX5_CMD_OP_DESTROY_UCTX = 0xa06, 296 297 MLX5_CMD_OP_CREATE_UMEM = 0xa08, 297 298 MLX5_CMD_OP_DESTROY_UMEM = 0xa0a, 299 + MLX5_CMD_OP_SYNC_STEERING = 0xb00, 298 300 MLX5_CMD_OP_MAX 299 301 }; 300 302 ··· 487 487 488 488 struct mlx5_ifc_fte_match_set_misc_bits { 489 489 u8 gre_c_present[0x1]; 490 - u8 reserved_auto1[0x1]; 490 + u8 reserved_at_1[0x1]; 491 491 u8 gre_k_present[0x1]; 492 492 u8 gre_s_present[0x1]; 493 493 u8 source_vhca_port[0x4]; ··· 5054 5054 5055 5055 struct mlx5_ifc_other_hca_cap_bits { 5056 5056 u8 roce[0x1]; 5057 - u8 reserved_0[0x27f]; 5057 + u8 reserved_at_1[0x27f]; 5058 5058 }; 5059 5059 5060 5060 struct mlx5_ifc_query_other_hca_cap_out_bits { 5061 5061 u8 status[0x8]; 5062 - u8 reserved_0[0x18]; 5062 + u8 reserved_at_8[0x18]; 5063 5063 5064 5064 u8 syndrome[0x20]; 5065 5065 5066 - u8 reserved_1[0x40]; 5066 + u8 reserved_at_40[0x40]; 5067 5067 5068 5068 struct mlx5_ifc_other_hca_cap_bits other_capability; 5069 5069 }; 5070 5070 5071 5071 struct mlx5_ifc_query_other_hca_cap_in_bits { 5072 5072 u8 opcode[0x10]; 5073 - u8 reserved_0[0x10]; 5073 + u8 reserved_at_10[0x10]; 5074 5074 5075 - u8 reserved_1[0x10]; 5075 + u8 reserved_at_20[0x10]; 5076 5076 u8 op_mod[0x10]; 5077 5077 5078 - u8 reserved_2[0x10]; 5078 + u8 reserved_at_40[0x10]; 5079 5079 u8 function_id[0x10]; 5080 5080 5081 - u8 reserved_3[0x20]; 5081 + u8 reserved_at_60[0x20]; 5082 5082 }; 5083 5083 5084 5084 struct mlx5_ifc_modify_other_hca_cap_out_bits { 5085 5085 u8 status[0x8]; 5086 - u8 reserved_0[0x18]; 5086 + u8 reserved_at_8[0x18]; 5087 5087 5088 5088 u8 syndrome[0x20]; 5089 5089 5090 - u8 reserved_1[0x40]; 5090 + u8 reserved_at_40[0x40]; 5091 5091 }; 5092 5092 5093 5093 struct mlx5_ifc_modify_other_hca_cap_in_bits { 5094 5094 u8 opcode[0x10]; 5095 - u8 reserved_0[0x10]; 5095 + u8 reserved_at_10[0x10]; 5096 5096 5097 - u8 reserved_1[0x10]; 5097 + u8 reserved_at_20[0x10]; 5098 5098 u8 op_mod[0x10]; 5099 5099 5100 - u8 reserved_2[0x10]; 5100 + u8 reserved_at_40[0x10]; 5101 5101 u8 function_id[0x10]; 5102 5102 u8 field_select[0x20]; 5103 5103