Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'ct-offload' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

+1628 -254
+2 -1
drivers/infiniband/hw/mlx5/main.c
··· 3570 3570 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 3571 3571 misc_parameters_2); 3572 3572 3573 - MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0); 3573 + MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, 3574 + mlx5_eswitch_get_vport_metadata_mask()); 3574 3575 } else { 3575 3576 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, 3576 3577 misc_parameters);
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/Makefile
··· 34 34 mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o 35 35 mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o 36 36 mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o lib/port_tun.o lag_mp.o \ 37 - lib/geneve.o en/tc_tun_vxlan.o en/tc_tun_gre.o \ 37 + lib/geneve.o en/mapping.o en/tc_tun_vxlan.o en/tc_tun_gre.o \ 38 38 en/tc_tun_geneve.o diag/en_tc_tracepoint.o 39 39 mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += en/hv_vhca_stats.o 40 40
+218
drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB 2 + /* Copyright (c) 2018 Mellanox Technologies */ 3 + 4 + #include <linux/jhash.h> 5 + #include <linux/slab.h> 6 + #include <linux/xarray.h> 7 + #include <linux/hashtable.h> 8 + 9 + #include "mapping.h" 10 + 11 + #define MAPPING_GRACE_PERIOD 2000 12 + 13 + struct mapping_ctx { 14 + struct xarray xarray; 15 + DECLARE_HASHTABLE(ht, 8); 16 + struct mutex lock; /* Guards hashtable and xarray */ 17 + unsigned long max_id; 18 + size_t data_size; 19 + bool delayed_removal; 20 + struct delayed_work dwork; 21 + struct list_head pending_list; 22 + spinlock_t pending_list_lock; /* Guards pending list */ 23 + }; 24 + 25 + struct mapping_item { 26 + struct rcu_head rcu; 27 + struct list_head list; 28 + unsigned long timeout; 29 + struct hlist_node node; 30 + int cnt; 31 + u32 id; 32 + char data[]; 33 + }; 34 + 35 + int mapping_add(struct mapping_ctx *ctx, void *data, u32 *id) 36 + { 37 + struct mapping_item *mi; 38 + int err = -ENOMEM; 39 + u32 hash_key; 40 + 41 + mutex_lock(&ctx->lock); 42 + 43 + hash_key = jhash(data, ctx->data_size, 0); 44 + hash_for_each_possible(ctx->ht, mi, node, hash_key) { 45 + if (!memcmp(data, mi->data, ctx->data_size)) 46 + goto attach; 47 + } 48 + 49 + mi = kzalloc(sizeof(*mi) + ctx->data_size, GFP_KERNEL); 50 + if (!mi) 51 + goto err_alloc; 52 + 53 + memcpy(mi->data, data, ctx->data_size); 54 + hash_add(ctx->ht, &mi->node, hash_key); 55 + 56 + err = xa_alloc(&ctx->xarray, &mi->id, mi, XA_LIMIT(1, ctx->max_id), 57 + GFP_KERNEL); 58 + if (err) 59 + goto err_assign; 60 + attach: 61 + ++mi->cnt; 62 + *id = mi->id; 63 + 64 + mutex_unlock(&ctx->lock); 65 + 66 + return 0; 67 + 68 + err_assign: 69 + hash_del(&mi->node); 70 + kfree(mi); 71 + err_alloc: 72 + mutex_unlock(&ctx->lock); 73 + 74 + return err; 75 + } 76 + 77 + static void mapping_remove_and_free(struct mapping_ctx *ctx, 78 + struct mapping_item *mi) 79 + { 80 + xa_erase(&ctx->xarray, mi->id); 81 + kfree_rcu(mi, rcu); 82 + } 83 + 84 + 
static void mapping_free_item(struct mapping_ctx *ctx, 85 + struct mapping_item *mi) 86 + { 87 + if (!ctx->delayed_removal) { 88 + mapping_remove_and_free(ctx, mi); 89 + return; 90 + } 91 + 92 + mi->timeout = jiffies + msecs_to_jiffies(MAPPING_GRACE_PERIOD); 93 + 94 + spin_lock(&ctx->pending_list_lock); 95 + list_add_tail(&mi->list, &ctx->pending_list); 96 + spin_unlock(&ctx->pending_list_lock); 97 + 98 + schedule_delayed_work(&ctx->dwork, MAPPING_GRACE_PERIOD); 99 + } 100 + 101 + int mapping_remove(struct mapping_ctx *ctx, u32 id) 102 + { 103 + unsigned long index = id; 104 + struct mapping_item *mi; 105 + int err = -ENOENT; 106 + 107 + mutex_lock(&ctx->lock); 108 + mi = xa_load(&ctx->xarray, index); 109 + if (!mi) 110 + goto out; 111 + err = 0; 112 + 113 + if (--mi->cnt > 0) 114 + goto out; 115 + 116 + hash_del(&mi->node); 117 + mapping_free_item(ctx, mi); 118 + out: 119 + mutex_unlock(&ctx->lock); 120 + 121 + return err; 122 + } 123 + 124 + int mapping_find(struct mapping_ctx *ctx, u32 id, void *data) 125 + { 126 + unsigned long index = id; 127 + struct mapping_item *mi; 128 + int err = -ENOENT; 129 + 130 + rcu_read_lock(); 131 + mi = xa_load(&ctx->xarray, index); 132 + if (!mi) 133 + goto err_find; 134 + 135 + memcpy(data, mi->data, ctx->data_size); 136 + err = 0; 137 + 138 + err_find: 139 + rcu_read_unlock(); 140 + return err; 141 + } 142 + 143 + static void 144 + mapping_remove_and_free_list(struct mapping_ctx *ctx, struct list_head *list) 145 + { 146 + struct mapping_item *mi; 147 + 148 + list_for_each_entry(mi, list, list) 149 + mapping_remove_and_free(ctx, mi); 150 + } 151 + 152 + static void mapping_work_handler(struct work_struct *work) 153 + { 154 + unsigned long min_timeout = 0, now = jiffies; 155 + struct mapping_item *mi, *next; 156 + LIST_HEAD(pending_items); 157 + struct mapping_ctx *ctx; 158 + 159 + ctx = container_of(work, struct mapping_ctx, dwork.work); 160 + 161 + spin_lock(&ctx->pending_list_lock); 162 + list_for_each_entry_safe(mi, next, 
&ctx->pending_list, list) { 163 + if (time_after(now, mi->timeout)) 164 + list_move(&mi->list, &pending_items); 165 + else if (!min_timeout || 166 + time_before(mi->timeout, min_timeout)) 167 + min_timeout = mi->timeout; 168 + } 169 + spin_unlock(&ctx->pending_list_lock); 170 + 171 + mapping_remove_and_free_list(ctx, &pending_items); 172 + 173 + if (min_timeout) 174 + schedule_delayed_work(&ctx->dwork, abs(min_timeout - now)); 175 + } 176 + 177 + static void mapping_flush_work(struct mapping_ctx *ctx) 178 + { 179 + if (!ctx->delayed_removal) 180 + return; 181 + 182 + cancel_delayed_work_sync(&ctx->dwork); 183 + mapping_remove_and_free_list(ctx, &ctx->pending_list); 184 + } 185 + 186 + struct mapping_ctx * 187 + mapping_create(size_t data_size, u32 max_id, bool delayed_removal) 188 + { 189 + struct mapping_ctx *ctx; 190 + 191 + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 192 + if (!ctx) 193 + return ERR_PTR(-ENOMEM); 194 + 195 + ctx->max_id = max_id ? max_id : UINT_MAX; 196 + ctx->data_size = data_size; 197 + 198 + if (delayed_removal) { 199 + INIT_DELAYED_WORK(&ctx->dwork, mapping_work_handler); 200 + INIT_LIST_HEAD(&ctx->pending_list); 201 + spin_lock_init(&ctx->pending_list_lock); 202 + ctx->delayed_removal = true; 203 + } 204 + 205 + mutex_init(&ctx->lock); 206 + xa_init_flags(&ctx->xarray, XA_FLAGS_ALLOC1); 207 + 208 + return ctx; 209 + } 210 + 211 + void mapping_destroy(struct mapping_ctx *ctx) 212 + { 213 + mapping_flush_work(ctx); 214 + xa_destroy(&ctx->xarray); 215 + mutex_destroy(&ctx->lock); 216 + 217 + kfree(ctx); 218 + }
+27
drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ 2 + /* Copyright (c) 2019 Mellanox Technologies */ 3 + 4 + #ifndef __MLX5_MAPPING_H__ 5 + #define __MLX5_MAPPING_H__ 6 + 7 + struct mapping_ctx; 8 + 9 + int mapping_add(struct mapping_ctx *ctx, void *data, u32 *id); 10 + int mapping_remove(struct mapping_ctx *ctx, u32 id); 11 + int mapping_find(struct mapping_ctx *ctx, u32 id, void *data); 12 + 13 + /* mapping uses an xarray to map data to ids in add(), and for find(). 14 + * For locking, it uses an internal xarray spin lock for add()/remove(), 15 + * find() uses rcu_read_lock(). 16 + * Choosing delayed_removal postpones the removal of a previously mapped 17 + * id by MAPPING_GRACE_PERIOD milliseconds. 18 + * This is to avoid races against hardware, where we mark the packet in 19 + * hardware with a previous id, and quick remove() and add() reusing the same 20 + * previous id. Then find() will get the new mapping instead of the old 21 + * which was used to mark the packet. 22 + */ 23 + struct mapping_ctx *mapping_create(size_t data_size, u32 max_id, 24 + bool delayed_removal); 25 + void mapping_destroy(struct mapping_ctx *ctx); 26 + 27 + #endif /* __MLX5_MAPPING_H__ */
+110 -2
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
··· 469 469 struct mlx5e_priv *priv, 470 470 struct mlx5_flow_spec *spec, 471 471 struct flow_cls_offload *f, 472 - void *headers_c, 473 - void *headers_v, u8 *match_level) 472 + u8 *match_level) 474 473 { 475 474 struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(filter_dev); 475 + struct flow_rule *rule = flow_cls_offload_flow_rule(f); 476 + void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 477 + outer_headers); 478 + void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, 479 + outer_headers); 480 + struct netlink_ext_ack *extack = f->common.extack; 476 481 int err = 0; 477 482 478 483 if (!tunnel) { ··· 503 498 if (err) 504 499 goto out; 505 500 } 501 + 502 + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) { 503 + struct flow_match_control match; 504 + u16 addr_type; 505 + 506 + flow_rule_match_enc_control(rule, &match); 507 + addr_type = match.key->addr_type; 508 + 509 + /* For tunnel addr_type used same key id`s as for non-tunnel */ 510 + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { 511 + struct flow_match_ipv4_addrs match; 512 + 513 + flow_rule_match_enc_ipv4_addrs(rule, &match); 514 + MLX5_SET(fte_match_set_lyr_2_4, headers_c, 515 + src_ipv4_src_ipv6.ipv4_layout.ipv4, 516 + ntohl(match.mask->src)); 517 + MLX5_SET(fte_match_set_lyr_2_4, headers_v, 518 + src_ipv4_src_ipv6.ipv4_layout.ipv4, 519 + ntohl(match.key->src)); 520 + 521 + MLX5_SET(fte_match_set_lyr_2_4, headers_c, 522 + dst_ipv4_dst_ipv6.ipv4_layout.ipv4, 523 + ntohl(match.mask->dst)); 524 + MLX5_SET(fte_match_set_lyr_2_4, headers_v, 525 + dst_ipv4_dst_ipv6.ipv4_layout.ipv4, 526 + ntohl(match.key->dst)); 527 + 528 + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, 529 + ethertype); 530 + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 531 + ETH_P_IP); 532 + } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { 533 + struct flow_match_ipv6_addrs match; 534 + 535 + flow_rule_match_enc_ipv6_addrs(rule, &match); 536 + 
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 537 + src_ipv4_src_ipv6.ipv6_layout.ipv6), 538 + &match.mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, 539 + ipv6)); 540 + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 541 + src_ipv4_src_ipv6.ipv6_layout.ipv6), 542 + &match.key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, 543 + ipv6)); 544 + 545 + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 546 + dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 547 + &match.mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, 548 + ipv6)); 549 + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 550 + dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 551 + &match.key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, 552 + ipv6)); 553 + 554 + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, 555 + ethertype); 556 + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 557 + ETH_P_IPV6); 558 + } 559 + } 560 + 561 + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) { 562 + struct flow_match_ip match; 563 + 564 + flow_rule_match_enc_ip(rule, &match); 565 + MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, 566 + match.mask->tos & 0x3); 567 + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, 568 + match.key->tos & 0x3); 569 + 570 + MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, 571 + match.mask->tos >> 2); 572 + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, 573 + match.key->tos >> 2); 574 + 575 + MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, 576 + match.mask->ttl); 577 + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, 578 + match.key->ttl); 579 + 580 + if (match.mask->ttl && 581 + !MLX5_CAP_ESW_FLOWTABLE_FDB 582 + (priv->mdev, 583 + ft_field_support.outer_ipv4_ttl)) { 584 + NL_SET_ERR_MSG_MOD(extack, 585 + "Matching on TTL is not supported"); 586 + err = -EOPNOTSUPP; 587 + goto out; 588 + } 589 + } 590 + 591 + /* Enforce DMAC when offloading incoming tunneled flows. 592 + * Flow counters require a match on the DMAC. 
593 + */ 594 + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16); 595 + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0); 596 + ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 597 + dmac_47_16), priv->netdev->dev_addr); 598 + 599 + /* let software handle IP fragments */ 600 + MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1); 601 + MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0); 602 + 603 + return 0; 506 604 507 605 out: 508 606 return err;
+1 -2
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
··· 76 76 struct mlx5e_priv *priv, 77 77 struct mlx5_flow_spec *spec, 78 78 struct flow_cls_offload *f, 79 - void *headers_c, 80 - void *headers_v, u8 *match_level); 79 + u8 *match_level); 81 80 82 81 int mlx5e_tc_tun_parse_udp_ports(struct mlx5e_priv *priv, 83 82 struct mlx5_flow_spec *spec,
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
··· 1952 1952 .update_rx = mlx5e_update_rep_rx, 1953 1953 .update_stats = mlx5e_update_ndo_stats, 1954 1954 .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep, 1955 - .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq, 1955 + .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep, 1956 1956 .max_tc = 1, 1957 1957 .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR), 1958 1958 .stats_grps = mlx5e_rep_stats_grps, ··· 1972 1972 .update_stats = mlx5e_update_ndo_stats, 1973 1973 .update_carrier = mlx5e_update_carrier, 1974 1974 .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep, 1975 - .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq, 1975 + .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep, 1976 1976 .max_tc = MLX5E_MAX_NUM_TC, 1977 1977 .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR), 1978 1978 .stats_grps = mlx5e_ul_rep_stats_grps,
+7
drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
··· 81 81 struct mutex unready_flows_lock; 82 82 struct list_head unready_flows; 83 83 struct work_struct reoffload_flows_work; 84 + 85 + /* maps tun_info to a unique id */ 86 + struct mapping_ctx *tunnel_mapping; 87 + /* maps tun_enc_opts to a unique id */ 88 + struct mapping_ctx *tunnel_enc_opts_mapping; 84 89 }; 85 90 86 91 struct mlx5e_rep_priv { ··· 197 192 void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv); 198 193 199 194 void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); 195 + void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, 196 + struct mlx5_cqe64 *cqe); 200 197 201 198 int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv, 202 199 struct mlx5e_encap_entry *e);
+66
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
··· 1195 1195 struct mlx5e_priv *priv = netdev_priv(netdev); 1196 1196 struct mlx5e_rep_priv *rpriv = priv->ppriv; 1197 1197 struct mlx5_eswitch_rep *rep = rpriv->rep; 1198 + struct mlx5e_tc_update_priv tc_priv = {}; 1198 1199 struct mlx5_wq_cyc *wq = &rq->wqe.wq; 1199 1200 struct mlx5e_wqe_frag_info *wi; 1200 1201 struct sk_buff *skb; ··· 1228 1227 if (rep->vlan && skb_vlan_tag_present(skb)) 1229 1228 skb_vlan_pop(skb); 1230 1229 1230 + if (!mlx5e_tc_rep_update_skb(cqe, skb, &tc_priv)) 1231 + goto free_wqe; 1232 + 1231 1233 napi_gro_receive(rq->cq.napi, skb); 1234 + 1235 + mlx5_tc_rep_post_napi_receive(&tc_priv); 1232 1236 1233 1237 free_wqe: 1234 1238 mlx5e_free_rx_wqe(rq, wi, true); 1235 1239 wq_cyc_pop: 1236 1240 mlx5_wq_cyc_pop(wq); 1241 + } 1242 + 1243 + void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, 1244 + struct mlx5_cqe64 *cqe) 1245 + { 1246 + u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe); 1247 + u16 wqe_id = be16_to_cpu(cqe->wqe_id); 1248 + struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id]; 1249 + u16 stride_ix = mpwrq_get_cqe_stride_index(cqe); 1250 + u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz; 1251 + u32 head_offset = wqe_offset & (PAGE_SIZE - 1); 1252 + u32 page_idx = wqe_offset >> PAGE_SHIFT; 1253 + struct mlx5e_tc_update_priv tc_priv = {}; 1254 + struct mlx5e_rx_wqe_ll *wqe; 1255 + struct mlx5_wq_ll *wq; 1256 + struct sk_buff *skb; 1257 + u16 cqe_bcnt; 1258 + 1259 + wi->consumed_strides += cstrides; 1260 + 1261 + if (unlikely(MLX5E_RX_ERR_CQE(cqe))) { 1262 + trigger_report(rq, cqe); 1263 + rq->stats->wqe_err++; 1264 + goto mpwrq_cqe_out; 1265 + } 1266 + 1267 + if (unlikely(mpwrq_is_filler_cqe(cqe))) { 1268 + struct mlx5e_rq_stats *stats = rq->stats; 1269 + 1270 + stats->mpwqe_filler_cqes++; 1271 + stats->mpwqe_filler_strides += cstrides; 1272 + goto mpwrq_cqe_out; 1273 + } 1274 + 1275 + cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe); 1276 + 1277 + skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq, 1278 + 
mlx5e_skb_from_cqe_mpwrq_linear, 1279 + mlx5e_skb_from_cqe_mpwrq_nonlinear, 1280 + rq, wi, cqe_bcnt, head_offset, page_idx); 1281 + if (!skb) 1282 + goto mpwrq_cqe_out; 1283 + 1284 + mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); 1285 + 1286 + if (!mlx5e_tc_rep_update_skb(cqe, skb, &tc_priv)) 1287 + goto mpwrq_cqe_out; 1288 + 1289 + napi_gro_receive(rq->cq.napi, skb); 1290 + 1291 + mlx5_tc_rep_post_napi_receive(&tc_priv); 1292 + 1293 + mpwrq_cqe_out: 1294 + if (likely(wi->consumed_strides < rq->mpwqe.num_strides)) 1295 + return; 1296 + 1297 + wq = &rq->mpwqe.wq; 1298 + wqe = mlx5_wq_ll_get_wqe(wq, wqe_id); 1299 + mlx5e_free_rx_mpwqe(rq, wi, true); 1300 + mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index); 1237 1301 } 1238 1302 #endif 1239 1303
+636 -198
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 55 55 #include "fs_core.h" 56 56 #include "en/port.h" 57 57 #include "en/tc_tun.h" 58 + #include "en/mapping.h" 58 59 #include "lib/devcom.h" 59 60 #include "lib/geneve.h" 60 61 #include "diag/en_tc_tracepoint.h" 62 + 63 + #define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto) 61 64 62 65 struct mlx5_nic_flow_attr { 63 66 u32 action; ··· 137 134 refcount_t refcnt; 138 135 struct rcu_head rcu_head; 139 136 struct completion init_done; 137 + int tunnel_id; /* the mapped tunnel id of this flow */ 138 + 140 139 union { 141 140 struct mlx5_esw_flow_attr esw_attr[0]; 142 141 struct mlx5_nic_flow_attr nic_attr[0]; ··· 149 144 const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS]; 150 145 struct net_device *filter_dev; 151 146 struct mlx5_flow_spec spec; 152 - int num_mod_hdr_actions; 153 - int max_mod_hdr_actions; 154 - void *mod_hdr_actions; 147 + struct mlx5e_tc_mod_hdr_acts mod_hdr_acts; 155 148 int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS]; 156 149 }; 157 150 158 151 #define MLX5E_TC_TABLE_NUM_GROUPS 4 159 152 #define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16) 153 + 154 + struct tunnel_match_key { 155 + struct flow_dissector_key_control enc_control; 156 + struct flow_dissector_key_keyid enc_key_id; 157 + struct flow_dissector_key_ports enc_tp; 158 + struct flow_dissector_key_ip enc_ip; 159 + union { 160 + struct flow_dissector_key_ipv4_addrs enc_ipv4; 161 + struct flow_dissector_key_ipv6_addrs enc_ipv6; 162 + }; 163 + 164 + int filter_ifindex; 165 + }; 166 + 167 + /* Tunnel_id mapping is TUNNEL_INFO_BITS + ENC_OPTS_BITS. 168 + * Upper TUNNEL_INFO_BITS for general tunnel info. 169 + * Lower ENC_OPTS_BITS bits for enc_opts. 
170 + */ 171 + #define TUNNEL_INFO_BITS 6 172 + #define TUNNEL_INFO_BITS_MASK GENMASK(TUNNEL_INFO_BITS - 1, 0) 173 + #define ENC_OPTS_BITS 2 174 + #define ENC_OPTS_BITS_MASK GENMASK(ENC_OPTS_BITS - 1, 0) 175 + #define TUNNEL_ID_BITS (TUNNEL_INFO_BITS + ENC_OPTS_BITS) 176 + #define TUNNEL_ID_MASK GENMASK(TUNNEL_ID_BITS - 1, 0) 177 + 178 + struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = { 179 + [CHAIN_TO_REG] = { 180 + .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0, 181 + .moffset = 0, 182 + .mlen = 2, 183 + }, 184 + [TUNNEL_TO_REG] = { 185 + .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1, 186 + .moffset = 3, 187 + .mlen = 1, 188 + .soffset = MLX5_BYTE_OFF(fte_match_param, 189 + misc_parameters_2.metadata_reg_c_1), 190 + }, 191 + }; 192 + 193 + static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow); 194 + 195 + void 196 + mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec, 197 + enum mlx5e_tc_attr_to_reg type, 198 + u32 data, 199 + u32 mask) 200 + { 201 + int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset; 202 + int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen; 203 + void *headers_c = spec->match_criteria; 204 + void *headers_v = spec->match_value; 205 + void *fmask, *fval; 206 + 207 + fmask = headers_c + soffset; 208 + fval = headers_v + soffset; 209 + 210 + mask = cpu_to_be32(mask) >> (32 - (match_len * 8)); 211 + data = cpu_to_be32(data) >> (32 - (match_len * 8)); 212 + 213 + memcpy(fmask, &mask, match_len); 214 + memcpy(fval, &data, match_len); 215 + 216 + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2; 217 + } 218 + 219 + int 220 + mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev, 221 + struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts, 222 + enum mlx5e_tc_attr_to_reg type, 223 + u32 data) 224 + { 225 + int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset; 226 + int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield; 227 + int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen; 228 + char 
*modact; 229 + int err; 230 + 231 + err = alloc_mod_hdr_actions(mdev, MLX5_FLOW_NAMESPACE_FDB, 232 + mod_hdr_acts); 233 + if (err) 234 + return err; 235 + 236 + modact = mod_hdr_acts->actions + 237 + (mod_hdr_acts->num_actions * MLX5_MH_ACT_SZ); 238 + 239 + /* Firmware has 5bit length field and 0 means 32bits */ 240 + if (mlen == 4) 241 + mlen = 0; 242 + 243 + MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET); 244 + MLX5_SET(set_action_in, modact, field, mfield); 245 + MLX5_SET(set_action_in, modact, offset, moffset * 8); 246 + MLX5_SET(set_action_in, modact, length, mlen * 8); 247 + MLX5_SET(set_action_in, modact, data, data); 248 + mod_hdr_acts->num_actions++; 249 + 250 + return 0; 251 + } 160 252 161 253 struct mlx5e_hairpin { 162 254 struct mlx5_hairpin *pair; ··· 311 209 struct completion res_ready; 312 210 int compl_result; 313 211 }; 314 - 315 - #define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto) 316 212 317 213 static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, 318 214 struct mlx5e_tc_flow *flow); ··· 461 361 struct mod_hdr_key key; 462 362 u32 hash_key; 463 363 464 - num_actions = parse_attr->num_mod_hdr_actions; 364 + num_actions = parse_attr->mod_hdr_acts.num_actions; 465 365 actions_size = MLX5_MH_ACT_SZ * num_actions; 466 366 467 - key.actions = parse_attr->mod_hdr_actions; 367 + key.actions = parse_attr->mod_hdr_acts.actions; 468 368 key.num_actions = num_actions; 469 369 470 370 hash_key = hash_mod_hdr_info(&key); ··· 1054 954 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) { 1055 955 err = mlx5e_attach_mod_hdr(priv, flow, parse_attr); 1056 956 flow_act.modify_hdr = attr->modify_hdr; 1057 - kfree(parse_attr->mod_hdr_actions); 957 + dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts); 1058 958 if (err) 1059 959 return err; 1060 960 } ··· 1324 1224 1325 1225 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) { 1326 1226 err = mlx5e_attach_mod_hdr(priv, flow, parse_attr); 1327 - 
kfree(parse_attr->mod_hdr_actions); 1227 + dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts); 1328 1228 if (err) 1329 1229 return err; 1330 1230 } ··· 1373 1273 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 1374 1274 struct mlx5_esw_flow_attr *attr = flow->esw_attr; 1375 1275 int out_index; 1276 + 1277 + mlx5e_put_flow_tunnel_id(flow); 1376 1278 1377 1279 if (flow_flag_test(flow, NOT_READY)) { 1378 1280 remove_unready_flow(flow); ··· 1764 1662 } 1765 1663 } 1766 1664 1767 - 1768 - static int parse_tunnel_attr(struct mlx5e_priv *priv, 1769 - struct mlx5_flow_spec *spec, 1770 - struct flow_cls_offload *f, 1771 - struct net_device *filter_dev, u8 *match_level) 1665 + static int flow_has_tc_fwd_action(struct flow_cls_offload *f) 1772 1666 { 1773 - struct netlink_ext_ack *extack = f->common.extack; 1774 - void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 1775 - outer_headers); 1776 - void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, 1777 - outer_headers); 1778 1667 struct flow_rule *rule = flow_cls_offload_flow_rule(f); 1779 - int err; 1668 + struct flow_action *flow_action = &rule->action; 1669 + const struct flow_action_entry *act; 1670 + int i; 1780 1671 1781 - err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f, 1782 - headers_c, headers_v, match_level); 1783 - if (err) { 1784 - NL_SET_ERR_MSG_MOD(extack, 1785 - "failed to parse tunnel attributes"); 1786 - return err; 1787 - } 1788 - 1789 - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) { 1790 - struct flow_match_control match; 1791 - u16 addr_type; 1792 - 1793 - flow_rule_match_enc_control(rule, &match); 1794 - addr_type = match.key->addr_type; 1795 - 1796 - /* For tunnel addr_type used same key id`s as for non-tunnel */ 1797 - if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { 1798 - struct flow_match_ipv4_addrs match; 1799 - 1800 - flow_rule_match_enc_ipv4_addrs(rule, &match); 1801 - MLX5_SET(fte_match_set_lyr_2_4, headers_c, 1802 - 
src_ipv4_src_ipv6.ipv4_layout.ipv4, 1803 - ntohl(match.mask->src)); 1804 - MLX5_SET(fte_match_set_lyr_2_4, headers_v, 1805 - src_ipv4_src_ipv6.ipv4_layout.ipv4, 1806 - ntohl(match.key->src)); 1807 - 1808 - MLX5_SET(fte_match_set_lyr_2_4, headers_c, 1809 - dst_ipv4_dst_ipv6.ipv4_layout.ipv4, 1810 - ntohl(match.mask->dst)); 1811 - MLX5_SET(fte_match_set_lyr_2_4, headers_v, 1812 - dst_ipv4_dst_ipv6.ipv4_layout.ipv4, 1813 - ntohl(match.key->dst)); 1814 - 1815 - MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, 1816 - ethertype); 1817 - MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 1818 - ETH_P_IP); 1819 - } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { 1820 - struct flow_match_ipv6_addrs match; 1821 - 1822 - flow_rule_match_enc_ipv6_addrs(rule, &match); 1823 - memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 1824 - src_ipv4_src_ipv6.ipv6_layout.ipv6), 1825 - &match.mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, 1826 - ipv6)); 1827 - memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 1828 - src_ipv4_src_ipv6.ipv6_layout.ipv6), 1829 - &match.key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, 1830 - ipv6)); 1831 - 1832 - memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 1833 - dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 1834 - &match.mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, 1835 - ipv6)); 1836 - memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 1837 - dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 1838 - &match.key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, 1839 - ipv6)); 1840 - 1841 - MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, 1842 - ethertype); 1843 - MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 1844 - ETH_P_IPV6); 1672 + flow_action_for_each(i, act, flow_action) { 1673 + switch (act->id) { 1674 + case FLOW_ACTION_GOTO: 1675 + return true; 1676 + default: 1677 + continue; 1845 1678 } 1846 1679 } 1847 1680 1848 - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) { 1849 - struct flow_match_ip match; 1681 + return false; 1682 + } 1850 1683 1851 - 
flow_rule_match_enc_ip(rule, &match); 1852 - MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, 1853 - match.mask->tos & 0x3); 1854 - MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, 1855 - match.key->tos & 0x3); 1684 + static int 1685 + enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv, 1686 + struct flow_dissector_key_enc_opts *opts, 1687 + struct netlink_ext_ack *extack, 1688 + bool *dont_care) 1689 + { 1690 + struct geneve_opt *opt; 1691 + int off = 0; 1856 1692 1857 - MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, 1858 - match.mask->tos >> 2); 1859 - MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, 1860 - match.key->tos >> 2); 1693 + *dont_care = true; 1861 1694 1862 - MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, 1863 - match.mask->ttl); 1864 - MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, 1865 - match.key->ttl); 1695 + while (opts->len > off) { 1696 + opt = (struct geneve_opt *)&opts->data[off]; 1866 1697 1867 - if (match.mask->ttl && 1868 - !MLX5_CAP_ESW_FLOWTABLE_FDB 1869 - (priv->mdev, 1870 - ft_field_support.outer_ipv4_ttl)) { 1871 - NL_SET_ERR_MSG_MOD(extack, 1872 - "Matching on TTL is not supported"); 1873 - return -EOPNOTSUPP; 1698 + if (!(*dont_care) || opt->opt_class || opt->type || 1699 + memchr_inv(opt->opt_data, 0, opt->length * 4)) { 1700 + *dont_care = false; 1701 + 1702 + if (opt->opt_class != U16_MAX || 1703 + opt->type != U8_MAX || 1704 + memchr_inv(opt->opt_data, 0xFF, 1705 + opt->length * 4)) { 1706 + NL_SET_ERR_MSG(extack, 1707 + "Partial match of tunnel options in chain > 0 isn't supported"); 1708 + netdev_warn(priv->netdev, 1709 + "Partial match of tunnel options in chain > 0 isn't supported"); 1710 + return -EOPNOTSUPP; 1711 + } 1874 1712 } 1875 1713 1714 + off += sizeof(struct geneve_opt) + opt->length * 4; 1876 1715 } 1877 - 1878 - /* Enforce DMAC when offloading incoming tunneled flows. 1879 - * Flow counters require a match on the DMAC. 
1880 - */ 1881 - MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16); 1882 - MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0); 1883 - ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 1884 - dmac_47_16), priv->netdev->dev_addr); 1885 - 1886 - /* let software handle IP fragments */ 1887 - MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1); 1888 - MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0); 1889 1716 1890 1717 return 0; 1891 1718 } 1892 1719 1893 - static void *get_match_headers_criteria(u32 flags, 1894 - struct mlx5_flow_spec *spec) 1720 + #define COPY_DISSECTOR(rule, diss_key, dst)\ 1721 + ({ \ 1722 + struct flow_rule *__rule = (rule);\ 1723 + typeof(dst) __dst = dst;\ 1724 + \ 1725 + memcpy(__dst,\ 1726 + skb_flow_dissector_target(__rule->match.dissector,\ 1727 + diss_key,\ 1728 + __rule->match.key),\ 1729 + sizeof(*__dst));\ 1730 + }) 1731 + 1732 + static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv, 1733 + struct mlx5e_tc_flow *flow, 1734 + struct flow_cls_offload *f, 1735 + struct net_device *filter_dev) 1895 1736 { 1896 - return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ? 
1897 - MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 1898 - inner_headers) : 1899 - MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 1900 - outer_headers); 1737 + struct flow_rule *rule = flow_cls_offload_flow_rule(f); 1738 + struct netlink_ext_ack *extack = f->common.extack; 1739 + struct mlx5_esw_flow_attr *attr = flow->esw_attr; 1740 + struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts; 1741 + struct flow_match_enc_opts enc_opts_match; 1742 + struct mlx5_rep_uplink_priv *uplink_priv; 1743 + struct mlx5e_rep_priv *uplink_rpriv; 1744 + struct tunnel_match_key tunnel_key; 1745 + bool enc_opts_is_dont_care = true; 1746 + u32 tun_id, enc_opts_id = 0; 1747 + struct mlx5_eswitch *esw; 1748 + u32 value, mask; 1749 + int err; 1750 + 1751 + esw = priv->mdev->priv.eswitch; 1752 + uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); 1753 + uplink_priv = &uplink_rpriv->uplink_priv; 1754 + 1755 + memset(&tunnel_key, 0, sizeof(tunnel_key)); 1756 + COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, 1757 + &tunnel_key.enc_control); 1758 + if (tunnel_key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) 1759 + COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, 1760 + &tunnel_key.enc_ipv4); 1761 + else 1762 + COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, 1763 + &tunnel_key.enc_ipv6); 1764 + COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IP, &tunnel_key.enc_ip); 1765 + COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, 1766 + &tunnel_key.enc_tp); 1767 + COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, 1768 + &tunnel_key.enc_key_id); 1769 + tunnel_key.filter_ifindex = filter_dev->ifindex; 1770 + 1771 + err = mapping_add(uplink_priv->tunnel_mapping, &tunnel_key, &tun_id); 1772 + if (err) 1773 + return err; 1774 + 1775 + flow_rule_match_enc_opts(rule, &enc_opts_match); 1776 + err = enc_opts_is_dont_care_or_full_match(priv, 1777 + enc_opts_match.mask, 1778 + extack, 1779 + &enc_opts_is_dont_care); 1780 + if (err) 1781 + goto err_enc_opts; 1782 + 1783 + if 
(!enc_opts_is_dont_care) { 1784 + err = mapping_add(uplink_priv->tunnel_enc_opts_mapping, 1785 + enc_opts_match.key, &enc_opts_id); 1786 + if (err) 1787 + goto err_enc_opts; 1788 + } 1789 + 1790 + value = tun_id << ENC_OPTS_BITS | enc_opts_id; 1791 + mask = enc_opts_id ? TUNNEL_ID_MASK : 1792 + (TUNNEL_ID_MASK & ~ENC_OPTS_BITS_MASK); 1793 + 1794 + if (attr->chain) { 1795 + mlx5e_tc_match_to_reg_match(&attr->parse_attr->spec, 1796 + TUNNEL_TO_REG, value, mask); 1797 + } else { 1798 + mod_hdr_acts = &attr->parse_attr->mod_hdr_acts; 1799 + err = mlx5e_tc_match_to_reg_set(priv->mdev, 1800 + mod_hdr_acts, 1801 + TUNNEL_TO_REG, value); 1802 + if (err) 1803 + goto err_set; 1804 + 1805 + attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; 1806 + } 1807 + 1808 + flow->tunnel_id = value; 1809 + return 0; 1810 + 1811 + err_set: 1812 + if (enc_opts_id) 1813 + mapping_remove(uplink_priv->tunnel_enc_opts_mapping, 1814 + enc_opts_id); 1815 + err_enc_opts: 1816 + mapping_remove(uplink_priv->tunnel_mapping, tun_id); 1817 + return err; 1818 + } 1819 + 1820 + static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow) 1821 + { 1822 + u32 enc_opts_id = flow->tunnel_id & ENC_OPTS_BITS_MASK; 1823 + u32 tun_id = flow->tunnel_id >> ENC_OPTS_BITS; 1824 + struct mlx5_rep_uplink_priv *uplink_priv; 1825 + struct mlx5e_rep_priv *uplink_rpriv; 1826 + struct mlx5_eswitch *esw; 1827 + 1828 + esw = flow->priv->mdev->priv.eswitch; 1829 + uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); 1830 + uplink_priv = &uplink_rpriv->uplink_priv; 1831 + 1832 + if (tun_id) 1833 + mapping_remove(uplink_priv->tunnel_mapping, tun_id); 1834 + if (enc_opts_id) 1835 + mapping_remove(uplink_priv->tunnel_enc_opts_mapping, 1836 + enc_opts_id); 1837 + } 1838 + 1839 + static int parse_tunnel_attr(struct mlx5e_priv *priv, 1840 + struct mlx5e_tc_flow *flow, 1841 + struct mlx5_flow_spec *spec, 1842 + struct flow_cls_offload *f, 1843 + struct net_device *filter_dev, 1844 + u8 *match_level, 1845 + bool 
*match_inner) 1846 + { 1847 + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 1848 + struct netlink_ext_ack *extack = f->common.extack; 1849 + bool needs_mapping, sets_mapping; 1850 + int err; 1851 + 1852 + if (!mlx5e_is_eswitch_flow(flow)) 1853 + return -EOPNOTSUPP; 1854 + 1855 + needs_mapping = !!flow->esw_attr->chain; 1856 + sets_mapping = !flow->esw_attr->chain && flow_has_tc_fwd_action(f); 1857 + *match_inner = !needs_mapping; 1858 + 1859 + if ((needs_mapping || sets_mapping) && 1860 + !mlx5_eswitch_vport_match_metadata_enabled(esw)) { 1861 + NL_SET_ERR_MSG(extack, 1862 + "Chains on tunnel devices isn't supported without register metadata support"); 1863 + netdev_warn(priv->netdev, 1864 + "Chains on tunnel devices isn't supported without register metadata support"); 1865 + return -EOPNOTSUPP; 1866 + } 1867 + 1868 + if (!flow->esw_attr->chain) { 1869 + err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f, 1870 + match_level); 1871 + if (err) { 1872 + NL_SET_ERR_MSG_MOD(extack, 1873 + "Failed to parse tunnel attributes"); 1874 + netdev_warn(priv->netdev, 1875 + "Failed to parse tunnel attributes"); 1876 + return err; 1877 + } 1878 + 1879 + flow->esw_attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP; 1880 + } 1881 + 1882 + if (!needs_mapping && !sets_mapping) 1883 + return 0; 1884 + 1885 + return mlx5e_get_flow_tunnel_id(priv, flow, f, filter_dev); 1886 + } 1887 + 1888 + static void *get_match_inner_headers_criteria(struct mlx5_flow_spec *spec) 1889 + { 1890 + return MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 1891 + inner_headers); 1892 + } 1893 + 1894 + static void *get_match_inner_headers_value(struct mlx5_flow_spec *spec) 1895 + { 1896 + return MLX5_ADDR_OF(fte_match_param, spec->match_value, 1897 + inner_headers); 1898 + } 1899 + 1900 + static void *get_match_outer_headers_criteria(struct mlx5_flow_spec *spec) 1901 + { 1902 + return MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 1903 + outer_headers); 1904 + } 1905 + 1906 + static void 
*get_match_outer_headers_value(struct mlx5_flow_spec *spec) 1907 + { 1908 + return MLX5_ADDR_OF(fte_match_param, spec->match_value, 1909 + outer_headers); 1901 1910 } 1902 1911 1903 1912 static void *get_match_headers_value(u32 flags, 1904 1913 struct mlx5_flow_spec *spec) 1905 1914 { 1906 1915 return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ? 1907 - MLX5_ADDR_OF(fte_match_param, spec->match_value, 1908 - inner_headers) : 1909 - MLX5_ADDR_OF(fte_match_param, spec->match_value, 1910 - outer_headers); 1916 + get_match_inner_headers_value(spec) : 1917 + get_match_outer_headers_value(spec); 1918 + } 1919 + 1920 + static void *get_match_headers_criteria(u32 flags, 1921 + struct mlx5_flow_spec *spec) 1922 + { 1923 + return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ? 1924 + get_match_inner_headers_criteria(spec) : 1925 + get_match_outer_headers_criteria(spec); 1911 1926 } 1912 1927 1913 1928 static int mlx5e_flower_parse_meta(struct net_device *filter_dev, ··· 2062 1843 } 2063 1844 2064 1845 static int __parse_cls_flower(struct mlx5e_priv *priv, 1846 + struct mlx5e_tc_flow *flow, 2065 1847 struct mlx5_flow_spec *spec, 2066 1848 struct flow_cls_offload *f, 2067 1849 struct net_device *filter_dev, ··· 2112 1892 } 2113 1893 2114 1894 if (mlx5e_get_tc_tun(filter_dev)) { 2115 - if (parse_tunnel_attr(priv, spec, f, filter_dev, 2116 - outer_match_level)) 2117 - return -EOPNOTSUPP; 1895 + bool match_inner = false; 2118 1896 2119 - /* At this point, header pointers should point to the inner 2120 - * headers, outer header were already set by parse_tunnel_attr 2121 - */ 2122 - match_level = inner_match_level; 2123 - headers_c = get_match_headers_criteria(MLX5_FLOW_CONTEXT_ACTION_DECAP, 2124 - spec); 2125 - headers_v = get_match_headers_value(MLX5_FLOW_CONTEXT_ACTION_DECAP, 2126 - spec); 1897 + err = parse_tunnel_attr(priv, flow, spec, f, filter_dev, 1898 + outer_match_level, &match_inner); 1899 + if (err) 1900 + return err; 1901 + 1902 + if (match_inner) { 1903 + /* header pointers 
should point to the inner headers 1904 + * if the packet was decapsulated already. 1905 + * outer headers are set by parse_tunnel_attr. 1906 + */ 1907 + match_level = inner_match_level; 1908 + headers_c = get_match_inner_headers_criteria(spec); 1909 + headers_v = get_match_inner_headers_value(spec); 1910 + } 2127 1911 } 2128 1912 2129 1913 err = mlx5e_flower_parse_meta(filter_dev, f); ··· 2444 2220 inner_match_level = MLX5_MATCH_NONE; 2445 2221 outer_match_level = MLX5_MATCH_NONE; 2446 2222 2447 - err = __parse_cls_flower(priv, spec, f, filter_dev, &inner_match_level, 2448 - &outer_match_level); 2223 + err = __parse_cls_flower(priv, flow, spec, f, filter_dev, 2224 + &inner_match_level, &outer_match_level); 2449 2225 non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ? 2450 2226 outer_match_level : inner_match_level; 2451 2227 ··· 2605 2381 OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest, 0, udp_dport), 2606 2382 }; 2607 2383 2608 - /* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed at 2609 - * max from the SW pedit action. On success, attr->num_mod_hdr_actions 2610 - * says how many HW actions were actually parsed. 
2611 - */ 2612 - static int offload_pedit_fields(struct pedit_headers_action *hdrs, 2384 + static int offload_pedit_fields(struct mlx5e_priv *priv, 2385 + int namespace, 2386 + struct pedit_headers_action *hdrs, 2613 2387 struct mlx5e_tc_flow_parse_attr *parse_attr, 2614 2388 u32 *action_flags, 2615 2389 struct netlink_ext_ack *extack) 2616 2390 { 2617 2391 struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals; 2618 - int i, action_size, nactions, max_actions, first, last, next_z; 2392 + int i, action_size, first, last, next_z; 2619 2393 void *headers_c, *headers_v, *action, *vals_p; 2620 2394 u32 *s_masks_p, *a_masks_p, s_mask, a_mask; 2395 + struct mlx5e_tc_mod_hdr_acts *mod_acts; 2621 2396 struct mlx5_fields *f; 2622 2397 unsigned long mask; 2623 2398 __be32 mask_be32; 2624 2399 __be16 mask_be16; 2400 + int err; 2625 2401 u8 cmd; 2626 2402 2403 + mod_acts = &parse_attr->mod_hdr_acts; 2627 2404 headers_c = get_match_headers_criteria(*action_flags, &parse_attr->spec); 2628 2405 headers_v = get_match_headers_value(*action_flags, &parse_attr->spec); 2629 2406 ··· 2634 2409 add_vals = &hdrs[1].vals; 2635 2410 2636 2411 action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto); 2637 - action = parse_attr->mod_hdr_actions + 2638 - parse_attr->num_mod_hdr_actions * action_size; 2639 - 2640 - max_actions = parse_attr->max_mod_hdr_actions; 2641 - nactions = parse_attr->num_mod_hdr_actions; 2642 2412 2643 2413 for (i = 0; i < ARRAY_SIZE(fields); i++) { 2644 2414 bool skip; ··· 2656 2436 NL_SET_ERR_MSG_MOD(extack, 2657 2437 "can't set and add to the same HW field"); 2658 2438 printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field); 2659 - return -EOPNOTSUPP; 2660 - } 2661 - 2662 - if (nactions == max_actions) { 2663 - NL_SET_ERR_MSG_MOD(extack, 2664 - "too many pedit actions, can't offload"); 2665 - printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions); 2666 2439 return -EOPNOTSUPP; 2667 2440 } 2668 
2441 ··· 2705 2492 return -EOPNOTSUPP; 2706 2493 } 2707 2494 2495 + err = alloc_mod_hdr_actions(priv->mdev, namespace, mod_acts); 2496 + if (err) { 2497 + NL_SET_ERR_MSG_MOD(extack, 2498 + "too many pedit actions, can't offload"); 2499 + mlx5_core_warn(priv->mdev, 2500 + "mlx5: parsed %d pedit actions, can't do more\n", 2501 + mod_acts->num_actions); 2502 + return err; 2503 + } 2504 + 2505 + action = mod_acts->actions + 2506 + (mod_acts->num_actions * action_size); 2708 2507 MLX5_SET(set_action_in, action, action_type, cmd); 2709 2508 MLX5_SET(set_action_in, action, field, f->field); 2710 2509 ··· 2739 2514 else if (f->field_bsize == 8) 2740 2515 MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first); 2741 2516 2742 - action += action_size; 2743 - nactions++; 2517 + ++mod_acts->num_actions; 2744 2518 } 2745 2519 2746 - parse_attr->num_mod_hdr_actions = nactions; 2747 2520 return 0; 2748 2521 } 2749 2522 ··· 2754 2531 return MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_modify_header_actions); 2755 2532 } 2756 2533 2757 - static int alloc_mod_hdr_actions(struct mlx5e_priv *priv, 2758 - struct pedit_headers_action *hdrs, 2759 - int namespace, 2760 - struct mlx5e_tc_flow_parse_attr *parse_attr) 2534 + int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev, 2535 + int namespace, 2536 + struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts) 2761 2537 { 2762 - int nkeys, action_size, max_actions; 2538 + int action_size, new_num_actions, max_hw_actions; 2539 + size_t new_sz, old_sz; 2540 + void *ret; 2763 2541 2764 - nkeys = hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits + 2765 - hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits; 2542 + if (mod_hdr_acts->num_actions < mod_hdr_acts->max_actions) 2543 + return 0; 2544 + 2766 2545 action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto); 2767 2546 2768 - max_actions = mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace); 2769 - /* can get up to crazingly 16 HW actions in 32 bits pedit SW key */ 2770 - max_actions = min(max_actions, nkeys * 16); 
2547 + max_hw_actions = mlx5e_flow_namespace_max_modify_action(mdev, 2548 + namespace); 2549 + new_num_actions = min(max_hw_actions, 2550 + mod_hdr_acts->actions ? 2551 + mod_hdr_acts->max_actions * 2 : 1); 2552 + if (mod_hdr_acts->max_actions == new_num_actions) 2553 + return -ENOSPC; 2771 2554 2772 - parse_attr->mod_hdr_actions = kcalloc(max_actions, action_size, GFP_KERNEL); 2773 - if (!parse_attr->mod_hdr_actions) 2555 + new_sz = action_size * new_num_actions; 2556 + old_sz = mod_hdr_acts->max_actions * action_size; 2557 + ret = krealloc(mod_hdr_acts->actions, new_sz, GFP_KERNEL); 2558 + if (!ret) 2774 2559 return -ENOMEM; 2775 2560 2776 - parse_attr->max_mod_hdr_actions = max_actions; 2561 + memset(ret + old_sz, 0, new_sz - old_sz); 2562 + mod_hdr_acts->actions = ret; 2563 + mod_hdr_acts->max_actions = new_num_actions; 2564 + 2777 2565 return 0; 2566 + } 2567 + 2568 + void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts) 2569 + { 2570 + kfree(mod_hdr_acts->actions); 2571 + mod_hdr_acts->actions = NULL; 2572 + mod_hdr_acts->num_actions = 0; 2573 + mod_hdr_acts->max_actions = 0; 2778 2574 } 2779 2575 2780 2576 static const struct pedit_headers zero_masks = {}; ··· 2847 2605 int err; 2848 2606 u8 cmd; 2849 2607 2850 - if (!parse_attr->mod_hdr_actions) { 2851 - err = alloc_mod_hdr_actions(priv, hdrs, namespace, parse_attr); 2852 - if (err) 2853 - goto out_err; 2854 - } 2855 - 2856 - err = offload_pedit_fields(hdrs, parse_attr, action_flags, extack); 2608 + err = offload_pedit_fields(priv, namespace, hdrs, parse_attr, 2609 + action_flags, extack); 2857 2610 if (err < 0) 2858 2611 goto out_dealloc_parsed_actions; 2859 2612 ··· 2868 2631 return 0; 2869 2632 2870 2633 out_dealloc_parsed_actions: 2871 - kfree(parse_attr->mod_hdr_actions); 2872 - out_err: 2634 + dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts); 2873 2635 return err; 2874 2636 } 2875 2637 ··· 2997 2761 struct mlx5e_tc_flow *flow, 2998 2762 struct netlink_ext_ack *extack) 2999 2763 { 
2764 + struct net_device *filter_dev = parse_attr->filter_dev; 2765 + bool drop_action, pop_action; 3000 2766 u32 actions; 3001 2767 3002 2768 if (mlx5e_is_eswitch_flow(flow)) ··· 3006 2768 else 3007 2769 actions = flow->nic_attr->action; 3008 2770 3009 - if (flow_flag_test(flow, EGRESS) && 3010 - !((actions & MLX5_FLOW_CONTEXT_ACTION_DECAP) || 3011 - (actions & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) || 3012 - (actions & MLX5_FLOW_CONTEXT_ACTION_DROP))) 3013 - return false; 2771 + drop_action = actions & MLX5_FLOW_CONTEXT_ACTION_DROP; 2772 + pop_action = actions & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; 2773 + 2774 + if (flow_flag_test(flow, EGRESS) && !drop_action) { 2775 + /* We only support filters on tunnel device, or on vlan 2776 + * devices if they have pop/drop action 2777 + */ 2778 + if (!mlx5e_get_tc_tun(filter_dev) || 2779 + (is_vlan_dev(filter_dev) && !pop_action)) 2780 + return false; 2781 + } 3014 2782 3015 2783 if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) 3016 2784 return modify_header_match_supported(&parse_attr->spec, ··· 3212 2968 /* in case all pedit actions are skipped, remove the MOD_HDR 3213 2969 * flag. 
3214 2970 */ 3215 - if (parse_attr->num_mod_hdr_actions == 0) { 2971 + if (parse_attr->mod_hdr_acts.num_actions == 0) { 3216 2972 action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; 3217 - kfree(parse_attr->mod_hdr_actions); 2973 + dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts); 3218 2974 } 3219 2975 } 3220 2976 ··· 3610 3366 int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS]; 3611 3367 bool ft_flow = mlx5e_is_ft_flow(flow); 3612 3368 const struct flow_action_entry *act; 3369 + bool encap = false, decap = false; 3370 + u32 action = attr->action; 3613 3371 int err, i, if_count = 0; 3614 - bool encap = false; 3615 - u32 action = 0; 3616 3372 3617 3373 if (!flow_action_has_entries(flow_action)) 3618 3374 return -EINVAL; ··· 3815 3571 attr->split_count = attr->out_count; 3816 3572 break; 3817 3573 case FLOW_ACTION_TUNNEL_DECAP: 3818 - action |= MLX5_FLOW_CONTEXT_ACTION_DECAP; 3574 + decap = true; 3819 3575 break; 3820 3576 case FLOW_ACTION_GOTO: 3821 3577 err = mlx5_validate_goto_chain(esw, flow, act, action, ··· 3854 3610 * flag. we might have set split_count either by pedit or 3855 3611 * pop/push. if there is no pop/push either, reset it too. 3856 3612 */ 3857 - if (parse_attr->num_mod_hdr_actions == 0) { 3613 + if (parse_attr->mod_hdr_acts.num_actions == 0) { 3858 3614 action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; 3859 - kfree(parse_attr->mod_hdr_actions); 3615 + dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts); 3860 3616 if (!((action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) || 3861 3617 (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH))) 3862 3618 attr->split_count = 0; ··· 3868 3624 return -EOPNOTSUPP; 3869 3625 3870 3626 if (attr->dest_chain) { 3627 + if (decap) { 3628 + /* It can be supported if we'll create a mapping for 3629 + * the tunnel device only (without tunnel), and set 3630 + * this tunnel id with this decap flow. 3631 + * 3632 + * On restore (miss), we'll just set this saved tunnel 3633 + * device. 
3634 + */ 3635 + 3636 + NL_SET_ERR_MSG(extack, 3637 + "Decap with goto isn't supported"); 3638 + netdev_warn(priv->netdev, 3639 + "Decap with goto isn't supported"); 3640 + return -EOPNOTSUPP; 3641 + } 3642 + 3871 3643 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { 3872 3644 NL_SET_ERR_MSG_MOD(extack, 3873 3645 "Mirroring goto chain rules isn't supported"); ··· 4613 4353 4614 4354 int mlx5e_tc_esw_init(struct rhashtable *tc_ht) 4615 4355 { 4616 - return rhashtable_init(tc_ht, &tc_ht_params); 4356 + const size_t sz_enc_opts = sizeof(struct flow_dissector_key_enc_opts); 4357 + struct mlx5_rep_uplink_priv *uplink_priv; 4358 + struct mlx5e_rep_priv *priv; 4359 + struct mapping_ctx *mapping; 4360 + int err; 4361 + 4362 + uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht); 4363 + priv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv); 4364 + 4365 + mapping = mapping_create(sizeof(struct tunnel_match_key), 4366 + TUNNEL_INFO_BITS_MASK, true); 4367 + if (IS_ERR(mapping)) { 4368 + err = PTR_ERR(mapping); 4369 + goto err_tun_mapping; 4370 + } 4371 + uplink_priv->tunnel_mapping = mapping; 4372 + 4373 + mapping = mapping_create(sz_enc_opts, ENC_OPTS_BITS_MASK, true); 4374 + if (IS_ERR(mapping)) { 4375 + err = PTR_ERR(mapping); 4376 + goto err_enc_opts_mapping; 4377 + } 4378 + uplink_priv->tunnel_enc_opts_mapping = mapping; 4379 + 4380 + err = rhashtable_init(tc_ht, &tc_ht_params); 4381 + if (err) 4382 + goto err_ht_init; 4383 + 4384 + return err; 4385 + 4386 + err_ht_init: 4387 + mapping_destroy(uplink_priv->tunnel_enc_opts_mapping); 4388 + err_enc_opts_mapping: 4389 + mapping_destroy(uplink_priv->tunnel_mapping); 4390 + err_tun_mapping: 4391 + netdev_warn(priv->netdev, 4392 + "Failed to initialize tc (eswitch), err: %d", err); 4393 + return err; 4617 4394 } 4618 4395 4619 4396 void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht) 4620 4397 { 4398 + struct mlx5_rep_uplink_priv *uplink_priv; 4399 + 4621 4400 
rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL); 4401 + 4402 + uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht); 4403 + mapping_destroy(uplink_priv->tunnel_enc_opts_mapping); 4404 + mapping_destroy(uplink_priv->tunnel_mapping); 4622 4405 } 4623 4406 4624 4407 int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags) ··· 4692 4389 unready_flow_del(flow); 4693 4390 } 4694 4391 mutex_unlock(&rpriv->unready_flows_lock); 4392 + } 4393 + 4394 + #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) 4395 + static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb, 4396 + struct mlx5e_tc_update_priv *tc_priv, 4397 + u32 tunnel_id) 4398 + { 4399 + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 4400 + struct flow_dissector_key_enc_opts enc_opts = {}; 4401 + struct mlx5_rep_uplink_priv *uplink_priv; 4402 + struct mlx5e_rep_priv *uplink_rpriv; 4403 + struct metadata_dst *tun_dst; 4404 + struct tunnel_match_key key; 4405 + u32 tun_id, enc_opts_id; 4406 + struct net_device *dev; 4407 + int err; 4408 + 4409 + enc_opts_id = tunnel_id & ENC_OPTS_BITS_MASK; 4410 + tun_id = tunnel_id >> ENC_OPTS_BITS; 4411 + 4412 + if (!tun_id) 4413 + return true; 4414 + 4415 + uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); 4416 + uplink_priv = &uplink_rpriv->uplink_priv; 4417 + 4418 + err = mapping_find(uplink_priv->tunnel_mapping, tun_id, &key); 4419 + if (err) { 4420 + WARN_ON_ONCE(true); 4421 + netdev_dbg(priv->netdev, 4422 + "Couldn't find tunnel for tun_id: %d, err: %d\n", 4423 + tun_id, err); 4424 + return false; 4425 + } 4426 + 4427 + if (enc_opts_id) { 4428 + err = mapping_find(uplink_priv->tunnel_enc_opts_mapping, 4429 + enc_opts_id, &enc_opts); 4430 + if (err) { 4431 + netdev_dbg(priv->netdev, 4432 + "Couldn't find tunnel (opts) for tun_id: %d, err: %d\n", 4433 + enc_opts_id, err); 4434 + return false; 4435 + } 4436 + } 4437 + 4438 + tun_dst = tun_rx_dst(enc_opts.len); 4439 + if (!tun_dst) { 4440 + WARN_ON_ONCE(true); 4441 
+ return false; 4442 + } 4443 + 4444 + ip_tunnel_key_init(&tun_dst->u.tun_info.key, 4445 + key.enc_ipv4.src, key.enc_ipv4.dst, 4446 + key.enc_ip.tos, key.enc_ip.ttl, 4447 + 0, /* label */ 4448 + key.enc_tp.src, key.enc_tp.dst, 4449 + key32_to_tunnel_id(key.enc_key_id.keyid), 4450 + TUNNEL_KEY); 4451 + 4452 + if (enc_opts.len) 4453 + ip_tunnel_info_opts_set(&tun_dst->u.tun_info, enc_opts.data, 4454 + enc_opts.len, enc_opts.dst_opt_type); 4455 + 4456 + skb_dst_set(skb, (struct dst_entry *)tun_dst); 4457 + dev = dev_get_by_index(&init_net, key.filter_ifindex); 4458 + if (!dev) { 4459 + netdev_dbg(priv->netdev, 4460 + "Couldn't find tunnel device with ifindex: %d\n", 4461 + key.filter_ifindex); 4462 + return false; 4463 + } 4464 + 4465 + /* Set tun_dev so we do dev_put() after datapath */ 4466 + tc_priv->tun_dev = dev; 4467 + 4468 + skb->dev = dev; 4469 + 4470 + return true; 4471 + } 4472 + #endif /* CONFIG_NET_TC_SKB_EXT */ 4473 + 4474 + bool mlx5e_tc_rep_update_skb(struct mlx5_cqe64 *cqe, 4475 + struct sk_buff *skb, 4476 + struct mlx5e_tc_update_priv *tc_priv) 4477 + { 4478 + #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) 4479 + u32 chain = 0, reg_c0, reg_c1, tunnel_id; 4480 + struct tc_skb_ext *tc_skb_ext; 4481 + struct mlx5_eswitch *esw; 4482 + struct mlx5e_priv *priv; 4483 + int tunnel_moffset; 4484 + int err; 4485 + 4486 + reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK); 4487 + if (reg_c0 == MLX5_FS_DEFAULT_FLOW_TAG) 4488 + reg_c0 = 0; 4489 + reg_c1 = be32_to_cpu(cqe->imm_inval_pkey); 4490 + 4491 + if (!reg_c0) 4492 + return true; 4493 + 4494 + priv = netdev_priv(skb->dev); 4495 + esw = priv->mdev->priv.eswitch; 4496 + 4497 + err = mlx5_eswitch_get_chain_for_tag(esw, reg_c0, &chain); 4498 + if (err) { 4499 + netdev_dbg(priv->netdev, 4500 + "Couldn't find chain for chain tag: %d, err: %d\n", 4501 + reg_c0, err); 4502 + return false; 4503 + } 4504 + 4505 + if (chain) { 4506 + tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT); 4507 + if (!tc_skb_ext) { 4508 + 
WARN_ON(1); 4509 + return false; 4510 + } 4511 + 4512 + tc_skb_ext->chain = chain; 4513 + } 4514 + 4515 + tunnel_moffset = mlx5e_tc_attr_to_reg_mappings[TUNNEL_TO_REG].moffset; 4516 + tunnel_id = reg_c1 >> (8 * tunnel_moffset); 4517 + return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id); 4518 + #endif /* CONFIG_NET_TC_SKB_EXT */ 4519 + 4520 + return true; 4521 + } 4522 + 4523 + void mlx5_tc_rep_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv) 4524 + { 4525 + if (tc_priv->tun_dev) 4526 + dev_put(tc_priv->tun_dev); 4695 4527 }
+45
drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
··· 91 91 92 92 void mlx5e_tc_reoffload_flows_work(struct work_struct *work); 93 93 94 + enum mlx5e_tc_attr_to_reg { 95 + CHAIN_TO_REG, 96 + TUNNEL_TO_REG, 97 + }; 98 + 99 + struct mlx5e_tc_attr_to_reg_mapping { 100 + int mfield; /* rewrite field */ 101 + int moffset; /* offset of mfield */ 102 + int mlen; /* bytes to rewrite/match */ 103 + 104 + int soffset; /* offset of spec for match */ 105 + }; 106 + 107 + extern struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[]; 108 + 94 109 bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv, 95 110 struct net_device *out_dev); 111 + 112 + struct mlx5e_tc_update_priv { 113 + struct net_device *tun_dev; 114 + }; 115 + 116 + bool mlx5e_tc_rep_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb, 117 + struct mlx5e_tc_update_priv *tc_priv); 118 + 119 + void mlx5_tc_rep_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv); 120 + 121 + struct mlx5e_tc_mod_hdr_acts { 122 + int num_actions; 123 + int max_actions; 124 + void *actions; 125 + }; 126 + 127 + int mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev, 128 + struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts, 129 + enum mlx5e_tc_attr_to_reg type, 130 + u32 data); 131 + 132 + void mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec, 133 + enum mlx5e_tc_attr_to_reg type, 134 + u32 data, 135 + u32 mask); 136 + 137 + int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev, 138 + int namespace, 139 + struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts); 140 + void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts); 96 141 97 142 #else /* CONFIG_MLX5_ESWITCH */ 98 143 static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
+15
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
··· 197 197 }; 198 198 199 199 struct mlx5_esw_offload { 200 + struct mlx5_flow_table *ft_offloads_restore; 201 + struct mlx5_flow_group *restore_group; 202 + struct mlx5_modify_hdr *restore_copy_hdr_id; 203 + 200 204 struct mlx5_flow_table *ft_offloads; 201 205 struct mlx5_flow_group *vport_rx_group; 202 206 struct mlx5_eswitch_rep *vport_reps; ··· 640 636 int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw); 641 637 void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw); 642 638 639 + struct mlx5_flow_handle * 640 + esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag); 641 + u32 642 + esw_get_max_restore_tag(struct mlx5_eswitch *esw); 643 + 643 644 #else /* CONFIG_MLX5_ESWITCH */ 644 645 /* eswitch API stubs */ 645 646 static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; } ··· 659 650 } 660 651 661 652 static inline void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs) {} 653 + 654 + static struct mlx5_flow_handle * 655 + esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag) 656 + { 657 + return ERR_PTR(-EOPNOTSUPP); 658 + } 662 659 663 660 #endif /* CONFIG_MLX5_ESWITCH */ 664 661
+220 -21
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
··· 260 260 attr->in_rep->vport)); 261 261 262 262 misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2); 263 - MLX5_SET_TO_ONES(fte_match_set_misc2, misc2, metadata_reg_c_0); 263 + MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 264 + mlx5_eswitch_get_vport_metadata_mask()); 264 265 265 266 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2; 266 267 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); ··· 782 781 esw_vport_context.fdb_to_vport_reg_c_id); 783 782 784 783 if (enable) 785 - fdb_to_vport_reg_c_id |= MLX5_FDB_TO_VPORT_REG_C_0; 784 + fdb_to_vport_reg_c_id |= MLX5_FDB_TO_VPORT_REG_C_0 | 785 + MLX5_FDB_TO_VPORT_REG_C_1; 786 786 else 787 - fdb_to_vport_reg_c_id &= ~MLX5_FDB_TO_VPORT_REG_C_0; 787 + fdb_to_vport_reg_c_id &= ~(MLX5_FDB_TO_VPORT_REG_C_0 | 788 + MLX5_FDB_TO_VPORT_REG_C_1); 788 789 789 790 MLX5_SET(modify_esw_vport_context_in, in, 790 791 esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id); ··· 808 805 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) { 809 806 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 810 807 misc_parameters_2); 811 - MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0); 808 + MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, 809 + mlx5_eswitch_get_vport_metadata_mask()); 812 810 813 811 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; 814 812 } else { ··· 1024 1020 return err; 1025 1021 } 1026 1022 1023 + struct mlx5_flow_handle * 1024 + esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag) 1025 + { 1026 + struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, }; 1027 + struct mlx5_flow_table *ft = esw->offloads.ft_offloads_restore; 1028 + struct mlx5_flow_context *flow_context; 1029 + struct mlx5_flow_handle *flow_rule; 1030 + struct mlx5_flow_destination dest; 1031 + struct mlx5_flow_spec *spec; 1032 + void *misc; 1033 + 1034 + spec = kzalloc(sizeof(*spec), GFP_KERNEL); 1035 + if (!spec) 1036 + 
return ERR_PTR(-ENOMEM); 1037 + 1038 + misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 1039 + misc_parameters_2); 1040 + MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, 1041 + ESW_CHAIN_TAG_METADATA_MASK); 1042 + misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, 1043 + misc_parameters_2); 1044 + MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag); 1045 + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; 1046 + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | 1047 + MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; 1048 + flow_act.modify_hdr = esw->offloads.restore_copy_hdr_id; 1049 + 1050 + flow_context = &spec->flow_context; 1051 + flow_context->flags |= FLOW_CONTEXT_HAS_TAG; 1052 + flow_context->flow_tag = tag; 1053 + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; 1054 + dest.ft = esw->offloads.ft_offloads; 1055 + 1056 + flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); 1057 + kfree(spec); 1058 + 1059 + if (IS_ERR(flow_rule)) 1060 + esw_warn(esw->dev, 1061 + "Failed to create restore rule for tag: %d, err(%d)\n", 1062 + tag, (int)PTR_ERR(flow_rule)); 1063 + 1064 + return flow_rule; 1065 + } 1066 + 1067 + u32 1068 + esw_get_max_restore_tag(struct mlx5_eswitch *esw) 1069 + { 1070 + return ESW_CHAIN_TAG_METADATA_MASK; 1071 + } 1072 + 1027 1073 #define MAX_PF_SQ 256 1028 1074 #define MAX_SQ_NVPORTS 32 1029 1075 ··· 1089 1035 match_criteria_enable, 1090 1036 MLX5_MATCH_MISC_PARAMETERS_2); 1091 1037 1092 - MLX5_SET_TO_ONES(fte_match_param, match_criteria, 1093 - misc_parameters_2.metadata_reg_c_0); 1038 + MLX5_SET(fte_match_param, match_criteria, 1039 + misc_parameters_2.metadata_reg_c_0, 1040 + mlx5_eswitch_get_vport_metadata_mask()); 1094 1041 } else { 1095 1042 MLX5_SET(create_flow_group_in, flow_group_in, 1096 1043 match_criteria_enable, ··· 1296 1241 } 1297 1242 1298 1243 ft_attr.max_fte = nvports + MLX5_ESW_MISS_FLOWS; 1244 + ft_attr.prio = 1; 1299 1245 1300 1246 ft_offloads = mlx5_create_flow_table(ns, &ft_attr); 
1301 1247 if (IS_ERR(ft_offloads)) { ··· 1374 1318 mlx5_eswitch_get_vport_metadata_for_match(esw, vport)); 1375 1319 1376 1320 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2); 1377 - MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0); 1321 + MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, 1322 + mlx5_eswitch_get_vport_metadata_mask()); 1378 1323 1379 1324 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; 1380 1325 } else { ··· 1400 1343 kvfree(spec); 1401 1344 return flow_rule; 1402 1345 } 1346 + 1403 1347 1404 1348 static int mlx5_eswitch_inline_mode_get(const struct mlx5_eswitch *esw, u8 *mode) 1405 1349 { ··· 1437 1379 out: 1438 1380 *mode = mlx5_mode; 1439 1381 return 0; 1382 + } 1383 + 1384 + static void esw_destroy_restore_table(struct mlx5_eswitch *esw) 1385 + { 1386 + struct mlx5_esw_offload *offloads = &esw->offloads; 1387 + 1388 + mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id); 1389 + mlx5_destroy_flow_group(offloads->restore_group); 1390 + mlx5_destroy_flow_table(offloads->ft_offloads_restore); 1391 + } 1392 + 1393 + static int esw_create_restore_table(struct mlx5_eswitch *esw) 1394 + { 1395 + u8 modact[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {}; 1396 + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 1397 + struct mlx5_flow_table_attr ft_attr = {}; 1398 + struct mlx5_core_dev *dev = esw->dev; 1399 + struct mlx5_flow_namespace *ns; 1400 + struct mlx5_modify_hdr *mod_hdr; 1401 + void *match_criteria, *misc; 1402 + struct mlx5_flow_table *ft; 1403 + struct mlx5_flow_group *g; 1404 + u32 *flow_group_in; 1405 + int err = 0; 1406 + 1407 + ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS); 1408 + if (!ns) { 1409 + esw_warn(esw->dev, "Failed to get offloads flow namespace\n"); 1410 + return -EOPNOTSUPP; 1411 + } 1412 + 1413 + flow_group_in = kvzalloc(inlen, GFP_KERNEL); 1414 + if (!flow_group_in) { 1415 + err = -ENOMEM; 1416 + goto out_free; 1417 + } 
1418 + 1419 + ft_attr.max_fte = 1 << ESW_CHAIN_TAG_METADATA_BITS; 1420 + ft = mlx5_create_flow_table(ns, &ft_attr); 1421 + if (IS_ERR(ft)) { 1422 + err = PTR_ERR(ft); 1423 + esw_warn(esw->dev, "Failed to create restore table, err %d\n", 1424 + err); 1425 + goto out_free; 1426 + } 1427 + 1428 + memset(flow_group_in, 0, inlen); 1429 + match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, 1430 + match_criteria); 1431 + misc = MLX5_ADDR_OF(fte_match_param, match_criteria, 1432 + misc_parameters_2); 1433 + 1434 + MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, 1435 + ESW_CHAIN_TAG_METADATA_MASK); 1436 + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); 1437 + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1438 + ft_attr.max_fte - 1); 1439 + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 1440 + MLX5_MATCH_MISC_PARAMETERS_2); 1441 + g = mlx5_create_flow_group(ft, flow_group_in); 1442 + if (IS_ERR(g)) { 1443 + err = PTR_ERR(g); 1444 + esw_warn(dev, "Failed to create restore flow group, err: %d\n", 1445 + err); 1446 + goto err_group; 1447 + } 1448 + 1449 + MLX5_SET(copy_action_in, modact, action_type, MLX5_ACTION_TYPE_COPY); 1450 + MLX5_SET(copy_action_in, modact, src_field, 1451 + MLX5_ACTION_IN_FIELD_METADATA_REG_C_1); 1452 + MLX5_SET(copy_action_in, modact, dst_field, 1453 + MLX5_ACTION_IN_FIELD_METADATA_REG_B); 1454 + mod_hdr = mlx5_modify_header_alloc(esw->dev, 1455 + MLX5_FLOW_NAMESPACE_KERNEL, 1, 1456 + modact); 1457 + if (IS_ERR(mod_hdr)) { 1458 + esw_warn(dev, "Failed to create restore mod header, err: %d\n", 1459 + err); 1460 + err = PTR_ERR(mod_hdr); 1461 + goto err_mod_hdr; 1462 + } 1463 + 1464 + esw->offloads.ft_offloads_restore = ft; 1465 + esw->offloads.restore_group = g; 1466 + esw->offloads.restore_copy_hdr_id = mod_hdr; 1467 + 1468 + return 0; 1469 + 1470 + err_mod_hdr: 1471 + mlx5_destroy_flow_group(g); 1472 + err_group: 1473 + mlx5_destroy_flow_table(ft); 1474 + out_free: 1475 + 
kvfree(flow_group_in); 1476 + 1477 + return err; 1440 1478 } 1441 1479 1442 1480 static int esw_offloads_start(struct mlx5_eswitch *esw, ··· 1979 1825 static const struct mlx5_flow_spec spec = {}; 1980 1826 struct mlx5_flow_act flow_act = {}; 1981 1827 int err = 0; 1828 + u32 key; 1829 + 1830 + key = mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport); 1831 + key >>= ESW_SOURCE_PORT_METADATA_OFFSET; 1982 1832 1983 1833 MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET); 1984 - MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_0); 1985 - MLX5_SET(set_action_in, action, data, 1986 - mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport)); 1834 + MLX5_SET(set_action_in, action, field, 1835 + MLX5_ACTION_IN_FIELD_METADATA_REG_C_0); 1836 + MLX5_SET(set_action_in, action, data, key); 1837 + MLX5_SET(set_action_in, action, offset, 1838 + ESW_SOURCE_PORT_METADATA_OFFSET); 1839 + MLX5_SET(set_action_in, action, length, 1840 + ESW_SOURCE_PORT_METADATA_BITS); 1987 1841 1988 1842 vport->ingress.offloads.modify_metadata = 1989 1843 mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS, ··· 2306 2144 if (err) 2307 2145 return err; 2308 2146 2147 + err = esw_create_offloads_table(esw, total_vports); 2148 + if (err) 2149 + goto create_offloads_err; 2150 + 2151 + err = esw_create_restore_table(esw); 2152 + if (err) 2153 + goto create_restore_err; 2154 + 2309 2155 err = esw_create_offloads_fdb_tables(esw, total_vports); 2310 2156 if (err) 2311 2157 goto create_fdb_err; 2312 - 2313 - err = esw_create_offloads_table(esw, total_vports); 2314 - if (err) 2315 - goto create_ft_err; 2316 2158 2317 2159 err = esw_create_vport_rx_group(esw, total_vports); 2318 2160 if (err) ··· 2328 2162 return 0; 2329 2163 2330 2164 create_fg_err: 2331 - esw_destroy_offloads_table(esw); 2332 - 2333 - create_ft_err: 2334 2165 esw_destroy_offloads_fdb_tables(esw); 2335 - 2336 2166 create_fdb_err: 2167 + esw_destroy_restore_table(esw); 2168 + 
create_restore_err: 2169 + esw_destroy_offloads_table(esw); 2170 + create_offloads_err: 2337 2171 esw_destroy_uplink_offloads_acl_tables(esw); 2338 2172 2339 2173 return err; ··· 2343 2177 { 2344 2178 mutex_destroy(&esw->fdb_table.offloads.vports.lock); 2345 2179 esw_destroy_vport_rx_group(esw); 2346 - esw_destroy_offloads_table(esw); 2347 2180 esw_destroy_offloads_fdb_tables(esw); 2181 + esw_destroy_restore_table(esw); 2182 + esw_destroy_offloads_table(esw); 2348 2183 esw_destroy_uplink_offloads_acl_tables(esw); 2349 2184 } 2350 2185 ··· 2837 2670 } 2838 2671 EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled); 2839 2672 2840 - u32 mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw, 2673 + u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw, 2841 2674 u16 vport_num) 2842 2675 { 2843 - return ((MLX5_CAP_GEN(esw->dev, vhca_id) & 0xffff) << 16) | vport_num; 2676 + u32 vport_num_mask = GENMASK(ESW_VPORT_BITS - 1, 0); 2677 + u32 vhca_id_mask = GENMASK(ESW_VHCA_ID_BITS - 1, 0); 2678 + u32 vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id); 2679 + u32 val; 2680 + 2681 + /* Make sure the vhca_id fits the ESW_VHCA_ID_BITS */ 2682 + WARN_ON_ONCE(vhca_id >= BIT(ESW_VHCA_ID_BITS)); 2683 + 2684 + /* Trim vhca_id to ESW_VHCA_ID_BITS */ 2685 + vhca_id &= vhca_id_mask; 2686 + 2687 + /* Make sure pf and ecpf map to end of ESW_VPORT_BITS range so they 2688 + * don't overlap with VF numbers, and themselves, after trimming. 2689 + */ 2690 + WARN_ON_ONCE((MLX5_VPORT_UPLINK & vport_num_mask) < 2691 + vport_num_mask - 1); 2692 + WARN_ON_ONCE((MLX5_VPORT_ECPF & vport_num_mask) < 2693 + vport_num_mask - 1); 2694 + WARN_ON_ONCE((MLX5_VPORT_UPLINK & vport_num_mask) == 2695 + (MLX5_VPORT_ECPF & vport_num_mask)); 2696 + 2697 + /* Make sure that the VF vport_num fits ESW_VPORT_BITS and don't 2698 + * overlap with pf and ecpf. 
2699 + */ 2700 + if (vport_num != MLX5_VPORT_UPLINK && 2701 + vport_num != MLX5_VPORT_ECPF) 2702 + WARN_ON_ONCE(vport_num >= vport_num_mask - 1); 2703 + 2704 + /* We can now trim vport_num to ESW_VPORT_BITS */ 2705 + vport_num &= vport_num_mask; 2706 + 2707 + val = (vhca_id << ESW_VPORT_BITS) | vport_num; 2708 + return val << (32 - ESW_SOURCE_PORT_METADATA_BITS); 2844 2709 } 2845 2710 EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
+127 -3
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c
··· 6 6 #include <linux/mlx5/fs.h> 7 7 8 8 #include "eswitch_offloads_chains.h" 9 + #include "en/mapping.h" 9 10 #include "mlx5_core.h" 10 11 #include "fs_core.h" 11 12 #include "eswitch.h" 12 13 #include "en.h" 14 + #include "en_tc.h" 13 15 14 16 #define esw_chains_priv(esw) ((esw)->fdb_table.offloads.esw_chains_priv) 15 17 #define esw_chains_lock(esw) (esw_chains_priv(esw)->lock) 16 18 #define esw_chains_ht(esw) (esw_chains_priv(esw)->chains_ht) 19 + #define esw_chains_mapping(esw) (esw_chains_priv(esw)->chains_mapping) 17 20 #define esw_prios_ht(esw) (esw_chains_priv(esw)->prios_ht) 18 21 #define fdb_pool_left(esw) (esw_chains_priv(esw)->fdb_left) 19 22 #define tc_slow_fdb(esw) ((esw)->fdb_table.offloads.slow_fdb) ··· 46 43 struct mutex lock; 47 44 48 45 struct mlx5_flow_table *tc_end_fdb; 46 + struct mapping_ctx *chains_mapping; 49 47 50 48 int fdb_left[ARRAY_SIZE(ESW_POOLS)]; 51 49 }; ··· 57 53 u32 chain; 58 54 59 55 int ref; 56 + int id; 60 57 61 58 struct mlx5_eswitch *esw; 62 59 struct list_head prios_list; 60 + struct mlx5_flow_handle *restore_rule; 61 + struct mlx5_modify_hdr *miss_modify_hdr; 63 62 }; 64 63 65 64 struct fdb_prio_key { ··· 268 261 mlx5_destroy_flow_table(fdb); 269 262 } 270 263 264 + static int 265 + create_fdb_chain_restore(struct fdb_chain *fdb_chain) 266 + { 267 + char modact[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)]; 268 + struct mlx5_eswitch *esw = fdb_chain->esw; 269 + struct mlx5_modify_hdr *mod_hdr; 270 + u32 index; 271 + int err; 272 + 273 + if (fdb_chain->chain == mlx5_esw_chains_get_ft_chain(esw)) 274 + return 0; 275 + 276 + err = mapping_add(esw_chains_mapping(esw), &fdb_chain->chain, &index); 277 + if (err) 278 + return err; 279 + if (index == MLX5_FS_DEFAULT_FLOW_TAG) { 280 + /* we got the special default flow tag id, so we won't know 281 + * if we actually marked the packet with the restore rule 282 + * we create. 283 + * 284 + * This case isn't possible with MLX5_FS_DEFAULT_FLOW_TAG = 0. 
285 + */ 286 + err = mapping_add(esw_chains_mapping(esw), 287 + &fdb_chain->chain, &index); 288 + mapping_remove(esw_chains_mapping(esw), 289 + MLX5_FS_DEFAULT_FLOW_TAG); 290 + if (err) 291 + return err; 292 + } 293 + 294 + fdb_chain->id = index; 295 + 296 + MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET); 297 + MLX5_SET(set_action_in, modact, field, 298 + mlx5e_tc_attr_to_reg_mappings[CHAIN_TO_REG].mfield); 299 + MLX5_SET(set_action_in, modact, offset, 300 + mlx5e_tc_attr_to_reg_mappings[CHAIN_TO_REG].moffset * 8); 301 + MLX5_SET(set_action_in, modact, length, 302 + mlx5e_tc_attr_to_reg_mappings[CHAIN_TO_REG].mlen * 8); 303 + MLX5_SET(set_action_in, modact, data, fdb_chain->id); 304 + mod_hdr = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_FDB, 305 + 1, modact); 306 + if (IS_ERR(mod_hdr)) { 307 + err = PTR_ERR(mod_hdr); 308 + goto err_mod_hdr; 309 + } 310 + fdb_chain->miss_modify_hdr = mod_hdr; 311 + 312 + fdb_chain->restore_rule = esw_add_restore_rule(esw, fdb_chain->id); 313 + if (IS_ERR(fdb_chain->restore_rule)) { 314 + err = PTR_ERR(fdb_chain->restore_rule); 315 + goto err_rule; 316 + } 317 + 318 + return 0; 319 + 320 + err_rule: 321 + mlx5_modify_header_dealloc(esw->dev, fdb_chain->miss_modify_hdr); 322 + err_mod_hdr: 323 + /* Datapath can't find this mapping, so we can safely remove it */ 324 + mapping_remove(esw_chains_mapping(esw), fdb_chain->id); 325 + return err; 326 + } 327 + 271 328 static struct fdb_chain * 272 329 mlx5_esw_chains_create_fdb_chain(struct mlx5_eswitch *esw, u32 chain) 273 330 { ··· 346 275 fdb_chain->chain = chain; 347 276 INIT_LIST_HEAD(&fdb_chain->prios_list); 348 277 278 + err = create_fdb_chain_restore(fdb_chain); 279 + if (err) 280 + goto err_restore; 281 + 349 282 err = rhashtable_insert_fast(&esw_chains_ht(esw), &fdb_chain->node, 350 283 chain_params); 351 284 if (err) ··· 358 283 return fdb_chain; 359 284 360 285 err_insert: 286 + if (fdb_chain->chain != mlx5_esw_chains_get_ft_chain(esw)) { 287 + 
mlx5_del_flow_rules(fdb_chain->restore_rule); 288 + mlx5_modify_header_dealloc(esw->dev, 289 + fdb_chain->miss_modify_hdr); 290 + } 291 + err_restore: 361 292 kvfree(fdb_chain); 362 293 return ERR_PTR(err); 363 294 } ··· 375 294 376 295 rhashtable_remove_fast(&esw_chains_ht(esw), &fdb_chain->node, 377 296 chain_params); 297 + 298 + if (fdb_chain->chain != mlx5_esw_chains_get_ft_chain(esw)) { 299 + mlx5_del_flow_rules(fdb_chain->restore_rule); 300 + mlx5_modify_header_dealloc(esw->dev, 301 + fdb_chain->miss_modify_hdr); 302 + 303 + mapping_remove(esw_chains_mapping(esw), fdb_chain->id); 304 + } 305 + 378 306 kvfree(fdb_chain); 379 307 } 380 308 ··· 406 316 } 407 317 408 318 static struct mlx5_flow_handle * 409 - mlx5_esw_chains_add_miss_rule(struct mlx5_flow_table *fdb, 319 + mlx5_esw_chains_add_miss_rule(struct fdb_chain *fdb_chain, 320 + struct mlx5_flow_table *fdb, 410 321 struct mlx5_flow_table *next_fdb) 411 322 { 412 323 static const struct mlx5_flow_spec spec = {}; 324 + struct mlx5_eswitch *esw = fdb_chain->esw; 413 325 struct mlx5_flow_destination dest = {}; 414 326 struct mlx5_flow_act act = {}; 415 327 ··· 419 327 act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 420 328 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; 421 329 dest.ft = next_fdb; 330 + 331 + if (fdb_chain->chain != mlx5_esw_chains_get_ft_chain(esw)) { 332 + act.modify_hdr = fdb_chain->miss_modify_hdr; 333 + act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; 334 + } 422 335 423 336 return mlx5_add_flow_rules(fdb, &spec, &act, &dest, 1); 424 337 } ··· 448 351 list_for_each_entry_continue_reverse(pos, 449 352 &fdb_chain->prios_list, 450 353 list) { 451 - miss_rules[n] = mlx5_esw_chains_add_miss_rule(pos->fdb, 354 + miss_rules[n] = mlx5_esw_chains_add_miss_rule(fdb_chain, 355 + pos->fdb, 452 356 next_fdb); 453 357 if (IS_ERR(miss_rules[n])) { 454 358 err = PTR_ERR(miss_rules[n]); ··· 563 465 } 564 466 565 467 /* Add miss rule to next_fdb */ 566 - miss_rule = mlx5_esw_chains_add_miss_rule(fdb, 
next_fdb); 468 + miss_rule = mlx5_esw_chains_add_miss_rule(fdb_chain, fdb, next_fdb); 567 469 if (IS_ERR(miss_rule)) { 568 470 err = PTR_ERR(miss_rule); 569 471 goto err_miss_rule; ··· 728 630 struct mlx5_esw_chains_priv *chains_priv; 729 631 struct mlx5_core_dev *dev = esw->dev; 730 632 u32 max_flow_counter, fdb_max; 633 + struct mapping_ctx *mapping; 731 634 int err; 732 635 733 636 chains_priv = kzalloc(sizeof(*chains_priv), GFP_KERNEL); ··· 765 666 if (err) 766 667 goto init_prios_ht_err; 767 668 669 + mapping = mapping_create(sizeof(u32), esw_get_max_restore_tag(esw), 670 + true); 671 + if (IS_ERR(mapping)) { 672 + err = PTR_ERR(mapping); 673 + goto mapping_err; 674 + } 675 + esw_chains_mapping(esw) = mapping; 676 + 768 677 mutex_init(&esw_chains_lock(esw)); 769 678 770 679 return 0; 771 680 681 + mapping_err: 682 + rhashtable_destroy(&esw_prios_ht(esw)); 772 683 init_prios_ht_err: 773 684 rhashtable_destroy(&esw_chains_ht(esw)); 774 685 init_chains_ht_err: ··· 790 681 mlx5_esw_chains_cleanup(struct mlx5_eswitch *esw) 791 682 { 792 683 mutex_destroy(&esw_chains_lock(esw)); 684 + mapping_destroy(esw_chains_mapping(esw)); 793 685 rhashtable_destroy(&esw_prios_ht(esw)); 794 686 rhashtable_destroy(&esw_chains_ht(esw)); 795 687 ··· 868 758 { 869 759 mlx5_esw_chains_close(esw); 870 760 mlx5_esw_chains_cleanup(esw); 761 + } 762 + 763 + int mlx5_eswitch_get_chain_for_tag(struct mlx5_eswitch *esw, u32 tag, 764 + u32 *chain) 765 + { 766 + int err; 767 + 768 + err = mapping_find(esw_chains_mapping(esw), tag, chain); 769 + if (err) { 770 + esw_warn(esw->dev, "Can't find chain for tag: %d\n", tag); 771 + return -ENOENT; 772 + } 773 + 774 + return 0; 871 775 }
+3 -1
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h
··· 28 28 int mlx5_esw_chains_create(struct mlx5_eswitch *esw); 29 29 void mlx5_esw_chains_destroy(struct mlx5_eswitch *esw); 30 30 31 - #endif /* __ML5_ESW_CHAINS_H__ */ 31 + int 32 + mlx5_eswitch_get_chain_for_tag(struct mlx5_eswitch *esw, u32 tag, u32 *chain); 32 33 34 + #endif /* __ML5_ESW_CHAINS_H__ */
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
··· 111 111 #define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1) 112 112 113 113 #define OFFLOADS_MAX_FT 2 114 - #define OFFLOADS_NUM_PRIOS 1 115 - #define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + 1) 114 + #define OFFLOADS_NUM_PRIOS 2 115 + #define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + OFFLOADS_NUM_PRIOS) 116 116 117 117 #define LAG_PRIO_NUM_LEVELS 1 118 118 #define LAG_NUM_PRIOS 1
+29 -2
include/linux/mlx5/eswitch.h
··· 71 71 mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev); 72 72 73 73 bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw); 74 - u32 mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw, 74 + 75 + /* Reg C0 usage: 76 + * Reg C0 = < ESW_VHCA_ID_BITS(8) | ESW_VPORT BITS(8) | ESW_CHAIN_TAG(16) > 77 + * 78 + * Highest 8 bits of the reg c0 is the vhca_id, next 8 bits is vport_num, 79 + * the rest (lowest 16 bits) is left for tc chain tag restoration. 80 + * VHCA_ID + VPORT comprise the SOURCE_PORT matching. 81 + */ 82 + #define ESW_VHCA_ID_BITS 8 83 + #define ESW_VPORT_BITS 8 84 + #define ESW_SOURCE_PORT_METADATA_BITS (ESW_VHCA_ID_BITS + ESW_VPORT_BITS) 85 + #define ESW_SOURCE_PORT_METADATA_OFFSET (32 - ESW_SOURCE_PORT_METADATA_BITS) 86 + #define ESW_CHAIN_TAG_METADATA_BITS (32 - ESW_SOURCE_PORT_METADATA_BITS) 87 + #define ESW_CHAIN_TAG_METADATA_MASK GENMASK(ESW_CHAIN_TAG_METADATA_BITS - 1,\ 88 + 0) 89 + 90 + static inline u32 mlx5_eswitch_get_vport_metadata_mask(void) 91 + { 92 + return GENMASK(31, 32 - ESW_SOURCE_PORT_METADATA_BITS); 93 + } 94 + 95 + u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw, 75 96 u16 vport_num); 76 97 u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw); 77 98 #else /* CONFIG_MLX5_ESWITCH */ ··· 115 94 }; 116 95 117 96 static inline u32 118 - mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw, 97 + mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw, 119 98 int vport_num) 120 99 { 121 100 return 0; 122 101 }; 102 + 103 + static inline u32 104 + mlx5_eswitch_get_vport_metadata_mask(void) 105 + { 106 + return 0; 107 + } 123 108 #endif /* CONFIG_MLX5_ESWITCH */ 124 109 125 110 #endif
+13
include/net/pkt_cls.h
··· 72 72 73 73 int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, 74 74 struct tcf_result *res, bool compat_mode); 75 + int tcf_classify_ingress(struct sk_buff *skb, 76 + const struct tcf_block *ingress_block, 77 + const struct tcf_proto *tp, struct tcf_result *res, 78 + bool compat_mode); 75 79 76 80 #else 77 81 static inline bool tcf_block_shared(struct tcf_block *block) ··· 137 133 { 138 134 return TC_ACT_UNSPEC; 139 135 } 136 + 137 + static inline int tcf_classify_ingress(struct sk_buff *skb, 138 + const struct tcf_block *ingress_block, 139 + const struct tcf_proto *tp, 140 + struct tcf_result *res, bool compat_mode) 141 + { 142 + return TC_ACT_UNSPEC; 143 + } 144 + 140 145 #endif 141 146 142 147 static inline unsigned long
+3
include/net/sch_generic.h
··· 1269 1269 */ 1270 1270 struct mini_Qdisc { 1271 1271 struct tcf_proto *filter_list; 1272 + struct tcf_block *block; 1272 1273 struct gnet_stats_basic_cpu __percpu *cpu_bstats; 1273 1274 struct gnet_stats_queue __percpu *cpu_qstats; 1274 1275 struct rcu_head rcu; ··· 1296 1295 struct tcf_proto *tp_head); 1297 1296 void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc, 1298 1297 struct mini_Qdisc __rcu **p_miniq); 1298 + void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp, 1299 + struct tcf_block *block); 1299 1300 1300 1301 static inline int skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res) 1301 1302 {
+2 -1
net/core/dev.c
··· 4848 4848 skb->tc_at_ingress = 1; 4849 4849 mini_qdisc_bstats_cpu_update(miniq, skb); 4850 4850 4851 - switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) { 4851 + switch (tcf_classify_ingress(skb, miniq->block, miniq->filter_list, 4852 + &cl_res, false)) { 4852 4853 case TC_ACT_OK: 4853 4854 case TC_ACT_RECLASSIFY: 4854 4855 skb->tc_index = TC_H_MIN(cl_res.classid);
+81 -17
net/sched/cls_api.c
··· 22 22 #include <linux/idr.h> 23 23 #include <linux/rhashtable.h> 24 24 #include <linux/jhash.h> 25 + #include <linux/rculist.h> 25 26 #include <net/net_namespace.h> 26 27 #include <net/sock.h> 27 28 #include <net/netlink.h> ··· 355 354 chain = kzalloc(sizeof(*chain), GFP_KERNEL); 356 355 if (!chain) 357 356 return NULL; 358 - list_add_tail(&chain->list, &block->chain_list); 357 + list_add_tail_rcu(&chain->list, &block->chain_list); 359 358 mutex_init(&chain->filter_chain_lock); 360 359 chain->block = block; 361 360 chain->index = chain_index; ··· 395 394 396 395 ASSERT_BLOCK_LOCKED(block); 397 396 398 - list_del(&chain->list); 397 + list_del_rcu(&chain->list); 399 398 if (!chain->index) 400 399 block->chain0.chain = NULL; 401 400 ··· 453 452 } 454 453 return NULL; 455 454 } 455 + 456 + #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) 457 + static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block, 458 + u32 chain_index) 459 + { 460 + struct tcf_chain *chain; 461 + 462 + list_for_each_entry_rcu(chain, &block->chain_list, list) { 463 + if (chain->index == chain_index) 464 + return chain; 465 + } 466 + return NULL; 467 + } 468 + #endif 456 469 457 470 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb, 458 471 u32 seq, u16 flags, int event, bool unicast); ··· 1574 1559 * to this qdisc, (optionally) tests for protocol and asks 1575 1560 * specific classifiers. 
1576 1561 */ 1577 - int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, 1578 - struct tcf_result *res, bool compat_mode) 1562 + static inline int __tcf_classify(struct sk_buff *skb, 1563 + const struct tcf_proto *tp, 1564 + const struct tcf_proto *orig_tp, 1565 + struct tcf_result *res, 1566 + bool compat_mode, 1567 + u32 *last_executed_chain) 1579 1568 { 1580 1569 #ifdef CONFIG_NET_CLS_ACT 1581 1570 const int max_reclassify_loop = 4; 1582 - const struct tcf_proto *orig_tp = tp; 1583 1571 const struct tcf_proto *first_tp; 1584 1572 int limit = 0; 1585 1573 ··· 1600 1582 #ifdef CONFIG_NET_CLS_ACT 1601 1583 if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) { 1602 1584 first_tp = orig_tp; 1585 + *last_executed_chain = first_tp->chain->index; 1603 1586 goto reset; 1604 1587 } else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) { 1605 1588 first_tp = res->goto_tp; 1606 - 1607 - #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) 1608 - { 1609 - struct tc_skb_ext *ext; 1610 - 1611 - ext = skb_ext_add(skb, TC_SKB_EXT); 1612 - if (WARN_ON_ONCE(!ext)) 1613 - return TC_ACT_SHOT; 1614 - 1615 - ext->chain = err & TC_ACT_EXT_VAL_MASK; 1616 - } 1617 - #endif 1589 + *last_executed_chain = err & TC_ACT_EXT_VAL_MASK; 1618 1590 goto reset; 1619 1591 } 1620 1592 #endif ··· 1627 1619 goto reclassify; 1628 1620 #endif 1629 1621 } 1622 + 1623 + int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, 1624 + struct tcf_result *res, bool compat_mode) 1625 + { 1626 + u32 last_executed_chain = 0; 1627 + 1628 + return __tcf_classify(skb, tp, tp, res, compat_mode, 1629 + &last_executed_chain); 1630 + } 1630 1631 EXPORT_SYMBOL(tcf_classify); 1632 + 1633 + int tcf_classify_ingress(struct sk_buff *skb, 1634 + const struct tcf_block *ingress_block, 1635 + const struct tcf_proto *tp, 1636 + struct tcf_result *res, bool compat_mode) 1637 + { 1638 + #if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT) 1639 + u32 last_executed_chain = 0; 1640 + 1641 + return __tcf_classify(skb, tp, tp, res, 
compat_mode, 1642 + &last_executed_chain); 1643 + #else 1644 + u32 last_executed_chain = tp ? tp->chain->index : 0; 1645 + const struct tcf_proto *orig_tp = tp; 1646 + struct tc_skb_ext *ext; 1647 + int ret; 1648 + 1649 + ext = skb_ext_find(skb, TC_SKB_EXT); 1650 + 1651 + if (ext && ext->chain) { 1652 + struct tcf_chain *fchain; 1653 + 1654 + fchain = tcf_chain_lookup_rcu(ingress_block, ext->chain); 1655 + if (!fchain) 1656 + return TC_ACT_SHOT; 1657 + 1658 + /* Consume, so cloned/redirect skbs won't inherit ext */ 1659 + skb_ext_del(skb, TC_SKB_EXT); 1660 + 1661 + tp = rcu_dereference_bh(fchain->filter_chain); 1662 + } 1663 + 1664 + ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode, 1665 + &last_executed_chain); 1666 + 1667 + /* If we missed on some chain */ 1668 + if (ret == TC_ACT_UNSPEC && last_executed_chain) { 1669 + ext = skb_ext_add(skb, TC_SKB_EXT); 1670 + if (WARN_ON_ONCE(!ext)) 1671 + return TC_ACT_SHOT; 1672 + ext->chain = last_executed_chain; 1673 + } 1674 + 1675 + return ret; 1676 + #endif 1677 + } 1678 + EXPORT_SYMBOL(tcf_classify_ingress); 1631 1679 1632 1680 struct tcf_chain_info { 1633 1681 struct tcf_proto __rcu **pprev;
+8
net/sched/sch_generic.c
··· 1391 1391 } 1392 1392 EXPORT_SYMBOL(mini_qdisc_pair_swap); 1393 1393 1394 + void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp, 1395 + struct tcf_block *block) 1396 + { 1397 + miniqp->miniq1.block = block; 1398 + miniqp->miniq2.block = block; 1399 + } 1400 + EXPORT_SYMBOL(mini_qdisc_pair_block_init); 1401 + 1394 1402 void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc, 1395 1403 struct mini_Qdisc __rcu **p_miniq) 1396 1404 {
+10 -1
net/sched/sch_ingress.c
··· 78 78 { 79 79 struct ingress_sched_data *q = qdisc_priv(sch); 80 80 struct net_device *dev = qdisc_dev(sch); 81 + int err; 81 82 82 83 net_inc_ingress_queue(); 83 84 ··· 88 87 q->block_info.chain_head_change = clsact_chain_head_change; 89 88 q->block_info.chain_head_change_priv = &q->miniqp; 90 89 91 - return tcf_block_get_ext(&q->block, sch, &q->block_info, extack); 90 + err = tcf_block_get_ext(&q->block, sch, &q->block_info, extack); 91 + if (err) 92 + return err; 93 + 94 + mini_qdisc_pair_block_init(&q->miniqp, q->block); 95 + 96 + return 0; 92 97 } 93 98 94 99 static void ingress_destroy(struct Qdisc *sch) ··· 232 225 extack); 233 226 if (err) 234 227 return err; 228 + 229 + mini_qdisc_pair_block_init(&q->miniqp_ingress, q->ingress_block); 235 230 236 231 mini_qdisc_pair_init(&q->miniqp_egress, sch, &dev->miniq_egress); 237 232