Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net/mlx5e: Move TC tunnel offloading code to separate source file

Move tunnel offloading related code to a separate source file for better
code maintainability.

Code refactoring with no functional change.

Signed-off-by: Oz Shlomo <ozsh@mellanox.com>
Reviewed-by: Eli Britstein <elibr@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>

Authored by Oz Shlomo; committed by Saeed Mahameed
101f4de9 54c177ca

+548 -496
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/Makefile
··· 30 30 mlx5_core-$(CONFIG_MLX5_EN_ARFS) += en_arfs.o 31 31 mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o 32 32 mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o 33 - mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o 33 + mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o 34 34 35 35 # 36 36 # Core extra
+496
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ 2 + /* Copyright (c) 2018 Mellanox Technologies. */ 3 + 4 + #include <net/vxlan.h> 5 + #include "lib/vxlan.h" 6 + #include "en/tc_tun.h" 7 + 8 + static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, 9 + struct net_device *mirred_dev, 10 + struct net_device **out_dev, 11 + struct flowi4 *fl4, 12 + struct neighbour **out_n, 13 + u8 *out_ttl) 14 + { 15 + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 16 + struct mlx5e_rep_priv *uplink_rpriv; 17 + struct rtable *rt; 18 + struct neighbour *n = NULL; 19 + 20 + #if IS_ENABLED(CONFIG_INET) 21 + int ret; 22 + 23 + rt = ip_route_output_key(dev_net(mirred_dev), fl4); 24 + ret = PTR_ERR_OR_ZERO(rt); 25 + if (ret) 26 + return ret; 27 + #else 28 + return -EOPNOTSUPP; 29 + #endif 30 + uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); 31 + /* if the egress device isn't on the same HW e-switch, we use the uplink */ 32 + if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) 33 + *out_dev = uplink_rpriv->netdev; 34 + else 35 + *out_dev = rt->dst.dev; 36 + 37 + if (!(*out_ttl)) 38 + *out_ttl = ip4_dst_hoplimit(&rt->dst); 39 + n = dst_neigh_lookup(&rt->dst, &fl4->daddr); 40 + ip_rt_put(rt); 41 + if (!n) 42 + return -ENOMEM; 43 + 44 + *out_n = n; 45 + return 0; 46 + } 47 + 48 + static const char *mlx5e_netdev_kind(struct net_device *dev) 49 + { 50 + if (dev->rtnl_link_ops) 51 + return dev->rtnl_link_ops->kind; 52 + else 53 + return ""; 54 + } 55 + 56 + static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, 57 + struct net_device *mirred_dev, 58 + struct net_device **out_dev, 59 + struct flowi6 *fl6, 60 + struct neighbour **out_n, 61 + u8 *out_ttl) 62 + { 63 + struct neighbour *n = NULL; 64 + struct dst_entry *dst; 65 + 66 + #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6) 67 + struct mlx5e_rep_priv *uplink_rpriv; 68 + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 69 + int ret; 70 + 71 + ret = 
ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst, 72 + fl6); 73 + if (ret < 0) 74 + return ret; 75 + 76 + if (!(*out_ttl)) 77 + *out_ttl = ip6_dst_hoplimit(dst); 78 + 79 + uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); 80 + /* if the egress device isn't on the same HW e-switch, we use the uplink */ 81 + if (!switchdev_port_same_parent_id(priv->netdev, dst->dev)) 82 + *out_dev = uplink_rpriv->netdev; 83 + else 84 + *out_dev = dst->dev; 85 + #else 86 + return -EOPNOTSUPP; 87 + #endif 88 + 89 + n = dst_neigh_lookup(dst, &fl6->daddr); 90 + dst_release(dst); 91 + if (!n) 92 + return -ENOMEM; 93 + 94 + *out_n = n; 95 + return 0; 96 + } 97 + 98 + static int mlx5e_gen_vxlan_header(char buf[], struct ip_tunnel_key *tun_key) 99 + { 100 + __be32 tun_id = tunnel_id_to_key32(tun_key->tun_id); 101 + struct udphdr *udp = (struct udphdr *)(buf); 102 + struct vxlanhdr *vxh = (struct vxlanhdr *) 103 + ((char *)udp + sizeof(struct udphdr)); 104 + 105 + udp->dest = tun_key->tp_dst; 106 + vxh->vx_flags = VXLAN_HF_VNI; 107 + vxh->vx_vni = vxlan_vni_field(tun_id); 108 + 109 + return 0; 110 + } 111 + 112 + static int mlx5e_gen_ip_tunnel_header(char buf[], __u8 *ip_proto, 113 + struct mlx5e_encap_entry *e) 114 + { 115 + int err = 0; 116 + struct ip_tunnel_key *key = &e->tun_info.key; 117 + 118 + if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) { 119 + *ip_proto = IPPROTO_UDP; 120 + err = mlx5e_gen_vxlan_header(buf, key); 121 + } else { 122 + pr_warn("mlx5: Cannot generate tunnel header for tunnel type (%d)\n" 123 + , e->tunnel_type); 124 + err = -EOPNOTSUPP; 125 + } 126 + 127 + return err; 128 + } 129 + 130 + int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv, 131 + struct net_device *mirred_dev, 132 + struct mlx5e_encap_entry *e) 133 + { 134 + int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); 135 + int ipv4_encap_size = ETH_HLEN + 136 + sizeof(struct iphdr) + 137 + e->tunnel_hlen; 138 + struct ip_tunnel_key *tun_key = &e->tun_info.key; 
139 + struct net_device *out_dev; 140 + struct neighbour *n = NULL; 141 + struct flowi4 fl4 = {}; 142 + char *encap_header; 143 + struct ethhdr *eth; 144 + u8 nud_state, ttl; 145 + struct iphdr *ip; 146 + int err; 147 + 148 + if (max_encap_size < ipv4_encap_size) { 149 + mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n", 150 + ipv4_encap_size, max_encap_size); 151 + return -EOPNOTSUPP; 152 + } 153 + 154 + encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL); 155 + if (!encap_header) 156 + return -ENOMEM; 157 + 158 + /* add the IP fields */ 159 + fl4.flowi4_tos = tun_key->tos; 160 + fl4.daddr = tun_key->u.ipv4.dst; 161 + fl4.saddr = tun_key->u.ipv4.src; 162 + ttl = tun_key->ttl; 163 + 164 + err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev, 165 + &fl4, &n, &ttl); 166 + if (err) 167 + goto free_encap; 168 + 169 + /* used by mlx5e_detach_encap to lookup a neigh hash table 170 + * entry in the neigh hash table when a user deletes a rule 171 + */ 172 + e->m_neigh.dev = n->dev; 173 + e->m_neigh.family = n->ops->family; 174 + memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); 175 + e->out_dev = out_dev; 176 + 177 + /* It's important to add the neigh to the hash table before checking 178 + * the neigh validity state. So if we'll get a notification, in case the 179 + * neigh changes it's validity state, we would find the relevant neigh 180 + * in the hash. 
181 + */ 182 + err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e); 183 + if (err) 184 + goto free_encap; 185 + 186 + read_lock_bh(&n->lock); 187 + nud_state = n->nud_state; 188 + ether_addr_copy(e->h_dest, n->ha); 189 + read_unlock_bh(&n->lock); 190 + 191 + /* add ethernet header */ 192 + eth = (struct ethhdr *)encap_header; 193 + ether_addr_copy(eth->h_dest, e->h_dest); 194 + ether_addr_copy(eth->h_source, out_dev->dev_addr); 195 + eth->h_proto = htons(ETH_P_IP); 196 + 197 + /* add ip header */ 198 + ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr)); 199 + ip->tos = tun_key->tos; 200 + ip->version = 0x4; 201 + ip->ihl = 0x5; 202 + ip->ttl = ttl; 203 + ip->daddr = fl4.daddr; 204 + ip->saddr = fl4.saddr; 205 + 206 + /* add tunneling protocol header */ 207 + err = mlx5e_gen_ip_tunnel_header((char *)ip + sizeof(struct iphdr), 208 + &ip->protocol, e); 209 + if (err) 210 + goto destroy_neigh_entry; 211 + 212 + e->encap_size = ipv4_encap_size; 213 + e->encap_header = encap_header; 214 + 215 + if (!(nud_state & NUD_VALID)) { 216 + neigh_event_send(n, NULL); 217 + err = -EAGAIN; 218 + goto out; 219 + } 220 + 221 + err = mlx5_packet_reformat_alloc(priv->mdev, 222 + e->reformat_type, 223 + ipv4_encap_size, encap_header, 224 + MLX5_FLOW_NAMESPACE_FDB, 225 + &e->encap_id); 226 + if (err) 227 + goto destroy_neigh_entry; 228 + 229 + e->flags |= MLX5_ENCAP_ENTRY_VALID; 230 + mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev)); 231 + neigh_release(n); 232 + return err; 233 + 234 + destroy_neigh_entry: 235 + mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e); 236 + free_encap: 237 + kfree(encap_header); 238 + out: 239 + if (n) 240 + neigh_release(n); 241 + return err; 242 + } 243 + 244 + int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, 245 + struct net_device *mirred_dev, 246 + struct mlx5e_encap_entry *e) 247 + { 248 + int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); 249 + int ipv6_encap_size = ETH_HLEN + 250 + 
sizeof(struct ipv6hdr) + 251 + e->tunnel_hlen; 252 + struct ip_tunnel_key *tun_key = &e->tun_info.key; 253 + struct net_device *out_dev; 254 + struct neighbour *n = NULL; 255 + struct flowi6 fl6 = {}; 256 + struct ipv6hdr *ip6h; 257 + char *encap_header; 258 + struct ethhdr *eth; 259 + u8 nud_state, ttl; 260 + int err; 261 + 262 + if (max_encap_size < ipv6_encap_size) { 263 + mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n", 264 + ipv6_encap_size, max_encap_size); 265 + return -EOPNOTSUPP; 266 + } 267 + 268 + encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL); 269 + if (!encap_header) 270 + return -ENOMEM; 271 + 272 + ttl = tun_key->ttl; 273 + 274 + fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label); 275 + fl6.daddr = tun_key->u.ipv6.dst; 276 + fl6.saddr = tun_key->u.ipv6.src; 277 + 278 + err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev, 279 + &fl6, &n, &ttl); 280 + if (err) 281 + goto free_encap; 282 + 283 + /* used by mlx5e_detach_encap to lookup a neigh hash table 284 + * entry in the neigh hash table when a user deletes a rule 285 + */ 286 + e->m_neigh.dev = n->dev; 287 + e->m_neigh.family = n->ops->family; 288 + memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); 289 + e->out_dev = out_dev; 290 + 291 + /* It's importent to add the neigh to the hash table before checking 292 + * the neigh validity state. So if we'll get a notification, in case the 293 + * neigh changes it's validity state, we would find the relevant neigh 294 + * in the hash. 
295 + */ 296 + err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e); 297 + if (err) 298 + goto free_encap; 299 + 300 + read_lock_bh(&n->lock); 301 + nud_state = n->nud_state; 302 + ether_addr_copy(e->h_dest, n->ha); 303 + read_unlock_bh(&n->lock); 304 + 305 + /* add ethernet header */ 306 + eth = (struct ethhdr *)encap_header; 307 + ether_addr_copy(eth->h_dest, e->h_dest); 308 + ether_addr_copy(eth->h_source, out_dev->dev_addr); 309 + eth->h_proto = htons(ETH_P_IPV6); 310 + 311 + /* add ip header */ 312 + ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr)); 313 + ip6_flow_hdr(ip6h, tun_key->tos, 0); 314 + /* the HW fills up ipv6 payload len */ 315 + ip6h->hop_limit = ttl; 316 + ip6h->daddr = fl6.daddr; 317 + ip6h->saddr = fl6.saddr; 318 + 319 + /* add tunneling protocol header */ 320 + err = mlx5e_gen_ip_tunnel_header((char *)ip6h + sizeof(struct ipv6hdr), 321 + &ip6h->nexthdr, e); 322 + if (err) 323 + goto destroy_neigh_entry; 324 + 325 + e->encap_size = ipv6_encap_size; 326 + e->encap_header = encap_header; 327 + 328 + if (!(nud_state & NUD_VALID)) { 329 + neigh_event_send(n, NULL); 330 + err = -EAGAIN; 331 + goto out; 332 + } 333 + 334 + err = mlx5_packet_reformat_alloc(priv->mdev, 335 + e->reformat_type, 336 + ipv6_encap_size, encap_header, 337 + MLX5_FLOW_NAMESPACE_FDB, 338 + &e->encap_id); 339 + if (err) 340 + goto destroy_neigh_entry; 341 + 342 + e->flags |= MLX5_ENCAP_ENTRY_VALID; 343 + mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev)); 344 + neigh_release(n); 345 + return err; 346 + 347 + destroy_neigh_entry: 348 + mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e); 349 + free_encap: 350 + kfree(encap_header); 351 + out: 352 + if (n) 353 + neigh_release(n); 354 + return err; 355 + } 356 + 357 + int mlx5e_tc_tun_get_type(struct net_device *tunnel_dev) 358 + { 359 + if (netif_is_vxlan(tunnel_dev)) 360 + return MLX5E_TC_TUNNEL_TYPE_VXLAN; 361 + else 362 + return MLX5E_TC_TUNNEL_TYPE_UNKNOWN; 363 + } 364 + 365 + bool 
mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv, 366 + struct net_device *netdev) 367 + { 368 + int tunnel_type = mlx5e_tc_tun_get_type(netdev); 369 + 370 + if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN && 371 + MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) 372 + return true; 373 + else 374 + return false; 375 + } 376 + 377 + int mlx5e_tc_tun_init_encap_attr(struct net_device *tunnel_dev, 378 + struct mlx5e_priv *priv, 379 + struct mlx5e_encap_entry *e, 380 + struct netlink_ext_ack *extack) 381 + { 382 + e->tunnel_type = mlx5e_tc_tun_get_type(tunnel_dev); 383 + 384 + if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) { 385 + int dst_port = be16_to_cpu(e->tun_info.key.tp_dst); 386 + 387 + if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, dst_port)) { 388 + NL_SET_ERR_MSG_MOD(extack, 389 + "vxlan udp dport was not registered with the HW"); 390 + netdev_warn(priv->netdev, 391 + "%d isn't an offloaded vxlan udp dport\n", 392 + dst_port); 393 + return -EOPNOTSUPP; 394 + } 395 + e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN; 396 + e->tunnel_hlen = VXLAN_HLEN; 397 + } else { 398 + e->reformat_type = -1; 399 + e->tunnel_hlen = -1; 400 + return -EOPNOTSUPP; 401 + } 402 + return 0; 403 + } 404 + 405 + static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv, 406 + struct mlx5_flow_spec *spec, 407 + struct tc_cls_flower_offload *f, 408 + void *headers_c, 409 + void *headers_v) 410 + { 411 + struct netlink_ext_ack *extack = f->common.extack; 412 + struct flow_dissector_key_ports *key = 413 + skb_flow_dissector_target(f->dissector, 414 + FLOW_DISSECTOR_KEY_ENC_PORTS, 415 + f->key); 416 + struct flow_dissector_key_ports *mask = 417 + skb_flow_dissector_target(f->dissector, 418 + FLOW_DISSECTOR_KEY_ENC_PORTS, 419 + f->mask); 420 + void *misc_c = MLX5_ADDR_OF(fte_match_param, 421 + spec->match_criteria, 422 + misc_parameters); 423 + void *misc_v = MLX5_ADDR_OF(fte_match_param, 424 + spec->match_value, 425 + misc_parameters); 426 + 427 + /* Full udp dst port must be given 
*/ 428 + if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS) || 429 + memchr_inv(&mask->dst, 0xff, sizeof(mask->dst))) { 430 + NL_SET_ERR_MSG_MOD(extack, 431 + "VXLAN decap filter must include enc_dst_port condition"); 432 + netdev_warn(priv->netdev, 433 + "VXLAN decap filter must include enc_dst_port condition\n"); 434 + return -EOPNOTSUPP; 435 + } 436 + 437 + /* udp dst port must be knonwn as a VXLAN port */ 438 + if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->dst))) { 439 + NL_SET_ERR_MSG_MOD(extack, 440 + "Matched UDP port is not registered as a VXLAN port"); 441 + netdev_warn(priv->netdev, 442 + "UDP port %d is not registered as a VXLAN port\n", 443 + be16_to_cpu(key->dst)); 444 + return -EOPNOTSUPP; 445 + } 446 + 447 + /* dst UDP port is valid here */ 448 + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol); 449 + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP); 450 + 451 + MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport, ntohs(mask->dst)); 452 + MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, ntohs(key->dst)); 453 + 454 + MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport, ntohs(mask->src)); 455 + MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport, ntohs(key->src)); 456 + 457 + /* match on VNI */ 458 + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) { 459 + struct flow_dissector_key_keyid *key = 460 + skb_flow_dissector_target(f->dissector, 461 + FLOW_DISSECTOR_KEY_ENC_KEYID, 462 + f->key); 463 + struct flow_dissector_key_keyid *mask = 464 + skb_flow_dissector_target(f->dissector, 465 + FLOW_DISSECTOR_KEY_ENC_KEYID, 466 + f->mask); 467 + MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni, 468 + be32_to_cpu(mask->keyid)); 469 + MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni, 470 + be32_to_cpu(key->keyid)); 471 + } 472 + return 0; 473 + } 474 + 475 + int mlx5e_tc_tun_parse(struct net_device *filter_dev, 476 + struct mlx5e_priv *priv, 477 + struct mlx5_flow_spec 
*spec, 478 + struct tc_cls_flower_offload *f, 479 + void *headers_c, 480 + void *headers_v) 481 + { 482 + int tunnel_type; 483 + int err = 0; 484 + 485 + tunnel_type = mlx5e_tc_tun_get_type(filter_dev); 486 + if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) { 487 + err = mlx5e_tc_tun_parse_vxlan(priv, spec, f, 488 + headers_c, headers_v); 489 + } else { 490 + netdev_warn(priv->netdev, 491 + "decapsulation offload is not supported for %s net device (%d)\n", 492 + mlx5e_netdev_kind(filter_dev), tunnel_type); 493 + return -EOPNOTSUPP; 494 + } 495 + return err; 496 + }
+43
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ 2 + /* Copyright (c) 2018 Mellanox Technologies. */ 3 + 4 + #ifndef __MLX5_EN_TC_TUNNEL_H__ 5 + #define __MLX5_EN_TC_TUNNEL_H__ 6 + 7 + #include <linux/netdevice.h> 8 + #include <linux/mlx5/fs.h> 9 + #include <net/pkt_cls.h> 10 + #include <linux/netlink.h> 11 + #include "en.h" 12 + #include "en_rep.h" 13 + 14 + enum { 15 + MLX5E_TC_TUNNEL_TYPE_UNKNOWN, 16 + MLX5E_TC_TUNNEL_TYPE_VXLAN 17 + }; 18 + 19 + int mlx5e_tc_tun_init_encap_attr(struct net_device *tunnel_dev, 20 + struct mlx5e_priv *priv, 21 + struct mlx5e_encap_entry *e, 22 + struct netlink_ext_ack *extack); 23 + 24 + int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv, 25 + struct net_device *mirred_dev, 26 + struct mlx5e_encap_entry *e); 27 + 28 + int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, 29 + struct net_device *mirred_dev, 30 + struct mlx5e_encap_entry *e); 31 + 32 + int mlx5e_tc_tun_get_type(struct net_device *tunnel_dev); 33 + bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv, 34 + struct net_device *netdev); 35 + 36 + int mlx5e_tc_tun_parse(struct net_device *filter_dev, 37 + struct mlx5e_priv *priv, 38 + struct mlx5_flow_spec *spec, 39 + struct tc_cls_flower_offload *f, 40 + void *headers_c, 41 + void *headers_v); 42 + 43 + #endif //__MLX5_EN_TC_TUNNEL_H__
+1
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
··· 42 42 #include "en.h" 43 43 #include "en_rep.h" 44 44 #include "en_tc.h" 45 + #include "en/tc_tun.h" 45 46 #include "fs_core.h" 46 47 47 48 #define MLX5E_REP_PARAMS_LOG_SQ_SIZE \
+7 -493
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 44 44 #include <net/tc_act/tc_tunnel_key.h> 45 45 #include <net/tc_act/tc_pedit.h> 46 46 #include <net/tc_act/tc_csum.h> 47 - #include <net/vxlan.h> 48 47 #include <net/arp.h> 49 48 #include "en.h" 50 49 #include "en_rep.h" 51 50 #include "en_tc.h" 52 51 #include "eswitch.h" 53 - #include "lib/vxlan.h" 54 52 #include "fs_core.h" 55 53 #include "en/port.h" 54 + #include "en/tc_tun.h" 56 55 57 56 struct mlx5_nic_flow_attr { 58 57 u32 action; ··· 77 78 }; 78 79 79 80 #define MLX5E_TC_MAX_SPLITS 1 80 - 81 - enum { 82 - MLX5E_TC_TUNNEL_TYPE_UNKNOWN, 83 - MLX5E_TC_TUNNEL_TYPE_VXLAN 84 - }; 85 - 86 - static int mlx5e_get_tunnel_type(struct net_device *tunnel_dev); 87 81 88 82 struct mlx5e_tc_flow { 89 83 struct rhash_head node; ··· 676 684 } 677 685 } 678 686 679 - static const char *mlx5e_netdev_kind(struct net_device *dev) 680 - { 681 - if (dev->rtnl_link_ops) 682 - return dev->rtnl_link_ops->kind; 683 - else 684 - return ""; 685 - } 686 - 687 687 static int 688 688 mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, 689 689 struct mlx5e_tc_flow_parse_attr *parse_attr, ··· 1193 1209 mlx5e_tc_del_nic_flow(priv, flow); 1194 1210 } 1195 1211 1196 - static int parse_tunnel_vxlan_attr(struct mlx5e_priv *priv, 1197 - struct mlx5_flow_spec *spec, 1198 - struct tc_cls_flower_offload *f, 1199 - void *headers_c, 1200 - void *headers_v) 1201 - { 1202 - struct netlink_ext_ack *extack = f->common.extack; 1203 - struct flow_dissector_key_ports *key = 1204 - skb_flow_dissector_target(f->dissector, 1205 - FLOW_DISSECTOR_KEY_ENC_PORTS, 1206 - f->key); 1207 - struct flow_dissector_key_ports *mask = 1208 - skb_flow_dissector_target(f->dissector, 1209 - FLOW_DISSECTOR_KEY_ENC_PORTS, 1210 - f->mask); 1211 - void *misc_c = MLX5_ADDR_OF(fte_match_param, 1212 - spec->match_criteria, 1213 - misc_parameters); 1214 - void *misc_v = MLX5_ADDR_OF(fte_match_param, 1215 - spec->match_value, 1216 - misc_parameters); 1217 - 1218 - /* Full udp dst port must be given */ 1219 - if 
(!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS) || 1220 - memchr_inv(&mask->dst, 0xff, sizeof(mask->dst))) { 1221 - NL_SET_ERR_MSG_MOD(extack, 1222 - "VXLAN decap filter must include enc_dst_port condition"); 1223 - netdev_warn(priv->netdev, 1224 - "VXLAN decap filter must include enc_dst_port condition\n"); 1225 - return -EOPNOTSUPP; 1226 - } 1227 - 1228 - /* udp dst port must be knonwn as a VXLAN port */ 1229 - if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->dst))) { 1230 - NL_SET_ERR_MSG_MOD(extack, 1231 - "Matched UDP port is not registered as a VXLAN port"); 1232 - netdev_warn(priv->netdev, 1233 - "UDP port %d is not registered as a VXLAN port\n", 1234 - be16_to_cpu(key->dst)); 1235 - return -EOPNOTSUPP; 1236 - } 1237 - 1238 - /* dst UDP port is valid here */ 1239 - MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol); 1240 - MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP); 1241 - 1242 - MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport, ntohs(mask->dst)); 1243 - MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, ntohs(key->dst)); 1244 - 1245 - MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport, ntohs(mask->src)); 1246 - MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport, ntohs(key->src)); 1247 - 1248 - /* match on VNI */ 1249 - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) { 1250 - struct flow_dissector_key_keyid *key = 1251 - skb_flow_dissector_target(f->dissector, 1252 - FLOW_DISSECTOR_KEY_ENC_KEYID, 1253 - f->key); 1254 - struct flow_dissector_key_keyid *mask = 1255 - skb_flow_dissector_target(f->dissector, 1256 - FLOW_DISSECTOR_KEY_ENC_KEYID, 1257 - f->mask); 1258 - MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni, 1259 - be32_to_cpu(mask->keyid)); 1260 - MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni, 1261 - be32_to_cpu(key->keyid)); 1262 - } 1263 - return 0; 1264 - } 1265 1212 1266 1213 static int parse_tunnel_attr(struct mlx5e_priv *priv, 1267 1214 struct 
mlx5_flow_spec *spec, ··· 1209 1294 skb_flow_dissector_target(f->dissector, 1210 1295 FLOW_DISSECTOR_KEY_ENC_CONTROL, 1211 1296 f->key); 1212 - int tunnel_type; 1213 1297 int err = 0; 1214 1298 1215 - tunnel_type = mlx5e_get_tunnel_type(filter_dev); 1216 - if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) { 1217 - err = parse_tunnel_vxlan_attr(priv, spec, f, 1218 - headers_c, headers_v); 1219 - } else { 1220 - NL_SET_ERR_MSG_MOD(extack, 1221 - "decapsulation offload is not supported"); 1222 - netdev_warn(priv->netdev, 1223 - "decapsulation offload is not supported for %s net device (%d)\n", 1224 - mlx5e_netdev_kind(filter_dev), tunnel_type); 1225 - return -EOPNOTSUPP; 1226 - } 1227 - 1299 + err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f, 1300 + headers_c, headers_v); 1228 1301 if (err) { 1229 1302 NL_SET_ERR_MSG_MOD(extack, 1230 1303 "failed to parse tunnel attributes"); 1231 - netdev_warn(priv->netdev, 1232 - "failed to parse %s tunnel attributes (%d)\n", 1233 - mlx5e_netdev_kind(filter_dev), tunnel_type); 1234 - return -EOPNOTSUPP; 1304 + return err; 1235 1305 } 1236 1306 1237 1307 if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { ··· 2259 2359 return jhash(key, sizeof(*key), 0); 2260 2360 } 2261 2361 2262 - static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, 2263 - struct net_device *mirred_dev, 2264 - struct net_device **out_dev, 2265 - struct flowi4 *fl4, 2266 - struct neighbour **out_n, 2267 - u8 *out_ttl) 2268 - { 2269 - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 2270 - struct mlx5e_rep_priv *uplink_rpriv; 2271 - struct rtable *rt; 2272 - struct neighbour *n = NULL; 2273 - 2274 - #if IS_ENABLED(CONFIG_INET) 2275 - int ret; 2276 - 2277 - rt = ip_route_output_key(dev_net(mirred_dev), fl4); 2278 - ret = PTR_ERR_OR_ZERO(rt); 2279 - if (ret) 2280 - return ret; 2281 - #else 2282 - return -EOPNOTSUPP; 2283 - #endif 2284 - uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); 2285 - /* if the egress device isn't on the same HW 
e-switch, we use the uplink */ 2286 - if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) 2287 - *out_dev = uplink_rpriv->netdev; 2288 - else 2289 - *out_dev = rt->dst.dev; 2290 - 2291 - if (!(*out_ttl)) 2292 - *out_ttl = ip4_dst_hoplimit(&rt->dst); 2293 - n = dst_neigh_lookup(&rt->dst, &fl4->daddr); 2294 - ip_rt_put(rt); 2295 - if (!n) 2296 - return -ENOMEM; 2297 - 2298 - *out_n = n; 2299 - return 0; 2300 - } 2301 2362 2302 2363 static bool is_merged_eswitch_dev(struct mlx5e_priv *priv, 2303 2364 struct net_device *peer_netdev) ··· 2274 2413 (peer_priv->mdev->priv.eswitch->mode == SRIOV_OFFLOADS)); 2275 2414 } 2276 2415 2277 - static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, 2278 - struct net_device *mirred_dev, 2279 - struct net_device **out_dev, 2280 - struct flowi6 *fl6, 2281 - struct neighbour **out_n, 2282 - u8 *out_ttl) 2283 - { 2284 - struct neighbour *n = NULL; 2285 - struct dst_entry *dst; 2286 2416 2287 - #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6) 2288 - struct mlx5e_rep_priv *uplink_rpriv; 2289 - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 2290 - int ret; 2291 - 2292 - ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst, 2293 - fl6); 2294 - if (ret < 0) 2295 - return ret; 2296 - 2297 - if (!(*out_ttl)) 2298 - *out_ttl = ip6_dst_hoplimit(dst); 2299 - 2300 - uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); 2301 - /* if the egress device isn't on the same HW e-switch, we use the uplink */ 2302 - if (!switchdev_port_same_parent_id(priv->netdev, dst->dev)) 2303 - *out_dev = uplink_rpriv->netdev; 2304 - else 2305 - *out_dev = dst->dev; 2306 - #else 2307 - return -EOPNOTSUPP; 2308 - #endif 2309 - 2310 - n = dst_neigh_lookup(dst, &fl6->daddr); 2311 - dst_release(dst); 2312 - if (!n) 2313 - return -ENOMEM; 2314 - 2315 - *out_n = n; 2316 - return 0; 2317 - } 2318 - 2319 - static int mlx5e_gen_vxlan_header(char buf[], struct ip_tunnel_key *tun_key) 2320 - { 2321 - __be32 tun_id = 
tunnel_id_to_key32(tun_key->tun_id); 2322 - struct udphdr *udp = (struct udphdr *)(buf); 2323 - struct vxlanhdr *vxh = (struct vxlanhdr *) 2324 - ((char *)udp + sizeof(struct udphdr)); 2325 - 2326 - udp->dest = tun_key->tp_dst; 2327 - vxh->vx_flags = VXLAN_HF_VNI; 2328 - vxh->vx_vni = vxlan_vni_field(tun_id); 2329 - 2330 - return 0; 2331 - } 2332 - 2333 - static int mlx5e_gen_ip_tunnel_header(char buf[], __u8 *ip_proto, 2334 - struct mlx5e_encap_entry *e) 2335 - { 2336 - int err = 0; 2337 - struct ip_tunnel_key *key = &e->tun_info.key; 2338 - 2339 - if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) { 2340 - *ip_proto = IPPROTO_UDP; 2341 - err = mlx5e_gen_vxlan_header(buf, key); 2342 - } else { 2343 - pr_warn("mlx5: Cannot generate tunnel header for tunnel type (%d)\n" 2344 - , e->tunnel_type); 2345 - err = -EOPNOTSUPP; 2346 - } 2347 - 2348 - return err; 2349 - } 2350 - 2351 - static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, 2352 - struct net_device *mirred_dev, 2353 - struct mlx5e_encap_entry *e) 2354 - { 2355 - int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); 2356 - int ipv4_encap_size = ETH_HLEN + 2357 - sizeof(struct iphdr) + 2358 - e->tunnel_hlen; 2359 - struct ip_tunnel_key *tun_key = &e->tun_info.key; 2360 - struct net_device *out_dev; 2361 - struct neighbour *n = NULL; 2362 - struct flowi4 fl4 = {}; 2363 - char *encap_header; 2364 - struct ethhdr *eth; 2365 - u8 nud_state, ttl; 2366 - struct iphdr *ip; 2367 - int err; 2368 - 2369 - if (max_encap_size < ipv4_encap_size) { 2370 - mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n", 2371 - ipv4_encap_size, max_encap_size); 2372 - return -EOPNOTSUPP; 2373 - } 2374 - 2375 - encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL); 2376 - if (!encap_header) 2377 - return -ENOMEM; 2378 - 2379 - /* add the IP fields */ 2380 - fl4.flowi4_tos = tun_key->tos; 2381 - fl4.daddr = tun_key->u.ipv4.dst; 2382 - fl4.saddr = tun_key->u.ipv4.src; 2383 - ttl = tun_key->ttl; 
2384 - 2385 - err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev, 2386 - &fl4, &n, &ttl); 2387 - if (err) 2388 - goto free_encap; 2389 - 2390 - /* used by mlx5e_detach_encap to lookup a neigh hash table 2391 - * entry in the neigh hash table when a user deletes a rule 2392 - */ 2393 - e->m_neigh.dev = n->dev; 2394 - e->m_neigh.family = n->ops->family; 2395 - memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); 2396 - e->out_dev = out_dev; 2397 - 2398 - /* It's important to add the neigh to the hash table before checking 2399 - * the neigh validity state. So if we'll get a notification, in case the 2400 - * neigh changes it's validity state, we would find the relevant neigh 2401 - * in the hash. 2402 - */ 2403 - err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e); 2404 - if (err) 2405 - goto free_encap; 2406 - 2407 - read_lock_bh(&n->lock); 2408 - nud_state = n->nud_state; 2409 - ether_addr_copy(e->h_dest, n->ha); 2410 - read_unlock_bh(&n->lock); 2411 - 2412 - /* add ethernet header */ 2413 - eth = (struct ethhdr *)encap_header; 2414 - ether_addr_copy(eth->h_dest, e->h_dest); 2415 - ether_addr_copy(eth->h_source, out_dev->dev_addr); 2416 - eth->h_proto = htons(ETH_P_IP); 2417 - 2418 - /* add ip header */ 2419 - ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr)); 2420 - ip->tos = tun_key->tos; 2421 - ip->version = 0x4; 2422 - ip->ihl = 0x5; 2423 - ip->ttl = ttl; 2424 - ip->daddr = fl4.daddr; 2425 - ip->saddr = fl4.saddr; 2426 - 2427 - /* add tunneling protocol header */ 2428 - err = mlx5e_gen_ip_tunnel_header((char *)ip + sizeof(struct iphdr), 2429 - &ip->protocol, e); 2430 - if (err) 2431 - goto destroy_neigh_entry; 2432 - 2433 - e->encap_size = ipv4_encap_size; 2434 - e->encap_header = encap_header; 2435 - 2436 - if (!(nud_state & NUD_VALID)) { 2437 - neigh_event_send(n, NULL); 2438 - err = -EAGAIN; 2439 - goto out; 2440 - } 2441 - 2442 - err = mlx5_packet_reformat_alloc(priv->mdev, 2443 - e->reformat_type, 2444 - ipv4_encap_size, 
encap_header, 2445 - MLX5_FLOW_NAMESPACE_FDB, 2446 - &e->encap_id); 2447 - if (err) 2448 - goto destroy_neigh_entry; 2449 - 2450 - e->flags |= MLX5_ENCAP_ENTRY_VALID; 2451 - mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev)); 2452 - neigh_release(n); 2453 - return err; 2454 - 2455 - destroy_neigh_entry: 2456 - mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e); 2457 - free_encap: 2458 - kfree(encap_header); 2459 - out: 2460 - if (n) 2461 - neigh_release(n); 2462 - return err; 2463 - } 2464 - 2465 - static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv, 2466 - struct net_device *mirred_dev, 2467 - struct mlx5e_encap_entry *e) 2468 - { 2469 - int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); 2470 - int ipv6_encap_size = ETH_HLEN + 2471 - sizeof(struct ipv6hdr) + 2472 - e->tunnel_hlen; 2473 - struct ip_tunnel_key *tun_key = &e->tun_info.key; 2474 - struct net_device *out_dev; 2475 - struct neighbour *n = NULL; 2476 - struct flowi6 fl6 = {}; 2477 - struct ipv6hdr *ip6h; 2478 - char *encap_header; 2479 - struct ethhdr *eth; 2480 - u8 nud_state, ttl; 2481 - int err; 2482 - 2483 - if (max_encap_size < ipv6_encap_size) { 2484 - mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n", 2485 - ipv6_encap_size, max_encap_size); 2486 - return -EOPNOTSUPP; 2487 - } 2488 - 2489 - encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL); 2490 - if (!encap_header) 2491 - return -ENOMEM; 2492 - 2493 - ttl = tun_key->ttl; 2494 - 2495 - fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label); 2496 - fl6.daddr = tun_key->u.ipv6.dst; 2497 - fl6.saddr = tun_key->u.ipv6.src; 2498 - 2499 - err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev, 2500 - &fl6, &n, &ttl); 2501 - if (err) 2502 - goto free_encap; 2503 - 2504 - /* used by mlx5e_detach_encap to lookup a neigh hash table 2505 - * entry in the neigh hash table when a user deletes a rule 2506 - */ 2507 - e->m_neigh.dev = n->dev; 2508 - e->m_neigh.family = 
n->ops->family; 2509 - memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); 2510 - e->out_dev = out_dev; 2511 - 2512 - /* It's importent to add the neigh to the hash table before checking 2513 - * the neigh validity state. So if we'll get a notification, in case the 2514 - * neigh changes it's validity state, we would find the relevant neigh 2515 - * in the hash. 2516 - */ 2517 - err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e); 2518 - if (err) 2519 - goto free_encap; 2520 - 2521 - read_lock_bh(&n->lock); 2522 - nud_state = n->nud_state; 2523 - ether_addr_copy(e->h_dest, n->ha); 2524 - read_unlock_bh(&n->lock); 2525 - 2526 - /* add ethernet header */ 2527 - eth = (struct ethhdr *)encap_header; 2528 - ether_addr_copy(eth->h_dest, e->h_dest); 2529 - ether_addr_copy(eth->h_source, out_dev->dev_addr); 2530 - eth->h_proto = htons(ETH_P_IPV6); 2531 - 2532 - /* add ip header */ 2533 - ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr)); 2534 - ip6_flow_hdr(ip6h, tun_key->tos, 0); 2535 - /* the HW fills up ipv6 payload len */ 2536 - ip6h->hop_limit = ttl; 2537 - ip6h->daddr = fl6.daddr; 2538 - ip6h->saddr = fl6.saddr; 2539 - 2540 - /* add tunneling protocol header */ 2541 - err = mlx5e_gen_ip_tunnel_header((char *)ip6h + sizeof(struct ipv6hdr), 2542 - &ip6h->nexthdr, e); 2543 - if (err) 2544 - goto destroy_neigh_entry; 2545 - 2546 - e->encap_size = ipv6_encap_size; 2547 - e->encap_header = encap_header; 2548 - 2549 - if (!(nud_state & NUD_VALID)) { 2550 - neigh_event_send(n, NULL); 2551 - err = -EAGAIN; 2552 - goto out; 2553 - } 2554 - 2555 - err = mlx5_packet_reformat_alloc(priv->mdev, 2556 - e->reformat_type, 2557 - ipv6_encap_size, encap_header, 2558 - MLX5_FLOW_NAMESPACE_FDB, 2559 - &e->encap_id); 2560 - if (err) 2561 - goto destroy_neigh_entry; 2562 - 2563 - e->flags |= MLX5_ENCAP_ENTRY_VALID; 2564 - mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev)); 2565 - neigh_release(n); 2566 - return err; 2567 - 2568 - destroy_neigh_entry: 2569 
- mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e); 2570 - free_encap: 2571 - kfree(encap_header); 2572 - out: 2573 - if (n) 2574 - neigh_release(n); 2575 - return err; 2576 - } 2577 - 2578 - static int mlx5e_get_tunnel_type(struct net_device *tunnel_dev) 2579 - { 2580 - if (netif_is_vxlan(tunnel_dev)) 2581 - return MLX5E_TC_TUNNEL_TYPE_VXLAN; 2582 - else 2583 - return MLX5E_TC_TUNNEL_TYPE_UNKNOWN; 2584 - } 2585 - 2586 - bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv, 2587 - struct net_device *netdev) 2588 - { 2589 - int tunnel_type = mlx5e_get_tunnel_type(netdev); 2590 - 2591 - if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN && 2592 - MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) 2593 - return true; 2594 - else 2595 - return false; 2596 - } 2597 - 2598 - static int mlx5e_init_tunnel_attr(struct net_device *tunnel_dev, 2599 - struct mlx5e_priv *priv, 2600 - struct mlx5e_encap_entry *e, 2601 - struct netlink_ext_ack *extack) 2602 - { 2603 - e->tunnel_type = mlx5e_get_tunnel_type(tunnel_dev); 2604 - 2605 - if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) { 2606 - int dst_port = be16_to_cpu(e->tun_info.key.tp_dst); 2607 - 2608 - if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, dst_port)) { 2609 - NL_SET_ERR_MSG_MOD(extack, 2610 - "vxlan udp dport was not registered with the HW"); 2611 - netdev_warn(priv->netdev, 2612 - "%d isn't an offloaded vxlan udp dport\n", 2613 - dst_port); 2614 - return -EOPNOTSUPP; 2615 - } 2616 - e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN; 2617 - e->tunnel_hlen = VXLAN_HLEN; 2618 - } else { 2619 - e->reformat_type = -1; 2620 - e->tunnel_hlen = -1; 2621 - return -EOPNOTSUPP; 2622 - } 2623 - return 0; 2624 - } 2625 2417 2626 2418 static int mlx5e_attach_encap(struct mlx5e_priv *priv, 2627 2419 struct ip_tunnel_info *tun_info, ··· 2311 2797 return -ENOMEM; 2312 2798 2313 2799 e->tun_info = *tun_info; 2314 - err = mlx5e_init_tunnel_attr(mirred_dev, priv, e, extack); 2800 + err = mlx5e_tc_tun_init_encap_attr(mirred_dev, 
priv, e, extack); 2315 2801 if (err) 2316 2802 goto out_err; 2317 2803 2318 2804 INIT_LIST_HEAD(&e->flows); 2319 2805 2320 2806 if (family == AF_INET) 2321 - err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e); 2807 + err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e); 2322 2808 else if (family == AF_INET6) 2323 - err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e); 2809 + err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e); 2324 2810 2325 2811 if (err && err != -EAGAIN) 2326 2812 goto out_err;
-2
drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
··· 70 70 71 71 int mlx5e_tc_num_filters(struct mlx5e_priv *priv); 72 72 73 - bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv, 74 - struct net_device *netdev); 75 73 76 74 #else /* CONFIG_MLX5_ESWITCH */ 77 75 static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }