Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Revert "GTP: add support for flow based tunneling API"

This reverts commit 9ab7e76aefc97a9aa664accb59d6e8dc5e52514a.

This patch was committed without maintainer approval and despite a number
of unaddressed concerns from review. There are several issues that
impede the acceptance of this patch and that make a reversion of this
particular instance of these changes the best way forward:

i) the patch contains several logically separate changes that would be
better served as smaller patches (for review purposes)
ii) functionality like the handling of end markers has been introduced
without further explanation
iii) symmetry between the handling of GTPv0 and GTPv1 has been
unnecessarily broken
iv) the patchset produces 'broken' packets when extension headers are
included
v) there are no available userspace tools to allow for testing this
functionality
vi) there is an unaddressed Coverity report against the patch concerning
memory leakage
vii) most importantly, the patch contains a large amount of superfluous
churn that impedes other ongoing work with this driver

This patch will be reworked into a series that aligns with other
ongoing work and facilitates review.

Signed-off-by: Jonas Bonn <jonas@norrbonn.se>
Acked-by: Harald Welte <laforge@gnumonks.org>
Acked-by: Pravin B Shelar <pshelar@ovn.org>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

authored by

Jonas Bonn and committed by
Jakub Kicinski
49ecc587 3dd344ea

+144 -398
+144 -383
drivers/net/gtp.c
··· 21 21 #include <linux/file.h> 22 22 #include <linux/gtp.h> 23 23 24 - #include <net/dst_metadata.h> 25 24 #include <net/net_namespace.h> 26 25 #include <net/protocol.h> 27 26 #include <net/ip.h> ··· 73 74 unsigned int hash_size; 74 75 struct hlist_head *tid_hash; 75 76 struct hlist_head *addr_hash; 76 - /* Used by LWT tunnel. */ 77 - bool collect_md; 78 - struct socket *collect_md_sock; 79 77 }; 80 78 81 79 static unsigned int gtp_net_id __read_mostly; ··· 179 183 return false; 180 184 } 181 185 182 - static int gtp_set_tun_dst(struct gtp_dev *gtp, struct sk_buff *skb, 183 - unsigned int hdrlen, u8 gtp_version, 184 - __be64 tid, u8 flags) 186 + static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb, 187 + unsigned int hdrlen, unsigned int role) 185 188 { 186 - struct metadata_dst *tun_dst; 187 - int opts_len = 0; 188 - 189 - if (unlikely(flags & GTP1_F_MASK)) 190 - opts_len = sizeof(struct gtpu_metadata); 191 - 192 - tun_dst = udp_tun_rx_dst(skb, gtp->sk1u->sk_family, TUNNEL_KEY, tid, opts_len); 193 - if (!tun_dst) { 194 - netdev_dbg(gtp->dev, "Failed to allocate tun_dst"); 195 - goto err; 189 + if (!gtp_check_ms(skb, pctx, hdrlen, role)) { 190 + netdev_dbg(pctx->dev, "No PDP ctx for this MS\n"); 191 + return 1; 196 192 } 197 193 198 - netdev_dbg(gtp->dev, "attaching metadata_dst to skb, gtp ver %d hdrlen %d\n", 199 - gtp_version, hdrlen); 200 - if (unlikely(opts_len)) { 201 - struct gtpu_metadata *opts; 202 - struct gtp1_header *gtp1; 203 - 204 - opts = ip_tunnel_info_opts(&tun_dst->u.tun_info); 205 - gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr)); 206 - opts->ver = GTP_METADATA_V1; 207 - opts->flags = gtp1->flags; 208 - opts->type = gtp1->type; 209 - netdev_dbg(gtp->dev, "recved control pkt: flag %x type: %d\n", 210 - opts->flags, opts->type); 211 - tun_dst->u.tun_info.key.tun_flags |= TUNNEL_GTPU_OPT; 212 - tun_dst->u.tun_info.options_len = opts_len; 213 - skb->protocol = htons(0xffff); /* Unknown */ 214 - } 215 194 /* Get rid of the 
GTP + UDP headers. */ 216 195 if (iptunnel_pull_header(skb, hdrlen, skb->protocol, 217 - !net_eq(sock_net(gtp->sk1u), dev_net(gtp->dev)))) { 218 - gtp->dev->stats.rx_length_errors++; 219 - goto err; 220 - } 196 + !net_eq(sock_net(pctx->sk), dev_net(pctx->dev)))) 197 + return -1; 221 198 222 - skb_dst_set(skb, &tun_dst->dst); 223 - return 0; 224 - err: 225 - return -1; 226 - } 227 - 228 - static int gtp_rx(struct gtp_dev *gtp, struct sk_buff *skb, 229 - unsigned int hdrlen, u8 gtp_version, unsigned int role, 230 - __be64 tid, u8 flags, u8 type) 231 - { 232 - if (ip_tunnel_collect_metadata() || gtp->collect_md) { 233 - int err; 234 - 235 - err = gtp_set_tun_dst(gtp, skb, hdrlen, gtp_version, tid, flags); 236 - if (err) 237 - goto err; 238 - } else { 239 - struct pdp_ctx *pctx; 240 - 241 - if (flags & GTP1_F_MASK) 242 - hdrlen += 4; 243 - 244 - if (type != GTP_TPDU) 245 - return 1; 246 - 247 - if (gtp_version == GTP_V0) 248 - pctx = gtp0_pdp_find(gtp, be64_to_cpu(tid)); 249 - else 250 - pctx = gtp1_pdp_find(gtp, be64_to_cpu(tid)); 251 - if (!pctx) { 252 - netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb); 253 - return 1; 254 - } 255 - 256 - if (!gtp_check_ms(skb, pctx, hdrlen, role)) { 257 - netdev_dbg(pctx->dev, "No PDP ctx for this MS\n"); 258 - return 1; 259 - } 260 - /* Get rid of the GTP + UDP headers. */ 261 - if (iptunnel_pull_header(skb, hdrlen, skb->protocol, 262 - !net_eq(sock_net(pctx->sk), dev_net(gtp->dev)))) { 263 - gtp->dev->stats.rx_length_errors++; 264 - goto err; 265 - } 266 - } 267 - netdev_dbg(gtp->dev, "forwarding packet from GGSN to uplink\n"); 199 + netdev_dbg(pctx->dev, "forwarding packet from GGSN to uplink\n"); 268 200 269 201 /* Now that the UDP and the GTP header have been removed, set up the 270 202 * new network header. This is required by the upper layer to 271 203 * calculate the transport header. 
272 204 */ 273 205 skb_reset_network_header(skb); 274 - if (pskb_may_pull(skb, sizeof(struct iphdr))) { 275 - struct iphdr *iph; 276 206 277 - iph = ip_hdr(skb); 278 - if (iph->version == 4) { 279 - netdev_dbg(gtp->dev, "inner pkt: ipv4"); 280 - skb->protocol = htons(ETH_P_IP); 281 - } else if (iph->version == 6) { 282 - netdev_dbg(gtp->dev, "inner pkt: ipv6"); 283 - skb->protocol = htons(ETH_P_IPV6); 284 - } else { 285 - netdev_dbg(gtp->dev, "inner pkt error: Unknown type"); 286 - } 287 - } 207 + skb->dev = pctx->dev; 288 208 289 - skb->dev = gtp->dev; 290 - dev_sw_netstats_rx_add(gtp->dev, skb->len); 209 + dev_sw_netstats_rx_add(pctx->dev, skb->len); 210 + 291 211 netif_rx(skb); 292 212 return 0; 293 - 294 - err: 295 - gtp->dev->stats.rx_dropped++; 296 - return -1; 297 213 } 298 214 299 215 /* 1 means pass up to the stack, -1 means drop and 0 means decapsulated. */ ··· 214 306 unsigned int hdrlen = sizeof(struct udphdr) + 215 307 sizeof(struct gtp0_header); 216 308 struct gtp0_header *gtp0; 309 + struct pdp_ctx *pctx; 217 310 218 311 if (!pskb_may_pull(skb, hdrlen)) 219 312 return -1; ··· 224 315 if ((gtp0->flags >> 5) != GTP_V0) 225 316 return 1; 226 317 227 - return gtp_rx(gtp, skb, hdrlen, GTP_V0, gtp->role, gtp0->tid, gtp0->flags, gtp0->type); 318 + if (gtp0->type != GTP_TPDU) 319 + return 1; 320 + 321 + pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid)); 322 + if (!pctx) { 323 + netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb); 324 + return 1; 325 + } 326 + 327 + return gtp_rx(pctx, skb, hdrlen, gtp->role); 228 328 } 229 329 230 330 static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb) ··· 241 323 unsigned int hdrlen = sizeof(struct udphdr) + 242 324 sizeof(struct gtp1_header); 243 325 struct gtp1_header *gtp1; 326 + struct pdp_ctx *pctx; 244 327 245 328 if (!pskb_may_pull(skb, hdrlen)) 246 329 return -1; 247 330 248 331 gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr)); 249 332 250 - netdev_dbg(gtp->dev, "GTPv1 
recv: flags %x\n", gtp1->flags); 251 333 if ((gtp1->flags >> 5) != GTP_V1) 334 + return 1; 335 + 336 + if (gtp1->type != GTP_TPDU) 252 337 return 1; 253 338 254 339 /* From 29.060: "This field shall be present if and only if any one or ··· 260 339 * If any of the bit is set, then the remaining ones also have to be 261 340 * set. 262 341 */ 342 + if (gtp1->flags & GTP1_F_MASK) 343 + hdrlen += 4; 344 + 263 345 /* Make sure the header is larger enough, including extensions. */ 264 346 if (!pskb_may_pull(skb, hdrlen)) 265 347 return -1; 266 348 267 349 gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr)); 268 350 269 - return gtp_rx(gtp, skb, hdrlen, GTP_V1, gtp->role, 270 - key32_to_tunnel_id(gtp1->tid), gtp1->flags, gtp1->type); 351 + pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid)); 352 + if (!pctx) { 353 + netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb); 354 + return 1; 355 + } 356 + 357 + return gtp_rx(pctx, skb, hdrlen, gtp->role); 271 358 } 272 359 273 360 static void __gtp_encap_destroy(struct sock *sk) ··· 315 386 { 316 387 gtp_encap_disable_sock(gtp->sk0); 317 388 gtp_encap_disable_sock(gtp->sk1u); 318 - if (gtp->collect_md_sock) { 319 - udp_tunnel_sock_release(gtp->collect_md_sock); 320 - gtp->collect_md_sock = NULL; 321 - netdev_dbg(gtp->dev, "GTP socket released.\n"); 322 - } 323 389 } 324 390 325 391 /* UDP encapsulation receive handler. See net/ipv4/udp.c. 
··· 329 405 if (!gtp) 330 406 return 1; 331 407 332 - netdev_dbg(gtp->dev, "encap_recv sk=%p type %d\n", 333 - sk, udp_sk(sk)->encap_type); 408 + netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk); 334 409 335 410 switch (udp_sk(sk)->encap_type) { 336 411 case UDP_ENCAP_GTP0: ··· 383 460 384 461 static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4, 385 462 const struct sock *sk, 386 - __be32 daddr, 387 - __be32 saddr) 463 + __be32 daddr) 388 464 { 389 465 memset(fl4, 0, sizeof(*fl4)); 390 466 fl4->flowi4_oif = sk->sk_bound_dev_if; 391 467 fl4->daddr = daddr; 392 - fl4->saddr = saddr; 468 + fl4->saddr = inet_sk(sk)->inet_saddr; 393 469 fl4->flowi4_tos = RT_CONN_FLAGS(sk); 394 470 fl4->flowi4_proto = sk->sk_protocol; 395 471 ··· 412 490 gtp0->tid = cpu_to_be64(pctx->u.v0.tid); 413 491 } 414 492 415 - static inline void gtp1_push_header(struct sk_buff *skb, __be32 tid) 493 + static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx) 416 494 { 417 495 int payload_len = skb->len; 418 496 struct gtp1_header *gtp1; ··· 428 506 gtp1->flags = 0x30; /* v1, GTP-non-prime. */ 429 507 gtp1->type = GTP_TPDU; 430 508 gtp1->length = htons(payload_len); 431 - gtp1->tid = tid; 509 + gtp1->tid = htonl(pctx->u.v1.o_tei); 432 510 433 511 /* TODO: Suppport for extension header, sequence number and N-PDU. 434 512 * Update the length field if any of them is available. 435 513 */ 436 514 } 437 515 438 - static inline int gtp1_push_control_header(struct sk_buff *skb, 439 - __be32 tid, 440 - struct gtpu_metadata *opts, 441 - struct net_device *dev) 442 - { 443 - struct gtp1_header *gtp1c; 444 - int payload_len; 445 - 446 - if (opts->ver != GTP_METADATA_V1) 447 - return -ENOENT; 448 - 449 - if (opts->type == 0xFE) { 450 - /* for end marker ignore skb data. 
*/ 451 - netdev_dbg(dev, "xmit pkt with null data"); 452 - pskb_trim(skb, 0); 453 - } 454 - if (skb_cow_head(skb, sizeof(*gtp1c)) < 0) 455 - return -ENOMEM; 456 - 457 - payload_len = skb->len; 458 - 459 - gtp1c = skb_push(skb, sizeof(*gtp1c)); 460 - 461 - gtp1c->flags = opts->flags; 462 - gtp1c->type = opts->type; 463 - gtp1c->length = htons(payload_len); 464 - gtp1c->tid = tid; 465 - netdev_dbg(dev, "xmit control pkt: ver %d flags %x type %x pkt len %d tid %x", 466 - opts->ver, opts->flags, opts->type, skb->len, tid); 467 - return 0; 468 - } 469 - 470 516 struct gtp_pktinfo { 471 - struct sock *sk; 472 - __u8 tos; 473 - struct flowi4 fl4; 474 - struct rtable *rt; 475 - struct net_device *dev; 476 - __be16 gtph_port; 517 + struct sock *sk; 518 + struct iphdr *iph; 519 + struct flowi4 fl4; 520 + struct rtable *rt; 521 + struct pdp_ctx *pctx; 522 + struct net_device *dev; 523 + __be16 gtph_port; 477 524 }; 478 525 526 + static void gtp_push_header(struct sk_buff *skb, struct gtp_pktinfo *pktinfo) 527 + { 528 + switch (pktinfo->pctx->gtp_version) { 529 + case GTP_V0: 530 + pktinfo->gtph_port = htons(GTP0_PORT); 531 + gtp0_push_header(skb, pktinfo->pctx); 532 + break; 533 + case GTP_V1: 534 + pktinfo->gtph_port = htons(GTP1U_PORT); 535 + gtp1_push_header(skb, pktinfo->pctx); 536 + break; 537 + } 538 + } 539 + 479 540 static inline void gtp_set_pktinfo_ipv4(struct gtp_pktinfo *pktinfo, 480 - struct sock *sk, 481 - __u8 tos, 482 - struct rtable *rt, 541 + struct sock *sk, struct iphdr *iph, 542 + struct pdp_ctx *pctx, struct rtable *rt, 483 543 struct flowi4 *fl4, 484 544 struct net_device *dev) 485 545 { 486 546 pktinfo->sk = sk; 487 - pktinfo->tos = tos; 547 + pktinfo->iph = iph; 548 + pktinfo->pctx = pctx; 488 549 pktinfo->rt = rt; 489 550 pktinfo->fl4 = *fl4; 490 551 pktinfo->dev = dev; ··· 477 572 struct gtp_pktinfo *pktinfo) 478 573 { 479 574 struct gtp_dev *gtp = netdev_priv(dev); 480 - struct gtpu_metadata *opts = NULL; 481 - struct sock *sk = NULL; 482 575 
struct pdp_ctx *pctx; 483 576 struct rtable *rt; 484 577 struct flowi4 fl4; 485 - u8 gtp_version; 486 - __be16 df = 0; 487 - __be32 tun_id; 488 - __be32 daddr; 489 - __be32 saddr; 490 - __u8 tos; 578 + struct iphdr *iph; 579 + __be16 df; 491 580 int mtu; 492 581 493 - if (gtp->collect_md) { 494 - /* LWT GTP1U encap */ 495 - struct ip_tunnel_info *info = NULL; 582 + /* Read the IP destination address and resolve the PDP context. 583 + * Prepend PDP header with TEI/TID from PDP ctx. 584 + */ 585 + iph = ip_hdr(skb); 586 + if (gtp->role == GTP_ROLE_SGSN) 587 + pctx = ipv4_pdp_find(gtp, iph->saddr); 588 + else 589 + pctx = ipv4_pdp_find(gtp, iph->daddr); 496 590 497 - info = skb_tunnel_info(skb); 498 - if (!info) { 499 - netdev_dbg(dev, "missing tunnel info"); 500 - return -ENOENT; 501 - } 502 - if (info->key.tp_dst && ntohs(info->key.tp_dst) != GTP1U_PORT) { 503 - netdev_dbg(dev, "unexpected GTP dst port: %d", ntohs(info->key.tp_dst)); 504 - return -EOPNOTSUPP; 505 - } 506 - pctx = NULL; 507 - gtp_version = GTP_V1; 508 - tun_id = tunnel_id_to_key32(info->key.tun_id); 509 - daddr = info->key.u.ipv4.dst; 510 - saddr = info->key.u.ipv4.src; 511 - sk = gtp->sk1u; 512 - if (!sk) { 513 - netdev_dbg(dev, "missing tunnel sock"); 514 - return -EOPNOTSUPP; 515 - } 516 - tos = info->key.tos; 517 - if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) 518 - df = htons(IP_DF); 519 - 520 - if (info->options_len != 0) { 521 - if (info->key.tun_flags & TUNNEL_GTPU_OPT) { 522 - opts = ip_tunnel_info_opts(info); 523 - } else { 524 - netdev_dbg(dev, "missing tunnel metadata for control pkt"); 525 - return -EOPNOTSUPP; 526 - } 527 - } 528 - netdev_dbg(dev, "flow-based GTP1U encap: tunnel id %d\n", 529 - be32_to_cpu(tun_id)); 530 - } else { 531 - struct iphdr *iph; 532 - 533 - if (ntohs(skb->protocol) != ETH_P_IP) 534 - return -EOPNOTSUPP; 535 - 536 - iph = ip_hdr(skb); 537 - 538 - /* Read the IP destination address and resolve the PDP context. 
539 - * Prepend PDP header with TEI/TID from PDP ctx. 540 - */ 541 - if (gtp->role == GTP_ROLE_SGSN) 542 - pctx = ipv4_pdp_find(gtp, iph->saddr); 543 - else 544 - pctx = ipv4_pdp_find(gtp, iph->daddr); 545 - 546 - if (!pctx) { 547 - netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n", 548 - &iph->daddr); 549 - return -ENOENT; 550 - } 551 - sk = pctx->sk; 552 - netdev_dbg(dev, "found PDP context %p\n", pctx); 553 - 554 - gtp_version = pctx->gtp_version; 555 - tun_id = htonl(pctx->u.v1.o_tei); 556 - daddr = pctx->peer_addr_ip4.s_addr; 557 - saddr = inet_sk(sk)->inet_saddr; 558 - tos = iph->tos; 559 - df = iph->frag_off; 560 - netdev_dbg(dev, "gtp -> IP src: %pI4 dst: %pI4\n", 561 - &iph->saddr, &iph->daddr); 591 + if (!pctx) { 592 + netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n", 593 + &iph->daddr); 594 + return -ENOENT; 562 595 } 596 + netdev_dbg(dev, "found PDP context %p\n", pctx); 563 597 564 - rt = ip4_route_output_gtp(&fl4, sk, daddr, saddr); 598 + rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer_addr_ip4.s_addr); 565 599 if (IS_ERR(rt)) { 566 - netdev_dbg(dev, "no route to SSGN %pI4\n", &daddr); 600 + netdev_dbg(dev, "no route to SSGN %pI4\n", 601 + &pctx->peer_addr_ip4.s_addr); 567 602 dev->stats.tx_carrier_errors++; 568 603 goto err; 569 604 } 570 605 571 606 if (rt->dst.dev == dev) { 572 - netdev_dbg(dev, "circular route to SSGN %pI4\n", &daddr); 607 + netdev_dbg(dev, "circular route to SSGN %pI4\n", 608 + &pctx->peer_addr_ip4.s_addr); 573 609 dev->stats.collisions++; 574 610 goto err_rt; 575 611 } ··· 518 672 skb_dst_drop(skb); 519 673 520 674 /* This is similar to tnl_update_pmtu(). 
*/ 675 + df = iph->frag_off; 521 676 if (df) { 522 677 mtu = dst_mtu(&rt->dst) - dev->hard_header_len - 523 678 sizeof(struct iphdr) - sizeof(struct udphdr); 524 - switch (gtp_version) { 679 + switch (pctx->gtp_version) { 525 680 case GTP_V0: 526 681 mtu -= sizeof(struct gtp0_header); 527 682 break; ··· 536 689 537 690 rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu, false); 538 691 539 - if (!skb_is_gso(skb) && (df & htons(IP_DF)) && mtu < skb->len) { 540 - netdev_dbg(dev, "packet too big, fragmentation needed"); 692 + if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) && 693 + mtu < ntohs(iph->tot_len)) { 694 + netdev_dbg(dev, "packet too big, fragmentation needed\n"); 541 695 memset(IPCB(skb), 0, sizeof(*IPCB(skb))); 542 696 icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, 543 697 htonl(mtu)); 544 698 goto err_rt; 545 699 } 546 700 547 - gtp_set_pktinfo_ipv4(pktinfo, sk, tos, rt, &fl4, dev); 548 - 549 - if (unlikely(opts)) { 550 - int err; 551 - 552 - pktinfo->gtph_port = htons(GTP1U_PORT); 553 - err = gtp1_push_control_header(skb, tun_id, opts, dev); 554 - if (err) { 555 - netdev_info(dev, "cntr pkt error %d", err); 556 - goto err_rt; 557 - } 558 - return 0; 559 - } 560 - 561 - switch (gtp_version) { 562 - case GTP_V0: 563 - pktinfo->gtph_port = htons(GTP0_PORT); 564 - gtp0_push_header(skb, pctx); 565 - break; 566 - case GTP_V1: 567 - pktinfo->gtph_port = htons(GTP1U_PORT); 568 - gtp1_push_header(skb, tun_id); 569 - break; 570 - } 701 + gtp_set_pktinfo_ipv4(pktinfo, pctx->sk, iph, pctx, rt, &fl4, dev); 702 + gtp_push_header(skb, pktinfo); 571 703 572 704 return 0; 573 705 err_rt: ··· 557 731 558 732 static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev) 559 733 { 734 + unsigned int proto = ntohs(skb->protocol); 560 735 struct gtp_pktinfo pktinfo; 561 736 int err; 562 737 ··· 569 742 570 743 /* PDP context lookups in gtp_build_skb_*() need rcu read-side lock. 
*/ 571 744 rcu_read_lock(); 572 - err = gtp_build_skb_ip4(skb, dev, &pktinfo); 745 + switch (proto) { 746 + case ETH_P_IP: 747 + err = gtp_build_skb_ip4(skb, dev, &pktinfo); 748 + break; 749 + default: 750 + err = -EOPNOTSUPP; 751 + break; 752 + } 573 753 rcu_read_unlock(); 574 754 575 755 if (err < 0) 576 756 goto tx_err; 577 757 578 - udp_tunnel_xmit_skb(pktinfo.rt, pktinfo.sk, skb, 579 - pktinfo.fl4.saddr, 580 - pktinfo.fl4.daddr, 581 - pktinfo.tos, 582 - ip4_dst_hoplimit(&pktinfo.rt->dst), 583 - 0, 584 - pktinfo.gtph_port, 585 - pktinfo.gtph_port, 586 - true, 587 - false); 758 + switch (proto) { 759 + case ETH_P_IP: 760 + netdev_dbg(pktinfo.dev, "gtp -> IP src: %pI4 dst: %pI4\n", 761 + &pktinfo.iph->saddr, &pktinfo.iph->daddr); 762 + udp_tunnel_xmit_skb(pktinfo.rt, pktinfo.sk, skb, 763 + pktinfo.fl4.saddr, pktinfo.fl4.daddr, 764 + pktinfo.iph->tos, 765 + ip4_dst_hoplimit(&pktinfo.rt->dst), 766 + 0, 767 + pktinfo.gtph_port, pktinfo.gtph_port, 768 + true, false); 769 + break; 770 + } 588 771 589 772 return NETDEV_TX_OK; 590 773 tx_err: ··· 609 772 .ndo_start_xmit = gtp_dev_xmit, 610 773 .ndo_get_stats64 = dev_get_tstats64, 611 774 }; 612 - 613 - static struct gtp_dev *gtp_find_flow_based_dev(struct net *net) 614 - { 615 - struct gtp_net *gn = net_generic(net, gtp_net_id); 616 - struct gtp_dev *gtp; 617 - 618 - list_for_each_entry(gtp, &gn->gtp_dev_list, list) { 619 - if (gtp->collect_md) 620 - return gtp; 621 - } 622 - 623 - return NULL; 624 - } 625 775 626 776 static void gtp_link_setup(struct net_device *dev) 627 777 { ··· 634 810 } 635 811 636 812 static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize); 637 - static int gtp_encap_enable(struct gtp_dev *gtp, struct net_device *dev, struct nlattr *data[]); 813 + static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]); 638 814 639 815 static void gtp_destructor(struct net_device *dev) 640 816 { ··· 652 828 struct gtp_net *gn; 653 829 int hashsize, err; 654 830 655 - if (!data[IFLA_GTP_FD0] && 
!data[IFLA_GTP_FD1] && 656 - !data[IFLA_GTP_COLLECT_METADATA]) 831 + if (!data[IFLA_GTP_FD0] && !data[IFLA_GTP_FD1]) 657 832 return -EINVAL; 658 833 659 834 gtp = netdev_priv(dev); 660 - 661 - if (data[IFLA_GTP_COLLECT_METADATA]) { 662 - if (data[IFLA_GTP_FD0]) { 663 - netdev_dbg(dev, "LWT device does not support setting v0 socket"); 664 - return -EINVAL; 665 - } 666 - if (gtp_find_flow_based_dev(src_net)) { 667 - netdev_dbg(dev, "LWT device already exist"); 668 - return -EBUSY; 669 - } 670 - gtp->collect_md = true; 671 - } 672 835 673 836 if (!data[IFLA_GTP_PDP_HASHSIZE]) { 674 837 hashsize = 1024; ··· 669 858 if (err < 0) 670 859 return err; 671 860 672 - err = gtp_encap_enable(gtp, dev, data); 861 + err = gtp_encap_enable(gtp, data); 673 862 if (err < 0) 674 863 goto out_hashtable; 675 864 ··· 683 872 list_add_rcu(&gtp->list, &gn->gtp_dev_list); 684 873 dev->priv_destructor = gtp_destructor; 685 874 686 - netdev_dbg(dev, "registered new GTP interface %s\n", dev->name); 875 + netdev_dbg(dev, "registered new GTP interface\n"); 687 876 688 877 return 0; 689 878 ··· 714 903 [IFLA_GTP_FD1] = { .type = NLA_U32 }, 715 904 [IFLA_GTP_PDP_HASHSIZE] = { .type = NLA_U32 }, 716 905 [IFLA_GTP_ROLE] = { .type = NLA_U32 }, 717 - [IFLA_GTP_COLLECT_METADATA] = { .type = NLA_FLAG }, 718 906 }; 719 907 720 908 static int gtp_validate(struct nlattr *tb[], struct nlattr *data[], ··· 735 925 struct gtp_dev *gtp = netdev_priv(dev); 736 926 737 927 if (nla_put_u32(skb, IFLA_GTP_PDP_HASHSIZE, gtp->hash_size)) 738 - goto nla_put_failure; 739 - 740 - if (gtp->collect_md && nla_put_flag(skb, IFLA_GTP_COLLECT_METADATA)) 741 928 goto nla_put_failure; 742 929 743 930 return 0; ··· 782 975 return -ENOMEM; 783 976 } 784 977 785 - static int __gtp_encap_enable_socket(struct socket *sock, int type, 786 - struct gtp_dev *gtp) 978 + static struct sock *gtp_encap_enable_socket(int fd, int type, 979 + struct gtp_dev *gtp) 787 980 { 788 981 struct udp_tunnel_sock_cfg tuncfg = {NULL}; 982 + struct 
socket *sock; 789 983 struct sock *sk; 984 + int err; 985 + 986 + pr_debug("enable gtp on %d, %d\n", fd, type); 987 + 988 + sock = sockfd_lookup(fd, &err); 989 + if (!sock) { 990 + pr_debug("gtp socket fd=%d not found\n", fd); 991 + return NULL; 992 + } 790 993 791 994 sk = sock->sk; 792 995 if (sk->sk_protocol != IPPROTO_UDP || 793 996 sk->sk_type != SOCK_DGRAM || 794 997 (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)) { 795 - pr_debug("socket not UDP\n"); 796 - return -EINVAL; 998 + pr_debug("socket fd=%d not UDP\n", fd); 999 + sk = ERR_PTR(-EINVAL); 1000 + goto out_sock; 797 1001 } 798 1002 799 1003 lock_sock(sk); 800 1004 if (sk->sk_user_data) { 801 - release_sock(sock->sk); 802 - return -EBUSY; 1005 + sk = ERR_PTR(-EBUSY); 1006 + goto out_rel_sock; 803 1007 } 804 1008 805 1009 sock_hold(sk); ··· 821 1003 tuncfg.encap_destroy = gtp_encap_destroy; 822 1004 823 1005 setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg); 1006 + 1007 + out_rel_sock: 824 1008 release_sock(sock->sk); 825 - return 0; 826 - } 827 - 828 - static struct sock *gtp_encap_enable_socket(int fd, int type, 829 - struct gtp_dev *gtp) 830 - { 831 - struct socket *sock; 832 - int err; 833 - 834 - pr_debug("enable gtp on %d, %d\n", fd, type); 835 - 836 - sock = sockfd_lookup(fd, &err); 837 - if (!sock) { 838 - pr_debug("gtp socket fd=%d not found\n", fd); 839 - return NULL; 840 - } 841 - err = __gtp_encap_enable_socket(sock, type, gtp); 1009 + out_sock: 842 1010 sockfd_put(sock); 843 - if (err) 844 - return ERR_PTR(err); 845 - 846 - return sock->sk; 1011 + return sk; 847 1012 } 848 1013 849 - static struct socket *gtp_create_gtp_socket(struct gtp_dev *gtp, struct net_device *dev) 850 - { 851 - struct udp_port_cfg udp_conf; 852 - struct socket *sock; 853 - int err; 854 - 855 - memset(&udp_conf, 0, sizeof(udp_conf)); 856 - udp_conf.family = AF_INET; 857 - udp_conf.local_ip.s_addr = htonl(INADDR_ANY); 858 - udp_conf.local_udp_port = htons(GTP1U_PORT); 859 - 860 - err = 
udp_sock_create(dev_net(dev), &udp_conf, &sock); 861 - if (err < 0) { 862 - pr_debug("create gtp sock failed: %d\n", err); 863 - return ERR_PTR(err); 864 - } 865 - err = __gtp_encap_enable_socket(sock, UDP_ENCAP_GTP1U, gtp); 866 - if (err) { 867 - pr_debug("enable gtp sock encap failed: %d\n", err); 868 - udp_tunnel_sock_release(sock); 869 - return ERR_PTR(err); 870 - } 871 - pr_debug("create gtp sock done\n"); 872 - return sock; 873 - } 874 - 875 - static int gtp_encap_enable(struct gtp_dev *gtp, struct net_device *dev, struct nlattr *data[]) 1014 + static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]) 876 1015 { 877 1016 struct sock *sk1u = NULL; 878 1017 struct sock *sk0 = NULL; ··· 853 1078 } 854 1079 } 855 1080 856 - if (data[IFLA_GTP_COLLECT_METADATA]) { 857 - struct socket *sock; 858 - 859 - if (!sk1u) { 860 - sock = gtp_create_gtp_socket(gtp, dev); 861 - if (IS_ERR(sock)) 862 - return PTR_ERR(sock); 863 - 864 - gtp->collect_md_sock = sock; 865 - sk1u = sock->sk; 866 - } else { 867 - gtp->collect_md_sock = NULL; 868 - } 869 - } 870 - 871 1081 if (data[IFLA_GTP_ROLE]) { 872 1082 role = nla_get_u32(data[IFLA_GTP_ROLE]); 873 1083 if (role > GTP_ROLE_SGSN) { 874 - gtp_encap_disable(gtp); 1084 + gtp_encap_disable_sock(sk0); 1085 + gtp_encap_disable_sock(sk1u); 875 1086 return -EINVAL; 876 1087 } 877 1088 } ··· 1416 1655 if (err < 0) 1417 1656 goto unreg_genl_family; 1418 1657 1419 - pr_info("GTP module loaded (pdp ctx size %zd bytes) with tnl-md support\n", 1658 + pr_info("GTP module loaded (pdp ctx size %zd bytes)\n", 1420 1659 sizeof(struct pdp_ctx)); 1421 1660 return 0; 1422 1661
-12
include/uapi/linux/gtp.h
··· 2 2 #ifndef _UAPI_LINUX_GTP_H_ 3 3 #define _UAPI_LINUX_GTP_H_ 4 4 5 - #include <linux/types.h> 6 - 7 5 #define GTP_GENL_MCGRP_NAME "gtp" 8 6 9 7 enum gtp_genl_cmds { ··· 33 35 __GTPA_MAX, 34 36 }; 35 37 #define GTPA_MAX (__GTPA_MAX + 1) 36 - 37 - enum { 38 - GTP_METADATA_V1 39 - }; 40 - 41 - struct gtpu_metadata { 42 - __u8 ver; 43 - __u8 flags; 44 - __u8 type; 45 - }; 46 38 47 39 #endif /* _UAPI_LINUX_GTP_H_ */
-1
include/uapi/linux/if_link.h
··· 811 811 IFLA_GTP_FD1, 812 812 IFLA_GTP_PDP_HASHSIZE, 813 813 IFLA_GTP_ROLE, 814 - IFLA_GTP_COLLECT_METADATA, 815 814 __IFLA_GTP_MAX, 816 815 }; 817 816 #define IFLA_GTP_MAX (__IFLA_GTP_MAX - 1)
-1
include/uapi/linux/if_tunnel.h
··· 176 176 #define TUNNEL_VXLAN_OPT __cpu_to_be16(0x1000) 177 177 #define TUNNEL_NOCACHE __cpu_to_be16(0x2000) 178 178 #define TUNNEL_ERSPAN_OPT __cpu_to_be16(0x4000) 179 - #define TUNNEL_GTPU_OPT __cpu_to_be16(0x8000) 180 179 181 180 #define TUNNEL_OPTIONS_PRESENT \ 182 181 (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT | TUNNEL_ERSPAN_OPT)
-1
tools/include/uapi/linux/if_link.h
··· 617 617 IFLA_GTP_FD1, 618 618 IFLA_GTP_PDP_HASHSIZE, 619 619 IFLA_GTP_ROLE, 620 - IFLA_GTP_COLLECT_METADATA, 621 620 __IFLA_GTP_MAX, 622 621 }; 623 622 #define IFLA_GTP_MAX (__IFLA_GTP_MAX - 1)