Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

udp: restrict offloads to one namespace

UDP tunnel offloads tend to aggregate datagrams based on inner
headers. The GRO engine gets notified by tunnel implementations about
possible offloads. The match is solely based on the port number.

Imagine a tunnel bound to port 53; the offloading will look into all
DNS packets and try to aggregate them based on the inner data found
within. This could lead to data corruption and malformed DNS packets.

While this patch minimizes the problem and helps an administrator find
the issue by querying ip tunnel/fou, a better way would be to match on
the specific destination IP address, so that if a user-space socket is
bound to the same address it will conflict.

Cc: Tom Herbert <tom@herbertland.com>
Cc: Eric Dumazet <edumazet@google.com>
Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Hannes Frederic Sowa and committed by
David S. Miller
787d7ac3 07b9b37c

+11 -7
+1 -1
drivers/net/geneve.c
··· 376 376 int err; 377 377 378 378 if (sa_family == AF_INET) { 379 - err = udp_add_offload(&gs->udp_offloads); 379 + err = udp_add_offload(sock_net(sk), &gs->udp_offloads); 380 380 if (err) 381 381 pr_warn("geneve: udp_add_offload failed with status %d\n", 382 382 err);
+1 -1
drivers/net/vxlan.c
··· 621 621 int err; 622 622 623 623 if (sa_family == AF_INET) { 624 - err = udp_add_offload(&vs->udp_offloads); 624 + err = udp_add_offload(net, &vs->udp_offloads); 625 625 if (err) 626 626 pr_warn("vxlan: udp_add_offload failed with status %d\n", err); 627 627 }
+1 -1
include/net/protocol.h
··· 107 107 void inet_register_protosw(struct inet_protosw *p); 108 108 void inet_unregister_protosw(struct inet_protosw *p); 109 109 110 - int udp_add_offload(struct udp_offload *prot); 110 + int udp_add_offload(struct net *net, struct udp_offload *prot); 111 111 void udp_del_offload(struct udp_offload *prot); 112 112 113 113 #if IS_ENABLED(CONFIG_IPV6)
+1 -1
net/ipv4/fou.c
··· 498 498 sk->sk_allocation = GFP_ATOMIC; 499 499 500 500 if (cfg->udp_config.family == AF_INET) { 501 - err = udp_add_offload(&fou->udp_offloads); 501 + err = udp_add_offload(net, &fou->udp_offloads); 502 502 if (err) 503 503 goto error; 504 504 }
+7 -3
net/ipv4/udp_offload.c
··· 21 21 22 22 struct udp_offload_priv { 23 23 struct udp_offload *offload; 24 + possible_net_t net; 24 25 struct rcu_head rcu; 25 26 struct udp_offload_priv __rcu *next; 26 27 }; ··· 242 241 return segs; 243 242 } 244 243 245 - int udp_add_offload(struct udp_offload *uo) 244 + int udp_add_offload(struct net *net, struct udp_offload *uo) 246 245 { 247 246 struct udp_offload_priv *new_offload = kzalloc(sizeof(*new_offload), GFP_ATOMIC); 248 247 249 248 if (!new_offload) 250 249 return -ENOMEM; 251 250 251 + write_pnet(&new_offload->net, net); 252 252 new_offload->offload = uo; 253 253 254 254 spin_lock(&udp_offload_lock); ··· 313 311 rcu_read_lock(); 314 312 uo_priv = rcu_dereference(udp_offload_base); 315 313 for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) { 316 - if (uo_priv->offload->port == uh->dest && 314 + if (net_eq(read_pnet(&uo_priv->net), dev_net(skb->dev)) && 315 + uo_priv->offload->port == uh->dest && 317 316 uo_priv->offload->callbacks.gro_receive) 318 317 goto unflush; 319 318 } ··· 392 389 393 390 uo_priv = rcu_dereference(udp_offload_base); 394 391 for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) { 395 - if (uo_priv->offload->port == uh->dest && 392 + if (net_eq(read_pnet(&uo_priv->net), dev_net(skb->dev)) && 393 + uo_priv->offload->port == uh->dest && 396 394 uo_priv->offload->callbacks.gro_complete) 397 395 break; 398 396 }