Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: always use icmp{,v6}_ndo_send from ndo_start_xmit

There were a few remaining tunnel drivers that didn't receive the prior
conversion to icmp{,v6}_ndo_send. Knowing now that this could lead to
memory corruption (see ee576c47db60 ("net: icmp: pass zeroed opts from
icmp{,v6}_ndo_send before sending") for details), it's even more
imperative to have these all converted. So this commit goes through the
remaining cases that I could find and does a boring translation to the
ndo variety.

The Fixes: line below is the merge that originally added icmp{,v6}_
ndo_send and converted the first batch of icmp{,v6}_send users. The
rationale then for the change applies equally to this patch. It's just
that these drivers were left out of the initial conversion because these
network devices are hiding in net/ rather than in drivers/net/.

Cc: Florian Westphal <fw@strlen.de>
Cc: Willem de Bruijn <willemb@google.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
Cc: David Ahern <dsahern@kernel.org>
Cc: Jakub Kicinski <kuba@kernel.org>
Cc: Steffen Klassert <steffen.klassert@secunet.com>
Fixes: 803381f9f117 ("Merge branch 'icmp-account-for-NAT-when-sending-icmps-from-ndo-layer'")
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Acked-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Jason A. Donenfeld and committed by
David S. Miller
4372339e 9eb8bc59

+22 -23
+2 -3
net/ipv4/ip_tunnel.c
··· 502 502 if (!skb_is_gso(skb) && 503 503 (inner_iph->frag_off & htons(IP_DF)) && 504 504 mtu < pkt_size) { 505 - memset(IPCB(skb), 0, sizeof(*IPCB(skb))); 506 - icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); 505 + icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); 507 506 return -E2BIG; 508 507 } 509 508 } ··· 526 527 527 528 if (!skb_is_gso(skb) && mtu >= IPV6_MIN_MTU && 528 529 mtu < pkt_size) { 529 - icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 530 + icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 530 531 return -E2BIG; 531 532 } 532 533 }
+3 -3
net/ipv4/ip_vti.c
··· 238 238 if (skb->len > mtu) { 239 239 skb_dst_update_pmtu_no_confirm(skb, mtu); 240 240 if (skb->protocol == htons(ETH_P_IP)) { 241 - icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, 242 - htonl(mtu)); 241 + icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, 242 + htonl(mtu)); 243 243 } else { 244 244 if (mtu < IPV6_MIN_MTU) 245 245 mtu = IPV6_MIN_MTU; 246 246 247 - icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 247 + icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 248 248 } 249 249 250 250 dst_release(dst);
+8 -8
net/ipv6/ip6_gre.c
··· 678 678 679 679 tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset]; 680 680 if (tel->encap_limit == 0) { 681 - icmpv6_send(skb, ICMPV6_PARAMPROB, 682 - ICMPV6_HDR_FIELD, offset + 2); 681 + icmpv6_ndo_send(skb, ICMPV6_PARAMPROB, 682 + ICMPV6_HDR_FIELD, offset + 2); 683 683 return -1; 684 684 } 685 685 *encap_limit = tel->encap_limit - 1; ··· 805 805 if (err != 0) { 806 806 /* XXX: send ICMP error even if DF is not set. */ 807 807 if (err == -EMSGSIZE) 808 - icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, 809 - htonl(mtu)); 808 + icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, 809 + htonl(mtu)); 810 810 return -1; 811 811 } 812 812 ··· 837 837 &mtu, skb->protocol); 838 838 if (err != 0) { 839 839 if (err == -EMSGSIZE) 840 - icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 840 + icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 841 841 return -1; 842 842 } 843 843 ··· 1063 1063 /* XXX: send ICMP error even if DF is not set. */ 1064 1064 if (err == -EMSGSIZE) { 1065 1065 if (skb->protocol == htons(ETH_P_IP)) 1066 - icmp_send(skb, ICMP_DEST_UNREACH, 1067 - ICMP_FRAG_NEEDED, htonl(mtu)); 1066 + icmp_ndo_send(skb, ICMP_DEST_UNREACH, 1067 + ICMP_FRAG_NEEDED, htonl(mtu)); 1068 1068 else 1069 - icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 1069 + icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 1070 1070 } 1071 1071 1072 1072 goto tx_err;
+5 -5
net/ipv6/ip6_tunnel.c
··· 1332 1332 1333 1333 tel = (void *)&skb_network_header(skb)[offset]; 1334 1334 if (tel->encap_limit == 0) { 1335 - icmpv6_send(skb, ICMPV6_PARAMPROB, 1336 - ICMPV6_HDR_FIELD, offset + 2); 1335 + icmpv6_ndo_send(skb, ICMPV6_PARAMPROB, 1336 + ICMPV6_HDR_FIELD, offset + 2); 1337 1337 return -1; 1338 1338 } 1339 1339 encap_limit = tel->encap_limit - 1; ··· 1385 1385 if (err == -EMSGSIZE) 1386 1386 switch (protocol) { 1387 1387 case IPPROTO_IPIP: 1388 - icmp_send(skb, ICMP_DEST_UNREACH, 1389 - ICMP_FRAG_NEEDED, htonl(mtu)); 1388 + icmp_ndo_send(skb, ICMP_DEST_UNREACH, 1389 + ICMP_FRAG_NEEDED, htonl(mtu)); 1390 1390 break; 1391 1391 case IPPROTO_IPV6: 1392 - icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 1392 + icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 1393 1393 break; 1394 1394 default: 1395 1395 break;
+3 -3
net/ipv6/ip6_vti.c
··· 521 521 if (mtu < IPV6_MIN_MTU) 522 522 mtu = IPV6_MIN_MTU; 523 523 524 - icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 524 + icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 525 525 } else { 526 - icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, 527 - htonl(mtu)); 526 + icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, 527 + htonl(mtu)); 528 528 } 529 529 530 530 err = -EMSGSIZE;
+1 -1
net/ipv6/sit.c
··· 987 987 skb_dst_update_pmtu_no_confirm(skb, mtu); 988 988 989 989 if (skb->len > mtu && !skb_is_gso(skb)) { 990 - icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 990 + icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 991 991 ip_rt_put(rt); 992 992 goto tx_error; 993 993 }