Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

macvlan: handle fragmented multicast frames

Fragmented multicast frames are delivered to only a single macvlan port,
because the IP defrag logic considers the other copies redundant.

Implement a defrag step before trying to send the multicast frame.

Reported-by: Ben Greear <greearb@candelatech.com>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Eric Dumazet and committed by
David S. Miller
bc416d97 f7ba35da

+49 -38
+3
drivers/net/macvlan.c
··· 169 169 170 170 port = macvlan_port_get_rcu(skb->dev); 171 171 if (is_multicast_ether_addr(eth->h_dest)) { 172 + skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN); 173 + if (!skb) 174 + return RX_HANDLER_CONSUMED; 172 175 src = macvlan_hash_lookup(port, eth->h_source); 173 176 if (!src) 174 177 /* frame comes from an external address */
+9
include/net/ip.h
··· 406 406 IP_DEFRAG_VS_OUT, 407 407 IP_DEFRAG_VS_FWD, 408 408 IP_DEFRAG_AF_PACKET, 409 + IP_DEFRAG_MACVLAN, 409 410 }; 410 411 411 412 int ip_defrag(struct sk_buff *skb, u32 user); 413 + #ifdef CONFIG_INET 414 + struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user); 415 + #else 416 + static inline struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user) 417 + { 418 + return skb; 419 + } 420 + #endif 412 421 int ip_frag_mem(struct net *net); 413 422 int ip_frag_nqueues(struct net *net); 414 423
+36
net/ipv4/ip_fragment.c
··· 682 682 } 683 683 EXPORT_SYMBOL(ip_defrag); 684 684 685 + struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user) 686 + { 687 + const struct iphdr *iph; 688 + u32 len; 689 + 690 + if (skb->protocol != htons(ETH_P_IP)) 691 + return skb; 692 + 693 + if (!pskb_may_pull(skb, sizeof(struct iphdr))) 694 + return skb; 695 + 696 + iph = ip_hdr(skb); 697 + if (iph->ihl < 5 || iph->version != 4) 698 + return skb; 699 + if (!pskb_may_pull(skb, iph->ihl*4)) 700 + return skb; 701 + iph = ip_hdr(skb); 702 + len = ntohs(iph->tot_len); 703 + if (skb->len < len || len < (iph->ihl * 4)) 704 + return skb; 705 + 706 + if (ip_is_fragment(ip_hdr(skb))) { 707 + skb = skb_share_check(skb, GFP_ATOMIC); 708 + if (skb) { 709 + if (pskb_trim_rcsum(skb, len)) 710 + return skb; 711 + memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); 712 + if (ip_defrag(skb, user)) 713 + return NULL; 714 + skb->rxhash = 0; 715 + } 716 + } 717 + return skb; 718 + } 719 + EXPORT_SYMBOL(ip_check_defrag); 720 + 685 721 #ifdef CONFIG_SYSCTL 686 722 static int zero; 687 723
+1 -38
net/packet/af_packet.c
··· 1213 1213 return f->arr[cpu % num]; 1214 1214 } 1215 1215 1216 - static struct sk_buff *fanout_check_defrag(struct sk_buff *skb) 1217 - { 1218 - #ifdef CONFIG_INET 1219 - const struct iphdr *iph; 1220 - u32 len; 1221 - 1222 - if (skb->protocol != htons(ETH_P_IP)) 1223 - return skb; 1224 - 1225 - if (!pskb_may_pull(skb, sizeof(struct iphdr))) 1226 - return skb; 1227 - 1228 - iph = ip_hdr(skb); 1229 - if (iph->ihl < 5 || iph->version != 4) 1230 - return skb; 1231 - if (!pskb_may_pull(skb, iph->ihl*4)) 1232 - return skb; 1233 - iph = ip_hdr(skb); 1234 - len = ntohs(iph->tot_len); 1235 - if (skb->len < len || len < (iph->ihl * 4)) 1236 - return skb; 1237 - 1238 - if (ip_is_fragment(ip_hdr(skb))) { 1239 - skb = skb_share_check(skb, GFP_ATOMIC); 1240 - if (skb) { 1241 - if (pskb_trim_rcsum(skb, len)) 1242 - return skb; 1243 - memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); 1244 - if (ip_defrag(skb, IP_DEFRAG_AF_PACKET)) 1245 - return NULL; 1246 - skb->rxhash = 0; 1247 - } 1248 - } 1249 - #endif 1250 - return skb; 1251 - } 1252 - 1253 1216 static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev, 1254 1217 struct packet_type *pt, struct net_device *orig_dev) 1255 1218 { ··· 1231 1268 case PACKET_FANOUT_HASH: 1232 1269 default: 1233 1270 if (f->defrag) { 1234 - skb = fanout_check_defrag(skb); 1271 + skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET); 1235 1272 if (!skb) 1236 1273 return 0; 1237 1274 }