Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

1) Fix BPF handling of branch offset adjustments on backjumps, from
Daniel Borkmann.

2) Make sure selinux knows about SOCK_DESTROY netlink messages, from
Lorenzo Colitti.

3) Fix openvswitch tunnel MTU regression, from David Wragg.

4) Fix ICMP handling of TCP sockets in syn_recv state, from Eric
Dumazet.

5) Fix SCTP user hmacid byte ordering bug, from Xin Long.

6) Fix recursive locking in ipv6 addrconf, from Subash Abhinov
Kasiviswanathan.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
bpf: fix branch offset adjustment on backjumps after patching ctx expansion
vxlan, gre, geneve: Set a large MTU on ovs-created tunnel devices
geneve: Relax MTU constraints
vxlan: Relax MTU constraints
flow_dissector: Fix unaligned access in __skb_flow_dissector when used by eth_get_headlen
  of: of_mdio: Add marvell,88e1145 to whitelist of PHY compatibilities.
selinux: nlmsgtab: add SOCK_DESTROY to the netlink mapping tables
sctp: translate network order to host order when users get a hmacid
enic: increment devcmd2 result ring in case of timeout
tg3: Fix for tg3 transmit queue 0 timed out when too many gso_segs
net:Add sysctl_max_skb_frags
tcp: do not drop syn_recv on all icmp reports
ipv6: fix a lockdep splat
unix: correctly track in-flight fds in sending process user_struct
update be2net maintainers' email addresses
dwc_eth_qos: Reset hardware before PHY start
ipv6: addrconf: Fix recursive spin lock call

30 files changed, 197 insertions(+), 72 deletions(-)
MAINTAINERS (+5 -4)
···
 F:      drivers/scsi/be2iscsi/
 
 Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER
-M:      Sathya Perla <sathya.perla@avagotech.com>
-M:      Ajit Khaparde <ajit.khaparde@avagotech.com>
-M:      Padmanabh Ratnakar <padmanabh.ratnakar@avagotech.com>
-M:      Sriharsha Basavapatna <sriharsha.basavapatna@avagotech.com>
+M:      Sathya Perla <sathya.perla@broadcom.com>
+M:      Ajit Khaparde <ajit.khaparde@broadcom.com>
+M:      Padmanabh Ratnakar <padmanabh.ratnakar@broadcom.com>
+M:      Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
+M:      Somnath Kotur <somnath.kotur@broadcom.com>
 L:      netdev@vger.kernel.org
 W:      http://www.emulex.com
 S:      Supported
drivers/net/ethernet/broadcom/tg3.c (+19 -6)
···
     return ret;
 }
 
+static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
+{
+    /* Check if we will never have enough descriptors,
+     * as gso_segs can be more than current ring size
+     */
+    return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
+}
+
 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
 
 /* Use GSO to workaround all TSO packets that meet HW bug conditions
···
      * vlan encapsulated.
      */
     if (skb->protocol == htons(ETH_P_8021Q) ||
-        skb->protocol == htons(ETH_P_8021AD))
-        return tg3_tso_bug(tp, tnapi, txq, skb);
+        skb->protocol == htons(ETH_P_8021AD)) {
+        if (tg3_tso_bug_gso_check(tnapi, skb))
+            return tg3_tso_bug(tp, tnapi, txq, skb);
+        goto drop;
+    }
 
     if (!skb_is_gso_v6(skb)) {
         if (unlikely((ETH_HLEN + hdr_len) > 80) &&
-            tg3_flag(tp, TSO_BUG))
-            return tg3_tso_bug(tp, tnapi, txq, skb);
-
+            tg3_flag(tp, TSO_BUG)) {
+            if (tg3_tso_bug_gso_check(tnapi, skb))
+                return tg3_tso_bug(tp, tnapi, txq, skb);
+            goto drop;
+        }
         ip_csum = iph->check;
         ip_tot_len = iph->tot_len;
         iph->check = 0;
···
     if (would_hit_hwbug) {
         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
 
-        if (mss) {
+        if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
             /* If it's a TSO packet, do GSO instead of
              * allocating and copying to a large linear SKB
              */
drivers/net/ethernet/cisco/enic/enic.h (+1 -1)
···
 
 #define DRV_NAME        "enic"
 #define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION     "2.3.0.12"
+#define DRV_VERSION     "2.3.0.20"
 #define DRV_COPYRIGHT   "Copyright 2008-2013 Cisco Systems, Inc"
 
 #define ENIC_BARS_MAX   6
drivers/net/ethernet/cisco/enic/vnic_dev.c (+12 -7)
···
         int wait)
 {
     struct devcmd2_controller *dc2c = vdev->devcmd2;
-    struct devcmd2_result *result = dc2c->result + dc2c->next_result;
+    struct devcmd2_result *result;
+    u8 color;
     unsigned int i;
     int delay, err;
     u32 fetch_index, new_posted;
···
     if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
         return 0;
 
+    result = dc2c->result + dc2c->next_result;
+    color = dc2c->color;
+
+    dc2c->next_result++;
+    if (dc2c->next_result == dc2c->result_size) {
+        dc2c->next_result = 0;
+        dc2c->color = dc2c->color ? 0 : 1;
+    }
+
     for (delay = 0; delay < wait; delay++) {
-        if (result->color == dc2c->color) {
-            dc2c->next_result++;
-            if (dc2c->next_result == dc2c->result_size) {
-                dc2c->next_result = 0;
-                dc2c->color = dc2c->color ? 0 : 1;
-            }
+        if (result->color == color) {
             if (result->error) {
                 err = result->error;
                 if (err != ERR_ECMDUNKNOWN ||
drivers/net/ethernet/synopsys/dwc_eth_qos.c (+1 -1)
···
     }
     netdev_reset_queue(ndev);
 
+    dwceqos_init_hw(lp);
     napi_enable(&lp->napi);
     phy_start(lp->phy_dev);
-    dwceqos_init_hw(lp);
 
     netif_start_queue(ndev);
     tasklet_enable(&lp->tx_bdreclaim_tasklet);
drivers/net/geneve.c (+26 -5)
···
     return geneve_xmit_skb(skb, dev, info);
 }
 
+static int geneve_change_mtu(struct net_device *dev, int new_mtu)
+{
+    /* GENEVE overhead is not fixed, so we can't enforce a more
+     * precise max MTU.
+     */
+    if (new_mtu < 68 || new_mtu > IP_MAX_MTU)
+        return -EINVAL;
+    dev->mtu = new_mtu;
+    return 0;
+}
+
 static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
 {
     struct ip_tunnel_info *info = skb_tunnel_info(skb);
···
     .ndo_stop               = geneve_stop,
     .ndo_start_xmit         = geneve_xmit,
     .ndo_get_stats64        = ip_tunnel_get_stats64,
-    .ndo_change_mtu         = eth_change_mtu,
+    .ndo_change_mtu         = geneve_change_mtu,
     .ndo_validate_addr      = eth_validate_addr,
     .ndo_set_mac_address    = eth_mac_addr,
     .ndo_fill_metadata_dst  = geneve_fill_metadata_dst,
···
 
     err = geneve_configure(net, dev, &geneve_remote_unspec,
                            0, 0, 0, htons(dst_port), true, 0);
-    if (err) {
-        free_netdev(dev);
-        return ERR_PTR(err);
-    }
+    if (err)
+        goto err;
+
+    /* openvswitch users expect packet sizes to be unrestricted,
+     * so set the largest MTU we can.
+     */
+    err = geneve_change_mtu(dev, IP_MAX_MTU);
+    if (err)
+        goto err;
+
     return dev;
+
+err:
+    free_netdev(dev);
+    return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(geneve_dev_create_fb);
 
drivers/net/vxlan.c (+39 -20)
···
 {
 }
 
+static int __vxlan_change_mtu(struct net_device *dev,
+                              struct net_device *lowerdev,
+                              struct vxlan_rdst *dst, int new_mtu, bool strict)
+{
+    int max_mtu = IP_MAX_MTU;
+
+    if (lowerdev)
+        max_mtu = lowerdev->mtu;
+
+    if (dst->remote_ip.sa.sa_family == AF_INET6)
+        max_mtu -= VXLAN6_HEADROOM;
+    else
+        max_mtu -= VXLAN_HEADROOM;
+
+    if (new_mtu < 68)
+        return -EINVAL;
+
+    if (new_mtu > max_mtu) {
+        if (strict)
+            return -EINVAL;
+
+        new_mtu = max_mtu;
+    }
+
+    dev->mtu = new_mtu;
+    return 0;
+}
+
 static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
 {
     struct vxlan_dev *vxlan = netdev_priv(dev);
     struct vxlan_rdst *dst = &vxlan->default_dst;
-    struct net_device *lowerdev;
-    int max_mtu;
-
-    lowerdev = __dev_get_by_index(vxlan->net, dst->remote_ifindex);
-    if (lowerdev == NULL)
-        return eth_change_mtu(dev, new_mtu);
-
-    if (dst->remote_ip.sa.sa_family == AF_INET6)
-        max_mtu = lowerdev->mtu - VXLAN6_HEADROOM;
-    else
-        max_mtu = lowerdev->mtu - VXLAN_HEADROOM;
-
-    if (new_mtu < 68 || new_mtu > max_mtu)
-        return -EINVAL;
-
-    dev->mtu = new_mtu;
-    return 0;
+    struct net_device *lowerdev = __dev_get_by_index(vxlan->net,
+                                                     dst->remote_ifindex);
+    return __vxlan_change_mtu(dev, lowerdev, dst, new_mtu, true);
 }
 
 static int egress_ipv4_tun_info(struct net_device *dev, struct sk_buff *skb,
···
     int err;
     bool use_ipv6 = false;
     __be16 default_port = vxlan->cfg.dst_port;
+    struct net_device *lowerdev = NULL;
 
     vxlan->net = src_net;
 
···
     }
 
     if (conf->remote_ifindex) {
-        struct net_device *lowerdev
-             = __dev_get_by_index(src_net, conf->remote_ifindex);
-
+        lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex);
         dst->remote_ifindex = conf->remote_ifindex;
 
         if (!lowerdev) {
···
             dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
 
         needed_headroom = lowerdev->hard_header_len;
+    }
+
+    if (conf->mtu) {
+        err = __vxlan_change_mtu(dev, lowerdev, dst, conf->mtu, false);
+        if (err)
+            return err;
     }
 
     if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA)
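For the non-strict path that openvswitch now takes, the clamp works out as in this stand-alone numeric sketch; the 1500-byte underlay MTU is hypothetical, and the 50-byte figure assumes the Ethernet + IPv4 + UDP + VXLAN encapsulation overhead that VXLAN_HEADROOM accounts for:

#include <stdio.h>

int main(void)
{
	int lowerdev_mtu = 1500;   /* hypothetical underlay device MTU */
	int vxlan_headroom = 50;   /* assumed IPv4 VXLAN encap overhead */
	int requested = 65535;     /* a deliberately huge request, as ovs now makes */
	int strict = 0;            /* the __vxlan_change_mtu(..., strict=false) path */

	int max_mtu = lowerdev_mtu - vxlan_headroom;
	int mtu = requested;

	if (mtu > max_mtu)
		mtu = strict ? -1 : max_mtu;  /* strict mode would instead reject with -EINVAL */

	printf("effective vxlan MTU = %d\n", mtu);  /* prints 1450 */
	return 0;
}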
drivers/of/of_mdio.c (+1)
···
     { .compatible = "marvell,88E1111", },
     { .compatible = "marvell,88e1116", },
     { .compatible = "marvell,88e1118", },
+    { .compatible = "marvell,88e1145", },
     { .compatible = "marvell,88e1149r", },
     { .compatible = "marvell,88e1310", },
     { .compatible = "marvell,88E1510", },
include/linux/skbuff.h (+1)
···
 #else
 #define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
 #endif
+extern int sysctl_max_skb_frags;
 
 typedef struct skb_frag_struct skb_frag_t;
 
include/net/af_unix.h (+2 -2)
···
 #include <linux/mutex.h>
 #include <net/sock.h>
 
-void unix_inflight(struct file *fp);
-void unix_notinflight(struct file *fp);
+void unix_inflight(struct user_struct *user, struct file *fp);
+void unix_notinflight(struct user_struct *user, struct file *fp);
 void unix_gc(void);
 void wait_for_unix_gc(void);
 struct sock *unix_get_socket(struct file *filp);
include/net/ip_tunnels.h (+1)
···
 int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
 int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
                     u8 *protocol, struct flowi4 *fl4);
+int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
 int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
 
 struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
include/net/scm.h (+1)
···
 struct scm_fp_list {
     short               count;
     short               max;
+    struct user_struct  *user;
     struct file         *fp[SCM_MAX_FD];
 };
 
include/net/tcp.h (+1 -1)
···
 
 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
 void tcp_v4_mtu_reduced(struct sock *sk);
-void tcp_req_err(struct sock *sk, u32 seq);
+void tcp_req_err(struct sock *sk, u32 seq, bool abort);
 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
 struct sock *tcp_create_openreq_child(const struct sock *sk,
                                       struct request_sock *req,
kernel/bpf/verifier.c (+1 -1)
···
         /* adjust offset of jmps if necessary */
         if (i < pos && i + insn->off + 1 > pos)
             insn->off += delta;
-        else if (i > pos && i + insn->off + 1 < pos)
+        else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
             insn->off -= delta;
     }
 }
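To make the corrected condition concrete, here is a stand-alone sketch (not kernel code; pos, delta and the indices are made up) of the backjump case the old test missed:

#include <stdio.h>

int main(void)
{
	/* Hypothetical numbers: the ctx-access insn at index pos = 2 was
	 * expanded by delta = 1, so every insn behind it moved down by one.
	 * The backjump below originally sat at index 4 and targeted index 2,
	 * i.e. off = 2 - 4 - 1 = -3.
	 */
	int pos = 2, delta = 1;
	int i = 5;      /* post-patch index of the jump insn (was 4) */
	int off = -3;   /* offset not yet adjusted */

	/* Old test: i > pos && i + off + 1 < pos evaluates 3 < 2, which is
	 * false, so the offset stayed -3 and the jump landed at index 3,
	 * in the middle of the patched expansion.  The corrected test
	 * catches this case:
	 */
	if (i > pos + delta && i + off + 1 <= pos + delta)
		off -= delta;

	printf("adjusted off = %d, target = %d\n", off, i + off + 1);
	return 0;   /* prints off = -4, target = 2: the start of the expansion */
}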
net/core/flow_dissector.c (+6 -3)
···
     case htons(ETH_P_IPV6): {
         const struct ipv6hdr *iph;
         struct ipv6hdr _iph;
-        __be32 flow_label;
 
 ipv6:
         iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
···
             key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
         }
 
-        flow_label = ip6_flowlabel(iph);
-        if (flow_label) {
+        if ((dissector_uses_key(flow_dissector,
+                                FLOW_DISSECTOR_KEY_FLOW_LABEL) ||
+             (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)) &&
+            ip6_flowlabel(iph)) {
+            __be32 flow_label = ip6_flowlabel(iph);
+
             if (dissector_uses_key(flow_dissector,
                                    FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
                 key_tags = skb_flow_dissector_target(flow_dissector,
net/core/scm.c (+7)
···
         *fplp = fpl;
         fpl->count = 0;
         fpl->max = SCM_MAX_FD;
+        fpl->user = NULL;
     }
     fpp = &fpl->fp[fpl->count];
 
···
         *fpp++ = file;
         fpl->count++;
     }
+
+    if (!fpl->user)
+        fpl->user = get_uid(current_user());
+
     return num;
 }
 
···
         scm->fp = NULL;
         for (i=fpl->count-1; i>=0; i--)
             fput(fpl->fp[i]);
+        free_uid(fpl->user);
         kfree(fpl);
     }
 }
···
         for (i = 0; i < fpl->count; i++)
             get_file(fpl->fp[i]);
         new_fpl->max = new_fpl->count;
+        new_fpl->user = get_uid(fpl->user);
     }
     return new_fpl;
 }
net/core/skbuff.c (+2)
···
 
 struct kmem_cache *skbuff_head_cache __read_mostly;
 static struct kmem_cache *skbuff_fclone_cache __read_mostly;
+int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
+EXPORT_SYMBOL(sysctl_max_skb_frags);
 
 /**
  *  skb_panic - private function for out-of-line support
net/core/sysctl_net_core.c (+10)
···
 static int one = 1;
 static int min_sndbuf = SOCK_MIN_SNDBUF;
 static int min_rcvbuf = SOCK_MIN_RCVBUF;
+static int max_skb_frags = MAX_SKB_FRAGS;
 
 static int net_msg_warn;    /* Unused, but still a sysctl */
 
···
         .maxlen         = sizeof(int),
         .mode           = 0644,
         .proc_handler   = proc_dointvec
+    },
+    {
+        .procname       = "max_skb_frags",
+        .data           = &sysctl_max_skb_frags,
+        .maxlen         = sizeof(int),
+        .mode           = 0644,
+        .proc_handler   = proc_dointvec_minmax,
+        .extra1         = &one,
+        .extra2         = &max_skb_frags,
     },
     { }
 };
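For reference, once this lands the new knob should surface as /proc/sys/net/core/max_skb_frags (path inferred from its placement in net_core_table, between 1 and MAX_SKB_FRAGS). A minimal userspace check, purely illustrative:

#include <stdio.h>

int main(void)
{
	/* Path inferred from the sysctl table entry added above. */
	FILE *f = fopen("/proc/sys/net/core/max_skb_frags", "r");
	int val;

	if (!f) {
		perror("max_skb_frags");
		return 1;
	}
	if (fscanf(f, "%d", &val) == 1)
		printf("max_skb_frags = %d\n", val);  /* defaults to MAX_SKB_FRAGS */
	fclose(f);
	return 0;
}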
net/ipv4/ip_gre.c (+8)
···
     err = ipgre_newlink(net, dev, tb, NULL);
     if (err < 0)
         goto out;
+
+    /* openvswitch users expect packet sizes to be unrestricted,
+     * so set the largest MTU we can.
+     */
+    err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
+    if (err)
+        goto out;
+
     return dev;
 out:
     free_netdev(dev);
net/ipv4/ip_tunnel.c (+17 -3)
···
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_ioctl);
 
-int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
+int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict)
 {
     struct ip_tunnel *tunnel = netdev_priv(dev);
     int t_hlen = tunnel->hlen + sizeof(struct iphdr);
+    int max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen;
 
-    if (new_mtu < 68 ||
-        new_mtu > 0xFFF8 - dev->hard_header_len - t_hlen)
+    if (new_mtu < 68)
         return -EINVAL;
+
+    if (new_mtu > max_mtu) {
+        if (strict)
+            return -EINVAL;
+
+        new_mtu = max_mtu;
+    }
+
     dev->mtu = new_mtu;
     return 0;
+}
+EXPORT_SYMBOL_GPL(__ip_tunnel_change_mtu);
+
+int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
+{
+    return __ip_tunnel_change_mtu(dev, new_mtu, true);
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_change_mtu);
 
net/ipv4/tcp.c (+2 -2)
···
 
         i = skb_shinfo(skb)->nr_frags;
         can_coalesce = skb_can_coalesce(skb, i, page, offset);
-        if (!can_coalesce && i >= MAX_SKB_FRAGS) {
+        if (!can_coalesce && i >= sysctl_max_skb_frags) {
             tcp_mark_push(tp, skb);
             goto new_segment;
         }
···
 
             if (!skb_can_coalesce(skb, i, pfrag->page,
                                   pfrag->offset)) {
-                if (i == MAX_SKB_FRAGS || !sg) {
+                if (i == sysctl_max_skb_frags || !sg) {
                     tcp_mark_push(tp, skb);
                     goto new_segment;
                 }
net/ipv4/tcp_ipv4.c (+8 -3)
···
 
 
 /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
-void tcp_req_err(struct sock *sk, u32 seq)
+void tcp_req_err(struct sock *sk, u32 seq, bool abort)
 {
     struct request_sock *req = inet_reqsk(sk);
     struct net *net = sock_net(sk);
···
 
     if (seq != tcp_rsk(req)->snt_isn) {
         NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
-    } else {
+    } else if (abort) {
         /*
          * Still in SYN_RECV, just remove it silently.
          * There is no good way to pass the error to the newly
···
     }
     seq = ntohl(th->seq);
     if (sk->sk_state == TCP_NEW_SYN_RECV)
-        return tcp_req_err(sk, seq);
+        return tcp_req_err(sk, seq,
+                           type == ICMP_PARAMETERPROB ||
+                           type == ICMP_TIME_EXCEEDED ||
+                           (type == ICMP_DEST_UNREACH &&
+                            (code == ICMP_NET_UNREACH ||
+                             code == ICMP_HOST_UNREACH)));
 
     bh_lock_sock(sk);
     /* If too many ICMPs get dropped on busy
net/ipv6/addrconf.c (+4 -1)
···
 {
     struct inet6_dev *idev = ifp->idev;
     struct net_device *dev = idev->dev;
+    bool notify = false;
 
     addrconf_join_solict(dev, &ifp->addr);
 
···
             /* Because optimistic nodes can use this address,
              * notify listeners. If DAD fails, RTM_DELADDR is sent.
              */
-            ipv6_ifa_notify(RTM_NEWADDR, ifp);
+            notify = true;
         }
     }
 
···
 out:
     spin_unlock(&ifp->lock);
     read_unlock_bh(&idev->lock);
+    if (notify)
+        ipv6_ifa_notify(RTM_NEWADDR, ifp);
 }
 
 static void addrconf_dad_start(struct inet6_ifaddr *ifp)
net/ipv6/ip6_flowlabel.c (+3 -2)
···
     }
     spin_lock_bh(&ip6_sk_fl_lock);
     for (sflp = &np->ipv6_fl_list;
-         (sfl = rcu_dereference(*sflp)) != NULL;
+         (sfl = rcu_dereference_protected(*sflp,
+                                          lockdep_is_held(&ip6_sk_fl_lock))) != NULL;
          sflp = &sfl->next) {
         if (sfl->fl->label == freq.flr_label) {
             if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
                 np->flow_label &= ~IPV6_FLOWLABEL_MASK;
-            *sflp = rcu_dereference(sfl->next);
+            *sflp = sfl->next;
             spin_unlock_bh(&ip6_sk_fl_lock);
             fl_release(sfl->fl);
             kfree_rcu(sfl, rcu);
net/ipv6/tcp_ipv6.c (+3 -2)
···
     struct tcp_sock *tp;
     __u32 seq, snd_una;
     struct sock *sk;
+    bool fatal;
     int err;
 
     sk = __inet6_lookup_established(net, &tcp_hashinfo,
···
         return;
     }
     seq = ntohl(th->seq);
+    fatal = icmpv6_err_convert(type, code, &err);
     if (sk->sk_state == TCP_NEW_SYN_RECV)
-        return tcp_req_err(sk, seq);
+        return tcp_req_err(sk, seq, fatal);
 
     bh_lock_sock(sk);
     if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
···
         goto out;
     }
 
-    icmpv6_err_convert(type, code, &err);
 
     /* Might be for an request_sock */
     switch (sk->sk_state) {
net/openvswitch/vport-vxlan.c (+2)
···
     struct vxlan_config conf = {
         .no_share = true,
         .flags = VXLAN_F_COLLECT_METADATA,
+        /* Don't restrict the packets that can be sent by MTU */
+        .mtu = IP_MAX_MTU,
     };
 
     if (!options) {
net/sctp/socket.c (+7 -2)
···
     struct sctp_hmac_algo_param *hmacs;
     __u16 data_len = 0;
     u32 num_idents;
+    int i;
 
     if (!ep->auth_enable)
         return -EACCES;
···
         return -EFAULT;
     if (put_user(num_idents, &p->shmac_num_idents))
         return -EFAULT;
-    if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len))
-        return -EFAULT;
+    for (i = 0; i < num_idents; i++) {
+        __u16 hmacid = ntohs(hmacs->hmac_ids[i]);
+
+        if (copy_to_user(&p->shmac_idents[i], &hmacid, sizeof(__u16)))
+            return -EFAULT;
+    }
     return 0;
 }
 
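The underlying issue is plain byte order: hmac_ids[] is kept in network order, so copying it out raw hands userspace swapped identifiers on little-endian machines. A stand-alone illustration (HMAC-SHA-1's identifier is 1 per RFC 4895):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t wire = htons(1);   /* HMAC-SHA-1 id as stored in the parameter */

	/* On a little-endian host the raw value reads back as 256;
	 * converting with ntohs() recovers the real identifier, 1. */
	printf("raw: %u  ntohs: %u\n", (unsigned)wire, (unsigned)ntohs(wire));
	return 0;
}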
net/unix/af_unix.c (+2 -2)
···
     UNIXCB(skb).fp = NULL;
 
     for (i = scm->fp->count-1; i >= 0; i--)
-        unix_notinflight(scm->fp->fp[i]);
+        unix_notinflight(scm->fp->user, scm->fp->fp[i]);
 }
 
 static void unix_destruct_scm(struct sk_buff *skb)
···
         return -ENOMEM;
 
     for (i = scm->fp->count - 1; i >= 0; i--)
-        unix_inflight(scm->fp->fp[i]);
+        unix_inflight(scm->fp->user, scm->fp->fp[i]);
     return max_level;
 }
 
net/unix/garbage.c (+4 -4)
···
  * descriptor if it is for an AF_UNIX socket.
  */
 
-void unix_inflight(struct file *fp)
+void unix_inflight(struct user_struct *user, struct file *fp)
 {
     struct sock *s = unix_get_socket(fp);
 
···
         }
         unix_tot_inflight++;
     }
-    fp->f_cred->user->unix_inflight++;
+    user->unix_inflight++;
     spin_unlock(&unix_gc_lock);
 }
 
-void unix_notinflight(struct file *fp)
+void unix_notinflight(struct user_struct *user, struct file *fp)
 {
     struct sock *s = unix_get_socket(fp);
 
···
         list_del_init(&u->link);
         unix_tot_inflight--;
     }
-    fp->f_cred->user->unix_inflight--;
+    user->unix_inflight--;
     spin_unlock(&unix_gc_lock);
 }
 
security/selinux/nlmsgtab.c (+1)
···
     { TCPDIAG_GETSOCK,      NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
     { DCCPDIAG_GETSOCK,     NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
     { SOCK_DIAG_BY_FAMILY,  NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
+    { SOCK_DESTROY,         NETLINK_TCPDIAG_SOCKET__NLMSG_WRITE },
 };
 
 static struct nlmsg_perm nlmsg_xfrm_perms[] =