Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

1) Fix RTNL locking in batman-adv, from Matthias Schiffer.

2) Don't allow non-passthrough macvlan devices to set NOPROMISC via
netlink, otherwise we can end up with corrupted promisc counter
values on the device. From Michael S. Tsirkin.

3) Fix stmmac driver build with debugging defines enabled, from Dinh
Nguyen.

4) Make sure name string we give in socket address in AF_PACKET is NULL
terminated, from Daniel Borkmann.

5) Fix leaking of two uninitialized bytes of memory to userspace in
l2tp, from Guillaume Nault.

6) Clear IPCB(skb) before tunneling, otherwise we touch dangling IP
options state and crash. From Saurabh Mohan.

7) Fix suspend/resume for davinci_mdio by using suspend_late and
resume_early. From Mugunthan V N.

8) Don't tag ip_tunnel_init_net and ip_tunnel_delete_net with
__net_{init,exit}, they can be called outside of those contexts.
From Eric Dumazet.

9) Fix RX length error in sh_eth driver, from Yoshihiro Shimoda.

10) Fix missing sctp_outq initialization in some code paths of SCTP
stack, from Neil Horman.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (21 commits)
sctp: fully initialize sctp_outq in sctp_outq_init
netiucv: Hold rtnl between name allocation and device registration.
tulip: Properly check dma mapping result
net: sh_eth: fix incorrect RX length error if R8A7740
ip_tunnel: remove __net_init/exit from exported functions
drivers: net: davinci_mdio: restore mdio clk divider in mdio resume
drivers: net: davinci_mdio: moving mdio resume earlier than cpsw ethernet driver
net/ipv4: ip_vti clear skb cb before tunneling.
tg3: Wait for boot code to finish after power on
l2tp: Fix sendmsg() return value
l2tp: Fix PPP header erasure and memory leak
bonding: fix igmp_retrans type and two related races
bonding: reset master mac on first enslave failure
packet: packet_getname_spkt: make sure string is always 0-terminated
net: ethernet: stmicro: stmmac: Fix compile error when STMMAC_XMIT_DEBUG used
be2net: Fix 32-bit DMA Mask handling
xen-netback: don't de-reference vif pointer after having called xenvif_put()
macvlan: don't touch promisc without passthrough
batman-adv: Don't handle address updates when bla is disabled
batman-adv: forward late OGMs from best next hop
...

+148 -82
+17 -6
drivers/net/bonding/bond_main.c
··· 764 struct net_device *bond_dev, *vlan_dev, *upper_dev; 765 struct vlan_entry *vlan; 766 767 - rcu_read_lock(); 768 read_lock(&bond->lock); 769 770 bond_dev = bond->dev; 771 ··· 787 if (vlan_dev) 788 __bond_resend_igmp_join_requests(vlan_dev); 789 } 790 - 791 - if (--bond->igmp_retrans > 0) 792 - queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5); 793 - 794 - read_unlock(&bond->lock); 795 rcu_read_unlock(); 796 } 797 798 static void bond_resend_igmp_join_requests_delayed(struct work_struct *work) ··· 1964 1965 err_undo_flags: 1966 bond_compute_features(bond); 1967 1968 return res; 1969 }
··· 764 struct net_device *bond_dev, *vlan_dev, *upper_dev; 765 struct vlan_entry *vlan; 766 767 read_lock(&bond->lock); 768 + rcu_read_lock(); 769 770 bond_dev = bond->dev; 771 ··· 787 if (vlan_dev) 788 __bond_resend_igmp_join_requests(vlan_dev); 789 } 790 rcu_read_unlock(); 791 + 792 + /* We use curr_slave_lock to protect against concurrent access to 793 + * igmp_retrans from multiple running instances of this function and 794 + * bond_change_active_slave 795 + */ 796 + write_lock_bh(&bond->curr_slave_lock); 797 + if (bond->igmp_retrans > 1) { 798 + bond->igmp_retrans--; 799 + queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5); 800 + } 801 + write_unlock_bh(&bond->curr_slave_lock); 802 + read_unlock(&bond->lock); 803 } 804 805 static void bond_resend_igmp_join_requests_delayed(struct work_struct *work) ··· 1957 1958 err_undo_flags: 1959 bond_compute_features(bond); 1960 + /* Enslave of first slave has failed and we need to fix master's mac */ 1961 + if (bond->slave_cnt == 0 && 1962 + ether_addr_equal(bond_dev->dev_addr, slave_dev->dev_addr)) 1963 + eth_hw_addr_random(bond_dev); 1964 1965 return res; 1966 }
+1 -1
drivers/net/bonding/bonding.h
··· 225 rwlock_t curr_slave_lock; 226 u8 send_peer_notif; 227 s8 setup_by_slave; 228 - s8 igmp_retrans; 229 #ifdef CONFIG_PROC_FS 230 struct proc_dir_entry *proc_entry; 231 char proc_file_name[IFNAMSIZ];
··· 225 rwlock_t curr_slave_lock; 226 u8 send_peer_notif; 227 s8 setup_by_slave; 228 + u8 igmp_retrans; 229 #ifdef CONFIG_PROC_FS 230 struct proc_dir_entry *proc_entry; 231 char proc_file_name[IFNAMSIZ];
+10
drivers/net/ethernet/broadcom/tg3.c
··· 1800 int i; 1801 u32 val; 1802 1803 if (tg3_flag(tp, IS_SSB_CORE)) { 1804 /* We don't use firmware. */ 1805 return 0; ··· 10407 */ 10408 static int tg3_init_hw(struct tg3 *tp, bool reset_phy) 10409 { 10410 tg3_switch_clocks(tp); 10411 10412 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
··· 1800 int i; 1801 u32 val; 1802 1803 + if (tg3_flag(tp, NO_FWARE_REPORTED)) 1804 + return 0; 1805 + 1806 if (tg3_flag(tp, IS_SSB_CORE)) { 1807 /* We don't use firmware. */ 1808 return 0; ··· 10404 */ 10405 static int tg3_init_hw(struct tg3 *tp, bool reset_phy) 10406 { 10407 + /* Chip may have been just powered on. If so, the boot code may still 10408 + * be running initialization. Wait for it to finish to avoid races in 10409 + * accessing the hardware. 10410 + */ 10411 + tg3_enable_register_access(tp); 10412 + tg3_poll_fw(tp); 10413 + 10414 tg3_switch_clocks(tp); 10415 10416 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
+6
drivers/net/ethernet/dec/tulip/interrupt.c
··· 76 77 mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ, 78 PCI_DMA_FROMDEVICE); 79 tp->rx_buffers[entry].mapping = mapping; 80 81 tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
··· 76 77 mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ, 78 PCI_DMA_FROMDEVICE); 79 + if (dma_mapping_error(&tp->pdev->dev, mapping)) { 80 + dev_kfree_skb(skb); 81 + tp->rx_buffers[entry].skb = NULL; 82 + break; 83 + } 84 + 85 tp->rx_buffers[entry].mapping = mapping; 86 87 tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
+3
drivers/net/ethernet/emulex/benet/be_main.c
··· 4262 netdev->features |= NETIF_F_HIGHDMA; 4263 } else { 4264 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 4265 if (status) { 4266 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n"); 4267 goto free_netdev;
··· 4262 netdev->features |= NETIF_F_HIGHDMA; 4263 } else { 4264 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 4265 + if (!status) 4266 + status = dma_set_coherent_mask(&pdev->dev, 4267 + DMA_BIT_MASK(32)); 4268 if (status) { 4269 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n"); 4270 goto free_netdev;
+11 -4
drivers/net/ethernet/renesas/sh_eth.c
··· 1401 desc_status = edmac_to_cpu(mdp, rxdesc->status); 1402 pkt_len = rxdesc->frame_length; 1403 1404 - #if defined(CONFIG_ARCH_R8A7740) 1405 - desc_status >>= 16; 1406 - #endif 1407 - 1408 if (--boguscnt < 0) 1409 break; 1410 1411 if (!(desc_status & RDFEND)) 1412 ndev->stats.rx_length_errors++; 1413 1414 if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 | 1415 RD_RFS5 | RD_RFS6 | RD_RFS10)) {
··· 1401 desc_status = edmac_to_cpu(mdp, rxdesc->status); 1402 pkt_len = rxdesc->frame_length; 1403 1404 if (--boguscnt < 0) 1405 break; 1406 1407 if (!(desc_status & RDFEND)) 1408 ndev->stats.rx_length_errors++; 1409 + 1410 + #if defined(CONFIG_ARCH_R8A7740) 1411 + /* 1412 + * In case of almost all GETHER/ETHERs, the Receive Frame State 1413 + * (RFS) bits in the Receive Descriptor 0 are from bit 9 to 1414 + * bit 0. However, in case of the R8A7740's GETHER, the RFS 1415 + * bits are from bit 25 to bit 16. So, the driver needs right 1416 + * shifting by 16. 1417 + */ 1418 + desc_status >>= 16; 1419 + #endif 1420 1421 if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 | 1422 RD_RFS5 | RD_RFS6 | RD_RFS10)) {
+1 -1
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 1899 1900 #ifdef STMMAC_XMIT_DEBUG 1901 if (netif_msg_pktdata(priv)) { 1902 - pr_info("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d" 1903 __func__, (priv->cur_tx % txsize), 1904 (priv->dirty_tx % txsize), entry, first, nfrags); 1905 if (priv->extend_desc)
··· 1899 1900 #ifdef STMMAC_XMIT_DEBUG 1901 if (netif_msg_pktdata(priv)) { 1902 + pr_info("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d", 1903 __func__, (priv->cur_tx % txsize), 1904 (priv->dirty_tx % txsize), entry, first, nfrags); 1905 if (priv->extend_desc)
+3 -6
drivers/net/ethernet/ti/davinci_mdio.c
··· 459 static int davinci_mdio_resume(struct device *dev) 460 { 461 struct davinci_mdio_data *data = dev_get_drvdata(dev); 462 - u32 ctrl; 463 464 pm_runtime_get_sync(data->dev); 465 466 spin_lock(&data->lock); 467 /* restart the scan state machine */ 468 - ctrl = __raw_readl(&data->regs->control); 469 - ctrl |= CONTROL_ENABLE; 470 - __raw_writel(ctrl, &data->regs->control); 471 472 data->suspended = false; 473 spin_unlock(&data->lock); ··· 473 } 474 475 static const struct dev_pm_ops davinci_mdio_pm_ops = { 476 - .suspend = davinci_mdio_suspend, 477 - .resume = davinci_mdio_resume, 478 }; 479 480 static const struct of_device_id davinci_mdio_of_mtable[] = {
··· 459 static int davinci_mdio_resume(struct device *dev) 460 { 461 struct davinci_mdio_data *data = dev_get_drvdata(dev); 462 463 pm_runtime_get_sync(data->dev); 464 465 spin_lock(&data->lock); 466 /* restart the scan state machine */ 467 + __davinci_mdio_reset(data); 468 469 data->suspended = false; 470 spin_unlock(&data->lock); ··· 476 } 477 478 static const struct dev_pm_ops davinci_mdio_pm_ops = { 479 + .suspend_late = davinci_mdio_suspend, 480 + .resume_early = davinci_mdio_resume, 481 }; 482 483 static const struct of_device_id davinci_mdio_of_mtable[] = {
+12 -6
drivers/net/macvlan.c
··· 853 struct nlattr *tb[], struct nlattr *data[]) 854 { 855 struct macvlan_dev *vlan = netdev_priv(dev); 856 - if (data && data[IFLA_MACVLAN_MODE]) 857 - vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]); 858 if (data && data[IFLA_MACVLAN_FLAGS]) { 859 __u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]); 860 bool promisc = (flags ^ vlan->flags) & MACVLAN_FLAG_NOPROMISC; 861 862 - if (promisc && (flags & MACVLAN_FLAG_NOPROMISC)) 863 - dev_set_promiscuity(vlan->lowerdev, -1); 864 - else if (promisc && !(flags & MACVLAN_FLAG_NOPROMISC)) 865 - dev_set_promiscuity(vlan->lowerdev, 1); 866 vlan->flags = flags; 867 } 868 return 0; 869 } 870
··· 853 struct nlattr *tb[], struct nlattr *data[]) 854 { 855 struct macvlan_dev *vlan = netdev_priv(dev); 856 + 857 if (data && data[IFLA_MACVLAN_FLAGS]) { 858 __u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]); 859 bool promisc = (flags ^ vlan->flags) & MACVLAN_FLAG_NOPROMISC; 860 + if (vlan->port->passthru && promisc) { 861 + int err; 862 863 + if (flags & MACVLAN_FLAG_NOPROMISC) 864 + err = dev_set_promiscuity(vlan->lowerdev, -1); 865 + else 866 + err = dev_set_promiscuity(vlan->lowerdev, 1); 867 + if (err < 0) 868 + return err; 869 + } 870 vlan->flags = flags; 871 } 872 + if (data && data[IFLA_MACVLAN_MODE]) 873 + vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]); 874 return 0; 875 } 876
+6 -5
drivers/net/xen-netback/netback.c
··· 662 { 663 struct xenvif *vif = NULL, *tmp; 664 s8 status; 665 - u16 irq, flags; 666 struct xen_netif_rx_response *resp; 667 struct sk_buff_head rxq; 668 struct sk_buff *skb; ··· 771 sco->meta_slots_used); 772 773 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret); 774 - irq = vif->irq; 775 - if (ret && list_empty(&vif->notify_list)) 776 - list_add_tail(&vif->notify_list, &notify); 777 778 xenvif_notify_tx_completion(vif); 779 780 - xenvif_put(vif); 781 npo.meta_cons += sco->meta_slots_used; 782 dev_kfree_skb(skb); 783 } ··· 785 list_for_each_entry_safe(vif, tmp, &notify, notify_list) { 786 notify_remote_via_irq(vif->irq); 787 list_del_init(&vif->notify_list); 788 } 789 790 /* More work to do? */
··· 662 { 663 struct xenvif *vif = NULL, *tmp; 664 s8 status; 665 + u16 flags; 666 struct xen_netif_rx_response *resp; 667 struct sk_buff_head rxq; 668 struct sk_buff *skb; ··· 771 sco->meta_slots_used); 772 773 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret); 774 775 xenvif_notify_tx_completion(vif); 776 777 + if (ret && list_empty(&vif->notify_list)) 778 + list_add_tail(&vif->notify_list, &notify); 779 + else 780 + xenvif_put(vif); 781 npo.meta_cons += sco->meta_slots_used; 782 dev_kfree_skb(skb); 783 } ··· 785 list_for_each_entry_safe(vif, tmp, &notify, notify_list) { 786 notify_remote_via_irq(vif->irq); 787 list_del_init(&vif->notify_list); 788 + xenvif_put(vif); 789 } 790 791 /* More work to do? */
+5 -1
drivers/s390/net/netiucv.c
··· 2040 netiucv_setup_netdevice); 2041 if (!dev) 2042 return NULL; 2043 if (dev_alloc_name(dev, dev->name) < 0) 2044 goto out_netdev; 2045 ··· 2062 out_fsm: 2063 kfree_fsm(privptr->fsm); 2064 out_netdev: 2065 free_netdev(dev); 2066 return NULL; 2067 } ··· 2102 2103 rc = netiucv_register_device(dev); 2104 if (rc) { 2105 IUCV_DBF_TEXT_(setup, 2, 2106 "ret %d from netiucv_register_device\n", rc); 2107 goto out_free_ndev; ··· 2112 priv = netdev_priv(dev); 2113 SET_NETDEV_DEV(dev, priv->dev); 2114 2115 - rc = register_netdev(dev); 2116 if (rc) 2117 goto out_unreg; 2118
··· 2040 netiucv_setup_netdevice); 2041 if (!dev) 2042 return NULL; 2043 + rtnl_lock(); 2044 if (dev_alloc_name(dev, dev->name) < 0) 2045 goto out_netdev; 2046 ··· 2061 out_fsm: 2062 kfree_fsm(privptr->fsm); 2063 out_netdev: 2064 + rtnl_unlock(); 2065 free_netdev(dev); 2066 return NULL; 2067 } ··· 2100 2101 rc = netiucv_register_device(dev); 2102 if (rc) { 2103 + rtnl_unlock(); 2104 IUCV_DBF_TEXT_(setup, 2, 2105 "ret %d from netiucv_register_device\n", rc); 2106 goto out_free_ndev; ··· 2109 priv = netdev_priv(dev); 2110 SET_NETDEV_DEV(dev, priv->dev); 2111 2112 + rc = register_netdevice(dev); 2113 + rtnl_unlock(); 2114 if (rc) 2115 goto out_unreg; 2116
+3 -3
include/net/ip_tunnels.h
··· 95 int ip_tunnel_init(struct net_device *dev); 96 void ip_tunnel_uninit(struct net_device *dev); 97 void ip_tunnel_dellink(struct net_device *dev, struct list_head *head); 98 - int __net_init ip_tunnel_init_net(struct net *net, int ip_tnl_net_id, 99 - struct rtnl_link_ops *ops, char *devname); 100 101 - void __net_exit ip_tunnel_delete_net(struct ip_tunnel_net *itn); 102 103 void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, 104 const struct iphdr *tnl_params);
··· 95 int ip_tunnel_init(struct net_device *dev); 96 void ip_tunnel_uninit(struct net_device *dev); 97 void ip_tunnel_dellink(struct net_device *dev, struct list_head *head); 98 + int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id, 99 + struct rtnl_link_ops *ops, char *devname); 100 101 + void ip_tunnel_delete_net(struct ip_tunnel_net *itn); 102 103 void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, 104 const struct iphdr *tnl_params);
+55 -31
net/batman-adv/bat_iv_ogm.c
··· 29 #include "bat_algo.h" 30 #include "network-coding.h" 31 32 static struct batadv_neigh_node * 33 batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface, 34 const uint8_t *neigh_addr, ··· 665 const struct batadv_ogm_packet *batadv_ogm_packet, 666 struct batadv_hard_iface *if_incoming, 667 const unsigned char *tt_buff, 668 - int is_duplicate) 669 { 670 struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL; 671 struct batadv_neigh_node *router = NULL; ··· 691 continue; 692 } 693 694 - if (is_duplicate) 695 continue; 696 697 spin_lock_bh(&tmp_neigh_node->lq_update_lock); ··· 733 neigh_node->tq_avg = batadv_ring_buffer_avg(neigh_node->tq_recv); 734 spin_unlock_bh(&neigh_node->lq_update_lock); 735 736 - if (!is_duplicate) { 737 orig_node->last_ttl = batadv_ogm_packet->header.ttl; 738 neigh_node->last_ttl = batadv_ogm_packet->header.ttl; 739 } ··· 917 return ret; 918 } 919 920 - /* processes a batman packet for all interfaces, adjusts the sequence number and 921 - * finds out whether it is a duplicate. 922 - * returns: 923 - * 1 the packet is a duplicate 924 - * 0 the packet has not yet been received 925 - * -1 the packet is old and has been received while the seqno window 926 - * was protected. Caller should drop it. 
927 */ 928 - static int 929 batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr, 930 const struct batadv_ogm_packet *batadv_ogm_packet, 931 const struct batadv_hard_iface *if_incoming) ··· 934 struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 935 struct batadv_orig_node *orig_node; 936 struct batadv_neigh_node *tmp_neigh_node; 937 - int is_duplicate = 0; 938 int32_t seq_diff; 939 int need_update = 0; 940 - int set_mark, ret = -1; 941 uint32_t seqno = ntohl(batadv_ogm_packet->seqno); 942 uint8_t *neigh_addr; 943 uint8_t packet_count; 944 945 orig_node = batadv_get_orig_node(bat_priv, batadv_ogm_packet->orig); 946 if (!orig_node) 947 - return 0; 948 949 spin_lock_bh(&orig_node->ogm_cnt_lock); 950 seq_diff = seqno - orig_node->last_real_seqno; ··· 953 /* signalize caller that the packet is to be dropped. */ 954 if (!hlist_empty(&orig_node->neigh_list) && 955 batadv_window_protected(bat_priv, seq_diff, 956 - &orig_node->batman_seqno_reset)) 957 goto out; 958 959 rcu_read_lock(); 960 hlist_for_each_entry_rcu(tmp_neigh_node, 961 &orig_node->neigh_list, list) { 962 - is_duplicate |= batadv_test_bit(tmp_neigh_node->real_bits, 963 - orig_node->last_real_seqno, 964 - seqno); 965 - 966 neigh_addr = tmp_neigh_node->addr; 967 if (batadv_compare_eth(neigh_addr, ethhdr->h_source) && 968 - tmp_neigh_node->if_incoming == if_incoming) 969 set_mark = 1; 970 - else 971 set_mark = 0; 972 973 /* if the window moved, set the update flag. 
*/ 974 need_update |= batadv_bit_get_packet(bat_priv, ··· 994 orig_node->last_real_seqno, seqno); 995 orig_node->last_real_seqno = seqno; 996 } 997 - 998 - ret = is_duplicate; 999 1000 out: 1001 spin_unlock_bh(&orig_node->ogm_cnt_lock); ··· 1016 int is_broadcast = 0, is_bidirect; 1017 bool is_single_hop_neigh = false; 1018 bool is_from_best_next_hop = false; 1019 - int is_duplicate, sameseq, simlar_ttl; 1020 uint32_t if_incoming_seqno; 1021 uint8_t *prev_sender; 1022 ··· 1161 if (!orig_node) 1162 return; 1163 1164 - is_duplicate = batadv_iv_ogm_update_seqnos(ethhdr, batadv_ogm_packet, 1165 - if_incoming); 1166 1167 - if (is_duplicate == -1) { 1168 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 1169 "Drop packet: packet within seqno protection time (sender: %pM)\n", 1170 ethhdr->h_source); ··· 1234 * seqno and similar ttl as the non-duplicate 1235 */ 1236 sameseq = orig_node->last_real_seqno == ntohl(batadv_ogm_packet->seqno); 1237 - simlar_ttl = orig_node->last_ttl - 3 <= batadv_ogm_packet->header.ttl; 1238 - if (is_bidirect && (!is_duplicate || (sameseq && simlar_ttl))) 1239 batadv_iv_ogm_orig_update(bat_priv, orig_node, ethhdr, 1240 batadv_ogm_packet, if_incoming, 1241 - tt_buff, is_duplicate); 1242 1243 /* is single hop (direct) neighbor */ 1244 if (is_single_hop_neigh) { ··· 1260 goto out_neigh; 1261 } 1262 1263 - if (is_duplicate) { 1264 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 1265 "Drop packet: duplicate packet received\n"); 1266 goto out_neigh;
··· 29 #include "bat_algo.h" 30 #include "network-coding.h" 31 32 + /** 33 + * batadv_dup_status - duplicate status 34 + * @BATADV_NO_DUP: the packet is a duplicate 35 + * @BATADV_ORIG_DUP: OGM is a duplicate in the originator (but not for the 36 + * neighbor) 37 + * @BATADV_NEIGH_DUP: OGM is a duplicate for the neighbor 38 + * @BATADV_PROTECTED: originator is currently protected (after reboot) 39 + */ 40 + enum batadv_dup_status { 41 + BATADV_NO_DUP = 0, 42 + BATADV_ORIG_DUP, 43 + BATADV_NEIGH_DUP, 44 + BATADV_PROTECTED, 45 + }; 46 + 47 static struct batadv_neigh_node * 48 batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface, 49 const uint8_t *neigh_addr, ··· 650 const struct batadv_ogm_packet *batadv_ogm_packet, 651 struct batadv_hard_iface *if_incoming, 652 const unsigned char *tt_buff, 653 + enum batadv_dup_status dup_status) 654 { 655 struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL; 656 struct batadv_neigh_node *router = NULL; ··· 676 continue; 677 } 678 679 + if (dup_status != BATADV_NO_DUP) 680 continue; 681 682 spin_lock_bh(&tmp_neigh_node->lq_update_lock); ··· 718 neigh_node->tq_avg = batadv_ring_buffer_avg(neigh_node->tq_recv); 719 spin_unlock_bh(&neigh_node->lq_update_lock); 720 721 + if (dup_status == BATADV_NO_DUP) { 722 orig_node->last_ttl = batadv_ogm_packet->header.ttl; 723 neigh_node->last_ttl = batadv_ogm_packet->header.ttl; 724 } ··· 902 return ret; 903 } 904 905 + /** 906 + * batadv_iv_ogm_update_seqnos - process a batman packet for all interfaces, 907 + * adjust the sequence number and find out whether it is a duplicate 908 + * @ethhdr: ethernet header of the packet 909 + * @batadv_ogm_packet: OGM packet to be considered 910 + * @if_incoming: interface on which the OGM packet was received 911 + * 912 + * Returns duplicate status as enum batadv_dup_status 913 */ 914 + static enum batadv_dup_status 915 batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr, 916 const struct batadv_ogm_packet *batadv_ogm_packet, 917 
const struct batadv_hard_iface *if_incoming) ··· 918 struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 919 struct batadv_orig_node *orig_node; 920 struct batadv_neigh_node *tmp_neigh_node; 921 + int is_dup; 922 int32_t seq_diff; 923 int need_update = 0; 924 + int set_mark; 925 + enum batadv_dup_status ret = BATADV_NO_DUP; 926 uint32_t seqno = ntohl(batadv_ogm_packet->seqno); 927 uint8_t *neigh_addr; 928 uint8_t packet_count; 929 930 orig_node = batadv_get_orig_node(bat_priv, batadv_ogm_packet->orig); 931 if (!orig_node) 932 + return BATADV_NO_DUP; 933 934 spin_lock_bh(&orig_node->ogm_cnt_lock); 935 seq_diff = seqno - orig_node->last_real_seqno; ··· 936 /* signalize caller that the packet is to be dropped. */ 937 if (!hlist_empty(&orig_node->neigh_list) && 938 batadv_window_protected(bat_priv, seq_diff, 939 + &orig_node->batman_seqno_reset)) { 940 + ret = BATADV_PROTECTED; 941 goto out; 942 + } 943 944 rcu_read_lock(); 945 hlist_for_each_entry_rcu(tmp_neigh_node, 946 &orig_node->neigh_list, list) { 947 neigh_addr = tmp_neigh_node->addr; 948 + is_dup = batadv_test_bit(tmp_neigh_node->real_bits, 949 + orig_node->last_real_seqno, 950 + seqno); 951 + 952 if (batadv_compare_eth(neigh_addr, ethhdr->h_source) && 953 + tmp_neigh_node->if_incoming == if_incoming) { 954 set_mark = 1; 955 + if (is_dup) 956 + ret = BATADV_NEIGH_DUP; 957 + } else { 958 set_mark = 0; 959 + if (is_dup && (ret != BATADV_NEIGH_DUP)) 960 + ret = BATADV_ORIG_DUP; 961 + } 962 963 /* if the window moved, set the update flag. 
*/ 964 need_update |= batadv_bit_get_packet(bat_priv, ··· 970 orig_node->last_real_seqno, seqno); 971 orig_node->last_real_seqno = seqno; 972 } 973 974 out: 975 spin_unlock_bh(&orig_node->ogm_cnt_lock); ··· 994 int is_broadcast = 0, is_bidirect; 995 bool is_single_hop_neigh = false; 996 bool is_from_best_next_hop = false; 997 + int sameseq, similar_ttl; 998 + enum batadv_dup_status dup_status; 999 uint32_t if_incoming_seqno; 1000 uint8_t *prev_sender; 1001 ··· 1138 if (!orig_node) 1139 return; 1140 1141 + dup_status = batadv_iv_ogm_update_seqnos(ethhdr, batadv_ogm_packet, 1142 + if_incoming); 1143 1144 + if (dup_status == BATADV_PROTECTED) { 1145 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 1146 "Drop packet: packet within seqno protection time (sender: %pM)\n", 1147 ethhdr->h_source); ··· 1211 * seqno and similar ttl as the non-duplicate 1212 */ 1213 sameseq = orig_node->last_real_seqno == ntohl(batadv_ogm_packet->seqno); 1214 + similar_ttl = orig_node->last_ttl - 3 <= batadv_ogm_packet->header.ttl; 1215 + if (is_bidirect && ((dup_status == BATADV_NO_DUP) || 1216 + (sameseq && similar_ttl))) 1217 batadv_iv_ogm_orig_update(bat_priv, orig_node, ethhdr, 1218 batadv_ogm_packet, if_incoming, 1219 + tt_buff, dup_status); 1220 1221 /* is single hop (direct) neighbor */ 1222 if (is_single_hop_neigh) { ··· 1236 goto out_neigh; 1237 } 1238 1239 + if (dup_status == BATADV_NEIGH_DUP) { 1240 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 1241 "Drop packet: duplicate packet received\n"); 1242 goto out_neigh;
+4
net/batman-adv/bridge_loop_avoidance.c
··· 1067 group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN)); 1068 bat_priv->bla.claim_dest.group = group; 1069 1070 if (!oldif) { 1071 batadv_bla_purge_claims(bat_priv, NULL, 1); 1072 batadv_bla_purge_backbone_gw(bat_priv, 1);
··· 1067 group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN)); 1068 bat_priv->bla.claim_dest.group = group; 1069 1070 + /* purge everything when bridge loop avoidance is turned off */ 1071 + if (!atomic_read(&bat_priv->bridge_loop_avoidance)) 1072 + oldif = NULL; 1073 + 1074 if (!oldif) { 1075 batadv_bla_purge_claims(bat_priv, NULL, 1); 1076 batadv_bla_purge_backbone_gw(bat_priv, 1);
+1 -4
net/batman-adv/sysfs.c
··· 582 (strncmp(hard_iface->soft_iface->name, buff, IFNAMSIZ) == 0)) 583 goto out; 584 585 - if (!rtnl_trylock()) { 586 - ret = -ERESTARTSYS; 587 - goto out; 588 - } 589 590 if (status_tmp == BATADV_IF_NOT_IN_USE) { 591 batadv_hardif_disable_interface(hard_iface,
··· 582 (strncmp(hard_iface->soft_iface->name, buff, IFNAMSIZ) == 0)) 583 goto out; 584 585 + rtnl_lock(); 586 587 if (status_tmp == BATADV_IF_NOT_IN_USE) { 588 batadv_hardif_disable_interface(hard_iface,
+2 -2
net/ipv4/ip_tunnel.c
··· 853 } 854 EXPORT_SYMBOL_GPL(ip_tunnel_dellink); 855 856 - int __net_init ip_tunnel_init_net(struct net *net, int ip_tnl_net_id, 857 struct rtnl_link_ops *ops, char *devname) 858 { 859 struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id); ··· 899 unregister_netdevice_queue(itn->fb_tunnel_dev, head); 900 } 901 902 - void __net_exit ip_tunnel_delete_net(struct ip_tunnel_net *itn) 903 { 904 LIST_HEAD(list); 905
··· 853 } 854 EXPORT_SYMBOL_GPL(ip_tunnel_dellink); 855 856 + int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id, 857 struct rtnl_link_ops *ops, char *devname) 858 { 859 struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id); ··· 899 unregister_netdevice_queue(itn->fb_tunnel_dev, head); 900 } 901 902 + void ip_tunnel_delete_net(struct ip_tunnel_net *itn) 903 { 904 LIST_HEAD(list); 905
+1 -2
net/ipv4/ip_vti.c
··· 361 tunnel->err_count = 0; 362 } 363 364 - IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | 365 - IPSKB_REROUTED); 366 skb_dst_drop(skb); 367 skb_dst_set(skb, &rt->dst); 368 nf_reset(skb);
··· 361 tunnel->err_count = 0; 362 } 363 364 + memset(IPCB(skb), 0, sizeof(*IPCB(skb))); 365 skb_dst_drop(skb); 366 skb_dst_set(skb, &rt->dst); 367 nf_reset(skb);
+3 -3
net/l2tp/l2tp_ppp.c
··· 346 skb_put(skb, 2); 347 348 /* Copy user data into skb */ 349 - error = memcpy_fromiovec(skb->data, m->msg_iov, total_len); 350 if (error < 0) { 351 kfree_skb(skb); 352 goto error_put_sess_tun; 353 } 354 - skb_put(skb, total_len); 355 356 l2tp_xmit_skb(session, skb, session->hdr_len); 357 358 sock_put(ps->tunnel_sock); 359 sock_put(sk); 360 361 - return error; 362 363 error_put_sess_tun: 364 sock_put(ps->tunnel_sock);
··· 346 skb_put(skb, 2); 347 348 /* Copy user data into skb */ 349 + error = memcpy_fromiovec(skb_put(skb, total_len), m->msg_iov, 350 + total_len); 351 if (error < 0) { 352 kfree_skb(skb); 353 goto error_put_sess_tun; 354 } 355 356 l2tp_xmit_skb(session, skb, session->hdr_len); 357 358 sock_put(ps->tunnel_sock); 359 sock_put(sk); 360 361 + return total_len; 362 363 error_put_sess_tun: 364 sock_put(ps->tunnel_sock);
+2 -3
net/packet/af_packet.c
··· 2851 return -EOPNOTSUPP; 2852 2853 uaddr->sa_family = AF_PACKET; 2854 rcu_read_lock(); 2855 dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex); 2856 if (dev) 2857 - strncpy(uaddr->sa_data, dev->name, 14); 2858 - else 2859 - memset(uaddr->sa_data, 0, 14); 2860 rcu_read_unlock(); 2861 *uaddr_len = sizeof(*uaddr); 2862
··· 2851 return -EOPNOTSUPP; 2852 2853 uaddr->sa_family = AF_PACKET; 2854 + memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data)); 2855 rcu_read_lock(); 2856 dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex); 2857 if (dev) 2858 + strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data)); 2859 rcu_read_unlock(); 2860 *uaddr_len = sizeof(*uaddr); 2861
+2 -4
net/sctp/outqueue.c
··· 206 */ 207 void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q) 208 { 209 q->asoc = asoc; 210 INIT_LIST_HEAD(&q->out_chunk_list); 211 INIT_LIST_HEAD(&q->control_chunk_list); ··· 215 INIT_LIST_HEAD(&q->sacked); 216 INIT_LIST_HEAD(&q->abandoned); 217 218 - q->fast_rtx = 0; 219 - q->outstanding_bytes = 0; 220 q->empty = 1; 221 - q->cork = 0; 222 - q->out_qlen = 0; 223 } 224 225 /* Free the outqueue structure and any related pending chunks.
··· 206 */ 207 void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q) 208 { 209 + memset(q, 0, sizeof(struct sctp_outq)); 210 + 211 q->asoc = asoc; 212 INIT_LIST_HEAD(&q->out_chunk_list); 213 INIT_LIST_HEAD(&q->control_chunk_list); ··· 213 INIT_LIST_HEAD(&q->sacked); 214 INIT_LIST_HEAD(&q->abandoned); 215 216 q->empty = 1; 217 } 218 219 /* Free the outqueue structure and any related pending chunks.