Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

1) Out of bounds array access in 802.11 minstrel code, from Adrien
Schildknecht.

2) Don't use skb_get() in IGMP/MLD code paths, as this can cause
pskb_may_pull() to hit BUG(). From Linus Luessing.

3) Fix off by one in ipv4 route dumping code, from Andy Whitcroft.

4) Fix deadlock in reqsk_queue_unlink(), from Eric Dumazet.

5) Fix ppp device deregistration wrt. netns deletion, from Guillaume
Nault.

6) Fix deadlock when creating per-cpu ipv6 routes, from Martin KaFai
Lau.

7) Fix memory leak in batman-adv code, from Sven Eckelmann.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
batman-adv: Fix memory leak on tt add with invalid vlan
net: phy: fix semicolon.cocci warnings
net: qmi_wwan: add HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module
be2net: avoid vxlan offloading on multichannel configs
ipv6: Fix a potential deadlock when creating pcpu rt
ipv6: Add rt6_make_pcpu_route()
ipv6: Remove un-used argument from ip6_dst_alloc()
net: phy: workaround for buggy cable detection by LAN8700 after cable plugging
net: ethernet: micrel: fix an error code
ppp: fix device unregistration upon netns deletion
net: phy: fix PHY_RUNNING in phy_state_machine
Revert "net: limit tcp/udp rmem/wmem to SOCK_{RCV,SND}BUF_MIN"
inet: fix potential deadlock in reqsk_queue_unlink()
gianfar: Restore link state settings after MAC reset
ipv4: off-by-one in continuation handling in /proc/net/route
net: fix wrong skb_get() usage / crash in IGMP/MLD parsing code
mac80211: fix invalid read in minstrel_sort_best_tp_rates()

+212 -146
+2 -2
drivers/net/ethernet/emulex/benet/be_main.c
··· 5174 5174 struct device *dev = &adapter->pdev->dev; 5175 5175 int status; 5176 5176 5177 - if (lancer_chip(adapter) || BEx_chip(adapter)) 5177 + if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter)) 5178 5178 return; 5179 5179 5180 5180 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) { ··· 5221 5221 { 5222 5222 struct be_adapter *adapter = netdev_priv(netdev); 5223 5223 5224 - if (lancer_chip(adapter) || BEx_chip(adapter)) 5224 + if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter)) 5225 5225 return; 5226 5226 5227 5227 if (adapter->vxlan_port != port)
+5
drivers/net/ethernet/freescale/gianfar.c
··· 2102 2102 /* Start Rx/Tx DMA and enable the interrupts */ 2103 2103 gfar_start(priv); 2104 2104 2105 + /* force link state update after mac reset */ 2106 + priv->oldlink = 0; 2107 + priv->oldspeed = 0; 2108 + priv->oldduplex = -1; 2109 + 2105 2110 phy_start(priv->phydev); 2106 2111 2107 2112 enable_napi(priv);
+2 -3
drivers/net/ethernet/micrel/ks8842.c
··· 952 952 953 953 sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev, 954 954 tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE); 955 - err = dma_mapping_error(adapter->dev, 956 - sg_dma_address(&tx_ctl->sg)); 957 - if (err) { 955 + if (dma_mapping_error(adapter->dev, sg_dma_address(&tx_ctl->sg))) { 956 + err = -ENOMEM; 958 957 sg_dma_address(&tx_ctl->sg) = 0; 959 958 goto err; 960 959 }
+12 -4
drivers/net/phy/phy.c
··· 811 811 bool needs_aneg = false, do_suspend = false; 812 812 enum phy_state old_state; 813 813 int err = 0; 814 + int old_link; 814 815 815 816 mutex_lock(&phydev->lock); 816 817 ··· 897 896 phydev->adjust_link(phydev->attached_dev); 898 897 break; 899 898 case PHY_RUNNING: 900 - /* Only register a CHANGE if we are 901 - * polling or ignoring interrupts 899 + /* Only register a CHANGE if we are polling or ignoring 900 + * interrupts and link changed since latest checking. 902 901 */ 903 - if (!phy_interrupt_is_valid(phydev)) 904 - phydev->state = PHY_CHANGELINK; 902 + if (!phy_interrupt_is_valid(phydev)) { 903 + old_link = phydev->link; 904 + err = phy_read_status(phydev); 905 + if (err) 906 + break; 907 + 908 + if (old_link != phydev->link) 909 + phydev->state = PHY_CHANGELINK; 910 + } 905 911 break; 906 912 case PHY_CHANGELINK: 907 913 err = phy_read_status(phydev);
+19 -12
drivers/net/phy/smsc.c
··· 91 91 } 92 92 93 93 /* 94 - * The LAN8710/LAN8720 requires a minimum of 2 link pulses within 64ms of each 95 - * other in order to set the ENERGYON bit and exit EDPD mode. If a link partner 96 - * does send the pulses within this interval, the PHY will remained powered 97 - * down. 98 - * 99 - * This workaround will manually toggle the PHY on/off upon calls to read_status 100 - * in order to generate link test pulses if the link is down. If a link partner 101 - * is present, it will respond to the pulses, which will cause the ENERGYON bit 102 - * to be set and will cause the EDPD mode to be exited. 94 + * The LAN87xx suffers from rare absence of the ENERGYON-bit when Ethernet cable 95 + * plugs in while LAN87xx is in Energy Detect Power-Down mode. This leads to 96 + * unstable detection of plugging in Ethernet cable. 97 + * This workaround disables Energy Detect Power-Down mode and waiting for 98 + * response on link pulses to detect presence of plugged Ethernet cable. 99 + * The Energy Detect Power-Down mode is enabled again in the end of procedure to 100 + * save approximately 220 mW of power if cable is unplugged. 
103 101 */ 104 102 static int lan87xx_read_status(struct phy_device *phydev) 105 103 { 106 104 int err = genphy_read_status(phydev); 105 + int i; 107 106 108 107 if (!phydev->link) { 109 108 /* Disable EDPD to wake up PHY */ ··· 115 116 if (rc < 0) 116 117 return rc; 117 118 118 - /* Sleep 64 ms to allow ~5 link test pulses to be sent */ 119 - msleep(64); 119 + /* Wait max 640 ms to detect energy */ 120 + for (i = 0; i < 64; i++) { 121 + /* Sleep to allow link test pulses to be sent */ 122 + msleep(10); 123 + rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS); 124 + if (rc < 0) 125 + return rc; 126 + if (rc & MII_LAN83C185_ENERGYON) 127 + break; 128 + } 120 129 121 130 /* Re-enable EDPD */ 122 131 rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS); ··· 198 191 199 192 /* basic functions */ 200 193 .config_aneg = genphy_config_aneg, 201 - .read_status = genphy_read_status, 194 + .read_status = lan87xx_read_status, 202 195 .config_init = smsc_phy_config_init, 203 196 .soft_reset = smsc_phy_reset, 204 197
+42 -36
drivers/net/ppp/ppp_generic.c
··· 269 269 static void ppp_ccp_closed(struct ppp *ppp); 270 270 static struct compressor *find_compressor(int type); 271 271 static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st); 272 - static struct ppp *ppp_create_interface(struct net *net, int unit, int *retp); 272 + static struct ppp *ppp_create_interface(struct net *net, int unit, 273 + struct file *file, int *retp); 273 274 static void init_ppp_file(struct ppp_file *pf, int kind); 274 - static void ppp_shutdown_interface(struct ppp *ppp); 275 275 static void ppp_destroy_interface(struct ppp *ppp); 276 276 static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit); 277 277 static struct channel *ppp_find_channel(struct ppp_net *pn, int unit); ··· 392 392 file->private_data = NULL; 393 393 if (pf->kind == INTERFACE) { 394 394 ppp = PF_TO_PPP(pf); 395 + rtnl_lock(); 395 396 if (file == ppp->owner) 396 - ppp_shutdown_interface(ppp); 397 + unregister_netdevice(ppp->dev); 398 + rtnl_unlock(); 397 399 } 398 400 if (atomic_dec_and_test(&pf->refcnt)) { 399 401 switch (pf->kind) { ··· 595 593 mutex_lock(&ppp_mutex); 596 594 if (pf->kind == INTERFACE) { 597 595 ppp = PF_TO_PPP(pf); 596 + rtnl_lock(); 598 597 if (file == ppp->owner) 599 - ppp_shutdown_interface(ppp); 598 + unregister_netdevice(ppp->dev); 599 + rtnl_unlock(); 600 600 } 601 601 if (atomic_long_read(&file->f_count) < 2) { 602 602 ppp_release(NULL, file); ··· 842 838 /* Create a new ppp unit */ 843 839 if (get_user(unit, p)) 844 840 break; 845 - ppp = ppp_create_interface(net, unit, &err); 841 + ppp = ppp_create_interface(net, unit, file, &err); 846 842 if (!ppp) 847 843 break; 848 844 file->private_data = &ppp->file; 849 - ppp->owner = file; 850 845 err = -EFAULT; 851 846 if (put_user(ppp->file.index, p)) 852 847 break; ··· 919 916 static __net_exit void ppp_exit_net(struct net *net) 920 917 { 921 918 struct ppp_net *pn = net_generic(net, ppp_net_id); 919 + struct ppp *ppp; 920 + LIST_HEAD(list); 921 + int id; 922 + 923 + rtnl_lock(); 924 + 
idr_for_each_entry(&pn->units_idr, ppp, id) 925 + unregister_netdevice_queue(ppp->dev, &list); 926 + 927 + unregister_netdevice_many(&list); 928 + rtnl_unlock(); 922 929 923 930 idr_destroy(&pn->units_idr); 924 931 } ··· 1101 1088 return 0; 1102 1089 } 1103 1090 1091 + static void ppp_dev_uninit(struct net_device *dev) 1092 + { 1093 + struct ppp *ppp = netdev_priv(dev); 1094 + struct ppp_net *pn = ppp_pernet(ppp->ppp_net); 1095 + 1096 + ppp_lock(ppp); 1097 + ppp->closing = 1; 1098 + ppp_unlock(ppp); 1099 + 1100 + mutex_lock(&pn->all_ppp_mutex); 1101 + unit_put(&pn->units_idr, ppp->file.index); 1102 + mutex_unlock(&pn->all_ppp_mutex); 1103 + 1104 + ppp->owner = NULL; 1105 + 1106 + ppp->file.dead = 1; 1107 + wake_up_interruptible(&ppp->file.rwait); 1108 + } 1109 + 1104 1110 static const struct net_device_ops ppp_netdev_ops = { 1105 1111 .ndo_init = ppp_dev_init, 1112 + .ndo_uninit = ppp_dev_uninit, 1106 1113 .ndo_start_xmit = ppp_start_xmit, 1107 1114 .ndo_do_ioctl = ppp_net_ioctl, 1108 1115 .ndo_get_stats64 = ppp_get_stats64, ··· 2700 2667 * or if there is already a unit with the requested number. 2701 2668 * unit == -1 means allocate a new number. 
2702 2669 */ 2703 - static struct ppp * 2704 - ppp_create_interface(struct net *net, int unit, int *retp) 2670 + static struct ppp *ppp_create_interface(struct net *net, int unit, 2671 + struct file *file, int *retp) 2705 2672 { 2706 2673 struct ppp *ppp; 2707 2674 struct ppp_net *pn; ··· 2721 2688 ppp->mru = PPP_MRU; 2722 2689 init_ppp_file(&ppp->file, INTERFACE); 2723 2690 ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */ 2691 + ppp->owner = file; 2724 2692 for (i = 0; i < NUM_NP; ++i) 2725 2693 ppp->npmode[i] = NPMODE_PASS; 2726 2694 INIT_LIST_HEAD(&ppp->channels); ··· 2807 2773 skb_queue_head_init(&pf->rq); 2808 2774 atomic_set(&pf->refcnt, 1); 2809 2775 init_waitqueue_head(&pf->rwait); 2810 - } 2811 - 2812 - /* 2813 - * Take down a ppp interface unit - called when the owning file 2814 - * (the one that created the unit) is closed or detached. 2815 - */ 2816 - static void ppp_shutdown_interface(struct ppp *ppp) 2817 - { 2818 - struct ppp_net *pn; 2819 - 2820 - pn = ppp_pernet(ppp->ppp_net); 2821 - mutex_lock(&pn->all_ppp_mutex); 2822 - 2823 - /* This will call dev_close() for us. */ 2824 - ppp_lock(ppp); 2825 - if (!ppp->closing) { 2826 - ppp->closing = 1; 2827 - ppp_unlock(ppp); 2828 - unregister_netdev(ppp->dev); 2829 - unit_put(&pn->units_idr, ppp->file.index); 2830 - } else 2831 - ppp_unlock(ppp); 2832 - 2833 - ppp->file.dead = 1; 2834 - ppp->owner = NULL; 2835 - wake_up_interruptible(&ppp->file.rwait); 2836 - 2837 - mutex_unlock(&pn->all_ppp_mutex); 2838 2776 } 2839 2777 2840 2778 /*
+1
drivers/net/usb/qmi_wwan.c
··· 785 785 {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ 786 786 {QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */ 787 787 {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ 788 + {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ 788 789 {QMI_FIXED_INTF(0x03f0, 0x581d, 4)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */ 789 790 790 791 /* 4. Gobi 1000 devices */
+4 -1
net/batman-adv/translation-table.c
··· 595 595 /* increase the refcounter of the related vlan */ 596 596 vlan = batadv_softif_vlan_get(bat_priv, vid); 597 597 if (WARN(!vlan, "adding TT local entry %pM to non-existent VLAN %d", 598 - addr, BATADV_PRINT_VID(vid))) 598 + addr, BATADV_PRINT_VID(vid))) { 599 + kfree(tt_local); 600 + tt_local = NULL; 599 601 goto out; 602 + } 600 603 601 604 batadv_dbg(BATADV_DBG_TT, bat_priv, 602 605 "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
+2 -2
net/bridge/br_multicast.c
··· 1591 1591 break; 1592 1592 } 1593 1593 1594 - if (skb_trimmed) 1594 + if (skb_trimmed && skb_trimmed != skb) 1595 1595 kfree_skb(skb_trimmed); 1596 1596 1597 1597 return err; ··· 1636 1636 break; 1637 1637 } 1638 1638 1639 - if (skb_trimmed) 1639 + if (skb_trimmed && skb_trimmed != skb) 1640 1640 kfree_skb(skb_trimmed); 1641 1641 1642 1642 return err;
+18 -19
net/core/skbuff.c
··· 4022 4022 * Otherwise returns the provided skb. Returns NULL in error cases 4023 4023 * (e.g. transport_len exceeds skb length or out-of-memory). 4024 4024 * 4025 - * Caller needs to set the skb transport header and release the returned skb. 4026 - * Provided skb is consumed. 4025 + * Caller needs to set the skb transport header and free any returned skb if it 4026 + * differs from the provided skb. 4027 4027 */ 4028 4028 static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb, 4029 4029 unsigned int transport_len) ··· 4032 4032 unsigned int len = skb_transport_offset(skb) + transport_len; 4033 4033 int ret; 4034 4034 4035 - if (skb->len < len) { 4036 - kfree_skb(skb); 4035 + if (skb->len < len) 4037 4036 return NULL; 4038 - } else if (skb->len == len) { 4037 + else if (skb->len == len) 4039 4038 return skb; 4040 - } 4041 4039 4042 4040 skb_chk = skb_clone(skb, GFP_ATOMIC); 4043 - kfree_skb(skb); 4044 - 4045 4041 if (!skb_chk) 4046 4042 return NULL; 4047 4043 ··· 4062 4066 * If the skb has data beyond the given transport length, then a 4063 4067 * trimmed & cloned skb is checked and returned. 4064 4068 * 4065 - * Caller needs to set the skb transport header and release the returned skb. 4066 - * Provided skb is consumed. 4069 + * Caller needs to set the skb transport header and free any returned skb if it 4070 + * differs from the provided skb. 
4067 4071 */ 4068 4072 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, 4069 4073 unsigned int transport_len, ··· 4075 4079 4076 4080 skb_chk = skb_checksum_maybe_trim(skb, transport_len); 4077 4081 if (!skb_chk) 4078 - return NULL; 4082 + goto err; 4079 4083 4080 - if (!pskb_may_pull(skb_chk, offset)) { 4081 - kfree_skb(skb_chk); 4082 - return NULL; 4083 - } 4084 + if (!pskb_may_pull(skb_chk, offset)) 4085 + goto err; 4084 4086 4085 4087 __skb_pull(skb_chk, offset); 4086 4088 ret = skb_chkf(skb_chk); 4087 4089 __skb_push(skb_chk, offset); 4088 4090 4089 - if (ret) { 4090 - kfree_skb(skb_chk); 4091 - return NULL; 4092 - } 4091 + if (ret) 4092 + goto err; 4093 4093 4094 4094 return skb_chk; 4095 + 4096 + err: 4097 + if (skb_chk && skb_chk != skb) 4098 + kfree_skb(skb_chk); 4099 + 4100 + return NULL; 4101 + 4095 4102 } 4096 4103 EXPORT_SYMBOL(skb_checksum_trimmed); 4097 4104
+1 -1
net/ipv4/fib_trie.c
··· 2465 2465 key = l->key + 1; 2466 2466 iter->pos++; 2467 2467 2468 - if (pos-- <= 0) 2468 + if (--pos <= 0) 2469 2469 break; 2470 2470 2471 2471 l = NULL;
+18 -15
net/ipv4/igmp.c
··· 1435 1435 struct sk_buff *skb_chk; 1436 1436 unsigned int transport_len; 1437 1437 unsigned int len = skb_transport_offset(skb) + sizeof(struct igmphdr); 1438 - int ret; 1438 + int ret = -EINVAL; 1439 1439 1440 1440 transport_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb); 1441 1441 1442 - skb_get(skb); 1443 1442 skb_chk = skb_checksum_trimmed(skb, transport_len, 1444 1443 ip_mc_validate_checksum); 1445 1444 if (!skb_chk) 1446 - return -EINVAL; 1445 + goto err; 1447 1446 1448 - if (!pskb_may_pull(skb_chk, len)) { 1449 - kfree_skb(skb_chk); 1450 - return -EINVAL; 1451 - } 1447 + if (!pskb_may_pull(skb_chk, len)) 1448 + goto err; 1452 1449 1453 1450 ret = ip_mc_check_igmp_msg(skb_chk); 1454 - if (ret) { 1455 - kfree_skb(skb_chk); 1456 - return ret; 1457 - } 1451 + if (ret) 1452 + goto err; 1458 1453 1459 1454 if (skb_trimmed) 1460 1455 *skb_trimmed = skb_chk; 1461 - else 1456 + /* free now unneeded clone */ 1457 + else if (skb_chk != skb) 1462 1458 kfree_skb(skb_chk); 1463 1459 1464 - return 0; 1460 + ret = 0; 1461 + 1462 + err: 1463 + if (ret && skb_chk && skb_chk != skb) 1464 + kfree_skb(skb_chk); 1465 + 1466 + return ret; 1465 1467 } 1466 1468 1467 1469 /** ··· 1472 1470 * @skb_trimmed: to store an skb pointer trimmed to IPv4 packet tail (optional) 1473 1471 * 1474 1472 * Checks whether an IPv4 packet is a valid IGMP packet. If so sets 1475 - * skb network and transport headers accordingly and returns zero. 1473 + * skb transport header accordingly and returns zero. 1476 1474 * 1477 1475 * -EINVAL: A broken packet was detected, i.e. it violates some internet 1478 1476 * standard ··· 1487 1485 * to leave the original skb and its full frame unchanged (which might be 1488 1486 * desirable for layer 2 frame jugglers). 1489 1487 * 1490 - * The caller needs to release a reference count from any returned skb_trimmed. 1488 + * Caller needs to set the skb network header and free any returned skb if it 1489 + * differs from the provided skb. 
1491 1490 */ 1492 1491 int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed) 1493 1492 {
+1 -1
net/ipv4/inet_connection_sock.c
··· 593 593 } 594 594 595 595 spin_unlock(&queue->syn_wait_lock); 596 - if (del_timer_sync(&req->rsk_timer)) 596 + if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer)) 597 597 reqsk_put(req); 598 598 return found; 599 599 }
+4 -6
net/ipv4/sysctl_net_ipv4.c
··· 41 41 static int tcp_syn_retries_max = MAX_TCP_SYNCNT; 42 42 static int ip_ping_group_range_min[] = { 0, 0 }; 43 43 static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX }; 44 - static int min_sndbuf = SOCK_MIN_SNDBUF; 45 - static int min_rcvbuf = SOCK_MIN_RCVBUF; 46 44 47 45 /* Update system visible IP port range */ 48 46 static void set_local_port_range(struct net *net, int range[2]) ··· 528 530 .maxlen = sizeof(sysctl_tcp_wmem), 529 531 .mode = 0644, 530 532 .proc_handler = proc_dointvec_minmax, 531 - .extra1 = &min_sndbuf, 533 + .extra1 = &one, 532 534 }, 533 535 { 534 536 .procname = "tcp_notsent_lowat", ··· 543 545 .maxlen = sizeof(sysctl_tcp_rmem), 544 546 .mode = 0644, 545 547 .proc_handler = proc_dointvec_minmax, 546 - .extra1 = &min_rcvbuf, 548 + .extra1 = &one, 547 549 }, 548 550 { 549 551 .procname = "tcp_app_win", ··· 756 758 .maxlen = sizeof(sysctl_udp_rmem_min), 757 759 .mode = 0644, 758 760 .proc_handler = proc_dointvec_minmax, 759 - .extra1 = &min_rcvbuf, 761 + .extra1 = &one 760 762 }, 761 763 { 762 764 .procname = "udp_wmem_min", ··· 764 766 .maxlen = sizeof(sysctl_udp_wmem_min), 765 767 .mode = 0644, 766 768 .proc_handler = proc_dointvec_minmax, 767 - .extra1 = &min_sndbuf, 769 + .extra1 = &one 768 770 }, 769 771 { } 770 772 };
+2
net/ipv6/ip6_fib.c
··· 172 172 *ppcpu_rt = NULL; 173 173 } 174 174 } 175 + 176 + non_pcpu_rt->rt6i_pcpu = NULL; 175 177 } 176 178 177 179 static void rt6_release(struct rt6_info *rt)
+18 -15
net/ipv6/mcast_snoop.c
··· 143 143 struct sk_buff *skb_chk = NULL; 144 144 unsigned int transport_len; 145 145 unsigned int len = skb_transport_offset(skb) + sizeof(struct mld_msg); 146 - int ret; 146 + int ret = -EINVAL; 147 147 148 148 transport_len = ntohs(ipv6_hdr(skb)->payload_len); 149 149 transport_len -= skb_transport_offset(skb) - sizeof(struct ipv6hdr); 150 150 151 - skb_get(skb); 152 151 skb_chk = skb_checksum_trimmed(skb, transport_len, 153 152 ipv6_mc_validate_checksum); 154 153 if (!skb_chk) 155 - return -EINVAL; 154 + goto err; 156 155 157 - if (!pskb_may_pull(skb_chk, len)) { 158 - kfree_skb(skb_chk); 159 - return -EINVAL; 160 - } 156 + if (!pskb_may_pull(skb_chk, len)) 157 + goto err; 161 158 162 159 ret = ipv6_mc_check_mld_msg(skb_chk); 163 - if (ret) { 164 - kfree_skb(skb_chk); 165 - return ret; 166 - } 160 + if (ret) 161 + goto err; 167 162 168 163 if (skb_trimmed) 169 164 *skb_trimmed = skb_chk; 170 - else 165 + /* free now unneeded clone */ 166 + else if (skb_chk != skb) 171 167 kfree_skb(skb_chk); 172 168 173 - return 0; 169 + ret = 0; 170 + 171 + err: 172 + if (ret && skb_chk && skb_chk != skb) 173 + kfree_skb(skb_chk); 174 + 175 + return ret; 174 176 } 175 177 176 178 /** ··· 181 179 * @skb_trimmed: to store an skb pointer trimmed to IPv6 packet tail (optional) 182 180 * 183 181 * Checks whether an IPv6 packet is a valid MLD packet. If so sets 184 - * skb network and transport headers accordingly and returns zero. 182 + * skb transport header accordingly and returns zero. 185 183 * 186 184 * -EINVAL: A broken packet was detected, i.e. it violates some internet 187 185 * standard ··· 196 194 * to leave the original skb and its full frame unchanged (which might be 197 195 * desirable for layer 2 frame jugglers). 198 196 * 199 - * The caller needs to release a reference count from any returned skb_trimmed. 197 + * Caller needs to set the skb network header and free any returned skb if it 198 + * differs from the provided skb. 
200 199 */ 201 200 int ipv6_mc_check_mld(struct sk_buff *skb, struct sk_buff **skb_trimmed) 202 201 {
+55 -24
net/ipv6/route.c
··· 318 318 /* allocate dst with ip6_dst_ops */ 319 319 static struct rt6_info *__ip6_dst_alloc(struct net *net, 320 320 struct net_device *dev, 321 - int flags, 322 - struct fib6_table *table) 321 + int flags) 323 322 { 324 323 struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev, 325 324 0, DST_OBSOLETE_FORCE_CHK, flags); ··· 335 336 336 337 static struct rt6_info *ip6_dst_alloc(struct net *net, 337 338 struct net_device *dev, 338 - int flags, 339 - struct fib6_table *table) 339 + int flags) 340 340 { 341 - struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags, table); 341 + struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags); 342 342 343 343 if (rt) { 344 344 rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC); ··· 948 950 if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU)) 949 951 ort = (struct rt6_info *)ort->dst.from; 950 952 951 - rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev, 952 - 0, ort->rt6i_table); 953 + rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev, 0); 953 954 954 955 if (!rt) 955 956 return NULL; ··· 980 983 struct rt6_info *pcpu_rt; 981 984 982 985 pcpu_rt = __ip6_dst_alloc(dev_net(rt->dst.dev), 983 - rt->dst.dev, rt->dst.flags, 984 - rt->rt6i_table); 986 + rt->dst.dev, rt->dst.flags); 985 987 986 988 if (!pcpu_rt) 987 989 return NULL; ··· 993 997 /* It should be called with read_lock_bh(&tb6_lock) acquired */ 994 998 static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt) 995 999 { 996 - struct rt6_info *pcpu_rt, *prev, **p; 1000 + struct rt6_info *pcpu_rt, **p; 997 1001 998 1002 p = this_cpu_ptr(rt->rt6i_pcpu); 999 1003 pcpu_rt = *p; 1000 1004 1001 - if (pcpu_rt) 1002 - goto done; 1005 + if (pcpu_rt) { 1006 + dst_hold(&pcpu_rt->dst); 1007 + rt6_dst_from_metrics_check(pcpu_rt); 1008 + } 1009 + return pcpu_rt; 1010 + } 1011 + 1012 + static struct rt6_info *rt6_make_pcpu_route(struct rt6_info *rt) 1013 + { 1014 + struct fib6_table *table = rt->rt6i_table; 1015 + struct rt6_info *pcpu_rt, *prev, **p; 1003 
1016 1004 1017 pcpu_rt = ip6_rt_pcpu_alloc(rt); 1005 1018 if (!pcpu_rt) { 1006 1019 struct net *net = dev_net(rt->dst.dev); 1007 1020 1008 - pcpu_rt = net->ipv6.ip6_null_entry; 1009 - goto done; 1021 + dst_hold(&net->ipv6.ip6_null_entry->dst); 1022 + return net->ipv6.ip6_null_entry; 1010 1023 } 1011 1024 1012 - prev = cmpxchg(p, NULL, pcpu_rt); 1013 - if (prev) { 1014 - /* If someone did it before us, return prev instead */ 1025 + read_lock_bh(&table->tb6_lock); 1026 + if (rt->rt6i_pcpu) { 1027 + p = this_cpu_ptr(rt->rt6i_pcpu); 1028 + prev = cmpxchg(p, NULL, pcpu_rt); 1029 + if (prev) { 1030 + /* If someone did it before us, return prev instead */ 1031 + dst_destroy(&pcpu_rt->dst); 1032 + pcpu_rt = prev; 1033 + } 1034 + } else { 1035 + /* rt has been removed from the fib6 tree 1036 + * before we have a chance to acquire the read_lock. 1037 + * In this case, don't brother to create a pcpu rt 1038 + * since rt is going away anyway. The next 1039 + * dst_check() will trigger a re-lookup. 1040 + */ 1015 1041 dst_destroy(&pcpu_rt->dst); 1016 - pcpu_rt = prev; 1042 + pcpu_rt = rt; 1017 1043 } 1018 - 1019 - done: 1020 1044 dst_hold(&pcpu_rt->dst); 1021 1045 rt6_dst_from_metrics_check(pcpu_rt); 1046 + read_unlock_bh(&table->tb6_lock); 1022 1047 return pcpu_rt; 1023 1048 } 1024 1049 ··· 1114 1097 rt->dst.lastuse = jiffies; 1115 1098 rt->dst.__use++; 1116 1099 pcpu_rt = rt6_get_pcpu_route(rt); 1117 - read_unlock_bh(&table->tb6_lock); 1100 + 1101 + if (pcpu_rt) { 1102 + read_unlock_bh(&table->tb6_lock); 1103 + } else { 1104 + /* We have to do the read_unlock first 1105 + * because rt6_make_pcpu_route() may trigger 1106 + * ip6_dst_gc() which will take the write_lock. 
1107 + */ 1108 + dst_hold(&rt->dst); 1109 + read_unlock_bh(&table->tb6_lock); 1110 + pcpu_rt = rt6_make_pcpu_route(rt); 1111 + dst_release(&rt->dst); 1112 + } 1118 1113 1119 1114 return pcpu_rt; 1115 + 1120 1116 } 1121 1117 } 1122 1118 ··· 1585 1555 if (unlikely(!idev)) 1586 1556 return ERR_PTR(-ENODEV); 1587 1557 1588 - rt = ip6_dst_alloc(net, dev, 0, NULL); 1558 + rt = ip6_dst_alloc(net, dev, 0); 1589 1559 if (unlikely(!rt)) { 1590 1560 in6_dev_put(idev); 1591 1561 dst = ERR_PTR(-ENOMEM); ··· 1772 1742 if (!table) 1773 1743 goto out; 1774 1744 1775 - rt = ip6_dst_alloc(net, NULL, (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT, table); 1745 + rt = ip6_dst_alloc(net, NULL, 1746 + (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT); 1776 1747 1777 1748 if (!rt) { 1778 1749 err = -ENOMEM; ··· 2430 2399 { 2431 2400 struct net *net = dev_net(idev->dev); 2432 2401 struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev, 2433 - DST_NOCOUNT, NULL); 2402 + DST_NOCOUNT); 2434 2403 if (!rt) 2435 2404 return ERR_PTR(-ENOMEM); 2436 2405
+6 -5
net/mac80211/rc80211_minstrel.c
··· 92 92 static inline void 93 93 minstrel_sort_best_tp_rates(struct minstrel_sta_info *mi, int i, u8 *tp_list) 94 94 { 95 - int j = MAX_THR_RATES; 96 - struct minstrel_rate_stats *tmp_mrs = &mi->r[j - 1].stats; 95 + int j; 96 + struct minstrel_rate_stats *tmp_mrs; 97 97 struct minstrel_rate_stats *cur_mrs = &mi->r[i].stats; 98 98 99 - while (j > 0 && (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_ewma) > 100 - minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_ewma))) { 101 - j--; 99 + for (j = MAX_THR_RATES; j > 0; --j) { 102 100 tmp_mrs = &mi->r[tp_list[j - 1]].stats; 101 + if (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_ewma) <= 102 + minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_ewma)) 103 + break; 103 104 } 104 105 105 106 if (j < MAX_THR_RATES - 1)