Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking from David Miller:

1) IPV4 routing metrics can become stale when routes are changed by the
administrator, fix from Steffen Klassert.

2) atl1c does "val |= XXX;" where XXX is a bit number not a bit mask,
fix by using set_bit. From Dan Carpenter.

3) Memory accounting bug in carl9170 driver results in wedged TX queue.
Fix from Nicolas Cavallari.

4) iwlwifi accidentally uses "sizeof(ptr)" instead of "sizeof(*ptr)", fix
from Johannes Berg.

5) Openvswitch doesn't honor dp_ifindex when doing vport lookups, fix
from Ben Pfaff.

6) ehea conversion to 64-bit stats lost multicast and rx_errors
accounting, fix from Eric Dumazet.

7) Bridge state transition logging in br_stp_disable_port() is busted,
it's emitted at the wrong time and the message is in the wrong tense,
fix from Paulius Zaleckas.

8) mlx4 device erroneously invokes the queue resize firmware operation
twice, fix from Jack Morgenstein.

9) Fix deadlock in usbnet, need to drop lock when invoking usb_unlink_urb()
otherwise we recurse into taking it again. Fix from Sebastian Siewior.

10) hyperv network driver uses the wrong driver name string, fix from
Haiyang Zhang.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
net/hyperv: Use the built-in macro KBUILD_MODNAME for this driver
net/usbnet: avoid recursive locking in usbnet_stop()
route: Remove redirect_genid
inetpeer: Invalidate the inetpeer tree along with the routing cache
mlx4_core: fix bug in modify_cq wrapper for resize flow.
atl1c: set ATL1C_WORK_EVENT_RESET bit correctly
bridge: fix state reporting when port is disabled
bridge: br_log_state() s/entering/entered/
ehea: restore multicast and rx_errors fields
openvswitch: Fix checksum update for actions on UDP packets.
openvswitch: Honor dp_ifindex, when specified, for vport lookup by name.
iwlwifi: fix wowlan suspend
mwifiex: reset encryption mode flag before association
carl9170: fix frame delivery if sta is in powersave mode
carl9170: Fix memory accounting when sta is in power-save mode.

+139 -37
+1 -1
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
··· 1710 1710 "atl1c hardware error (status = 0x%x)\n", 1711 1711 status & ISR_ERROR); 1712 1712 /* reset MAC */ 1713 - adapter->work_event |= ATL1C_WORK_EVENT_RESET; 1713 + set_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event); 1714 1714 schedule_work(&adapter->common_task); 1715 1715 return IRQ_HANDLED; 1716 1716 }
+3 -1
drivers/net/ethernet/ibm/ehea/ehea_main.c
··· 336 336 stats->tx_bytes = tx_bytes; 337 337 stats->rx_packets = rx_packets; 338 338 339 - return &port->stats; 339 + stats->multicast = port->stats.multicast; 340 + stats->rx_errors = port->stats.rx_errors; 341 + return stats; 340 342 } 341 343 342 344 static void ehea_update_stats(struct work_struct *work)
+1 -2
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
··· 2255 2255 2256 2256 if (vhcr->op_modifier == 0) { 2257 2257 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq); 2258 - if (err) 2259 - goto ex_put; 2258 + goto ex_put; 2260 2259 } 2261 2260 2262 2261 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+2 -2
drivers/net/hyperv/netvsc_drv.c
··· 313 313 static void netvsc_get_drvinfo(struct net_device *net, 314 314 struct ethtool_drvinfo *info) 315 315 { 316 - strcpy(info->driver, "hv_netvsc"); 316 + strcpy(info->driver, KBUILD_MODNAME); 317 317 strcpy(info->version, HV_DRV_VERSION); 318 318 strcpy(info->fw_version, "N/A"); 319 319 } ··· 485 485 486 486 /* The one and only one */ 487 487 static struct hv_driver netvsc_drv = { 488 - .name = "netvsc", 488 + .name = KBUILD_MODNAME, 489 489 .id_table = id_table, 490 490 .probe = netvsc_probe, 491 491 .remove = netvsc_remove,
+2
drivers/net/usb/usbnet.c
··· 589 589 entry = (struct skb_data *) skb->cb; 590 590 urb = entry->urb; 591 591 592 + spin_unlock_irqrestore(&q->lock, flags); 592 593 // during some PM-driven resume scenarios, 593 594 // these (async) unlinks complete immediately 594 595 retval = usb_unlink_urb (urb); ··· 597 596 netdev_dbg(dev->net, "unlink urb err, %d\n", retval); 598 597 else 599 598 count++; 599 + spin_lock_irqsave(&q->lock, flags); 600 600 } 601 601 spin_unlock_irqrestore (&q->lock, flags); 602 602 return count;
+6 -3
drivers/net/wireless/ath/carl9170/tx.c
··· 1234 1234 { 1235 1235 struct ieee80211_sta *sta; 1236 1236 struct carl9170_sta_info *sta_info; 1237 + struct ieee80211_tx_info *tx_info; 1237 1238 1238 1239 rcu_read_lock(); 1239 1240 sta = __carl9170_get_tx_sta(ar, skb); ··· 1242 1241 goto out_rcu; 1243 1242 1244 1243 sta_info = (void *) sta->drv_priv; 1245 - if (unlikely(sta_info->sleeping)) { 1246 - struct ieee80211_tx_info *tx_info; 1244 + tx_info = IEEE80211_SKB_CB(skb); 1247 1245 1246 + if (unlikely(sta_info->sleeping) && 1247 + !(tx_info->flags & (IEEE80211_TX_CTL_POLL_RESPONSE | 1248 + IEEE80211_TX_CTL_CLEAR_PS_FILT))) { 1248 1249 rcu_read_unlock(); 1249 1250 1250 - tx_info = IEEE80211_SKB_CB(skb); 1251 1251 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) 1252 1252 atomic_dec(&ar->tx_ampdu_upload); 1253 1253 1254 1254 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED; 1255 + carl9170_release_dev_space(ar, skb); 1255 1256 carl9170_tx_status(ar, skb, false); 1256 1257 return true; 1257 1258 }
+1 -1
drivers/net/wireless/iwlwifi/iwl-agn-lib.c
··· 1240 1240 .flags = CMD_SYNC, 1241 1241 .data[0] = key_data.rsc_tsc, 1242 1242 .dataflags[0] = IWL_HCMD_DFL_NOCOPY, 1243 - .len[0] = sizeof(key_data.rsc_tsc), 1243 + .len[0] = sizeof(*key_data.rsc_tsc), 1244 1244 }; 1245 1245 1246 1246 ret = iwl_trans_send_cmd(trans(priv), &rsc_tsc_cmd);
+1
drivers/net/wireless/mwifiex/cfg80211.c
··· 846 846 priv->sec_info.wpa_enabled = false; 847 847 priv->sec_info.wpa2_enabled = false; 848 848 priv->wep_key_curr_index = 0; 849 + priv->sec_info.encryption_mode = 0; 849 850 ret = mwifiex_set_encode(priv, NULL, 0, 0, 1); 850 851 851 852 if (mode == NL80211_IFTYPE_ADHOC) {
+3 -1
include/net/inetpeer.h
··· 35 35 36 36 u32 metrics[RTAX_MAX]; 37 37 u32 rate_tokens; /* rate limiting for ICMP */ 38 - int redirect_genid; 39 38 unsigned long rate_last; 40 39 unsigned long pmtu_expires; 41 40 u32 pmtu_orig; 42 41 u32 pmtu_learned; 43 42 struct inetpeer_addr_base redirect_learned; 43 + struct list_head gc_list; 44 44 /* 45 45 * Once inet_peer is queued for deletion (refcnt == -1), following fields 46 46 * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp ··· 95 95 /* can be called from BH context or outside */ 96 96 extern void inet_putpeer(struct inet_peer *p); 97 97 extern bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout); 98 + 99 + extern void inetpeer_invalidate_tree(int family); 98 100 99 101 /* 100 102 * temporary check to make sure we dont access rid, ip_id_count, tcp_ts,
+1 -1
net/bridge/br_stp.c
··· 31 31 32 32 void br_log_state(const struct net_bridge_port *p) 33 33 { 34 - br_info(p->br, "port %u(%s) entering %s state\n", 34 + br_info(p->br, "port %u(%s) entered %s state\n", 35 35 (unsigned) p->port_no, p->dev->name, 36 36 br_port_state_names[p->state]); 37 37 }
+1 -2
net/bridge/br_stp_if.c
··· 98 98 struct net_bridge *br = p->br; 99 99 int wasroot; 100 100 101 - br_log_state(p); 102 - 103 101 wasroot = br_is_root_bridge(br); 104 102 br_become_designated_port(p); 105 103 p->state = BR_STATE_DISABLED; 106 104 p->topology_change_ack = 0; 107 105 p->config_pending = 0; 108 106 107 + br_log_state(p); 109 108 br_ifinfo_notify(RTM_NEWLINK, p); 110 109 111 110 del_timer(&p->message_age_timer);
+79 -2
net/ipv4/inetpeer.c
··· 17 17 #include <linux/kernel.h> 18 18 #include <linux/mm.h> 19 19 #include <linux/net.h> 20 + #include <linux/workqueue.h> 20 21 #include <net/ip.h> 21 22 #include <net/inetpeer.h> 22 23 #include <net/secure_seq.h> ··· 67 66 68 67 static struct kmem_cache *peer_cachep __read_mostly; 69 68 69 + static LIST_HEAD(gc_list); 70 + static const int gc_delay = 60 * HZ; 71 + static struct delayed_work gc_work; 72 + static DEFINE_SPINLOCK(gc_lock); 73 + 70 74 #define node_height(x) x->avl_height 71 75 72 76 #define peer_avl_empty ((struct inet_peer *)&peer_fake_node) ··· 108 102 int inet_peer_minttl __read_mostly = 120 * HZ; /* TTL under high load: 120 sec */ 109 103 int inet_peer_maxttl __read_mostly = 10 * 60 * HZ; /* usual time to live: 10 min */ 110 104 105 + static void inetpeer_gc_worker(struct work_struct *work) 106 + { 107 + struct inet_peer *p, *n; 108 + LIST_HEAD(list); 109 + 110 + spin_lock_bh(&gc_lock); 111 + list_replace_init(&gc_list, &list); 112 + spin_unlock_bh(&gc_lock); 113 + 114 + if (list_empty(&list)) 115 + return; 116 + 117 + list_for_each_entry_safe(p, n, &list, gc_list) { 118 + 119 + if(need_resched()) 120 + cond_resched(); 121 + 122 + if (p->avl_left != peer_avl_empty) { 123 + list_add_tail(&p->avl_left->gc_list, &list); 124 + p->avl_left = peer_avl_empty; 125 + } 126 + 127 + if (p->avl_right != peer_avl_empty) { 128 + list_add_tail(&p->avl_right->gc_list, &list); 129 + p->avl_right = peer_avl_empty; 130 + } 131 + 132 + n = list_entry(p->gc_list.next, struct inet_peer, gc_list); 133 + 134 + if (!atomic_read(&p->refcnt)) { 135 + list_del(&p->gc_list); 136 + kmem_cache_free(peer_cachep, p); 137 + } 138 + } 139 + 140 + if (list_empty(&list)) 141 + return; 142 + 143 + spin_lock_bh(&gc_lock); 144 + list_splice(&list, &gc_list); 145 + spin_unlock_bh(&gc_lock); 146 + 147 + schedule_delayed_work(&gc_work, gc_delay); 148 + } 111 149 112 150 /* Called from ip_output.c:ip_init */ 113 151 void __init inet_initpeers(void) ··· 176 126 0, SLAB_HWCACHE_ALIGN | 
SLAB_PANIC, 177 127 NULL); 178 128 129 + INIT_DELAYED_WORK_DEFERRABLE(&gc_work, inetpeer_gc_worker); 179 130 } 180 131 181 132 static int addr_compare(const struct inetpeer_addr *a, ··· 498 447 p->rate_last = 0; 499 448 p->pmtu_expires = 0; 500 449 p->pmtu_orig = 0; 501 - p->redirect_genid = 0; 502 450 memset(&p->redirect_learned, 0, sizeof(p->redirect_learned)); 503 - 451 + INIT_LIST_HEAD(&p->gc_list); 504 452 505 453 /* Link the node. */ 506 454 link_to_pool(p, base); ··· 559 509 return rc; 560 510 } 561 511 EXPORT_SYMBOL(inet_peer_xrlim_allow); 512 + 513 + void inetpeer_invalidate_tree(int family) 514 + { 515 + struct inet_peer *old, *new, *prev; 516 + struct inet_peer_base *base = family_to_base(family); 517 + 518 + write_seqlock_bh(&base->lock); 519 + 520 + old = base->root; 521 + if (old == peer_avl_empty_rcu) 522 + goto out; 523 + 524 + new = peer_avl_empty_rcu; 525 + 526 + prev = cmpxchg(&base->root, old, new); 527 + if (prev == old) { 528 + base->total = 0; 529 + spin_lock(&gc_lock); 530 + list_add_tail(&prev->gc_list, &gc_list); 531 + spin_unlock(&gc_lock); 532 + schedule_delayed_work(&gc_work, gc_delay); 533 + } 534 + 535 + out: 536 + write_sequnlock_bh(&base->lock); 537 + } 538 + EXPORT_SYMBOL(inetpeer_invalidate_tree);
+3 -9
net/ipv4/route.c
··· 132 132 static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20; 133 133 static int ip_rt_min_advmss __read_mostly = 256; 134 134 static int rt_chain_length_max __read_mostly = 20; 135 - static int redirect_genid; 136 135 137 136 static struct delayed_work expires_work; 138 137 static unsigned long expires_ljiffies; ··· 936 937 937 938 get_random_bytes(&shuffle, sizeof(shuffle)); 938 939 atomic_add(shuffle + 1U, &net->ipv4.rt_genid); 939 - redirect_genid++; 940 + inetpeer_invalidate_tree(AF_INET); 940 941 } 941 942 942 943 /* ··· 1484 1485 1485 1486 peer = rt->peer; 1486 1487 if (peer) { 1487 - if (peer->redirect_learned.a4 != new_gw || 1488 - peer->redirect_genid != redirect_genid) { 1488 + if (peer->redirect_learned.a4 != new_gw) { 1489 1489 peer->redirect_learned.a4 = new_gw; 1490 - peer->redirect_genid = redirect_genid; 1491 1490 atomic_inc(&__rt_peer_genid); 1492 1491 } 1493 1492 check_peer_redir(&rt->dst, peer); ··· 1790 1793 if (peer) { 1791 1794 check_peer_pmtu(&rt->dst, peer); 1792 1795 1793 - if (peer->redirect_genid != redirect_genid) 1794 - peer->redirect_learned.a4 = 0; 1795 1796 if (peer->redirect_learned.a4 && 1796 1797 peer->redirect_learned.a4 != rt->rt_gateway) 1797 1798 check_peer_redir(&rt->dst, peer); ··· 1953 1958 dst_init_metrics(&rt->dst, peer->metrics, false); 1954 1959 1955 1960 check_peer_pmtu(&rt->dst, peer); 1956 - if (peer->redirect_genid != redirect_genid) 1957 - peer->redirect_learned.a4 = 0; 1961 + 1958 1962 if (peer->redirect_learned.a4 && 1959 1963 peer->redirect_learned.a4 != rt->rt_gateway) { 1960 1964 rt->rt_gateway = peer->redirect_learned.a4;
+32 -12
net/openvswitch/actions.c
··· 1 1 /* 2 - * Copyright (c) 2007-2011 Nicira Networks. 2 + * Copyright (c) 2007-2012 Nicira Networks. 3 3 * 4 4 * This program is free software; you can redistribute it and/or 5 5 * modify it under the terms of version 2 of the GNU General Public ··· 145 145 inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb, 146 146 *addr, new_addr, 1); 147 147 } else if (nh->protocol == IPPROTO_UDP) { 148 - if (likely(transport_len >= sizeof(struct udphdr))) 149 - inet_proto_csum_replace4(&udp_hdr(skb)->check, skb, 150 - *addr, new_addr, 1); 148 + if (likely(transport_len >= sizeof(struct udphdr))) { 149 + struct udphdr *uh = udp_hdr(skb); 150 + 151 + if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) { 152 + inet_proto_csum_replace4(&uh->check, skb, 153 + *addr, new_addr, 1); 154 + if (!uh->check) 155 + uh->check = CSUM_MANGLED_0; 156 + } 157 + } 151 158 } 152 159 153 160 csum_replace4(&nh->check, *addr, new_addr); ··· 204 197 skb->rxhash = 0; 205 198 } 206 199 207 - static int set_udp_port(struct sk_buff *skb, 208 - const struct ovs_key_udp *udp_port_key) 200 + static void set_udp_port(struct sk_buff *skb, __be16 *port, __be16 new_port) 201 + { 202 + struct udphdr *uh = udp_hdr(skb); 203 + 204 + if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) { 205 + set_tp_port(skb, port, new_port, &uh->check); 206 + 207 + if (!uh->check) 208 + uh->check = CSUM_MANGLED_0; 209 + } else { 210 + *port = new_port; 211 + skb->rxhash = 0; 212 + } 213 + } 214 + 215 + static int set_udp(struct sk_buff *skb, const struct ovs_key_udp *udp_port_key) 209 216 { 210 217 struct udphdr *uh; 211 218 int err; ··· 231 210 232 211 uh = udp_hdr(skb); 233 212 if (udp_port_key->udp_src != uh->source) 234 - set_tp_port(skb, &uh->source, udp_port_key->udp_src, &uh->check); 213 + set_udp_port(skb, &uh->source, udp_port_key->udp_src); 235 214 236 215 if (udp_port_key->udp_dst != uh->dest) 237 - set_tp_port(skb, &uh->dest, udp_port_key->udp_dst, &uh->check); 216 + set_udp_port(skb, &uh->dest, 
udp_port_key->udp_dst); 238 217 239 218 return 0; 240 219 } 241 220 242 - static int set_tcp_port(struct sk_buff *skb, 243 - const struct ovs_key_tcp *tcp_port_key) 221 + static int set_tcp(struct sk_buff *skb, const struct ovs_key_tcp *tcp_port_key) 244 222 { 245 223 struct tcphdr *th; 246 224 int err; ··· 348 328 break; 349 329 350 330 case OVS_KEY_ATTR_TCP: 351 - err = set_tcp_port(skb, nla_data(nested_attr)); 331 + err = set_tcp(skb, nla_data(nested_attr)); 352 332 break; 353 333 354 334 case OVS_KEY_ATTR_UDP: 355 - err = set_udp_port(skb, nla_data(nested_attr)); 335 + err = set_udp(skb, nla_data(nested_attr)); 356 336 break; 357 337 } 358 338
+3
net/openvswitch/datapath.c
··· 1521 1521 vport = ovs_vport_locate(nla_data(a[OVS_VPORT_ATTR_NAME])); 1522 1522 if (!vport) 1523 1523 return ERR_PTR(-ENODEV); 1524 + if (ovs_header->dp_ifindex && 1525 + ovs_header->dp_ifindex != get_dpifindex(vport->dp)) 1526 + return ERR_PTR(-ENODEV); 1524 1527 return vport; 1525 1528 } else if (a[OVS_VPORT_ATTR_PORT_NO]) { 1526 1529 u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);