Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking from David Miller:

1) IPV4 routing metrics can become stale when routes are changed by the
administrator, fix from Steffen Klassert.

2) atl1c does "val |= XXX;" where XXX is a bit number not a bit mask,
fix by using set_bit. From Dan Carpenter.

3) Memory accounting bug in carl9170 driver results in wedged TX queue.
Fix from Nicolas Cavallari.

4) iwlwifi accidentally uses "sizeof(ptr)" instead of "sizeof(*ptr)", fix
from Johannes Berg.

5) Openvswitch doesn't honor dp_ifindex when doing vport lookups, fix
from Ben Pfaff.

6) ehea conversion to 64-bit stats lost multicast and rx_errors
accounting, fix from Eric Dumazet.

7) Bridge state transition logging in br_stp_disable_port() is busted,
it's emitted at the wrong time and the message is in the wrong tense,
fix from Paulius Zaleckas.

8) mlx4 device erroneously invokes the queue resize firmware operation
twice, fix from Jack Morgenstein.

9) Fix deadlock in usbnet, need to drop lock when invoking usb_unlink_urb()
otherwise we recurse into taking it again. Fix from Sebastian Siewior.

10) hyperv network driver uses the wrong driver name string, fix from
Haiyang Zhang.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
net/hyperv: Use the built-in macro KBUILD_MODNAME for this driver
net/usbnet: avoid recursive locking in usbnet_stop()
route: Remove redirect_genid
inetpeer: Invalidate the inetpeer tree along with the routing cache
mlx4_core: fix bug in modify_cq wrapper for resize flow.
atl1c: set ATL1C_WORK_EVENT_RESET bit correctly
bridge: fix state reporting when port is disabled
bridge: br_log_state() s/entering/entered/
ehea: restore multicast and rx_errors fields
openvswitch: Fix checksum update for actions on UDP packets.
openvswitch: Honor dp_ifindex, when specified, for vport lookup by name.
iwlwifi: fix wowlan suspend
mwifiex: reset encryption mode flag before association
carl9170: fix frame delivery if sta is in powersave mode
carl9170: Fix memory accounting when sta is in power-save mode.

+139 -37
+1 -1
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
··· 1710 "atl1c hardware error (status = 0x%x)\n", 1711 status & ISR_ERROR); 1712 /* reset MAC */ 1713 - adapter->work_event |= ATL1C_WORK_EVENT_RESET; 1714 schedule_work(&adapter->common_task); 1715 return IRQ_HANDLED; 1716 }
··· 1710 "atl1c hardware error (status = 0x%x)\n", 1711 status & ISR_ERROR); 1712 /* reset MAC */ 1713 + set_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event); 1714 schedule_work(&adapter->common_task); 1715 return IRQ_HANDLED; 1716 }
+3 -1
drivers/net/ethernet/ibm/ehea/ehea_main.c
··· 336 stats->tx_bytes = tx_bytes; 337 stats->rx_packets = rx_packets; 338 339 - return &port->stats; 340 } 341 342 static void ehea_update_stats(struct work_struct *work)
··· 336 stats->tx_bytes = tx_bytes; 337 stats->rx_packets = rx_packets; 338 339 + stats->multicast = port->stats.multicast; 340 + stats->rx_errors = port->stats.rx_errors; 341 + return stats; 342 } 343 344 static void ehea_update_stats(struct work_struct *work)
+1 -2
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
··· 2255 2256 if (vhcr->op_modifier == 0) { 2257 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq); 2258 - if (err) 2259 - goto ex_put; 2260 } 2261 2262 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
··· 2255 2256 if (vhcr->op_modifier == 0) { 2257 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq); 2258 + goto ex_put; 2259 } 2260 2261 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+2 -2
drivers/net/hyperv/netvsc_drv.c
··· 313 static void netvsc_get_drvinfo(struct net_device *net, 314 struct ethtool_drvinfo *info) 315 { 316 - strcpy(info->driver, "hv_netvsc"); 317 strcpy(info->version, HV_DRV_VERSION); 318 strcpy(info->fw_version, "N/A"); 319 } ··· 485 486 /* The one and only one */ 487 static struct hv_driver netvsc_drv = { 488 - .name = "netvsc", 489 .id_table = id_table, 490 .probe = netvsc_probe, 491 .remove = netvsc_remove,
··· 313 static void netvsc_get_drvinfo(struct net_device *net, 314 struct ethtool_drvinfo *info) 315 { 316 + strcpy(info->driver, KBUILD_MODNAME); 317 strcpy(info->version, HV_DRV_VERSION); 318 strcpy(info->fw_version, "N/A"); 319 } ··· 485 486 /* The one and only one */ 487 static struct hv_driver netvsc_drv = { 488 + .name = KBUILD_MODNAME, 489 .id_table = id_table, 490 .probe = netvsc_probe, 491 .remove = netvsc_remove,
+2
drivers/net/usb/usbnet.c
··· 589 entry = (struct skb_data *) skb->cb; 590 urb = entry->urb; 591 592 // during some PM-driven resume scenarios, 593 // these (async) unlinks complete immediately 594 retval = usb_unlink_urb (urb); ··· 597 netdev_dbg(dev->net, "unlink urb err, %d\n", retval); 598 else 599 count++; 600 } 601 spin_unlock_irqrestore (&q->lock, flags); 602 return count;
··· 589 entry = (struct skb_data *) skb->cb; 590 urb = entry->urb; 591 592 + spin_unlock_irqrestore(&q->lock, flags); 593 // during some PM-driven resume scenarios, 594 // these (async) unlinks complete immediately 595 retval = usb_unlink_urb (urb); ··· 596 netdev_dbg(dev->net, "unlink urb err, %d\n", retval); 597 else 598 count++; 599 + spin_lock_irqsave(&q->lock, flags); 600 } 601 spin_unlock_irqrestore (&q->lock, flags); 602 return count;
+6 -3
drivers/net/wireless/ath/carl9170/tx.c
··· 1234 { 1235 struct ieee80211_sta *sta; 1236 struct carl9170_sta_info *sta_info; 1237 1238 rcu_read_lock(); 1239 sta = __carl9170_get_tx_sta(ar, skb); ··· 1242 goto out_rcu; 1243 1244 sta_info = (void *) sta->drv_priv; 1245 - if (unlikely(sta_info->sleeping)) { 1246 - struct ieee80211_tx_info *tx_info; 1247 1248 rcu_read_unlock(); 1249 1250 - tx_info = IEEE80211_SKB_CB(skb); 1251 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) 1252 atomic_dec(&ar->tx_ampdu_upload); 1253 1254 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED; 1255 carl9170_tx_status(ar, skb, false); 1256 return true; 1257 }
··· 1234 { 1235 struct ieee80211_sta *sta; 1236 struct carl9170_sta_info *sta_info; 1237 + struct ieee80211_tx_info *tx_info; 1238 1239 rcu_read_lock(); 1240 sta = __carl9170_get_tx_sta(ar, skb); ··· 1241 goto out_rcu; 1242 1243 sta_info = (void *) sta->drv_priv; 1244 + tx_info = IEEE80211_SKB_CB(skb); 1245 1246 + if (unlikely(sta_info->sleeping) && 1247 + !(tx_info->flags & (IEEE80211_TX_CTL_POLL_RESPONSE | 1248 + IEEE80211_TX_CTL_CLEAR_PS_FILT))) { 1249 rcu_read_unlock(); 1250 1251 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) 1252 atomic_dec(&ar->tx_ampdu_upload); 1253 1254 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED; 1255 + carl9170_release_dev_space(ar, skb); 1256 carl9170_tx_status(ar, skb, false); 1257 return true; 1258 }
+1 -1
drivers/net/wireless/iwlwifi/iwl-agn-lib.c
··· 1240 .flags = CMD_SYNC, 1241 .data[0] = key_data.rsc_tsc, 1242 .dataflags[0] = IWL_HCMD_DFL_NOCOPY, 1243 - .len[0] = sizeof(key_data.rsc_tsc), 1244 }; 1245 1246 ret = iwl_trans_send_cmd(trans(priv), &rsc_tsc_cmd);
··· 1240 .flags = CMD_SYNC, 1241 .data[0] = key_data.rsc_tsc, 1242 .dataflags[0] = IWL_HCMD_DFL_NOCOPY, 1243 + .len[0] = sizeof(*key_data.rsc_tsc), 1244 }; 1245 1246 ret = iwl_trans_send_cmd(trans(priv), &rsc_tsc_cmd);
+1
drivers/net/wireless/mwifiex/cfg80211.c
··· 846 priv->sec_info.wpa_enabled = false; 847 priv->sec_info.wpa2_enabled = false; 848 priv->wep_key_curr_index = 0; 849 ret = mwifiex_set_encode(priv, NULL, 0, 0, 1); 850 851 if (mode == NL80211_IFTYPE_ADHOC) {
··· 846 priv->sec_info.wpa_enabled = false; 847 priv->sec_info.wpa2_enabled = false; 848 priv->wep_key_curr_index = 0; 849 + priv->sec_info.encryption_mode = 0; 850 ret = mwifiex_set_encode(priv, NULL, 0, 0, 1); 851 852 if (mode == NL80211_IFTYPE_ADHOC) {
+3 -1
include/net/inetpeer.h
··· 35 36 u32 metrics[RTAX_MAX]; 37 u32 rate_tokens; /* rate limiting for ICMP */ 38 - int redirect_genid; 39 unsigned long rate_last; 40 unsigned long pmtu_expires; 41 u32 pmtu_orig; 42 u32 pmtu_learned; 43 struct inetpeer_addr_base redirect_learned; 44 /* 45 * Once inet_peer is queued for deletion (refcnt == -1), following fields 46 * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp ··· 95 /* can be called from BH context or outside */ 96 extern void inet_putpeer(struct inet_peer *p); 97 extern bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout); 98 99 /* 100 * temporary check to make sure we dont access rid, ip_id_count, tcp_ts,
··· 35 36 u32 metrics[RTAX_MAX]; 37 u32 rate_tokens; /* rate limiting for ICMP */ 38 unsigned long rate_last; 39 unsigned long pmtu_expires; 40 u32 pmtu_orig; 41 u32 pmtu_learned; 42 struct inetpeer_addr_base redirect_learned; 43 + struct list_head gc_list; 44 /* 45 * Once inet_peer is queued for deletion (refcnt == -1), following fields 46 * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp ··· 95 /* can be called from BH context or outside */ 96 extern void inet_putpeer(struct inet_peer *p); 97 extern bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout); 98 + 99 + extern void inetpeer_invalidate_tree(int family); 100 101 /* 102 * temporary check to make sure we dont access rid, ip_id_count, tcp_ts,
+1 -1
net/bridge/br_stp.c
··· 31 32 void br_log_state(const struct net_bridge_port *p) 33 { 34 - br_info(p->br, "port %u(%s) entering %s state\n", 35 (unsigned) p->port_no, p->dev->name, 36 br_port_state_names[p->state]); 37 }
··· 31 32 void br_log_state(const struct net_bridge_port *p) 33 { 34 + br_info(p->br, "port %u(%s) entered %s state\n", 35 (unsigned) p->port_no, p->dev->name, 36 br_port_state_names[p->state]); 37 }
+1 -2
net/bridge/br_stp_if.c
··· 98 struct net_bridge *br = p->br; 99 int wasroot; 100 101 - br_log_state(p); 102 - 103 wasroot = br_is_root_bridge(br); 104 br_become_designated_port(p); 105 p->state = BR_STATE_DISABLED; 106 p->topology_change_ack = 0; 107 p->config_pending = 0; 108 109 br_ifinfo_notify(RTM_NEWLINK, p); 110 111 del_timer(&p->message_age_timer);
··· 98 struct net_bridge *br = p->br; 99 int wasroot; 100 101 wasroot = br_is_root_bridge(br); 102 br_become_designated_port(p); 103 p->state = BR_STATE_DISABLED; 104 p->topology_change_ack = 0; 105 p->config_pending = 0; 106 107 + br_log_state(p); 108 br_ifinfo_notify(RTM_NEWLINK, p); 109 110 del_timer(&p->message_age_timer);
+79 -2
net/ipv4/inetpeer.c
··· 17 #include <linux/kernel.h> 18 #include <linux/mm.h> 19 #include <linux/net.h> 20 #include <net/ip.h> 21 #include <net/inetpeer.h> 22 #include <net/secure_seq.h> ··· 67 68 static struct kmem_cache *peer_cachep __read_mostly; 69 70 #define node_height(x) x->avl_height 71 72 #define peer_avl_empty ((struct inet_peer *)&peer_fake_node) ··· 108 int inet_peer_minttl __read_mostly = 120 * HZ; /* TTL under high load: 120 sec */ 109 int inet_peer_maxttl __read_mostly = 10 * 60 * HZ; /* usual time to live: 10 min */ 110 111 112 /* Called from ip_output.c:ip_init */ 113 void __init inet_initpeers(void) ··· 176 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, 177 NULL); 178 179 } 180 181 static int addr_compare(const struct inetpeer_addr *a, ··· 498 p->rate_last = 0; 499 p->pmtu_expires = 0; 500 p->pmtu_orig = 0; 501 - p->redirect_genid = 0; 502 memset(&p->redirect_learned, 0, sizeof(p->redirect_learned)); 503 - 504 505 /* Link the node. */ 506 link_to_pool(p, base); ··· 559 return rc; 560 } 561 EXPORT_SYMBOL(inet_peer_xrlim_allow);
··· 17 #include <linux/kernel.h> 18 #include <linux/mm.h> 19 #include <linux/net.h> 20 + #include <linux/workqueue.h> 21 #include <net/ip.h> 22 #include <net/inetpeer.h> 23 #include <net/secure_seq.h> ··· 66 67 static struct kmem_cache *peer_cachep __read_mostly; 68 69 + static LIST_HEAD(gc_list); 70 + static const int gc_delay = 60 * HZ; 71 + static struct delayed_work gc_work; 72 + static DEFINE_SPINLOCK(gc_lock); 73 + 74 #define node_height(x) x->avl_height 75 76 #define peer_avl_empty ((struct inet_peer *)&peer_fake_node) ··· 102 int inet_peer_minttl __read_mostly = 120 * HZ; /* TTL under high load: 120 sec */ 103 int inet_peer_maxttl __read_mostly = 10 * 60 * HZ; /* usual time to live: 10 min */ 104 105 + static void inetpeer_gc_worker(struct work_struct *work) 106 + { 107 + struct inet_peer *p, *n; 108 + LIST_HEAD(list); 109 + 110 + spin_lock_bh(&gc_lock); 111 + list_replace_init(&gc_list, &list); 112 + spin_unlock_bh(&gc_lock); 113 + 114 + if (list_empty(&list)) 115 + return; 116 + 117 + list_for_each_entry_safe(p, n, &list, gc_list) { 118 + 119 + if(need_resched()) 120 + cond_resched(); 121 + 122 + if (p->avl_left != peer_avl_empty) { 123 + list_add_tail(&p->avl_left->gc_list, &list); 124 + p->avl_left = peer_avl_empty; 125 + } 126 + 127 + if (p->avl_right != peer_avl_empty) { 128 + list_add_tail(&p->avl_right->gc_list, &list); 129 + p->avl_right = peer_avl_empty; 130 + } 131 + 132 + n = list_entry(p->gc_list.next, struct inet_peer, gc_list); 133 + 134 + if (!atomic_read(&p->refcnt)) { 135 + list_del(&p->gc_list); 136 + kmem_cache_free(peer_cachep, p); 137 + } 138 + } 139 + 140 + if (list_empty(&list)) 141 + return; 142 + 143 + spin_lock_bh(&gc_lock); 144 + list_splice(&list, &gc_list); 145 + spin_unlock_bh(&gc_lock); 146 + 147 + schedule_delayed_work(&gc_work, gc_delay); 148 + } 149 150 /* Called from ip_output.c:ip_init */ 151 void __init inet_initpeers(void) ··· 126 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, 127 NULL); 128 129 + 
INIT_DELAYED_WORK_DEFERRABLE(&gc_work, inetpeer_gc_worker); 130 } 131 132 static int addr_compare(const struct inetpeer_addr *a, ··· 447 p->rate_last = 0; 448 p->pmtu_expires = 0; 449 p->pmtu_orig = 0; 450 memset(&p->redirect_learned, 0, sizeof(p->redirect_learned)); 451 + INIT_LIST_HEAD(&p->gc_list); 452 453 /* Link the node. */ 454 link_to_pool(p, base); ··· 509 return rc; 510 } 511 EXPORT_SYMBOL(inet_peer_xrlim_allow); 512 + 513 + void inetpeer_invalidate_tree(int family) 514 + { 515 + struct inet_peer *old, *new, *prev; 516 + struct inet_peer_base *base = family_to_base(family); 517 + 518 + write_seqlock_bh(&base->lock); 519 + 520 + old = base->root; 521 + if (old == peer_avl_empty_rcu) 522 + goto out; 523 + 524 + new = peer_avl_empty_rcu; 525 + 526 + prev = cmpxchg(&base->root, old, new); 527 + if (prev == old) { 528 + base->total = 0; 529 + spin_lock(&gc_lock); 530 + list_add_tail(&prev->gc_list, &gc_list); 531 + spin_unlock(&gc_lock); 532 + schedule_delayed_work(&gc_work, gc_delay); 533 + } 534 + 535 + out: 536 + write_sequnlock_bh(&base->lock); 537 + } 538 + EXPORT_SYMBOL(inetpeer_invalidate_tree);
+3 -9
net/ipv4/route.c
··· 132 static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20; 133 static int ip_rt_min_advmss __read_mostly = 256; 134 static int rt_chain_length_max __read_mostly = 20; 135 - static int redirect_genid; 136 137 static struct delayed_work expires_work; 138 static unsigned long expires_ljiffies; ··· 936 937 get_random_bytes(&shuffle, sizeof(shuffle)); 938 atomic_add(shuffle + 1U, &net->ipv4.rt_genid); 939 - redirect_genid++; 940 } 941 942 /* ··· 1484 1485 peer = rt->peer; 1486 if (peer) { 1487 - if (peer->redirect_learned.a4 != new_gw || 1488 - peer->redirect_genid != redirect_genid) { 1489 peer->redirect_learned.a4 = new_gw; 1490 - peer->redirect_genid = redirect_genid; 1491 atomic_inc(&__rt_peer_genid); 1492 } 1493 check_peer_redir(&rt->dst, peer); ··· 1790 if (peer) { 1791 check_peer_pmtu(&rt->dst, peer); 1792 1793 - if (peer->redirect_genid != redirect_genid) 1794 - peer->redirect_learned.a4 = 0; 1795 if (peer->redirect_learned.a4 && 1796 peer->redirect_learned.a4 != rt->rt_gateway) 1797 check_peer_redir(&rt->dst, peer); ··· 1953 dst_init_metrics(&rt->dst, peer->metrics, false); 1954 1955 check_peer_pmtu(&rt->dst, peer); 1956 - if (peer->redirect_genid != redirect_genid) 1957 - peer->redirect_learned.a4 = 0; 1958 if (peer->redirect_learned.a4 && 1959 peer->redirect_learned.a4 != rt->rt_gateway) { 1960 rt->rt_gateway = peer->redirect_learned.a4;
··· 132 static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20; 133 static int ip_rt_min_advmss __read_mostly = 256; 134 static int rt_chain_length_max __read_mostly = 20; 135 136 static struct delayed_work expires_work; 137 static unsigned long expires_ljiffies; ··· 937 938 get_random_bytes(&shuffle, sizeof(shuffle)); 939 atomic_add(shuffle + 1U, &net->ipv4.rt_genid); 940 + inetpeer_invalidate_tree(AF_INET); 941 } 942 943 /* ··· 1485 1486 peer = rt->peer; 1487 if (peer) { 1488 + if (peer->redirect_learned.a4 != new_gw) { 1489 peer->redirect_learned.a4 = new_gw; 1490 atomic_inc(&__rt_peer_genid); 1491 } 1492 check_peer_redir(&rt->dst, peer); ··· 1793 if (peer) { 1794 check_peer_pmtu(&rt->dst, peer); 1795 1796 if (peer->redirect_learned.a4 && 1797 peer->redirect_learned.a4 != rt->rt_gateway) 1798 check_peer_redir(&rt->dst, peer); ··· 1958 dst_init_metrics(&rt->dst, peer->metrics, false); 1959 1960 check_peer_pmtu(&rt->dst, peer); 1961 + 1962 if (peer->redirect_learned.a4 && 1963 peer->redirect_learned.a4 != rt->rt_gateway) { 1964 rt->rt_gateway = peer->redirect_learned.a4;
+32 -12
net/openvswitch/actions.c
··· 1 /* 2 - * Copyright (c) 2007-2011 Nicira Networks. 3 * 4 * This program is free software; you can redistribute it and/or 5 * modify it under the terms of version 2 of the GNU General Public ··· 145 inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb, 146 *addr, new_addr, 1); 147 } else if (nh->protocol == IPPROTO_UDP) { 148 - if (likely(transport_len >= sizeof(struct udphdr))) 149 - inet_proto_csum_replace4(&udp_hdr(skb)->check, skb, 150 - *addr, new_addr, 1); 151 } 152 153 csum_replace4(&nh->check, *addr, new_addr); ··· 204 skb->rxhash = 0; 205 } 206 207 - static int set_udp_port(struct sk_buff *skb, 208 - const struct ovs_key_udp *udp_port_key) 209 { 210 struct udphdr *uh; 211 int err; ··· 231 232 uh = udp_hdr(skb); 233 if (udp_port_key->udp_src != uh->source) 234 - set_tp_port(skb, &uh->source, udp_port_key->udp_src, &uh->check); 235 236 if (udp_port_key->udp_dst != uh->dest) 237 - set_tp_port(skb, &uh->dest, udp_port_key->udp_dst, &uh->check); 238 239 return 0; 240 } 241 242 - static int set_tcp_port(struct sk_buff *skb, 243 - const struct ovs_key_tcp *tcp_port_key) 244 { 245 struct tcphdr *th; 246 int err; ··· 348 break; 349 350 case OVS_KEY_ATTR_TCP: 351 - err = set_tcp_port(skb, nla_data(nested_attr)); 352 break; 353 354 case OVS_KEY_ATTR_UDP: 355 - err = set_udp_port(skb, nla_data(nested_attr)); 356 break; 357 } 358
··· 1 /* 2 + * Copyright (c) 2007-2012 Nicira Networks. 3 * 4 * This program is free software; you can redistribute it and/or 5 * modify it under the terms of version 2 of the GNU General Public ··· 145 inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb, 146 *addr, new_addr, 1); 147 } else if (nh->protocol == IPPROTO_UDP) { 148 + if (likely(transport_len >= sizeof(struct udphdr))) { 149 + struct udphdr *uh = udp_hdr(skb); 150 + 151 + if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) { 152 + inet_proto_csum_replace4(&uh->check, skb, 153 + *addr, new_addr, 1); 154 + if (!uh->check) 155 + uh->check = CSUM_MANGLED_0; 156 + } 157 + } 158 } 159 160 csum_replace4(&nh->check, *addr, new_addr); ··· 197 skb->rxhash = 0; 198 } 199 200 + static void set_udp_port(struct sk_buff *skb, __be16 *port, __be16 new_port) 201 + { 202 + struct udphdr *uh = udp_hdr(skb); 203 + 204 + if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) { 205 + set_tp_port(skb, port, new_port, &uh->check); 206 + 207 + if (!uh->check) 208 + uh->check = CSUM_MANGLED_0; 209 + } else { 210 + *port = new_port; 211 + skb->rxhash = 0; 212 + } 213 + } 214 + 215 + static int set_udp(struct sk_buff *skb, const struct ovs_key_udp *udp_port_key) 216 { 217 struct udphdr *uh; 218 int err; ··· 210 211 uh = udp_hdr(skb); 212 if (udp_port_key->udp_src != uh->source) 213 + set_udp_port(skb, &uh->source, udp_port_key->udp_src); 214 215 if (udp_port_key->udp_dst != uh->dest) 216 + set_udp_port(skb, &uh->dest, udp_port_key->udp_dst); 217 218 return 0; 219 } 220 221 + static int set_tcp(struct sk_buff *skb, const struct ovs_key_tcp *tcp_port_key) 222 { 223 struct tcphdr *th; 224 int err; ··· 328 break; 329 330 case OVS_KEY_ATTR_TCP: 331 + err = set_tcp(skb, nla_data(nested_attr)); 332 break; 333 334 case OVS_KEY_ATTR_UDP: 335 + err = set_udp(skb, nla_data(nested_attr)); 336 break; 337 } 338
+3
net/openvswitch/datapath.c
··· 1521 vport = ovs_vport_locate(nla_data(a[OVS_VPORT_ATTR_NAME])); 1522 if (!vport) 1523 return ERR_PTR(-ENODEV); 1524 return vport; 1525 } else if (a[OVS_VPORT_ATTR_PORT_NO]) { 1526 u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
··· 1521 vport = ovs_vport_locate(nla_data(a[OVS_VPORT_ATTR_NAME])); 1522 if (!vport) 1523 return ERR_PTR(-ENODEV); 1524 + if (ovs_header->dp_ifindex && 1525 + ovs_header->dp_ifindex != get_dpifindex(vport->dp)) 1526 + return ERR_PTR(-ENODEV); 1527 return vport; 1528 } else if (a[OVS_VPORT_ATTR_PORT_NO]) { 1529 u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);