Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (24 commits)
bridge: Partially disable netpoll support
tcp: fix crash in tcp_xmit_retransmit_queue
IPv6: fix CoA check in RH2 input handler (mip6_rthdr_input())
ibmveth: lost IRQ while closing/opening device leads to service loss
rt2x00: Fix lockdep warning in rt2x00lib_probe_dev()
vhost: avoid pr_err on condition guest can trigger
ipmr: Don't leak memory if fib lookup fails.
vhost-net: avoid flush under lock
net: fix problem in reading sock TX queue
net/core: neighbour update Oops
net: skb_tx_hash() fix relative to skb_orphan_try()
rfs: call sock_rps_record_flow() in tcp_splice_read()
xfrm: do not assume that template resolving always returns xfrms
hostap_pci: set dev->base_addr during probe
axnet_cs: use spin_lock_irqsave in ax_interrupt
dsa: Fix Kconfig dependencies.
act_nat: not all of the ICMP packets need an IP header payload
r8169: incorrect identifier for a 8168dp
Phonet: fix skb leak in pipe endpoint accept()
Bluetooth: Update sec_level/auth_type for already existing connections
...

+108 -71
+3 -1
drivers/net/ibmveth.c
···
    if (!adapter->pool_config)
        netif_stop_queue(netdev);

-   free_irq(netdev->irq, netdev);
+   h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

    do {
        lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
···
        ibmveth_error_printk("h_free_logical_lan failed with %lx, continuing with close\n",
                lpar_rc);
    }

+   free_irq(netdev->irq, netdev);

    adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
+4 -3
drivers/net/pcmcia/axnet_cs.c
···
    int interrupts, nr_serviced = 0, i;
    struct ei_device *ei_local;
    int handled = 0;
+   unsigned long flags;

    e8390_base = dev->base_addr;
    ei_local = netdev_priv(dev);
···
     * Protect the irq test too.
     */

-   spin_lock(&ei_local->page_lock);
+   spin_lock_irqsave(&ei_local->page_lock, flags);

    if (ei_local->irqlock)
    {
···
            dev->name, inb_p(e8390_base + EN0_ISR),
            inb_p(e8390_base + EN0_IMR));
#endif
-       spin_unlock(&ei_local->page_lock);
+       spin_unlock_irqrestore(&ei_local->page_lock, flags);
        return IRQ_NONE;
    }
···
    ei_local->irqlock = 0;
    outb_p(ENISR_ALL, e8390_base + EN0_IMR);

-   spin_unlock(&ei_local->page_lock);
+   spin_unlock_irqrestore(&ei_local->page_lock, flags);
    return IRQ_RETVAL(handled);
}
+1 -1
drivers/net/r8169.c
···
    { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_26 },

    /* 8168C family. */
-   { 0x7cf00000, 0x3ca00000, RTL_GIGA_MAC_VER_24 },
+   { 0x7cf00000, 0x3cb00000, RTL_GIGA_MAC_VER_24 },
    { 0x7cf00000, 0x3c900000, RTL_GIGA_MAC_VER_23 },
    { 0x7cf00000, 0x3c800000, RTL_GIGA_MAC_VER_18 },
    { 0x7c800000, 0x3c800000, RTL_GIGA_MAC_VER_24 },
+6 -2
drivers/net/wireless/ath/ath9k/hif_usb.c
···

    /* RX */
    if (ath9k_hif_usb_alloc_rx_urbs(hif_dev) < 0)
-       goto err;
+       goto err_rx;

    /* Register Read */
    if (ath9k_hif_usb_alloc_reg_in_urb(hif_dev) < 0)
-       goto err;
+       goto err_reg;

    return 0;
+err_reg:
+   ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
+err_rx:
+   ath9k_hif_usb_dealloc_tx_urbs(hif_dev);
err:
    return -ENOMEM;
}
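
The fix replaces a single catch-all error label with layered unwind labels, so allocations that succeeded before the failing step are released. Below is a minimal standalone C sketch of the same pattern, with invented names and plain malloc/free standing in for the driver's URB helpers:

#include <stdio.h>
#include <stdlib.h>

struct fake_dev { void *tx; void *rx; void *reg; };

static int fake_dev_init(struct fake_dev *d)
{
    d->tx = malloc(64);
    if (!d->tx)
        goto err;            /* nothing to unwind yet */
    d->rx = malloc(64);
    if (!d->rx)
        goto err_rx;         /* undo TX only */
    d->reg = malloc(64);
    if (!d->reg)
        goto err_reg;        /* undo RX, then TX */
    return 0;

err_reg:
    free(d->rx);
err_rx:
    free(d->tx);
err:
    return -1;               /* -ENOMEM in the kernel version */
}

int main(void)
{
    struct fake_dev d;
    printf("init: %d\n", fake_dev_init(&d));
    return 0;
}

The labels run in reverse order of acquisition, so each failure point skips the cleanups for resources it never obtained.
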
+1
drivers/net/wireless/hostap/hostap_pci.c
···

    dev->irq = pdev->irq;
    hw_priv->mem_start = mem;
+   dev->base_addr = (unsigned long) mem;

    prism2_pci_cor_sreset(local);
+11
drivers/net/wireless/iwlwifi/iwl-sta.h
···
    spin_lock_irqsave(&priv->sta_lock, flags);
    memset(priv->stations, 0, sizeof(priv->stations));
    priv->num_stations = 0;
+
+   /*
+    * Remove all key information that is not stored as part of station
+    * information since mac80211 may not have had a
+    * chance to remove all the keys. When device is reconfigured by
+    * mac80211 after an error all keys will be reconfigured.
+    */
+   priv->ucode_key_table = 0;
+   priv->key_mapping_key = 0;
+   memset(priv->wep_keys, 0, sizeof(priv->wep_keys));
+
    spin_unlock_irqrestore(&priv->sta_lock, flags);
}
+5 -5
drivers/net/wireless/rt2x00/rt2x00dev.c
···
        BIT(NL80211_IFTYPE_WDS);

    /*
+    * Initialize configuration work.
+    */
+   INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled);
+
+   /*
     * Let the driver probe the device to detect the capabilities.
     */
    retval = rt2x00dev->ops->lib->probe_hw(rt2x00dev);
···
        ERROR(rt2x00dev, "Failed to allocate device.\n");
        goto exit;
    }
-
-   /*
-    * Initialize configuration work.
-    */
-   INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled);

    /*
     * Allocate queue array.
+9 -4
drivers/vhost/net.c
···
            break;
        }
        if (err != len)
-           pr_err("Truncated TX packet: "
-                  " len %d != %zd\n", err, len);
+           pr_debug("Truncated TX packet: "
+                    " len %d != %zd\n", err, len);
        vhost_add_used_and_signal(&net->dev, vq, head, 0);
        total_len += len;
        if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
···
        }
        /* TODO: Should check and handle checksum. */
        if (err > len) {
-           pr_err("Discarded truncated rx packet: "
-                  " len %d > %zd\n", err, len);
+           pr_debug("Discarded truncated rx packet: "
+                    " len %d > %zd\n", err, len);
            vhost_discard_vq_desc(vq);
            continue;
        }
···
    rcu_assign_pointer(vq->private_data, sock);
    vhost_net_enable_vq(n, vq);
done:
+   mutex_unlock(&vq->mutex);
+
    if (oldsock) {
        vhost_net_flush_vq(n, index);
        fput(oldsock->file);
    }
+
+   mutex_unlock(&n->dev.mutex);
+   return 0;

err_vq:
    mutex_unlock(&vq->mutex);
+1 -6
include/net/sock.h
···

static inline int sk_tx_queue_get(const struct sock *sk)
{
-   return sk->sk_tx_queue_mapping;
-}
-
-static inline bool sk_tx_queue_recorded(const struct sock *sk)
-{
-   return (sk && sk->sk_tx_queue_mapping >= 0);
+   return sk ? sk->sk_tx_queue_mapping : -1;
}

static inline void sk_set_socket(struct sock *sk, struct socket *sock)
+5
net/bluetooth/hci_conn.c
···
        acl->sec_level = sec_level;
        acl->auth_type = auth_type;
        hci_acl_connect(acl);
+   } else {
+       if (acl->sec_level < sec_level)
+           acl->sec_level = sec_level;
+       if (acl->auth_type < auth_type)
+           acl->auth_type = auth_type;
    }

    if (type == ACL_LINK)
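
The rule this change encodes: when an ACL link already exists, a second user's requirements may only raise the stored sec_level and auth_type, never lower them. A standalone C sketch of that upgrade-only update (enum values invented for illustration):

#include <stdio.h>

enum sec_level { SEC_LOW = 1, SEC_MEDIUM = 2, SEC_HIGH = 3 };

struct conn { enum sec_level sec_level; };

/* Reusing an existing connection: upgrade only, never downgrade. */
static void conn_reuse(struct conn *c, enum sec_level requested)
{
    if (c->sec_level < requested)
        c->sec_level = requested;
}

int main(void)
{
    struct conn c = { SEC_MEDIUM };
    conn_reuse(&c, SEC_LOW);   /* stays MEDIUM */
    printf("after low request: %d\n", c.sec_level);
    conn_reuse(&c, SEC_HIGH);  /* raised to HIGH */
    printf("after high request: %d\n", c.sec_level);
    return 0;
}
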
+2
net/bluetooth/hci_event.c
···
    if (conn) {
        if (!ev->status)
            conn->link_mode |= HCI_LM_AUTH;
+       else
+           conn->sec_level = BT_SECURITY_LOW;

        clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
+11 -3
net/bluetooth/l2cap.c
···
    l2cap_send_sframe(pi, control);
}

+static inline int __l2cap_no_conn_pending(struct sock *sk)
+{
+   return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
+}
+
static void l2cap_do_start(struct sock *sk)
{
    struct l2cap_conn *conn = l2cap_pi(sk)->conn;
···
    if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
        return;

-   if (l2cap_check_security(sk)) {
+   if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
        struct l2cap_conn_req req;
        req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
        req.psm = l2cap_pi(sk)->psm;

        l2cap_pi(sk)->ident = l2cap_get_ident(conn);
+       l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

        l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
                L2CAP_CONN_REQ, sizeof(req), &req);
···
    }

    if (sk->sk_state == BT_CONNECT) {
-       if (l2cap_check_security(sk)) {
+       if (l2cap_check_security(sk) &&
+               __l2cap_no_conn_pending(sk)) {
            struct l2cap_conn_req req;
            req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
            req.psm = l2cap_pi(sk)->psm;

            l2cap_pi(sk)->ident = l2cap_get_ident(conn);
+           l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

            l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
                    L2CAP_CONN_REQ, sizeof(req), &req);
···
    l2cap_pi(sk)->ident = 0;
    l2cap_pi(sk)->dcid = dcid;
    l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
-
    l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

    l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
···
        req.psm = l2cap_pi(sk)->psm;

        l2cap_pi(sk)->ident = l2cap_get_ident(conn);
+       l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

        l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
                L2CAP_CONN_REQ, sizeof(req), &req);
-9
net/bridge/br_device.c
···
    return count != 0 && ret;
}

-static void br_poll_controller(struct net_device *br_dev)
-{
-   struct netpoll *np = br_dev->npinfo->netpoll;
-
-   if (np->real_dev != br_dev)
-       netpoll_poll_dev(np->real_dev);
-}
-
void br_netpoll_cleanup(struct net_device *dev)
{
    struct net_bridge *br = netdev_priv(dev);
···
    .ndo_do_ioctl = br_dev_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
    .ndo_netpoll_cleanup = br_netpoll_cleanup,
-   .ndo_poll_controller = br_poll_controller,
#endif
};
+1 -22
net/bridge/br_forward.c
···
        kfree_skb(skb);
    else {
        skb_push(skb, ETH_HLEN);
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       if (unlikely(skb->dev->priv_flags & IFF_IN_NETPOLL)) {
-           netpoll_send_skb(skb->dev->npinfo->netpoll, skb);
-           skb->dev->priv_flags &= ~IFF_IN_NETPOLL;
-       } else
-#endif
-           dev_queue_xmit(skb);
+       dev_queue_xmit(skb);
    }
}
···

static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
-#ifdef CONFIG_NET_POLL_CONTROLLER
-   struct net_bridge *br = to->br;
-   if (unlikely(br->dev->priv_flags & IFF_IN_NETPOLL)) {
-       struct netpoll *np;
-       to->dev->npinfo = skb->dev->npinfo;
-       np = skb->dev->npinfo->netpoll;
-       np->real_dev = np->dev = to->dev;
-       to->dev->priv_flags |= IFF_IN_NETPOLL;
-   }
-#endif
    skb->dev = to->dev;
    NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
            br_forward_finish);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-   if (skb->dev->npinfo)
-       skb->dev->npinfo->netpoll->dev = br->dev;
-#endif
}

static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
+13 -7
net/core/dev.c
···
 */
static inline void skb_orphan_try(struct sk_buff *skb)
{
-   if (!skb_tx(skb)->flags)
+   struct sock *sk = skb->sk;
+
+   if (sk && !skb_tx(skb)->flags) {
+       /* skb_tx_hash() wont be able to get sk.
+        * We copy sk_hash into skb->rxhash
+        */
+       if (!skb->rxhash)
+           skb->rxhash = sk->sk_hash;
        skb_orphan(skb);
+   }
}

int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
···
    if (skb->sk && skb->sk->sk_hash)
        hash = skb->sk->sk_hash;
    else
-       hash = (__force u16) skb->protocol;
-
+       hash = (__force u16) skb->protocol ^ skb->rxhash;
    hash = jhash_1word(hash, hashrnd);

    return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
···
static struct netdev_queue *dev_pick_tx(struct net_device *dev,
                    struct sk_buff *skb)
{
-   u16 queue_index;
+   int queue_index;
    struct sock *sk = skb->sk;

-   if (sk_tx_queue_recorded(sk)) {
-       queue_index = sk_tx_queue_get(sk);
-   } else {
+   queue_index = sk_tx_queue_get(sk);
+   if (queue_index < 0) {
        const struct net_device_ops *ops = dev->netdev_ops;

        if (ops->ndo_select_queue) {
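
Two things happen here: skb_orphan_try() now stashes sk_hash in skb->rxhash before dropping the socket, so skb_tx_hash() can still spread flows after the orphan, and queue selection goes through the now NULL-safe sk_tx_queue_get(). The final hash-to-queue step multiplies the 32-bit hash by the queue count and keeps the top 32 bits, which is cheaper than a modulo. A standalone sketch of just that mapping (the jhash_1word() mixing step is omitted):

#include <stdio.h>
#include <stdint.h>

/* Map a uniformly distributed 32-bit hash onto [0, num_tx_queues). */
static uint16_t pick_queue(uint32_t hash, uint16_t num_tx_queues)
{
    return (uint16_t)(((uint64_t)hash * num_tx_queues) >> 32);
}

int main(void)
{
    uint32_t hashes[] = { 0x00000000u, 0x40000000u, 0x80000000u, 0xffffffffu };
    for (int i = 0; i < 4; i++)
        printf("hash %#010x -> queue %u of 8\n",
               (unsigned)hashes[i], pick_queue(hashes[i], 8));
    return 0;
}
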
+4 -1
net/core/neighbour.c
···
{
    struct hh_cache *hh;
    void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
-       = neigh->dev->header_ops->cache_update;
+       = NULL;
+
+   if (neigh->dev->header_ops)
+       update = neigh->dev->header_ops->cache_update;

    if (update) {
        for (hh = neigh->hh; hh; hh = hh->hh_next) {
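
The Oops came from unconditionally dereferencing header_ops, which some devices do not provide. A standalone C sketch of the defensive lookup, with illustrative structs:

#include <stdio.h>

struct header_ops { void (*cache_update)(const char *addr); };
struct net_dev { const struct header_ops *header_ops; };

static void demo_update(const char *addr) { printf("update %s\n", addr); }

static void neigh_update_demo(struct net_dev *dev, const char *addr)
{
    void (*update)(const char *) = NULL;

    if (dev->header_ops)                 /* the check the fix adds */
        update = dev->header_ops->cache_update;
    if (update)
        update(addr);
}

int main(void)
{
    struct header_ops eth_ops = { .cache_update = demo_update };
    struct net_dev eth  = { .header_ops = &eth_ops };
    struct net_dev bare = { .header_ops = NULL };   /* header-less device */
    neigh_update_demo(&eth, "00:11:22:33:44:55");
    neigh_update_demo(&bare, "00:11:22:33:44:55");  /* no crash */
    return 0;
}
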
+1 -1
net/dsa/Kconfig
···
menuconfig NET_DSA
    bool "Distributed Switch Architecture support"
    default n
-   depends on EXPERIMENTAL && !S390
+   depends on EXPERIMENTAL && NET_ETHERNET && !S390
    select PHYLIB
    ---help---
      This allows you to use hardware switch chips that use
+6 -2
net/ipv4/ipmr.c
···
    int err;

    err = ipmr_fib_lookup(net, &fl, &mrt);
-   if (err < 0)
+   if (err < 0) {
+       kfree_skb(skb);
        return err;
+   }

    read_lock(&mrt_lock);
    dev->stats.tx_bytes += skb->len;
···
        goto dont_forward;

    err = ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt);
-   if (err < 0)
+   if (err < 0) {
+       kfree_skb(skb);
        return err;
+   }

    if (!local) {
        if (IPCB(skb)->opt.router_alert) {
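
The leak happened because early error returns skipped the kfree_skb() that otherwise happens further down the path; once a function has taken ownership of the skb, it must consume it on every exit. A standalone C sketch of that rule with plain malloc/free (names invented):

#include <stdlib.h>

struct fake_skb { char data[64]; };

/* Invented stand-in for ipmr_fib_lookup(): <0 means failure. */
static int fake_fib_lookup(int key) { return key ? 0 : -1; }

/* Takes ownership of skb: freed on the error path and the success
 * path alike, so no exit leaks it. */
static int fake_xmit(struct fake_skb *skb, int key)
{
    if (fake_fib_lookup(key) < 0) {
        free(skb);               /* kfree_skb() in the kernel version */
        return -1;
    }
    /* ... would transmit here ... */
    free(skb);
    return 0;
}

int main(void)
{
    fake_xmit(malloc(sizeof(struct fake_skb)), 0);  /* lookup fails: still freed */
    fake_xmit(malloc(sizeof(struct fake_skb)), 1);  /* success path */
    return 0;
}
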
+1
net/ipv4/tcp.c
···
    ssize_t spliced;
    int ret;

+   sock_rps_record_flow(sk);
    /*
     * We can't seek on a socket input
     */
+3
net/ipv4/tcp_output.c
···
    int mib_idx;
    int fwd_rexmitting = 0;

+   if (!tp->packets_out)
+       return;
+
    if (!tp->lost_out)
        tp->retransmit_high = tp->snd_una;
+2 -1
net/ipv6/mip6.c
···

static int mip6_rthdr_input(struct xfrm_state *x, struct sk_buff *skb)
{
+   struct ipv6hdr *iph = ipv6_hdr(skb);
    struct rt2_hdr *rt2 = (struct rt2_hdr *)skb->data;
    int err = rt2->rt_hdr.nexthdr;

    spin_lock(&x->lock);
-   if (!ipv6_addr_equal(&rt2->addr, (struct in6_addr *)x->coaddr) &&
+   if (!ipv6_addr_equal(&iph->daddr, (struct in6_addr *)x->coaddr) &&
        !ipv6_addr_any((struct in6_addr *)x->coaddr))
        err = -ENOENT;
    spin_unlock(&x->lock);
+1
net/phonet/pep.c
···
        newsk = NULL;
        goto out;
    }
+   kfree_skb(oskb);

    sock_hold(sk);
    pep_sk(newsk)->listener = sk;
+4 -1
net/sched/act_nat.c
···
    {
        struct icmphdr *icmph;

-       if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph)))
+       if (!pskb_may_pull(skb, ihl + sizeof(*icmph)))
            goto drop;

        icmph = (void *)(skb_network_header(skb) + ihl);
···
            (icmph->type != ICMP_TIME_EXCEEDED) &&
            (icmph->type != ICMP_PARAMETERPROB))
            break;
+
+       if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph)))
+           goto drop;

        iph = (void *)(icmph + 1);
        if (egress)
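
The old single check demanded an embedded IP header for every ICMP message, so short messages (echo requests, for instance) were dropped. The fix validates in two stages: the ICMP header first, the inner IP header only for the error types that carry one. A standalone C sketch of the two-stage pull over a flat buffer, with simplified header structs and only DEST_UNREACH handled:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct icmp_hdr { uint8_t type; uint8_t code; uint16_t csum; };
struct ip_hdr   { uint8_t bytes[20]; };

static int may_pull(size_t have, size_t need) { return have >= need; }

static int parse_icmp(const uint8_t *pkt, size_t len)
{
    struct icmp_hdr icmph;

    if (!may_pull(len, sizeof(icmph)))   /* stage 1: ICMP header only */
        return -1;
    memcpy(&icmph, pkt, sizeof(icmph));

    if (icmph.type != 3 /* DEST_UNREACH */)
        return 0;                        /* no embedded IP header expected */

    /* stage 2: only error messages must carry the offending IP header */
    if (!may_pull(len, sizeof(icmph) + sizeof(struct ip_hdr)))
        return -1;
    return 1;
}

int main(void)
{
    uint8_t echo[4]  = { 8, 0, 0, 0 };   /* echo request, header only: OK */
    uint8_t trunc[4] = { 3, 0, 0, 0 };   /* unreachable but truncated: drop */
    printf("echo: %d, truncated error: %d\n",
           parse_icmp(echo, sizeof(echo)), parse_icmp(trunc, sizeof(trunc)));
    return 0;
}
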
+13 -2
net/xfrm/xfrm_policy.c
···

    /* Try to instantiate a bundle */
    err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
-   if (err < 0) {
-       if (err != -EAGAIN)
+   if (err <= 0) {
+       if (err != 0 && err != -EAGAIN)
            XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
        return ERR_PTR(err);
    }
···
            goto error;
        if (oldflo == NULL)
            goto make_dummy_bundle;
+       dst_hold(&xdst->u.dst);
+       return oldflo;
+   } else if (new_xdst == NULL) {
+       num_xfrms = 0;
+       if (oldflo == NULL)
+           goto make_dummy_bundle;
+       xdst->num_xfrms = 0;
        dst_hold(&xdst->u.dst);
        return oldflo;
    }
···
        xfrm_pols_put(pols, num_pols);
        err = PTR_ERR(xdst);
        goto dropdst;
+   } else if (xdst == NULL) {
+       num_xfrms = 0;
+       drop_pols = num_pols;
+       goto no_transform;
    }

    spin_lock_bh(&xfrm_policy_sk_bundle_lock);
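
xfrm_tmpl_resolve() has a three-way result: negative is an error, zero means the templates resolved to no transforms at all (possible with optional templates), and positive is the number of transforms. The old code folded zero into the success path and later assumed a bundle existed. A standalone C sketch of handling such a convention (resolver invented for illustration):

#include <stdio.h>

/* Invented stand-in: <0 error, 0 no transforms, >0 transform count. */
static int fake_resolve(int policy)
{
    if (policy < 0)
        return -22;     /* an error, like -EINVAL */
    return policy;
}

static const char *classify(int err)
{
    if (err < 0)
        return "error: bump the error statistics";
    if (err == 0)
        return "no xfrms: let traffic pass untransformed";
    return "instantiate the bundle";
}

int main(void)
{
    for (int p = -1; p <= 2; p++)
        printf("resolve(%d) -> %s\n", p, classify(fake_resolve(p)));
    return 0;
}
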