Merge git://github.com/davem330/net

* git://github.com/davem330/net:
net: fix typos in Documentation/networking/scaling.txt
bridge: leave carrier on for empty bridge
netfilter: Use proper rwlock init function
tcp: properly update lost_cnt_hint during shifting
tcp: properly handle md5sig_pool references
macvlan/macvtap: Fix unicast between macvtap interfaces in bridge mode

+20 -20
+5 -5
Documentation/networking/scaling.txt
···
  27   27   of logical flows. Packets for each flow are steered to a separate receive
  28   28   queue, which in turn can be processed by separate CPUs. This mechanism is
  29   29   generally known as “Receive-side Scaling” (RSS). The goal of RSS and
  30      - the other scaling techniques to increase performance uniformly.
       30 + the other scaling techniques is to increase performance uniformly.
  31   31   Multi-queue distribution can also be used for traffic prioritization, but
  32   32   that is not the focus of these techniques.
  33   33
···
 186  186   same CPU. Indeed, with many flows and few CPUs, it is very likely that
 187  187   a single application thread handles flows with many different flow hashes.
 188  188
 189      - rps_sock_table is a global flow table that contains the *desired* CPU for
 190      - flows: the CPU that is currently processing the flow in userspace. Each
 191      - table value is a CPU index that is updated during calls to recvmsg and
 192      - sendmsg (specifically, inet_recvmsg(), inet_sendmsg(), inet_sendpage()
      189 + rps_sock_flow_table is a global flow table that contains the *desired* CPU
      190 + for flows: the CPU that is currently processing the flow in userspace.
      191 + Each table value is a CPU index that is updated during calls to recvmsg
      192 + and sendmsg (specifically, inet_recvmsg(), inet_sendmsg(), inet_sendpage()
 193  193   and tcp_splice_read()).
 194  194
 195  195   When the scheduler moves a thread to a new CPU while it has outstanding
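
For context on the paragraph corrected above: rps_sock_flow_table is the RFS table that
records, per flow hash, the CPU on which the flow's consuming application thread last
ran, so arriving packets can be steered back to that CPU. Below is a minimal,
userspace-style sketch of that idea; all names and sizes are illustrative only (the
real table is kernel-internal and sized via the rps_sock_flow_entries sysctl).

#include <stdint.h>

#define FLOW_TABLE_SIZE 4096    /* illustrative size, not the kernel's */

/* Hypothetical stand-in for rps_sock_flow_table: one desired CPU per
 * flow-hash bucket. */
static uint16_t desired_cpu[FLOW_TABLE_SIZE];

/* Socket path: record where the application is consuming this flow
 * (the role played by the inet_recvmsg()/inet_sendmsg() hooks above). */
static void record_flow_cpu(uint32_t flow_hash, uint16_t current_cpu)
{
	desired_cpu[flow_hash % FLOW_TABLE_SIZE] = current_cpu;
}

/* Receive path: steer an incoming packet of this flow toward the CPU
 * where its consumer last ran. */
static uint16_t steer_packet(uint32_t flow_hash)
{
	return desired_cpu[flow_hash % FLOW_TABLE_SIZE];
}
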
+1 -1
drivers/net/macvlan.c
···
 239  239           dest = macvlan_hash_lookup(port, eth->h_dest);
 240  240           if (dest && dest->mode == MACVLAN_MODE_BRIDGE) {
 241  241               /* send to lowerdev first for its network taps */
 242      -             vlan->forward(vlan->lowerdev, skb);
      242 +             dev_forward_skb(vlan->lowerdev, skb);
 243  243
 244  244               return NET_XMIT_SUCCESS;
 245  245           }
+0 -3
net/bridge/br_device.c
···
  91   91   {
  92   92       struct net_bridge *br = netdev_priv(dev);
  93   93
  94      -     netif_carrier_off(dev);
  95   94       netdev_update_features(dev);
  96   95       netif_start_queue(dev);
  97   96       br_stp_enable_bridge(br);
···
 106  107   static int br_dev_stop(struct net_device *dev)
 107  108   {
 108  109       struct net_bridge *br = netdev_priv(dev);
 109      -
 110      -     netif_carrier_off(dev);
 111  110
 112  111       br_stp_disable_bridge(br);
 113  112       br_multicast_stop(br);
+1 -3
net/ipv4/tcp_input.c
···
1389 1389
1390 1390       BUG_ON(!pcount);
1391 1391
1392      -     /* Tweak before seqno plays */
1393      -     if (!tcp_is_fack(tp) && tcp_is_sack(tp) && tp->lost_skb_hint &&
1394      -         !before(TCP_SKB_CB(tp->lost_skb_hint)->seq, TCP_SKB_CB(skb)->seq))
     1392 +     if (skb == tp->lost_skb_hint)
1395 1393           tp->lost_cnt_hint += pcount;
1396 1394
1397 1395       TCP_SKB_CB(prev)->end_seq += shifted;
+7 -4
net/ipv4/tcp_ipv4.c
···
 927  927           }
 928  928           sk_nocaps_add(sk, NETIF_F_GSO_MASK);
 929  929       }
 930      -     if (tcp_alloc_md5sig_pool(sk) == NULL) {
      930 +
      931 +     md5sig = tp->md5sig_info;
      932 +     if (md5sig->entries4 == 0 &&
      933 +         tcp_alloc_md5sig_pool(sk) == NULL) {
 931  934           kfree(newkey);
 932  935           return -ENOMEM;
 933  936       }
 934      -     md5sig = tp->md5sig_info;
 935  937
 936  938       if (md5sig->alloced4 == md5sig->entries4) {
 937  939           keys = kmalloc((sizeof(*keys) *
 938  940                          (md5sig->entries4 + 1)), GFP_ATOMIC);
 939  941           if (!keys) {
 940  942               kfree(newkey);
 941      -             tcp_free_md5sig_pool();
      943 +             if (md5sig->entries4 == 0)
      944 +                 tcp_free_md5sig_pool();
 942  945               return -ENOMEM;
 943  946           }
···
 985  982               kfree(tp->md5sig_info->keys4);
 986  983               tp->md5sig_info->keys4 = NULL;
 987  984               tp->md5sig_info->alloced4 = 0;
      985 +             tcp_free_md5sig_pool();
 988  986           } else if (tp->md5sig_info->entries4 != i) {
 989  987               /* Need to do some manipulation */
 990  988               memmove(&tp->md5sig_info->keys4[i],
···
 993  989                       (tp->md5sig_info->entries4 - i) *
 994  990                       sizeof(struct tcp4_md5sig_key));
 995  991           }
 996      -         tcp_free_md5sig_pool();
 997  992           return 0;
 998  993       }
 999  994   }
+5 -3
net/ipv6/tcp_ipv6.c
···
 591  591           }
 592  592           sk_nocaps_add(sk, NETIF_F_GSO_MASK);
 593  593       }
 594      -     if (tcp_alloc_md5sig_pool(sk) == NULL) {
      594 +     if (tp->md5sig_info->entries6 == 0 &&
      595 +         tcp_alloc_md5sig_pool(sk) == NULL) {
 595  596           kfree(newkey);
 596  597           return -ENOMEM;
 597  598       }
···
 601  600                      (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
 602  601
 603  602           if (!keys) {
 604      -             tcp_free_md5sig_pool();
 605  603               kfree(newkey);
      604 +             if (tp->md5sig_info->entries6 == 0)
      605 +                 tcp_free_md5sig_pool();
 606  606               return -ENOMEM;
 607  607           }
···
 649  647               kfree(tp->md5sig_info->keys6);
 650  648               tp->md5sig_info->keys6 = NULL;
 651  649               tp->md5sig_info->alloced6 = 0;
      650 +             tcp_free_md5sig_pool();
 652  651           } else {
 653  652               /* shrink the database */
 654  653               if (tp->md5sig_info->entries6 != i)
···
 658  655                           (tp->md5sig_info->entries6 - i)
 659  656                           * sizeof (tp->md5sig_info->keys6[0]));
 660  657           }
 661      -         tcp_free_md5sig_pool();
 662  658           return 0;
 663  659       }
 664  660   }
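
The tcp_ipv4.c and tcp_ipv6.c hunks above restore the same invariant: the shared MD5
signature pool is reference-counted per configured key, so a socket should take a pool
reference only when its first key is added and drop it only when its last key is
removed (or when setting up the first key fails). A simplified sketch of that
reference-counting rule follows, using hypothetical names rather than the kernel's API.

/* Hypothetical per-socket key bookkeeping (stands in for md5sig_info). */
struct md5_keys {
	int entries;            /* number of keys configured on this socket */
};

static int pool_refs;           /* stands in for the global md5sig pool count */

static int pool_get(void)       /* analogue of tcp_alloc_md5sig_pool() */
{
	pool_refs++;
	return 0;
}

static void pool_put(void)      /* analogue of tcp_free_md5sig_pool() */
{
	pool_refs--;
}

static int md5_key_add(struct md5_keys *k)
{
	/* Only the first key on a socket takes a new pool reference. */
	if (k->entries == 0 && pool_get() != 0)
		return -1;
	k->entries++;
	return 0;
}

static void md5_key_del(struct md5_keys *k)
{
	/* Only removing the last key drops the socket's pool reference. */
	if (--k->entries == 0)
		pool_put();
}
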
+1 -1
net/netfilter/ipvs/ip_vs_ctl.c
···
3679 3679       int idx;
3680 3680       struct netns_ipvs *ipvs = net_ipvs(net);
3681 3681
3682      -     ipvs->rs_lock = __RW_LOCK_UNLOCKED(ipvs->rs_lock);
     3682 +     rwlock_init(&ipvs->rs_lock);
3683 3683
3684 3684       /* Initialize rs_table */
3685 3685       for (idx = 0; idx < IP_VS_RTAB_SIZE; idx++)
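
The ip_vs_ctl.c hunk replaces a static-style initializer with the runtime one:
__RW_LOCK_UNLOCKED()/DEFINE_RWLOCK() are intended for statically declared locks, while
a lock embedded in dynamically allocated memory (here, per-netns IPVS state) should be
set up with rwlock_init(). A minimal sketch of the pattern; the struct and function
names below are illustrative, only the lock and allocation APIs are the kernel's.

#include <linux/slab.h>
#include <linux/spinlock.h>

/* Illustrative container for a dynamically allocated lock. */
struct my_state {
	rwlock_t lock;
};

static struct my_state *my_state_alloc(void)
{
	struct my_state *s = kzalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		return NULL;

	/* Runtime initialization is required for locks that live in
	 * allocated memory (it also gives lock debugging a proper key). */
	rwlock_init(&s->lock);
	return s;
}

/* A statically declared lock, by contrast, may use the static form: */
static DEFINE_RWLOCK(my_static_lock);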