Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

The dwmac-socfpga.c conflict was a case of a bug fix overlapping with
changes in net-next that handle an error pointer differently.

Signed-off-by: David S. Miller <davem@davemloft.net>

+85 -37
+4 -5
Documentation/networking/rds.txt
··· 62 62 ================ 63 63 64 64 AF_RDS, PF_RDS, SOL_RDS 65 - These constants haven't been assigned yet, because RDS isn't in 66 - mainline yet. Currently, the kernel module assigns some constant 67 - and publishes it to user space through two sysctl files 68 - /proc/sys/net/rds/pf_rds 69 - /proc/sys/net/rds/sol_rds 65 + AF_RDS and PF_RDS are the domain type to be used with socket(2) 66 + to create RDS sockets. SOL_RDS is the socket-level to be used 67 + with setsockopt(2) and getsockopt(2) for RDS specific socket 68 + options. 70 69 71 70 fd = socket(PF_RDS, SOCK_SEQPACKET, 0); 72 71 This creates a new, unbound RDS socket.
+1
drivers/net/ethernet/emulex/benet/be.h
··· 99 99 #define BE_NAPI_WEIGHT 64 100 100 #define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */ 101 101 #define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST) 102 + #define MAX_NUM_POST_ERX_DB 255u 102 103 103 104 #define MAX_VFS 30 /* Max VFs supported by BE3 FW */ 104 105 #define FW_VER_LEN 32
+1 -1
drivers/net/ethernet/emulex/benet/be_main.c
··· 2122 2122 if (rxo->rx_post_starved) 2123 2123 rxo->rx_post_starved = false; 2124 2124 do { 2125 - notify = min(256u, posted); 2125 + notify = min(MAX_NUM_POST_ERX_DB, posted); 2126 2126 be_rxq_notify(adapter, rxq->id, notify); 2127 2127 posted -= notify; 2128 2128 } while (posted);
+2 -2
drivers/net/phy/Kconfig
··· 68 68 config BROADCOM_PHY 69 69 tristate "Drivers for Broadcom PHYs" 70 70 ---help--- 71 - Currently supports the BCM5411, BCM5421, BCM5461, BCM5464, BCM5481 72 - and BCM5482 PHYs. 71 + Currently supports the BCM5411, BCM5421, BCM5461, BCM54616S, BCM5464, 72 + BCM5481 and BCM5482 PHYs. 73 73 74 74 config BCM63XX_PHY 75 75 tristate "Drivers for Broadcom 63xx SOCs internal PHY"
+14
drivers/net/phy/broadcom.c
··· 549 549 .config_intr = bcm54xx_config_intr, 550 550 .driver = { .owner = THIS_MODULE }, 551 551 }, { 552 + .phy_id = PHY_ID_BCM54616S, 553 + .phy_id_mask = 0xfffffff0, 554 + .name = "Broadcom BCM54616S", 555 + .features = PHY_GBIT_FEATURES | 556 + SUPPORTED_Pause | SUPPORTED_Asym_Pause, 557 + .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, 558 + .config_init = bcm54xx_config_init, 559 + .config_aneg = genphy_config_aneg, 560 + .read_status = genphy_read_status, 561 + .ack_interrupt = bcm54xx_ack_interrupt, 562 + .config_intr = bcm54xx_config_intr, 563 + .driver = { .owner = THIS_MODULE }, 564 + }, { 552 565 .phy_id = PHY_ID_BCM5464, 553 566 .phy_id_mask = 0xfffffff0, 554 567 .name = "Broadcom BCM5464", ··· 673 660 { PHY_ID_BCM5411, 0xfffffff0 }, 674 661 { PHY_ID_BCM5421, 0xfffffff0 }, 675 662 { PHY_ID_BCM5461, 0xfffffff0 }, 663 + { PHY_ID_BCM54616S, 0xfffffff0 }, 676 664 { PHY_ID_BCM5464, 0xfffffff0 }, 677 665 { PHY_ID_BCM5482, 0xfffffff0 }, 678 666 { PHY_ID_BCM5482, 0xfffffff0 },
+2 -2
drivers/net/usb/usbnet.c
··· 1072 1072 * especially now that control transfers can be queued. 1073 1073 */ 1074 1074 static void 1075 - kevent (struct work_struct *work) 1075 + usbnet_deferred_kevent (struct work_struct *work) 1076 1076 { 1077 1077 struct usbnet *dev = 1078 1078 container_of(work, struct usbnet, kevent); ··· 1626 1626 skb_queue_head_init(&dev->rxq_pause); 1627 1627 dev->bh.func = usbnet_bh; 1628 1628 dev->bh.data = (unsigned long) dev; 1629 - INIT_WORK (&dev->kevent, kevent); 1629 + INIT_WORK (&dev->kevent, usbnet_deferred_kevent); 1630 1630 init_usb_anchor(&dev->deferred); 1631 1631 dev->delay.function = usbnet_bh; 1632 1632 dev->delay.data = (unsigned long) dev;
+10 -10
drivers/net/vxlan.c
··· 1699 1699 } 1700 1700 } 1701 1701 1702 - skb = iptunnel_handle_offloads(skb, udp_sum, type); 1703 - if (IS_ERR(skb)) { 1704 - err = -EINVAL; 1705 - goto err; 1706 - } 1707 - 1708 1702 skb_scrub_packet(skb, xnet); 1709 1703 1710 1704 min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len ··· 1715 1721 skb = vlan_hwaccel_push_inside(skb); 1716 1722 if (WARN_ON(!skb)) { 1717 1723 err = -ENOMEM; 1724 + goto err; 1725 + } 1726 + 1727 + skb = iptunnel_handle_offloads(skb, udp_sum, type); 1728 + if (IS_ERR(skb)) { 1729 + err = -EINVAL; 1718 1730 goto err; 1719 1731 } 1720 1732 ··· 1784 1784 } 1785 1785 } 1786 1786 1787 - skb = iptunnel_handle_offloads(skb, udp_sum, type); 1788 - if (IS_ERR(skb)) 1789 - return PTR_ERR(skb); 1790 - 1791 1787 min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len 1792 1788 + VXLAN_HLEN + sizeof(struct iphdr) 1793 1789 + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0); ··· 1798 1802 skb = vlan_hwaccel_push_inside(skb); 1799 1803 if (WARN_ON(!skb)) 1800 1804 return -ENOMEM; 1805 + 1806 + skb = iptunnel_handle_offloads(skb, udp_sum, type); 1807 + if (IS_ERR(skb)) 1808 + return PTR_ERR(skb); 1801 1809 1802 1810 vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh)); 1803 1811 vxh->vx_flags = htonl(VXLAN_HF_VNI);
+1
include/linux/brcmphy.h
··· 11 11 #define PHY_ID_BCM5421 0x002060e0 12 12 #define PHY_ID_BCM5464 0x002060b0 13 13 #define PHY_ID_BCM5461 0x002060c0 14 + #define PHY_ID_BCM54616S 0x03625d10 14 15 #define PHY_ID_BCM57780 0x03625d90 15 16 16 17 #define PHY_ID_BCM7250 0xae025280
+2 -2
net/ipv4/fou.c
··· 886 886 887 887 #ifdef CONFIG_NET_FOU_IP_TUNNELS 888 888 889 - static const struct ip_tunnel_encap_ops __read_mostly fou_iptun_ops = { 889 + static const struct ip_tunnel_encap_ops fou_iptun_ops = { 890 890 .encap_hlen = fou_encap_hlen, 891 891 .build_header = fou_build_header, 892 892 }; 893 893 894 - static const struct ip_tunnel_encap_ops __read_mostly gue_iptun_ops = { 894 + static const struct ip_tunnel_encap_ops gue_iptun_ops = { 895 895 .encap_hlen = gue_encap_hlen, 896 896 .build_header = gue_build_header, 897 897 };
+4 -4
net/ipv4/geneve.c
··· 113 113 int min_headroom; 114 114 int err; 115 115 116 - skb = udp_tunnel_handle_offloads(skb, csum); 117 - if (IS_ERR(skb)) 118 - return PTR_ERR(skb); 119 - 120 116 min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len 121 117 + GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr) 122 118 + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0); ··· 126 130 skb = vlan_hwaccel_push_inside(skb); 127 131 if (unlikely(!skb)) 128 132 return -ENOMEM; 133 + 134 + skb = udp_tunnel_handle_offloads(skb, csum); 135 + if (IS_ERR(skb)) 136 + return PTR_ERR(skb); 129 137 130 138 gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len); 131 139 geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
+2
net/ipv4/tcp_output.c
··· 2994 2994 rcu_read_unlock(); 2995 2995 #endif 2996 2996 2997 + /* Do not fool tcpdump (if any), clean our debris */ 2998 + skb->tstamp.tv64 = 0; 2997 2999 return skb; 2998 3000 } 2999 3001 EXPORT_SYMBOL(tcp_make_synack);
+1 -2
net/ipv6/ip6_vti.c
··· 288 288 static void vti6_dev_uninit(struct net_device *dev) 289 289 { 290 290 struct ip6_tnl *t = netdev_priv(dev); 291 - struct net *net = dev_net(dev); 292 - struct vti6_net *ip6n = net_generic(net, vti6_net_id); 291 + struct vti6_net *ip6n = net_generic(t->net, vti6_net_id); 293 292 294 293 if (dev == ip6n->fb_tnl_dev) 295 294 RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
+2 -1
net/rds/connection.c
··· 130 130 rcu_read_lock(); 131 131 conn = rds_conn_lookup(head, laddr, faddr, trans); 132 132 if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport && 133 - !is_outgoing) { 133 + laddr == faddr && !is_outgoing) { 134 134 /* This is a looped back IB connection, and we're 135 135 * called by the code handling the incoming connect. 136 136 * We need a second connection object into which we ··· 193 193 } 194 194 195 195 atomic_set(&conn->c_state, RDS_CONN_DOWN); 196 + conn->c_send_gen = 0; 196 197 conn->c_reconnect_jiffies = 0; 197 198 INIT_DELAYED_WORK(&conn->c_send_w, rds_send_worker); 198 199 INIT_DELAYED_WORK(&conn->c_recv_w, rds_recv_worker);
+1
net/rds/rds.h
··· 110 110 void *c_transport_data; 111 111 112 112 atomic_t c_state; 113 + unsigned long c_send_gen; 113 114 unsigned long c_flags; 114 115 unsigned long c_reconnect_jiffies; 115 116 struct delayed_work c_send_w;
+31 -2
net/rds/send.c
··· 140 140 struct scatterlist *sg; 141 141 int ret = 0; 142 142 LIST_HEAD(to_be_dropped); 143 + int batch_count; 144 + unsigned long send_gen = 0; 143 145 144 146 restart: 147 + batch_count = 0; 145 148 146 149 /* 147 150 * sendmsg calls here after having queued its message on the send ··· 158 155 ret = -ENOMEM; 159 156 goto out; 160 157 } 158 + 159 + /* 160 + * we record the send generation after doing the xmit acquire. 161 + * if someone else manages to jump in and do some work, we'll use 162 + * this to avoid a goto restart farther down. 163 + * 164 + * The acquire_in_xmit() check above ensures that only one 165 + * caller can increment c_send_gen at any time. 166 + */ 167 + conn->c_send_gen++; 168 + send_gen = conn->c_send_gen; 161 169 162 170 /* 163 171 * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT, ··· 215 201 */ 216 202 if (!rm) { 217 203 unsigned int len; 204 + 205 + batch_count++; 206 + 207 + /* we want to process as big a batch as we can, but 208 + * we also want to avoid softlockups. If we've been 209 + * through a lot of messages, lets back off and see 210 + * if anyone else jumps in 211 + */ 212 + if (batch_count >= 1024) 213 + goto over_batch; 218 214 219 215 spin_lock_irqsave(&conn->c_lock, flags); 220 216 ··· 381 357 } 382 358 } 383 359 360 + over_batch: 384 361 if (conn->c_trans->xmit_complete) 385 362 conn->c_trans->xmit_complete(conn); 386 - 387 363 release_in_xmit(conn); 388 364 389 365 /* Nuke any messages we decided not to retransmit. */ ··· 404 380 * If the transport cannot continue (i.e ret != 0), then it must 405 381 * call us when more room is available, such as from the tx 406 382 * completion handler. 
383 + * 384 + * We have an extra generation check here so that if someone manages 385 + * to jump in after our release_in_xmit, we'll see that they have done 386 + * some work and we will skip our goto 407 387 */ 408 388 if (ret == 0) { 409 389 smp_mb(); 410 - if (!list_empty(&conn->c_send_queue)) { 390 + if (!list_empty(&conn->c_send_queue) && 391 + send_gen == conn->c_send_gen) { 411 392 rds_stats_inc(s_send_lock_queue_raced); 412 393 goto restart; 413 394 }
+2 -1
net/sched/sch_netem.c
··· 560 560 tfifo_dequeue: 561 561 skb = __skb_dequeue(&sch->q); 562 562 if (skb) { 563 - deliver: 564 563 qdisc_qstats_backlog_dec(sch, skb); 564 + deliver: 565 565 qdisc_unthrottled(sch); 566 566 qdisc_bstats_update(sch, skb); 567 567 return skb; ··· 578 578 rb_erase(p, &q->t_root); 579 579 580 580 sch->q.qlen--; 581 + qdisc_qstats_backlog_dec(sch, skb); 581 582 skb->next = NULL; 582 583 skb->prev = NULL; 583 584 skb->tstamp = netem_skb_cb(skb)->tstamp_save;
+5 -5
net/xfrm/xfrm_input.c
··· 238 238 239 239 skb->sp->xvec[skb->sp->len++] = x; 240 240 241 - if (xfrm_tunnel_check(skb, x, family)) { 242 - XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR); 243 - goto drop; 244 - } 245 - 246 241 spin_lock(&x->lock); 247 242 if (unlikely(x->km.state == XFRM_STATE_ACQ)) { 248 243 XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR); ··· 265 270 } 266 271 267 272 spin_unlock(&x->lock); 273 + 274 + if (xfrm_tunnel_check(skb, x, family)) { 275 + XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR); 276 + goto drop; 277 + } 268 278 269 279 seq_hi = htonl(xfrm_replay_seqhi(x, seq)); 270 280