Merge git://github.com/davem330/net

* git://github.com/davem330/net:
pch_gbe: Fixed the issue on which a network freezes
pch_gbe: Fixed the issue on which PC was frozen when link was downed.
make PACKET_STATISTICS getsockopt report consistently between ring and non-ring
net: xen-netback: correctly restart Tx after a VM restore/migrate
bonding: properly stop queuing work when requested
can bcm: fix incomplete tx_setup fix
RDSRDMA: Fix cleanup of rds_iw_mr_pool
net: Documentation: Fix type of variables
ibmveth: Fix oops on request_irq failure
ipv6: nullify ipv6_ac_list and ipv6_fl_list when creating new socket
cxgb4: Fix EEH on IBM P7IOC
can bcm: fix tx_setup off-by-one errors
MAINTAINERS: tehuti: Alexander Indenbaum's address bounces
dp83640: reduce driver noise
ptp: fix L2 event message recognition

Changed files
+100 -82
+2 -2
Documentation/networking/ip-sysctl.txt
···
     The functional behaviour for certain settings is different
     depending on whether local forwarding is enabled or not.
 
-accept_ra - BOOLEAN
+accept_ra - INTEGER
     Accept Router Advertisements; autoconfigure using them.
 
     Possible values are:
···
     The amount of Duplicate Address Detection probes to send.
     Default: 1
 
-forwarding - BOOLEAN
+forwarding - INTEGER
     Configure interface-specific Host/Router behaviour.
 
     Note: It is recommended to have the same setting on all
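These two entries are integers rather than booleans because they accept three values (0, 1, 2), not two. A minimal userspace sketch, not part of this change, that reads the per-interface tri-state; the interface name eth0 is only an example:

/* Minimal sketch: read the per-interface accept_ra sysctl and report its
 * tri-state value.  "eth0" is only an example interface name. */
#include <stdio.h>

int main(void)
{
    FILE *f = fopen("/proc/sys/net/ipv6/conf/eth0/accept_ra", "r");
    int val;

    if (!f || fscanf(f, "%d", &val) != 1) {
        perror("accept_ra");
        return 1;
    }
    fclose(f);

    /* 0: do not accept RAs, 1: accept unless forwarding is enabled,
     * 2: accept RAs even when forwarding is enabled */
    printf("accept_ra = %d\n", val);
    return 0;
}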
-1
MAINTAINERS
···
 F:  arch/arm/mach-tegra
 
 TEHUTI ETHERNET DRIVER
-M:  Alexander Indenbaum <baum@tehutinetworks.net>
 M:  Andy Gospodarek <andy@greyhouse.net>
 L:  netdev@vger.kernel.org
 S:  Supported
+2 -1
drivers/net/bonding/bond_3ad.c
···
     }
 
 re_arm:
-    queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
+    if (!bond->kill_timers)
+        queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
 out:
     read_unlock(&bond->lock);
 }
+2 -1
drivers/net/bonding/bond_alb.c
···
     }
 
 re_arm:
-    queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks);
+    if (!bond->kill_timers)
+        queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks);
 out:
     read_unlock(&bond->lock);
 }
+8 -5
drivers/net/bonding/bond_main.c
···
 
     read_lock(&bond->lock);
 
+    if (bond->kill_timers)
+        goto out;
+
     /* rejoin all groups on bond device */
     __bond_resend_igmp_join_requests(bond->dev);
 
···
             __bond_resend_igmp_join_requests(vlan_dev);
     }
 
-    if (--bond->igmp_retrans > 0)
+    if ((--bond->igmp_retrans > 0) && !bond->kill_timers)
         queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
-
+out:
     read_unlock(&bond->lock);
 }
 
···
     }
 
 re_arm:
-    if (bond->params.miimon)
+    if (bond->params.miimon && !bond->kill_timers)
         queue_delayed_work(bond->wq, &bond->mii_work,
                    msecs_to_jiffies(bond->params.miimon));
 out:
···
     }
 
 re_arm:
-    if (bond->params.arp_interval)
+    if (bond->params.arp_interval && !bond->kill_timers)
         queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
 out:
     read_unlock(&bond->lock);
···
         bond_ab_arp_probe(bond);
 
 re_arm:
-    if (bond->params.arp_interval)
+    if (bond->params.arp_interval && !bond->kill_timers)
         queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
 out:
     read_unlock(&bond->lock);
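Every re-arm site in the bonding monitors now tests bond->kill_timers, so a work item cannot re-queue itself while bond_close() is tearing the timers down. A userspace analogy of that guard, assuming pthreads in place of the kernel workqueue; this is an illustration of the pattern, not the bonding code:

/* Userspace analogy (not the bonding code): a periodic worker that checks a
 * kill flag before re-arming itself, so teardown cannot race with a re-queue. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool kill_timers;        /* what bond_close() sets before cancelling work */

static void *monitor(void *arg)
{
    (void)arg;
    for (;;) {
        bool stop;

        pthread_mutex_lock(&lock);
        stop = kill_timers;
        pthread_mutex_unlock(&lock);
        if (stop)                   /* the new "if (!bond->kill_timers)" guard */
            break;

        puts("link check");         /* stands in for the real monitor body */
        usleep(100 * 1000);         /* stands in for queue_delayed_work() */
    }
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, monitor, NULL);
    sleep(1);

    pthread_mutex_lock(&lock);
    kill_timers = true;
    pthread_mutex_unlock(&lock);

    pthread_join(t, NULL);
    return 0;
}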
+3
drivers/net/cxgb4/cxgb4_main.c
···
         setup_debugfs(adapter);
     }
 
+    /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
+    pdev->needs_freset = 1;
+
     if (is_offload(adapter))
         attach_ulds(adapter);
 
+2 -2
drivers/net/ibmveth.c
···
         netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
                netdev->irq, rc);
         do {
-            rc = h_free_logical_lan(adapter->vdev->unit_address);
-        } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
+            lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
+        } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
 
         goto err_out;
     }
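The point of the ibmveth fix is that the H_FREE_LOGICAL_LAN retry loop must not overwrite the request_irq() error the function is about to return. A generic, hypothetical illustration of that pattern; open_device() and release_device() are stand-ins, not ibmveth functions:

/* Generic illustration: cleanup on the error path uses its own variable so
 * the original failure code survives to the caller. */
#include <stdio.h>

static int open_device(void)    { return -5; }  /* pretend request_irq failed */
static int release_device(void) { return 0;  }  /* pretend cleanup succeeds   */

static int do_open(void)
{
    int rc = open_device();

    if (rc) {
        int cleanup_rc;

        do {
            cleanup_rc = release_device();      /* do not clobber rc here */
        } while (cleanup_rc > 0);               /* e.g. a "busy, retry" code */
        return rc;                              /* still the original error */
    }
    return 0;
}

int main(void)
{
    printf("do_open() = %d\n", do_open());
    return 0;
}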
+27 -29
drivers/net/pch_gbe/pch_gbe_main.c
···
         iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
               &hw->reg->INT_EN);
         pch_gbe_stop_receive(adapter);
+        int_st |= ioread32(&hw->reg->INT_ST);
+        int_st = int_st & ioread32(&hw->reg->INT_EN);
     }
     if (int_st & PCH_GBE_INT_RX_DMA_ERR)
         adapter->stats.intr_rx_dma_err_count++;
···
             /* Set Pause packet */
             pch_gbe_mac_set_pause_packet(hw);
         }
-        if ((int_en & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))
-            == 0) {
-            return IRQ_HANDLED;
-        }
     }
 
     /* When request status is Receive interruption */
-    if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))) {
+    if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) ||
+        (adapter->rx_stop_flag == true)) {
         if (likely(napi_schedule_prep(&adapter->napi))) {
             /* Enable only Rx Descriptor empty */
             atomic_inc(&adapter->irq_sem);
···
     struct sk_buff *skb;
     unsigned int i;
     unsigned int cleaned_count = 0;
-    bool cleaned = false;
+    bool cleaned = true;
 
     pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
 
···
 
     while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) {
         pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status);
-        cleaned = true;
         buffer_info = &tx_ring->buffer_info[i];
         skb = buffer_info->skb;
 
···
         tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
 
         /* weight of a sort for tx, to avoid endless transmit cleanup */
-        if (cleaned_count++ == PCH_GBE_TX_WEIGHT)
+        if (cleaned_count++ == PCH_GBE_TX_WEIGHT) {
+            cleaned = false;
             break;
+        }
     }
     pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n",
          cleaned_count);
···
 {
     struct pch_gbe_adapter *adapter =
         container_of(napi, struct pch_gbe_adapter, napi);
-    struct net_device *netdev = adapter->netdev;
     int work_done = 0;
     bool poll_end_flag = false;
     bool cleaned = false;
···
 
     pr_debug("budget : %d\n", budget);
 
-    /* Keep link state information with original netdev */
-    if (!netif_carrier_ok(netdev)) {
+    pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
+    cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
+
+    if (!cleaned)
+        work_done = budget;
+    /* If no Tx and not enough Rx work done,
+     * exit the polling mode
+     */
+    if (work_done < budget)
         poll_end_flag = true;
-    } else {
-        pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
+
+    if (poll_end_flag) {
+        napi_complete(napi);
+        if (adapter->rx_stop_flag) {
+            adapter->rx_stop_flag = false;
+            pch_gbe_start_receive(&adapter->hw);
+        }
+        pch_gbe_irq_enable(adapter);
+    } else
         if (adapter->rx_stop_flag) {
             adapter->rx_stop_flag = false;
             pch_gbe_start_receive(&adapter->hw);
             int_en = ioread32(&adapter->hw.reg->INT_EN);
             iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR),
-                      &adapter->hw.reg->INT_EN);
+                  &adapter->hw.reg->INT_EN);
         }
-        cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
-
-        if (cleaned)
-            work_done = budget;
-        /* If no Tx and not enough Rx work done,
-         * exit the polling mode
-         */
-        if ((work_done < budget) || !netif_running(netdev))
-            poll_end_flag = true;
-    }
-
-    if (poll_end_flag) {
-        napi_complete(napi);
-        pch_gbe_irq_enable(adapter);
-    }
 
     pr_debug("poll_end_flag : %d work_done : %d budget : %d\n",
          poll_end_flag, work_done, budget);
+2 -2
drivers/net/phy/dp83640.c
···
     prune_rx_ts(dp83640);
 
     if (list_empty(&dp83640->rxpool)) {
-        pr_warning("dp83640: rx timestamp pool is empty\n");
+        pr_debug("dp83640: rx timestamp pool is empty\n");
         goto out;
     }
     rxts = list_first_entry(&dp83640->rxpool, struct rxts, list);
···
     skb = skb_dequeue(&dp83640->tx_queue);
 
     if (!skb) {
-        pr_warning("dp83640: have timestamp but tx_queue empty\n");
+        pr_debug("dp83640: have timestamp but tx_queue empty\n");
         return;
     }
     ns = phy2txts(phy_txts);
+2 -2
drivers/net/xen-netback/interface.c
···
     xenvif_get(vif);
 
     rtnl_lock();
-    if (netif_running(vif->dev))
-        xenvif_up(vif);
     if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
         dev_set_mtu(vif->dev, ETH_DATA_LEN);
     netdev_update_features(vif->dev);
     netif_carrier_on(vif->dev);
+    if (netif_running(vif->dev))
+        xenvif_up(vif);
     rtnl_unlock();
 
     return 0;
+10 -3
include/linux/ptp_classify.h
···
 #define PTP_CLASS_V2_VLAN (PTP_CLASS_V2 | PTP_CLASS_VLAN)
 
 #define PTP_EV_PORT 319
+#define PTP_GEN_BIT 0x08 /* indicates general message, if set in message type */
 
 #define OFF_ETYPE 12
 #define OFF_IHL   14
···
     {OP_OR,   0, 0,  PTP_CLASS_IPV6 },        /*             */ \
     {OP_RETA, 0, 0,  0 },                     /*             */ \
/*L3x*/ {OP_RETK, 0, 0,  PTP_CLASS_NONE },    /*             */ \
-/*L40*/ {OP_JEQ,  0, 6,  ETH_P_8021Q },       /* f goto L50  */ \
+/*L40*/ {OP_JEQ,  0, 9,  ETH_P_8021Q },       /* f goto L50  */ \
     {OP_LDH,  0, 0,  OFF_ETYPE + 4 },         /*             */ \
-    {OP_JEQ,  0, 9,  ETH_P_1588 },            /* f goto L60  */ \
+    {OP_JEQ,  0, 15, ETH_P_1588 },            /* f goto L60  */ \
+    {OP_LDB,  0, 0,  ETH_HLEN + VLAN_HLEN },  /*             */ \
+    {OP_AND,  0, 0,  PTP_GEN_BIT },           /*             */ \
+    {OP_JEQ,  0, 12, 0 },                     /* f goto L6x  */ \
     {OP_LDH,  0, 0,  ETH_HLEN + VLAN_HLEN },  /*             */ \
     {OP_AND,  0, 0,  PTP_CLASS_VMASK },       /*             */ \
     {OP_OR,   0, 0,  PTP_CLASS_VLAN },        /*             */ \
     {OP_RETA, 0, 0,  0 },                     /*             */ \
-/*L50*/ {OP_JEQ,  0, 4,  ETH_P_1588 },        /* f goto L61  */ \
+/*L50*/ {OP_JEQ,  0, 7,  ETH_P_1588 },        /* f goto L61  */ \
+    {OP_LDB,  0, 0,  ETH_HLEN },              /*             */ \
+    {OP_AND,  0, 0,  PTP_GEN_BIT },           /*             */ \
+    {OP_JEQ,  0, 4,  0 },                     /* f goto L6x  */ \
     {OP_LDH,  0, 0,  ETH_HLEN },              /*             */ \
     {OP_AND,  0, 0,  PTP_CLASS_VMASK },       /*             */ \
     {OP_OR,   0, 0,  PTP_CLASS_L2 },          /*             */ \
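The new PTP_GEN_BIT opcodes make the classifier skip general messages (Announce, Follow_Up, ...) and match only event messages that actually carry timestamps. A small standalone sketch, not kernel code, of the same test on the first byte of a PTPv2 header:

/* Sketch of the test the added BPF opcodes perform: in PTPv2 the low nibble
 * of the first header byte is the messageType, and types with bit 0x08 set
 * are general messages that carry no timestamp event. */
#include <stdint.h>
#include <stdio.h>

#define PTP_GEN_BIT 0x08

static int ptp_is_event_msg(const uint8_t *ptp_hdr)
{
    return (ptp_hdr[0] & PTP_GEN_BIT) == 0; /* Sync, Delay_Req, Pdelay_Req/Resp */
}

int main(void)
{
    uint8_t sync_hdr[1]     = { 0x00 };     /* messageType 0x0: Sync      */
    uint8_t followup_hdr[1] = { 0x08 };     /* messageType 0x8: Follow_Up */

    printf("sync: %d, follow_up: %d\n",
           ptp_is_event_msg(sync_hdr), ptp_is_event_msg(followup_hdr));
    return 0;
}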
+24 -29
net/can/bcm.c
···
     }
 }
 
+static void bcm_tx_start_timer(struct bcm_op *op)
+{
+    if (op->kt_ival1.tv64 && op->count)
+        hrtimer_start(&op->timer,
+                  ktime_add(ktime_get(), op->kt_ival1),
+                  HRTIMER_MODE_ABS);
+    else if (op->kt_ival2.tv64)
+        hrtimer_start(&op->timer,
+                  ktime_add(ktime_get(), op->kt_ival2),
+                  HRTIMER_MODE_ABS);
+}
+
 static void bcm_tx_timeout_tsklet(unsigned long data)
 {
     struct bcm_op *op = (struct bcm_op *)data;
···
 
             bcm_send_to_user(op, &msg_head, NULL, 0);
         }
-    }
-
-    if (op->kt_ival1.tv64 && (op->count > 0)) {
-
-        /* send (next) frame */
         bcm_can_tx(op);
-        hrtimer_start(&op->timer,
-                  ktime_add(ktime_get(), op->kt_ival1),
-                  HRTIMER_MODE_ABS);
 
-    } else {
-        if (op->kt_ival2.tv64) {
-
-            /* send (next) frame */
-            bcm_can_tx(op);
-            hrtimer_start(&op->timer,
-                      ktime_add(ktime_get(), op->kt_ival2),
-                      HRTIMER_MODE_ABS);
-        }
-    }
+    } else if (op->kt_ival2.tv64)
+        bcm_can_tx(op);
+
+    bcm_tx_start_timer(op);
 }
 
 /*
···
         hrtimer_cancel(&op->timer);
     }
 
-    if ((op->flags & STARTTIMER) &&
-        ((op->kt_ival1.tv64 && op->count) || op->kt_ival2.tv64)) {
-
+    if (op->flags & STARTTIMER) {
+        hrtimer_cancel(&op->timer);
         /* spec: send can_frame when starting timer */
         op->flags |= TX_ANNOUNCE;
-
-        if (op->kt_ival1.tv64 && (op->count > 0)) {
-            /* op->count-- is done in bcm_tx_timeout_handler */
-            hrtimer_start(&op->timer, op->kt_ival1,
-                      HRTIMER_MODE_REL);
-        } else
-            hrtimer_start(&op->timer, op->kt_ival2,
-                      HRTIMER_MODE_REL);
     }
 
-    if (op->flags & TX_ANNOUNCE)
+    if (op->flags & TX_ANNOUNCE) {
         bcm_can_tx(op);
+        if (op->count)
+            op->count--;
+    }
+
+    if (op->flags & STARTTIMER)
+        bcm_tx_start_timer(op);
 
     return msg_head->nframes * CFSIZ + MHSIZ;
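Both bcm fixes concern TX_SETUP: how many frames go out at the ival1 interval and that the first frame is sent when the timer is started. A hedged userspace sketch of the TX_SETUP request they affect; the interface name can0 and the CAN ID are only examples:

/* Minimal sketch of a BCM TX_SETUP request: send "count" frames spaced by
 * ival1, then keep sending at ival2.  "can0" and 0x123 are only examples. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/bcm.h>

int main(void)
{
    struct {
        struct bcm_msg_head head;
        struct can_frame frame;
    } msg;
    struct sockaddr_can addr = { .can_family = AF_CAN };
    int s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);

    if (s < 0) {
        perror("socket");
        return 1;
    }
    addr.can_ifindex = if_nametoindex("can0");
    if (!addr.can_ifindex ||
        connect(s, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
        perror("connect");
        return 1;
    }

    memset(&msg, 0, sizeof(msg));
    msg.head.opcode  = TX_SETUP;
    msg.head.flags   = SETTIMER | STARTTIMER;
    msg.head.count   = 3;              /* three frames at ival1 spacing ... */
    msg.head.ival1.tv_usec = 100000;   /* ... 100 ms apart ...              */
    msg.head.ival2.tv_sec  = 1;        /* ... then one frame per second     */
    msg.head.can_id  = 0x123;
    msg.head.nframes = 1;
    msg.frame.can_id  = 0x123;
    msg.frame.can_dlc = 2;
    msg.frame.data[0] = 0xde;
    msg.frame.data[1] = 0xad;

    if (write(s, &msg, sizeof(msg)) < 0) {
        perror("TX_SETUP");
        return 1;
    }
    sleep(5);   /* let the kernel cycle through count + ival2 transmissions */
    close(s);
    return 0;
}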
+3
net/ipv6/tcp_ipv6.c
···
     newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
 #endif
 
+    newnp->ipv6_ac_list = NULL;
+    newnp->ipv6_fl_list = NULL;
     newnp->pktoptions  = NULL;
     newnp->opt         = NULL;
     newnp->mcast_oif   = inet6_iif(skb);
···
        First: no IPv4 options.
      */
     newinet->inet_opt = NULL;
+    newnp->ipv6_ac_list = NULL;
     newnp->ipv6_fl_list = NULL;
 
     /* Clone RX bits */
+4 -1
net/packet/af_packet.c
···
     return 0;
 
 drop_n_acct:
-    po->stats.tp_drops = atomic_inc_return(&sk->sk_drops);
+    spin_lock(&sk->sk_receive_queue.lock);
+    po->stats.tp_drops++;
+    atomic_inc(&sk->sk_drops);
+    spin_unlock(&sk->sk_receive_queue.lock);
 
 drop_n_restore:
     if (skb_head != skb->data && skb_shared(skb)) {
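With the drop counter updated under the receive-queue lock, the numbers returned by getsockopt(PACKET_STATISTICS) stay coherent for both ring and non-ring capture. A minimal sketch of reading those counters from userspace; it needs CAP_NET_RAW, and the one-second sleep is just a toy stand-in for a capture period:

/* Minimal sketch: read the packet-socket counters that this change makes
 * consistent between ring and non-ring capture. */
#include <stdio.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <unistd.h>

int main(void)
{
    struct tpacket_stats st;
    socklen_t len = sizeof(st);
    int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

    if (fd < 0) {
        perror("socket");
        return 1;
    }
    sleep(1);   /* let some traffic arrive (or not) */

    /* The kernel zeroes the counters on each PACKET_STATISTICS read. */
    if (getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len) < 0) {
        perror("PACKET_STATISTICS");
        return 1;
    }
    printf("received %u, dropped %u\n", st.tp_packets, st.tp_drops);
    close(fd);
    return 0;
}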
+9 -4
net/rds/iw_rdma.c
···
 static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
 static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
             struct list_head *unmap_list,
-            struct list_head *kill_list);
+            struct list_head *kill_list,
+            int *unpinned);
 static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
 
 static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwdev, struct rdma_cm_id **cm_id)
···
     LIST_HEAD(unmap_list);
     LIST_HEAD(kill_list);
     unsigned long flags;
-    unsigned int nfreed = 0, ncleaned = 0, free_goal;
+    unsigned int nfreed = 0, ncleaned = 0, unpinned = 0, free_goal;
     int ret = 0;
 
     rds_iw_stats_inc(s_iw_rdma_mr_pool_flush);
···
      * will be destroyed by the unmap function.
      */
     if (!list_empty(&unmap_list)) {
-        ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list, &kill_list);
+        ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list,
+                             &kill_list, &unpinned);
         /* If we've been asked to destroy all MRs, move those
          * that were simply cleaned to the kill list */
         if (free_all)
···
         spin_unlock_irqrestore(&pool->list_lock, flags);
     }
 
+    atomic_sub(unpinned, &pool->free_pinned);
     atomic_sub(ncleaned, &pool->dirty_count);
     atomic_sub(nfreed, &pool->item_count);
 
···
 
 static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
                 struct list_head *unmap_list,
-                struct list_head *kill_list)
+                struct list_head *kill_list,
+                int *unpinned)
 {
     struct rds_iw_mapping *mapping, *next;
     unsigned int ncleaned = 0;
···
 
     spin_lock_irqsave(&pool->list_lock, flags);
     list_for_each_entry_safe(mapping, next, unmap_list, m_list) {
+        *unpinned += mapping->m_sg.len;
         list_move(&mapping->m_list, &laundered);
         ncleaned++;
     }