Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6:
net/ipv4: Eliminate kstrdup memory leak
net/caif/cfrfml.c: use asm/unaligned.h
ax25: misplaced sock_put(sk)
qlge: reset the chip before freeing the buffers
l2tp: test for ethernet header in l2tp_eth_dev_recv()
tcp: select(writefds) doesn't hang up when a peer closes the connection
tcp: fix three tcp sysctls tuning
tcp: Combat per-cpu skew in orphan tests.
pxa168_eth: silence gcc warnings
pxa168_eth: update call to phy_mii_ioctl()
pxa168_eth: fix error handling in probe
pxa168_eth: remove unneeded null check
phylib: Fix race between returning phydev and calling adjust_link
caif-driver: add HAS_DMA dependency
3c59x: Fix deadlock between boomerang_interrupt and boomerang_start_tx
qlcnic: fix poll implementation
netxen: fix poll implementation
bridge: netfilter: fix a memory leak

15 files changed, 98 insertions(+), 72 deletions(-)
drivers/net/3c59x.c | +14 -1

@@ -633,7 +633,8 @@
 		open:1,
 		medialock:1,
 		must_free_region:1,	/* Flag: if zero, Cardbus owns the I/O region */
-		large_frames:1;		/* accept large frames */
+		large_frames:1,		/* accept large frames */
+		handling_irq:1;		/* private in_irq indicator */
 	int drv_flags;
 	u16 status_enable;
 	u16 intr_enable;
@@ -2134,6 +2133,15 @@
 			   dev->name, vp->cur_tx);
 	}
 
+	/*
+	 * We can't allow a recursion from our interrupt handler back into the
+	 * tx routine, as they take the same spin lock, and that causes
+	 * deadlock. Just return NETDEV_TX_BUSY and let the stack try again in
+	 * a bit
+	 */
+	if (vp->handling_irq)
+		return NETDEV_TX_BUSY;
+
 	if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) {
 		if (vortex_debug > 0)
 			pr_warning("%s: BUG! Tx Ring full, refusing to send buffer.\n",
@@ -2345,11 +2335,13 @@
 
 	ioaddr = vp->ioaddr;
 
+
 	/*
 	 * It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout
 	 * and boomerang_start_xmit
 	 */
 	spin_lock(&vp->lock);
+	vp->handling_irq = 1;
 
 	status = ioread16(ioaddr + EL3_STATUS);
 
@@ -2459,6 +2447,7 @@
 		pr_debug("%s: exiting interrupt, status %4.4x.\n",
 			   dev->name, status);
handler_exit:
+	vp->handling_irq = 0;
 	spin_unlock(&vp->lock);
 	return IRQ_HANDLED;
 }
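The comment in the second hunk carries the whole idea: boomerang_interrupt and boomerang_start_xmit take the same spin lock, so a call back into the tx path from inside the handler would self-deadlock. Below is a minimal userspace sketch of the same guard-flag pattern; all names (fake_dev, fake_start_xmit, ...) are illustrative, not the driver's code.

/* Recursion-guard sketch: the "irq handler" sets a flag while it holds
 * the lock, and the "xmit" path backs off instead of trying to take the
 * same lock again. Build with -lpthread. */
#include <pthread.h>
#include <stdio.h>

#define TX_BUSY 1
#define TX_OK   0

struct fake_dev {
	pthread_mutex_t lock;
	int handling_irq;		/* mirrors vp->handling_irq */
};

static int fake_start_xmit(struct fake_dev *dev)
{
	/* When called from inside fake_interrupt(), taking dev->lock here
	 * would deadlock; bail out and let the caller retry later. */
	if (dev->handling_irq)
		return TX_BUSY;

	pthread_mutex_lock(&dev->lock);
	/* ... queue the packet ... */
	pthread_mutex_unlock(&dev->lock);
	return TX_OK;
}

static void fake_interrupt(struct fake_dev *dev)
{
	pthread_mutex_lock(&dev->lock);
	dev->handling_irq = 1;

	/* Work done here may call back into fake_start_xmit(); the flag
	 * turns that recursion into a harmless TX_BUSY return. */
	fake_start_xmit(dev);

	dev->handling_irq = 0;
	pthread_mutex_unlock(&dev->lock);
}

int main(void)
{
	struct fake_dev dev = { PTHREAD_MUTEX_INITIALIZER, 0 };

	fake_interrupt(&dev);
	printf("no deadlock: xmit during irq returned busy\n");
	return 0;
}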
drivers/net/caif/Kconfig | +1 -1

@@ -15,7 +15,7 @@
 
 config CAIF_SPI_SLAVE
 	tristate "CAIF SPI transport driver for slave interface"
-	depends on CAIF
+	depends on CAIF && HAS_DMA
 	default n
 	---help---
 	The CAIF Link layer SPI Protocol driver for Slave SPI interface.
drivers/net/netxen/netxen_nic_main.c | +8 -1

@@ -2131,9 +2131,16 @@
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void netxen_nic_poll_controller(struct net_device *netdev)
 {
+	int ring;
+	struct nx_host_sds_ring *sds_ring;
 	struct netxen_adapter *adapter = netdev_priv(netdev);
+	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
 	disable_irq(adapter->irq);
-	netxen_intr(adapter->irq, adapter);
+	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+		sds_ring = &recv_ctx->sds_rings[ring];
+		netxen_intr(adapter->irq, sds_ring);
+	}
 	enable_irq(adapter->irq);
 }
 #endif
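The fix only makes sense given what the handler expects: netxen_intr() evidently takes a per-ring structure as its dev_id cookie, so invoking it by hand from the netpoll path must pass the same per-ring pointers, once per ring, not the adapter. A hypothetical userspace model of that contract (names are illustrative):

/* A shared handler interprets its cookie as the type it was registered
 * with; the poll controller must honor that, one call per ring. */
#include <stdio.h>

#define MAX_RINGS 4

struct ring    { int id; int work; };
struct adapter { int nrings; struct ring rings[MAX_RINGS]; };

/* Expects a struct ring *, exactly what was "registered". */
static void fake_intr(void *dev_id)
{
	struct ring *r = dev_id;

	r->work++;
	printf("serviced ring %d\n", r->id);
}

/* Correct poll controller: loop over rings, one call per cookie. */
static void fake_poll_controller(struct adapter *a)
{
	for (int i = 0; i < a->nrings; i++)
		fake_intr(&a->rings[i]);
}

int main(void)
{
	struct adapter a = { .nrings = 2,
			     .rings = { { .id = 0 }, { .id = 1 } } };

	/* fake_intr(&a) would misinterpret the adapter as a ring. */
	fake_poll_controller(&a);
	return 0;
}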
drivers/net/phy/phy_device.c | +2

@@ -466,6 +466,8 @@
 
 	phydev->interface = interface;
 
+	phydev->state = PHY_READY;
+
 	/* Do initial configuration here, now that
 	 * we have certain key parameters
 	 * (dev_flags and interface) */
drivers/net/pxa168_eth.c | +28 -30

@@ -654,15 +654,15 @@
 	/* Assignment of Tx CTRP of given queue */
 	tx_curr_desc = pep->tx_curr_desc_q;
 	wrl(pep, ETH_C_TX_DESC_1,
-	    (u32) ((struct tx_desc *)pep->tx_desc_dma + tx_curr_desc));
+	    (u32) (pep->tx_desc_dma + tx_curr_desc * sizeof(struct tx_desc)));
 
 	/* Assignment of Rx CRDP of given queue */
 	rx_curr_desc = pep->rx_curr_desc_q;
 	wrl(pep, ETH_C_RX_DESC_0,
-	    (u32) ((struct rx_desc *)pep->rx_desc_dma + rx_curr_desc));
+	    (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));
 
 	wrl(pep, ETH_F_RX_DESC_0,
-	    (u32) ((struct rx_desc *)pep->rx_desc_dma + rx_curr_desc));
+	    (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));
 
 	/* Clear all interrupts */
 	wrl(pep, INT_CAUSE, 0);
@@ -1350,7 +1350,7 @@
 {
 	struct pxa168_eth_private *pep = netdev_priv(dev);
 	if (pep->phy != NULL)
-		return phy_mii_ioctl(pep->phy, if_mii(ifr), cmd);
+		return phy_mii_ioctl(pep->phy, ifr, cmd);
 
 	return -EOPNOTSUPP;
 }
@@ -1414,10 +1414,8 @@
 {
 	struct pxa168_eth_private *pep = netdev_priv(dev);
 
-	if (pep->pd != NULL) {
-		if (pep->pd->init)
-			pep->pd->init();
-	}
+	if (pep->pd->init)
+		pep->pd->init();
 	pep->phy = phy_scan(pep, pep->pd->phy_addr & 0x1f);
 	if (pep->phy != NULL)
 		phy_init(pep, pep->pd->speed, pep->pd->duplex);
@@ -1497,7 +1499,7 @@
 	dev = alloc_etherdev(sizeof(struct pxa168_eth_private));
 	if (!dev) {
 		err = -ENOMEM;
-		goto out;
+		goto err_clk;
 	}
 
 	platform_set_drvdata(pdev, dev);
@@ -1507,12 +1509,12 @@
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (res == NULL) {
 		err = -ENODEV;
-		goto out;
+		goto err_netdev;
 	}
 	pep->base = ioremap(res->start, res->end - res->start + 1);
 	if (pep->base == NULL) {
 		err = -ENOMEM;
-		goto out;
+		goto err_netdev;
 	}
 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 	BUG_ON(!res);
@@ -1549,7 +1551,7 @@
 	pep->smi_bus = mdiobus_alloc();
 	if (pep->smi_bus == NULL) {
 		err = -ENOMEM;
-		goto out;
+		goto err_base;
 	}
 	pep->smi_bus->priv = pep;
 	pep->smi_bus->name = "pxa168_eth smi";
@@ -1558,31 +1560,31 @@
 	snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id);
 	pep->smi_bus->parent = &pdev->dev;
 	pep->smi_bus->phy_mask = 0xffffffff;
-	if (mdiobus_register(pep->smi_bus) < 0) {
-		err = -ENOMEM;
-		goto out;
-	}
+	err = mdiobus_register(pep->smi_bus);
+	if (err)
+		goto err_free_mdio;
+
 	pxa168_init_hw(pep);
 	err = ethernet_phy_setup(dev);
 	if (err)
-		goto out;
+		goto err_mdiobus;
 	SET_NETDEV_DEV(dev, &pdev->dev);
 	err = register_netdev(dev);
 	if (err)
-		goto out;
+		goto err_mdiobus;
 	return 0;
-out:
-	if (pep->clk) {
-		clk_disable(pep->clk);
-		clk_put(pep->clk);
-		pep->clk = NULL;
-	}
-	if (pep->base) {
-		iounmap(pep->base);
-		pep->base = NULL;
-	}
-	if (dev)
-		free_netdev(dev);
+
+err_mdiobus:
+	mdiobus_unregister(pep->smi_bus);
+err_free_mdio:
+	mdiobus_free(pep->smi_bus);
+err_base:
+	iounmap(pep->base);
+err_netdev:
+	free_netdev(dev);
+err_clk:
+	clk_disable(clk);
+	clk_put(clk);
 	return err;
 }
 
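The rewritten error path is the standard kernel "goto ladder": each failure jumps to a label that unwinds only what was already set up, in reverse order of acquisition, replacing the old catch-all out: label that had to re-test every resource. A minimal sketch of the shape, with hypothetical resources standing in for the clock, netdev, iomap and MDIO bus:

/* Goto-unwind ladder: each label undoes exactly the steps that
 * succeeded before the failure, in reverse order. */
#include <stdio.h>
#include <stdlib.h>

static int fake_probe(void)
{
	int err;
	char *a, *b;

	a = malloc(16);			/* step 1 */
	if (!a) {
		err = -1;
		goto err_out;
	}

	b = malloc(16);			/* step 2 */
	if (!b) {
		err = -1;
		goto err_free_a;
	}

	if (1 /* pretend step 3 failed */) {
		err = -1;
		goto err_free_b;
	}

	return 0;			/* success: nothing unwound */

err_free_b:				/* reverse order of setup */
	free(b);
err_free_a:
	free(a);
err_out:
	return err;
}

int main(void)
{
	printf("probe: %d\n", fake_probe());
	return 0;
}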
drivers/net/qlcnic/qlcnic_main.c | +8 -1

@@ -2188,9 +2188,16 @@
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void qlcnic_poll_controller(struct net_device *netdev)
 {
+	int ring;
+	struct qlcnic_host_sds_ring *sds_ring;
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
+	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
 	disable_irq(adapter->irq);
-	qlcnic_intr(adapter->irq, adapter);
+	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+		sds_ring = &recv_ctx->sds_rings[ring];
+		qlcnic_intr(adapter->irq, sds_ring);
+	}
 	enable_irq(adapter->irq);
 }
 #endif
drivers/net/qlge/qlge_main.c | +2 -2

@@ -3919,12 +3919,12 @@
 	for (i = 0; i < qdev->rss_ring_count; i++)
 		netif_napi_del(&qdev->rx_ring[i].napi);
 
-	ql_free_rx_buffers(qdev);
-
 	status = ql_adapter_reset(qdev);
 	if (status)
 		netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
 			  qdev->func);
+	ql_free_rx_buffers(qdev);
+
 	return status;
 }
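The reorder matters because the rx buffers are DMA targets: freeing them while the chip can still write into them is a use-after-free, so the chip is reset first and the buffers freed only afterwards. A toy userspace model of the same ordering, with a thread standing in for the DMA engine (all names hypothetical):

/* Stop the agent that writes into the buffers before freeing them.
 * Build with -lpthread. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static int   running = 1;
static char *rx_buf;

static void *dma_engine(void *arg)
{
	(void)arg;
	while (__atomic_load_n(&running, __ATOMIC_ACQUIRE))
		rx_buf[0] = 0x5a;	/* "device" writes into the buffer */
	return NULL;
}

int main(void)
{
	pthread_t t;

	rx_buf = malloc(64);
	pthread_create(&t, NULL, dma_engine, NULL);

	/* Wrong order would be: free(rx_buf), then stop the engine. */
	__atomic_store_n(&running, 0, __ATOMIC_RELEASE);	/* "reset chip" */
	pthread_join(t, NULL);					/* quiesce */
	free(rx_buf);						/* now safe */

	printf("buffers freed after device was quiesced\n");
	return 0;
}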
include/net/tcp.h | +14 -4

@@ -268,11 +268,21 @@
 	return seq3 - seq2 >= seq1 - seq2;
 }
 
-static inline int tcp_too_many_orphans(struct sock *sk, int num)
+static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
 {
-	return (num > sysctl_tcp_max_orphans) ||
-		(sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
-		 atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]);
+	struct percpu_counter *ocp = sk->sk_prot->orphan_count;
+	int orphans = percpu_counter_read_positive(ocp);
+
+	if (orphans << shift > sysctl_tcp_max_orphans) {
+		orphans = percpu_counter_sum_positive(ocp);
+		if (orphans << shift > sysctl_tcp_max_orphans)
+			return true;
+	}
+
+	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
+	    atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])
+		return true;
+	return false;
 }
 
 /* syncookies: remember time of last synqueue overflow */
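The per-cpu skew this combats: percpu_counter_read_positive() is an O(1) approximation that can drift from the true total by the unflushed per-cpu deltas, so a socket could be killed on a phantom overshoot. The helper now pays for the exact percpu_counter_sum_positive() only when the cheap read crosses the limit, and takes the penalty as a shift so both phases apply it (which is why tcp_timer.c below now passes a shift instead of a pre-shifted count). A hypothetical model of the two-phase check:

/* Approximate-then-exact counter check, modeling percpu_counter
 * semantics in plain C. */
#include <stdbool.h>
#include <stdio.h>

#define NCPUS 4

struct fake_percpu_counter {
	long global;		/* batched total, possibly stale */
	long percpu[NCPUS];	/* unflushed per-cpu deltas */
};

static long approx_read(struct fake_percpu_counter *c)
{
	return c->global;	/* O(1), may over- or under-shoot */
}

static long exact_sum(struct fake_percpu_counter *c)
{
	long sum = c->global;	/* O(ncpus), precise */

	for (int i = 0; i < NCPUS; i++)
		sum += c->percpu[i];
	return sum;
}

static bool too_many(struct fake_percpu_counter *c, int shift, long limit)
{
	long n = approx_read(c);

	if ((n << shift) > limit)	/* fast path is usually enough */
		return (exact_sum(c) << shift) > limit;	/* confirm precisely */
	return false;
}

int main(void)
{
	/* The approximation says 100, but the deltas pull it back to 88. */
	struct fake_percpu_counter c = { 100, { -3, -3, -3, -3 } };

	printf("limit 90: %s\n", too_many(&c, 0, 90) ? "kill" : "keep");
	return 0;
}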
net/ax25/ax25_ds_timer.c | +1 -1

@@ -112,8 +112,8 @@
 	if (sk) {
 		sock_hold(sk);
 		ax25_destroy_socket(ax25);
-		sock_put(sk);
 		bh_unlock_sock(sk);
+		sock_put(sk);
 	} else
 		ax25_destroy_socket(ax25);
 	return;
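The one-line move encodes a general rule: sock_put() may drop the last reference and free the sock, and the lock that bh_unlock_sock() releases lives inside that sock, so the unlock has to come before the final put. A hypothetical single-threaded sketch of the same hazard:

/* The lock lives inside the refcounted object; dropping the last
 * reference first would leave the unlock touching freed memory.
 * Build with -lpthread. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_sock {
	pthread_mutex_t lock;
	int refcnt;
};

static void fake_sock_put(struct fake_sock *sk)
{
	if (--sk->refcnt == 0) {
		pthread_mutex_destroy(&sk->lock);
		free(sk);		/* lock is gone with the object */
	}
}

int main(void)
{
	struct fake_sock *sk = malloc(sizeof(*sk));

	pthread_mutex_init(&sk->lock, NULL);
	sk->refcnt = 1;

	pthread_mutex_lock(&sk->lock);
	/* ... destruction work under the lock ... */
	pthread_mutex_unlock(&sk->lock);	/* must come first */
	fake_sock_put(sk);			/* may free sk and its lock */

	printf("unlocked before the final put\n");
	return 0;
}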
net/bridge/br_netfilter.c | +1 -1

@@ -162,8 +162,8 @@
 		if (tmp) {
 			memcpy(tmp, nf_bridge, sizeof(struct nf_bridge_info));
 			atomic_set(&tmp->use, 1);
-			nf_bridge_put(nf_bridge);
 		}
+		nf_bridge_put(nf_bridge);
 		nf_bridge = tmp;
 	}
 	return nf_bridge;
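The leak was the classic replace-with-copy pattern gone wrong: the old reference has to be dropped whether or not the replacement allocation succeeded; releasing it only on the success path leaked it whenever the allocation failed. A small hypothetical model:

/* When replacing a refcounted object with a private copy, put the old
 * reference unconditionally, on the failure path too. */
#include <stdlib.h>
#include <string.h>

struct blob { int use; char data[32]; };

static void blob_put(struct blob *b)
{
	if (--b->use == 0)
		free(b);
}

static struct blob *unshare(struct blob *old)
{
	struct blob *tmp = malloc(sizeof(*tmp));

	if (tmp) {
		memcpy(tmp, old, sizeof(*tmp));
		tmp->use = 1;
	}
	blob_put(old);		/* also runs when malloc failed */
	return tmp;		/* NULL on failure, but nothing leaked */
}

int main(void)
{
	struct blob *b = malloc(sizeof(*b));

	b->use = 1;
	b = unshare(b);
	if (b)
		blob_put(b);
	return 0;
}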
net/caif/cfrfml.c | +1 -1

@@ -7,7 +7,7 @@
 #include <linux/stddef.h>
 #include <linux/spinlock.h>
 #include <linux/slab.h>
-#include <linux/unaligned/le_byteshift.h>
+#include <asm/unaligned.h>
 #include <net/caif/caif_layer.h>
 #include <net/caif/cfsrvl.h>
 #include <net/caif/cfpkt.h>
net/ipv4/tcp.c | +10 -22

@@ -451,7 +451,8 @@
 			if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
 				mask |= POLLOUT | POLLWRNORM;
 		}
-	}
+	} else
+		mask |= POLLOUT | POLLWRNORM;
 
 	if (tp->urg_data & TCP_URG_VALID)
 		mask |= POLLPRI;
@@ -2012,11 +2011,8 @@
 		}
 	}
 	if (sk->sk_state != TCP_CLOSE) {
-		int orphan_count = percpu_counter_read_positive(
-						sk->sk_prot->orphan_count);
-
 		sk_mem_reclaim(sk);
-		if (tcp_too_many_orphans(sk, orphan_count)) {
+		if (tcp_too_many_orphans(sk, 0)) {
 			if (net_ratelimit())
 				printk(KERN_INFO "TCP: too many of orphaned "
 				       "sockets\n");
@@ -3210,7 +3212,7 @@
 {
 	struct sk_buff *skb = NULL;
 	unsigned long nr_pages, limit;
-	int order, i, max_share;
+	int i, max_share, cnt;
 	unsigned long jiffy = jiffies;
 
 	BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
@@ -3259,22 +3261,12 @@
 		INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
 	}
 
-	/* Try to be a bit smarter and adjust defaults depending
-	 * on available memory.
-	 */
-	for (order = 0; ((1 << order) << PAGE_SHIFT) <
-			(tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
-			order++)
-		;
-	if (order >= 4) {
-		tcp_death_row.sysctl_max_tw_buckets = 180000;
-		sysctl_tcp_max_orphans = 4096 << (order - 4);
-		sysctl_max_syn_backlog = 1024;
-	} else if (order < 3) {
-		tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
-		sysctl_tcp_max_orphans >>= (3 - order);
-		sysctl_max_syn_backlog = 128;
-	}
+
+	cnt = tcp_hashinfo.ehash_mask + 1;
+
+	tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
+	sysctl_tcp_max_orphans = cnt / 2;
+	sysctl_max_syn_backlog = max(128, cnt / 256);
 
 	/* Set the pressure threshold to be a fraction of global memory that
 	 * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
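The last hunk replaces the old page-order heuristic with defaults that scale directly with the established-hash size. A worked example of the arithmetic, for a hypothetical ehash of 65536 entries (the kernel's max() is type-checked; the macro below is only a stand-in):

/* New tcp_init() sysctl defaults, computed for an assumed ehash size. */
#include <stdio.h>

#define max(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	int cnt = 65536;			/* tcp_hashinfo.ehash_mask + 1 (assumed) */

	int max_tw_buckets = cnt / 2;		 /* 32768 */
	int max_orphans    = cnt / 2;		 /* 32768 */
	int syn_backlog    = max(128, cnt / 256);/* max(128, 256) = 256 */

	printf("tcp_max_tw_buckets = %d\n", max_tw_buckets);
	printf("tcp_max_orphans    = %d\n", max_orphans);
	printf("max_syn_backlog    = %d\n", syn_backlog);
	return 0;
}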
net/ipv4/tcp_cong.c | +3 -2

@@ -196,10 +196,10 @@
 int tcp_set_allowed_congestion_control(char *val)
 {
 	struct tcp_congestion_ops *ca;
-	char *clone, *name;
+	char *saved_clone, *clone, *name;
 	int ret = 0;
 
-	clone = kstrdup(val, GFP_USER);
+	saved_clone = clone = kstrdup(val, GFP_USER);
 	if (!clone)
 		return -ENOMEM;
 
@@ -226,6 +226,7 @@
 	}
 out:
 	spin_unlock(&tcp_cong_list_lock);
+	kfree(saved_clone);
 
 	return ret;
 }
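The leak exists because strsep() advances the pointer it parses with; by the end of the loop, clone no longer points at the kstrdup'd allocation (it is NULL), so only a saved copy of the original pointer can be freed. The same pattern in plain userspace C:

/* strsep() consumes its pointer argument; keep the original around so
 * the buffer can be freed afterwards. */
#define _DEFAULT_SOURCE		/* for strsep() on glibc */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *saved_clone, *clone, *name;

	saved_clone = clone = strdup("reno cubic vegas");
	if (!clone)
		return -1;

	while ((name = strsep(&clone, " ")) && *name)
		printf("allowed: %s\n", name);

	/* clone is now NULL; free(clone) here would leak the buffer. */
	free(saved_clone);
	return 0;
}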
net/ipv4/tcp_timer.c | +4 -4

@@ -66,18 +66,18 @@
 static int tcp_out_of_resources(struct sock *sk, int do_reset)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	int orphans = percpu_counter_read_positive(&tcp_orphan_count);
+	int shift = 0;
 
 	/* If peer does not open window for long time, or did not transmit
 	 * anything for long time, penalize it. */
 	if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
-		orphans <<= 1;
+		shift++;
 
 	/* If some dubious ICMP arrived, penalize even more. */
 	if (sk->sk_err_soft)
-		orphans <<= 1;
+		shift++;
 
-	if (tcp_too_many_orphans(sk, orphans)) {
+	if (tcp_too_many_orphans(sk, shift)) {
 		if (net_ratelimit())
 			printk(KERN_INFO "Out of socket memory\n");
 
net/l2tp/l2tp_eth.c | +1 -1

@@ -132,7 +132,7 @@
 		printk("\n");
 	}
 
-	if (data_len < ETH_HLEN)
+	if (!pskb_may_pull(skb, ETH_HLEN))
 		goto error;
 
 	secpath_reset(skb);
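pskb_may_pull() is the right test here because the ethernet header must not just be covered by the total length but actually reside in the skb's linear data area before it can be dereferenced; a bare data_len comparison says nothing about paged data. One pitfall worth flagging with length arguments like this: writing sizeof(ETH_HLEN) would pass the size of the constant's type, not its value. A two-line demonstration (ETH_HLEN redefined locally for illustration):

/* sizeof applied to an integer constant yields sizeof(int), not the
 * constant's value. */
#include <stdio.h>

#define ETH_HLEN 14	/* total octets in an ethernet header */

int main(void)
{
	printf("ETH_HLEN         = %d\n", ETH_HLEN);		/* 14 */
	printf("sizeof(ETH_HLEN) = %zu\n", sizeof(ETH_HLEN));	/* 4 on most ABIs */
	return 0;
}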