Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6

* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6:
Broadcom 4400 resume small fix
b44: src_desc->addr is little-endian
e100: fix irq leak on suspend/resume
bonding: ARP monitoring broken on x86_64
ehea: Fixed missing tasklet_kill() call
ehea: Fixed wrong jumbo frames status query
82596 warning fixes
FS_ENET: OF-related fixup for FEC and SCC MAC's
net: ifb error path loop fix
b44: Fix frequent link changes

+83 -40
+4 -3
drivers/net/82596.c
···
 	short length = skb->len;
 	dev->trans_start = jiffies;
 
-	DEB(DEB_STARTTX,printk(KERN_DEBUG "%s: i596_start_xmit(%x,%x) called\n", dev->name,
-		skb->len, (unsigned int)skb->data));
+	DEB(DEB_STARTTX,printk(KERN_DEBUG "%s: i596_start_xmit(%x,%p) called\n",
+		dev->name, skb->len, skb->data));
 
 	if (skb->len < ETH_ZLEN) {
 		if (skb_padto(skb, ETH_ZLEN))
···
 	dev->priv = (void *)(dev->mem_start);
 
 	lp = dev->priv;
-	DEB(DEB_INIT,printk(KERN_DEBUG "%s: lp at 0x%08lx (%d bytes), lp->scb at 0x%08lx\n",
+	DEB(DEB_INIT,printk(KERN_DEBUG "%s: lp at 0x%08lx (%zd bytes), "
+		"lp->scb at 0x%08lx\n",
 		dev->name, (unsigned long)lp,
 		sizeof(struct i596_private), (unsigned long)&lp->scb));
 	memset((void *) lp, 0, sizeof(struct i596_private));
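Editor's note: the 82596 hunks are printk format-warning fixes: a pointer is printed with %p instead of being cast to an integer, and a sizeof() result is printed with a size_t conversion. A minimal user-space sketch of the same rule follows (printf stands in for printk; the struct name is made up for illustration):

#include <stdio.h>

struct i596_private_example { char pad[256]; };	/* stand-in struct */

int main(void)
{
	struct i596_private_example lp;

	/* %p takes the pointer as-is; no cast to an integer width needed */
	printf("lp at %p\n", (void *)&lp);

	/* sizeof() yields size_t, which matches %zu (the patch uses %zd) */
	printf("lp is %zu bytes\n", sizeof(lp));
	return 0;
}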
+33 -17
drivers/net/b44.c
···
 
 static void b44_halt(struct b44 *);
 static void b44_init_rings(struct b44 *);
+
+#define B44_FULL_RESET 1
+#define B44_FULL_RESET_SKIP_PHY 2
+#define B44_PARTIAL_RESET 3
+
 static void b44_init_hw(struct b44 *, int);
 
 static int dma_desc_align_mask;
···
 	spin_lock_irqsave(&bp->lock, flags);
 	b44_halt(bp);
 	b44_init_rings(bp);
-	b44_init_hw(bp, 1);
+	b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
 	netif_wake_queue(bp->dev);
 	spin_unlock_irqrestore(&bp->lock, flags);
 	done = 1;
···
 
 	b44_halt(bp);
 	b44_init_rings(bp);
-	b44_init_hw(bp, 1);
+	b44_init_hw(bp, B44_FULL_RESET);
 
 	spin_unlock_irq(&bp->lock);
 
···
 	b44_halt(bp);
 	dev->mtu = new_mtu;
 	b44_init_rings(bp);
-	b44_init_hw(bp, 1);
+	b44_init_hw(bp, B44_FULL_RESET);
 	spin_unlock_irq(&bp->lock);
 
 	b44_enable_ints(bp);
···
  * packet processing. Invoked with bp->lock held.
  */
 static void __b44_set_rx_mode(struct net_device *);
-static void b44_init_hw(struct b44 *bp, int full_reset)
+static void b44_init_hw(struct b44 *bp, int reset_kind)
 {
 	u32 val;
 
 	b44_chip_reset(bp);
-	if (full_reset) {
+	if (reset_kind == B44_FULL_RESET) {
 		b44_phy_reset(bp);
 		b44_setup_phy(bp);
 	}
···
 	bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
 
 	bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
-	if (full_reset) {
+	if (reset_kind == B44_PARTIAL_RESET) {
+		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
+				      (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
+	} else {
 		bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
 		bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
 		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
···
 		bp->rx_prod = bp->rx_pending;
 
 		bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
-	} else {
-		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
-				      (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
 	}
 
 	val = br32(bp, B44_ENET_CTRL);
···
 		goto out;
 
 	b44_init_rings(bp);
-	b44_init_hw(bp, 1);
+	b44_init_hw(bp, B44_FULL_RESET);
 
 	b44_check_phy(bp);
 
···
 	netif_poll_enable(dev);
 
 	if (bp->flags & B44_FLAG_WOL_ENABLE) {
-		b44_init_hw(bp, 0);
+		b44_init_hw(bp, B44_PARTIAL_RESET);
 		b44_setup_wol(bp);
 	}
 
···
 
 	b44_halt(bp);
 	b44_init_rings(bp);
-	b44_init_hw(bp, 1);
+	b44_init_hw(bp, B44_FULL_RESET);
 	netif_wake_queue(bp->dev);
 	spin_unlock_irq(&bp->lock);
 
···
 	if (bp->flags & B44_FLAG_PAUSE_AUTO) {
 		b44_halt(bp);
 		b44_init_rings(bp);
-		b44_init_hw(bp, 1);
+		b44_init_hw(bp, B44_FULL_RESET);
 	} else {
 		__b44_set_flow_ctrl(bp, bp->flags);
 	}
···
 
 	free_irq(dev->irq, dev);
 	if (bp->flags & B44_FLAG_WOL_ENABLE) {
-		b44_init_hw(bp, 0);
+		b44_init_hw(bp, B44_PARTIAL_RESET);
 		b44_setup_wol(bp);
 	}
 	pci_disable_device(pdev);
···
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct b44 *bp = netdev_priv(dev);
+	int rc = 0;
 
 	pci_restore_state(pdev);
-	pci_enable_device(pdev);
+	rc = pci_enable_device(pdev);
+	if (rc) {
+		printk(KERN_ERR PFX "%s: pci_enable_device failed\n",
+			dev->name);
+		return rc;
+	}
+
 	pci_set_master(pdev);
 
 	if (!netif_running(dev))
 		return 0;
 
-	if (request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev))
+	rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
+	if (rc) {
 		printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
+		pci_disable_device(pdev);
+		return rc;
+	}
 
 	spin_lock_irq(&bp->lock);
 
 	b44_init_rings(bp);
-	b44_init_hw(bp, 1);
+	b44_init_hw(bp, B44_FULL_RESET);
 	netif_device_attach(bp->dev);
 	spin_unlock_irq(&bp->lock);
 
+4 -3
drivers/net/bonding/bonding.h
···
 	struct slave *next;
 	struct slave *prev;
 	int delay;
-	u32 jiffies;
-	u32 last_arp_rx;
+	unsigned long jiffies;
+	unsigned long last_arp_rx;
 	s8 link;	/* one of BOND_LINK_XXXX */
 	s8 state;	/* one of BOND_STATE_XXXX */
 	u32 original_flags;
···
 	return bond->params.arp_validate & (1 << slave->state);
 }
 
-extern inline u32 slave_last_rx(struct bonding *bond, struct slave *slave)
+extern inline unsigned long slave_last_rx(struct bonding *bond,
+					struct slave *slave)
 {
 	if (slave_do_arp_validate(bond, slave))
 		return slave->last_arp_rx;
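Editor's note: the bonding fix widens the slave timestamps because jiffies is an unsigned long (64 bits on x86_64); truncating a snapshot to u32 breaks the wrap-safe time comparisons the ARP monitor relies on. A minimal sketch of the pattern, assuming the usual <linux/jiffies.h> helpers (the struct and function names below are illustrative, not bonding's):

#include <linux/jiffies.h>
#include <linux/types.h>

/* illustrative only -- not the real struct slave */
struct example_slave {
	unsigned long last_arp_rx;	/* must match the width of jiffies */
};

static bool example_slave_timed_out(struct example_slave *s,
				    unsigned long timeout_jiffies)
{
	/*
	 * time_after() is only wrap-safe when both operands are full
	 * unsigned long jiffies values; storing the snapshot in a u32
	 * silently discards the upper 32 bits on 64-bit kernels.
	 */
	return time_after(jiffies, s->last_arp_rx + timeout_jiffies);
}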
+2
drivers/net/e100.c
···
 	del_timer_sync(&nic->watchdog);
 	netif_carrier_off(nic->netdev);
 
+	netif_device_detach(netdev);
 	pci_save_state(pdev);
 
 	if ((nic->flags & wol_magic) | e100_asf(nic)) {
···
 	}
 
 	pci_disable_device(pdev);
+	free_irq(pdev->irq, netdev);
 	pci_set_power_state(pdev, PCI_D3hot);
 
 	return 0;
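Editor's note: the e100 hunks detach the net device and release the IRQ on suspend, so the resume path can re-request it without leaking a reference on every suspend/resume cycle. The resume-side counterpart is not part of the hunks shown here; the sketch below only illustrates the general pairing, with made-up example_ names:

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/pci.h>

static irqreturn_t example_intr(int irq, void *dev_id)
{
	return IRQ_HANDLED;		/* stub handler for the sketch */
}

static int example_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	netif_device_detach(netdev);	/* stop the stack touching the hw */
	pci_save_state(pdev);
	pci_disable_device(pdev);
	free_irq(pdev->irq, netdev);	/* balance the request_irq() below */
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int example_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err)
		return err;
	err = request_irq(pdev->irq, example_intr, IRQF_SHARED,
			  netdev->name, netdev);
	if (err)
		return err;
	netif_device_attach(netdev);
	return 0;
}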
+1 -1
drivers/net/ehea/ehea.h
···
 #include <asm/io.h>
 
 #define DRV_NAME "ehea"
-#define DRV_VERSION "EHEA_0044"
+#define DRV_VERSION "EHEA_0045"
 
 #define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \
 	| NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
+24 -7
drivers/net/ehea/ehea_main.c
···
 	struct ehea_adapter *adapter = port->adapter;
 	struct hcp_ehea_port_cb4 *cb4;
 	u32 *dn_log_port_id;
+	int jumbo = 0;
 
 	sema_init(&port->port_lock, 1);
 	port->state = EHEA_PORT_DOWN;
···
 	if (!cb4) {
 		ehea_error("no mem for cb4");
 	} else {
-		cb4->jumbo_frame = 1;
-		hret = ehea_h_modify_ehea_port(adapter->handle,
-					       port->logical_port_id,
-					       H_PORT_CB4, H_PORT_CB4_JUMBO,
-					       cb4);
-		if (hret != H_SUCCESS) {
-			ehea_info("Jumbo frames not activated");
+		hret = ehea_h_query_ehea_port(adapter->handle,
+					      port->logical_port_id,
+					      H_PORT_CB4,
+					      H_PORT_CB4_JUMBO, cb4);
+
+		if (hret == H_SUCCESS) {
+			if (cb4->jumbo_frame)
+				jumbo = 1;
+			else {
+				cb4->jumbo_frame = 1;
+				hret = ehea_h_modify_ehea_port(adapter->handle,
+							       port->
+							       logical_port_id,
+							       H_PORT_CB4,
+							       H_PORT_CB4_JUMBO,
+							       cb4);
+				if (hret == H_SUCCESS)
+					jumbo = 1;
+			}
 		}
 		kfree(cb4);
 	}
···
 		ehea_error("register_netdev failed. ret=%d", ret);
 		goto out_free;
 	}
+
+	ehea_info("%s: Jumbo frames are %sabled", dev->name,
+		  jumbo == 1 ? "en" : "dis");
 
 	port->netdev = dev;
 	ret = 0;
···
 	destroy_workqueue(adapter->ehea_wq);
 
 	ibmebus_free_irq(NULL, adapter->neq->attr.ist1, adapter);
+	tasklet_kill(&adapter->neq_tasklet);
 
 	ehea_destroy_eq(adapter->neq);
 
+9 -4
drivers/net/fs_enet/mac-fec.c
···
 	fep->interrupt = platform_get_irq_byname(pdev,"interrupt");
 	if (fep->interrupt < 0)
 		return -EINVAL;
-
+
 	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
-	fep->fec.fecp =(void*)r->start;
+	fep->fec.fecp = ioremap(r->start, r->end - r->start + 1);
 
 	if(fep->fec.fecp == NULL)
 		return -EINVAL;
···
 	 * Clear any outstanding interrupt.
 	 */
 	FW(fecp, ievent, 0xffc0);
+#ifndef CONFIG_PPC_MERGE
 	FW(fecp, ivec, (fep->interrupt / 2) << 29);
-
+#else
+	FW(fecp, ivec, (virq_to_hw(fep->interrupt) / 2) << 29);
+#endif
 
 	/*
-	 * adjust to speed (only for DUET & RMII)
+	 * adjust to speed (only for DUET & RMII)
 	 */
 #ifdef CONFIG_DUET
 	if (fpi->use_rmii) {
···
 
 static void pre_request_irq(struct net_device *dev, int irq)
 {
+#ifndef CONFIG_PPC_MERGE
 	immap_t *immap = fs_enet_immap;
 	u32 siel;
 
···
 		siel &= ~(0x80000000 >> (irq & ~1));
 		out_be32(&immap->im_siu_conf.sc_siel, siel);
 	}
+#endif
 }
 
 static void post_free_irq(struct net_device *dev, int irq)
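Editor's note: both FS_ENET files replace a bare cast of the resource's physical start address with ioremap(), which is what makes the register block usable through a CPU pointer on the OF/powerpc path. A generic sketch of that pattern follows (driver and function names are illustrative, not the real fs_enet code), including the iounmap() that a remove or error path would need:

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

/* illustrative probe fragment */
static void __iomem *example_map_regs(struct platform_device *pdev)
{
	struct resource *r;

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	if (!r)
		return NULL;

	/* map the physical range; never dereference r->start directly */
	return ioremap(r->start, r->end - r->start + 1);
}

/* the matching teardown a remove/error path would need */
static void example_unmap_regs(void __iomem *regs)
{
	if (regs)
		iounmap(regs);
}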
+4 -2
drivers/net/fs_enet/mac-scc.c
···
 		return -EINVAL;
 
 	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
-	fep->scc.sccp = (void *)r->start;
+	fep->scc.sccp = ioremap(r->start, r->end - r->start + 1);
 
 	if (fep->scc.sccp == NULL)
 		return -EINVAL;
 
 	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pram");
-	fep->scc.ep = (void *)r->start;
+	fep->scc.ep = ioremap(r->start, r->end - r->start + 1);
 
 	if (fep->scc.ep == NULL)
 		return -EINVAL;
···
 
 static void pre_request_irq(struct net_device *dev, int irq)
 {
+#ifndef CONFIG_PPC_MERGE
 	immap_t *immap = fs_enet_immap;
 	u32 siel;
 
···
 		siel &= ~(0x80000000 >> (irq & ~1));
 		out_be32(&immap->im_siu_conf.sc_siel, siel);
 	}
+#endif
 }
 
 static void post_free_irq(struct net_device *dev, int irq)
+1 -2
drivers/net/ifb.c
···
 	for (i = 0; i < numifbs && !err; i++)
 		err = ifb_init_one(i);
 	if (err) {
-		i--;
-		while (--i >= 0)
+		while (i--)
 			ifb_free_one(i);
 	}
 
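Editor's note: the ifb fix replaces the two pre-decrements in the error path with a single while (i--), the usual idiom for unwinding a construction loop: the for loop leaves i one past the last index it attempted, so while (i--) walks every attempted index back down to 0. A standalone sketch of the idiom (malloc/free stand in for ifb_init_one()/ifb_free_one(); whether the failing index itself needs cleanup depends on what the real init helper leaves behind):

#include <stdlib.h>

#define NUNITS 8

static void *units[NUNITS];

int example_init_all(void)
{
	int i, err = 0;

	for (i = 0; i < NUNITS && !err; i++)
		err = (units[i] = malloc(64)) ? 0 : -1;

	if (err) {
		/*
		 * i is one past the last index the loop touched, so this
		 * revisits those indices in reverse; free(NULL) is harmless
		 * for the entry whose allocation failed.
		 */
		while (i--)
			free(units[i]);
		return -1;
	}
	return 0;
}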
+1 -1
drivers/net/phy/fixed.c
···
 	fixed_mdio_register_device(0, 100, 1);
 #endif
 
-#ifdef CONFIX_FIXED_MII_10_FDX
+#ifdef CONFIG_FIXED_MII_10_FDX
 	fixed_mdio_register_device(0, 10, 1);
 #endif
 	return 0;