Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6

* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6: (39 commits)
Remove Andrew Morton from list of net driver maintainers.
bonding: Acquire correct locks in alb for promisc change
bonding: Convert more locks to _bh, acquire rtnl, for new locking
bonding: Convert locks to _bh, rework alb locking for new locking
bonding: Convert miimon to new locking
bonding: Convert balance-rr transmit to new locking
Convert bonding timers to workqueues
Update MAINTAINERS to reflect my (jgarzik's) current efforts.
pasemi_mac: fix typo
      defxx.c: dfx_bus_uninit() is __devexit not __devinit
s390 MAINTAINERS
remove header_ops bug in qeth driver
sky2: crash on remove
MIPSnet: Delete all the useless debugging printks.
AR7 ethernet: small post-merge cleanups and fixes
mv643xx_eth: Hook up mv643xx_get_sset_count
mv643xx_eth: Remove obsolete checksum offload comment
mv643xx_eth: Merge drivers/net/mv643xx_eth.h into mv643xx_eth.c
mv643xx_eth: Remove unused register defines
mv643xx_eth: Clean up mv643xx_eth.h
...

+1330 -1284
+2 -29
MAINTAINERS
···
 L: linux-hams@vger.kernel.org
 S: Maintained
 
-8139CP 10/100 FAST ETHERNET DRIVER
-P: Jeff Garzik
-M: jgarzik@pobox.com
-S: Maintained
-
-8139TOO 10/100 FAST ETHERNET DRIVER
-P: Jeff Garzik
-M: jgarzik@pobox.com
-W: http://sourceforge.net/projects/gkernel/
-S: Maintained
-
 8169 10/100/1000 GIGABIT ETHERNET DRIVER
 P: Francois Romieu
 M: romieu@fr.zoreil.com
···
 M: kernel@wantstofly.org
 L: netdev@vger.kernel.org
 S: Maintained
-
-CIRRUS LOGIC GENERIC FBDEV DRIVER
-P: Jeff Garzik
-M: jgarzik@pobox.com
-L: linux-fbdev-devel@lists.sourceforge.net (subscribers-only)
-S: Odd Fixes
 
 CIRRUS LOGIC EP93XX OHCI USB HOST DRIVER
 P: Lennert Buytenhek
···
 L: linux-fbdev-devel@lists.sourceforge.net (subscribers-only)
 S: Maintained
 
-INTEL I8XX RANDOM NUMBER GENERATOR SUPPORT
-P: Jeff Garzik
-M: jgarzik@pobox.com
-W: http://sourceforge.net/projects/gkernel/
-S: Maintained
-
 INTEL IA32 MICROCODE UPDATE SUPPORT
 P: Tigran Aivazian
 M: tigran@aivazian.fsnet.co.uk
···
 S: Maintained
 
 NETWORK DEVICE DRIVERS
-P: Andrew Morton
-M: akpm@linux-foundation.org
 P: Jeff Garzik
 M: jgarzik@pobox.com
 L: netdev@vger.kernel.org
···
 S390 NETWORK DRIVERS
 P: Ursula Braun
 M: ubraun@linux.vnet.ibm.com
+P: Frank Blaschka
+M: blaschka@linux.vnet.ibm.com
 M: linux390@de.ibm.com
 L: linux-s390@vger.kernel.org
 W: http://www.ibm.com/developerworks/linux/linux390/
···
 M: hirofumi@mail.parknet.co.jp
 L: linux-kernel@vger.kernel.org
 S: Maintained
-
-VIA 82Cxxx AUDIO DRIVER (old OSS driver)
-P: Jeff Garzik
-S: Odd fixes
 
 VIA RHINE NETWORK DRIVER
 P: Roger Luethi
+8 -5
drivers/net/Kconfig
···
     depends on UCC_GETH
 
 config MV643XX_ETH
-    tristate "MV-643XX Ethernet support"
-    depends on MV64360 || MV64X60 || (PPC_MULTIPLATFORM && PPC32)
+    tristate "Marvell Discovery (643XX) and Orion ethernet support"
+    depends on MV64360 || MV64X60 || (PPC_MULTIPLATFORM && PPC32) || ARCH_ORION
     select MII
     help
-      This driver supports the gigabit Ethernet on the Marvell MV643XX
-      chipset which is used in the Momenco Ocelot C and Jaguar ATX and
-      Pegasos II, amongst other PPC and MIPS boards.
+      This driver supports the gigabit ethernet MACs in the
+      Marvell Discovery PPC/MIPS chipset family (MV643XX) and
+      in the Marvell Orion ARM SoC family.
+
+      Some boards that use the Discovery chipset are the Momenco
+      Ocelot C and Jaguar ATX and Pegasos II.
 
 config QLA3XXX
     tristate "QLogic QLA3XXX Network Driver Support"
+4 -2
drivers/net/bonding/bond_3ad.c
···
  * times out, and it selects an aggregator for the ports that are yet not
  * related to any aggregator, and selects the active aggregator for a bond.
  */
-void bond_3ad_state_machine_handler(struct bonding *bond)
+void bond_3ad_state_machine_handler(struct work_struct *work)
 {
+    struct bonding *bond = container_of(work, struct bonding,
+                        ad_work.work);
     struct port *port;
     struct aggregator *aggregator;
 
···
     }
 
 re_arm:
-    mod_timer(&(BOND_AD_INFO(bond).ad_timer), jiffies + ad_delta_in_ticks);
+    queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
 out:
     read_unlock(&bond->lock);
 }
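The hunk above is the template the whole series follows: the handler's argument becomes a struct work_struct *, the owning object is recovered via container_of() through the embedded delayed_work, and re-arming switches from mod_timer() to queue_delayed_work() on the bond's private workqueue. A minimal sketch of the pattern, using illustrative names (my_dev, poll_work, my_poll) rather than anything from the bonding code:

#include <linux/workqueue.h>

struct my_dev {
    struct workqueue_struct *wq;    /* created with create_singlethread_workqueue() */
    struct delayed_work poll_work;  /* replaces the old struct timer_list */
};

static void my_poll(struct work_struct *work)
{
    /* recover the containing object through the embedded work item */
    struct my_dev *d = container_of(work, struct my_dev, poll_work.work);

    /* ... periodic processing; runs in process context, so it may sleep ... */

    /* re-arm; replaces mod_timer(&timer, jiffies + HZ) */
    queue_delayed_work(d->wq, &d->poll_work, HZ);
}

The point of the conversion is that the handler now runs in process context, so it may sleep and take mutexes such as RTNL, which a timer running in softirq context could not; that is what makes the locking rework in the rest of this series possible.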
+1 -1
drivers/net/bonding/bond_3ad.h
···
 void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution, int lacp_fast);
 int bond_3ad_bind_slave(struct slave *slave);
 void bond_3ad_unbind_slave(struct slave *slave);
-void bond_3ad_state_machine_handler(struct bonding *bond);
+void bond_3ad_state_machine_handler(struct work_struct *);
 void bond_3ad_adapter_speed_changed(struct slave *slave);
 void bond_3ad_adapter_duplex_changed(struct slave *slave);
 void bond_3ad_handle_link_change(struct slave *slave, char link);
+85 -25
drivers/net/bonding/bond_alb.c
···
 
 static inline void _lock_tx_hashtbl(struct bonding *bond)
 {
-    spin_lock(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
+    spin_lock_bh(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
 }
 
 static inline void _unlock_tx_hashtbl(struct bonding *bond)
 {
-    spin_unlock(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
+    spin_unlock_bh(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
 }
 
 /* Caller must hold tx_hashtbl lock */
···
 /*********************** rlb specific functions ***************************/
 static inline void _lock_rx_hashtbl(struct bonding *bond)
 {
-    spin_lock(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
+    spin_lock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
 }
 
 static inline void _unlock_rx_hashtbl(struct bonding *bond)
 {
-    spin_unlock(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
+    spin_unlock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
 }
 
 /* when an ARP REPLY is received from a client update its info
···
 
     _unlock_rx_hashtbl(bond);
 
-    write_lock(&bond->curr_slave_lock);
+    write_lock_bh(&bond->curr_slave_lock);
 
     if (slave != bond->curr_active_slave) {
         rlb_teach_disabled_mac_on_primary(bond, slave->dev->dev_addr);
     }
 
-    write_unlock(&bond->curr_slave_lock);
+    write_unlock_bh(&bond->curr_slave_lock);
 }
 
 static void rlb_update_client(struct rlb_client_info *client_info)
···
     return 0;
 }
 
-/* Caller must hold bond lock for write or curr_slave_lock for write*/
+/*
+ * Swap MAC addresses between two slaves.
+ *
+ * Called with RTNL held, and no other locks.
+ *
+ */
+
 static void alb_swap_mac_addr(struct bonding *bond, struct slave *slave1, struct slave *slave2)
 {
-    struct slave *disabled_slave = NULL;
     u8 tmp_mac_addr[ETH_ALEN];
-    int slaves_state_differ;
-
-    slaves_state_differ = (SLAVE_IS_OK(slave1) != SLAVE_IS_OK(slave2));
 
     memcpy(tmp_mac_addr, slave1->dev->dev_addr, ETH_ALEN);
     alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr, bond->alb_info.rlb_enabled);
     alb_set_slave_mac_addr(slave2, tmp_mac_addr, bond->alb_info.rlb_enabled);
+
+}
+
+/*
+ * Send learning packets after MAC address swap.
+ *
+ * Called with RTNL and bond->lock held for read.
+ */
+static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
+                struct slave *slave2)
+{
+    int slaves_state_differ = (SLAVE_IS_OK(slave1) != SLAVE_IS_OK(slave2));
+    struct slave *disabled_slave = NULL;
 
     /* fasten the change in the switch */
     if (SLAVE_IS_OK(slave1)) {
···
     }
 
     if (found) {
+        /* locking: needs RTNL and nothing else */
         alb_swap_mac_addr(bond, slave, tmp_slave);
+        alb_fasten_mac_swap(bond, slave, tmp_slave);
     }
   }
 }
···
     return 0;
 }
 
-void bond_alb_monitor(struct bonding *bond)
+void bond_alb_monitor(struct work_struct *work)
 {
+    struct bonding *bond = container_of(work, struct bonding,
+                        alb_work.work);
     struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
     struct slave *slave;
     int i;
···
 
     /* handle rlb stuff */
     if (bond_info->rlb_enabled) {
-        /* the following code changes the promiscuity of the
-         * the curr_active_slave. It needs to be locked with a
-         * write lock to protect from other code that also
-         * sets the promiscuity.
-         */
-        write_lock_bh(&bond->curr_slave_lock);
-
         if (bond_info->primary_is_promisc &&
             (++bond_info->rlb_promisc_timeout_counter >= RLB_PROMISC_TIMEOUT)) {
+
+            /*
+             * dev_set_promiscuity requires rtnl and
+             * nothing else.
+             */
+            read_unlock(&bond->lock);
+            rtnl_lock();
 
             bond_info->rlb_promisc_timeout_counter = 0;
 
···
              */
             dev_set_promiscuity(bond->curr_active_slave->dev, -1);
             bond_info->primary_is_promisc = 0;
-        }
 
-        write_unlock_bh(&bond->curr_slave_lock);
+            rtnl_unlock();
+            read_lock(&bond->lock);
+        }
 
         if (bond_info->rlb_rebalance) {
             bond_info->rlb_rebalance = 0;
···
     }
 
 re_arm:
-    mod_timer(&(bond_info->alb_timer), jiffies + alb_delta_in_ticks);
+    queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks);
 out:
     read_unlock(&bond->lock);
 }
···
     /* caller must hold the bond lock for write since the mac addresses
      * are compared and may be swapped.
      */
-    write_lock_bh(&bond->lock);
+    read_lock(&bond->lock);
 
     res = alb_handle_addr_collision_on_attach(bond, slave);
 
-    write_unlock_bh(&bond->lock);
+    read_unlock(&bond->lock);
 
     if (res) {
         return res;
···
  * Set the bond->curr_active_slave to @new_slave and handle
  * mac address swapping and promiscuity changes as needed.
  *
- * Caller must hold bond curr_slave_lock for write (or bond lock for write)
+ * If new_slave is NULL, caller must hold curr_slave_lock or
+ * bond->lock for write.
+ *
+ * If new_slave is not NULL, caller must hold RTNL, bond->lock for
+ * read and curr_slave_lock for write.  Processing here may sleep, so
+ * no other locks may be held.
  */
 void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave)
 {
     struct slave *swap_slave;
     int i;
+
+    if (new_slave)
+        ASSERT_RTNL();
 
     if (bond->curr_active_slave == new_slave) {
         return;
···
         }
     }
 
+    /*
+     * Arrange for swap_slave and new_slave to temporarily be
+     * ignored so we can mess with their MAC addresses without
+     * fear of interference from transmit activity.
+     */
+    if (swap_slave) {
+        tlb_clear_slave(bond, swap_slave, 1);
+    }
+    tlb_clear_slave(bond, new_slave, 1);
+
+    write_unlock_bh(&bond->curr_slave_lock);
+    read_unlock(&bond->lock);
+
     /* curr_active_slave must be set before calling alb_swap_mac_addr */
     if (swap_slave) {
         /* swap mac address */
···
         /* set the new_slave to the bond mac address */
         alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr,
                        bond->alb_info.rlb_enabled);
+    }
+
+    read_lock(&bond->lock);
+
+    if (swap_slave) {
+        alb_fasten_mac_swap(bond, swap_slave, new_slave);
+    } else {
         /* fasten bond mac on new current slave */
         alb_send_learning_packets(new_slave, bond->dev->dev_addr);
     }
+
+    write_lock_bh(&bond->curr_slave_lock);
 }
 
+/*
+ * Called with RTNL
+ */
 int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
 {
     struct bonding *bond = bond_dev->priv;
···
         }
     }
 
+    write_unlock_bh(&bond->curr_slave_lock);
+    read_unlock(&bond->lock);
+
     if (swap_slave) {
         alb_swap_mac_addr(bond, swap_slave, bond->curr_active_slave);
+        alb_fasten_mac_swap(bond, swap_slave, bond->curr_active_slave);
     } else {
         alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr,
                        bond->alb_info.rlb_enabled);
···
             rlb_req_update_slave_clients(bond, bond->curr_active_slave);
         }
     }
+
+    read_lock(&bond->lock);
+    write_lock_bh(&bond->curr_slave_lock);
 
     return 0;
 }
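The promiscuity hunk above illustrates the rule the new locking scheme enforces: dev_set_promiscuity() can sleep and requires RTNL, and RTNL ranks above bond->lock, so the monitor must drop its read lock, make the sleeping call under RTNL, then reacquire in the correct order. A hedged distillation of that unlock/relock dance (the helper name is hypothetical, not from the bonding code):

static void example_rlb_drop_promisc(struct bonding *bond)
{
    read_lock(&bond->lock);
    /* ... decide under the read lock that promiscuity must be dropped ... */
    read_unlock(&bond->lock);       /* drop before any sleeping call */

    rtnl_lock();                    /* dev_set_promiscuity needs rtnl, nothing else */
    dev_set_promiscuity(bond->curr_active_slave->dev, -1);
    rtnl_unlock();

    read_lock(&bond->lock);         /* reacquire in lock order: rtnl, then bond->lock */
    /* ... revalidate: slaves may have been added or removed meanwhile ... */
    read_unlock(&bond->lock);
}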
+1 -1
drivers/net/bonding/bond_alb.h
···
 void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char link);
 void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave);
 int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
-void bond_alb_monitor(struct bonding *bond);
+void bond_alb_monitor(struct work_struct *);
 int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr);
 void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id);
 #endif /* __BOND_ALB_H__ */
+201 -129
drivers/net/bonding/bond_main.c
···
     case BOND_MODE_TLB:
     case BOND_MODE_ALB:
         new_slave->state = BOND_STATE_ACTIVE;
-        if ((!bond->curr_active_slave) &&
-            (new_slave->link != BOND_LINK_DOWN)) {
-            /* first slave or no active slave yet, and this link
-             * is OK, so make this interface the active one
-             */
-            bond_change_active_slave(bond, new_slave);
-        } else {
-            bond_set_slave_inactive_flags(new_slave);
-        }
+        bond_set_slave_inactive_flags(new_slave);
         break;
     default:
         dprintk("This slave is always active in trunk mode\n");
···
         bond_alb_deinit_slave(bond, slave);
     }
 
-    if (oldcurrent == slave)
+    if (oldcurrent == slave) {
+        /*
+         * Note that we hold RTNL over this sequence, so there
+         * is no concern that another slave add/remove event
+         * will interfere.
+         */
+        write_unlock_bh(&bond->lock);
+        read_lock(&bond->lock);
+        write_lock_bh(&bond->curr_slave_lock);
+
         bond_select_active_slave(bond);
+
+        write_unlock_bh(&bond->curr_slave_lock);
+        read_unlock(&bond->lock);
+        write_lock_bh(&bond->lock);
+    }
 
     if (bond->slave_cnt == 0) {
         bond_set_carrier(bond);
···
  */
 void bond_destroy(struct bonding *bond)
 {
+    unregister_netdevice(bond->dev);
     bond_deinit(bond->dev);
     bond_destroy_sysfs_entry(bond);
-    unregister_netdevice(bond->dev);
 }
 
 /*
···
         return -EINVAL;
     }
 
-    write_lock_bh(&bond->lock);
+    read_lock(&bond->lock);
 
+    read_lock(&bond->curr_slave_lock);
     old_active = bond->curr_active_slave;
+    read_unlock(&bond->curr_slave_lock);
+
     new_active = bond_get_slave_by_dev(bond, slave_dev);
 
     /*
      * Changing to the current active: do nothing; return success.
      */
     if (new_active && (new_active == old_active)) {
-        write_unlock_bh(&bond->lock);
+        read_unlock(&bond->lock);
         return 0;
     }
 
···
         (old_active) &&
         (new_active->link == BOND_LINK_UP) &&
         IS_UP(new_active->dev)) {
+        write_lock_bh(&bond->curr_slave_lock);
         bond_change_active_slave(bond, new_active);
+        write_unlock_bh(&bond->curr_slave_lock);
     } else {
         res = -EINVAL;
     }
 
-    write_unlock_bh(&bond->lock);
+    read_unlock(&bond->lock);
 
     return res;
 }
···
     info->bond_mode = bond->params.mode;
     info->miimon = bond->params.miimon;
 
-    read_lock_bh(&bond->lock);
+    read_lock(&bond->lock);
     info->num_slaves = bond->slave_cnt;
-    read_unlock_bh(&bond->lock);
+    read_unlock(&bond->lock);
 
     return 0;
 }
···
         return -ENODEV;
     }
 
-    read_lock_bh(&bond->lock);
+    read_lock(&bond->lock);
 
     bond_for_each_slave(bond, slave, i) {
         if (i == (int)info->slave_id) {
···
         }
     }
 
-    read_unlock_bh(&bond->lock);
+    read_unlock(&bond->lock);
 
     if (found) {
         strcpy(info->slave_name, slave->dev->name);
···
 
 /*-------------------------------- Monitoring -------------------------------*/
 
-/* this function is called regularly to monitor each slave's link. */
-void bond_mii_monitor(struct net_device *bond_dev)
+/*
+ * if !have_locks, return nonzero if a failover is necessary.  if
+ * have_locks, do whatever failover activities are needed.
+ *
+ * This is to separate the inspection and failover steps for locking
+ * purposes; failover requires rtnl, but acquiring it for every
+ * inspection is undesirable, so a wrapper first does inspection, and
+ * then acquires the necessary locks and calls again to perform
+ * failover if needed.  Since all locks are dropped, a complete
+ * restart is needed between calls.
+ */
+static int __bond_mii_monitor(struct bonding *bond, int have_locks)
 {
-    struct bonding *bond = bond_dev->priv;
     struct slave *slave, *oldcurrent;
     int do_failover = 0;
-    int delta_in_ticks;
     int i;
 
-    read_lock(&bond->lock);
-
-    delta_in_ticks = (bond->params.miimon * HZ) / 1000;
-
-    if (bond->kill_timers) {
+    if (bond->slave_cnt == 0)
         goto out;
-    }
-
-    if (bond->slave_cnt == 0) {
-        goto re_arm;
-    }
 
     /* we will try to read the link status of each of our slaves, and
      * set their IFF_RUNNING flag appropriately. For each slave not
···
         switch (slave->link) {
         case BOND_LINK_UP:    /* the link was up */
             if (link_state == BMSR_LSTATUS) {
-                /* link stays up, nothing more to do */
+                if (!oldcurrent) {
+                    if (!have_locks)
+                        return 1;
+                    do_failover = 1;
+                }
                 break;
             } else { /* link going down */
                 slave->link = BOND_LINK_FAIL;
···
                        ": %s: link status down for %s "
                        "interface %s, disabling it in "
                        "%d ms.\n",
-                       bond_dev->name,
+                       bond->dev->name,
                        IS_UP(slave_dev)
                        ? ((bond->params.mode == BOND_MODE_ACTIVEBACKUP)
                          ? ((slave == oldcurrent)
···
             if (link_state != BMSR_LSTATUS) {
                 /* link stays down */
                 if (slave->delay <= 0) {
+                    if (!have_locks)
+                        return 1;
+
                     /* link down for too long time */
                     slave->link = BOND_LINK_DOWN;
 
···
                            ": %s: link status definitely "
                            "down for interface %s, "
                            "disabling it\n",
-                           bond_dev->name,
+                           bond->dev->name,
                            slave_dev->name);
 
                     /* notify ad that the link status has changed */
···
                 printk(KERN_INFO DRV_NAME
                        ": %s: link status up again after %d "
                        "ms for interface %s.\n",
-                       bond_dev->name,
+                       bond->dev->name,
                        (bond->params.downdelay - slave->delay) * bond->params.miimon,
                        slave_dev->name);
             }
···
                        ": %s: link status up for "
                        "interface %s, enabling it "
                        "in %d ms.\n",
-                       bond_dev->name,
+                       bond->dev->name,
                        slave_dev->name,
                        bond->params.updelay * bond->params.miimon);
             }
···
                 printk(KERN_INFO DRV_NAME
                        ": %s: link status down again after %d "
                        "ms for interface %s.\n",
-                       bond_dev->name,
+                       bond->dev->name,
                        (bond->params.updelay - slave->delay) * bond->params.miimon,
                        slave_dev->name);
             } else {
                 /* link stays up */
                 if (slave->delay == 0) {
+                    if (!have_locks)
+                        return 1;
+
                     /* now the link has been up for long time enough */
                     slave->link = BOND_LINK_UP;
                     slave->jiffies = jiffies;
···
                     printk(KERN_INFO DRV_NAME
                            ": %s: link status definitely "
                            "up for interface %s.\n",
-                           bond_dev->name,
+                           bond->dev->name,
                            slave_dev->name);
 
                     /* notify ad that the link status has changed */
···
             /* Should not happen */
             printk(KERN_ERR DRV_NAME
                    ": %s: Error: %s Illegal value (link=%d)\n",
-                   bond_dev->name,
+                   bond->dev->name,
                    slave->dev->name,
                    slave->link);
             goto out;
···
     } /* end of for */
 
     if (do_failover) {
-        write_lock(&bond->curr_slave_lock);
+        ASSERT_RTNL();
+
+        write_lock_bh(&bond->curr_slave_lock);
 
         bond_select_active_slave(bond);
 
-        write_unlock(&bond->curr_slave_lock);
+        write_unlock_bh(&bond->curr_slave_lock);
+
     } else
         bond_set_carrier(bond);
 
-re_arm:
-    if (bond->params.miimon) {
-        mod_timer(&bond->mii_timer, jiffies + delta_in_ticks);
-    }
 out:
-    read_unlock(&bond->lock);
+    return 0;
 }
 
+/*
+ * bond_mii_monitor
+ *
+ * Really a wrapper that splits the mii monitor into two phases: an
+ * inspection, then (if inspection indicates something needs to be
+ * done) an acquisition of appropriate locks followed by another pass
+ * to implement whatever link state changes are indicated.
+ */
+void bond_mii_monitor(struct work_struct *work)
+{
+    struct bonding *bond = container_of(work, struct bonding,
+                        mii_work.work);
+    unsigned long delay;
+
+    read_lock(&bond->lock);
+    if (bond->kill_timers) {
+        read_unlock(&bond->lock);
+        return;
+    }
+    if (__bond_mii_monitor(bond, 0)) {
+        read_unlock(&bond->lock);
+        rtnl_lock();
+        read_lock(&bond->lock);
+        __bond_mii_monitor(bond, 1);
+        rtnl_unlock();
+    }
+
+    delay = ((bond->params.miimon * HZ) / 1000) ? : 1;
+    read_unlock(&bond->lock);
+    queue_delayed_work(bond->wq, &bond->mii_work, delay);
+}
 
 static __be32 bond_glean_dev_ip(struct net_device *dev)
 {
···
  * arp is transmitted to generate traffic. see activebackup_arp_monitor for
  * arp monitoring in active backup mode.
  */
-void bond_loadbalance_arp_mon(struct net_device *bond_dev)
+void bond_loadbalance_arp_mon(struct work_struct *work)
 {
-    struct bonding *bond = bond_dev->priv;
+    struct bonding *bond = container_of(work, struct bonding,
+                        arp_work.work);
     struct slave *slave, *oldcurrent;
     int do_failover = 0;
     int delta_in_ticks;
···
                 printk(KERN_INFO DRV_NAME
                        ": %s: link status definitely "
                        "up for interface %s, ",
-                       bond_dev->name,
+                       bond->dev->name,
                        slave->dev->name);
                 do_failover = 1;
             } else {
                 printk(KERN_INFO DRV_NAME
                        ": %s: interface %s is now up\n",
-                       bond_dev->name,
+                       bond->dev->name,
                        slave->dev->name);
             }
         }
···
 
             printk(KERN_INFO DRV_NAME
                    ": %s: interface %s is now down.\n",
-                   bond_dev->name,
+                   bond->dev->name,
                    slave->dev->name);
 
             if (slave == oldcurrent) {
···
     }
 
     if (do_failover) {
-        write_lock(&bond->curr_slave_lock);
+        rtnl_lock();
+        write_lock_bh(&bond->curr_slave_lock);
 
         bond_select_active_slave(bond);
 
-        write_unlock(&bond->curr_slave_lock);
+        write_unlock_bh(&bond->curr_slave_lock);
+        rtnl_unlock();
+
     }
 
 re_arm:
-    if (bond->params.arp_interval) {
-        mod_timer(&bond->arp_timer, jiffies + delta_in_ticks);
-    }
+    if (bond->params.arp_interval)
+        queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
 out:
     read_unlock(&bond->lock);
 }
···
  * may have received.
  * see loadbalance_arp_monitor for arp monitoring in load balancing mode
  */
-void bond_activebackup_arp_mon(struct net_device *bond_dev)
+void bond_activebackup_arp_mon(struct work_struct *work)
 {
-    struct bonding *bond = bond_dev->priv;
+    struct bonding *bond = container_of(work, struct bonding,
+                        arp_work.work);
     struct slave *slave;
     int delta_in_ticks;
     int i;
···
 
             slave->link = BOND_LINK_UP;
 
-            write_lock(&bond->curr_slave_lock);
+            rtnl_lock();
+
+            write_lock_bh(&bond->curr_slave_lock);
 
             if ((!bond->curr_active_slave) &&
                 ((jiffies - slave->dev->trans_start) <= delta_in_ticks)) {
···
                 printk(KERN_INFO DRV_NAME
                        ": %s: %s is up and now the "
                        "active interface\n",
-                       bond_dev->name,
+                       bond->dev->name,
                        slave->dev->name);
                 netif_carrier_on(bond->dev);
             } else {
                 printk(KERN_INFO DRV_NAME
                        ": %s: backup interface %s is "
                        "now up\n",
-                       bond_dev->name,
+                       bond->dev->name,
                        slave->dev->name);
             }
 
-            write_unlock(&bond->curr_slave_lock);
+            write_unlock_bh(&bond->curr_slave_lock);
+            rtnl_unlock();
         }
     } else {
         read_lock(&bond->curr_slave_lock);
···
 
             printk(KERN_INFO DRV_NAME
                    ": %s: backup interface %s is now down\n",
-                   bond_dev->name,
+                   bond->dev->name,
                    slave->dev->name);
         } else {
             read_unlock(&bond->curr_slave_lock);
···
         printk(KERN_INFO DRV_NAME
                ": %s: link status down for active interface "
                "%s, disabling it\n",
-               bond_dev->name,
+               bond->dev->name,
                slave->dev->name);
 
-        write_lock(&bond->curr_slave_lock);
+        rtnl_lock();
+        write_lock_bh(&bond->curr_slave_lock);
 
         bond_select_active_slave(bond);
         slave = bond->curr_active_slave;
 
-        write_unlock(&bond->curr_slave_lock);
+        write_unlock_bh(&bond->curr_slave_lock);
+
+        rtnl_unlock();
 
         bond->current_arp_slave = slave;
 
···
         printk(KERN_INFO DRV_NAME
                ": %s: changing from interface %s to primary "
                "interface %s\n",
-               bond_dev->name,
+               bond->dev->name,
                slave->dev->name,
                bond->primary_slave->dev->name);
 
         /* primary is up so switch to it */
-        write_lock(&bond->curr_slave_lock);
+        rtnl_lock();
+        write_lock_bh(&bond->curr_slave_lock);
         bond_change_active_slave(bond, bond->primary_slave);
-        write_unlock(&bond->curr_slave_lock);
+        write_unlock_bh(&bond->curr_slave_lock);
+
+        rtnl_unlock();
 
         slave = bond->primary_slave;
         slave->jiffies = jiffies;
···
                 printk(KERN_INFO DRV_NAME
                        ": %s: backup interface %s is "
                        "now down.\n",
-                       bond_dev->name,
+                       bond->dev->name,
                        slave->dev->name);
             }
         }
···
 
 re_arm:
     if (bond->params.arp_interval) {
-        mod_timer(&bond->arp_timer, jiffies + delta_in_ticks);
+        queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
     }
 out:
     read_unlock(&bond->lock);
···
 
     /* make sure the bond won't be taken away */
     read_lock(&dev_base_lock);
-    read_lock_bh(&bond->lock);
+    read_lock(&bond->lock);
 
     if (*pos == 0) {
         return SEQ_START_TOKEN;
···
 {
     struct bonding *bond = seq->private;
 
-    read_unlock_bh(&bond->lock);
+    read_unlock(&bond->lock);
     read_unlock(&dev_base_lock);
 }
···
 static int bond_open(struct net_device *bond_dev)
 {
     struct bonding *bond = bond_dev->priv;
-    struct timer_list *mii_timer = &bond->mii_timer;
-    struct timer_list *arp_timer = &bond->arp_timer;
 
     bond->kill_timers = 0;
 
     if ((bond->params.mode == BOND_MODE_TLB) ||
         (bond->params.mode == BOND_MODE_ALB)) {
-        struct timer_list *alb_timer = &(BOND_ALB_INFO(bond).alb_timer);
-
         /* bond_alb_initialize must be called before the timer
          * is started.
          */
···
             return -1;
         }
 
-        init_timer(alb_timer);
-        alb_timer->expires = jiffies + 1;
-        alb_timer->data = (unsigned long)bond;
-        alb_timer->function = (void *)&bond_alb_monitor;
-        add_timer(alb_timer);
+        INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
+        queue_delayed_work(bond->wq, &bond->alb_work, 0);
     }
 
     if (bond->params.miimon) {  /* link check interval, in milliseconds. */
-        init_timer(mii_timer);
-        mii_timer->expires = jiffies + 1;
-        mii_timer->data = (unsigned long)bond_dev;
-        mii_timer->function = (void *)&bond_mii_monitor;
-        add_timer(mii_timer);
+        INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
+        queue_delayed_work(bond->wq, &bond->mii_work, 0);
     }
 
     if (bond->params.arp_interval) {  /* arp interval, in milliseconds. */
-        init_timer(arp_timer);
-        arp_timer->expires = jiffies + 1;
-        arp_timer->data = (unsigned long)bond_dev;
-        if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
-            arp_timer->function = (void *)&bond_activebackup_arp_mon;
-        } else {
-            arp_timer->function = (void *)&bond_loadbalance_arp_mon;
-        }
+        if (bond->params.mode == BOND_MODE_ACTIVEBACKUP)
+            INIT_DELAYED_WORK(&bond->arp_work,
+                      bond_activebackup_arp_mon);
+        else
+            INIT_DELAYED_WORK(&bond->arp_work,
+                      bond_loadbalance_arp_mon);
+
+        queue_delayed_work(bond->wq, &bond->arp_work, 0);
         if (bond->params.arp_validate)
             bond_register_arp(bond);
-
-        add_timer(arp_timer);
     }
 
     if (bond->params.mode == BOND_MODE_8023AD) {
-        struct timer_list *ad_timer = &(BOND_AD_INFO(bond).ad_timer);
-        init_timer(ad_timer);
-        ad_timer->expires = jiffies + 1;
-        ad_timer->data = (unsigned long)bond;
-        ad_timer->function = (void *)&bond_3ad_state_machine_handler;
-        add_timer(ad_timer);
-
+        INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
+        queue_delayed_work(bond->wq, &bond->ad_work, 0);
         /* register to receive LACPDUs */
         bond_register_lacpdu(bond);
     }
···
 
     write_unlock_bh(&bond->lock);
 
-    /* del_timer_sync must run without holding the bond->lock
-     * because a running timer might be trying to hold it too
-     */
-
     if (bond->params.miimon) {  /* link check interval, in milliseconds. */
-        del_timer_sync(&bond->mii_timer);
+        cancel_delayed_work(&bond->mii_work);
     }
 
     if (bond->params.arp_interval) {  /* arp interval, in milliseconds. */
-        del_timer_sync(&bond->arp_timer);
+        cancel_delayed_work(&bond->arp_work);
     }
 
     switch (bond->params.mode) {
     case BOND_MODE_8023AD:
-        del_timer_sync(&(BOND_AD_INFO(bond).ad_timer));
+        cancel_delayed_work(&bond->ad_work);
         break;
     case BOND_MODE_TLB:
     case BOND_MODE_ALB:
-        del_timer_sync(&(BOND_ALB_INFO(bond).alb_timer));
+        cancel_delayed_work(&bond->alb_work);
         break;
     default:
         break;
···
     if (mii->reg_num == 1) {
         struct bonding *bond = bond_dev->priv;
         mii->val_out = 0;
-        read_lock_bh(&bond->lock);
+        read_lock(&bond->lock);
         read_lock(&bond->curr_slave_lock);
         if (netif_carrier_ok(bond->dev)) {
             mii->val_out = BMSR_LSTATUS;
         }
         read_unlock(&bond->curr_slave_lock);
-        read_unlock_bh(&bond->lock);
+        read_unlock(&bond->lock);
     }
 
     return 0;
···
 {
     struct bonding *bond = bond_dev->priv;
     struct slave *slave, *start_at;
-    int i;
-    int res = 1;
+    int i, slave_no, res = 1;
 
     read_lock(&bond->lock);
 
···
         goto out;
     }
 
-    read_lock(&bond->curr_slave_lock);
-    slave = start_at = bond->curr_active_slave;
-    read_unlock(&bond->curr_slave_lock);
+    /*
+     * Concurrent TX may collide on rr_tx_counter; we accept that
+     * as being rare enough not to justify using an atomic op here
+     */
+    slave_no = bond->rr_tx_counter++ % bond->slave_cnt;
 
-    if (!slave) {
-        goto out;
+    bond_for_each_slave(bond, slave, i) {
+        slave_no--;
+        if (slave_no < 0) {
+            break;
+        }
     }
 
+    start_at = slave;
     bond_for_each_slave_from(bond, slave, i, start_at) {
         if (IS_UP(slave->dev) &&
             (slave->link == BOND_LINK_UP) &&
             (slave->state == BOND_STATE_ACTIVE)) {
             res = bond_dev_queue_xmit(bond, skb, slave->dev);
-
-            write_lock(&bond->curr_slave_lock);
-            bond->curr_active_slave = slave->next;
-            write_unlock(&bond->curr_slave_lock);
-
             break;
         }
     }
-
 
 out:
     if (res) {
···
 
     bond->params = *params; /* copy params struct */
 
+    bond->wq = create_singlethread_workqueue(bond_dev->name);
+    if (!bond->wq)
+        return -ENOMEM;
+
     /* Initialize pointers */
     bond->first_slave = NULL;
     bond->curr_active_slave = NULL;
···
     bond_mc_list_destroy(bond);
     /* Release the bonded slaves */
     bond_release_all(bond_dev);
-    bond_deinit(bond_dev);
     unregister_netdevice(bond_dev);
+    bond_deinit(bond_dev);
 }
 
 #ifdef CONFIG_PROC_FS
···
     return res;
 }
 
+static void bond_work_cancel_all(struct bonding *bond)
+{
+    write_lock_bh(&bond->lock);
+    bond->kill_timers = 1;
+    write_unlock_bh(&bond->lock);
+
+    if (bond->params.miimon && delayed_work_pending(&bond->mii_work))
+        cancel_delayed_work(&bond->mii_work);
+
+    if (bond->params.arp_interval && delayed_work_pending(&bond->arp_work))
+        cancel_delayed_work(&bond->arp_work);
+
+    if (bond->params.mode == BOND_MODE_ALB &&
+        delayed_work_pending(&bond->alb_work))
+        cancel_delayed_work(&bond->alb_work);
+
+    if (bond->params.mode == BOND_MODE_8023AD &&
+        delayed_work_pending(&bond->ad_work))
+        cancel_delayed_work(&bond->ad_work);
+}
+
 static int __init bonding_init(void)
 {
     int i;
     int res;
+    struct bonding *bond, *nxt;
 
     printk(KERN_INFO "%s", version);
 
···
 
     goto out;
 err:
+    list_for_each_entry_safe(bond, nxt, &bond_dev_list, bond_list) {
+        bond_work_cancel_all(bond);
+        destroy_workqueue(bond->wq);
+    }
+
     rtnl_lock();
     bond_free_all();
     bond_destroy_sysfs();
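__bond_mii_monitor() above is the series' two-phase idiom in full: a cheap inspection pass under the read lock reports whether RTNL is needed, and only then does the wrapper drop everything, take the locks in order, and run a complete second pass (a full rescan, since all state may have changed while unlocked). A compact, self-contained sketch under assumed names (my_bond, my_scan, link_flapped are illustrative, not from the driver):

#include <linux/rtnetlink.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct my_bond {
    rwlock_t lock;
    struct workqueue_struct *wq;
    struct delayed_work mii_work;
    int link_flapped;               /* stand-in for real link state */
};

/* have_locks == 0: inspect only, return nonzero if RTNL is required;
 * have_locks == 1: redo the scan and commit the change */
static int my_scan(struct my_bond *bond, int have_locks)
{
    if (bond->link_flapped) {
        if (!have_locks)
            return 1;
        bond->link_flapped = 0;     /* stand-in for the actual failover */
    }
    return 0;
}

static void my_monitor(struct work_struct *work)
{
    struct my_bond *bond = container_of(work, struct my_bond, mii_work.work);

    read_lock(&bond->lock);
    if (my_scan(bond, 0)) {
        read_unlock(&bond->lock);
        rtnl_lock();                /* lock order: rtnl before bond->lock */
        read_lock(&bond->lock);
        my_scan(bond, 1);           /* complete restart of the scan */
        rtnl_unlock();
    }
    read_unlock(&bond->lock);
    queue_delayed_work(bond->wq, &bond->mii_work, HZ);
}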
+31 -48
drivers/net/bonding/bond_sysfs.c
···
     int i, res = 0;
     struct bonding *bond = to_bond(d);
 
-    read_lock_bh(&bond->lock);
+    read_lock(&bond->lock);
     bond_for_each_slave(bond, slave, i) {
         if (res > (PAGE_SIZE - IFNAMSIZ)) {
             /* not enough space for another interface name */
···
         }
         res += sprintf(buf + res, "%s ", slave->dev->name);
     }
-    read_unlock_bh(&bond->lock);
+    read_unlock(&bond->lock);
     res += sprintf(buf + res, "\n");
     res++;
     return res;
···
 
     /* Got a slave name in ifname.  Is it already in the list? */
     found = 0;
-    read_lock_bh(&bond->lock);
+    read_lock(&bond->lock);
     bond_for_each_slave(bond, slave, i)
         if (strnicmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
             printk(KERN_ERR DRV_NAME
                    ": %s: Interface %s is already enslaved!\n",
                    bond->dev->name, ifname);
             ret = -EPERM;
-            read_unlock_bh(&bond->lock);
+            read_unlock(&bond->lock);
             goto out;
         }
 
-    read_unlock_bh(&bond->lock);
+    read_unlock(&bond->lock);
     printk(KERN_INFO DRV_NAME ": %s: Adding slave %s.\n",
            bond->dev->name, ifname);
     dev = dev_get_by_name(&init_net, ifname);
···
                "%s Disabling MII monitoring.\n",
                bond->dev->name, bond->dev->name);
         bond->params.miimon = 0;
-        /* Kill MII timer, else it brings bond's link down */
-        if (bond->arp_timer.function) {
-            printk(KERN_INFO DRV_NAME
-                   ": %s: Kill MII timer, else it brings bond's link down...\n",
-                   bond->dev->name);
-            del_timer_sync(&bond->mii_timer);
+        if (delayed_work_pending(&bond->mii_work)) {
+            cancel_delayed_work(&bond->mii_work);
+            flush_workqueue(bond->wq);
         }
     }
     if (!bond->params.arp_targets[0]) {
···
              * timer will get fired off when the open function
              * is called.
              */
-            if (bond->arp_timer.function) {
-                /* The timer's already set up, so fire it off */
-                mod_timer(&bond->arp_timer, jiffies + 1);
-            } else {
-                /* Set up the timer. */
-                init_timer(&bond->arp_timer);
-                bond->arp_timer.expires = jiffies + 1;
-                bond->arp_timer.data =
-                    (unsigned long) bond->dev;
-                if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
-                    bond->arp_timer.function =
-                        (void *)
-                        &bond_activebackup_arp_mon;
-                } else {
-                    bond->arp_timer.function =
-                        (void *)
-                        &bond_loadbalance_arp_mon;
-                }
-                add_timer(&bond->arp_timer);
+            if (!delayed_work_pending(&bond->arp_work)) {
+                if (bond->params.mode == BOND_MODE_ACTIVEBACKUP)
+                    INIT_DELAYED_WORK(&bond->arp_work,
+                              bond_activebackup_arp_mon);
+                else
+                    INIT_DELAYED_WORK(&bond->arp_work,
+                              bond_loadbalance_arp_mon);
+
+                queue_delayed_work(bond->wq, &bond->arp_work, 0);
             }
         }
 
···
             bond->params.arp_validate =
                 BOND_ARP_VALIDATE_NONE;
         }
-        /* Kill ARP timer, else it brings bond's link down */
-        if (bond->mii_timer.function) {
-            printk(KERN_INFO DRV_NAME
-                   ": %s: Kill ARP timer, else it brings bond's link down...\n",
-                   bond->dev->name);
-            del_timer_sync(&bond->arp_timer);
+        if (delayed_work_pending(&bond->arp_work)) {
+            cancel_delayed_work(&bond->arp_work);
+            flush_workqueue(bond->wq);
         }
     }
 
···
              * timer will get fired off when the open function
              * is called.
              */
-            if (bond->mii_timer.function) {
-                /* The timer's already set up, so fire it off */
-                mod_timer(&bond->mii_timer, jiffies + 1);
-            } else {
-                /* Set up the timer. */
-                init_timer(&bond->mii_timer);
-                bond->mii_timer.expires = jiffies + 1;
-                bond->mii_timer.data =
-                    (unsigned long) bond->dev;
-                bond->mii_timer.function =
-                    (void *) &bond_mii_monitor;
-                add_timer(&bond->mii_timer);
+            if (!delayed_work_pending(&bond->mii_work)) {
+                INIT_DELAYED_WORK(&bond->mii_work,
+                          bond_mii_monitor);
+                queue_delayed_work(bond->wq,
+                           &bond->mii_work, 0);
             }
         }
     }
···
     }
 out:
     write_unlock_bh(&bond->lock);
+
+    rtnl_unlock();
+
     return count;
 }
 static DEVICE_ATTR(primary, S_IRUGO | S_IWUSR, bonding_show_primary, bonding_store_primary);
···
     struct bonding *bond = to_bond(d);
     int count;
 
+    rtnl_lock();
 
     read_lock(&bond->curr_slave_lock);
     curr = bond->curr_active_slave;
···
     }
 out:
     write_unlock_bh(&bond->lock);
+    rtnl_unlock();
+
     return count;
 
 }
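The disable paths above pair cancel_delayed_work() with flush_workqueue(): cancel only removes an item that is queued but not yet running, so the flush is what lets a pass that is already executing drain before the sysfs store returns. A sketch of that shape, assuming the same wq/mii_work fields as struct bonding (the helper name is hypothetical):

static void example_stop_mii_monitor(struct bonding *bond)
{
    if (delayed_work_pending(&bond->mii_work)) {
        cancel_delayed_work(&bond->mii_work);   /* dequeue if still pending */
        flush_workqueue(bond->wq);              /* let a running pass finish */
    }
}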
+9 -5
drivers/net/bonding/bonding.h
···
     s32 slave_cnt; /* never change this value outside the attach/detach wrappers */
     rwlock_t lock;
     rwlock_t curr_slave_lock;
-    struct timer_list mii_timer;
-    struct timer_list arp_timer;
     s8 kill_timers;
     s8 send_grat_arp;
     s8 setup_by_slave;
···
     int (*xmit_hash_policy)(struct sk_buff *, struct net_device *, int);
     __be32 master_ip;
     u16 flags;
+    u16 rr_tx_counter;
     struct ad_bond_info ad_info;
     struct alb_bond_info alb_info;
     struct bond_params params;
     struct list_head vlan_list;
     struct vlan_group *vlgrp;
     struct packet_type arp_mon_pt;
+    struct workqueue_struct *wq;
+    struct delayed_work mii_work;
+    struct delayed_work arp_work;
+    struct delayed_work alb_work;
+    struct delayed_work ad_work;
 };
 
 /**
···
 void bond_destroy_slave_symlinks(struct net_device *master, struct net_device *slave);
 int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
 int bond_release(struct net_device *bond_dev, struct net_device *slave_dev);
-void bond_mii_monitor(struct net_device *bond_dev);
-void bond_loadbalance_arp_mon(struct net_device *bond_dev);
-void bond_activebackup_arp_mon(struct net_device *bond_dev);
+void bond_mii_monitor(struct work_struct *);
+void bond_loadbalance_arp_mon(struct work_struct *);
+void bond_activebackup_arp_mon(struct work_struct *);
 void bond_set_mode_ops(struct bonding *bond, int mode);
 int bond_parse_parm(char *mode_arg, struct bond_parm_tbl *tbl);
 void bond_select_active_slave(struct bonding *bond);
+12 -19
drivers/net/cpmac.c
···
     struct cpmac_desc *desc;
     struct cpmac_priv *priv = netdev_priv(dev);
 
-    if (unlikely(skb_padto(skb, ETH_ZLEN))) {
-        if (netif_msg_tx_err(priv) && net_ratelimit())
-            printk(KERN_WARNING
-                   "%s: tx: padding failed, dropping\n", dev->name);
-        spin_lock(&priv->lock);
-        dev->stats.tx_dropped++;
-        spin_unlock(&priv->lock);
-        return -ENOMEM;
-    }
+    if (unlikely(skb_padto(skb, ETH_ZLEN)))
+        return NETDEV_TX_OK;
 
     len = max(skb->len, ETH_ZLEN);
-    queue = skb_get_queue_mapping(skb);
+    queue = skb->queue_mapping;
 #ifdef CONFIG_NETDEVICES_MULTIQUEUE
     netif_stop_subqueue(dev, queue);
 #else
···
     desc = &priv->desc_ring[queue];
     if (unlikely(desc->dataflags & CPMAC_OWN)) {
         if (netif_msg_tx_err(priv) && net_ratelimit())
-            printk(KERN_WARNING "%s: tx dma ring full, dropping\n",
+            printk(KERN_WARNING "%s: tx dma ring full\n",
                    dev->name);
-        spin_lock(&priv->lock);
-        dev->stats.tx_dropped++;
-        spin_unlock(&priv->lock);
-        dev_kfree_skb_any(skb);
-        return -ENOMEM;
+        return NETDEV_TX_BUSY;
     }
 
     spin_lock(&priv->lock);
···
     cpmac_dump_skb(dev, skb);
     cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping);
 
-    return 0;
+    return NETDEV_TX_OK;
 }
 
 static void cpmac_end_xmit(struct net_device *dev, int queue)
···
     int i;
     if (unlikely(!priv->desc_ring))
         return;
-    for (i = 0; i < CPMAC_QUEUES; i++)
+    for (i = 0; i < CPMAC_QUEUES; i++) {
+        priv->desc_ring[i].dataflags = 0;
         if (priv->desc_ring[i].skb) {
             dev_kfree_skb_any(priv->desc_ring[i].skb);
             if (netif_subqueue_stopped(dev, i))
                 netif_wake_subqueue(dev, i);
         }
+    }
 }
 
 static void cpmac_hw_error(struct work_struct *work)
···
 #ifdef CONFIG_NETDEVICES_MULTIQUEUE
     for (i = 0; i < CPMAC_QUEUES; i++)
         if (priv->desc_ring[i].skb) {
+            priv->desc_ring[i].dataflags = 0;
             dev_kfree_skb_any(priv->desc_ring[i].skb);
             netif_wake_subqueue(dev, i);
             break;
         }
 #else
+    priv->desc_ring[0].dataflags = 0;
     if (priv->desc_ring[0].skb)
         dev_kfree_skb_any(priv->desc_ring[0].skb);
     netif_wake_queue(dev);
···
 {
     struct cpmac_priv *priv = netdev_priv(dev);
 
-    if (dev->flags && IFF_UP)
+    if (netif_running(dev))
         return -EBUSY;
     priv->ring_size = ring->rx_pending;
     return 0;
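The cpmac rework is really about the hard_start_xmit return contract: NETDEV_TX_OK tells the core the skb was consumed (which includes the padding-failure case, since skb_padto() frees the skb when it fails), while NETDEV_TX_BUSY means "not consumed, requeue and retry", so on that path the skb must not be freed or counted as dropped. A sketch of the contract, with ring_full() a hypothetical stand-in for the CPMAC_OWN test:

static int ring_full(struct net_device *dev);   /* hypothetical */

static int example_xmit(struct sk_buff *skb, struct net_device *dev)
{
    /* skb_padto() frees the skb on failure, so it is consumed either way */
    if (unlikely(skb_padto(skb, ETH_ZLEN)))
        return NETDEV_TX_OK;

    /* not consumed: the core keeps the skb and will retry it later,
     * so do not free it and do not bump tx_dropped */
    if (ring_full(dev))
        return NETDEV_TX_BUSY;

    /* ... hand the frame to the hardware ... */
    return NETDEV_TX_OK;
}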
+1 -1
drivers/net/defxx.c
···
  * Interrupts are disabled at the adapter bus-specific logic.
  */
 
-static void __devinit dfx_bus_uninit(struct net_device *dev)
+static void __devexit dfx_bus_uninit(struct net_device *dev)
 {
     DFX_board_t *bp = netdev_priv(dev);
     struct device *bdev = bp->bus_dev;
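For context on the one-line fix: __devinit marks code used only while probing devices and __devexit marks code used only while removing them, and either can be discarded from the image in configurations that never run those paths, so a teardown helper called from a driver's remove method must be __devexit, not __devinit. A sketch of the convention with hypothetical names:

static void __devexit example_bus_uninit(struct net_device *dev)
{
    /* remove-time teardown only; marking this __devinit could leave
     * the remove path calling code that was discarded after init */
}

static void __devexit example_remove(struct pci_dev *pdev)
{
    example_bus_uninit(pci_get_drvdata(pdev));
}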
+4 -40
drivers/net/mipsnet.c
···
                   int len)
 {
     uint32_t available_len = inl(mipsnet_reg_address(dev, rxDataCount));
+
     if (available_len < len)
         return -EFAULT;
 
···
     int count_to_go = skb->len;
     char *buf_ptr = skb->data;
 
-    pr_debug("%s: %s(): telling MIPSNET txDataCount(%d)\n",
-        dev->name, __FUNCTION__, skb->len);
-
     outl(skb->len, mipsnet_reg_address(dev, txDataCount));
-
-    pr_debug("%s: %s(): sending data to MIPSNET txDataBuffer(%d)\n",
-        dev->name, __FUNCTION__, skb->len);
 
     for (; count_to_go; buf_ptr++, count_to_go--)
         outb(*buf_ptr, mipsnet_reg_address(dev, txDataBuffer));
···
 
 static int mipsnet_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-    pr_debug("%s:%s(): transmitting %d bytes\n",
-        dev->name, __FUNCTION__, skb->len);
-
-    /* Only one packet at a time. Once TXDONE interrupt is serviced, the
+    /*
+     * Only one packet at a time. Once TXDONE interrupt is serviced, the
      * queue will be restarted.
      */
     netif_stop_queue(dev);
···
     skb->protocol = eth_type_trans(skb, dev);
     skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-    pr_debug("%s:%s(): pushing RXed data to kernel\n",
-        dev->name, __FUNCTION__);
     netif_rx(skb);
 
     dev->stats.rx_packets++;
···
     uint64_t interruptFlags;
 
     if (irq == dev->irq) {
-        pr_debug("%s:%s(): irq %d for device\n",
-            dev->name, __FUNCTION__, irq);
-
         retval = IRQ_HANDLED;
 
         interruptFlags =
             inl(mipsnet_reg_address(dev, interruptControl));
-        pr_debug("%s:%s(): intCtl=0x%016llx\n", dev->name,
-            __FUNCTION__, interruptFlags);
 
         if (interruptFlags & MIPSNET_INTCTL_TXDONE) {
-            pr_debug("%s:%s(): got TXDone\n",
-                dev->name, __FUNCTION__);
             outl(MIPSNET_INTCTL_TXDONE,
                  mipsnet_reg_address(dev, interruptControl));
             /* only one packet at a time, we are done. */
             netif_wake_queue(dev);
         } else if (interruptFlags & MIPSNET_INTCTL_RXDONE) {
-            pr_debug("%s:%s(): got RX data\n",
-                dev->name, __FUNCTION__);
             mipsnet_get_fromdev(dev,
                 inl(mipsnet_reg_address(dev, rxDataCount)));
-            pr_debug("%s:%s(): clearing RX int\n",
-                dev->name, __FUNCTION__);
             outl(MIPSNET_INTCTL_RXDONE,
                  mipsnet_reg_address(dev, interruptControl));
 
         } else if (interruptFlags & MIPSNET_INTCTL_TESTBIT) {
-            pr_debug("%s:%s(): got test interrupt\n",
-                dev->name, __FUNCTION__);
             /*
              * TESTBIT is cleared on read.
              * And takes effect after a write with 0
              */
             outl(0, mipsnet_reg_address(dev, interruptControl));
         } else {
-            pr_debug("%s:%s(): no valid fags 0x%016llx\n",
-                dev->name, __FUNCTION__, interruptFlags);
             /* Maybe shared IRQ, just ignore, no clearing. */
             retval = IRQ_NONE;
         }
···
 static int mipsnet_open(struct net_device *dev)
 {
     int err;
-    pr_debug("%s: mipsnet_open\n", dev->name);
 
     err = request_irq(dev->irq, &mipsnet_interrupt,
               IRQF_SHARED, dev->name, (void *) dev);
 
     if (err) {
-        pr_debug("%s: %s(): can't get irq %d\n",
-            dev->name, __FUNCTION__, dev->irq);
         release_region(dev->base_addr, MIPSNET_IO_EXTENT);
         return err;
     }
-
-    pr_debug("%s: %s(): got IO region at 0x%04lx and irq %d for dev.\n",
-        dev->name, __FUNCTION__, dev->base_addr, dev->irq);
-
 
     netif_start_queue(dev);
 
···
 
 static int mipsnet_close(struct net_device *dev)
 {
-    pr_debug("%s: %s()\n", dev->name, __FUNCTION__);
     netif_stop_queue(dev);
+
     return 0;
 }
···
 
     /* Get the io region now, get irq on open() */
     if (!request_region(netdev->base_addr, MIPSNET_IO_EXTENT, "mipsnet")) {
-        pr_debug("%s: %s(): IO region {start: 0x%04lux, len: %d} "
-            "for dev is not availble.\n", netdev->name,
-            __FUNCTION__, netdev->base_addr, MIPSNET_IO_EXTENT);
         err = -EBUSY;
         goto out_free_netdev;
     }
···
 
 static void __exit mipsnet_exit_module(void)
 {
-    pr_debug("MIPSNet Ethernet driver exiting\n");
-
     driver_unregister(&mipsnet_driver);
 }
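One piece of retained context is worth noting: the driver requests its interrupt with IRQF_SHARED, so when none of its own status bits are set the handler must return IRQ_NONE, letting the core offer the interrupt to the other handlers sharing the line. A minimal sketch (the my_* helpers are hypothetical):

static u32 my_read_status(struct net_device *dev);              /* hypothetical */
static void my_ack_and_handle(struct net_device *dev, u32 f);   /* hypothetical */

static irqreturn_t example_interrupt(int irq, void *dev_id)
{
    struct net_device *dev = dev_id;
    u32 flags = my_read_status(dev);

    if (!flags)
        return IRQ_NONE;        /* not ours: the line is shared */

    my_ack_and_handle(dev, flags);
    return IRQ_HANDLED;
}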
+670 -137
drivers/net/mv643xx_eth.c
···
 /*
- * drivers/net/mv643xx_eth.c - Driver for MV643XX ethernet ports
+ * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
  * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
  *
  * Based on the 64360 driver from:
···
 #include <linux/ethtool.h>
 #include <linux/platform_device.h>
 
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/mii.h>
+
+#include <linux/mv643xx_eth.h>
+
 #include <asm/io.h>
 #include <asm/types.h>
 #include <asm/pgtable.h>
 #include <asm/system.h>
 #include <asm/delay.h>
-#include "mv643xx_eth.h"
+#include <asm/dma-mapping.h>
+
+#define MV643XX_CHECKSUM_OFFLOAD_TX
+#define MV643XX_NAPI
+#define MV643XX_TX_FAST_REFILL
+#undef MV643XX_COAL
+
+/*
+ * Number of RX / TX descriptors on RX / TX rings.
+ * Note that allocating RX descriptors is done by allocating the RX
+ * ring AND a preallocated RX buffers (skb's) for each descriptor.
+ * The TX descriptors only allocates the TX descriptors ring,
+ * with no pre allocated TX buffers (skb's are allocated by higher layers.
+ */
+
+/* Default TX ring size is 1000 descriptors */
+#define MV643XX_DEFAULT_TX_QUEUE_SIZE 1000
+
+/* Default RX ring size is 400 descriptors */
+#define MV643XX_DEFAULT_RX_QUEUE_SIZE 400
+
+#define MV643XX_TX_COAL 100
+#ifdef MV643XX_COAL
+#define MV643XX_RX_COAL 100
+#endif
+
+#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
+#define MAX_DESCS_PER_SKB       (MAX_SKB_FRAGS + 1)
+#else
+#define MAX_DESCS_PER_SKB       1
+#endif
+
+#define ETH_VLAN_HLEN           4
+#define ETH_FCS_LEN             4
+#define ETH_HW_IP_ALIGN         2       /* hw aligns IP header */
+#define ETH_WRAPPER_LEN         (ETH_HW_IP_ALIGN + ETH_HLEN + \
+                                 ETH_VLAN_HLEN + ETH_FCS_LEN)
+#define ETH_RX_SKB_SIZE         (dev->mtu + ETH_WRAPPER_LEN + \
+                                 dma_get_cache_alignment())
+
+/*
+ * Registers shared between all ports.
+ */
+#define PHY_ADDR_REG            0x0000
+#define SMI_REG                 0x0004
+
+/*
+ * Per-port registers.
+ */
+#define PORT_CONFIG_REG(p)                      (0x0400 + ((p) << 10))
+#define PORT_CONFIG_EXTEND_REG(p)               (0x0404 + ((p) << 10))
+#define MAC_ADDR_LOW(p)                         (0x0414 + ((p) << 10))
+#define MAC_ADDR_HIGH(p)                        (0x0418 + ((p) << 10))
+#define SDMA_CONFIG_REG(p)                      (0x041c + ((p) << 10))
+#define PORT_SERIAL_CONTROL_REG(p)              (0x043c + ((p) << 10))
+#define PORT_STATUS_REG(p)                      (0x0444 + ((p) << 10))
+#define TRANSMIT_QUEUE_COMMAND_REG(p)           (0x0448 + ((p) << 10))
+#define MAXIMUM_TRANSMIT_UNIT(p)                (0x0458 + ((p) << 10))
+#define INTERRUPT_CAUSE_REG(p)                  (0x0460 + ((p) << 10))
+#define INTERRUPT_CAUSE_EXTEND_REG(p)           (0x0464 + ((p) << 10))
+#define INTERRUPT_MASK_REG(p)                   (0x0468 + ((p) << 10))
+#define INTERRUPT_EXTEND_MASK_REG(p)            (0x046c + ((p) << 10))
+#define TX_FIFO_URGENT_THRESHOLD_REG(p)         (0x0474 + ((p) << 10))
+#define RX_CURRENT_QUEUE_DESC_PTR_0(p)          (0x060c + ((p) << 10))
+#define RECEIVE_QUEUE_COMMAND_REG(p)            (0x0680 + ((p) << 10))
+#define TX_CURRENT_QUEUE_DESC_PTR_0(p)          (0x06c0 + ((p) << 10))
+#define MIB_COUNTERS_BASE(p)                    (0x1000 + ((p) << 7))
+#define DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(p)       (0x1400 + ((p) << 10))
+#define DA_FILTER_OTHER_MULTICAST_TABLE_BASE(p)         (0x1500 + ((p) << 10))
+#define DA_FILTER_UNICAST_TABLE_BASE(p)                 (0x1600 + ((p) << 10))
+
+/* These macros describe Ethernet Port configuration reg (Px_cR) bits */
+#define UNICAST_NORMAL_MODE             (0 << 0)
+#define UNICAST_PROMISCUOUS_MODE        (1 << 0)
+#define DEFAULT_RX_QUEUE(queue)         ((queue) << 1)
+#define DEFAULT_RX_ARP_QUEUE(queue)     ((queue) << 4)
+#define RECEIVE_BC_IF_NOT_IP_OR_ARP     (0 << 7)
+#define REJECT_BC_IF_NOT_IP_OR_ARP      (1 << 7)
+#define RECEIVE_BC_IF_IP                (0 << 8)
+#define REJECT_BC_IF_IP                 (1 << 8)
+#define RECEIVE_BC_IF_ARP               (0 << 9)
+#define REJECT_BC_IF_ARP                (1 << 9)
+#define TX_AM_NO_UPDATE_ERROR_SUMMARY   (1 << 12)
+#define CAPTURE_TCP_FRAMES_DIS          (0 << 14)
+#define CAPTURE_TCP_FRAMES_EN           (1 << 14)
+#define CAPTURE_UDP_FRAMES_DIS          (0 << 15)
+#define CAPTURE_UDP_FRAMES_EN           (1 << 15)
+#define DEFAULT_RX_TCP_QUEUE(queue)     ((queue) << 16)
+#define DEFAULT_RX_UDP_QUEUE(queue)     ((queue) << 19)
+#define DEFAULT_RX_BPDU_QUEUE(queue)    ((queue) << 22)
+
+#define PORT_CONFIG_DEFAULT_VALUE               \
+        UNICAST_NORMAL_MODE             |       \
+        DEFAULT_RX_QUEUE(0)             |       \
+        DEFAULT_RX_ARP_QUEUE(0)         |       \
+        RECEIVE_BC_IF_NOT_IP_OR_ARP     |       \
+        RECEIVE_BC_IF_IP                |       \
+        RECEIVE_BC_IF_ARP               |       \
+        CAPTURE_TCP_FRAMES_DIS          |       \
+        CAPTURE_UDP_FRAMES_DIS          |       \
+        DEFAULT_RX_TCP_QUEUE(0)         |       \
+        DEFAULT_RX_UDP_QUEUE(0)         |       \
+        DEFAULT_RX_BPDU_QUEUE(0)
+
+/* These macros describe Ethernet Port configuration extend reg (Px_cXR) bits*/
+#define CLASSIFY_EN                             (1 << 0)
+#define SPAN_BPDU_PACKETS_AS_NORMAL             (0 << 1)
+#define SPAN_BPDU_PACKETS_TO_RX_QUEUE_7         (1 << 1)
+#define PARTITION_DISABLE                       (0 << 2)
+#define PARTITION_ENABLE                        (1 << 2)
+
+#define PORT_CONFIG_EXTEND_DEFAULT_VALUE        \
+        SPAN_BPDU_PACKETS_AS_NORMAL     |       \
+        PARTITION_DISABLE
+
+/* These macros describe Ethernet Port Sdma configuration reg (SDCR) bits */
+#define RIFB                            (1 << 0)
+#define RX_BURST_SIZE_1_64BIT           (0 << 1)
+#define RX_BURST_SIZE_2_64BIT           (1 << 1)
+#define RX_BURST_SIZE_4_64BIT           (2 << 1)
+#define RX_BURST_SIZE_8_64BIT           (3 << 1)
+#define RX_BURST_SIZE_16_64BIT          (4 << 1)
+#define BLM_RX_NO_SWAP                  (1 << 4)
+#define BLM_RX_BYTE_SWAP                (0 << 4)
+#define BLM_TX_NO_SWAP                  (1 << 5)
+#define BLM_TX_BYTE_SWAP                (0 << 5)
+#define DESCRIPTORS_BYTE_SWAP           (1 << 6)
+#define DESCRIPTORS_NO_SWAP             (0 << 6)
+#define IPG_INT_RX(value)               (((value) & 0x3fff) << 8)
+#define TX_BURST_SIZE_1_64BIT           (0 << 22)
+#define TX_BURST_SIZE_2_64BIT           (1 << 22)
+#define TX_BURST_SIZE_4_64BIT           (2 << 22)
+#define TX_BURST_SIZE_8_64BIT           (3 << 22)
+#define TX_BURST_SIZE_16_64BIT          (4 << 22)
+
+#if defined(__BIG_ENDIAN)
+#define PORT_SDMA_CONFIG_DEFAULT_VALUE          \
+        RX_BURST_SIZE_4_64BIT   |       \
+        IPG_INT_RX(0)           |       \
+        TX_BURST_SIZE_4_64BIT
+#elif defined(__LITTLE_ENDIAN)
+#define PORT_SDMA_CONFIG_DEFAULT_VALUE          \
+        RX_BURST_SIZE_4_64BIT   |       \
+        BLM_RX_NO_SWAP          |       \
+        BLM_TX_NO_SWAP          |       \
+        IPG_INT_RX(0)           |       \
+        TX_BURST_SIZE_4_64BIT
+#else
+#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
+#endif
+
+/* These macros describe Ethernet Port serial control reg (PSCR) bits */
+#define SERIAL_PORT_DISABLE                     (0 << 0)
+#define SERIAL_PORT_ENABLE                      (1 << 0)
+#define DO_NOT_FORCE_LINK_PASS                  (0 << 1)
+#define FORCE_LINK_PASS                         (1 << 1)
+#define ENABLE_AUTO_NEG_FOR_DUPLX               (0 << 2)
+#define DISABLE_AUTO_NEG_FOR_DUPLX              (1 << 2)
+#define ENABLE_AUTO_NEG_FOR_FLOW_CTRL           (0 << 3)
+#define DISABLE_AUTO_NEG_FOR_FLOW_CTRL          (1 << 3)
+#define ADV_NO_FLOW_CTRL                        (0 << 4)
+#define ADV_SYMMETRIC_FLOW_CTRL                 (1 << 4)
+#define FORCE_FC_MODE_NO_PAUSE_DIS_TX           (0 << 5)
+#define FORCE_FC_MODE_TX_PAUSE_DIS              (1 << 5)
+#define FORCE_BP_MODE_NO_JAM                    (0 << 7)
+#define FORCE_BP_MODE_JAM_TX                    (1 << 7)
+#define FORCE_BP_MODE_JAM_TX_ON_RX_ERR          (2 << 7)
+#define SERIAL_PORT_CONTROL_RESERVED            (1 << 9)
+#define FORCE_LINK_FAIL                         (0 << 10)
+#define DO_NOT_FORCE_LINK_FAIL                  (1 << 10)
+#define RETRANSMIT_16_ATTEMPTS                  (0 << 11)
+#define RETRANSMIT_FOREVER                      (1 << 11)
+#define ENABLE_AUTO_NEG_SPEED_GMII              (0 << 13)
+#define DISABLE_AUTO_NEG_SPEED_GMII             (1 << 13)
+#define DTE_ADV_0                               (0 << 14)
+#define DTE_ADV_1                               (1 << 14)
+#define DISABLE_AUTO_NEG_BYPASS                 (0 << 15)
+#define ENABLE_AUTO_NEG_BYPASS                  (1 << 15)
+#define AUTO_NEG_NO_CHANGE                      (0 << 16)
+#define RESTART_AUTO_NEG                        (1 << 16)
+#define MAX_RX_PACKET_1518BYTE                  (0 << 17)
+#define MAX_RX_PACKET_1522BYTE                  (1 << 17)
+#define MAX_RX_PACKET_1552BYTE                  (2 << 17)
+#define MAX_RX_PACKET_9022BYTE                  (3 << 17)
+#define MAX_RX_PACKET_9192BYTE                  (4 << 17)
+#define MAX_RX_PACKET_9700BYTE                  (5 << 17)
+#define MAX_RX_PACKET_MASK                      (7 << 17)
+#define CLR_EXT_LOOPBACK                        (0 << 20)
+#define SET_EXT_LOOPBACK                        (1 << 20)
+#define SET_HALF_DUPLEX_MODE                    (0 << 21)
+#define SET_FULL_DUPLEX_MODE                    (1 << 21)
+#define DISABLE_FLOW_CTRL_TX_RX_IN_FULL_DUPLEX  (0 << 22)
+#define ENABLE_FLOW_CTRL_TX_RX_IN_FULL_DUPLEX   (1 << 22)
+#define SET_GMII_SPEED_TO_10_100                (0 << 23)
+#define SET_GMII_SPEED_TO_1000                  (1 << 23)
+#define SET_MII_SPEED_TO_10                     (0 << 24)
+#define SET_MII_SPEED_TO_100                    (1 << 24)
+
+#define PORT_SERIAL_CONTROL_DEFAULT_VALUE               \
+        DO_NOT_FORCE_LINK_PASS          |       \
+        ENABLE_AUTO_NEG_FOR_DUPLX       |       \
+        DISABLE_AUTO_NEG_FOR_FLOW_CTRL  |       \
+        ADV_SYMMETRIC_FLOW_CTRL         |       \
+        FORCE_FC_MODE_NO_PAUSE_DIS_TX   |       \
+        FORCE_BP_MODE_NO_JAM            |       \
+        (1 << 9) /* reserved */         |       \
+        DO_NOT_FORCE_LINK_FAIL          |       \
+        RETRANSMIT_16_ATTEMPTS          |       \
+        ENABLE_AUTO_NEG_SPEED_GMII      |       \
+        DTE_ADV_0                       |       \
+        DISABLE_AUTO_NEG_BYPASS         |       \
+        AUTO_NEG_NO_CHANGE              |       \
+        MAX_RX_PACKET_9700BYTE          |       \
+        CLR_EXT_LOOPBACK                |       \
+        SET_FULL_DUPLEX_MODE            |       \
+        ENABLE_FLOW_CTRL_TX_RX_IN_FULL_DUPLEX
+
+/* These macros describe Ethernet Serial Status reg (PSR) bits */
+#define PORT_STATUS_MODE_10_BIT         (1 << 0)
+#define PORT_STATUS_LINK_UP             (1 << 1)
+#define PORT_STATUS_FULL_DUPLEX         (1 << 2)
+#define PORT_STATUS_FLOW_CONTROL        (1 << 3)
+#define PORT_STATUS_GMII_1000           (1 << 4)
+#define PORT_STATUS_MII_100             (1 << 5)
+/* PSR bit 6 is undocumented */
+#define PORT_STATUS_TX_IN_PROGRESS      (1 << 7)
+#define PORT_STATUS_AUTONEG_BYPASSED    (1 << 8)
+#define PORT_STATUS_PARTITION           (1 << 9)
+#define PORT_STATUS_TX_FIFO_EMPTY       (1 << 10)
+/* PSR bits 11-31 are reserved */
+
+#define PORT_DEFAULT_TRANSMIT_QUEUE_SIZE        800
+#define PORT_DEFAULT_RECEIVE_QUEUE_SIZE         400
+
+#define DESC_SIZE                               64
+
+#define ETH_RX_QUEUES_ENABLED   (1 << 0)        /* use only Q0 for receive */
+#define ETH_TX_QUEUES_ENABLED   (1 << 0)        /* use only Q0 for transmit */
+
+#define ETH_INT_CAUSE_RX_DONE   (ETH_RX_QUEUES_ENABLED << 2)
+#define ETH_INT_CAUSE_RX_ERROR  (ETH_RX_QUEUES_ENABLED << 9)
+#define ETH_INT_CAUSE_RX        (ETH_INT_CAUSE_RX_DONE | ETH_INT_CAUSE_RX_ERROR)
+#define ETH_INT_CAUSE_EXT       0x00000002
+#define ETH_INT_UNMASK_ALL      (ETH_INT_CAUSE_RX | ETH_INT_CAUSE_EXT)
+
+#define ETH_INT_CAUSE_TX_DONE   (ETH_TX_QUEUES_ENABLED << 0)
+#define ETH_INT_CAUSE_TX_ERROR  (ETH_TX_QUEUES_ENABLED << 8)
+#define ETH_INT_CAUSE_TX        (ETH_INT_CAUSE_TX_DONE | ETH_INT_CAUSE_TX_ERROR)
+#define ETH_INT_CAUSE_PHY       0x00010000
+#define ETH_INT_CAUSE_STATE     0x00100000
+#define ETH_INT_UNMASK_ALL_EXT  (ETH_INT_CAUSE_TX | ETH_INT_CAUSE_PHY | \
+                                 ETH_INT_CAUSE_STATE)
+
+#define ETH_INT_MASK_ALL        0x00000000
+#define ETH_INT_MASK_ALL_EXT    0x00000000
+
+#define PHY_WAIT_ITERATIONS     1000    /* 1000 iterations * 10uS = 10mS max */
+#define PHY_WAIT_MICRO_SECONDS  10
+
+/* Buffer offset from buffer pointer */
+#define RX_BUF_OFFSET           0x2
+
+/* Gigabit Ethernet Unit Global Registers */
+
+/* MIB Counters register definitions */
+#define ETH_MIB_GOOD_OCTETS_RECEIVED_LOW        0x0
+#define ETH_MIB_GOOD_OCTETS_RECEIVED_HIGH       0x4
+#define ETH_MIB_BAD_OCTETS_RECEIVED             0x8
+#define ETH_MIB_INTERNAL_MAC_TRANSMIT_ERR       0xc
+#define ETH_MIB_GOOD_FRAMES_RECEIVED            0x10
+#define ETH_MIB_BAD_FRAMES_RECEIVED             0x14
+#define ETH_MIB_BROADCAST_FRAMES_RECEIVED       0x18
+#define ETH_MIB_MULTICAST_FRAMES_RECEIVED       0x1c
+#define ETH_MIB_FRAMES_64_OCTETS                0x20
+#define ETH_MIB_FRAMES_65_TO_127_OCTETS         0x24
+#define ETH_MIB_FRAMES_128_TO_255_OCTETS        0x28
+#define ETH_MIB_FRAMES_256_TO_511_OCTETS        0x2c
+#define ETH_MIB_FRAMES_512_TO_1023_OCTETS       0x30
+#define ETH_MIB_FRAMES_1024_TO_MAX_OCTETS       0x34
+#define ETH_MIB_GOOD_OCTETS_SENT_LOW            0x38
+#define ETH_MIB_GOOD_OCTETS_SENT_HIGH           0x3c
+#define ETH_MIB_GOOD_FRAMES_SENT                0x40
+#define ETH_MIB_EXCESSIVE_COLLISION             0x44
+#define ETH_MIB_MULTICAST_FRAMES_SENT           0x48
+#define ETH_MIB_BROADCAST_FRAMES_SENT           0x4c
+#define ETH_MIB_UNREC_MAC_CONTROL_RECEIVED      0x50
+#define ETH_MIB_FC_SENT                         0x54
+#define ETH_MIB_GOOD_FC_RECEIVED                0x58
+#define ETH_MIB_BAD_FC_RECEIVED                 0x5c
+#define ETH_MIB_UNDERSIZE_RECEIVED              0x60
+#define ETH_MIB_FRAGMENTS_RECEIVED              0x64
+#define ETH_MIB_OVERSIZE_RECEIVED               0x68
+#define ETH_MIB_JABBER_RECEIVED                 0x6c
+#define ETH_MIB_MAC_RECEIVE_ERROR               0x70
+#define ETH_MIB_BAD_CRC_EVENT                   0x74
+#define ETH_MIB_COLLISION                       0x78
+#define ETH_MIB_LATE_COLLISION                  0x7c
+
+/* Port serial status reg (PSR) */
+#define ETH_INTERFACE_PCM                       0x00000001
+#define ETH_LINK_IS_UP                          0x00000002
+#define ETH_PORT_AT_FULL_DUPLEX                 0x00000004
+#define ETH_RX_FLOW_CTRL_ENABLED                0x00000008
+#define ETH_GMII_SPEED_1000                     0x00000010
+#define ETH_MII_SPEED_100                       0x00000020
+#define ETH_TX_IN_PROGRESS                      0x00000080
+#define ETH_BYPASS_ACTIVE                       0x00000100
+#define ETH_PORT_AT_PARTITION_STATE             0x00000200
+#define ETH_PORT_TX_FIFO_EMPTY                  0x00000400
+
+/* SMI reg */
+#define ETH_SMI_BUSY            0x10000000      /* 0 - Write, 1 - Read */
+#define ETH_SMI_READ_VALID      0x08000000      /* 0 - Write, 1 - Read */
+#define ETH_SMI_OPCODE_WRITE    0               /* Completion of Read */
+#define ETH_SMI_OPCODE_READ     0x04000000      /* Operation is in progress */
+
+/* Interrupt Cause Register Bit Definitions */
+
+/* SDMA command status fields macros */
+
+/* Tx & Rx descriptors status */
+#define ETH_ERROR_SUMMARY       0x00000001
+
+/* Tx & Rx descriptors command */
+#define ETH_BUFFER_OWNED_BY_DMA 0x80000000
+
+/* Tx descriptors status */
+#define ETH_LC_ERROR            0
+#define ETH_UR_ERROR            0x00000002
+#define ETH_RL_ERROR            0x00000004
+#define ETH_LLC_SNAP_FORMAT     0x00000200
+
+/* Rx descriptors status */
+#define ETH_OVERRUN_ERROR               0x00000002
+#define ETH_MAX_FRAME_LENGTH_ERROR      0x00000004
+#define ETH_RESOURCE_ERROR              0x00000006
+#define ETH_VLAN_TAGGED                 0x00080000
+#define ETH_BPDU_FRAME                  0x00100000
+#define ETH_UDP_FRAME_OVER_IP_V_4       0x00200000
+#define ETH_OTHER_FRAME_TYPE            0x00400000
+#define ETH_LAYER_2_IS_ETH_V_2          0x00800000
+#define ETH_FRAME_TYPE_IP_V_4           0x01000000
+#define ETH_FRAME_HEADER_OK             0x02000000
+#define ETH_RX_LAST_DESC                0x04000000
+#define ETH_RX_FIRST_DESC               0x08000000
+#define ETH_UNKNOWN_DESTINATION_ADDR    0x10000000
+#define ETH_RX_ENABLE_INTERRUPT         0x20000000
+#define ETH_LAYER_4_CHECKSUM_OK         0x40000000
+
+/* Rx descriptors byte count */
+#define ETH_FRAME_FRAGMENTED            0x00000004
+
+/* Tx descriptors command */
+#define ETH_LAYER_4_CHECKSUM_FIRST_DESC 0x00000400
+#define ETH_FRAME_SET_TO_VLAN           0x00008000
+#define ETH_UDP_FRAME                   0x00010000
+#define ETH_GEN_TCP_UDP_CHECKSUM        0x00020000
+#define ETH_GEN_IP_V_4_CHECKSUM         0x00040000
+#define ETH_ZERO_PADDING                0x00080000
+#define ETH_TX_LAST_DESC                0x00100000
+#define ETH_TX_FIRST_DESC               0x00200000
+#define ETH_GEN_CRC                     0x00400000
+#define ETH_TX_ENABLE_INTERRUPT         0x00800000
+#define ETH_AUTO_MODE                   0x40000000
+
+#define ETH_TX_IHL_SHIFT                11
+
+/* typedefs */
+
+typedef enum _eth_func_ret_status {
+        ETH_OK,                 /* Returned as expected.                */
+        ETH_ERROR,              /* Fundamental error.                   */
+        ETH_RETRY,              /* Could not process request. Try later.*/
+        ETH_END_OF_JOB,         /* Ring has nothing to process.         */
+        ETH_QUEUE_FULL,         /* Ring resource error.                 */
+        ETH_QUEUE_LAST_RESOURCE /* Ring resources about to exhaust.
*/ 435 + } ETH_FUNC_RET_STATUS; 436 + 437 + typedef enum _eth_target { 438 + ETH_TARGET_DRAM, 439 + ETH_TARGET_DEVICE, 440 + ETH_TARGET_CBS, 441 + ETH_TARGET_PCI0, 442 + ETH_TARGET_PCI1 443 + } ETH_TARGET; 444 + 445 + /* These are for big-endian machines. Little endian needs different 446 + * definitions. 447 + */ 448 + #if defined(__BIG_ENDIAN) 449 + struct eth_rx_desc { 450 + u16 byte_cnt; /* Descriptor buffer byte count */ 451 + u16 buf_size; /* Buffer size */ 452 + u32 cmd_sts; /* Descriptor command status */ 453 + u32 next_desc_ptr; /* Next descriptor pointer */ 454 + u32 buf_ptr; /* Descriptor buffer pointer */ 455 + }; 456 + 457 + struct eth_tx_desc { 458 + u16 byte_cnt; /* buffer byte count */ 459 + u16 l4i_chk; /* CPU provided TCP checksum */ 460 + u32 cmd_sts; /* Command/status field */ 461 + u32 next_desc_ptr; /* Pointer to next descriptor */ 462 + u32 buf_ptr; /* pointer to buffer for this descriptor*/ 463 + }; 464 + #elif defined(__LITTLE_ENDIAN) 465 + struct eth_rx_desc { 466 + u32 cmd_sts; /* Descriptor command status */ 467 + u16 buf_size; /* Buffer size */ 468 + u16 byte_cnt; /* Descriptor buffer byte count */ 469 + u32 buf_ptr; /* Descriptor buffer pointer */ 470 + u32 next_desc_ptr; /* Next descriptor pointer */ 471 + }; 472 + 473 + struct eth_tx_desc { 474 + u32 cmd_sts; /* Command/status field */ 475 + u16 l4i_chk; /* CPU provided TCP checksum */ 476 + u16 byte_cnt; /* buffer byte count */ 477 + u32 buf_ptr; /* pointer to buffer for this descriptor*/ 478 + u32 next_desc_ptr; /* Pointer to next descriptor */ 479 + }; 480 + #else 481 + #error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined 482 + #endif 483 + 484 + /* Unified struct for Rx and Tx operations. The user is not required to */ 485 + /* be familier with neither Tx nor Rx descriptors. 
*/ 486 + struct pkt_info { 487 + unsigned short byte_cnt; /* Descriptor buffer byte count */ 488 + unsigned short l4i_chk; /* Tx CPU provided TCP Checksum */ 489 + unsigned int cmd_sts; /* Descriptor command status */ 490 + dma_addr_t buf_ptr; /* Descriptor buffer pointer */ 491 + struct sk_buff *return_info; /* User resource return information */ 492 + }; 493 + 494 + /* Ethernet port specific information */ 495 + struct mv643xx_mib_counters { 496 + u64 good_octets_received; 497 + u32 bad_octets_received; 498 + u32 internal_mac_transmit_err; 499 + u32 good_frames_received; 500 + u32 bad_frames_received; 501 + u32 broadcast_frames_received; 502 + u32 multicast_frames_received; 503 + u32 frames_64_octets; 504 + u32 frames_65_to_127_octets; 505 + u32 frames_128_to_255_octets; 506 + u32 frames_256_to_511_octets; 507 + u32 frames_512_to_1023_octets; 508 + u32 frames_1024_to_max_octets; 509 + u64 good_octets_sent; 510 + u32 good_frames_sent; 511 + u32 excessive_collision; 512 + u32 multicast_frames_sent; 513 + u32 broadcast_frames_sent; 514 + u32 unrec_mac_control_received; 515 + u32 fc_sent; 516 + u32 good_fc_received; 517 + u32 bad_fc_received; 518 + u32 undersize_received; 519 + u32 fragments_received; 520 + u32 oversize_received; 521 + u32 jabber_received; 522 + u32 mac_receive_error; 523 + u32 bad_crc_event; 524 + u32 collision; 525 + u32 late_collision; 526 + }; 527 + 528 + struct mv643xx_private { 529 + int port_num; /* User Ethernet port number */ 530 + 531 + u32 rx_sram_addr; /* Base address of rx sram area */ 532 + u32 rx_sram_size; /* Size of rx sram area */ 533 + u32 tx_sram_addr; /* Base address of tx sram area */ 534 + u32 tx_sram_size; /* Size of tx sram area */ 535 + 536 + int rx_resource_err; /* Rx ring resource error flag */ 537 + 538 + /* Tx/Rx rings managment indexes fields. 
For driver use */ 539 + 540 + /* Next available and first returning Rx resource */ 541 + int rx_curr_desc_q, rx_used_desc_q; 542 + 543 + /* Next available and first returning Tx resource */ 544 + int tx_curr_desc_q, tx_used_desc_q; 545 + 546 + #ifdef MV643XX_TX_FAST_REFILL 547 + u32 tx_clean_threshold; 548 + #endif 549 + 550 + struct eth_rx_desc *p_rx_desc_area; 551 + dma_addr_t rx_desc_dma; 552 + int rx_desc_area_size; 553 + struct sk_buff **rx_skb; 554 + 555 + struct eth_tx_desc *p_tx_desc_area; 556 + dma_addr_t tx_desc_dma; 557 + int tx_desc_area_size; 558 + struct sk_buff **tx_skb; 559 + 560 + struct work_struct tx_timeout_task; 561 + 562 + struct net_device *dev; 563 + struct napi_struct napi; 564 + struct net_device_stats stats; 565 + struct mv643xx_mib_counters mib_counters; 566 + spinlock_t lock; 567 + /* Size of Tx Ring per queue */ 568 + int tx_ring_size; 569 + /* Number of tx descriptors in use */ 570 + int tx_desc_count; 571 + /* Size of Rx Ring per queue */ 572 + int rx_ring_size; 573 + /* Number of rx descriptors in use */ 574 + int rx_desc_count; 575 + 576 + /* 577 + * Used in case RX Ring is empty, which can be caused when 578 + * system does not have resources (skb's) 579 + */ 580 + struct timer_list timeout; 581 + 582 + u32 rx_int_coal; 583 + u32 tx_int_coal; 584 + struct mii_if_info mii; 585 + }; 52 586 53 587 /* Static function declarations */ 588 + static void eth_port_init(struct mv643xx_private *mp); 589 + static void eth_port_reset(unsigned int eth_port_num); 590 + static void eth_port_start(struct net_device *dev); 591 + 592 + static void ethernet_phy_reset(unsigned int eth_port_num); 593 + 594 + static void eth_port_write_smi_reg(unsigned int eth_port_num, 595 + unsigned int phy_reg, unsigned int value); 596 + 597 + static void eth_port_read_smi_reg(unsigned int eth_port_num, 598 + unsigned int phy_reg, unsigned int *value); 599 + 600 + static void eth_clear_mib_counters(unsigned int eth_port_num); 601 + 602 + static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp, 603 + struct pkt_info *p_pkt_info); 604 + static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp, 605 + struct pkt_info *p_pkt_info); 606 + 54 607 static void eth_port_uc_addr_get(unsigned int port_num, unsigned char *p_addr); 55 608 static void eth_port_uc_addr_set(unsigned int port_num, unsigned char *p_addr); 56 609 static void eth_port_set_multicast_list(struct net_device *); ··· 631 78 static char mv643xx_driver_name[] = "mv643xx_eth"; 632 79 static char mv643xx_driver_version[] = "1.0"; 633 80 634 - static void __iomem *mv643xx_eth_shared_base; 81 + static void __iomem *mv643xx_eth_base; 635 82 636 - /* used to protect MV643XX_ETH_SMI_REG, which is shared across ports */ 83 + /* used to protect SMI_REG, which is shared across ports */ 637 84 static DEFINE_SPINLOCK(mv643xx_eth_phy_lock); 638 85 639 86 static inline u32 mv_read(int offset) 640 87 { 641 - void __iomem *reg_base; 642 - 643 - reg_base = mv643xx_eth_shared_base - MV643XX_ETH_SHARED_REGS; 644 - 645 - return readl(reg_base + offset); 88 + return readl(mv643xx_eth_base + offset); 646 89 } 647 90 648 91 static inline void mv_write(int offset, u32 data) 649 92 { 650 - void __iomem *reg_base; 651 - 652 - reg_base = mv643xx_eth_shared_base - MV643XX_ETH_SHARED_REGS; 653 - writel(data, reg_base + offset); 93 + writel(data, mv643xx_eth_base + offset); 654 94 } 655 95 656 96 /* ··· 767 221 struct mv643xx_private *mp = netdev_priv(dev); 768 222 u32 config_reg; 769 223 770 - config_reg = 
mv_read(MV643XX_ETH_PORT_CONFIG_REG(mp->port_num)); 224 + config_reg = mv_read(PORT_CONFIG_REG(mp->port_num)); 771 225 if (dev->flags & IFF_PROMISC) 772 - config_reg |= (u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE; 226 + config_reg |= (u32) UNICAST_PROMISCUOUS_MODE; 773 227 else 774 - config_reg &= ~(u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE; 775 - mv_write(MV643XX_ETH_PORT_CONFIG_REG(mp->port_num), config_reg); 228 + config_reg &= ~(u32) UNICAST_PROMISCUOUS_MODE; 229 + mv_write(PORT_CONFIG_REG(mp->port_num), config_reg); 776 230 777 231 eth_port_set_multicast_list(dev); 778 232 } ··· 1008 462 u32 o_pscr, n_pscr; 1009 463 unsigned int queues; 1010 464 1011 - o_pscr = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num)); 465 + o_pscr = mv_read(PORT_SERIAL_CONTROL_REG(port_num)); 1012 466 n_pscr = o_pscr; 1013 467 1014 468 /* clear speed, duplex and rx buffer size fields */ 1015 - n_pscr &= ~(MV643XX_ETH_SET_MII_SPEED_TO_100 | 1016 - MV643XX_ETH_SET_GMII_SPEED_TO_1000 | 1017 - MV643XX_ETH_SET_FULL_DUPLEX_MODE | 1018 - MV643XX_ETH_MAX_RX_PACKET_MASK); 469 + n_pscr &= ~(SET_MII_SPEED_TO_100 | 470 + SET_GMII_SPEED_TO_1000 | 471 + SET_FULL_DUPLEX_MODE | 472 + MAX_RX_PACKET_MASK); 1019 473 1020 474 if (ecmd->duplex == DUPLEX_FULL) 1021 - n_pscr |= MV643XX_ETH_SET_FULL_DUPLEX_MODE; 475 + n_pscr |= SET_FULL_DUPLEX_MODE; 1022 476 1023 477 if (ecmd->speed == SPEED_1000) 1024 - n_pscr |= MV643XX_ETH_SET_GMII_SPEED_TO_1000 | 1025 - MV643XX_ETH_MAX_RX_PACKET_9700BYTE; 478 + n_pscr |= SET_GMII_SPEED_TO_1000 | 479 + MAX_RX_PACKET_9700BYTE; 1026 480 else { 1027 481 if (ecmd->speed == SPEED_100) 1028 - n_pscr |= MV643XX_ETH_SET_MII_SPEED_TO_100; 1029 - n_pscr |= MV643XX_ETH_MAX_RX_PACKET_1522BYTE; 482 + n_pscr |= SET_MII_SPEED_TO_100; 483 + n_pscr |= MAX_RX_PACKET_1522BYTE; 1030 484 } 1031 485 1032 486 if (n_pscr != o_pscr) { 1033 - if ((o_pscr & MV643XX_ETH_SERIAL_PORT_ENABLE) == 0) 1034 - mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), 1035 - n_pscr); 487 + if ((o_pscr & SERIAL_PORT_ENABLE) == 0) 488 + mv_write(PORT_SERIAL_CONTROL_REG(port_num), n_pscr); 1036 489 else { 1037 490 queues = mv643xx_eth_port_disable_tx(port_num); 1038 491 1039 - o_pscr &= ~MV643XX_ETH_SERIAL_PORT_ENABLE; 1040 - mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), 1041 - o_pscr); 1042 - mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), 1043 - n_pscr); 1044 - mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), 1045 - n_pscr); 492 + o_pscr &= ~SERIAL_PORT_ENABLE; 493 + mv_write(PORT_SERIAL_CONTROL_REG(port_num), o_pscr); 494 + mv_write(PORT_SERIAL_CONTROL_REG(port_num), n_pscr); 495 + mv_write(PORT_SERIAL_CONTROL_REG(port_num), n_pscr); 1046 496 if (queues) 1047 497 mv643xx_eth_port_enable_tx(port_num, queues); 1048 498 } ··· 1064 522 unsigned int port_num = mp->port_num; 1065 523 1066 524 /* Read interrupt cause registers */ 1067 - eth_int_cause = mv_read(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num)) & 525 + eth_int_cause = mv_read(INTERRUPT_CAUSE_REG(port_num)) & 1068 526 ETH_INT_UNMASK_ALL; 1069 527 if (eth_int_cause & ETH_INT_CAUSE_EXT) { 1070 528 eth_int_cause_ext = mv_read( 1071 - MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)) & 529 + INTERRUPT_CAUSE_EXTEND_REG(port_num)) & 1072 530 ETH_INT_UNMASK_ALL_EXT; 1073 - mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 531 + mv_write(INTERRUPT_CAUSE_EXTEND_REG(port_num), 1074 532 ~eth_int_cause_ext); 1075 533 } 1076 534 ··· 1098 556 #ifdef MV643XX_NAPI 1099 557 if (eth_int_cause & ETH_INT_CAUSE_RX) { 1100 558 /* schedule the NAPI poll routine to 
maintain port */ 1101 - mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 1102 - ETH_INT_MASK_ALL); 559 + mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL); 560 + 1103 561 /* wait for previous write to complete */ 1104 - mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); 562 + mv_read(INTERRUPT_MASK_REG(port_num)); 1105 563 1106 564 netif_rx_schedule(dev, &mp->napi); 1107 565 } ··· 1153 611 unsigned int coal = ((t_clk / 1000000) * delay) / 64; 1154 612 1155 613 /* Set RX Coalescing mechanism */ 1156 - mv_write(MV643XX_ETH_SDMA_CONFIG_REG(eth_port_num), 614 + mv_write(SDMA_CONFIG_REG(eth_port_num), 1157 615 ((coal & 0x3fff) << 8) | 1158 - (mv_read(MV643XX_ETH_SDMA_CONFIG_REG(eth_port_num)) 616 + (mv_read(SDMA_CONFIG_REG(eth_port_num)) 1159 617 & 0xffc000ff)); 1160 618 1161 619 return coal; ··· 1191 649 unsigned int coal; 1192 650 coal = ((t_clk / 1000000) * delay) / 64; 1193 651 /* Set TX Coalescing mechanism */ 1194 - mv_write(MV643XX_ETH_TX_FIFO_URGENT_THRESHOLD_REG(eth_port_num), 1195 - coal << 4); 652 + mv_write(TX_FIFO_URGENT_THRESHOLD_REG(eth_port_num), coal << 4); 1196 653 return coal; 1197 654 } 1198 655 ··· 1327 786 int err; 1328 787 1329 788 /* Clear any pending ethernet port interrupts */ 1330 - mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); 1331 - mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); 789 + mv_write(INTERRUPT_CAUSE_REG(port_num), 0); 790 + mv_write(INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); 1332 791 /* wait for previous write to complete */ 1333 - mv_read (MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)); 792 + mv_read (INTERRUPT_CAUSE_EXTEND_REG(port_num)); 1334 793 1335 794 err = request_irq(dev->irq, mv643xx_eth_int_handler, 1336 795 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev); ··· 1437 896 eth_port_set_tx_coal(port_num, 133000000, MV643XX_TX_COAL); 1438 897 1439 898 /* Unmask phy and link status changes interrupts */ 1440 - mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), 1441 - ETH_INT_UNMASK_ALL_EXT); 899 + mv_write(INTERRUPT_EXTEND_MASK_REG(port_num), ETH_INT_UNMASK_ALL_EXT); 1442 900 1443 901 /* Unmask RX buffer and TX end interrupt */ 1444 - mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL); 902 + mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL); 1445 903 1446 904 return 0; 1447 905 ··· 1520 980 unsigned int port_num = mp->port_num; 1521 981 1522 982 /* Mask all interrupts on ethernet port */ 1523 - mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL); 983 + mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL); 1524 984 /* wait for previous write to complete */ 1525 - mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); 985 + mv_read(INTERRUPT_MASK_REG(port_num)); 1526 986 1527 987 #ifdef MV643XX_NAPI 1528 988 napi_disable(&mp->napi); ··· 1561 1021 #endif 1562 1022 1563 1023 work_done = 0; 1564 - if ((mv_read(MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(port_num))) 1024 + if ((mv_read(RX_CURRENT_QUEUE_DESC_PTR_0(port_num))) 1565 1025 != (u32) mp->rx_used_desc_q) 1566 1026 work_done = mv643xx_eth_receive_queue(dev, budget); 1567 1027 1568 1028 if (work_done < budget) { 1569 1029 netif_rx_complete(dev, napi); 1570 - mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); 1571 - mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); 1572 - mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 1573 - ETH_INT_UNMASK_ALL); 1030 + mv_write(INTERRUPT_CAUSE_REG(port_num), 0); 1031 + mv_write(INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); 1032 + 
mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL); 1574 1033 } 1575 1034 1576 1035 return work_done; ··· 1772 1233 struct mv643xx_private *mp = netdev_priv(netdev); 1773 1234 int port_num = mp->port_num; 1774 1235 1775 - mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL); 1236 + mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL); 1776 1237 /* wait for previous write to complete */ 1777 - mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); 1238 + mv_read(INTERRUPT_MASK_REG(port_num)); 1778 1239 1779 1240 mv643xx_eth_int_handler(netdev->irq, netdev); 1780 1241 1781 - mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL); 1242 + mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL); 1782 1243 } 1783 1244 #endif 1784 1245 ··· 1896 1357 1897 1358 /* set default config values */ 1898 1359 eth_port_uc_addr_get(port_num, dev->dev_addr); 1899 - mp->rx_ring_size = MV643XX_ETH_PORT_DEFAULT_RECEIVE_QUEUE_SIZE; 1900 - mp->tx_ring_size = MV643XX_ETH_PORT_DEFAULT_TRANSMIT_QUEUE_SIZE; 1360 + mp->rx_ring_size = PORT_DEFAULT_RECEIVE_QUEUE_SIZE; 1361 + mp->tx_ring_size = PORT_DEFAULT_TRANSMIT_QUEUE_SIZE; 1901 1362 1902 1363 if (is_valid_ether_addr(pd->mac_addr)) 1903 1364 memcpy(dev->dev_addr, pd->mac_addr, 6); ··· 2009 1470 if (res == NULL) 2010 1471 return -ENODEV; 2011 1472 2012 - mv643xx_eth_shared_base = ioremap(res->start, 2013 - MV643XX_ETH_SHARED_REGS_SIZE); 2014 - if (mv643xx_eth_shared_base == NULL) 1473 + mv643xx_eth_base = ioremap(res->start, res->end - res->start + 1); 1474 + if (mv643xx_eth_base == NULL) 2015 1475 return -ENOMEM; 2016 1476 2017 1477 return 0; ··· 2019 1481 2020 1482 static int mv643xx_eth_shared_remove(struct platform_device *pdev) 2021 1483 { 2022 - iounmap(mv643xx_eth_shared_base); 2023 - mv643xx_eth_shared_base = NULL; 1484 + iounmap(mv643xx_eth_base); 1485 + mv643xx_eth_base = NULL; 2024 1486 2025 1487 return 0; 2026 1488 } ··· 2032 1494 unsigned int port_num = mp->port_num; 2033 1495 2034 1496 /* Mask all interrupts on ethernet port */ 2035 - mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 0); 2036 - mv_read (MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); 1497 + mv_write(INTERRUPT_MASK_REG(port_num), 0); 1498 + mv_read (INTERRUPT_MASK_REG(port_num)); 2037 1499 2038 1500 eth_port_reset(port_num); 2039 1501 } ··· 2300 1762 2301 1763 /* Assignment of Tx CTRP of given queue */ 2302 1764 tx_curr_desc = mp->tx_curr_desc_q; 2303 - mv_write(MV643XX_ETH_TX_CURRENT_QUEUE_DESC_PTR_0(port_num), 1765 + mv_write(TX_CURRENT_QUEUE_DESC_PTR_0(port_num), 2304 1766 (u32)((struct eth_tx_desc *)mp->tx_desc_dma + tx_curr_desc)); 2305 1767 2306 1768 /* Assignment of Rx CRDP of given queue */ 2307 1769 rx_curr_desc = mp->rx_curr_desc_q; 2308 - mv_write(MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(port_num), 1770 + mv_write(RX_CURRENT_QUEUE_DESC_PTR_0(port_num), 2309 1771 (u32)((struct eth_rx_desc *)mp->rx_desc_dma + rx_curr_desc)); 2310 1772 2311 1773 /* Add the assigned Ethernet address to the port's address table */ 2312 1774 eth_port_uc_addr_set(port_num, dev->dev_addr); 2313 1775 2314 1776 /* Assign port configuration and command. 
*/ 2315 - mv_write(MV643XX_ETH_PORT_CONFIG_REG(port_num), 2316 - MV643XX_ETH_PORT_CONFIG_DEFAULT_VALUE); 1777 + mv_write(PORT_CONFIG_REG(port_num), 1778 + PORT_CONFIG_DEFAULT_VALUE); 2317 1779 2318 - mv_write(MV643XX_ETH_PORT_CONFIG_EXTEND_REG(port_num), 2319 - MV643XX_ETH_PORT_CONFIG_EXTEND_DEFAULT_VALUE); 1780 + mv_write(PORT_CONFIG_EXTEND_REG(port_num), 1781 + PORT_CONFIG_EXTEND_DEFAULT_VALUE); 2320 1782 2321 - pscr = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num)); 1783 + pscr = mv_read(PORT_SERIAL_CONTROL_REG(port_num)); 2322 1784 2323 - pscr &= ~(MV643XX_ETH_SERIAL_PORT_ENABLE | MV643XX_ETH_FORCE_LINK_PASS); 2324 - mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr); 1785 + pscr &= ~(SERIAL_PORT_ENABLE | FORCE_LINK_PASS); 1786 + mv_write(PORT_SERIAL_CONTROL_REG(port_num), pscr); 2325 1787 2326 - pscr |= MV643XX_ETH_DISABLE_AUTO_NEG_FOR_FLOW_CTRL | 2327 - MV643XX_ETH_DISABLE_AUTO_NEG_SPEED_GMII | 2328 - MV643XX_ETH_DISABLE_AUTO_NEG_FOR_DUPLX | 2329 - MV643XX_ETH_DO_NOT_FORCE_LINK_FAIL | 2330 - MV643XX_ETH_SERIAL_PORT_CONTROL_RESERVED; 1788 + pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL | 1789 + DISABLE_AUTO_NEG_SPEED_GMII | 1790 + DISABLE_AUTO_NEG_FOR_DUPLX | 1791 + DO_NOT_FORCE_LINK_FAIL | 1792 + SERIAL_PORT_CONTROL_RESERVED; 2331 1793 2332 - mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr); 1794 + mv_write(PORT_SERIAL_CONTROL_REG(port_num), pscr); 2333 1795 2334 - pscr |= MV643XX_ETH_SERIAL_PORT_ENABLE; 2335 - mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr); 1796 + pscr |= SERIAL_PORT_ENABLE; 1797 + mv_write(PORT_SERIAL_CONTROL_REG(port_num), pscr); 2336 1798 2337 1799 /* Assign port SDMA configuration */ 2338 - mv_write(MV643XX_ETH_SDMA_CONFIG_REG(port_num), 2339 - MV643XX_ETH_PORT_SDMA_CONFIG_DEFAULT_VALUE); 1800 + mv_write(SDMA_CONFIG_REG(port_num), 1801 + PORT_SDMA_CONFIG_DEFAULT_VALUE); 2340 1802 2341 1803 /* Enable port Rx. 
*/ 2342 1804 mv643xx_eth_port_enable_rx(port_num, ETH_RX_QUEUES_ENABLED); 2343 1805 2344 1806 /* Disable port bandwidth limits by clearing MTU register */ 2345 - mv_write(MV643XX_ETH_MAXIMUM_TRANSMIT_UNIT(port_num), 0); 1807 + mv_write(MAXIMUM_TRANSMIT_UNIT(port_num), 0); 2346 1808 2347 1809 /* save phy settings across reset */ 2348 1810 mv643xx_get_settings(dev, &ethtool_cmd); ··· 2363 1825 mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) | 2364 1826 (p_addr[3] << 0); 2365 1827 2366 - mv_write(MV643XX_ETH_MAC_ADDR_LOW(port_num), mac_l); 2367 - mv_write(MV643XX_ETH_MAC_ADDR_HIGH(port_num), mac_h); 1828 + mv_write(MAC_ADDR_LOW(port_num), mac_l); 1829 + mv_write(MAC_ADDR_HIGH(port_num), mac_h); 2368 1830 2369 1831 /* Accept frames with this address */ 2370 - table = MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE(port_num); 1832 + table = DA_FILTER_UNICAST_TABLE_BASE(port_num); 2371 1833 eth_port_set_filter_table_entry(table, p_addr[5] & 0x0f); 2372 1834 } 2373 1835 ··· 2379 1841 unsigned int mac_h; 2380 1842 unsigned int mac_l; 2381 1843 2382 - mac_h = mv_read(MV643XX_ETH_MAC_ADDR_HIGH(port_num)); 2383 - mac_l = mv_read(MV643XX_ETH_MAC_ADDR_LOW(port_num)); 1844 + mac_h = mv_read(MAC_ADDR_HIGH(port_num)); 1845 + mac_l = mv_read(MAC_ADDR_LOW(port_num)); 2384 1846 2385 1847 p_addr[0] = (mac_h >> 24) & 0xff; 2386 1848 p_addr[1] = (mac_h >> 16) & 0xff; ··· 2440 1902 2441 1903 if ((p_addr[0] == 0x01) && (p_addr[1] == 0x00) && 2442 1904 (p_addr[2] == 0x5E) && (p_addr[3] == 0x00) && (p_addr[4] == 0x00)) { 2443 - table = MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE 1905 + table = DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE 2444 1906 (eth_port_num); 2445 1907 eth_port_set_filter_table_entry(table, p_addr[5]); 2446 1908 return; ··· 2514 1976 for (i = 0; i < 8; i++) 2515 1977 crc_result = crc_result | (crc[i] << i); 2516 1978 2517 - table = MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num); 1979 + table = DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num); 2518 1980 eth_port_set_filter_table_entry(table, crc_result); 2519 1981 } 2520 1982 ··· 2544 2006 * 3-1 Queue ETH_Q0=0 2545 2007 * 7-4 Reserved = 0; 2546 2008 */ 2547 - mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101); 2009 + mv_write(DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101); 2548 2010 2549 2011 /* Set all entries in DA filter other multicast 2550 2012 * table (Ex_dFOMT) ··· 2554 2016 * 3-1 Queue ETH_Q0=0 2555 2017 * 7-4 Reserved = 0; 2556 2018 */ 2557 - mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101); 2019 + mv_write(DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101); 2558 2020 } 2559 2021 return; 2560 2022 } ··· 2564 2026 */ 2565 2027 for (table_index = 0; table_index <= 0xFC; table_index += 4) { 2566 2028 /* Clear DA filter special multicast table (Ex_dFSMT) */ 2567 - mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE 2029 + mv_write(DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE 2568 2030 (eth_port_num) + table_index, 0); 2569 2031 2570 2032 /* Clear DA filter other multicast table (Ex_dFOMT) */ 2571 - mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE 2033 + mv_write(DA_FILTER_OTHER_MULTICAST_TABLE_BASE 2572 2034 (eth_port_num) + table_index, 0); 2573 2035 } 2574 2036 ··· 2602 2064 2603 2065 /* Clear DA filter unicast table (Ex_dFUT) */ 2604 2066 for (table_index = 0; table_index <= 0xC; table_index += 4) 2605 - 
mv_write(MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE 2067 + mv_write(DA_FILTER_UNICAST_TABLE_BASE 2606 2068 (eth_port_num) + table_index, 0); 2607 2069 2608 2070 for (table_index = 0; table_index <= 0xFC; table_index += 4) { 2609 2071 /* Clear DA filter special multicast table (Ex_dFSMT) */ 2610 - mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE 2072 + mv_write(DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE 2611 2073 (eth_port_num) + table_index, 0); 2612 2074 /* Clear DA filter other multicast table (Ex_dFOMT) */ 2613 - mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE 2075 + mv_write(DA_FILTER_OTHER_MULTICAST_TABLE_BASE 2614 2076 (eth_port_num) + table_index, 0); 2615 2077 } 2616 2078 } ··· 2639 2101 /* Perform dummy reads from MIB counters */ 2640 2102 for (i = ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i < ETH_MIB_LATE_COLLISION; 2641 2103 i += 4) 2642 - mv_read(MV643XX_ETH_MIB_COUNTERS_BASE(eth_port_num) + i); 2104 + mv_read(MIB_COUNTERS_BASE(eth_port_num) + i); 2643 2105 } 2644 2106 2645 2107 static inline u32 read_mib(struct mv643xx_private *mp, int offset) 2646 2108 { 2647 - return mv_read(MV643XX_ETH_MIB_COUNTERS_BASE(mp->port_num) + offset); 2109 + return mv_read(MIB_COUNTERS_BASE(mp->port_num) + offset); 2648 2110 } 2649 2111 2650 2112 static void eth_update_mib_counters(struct mv643xx_private *mp) ··· 2729 2191 { 2730 2192 unsigned int reg_data; 2731 2193 2732 - reg_data = mv_read(MV643XX_ETH_PHY_ADDR_REG); 2194 + reg_data = mv_read(PHY_ADDR_REG); 2733 2195 2734 2196 return ((reg_data >> (5 * eth_port_num)) & 0x1f); 2735 2197 } ··· 2756 2218 u32 reg_data; 2757 2219 int addr_shift = 5 * eth_port_num; 2758 2220 2759 - reg_data = mv_read(MV643XX_ETH_PHY_ADDR_REG); 2221 + reg_data = mv_read(PHY_ADDR_REG); 2760 2222 reg_data &= ~(0x1f << addr_shift); 2761 2223 reg_data |= (phy_addr & 0x1f) << addr_shift; 2762 - mv_write(MV643XX_ETH_PHY_ADDR_REG, reg_data); 2224 + mv_write(PHY_ADDR_REG, reg_data); 2763 2225 } 2764 2226 2765 2227 /* ··· 2797 2259 static void mv643xx_eth_port_enable_tx(unsigned int port_num, 2798 2260 unsigned int queues) 2799 2261 { 2800 - mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), queues); 2262 + mv_write(TRANSMIT_QUEUE_COMMAND_REG(port_num), queues); 2801 2263 } 2802 2264 2803 2265 static void mv643xx_eth_port_enable_rx(unsigned int port_num, 2804 2266 unsigned int queues) 2805 2267 { 2806 - mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), queues); 2268 + mv_write(RECEIVE_QUEUE_COMMAND_REG(port_num), queues); 2807 2269 } 2808 2270 2809 2271 static unsigned int mv643xx_eth_port_disable_tx(unsigned int port_num) ··· 2811 2273 u32 queues; 2812 2274 2813 2275 /* Stop Tx port activity. Check port Tx activity. */ 2814 - queues = mv_read(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num)) 2815 - & 0xFF; 2276 + queues = mv_read(TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF; 2816 2277 if (queues) { 2817 2278 /* Issue stop command for active queues only */ 2818 - mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), 2819 - (queues << 8)); 2279 + mv_write(TRANSMIT_QUEUE_COMMAND_REG(port_num), (queues << 8)); 2820 2280 2821 2281 /* Wait for all Tx activity to terminate. 
*/ 2822 2282 /* Check port cause register that all Tx queues are stopped */ 2823 - while (mv_read(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num)) 2824 - & 0xFF) 2283 + while (mv_read(TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF) 2825 2284 udelay(PHY_WAIT_MICRO_SECONDS); 2826 2285 2827 2286 /* Wait for Tx FIFO to empty */ 2828 - while (mv_read(MV643XX_ETH_PORT_STATUS_REG(port_num)) & 2287 + while (mv_read(PORT_STATUS_REG(port_num)) & 2829 2288 ETH_PORT_TX_FIFO_EMPTY) 2830 2289 udelay(PHY_WAIT_MICRO_SECONDS); 2831 2290 } ··· 2835 2300 u32 queues; 2836 2301 2837 2302 /* Stop Rx port activity. Check port Rx activity. */ 2838 - queues = mv_read(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num)) 2839 - & 0xFF; 2303 + queues = mv_read(RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF; 2840 2304 if (queues) { 2841 2305 /* Issue stop command for active queues only */ 2842 - mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), 2843 - (queues << 8)); 2306 + mv_write(RECEIVE_QUEUE_COMMAND_REG(port_num), (queues << 8)); 2844 2307 2845 2308 /* Wait for all Rx activity to terminate. */ 2846 2309 /* Check port cause register that all Rx queues are stopped */ 2847 - while (mv_read(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num)) 2848 - & 0xFF) 2310 + while (mv_read(RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF) 2849 2311 udelay(PHY_WAIT_MICRO_SECONDS); 2850 2312 } 2851 2313 ··· 2878 2346 eth_clear_mib_counters(port_num); 2879 2347 2880 2348 /* Reset the Enable bit in the Configuration Register */ 2881 - reg_data = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num)); 2882 - reg_data &= ~(MV643XX_ETH_SERIAL_PORT_ENABLE | 2883 - MV643XX_ETH_DO_NOT_FORCE_LINK_FAIL | 2884 - MV643XX_ETH_FORCE_LINK_PASS); 2885 - mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), reg_data); 2349 + reg_data = mv_read(PORT_SERIAL_CONTROL_REG(port_num)); 2350 + reg_data &= ~(SERIAL_PORT_ENABLE | 2351 + DO_NOT_FORCE_LINK_FAIL | 2352 + FORCE_LINK_PASS); 2353 + mv_write(PORT_SERIAL_CONTROL_REG(port_num), reg_data); 2886 2354 } 2887 2355 2888 2356 ··· 2917 2385 spin_lock_irqsave(&mv643xx_eth_phy_lock, flags); 2918 2386 2919 2387 /* wait for the SMI register to become available */ 2920 - for (i = 0; mv_read(MV643XX_ETH_SMI_REG) & ETH_SMI_BUSY; i++) { 2388 + for (i = 0; mv_read(SMI_REG) & ETH_SMI_BUSY; i++) { 2921 2389 if (i == PHY_WAIT_ITERATIONS) { 2922 2390 printk("mv643xx PHY busy timeout, port %d\n", port_num); 2923 2391 goto out; ··· 2925 2393 udelay(PHY_WAIT_MICRO_SECONDS); 2926 2394 } 2927 2395 2928 - mv_write(MV643XX_ETH_SMI_REG, 2396 + mv_write(SMI_REG, 2929 2397 (phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ); 2930 2398 2931 2399 /* now wait for the data to be valid */ 2932 - for (i = 0; !(mv_read(MV643XX_ETH_SMI_REG) & ETH_SMI_READ_VALID); i++) { 2400 + for (i = 0; !(mv_read(SMI_REG) & ETH_SMI_READ_VALID); i++) { 2933 2401 if (i == PHY_WAIT_ITERATIONS) { 2934 2402 printk("mv643xx PHY read timeout, port %d\n", port_num); 2935 2403 goto out; ··· 2937 2405 udelay(PHY_WAIT_MICRO_SECONDS); 2938 2406 } 2939 2407 2940 - *value = mv_read(MV643XX_ETH_SMI_REG) & 0xffff; 2408 + *value = mv_read(SMI_REG) & 0xffff; 2941 2409 out: 2942 2410 spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags); 2943 2411 } ··· 2975 2443 spin_lock_irqsave(&mv643xx_eth_phy_lock, flags); 2976 2444 2977 2445 /* wait for the SMI register to become available */ 2978 - for (i = 0; mv_read(MV643XX_ETH_SMI_REG) & ETH_SMI_BUSY; i++) { 2446 + for (i = 0; mv_read(SMI_REG) & ETH_SMI_BUSY; i++) { 2979 2447 if (i == PHY_WAIT_ITERATIONS) { 2980 2448 
printk("mv643xx PHY busy timeout, port %d\n", 2981 2449 eth_port_num); ··· 2984 2452 udelay(PHY_WAIT_MICRO_SECONDS); 2985 2453 } 2986 2454 2987 - mv_write(MV643XX_ETH_SMI_REG, (phy_addr << 16) | (phy_reg << 21) | 2455 + mv_write(SMI_REG, (phy_addr << 16) | (phy_reg << 21) | 2988 2456 ETH_SMI_OPCODE_WRITE | (value & 0xffff)); 2989 2457 out: 2990 2458 spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags); ··· 3274 2742 .get_drvinfo = mv643xx_get_drvinfo, 3275 2743 .get_link = mv643xx_eth_get_link, 3276 2744 .set_sg = ethtool_op_set_sg, 2745 + .get_sset_count = mv643xx_get_sset_count, 3277 2746 .get_ethtool_stats = mv643xx_get_ethtool_stats, 3278 2747 .get_strings = mv643xx_get_strings, 3279 2748 .nway_reset = mv643xx_eth_nway_restart,
-370
drivers/net/mv643xx_eth.h
···
1 - #ifndef __MV643XX_ETH_H__
2 - #define __MV643XX_ETH_H__
3 - 
4 - #include <linux/module.h>
5 - #include <linux/kernel.h>
6 - #include <linux/spinlock.h>
7 - #include <linux/workqueue.h>
8 - #include <linux/mii.h>
9 - 
10 - #include <linux/mv643xx.h>
11 - 
12 - #include <asm/dma-mapping.h>
13 - 
14 - /* Checksum offload for Tx works for most packets, but
15 - * fails if previous packet sent did not use hw csum
16 - */
17 - #define MV643XX_CHECKSUM_OFFLOAD_TX
18 - #define MV643XX_NAPI
19 - #define MV643XX_TX_FAST_REFILL
20 - #undef MV643XX_COAL
21 - 
22 - /*
23 - * Number of RX / TX descriptors on RX / TX rings.
24 - * Note that allocating RX descriptors is done by allocating the RX
25 - * ring AND a preallocated RX buffers (skb's) for each descriptor.
26 - * The TX descriptors only allocates the TX descriptors ring,
27 - * with no pre allocated TX buffers (skb's are allocated by higher layers.
28 - */
29 - 
30 - /* Default TX ring size is 1000 descriptors */
31 - #define MV643XX_DEFAULT_TX_QUEUE_SIZE 1000
32 - 
33 - /* Default RX ring size is 400 descriptors */
34 - #define MV643XX_DEFAULT_RX_QUEUE_SIZE 400
35 - 
36 - #define MV643XX_TX_COAL 100
37 - #ifdef MV643XX_COAL
38 - #define MV643XX_RX_COAL 100
39 - #endif
40 - 
41 - #ifdef MV643XX_CHECKSUM_OFFLOAD_TX
42 - #define MAX_DESCS_PER_SKB (MAX_SKB_FRAGS + 1)
43 - #else
44 - #define MAX_DESCS_PER_SKB 1
45 - #endif
46 - 
47 - #define ETH_VLAN_HLEN 4
48 - #define ETH_FCS_LEN 4
49 - #define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */
50 - #define ETH_WRAPPER_LEN (ETH_HW_IP_ALIGN + ETH_HLEN + \
51 - ETH_VLAN_HLEN + ETH_FCS_LEN)
52 - #define ETH_RX_SKB_SIZE (dev->mtu + ETH_WRAPPER_LEN + dma_get_cache_alignment())
53 - 
54 - #define ETH_RX_QUEUES_ENABLED (1 << 0) /* use only Q0 for receive */
55 - #define ETH_TX_QUEUES_ENABLED (1 << 0) /* use only Q0 for transmit */
56 - 
57 - #define ETH_INT_CAUSE_RX_DONE (ETH_RX_QUEUES_ENABLED << 2)
58 - #define ETH_INT_CAUSE_RX_ERROR (ETH_RX_QUEUES_ENABLED << 9)
59 - #define ETH_INT_CAUSE_RX (ETH_INT_CAUSE_RX_DONE | ETH_INT_CAUSE_RX_ERROR)
60 - #define ETH_INT_CAUSE_EXT 0x00000002
61 - #define ETH_INT_UNMASK_ALL (ETH_INT_CAUSE_RX | ETH_INT_CAUSE_EXT)
62 - 
63 - #define ETH_INT_CAUSE_TX_DONE (ETH_TX_QUEUES_ENABLED << 0)
64 - #define ETH_INT_CAUSE_TX_ERROR (ETH_TX_QUEUES_ENABLED << 8)
65 - #define ETH_INT_CAUSE_TX (ETH_INT_CAUSE_TX_DONE | ETH_INT_CAUSE_TX_ERROR)
66 - #define ETH_INT_CAUSE_PHY 0x00010000
67 - #define ETH_INT_CAUSE_STATE 0x00100000
68 - #define ETH_INT_UNMASK_ALL_EXT (ETH_INT_CAUSE_TX | ETH_INT_CAUSE_PHY | \
69 - ETH_INT_CAUSE_STATE)
70 - 
71 - #define ETH_INT_MASK_ALL 0x00000000
72 - #define ETH_INT_MASK_ALL_EXT 0x00000000
73 - 
74 - #define PHY_WAIT_ITERATIONS 1000 /* 1000 iterations * 10uS = 10mS max */
75 - #define PHY_WAIT_MICRO_SECONDS 10
76 - 
77 - /* Buffer offset from buffer pointer */
78 - #define RX_BUF_OFFSET 0x2
79 - 
80 - /* Gigabit Ethernet Unit Global Registers */
81 - 
82 - /* MIB Counters register definitions */
83 - #define ETH_MIB_GOOD_OCTETS_RECEIVED_LOW 0x0
84 - #define ETH_MIB_GOOD_OCTETS_RECEIVED_HIGH 0x4
85 - #define ETH_MIB_BAD_OCTETS_RECEIVED 0x8
86 - #define ETH_MIB_INTERNAL_MAC_TRANSMIT_ERR 0xc
87 - #define ETH_MIB_GOOD_FRAMES_RECEIVED 0x10
88 - #define ETH_MIB_BAD_FRAMES_RECEIVED 0x14
89 - #define ETH_MIB_BROADCAST_FRAMES_RECEIVED 0x18
90 - #define ETH_MIB_MULTICAST_FRAMES_RECEIVED 0x1c
91 - #define ETH_MIB_FRAMES_64_OCTETS 0x20
92 - #define ETH_MIB_FRAMES_65_TO_127_OCTETS 0x24
93 - #define ETH_MIB_FRAMES_128_TO_255_OCTETS 0x28
94 - #define ETH_MIB_FRAMES_256_TO_511_OCTETS 0x2c
95 - #define ETH_MIB_FRAMES_512_TO_1023_OCTETS 0x30
96 - #define ETH_MIB_FRAMES_1024_TO_MAX_OCTETS 0x34
97 - #define ETH_MIB_GOOD_OCTETS_SENT_LOW 0x38
98 - #define ETH_MIB_GOOD_OCTETS_SENT_HIGH 0x3c
99 - #define ETH_MIB_GOOD_FRAMES_SENT 0x40
100 - #define ETH_MIB_EXCESSIVE_COLLISION 0x44
101 - #define ETH_MIB_MULTICAST_FRAMES_SENT 0x48
102 - #define ETH_MIB_BROADCAST_FRAMES_SENT 0x4c
103 - #define ETH_MIB_UNREC_MAC_CONTROL_RECEIVED 0x50
104 - #define ETH_MIB_FC_SENT 0x54
105 - #define ETH_MIB_GOOD_FC_RECEIVED 0x58
106 - #define ETH_MIB_BAD_FC_RECEIVED 0x5c
107 - #define ETH_MIB_UNDERSIZE_RECEIVED 0x60
108 - #define ETH_MIB_FRAGMENTS_RECEIVED 0x64
109 - #define ETH_MIB_OVERSIZE_RECEIVED 0x68
110 - #define ETH_MIB_JABBER_RECEIVED 0x6c
111 - #define ETH_MIB_MAC_RECEIVE_ERROR 0x70
112 - #define ETH_MIB_BAD_CRC_EVENT 0x74
113 - #define ETH_MIB_COLLISION 0x78
114 - #define ETH_MIB_LATE_COLLISION 0x7c
115 - 
116 - /* Port serial status reg (PSR) */
117 - #define ETH_INTERFACE_PCM 0x00000001
118 - #define ETH_LINK_IS_UP 0x00000002
119 - #define ETH_PORT_AT_FULL_DUPLEX 0x00000004
120 - #define ETH_RX_FLOW_CTRL_ENABLED 0x00000008
121 - #define ETH_GMII_SPEED_1000 0x00000010
122 - #define ETH_MII_SPEED_100 0x00000020
123 - #define ETH_TX_IN_PROGRESS 0x00000080
124 - #define ETH_BYPASS_ACTIVE 0x00000100
125 - #define ETH_PORT_AT_PARTITION_STATE 0x00000200
126 - #define ETH_PORT_TX_FIFO_EMPTY 0x00000400
127 - 
128 - /* SMI reg */
129 - #define ETH_SMI_BUSY 0x10000000 /* 0 - Write, 1 - Read */
130 - #define ETH_SMI_READ_VALID 0x08000000 /* 0 - Write, 1 - Read */
131 - #define ETH_SMI_OPCODE_WRITE 0 /* Completion of Read */
132 - #define ETH_SMI_OPCODE_READ 0x04000000 /* Operation is in progress */
133 - 
134 - /* Interrupt Cause Register Bit Definitions */
135 - 
136 - /* SDMA command status fields macros */
137 - 
138 - /* Tx & Rx descriptors status */
139 - #define ETH_ERROR_SUMMARY 0x00000001
140 - 
141 - /* Tx & Rx descriptors command */
142 - #define ETH_BUFFER_OWNED_BY_DMA 0x80000000
143 - 
144 - /* Tx descriptors status */
145 - #define ETH_LC_ERROR 0
146 - #define ETH_UR_ERROR 0x00000002
147 - #define ETH_RL_ERROR 0x00000004
148 - #define ETH_LLC_SNAP_FORMAT 0x00000200
149 - 
150 - /* Rx descriptors status */
151 - #define ETH_OVERRUN_ERROR 0x00000002
152 - #define ETH_MAX_FRAME_LENGTH_ERROR 0x00000004
153 - #define ETH_RESOURCE_ERROR 0x00000006
154 - #define ETH_VLAN_TAGGED 0x00080000
155 - #define ETH_BPDU_FRAME 0x00100000
156 - #define ETH_UDP_FRAME_OVER_IP_V_4 0x00200000
157 - #define ETH_OTHER_FRAME_TYPE 0x00400000
158 - #define ETH_LAYER_2_IS_ETH_V_2 0x00800000
159 - #define ETH_FRAME_TYPE_IP_V_4 0x01000000
160 - #define ETH_FRAME_HEADER_OK 0x02000000
161 - #define ETH_RX_LAST_DESC 0x04000000
162 - #define ETH_RX_FIRST_DESC 0x08000000
163 - #define ETH_UNKNOWN_DESTINATION_ADDR 0x10000000
164 - #define ETH_RX_ENABLE_INTERRUPT 0x20000000
165 - #define ETH_LAYER_4_CHECKSUM_OK 0x40000000
166 - 
167 - /* Rx descriptors byte count */
168 - #define ETH_FRAME_FRAGMENTED 0x00000004
169 - 
170 - /* Tx descriptors command */
171 - #define ETH_LAYER_4_CHECKSUM_FIRST_DESC 0x00000400
172 - #define ETH_FRAME_SET_TO_VLAN 0x00008000
173 - #define ETH_UDP_FRAME 0x00010000
174 - #define ETH_GEN_TCP_UDP_CHECKSUM 0x00020000
175 - #define ETH_GEN_IP_V_4_CHECKSUM 0x00040000
176 - #define ETH_ZERO_PADDING 0x00080000
177 - #define ETH_TX_LAST_DESC 0x00100000
178 - #define ETH_TX_FIRST_DESC 0x00200000
179 - #define ETH_GEN_CRC 0x00400000
180 - #define ETH_TX_ENABLE_INTERRUPT 0x00800000
181 - #define ETH_AUTO_MODE 0x40000000
182 - 
183 - #define ETH_TX_IHL_SHIFT 11
184 - 
185 - /* typedefs */
186 - 
187 - typedef enum _eth_func_ret_status {
188 - ETH_OK, /* Returned as expected. */
189 - ETH_ERROR, /* Fundamental error. */
190 - ETH_RETRY, /* Could not process request. Try later.*/
191 - ETH_END_OF_JOB, /* Ring has nothing to process. */
192 - ETH_QUEUE_FULL, /* Ring resource error. */
193 - ETH_QUEUE_LAST_RESOURCE /* Ring resources about to exhaust. */
194 - } ETH_FUNC_RET_STATUS;
195 - 
196 - typedef enum _eth_target {
197 - ETH_TARGET_DRAM,
198 - ETH_TARGET_DEVICE,
199 - ETH_TARGET_CBS,
200 - ETH_TARGET_PCI0,
201 - ETH_TARGET_PCI1
202 - } ETH_TARGET;
203 - 
204 - /* These are for big-endian machines. Little endian needs different
205 - * definitions.
206 - */
207 - #if defined(__BIG_ENDIAN)
208 - struct eth_rx_desc {
209 - u16 byte_cnt; /* Descriptor buffer byte count */
210 - u16 buf_size; /* Buffer size */
211 - u32 cmd_sts; /* Descriptor command status */
212 - u32 next_desc_ptr; /* Next descriptor pointer */
213 - u32 buf_ptr; /* Descriptor buffer pointer */
214 - };
215 - 
216 - struct eth_tx_desc {
217 - u16 byte_cnt; /* buffer byte count */
218 - u16 l4i_chk; /* CPU provided TCP checksum */
219 - u32 cmd_sts; /* Command/status field */
220 - u32 next_desc_ptr; /* Pointer to next descriptor */
221 - u32 buf_ptr; /* pointer to buffer for this descriptor*/
222 - };
223 - 
224 - #elif defined(__LITTLE_ENDIAN)
225 - struct eth_rx_desc {
226 - u32 cmd_sts; /* Descriptor command status */
227 - u16 buf_size; /* Buffer size */
228 - u16 byte_cnt; /* Descriptor buffer byte count */
229 - u32 buf_ptr; /* Descriptor buffer pointer */
230 - u32 next_desc_ptr; /* Next descriptor pointer */
231 - };
232 - 
233 - struct eth_tx_desc {
234 - u32 cmd_sts; /* Command/status field */
235 - u16 l4i_chk; /* CPU provided TCP checksum */
236 - u16 byte_cnt; /* buffer byte count */
237 - u32 buf_ptr; /* pointer to buffer for this descriptor*/
238 - u32 next_desc_ptr; /* Pointer to next descriptor */
239 - };
240 - #else
241 - #error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
242 - #endif
243 - 
244 - /* Unified struct for Rx and Tx operations. The user is not required to */
245 - /* be familier with neither Tx nor Rx descriptors. */
246 - struct pkt_info {
247 - unsigned short byte_cnt; /* Descriptor buffer byte count */
248 - unsigned short l4i_chk; /* Tx CPU provided TCP Checksum */
249 - unsigned int cmd_sts; /* Descriptor command status */
250 - dma_addr_t buf_ptr; /* Descriptor buffer pointer */
251 - struct sk_buff *return_info; /* User resource return information */
252 - };
253 - 
254 - /* Ethernet port specific information */
255 - 
256 - struct mv643xx_mib_counters {
257 - u64 good_octets_received;
258 - u32 bad_octets_received;
259 - u32 internal_mac_transmit_err;
260 - u32 good_frames_received;
261 - u32 bad_frames_received;
262 - u32 broadcast_frames_received;
263 - u32 multicast_frames_received;
264 - u32 frames_64_octets;
265 - u32 frames_65_to_127_octets;
266 - u32 frames_128_to_255_octets;
267 - u32 frames_256_to_511_octets;
268 - u32 frames_512_to_1023_octets;
269 - u32 frames_1024_to_max_octets;
270 - u64 good_octets_sent;
271 - u32 good_frames_sent;
272 - u32 excessive_collision;
273 - u32 multicast_frames_sent;
274 - u32 broadcast_frames_sent;
275 - u32 unrec_mac_control_received;
276 - u32 fc_sent;
277 - u32 good_fc_received;
278 - u32 bad_fc_received;
279 - u32 undersize_received;
280 - u32 fragments_received;
281 - u32 oversize_received;
282 - u32 jabber_received;
283 - u32 mac_receive_error;
284 - u32 bad_crc_event;
285 - u32 collision;
286 - u32 late_collision;
287 - };
288 - 
289 - struct mv643xx_private {
290 - int port_num; /* User Ethernet port number */
291 - 
292 - u32 rx_sram_addr; /* Base address of rx sram area */
293 - u32 rx_sram_size; /* Size of rx sram area */
294 - u32 tx_sram_addr; /* Base address of tx sram area */
295 - u32 tx_sram_size; /* Size of tx sram area */
296 - 
297 - int rx_resource_err; /* Rx ring resource error flag */
298 - 
299 - /* Tx/Rx rings managment indexes fields. For driver use */
300 - 
301 - /* Next available and first returning Rx resource */
302 - int rx_curr_desc_q, rx_used_desc_q;
303 - 
304 - /* Next available and first returning Tx resource */
305 - int tx_curr_desc_q, tx_used_desc_q;
306 - 
307 - #ifdef MV643XX_TX_FAST_REFILL
308 - u32 tx_clean_threshold;
309 - #endif
310 - 
311 - struct eth_rx_desc *p_rx_desc_area;
312 - dma_addr_t rx_desc_dma;
313 - int rx_desc_area_size;
314 - struct sk_buff **rx_skb;
315 - 
316 - struct eth_tx_desc *p_tx_desc_area;
317 - dma_addr_t tx_desc_dma;
318 - int tx_desc_area_size;
319 - struct sk_buff **tx_skb;
320 - 
321 - struct work_struct tx_timeout_task;
322 - 
323 - struct net_device *dev;
324 - struct napi_struct napi;
325 - struct net_device_stats stats;
326 - struct mv643xx_mib_counters mib_counters;
327 - spinlock_t lock;
328 - /* Size of Tx Ring per queue */
329 - int tx_ring_size;
330 - /* Number of tx descriptors in use */
331 - int tx_desc_count;
332 - /* Size of Rx Ring per queue */
333 - int rx_ring_size;
334 - /* Number of rx descriptors in use */
335 - int rx_desc_count;
336 - 
337 - /*
338 - * Used in case RX Ring is empty, which can be caused when
339 - * system does not have resources (skb's)
340 - */
341 - struct timer_list timeout;
342 - 
343 - u32 rx_int_coal;
344 - u32 tx_int_coal;
345 - struct mii_if_info mii;
346 - };
347 - 
348 - /* Port operation control routines */
349 - static void eth_port_init(struct mv643xx_private *mp);
350 - static void eth_port_reset(unsigned int eth_port_num);
351 - static void eth_port_start(struct net_device *dev);
352 - 
353 - /* PHY and MIB routines */
354 - static void ethernet_phy_reset(unsigned int eth_port_num);
355 - 
356 - static void eth_port_write_smi_reg(unsigned int eth_port_num,
357 - unsigned int phy_reg, unsigned int value);
358 - 
359 - static void eth_port_read_smi_reg(unsigned int eth_port_num,
360 - unsigned int phy_reg, unsigned int *value);
361 - 
362 - static void eth_clear_mib_counters(unsigned int eth_port_num);
363 - 
364 - /* Port data flow control routines */
365 - static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
366 - struct pkt_info *p_pkt_info);
367 - static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp,
368 - struct pkt_info *p_pkt_info);
369 - 
370 - #endif /* __MV643XX_ETH_H__ */
+1 -1
drivers/net/pasemi_mac.c
···
550 550 
551 551 n = mac->rx->next_to_clean;
552 552 
553 - prefetch(RX_RING(mac, n));
553 + prefetch(&RX_RING(mac, n));
554 554 
555 555 for (count = 0; count < limit; count++) {
556 556 macrx = RX_RING(mac, n);
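This one-character change is the "pasemi_mac: fix typo" commit from the merge summary: prefetch() wants the address of the data to pull into cache, but RX_RING() evaluates to the ring entry itself. A sketch of the difference, assuming a macro shape like the following (the actual definition lives in the driver and is not quoted here):

    /* Hypothetical expansion, for illustration only: */
    #define RX_RING(mac, num)	((mac)->rx->ring[(num) & (RX_RING_SIZE - 1)])

    prefetch(RX_RING(mac, n));	/* old: passes the descriptor's value as if it were an address */
    prefetch(&RX_RING(mac, n));	/* new: passes the descriptor's address, prefetching the ring entry */

Since prefetch() is only a hint and faults on the hinted address are ignored, the old form was harmless at runtime, but it prefetched a meaningless address and did nothing useful.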
+265 -141
drivers/net/r8169.c
···
44 44 printk( "Assertion failed! %s,%s,%s,line=%d\n", \
45 45 #expr,__FILE__,__FUNCTION__,__LINE__); \
46 46 }
47 - #define dprintk(fmt, args...) do { printk(PFX fmt, ## args); } while (0)
47 + #define dprintk(fmt, args...) \
48 + do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
48 49 #else
49 50 #define assert(expr) do {} while (0)
50 51 #define dprintk(fmt, args...) do {} while (0)
···
112 111 RTL_GIGA_MAC_VER_05 = 0x05, // 8110SCd
113 112 RTL_GIGA_MAC_VER_06 = 0x06, // 8110SCe
114 113 RTL_GIGA_MAC_VER_11 = 0x0b, // 8168Bb
115 - RTL_GIGA_MAC_VER_12 = 0x0c, // 8168Be 8168Bf
116 - RTL_GIGA_MAC_VER_13 = 0x0d, // 8101Eb 8101Ec
117 - RTL_GIGA_MAC_VER_14 = 0x0e, // 8101
118 - RTL_GIGA_MAC_VER_15 = 0x0f // 8101
119 - };
120 - 
121 - enum phy_version {
122 - RTL_GIGA_PHY_VER_C = 0x03, /* PHY Reg 0x03 bit0-3 == 0x0000 */
123 - RTL_GIGA_PHY_VER_D = 0x04, /* PHY Reg 0x03 bit0-3 == 0x0000 */
124 - RTL_GIGA_PHY_VER_E = 0x05, /* PHY Reg 0x03 bit0-3 == 0x0000 */
125 - RTL_GIGA_PHY_VER_F = 0x06, /* PHY Reg 0x03 bit0-3 == 0x0001 */
126 - RTL_GIGA_PHY_VER_G = 0x07, /* PHY Reg 0x03 bit0-3 == 0x0002 */
127 - RTL_GIGA_PHY_VER_H = 0x08, /* PHY Reg 0x03 bit0-3 == 0x0003 */
114 + RTL_GIGA_MAC_VER_12 = 0x0c, // 8168Be
115 + RTL_GIGA_MAC_VER_13 = 0x0d, // 8101Eb
116 + RTL_GIGA_MAC_VER_14 = 0x0e, // 8101 ?
117 + RTL_GIGA_MAC_VER_15 = 0x0f, // 8101 ?
118 + RTL_GIGA_MAC_VER_16 = 0x11, // 8101Ec
119 + RTL_GIGA_MAC_VER_17 = 0x10, // 8168Bf
120 + RTL_GIGA_MAC_VER_18 = 0x12, // 8168CP
121 + RTL_GIGA_MAC_VER_19 = 0x13, // 8168C
122 + RTL_GIGA_MAC_VER_20 = 0x14 // 8168C
128 123 };
129 124 
130 125 #define _R(NAME,MAC,MASK) \
···
141 144 _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_12, 0xff7e1880), // PCI-E
142 145 _R("RTL8101e", RTL_GIGA_MAC_VER_13, 0xff7e1880), // PCI-E 8139
143 146 _R("RTL8100e", RTL_GIGA_MAC_VER_14, 0xff7e1880), // PCI-E 8139
144 - _R("RTL8100e", RTL_GIGA_MAC_VER_15, 0xff7e1880) // PCI-E 8139
147 + _R("RTL8100e", RTL_GIGA_MAC_VER_15, 0xff7e1880), // PCI-E 8139
148 + _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_17, 0xff7e1880), // PCI-E
149 + _R("RTL8101e", RTL_GIGA_MAC_VER_16, 0xff7e1880), // PCI-E
150 + _R("RTL8168cp/8111cp", RTL_GIGA_MAC_VER_18, 0xff7e1880), // PCI-E
151 + _R("RTL8168c/8111c", RTL_GIGA_MAC_VER_19, 0xff7e1880), // PCI-E
152 + _R("RTL8168c/8111c", RTL_GIGA_MAC_VER_20, 0xff7e1880) // PCI-E
145 153 };
146 154 #undef _R
···
167 165 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
168 166 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
169 167 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
170 - { PCI_DEVICE(0x1259, 0xc107), 0, 0, RTL_CFG_0 },
168 + { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 },
171 169 { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 },
172 170 { PCI_VENDOR_ID_LINKSYS, 0x1032,
173 171 PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
···
279 277 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
280 278 
281 279 /* Config1 register p.24 */
280 + MSIEnable = (1 << 5), /* Enable Message Signaled Interrupt */
282 281 PMEnable = (1 << 0), /* Power Management Enable */
283 282 
284 283 /* Config2 register p. 25 */
···
383 380 u8 __pad[sizeof(void *) - sizeof(u32)];
384 381 };
385 382 
383 + enum features {
384 + RTL_FEATURE_WOL = (1 << 0),
385 + RTL_FEATURE_MSI = (1 << 1),
386 + };
387 + 
386 388 struct rtl8169_private {
387 389 void __iomem *mmio_addr; /* memory map physical address */
388 390 struct pci_dev *pci_dev; /* Index of PCI device */
389 391 struct net_device *dev;
390 392 struct napi_struct napi;
391 - struct net_device_stats stats; /* statistics of net device */
392 393 spinlock_t lock; /* spin lock flag */
393 394 u32 msg_enable;
394 395 int chipset;
395 396 int mac_version;
396 - int phy_version;
397 397 u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
398 398 u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
399 399 u32 dirty_rx;
···
426 420 unsigned int (*phy_reset_pending)(void __iomem *);
427 421 unsigned int (*link_ok)(void __iomem *);
428 422 struct delayed_work task;
429 - unsigned wol_enabled : 1;
423 + unsigned features;
430 424 
431 425 
432 426 MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
···
632 626 
633 627 RTL_W8(Cfg9346, Cfg9346_Lock);
634 628 
635 - tp->wol_enabled = (wol->wolopts) ? 1 : 0;
629 + if (wol->wolopts)
630 + tp->features |= RTL_FEATURE_WOL;
631 + else
632 + tp->features &= ~RTL_FEATURE_WOL;
636 633 
637 634 spin_unlock_irq(&tp->lock);
638 635 
···
716 707 
717 708 /* This tweak comes straight from Realtek's driver. */
718 709 if ((speed == SPEED_100) && (duplex == DUPLEX_HALF) &&
719 - (tp->mac_version == RTL_GIGA_MAC_VER_13)) {
710 + ((tp->mac_version == RTL_GIGA_MAC_VER_13) ||
711 + (tp->mac_version == RTL_GIGA_MAC_VER_16))) {
720 712 auto_nego = ADVERTISE_100HALF | ADVERTISE_CSMA;
721 713 }
722 714 }
···
725 715 /* The 8100e/8101e do Fast Ethernet only. */
726 716 if ((tp->mac_version == RTL_GIGA_MAC_VER_13) ||
727 717 (tp->mac_version == RTL_GIGA_MAC_VER_14) ||
728 - (tp->mac_version == RTL_GIGA_MAC_VER_15)) {
718 + (tp->mac_version == RTL_GIGA_MAC_VER_15) ||
719 + (tp->mac_version == RTL_GIGA_MAC_VER_16)) {
729 720 if ((giga_ctrl & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)) &&
730 721 netif_msg_link(tp)) {
731 722 printk(KERN_INFO "%s: PHY does not support 1000Mbps.\n",
···
737 726 
738 727 auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
739 728 
740 - if (tp->mac_version == RTL_GIGA_MAC_VER_12) {
729 + if ((tp->mac_version == RTL_GIGA_MAC_VER_12) ||
730 + (tp->mac_version == RTL_GIGA_MAC_VER_17)) {
741 731 /* Vendor specific (0x1f) and reserved (0x0e) MII registers. */
742 732 mdio_write(ioaddr, 0x1f, 0x0000);
743 733 mdio_write(ioaddr, 0x0e, 0x0000);
···
1116 1104 */
1117 1105 const struct {
1118 1106 u32 mask;
1107 + u32 val;
1119 1108 int mac_version;
1120 1109 } mac_info[] = {
1121 - { 0x38800000, RTL_GIGA_MAC_VER_15 },
1122 - { 0x38000000, RTL_GIGA_MAC_VER_12 },
1123 - { 0x34000000, RTL_GIGA_MAC_VER_13 },
1124 - { 0x30800000, RTL_GIGA_MAC_VER_14 },
1125 - { 0x30000000, RTL_GIGA_MAC_VER_11 },
1126 - { 0x98000000, RTL_GIGA_MAC_VER_06 },
1127 - { 0x18000000, RTL_GIGA_MAC_VER_05 },
1128 - { 0x10000000, RTL_GIGA_MAC_VER_04 },
1129 - { 0x04000000, RTL_GIGA_MAC_VER_03 },
1130 - { 0x00800000, RTL_GIGA_MAC_VER_02 },
1131 - { 0x00000000, RTL_GIGA_MAC_VER_01 } /* Catch-all */
1110 + /* 8168C family. */
1111 + { 0x7c800000, 0x3c800000, RTL_GIGA_MAC_VER_18 },
1112 + { 0x7cf00000, 0x3c000000, RTL_GIGA_MAC_VER_19 },
1113 + { 0x7cf00000, 0x3c200000, RTL_GIGA_MAC_VER_20 },
1114 + { 0x7c800000, 0x3c000000, RTL_GIGA_MAC_VER_20 },
1115 + 
1116 + /* 8168B family. */
1117 + { 0x7cf00000, 0x38000000, RTL_GIGA_MAC_VER_12 },
1118 + { 0x7cf00000, 0x38500000, RTL_GIGA_MAC_VER_17 },
1119 + { 0x7c800000, 0x38000000, RTL_GIGA_MAC_VER_17 },
1120 + { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 },
1121 + 
1122 + /* 8101 family. */
1123 + { 0x7cf00000, 0x34000000, RTL_GIGA_MAC_VER_13 },
1124 + { 0x7cf00000, 0x34200000, RTL_GIGA_MAC_VER_16 },
1125 + { 0x7c800000, 0x34000000, RTL_GIGA_MAC_VER_16 },
1126 + /* FIXME: where did these entries come from ? -- FR */
1127 + { 0xfc800000, 0x38800000, RTL_GIGA_MAC_VER_15 },
1128 + { 0xfc800000, 0x30800000, RTL_GIGA_MAC_VER_14 },
1129 + 
1130 + /* 8110 family. */
1131 + { 0xfc800000, 0x98000000, RTL_GIGA_MAC_VER_06 },
1132 + { 0xfc800000, 0x18000000, RTL_GIGA_MAC_VER_05 },
1133 + { 0xfc800000, 0x10000000, RTL_GIGA_MAC_VER_04 },
1134 + { 0xfc800000, 0x04000000, RTL_GIGA_MAC_VER_03 },
1135 + { 0xfc800000, 0x00800000, RTL_GIGA_MAC_VER_02 },
1136 + { 0xfc800000, 0x00000000, RTL_GIGA_MAC_VER_01 },
1137 + 
1138 + { 0x00000000, 0x00000000, RTL_GIGA_MAC_VER_01 } /* Catch-all */
1132 1139 }, *p = mac_info;
1133 1140 u32 reg;
1134 1141 
1135 - reg = RTL_R32(TxConfig) & 0xfc800000;
1136 - while ((reg & p->mask) != p->mask)
1142 + reg = RTL_R32(TxConfig);
1143 + while ((reg & p->mask) != p->val)
1137 1144 p++;
1138 1145 tp->mac_version = p->mac_version;
1146 + 
1147 + if (p->mask == 0x00000000) {
1148 + struct pci_dev *pdev = tp->pci_dev;
1149 + 
1150 + dev_info(&pdev->dev, "unknown MAC (%08x)\n", reg);
1151 + }
1139 1152 }
1140 1153 
1141 1154 static void rtl8169_print_mac_version(struct rtl8169_private *tp)
···
1168 1131 dprintk("mac_version = 0x%02x\n", tp->mac_version);
1169 1132 }
1170 1133 
1171 - static void rtl8169_get_phy_version(struct rtl8169_private *tp,
1172 - void __iomem *ioaddr)
1173 - {
1174 - const struct {
1175 - u16 mask;
1176 - u16 set;
1177 - int phy_version;
1178 - } phy_info[] = {
1179 - { 0x000f, 0x0002, RTL_GIGA_PHY_VER_G },
1180 - { 0x000f, 0x0001, RTL_GIGA_PHY_VER_F },
1181 - { 0x000f, 0x0000, RTL_GIGA_PHY_VER_E },
1182 - { 0x0000, 0x0000, RTL_GIGA_PHY_VER_D } /* Catch-all */
1183 - }, *p = phy_info;
1134 + struct phy_reg {
1184 1135 u16 reg;
1136 + u16 val;
1137 + };
1185 1138 
1186 - reg = mdio_read(ioaddr, MII_PHYSID2) & 0xffff;
1187 - while ((reg & p->mask) != p->set)
1188 - p++;
1189 - tp->phy_version = p->phy_version;
1190 - }
1191 - 
1192 - static void rtl8169_print_phy_version(struct rtl8169_private *tp)
1139 + static void rtl_phy_write(void __iomem *ioaddr, struct phy_reg *regs, int len)
1193 1140 {
1194 - struct {
1195 - int version;
1196 - char *msg;
1197 - u32 reg;
1198 - } phy_print[] = {
1199 - { RTL_GIGA_PHY_VER_G, "RTL_GIGA_PHY_VER_G", 0x0002 },
1200 - { RTL_GIGA_PHY_VER_F, "RTL_GIGA_PHY_VER_F", 0x0001 },
1201 - { RTL_GIGA_PHY_VER_E, "RTL_GIGA_PHY_VER_E", 0x0000 },
1202 - { RTL_GIGA_PHY_VER_D, "RTL_GIGA_PHY_VER_D", 0x0000 },
1203 - { 0, NULL, 0x0000 }
1204 - }, *p;
1205 - 
1206 - for (p = phy_print; p->msg; p++) {
1207 - if (tp->phy_version == p->version) {
1208 - dprintk("phy_version == %s (%04x)\n", p->msg, p->reg);
1209 - return;
1210 - }
1141 + while (len-- > 0) {
1142 + mdio_write(ioaddr, regs->reg, regs->val);
1143 + regs++;
1211 1144 }
1212 - dprintk("phy_version == Unknown\n");
1213 1145 }
1214 1146 
1215 - static void rtl8169_hw_phy_config(struct net_device *dev)
1147 + static void rtl8169s_hw_phy_config(void __iomem *ioaddr)
1216 1148 {
1217 - struct rtl8169_private *tp = netdev_priv(dev);
1218 - void __iomem *ioaddr = tp->mmio_addr;
1219 1149 struct {
1220 1150 u16 regs[5]; /* Beware of bit-sign propagation */
1221 1151 } phy_magic[5] = { {
···
1215 1211 }, *p = phy_magic;
1216 1212 unsigned int i;
1217 1213 
1218 - rtl8169_print_mac_version(tp);
1219 - rtl8169_print_phy_version(tp);
1220 - 
1221 - if (tp->mac_version <= RTL_GIGA_MAC_VER_01)
1222 - return;
1223 - if (tp->phy_version >= RTL_GIGA_PHY_VER_H)
1224 - return;
1225 - 
1226 - dprintk("MAC version != 0 && PHY version == 0 or 1\n");
1227 - dprintk("Do final_reg2.cfg\n");
1228 - 
1229 - /* Shazam ! */
1230 - 
1231 - if (tp->mac_version == RTL_GIGA_MAC_VER_04) {
1232 - mdio_write(ioaddr, 31, 0x0002);
1233 - mdio_write(ioaddr, 1, 0x90d0);
1234 - mdio_write(ioaddr, 31, 0x0000);
1235 - return;
1236 - }
1237 - 
1238 - if ((tp->mac_version != RTL_GIGA_MAC_VER_02) &&
1239 - (tp->mac_version != RTL_GIGA_MAC_VER_03))
1240 - return;
1241 - 
1242 - mdio_write(ioaddr, 31, 0x0001); //w 31 2 0 1
1243 - mdio_write(ioaddr, 21, 0x1000); //w 21 15 0 1000
1244 - mdio_write(ioaddr, 24, 0x65c7); //w 24 15 0 65c7
1214 + mdio_write(ioaddr, 0x1f, 0x0001); //w 31 2 0 1
1215 + mdio_write(ioaddr, 0x15, 0x1000); //w 21 15 0 1000
1216 + mdio_write(ioaddr, 0x18, 0x65c7); //w 24 15 0 65c7
1245 1217 rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 0); //w 4 11 11 0
1246 1218 
1247 1219 for (i = 0; i < ARRAY_SIZE(phy_magic); i++, p++) {
···
1230 1250 rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 1); //w 4 11 11 1
1231 1251 rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 0); //w 4 11 11 0
1232 1252 }
1233 - mdio_write(ioaddr, 31, 0x0000); //w 31 2 0 0
1253 + mdio_write(ioaddr, 0x1f, 0x0000); //w 31 2 0 0
1254 + }
1255 + 
1256 + static void rtl8169sb_hw_phy_config(void __iomem *ioaddr)
1257 + {
1258 + struct phy_reg phy_reg_init[] = {
1259 + { 0x1f, 0x0002 },
1260 + { 0x01, 0x90d0 },
1261 + { 0x1f, 0x0000 }
1262 + };
1263 + 
1264 + rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1265 + }
1266 + static void rtl8168b_hw_phy_config(void __iomem *ioaddr)
1267 + {
1268 + struct phy_reg phy_reg_init[] = {
1269 + { 0x1f, 0x0000 },
1270 + { 0x10, 0xf41b },
1271 + { 0x1f, 0x0000 }
1272 + };
1273 + 
1274 + rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1275 + }
1276 + 
1277 + static void rtl8168cp_hw_phy_config(void __iomem *ioaddr)
1278 + {
1279 + struct phy_reg phy_reg_init[] = {
1280 + { 0x1f, 0x0000 },
1281 + { 0x1d, 0x0f00 },
1282 + { 0x1f, 0x0002 },
1283 + { 0x0c, 0x1ec8 },
1284 + { 0x1f, 0x0000 }
1285 + };
1286 + 
1287 + rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1288 + }
1289 + 
1290 + static void rtl8168c_hw_phy_config(void __iomem *ioaddr)
1291 + {
1292 + struct phy_reg phy_reg_init[] = {
1293 + { 0x1f, 0x0001 },
1294 + { 0x12, 0x2300 },
1295 + { 0x1f, 0x0002 },
1296 + { 0x00, 0x88d4 },
1297 + { 0x01, 0x82b1 },
1298 + { 0x03, 0x7002 },
1299 + { 0x08, 0x9e30 },
1300 + { 0x09, 0x01f0 },
1301 + { 0x0a, 0x5500 },
1302 + { 0x0c, 0x00c8 },
1303 + { 0x1f, 0x0003 },
1304 + { 0x12, 0xc096 },
1305 + { 0x16, 0x000a },
1306 + { 0x1f, 0x0000 }
1307 + };
1308 + 
1309 + rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1310 + }
1311 + 
1312 + static void rtl8168cx_hw_phy_config(void __iomem *ioaddr)
1313 + {
1314 + struct phy_reg phy_reg_init[] = {
1315 + { 0x1f, 0x0000 },
1316 + { 0x12, 0x2300 },
1317 + { 0x1f, 0x0003 },
1318 + { 0x16, 0x0f0a },
1319 + { 0x1f, 0x0000 },
1320 + { 0x1f, 0x0002 },
1321 + { 0x0c, 0x7eb8 },
1322 + { 0x1f, 0x0000 }
1323 + };
1324 + 
1325 + rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1326 + }
1327 + 
1328 + static void rtl_hw_phy_config(struct net_device *dev)
1329 + {
1330 + struct
rtl8169_private *tp = netdev_priv(dev); 1331 + void __iomem *ioaddr = tp->mmio_addr; 1332 + 1333 + rtl8169_print_mac_version(tp); 1334 + 1335 + switch (tp->mac_version) { 1336 + case RTL_GIGA_MAC_VER_01: 1337 + break; 1338 + case RTL_GIGA_MAC_VER_02: 1339 + case RTL_GIGA_MAC_VER_03: 1340 + rtl8169s_hw_phy_config(ioaddr); 1341 + break; 1342 + case RTL_GIGA_MAC_VER_04: 1343 + rtl8169sb_hw_phy_config(ioaddr); 1344 + break; 1345 + case RTL_GIGA_MAC_VER_11: 1346 + case RTL_GIGA_MAC_VER_12: 1347 + case RTL_GIGA_MAC_VER_17: 1348 + rtl8168b_hw_phy_config(ioaddr); 1349 + break; 1350 + case RTL_GIGA_MAC_VER_18: 1351 + rtl8168cp_hw_phy_config(ioaddr); 1352 + break; 1353 + case RTL_GIGA_MAC_VER_19: 1354 + rtl8168c_hw_phy_config(ioaddr); 1355 + break; 1356 + case RTL_GIGA_MAC_VER_20: 1357 + rtl8168cx_hw_phy_config(ioaddr); 1358 + break; 1359 + default: 1360 + break; 1361 + } 1234 1362 } 1235 1363 1236 1364 static void rtl8169_phy_timer(unsigned long __opaque) ··· 1350 1262 unsigned long timeout = RTL8169_PHY_TIMEOUT; 1351 1263 1352 1264 assert(tp->mac_version > RTL_GIGA_MAC_VER_01); 1353 - assert(tp->phy_version < RTL_GIGA_PHY_VER_H); 1354 1265 1355 1266 if (!(tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL)) 1356 1267 return; ··· 1384 1297 struct rtl8169_private *tp = netdev_priv(dev); 1385 1298 struct timer_list *timer = &tp->timer; 1386 1299 1387 - if ((tp->mac_version <= RTL_GIGA_MAC_VER_01) || 1388 - (tp->phy_version >= RTL_GIGA_PHY_VER_H)) 1300 + if (tp->mac_version <= RTL_GIGA_MAC_VER_01) 1389 1301 return; 1390 1302 1391 1303 del_timer_sync(timer); ··· 1395 1309 struct rtl8169_private *tp = netdev_priv(dev); 1396 1310 struct timer_list *timer = &tp->timer; 1397 1311 1398 - if ((tp->mac_version <= RTL_GIGA_MAC_VER_01) || 1399 - (tp->phy_version >= RTL_GIGA_PHY_VER_H)) 1312 + if (tp->mac_version <= RTL_GIGA_MAC_VER_01) 1400 1313 return; 1401 1314 1402 1315 mod_timer(timer, jiffies + RTL8169_PHY_TIMEOUT); ··· 1447 1362 { 1448 1363 void __iomem *ioaddr = tp->mmio_addr; 1449 1364 1450 - rtl8169_hw_phy_config(dev); 1365 + rtl_hw_phy_config(dev); 1451 1366 1452 1367 dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); 1453 1368 RTL_W8(0x82, 0x01); ··· 1542 1457 unsigned int align; 1543 1458 u16 intr_event; 1544 1459 u16 napi_event; 1460 + unsigned msi; 1545 1461 } rtl_cfg_infos [] = { 1546 1462 [RTL_CFG_0] = { 1547 1463 .hw_start = rtl_hw_start_8169, ··· 1550 1464 .align = 0, 1551 1465 .intr_event = SYSErr | LinkChg | RxOverflow | 1552 1466 RxFIFOOver | TxErr | TxOK | RxOK | RxErr, 1553 - .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow 1467 + .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow, 1468 + .msi = 0 1554 1469 }, 1555 1470 [RTL_CFG_1] = { 1556 1471 .hw_start = rtl_hw_start_8168, ··· 1559 1472 .align = 8, 1560 1473 .intr_event = SYSErr | LinkChg | RxOverflow | 1561 1474 TxErr | TxOK | RxOK | RxErr, 1562 - .napi_event = TxErr | TxOK | RxOK | RxOverflow 1475 + .napi_event = TxErr | TxOK | RxOK | RxOverflow, 1476 + .msi = RTL_FEATURE_MSI 1563 1477 }, 1564 1478 [RTL_CFG_2] = { 1565 1479 .hw_start = rtl_hw_start_8101, ··· 1568 1480 .align = 8, 1569 1481 .intr_event = SYSErr | LinkChg | RxOverflow | PCSTimeout | 1570 1482 RxFIFOOver | TxErr | TxOK | RxOK | RxErr, 1571 - .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow 1483 + .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow, 1484 + .msi = RTL_FEATURE_MSI 1572 1485 } 1573 1486 }; 1487 + 1488 + /* Cfg9346_Unlock assumed. 
*/ 1489 + static unsigned rtl_try_msi(struct pci_dev *pdev, void __iomem *ioaddr, 1490 + const struct rtl_cfg_info *cfg) 1491 + { 1492 + unsigned msi = 0; 1493 + u8 cfg2; 1494 + 1495 + cfg2 = RTL_R8(Config2) & ~MSIEnable; 1496 + if (cfg->msi) { 1497 + if (pci_enable_msi(pdev)) { 1498 + dev_info(&pdev->dev, "no MSI. Back to INTx.\n"); 1499 + } else { 1500 + cfg2 |= MSIEnable; 1501 + msi = RTL_FEATURE_MSI; 1502 + } 1503 + } 1504 + RTL_W8(Config2, cfg2); 1505 + return msi; 1506 + } 1507 + 1508 + static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp) 1509 + { 1510 + if (tp->features & RTL_FEATURE_MSI) { 1511 + pci_disable_msi(pdev); 1512 + tp->features &= ~RTL_FEATURE_MSI; 1513 + } 1514 + } 1574 1515 1575 1516 static int __devinit 1576 1517 rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ··· 1713 1596 1714 1597 /* Identify chip attached to board */ 1715 1598 rtl8169_get_mac_version(tp, ioaddr); 1716 - rtl8169_get_phy_version(tp, ioaddr); 1717 1599 1718 1600 rtl8169_print_mac_version(tp); 1719 - rtl8169_print_phy_version(tp); 1720 1601 1721 1602 for (i = ARRAY_SIZE(rtl_chip_info) - 1; i >= 0; i--) { 1722 1603 if (tp->mac_version == rtl_chip_info[i].mac_version) ··· 1734 1619 RTL_W8(Cfg9346, Cfg9346_Unlock); 1735 1620 RTL_W8(Config1, RTL_R8(Config1) | PMEnable); 1736 1621 RTL_W8(Config5, RTL_R8(Config5) & PMEStatus); 1622 + tp->features |= rtl_try_msi(pdev, ioaddr, cfg); 1737 1623 RTL_W8(Cfg9346, Cfg9346_Lock); 1738 1624 1739 1625 if (RTL_R8(PHYstatus) & TBI_Enable) { ··· 1802 1686 1803 1687 rc = register_netdev(dev); 1804 1688 if (rc < 0) 1805 - goto err_out_unmap_5; 1689 + goto err_out_msi_5; 1806 1690 1807 1691 pci_set_drvdata(pdev, dev); 1808 1692 ··· 1825 1709 out: 1826 1710 return rc; 1827 1711 1828 - err_out_unmap_5: 1712 + err_out_msi_5: 1713 + rtl_disable_msi(pdev, tp); 1829 1714 iounmap(ioaddr); 1830 1715 err_out_free_res_4: 1831 1716 pci_release_regions(pdev); ··· 1847 1730 flush_scheduled_work(); 1848 1731 1849 1732 unregister_netdev(dev); 1733 + rtl_disable_msi(pdev, tp); 1850 1734 rtl8169_release_board(pdev, dev, tp->mmio_addr); 1851 1735 pci_set_drvdata(pdev, NULL); 1852 1736 } ··· 1891 1773 1892 1774 smp_mb(); 1893 1775 1894 - retval = request_irq(dev->irq, rtl8169_interrupt, IRQF_SHARED, 1776 + retval = request_irq(dev->irq, rtl8169_interrupt, 1777 + (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED, 1895 1778 dev->name, dev); 1896 1779 if (retval < 0) 1897 1780 goto err_release_ring_2; ··· 2052 1933 2053 1934 if ((tp->mac_version == RTL_GIGA_MAC_VER_02) || 2054 1935 (tp->mac_version == RTL_GIGA_MAC_VER_03)) { 2055 - dprintk(KERN_INFO PFX "Set MAC Reg C+CR Offset 0xE0. " 1936 + dprintk("Set MAC Reg C+CR Offset 0xE0. 
" 2056 1937 "Bit-3 and bit-14 MUST be 1\n"); 2057 1938 tp->cp_cmd |= (1 << 14); 2058 1939 } ··· 2148 2029 void __iomem *ioaddr = tp->mmio_addr; 2149 2030 struct pci_dev *pdev = tp->pci_dev; 2150 2031 2151 - if (tp->mac_version == RTL_GIGA_MAC_VER_13) { 2032 + if ((tp->mac_version == RTL_GIGA_MAC_VER_13) || 2033 + (tp->mac_version == RTL_GIGA_MAC_VER_16)) { 2152 2034 pci_write_config_word(pdev, 0x68, 0x00); 2153 2035 pci_write_config_word(pdev, 0x69, 0x08); 2154 2036 } ··· 2379 2259 dev_kfree_skb(skb); 2380 2260 tx_skb->skb = NULL; 2381 2261 } 2382 - tp->stats.tx_dropped++; 2262 + tp->dev->stats.tx_dropped++; 2383 2263 } 2384 2264 } 2385 2265 tp->cur_tx = tp->dirty_tx = 0; ··· 2430 2310 ret = rtl8169_open(dev); 2431 2311 if (unlikely(ret < 0)) { 2432 2312 if (net_ratelimit() && netif_msg_drv(tp)) { 2433 - printk(PFX KERN_ERR "%s: reinit failure (status = %d)." 2313 + printk(KERN_ERR PFX "%s: reinit failure (status = %d)." 2434 2314 " Rescheduling.\n", dev->name, ret); 2435 2315 } 2436 2316 rtl8169_schedule_work(dev, rtl8169_reinit_task); ··· 2460 2340 rtl8169_init_ring_indexes(tp); 2461 2341 rtl_hw_start(dev); 2462 2342 netif_wake_queue(dev); 2343 + rtl8169_check_link_status(dev, tp, tp->mmio_addr); 2463 2344 } else { 2464 2345 if (net_ratelimit() && netif_msg_intr(tp)) { 2465 - printk(PFX KERN_EMERG "%s: Rx buffers shortage\n", 2346 + printk(KERN_EMERG PFX "%s: Rx buffers shortage\n", 2466 2347 dev->name); 2467 2348 } 2468 2349 rtl8169_schedule_work(dev, rtl8169_reset_task); ··· 2617 2496 netif_stop_queue(dev); 2618 2497 ret = NETDEV_TX_BUSY; 2619 2498 err_update_stats: 2620 - tp->stats.tx_dropped++; 2499 + dev->stats.tx_dropped++; 2621 2500 goto out; 2622 2501 } 2623 2502 ··· 2692 2571 if (status & DescOwn) 2693 2572 break; 2694 2573 2695 - tp->stats.tx_bytes += len; 2696 - tp->stats.tx_packets++; 2574 + dev->stats.tx_bytes += len; 2575 + dev->stats.tx_packets++; 2697 2576 2698 2577 rtl8169_unmap_tx_skb(tp->pci_dev, tx_skb, tp->TxDescArray + entry); 2699 2578 ··· 2793 2672 "%s: Rx ERROR. status = %08x\n", 2794 2673 dev->name, status); 2795 2674 } 2796 - tp->stats.rx_errors++; 2675 + dev->stats.rx_errors++; 2797 2676 if (status & (RxRWT | RxRUNT)) 2798 - tp->stats.rx_length_errors++; 2677 + dev->stats.rx_length_errors++; 2799 2678 if (status & RxCRC) 2800 - tp->stats.rx_crc_errors++; 2679 + dev->stats.rx_crc_errors++; 2801 2680 if (status & RxFOVF) { 2802 2681 rtl8169_schedule_work(dev, rtl8169_reset_task); 2803 - tp->stats.rx_fifo_errors++; 2682 + dev->stats.rx_fifo_errors++; 2804 2683 } 2805 2684 rtl8169_mark_to_asic(desc, tp->rx_buf_sz); 2806 2685 } else { ··· 2815 2694 * sized frames. 2816 2695 */ 2817 2696 if (unlikely(rtl8169_fragmented_frame(status))) { 2818 - tp->stats.rx_dropped++; 2819 - tp->stats.rx_length_errors++; 2697 + dev->stats.rx_dropped++; 2698 + dev->stats.rx_length_errors++; 2820 2699 rtl8169_mark_to_asic(desc, tp->rx_buf_sz); 2821 2700 continue; 2822 2701 } ··· 2840 2719 rtl8169_rx_skb(skb); 2841 2720 2842 2721 dev->last_rx = jiffies; 2843 - tp->stats.rx_bytes += pkt_size; 2844 - tp->stats.rx_packets++; 2722 + dev->stats.rx_bytes += pkt_size; 2723 + dev->stats.rx_packets++; 2845 2724 } 2846 2725 2847 2726 /* Work around for AMD plateform. */ ··· 3002 2881 rtl8169_asic_down(ioaddr); 3003 2882 3004 2883 /* Update the error counts. 
*/ 3005 - tp->stats.rx_missed_errors += RTL_R32(RxMissed); 2884 + dev->stats.rx_missed_errors += RTL_R32(RxMissed); 3006 2885 RTL_W32(RxMissed, 0); 3007 2886 3008 2887 spin_unlock_irq(&tp->lock); ··· 3105 2984 (tp->mac_version == RTL_GIGA_MAC_VER_12) || 3106 2985 (tp->mac_version == RTL_GIGA_MAC_VER_13) || 3107 2986 (tp->mac_version == RTL_GIGA_MAC_VER_14) || 3108 - (tp->mac_version == RTL_GIGA_MAC_VER_15)) { 2987 + (tp->mac_version == RTL_GIGA_MAC_VER_15) || 2988 + (tp->mac_version == RTL_GIGA_MAC_VER_16) || 2989 + (tp->mac_version == RTL_GIGA_MAC_VER_17)) { 3109 2990 mc_filter[0] = 0xffffffff; 3110 2991 mc_filter[1] = 0xffffffff; 3111 2992 } ··· 3134 3011 3135 3012 if (netif_running(dev)) { 3136 3013 spin_lock_irqsave(&tp->lock, flags); 3137 - tp->stats.rx_missed_errors += RTL_R32(RxMissed); 3014 + dev->stats.rx_missed_errors += RTL_R32(RxMissed); 3138 3015 RTL_W32(RxMissed, 0); 3139 3016 spin_unlock_irqrestore(&tp->lock, flags); 3140 3017 } 3141 3018 3142 - return &tp->stats; 3019 + return &dev->stats; 3143 3020 } 3144 3021 3145 3022 #ifdef CONFIG_PM ··· 3160 3037 3161 3038 rtl8169_asic_down(ioaddr); 3162 3039 3163 - tp->stats.rx_missed_errors += RTL_R32(RxMissed); 3040 + dev->stats.rx_missed_errors += RTL_R32(RxMissed); 3164 3041 RTL_W32(RxMissed, 0); 3165 3042 3166 3043 spin_unlock_irq(&tp->lock); 3167 3044 3168 3045 out_pci_suspend: 3169 3046 pci_save_state(pdev); 3170 - pci_enable_wake(pdev, pci_choose_state(pdev, state), tp->wol_enabled); 3047 + pci_enable_wake(pdev, pci_choose_state(pdev, state), 3048 + (tp->features & RTL_FEATURE_WOL) ? 1 : 0); 3171 3049 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 3172 3050 3173 3051 return 0;
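The rtl8169_get_mac_version() rework above is worth spelling out: the old probe reduced TxConfig to a fixed bit set and matched the first entry whose mask bits were all set, which could not distinguish the new 8168C/8101E variants. The new mac_info[] pairs a per-entry mask with an expected value; entries are ordered most-specific first, the first entry where (reg & mask) == val wins, and an all-zero mask acts as the catch-all (which is also where the "unknown MAC" message fires). A minimal user-space sketch of the same first-match mask/value lookup (the table values below are illustrative only, not the driver's):

    #include <stdio.h>
    #include <stdint.h>

    struct chip_id {
        uint32_t mask;      /* which register bits to compare */
        uint32_t val;       /* expected value of those bits */
        int version;
    };

    /* Most specific entries first; { 0, 0, ... } matches anything. */
    static const struct chip_id ids[] = {
        { 0x7cf00000, 0x3c200000, 20 },
        { 0x7c800000, 0x3c000000, 20 },  /* looser fallback */
        { 0x00000000, 0x00000000, -1 },  /* catch-all: unknown */
    };

    static int identify(uint32_t reg)
    {
        const struct chip_id *p = ids;

        /* Safe without a bounds check because the catch-all always matches. */
        while ((reg & p->mask) != p->val)
            p++;
        return p->version;
    }

    int main(void)
    {
        printf("version = %d\n", identify(0x3c218000));
        return 0;
    }
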
+2 -2
drivers/net/sky2.c
··· 4271 4271 del_timer_sync(&hw->watchdog_timer);
4272 4272 cancel_work_sync(&hw->restart_work);
4273 4273
4274 - for (i = hw->ports; i >= 0; --i)
4274 + for (i = hw->ports-1; i >= 0; --i)
4275 4275 unregister_netdev(hw->dev[i]);
4276 4276
4277 4277 sky2_write32(hw, B0_IMSK, 0);
··· 4289 4289 pci_release_regions(pdev);
4290 4290 pci_disable_device(pdev);
4291 4291
4292 - for (i = hw->ports; i >= 0; --i)
4292 + for (i = hw->ports-1; i >= 0; --i)
4293 4293 free_netdev(hw->dev[i]);
4294 4294
4295 4295 iounmap(hw->regs);
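The sky2 fix above is a plain off-by-one: hw->dev[] holds hw->ports entries, so the valid indices are 0 through hw->ports - 1, and starting the teardown loops at i = hw->ports dereferenced one element past the end of the array. A trivial sketch of the corrected bound (hypothetical two-port array standing in for hw->dev[]):

    #include <stdio.h>

    int main(void)
    {
        const char *dev[2] = { "eth0", "eth1" };  /* hw->dev[] analogue */
        int ports = 2;
        int i;

        /* Count down from ports - 1; starting at i = ports reads past the end. */
        for (i = ports - 1; i >= 0; --i)
            printf("tearing down %s\n", dev[i]);
        return 0;
    }
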
+1 -327
include/linux/mv643xx.h
··· 14 14 #define __ASM_MV643XX_H
15 15
16 16 #include <asm/types.h>
17 + #include <linux/mv643xx_eth.h>
17 18
18 19 /****************************************/
19 20 /* Processor Address Space */
··· 659 658 /* Ethernet Unit Registers */
660 659 /****************************************/
661 660
662 - #define MV643XX_ETH_SHARED_REGS 0x2000
663 - #define MV643XX_ETH_SHARED_REGS_SIZE 0x2000
664 -
665 - #define MV643XX_ETH_PHY_ADDR_REG 0x2000
666 - #define MV643XX_ETH_SMI_REG 0x2004
667 - #define MV643XX_ETH_UNIT_DEFAULT_ADDR_REG 0x2008
668 - #define MV643XX_ETH_UNIT_DEFAULTID_REG 0x200c
669 - #define MV643XX_ETH_UNIT_INTERRUPT_CAUSE_REG 0x2080
670 - #define MV643XX_ETH_UNIT_INTERRUPT_MASK_REG 0x2084
671 - #define MV643XX_ETH_UNIT_INTERNAL_USE_REG 0x24fc
672 - #define MV643XX_ETH_UNIT_ERROR_ADDR_REG 0x2094
673 - #define MV643XX_ETH_BAR_0 0x2200
674 - #define MV643XX_ETH_BAR_1 0x2208
675 - #define MV643XX_ETH_BAR_2 0x2210
676 - #define MV643XX_ETH_BAR_3 0x2218
677 - #define MV643XX_ETH_BAR_4 0x2220
678 - #define MV643XX_ETH_BAR_5 0x2228
679 - #define MV643XX_ETH_SIZE_REG_0 0x2204
680 - #define MV643XX_ETH_SIZE_REG_1 0x220c
681 - #define MV643XX_ETH_SIZE_REG_2 0x2214
682 - #define MV643XX_ETH_SIZE_REG_3 0x221c
683 - #define MV643XX_ETH_SIZE_REG_4 0x2224
684 - #define MV643XX_ETH_SIZE_REG_5 0x222c
685 - #define MV643XX_ETH_HEADERS_RETARGET_BASE_REG 0x2230
686 - #define MV643XX_ETH_HEADERS_RETARGET_CONTROL_REG 0x2234
687 - #define MV643XX_ETH_HIGH_ADDR_REMAP_REG_0 0x2280
688 - #define MV643XX_ETH_HIGH_ADDR_REMAP_REG_1 0x2284
689 - #define MV643XX_ETH_HIGH_ADDR_REMAP_REG_2 0x2288
690 - #define MV643XX_ETH_HIGH_ADDR_REMAP_REG_3 0x228c
691 - #define MV643XX_ETH_BASE_ADDR_ENABLE_REG 0x2290
692 - #define MV643XX_ETH_ACCESS_PROTECTION_REG(port) (0x2294 + (port<<2))
693 - #define MV643XX_ETH_MIB_COUNTERS_BASE(port) (0x3000 + (port<<7))
694 - #define MV643XX_ETH_PORT_CONFIG_REG(port) (0x2400 + (port<<10))
695 - #define MV643XX_ETH_PORT_CONFIG_EXTEND_REG(port) (0x2404 + (port<<10))
696 - #define MV643XX_ETH_MII_SERIAL_PARAMETRS_REG(port) (0x2408 + (port<<10))
697 - #define MV643XX_ETH_GMII_SERIAL_PARAMETRS_REG(port) (0x240c + (port<<10))
698 - #define MV643XX_ETH_VLAN_ETHERTYPE_REG(port) (0x2410 + (port<<10))
699 - #define MV643XX_ETH_MAC_ADDR_LOW(port) (0x2414 + (port<<10))
700 - #define MV643XX_ETH_MAC_ADDR_HIGH(port) (0x2418 + (port<<10))
701 - #define MV643XX_ETH_SDMA_CONFIG_REG(port) (0x241c + (port<<10))
702 - #define MV643XX_ETH_DSCP_0(port) (0x2420 + (port<<10))
703 - #define MV643XX_ETH_DSCP_1(port) (0x2424 + (port<<10))
704 - #define MV643XX_ETH_DSCP_2(port) (0x2428 + (port<<10))
705 - #define MV643XX_ETH_DSCP_3(port) (0x242c + (port<<10))
706 - #define MV643XX_ETH_DSCP_4(port) (0x2430 + (port<<10))
707 - #define MV643XX_ETH_DSCP_5(port) (0x2434 + (port<<10))
708 - #define MV643XX_ETH_DSCP_6(port) (0x2438 + (port<<10))
709 - #define MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port) (0x243c + (port<<10))
710 - #define MV643XX_ETH_VLAN_PRIORITY_TAG_TO_PRIORITY(port) (0x2440 + (port<<10))
711 - #define MV643XX_ETH_PORT_STATUS_REG(port) (0x2444 + (port<<10))
712 - #define MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port) (0x2448 + (port<<10))
713 - #define MV643XX_ETH_TX_QUEUE_FIXED_PRIORITY(port) (0x244c + (port<<10))
714 - #define MV643XX_ETH_PORT_TX_TOKEN_BUCKET_RATE_CONFIG(port) (0x2450 + (port<<10))
715 - #define MV643XX_ETH_MAXIMUM_TRANSMIT_UNIT(port) (0x2458 + (port<<10))
716 - #define MV643XX_ETH_PORT_MAXIMUM_TOKEN_BUCKET_SIZE(port) (0x245c + (port<<10))
717 - #define MV643XX_ETH_INTERRUPT_CAUSE_REG(port) (0x2460 + (port<<10))
718 - #define MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port) (0x2464 + (port<<10))
719 - #define MV643XX_ETH_INTERRUPT_MASK_REG(port) (0x2468 + (port<<10))
720 - #define MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port) (0x246c + (port<<10))
721 - #define MV643XX_ETH_RX_FIFO_URGENT_THRESHOLD_REG(port) (0x2470 + (port<<10))
722 - #define MV643XX_ETH_TX_FIFO_URGENT_THRESHOLD_REG(port) (0x2474 + (port<<10))
723 - #define MV643XX_ETH_RX_MINIMAL_FRAME_SIZE_REG(port) (0x247c + (port<<10))
724 - #define MV643XX_ETH_RX_DISCARDED_FRAMES_COUNTER(port) (0x2484 + (port<<10))
725 - #define MV643XX_ETH_PORT_DEBUG_0_REG(port) (0x248c + (port<<10))
726 - #define MV643XX_ETH_PORT_DEBUG_1_REG(port) (0x2490 + (port<<10))
727 - #define MV643XX_ETH_PORT_INTERNAL_ADDR_ERROR_REG(port) (0x2494 + (port<<10))
728 - #define MV643XX_ETH_INTERNAL_USE_REG(port) (0x24fc + (port<<10))
729 - #define MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port) (0x2680 + (port<<10))
730 - #define MV643XX_ETH_CURRENT_SERVED_TX_DESC_PTR(port) (0x2684 + (port<<10))
731 - #define MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(port) (0x260c + (port<<10))
732 - #define MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_1(port) (0x261c + (port<<10))
733 - #define MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_2(port) (0x262c + (port<<10))
734 - #define MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_3(port) (0x263c + (port<<10))
735 - #define MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_4(port) (0x264c + (port<<10))
736 - #define MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_5(port) (0x265c + (port<<10))
737 - #define MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_6(port) (0x266c + (port<<10))
738 - #define MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_7(port) (0x267c + (port<<10))
739 - #define MV643XX_ETH_TX_CURRENT_QUEUE_DESC_PTR_0(port) (0x26c0 + (port<<10))
740 - #define MV643XX_ETH_TX_CURRENT_QUEUE_DESC_PTR_1(port) (0x26c4 + (port<<10))
741 - #define MV643XX_ETH_TX_CURRENT_QUEUE_DESC_PTR_2(port) (0x26c8 + (port<<10))
742 - #define MV643XX_ETH_TX_CURRENT_QUEUE_DESC_PTR_3(port) (0x26cc + (port<<10))
743 - #define MV643XX_ETH_TX_CURRENT_QUEUE_DESC_PTR_4(port) (0x26d0 + (port<<10))
744 - #define MV643XX_ETH_TX_CURRENT_QUEUE_DESC_PTR_5(port) (0x26d4 + (port<<10))
745 - #define MV643XX_ETH_TX_CURRENT_QUEUE_DESC_PTR_6(port) (0x26d8 + (port<<10))
746 - #define MV643XX_ETH_TX_CURRENT_QUEUE_DESC_PTR_7(port) (0x26dc + (port<<10))
747 - #define MV643XX_ETH_TX_QUEUE_0_TOKEN_BUCKET_COUNT(port) (0x2700 + (port<<10))
748 - #define MV643XX_ETH_TX_QUEUE_1_TOKEN_BUCKET_COUNT(port) (0x2710 + (port<<10))
749 - #define MV643XX_ETH_TX_QUEUE_2_TOKEN_BUCKET_COUNT(port) (0x2720 + (port<<10))
750 - #define MV643XX_ETH_TX_QUEUE_3_TOKEN_BUCKET_COUNT(port) (0x2730 + (port<<10))
751 - #define MV643XX_ETH_TX_QUEUE_4_TOKEN_BUCKET_COUNT(port) (0x2740 + (port<<10))
752 - #define MV643XX_ETH_TX_QUEUE_5_TOKEN_BUCKET_COUNT(port) (0x2750 + (port<<10))
753 - #define MV643XX_ETH_TX_QUEUE_6_TOKEN_BUCKET_COUNT(port) (0x2760 + (port<<10))
754 - #define MV643XX_ETH_TX_QUEUE_7_TOKEN_BUCKET_COUNT(port) (0x2770 + (port<<10))
755 - #define MV643XX_ETH_TX_QUEUE_0_TOKEN_BUCKET_CONFIG(port) (0x2704 + (port<<10))
756 - #define MV643XX_ETH_TX_QUEUE_1_TOKEN_BUCKET_CONFIG(port) (0x2714 + (port<<10))
757 - #define MV643XX_ETH_TX_QUEUE_2_TOKEN_BUCKET_CONFIG(port) (0x2724 + (port<<10))
758 - #define MV643XX_ETH_TX_QUEUE_3_TOKEN_BUCKET_CONFIG(port) (0x2734 + (port<<10))
759 - #define MV643XX_ETH_TX_QUEUE_4_TOKEN_BUCKET_CONFIG(port) (0x2744 + (port<<10))
760 - #define MV643XX_ETH_TX_QUEUE_5_TOKEN_BUCKET_CONFIG(port) (0x2754 + (port<<10))
761 - #define MV643XX_ETH_TX_QUEUE_6_TOKEN_BUCKET_CONFIG(port) (0x2764 + (port<<10))
762 - #define MV643XX_ETH_TX_QUEUE_7_TOKEN_BUCKET_CONFIG(port) (0x2774 + (port<<10))
763 - #define MV643XX_ETH_TX_QUEUE_0_ARBITER_CONFIG(port) (0x2708 + (port<<10))
764 - #define MV643XX_ETH_TX_QUEUE_1_ARBITER_CONFIG(port) (0x2718 + (port<<10))
765 - #define MV643XX_ETH_TX_QUEUE_2_ARBITER_CONFIG(port) (0x2728 + (port<<10))
766 - #define MV643XX_ETH_TX_QUEUE_3_ARBITER_CONFIG(port) (0x2738 + (port<<10))
767 - #define MV643XX_ETH_TX_QUEUE_4_ARBITER_CONFIG(port) (0x2748 + (port<<10))
768 - #define MV643XX_ETH_TX_QUEUE_5_ARBITER_CONFIG(port) (0x2758 + (port<<10))
769 - #define MV643XX_ETH_TX_QUEUE_6_ARBITER_CONFIG(port) (0x2768 + (port<<10))
770 - #define MV643XX_ETH_TX_QUEUE_7_ARBITER_CONFIG(port) (0x2778 + (port<<10))
771 - #define MV643XX_ETH_PORT_TX_TOKEN_BUCKET_COUNT(port) (0x2780 + (port<<10))
772 - #define MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(port) (0x3400 + (port<<10))
773 - #define MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE(port) (0x3500 + (port<<10))
774 - #define MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE(port) (0x3600 + (port<<10))
775 -
776 661 /*******************************************/
777 662 /* CUNIT Registers */
778 663 /*******************************************/
··· 974 1087 u32 freq_n;
975 1088 u32 timeout; /* In milliseconds */
976 1089 u32 retries;
977 - };
978 -
979 - /* These macros describe Ethernet Port configuration reg (Px_cR) bits */
980 - #define MV643XX_ETH_UNICAST_NORMAL_MODE 0
981 - #define MV643XX_ETH_UNICAST_PROMISCUOUS_MODE (1<<0)
982 - #define MV643XX_ETH_DEFAULT_RX_QUEUE_0 0
983 - #define MV643XX_ETH_DEFAULT_RX_QUEUE_1 (1<<1)
984 - #define MV643XX_ETH_DEFAULT_RX_QUEUE_2 (1<<2)
985 - #define MV643XX_ETH_DEFAULT_RX_QUEUE_3 ((1<<2) | (1<<1))
986 - #define MV643XX_ETH_DEFAULT_RX_QUEUE_4 (1<<3)
987 - #define MV643XX_ETH_DEFAULT_RX_QUEUE_5 ((1<<3) | (1<<1))
988 - #define MV643XX_ETH_DEFAULT_RX_QUEUE_6 ((1<<3) | (1<<2))
989 - #define MV643XX_ETH_DEFAULT_RX_QUEUE_7 ((1<<3) | (1<<2) | (1<<1))
990 - #define MV643XX_ETH_DEFAULT_RX_ARP_QUEUE_0 0
991 - #define MV643XX_ETH_DEFAULT_RX_ARP_QUEUE_1 (1<<4)
992 - #define MV643XX_ETH_DEFAULT_RX_ARP_QUEUE_2 (1<<5)
993 - #define MV643XX_ETH_DEFAULT_RX_ARP_QUEUE_3 ((1<<5) | (1<<4))
994 - #define MV643XX_ETH_DEFAULT_RX_ARP_QUEUE_4 (1<<6)
995 - #define MV643XX_ETH_DEFAULT_RX_ARP_QUEUE_5 ((1<<6) | (1<<4))
996 - #define MV643XX_ETH_DEFAULT_RX_ARP_QUEUE_6 ((1<<6) | (1<<5))
997 - #define MV643XX_ETH_DEFAULT_RX_ARP_QUEUE_7 ((1<<6) | (1<<5) | (1<<4))
998 - #define MV643XX_ETH_RECEIVE_BC_IF_NOT_IP_OR_ARP 0
999 - #define MV643XX_ETH_REJECT_BC_IF_NOT_IP_OR_ARP (1<<7)
1000 - #define MV643XX_ETH_RECEIVE_BC_IF_IP 0
1001 - #define MV643XX_ETH_REJECT_BC_IF_IP (1<<8)
1002 - #define MV643XX_ETH_RECEIVE_BC_IF_ARP 0
1003 - #define MV643XX_ETH_REJECT_BC_IF_ARP (1<<9)
1004 - #define MV643XX_ETH_TX_AM_NO_UPDATE_ERROR_SUMMARY (1<<12)
1005 - #define MV643XX_ETH_CAPTURE_TCP_FRAMES_DIS 0
1006 - #define MV643XX_ETH_CAPTURE_TCP_FRAMES_EN (1<<14)
1007 - #define MV643XX_ETH_CAPTURE_UDP_FRAMES_DIS 0
1008 - #define MV643XX_ETH_CAPTURE_UDP_FRAMES_EN (1<<15)
1009 - #define MV643XX_ETH_DEFAULT_RX_TCP_QUEUE_0 0
1010 - #define MV643XX_ETH_DEFAULT_RX_TCP_QUEUE_1 (1<<16)
1011 - #define MV643XX_ETH_DEFAULT_RX_TCP_QUEUE_2 (1<<17)
1012 - #define MV643XX_ETH_DEFAULT_RX_TCP_QUEUE_3 ((1<<17) | (1<<16))
1013 - #define MV643XX_ETH_DEFAULT_RX_TCP_QUEUE_4 (1<<18)
1014 - #define MV643XX_ETH_DEFAULT_RX_TCP_QUEUE_5 ((1<<18) | (1<<16))
1015 - #define MV643XX_ETH_DEFAULT_RX_TCP_QUEUE_6 ((1<<18) | (1<<17))
1016 - #define MV643XX_ETH_DEFAULT_RX_TCP_QUEUE_7 ((1<<18) | (1<<17) | (1<<16))
1017 - #define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_0 0
1018 - #define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_1 (1<<19)
1019 - #define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_2 (1<<20)
1020 - #define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_3 ((1<<20) | (1<<19))
1021 - #define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_4 (1<<21)
1022 - #define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_5 ((1<<21) | (1<<19))
1023 - #define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_6 ((1<<21) | (1<<20))
1024 - #define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_7 ((1<<21) | (1<<20) | (1<<19))
1025 - #define MV643XX_ETH_DEFAULT_RX_BPDU_QUEUE_0 0
1026 - #define MV643XX_ETH_DEFAULT_RX_BPDU_QUEUE_1 (1<<22)
1027 - #define MV643XX_ETH_DEFAULT_RX_BPDU_QUEUE_2 (1<<23)
1028 - #define MV643XX_ETH_DEFAULT_RX_BPDU_QUEUE_3 ((1<<23) | (1<<22))
1029 - #define MV643XX_ETH_DEFAULT_RX_BPDU_QUEUE_4 (1<<24)
1030 - #define MV643XX_ETH_DEFAULT_RX_BPDU_QUEUE_5 ((1<<24) | (1<<22))
1031 - #define MV643XX_ETH_DEFAULT_RX_BPDU_QUEUE_6 ((1<<24) | (1<<23))
1032 - #define MV643XX_ETH_DEFAULT_RX_BPDU_QUEUE_7 ((1<<24) | (1<<23) | (1<<22))
1033 -
1034 - #define MV643XX_ETH_PORT_CONFIG_DEFAULT_VALUE \
1035 - MV643XX_ETH_UNICAST_NORMAL_MODE | \
1036 - MV643XX_ETH_DEFAULT_RX_QUEUE_0 | \
1037 - MV643XX_ETH_DEFAULT_RX_ARP_QUEUE_0 | \
1038 - MV643XX_ETH_RECEIVE_BC_IF_NOT_IP_OR_ARP | \
1039 - MV643XX_ETH_RECEIVE_BC_IF_IP | \
1040 - MV643XX_ETH_RECEIVE_BC_IF_ARP | \
1041 - MV643XX_ETH_CAPTURE_TCP_FRAMES_DIS | \
1042 - MV643XX_ETH_CAPTURE_UDP_FRAMES_DIS | \
1043 - MV643XX_ETH_DEFAULT_RX_TCP_QUEUE_0 | \
1044 - MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_0 | \
1045 - MV643XX_ETH_DEFAULT_RX_BPDU_QUEUE_0
1046 -
1047 - /* These macros describe Ethernet Port configuration extend reg (Px_cXR) bits*/
1048 - #define MV643XX_ETH_CLASSIFY_EN (1<<0)
1049 - #define MV643XX_ETH_SPAN_BPDU_PACKETS_AS_NORMAL 0
1050 - #define MV643XX_ETH_SPAN_BPDU_PACKETS_TO_RX_QUEUE_7 (1<<1)
1051 - #define MV643XX_ETH_PARTITION_DISABLE 0
1052 - #define MV643XX_ETH_PARTITION_ENABLE (1<<2)
1053 -
1054 - #define MV643XX_ETH_PORT_CONFIG_EXTEND_DEFAULT_VALUE \
1055 - MV643XX_ETH_SPAN_BPDU_PACKETS_AS_NORMAL | \
1056 - MV643XX_ETH_PARTITION_DISABLE
1057 -
1058 - /* These macros describe Ethernet Port Sdma configuration reg (SDCR) bits */
1059 - #define MV643XX_ETH_RIFB (1<<0)
1060 - #define MV643XX_ETH_RX_BURST_SIZE_1_64BIT 0
1061 - #define MV643XX_ETH_RX_BURST_SIZE_2_64BIT (1<<1)
1062 - #define MV643XX_ETH_RX_BURST_SIZE_4_64BIT (1<<2)
1063 - #define MV643XX_ETH_RX_BURST_SIZE_8_64BIT ((1<<2) | (1<<1))
1064 - #define MV643XX_ETH_RX_BURST_SIZE_16_64BIT (1<<3)
1065 - #define MV643XX_ETH_BLM_RX_NO_SWAP (1<<4)
1066 - #define MV643XX_ETH_BLM_RX_BYTE_SWAP 0
1067 - #define MV643XX_ETH_BLM_TX_NO_SWAP (1<<5)
1068 - #define MV643XX_ETH_BLM_TX_BYTE_SWAP 0
1069 - #define MV643XX_ETH_DESCRIPTORS_BYTE_SWAP (1<<6)
1070 - #define MV643XX_ETH_DESCRIPTORS_NO_SWAP 0
1071 - #define MV643XX_ETH_TX_BURST_SIZE_1_64BIT 0
1072 - #define MV643XX_ETH_TX_BURST_SIZE_2_64BIT (1<<22)
1073 - #define MV643XX_ETH_TX_BURST_SIZE_4_64BIT (1<<23)
1074 - #define MV643XX_ETH_TX_BURST_SIZE_8_64BIT ((1<<23) | (1<<22))
1075 - #define MV643XX_ETH_TX_BURST_SIZE_16_64BIT (1<<24)
1076 -
1077 - #define MV643XX_ETH_IPG_INT_RX(value) ((value & 0x3fff) << 8)
1078 -
1079 - #define MV643XX_ETH_PORT_SDMA_CONFIG_DEFAULT_VALUE \
1080 - MV643XX_ETH_RX_BURST_SIZE_4_64BIT | \
1081 - MV643XX_ETH_IPG_INT_RX(0) | \
1082 - MV643XX_ETH_TX_BURST_SIZE_4_64BIT
1083 -
1084 - /* These macros describe Ethernet Port serial control reg (PSCR) bits */
1085 - #define MV643XX_ETH_SERIAL_PORT_DISABLE 0
1086 - #define MV643XX_ETH_SERIAL_PORT_ENABLE (1<<0)
1087 - #define MV643XX_ETH_FORCE_LINK_PASS (1<<1)
1088 - #define MV643XX_ETH_DO_NOT_FORCE_LINK_PASS 0
1089 - #define MV643XX_ETH_ENABLE_AUTO_NEG_FOR_DUPLX 0
1090 - #define MV643XX_ETH_DISABLE_AUTO_NEG_FOR_DUPLX (1<<2)
1091 - #define MV643XX_ETH_ENABLE_AUTO_NEG_FOR_FLOW_CTRL 0
1092 - #define MV643XX_ETH_DISABLE_AUTO_NEG_FOR_FLOW_CTRL (1<<3)
1093 - #define MV643XX_ETH_ADV_NO_FLOW_CTRL 0
1094 - #define MV643XX_ETH_ADV_SYMMETRIC_FLOW_CTRL (1<<4)
1095 - #define MV643XX_ETH_FORCE_FC_MODE_NO_PAUSE_DIS_TX 0
1096 - #define MV643XX_ETH_FORCE_FC_MODE_TX_PAUSE_DIS (1<<5)
1097 - #define MV643XX_ETH_FORCE_BP_MODE_NO_JAM 0
1098 - #define MV643XX_ETH_FORCE_BP_MODE_JAM_TX (1<<7)
1099 - #define MV643XX_ETH_FORCE_BP_MODE_JAM_TX_ON_RX_ERR (1<<8)
1100 - #define MV643XX_ETH_SERIAL_PORT_CONTROL_RESERVED (1<<9)
1101 - #define MV643XX_ETH_FORCE_LINK_FAIL 0
1102 - #define MV643XX_ETH_DO_NOT_FORCE_LINK_FAIL (1<<10)
1103 - #define MV643XX_ETH_RETRANSMIT_16_ATTEMPTS 0
1104 - #define MV643XX_ETH_RETRANSMIT_FOREVER (1<<11)
1105 - #define MV643XX_ETH_DISABLE_AUTO_NEG_SPEED_GMII (1<<13)
1106 - #define MV643XX_ETH_ENABLE_AUTO_NEG_SPEED_GMII 0
1107 - #define MV643XX_ETH_DTE_ADV_0 0
1108 - #define MV643XX_ETH_DTE_ADV_1 (1<<14)
1109 - #define MV643XX_ETH_DISABLE_AUTO_NEG_BYPASS 0
1110 - #define MV643XX_ETH_ENABLE_AUTO_NEG_BYPASS (1<<15)
1111 - #define MV643XX_ETH_AUTO_NEG_NO_CHANGE 0
1112 - #define MV643XX_ETH_RESTART_AUTO_NEG (1<<16)
1113 - #define MV643XX_ETH_MAX_RX_PACKET_1518BYTE 0
1114 - #define MV643XX_ETH_MAX_RX_PACKET_1522BYTE (1<<17)
1115 - #define MV643XX_ETH_MAX_RX_PACKET_1552BYTE (1<<18)
1116 - #define MV643XX_ETH_MAX_RX_PACKET_9022BYTE ((1<<18) | (1<<17))
1117 - #define MV643XX_ETH_MAX_RX_PACKET_9192BYTE (1<<19)
1118 - #define MV643XX_ETH_MAX_RX_PACKET_9700BYTE ((1<<19) | (1<<17))
1119 - #define MV643XX_ETH_SET_EXT_LOOPBACK (1<<20)
1120 - #define MV643XX_ETH_CLR_EXT_LOOPBACK 0
1121 - #define MV643XX_ETH_SET_FULL_DUPLEX_MODE (1<<21)
1122 - #define MV643XX_ETH_SET_HALF_DUPLEX_MODE 0
1123 - #define MV643XX_ETH_ENABLE_FLOW_CTRL_TX_RX_IN_FULL_DUPLEX (1<<22)
1124 - #define MV643XX_ETH_DISABLE_FLOW_CTRL_TX_RX_IN_FULL_DUPLEX 0
1125 - #define MV643XX_ETH_SET_GMII_SPEED_TO_10_100 0
1126 - #define MV643XX_ETH_SET_GMII_SPEED_TO_1000 (1<<23)
1127 - #define MV643XX_ETH_SET_MII_SPEED_TO_10 0
1128 - #define MV643XX_ETH_SET_MII_SPEED_TO_100 (1<<24)
1129 -
1130 - #define MV643XX_ETH_MAX_RX_PACKET_MASK (0x7<<17)
1131 -
1132 - #define MV643XX_ETH_PORT_SERIAL_CONTROL_DEFAULT_VALUE \
1133 - MV643XX_ETH_DO_NOT_FORCE_LINK_PASS | \
1134 - MV643XX_ETH_ENABLE_AUTO_NEG_FOR_DUPLX | \
1135 - MV643XX_ETH_DISABLE_AUTO_NEG_FOR_FLOW_CTRL | \
1136 - MV643XX_ETH_ADV_SYMMETRIC_FLOW_CTRL | \
1137 - MV643XX_ETH_FORCE_FC_MODE_NO_PAUSE_DIS_TX | \
1138 - MV643XX_ETH_FORCE_BP_MODE_NO_JAM | \
1139 - (1<<9) /* reserved */ | \
1140 - MV643XX_ETH_DO_NOT_FORCE_LINK_FAIL | \
1141 - MV643XX_ETH_RETRANSMIT_16_ATTEMPTS | \
1142 - MV643XX_ETH_ENABLE_AUTO_NEG_SPEED_GMII | \
1143 - MV643XX_ETH_DTE_ADV_0 | \
1144 - MV643XX_ETH_DISABLE_AUTO_NEG_BYPASS | \
1145 - MV643XX_ETH_AUTO_NEG_NO_CHANGE | \
1146 - MV643XX_ETH_MAX_RX_PACKET_9700BYTE | \
1147 - MV643XX_ETH_CLR_EXT_LOOPBACK | \
1148 - MV643XX_ETH_SET_FULL_DUPLEX_MODE | \
1149 - MV643XX_ETH_ENABLE_FLOW_CTRL_TX_RX_IN_FULL_DUPLEX
1150 -
1151 - /* These macros describe Ethernet Serial Status reg (PSR) bits */
1152 - #define MV643XX_ETH_PORT_STATUS_MODE_10_BIT (1<<0)
1153 - #define MV643XX_ETH_PORT_STATUS_LINK_UP (1<<1)
1154 - #define MV643XX_ETH_PORT_STATUS_FULL_DUPLEX (1<<2)
1155 - #define MV643XX_ETH_PORT_STATUS_FLOW_CONTROL (1<<3)
1156 - #define MV643XX_ETH_PORT_STATUS_GMII_1000 (1<<4)
1157 - #define MV643XX_ETH_PORT_STATUS_MII_100 (1<<5)
1158 - /* PSR bit 6 is undocumented */
1159 - #define MV643XX_ETH_PORT_STATUS_TX_IN_PROGRESS (1<<7)
1160 - #define MV643XX_ETH_PORT_STATUS_AUTONEG_BYPASSED (1<<8)
1161 - #define MV643XX_ETH_PORT_STATUS_PARTITION (1<<9)
1162 - #define MV643XX_ETH_PORT_STATUS_TX_FIFO_EMPTY (1<<10)
1163 - /* PSR bits 11-31 are reserved */
1164 -
1165 - #define MV643XX_ETH_PORT_DEFAULT_TRANSMIT_QUEUE_SIZE 800
1166 - #define MV643XX_ETH_PORT_DEFAULT_RECEIVE_QUEUE_SIZE 400
1167 -
1168 - #define MV643XX_ETH_DESC_SIZE 64
1169 -
1170 - #define MV643XX_ETH_SHARED_NAME "mv643xx_eth_shared"
1171 - #define MV643XX_ETH_NAME "mv643xx_eth"
1172 -
1173 - struct mv643xx_eth_platform_data {
1174 - int port_number;
1175 - u16 force_phy_addr; /* force override if phy_addr == 0 */
1176 - u16 phy_addr;
1177 -
1178 - /* If speed is 0, then speed and duplex are autonegotiated. */
1179 - int speed; /* 0, SPEED_10, SPEED_100, SPEED_1000 */
1180 - int duplex; /* DUPLEX_HALF or DUPLEX_FULL */
1181 -
1182 - /* non-zero values of the following fields override defaults */
1183 - u32 tx_queue_size;
1184 - u32 rx_queue_size;
1185 - u32 tx_sram_addr;
1186 - u32 tx_sram_size;
1187 - u32 rx_sram_addr;
1188 - u32 rx_sram_size;
1189 - u8 mac_addr[6]; /* mac address if non-zero*/
1190 1090 };
1191 1091
1192 1092 /* Watchdog Platform Device, Driver Data */
+31
include/linux/mv643xx_eth.h
··· 1 + /*
2 + * MV-643XX ethernet platform device data definition file.
3 + */
4 + #ifndef __LINUX_MV643XX_ETH_H
5 + #define __LINUX_MV643XX_ETH_H
6 +
7 + #define MV643XX_ETH_SHARED_NAME "mv643xx_eth_shared"
8 + #define MV643XX_ETH_NAME "mv643xx_eth"
9 + #define MV643XX_ETH_SHARED_REGS 0x2000
10 + #define MV643XX_ETH_SHARED_REGS_SIZE 0x2000
11 +
12 + struct mv643xx_eth_platform_data {
13 + int port_number;
14 + u16 force_phy_addr; /* force override if phy_addr == 0 */
15 + u16 phy_addr;
16 +
17 + /* If speed is 0, then speed and duplex are autonegotiated. */
18 + int speed; /* 0, SPEED_10, SPEED_100, SPEED_1000 */
19 + int duplex; /* DUPLEX_HALF or DUPLEX_FULL */
20 +
21 + /* non-zero values of the following fields override defaults */
22 + u32 tx_queue_size;
23 + u32 rx_queue_size;
24 + u32 tx_sram_addr;
25 + u32 tx_sram_size;
26 + u32 rx_sram_addr;
27 + u32 rx_sram_size;
28 + u8 mac_addr[6]; /* mac address if non-zero*/
29 + };
30 +
31 + #endif /* __LINUX_MV643XX_ETH_H */
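With the platform-data definitions split out into <linux/mv643xx_eth.h>, a board file needs only this header to describe a port; the register layout left behind in mv643xx.h is no longer part of the platform-data contract. A rough sketch of how a board might register a port against MV643XX_ETH_NAME (the port and PHY numbers are made-up example values, and the memory/IRQ resources a real board must also supply are omitted):

    #include <linux/init.h>
    #include <linux/platform_device.h>
    #include <linux/mv643xx_eth.h>

    /* Hypothetical board wiring: port 0, PHY at MII address 8. */
    static struct mv643xx_eth_platform_data example_eth_pd = {
        .port_number    = 0,
        .force_phy_addr = 1,
        .phy_addr       = 8,
        .speed          = 0,    /* 0 => autonegotiate speed and duplex */
    };

    static struct platform_device example_eth_device = {
        .name = MV643XX_ETH_NAME,
        .id   = 0,
        .dev  = {
            .platform_data = &example_eth_pd,
        },
    };

    static int __init example_board_init(void)
    {
        return platform_device_register(&example_eth_device);
    }
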
+1 -1
include/linux/netdevice.h
··· 834 834 const void *daddr, const void *saddr,
835 835 unsigned len)
836 836 {
837 - if (!dev->header_ops)
837 + if (!dev->header_ops || !dev->header_ops->create)
838 838 return 0;
839 839
840 840 return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
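The dev_hard_header() change above makes the inline helper tolerate drivers that register a header_ops with only some callbacks filled in: with the extra test, a device may supply, say, only .parse and still pass through dev_hard_header() safely instead of jumping through a NULL .create pointer. A sketch of such a partial header_ops (the parse helper is hypothetical):

    #include <linux/netdevice.h>
    #include <linux/if_ether.h>
    #include <linux/string.h>

    /* Hypothetical: report the source hardware address, nothing more. */
    static int example_header_parse(const struct sk_buff *skb,
                                    unsigned char *haddr)
    {
        memcpy(haddr, skb_mac_header(skb), ETH_ALEN);
        return ETH_ALEN;
    }

    /* No .create here: before this fix, dev_hard_header() would have
     * called a NULL function pointer for ops registered like these. */
    static const struct header_ops example_header_ops = {
        .parse = example_header_parse,
    };
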