Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
tg3.c at 17431928194b36a0f88082df875e2e036da7fddf (14804 lines, 403 kB)
/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2010 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#include "tg3.h"

#define DRV_MODULE_NAME		"tg3"
#define DRV_MODULE_VERSION	"3.110"
#define DRV_MODULE_RELDATE	"April 9, 2010"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
	(((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && \
	  !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) ? 1024 : 512)

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_ext_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define TG3_RX_DMA_ALIGN		16
#define TG3_RX_HEADROOM			ALIGN(VLAN_HLEN, TG3_RX_DMA_ALIGN)

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE \
	(sizeof(struct ring_info) * TG3_RX_RING_SIZE)

#define TG3_RX_JMB_BUFF_RING_SIZE \
	(sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)

#define TG3_RSS_MIN_NUM_MSIX_VECS	2

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)

#define TG3_RAW_IP_ALIGN 2

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST		6

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test     (online) " },
	{ "register test (offline)" },
	{ "memory test   (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	/* All other mailbox offsets are shifted by 0x5600 into the GRC
	 * mailbox region (the 5906 mailbox accessors below add
	 * GRCMBOX_BASE the same way) and go through the indirect
	 * register window.
	 */
	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
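		/* Window the target NIC-memory offset into view through
		 * PCI config space, then move the data through
		 * TG3PCI_MEM_WIN_DATA.
		 */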
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
				APE_LOCK_GRANT_DRIVER);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	int off;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	off = 4 * locknum;
	tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
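	/* Merge the register address, the data payload, and the write
	 * command into the same MI communication frame before starting
	 * the transaction.
	 */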
frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) & 878 MI_COM_REG_ADDR_MASK); 879 frame_val |= (val & MI_COM_DATA_MASK); 880 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START); 881 882 tw32_f(MAC_MI_COM, frame_val); 883 884 loops = PHY_BUSY_LOOPS; 885 while (loops != 0) { 886 udelay(10); 887 frame_val = tr32(MAC_MI_COM); 888 if ((frame_val & MI_COM_BUSY) == 0) { 889 udelay(5); 890 frame_val = tr32(MAC_MI_COM); 891 break; 892 } 893 loops -= 1; 894 } 895 896 ret = -EBUSY; 897 if (loops != 0) 898 ret = 0; 899 900 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { 901 tw32_f(MAC_MI_MODE, tp->mi_mode); 902 udelay(80); 903 } 904 905 return ret; 906} 907 908static int tg3_bmcr_reset(struct tg3 *tp) 909{ 910 u32 phy_control; 911 int limit, err; 912 913 /* OK, reset it, and poll the BMCR_RESET bit until it 914 * clears or we time out. 915 */ 916 phy_control = BMCR_RESET; 917 err = tg3_writephy(tp, MII_BMCR, phy_control); 918 if (err != 0) 919 return -EBUSY; 920 921 limit = 5000; 922 while (limit--) { 923 err = tg3_readphy(tp, MII_BMCR, &phy_control); 924 if (err != 0) 925 return -EBUSY; 926 927 if ((phy_control & BMCR_RESET) == 0) { 928 udelay(40); 929 break; 930 } 931 udelay(10); 932 } 933 if (limit < 0) 934 return -EBUSY; 935 936 return 0; 937} 938 939static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg) 940{ 941 struct tg3 *tp = bp->priv; 942 u32 val; 943 944 spin_lock_bh(&tp->lock); 945 946 if (tg3_readphy(tp, reg, &val)) 947 val = -EIO; 948 949 spin_unlock_bh(&tp->lock); 950 951 return val; 952} 953 954static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val) 955{ 956 struct tg3 *tp = bp->priv; 957 u32 ret = 0; 958 959 spin_lock_bh(&tp->lock); 960 961 if (tg3_writephy(tp, reg, val)) 962 ret = -EIO; 963 964 spin_unlock_bh(&tp->lock); 965 966 return ret; 967} 968 969static int tg3_mdio_reset(struct mii_bus *bp) 970{ 971 return 0; 972} 973 974static void tg3_mdio_config_5785(struct tg3 *tp) 975{ 976 u32 val; 977 struct phy_device *phydev; 978 979 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; 980 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { 981 case PHY_ID_BCM50610: 982 case PHY_ID_BCM50610M: 983 val = MAC_PHYCFG2_50610_LED_MODES; 984 break; 985 case PHY_ID_BCMAC131: 986 val = MAC_PHYCFG2_AC131_LED_MODES; 987 break; 988 case PHY_ID_RTL8211C: 989 val = MAC_PHYCFG2_RTL8211C_LED_MODES; 990 break; 991 case PHY_ID_RTL8201E: 992 val = MAC_PHYCFG2_RTL8201E_LED_MODES; 993 break; 994 default: 995 return; 996 } 997 998 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) { 999 tw32(MAC_PHYCFG2, val); 1000 1001 val = tr32(MAC_PHYCFG1); 1002 val &= ~(MAC_PHYCFG1_RGMII_INT | 1003 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK); 1004 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT; 1005 tw32(MAC_PHYCFG1, val); 1006 1007 return; 1008 } 1009 1010 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) 1011 val |= MAC_PHYCFG2_EMODE_MASK_MASK | 1012 MAC_PHYCFG2_FMODE_MASK_MASK | 1013 MAC_PHYCFG2_GMODE_MASK_MASK | 1014 MAC_PHYCFG2_ACT_MASK_MASK | 1015 MAC_PHYCFG2_QUAL_MASK_MASK | 1016 MAC_PHYCFG2_INBAND_ENABLE; 1017 1018 tw32(MAC_PHYCFG2, val); 1019 1020 val = tr32(MAC_PHYCFG1); 1021 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK | 1022 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN); 1023 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) { 1024 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN) 1025 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC; 1026 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN) 1027 val |= 
MAC_PHYCFG1_RGMII_SND_STAT_EN; 1028 } 1029 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT | 1030 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV; 1031 tw32(MAC_PHYCFG1, val); 1032 1033 val = tr32(MAC_EXT_RGMII_MODE); 1034 val &= ~(MAC_RGMII_MODE_RX_INT_B | 1035 MAC_RGMII_MODE_RX_QUALITY | 1036 MAC_RGMII_MODE_RX_ACTIVITY | 1037 MAC_RGMII_MODE_RX_ENG_DET | 1038 MAC_RGMII_MODE_TX_ENABLE | 1039 MAC_RGMII_MODE_TX_LOWPWR | 1040 MAC_RGMII_MODE_TX_RESET); 1041 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) { 1042 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN) 1043 val |= MAC_RGMII_MODE_RX_INT_B | 1044 MAC_RGMII_MODE_RX_QUALITY | 1045 MAC_RGMII_MODE_RX_ACTIVITY | 1046 MAC_RGMII_MODE_RX_ENG_DET; 1047 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN) 1048 val |= MAC_RGMII_MODE_TX_ENABLE | 1049 MAC_RGMII_MODE_TX_LOWPWR | 1050 MAC_RGMII_MODE_TX_RESET; 1051 } 1052 tw32(MAC_EXT_RGMII_MODE, val); 1053} 1054 1055static void tg3_mdio_start(struct tg3 *tp) 1056{ 1057 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL; 1058 tw32_f(MAC_MI_MODE, tp->mi_mode); 1059 udelay(80); 1060 1061 if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) && 1062 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) 1063 tg3_mdio_config_5785(tp); 1064} 1065 1066static int tg3_mdio_init(struct tg3 *tp) 1067{ 1068 int i; 1069 u32 reg; 1070 struct phy_device *phydev; 1071 1072 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { 1073 u32 funcnum, is_serdes; 1074 1075 funcnum = tr32(TG3_CPMU_STATUS) & TG3_CPMU_STATUS_PCIE_FUNC; 1076 if (funcnum) 1077 tp->phy_addr = 2; 1078 else 1079 tp->phy_addr = 1; 1080 1081 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) 1082 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES; 1083 else 1084 is_serdes = tr32(TG3_CPMU_PHY_STRAP) & 1085 TG3_CPMU_PHY_STRAP_IS_SERDES; 1086 if (is_serdes) 1087 tp->phy_addr += 7; 1088 } else 1089 tp->phy_addr = TG3_PHY_MII_ADDR; 1090 1091 tg3_mdio_start(tp); 1092 1093 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) || 1094 (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED)) 1095 return 0; 1096 1097 tp->mdio_bus = mdiobus_alloc(); 1098 if (tp->mdio_bus == NULL) 1099 return -ENOMEM; 1100 1101 tp->mdio_bus->name = "tg3 mdio bus"; 1102 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x", 1103 (tp->pdev->bus->number << 8) | tp->pdev->devfn); 1104 tp->mdio_bus->priv = tp; 1105 tp->mdio_bus->parent = &tp->pdev->dev; 1106 tp->mdio_bus->read = &tg3_mdio_read; 1107 tp->mdio_bus->write = &tg3_mdio_write; 1108 tp->mdio_bus->reset = &tg3_mdio_reset; 1109 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR); 1110 tp->mdio_bus->irq = &tp->mdio_irq[0]; 1111 1112 for (i = 0; i < PHY_MAX_ADDR; i++) 1113 tp->mdio_bus->irq[i] = PHY_POLL; 1114 1115 /* The bus registration will look for all the PHYs on the mdio bus. 1116 * Unfortunately, it does not ensure the PHY is powered up before 1117 * accessing the PHY ID registers. A chip reset is the 1118 * quickest way to bring the device back to an operational state.. 
1119 */ 1120 if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN)) 1121 tg3_bmcr_reset(tp); 1122 1123 i = mdiobus_register(tp->mdio_bus); 1124 if (i) { 1125 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i); 1126 mdiobus_free(tp->mdio_bus); 1127 return i; 1128 } 1129 1130 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; 1131 1132 if (!phydev || !phydev->drv) { 1133 dev_warn(&tp->pdev->dev, "No PHY devices\n"); 1134 mdiobus_unregister(tp->mdio_bus); 1135 mdiobus_free(tp->mdio_bus); 1136 return -ENODEV; 1137 } 1138 1139 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { 1140 case PHY_ID_BCM57780: 1141 phydev->interface = PHY_INTERFACE_MODE_GMII; 1142 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE; 1143 break; 1144 case PHY_ID_BCM50610: 1145 case PHY_ID_BCM50610M: 1146 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE | 1147 PHY_BRCM_RX_REFCLK_UNUSED | 1148 PHY_BRCM_DIS_TXCRXC_NOENRGY | 1149 PHY_BRCM_AUTO_PWRDWN_ENABLE; 1150 if (tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE) 1151 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE; 1152 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN) 1153 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE; 1154 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN) 1155 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE; 1156 /* fallthru */ 1157 case PHY_ID_RTL8211C: 1158 phydev->interface = PHY_INTERFACE_MODE_RGMII; 1159 break; 1160 case PHY_ID_RTL8201E: 1161 case PHY_ID_BCMAC131: 1162 phydev->interface = PHY_INTERFACE_MODE_MII; 1163 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE; 1164 tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET; 1165 break; 1166 } 1167 1168 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED; 1169 1170 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) 1171 tg3_mdio_config_5785(tp); 1172 1173 return 0; 1174} 1175 1176static void tg3_mdio_fini(struct tg3 *tp) 1177{ 1178 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) { 1179 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED; 1180 mdiobus_unregister(tp->mdio_bus); 1181 mdiobus_free(tp->mdio_bus); 1182 } 1183} 1184 1185/* tp->lock is held. */ 1186static inline void tg3_generate_fw_event(struct tg3 *tp) 1187{ 1188 u32 val; 1189 1190 val = tr32(GRC_RX_CPU_EVENT); 1191 val |= GRC_RX_CPU_DRIVER_EVENT; 1192 tw32_f(GRC_RX_CPU_EVENT, val); 1193 1194 tp->last_event_jiffies = jiffies; 1195} 1196 1197#define TG3_FW_EVENT_TIMEOUT_USEC 2500 1198 1199/* tp->lock is held. */ 1200static void tg3_wait_for_event_ack(struct tg3 *tp) 1201{ 1202 int i; 1203 unsigned int delay_cnt; 1204 long time_remain; 1205 1206 /* If enough time has passed, no wait is necessary. */ 1207 time_remain = (long)(tp->last_event_jiffies + 1 + 1208 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) - 1209 (long)jiffies; 1210 if (time_remain < 0) 1211 return; 1212 1213 /* Check if we can shorten the wait time. */ 1214 delay_cnt = jiffies_to_usecs(time_remain); 1215 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC) 1216 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC; 1217 delay_cnt = (delay_cnt >> 3) + 1; 1218 1219 for (i = 0; i < delay_cnt; i++) { 1220 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT)) 1221 break; 1222 udelay(8); 1223 } 1224} 1225 1226/* tp->lock is held. 
*/ 1227static void tg3_ump_link_report(struct tg3 *tp) 1228{ 1229 u32 reg; 1230 u32 val; 1231 1232 if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || 1233 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) 1234 return; 1235 1236 tg3_wait_for_event_ack(tp); 1237 1238 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE); 1239 1240 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14); 1241 1242 val = 0; 1243 if (!tg3_readphy(tp, MII_BMCR, &reg)) 1244 val = reg << 16; 1245 if (!tg3_readphy(tp, MII_BMSR, &reg)) 1246 val |= (reg & 0xffff); 1247 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val); 1248 1249 val = 0; 1250 if (!tg3_readphy(tp, MII_ADVERTISE, &reg)) 1251 val = reg << 16; 1252 if (!tg3_readphy(tp, MII_LPA, &reg)) 1253 val |= (reg & 0xffff); 1254 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val); 1255 1256 val = 0; 1257 if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) { 1258 if (!tg3_readphy(tp, MII_CTRL1000, &reg)) 1259 val = reg << 16; 1260 if (!tg3_readphy(tp, MII_STAT1000, &reg)) 1261 val |= (reg & 0xffff); 1262 } 1263 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val); 1264 1265 if (!tg3_readphy(tp, MII_PHYADDR, &reg)) 1266 val = reg << 16; 1267 else 1268 val = 0; 1269 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val); 1270 1271 tg3_generate_fw_event(tp); 1272} 1273 1274static void tg3_link_report(struct tg3 *tp) 1275{ 1276 if (!netif_carrier_ok(tp->dev)) { 1277 netif_info(tp, link, tp->dev, "Link is down\n"); 1278 tg3_ump_link_report(tp); 1279 } else if (netif_msg_link(tp)) { 1280 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n", 1281 (tp->link_config.active_speed == SPEED_1000 ? 1282 1000 : 1283 (tp->link_config.active_speed == SPEED_100 ? 1284 100 : 10)), 1285 (tp->link_config.active_duplex == DUPLEX_FULL ? 1286 "full" : "half")); 1287 1288 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n", 1289 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ? 1290 "on" : "off", 1291 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ? 
1292 "on" : "off"); 1293 tg3_ump_link_report(tp); 1294 } 1295} 1296 1297static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl) 1298{ 1299 u16 miireg; 1300 1301 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX)) 1302 miireg = ADVERTISE_PAUSE_CAP; 1303 else if (flow_ctrl & FLOW_CTRL_TX) 1304 miireg = ADVERTISE_PAUSE_ASYM; 1305 else if (flow_ctrl & FLOW_CTRL_RX) 1306 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 1307 else 1308 miireg = 0; 1309 1310 return miireg; 1311} 1312 1313static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl) 1314{ 1315 u16 miireg; 1316 1317 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX)) 1318 miireg = ADVERTISE_1000XPAUSE; 1319 else if (flow_ctrl & FLOW_CTRL_TX) 1320 miireg = ADVERTISE_1000XPSE_ASYM; 1321 else if (flow_ctrl & FLOW_CTRL_RX) 1322 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM; 1323 else 1324 miireg = 0; 1325 1326 return miireg; 1327} 1328 1329static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv) 1330{ 1331 u8 cap = 0; 1332 1333 if (lcladv & ADVERTISE_1000XPAUSE) { 1334 if (lcladv & ADVERTISE_1000XPSE_ASYM) { 1335 if (rmtadv & LPA_1000XPAUSE) 1336 cap = FLOW_CTRL_TX | FLOW_CTRL_RX; 1337 else if (rmtadv & LPA_1000XPAUSE_ASYM) 1338 cap = FLOW_CTRL_RX; 1339 } else { 1340 if (rmtadv & LPA_1000XPAUSE) 1341 cap = FLOW_CTRL_TX | FLOW_CTRL_RX; 1342 } 1343 } else if (lcladv & ADVERTISE_1000XPSE_ASYM) { 1344 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM)) 1345 cap = FLOW_CTRL_TX; 1346 } 1347 1348 return cap; 1349} 1350 1351static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv) 1352{ 1353 u8 autoneg; 1354 u8 flowctrl = 0; 1355 u32 old_rx_mode = tp->rx_mode; 1356 u32 old_tx_mode = tp->tx_mode; 1357 1358 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) 1359 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg; 1360 else 1361 autoneg = tp->link_config.autoneg; 1362 1363 if (autoneg == AUTONEG_ENABLE && 1364 (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) { 1365 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) 1366 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv); 1367 else 1368 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv); 1369 } else 1370 flowctrl = tp->link_config.flowctrl; 1371 1372 tp->link_config.active_flowctrl = flowctrl; 1373 1374 if (flowctrl & FLOW_CTRL_RX) 1375 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE; 1376 else 1377 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE; 1378 1379 if (old_rx_mode != tp->rx_mode) 1380 tw32_f(MAC_RX_MODE, tp->rx_mode); 1381 1382 if (flowctrl & FLOW_CTRL_TX) 1383 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE; 1384 else 1385 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE; 1386 1387 if (old_tx_mode != tp->tx_mode) 1388 tw32_f(MAC_TX_MODE, tp->tx_mode); 1389} 1390 1391static void tg3_adjust_link(struct net_device *dev) 1392{ 1393 u8 oldflowctrl, linkmesg = 0; 1394 u32 mac_mode, lcl_adv, rmt_adv; 1395 struct tg3 *tp = netdev_priv(dev); 1396 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; 1397 1398 spin_lock_bh(&tp->lock); 1399 1400 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK | 1401 MAC_MODE_HALF_DUPLEX); 1402 1403 oldflowctrl = tp->link_config.active_flowctrl; 1404 1405 if (phydev->link) { 1406 lcl_adv = 0; 1407 rmt_adv = 0; 1408 1409 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10) 1410 mac_mode |= MAC_MODE_PORT_MODE_MII; 1411 else if (phydev->speed == SPEED_1000 || 1412 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) 1413 mac_mode |= MAC_MODE_PORT_MODE_GMII; 1414 else 1415 mac_mode |= MAC_MODE_PORT_MODE_MII; 1416 1417 if (phydev->duplex 
== DUPLEX_HALF) 1418 mac_mode |= MAC_MODE_HALF_DUPLEX; 1419 else { 1420 lcl_adv = tg3_advert_flowctrl_1000T( 1421 tp->link_config.flowctrl); 1422 1423 if (phydev->pause) 1424 rmt_adv = LPA_PAUSE_CAP; 1425 if (phydev->asym_pause) 1426 rmt_adv |= LPA_PAUSE_ASYM; 1427 } 1428 1429 tg3_setup_flow_control(tp, lcl_adv, rmt_adv); 1430 } else 1431 mac_mode |= MAC_MODE_PORT_MODE_GMII; 1432 1433 if (mac_mode != tp->mac_mode) { 1434 tp->mac_mode = mac_mode; 1435 tw32_f(MAC_MODE, tp->mac_mode); 1436 udelay(40); 1437 } 1438 1439 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { 1440 if (phydev->speed == SPEED_10) 1441 tw32(MAC_MI_STAT, 1442 MAC_MI_STAT_10MBPS_MODE | 1443 MAC_MI_STAT_LNKSTAT_ATTN_ENAB); 1444 else 1445 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); 1446 } 1447 1448 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF) 1449 tw32(MAC_TX_LENGTHS, 1450 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | 1451 (6 << TX_LENGTHS_IPG_SHIFT) | 1452 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT))); 1453 else 1454 tw32(MAC_TX_LENGTHS, 1455 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | 1456 (6 << TX_LENGTHS_IPG_SHIFT) | 1457 (32 << TX_LENGTHS_SLOT_TIME_SHIFT))); 1458 1459 if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) || 1460 (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) || 1461 phydev->speed != tp->link_config.active_speed || 1462 phydev->duplex != tp->link_config.active_duplex || 1463 oldflowctrl != tp->link_config.active_flowctrl) 1464 linkmesg = 1; 1465 1466 tp->link_config.active_speed = phydev->speed; 1467 tp->link_config.active_duplex = phydev->duplex; 1468 1469 spin_unlock_bh(&tp->lock); 1470 1471 if (linkmesg) 1472 tg3_link_report(tp); 1473} 1474 1475static int tg3_phy_init(struct tg3 *tp) 1476{ 1477 struct phy_device *phydev; 1478 1479 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) 1480 return 0; 1481 1482 /* Bring the PHY back to a known state. */ 1483 tg3_bmcr_reset(tp); 1484 1485 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; 1486 1487 /* Attach the MAC to the PHY. */ 1488 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link, 1489 phydev->dev_flags, phydev->interface); 1490 if (IS_ERR(phydev)) { 1491 dev_err(&tp->pdev->dev, "Could not attach to PHY\n"); 1492 return PTR_ERR(phydev); 1493 } 1494 1495 /* Mask with MAC supported features. 
*/ 1496 switch (phydev->interface) { 1497 case PHY_INTERFACE_MODE_GMII: 1498 case PHY_INTERFACE_MODE_RGMII: 1499 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) { 1500 phydev->supported &= (PHY_GBIT_FEATURES | 1501 SUPPORTED_Pause | 1502 SUPPORTED_Asym_Pause); 1503 break; 1504 } 1505 /* fallthru */ 1506 case PHY_INTERFACE_MODE_MII: 1507 phydev->supported &= (PHY_BASIC_FEATURES | 1508 SUPPORTED_Pause | 1509 SUPPORTED_Asym_Pause); 1510 break; 1511 default: 1512 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); 1513 return -EINVAL; 1514 } 1515 1516 tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED; 1517 1518 phydev->advertising = phydev->supported; 1519 1520 return 0; 1521} 1522 1523static void tg3_phy_start(struct tg3 *tp) 1524{ 1525 struct phy_device *phydev; 1526 1527 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) 1528 return; 1529 1530 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; 1531 1532 if (tp->link_config.phy_is_low_power) { 1533 tp->link_config.phy_is_low_power = 0; 1534 phydev->speed = tp->link_config.orig_speed; 1535 phydev->duplex = tp->link_config.orig_duplex; 1536 phydev->autoneg = tp->link_config.orig_autoneg; 1537 phydev->advertising = tp->link_config.orig_advertising; 1538 } 1539 1540 phy_start(phydev); 1541 1542 phy_start_aneg(phydev); 1543} 1544 1545static void tg3_phy_stop(struct tg3 *tp) 1546{ 1547 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) 1548 return; 1549 1550 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); 1551} 1552 1553static void tg3_phy_fini(struct tg3 *tp) 1554{ 1555 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) { 1556 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); 1557 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED; 1558 } 1559} 1560 1561static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val) 1562{ 1563 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg); 1564 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val); 1565} 1566 1567static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable) 1568{ 1569 u32 phytest; 1570 1571 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { 1572 u32 phy; 1573 1574 tg3_writephy(tp, MII_TG3_FET_TEST, 1575 phytest | MII_TG3_FET_SHADOW_EN); 1576 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) { 1577 if (enable) 1578 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD; 1579 else 1580 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD; 1581 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy); 1582 } 1583 tg3_writephy(tp, MII_TG3_FET_TEST, phytest); 1584 } 1585} 1586 1587static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable) 1588{ 1589 u32 reg; 1590 1591 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || 1592 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 && 1593 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES))) 1594 return; 1595 1596 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { 1597 tg3_phy_fet_toggle_apd(tp, enable); 1598 return; 1599 } 1600 1601 reg = MII_TG3_MISC_SHDW_WREN | 1602 MII_TG3_MISC_SHDW_SCR5_SEL | 1603 MII_TG3_MISC_SHDW_SCR5_LPED | 1604 MII_TG3_MISC_SHDW_SCR5_DLPTLM | 1605 MII_TG3_MISC_SHDW_SCR5_SDTL | 1606 MII_TG3_MISC_SHDW_SCR5_C125OE; 1607 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable) 1608 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD; 1609 1610 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg); 1611 1612 1613 reg = MII_TG3_MISC_SHDW_WREN | 1614 MII_TG3_MISC_SHDW_APD_SEL | 1615 MII_TG3_MISC_SHDW_APD_WKTM_84MS; 1616 if (enable) 1617 reg |= MII_TG3_MISC_SHDW_APD_ENABLE; 1618 1619 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg); 1620} 1621 1622static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable) 1623{ 1624 u32 phy; 1625 1626 if 
(!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || 1627 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) 1628 return; 1629 1630 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { 1631 u32 ephy; 1632 1633 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) { 1634 u32 reg = MII_TG3_FET_SHDW_MISCCTRL; 1635 1636 tg3_writephy(tp, MII_TG3_FET_TEST, 1637 ephy | MII_TG3_FET_SHADOW_EN); 1638 if (!tg3_readphy(tp, reg, &phy)) { 1639 if (enable) 1640 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX; 1641 else 1642 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX; 1643 tg3_writephy(tp, reg, phy); 1644 } 1645 tg3_writephy(tp, MII_TG3_FET_TEST, ephy); 1646 } 1647 } else { 1648 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC | 1649 MII_TG3_AUXCTL_SHDWSEL_MISC; 1650 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) && 1651 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) { 1652 if (enable) 1653 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX; 1654 else 1655 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX; 1656 phy |= MII_TG3_AUXCTL_MISC_WREN; 1657 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy); 1658 } 1659 } 1660} 1661 1662static void tg3_phy_set_wirespeed(struct tg3 *tp) 1663{ 1664 u32 val; 1665 1666 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) 1667 return; 1668 1669 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) && 1670 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val)) 1671 tg3_writephy(tp, MII_TG3_AUX_CTRL, 1672 (val | (1 << 15) | (1 << 4))); 1673} 1674 1675static void tg3_phy_apply_otp(struct tg3 *tp) 1676{ 1677 u32 otp, phy; 1678 1679 if (!tp->phy_otp) 1680 return; 1681 1682 otp = tp->phy_otp; 1683 1684 /* Enable SM_DSP clock and tx 6dB coding. */ 1685 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL | 1686 MII_TG3_AUXCTL_ACTL_SMDSP_ENA | 1687 MII_TG3_AUXCTL_ACTL_TX_6DB; 1688 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy); 1689 1690 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT); 1691 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT; 1692 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy); 1693 1694 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) | 1695 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT); 1696 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy); 1697 1698 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT); 1699 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ; 1700 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy); 1701 1702 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT); 1703 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy); 1704 1705 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT); 1706 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy); 1707 1708 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) | 1709 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT); 1710 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy); 1711 1712 /* Turn off SM_DSP clock. 
*/ 1713 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL | 1714 MII_TG3_AUXCTL_ACTL_TX_6DB; 1715 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy); 1716} 1717 1718static int tg3_wait_macro_done(struct tg3 *tp) 1719{ 1720 int limit = 100; 1721 1722 while (limit--) { 1723 u32 tmp32; 1724 1725 if (!tg3_readphy(tp, 0x16, &tmp32)) { 1726 if ((tmp32 & 0x1000) == 0) 1727 break; 1728 } 1729 } 1730 if (limit < 0) 1731 return -EBUSY; 1732 1733 return 0; 1734} 1735 1736static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp) 1737{ 1738 static const u32 test_pat[4][6] = { 1739 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 }, 1740 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 }, 1741 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 }, 1742 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 } 1743 }; 1744 int chan; 1745 1746 for (chan = 0; chan < 4; chan++) { 1747 int i; 1748 1749 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 1750 (chan * 0x2000) | 0x0200); 1751 tg3_writephy(tp, 0x16, 0x0002); 1752 1753 for (i = 0; i < 6; i++) 1754 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 1755 test_pat[chan][i]); 1756 1757 tg3_writephy(tp, 0x16, 0x0202); 1758 if (tg3_wait_macro_done(tp)) { 1759 *resetp = 1; 1760 return -EBUSY; 1761 } 1762 1763 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 1764 (chan * 0x2000) | 0x0200); 1765 tg3_writephy(tp, 0x16, 0x0082); 1766 if (tg3_wait_macro_done(tp)) { 1767 *resetp = 1; 1768 return -EBUSY; 1769 } 1770 1771 tg3_writephy(tp, 0x16, 0x0802); 1772 if (tg3_wait_macro_done(tp)) { 1773 *resetp = 1; 1774 return -EBUSY; 1775 } 1776 1777 for (i = 0; i < 6; i += 2) { 1778 u32 low, high; 1779 1780 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) || 1781 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) || 1782 tg3_wait_macro_done(tp)) { 1783 *resetp = 1; 1784 return -EBUSY; 1785 } 1786 low &= 0x7fff; 1787 high &= 0x000f; 1788 if (low != test_pat[chan][i] || 1789 high != test_pat[chan][i+1]) { 1790 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b); 1791 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001); 1792 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005); 1793 1794 return -EBUSY; 1795 } 1796 } 1797 } 1798 1799 return 0; 1800} 1801 1802static int tg3_phy_reset_chanpat(struct tg3 *tp) 1803{ 1804 int chan; 1805 1806 for (chan = 0; chan < 4; chan++) { 1807 int i; 1808 1809 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 1810 (chan * 0x2000) | 0x0200); 1811 tg3_writephy(tp, 0x16, 0x0002); 1812 for (i = 0; i < 6; i++) 1813 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000); 1814 tg3_writephy(tp, 0x16, 0x0202); 1815 if (tg3_wait_macro_done(tp)) 1816 return -EBUSY; 1817 } 1818 1819 return 0; 1820} 1821 1822static int tg3_phy_reset_5703_4_5(struct tg3 *tp) 1823{ 1824 u32 reg32, phy9_orig; 1825 int retries, do_phy_reset, err; 1826 1827 retries = 10; 1828 do_phy_reset = 1; 1829 do { 1830 if (do_phy_reset) { 1831 err = tg3_bmcr_reset(tp); 1832 if (err) 1833 return err; 1834 do_phy_reset = 0; 1835 } 1836 1837 /* Disable transmitter and interrupt. */ 1838 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) 1839 continue; 1840 1841 reg32 |= 0x3000; 1842 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32); 1843 1844 /* Set full-duplex, 1000 mbps. */ 1845 tg3_writephy(tp, MII_BMCR, 1846 BMCR_FULLDPLX | TG3_BMCR_SPEED1000); 1847 1848 /* Set to master mode. */ 1849 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig)) 1850 continue; 1851 1852 tg3_writephy(tp, MII_TG3_CTRL, 1853 (MII_TG3_CTRL_AS_MASTER | 1854 MII_TG3_CTRL_ENABLE_AS_MASTER)); 1855 1856 /* Enable SM_DSP_CLOCK and 6dB. 
*/ 1857 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00); 1858 1859 /* Block the PHY control access. */ 1860 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005); 1861 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800); 1862 1863 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset); 1864 if (!err) 1865 break; 1866 } while (--retries); 1867 1868 err = tg3_phy_reset_chanpat(tp); 1869 if (err) 1870 return err; 1871 1872 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005); 1873 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000); 1874 1875 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200); 1876 tg3_writephy(tp, 0x16, 0x0000); 1877 1878 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || 1879 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { 1880 /* Set Extended packet length bit for jumbo frames */ 1881 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400); 1882 } else { 1883 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); 1884 } 1885 1886 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig); 1887 1888 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) { 1889 reg32 &= ~0x3000; 1890 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32); 1891 } else if (!err) 1892 err = -EBUSY; 1893 1894 return err; 1895} 1896 1897/* This will reset the tigon3 PHY if there is no valid 1898 * link unless the FORCE argument is non-zero. 1899 */ 1900static int tg3_phy_reset(struct tg3 *tp) 1901{ 1902 u32 cpmuctrl; 1903 u32 phy_status; 1904 int err; 1905 1906 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { 1907 u32 val; 1908 1909 val = tr32(GRC_MISC_CFG); 1910 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ); 1911 udelay(40); 1912 } 1913 err = tg3_readphy(tp, MII_BMSR, &phy_status); 1914 err |= tg3_readphy(tp, MII_BMSR, &phy_status); 1915 if (err != 0) 1916 return -EBUSY; 1917 1918 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) { 1919 netif_carrier_off(tp->dev); 1920 tg3_link_report(tp); 1921 } 1922 1923 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || 1924 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || 1925 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { 1926 err = tg3_phy_reset_5703_4_5(tp); 1927 if (err) 1928 return err; 1929 goto out; 1930 } 1931 1932 cpmuctrl = 0; 1933 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && 1934 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) { 1935 cpmuctrl = tr32(TG3_CPMU_CTRL); 1936 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) 1937 tw32(TG3_CPMU_CTRL, 1938 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY); 1939 } 1940 1941 err = tg3_bmcr_reset(tp); 1942 if (err) 1943 return err; 1944 1945 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) { 1946 u32 phy; 1947 1948 phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz; 1949 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy); 1950 1951 tw32(TG3_CPMU_CTRL, cpmuctrl); 1952 } 1953 1954 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX || 1955 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) { 1956 u32 val; 1957 1958 val = tr32(TG3_CPMU_LSPD_1000MB_CLK); 1959 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) == 1960 CPMU_LSPD_1000MB_MACCLK_12_5) { 1961 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK; 1962 udelay(40); 1963 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val); 1964 } 1965 } 1966 1967 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 && 1968 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) 1969 return 0; 1970 1971 tg3_phy_apply_otp(tp); 1972 1973 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD) 1974 tg3_phy_toggle_apd(tp, true); 1975 else 1976 tg3_phy_toggle_apd(tp, false); 1977 1978out: 1979 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) { 1980 tg3_writephy(tp, MII_TG3_AUX_CTRL, 
0x0c00); 1981 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f); 1982 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa); 1983 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); 1984 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323); 1985 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); 1986 } 1987 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) { 1988 tg3_writephy(tp, 0x1c, 0x8d68); 1989 tg3_writephy(tp, 0x1c, 0x8d68); 1990 } 1991 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) { 1992 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00); 1993 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); 1994 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b); 1995 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f); 1996 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506); 1997 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f); 1998 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2); 1999 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); 2000 } else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) { 2001 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00); 2002 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); 2003 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) { 2004 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b); 2005 tg3_writephy(tp, MII_TG3_TEST1, 2006 MII_TG3_TEST1_TRIM_EN | 0x4); 2007 } else 2008 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b); 2009 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); 2010 } 2011 /* Set Extended packet length bit (bit 14) on all chips that */ 2012 /* support jumbo frames */ 2013 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { 2014 /* Cannot do read-modify-write on 5401 */ 2015 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20); 2016 } else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { 2017 u32 phy_reg; 2018 2019 /* Set bit 14 with read-modify-write to preserve other bits */ 2020 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) && 2021 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg)) 2022 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000); 2023 } 2024 2025 /* Set phy register 0x10 bit 0 to high fifo elasticity to support 2026 * jumbo frames transmission. 2027 */ 2028 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { 2029 u32 phy_reg; 2030 2031 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg)) 2032 tg3_writephy(tp, MII_TG3_EXT_CTRL, 2033 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC); 2034 } 2035 2036 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { 2037 /* adjust output voltage */ 2038 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12); 2039 } 2040 2041 tg3_phy_toggle_automdix(tp, 1); 2042 tg3_phy_set_wirespeed(tp); 2043 return 0; 2044} 2045 2046static void tg3_frob_aux_power(struct tg3 *tp) 2047{ 2048 struct tg3 *tp_peer = tp; 2049 2050 /* The GPIOs do something completely different on 57765. */ 2051 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 || 2052 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) 2053 return; 2054 2055 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || 2056 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 || 2057 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { 2058 struct net_device *dev_peer; 2059 2060 dev_peer = pci_get_drvdata(tp->pdev_peer); 2061 /* remove_one() may have been run on the peer. 
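 * In that case pci_get_drvdata() returns NULL and we fall back to
 * using this device as its own peer.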
*/ 2062 if (!dev_peer) 2063 tp_peer = tp; 2064 else 2065 tp_peer = netdev_priv(dev_peer); 2066 } 2067 2068 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 || 2069 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 || 2070 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 || 2071 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) { 2072 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || 2073 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { 2074 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | 2075 (GRC_LCLCTRL_GPIO_OE0 | 2076 GRC_LCLCTRL_GPIO_OE1 | 2077 GRC_LCLCTRL_GPIO_OE2 | 2078 GRC_LCLCTRL_GPIO_OUTPUT0 | 2079 GRC_LCLCTRL_GPIO_OUTPUT1), 2080 100); 2081 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || 2082 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { 2083 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */ 2084 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 | 2085 GRC_LCLCTRL_GPIO_OE1 | 2086 GRC_LCLCTRL_GPIO_OE2 | 2087 GRC_LCLCTRL_GPIO_OUTPUT0 | 2088 GRC_LCLCTRL_GPIO_OUTPUT1 | 2089 tp->grc_local_ctrl; 2090 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100); 2091 2092 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2; 2093 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100); 2094 2095 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0; 2096 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100); 2097 } else { 2098 u32 no_gpio2; 2099 u32 grc_local_ctrl = 0; 2100 2101 if (tp_peer != tp && 2102 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0) 2103 return; 2104 2105 /* Workaround to prevent overdrawing Amps. */ 2106 if (GET_ASIC_REV(tp->pci_chip_rev_id) == 2107 ASIC_REV_5714) { 2108 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; 2109 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | 2110 grc_local_ctrl, 100); 2111 } 2112 2113 /* On 5753 and variants, GPIO2 cannot be used. 
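 * The NIC SRAM config word reports this through the NO_GPIO2 bit
 * checked just below.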
*/ 2114 no_gpio2 = tp->nic_sram_data_cfg & 2115 NIC_SRAM_DATA_CFG_NO_GPIO2; 2116 2117 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | 2118 GRC_LCLCTRL_GPIO_OE1 | 2119 GRC_LCLCTRL_GPIO_OE2 | 2120 GRC_LCLCTRL_GPIO_OUTPUT1 | 2121 GRC_LCLCTRL_GPIO_OUTPUT2; 2122 if (no_gpio2) { 2123 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 | 2124 GRC_LCLCTRL_GPIO_OUTPUT2); 2125 } 2126 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | 2127 grc_local_ctrl, 100); 2128 2129 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0; 2130 2131 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | 2132 grc_local_ctrl, 100); 2133 2134 if (!no_gpio2) { 2135 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2; 2136 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | 2137 grc_local_ctrl, 100); 2138 } 2139 } 2140 } else { 2141 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && 2142 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) { 2143 if (tp_peer != tp && 2144 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0) 2145 return; 2146 2147 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | 2148 (GRC_LCLCTRL_GPIO_OE1 | 2149 GRC_LCLCTRL_GPIO_OUTPUT1), 100); 2150 2151 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | 2152 GRC_LCLCTRL_GPIO_OE1, 100); 2153 2154 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | 2155 (GRC_LCLCTRL_GPIO_OE1 | 2156 GRC_LCLCTRL_GPIO_OUTPUT1), 100); 2157 } 2158 } 2159} 2160 2161static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed) 2162{ 2163 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2) 2164 return 1; 2165 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) { 2166 if (speed != SPEED_10) 2167 return 1; 2168 } else if (speed == SPEED_10) 2169 return 1; 2170 2171 return 0; 2172} 2173 2174static int tg3_setup_phy(struct tg3 *, int); 2175 2176#define RESET_KIND_SHUTDOWN 0 2177#define RESET_KIND_INIT 1 2178#define RESET_KIND_SUSPEND 2 2179 2180static void tg3_write_sig_post_reset(struct tg3 *, int); 2181static int tg3_halt_cpu(struct tg3 *, u32); 2182 2183static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) 2184{ 2185 u32 val; 2186 2187 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { 2188 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { 2189 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL); 2190 u32 serdes_cfg = tr32(MAC_SERDES_CFG); 2191 2192 sg_dig_ctrl |= 2193 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET; 2194 tw32(SG_DIG_CTRL, sg_dig_ctrl); 2195 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15)); 2196 } 2197 return; 2198 } 2199 2200 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { 2201 tg3_bmcr_reset(tp); 2202 val = tr32(GRC_MISC_CFG); 2203 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ); 2204 udelay(40); 2205 return; 2206 } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { 2207 u32 phytest; 2208 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { 2209 u32 phy; 2210 2211 tg3_writephy(tp, MII_ADVERTISE, 0); 2212 tg3_writephy(tp, MII_BMCR, 2213 BMCR_ANENABLE | BMCR_ANRESTART); 2214 2215 tg3_writephy(tp, MII_TG3_FET_TEST, 2216 phytest | MII_TG3_FET_SHADOW_EN); 2217 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) { 2218 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD; 2219 tg3_writephy(tp, 2220 MII_TG3_FET_SHDW_AUXMODE4, 2221 phy); 2222 } 2223 tg3_writephy(tp, MII_TG3_FET_TEST, phytest); 2224 } 2225 return; 2226 } else if (do_low_power) { 2227 tg3_writephy(tp, MII_TG3_EXT_CTRL, 2228 MII_TG3_EXT_CTRL_FORCE_LED_OFF); 2229 2230 tg3_writephy(tp, MII_TG3_AUX_CTRL, 2231 MII_TG3_AUXCTL_SHDWSEL_PWRCTL | 2232 MII_TG3_AUXCTL_PCTL_100TX_LPWR | 2233 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE | 2234 MII_TG3_AUXCTL_PCTL_VREG_11V); 2235 } 2236 2237 
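 /* Only standard copper PHYs reach this point; the SERDES, 5906
 * and FET paths above all return early.
 */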
/* The PHY should not be powered down on some chips because 2238 * of bugs. 2239 */ 2240 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || 2241 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || 2242 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 && 2243 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES))) 2244 return; 2245 2246 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX || 2247 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) { 2248 val = tr32(TG3_CPMU_LSPD_1000MB_CLK); 2249 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK; 2250 val |= CPMU_LSPD_1000MB_MACCLK_12_5; 2251 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val); 2252 } 2253 2254 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN); 2255} 2256 2257/* tp->lock is held. */ 2258static int tg3_nvram_lock(struct tg3 *tp) 2259{ 2260 if (tp->tg3_flags & TG3_FLAG_NVRAM) { 2261 int i; 2262 2263 if (tp->nvram_lock_cnt == 0) { 2264 tw32(NVRAM_SWARB, SWARB_REQ_SET1); 2265 for (i = 0; i < 8000; i++) { 2266 if (tr32(NVRAM_SWARB) & SWARB_GNT1) 2267 break; 2268 udelay(20); 2269 } 2270 if (i == 8000) { 2271 tw32(NVRAM_SWARB, SWARB_REQ_CLR1); 2272 return -ENODEV; 2273 } 2274 } 2275 tp->nvram_lock_cnt++; 2276 } 2277 return 0; 2278} 2279 2280/* tp->lock is held. */ 2281static void tg3_nvram_unlock(struct tg3 *tp) 2282{ 2283 if (tp->tg3_flags & TG3_FLAG_NVRAM) { 2284 if (tp->nvram_lock_cnt > 0) 2285 tp->nvram_lock_cnt--; 2286 if (tp->nvram_lock_cnt == 0) 2287 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1); 2288 } 2289} 2290 2291/* tp->lock is held. */ 2292static void tg3_enable_nvram_access(struct tg3 *tp) 2293{ 2294 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && 2295 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) { 2296 u32 nvaccess = tr32(NVRAM_ACCESS); 2297 2298 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE); 2299 } 2300} 2301 2302/* tp->lock is held. */ 2303static void tg3_disable_nvram_access(struct tg3 *tp) 2304{ 2305 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && 2306 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) { 2307 u32 nvaccess = tr32(NVRAM_ACCESS); 2308 2309 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE); 2310 } 2311} 2312 2313static int tg3_nvram_read_using_eeprom(struct tg3 *tp, 2314 u32 offset, u32 *val) 2315{ 2316 u32 tmp; 2317 int i; 2318 2319 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0) 2320 return -EINVAL; 2321 2322 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK | 2323 EEPROM_ADDR_DEVID_MASK | 2324 EEPROM_ADDR_READ); 2325 tw32(GRC_EEPROM_ADDR, 2326 tmp | 2327 (0 << EEPROM_ADDR_DEVID_SHIFT) | 2328 ((offset << EEPROM_ADDR_ADDR_SHIFT) & 2329 EEPROM_ADDR_ADDR_MASK) | 2330 EEPROM_ADDR_READ | EEPROM_ADDR_START); 2331 2332 for (i = 0; i < 1000; i++) { 2333 tmp = tr32(GRC_EEPROM_ADDR); 2334 2335 if (tmp & EEPROM_ADDR_COMPLETE) 2336 break; 2337 msleep(1); 2338 } 2339 if (!(tmp & EEPROM_ADDR_COMPLETE)) 2340 return -EBUSY; 2341 2342 tmp = tr32(GRC_EEPROM_DATA); 2343 2344 /* 2345 * The data will always be opposite the native endian 2346 * format. Perform a blind byteswap to compensate. 
2347 */ 2348 *val = swab32(tmp); 2349 2350 return 0; 2351} 2352 2353#define NVRAM_CMD_TIMEOUT 10000 2354 2355static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd) 2356{ 2357 int i; 2358 2359 tw32(NVRAM_CMD, nvram_cmd); 2360 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) { 2361 udelay(10); 2362 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) { 2363 udelay(10); 2364 break; 2365 } 2366 } 2367 2368 if (i == NVRAM_CMD_TIMEOUT) 2369 return -EBUSY; 2370 2371 return 0; 2372} 2373 2374static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr) 2375{ 2376 if ((tp->tg3_flags & TG3_FLAG_NVRAM) && 2377 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) && 2378 (tp->tg3_flags2 & TG3_FLG2_FLASH) && 2379 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) && 2380 (tp->nvram_jedecnum == JEDEC_ATMEL)) 2381 2382 addr = ((addr / tp->nvram_pagesize) << 2383 ATMEL_AT45DB0X1B_PAGE_POS) + 2384 (addr % tp->nvram_pagesize); 2385 2386 return addr; 2387} 2388 2389static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr) 2390{ 2391 if ((tp->tg3_flags & TG3_FLAG_NVRAM) && 2392 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) && 2393 (tp->tg3_flags2 & TG3_FLG2_FLASH) && 2394 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) && 2395 (tp->nvram_jedecnum == JEDEC_ATMEL)) 2396 2397 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) * 2398 tp->nvram_pagesize) + 2399 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1)); 2400 2401 return addr; 2402} 2403 2404/* NOTE: Data read in from NVRAM is byteswapped according to 2405 * the byteswapping settings for all other register accesses. 2406 * tg3 devices are BE devices, so on a BE machine, the data 2407 * returned will be exactly as it is seen in NVRAM. On a LE 2408 * machine, the 32-bit value will be byteswapped. 2409 */ 2410static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val) 2411{ 2412 int ret; 2413 2414 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) 2415 return tg3_nvram_read_using_eeprom(tp, offset, val); 2416 2417 offset = tg3_nvram_phys_addr(tp, offset); 2418 2419 if (offset > NVRAM_ADDR_MSK) 2420 return -EINVAL; 2421 2422 ret = tg3_nvram_lock(tp); 2423 if (ret) 2424 return ret; 2425 2426 tg3_enable_nvram_access(tp); 2427 2428 tw32(NVRAM_ADDR, offset); 2429 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO | 2430 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE); 2431 2432 if (ret == 0) 2433 *val = tr32(NVRAM_RDDATA); 2434 2435 tg3_disable_nvram_access(tp); 2436 2437 tg3_nvram_unlock(tp); 2438 2439 return ret; 2440} 2441 2442/* Ensures NVRAM data is in bytestream format. */ 2443static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val) 2444{ 2445 u32 v; 2446 int res = tg3_nvram_read(tp, offset, &v); 2447 if (!res) 2448 *val = cpu_to_be32(v); 2449 return res; 2450} 2451 2452/* tp->lock is held. 
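 * Programs the station address into all four MAC_ADDR slots (plus
 * the extended slots on 5703/5704) and seeds the TX backoff logic.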
*/ 2453static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1) 2454{ 2455 u32 addr_high, addr_low; 2456 int i; 2457 2458 addr_high = ((tp->dev->dev_addr[0] << 8) | 2459 tp->dev->dev_addr[1]); 2460 addr_low = ((tp->dev->dev_addr[2] << 24) | 2461 (tp->dev->dev_addr[3] << 16) | 2462 (tp->dev->dev_addr[4] << 8) | 2463 (tp->dev->dev_addr[5] << 0)); 2464 for (i = 0; i < 4; i++) { 2465 if (i == 1 && skip_mac_1) 2466 continue; 2467 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high); 2468 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low); 2469 } 2470 2471 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || 2472 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { 2473 for (i = 0; i < 12; i++) { 2474 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high); 2475 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low); 2476 } 2477 } 2478 2479 addr_high = (tp->dev->dev_addr[0] + 2480 tp->dev->dev_addr[1] + 2481 tp->dev->dev_addr[2] + 2482 tp->dev->dev_addr[3] + 2483 tp->dev->dev_addr[4] + 2484 tp->dev->dev_addr[5]) & 2485 TX_BACKOFF_SEED_MASK; 2486 tw32(MAC_TX_BACKOFF_SEED, addr_high); 2487} 2488 2489static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) 2490{ 2491 u32 misc_host_ctrl; 2492 bool device_should_wake, do_low_power; 2493 2494 /* Make sure register accesses (indirect or otherwise) 2495 * will function correctly. 2496 */ 2497 pci_write_config_dword(tp->pdev, 2498 TG3PCI_MISC_HOST_CTRL, 2499 tp->misc_host_ctrl); 2500 2501 switch (state) { 2502 case PCI_D0: 2503 pci_enable_wake(tp->pdev, state, false); 2504 pci_set_power_state(tp->pdev, PCI_D0); 2505 2506 /* Switch out of Vaux if it is a NIC */ 2507 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC) 2508 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100); 2509 2510 return 0; 2511 2512 case PCI_D1: 2513 case PCI_D2: 2514 case PCI_D3hot: 2515 break; 2516 2517 default: 2518 netdev_err(tp->dev, "Invalid power state (D%d) requested\n", 2519 state); 2520 return -EINVAL; 2521 } 2522 2523 /* Restore the CLKREQ setting. 
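 * Chips with the CLKREQ bug need PCI_EXP_LNKCTL_CLKREQ_EN switched
 * back on before entering a low-power state.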
*/ 2524 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) { 2525 u16 lnkctl; 2526 2527 pci_read_config_word(tp->pdev, 2528 tp->pcie_cap + PCI_EXP_LNKCTL, 2529 &lnkctl); 2530 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN; 2531 pci_write_config_word(tp->pdev, 2532 tp->pcie_cap + PCI_EXP_LNKCTL, 2533 lnkctl); 2534 } 2535 2536 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); 2537 tw32(TG3PCI_MISC_HOST_CTRL, 2538 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT); 2539 2540 device_should_wake = pci_pme_capable(tp->pdev, state) && 2541 device_may_wakeup(&tp->pdev->dev) && 2542 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE); 2543 2544 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 2545 do_low_power = false; 2546 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) && 2547 !tp->link_config.phy_is_low_power) { 2548 struct phy_device *phydev; 2549 u32 phyid, advertising; 2550 2551 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; 2552 2553 tp->link_config.phy_is_low_power = 1; 2554 2555 tp->link_config.orig_speed = phydev->speed; 2556 tp->link_config.orig_duplex = phydev->duplex; 2557 tp->link_config.orig_autoneg = phydev->autoneg; 2558 tp->link_config.orig_advertising = phydev->advertising; 2559 2560 advertising = ADVERTISED_TP | 2561 ADVERTISED_Pause | 2562 ADVERTISED_Autoneg | 2563 ADVERTISED_10baseT_Half; 2564 2565 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) || 2566 device_should_wake) { 2567 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) 2568 advertising |= 2569 ADVERTISED_100baseT_Half | 2570 ADVERTISED_100baseT_Full | 2571 ADVERTISED_10baseT_Full; 2572 else 2573 advertising |= ADVERTISED_10baseT_Full; 2574 } 2575 2576 phydev->advertising = advertising; 2577 2578 phy_start_aneg(phydev); 2579 2580 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask; 2581 if (phyid != PHY_ID_BCMAC131) { 2582 phyid &= PHY_BCM_OUI_MASK; 2583 if (phyid == PHY_BCM_OUI_1 || 2584 phyid == PHY_BCM_OUI_2 || 2585 phyid == PHY_BCM_OUI_3) 2586 do_low_power = true; 2587 } 2588 } 2589 } else { 2590 do_low_power = true; 2591 2592 if (tp->link_config.phy_is_low_power == 0) { 2593 tp->link_config.phy_is_low_power = 1; 2594 tp->link_config.orig_speed = tp->link_config.speed; 2595 tp->link_config.orig_duplex = tp->link_config.duplex; 2596 tp->link_config.orig_autoneg = tp->link_config.autoneg; 2597 } 2598 2599 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) { 2600 tp->link_config.speed = SPEED_10; 2601 tp->link_config.duplex = DUPLEX_HALF; 2602 tp->link_config.autoneg = AUTONEG_ENABLE; 2603 tg3_setup_phy(tp, 0); 2604 } 2605 } 2606 2607 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { 2608 u32 val; 2609 2610 val = tr32(GRC_VCPU_EXT_CTRL); 2611 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL); 2612 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { 2613 int i; 2614 u32 val; 2615 2616 for (i = 0; i < 200; i++) { 2617 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val); 2618 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) 2619 break; 2620 msleep(1); 2621 } 2622 } 2623 if (tp->tg3_flags & TG3_FLAG_WOL_CAP) 2624 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE | 2625 WOL_DRV_STATE_SHUTDOWN | 2626 WOL_DRV_WOL | 2627 WOL_SET_MAGIC_PKT); 2628 2629 if (device_should_wake) { 2630 u32 mac_mode; 2631 2632 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { 2633 if (do_low_power) { 2634 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a); 2635 udelay(40); 2636 } 2637 2638 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) 2639 mac_mode = MAC_MODE_PORT_MODE_GMII; 2640 else 2641 mac_mode = MAC_MODE_PORT_MODE_MII; 2642 2643 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY; 2644 if 
(GET_ASIC_REV(tp->pci_chip_rev_id) == 2645 ASIC_REV_5700) { 2646 u32 speed = (tp->tg3_flags & 2647 TG3_FLAG_WOL_SPEED_100MB) ? 2648 SPEED_100 : SPEED_10; 2649 if (tg3_5700_link_polarity(tp, speed)) 2650 mac_mode |= MAC_MODE_LINK_POLARITY; 2651 else 2652 mac_mode &= ~MAC_MODE_LINK_POLARITY; 2653 } 2654 } else { 2655 mac_mode = MAC_MODE_PORT_MODE_TBI; 2656 } 2657 2658 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS)) 2659 tw32(MAC_LED_CTRL, tp->led_ctrl); 2660 2661 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE; 2662 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && 2663 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) && 2664 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) || 2665 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))) 2666 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL; 2667 2668 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { 2669 mac_mode |= tp->mac_mode & 2670 (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN); 2671 if (mac_mode & MAC_MODE_APE_TX_EN) 2672 mac_mode |= MAC_MODE_TDE_ENABLE; 2673 } 2674 2675 tw32_f(MAC_MODE, mac_mode); 2676 udelay(100); 2677 2678 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE); 2679 udelay(10); 2680 } 2681 2682 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) && 2683 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || 2684 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) { 2685 u32 base_val; 2686 2687 base_val = tp->pci_clock_ctrl; 2688 base_val |= (CLOCK_CTRL_RXCLK_DISABLE | 2689 CLOCK_CTRL_TXCLK_DISABLE); 2690 2691 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK | 2692 CLOCK_CTRL_PWRDOWN_PLL133, 40); 2693 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || 2694 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) || 2695 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) { 2696 /* do nothing */ 2697 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && 2698 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) { 2699 u32 newbits1, newbits2; 2700 2701 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || 2702 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { 2703 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE | 2704 CLOCK_CTRL_TXCLK_DISABLE | 2705 CLOCK_CTRL_ALTCLK); 2706 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; 2707 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { 2708 newbits1 = CLOCK_CTRL_625_CORE; 2709 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK; 2710 } else { 2711 newbits1 = CLOCK_CTRL_ALTCLK; 2712 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; 2713 } 2714 2715 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1, 2716 40); 2717 2718 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2, 2719 40); 2720 2721 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { 2722 u32 newbits3; 2723 2724 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || 2725 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { 2726 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE | 2727 CLOCK_CTRL_TXCLK_DISABLE | 2728 CLOCK_CTRL_44MHZ_CORE); 2729 } else { 2730 newbits3 = CLOCK_CTRL_44MHZ_CORE; 2731 } 2732 2733 tw32_wait_f(TG3PCI_CLOCK_CTRL, 2734 tp->pci_clock_ctrl | newbits3, 40); 2735 } 2736 } 2737 2738 if (!(device_should_wake) && 2739 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) 2740 tg3_power_down_phy(tp, do_low_power); 2741 2742 tg3_frob_aux_power(tp); 2743 2744 /* Workaround for unstable PLL clock */ 2745 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) || 2746 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) { 2747 u32 val = tr32(0x7d00); 2748 2749 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1); 2750 tw32(0x7d00, val); 2751 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { 2752 int err; 2753 2754 err = tg3_nvram_lock(tp); 2755 
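 /* Halt the RX CPU whether or not the NVRAM lock was obtained;
 * the lock is only released if it was actually taken.
 */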
tg3_halt_cpu(tp, RX_CPU_BASE); 2756 if (!err) 2757 tg3_nvram_unlock(tp); 2758 } 2759 } 2760 2761 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN); 2762 2763 if (device_should_wake) 2764 pci_enable_wake(tp->pdev, state, true); 2765 2766 /* Finally, set the new power state. */ 2767 pci_set_power_state(tp->pdev, state); 2768 2769 return 0; 2770} 2771 2772static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex) 2773{ 2774 switch (val & MII_TG3_AUX_STAT_SPDMASK) { 2775 case MII_TG3_AUX_STAT_10HALF: 2776 *speed = SPEED_10; 2777 *duplex = DUPLEX_HALF; 2778 break; 2779 2780 case MII_TG3_AUX_STAT_10FULL: 2781 *speed = SPEED_10; 2782 *duplex = DUPLEX_FULL; 2783 break; 2784 2785 case MII_TG3_AUX_STAT_100HALF: 2786 *speed = SPEED_100; 2787 *duplex = DUPLEX_HALF; 2788 break; 2789 2790 case MII_TG3_AUX_STAT_100FULL: 2791 *speed = SPEED_100; 2792 *duplex = DUPLEX_FULL; 2793 break; 2794 2795 case MII_TG3_AUX_STAT_1000HALF: 2796 *speed = SPEED_1000; 2797 *duplex = DUPLEX_HALF; 2798 break; 2799 2800 case MII_TG3_AUX_STAT_1000FULL: 2801 *speed = SPEED_1000; 2802 *duplex = DUPLEX_FULL; 2803 break; 2804 2805 default: 2806 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { 2807 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 : 2808 SPEED_10; 2809 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL : 2810 DUPLEX_HALF; 2811 break; 2812 } 2813 *speed = SPEED_INVALID; 2814 *duplex = DUPLEX_INVALID; 2815 break; 2816 } 2817} 2818 2819static void tg3_phy_copper_begin(struct tg3 *tp) 2820{ 2821 u32 new_adv; 2822 int i; 2823 2824 if (tp->link_config.phy_is_low_power) { 2825 /* Entering low power mode. Disable gigabit and 2826 * 100baseT advertisements. 2827 */ 2828 tg3_writephy(tp, MII_TG3_CTRL, 0); 2829 2830 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL | 2831 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP); 2832 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) 2833 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL); 2834 2835 tg3_writephy(tp, MII_ADVERTISE, new_adv); 2836 } else if (tp->link_config.speed == SPEED_INVALID) { 2837 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY) 2838 tp->link_config.advertising &= 2839 ~(ADVERTISED_1000baseT_Half | 2840 ADVERTISED_1000baseT_Full); 2841 2842 new_adv = ADVERTISE_CSMA; 2843 if (tp->link_config.advertising & ADVERTISED_10baseT_Half) 2844 new_adv |= ADVERTISE_10HALF; 2845 if (tp->link_config.advertising & ADVERTISED_10baseT_Full) 2846 new_adv |= ADVERTISE_10FULL; 2847 if (tp->link_config.advertising & ADVERTISED_100baseT_Half) 2848 new_adv |= ADVERTISE_100HALF; 2849 if (tp->link_config.advertising & ADVERTISED_100baseT_Full) 2850 new_adv |= ADVERTISE_100FULL; 2851 2852 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl); 2853 2854 tg3_writephy(tp, MII_ADVERTISE, new_adv); 2855 2856 if (tp->link_config.advertising & 2857 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) { 2858 new_adv = 0; 2859 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half) 2860 new_adv |= MII_TG3_CTRL_ADV_1000_HALF; 2861 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full) 2862 new_adv |= MII_TG3_CTRL_ADV_1000_FULL; 2863 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) && 2864 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || 2865 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) 2866 new_adv |= (MII_TG3_CTRL_AS_MASTER | 2867 MII_TG3_CTRL_ENABLE_AS_MASTER); 2868 tg3_writephy(tp, MII_TG3_CTRL, new_adv); 2869 } else { 2870 tg3_writephy(tp, MII_TG3_CTRL, 0); 2871 } 2872 } else { 2873 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl); 2874 new_adv |= 
ADVERTISE_CSMA; 2875 2876 /* Asking for a specific link mode. */ 2877 if (tp->link_config.speed == SPEED_1000) { 2878 tg3_writephy(tp, MII_ADVERTISE, new_adv); 2879 2880 if (tp->link_config.duplex == DUPLEX_FULL) 2881 new_adv = MII_TG3_CTRL_ADV_1000_FULL; 2882 else 2883 new_adv = MII_TG3_CTRL_ADV_1000_HALF; 2884 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || 2885 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) 2886 new_adv |= (MII_TG3_CTRL_AS_MASTER | 2887 MII_TG3_CTRL_ENABLE_AS_MASTER); 2888 } else { 2889 if (tp->link_config.speed == SPEED_100) { 2890 if (tp->link_config.duplex == DUPLEX_FULL) 2891 new_adv |= ADVERTISE_100FULL; 2892 else 2893 new_adv |= ADVERTISE_100HALF; 2894 } else { 2895 if (tp->link_config.duplex == DUPLEX_FULL) 2896 new_adv |= ADVERTISE_10FULL; 2897 else 2898 new_adv |= ADVERTISE_10HALF; 2899 } 2900 tg3_writephy(tp, MII_ADVERTISE, new_adv); 2901 2902 new_adv = 0; 2903 } 2904 2905 tg3_writephy(tp, MII_TG3_CTRL, new_adv); 2906 } 2907 2908 if (tp->link_config.autoneg == AUTONEG_DISABLE && 2909 tp->link_config.speed != SPEED_INVALID) { 2910 u32 bmcr, orig_bmcr; 2911 2912 tp->link_config.active_speed = tp->link_config.speed; 2913 tp->link_config.active_duplex = tp->link_config.duplex; 2914 2915 bmcr = 0; 2916 switch (tp->link_config.speed) { 2917 default: 2918 case SPEED_10: 2919 break; 2920 2921 case SPEED_100: 2922 bmcr |= BMCR_SPEED100; 2923 break; 2924 2925 case SPEED_1000: 2926 bmcr |= TG3_BMCR_SPEED1000; 2927 break; 2928 } 2929 2930 if (tp->link_config.duplex == DUPLEX_FULL) 2931 bmcr |= BMCR_FULLDPLX; 2932 2933 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) && 2934 (bmcr != orig_bmcr)) { 2935 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK); 2936 for (i = 0; i < 1500; i++) { 2937 u32 tmp; 2938 2939 udelay(10); 2940 if (tg3_readphy(tp, MII_BMSR, &tmp) || 2941 tg3_readphy(tp, MII_BMSR, &tmp)) 2942 continue; 2943 if (!(tmp & BMSR_LSTATUS)) { 2944 udelay(40); 2945 break; 2946 } 2947 } 2948 tg3_writephy(tp, MII_BMCR, bmcr); 2949 udelay(40); 2950 } 2951 } else { 2952 tg3_writephy(tp, MII_BMCR, 2953 BMCR_ANENABLE | BMCR_ANRESTART); 2954 } 2955} 2956 2957static int tg3_init_5401phy_dsp(struct tg3 *tp) 2958{ 2959 int err; 2960 2961 /* Turn off tap power management. 
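 * The DSP address/data pairs written below look like fixed magic
 * values for the 5401 PHY; they are applied verbatim.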
*/ 2962 /* Set Extended packet length bit */ 2963 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20); 2964 2965 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012); 2966 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804); 2967 2968 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013); 2969 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204); 2970 2971 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006); 2972 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132); 2973 2974 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006); 2975 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232); 2976 2977 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f); 2978 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20); 2979 2980 udelay(40); 2981 2982 return err; 2983} 2984 2985static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask) 2986{ 2987 u32 adv_reg, all_mask = 0; 2988 2989 if (mask & ADVERTISED_10baseT_Half) 2990 all_mask |= ADVERTISE_10HALF; 2991 if (mask & ADVERTISED_10baseT_Full) 2992 all_mask |= ADVERTISE_10FULL; 2993 if (mask & ADVERTISED_100baseT_Half) 2994 all_mask |= ADVERTISE_100HALF; 2995 if (mask & ADVERTISED_100baseT_Full) 2996 all_mask |= ADVERTISE_100FULL; 2997 2998 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg)) 2999 return 0; 3000 3001 if ((adv_reg & all_mask) != all_mask) 3002 return 0; 3003 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) { 3004 u32 tg3_ctrl; 3005 3006 all_mask = 0; 3007 if (mask & ADVERTISED_1000baseT_Half) 3008 all_mask |= ADVERTISE_1000HALF; 3009 if (mask & ADVERTISED_1000baseT_Full) 3010 all_mask |= ADVERTISE_1000FULL; 3011 3012 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl)) 3013 return 0; 3014 3015 if ((tg3_ctrl & all_mask) != all_mask) 3016 return 0; 3017 } 3018 return 1; 3019} 3020 3021static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv) 3022{ 3023 u32 curadv, reqadv; 3024 3025 if (tg3_readphy(tp, MII_ADVERTISE, lcladv)) 3026 return 1; 3027 3028 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 3029 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl); 3030 3031 if (tp->link_config.active_duplex == DUPLEX_FULL) { 3032 if (curadv != reqadv) 3033 return 0; 3034 3035 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) 3036 tg3_readphy(tp, MII_LPA, rmtadv); 3037 } else { 3038 /* Reprogram the advertisement register, even if it 3039 * does not affect the current link. If the link 3040 * gets renegotiated in the future, we can save an 3041 * additional renegotiation cycle by advertising 3042 * it correctly in the first place. 3043 */ 3044 if (curadv != reqadv) { 3045 *lcladv &= ~(ADVERTISE_PAUSE_CAP | 3046 ADVERTISE_PAUSE_ASYM); 3047 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv); 3048 } 3049 } 3050 3051 return 1; 3052} 3053 3054static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) 3055{ 3056 int current_link_up; 3057 u32 bmsr, dummy; 3058 u32 lcl_adv, rmt_adv; 3059 u16 current_speed; 3060 u8 current_duplex; 3061 int i, err; 3062 3063 tw32(MAC_EVENT, 0); 3064 3065 tw32_f(MAC_STATUS, 3066 (MAC_STATUS_SYNC_CHANGED | 3067 MAC_STATUS_CFG_CHANGED | 3068 MAC_STATUS_MI_COMPLETION | 3069 MAC_STATUS_LNKSTATE_CHANGED)); 3070 udelay(40); 3071 3072 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { 3073 tw32_f(MAC_MI_MODE, 3074 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); 3075 udelay(80); 3076 } 3077 3078 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02); 3079 3080 /* Some third-party PHYs need to be reset on link going 3081 * down. 
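 * (5703/5704/5705 only: if the BMSR no longer reports link while
 * the carrier is still up, force a PHY reset.)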
3082 */ 3083 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || 3084 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || 3085 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) && 3086 netif_carrier_ok(tp->dev)) { 3087 tg3_readphy(tp, MII_BMSR, &bmsr); 3088 if (!tg3_readphy(tp, MII_BMSR, &bmsr) && 3089 !(bmsr & BMSR_LSTATUS)) 3090 force_reset = 1; 3091 } 3092 if (force_reset) 3093 tg3_phy_reset(tp); 3094 3095 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { 3096 tg3_readphy(tp, MII_BMSR, &bmsr); 3097 if (tg3_readphy(tp, MII_BMSR, &bmsr) || 3098 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) 3099 bmsr = 0; 3100 3101 if (!(bmsr & BMSR_LSTATUS)) { 3102 err = tg3_init_5401phy_dsp(tp); 3103 if (err) 3104 return err; 3105 3106 tg3_readphy(tp, MII_BMSR, &bmsr); 3107 for (i = 0; i < 1000; i++) { 3108 udelay(10); 3109 if (!tg3_readphy(tp, MII_BMSR, &bmsr) && 3110 (bmsr & BMSR_LSTATUS)) { 3111 udelay(40); 3112 break; 3113 } 3114 } 3115 3116 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) == 3117 TG3_PHY_REV_BCM5401_B0 && 3118 !(bmsr & BMSR_LSTATUS) && 3119 tp->link_config.active_speed == SPEED_1000) { 3120 err = tg3_phy_reset(tp); 3121 if (!err) 3122 err = tg3_init_5401phy_dsp(tp); 3123 if (err) 3124 return err; 3125 } 3126 } 3127 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || 3128 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) { 3129 /* 5701 {A0,B0} CRC bug workaround */ 3130 tg3_writephy(tp, 0x15, 0x0a75); 3131 tg3_writephy(tp, 0x1c, 0x8c68); 3132 tg3_writephy(tp, 0x1c, 0x8d68); 3133 tg3_writephy(tp, 0x1c, 0x8c68); 3134 } 3135 3136 /* Clear pending interrupts... */ 3137 tg3_readphy(tp, MII_TG3_ISTAT, &dummy); 3138 tg3_readphy(tp, MII_TG3_ISTAT, &dummy); 3139 3140 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) 3141 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG); 3142 else if (!(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) 3143 tg3_writephy(tp, MII_TG3_IMASK, ~0); 3144 3145 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || 3146 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { 3147 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1) 3148 tg3_writephy(tp, MII_TG3_EXT_CTRL, 3149 MII_TG3_EXT_CTRL_LNK3_LED_MODE); 3150 else 3151 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0); 3152 } 3153 3154 current_link_up = 0; 3155 current_speed = SPEED_INVALID; 3156 current_duplex = DUPLEX_INVALID; 3157 3158 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) { 3159 u32 val; 3160 3161 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007); 3162 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val); 3163 if (!(val & (1 << 10))) { 3164 val |= (1 << 10); 3165 tg3_writephy(tp, MII_TG3_AUX_CTRL, val); 3166 goto relink; 3167 } 3168 } 3169 3170 bmsr = 0; 3171 for (i = 0; i < 100; i++) { 3172 tg3_readphy(tp, MII_BMSR, &bmsr); 3173 if (!tg3_readphy(tp, MII_BMSR, &bmsr) && 3174 (bmsr & BMSR_LSTATUS)) 3175 break; 3176 udelay(40); 3177 } 3178 3179 if (bmsr & BMSR_LSTATUS) { 3180 u32 aux_stat, bmcr; 3181 3182 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat); 3183 for (i = 0; i < 2000; i++) { 3184 udelay(10); 3185 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) && 3186 aux_stat) 3187 break; 3188 } 3189 3190 tg3_aux_stat_to_speed_duplex(tp, aux_stat, 3191 &current_speed, 3192 &current_duplex); 3193 3194 bmcr = 0; 3195 for (i = 0; i < 200; i++) { 3196 tg3_readphy(tp, MII_BMCR, &bmcr); 3197 if (tg3_readphy(tp, MII_BMCR, &bmcr)) 3198 continue; 3199 if (bmcr && bmcr != 0x7fff) 3200 break; 3201 udelay(10); 3202 } 3203 3204 lcl_adv = 0; 3205 rmt_adv = 0; 3206 3207 tp->link_config.active_speed = current_speed; 3208 tp->link_config.active_duplex = 
current_duplex; 3209 3210 if (tp->link_config.autoneg == AUTONEG_ENABLE) { 3211 if ((bmcr & BMCR_ANENABLE) && 3212 tg3_copper_is_advertising_all(tp, 3213 tp->link_config.advertising)) { 3214 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv, 3215 &rmt_adv)) 3216 current_link_up = 1; 3217 } 3218 } else { 3219 if (!(bmcr & BMCR_ANENABLE) && 3220 tp->link_config.speed == current_speed && 3221 tp->link_config.duplex == current_duplex && 3222 tp->link_config.flowctrl == 3223 tp->link_config.active_flowctrl) { 3224 current_link_up = 1; 3225 } 3226 } 3227 3228 if (current_link_up == 1 && 3229 tp->link_config.active_duplex == DUPLEX_FULL) 3230 tg3_setup_flow_control(tp, lcl_adv, rmt_adv); 3231 } 3232 3233relink: 3234 if (current_link_up == 0 || tp->link_config.phy_is_low_power) { 3235 u32 tmp; 3236 3237 tg3_phy_copper_begin(tp); 3238 3239 tg3_readphy(tp, MII_BMSR, &tmp); 3240 if (!tg3_readphy(tp, MII_BMSR, &tmp) && 3241 (tmp & BMSR_LSTATUS)) 3242 current_link_up = 1; 3243 } 3244 3245 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK; 3246 if (current_link_up == 1) { 3247 if (tp->link_config.active_speed == SPEED_100 || 3248 tp->link_config.active_speed == SPEED_10) 3249 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 3250 else 3251 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 3252 } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) 3253 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 3254 else 3255 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 3256 3257 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; 3258 if (tp->link_config.active_duplex == DUPLEX_HALF) 3259 tp->mac_mode |= MAC_MODE_HALF_DUPLEX; 3260 3261 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) { 3262 if (current_link_up == 1 && 3263 tg3_5700_link_polarity(tp, tp->link_config.active_speed)) 3264 tp->mac_mode |= MAC_MODE_LINK_POLARITY; 3265 else 3266 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; 3267 } 3268 3269 /* ??? Without this setting Netgear GA302T PHY does not 3270 * ??? send/receive packets... 3271 */ 3272 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 && 3273 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) { 3274 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL; 3275 tw32_f(MAC_MI_MODE, tp->mi_mode); 3276 udelay(80); 3277 } 3278 3279 tw32_f(MAC_MODE, tp->mac_mode); 3280 udelay(40); 3281 3282 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) { 3283 /* Polled via timer. */ 3284 tw32_f(MAC_EVENT, 0); 3285 } else { 3286 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 3287 } 3288 udelay(40); 3289 3290 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 && 3291 current_link_up == 1 && 3292 tp->link_config.active_speed == SPEED_1000 && 3293 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) || 3294 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) { 3295 udelay(120); 3296 tw32_f(MAC_STATUS, 3297 (MAC_STATUS_SYNC_CHANGED | 3298 MAC_STATUS_CFG_CHANGED)); 3299 udelay(40); 3300 tg3_write_mem(tp, 3301 NIC_SRAM_FIRMWARE_MBOX, 3302 NIC_SRAM_FIRMWARE_MBOX_MAGIC2); 3303 } 3304 3305 /* Prevent send BD corruption. 
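 * On chips with the CLKREQ bug, CLKREQ is disabled while the link
 * runs at 10/100 and re-enabled at gigabit speed.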
*/ 3306 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) { 3307 u16 oldlnkctl, newlnkctl; 3308 3309 pci_read_config_word(tp->pdev, 3310 tp->pcie_cap + PCI_EXP_LNKCTL, 3311 &oldlnkctl); 3312 if (tp->link_config.active_speed == SPEED_100 || 3313 tp->link_config.active_speed == SPEED_10) 3314 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN; 3315 else 3316 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN; 3317 if (newlnkctl != oldlnkctl) 3318 pci_write_config_word(tp->pdev, 3319 tp->pcie_cap + PCI_EXP_LNKCTL, 3320 newlnkctl); 3321 } 3322 3323 if (current_link_up != netif_carrier_ok(tp->dev)) { 3324 if (current_link_up) 3325 netif_carrier_on(tp->dev); 3326 else 3327 netif_carrier_off(tp->dev); 3328 tg3_link_report(tp); 3329 } 3330 3331 return 0; 3332} 3333 3334struct tg3_fiber_aneginfo { 3335 int state; 3336#define ANEG_STATE_UNKNOWN 0 3337#define ANEG_STATE_AN_ENABLE 1 3338#define ANEG_STATE_RESTART_INIT 2 3339#define ANEG_STATE_RESTART 3 3340#define ANEG_STATE_DISABLE_LINK_OK 4 3341#define ANEG_STATE_ABILITY_DETECT_INIT 5 3342#define ANEG_STATE_ABILITY_DETECT 6 3343#define ANEG_STATE_ACK_DETECT_INIT 7 3344#define ANEG_STATE_ACK_DETECT 8 3345#define ANEG_STATE_COMPLETE_ACK_INIT 9 3346#define ANEG_STATE_COMPLETE_ACK 10 3347#define ANEG_STATE_IDLE_DETECT_INIT 11 3348#define ANEG_STATE_IDLE_DETECT 12 3349#define ANEG_STATE_LINK_OK 13 3350#define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14 3351#define ANEG_STATE_NEXT_PAGE_WAIT 15 3352 3353 u32 flags; 3354#define MR_AN_ENABLE 0x00000001 3355#define MR_RESTART_AN 0x00000002 3356#define MR_AN_COMPLETE 0x00000004 3357#define MR_PAGE_RX 0x00000008 3358#define MR_NP_LOADED 0x00000010 3359#define MR_TOGGLE_TX 0x00000020 3360#define MR_LP_ADV_FULL_DUPLEX 0x00000040 3361#define MR_LP_ADV_HALF_DUPLEX 0x00000080 3362#define MR_LP_ADV_SYM_PAUSE 0x00000100 3363#define MR_LP_ADV_ASYM_PAUSE 0x00000200 3364#define MR_LP_ADV_REMOTE_FAULT1 0x00000400 3365#define MR_LP_ADV_REMOTE_FAULT2 0x00000800 3366#define MR_LP_ADV_NEXT_PAGE 0x00001000 3367#define MR_TOGGLE_RX 0x00002000 3368#define MR_NP_RX 0x00004000 3369 3370#define MR_LINK_OK 0x80000000 3371 3372 unsigned long link_time, cur_time; 3373 3374 u32 ability_match_cfg; 3375 int ability_match_count; 3376 3377 char ability_match, idle_match, ack_match; 3378 3379 u32 txconfig, rxconfig; 3380#define ANEG_CFG_NP 0x00000080 3381#define ANEG_CFG_ACK 0x00000040 3382#define ANEG_CFG_RF2 0x00000020 3383#define ANEG_CFG_RF1 0x00000010 3384#define ANEG_CFG_PS2 0x00000001 3385#define ANEG_CFG_PS1 0x00008000 3386#define ANEG_CFG_HD 0x00004000 3387#define ANEG_CFG_FD 0x00002000 3388#define ANEG_CFG_INVAL 0x00001f06 3389 3390}; 3391#define ANEG_OK 0 3392#define ANEG_DONE 1 3393#define ANEG_TIMER_ENAB 2 3394#define ANEG_FAILED -1 3395 3396#define ANEG_STATE_SETTLE_TIME 10000 3397 3398static int tg3_fiber_aneg_smachine(struct tg3 *tp, 3399 struct tg3_fiber_aneginfo *ap) 3400{ 3401 u16 flowctrl; 3402 unsigned long delta; 3403 u32 rx_cfg_reg; 3404 int ret; 3405 3406 if (ap->state == ANEG_STATE_UNKNOWN) { 3407 ap->rxconfig = 0; 3408 ap->link_time = 0; 3409 ap->cur_time = 0; 3410 ap->ability_match_cfg = 0; 3411 ap->ability_match_count = 0; 3412 ap->ability_match = 0; 3413 ap->idle_match = 0; 3414 ap->ack_match = 0; 3415 } 3416 ap->cur_time++; 3417 3418 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) { 3419 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG); 3420 3421 if (rx_cfg_reg != ap->ability_match_cfg) { 3422 ap->ability_match_cfg = rx_cfg_reg; 3423 ap->ability_match = 0; 3424 ap->ability_match_count = 0; 3425 } else { 3426 if (++ap->ability_match_count > 1) { 3427 
ap->ability_match = 1; 3428 ap->ability_match_cfg = rx_cfg_reg; 3429 } 3430 } 3431 if (rx_cfg_reg & ANEG_CFG_ACK) 3432 ap->ack_match = 1; 3433 else 3434 ap->ack_match = 0; 3435 3436 ap->idle_match = 0; 3437 } else { 3438 ap->idle_match = 1; 3439 ap->ability_match_cfg = 0; 3440 ap->ability_match_count = 0; 3441 ap->ability_match = 0; 3442 ap->ack_match = 0; 3443 3444 rx_cfg_reg = 0; 3445 } 3446 3447 ap->rxconfig = rx_cfg_reg; 3448 ret = ANEG_OK; 3449 3450 switch (ap->state) { 3451 case ANEG_STATE_UNKNOWN: 3452 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN)) 3453 ap->state = ANEG_STATE_AN_ENABLE; 3454 3455 /* fallthru */ 3456 case ANEG_STATE_AN_ENABLE: 3457 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX); 3458 if (ap->flags & MR_AN_ENABLE) { 3459 ap->link_time = 0; 3460 ap->cur_time = 0; 3461 ap->ability_match_cfg = 0; 3462 ap->ability_match_count = 0; 3463 ap->ability_match = 0; 3464 ap->idle_match = 0; 3465 ap->ack_match = 0; 3466 3467 ap->state = ANEG_STATE_RESTART_INIT; 3468 } else { 3469 ap->state = ANEG_STATE_DISABLE_LINK_OK; 3470 } 3471 break; 3472 3473 case ANEG_STATE_RESTART_INIT: 3474 ap->link_time = ap->cur_time; 3475 ap->flags &= ~(MR_NP_LOADED); 3476 ap->txconfig = 0; 3477 tw32(MAC_TX_AUTO_NEG, 0); 3478 tp->mac_mode |= MAC_MODE_SEND_CONFIGS; 3479 tw32_f(MAC_MODE, tp->mac_mode); 3480 udelay(40); 3481 3482 ret = ANEG_TIMER_ENAB; 3483 ap->state = ANEG_STATE_RESTART; 3484 3485 /* fallthru */ 3486 case ANEG_STATE_RESTART: 3487 delta = ap->cur_time - ap->link_time; 3488 if (delta > ANEG_STATE_SETTLE_TIME) 3489 ap->state = ANEG_STATE_ABILITY_DETECT_INIT; 3490 else 3491 ret = ANEG_TIMER_ENAB; 3492 break; 3493 3494 case ANEG_STATE_DISABLE_LINK_OK: 3495 ret = ANEG_DONE; 3496 break; 3497 3498 case ANEG_STATE_ABILITY_DETECT_INIT: 3499 ap->flags &= ~(MR_TOGGLE_TX); 3500 ap->txconfig = ANEG_CFG_FD; 3501 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); 3502 if (flowctrl & ADVERTISE_1000XPAUSE) 3503 ap->txconfig |= ANEG_CFG_PS1; 3504 if (flowctrl & ADVERTISE_1000XPSE_ASYM) 3505 ap->txconfig |= ANEG_CFG_PS2; 3506 tw32(MAC_TX_AUTO_NEG, ap->txconfig); 3507 tp->mac_mode |= MAC_MODE_SEND_CONFIGS; 3508 tw32_f(MAC_MODE, tp->mac_mode); 3509 udelay(40); 3510 3511 ap->state = ANEG_STATE_ABILITY_DETECT; 3512 break; 3513 3514 case ANEG_STATE_ABILITY_DETECT: 3515 if (ap->ability_match != 0 && ap->rxconfig != 0) 3516 ap->state = ANEG_STATE_ACK_DETECT_INIT; 3517 break; 3518 3519 case ANEG_STATE_ACK_DETECT_INIT: 3520 ap->txconfig |= ANEG_CFG_ACK; 3521 tw32(MAC_TX_AUTO_NEG, ap->txconfig); 3522 tp->mac_mode |= MAC_MODE_SEND_CONFIGS; 3523 tw32_f(MAC_MODE, tp->mac_mode); 3524 udelay(40); 3525 3526 ap->state = ANEG_STATE_ACK_DETECT; 3527 3528 /* fallthru */ 3529 case ANEG_STATE_ACK_DETECT: 3530 if (ap->ack_match != 0) { 3531 if ((ap->rxconfig & ~ANEG_CFG_ACK) == 3532 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) { 3533 ap->state = ANEG_STATE_COMPLETE_ACK_INIT; 3534 } else { 3535 ap->state = ANEG_STATE_AN_ENABLE; 3536 } 3537 } else if (ap->ability_match != 0 && 3538 ap->rxconfig == 0) { 3539 ap->state = ANEG_STATE_AN_ENABLE; 3540 } 3541 break; 3542 3543 case ANEG_STATE_COMPLETE_ACK_INIT: 3544 if (ap->rxconfig & ANEG_CFG_INVAL) { 3545 ret = ANEG_FAILED; 3546 break; 3547 } 3548 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX | 3549 MR_LP_ADV_HALF_DUPLEX | 3550 MR_LP_ADV_SYM_PAUSE | 3551 MR_LP_ADV_ASYM_PAUSE | 3552 MR_LP_ADV_REMOTE_FAULT1 | 3553 MR_LP_ADV_REMOTE_FAULT2 | 3554 MR_LP_ADV_NEXT_PAGE | 3555 MR_TOGGLE_RX | 3556 MR_NP_RX); 3557 if (ap->rxconfig & ANEG_CFG_FD) 3558 ap->flags |= MR_LP_ADV_FULL_DUPLEX; 3559 if 
(ap->rxconfig & ANEG_CFG_HD) 3560 ap->flags |= MR_LP_ADV_HALF_DUPLEX; 3561 if (ap->rxconfig & ANEG_CFG_PS1) 3562 ap->flags |= MR_LP_ADV_SYM_PAUSE; 3563 if (ap->rxconfig & ANEG_CFG_PS2) 3564 ap->flags |= MR_LP_ADV_ASYM_PAUSE; 3565 if (ap->rxconfig & ANEG_CFG_RF1) 3566 ap->flags |= MR_LP_ADV_REMOTE_FAULT1; 3567 if (ap->rxconfig & ANEG_CFG_RF2) 3568 ap->flags |= MR_LP_ADV_REMOTE_FAULT2; 3569 if (ap->rxconfig & ANEG_CFG_NP) 3570 ap->flags |= MR_LP_ADV_NEXT_PAGE; 3571 3572 ap->link_time = ap->cur_time; 3573 3574 ap->flags ^= (MR_TOGGLE_TX); 3575 if (ap->rxconfig & 0x0008) 3576 ap->flags |= MR_TOGGLE_RX; 3577 if (ap->rxconfig & ANEG_CFG_NP) 3578 ap->flags |= MR_NP_RX; 3579 ap->flags |= MR_PAGE_RX; 3580 3581 ap->state = ANEG_STATE_COMPLETE_ACK; 3582 ret = ANEG_TIMER_ENAB; 3583 break; 3584 3585 case ANEG_STATE_COMPLETE_ACK: 3586 if (ap->ability_match != 0 && 3587 ap->rxconfig == 0) { 3588 ap->state = ANEG_STATE_AN_ENABLE; 3589 break; 3590 } 3591 delta = ap->cur_time - ap->link_time; 3592 if (delta > ANEG_STATE_SETTLE_TIME) { 3593 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) { 3594 ap->state = ANEG_STATE_IDLE_DETECT_INIT; 3595 } else { 3596 if ((ap->txconfig & ANEG_CFG_NP) == 0 && 3597 !(ap->flags & MR_NP_RX)) { 3598 ap->state = ANEG_STATE_IDLE_DETECT_INIT; 3599 } else { 3600 ret = ANEG_FAILED; 3601 } 3602 } 3603 } 3604 break; 3605 3606 case ANEG_STATE_IDLE_DETECT_INIT: 3607 ap->link_time = ap->cur_time; 3608 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; 3609 tw32_f(MAC_MODE, tp->mac_mode); 3610 udelay(40); 3611 3612 ap->state = ANEG_STATE_IDLE_DETECT; 3613 ret = ANEG_TIMER_ENAB; 3614 break; 3615 3616 case ANEG_STATE_IDLE_DETECT: 3617 if (ap->ability_match != 0 && 3618 ap->rxconfig == 0) { 3619 ap->state = ANEG_STATE_AN_ENABLE; 3620 break; 3621 } 3622 delta = ap->cur_time - ap->link_time; 3623 if (delta > ANEG_STATE_SETTLE_TIME) { 3624 /* XXX another gem from the Broadcom driver :( */ 3625 ap->state = ANEG_STATE_LINK_OK; 3626 } 3627 break; 3628 3629 case ANEG_STATE_LINK_OK: 3630 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK); 3631 ret = ANEG_DONE; 3632 break; 3633 3634 case ANEG_STATE_NEXT_PAGE_WAIT_INIT: 3635 /* ??? unimplemented */ 3636 break; 3637 3638 case ANEG_STATE_NEXT_PAGE_WAIT: 3639 /* ??? 
unimplemented */ 3640 break; 3641 3642 default: 3643 ret = ANEG_FAILED; 3644 break; 3645 } 3646 3647 return ret; 3648} 3649 3650static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags) 3651{ 3652 int res = 0; 3653 struct tg3_fiber_aneginfo aninfo; 3654 int status = ANEG_FAILED; 3655 unsigned int tick; 3656 u32 tmp; 3657 3658 tw32_f(MAC_TX_AUTO_NEG, 0); 3659 3660 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; 3661 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII); 3662 udelay(40); 3663 3664 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS); 3665 udelay(40); 3666 3667 memset(&aninfo, 0, sizeof(aninfo)); 3668 aninfo.flags |= MR_AN_ENABLE; 3669 aninfo.state = ANEG_STATE_UNKNOWN; 3670 aninfo.cur_time = 0; 3671 tick = 0; 3672 while (++tick < 195000) { 3673 status = tg3_fiber_aneg_smachine(tp, &aninfo); 3674 if (status == ANEG_DONE || status == ANEG_FAILED) 3675 break; 3676 3677 udelay(1); 3678 } 3679 3680 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; 3681 tw32_f(MAC_MODE, tp->mac_mode); 3682 udelay(40); 3683 3684 *txflags = aninfo.txconfig; 3685 *rxflags = aninfo.flags; 3686 3687 if (status == ANEG_DONE && 3688 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK | 3689 MR_LP_ADV_FULL_DUPLEX))) 3690 res = 1; 3691 3692 return res; 3693} 3694 3695static void tg3_init_bcm8002(struct tg3 *tp) 3696{ 3697 u32 mac_status = tr32(MAC_STATUS); 3698 int i; 3699 3700 /* Reset when initting first time or we have a link. */ 3701 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) && 3702 !(mac_status & MAC_STATUS_PCS_SYNCED)) 3703 return; 3704 3705 /* Set PLL lock range. */ 3706 tg3_writephy(tp, 0x16, 0x8007); 3707 3708 /* SW reset */ 3709 tg3_writephy(tp, MII_BMCR, BMCR_RESET); 3710 3711 /* Wait for reset to complete. */ 3712 /* XXX schedule_timeout() ... */ 3713 for (i = 0; i < 500; i++) 3714 udelay(10); 3715 3716 /* Config mode; select PMA/Ch 1 regs. */ 3717 tg3_writephy(tp, 0x10, 0x8411); 3718 3719 /* Enable auto-lock and comdet, select txclk for tx. */ 3720 tg3_writephy(tp, 0x11, 0x0a10); 3721 3722 tg3_writephy(tp, 0x18, 0x00a0); 3723 tg3_writephy(tp, 0x16, 0x41ff); 3724 3725 /* Assert and deassert POR. */ 3726 tg3_writephy(tp, 0x13, 0x0400); 3727 udelay(40); 3728 tg3_writephy(tp, 0x13, 0x0000); 3729 3730 tg3_writephy(tp, 0x11, 0x0a50); 3731 udelay(40); 3732 tg3_writephy(tp, 0x11, 0x0a10); 3733 3734 /* Wait for signal to stabilize */ 3735 /* XXX schedule_timeout() ... */ 3736 for (i = 0; i < 15000; i++) 3737 udelay(10); 3738 3739 /* Deselect the channel register so we can read the PHYID 3740 * later. 
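 * (Writing 0x8011 to register 0x10 undoes the 0x8411 channel
 * select performed above.)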
3741 */ 3742 tg3_writephy(tp, 0x10, 0x8011); 3743} 3744 3745static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status) 3746{ 3747 u16 flowctrl; 3748 u32 sg_dig_ctrl, sg_dig_status; 3749 u32 serdes_cfg, expected_sg_dig_ctrl; 3750 int workaround, port_a; 3751 int current_link_up; 3752 3753 serdes_cfg = 0; 3754 expected_sg_dig_ctrl = 0; 3755 workaround = 0; 3756 port_a = 1; 3757 current_link_up = 0; 3758 3759 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 && 3760 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) { 3761 workaround = 1; 3762 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) 3763 port_a = 0; 3764 3765 /* preserve bits 0-11,13,14 for signal pre-emphasis */ 3766 /* preserve bits 20-23 for voltage regulator */ 3767 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff; 3768 } 3769 3770 sg_dig_ctrl = tr32(SG_DIG_CTRL); 3771 3772 if (tp->link_config.autoneg != AUTONEG_ENABLE) { 3773 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) { 3774 if (workaround) { 3775 u32 val = serdes_cfg; 3776 3777 if (port_a) 3778 val |= 0xc010000; 3779 else 3780 val |= 0x4010000; 3781 tw32_f(MAC_SERDES_CFG, val); 3782 } 3783 3784 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP); 3785 } 3786 if (mac_status & MAC_STATUS_PCS_SYNCED) { 3787 tg3_setup_flow_control(tp, 0, 0); 3788 current_link_up = 1; 3789 } 3790 goto out; 3791 } 3792 3793 /* Want auto-negotiation. */ 3794 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP; 3795 3796 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); 3797 if (flowctrl & ADVERTISE_1000XPAUSE) 3798 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP; 3799 if (flowctrl & ADVERTISE_1000XPSE_ASYM) 3800 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE; 3801 3802 if (sg_dig_ctrl != expected_sg_dig_ctrl) { 3803 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) && 3804 tp->serdes_counter && 3805 ((mac_status & (MAC_STATUS_PCS_SYNCED | 3806 MAC_STATUS_RCVD_CFG)) == 3807 MAC_STATUS_PCS_SYNCED)) { 3808 tp->serdes_counter--; 3809 current_link_up = 1; 3810 goto out; 3811 } 3812restart_autoneg: 3813 if (workaround) 3814 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000); 3815 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET); 3816 udelay(5); 3817 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl); 3818 3819 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; 3820 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; 3821 } else if (mac_status & (MAC_STATUS_PCS_SYNCED | 3822 MAC_STATUS_SIGNAL_DET)) { 3823 sg_dig_status = tr32(SG_DIG_STATUS); 3824 mac_status = tr32(MAC_STATUS); 3825 3826 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) && 3827 (mac_status & MAC_STATUS_PCS_SYNCED)) { 3828 u32 local_adv = 0, remote_adv = 0; 3829 3830 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP) 3831 local_adv |= ADVERTISE_1000XPAUSE; 3832 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE) 3833 local_adv |= ADVERTISE_1000XPSE_ASYM; 3834 3835 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE) 3836 remote_adv |= LPA_1000XPAUSE; 3837 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE) 3838 remote_adv |= LPA_1000XPAUSE_ASYM; 3839 3840 tg3_setup_flow_control(tp, local_adv, remote_adv); 3841 current_link_up = 1; 3842 tp->serdes_counter = 0; 3843 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; 3844 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) { 3845 if (tp->serdes_counter) 3846 tp->serdes_counter--; 3847 else { 3848 if (workaround) { 3849 u32 val = serdes_cfg; 3850 3851 if (port_a) 3852 val |= 0xc010000; 3853 else 3854 val |= 0x4010000; 3855 3856 tw32_f(MAC_SERDES_CFG, val); 3857 } 3858 3859 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP); 3860 udelay(40); 3861 3862 /* Link 
parallel detection - link is up */ 3863 /* only if we have PCS_SYNC and not */ 3864 /* receiving config code words */ 3865 mac_status = tr32(MAC_STATUS); 3866 if ((mac_status & MAC_STATUS_PCS_SYNCED) && 3867 !(mac_status & MAC_STATUS_RCVD_CFG)) { 3868 tg3_setup_flow_control(tp, 0, 0); 3869 current_link_up = 1; 3870 tp->tg3_flags2 |= 3871 TG3_FLG2_PARALLEL_DETECT; 3872 tp->serdes_counter = 3873 SERDES_PARALLEL_DET_TIMEOUT; 3874 } else 3875 goto restart_autoneg; 3876 } 3877 } 3878 } else { 3879 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; 3880 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; 3881 } 3882 3883out: 3884 return current_link_up; 3885} 3886 3887static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status) 3888{ 3889 int current_link_up = 0; 3890 3891 if (!(mac_status & MAC_STATUS_PCS_SYNCED)) 3892 goto out; 3893 3894 if (tp->link_config.autoneg == AUTONEG_ENABLE) { 3895 u32 txflags, rxflags; 3896 int i; 3897 3898 if (fiber_autoneg(tp, &txflags, &rxflags)) { 3899 u32 local_adv = 0, remote_adv = 0; 3900 3901 if (txflags & ANEG_CFG_PS1) 3902 local_adv |= ADVERTISE_1000XPAUSE; 3903 if (txflags & ANEG_CFG_PS2) 3904 local_adv |= ADVERTISE_1000XPSE_ASYM; 3905 3906 if (rxflags & MR_LP_ADV_SYM_PAUSE) 3907 remote_adv |= LPA_1000XPAUSE; 3908 if (rxflags & MR_LP_ADV_ASYM_PAUSE) 3909 remote_adv |= LPA_1000XPAUSE_ASYM; 3910 3911 tg3_setup_flow_control(tp, local_adv, remote_adv); 3912 3913 current_link_up = 1; 3914 } 3915 for (i = 0; i < 30; i++) { 3916 udelay(20); 3917 tw32_f(MAC_STATUS, 3918 (MAC_STATUS_SYNC_CHANGED | 3919 MAC_STATUS_CFG_CHANGED)); 3920 udelay(40); 3921 if ((tr32(MAC_STATUS) & 3922 (MAC_STATUS_SYNC_CHANGED | 3923 MAC_STATUS_CFG_CHANGED)) == 0) 3924 break; 3925 } 3926 3927 mac_status = tr32(MAC_STATUS); 3928 if (current_link_up == 0 && 3929 (mac_status & MAC_STATUS_PCS_SYNCED) && 3930 !(mac_status & MAC_STATUS_RCVD_CFG)) 3931 current_link_up = 1; 3932 } else { 3933 tg3_setup_flow_control(tp, 0, 0); 3934 3935 /* Forcing 1000FD link up. */ 3936 current_link_up = 1; 3937 3938 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS)); 3939 udelay(40); 3940 3941 tw32_f(MAC_MODE, tp->mac_mode); 3942 udelay(40); 3943 } 3944 3945out: 3946 return current_link_up; 3947} 3948 3949static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset) 3950{ 3951 u32 orig_pause_cfg; 3952 u16 orig_active_speed; 3953 u8 orig_active_duplex; 3954 u32 mac_status; 3955 int current_link_up; 3956 int i; 3957 3958 orig_pause_cfg = tp->link_config.active_flowctrl; 3959 orig_active_speed = tp->link_config.active_speed; 3960 orig_active_duplex = tp->link_config.active_duplex; 3961 3962 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) && 3963 netif_carrier_ok(tp->dev) && 3964 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) { 3965 mac_status = tr32(MAC_STATUS); 3966 mac_status &= (MAC_STATUS_PCS_SYNCED | 3967 MAC_STATUS_SIGNAL_DET | 3968 MAC_STATUS_CFG_CHANGED | 3969 MAC_STATUS_RCVD_CFG); 3970 if (mac_status == (MAC_STATUS_PCS_SYNCED | 3971 MAC_STATUS_SIGNAL_DET)) { 3972 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | 3973 MAC_STATUS_CFG_CHANGED)); 3974 return 0; 3975 } 3976 } 3977 3978 tw32_f(MAC_TX_AUTO_NEG, 0); 3979 3980 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); 3981 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI; 3982 tw32_f(MAC_MODE, tp->mac_mode); 3983 udelay(40); 3984 3985 if (tp->phy_id == TG3_PHY_ID_BCM8002) 3986 tg3_init_bcm8002(tp); 3987 3988 /* Enable link change event even when serdes polling. 
*/ 3989 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 3990 udelay(40); 3991 3992 current_link_up = 0; 3993 mac_status = tr32(MAC_STATUS); 3994 3995 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) 3996 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status); 3997 else 3998 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status); 3999 4000 tp->napi[0].hw_status->status = 4001 (SD_STATUS_UPDATED | 4002 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG)); 4003 4004 for (i = 0; i < 100; i++) { 4005 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | 4006 MAC_STATUS_CFG_CHANGED)); 4007 udelay(5); 4008 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED | 4009 MAC_STATUS_CFG_CHANGED | 4010 MAC_STATUS_LNKSTATE_CHANGED)) == 0) 4011 break; 4012 } 4013 4014 mac_status = tr32(MAC_STATUS); 4015 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) { 4016 current_link_up = 0; 4017 if (tp->link_config.autoneg == AUTONEG_ENABLE && 4018 tp->serdes_counter == 0) { 4019 tw32_f(MAC_MODE, (tp->mac_mode | 4020 MAC_MODE_SEND_CONFIGS)); 4021 udelay(1); 4022 tw32_f(MAC_MODE, tp->mac_mode); 4023 } 4024 } 4025 4026 if (current_link_up == 1) { 4027 tp->link_config.active_speed = SPEED_1000; 4028 tp->link_config.active_duplex = DUPLEX_FULL; 4029 tw32(MAC_LED_CTRL, (tp->led_ctrl | 4030 LED_CTRL_LNKLED_OVERRIDE | 4031 LED_CTRL_1000MBPS_ON)); 4032 } else { 4033 tp->link_config.active_speed = SPEED_INVALID; 4034 tp->link_config.active_duplex = DUPLEX_INVALID; 4035 tw32(MAC_LED_CTRL, (tp->led_ctrl | 4036 LED_CTRL_LNKLED_OVERRIDE | 4037 LED_CTRL_TRAFFIC_OVERRIDE)); 4038 } 4039 4040 if (current_link_up != netif_carrier_ok(tp->dev)) { 4041 if (current_link_up) 4042 netif_carrier_on(tp->dev); 4043 else 4044 netif_carrier_off(tp->dev); 4045 tg3_link_report(tp); 4046 } else { 4047 u32 now_pause_cfg = tp->link_config.active_flowctrl; 4048 if (orig_pause_cfg != now_pause_cfg || 4049 orig_active_speed != tp->link_config.active_speed || 4050 orig_active_duplex != tp->link_config.active_duplex) 4051 tg3_link_report(tp); 4052 } 4053 4054 return 0; 4055} 4056 4057static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) 4058{ 4059 int current_link_up, err = 0; 4060 u32 bmsr, bmcr; 4061 u16 current_speed; 4062 u8 current_duplex; 4063 u32 local_adv, remote_adv; 4064 4065 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 4066 tw32_f(MAC_MODE, tp->mac_mode); 4067 udelay(40); 4068 4069 tw32(MAC_EVENT, 0); 4070 4071 tw32_f(MAC_STATUS, 4072 (MAC_STATUS_SYNC_CHANGED | 4073 MAC_STATUS_CFG_CHANGED | 4074 MAC_STATUS_MI_COMPLETION | 4075 MAC_STATUS_LNKSTATE_CHANGED)); 4076 udelay(40); 4077 4078 if (force_reset) 4079 tg3_phy_reset(tp); 4080 4081 current_link_up = 0; 4082 current_speed = SPEED_INVALID; 4083 current_duplex = DUPLEX_INVALID; 4084 4085 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 4086 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 4087 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { 4088 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) 4089 bmsr |= BMSR_LSTATUS; 4090 else 4091 bmsr &= ~BMSR_LSTATUS; 4092 } 4093 4094 err |= tg3_readphy(tp, MII_BMCR, &bmcr); 4095 4096 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset && 4097 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) { 4098 /* do nothing, just check for link up at the end */ 4099 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) { 4100 u32 adv, new_adv; 4101 4102 err |= tg3_readphy(tp, MII_ADVERTISE, &adv); 4103 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF | 4104 ADVERTISE_1000XPAUSE | 4105 ADVERTISE_1000XPSE_ASYM | 4106 ADVERTISE_SLCT); 4107 4108 new_adv |= 
tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); 4109 4110 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half) 4111 new_adv |= ADVERTISE_1000XHALF; 4112 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full) 4113 new_adv |= ADVERTISE_1000XFULL; 4114 4115 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) { 4116 tg3_writephy(tp, MII_ADVERTISE, new_adv); 4117 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART; 4118 tg3_writephy(tp, MII_BMCR, bmcr); 4119 4120 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 4121 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S; 4122 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; 4123 4124 return err; 4125 } 4126 } else { 4127 u32 new_bmcr; 4128 4129 bmcr &= ~BMCR_SPEED1000; 4130 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX); 4131 4132 if (tp->link_config.duplex == DUPLEX_FULL) 4133 new_bmcr |= BMCR_FULLDPLX; 4134 4135 if (new_bmcr != bmcr) { 4136 /* BMCR_SPEED1000 is a reserved bit that needs 4137 * to be set on write. 4138 */ 4139 new_bmcr |= BMCR_SPEED1000; 4140 4141 /* Force a linkdown */ 4142 if (netif_carrier_ok(tp->dev)) { 4143 u32 adv; 4144 4145 err |= tg3_readphy(tp, MII_ADVERTISE, &adv); 4146 adv &= ~(ADVERTISE_1000XFULL | 4147 ADVERTISE_1000XHALF | 4148 ADVERTISE_SLCT); 4149 tg3_writephy(tp, MII_ADVERTISE, adv); 4150 tg3_writephy(tp, MII_BMCR, bmcr | 4151 BMCR_ANRESTART | 4152 BMCR_ANENABLE); 4153 udelay(10); 4154 netif_carrier_off(tp->dev); 4155 } 4156 tg3_writephy(tp, MII_BMCR, new_bmcr); 4157 bmcr = new_bmcr; 4158 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 4159 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 4160 if (GET_ASIC_REV(tp->pci_chip_rev_id) == 4161 ASIC_REV_5714) { 4162 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) 4163 bmsr |= BMSR_LSTATUS; 4164 else 4165 bmsr &= ~BMSR_LSTATUS; 4166 } 4167 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; 4168 } 4169 } 4170 4171 if (bmsr & BMSR_LSTATUS) { 4172 current_speed = SPEED_1000; 4173 current_link_up = 1; 4174 if (bmcr & BMCR_FULLDPLX) 4175 current_duplex = DUPLEX_FULL; 4176 else 4177 current_duplex = DUPLEX_HALF; 4178 4179 local_adv = 0; 4180 remote_adv = 0; 4181 4182 if (bmcr & BMCR_ANENABLE) { 4183 u32 common; 4184 4185 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv); 4186 err |= tg3_readphy(tp, MII_LPA, &remote_adv); 4187 common = local_adv & remote_adv; 4188 if (common & (ADVERTISE_1000XHALF | 4189 ADVERTISE_1000XFULL)) { 4190 if (common & ADVERTISE_1000XFULL) 4191 current_duplex = DUPLEX_FULL; 4192 else 4193 current_duplex = DUPLEX_HALF; 4194 } else { 4195 current_link_up = 0; 4196 } 4197 } 4198 } 4199 4200 if (current_link_up == 1 && current_duplex == DUPLEX_FULL) 4201 tg3_setup_flow_control(tp, local_adv, remote_adv); 4202 4203 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; 4204 if (tp->link_config.active_duplex == DUPLEX_HALF) 4205 tp->mac_mode |= MAC_MODE_HALF_DUPLEX; 4206 4207 tw32_f(MAC_MODE, tp->mac_mode); 4208 udelay(40); 4209 4210 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 4211 4212 tp->link_config.active_speed = current_speed; 4213 tp->link_config.active_duplex = current_duplex; 4214 4215 if (current_link_up != netif_carrier_ok(tp->dev)) { 4216 if (current_link_up) 4217 netif_carrier_on(tp->dev); 4218 else { 4219 netif_carrier_off(tp->dev); 4220 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; 4221 } 4222 tg3_link_report(tp); 4223 } 4224 return err; 4225} 4226 4227static void tg3_serdes_parallel_detect(struct tg3 *tp) 4228{ 4229 if (tp->serdes_counter) { 4230 /* Give autoneg time to complete. 
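 * (tp->serdes_counter is seeded with one of the SERDES_AN_TIMEOUT_*
 * values when autoneg is restarted and counts down here on each call
 * from the driver timer, so the parallel-detection logic below only
 * runs once that grace period has expired.)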
*/ 4231 tp->serdes_counter--; 4232 return; 4233 } 4234 4235 if (!netif_carrier_ok(tp->dev) && 4236 (tp->link_config.autoneg == AUTONEG_ENABLE)) { 4237 u32 bmcr; 4238 4239 tg3_readphy(tp, MII_BMCR, &bmcr); 4240 if (bmcr & BMCR_ANENABLE) { 4241 u32 phy1, phy2; 4242 4243 /* Select shadow register 0x1f */ 4244 tg3_writephy(tp, 0x1c, 0x7c00); 4245 tg3_readphy(tp, 0x1c, &phy1); 4246 4247 /* Select expansion interrupt status register */ 4248 tg3_writephy(tp, 0x17, 0x0f01); 4249 tg3_readphy(tp, 0x15, &phy2); 4250 tg3_readphy(tp, 0x15, &phy2); 4251 4252 if ((phy1 & 0x10) && !(phy2 & 0x20)) { 4253 /* We have signal detect and not receiving 4254 * config code words, link is up by parallel 4255 * detection. 4256 */ 4257 4258 bmcr &= ~BMCR_ANENABLE; 4259 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX; 4260 tg3_writephy(tp, MII_BMCR, bmcr); 4261 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT; 4262 } 4263 } 4264 } else if (netif_carrier_ok(tp->dev) && 4265 (tp->link_config.autoneg == AUTONEG_ENABLE) && 4266 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) { 4267 u32 phy2; 4268 4269 /* Select expansion interrupt status register */ 4270 tg3_writephy(tp, 0x17, 0x0f01); 4271 tg3_readphy(tp, 0x15, &phy2); 4272 if (phy2 & 0x20) { 4273 u32 bmcr; 4274 4275 /* Config code words received, turn on autoneg. */ 4276 tg3_readphy(tp, MII_BMCR, &bmcr); 4277 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE); 4278 4279 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; 4280 4281 } 4282 } 4283} 4284 4285static int tg3_setup_phy(struct tg3 *tp, int force_reset) 4286{ 4287 int err; 4288 4289 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) 4290 err = tg3_setup_fiber_phy(tp, force_reset); 4291 else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) 4292 err = tg3_setup_fiber_mii_phy(tp, force_reset); 4293 else 4294 err = tg3_setup_copper_phy(tp, force_reset); 4295 4296 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) { 4297 u32 val, scale; 4298 4299 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK; 4300 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5) 4301 scale = 65; 4302 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25) 4303 scale = 6; 4304 else 4305 scale = 12; 4306 4307 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK; 4308 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT); 4309 tw32(GRC_MISC_CFG, val); 4310 } 4311 4312 if (tp->link_config.active_speed == SPEED_1000 && 4313 tp->link_config.active_duplex == DUPLEX_HALF) 4314 tw32(MAC_TX_LENGTHS, 4315 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | 4316 (6 << TX_LENGTHS_IPG_SHIFT) | 4317 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT))); 4318 else 4319 tw32(MAC_TX_LENGTHS, 4320 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | 4321 (6 << TX_LENGTHS_IPG_SHIFT) | 4322 (32 << TX_LENGTHS_SLOT_TIME_SHIFT))); 4323 4324 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { 4325 if (netif_carrier_ok(tp->dev)) { 4326 tw32(HOSTCC_STAT_COAL_TICKS, 4327 tp->coal.stats_block_coalesce_usecs); 4328 } else { 4329 tw32(HOSTCC_STAT_COAL_TICKS, 0); 4330 } 4331 } 4332 4333 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) { 4334 u32 val = tr32(PCIE_PWR_MGMT_THRESH); 4335 if (!netif_carrier_ok(tp->dev)) 4336 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) | 4337 tp->pwrmgmt_thresh; 4338 else 4339 val |= PCIE_PWR_MGMT_L1_THRESH_MSK; 4340 tw32(PCIE_PWR_MGMT_THRESH, val); 4341 } 4342 4343 return err; 4344} 4345 4346/* This is called whenever we suspect that the system chipset is re- 4347 * ordering the sequence of MMIO to the tx send mailbox. The symptom 4348 * is bogus tx completions. 
We try to recover by setting the
4349 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4350 * in the workqueue.
4351 */
4352static void tg3_tx_recover(struct tg3 *tp)
4353{
4354 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
4355 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4356
4357 netdev_warn(tp->dev,
4358 "The system may be re-ordering memory-mapped I/O "
4359 "cycles to the network device, attempting to recover. "
4360 "Please report the problem to the driver maintainer "
4361 "and include system chipset information.\n");
4362
4363 spin_lock(&tp->lock);
4364 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
4365 spin_unlock(&tp->lock);
4366}
4367
4368static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4369{
4370 smp_mb();
4371 return tnapi->tx_pending -
4372 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4373}
4374
4375/* Tigon3 never reports partial packet sends. So we do not
4376 * need special logic to handle SKBs that have not had all
4377 * of their frags sent yet, like SunGEM does.
4378 */
4379static void tg3_tx(struct tg3_napi *tnapi)
4380{
4381 struct tg3 *tp = tnapi->tp;
4382 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4383 u32 sw_idx = tnapi->tx_cons;
4384 struct netdev_queue *txq;
4385 int index = tnapi - tp->napi;
4386
4387 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
4388 index--;
4389
4390 txq = netdev_get_tx_queue(tp->dev, index);
4391
4392 while (sw_idx != hw_idx) {
4393 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4394 struct sk_buff *skb = ri->skb;
4395 int i, tx_bug = 0;
4396
4397 if (unlikely(skb == NULL)) {
4398 tg3_tx_recover(tp);
4399 return;
4400 }
4401
4402 pci_unmap_single(tp->pdev,
4403 dma_unmap_addr(ri, mapping),
4404 skb_headlen(skb),
4405 PCI_DMA_TODEVICE);
4406
4407 ri->skb = NULL;
4408
4409 sw_idx = NEXT_TX(sw_idx);
4410
4411 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4412 ri = &tnapi->tx_buffers[sw_idx];
4413 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4414 tx_bug = 1;
4415
4416 pci_unmap_page(tp->pdev,
4417 dma_unmap_addr(ri, mapping),
4418 skb_shinfo(skb)->frags[i].size,
4419 PCI_DMA_TODEVICE);
4420 sw_idx = NEXT_TX(sw_idx);
4421 }
4422
4423 dev_kfree_skb(skb);
4424
4425 if (unlikely(tx_bug)) {
4426 tg3_tx_recover(tp);
4427 return;
4428 }
4429 }
4430
4431 tnapi->tx_cons = sw_idx;
4432
4433 /* Need to make the tx_cons update visible to tg3_start_xmit()
4434 * before checking for netif_queue_stopped(). Without the
4435 * memory barrier, there is a small possibility that tg3_start_xmit()
4436 * will miss it and cause the queue to be stopped forever.
4437 */
4438 smp_mb();
4439
4440 if (unlikely(netif_tx_queue_stopped(txq) &&
4441 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4442 __netif_tx_lock(txq, smp_processor_id());
4443 if (netif_tx_queue_stopped(txq) &&
4444 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4445 netif_tx_wake_queue(txq);
4446 __netif_tx_unlock(txq);
4447 }
4448}
4449
4450static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4451{
4452 if (!ri->skb)
4453 return;
4454
4455 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4456 map_sz, PCI_DMA_FROMDEVICE);
4457 dev_kfree_skb_any(ri->skb);
4458 ri->skb = NULL;
4459}
4460
4461/* Returns size of skb allocated or < 0 on error.
4462 *
4463 * We only need to fill in the address because the other members
4464 * of the RX descriptor are invariant, see tg3_init_rings.
4465 *
4466 * Note the purposeful asymmetry of cpu vs. chip accesses.
For 4467 * posting buffers we only dirty the first cache line of the RX 4468 * descriptor (containing the address). Whereas for the RX status 4469 * buffers the cpu only reads the last cacheline of the RX descriptor 4470 * (to fetch the error flags, vlan tag, checksum, and opaque cookie). 4471 */ 4472static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, 4473 u32 opaque_key, u32 dest_idx_unmasked) 4474{ 4475 struct tg3_rx_buffer_desc *desc; 4476 struct ring_info *map, *src_map; 4477 struct sk_buff *skb; 4478 dma_addr_t mapping; 4479 int skb_size, dest_idx; 4480 4481 src_map = NULL; 4482 switch (opaque_key) { 4483 case RXD_OPAQUE_RING_STD: 4484 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE; 4485 desc = &tpr->rx_std[dest_idx]; 4486 map = &tpr->rx_std_buffers[dest_idx]; 4487 skb_size = tp->rx_pkt_map_sz; 4488 break; 4489 4490 case RXD_OPAQUE_RING_JUMBO: 4491 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE; 4492 desc = &tpr->rx_jmb[dest_idx].std; 4493 map = &tpr->rx_jmb_buffers[dest_idx]; 4494 skb_size = TG3_RX_JMB_MAP_SZ; 4495 break; 4496 4497 default: 4498 return -EINVAL; 4499 } 4500 4501 /* Do not overwrite any of the map or rp information 4502 * until we are sure we can commit to a new buffer. 4503 * 4504 * Callers depend upon this behavior and assume that 4505 * we leave everything unchanged if we fail. 4506 */ 4507 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset); 4508 if (skb == NULL) 4509 return -ENOMEM; 4510 4511 skb_reserve(skb, tp->rx_offset); 4512 4513 mapping = pci_map_single(tp->pdev, skb->data, skb_size, 4514 PCI_DMA_FROMDEVICE); 4515 if (pci_dma_mapping_error(tp->pdev, mapping)) { 4516 dev_kfree_skb(skb); 4517 return -EIO; 4518 } 4519 4520 map->skb = skb; 4521 dma_unmap_addr_set(map, mapping, mapping); 4522 4523 desc->addr_hi = ((u64)mapping >> 32); 4524 desc->addr_lo = ((u64)mapping & 0xffffffff); 4525 4526 return skb_size; 4527} 4528 4529/* We only need to move over in the address because the other 4530 * members of the RX descriptor are invariant. See notes above 4531 * tg3_alloc_rx_skb for full details. 4532 */ 4533static void tg3_recycle_rx(struct tg3_napi *tnapi, 4534 struct tg3_rx_prodring_set *dpr, 4535 u32 opaque_key, int src_idx, 4536 u32 dest_idx_unmasked) 4537{ 4538 struct tg3 *tp = tnapi->tp; 4539 struct tg3_rx_buffer_desc *src_desc, *dest_desc; 4540 struct ring_info *src_map, *dest_map; 4541 struct tg3_rx_prodring_set *spr = &tp->prodring[0]; 4542 int dest_idx; 4543 4544 switch (opaque_key) { 4545 case RXD_OPAQUE_RING_STD: 4546 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE; 4547 dest_desc = &dpr->rx_std[dest_idx]; 4548 dest_map = &dpr->rx_std_buffers[dest_idx]; 4549 src_desc = &spr->rx_std[src_idx]; 4550 src_map = &spr->rx_std_buffers[src_idx]; 4551 break; 4552 4553 case RXD_OPAQUE_RING_JUMBO: 4554 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE; 4555 dest_desc = &dpr->rx_jmb[dest_idx].std; 4556 dest_map = &dpr->rx_jmb_buffers[dest_idx]; 4557 src_desc = &spr->rx_jmb[src_idx].std; 4558 src_map = &spr->rx_jmb_buffers[src_idx]; 4559 break; 4560 4561 default: 4562 return; 4563 } 4564 4565 dest_map->skb = src_map->skb; 4566 dma_unmap_addr_set(dest_map, mapping, 4567 dma_unmap_addr(src_map, mapping)); 4568 dest_desc->addr_hi = src_desc->addr_hi; 4569 dest_desc->addr_lo = src_desc->addr_lo; 4570 4571 /* Ensure that the update to the skb happens after the physical 4572 * addresses have been transferred to the new BD location. 
4573 */
4574 smp_wmb();
4575
4576 src_map->skb = NULL;
4577}
4578
4579/* The RX ring scheme is composed of multiple rings which post fresh
4580 * buffers to the chip, and one special ring the chip uses to report
4581 * status back to the host.
4582 *
4583 * The special ring reports the status of received packets to the
4584 * host. The chip does not write into the original descriptor the
4585 * RX buffer was obtained from. The chip simply takes the original
4586 * descriptor as provided by the host, updates the status and length
4587 * fields, then writes this into the next status ring entry.
4588 *
4589 * Each ring the host uses to post buffers to the chip is described
4590 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
4591 * it is first placed into the on-chip RAM. When the packet's length
4592 * is known, the chip walks down the TG3_BDINFO entries to select the ring.
4593 * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
4594 * whose MAXLEN covers the new packet's length is chosen.
4595 *
4596 * The "separate ring for rx status" scheme may sound odd, but it makes
4597 * sense from a cache coherency perspective. If only the host writes
4598 * to the buffer post rings, and only the chip writes to the rx status
4599 * rings, then cache lines never move beyond shared-modified state.
4600 * If both the host and chip were to write into the same ring, cache line
4601 * eviction could occur since both entities want it in an exclusive state.
4602 */
4603static int tg3_rx(struct tg3_napi *tnapi, int budget)
4604{
4605 struct tg3 *tp = tnapi->tp;
4606 u32 work_mask, rx_std_posted = 0;
4607 u32 std_prod_idx, jmb_prod_idx;
4608 u32 sw_idx = tnapi->rx_rcb_ptr;
4609 u16 hw_idx;
4610 int received;
4611 struct tg3_rx_prodring_set *tpr = tnapi->prodring;
4612
4613 hw_idx = *(tnapi->rx_rcb_prod_idx);
4614 /*
4615 * We need to order the read of hw_idx and the read of
4616 * the opaque cookie.
4617 */
4618 rmb();
4619 work_mask = 0;
4620 received = 0;
4621 std_prod_idx = tpr->rx_std_prod_idx;
4622 jmb_prod_idx = tpr->rx_jmb_prod_idx;
4623 while (sw_idx != hw_idx && budget > 0) {
4624 struct ring_info *ri;
4625 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4626 unsigned int len;
4627 struct sk_buff *skb;
4628 dma_addr_t dma_addr;
4629 u32 opaque_key, desc_idx, *post_ptr;
4630 bool hw_vlan __maybe_unused = false;
4631 u16 vtag __maybe_unused = 0;
4632
4633 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4634 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4635 if (opaque_key == RXD_OPAQUE_RING_STD) {
4636 ri = &tp->prodring[0].rx_std_buffers[desc_idx];
4637 dma_addr = dma_unmap_addr(ri, mapping);
4638 skb = ri->skb;
4639 post_ptr = &std_prod_idx;
4640 rx_std_posted++;
4641 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4642 ri = &tp->prodring[0].rx_jmb_buffers[desc_idx];
4643 dma_addr = dma_unmap_addr(ri, mapping);
4644 skb = ri->skb;
4645 post_ptr = &jmb_prod_idx;
4646 } else
4647 goto next_pkt_nopost;
4648
4649 work_mask |= opaque_key;
4650
4651 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4652 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4653 drop_it:
4654 tg3_recycle_rx(tnapi, tpr, opaque_key,
4655 desc_idx, *post_ptr);
4656 drop_it_no_recycle:
4657 /* Other statistics kept track of by card.
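 * Only the software rx_dropped counter is bumped here; CRC,
 * alignment and similar receive error counts are maintained by the
 * MAC in its hardware statistics block and are presumably merged in
 * when driver statistics are read back.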
*/ 4658 tp->net_stats.rx_dropped++; 4659 goto next_pkt; 4660 } 4661 4662 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4663 ETH_FCS_LEN; 4664 4665 if (len > TG3_RX_COPY_THRESH(tp)) { 4666 int skb_size; 4667 4668 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key, 4669 *post_ptr); 4670 if (skb_size < 0) 4671 goto drop_it; 4672 4673 pci_unmap_single(tp->pdev, dma_addr, skb_size, 4674 PCI_DMA_FROMDEVICE); 4675 4676 /* Ensure that the update to the skb happens 4677 * after the usage of the old DMA mapping. 4678 */ 4679 smp_wmb(); 4680 4681 ri->skb = NULL; 4682 4683 skb_put(skb, len); 4684 } else { 4685 struct sk_buff *copy_skb; 4686 4687 tg3_recycle_rx(tnapi, tpr, opaque_key, 4688 desc_idx, *post_ptr); 4689 4690 copy_skb = netdev_alloc_skb(tp->dev, len + VLAN_HLEN + 4691 TG3_RAW_IP_ALIGN); 4692 if (copy_skb == NULL) 4693 goto drop_it_no_recycle; 4694 4695 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN + VLAN_HLEN); 4696 skb_put(copy_skb, len); 4697 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); 4698 skb_copy_from_linear_data(skb, copy_skb->data, len); 4699 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); 4700 4701 /* We'll reuse the original ring buffer. */ 4702 skb = copy_skb; 4703 } 4704 4705 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) && 4706 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && 4707 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK) 4708 >> RXD_TCPCSUM_SHIFT) == 0xffff)) 4709 skb->ip_summed = CHECKSUM_UNNECESSARY; 4710 else 4711 skb->ip_summed = CHECKSUM_NONE; 4712 4713 skb->protocol = eth_type_trans(skb, tp->dev); 4714 4715 if (len > (tp->dev->mtu + ETH_HLEN) && 4716 skb->protocol != htons(ETH_P_8021Q)) { 4717 dev_kfree_skb(skb); 4718 goto next_pkt; 4719 } 4720 4721 if (desc->type_flags & RXD_FLAG_VLAN && 4722 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) { 4723 vtag = desc->err_vlan & RXD_VLAN_MASK; 4724#if TG3_VLAN_TAG_USED 4725 if (tp->vlgrp) 4726 hw_vlan = true; 4727 else 4728#endif 4729 { 4730 struct vlan_ethhdr *ve = (struct vlan_ethhdr *) 4731 __skb_push(skb, VLAN_HLEN); 4732 4733 memmove(ve, skb->data + VLAN_HLEN, 4734 ETH_ALEN * 2); 4735 ve->h_vlan_proto = htons(ETH_P_8021Q); 4736 ve->h_vlan_TCI = htons(vtag); 4737 } 4738 } 4739 4740#if TG3_VLAN_TAG_USED 4741 if (hw_vlan) 4742 vlan_gro_receive(&tnapi->napi, tp->vlgrp, vtag, skb); 4743 else 4744#endif 4745 napi_gro_receive(&tnapi->napi, skb); 4746 4747 received++; 4748 budget--; 4749 4750next_pkt: 4751 (*post_ptr)++; 4752 4753 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) { 4754 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE; 4755 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, 4756 tpr->rx_std_prod_idx); 4757 work_mask &= ~RXD_OPAQUE_RING_STD; 4758 rx_std_posted = 0; 4759 } 4760next_pkt_nopost: 4761 sw_idx++; 4762 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1); 4763 4764 /* Refresh hw_idx to see if there is new work */ 4765 if (sw_idx == hw_idx) { 4766 hw_idx = *(tnapi->rx_rcb_prod_idx); 4767 rmb(); 4768 } 4769 } 4770 4771 /* ACK the status ring. */ 4772 tnapi->rx_rcb_ptr = sw_idx; 4773 tw32_rx_mbox(tnapi->consmbox, sw_idx); 4774 4775 /* Refill RX ring(s). 
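 * (Worked example: std_prod_idx counts up without wrapping while
 * packets are processed above, and only the masked value is
 * published, so with a 512-entry standard ring (TG3_RX_RING_SIZE) a
 * local index of 515 is written out as 515 % 512 == 3, keeping the
 * hardware's producer index in range.)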
*/ 4776 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) { 4777 if (work_mask & RXD_OPAQUE_RING_STD) { 4778 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE; 4779 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, 4780 tpr->rx_std_prod_idx); 4781 } 4782 if (work_mask & RXD_OPAQUE_RING_JUMBO) { 4783 tpr->rx_jmb_prod_idx = jmb_prod_idx % 4784 TG3_RX_JUMBO_RING_SIZE; 4785 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, 4786 tpr->rx_jmb_prod_idx); 4787 } 4788 mmiowb(); 4789 } else if (work_mask) { 4790 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be 4791 * updated before the producer indices can be updated. 4792 */ 4793 smp_wmb(); 4794 4795 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE; 4796 tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE; 4797 4798 if (tnapi != &tp->napi[1]) 4799 napi_schedule(&tp->napi[1].napi); 4800 } 4801 4802 return received; 4803} 4804 4805static void tg3_poll_link(struct tg3 *tp) 4806{ 4807 /* handle link change and other phy events */ 4808 if (!(tp->tg3_flags & 4809 (TG3_FLAG_USE_LINKCHG_REG | 4810 TG3_FLAG_POLL_SERDES))) { 4811 struct tg3_hw_status *sblk = tp->napi[0].hw_status; 4812 4813 if (sblk->status & SD_STATUS_LINK_CHG) { 4814 sblk->status = SD_STATUS_UPDATED | 4815 (sblk->status & ~SD_STATUS_LINK_CHG); 4816 spin_lock(&tp->lock); 4817 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 4818 tw32_f(MAC_STATUS, 4819 (MAC_STATUS_SYNC_CHANGED | 4820 MAC_STATUS_CFG_CHANGED | 4821 MAC_STATUS_MI_COMPLETION | 4822 MAC_STATUS_LNKSTATE_CHANGED)); 4823 udelay(40); 4824 } else 4825 tg3_setup_phy(tp, 0); 4826 spin_unlock(&tp->lock); 4827 } 4828 } 4829} 4830 4831static int tg3_rx_prodring_xfer(struct tg3 *tp, 4832 struct tg3_rx_prodring_set *dpr, 4833 struct tg3_rx_prodring_set *spr) 4834{ 4835 u32 si, di, cpycnt, src_prod_idx; 4836 int i, err = 0; 4837 4838 while (1) { 4839 src_prod_idx = spr->rx_std_prod_idx; 4840 4841 /* Make sure updates to the rx_std_buffers[] entries and the 4842 * standard producer index are seen in the correct order. 4843 */ 4844 smp_rmb(); 4845 4846 if (spr->rx_std_cons_idx == src_prod_idx) 4847 break; 4848 4849 if (spr->rx_std_cons_idx < src_prod_idx) 4850 cpycnt = src_prod_idx - spr->rx_std_cons_idx; 4851 else 4852 cpycnt = TG3_RX_RING_SIZE - spr->rx_std_cons_idx; 4853 4854 cpycnt = min(cpycnt, TG3_RX_RING_SIZE - dpr->rx_std_prod_idx); 4855 4856 si = spr->rx_std_cons_idx; 4857 di = dpr->rx_std_prod_idx; 4858 4859 for (i = di; i < di + cpycnt; i++) { 4860 if (dpr->rx_std_buffers[i].skb) { 4861 cpycnt = i - di; 4862 err = -ENOSPC; 4863 break; 4864 } 4865 } 4866 4867 if (!cpycnt) 4868 break; 4869 4870 /* Ensure that updates to the rx_std_buffers ring and the 4871 * shadowed hardware producer ring from tg3_recycle_skb() are 4872 * ordered correctly WRT the skb check above. 4873 */ 4874 smp_rmb(); 4875 4876 memcpy(&dpr->rx_std_buffers[di], 4877 &spr->rx_std_buffers[si], 4878 cpycnt * sizeof(struct ring_info)); 4879 4880 for (i = 0; i < cpycnt; i++, di++, si++) { 4881 struct tg3_rx_buffer_desc *sbd, *dbd; 4882 sbd = &spr->rx_std[si]; 4883 dbd = &dpr->rx_std[di]; 4884 dbd->addr_hi = sbd->addr_hi; 4885 dbd->addr_lo = sbd->addr_lo; 4886 } 4887 4888 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) % 4889 TG3_RX_RING_SIZE; 4890 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) % 4891 TG3_RX_RING_SIZE; 4892 } 4893 4894 while (1) { 4895 src_prod_idx = spr->rx_jmb_prod_idx; 4896 4897 /* Make sure updates to the rx_jmb_buffers[] entries and 4898 * the jumbo producer index are seen in the correct order. 
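 * (This smp_rmb() pairs with the smp_wmb() in tg3_rx(), which makes
 * the buffer-ring updates visible before the producer-index stores;
 * without that pairing we could observe a fresh producer index but
 * stale ring entries.)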
4899 */ 4900 smp_rmb(); 4901 4902 if (spr->rx_jmb_cons_idx == src_prod_idx) 4903 break; 4904 4905 if (spr->rx_jmb_cons_idx < src_prod_idx) 4906 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx; 4907 else 4908 cpycnt = TG3_RX_JUMBO_RING_SIZE - spr->rx_jmb_cons_idx; 4909 4910 cpycnt = min(cpycnt, 4911 TG3_RX_JUMBO_RING_SIZE - dpr->rx_jmb_prod_idx); 4912 4913 si = spr->rx_jmb_cons_idx; 4914 di = dpr->rx_jmb_prod_idx; 4915 4916 for (i = di; i < di + cpycnt; i++) { 4917 if (dpr->rx_jmb_buffers[i].skb) { 4918 cpycnt = i - di; 4919 err = -ENOSPC; 4920 break; 4921 } 4922 } 4923 4924 if (!cpycnt) 4925 break; 4926 4927 /* Ensure that updates to the rx_jmb_buffers ring and the 4928 * shadowed hardware producer ring from tg3_recycle_skb() are 4929 * ordered correctly WRT the skb check above. 4930 */ 4931 smp_rmb(); 4932 4933 memcpy(&dpr->rx_jmb_buffers[di], 4934 &spr->rx_jmb_buffers[si], 4935 cpycnt * sizeof(struct ring_info)); 4936 4937 for (i = 0; i < cpycnt; i++, di++, si++) { 4938 struct tg3_rx_buffer_desc *sbd, *dbd; 4939 sbd = &spr->rx_jmb[si].std; 4940 dbd = &dpr->rx_jmb[di].std; 4941 dbd->addr_hi = sbd->addr_hi; 4942 dbd->addr_lo = sbd->addr_lo; 4943 } 4944 4945 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) % 4946 TG3_RX_JUMBO_RING_SIZE; 4947 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) % 4948 TG3_RX_JUMBO_RING_SIZE; 4949 } 4950 4951 return err; 4952} 4953 4954static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) 4955{ 4956 struct tg3 *tp = tnapi->tp; 4957 4958 /* run TX completion thread */ 4959 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) { 4960 tg3_tx(tnapi); 4961 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) 4962 return work_done; 4963 } 4964 4965 /* run RX thread, within the bounds set by NAPI. 4966 * All RX "locking" is done by ensuring outside 4967 * code synchronizes with tg3->napi.poll() 4968 */ 4969 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) 4970 work_done += tg3_rx(tnapi, budget - work_done); 4971 4972 if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) { 4973 struct tg3_rx_prodring_set *dpr = &tp->prodring[0]; 4974 int i, err = 0; 4975 u32 std_prod_idx = dpr->rx_std_prod_idx; 4976 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx; 4977 4978 for (i = 1; i < tp->irq_cnt; i++) 4979 err |= tg3_rx_prodring_xfer(tp, dpr, 4980 tp->napi[i].prodring); 4981 4982 wmb(); 4983 4984 if (std_prod_idx != dpr->rx_std_prod_idx) 4985 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, 4986 dpr->rx_std_prod_idx); 4987 4988 if (jmb_prod_idx != dpr->rx_jmb_prod_idx) 4989 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, 4990 dpr->rx_jmb_prod_idx); 4991 4992 mmiowb(); 4993 4994 if (err) 4995 tw32_f(HOSTCC_MODE, tp->coal_now); 4996 } 4997 4998 return work_done; 4999} 5000 5001static int tg3_poll_msix(struct napi_struct *napi, int budget) 5002{ 5003 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); 5004 struct tg3 *tp = tnapi->tp; 5005 int work_done = 0; 5006 struct tg3_hw_status *sblk = tnapi->hw_status; 5007 5008 while (1) { 5009 work_done = tg3_poll_work(tnapi, work_done, budget); 5010 5011 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) 5012 goto tx_recovery; 5013 5014 if (unlikely(work_done >= budget)) 5015 break; 5016 5017 /* tp->last_tag is used in tg3_int_reenable() below 5018 * to tell the hw how much work has been processed, 5019 * so we must read it before checking for more work. 
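 * (Sketch of the tagged scheme: if the chip last posted status_tag
 * 0x2f, the re-enable write of last_tag << 24 into the interrupt
 * mailbox below reports that everything up to tag 0x2f has been
 * serviced; the chip should only assert a new interrupt once newer
 * work carries a newer tag.)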
5020 */ 5021 tnapi->last_tag = sblk->status_tag; 5022 tnapi->last_irq_tag = tnapi->last_tag; 5023 rmb(); 5024 5025 /* check for RX/TX work to do */ 5026 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons && 5027 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) { 5028 napi_complete(napi); 5029 /* Reenable interrupts. */ 5030 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); 5031 mmiowb(); 5032 break; 5033 } 5034 } 5035 5036 return work_done; 5037 5038tx_recovery: 5039 /* work_done is guaranteed to be less than budget. */ 5040 napi_complete(napi); 5041 schedule_work(&tp->reset_task); 5042 return work_done; 5043} 5044 5045static int tg3_poll(struct napi_struct *napi, int budget) 5046{ 5047 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); 5048 struct tg3 *tp = tnapi->tp; 5049 int work_done = 0; 5050 struct tg3_hw_status *sblk = tnapi->hw_status; 5051 5052 while (1) { 5053 tg3_poll_link(tp); 5054 5055 work_done = tg3_poll_work(tnapi, work_done, budget); 5056 5057 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) 5058 goto tx_recovery; 5059 5060 if (unlikely(work_done >= budget)) 5061 break; 5062 5063 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) { 5064 /* tp->last_tag is used in tg3_int_reenable() below 5065 * to tell the hw how much work has been processed, 5066 * so we must read it before checking for more work. 5067 */ 5068 tnapi->last_tag = sblk->status_tag; 5069 tnapi->last_irq_tag = tnapi->last_tag; 5070 rmb(); 5071 } else 5072 sblk->status &= ~SD_STATUS_UPDATED; 5073 5074 if (likely(!tg3_has_work(tnapi))) { 5075 napi_complete(napi); 5076 tg3_int_reenable(tnapi); 5077 break; 5078 } 5079 } 5080 5081 return work_done; 5082 5083tx_recovery: 5084 /* work_done is guaranteed to be less than budget. */ 5085 napi_complete(napi); 5086 schedule_work(&tp->reset_task); 5087 return work_done; 5088} 5089 5090static void tg3_irq_quiesce(struct tg3 *tp) 5091{ 5092 int i; 5093 5094 BUG_ON(tp->irq_sync); 5095 5096 tp->irq_sync = 1; 5097 smp_mb(); 5098 5099 for (i = 0; i < tp->irq_cnt; i++) 5100 synchronize_irq(tp->napi[i].irq_vec); 5101} 5102 5103static inline int tg3_irq_sync(struct tg3 *tp) 5104{ 5105 return tp->irq_sync; 5106} 5107 5108/* Fully shutdown all tg3 driver activity elsewhere in the system. 5109 * If irq_sync is non-zero, then the IRQ handler must be synchronized 5110 * with as well. Most of the time, this is not necessary except when 5111 * shutting down the device. 5112 */ 5113static inline void tg3_full_lock(struct tg3 *tp, int irq_sync) 5114{ 5115 spin_lock_bh(&tp->lock); 5116 if (irq_sync) 5117 tg3_irq_quiesce(tp); 5118} 5119 5120static inline void tg3_full_unlock(struct tg3 *tp) 5121{ 5122 spin_unlock_bh(&tp->lock); 5123} 5124 5125/* One-shot MSI handler - Chip automatically disables interrupt 5126 * after sending MSI so driver doesn't have to do it. 5127 */ 5128static irqreturn_t tg3_msi_1shot(int irq, void *dev_id) 5129{ 5130 struct tg3_napi *tnapi = dev_id; 5131 struct tg3 *tp = tnapi->tp; 5132 5133 prefetch(tnapi->hw_status); 5134 if (tnapi->rx_rcb) 5135 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); 5136 5137 if (likely(!tg3_irq_sync(tp))) 5138 napi_schedule(&tnapi->napi); 5139 5140 return IRQ_HANDLED; 5141} 5142 5143/* MSI ISR - No need to check for interrupt sharing and no need to 5144 * flush status block and interrupt mailbox. PCI ordering rules 5145 * guarantee that MSI will arrive after the status block. 
5146 */
5147static irqreturn_t tg3_msi(int irq, void *dev_id)
5148{
5149 struct tg3_napi *tnapi = dev_id;
5150 struct tg3 *tp = tnapi->tp;
5151
5152 prefetch(tnapi->hw_status);
5153 if (tnapi->rx_rcb)
5154 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5155 /*
5156 * Writing any value to intr-mbox-0 clears PCI INTA# and
5157 * chip-internal interrupt pending events.
5158 * Writing non-zero to intr-mbox-0 additionally tells the
5159 * NIC to stop sending us irqs, engaging "in-intr-handler"
5160 * event coalescing.
5161 */
5162 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5163 if (likely(!tg3_irq_sync(tp)))
5164 napi_schedule(&tnapi->napi);
5165
5166 return IRQ_RETVAL(1);
5167}
5168
5169static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5170{
5171 struct tg3_napi *tnapi = dev_id;
5172 struct tg3 *tp = tnapi->tp;
5173 struct tg3_hw_status *sblk = tnapi->hw_status;
5174 unsigned int handled = 1;
5175
5176 /* In INTx mode, it is possible for the interrupt to arrive at
5177 * the CPU before the status block posted prior to the interrupt.
5178 * Reading the PCI State register will confirm whether the
5179 * interrupt is ours and will flush the status block.
5180 */
5181 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5182 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
5183 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5184 handled = 0;
5185 goto out;
5186 }
5187 }
5188
5189 /*
5190 * Writing any value to intr-mbox-0 clears PCI INTA# and
5191 * chip-internal interrupt pending events.
5192 * Writing non-zero to intr-mbox-0 additionally tells the
5193 * NIC to stop sending us irqs, engaging "in-intr-handler"
5194 * event coalescing.
5195 *
5196 * Flush the mailbox to de-assert the IRQ immediately to prevent
5197 * spurious interrupts. The flush impacts performance but
5198 * excessive spurious interrupts can be worse in some cases.
5199 */
5200 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5201 if (tg3_irq_sync(tp))
5202 goto out;
5203 sblk->status &= ~SD_STATUS_UPDATED;
5204 if (likely(tg3_has_work(tnapi))) {
5205 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5206 napi_schedule(&tnapi->napi);
5207 } else {
5208 /* No work, shared interrupt perhaps? Re-enable
5209 * interrupts, and flush that PCI write
5210 */
5211 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5212 0x00000000);
5213 }
5214out:
5215 return IRQ_RETVAL(handled);
5216}
5217
5218static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5219{
5220 struct tg3_napi *tnapi = dev_id;
5221 struct tg3 *tp = tnapi->tp;
5222 struct tg3_hw_status *sblk = tnapi->hw_status;
5223 unsigned int handled = 1;
5224
5225 /* In INTx mode, it is possible for the interrupt to arrive at
5226 * the CPU before the status block posted prior to the interrupt.
5227 * Reading the PCI State register will confirm whether the
5228 * interrupt is ours and will flush the status block.
5229 */
5230 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5231 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
5232 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5233 handled = 0;
5234 goto out;
5235 }
5236 }
5237
5238 /*
5239 * Writing any value to intr-mbox-0 clears PCI INTA# and
5240 * chip-internal interrupt pending events.
5241 * Writing non-zero to intr-mbox-0 additionally tells the
5242 * NIC to stop sending us irqs, engaging "in-intr-handler"
5243 * event coalescing.
5244 *
5245 * Flush the mailbox to de-assert the IRQ immediately to prevent
5246 * spurious interrupts.
The flush impacts performance but 5247 * excessive spurious interrupts can be worse in some cases. 5248 */ 5249 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); 5250 5251 /* 5252 * In a shared interrupt configuration, sometimes other devices' 5253 * interrupts will scream. We record the current status tag here 5254 * so that the above check can report that the screaming interrupts 5255 * are unhandled. Eventually they will be silenced. 5256 */ 5257 tnapi->last_irq_tag = sblk->status_tag; 5258 5259 if (tg3_irq_sync(tp)) 5260 goto out; 5261 5262 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); 5263 5264 napi_schedule(&tnapi->napi); 5265 5266out: 5267 return IRQ_RETVAL(handled); 5268} 5269 5270/* ISR for interrupt test */ 5271static irqreturn_t tg3_test_isr(int irq, void *dev_id) 5272{ 5273 struct tg3_napi *tnapi = dev_id; 5274 struct tg3 *tp = tnapi->tp; 5275 struct tg3_hw_status *sblk = tnapi->hw_status; 5276 5277 if ((sblk->status & SD_STATUS_UPDATED) || 5278 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { 5279 tg3_disable_ints(tp); 5280 return IRQ_RETVAL(1); 5281 } 5282 return IRQ_RETVAL(0); 5283} 5284 5285static int tg3_init_hw(struct tg3 *, int); 5286static int tg3_halt(struct tg3 *, int, int); 5287 5288/* Restart hardware after configuration changes, self-test, etc. 5289 * Invoked with tp->lock held. 5290 */ 5291static int tg3_restart_hw(struct tg3 *tp, int reset_phy) 5292 __releases(tp->lock) 5293 __acquires(tp->lock) 5294{ 5295 int err; 5296 5297 err = tg3_init_hw(tp, reset_phy); 5298 if (err) { 5299 netdev_err(tp->dev, 5300 "Failed to re-initialize device, aborting\n"); 5301 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 5302 tg3_full_unlock(tp); 5303 del_timer_sync(&tp->timer); 5304 tp->irq_sync = 0; 5305 tg3_napi_enable(tp); 5306 dev_close(tp->dev); 5307 tg3_full_lock(tp, 0); 5308 } 5309 return err; 5310} 5311 5312#ifdef CONFIG_NET_POLL_CONTROLLER 5313static void tg3_poll_controller(struct net_device *dev) 5314{ 5315 int i; 5316 struct tg3 *tp = netdev_priv(dev); 5317 5318 for (i = 0; i < tp->irq_cnt; i++) 5319 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]); 5320} 5321#endif 5322 5323static void tg3_reset_task(struct work_struct *work) 5324{ 5325 struct tg3 *tp = container_of(work, struct tg3, reset_task); 5326 int err; 5327 unsigned int restart_timer; 5328 5329 tg3_full_lock(tp, 0); 5330 5331 if (!netif_running(tp->dev)) { 5332 tg3_full_unlock(tp); 5333 return; 5334 } 5335 5336 tg3_full_unlock(tp); 5337 5338 tg3_phy_stop(tp); 5339 5340 tg3_netif_stop(tp); 5341 5342 tg3_full_lock(tp, 1); 5343 5344 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER; 5345 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER; 5346 5347 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) { 5348 tp->write32_tx_mbox = tg3_write32_tx_mbox; 5349 tp->write32_rx_mbox = tg3_write_flush_reg32; 5350 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER; 5351 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING; 5352 } 5353 5354 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); 5355 err = tg3_init_hw(tp, 1); 5356 if (err) 5357 goto out; 5358 5359 tg3_netif_start(tp); 5360 5361 if (restart_timer) 5362 mod_timer(&tp->timer, jiffies + 1); 5363 5364out: 5365 tg3_full_unlock(tp); 5366 5367 if (!err) 5368 tg3_phy_start(tp); 5369} 5370 5371static void tg3_dump_short_state(struct tg3 *tp) 5372{ 5373 netdev_err(tp->dev, "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n", 5374 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS)); 5375 netdev_err(tp->dev, "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n", 5376 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS)); 
5377}
5378
5379static void tg3_tx_timeout(struct net_device *dev)
5380{
5381 struct tg3 *tp = netdev_priv(dev);
5382
5383 if (netif_msg_tx_err(tp)) {
5384 netdev_err(dev, "transmit timed out, resetting\n");
5385 tg3_dump_short_state(tp);
5386 }
5387
5388 schedule_work(&tp->reset_task);
5389}
5390
5391/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
5392static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5393{
5394 u32 base = (u32) mapping & 0xffffffff;
5395
5396 return ((base > 0xffffdcc0) &&
5397 (base + len + 8 < base));
5398}
5399
5400/* Test for DMA addresses > 40-bit */
5401static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5402 int len)
5403{
5404#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5405 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
5406 return (((u64) mapping + len) > DMA_BIT_MASK(40));
5407 return 0;
5408#else
5409 return 0;
5410#endif
5411}
5412
5413static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
5414
5415/* Work around the 4GB and 40-bit hardware DMA bugs. */
5416static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5417 struct sk_buff *skb, u32 last_plus_one,
5418 u32 *start, u32 base_flags, u32 mss)
5419{
5420 struct tg3 *tp = tnapi->tp;
5421 struct sk_buff *new_skb;
5422 dma_addr_t new_addr = 0;
5423 u32 entry = *start;
5424 int i, ret = 0;
5425
5426 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5427 new_skb = skb_copy(skb, GFP_ATOMIC);
5428 else {
5429 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5430
5431 new_skb = skb_copy_expand(skb,
5432 skb_headroom(skb) + more_headroom,
5433 skb_tailroom(skb), GFP_ATOMIC);
5434 }
5435
5436 if (!new_skb) {
5437 ret = -1;
5438 } else {
5439 /* New SKB is guaranteed to be linear. */
5440 entry = *start;
5441 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5442 PCI_DMA_TODEVICE);
5443 /* Make sure the mapping succeeded */
5444 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5445 ret = -1;
5446 dev_kfree_skb(new_skb);
5447 new_skb = NULL;
5448
5449 /* Make sure new skb does not cross any 4G boundaries.
5450 * Drop the packet if it does.
5451 */
5452 } else if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5453 tg3_4g_overflow_test(new_addr, new_skb->len)) {
5454 pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5455 PCI_DMA_TODEVICE);
5456 ret = -1;
5457 dev_kfree_skb(new_skb);
5458 new_skb = NULL;
5459 } else {
5460 tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5461 base_flags, 1 | (mss << 1));
5462 *start = NEXT_TX(entry);
5463 }
5464 }
5465
5466 /* Now clean up the sw ring entries.
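 * (Walk-through of the loop below: if the original skb occupied
 * entries 5..8, the head plus three fragments, each old mapping is
 * unmapped in turn; entry 5 is re-pointed at new_skb when the linear
 * copy was mapped successfully, and the fragment entries are cleared
 * since the copy needs only a single descriptor.)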
*/ 5467 i = 0; 5468 while (entry != last_plus_one) { 5469 int len; 5470 5471 if (i == 0) 5472 len = skb_headlen(skb); 5473 else 5474 len = skb_shinfo(skb)->frags[i-1].size; 5475 5476 pci_unmap_single(tp->pdev, 5477 dma_unmap_addr(&tnapi->tx_buffers[entry], 5478 mapping), 5479 len, PCI_DMA_TODEVICE); 5480 if (i == 0) { 5481 tnapi->tx_buffers[entry].skb = new_skb; 5482 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, 5483 new_addr); 5484 } else { 5485 tnapi->tx_buffers[entry].skb = NULL; 5486 } 5487 entry = NEXT_TX(entry); 5488 i++; 5489 } 5490 5491 dev_kfree_skb(skb); 5492 5493 return ret; 5494} 5495 5496static void tg3_set_txd(struct tg3_napi *tnapi, int entry, 5497 dma_addr_t mapping, int len, u32 flags, 5498 u32 mss_and_is_end) 5499{ 5500 struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry]; 5501 int is_end = (mss_and_is_end & 0x1); 5502 u32 mss = (mss_and_is_end >> 1); 5503 u32 vlan_tag = 0; 5504 5505 if (is_end) 5506 flags |= TXD_FLAG_END; 5507 if (flags & TXD_FLAG_VLAN) { 5508 vlan_tag = flags >> 16; 5509 flags &= 0xffff; 5510 } 5511 vlan_tag |= (mss << TXD_MSS_SHIFT); 5512 5513 txd->addr_hi = ((u64) mapping >> 32); 5514 txd->addr_lo = ((u64) mapping & 0xffffffff); 5515 txd->len_flags = (len << TXD_LEN_SHIFT) | flags; 5516 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT; 5517} 5518 5519/* hard_start_xmit for devices that don't have any bugs and 5520 * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only. 5521 */ 5522static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, 5523 struct net_device *dev) 5524{ 5525 struct tg3 *tp = netdev_priv(dev); 5526 u32 len, entry, base_flags, mss; 5527 dma_addr_t mapping; 5528 struct tg3_napi *tnapi; 5529 struct netdev_queue *txq; 5530 unsigned int i, last; 5531 5532 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); 5533 tnapi = &tp->napi[skb_get_queue_mapping(skb)]; 5534 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) 5535 tnapi++; 5536 5537 /* We are running in BH disabled context with netif_tx_lock 5538 * and TX reclaim runs via tp->napi.poll inside of a software 5539 * interrupt. Furthermore, IRQ processing runs lockless so we have 5540 * no IRQ context deadlocks to worry about either. Rejoice! 5541 */ 5542 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) { 5543 if (!netif_tx_queue_stopped(txq)) { 5544 netif_tx_stop_queue(txq); 5545 5546 /* This is a hard error, log it. */ 5547 netdev_err(dev, 5548 "BUG! 
Tx Ring full when queue awake!\n"); 5549 } 5550 return NETDEV_TX_BUSY; 5551 } 5552 5553 entry = tnapi->tx_prod; 5554 base_flags = 0; 5555 mss = 0; 5556 if ((mss = skb_shinfo(skb)->gso_size) != 0) { 5557 int tcp_opt_len, ip_tcp_len; 5558 u32 hdrlen; 5559 5560 if (skb_header_cloned(skb) && 5561 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { 5562 dev_kfree_skb(skb); 5563 goto out_unlock; 5564 } 5565 5566 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) 5567 hdrlen = skb_headlen(skb) - ETH_HLEN; 5568 else { 5569 struct iphdr *iph = ip_hdr(skb); 5570 5571 tcp_opt_len = tcp_optlen(skb); 5572 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr); 5573 5574 iph->check = 0; 5575 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len); 5576 hdrlen = ip_tcp_len + tcp_opt_len; 5577 } 5578 5579 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) { 5580 mss |= (hdrlen & 0xc) << 12; 5581 if (hdrlen & 0x10) 5582 base_flags |= 0x00000010; 5583 base_flags |= (hdrlen & 0x3e0) << 5; 5584 } else 5585 mss |= hdrlen << 9; 5586 5587 base_flags |= (TXD_FLAG_CPU_PRE_DMA | 5588 TXD_FLAG_CPU_POST_DMA); 5589 5590 tcp_hdr(skb)->check = 0; 5591 5592 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 5593 base_flags |= TXD_FLAG_TCPUDP_CSUM; 5594 } 5595 5596#if TG3_VLAN_TAG_USED 5597 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb)) 5598 base_flags |= (TXD_FLAG_VLAN | 5599 (vlan_tx_tag_get(skb) << 16)); 5600#endif 5601 5602 len = skb_headlen(skb); 5603 5604 /* Queue skb data, a.k.a. the main skb fragment. */ 5605 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE); 5606 if (pci_dma_mapping_error(tp->pdev, mapping)) { 5607 dev_kfree_skb(skb); 5608 goto out_unlock; 5609 } 5610 5611 tnapi->tx_buffers[entry].skb = skb; 5612 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); 5613 5614 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) && 5615 !mss && skb->len > ETH_DATA_LEN) 5616 base_flags |= TXD_FLAG_JMB_PKT; 5617 5618 tg3_set_txd(tnapi, entry, mapping, len, base_flags, 5619 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1)); 5620 5621 entry = NEXT_TX(entry); 5622 5623 /* Now loop through additional data fragments, and queue them. */ 5624 if (skb_shinfo(skb)->nr_frags > 0) { 5625 last = skb_shinfo(skb)->nr_frags - 1; 5626 for (i = 0; i <= last; i++) { 5627 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 5628 5629 len = frag->size; 5630 mapping = pci_map_page(tp->pdev, 5631 frag->page, 5632 frag->page_offset, 5633 len, PCI_DMA_TODEVICE); 5634 if (pci_dma_mapping_error(tp->pdev, mapping)) 5635 goto dma_error; 5636 5637 tnapi->tx_buffers[entry].skb = NULL; 5638 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, 5639 mapping); 5640 5641 tg3_set_txd(tnapi, entry, mapping, len, 5642 base_flags, (i == last) | (mss << 1)); 5643 5644 entry = NEXT_TX(entry); 5645 } 5646 } 5647 5648 /* Packets are ready, update Tx producer idx local and on card. 
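 * (For example, queuing an skb with three fragments starting at
 * entry 10 consumes descriptors 10..13 and leaves 'entry' at 14;
 * writing 14 to the producer mailbox below hands all four
 * descriptors to the chip in one doorbell.)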
*/
5649 tw32_tx_mbox(tnapi->prodmbox, entry);
5650
5651 tnapi->tx_prod = entry;
5652 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5653 netif_tx_stop_queue(txq);
5654 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5655 netif_tx_wake_queue(txq);
5656 }
5657
5658out_unlock:
5659 mmiowb();
5660
5661 return NETDEV_TX_OK;
5662
5663dma_error:
5664 last = i;
5665 entry = tnapi->tx_prod;
5666 tnapi->tx_buffers[entry].skb = NULL;
5667 pci_unmap_single(tp->pdev,
5668 dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
5669 skb_headlen(skb),
5670 PCI_DMA_TODEVICE);
5671 for (i = 0; i <= last; i++) {
5672 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5673 entry = NEXT_TX(entry);
5674
5675 pci_unmap_page(tp->pdev,
5676 dma_unmap_addr(&tnapi->tx_buffers[entry],
5677 mapping),
5678 frag->size, PCI_DMA_TODEVICE);
5679 }
5680
5681 dev_kfree_skb(skb);
5682 return NETDEV_TX_OK;
5683}
5684
5685static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
5686 struct net_device *);
5687
5688/* Use GSO to work around a rare TSO bug that may be triggered when the
5689 * TSO header is greater than 80 bytes.
5690 */
5691static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5692{
5693 struct sk_buff *segs, *nskb;
5694 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5695
5696 /* Estimate the number of fragments in the worst case */
5697 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5698 netif_stop_queue(tp->dev);
5699 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5700 return NETDEV_TX_BUSY;
5701
5702 netif_wake_queue(tp->dev);
5703 }
5704
5705 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5706 if (IS_ERR(segs))
5707 goto tg3_tso_bug_end;
5708
5709 do {
5710 nskb = segs;
5711 segs = segs->next;
5712 nskb->next = NULL;
5713 tg3_start_xmit_dma_bug(nskb, tp->dev);
5714 } while (segs);
5715
5716tg3_tso_bug_end:
5717 dev_kfree_skb(skb);
5718
5719 return NETDEV_TX_OK;
5720}
5721
5722/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5723 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
5724 */
5725static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5726 struct net_device *dev)
5727{
5728 struct tg3 *tp = netdev_priv(dev);
5729 u32 len, entry, base_flags, mss;
5730 int would_hit_hwbug;
5731 dma_addr_t mapping;
5732 struct tg3_napi *tnapi;
5733 struct netdev_queue *txq;
5734 unsigned int i, last;
5735
5736 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5737 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5738 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
5739 tnapi++;
5740
5741 /* We are running in BH disabled context with netif_tx_lock
5742 * and TX reclaim runs via tp->napi.poll inside of a software
5743 * interrupt. Furthermore, IRQ processing runs lockless so we have
5744 * no IRQ context deadlocks to worry about either. Rejoice!
5745 */
5746 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5747 if (!netif_tx_queue_stopped(txq)) {
5748 netif_tx_stop_queue(txq);
5749
5750 /* This is a hard error, log it. */
5751 netdev_err(dev,
5752 "BUG!
Tx Ring full when queue awake!\n"); 5753 } 5754 return NETDEV_TX_BUSY; 5755 } 5756 5757 entry = tnapi->tx_prod; 5758 base_flags = 0; 5759 if (skb->ip_summed == CHECKSUM_PARTIAL) 5760 base_flags |= TXD_FLAG_TCPUDP_CSUM; 5761 5762 if ((mss = skb_shinfo(skb)->gso_size) != 0) { 5763 struct iphdr *iph; 5764 u32 tcp_opt_len, ip_tcp_len, hdr_len; 5765 5766 if (skb_header_cloned(skb) && 5767 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { 5768 dev_kfree_skb(skb); 5769 goto out_unlock; 5770 } 5771 5772 tcp_opt_len = tcp_optlen(skb); 5773 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr); 5774 5775 hdr_len = ip_tcp_len + tcp_opt_len; 5776 if (unlikely((ETH_HLEN + hdr_len) > 80) && 5777 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG)) 5778 return tg3_tso_bug(tp, skb); 5779 5780 base_flags |= (TXD_FLAG_CPU_PRE_DMA | 5781 TXD_FLAG_CPU_POST_DMA); 5782 5783 iph = ip_hdr(skb); 5784 iph->check = 0; 5785 iph->tot_len = htons(mss + hdr_len); 5786 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { 5787 tcp_hdr(skb)->check = 0; 5788 base_flags &= ~TXD_FLAG_TCPUDP_CSUM; 5789 } else 5790 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 5791 iph->daddr, 0, 5792 IPPROTO_TCP, 5793 0); 5794 5795 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) { 5796 mss |= (hdr_len & 0xc) << 12; 5797 if (hdr_len & 0x10) 5798 base_flags |= 0x00000010; 5799 base_flags |= (hdr_len & 0x3e0) << 5; 5800 } else if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) 5801 mss |= hdr_len << 9; 5802 else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) || 5803 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { 5804 if (tcp_opt_len || iph->ihl > 5) { 5805 int tsflags; 5806 5807 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); 5808 mss |= (tsflags << 11); 5809 } 5810 } else { 5811 if (tcp_opt_len || iph->ihl > 5) { 5812 int tsflags; 5813 5814 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); 5815 base_flags |= tsflags << 12; 5816 } 5817 } 5818 } 5819#if TG3_VLAN_TAG_USED 5820 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb)) 5821 base_flags |= (TXD_FLAG_VLAN | 5822 (vlan_tx_tag_get(skb) << 16)); 5823#endif 5824 5825 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) && 5826 !mss && skb->len > ETH_DATA_LEN) 5827 base_flags |= TXD_FLAG_JMB_PKT; 5828 5829 len = skb_headlen(skb); 5830 5831 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE); 5832 if (pci_dma_mapping_error(tp->pdev, mapping)) { 5833 dev_kfree_skb(skb); 5834 goto out_unlock; 5835 } 5836 5837 tnapi->tx_buffers[entry].skb = skb; 5838 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); 5839 5840 would_hit_hwbug = 0; 5841 5842 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8) 5843 would_hit_hwbug = 1; 5844 5845 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) && 5846 tg3_4g_overflow_test(mapping, len)) 5847 would_hit_hwbug = 1; 5848 5849 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) && 5850 tg3_40bit_overflow_test(tp, mapping, len)) 5851 would_hit_hwbug = 1; 5852 5853 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG) 5854 would_hit_hwbug = 1; 5855 5856 tg3_set_txd(tnapi, entry, mapping, len, base_flags, 5857 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1)); 5858 5859 entry = NEXT_TX(entry); 5860 5861 /* Now loop through additional data fragments, and queue them. 
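 * (Unlike the bug-free tg3_start_xmit() above, each fragment mapping
 * in this loop is re-checked against the short-DMA, 4GB-boundary and
 * 40-bit tests; one offending fragment is enough to set
 * would_hit_hwbug and trigger the linearizing workaround further
 * down.)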
*/ 5862 if (skb_shinfo(skb)->nr_frags > 0) { 5863 last = skb_shinfo(skb)->nr_frags - 1; 5864 for (i = 0; i <= last; i++) { 5865 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 5866 5867 len = frag->size; 5868 mapping = pci_map_page(tp->pdev, 5869 frag->page, 5870 frag->page_offset, 5871 len, PCI_DMA_TODEVICE); 5872 5873 tnapi->tx_buffers[entry].skb = NULL; 5874 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, 5875 mapping); 5876 if (pci_dma_mapping_error(tp->pdev, mapping)) 5877 goto dma_error; 5878 5879 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && 5880 len <= 8) 5881 would_hit_hwbug = 1; 5882 5883 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) && 5884 tg3_4g_overflow_test(mapping, len)) 5885 would_hit_hwbug = 1; 5886 5887 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) && 5888 tg3_40bit_overflow_test(tp, mapping, len)) 5889 would_hit_hwbug = 1; 5890 5891 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) 5892 tg3_set_txd(tnapi, entry, mapping, len, 5893 base_flags, (i == last)|(mss << 1)); 5894 else 5895 tg3_set_txd(tnapi, entry, mapping, len, 5896 base_flags, (i == last)); 5897 5898 entry = NEXT_TX(entry); 5899 } 5900 } 5901 5902 if (would_hit_hwbug) { 5903 u32 last_plus_one = entry; 5904 u32 start; 5905 5906 start = entry - 1 - skb_shinfo(skb)->nr_frags; 5907 start &= (TG3_TX_RING_SIZE - 1); 5908 5909 /* If the workaround fails due to memory/mapping 5910 * failure, silently drop this packet. 5911 */ 5912 if (tigon3_dma_hwbug_workaround(tnapi, skb, last_plus_one, 5913 &start, base_flags, mss)) 5914 goto out_unlock; 5915 5916 entry = start; 5917 } 5918 5919 /* Packets are ready, update Tx producer idx local and on card. */ 5920 tw32_tx_mbox(tnapi->prodmbox, entry); 5921 5922 tnapi->tx_prod = entry; 5923 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) { 5924 netif_tx_stop_queue(txq); 5925 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)) 5926 netif_tx_wake_queue(txq); 5927 } 5928 5929out_unlock: 5930 mmiowb(); 5931 5932 return NETDEV_TX_OK; 5933 5934dma_error: 5935 last = i; 5936 entry = tnapi->tx_prod; 5937 tnapi->tx_buffers[entry].skb = NULL; 5938 pci_unmap_single(tp->pdev, 5939 dma_unmap_addr(&tnapi->tx_buffers[entry], mapping), 5940 skb_headlen(skb), 5941 PCI_DMA_TODEVICE); 5942 for (i = 0; i <= last; i++) { 5943 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 5944 entry = NEXT_TX(entry); 5945 5946 pci_unmap_page(tp->pdev, 5947 dma_unmap_addr(&tnapi->tx_buffers[entry], 5948 mapping), 5949 frag->size, PCI_DMA_TODEVICE); 5950 } 5951 5952 dev_kfree_skb(skb); 5953 return NETDEV_TX_OK; 5954} 5955 5956static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp, 5957 int new_mtu) 5958{ 5959 dev->mtu = new_mtu; 5960 5961 if (new_mtu > ETH_DATA_LEN) { 5962 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) { 5963 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE; 5964 ethtool_op_set_tso(dev, 0); 5965 } else { 5966 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE; 5967 } 5968 } else { 5969 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) 5970 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; 5971 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE; 5972 } 5973} 5974 5975static int tg3_change_mtu(struct net_device *dev, int new_mtu) 5976{ 5977 struct tg3 *tp = netdev_priv(dev); 5978 int err; 5979 5980 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp)) 5981 return -EINVAL; 5982 5983 if (!netif_running(dev)) { 5984 /* We'll just catch it later when the 5985 * device is up'd. 
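 * (While the interface is down, tg3_set_mtu() only records the new
 * MTU and flips the jumbo-ring/TSO flags; the rings are actually
 * sized to match the next time the device is brought up.)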
5986 */ 5987 tg3_set_mtu(dev, tp, new_mtu); 5988 return 0; 5989 } 5990 5991 tg3_phy_stop(tp); 5992 5993 tg3_netif_stop(tp); 5994 5995 tg3_full_lock(tp, 1); 5996 5997 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 5998 5999 tg3_set_mtu(dev, tp, new_mtu); 6000 6001 err = tg3_restart_hw(tp, 0); 6002 6003 if (!err) 6004 tg3_netif_start(tp); 6005 6006 tg3_full_unlock(tp); 6007 6008 if (!err) 6009 tg3_phy_start(tp); 6010 6011 return err; 6012} 6013 6014static void tg3_rx_prodring_free(struct tg3 *tp, 6015 struct tg3_rx_prodring_set *tpr) 6016{ 6017 int i; 6018 6019 if (tpr != &tp->prodring[0]) { 6020 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx; 6021 i = (i + 1) % TG3_RX_RING_SIZE) 6022 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i], 6023 tp->rx_pkt_map_sz); 6024 6025 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { 6026 for (i = tpr->rx_jmb_cons_idx; 6027 i != tpr->rx_jmb_prod_idx; 6028 i = (i + 1) % TG3_RX_JUMBO_RING_SIZE) { 6029 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i], 6030 TG3_RX_JMB_MAP_SZ); 6031 } 6032 } 6033 6034 return; 6035 } 6036 6037 for (i = 0; i < TG3_RX_RING_SIZE; i++) 6038 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i], 6039 tp->rx_pkt_map_sz); 6040 6041 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { 6042 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) 6043 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i], 6044 TG3_RX_JMB_MAP_SZ); 6045 } 6046} 6047 6048/* Initialize rx rings for packet processing. 6049 * 6050 * The chip has been shut down and the driver detached from 6051 * the networking, so no interrupts or new tx packets will 6052 * end up in the driver. tp->{tx,}lock are held and thus 6053 * we may not sleep. 6054 */ 6055static int tg3_rx_prodring_alloc(struct tg3 *tp, 6056 struct tg3_rx_prodring_set *tpr) 6057{ 6058 u32 i, rx_pkt_dma_sz; 6059 6060 tpr->rx_std_cons_idx = 0; 6061 tpr->rx_std_prod_idx = 0; 6062 tpr->rx_jmb_cons_idx = 0; 6063 tpr->rx_jmb_prod_idx = 0; 6064 6065 if (tpr != &tp->prodring[0]) { 6066 memset(&tpr->rx_std_buffers[0], 0, TG3_RX_STD_BUFF_RING_SIZE); 6067 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) 6068 memset(&tpr->rx_jmb_buffers[0], 0, 6069 TG3_RX_JMB_BUFF_RING_SIZE); 6070 goto done; 6071 } 6072 6073 /* Zero out all descriptors. */ 6074 memset(tpr->rx_std, 0, TG3_RX_RING_BYTES); 6075 6076 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ; 6077 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) && 6078 tp->dev->mtu > ETH_DATA_LEN) 6079 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ; 6080 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz); 6081 6082 /* Initialize invariants of the rings, we only set this 6083 * stuff once. This works because the card does not 6084 * write into the rx buffer posting rings. 6085 */ 6086 for (i = 0; i < TG3_RX_RING_SIZE; i++) { 6087 struct tg3_rx_buffer_desc *rxd; 6088 6089 rxd = &tpr->rx_std[i]; 6090 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT; 6091 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT); 6092 rxd->opaque = (RXD_OPAQUE_RING_STD | 6093 (i << RXD_OPAQUE_INDEX_SHIFT)); 6094 } 6095 6096 /* Now allocate fresh SKBs for each rx ring. */ 6097 for (i = 0; i < tp->rx_pending; i++) { 6098 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) { 6099 netdev_warn(tp->dev, 6100 "Using a smaller RX standard ring. 
Only " 6101 "%d out of %d buffers were allocated " 6102 "successfully\n", i, tp->rx_pending); 6103 if (i == 0) 6104 goto initfail; 6105 tp->rx_pending = i; 6106 break; 6107 } 6108 } 6109 6110 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE)) 6111 goto done; 6112 6113 memset(tpr->rx_jmb, 0, TG3_RX_JUMBO_RING_BYTES); 6114 6115 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)) 6116 goto done; 6117 6118 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) { 6119 struct tg3_rx_buffer_desc *rxd; 6120 6121 rxd = &tpr->rx_jmb[i].std; 6122 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT; 6123 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) | 6124 RXD_FLAG_JUMBO; 6125 rxd->opaque = (RXD_OPAQUE_RING_JUMBO | 6126 (i << RXD_OPAQUE_INDEX_SHIFT)); 6127 } 6128 6129 for (i = 0; i < tp->rx_jumbo_pending; i++) { 6130 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) { 6131 netdev_warn(tp->dev, 6132 "Using a smaller RX jumbo ring. Only %d " 6133 "out of %d buffers were allocated " 6134 "successfully\n", i, tp->rx_jumbo_pending); 6135 if (i == 0) 6136 goto initfail; 6137 tp->rx_jumbo_pending = i; 6138 break; 6139 } 6140 } 6141 6142done: 6143 return 0; 6144 6145initfail: 6146 tg3_rx_prodring_free(tp, tpr); 6147 return -ENOMEM; 6148} 6149 6150static void tg3_rx_prodring_fini(struct tg3 *tp, 6151 struct tg3_rx_prodring_set *tpr) 6152{ 6153 kfree(tpr->rx_std_buffers); 6154 tpr->rx_std_buffers = NULL; 6155 kfree(tpr->rx_jmb_buffers); 6156 tpr->rx_jmb_buffers = NULL; 6157 if (tpr->rx_std) { 6158 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES, 6159 tpr->rx_std, tpr->rx_std_mapping); 6160 tpr->rx_std = NULL; 6161 } 6162 if (tpr->rx_jmb) { 6163 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES, 6164 tpr->rx_jmb, tpr->rx_jmb_mapping); 6165 tpr->rx_jmb = NULL; 6166 } 6167} 6168 6169static int tg3_rx_prodring_init(struct tg3 *tp, 6170 struct tg3_rx_prodring_set *tpr) 6171{ 6172 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE, GFP_KERNEL); 6173 if (!tpr->rx_std_buffers) 6174 return -ENOMEM; 6175 6176 tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES, 6177 &tpr->rx_std_mapping); 6178 if (!tpr->rx_std) 6179 goto err_out; 6180 6181 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { 6182 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE, 6183 GFP_KERNEL); 6184 if (!tpr->rx_jmb_buffers) 6185 goto err_out; 6186 6187 tpr->rx_jmb = pci_alloc_consistent(tp->pdev, 6188 TG3_RX_JUMBO_RING_BYTES, 6189 &tpr->rx_jmb_mapping); 6190 if (!tpr->rx_jmb) 6191 goto err_out; 6192 } 6193 6194 return 0; 6195 6196err_out: 6197 tg3_rx_prodring_fini(tp, tpr); 6198 return -ENOMEM; 6199} 6200 6201/* Free up pending packets in all rx/tx rings. 6202 * 6203 * The chip has been shut down and the driver detached from 6204 * the networking, so no interrupts or new tx packets will 6205 * end up in the driver. tp->{tx,}lock is not held and we are not 6206 * in an interrupt context and thus may sleep. 
6207 */ 6208 static void tg3_free_rings(struct tg3 *tp) 6209 { 6210 int i, j; 6211 6212 for (j = 0; j < tp->irq_cnt; j++) { 6213 struct tg3_napi *tnapi = &tp->napi[j]; 6214 6215 if (!tnapi->tx_buffers) 6216 continue; 6217 6218 for (i = 0; i < TG3_TX_RING_SIZE; ) { 6219 struct ring_info *txp; 6220 struct sk_buff *skb; 6221 unsigned int k; 6222 6223 txp = &tnapi->tx_buffers[i]; 6224 skb = txp->skb; 6225 6226 if (skb == NULL) { 6227 i++; 6228 continue; 6229 } 6230 6231 pci_unmap_single(tp->pdev, 6232 dma_unmap_addr(txp, mapping), 6233 skb_headlen(skb), 6234 PCI_DMA_TODEVICE); 6235 txp->skb = NULL; 6236 6237 i++; 6238 6239 for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) { 6240 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)]; 6241 pci_unmap_page(tp->pdev, 6242 dma_unmap_addr(txp, mapping), 6243 skb_shinfo(skb)->frags[k].size, 6244 PCI_DMA_TODEVICE); 6245 i++; 6246 } 6247 6248 dev_kfree_skb_any(skb); 6249 } 6250 6251 tg3_rx_prodring_free(tp, &tp->prodring[j]); 6252 } 6253} 6254 6255/* Initialize tx/rx rings for packet processing. 6256 * 6257 * The chip has been shut down and the driver detached from 6258 * the networking, so no interrupts or new tx packets will 6259 * end up in the driver. tp->{tx,}lock are held and thus 6260 * we may not sleep. 6261 */ 6262static int tg3_init_rings(struct tg3 *tp) 6263{ 6264 int i; 6265 6266 /* Free up all the SKBs. */ 6267 tg3_free_rings(tp); 6268 6269 for (i = 0; i < tp->irq_cnt; i++) { 6270 struct tg3_napi *tnapi = &tp->napi[i]; 6271 6272 tnapi->last_tag = 0; 6273 tnapi->last_irq_tag = 0; 6274 tnapi->hw_status->status = 0; 6275 tnapi->hw_status->status_tag = 0; 6276 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); 6277 6278 tnapi->tx_prod = 0; 6279 tnapi->tx_cons = 0; 6280 if (tnapi->tx_ring) 6281 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES); 6282 6283 tnapi->rx_rcb_ptr = 0; 6284 if (tnapi->rx_rcb) 6285 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); 6286 6287 if (tg3_rx_prodring_alloc(tp, &tp->prodring[i])) { 6288 tg3_free_rings(tp); 6289 return -ENOMEM; 6290 } 6291 } 6292 6293 return 0; 6294} 6295 6296/* 6297 * Must not be invoked with interrupt sources disabled and 6298 * the hardware shut down. 6299 */ 6300static void tg3_free_consistent(struct tg3 *tp) 6301{ 6302 int i; 6303 6304 for (i = 0; i < tp->irq_cnt; i++) { 6305 struct tg3_napi *tnapi = &tp->napi[i]; 6306 6307 if (tnapi->tx_ring) { 6308 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES, 6309 tnapi->tx_ring, tnapi->tx_desc_mapping); 6310 tnapi->tx_ring = NULL; 6311 } 6312 6313 kfree(tnapi->tx_buffers); 6314 tnapi->tx_buffers = NULL; 6315 6316 if (tnapi->rx_rcb) { 6317 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp), 6318 tnapi->rx_rcb, 6319 tnapi->rx_rcb_mapping); 6320 tnapi->rx_rcb = NULL; 6321 } 6322 6323 if (tnapi->hw_status) { 6324 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE, 6325 tnapi->hw_status, 6326 tnapi->status_mapping); 6327 tnapi->hw_status = NULL; 6328 } 6329 } 6330 6331 if (tp->hw_stats) { 6332 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats), 6333 tp->hw_stats, tp->stats_mapping); 6334 tp->hw_stats = NULL; 6335 } 6336 6337 for (i = 0; i < tp->irq_cnt; i++) 6338 tg3_rx_prodring_fini(tp, &tp->prodring[i]); 6339} 6340 6341/* 6342 * Must not be invoked with interrupt sources disabled and 6343 * the hardware shut down. Can sleep. 
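 * (The GFP_KERNEL allocations below need a sleepable context.)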
6344 */ 6345static int tg3_alloc_consistent(struct tg3 *tp) 6346{ 6347 int i; 6348 6349 for (i = 0; i < tp->irq_cnt; i++) { 6350 if (tg3_rx_prodring_init(tp, &tp->prodring[i])) 6351 goto err_out; 6352 } 6353 6354 tp->hw_stats = pci_alloc_consistent(tp->pdev, 6355 sizeof(struct tg3_hw_stats), 6356 &tp->stats_mapping); 6357 if (!tp->hw_stats) 6358 goto err_out; 6359 6360 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); 6361 6362 for (i = 0; i < tp->irq_cnt; i++) { 6363 struct tg3_napi *tnapi = &tp->napi[i]; 6364 struct tg3_hw_status *sblk; 6365 6366 tnapi->hw_status = pci_alloc_consistent(tp->pdev, 6367 TG3_HW_STATUS_SIZE, 6368 &tnapi->status_mapping); 6369 if (!tnapi->hw_status) 6370 goto err_out; 6371 6372 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); 6373 sblk = tnapi->hw_status; 6374 6375 /* If multivector TSS is enabled, vector 0 does not handle 6376 * tx interrupts. Don't allocate any resources for it. 6377 */ 6378 if ((!i && !(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) || 6379 (i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))) { 6380 tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) * 6381 TG3_TX_RING_SIZE, 6382 GFP_KERNEL); 6383 if (!tnapi->tx_buffers) 6384 goto err_out; 6385 6386 tnapi->tx_ring = pci_alloc_consistent(tp->pdev, 6387 TG3_TX_RING_BYTES, 6388 &tnapi->tx_desc_mapping); 6389 if (!tnapi->tx_ring) 6390 goto err_out; 6391 } 6392 6393 /* 6394 * When RSS is enabled, the status block format changes 6395 * slightly. The "rx_jumbo_consumer", "reserved", 6396 * and "rx_mini_consumer" members get mapped to the 6397 * other three rx return ring producer indexes. 6398 */ 6399 switch (i) { 6400 default: 6401 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer; 6402 break; 6403 case 2: 6404 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer; 6405 break; 6406 case 3: 6407 tnapi->rx_rcb_prod_idx = &sblk->reserved; 6408 break; 6409 case 4: 6410 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer; 6411 break; 6412 } 6413 6414 tnapi->prodring = &tp->prodring[i]; 6415 6416 /* 6417 * If multivector RSS is enabled, vector 0 does not handle 6418 * rx or tx interrupts. Don't allocate any resources for it. 6419 */ 6420 if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) 6421 continue; 6422 6423 tnapi->rx_rcb = pci_alloc_consistent(tp->pdev, 6424 TG3_RX_RCB_RING_BYTES(tp), 6425 &tnapi->rx_rcb_mapping); 6426 if (!tnapi->rx_rcb) 6427 goto err_out; 6428 6429 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); 6430 } 6431 6432 return 0; 6433 6434err_out: 6435 tg3_free_consistent(tp); 6436 return -ENOMEM; 6437} 6438 6439#define MAX_WAIT_CNT 1000 6440 6441/* To stop a block, clear the enable bit and poll till it 6442 * clears. tp->lock is held. 6443 */ 6444static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent) 6445{ 6446 unsigned int i; 6447 u32 val; 6448 6449 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { 6450 switch (ofs) { 6451 case RCVLSC_MODE: 6452 case DMAC_MODE: 6453 case MBFREE_MODE: 6454 case BUFMGR_MODE: 6455 case MEMARB_MODE: 6456 /* We can't enable/disable these bits of the 6457 * 5705/5750, just say success. 
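 * The poll below would never see the enable bit clear on those
 * chips, so short-circuit rather than report a bogus timeout.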
6458 */ 6459 return 0; 6460 6461 default: 6462 break; 6463 } 6464 } 6465 6466 val = tr32(ofs); 6467 val &= ~enable_bit; 6468 tw32_f(ofs, val); 6469 6470 for (i = 0; i < MAX_WAIT_CNT; i++) { 6471 udelay(100); 6472 val = tr32(ofs); 6473 if ((val & enable_bit) == 0) 6474 break; 6475 } 6476 6477 if (i == MAX_WAIT_CNT && !silent) { 6478 dev_err(&tp->pdev->dev, 6479 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n", 6480 ofs, enable_bit); 6481 return -ENODEV; 6482 } 6483 6484 return 0; 6485} 6486 6487/* tp->lock is held. */ 6488static int tg3_abort_hw(struct tg3 *tp, int silent) 6489{ 6490 int i, err; 6491 6492 tg3_disable_ints(tp); 6493 6494 tp->rx_mode &= ~RX_MODE_ENABLE; 6495 tw32_f(MAC_RX_MODE, tp->rx_mode); 6496 udelay(10); 6497 6498 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent); 6499 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent); 6500 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent); 6501 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent); 6502 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent); 6503 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent); 6504 6505 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent); 6506 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent); 6507 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent); 6508 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent); 6509 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent); 6510 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent); 6511 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent); 6512 6513 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE; 6514 tw32_f(MAC_MODE, tp->mac_mode); 6515 udelay(40); 6516 6517 tp->tx_mode &= ~TX_MODE_ENABLE; 6518 tw32_f(MAC_TX_MODE, tp->tx_mode); 6519 6520 for (i = 0; i < MAX_WAIT_CNT; i++) { 6521 udelay(100); 6522 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE)) 6523 break; 6524 } 6525 if (i >= MAX_WAIT_CNT) { 6526 dev_err(&tp->pdev->dev, 6527 "%s timed out, TX_MODE_ENABLE will not clear " 6528 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE)); 6529 err |= -ENODEV; 6530 } 6531 6532 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent); 6533 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent); 6534 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent); 6535 6536 tw32(FTQ_RESET, 0xffffffff); 6537 tw32(FTQ_RESET, 0x00000000); 6538 6539 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent); 6540 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent); 6541 6542 for (i = 0; i < tp->irq_cnt; i++) { 6543 struct tg3_napi *tnapi = &tp->napi[i]; 6544 if (tnapi->hw_status) 6545 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); 6546 } 6547 if (tp->hw_stats) 6548 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); 6549 6550 return err; 6551} 6552 6553static void tg3_ape_send_event(struct tg3 *tp, u32 event) 6554{ 6555 int i; 6556 u32 apedata; 6557 6558 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); 6559 if (apedata != APE_SEG_SIG_MAGIC) 6560 return; 6561 6562 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); 6563 if (!(apedata & APE_FW_STATUS_READY)) 6564 return; 6565 6566 /* Wait for up to 1 millisecond for APE to service previous event. 
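 * (Ten polls below, with udelay(100) between attempts.)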
*/ 6567 for (i = 0; i < 10; i++) { 6568 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM)) 6569 return; 6570 6571 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS); 6572 6573 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) 6574 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, 6575 event | APE_EVENT_STATUS_EVENT_PENDING); 6576 6577 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM); 6578 6579 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) 6580 break; 6581 6582 udelay(100); 6583 } 6584 6585 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) 6586 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1); 6587} 6588 6589static void tg3_ape_driver_state_change(struct tg3 *tp, int kind) 6590{ 6591 u32 event; 6592 u32 apedata; 6593 6594 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) 6595 return; 6596 6597 switch (kind) { 6598 case RESET_KIND_INIT: 6599 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 6600 APE_HOST_SEG_SIG_MAGIC); 6601 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN, 6602 APE_HOST_SEG_LEN_MAGIC); 6603 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT); 6604 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata); 6605 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID, 6606 APE_HOST_DRIVER_ID_MAGIC); 6607 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR, 6608 APE_HOST_BEHAV_NO_PHYLOCK); 6609 6610 event = APE_EVENT_STATUS_STATE_START; 6611 break; 6612 case RESET_KIND_SHUTDOWN: 6613 /* With the interface we are currently using, 6614 * APE does not track driver state. Wiping 6615 * out the HOST SEGMENT SIGNATURE forces 6616 * the APE to assume OS absent status. 6617 */ 6618 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0); 6619 6620 event = APE_EVENT_STATUS_STATE_UNLOAD; 6621 break; 6622 case RESET_KIND_SUSPEND: 6623 event = APE_EVENT_STATUS_STATE_SUSPEND; 6624 break; 6625 default: 6626 return; 6627 } 6628 6629 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE; 6630 6631 tg3_ape_send_event(tp, event); 6632} 6633 6634/* tp->lock is held. */ 6635static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind) 6636{ 6637 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX, 6638 NIC_SRAM_FIRMWARE_MBOX_MAGIC1); 6639 6640 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) { 6641 switch (kind) { 6642 case RESET_KIND_INIT: 6643 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, 6644 DRV_STATE_START); 6645 break; 6646 6647 case RESET_KIND_SHUTDOWN: 6648 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, 6649 DRV_STATE_UNLOAD); 6650 break; 6651 6652 case RESET_KIND_SUSPEND: 6653 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, 6654 DRV_STATE_SUSPEND); 6655 break; 6656 6657 default: 6658 break; 6659 } 6660 } 6661 6662 if (kind == RESET_KIND_INIT || 6663 kind == RESET_KIND_SUSPEND) 6664 tg3_ape_driver_state_change(tp, kind); 6665} 6666 6667/* tp->lock is held. */ 6668static void tg3_write_sig_post_reset(struct tg3 *tp, int kind) 6669{ 6670 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) { 6671 switch (kind) { 6672 case RESET_KIND_INIT: 6673 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, 6674 DRV_STATE_START_DONE); 6675 break; 6676 6677 case RESET_KIND_SHUTDOWN: 6678 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, 6679 DRV_STATE_UNLOAD_DONE); 6680 break; 6681 6682 default: 6683 break; 6684 } 6685 } 6686 6687 if (kind == RESET_KIND_SHUTDOWN) 6688 tg3_ape_driver_state_change(tp, kind); 6689} 6690 6691/* tp->lock is held. 
*/ 6692static void tg3_write_sig_legacy(struct tg3 *tp, int kind) 6693{ 6694 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { 6695 switch (kind) { 6696 case RESET_KIND_INIT: 6697 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, 6698 DRV_STATE_START); 6699 break; 6700 6701 case RESET_KIND_SHUTDOWN: 6702 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, 6703 DRV_STATE_UNLOAD); 6704 break; 6705 6706 case RESET_KIND_SUSPEND: 6707 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, 6708 DRV_STATE_SUSPEND); 6709 break; 6710 6711 default: 6712 break; 6713 } 6714 } 6715} 6716 6717static int tg3_poll_fw(struct tg3 *tp) 6718{ 6719 int i; 6720 u32 val; 6721 6722 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { 6723 /* Wait up to 20ms for init done. */ 6724 for (i = 0; i < 200; i++) { 6725 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE) 6726 return 0; 6727 udelay(100); 6728 } 6729 return -ENODEV; 6730 } 6731 6732 /* Wait for firmware initialization to complete. */ 6733 for (i = 0; i < 100000; i++) { 6734 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val); 6735 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) 6736 break; 6737 udelay(10); 6738 } 6739 6740 /* Chip might not be fitted with firmware. Some Sun onboard 6741 * parts are configured like that. So don't signal the timeout 6742 * of the above loop as an error, but do report the lack of 6743 * running firmware once. 6744 */ 6745 if (i >= 100000 && 6746 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) { 6747 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED; 6748 6749 netdev_info(tp->dev, "No firmware running\n"); 6750 } 6751 6752 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) { 6753 /* The 57765 A0 needs a little more 6754 * time to do some important work. 6755 */ 6756 mdelay(10); 6757 } 6758 6759 return 0; 6760} 6761 6762/* Save PCI command register before chip reset */ 6763static void tg3_save_pci_state(struct tg3 *tp) 6764{ 6765 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd); 6766} 6767 6768/* Restore PCI state after chip reset */ 6769static void tg3_restore_pci_state(struct tg3 *tp) 6770{ 6771 u32 val; 6772 6773 /* Re-enable indirect register accesses. */ 6774 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, 6775 tp->misc_host_ctrl); 6776 6777 /* Set MAX PCI retry to zero. */ 6778 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE); 6779 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 && 6780 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) 6781 val |= PCISTATE_RETRY_SAME_DMA; 6782 /* Allow reads and writes to the APE register and memory space. */ 6783 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) 6784 val |= PCISTATE_ALLOW_APE_CTLSPC_WR | 6785 PCISTATE_ALLOW_APE_SHMEM_WR; 6786 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val); 6787 6788 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd); 6789 6790 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) { 6791 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) 6792 pcie_set_readrq(tp->pdev, 4096); 6793 else { 6794 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, 6795 tp->pci_cacheline_sz); 6796 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, 6797 tp->pci_lat_timer); 6798 } 6799 } 6800 6801 /* Make sure PCI-X relaxed ordering bit is clear. 
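 * The read-modify-write below clears PCI_X_CMD_ERO, forcing
 * strictly ordered DMA completions.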
*/ 6802 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) { 6803 u16 pcix_cmd; 6804 6805 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 6806 &pcix_cmd); 6807 pcix_cmd &= ~PCI_X_CMD_ERO; 6808 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 6809 pcix_cmd); 6810 } 6811 6812 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) { 6813 6814 /* Chip reset on 5780 will reset MSI enable bit, 6815 * so need to restore it. 6816 */ 6817 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { 6818 u16 ctrl; 6819 6820 pci_read_config_word(tp->pdev, 6821 tp->msi_cap + PCI_MSI_FLAGS, 6822 &ctrl); 6823 pci_write_config_word(tp->pdev, 6824 tp->msi_cap + PCI_MSI_FLAGS, 6825 ctrl | PCI_MSI_FLAGS_ENABLE); 6826 val = tr32(MSGINT_MODE); 6827 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE); 6828 } 6829 } 6830} 6831 6832static void tg3_stop_fw(struct tg3 *); 6833 6834/* tp->lock is held. */ 6835static int tg3_chip_reset(struct tg3 *tp) 6836{ 6837 u32 val; 6838 void (*write_op)(struct tg3 *, u32, u32); 6839 int i, err; 6840 6841 tg3_nvram_lock(tp); 6842 6843 tg3_ape_lock(tp, TG3_APE_LOCK_GRC); 6844 6845 /* No matching tg3_nvram_unlock() after this because 6846 * chip reset below will undo the nvram lock. 6847 */ 6848 tp->nvram_lock_cnt = 0; 6849 6850 /* GRC_MISC_CFG core clock reset will clear the memory 6851 * enable bit in PCI register 4 and the MSI enable bit 6852 * on some chips, so we save relevant registers here. 6853 */ 6854 tg3_save_pci_state(tp); 6855 6856 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || 6857 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) 6858 tw32(GRC_FASTBOOT_PC, 0); 6859 6860 /* 6861 * We must avoid the readl() that normally takes place. 6862 * It locks machines, causes machine checks, and other 6863 * fun things. So, temporarily disable the 5701 6864 * hardware workaround, while we do the reset. 6865 */ 6866 write_op = tp->write32; 6867 if (write_op == tg3_write_flush_reg32) 6868 tp->write32 = tg3_write32; 6869 6870 /* Prevent the irq handler from reading or writing PCI registers 6871 * during chip reset when the memory enable bit in the PCI command 6872 * register may be cleared. The chip does not generate interrupt 6873 * at this time, but the irq handler may still be called due to irq 6874 * sharing or irqpoll. 
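 * Setting TG3_FLAG_CHIP_RESETTING below, followed by
 * synchronize_irq() on every vector, ensures no handler is still
 * touching the device while it is being reset.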
6875 */ 6876 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING; 6877 for (i = 0; i < tp->irq_cnt; i++) { 6878 struct tg3_napi *tnapi = &tp->napi[i]; 6879 if (tnapi->hw_status) { 6880 tnapi->hw_status->status = 0; 6881 tnapi->hw_status->status_tag = 0; 6882 } 6883 tnapi->last_tag = 0; 6884 tnapi->last_irq_tag = 0; 6885 } 6886 smp_mb(); 6887 6888 for (i = 0; i < tp->irq_cnt; i++) 6889 synchronize_irq(tp->napi[i].irq_vec); 6890 6891 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) { 6892 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; 6893 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); 6894 } 6895 6896 /* do the reset */ 6897 val = GRC_MISC_CFG_CORECLK_RESET; 6898 6899 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { 6900 if (tr32(0x7e2c) == 0x60) { 6901 tw32(0x7e2c, 0x20); 6902 } 6903 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) { 6904 tw32(GRC_MISC_CFG, (1 << 29)); 6905 val |= (1 << 29); 6906 } 6907 } 6908 6909 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { 6910 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET); 6911 tw32(GRC_VCPU_EXT_CTRL, 6912 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU); 6913 } 6914 6915 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) 6916 val |= GRC_MISC_CFG_KEEP_GPHY_POWER; 6917 tw32(GRC_MISC_CFG, val); 6918 6919 /* restore 5701 hardware bug workaround write method */ 6920 tp->write32 = write_op; 6921 6922 /* Unfortunately, we have to delay before the PCI read back. 6923 * Some 575X chips will not even respond to a PCI cfg access 6924 * when the reset command is given to the chip. 6925 * 6926 * How do these hardware designers expect things to work 6927 * properly if the PCI write is posted for a long period 6928 * of time? It is always necessary to have some method by 6929 * which a register read back can occur to push out the 6930 * write that does the reset. 6931 * 6932 * For most tg3 variants the trick below was working. 6933 * Ho hum... 6934 */ 6935 udelay(120); 6936 6937 /* Flush PCI posted writes. The normal MMIO registers 6938 * are inaccessible at this time so this is the only 6939 * way to do this reliably (actually, this is no longer 6940 * the case, see above). I tried to use indirect 6941 * register read/write but this upset some 5701 variants. 6942 */ 6943 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val); 6944 6945 udelay(120); 6946 6947 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) { 6948 u16 val16; 6949 6950 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) { 6951 int i; 6952 u32 cfg_val; 6953 6954 /* Wait for link training to complete. */ 6955 for (i = 0; i < 5000; i++) 6956 udelay(100); 6957 6958 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val); 6959 pci_write_config_dword(tp->pdev, 0xc4, 6960 cfg_val | (1 << 15)); 6961 } 6962 6963 /* Clear the "no snoop" and "relaxed ordering" bits. */ 6964 pci_read_config_word(tp->pdev, 6965 tp->pcie_cap + PCI_EXP_DEVCTL, 6966 &val16); 6967 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN | 6968 PCI_EXP_DEVCTL_NOSNOOP_EN); 6969 /* 6970 * Older PCIe devices only support the 128 byte 6971 * MPS setting. Enforce the restriction. 
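 * Clearing PCI_EXP_DEVCTL_PAYLOAD below selects the minimum
 * (128 byte) max payload size encoding.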
6972 */ 6973 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) || 6974 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)) 6975 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD; 6976 pci_write_config_word(tp->pdev, 6977 tp->pcie_cap + PCI_EXP_DEVCTL, 6978 val16); 6979 6980 pcie_set_readrq(tp->pdev, 4096); 6981 6982 /* Clear error status */ 6983 pci_write_config_word(tp->pdev, 6984 tp->pcie_cap + PCI_EXP_DEVSTA, 6985 PCI_EXP_DEVSTA_CED | 6986 PCI_EXP_DEVSTA_NFED | 6987 PCI_EXP_DEVSTA_FED | 6988 PCI_EXP_DEVSTA_URD); 6989 } 6990 6991 tg3_restore_pci_state(tp); 6992 6993 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING; 6994 6995 val = 0; 6996 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) 6997 val = tr32(MEMARB_MODE); 6998 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); 6999 7000 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) { 7001 tg3_stop_fw(tp); 7002 tw32(0x5000, 0x400); 7003 } 7004 7005 tw32(GRC_MODE, tp->grc_mode); 7006 7007 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) { 7008 val = tr32(0xc4); 7009 7010 tw32(0xc4, val | (1 << 15)); 7011 } 7012 7013 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 && 7014 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { 7015 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE; 7016 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) 7017 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN; 7018 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); 7019 } 7020 7021 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { 7022 tp->mac_mode = MAC_MODE_PORT_MODE_TBI; 7023 tw32_f(MAC_MODE, tp->mac_mode); 7024 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { 7025 tp->mac_mode = MAC_MODE_PORT_MODE_GMII; 7026 tw32_f(MAC_MODE, tp->mac_mode); 7027 } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { 7028 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN); 7029 if (tp->mac_mode & MAC_MODE_APE_TX_EN) 7030 tp->mac_mode |= MAC_MODE_TDE_ENABLE; 7031 tw32_f(MAC_MODE, tp->mac_mode); 7032 } else 7033 tw32_f(MAC_MODE, 0); 7034 udelay(40); 7035 7036 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC); 7037 7038 err = tg3_poll_fw(tp); 7039 if (err) 7040 return err; 7041 7042 tg3_mdio_start(tp); 7043 7044 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) { 7045 u8 phy_addr; 7046 7047 phy_addr = tp->phy_addr; 7048 tp->phy_addr = TG3_PHY_PCIE_ADDR; 7049 7050 tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR, 7051 TG3_PCIEPHY_TXB_BLK << TG3_PCIEPHY_BLOCK_SHIFT); 7052 val = TG3_PCIEPHY_TX0CTRL1_TXOCM | TG3_PCIEPHY_TX0CTRL1_RDCTL | 7053 TG3_PCIEPHY_TX0CTRL1_TXCMV | TG3_PCIEPHY_TX0CTRL1_TKSEL | 7054 TG3_PCIEPHY_TX0CTRL1_NB_EN; 7055 tg3_writephy(tp, TG3_PCIEPHY_TX0CTRL1, val); 7056 udelay(10); 7057 7058 tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR, 7059 TG3_PCIEPHY_XGXS_BLK1 << TG3_PCIEPHY_BLOCK_SHIFT); 7060 val = TG3_PCIEPHY_PWRMGMT4_LOWPWR_EN | 7061 TG3_PCIEPHY_PWRMGMT4_L1PLLPD_EN; 7062 tg3_writephy(tp, TG3_PCIEPHY_PWRMGMT4, val); 7063 udelay(10); 7064 7065 tp->phy_addr = phy_addr; 7066 } 7067 7068 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && 7069 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 && 7070 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && 7071 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && 7072 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) { 7073 val = tr32(0x7c00); 7074 7075 tw32(0x7c00, val | (1 << 25)); 7076 } 7077 7078 /* Reprobe ASF enable state. 
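 * The reset may have changed what the firmware reports, so the
 * ASF flags are cleared and rediscovered from NIC SRAM below.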
*/ 7079 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF; 7080 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE; 7081 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); 7082 if (val == NIC_SRAM_DATA_SIG_MAGIC) { 7083 u32 nic_cfg; 7084 7085 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); 7086 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { 7087 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF; 7088 tp->last_event_jiffies = jiffies; 7089 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) 7090 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE; 7091 } 7092 } 7093 7094 return 0; 7095} 7096 7097/* tp->lock is held. */ 7098static void tg3_stop_fw(struct tg3 *tp) 7099{ 7100 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && 7101 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) { 7102 /* Wait for RX cpu to ACK the previous event. */ 7103 tg3_wait_for_event_ack(tp); 7104 7105 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW); 7106 7107 tg3_generate_fw_event(tp); 7108 7109 /* Wait for RX cpu to ACK this event. */ 7110 tg3_wait_for_event_ack(tp); 7111 } 7112} 7113 7114/* tp->lock is held. */ 7115static int tg3_halt(struct tg3 *tp, int kind, int silent) 7116{ 7117 int err; 7118 7119 tg3_stop_fw(tp); 7120 7121 tg3_write_sig_pre_reset(tp, kind); 7122 7123 tg3_abort_hw(tp, silent); 7124 err = tg3_chip_reset(tp); 7125 7126 __tg3_set_mac_addr(tp, 0); 7127 7128 tg3_write_sig_legacy(tp, kind); 7129 tg3_write_sig_post_reset(tp, kind); 7130 7131 if (err) 7132 return err; 7133 7134 return 0; 7135} 7136 7137#define RX_CPU_SCRATCH_BASE 0x30000 7138#define RX_CPU_SCRATCH_SIZE 0x04000 7139#define TX_CPU_SCRATCH_BASE 0x34000 7140#define TX_CPU_SCRATCH_SIZE 0x04000 7141 7142/* tp->lock is held. */ 7143static int tg3_halt_cpu(struct tg3 *tp, u32 offset) 7144{ 7145 int i; 7146 7147 BUG_ON(offset == TX_CPU_BASE && 7148 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)); 7149 7150 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { 7151 u32 val = tr32(GRC_VCPU_EXT_CTRL); 7152 7153 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU); 7154 return 0; 7155 } 7156 if (offset == RX_CPU_BASE) { 7157 for (i = 0; i < 10000; i++) { 7158 tw32(offset + CPU_STATE, 0xffffffff); 7159 tw32(offset + CPU_MODE, CPU_MODE_HALT); 7160 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT) 7161 break; 7162 } 7163 7164 tw32(offset + CPU_STATE, 0xffffffff); 7165 tw32_f(offset + CPU_MODE, CPU_MODE_HALT); 7166 udelay(10); 7167 } else { 7168 for (i = 0; i < 10000; i++) { 7169 tw32(offset + CPU_STATE, 0xffffffff); 7170 tw32(offset + CPU_MODE, CPU_MODE_HALT); 7171 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT) 7172 break; 7173 } 7174 } 7175 7176 if (i >= 10000) { 7177 netdev_err(tp->dev, "%s timed out, %s CPU\n", 7178 __func__, offset == RX_CPU_BASE ? "RX" : "TX"); 7179 return -ENODEV; 7180 } 7181 7182 /* Clear firmware's nvram arbitration. */ 7183 if (tp->tg3_flags & TG3_FLAG_NVRAM) 7184 tw32(NVRAM_SWARB, SWARB_REQ_CLR0); 7185 return 0; 7186} 7187 7188struct fw_info { 7189 unsigned int fw_base; 7190 unsigned int fw_len; 7191 const __be32 *fw_data; 7192}; 7193 7194/* tp->lock is held. 
*/ 7195static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base, 7196 int cpu_scratch_size, struct fw_info *info) 7197{ 7198 int err, lock_err, i; 7199 void (*write_op)(struct tg3 *, u32, u32); 7200 7201 if (cpu_base == TX_CPU_BASE && 7202 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { 7203 netdev_err(tp->dev, 7204 "%s: Trying to load TX cpu firmware which is 5705\n", 7205 __func__); 7206 return -EINVAL; 7207 } 7208 7209 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) 7210 write_op = tg3_write_mem; 7211 else 7212 write_op = tg3_write_indirect_reg32; 7213 7214 /* It is possible that bootcode is still loading at this point. 7215 * Get the nvram lock first before halting the cpu. 7216 */ 7217 lock_err = tg3_nvram_lock(tp); 7218 err = tg3_halt_cpu(tp, cpu_base); 7219 if (!lock_err) 7220 tg3_nvram_unlock(tp); 7221 if (err) 7222 goto out; 7223 7224 for (i = 0; i < cpu_scratch_size; i += sizeof(u32)) 7225 write_op(tp, cpu_scratch_base + i, 0); 7226 tw32(cpu_base + CPU_STATE, 0xffffffff); 7227 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT); 7228 for (i = 0; i < (info->fw_len / sizeof(u32)); i++) 7229 write_op(tp, (cpu_scratch_base + 7230 (info->fw_base & 0xffff) + 7231 (i * sizeof(u32))), 7232 be32_to_cpu(info->fw_data[i])); 7233 7234 err = 0; 7235 7236out: 7237 return err; 7238} 7239 7240/* tp->lock is held. */ 7241static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp) 7242{ 7243 struct fw_info info; 7244 const __be32 *fw_data; 7245 int err, i; 7246 7247 fw_data = (void *)tp->fw->data; 7248 7249 /* Firmware blob starts with version numbers, followed by 7250 start address and length. We are setting complete length. 7251 length = end_address_of_bss - start_address_of_text. 7252 Remainder is the blob to be loaded contiguously 7253 from start address. */ 7254 7255 info.fw_base = be32_to_cpu(fw_data[1]); 7256 info.fw_len = tp->fw->size - 12; 7257 info.fw_data = &fw_data[3]; 7258 7259 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE, 7260 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE, 7261 &info); 7262 if (err) 7263 return err; 7264 7265 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE, 7266 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE, 7267 &info); 7268 if (err) 7269 return err; 7270 7271 /* Now startup only the RX cpu. */ 7272 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); 7273 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base); 7274 7275 for (i = 0; i < 5; i++) { 7276 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base) 7277 break; 7278 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); 7279 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT); 7280 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base); 7281 udelay(1000); 7282 } 7283 if (i >= 5) { 7284 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x " 7285 "should be %08x\n", __func__, 7286 tr32(RX_CPU_BASE + CPU_PC), info.fw_base); 7287 return -ENODEV; 7288 } 7289 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); 7290 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000); 7291 7292 return 0; 7293} 7294 7295/* 5705 needs a special version of the TSO firmware. */ 7296 7297/* tp->lock is held. */ 7298static int tg3_load_tso_firmware(struct tg3 *tp) 7299{ 7300 struct fw_info info; 7301 const __be32 *fw_data; 7302 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size; 7303 int err, i; 7304 7305 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) 7306 return 0; 7307 7308 fw_data = (void *)tp->fw->data; 7309 7310 /* Firmware blob starts with version numbers, followed by 7311 start address and length. We are setting complete length. 7312 length = end_address_of_bss - start_address_of_text. 
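   A sketch of that layout, in be32 words:
     fw_data[0]  version
     fw_data[1]  start address (becomes info.fw_base)
     fw_data[2]  length word (fw_len is computed from tp->fw->size instead)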
7313 Remainder is the blob to be loaded contiguously 7314 from start address. */ 7315 7316 info.fw_base = be32_to_cpu(fw_data[1]); 7317 cpu_scratch_size = tp->fw_len; 7318 info.fw_len = tp->fw->size - 12; 7319 info.fw_data = &fw_data[3]; 7320 7321 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { 7322 cpu_base = RX_CPU_BASE; 7323 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705; 7324 } else { 7325 cpu_base = TX_CPU_BASE; 7326 cpu_scratch_base = TX_CPU_SCRATCH_BASE; 7327 cpu_scratch_size = TX_CPU_SCRATCH_SIZE; 7328 } 7329 7330 err = tg3_load_firmware_cpu(tp, cpu_base, 7331 cpu_scratch_base, cpu_scratch_size, 7332 &info); 7333 if (err) 7334 return err; 7335 7336 /* Now startup the cpu. */ 7337 tw32(cpu_base + CPU_STATE, 0xffffffff); 7338 tw32_f(cpu_base + CPU_PC, info.fw_base); 7339 7340 for (i = 0; i < 5; i++) { 7341 if (tr32(cpu_base + CPU_PC) == info.fw_base) 7342 break; 7343 tw32(cpu_base + CPU_STATE, 0xffffffff); 7344 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); 7345 tw32_f(cpu_base + CPU_PC, info.fw_base); 7346 udelay(1000); 7347 } 7348 if (i >= 5) { 7349 netdev_err(tp->dev, 7350 "%s fails to set CPU PC, is %08x should be %08x\n", 7351 __func__, tr32(cpu_base + CPU_PC), info.fw_base); 7352 return -ENODEV; 7353 } 7354 tw32(cpu_base + CPU_STATE, 0xffffffff); 7355 tw32_f(cpu_base + CPU_MODE, 0x00000000); 7356 return 0; 7357} 7358 7359 7360static int tg3_set_mac_addr(struct net_device *dev, void *p) 7361{ 7362 struct tg3 *tp = netdev_priv(dev); 7363 struct sockaddr *addr = p; 7364 int err = 0, skip_mac_1 = 0; 7365 7366 if (!is_valid_ether_addr(addr->sa_data)) 7367 return -EINVAL; 7368 7369 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 7370 7371 if (!netif_running(dev)) 7372 return 0; 7373 7374 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { 7375 u32 addr0_high, addr0_low, addr1_high, addr1_low; 7376 7377 addr0_high = tr32(MAC_ADDR_0_HIGH); 7378 addr0_low = tr32(MAC_ADDR_0_LOW); 7379 addr1_high = tr32(MAC_ADDR_1_HIGH); 7380 addr1_low = tr32(MAC_ADDR_1_LOW); 7381 7382 /* Skip MAC addr 1 if ASF is using it. */ 7383 if ((addr0_high != addr1_high || addr0_low != addr1_low) && 7384 !(addr1_high == 0 && addr1_low == 0)) 7385 skip_mac_1 = 1; 7386 } 7387 spin_lock_bh(&tp->lock); 7388 __tg3_set_mac_addr(tp, skip_mac_1); 7389 spin_unlock_bh(&tp->lock); 7390 7391 return err; 7392} 7393 7394/* tp->lock is held. 
*/ 7395static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr, 7396 dma_addr_t mapping, u32 maxlen_flags, 7397 u32 nic_addr) 7398{ 7399 tg3_write_mem(tp, 7400 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH), 7401 ((u64) mapping >> 32)); 7402 tg3_write_mem(tp, 7403 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW), 7404 ((u64) mapping & 0xffffffff)); 7405 tg3_write_mem(tp, 7406 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS), 7407 maxlen_flags); 7408 7409 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 7410 tg3_write_mem(tp, 7411 (bdinfo_addr + TG3_BDINFO_NIC_ADDR), 7412 nic_addr); 7413} 7414 7415static void __tg3_set_rx_mode(struct net_device *); 7416static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) 7417{ 7418 int i; 7419 7420 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) { 7421 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs); 7422 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames); 7423 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq); 7424 } else { 7425 tw32(HOSTCC_TXCOL_TICKS, 0); 7426 tw32(HOSTCC_TXMAX_FRAMES, 0); 7427 tw32(HOSTCC_TXCOAL_MAXF_INT, 0); 7428 } 7429 7430 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) { 7431 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs); 7432 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames); 7433 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq); 7434 } else { 7435 tw32(HOSTCC_RXCOL_TICKS, 0); 7436 tw32(HOSTCC_RXMAX_FRAMES, 0); 7437 tw32(HOSTCC_RXCOAL_MAXF_INT, 0); 7438 } 7439 7440 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { 7441 u32 val = ec->stats_block_coalesce_usecs; 7442 7443 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq); 7444 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq); 7445 7446 if (!netif_carrier_ok(tp->dev)) 7447 val = 0; 7448 7449 tw32(HOSTCC_STAT_COAL_TICKS, val); 7450 } 7451 7452 for (i = 0; i < tp->irq_cnt - 1; i++) { 7453 u32 reg; 7454 7455 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18; 7456 tw32(reg, ec->rx_coalesce_usecs); 7457 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18; 7458 tw32(reg, ec->rx_max_coalesced_frames); 7459 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18; 7460 tw32(reg, ec->rx_max_coalesced_frames_irq); 7461 7462 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) { 7463 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18; 7464 tw32(reg, ec->tx_coalesce_usecs); 7465 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18; 7466 tw32(reg, ec->tx_max_coalesced_frames); 7467 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18; 7468 tw32(reg, ec->tx_max_coalesced_frames_irq); 7469 } 7470 } 7471 7472 for (; i < tp->irq_max - 1; i++) { 7473 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0); 7474 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0); 7475 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); 7476 7477 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) { 7478 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0); 7479 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0); 7480 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); 7481 } 7482 } 7483} 7484 7485/* tp->lock is held. */ 7486static void tg3_rings_reset(struct tg3 *tp) 7487{ 7488 int i; 7489 u32 stblk, txrcb, rxrcb, limit; 7490 struct tg3_napi *tnapi = &tp->napi[0]; 7491 7492 /* Disable all transmit rings but the first. 
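 * Each unused send ring's BDINFO block in NIC SRAM is marked
 * BDINFO_FLAGS_DISABLED; the limit reflects how many send rings
 * the chip variant actually has.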
*/ 7493 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 7494 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16; 7495 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) 7496 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2; 7497 else 7498 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; 7499 7500 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; 7501 txrcb < limit; txrcb += TG3_BDINFO_SIZE) 7502 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS, 7503 BDINFO_FLAGS_DISABLED); 7504 7505 7506 /* Disable all receive return rings but the first. */ 7507 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) 7508 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17; 7509 else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 7510 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16; 7511 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 7512 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) 7513 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4; 7514 else 7515 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; 7516 7517 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; 7518 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE) 7519 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS, 7520 BDINFO_FLAGS_DISABLED); 7521 7522 /* Disable interrupts */ 7523 tw32_mailbox_f(tp->napi[0].int_mbox, 1); 7524 7525 /* Zero mailbox registers. */ 7526 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) { 7527 for (i = 1; i < TG3_IRQ_MAX_VECS; i++) { 7528 tp->napi[i].tx_prod = 0; 7529 tp->napi[i].tx_cons = 0; 7530 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) 7531 tw32_mailbox(tp->napi[i].prodmbox, 0); 7532 tw32_rx_mbox(tp->napi[i].consmbox, 0); 7533 tw32_mailbox_f(tp->napi[i].int_mbox, 1); 7534 } 7535 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) 7536 tw32_mailbox(tp->napi[0].prodmbox, 0); 7537 } else { 7538 tp->napi[0].tx_prod = 0; 7539 tp->napi[0].tx_cons = 0; 7540 tw32_mailbox(tp->napi[0].prodmbox, 0); 7541 tw32_rx_mbox(tp->napi[0].consmbox, 0); 7542 } 7543 7544 /* Make sure the NIC-based send BD rings are disabled. */ 7545 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { 7546 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW; 7547 for (i = 0; i < 16; i++) 7548 tw32_tx_mbox(mbox + i * 8, 0); 7549 } 7550 7551 txrcb = NIC_SRAM_SEND_RCB; 7552 rxrcb = NIC_SRAM_RCV_RET_RCB; 7553 7554 /* Clear status block in ram. */ 7555 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); 7556 7557 /* Set status block DMA address */ 7558 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, 7559 ((u64) tnapi->status_mapping >> 32)); 7560 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, 7561 ((u64) tnapi->status_mapping & 0xffffffff)); 7562 7563 if (tnapi->tx_ring) { 7564 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, 7565 (TG3_TX_RING_SIZE << 7566 BDINFO_FLAGS_MAXLEN_SHIFT), 7567 NIC_SRAM_TX_BUFFER_DESC); 7568 txrcb += TG3_BDINFO_SIZE; 7569 } 7570 7571 if (tnapi->rx_rcb) { 7572 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, 7573 (TG3_RX_RCB_RING_SIZE(tp) << 7574 BDINFO_FLAGS_MAXLEN_SHIFT), 0); 7575 rxrcb += TG3_BDINFO_SIZE; 7576 } 7577 7578 stblk = HOSTCC_STATBLCK_RING1; 7579 7580 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) { 7581 u64 mapping = (u64)tnapi->status_mapping; 7582 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32); 7583 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff); 7584 7585 /* Clear status block in ram. 
*/ 7586 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); 7587 7588 if (tnapi->tx_ring) { 7589 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, 7590 (TG3_TX_RING_SIZE << 7591 BDINFO_FLAGS_MAXLEN_SHIFT), 7592 NIC_SRAM_TX_BUFFER_DESC); 7593 txrcb += TG3_BDINFO_SIZE; 7594 } 7595 7596 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, 7597 (TG3_RX_RCB_RING_SIZE(tp) << 7598 BDINFO_FLAGS_MAXLEN_SHIFT), 0); 7599 7600 stblk += 8; 7601 rxrcb += TG3_BDINFO_SIZE; 7602 } 7603} 7604 7605/* tp->lock is held. */ 7606static int tg3_reset_hw(struct tg3 *tp, int reset_phy) 7607{ 7608 u32 val, rdmac_mode; 7609 int i, err, limit; 7610 struct tg3_rx_prodring_set *tpr = &tp->prodring[0]; 7611 7612 tg3_disable_ints(tp); 7613 7614 tg3_stop_fw(tp); 7615 7616 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT); 7617 7618 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) 7619 tg3_abort_hw(tp, 1); 7620 7621 if (reset_phy) 7622 tg3_phy_reset(tp); 7623 7624 err = tg3_chip_reset(tp); 7625 if (err) 7626 return err; 7627 7628 tg3_write_sig_legacy(tp, RESET_KIND_INIT); 7629 7630 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) { 7631 val = tr32(TG3_CPMU_CTRL); 7632 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE); 7633 tw32(TG3_CPMU_CTRL, val); 7634 7635 val = tr32(TG3_CPMU_LSPD_10MB_CLK); 7636 val &= ~CPMU_LSPD_10MB_MACCLK_MASK; 7637 val |= CPMU_LSPD_10MB_MACCLK_6_25; 7638 tw32(TG3_CPMU_LSPD_10MB_CLK, val); 7639 7640 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD); 7641 val &= ~CPMU_LNK_AWARE_MACCLK_MASK; 7642 val |= CPMU_LNK_AWARE_MACCLK_6_25; 7643 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val); 7644 7645 val = tr32(TG3_CPMU_HST_ACC); 7646 val &= ~CPMU_HST_ACC_MACCLK_MASK; 7647 val |= CPMU_HST_ACC_MACCLK_6_25; 7648 tw32(TG3_CPMU_HST_ACC, val); 7649 } 7650 7651 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) { 7652 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK; 7653 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN | 7654 PCIE_PWR_MGMT_L1_THRESH_4MS; 7655 tw32(PCIE_PWR_MGMT_THRESH, val); 7656 7657 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK; 7658 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS); 7659 7660 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR); 7661 7662 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; 7663 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); 7664 } 7665 7666 if (tp->tg3_flags3 & TG3_FLG3_L1PLLPD_EN) { 7667 u32 grc_mode = tr32(GRC_MODE); 7668 7669 /* Access the lower 1K of PL PCIE block registers. */ 7670 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; 7671 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL); 7672 7673 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1); 7674 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1, 7675 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN); 7676 7677 tw32(GRC_MODE, grc_mode); 7678 } 7679 7680 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) { 7681 u32 grc_mode = tr32(GRC_MODE); 7682 7683 /* Access the lower 1K of PL PCIE block registers. */ 7684 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; 7685 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL); 7686 7687 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5); 7688 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5, 7689 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ); 7690 7691 tw32(GRC_MODE, grc_mode); 7692 7693 val = tr32(TG3_CPMU_LSPD_10MB_CLK); 7694 val &= ~CPMU_LSPD_10MB_MACCLK_MASK; 7695 val |= CPMU_LSPD_10MB_MACCLK_6_25; 7696 tw32(TG3_CPMU_LSPD_10MB_CLK, val); 7697 } 7698 7699 /* This works around an issue with Athlon chipsets on 7700 * B3 tigon3 silicon. 
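 * (The workaround bit is CLOCK_CTRL_DELAY_PCI_GRANT, set just below.)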
This bit has no effect on any 7701 * other revision. But do not set this on PCI Express 7702 * chips and don't even touch the clocks if the CPMU is present. 7703 */ 7704 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) { 7705 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) 7706 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT; 7707 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); 7708 } 7709 7710 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 && 7711 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) { 7712 val = tr32(TG3PCI_PCISTATE); 7713 val |= PCISTATE_RETRY_SAME_DMA; 7714 tw32(TG3PCI_PCISTATE, val); 7715 } 7716 7717 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { 7718 /* Allow reads and writes to the 7719 * APE register and memory space. 7720 */ 7721 val = tr32(TG3PCI_PCISTATE); 7722 val |= PCISTATE_ALLOW_APE_CTLSPC_WR | 7723 PCISTATE_ALLOW_APE_SHMEM_WR; 7724 tw32(TG3PCI_PCISTATE, val); 7725 } 7726 7727 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) { 7728 /* Enable some hw fixes. */ 7729 val = tr32(TG3PCI_MSI_DATA); 7730 val |= (1 << 26) | (1 << 28) | (1 << 29); 7731 tw32(TG3PCI_MSI_DATA, val); 7732 } 7733 7734 /* Descriptor ring init may make accesses to the 7735 * NIC SRAM area to set up the TX descriptors, so we 7736 * can only do this after the hardware has been 7737 * successfully reset. 7738 */ 7739 err = tg3_init_rings(tp); 7740 if (err) 7741 return err; 7742 7743 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 7744 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { 7745 val = tr32(TG3PCI_DMA_RW_CTRL) & 7746 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT; 7747 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) 7748 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK; 7749 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl); 7750 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 && 7751 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) { 7752 /* This value is determined during the probe time DMA 7753 * engine test, tg3_test_dma. 7754 */ 7755 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 7756 } 7757 7758 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS | 7759 GRC_MODE_4X_NIC_SEND_RINGS | 7760 GRC_MODE_NO_TX_PHDR_CSUM | 7761 GRC_MODE_NO_RX_PHDR_CSUM); 7762 tp->grc_mode |= GRC_MODE_HOST_SENDBDS; 7763 7764 /* Pseudo-header checksum is done by hardware logic and not 7765 * the offload processors, so make the chip do the pseudo- 7766 * header checksums on receive. For transmit it is more 7767 * convenient to do the pseudo-header checksum in software 7768 * as Linux does that on transmit for us in all cases. 7769 */ 7770 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM; 7771 7772 tw32(GRC_MODE, 7773 tp->grc_mode | 7774 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP)); 7775 7776 /* Set up the timer prescaler register. Clock is always 66 MHz. */ 7777 val = tr32(GRC_MISC_CFG); 7778 val &= ~0xff; 7779 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT); 7780 tw32(GRC_MISC_CFG, val); 7781 7782 /* Initialize MBUF/DESC pool. */ 7783 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { 7784 /* Do nothing. 
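 * 5750-class and newer chips apparently manage the MBUF/DESC
 * pools themselves, so the defaults are left untouched.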
*/ 7785 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) { 7786 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE); 7787 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) 7788 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64); 7789 else 7790 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96); 7791 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE); 7792 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE); 7793 } else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) { 7794 int fw_len; 7795 7796 fw_len = tp->fw_len; 7797 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1); 7798 tw32(BUFMGR_MB_POOL_ADDR, 7799 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len); 7800 tw32(BUFMGR_MB_POOL_SIZE, 7801 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00); 7802 } 7803 7804 if (tp->dev->mtu <= ETH_DATA_LEN) { 7805 tw32(BUFMGR_MB_RDMA_LOW_WATER, 7806 tp->bufmgr_config.mbuf_read_dma_low_water); 7807 tw32(BUFMGR_MB_MACRX_LOW_WATER, 7808 tp->bufmgr_config.mbuf_mac_rx_low_water); 7809 tw32(BUFMGR_MB_HIGH_WATER, 7810 tp->bufmgr_config.mbuf_high_water); 7811 } else { 7812 tw32(BUFMGR_MB_RDMA_LOW_WATER, 7813 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo); 7814 tw32(BUFMGR_MB_MACRX_LOW_WATER, 7815 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo); 7816 tw32(BUFMGR_MB_HIGH_WATER, 7817 tp->bufmgr_config.mbuf_high_water_jumbo); 7818 } 7819 tw32(BUFMGR_DMA_LOW_WATER, 7820 tp->bufmgr_config.dma_low_water); 7821 tw32(BUFMGR_DMA_HIGH_WATER, 7822 tp->bufmgr_config.dma_high_water); 7823 7824 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE); 7825 for (i = 0; i < 2000; i++) { 7826 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE) 7827 break; 7828 udelay(10); 7829 } 7830 if (i >= 2000) { 7831 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__); 7832 return -ENODEV; 7833 } 7834 7835 /* Setup replenish threshold. */ 7836 val = tp->rx_pending / 8; 7837 if (val == 0) 7838 val = 1; 7839 else if (val > tp->rx_std_max_post) 7840 val = tp->rx_std_max_post; 7841 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { 7842 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1) 7843 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2); 7844 7845 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2)) 7846 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2; 7847 } 7848 7849 tw32(RCVBDI_STD_THRESH, val); 7850 7851 /* Initialize TG3_BDINFO's at: 7852 * RCVDBDI_STD_BD: standard eth size rx ring 7853 * RCVDBDI_JUMBO_BD: jumbo frame rx ring 7854 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work) 7855 * 7856 * like so: 7857 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring 7858 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) | 7859 * ring attribute flags 7860 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM 7861 * 7862 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries. 7863 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries. 7864 * 7865 * The size of each ring is fixed in the firmware, but the location is 7866 * configurable. 
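 * A worked example, using the standard-ring writes just below:
 *   TG3_BDINFO_HOST_ADDR    <- tpr->rx_std_mapping (high/low u32 halves)
 *   TG3_BDINFO_MAXLEN_FLAGS <- rx max buffer size << BDINFO_FLAGS_MAXLEN_SHIFT
 *   TG3_BDINFO_NIC_ADDR     <- NIC_SRAM_RX_BUFFER_DESC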
7867 */ 7868 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, 7869 ((u64) tpr->rx_std_mapping >> 32)); 7870 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, 7871 ((u64) tpr->rx_std_mapping & 0xffffffff)); 7872 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) 7873 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, 7874 NIC_SRAM_RX_BUFFER_DESC); 7875 7876 /* Disable the mini ring */ 7877 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 7878 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS, 7879 BDINFO_FLAGS_DISABLED); 7880 7881 /* Program the jumbo buffer descriptor ring control 7882 * blocks on those devices that have them. 7883 */ 7884 if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && 7885 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { 7886 /* Setup replenish threshold. */ 7887 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8); 7888 7889 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) { 7890 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, 7891 ((u64) tpr->rx_jmb_mapping >> 32)); 7892 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, 7893 ((u64) tpr->rx_jmb_mapping & 0xffffffff)); 7894 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 7895 (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) | 7896 BDINFO_FLAGS_USE_EXT_RECV); 7897 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) 7898 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, 7899 NIC_SRAM_RX_JUMBO_BUFFER_DESC); 7900 } else { 7901 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 7902 BDINFO_FLAGS_DISABLED); 7903 } 7904 7905 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 7906 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) 7907 val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) | 7908 (TG3_RX_STD_DMA_SZ << 2); 7909 else 7910 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT; 7911 } else 7912 val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT; 7913 7914 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val); 7915 7916 tpr->rx_std_prod_idx = tp->rx_pending; 7917 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx); 7918 7919 tpr->rx_jmb_prod_idx = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ? 7920 tp->rx_jumbo_pending : 0; 7921 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx); 7922 7923 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 7924 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { 7925 tw32(STD_REPLENISH_LWM, 32); 7926 tw32(JMB_REPLENISH_LWM, 16); 7927 } 7928 7929 tg3_rings_reset(tp); 7930 7931 /* Initialize MAC address and backoff seed. */ 7932 __tg3_set_mac_addr(tp, 0); 7933 7934 /* MTU + ethernet header + FCS + optional VLAN tag */ 7935 tw32(MAC_RX_MTU_SIZE, 7936 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); 7937 7938 /* The slot time is changed by tg3_setup_phy if we 7939 * run at gigabit with half duplex. 7940 */ 7941 tw32(MAC_TX_LENGTHS, 7942 (2 << TX_LENGTHS_IPG_CRS_SHIFT) | 7943 (6 << TX_LENGTHS_IPG_SHIFT) | 7944 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)); 7945 7946 /* Receive rules. */ 7947 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS); 7948 tw32(RCVLPC_CONFIG, 0x0181); 7949 7950 /* Calculate RDMAC_MODE setting early, we need it to determine 7951 * the RCVLPC_STATE_ENABLE mask. 
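 * (i.e. the value written to the RCVLPC_STATS_ENABLE register in
 * the statistics setup below).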
7952 */ 7953 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB | 7954 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB | 7955 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB | 7956 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB | 7957 RDMAC_MODE_LNGREAD_ENAB); 7958 7959 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) 7960 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS; 7961 7962 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 7963 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || 7964 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) 7965 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB | 7966 RDMAC_MODE_MBUF_RBD_CRPT_ENAB | 7967 RDMAC_MODE_MBUF_SBD_CRPT_ENAB; 7968 7969 /* If statement applies to 5705 and 5750 PCI devices only */ 7970 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && 7971 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) || 7972 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) { 7973 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE && 7974 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { 7975 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128; 7976 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && 7977 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) { 7978 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; 7979 } 7980 } 7981 7982 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) 7983 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; 7984 7985 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) 7986 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN; 7987 7988 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) || 7989 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || 7990 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) 7991 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN; 7992 7993 /* Receive/send statistics. */ 7994 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { 7995 val = tr32(RCVLPC_STATS_ENABLE); 7996 val &= ~RCVLPC_STATSENAB_DACK_FIX; 7997 tw32(RCVLPC_STATS_ENABLE, val); 7998 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) && 7999 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) { 8000 val = tr32(RCVLPC_STATS_ENABLE); 8001 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX; 8002 tw32(RCVLPC_STATS_ENABLE, val); 8003 } else { 8004 tw32(RCVLPC_STATS_ENABLE, 0xffffff); 8005 } 8006 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE); 8007 tw32(SNDDATAI_STATSENAB, 0xffffff); 8008 tw32(SNDDATAI_STATSCTRL, 8009 (SNDDATAI_SCTRL_ENABLE | 8010 SNDDATAI_SCTRL_FASTUPD)); 8011 8012 /* Setup host coalescing engine. */ 8013 tw32(HOSTCC_MODE, 0); 8014 for (i = 0; i < 2000; i++) { 8015 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE)) 8016 break; 8017 udelay(10); 8018 } 8019 8020 __tg3_set_coalesce(tp, &tp->coal); 8021 8022 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { 8023 /* Status/statistics block address. See tg3_timer, 8024 * the tg3_periodic_fetch_stats call there, and 8025 * tg3_get_stats to see how this works for 5705/5750 chips. 
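 *
 * In short: 5705+ parts do not DMA the statistics block to host
 * memory, so tg3_timer() calls tg3_periodic_fetch_stats() once per
 * second to accumulate the MAC counters by hand; the host-address
 * programming below therefore only runs on the older chips.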
8026 */ 8027 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, 8028 ((u64) tp->stats_mapping >> 32)); 8029 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, 8030 ((u64) tp->stats_mapping & 0xffffffff)); 8031 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK); 8032 8033 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK); 8034 8035 /* Clear statistics and status block memory areas */ 8036 for (i = NIC_SRAM_STATS_BLK; 8037 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE; 8038 i += sizeof(u32)) { 8039 tg3_write_mem(tp, i, 0); 8040 udelay(40); 8041 } 8042 } 8043 8044 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode); 8045 8046 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE); 8047 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE); 8048 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 8049 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE); 8050 8051 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { 8052 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; 8053 /* reset to prevent losing 1st rx packet intermittently */ 8054 tw32_f(MAC_RX_MODE, RX_MODE_RESET); 8055 udelay(10); 8056 } 8057 8058 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) 8059 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; 8060 else 8061 tp->mac_mode = 0; 8062 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | 8063 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE; 8064 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && 8065 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) && 8066 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) 8067 tp->mac_mode |= MAC_MODE_LINK_POLARITY; 8068 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR); 8069 udelay(40); 8070 8071 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants(). 8072 * If TG3_FLG2_IS_NIC is zero, we should read the 8073 * register to preserve the GPIO settings for LOMs. The GPIOs, 8074 * whether used as inputs or outputs, are set by boot code after 8075 * reset. 
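 *
 * The read-merge below implements that: only the OE/OUTPUT bits named
 * in gpio_mask are inherited from the live GRC_LOCAL_CTRL value, after
 * which GPIO1 is forced high if eeprom write protect is in use.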
8076 */ 8077 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) { 8078 u32 gpio_mask; 8079 8080 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 | 8081 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 | 8082 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2; 8083 8084 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) 8085 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 | 8086 GRC_LCLCTRL_GPIO_OUTPUT3; 8087 8088 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) 8089 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL; 8090 8091 tp->grc_local_ctrl &= ~gpio_mask; 8092 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask; 8093 8094 /* GPIO1 must be driven high for eeprom write protect */ 8095 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) 8096 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | 8097 GRC_LCLCTRL_GPIO_OUTPUT1); 8098 } 8099 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 8100 udelay(100); 8101 8102 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) { 8103 val = tr32(MSGINT_MODE); 8104 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE; 8105 tw32(MSGINT_MODE, val); 8106 } 8107 8108 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { 8109 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE); 8110 udelay(40); 8111 } 8112 8113 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB | 8114 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB | 8115 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB | 8116 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB | 8117 WDMAC_MODE_LNGREAD_ENAB); 8118 8119 /* If statement applies to 5705 and 5750 PCI devices only */ 8120 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && 8121 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) || 8122 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) { 8123 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) && 8124 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 || 8125 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) { 8126 /* nothing */ 8127 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && 8128 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) && 8129 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) { 8130 val |= WDMAC_MODE_RX_ACCEL; 8131 } 8132 } 8133 8134 /* Enable host coalescing bug fix */ 8135 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) 8136 val |= WDMAC_MODE_STATUS_TAG_FIX; 8137 8138 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) 8139 val |= WDMAC_MODE_BURST_ALL_DATA; 8140 8141 tw32_f(WDMAC_MODE, val); 8142 udelay(40); 8143 8144 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) { 8145 u16 pcix_cmd; 8146 8147 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 8148 &pcix_cmd); 8149 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) { 8150 pcix_cmd &= ~PCI_X_CMD_MAX_READ; 8151 pcix_cmd |= PCI_X_CMD_READ_2K; 8152 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { 8153 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ); 8154 pcix_cmd |= PCI_X_CMD_READ_2K; 8155 } 8156 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 8157 pcix_cmd); 8158 } 8159 8160 tw32_f(RDMAC_MODE, rdmac_mode); 8161 udelay(40); 8162 8163 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE); 8164 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 8165 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE); 8166 8167 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) 8168 tw32(SNDDATAC_MODE, 8169 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY); 8170 else 8171 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE); 8172 8173 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE); 8174 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB); 8175 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | 
RCVDBDI_MODE_INV_RING_SZ); 8176 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE); 8177 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) 8178 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8); 8179 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE; 8180 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) 8181 val |= SNDBDI_MODE_MULTI_TXQ_EN; 8182 tw32(SNDBDI_MODE, val); 8183 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE); 8184 8185 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) { 8186 err = tg3_load_5701_a0_firmware_fix(tp); 8187 if (err) 8188 return err; 8189 } 8190 8191 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) { 8192 err = tg3_load_tso_firmware(tp); 8193 if (err) 8194 return err; 8195 } 8196 8197 tp->tx_mode = TX_MODE_ENABLE; 8198 tw32_f(MAC_TX_MODE, tp->tx_mode); 8199 udelay(100); 8200 8201 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) { 8202 u32 reg = MAC_RSS_INDIR_TBL_0; 8203 u8 *ent = (u8 *)&val; 8204 8205 /* Setup the indirection table */ 8206 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) { 8207 int idx = i % sizeof(val); 8208 8209 ent[idx] = i % (tp->irq_cnt - 1); 8210 if (idx == sizeof(val) - 1) { 8211 tw32(reg, val); 8212 reg += 4; 8213 } 8214 } 8215 8216 /* Setup the "secret" hash key. */ 8217 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437); 8218 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc); 8219 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45); 8220 tw32(MAC_RSS_HASH_KEY_3, 0x36621985); 8221 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8); 8222 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e); 8223 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556); 8224 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe); 8225 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7); 8226 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481); 8227 } 8228 8229 tp->rx_mode = RX_MODE_ENABLE; 8230 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) 8231 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE; 8232 8233 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) 8234 tp->rx_mode |= RX_MODE_RSS_ENABLE | 8235 RX_MODE_RSS_ITBL_HASH_BITS_7 | 8236 RX_MODE_RSS_IPV6_HASH_EN | 8237 RX_MODE_RSS_TCP_IPV6_HASH_EN | 8238 RX_MODE_RSS_IPV4_HASH_EN | 8239 RX_MODE_RSS_TCP_IPV4_HASH_EN; 8240 8241 tw32_f(MAC_RX_MODE, tp->rx_mode); 8242 udelay(10); 8243 8244 tw32(MAC_LED_CTRL, tp->led_ctrl); 8245 8246 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); 8247 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { 8248 tw32_f(MAC_RX_MODE, RX_MODE_RESET); 8249 udelay(10); 8250 } 8251 tw32_f(MAC_RX_MODE, tp->rx_mode); 8252 udelay(10); 8253 8254 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { 8255 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) && 8256 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) { 8257 /* Set drive transmission level to 1.2V */ 8258 /* only if the signal pre-emphasis bit is not set */ 8259 val = tr32(MAC_SERDES_CFG); 8260 val &= 0xfffff000; 8261 val |= 0x880; 8262 tw32(MAC_SERDES_CFG, val); 8263 } 8264 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) 8265 tw32(MAC_SERDES_CFG, 0x616000); 8266 } 8267 8268 /* Prevent chip from dropping frames when flow control 8269 * is enabled. 
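 *
 * (The write below programs MAC_LOW_WMARK_MAX_RX_FRAME with 1 on
 * 57765 parts and 2 everywhere else; reading it as "how many
 * max-sized rx frames may still be accepted once the low water mark
 * is hit" is an editorial gloss, not wording taken from hardware
 * documentation.)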
8270 */ 8271 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) 8272 val = 1; 8273 else 8274 val = 2; 8275 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val); 8276 8277 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 && 8278 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { 8279 /* Use hardware link auto-negotiation */ 8280 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG; 8281 } 8282 8283 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && 8284 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) { 8285 u32 tmp; 8286 8287 tmp = tr32(SERDES_RX_CTRL); 8288 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT); 8289 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT; 8290 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT; 8291 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 8292 } 8293 8294 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) { 8295 if (tp->link_config.phy_is_low_power) { 8296 tp->link_config.phy_is_low_power = 0; 8297 tp->link_config.speed = tp->link_config.orig_speed; 8298 tp->link_config.duplex = tp->link_config.orig_duplex; 8299 tp->link_config.autoneg = tp->link_config.orig_autoneg; 8300 } 8301 8302 err = tg3_setup_phy(tp, 0); 8303 if (err) 8304 return err; 8305 8306 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) && 8307 !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) { 8308 u32 tmp; 8309 8310 /* Clear CRC stats. */ 8311 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) { 8312 tg3_writephy(tp, MII_TG3_TEST1, 8313 tmp | MII_TG3_TEST1_CRC_EN); 8314 tg3_readphy(tp, 0x14, &tmp); 8315 } 8316 } 8317 } 8318 8319 __tg3_set_rx_mode(tp->dev); 8320 8321 /* Initialize receive rules. */ 8322 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK); 8323 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK); 8324 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK); 8325 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK); 8326 8327 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && 8328 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) 8329 limit = 8; 8330 else 8331 limit = 16; 8332 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) 8333 limit -= 4; 8334 switch (limit) { 8335 case 16: 8336 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0); 8337 case 15: 8338 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0); 8339 case 14: 8340 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0); 8341 case 13: 8342 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0); 8343 case 12: 8344 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0); 8345 case 11: 8346 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0); 8347 case 10: 8348 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0); 8349 case 9: 8350 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0); 8351 case 8: 8352 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0); 8353 case 7: 8354 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0); 8355 case 6: 8356 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0); 8357 case 5: 8358 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0); 8359 case 4: 8360 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */ 8361 case 3: 8362 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */ 8363 case 2: 8364 case 1: 8365 8366 default: 8367 break; 8368 } 8369 8370 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) 8371 /* Write our heartbeat update interval to APE. */ 8372 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS, 8373 APE_HOST_HEARTBEAT_INT_DISABLE); 8374 8375 tg3_write_sig_post_reset(tp, RESET_KIND_INIT); 8376 8377 return 0; 8378} 8379 8380/* Called at device open time to get the chip ready for 8381 * packet processing. Invoked with tp->lock held. 
8382 */ 8383static int tg3_init_hw(struct tg3 *tp, int reset_phy) 8384{ 8385 tg3_switch_clocks(tp); 8386 8387 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); 8388 8389 return tg3_reset_hw(tp, reset_phy); 8390} 8391 8392#define TG3_STAT_ADD32(PSTAT, REG) \ 8393do { u32 __val = tr32(REG); \ 8394 (PSTAT)->low += __val; \ 8395 if ((PSTAT)->low < __val) \ 8396 (PSTAT)->high += 1; \ 8397} while (0) 8398 8399static void tg3_periodic_fetch_stats(struct tg3 *tp) 8400{ 8401 struct tg3_hw_stats *sp = tp->hw_stats; 8402 8403 if (!netif_carrier_ok(tp->dev)) 8404 return; 8405 8406 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS); 8407 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS); 8408 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT); 8409 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT); 8410 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS); 8411 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS); 8412 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS); 8413 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED); 8414 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL); 8415 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL); 8416 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST); 8417 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST); 8418 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST); 8419 8420 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS); 8421 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS); 8422 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST); 8423 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST); 8424 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST); 8425 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS); 8426 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS); 8427 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD); 8428 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD); 8429 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD); 8430 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED); 8431 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG); 8432 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS); 8433 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE); 8434 8435 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT); 8436 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT); 8437 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT); 8438} 8439 8440static void tg3_timer(unsigned long __opaque) 8441{ 8442 struct tg3 *tp = (struct tg3 *) __opaque; 8443 8444 if (tp->irq_sync) 8445 goto restart_timer; 8446 8447 spin_lock(&tp->lock); 8448 8449 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) { 8450 /* All of this garbage is because when using non-tagged 8451 * IRQ status the mailbox/status_block protocol the chip 8452 * uses with the cpu is race prone. 8453 */ 8454 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) { 8455 tw32(GRC_LOCAL_CTRL, 8456 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); 8457 } else { 8458 tw32(HOSTCC_MODE, tp->coalesce_mode | 8459 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW); 8460 } 8461 8462 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { 8463 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER; 8464 spin_unlock(&tp->lock); 8465 schedule_work(&tp->reset_task); 8466 return; 8467 } 8468 } 8469 8470 /* This part only runs once per second. 
*/ 8471 if (!--tp->timer_counter) { 8472 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) 8473 tg3_periodic_fetch_stats(tp); 8474 8475 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) { 8476 u32 mac_stat; 8477 int phy_event; 8478 8479 mac_stat = tr32(MAC_STATUS); 8480 8481 phy_event = 0; 8482 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) { 8483 if (mac_stat & MAC_STATUS_MI_INTERRUPT) 8484 phy_event = 1; 8485 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED) 8486 phy_event = 1; 8487 8488 if (phy_event) 8489 tg3_setup_phy(tp, 0); 8490 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) { 8491 u32 mac_stat = tr32(MAC_STATUS); 8492 int need_setup = 0; 8493 8494 if (netif_carrier_ok(tp->dev) && 8495 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) { 8496 need_setup = 1; 8497 } 8498 if (!netif_carrier_ok(tp->dev) && 8499 (mac_stat & (MAC_STATUS_PCS_SYNCED | 8500 MAC_STATUS_SIGNAL_DET))) { 8501 need_setup = 1; 8502 } 8503 if (need_setup) { 8504 if (!tp->serdes_counter) { 8505 tw32_f(MAC_MODE, 8506 (tp->mac_mode & 8507 ~MAC_MODE_PORT_MODE_MASK)); 8508 udelay(40); 8509 tw32_f(MAC_MODE, tp->mac_mode); 8510 udelay(40); 8511 } 8512 tg3_setup_phy(tp, 0); 8513 } 8514 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) 8515 tg3_serdes_parallel_detect(tp); 8516 8517 tp->timer_counter = tp->timer_multiplier; 8518 } 8519 8520 /* Heartbeat is only sent once every 2 seconds. 8521 * 8522 * The heartbeat is to tell the ASF firmware that the host 8523 * driver is still alive. In the event that the OS crashes, 8524 * ASF needs to reset the hardware to free up the FIFO space 8525 * that may be filled with rx packets destined for the host. 8526 * If the FIFO is full, ASF will no longer function properly. 8527 * 8528 * Unintended resets have been reported on real time kernels 8529 * where the timer doesn't run on time. Netpoll will also have 8530 * the same problem. 8531 * 8532 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware 8533 * to check the ring condition when the heartbeat is expiring 8534 * before doing the reset. This will prevent most unintended 8535 * resets.
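 *
 * The exchange below is: wait for the previous event to be acked,
 * write FWCMD_NICDRV_ALIVE3 into the firmware command mailbox with a
 * length of 4 and TG3_FW_UPDATE_TIMEOUT_SEC as payload, then ring the
 * firmware event doorbell via tg3_generate_fw_event().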
8536 */ 8537 if (!--tp->asf_counter) { 8538 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && 8539 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) { 8540 tg3_wait_for_event_ack(tp); 8541 8542 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, 8543 FWCMD_NICDRV_ALIVE3); 8544 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4); 8545 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 8546 TG3_FW_UPDATE_TIMEOUT_SEC); 8547 8548 tg3_generate_fw_event(tp); 8549 } 8550 tp->asf_counter = tp->asf_multiplier; 8551 } 8552 8553 spin_unlock(&tp->lock); 8554 8555restart_timer: 8556 tp->timer.expires = jiffies + tp->timer_offset; 8557 add_timer(&tp->timer); 8558} 8559 8560static int tg3_request_irq(struct tg3 *tp, int irq_num) 8561{ 8562 irq_handler_t fn; 8563 unsigned long flags; 8564 char *name; 8565 struct tg3_napi *tnapi = &tp->napi[irq_num]; 8566 8567 if (tp->irq_cnt == 1) 8568 name = tp->dev->name; 8569 else { 8570 name = &tnapi->irq_lbl[0]; 8571 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num); 8572 name[IFNAMSIZ-1] = 0; 8573 } 8574 8575 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) { 8576 fn = tg3_msi; 8577 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) 8578 fn = tg3_msi_1shot; 8579 flags = IRQF_SAMPLE_RANDOM; 8580 } else { 8581 fn = tg3_interrupt; 8582 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) 8583 fn = tg3_interrupt_tagged; 8584 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM; 8585 } 8586 8587 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi); 8588} 8589 8590static int tg3_test_interrupt(struct tg3 *tp) 8591{ 8592 struct tg3_napi *tnapi = &tp->napi[0]; 8593 struct net_device *dev = tp->dev; 8594 int err, i, intr_ok = 0; 8595 u32 val; 8596 8597 if (!netif_running(dev)) 8598 return -ENODEV; 8599 8600 tg3_disable_ints(tp); 8601 8602 free_irq(tnapi->irq_vec, tnapi); 8603 8604 /* 8605 * Turn off MSI one shot mode. Otherwise this test has no 8606 * observable way to know whether the interrupt was delivered. 8607 */ 8608 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 8609 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) && 8610 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) { 8611 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE; 8612 tw32(MSGINT_MODE, val); 8613 } 8614 8615 err = request_irq(tnapi->irq_vec, tg3_test_isr, 8616 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi); 8617 if (err) 8618 return err; 8619 8620 tnapi->hw_status->status &= ~SD_STATUS_UPDATED; 8621 tg3_enable_ints(tp); 8622 8623 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 8624 tnapi->coal_now); 8625 8626 for (i = 0; i < 5; i++) { 8627 u32 int_mbox, misc_host_ctrl; 8628 8629 int_mbox = tr32_mailbox(tnapi->int_mbox); 8630 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); 8631 8632 if ((int_mbox != 0) || 8633 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) { 8634 intr_ok = 1; 8635 break; 8636 } 8637 8638 msleep(10); 8639 } 8640 8641 tg3_disable_ints(tp); 8642 8643 free_irq(tnapi->irq_vec, tnapi); 8644 8645 err = tg3_request_irq(tp, 0); 8646 8647 if (err) 8648 return err; 8649 8650 if (intr_ok) { 8651 /* Reenable MSI one shot mode. 
*/ 8652 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 8653 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) && 8654 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) { 8655 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE; 8656 tw32(MSGINT_MODE, val); 8657 } 8658 return 0; 8659 } 8660 8661 return -EIO; 8662} 8663 8664/* Returns 0 if the MSI test succeeds, or if the MSI test fails and INTx mode is 8665 * successfully restored. 8666 */ 8667static int tg3_test_msi(struct tg3 *tp) 8668{ 8669 int err; 8670 u16 pci_cmd; 8671 8672 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI)) 8673 return 0; 8674 8675 /* Turn off SERR reporting in case MSI terminates with Master 8676 * Abort. 8677 */ 8678 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); 8679 pci_write_config_word(tp->pdev, PCI_COMMAND, 8680 pci_cmd & ~PCI_COMMAND_SERR); 8681 8682 err = tg3_test_interrupt(tp); 8683 8684 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); 8685 8686 if (!err) 8687 return 0; 8688 8689 /* other failures */ 8690 if (err != -EIO) 8691 return err; 8692 8693 /* MSI test failed, go back to INTx mode */ 8694 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching " 8695 "to INTx mode. Please report this failure to the PCI " 8696 "maintainer and include system chipset information\n"); 8697 8698 free_irq(tp->napi[0].irq_vec, &tp->napi[0]); 8699 8700 pci_disable_msi(tp->pdev); 8701 8702 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; 8703 tp->napi[0].irq_vec = tp->pdev->irq; 8704 8705 err = tg3_request_irq(tp, 0); 8706 if (err) 8707 return err; 8708 8709 /* Need to reset the chip because the MSI cycle may have terminated 8710 * with Master Abort. 8711 */ 8712 tg3_full_lock(tp, 1); 8713 8714 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 8715 err = tg3_init_hw(tp, 1); 8716 8717 tg3_full_unlock(tp); 8718 8719 if (err) 8720 free_irq(tp->napi[0].irq_vec, &tp->napi[0]); 8721 8722 return err; 8723} 8724 8725static int tg3_request_firmware(struct tg3 *tp) 8726{ 8727 const __be32 *fw_data; 8728 8729 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) { 8730 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n", 8731 tp->fw_needed); 8732 return -ENOENT; 8733 } 8734 8735 fw_data = (void *)tp->fw->data; 8736 8737 /* Firmware blob starts with version numbers, followed by 8738 * start address and _full_ length including BSS sections 8739 * (which must be longer than the actual data, of course). 8740 */ 8741 8742 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */ 8743 if (tp->fw_len < (tp->fw->size - 12)) { 8744 netdev_err(tp->dev, "bogus length %d in \"%s\"\n", 8745 tp->fw_len, tp->fw_needed); 8746 release_firmware(tp->fw); 8747 tp->fw = NULL; 8748 return -EINVAL; 8749 } 8750 8751 /* We no longer need firmware; we have it. */ 8752 tp->fw_needed = NULL; 8753 return 0; 8754} 8755 8756static bool tg3_enable_msix(struct tg3 *tp) 8757{ 8758 int i, rc, cpus = num_online_cpus(); 8759 struct msix_entry msix_ent[tp->irq_max]; 8760 8761 if (cpus == 1) 8762 /* Just fall back to the simpler MSI mode. */ 8763 return false; 8764 8765 /* 8766 * We want as many rx rings enabled as there are cpus. 8767 * The first MSIX vector only deals with link interrupts, etc, 8768 * so we add one to the number of vectors we are requesting.
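 *
 * Worked example (illustrative figures): on a 4-CPU system with an
 * irq_max of 5, irq_cnt below becomes min(4 + 1, 5) == 5 -- one
 * link/misc vector plus four rx vectors; with 2 CPUs it would be 3.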
8769 */ 8770 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max); 8771 8772 for (i = 0; i < tp->irq_max; i++) { 8773 msix_ent[i].entry = i; 8774 msix_ent[i].vector = 0; 8775 } 8776 8777 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt); 8778 if (rc != 0) { 8779 if (rc < TG3_RSS_MIN_NUM_MSIX_VECS) 8780 return false; 8781 if (pci_enable_msix(tp->pdev, msix_ent, rc)) 8782 return false; 8783 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n", 8784 tp->irq_cnt, rc); 8785 tp->irq_cnt = rc; 8786 } 8787 8788 tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS; 8789 8790 for (i = 0; i < tp->irq_max; i++) 8791 tp->napi[i].irq_vec = msix_ent[i].vector; 8792 8793 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { 8794 tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS; 8795 tp->dev->real_num_tx_queues = tp->irq_cnt - 1; 8796 } else 8797 tp->dev->real_num_tx_queues = 1; 8798 8799 return true; 8800} 8801 8802static void tg3_ints_init(struct tg3 *tp) 8803{ 8804 if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI_OR_MSIX) && 8805 !(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) { 8806 /* All MSI supporting chips should support tagged 8807 * status. Assert that this is the case. 8808 */ 8809 netdev_warn(tp->dev, 8810 "MSI without TAGGED_STATUS? Not using MSI\n"); 8811 goto defcfg; 8812 } 8813 8814 if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) && tg3_enable_msix(tp)) 8815 tp->tg3_flags2 |= TG3_FLG2_USING_MSIX; 8816 else if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) && 8817 pci_enable_msi(tp->pdev) == 0) 8818 tp->tg3_flags2 |= TG3_FLG2_USING_MSI; 8819 8820 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) { 8821 u32 msi_mode = tr32(MSGINT_MODE); 8822 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) 8823 msi_mode |= MSGINT_MODE_MULTIVEC_EN; 8824 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE); 8825 } 8826defcfg: 8827 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) { 8828 tp->irq_cnt = 1; 8829 tp->napi[0].irq_vec = tp->pdev->irq; 8830 tp->dev->real_num_tx_queues = 1; 8831 } 8832} 8833 8834static void tg3_ints_fini(struct tg3 *tp) 8835{ 8836 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) 8837 pci_disable_msix(tp->pdev); 8838 else if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) 8839 pci_disable_msi(tp->pdev); 8840 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI_OR_MSIX; 8841 tp->tg3_flags3 &= ~TG3_FLG3_ENABLE_RSS; 8842} 8843 8844static int tg3_open(struct net_device *dev) 8845{ 8846 struct tg3 *tp = netdev_priv(dev); 8847 int i, err; 8848 8849 if (tp->fw_needed) { 8850 err = tg3_request_firmware(tp); 8851 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) { 8852 if (err) 8853 return err; 8854 } else if (err) { 8855 netdev_warn(tp->dev, "TSO capability disabled\n"); 8856 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE; 8857 } else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) { 8858 netdev_notice(tp->dev, "TSO capability restored\n"); 8859 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; 8860 } 8861 } 8862 8863 netif_carrier_off(tp->dev); 8864 8865 err = tg3_set_power_state(tp, PCI_D0); 8866 if (err) 8867 return err; 8868 8869 tg3_full_lock(tp, 0); 8870 8871 tg3_disable_ints(tp); 8872 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE; 8873 8874 tg3_full_unlock(tp); 8875 8876 /* 8877 * Setup interrupts first so we know how 8878 * many NAPI resources to allocate 8879 */ 8880 tg3_ints_init(tp); 8881 8882 /* The placement of this call is tied 8883 * to the setup and use of Host TX descriptors. 
8884 */ 8885 err = tg3_alloc_consistent(tp); 8886 if (err) 8887 goto err_out1; 8888 8889 tg3_napi_enable(tp); 8890 8891 for (i = 0; i < tp->irq_cnt; i++) { 8892 struct tg3_napi *tnapi = &tp->napi[i]; 8893 err = tg3_request_irq(tp, i); 8894 if (err) { 8895 for (i--; i >= 0; i--) 8896 free_irq(tnapi->irq_vec, tnapi); 8897 break; 8898 } 8899 } 8900 8901 if (err) 8902 goto err_out2; 8903 8904 tg3_full_lock(tp, 0); 8905 8906 err = tg3_init_hw(tp, 1); 8907 if (err) { 8908 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 8909 tg3_free_rings(tp); 8910 } else { 8911 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) 8912 tp->timer_offset = HZ; 8913 else 8914 tp->timer_offset = HZ / 10; 8915 8916 BUG_ON(tp->timer_offset > HZ); 8917 tp->timer_counter = tp->timer_multiplier = 8918 (HZ / tp->timer_offset); 8919 tp->asf_counter = tp->asf_multiplier = 8920 ((HZ / tp->timer_offset) * 2); 8921 8922 init_timer(&tp->timer); 8923 tp->timer.expires = jiffies + tp->timer_offset; 8924 tp->timer.data = (unsigned long) tp; 8925 tp->timer.function = tg3_timer; 8926 } 8927 8928 tg3_full_unlock(tp); 8929 8930 if (err) 8931 goto err_out3; 8932 8933 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { 8934 err = tg3_test_msi(tp); 8935 8936 if (err) { 8937 tg3_full_lock(tp, 0); 8938 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 8939 tg3_free_rings(tp); 8940 tg3_full_unlock(tp); 8941 8942 goto err_out2; 8943 } 8944 8945 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && 8946 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 && 8947 (tp->tg3_flags2 & TG3_FLG2_USING_MSI) && 8948 (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)) { 8949 u32 val = tr32(PCIE_TRANSACTION_CFG); 8950 8951 tw32(PCIE_TRANSACTION_CFG, 8952 val | PCIE_TRANS_CFG_1SHOT_MSI); 8953 } 8954 } 8955 8956 tg3_phy_start(tp); 8957 8958 tg3_full_lock(tp, 0); 8959 8960 add_timer(&tp->timer); 8961 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; 8962 tg3_enable_ints(tp); 8963 8964 tg3_full_unlock(tp); 8965 8966 netif_tx_start_all_queues(dev); 8967 8968 return 0; 8969 8970err_out3: 8971 for (i = tp->irq_cnt - 1; i >= 0; i--) { 8972 struct tg3_napi *tnapi = &tp->napi[i]; 8973 free_irq(tnapi->irq_vec, tnapi); 8974 } 8975 8976err_out2: 8977 tg3_napi_disable(tp); 8978 tg3_free_consistent(tp); 8979 8980err_out1: 8981 tg3_ints_fini(tp); 8982 return err; 8983} 8984 8985static struct net_device_stats *tg3_get_stats(struct net_device *); 8986static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *); 8987 8988static int tg3_close(struct net_device *dev) 8989{ 8990 int i; 8991 struct tg3 *tp = netdev_priv(dev); 8992 8993 tg3_napi_disable(tp); 8994 cancel_work_sync(&tp->reset_task); 8995 8996 netif_tx_stop_all_queues(dev); 8997 8998 del_timer_sync(&tp->timer); 8999 9000 tg3_phy_stop(tp); 9001 9002 tg3_full_lock(tp, 1); 9003 9004 tg3_disable_ints(tp); 9005 9006 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 9007 tg3_free_rings(tp); 9008 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE; 9009 9010 tg3_full_unlock(tp); 9011 9012 for (i = tp->irq_cnt - 1; i >= 0; i--) { 9013 struct tg3_napi *tnapi = &tp->napi[i]; 9014 free_irq(tnapi->irq_vec, tnapi); 9015 } 9016 9017 tg3_ints_fini(tp); 9018 9019 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev), 9020 sizeof(tp->net_stats_prev)); 9021 memcpy(&tp->estats_prev, tg3_get_estats(tp), 9022 sizeof(tp->estats_prev)); 9023 9024 tg3_free_consistent(tp); 9025 9026 tg3_set_power_state(tp, PCI_D3hot); 9027 9028 netif_carrier_off(tp->dev); 9029 9030 return 0; 9031} 9032 9033static inline unsigned long get_stat64(tg3_stat64_t *val) 9034{ 9035 unsigned long ret; 9036 9037#if (BITS_PER_LONG == 32) 9038 ret 
= val->low; 9039#else 9040 ret = ((u64)val->high << 32) | ((u64)val->low); 9041#endif 9042 return ret; 9043} 9044 9045static inline u64 get_estat64(tg3_stat64_t *val) 9046{ 9047 return ((u64)val->high << 32) | ((u64)val->low); 9048} 9049 9050static unsigned long calc_crc_errors(struct tg3 *tp) 9051{ 9052 struct tg3_hw_stats *hw_stats = tp->hw_stats; 9053 9054 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) && 9055 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || 9056 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) { 9057 u32 val; 9058 9059 spin_lock_bh(&tp->lock); 9060 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) { 9061 tg3_writephy(tp, MII_TG3_TEST1, 9062 val | MII_TG3_TEST1_CRC_EN); 9063 tg3_readphy(tp, 0x14, &val); 9064 } else 9065 val = 0; 9066 spin_unlock_bh(&tp->lock); 9067 9068 tp->phy_crc_errors += val; 9069 9070 return tp->phy_crc_errors; 9071 } 9072 9073 return get_stat64(&hw_stats->rx_fcs_errors); 9074} 9075 9076#define ESTAT_ADD(member) \ 9077 estats->member = old_estats->member + \ 9078 get_estat64(&hw_stats->member) 9079 9080static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp) 9081{ 9082 struct tg3_ethtool_stats *estats = &tp->estats; 9083 struct tg3_ethtool_stats *old_estats = &tp->estats_prev; 9084 struct tg3_hw_stats *hw_stats = tp->hw_stats; 9085 9086 if (!hw_stats) 9087 return old_estats; 9088 9089 ESTAT_ADD(rx_octets); 9090 ESTAT_ADD(rx_fragments); 9091 ESTAT_ADD(rx_ucast_packets); 9092 ESTAT_ADD(rx_mcast_packets); 9093 ESTAT_ADD(rx_bcast_packets); 9094 ESTAT_ADD(rx_fcs_errors); 9095 ESTAT_ADD(rx_align_errors); 9096 ESTAT_ADD(rx_xon_pause_rcvd); 9097 ESTAT_ADD(rx_xoff_pause_rcvd); 9098 ESTAT_ADD(rx_mac_ctrl_rcvd); 9099 ESTAT_ADD(rx_xoff_entered); 9100 ESTAT_ADD(rx_frame_too_long_errors); 9101 ESTAT_ADD(rx_jabbers); 9102 ESTAT_ADD(rx_undersize_packets); 9103 ESTAT_ADD(rx_in_length_errors); 9104 ESTAT_ADD(rx_out_length_errors); 9105 ESTAT_ADD(rx_64_or_less_octet_packets); 9106 ESTAT_ADD(rx_65_to_127_octet_packets); 9107 ESTAT_ADD(rx_128_to_255_octet_packets); 9108 ESTAT_ADD(rx_256_to_511_octet_packets); 9109 ESTAT_ADD(rx_512_to_1023_octet_packets); 9110 ESTAT_ADD(rx_1024_to_1522_octet_packets); 9111 ESTAT_ADD(rx_1523_to_2047_octet_packets); 9112 ESTAT_ADD(rx_2048_to_4095_octet_packets); 9113 ESTAT_ADD(rx_4096_to_8191_octet_packets); 9114 ESTAT_ADD(rx_8192_to_9022_octet_packets); 9115 9116 ESTAT_ADD(tx_octets); 9117 ESTAT_ADD(tx_collisions); 9118 ESTAT_ADD(tx_xon_sent); 9119 ESTAT_ADD(tx_xoff_sent); 9120 ESTAT_ADD(tx_flow_control); 9121 ESTAT_ADD(tx_mac_errors); 9122 ESTAT_ADD(tx_single_collisions); 9123 ESTAT_ADD(tx_mult_collisions); 9124 ESTAT_ADD(tx_deferred); 9125 ESTAT_ADD(tx_excessive_collisions); 9126 ESTAT_ADD(tx_late_collisions); 9127 ESTAT_ADD(tx_collide_2times); 9128 ESTAT_ADD(tx_collide_3times); 9129 ESTAT_ADD(tx_collide_4times); 9130 ESTAT_ADD(tx_collide_5times); 9131 ESTAT_ADD(tx_collide_6times); 9132 ESTAT_ADD(tx_collide_7times); 9133 ESTAT_ADD(tx_collide_8times); 9134 ESTAT_ADD(tx_collide_9times); 9135 ESTAT_ADD(tx_collide_10times); 9136 ESTAT_ADD(tx_collide_11times); 9137 ESTAT_ADD(tx_collide_12times); 9138 ESTAT_ADD(tx_collide_13times); 9139 ESTAT_ADD(tx_collide_14times); 9140 ESTAT_ADD(tx_collide_15times); 9141 ESTAT_ADD(tx_ucast_packets); 9142 ESTAT_ADD(tx_mcast_packets); 9143 ESTAT_ADD(tx_bcast_packets); 9144 ESTAT_ADD(tx_carrier_sense_errors); 9145 ESTAT_ADD(tx_discards); 9146 ESTAT_ADD(tx_errors); 9147 9148 ESTAT_ADD(dma_writeq_full); 9149 ESTAT_ADD(dma_write_prioq_full); 9150 ESTAT_ADD(rxbds_empty); 9151 ESTAT_ADD(rx_discards); 9152 
ESTAT_ADD(rx_errors); 9153 ESTAT_ADD(rx_threshold_hit); 9154 9155 ESTAT_ADD(dma_readq_full); 9156 ESTAT_ADD(dma_read_prioq_full); 9157 ESTAT_ADD(tx_comp_queue_full); 9158 9159 ESTAT_ADD(ring_set_send_prod_index); 9160 ESTAT_ADD(ring_status_update); 9161 ESTAT_ADD(nic_irqs); 9162 ESTAT_ADD(nic_avoided_irqs); 9163 ESTAT_ADD(nic_tx_threshold_hit); 9164 9165 return estats; 9166} 9167 9168static struct net_device_stats *tg3_get_stats(struct net_device *dev) 9169{ 9170 struct tg3 *tp = netdev_priv(dev); 9171 struct net_device_stats *stats = &tp->net_stats; 9172 struct net_device_stats *old_stats = &tp->net_stats_prev; 9173 struct tg3_hw_stats *hw_stats = tp->hw_stats; 9174 9175 if (!hw_stats) 9176 return old_stats; 9177 9178 stats->rx_packets = old_stats->rx_packets + 9179 get_stat64(&hw_stats->rx_ucast_packets) + 9180 get_stat64(&hw_stats->rx_mcast_packets) + 9181 get_stat64(&hw_stats->rx_bcast_packets); 9182 9183 stats->tx_packets = old_stats->tx_packets + 9184 get_stat64(&hw_stats->tx_ucast_packets) + 9185 get_stat64(&hw_stats->tx_mcast_packets) + 9186 get_stat64(&hw_stats->tx_bcast_packets); 9187 9188 stats->rx_bytes = old_stats->rx_bytes + 9189 get_stat64(&hw_stats->rx_octets); 9190 stats->tx_bytes = old_stats->tx_bytes + 9191 get_stat64(&hw_stats->tx_octets); 9192 9193 stats->rx_errors = old_stats->rx_errors + 9194 get_stat64(&hw_stats->rx_errors); 9195 stats->tx_errors = old_stats->tx_errors + 9196 get_stat64(&hw_stats->tx_errors) + 9197 get_stat64(&hw_stats->tx_mac_errors) + 9198 get_stat64(&hw_stats->tx_carrier_sense_errors) + 9199 get_stat64(&hw_stats->tx_discards); 9200 9201 stats->multicast = old_stats->multicast + 9202 get_stat64(&hw_stats->rx_mcast_packets); 9203 stats->collisions = old_stats->collisions + 9204 get_stat64(&hw_stats->tx_collisions); 9205 9206 stats->rx_length_errors = old_stats->rx_length_errors + 9207 get_stat64(&hw_stats->rx_frame_too_long_errors) + 9208 get_stat64(&hw_stats->rx_undersize_packets); 9209 9210 stats->rx_over_errors = old_stats->rx_over_errors + 9211 get_stat64(&hw_stats->rxbds_empty); 9212 stats->rx_frame_errors = old_stats->rx_frame_errors + 9213 get_stat64(&hw_stats->rx_align_errors); 9214 stats->tx_aborted_errors = old_stats->tx_aborted_errors + 9215 get_stat64(&hw_stats->tx_discards); 9216 stats->tx_carrier_errors = old_stats->tx_carrier_errors + 9217 get_stat64(&hw_stats->tx_carrier_sense_errors); 9218 9219 stats->rx_crc_errors = old_stats->rx_crc_errors + 9220 calc_crc_errors(tp); 9221 9222 stats->rx_missed_errors = old_stats->rx_missed_errors + 9223 get_stat64(&hw_stats->rx_discards); 9224 9225 return stats; 9226} 9227 9228static inline u32 calc_crc(unsigned char *buf, int len) 9229{ 9230 u32 reg; 9231 u32 tmp; 9232 int j, k; 9233 9234 reg = 0xffffffff; 9235 9236 for (j = 0; j < len; j++) { 9237 reg ^= buf[j]; 9238 9239 for (k = 0; k < 8; k++) { 9240 tmp = reg & 0x01; 9241 9242 reg >>= 1; 9243 9244 if (tmp) 9245 reg ^= 0xedb88320; 9246 } 9247 } 9248 9249 return ~reg; 9250} 9251 9252static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all) 9253{ 9254 /* accept or reject all multicast frames */ 9255 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0); 9256 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0); 9257 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0); 9258 tw32(MAC_HASH_REG_3, accept_all ? 
0xffffffff : 0); 9259} 9260 9261static void __tg3_set_rx_mode(struct net_device *dev) 9262{ 9263 struct tg3 *tp = netdev_priv(dev); 9264 u32 rx_mode; 9265 9266 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC | 9267 RX_MODE_KEEP_VLAN_TAG); 9268 9269 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG 9270 * flag clear. 9271 */ 9272#if TG3_VLAN_TAG_USED 9273 if (!tp->vlgrp && 9274 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) 9275 rx_mode |= RX_MODE_KEEP_VLAN_TAG; 9276#else 9277 /* By definition, VLAN is disabled always in this 9278 * case. 9279 */ 9280 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) 9281 rx_mode |= RX_MODE_KEEP_VLAN_TAG; 9282#endif 9283 9284 if (dev->flags & IFF_PROMISC) { 9285 /* Promiscuous mode. */ 9286 rx_mode |= RX_MODE_PROMISC; 9287 } else if (dev->flags & IFF_ALLMULTI) { 9288 /* Accept all multicast. */ 9289 tg3_set_multi(tp, 1); 9290 } else if (netdev_mc_empty(dev)) { 9291 /* Reject all multicast. */ 9292 tg3_set_multi(tp, 0); 9293 } else { 9294 /* Accept one or more multicast(s). */ 9295 struct netdev_hw_addr *ha; 9296 u32 mc_filter[4] = { 0, }; 9297 u32 regidx; 9298 u32 bit; 9299 u32 crc; 9300 9301 netdev_for_each_mc_addr(ha, dev) { 9302 crc = calc_crc(ha->addr, ETH_ALEN); 9303 bit = ~crc & 0x7f; 9304 regidx = (bit & 0x60) >> 5; 9305 bit &= 0x1f; 9306 mc_filter[regidx] |= (1 << bit); 9307 } 9308 9309 tw32(MAC_HASH_REG_0, mc_filter[0]); 9310 tw32(MAC_HASH_REG_1, mc_filter[1]); 9311 tw32(MAC_HASH_REG_2, mc_filter[2]); 9312 tw32(MAC_HASH_REG_3, mc_filter[3]); 9313 } 9314 9315 if (rx_mode != tp->rx_mode) { 9316 tp->rx_mode = rx_mode; 9317 tw32_f(MAC_RX_MODE, rx_mode); 9318 udelay(10); 9319 } 9320} 9321 9322static void tg3_set_rx_mode(struct net_device *dev) 9323{ 9324 struct tg3 *tp = netdev_priv(dev); 9325 9326 if (!netif_running(dev)) 9327 return; 9328 9329 tg3_full_lock(tp, 0); 9330 __tg3_set_rx_mode(dev); 9331 tg3_full_unlock(tp); 9332} 9333 9334#define TG3_REGDUMP_LEN (32 * 1024) 9335 9336static int tg3_get_regs_len(struct net_device *dev) 9337{ 9338 return TG3_REGDUMP_LEN; 9339} 9340 9341static void tg3_get_regs(struct net_device *dev, 9342 struct ethtool_regs *regs, void *_p) 9343{ 9344 u32 *p = _p; 9345 struct tg3 *tp = netdev_priv(dev); 9346 u8 *orig_p = _p; 9347 int i; 9348 9349 regs->version = 0; 9350 9351 memset(p, 0, TG3_REGDUMP_LEN); 9352 9353 if (tp->link_config.phy_is_low_power) 9354 return; 9355 9356 tg3_full_lock(tp, 0); 9357 9358#define __GET_REG32(reg) (*(p)++ = tr32(reg)) 9359#define GET_REG32_LOOP(base,len) \ 9360do { p = (u32 *)(orig_p + (base)); \ 9361 for (i = 0; i < len; i += 4) \ 9362 __GET_REG32((base) + i); \ 9363} while (0) 9364#define GET_REG32_1(reg) \ 9365do { p = (u32 *)(orig_p + (reg)); \ 9366 __GET_REG32((reg)); \ 9367} while (0) 9368 9369 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0); 9370 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200); 9371 GET_REG32_LOOP(MAC_MODE, 0x4f0); 9372 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0); 9373 GET_REG32_1(SNDDATAC_MODE); 9374 GET_REG32_LOOP(SNDBDS_MODE, 0x80); 9375 GET_REG32_LOOP(SNDBDI_MODE, 0x48); 9376 GET_REG32_1(SNDBDC_MODE); 9377 GET_REG32_LOOP(RCVLPC_MODE, 0x20); 9378 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c); 9379 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c); 9380 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c); 9381 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44); 9382 GET_REG32_1(RCVDCC_MODE); 9383 GET_REG32_LOOP(RCVBDI_MODE, 0x20); 9384 GET_REG32_LOOP(RCVCC_MODE, 0x14); 9385 GET_REG32_LOOP(RCVLSC_MODE, 0x08); 9386 GET_REG32_1(MBFREE_MODE); 9387 GET_REG32_LOOP(HOSTCC_MODE, 0x100); 9388 GET_REG32_LOOP(MEMARB_MODE, 0x10); 9389 
GET_REG32_LOOP(BUFMGR_MODE, 0x58); 9390 GET_REG32_LOOP(RDMAC_MODE, 0x08); 9391 GET_REG32_LOOP(WDMAC_MODE, 0x08); 9392 GET_REG32_1(RX_CPU_MODE); 9393 GET_REG32_1(RX_CPU_STATE); 9394 GET_REG32_1(RX_CPU_PGMCTR); 9395 GET_REG32_1(RX_CPU_HWBKPT); 9396 GET_REG32_1(TX_CPU_MODE); 9397 GET_REG32_1(TX_CPU_STATE); 9398 GET_REG32_1(TX_CPU_PGMCTR); 9399 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110); 9400 GET_REG32_LOOP(FTQ_RESET, 0x120); 9401 GET_REG32_LOOP(MSGINT_MODE, 0x0c); 9402 GET_REG32_1(DMAC_MODE); 9403 GET_REG32_LOOP(GRC_MODE, 0x4c); 9404 if (tp->tg3_flags & TG3_FLAG_NVRAM) 9405 GET_REG32_LOOP(NVRAM_CMD, 0x24); 9406 9407#undef __GET_REG32 9408#undef GET_REG32_LOOP 9409#undef GET_REG32_1 9410 9411 tg3_full_unlock(tp); 9412} 9413 9414static int tg3_get_eeprom_len(struct net_device *dev) 9415{ 9416 struct tg3 *tp = netdev_priv(dev); 9417 9418 return tp->nvram_size; 9419} 9420 9421static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) 9422{ 9423 struct tg3 *tp = netdev_priv(dev); 9424 int ret; 9425 u8 *pd; 9426 u32 i, offset, len, b_offset, b_count; 9427 __be32 val; 9428 9429 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) 9430 return -EINVAL; 9431 9432 if (tp->link_config.phy_is_low_power) 9433 return -EAGAIN; 9434 9435 offset = eeprom->offset; 9436 len = eeprom->len; 9437 eeprom->len = 0; 9438 9439 eeprom->magic = TG3_EEPROM_MAGIC; 9440 9441 if (offset & 3) { 9442 /* adjustments to start on required 4 byte boundary */ 9443 b_offset = offset & 3; 9444 b_count = 4 - b_offset; 9445 if (b_count > len) { 9446 /* i.e. offset=1 len=2 */ 9447 b_count = len; 9448 } 9449 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val); 9450 if (ret) 9451 return ret; 9452 memcpy(data, ((char *)&val) + b_offset, b_count); 9453 len -= b_count; 9454 offset += b_count; 9455 eeprom->len += b_count; 9456 } 9457 9458 /* read bytes up to the last 4 byte boundary */ 9459 pd = &data[eeprom->len]; 9460 for (i = 0; i < (len - (len & 3)); i += 4) { 9461 ret = tg3_nvram_read_be32(tp, offset + i, &val); 9462 if (ret) { 9463 eeprom->len += i; 9464 return ret; 9465 } 9466 memcpy(pd + i, &val, 4); 9467 } 9468 eeprom->len += i; 9469 9470 if (len & 3) { 9471 /* read last bytes not ending on 4 byte boundary */ 9472 pd = &data[eeprom->len]; 9473 b_count = len & 3; 9474 b_offset = offset + len - b_count; 9475 ret = tg3_nvram_read_be32(tp, b_offset, &val); 9476 if (ret) 9477 return ret; 9478 memcpy(pd, &val, b_count); 9479 eeprom->len += b_count; 9480 } 9481 return 0; 9482} 9483 9484static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 9485 9486static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) 9487{ 9488 struct tg3 *tp = netdev_priv(dev); 9489 int ret; 9490 u32 offset, len, b_offset, odd_len; 9491 u8 *buf; 9492 __be32 start, end; 9493 9494 if (tp->link_config.phy_is_low_power) 9495 return -EAGAIN; 9496 9497 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) || 9498 eeprom->magic != TG3_EEPROM_MAGIC) 9499 return -EINVAL; 9500 9501 offset = eeprom->offset; 9502 len = eeprom->len; 9503 9504 if ((b_offset = (offset & 3))) { 9505 /* adjustments to start on required 4 byte boundary */ 9506 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start); 9507 if (ret) 9508 return ret; 9509 len += b_offset; 9510 offset &= ~3; 9511 if (len < 4) 9512 len = 4; 9513 } 9514 9515 odd_len = 0; 9516 if (len & 3) { 9517 /* adjustments to end on required 4 byte boundary */ 9518 odd_len = 1; 9519 len = (len + 3) & ~3; 9520 ret = tg3_nvram_read_be32(tp, offset+len-4, &end); 9521 if (ret) 9522
return ret; 9523 } 9524 9525 buf = data; 9526 if (b_offset || odd_len) { 9527 buf = kmalloc(len, GFP_KERNEL); 9528 if (!buf) 9529 return -ENOMEM; 9530 if (b_offset) 9531 memcpy(buf, &start, 4); 9532 if (odd_len) 9533 memcpy(buf+len-4, &end, 4); 9534 memcpy(buf + b_offset, data, eeprom->len); 9535 } 9536 9537 ret = tg3_nvram_write_block(tp, offset, len, buf); 9538 9539 if (buf != data) 9540 kfree(buf); 9541 9542 return ret; 9543} 9544 9545static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 9546{ 9547 struct tg3 *tp = netdev_priv(dev); 9548 9549 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 9550 struct phy_device *phydev; 9551 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) 9552 return -EAGAIN; 9553 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; 9554 return phy_ethtool_gset(phydev, cmd); 9555 } 9556 9557 cmd->supported = (SUPPORTED_Autoneg); 9558 9559 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) 9560 cmd->supported |= (SUPPORTED_1000baseT_Half | 9561 SUPPORTED_1000baseT_Full); 9562 9563 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) { 9564 cmd->supported |= (SUPPORTED_100baseT_Half | 9565 SUPPORTED_100baseT_Full | 9566 SUPPORTED_10baseT_Half | 9567 SUPPORTED_10baseT_Full | 9568 SUPPORTED_TP); 9569 cmd->port = PORT_TP; 9570 } else { 9571 cmd->supported |= SUPPORTED_FIBRE; 9572 cmd->port = PORT_FIBRE; 9573 } 9574 9575 cmd->advertising = tp->link_config.advertising; 9576 if (netif_running(dev)) { 9577 cmd->speed = tp->link_config.active_speed; 9578 cmd->duplex = tp->link_config.active_duplex; 9579 } 9580 cmd->phy_address = tp->phy_addr; 9581 cmd->transceiver = XCVR_INTERNAL; 9582 cmd->autoneg = tp->link_config.autoneg; 9583 cmd->maxtxpkt = 0; 9584 cmd->maxrxpkt = 0; 9585 return 0; 9586} 9587 9588static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 9589{ 9590 struct tg3 *tp = netdev_priv(dev); 9591 9592 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 9593 struct phy_device *phydev; 9594 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) 9595 return -EAGAIN; 9596 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; 9597 return phy_ethtool_sset(phydev, cmd); 9598 } 9599 9600 if (cmd->autoneg != AUTONEG_ENABLE && 9601 cmd->autoneg != AUTONEG_DISABLE) 9602 return -EINVAL; 9603 9604 if (cmd->autoneg == AUTONEG_DISABLE && 9605 cmd->duplex != DUPLEX_FULL && 9606 cmd->duplex != DUPLEX_HALF) 9607 return -EINVAL; 9608 9609 if (cmd->autoneg == AUTONEG_ENABLE) { 9610 u32 mask = ADVERTISED_Autoneg | 9611 ADVERTISED_Pause | 9612 ADVERTISED_Asym_Pause; 9613 9614 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) 9615 mask |= ADVERTISED_1000baseT_Half | 9616 ADVERTISED_1000baseT_Full; 9617 9618 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) 9619 mask |= ADVERTISED_100baseT_Half | 9620 ADVERTISED_100baseT_Full | 9621 ADVERTISED_10baseT_Half | 9622 ADVERTISED_10baseT_Full | 9623 ADVERTISED_TP; 9624 else 9625 mask |= ADVERTISED_FIBRE; 9626 9627 if (cmd->advertising & ~mask) 9628 return -EINVAL; 9629 9630 mask &= (ADVERTISED_1000baseT_Half | 9631 ADVERTISED_1000baseT_Full | 9632 ADVERTISED_100baseT_Half | 9633 ADVERTISED_100baseT_Full | 9634 ADVERTISED_10baseT_Half | 9635 ADVERTISED_10baseT_Full); 9636 9637 cmd->advertising &= mask; 9638 } else { 9639 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { 9640 if (cmd->speed != SPEED_1000) 9641 return -EINVAL; 9642 9643 if (cmd->duplex != DUPLEX_FULL) 9644 return -EINVAL; 9645 } else { 9646 if (cmd->speed != SPEED_100 && 9647 cmd->speed != SPEED_10) 9648 return -EINVAL; 9649 } 9650 } 9651 9652 tg3_full_lock(tp, 0); 9653 9654 
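	/* Under the lock: record the requested settings, mirror them into
	 * the orig_* fields (which the low-power restore path in
	 * tg3_reset_hw() copies back), then renegotiate if the interface
	 * is running.
	 */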
tp->link_config.autoneg = cmd->autoneg; 9655 if (cmd->autoneg == AUTONEG_ENABLE) { 9656 tp->link_config.advertising = (cmd->advertising | 9657 ADVERTISED_Autoneg); 9658 tp->link_config.speed = SPEED_INVALID; 9659 tp->link_config.duplex = DUPLEX_INVALID; 9660 } else { 9661 tp->link_config.advertising = 0; 9662 tp->link_config.speed = cmd->speed; 9663 tp->link_config.duplex = cmd->duplex; 9664 } 9665 9666 tp->link_config.orig_speed = tp->link_config.speed; 9667 tp->link_config.orig_duplex = tp->link_config.duplex; 9668 tp->link_config.orig_autoneg = tp->link_config.autoneg; 9669 9670 if (netif_running(dev)) 9671 tg3_setup_phy(tp, 1); 9672 9673 tg3_full_unlock(tp); 9674 9675 return 0; 9676} 9677 9678static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 9679{ 9680 struct tg3 *tp = netdev_priv(dev); 9681 9682 strcpy(info->driver, DRV_MODULE_NAME); 9683 strcpy(info->version, DRV_MODULE_VERSION); 9684 strcpy(info->fw_version, tp->fw_ver); 9685 strcpy(info->bus_info, pci_name(tp->pdev)); 9686} 9687 9688static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 9689{ 9690 struct tg3 *tp = netdev_priv(dev); 9691 9692 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) && 9693 device_can_wakeup(&tp->pdev->dev)) 9694 wol->supported = WAKE_MAGIC; 9695 else 9696 wol->supported = 0; 9697 wol->wolopts = 0; 9698 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) && 9699 device_can_wakeup(&tp->pdev->dev)) 9700 wol->wolopts = WAKE_MAGIC; 9701 memset(&wol->sopass, 0, sizeof(wol->sopass)); 9702} 9703 9704static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 9705{ 9706 struct tg3 *tp = netdev_priv(dev); 9707 struct device *dp = &tp->pdev->dev; 9708 9709 if (wol->wolopts & ~WAKE_MAGIC) 9710 return -EINVAL; 9711 if ((wol->wolopts & WAKE_MAGIC) && 9712 !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp))) 9713 return -EINVAL; 9714 9715 spin_lock_bh(&tp->lock); 9716 if (wol->wolopts & WAKE_MAGIC) { 9717 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; 9718 device_set_wakeup_enable(dp, true); 9719 } else { 9720 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE; 9721 device_set_wakeup_enable(dp, false); 9722 } 9723 spin_unlock_bh(&tp->lock); 9724 9725 return 0; 9726} 9727 9728static u32 tg3_get_msglevel(struct net_device *dev) 9729{ 9730 struct tg3 *tp = netdev_priv(dev); 9731 return tp->msg_enable; 9732} 9733 9734static void tg3_set_msglevel(struct net_device *dev, u32 value) 9735{ 9736 struct tg3 *tp = netdev_priv(dev); 9737 tp->msg_enable = value; 9738} 9739 9740static int tg3_set_tso(struct net_device *dev, u32 value) 9741{ 9742 struct tg3 *tp = netdev_priv(dev); 9743 9744 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) { 9745 if (value) 9746 return -EINVAL; 9747 return 0; 9748 } 9749 if ((dev->features & NETIF_F_IPV6_CSUM) && 9750 ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) || 9751 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3))) { 9752 if (value) { 9753 dev->features |= NETIF_F_TSO6; 9754 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) || 9755 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 9756 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && 9757 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || 9758 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || 9759 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) 9760 dev->features |= NETIF_F_TSO_ECN; 9761 } else 9762 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN); 9763 } 9764 return ethtool_op_set_tso(dev, value); 9765} 9766 9767static int tg3_nway_reset(struct net_device *dev) 9768{ 9769 struct tg3 *tp = netdev_priv(dev); 9770 
int r; 9771 9772 if (!netif_running(dev)) 9773 return -EAGAIN; 9774 9775 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) 9776 return -EINVAL; 9777 9778 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 9779 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) 9780 return -EAGAIN; 9781 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); 9782 } else { 9783 u32 bmcr; 9784 9785 spin_lock_bh(&tp->lock); 9786 r = -EINVAL; 9787 tg3_readphy(tp, MII_BMCR, &bmcr); 9788 if (!tg3_readphy(tp, MII_BMCR, &bmcr) && 9789 ((bmcr & BMCR_ANENABLE) || 9790 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) { 9791 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART | 9792 BMCR_ANENABLE); 9793 r = 0; 9794 } 9795 spin_unlock_bh(&tp->lock); 9796 } 9797 9798 return r; 9799} 9800 9801static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) 9802{ 9803 struct tg3 *tp = netdev_priv(dev); 9804 9805 ering->rx_max_pending = TG3_RX_RING_SIZE - 1; 9806 ering->rx_mini_max_pending = 0; 9807 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) 9808 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1; 9809 else 9810 ering->rx_jumbo_max_pending = 0; 9811 9812 ering->tx_max_pending = TG3_TX_RING_SIZE - 1; 9813 9814 ering->rx_pending = tp->rx_pending; 9815 ering->rx_mini_pending = 0; 9816 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) 9817 ering->rx_jumbo_pending = tp->rx_jumbo_pending; 9818 else 9819 ering->rx_jumbo_pending = 0; 9820 9821 ering->tx_pending = tp->napi[0].tx_pending; 9822} 9823 9824static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) 9825{ 9826 struct tg3 *tp = netdev_priv(dev); 9827 int i, irq_sync = 0, err = 0; 9828 9829 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) || 9830 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) || 9831 (ering->tx_pending > TG3_TX_RING_SIZE - 1) || 9832 (ering->tx_pending <= MAX_SKB_FRAGS) || 9833 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) && 9834 (ering->tx_pending <= (MAX_SKB_FRAGS * 3)))) 9835 return -EINVAL; 9836 9837 if (netif_running(dev)) { 9838 tg3_phy_stop(tp); 9839 tg3_netif_stop(tp); 9840 irq_sync = 1; 9841 } 9842 9843 tg3_full_lock(tp, irq_sync); 9844 9845 tp->rx_pending = ering->rx_pending; 9846 9847 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) && 9848 tp->rx_pending > 63) 9849 tp->rx_pending = 63; 9850 tp->rx_jumbo_pending = ering->rx_jumbo_pending; 9851 9852 for (i = 0; i < TG3_IRQ_MAX_VECS; i++) 9853 tp->napi[i].tx_pending = ering->tx_pending; 9854 9855 if (netif_running(dev)) { 9856 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 9857 err = tg3_restart_hw(tp, 1); 9858 if (!err) 9859 tg3_netif_start(tp); 9860 } 9861 9862 tg3_full_unlock(tp); 9863 9864 if (irq_sync && !err) 9865 tg3_phy_start(tp); 9866 9867 return err; 9868} 9869 9870static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) 9871{ 9872 struct tg3 *tp = netdev_priv(dev); 9873 9874 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0; 9875 9876 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX) 9877 epause->rx_pause = 1; 9878 else 9879 epause->rx_pause = 0; 9880 9881 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX) 9882 epause->tx_pause = 1; 9883 else 9884 epause->tx_pause = 0; 9885} 9886 9887static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) 9888{ 9889 struct tg3 *tp = netdev_priv(dev); 9890 int err = 0; 9891 9892 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 9893 u32 newadv; 9894 struct phy_device *phydev; 9895 9896 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; 9897 9898 if 
(!(phydev->supported & SUPPORTED_Pause) || 9899 (!(phydev->supported & SUPPORTED_Asym_Pause) && 9900 ((epause->rx_pause && !epause->tx_pause) || 9901 (!epause->rx_pause && epause->tx_pause)))) 9902 return -EINVAL; 9903 9904 tp->link_config.flowctrl = 0; 9905 if (epause->rx_pause) { 9906 tp->link_config.flowctrl |= FLOW_CTRL_RX; 9907 9908 if (epause->tx_pause) { 9909 tp->link_config.flowctrl |= FLOW_CTRL_TX; 9910 newadv = ADVERTISED_Pause; 9911 } else 9912 newadv = ADVERTISED_Pause | 9913 ADVERTISED_Asym_Pause; 9914 } else if (epause->tx_pause) { 9915 tp->link_config.flowctrl |= FLOW_CTRL_TX; 9916 newadv = ADVERTISED_Asym_Pause; 9917 } else 9918 newadv = 0; 9919 9920 if (epause->autoneg) 9921 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; 9922 else 9923 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG; 9924 9925 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) { 9926 u32 oldadv = phydev->advertising & 9927 (ADVERTISED_Pause | ADVERTISED_Asym_Pause); 9928 if (oldadv != newadv) { 9929 phydev->advertising &= 9930 ~(ADVERTISED_Pause | 9931 ADVERTISED_Asym_Pause); 9932 phydev->advertising |= newadv; 9933 if (phydev->autoneg) { 9934 /* 9935 * Always renegotiate the link to 9936 * inform our link partner of our 9937 * flow control settings, even if the 9938 * flow control is forced. Let 9939 * tg3_adjust_link() do the final 9940 * flow control setup. 9941 */ 9942 return phy_start_aneg(phydev); 9943 } 9944 } 9945 9946 if (!epause->autoneg) 9947 tg3_setup_flow_control(tp, 0, 0); 9948 } else { 9949 tp->link_config.orig_advertising &= 9950 ~(ADVERTISED_Pause | 9951 ADVERTISED_Asym_Pause); 9952 tp->link_config.orig_advertising |= newadv; 9953 } 9954 } else { 9955 int irq_sync = 0; 9956 9957 if (netif_running(dev)) { 9958 tg3_netif_stop(tp); 9959 irq_sync = 1; 9960 } 9961 9962 tg3_full_lock(tp, irq_sync); 9963 9964 if (epause->autoneg) 9965 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; 9966 else 9967 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG; 9968 if (epause->rx_pause) 9969 tp->link_config.flowctrl |= FLOW_CTRL_RX; 9970 else 9971 tp->link_config.flowctrl &= ~FLOW_CTRL_RX; 9972 if (epause->tx_pause) 9973 tp->link_config.flowctrl |= FLOW_CTRL_TX; 9974 else 9975 tp->link_config.flowctrl &= ~FLOW_CTRL_TX; 9976 9977 if (netif_running(dev)) { 9978 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 9979 err = tg3_restart_hw(tp, 1); 9980 if (!err) 9981 tg3_netif_start(tp); 9982 } 9983 9984 tg3_full_unlock(tp); 9985 } 9986 9987 return err; 9988} 9989 9990static u32 tg3_get_rx_csum(struct net_device *dev) 9991{ 9992 struct tg3 *tp = netdev_priv(dev); 9993 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0; 9994} 9995 9996static int tg3_set_rx_csum(struct net_device *dev, u32 data) 9997{ 9998 struct tg3 *tp = netdev_priv(dev); 9999 10000 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) { 10001 if (data != 0) 10002 return -EINVAL; 10003 return 0; 10004 } 10005 10006 spin_lock_bh(&tp->lock); 10007 if (data) 10008 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS; 10009 else 10010 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS; 10011 spin_unlock_bh(&tp->lock); 10012 10013 return 0; 10014} 10015 10016static int tg3_set_tx_csum(struct net_device *dev, u32 data) 10017{ 10018 struct tg3 *tp = netdev_priv(dev); 10019 10020 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) { 10021 if (data != 0) 10022 return -EINVAL; 10023 return 0; 10024 } 10025 10026 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) 10027 ethtool_op_set_tx_ipv6_csum(dev, data); 10028 else 10029 ethtool_op_set_tx_csum(dev, data); 10030 10031 return 0; 10032} 10033 10034static int tg3_get_sset_count(struct net_device *dev, 
int sset) 10035{ 10036 switch (sset) { 10037 case ETH_SS_TEST: 10038 return TG3_NUM_TEST; 10039 case ETH_SS_STATS: 10040 return TG3_NUM_STATS; 10041 default: 10042 return -EOPNOTSUPP; 10043 } 10044} 10045 10046static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf) 10047{ 10048 switch (stringset) { 10049 case ETH_SS_STATS: 10050 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys)); 10051 break; 10052 case ETH_SS_TEST: 10053 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys)); 10054 break; 10055 default: 10056 WARN_ON(1); /* we need a WARN() */ 10057 break; 10058 } 10059} 10060 10061static int tg3_phys_id(struct net_device *dev, u32 data) 10062{ 10063 struct tg3 *tp = netdev_priv(dev); 10064 int i; 10065 10066 if (!netif_running(tp->dev)) 10067 return -EAGAIN; 10068 10069 if (data == 0) 10070 data = UINT_MAX / 2; 10071 10072 for (i = 0; i < (data * 2); i++) { 10073 if ((i % 2) == 0) 10074 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | 10075 LED_CTRL_1000MBPS_ON | 10076 LED_CTRL_100MBPS_ON | 10077 LED_CTRL_10MBPS_ON | 10078 LED_CTRL_TRAFFIC_OVERRIDE | 10079 LED_CTRL_TRAFFIC_BLINK | 10080 LED_CTRL_TRAFFIC_LED); 10081 10082 else 10083 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | 10084 LED_CTRL_TRAFFIC_OVERRIDE); 10085 10086 if (msleep_interruptible(500)) 10087 break; 10088 } 10089 tw32(MAC_LED_CTRL, tp->led_ctrl); 10090 return 0; 10091} 10092 10093static void tg3_get_ethtool_stats(struct net_device *dev, 10094 struct ethtool_stats *estats, u64 *tmp_stats) 10095{ 10096 struct tg3 *tp = netdev_priv(dev); 10097 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats)); 10098} 10099 10100#define NVRAM_TEST_SIZE 0x100 10101#define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14 10102#define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18 10103#define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c 10104#define NVRAM_SELFBOOT_HW_SIZE 0x20 10105#define NVRAM_SELFBOOT_DATA_SIZE 0x1c 10106 10107static int tg3_test_nvram(struct tg3 *tp) 10108{ 10109 u32 csum, magic; 10110 __be32 *buf; 10111 int i, j, k, err = 0, size; 10112 10113 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) 10114 return 0; 10115 10116 if (tg3_nvram_read(tp, 0, &magic) != 0) 10117 return -EIO; 10118 10119 if (magic == TG3_EEPROM_MAGIC) 10120 size = NVRAM_TEST_SIZE; 10121 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) { 10122 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) == 10123 TG3_EEPROM_SB_FORMAT_1) { 10124 switch (magic & TG3_EEPROM_SB_REVISION_MASK) { 10125 case TG3_EEPROM_SB_REVISION_0: 10126 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE; 10127 break; 10128 case TG3_EEPROM_SB_REVISION_2: 10129 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE; 10130 break; 10131 case TG3_EEPROM_SB_REVISION_3: 10132 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE; 10133 break; 10134 default: 10135 return 0; 10136 } 10137 } else 10138 return 0; 10139 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW) 10140 size = NVRAM_SELFBOOT_HW_SIZE; 10141 else 10142 return -EIO; 10143 10144 buf = kmalloc(size, GFP_KERNEL); 10145 if (buf == NULL) 10146 return -ENOMEM; 10147 10148 err = -EIO; 10149 for (i = 0, j = 0; i < size; i += 4, j++) { 10150 err = tg3_nvram_read_be32(tp, i, &buf[j]); 10151 if (err) 10152 break; 10153 } 10154 if (i < size) 10155 goto out; 10156 10157 /* Selfboot format */ 10158 magic = be32_to_cpu(buf[0]); 10159 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == 10160 TG3_EEPROM_MAGIC_FW) { 10161 u8 *buf8 = (u8 *) buf, csum8 = 0; 10162 10163 if ((magic & TG3_EEPROM_SB_REVISION_MASK) == 10164 TG3_EEPROM_SB_REVISION_2) { 10165 /* For rev 2, the csum doesn't include 
the MBA. */ 10166 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++) 10167 csum8 += buf8[i]; 10168 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++) 10169 csum8 += buf8[i]; 10170 } else { 10171 for (i = 0; i < size; i++) 10172 csum8 += buf8[i]; 10173 } 10174 10175 if (csum8 == 0) { 10176 err = 0; 10177 goto out; 10178 } 10179 10180 err = -EIO; 10181 goto out; 10182 } 10183 10184 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == 10185 TG3_EEPROM_MAGIC_HW) { 10186 u8 data[NVRAM_SELFBOOT_DATA_SIZE]; 10187 u8 parity[NVRAM_SELFBOOT_DATA_SIZE]; 10188 u8 *buf8 = (u8 *) buf; 10189 10190 /* Separate the parity bits and the data bytes. */ 10191 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) { 10192 if ((i == 0) || (i == 8)) { 10193 int l; 10194 u8 msk; 10195 10196 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1) 10197 parity[k++] = buf8[i] & msk; 10198 i++; 10199 } else if (i == 16) { 10200 int l; 10201 u8 msk; 10202 10203 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1) 10204 parity[k++] = buf8[i] & msk; 10205 i++; 10206 10207 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1) 10208 parity[k++] = buf8[i] & msk; 10209 i++; 10210 } 10211 data[j++] = buf8[i]; 10212 } 10213 10214 err = -EIO; 10215 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) { 10216 u8 hw8 = hweight8(data[i]); 10217 10218 if ((hw8 & 0x1) && parity[i]) 10219 goto out; 10220 else if (!(hw8 & 0x1) && !parity[i]) 10221 goto out; 10222 } 10223 err = 0; 10224 goto out; 10225 } 10226 10227 /* Bootstrap checksum at offset 0x10 */ 10228 csum = calc_crc((unsigned char *) buf, 0x10); 10229 if (csum != be32_to_cpu(buf[0x10/4])) 10230 goto out; 10231 10232 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */ 10233 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88); 10234 if (csum != be32_to_cpu(buf[0xfc/4])) 10235 goto out; 10236 10237 err = 0; 10238 10239out: 10240 kfree(buf); 10241 return err; 10242} 10243 10244#define TG3_SERDES_TIMEOUT_SEC 2 10245#define TG3_COPPER_TIMEOUT_SEC 6 10246 10247static int tg3_test_link(struct tg3 *tp) 10248{ 10249 int i, max; 10250 10251 if (!netif_running(tp->dev)) 10252 return -ENODEV; 10253 10254 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) 10255 max = TG3_SERDES_TIMEOUT_SEC; 10256 else 10257 max = TG3_COPPER_TIMEOUT_SEC; 10258 10259 for (i = 0; i < max; i++) { 10260 if (netif_carrier_ok(tp->dev)) 10261 return 0; 10262 10263 if (msleep_interruptible(1000)) 10264 break; 10265 } 10266 10267 return -EIO; 10268} 10269 10270/* Only test the commonly used registers */ 10271static int tg3_test_registers(struct tg3 *tp) 10272{ 10273 int i, is_5705, is_5750; 10274 u32 offset, read_mask, write_mask, val, save_val, read_val; 10275 static struct { 10276 u16 offset; 10277 u16 flags; 10278#define TG3_FL_5705 0x1 10279#define TG3_FL_NOT_5705 0x2 10280#define TG3_FL_NOT_5788 0x4 10281#define TG3_FL_NOT_5750 0x8 10282 u32 read_mask; 10283 u32 write_mask; 10284 } reg_tbl[] = { 10285 /* MAC Control Registers */ 10286 { MAC_MODE, TG3_FL_NOT_5705, 10287 0x00000000, 0x00ef6f8c }, 10288 { MAC_MODE, TG3_FL_5705, 10289 0x00000000, 0x01ef6b8c }, 10290 { MAC_STATUS, TG3_FL_NOT_5705, 10291 0x03800107, 0x00000000 }, 10292 { MAC_STATUS, TG3_FL_5705, 10293 0x03800100, 0x00000000 }, 10294 { MAC_ADDR_0_HIGH, 0x0000, 10295 0x00000000, 0x0000ffff }, 10296 { MAC_ADDR_0_LOW, 0x0000, 10297 0x00000000, 0xffffffff }, 10298 { MAC_RX_MTU_SIZE, 0x0000, 10299 0x00000000, 0x0000ffff }, 10300 { MAC_TX_MODE, 0x0000, 10301 0x00000000, 0x00000070 }, 10302 { MAC_TX_LENGTHS, 0x0000, 10303 0x00000000, 0x00003fff }, 10304 { MAC_RX_MODE, 
TG3_FL_NOT_5705, 10305 0x00000000, 0x000007fc }, 10306 { MAC_RX_MODE, TG3_FL_5705, 10307 0x00000000, 0x000007dc }, 10308 { MAC_HASH_REG_0, 0x0000, 10309 0x00000000, 0xffffffff }, 10310 { MAC_HASH_REG_1, 0x0000, 10311 0x00000000, 0xffffffff }, 10312 { MAC_HASH_REG_2, 0x0000, 10313 0x00000000, 0xffffffff }, 10314 { MAC_HASH_REG_3, 0x0000, 10315 0x00000000, 0xffffffff }, 10316 10317 /* Receive Data and Receive BD Initiator Control Registers. */ 10318 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705, 10319 0x00000000, 0xffffffff }, 10320 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705, 10321 0x00000000, 0xffffffff }, 10322 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705, 10323 0x00000000, 0x00000003 }, 10324 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705, 10325 0x00000000, 0xffffffff }, 10326 { RCVDBDI_STD_BD+0, 0x0000, 10327 0x00000000, 0xffffffff }, 10328 { RCVDBDI_STD_BD+4, 0x0000, 10329 0x00000000, 0xffffffff }, 10330 { RCVDBDI_STD_BD+8, 0x0000, 10331 0x00000000, 0xffff0002 }, 10332 { RCVDBDI_STD_BD+0xc, 0x0000, 10333 0x00000000, 0xffffffff }, 10334 10335 /* Receive BD Initiator Control Registers. */ 10336 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705, 10337 0x00000000, 0xffffffff }, 10338 { RCVBDI_STD_THRESH, TG3_FL_5705, 10339 0x00000000, 0x000003ff }, 10340 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705, 10341 0x00000000, 0xffffffff }, 10342 10343 /* Host Coalescing Control Registers. */ 10344 { HOSTCC_MODE, TG3_FL_NOT_5705, 10345 0x00000000, 0x00000004 }, 10346 { HOSTCC_MODE, TG3_FL_5705, 10347 0x00000000, 0x000000f6 }, 10348 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705, 10349 0x00000000, 0xffffffff }, 10350 { HOSTCC_RXCOL_TICKS, TG3_FL_5705, 10351 0x00000000, 0x000003ff }, 10352 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705, 10353 0x00000000, 0xffffffff }, 10354 { HOSTCC_TXCOL_TICKS, TG3_FL_5705, 10355 0x00000000, 0x000003ff }, 10356 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705, 10357 0x00000000, 0xffffffff }, 10358 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, 10359 0x00000000, 0x000000ff }, 10360 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705, 10361 0x00000000, 0xffffffff }, 10362 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, 10363 0x00000000, 0x000000ff }, 10364 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705, 10365 0x00000000, 0xffffffff }, 10366 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705, 10367 0x00000000, 0xffffffff }, 10368 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705, 10369 0x00000000, 0xffffffff }, 10370 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, 10371 0x00000000, 0x000000ff }, 10372 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705, 10373 0x00000000, 0xffffffff }, 10374 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, 10375 0x00000000, 0x000000ff }, 10376 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705, 10377 0x00000000, 0xffffffff }, 10378 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705, 10379 0x00000000, 0xffffffff }, 10380 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705, 10381 0x00000000, 0xffffffff }, 10382 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000, 10383 0x00000000, 0xffffffff }, 10384 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000, 10385 0x00000000, 0xffffffff }, 10386 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000, 10387 0xffffffff, 0x00000000 }, 10388 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000, 10389 0xffffffff, 0x00000000 }, 10390 10391 /* Buffer Manager Control Registers. 
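The mbuf pool address/size entries below are flagged TG3_FL_NOT_5750 and are therefore skipped on 5750-class chips by the loop that walks this table.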
*/ 10392 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750, 10393 0x00000000, 0x007fff80 }, 10394 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750, 10395 0x00000000, 0x007fffff }, 10396 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000, 10397 0x00000000, 0x0000003f }, 10398 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000, 10399 0x00000000, 0x000001ff }, 10400 { BUFMGR_MB_HIGH_WATER, 0x0000, 10401 0x00000000, 0x000001ff }, 10402 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705, 10403 0xffffffff, 0x00000000 }, 10404 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705, 10405 0xffffffff, 0x00000000 }, 10406 10407 /* Mailbox Registers */ 10408 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000, 10409 0x00000000, 0x000001ff }, 10410 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705, 10411 0x00000000, 0x000001ff }, 10412 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000, 10413 0x00000000, 0x000007ff }, 10414 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000, 10415 0x00000000, 0x000001ff }, 10416 10417 { 0xffff, 0x0000, 0x00000000, 0x00000000 }, 10418 }; 10419 10420 is_5705 = is_5750 = 0; 10421 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { 10422 is_5705 = 1; 10423 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) 10424 is_5750 = 1; 10425 } 10426 10427 for (i = 0; reg_tbl[i].offset != 0xffff; i++) { 10428 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705)) 10429 continue; 10430 10431 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705)) 10432 continue; 10433 10434 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) && 10435 (reg_tbl[i].flags & TG3_FL_NOT_5788)) 10436 continue; 10437 10438 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750)) 10439 continue; 10440 10441 offset = (u32) reg_tbl[i].offset; 10442 read_mask = reg_tbl[i].read_mask; 10443 write_mask = reg_tbl[i].write_mask; 10444 10445 /* Save the original register content */ 10446 save_val = tr32(offset); 10447 10448 /* Determine the read-only value. */ 10449 read_val = save_val & read_mask; 10450 10451 /* Write zero to the register, then make sure the read-only bits 10452 * are not changed and the read/write bits are all zeros. 10453 */ 10454 tw32(offset, 0); 10455 10456 val = tr32(offset); 10457 10458 /* Test the read-only and read/write bits. */ 10459 if (((val & read_mask) != read_val) || (val & write_mask)) 10460 goto out; 10461 10462 /* Write ones to all the bits defined by RdMask and WrMask, then 10463 * make sure the read-only bits are not changed and the 10464 * read/write bits are all ones. 10465 */ 10466 tw32(offset, read_mask | write_mask); 10467 10468 val = tr32(offset); 10469 10470 /* Test the read-only bits. */ 10471 if ((val & read_mask) != read_val) 10472 goto out; 10473 10474 /* Test the read/write bits. 
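		 * Every bit covered by write_mask must now read back as one;
		 * anything less indicates a stuck bit.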
		 */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}

static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
	int i;
	u32 j;

	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
		for (j = 0; j < len; j += 4) {
			u32 val;

			tg3_write_mem(tp, offset + j, test_pattern[i]);
			tg3_read_mem(tp, offset + j, &val);
			if (val != test_pattern[i])
				return -EIO;
		}
	}
	return 0;
}

static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
		mem_tbl = mem_tbl_5717;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		mem_tbl = mem_tbl_57765;
	else if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
		mem_tbl = mem_tbl_5755;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
					   mem_tbl[i].len)) != 0)
			break;
	}

	return err;
}

#define TG3_MAC_LOOPBACK	0
#define TG3_PHY_LOOPBACK	1

static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 desc_idx, coal_now;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;
	struct tg3_napi *tnapi, *rnapi;
	struct tg3_rx_prodring_set *tpr = &tp->prodring[0];

	tnapi = &tp->napi[0];
	rnapi = &tp->napi[0];
	if (tp->irq_cnt > 1) {
		rnapi = &tp->napi[1];
		if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
			tnapi = &tp->napi[1];
	}
	coal_now = tnapi->coal_now | rnapi->coal_now;
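
	/* The code below pushes one frame through the selected loopback
	 * path (internal MAC loopback or PHY loopback) and verifies the
	 * payload that comes back on the receive ring.  Both paths are
	 * exercised by the ethtool offline self test, e.g.
	 * "ethtool -t eth0 offline", where they feed the loopback result
	 * (data word 4) of the test report.
	 */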

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
			return 0;

		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_PORT_INT_LPBACK;
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			mac_mode |= MAC_MODE_LINK_POLARITY;
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
		u32 val;

		if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
			tg3_phy_fet_toggle_apd(tp, false);
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
		} else
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

		tg3_phy_toggle_automdix(tp, 0);

		tg3_writephy(tp, MII_BMCR, val);
		udelay(40);

		mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
		if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
			tg3_writephy(tp, MII_TG3_FET_PTEST,
				     MII_TG3_FET_PTEST_FRC_TX_LINK |
				     MII_TG3_FET_PTEST_FRC_TX_LOCK);
			/* The write needs to be flushed for the AC131 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
				tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		} else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
			u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
			if (masked_phy_id == TG3_PHY_ID_BCM5401)
				mac_mode &= ~MAC_MODE_LINK_POLARITY;
			else if (masked_phy_id == TG3_PHY_ID_BCM5411)
				mac_mode |= MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);
	} else {
		return -EINVAL;
	}

	err = -EIO;

	tx_len = 1514;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + 4);

	for (i = 14; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       rnapi->coal_now);

	udelay(10);

	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

	num_pkts = 0;

	tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, 0, 1);

	tnapi->tx_prod++;
	num_pkts++;

	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
	tr32_mailbox(tnapi->prodmbox);

	udelay(10);

	/* 350 usec to allow enough time on some 10/100 Mbps devices.
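	 * The polling loop below makes up to 35 passes with a 10 usec
	 * delay per pass, which is where the 350 usec figure comes from.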
*/ 10708 for (i = 0; i < 35; i++) { 10709 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 10710 coal_now); 10711 10712 udelay(10); 10713 10714 tx_idx = tnapi->hw_status->idx[0].tx_consumer; 10715 rx_idx = rnapi->hw_status->idx[0].rx_producer; 10716 if ((tx_idx == tnapi->tx_prod) && 10717 (rx_idx == (rx_start_idx + num_pkts))) 10718 break; 10719 } 10720 10721 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE); 10722 dev_kfree_skb(skb); 10723 10724 if (tx_idx != tnapi->tx_prod) 10725 goto out; 10726 10727 if (rx_idx != rx_start_idx + num_pkts) 10728 goto out; 10729 10730 desc = &rnapi->rx_rcb[rx_start_idx]; 10731 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; 10732 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; 10733 if (opaque_key != RXD_OPAQUE_RING_STD) 10734 goto out; 10735 10736 if ((desc->err_vlan & RXD_ERR_MASK) != 0 && 10737 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) 10738 goto out; 10739 10740 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; 10741 if (rx_len != tx_len) 10742 goto out; 10743 10744 rx_skb = tpr->rx_std_buffers[desc_idx].skb; 10745 10746 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping); 10747 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE); 10748 10749 for (i = 14; i < tx_len; i++) { 10750 if (*(rx_skb->data + i) != (u8) (i & 0xff)) 10751 goto out; 10752 } 10753 err = 0; 10754 10755 /* tg3_free_rings will unmap and free the rx_skb */ 10756out: 10757 return err; 10758} 10759 10760#define TG3_MAC_LOOPBACK_FAILED 1 10761#define TG3_PHY_LOOPBACK_FAILED 2 10762#define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \ 10763 TG3_PHY_LOOPBACK_FAILED) 10764 10765static int tg3_test_loopback(struct tg3 *tp) 10766{ 10767 int err = 0; 10768 u32 cpmuctrl = 0; 10769 10770 if (!netif_running(tp->dev)) 10771 return TG3_LOOPBACK_FAILED; 10772 10773 err = tg3_reset_hw(tp, 1); 10774 if (err) 10775 return TG3_LOOPBACK_FAILED; 10776 10777 /* Turn off gphy autopowerdown. */ 10778 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD) 10779 tg3_phy_toggle_apd(tp, false); 10780 10781 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) { 10782 int i; 10783 u32 status; 10784 10785 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER); 10786 10787 /* Wait for up to 40 microseconds to acquire lock. */ 10788 for (i = 0; i < 4; i++) { 10789 status = tr32(TG3_CPMU_MUTEX_GNT); 10790 if (status == CPMU_MUTEX_GNT_DRIVER) 10791 break; 10792 udelay(10); 10793 } 10794 10795 if (status != CPMU_MUTEX_GNT_DRIVER) 10796 return TG3_LOOPBACK_FAILED; 10797 10798 /* Turn off link-based power management. */ 10799 cpmuctrl = tr32(TG3_CPMU_CTRL); 10800 tw32(TG3_CPMU_CTRL, 10801 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE | 10802 CPMU_CTRL_LINK_AWARE_MODE)); 10803 } 10804 10805 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK)) 10806 err |= TG3_MAC_LOOPBACK_FAILED; 10807 10808 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) { 10809 tw32(TG3_CPMU_CTRL, cpmuctrl); 10810 10811 /* Release the mutex */ 10812 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER); 10813 } 10814 10815 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) && 10816 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) { 10817 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK)) 10818 err |= TG3_PHY_LOOPBACK_FAILED; 10819 } 10820 10821 /* Re-enable gphy autopowerdown. 
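It was switched off near the top of this function so the PHY could not power down in the middle of the loopback runs.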
*/ 10822 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD) 10823 tg3_phy_toggle_apd(tp, true); 10824 10825 return err; 10826} 10827 10828static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, 10829 u64 *data) 10830{ 10831 struct tg3 *tp = netdev_priv(dev); 10832 10833 if (tp->link_config.phy_is_low_power) 10834 tg3_set_power_state(tp, PCI_D0); 10835 10836 memset(data, 0, sizeof(u64) * TG3_NUM_TEST); 10837 10838 if (tg3_test_nvram(tp) != 0) { 10839 etest->flags |= ETH_TEST_FL_FAILED; 10840 data[0] = 1; 10841 } 10842 if (tg3_test_link(tp) != 0) { 10843 etest->flags |= ETH_TEST_FL_FAILED; 10844 data[1] = 1; 10845 } 10846 if (etest->flags & ETH_TEST_FL_OFFLINE) { 10847 int err, err2 = 0, irq_sync = 0; 10848 10849 if (netif_running(dev)) { 10850 tg3_phy_stop(tp); 10851 tg3_netif_stop(tp); 10852 irq_sync = 1; 10853 } 10854 10855 tg3_full_lock(tp, irq_sync); 10856 10857 tg3_halt(tp, RESET_KIND_SUSPEND, 1); 10858 err = tg3_nvram_lock(tp); 10859 tg3_halt_cpu(tp, RX_CPU_BASE); 10860 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 10861 tg3_halt_cpu(tp, TX_CPU_BASE); 10862 if (!err) 10863 tg3_nvram_unlock(tp); 10864 10865 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) 10866 tg3_phy_reset(tp); 10867 10868 if (tg3_test_registers(tp) != 0) { 10869 etest->flags |= ETH_TEST_FL_FAILED; 10870 data[2] = 1; 10871 } 10872 if (tg3_test_memory(tp) != 0) { 10873 etest->flags |= ETH_TEST_FL_FAILED; 10874 data[3] = 1; 10875 } 10876 if ((data[4] = tg3_test_loopback(tp)) != 0) 10877 etest->flags |= ETH_TEST_FL_FAILED; 10878 10879 tg3_full_unlock(tp); 10880 10881 if (tg3_test_interrupt(tp) != 0) { 10882 etest->flags |= ETH_TEST_FL_FAILED; 10883 data[5] = 1; 10884 } 10885 10886 tg3_full_lock(tp, 0); 10887 10888 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 10889 if (netif_running(dev)) { 10890 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; 10891 err2 = tg3_restart_hw(tp, 1); 10892 if (!err2) 10893 tg3_netif_start(tp); 10894 } 10895 10896 tg3_full_unlock(tp); 10897 10898 if (irq_sync && !err2) 10899 tg3_phy_start(tp); 10900 } 10901 if (tp->link_config.phy_is_low_power) 10902 tg3_set_power_state(tp, PCI_D3hot); 10903 10904} 10905 10906static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 10907{ 10908 struct mii_ioctl_data *data = if_mii(ifr); 10909 struct tg3 *tp = netdev_priv(dev); 10910 int err; 10911 10912 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 10913 struct phy_device *phydev; 10914 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) 10915 return -EAGAIN; 10916 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; 10917 return phy_mii_ioctl(phydev, data, cmd); 10918 } 10919 10920 switch (cmd) { 10921 case SIOCGMIIPHY: 10922 data->phy_id = tp->phy_addr; 10923 10924 /* fallthru */ 10925 case SIOCGMIIREG: { 10926 u32 mii_regval; 10927 10928 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) 10929 break; /* We have no PHY */ 10930 10931 if (tp->link_config.phy_is_low_power) 10932 return -EAGAIN; 10933 10934 spin_lock_bh(&tp->lock); 10935 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval); 10936 spin_unlock_bh(&tp->lock); 10937 10938 data->val_out = mii_regval; 10939 10940 return err; 10941 } 10942 10943 case SIOCSMIIREG: 10944 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) 10945 break; /* We have no PHY */ 10946 10947 if (tp->link_config.phy_is_low_power) 10948 return -EAGAIN; 10949 10950 spin_lock_bh(&tp->lock); 10951 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in); 10952 spin_unlock_bh(&tp->lock); 10953 10954 return err; 10955 10956 default: 10957 /* do nothing */ 10958 break; 10959 } 10960 return 
-EOPNOTSUPP; 10961} 10962 10963#if TG3_VLAN_TAG_USED 10964static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) 10965{ 10966 struct tg3 *tp = netdev_priv(dev); 10967 10968 if (!netif_running(dev)) { 10969 tp->vlgrp = grp; 10970 return; 10971 } 10972 10973 tg3_netif_stop(tp); 10974 10975 tg3_full_lock(tp, 0); 10976 10977 tp->vlgrp = grp; 10978 10979 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */ 10980 __tg3_set_rx_mode(dev); 10981 10982 tg3_netif_start(tp); 10983 10984 tg3_full_unlock(tp); 10985} 10986#endif 10987 10988static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) 10989{ 10990 struct tg3 *tp = netdev_priv(dev); 10991 10992 memcpy(ec, &tp->coal, sizeof(*ec)); 10993 return 0; 10994} 10995 10996static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) 10997{ 10998 struct tg3 *tp = netdev_priv(dev); 10999 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0; 11000 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0; 11001 11002 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { 11003 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT; 11004 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT; 11005 max_stat_coal_ticks = MAX_STAT_COAL_TICKS; 11006 min_stat_coal_ticks = MIN_STAT_COAL_TICKS; 11007 } 11008 11009 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) || 11010 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) || 11011 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) || 11012 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) || 11013 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) || 11014 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) || 11015 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) || 11016 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) || 11017 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) || 11018 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks)) 11019 return -EINVAL; 11020 11021 /* No rx interrupts will be generated if both are zero */ 11022 if ((ec->rx_coalesce_usecs == 0) && 11023 (ec->rx_max_coalesced_frames == 0)) 11024 return -EINVAL; 11025 11026 /* No tx interrupts will be generated if both are zero */ 11027 if ((ec->tx_coalesce_usecs == 0) && 11028 (ec->tx_max_coalesced_frames == 0)) 11029 return -EINVAL; 11030 11031 /* Only copy relevant parameters, ignore all others. 
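The adaptive and packet-rate members of struct ethtool_coalesce have no tg3 counterpart and are deliberately left untouched.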
*/ 11032 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs; 11033 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs; 11034 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames; 11035 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames; 11036 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq; 11037 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq; 11038 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq; 11039 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq; 11040 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs; 11041 11042 if (netif_running(dev)) { 11043 tg3_full_lock(tp, 0); 11044 __tg3_set_coalesce(tp, &tp->coal); 11045 tg3_full_unlock(tp); 11046 } 11047 return 0; 11048} 11049 11050static const struct ethtool_ops tg3_ethtool_ops = { 11051 .get_settings = tg3_get_settings, 11052 .set_settings = tg3_set_settings, 11053 .get_drvinfo = tg3_get_drvinfo, 11054 .get_regs_len = tg3_get_regs_len, 11055 .get_regs = tg3_get_regs, 11056 .get_wol = tg3_get_wol, 11057 .set_wol = tg3_set_wol, 11058 .get_msglevel = tg3_get_msglevel, 11059 .set_msglevel = tg3_set_msglevel, 11060 .nway_reset = tg3_nway_reset, 11061 .get_link = ethtool_op_get_link, 11062 .get_eeprom_len = tg3_get_eeprom_len, 11063 .get_eeprom = tg3_get_eeprom, 11064 .set_eeprom = tg3_set_eeprom, 11065 .get_ringparam = tg3_get_ringparam, 11066 .set_ringparam = tg3_set_ringparam, 11067 .get_pauseparam = tg3_get_pauseparam, 11068 .set_pauseparam = tg3_set_pauseparam, 11069 .get_rx_csum = tg3_get_rx_csum, 11070 .set_rx_csum = tg3_set_rx_csum, 11071 .set_tx_csum = tg3_set_tx_csum, 11072 .set_sg = ethtool_op_set_sg, 11073 .set_tso = tg3_set_tso, 11074 .self_test = tg3_self_test, 11075 .get_strings = tg3_get_strings, 11076 .phys_id = tg3_phys_id, 11077 .get_ethtool_stats = tg3_get_ethtool_stats, 11078 .get_coalesce = tg3_get_coalesce, 11079 .set_coalesce = tg3_set_coalesce, 11080 .get_sset_count = tg3_get_sset_count, 11081}; 11082 11083static void __devinit tg3_get_eeprom_size(struct tg3 *tp) 11084{ 11085 u32 cursize, val, magic; 11086 11087 tp->nvram_size = EEPROM_CHIP_SIZE; 11088 11089 if (tg3_nvram_read(tp, 0, &magic) != 0) 11090 return; 11091 11092 if ((magic != TG3_EEPROM_MAGIC) && 11093 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) && 11094 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW)) 11095 return; 11096 11097 /* 11098 * Size the chip by reading offsets at increasing powers of two. 11099 * When we encounter our validation signature, we know the addressing 11100 * has wrapped around, and thus have our chip size. 11101 */ 11102 cursize = 0x10; 11103 11104 while (cursize < tp->nvram_size) { 11105 if (tg3_nvram_read(tp, cursize, &val) != 0) 11106 return; 11107 11108 if (val == magic) 11109 break; 11110 11111 cursize <<= 1; 11112 } 11113 11114 tp->nvram_size = cursize; 11115} 11116 11117static void __devinit tg3_get_nvram_size(struct tg3 *tp) 11118{ 11119 u32 val; 11120 11121 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) || 11122 tg3_nvram_read(tp, 0, &val) != 0) 11123 return; 11124 11125 /* Selfboot format */ 11126 if (val != TG3_EEPROM_MAGIC) { 11127 tg3_get_eeprom_size(tp); 11128 return; 11129 } 11130 11131 if (tg3_nvram_read(tp, 0xf0, &val) == 0) { 11132 if (val != 0) { 11133 /* This is confusing. We want to operate on the 11134 * 16-bit value at offset 0xf2. 
The tg3_nvram_read() 11135 * call will read from NVRAM and byteswap the data 11136 * according to the byteswapping settings for all 11137 * other register accesses. This ensures the data we 11138 * want will always reside in the lower 16-bits. 11139 * However, the data in NVRAM is in LE format, which 11140 * means the data from the NVRAM read will always be 11141 * opposite the endianness of the CPU. The 16-bit 11142 * byteswap then brings the data to CPU endianness. 11143 */ 11144 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024; 11145 return; 11146 } 11147 } 11148 tp->nvram_size = TG3_NVRAM_SIZE_512KB; 11149} 11150 11151static void __devinit tg3_get_nvram_info(struct tg3 *tp) 11152{ 11153 u32 nvcfg1; 11154 11155 nvcfg1 = tr32(NVRAM_CFG1); 11156 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) { 11157 tp->tg3_flags2 |= TG3_FLG2_FLASH; 11158 } else { 11159 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 11160 tw32(NVRAM_CFG1, nvcfg1); 11161 } 11162 11163 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) || 11164 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { 11165 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) { 11166 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED: 11167 tp->nvram_jedecnum = JEDEC_ATMEL; 11168 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; 11169 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 11170 break; 11171 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED: 11172 tp->nvram_jedecnum = JEDEC_ATMEL; 11173 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE; 11174 break; 11175 case FLASH_VENDOR_ATMEL_EEPROM: 11176 tp->nvram_jedecnum = JEDEC_ATMEL; 11177 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 11178 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 11179 break; 11180 case FLASH_VENDOR_ST: 11181 tp->nvram_jedecnum = JEDEC_ST; 11182 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE; 11183 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 11184 break; 11185 case FLASH_VENDOR_SAIFUN: 11186 tp->nvram_jedecnum = JEDEC_SAIFUN; 11187 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE; 11188 break; 11189 case FLASH_VENDOR_SST_SMALL: 11190 case FLASH_VENDOR_SST_LARGE: 11191 tp->nvram_jedecnum = JEDEC_SST; 11192 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE; 11193 break; 11194 } 11195 } else { 11196 tp->nvram_jedecnum = JEDEC_ATMEL; 11197 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; 11198 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 11199 } 11200} 11201 11202static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1) 11203{ 11204 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) { 11205 case FLASH_5752PAGE_SIZE_256: 11206 tp->nvram_pagesize = 256; 11207 break; 11208 case FLASH_5752PAGE_SIZE_512: 11209 tp->nvram_pagesize = 512; 11210 break; 11211 case FLASH_5752PAGE_SIZE_1K: 11212 tp->nvram_pagesize = 1024; 11213 break; 11214 case FLASH_5752PAGE_SIZE_2K: 11215 tp->nvram_pagesize = 2048; 11216 break; 11217 case FLASH_5752PAGE_SIZE_4K: 11218 tp->nvram_pagesize = 4096; 11219 break; 11220 case FLASH_5752PAGE_SIZE_264: 11221 tp->nvram_pagesize = 264; 11222 break; 11223 case FLASH_5752PAGE_SIZE_528: 11224 tp->nvram_pagesize = 528; 11225 break; 11226 } 11227} 11228 11229static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp) 11230{ 11231 u32 nvcfg1; 11232 11233 nvcfg1 = tr32(NVRAM_CFG1); 11234 11235 /* NVRAM protection for TPM */ 11236 if (nvcfg1 & (1 << 27)) 11237 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM; 11238 11239 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 11240 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ: 11241 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ: 11242 tp->nvram_jedecnum = JEDEC_ATMEL; 11243 tp->tg3_flags |= 
TG3_FLAG_NVRAM_BUFFERED; 11244 break; 11245 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: 11246 tp->nvram_jedecnum = JEDEC_ATMEL; 11247 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 11248 tp->tg3_flags2 |= TG3_FLG2_FLASH; 11249 break; 11250 case FLASH_5752VENDOR_ST_M45PE10: 11251 case FLASH_5752VENDOR_ST_M45PE20: 11252 case FLASH_5752VENDOR_ST_M45PE40: 11253 tp->nvram_jedecnum = JEDEC_ST; 11254 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 11255 tp->tg3_flags2 |= TG3_FLG2_FLASH; 11256 break; 11257 } 11258 11259 if (tp->tg3_flags2 & TG3_FLG2_FLASH) { 11260 tg3_nvram_get_pagesize(tp, nvcfg1); 11261 } else { 11262 /* For eeprom, set pagesize to maximum eeprom size */ 11263 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 11264 11265 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 11266 tw32(NVRAM_CFG1, nvcfg1); 11267 } 11268} 11269 11270static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp) 11271{ 11272 u32 nvcfg1, protect = 0; 11273 11274 nvcfg1 = tr32(NVRAM_CFG1); 11275 11276 /* NVRAM protection for TPM */ 11277 if (nvcfg1 & (1 << 27)) { 11278 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM; 11279 protect = 1; 11280 } 11281 11282 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; 11283 switch (nvcfg1) { 11284 case FLASH_5755VENDOR_ATMEL_FLASH_1: 11285 case FLASH_5755VENDOR_ATMEL_FLASH_2: 11286 case FLASH_5755VENDOR_ATMEL_FLASH_3: 11287 case FLASH_5755VENDOR_ATMEL_FLASH_5: 11288 tp->nvram_jedecnum = JEDEC_ATMEL; 11289 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 11290 tp->tg3_flags2 |= TG3_FLG2_FLASH; 11291 tp->nvram_pagesize = 264; 11292 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 || 11293 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5) 11294 tp->nvram_size = (protect ? 0x3e200 : 11295 TG3_NVRAM_SIZE_512KB); 11296 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2) 11297 tp->nvram_size = (protect ? 0x1f200 : 11298 TG3_NVRAM_SIZE_256KB); 11299 else 11300 tp->nvram_size = (protect ? 0x1f200 : 11301 TG3_NVRAM_SIZE_128KB); 11302 break; 11303 case FLASH_5752VENDOR_ST_M45PE10: 11304 case FLASH_5752VENDOR_ST_M45PE20: 11305 case FLASH_5752VENDOR_ST_M45PE40: 11306 tp->nvram_jedecnum = JEDEC_ST; 11307 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 11308 tp->tg3_flags2 |= TG3_FLG2_FLASH; 11309 tp->nvram_pagesize = 256; 11310 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10) 11311 tp->nvram_size = (protect ? 11312 TG3_NVRAM_SIZE_64KB : 11313 TG3_NVRAM_SIZE_128KB); 11314 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20) 11315 tp->nvram_size = (protect ? 11316 TG3_NVRAM_SIZE_64KB : 11317 TG3_NVRAM_SIZE_256KB); 11318 else 11319 tp->nvram_size = (protect ? 
11320 TG3_NVRAM_SIZE_128KB : 11321 TG3_NVRAM_SIZE_512KB); 11322 break; 11323 } 11324} 11325 11326static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp) 11327{ 11328 u32 nvcfg1; 11329 11330 nvcfg1 = tr32(NVRAM_CFG1); 11331 11332 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 11333 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ: 11334 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: 11335 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ: 11336 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: 11337 tp->nvram_jedecnum = JEDEC_ATMEL; 11338 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 11339 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 11340 11341 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 11342 tw32(NVRAM_CFG1, nvcfg1); 11343 break; 11344 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: 11345 case FLASH_5755VENDOR_ATMEL_FLASH_1: 11346 case FLASH_5755VENDOR_ATMEL_FLASH_2: 11347 case FLASH_5755VENDOR_ATMEL_FLASH_3: 11348 tp->nvram_jedecnum = JEDEC_ATMEL; 11349 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 11350 tp->tg3_flags2 |= TG3_FLG2_FLASH; 11351 tp->nvram_pagesize = 264; 11352 break; 11353 case FLASH_5752VENDOR_ST_M45PE10: 11354 case FLASH_5752VENDOR_ST_M45PE20: 11355 case FLASH_5752VENDOR_ST_M45PE40: 11356 tp->nvram_jedecnum = JEDEC_ST; 11357 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 11358 tp->tg3_flags2 |= TG3_FLG2_FLASH; 11359 tp->nvram_pagesize = 256; 11360 break; 11361 } 11362} 11363 11364static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp) 11365{ 11366 u32 nvcfg1, protect = 0; 11367 11368 nvcfg1 = tr32(NVRAM_CFG1); 11369 11370 /* NVRAM protection for TPM */ 11371 if (nvcfg1 & (1 << 27)) { 11372 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM; 11373 protect = 1; 11374 } 11375 11376 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; 11377 switch (nvcfg1) { 11378 case FLASH_5761VENDOR_ATMEL_ADB021D: 11379 case FLASH_5761VENDOR_ATMEL_ADB041D: 11380 case FLASH_5761VENDOR_ATMEL_ADB081D: 11381 case FLASH_5761VENDOR_ATMEL_ADB161D: 11382 case FLASH_5761VENDOR_ATMEL_MDB021D: 11383 case FLASH_5761VENDOR_ATMEL_MDB041D: 11384 case FLASH_5761VENDOR_ATMEL_MDB081D: 11385 case FLASH_5761VENDOR_ATMEL_MDB161D: 11386 tp->nvram_jedecnum = JEDEC_ATMEL; 11387 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 11388 tp->tg3_flags2 |= TG3_FLG2_FLASH; 11389 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS; 11390 tp->nvram_pagesize = 256; 11391 break; 11392 case FLASH_5761VENDOR_ST_A_M45PE20: 11393 case FLASH_5761VENDOR_ST_A_M45PE40: 11394 case FLASH_5761VENDOR_ST_A_M45PE80: 11395 case FLASH_5761VENDOR_ST_A_M45PE16: 11396 case FLASH_5761VENDOR_ST_M_M45PE20: 11397 case FLASH_5761VENDOR_ST_M_M45PE40: 11398 case FLASH_5761VENDOR_ST_M_M45PE80: 11399 case FLASH_5761VENDOR_ST_M_M45PE16: 11400 tp->nvram_jedecnum = JEDEC_ST; 11401 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 11402 tp->tg3_flags2 |= TG3_FLG2_FLASH; 11403 tp->nvram_pagesize = 256; 11404 break; 11405 } 11406 11407 if (protect) { 11408 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT); 11409 } else { 11410 switch (nvcfg1) { 11411 case FLASH_5761VENDOR_ATMEL_ADB161D: 11412 case FLASH_5761VENDOR_ATMEL_MDB161D: 11413 case FLASH_5761VENDOR_ST_A_M45PE16: 11414 case FLASH_5761VENDOR_ST_M_M45PE16: 11415 tp->nvram_size = TG3_NVRAM_SIZE_2MB; 11416 break; 11417 case FLASH_5761VENDOR_ATMEL_ADB081D: 11418 case FLASH_5761VENDOR_ATMEL_MDB081D: 11419 case FLASH_5761VENDOR_ST_A_M45PE80: 11420 case FLASH_5761VENDOR_ST_M_M45PE80: 11421 tp->nvram_size = TG3_NVRAM_SIZE_1MB; 11422 break; 11423 case FLASH_5761VENDOR_ATMEL_ADB041D: 11424 case FLASH_5761VENDOR_ATMEL_MDB041D: 11425 case FLASH_5761VENDOR_ST_A_M45PE40: 11426 case 
FLASH_5761VENDOR_ST_M_M45PE40: 11427 tp->nvram_size = TG3_NVRAM_SIZE_512KB; 11428 break; 11429 case FLASH_5761VENDOR_ATMEL_ADB021D: 11430 case FLASH_5761VENDOR_ATMEL_MDB021D: 11431 case FLASH_5761VENDOR_ST_A_M45PE20: 11432 case FLASH_5761VENDOR_ST_M_M45PE20: 11433 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 11434 break; 11435 } 11436 } 11437} 11438 11439static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp) 11440{ 11441 tp->nvram_jedecnum = JEDEC_ATMEL; 11442 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 11443 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 11444} 11445 11446static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp) 11447{ 11448 u32 nvcfg1; 11449 11450 nvcfg1 = tr32(NVRAM_CFG1); 11451 11452 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 11453 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: 11454 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: 11455 tp->nvram_jedecnum = JEDEC_ATMEL; 11456 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 11457 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 11458 11459 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 11460 tw32(NVRAM_CFG1, nvcfg1); 11461 return; 11462 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: 11463 case FLASH_57780VENDOR_ATMEL_AT45DB011D: 11464 case FLASH_57780VENDOR_ATMEL_AT45DB011B: 11465 case FLASH_57780VENDOR_ATMEL_AT45DB021D: 11466 case FLASH_57780VENDOR_ATMEL_AT45DB021B: 11467 case FLASH_57780VENDOR_ATMEL_AT45DB041D: 11468 case FLASH_57780VENDOR_ATMEL_AT45DB041B: 11469 tp->nvram_jedecnum = JEDEC_ATMEL; 11470 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 11471 tp->tg3_flags2 |= TG3_FLG2_FLASH; 11472 11473 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 11474 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: 11475 case FLASH_57780VENDOR_ATMEL_AT45DB011D: 11476 case FLASH_57780VENDOR_ATMEL_AT45DB011B: 11477 tp->nvram_size = TG3_NVRAM_SIZE_128KB; 11478 break; 11479 case FLASH_57780VENDOR_ATMEL_AT45DB021D: 11480 case FLASH_57780VENDOR_ATMEL_AT45DB021B: 11481 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 11482 break; 11483 case FLASH_57780VENDOR_ATMEL_AT45DB041D: 11484 case FLASH_57780VENDOR_ATMEL_AT45DB041B: 11485 tp->nvram_size = TG3_NVRAM_SIZE_512KB; 11486 break; 11487 } 11488 break; 11489 case FLASH_5752VENDOR_ST_M45PE10: 11490 case FLASH_5752VENDOR_ST_M45PE20: 11491 case FLASH_5752VENDOR_ST_M45PE40: 11492 tp->nvram_jedecnum = JEDEC_ST; 11493 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 11494 tp->tg3_flags2 |= TG3_FLG2_FLASH; 11495 11496 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 11497 case FLASH_5752VENDOR_ST_M45PE10: 11498 tp->nvram_size = TG3_NVRAM_SIZE_128KB; 11499 break; 11500 case FLASH_5752VENDOR_ST_M45PE20: 11501 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 11502 break; 11503 case FLASH_5752VENDOR_ST_M45PE40: 11504 tp->nvram_size = TG3_NVRAM_SIZE_512KB; 11505 break; 11506 } 11507 break; 11508 default: 11509 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM; 11510 return; 11511 } 11512 11513 tg3_nvram_get_pagesize(tp, nvcfg1); 11514 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) 11515 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS; 11516} 11517 11518 11519static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp) 11520{ 11521 u32 nvcfg1; 11522 11523 nvcfg1 = tr32(NVRAM_CFG1); 11524 11525 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 11526 case FLASH_5717VENDOR_ATMEL_EEPROM: 11527 case FLASH_5717VENDOR_MICRO_EEPROM: 11528 tp->nvram_jedecnum = JEDEC_ATMEL; 11529 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 11530 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 11531 11532 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 11533 tw32(NVRAM_CFG1, nvcfg1); 11534 return; 11535 case 
FLASH_5717VENDOR_ATMEL_MDB011D: 11536 case FLASH_5717VENDOR_ATMEL_ADB011B: 11537 case FLASH_5717VENDOR_ATMEL_ADB011D: 11538 case FLASH_5717VENDOR_ATMEL_MDB021D: 11539 case FLASH_5717VENDOR_ATMEL_ADB021B: 11540 case FLASH_5717VENDOR_ATMEL_ADB021D: 11541 case FLASH_5717VENDOR_ATMEL_45USPT: 11542 tp->nvram_jedecnum = JEDEC_ATMEL; 11543 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 11544 tp->tg3_flags2 |= TG3_FLG2_FLASH; 11545 11546 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 11547 case FLASH_5717VENDOR_ATMEL_MDB021D: 11548 case FLASH_5717VENDOR_ATMEL_ADB021B: 11549 case FLASH_5717VENDOR_ATMEL_ADB021D: 11550 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 11551 break; 11552 default: 11553 tp->nvram_size = TG3_NVRAM_SIZE_128KB; 11554 break; 11555 } 11556 break; 11557 case FLASH_5717VENDOR_ST_M_M25PE10: 11558 case FLASH_5717VENDOR_ST_A_M25PE10: 11559 case FLASH_5717VENDOR_ST_M_M45PE10: 11560 case FLASH_5717VENDOR_ST_A_M45PE10: 11561 case FLASH_5717VENDOR_ST_M_M25PE20: 11562 case FLASH_5717VENDOR_ST_A_M25PE20: 11563 case FLASH_5717VENDOR_ST_M_M45PE20: 11564 case FLASH_5717VENDOR_ST_A_M45PE20: 11565 case FLASH_5717VENDOR_ST_25USPT: 11566 case FLASH_5717VENDOR_ST_45USPT: 11567 tp->nvram_jedecnum = JEDEC_ST; 11568 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 11569 tp->tg3_flags2 |= TG3_FLG2_FLASH; 11570 11571 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 11572 case FLASH_5717VENDOR_ST_M_M25PE20: 11573 case FLASH_5717VENDOR_ST_A_M25PE20: 11574 case FLASH_5717VENDOR_ST_M_M45PE20: 11575 case FLASH_5717VENDOR_ST_A_M45PE20: 11576 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 11577 break; 11578 default: 11579 tp->nvram_size = TG3_NVRAM_SIZE_128KB; 11580 break; 11581 } 11582 break; 11583 default: 11584 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM; 11585 return; 11586 } 11587 11588 tg3_nvram_get_pagesize(tp, nvcfg1); 11589 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) 11590 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS; 11591} 11592 11593/* Chips other than 5700/5701 use the NVRAM for fetching info. */ 11594static void __devinit tg3_nvram_init(struct tg3 *tp) 11595{ 11596 tw32_f(GRC_EEPROM_ADDR, 11597 (EEPROM_ADDR_FSM_RESET | 11598 (EEPROM_DEFAULT_CLOCK_PERIOD << 11599 EEPROM_ADDR_CLKPERD_SHIFT))); 11600 11601 msleep(1); 11602 11603 /* Enable seeprom accesses. 
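This is done by setting the AUTO_SEEPROM bit in the local control register, followed by a short delay for it to take effect.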
*/ 11604 tw32_f(GRC_LOCAL_CTRL, 11605 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM); 11606 udelay(100); 11607 11608 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && 11609 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) { 11610 tp->tg3_flags |= TG3_FLAG_NVRAM; 11611 11612 if (tg3_nvram_lock(tp)) { 11613 netdev_warn(tp->dev, 11614 "Cannot get nvram lock, %s failed\n", 11615 __func__); 11616 return; 11617 } 11618 tg3_enable_nvram_access(tp); 11619 11620 tp->nvram_size = 0; 11621 11622 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) 11623 tg3_get_5752_nvram_info(tp); 11624 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) 11625 tg3_get_5755_nvram_info(tp); 11626 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || 11627 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 11628 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) 11629 tg3_get_5787_nvram_info(tp); 11630 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) 11631 tg3_get_5761_nvram_info(tp); 11632 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 11633 tg3_get_5906_nvram_info(tp); 11634 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || 11635 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) 11636 tg3_get_57780_nvram_info(tp); 11637 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) 11638 tg3_get_5717_nvram_info(tp); 11639 else 11640 tg3_get_nvram_info(tp); 11641 11642 if (tp->nvram_size == 0) 11643 tg3_get_nvram_size(tp); 11644 11645 tg3_disable_nvram_access(tp); 11646 tg3_nvram_unlock(tp); 11647 11648 } else { 11649 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED); 11650 11651 tg3_get_eeprom_size(tp); 11652 } 11653} 11654 11655static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp, 11656 u32 offset, u32 len, u8 *buf) 11657{ 11658 int i, j, rc = 0; 11659 u32 val; 11660 11661 for (i = 0; i < len; i += 4) { 11662 u32 addr; 11663 __be32 data; 11664 11665 addr = offset + i; 11666 11667 memcpy(&data, buf + i, 4); 11668 11669 /* 11670 * The SEEPROM interface expects the data to always be opposite 11671 * the native endian format. We accomplish this by reversing 11672 * all the operations that would have been performed on the 11673 * data from a call to tg3_nvram_read_be32(). 
11674 */ 11675 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data))); 11676 11677 val = tr32(GRC_EEPROM_ADDR); 11678 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE); 11679 11680 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK | 11681 EEPROM_ADDR_READ); 11682 tw32(GRC_EEPROM_ADDR, val | 11683 (0 << EEPROM_ADDR_DEVID_SHIFT) | 11684 (addr & EEPROM_ADDR_ADDR_MASK) | 11685 EEPROM_ADDR_START | 11686 EEPROM_ADDR_WRITE); 11687 11688 for (j = 0; j < 1000; j++) { 11689 val = tr32(GRC_EEPROM_ADDR); 11690 11691 if (val & EEPROM_ADDR_COMPLETE) 11692 break; 11693 msleep(1); 11694 } 11695 if (!(val & EEPROM_ADDR_COMPLETE)) { 11696 rc = -EBUSY; 11697 break; 11698 } 11699 } 11700 11701 return rc; 11702} 11703 11704/* offset and length are dword aligned */ 11705static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len, 11706 u8 *buf) 11707{ 11708 int ret = 0; 11709 u32 pagesize = tp->nvram_pagesize; 11710 u32 pagemask = pagesize - 1; 11711 u32 nvram_cmd; 11712 u8 *tmp; 11713 11714 tmp = kmalloc(pagesize, GFP_KERNEL); 11715 if (tmp == NULL) 11716 return -ENOMEM; 11717 11718 while (len) { 11719 int j; 11720 u32 phy_addr, page_off, size; 11721 11722 phy_addr = offset & ~pagemask; 11723 11724 for (j = 0; j < pagesize; j += 4) { 11725 ret = tg3_nvram_read_be32(tp, phy_addr + j, 11726 (__be32 *) (tmp + j)); 11727 if (ret) 11728 break; 11729 } 11730 if (ret) 11731 break; 11732 11733 page_off = offset & pagemask; 11734 size = pagesize; 11735 if (len < size) 11736 size = len; 11737 11738 len -= size; 11739 11740 memcpy(tmp + page_off, buf, size); 11741 11742 offset = offset + (pagesize - page_off); 11743 11744 tg3_enable_nvram_access(tp); 11745 11746 /* 11747 * Before we can erase the flash page, we need 11748 * to issue a special "write enable" command. 11749 */ 11750 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; 11751 11752 if (tg3_nvram_exec_cmd(tp, nvram_cmd)) 11753 break; 11754 11755 /* Erase the target page */ 11756 tw32(NVRAM_ADDR, phy_addr); 11757 11758 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR | 11759 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE; 11760 11761 if (tg3_nvram_exec_cmd(tp, nvram_cmd)) 11762 break; 11763 11764 /* Issue another write enable to start the write. 
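The erase cycle above consumed the previous WREN, so a fresh write enable is required before the program commands that follow.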
*/ 11765 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; 11766 11767 if (tg3_nvram_exec_cmd(tp, nvram_cmd)) 11768 break; 11769 11770 for (j = 0; j < pagesize; j += 4) { 11771 __be32 data; 11772 11773 data = *((__be32 *) (tmp + j)); 11774 11775 tw32(NVRAM_WRDATA, be32_to_cpu(data)); 11776 11777 tw32(NVRAM_ADDR, phy_addr + j); 11778 11779 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | 11780 NVRAM_CMD_WR; 11781 11782 if (j == 0) 11783 nvram_cmd |= NVRAM_CMD_FIRST; 11784 else if (j == (pagesize - 4)) 11785 nvram_cmd |= NVRAM_CMD_LAST; 11786 11787 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd))) 11788 break; 11789 } 11790 if (ret) 11791 break; 11792 } 11793 11794 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE; 11795 tg3_nvram_exec_cmd(tp, nvram_cmd); 11796 11797 kfree(tmp); 11798 11799 return ret; 11800} 11801 11802/* offset and length are dword aligned */ 11803static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len, 11804 u8 *buf) 11805{ 11806 int i, ret = 0; 11807 11808 for (i = 0; i < len; i += 4, offset += 4) { 11809 u32 page_off, phy_addr, nvram_cmd; 11810 __be32 data; 11811 11812 memcpy(&data, buf + i, 4); 11813 tw32(NVRAM_WRDATA, be32_to_cpu(data)); 11814 11815 page_off = offset % tp->nvram_pagesize; 11816 11817 phy_addr = tg3_nvram_phys_addr(tp, offset); 11818 11819 tw32(NVRAM_ADDR, phy_addr); 11820 11821 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR; 11822 11823 if (page_off == 0 || i == 0) 11824 nvram_cmd |= NVRAM_CMD_FIRST; 11825 if (page_off == (tp->nvram_pagesize - 4)) 11826 nvram_cmd |= NVRAM_CMD_LAST; 11827 11828 if (i == (len - 4)) 11829 nvram_cmd |= NVRAM_CMD_LAST; 11830 11831 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 && 11832 !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) && 11833 (tp->nvram_jedecnum == JEDEC_ST) && 11834 (nvram_cmd & NVRAM_CMD_FIRST)) { 11835 11836 if ((ret = tg3_nvram_exec_cmd(tp, 11837 NVRAM_CMD_WREN | NVRAM_CMD_GO | 11838 NVRAM_CMD_DONE))) 11839 11840 break; 11841 } 11842 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) { 11843 /* We always do complete word writes to eeprom. 
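That is why both NVRAM_CMD_FIRST and NVRAM_CMD_LAST are set below, making each 4-byte write its own transaction.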
*/ 11844 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST); 11845 } 11846 11847 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd))) 11848 break; 11849 } 11850 return ret; 11851} 11852 11853/* offset and length are dword aligned */ 11854static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf) 11855{ 11856 int ret; 11857 11858 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) { 11859 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl & 11860 ~GRC_LCLCTRL_GPIO_OUTPUT1); 11861 udelay(40); 11862 } 11863 11864 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) { 11865 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf); 11866 } else { 11867 u32 grc_mode; 11868 11869 ret = tg3_nvram_lock(tp); 11870 if (ret) 11871 return ret; 11872 11873 tg3_enable_nvram_access(tp); 11874 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && 11875 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) 11876 tw32(NVRAM_WRITE1, 0x406); 11877 11878 grc_mode = tr32(GRC_MODE); 11879 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE); 11880 11881 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) || 11882 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) { 11883 11884 ret = tg3_nvram_write_block_buffered(tp, offset, len, 11885 buf); 11886 } else { 11887 ret = tg3_nvram_write_block_unbuffered(tp, offset, len, 11888 buf); 11889 } 11890 11891 grc_mode = tr32(GRC_MODE); 11892 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE); 11893 11894 tg3_disable_nvram_access(tp); 11895 tg3_nvram_unlock(tp); 11896 } 11897 11898 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) { 11899 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 11900 udelay(40); 11901 } 11902 11903 return ret; 11904} 11905 11906struct subsys_tbl_ent { 11907 u16 subsys_vendor, subsys_devid; 11908 u32 phy_id; 11909}; 11910 11911static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = { 11912 /* Broadcom boards. */ 11913 { TG3PCI_SUBVENDOR_ID_BROADCOM, 11914 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 }, 11915 { TG3PCI_SUBVENDOR_ID_BROADCOM, 11916 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 }, 11917 { TG3PCI_SUBVENDOR_ID_BROADCOM, 11918 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 }, 11919 { TG3PCI_SUBVENDOR_ID_BROADCOM, 11920 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 }, 11921 { TG3PCI_SUBVENDOR_ID_BROADCOM, 11922 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 }, 11923 { TG3PCI_SUBVENDOR_ID_BROADCOM, 11924 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 }, 11925 { TG3PCI_SUBVENDOR_ID_BROADCOM, 11926 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 }, 11927 { TG3PCI_SUBVENDOR_ID_BROADCOM, 11928 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 }, 11929 { TG3PCI_SUBVENDOR_ID_BROADCOM, 11930 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 }, 11931 { TG3PCI_SUBVENDOR_ID_BROADCOM, 11932 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 }, 11933 { TG3PCI_SUBVENDOR_ID_BROADCOM, 11934 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 }, 11935 11936 /* 3com boards. */ 11937 { TG3PCI_SUBVENDOR_ID_3COM, 11938 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 }, 11939 { TG3PCI_SUBVENDOR_ID_3COM, 11940 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 }, 11941 { TG3PCI_SUBVENDOR_ID_3COM, 11942 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 }, 11943 { TG3PCI_SUBVENDOR_ID_3COM, 11944 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 }, 11945 { TG3PCI_SUBVENDOR_ID_3COM, 11946 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 }, 11947 11948 /* DELL boards. 
*/ 11949 { TG3PCI_SUBVENDOR_ID_DELL, 11950 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 }, 11951 { TG3PCI_SUBVENDOR_ID_DELL, 11952 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 }, 11953 { TG3PCI_SUBVENDOR_ID_DELL, 11954 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 }, 11955 { TG3PCI_SUBVENDOR_ID_DELL, 11956 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 }, 11957 11958 /* Compaq boards. */ 11959 { TG3PCI_SUBVENDOR_ID_COMPAQ, 11960 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 }, 11961 { TG3PCI_SUBVENDOR_ID_COMPAQ, 11962 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 }, 11963 { TG3PCI_SUBVENDOR_ID_COMPAQ, 11964 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 }, 11965 { TG3PCI_SUBVENDOR_ID_COMPAQ, 11966 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 }, 11967 { TG3PCI_SUBVENDOR_ID_COMPAQ, 11968 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 }, 11969 11970 /* IBM boards. */ 11971 { TG3PCI_SUBVENDOR_ID_IBM, 11972 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 } 11973}; 11974 11975static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp) 11976{ 11977 int i; 11978 11979 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) { 11980 if ((subsys_id_to_phy_id[i].subsys_vendor == 11981 tp->pdev->subsystem_vendor) && 11982 (subsys_id_to_phy_id[i].subsys_devid == 11983 tp->pdev->subsystem_device)) 11984 return &subsys_id_to_phy_id[i]; 11985 } 11986 return NULL; 11987} 11988 11989static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) 11990{ 11991 u32 val; 11992 u16 pmcsr; 11993 11994 /* On some early chips the SRAM cannot be accessed in D3hot state, 11995 * so we need to make sure we're in D0. 11996 */ 11997 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr); 11998 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 11999 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr); 12000 msleep(1); 12001 12002 /* Make sure register accesses (indirect or otherwise) 12003 * will function correctly. 12004 */ 12005 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, 12006 tp->misc_host_ctrl); 12007 12008 /* The memory arbiter has to be enabled in order for SRAM accesses 12009 * to succeed. Normally on powerup the tg3 chip firmware will make 12010 * sure it is enabled, but other entities such as system netboot 12011 * code might disable it. 12012 */ 12013 val = tr32(MEMARB_MODE); 12014 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); 12015 12016 tp->phy_id = TG3_PHY_ID_INVALID; 12017 tp->led_ctrl = LED_CTRL_MODE_PHY_1; 12018 12019 /* Assume an onboard device and WOL capable by default.
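 * (either assumption may be revised below once the NVRAM/VCPU
 * configuration has been read)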
*/ 12020 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP; 12021 12022 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { 12023 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) { 12024 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT; 12025 tp->tg3_flags2 |= TG3_FLG2_IS_NIC; 12026 } 12027 val = tr32(VCPU_CFGSHDW); 12028 if (val & VCPU_CFGSHDW_ASPM_DBNC) 12029 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND; 12030 if ((val & VCPU_CFGSHDW_WOL_ENABLE) && 12031 (val & VCPU_CFGSHDW_WOL_MAGPKT)) 12032 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; 12033 goto done; 12034 } 12035 12036 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); 12037 if (val == NIC_SRAM_DATA_SIG_MAGIC) { 12038 u32 nic_cfg, led_cfg; 12039 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id; 12040 int eeprom_phy_serdes = 0; 12041 12042 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); 12043 tp->nic_sram_data_cfg = nic_cfg; 12044 12045 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver); 12046 ver >>= NIC_SRAM_DATA_VER_SHIFT; 12047 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) && 12048 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) && 12049 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) && 12050 (ver > 0) && (ver < 0x100)) 12051 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2); 12052 12053 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) 12054 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4); 12055 12056 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) == 12057 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER) 12058 eeprom_phy_serdes = 1; 12059 12060 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id); 12061 if (nic_phy_id != 0) { 12062 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK; 12063 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK; 12064 12065 eeprom_phy_id = (id1 >> 16) << 10; 12066 eeprom_phy_id |= (id2 & 0xfc00) << 16; 12067 eeprom_phy_id |= (id2 & 0x03ff) << 0; 12068 } else 12069 eeprom_phy_id = 0; 12070 12071 tp->phy_id = eeprom_phy_id; 12072 if (eeprom_phy_serdes) { 12073 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || 12074 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) 12075 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES; 12076 else 12077 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES; 12078 } 12079 12080 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) 12081 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK | 12082 SHASTA_EXT_LED_MODE_MASK); 12083 else 12084 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK; 12085 12086 switch (led_cfg) { 12087 default: 12088 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1: 12089 tp->led_ctrl = LED_CTRL_MODE_PHY_1; 12090 break; 12091 12092 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2: 12093 tp->led_ctrl = LED_CTRL_MODE_PHY_2; 12094 break; 12095 12096 case NIC_SRAM_DATA_CFG_LED_MODE_MAC: 12097 tp->led_ctrl = LED_CTRL_MODE_MAC; 12098 12099 /* Default to PHY_1_MODE if 0 (MAC_MODE) is 12100 * read on some older 5700/5701 bootcode. 
12101 */ 12102 if (GET_ASIC_REV(tp->pci_chip_rev_id) == 12103 ASIC_REV_5700 || 12104 GET_ASIC_REV(tp->pci_chip_rev_id) == 12105 ASIC_REV_5701) 12106 tp->led_ctrl = LED_CTRL_MODE_PHY_1; 12107 12108 break; 12109 12110 case SHASTA_EXT_LED_SHARED: 12111 tp->led_ctrl = LED_CTRL_MODE_SHARED; 12112 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 && 12113 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1) 12114 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 | 12115 LED_CTRL_MODE_PHY_2); 12116 break; 12117 12118 case SHASTA_EXT_LED_MAC: 12119 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC; 12120 break; 12121 12122 case SHASTA_EXT_LED_COMBO: 12123 tp->led_ctrl = LED_CTRL_MODE_COMBO; 12124 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) 12125 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 | 12126 LED_CTRL_MODE_PHY_2); 12127 break; 12128 12129 } 12130 12131 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || 12132 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) && 12133 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL) 12134 tp->led_ctrl = LED_CTRL_MODE_PHY_2; 12135 12136 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) 12137 tp->led_ctrl = LED_CTRL_MODE_PHY_1; 12138 12139 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) { 12140 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT; 12141 if ((tp->pdev->subsystem_vendor == 12142 PCI_VENDOR_ID_ARIMA) && 12143 (tp->pdev->subsystem_device == 0x205a || 12144 tp->pdev->subsystem_device == 0x2063)) 12145 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT; 12146 } else { 12147 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT; 12148 tp->tg3_flags2 |= TG3_FLG2_IS_NIC; 12149 } 12150 12151 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { 12152 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF; 12153 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) 12154 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE; 12155 } 12156 12157 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) && 12158 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)) 12159 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE; 12160 12161 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES && 12162 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)) 12163 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP; 12164 12165 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) && 12166 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) 12167 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; 12168 12169 if (cfg2 & (1 << 17)) 12170 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING; 12171 12172 /* serdes signal pre-emphasis in register 0x590 set by */ 12173 /* bootcode if bit 18 is set */ 12174 if (cfg2 & (1 << 18)) 12175 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS; 12176 12177 if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && 12178 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) && 12179 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN)) 12180 tp->tg3_flags3 |= TG3_FLG3_PHY_ENABLE_APD; 12181 12182 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { 12183 u32 cfg3; 12184 12185 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3); 12186 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE) 12187 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND; 12188 } 12189 12190 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE) 12191 tp->tg3_flags3 |= TG3_FLG3_RGMII_INBAND_DISABLE; 12192 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN) 12193 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN; 12194 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN) 12195 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN; 12196 } 12197done: 12198 device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP); 12199 device_set_wakeup_enable(&tp->pdev->dev, 12200 tp->tg3_flags & TG3_FLAG_WOL_ENABLE); 12201} 12202 12203static int __devinit tg3_issue_otp_command(struct tg3 *tp, 
u32 cmd) 12204{ 12205 int i; 12206 u32 val; 12207 12208 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START); 12209 tw32(OTP_CTRL, cmd); 12210 12211 /* Wait for up to 1 ms for command to execute. */ 12212 for (i = 0; i < 100; i++) { 12213 val = tr32(OTP_STATUS); 12214 if (val & OTP_STATUS_CMD_DONE) 12215 break; 12216 udelay(10); 12217 } 12218 12219 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY; 12220} 12221 12222/* Read the gphy configuration from the OTP region of the chip. The gphy 12223 * configuration is a 32-bit value that straddles the alignment boundary. 12224 * We do two 32-bit reads and then shift and merge the results. 12225 */ 12226static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp) 12227{ 12228 u32 bhalf_otp, thalf_otp; 12229 12230 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC); 12231 12232 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT)) 12233 return 0; 12234 12235 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1); 12236 12237 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ)) 12238 return 0; 12239 12240 thalf_otp = tr32(OTP_READ_DATA); 12241 12242 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2); 12243 12244 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ)) 12245 return 0; 12246 12247 bhalf_otp = tr32(OTP_READ_DATA); 12248 12249 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16); 12250} 12251 12252static int __devinit tg3_phy_probe(struct tg3 *tp) 12253{ 12254 u32 hw_phy_id_1, hw_phy_id_2; 12255 u32 hw_phy_id, hw_phy_id_masked; 12256 int err; 12257 12258 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) 12259 return tg3_phy_init(tp); 12260 12261 /* Reading the PHY ID register can conflict with ASF 12262 * firmware access to the PHY hardware. 12263 */ 12264 err = 0; 12265 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) || 12266 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) { 12267 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID; 12268 } else { 12269 /* Now read the physical PHY_ID from the chip and verify 12270 * that it is sane. If it doesn't look good, we fall back 12271 * to the PHY_ID found in the eeprom area and, failing 12272 * that, to the hard-coded subsys device table. 12273 */ 12274 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1); 12275 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2); 12276 12277 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10; 12278 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16; 12279 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0; 12280 12281 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK; 12282 } 12283 12284 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) { 12285 tp->phy_id = hw_phy_id; 12286 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002) 12287 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES; 12288 else 12289 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES; 12290 } else { 12291 if (tp->phy_id != TG3_PHY_ID_INVALID) { 12292 /* Do nothing, phy ID already set up in 12293 * tg3_get_eeprom_hw_cfg(). 12294 */ 12295 } else { 12296 struct subsys_tbl_ent *p; 12297 12298 /* No eeprom signature? Try the hardcoded 12299 * subsys device table.
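 * (tg3_lookup_by_subsys() above matches the PCI subsystem
 * vendor/device IDs against subsys_id_to_phy_id[])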
12300 */ 12301 p = tg3_lookup_by_subsys(tp); 12302 if (!p) 12303 return -ENODEV; 12304 12305 tp->phy_id = p->phy_id; 12306 if (!tp->phy_id || 12307 tp->phy_id == TG3_PHY_ID_BCM8002) 12308 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES; 12309 } 12310 } 12311 12312 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) && 12313 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) && 12314 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { 12315 u32 bmsr, adv_reg, tg3_ctrl, mask; 12316 12317 tg3_readphy(tp, MII_BMSR, &bmsr); 12318 if (!tg3_readphy(tp, MII_BMSR, &bmsr) && 12319 (bmsr & BMSR_LSTATUS)) 12320 goto skip_phy_reset; 12321 12322 err = tg3_phy_reset(tp); 12323 if (err) 12324 return err; 12325 12326 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL | 12327 ADVERTISE_100HALF | ADVERTISE_100FULL | 12328 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP); 12329 tg3_ctrl = 0; 12330 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) { 12331 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF | 12332 MII_TG3_CTRL_ADV_1000_FULL); 12333 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || 12334 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) 12335 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER | 12336 MII_TG3_CTRL_ENABLE_AS_MASTER); 12337 } 12338 12339 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | 12340 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | 12341 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full); 12342 if (!tg3_copper_is_advertising_all(tp, mask)) { 12343 tg3_writephy(tp, MII_ADVERTISE, adv_reg); 12344 12345 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) 12346 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl); 12347 12348 tg3_writephy(tp, MII_BMCR, 12349 BMCR_ANENABLE | BMCR_ANRESTART); 12350 } 12351 tg3_phy_set_wirespeed(tp); 12352 12353 tg3_writephy(tp, MII_ADVERTISE, adv_reg); 12354 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) 12355 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl); 12356 } 12357 12358skip_phy_reset: 12359 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { 12360 err = tg3_init_5401phy_dsp(tp); 12361 if (err) 12362 return err; 12363 12364 err = tg3_init_5401phy_dsp(tp); 12365 } 12366 12367 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) 12368 tp->link_config.advertising = 12369 (ADVERTISED_1000baseT_Half | 12370 ADVERTISED_1000baseT_Full | 12371 ADVERTISED_Autoneg | 12372 ADVERTISED_FIBRE); 12373 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY) 12374 tp->link_config.advertising &= 12375 ~(ADVERTISED_1000baseT_Half | 12376 ADVERTISED_1000baseT_Full); 12377 12378 return err; 12379} 12380 12381static void __devinit tg3_read_vpd(struct tg3 *tp) 12382{ 12383 u8 vpd_data[TG3_NVM_VPD_LEN]; 12384 unsigned int block_end, rosize, len; 12385 int j, i = 0; 12386 u32 magic; 12387 12388 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) || 12389 tg3_nvram_read(tp, 0x0, &magic)) 12390 goto out_not_found; 12391 12392 if (magic == TG3_EEPROM_MAGIC) { 12393 for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) { 12394 u32 tmp; 12395 12396 /* The data is in little-endian format in NVRAM. 12397 * Use the big-endian read routines to preserve 12398 * the byte order as it exists in NVRAM. 
12399 */ 12400 if (tg3_nvram_read_be32(tp, TG3_NVM_VPD_OFF + i, &tmp)) 12401 goto out_not_found; 12402 12403 memcpy(&vpd_data[i], &tmp, sizeof(tmp)); 12404 } 12405 } else { 12406 ssize_t cnt; 12407 unsigned int pos = 0; 12408 12409 for (; pos < TG3_NVM_VPD_LEN && i < 3; i++, pos += cnt) { 12410 cnt = pci_read_vpd(tp->pdev, pos, 12411 TG3_NVM_VPD_LEN - pos, 12412 &vpd_data[pos]); 12413 if (cnt == -ETIMEDOUT || cnt == -EINTR) 12414 cnt = 0; 12415 else if (cnt < 0) 12416 goto out_not_found; 12417 } 12418 if (pos != TG3_NVM_VPD_LEN) 12419 goto out_not_found; 12420 } 12421 12422 i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN, 12423 PCI_VPD_LRDT_RO_DATA); 12424 if (i < 0) 12425 goto out_not_found; 12426 12427 rosize = pci_vpd_lrdt_size(&vpd_data[i]); 12428 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize; 12429 i += PCI_VPD_LRDT_TAG_SIZE; 12430 12431 if (block_end > TG3_NVM_VPD_LEN) 12432 goto out_not_found; 12433 12434 j = pci_vpd_find_info_keyword(vpd_data, i, rosize, 12435 PCI_VPD_RO_KEYWORD_MFR_ID); 12436 if (j > 0) { 12437 len = pci_vpd_info_field_size(&vpd_data[j]); 12438 12439 j += PCI_VPD_INFO_FLD_HDR_SIZE; 12440 if (j + len > block_end || len != 4 || 12441 memcmp(&vpd_data[j], "1028", 4)) 12442 goto partno; 12443 12444 j = pci_vpd_find_info_keyword(vpd_data, i, rosize, 12445 PCI_VPD_RO_KEYWORD_VENDOR0); 12446 if (j < 0) 12447 goto partno; 12448 12449 len = pci_vpd_info_field_size(&vpd_data[j]); 12450 12451 j += PCI_VPD_INFO_FLD_HDR_SIZE; 12452 if (j + len > block_end) 12453 goto partno; 12454 12455 memcpy(tp->fw_ver, &vpd_data[j], len); 12456 strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1); 12457 } 12458 12459partno: 12460 i = pci_vpd_find_info_keyword(vpd_data, i, rosize, 12461 PCI_VPD_RO_KEYWORD_PARTNO); 12462 if (i < 0) 12463 goto out_not_found; 12464 12465 len = pci_vpd_info_field_size(&vpd_data[i]); 12466 12467 i += PCI_VPD_INFO_FLD_HDR_SIZE; 12468 if (len > TG3_BPN_SIZE || 12469 (len + i) > TG3_NVM_VPD_LEN) 12470 goto out_not_found; 12471 12472 memcpy(tp->board_part_number, &vpd_data[i], len); 12473 12474 return; 12475 12476out_not_found: 12477 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 12478 strcpy(tp->board_part_number, "BCM95906"); 12479 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 && 12480 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780) 12481 strcpy(tp->board_part_number, "BCM57780"); 12482 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 && 12483 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760) 12484 strcpy(tp->board_part_number, "BCM57760"); 12485 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 && 12486 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790) 12487 strcpy(tp->board_part_number, "BCM57790"); 12488 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 && 12489 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788) 12490 strcpy(tp->board_part_number, "BCM57788"); 12491 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 && 12492 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761) 12493 strcpy(tp->board_part_number, "BCM57761"); 12494 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 && 12495 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765) 12496 strcpy(tp->board_part_number, "BCM57765"); 12497 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 && 12498 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781) 12499 strcpy(tp->board_part_number, "BCM57781"); 12500 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 && 12501 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785) 12502
strcpy(tp->board_part_number, "BCM57785"); 12503 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 && 12504 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791) 12505 strcpy(tp->board_part_number, "BCM57791"); 12506 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 && 12507 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795) 12508 strcpy(tp->board_part_number, "BCM57795"); 12509 else 12510 strcpy(tp->board_part_number, "none"); 12511} 12512 12513static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset) 12514{ 12515 u32 val; 12516 12517 if (tg3_nvram_read(tp, offset, &val) || 12518 (val & 0xfc000000) != 0x0c000000 || 12519 tg3_nvram_read(tp, offset + 4, &val) || 12520 val != 0) 12521 return 0; 12522 12523 return 1; 12524} 12525 12526static void __devinit tg3_read_bc_ver(struct tg3 *tp) 12527{ 12528 u32 val, offset, start, ver_offset; 12529 int i, dst_off; 12530 bool newver = false; 12531 12532 if (tg3_nvram_read(tp, 0xc, &offset) || 12533 tg3_nvram_read(tp, 0x4, &start)) 12534 return; 12535 12536 offset = tg3_nvram_logical_addr(tp, offset); 12537 12538 if (tg3_nvram_read(tp, offset, &val)) 12539 return; 12540 12541 if ((val & 0xfc000000) == 0x0c000000) { 12542 if (tg3_nvram_read(tp, offset + 4, &val)) 12543 return; 12544 12545 if (val == 0) 12546 newver = true; 12547 } 12548 12549 dst_off = strlen(tp->fw_ver); 12550 12551 if (newver) { 12552 if (TG3_VER_SIZE - dst_off < 16 || 12553 tg3_nvram_read(tp, offset + 8, &ver_offset)) 12554 return; 12555 12556 offset = offset + ver_offset - start; 12557 for (i = 0; i < 16; i += 4) { 12558 __be32 v; 12559 if (tg3_nvram_read_be32(tp, offset + i, &v)) 12560 return; 12561 12562 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v)); 12563 } 12564 } else { 12565 u32 major, minor; 12566 12567 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset)) 12568 return; 12569 12570 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >> 12571 TG3_NVM_BCVER_MAJSFT; 12572 minor = ver_offset & TG3_NVM_BCVER_MINMSK; 12573 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off, 12574 "v%d.%02d", major, minor); 12575 } 12576} 12577 12578static void __devinit tg3_read_hwsb_ver(struct tg3 *tp) 12579{ 12580 u32 val, major, minor; 12581 12582 /* Use native endian representation */ 12583 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val)) 12584 return; 12585 12586 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >> 12587 TG3_NVM_HWSB_CFG1_MAJSFT; 12588 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >> 12589 TG3_NVM_HWSB_CFG1_MINSFT; 12590 12591 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor); 12592} 12593 12594static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val) 12595{ 12596 u32 offset, major, minor, build; 12597 12598 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1); 12599 12600 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1) 12601 return; 12602 12603 switch (val & TG3_EEPROM_SB_REVISION_MASK) { 12604 case TG3_EEPROM_SB_REVISION_0: 12605 offset = TG3_EEPROM_SB_F1R0_EDH_OFF; 12606 break; 12607 case TG3_EEPROM_SB_REVISION_2: 12608 offset = TG3_EEPROM_SB_F1R2_EDH_OFF; 12609 break; 12610 case TG3_EEPROM_SB_REVISION_3: 12611 offset = TG3_EEPROM_SB_F1R3_EDH_OFF; 12612 break; 12613 case TG3_EEPROM_SB_REVISION_4: 12614 offset = TG3_EEPROM_SB_F1R4_EDH_OFF; 12615 break; 12616 case TG3_EEPROM_SB_REVISION_5: 12617 offset = TG3_EEPROM_SB_F1R5_EDH_OFF; 12618 break; 12619 default: 12620 return; 12621 } 12622 12623 if (tg3_nvram_read(tp, offset, &val)) 12624 return; 12625 12626 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >> 12627 
TG3_EEPROM_SB_EDH_BLD_SHFT; 12628 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >> 12629 TG3_EEPROM_SB_EDH_MAJ_SHFT; 12630 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK; 12631 12632 if (minor > 99 || build > 26) 12633 return; 12634 12635 offset = strlen(tp->fw_ver); 12636 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset, 12637 " v%d.%02d", major, minor); 12638 12639 if (build > 0) { 12640 offset = strlen(tp->fw_ver); 12641 if (offset < TG3_VER_SIZE - 1) 12642 tp->fw_ver[offset] = 'a' + build - 1; 12643 } 12644} 12645 12646static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp) 12647{ 12648 u32 val, offset, start; 12649 int i, vlen; 12650 12651 for (offset = TG3_NVM_DIR_START; 12652 offset < TG3_NVM_DIR_END; 12653 offset += TG3_NVM_DIRENT_SIZE) { 12654 if (tg3_nvram_read(tp, offset, &val)) 12655 return; 12656 12657 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI) 12658 break; 12659 } 12660 12661 if (offset == TG3_NVM_DIR_END) 12662 return; 12663 12664 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 12665 start = 0x08000000; 12666 else if (tg3_nvram_read(tp, offset - 4, &start)) 12667 return; 12668 12669 if (tg3_nvram_read(tp, offset + 4, &offset) || 12670 !tg3_fw_img_is_valid(tp, offset) || 12671 tg3_nvram_read(tp, offset + 8, &val)) 12672 return; 12673 12674 offset += val - start; 12675 12676 vlen = strlen(tp->fw_ver); 12677 12678 tp->fw_ver[vlen++] = ','; 12679 tp->fw_ver[vlen++] = ' '; 12680 12681 for (i = 0; i < 4; i++) { 12682 __be32 v; 12683 if (tg3_nvram_read_be32(tp, offset, &v)) 12684 return; 12685 12686 offset += sizeof(v); 12687 12688 if (vlen > TG3_VER_SIZE - sizeof(v)) { 12689 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen); 12690 break; 12691 } 12692 12693 memcpy(&tp->fw_ver[vlen], &v, sizeof(v)); 12694 vlen += sizeof(v); 12695 } 12696} 12697 12698static void __devinit tg3_read_dash_ver(struct tg3 *tp) 12699{ 12700 int vlen; 12701 u32 apedata; 12702 12703 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) || 12704 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) 12705 return; 12706 12707 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); 12708 if (apedata != APE_SEG_SIG_MAGIC) 12709 return; 12710 12711 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); 12712 if (!(apedata & APE_FW_STATUS_READY)) 12713 return; 12714 12715 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION); 12716 12717 vlen = strlen(tp->fw_ver); 12718 12719 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " DASH v%d.%d.%d.%d", 12720 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT, 12721 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT, 12722 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT, 12723 (apedata & APE_FW_VERSION_BLDMSK)); 12724} 12725 12726static void __devinit tg3_read_fw_ver(struct tg3 *tp) 12727{ 12728 u32 val; 12729 bool vpd_vers = false; 12730 12731 if (tp->fw_ver[0] != 0) 12732 vpd_vers = true; 12733 12734 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) { 12735 strcat(tp->fw_ver, "sb"); 12736 return; 12737 } 12738 12739 if (tg3_nvram_read(tp, 0, &val)) 12740 return; 12741 12742 if (val == TG3_EEPROM_MAGIC) 12743 tg3_read_bc_ver(tp); 12744 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) 12745 tg3_read_sb_ver(tp, val); 12746 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW) 12747 tg3_read_hwsb_ver(tp); 12748 else 12749 return; 12750 12751 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) || 12752 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) || vpd_vers) 12753 goto done; 12754 12755 tg3_read_mgmtfw_ver(tp); 12756 12757done: 12758 tp->fw_ver[TG3_VER_SIZE - 1] = 0; 12759} 
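/* tg3_get_invariants() below runs once at probe time. It identifies
 * the chip revision and bus type, then records every chip-specific
 * quirk as TG3_FLAG/TG3_FLG2/TG3_FLG3 bits that the rest of the
 * driver keys off of.
 */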
12760 12761static struct pci_dev * __devinit tg3_find_peer(struct tg3 *); 12762 12763static int __devinit tg3_get_invariants(struct tg3 *tp) 12764{ 12765 static struct pci_device_id write_reorder_chipsets[] = { 12766 { PCI_DEVICE(PCI_VENDOR_ID_AMD, 12767 PCI_DEVICE_ID_AMD_FE_GATE_700C) }, 12768 { PCI_DEVICE(PCI_VENDOR_ID_AMD, 12769 PCI_DEVICE_ID_AMD_8131_BRIDGE) }, 12770 { PCI_DEVICE(PCI_VENDOR_ID_VIA, 12771 PCI_DEVICE_ID_VIA_8385_0) }, 12772 { }, 12773 }; 12774 u32 misc_ctrl_reg; 12775 u32 pci_state_reg, grc_misc_cfg; 12776 u32 val; 12777 u16 pci_cmd; 12778 int err; 12779 12780 /* Force memory write invalidate off. If we leave it on, 12781 * then on 5700_BX chips we have to enable a workaround. 12782 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary 12783 * to match the cacheline size. The Broadcom driver has this 12784 * workaround but turns MWI off all the time so it never uses 12785 * it. This seems to suggest that the workaround is insufficient. 12786 */ 12787 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); 12788 pci_cmd &= ~PCI_COMMAND_INVALIDATE; 12789 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); 12790 12791 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL 12792 * has the register indirect write enable bit set before 12793 * we try to access any of the MMIO registers. It is also 12794 * critical that the PCI-X hw workaround situation is decided 12795 * before that as well. 12796 */ 12797 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, 12798 &misc_ctrl_reg); 12799 12800 tp->pci_chip_rev_id = (misc_ctrl_reg >> 12801 MISC_HOST_CTRL_CHIPREV_SHIFT); 12802 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) { 12803 u32 prod_id_asic_rev; 12804 12805 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || 12806 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || 12807 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5724) 12808 pci_read_config_dword(tp->pdev, 12809 TG3PCI_GEN2_PRODID_ASICREV, 12810 &prod_id_asic_rev); 12811 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 || 12812 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 || 12813 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 || 12814 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 || 12815 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 || 12816 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795) 12817 pci_read_config_dword(tp->pdev, 12818 TG3PCI_GEN15_PRODID_ASICREV, 12819 &prod_id_asic_rev); 12820 else 12821 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV, 12822 &prod_id_asic_rev); 12823 12824 tp->pci_chip_rev_id = prod_id_asic_rev; 12825 } 12826 12827 /* Wrong chip ID in 5752 A0. This code can be removed later 12828 * as A0 is not in production. 12829 */ 12830 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW) 12831 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0; 12832 12833 /* If we have 5702/03 A1 or A2 on certain ICH chipsets, 12834 * we need to disable memory and use config. cycles 12835 * only to access all registers. The 5702/03 chips 12836 * can mistakenly decode the special cycles from the 12837 * ICH chipsets as memory write cycles, causing corruption 12838 * of register and memory space. Only certain ICH bridges 12839 * will drive special cycles with non-zero data during the 12840 * address phase which can fall within the 5703's address 12841 * range. This is not an ICH bug as the PCI spec allows 12842 * non-zero address during special cycles. However, only 12843 * these ICH bridges are known to drive non-zero addresses 12844 * during special cycles.
12845 * 12846 * Since special cycles do not cross PCI bridges, we only 12847 * enable this workaround if the 5703 is on the secondary 12848 * bus of these ICH bridges. 12849 */ 12850 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) || 12851 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) { 12852 static struct tg3_dev_id { 12853 u32 vendor; 12854 u32 device; 12855 u32 rev; 12856 } ich_chipsets[] = { 12857 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8, 12858 PCI_ANY_ID }, 12859 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8, 12860 PCI_ANY_ID }, 12861 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11, 12862 0xa }, 12863 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6, 12864 PCI_ANY_ID }, 12865 { }, 12866 }; 12867 struct tg3_dev_id *pci_id = &ich_chipsets[0]; 12868 struct pci_dev *bridge = NULL; 12869 12870 while (pci_id->vendor != 0) { 12871 bridge = pci_get_device(pci_id->vendor, pci_id->device, 12872 bridge); 12873 if (!bridge) { 12874 pci_id++; 12875 continue; 12876 } 12877 if (pci_id->rev != PCI_ANY_ID) { 12878 if (bridge->revision > pci_id->rev) 12879 continue; 12880 } 12881 if (bridge->subordinate && 12882 (bridge->subordinate->number == 12883 tp->pdev->bus->number)) { 12884 12885 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND; 12886 pci_dev_put(bridge); 12887 break; 12888 } 12889 } 12890 } 12891 12892 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) { 12893 static struct tg3_dev_id { 12894 u32 vendor; 12895 u32 device; 12896 } bridge_chipsets[] = { 12897 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 }, 12898 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 }, 12899 { }, 12900 }; 12901 struct tg3_dev_id *pci_id = &bridge_chipsets[0]; 12902 struct pci_dev *bridge = NULL; 12903 12904 while (pci_id->vendor != 0) { 12905 bridge = pci_get_device(pci_id->vendor, 12906 pci_id->device, 12907 bridge); 12908 if (!bridge) { 12909 pci_id++; 12910 continue; 12911 } 12912 if (bridge->subordinate && 12913 (bridge->subordinate->number <= 12914 tp->pdev->bus->number) && 12915 (bridge->subordinate->subordinate >= 12916 tp->pdev->bus->number)) { 12917 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG; 12918 pci_dev_put(bridge); 12919 break; 12920 } 12921 } 12922 } 12923 12924 /* The EPB bridge inside 5714, 5715, and 5780 cannot support 12925 * DMA addresses > 40-bit. This bridge may have other additional 12926 * 57xx devices behind it in some 4-port NIC designs for example. 12927 * Any tg3 device found behind the bridge will also need the 40-bit 12928 * DMA workaround. 12929 */ 12930 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 || 12931 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { 12932 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS; 12933 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG; 12934 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI); 12935 } else { 12936 struct pci_dev *bridge = NULL; 12937 12938 do { 12939 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS, 12940 PCI_DEVICE_ID_SERVERWORKS_EPB, 12941 bridge); 12942 if (bridge && bridge->subordinate && 12943 (bridge->subordinate->number <= 12944 tp->pdev->bus->number) && 12945 (bridge->subordinate->subordinate >= 12946 tp->pdev->bus->number)) { 12947 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG; 12948 pci_dev_put(bridge); 12949 break; 12950 } 12951 } while (bridge); 12952 } 12953 12954 /* Initialize misc host control in PCI block. 
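 * (only the MISC_HOST_CTRL_CHIPREV bits of the value read back
 * during probing are merged in)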
*/ 12955 tp->misc_host_ctrl |= (misc_ctrl_reg & 12956 MISC_HOST_CTRL_CHIPREV); 12957 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, 12958 tp->misc_host_ctrl); 12959 12960 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || 12961 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 || 12962 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) 12963 tp->pdev_peer = tg3_find_peer(tp); 12964 12965 /* Intentionally exclude ASIC_REV_5906 */ 12966 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 12967 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || 12968 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 12969 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 12970 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || 12971 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || 12972 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 12973 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) 12974 tp->tg3_flags3 |= TG3_FLG3_5755_PLUS; 12975 12976 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || 12977 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || 12978 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 || 12979 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || 12980 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) 12981 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS; 12982 12983 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) || 12984 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)) 12985 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS; 12986 12987 /* 5700 B0 chips do not support checksumming correctly due 12988 * to hardware bugs. 12989 */ 12990 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0) 12991 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS; 12992 else { 12993 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS; 12994 tp->dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; 12995 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) 12996 tp->dev->features |= NETIF_F_IPV6_CSUM; 12997 tp->dev->features |= NETIF_F_GRO; 12998 } 12999 13000 /* Determine TSO capabilities */ 13001 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 13002 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) 13003 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3; 13004 else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || 13005 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 13006 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2; 13007 else if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { 13008 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG; 13009 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 && 13010 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2) 13011 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG; 13012 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && 13013 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && 13014 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) { 13015 tp->tg3_flags2 |= TG3_FLG2_TSO_BUG; 13016 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) 13017 tp->fw_needed = FIRMWARE_TG3TSO5; 13018 else 13019 tp->fw_needed = FIRMWARE_TG3TSO; 13020 } 13021 13022 tp->irq_max = 1; 13023 13024 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { 13025 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI; 13026 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX || 13027 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX || 13028 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 && 13029 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 && 13030 tp->pdev_peer == tp->pdev)) 13031 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI; 13032 13033 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || 13034 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { 13035 tp->tg3_flags2 |= 
TG3_FLG2_1SHOT_MSI; 13036 } 13037 13038 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 13039 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { 13040 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX; 13041 tp->irq_max = TG3_IRQ_MAX_VECS; 13042 } 13043 } 13044 13045 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 13046 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 13047 tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG; 13048 else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) { 13049 tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG; 13050 tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG; 13051 } 13052 13053 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 13054 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) 13055 tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG; 13056 13057 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || 13058 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || 13059 (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG)) 13060 tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE; 13061 13062 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, 13063 &pci_state_reg); 13064 13065 tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP); 13066 if (tp->pcie_cap != 0) { 13067 u16 lnkctl; 13068 13069 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS; 13070 13071 pcie_set_readrq(tp->pdev, 4096); 13072 13073 pci_read_config_word(tp->pdev, 13074 tp->pcie_cap + PCI_EXP_LNKCTL, 13075 &lnkctl); 13076 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) { 13077 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 13078 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2; 13079 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 13080 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 13081 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 || 13082 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1) 13083 tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG; 13084 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) { 13085 tp->tg3_flags3 |= TG3_FLG3_L1PLLPD_EN; 13086 } 13087 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { 13088 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS; 13089 } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || 13090 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { 13091 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX); 13092 if (!tp->pcix_cap) { 13093 dev_err(&tp->pdev->dev, 13094 "Cannot find PCI-X capability, aborting\n"); 13095 return -EIO; 13096 } 13097 13098 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE)) 13099 tp->tg3_flags |= TG3_FLAG_PCIX_MODE; 13100 } 13101 13102 /* If we have an AMD 762 or VIA K8T800 chipset, write 13103 * reordering to the mailbox registers done by the host 13104 * controller can cause major troubles. We read back from 13105 * every mailbox register write to force the writes to be 13106 * posted to the chip in order. 13107 */ 13108 if (pci_dev_present(write_reorder_chipsets) && 13109 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) 13110 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER; 13111 13112 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, 13113 &tp->pci_cacheline_sz); 13114 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER, 13115 &tp->pci_lat_timer); 13116 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 && 13117 tp->pci_lat_timer < 64) { 13118 tp->pci_lat_timer = 64; 13119 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, 13120 tp->pci_lat_timer); 13121 } 13122 13123 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) { 13124 /* 5700 BX chips need to have their TX producer index 13125 * mailboxes written twice to workaround a bug. 
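 * (handled by tg3_write32_tx_mbox(), installed further below)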
13126 */ 13127 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG; 13128 13129 /* If we are in PCI-X mode, enable register write workaround. 13130 * 13131 * The workaround is to use indirect register accesses 13132 * for all chip writes not to mailbox registers. 13133 */ 13134 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) { 13135 u32 pm_reg; 13136 13137 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG; 13138 13139 /* The chip can have its power management PCI config 13140 * space registers clobbered due to this bug. 13141 * So explicitly force the chip into D0 here. 13142 */ 13143 pci_read_config_dword(tp->pdev, 13144 tp->pm_cap + PCI_PM_CTRL, 13145 &pm_reg); 13146 pm_reg &= ~PCI_PM_CTRL_STATE_MASK; 13147 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */; 13148 pci_write_config_dword(tp->pdev, 13149 tp->pm_cap + PCI_PM_CTRL, 13150 pm_reg); 13151 13152 /* Also, force SERR#/PERR# in PCI command. */ 13153 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); 13154 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR; 13155 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); 13156 } 13157 } 13158 13159 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0) 13160 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED; 13161 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0) 13162 tp->tg3_flags |= TG3_FLAG_PCI_32BIT; 13163 13164 /* Chip-specific fixup from Broadcom driver */ 13165 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) && 13166 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) { 13167 pci_state_reg |= PCISTATE_RETRY_SAME_DMA; 13168 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg); 13169 } 13170 13171 /* Default fast path register access methods */ 13172 tp->read32 = tg3_read32; 13173 tp->write32 = tg3_write32; 13174 tp->read32_mbox = tg3_read32; 13175 tp->write32_mbox = tg3_write32; 13176 tp->write32_tx_mbox = tg3_write32; 13177 tp->write32_rx_mbox = tg3_write32; 13178 13179 /* Various workaround register access methods */ 13180 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) 13181 tp->write32 = tg3_write_indirect_reg32; 13182 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 || 13183 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && 13184 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) { 13185 /* 13186 * Back to back register writes can cause problems on these 13187 * chips, the workaround is to read back all reg writes 13188 * except those to mailbox regs. 13189 * 13190 * See tg3_write_indirect_reg32().
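 * tg3_write_flush_reg32() below provides that read-back flush.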
13191 */ 13192 tp->write32 = tg3_write_flush_reg32; 13193 } 13194 13195 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) || 13196 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) { 13197 tp->write32_tx_mbox = tg3_write32_tx_mbox; 13198 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) 13199 tp->write32_rx_mbox = tg3_write_flush_reg32; 13200 } 13201 13202 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) { 13203 tp->read32 = tg3_read_indirect_reg32; 13204 tp->write32 = tg3_write_indirect_reg32; 13205 tp->read32_mbox = tg3_read_indirect_mbox; 13206 tp->write32_mbox = tg3_write_indirect_mbox; 13207 tp->write32_tx_mbox = tg3_write_indirect_mbox; 13208 tp->write32_rx_mbox = tg3_write_indirect_mbox; 13209 13210 iounmap(tp->regs); 13211 tp->regs = NULL; 13212 13213 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); 13214 pci_cmd &= ~PCI_COMMAND_MEMORY; 13215 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); 13216 } 13217 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { 13218 tp->read32_mbox = tg3_read32_mbox_5906; 13219 tp->write32_mbox = tg3_write32_mbox_5906; 13220 tp->write32_tx_mbox = tg3_write32_mbox_5906; 13221 tp->write32_rx_mbox = tg3_write32_mbox_5906; 13222 } 13223 13224 if (tp->write32 == tg3_write_indirect_reg32 || 13225 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) && 13226 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || 13227 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701))) 13228 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG; 13229 13230 /* Get eeprom hw config before calling tg3_set_power_state(). 13231 * In particular, the TG3_FLG2_IS_NIC flag must be 13232 * determined before calling tg3_set_power_state() so that 13233 * we know whether or not to switch out of Vaux power. 13234 * When the flag is set, it means that GPIO1 is used for eeprom 13235 * write protect and also implies that it is a LOM where GPIOs 13236 * are not used to switch power. 13237 */ 13238 tg3_get_eeprom_hw_cfg(tp); 13239 13240 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { 13241 /* Allow reads and writes to the 13242 * APE register and memory space. 13243 */ 13244 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR | 13245 PCISTATE_ALLOW_APE_SHMEM_WR; 13246 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, 13247 pci_state_reg); 13248 } 13249 13250 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 13251 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 13252 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || 13253 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || 13254 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 13255 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) 13256 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT; 13257 13258 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state(). 13259 * GPIO1 driven high will bring 5700's external PHY out of reset. 13260 * It is also used as eeprom write protect on LOMs. 13261 */ 13262 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM; 13263 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) || 13264 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) 13265 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | 13266 GRC_LCLCTRL_GPIO_OUTPUT1); 13267 /* Unused GPIO3 must be driven as output on 5752 because there 13268 * are no pull-up resistors on unused GPIO pins. 
13269 */ 13270 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) 13271 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; 13272 13273 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 13274 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || 13275 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) 13276 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; 13277 13278 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || 13279 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { 13280 /* Turn off the debug UART. */ 13281 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; 13282 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC) 13283 /* Keep VMain power. */ 13284 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | 13285 GRC_LCLCTRL_GPIO_OUTPUT0; 13286 } 13287 13288 /* Force the chip into D0. */ 13289 err = tg3_set_power_state(tp, PCI_D0); 13290 if (err) { 13291 dev_err(&tp->pdev->dev, "Transition to D0 failed\n"); 13292 return err; 13293 } 13294 13295 /* Derive initial jumbo mode from MTU assigned in 13296 * ether_setup() via the alloc_etherdev() call 13297 */ 13298 if (tp->dev->mtu > ETH_DATA_LEN && 13299 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) 13300 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE; 13301 13302 /* Determine WakeOnLan speed to use. */ 13303 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || 13304 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || 13305 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 || 13306 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) { 13307 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB); 13308 } else { 13309 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB; 13310 } 13311 13312 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 13313 tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET; 13314 13315 /* A few boards don't want Ethernet@WireSpeed phy feature */ 13316 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) || 13317 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) && 13318 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) && 13319 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) || 13320 (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) || 13321 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) 13322 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED; 13323 13324 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX || 13325 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX) 13326 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG; 13327 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) 13328 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG; 13329 13330 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && 13331 !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) && 13332 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && 13333 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 && 13334 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && 13335 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) { 13336 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 13337 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || 13338 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 13339 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) { 13340 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 && 13341 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722) 13342 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG; 13343 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M) 13344 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM; 13345 } else 13346 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG; 13347 } 13348 13349 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && 13350 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) { 13351 tp->phy_otp = tg3_read_otp_phycfg(tp); 13352 
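/* tg3_read_otp_phycfg() returns 0 on any failure, so substitute
 * sane defaults in that case.
 */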
if (tp->phy_otp == 0) 13353 tp->phy_otp = TG3_OTP_DEFAULT; 13354 } 13355 13356 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) 13357 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST; 13358 else 13359 tp->mi_mode = MAC_MI_MODE_BASE; 13360 13361 tp->coalesce_mode = 0; 13362 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX && 13363 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX) 13364 tp->coalesce_mode |= HOSTCC_MODE_32BYTE; 13365 13366 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || 13367 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) 13368 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB; 13369 13370 err = tg3_mdio_init(tp); 13371 if (err) 13372 return err; 13373 13374 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 && 13375 (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0 || 13376 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES))) 13377 return -ENOTSUPP; 13378 13379 /* Initialize data/descriptor byte/word swapping. */ 13380 val = tr32(GRC_MODE); 13381 val &= GRC_MODE_HOST_STACKUP; 13382 tw32(GRC_MODE, val | tp->grc_mode); 13383 13384 tg3_switch_clocks(tp); 13385 13386 /* Clear this out for sanity. */ 13387 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); 13388 13389 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, 13390 &pci_state_reg); 13391 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 && 13392 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) { 13393 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl); 13394 13395 if (chiprevid == CHIPREV_ID_5701_A0 || 13396 chiprevid == CHIPREV_ID_5701_B0 || 13397 chiprevid == CHIPREV_ID_5701_B2 || 13398 chiprevid == CHIPREV_ID_5701_B5) { 13399 void __iomem *sram_base; 13400 13401 /* Write some dummy words into the SRAM status block 13402 * area, see if it reads back correctly. If the return 13403 * value is bad, force enable the PCIX workaround. 
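 * (the 0xffffffff write to the adjacent word must leave the first
 * word untouched; a nonzero read-back of the first word means the
 * writes were misdirected)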
13404 */ 13405 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK; 13406 13407 writel(0x00000000, sram_base); 13408 writel(0x00000000, sram_base + 4); 13409 writel(0xffffffff, sram_base + 4); 13410 if (readl(sram_base) != 0x00000000) 13411 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG; 13412 } 13413 } 13414 13415 udelay(50); 13416 tg3_nvram_init(tp); 13417 13418 grc_misc_cfg = tr32(GRC_MISC_CFG); 13419 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK; 13420 13421 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && 13422 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 || 13423 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M)) 13424 tp->tg3_flags2 |= TG3_FLG2_IS_5788; 13425 13426 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) && 13427 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)) 13428 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS; 13429 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) { 13430 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD | 13431 HOSTCC_MODE_CLRTICK_TXBD); 13432 13433 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS; 13434 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, 13435 tp->misc_host_ctrl); 13436 } 13437 13438 /* Preserve the APE MAC_MODE bits */ 13439 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) 13440 tp->mac_mode = tr32(MAC_MODE) | 13441 MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; 13442 else 13443 tp->mac_mode = TG3_DEF_MAC_MODE; 13444 13445 /* these are limited to 10/100 only */ 13446 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 && 13447 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) || 13448 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && 13449 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM && 13450 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 || 13451 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 || 13452 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) || 13453 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM && 13454 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F || 13455 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F || 13456 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) || 13457 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 || 13458 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 || 13459 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 || 13460 (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) 13461 tp->tg3_flags |= TG3_FLAG_10_100_ONLY; 13462 13463 err = tg3_phy_probe(tp); 13464 if (err) { 13465 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err); 13466 /* ... but do not return immediately ... */ 13467 tg3_mdio_fini(tp); 13468 } 13469 13470 tg3_read_vpd(tp); 13471 tg3_read_fw_ver(tp); 13472 13473 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { 13474 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT; 13475 } else { 13476 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) 13477 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT; 13478 else 13479 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT; 13480 } 13481 13482 /* 5700 {AX,BX} chips have a broken status block link 13483 * change bit implementation, so we must use the 13484 * status register in those cases. 13485 */ 13486 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) 13487 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG; 13488 else 13489 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG; 13490 13491 /* The led_ctrl is set during tg3_phy_probe, here we might 13492 * have to force the link status polling mechanism based 13493 * upon subsystem IDs. 
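 * (the Dell 5701 copper boards handled just below are one such case)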
13494 */ 13495 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL && 13496 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && 13497 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { 13498 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT | 13499 TG3_FLAG_USE_LINKCHG_REG); 13500 } 13501 13502 /* For all SERDES we poll the MAC status register. */ 13503 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) 13504 tp->tg3_flags |= TG3_FLAG_POLL_SERDES; 13505 else 13506 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES; 13507 13508 tp->rx_offset = NET_IP_ALIGN + TG3_RX_HEADROOM; 13509 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD; 13510 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && 13511 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) { 13512 tp->rx_offset -= NET_IP_ALIGN; 13513#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 13514 tp->rx_copy_thresh = ~(u16)0; 13515#endif 13516 } 13517 13518 tp->rx_std_max_post = TG3_RX_RING_SIZE; 13519 13520 /* Increment the rx prod index on the rx std ring by at most 13521 * 8 for these chips to workaround hw errata. 13522 */ 13523 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || 13524 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || 13525 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) 13526 tp->rx_std_max_post = 8; 13527 13528 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) 13529 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) & 13530 PCIE_PWR_MGMT_L1_THRESH_MSK; 13531 13532 return err; 13533} 13534 13535#ifdef CONFIG_SPARC 13536static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp) 13537{ 13538 struct net_device *dev = tp->dev; 13539 struct pci_dev *pdev = tp->pdev; 13540 struct device_node *dp = pci_device_to_OF_node(pdev); 13541 const unsigned char *addr; 13542 int len; 13543 13544 addr = of_get_property(dp, "local-mac-address", &len); 13545 if (addr && len == 6) { 13546 memcpy(dev->dev_addr, addr, 6); 13547 memcpy(dev->perm_addr, dev->dev_addr, 6); 13548 return 0; 13549 } 13550 return -ENODEV; 13551} 13552 13553static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp) 13554{ 13555 struct net_device *dev = tp->dev; 13556 13557 memcpy(dev->dev_addr, idprom->id_ethaddr, 6); 13558 memcpy(dev->perm_addr, idprom->id_ethaddr, 6); 13559 return 0; 13560} 13561#endif 13562 13563static int __devinit tg3_get_device_address(struct tg3 *tp) 13564{ 13565 struct net_device *dev = tp->dev; 13566 u32 hi, lo, mac_offset; 13567 int addr_ok = 0; 13568 13569#ifdef CONFIG_SPARC 13570 if (!tg3_get_macaddr_sparc(tp)) 13571 return 0; 13572#endif 13573 13574 mac_offset = 0x7c; 13575 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) || 13576 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { 13577 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) 13578 mac_offset = 0xcc; 13579 if (tg3_nvram_lock(tp)) 13580 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET); 13581 else 13582 tg3_nvram_unlock(tp); 13583 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { 13584 if (tr32(TG3_CPMU_STATUS) & TG3_CPMU_STATUS_PCIE_FUNC) 13585 mac_offset = 0xcc; 13586 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 13587 mac_offset = 0x10; 13588 13589 /* First try to get it from MAC address mailbox. 
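 * A valid entry is flagged with 0x484b (ASCII "HK") in the upper
 * half of the high word, apparently stamped there by bootcode;
 * anything else falls through to the NVRAM path below.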
*/ 13590 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi); 13591 if ((hi >> 16) == 0x484b) { 13592 dev->dev_addr[0] = (hi >> 8) & 0xff; 13593 dev->dev_addr[1] = (hi >> 0) & 0xff; 13594 13595 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo); 13596 dev->dev_addr[2] = (lo >> 24) & 0xff; 13597 dev->dev_addr[3] = (lo >> 16) & 0xff; 13598 dev->dev_addr[4] = (lo >> 8) & 0xff; 13599 dev->dev_addr[5] = (lo >> 0) & 0xff; 13600 13601 /* Some old bootcode may report a 0 MAC address in SRAM */ 13602 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]); 13603 } 13604 if (!addr_ok) { 13605 /* Next, try NVRAM. */ 13606 if (!(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) && 13607 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) && 13608 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) { 13609 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2); 13610 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo)); 13611 } 13612 /* Finally just fetch it out of the MAC control regs. */ 13613 else { 13614 hi = tr32(MAC_ADDR_0_HIGH); 13615 lo = tr32(MAC_ADDR_0_LOW); 13616 13617 dev->dev_addr[5] = lo & 0xff; 13618 dev->dev_addr[4] = (lo >> 8) & 0xff; 13619 dev->dev_addr[3] = (lo >> 16) & 0xff; 13620 dev->dev_addr[2] = (lo >> 24) & 0xff; 13621 dev->dev_addr[1] = hi & 0xff; 13622 dev->dev_addr[0] = (hi >> 8) & 0xff; 13623 } 13624 } 13625 13626 if (!is_valid_ether_addr(&dev->dev_addr[0])) { 13627#ifdef CONFIG_SPARC 13628 if (!tg3_get_default_macaddr_sparc(tp)) 13629 return 0; 13630#endif 13631 return -EINVAL; 13632 } 13633 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 13634 return 0; 13635} 13636 13637#define BOUNDARY_SINGLE_CACHELINE 1 13638#define BOUNDARY_MULTI_CACHELINE 2 13639 13640static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val) 13641{ 13642 int cacheline_size; 13643 u8 byte; 13644 int goal; 13645 13646 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte); 13647 if (byte == 0) 13648 cacheline_size = 1024; 13649 else 13650 cacheline_size = (int) byte * 4; 13651 13652 /* On 5703 and later chips, the boundary bits have no 13653 * effect. 13654 */ 13655 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && 13656 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && 13657 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) 13658 goto out; 13659 13660#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC) 13661 goal = BOUNDARY_MULTI_CACHELINE; 13662#else 13663#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA) 13664 goal = BOUNDARY_SINGLE_CACHELINE; 13665#else 13666 goal = 0; 13667#endif 13668#endif 13669 13670 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 13671 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { 13672 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT; 13673 goto out; 13674 } 13675 13676 if (!goal) 13677 goto out; 13678 13679 /* PCI controllers on most RISC systems tend to disconnect 13680 * when a device tries to burst across a cache-line boundary. 13681 * Therefore, letting tg3 do so just wastes PCI bandwidth. 13682 * 13683 * Unfortunately, for PCI-E there are only limited 13684 * write-side controls for this, and thus for reads 13685 * we will still get the disconnects. We'll also waste 13686 * these PCI cycles for both read and write for chips 13687 * other than 5700 and 5701 which do not implement the 13688 * boundary bits. 
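 * The switch statements below therefore only distinguish PCI-X,
 * PCI Express and legacy PCI; note that in the PCI Express case
 * only the write-boundary field is ever adjusted.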
13689 */ 13690 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) && 13691 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) { 13692 switch (cacheline_size) { 13693 case 16: 13694 case 32: 13695 case 64: 13696 case 128: 13697 if (goal == BOUNDARY_SINGLE_CACHELINE) { 13698 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX | 13699 DMA_RWCTRL_WRITE_BNDRY_128_PCIX); 13700 } else { 13701 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX | 13702 DMA_RWCTRL_WRITE_BNDRY_384_PCIX); 13703 } 13704 break; 13705 13706 case 256: 13707 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX | 13708 DMA_RWCTRL_WRITE_BNDRY_256_PCIX); 13709 break; 13710 13711 default: 13712 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX | 13713 DMA_RWCTRL_WRITE_BNDRY_384_PCIX); 13714 break; 13715 } 13716 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { 13717 switch (cacheline_size) { 13718 case 16: 13719 case 32: 13720 case 64: 13721 if (goal == BOUNDARY_SINGLE_CACHELINE) { 13722 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE; 13723 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE; 13724 break; 13725 } 13726 /* fallthrough */ 13727 case 128: 13728 default: 13729 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE; 13730 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE; 13731 break; 13732 } 13733 } else { 13734 switch (cacheline_size) { 13735 case 16: 13736 if (goal == BOUNDARY_SINGLE_CACHELINE) { 13737 val |= (DMA_RWCTRL_READ_BNDRY_16 | 13738 DMA_RWCTRL_WRITE_BNDRY_16); 13739 break; 13740 } 13741 /* fallthrough */ 13742 case 32: 13743 if (goal == BOUNDARY_SINGLE_CACHELINE) { 13744 val |= (DMA_RWCTRL_READ_BNDRY_32 | 13745 DMA_RWCTRL_WRITE_BNDRY_32); 13746 break; 13747 } 13748 /* fallthrough */ 13749 case 64: 13750 if (goal == BOUNDARY_SINGLE_CACHELINE) { 13751 val |= (DMA_RWCTRL_READ_BNDRY_64 | 13752 DMA_RWCTRL_WRITE_BNDRY_64); 13753 break; 13754 } 13755 /* fallthrough */ 13756 case 128: 13757 if (goal == BOUNDARY_SINGLE_CACHELINE) { 13758 val |= (DMA_RWCTRL_READ_BNDRY_128 | 13759 DMA_RWCTRL_WRITE_BNDRY_128); 13760 break; 13761 } 13762 /* fallthrough */ 13763 case 256: 13764 val |= (DMA_RWCTRL_READ_BNDRY_256 | 13765 DMA_RWCTRL_WRITE_BNDRY_256); 13766 break; 13767 case 512: 13768 val |= (DMA_RWCTRL_READ_BNDRY_512 | 13769 DMA_RWCTRL_WRITE_BNDRY_512); 13770 break; 13771 case 1024: 13772 default: 13773 val |= (DMA_RWCTRL_READ_BNDRY_1024 | 13774 DMA_RWCTRL_WRITE_BNDRY_1024); 13775 break; 13776 } 13777 } 13778 13779out: 13780 return val; 13781} 13782 13783static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device) 13784{ 13785 struct tg3_internal_buffer_desc test_desc; 13786 u32 sram_dma_descs; 13787 int i, ret; 13788 13789 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE; 13790 13791 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0); 13792 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0); 13793 tw32(RDMAC_STATUS, 0); 13794 tw32(WDMAC_STATUS, 0); 13795 13796 tw32(BUFMGR_MODE, 0); 13797 tw32(FTQ_RESET, 0); 13798 13799 test_desc.addr_hi = ((u64) buf_dma) >> 32; 13800 test_desc.addr_lo = buf_dma & 0xffffffff; 13801 test_desc.nic_mbuf = 0x00002100; 13802 test_desc.len = size; 13803 13804 /* 13805 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz 13806 * the *second* time the tg3 driver was getting loaded after an 13807 * initial scan. 13808 * 13809 * Broadcom tells me: 13810 * ...the DMA engine is connected to the GRC block and a DMA 13811 * reset may affect the GRC block in some unpredictable way... 13812 * The behavior of resets to individual blocks has not been tested. 13813 * 13814 * Broadcom noted the GRC reset will also reset all sub-components. 
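 * Hence no block resets are issued here; clearing the FTQ and DMA
 * status registers above appears to be the mildest cleanup that
 * keeps repeated load/unload cycles reliable.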
13815 */ 13816 if (to_device) { 13817 test_desc.cqid_sqid = (13 << 8) | 2; 13818 13819 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE); 13820 udelay(40); 13821 } else { 13822 test_desc.cqid_sqid = (16 << 8) | 7; 13823 13824 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE); 13825 udelay(40); 13826 } 13827 test_desc.flags = 0x00000005; 13828 13829 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) { 13830 u32 val; 13831 13832 val = *(((u32 *)&test_desc) + i); 13833 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 13834 sram_dma_descs + (i * sizeof(u32))); 13835 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); 13836 } 13837 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); 13838 13839 if (to_device) 13840 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs); 13841 else 13842 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs); 13843 13844 ret = -ENODEV; 13845 for (i = 0; i < 40; i++) { 13846 u32 val; 13847 13848 if (to_device) 13849 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ); 13850 else 13851 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ); 13852 if ((val & 0xffff) == sram_dma_descs) { 13853 ret = 0; 13854 break; 13855 } 13856 13857 udelay(100); 13858 } 13859 13860 return ret; 13861} 13862 13863#define TEST_BUFFER_SIZE 0x2000 13864 13865static int __devinit tg3_test_dma(struct tg3 *tp) 13866{ 13867 dma_addr_t buf_dma; 13868 u32 *buf, saved_dma_rwctrl; 13869 int ret = 0; 13870 13871 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma); 13872 if (!buf) { 13873 ret = -ENOMEM; 13874 goto out_nofree; 13875 } 13876 13877 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) | 13878 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT)); 13879 13880 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl); 13881 13882 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 13883 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) 13884 goto out; 13885 13886 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { 13887 /* DMA read watermark not used on PCIE */ 13888 tp->dma_rwctrl |= 0x00180000; 13889 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) { 13890 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 || 13891 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) 13892 tp->dma_rwctrl |= 0x003f0000; 13893 else 13894 tp->dma_rwctrl |= 0x003f000f; 13895 } else { 13896 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || 13897 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { 13898 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f); 13899 u32 read_water = 0x7; 13900 13901 /* If the 5704 is behind the EPB bridge, we can 13902 * do the less restrictive ONE_DMA workaround for 13903 * better performance. 
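 * (For the 5704, TG3_FLAG_40BIT_DMA_BUG is only set when such a
 * bridge was found, so the flag doubles as the EPB indicator in
 * the test below.)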
13904 */ 13905 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) && 13906 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) 13907 tp->dma_rwctrl |= 0x8000; 13908 else if (ccval == 0x6 || ccval == 0x7) 13909 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA; 13910 13911 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) 13912 read_water = 4; 13913 /* Set bit 23 to enable PCIX hw bug fix */ 13914 tp->dma_rwctrl |= 13915 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) | 13916 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) | 13917 (1 << 23); 13918 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) { 13919 /* 5780 always in PCIX mode */ 13920 tp->dma_rwctrl |= 0x00144000; 13921 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { 13922 /* 5714 always in PCIX mode */ 13923 tp->dma_rwctrl |= 0x00148000; 13924 } else { 13925 tp->dma_rwctrl |= 0x001b000f; 13926 } 13927 } 13928 13929 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || 13930 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) 13931 tp->dma_rwctrl &= 0xfffffff0; 13932 13933 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || 13934 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { 13935 /* Remove this if it causes problems for some boards. */ 13936 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT; 13937 13938 /* On 5700/5701 chips, we need to set this bit. 13939 * Otherwise the chip will issue cacheline transactions 13940 * to streamable DMA memory with not all the byte 13941 * enables turned on. This is an error on several 13942 * RISC PCI controllers, in particular sparc64. 13943 * 13944 * On 5703/5704 chips, this bit has been reassigned 13945 * a different meaning. In particular, it is used 13946 * on those chips to enable a PCI-X workaround. 13947 */ 13948 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE; 13949 } 13950 13951 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 13952 13953#if 0 13954 /* Unneeded, already done by tg3_get_invariants. */ 13955 tg3_switch_clocks(tp); 13956#endif 13957 13958 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && 13959 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) 13960 goto out; 13961 13962 /* It is best to perform DMA test with maximum write burst size 13963 * to expose the 5700/5701 write DMA bug. 13964 */ 13965 saved_dma_rwctrl = tp->dma_rwctrl; 13966 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; 13967 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 13968 13969 while (1) { 13970 u32 *p = buf, i; 13971 13972 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) 13973 p[i] = i; 13974 13975 /* Send the buffer to the chip. */ 13976 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1); 13977 if (ret) { 13978 dev_err(&tp->pdev->dev, 13979 "%s: Buffer write failed. err = %d\n", 13980 __func__, ret); 13981 break; 13982 } 13983 13984#if 0 13985 /* validate data reached card RAM correctly. */ 13986 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) { 13987 u32 val; 13988 tg3_read_mem(tp, 0x2100 + (i*4), &val); 13989 if (le32_to_cpu(val) != p[i]) { 13990 dev_err(&tp->pdev->dev, 13991 "%s: Buffer corrupted on device! " 13992 "(%d != %d)\n", __func__, val, i); 13993 /* ret = -ENODEV here? */ 13994 } 13995 p[i] = 0; 13996 } 13997#endif 13998 /* Now read it back. */ 13999 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0); 14000 if (ret) { 14001 dev_err(&tp->pdev->dev, "%s: Buffer read failed. " 14002 "err = %d\n", __func__, ret); 14003 break; 14004 } 14005 14006 /* Verify it. 
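 * A mismatch first retries the whole test with the conservative
 * 16-byte write boundary; only a failure at that setting is
 * treated as fatal.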
*/ 14007 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) { 14008 if (p[i] == i) 14009 continue; 14010 14011 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != 14012 DMA_RWCTRL_WRITE_BNDRY_16) { 14013 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; 14014 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; 14015 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 14016 break; 14017 } else { 14018 dev_err(&tp->pdev->dev, 14019 "%s: Buffer corrupted on read back! " 14020 "(%d != %d)\n", __func__, p[i], i); 14021 ret = -ENODEV; 14022 goto out; 14023 } 14024 } 14025 14026 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) { 14027 /* Success. */ 14028 ret = 0; 14029 break; 14030 } 14031 } 14032 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != 14033 DMA_RWCTRL_WRITE_BNDRY_16) { 14034 static struct pci_device_id dma_wait_state_chipsets[] = { 14035 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 14036 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) }, 14037 { }, 14038 }; 14039 14040 /* DMA test passed without adjusting DMA boundary, 14041 * now look for chipsets that are known to expose the 14042 * DMA bug without failing the test. 14043 */ 14044 if (pci_dev_present(dma_wait_state_chipsets)) { 14045 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; 14046 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; 14047 } else { 14048 /* Safe to use the calculated DMA boundary. */ 14049 tp->dma_rwctrl = saved_dma_rwctrl; 14050 } 14051 14052 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 14053 } 14054 14055out: 14056 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma); 14057out_nofree: 14058 return ret; 14059} 14060 14061static void __devinit tg3_init_link_config(struct tg3 *tp) 14062{ 14063 tp->link_config.advertising = 14064 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | 14065 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | 14066 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | 14067 ADVERTISED_Autoneg | ADVERTISED_MII); 14068 tp->link_config.speed = SPEED_INVALID; 14069 tp->link_config.duplex = DUPLEX_INVALID; 14070 tp->link_config.autoneg = AUTONEG_ENABLE; 14071 tp->link_config.active_speed = SPEED_INVALID; 14072 tp->link_config.active_duplex = DUPLEX_INVALID; 14073 tp->link_config.phy_is_low_power = 0; 14074 tp->link_config.orig_speed = SPEED_INVALID; 14075 tp->link_config.orig_duplex = DUPLEX_INVALID; 14076 tp->link_config.orig_autoneg = AUTONEG_INVALID; 14077} 14078 14079static void __devinit tg3_init_bufmgr_config(struct tg3 *tp) 14080{ 14081 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 14082 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { 14083 tp->bufmgr_config.mbuf_read_dma_low_water = 14084 DEFAULT_MB_RDMA_LOW_WATER_5705; 14085 tp->bufmgr_config.mbuf_mac_rx_low_water = 14086 DEFAULT_MB_MACRX_LOW_WATER_57765; 14087 tp->bufmgr_config.mbuf_high_water = 14088 DEFAULT_MB_HIGH_WATER_57765; 14089 14090 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = 14091 DEFAULT_MB_RDMA_LOW_WATER_5705; 14092 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = 14093 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765; 14094 tp->bufmgr_config.mbuf_high_water_jumbo = 14095 DEFAULT_MB_HIGH_WATER_JUMBO_57765; 14096 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { 14097 tp->bufmgr_config.mbuf_read_dma_low_water = 14098 DEFAULT_MB_RDMA_LOW_WATER_5705; 14099 tp->bufmgr_config.mbuf_mac_rx_low_water = 14100 DEFAULT_MB_MACRX_LOW_WATER_5705; 14101 tp->bufmgr_config.mbuf_high_water = 14102 DEFAULT_MB_HIGH_WATER_5705; 14103 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { 14104 tp->bufmgr_config.mbuf_mac_rx_low_water = 14105 
DEFAULT_MB_MACRX_LOW_WATER_5906; 14106 tp->bufmgr_config.mbuf_high_water = 14107 DEFAULT_MB_HIGH_WATER_5906; 14108 } 14109 14110 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = 14111 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780; 14112 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = 14113 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780; 14114 tp->bufmgr_config.mbuf_high_water_jumbo = 14115 DEFAULT_MB_HIGH_WATER_JUMBO_5780; 14116 } else { 14117 tp->bufmgr_config.mbuf_read_dma_low_water = 14118 DEFAULT_MB_RDMA_LOW_WATER; 14119 tp->bufmgr_config.mbuf_mac_rx_low_water = 14120 DEFAULT_MB_MACRX_LOW_WATER; 14121 tp->bufmgr_config.mbuf_high_water = 14122 DEFAULT_MB_HIGH_WATER; 14123 14124 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = 14125 DEFAULT_MB_RDMA_LOW_WATER_JUMBO; 14126 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = 14127 DEFAULT_MB_MACRX_LOW_WATER_JUMBO; 14128 tp->bufmgr_config.mbuf_high_water_jumbo = 14129 DEFAULT_MB_HIGH_WATER_JUMBO; 14130 } 14131 14132 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER; 14133 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER; 14134} 14135 14136static char * __devinit tg3_phy_string(struct tg3 *tp) 14137{ 14138 switch (tp->phy_id & TG3_PHY_ID_MASK) { 14139 case TG3_PHY_ID_BCM5400: return "5400"; 14140 case TG3_PHY_ID_BCM5401: return "5401"; 14141 case TG3_PHY_ID_BCM5411: return "5411"; 14142 case TG3_PHY_ID_BCM5701: return "5701"; 14143 case TG3_PHY_ID_BCM5703: return "5703"; 14144 case TG3_PHY_ID_BCM5704: return "5704"; 14145 case TG3_PHY_ID_BCM5705: return "5705"; 14146 case TG3_PHY_ID_BCM5750: return "5750"; 14147 case TG3_PHY_ID_BCM5752: return "5752"; 14148 case TG3_PHY_ID_BCM5714: return "5714"; 14149 case TG3_PHY_ID_BCM5780: return "5780"; 14150 case TG3_PHY_ID_BCM5755: return "5755"; 14151 case TG3_PHY_ID_BCM5787: return "5787"; 14152 case TG3_PHY_ID_BCM5784: return "5784"; 14153 case TG3_PHY_ID_BCM5756: return "5722/5756"; 14154 case TG3_PHY_ID_BCM5906: return "5906"; 14155 case TG3_PHY_ID_BCM5761: return "5761"; 14156 case TG3_PHY_ID_BCM5718C: return "5718C"; 14157 case TG3_PHY_ID_BCM5718S: return "5718S"; 14158 case TG3_PHY_ID_BCM57765: return "57765"; 14159 case TG3_PHY_ID_BCM8002: return "8002/serdes"; 14160 case 0: return "serdes"; 14161 default: return "unknown"; 14162 } 14163} 14164 14165static char * __devinit tg3_bus_string(struct tg3 *tp, char *str) 14166{ 14167 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { 14168 strcpy(str, "PCI Express"); 14169 return str; 14170 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) { 14171 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f; 14172 14173 strcpy(str, "PCIX:"); 14174 14175 if ((clock_ctrl == 7) || 14176 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) == 14177 GRC_MISC_CFG_BOARD_ID_5704CIOBE)) 14178 strcat(str, "133MHz"); 14179 else if (clock_ctrl == 0) 14180 strcat(str, "33MHz"); 14181 else if (clock_ctrl == 2) 14182 strcat(str, "50MHz"); 14183 else if (clock_ctrl == 4) 14184 strcat(str, "66MHz"); 14185 else if (clock_ctrl == 6) 14186 strcat(str, "100MHz"); 14187 } else { 14188 strcpy(str, "PCI:"); 14189 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) 14190 strcat(str, "66MHz"); 14191 else 14192 strcat(str, "33MHz"); 14193 } 14194 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT) 14195 strcat(str, ":32-bit"); 14196 else 14197 strcat(str, ":64-bit"); 14198 return str; 14199} 14200 14201static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp) 14202{ 14203 struct pci_dev *peer; 14204 unsigned int func, devnr = tp->pdev->devfn & ~7; 14205 14206 for (func = 0; func < 8; func++) { 14207 peer = 
pci_get_slot(tp->pdev->bus, devnr | func); 14208 if (peer && peer != tp->pdev) 14209 break; 14210 pci_dev_put(peer); 14211 } 14212 /* 5704 can be configured in single-port mode, set peer to 14213 * tp->pdev in that case. 14214 */ 14215 if (!peer) { 14216 peer = tp->pdev; 14217 return peer; 14218 } 14219 14220 /* 14221 * We don't need to keep the refcount elevated; there's no way 14222 * to remove one half of this device without removing the other 14223 */ 14224 pci_dev_put(peer); 14225 14226 return peer; 14227} 14228 14229static void __devinit tg3_init_coal(struct tg3 *tp) 14230{ 14231 struct ethtool_coalesce *ec = &tp->coal; 14232 14233 memset(ec, 0, sizeof(*ec)); 14234 ec->cmd = ETHTOOL_GCOALESCE; 14235 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS; 14236 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS; 14237 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES; 14238 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES; 14239 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT; 14240 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT; 14241 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT; 14242 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT; 14243 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS; 14244 14245 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD | 14246 HOSTCC_MODE_CLRTICK_TXBD)) { 14247 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS; 14248 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS; 14249 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS; 14250 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS; 14251 } 14252 14253 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { 14254 ec->rx_coalesce_usecs_irq = 0; 14255 ec->tx_coalesce_usecs_irq = 0; 14256 ec->stats_block_coalesce_usecs = 0; 14257 } 14258} 14259 14260static const struct net_device_ops tg3_netdev_ops = { 14261 .ndo_open = tg3_open, 14262 .ndo_stop = tg3_close, 14263 .ndo_start_xmit = tg3_start_xmit, 14264 .ndo_get_stats = tg3_get_stats, 14265 .ndo_validate_addr = eth_validate_addr, 14266 .ndo_set_multicast_list = tg3_set_rx_mode, 14267 .ndo_set_mac_address = tg3_set_mac_addr, 14268 .ndo_do_ioctl = tg3_ioctl, 14269 .ndo_tx_timeout = tg3_tx_timeout, 14270 .ndo_change_mtu = tg3_change_mtu, 14271#if TG3_VLAN_TAG_USED 14272 .ndo_vlan_rx_register = tg3_vlan_rx_register, 14273#endif 14274#ifdef CONFIG_NET_POLL_CONTROLLER 14275 .ndo_poll_controller = tg3_poll_controller, 14276#endif 14277}; 14278 14279static const struct net_device_ops tg3_netdev_ops_dma_bug = { 14280 .ndo_open = tg3_open, 14281 .ndo_stop = tg3_close, 14282 .ndo_start_xmit = tg3_start_xmit_dma_bug, 14283 .ndo_get_stats = tg3_get_stats, 14284 .ndo_validate_addr = eth_validate_addr, 14285 .ndo_set_multicast_list = tg3_set_rx_mode, 14286 .ndo_set_mac_address = tg3_set_mac_addr, 14287 .ndo_do_ioctl = tg3_ioctl, 14288 .ndo_tx_timeout = tg3_tx_timeout, 14289 .ndo_change_mtu = tg3_change_mtu, 14290#if TG3_VLAN_TAG_USED 14291 .ndo_vlan_rx_register = tg3_vlan_rx_register, 14292#endif 14293#ifdef CONFIG_NET_POLL_CONTROLLER 14294 .ndo_poll_controller = tg3_poll_controller, 14295#endif 14296}; 14297 14298static int __devinit tg3_init_one(struct pci_dev *pdev, 14299 const struct pci_device_id *ent) 14300{ 14301 struct net_device *dev; 14302 struct tg3 *tp; 14303 int i, err, pm_cap; 14304 u32 sndmbx, rcvmbx, intmbx; 14305 char str[40]; 14306 u64 dma_mask, persist_dma_mask; 14307 14308 printk_once(KERN_INFO "%s\n", version); 14309 14310 err = pci_enable_device(pdev); 14311 if (err) { 14312 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 14313 
return err; 14314 } 14315 14316 err = pci_request_regions(pdev, DRV_MODULE_NAME); 14317 if (err) { 14318 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); 14319 goto err_out_disable_pdev; 14320 } 14321 14322 pci_set_master(pdev); 14323 14324 /* Find power-management capability. */ 14325 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); 14326 if (pm_cap == 0) { 14327 dev_err(&pdev->dev, 14328 "Cannot find Power Management capability, aborting\n"); 14329 err = -EIO; 14330 goto err_out_free_res; 14331 } 14332 14333 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS); 14334 if (!dev) { 14335 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n"); 14336 err = -ENOMEM; 14337 goto err_out_free_res; 14338 } 14339 14340 SET_NETDEV_DEV(dev, &pdev->dev); 14341 14342#if TG3_VLAN_TAG_USED 14343 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 14344#endif 14345 14346 tp = netdev_priv(dev); 14347 tp->pdev = pdev; 14348 tp->dev = dev; 14349 tp->pm_cap = pm_cap; 14350 tp->rx_mode = TG3_DEF_RX_MODE; 14351 tp->tx_mode = TG3_DEF_TX_MODE; 14352 14353 if (tg3_debug > 0) 14354 tp->msg_enable = tg3_debug; 14355 else 14356 tp->msg_enable = TG3_DEF_MSG_ENABLE; 14357 14358 /* The word/byte swap controls here control register access byte 14359 * swapping. DMA data byte swapping is controlled in the GRC_MODE 14360 * setting below. 14361 */ 14362 tp->misc_host_ctrl = 14363 MISC_HOST_CTRL_MASK_PCI_INT | 14364 MISC_HOST_CTRL_WORD_SWAP | 14365 MISC_HOST_CTRL_INDIR_ACCESS | 14366 MISC_HOST_CTRL_PCISTATE_RW; 14367 14368 /* The NONFRM (non-frame) byte/word swap controls take effect 14369 * on descriptor entries, anything which isn't packet data. 14370 * 14371 * The StrongARM chips on the board (one for tx, one for rx) 14372 * are running in big-endian mode. 14373 */ 14374 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA | 14375 GRC_MODE_WSWAP_NONFRM_DATA); 14376#ifdef __BIG_ENDIAN 14377 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA; 14378#endif 14379 spin_lock_init(&tp->lock); 14380 spin_lock_init(&tp->indirect_lock); 14381 INIT_WORK(&tp->reset_task, tg3_reset_task); 14382 14383 tp->regs = pci_ioremap_bar(pdev, BAR_0); 14384 if (!tp->regs) { 14385 dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); 14386 err = -ENOMEM; 14387 goto err_out_free_dev; 14388 } 14389 14390 tg3_init_link_config(tp); 14391 14392 tp->rx_pending = TG3_DEF_RX_RING_PENDING; 14393 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; 14394 14395 dev->ethtool_ops = &tg3_ethtool_ops; 14396 dev->watchdog_timeo = TG3_TX_TIMEOUT; 14397 dev->irq = pdev->irq; 14398 14399 err = tg3_get_invariants(tp); 14400 if (err) { 14401 dev_err(&pdev->dev, 14402 "Problem fetching invariants of chip, aborting\n"); 14403 goto err_out_iounmap; 14404 } 14405 14406 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) && 14407 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) 14408 dev->netdev_ops = &tg3_netdev_ops; 14409 else 14410 dev->netdev_ops = &tg3_netdev_ops_dma_bug; 14411 14412 14413 /* The EPB bridge inside 5714, 5715, and 5780 and any 14414 * device behind the EPB cannot support DMA addresses > 40-bit. 14415 * On 64-bit systems with IOMMU, use 40-bit dma_mask. 14416 * On 64-bit systems without IOMMU, use 64-bit dma_mask and 14417 * do DMA address check in tg3_start_xmit(). 
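 * (The DMA-bug transmit path re-checks each mapping and, when an
 * address would cross the limit, falls back to sending a freshly
 * allocated, safely mapped copy of the skb.)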
14418 */ 14419 if (tp->tg3_flags2 & TG3_FLG2_IS_5788) 14420 persist_dma_mask = dma_mask = DMA_BIT_MASK(32); 14421 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) { 14422 persist_dma_mask = dma_mask = DMA_BIT_MASK(40); 14423#ifdef CONFIG_HIGHMEM 14424 dma_mask = DMA_BIT_MASK(64); 14425#endif 14426 } else 14427 persist_dma_mask = dma_mask = DMA_BIT_MASK(64); 14428 14429 /* Configure DMA attributes. */ 14430 if (dma_mask > DMA_BIT_MASK(32)) { 14431 err = pci_set_dma_mask(pdev, dma_mask); 14432 if (!err) { 14433 dev->features |= NETIF_F_HIGHDMA; 14434 err = pci_set_consistent_dma_mask(pdev, 14435 persist_dma_mask); 14436 if (err < 0) { 14437 dev_err(&pdev->dev, "Unable to obtain 64 bit " 14438 "DMA for consistent allocations\n"); 14439 goto err_out_iounmap; 14440 } 14441 } 14442 } 14443 if (err || dma_mask == DMA_BIT_MASK(32)) { 14444 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 14445 if (err) { 14446 dev_err(&pdev->dev, 14447 "No usable DMA configuration, aborting\n"); 14448 goto err_out_iounmap; 14449 } 14450 } 14451 14452 tg3_init_bufmgr_config(tp); 14453 14454 /* Selectively allow TSO based on operating conditions */ 14455 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) || 14456 (tp->fw_needed && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) 14457 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; 14458 else { 14459 tp->tg3_flags2 &= ~(TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG); 14460 tp->fw_needed = NULL; 14461 } 14462 14463 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) 14464 tp->fw_needed = FIRMWARE_TG3; 14465 14466 /* TSO is on by default on chips that support hardware TSO. 14467 * Firmware TSO on older chips gives lower performance, so it 14468 * is off by default, but can be enabled using ethtool. 14469 */ 14470 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) && 14471 (dev->features & NETIF_F_IP_CSUM)) 14472 dev->features |= NETIF_F_TSO; 14473 14474 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) || 14475 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) { 14476 if (dev->features & NETIF_F_IPV6_CSUM) 14477 dev->features |= NETIF_F_TSO6; 14478 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) || 14479 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 14480 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && 14481 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || 14482 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || 14483 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) 14484 dev->features |= NETIF_F_TSO_ECN; 14485 } 14486 14487 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 && 14488 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) && 14489 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) { 14490 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64; 14491 tp->rx_pending = 63; 14492 } 14493 14494 err = tg3_get_device_address(tp); 14495 if (err) { 14496 dev_err(&pdev->dev, 14497 "Could not obtain valid ethernet address, aborting\n"); 14498 goto err_out_iounmap; 14499 } 14500 14501 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { 14502 tp->aperegs = pci_ioremap_bar(pdev, BAR_2); 14503 if (!tp->aperegs) { 14504 dev_err(&pdev->dev, 14505 "Cannot map APE registers, aborting\n"); 14506 err = -ENOMEM; 14507 goto err_out_iounmap; 14508 } 14509 14510 tg3_ape_lock_init(tp); 14511 14512 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) 14513 tg3_read_dash_ver(tp); 14514 } 14515 14516 /* 14517 * Reset chip in case UNDI or EFI driver did not shutdown 14518 * DMA self test will enable WDMAC and we'll see (spurious) 14519 * pending DMA on the PCI bus at that point. 
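 * (That is: if a pre-OS driver left HOSTCC or WDMAC enabled, halt
 * the chip first; otherwise the WDMAC enable performed by the DMA
 * self test below would surface stale, in-flight DMA.)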
14520 */ 14521 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) || 14522 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { 14523 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE); 14524 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 14525 } 14526 14527 err = tg3_test_dma(tp); 14528 if (err) { 14529 dev_err(&pdev->dev, "DMA engine test failed, aborting\n"); 14530 goto err_out_apeunmap; 14531 } 14532 14533 /* flow control autonegotiation is default behavior */ 14534 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; 14535 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; 14536 14537 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW; 14538 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW; 14539 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW; 14540 for (i = 0; i < TG3_IRQ_MAX_VECS; i++) { 14541 struct tg3_napi *tnapi = &tp->napi[i]; 14542 14543 tnapi->tp = tp; 14544 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING; 14545 14546 tnapi->int_mbox = intmbx; 14547 if (i < 4) 14548 intmbx += 0x8; 14549 else 14550 intmbx += 0x4; 14551 14552 tnapi->consmbox = rcvmbx; 14553 tnapi->prodmbox = sndmbx; 14554 14555 if (i) { 14556 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1); 14557 netif_napi_add(dev, &tnapi->napi, tg3_poll_msix, 64); 14558 } else { 14559 tnapi->coal_now = HOSTCC_MODE_NOW; 14560 netif_napi_add(dev, &tnapi->napi, tg3_poll, 64); 14561 } 14562 14563 if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX)) 14564 break; 14565 14566 /* 14567 * If we support MSIX, we'll be using RSS. If we're using 14568 * RSS, the first vector only handles link interrupts and the 14569 * remaining vectors handle rx and tx interrupts. Reuse the 14570 * mailbox values for the next iteration. The values we setup 14571 * above are still useful for the single vectored mode. 14572 */ 14573 if (!i) 14574 continue; 14575 14576 rcvmbx += 0x8; 14577 14578 if (sndmbx & 0x4) 14579 sndmbx -= 0x4; 14580 else 14581 sndmbx += 0xc; 14582 } 14583 14584 tg3_init_coal(tp); 14585 14586 pci_set_drvdata(pdev, dev); 14587 14588 err = register_netdev(dev); 14589 if (err) { 14590 dev_err(&pdev->dev, "Cannot register net device, aborting\n"); 14591 goto err_out_apeunmap; 14592 } 14593 14594 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n", 14595 tp->board_part_number, 14596 tp->pci_chip_rev_id, 14597 tg3_bus_string(tp, str), 14598 dev->dev_addr); 14599 14600 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) { 14601 struct phy_device *phydev; 14602 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; 14603 netdev_info(dev, 14604 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", 14605 phydev->drv->name, dev_name(&phydev->dev)); 14606 } else 14607 netdev_info(dev, "attached PHY is %s (%s Ethernet) " 14608 "(WireSpeed[%d])\n", tg3_phy_string(tp), 14609 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" : 14610 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" : 14611 "10/100/1000Base-T")), 14612 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0); 14613 14614 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n", 14615 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0, 14616 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0, 14617 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0, 14618 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0, 14619 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0); 14620 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n", 14621 tp->dma_rwctrl, 14622 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 : 14623 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 
40 : 64); 14624 14625 return 0; 14626 14627err_out_apeunmap: 14628 if (tp->aperegs) { 14629 iounmap(tp->aperegs); 14630 tp->aperegs = NULL; 14631 } 14632 14633err_out_iounmap: 14634 if (tp->regs) { 14635 iounmap(tp->regs); 14636 tp->regs = NULL; 14637 } 14638 14639err_out_free_dev: 14640 free_netdev(dev); 14641 14642err_out_free_res: 14643 pci_release_regions(pdev); 14644 14645err_out_disable_pdev: 14646 pci_disable_device(pdev); 14647 pci_set_drvdata(pdev, NULL); 14648 return err; 14649} 14650 14651static void __devexit tg3_remove_one(struct pci_dev *pdev) 14652{ 14653 struct net_device *dev = pci_get_drvdata(pdev); 14654 14655 if (dev) { 14656 struct tg3 *tp = netdev_priv(dev); 14657 14658 if (tp->fw) 14659 release_firmware(tp->fw); 14660 14661 flush_scheduled_work(); 14662 14663 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 14664 tg3_phy_fini(tp); 14665 tg3_mdio_fini(tp); 14666 } 14667 14668 unregister_netdev(dev); 14669 if (tp->aperegs) { 14670 iounmap(tp->aperegs); 14671 tp->aperegs = NULL; 14672 } 14673 if (tp->regs) { 14674 iounmap(tp->regs); 14675 tp->regs = NULL; 14676 } 14677 free_netdev(dev); 14678 pci_release_regions(pdev); 14679 pci_disable_device(pdev); 14680 pci_set_drvdata(pdev, NULL); 14681 } 14682} 14683 14684static int tg3_suspend(struct pci_dev *pdev, pm_message_t state) 14685{ 14686 struct net_device *dev = pci_get_drvdata(pdev); 14687 struct tg3 *tp = netdev_priv(dev); 14688 pci_power_t target_state; 14689 int err; 14690 14691 /* PCI register 4 needs to be saved whether netif_running() or not. 14692 * MSI address and data need to be saved if using MSI and 14693 * netif_running(). 14694 */ 14695 pci_save_state(pdev); 14696 14697 if (!netif_running(dev)) 14698 return 0; 14699 14700 flush_scheduled_work(); 14701 tg3_phy_stop(tp); 14702 tg3_netif_stop(tp); 14703 14704 del_timer_sync(&tp->timer); 14705 14706 tg3_full_lock(tp, 1); 14707 tg3_disable_ints(tp); 14708 tg3_full_unlock(tp); 14709 14710 netif_device_detach(dev); 14711 14712 tg3_full_lock(tp, 0); 14713 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 14714 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE; 14715 tg3_full_unlock(tp); 14716 14717 target_state = pdev->pm_cap ? 
pci_target_state(pdev) : PCI_D3hot; 14718 14719 err = tg3_set_power_state(tp, target_state); 14720 if (err) { 14721 int err2; 14722 14723 tg3_full_lock(tp, 0); 14724 14725 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; 14726 err2 = tg3_restart_hw(tp, 1); 14727 if (err2) 14728 goto out; 14729 14730 tp->timer.expires = jiffies + tp->timer_offset; 14731 add_timer(&tp->timer); 14732 14733 netif_device_attach(dev); 14734 tg3_netif_start(tp); 14735 14736out: 14737 tg3_full_unlock(tp); 14738 14739 if (!err2) 14740 tg3_phy_start(tp); 14741 } 14742 14743 return err; 14744} 14745 14746static int tg3_resume(struct pci_dev *pdev) 14747{ 14748 struct net_device *dev = pci_get_drvdata(pdev); 14749 struct tg3 *tp = netdev_priv(dev); 14750 int err; 14751 14752 pci_restore_state(tp->pdev); 14753 14754 if (!netif_running(dev)) 14755 return 0; 14756 14757 err = tg3_set_power_state(tp, PCI_D0); 14758 if (err) 14759 return err; 14760 14761 netif_device_attach(dev); 14762 14763 tg3_full_lock(tp, 0); 14764 14765 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; 14766 err = tg3_restart_hw(tp, 1); 14767 if (err) 14768 goto out; 14769 14770 tp->timer.expires = jiffies + tp->timer_offset; 14771 add_timer(&tp->timer); 14772 14773 tg3_netif_start(tp); 14774 14775out: 14776 tg3_full_unlock(tp); 14777 14778 if (!err) 14779 tg3_phy_start(tp); 14780 14781 return err; 14782} 14783 14784static struct pci_driver tg3_driver = { 14785 .name = DRV_MODULE_NAME, 14786 .id_table = tg3_pci_tbl, 14787 .probe = tg3_init_one, 14788 .remove = __devexit_p(tg3_remove_one), 14789 .suspend = tg3_suspend, 14790 .resume = tg3_resume 14791}; 14792 14793static int __init tg3_init(void) 14794{ 14795 return pci_register_driver(&tg3_driver); 14796} 14797 14798static void __exit tg3_cleanup(void) 14799{ 14800 pci_unregister_driver(&tg3_driver); 14801} 14802 14803module_init(tg3_init); 14804module_exit(tg3_cleanup);