Merge of rsync://rsync.kernel.org/pub/scm/linux/kernel/git/davem/tg3-2.6

+556 -15
drivers/net/tg3.c
··· 133 /* number of ETHTOOL_GSTATS u64's */ 134 #define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64)) 135 136 static char version[] __devinitdata = 137 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 138 ··· 316 { "nic_irqs" }, 317 { "nic_avoided_irqs" }, 318 { "nic_tx_threshold_hit" } 319 }; 320 321 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val) ··· 3083 } 3084 3085 static int tg3_init_hw(struct tg3 *); 3086 - static int tg3_halt(struct tg3 *, int); 3087 3088 #ifdef CONFIG_NET_POLL_CONTROLLER 3089 static void tg3_poll_controller(struct net_device *dev) ··· 3107 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER; 3108 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER; 3109 3110 - tg3_halt(tp, 0); 3111 tg3_init_hw(tp); 3112 3113 tg3_netif_start(tp); ··· 3453 spin_lock_irq(&tp->lock); 3454 spin_lock(&tp->tx_lock); 3455 3456 - tg3_halt(tp, 1); 3457 3458 tg3_set_mtu(dev, tp, new_mtu); 3459 ··· 4144 } 4145 4146 /* tp->lock is held. 
*/ 4147 - static int tg3_halt(struct tg3 *tp, int silent) 4148 { 4149 int err; 4150 4151 tg3_stop_fw(tp); 4152 4153 - tg3_write_sig_pre_reset(tp, RESET_KIND_SHUTDOWN); 4154 4155 tg3_abort_hw(tp, silent); 4156 err = tg3_chip_reset(tp); 4157 4158 - tg3_write_sig_legacy(tp, RESET_KIND_SHUTDOWN); 4159 - tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN); 4160 4161 if (err) 4162 return err; ··· 4370 */ 4371 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG; 4372 4373 err = tg3_halt_cpu(tp, cpu_base); 4374 if (err) 4375 goto out; 4376 ··· 5899 int err, i; 5900 u32 int_mbox = 0; 5901 5902 tg3_disable_ints(tp); 5903 5904 free_irq(tp->pdev->irq, dev); ··· 6005 spin_lock_irq(&tp->lock); 6006 spin_lock(&tp->tx_lock); 6007 6008 - tg3_halt(tp, 1); 6009 err = tg3_init_hw(tp); 6010 6011 spin_unlock(&tp->tx_lock); ··· 6081 6082 err = tg3_init_hw(tp); 6083 if (err) { 6084 - tg3_halt(tp, 1); 6085 tg3_free_rings(tp); 6086 } else { 6087 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) ··· 6125 pci_disable_msi(tp->pdev); 6126 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; 6127 } 6128 - tg3_halt(tp, 1); 6129 tg3_free_rings(tp); 6130 tg3_free_consistent(tp); 6131 ··· 6398 6399 tg3_disable_ints(tp); 6400 6401 - tg3_halt(tp, 1); 6402 tg3_free_rings(tp); 6403 tp->tg3_flags &= 6404 ~(TG3_FLAG_INIT_COMPLETE | ··· 7118 tp->tx_pending = ering->tx_pending; 7119 7120 if (netif_running(dev)) { 7121 - tg3_halt(tp, 1); 7122 tg3_init_hw(tp); 7123 tg3_netif_start(tp); 7124 } ··· 7161 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE; 7162 7163 if (netif_running(dev)) { 7164 - tg3_halt(tp, 1); 7165 tg3_init_hw(tp); 7166 tg3_netif_start(tp); 7167 } ··· 7220 return TG3_NUM_STATS; 7221 } 7222 7223 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf) 7224 { 7225 switch (stringset) { 7226 case ETH_SS_STATS: 7227 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys)); 7228 break; 7229 default: 7230 WARN_ON(1); /* we need a WARN() */ ··· 7245 { 7246 struct tg3 *tp = netdev_priv(dev); 7247 memcpy(tmp_stats, 
tg3_get_estats(tp), sizeof(tp->estats)); 7248 } 7249 7250 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) ··· 7870 .get_tso = ethtool_op_get_tso, 7871 .set_tso = tg3_set_tso, 7872 #endif 7873 .get_strings = tg3_get_strings, 7874 .get_stats_count = tg3_get_stats_count, 7875 .get_ethtool_stats = tg3_get_ethtool_stats, ··· 10019 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { 10020 pci_save_state(tp->pdev); 10021 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE); 10022 - tg3_halt(tp, 1); 10023 } 10024 10025 err = tg3_test_dma(tp); ··· 10146 10147 spin_lock_irq(&tp->lock); 10148 spin_lock(&tp->tx_lock); 10149 - tg3_halt(tp, 1); 10150 spin_unlock(&tp->tx_lock); 10151 spin_unlock_irq(&tp->lock); 10152
··· 133 /* number of ETHTOOL_GSTATS u64's */ 134 #define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64)) 135 136 + #define TG3_NUM_TEST 6 137 + 138 static char version[] __devinitdata = 139 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 140 ··· 314 { "nic_irqs" }, 315 { "nic_avoided_irqs" }, 316 { "nic_tx_threshold_hit" } 317 + }; 318 + 319 + static struct { 320 + const char string[ETH_GSTRING_LEN]; 321 + } ethtool_test_keys[TG3_NUM_TEST] = { 322 + { "nvram test (online) " }, 323 + { "link test (online) " }, 324 + { "register test (offline)" }, 325 + { "memory test (offline)" }, 326 + { "loopback test (offline)" }, 327 + { "interrupt test (offline)" }, 328 }; 329 330 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val) ··· 3070 } 3071 3072 static int tg3_init_hw(struct tg3 *); 3073 + static int tg3_halt(struct tg3 *, int, int); 3074 3075 #ifdef CONFIG_NET_POLL_CONTROLLER 3076 static void tg3_poll_controller(struct net_device *dev) ··· 3094 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER; 3095 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER; 3096 3097 + tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); 3098 tg3_init_hw(tp); 3099 3100 tg3_netif_start(tp); ··· 3440 spin_lock_irq(&tp->lock); 3441 spin_lock(&tp->tx_lock); 3442 3443 + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 3444 3445 tg3_set_mtu(dev, tp, new_mtu); 3446 ··· 4131 } 4132 4133 /* tp->lock is held. */ 4134 + static int tg3_halt(struct tg3 *tp, int kind, int silent) 4135 { 4136 int err; 4137 4138 tg3_stop_fw(tp); 4139 4140 + tg3_write_sig_pre_reset(tp, kind); 4141 4142 tg3_abort_hw(tp, silent); 4143 err = tg3_chip_reset(tp); 4144 4145 + tg3_write_sig_legacy(tp, kind); 4146 + tg3_write_sig_post_reset(tp, kind); 4147 4148 if (err) 4149 return err; ··· 4357 */ 4358 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG; 4359 4360 + /* It is possible that bootcode is still loading at this point. 4361 + * Get the nvram lock first before halting the cpu. 
4362 + */ 4363 + tg3_nvram_lock(tp); 4364 err = tg3_halt_cpu(tp, cpu_base); 4365 + tg3_nvram_unlock(tp); 4366 if (err) 4367 goto out; 4368 ··· 5881 int err, i; 5882 u32 int_mbox = 0; 5883 5884 + if (!netif_running(dev)) 5885 + return -ENODEV; 5886 + 5887 tg3_disable_ints(tp); 5888 5889 free_irq(tp->pdev->irq, dev); ··· 5984 spin_lock_irq(&tp->lock); 5985 spin_lock(&tp->tx_lock); 5986 5987 + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 5988 err = tg3_init_hw(tp); 5989 5990 spin_unlock(&tp->tx_lock); ··· 6060 6061 err = tg3_init_hw(tp); 6062 if (err) { 6063 + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 6064 tg3_free_rings(tp); 6065 } else { 6066 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) ··· 6104 pci_disable_msi(tp->pdev); 6105 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; 6106 } 6107 + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 6108 tg3_free_rings(tp); 6109 tg3_free_consistent(tp); 6110 ··· 6377 6378 tg3_disable_ints(tp); 6379 6380 + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 6381 tg3_free_rings(tp); 6382 tp->tg3_flags &= 6383 ~(TG3_FLAG_INIT_COMPLETE | ··· 7097 tp->tx_pending = ering->tx_pending; 7098 7099 if (netif_running(dev)) { 7100 + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 7101 tg3_init_hw(tp); 7102 tg3_netif_start(tp); 7103 } ··· 7140 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE; 7141 7142 if (netif_running(dev)) { 7143 + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 7144 tg3_init_hw(tp); 7145 tg3_netif_start(tp); 7146 } ··· 7199 return TG3_NUM_STATS; 7200 } 7201 7202 + static int tg3_get_test_count (struct net_device *dev) 7203 + { 7204 + return TG3_NUM_TEST; 7205 + } 7206 + 7207 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf) 7208 { 7209 switch (stringset) { 7210 case ETH_SS_STATS: 7211 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys)); 7212 + break; 7213 + case ETH_SS_TEST: 7214 + memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys)); 7215 break; 7216 default: 7217 WARN_ON(1); /* we need a WARN() */ ··· 7216 { 7217 struct tg3 *tp = netdev_priv(dev); 7218 
memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats)); 7219 + } 7220 + 7221 + #define NVRAM_TEST_SIZE 0x100 7222 + 7223 + static int tg3_test_nvram(struct tg3 *tp) 7224 + { 7225 + u32 *buf, csum; 7226 + int i, j, err = 0; 7227 + 7228 + buf = kmalloc(NVRAM_TEST_SIZE, GFP_KERNEL); 7229 + if (buf == NULL) 7230 + return -ENOMEM; 7231 + 7232 + for (i = 0, j = 0; i < NVRAM_TEST_SIZE; i += 4, j++) { 7233 + u32 val; 7234 + 7235 + if ((err = tg3_nvram_read(tp, i, &val)) != 0) 7236 + break; 7237 + buf[j] = cpu_to_le32(val); 7238 + } 7239 + if (i < NVRAM_TEST_SIZE) 7240 + goto out; 7241 + 7242 + err = -EIO; 7243 + if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) 7244 + goto out; 7245 + 7246 + /* Bootstrap checksum at offset 0x10 */ 7247 + csum = calc_crc((unsigned char *) buf, 0x10); 7248 + if(csum != cpu_to_le32(buf[0x10/4])) 7249 + goto out; 7250 + 7251 + /* Manufacturing block starts at offset 0x74, checksum at 0xfc */ 7252 + csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88); 7253 + if (csum != cpu_to_le32(buf[0xfc/4])) 7254 + goto out; 7255 + 7256 + err = 0; 7257 + 7258 + out: 7259 + kfree(buf); 7260 + return err; 7261 + } 7262 + 7263 + #define TG3_SERDES_TIMEOUT_SEC 2 7264 + #define TG3_COPPER_TIMEOUT_SEC 6 7265 + 7266 + static int tg3_test_link(struct tg3 *tp) 7267 + { 7268 + int i, max; 7269 + 7270 + if (!netif_running(tp->dev)) 7271 + return -ENODEV; 7272 + 7273 + if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) 7274 + max = TG3_SERDES_TIMEOUT_SEC; 7275 + else 7276 + max = TG3_COPPER_TIMEOUT_SEC; 7277 + 7278 + for (i = 0; i < max; i++) { 7279 + if (netif_carrier_ok(tp->dev)) 7280 + return 0; 7281 + 7282 + if (msleep_interruptible(1000)) 7283 + break; 7284 + } 7285 + 7286 + return -EIO; 7287 + } 7288 + 7289 + /* Only test the commonly used registers */ 7290 + static int tg3_test_registers(struct tg3 *tp) 7291 + { 7292 + int i, is_5705; 7293 + u32 offset, read_mask, write_mask, val, save_val, read_val; 7294 + static struct { 7295 + u16 offset; 7296 + u16 flags; 7297 + 
#define TG3_FL_5705 0x1 7298 + #define TG3_FL_NOT_5705 0x2 7299 + #define TG3_FL_NOT_5788 0x4 7300 + u32 read_mask; 7301 + u32 write_mask; 7302 + } reg_tbl[] = { 7303 + /* MAC Control Registers */ 7304 + { MAC_MODE, TG3_FL_NOT_5705, 7305 + 0x00000000, 0x00ef6f8c }, 7306 + { MAC_MODE, TG3_FL_5705, 7307 + 0x00000000, 0x01ef6b8c }, 7308 + { MAC_STATUS, TG3_FL_NOT_5705, 7309 + 0x03800107, 0x00000000 }, 7310 + { MAC_STATUS, TG3_FL_5705, 7311 + 0x03800100, 0x00000000 }, 7312 + { MAC_ADDR_0_HIGH, 0x0000, 7313 + 0x00000000, 0x0000ffff }, 7314 + { MAC_ADDR_0_LOW, 0x0000, 7315 + 0x00000000, 0xffffffff }, 7316 + { MAC_RX_MTU_SIZE, 0x0000, 7317 + 0x00000000, 0x0000ffff }, 7318 + { MAC_TX_MODE, 0x0000, 7319 + 0x00000000, 0x00000070 }, 7320 + { MAC_TX_LENGTHS, 0x0000, 7321 + 0x00000000, 0x00003fff }, 7322 + { MAC_RX_MODE, TG3_FL_NOT_5705, 7323 + 0x00000000, 0x000007fc }, 7324 + { MAC_RX_MODE, TG3_FL_5705, 7325 + 0x00000000, 0x000007dc }, 7326 + { MAC_HASH_REG_0, 0x0000, 7327 + 0x00000000, 0xffffffff }, 7328 + { MAC_HASH_REG_1, 0x0000, 7329 + 0x00000000, 0xffffffff }, 7330 + { MAC_HASH_REG_2, 0x0000, 7331 + 0x00000000, 0xffffffff }, 7332 + { MAC_HASH_REG_3, 0x0000, 7333 + 0x00000000, 0xffffffff }, 7334 + 7335 + /* Receive Data and Receive BD Initiator Control Registers. */ 7336 + { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705, 7337 + 0x00000000, 0xffffffff }, 7338 + { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705, 7339 + 0x00000000, 0xffffffff }, 7340 + { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705, 7341 + 0x00000000, 0x00000003 }, 7342 + { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705, 7343 + 0x00000000, 0xffffffff }, 7344 + { RCVDBDI_STD_BD+0, 0x0000, 7345 + 0x00000000, 0xffffffff }, 7346 + { RCVDBDI_STD_BD+4, 0x0000, 7347 + 0x00000000, 0xffffffff }, 7348 + { RCVDBDI_STD_BD+8, 0x0000, 7349 + 0x00000000, 0xffff0002 }, 7350 + { RCVDBDI_STD_BD+0xc, 0x0000, 7351 + 0x00000000, 0xffffffff }, 7352 + 7353 + /* Receive BD Initiator Control Registers. 
*/ 7354 + { RCVBDI_STD_THRESH, TG3_FL_NOT_5705, 7355 + 0x00000000, 0xffffffff }, 7356 + { RCVBDI_STD_THRESH, TG3_FL_5705, 7357 + 0x00000000, 0x000003ff }, 7358 + { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705, 7359 + 0x00000000, 0xffffffff }, 7360 + 7361 + /* Host Coalescing Control Registers. */ 7362 + { HOSTCC_MODE, TG3_FL_NOT_5705, 7363 + 0x00000000, 0x00000004 }, 7364 + { HOSTCC_MODE, TG3_FL_5705, 7365 + 0x00000000, 0x000000f6 }, 7366 + { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705, 7367 + 0x00000000, 0xffffffff }, 7368 + { HOSTCC_RXCOL_TICKS, TG3_FL_5705, 7369 + 0x00000000, 0x000003ff }, 7370 + { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705, 7371 + 0x00000000, 0xffffffff }, 7372 + { HOSTCC_TXCOL_TICKS, TG3_FL_5705, 7373 + 0x00000000, 0x000003ff }, 7374 + { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705, 7375 + 0x00000000, 0xffffffff }, 7376 + { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, 7377 + 0x00000000, 0x000000ff }, 7378 + { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705, 7379 + 0x00000000, 0xffffffff }, 7380 + { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, 7381 + 0x00000000, 0x000000ff }, 7382 + { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705, 7383 + 0x00000000, 0xffffffff }, 7384 + { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705, 7385 + 0x00000000, 0xffffffff }, 7386 + { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705, 7387 + 0x00000000, 0xffffffff }, 7388 + { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, 7389 + 0x00000000, 0x000000ff }, 7390 + { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705, 7391 + 0x00000000, 0xffffffff }, 7392 + { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, 7393 + 0x00000000, 0x000000ff }, 7394 + { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705, 7395 + 0x00000000, 0xffffffff }, 7396 + { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705, 7397 + 0x00000000, 0xffffffff }, 7398 + { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705, 7399 + 0x00000000, 0xffffffff }, 7400 + { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000, 7401 + 0x00000000, 0xffffffff }, 7402 + { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000, 
7403 + 0x00000000, 0xffffffff }, 7404 + { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000, 7405 + 0xffffffff, 0x00000000 }, 7406 + { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000, 7407 + 0xffffffff, 0x00000000 }, 7408 + 7409 + /* Buffer Manager Control Registers. */ 7410 + { BUFMGR_MB_POOL_ADDR, 0x0000, 7411 + 0x00000000, 0x007fff80 }, 7412 + { BUFMGR_MB_POOL_SIZE, 0x0000, 7413 + 0x00000000, 0x007fffff }, 7414 + { BUFMGR_MB_RDMA_LOW_WATER, 0x0000, 7415 + 0x00000000, 0x0000003f }, 7416 + { BUFMGR_MB_MACRX_LOW_WATER, 0x0000, 7417 + 0x00000000, 0x000001ff }, 7418 + { BUFMGR_MB_HIGH_WATER, 0x0000, 7419 + 0x00000000, 0x000001ff }, 7420 + { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705, 7421 + 0xffffffff, 0x00000000 }, 7422 + { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705, 7423 + 0xffffffff, 0x00000000 }, 7424 + 7425 + /* Mailbox Registers */ 7426 + { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000, 7427 + 0x00000000, 0x000001ff }, 7428 + { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705, 7429 + 0x00000000, 0x000001ff }, 7430 + { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000, 7431 + 0x00000000, 0x000007ff }, 7432 + { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000, 7433 + 0x00000000, 0x000001ff }, 7434 + 7435 + { 0xffff, 0x0000, 0x00000000, 0x00000000 }, 7436 + }; 7437 + 7438 + if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) 7439 + is_5705 = 1; 7440 + else 7441 + is_5705 = 0; 7442 + 7443 + for (i = 0; reg_tbl[i].offset != 0xffff; i++) { 7444 + if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705)) 7445 + continue; 7446 + 7447 + if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705)) 7448 + continue; 7449 + 7450 + if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) && 7451 + (reg_tbl[i].flags & TG3_FL_NOT_5788)) 7452 + continue; 7453 + 7454 + offset = (u32) reg_tbl[i].offset; 7455 + read_mask = reg_tbl[i].read_mask; 7456 + write_mask = reg_tbl[i].write_mask; 7457 + 7458 + /* Save the original register content */ 7459 + save_val = tr32(offset); 7460 + 7461 + /* Determine the read-only value. 
*/ 7462 + read_val = save_val & read_mask; 7463 + 7464 + /* Write zero to the register, then make sure the read-only bits 7465 + * are not changed and the read/write bits are all zeros. 7466 + */ 7467 + tw32(offset, 0); 7468 + 7469 + val = tr32(offset); 7470 + 7471 + /* Test the read-only and read/write bits. */ 7472 + if (((val & read_mask) != read_val) || (val & write_mask)) 7473 + goto out; 7474 + 7475 + /* Write ones to all the bits defined by RdMask and WrMask, then 7476 + * make sure the read-only bits are not changed and the 7477 + * read/write bits are all ones. 7478 + */ 7479 + tw32(offset, read_mask | write_mask); 7480 + 7481 + val = tr32(offset); 7482 + 7483 + /* Test the read-only bits. */ 7484 + if ((val & read_mask) != read_val) 7485 + goto out; 7486 + 7487 + /* Test the read/write bits. */ 7488 + if ((val & write_mask) != write_mask) 7489 + goto out; 7490 + 7491 + tw32(offset, save_val); 7492 + } 7493 + 7494 + return 0; 7495 + 7496 + out: 7497 + printk(KERN_ERR PFX "Register test failed at offset %x\n", offset); 7498 + tw32(offset, save_val); 7499 + return -EIO; 7500 + } 7501 + 7502 + static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len) 7503 + { 7504 + static u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a }; 7505 + int i; 7506 + u32 j; 7507 + 7508 + for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) { 7509 + for (j = 0; j < len; j += 4) { 7510 + u32 val; 7511 + 7512 + tg3_write_mem(tp, offset + j, test_pattern[i]); 7513 + tg3_read_mem(tp, offset + j, &val); 7514 + if (val != test_pattern[i]) 7515 + return -EIO; 7516 + } 7517 + } 7518 + return 0; 7519 + } 7520 + 7521 + static int tg3_test_memory(struct tg3 *tp) 7522 + { 7523 + static struct mem_entry { 7524 + u32 offset; 7525 + u32 len; 7526 + } mem_tbl_570x[] = { 7527 + { 0x00000000, 0x01000}, 7528 + { 0x00002000, 0x1c000}, 7529 + { 0xffffffff, 0x00000} 7530 + }, mem_tbl_5705[] = { 7531 + { 0x00000100, 0x0000c}, 7532 + { 0x00000200, 0x00008}, 7533 + { 0x00000b50, 0x00400}, 
7534 + { 0x00004000, 0x00800}, 7535 + { 0x00006000, 0x01000}, 7536 + { 0x00008000, 0x02000}, 7537 + { 0x00010000, 0x0e000}, 7538 + { 0xffffffff, 0x00000} 7539 + }; 7540 + struct mem_entry *mem_tbl; 7541 + int err = 0; 7542 + int i; 7543 + 7544 + if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) 7545 + mem_tbl = mem_tbl_5705; 7546 + else 7547 + mem_tbl = mem_tbl_570x; 7548 + 7549 + for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) { 7550 + if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset, 7551 + mem_tbl[i].len)) != 0) 7552 + break; 7553 + } 7554 + 7555 + return err; 7556 + } 7557 + 7558 + static int tg3_test_loopback(struct tg3 *tp) 7559 + { 7560 + u32 mac_mode, send_idx, rx_start_idx, rx_idx, tx_idx, opaque_key; 7561 + u32 desc_idx; 7562 + struct sk_buff *skb, *rx_skb; 7563 + u8 *tx_data; 7564 + dma_addr_t map; 7565 + int num_pkts, tx_len, rx_len, i, err; 7566 + struct tg3_rx_buffer_desc *desc; 7567 + 7568 + if (!netif_running(tp->dev)) 7569 + return -ENODEV; 7570 + 7571 + err = -EIO; 7572 + 7573 + tg3_abort_hw(tp, 1); 7574 + 7575 + /* Clearing this flag to keep interrupts disabled */ 7576 + tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE; 7577 + tg3_reset_hw(tp); 7578 + 7579 + mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) | 7580 + MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY | 7581 + MAC_MODE_PORT_MODE_GMII; 7582 + tw32(MAC_MODE, mac_mode); 7583 + 7584 + tx_len = 1514; 7585 + skb = dev_alloc_skb(tx_len); 7586 + tx_data = skb_put(skb, tx_len); 7587 + memcpy(tx_data, tp->dev->dev_addr, 6); 7588 + memset(tx_data + 6, 0x0, 8); 7589 + 7590 + tw32(MAC_RX_MTU_SIZE, tx_len + 4); 7591 + 7592 + for (i = 14; i < tx_len; i++) 7593 + tx_data[i] = (u8) (i & 0xff); 7594 + 7595 + map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE); 7596 + 7597 + tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 7598 + HOSTCC_MODE_NOW); 7599 + 7600 + udelay(10); 7601 + 7602 + rx_start_idx = tp->hw_status->idx[0].rx_producer; 7603 + 7604 + send_idx = 0; 7605 + num_pkts = 0; 
7606 + 7607 + tg3_set_txd(tp, send_idx, map, tx_len, 0, 1); 7608 + 7609 + send_idx++; 7610 + num_pkts++; 7611 + 7612 + tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, send_idx); 7613 + tr32(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW); 7614 + 7615 + udelay(10); 7616 + 7617 + for (i = 0; i < 10; i++) { 7618 + tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 7619 + HOSTCC_MODE_NOW); 7620 + 7621 + udelay(10); 7622 + 7623 + tx_idx = tp->hw_status->idx[0].tx_consumer; 7624 + rx_idx = tp->hw_status->idx[0].rx_producer; 7625 + if ((tx_idx == send_idx) && 7626 + (rx_idx == (rx_start_idx + num_pkts))) 7627 + break; 7628 + } 7629 + 7630 + pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE); 7631 + dev_kfree_skb(skb); 7632 + 7633 + if (tx_idx != send_idx) 7634 + goto out; 7635 + 7636 + if (rx_idx != rx_start_idx + num_pkts) 7637 + goto out; 7638 + 7639 + desc = &tp->rx_rcb[rx_start_idx]; 7640 + desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; 7641 + opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; 7642 + if (opaque_key != RXD_OPAQUE_RING_STD) 7643 + goto out; 7644 + 7645 + if ((desc->err_vlan & RXD_ERR_MASK) != 0 && 7646 + (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) 7647 + goto out; 7648 + 7649 + rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; 7650 + if (rx_len != tx_len) 7651 + goto out; 7652 + 7653 + rx_skb = tp->rx_std_buffers[desc_idx].skb; 7654 + 7655 + map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping); 7656 + pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE); 7657 + 7658 + for (i = 14; i < tx_len; i++) { 7659 + if (*(rx_skb->data + i) != (u8) (i & 0xff)) 7660 + goto out; 7661 + } 7662 + err = 0; 7663 + 7664 + /* tg3_free_rings will unmap and free the rx_skb */ 7665 + out: 7666 + return err; 7667 + } 7668 + 7669 + static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, 7670 + u64 *data) 7671 + { 7672 + struct tg3 *tp = netdev_priv(dev); 7673 + 7674 + memset(data, 
0, sizeof(u64) * TG3_NUM_TEST); 7675 + 7676 + if (tg3_test_nvram(tp) != 0) { 7677 + etest->flags |= ETH_TEST_FL_FAILED; 7678 + data[0] = 1; 7679 + } 7680 + if (tg3_test_link(tp) != 0) { 7681 + etest->flags |= ETH_TEST_FL_FAILED; 7682 + data[1] = 1; 7683 + } 7684 + if (etest->flags & ETH_TEST_FL_OFFLINE) { 7685 + if (netif_running(dev)) 7686 + tg3_netif_stop(tp); 7687 + 7688 + spin_lock_irq(&tp->lock); 7689 + spin_lock(&tp->tx_lock); 7690 + 7691 + tg3_halt(tp, RESET_KIND_SUSPEND, 1); 7692 + tg3_nvram_lock(tp); 7693 + tg3_halt_cpu(tp, RX_CPU_BASE); 7694 + if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 7695 + tg3_halt_cpu(tp, TX_CPU_BASE); 7696 + tg3_nvram_unlock(tp); 7697 + 7698 + if (tg3_test_registers(tp) != 0) { 7699 + etest->flags |= ETH_TEST_FL_FAILED; 7700 + data[2] = 1; 7701 + } 7702 + if (tg3_test_memory(tp) != 0) { 7703 + etest->flags |= ETH_TEST_FL_FAILED; 7704 + data[3] = 1; 7705 + } 7706 + if (tg3_test_loopback(tp) != 0) { 7707 + etest->flags |= ETH_TEST_FL_FAILED; 7708 + data[4] = 1; 7709 + } 7710 + 7711 + spin_unlock(&tp->tx_lock); 7712 + spin_unlock_irq(&tp->lock); 7713 + if (tg3_test_interrupt(tp) != 0) { 7714 + etest->flags |= ETH_TEST_FL_FAILED; 7715 + data[5] = 1; 7716 + } 7717 + spin_lock_irq(&tp->lock); 7718 + spin_lock(&tp->tx_lock); 7719 + 7720 + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 7721 + if (netif_running(dev)) { 7722 + tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; 7723 + tg3_init_hw(tp); 7724 + tg3_netif_start(tp); 7725 + } 7726 + spin_unlock(&tp->tx_lock); 7727 + spin_unlock_irq(&tp->lock); 7728 + } 7729 } 7730 7731 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) ··· 7331 .get_tso = ethtool_op_get_tso, 7332 .set_tso = tg3_set_tso, 7333 #endif 7334 + .self_test_count = tg3_get_test_count, 7335 + .self_test = tg3_self_test, 7336 .get_strings = tg3_get_strings, 7337 .get_stats_count = tg3_get_stats_count, 7338 .get_ethtool_stats = tg3_get_ethtool_stats, ··· 9478 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { 9479 
pci_save_state(tp->pdev); 9480 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE); 9481 + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 9482 } 9483 9484 err = tg3_test_dma(tp); ··· 9605 9606 spin_lock_irq(&tp->lock); 9607 spin_lock(&tp->tx_lock); 9608 + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 9609 spin_unlock(&tp->tx_lock); 9610 spin_unlock_irq(&tp->lock); 9611