Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

e1000e: reformat register test code, fix some minor initialization

The register tests should be run with all the proper flags enabled
to maximize the test code coverage and make sure we are as close
as we can get to testing regular traffic.

Reformat the code for readability. Minor cleanups in the descriptor
ring setup.

Signed-off-by: Bruce Allan <bruce.w.allan@intel.com>
Signed-off-by: Auke Kok <auke-jan.h.kok@intel.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>

authored by

Bruce Allan and committed by
Jeff Garzik
cef8c793 69e3fd8c

+63 -60
+63 -60
drivers/net/e1000e/ethtool.c
··· 641 641 tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); 642 642 if (!tx_ring) 643 643 goto err_alloc_tx; 644 + /* 645 + * use a memcpy to save any previously configured 646 + * items like napi structs from having to be 647 + * reinitialized 648 + */ 649 + memcpy(tx_ring, tx_old, sizeof(struct e1000_ring)); 644 650 645 651 rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); 646 652 if (!rx_ring) 647 653 goto err_alloc_rx; 654 + memcpy(rx_ring, rx_old, sizeof(struct e1000_ring)); 648 655 649 656 adapter->tx_ring = tx_ring; 650 657 adapter->rx_ring = rx_ring; ··· 707 700 return err; 708 701 } 709 702 710 - static bool reg_pattern_test_array(struct e1000_adapter *adapter, u64 *data, 711 - int reg, int offset, u32 mask, u32 write) 703 + static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, 704 + int reg, int offset, u32 mask, u32 write) 712 705 { 713 - int i; 714 - u32 read; 706 + u32 pat, val; 715 707 static const u32 test[] = 716 708 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; 717 - for (i = 0; i < ARRAY_SIZE(test); i++) { 709 + for (pat = 0; pat < ARRAY_SIZE(test); pat++) { 718 710 E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset, 719 - (test[i] & write)); 720 - read = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset); 721 - if (read != (test[i] & write & mask)) { 711 + (test[pat] & write)); 712 + val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset); 713 + if (val != (test[pat] & write & mask)) { 722 714 ndev_err(adapter->netdev, "pattern test reg %04X " 723 715 "failed: got 0x%08X expected 0x%08X\n", 724 716 reg + offset, 725 - read, (test[i] & write & mask)); 717 + val, (test[pat] & write & mask)); 726 718 *data = reg; 727 - return true; 719 + return 1; 728 720 } 729 721 } 730 - return false; 722 + return 0; 731 723 } 732 724 733 725 static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, 734 726 int reg, u32 mask, u32 write) 735 727 { 736 - u32 read; 728 + u32 val; 737 729 __ew32(&adapter->hw, reg, write & 
mask); 738 - read = __er32(&adapter->hw, reg); 739 - if ((write & mask) != (read & mask)) { 730 + val = __er32(&adapter->hw, reg); 731 + if ((write & mask) != (val & mask)) { 740 732 ndev_err(adapter->netdev, "set/check reg %04X test failed: " 741 - "got 0x%08X expected 0x%08X\n", reg, (read & mask), 733 + "got 0x%08X expected 0x%08X\n", reg, (val & mask), 742 734 (write & mask)); 743 735 *data = reg; 744 - return true; 736 + return 1; 745 737 } 746 - return false; 738 + return 0; 747 739 } 748 - 749 - #define REG_PATTERN_TEST(R, M, W) \ 750 - do { \ 751 - if (reg_pattern_test_array(adapter, data, R, 0, M, W)) \ 752 - return 1; \ 740 + #define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write) \ 741 + do { \ 742 + if (reg_pattern_test(adapter, data, reg, offset, mask, write)) \ 743 + return 1; \ 753 744 } while (0) 745 + #define REG_PATTERN_TEST(reg, mask, write) \ 746 + REG_PATTERN_TEST_ARRAY(reg, 0, mask, write) 754 747 755 - #define REG_PATTERN_TEST_ARRAY(R, offset, M, W) \ 756 - do { \ 757 - if (reg_pattern_test_array(adapter, data, R, offset, M, W)) \ 758 - return 1; \ 759 - } while (0) 760 - 761 - #define REG_SET_AND_CHECK(R, M, W) \ 762 - do { \ 763 - if (reg_set_and_check(adapter, data, R, M, W)) \ 764 - return 1; \ 748 + #define REG_SET_AND_CHECK(reg, mask, write) \ 749 + do { \ 750 + if (reg_set_and_check(adapter, data, reg, mask, write)) \ 751 + return 1; \ 765 752 } while (0) 766 753 767 754 static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) ··· 1039 1038 struct pci_dev *pdev = adapter->pdev; 1040 1039 struct e1000_hw *hw = &adapter->hw; 1041 1040 u32 rctl; 1042 - int size; 1043 1041 int i; 1044 1042 int ret_val; 1045 1043 ··· 1047 1047 if (!tx_ring->count) 1048 1048 tx_ring->count = E1000_DEFAULT_TXD; 1049 1049 1050 - size = tx_ring->count * sizeof(struct e1000_buffer); 1051 - tx_ring->buffer_info = kmalloc(size, GFP_KERNEL); 1052 - if (!tx_ring->buffer_info) { 1050 + tx_ring->buffer_info = kcalloc(tx_ring->count, 1051 + sizeof(struct 
e1000_buffer), 1052 + GFP_KERNEL); 1053 + if (!(tx_ring->buffer_info)) { 1053 1054 ret_val = 1; 1054 1055 goto err_nomem; 1055 1056 } 1056 - memset(tx_ring->buffer_info, 0, size); 1057 1057 1058 1058 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); 1059 1059 tx_ring->size = ALIGN(tx_ring->size, 4096); ··· 1063 1063 ret_val = 2; 1064 1064 goto err_nomem; 1065 1065 } 1066 - memset(tx_ring->desc, 0, tx_ring->size); 1067 1066 tx_ring->next_to_use = 0; 1068 1067 tx_ring->next_to_clean = 0; 1069 1068 1070 - ew32(TDBAL, 1071 - ((u64) tx_ring->dma & 0x00000000FFFFFFFF)); 1069 + ew32(TDBAL, ((u64) tx_ring->dma & 0x00000000FFFFFFFF)); 1072 1070 ew32(TDBAH, ((u64) tx_ring->dma >> 32)); 1073 - ew32(TDLEN, 1074 - tx_ring->count * sizeof(struct e1000_tx_desc)); 1071 + ew32(TDLEN, tx_ring->count * sizeof(struct e1000_tx_desc)); 1075 1072 ew32(TDH, 0); 1076 1073 ew32(TDT, 0); 1077 - ew32(TCTL, 1078 - E1000_TCTL_PSP | E1000_TCTL_EN | 1079 - E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | 1080 - E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT); 1074 + ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | E1000_TCTL_MULR | 1075 + E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | 1076 + E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT); 1081 1077 1082 1078 for (i = 0; i < tx_ring->count; i++) { 1083 1079 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); ··· 1095 1099 ret_val = 4; 1096 1100 goto err_nomem; 1097 1101 } 1098 - tx_desc->buffer_addr = cpu_to_le64( 1099 - tx_ring->buffer_info[i].dma); 1102 + tx_desc->buffer_addr = cpu_to_le64(tx_ring->buffer_info[i].dma); 1100 1103 tx_desc->lower.data = cpu_to_le32(skb->len); 1101 1104 tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | 1102 1105 E1000_TXD_CMD_IFCS | 1103 - E1000_TXD_CMD_RPS); 1106 + E1000_TXD_CMD_RS); 1104 1107 tx_desc->upper.data = 0; 1105 1108 } 1106 1109 ··· 1108 1113 if (!rx_ring->count) 1109 1114 rx_ring->count = E1000_DEFAULT_RXD; 1110 1115 1111 - size = rx_ring->count * sizeof(struct e1000_buffer); 1112 - 
rx_ring->buffer_info = kmalloc(size, GFP_KERNEL); 1113 - if (!rx_ring->buffer_info) { 1116 + rx_ring->buffer_info = kcalloc(rx_ring->count, 1117 + sizeof(struct e1000_buffer), 1118 + GFP_KERNEL); 1119 + if (!(rx_ring->buffer_info)) { 1114 1120 ret_val = 5; 1115 1121 goto err_nomem; 1116 1122 } 1117 - memset(rx_ring->buffer_info, 0, size); 1118 1123 1119 1124 rx_ring->size = rx_ring->count * sizeof(struct e1000_rx_desc); 1120 1125 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, ··· 1123 1128 ret_val = 6; 1124 1129 goto err_nomem; 1125 1130 } 1126 - memset(rx_ring->desc, 0, rx_ring->size); 1127 1131 rx_ring->next_to_use = 0; 1128 1132 rx_ring->next_to_clean = 0; 1129 1133 ··· 1134 1140 ew32(RDH, 0); 1135 1141 ew32(RDT, 0); 1136 1142 rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | 1143 + E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE | 1144 + E1000_RCTL_SBP | E1000_RCTL_SECRC | 1137 1145 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | 1138 1146 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 1139 1147 ew32(RCTL, rctl); ··· 1199 1203 1200 1204 ctrl_reg = er32(CTRL); 1201 1205 1202 - if (hw->phy.type == e1000_phy_ife) { 1206 + switch (hw->phy.type) { 1207 + case e1000_phy_ife: 1203 1208 /* force 100, set loopback */ 1204 1209 e1e_wphy(hw, PHY_CONTROL, 0x6100); 1205 1210 ··· 1210 1213 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ 1211 1214 E1000_CTRL_SPD_100 |/* Force Speed to 100 */ 1212 1215 E1000_CTRL_FD); /* Force Duplex to FULL */ 1213 - } else { 1216 + break; 1217 + default: 1214 1218 /* force 1000, set loopback */ 1215 1219 e1e_wphy(hw, PHY_CONTROL, 0x4140); 1220 + mdelay(250); 1216 1221 1217 1222 /* Now set up the MAC to the same speed/duplex as the PHY. 
*/ 1218 1223 ctrl_reg = er32(CTRL); ··· 1223 1224 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ 1224 1225 E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ 1225 1226 E1000_CTRL_FD); /* Force Duplex to FULL */ 1227 + 1228 + if ((adapter->hw.mac.type == e1000_ich8lan) || 1229 + (adapter->hw.mac.type == e1000_ich9lan)) 1230 + ctrl_reg |= E1000_CTRL_SLU; /* Set Link Up */ 1226 1231 } 1227 1232 1228 1233 if (hw->phy.media_type == e1000_media_type_copper && ··· 1328 1325 #define KMRNCTRLSTA_OPMODE (0x1F << 16) 1329 1326 #define KMRNCTRLSTA_OPMODE_1GB_FD_GMII 0x0582 1330 1327 ew32(KMRNCTRLSTA, 1331 - (KMRNCTRLSTA_OPMODE | KMRNCTRLSTA_OPMODE_1GB_FD_GMII)); 1328 + (KMRNCTRLSTA_OPMODE | KMRNCTRLSTA_OPMODE_1GB_FD_GMII)); 1332 1329 1333 1330 return 0; 1334 1331 } ··· 1454 1451 l = 0; 1455 1452 for (j = 0; j <= lc; j++) { /* loop count loop */ 1456 1453 for (i = 0; i < 64; i++) { /* send the packets */ 1457 - e1000_create_lbtest_frame( 1458 - tx_ring->buffer_info[i].skb, 1024); 1454 + e1000_create_lbtest_frame(tx_ring->buffer_info[k].skb, 1455 + 1024); 1459 1456 pci_dma_sync_single_for_device(pdev, 1460 1457 tx_ring->buffer_info[k].dma, 1461 1458 tx_ring->buffer_info[k].length, ··· 1490 1487 ret_val = 13; /* ret_val is the same as mis-compare */ 1491 1488 break; 1492 1489 } 1493 - if (jiffies >= (time + 2)) { 1490 + if (jiffies >= (time + 20)) { 1494 1491 ret_val = 14; /* error code for time out error */ 1495 1492 break; 1496 1493 }