Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch '1GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
1GbE Intel Wired LAN Driver Updates 2016-05-13

This series contains updates to e1000e, igb and igbvf.

Steve Shih fixes an issue for disabling auto-negotiation and forcing
speed and duplex settings for non-copper media.

Brian Walsh cleans up some inconsistencies in the use of return variable
names to avoid confusion.

Jake cleans up the drivers to use the BIT() macro when it can, which will
future proof the drivers for GCC 6 when it gets released. Cleaned up
dead code which was never being used. Also fixed e1000e, where it was
incorrectly restting the SYSTIM registers every time the ioctl was being
run.

Denys Vlasenko fixes an oversight where incvalue variable holds a 32
bit value so we should declare it as such, instead of 64 bits. Also
fixed an overflow check, where if two reads are the same, then it is not
an overflow.

Nathan Sullivan fixes the PTP timestamps for transmit and receive
latency based on the current link speed.

Alexander Duyck adds support for partial GSO segmentation in the case
of tunnels for igb and igbvf.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+633 -454
+6 -6
drivers/net/ethernet/intel/e1000e/80003es2lan.c
··· 121 121 /* EEPROM access above 16k is unsupported */ 122 122 if (size > 14) 123 123 size = 14; 124 - nvm->word_size = 1 << size; 124 + nvm->word_size = BIT(size); 125 125 126 126 return 0; 127 127 } ··· 845 845 846 846 /* Transmit Descriptor Control 0 */ 847 847 reg = er32(TXDCTL(0)); 848 - reg |= (1 << 22); 848 + reg |= BIT(22); 849 849 ew32(TXDCTL(0), reg); 850 850 851 851 /* Transmit Descriptor Control 1 */ 852 852 reg = er32(TXDCTL(1)); 853 - reg |= (1 << 22); 853 + reg |= BIT(22); 854 854 ew32(TXDCTL(1), reg); 855 855 856 856 /* Transmit Arbitration Control 0 */ 857 857 reg = er32(TARC(0)); 858 858 reg &= ~(0xF << 27); /* 30:27 */ 859 859 if (hw->phy.media_type != e1000_media_type_copper) 860 - reg &= ~(1 << 20); 860 + reg &= ~BIT(20); 861 861 ew32(TARC(0), reg); 862 862 863 863 /* Transmit Arbitration Control 1 */ 864 864 reg = er32(TARC(1)); 865 865 if (er32(TCTL) & E1000_TCTL_MULR) 866 - reg &= ~(1 << 28); 866 + reg &= ~BIT(28); 867 867 else 868 - reg |= (1 << 28); 868 + reg |= BIT(28); 869 869 ew32(TARC(1), reg); 870 870 871 871 /* Disable IPv6 extension header parsing because some malformed
+15 -15
drivers/net/ethernet/intel/e1000e/82571.c
··· 185 185 /* EEPROM access above 16k is unsupported */ 186 186 if (size > 14) 187 187 size = 14; 188 - nvm->word_size = 1 << size; 188 + nvm->word_size = BIT(size); 189 189 break; 190 190 } 191 191 ··· 1163 1163 1164 1164 /* Transmit Descriptor Control 0 */ 1165 1165 reg = er32(TXDCTL(0)); 1166 - reg |= (1 << 22); 1166 + reg |= BIT(22); 1167 1167 ew32(TXDCTL(0), reg); 1168 1168 1169 1169 /* Transmit Descriptor Control 1 */ 1170 1170 reg = er32(TXDCTL(1)); 1171 - reg |= (1 << 22); 1171 + reg |= BIT(22); 1172 1172 ew32(TXDCTL(1), reg); 1173 1173 1174 1174 /* Transmit Arbitration Control 0 */ ··· 1177 1177 switch (hw->mac.type) { 1178 1178 case e1000_82571: 1179 1179 case e1000_82572: 1180 - reg |= (1 << 23) | (1 << 24) | (1 << 25) | (1 << 26); 1180 + reg |= BIT(23) | BIT(24) | BIT(25) | BIT(26); 1181 1181 break; 1182 1182 case e1000_82574: 1183 1183 case e1000_82583: 1184 - reg |= (1 << 26); 1184 + reg |= BIT(26); 1185 1185 break; 1186 1186 default: 1187 1187 break; ··· 1193 1193 switch (hw->mac.type) { 1194 1194 case e1000_82571: 1195 1195 case e1000_82572: 1196 - reg &= ~((1 << 29) | (1 << 30)); 1197 - reg |= (1 << 22) | (1 << 24) | (1 << 25) | (1 << 26); 1196 + reg &= ~(BIT(29) | BIT(30)); 1197 + reg |= BIT(22) | BIT(24) | BIT(25) | BIT(26); 1198 1198 if (er32(TCTL) & E1000_TCTL_MULR) 1199 - reg &= ~(1 << 28); 1199 + reg &= ~BIT(28); 1200 1200 else 1201 - reg |= (1 << 28); 1201 + reg |= BIT(28); 1202 1202 ew32(TARC(1), reg); 1203 1203 break; 1204 1204 default: ··· 1211 1211 case e1000_82574: 1212 1212 case e1000_82583: 1213 1213 reg = er32(CTRL); 1214 - reg &= ~(1 << 29); 1214 + reg &= ~BIT(29); 1215 1215 ew32(CTRL, reg); 1216 1216 break; 1217 1217 default: ··· 1224 1224 case e1000_82574: 1225 1225 case e1000_82583: 1226 1226 reg = er32(CTRL_EXT); 1227 - reg &= ~(1 << 23); 1228 - reg |= (1 << 22); 1227 + reg &= ~BIT(23); 1228 + reg |= BIT(22); 1229 1229 ew32(CTRL_EXT, reg); 1230 1230 break; 1231 1231 default: ··· 1261 1261 case e1000_82574: 1262 1262 case 
e1000_82583: 1263 1263 reg = er32(GCR); 1264 - reg |= (1 << 22); 1264 + reg |= BIT(22); 1265 1265 ew32(GCR, reg); 1266 1266 1267 1267 /* Workaround for hardware errata. ··· 1308 1308 E1000_VFTA_ENTRY_SHIFT) & 1309 1309 E1000_VFTA_ENTRY_MASK; 1310 1310 vfta_bit_in_reg = 1311 - 1 << (hw->mng_cookie.vlan_id & 1312 - E1000_VFTA_ENTRY_BIT_SHIFT_MASK); 1311 + BIT(hw->mng_cookie.vlan_id & 1312 + E1000_VFTA_ENTRY_BIT_SHIFT_MASK); 1313 1313 } 1314 1314 break; 1315 1315 default:
+54 -53
drivers/net/ethernet/intel/e1000e/e1000.h
··· 109 109 #define E1000_TXDCTL_DMA_BURST_ENABLE \ 110 110 (E1000_TXDCTL_GRAN | /* set descriptor granularity */ \ 111 111 E1000_TXDCTL_COUNT_DESC | \ 112 - (1 << 16) | /* wthresh must be +1 more than desired */\ 113 - (1 << 8) | /* hthresh */ \ 114 - 0x1f) /* pthresh */ 112 + (1u << 16) | /* wthresh must be +1 more than desired */\ 113 + (1u << 8) | /* hthresh */ \ 114 + 0x1f) /* pthresh */ 115 115 116 116 #define E1000_RXDCTL_DMA_BURST_ENABLE \ 117 117 (0x01000000 | /* set descriptor granularity */ \ 118 - (4 << 16) | /* set writeback threshold */ \ 119 - (4 << 8) | /* set prefetch threshold */ \ 118 + (4u << 16) | /* set writeback threshold */ \ 119 + (4u << 8) | /* set prefetch threshold */ \ 120 120 0x20) /* set hthresh */ 121 121 122 - #define E1000_TIDV_FPD (1 << 31) 123 - #define E1000_RDTR_FPD (1 << 31) 122 + #define E1000_TIDV_FPD BIT(31) 123 + #define E1000_RDTR_FPD BIT(31) 124 124 125 125 enum e1000_boards { 126 126 board_82571, ··· 347 347 struct ptp_clock *ptp_clock; 348 348 struct ptp_clock_info ptp_clock_info; 349 349 struct pm_qos_request pm_qos_req; 350 + s32 ptp_delta; 350 351 351 352 u16 eee_advert; 352 353 }; ··· 405 404 #define E1000_82574_SYSTIM_EPSILON (1ULL << 35ULL) 406 405 407 406 /* hardware capability, feature, and workaround flags */ 408 - #define FLAG_HAS_AMT (1 << 0) 409 - #define FLAG_HAS_FLASH (1 << 1) 410 - #define FLAG_HAS_HW_VLAN_FILTER (1 << 2) 411 - #define FLAG_HAS_WOL (1 << 3) 412 - /* reserved bit4 */ 413 - #define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5) 414 - #define FLAG_HAS_SWSM_ON_LOAD (1 << 6) 415 - #define FLAG_HAS_JUMBO_FRAMES (1 << 7) 416 - #define FLAG_READ_ONLY_NVM (1 << 8) 417 - #define FLAG_IS_ICH (1 << 9) 418 - #define FLAG_HAS_MSIX (1 << 10) 419 - #define FLAG_HAS_SMART_POWER_DOWN (1 << 11) 420 - #define FLAG_IS_QUAD_PORT_A (1 << 12) 421 - #define FLAG_IS_QUAD_PORT (1 << 13) 422 - #define FLAG_HAS_HW_TIMESTAMP (1 << 14) 423 - #define FLAG_APME_IN_WUC (1 << 15) 424 - #define FLAG_APME_IN_CTRL3 (1 << 16) 425 - 
#define FLAG_APME_CHECK_PORT_B (1 << 17) 426 - #define FLAG_DISABLE_FC_PAUSE_TIME (1 << 18) 427 - #define FLAG_NO_WAKE_UCAST (1 << 19) 428 - #define FLAG_MNG_PT_ENABLED (1 << 20) 429 - #define FLAG_RESET_OVERWRITES_LAA (1 << 21) 430 - #define FLAG_TARC_SPEED_MODE_BIT (1 << 22) 431 - #define FLAG_TARC_SET_BIT_ZERO (1 << 23) 432 - #define FLAG_RX_NEEDS_RESTART (1 << 24) 433 - #define FLAG_LSC_GIG_SPEED_DROP (1 << 25) 434 - #define FLAG_SMART_POWER_DOWN (1 << 26) 435 - #define FLAG_MSI_ENABLED (1 << 27) 436 - /* reserved (1 << 28) */ 437 - #define FLAG_TSO_FORCE (1 << 29) 438 - #define FLAG_RESTART_NOW (1 << 30) 439 - #define FLAG_MSI_TEST_FAILED (1 << 31) 407 + #define FLAG_HAS_AMT BIT(0) 408 + #define FLAG_HAS_FLASH BIT(1) 409 + #define FLAG_HAS_HW_VLAN_FILTER BIT(2) 410 + #define FLAG_HAS_WOL BIT(3) 411 + /* reserved BIT(4) */ 412 + #define FLAG_HAS_CTRLEXT_ON_LOAD BIT(5) 413 + #define FLAG_HAS_SWSM_ON_LOAD BIT(6) 414 + #define FLAG_HAS_JUMBO_FRAMES BIT(7) 415 + #define FLAG_READ_ONLY_NVM BIT(8) 416 + #define FLAG_IS_ICH BIT(9) 417 + #define FLAG_HAS_MSIX BIT(10) 418 + #define FLAG_HAS_SMART_POWER_DOWN BIT(11) 419 + #define FLAG_IS_QUAD_PORT_A BIT(12) 420 + #define FLAG_IS_QUAD_PORT BIT(13) 421 + #define FLAG_HAS_HW_TIMESTAMP BIT(14) 422 + #define FLAG_APME_IN_WUC BIT(15) 423 + #define FLAG_APME_IN_CTRL3 BIT(16) 424 + #define FLAG_APME_CHECK_PORT_B BIT(17) 425 + #define FLAG_DISABLE_FC_PAUSE_TIME BIT(18) 426 + #define FLAG_NO_WAKE_UCAST BIT(19) 427 + #define FLAG_MNG_PT_ENABLED BIT(20) 428 + #define FLAG_RESET_OVERWRITES_LAA BIT(21) 429 + #define FLAG_TARC_SPEED_MODE_BIT BIT(22) 430 + #define FLAG_TARC_SET_BIT_ZERO BIT(23) 431 + #define FLAG_RX_NEEDS_RESTART BIT(24) 432 + #define FLAG_LSC_GIG_SPEED_DROP BIT(25) 433 + #define FLAG_SMART_POWER_DOWN BIT(26) 434 + #define FLAG_MSI_ENABLED BIT(27) 435 + /* reserved BIT(28) */ 436 + #define FLAG_TSO_FORCE BIT(29) 437 + #define FLAG_RESTART_NOW BIT(30) 438 + #define FLAG_MSI_TEST_FAILED BIT(31) 440 439 441 - #define 
FLAG2_CRC_STRIPPING (1 << 0) 442 - #define FLAG2_HAS_PHY_WAKEUP (1 << 1) 443 - #define FLAG2_IS_DISCARDING (1 << 2) 444 - #define FLAG2_DISABLE_ASPM_L1 (1 << 3) 445 - #define FLAG2_HAS_PHY_STATS (1 << 4) 446 - #define FLAG2_HAS_EEE (1 << 5) 447 - #define FLAG2_DMA_BURST (1 << 6) 448 - #define FLAG2_DISABLE_ASPM_L0S (1 << 7) 449 - #define FLAG2_DISABLE_AIM (1 << 8) 450 - #define FLAG2_CHECK_PHY_HANG (1 << 9) 451 - #define FLAG2_NO_DISABLE_RX (1 << 10) 452 - #define FLAG2_PCIM2PCI_ARBITER_WA (1 << 11) 453 - #define FLAG2_DFLT_CRC_STRIPPING (1 << 12) 454 - #define FLAG2_CHECK_RX_HWTSTAMP (1 << 13) 440 + #define FLAG2_CRC_STRIPPING BIT(0) 441 + #define FLAG2_HAS_PHY_WAKEUP BIT(1) 442 + #define FLAG2_IS_DISCARDING BIT(2) 443 + #define FLAG2_DISABLE_ASPM_L1 BIT(3) 444 + #define FLAG2_HAS_PHY_STATS BIT(4) 445 + #define FLAG2_HAS_EEE BIT(5) 446 + #define FLAG2_DMA_BURST BIT(6) 447 + #define FLAG2_DISABLE_ASPM_L0S BIT(7) 448 + #define FLAG2_DISABLE_AIM BIT(8) 449 + #define FLAG2_CHECK_PHY_HANG BIT(9) 450 + #define FLAG2_NO_DISABLE_RX BIT(10) 451 + #define FLAG2_PCIM2PCI_ARBITER_WA BIT(11) 452 + #define FLAG2_DFLT_CRC_STRIPPING BIT(12) 453 + #define FLAG2_CHECK_RX_HWTSTAMP BIT(13) 455 454 456 455 #define E1000_RX_DESC_PS(R, i) \ 457 456 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
+32 -23
drivers/net/ethernet/intel/e1000e/ethtool.c
··· 201 201 else 202 202 ecmd->eth_tp_mdix_ctrl = hw->phy.mdix; 203 203 204 + if (hw->phy.media_type != e1000_media_type_copper) 205 + ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID; 206 + 204 207 return 0; 205 208 } 206 209 ··· 239 236 mac->forced_speed_duplex = ADVERTISE_100_FULL; 240 237 break; 241 238 case SPEED_1000 + DUPLEX_FULL: 242 - mac->autoneg = 1; 243 - adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; 239 + if (adapter->hw.phy.media_type == e1000_media_type_copper) { 240 + mac->autoneg = 1; 241 + adapter->hw.phy.autoneg_advertised = 242 + ADVERTISE_1000_FULL; 243 + } else { 244 + mac->forced_speed_duplex = ADVERTISE_1000_FULL; 245 + } 244 246 break; 245 247 case SPEED_1000 + DUPLEX_HALF: /* not supported */ 246 248 default: ··· 447 439 448 440 memset(p, 0, E1000_REGS_LEN * sizeof(u32)); 449 441 450 - regs->version = (1 << 24) | (adapter->pdev->revision << 16) | 451 - adapter->pdev->device; 442 + regs->version = (1u << 24) | 443 + (adapter->pdev->revision << 16) | 444 + adapter->pdev->device; 452 445 453 446 regs_buff[0] = er32(CTRL); 454 447 regs_buff[1] = er32(STATUS); ··· 904 895 case e1000_pch2lan: 905 896 case e1000_pch_lpt: 906 897 case e1000_pch_spt: 907 - mask |= (1 << 18); 898 + mask |= BIT(18); 908 899 break; 909 900 default: 910 901 break; ··· 923 914 924 915 /* SHRAH[9] different than the others */ 925 916 if (i == 10) 926 - mask |= (1 << 30); 917 + mask |= BIT(30); 927 918 else 928 - mask &= ~(1 << 30); 919 + mask &= ~BIT(30); 929 920 } 930 921 if (mac->type == e1000_pch2lan) { 931 922 /* SHRAH[0,1,2] different than previous */ ··· 933 924 mask &= 0xFFF4FFFF; 934 925 /* SHRAH[3] different than SHRAH[0,1,2] */ 935 926 if (i == 4) 936 - mask |= (1 << 30); 927 + mask |= BIT(30); 937 928 /* RAR[1-6] owned by management engine - skipping */ 938 929 if (i > 0) 939 930 i += 6; ··· 1028 1019 /* Test each interrupt */ 1029 1020 for (i = 0; i < 10; i++) { 1030 1021 /* Interrupt to test */ 1031 - mask = 1 << i; 1022 + mask = BIT(i); 1032 1023 
1033 1024 if (adapter->flags & FLAG_IS_ICH) { 1034 1025 switch (mask) { ··· 1396 1387 case e1000_phy_82579: 1397 1388 /* Disable PHY energy detect power down */ 1398 1389 e1e_rphy(hw, PHY_REG(0, 21), &phy_reg); 1399 - e1e_wphy(hw, PHY_REG(0, 21), phy_reg & ~(1 << 3)); 1390 + e1e_wphy(hw, PHY_REG(0, 21), phy_reg & ~BIT(3)); 1400 1391 /* Disable full chip energy detect */ 1401 1392 e1e_rphy(hw, PHY_REG(776, 18), &phy_reg); 1402 1393 e1e_wphy(hw, PHY_REG(776, 18), phy_reg | 1); ··· 1462 1453 1463 1454 /* disable autoneg */ 1464 1455 ctrl = er32(TXCW); 1465 - ctrl &= ~(1 << 31); 1456 + ctrl &= ~BIT(31); 1466 1457 ew32(TXCW, ctrl); 1467 1458 1468 1459 link = (er32(STATUS) & E1000_STATUS_LU); ··· 2292 2283 SOF_TIMESTAMPING_RX_HARDWARE | 2293 2284 SOF_TIMESTAMPING_RAW_HARDWARE); 2294 2285 2295 - info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); 2286 + info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); 2296 2287 2297 - info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) | 2298 - (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | 2299 - (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | 2300 - (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | 2301 - (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | 2302 - (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | 2303 - (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | 2304 - (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | 2305 - (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | 2306 - (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | 2307 - (1 << HWTSTAMP_FILTER_ALL)); 2288 + info->rx_filters = (BIT(HWTSTAMP_FILTER_NONE) | 2289 + BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | 2290 + BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | 2291 + BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | 2292 + BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | 2293 + BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | 2294 + BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | 2295 + BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) | 2296 + BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) | 2297 + BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | 2298 + BIT(HWTSTAMP_FILTER_ALL)); 2308 2299 2309 2300 if 
(adapter->ptp_clock) 2310 2301 info->phc_index = ptp_clock_index(adapter->ptp_clock);
+22 -22
drivers/net/ethernet/intel/e1000e/ich8lan.c
··· 1048 1048 1049 1049 while (value > PCI_LTR_VALUE_MASK) { 1050 1050 scale++; 1051 - value = DIV_ROUND_UP(value, (1 << 5)); 1051 + value = DIV_ROUND_UP(value, BIT(5)); 1052 1052 } 1053 1053 if (scale > E1000_LTRV_SCALE_MAX) { 1054 1054 e_dbg("Invalid LTR latency scale %d\n", scale); ··· 1573 1573 phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK; 1574 1574 1575 1575 if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD) 1576 - phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT); 1576 + phy_reg |= BIT(HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT); 1577 1577 1578 1578 e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg); 1579 1579 break; ··· 2044 2044 /* Restore SMBus frequency */ 2045 2045 if (freq--) { 2046 2046 phy_data &= ~HV_SMB_ADDR_FREQ_MASK; 2047 - phy_data |= (freq & (1 << 0)) << 2047 + phy_data |= (freq & BIT(0)) << 2048 2048 HV_SMB_ADDR_FREQ_LOW_SHIFT; 2049 - phy_data |= (freq & (1 << 1)) << 2049 + phy_data |= (freq & BIT(1)) << 2050 2050 (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1); 2051 2051 } else { 2052 2052 e_dbg("Unsupported SMB frequency in PHY\n"); ··· 2530 2530 2531 2531 /* disable Rx path while enabling/disabling workaround */ 2532 2532 e1e_rphy(hw, PHY_REG(769, 20), &phy_reg); 2533 - ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | (1 << 14)); 2533 + ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | BIT(14)); 2534 2534 if (ret_val) 2535 2535 return ret_val; 2536 2536 ··· 2561 2561 2562 2562 /* Enable jumbo frame workaround in the MAC */ 2563 2563 mac_reg = er32(FFLT_DBG); 2564 - mac_reg &= ~(1 << 14); 2564 + mac_reg &= ~BIT(14); 2565 2565 mac_reg |= (7 << 15); 2566 2566 ew32(FFLT_DBG, mac_reg); 2567 2567 ··· 2576 2576 return ret_val; 2577 2577 ret_val = e1000e_write_kmrn_reg(hw, 2578 2578 E1000_KMRNCTRLSTA_CTRL_OFFSET, 2579 - data | (1 << 0)); 2579 + data | BIT(0)); 2580 2580 if (ret_val) 2581 2581 return ret_val; 2582 2582 ret_val = e1000e_read_kmrn_reg(hw, ··· 2600 2600 if (ret_val) 2601 2601 return ret_val; 2602 2602 e1e_rphy(hw, PHY_REG(769, 16), &data); 2603 - 
data &= ~(1 << 13); 2603 + data &= ~BIT(13); 2604 2604 ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); 2605 2605 if (ret_val) 2606 2606 return ret_val; ··· 2614 2614 if (ret_val) 2615 2615 return ret_val; 2616 2616 e1e_rphy(hw, HV_PM_CTRL, &data); 2617 - ret_val = e1e_wphy(hw, HV_PM_CTRL, data | (1 << 10)); 2617 + ret_val = e1e_wphy(hw, HV_PM_CTRL, data | BIT(10)); 2618 2618 if (ret_val) 2619 2619 return ret_val; 2620 2620 } else { ··· 2634 2634 return ret_val; 2635 2635 ret_val = e1000e_write_kmrn_reg(hw, 2636 2636 E1000_KMRNCTRLSTA_CTRL_OFFSET, 2637 - data & ~(1 << 0)); 2637 + data & ~BIT(0)); 2638 2638 if (ret_val) 2639 2639 return ret_val; 2640 2640 ret_val = e1000e_read_kmrn_reg(hw, ··· 2657 2657 if (ret_val) 2658 2658 return ret_val; 2659 2659 e1e_rphy(hw, PHY_REG(769, 16), &data); 2660 - data |= (1 << 13); 2660 + data |= BIT(13); 2661 2661 ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); 2662 2662 if (ret_val) 2663 2663 return ret_val; ··· 2671 2671 if (ret_val) 2672 2672 return ret_val; 2673 2673 e1e_rphy(hw, HV_PM_CTRL, &data); 2674 - ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~(1 << 10)); 2674 + ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~BIT(10)); 2675 2675 if (ret_val) 2676 2676 return ret_val; 2677 2677 } 2678 2678 2679 2679 /* re-enable Rx path after enabling/disabling workaround */ 2680 - return e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14)); 2680 + return e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~BIT(14)); 2681 2681 } 2682 2682 2683 2683 /** ··· 4841 4841 4842 4842 /* Extended Device Control */ 4843 4843 reg = er32(CTRL_EXT); 4844 - reg |= (1 << 22); 4844 + reg |= BIT(22); 4845 4845 /* Enable PHY low-power state when MAC is at D3 w/o WoL */ 4846 4846 if (hw->mac.type >= e1000_pchlan) 4847 4847 reg |= E1000_CTRL_EXT_PHYPDEN; ··· 4849 4849 4850 4850 /* Transmit Descriptor Control 0 */ 4851 4851 reg = er32(TXDCTL(0)); 4852 - reg |= (1 << 22); 4852 + reg |= BIT(22); 4853 4853 ew32(TXDCTL(0), reg); 4854 4854 4855 4855 /* Transmit Descriptor Control 1 
*/ 4856 4856 reg = er32(TXDCTL(1)); 4857 - reg |= (1 << 22); 4857 + reg |= BIT(22); 4858 4858 ew32(TXDCTL(1), reg); 4859 4859 4860 4860 /* Transmit Arbitration Control 0 */ 4861 4861 reg = er32(TARC(0)); 4862 4862 if (hw->mac.type == e1000_ich8lan) 4863 - reg |= (1 << 28) | (1 << 29); 4864 - reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27); 4863 + reg |= BIT(28) | BIT(29); 4864 + reg |= BIT(23) | BIT(24) | BIT(26) | BIT(27); 4865 4865 ew32(TARC(0), reg); 4866 4866 4867 4867 /* Transmit Arbitration Control 1 */ 4868 4868 reg = er32(TARC(1)); 4869 4869 if (er32(TCTL) & E1000_TCTL_MULR) 4870 - reg &= ~(1 << 28); 4870 + reg &= ~BIT(28); 4871 4871 else 4872 - reg |= (1 << 28); 4873 - reg |= (1 << 24) | (1 << 26) | (1 << 30); 4872 + reg |= BIT(28); 4873 + reg |= BIT(24) | BIT(26) | BIT(30); 4874 4874 ew32(TARC(1), reg); 4875 4875 4876 4876 /* Device Status */ 4877 4877 if (hw->mac.type == e1000_ich8lan) { 4878 4878 reg = er32(STATUS); 4879 - reg &= ~(1 << 31); 4879 + reg &= ~BIT(31); 4880 4880 ew32(STATUS, reg); 4881 4881 } 4882 4882
+4 -4
drivers/net/ethernet/intel/e1000e/ich8lan.h
··· 73 73 (ID_LED_OFF1_ON2 << 4) | \ 74 74 (ID_LED_DEF1_DEF2)) 75 75 76 - #define E1000_ICH_NVM_SIG_WORD 0x13 77 - #define E1000_ICH_NVM_SIG_MASK 0xC000 78 - #define E1000_ICH_NVM_VALID_SIG_MASK 0xC0 79 - #define E1000_ICH_NVM_SIG_VALUE 0x80 76 + #define E1000_ICH_NVM_SIG_WORD 0x13u 77 + #define E1000_ICH_NVM_SIG_MASK 0xC000u 78 + #define E1000_ICH_NVM_VALID_SIG_MASK 0xC0u 79 + #define E1000_ICH_NVM_SIG_VALUE 0x80u 80 80 81 81 #define E1000_ICH8_LAN_INIT_TIMEOUT 1500 82 82
+1 -1
drivers/net/ethernet/intel/e1000e/mac.c
··· 346 346 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); 347 347 hash_bit = hash_value & 0x1F; 348 348 349 - hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit); 349 + hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit); 350 350 mc_addr_list += (ETH_ALEN); 351 351 } 352 352
+93 -56
drivers/net/ethernet/intel/e1000e/netdev.c
··· 317 317 else 318 318 next_desc = ""; 319 319 pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p%s\n", 320 - (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' : 321 - ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')), 320 + (!(le64_to_cpu(u0->b) & BIT(29)) ? 'l' : 321 + ((le64_to_cpu(u0->b) & BIT(20)) ? 'd' : 'c')), 322 322 i, 323 323 (unsigned long long)le64_to_cpu(u0->a), 324 324 (unsigned long long)le64_to_cpu(u0->b), ··· 2018 2018 adapter->eiac_mask |= E1000_IMS_OTHER; 2019 2019 2020 2020 /* Cause Tx interrupts on every write back */ 2021 - ivar |= (1 << 31); 2021 + ivar |= BIT(31); 2022 2022 2023 2023 ew32(IVAR, ivar); 2024 2024 ··· 2709 2709 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { 2710 2710 index = (vid >> 5) & 0x7F; 2711 2711 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); 2712 - vfta |= (1 << (vid & 0x1F)); 2712 + vfta |= BIT((vid & 0x1F)); 2713 2713 hw->mac.ops.write_vfta(hw, index, vfta); 2714 2714 } 2715 2715 ··· 2737 2737 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { 2738 2738 index = (vid >> 5) & 0x7F; 2739 2739 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); 2740 - vfta &= ~(1 << (vid & 0x1F)); 2740 + vfta &= ~BIT((vid & 0x1F)); 2741 2741 hw->mac.ops.write_vfta(hw, index, vfta); 2742 2742 } 2743 2743 ··· 2878 2878 2879 2879 /* Enable this decision filter in MANC2H */ 2880 2880 if (mdef) 2881 - manc2h |= (1 << i); 2881 + manc2h |= BIT(i); 2882 2882 2883 2883 j |= mdef; 2884 2884 } ··· 2891 2891 if (er32(MDEF(i)) == 0) { 2892 2892 ew32(MDEF(i), (E1000_MDEF_PORT_623 | 2893 2893 E1000_MDEF_PORT_664)); 2894 - manc2h |= (1 << 1); 2894 + manc2h |= BIT(1); 2895 2895 j++; 2896 2896 break; 2897 2897 } ··· 2971 2971 /* set the speed mode bit, we'll clear it if we're not at 2972 2972 * gigabit link later 2973 2973 */ 2974 - #define SPEED_MODE_BIT (1 << 21) 2974 + #define SPEED_MODE_BIT BIT(21) 2975 2975 tarc |= SPEED_MODE_BIT; 2976 2976 ew32(TARC(0), tarc); 2977 2977 } ··· 3071 3071 3072 3072 e1e_rphy(hw, PHY_REG(770, 26), &phy_data); 3073 
3073 phy_data &= 0xfff8; 3074 - phy_data |= (1 << 2); 3074 + phy_data |= BIT(2); 3075 3075 e1e_wphy(hw, PHY_REG(770, 26), phy_data); 3076 3076 3077 3077 e1e_rphy(hw, 22, &phy_data); 3078 3078 phy_data &= 0x0fff; 3079 - phy_data |= (1 << 14); 3079 + phy_data |= BIT(14); 3080 3080 e1e_wphy(hw, 0x10, 0x2823); 3081 3081 e1e_wphy(hw, 0x11, 0x0003); 3082 3082 e1e_wphy(hw, 22, phy_data); ··· 3368 3368 * combining 3369 3369 */ 3370 3370 netdev_for_each_uc_addr(ha, netdev) { 3371 - int rval; 3371 + int ret_val; 3372 3372 3373 3373 if (!rar_entries) 3374 3374 break; 3375 - rval = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--); 3376 - if (rval < 0) 3375 + ret_val = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--); 3376 + if (ret_val < 0) 3377 3377 return -ENOMEM; 3378 3378 count++; 3379 3379 } ··· 3503 3503 !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) { 3504 3504 u32 fextnvm7 = er32(FEXTNVM7); 3505 3505 3506 - if (!(fextnvm7 & (1 << 0))) { 3507 - ew32(FEXTNVM7, fextnvm7 | (1 << 0)); 3506 + if (!(fextnvm7 & BIT(0))) { 3507 + ew32(FEXTNVM7, fextnvm7 | BIT(0)); 3508 3508 e1e_flush(); 3509 3509 } 3510 3510 } ··· 3580 3580 bool is_l4 = false; 3581 3581 bool is_l2 = false; 3582 3582 u32 regval; 3583 - s32 ret_val; 3584 3583 3585 3584 if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) 3586 3585 return -EINVAL; ··· 3718 3719 er32(RXSTMPH); 3719 3720 er32(TXSTMPH); 3720 3721 3721 - /* Get and set the System Time Register SYSTIM base frequency */ 3722 - ret_val = e1000e_get_base_timinca(adapter, &regval); 3723 - if (ret_val) 3724 - return ret_val; 3725 - ew32(TIMINCA, regval); 3726 - 3727 - /* reset the ns time counter */ 3728 - timecounter_init(&adapter->tc, &adapter->cc, 3729 - ktime_to_ns(ktime_get_real())); 3730 - 3731 3722 return 0; 3732 3723 } 3733 3724 ··· 3828 3839 /* update thresholds: prefetch threshold to 31, host threshold to 1 3829 3840 * and make sure the granularity is "descriptors" and not "cache lines" 3830 3841 */ 3831 - rxdctl |= (0x1F | (1 << 8) | 
E1000_RXDCTL_THRESH_UNIT_DESC); 3842 + rxdctl |= (0x1F | BIT(8) | E1000_RXDCTL_THRESH_UNIT_DESC); 3832 3843 3833 3844 ew32(RXDCTL(0), rxdctl); 3834 3845 /* momentarily enable the RX ring for the changes to take effect */ ··· 3871 3882 &hang_state); 3872 3883 if (hang_state & FLUSH_DESC_REQUIRED) 3873 3884 e1000_flush_rx_ring(adapter); 3885 + } 3886 + 3887 + /** 3888 + * e1000e_systim_reset - reset the timesync registers after a hardware reset 3889 + * @adapter: board private structure 3890 + * 3891 + * When the MAC is reset, all hardware bits for timesync will be reset to the 3892 + * default values. This function will restore the settings last in place. 3893 + * Since the clock SYSTIME registers are reset, we will simply restore the 3894 + * cyclecounter to the kernel real clock time. 3895 + **/ 3896 + static void e1000e_systim_reset(struct e1000_adapter *adapter) 3897 + { 3898 + struct ptp_clock_info *info = &adapter->ptp_clock_info; 3899 + struct e1000_hw *hw = &adapter->hw; 3900 + unsigned long flags; 3901 + u32 timinca; 3902 + s32 ret_val; 3903 + 3904 + if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) 3905 + return; 3906 + 3907 + if (info->adjfreq) { 3908 + /* restore the previous ptp frequency delta */ 3909 + ret_val = info->adjfreq(info, adapter->ptp_delta); 3910 + } else { 3911 + /* set the default base frequency if no adjustment possible */ 3912 + ret_val = e1000e_get_base_timinca(adapter, &timinca); 3913 + if (!ret_val) 3914 + ew32(TIMINCA, timinca); 3915 + } 3916 + 3917 + if (ret_val) { 3918 + dev_warn(&adapter->pdev->dev, 3919 + "Failed to restore TIMINCA clock rate delta: %d\n", 3920 + ret_val); 3921 + return; 3922 + } 3923 + 3924 + /* reset the systim ns time counter */ 3925 + spin_lock_irqsave(&adapter->systim_lock, flags); 3926 + timecounter_init(&adapter->tc, &adapter->cc, 3927 + ktime_to_ns(ktime_get_real())); 3928 + spin_unlock_irqrestore(&adapter->systim_lock, flags); 3929 + 3930 + /* restore the previous hwtstamp configuration settings */ 3931 + 
e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config); 3874 3932 } 3875 3933 3876 3934 /** ··· 4099 4063 4100 4064 e1000e_reset_adaptive(hw); 4101 4065 4102 - /* initialize systim and reset the ns time counter */ 4103 - e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config); 4066 + /* restore systim and hwtstamp settings */ 4067 + e1000e_systim_reset(adapter); 4104 4068 4105 4069 /* Set EEE advertisement as appropriate */ 4106 4070 if (adapter->flags2 & FLAG2_HAS_EEE) { ··· 4311 4275 struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter, 4312 4276 cc); 4313 4277 struct e1000_hw *hw = &adapter->hw; 4314 - u32 systimel_1, systimel_2, systimeh; 4278 + u32 systimel, systimeh; 4315 4279 cycle_t systim, systim_next; 4316 4280 /* SYSTIMH latching upon SYSTIML read does not work well. 4317 4281 * This means that if SYSTIML overflows after we read it but before ··· 4319 4283 * will experience a huge non linear increment in the systime value 4320 4284 * to fix that we test for overflow and if true, we re-read systime. 4321 4285 */ 4322 - systimel_1 = er32(SYSTIML); 4286 + systimel = er32(SYSTIML); 4323 4287 systimeh = er32(SYSTIMH); 4324 - systimel_2 = er32(SYSTIML); 4325 - /* Check for overflow. If there was no overflow, use the values */ 4326 - if (systimel_1 < systimel_2) { 4327 - systim = (cycle_t)systimel_1; 4328 - systim |= (cycle_t)systimeh << 32; 4329 - } else { 4330 - /* There was an overflow, read again SYSTIMH, and use 4331 - * systimel_2 4332 - */ 4333 - systimeh = er32(SYSTIMH); 4334 - systim = (cycle_t)systimel_2; 4335 - systim |= (cycle_t)systimeh << 32; 4288 + /* Is systimel is so large that overflow is possible? 
*/ 4289 + if (systimel >= (u32)0xffffffff - E1000_TIMINCA_INCVALUE_MASK) { 4290 + u32 systimel_2 = er32(SYSTIML); 4291 + if (systimel > systimel_2) { 4292 + /* There was an overflow, read again SYSTIMH, and use 4293 + * systimel_2 4294 + */ 4295 + systimeh = er32(SYSTIMH); 4296 + systimel = systimel_2; 4297 + } 4336 4298 } 4299 + systim = (cycle_t)systimel; 4300 + systim |= (cycle_t)systimeh << 32; 4337 4301 4338 4302 if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) { 4339 - u64 incvalue, time_delta, rem, temp; 4303 + u64 time_delta, rem, temp; 4304 + u32 incvalue; 4340 4305 int i; 4341 4306 4342 4307 /* errata for 82574/82583 possible bad bits read from SYSTIMH/L ··· 6898 6861 6899 6862 ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf); 6900 6863 le16_to_cpus(&buf); 6901 - if (!ret_val && (!(buf & (1 << 0)))) { 6864 + if (!ret_val && (!(buf & BIT(0)))) { 6902 6865 /* Deep Smart Power Down (DSPD) */ 6903 6866 dev_warn(&adapter->pdev->dev, 6904 6867 "Warning: detected DSPD enabled in EEPROM\n"); ··· 7002 6965 int bars, i, err, pci_using_dac; 7003 6966 u16 eeprom_data = 0; 7004 6967 u16 eeprom_apme_mask = E1000_EEPROM_APME; 7005 - s32 rval = 0; 6968 + s32 ret_val = 0; 7006 6969 7007 6970 if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S) 7008 6971 aspm_disable_flag = PCIE_LINK_STATE_L0S; ··· 7237 7200 } else if (adapter->flags & FLAG_APME_IN_CTRL3) { 7238 7201 if (adapter->flags & FLAG_APME_CHECK_PORT_B && 7239 7202 (adapter->hw.bus.func == 1)) 7240 - rval = e1000_read_nvm(&adapter->hw, 7203 + ret_val = e1000_read_nvm(&adapter->hw, 7241 7204 NVM_INIT_CONTROL3_PORT_B, 7242 7205 1, &eeprom_data); 7243 7206 else 7244 - rval = e1000_read_nvm(&adapter->hw, 7207 + ret_val = e1000_read_nvm(&adapter->hw, 7245 7208 NVM_INIT_CONTROL3_PORT_A, 7246 7209 1, &eeprom_data); 7247 7210 } 7248 7211 7249 7212 /* fetch WoL from EEPROM */ 7250 - if (rval) 7251 - e_dbg("NVM read error getting WoL initial values: %d\n", rval); 7213 + if (ret_val) 7214 + e_dbg("NVM read 
error getting WoL initial values: %d\n", ret_val); 7252 7215 else if (eeprom_data & eeprom_apme_mask) 7253 7216 adapter->eeprom_wol |= E1000_WUFC_MAG; 7254 7217 ··· 7268 7231 device_wakeup_enable(&pdev->dev); 7269 7232 7270 7233 /* save off EEPROM version number */ 7271 - rval = e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers); 7234 + ret_val = e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers); 7272 7235 7273 - if (rval) { 7274 - e_dbg("NVM read error getting EEPROM version: %d\n", rval); 7236 + if (ret_val) { 7237 + e_dbg("NVM read error getting EEPROM version: %d\n", ret_val); 7275 7238 adapter->eeprom_vers = 0; 7276 7239 } 7240 + 7241 + /* init PTP hardware clock */ 7242 + e1000e_ptp_init(adapter); 7277 7243 7278 7244 /* reset the hardware with the new settings */ 7279 7245 e1000e_reset(adapter); ··· 7295 7255 7296 7256 /* carrier off reporting is important to ethtool even BEFORE open */ 7297 7257 netif_carrier_off(netdev); 7298 - 7299 - /* init PTP hardware clock */ 7300 - e1000e_ptp_init(adapter); 7301 7258 7302 7259 e1000_print_device_info(adapter); 7303 7260
+1 -1
drivers/net/ethernet/intel/e1000e/nvm.c
··· 67 67 u32 eecd = er32(EECD); 68 68 u32 mask; 69 69 70 - mask = 0x01 << (count - 1); 70 + mask = BIT(count - 1); 71 71 if (nvm->type == e1000_nvm_eeprom_spi) 72 72 eecd |= E1000_EECD_DO; 73 73
+2 -2
drivers/net/ethernet/intel/e1000e/phy.c
··· 2894 2894 if ((hw->phy.type == e1000_phy_82578) && 2895 2895 (hw->phy.revision >= 1) && 2896 2896 (hw->phy.addr == 2) && 2897 - !(MAX_PHY_REG_ADDRESS & reg) && (data & (1 << 11))) { 2897 + !(MAX_PHY_REG_ADDRESS & reg) && (data & BIT(11))) { 2898 2898 u16 data2 = 0x7EFF; 2899 2899 2900 2900 ret_val = e1000_access_phy_debug_regs_hv(hw, 2901 - (1 << 6) | 0x3, 2901 + BIT(6) | 0x3, 2902 2902 &data2, false); 2903 2903 if (ret_val) 2904 2904 goto out;
+5 -5
drivers/net/ethernet/intel/e1000e/phy.h
··· 104 104 #define BM_WUC_DATA_OPCODE 0x12 105 105 #define BM_WUC_ENABLE_PAGE BM_PORT_CTRL_PAGE 106 106 #define BM_WUC_ENABLE_REG 17 107 - #define BM_WUC_ENABLE_BIT (1 << 2) 108 - #define BM_WUC_HOST_WU_BIT (1 << 4) 109 - #define BM_WUC_ME_WU_BIT (1 << 5) 107 + #define BM_WUC_ENABLE_BIT BIT(2) 108 + #define BM_WUC_HOST_WU_BIT BIT(4) 109 + #define BM_WUC_ME_WU_BIT BIT(5) 110 110 111 111 #define PHY_UPPER_SHIFT 21 112 112 #define BM_PHY_REG(page, reg) \ ··· 124 124 #define I82578_ADDR_REG 29 125 125 #define I82577_ADDR_REG 16 126 126 #define I82577_CFG_REG 22 127 - #define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15) 128 - #define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift */ 127 + #define I82577_CFG_ASSERT_CRS_ON_TX BIT(15) 128 + #define I82577_CFG_ENABLE_DOWNSHIFT (3u << 10) /* auto downshift */ 129 129 #define I82577_CTRL_REG 23 130 130 131 131 /* 82577 specific PHY registers */
+2
drivers/net/ethernet/intel/e1000e/ptp.c
··· 79 79 80 80 ew32(TIMINCA, timinca); 81 81 82 + adapter->ptp_delta = delta; 83 + 82 84 spin_unlock_irqrestore(&adapter->systim_lock, flags); 83 85 84 86 return 0;
+4 -4
drivers/net/ethernet/intel/igb/e1000_82575.c
··· 361 361 if (size > 15) 362 362 size = 15; 363 363 364 - nvm->word_size = 1 << size; 364 + nvm->word_size = BIT(size); 365 365 nvm->opcode_bits = 8; 366 366 nvm->delay_usec = 1; 367 367 ··· 380 380 16 : 8; 381 381 break; 382 382 } 383 - if (nvm->word_size == (1 << 15)) 383 + if (nvm->word_size == BIT(15)) 384 384 nvm->page_size = 128; 385 385 386 386 nvm->type = e1000_nvm_eeprom_spi; ··· 391 391 nvm->ops.write = igb_write_nvm_spi; 392 392 nvm->ops.validate = igb_validate_nvm_checksum; 393 393 nvm->ops.update = igb_update_nvm_checksum; 394 - if (nvm->word_size < (1 << 15)) 394 + if (nvm->word_size < BIT(15)) 395 395 nvm->ops.read = igb_read_nvm_eerd; 396 396 else 397 397 nvm->ops.read = igb_read_nvm_spi; ··· 2107 2107 /* The PF can spoof - it has to in order to 2108 2108 * support emulation mode NICs 2109 2109 */ 2110 - reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS)); 2110 + reg_val ^= (BIT(pf) | BIT(pf + MAX_NUM_VFS)); 2111 2111 } else { 2112 2112 reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK | 2113 2113 E1000_DTXSWC_VLAN_SPOOF_MASK);
+15 -15
drivers/net/ethernet/intel/igb/e1000_82575.h
··· 168 168 #define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ 169 169 170 170 #define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ 171 - #define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ 172 - #define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */ 173 - #define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */ 174 - #define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */ 171 + #define E1000_DCA_RXCTRL_DESC_DCA_EN BIT(5) /* DCA Rx Desc enable */ 172 + #define E1000_DCA_RXCTRL_HEAD_DCA_EN BIT(6) /* DCA Rx Desc header enable */ 173 + #define E1000_DCA_RXCTRL_DATA_DCA_EN BIT(7) /* DCA Rx Desc payload enable */ 174 + #define E1000_DCA_RXCTRL_DESC_RRO_EN BIT(9) /* DCA Rx rd Desc Relax Order */ 175 175 176 176 #define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ 177 - #define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ 178 - #define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */ 179 - #define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ 180 - #define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */ 177 + #define E1000_DCA_TXCTRL_DESC_DCA_EN BIT(5) /* DCA Tx Desc enable */ 178 + #define E1000_DCA_TXCTRL_DESC_RRO_EN BIT(9) /* Tx rd Desc Relax Order */ 179 + #define E1000_DCA_TXCTRL_TX_WB_RO_EN BIT(11) /* Tx Desc writeback RO bit */ 180 + #define E1000_DCA_TXCTRL_DATA_RRO_EN BIT(13) /* Tx rd data Relax Order */ 181 181 182 182 /* Additional DCA related definitions, note change in position of CPUID */ 183 183 #define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */ ··· 186 186 #define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */ 187 187 188 188 /* ETQF register bit definitions */ 189 - #define E1000_ETQF_FILTER_ENABLE (1 << 26) 190 - #define E1000_ETQF_1588 (1 << 30) 189 + #define E1000_ETQF_FILTER_ENABLE BIT(26) 190 + #define 
E1000_ETQF_1588 BIT(30) 191 191 192 192 /* FTQF register bit definitions */ 193 193 #define E1000_FTQF_VF_BP 0x00008000 ··· 203 203 #define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof control */ 204 204 #define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */ 205 205 #define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8 206 - #define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */ 206 + #define E1000_DTXSWC_VMDQ_LOOPBACK_EN BIT(31) /* global VF LB enable */ 207 207 208 208 /* Easy defines for setting default pool, would normally be left a zero */ 209 209 #define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7 210 210 #define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT) 211 211 212 212 /* Other useful VMD_CTL register defines */ 213 - #define E1000_VT_CTL_IGNORE_MAC (1 << 28) 214 - #define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29) 215 - #define E1000_VT_CTL_VM_REPL_EN (1 << 30) 213 + #define E1000_VT_CTL_IGNORE_MAC BIT(28) 214 + #define E1000_VT_CTL_DISABLE_DEF_POOL BIT(29) 215 + #define E1000_VT_CTL_VM_REPL_EN BIT(30) 216 216 217 217 /* Per VM Offload register setup */ 218 218 #define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */ ··· 252 252 #define E1000_DTXCTL_MDP_EN 0x0020 253 253 #define E1000_DTXCTL_SPOOF_INT 0x0040 254 254 255 - #define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT (1 << 14) 255 + #define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT BIT(14) 256 256 257 257 #define ALL_QUEUES 0xFFFF 258 258
+54 -54
drivers/net/ethernet/intel/igb/e1000_defines.h
··· 530 530 531 531 /* Time Sync Interrupt Cause/Mask Register Bits */ 532 532 533 - #define TSINTR_SYS_WRAP (1 << 0) /* SYSTIM Wrap around. */ 534 - #define TSINTR_TXTS (1 << 1) /* Transmit Timestamp. */ 535 - #define TSINTR_RXTS (1 << 2) /* Receive Timestamp. */ 536 - #define TSINTR_TT0 (1 << 3) /* Target Time 0 Trigger. */ 537 - #define TSINTR_TT1 (1 << 4) /* Target Time 1 Trigger. */ 538 - #define TSINTR_AUTT0 (1 << 5) /* Auxiliary Timestamp 0 Taken. */ 539 - #define TSINTR_AUTT1 (1 << 6) /* Auxiliary Timestamp 1 Taken. */ 540 - #define TSINTR_TADJ (1 << 7) /* Time Adjust Done. */ 533 + #define TSINTR_SYS_WRAP BIT(0) /* SYSTIM Wrap around. */ 534 + #define TSINTR_TXTS BIT(1) /* Transmit Timestamp. */ 535 + #define TSINTR_RXTS BIT(2) /* Receive Timestamp. */ 536 + #define TSINTR_TT0 BIT(3) /* Target Time 0 Trigger. */ 537 + #define TSINTR_TT1 BIT(4) /* Target Time 1 Trigger. */ 538 + #define TSINTR_AUTT0 BIT(5) /* Auxiliary Timestamp 0 Taken. */ 539 + #define TSINTR_AUTT1 BIT(6) /* Auxiliary Timestamp 1 Taken. */ 540 + #define TSINTR_TADJ BIT(7) /* Time Adjust Done. */ 541 541 542 542 #define TSYNC_INTERRUPTS TSINTR_TXTS 543 543 #define E1000_TSICR_TXTS TSINTR_TXTS 544 544 545 545 /* TSAUXC Configuration Bits */ 546 - #define TSAUXC_EN_TT0 (1 << 0) /* Enable target time 0. */ 547 - #define TSAUXC_EN_TT1 (1 << 1) /* Enable target time 1. */ 548 - #define TSAUXC_EN_CLK0 (1 << 2) /* Enable Configurable Frequency Clock 0. */ 549 - #define TSAUXC_SAMP_AUT0 (1 << 3) /* Latch SYSTIML/H into AUXSTMPL/0. */ 550 - #define TSAUXC_ST0 (1 << 4) /* Start Clock 0 Toggle on Target Time 0. */ 551 - #define TSAUXC_EN_CLK1 (1 << 5) /* Enable Configurable Frequency Clock 1. */ 552 - #define TSAUXC_SAMP_AUT1 (1 << 6) /* Latch SYSTIML/H into AUXSTMPL/1. */ 553 - #define TSAUXC_ST1 (1 << 7) /* Start Clock 1 Toggle on Target Time 1. */ 554 - #define TSAUXC_EN_TS0 (1 << 8) /* Enable hardware timestamp 0. */ 555 - #define TSAUXC_AUTT0 (1 << 9) /* Auxiliary Timestamp Taken. 
*/ 556 - #define TSAUXC_EN_TS1 (1 << 10) /* Enable hardware timestamp 0. */ 557 - #define TSAUXC_AUTT1 (1 << 11) /* Auxiliary Timestamp Taken. */ 558 - #define TSAUXC_PLSG (1 << 17) /* Generate a pulse. */ 559 - #define TSAUXC_DISABLE (1 << 31) /* Disable SYSTIM Count Operation. */ 546 + #define TSAUXC_EN_TT0 BIT(0) /* Enable target time 0. */ 547 + #define TSAUXC_EN_TT1 BIT(1) /* Enable target time 1. */ 548 + #define TSAUXC_EN_CLK0 BIT(2) /* Enable Configurable Frequency Clock 0. */ 549 + #define TSAUXC_SAMP_AUT0 BIT(3) /* Latch SYSTIML/H into AUXSTMPL/0. */ 550 + #define TSAUXC_ST0 BIT(4) /* Start Clock 0 Toggle on Target Time 0. */ 551 + #define TSAUXC_EN_CLK1 BIT(5) /* Enable Configurable Frequency Clock 1. */ 552 + #define TSAUXC_SAMP_AUT1 BIT(6) /* Latch SYSTIML/H into AUXSTMPL/1. */ 553 + #define TSAUXC_ST1 BIT(7) /* Start Clock 1 Toggle on Target Time 1. */ 554 + #define TSAUXC_EN_TS0 BIT(8) /* Enable hardware timestamp 0. */ 555 + #define TSAUXC_AUTT0 BIT(9) /* Auxiliary Timestamp Taken. */ 556 + #define TSAUXC_EN_TS1 BIT(10) /* Enable hardware timestamp 0. */ 557 + #define TSAUXC_AUTT1 BIT(11) /* Auxiliary Timestamp Taken. */ 558 + #define TSAUXC_PLSG BIT(17) /* Generate a pulse. */ 559 + #define TSAUXC_DISABLE BIT(31) /* Disable SYSTIM Count Operation. */ 560 560 561 561 /* SDP Configuration Bits */ 562 - #define AUX0_SEL_SDP0 (0 << 0) /* Assign SDP0 to auxiliary time stamp 0. */ 563 - #define AUX0_SEL_SDP1 (1 << 0) /* Assign SDP1 to auxiliary time stamp 0. */ 564 - #define AUX0_SEL_SDP2 (2 << 0) /* Assign SDP2 to auxiliary time stamp 0. */ 565 - #define AUX0_SEL_SDP3 (3 << 0) /* Assign SDP3 to auxiliary time stamp 0. */ 566 - #define AUX0_TS_SDP_EN (1 << 2) /* Enable auxiliary time stamp trigger 0. */ 567 - #define AUX1_SEL_SDP0 (0 << 3) /* Assign SDP0 to auxiliary time stamp 1. */ 568 - #define AUX1_SEL_SDP1 (1 << 3) /* Assign SDP1 to auxiliary time stamp 1. */ 569 - #define AUX1_SEL_SDP2 (2 << 3) /* Assign SDP2 to auxiliary time stamp 1. 
*/ 570 - #define AUX1_SEL_SDP3 (3 << 3) /* Assign SDP3 to auxiliary time stamp 1. */ 571 - #define AUX1_TS_SDP_EN (1 << 5) /* Enable auxiliary time stamp trigger 1. */ 572 - #define TS_SDP0_SEL_TT0 (0 << 6) /* Target time 0 is output on SDP0. */ 573 - #define TS_SDP0_SEL_TT1 (1 << 6) /* Target time 1 is output on SDP0. */ 574 - #define TS_SDP0_SEL_FC0 (2 << 6) /* Freq clock 0 is output on SDP0. */ 575 - #define TS_SDP0_SEL_FC1 (3 << 6) /* Freq clock 1 is output on SDP0. */ 576 - #define TS_SDP0_EN (1 << 8) /* SDP0 is assigned to Tsync. */ 577 - #define TS_SDP1_SEL_TT0 (0 << 9) /* Target time 0 is output on SDP1. */ 578 - #define TS_SDP1_SEL_TT1 (1 << 9) /* Target time 1 is output on SDP1. */ 579 - #define TS_SDP1_SEL_FC0 (2 << 9) /* Freq clock 0 is output on SDP1. */ 580 - #define TS_SDP1_SEL_FC1 (3 << 9) /* Freq clock 1 is output on SDP1. */ 581 - #define TS_SDP1_EN (1 << 11) /* SDP1 is assigned to Tsync. */ 582 - #define TS_SDP2_SEL_TT0 (0 << 12) /* Target time 0 is output on SDP2. */ 583 - #define TS_SDP2_SEL_TT1 (1 << 12) /* Target time 1 is output on SDP2. */ 584 - #define TS_SDP2_SEL_FC0 (2 << 12) /* Freq clock 0 is output on SDP2. */ 585 - #define TS_SDP2_SEL_FC1 (3 << 12) /* Freq clock 1 is output on SDP2. */ 586 - #define TS_SDP2_EN (1 << 14) /* SDP2 is assigned to Tsync. */ 587 - #define TS_SDP3_SEL_TT0 (0 << 15) /* Target time 0 is output on SDP3. */ 588 - #define TS_SDP3_SEL_TT1 (1 << 15) /* Target time 1 is output on SDP3. */ 589 - #define TS_SDP3_SEL_FC0 (2 << 15) /* Freq clock 0 is output on SDP3. */ 590 - #define TS_SDP3_SEL_FC1 (3 << 15) /* Freq clock 1 is output on SDP3. */ 591 - #define TS_SDP3_EN (1 << 17) /* SDP3 is assigned to Tsync. */ 562 + #define AUX0_SEL_SDP0 (0u << 0) /* Assign SDP0 to auxiliary time stamp 0. */ 563 + #define AUX0_SEL_SDP1 (1u << 0) /* Assign SDP1 to auxiliary time stamp 0. */ 564 + #define AUX0_SEL_SDP2 (2u << 0) /* Assign SDP2 to auxiliary time stamp 0. 
*/ 565 + #define AUX0_SEL_SDP3 (3u << 0) /* Assign SDP3 to auxiliary time stamp 0. */ 566 + #define AUX0_TS_SDP_EN (1u << 2) /* Enable auxiliary time stamp trigger 0. */ 567 + #define AUX1_SEL_SDP0 (0u << 3) /* Assign SDP0 to auxiliary time stamp 1. */ 568 + #define AUX1_SEL_SDP1 (1u << 3) /* Assign SDP1 to auxiliary time stamp 1. */ 569 + #define AUX1_SEL_SDP2 (2u << 3) /* Assign SDP2 to auxiliary time stamp 1. */ 570 + #define AUX1_SEL_SDP3 (3u << 3) /* Assign SDP3 to auxiliary time stamp 1. */ 571 + #define AUX1_TS_SDP_EN (1u << 5) /* Enable auxiliary time stamp trigger 1. */ 572 + #define TS_SDP0_SEL_TT0 (0u << 6) /* Target time 0 is output on SDP0. */ 573 + #define TS_SDP0_SEL_TT1 (1u << 6) /* Target time 1 is output on SDP0. */ 574 + #define TS_SDP0_SEL_FC0 (2u << 6) /* Freq clock 0 is output on SDP0. */ 575 + #define TS_SDP0_SEL_FC1 (3u << 6) /* Freq clock 1 is output on SDP0. */ 576 + #define TS_SDP0_EN (1u << 8) /* SDP0 is assigned to Tsync. */ 577 + #define TS_SDP1_SEL_TT0 (0u << 9) /* Target time 0 is output on SDP1. */ 578 + #define TS_SDP1_SEL_TT1 (1u << 9) /* Target time 1 is output on SDP1. */ 579 + #define TS_SDP1_SEL_FC0 (2u << 9) /* Freq clock 0 is output on SDP1. */ 580 + #define TS_SDP1_SEL_FC1 (3u << 9) /* Freq clock 1 is output on SDP1. */ 581 + #define TS_SDP1_EN (1u << 11) /* SDP1 is assigned to Tsync. */ 582 + #define TS_SDP2_SEL_TT0 (0u << 12) /* Target time 0 is output on SDP2. */ 583 + #define TS_SDP2_SEL_TT1 (1u << 12) /* Target time 1 is output on SDP2. */ 584 + #define TS_SDP2_SEL_FC0 (2u << 12) /* Freq clock 0 is output on SDP2. */ 585 + #define TS_SDP2_SEL_FC1 (3u << 12) /* Freq clock 1 is output on SDP2. */ 586 + #define TS_SDP2_EN (1u << 14) /* SDP2 is assigned to Tsync. */ 587 + #define TS_SDP3_SEL_TT0 (0u << 15) /* Target time 0 is output on SDP3. */ 588 + #define TS_SDP3_SEL_TT1 (1u << 15) /* Target time 1 is output on SDP3. */ 589 + #define TS_SDP3_SEL_FC0 (2u << 15) /* Freq clock 0 is output on SDP3. 
*/ 590 + #define TS_SDP3_SEL_FC1 (3u << 15) /* Freq clock 1 is output on SDP3. */ 591 + #define TS_SDP3_EN (1u << 17) /* SDP3 is assigned to Tsync. */ 592 592 593 593 #define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */ 594 594 #define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */ ··· 997 997 #define E1000_M88E1543_FIBER_CTRL 0x0 998 998 #define E1000_EEE_ADV_DEV_I354 7 999 999 #define E1000_EEE_ADV_ADDR_I354 60 1000 - #define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */ 1001 - #define E1000_EEE_ADV_1000_SUPPORTED (1 << 2) /* 1000BaseT EEE Supported */ 1000 + #define E1000_EEE_ADV_100_SUPPORTED BIT(1) /* 100BaseTx EEE Supported */ 1001 + #define E1000_EEE_ADV_1000_SUPPORTED BIT(2) /* 1000BaseT EEE Supported */ 1002 1002 #define E1000_PCS_STATUS_DEV_I354 3 1003 1003 #define E1000_PCS_STATUS_ADDR_I354 1 1004 1004 #define E1000_PCS_STATUS_TX_LPI_IND 0x0200 /* Tx in LPI state */
+5 -5
drivers/net/ethernet/intel/igb/e1000_mac.c
··· 212 212 * bits[4-0]: which bit in the register 213 213 */ 214 214 regidx = vlan / 32; 215 - vfta_delta = 1 << (vlan % 32); 215 + vfta_delta = BIT(vlan % 32); 216 216 vfta = adapter->shadow_vfta[regidx]; 217 217 218 218 /* vfta_delta represents the difference between the current value ··· 243 243 bits = rd32(E1000_VLVF(vlvf_index)); 244 244 245 245 /* set the pool bit */ 246 - bits |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vind); 246 + bits |= BIT(E1000_VLVF_POOLSEL_SHIFT + vind); 247 247 if (vlan_on) 248 248 goto vlvf_update; 249 249 250 250 /* clear the pool bit */ 251 - bits ^= 1 << (E1000_VLVF_POOLSEL_SHIFT + vind); 251 + bits ^= BIT(E1000_VLVF_POOLSEL_SHIFT + vind); 252 252 253 253 if (!(bits & E1000_VLVF_POOLSEL_MASK)) { 254 254 /* Clear VFTA first, then disable VLVF. Otherwise ··· 427 427 428 428 mta = array_rd32(E1000_MTA, hash_reg); 429 429 430 - mta |= (1 << hash_bit); 430 + mta |= BIT(hash_bit); 431 431 432 432 array_wr32(E1000_MTA, hash_reg, mta); 433 433 wrfl(); ··· 527 527 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); 528 528 hash_bit = hash_value & 0x1F; 529 529 530 - hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit); 530 + hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit); 531 531 mc_addr_list += (ETH_ALEN); 532 532 } 533 533
+2 -2
drivers/net/ethernet/intel/igb/e1000_mbx.c
··· 302 302 u32 vflre = rd32(E1000_VFLRE); 303 303 s32 ret_val = -E1000_ERR_MBX; 304 304 305 - if (vflre & (1 << vf_number)) { 305 + if (vflre & BIT(vf_number)) { 306 306 ret_val = 0; 307 - wr32(E1000_VFLRE, (1 << vf_number)); 307 + wr32(E1000_VFLRE, BIT(vf_number)); 308 308 hw->mbx.stats.rsts++; 309 309 } 310 310
+1 -1
drivers/net/ethernet/intel/igb/e1000_nvm.c
··· 72 72 u32 eecd = rd32(E1000_EECD); 73 73 u32 mask; 74 74 75 - mask = 0x01 << (count - 1); 75 + mask = 1u << (count - 1); 76 76 if (nvm->type == e1000_nvm_eeprom_spi) 77 77 eecd |= E1000_EECD_DO; 78 78
+3 -3
drivers/net/ethernet/intel/igb/e1000_phy.h
··· 91 91 92 92 #define I82580_ADDR_REG 16 93 93 #define I82580_CFG_REG 22 94 - #define I82580_CFG_ASSERT_CRS_ON_TX (1 << 15) 95 - #define I82580_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */ 94 + #define I82580_CFG_ASSERT_CRS_ON_TX BIT(15) 95 + #define I82580_CFG_ENABLE_DOWNSHIFT (3u << 10) /* auto downshift 100/10 */ 96 96 #define I82580_CTRL_REG 23 97 - #define I82580_CTRL_DOWNSHIFT_MASK (7 << 10) 97 + #define I82580_CTRL_DOWNSHIFT_MASK (7u << 10) 98 98 99 99 /* 82580 specific PHY registers */ 100 100 #define I82580_PHY_CTRL_2 18
+24 -16
drivers/net/ethernet/intel/igb/igb.h
··· 91 91 #define NVM_COMB_VER_OFF 0x0083 92 92 #define NVM_COMB_VER_PTR 0x003d 93 93 94 + /* Transmit and receive latency (for PTP timestamps) */ 95 + #define IGB_I210_TX_LATENCY_10 9542 96 + #define IGB_I210_TX_LATENCY_100 1024 97 + #define IGB_I210_TX_LATENCY_1000 178 98 + #define IGB_I210_RX_LATENCY_10 20662 99 + #define IGB_I210_RX_LATENCY_100 2213 100 + #define IGB_I210_RX_LATENCY_1000 448 101 + 94 102 struct vf_data_storage { 95 103 unsigned char vf_mac_addresses[ETH_ALEN]; 96 104 u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES]; ··· 177 169 * maintain a power of two alignment we have to limit ourselves to 32K. 178 170 */ 179 171 #define IGB_MAX_TXD_PWR 15 180 - #define IGB_MAX_DATA_PER_TXD (1 << IGB_MAX_TXD_PWR) 172 + #define IGB_MAX_DATA_PER_TXD (1u << IGB_MAX_TXD_PWR) 181 173 182 174 /* Tx Descriptors needed, worst case */ 183 175 #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD) ··· 474 466 u16 eee_advert; 475 467 }; 476 468 477 - #define IGB_FLAG_HAS_MSI (1 << 0) 478 - #define IGB_FLAG_DCA_ENABLED (1 << 1) 479 - #define IGB_FLAG_QUAD_PORT_A (1 << 2) 480 - #define IGB_FLAG_QUEUE_PAIRS (1 << 3) 481 - #define IGB_FLAG_DMAC (1 << 4) 482 - #define IGB_FLAG_PTP (1 << 5) 483 - #define IGB_FLAG_RSS_FIELD_IPV4_UDP (1 << 6) 484 - #define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 7) 485 - #define IGB_FLAG_WOL_SUPPORTED (1 << 8) 486 - #define IGB_FLAG_NEED_LINK_UPDATE (1 << 9) 487 - #define IGB_FLAG_MEDIA_RESET (1 << 10) 488 - #define IGB_FLAG_MAS_CAPABLE (1 << 11) 489 - #define IGB_FLAG_MAS_ENABLE (1 << 12) 490 - #define IGB_FLAG_HAS_MSIX (1 << 13) 491 - #define IGB_FLAG_EEE (1 << 14) 469 + #define IGB_FLAG_HAS_MSI BIT(0) 470 + #define IGB_FLAG_DCA_ENABLED BIT(1) 471 + #define IGB_FLAG_QUAD_PORT_A BIT(2) 472 + #define IGB_FLAG_QUEUE_PAIRS BIT(3) 473 + #define IGB_FLAG_DMAC BIT(4) 474 + #define IGB_FLAG_PTP BIT(5) 475 + #define IGB_FLAG_RSS_FIELD_IPV4_UDP BIT(6) 476 + #define IGB_FLAG_RSS_FIELD_IPV6_UDP BIT(7) 477 + #define IGB_FLAG_WOL_SUPPORTED BIT(8) 478 + #define 
IGB_FLAG_NEED_LINK_UPDATE BIT(9) 479 + #define IGB_FLAG_MEDIA_RESET BIT(10) 480 + #define IGB_FLAG_MAS_CAPABLE BIT(11) 481 + #define IGB_FLAG_MAS_ENABLE BIT(12) 482 + #define IGB_FLAG_HAS_MSIX BIT(13) 483 + #define IGB_FLAG_EEE BIT(14) 492 484 #define IGB_FLAG_VLAN_PROMISC BIT(15) 493 485 494 486 /* Media Auto Sense */
+9 -9
drivers/net/ethernet/intel/igb/igb_ethtool.c
··· 466 466 467 467 memset(p, 0, IGB_REGS_LEN * sizeof(u32)); 468 468 469 - regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; 469 + regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id; 470 470 471 471 /* General Registers */ 472 472 regs_buff[0] = rd32(E1000_CTRL); ··· 1448 1448 /* Test each interrupt */ 1449 1449 for (; i < 31; i++) { 1450 1450 /* Interrupt to test */ 1451 - mask = 1 << i; 1451 + mask = BIT(i); 1452 1452 1453 1453 if (!(mask & ics_mask)) 1454 1454 continue; ··· 2411 2411 SOF_TIMESTAMPING_RAW_HARDWARE; 2412 2412 2413 2413 info->tx_types = 2414 - (1 << HWTSTAMP_TX_OFF) | 2415 - (1 << HWTSTAMP_TX_ON); 2414 + BIT(HWTSTAMP_TX_OFF) | 2415 + BIT(HWTSTAMP_TX_ON); 2416 2416 2417 - info->rx_filters = 1 << HWTSTAMP_FILTER_NONE; 2417 + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE); 2418 2418 2419 2419 /* 82576 does not support timestamping all packets. */ 2420 2420 if (adapter->hw.mac.type >= e1000_82580) 2421 - info->rx_filters |= 1 << HWTSTAMP_FILTER_ALL; 2421 + info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL); 2422 2422 else 2423 2423 info->rx_filters |= 2424 - (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | 2425 - (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | 2426 - (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); 2424 + BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | 2425 + BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | 2426 + BIT(HWTSTAMP_FILTER_PTP_V2_EVENT); 2427 2427 2428 2428 return 0; 2429 2429 default:
+121 -66
drivers/net/ethernet/intel/igb/igb_main.c
··· 836 836 igb_write_ivar(hw, msix_vector, 837 837 tx_queue & 0x7, 838 838 ((tx_queue & 0x8) << 1) + 8); 839 - q_vector->eims_value = 1 << msix_vector; 839 + q_vector->eims_value = BIT(msix_vector); 840 840 break; 841 841 case e1000_82580: 842 842 case e1000_i350: ··· 857 857 igb_write_ivar(hw, msix_vector, 858 858 tx_queue >> 1, 859 859 ((tx_queue & 0x1) << 4) + 8); 860 - q_vector->eims_value = 1 << msix_vector; 860 + q_vector->eims_value = BIT(msix_vector); 861 861 break; 862 862 default: 863 863 BUG(); ··· 919 919 E1000_GPIE_NSICR); 920 920 921 921 /* enable msix_other interrupt */ 922 - adapter->eims_other = 1 << vector; 922 + adapter->eims_other = BIT(vector); 923 923 tmp = (vector++ | E1000_IVAR_VALID) << 8; 924 924 925 925 wr32(E1000_IVAR_MISC, tmp); ··· 2087 2087 return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags); 2088 2088 } 2089 2089 2090 + #define IGB_MAX_MAC_HDR_LEN 127 2091 + #define IGB_MAX_NETWORK_HDR_LEN 511 2092 + 2093 + static netdev_features_t 2094 + igb_features_check(struct sk_buff *skb, struct net_device *dev, 2095 + netdev_features_t features) 2096 + { 2097 + unsigned int network_hdr_len, mac_hdr_len; 2098 + 2099 + /* Make certain the headers can be described by a context descriptor */ 2100 + mac_hdr_len = skb_network_header(skb) - skb->data; 2101 + if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN)) 2102 + return features & ~(NETIF_F_HW_CSUM | 2103 + NETIF_F_SCTP_CRC | 2104 + NETIF_F_HW_VLAN_CTAG_TX | 2105 + NETIF_F_TSO | 2106 + NETIF_F_TSO6); 2107 + 2108 + network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); 2109 + if (unlikely(network_hdr_len > IGB_MAX_NETWORK_HDR_LEN)) 2110 + return features & ~(NETIF_F_HW_CSUM | 2111 + NETIF_F_SCTP_CRC | 2112 + NETIF_F_TSO | 2113 + NETIF_F_TSO6); 2114 + 2115 + /* We can only support IPV4 TSO in tunnels if we can mangle the 2116 + * inner IP ID field, so strip TSO if MANGLEID is not supported. 
2117 + */ 2118 + if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) 2119 + features &= ~NETIF_F_TSO; 2120 + 2121 + return features; 2122 + } 2123 + 2090 2124 static const struct net_device_ops igb_netdev_ops = { 2091 2125 .ndo_open = igb_open, 2092 2126 .ndo_stop = igb_close, ··· 2145 2111 .ndo_fix_features = igb_fix_features, 2146 2112 .ndo_set_features = igb_set_features, 2147 2113 .ndo_fdb_add = igb_ndo_fdb_add, 2148 - .ndo_features_check = passthru_features_check, 2114 + .ndo_features_check = igb_features_check, 2149 2115 }; 2150 2116 2151 2117 /** ··· 2411 2377 NETIF_F_TSO6 | 2412 2378 NETIF_F_RXHASH | 2413 2379 NETIF_F_RXCSUM | 2414 - NETIF_F_HW_CSUM | 2415 - NETIF_F_HW_VLAN_CTAG_RX | 2416 - NETIF_F_HW_VLAN_CTAG_TX; 2380 + NETIF_F_HW_CSUM; 2417 2381 2418 2382 if (hw->mac.type >= e1000_82576) 2419 2383 netdev->features |= NETIF_F_SCTP_CRC; 2420 2384 2385 + #define IGB_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ 2386 + NETIF_F_GSO_GRE_CSUM | \ 2387 + NETIF_F_GSO_IPIP | \ 2388 + NETIF_F_GSO_SIT | \ 2389 + NETIF_F_GSO_UDP_TUNNEL | \ 2390 + NETIF_F_GSO_UDP_TUNNEL_CSUM) 2391 + 2392 + netdev->gso_partial_features = IGB_GSO_PARTIAL_FEATURES; 2393 + netdev->features |= NETIF_F_GSO_PARTIAL | IGB_GSO_PARTIAL_FEATURES; 2394 + 2421 2395 /* copy netdev features into list of user selectable features */ 2422 - netdev->hw_features |= netdev->features; 2423 - netdev->hw_features |= NETIF_F_RXALL; 2396 + netdev->hw_features |= netdev->features | 2397 + NETIF_F_HW_VLAN_CTAG_RX | 2398 + NETIF_F_HW_VLAN_CTAG_TX | 2399 + NETIF_F_RXALL; 2424 2400 2425 2401 if (hw->mac.type >= e1000_i350) 2426 2402 netdev->hw_features |= NETIF_F_NTUPLE; 2427 2403 2428 - /* set this bit last since it cannot be part of hw_features */ 2429 - netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 2404 + if (pci_using_dac) 2405 + netdev->features |= NETIF_F_HIGHDMA; 2430 2406 2431 - netdev->vlan_features |= NETIF_F_SG | 2432 - NETIF_F_TSO | 2433 - NETIF_F_TSO6 | 2434 - NETIF_F_HW_CSUM | 2435 - 
NETIF_F_SCTP_CRC; 2436 - 2407 + netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; 2437 2408 netdev->mpls_features |= NETIF_F_HW_CSUM; 2438 - netdev->hw_enc_features |= NETIF_F_HW_CSUM; 2409 + netdev->hw_enc_features |= netdev->vlan_features; 2410 + 2411 + /* set this bit last since it cannot be part of vlan_features */ 2412 + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | 2413 + NETIF_F_HW_VLAN_CTAG_RX | 2414 + NETIF_F_HW_VLAN_CTAG_TX; 2439 2415 2440 2416 netdev->priv_flags |= IFF_SUPP_NOFCS; 2441 - 2442 - if (pci_using_dac) { 2443 - netdev->features |= NETIF_F_HIGHDMA; 2444 - netdev->vlan_features |= NETIF_F_HIGHDMA; 2445 - } 2446 2417 2447 2418 netdev->priv_flags |= IFF_UNICAST_FLT; 2448 2419 ··· 4103 4064 for (i = E1000_VLVF_ARRAY_SIZE; --i;) { 4104 4065 u32 vlvf = rd32(E1000_VLVF(i)); 4105 4066 4106 - vlvf |= 1 << pf_id; 4067 + vlvf |= BIT(pf_id); 4107 4068 wr32(E1000_VLVF(i), vlvf); 4108 4069 } 4109 4070 ··· 4130 4091 /* guarantee that we don't scrub out management VLAN */ 4131 4092 vid = adapter->mng_vlan_id; 4132 4093 if (vid >= vid_start && vid < vid_end) 4133 - vfta[(vid - vid_start) / 32] |= 1 << (vid % 32); 4094 + vfta[(vid - vid_start) / 32] |= BIT(vid % 32); 4134 4095 4135 4096 if (!adapter->vfs_allocated_count) 4136 4097 goto set_vfta; ··· 4149 4110 4150 4111 if (vlvf & E1000_VLVF_VLANID_ENABLE) { 4151 4112 /* record VLAN ID in VFTA */ 4152 - vfta[(vid - vid_start) / 32] |= 1 << (vid % 32); 4113 + vfta[(vid - vid_start) / 32] |= BIT(vid % 32); 4153 4114 4154 4115 /* if PF is part of this then continue */ 4155 4116 if (test_bit(vid, adapter->active_vlans)) ··· 4157 4118 } 4158 4119 4159 4120 /* remove PF from the pool */ 4160 - bits = ~(1 << pf_id); 4121 + bits = ~BIT(pf_id); 4161 4122 bits &= rd32(E1000_VLVF(i)); 4162 4123 wr32(E1000_VLVF(i), bits); 4163 4124 } ··· 4315 4276 return; 4316 4277 4317 4278 for (j = 0; j < adapter->vfs_allocated_count; j++) { 4318 - if (adapter->wvbr & (1 << j) || 4319 - adapter->wvbr & (1 << (j + 
IGB_STAGGERED_QUEUE_OFFSET))) { 4279 + if (adapter->wvbr & BIT(j) || 4280 + adapter->wvbr & BIT(j + IGB_STAGGERED_QUEUE_OFFSET)) { 4320 4281 dev_warn(&adapter->pdev->dev, 4321 4282 "Spoof event(s) detected on VF %d\n", j); 4322 4283 adapter->wvbr &= 4323 - ~((1 << j) | 4324 - (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))); 4284 + ~(BIT(j) | 4285 + BIT(j + IGB_STAGGERED_QUEUE_OFFSET)); 4325 4286 } 4326 4287 } 4327 4288 } ··· 4881 4842 struct igb_tx_buffer *first, 4882 4843 u8 *hdr_len) 4883 4844 { 4845 + u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; 4884 4846 struct sk_buff *skb = first->skb; 4885 - u32 vlan_macip_lens, type_tucmd; 4886 - u32 mss_l4len_idx, l4len; 4847 + union { 4848 + struct iphdr *v4; 4849 + struct ipv6hdr *v6; 4850 + unsigned char *hdr; 4851 + } ip; 4852 + union { 4853 + struct tcphdr *tcp; 4854 + unsigned char *hdr; 4855 + } l4; 4856 + u32 paylen, l4_offset; 4887 4857 int err; 4888 4858 4889 4859 if (skb->ip_summed != CHECKSUM_PARTIAL) ··· 4905 4857 if (err < 0) 4906 4858 return err; 4907 4859 4860 + ip.hdr = skb_network_header(skb); 4861 + l4.hdr = skb_checksum_start(skb); 4862 + 4908 4863 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 4909 4864 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; 4910 4865 4911 - if (first->protocol == htons(ETH_P_IP)) { 4912 - struct iphdr *iph = ip_hdr(skb); 4913 - iph->tot_len = 0; 4914 - iph->check = 0; 4915 - tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 4916 - iph->daddr, 0, 4917 - IPPROTO_TCP, 4918 - 0); 4866 + /* initialize outer IP header fields */ 4867 + if (ip.v4->version == 4) { 4868 + /* IP header will have to cancel out any data that 4869 + * is not a part of the outer IP header 4870 + */ 4871 + ip.v4->check = csum_fold(csum_add(lco_csum(skb), 4872 + csum_unfold(l4.tcp->check))); 4919 4873 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4; 4874 + 4875 + ip.v4->tot_len = 0; 4920 4876 first->tx_flags |= IGB_TX_FLAGS_TSO | 4921 4877 IGB_TX_FLAGS_CSUM | 4922 4878 IGB_TX_FLAGS_IPV4; 4923 - } else if (skb_is_gso_v6(skb)) { 4924 - 
ipv6_hdr(skb)->payload_len = 0; 4925 - tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 4926 - &ipv6_hdr(skb)->daddr, 4927 - 0, IPPROTO_TCP, 0); 4879 + } else { 4880 + ip.v6->payload_len = 0; 4928 4881 first->tx_flags |= IGB_TX_FLAGS_TSO | 4929 4882 IGB_TX_FLAGS_CSUM; 4930 4883 } 4931 4884 4932 - /* compute header lengths */ 4933 - l4len = tcp_hdrlen(skb); 4934 - *hdr_len = skb_transport_offset(skb) + l4len; 4885 + /* determine offset of inner transport header */ 4886 + l4_offset = l4.hdr - skb->data; 4887 + 4888 + /* compute length of segmentation header */ 4889 + *hdr_len = (l4.tcp->doff * 4) + l4_offset; 4890 + 4891 + /* remove payload length from inner checksum */ 4892 + paylen = skb->len - l4_offset; 4893 + csum_replace_by_diff(&l4.tcp->check, htonl(paylen)); 4935 4894 4936 4895 /* update gso size and bytecount with header size */ 4937 4896 first->gso_segs = skb_shinfo(skb)->gso_segs; 4938 4897 first->bytecount += (first->gso_segs - 1) * *hdr_len; 4939 4898 4940 4899 /* MSS L4LEN IDX */ 4941 - mss_l4len_idx = l4len << E1000_ADVTXD_L4LEN_SHIFT; 4900 + mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT; 4942 4901 mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT; 4943 4902 4944 4903 /* VLAN MACLEN IPLEN */ 4945 - vlan_macip_lens = skb_network_header_len(skb); 4946 - vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT; 4904 + vlan_macip_lens = l4.hdr - ip.hdr; 4905 + vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT; 4947 4906 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK; 4948 4907 4949 4908 igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx); ··· 6018 5963 6019 5964 /* create mask for VF and other pools */ 6020 5965 pool_mask = E1000_VLVF_POOLSEL_MASK; 6021 - vlvf_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf); 5966 + vlvf_mask = BIT(E1000_VLVF_POOLSEL_SHIFT + vf); 6022 5967 6023 5968 /* drop PF from pool bits */ 6024 - pool_mask &= ~(1 << 
(E1000_VLVF_POOLSEL_SHIFT + 6025 - adapter->vfs_allocated_count)); 5969 + pool_mask &= ~BIT(E1000_VLVF_POOLSEL_SHIFT + 5970 + adapter->vfs_allocated_count); 6026 5971 6027 5972 /* Find the vlan filter for this id */ 6028 5973 for (i = E1000_VLVF_ARRAY_SIZE; i--;) { ··· 6045 5990 goto update_vlvf; 6046 5991 6047 5992 vid = vlvf & E1000_VLVF_VLANID_MASK; 6048 - vfta_mask = 1 << (vid % 32); 5993 + vfta_mask = BIT(vid % 32); 6049 5994 6050 5995 /* clear bit from VFTA */ 6051 5996 vfta = adapter->shadow_vfta[vid / 32]; ··· 6082 6027 return idx; 6083 6028 } 6084 6029 6085 - void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid) 6030 + static void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid) 6086 6031 { 6087 6032 struct e1000_hw *hw = &adapter->hw; 6088 6033 u32 bits, pf_id; ··· 6096 6041 * entry other than the PF. 6097 6042 */ 6098 6043 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT; 6099 - bits = ~(1 << pf_id) & E1000_VLVF_POOLSEL_MASK; 6044 + bits = ~BIT(pf_id) & E1000_VLVF_POOLSEL_MASK; 6100 6045 bits &= rd32(E1000_VLVF(idx)); 6101 6046 6102 6047 /* Disable the filter so this falls into the default pool. 
*/ 6103 6048 if (!bits) { 6104 6049 if (adapter->flags & IGB_FLAG_VLAN_PROMISC) 6105 - wr32(E1000_VLVF(idx), 1 << pf_id); 6050 + wr32(E1000_VLVF(idx), BIT(pf_id)); 6106 6051 else 6107 6052 wr32(E1000_VLVF(idx), 0); 6108 6053 } ··· 6286 6231 6287 6232 /* enable transmit and receive for vf */ 6288 6233 reg = rd32(E1000_VFTE); 6289 - wr32(E1000_VFTE, reg | (1 << vf)); 6234 + wr32(E1000_VFTE, reg | BIT(vf)); 6290 6235 reg = rd32(E1000_VFRE); 6291 - wr32(E1000_VFRE, reg | (1 << vf)); 6236 + wr32(E1000_VFRE, reg | BIT(vf)); 6292 6237 6293 6238 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS; 6294 6239 ··· 7982 7927 /* Calculate the rate factor values to set */ 7983 7928 rf_int = link_speed / tx_rate; 7984 7929 rf_dec = (link_speed - (rf_int * tx_rate)); 7985 - rf_dec = (rf_dec * (1 << E1000_RTTBCNRC_RF_INT_SHIFT)) / 7930 + rf_dec = (rf_dec * BIT(E1000_RTTBCNRC_RF_INT_SHIFT)) / 7986 7931 tx_rate; 7987 7932 7988 7933 bcnrc_val = E1000_RTTBCNRC_RS_ENA; ··· 8072 8017 reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC; 8073 8018 reg_val = rd32(reg_offset); 8074 8019 if (setting) 8075 - reg_val |= ((1 << vf) | 8076 - (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT))); 8020 + reg_val |= (BIT(vf) | 8021 + BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)); 8077 8022 else 8078 - reg_val &= ~((1 << vf) | 8079 - (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT))); 8023 + reg_val &= ~(BIT(vf) | 8024 + BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)); 8080 8025 wr32(reg_offset, reg_val); 8081 8026 8082 8027 adapter->vf_data[vf].spoofchk_enabled = setting;
+39 -3
drivers/net/ethernet/intel/igb/igb_ptp.c
··· 69 69 70 70 #define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9) 71 71 #define IGB_PTP_TX_TIMEOUT (HZ * 15) 72 - #define INCPERIOD_82576 (1 << E1000_TIMINCA_16NS_SHIFT) 73 - #define INCVALUE_82576_MASK ((1 << E1000_TIMINCA_16NS_SHIFT) - 1) 74 - #define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT) 72 + #define INCPERIOD_82576 BIT(E1000_TIMINCA_16NS_SHIFT) 73 + #define INCVALUE_82576_MASK GENMASK(E1000_TIMINCA_16NS_SHIFT - 1, 0) 74 + #define INCVALUE_82576 (16u << IGB_82576_TSYNC_SHIFT) 75 75 #define IGB_NBITS_82580 40 76 76 77 77 static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter); ··· 722 722 struct e1000_hw *hw = &adapter->hw; 723 723 struct skb_shared_hwtstamps shhwtstamps; 724 724 u64 regval; 725 + int adjust = 0; 725 726 726 727 regval = rd32(E1000_TXSTMPL); 727 728 regval |= (u64)rd32(E1000_TXSTMPH) << 32; 728 729 729 730 igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval); 731 + /* adjust timestamp for the TX latency based on link speed */ 732 + if (adapter->hw.mac.type == e1000_i210) { 733 + switch (adapter->link_speed) { 734 + case SPEED_10: 735 + adjust = IGB_I210_TX_LATENCY_10; 736 + break; 737 + case SPEED_100: 738 + adjust = IGB_I210_TX_LATENCY_100; 739 + break; 740 + case SPEED_1000: 741 + adjust = IGB_I210_TX_LATENCY_1000; 742 + break; 743 + } 744 + } 745 + 746 + shhwtstamps.hwtstamp = ktime_sub_ns(shhwtstamps.hwtstamp, adjust); 747 + 730 748 skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps); 731 749 dev_kfree_skb_any(adapter->ptp_tx_skb); 732 750 adapter->ptp_tx_skb = NULL; ··· 789 771 struct igb_adapter *adapter = q_vector->adapter; 790 772 struct e1000_hw *hw = &adapter->hw; 791 773 u64 regval; 774 + int adjust = 0; 792 775 793 776 /* If this bit is set, then the RX registers contain the time stamp. 
No 794 777 * other packet will be time stamped until we read these registers, so ··· 808 789 regval |= (u64)rd32(E1000_RXSTMPH) << 32; 809 790 810 791 igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); 792 + 793 + /* adjust timestamp for the RX latency based on link speed */ 794 + if (adapter->hw.mac.type == e1000_i210) { 795 + switch (adapter->link_speed) { 796 + case SPEED_10: 797 + adjust = IGB_I210_RX_LATENCY_10; 798 + break; 799 + case SPEED_100: 800 + adjust = IGB_I210_RX_LATENCY_100; 801 + break; 802 + case SPEED_1000: 803 + adjust = IGB_I210_RX_LATENCY_1000; 804 + break; 805 + } 806 + } 807 + skb_hwtstamps(skb)->hwtstamp = 808 + ktime_add_ns(skb_hwtstamps(skb)->hwtstamp, adjust); 811 809 812 810 /* Update the last_rx_timestamp timer in order to enable watchdog check 813 811 * for error case of latched timestamp on a dropped packet.
+1 -1
drivers/net/ethernet/intel/igbvf/defines.h
··· 113 113 #define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Que */ 114 114 115 115 /* Direct Cache Access (DCA) definitions */ 116 - #define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ 116 + #define E1000_DCA_TXCTRL_TX_WB_RO_EN BIT(11) /* Tx Desc writeback RO bit */ 117 117 118 118 #define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ 119 119
+2 -1
drivers/net/ethernet/intel/igbvf/ethtool.c
··· 154 154 155 155 memset(p, 0, IGBVF_REGS_LEN * sizeof(u32)); 156 156 157 - regs->version = (1 << 24) | (adapter->pdev->revision << 16) | 157 + regs->version = (1u << 24) | 158 + (adapter->pdev->revision << 16) | 158 159 adapter->pdev->device; 159 160 160 161 regs_buff[0] = er32(CTRL);
+2 -2
drivers/net/ethernet/intel/igbvf/igbvf.h
··· 287 287 }; 288 288 289 289 /* hardware capability, feature, and workaround flags */ 290 - #define IGBVF_FLAG_RX_CSUM_DISABLED (1 << 0) 291 - #define IGBVF_FLAG_RX_LB_VLAN_BSWAP (1 << 1) 290 + #define IGBVF_FLAG_RX_CSUM_DISABLED BIT(0) 291 + #define IGBVF_FLAG_RX_LB_VLAN_BSWAP BIT(1) 292 292 #define IGBVF_RX_DESC_ADV(R, i) \ 293 293 (&((((R).desc))[i].rx_desc)) 294 294 #define IGBVF_TX_DESC_ADV(R, i) \
+113 -83
drivers/net/ethernet/intel/igbvf/netdev.c
··· 964 964 ivar = ivar & 0xFFFFFF00; 965 965 ivar |= msix_vector | E1000_IVAR_VALID; 966 966 } 967 - adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector; 967 + adapter->rx_ring[rx_queue].eims_value = BIT(msix_vector); 968 968 array_ew32(IVAR0, index, ivar); 969 969 } 970 970 if (tx_queue > IGBVF_NO_QUEUE) { ··· 979 979 ivar = ivar & 0xFFFF00FF; 980 980 ivar |= (msix_vector | E1000_IVAR_VALID) << 8; 981 981 } 982 - adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector; 982 + adapter->tx_ring[tx_queue].eims_value = BIT(msix_vector); 983 983 array_ew32(IVAR0, index, ivar); 984 984 } 985 985 } ··· 1014 1014 1015 1015 ew32(IVAR_MISC, tmp); 1016 1016 1017 - adapter->eims_enable_mask = (1 << (vector)) - 1; 1018 - adapter->eims_other = 1 << (vector - 1); 1017 + adapter->eims_enable_mask = GENMASK(vector - 1, 0); 1018 + adapter->eims_other = BIT(vector - 1); 1019 1019 e1e_flush(); 1020 1020 } 1021 1021 ··· 1367 1367 struct e1000_hw *hw = &adapter->hw; 1368 1368 struct igbvf_ring *rx_ring = adapter->rx_ring; 1369 1369 u64 rdba; 1370 - u32 rdlen, rxdctl; 1370 + u32 rxdctl; 1371 1371 1372 1372 /* disable receives */ 1373 1373 rxdctl = er32(RXDCTL(0)); 1374 1374 ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE); 1375 1375 e1e_flush(); 1376 1376 msleep(10); 1377 - 1378 - rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc); 1379 1377 1380 1378 /* Setup the HW Rx Head and Tail Descriptor Pointers and 1381 1379 * the Base and Length of the Rx Descriptor Ring ··· 1931 1933 buffer_info->dma = 0; 1932 1934 } 1933 1935 1934 - static int igbvf_tso(struct igbvf_adapter *adapter, 1935 - struct igbvf_ring *tx_ring, 1936 - struct sk_buff *skb, u32 tx_flags, u8 *hdr_len, 1937 - __be16 protocol) 1936 + static int igbvf_tso(struct igbvf_ring *tx_ring, 1937 + struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) 1938 1938 { 1939 - struct e1000_adv_tx_context_desc *context_desc; 1940 - struct igbvf_buffer *buffer_info; 1941 - u32 info = 0, tu_cmd = 0; 1942 - u32 mss_l4len_idx, l4len; 
1943 - unsigned int i; 1939 + u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; 1940 + union { 1941 + struct iphdr *v4; 1942 + struct ipv6hdr *v6; 1943 + unsigned char *hdr; 1944 + } ip; 1945 + union { 1946 + struct tcphdr *tcp; 1947 + unsigned char *hdr; 1948 + } l4; 1949 + u32 paylen, l4_offset; 1944 1950 int err; 1945 1951 1946 - *hdr_len = 0; 1952 + if (skb->ip_summed != CHECKSUM_PARTIAL) 1953 + return 0; 1954 + 1955 + if (!skb_is_gso(skb)) 1956 + return 0; 1947 1957 1948 1958 err = skb_cow_head(skb, 0); 1949 - if (err < 0) { 1950 - dev_err(&adapter->pdev->dev, "igbvf_tso returning an error\n"); 1959 + if (err < 0) 1951 1960 return err; 1952 - } 1953 1961 1954 - l4len = tcp_hdrlen(skb); 1955 - *hdr_len += l4len; 1956 - 1957 - if (protocol == htons(ETH_P_IP)) { 1958 - struct iphdr *iph = ip_hdr(skb); 1959 - 1960 - iph->tot_len = 0; 1961 - iph->check = 0; 1962 - tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 1963 - iph->daddr, 0, 1964 - IPPROTO_TCP, 1965 - 0); 1966 - } else if (skb_is_gso_v6(skb)) { 1967 - ipv6_hdr(skb)->payload_len = 0; 1968 - tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 1969 - &ipv6_hdr(skb)->daddr, 1970 - 0, IPPROTO_TCP, 0); 1971 - } 1972 - 1973 - i = tx_ring->next_to_use; 1974 - 1975 - buffer_info = &tx_ring->buffer_info[i]; 1976 - context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i); 1977 - /* VLAN MACLEN IPLEN */ 1978 - if (tx_flags & IGBVF_TX_FLAGS_VLAN) 1979 - info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK); 1980 - info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); 1981 - *hdr_len += skb_network_offset(skb); 1982 - info |= (skb_transport_header(skb) - skb_network_header(skb)); 1983 - *hdr_len += (skb_transport_header(skb) - skb_network_header(skb)); 1984 - context_desc->vlan_macip_lens = cpu_to_le32(info); 1962 + ip.hdr = skb_network_header(skb); 1963 + l4.hdr = skb_checksum_start(skb); 1985 1964 1986 1965 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 1987 - tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); 
1966 + type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; 1988 1967 1989 - if (protocol == htons(ETH_P_IP)) 1990 - tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; 1991 - tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; 1968 + /* initialize outer IP header fields */ 1969 + if (ip.v4->version == 4) { 1970 + /* IP header will have to cancel out any data that 1971 + * is not a part of the outer IP header 1972 + */ 1973 + ip.v4->check = csum_fold(csum_add(lco_csum(skb), 1974 + csum_unfold(l4.tcp->check))); 1975 + type_tucmd |= E1000_ADVTXD_TUCMD_IPV4; 1992 1976 1993 - context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd); 1977 + ip.v4->tot_len = 0; 1978 + } else { 1979 + ip.v6->payload_len = 0; 1980 + } 1981 + 1982 + /* determine offset of inner transport header */ 1983 + l4_offset = l4.hdr - skb->data; 1984 + 1985 + /* compute length of segmentation header */ 1986 + *hdr_len = (l4.tcp->doff * 4) + l4_offset; 1987 + 1988 + /* remove payload length from inner checksum */ 1989 + paylen = skb->len - l4_offset; 1990 + csum_replace_by_diff(&l4.tcp->check, htonl(paylen)); 1994 1991 1995 1992 /* MSS L4LEN IDX */ 1996 - mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT); 1997 - mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT); 1993 + mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT; 1994 + mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT; 1998 1995 1999 - context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); 2000 - context_desc->seqnum_seed = 0; 1996 + /* VLAN MACLEN IPLEN */ 1997 + vlan_macip_lens = l4.hdr - ip.hdr; 1998 + vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT; 1999 + vlan_macip_lens |= tx_flags & IGBVF_TX_FLAGS_VLAN_MASK; 2001 2000 2002 - buffer_info->time_stamp = jiffies; 2003 - buffer_info->dma = 0; 2004 - i++; 2005 - if (i == tx_ring->count) 2006 - i = 0; 2001 + igbvf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx); 2007 2002 2008 - tx_ring->next_to_use = i; 2009 - 2010 - return true; 2003 + 
return 1; 2011 2004 } 2012 2005 2013 2006 static inline bool igbvf_ipv6_csum_is_sctp(struct sk_buff *skb) ··· 2080 2091 } 2081 2092 2082 2093 #define IGBVF_MAX_TXD_PWR 16 2083 - #define IGBVF_MAX_DATA_PER_TXD (1 << IGBVF_MAX_TXD_PWR) 2094 + #define IGBVF_MAX_DATA_PER_TXD (1u << IGBVF_MAX_TXD_PWR) 2084 2095 2085 2096 static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, 2086 2097 struct igbvf_ring *tx_ring, ··· 2260 2271 2261 2272 first = tx_ring->next_to_use; 2262 2273 2263 - tso = skb_is_gso(skb) ? 2264 - igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len, protocol) : 0; 2274 + tso = igbvf_tso(tx_ring, skb, tx_flags, &hdr_len); 2265 2275 if (unlikely(tso < 0)) { 2266 2276 dev_kfree_skb_any(skb); 2267 2277 return NETDEV_TX_OK; ··· 2603 2615 return 0; 2604 2616 } 2605 2617 2618 + #define IGBVF_MAX_MAC_HDR_LEN 127 2619 + #define IGBVF_MAX_NETWORK_HDR_LEN 511 2620 + 2621 + static netdev_features_t 2622 + igbvf_features_check(struct sk_buff *skb, struct net_device *dev, 2623 + netdev_features_t features) 2624 + { 2625 + unsigned int network_hdr_len, mac_hdr_len; 2626 + 2627 + /* Make certain the headers can be described by a context descriptor */ 2628 + mac_hdr_len = skb_network_header(skb) - skb->data; 2629 + if (unlikely(mac_hdr_len > IGBVF_MAX_MAC_HDR_LEN)) 2630 + return features & ~(NETIF_F_HW_CSUM | 2631 + NETIF_F_SCTP_CRC | 2632 + NETIF_F_HW_VLAN_CTAG_TX | 2633 + NETIF_F_TSO | 2634 + NETIF_F_TSO6); 2635 + 2636 + network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); 2637 + if (unlikely(network_hdr_len > IGBVF_MAX_NETWORK_HDR_LEN)) 2638 + return features & ~(NETIF_F_HW_CSUM | 2639 + NETIF_F_SCTP_CRC | 2640 + NETIF_F_TSO | 2641 + NETIF_F_TSO6); 2642 + 2643 + /* We can only support IPV4 TSO in tunnels if we can mangle the 2644 + * inner IP ID field, so strip TSO if MANGLEID is not supported. 
2645 + */ 2646 + if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) 2647 + features &= ~NETIF_F_TSO; 2648 + 2649 + return features; 2650 + } 2651 + 2606 2652 static const struct net_device_ops igbvf_netdev_ops = { 2607 2653 .ndo_open = igbvf_open, 2608 2654 .ndo_stop = igbvf_close, ··· 2653 2631 .ndo_poll_controller = igbvf_netpoll, 2654 2632 #endif 2655 2633 .ndo_set_features = igbvf_set_features, 2656 - .ndo_features_check = passthru_features_check, 2634 + .ndo_features_check = igbvf_features_check, 2657 2635 }; 2658 2636 2659 2637 /** ··· 2761 2739 NETIF_F_HW_CSUM | 2762 2740 NETIF_F_SCTP_CRC; 2763 2741 2764 - netdev->features = netdev->hw_features | 2765 - NETIF_F_HW_VLAN_CTAG_TX | 2766 - NETIF_F_HW_VLAN_CTAG_RX | 2767 - NETIF_F_HW_VLAN_CTAG_FILTER; 2742 + #define IGBVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ 2743 + NETIF_F_GSO_GRE_CSUM | \ 2744 + NETIF_F_GSO_IPIP | \ 2745 + NETIF_F_GSO_SIT | \ 2746 + NETIF_F_GSO_UDP_TUNNEL | \ 2747 + NETIF_F_GSO_UDP_TUNNEL_CSUM) 2748 + 2749 + netdev->gso_partial_features = IGBVF_GSO_PARTIAL_FEATURES; 2750 + netdev->hw_features |= NETIF_F_GSO_PARTIAL | 2751 + IGBVF_GSO_PARTIAL_FEATURES; 2752 + 2753 + netdev->features = netdev->hw_features; 2768 2754 2769 2755 if (pci_using_dac) 2770 2756 netdev->features |= NETIF_F_HIGHDMA; 2771 2757 2772 - netdev->vlan_features |= NETIF_F_SG | 2773 - NETIF_F_TSO | 2774 - NETIF_F_TSO6 | 2775 - NETIF_F_HW_CSUM | 2776 - NETIF_F_SCTP_CRC; 2777 - 2758 + netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; 2778 2759 netdev->mpls_features |= NETIF_F_HW_CSUM; 2779 - netdev->hw_enc_features |= NETIF_F_HW_CSUM; 2760 + netdev->hw_enc_features |= netdev->vlan_features; 2761 + 2762 + /* set this bit last since it cannot be part of vlan_features */ 2763 + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | 2764 + NETIF_F_HW_VLAN_CTAG_RX | 2765 + NETIF_F_HW_VLAN_CTAG_TX; 2780 2766 2781 2767 /*reset the controller to put the device in a known good state */ 2782 2768 err = 
hw->mac.ops.reset_hw(hw);
+1 -1
drivers/net/ethernet/intel/igbvf/vf.c
··· 266 266 msgbuf[1] = vid; 267 267 /* Setting the 8 bit field MSG INFO to true indicates "add" */ 268 268 if (set) 269 - msgbuf[0] |= 1 << E1000_VT_MSGINFO_SHIFT; 269 + msgbuf[0] |= BIT(E1000_VT_MSGINFO_SHIFT); 270 270 271 271 mbx->ops.write_posted(hw, msgbuf, 2); 272 272