Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next

Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates

This series contains updates to igb, igbvf, ixgbe, i40e and i40evf.

Jacob provides eight patches to clean up the ixgbe driver to resolve various
checkpatch.pl warnings/errors as well as minor coding style issues.

Stephen Hemminger and I provide simple cleanups of void functions which
had unneeded return statements at the end of the function.

v2: Dropped Emil's patch "ixgbe: fix the detection of SFP+ capable interfaces"
while I wait for his updated patch to be validated.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+271 -294
-4
drivers/net/ethernet/intel/i40e/i40e_main.c
··· 6836 6836 return; 6837 6837 6838 6838 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL); 6839 - return; 6840 6839 } 6841 6840 6842 6841 /** ··· 7575 7576 7576 7577 i40e_aq_delete_element(&pf->hw, veb->seid, NULL); 7577 7578 i40e_veb_clear(veb); 7578 - 7579 - return; 7580 7579 } 7581 7580 7582 7581 /** ··· 8055 8058 } 8056 8059 8057 8060 pf->queues_left = queues_left; 8058 - return; 8059 8061 } 8060 8062 8061 8063 /**
-4
drivers/net/ethernet/intel/i40evf/i40evf_main.c
··· 693 693 f->remove = true; 694 694 adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER; 695 695 } 696 - return; 697 696 } 698 697 699 698 /** ··· 1231 1232 pci_disable_msix(adapter->pdev); 1232 1233 kfree(adapter->msix_entries); 1233 1234 adapter->msix_entries = NULL; 1234 - 1235 - return; 1236 1235 } 1237 1236 1238 1237 /** ··· 2155 2158 return; /* do not reschedule */ 2156 2159 } 2157 2160 schedule_delayed_work(&adapter->init_task, HZ * 3); 2158 - return; 2159 2161 } 2160 2162 2161 2163 /**
-1
drivers/net/ethernet/intel/igb/e1000_nvm.c
··· 798 798 fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) 799 799 | eeprom_verl; 800 800 } 801 - return; 802 801 }
-1
drivers/net/ethernet/intel/igb/igb_main.c
··· 2139 2139 } 2140 2140 break; 2141 2141 } 2142 - return; 2143 2142 } 2144 2143 2145 2144 /**
-1
drivers/net/ethernet/intel/igbvf/ethtool.c
··· 119 119 static void igbvf_get_pauseparam(struct net_device *netdev, 120 120 struct ethtool_pauseparam *pause) 121 121 { 122 - return; 123 122 } 124 123 125 124 static int igbvf_set_pauseparam(struct net_device *netdev,
+1 -1
drivers/net/ethernet/intel/ixgbe/ixgbe.h
··· 362 362 for (pos = (head).ring; pos != NULL; pos = pos->next) 363 363 364 364 #define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \ 365 - ? 8 : 1) 365 + ? 8 : 1) 366 366 #define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS 367 367 368 368 /* MAX_Q_VECTORS of these are allocated,
+30 -32
drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
··· 41 41 #define IXGBE_82598_RX_PB_SIZE 512 42 42 43 43 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, 44 - ixgbe_link_speed speed, 45 - bool autoneg_wait_to_complete); 44 + ixgbe_link_speed speed, 45 + bool autoneg_wait_to_complete); 46 46 static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, 47 - u8 *eeprom_data); 47 + u8 *eeprom_data); 48 48 49 49 /** 50 50 * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout ··· 140 140 phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; 141 141 phy->ops.check_link = &ixgbe_check_phy_link_tnx; 142 142 phy->ops.get_firmware_version = 143 - &ixgbe_get_phy_firmware_version_tnx; 143 + &ixgbe_get_phy_firmware_version_tnx; 144 144 break; 145 145 case ixgbe_phy_nl: 146 146 phy->ops.reset = &ixgbe_reset_phy_nl; ··· 156 156 157 157 /* Check to see if SFP+ module is supported */ 158 158 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, 159 - &list_offset, 160 - &data_offset); 159 + &list_offset, 160 + &data_offset); 161 161 if (ret_val != 0) { 162 162 ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED; 163 163 goto out; ··· 219 219 * Determines the link capabilities by reading the AUTOC register. 220 220 **/ 221 221 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, 222 - ixgbe_link_speed *speed, 223 - bool *autoneg) 222 + ixgbe_link_speed *speed, 223 + bool *autoneg) 224 224 { 225 225 s32 status = 0; 226 226 u32 autoc = 0; ··· 473 473 * Restarts the link. Performs autonegotiation if needed. 
474 474 **/ 475 475 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, 476 - bool autoneg_wait_to_complete) 476 + bool autoneg_wait_to_complete) 477 477 { 478 478 u32 autoc_reg; 479 479 u32 links_reg; ··· 555 555 * Reads the links register to determine if link is up and the current speed 556 556 **/ 557 557 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, 558 - ixgbe_link_speed *speed, bool *link_up, 559 - bool link_up_wait_to_complete) 558 + ixgbe_link_speed *speed, bool *link_up, 559 + bool link_up_wait_to_complete) 560 560 { 561 561 u32 links_reg; 562 562 u32 i; ··· 572 572 hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg); 573 573 hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg); 574 574 hw->phy.ops.read_reg(hw, 0xC00C, MDIO_MMD_PMAPMD, 575 - &adapt_comp_reg); 575 + &adapt_comp_reg); 576 576 if (link_up_wait_to_complete) { 577 577 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { 578 578 if ((link_reg & 1) && ··· 584 584 } 585 585 msleep(100); 586 586 hw->phy.ops.read_reg(hw, 0xC79F, 587 - MDIO_MMD_PMAPMD, 588 - &link_reg); 587 + MDIO_MMD_PMAPMD, 588 + &link_reg); 589 589 hw->phy.ops.read_reg(hw, 0xC00C, 590 - MDIO_MMD_PMAPMD, 591 - &adapt_comp_reg); 590 + MDIO_MMD_PMAPMD, 591 + &adapt_comp_reg); 592 592 } 593 593 } else { 594 594 if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0)) ··· 661 661 662 662 /* Set KX4/KX support according to speed requested */ 663 663 else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN || 664 - link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) { 664 + link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) { 665 665 autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK; 666 666 if (speed & IXGBE_LINK_SPEED_10GB_FULL) 667 667 autoc |= IXGBE_AUTOC_KX4_SUPP; ··· 694 694 * Sets the link speed in the AUTOC register in the MAC and restarts link. 
695 695 **/ 696 696 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, 697 - ixgbe_link_speed speed, 698 - bool autoneg_wait_to_complete) 697 + ixgbe_link_speed speed, 698 + bool autoneg_wait_to_complete) 699 699 { 700 700 s32 status; 701 701 702 702 /* Setup the PHY according to input speed */ 703 703 status = hw->phy.ops.setup_link_speed(hw, speed, 704 - autoneg_wait_to_complete); 704 + autoneg_wait_to_complete); 705 705 /* Set up MAC */ 706 706 ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete); 707 707 ··· 740 740 if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) { 741 741 /* Enable Tx Atlas so packets can be transmitted again */ 742 742 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, 743 - &analog_val); 743 + &analog_val); 744 744 analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN; 745 745 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, 746 - analog_val); 746 + analog_val); 747 747 748 748 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, 749 - &analog_val); 749 + &analog_val); 750 750 analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL; 751 751 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, 752 - analog_val); 752 + analog_val); 753 753 754 754 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, 755 - &analog_val); 755 + &analog_val); 756 756 analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL; 757 757 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, 758 - analog_val); 758 + analog_val); 759 759 760 760 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, 761 - &analog_val); 761 + &analog_val); 762 762 analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL; 763 763 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, 764 - analog_val); 764 + analog_val); 765 765 } 766 766 767 767 /* Reset PHY */ ··· 960 960 for (vlanbyte = 0; vlanbyte < 4; vlanbyte++) 961 961 for (offset = 0; offset < hw->mac.vft_size; offset++) 962 962 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset), 963 - 0); 963 + 0); 964 964 965 965 return 0; 966 966 } ··· 978 978 u32 
atlas_ctl; 979 979 980 980 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, 981 - IXGBE_ATLASCTL_WRITE_CMD | (reg << 8)); 981 + IXGBE_ATLASCTL_WRITE_CMD | (reg << 8)); 982 982 IXGBE_WRITE_FLUSH(hw); 983 983 udelay(10); 984 984 atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); ··· 1278 1278 /* Setup Tx packet buffer sizes */ 1279 1279 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) 1280 1280 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB); 1281 - 1282 - return; 1283 1281 } 1284 1282 1285 1283 static struct ixgbe_mac_operations mac_ops_82598 = {
+43 -42
drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
··· 48 48 ixgbe_link_speed speed, 49 49 bool autoneg_wait_to_complete); 50 50 static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, 51 - ixgbe_link_speed speed, 52 - bool autoneg_wait_to_complete); 51 + ixgbe_link_speed speed, 52 + bool autoneg_wait_to_complete); 53 53 static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw); 54 54 static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, 55 55 bool autoneg_wait_to_complete); 56 56 static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, 57 - ixgbe_link_speed speed, 58 - bool autoneg_wait_to_complete); 57 + ixgbe_link_speed speed, 58 + bool autoneg_wait_to_complete); 59 59 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, 60 - ixgbe_link_speed speed, 61 - bool autoneg_wait_to_complete); 60 + ixgbe_link_speed speed, 61 + bool autoneg_wait_to_complete); 62 62 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw); 63 63 static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, 64 64 u8 dev_addr, u8 *data); ··· 96 96 if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) && 97 97 !ixgbe_mng_enabled(hw)) { 98 98 mac->ops.disable_tx_laser = 99 - &ixgbe_disable_tx_laser_multispeed_fiber; 99 + &ixgbe_disable_tx_laser_multispeed_fiber; 100 100 mac->ops.enable_tx_laser = 101 - &ixgbe_enable_tx_laser_multispeed_fiber; 101 + &ixgbe_enable_tx_laser_multispeed_fiber; 102 102 mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber; 103 103 } else { 104 104 mac->ops.disable_tx_laser = NULL; ··· 132 132 hw->phy.ops.reset = NULL; 133 133 134 134 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, 135 - &data_offset); 135 + &data_offset); 136 136 if (ret_val != 0) 137 137 goto setup_sfp_out; 138 138 139 139 /* PHY config will finish before releasing the semaphore */ 140 140 ret_val = hw->mac.ops.acquire_swfw_sync(hw, 141 - IXGBE_GSSR_MAC_CSR_SM); 141 + IXGBE_GSSR_MAC_CSR_SM); 142 142 if (ret_val != 0) { 143 143 ret_val = IXGBE_ERR_SWFW_SYNC; 144 144 goto 
setup_sfp_out; ··· 334 334 phy->ops.check_link = &ixgbe_check_phy_link_tnx; 335 335 phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; 336 336 phy->ops.get_firmware_version = 337 - &ixgbe_get_phy_firmware_version_tnx; 337 + &ixgbe_get_phy_firmware_version_tnx; 338 338 break; 339 339 default: 340 340 break; ··· 352 352 * Determines the link capabilities by reading the AUTOC register. 353 353 **/ 354 354 static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, 355 - ixgbe_link_speed *speed, 355 + ixgbe_link_speed *speed, 356 356 bool *autoneg) 357 357 { 358 358 s32 status = 0; ··· 543 543 * Restarts the link. Performs autonegotiation if needed. 544 544 **/ 545 545 static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, 546 - bool autoneg_wait_to_complete) 546 + bool autoneg_wait_to_complete) 547 547 { 548 548 u32 autoc_reg; 549 549 u32 links_reg; ··· 672 672 * Set the link speed in the AUTOC register and restarts link. 673 673 **/ 674 674 static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, 675 - ixgbe_link_speed speed, 676 - bool autoneg_wait_to_complete) 675 + ixgbe_link_speed speed, 676 + bool autoneg_wait_to_complete) 677 677 { 678 678 s32 status = 0; 679 679 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; ··· 820 820 */ 821 821 if (speedcnt > 1) 822 822 status = ixgbe_setup_mac_link_multispeed_fiber(hw, 823 - highest_link_speed, 824 - autoneg_wait_to_complete); 823 + highest_link_speed, 824 + autoneg_wait_to_complete); 825 825 826 826 out: 827 827 /* Set autoneg_advertised value based on input link speed */ ··· 1009 1009 if (speed & IXGBE_LINK_SPEED_1GB_FULL) 1010 1010 autoc |= IXGBE_AUTOC_KX_SUPP; 1011 1011 } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) && 1012 - (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN || 1013 - link_mode == IXGBE_AUTOC_LMS_1G_AN)) { 1012 + (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN || 1013 + link_mode == IXGBE_AUTOC_LMS_1G_AN)) { 1014 1014 /* Switch from 1G SFI to 10G SFI if requested */ 1015 1015 if 
((speed == IXGBE_LINK_SPEED_10GB_FULL) && 1016 1016 (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) { ··· 1018 1018 autoc |= IXGBE_AUTOC_LMS_10G_SERIAL; 1019 1019 } 1020 1020 } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) && 1021 - (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) { 1021 + (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) { 1022 1022 /* Switch from 10G SFI to 1G SFI if requested */ 1023 1023 if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && 1024 1024 (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) { ··· 1051 1051 } 1052 1052 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { 1053 1053 status = 1054 - IXGBE_ERR_AUTONEG_NOT_COMPLETE; 1054 + IXGBE_ERR_AUTONEG_NOT_COMPLETE; 1055 1055 hw_dbg(hw, "Autoneg did not complete.\n"); 1056 1056 } 1057 1057 } ··· 1074 1074 * Restarts link on PHY and MAC based on settings passed in. 1075 1075 **/ 1076 1076 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, 1077 - ixgbe_link_speed speed, 1078 - bool autoneg_wait_to_complete) 1077 + ixgbe_link_speed speed, 1078 + bool autoneg_wait_to_complete) 1079 1079 { 1080 1080 s32 status; 1081 1081 1082 1082 /* Setup the PHY according to input speed */ 1083 1083 status = hw->phy.ops.setup_link_speed(hw, speed, 1084 - autoneg_wait_to_complete); 1084 + autoneg_wait_to_complete); 1085 1085 /* Set up MAC */ 1086 1086 ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete); 1087 1087 ··· 1224 1224 (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) { 1225 1225 autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK; 1226 1226 autoc2 |= (hw->mac.orig_autoc2 & 1227 - IXGBE_AUTOC2_UPPER_MASK); 1227 + IXGBE_AUTOC2_UPPER_MASK); 1228 1228 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2); 1229 1229 } 1230 1230 } ··· 1246 1246 /* Add the SAN MAC address to the RAR only if it's a valid address */ 1247 1247 if (is_valid_ether_addr(hw->mac.san_addr)) { 1248 1248 hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, 1249 - hw->mac.san_addr, 0, IXGBE_RAH_AV); 1249 + hw->mac.san_addr, 0, IXGBE_RAH_AV); 1250 1250 1251 1251 /* Save the SAN MAC RAR 
index */ 1252 1252 hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; ··· 1257 1257 1258 1258 /* Store the alternative WWNN/WWPN prefix */ 1259 1259 hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix, 1260 - &hw->mac.wwpn_prefix); 1260 + &hw->mac.wwpn_prefix); 1261 1261 1262 1262 reset_hw_out: 1263 1263 return status; ··· 1271 1271 { 1272 1272 int i; 1273 1273 u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); 1274 + 1274 1275 fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE; 1275 1276 1276 1277 /* ··· 1285 1284 udelay(10); 1286 1285 } 1287 1286 if (i >= IXGBE_FDIRCMD_CMD_POLL) { 1288 - hw_dbg(hw, "Flow Director previous command isn't complete, " 1289 - "aborting table re-initialization.\n"); 1287 + hw_dbg(hw, "Flow Director previous command isn't complete, aborting table re-initialization.\n"); 1290 1288 return IXGBE_ERR_FDIR_REINIT_FAILED; 1291 1289 } 1292 1290 ··· 1299 1299 * - write 0 to bit 8 of FDIRCMD register 1300 1300 */ 1301 1301 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, 1302 - (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) | 1303 - IXGBE_FDIRCMD_CLEARHT)); 1302 + (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) | 1303 + IXGBE_FDIRCMD_CLEARHT)); 1304 1304 IXGBE_WRITE_FLUSH(hw); 1305 1305 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, 1306 - (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & 1307 - ~IXGBE_FDIRCMD_CLEARHT)); 1306 + (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & 1307 + ~IXGBE_FDIRCMD_CLEARHT)); 1308 1308 IXGBE_WRITE_FLUSH(hw); 1309 1309 /* 1310 1310 * Clear FDIR Hash register to clear any leftover hashes ··· 1319 1319 /* Poll init-done after we write FDIRCTRL register */ 1320 1320 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { 1321 1321 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & 1322 - IXGBE_FDIRCTRL_INIT_DONE) 1322 + IXGBE_FDIRCTRL_INIT_DONE) 1323 1323 break; 1324 1324 usleep_range(1000, 2000); 1325 1325 } ··· 1368 1368 IXGBE_WRITE_FLUSH(hw); 1369 1369 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { 1370 1370 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & 1371 - IXGBE_FDIRCTRL_INIT_DONE) 1371 + 
IXGBE_FDIRCTRL_INIT_DONE) 1372 1372 break; 1373 1373 usleep_range(1000, 2000); 1374 1374 } ··· 1453 1453 bucket_hash ^= hi_hash_dword >> n; \ 1454 1454 else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \ 1455 1455 sig_hash ^= hi_hash_dword << (16 - n); \ 1456 - } while (0); 1456 + } while (0) 1457 1457 1458 1458 /** 1459 1459 * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash ··· 1529 1529 * @queue: queue index to direct traffic to 1530 1530 **/ 1531 1531 s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, 1532 - union ixgbe_atr_hash_dword input, 1533 - union ixgbe_atr_hash_dword common, 1534 - u8 queue) 1532 + union ixgbe_atr_hash_dword input, 1533 + union ixgbe_atr_hash_dword common, 1534 + u8 queue) 1535 1535 { 1536 1536 u64 fdirhashcmd; 1537 1537 u32 fdircmd; ··· 1555 1555 1556 1556 /* configure FDIRCMD register */ 1557 1557 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | 1558 - IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; 1558 + IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; 1559 1559 fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; 1560 1560 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; 1561 1561 ··· 1579 1579 bucket_hash ^= lo_hash_dword >> n; \ 1580 1580 if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ 1581 1581 bucket_hash ^= hi_hash_dword >> n; \ 1582 - } while (0); 1582 + } while (0) 1583 1583 1584 1584 /** 1585 1585 * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash ··· 1651 1651 static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask) 1652 1652 { 1653 1653 u32 mask = ntohs(input_mask->formatted.dst_port); 1654 + 1654 1655 mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT; 1655 1656 mask |= ntohs(input_mask->formatted.src_port); 1656 1657 mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1); ··· 1886 1885 u32 core_ctl; 1887 1886 1888 1887 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD | 1889 - (reg << 8)); 1888 + (reg << 
8)); 1890 1889 IXGBE_WRITE_FLUSH(hw); 1891 1890 udelay(10); 1892 1891 core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
+20 -25
drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
··· 41 41 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw); 42 42 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw); 43 43 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, 44 - u16 count); 44 + u16 count); 45 45 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count); 46 46 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); 47 47 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); ··· 485 485 * Reads the part number string from the EEPROM. 486 486 **/ 487 487 s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, 488 - u32 pba_num_size) 488 + u32 pba_num_size) 489 489 { 490 490 s32 ret_val; 491 491 u16 data; ··· 818 818 eeprom->address_bits = 16; 819 819 else 820 820 eeprom->address_bits = 8; 821 - hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: " 822 - "%d\n", eeprom->type, eeprom->word_size, 823 - eeprom->address_bits); 821 + hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: %d\n", 822 + eeprom->type, eeprom->word_size, eeprom->address_bits); 824 823 } 825 824 826 825 return 0; ··· 1391 1392 } 1392 1393 1393 1394 if (i == timeout) { 1394 - hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore " 1395 - "not granted.\n"); 1395 + hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore not granted.\n"); 1396 1396 /* 1397 1397 * this release is particularly important because our attempts 1398 1398 * above to get the semaphore may have succeeded, and if there ··· 1436 1438 * was not granted because we don't have access to the EEPROM 1437 1439 */ 1438 1440 if (i >= timeout) { 1439 - hw_dbg(hw, "SWESMBI Software EEPROM semaphore " 1440 - "not granted.\n"); 1441 + hw_dbg(hw, "SWESMBI Software EEPROM semaphore not granted.\n"); 1441 1442 ixgbe_release_eeprom_semaphore(hw); 1442 1443 status = IXGBE_ERR_EEPROM; 1443 1444 } 1444 1445 } else { 1445 - hw_dbg(hw, "Software semaphore SMBI between device drivers " 1446 - "not granted.\n"); 1446 + hw_dbg(hw, 
"Software semaphore SMBI between device drivers not granted.\n"); 1447 1447 } 1448 1448 1449 1449 return status; ··· 1483 1487 */ 1484 1488 for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) { 1485 1489 ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI, 1486 - IXGBE_EEPROM_OPCODE_BITS); 1490 + IXGBE_EEPROM_OPCODE_BITS); 1487 1491 spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8); 1488 1492 if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI)) 1489 1493 break; ··· 1532 1536 * @count: number of bits to shift out 1533 1537 **/ 1534 1538 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, 1535 - u16 count) 1539 + u16 count) 1536 1540 { 1537 1541 u32 eec; 1538 1542 u32 mask; ··· 1736 1740 * caller does not need checksum_val, the value can be NULL. 1737 1741 **/ 1738 1742 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, 1739 - u16 *checksum_val) 1743 + u16 *checksum_val) 1740 1744 { 1741 1745 s32 status; 1742 1746 u16 checksum; ··· 1809 1813 * Puts an ethernet address into a receive address register. 
1810 1814 **/ 1811 1815 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, 1812 - u32 enable_addr) 1816 + u32 enable_addr) 1813 1817 { 1814 1818 u32 rar_low, rar_high; 1815 1819 u32 rar_entries = hw->mac.num_rar_entries; ··· 2053 2057 2054 2058 if (hw->addr_ctrl.mta_in_use > 0) 2055 2059 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, 2056 - IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); 2060 + IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); 2057 2061 2058 2062 hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n"); 2059 2063 return 0; ··· 2071 2075 2072 2076 if (a->mta_in_use > 0) 2073 2077 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | 2074 - hw->mac.mc_filter_type); 2078 + hw->mac.mc_filter_type); 2075 2079 2076 2080 return 0; 2077 2081 } ··· 2659 2663 2660 2664 /* For informational purposes only */ 2661 2665 if (i >= IXGBE_MAX_SECRX_POLL) 2662 - hw_dbg(hw, "Rx unit being enabled before security " 2663 - "path fully disabled. Continuing with init.\n"); 2666 + hw_dbg(hw, "Rx unit being enabled before security path fully disabled. Continuing with init.\n"); 2664 2667 2665 2668 return 0; 2666 2669 ··· 2786 2791 * get and set mac_addr routines. 2787 2792 **/ 2788 2793 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, 2789 - u16 *san_mac_offset) 2794 + u16 *san_mac_offset) 2790 2795 { 2791 2796 s32 ret_val; 2792 2797 ··· 2832 2837 hw->mac.ops.set_lan_id(hw); 2833 2838 /* apply the port offset to the address offset */ 2834 2839 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : 2835 - (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); 2840 + (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); 2836 2841 for (i = 0; i < 3; i++) { 2837 2842 ret_val = hw->eeprom.ops.read(hw, san_mac_offset, 2838 2843 &san_mac_data); ··· 3072 3077 * Turn on/off specified VLAN in the VLAN filter table. 
3073 3078 **/ 3074 3079 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, 3075 - bool vlan_on) 3080 + bool vlan_on) 3076 3081 { 3077 3082 s32 regindex; 3078 3083 u32 bitindex; ··· 3194 3199 * Ignore it. */ 3195 3200 vfta_changed = false; 3196 3201 } 3197 - } 3198 - else 3202 + } else { 3199 3203 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0); 3204 + } 3200 3205 } 3201 3206 3202 3207 if (vfta_changed) ··· 3296 3301 * block to check the support for the alternative WWNN/WWPN prefix support. 3297 3302 **/ 3298 3303 s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, 3299 - u16 *wwpn_prefix) 3304 + u16 *wwpn_prefix) 3300 3305 { 3301 3306 u16 offset, caps; 3302 3307 u16 alt_san_mac_blk_offset;
+8 -8
drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
··· 39 39 s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw); 40 40 s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw); 41 41 s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, 42 - u32 pba_num_size); 42 + u32 pba_num_size); 43 43 s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr); 44 44 enum ixgbe_bus_width ixgbe_convert_bus_width(u16 link_status); 45 45 enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status); ··· 61 61 s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset, 62 62 u16 words, u16 *data); 63 63 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, 64 - u16 *data); 64 + u16 *data); 65 65 s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, 66 66 u16 words, u16 *data); 67 67 u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw); 68 68 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, 69 - u16 *checksum_val); 69 + u16 *checksum_val); 70 70 s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw); 71 71 72 72 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, 73 - u32 enable_addr); 73 + u32 enable_addr); 74 74 s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index); 75 75 s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw); 76 76 s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, ··· 92 92 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq); 93 93 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw); 94 94 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, 95 - u32 vind, bool vlan_on); 95 + u32 vind, bool vlan_on); 96 96 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw); 97 97 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, 98 - ixgbe_link_speed *speed, 99 - bool *link_up, bool link_up_wait_to_complete); 98 + ixgbe_link_speed *speed, 99 + bool *link_up, bool link_up_wait_to_complete); 100 100 s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, 101 
- u16 *wwpn_prefix); 101 + u16 *wwpn_prefix); 102 102 103 103 s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val); 104 104 s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked);
+1 -2
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
··· 267 267 * Configure dcb settings and enable dcb mode. 268 268 */ 269 269 s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, 270 - struct ixgbe_dcb_config *dcb_config) 270 + struct ixgbe_dcb_config *dcb_config) 271 271 { 272 272 s32 ret = 0; 273 273 u8 pfc_en; ··· 389 389 for (i = 0; i < MAX_USER_PRIORITY; i++) 390 390 map[i] = IXGBE_RTRUP2TC_UP_MASK & 391 391 (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT)); 392 - return; 393 392 } 394 393 395 394 void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map)
+12 -12
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
··· 31 31 32 32 /* DCB register definitions */ 33 33 #define IXGBE_RTTDCS_TDPAC 0x00000001 /* 0 Round Robin, 34 - * 1 WSP - Weighted Strict Priority 35 - */ 34 + * 1 WSP - Weighted Strict Priority 35 + */ 36 36 #define IXGBE_RTTDCS_VMPAC 0x00000002 /* 0 Round Robin, 37 - * 1 WRR - Weighted Round Robin 38 - */ 37 + * 1 WRR - Weighted Round Robin 38 + */ 39 39 #define IXGBE_RTTDCS_TDRM 0x00000010 /* Transmit Recycle Mode */ 40 40 #define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */ 41 41 #define IXGBE_RTTDCS_BDPM 0x00400000 /* Bypass Data Pipe - must clear! */ 42 42 #define IXGBE_RTTDCS_BPBFSM 0x00800000 /* Bypass PB Free Space - must 43 - * clear! 44 - */ 43 + * clear! 44 + */ 45 45 #define IXGBE_RTTDCS_SPEED_CHG 0x80000000 /* Link speed change */ 46 46 47 47 /* Receive UP2TC mapping */ ··· 56 56 #define IXGBE_RTRPT4C_LSP 0x80000000 /* LSP enable bit */ 57 57 58 58 #define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet 59 - * buffers enable 60 - */ 59 + * buffers enable 60 + */ 61 61 #define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores 62 - * (RSS) enable 63 - */ 62 + * (RSS) enable 63 + */ 64 64 65 65 /* RTRPCS Bit Masks */ 66 66 #define IXGBE_RTRPCS_RRM 0x00000002 /* Receive Recycle Mode enable */ ··· 81 81 82 82 /* RTTPCS Bit Masks */ 83 83 #define IXGBE_RTTPCS_TPPAC 0x00000020 /* 0 Round Robin, 84 - * 1 SP - Strict Priority 85 - */ 84 + * 1 SP - Strict Priority 85 + */ 86 86 #define IXGBE_RTTPCS_ARBDIS 0x00000040 /* Arbiter disable */ 87 87 #define IXGBE_RTTPCS_TPRM 0x00000100 /* Transmit Recycle Mode enable */ 88 88 #define IXGBE_RTTPCS_ARBD_SHIFT 22
+14 -14
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
··· 192 192 } 193 193 194 194 static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc, 195 - u8 prio, u8 bwg_id, u8 bw_pct, 196 - u8 up_map) 195 + u8 prio, u8 bwg_id, u8 bw_pct, 196 + u8 up_map) 197 197 { 198 198 struct ixgbe_adapter *adapter = netdev_priv(netdev); 199 199 ··· 210 210 } 211 211 212 212 static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, 213 - u8 bw_pct) 213 + u8 bw_pct) 214 214 { 215 215 struct ixgbe_adapter *adapter = netdev_priv(netdev); 216 216 ··· 218 218 } 219 219 220 220 static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc, 221 - u8 prio, u8 bwg_id, u8 bw_pct, 222 - u8 up_map) 221 + u8 prio, u8 bwg_id, u8 bw_pct, 222 + u8 up_map) 223 223 { 224 224 struct ixgbe_adapter *adapter = netdev_priv(netdev); 225 225 ··· 236 236 } 237 237 238 238 static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, 239 - u8 bw_pct) 239 + u8 bw_pct) 240 240 { 241 241 struct ixgbe_adapter *adapter = netdev_priv(netdev); 242 242 ··· 244 244 } 245 245 246 246 static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, 247 - u8 *prio, u8 *bwg_id, u8 *bw_pct, 248 - u8 *up_map) 247 + u8 *prio, u8 *bwg_id, u8 *bw_pct, 248 + u8 *up_map) 249 249 { 250 250 struct ixgbe_adapter *adapter = netdev_priv(netdev); 251 251 ··· 256 256 } 257 257 258 258 static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, 259 - u8 *bw_pct) 259 + u8 *bw_pct) 260 260 { 261 261 struct ixgbe_adapter *adapter = netdev_priv(netdev); 262 262 ··· 264 264 } 265 265 266 266 static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc, 267 - u8 *prio, u8 *bwg_id, u8 *bw_pct, 268 - u8 *up_map) 267 + u8 *prio, u8 *bwg_id, u8 *bw_pct, 268 + u8 *up_map) 269 269 { 270 270 struct ixgbe_adapter *adapter = netdev_priv(netdev); 271 271 ··· 276 276 } 277 277 278 278 static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, 279 - u8 *bw_pct) 279 + u8 
*bw_pct) 280 280 { 281 281 struct ixgbe_adapter *adapter = netdev_priv(netdev); 282 282 ··· 284 284 } 285 285 286 286 static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority, 287 - u8 setting) 287 + u8 setting) 288 288 { 289 289 struct ixgbe_adapter *adapter = netdev_priv(netdev); 290 290 ··· 295 295 } 296 296 297 297 static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority, 298 - u8 *setting) 298 + u8 *setting) 299 299 { 300 300 struct ixgbe_adapter *adapter = netdev_priv(netdev); 301 301
+1 -2
drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
··· 253 253 **/ 254 254 void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) 255 255 { 256 - if (adapter->ixgbe_dbg_adapter) 257 - debugfs_remove_recursive(adapter->ixgbe_dbg_adapter); 256 + debugfs_remove_recursive(adapter->ixgbe_dbg_adapter); 258 257 adapter->ixgbe_dbg_adapter = NULL; 259 258 } 260 259
+40 -43
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
··· 141 141 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \ 142 142 / sizeof(u64)) 143 143 #define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \ 144 - IXGBE_PB_STATS_LEN + \ 145 - IXGBE_QUEUE_STATS_LEN) 144 + IXGBE_PB_STATS_LEN + \ 145 + IXGBE_QUEUE_STATS_LEN) 146 146 147 147 static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { 148 148 "Register test (offline)", "Eeprom test (offline)", ··· 152 152 #define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN 153 153 154 154 static int ixgbe_get_settings(struct net_device *netdev, 155 - struct ethtool_cmd *ecmd) 155 + struct ethtool_cmd *ecmd) 156 156 { 157 157 struct ixgbe_adapter *adapter = netdev_priv(netdev); 158 158 struct ixgbe_hw *hw = &adapter->hw; ··· 311 311 } 312 312 313 313 static int ixgbe_set_settings(struct net_device *netdev, 314 - struct ethtool_cmd *ecmd) 314 + struct ethtool_cmd *ecmd) 315 315 { 316 316 struct ixgbe_adapter *adapter = netdev_priv(netdev); 317 317 struct ixgbe_hw *hw = &adapter->hw; ··· 368 368 } 369 369 370 370 static void ixgbe_get_pauseparam(struct net_device *netdev, 371 - struct ethtool_pauseparam *pause) 371 + struct ethtool_pauseparam *pause) 372 372 { 373 373 struct ixgbe_adapter *adapter = netdev_priv(netdev); 374 374 struct ixgbe_hw *hw = &adapter->hw; ··· 390 390 } 391 391 392 392 static int ixgbe_set_pauseparam(struct net_device *netdev, 393 - struct ethtool_pauseparam *pause) 393 + struct ethtool_pauseparam *pause) 394 394 { 395 395 struct ixgbe_adapter *adapter = netdev_priv(netdev); 396 396 struct ixgbe_hw *hw = &adapter->hw; ··· 450 450 #define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_ 451 451 452 452 static void ixgbe_get_regs(struct net_device *netdev, 453 - struct ethtool_regs *regs, void *p) 453 + struct ethtool_regs *regs, void *p) 454 454 { 455 455 struct ixgbe_adapter *adapter = netdev_priv(netdev); 456 456 struct ixgbe_hw *hw = &adapter->hw; ··· 812 812 } 813 813 814 814 static int ixgbe_get_eeprom(struct net_device *netdev, 815 - struct 
ethtool_eeprom *eeprom, u8 *bytes) 815 + struct ethtool_eeprom *eeprom, u8 *bytes) 816 816 { 817 817 struct ixgbe_adapter *adapter = netdev_priv(netdev); 818 818 struct ixgbe_hw *hw = &adapter->hw; ··· 918 918 } 919 919 920 920 static void ixgbe_get_drvinfo(struct net_device *netdev, 921 - struct ethtool_drvinfo *drvinfo) 921 + struct ethtool_drvinfo *drvinfo) 922 922 { 923 923 struct ixgbe_adapter *adapter = netdev_priv(netdev); 924 924 u32 nvm_track_id; ··· 940 940 } 941 941 942 942 static void ixgbe_get_ringparam(struct net_device *netdev, 943 - struct ethtool_ringparam *ring) 943 + struct ethtool_ringparam *ring) 944 944 { 945 945 struct ixgbe_adapter *adapter = netdev_priv(netdev); 946 946 struct ixgbe_ring *tx_ring = adapter->tx_ring[0]; ··· 953 953 } 954 954 955 955 static int ixgbe_set_ringparam(struct net_device *netdev, 956 - struct ethtool_ringparam *ring) 956 + struct ethtool_ringparam *ring) 957 957 { 958 958 struct ixgbe_adapter *adapter = netdev_priv(netdev); 959 959 struct ixgbe_ring *temp_ring; ··· 1082 1082 } 1083 1083 1084 1084 static void ixgbe_get_ethtool_stats(struct net_device *netdev, 1085 - struct ethtool_stats *stats, u64 *data) 1085 + struct ethtool_stats *stats, u64 *data) 1086 1086 { 1087 1087 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1088 1088 struct rtnl_link_stats64 temp; ··· 1110 1110 } 1111 1111 1112 1112 data[i] = (ixgbe_gstrings_stats[i].sizeof_stat == 1113 - sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 1113 + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; 1114 1114 } 1115 1115 for (j = 0; j < netdev->num_tx_queues; j++) { 1116 1116 ring = adapter->tx_ring[j]; ··· 1180 1180 } 1181 1181 1182 1182 static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, 1183 - u8 *data) 1183 + u8 *data) 1184 1184 { 1185 1185 char *p = (char *)data; 1186 1186 int i; ··· 1357 1357 ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write); 1358 1358 val = ixgbe_read_reg(&adapter->hw, reg); 1359 1359 if (val != (test_pattern[pat] & write & mask)) { 1360 - e_err(drv, "pattern test reg %04X failed: got " 1361 - "0x%08X expected 0x%08X\n", 1360 + e_err(drv, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", 1362 1361 reg, val, (test_pattern[pat] & write & mask)); 1363 1362 *data = reg; 1364 1363 ixgbe_write_reg(&adapter->hw, reg, before); ··· 1381 1382 ixgbe_write_reg(&adapter->hw, reg, write & mask); 1382 1383 val = ixgbe_read_reg(&adapter->hw, reg); 1383 1384 if ((write & mask) != (val & mask)) { 1384 - e_err(drv, "set/check reg %04X test failed: got 0x%08X " 1385 - "expected 0x%08X\n", reg, (val & mask), (write & mask)); 1385 + e_err(drv, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", 1386 + reg, (val & mask), (write & mask)); 1386 1387 *data = reg; 1387 1388 ixgbe_write_reg(&adapter->hw, reg, before); 1388 1389 return true; ··· 1429 1430 ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle); 1430 1431 after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle; 1431 1432 if (value != after) { 1432 - e_err(drv, "failed STATUS register test got: 0x%08X " 1433 - "expected: 0x%08X\n", after, value); 1433 + e_err(drv, "failed STATUS register test got: 0x%08X expected: 0x%08X\n", 1434 + after, value); 1434 1435 *data = 1; 1435 1436 return 1; 1436 1437 } ··· 1532 1533 return -1; 1533 1534 } 1534 1535 } else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED, 1535 - netdev->name, netdev)) { 1536 + netdev->name, netdev)) { 1536 1537 shared_int = false; 1537 1538 } else if 
(request_irq(irq, ixgbe_test_intr, IRQF_SHARED, 1538 - netdev->name, netdev)) { 1539 + netdev->name, netdev)) { 1539 1540 *data = 1; 1540 1541 return -1; 1541 1542 } ··· 1562 1563 */ 1563 1564 adapter->test_icr = 0; 1564 1565 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 1565 - ~mask & 0x00007FFF); 1566 + ~mask & 0x00007FFF); 1566 1567 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, 1567 - ~mask & 0x00007FFF); 1568 + ~mask & 0x00007FFF); 1568 1569 IXGBE_WRITE_FLUSH(&adapter->hw); 1569 1570 usleep_range(10000, 20000); 1570 1571 ··· 1586 1587 IXGBE_WRITE_FLUSH(&adapter->hw); 1587 1588 usleep_range(10000, 20000); 1588 1589 1589 - if (!(adapter->test_icr &mask)) { 1590 + if (!(adapter->test_icr & mask)) { 1590 1591 *data = 4; 1591 1592 break; 1592 1593 } ··· 1601 1602 */ 1602 1603 adapter->test_icr = 0; 1603 1604 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 1604 - ~mask & 0x00007FFF); 1605 + ~mask & 0x00007FFF); 1605 1606 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, 1606 - ~mask & 0x00007FFF); 1607 + ~mask & 0x00007FFF); 1607 1608 IXGBE_WRITE_FLUSH(&adapter->hw); 1608 1609 usleep_range(10000, 20000); 1609 1610 ··· 1963 1964 } 1964 1965 1965 1966 static void ixgbe_diag_test(struct net_device *netdev, 1966 - struct ethtool_test *eth_test, u64 *data) 1967 + struct ethtool_test *eth_test, u64 *data) 1967 1968 { 1968 1969 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1969 1970 bool if_running = netif_running(netdev); ··· 1986 1987 int i; 1987 1988 for (i = 0; i < adapter->num_vfs; i++) { 1988 1989 if (adapter->vfinfo[i].clear_to_send) { 1989 - netdev_warn(netdev, "%s", 1990 - "offline diagnostic is not " 1991 - "supported when VFs are " 1992 - "present\n"); 1990 + netdev_warn(netdev, "offline diagnostic is not supported when VFs are present\n"); 1993 1991 data[0] = 1; 1994 1992 data[1] = 1; 1995 1993 data[2] = 1; ··· 2033 2037 * loopback diagnostic. 
*/ 2034 2038 if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED | 2035 2039 IXGBE_FLAG_VMDQ_ENABLED)) { 2036 - e_info(hw, "Skip MAC loopback diagnostic in VT " 2037 - "mode\n"); 2040 + e_info(hw, "Skip MAC loopback diagnostic in VT mode\n"); 2038 2041 data[3] = 0; 2039 2042 goto skip_loopback; 2040 2043 } ··· 2073 2078 } 2074 2079 2075 2080 static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter, 2076 - struct ethtool_wolinfo *wol) 2081 + struct ethtool_wolinfo *wol) 2077 2082 { 2078 2083 struct ixgbe_hw *hw = &adapter->hw; 2079 2084 int retval = 0; ··· 2089 2094 } 2090 2095 2091 2096 static void ixgbe_get_wol(struct net_device *netdev, 2092 - struct ethtool_wolinfo *wol) 2097 + struct ethtool_wolinfo *wol) 2093 2098 { 2094 2099 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2095 2100 2096 2101 wol->supported = WAKE_UCAST | WAKE_MCAST | 2097 - WAKE_BCAST | WAKE_MAGIC; 2102 + WAKE_BCAST | WAKE_MAGIC; 2098 2103 wol->wolopts = 0; 2099 2104 2100 2105 if (ixgbe_wol_exclusion(adapter, wol) || ··· 2176 2181 } 2177 2182 2178 2183 static int ixgbe_get_coalesce(struct net_device *netdev, 2179 - struct ethtool_coalesce *ec) 2184 + struct ethtool_coalesce *ec) 2180 2185 { 2181 2186 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2182 2187 ··· 2217 2222 adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) { 2218 2223 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) { 2219 2224 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; 2220 - e_info(probe, "rx-usecs value high enough " 2221 - "to re-enable RSC\n"); 2225 + e_info(probe, "rx-usecs value high enough to re-enable RSC\n"); 2222 2226 return true; 2223 2227 } 2224 2228 /* if interrupt rate is too high then disable RSC */ ··· 2230 2236 } 2231 2237 2232 2238 static int ixgbe_set_coalesce(struct net_device *netdev, 2233 - struct ethtool_coalesce *ec) 2239 + struct ethtool_coalesce *ec) 2234 2240 { 2235 2241 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2236 2242 struct ixgbe_q_vector *q_vector; ··· 2415 2421 switch 
(cmd->flow_type) { 2416 2422 case TCP_V4_FLOW: 2417 2423 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 2424 + /* fallthrough */ 2418 2425 case UDP_V4_FLOW: 2419 2426 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) 2420 2427 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 2428 + /* fallthrough */ 2421 2429 case SCTP_V4_FLOW: 2422 2430 case AH_ESP_V4_FLOW: 2423 2431 case AH_V4_FLOW: ··· 2429 2433 break; 2430 2434 case TCP_V6_FLOW: 2431 2435 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 2436 + /* fallthrough */ 2432 2437 case UDP_V6_FLOW: 2433 2438 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) 2434 2439 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 2440 + /* fallthrough */ 2435 2441 case SCTP_V6_FLOW: 2436 2442 case AH_ESP_V6_FLOW: 2437 2443 case AH_V6_FLOW: ··· 2785 2787 2786 2788 if ((flags2 & UDP_RSS_FLAGS) && 2787 2789 !(adapter->flags2 & UDP_RSS_FLAGS)) 2788 - e_warn(drv, "enabling UDP RSS: fragmented packets" 2789 - " may arrive out of order to the stack above\n"); 2790 + e_warn(drv, "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n"); 2790 2791 2791 2792 adapter->flags2 = flags2; 2792 2793
+2 -2
drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
··· 1113 1113 err = pci_enable_msi(adapter->pdev); 1114 1114 if (err) { 1115 1115 netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev, 1116 - "Unable to allocate MSI interrupt, " 1117 - "falling back to legacy. Error: %d\n", err); 1116 + "Unable to allocate MSI interrupt, falling back to legacy. Error: %d\n", 1117 + err); 1118 1118 return; 1119 1119 } 1120 1120 adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
+2 -2
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 6087 6087 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) { 6088 6088 for (i = 0; i < adapter->num_tx_queues; i++) 6089 6089 set_bit(__IXGBE_TX_FDIR_INIT_DONE, 6090 - &(adapter->tx_ring[i]->state)); 6090 + &(adapter->tx_ring[i]->state)); 6091 6091 /* re-enable flow director interrupts */ 6092 6092 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR); 6093 6093 } else { ··· 8387 8387 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) 8388 8388 e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n", 8389 8389 hw->mac.type, hw->phy.type, hw->phy.sfp_type, 8390 - part_str); 8390 + part_str); 8391 8391 else 8392 8392 e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n", 8393 8393 hw->mac.type, hw->phy.type, part_str);
+5 -5
drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
··· 223 223 * received an ack to that message within delay * timeout period 224 224 **/ 225 225 static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, 226 - u16 mbx_id) 226 + u16 mbx_id) 227 227 { 228 228 struct ixgbe_mbx_info *mbx = &hw->mbx; 229 229 s32 ret_val = IXGBE_ERR_MBX; ··· 269 269 u32 vf_bit = vf_number % 16; 270 270 271 271 if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit, 272 - index)) { 272 + index)) { 273 273 ret_val = 0; 274 274 hw->mbx.stats.reqs++; 275 275 } ··· 291 291 u32 vf_bit = vf_number % 16; 292 292 293 293 if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit, 294 - index)) { 294 + index)) { 295 295 ret_val = 0; 296 296 hw->mbx.stats.acks++; 297 297 } ··· 366 366 * returns SUCCESS if it successfully copied message into the buffer 367 367 **/ 368 368 static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, 369 - u16 vf_number) 369 + u16 vf_number) 370 370 { 371 371 s32 ret_val; 372 372 u16 i; ··· 407 407 * a message due to a VF request so no polling for message is needed. 408 408 **/ 409 409 static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, 410 - u16 vf_number) 410 + u16 vf_number) 411 411 { 412 412 s32 ret_val; 413 413 u16 i;
+3 -3
drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
··· 54 54 * Message ACK's are the value or'd with 0xF0000000 55 55 */ 56 56 #define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with 57 - * this are the ACK */ 57 + * this are the ACK */ 58 58 #define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with 59 - * this are the NACK */ 59 + * this are the NACK */ 60 60 #define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still 61 - clear to send requests */ 61 + clear to send requests */ 62 62 #define IXGBE_VT_MSGINFO_SHIFT 16 63 63 /* bits 23:16 are used for exra info for certain messages */ 64 64 #define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
+34 -34
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
··· 67 67 if (mdio45_probe(&hw->phy.mdio, phy_addr) == 0) { 68 68 ixgbe_get_phy_id(hw); 69 69 hw->phy.type = 70 - ixgbe_get_phy_type_from_id(hw->phy.id); 70 + ixgbe_get_phy_type_from_id(hw->phy.id); 71 71 72 72 if (hw->phy.type == ixgbe_phy_unknown) { 73 73 hw->phy.ops.read_reg(hw, ··· 136 136 u16 phy_id_low = 0; 137 137 138 138 status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD, 139 - &phy_id_high); 139 + &phy_id_high); 140 140 141 141 if (status == 0) { 142 142 hw->phy.id = (u32)(phy_id_high << 16); 143 143 status = hw->phy.ops.read_reg(hw, MDIO_DEVID2, MDIO_MMD_PMAPMD, 144 - &phy_id_low); 144 + &phy_id_low); 145 145 hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK); 146 146 hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK); 147 147 } ··· 318 318 * @phy_data: Pointer to read data from PHY register 319 319 **/ 320 320 s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, 321 - u32 device_type, u16 *phy_data) 321 + u32 device_type, u16 *phy_data) 322 322 { 323 323 s32 status; 324 324 u16 gssr; ··· 421 421 * @phy_data: Data to write to the PHY register 422 422 **/ 423 423 s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, 424 - u32 device_type, u16 phy_data) 424 + u32 device_type, u16 phy_data) 425 425 { 426 426 s32 status; 427 427 u16 gssr; ··· 548 548 * @speed: new link speed 549 549 **/ 550 550 s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, 551 - ixgbe_link_speed speed, 552 - bool autoneg_wait_to_complete) 551 + ixgbe_link_speed speed, 552 + bool autoneg_wait_to_complete) 553 553 { 554 554 555 555 /* ··· 582 582 * Determines the link capabilities by reading the AUTOC register. 
583 583 */ 584 584 s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, 585 - ixgbe_link_speed *speed, 586 - bool *autoneg) 585 + ixgbe_link_speed *speed, 586 + bool *autoneg) 587 587 { 588 588 s32 status = IXGBE_ERR_LINK_SETUP; 589 589 u16 speed_ability; ··· 592 592 *autoneg = true; 593 593 594 594 status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD, 595 - &speed_ability); 595 + &speed_ability); 596 596 597 597 if (status == 0) { 598 598 if (speed_ability & MDIO_SPEED_10G) ··· 806 806 807 807 /* reset the PHY and poll for completion */ 808 808 hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, 809 - (phy_data | MDIO_CTRL1_RESET)); 809 + (phy_data | MDIO_CTRL1_RESET)); 810 810 811 811 for (i = 0; i < 100; i++) { 812 812 hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, 813 - &phy_data); 813 + &phy_data); 814 814 if ((phy_data & MDIO_CTRL1_RESET) == 0) 815 815 break; 816 816 usleep_range(10000, 20000); ··· 824 824 825 825 /* Get init offsets */ 826 826 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, 827 - &data_offset); 827 + &data_offset); 828 828 if (ret_val != 0) 829 829 goto out; 830 830 ··· 838 838 if (ret_val) 839 839 goto err_eeprom; 840 840 control = (eword & IXGBE_CONTROL_MASK_NL) >> 841 - IXGBE_CONTROL_SHIFT_NL; 841 + IXGBE_CONTROL_SHIFT_NL; 842 842 edata = eword & IXGBE_DATA_MASK_NL; 843 843 switch (control) { 844 844 case IXGBE_DELAY_NL: ··· 859 859 if (ret_val) 860 860 goto err_eeprom; 861 861 hw->phy.ops.write_reg(hw, phy_offset, 862 - MDIO_MMD_PMAPMD, eword); 862 + MDIO_MMD_PMAPMD, eword); 863 863 hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword, 864 864 phy_offset); 865 865 data_offset++; ··· 1010 1010 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) { 1011 1011 if (hw->bus.lan_id == 0) 1012 1012 hw->phy.sfp_type = 1013 - ixgbe_sfp_type_da_cu_core0; 1013 + ixgbe_sfp_type_da_cu_core0; 1014 1014 else 1015 1015 hw->phy.sfp_type = 1016 - ixgbe_sfp_type_da_cu_core1; 1016 + ixgbe_sfp_type_da_cu_core1; 1017 1017 } else 
if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) { 1018 1018 hw->phy.ops.read_i2c_eeprom( 1019 1019 hw, IXGBE_SFF_CABLE_SPEC_COMP, ··· 1035 1035 IXGBE_SFF_10GBASELR_CAPABLE)) { 1036 1036 if (hw->bus.lan_id == 0) 1037 1037 hw->phy.sfp_type = 1038 - ixgbe_sfp_type_srlr_core0; 1038 + ixgbe_sfp_type_srlr_core0; 1039 1039 else 1040 1040 hw->phy.sfp_type = 1041 - ixgbe_sfp_type_srlr_core1; 1041 + ixgbe_sfp_type_srlr_core1; 1042 1042 } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) { 1043 1043 if (hw->bus.lan_id == 0) 1044 1044 hw->phy.sfp_type = ··· 1087 1087 goto err_read_i2c_eeprom; 1088 1088 1089 1089 status = hw->phy.ops.read_i2c_eeprom(hw, 1090 - IXGBE_SFF_VENDOR_OUI_BYTE1, 1091 - &oui_bytes[1]); 1090 + IXGBE_SFF_VENDOR_OUI_BYTE1, 1091 + &oui_bytes[1]); 1092 1092 1093 1093 if (status != 0) 1094 1094 goto err_read_i2c_eeprom; 1095 1095 1096 1096 status = hw->phy.ops.read_i2c_eeprom(hw, 1097 - IXGBE_SFF_VENDOR_OUI_BYTE2, 1098 - &oui_bytes[2]); 1097 + IXGBE_SFF_VENDOR_OUI_BYTE2, 1098 + &oui_bytes[2]); 1099 1099 1100 1100 if (status != 0) 1101 1101 goto err_read_i2c_eeprom; ··· 1403 1403 * so it returns the offsets to the phy init sequence block. 1404 1404 **/ 1405 1405 s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, 1406 - u16 *list_offset, 1407 - u16 *data_offset) 1406 + u16 *list_offset, 1407 + u16 *data_offset) 1408 1408 { 1409 1409 u16 sfp_id; 1410 1410 u16 sfp_type = hw->phy.sfp_type; ··· 1493 1493 * Performs byte read operation to SFP module's EEPROM over I2C interface. 1494 1494 **/ 1495 1495 s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, 1496 - u8 *eeprom_data) 1496 + u8 *eeprom_data) 1497 1497 { 1498 1498 return hw->phy.ops.read_i2c_byte(hw, byte_offset, 1499 - IXGBE_I2C_EEPROM_DEV_ADDR, 1500 - eeprom_data); 1499 + IXGBE_I2C_EEPROM_DEV_ADDR, 1500 + eeprom_data); 1501 1501 } 1502 1502 1503 1503 /** ··· 1525 1525 * Performs byte write operation to SFP module's EEPROM over I2C interface. 
1526 1526 **/ 1527 1527 s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, 1528 - u8 eeprom_data) 1528 + u8 eeprom_data) 1529 1529 { 1530 1530 return hw->phy.ops.write_i2c_byte(hw, byte_offset, 1531 - IXGBE_I2C_EEPROM_DEV_ADDR, 1532 - eeprom_data); 1531 + IXGBE_I2C_EEPROM_DEV_ADDR, 1532 + eeprom_data); 1533 1533 } 1534 1534 1535 1535 /** ··· 1542 1542 * a specified device address. 1543 1543 **/ 1544 1544 s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, 1545 - u8 dev_addr, u8 *data) 1545 + u8 dev_addr, u8 *data) 1546 1546 { 1547 1547 s32 status = 0; 1548 1548 u32 max_retry = 10; ··· 1631 1631 * a specified device address. 1632 1632 **/ 1633 1633 s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, 1634 - u8 dev_addr, u8 data) 1634 + u8 dev_addr, u8 data) 1635 1635 { 1636 1636 s32 status = 0; 1637 1637 u32 max_retry = 1; ··· 2046 2046 2047 2047 /* Check that the LASI temp alarm status was triggered */ 2048 2048 hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG, 2049 - MDIO_MMD_PMAPMD, &phy_data); 2049 + MDIO_MMD_PMAPMD, &phy_data); 2050 2050 2051 2051 if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM)) 2052 2052 goto out;
+16 -16
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
··· 114 114 s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw); 115 115 s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw); 116 116 s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, 117 - u32 device_type, u16 *phy_data); 117 + u32 device_type, u16 *phy_data); 118 118 s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, 119 - u32 device_type, u16 phy_data); 119 + u32 device_type, u16 phy_data); 120 120 s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, 121 121 u32 device_type, u16 *phy_data); 122 122 s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, 123 123 u32 device_type, u16 phy_data); 124 124 s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw); 125 125 s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, 126 - ixgbe_link_speed speed, 127 - bool autoneg_wait_to_complete); 126 + ixgbe_link_speed speed, 127 + bool autoneg_wait_to_complete); 128 128 s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, 129 - ixgbe_link_speed *speed, 130 - bool *autoneg); 129 + ixgbe_link_speed *speed, 130 + bool *autoneg); 131 131 bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw); 132 132 133 133 /* PHY specific */ 134 134 s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, 135 - ixgbe_link_speed *speed, 136 - bool *link_up); 135 + ixgbe_link_speed *speed, 136 + bool *link_up); 137 137 s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw); 138 138 s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, 139 - u16 *firmware_version); 139 + u16 *firmware_version); 140 140 s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, 141 - u16 *firmware_version); 141 + u16 *firmware_version); 142 142 143 143 s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); 144 144 s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw); 145 145 s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); 146 146 s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, 147 - u16 *list_offset, 148 - u16 *data_offset); 147 + u16 
*list_offset, 148 + u16 *data_offset); 149 149 s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw); 150 150 s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, 151 - u8 dev_addr, u8 *data); 151 + u8 dev_addr, u8 *data); 152 152 s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, 153 - u8 dev_addr, u8 data); 153 + u8 dev_addr, u8 data); 154 154 s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, 155 - u8 *eeprom_data); 155 + u8 *eeprom_data); 156 156 s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, 157 157 u8 *sff8472_data); 158 158 s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, 159 - u8 eeprom_data); 159 + u8 eeprom_data); 160 160 #endif /* _IXGBE_PHY_H_ */
+2 -2
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
··· 1129 1129 adapter->vfinfo[vf].vlan_count--; 1130 1130 adapter->vfinfo[vf].pf_vlan = 0; 1131 1131 adapter->vfinfo[vf].pf_qos = 0; 1132 - } 1132 + } 1133 1133 out: 1134 - return err; 1134 + return err; 1135 1135 } 1136 1136 1137 1137 static int ixgbe_link_mbps(struct ixgbe_adapter *adapter)
+30 -30
drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
··· 160 160 #define IXGBE_MAX_EITR 0x00000FF8 161 161 #define IXGBE_MIN_EITR 8 162 162 #define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : \ 163 - (0x012300 + (((_i) - 24) * 4))) 163 + (0x012300 + (((_i) - 24) * 4))) 164 164 #define IXGBE_EITR_ITR_INT_MASK 0x00000FF8 165 165 #define IXGBE_EITR_LLI_MOD 0x00008000 166 166 #define IXGBE_EITR_CNT_WDIS 0x80000000 ··· 213 213 * 64-127: 0x0D014 + (n-64)*0x40 214 214 */ 215 215 #define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \ 216 - (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \ 216 + (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \ 217 217 (0x0D014 + (((_i) - 64) * 0x40)))) 218 218 /* 219 219 * Rx DCA Control Register: ··· 222 222 * 64-127: 0x0D00C + (n-64)*0x40 223 223 */ 224 224 #define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \ 225 - (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \ 225 + (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \ 226 226 (0x0D00C + (((_i) - 64) * 0x40)))) 227 227 #define IXGBE_RDRXCTL 0x02F00 228 228 #define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4)) 229 - /* 8 of these 0x03C00 - 0x03C1C */ 229 + /* 8 of these 0x03C00 - 0x03C1C */ 230 230 #define IXGBE_RXCTRL 0x03000 231 231 #define IXGBE_DROPEN 0x03D04 232 232 #define IXGBE_RXPBSIZE_SHIFT 10 ··· 239 239 /* Multicast Table Array - 128 entries */ 240 240 #define IXGBE_MTA(_i) (0x05200 + ((_i) * 4)) 241 241 #define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ 242 - (0x0A200 + ((_i) * 8))) 242 + (0x0A200 + ((_i) * 8))) 243 243 #define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ 244 - (0x0A204 + ((_i) * 8))) 244 + (0x0A204 + ((_i) * 8))) 245 245 #define IXGBE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8)) 246 246 #define IXGBE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8)) 247 247 /* Packet split receive type */ 248 248 #define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? 
(0x05480 + ((_i) * 4)) : \ 249 - (0x0EA00 + ((_i) * 4))) 249 + (0x0EA00 + ((_i) * 4))) 250 250 /* array of 4096 1-bit vlan filters */ 251 251 #define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4)) 252 252 /*array of 4096 4-bit vlan vmdq indices */ ··· 696 696 697 697 #define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4)) 698 698 #define IXGBE_TQSMR(_i) (((_i) <= 7) ? (0x07300 + ((_i) * 4)) : \ 699 - (0x08600 + ((_i) * 4))) 699 + (0x08600 + ((_i) * 4))) 700 700 #define IXGBE_TQSM(_i) (0x08600 + ((_i) * 4)) 701 701 702 702 #define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */ ··· 820 820 #define IXGBE_GCR_EXT_VT_MODE_32 0x00000002 821 821 #define IXGBE_GCR_EXT_VT_MODE_64 0x00000003 822 822 #define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \ 823 - IXGBE_GCR_EXT_VT_MODE_64) 823 + IXGBE_GCR_EXT_VT_MODE_64) 824 824 825 825 /* Time Sync Registers */ 826 826 #define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */ ··· 1396 1396 #define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ 1397 1397 1398 1398 #define IXGBE_EIMS_ENABLE_MASK ( \ 1399 - IXGBE_EIMS_RTX_QUEUE | \ 1400 - IXGBE_EIMS_LSC | \ 1401 - IXGBE_EIMS_TCP_TIMER | \ 1402 - IXGBE_EIMS_OTHER) 1399 + IXGBE_EIMS_RTX_QUEUE | \ 1400 + IXGBE_EIMS_LSC | \ 1401 + IXGBE_EIMS_TCP_TIMER | \ 1402 + IXGBE_EIMS_OTHER) 1403 1403 1404 1404 /* Immediate Interrupt Rx (A.K.A. 
Low Latency Interrupt) */ 1405 1405 #define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ ··· 2161 2161 2162 2162 /* Masks to determine if packets should be dropped due to frame errors */ 2163 2163 #define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \ 2164 - IXGBE_RXD_ERR_CE | \ 2165 - IXGBE_RXD_ERR_LE | \ 2166 - IXGBE_RXD_ERR_PE | \ 2167 - IXGBE_RXD_ERR_OSE | \ 2168 - IXGBE_RXD_ERR_USE) 2164 + IXGBE_RXD_ERR_CE | \ 2165 + IXGBE_RXD_ERR_LE | \ 2166 + IXGBE_RXD_ERR_PE | \ 2167 + IXGBE_RXD_ERR_OSE | \ 2168 + IXGBE_RXD_ERR_USE) 2169 2169 2170 2170 #define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \ 2171 - IXGBE_RXDADV_ERR_CE | \ 2172 - IXGBE_RXDADV_ERR_LE | \ 2173 - IXGBE_RXDADV_ERR_PE | \ 2174 - IXGBE_RXDADV_ERR_OSE | \ 2175 - IXGBE_RXDADV_ERR_USE) 2171 + IXGBE_RXDADV_ERR_CE | \ 2172 + IXGBE_RXDADV_ERR_LE | \ 2173 + IXGBE_RXDADV_ERR_PE | \ 2174 + IXGBE_RXDADV_ERR_OSE | \ 2175 + IXGBE_RXDADV_ERR_USE) 2176 2176 2177 2177 /* Multicast bit mask */ 2178 2178 #define IXGBE_MCSTCTRL_MFE 0x4 ··· 2393 2393 #define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */ 2394 2394 #define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ 2395 2395 #define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ 2396 - IXGBE_ADVTXD_POPTS_SHIFT) 2396 + IXGBE_ADVTXD_POPTS_SHIFT) 2397 2397 #define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ 2398 - IXGBE_ADVTXD_POPTS_SHIFT) 2398 + IXGBE_ADVTXD_POPTS_SHIFT) 2399 2399 #define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ 2400 2400 #define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ 2401 2401 #define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ ··· 2435 2435 #define IXGBE_LINK_SPEED_1GB_FULL 0x0020 2436 2436 #define IXGBE_LINK_SPEED_10GB_FULL 0x0080 2437 2437 #define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \ 2438 - IXGBE_LINK_SPEED_10GB_FULL) 2438 + IXGBE_LINK_SPEED_10GB_FULL) 2439 2439 #define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \ 2440 - 
IXGBE_LINK_SPEED_1GB_FULL | \ 2441 - IXGBE_LINK_SPEED_10GB_FULL) 2440 + IXGBE_LINK_SPEED_1GB_FULL | \ 2441 + IXGBE_LINK_SPEED_10GB_FULL) 2442 2442 2443 2443 2444 2444 /* Physical layer type */ ··· 2840 2840 2841 2841 /* iterator type for walking multicast address lists */ 2842 2842 typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr, 2843 - u32 *vmdq); 2843 + u32 *vmdq); 2844 2844 2845 2845 /* Function pointer table */ 2846 2846 struct ixgbe_eeprom_operations { ··· 2887 2887 s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool); 2888 2888 s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); 2889 2889 s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *, 2890 - bool *); 2890 + bool *); 2891 2891 2892 2892 /* Packet Buffer Manipulation */ 2893 2893 void (*set_rxpba)(struct ixgbe_hw *, int, u32, int);
+7 -8
drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
··· 81 81 bool autoneg_wait_to_complete) 82 82 { 83 83 return hw->phy.ops.setup_link_speed(hw, speed, 84 - autoneg_wait_to_complete); 84 + autoneg_wait_to_complete); 85 85 } 86 86 87 87 /** ··· 155 155 /* Add the SAN MAC address to the RAR only if it's a valid address */ 156 156 if (is_valid_ether_addr(hw->mac.san_addr)) { 157 157 hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, 158 - hw->mac.san_addr, 0, IXGBE_RAH_AV); 158 + hw->mac.san_addr, 0, IXGBE_RAH_AV); 159 159 160 160 /* Save the SAN MAC RAR index */ 161 161 hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; ··· 166 166 167 167 /* Store the alternative WWNN/WWPN prefix */ 168 168 hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix, 169 - &hw->mac.wwpn_prefix); 169 + &hw->mac.wwpn_prefix); 170 170 171 171 reset_hw_out: 172 172 return status; ··· 237 237 238 238 eec = IXGBE_READ_REG(hw, IXGBE_EEC); 239 239 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> 240 - IXGBE_EEC_SIZE_SHIFT); 240 + IXGBE_EEC_SIZE_SHIFT); 241 241 eeprom->word_size = 1 << (eeprom_size + 242 - IXGBE_EEPROM_WORD_SIZE_SHIFT); 242 + IXGBE_EEPROM_WORD_SIZE_SHIFT); 243 243 244 244 hw_dbg(hw, "Eeprom params: type = %d, size = %d\n", 245 245 eeprom->type, eeprom->word_size); ··· 712 712 udelay(50); 713 713 } 714 714 } else { 715 - hw_dbg(hw, "Software semaphore SMBI between device drivers " 716 - "not granted.\n"); 715 + hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n"); 717 716 } 718 717 719 718 return status; ··· 812 813 .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, 813 814 .get_media_type = &ixgbe_get_media_type_X540, 814 815 .get_supported_physical_layer = 815 - &ixgbe_get_supported_physical_layer_X540, 816 + &ixgbe_get_supported_physical_layer_X540, 816 817 .enable_rx_dma = &ixgbe_enable_rx_dma_generic, 817 818 .get_mac_addr = &ixgbe_get_mac_addr_generic, 818 819 .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,