
Merge branch '1GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
1GbE Intel Wired LAN Driver Updates 2018-10-17

This series adds support for the new igc driver.

The igc driver is the new client driver supporting the Intel I225
Ethernet Controller, which supports 2.5GbE speeds. The reason for
creating a new client driver, instead of adding support for the new
device to e1000e, is that the silicon behaves more like the devices
supported by the igb driver. It also did not make sense to add a client
part to the igb driver, which supports only 1GbE server parts.

This initial set of patches is designed for basic support (i.e. link and
pass traffic). Follow-on patch series will add more advanced support
like VLAN, Wake-on-LAN, etc.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+8341
+16
drivers/net/ethernet/intel/Kconfig
 	  To compile this driver as a module, choose M here. The module
 	  will be called fm10k.  MSI-X interrupt support is required
 
+config IGC
+	tristate "Intel(R) Ethernet Controller I225-LM/I225-V support"
+	default n
+	depends on PCI
+	---help---
+	  This driver supports Intel(R) Ethernet Controller I225-LM/I225-V
+	  family of adapters.
+
+	  For more information on how to identify your adapter, go
+	  to the Adapter & Driver ID Guide that can be located at:
+
+	  <http://support.intel.com>
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called igc.
+
 endif # NET_VENDOR_INTEL
+1
drivers/net/ethernet/intel/Makefile
 obj-$(CONFIG_E1000) += e1000/
 obj-$(CONFIG_E1000E) += e1000e/
 obj-$(CONFIG_IGB) += igb/
+obj-$(CONFIG_IGC) += igc/
 obj-$(CONFIG_IGBVF) += igbvf/
 obj-$(CONFIG_IXGBE) += ixgbe/
 obj-$(CONFIG_IXGBEVF) += ixgbevf/
+10
drivers/net/ethernet/intel/igc/Makefile
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2018 Intel Corporation

#
# Intel(R) I225-LM/I225-V 2.5G Ethernet Controller
#

obj-$(CONFIG_IGC) += igc.o

igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o
+443
drivers/net/ethernet/intel/igc/igc.h
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018 Intel Corporation */

#ifndef _IGC_H_
#define _IGC_H_

#include <linux/kobject.h>

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>

#include <linux/ethtool.h>

#include <linux/sctp.h>

#define IGC_ERR(args...) pr_err("igc: " args)

#define PFX "igc: "

#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#include "igc_hw.h"

/* main */
extern char igc_driver_name[];
extern char igc_driver_version[];

/* Interrupt defines */
#define IGC_START_ITR			648 /* ~6000 ints/sec */
#define IGC_FLAG_HAS_MSI		BIT(0)
#define IGC_FLAG_QUEUE_PAIRS		BIT(4)
#define IGC_FLAG_NEED_LINK_UPDATE	BIT(9)
#define IGC_FLAG_MEDIA_RESET		BIT(10)
#define IGC_FLAG_MAS_ENABLE		BIT(12)
#define IGC_FLAG_HAS_MSIX		BIT(13)
#define IGC_FLAG_VLAN_PROMISC		BIT(15)

#define IGC_START_ITR			648 /* ~6000 ints/sec */
#define IGC_4K_ITR			980
#define IGC_20K_ITR			196
#define IGC_70K_ITR			56

#define IGC_DEFAULT_ITR		3 /* dynamic */
#define IGC_MAX_ITR_USECS	10000
#define IGC_MIN_ITR_USECS	10
#define NON_Q_VECTORS		1
#define MAX_MSIX_ENTRIES	10

/* TX/RX descriptor defines */
#define IGC_DEFAULT_TXD		256
#define IGC_DEFAULT_TX_WORK	128
#define IGC_MIN_TXD		80
#define IGC_MAX_TXD		4096

#define IGC_DEFAULT_RXD		256
#define IGC_MIN_RXD		80
#define IGC_MAX_RXD		4096

/* Transmit and receive queues */
#define IGC_MAX_RX_QUEUES	4
#define IGC_MAX_TX_QUEUES	4

#define MAX_Q_VECTORS		8
#define MAX_STD_JUMBO_FRAME_SIZE	9216

/* Supported Rx Buffer Sizes */
#define IGC_RXBUFFER_256	256
#define IGC_RXBUFFER_2048	2048
#define IGC_RXBUFFER_3072	3072

#define IGC_RX_HDR_LEN		IGC_RXBUFFER_256

/* RX and TX descriptor control thresholds.
 * PTHRESH - MAC will consider prefetch if it has fewer than this number of
 *           descriptors available in its onboard memory.
 *           Setting this to 0 disables RX descriptor prefetch.
 * HTHRESH - MAC will only prefetch if there are at least this many descriptors
 *           available in host memory.
 *           If PTHRESH is 0, this should also be 0.
 * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
 *           descriptors until either it has this many to write back, or the
 *           ITR timer expires.
 */
#define IGC_RX_PTHRESH		8
#define IGC_RX_HTHRESH		8
#define IGC_TX_PTHRESH		8
#define IGC_TX_HTHRESH		1
#define IGC_RX_WTHRESH		4
#define IGC_TX_WTHRESH		16

#define IGC_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

#define IGC_TS_HDR_LEN		16

#define IGC_SKB_PAD		(NET_SKB_PAD + NET_IP_ALIGN)

#if (PAGE_SIZE < 8192)
#define IGC_MAX_FRAME_BUILD_SKB \
	(SKB_WITH_OVERHEAD(IGC_RXBUFFER_2048) - IGC_SKB_PAD - IGC_TS_HDR_LEN)
#else
#define IGC_MAX_FRAME_BUILD_SKB	(IGC_RXBUFFER_2048 - IGC_TS_HDR_LEN)
#endif

/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IGC_RX_BUFFER_WRITE	16 /* Must be power of 2 */

/* igc_test_staterr - tests bits within Rx descriptor status and error fields */
static inline __le32 igc_test_staterr(union igc_adv_rx_desc *rx_desc,
				      const u32 stat_err_bits)
{
	return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
}

enum igc_state_t {
	__IGC_TESTING,
	__IGC_RESETTING,
	__IGC_DOWN,
	__IGC_PTP_TX_IN_PROGRESS,
};

enum igc_tx_flags {
	/* cmd_type flags */
	IGC_TX_FLAGS_VLAN	= 0x01,
	IGC_TX_FLAGS_TSO	= 0x02,
	IGC_TX_FLAGS_TSTAMP	= 0x04,

	/* olinfo flags */
	IGC_TX_FLAGS_IPV4	= 0x10,
	IGC_TX_FLAGS_CSUM	= 0x20,
};

enum igc_boards {
	board_base,
};

/* The largest size we can write to the descriptor is 65535.  In order to
 * maintain a power of two alignment we have to limit ourselves to 32K.
 */
#define IGC_MAX_TXD_PWR		15
#define IGC_MAX_DATA_PER_TXD	BIT(IGC_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S)	DIV_ROUND_UP((S), IGC_MAX_DATA_PER_TXD)
#define DESC_NEEDED		(MAX_SKB_FRAGS + 4)

/* wrapper around a pointer to a socket buffer,
 * so a DMA handle can be stored along with the buffer
 */
struct igc_tx_buffer {
	union igc_adv_tx_desc *next_to_watch;
	unsigned long time_stamp;
	struct sk_buff *skb;
	unsigned int bytecount;
	u16 gso_segs;
	__be16 protocol;

	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

struct igc_rx_buffer {
	dma_addr_t dma;
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
#else
	__u16 page_offset;
#endif
	__u16 pagecnt_bias;
};

struct igc_tx_queue_stats {
	u64 packets;
	u64 bytes;
	u64 restart_queue;
	u64 restart_queue2;
};

struct igc_rx_queue_stats {
	u64 packets;
	u64 bytes;
	u64 drops;
	u64 csum_err;
	u64 alloc_failed;
};

struct igc_rx_packet_stats {
	u64 ipv4_packets;	/* IPv4 headers processed */
	u64 ipv4e_packets;	/* IPv4E headers with extensions processed */
	u64 ipv6_packets;	/* IPv6 headers processed */
	u64 ipv6e_packets;	/* IPv6E headers with extensions processed */
	u64 tcp_packets;	/* TCP headers processed */
	u64 udp_packets;	/* UDP headers processed */
	u64 sctp_packets;	/* SCTP headers processed */
	u64 nfs_packets;	/* NFS headers processed */
	u64 other_packets;
};

struct igc_ring_container {
	struct igc_ring *ring;		/* pointer to linked list of rings */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 work_limit;			/* total work allowed per interrupt */
	u8 count;			/* total number of rings in vector */
	u8 itr;				/* current ITR setting for ring */
};

struct igc_ring {
	struct igc_q_vector *q_vector;	/* backlink to q_vector */
	struct net_device *netdev;	/* back pointer to net_device */
	struct device *dev;		/* device for dma mapping */
	union {				/* array of buffer info structs */
		struct igc_tx_buffer *tx_buffer_info;
		struct igc_rx_buffer *rx_buffer_info;
	};
	void *desc;			/* descriptor ring memory */
	unsigned long flags;		/* ring specific flags */
	void __iomem *tail;		/* pointer to ring tail register */
	dma_addr_t dma;			/* phys address of the ring */
	unsigned int size;		/* length of desc. ring in bytes */

	u16 count;			/* number of desc. in the ring */
	u8 queue_index;			/* logical index of the ring */
	u8 reg_idx;			/* physical index of the ring */

	/* everything past this point are written often */
	u16 next_to_clean;
	u16 next_to_use;
	u16 next_to_alloc;

	union {
		/* TX */
		struct {
			struct igc_tx_queue_stats tx_stats;
			struct u64_stats_sync tx_syncp;
			struct u64_stats_sync tx_syncp2;
		};
		/* RX */
		struct {
			struct igc_rx_queue_stats rx_stats;
			struct igc_rx_packet_stats pkt_stats;
			struct u64_stats_sync rx_syncp;
			struct sk_buff *skb;
		};
	};
} ____cacheline_internodealigned_in_smp;

struct igc_q_vector {
	struct igc_adapter *adapter;	/* backlink */
	void __iomem *itr_register;
	u32 eims_value;			/* EIMS mask value */

	u16 itr_val;
	u8 set_itr;

	struct igc_ring_container rx, tx;

	struct napi_struct napi;

	struct rcu_head rcu;	/* to avoid race with update stats on free */
	char name[IFNAMSIZ + 9];
	struct net_device poll_dev;

	/* for dynamic allocation of rings associated with this q_vector */
	struct igc_ring ring[0] ____cacheline_internodealigned_in_smp;
};

struct igc_mac_addr {
	u8 addr[ETH_ALEN];
	u8 queue;
	u8 state; /* bitmask */
};

#define IGC_MAC_STATE_DEFAULT	0x1
#define IGC_MAC_STATE_MODIFIED	0x2
#define IGC_MAC_STATE_IN_USE	0x4

/* Board specific private data structure */
struct igc_adapter {
	struct net_device *netdev;

	unsigned long state;
	unsigned int flags;
	unsigned int num_q_vectors;

	struct msix_entry *msix_entries;

	/* TX */
	u16 tx_work_limit;
	u32 tx_timeout_count;
	int num_tx_queues;
	struct igc_ring *tx_ring[IGC_MAX_TX_QUEUES];

	/* RX */
	int num_rx_queues;
	struct igc_ring *rx_ring[IGC_MAX_RX_QUEUES];

	struct timer_list watchdog_timer;
	struct timer_list dma_err_timer;
	struct timer_list phy_info_timer;

	u16 link_speed;
	u16 link_duplex;

	u8 port_num;

	u8 __iomem *io_addr;
	/* Interrupt Throttle Rate */
	u32 rx_itr_setting;
	u32 tx_itr_setting;

	struct work_struct reset_task;
	struct work_struct watchdog_task;
	struct work_struct dma_err_task;
	bool fc_autoneg;

	u8 tx_timeout_factor;

	int msg_enable;
	u32 max_frame_size;
	u32 min_frame_size;

	/* OS defined structs */
	struct pci_dev *pdev;
	/* lock for statistics */
	spinlock_t stats64_lock;
	struct rtnl_link_stats64 stats64;

	/* structs defined in igc_hw.h */
	struct igc_hw hw;
	struct igc_hw_stats stats;

	struct igc_q_vector *q_vector[MAX_Q_VECTORS];
	u32 eims_enable_mask;
	u32 eims_other;

	u16 tx_ring_count;
	u16 rx_ring_count;

	u32 *shadow_vfta;

	u32 rss_queues;

	/* lock for RX network flow classification filter */
	spinlock_t nfc_lock;

	struct igc_mac_addr *mac_table;

	unsigned long link_check_timeout;
	struct igc_info ei;
};

/* igc_desc_unused - calculate if we have unused descriptors */
static inline u16 igc_desc_unused(const struct igc_ring *ring)
{
	u16 ntc = ring->next_to_clean;
	u16 ntu = ring->next_to_use;

	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}

static inline s32 igc_get_phy_info(struct igc_hw *hw)
{
	if (hw->phy.ops.get_phy_info)
		return hw->phy.ops.get_phy_info(hw);

	return 0;
}

static inline s32 igc_reset_phy(struct igc_hw *hw)
{
	if (hw->phy.ops.reset)
		return hw->phy.ops.reset(hw);

	return 0;
}

static inline struct netdev_queue *txring_txq(const struct igc_ring *tx_ring)
{
	return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
}

enum igc_ring_flags_t {
	IGC_RING_FLAG_RX_3K_BUFFER,
	IGC_RING_FLAG_RX_BUILD_SKB_ENABLED,
	IGC_RING_FLAG_RX_SCTP_CSUM,
	IGC_RING_FLAG_RX_LB_VLAN_BSWAP,
	IGC_RING_FLAG_TX_CTX_IDX,
	IGC_RING_FLAG_TX_DETECT_HANG
};

#define ring_uses_large_buffer(ring) \
	test_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)

#define ring_uses_build_skb(ring) \
	test_bit(IGC_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)

static inline unsigned int igc_rx_bufsz(struct igc_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring_uses_large_buffer(ring))
		return IGC_RXBUFFER_3072;

	if (ring_uses_build_skb(ring))
		return IGC_MAX_FRAME_BUILD_SKB + IGC_TS_HDR_LEN;
#endif
	return IGC_RXBUFFER_2048;
}

static inline unsigned int igc_rx_pg_order(struct igc_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring_uses_large_buffer(ring))
		return 1;
#endif
	return 0;
}

static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data)
{
	if (hw->phy.ops.read_reg)
		return hw->phy.ops.read_reg(hw, offset, data);

	return 0;
}

#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring))

#define IGC_TXD_DCMD	(IGC_ADVTXD_DCMD_EOP | IGC_ADVTXD_DCMD_RS)

#define IGC_RX_DESC(R, i) \
	(&(((union igc_adv_rx_desc *)((R)->desc))[i]))
#define IGC_TX_DESC(R, i) \
	(&(((union igc_adv_tx_desc *)((R)->desc))[i]))
#define IGC_TX_CTXTDESC(R, i) \
	(&(((struct igc_adv_tx_context_desc *)((R)->desc))[i]))

#endif /* _IGC_H_ */
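The ring indexing above is worth a second look: next_to_use (ntu) chases next_to_clean (ntc) around a ring of count slots, and igc_desc_unused() keeps one slot permanently unused so a full ring never looks identical to an empty one. A standalone user-space sketch of the same arithmetic (not part of the driver):

	/* Sketch showing why igc_desc_unused() works: one slot is always
	 * reserved, so ntu == ntc unambiguously means "ring empty".
	 */
	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	static uint16_t desc_unused(uint16_t count, uint16_t ntc, uint16_t ntu)
	{
		/* same expression as igc_desc_unused() in igc.h */
		return ((ntc > ntu) ? 0 : count) + ntc - ntu - 1;
	}

	int main(void)
	{
		/* empty ring: all slots free except the reserved one */
		assert(desc_unused(256, 0, 0) == 255);
		/* ntu wrapped past the end while ntc lags behind:
		 * free slots 250..255 and 0..9 is 16, minus the reserved one
		 */
		assert(desc_unused(256, 10, 250) == 15);
		/* nearly full: ntu is one slot behind ntc */
		assert(desc_unused(256, 10, 9) == 0);
		printf("ring math checks out\n");
		return 0;
	}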
+541
drivers/net/ethernet/intel/igc/igc_base.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Intel Corporation */

#include <linux/delay.h>

#include "igc_hw.h"
#include "igc_i225.h"
#include "igc_mac.h"
#include "igc_base.h"
#include "igc.h"

/**
 * igc_set_pcie_completion_timeout - set pci-e completion timeout
 * @hw: pointer to the HW structure
 */
static s32 igc_set_pcie_completion_timeout(struct igc_hw *hw)
{
	u32 gcr = rd32(IGC_GCR);
	u16 pcie_devctl2;
	s32 ret_val = 0;

	/* only take action if timeout value is defaulted to 0 */
	if (gcr & IGC_GCR_CMPL_TMOUT_MASK)
		goto out;

	/* if capabilities version is type 1 we can write the
	 * timeout of 10ms to 200ms through the GCR register
	 */
	if (!(gcr & IGC_GCR_CAP_VER2)) {
		gcr |= IGC_GCR_CMPL_TMOUT_10ms;
		goto out;
	}

	/* for version 2 capabilities we need to write the config space
	 * directly in order to set the completion timeout value for
	 * 16ms to 55ms
	 */
	ret_val = igc_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
					&pcie_devctl2);
	if (ret_val)
		goto out;

	pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;

	ret_val = igc_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
					 &pcie_devctl2);
out:
	/* disable completion timeout resend */
	gcr &= ~IGC_GCR_CMPL_TMOUT_RESEND;

	wr32(IGC_GCR, gcr);

	return ret_val;
}

/**
 * igc_check_for_link_base - Check for link
 * @hw: pointer to the HW structure
 *
 * If sgmii is enabled, then use the pcs register to determine link, otherwise
 * use the generic interface for determining link.
 */
static s32 igc_check_for_link_base(struct igc_hw *hw)
{
	s32 ret_val = 0;

	ret_val = igc_check_for_copper_link(hw);

	return ret_val;
}

/**
 * igc_reset_hw_base - Reset hardware
 * @hw: pointer to the HW structure
 *
 * This resets the hardware into a known state.  This is a
 * function pointer entry point called by the api module.
 */
static s32 igc_reset_hw_base(struct igc_hw *hw)
{
	s32 ret_val;
	u32 ctrl;

	/* Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = igc_disable_pcie_master(hw);
	if (ret_val)
		hw_dbg("PCI-E Master disable polling has failed.\n");

	/* set the completion timeout for interface */
	ret_val = igc_set_pcie_completion_timeout(hw);
	if (ret_val)
		hw_dbg("PCI-E Set completion timeout has failed.\n");

	hw_dbg("Masking off all interrupts\n");
	wr32(IGC_IMC, 0xffffffff);

	wr32(IGC_RCTL, 0);
	wr32(IGC_TCTL, IGC_TCTL_PSP);
	wrfl();

	usleep_range(10000, 20000);

	ctrl = rd32(IGC_CTRL);

	hw_dbg("Issuing a global reset to MAC\n");
	wr32(IGC_CTRL, ctrl | IGC_CTRL_RST);

	ret_val = igc_get_auto_rd_done(hw);
	if (ret_val) {
		/* When auto config read does not complete, do not
		 * return with an error. This can happen in situations
		 * where there is no eeprom and prevents getting link.
		 */
		hw_dbg("Auto Read Done did not complete\n");
	}

	/* Clear any pending interrupt events. */
	wr32(IGC_IMC, 0xffffffff);
	rd32(IGC_ICR);

	return ret_val;
}

/**
 * igc_get_phy_id_base - Retrieve PHY addr and id
 * @hw: pointer to the HW structure
 *
 * Retrieves the PHY address and ID for both PHYs which do and do not use
 * the SGMII interface.
 */
static s32 igc_get_phy_id_base(struct igc_hw *hw)
{
	s32 ret_val = 0;

	ret_val = igc_get_phy_id(hw);

	return ret_val;
}

/**
 * igc_init_nvm_params_base - Init NVM func ptrs.
 * @hw: pointer to the HW structure
 */
static s32 igc_init_nvm_params_base(struct igc_hw *hw)
{
	struct igc_nvm_info *nvm = &hw->nvm;
	u32 eecd = rd32(IGC_EECD);
	u16 size;

	size = (u16)((eecd & IGC_EECD_SIZE_EX_MASK) >>
		     IGC_EECD_SIZE_EX_SHIFT);

	/* Added to a constant, "size" becomes the left-shift value
	 * for setting word_size.
	 */
	size += NVM_WORD_SIZE_BASE_SHIFT;

	/* Just in case size is out of range, cap it to the largest
	 * EEPROM size supported
	 */
	if (size > 15)
		size = 15;

	nvm->word_size = BIT(size);
	nvm->opcode_bits = 8;
	nvm->delay_usec = 1;

	nvm->page_size = eecd & IGC_EECD_ADDR_BITS ? 32 : 8;
	nvm->address_bits = eecd & IGC_EECD_ADDR_BITS ?
			    16 : 8;

	if (nvm->word_size == BIT(15))
		nvm->page_size = 128;

	return 0;
}

/**
 * igc_setup_copper_link_base - Configure copper link settings
 * @hw: pointer to the HW structure
 *
 * Configures the link for auto-neg or forced speed and duplex.  Then we check
 * for link, once link is established calls to configure collision distance
 * and flow control are called.
 */
static s32 igc_setup_copper_link_base(struct igc_hw *hw)
{
	s32 ret_val = 0;
	u32 ctrl;

	ctrl = rd32(IGC_CTRL);
	ctrl |= IGC_CTRL_SLU;
	ctrl &= ~(IGC_CTRL_FRCSPD | IGC_CTRL_FRCDPX);
	wr32(IGC_CTRL, ctrl);

	ret_val = igc_setup_copper_link(hw);

	return ret_val;
}

/**
 * igc_init_mac_params_base - Init MAC func ptrs.
 * @hw: pointer to the HW structure
 */
static s32 igc_init_mac_params_base(struct igc_hw *hw)
{
	struct igc_dev_spec_base *dev_spec = &hw->dev_spec._base;
	struct igc_mac_info *mac = &hw->mac;

	/* Set mta register count */
	mac->mta_reg_count = 128;
	mac->rar_entry_count = IGC_RAR_ENTRIES;

	/* reset */
	mac->ops.reset_hw = igc_reset_hw_base;

	mac->ops.acquire_swfw_sync = igc_acquire_swfw_sync_i225;
	mac->ops.release_swfw_sync = igc_release_swfw_sync_i225;

	/* Allow a single clear of the SW semaphore on I225 */
	if (mac->type == igc_i225)
		dev_spec->clear_semaphore_once = true;

	/* physical interface link setup */
	mac->ops.setup_physical_interface = igc_setup_copper_link_base;

	return 0;
}

/**
 * igc_init_phy_params_base - Init PHY func ptrs.
 * @hw: pointer to the HW structure
 */
static s32 igc_init_phy_params_base(struct igc_hw *hw)
{
	struct igc_phy_info *phy = &hw->phy;
	s32 ret_val = 0;
	u32 ctrl_ext;

	if (hw->phy.media_type != igc_media_type_copper) {
		phy->type = igc_phy_none;
		goto out;
	}

	phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT_2500;
	phy->reset_delay_us = 100;

	ctrl_ext = rd32(IGC_CTRL_EXT);

	/* set lan id */
	hw->bus.func = (rd32(IGC_STATUS) & IGC_STATUS_FUNC_MASK) >>
		       IGC_STATUS_FUNC_SHIFT;

	/* Make sure the PHY is in a good state. Several people have reported
	 * firmware leaving the PHY's page select register set to something
	 * other than the default of zero, which causes the PHY ID read to
	 * access something other than the intended register.
	 */
	ret_val = hw->phy.ops.reset(hw);
	if (ret_val) {
		hw_dbg("Error resetting the PHY.\n");
		goto out;
	}

	ret_val = igc_get_phy_id_base(hw);
	if (ret_val)
		return ret_val;

	igc_check_for_link_base(hw);

	/* Verify phy id and set remaining function pointers */
	switch (phy->id) {
	case I225_I_PHY_ID:
		phy->type = igc_phy_i225;
		break;
	default:
		ret_val = -IGC_ERR_PHY;
		goto out;
	}

out:
	return ret_val;
}

static s32 igc_get_invariants_base(struct igc_hw *hw)
{
	struct igc_mac_info *mac = &hw->mac;
	u32 link_mode = 0;
	u32 ctrl_ext = 0;
	s32 ret_val = 0;

	switch (hw->device_id) {
	case IGC_DEV_ID_I225_LM:
	case IGC_DEV_ID_I225_V:
		mac->type = igc_i225;
		break;
	default:
		return -IGC_ERR_MAC_INIT;
	}

	hw->phy.media_type = igc_media_type_copper;

	ctrl_ext = rd32(IGC_CTRL_EXT);
	link_mode = ctrl_ext & IGC_CTRL_EXT_LINK_MODE_MASK;

	/* mac initialization and operations */
	ret_val = igc_init_mac_params_base(hw);
	if (ret_val)
		goto out;

	/* NVM initialization */
	ret_val = igc_init_nvm_params_base(hw);
	switch (hw->mac.type) {
	case igc_i225:
		ret_val = igc_init_nvm_params_i225(hw);
		break;
	default:
		break;
	}

	/* setup PHY parameters */
	ret_val = igc_init_phy_params_base(hw);
	if (ret_val)
		goto out;

out:
	return ret_val;
}

/**
 * igc_acquire_phy_base - Acquire rights to access PHY
 * @hw: pointer to the HW structure
 *
 * Acquire access rights to the correct PHY.  This is a
 * function pointer entry point called by the api module.
 */
static s32 igc_acquire_phy_base(struct igc_hw *hw)
{
	u16 mask = IGC_SWFW_PHY0_SM;

	return hw->mac.ops.acquire_swfw_sync(hw, mask);
}

/**
 * igc_release_phy_base - Release rights to access PHY
 * @hw: pointer to the HW structure
 *
 * A wrapper to release access rights to the correct PHY.  This is a
 * function pointer entry point called by the api module.
 */
static void igc_release_phy_base(struct igc_hw *hw)
{
	u16 mask = IGC_SWFW_PHY0_SM;

	hw->mac.ops.release_swfw_sync(hw, mask);
}

/**
 * igc_get_link_up_info_base - Get link speed/duplex info
 * @hw: pointer to the HW structure
 * @speed: stores the current speed
 * @duplex: stores the current duplex
 *
 * This is a wrapper function, if using the serial gigabit media independent
 * interface, use PCS to retrieve the link speed and duplex information.
 * Otherwise, use the generic function to get the link speed and duplex info.
 */
static s32 igc_get_link_up_info_base(struct igc_hw *hw, u16 *speed,
				     u16 *duplex)
{
	s32 ret_val;

	ret_val = igc_get_speed_and_duplex_copper(hw, speed, duplex);

	return ret_val;
}

/**
 * igc_init_hw_base - Initialize hardware
 * @hw: pointer to the HW structure
 *
 * This inits the hardware readying it for operation.
 */
static s32 igc_init_hw_base(struct igc_hw *hw)
{
	struct igc_mac_info *mac = &hw->mac;
	u16 i, rar_count = mac->rar_entry_count;
	s32 ret_val = 0;

	/* Setup the receive address */
	igc_init_rx_addrs(hw, rar_count);

	/* Zero out the Multicast HASH table */
	hw_dbg("Zeroing the MTA\n");
	for (i = 0; i < mac->mta_reg_count; i++)
		array_wr32(IGC_MTA, i, 0);

	/* Zero out the Unicast HASH table */
	hw_dbg("Zeroing the UTA\n");
	for (i = 0; i < mac->uta_reg_count; i++)
		array_wr32(IGC_UTA, i, 0);

	/* Setup link and flow control */
	ret_val = igc_setup_link(hw);

	/* Clear all of the statistics registers (clear on read).  It is
	 * important that we do this after we have tried to establish link
	 * because the symbol error count will increment wildly if there
	 * is no link.
	 */
	igc_clear_hw_cntrs_base(hw);

	return ret_val;
}

/**
 * igc_read_mac_addr_base - Read device MAC address
 * @hw: pointer to the HW structure
 */
static s32 igc_read_mac_addr_base(struct igc_hw *hw)
{
	s32 ret_val = 0;

	ret_val = igc_read_mac_addr(hw);

	return ret_val;
}

/**
 * igc_power_down_phy_copper_base - Remove link during PHY power down
 * @hw: pointer to the HW structure
 *
 * In the case of a PHY power down to save power, or to turn off link during a
 * driver unload, or wake on lan is not enabled, remove the link.
 */
void igc_power_down_phy_copper_base(struct igc_hw *hw)
{
	/* If the management interface is not enabled, then power down */
	if (!(igc_enable_mng_pass_thru(hw) || igc_check_reset_block(hw)))
		igc_power_down_phy_copper(hw);
}

/**
 * igc_rx_fifo_flush_base - Clean rx fifo after Rx enable
 * @hw: pointer to the HW structure
 *
 * After Rx enable, if manageability is enabled then there is likely some
 * bad data at the start of the fifo and possibly in the DMA fifo.  This
 * function clears the fifos and flushes any packets that came in as rx was
 * being enabled.
 */
void igc_rx_fifo_flush_base(struct igc_hw *hw)
{
	u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
	int i, ms_wait;

	/* disable IPv6 options as per hardware errata */
	rfctl = rd32(IGC_RFCTL);
	rfctl |= IGC_RFCTL_IPV6_EX_DIS;
	wr32(IGC_RFCTL, rfctl);

	if (!(rd32(IGC_MANC) & IGC_MANC_RCV_TCO_EN))
		return;

	/* Disable all Rx queues */
	for (i = 0; i < 4; i++) {
		rxdctl[i] = rd32(IGC_RXDCTL(i));
		wr32(IGC_RXDCTL(i),
		     rxdctl[i] & ~IGC_RXDCTL_QUEUE_ENABLE);
	}
	/* Poll all queues to verify they have shut down */
	for (ms_wait = 0; ms_wait < 10; ms_wait++) {
		usleep_range(1000, 2000);
		rx_enabled = 0;
		for (i = 0; i < 4; i++)
			rx_enabled |= rd32(IGC_RXDCTL(i));
		if (!(rx_enabled & IGC_RXDCTL_QUEUE_ENABLE))
			break;
	}

	if (ms_wait == 10)
		pr_debug("Queue disable timed out after 10ms\n");

	/* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
	 * incoming packets are rejected.  Set enable and wait 2ms so that
	 * any packet that was coming in as RCTL.EN was set is flushed
	 */
	wr32(IGC_RFCTL, rfctl & ~IGC_RFCTL_LEF);

	rlpml = rd32(IGC_RLPML);
	wr32(IGC_RLPML, 0);

	rctl = rd32(IGC_RCTL);
	temp_rctl = rctl & ~(IGC_RCTL_EN | IGC_RCTL_SBP);
	temp_rctl |= IGC_RCTL_LPE;

	wr32(IGC_RCTL, temp_rctl);
	wr32(IGC_RCTL, temp_rctl | IGC_RCTL_EN);
	wrfl();
	usleep_range(2000, 3000);

	/* Enable Rx queues that were previously enabled and restore our
	 * previous state
	 */
	for (i = 0; i < 4; i++)
		wr32(IGC_RXDCTL(i), rxdctl[i]);
	wr32(IGC_RCTL, rctl);
	wrfl();

	wr32(IGC_RLPML, rlpml);
	wr32(IGC_RFCTL, rfctl);

	/* Flush receive errors generated by workaround */
	rd32(IGC_ROC);
	rd32(IGC_RNBC);
	rd32(IGC_MPC);
}

static struct igc_mac_operations igc_mac_ops_base = {
	.init_hw		= igc_init_hw_base,
	.check_for_link		= igc_check_for_link_base,
	.rar_set		= igc_rar_set,
	.read_mac_addr		= igc_read_mac_addr_base,
	.get_speed_and_duplex	= igc_get_link_up_info_base,
};

static const struct igc_phy_operations igc_phy_ops_base = {
	.acquire	= igc_acquire_phy_base,
	.release	= igc_release_phy_base,
	.reset		= igc_phy_hw_reset,
	.read_reg	= igc_read_phy_reg_gpy,
	.write_reg	= igc_write_phy_reg_gpy,
};

const struct igc_info igc_base_info = {
	.get_invariants	= igc_get_invariants_base,
	.mac_ops	= &igc_mac_ops_base,
	.phy_ops	= &igc_phy_ops_base,
};
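Everything board-specific in this file is reached through tables of function pointers: igc_get_invariants_base() fills hw->mac.ops and friends, and igc_base_info at the bottom is what the probe path hangs on to. A minimal sketch of that dispatch pattern, with simplified stand-in types (not the real igc structures):

	#include <stdio.h>

	struct hw;

	struct mac_ops {
		int (*init_hw)(struct hw *hw);
		int (*check_for_link)(struct hw *hw);
	};

	struct board_info {                /* plays the role of igc_base_info */
		int (*get_invariants)(struct hw *hw);
		const struct mac_ops *mac_ops;
	};

	struct hw {
		const struct mac_ops *ops; /* copied from board_info at probe */
	};

	static int base_init_hw(struct hw *hw)        { puts("init_hw"); return 0; }
	static int base_check_for_link(struct hw *hw) { puts("link ok"); return 0; }
	static int base_get_invariants(struct hw *hw) { return 0; }

	static const struct mac_ops base_mac_ops = {
		.init_hw	= base_init_hw,
		.check_for_link	= base_check_for_link,
	};

	static const struct board_info base_info = {
		.get_invariants	= base_get_invariants,
		.mac_ops	= &base_mac_ops,
	};

	int main(void)
	{
		struct hw hw = { .ops = base_info.mac_ops };

		base_info.get_invariants(&hw);
		hw.ops->init_hw(&hw);          /* lands in base_init_hw() */
		return hw.ops->check_for_link(&hw);
	}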
+107
drivers/net/ethernet/intel/igc/igc_base.h
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018 Intel Corporation */

#ifndef _IGC_BASE_H
#define _IGC_BASE_H

/* forward declaration */
void igc_rx_fifo_flush_base(struct igc_hw *hw);
void igc_power_down_phy_copper_base(struct igc_hw *hw);

/* Transmit Descriptor - Advanced */
union igc_adv_tx_desc {
	struct {
		__le64 buffer_addr;	/* Address of descriptor's data buf */
		__le32 cmd_type_len;
		__le32 olinfo_status;
	} read;
	struct {
		__le64 rsvd;		/* Reserved */
		__le32 nxtseq_seed;
		__le32 status;
	} wb;
};

/* Adv Transmit Descriptor Config Masks */
#define IGC_ADVTXD_MAC_TSTAMP	0x00080000 /* IEEE1588 Timestamp packet */
#define IGC_ADVTXD_DTYP_CTXT	0x00200000 /* Advanced Context Descriptor */
#define IGC_ADVTXD_DTYP_DATA	0x00300000 /* Advanced Data Descriptor */
#define IGC_ADVTXD_DCMD_EOP	0x01000000 /* End of Packet */
#define IGC_ADVTXD_DCMD_IFCS	0x02000000 /* Insert FCS (Ethernet CRC) */
#define IGC_ADVTXD_DCMD_RS	0x08000000 /* Report Status */
#define IGC_ADVTXD_DCMD_DEXT	0x20000000 /* Descriptor extension (1=Adv) */
#define IGC_ADVTXD_DCMD_VLE	0x40000000 /* VLAN pkt enable */
#define IGC_ADVTXD_DCMD_TSE	0x80000000 /* TCP Seg enable */
#define IGC_ADVTXD_PAYLEN_SHIFT	14 /* Adv desc PAYLEN shift */

#define IGC_RAR_ENTRIES		16

struct igc_adv_data_desc {
	__le64 buffer_addr;	/* Address of the descriptor's data buffer */
	union {
		u32 data;
		struct {
			u32 datalen:16;	/* Data buffer length */
			u32 rsvd:4;
			u32 dtyp:4;	/* Descriptor type */
			u32 dcmd:8;	/* Descriptor command */
		} config;
	} lower;
	union {
		u32 data;
		struct {
			u32 status:4;	/* Descriptor status */
			u32 idx:4;
			u32 popts:6;	/* Packet Options */
			u32 paylen:18;	/* Payload length */
		} options;
	} upper;
};

/* Receive Descriptor - Advanced */
union igc_adv_rx_desc {
	struct {
		__le64 pkt_addr;	/* Packet buffer address */
		__le64 hdr_addr;	/* Header buffer address */
	} read;
	struct {
		struct {
			union {
				__le32 data;
				struct {
					__le16 pkt_info; /*RSS type, Pkt type*/
					/* Split Header, header buffer len */
					__le16 hdr_info;
				} hs_rss;
			} lo_dword;
			union {
				__le32 rss;	/* RSS Hash */
				struct {
					__le16 ip_id;	/* IP id */
					__le16 csum;	/* Packet Checksum */
				} csum_ip;
			} hi_dword;
		} lower;
		struct {
			__le32 status_error;	/* ext status/error */
			__le16 length;		/* Packet length */
			__le16 vlan;		/* VLAN tag */
		} upper;
	} wb; /* writeback */
};

/* Adv Transmit Descriptor Config Masks */
#define IGC_ADVTXD_PAYLEN_SHIFT	14 /* Adv desc PAYLEN shift */

/* Additional Transmit Descriptor Control definitions */
#define IGC_TXDCTL_QUEUE_ENABLE	0x02000000 /* Ena specific Tx Queue */

/* Additional Receive Descriptor Control definitions */
#define IGC_RXDCTL_QUEUE_ENABLE	0x02000000 /* Ena specific Rx Queue */

/* SRRCTL bit definitions */
#define IGC_SRRCTL_BSIZEPKT_SHIFT	10 /* Shift _right_ */
#define IGC_SRRCTL_BSIZEHDRSIZE_SHIFT	2  /* Shift _left_ */
#define IGC_SRRCTL_DESCTYPE_ADV_ONEBUF	0x02000000

#endif /* _IGC_BASE_H */
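The read-format Tx descriptor above is a buffer address plus two little-endian words, and the IGC_ADVTXD_* masks compose those words. A hedged sketch of how the flag bits and the PAYLEN shift combine; the real assembly happens in igc_main.c elsewhere in this series and writes cpu_to_le32() values, so this is host-endian illustration only:

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	/* values copied from igc_base.h above */
	#define IGC_ADVTXD_DTYP_DATA	0x00300000 /* Advanced Data Descriptor */
	#define IGC_ADVTXD_DCMD_EOP	0x01000000 /* End of Packet */
	#define IGC_ADVTXD_DCMD_IFCS	0x02000000 /* Insert FCS */
	#define IGC_ADVTXD_DCMD_RS	0x08000000 /* Report Status */
	#define IGC_ADVTXD_DCMD_DEXT	0x20000000 /* Descriptor extension */
	#define IGC_ADVTXD_PAYLEN_SHIFT	14

	static void build_tx_words(uint32_t buf_len, uint32_t paylen,
				   uint32_t *cmd_type_len, uint32_t *olinfo_status)
	{
		/* buffer length occupies the low bits of cmd_type_len */
		*cmd_type_len = buf_len | IGC_ADVTXD_DTYP_DATA |
				IGC_ADVTXD_DCMD_DEXT | IGC_ADVTXD_DCMD_IFCS |
				IGC_ADVTXD_DCMD_EOP | IGC_ADVTXD_DCMD_RS;
		/* total payload length sits above bit 14 of olinfo_status */
		*olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT;
	}

	int main(void)
	{
		uint32_t cmd, olinfo;

		build_tx_words(1514, 1514, &cmd, &olinfo);
		printf("cmd_type_len=0x%08" PRIx32 " olinfo_status=0x%08" PRIx32 "\n",
		       cmd, olinfo);
		return 0;
	}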
+389
drivers/net/ethernet/intel/igc/igc_defines.h
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018 Intel Corporation */

#ifndef _IGC_DEFINES_H_
#define _IGC_DEFINES_H_

#define IGC_CTRL_EXT_DRV_LOAD	0x10000000 /* Drv loaded bit for FW */

/* PCI Bus Info */
#define PCIE_DEVICE_CONTROL2		0x28
#define PCIE_DEVICE_CONTROL2_16ms	0x0005

/* Physical Func Reset Done Indication */
#define IGC_CTRL_EXT_LINK_MODE_MASK	0x00C00000

/* Loop limit on how long we wait for auto-negotiation to complete */
#define COPPER_LINK_UP_LIMIT		10
#define PHY_AUTO_NEG_LIMIT		45
#define PHY_FORCE_LIMIT			20

/* Number of 100 microseconds we wait for PCI Express master disable */
#define MASTER_DISABLE_TIMEOUT		800
/* Blocks new Master requests */
#define IGC_CTRL_GIO_MASTER_DISABLE	0x00000004
/* Status of Master requests. */
#define IGC_STATUS_GIO_MASTER_ENABLE	0x00080000

/* PCI Express Control */
#define IGC_GCR_CMPL_TMOUT_MASK		0x0000F000
#define IGC_GCR_CMPL_TMOUT_10ms		0x00001000
#define IGC_GCR_CMPL_TMOUT_RESEND	0x00010000
#define IGC_GCR_CAP_VER2		0x00040000

/* Receive Address
 * Number of high/low register pairs in the RAR. The RAR (Receive Address
 * Registers) holds the directed and multicast addresses that we monitor.
 * Technically, we have 16 spots.  However, we reserve one of these spots
 * (RAR[15]) for our directed address used by controllers with
 * manageability enabled, allowing us room for 15 multicast addresses.
 */
#define IGC_RAH_AV		0x80000000 /* Receive descriptor valid */
#define IGC_RAH_POOL_1		0x00040000
#define IGC_RAL_MAC_ADDR_LEN	4
#define IGC_RAH_MAC_ADDR_LEN	2

/* Error Codes */
#define IGC_SUCCESS			0
#define IGC_ERR_NVM			1
#define IGC_ERR_PHY			2
#define IGC_ERR_CONFIG			3
#define IGC_ERR_PARAM			4
#define IGC_ERR_MAC_INIT		5
#define IGC_ERR_RESET			9
#define IGC_ERR_MASTER_REQUESTS_PENDING	10
#define IGC_ERR_BLK_PHY_RESET		12
#define IGC_ERR_SWFW_SYNC		13

/* Device Control */
#define IGC_CTRL_RST		0x04000000 /* Global reset */

#define IGC_CTRL_PHY_RST	0x80000000 /* PHY Reset */
#define IGC_CTRL_SLU		0x00000040 /* Set link up (Force Link) */
#define IGC_CTRL_FRCSPD		0x00000800 /* Force Speed */
#define IGC_CTRL_FRCDPX		0x00001000 /* Force Duplex */

#define IGC_CTRL_RFCE		0x08000000 /* Receive Flow Control enable */
#define IGC_CTRL_TFCE		0x10000000 /* Transmit flow control enable */

#define IGC_CONNSW_AUTOSENSE_EN	0x1

/* PBA constants */
#define IGC_PBA_34K		0x0022

/* SW Semaphore Register */
#define IGC_SWSM_SMBI		0x00000001 /* Driver Semaphore bit */
#define IGC_SWSM_SWESMBI	0x00000002 /* FW Semaphore bit */

/* SWFW_SYNC Definitions */
#define IGC_SWFW_EEP_SM		0x1
#define IGC_SWFW_PHY0_SM	0x2

/* Autoneg Advertisement Register */
#define NWAY_AR_10T_HD_CAPS	0x0020 /* 10T Half Duplex Capable */
#define NWAY_AR_10T_FD_CAPS	0x0040 /* 10T Full Duplex Capable */
#define NWAY_AR_100TX_HD_CAPS	0x0080 /* 100TX Half Duplex Capable */
#define NWAY_AR_100TX_FD_CAPS	0x0100 /* 100TX Full Duplex Capable */
#define NWAY_AR_PAUSE		0x0400 /* Pause operation desired */
#define NWAY_AR_ASM_DIR		0x0800 /* Asymmetric Pause Direction bit */

/* Link Partner Ability Register (Base Page) */
#define NWAY_LPAR_PAUSE		0x0400 /* LP Pause operation desired */
#define NWAY_LPAR_ASM_DIR	0x0800 /* LP Asymmetric Pause Direction bit */

/* 1000BASE-T Control Register */
#define CR_1000T_ASYM_PAUSE	0x0080 /* Advertise asymmetric pause bit */
#define CR_1000T_HD_CAPS	0x0100 /* Advertise 1000T HD capability */
#define CR_1000T_FD_CAPS	0x0200 /* Advertise 1000T FD capability */

/* 1000BASE-T Status Register */
#define SR_1000T_REMOTE_RX_STATUS	0x1000 /* Remote receiver OK */
#define SR_1000T_LOCAL_RX_STATUS	0x2000 /* Local receiver OK */

/* PHY GPY 211 registers */
#define STANDARD_AN_REG_MASK	0x0007 /* MMD */
#define ANEG_MULTIGBT_AN_CTRL	0x0020 /* MULTI GBT AN Control Register */
#define MMD_DEVADDR_SHIFT	16     /* Shift MMD to higher bits */
#define CR_2500T_FD_CAPS	0x0080 /* Advertise 2500T FD capability */

/* NVM Control */
/* Number of milliseconds for NVM auto read done after MAC reset. */
#define AUTO_READ_DONE_TIMEOUT	10
#define IGC_EECD_AUTO_RD	0x00000200 /* NVM Auto Read done */
#define IGC_EECD_REQ		0x00000040 /* NVM Access Request */
#define IGC_EECD_GNT		0x00000080 /* NVM Access Grant */
/* NVM Addressing bits based on type 0=small, 1=large */
#define IGC_EECD_ADDR_BITS	0x00000400
#define IGC_NVM_GRANT_ATTEMPTS	1000 /* NVM # attempts to gain grant */
#define IGC_EECD_SIZE_EX_MASK	0x00007800 /* NVM Size */
#define IGC_EECD_SIZE_EX_SHIFT	11
#define IGC_EECD_FLUPD_I225	0x00800000 /* Update FLASH */
#define IGC_EECD_FLUDONE_I225	0x04000000 /* Update FLASH done */
#define IGC_EECD_FLASH_DETECTED_I225	0x00080000 /* FLASH detected */
#define IGC_FLUDONE_ATTEMPTS	20000
#define IGC_EERD_EEWR_MAX_COUNT	512 /* buffered EEPROM words rw */

/* Offset to data in NVM read/write registers */
#define IGC_NVM_RW_REG_DATA	16
#define IGC_NVM_RW_REG_DONE	2 /* Offset to READ/WRITE done bit */
#define IGC_NVM_RW_REG_START	1 /* Start operation */
#define IGC_NVM_RW_ADDR_SHIFT	2 /* Shift to the address bits */
#define IGC_NVM_POLL_READ	0 /* Flag for polling for read complete */

/* NVM Word Offsets */
#define NVM_CHECKSUM_REG	0x003F

/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
#define NVM_SUM			0xBABA

#define NVM_PBA_OFFSET_0	8
#define NVM_PBA_OFFSET_1	9
#define NVM_RESERVED_WORD	0xFFFF
#define NVM_PBA_PTR_GUARD	0xFAFA
#define NVM_WORD_SIZE_BASE_SHIFT	6

/* Collision related configuration parameters */
#define IGC_COLLISION_THRESHOLD	15
#define IGC_CT_SHIFT		4
#define IGC_COLLISION_DISTANCE	63
#define IGC_COLD_SHIFT		12

/* Device Status */
#define IGC_STATUS_FD		0x00000001 /* Full duplex.0=half,1=full */
#define IGC_STATUS_LU		0x00000002 /* Link up.0=no,1=link */
#define IGC_STATUS_FUNC_MASK	0x0000000C /* PCI Function Mask */
#define IGC_STATUS_FUNC_SHIFT	2
#define IGC_STATUS_FUNC_1	0x00000004 /* Function 1 */
#define IGC_STATUS_TXOFF	0x00000010 /* transmission paused */
#define IGC_STATUS_SPEED_100	0x00000040 /* Speed 100Mb/s */
#define IGC_STATUS_SPEED_1000	0x00000080 /* Speed 1000Mb/s */
#define IGC_STATUS_SPEED_2500	0x00400000 /* Speed 2.5Gb/s */

#define SPEED_10	10
#define SPEED_100	100
#define SPEED_1000	1000
#define SPEED_2500	2500
#define HALF_DUPLEX	1
#define FULL_DUPLEX	2

/* 1Gbps and 2.5Gbps half duplex is not supported, nor spec-compliant. */
#define ADVERTISE_10_HALF	0x0001
#define ADVERTISE_10_FULL	0x0002
#define ADVERTISE_100_HALF	0x0004
#define ADVERTISE_100_FULL	0x0008
#define ADVERTISE_1000_HALF	0x0010 /* Not used, just FYI */
#define ADVERTISE_1000_FULL	0x0020
#define ADVERTISE_2500_HALF	0x0040 /* Not used, just FYI */
#define ADVERTISE_2500_FULL	0x0080

#define IGC_ALL_SPEED_DUPLEX_2500 ( \
	ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \
	ADVERTISE_100_FULL | ADVERTISE_1000_FULL | ADVERTISE_2500_FULL)

#define AUTONEG_ADVERTISE_SPEED_DEFAULT_2500	IGC_ALL_SPEED_DUPLEX_2500

/* Interrupt Cause Read */
#define IGC_ICR_TXDW		BIT(0)	/* Transmit desc written back */
#define IGC_ICR_TXQE		BIT(1)	/* Transmit Queue empty */
#define IGC_ICR_LSC		BIT(2)	/* Link Status Change */
#define IGC_ICR_RXSEQ		BIT(3)	/* Rx sequence error */
#define IGC_ICR_RXDMT0		BIT(4)	/* Rx desc min. threshold (0) */
#define IGC_ICR_RXO		BIT(6)	/* Rx overrun */
#define IGC_ICR_RXT0		BIT(7)	/* Rx timer intr (ring 0) */
#define IGC_ICR_DRSTA		BIT(30)	/* Device Reset Asserted */

/* If this bit asserted, the driver should claim the interrupt */
#define IGC_ICR_INT_ASSERTED	BIT(31)

#define IGC_ICS_RXT0		IGC_ICR_RXT0 /* Rx timer intr */

#define IMS_ENABLE_MASK ( \
	IGC_IMS_RXT0 | \
	IGC_IMS_TXDW | \
	IGC_IMS_RXDMT0 | \
	IGC_IMS_RXSEQ | \
	IGC_IMS_LSC)

/* Interrupt Mask Set */
#define IGC_IMS_TXDW		IGC_ICR_TXDW	/* Tx desc written back */
#define IGC_IMS_RXSEQ		IGC_ICR_RXSEQ	/* Rx sequence error */
#define IGC_IMS_LSC		IGC_ICR_LSC	/* Link Status Change */
#define IGC_IMS_DOUTSYNC	IGC_ICR_DOUTSYNC /* NIC DMA out of sync */
#define IGC_IMS_DRSTA		IGC_ICR_DRSTA	/* Device Reset Asserted */
#define IGC_IMS_RXT0		IGC_ICR_RXT0	/* Rx timer intr */
#define IGC_IMS_RXDMT0		IGC_ICR_RXDMT0	/* Rx desc min. threshold */

#define IGC_QVECTOR_MASK	0x7FFC	/* Q-vector mask */
#define IGC_ITR_VAL_MASK	0x04	/* ITR value mask */

/* Interrupt Cause Set */
#define IGC_ICS_LSC		IGC_ICR_LSC	/* Link Status Change */
#define IGC_ICS_RXDMT0		IGC_ICR_RXDMT0	/* rx desc min. threshold */
#define IGC_ICS_DRSTA		IGC_ICR_DRSTA	/* Device Reset Asserted */

#define IGC_ICR_DOUTSYNC	0x10000000 /* NIC DMA out of sync */
#define IGC_EITR_CNT_IGNR	0x80000000 /* Don't reset counters on write */
#define IGC_IVAR_VALID		0x80
#define IGC_GPIE_NSICR		0x00000001
#define IGC_GPIE_MSIX_MODE	0x00000010
#define IGC_GPIE_EIAME		0x40000000
#define IGC_GPIE_PBA		0x80000000

/* Transmit Descriptor bit definitions */
#define IGC_TXD_DTYP_D		0x00100000 /* Data Descriptor */
#define IGC_TXD_DTYP_C		0x00000000 /* Context Descriptor */
#define IGC_TXD_POPTS_IXSM	0x01	/* Insert IP checksum */
#define IGC_TXD_POPTS_TXSM	0x02	/* Insert TCP/UDP checksum */
#define IGC_TXD_CMD_EOP		0x01000000 /* End of Packet */
#define IGC_TXD_CMD_IFCS	0x02000000 /* Insert FCS (Ethernet CRC) */
#define IGC_TXD_CMD_IC		0x04000000 /* Insert Checksum */
#define IGC_TXD_CMD_RS		0x08000000 /* Report Status */
#define IGC_TXD_CMD_RPS		0x10000000 /* Report Packet Sent */
#define IGC_TXD_CMD_DEXT	0x20000000 /* Desc extension (0 = legacy) */
#define IGC_TXD_CMD_VLE		0x40000000 /* Add VLAN tag */
#define IGC_TXD_CMD_IDE		0x80000000 /* Enable Tidv register */
#define IGC_TXD_STAT_DD		0x00000001 /* Descriptor Done */
#define IGC_TXD_STAT_EC		0x00000002 /* Excess Collisions */
#define IGC_TXD_STAT_LC		0x00000004 /* Late Collisions */
#define IGC_TXD_STAT_TU		0x00000008 /* Transmit underrun */
#define IGC_TXD_CMD_TCP		0x01000000 /* TCP packet */
#define IGC_TXD_CMD_IP		0x02000000 /* IP packet */
#define IGC_TXD_CMD_TSE		0x04000000 /* TCP Seg enable */
#define IGC_TXD_STAT_TC		0x00000004 /* Tx Underrun */
#define IGC_TXD_EXTCMD_TSTAMP	0x00000010 /* IEEE1588 Timestamp packet */

/* Transmit Control */
#define IGC_TCTL_EN		0x00000002 /* enable Tx */
#define IGC_TCTL_PSP		0x00000008 /* pad short packets */
#define IGC_TCTL_CT		0x00000ff0 /* collision threshold */
#define IGC_TCTL_COLD		0x003ff000 /* collision distance */
#define IGC_TCTL_RTLC		0x01000000 /* Re-transmit on late collision */
#define IGC_TCTL_MULR		0x10000000 /* Multiple request support */

#define IGC_CT_SHIFT			4
#define IGC_COLLISION_THRESHOLD		15

/* Flow Control Constants */
#define FLOW_CONTROL_ADDRESS_LOW	0x00C28001
#define FLOW_CONTROL_ADDRESS_HIGH	0x00000100
#define FLOW_CONTROL_TYPE		0x8808
/* Enable XON frame transmission */
#define IGC_FCRTL_XONE			0x80000000

/* Management Control */
#define IGC_MANC_RCV_TCO_EN	0x00020000 /* Receive TCO Packets Enabled */
#define IGC_MANC_BLK_PHY_RST_ON_IDE	0x00040000 /* Block phy resets */

/* Receive Control */
#define IGC_RCTL_RST		0x00000001 /* Software reset */
#define IGC_RCTL_EN		0x00000002 /* enable */
#define IGC_RCTL_SBP		0x00000004 /* store bad packet */
#define IGC_RCTL_UPE		0x00000008 /* unicast promisc enable */
#define IGC_RCTL_MPE		0x00000010 /* multicast promisc enable */
#define IGC_RCTL_LPE		0x00000020 /* long packet enable */
#define IGC_RCTL_LBM_MAC	0x00000040 /* MAC loopback mode */
#define IGC_RCTL_LBM_TCVR	0x000000C0 /* tcvr loopback mode */

#define IGC_RCTL_RDMTS_HALF	0x00000000 /* Rx desc min thresh size */
#define IGC_RCTL_BAM		0x00008000 /* broadcast enable */

/* Receive Descriptor bit definitions */
#define IGC_RXD_STAT_EOP	0x02	/* End of Packet */

#define IGC_RXDEXT_STATERR_CE		0x01000000
#define IGC_RXDEXT_STATERR_SE		0x02000000
#define IGC_RXDEXT_STATERR_SEQ		0x04000000
#define IGC_RXDEXT_STATERR_CXE		0x10000000
#define IGC_RXDEXT_STATERR_TCPE		0x20000000
#define IGC_RXDEXT_STATERR_IPE		0x40000000
#define IGC_RXDEXT_STATERR_RXE		0x80000000

/* Same mask, but for extended and packet split descriptors */
#define IGC_RXDEXT_ERR_FRAME_ERR_MASK ( \
	IGC_RXDEXT_STATERR_CE | \
	IGC_RXDEXT_STATERR_SE | \
	IGC_RXDEXT_STATERR_SEQ | \
	IGC_RXDEXT_STATERR_CXE | \
	IGC_RXDEXT_STATERR_RXE)

/* Header split receive */
#define IGC_RFCTL_IPV6_EX_DIS	0x00010000
#define IGC_RFCTL_LEF		0x00040000

#define IGC_RCTL_SZ_256		0x00030000 /* Rx buffer size 256 */

#define IGC_RCTL_MO_SHIFT	12 /* multicast offset shift */
#define IGC_RCTL_CFIEN		0x00080000 /* canonical form enable */
#define IGC_RCTL_DPF		0x00400000 /* discard pause frames */
#define IGC_RCTL_PMCF		0x00800000 /* pass MAC control frames */
#define IGC_RCTL_SECRC		0x04000000 /* Strip Ethernet CRC */

#define I225_RXPBSIZE_DEFAULT	0x000000A2 /* RXPBSIZE default */
#define I225_TXPBSIZE_DEFAULT	0x04000014 /* TXPBSIZE default */

/* GPY211 - I225 defines */
#define GPY_MMD_MASK		0xFFFF0000
#define GPY_MMD_SHIFT		16
#define GPY_REG_MASK		0x0000FFFF

#define IGC_MMDAC_FUNC_DATA	0x4000 /* Data, no post increment */

/* MAC definitions */
#define IGC_FACTPS_MNGCG	0x20000000
#define IGC_FWSM_MODE_MASK	0xE
#define IGC_FWSM_MODE_SHIFT	1

/* Management Control */
#define IGC_MANC_SMBUS_EN	0x00000001 /* SMBus Enabled - RO */
#define IGC_MANC_ASF_EN		0x00000002 /* ASF Enabled - RO */

/* PHY */
#define PHY_REVISION_MASK	0xFFFFFFF0
#define MAX_PHY_REG_ADDRESS	0x1F /* 5 bit address bus (0-0x1F) */
#define IGC_GEN_POLL_TIMEOUT	1920

/* PHY Control Register */
#define MII_CR_FULL_DUPLEX	0x0100 /* FDX =1, half duplex =0 */
#define MII_CR_RESTART_AUTO_NEG	0x0200 /* Restart auto negotiation */
#define MII_CR_POWER_DOWN	0x0800 /* Power down */
#define MII_CR_AUTO_NEG_EN	0x1000 /* Auto Neg Enable */
#define MII_CR_LOOPBACK		0x4000 /* 0 = normal, 1 = loopback */
#define MII_CR_RESET		0x8000 /* 0 = normal, 1 = PHY reset */
#define MII_CR_SPEED_1000	0x0040
#define MII_CR_SPEED_100	0x2000
#define MII_CR_SPEED_10		0x0000

/* PHY Status Register */
#define MII_SR_LINK_STATUS	0x0004 /* Link Status 1 = link */
#define MII_SR_AUTONEG_COMPLETE	0x0020 /* Auto Neg Complete */

/* PHY 1000 MII Register/Bit Definitions */
/* PHY Registers defined by IEEE */
#define PHY_CONTROL		0x00 /* Control Register */
#define PHY_STATUS		0x01 /* Status Register */
#define PHY_ID1			0x02 /* Phy Id Reg (word 1) */
#define PHY_ID2			0x03 /* Phy Id Reg (word 2) */
#define PHY_AUTONEG_ADV		0x04 /* Autoneg Advertisement */
#define PHY_LP_ABILITY		0x05 /* Link Partner Ability (Base Page) */
#define PHY_1000T_CTRL		0x09 /* 1000Base-T Control Reg */
#define PHY_1000T_STATUS	0x0A /* 1000Base-T Status Reg */

/* Bit definitions for valid PHY IDs. I = Integrated E = External */
#define I225_I_PHY_ID		0x67C9DC00

/* MDI Control */
#define IGC_MDIC_DATA_MASK	0x0000FFFF
#define IGC_MDIC_REG_MASK	0x001F0000
#define IGC_MDIC_REG_SHIFT	16
#define IGC_MDIC_PHY_MASK	0x03E00000
#define IGC_MDIC_PHY_SHIFT	21
#define IGC_MDIC_OP_WRITE	0x04000000
#define IGC_MDIC_OP_READ	0x08000000
#define IGC_MDIC_READY		0x10000000
#define IGC_MDIC_INT_EN		0x20000000
#define IGC_MDIC_ERROR		0x40000000
#define IGC_MDIC_DEST		0x80000000

#define IGC_N0_QUEUE		-1

#endif /* _IGC_DEFINES_H_ */
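The IGC_STATUS_* bits show how link state is reported: one link-up bit, one full-duplex bit, and a per-speed bit, with 2.5Gb/s living in a new position (bit 22) that did not exist on the 1GbE parts. A small sketch of the decode a get_speed_and_duplex implementation has to perform; this is illustrative only, not the driver's igc_get_speed_and_duplex_copper():

	#include <stdint.h>
	#include <stdio.h>

	/* values copied from igc_defines.h above */
	#define IGC_STATUS_FD		0x00000001
	#define IGC_STATUS_LU		0x00000002
	#define IGC_STATUS_SPEED_100	0x00000040
	#define IGC_STATUS_SPEED_1000	0x00000080
	#define IGC_STATUS_SPEED_2500	0x00400000
	#define HALF_DUPLEX	1
	#define FULL_DUPLEX	2

	static void decode_status(uint32_t status, uint16_t *speed, uint16_t *duplex)
	{
		if (!(status & IGC_STATUS_LU)) {	/* link down */
			*speed = 0;
			*duplex = 0;
			return;
		}

		if (status & IGC_STATUS_SPEED_2500)
			*speed = 2500;
		else if (status & IGC_STATUS_SPEED_1000)
			*speed = 1000;
		else if (status & IGC_STATUS_SPEED_100)
			*speed = 100;
		else
			*speed = 10;	/* no speed bit set means 10Mb/s */

		*duplex = (status & IGC_STATUS_FD) ? FULL_DUPLEX : HALF_DUPLEX;
	}

	int main(void)
	{
		uint16_t speed, duplex;

		/* link up at 2.5Gb/s, full duplex */
		decode_status(IGC_STATUS_LU | IGC_STATUS_SPEED_2500 | IGC_STATUS_FD,
			      &speed, &duplex);
		printf("%u Mb/s, %s duplex\n", speed,
		       duplex == FULL_DUPLEX ? "full" : "half");
		return 0;
	}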
+321
drivers/net/ethernet/intel/igc/igc_hw.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Copyright (c) 2018 Intel Corporation */ 3 + 4 + #ifndef _IGC_HW_H_ 5 + #define _IGC_HW_H_ 6 + 7 + #include <linux/types.h> 8 + #include <linux/if_ether.h> 9 + #include <linux/netdevice.h> 10 + 11 + #include "igc_regs.h" 12 + #include "igc_defines.h" 13 + #include "igc_mac.h" 14 + #include "igc_phy.h" 15 + #include "igc_nvm.h" 16 + #include "igc_i225.h" 17 + #include "igc_base.h" 18 + 19 + #define IGC_DEV_ID_I225_LM 0x15F2 20 + #define IGC_DEV_ID_I225_V 0x15F3 21 + 22 + #define IGC_FUNC_0 0 23 + 24 + /* Function pointers for the MAC. */ 25 + struct igc_mac_operations { 26 + s32 (*check_for_link)(struct igc_hw *hw); 27 + s32 (*reset_hw)(struct igc_hw *hw); 28 + s32 (*init_hw)(struct igc_hw *hw); 29 + s32 (*setup_physical_interface)(struct igc_hw *hw); 30 + void (*rar_set)(struct igc_hw *hw, u8 *address, u32 index); 31 + s32 (*read_mac_addr)(struct igc_hw *hw); 32 + s32 (*get_speed_and_duplex)(struct igc_hw *hw, u16 *speed, 33 + u16 *duplex); 34 + s32 (*acquire_swfw_sync)(struct igc_hw *hw, u16 mask); 35 + void (*release_swfw_sync)(struct igc_hw *hw, u16 mask); 36 + }; 37 + 38 + enum igc_mac_type { 39 + igc_undefined = 0, 40 + igc_i225, 41 + igc_num_macs /* List is 1-based, so subtract 1 for true count. */ 42 + }; 43 + 44 + enum igc_phy_type { 45 + igc_phy_unknown = 0, 46 + igc_phy_none, 47 + igc_phy_i225, 48 + }; 49 + 50 + enum igc_media_type { 51 + igc_media_type_unknown = 0, 52 + igc_media_type_copper = 1, 53 + igc_num_media_types 54 + }; 55 + 56 + enum igc_nvm_type { 57 + igc_nvm_unknown = 0, 58 + igc_nvm_flash_hw, 59 + igc_nvm_invm, 60 + }; 61 + 62 + struct igc_info { 63 + s32 (*get_invariants)(struct igc_hw *hw); 64 + struct igc_mac_operations *mac_ops; 65 + const struct igc_phy_operations *phy_ops; 66 + struct igc_nvm_operations *nvm_ops; 67 + }; 68 + 69 + extern const struct igc_info igc_base_info; 70 + 71 + struct igc_mac_info { 72 + struct igc_mac_operations ops; 73 + 74 + u8 addr[ETH_ALEN]; 75 + u8 perm_addr[ETH_ALEN]; 76 + 77 + enum igc_mac_type type; 78 + 79 + u32 collision_delta; 80 + u32 ledctl_default; 81 + u32 ledctl_mode1; 82 + u32 ledctl_mode2; 83 + u32 mc_filter_type; 84 + u32 tx_packet_delta; 85 + u32 txcw; 86 + 87 + u16 mta_reg_count; 88 + u16 uta_reg_count; 89 + 90 + u16 rar_entry_count; 91 + 92 + u8 forced_speed_duplex; 93 + 94 + bool adaptive_ifs; 95 + bool has_fwsm; 96 + bool asf_firmware_present; 97 + bool arc_subsystem_valid; 98 + 99 + bool autoneg; 100 + bool autoneg_failed; 101 + bool get_link_status; 102 + }; 103 + 104 + struct igc_nvm_operations { 105 + s32 (*acquire)(struct igc_hw *hw); 106 + s32 (*read)(struct igc_hw *hw, u16 offset, u16 i, u16 *data); 107 + void (*release)(struct igc_hw *hw); 108 + s32 (*write)(struct igc_hw *hw, u16 offset, u16 i, u16 *data); 109 + s32 (*update)(struct igc_hw *hw); 110 + s32 (*validate)(struct igc_hw *hw); 111 + s32 (*valid_led_default)(struct igc_hw *hw, u16 *data); 112 + }; 113 + 114 + struct igc_phy_operations { 115 + s32 (*acquire)(struct igc_hw *hw); 116 + s32 (*check_polarity)(struct igc_hw *hw); 117 + s32 (*check_reset_block)(struct igc_hw *hw); 118 + s32 (*force_speed_duplex)(struct igc_hw *hw); 119 + s32 (*get_cfg_done)(struct igc_hw *hw); 120 + s32 (*get_cable_length)(struct igc_hw *hw); 121 + s32 (*get_phy_info)(struct igc_hw *hw); 122 + s32 (*read_reg)(struct igc_hw *hw, u32 address, u16 *data); 123 + void (*release)(struct igc_hw *hw); 124 + s32 (*reset)(struct igc_hw *hw); 125 + s32 (*write_reg)(struct igc_hw *hw, u32 address, u16 data); 126 + }; 127 + 128 
+ struct igc_nvm_info { 129 + struct igc_nvm_operations ops; 130 + enum igc_nvm_type type; 131 + 132 + u32 flash_bank_size; 133 + u32 flash_base_addr; 134 + 135 + u16 word_size; 136 + u16 delay_usec; 137 + u16 address_bits; 138 + u16 opcode_bits; 139 + u16 page_size; 140 + }; 141 + 142 + struct igc_phy_info { 143 + struct igc_phy_operations ops; 144 + 145 + enum igc_phy_type type; 146 + 147 + u32 addr; 148 + u32 id; 149 + u32 reset_delay_us; /* in usec */ 150 + u32 revision; 151 + 152 + enum igc_media_type media_type; 153 + 154 + u16 autoneg_advertised; 155 + u16 autoneg_mask; 156 + u16 cable_length; 157 + u16 max_cable_length; 158 + u16 min_cable_length; 159 + u16 pair_length[4]; 160 + 161 + u8 mdix; 162 + 163 + bool disable_polarity_correction; 164 + bool is_mdix; 165 + bool polarity_correction; 166 + bool reset_disable; 167 + bool speed_downgraded; 168 + bool autoneg_wait_to_complete; 169 + }; 170 + 171 + struct igc_bus_info { 172 + u16 func; 173 + u16 pci_cmd_word; 174 + }; 175 + 176 + enum igc_fc_mode { 177 + igc_fc_none = 0, 178 + igc_fc_rx_pause, 179 + igc_fc_tx_pause, 180 + igc_fc_full, 181 + igc_fc_default = 0xFF 182 + }; 183 + 184 + struct igc_fc_info { 185 + u32 high_water; /* Flow control high-water mark */ 186 + u32 low_water; /* Flow control low-water mark */ 187 + u16 pause_time; /* Flow control pause timer */ 188 + bool send_xon; /* Flow control send XON */ 189 + bool strict_ieee; /* Strict IEEE mode */ 190 + enum igc_fc_mode current_mode; /* Type of flow control */ 191 + enum igc_fc_mode requested_mode; 192 + }; 193 + 194 + struct igc_dev_spec_base { 195 + bool global_device_reset; 196 + bool eee_disable; 197 + bool clear_semaphore_once; 198 + bool module_plugged; 199 + u8 media_port; 200 + bool mas_capable; 201 + }; 202 + 203 + struct igc_hw { 204 + void *back; 205 + 206 + u8 __iomem *hw_addr; 207 + unsigned long io_base; 208 + 209 + struct igc_mac_info mac; 210 + struct igc_fc_info fc; 211 + struct igc_nvm_info nvm; 212 + struct igc_phy_info phy; 213 + 214 + struct igc_bus_info bus; 215 + 216 + union { 217 + struct igc_dev_spec_base _base; 218 + } dev_spec; 219 + 220 + u16 device_id; 221 + u16 subsystem_vendor_id; 222 + u16 subsystem_device_id; 223 + u16 vendor_id; 224 + 225 + u8 revision_id; 226 + }; 227 + 228 + /* Statistics counters collected by the MAC */ 229 + struct igc_hw_stats { 230 + u64 crcerrs; 231 + u64 algnerrc; 232 + u64 symerrs; 233 + u64 rxerrc; 234 + u64 mpc; 235 + u64 scc; 236 + u64 ecol; 237 + u64 mcc; 238 + u64 latecol; 239 + u64 colc; 240 + u64 dc; 241 + u64 tncrs; 242 + u64 sec; 243 + u64 cexterr; 244 + u64 rlec; 245 + u64 xonrxc; 246 + u64 xontxc; 247 + u64 xoffrxc; 248 + u64 xofftxc; 249 + u64 fcruc; 250 + u64 prc64; 251 + u64 prc127; 252 + u64 prc255; 253 + u64 prc511; 254 + u64 prc1023; 255 + u64 prc1522; 256 + u64 gprc; 257 + u64 bprc; 258 + u64 mprc; 259 + u64 gptc; 260 + u64 gorc; 261 + u64 gotc; 262 + u64 rnbc; 263 + u64 ruc; 264 + u64 rfc; 265 + u64 roc; 266 + u64 rjc; 267 + u64 mgprc; 268 + u64 mgpdc; 269 + u64 mgptc; 270 + u64 tor; 271 + u64 tot; 272 + u64 tpr; 273 + u64 tpt; 274 + u64 ptc64; 275 + u64 ptc127; 276 + u64 ptc255; 277 + u64 ptc511; 278 + u64 ptc1023; 279 + u64 ptc1522; 280 + u64 mptc; 281 + u64 bptc; 282 + u64 tsctc; 283 + u64 tsctfc; 284 + u64 iac; 285 + u64 icrxptc; 286 + u64 icrxatc; 287 + u64 ictxptc; 288 + u64 ictxatc; 289 + u64 ictxqec; 290 + u64 ictxqmtc; 291 + u64 icrxdmtc; 292 + u64 icrxoc; 293 + u64 cbtmpc; 294 + u64 htdpmc; 295 + u64 cbrdpc; 296 + u64 cbrmpc; 297 + u64 rpthc; 298 + u64 hgptc; 299 + u64 htcbdpc; 
300 + u64 hgorc; 301 + u64 hgotc; 302 + u64 lenerrs; 303 + u64 scvpc; 304 + u64 hrmpc; 305 + u64 doosync; 306 + u64 o2bgptc; 307 + u64 o2bspc; 308 + u64 b2ospc; 309 + u64 b2ogprc; 310 + }; 311 + 312 + struct net_device *igc_get_hw_dev(struct igc_hw *hw); 313 + #define hw_dbg(format, arg...) \ 314 + netdev_dbg(igc_get_hw_dev(hw), format, ##arg) 315 + 316 + s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value); 317 + s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value); 318 + void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value); 319 + void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value); 320 + 321 + #endif /* _IGC_HW_H_ */
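Everything hardware-specific above is reached through the embedded ops tables (filled in per device via igc_base_info) rather than through direct calls. A minimal sketch of that dispatch pattern (illustration only, not part of the patch; the helper name is invented):

/* Hypothetical helper: read one NVM word through the per-device ops
 * table, so the same call works for flash-backed and iNVM-only parts.
 */
static s32 example_read_nvm_word(struct igc_hw *hw, u16 offset, u16 *word)
{
	if (!hw->nvm.ops.read)
		return -IGC_ERR_NVM;

	return hw->nvm.ops.read(hw, offset, 1, word);
}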
+490
drivers/net/ethernet/intel/igc/igc_i225.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2018 Intel Corporation */ 3 + 4 + #include <linux/delay.h> 5 + 6 + #include "igc_hw.h" 7 + 8 + /** 9 + * igc_acquire_nvm_i225 - Acquire exclusive access to EEPROM 10 + * @hw: pointer to the HW structure 11 + * 12 + * Acquire the necessary semaphores for exclusive access to the EEPROM. 13 + * Set the EEPROM access request bit and wait for EEPROM access grant bit. 14 + * Return successful if access grant bit set, else clear the request for 15 + * EEPROM access and return -IGC_ERR_NVM (-1). 16 + */ 17 + static s32 igc_acquire_nvm_i225(struct igc_hw *hw) 18 + { 19 + return igc_acquire_swfw_sync_i225(hw, IGC_SWFW_EEP_SM); 20 + } 21 + 22 + /** 23 + * igc_release_nvm_i225 - Release exclusive access to EEPROM 24 + * @hw: pointer to the HW structure 25 + * 26 + * Stop any current commands to the EEPROM and clear the EEPROM request bit, 27 + * then release the semaphores acquired. 28 + */ 29 + static void igc_release_nvm_i225(struct igc_hw *hw) 30 + { 31 + igc_release_swfw_sync_i225(hw, IGC_SWFW_EEP_SM); 32 + } 33 + 34 + /** 35 + * igc_get_hw_semaphore_i225 - Acquire hardware semaphore 36 + * @hw: pointer to the HW structure 37 + * 38 + * Acquire the HW semaphore to access the PHY or NVM 39 + */ 40 + static s32 igc_get_hw_semaphore_i225(struct igc_hw *hw) 41 + { 42 + s32 timeout = hw->nvm.word_size + 1; 43 + s32 i = 0; 44 + u32 swsm; 45 + 46 + /* Get the SW semaphore */ 47 + while (i < timeout) { 48 + swsm = rd32(IGC_SWSM); 49 + if (!(swsm & IGC_SWSM_SMBI)) 50 + break; 51 + 52 + usleep_range(500, 600); 53 + i++; 54 + } 55 + 56 + if (i == timeout) { 57 + /* In rare circumstances, the SW semaphore may already be held 58 + * unintentionally. Clear the semaphore once before giving up. 59 + */ 60 + if (hw->dev_spec._base.clear_semaphore_once) { 61 + hw->dev_spec._base.clear_semaphore_once = false; 62 + igc_put_hw_semaphore(hw); 63 + for (i = 0; i < timeout; i++) { 64 + swsm = rd32(IGC_SWSM); 65 + if (!(swsm & IGC_SWSM_SMBI)) 66 + break; 67 + 68 + usleep_range(500, 600); 69 + } 70 + } 71 + 72 + /* If we do not have the semaphore here, we have to give up. */ 73 + if (i == timeout) { 74 + hw_dbg("Driver can't access device - SMBI bit is set.\n"); 75 + return -IGC_ERR_NVM; 76 + } 77 + } 78 + 79 + /* Get the FW semaphore. */ 80 + for (i = 0; i < timeout; i++) { 81 + swsm = rd32(IGC_SWSM); 82 + wr32(IGC_SWSM, swsm | IGC_SWSM_SWESMBI); 83 + 84 + /* Semaphore acquired if bit latched */ 85 + if (rd32(IGC_SWSM) & IGC_SWSM_SWESMBI) 86 + break; 87 + 88 + usleep_range(500, 600); 89 + } 90 + 91 + if (i == timeout) { 92 + /* Release semaphores */ 93 + igc_put_hw_semaphore(hw); 94 + hw_dbg("Driver can't access the NVM\n"); 95 + return -IGC_ERR_NVM; 96 + } 97 + 98 + return 0; 99 + } 100 + 101 + /** 102 + * igc_acquire_swfw_sync_i225 - Acquire SW/FW semaphore 103 + * @hw: pointer to the HW structure 104 + * @mask: specifies which semaphore to acquire 105 + * 106 + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask 107 + * will also specify which port we're acquiring the lock for.
108 + */ 109 + s32 igc_acquire_swfw_sync_i225(struct igc_hw *hw, u16 mask) 110 + { 111 + s32 i = 0, timeout = 200; 112 + u32 fwmask = mask << 16; 113 + u32 swmask = mask; 114 + s32 ret_val = 0; 115 + u32 swfw_sync; 116 + 117 + while (i < timeout) { 118 + if (igc_get_hw_semaphore_i225(hw)) { 119 + ret_val = -IGC_ERR_SWFW_SYNC; 120 + goto out; 121 + } 122 + 123 + swfw_sync = rd32(IGC_SW_FW_SYNC); 124 + if (!(swfw_sync & (fwmask | swmask))) 125 + break; 126 + 127 + /* Firmware currently using resource (fwmask) */ 128 + igc_put_hw_semaphore(hw); 129 + mdelay(5); 130 + i++; 131 + } 132 + 133 + if (i == timeout) { 134 + hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); 135 + ret_val = -IGC_ERR_SWFW_SYNC; 136 + goto out; 137 + } 138 + 139 + swfw_sync |= swmask; 140 + wr32(IGC_SW_FW_SYNC, swfw_sync); 141 + 142 + igc_put_hw_semaphore(hw); 143 + out: 144 + return ret_val; 145 + } 146 + 147 + /** 148 + * igc_release_swfw_sync_i225 - Release SW/FW semaphore 149 + * @hw: pointer to the HW structure 150 + * @mask: specifies which semaphore to release 151 + * 152 + * Release the SW/FW semaphore used to access the PHY or NVM. The mask 153 + * will also specify which port we're releasing the lock for. 154 + */ 155 + void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask) 156 + { 157 + u32 swfw_sync; 158 + 159 + while (igc_get_hw_semaphore_i225(hw)) 160 + ; /* Empty */ 161 + 162 + swfw_sync = rd32(IGC_SW_FW_SYNC); 163 + swfw_sync &= ~mask; 164 + wr32(IGC_SW_FW_SYNC, swfw_sync); 165 + 166 + igc_put_hw_semaphore(hw); 167 + } 168 + 169 + /** 170 + * igc_read_nvm_srrd_i225 - Reads Shadow Ram using EERD register 171 + * @hw: pointer to the HW structure 172 + * @offset: offset of word in the Shadow Ram to read 173 + * @words: number of words to read 174 + * @data: word read from the Shadow Ram 175 + * 176 + * Reads a 16 bit word from the Shadow Ram using the EERD register. 177 + * Uses necessary synchronization semaphores. 178 + */ 179 + static s32 igc_read_nvm_srrd_i225(struct igc_hw *hw, u16 offset, u16 words, 180 + u16 *data) 181 + { 182 + s32 status = 0; 183 + u16 i, count; 184 + 185 + /* We cannot hold synchronization semaphores for too long, 186 + * because of the forceful takeover procedure. However, it is more 187 + * efficient to read in bursts than synchronizing access for each word. 188 + */ 189 + for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) { 190 + count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ? 191 + IGC_EERD_EEWR_MAX_COUNT : (words - i); 192 + 193 + status = hw->nvm.ops.acquire(hw); 194 + if (status) 195 + break; 196 + 197 + status = igc_read_nvm_eerd(hw, offset, count, data + i); 198 + hw->nvm.ops.release(hw); 199 + if (status) 200 + break; 201 + } 202 + 203 + return status; 204 + } 205 + 206 + /** 207 + * igc_write_nvm_srwr - Write to Shadow Ram using EEWR 208 + * @hw: pointer to the HW structure 209 + * @offset: offset within the Shadow Ram to be written to 210 + * @words: number of words to write 211 + * @data: 16 bit word(s) to be written to the Shadow Ram 212 + * 213 + * Writes data to Shadow Ram at offset using EEWR register. 214 + * 215 + * If igc_update_nvm_checksum is not called after this function, the 216 + * Shadow Ram will most likely contain an invalid checksum.
217 + */ 218 + static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words, 219 + u16 *data) 220 + { 221 + struct igc_nvm_info *nvm = &hw->nvm; 222 + u32 attempts = 100000; 223 + u32 i, k, eewr = 0; 224 + s32 ret_val = 0; 225 + 226 + /* A check for invalid values: offset too large, too many words, 227 + * too many words for the offset, and not enough words. 228 + */ 229 + if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) || 230 + words == 0) { 231 + hw_dbg("nvm parameter(s) out of bounds\n"); 232 + ret_val = -IGC_ERR_NVM; 233 + goto out; 234 + } 235 + 236 + for (i = 0; i < words; i++) { 237 + eewr = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) | 238 + (data[i] << IGC_NVM_RW_REG_DATA) | 239 + IGC_NVM_RW_REG_START; 240 + 241 + wr32(IGC_SRWR, eewr); 242 + ret_val = -IGC_ERR_NVM; 243 + for (k = 0; k < attempts; k++) { 244 + if (IGC_NVM_RW_REG_DONE & 245 + rd32(IGC_SRWR)) { 246 + ret_val = 0; 247 + break; 248 + } 249 + udelay(5); 250 + } 251 + 252 + if (ret_val) { 253 + hw_dbg("Shadow RAM write EEWR timed out\n"); 254 + break; 255 + } 256 + } 257 + 258 + out: 259 + return ret_val; 260 + } 261 + 262 + /** 263 + * igc_write_nvm_srwr_i225 - Write to Shadow RAM using EEWR 264 + * @hw: pointer to the HW structure 265 + * @offset: offset within the Shadow RAM to be written to 266 + * @words: number of words to write 267 + * @data: 16 bit word(s) to be written to the Shadow RAM 268 + * 269 + * Writes data to Shadow RAM at offset using EEWR register. 270 + * 271 + * If igc_update_nvm_checksum is not called after this function, the 272 + * data will not be committed to FLASH, and the Shadow RAM will most likely 273 + * contain an invalid checksum. 274 + * 275 + * If error code is returned, data and Shadow RAM may be inconsistent - buffer 276 + * partially written. 277 + */ 278 + static s32 igc_write_nvm_srwr_i225(struct igc_hw *hw, u16 offset, u16 words, 279 + u16 *data) 280 + { 281 + s32 status = 0; 282 + u16 i, count; 283 + 284 + /* We cannot hold synchronization semaphores for too long, 285 + * because of the forceful takeover procedure. However, it is more 286 + * efficient to write in bursts than synchronizing access for each word. 287 + */ 288 + for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) { 289 + count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ? 290 + IGC_EERD_EEWR_MAX_COUNT : (words - i); 291 + 292 + status = hw->nvm.ops.acquire(hw); 293 + if (status) 294 + break; 295 + 296 + status = igc_write_nvm_srwr(hw, offset, count, data + i); 297 + hw->nvm.ops.release(hw); 298 + if (status) 299 + break; 300 + } 301 + 302 + return status; 303 + } 304 + 305 + /** 306 + * igc_validate_nvm_checksum_i225 - Validate EEPROM checksum 307 + * @hw: pointer to the HW structure 308 + * 309 + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM 310 + * and then verifies that the sum of the EEPROM is equal to 0xBABA. 311 + */ 312 + static s32 igc_validate_nvm_checksum_i225(struct igc_hw *hw) 313 + { 314 + s32 (*read_op_ptr)(struct igc_hw *hw, u16 offset, u16 count, 315 + u16 *data); 316 + s32 status = 0; 317 + 318 + status = hw->nvm.ops.acquire(hw); 319 + if (status) 320 + goto out; 321 + 322 + /* Replace the semaphore-grabbing read function with one that 323 + * skips the semaphore, since we have already taken the 324 + * semaphore here. 325 + */ 326 + read_op_ptr = hw->nvm.ops.read; 327 + hw->nvm.ops.read = igc_read_nvm_eerd; 328 + 329 + status = igc_validate_nvm_checksum(hw); 330 + 331 + /* Revert original read operation.
*/ 332 + hw->nvm.ops.read = read_op_ptr; 333 + 334 + hw->nvm.ops.release(hw); 335 + 336 + out: 337 + return status; 338 + } 339 + 340 + /** 341 + * igc_pool_flash_update_done_i225 - Pool FLUDONE status 342 + * @hw: pointer to the HW structure 343 + */ 344 + static s32 igc_pool_flash_update_done_i225(struct igc_hw *hw) 345 + { 346 + s32 ret_val = -IGC_ERR_NVM; 347 + u32 i, reg; 348 + 349 + for (i = 0; i < IGC_FLUDONE_ATTEMPTS; i++) { 350 + reg = rd32(IGC_EECD); 351 + if (reg & IGC_EECD_FLUDONE_I225) { 352 + ret_val = 0; 353 + break; 354 + } 355 + udelay(5); 356 + } 357 + 358 + return ret_val; 359 + } 360 + 361 + /** 362 + * igc_update_flash_i225 - Commit EEPROM to the flash 363 + * @hw: pointer to the HW structure 364 + */ 365 + static s32 igc_update_flash_i225(struct igc_hw *hw) 366 + { 367 + s32 ret_val = 0; 368 + u32 flup; 369 + 370 + ret_val = igc_pool_flash_update_done_i225(hw); 371 + if (ret_val == -IGC_ERR_NVM) { 372 + hw_dbg("Flash update time out\n"); 373 + goto out; 374 + } 375 + 376 + flup = rd32(IGC_EECD) | IGC_EECD_FLUPD_I225; 377 + wr32(IGC_EECD, flup); 378 + 379 + ret_val = igc_pool_flash_update_done_i225(hw); 380 + if (ret_val) 381 + hw_dbg("Flash update time out\n"); 382 + else 383 + hw_dbg("Flash update complete\n"); 384 + 385 + out: 386 + return ret_val; 387 + } 388 + 389 + /** 390 + * igc_update_nvm_checksum_i225 - Update EEPROM checksum 391 + * @hw: pointer to the HW structure 392 + * 393 + * Updates the EEPROM checksum by reading/adding each word of the EEPROM 394 + * up to the checksum. Then calculates the EEPROM checksum and writes the 395 + * value to the EEPROM. Next commit EEPROM data onto the Flash. 396 + */ 397 + static s32 igc_update_nvm_checksum_i225(struct igc_hw *hw) 398 + { 399 + u16 checksum = 0; 400 + s32 ret_val = 0; 401 + u16 i, nvm_data; 402 + 403 + /* Read the first word from the EEPROM. If this times out or fails, do 404 + * not continue or we could be in for a very long wait while every 405 + * EEPROM read fails 406 + */ 407 + ret_val = igc_read_nvm_eerd(hw, 0, 1, &nvm_data); 408 + if (ret_val) { 409 + hw_dbg("EEPROM read failed\n"); 410 + goto out; 411 + } 412 + 413 + ret_val = hw->nvm.ops.acquire(hw); 414 + if (ret_val) 415 + goto out; 416 + 417 + /* Do not use hw->nvm.ops.write, hw->nvm.ops.read 418 + * because we do not want to take the synchronization 419 + * semaphores twice here. 420 + */ 421 + 422 + for (i = 0; i < NVM_CHECKSUM_REG; i++) { 423 + ret_val = igc_read_nvm_eerd(hw, i, 1, &nvm_data); 424 + if (ret_val) { 425 + hw->nvm.ops.release(hw); 426 + hw_dbg("NVM Read Error while updating checksum.\n"); 427 + goto out; 428 + } 429 + checksum += nvm_data; 430 + } 431 + checksum = (u16)NVM_SUM - checksum; 432 + ret_val = igc_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1, 433 + &checksum); 434 + if (ret_val) { 435 + hw->nvm.ops.release(hw); 436 + hw_dbg("NVM Write Error while updating checksum.\n"); 437 + goto out; 438 + } 439 + 440 + hw->nvm.ops.release(hw); 441 + 442 + ret_val = igc_update_flash_i225(hw); 443 + 444 + out: 445 + return ret_val; 446 + } 447 + 448 + /** 449 + * igc_get_flash_presence_i225 - Check if flash device is detected 450 + * @hw: pointer to the HW structure 451 + */ 452 + bool igc_get_flash_presence_i225(struct igc_hw *hw) 453 + { 454 + bool ret_val = false; 455 + u32 eec = 0; 456 + 457 + eec = rd32(IGC_EECD); 458 + if (eec & IGC_EECD_FLASH_DETECTED_I225) 459 + ret_val = true; 460 + 461 + return ret_val; 462 + } 463 + 464 + /** 465 + * igc_init_nvm_params_i225 - Init NVM func ptrs. 
466 + * @hw: pointer to the HW structure 467 + */ 468 + s32 igc_init_nvm_params_i225(struct igc_hw *hw) 469 + { 470 + struct igc_nvm_info *nvm = &hw->nvm; 471 + 472 + nvm->ops.acquire = igc_acquire_nvm_i225; 473 + nvm->ops.release = igc_release_nvm_i225; 474 + 475 + /* NVM Function Pointers */ 476 + if (igc_get_flash_presence_i225(hw)) { 477 + hw->nvm.type = igc_nvm_flash_hw; 478 + nvm->ops.read = igc_read_nvm_srrd_i225; 479 + nvm->ops.write = igc_write_nvm_srwr_i225; 480 + nvm->ops.validate = igc_validate_nvm_checksum_i225; 481 + nvm->ops.update = igc_update_nvm_checksum_i225; 482 + } else { 483 + hw->nvm.type = igc_nvm_invm; 484 + nvm->ops.read = igc_read_nvm_eerd; 485 + nvm->ops.write = NULL; 486 + nvm->ops.validate = NULL; 487 + nvm->ops.update = NULL; 488 + } 489 + return 0; 490 + }
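For reference, the rule the validate/update pair above enforces: the words up to and including NVM_CHECKSUM_REG must sum to NVM_SUM (0xBABA), so the update path stores the difference at NVM_CHECKSUM_REG. A standalone sketch of that arithmetic (not patch code; assumes the NVM_SUM and NVM_CHECKSUM_REG values from igc_defines.h):

/* Illustration only: the value igc_update_nvm_checksum_i225() writes at
 * NVM_CHECKSUM_REG so that the first NVM_CHECKSUM_REG + 1 words sum to
 * NVM_SUM (0xBABA).
 */
static u16 example_nvm_checksum(const u16 *words)
{
	u16 sum = 0;
	int i;

	for (i = 0; i < NVM_CHECKSUM_REG; i++)
		sum += words[i];

	return (u16)NVM_SUM - sum;
}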
+13
drivers/net/ethernet/intel/igc/igc_i225.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Copyright (c) 2018 Intel Corporation */ 3 + 4 + #ifndef _IGC_I225_H_ 5 + #define _IGC_I225_H_ 6 + 7 + s32 igc_acquire_swfw_sync_i225(struct igc_hw *hw, u16 mask); 8 + void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask); 9 + 10 + s32 igc_init_nvm_params_i225(struct igc_hw *hw); 11 + bool igc_get_flash_presence_i225(struct igc_hw *hw); 12 + 13 + #endif
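A usage sketch (not part of the patch; the wrapper name is invented) of the acquire/release discipline these helpers expose. Any PHY or NVM access is expected to be bracketed this way, shown here with the EEPROM semaphore mask used elsewhere in the patch:

/* Illustration only: the locking discipline around shared HW resources. */
static s32 example_with_eeprom_semaphore(struct igc_hw *hw)
{
	s32 ret_val;

	ret_val = igc_acquire_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);
	if (ret_val)
		return ret_val;

	/* ... EEPROM accesses would go here ... */

	igc_release_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);
	return 0;
}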
+806
drivers/net/ethernet/intel/igc/igc_mac.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2018 Intel Corporation */ 3 + 4 + #include <linux/pci.h> 5 + #include <linux/delay.h> 6 + 7 + #include "igc_mac.h" 8 + #include "igc_hw.h" 9 + 10 + /* forward declaration */ 11 + static s32 igc_set_default_fc(struct igc_hw *hw); 12 + static s32 igc_set_fc_watermarks(struct igc_hw *hw); 13 + 14 + /** 15 + * igc_disable_pcie_master - Disables PCI-express master access 16 + * @hw: pointer to the HW structure 17 + * 18 + * Returns 0 if successful, else returns -IGC_ERR_MASTER_REQUESTS_PENDING 19 + * (-10) if the master disable bit has not caused the master requests to 20 + * be disabled. 21 + * 22 + * Disables PCI-Express master access and verifies there are no pending 23 + * requests. 24 + */ 25 + s32 igc_disable_pcie_master(struct igc_hw *hw) 26 + { 27 + s32 timeout = MASTER_DISABLE_TIMEOUT; 28 + s32 ret_val = 0; 29 + u32 ctrl; 30 + 31 + ctrl = rd32(IGC_CTRL); 32 + ctrl |= IGC_CTRL_GIO_MASTER_DISABLE; 33 + wr32(IGC_CTRL, ctrl); 34 + 35 + while (timeout) { 36 + if (!(rd32(IGC_STATUS) & 37 + IGC_STATUS_GIO_MASTER_ENABLE)) 38 + break; 39 + usleep_range(2000, 3000); 40 + timeout--; 41 + } 42 + 43 + if (!timeout) { 44 + hw_dbg("Master requests are pending.\n"); 45 + ret_val = -IGC_ERR_MASTER_REQUESTS_PENDING; 46 + goto out; 47 + } 48 + 49 + out: 50 + return ret_val; 51 + } 52 + 53 + /** 54 + * igc_init_rx_addrs - Initialize receive addresses 55 + * @hw: pointer to the HW structure 56 + * @rar_count: number of receive address registers 57 + * 58 + * Setup the receive address registers by setting the base receive address 59 + * register to the device's MAC address and clearing all the other receive 60 + * address registers to 0. 61 + */ 62 + void igc_init_rx_addrs(struct igc_hw *hw, u16 rar_count) 63 + { 64 + u8 mac_addr[ETH_ALEN] = {0}; 65 + u32 i; 66 + 67 + /* Setup the receive address */ 68 + hw_dbg("Programming MAC Address into RAR[0]\n"); 69 + 70 + hw->mac.ops.rar_set(hw, hw->mac.addr, 0); 71 + 72 + /* Zero out the other (rar_entry_count - 1) receive addresses */ 73 + hw_dbg("Clearing RAR[1-%u]\n", rar_count - 1); 74 + for (i = 1; i < rar_count; i++) 75 + hw->mac.ops.rar_set(hw, mac_addr, i); 76 + } 77 + 78 + /** 79 + * igc_setup_link - Setup flow control and link settings 80 + * @hw: pointer to the HW structure 81 + * 82 + * Determines which flow control settings to use, then configures flow 83 + * control. Calls the appropriate media-specific link configuration 84 + * function. Assuming the adapter has a valid link partner, a valid link 85 + * should be established. Assumes the hardware has previously been reset 86 + * and the transmitter and receiver are not enabled. 87 + */ 88 + s32 igc_setup_link(struct igc_hw *hw) 89 + { 90 + s32 ret_val = 0; 91 + 92 + /* In the case of the phy reset being blocked, we already have a link. 93 + * We do not need to set it up again. 94 + */ 95 + if (igc_check_reset_block(hw)) 96 + goto out; 97 + 98 + /* If requested flow control is set to default, set flow control 99 + * based on the EEPROM flow control settings. 100 + */ 101 + if (hw->fc.requested_mode == igc_fc_default) { 102 + ret_val = igc_set_default_fc(hw); 103 + if (ret_val) 104 + goto out; 105 + } 106 + 107 + /* We want to save off the original Flow Control configuration just 108 + * in case we get disconnected and then reconnected into a different 109 + * hub or switch with different Flow Control capabilities.
110 + */ 111 + hw->fc.current_mode = hw->fc.requested_mode; 112 + 113 + hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); 114 + 115 + /* Call the necessary media_type subroutine to configure the link. */ 116 + ret_val = hw->mac.ops.setup_physical_interface(hw); 117 + if (ret_val) 118 + goto out; 119 + 120 + /* Initialize the flow control address, type, and PAUSE timer 121 + * registers to their default values. This is done even if flow 122 + * control is disabled, because it does not hurt anything to 123 + * initialize these registers. 124 + */ 125 + hw_dbg("Initializing the Flow Control address, type and timer regs\n"); 126 + wr32(IGC_FCT, FLOW_CONTROL_TYPE); 127 + wr32(IGC_FCAH, FLOW_CONTROL_ADDRESS_HIGH); 128 + wr32(IGC_FCAL, FLOW_CONTROL_ADDRESS_LOW); 129 + 130 + wr32(IGC_FCTTV, hw->fc.pause_time); 131 + 132 + ret_val = igc_set_fc_watermarks(hw); 133 + 134 + out: 135 + return ret_val; 136 + } 137 + 138 + /** 139 + * igc_set_default_fc - Set flow control default values 140 + * @hw: pointer to the HW structure 141 + * 142 + * Read the EEPROM for the default values for flow control and store the 143 + * values. 144 + */ 145 + static s32 igc_set_default_fc(struct igc_hw *hw) 146 + { 147 + hw->fc.requested_mode = igc_fc_full; 148 + return 0; 149 + } 150 + 151 + /** 152 + * igc_force_mac_fc - Force the MAC's flow control settings 153 + * @hw: pointer to the HW structure 154 + * 155 + * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the 156 + * device control register to reflect the adapter settings. TFCE and RFCE 157 + * need to be explicitly set by software when a copper PHY is used because 158 + * autonegotiation is managed by the PHY rather than the MAC. Software must 159 + * also configure these bits when link is forced on a fiber connection. 160 + */ 161 + s32 igc_force_mac_fc(struct igc_hw *hw) 162 + { 163 + s32 ret_val = 0; 164 + u32 ctrl; 165 + 166 + ctrl = rd32(IGC_CTRL); 167 + 168 + /* Because we didn't get link via the internal auto-negotiation 169 + * mechanism (we either forced link or we got link via PHY 170 + * auto-neg), we have to manually enable/disable transmit and 171 + * receive flow control. 172 + * 173 + * The "Case" statement below enables/disables flow control 174 + * according to the "hw->fc.current_mode" parameter. 175 + * 176 + * The possible values of the "fc" parameter are: 177 + * 0: Flow control is completely disabled 178 + * 1: Rx flow control is enabled (we can receive pause 179 + * frames but not send pause frames). 180 + * 2: Tx flow control is enabled (we can send pause 181 + * frames but we do not receive pause frames). 182 + * 3: Both Rx and TX flow control (symmetric) is enabled. 183 + * other: No other values should be possible at this point.
184 + */ 185 + hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode); 186 + 187 + switch (hw->fc.current_mode) { 188 + case igc_fc_none: 189 + ctrl &= (~(IGC_CTRL_TFCE | IGC_CTRL_RFCE)); 190 + break; 191 + case igc_fc_rx_pause: 192 + ctrl &= (~IGC_CTRL_TFCE); 193 + ctrl |= IGC_CTRL_RFCE; 194 + break; 195 + case igc_fc_tx_pause: 196 + ctrl &= (~IGC_CTRL_RFCE); 197 + ctrl |= IGC_CTRL_TFCE; 198 + break; 199 + case igc_fc_full: 200 + ctrl |= (IGC_CTRL_TFCE | IGC_CTRL_RFCE); 201 + break; 202 + default: 203 + hw_dbg("Flow control param set incorrectly\n"); 204 + ret_val = -IGC_ERR_CONFIG; 205 + goto out; 206 + } 207 + 208 + wr32(IGC_CTRL, ctrl); 209 + 210 + out: 211 + return ret_val; 212 + } 213 + 214 + /** 215 + * igc_set_fc_watermarks - Set flow control high/low watermarks 216 + * @hw: pointer to the HW structure 217 + * 218 + * Sets the flow control high/low threshold (watermark) registers. If 219 + * flow control XON frame transmission is enabled, then set XON frame 220 + * transmission as well. 221 + */ 222 + static s32 igc_set_fc_watermarks(struct igc_hw *hw) 223 + { 224 + u32 fcrtl = 0, fcrth = 0; 225 + 226 + /* Set the flow control receive threshold registers. Normally, 227 + * these registers will be set to a default threshold that may be 228 + * adjusted later by the driver's runtime code. However, if the 229 + * ability to transmit pause frames is not enabled, then these 230 + * registers will be set to 0. 231 + */ 232 + if (hw->fc.current_mode & igc_fc_tx_pause) { 233 + /* We need to set up the Receive Threshold high and low water 234 + * marks as well as (optionally) enabling the transmission of 235 + * XON frames. 236 + */ 237 + fcrtl = hw->fc.low_water; 238 + if (hw->fc.send_xon) 239 + fcrtl |= IGC_FCRTL_XONE; 240 + 241 + fcrth = hw->fc.high_water; 242 + } 243 + wr32(IGC_FCRTL, fcrtl); 244 + wr32(IGC_FCRTH, fcrth); 245 + 246 + return 0; 247 + } 248 + 249 + /** 250 + * igc_clear_hw_cntrs_base - Clear base hardware counters 251 + * @hw: pointer to the HW structure 252 + * 253 + * Clears the base hardware counters by reading the counter registers. 
254 + */ 255 + void igc_clear_hw_cntrs_base(struct igc_hw *hw) 256 + { 257 + rd32(IGC_CRCERRS); 258 + rd32(IGC_SYMERRS); 259 + rd32(IGC_MPC); 260 + rd32(IGC_SCC); 261 + rd32(IGC_ECOL); 262 + rd32(IGC_MCC); 263 + rd32(IGC_LATECOL); 264 + rd32(IGC_COLC); 265 + rd32(IGC_DC); 266 + rd32(IGC_SEC); 267 + rd32(IGC_RLEC); 268 + rd32(IGC_XONRXC); 269 + rd32(IGC_XONTXC); 270 + rd32(IGC_XOFFRXC); 271 + rd32(IGC_XOFFTXC); 272 + rd32(IGC_FCRUC); 273 + rd32(IGC_GPRC); 274 + rd32(IGC_BPRC); 275 + rd32(IGC_MPRC); 276 + rd32(IGC_GPTC); 277 + rd32(IGC_GORCL); 278 + rd32(IGC_GORCH); 279 + rd32(IGC_GOTCL); 280 + rd32(IGC_GOTCH); 281 + rd32(IGC_RNBC); 282 + rd32(IGC_RUC); 283 + rd32(IGC_RFC); 284 + rd32(IGC_ROC); 285 + rd32(IGC_RJC); 286 + rd32(IGC_TORL); 287 + rd32(IGC_TORH); 288 + rd32(IGC_TOTL); 289 + rd32(IGC_TOTH); 290 + rd32(IGC_TPR); 291 + rd32(IGC_TPT); 292 + rd32(IGC_MPTC); 293 + rd32(IGC_BPTC); 294 + 295 + rd32(IGC_PRC64); 296 + rd32(IGC_PRC127); 297 + rd32(IGC_PRC255); 298 + rd32(IGC_PRC511); 299 + rd32(IGC_PRC1023); 300 + rd32(IGC_PRC1522); 301 + rd32(IGC_PTC64); 302 + rd32(IGC_PTC127); 303 + rd32(IGC_PTC255); 304 + rd32(IGC_PTC511); 305 + rd32(IGC_PTC1023); 306 + rd32(IGC_PTC1522); 307 + 308 + rd32(IGC_ALGNERRC); 309 + rd32(IGC_RXERRC); 310 + rd32(IGC_TNCRS); 311 + rd32(IGC_CEXTERR); 312 + rd32(IGC_TSCTC); 313 + rd32(IGC_TSCTFC); 314 + 315 + rd32(IGC_MGTPRC); 316 + rd32(IGC_MGTPDC); 317 + rd32(IGC_MGTPTC); 318 + 319 + rd32(IGC_IAC); 320 + rd32(IGC_ICRXOC); 321 + 322 + rd32(IGC_ICRXPTC); 323 + rd32(IGC_ICRXATC); 324 + rd32(IGC_ICTXPTC); 325 + rd32(IGC_ICTXATC); 326 + rd32(IGC_ICTXQEC); 327 + rd32(IGC_ICTXQMTC); 328 + rd32(IGC_ICRXDMTC); 329 + 330 + rd32(IGC_CBTMPC); 331 + rd32(IGC_HTDPMC); 332 + rd32(IGC_CBRMPC); 333 + rd32(IGC_RPTHC); 334 + rd32(IGC_HGPTC); 335 + rd32(IGC_HTCBDPC); 336 + rd32(IGC_HGORCL); 337 + rd32(IGC_HGORCH); 338 + rd32(IGC_HGOTCL); 339 + rd32(IGC_HGOTCH); 340 + rd32(IGC_LENERRS); 341 + } 342 + 343 + /** 344 + * igc_rar_set - Set receive address register 345 + * @hw: pointer to the HW structure 346 + * @addr: pointer to the receive address 347 + * @index: receive address array register 348 + * 349 + * Sets the receive address array register at index to the address passed 350 + * in by addr. 351 + */ 352 + void igc_rar_set(struct igc_hw *hw, u8 *addr, u32 index) 353 + { 354 + u32 rar_low, rar_high; 355 + 356 + /* HW expects these in little endian so we reverse the byte order 357 + * from network order (big endian) to little endian 358 + */ 359 + rar_low = ((u32)addr[0] | 360 + ((u32)addr[1] << 8) | 361 + ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); 362 + 363 + rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); 364 + 365 + /* If MAC address zero, no need to set the AV bit */ 366 + if (rar_low || rar_high) 367 + rar_high |= IGC_RAH_AV; 368 + 369 + /* Some bridges will combine consecutive 32-bit writes into 370 + * a single burst write, which will malfunction on some parts. 371 + * The flushes avoid this. 372 + */ 373 + wr32(IGC_RAL(index), rar_low); 374 + wrfl(); 375 + wr32(IGC_RAH(index), rar_high); 376 + wrfl(); 377 + } 378 + 379 + /** 380 + * igc_check_for_copper_link - Check for link (Copper) 381 + * @hw: pointer to the HW structure 382 + * 383 + * Checks to see if the link status of the hardware has changed. If a 384 + * change in link status has been detected, then we read the PHY registers 385 + * to get the current speed/duplex if link exists.
386 + */ 387 + s32 igc_check_for_copper_link(struct igc_hw *hw) 388 + { 389 + struct igc_mac_info *mac = &hw->mac; 390 + s32 ret_val; 391 + bool link; 392 + 393 + /* We only want to go out to the PHY registers to see if Auto-Neg 394 + * has completed and/or if our link status has changed. The 395 + * get_link_status flag is set upon receiving a Link Status 396 + * Change or Rx Sequence Error interrupt. 397 + */ 398 + if (!mac->get_link_status) { 399 + ret_val = 0; 400 + goto out; 401 + } 402 + 403 + /* First we want to see if the MII Status Register reports 404 + * link. If so, then we want to get the current speed/duplex 405 + * of the PHY. 406 + */ 407 + ret_val = igc_phy_has_link(hw, 1, 0, &link); 408 + if (ret_val) 409 + goto out; 410 + 411 + if (!link) 412 + goto out; /* No link detected */ 413 + 414 + mac->get_link_status = false; 415 + 416 + /* Check if there was DownShift, must be checked 417 + * immediately after link-up 418 + */ 419 + igc_check_downshift(hw); 420 + 421 + /* If we are forcing speed/duplex, then we simply return since 422 + * we have already determined whether we have link or not. 423 + */ 424 + if (!mac->autoneg) { 425 + ret_val = -IGC_ERR_CONFIG; 426 + goto out; 427 + } 428 + 429 + /* Auto-Neg is enabled. Auto Speed Detection takes care 430 + * of MAC speed/duplex configuration. So we only need to 431 + * configure Collision Distance in the MAC. 432 + */ 433 + igc_config_collision_dist(hw); 434 + 435 + /* Configure Flow Control now that Auto-Neg has completed. 436 + * First, we need to restore the desired flow control 437 + * settings because we may have had to re-autoneg with a 438 + * different link partner. 439 + */ 440 + ret_val = igc_config_fc_after_link_up(hw); 441 + if (ret_val) 442 + hw_dbg("Error configuring flow control\n"); 443 + 444 + out: 445 + return ret_val; 446 + } 447 + 448 + /** 449 + * igc_config_collision_dist - Configure collision distance 450 + * @hw: pointer to the HW structure 451 + * 452 + * Configures the collision distance to the default value and is used 453 + * during link setup. Currently no func pointer exists and all 454 + * implementations are handled in the generic version of this function. 455 + */ 456 + void igc_config_collision_dist(struct igc_hw *hw) 457 + { 458 + u32 tctl; 459 + 460 + tctl = rd32(IGC_TCTL); 461 + 462 + tctl &= ~IGC_TCTL_COLD; 463 + tctl |= IGC_COLLISION_DISTANCE << IGC_COLD_SHIFT; 464 + 465 + wr32(IGC_TCTL, tctl); 466 + wrfl(); 467 + } 468 + 469 + /** 470 + * igc_config_fc_after_link_up - Configures flow control after link 471 + * @hw: pointer to the HW structure 472 + * 473 + * Checks the status of auto-negotiation after link up to ensure that the 474 + * speed and duplex were not forced. If the link needed to be forced, then 475 + * flow control needs to be forced also. If auto-negotiation is enabled 476 + * and did not fail, then we configure flow control based on our link 477 + * partner. 478 + */ 479 + s32 igc_config_fc_after_link_up(struct igc_hw *hw) 480 + { 481 + u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; 482 + struct igc_mac_info *mac = &hw->mac; 483 + u16 speed, duplex; 484 + s32 ret_val = 0; 485 + 486 + /* Check for the case where auto-neg failed and we had to force 487 + * link. In this case, we need to force the configuration of the 488 + * MAC to match the "fc" parameter.
489 + */ 490 + if (mac->autoneg_failed) { 491 + if (hw->phy.media_type == igc_media_type_copper) 492 + ret_val = igc_force_mac_fc(hw); 493 + } 494 + 495 + if (ret_val) { 496 + hw_dbg("Error forcing flow control settings\n"); 497 + goto out; 498 + } 499 + 500 + /* Check for the case where we have copper media and auto-neg is 501 + * enabled. In this case, we need to check and see if Auto-Neg 502 + * has completed, and if so, how the PHY and link partner have 503 + * flow control configured. 504 + */ 505 + if (hw->phy.media_type == igc_media_type_copper && mac->autoneg) { 506 + /* Read the MII Status Register and check to see if AutoNeg 507 + * has completed. We read this twice because this reg has 508 + * some "sticky" (latched) bits. 509 + */ 510 + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, 511 + &mii_status_reg); 512 + if (ret_val) 513 + goto out; 514 + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, 515 + &mii_status_reg); 516 + if (ret_val) 517 + goto out; 518 + 519 + if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { 520 + hw_dbg("Copper PHY and Auto Neg has not completed.\n"); 521 + goto out; 522 + } 523 + 524 + /* The AutoNeg process has completed, so we now need to 525 + * read both the Auto Negotiation Advertisement 526 + * Register (Address 4) and the Auto-Negotiation Base 527 + * Page Ability Register (Address 5) to determine how 528 + * flow control was negotiated. 529 + */ 530 + ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV, 531 + &mii_nway_adv_reg); 532 + if (ret_val) 533 + goto out; 534 + ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY, 535 + &mii_nway_lp_ability_reg); 536 + if (ret_val) 537 + goto out; 538 + /* Two bits in the Auto Negotiation Advertisement Register 539 + * (Address 4) and two bits in the Auto Negotiation Base 540 + * Page Ability Register (Address 5) determine flow control 541 + * for both the PHY and the link partner. The following 542 + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, 543 + * 1999, describes these PAUSE resolution bits and how flow 544 + * control is determined based upon these settings. 545 + * NOTE: DC = Don't Care 546 + * 547 + * LOCAL DEVICE | LINK PARTNER 548 + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution 549 + *-------|---------|-------|---------|-------------------- 550 + * 0 | 0 | DC | DC | igc_fc_none 551 + * 0 | 1 | 0 | DC | igc_fc_none 552 + * 0 | 1 | 1 | 0 | igc_fc_none 553 + * 0 | 1 | 1 | 1 | igc_fc_tx_pause 554 + * 1 | 0 | 0 | DC | igc_fc_none 555 + * 1 | DC | 1 | DC | igc_fc_full 556 + * 1 | 1 | 0 | 0 | igc_fc_none 557 + * 1 | 1 | 0 | 1 | igc_fc_rx_pause 558 + * 559 + * Are both PAUSE bits set to 1? If so, this implies 560 + * Symmetric Flow Control is enabled at both ends. The 561 + * ASM_DIR bits are irrelevant per the spec. 562 + * 563 + * For Symmetric Flow Control: 564 + * 565 + * LOCAL DEVICE | LINK PARTNER 566 + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result 567 + *-------|---------|-------|---------|-------------------- 568 + * 1 | DC | 1 | DC | igc_fc_full 569 + * 570 + */ 571 + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && 572 + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { 573 + /* Now we need to check if the user selected RX ONLY 574 + * of pause frames. In this case, we had to advertise 575 + * FULL flow control because we could not advertise RX 576 + * ONLY. Hence, we must now check to see if we need to 577 + * turn OFF the TRANSMISSION of PAUSE frames.
578 + */ 579 + if (hw->fc.requested_mode == igc_fc_full) { 580 + hw->fc.current_mode = igc_fc_full; 581 + hw_dbg("Flow Control = FULL.\n"); 582 + } else { 583 + hw->fc.current_mode = igc_fc_rx_pause; 584 + hw_dbg("Flow Control = RX PAUSE frames only.\n"); 585 + } 586 + } 587 + 588 + /* For receiving PAUSE frames ONLY. 589 + * 590 + * LOCAL DEVICE | LINK PARTNER 591 + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result 592 + *-------|---------|-------|---------|-------------------- 593 + * 0 | 1 | 1 | 1 | igc_fc_tx_pause 594 + */ 595 + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && 596 + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && 597 + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && 598 + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { 599 + hw->fc.current_mode = igc_fc_tx_pause; 600 + hw_dbg("Flow Control = TX PAUSE frames only.\n"); 601 + } 602 + /* For transmitting PAUSE frames ONLY. 603 + * 604 + * LOCAL DEVICE | LINK PARTNER 605 + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result 606 + *-------|---------|-------|---------|-------------------- 607 + * 1 | 1 | 0 | 1 | igc_fc_rx_pause 608 + */ 609 + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && 610 + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && 611 + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && 612 + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { 613 + hw->fc.current_mode = igc_fc_rx_pause; 614 + hw_dbg("Flow Control = RX PAUSE frames only.\n"); 615 + } 616 + /* Per the IEEE spec, at this point flow control should be 617 + * disabled. However, we want to consider that we could 618 + * be connected to a legacy switch that doesn't advertise 619 + * desired flow control, but can be forced on the link 620 + * partner. So if we advertised no flow control, that is 621 + * what we will resolve to. If we advertised some kind of 622 + * receive capability (Rx Pause Only or Full Flow Control) 623 + * and the link partner advertised none, we will configure 624 + * ourselves to enable Rx Flow Control only. We can do 625 + * this safely for two reasons: If the link partner really 626 + * didn't want flow control enabled, and we enable Rx, no 627 + * harm done since we won't be receiving any PAUSE frames 628 + * anyway. If the intent on the link partner was to have 629 + * flow control enabled, then by us enabling RX only, we 630 + * can at least receive pause frames and process them. 631 + * This is a good idea because in most cases we will be 632 + * asked to delay transmission of packets rather than 633 + * asking our link partner to pause transmission of 634 + * frames. 635 + */ 636 + else if ((hw->fc.requested_mode == igc_fc_none) || 637 + (hw->fc.requested_mode == igc_fc_tx_pause) || 638 + (hw->fc.strict_ieee)) { 639 + hw->fc.current_mode = igc_fc_none; 640 + hw_dbg("Flow Control = NONE.\n"); 641 + } else { 642 + hw->fc.current_mode = igc_fc_rx_pause; 643 + hw_dbg("Flow Control = RX PAUSE frames only.\n"); 644 + } 645 + 646 + /* Now we need to do one last check... If we auto- 647 + * negotiated to HALF DUPLEX, flow control should not be 648 + * enabled per IEEE 802.3 spec. 649 + */ 650 + ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex); 651 + if (ret_val) { 652 + hw_dbg("Error getting link speed and duplex\n"); 653 + goto out; 654 + } 655 + 656 + if (duplex == HALF_DUPLEX) 657 + hw->fc.current_mode = igc_fc_none; 658 + 659 + /* Now we call a subroutine to actually force the MAC 660 + * controller to use the correct flow control settings.
661 + */ 662 + ret_val = igc_force_mac_fc(hw); 663 + if (ret_val) { 664 + hw_dbg("Error forcing flow control settings\n"); 665 + goto out; 666 + } 667 + } 668 + 669 + out: 670 + return ret_val; 671 + } 672 + 673 + /** 674 + * igc_get_auto_rd_done - Check for auto read completion 675 + * @hw: pointer to the HW structure 676 + * 677 + * Check EEPROM for Auto Read done bit. 678 + */ 679 + s32 igc_get_auto_rd_done(struct igc_hw *hw) 680 + { 681 + s32 ret_val = 0; 682 + s32 i = 0; 683 + 684 + while (i < AUTO_READ_DONE_TIMEOUT) { 685 + if (rd32(IGC_EECD) & IGC_EECD_AUTO_RD) 686 + break; 687 + usleep_range(1000, 2000); 688 + i++; 689 + } 690 + 691 + if (i == AUTO_READ_DONE_TIMEOUT) { 692 + hw_dbg("Auto read by HW from NVM has not completed.\n"); 693 + ret_val = -IGC_ERR_RESET; 694 + goto out; 695 + } 696 + 697 + out: 698 + return ret_val; 699 + } 700 + 701 + /** 702 + * igc_get_speed_and_duplex_copper - Retrieve current speed/duplex 703 + * @hw: pointer to the HW structure 704 + * @speed: stores the current speed 705 + * @duplex: stores the current duplex 706 + * 707 + * Read the status register for the current speed/duplex and store the current 708 + * speed and duplex for copper connections. 709 + */ 710 + s32 igc_get_speed_and_duplex_copper(struct igc_hw *hw, u16 *speed, 711 + u16 *duplex) 712 + { 713 + u32 status; 714 + 715 + status = rd32(IGC_STATUS); 716 + if (status & IGC_STATUS_SPEED_1000) { 717 + /* For I225, STATUS will indicate 1G speed in both 1 Gbps 718 + * and 2.5 Gbps link modes. An additional bit is used 719 + * to differentiate between 1 Gbps and 2.5 Gbps. 720 + */ 721 + if (hw->mac.type == igc_i225 && 722 + (status & IGC_STATUS_SPEED_2500)) { 723 + *speed = SPEED_2500; 724 + hw_dbg("2500 Mbps, "); 725 + } else { 726 + *speed = SPEED_1000; 727 + hw_dbg("1000 Mbps, "); 728 + } 729 + } else if (status & IGC_STATUS_SPEED_100) { 730 + *speed = SPEED_100; 731 + hw_dbg("100 Mbps, "); 732 + } else { 733 + *speed = SPEED_10; 734 + hw_dbg("10 Mbps, "); 735 + } 736 + 737 + if (status & IGC_STATUS_FD) { 738 + *duplex = FULL_DUPLEX; 739 + hw_dbg("Full Duplex\n"); 740 + } else { 741 + *duplex = HALF_DUPLEX; 742 + hw_dbg("Half Duplex\n"); 743 + } 744 + 745 + return 0; 746 + } 747 + 748 + /** 749 + * igc_put_hw_semaphore - Release hardware semaphore 750 + * @hw: pointer to the HW structure 751 + * 752 + * Release hardware semaphore used to access the PHY or NVM 753 + */ 754 + void igc_put_hw_semaphore(struct igc_hw *hw) 755 + { 756 + u32 swsm; 757 + 758 + swsm = rd32(IGC_SWSM); 759 + 760 + swsm &= ~(IGC_SWSM_SMBI | IGC_SWSM_SWESMBI); 761 + 762 + wr32(IGC_SWSM, swsm); 763 + } 764 + 765 + /** 766 + * igc_enable_mng_pass_thru - Enable processing of ARPs 767 + * @hw: pointer to the HW structure 768 + * 769 + * Verifies that the hardware needs to leave the interface enabled so that 770 + * frames can be directed to and from the management interface.
771 + */ 772 + bool igc_enable_mng_pass_thru(struct igc_hw *hw) 773 + { 774 + bool ret_val = false; 775 + u32 fwsm, factps; 776 + u32 manc; 777 + 778 + if (!hw->mac.asf_firmware_present) 779 + goto out; 780 + 781 + manc = rd32(IGC_MANC); 782 + 783 + if (!(manc & IGC_MANC_RCV_TCO_EN)) 784 + goto out; 785 + 786 + if (hw->mac.arc_subsystem_valid) { 787 + fwsm = rd32(IGC_FWSM); 788 + factps = rd32(IGC_FACTPS); 789 + 790 + if (!(factps & IGC_FACTPS_MNGCG) && 791 + ((fwsm & IGC_FWSM_MODE_MASK) == 792 + (igc_mng_mode_pt << IGC_FWSM_MODE_SHIFT))) { 793 + ret_val = true; 794 + goto out; 795 + } 796 + } else { 797 + if ((manc & IGC_MANC_SMBUS_EN) && 798 + !(manc & IGC_MANC_ASF_EN)) { 799 + ret_val = true; 800 + goto out; 801 + } 802 + } 803 + 804 + out: 805 + return ret_val; 806 + }
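The PAUSE/ASM_DIR tables in igc_config_fc_after_link_up() above reduce to four cases. A compact restatement as a sketch (not driver code; it omits the trimming to Rx-only when the requested mode was not full, and the legacy-switch fallback):

/* Illustration of the IEEE 802.3 PAUSE resolution used above. */
static enum igc_fc_mode example_resolve_fc(u16 adv, u16 lp_adv)
{
	if ((adv & NWAY_AR_PAUSE) && (lp_adv & NWAY_LPAR_PAUSE))
		return igc_fc_full;	/* symmetric; ASM_DIR is irrelevant */

	if (!(adv & NWAY_AR_PAUSE) && (adv & NWAY_AR_ASM_DIR) &&
	    (lp_adv & NWAY_LPAR_PAUSE) && (lp_adv & NWAY_LPAR_ASM_DIR))
		return igc_fc_tx_pause;

	if ((adv & NWAY_AR_PAUSE) && (adv & NWAY_AR_ASM_DIR) &&
	    !(lp_adv & NWAY_LPAR_PAUSE) && (lp_adv & NWAY_LPAR_ASM_DIR))
		return igc_fc_rx_pause;

	return igc_fc_none;
}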
+41
drivers/net/ethernet/intel/igc/igc_mac.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Copyright (c) 2018 Intel Corporation */ 3 + 4 + #ifndef _IGC_MAC_H_ 5 + #define _IGC_MAC_H_ 6 + 7 + #include "igc_hw.h" 8 + #include "igc_phy.h" 9 + #include "igc_defines.h" 10 + 11 + #ifndef IGC_REMOVED 12 + #define IGC_REMOVED(a) (0) 13 + #endif /* IGC_REMOVED */ 14 + 15 + /* forward declaration */ 16 + s32 igc_disable_pcie_master(struct igc_hw *hw); 17 + s32 igc_check_for_copper_link(struct igc_hw *hw); 18 + s32 igc_config_fc_after_link_up(struct igc_hw *hw); 19 + s32 igc_force_mac_fc(struct igc_hw *hw); 20 + void igc_init_rx_addrs(struct igc_hw *hw, u16 rar_count); 21 + s32 igc_setup_link(struct igc_hw *hw); 22 + void igc_clear_hw_cntrs_base(struct igc_hw *hw); 23 + s32 igc_get_auto_rd_done(struct igc_hw *hw); 24 + void igc_put_hw_semaphore(struct igc_hw *hw); 25 + void igc_rar_set(struct igc_hw *hw, u8 *addr, u32 index); 26 + void igc_config_collision_dist(struct igc_hw *hw); 27 + 28 + s32 igc_get_speed_and_duplex_copper(struct igc_hw *hw, u16 *speed, 29 + u16 *duplex); 30 + 31 + bool igc_enable_mng_pass_thru(struct igc_hw *hw); 32 + 33 + enum igc_mng_mode { 34 + igc_mng_mode_none = 0, 35 + igc_mng_mode_asf, 36 + igc_mng_mode_pt, 37 + igc_mng_mode_ipmi, 38 + igc_mng_mode_host_if_only 39 + }; 40 + 41 + #endif
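One subtlety behind the igc_rar_set() prototype above: the MAC address bytes are packed little-endian into the RAL/RAH register pair. A worked example (illustration only, not driver code):

/* For the MAC address 00:11:22:33:44:55, igc_rar_set() programs
 * RAL = 0x33221100 and RAH = 0x00005544 | IGC_RAH_AV.
 */
static void example_rar_layout(void)
{
	const u8 addr[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	u32 rar_low = (u32)addr[0] | ((u32)addr[1] << 8) |
		      ((u32)addr[2] << 16) | ((u32)addr[3] << 24);
	u32 rar_high = (u32)addr[4] | ((u32)addr[5] << 8);

	/* rar_low == 0x33221100, rar_high == 0x00005544; the AV bit is
	 * ORed in because the address is non-zero.
	 */
}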
+3901
drivers/net/ethernet/intel/igc/igc_main.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2018 Intel Corporation */ 3 + 4 + #include <linux/module.h> 5 + #include <linux/types.h> 6 + #include <linux/if_vlan.h> 7 + #include <linux/aer.h> 8 + 9 + #include "igc.h" 10 + #include "igc_hw.h" 11 + 12 + #define DRV_VERSION "0.0.1-k" 13 + #define DRV_SUMMARY "Intel(R) 2.5G Ethernet Linux Driver" 14 + 15 + static int debug = -1; 16 + 17 + MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 18 + MODULE_DESCRIPTION(DRV_SUMMARY); 19 + MODULE_LICENSE("GPL v2"); 20 + MODULE_VERSION(DRV_VERSION); 21 + module_param(debug, int, 0); 22 + MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 23 + 24 + char igc_driver_name[] = "igc"; 25 + char igc_driver_version[] = DRV_VERSION; 26 + static const char igc_driver_string[] = DRV_SUMMARY; 27 + static const char igc_copyright[] = 28 + "Copyright(c) 2018 Intel Corporation."; 29 + 30 + static const struct igc_info *igc_info_tbl[] = { 31 + [board_base] = &igc_base_info, 32 + }; 33 + 34 + static const struct pci_device_id igc_pci_tbl[] = { 35 + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base }, 36 + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base }, 37 + /* required last entry */ 38 + {0, } 39 + }; 40 + 41 + MODULE_DEVICE_TABLE(pci, igc_pci_tbl); 42 + 43 + /* forward declaration */ 44 + static void igc_clean_tx_ring(struct igc_ring *tx_ring); 45 + static int igc_sw_init(struct igc_adapter *); 46 + static void igc_configure(struct igc_adapter *adapter); 47 + static void igc_power_down_link(struct igc_adapter *adapter); 48 + static void igc_set_default_mac_filter(struct igc_adapter *adapter); 49 + static void igc_set_rx_mode(struct net_device *netdev); 50 + static void igc_write_itr(struct igc_q_vector *q_vector); 51 + static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector); 52 + static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx); 53 + static void igc_set_interrupt_capability(struct igc_adapter *adapter, 54 + bool msix); 55 + static void igc_free_q_vectors(struct igc_adapter *adapter); 56 + static void igc_irq_disable(struct igc_adapter *adapter); 57 + static void igc_irq_enable(struct igc_adapter *adapter); 58 + static void igc_configure_msix(struct igc_adapter *adapter); 59 + static bool igc_alloc_mapped_page(struct igc_ring *rx_ring, 60 + struct igc_rx_buffer *bi); 61 + 62 + enum latency_range { 63 + lowest_latency = 0, 64 + low_latency = 1, 65 + bulk_latency = 2, 66 + latency_invalid = 255 67 + }; 68 + 69 + static void igc_reset(struct igc_adapter *adapter) 70 + { 71 + struct pci_dev *pdev = adapter->pdev; 72 + struct igc_hw *hw = &adapter->hw; 73 + 74 + hw->mac.ops.reset_hw(hw); 75 + 76 + if (hw->mac.ops.init_hw(hw)) 77 + dev_err(&pdev->dev, "Hardware Error\n"); 78 + 79 + if (!netif_running(adapter->netdev)) 80 + igc_power_down_link(adapter); 81 + 82 + igc_get_phy_info(hw); 83 + } 84 + 85 + /** 86 + * igc_power_up_link - Power up the phy/serdes link 87 + * @adapter: address of board private structure 88 + */ 89 + static void igc_power_up_link(struct igc_adapter *adapter) 90 + { 91 + igc_reset_phy(&adapter->hw); 92 + 93 + if (adapter->hw.phy.media_type == igc_media_type_copper) 94 + igc_power_up_phy_copper(&adapter->hw); 95 + 96 + igc_setup_link(&adapter->hw); 97 + } 98 + 99 + /** 100 + * igc_power_down_link - Power down the phy/serdes link 101 + * @adapter: address of board private structure 102 + */ 103 + static void igc_power_down_link(struct igc_adapter *adapter) 104 + { 105 + if (adapter->hw.phy.media_type == 
igc_media_type_copper) 106 + igc_power_down_phy_copper_base(&adapter->hw); 107 + } 108 + 109 + /** 110 + * igc_release_hw_control - release control of the h/w to f/w 111 + * @adapter: address of board private structure 112 + * 113 + * igc_release_hw_control resets CTRL_EXT:DRV_LOAD bit. 114 + * For ASF and Pass Through versions of f/w this means that the 115 + * driver is no longer loaded. 116 + */ 117 + static void igc_release_hw_control(struct igc_adapter *adapter) 118 + { 119 + struct igc_hw *hw = &adapter->hw; 120 + u32 ctrl_ext; 121 + 122 + /* Let firmware take over control of h/w */ 123 + ctrl_ext = rd32(IGC_CTRL_EXT); 124 + wr32(IGC_CTRL_EXT, 125 + ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD); 126 + } 127 + 128 + /** 129 + * igc_get_hw_control - get control of the h/w from f/w 130 + * @adapter: address of board private structure 131 + * 132 + * igc_get_hw_control sets CTRL_EXT:DRV_LOAD bit. 133 + * For ASF and Pass Through versions of f/w this means that 134 + * the driver is loaded. 135 + */ 136 + static void igc_get_hw_control(struct igc_adapter *adapter) 137 + { 138 + struct igc_hw *hw = &adapter->hw; 139 + u32 ctrl_ext; 140 + 141 + /* Let firmware know the driver has taken over */ 142 + ctrl_ext = rd32(IGC_CTRL_EXT); 143 + wr32(IGC_CTRL_EXT, 144 + ctrl_ext | IGC_CTRL_EXT_DRV_LOAD); 145 + } 146 + 147 + /** 148 + * igc_free_tx_resources - Free Tx Resources per Queue 149 + * @tx_ring: Tx descriptor ring for a specific queue 150 + * 151 + * Free all transmit software resources 152 + */ 153 + static void igc_free_tx_resources(struct igc_ring *tx_ring) 154 + { 155 + igc_clean_tx_ring(tx_ring); 156 + 157 + vfree(tx_ring->tx_buffer_info); 158 + tx_ring->tx_buffer_info = NULL; 159 + 160 + /* if not set, then don't free */ 161 + if (!tx_ring->desc) 162 + return; 163 + 164 + dma_free_coherent(tx_ring->dev, tx_ring->size, 165 + tx_ring->desc, tx_ring->dma); 166 + 167 + tx_ring->desc = NULL; 168 + } 169 + 170 + /** 171 + * igc_free_all_tx_resources - Free Tx Resources for All Queues 172 + * @adapter: board private structure 173 + * 174 + * Free all transmit software resources 175 + */ 176 + static void igc_free_all_tx_resources(struct igc_adapter *adapter) 177 + { 178 + int i; 179 + 180 + for (i = 0; i < adapter->num_tx_queues; i++) 181 + igc_free_tx_resources(adapter->tx_ring[i]); 182 + } 183 + 184 + /** 185 + * igc_clean_tx_ring - Free Tx Buffers 186 + * @tx_ring: ring to be cleaned 187 + */ 188 + static void igc_clean_tx_ring(struct igc_ring *tx_ring) 189 + { 190 + u16 i = tx_ring->next_to_clean; 191 + struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; 192 + 193 + while (i != tx_ring->next_to_use) { 194 + union igc_adv_tx_desc *eop_desc, *tx_desc; 195 + 196 + /* Free all the Tx ring sk_buffs */ 197 + dev_kfree_skb_any(tx_buffer->skb); 198 + 199 + /* unmap skb header data */ 200 + dma_unmap_single(tx_ring->dev, 201 + dma_unmap_addr(tx_buffer, dma), 202 + dma_unmap_len(tx_buffer, len), 203 + DMA_TO_DEVICE); 204 + 205 + /* check for eop_desc to determine the end of the packet */ 206 + eop_desc = tx_buffer->next_to_watch; 207 + tx_desc = IGC_TX_DESC(tx_ring, i); 208 + 209 + /* unmap remaining buffers */ 210 + while (tx_desc != eop_desc) { 211 + tx_buffer++; 212 + tx_desc++; 213 + i++; 214 + if (unlikely(i == tx_ring->count)) { 215 + i = 0; 216 + tx_buffer = tx_ring->tx_buffer_info; 217 + tx_desc = IGC_TX_DESC(tx_ring, 0); 218 + } 219 + 220 + /* unmap any remaining paged data */ 221 + if (dma_unmap_len(tx_buffer, len)) 222 + dma_unmap_page(tx_ring->dev, 223 + dma_unmap_addr(tx_buffer, dma), 
224 + dma_unmap_len(tx_buffer, len), 225 + DMA_TO_DEVICE); 226 + } 227 + 228 + /* move us one more past the eop_desc for start of next pkt */ 229 + tx_buffer++; 230 + i++; 231 + if (unlikely(i == tx_ring->count)) { 232 + i = 0; 233 + tx_buffer = tx_ring->tx_buffer_info; 234 + } 235 + } 236 + 237 + /* reset BQL for queue */ 238 + netdev_tx_reset_queue(txring_txq(tx_ring)); 239 + 240 + /* reset next_to_use and next_to_clean */ 241 + tx_ring->next_to_use = 0; 242 + tx_ring->next_to_clean = 0; 243 + } 244 + 245 + /** 246 + * igc_clean_all_tx_rings - Free Tx Buffers for all queues 247 + * @adapter: board private structure 248 + */ 249 + static void igc_clean_all_tx_rings(struct igc_adapter *adapter) 250 + { 251 + int i; 252 + 253 + for (i = 0; i < adapter->num_tx_queues; i++) 254 + if (adapter->tx_ring[i]) 255 + igc_clean_tx_ring(adapter->tx_ring[i]); 256 + } 257 + 258 + /** 259 + * igc_setup_tx_resources - allocate Tx resources (Descriptors) 260 + * @tx_ring: tx descriptor ring (for a specific queue) to setup 261 + * 262 + * Return 0 on success, negative on failure 263 + */ 264 + static int igc_setup_tx_resources(struct igc_ring *tx_ring) 265 + { 266 + struct device *dev = tx_ring->dev; 267 + int size = 0; 268 + 269 + size = sizeof(struct igc_tx_buffer) * tx_ring->count; 270 + tx_ring->tx_buffer_info = vzalloc(size); 271 + if (!tx_ring->tx_buffer_info) 272 + goto err; 273 + 274 + /* round up to nearest 4K */ 275 + tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc); 276 + tx_ring->size = ALIGN(tx_ring->size, 4096); 277 + 278 + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, 279 + &tx_ring->dma, GFP_KERNEL); 280 + 281 + if (!tx_ring->desc) 282 + goto err; 283 + 284 + tx_ring->next_to_use = 0; 285 + tx_ring->next_to_clean = 0; 286 + 287 + return 0; 288 + 289 + err: 290 + vfree(tx_ring->tx_buffer_info); 291 + dev_err(dev, 292 + "Unable to allocate memory for the transmit descriptor ring\n"); 293 + return -ENOMEM; 294 + } 295 + 296 + /** 297 + * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues 298 + * @adapter: board private structure 299 + * 300 + * Return 0 on success, negative on failure 301 + */ 302 + static int igc_setup_all_tx_resources(struct igc_adapter *adapter) 303 + { 304 + struct pci_dev *pdev = adapter->pdev; 305 + int i, err = 0; 306 + 307 + for (i = 0; i < adapter->num_tx_queues; i++) { 308 + err = igc_setup_tx_resources(adapter->tx_ring[i]); 309 + if (err) { 310 + dev_err(&pdev->dev, 311 + "Allocation for Tx Queue %u failed\n", i); 312 + for (i--; i >= 0; i--) 313 + igc_free_tx_resources(adapter->tx_ring[i]); 314 + break; 315 + } 316 + } 317 + 318 + return err; 319 + } 320 + 321 + /** 322 + * igc_clean_rx_ring - Free Rx Buffers per Queue 323 + * @rx_ring: ring to free buffers from 324 + */ 325 + static void igc_clean_rx_ring(struct igc_ring *rx_ring) 326 + { 327 + u16 i = rx_ring->next_to_clean; 328 + 329 + if (rx_ring->skb) 330 + dev_kfree_skb(rx_ring->skb); 331 + rx_ring->skb = NULL; 332 + 333 + /* Free all the Rx ring sk_buffs */ 334 + while (i != rx_ring->next_to_alloc) { 335 + struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; 336 + 337 + /* Invalidate cache lines that may have been written to by 338 + * device so that we avoid corrupting memory. 
339 + */ 340 + dma_sync_single_range_for_cpu(rx_ring->dev, 341 + buffer_info->dma, 342 + buffer_info->page_offset, 343 + igc_rx_bufsz(rx_ring), 344 + DMA_FROM_DEVICE); 345 + 346 + /* free resources associated with mapping */ 347 + dma_unmap_page_attrs(rx_ring->dev, 348 + buffer_info->dma, 349 + igc_rx_pg_size(rx_ring), 350 + DMA_FROM_DEVICE, 351 + IGC_RX_DMA_ATTR); 352 + __page_frag_cache_drain(buffer_info->page, 353 + buffer_info->pagecnt_bias); 354 + 355 + i++; 356 + if (i == rx_ring->count) 357 + i = 0; 358 + } 359 + 360 + rx_ring->next_to_alloc = 0; 361 + rx_ring->next_to_clean = 0; 362 + rx_ring->next_to_use = 0; 363 + } 364 + 365 + /** 366 + * igc_clean_all_rx_rings - Free Rx Buffers for all queues 367 + * @adapter: board private structure 368 + */ 369 + static void igc_clean_all_rx_rings(struct igc_adapter *adapter) 370 + { 371 + int i; 372 + 373 + for (i = 0; i < adapter->num_rx_queues; i++) 374 + if (adapter->rx_ring[i]) 375 + igc_clean_rx_ring(adapter->rx_ring[i]); 376 + } 377 + 378 + /** 379 + * igc_free_rx_resources - Free Rx Resources 380 + * @rx_ring: ring to clean the resources from 381 + * 382 + * Free all receive software resources 383 + */ 384 + static void igc_free_rx_resources(struct igc_ring *rx_ring) 385 + { 386 + igc_clean_rx_ring(rx_ring); 387 + 388 + vfree(rx_ring->rx_buffer_info); 389 + rx_ring->rx_buffer_info = NULL; 390 + 391 + /* if not set, then don't free */ 392 + if (!rx_ring->desc) 393 + return; 394 + 395 + dma_free_coherent(rx_ring->dev, rx_ring->size, 396 + rx_ring->desc, rx_ring->dma); 397 + 398 + rx_ring->desc = NULL; 399 + } 400 + 401 + /** 402 + * igc_free_all_rx_resources - Free Rx Resources for All Queues 403 + * @adapter: board private structure 404 + * 405 + * Free all receive software resources 406 + */ 407 + static void igc_free_all_rx_resources(struct igc_adapter *adapter) 408 + { 409 + int i; 410 + 411 + for (i = 0; i < adapter->num_rx_queues; i++) 412 + igc_free_rx_resources(adapter->rx_ring[i]); 413 + } 414 + 415 + /** 416 + * igc_setup_rx_resources - allocate Rx resources (Descriptors) 417 + * @rx_ring: rx descriptor ring (for a specific queue) to setup 418 + * 419 + * Returns 0 on success, negative on failure 420 + */ 421 + static int igc_setup_rx_resources(struct igc_ring *rx_ring) 422 + { 423 + struct device *dev = rx_ring->dev; 424 + int size, desc_len; 425 + 426 + size = sizeof(struct igc_rx_buffer) * rx_ring->count; 427 + rx_ring->rx_buffer_info = vzalloc(size); 428 + if (!rx_ring->rx_buffer_info) 429 + goto err; 430 + 431 + desc_len = sizeof(union igc_adv_rx_desc); 432 + 433 + /* Round up to nearest 4K */ 434 + rx_ring->size = rx_ring->count * desc_len; 435 + rx_ring->size = ALIGN(rx_ring->size, 4096); 436 + 437 + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, 438 + &rx_ring->dma, GFP_KERNEL); 439 + 440 + if (!rx_ring->desc) 441 + goto err; 442 + 443 + rx_ring->next_to_alloc = 0; 444 + rx_ring->next_to_clean = 0; 445 + rx_ring->next_to_use = 0; 446 + 447 + return 0; 448 + 449 + err: 450 + vfree(rx_ring->rx_buffer_info); 451 + rx_ring->rx_buffer_info = NULL; 452 + dev_err(dev, 453 + "Unable to allocate memory for the receive descriptor ring\n"); 454 + return -ENOMEM; 455 + } 456 + 457 + /** 458 + * igc_setup_all_rx_resources - wrapper to allocate Rx resources 459 + * (Descriptors) for all queues 460 + * @adapter: board private structure 461 + * 462 + * Return 0 on success, negative on failure 463 + */ 464 + static int igc_setup_all_rx_resources(struct igc_adapter *adapter) 465 + { 466 + struct pci_dev *pdev = adapter->pdev; 
467 + int i, err = 0; 468 + 469 + for (i = 0; i < adapter->num_rx_queues; i++) { 470 + err = igc_setup_rx_resources(adapter->rx_ring[i]); 471 + if (err) { 472 + dev_err(&pdev->dev, 473 + "Allocation for Rx Queue %u failed\n", i); 474 + for (i--; i >= 0; i--) 475 + igc_free_rx_resources(adapter->rx_ring[i]); 476 + break; 477 + } 478 + } 479 + 480 + return err; 481 + } 482 + 483 + /** 484 + * igc_configure_rx_ring - Configure a receive ring after Reset 485 + * @adapter: board private structure 486 + * @ring: receive ring to be configured 487 + * 488 + * Configure the Rx unit of the MAC after a reset. 489 + */ 490 + static void igc_configure_rx_ring(struct igc_adapter *adapter, 491 + struct igc_ring *ring) 492 + { 493 + struct igc_hw *hw = &adapter->hw; 494 + union igc_adv_rx_desc *rx_desc; 495 + int reg_idx = ring->reg_idx; 496 + u32 srrctl = 0, rxdctl = 0; 497 + u64 rdba = ring->dma; 498 + 499 + /* disable the queue */ 500 + wr32(IGC_RXDCTL(reg_idx), 0); 501 + 502 + /* Set DMA base address registers */ 503 + wr32(IGC_RDBAL(reg_idx), 504 + rdba & 0x00000000ffffffffULL); 505 + wr32(IGC_RDBAH(reg_idx), rdba >> 32); 506 + wr32(IGC_RDLEN(reg_idx), 507 + ring->count * sizeof(union igc_adv_rx_desc)); 508 + 509 + /* initialize head and tail */ 510 + ring->tail = adapter->io_addr + IGC_RDT(reg_idx); 511 + wr32(IGC_RDH(reg_idx), 0); 512 + writel(0, ring->tail); 513 + 514 + /* reset next-to-use/clean to place SW in sync with hardware */ 515 + ring->next_to_clean = 0; 516 + ring->next_to_use = 0; 517 + 518 + /* set descriptor configuration */ 519 + srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT; 520 + if (ring_uses_large_buffer(ring)) 521 + srrctl |= IGC_RXBUFFER_3072 >> IGC_SRRCTL_BSIZEPKT_SHIFT; 522 + else 523 + srrctl |= IGC_RXBUFFER_2048 >> IGC_SRRCTL_BSIZEPKT_SHIFT; 524 + srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF; 525 + 526 + wr32(IGC_SRRCTL(reg_idx), srrctl); 527 + 528 + rxdctl |= IGC_RX_PTHRESH; 529 + rxdctl |= IGC_RX_HTHRESH << 8; 530 + rxdctl |= IGC_RX_WTHRESH << 16; 531 + 532 + /* initialize rx_buffer_info */ 533 + memset(ring->rx_buffer_info, 0, 534 + sizeof(struct igc_rx_buffer) * ring->count); 535 + 536 + /* initialize Rx descriptor 0 */ 537 + rx_desc = IGC_RX_DESC(ring, 0); 538 + rx_desc->wb.upper.length = 0; 539 + 540 + /* enable receive descriptor fetching */ 541 + rxdctl |= IGC_RXDCTL_QUEUE_ENABLE; 542 + 543 + wr32(IGC_RXDCTL(reg_idx), rxdctl); 544 + } 545 + 546 + /** 547 + * igc_configure_rx - Configure receive Unit after Reset 548 + * @adapter: board private structure 549 + * 550 + * Configure the Rx unit of the MAC after a reset. 551 + */ 552 + static void igc_configure_rx(struct igc_adapter *adapter) 553 + { 554 + int i; 555 + 556 + /* Setup the HW Rx Head and Tail Descriptor Pointers and 557 + * the Base and Length of the Rx Descriptor Ring 558 + */ 559 + for (i = 0; i < adapter->num_rx_queues; i++) 560 + igc_configure_rx_ring(adapter, adapter->rx_ring[i]); 561 + } 562 + 563 + /** 564 + * igc_configure_tx_ring - Configure transmit ring after Reset 565 + * @adapter: board private structure 566 + * @ring: tx ring to configure 567 + * 568 + * Configure a transmit ring after a reset.
569 + */ 570 + static void igc_configure_tx_ring(struct igc_adapter *adapter, 571 + struct igc_ring *ring) 572 + { 573 + struct igc_hw *hw = &adapter->hw; 574 + int reg_idx = ring->reg_idx; 575 + u64 tdba = ring->dma; 576 + u32 txdctl = 0; 577 + 578 + /* disable the queue */ 579 + wr32(IGC_TXDCTL(reg_idx), 0); 580 + wrfl(); 581 + mdelay(10); 582 + 583 + wr32(IGC_TDLEN(reg_idx), 584 + ring->count * sizeof(union igc_adv_tx_desc)); 585 + wr32(IGC_TDBAL(reg_idx), 586 + tdba & 0x00000000ffffffffULL); 587 + wr32(IGC_TDBAH(reg_idx), tdba >> 32); 588 + 589 + ring->tail = adapter->io_addr + IGC_TDT(reg_idx); 590 + wr32(IGC_TDH(reg_idx), 0); 591 + writel(0, ring->tail); 592 + 593 + txdctl |= IGC_TX_PTHRESH; 594 + txdctl |= IGC_TX_HTHRESH << 8; 595 + txdctl |= IGC_TX_WTHRESH << 16; 596 + 597 + txdctl |= IGC_TXDCTL_QUEUE_ENABLE; 598 + wr32(IGC_TXDCTL(reg_idx), txdctl); 599 + } 600 + 601 + /** 602 + * igc_configure_tx - Configure transmit Unit after Reset 603 + * @adapter: board private structure 604 + * 605 + * Configure the Tx unit of the MAC after a reset. 606 + */ 607 + static void igc_configure_tx(struct igc_adapter *adapter) 608 + { 609 + int i; 610 + 611 + for (i = 0; i < adapter->num_tx_queues; i++) 612 + igc_configure_tx_ring(adapter, adapter->tx_ring[i]); 613 + } 614 + 615 + /** 616 + * igc_setup_mrqc - configure the multiple receive queue control registers 617 + * @adapter: Board private structure 618 + */ 619 + static void igc_setup_mrqc(struct igc_adapter *adapter) 620 + { 621 + } 622 + 623 + /** 624 + * igc_setup_rctl - configure the receive control registers 625 + * @adapter: Board private structure 626 + */ 627 + static void igc_setup_rctl(struct igc_adapter *adapter) 628 + { 629 + struct igc_hw *hw = &adapter->hw; 630 + u32 rctl; 631 + 632 + rctl = rd32(IGC_RCTL); 633 + 634 + rctl &= ~(3 << IGC_RCTL_MO_SHIFT); 635 + rctl &= ~(IGC_RCTL_LBM_TCVR | IGC_RCTL_LBM_MAC); 636 + 637 + rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_RDMTS_HALF | 638 + (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT); 639 + 640 + /* enable stripping of CRC. Newer features require 641 + * that the HW strips the CRC. 642 + */ 643 + rctl |= IGC_RCTL_SECRC; 644 + 645 + /* disable store bad packets and clear size bits. */ 646 + rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_SZ_256); 647 + 648 + /* enable LPE to allow for reception of jumbo frames */ 649 + rctl |= IGC_RCTL_LPE; 650 + 651 + /* disable queue 0 to prevent tail write w/o re-config */ 652 + wr32(IGC_RXDCTL(0), 0); 653 + 654 + /* This is useful for sniffing bad packets. 
*/
655 + if (adapter->netdev->features & NETIF_F_RXALL) {
656 + /* UPE and MPE will be handled by normal PROMISC logic
657 + * in set_rx_mode
658 + */
659 + rctl |= (IGC_RCTL_SBP | /* Receive bad packets */
660 + IGC_RCTL_BAM | /* RX All Bcast Pkts */
661 + IGC_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
662 +
663 + rctl &= ~(IGC_RCTL_DPF | /* Allow filtered pause */
664 + IGC_RCTL_CFIEN); /* Disable VLAN CFIEN Filter */
665 + }
666 +
667 + wr32(IGC_RCTL, rctl);
668 + }
669 +
670 + /**
671 + * igc_setup_tctl - configure the transmit control registers
672 + * @adapter: Board private structure
673 + */
674 + static void igc_setup_tctl(struct igc_adapter *adapter)
675 + {
676 + struct igc_hw *hw = &adapter->hw;
677 + u32 tctl;
678 +
679 + /* disable queue 0 which could be enabled by default */
680 + wr32(IGC_TXDCTL(0), 0);
681 +
682 + /* Program the Transmit Control Register */
683 + tctl = rd32(IGC_TCTL);
684 + tctl &= ~IGC_TCTL_CT;
685 + tctl |= IGC_TCTL_PSP | IGC_TCTL_RTLC |
686 + (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT);
687 +
688 + /* Enable transmits */
689 + tctl |= IGC_TCTL_EN;
690 +
691 + wr32(IGC_TCTL, tctl);
692 + }
693 +
694 + /**
695 + * igc_set_mac - Change the Ethernet Address of the NIC
696 + * @netdev: network interface device structure
697 + * @p: pointer to an address structure
698 + *
699 + * Returns 0 on success, negative on failure
700 + */
701 + static int igc_set_mac(struct net_device *netdev, void *p)
702 + {
703 + struct igc_adapter *adapter = netdev_priv(netdev);
704 + struct igc_hw *hw = &adapter->hw;
705 + struct sockaddr *addr = p;
706 +
707 + if (!is_valid_ether_addr(addr->sa_data))
708 + return -EADDRNOTAVAIL;
709 +
710 + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
711 + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
712 +
713 + /* set the correct pool for the new PF MAC address in entry 0 */
714 + igc_set_default_mac_filter(adapter);
715 +
716 + return 0;
717 + }
718 +
719 + static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first)
720 + {
721 + }
722 +
723 + static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
724 + {
725 + struct net_device *netdev = tx_ring->netdev;
726 +
727 + netif_stop_subqueue(netdev, tx_ring->queue_index);
728 +
729 + /* barrier: make the queue stop visible before we re-check for room */
730 + smp_mb();
731 +
732 + /* We need to check again in case another CPU has just
733 + * made room available.
734 + */
735 + if (igc_desc_unused(tx_ring) < size)
736 + return -EBUSY;
737 +
738 + /* A reprieve!
*/ 739 + netif_wake_subqueue(netdev, tx_ring->queue_index); 740 + 741 + u64_stats_update_begin(&tx_ring->tx_syncp2); 742 + tx_ring->tx_stats.restart_queue2++; 743 + u64_stats_update_end(&tx_ring->tx_syncp2); 744 + 745 + return 0; 746 + } 747 + 748 + static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size) 749 + { 750 + if (igc_desc_unused(tx_ring) >= size) 751 + return 0; 752 + return __igc_maybe_stop_tx(tx_ring, size); 753 + } 754 + 755 + static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) 756 + { 757 + /* set type for advanced descriptor with frame checksum insertion */ 758 + u32 cmd_type = IGC_ADVTXD_DTYP_DATA | 759 + IGC_ADVTXD_DCMD_DEXT | 760 + IGC_ADVTXD_DCMD_IFCS; 761 + 762 + return cmd_type; 763 + } 764 + 765 + static void igc_tx_olinfo_status(struct igc_ring *tx_ring, 766 + union igc_adv_tx_desc *tx_desc, 767 + u32 tx_flags, unsigned int paylen) 768 + { 769 + u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT; 770 + 771 + /* insert L4 checksum */ 772 + olinfo_status |= (tx_flags & IGC_TX_FLAGS_CSUM) * 773 + ((IGC_TXD_POPTS_TXSM << 8) / 774 + IGC_TX_FLAGS_CSUM); 775 + 776 + /* insert IPv4 checksum */ 777 + olinfo_status |= (tx_flags & IGC_TX_FLAGS_IPV4) * 778 + (((IGC_TXD_POPTS_IXSM << 8)) / 779 + IGC_TX_FLAGS_IPV4); 780 + 781 + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 782 + } 783 + 784 + static int igc_tx_map(struct igc_ring *tx_ring, 785 + struct igc_tx_buffer *first, 786 + const u8 hdr_len) 787 + { 788 + struct sk_buff *skb = first->skb; 789 + struct igc_tx_buffer *tx_buffer; 790 + union igc_adv_tx_desc *tx_desc; 791 + u32 tx_flags = first->tx_flags; 792 + struct skb_frag_struct *frag; 793 + u16 i = tx_ring->next_to_use; 794 + unsigned int data_len, size; 795 + dma_addr_t dma; 796 + u32 cmd_type = igc_tx_cmd_type(skb, tx_flags); 797 + 798 + tx_desc = IGC_TX_DESC(tx_ring, i); 799 + 800 + igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len); 801 + 802 + size = skb_headlen(skb); 803 + data_len = skb->data_len; 804 + 805 + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); 806 + 807 + tx_buffer = first; 808 + 809 + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { 810 + if (dma_mapping_error(tx_ring->dev, dma)) 811 + goto dma_error; 812 + 813 + /* record length, and DMA address */ 814 + dma_unmap_len_set(tx_buffer, len, size); 815 + dma_unmap_addr_set(tx_buffer, dma, dma); 816 + 817 + tx_desc->read.buffer_addr = cpu_to_le64(dma); 818 + 819 + while (unlikely(size > IGC_MAX_DATA_PER_TXD)) { 820 + tx_desc->read.cmd_type_len = 821 + cpu_to_le32(cmd_type ^ IGC_MAX_DATA_PER_TXD); 822 + 823 + i++; 824 + tx_desc++; 825 + if (i == tx_ring->count) { 826 + tx_desc = IGC_TX_DESC(tx_ring, 0); 827 + i = 0; 828 + } 829 + tx_desc->read.olinfo_status = 0; 830 + 831 + dma += IGC_MAX_DATA_PER_TXD; 832 + size -= IGC_MAX_DATA_PER_TXD; 833 + 834 + tx_desc->read.buffer_addr = cpu_to_le64(dma); 835 + } 836 + 837 + if (likely(!data_len)) 838 + break; 839 + 840 + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); 841 + 842 + i++; 843 + tx_desc++; 844 + if (i == tx_ring->count) { 845 + tx_desc = IGC_TX_DESC(tx_ring, 0); 846 + i = 0; 847 + } 848 + tx_desc->read.olinfo_status = 0; 849 + 850 + size = skb_frag_size(frag); 851 + data_len -= size; 852 + 853 + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, 854 + size, DMA_TO_DEVICE); 855 + 856 + tx_buffer = &tx_ring->tx_buffer_info[i]; 857 + } 858 + 859 + /* write last descriptor with RS and EOP bits */ 860 + cmd_type |= size | IGC_TXD_DCMD; 861 + tx_desc->read.cmd_type_len = 
cpu_to_le32(cmd_type); 862 + 863 + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); 864 + 865 + /* set the timestamp */ 866 + first->time_stamp = jiffies; 867 + 868 + /* Force memory writes to complete before letting h/w know there 869 + * are new descriptors to fetch. (Only applicable for weak-ordered 870 + * memory model archs, such as IA-64). 871 + * 872 + * We also need this memory barrier to make certain all of the 873 + * status bits have been updated before next_to_watch is written. 874 + */ 875 + wmb(); 876 + 877 + /* set next_to_watch value indicating a packet is present */ 878 + first->next_to_watch = tx_desc; 879 + 880 + i++; 881 + if (i == tx_ring->count) 882 + i = 0; 883 + 884 + tx_ring->next_to_use = i; 885 + 886 + /* Make sure there is space in the ring for the next send. */ 887 + igc_maybe_stop_tx(tx_ring, DESC_NEEDED); 888 + 889 + if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { 890 + writel(i, tx_ring->tail); 891 + 892 + /* we need this if more than one processor can write to our tail 893 + * at a time, it synchronizes IO on IA64/Altix systems 894 + */ 895 + mmiowb(); 896 + } 897 + 898 + return 0; 899 + dma_error: 900 + dev_err(tx_ring->dev, "TX DMA map failed\n"); 901 + tx_buffer = &tx_ring->tx_buffer_info[i]; 902 + 903 + /* clear dma mappings for failed tx_buffer_info map */ 904 + while (tx_buffer != first) { 905 + if (dma_unmap_len(tx_buffer, len)) 906 + dma_unmap_page(tx_ring->dev, 907 + dma_unmap_addr(tx_buffer, dma), 908 + dma_unmap_len(tx_buffer, len), 909 + DMA_TO_DEVICE); 910 + dma_unmap_len_set(tx_buffer, len, 0); 911 + 912 + if (i-- == 0) 913 + i += tx_ring->count; 914 + tx_buffer = &tx_ring->tx_buffer_info[i]; 915 + } 916 + 917 + if (dma_unmap_len(tx_buffer, len)) 918 + dma_unmap_single(tx_ring->dev, 919 + dma_unmap_addr(tx_buffer, dma), 920 + dma_unmap_len(tx_buffer, len), 921 + DMA_TO_DEVICE); 922 + dma_unmap_len_set(tx_buffer, len, 0); 923 + 924 + dev_kfree_skb_any(tx_buffer->skb); 925 + tx_buffer->skb = NULL; 926 + 927 + tx_ring->next_to_use = i; 928 + 929 + return -1; 930 + } 931 + 932 + static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, 933 + struct igc_ring *tx_ring) 934 + { 935 + u16 count = TXD_USE_COUNT(skb_headlen(skb)); 936 + __be16 protocol = vlan_get_protocol(skb); 937 + struct igc_tx_buffer *first; 938 + u32 tx_flags = 0; 939 + unsigned short f; 940 + u8 hdr_len = 0; 941 + 942 + /* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD, 943 + * + 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD, 944 + * + 2 desc gap to keep tail from touching head, 945 + * + 1 desc for context descriptor, 946 + * otherwise try next time 947 + */ 948 + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) 949 + count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); 950 + 951 + if (igc_maybe_stop_tx(tx_ring, count + 3)) { 952 + /* this is a hard error */ 953 + return NETDEV_TX_BUSY; 954 + } 955 + 956 + /* record the location of the first descriptor for this packet */ 957 + first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; 958 + first->skb = skb; 959 + first->bytecount = skb->len; 960 + first->gso_segs = 1; 961 + 962 + skb_tx_timestamp(skb); 963 + 964 + /* record initial flags and protocol */ 965 + first->tx_flags = tx_flags; 966 + first->protocol = protocol; 967 + 968 + igc_tx_csum(tx_ring, first); 969 + 970 + igc_tx_map(tx_ring, first, hdr_len); 971 + 972 + return NETDEV_TX_OK; 973 + } 974 + 975 + static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter, 976 + struct sk_buff *skb) 977 + { 978 + 
unsigned int r_idx = skb->queue_mapping; 979 + 980 + if (r_idx >= adapter->num_tx_queues) 981 + r_idx = r_idx % adapter->num_tx_queues; 982 + 983 + return adapter->tx_ring[r_idx]; 984 + } 985 + 986 + static netdev_tx_t igc_xmit_frame(struct sk_buff *skb, 987 + struct net_device *netdev) 988 + { 989 + struct igc_adapter *adapter = netdev_priv(netdev); 990 + 991 + /* The minimum packet size with TCTL.PSP set is 17 so pad the skb 992 + * in order to meet this minimum size requirement. 993 + */ 994 + if (skb->len < 17) { 995 + if (skb_padto(skb, 17)) 996 + return NETDEV_TX_OK; 997 + skb->len = 17; 998 + } 999 + 1000 + return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb)); 1001 + } 1002 + 1003 + static inline void igc_rx_hash(struct igc_ring *ring, 1004 + union igc_adv_rx_desc *rx_desc, 1005 + struct sk_buff *skb) 1006 + { 1007 + if (ring->netdev->features & NETIF_F_RXHASH) 1008 + skb_set_hash(skb, 1009 + le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), 1010 + PKT_HASH_TYPE_L3); 1011 + } 1012 + 1013 + /** 1014 + * igc_process_skb_fields - Populate skb header fields from Rx descriptor 1015 + * @rx_ring: rx descriptor ring packet is being transacted on 1016 + * @rx_desc: pointer to the EOP Rx descriptor 1017 + * @skb: pointer to current skb being populated 1018 + * 1019 + * This function checks the ring, descriptor, and packet information in 1020 + * order to populate the hash, checksum, VLAN, timestamp, protocol, and 1021 + * other fields within the skb. 1022 + */ 1023 + static void igc_process_skb_fields(struct igc_ring *rx_ring, 1024 + union igc_adv_rx_desc *rx_desc, 1025 + struct sk_buff *skb) 1026 + { 1027 + igc_rx_hash(rx_ring, rx_desc, skb); 1028 + 1029 + skb_record_rx_queue(skb, rx_ring->queue_index); 1030 + 1031 + skb->protocol = eth_type_trans(skb, rx_ring->netdev); 1032 + } 1033 + 1034 + static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring, 1035 + const unsigned int size) 1036 + { 1037 + struct igc_rx_buffer *rx_buffer; 1038 + 1039 + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; 1040 + prefetchw(rx_buffer->page); 1041 + 1042 + /* we are reusing so sync this buffer for CPU use */ 1043 + dma_sync_single_range_for_cpu(rx_ring->dev, 1044 + rx_buffer->dma, 1045 + rx_buffer->page_offset, 1046 + size, 1047 + DMA_FROM_DEVICE); 1048 + 1049 + rx_buffer->pagecnt_bias--; 1050 + 1051 + return rx_buffer; 1052 + } 1053 + 1054 + /** 1055 + * igc_add_rx_frag - Add contents of Rx buffer to sk_buff 1056 + * @rx_ring: rx descriptor ring to transact packets on 1057 + * @rx_buffer: buffer containing page to add 1058 + * @skb: sk_buff to place the data into 1059 + * @size: size of buffer to be added 1060 + * 1061 + * This function will add the data contained in rx_buffer->page to the skb. 1062 + */ 1063 + static void igc_add_rx_frag(struct igc_ring *rx_ring, 1064 + struct igc_rx_buffer *rx_buffer, 1065 + struct sk_buff *skb, 1066 + unsigned int size) 1067 + { 1068 + #if (PAGE_SIZE < 8192) 1069 + unsigned int truesize = igc_rx_pg_size(rx_ring) / 2; 1070 + 1071 + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, 1072 + rx_buffer->page_offset, size, truesize); 1073 + rx_buffer->page_offset ^= truesize; 1074 + #else 1075 + unsigned int truesize = ring_uses_build_skb(rx_ring) ? 
1076 + SKB_DATA_ALIGN(IGC_SKB_PAD + size) : 1077 + SKB_DATA_ALIGN(size); 1078 + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, 1079 + rx_buffer->page_offset, size, truesize); 1080 + rx_buffer->page_offset += truesize; 1081 + #endif 1082 + } 1083 + 1084 + static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring, 1085 + struct igc_rx_buffer *rx_buffer, 1086 + union igc_adv_rx_desc *rx_desc, 1087 + unsigned int size) 1088 + { 1089 + void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; 1090 + #if (PAGE_SIZE < 8192) 1091 + unsigned int truesize = igc_rx_pg_size(rx_ring) / 2; 1092 + #else 1093 + unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + 1094 + SKB_DATA_ALIGN(IGC_SKB_PAD + size); 1095 + #endif 1096 + struct sk_buff *skb; 1097 + 1098 + /* prefetch first cache line of first page */ 1099 + prefetch(va); 1100 + #if L1_CACHE_BYTES < 128 1101 + prefetch(va + L1_CACHE_BYTES); 1102 + #endif 1103 + 1104 + /* build an skb around the page buffer */ 1105 + skb = build_skb(va - IGC_SKB_PAD, truesize); 1106 + if (unlikely(!skb)) 1107 + return NULL; 1108 + 1109 + /* update pointers within the skb to store the data */ 1110 + skb_reserve(skb, IGC_SKB_PAD); 1111 + __skb_put(skb, size); 1112 + 1113 + /* update buffer offset */ 1114 + #if (PAGE_SIZE < 8192) 1115 + rx_buffer->page_offset ^= truesize; 1116 + #else 1117 + rx_buffer->page_offset += truesize; 1118 + #endif 1119 + 1120 + return skb; 1121 + } 1122 + 1123 + static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring, 1124 + struct igc_rx_buffer *rx_buffer, 1125 + union igc_adv_rx_desc *rx_desc, 1126 + unsigned int size) 1127 + { 1128 + void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; 1129 + #if (PAGE_SIZE < 8192) 1130 + unsigned int truesize = igc_rx_pg_size(rx_ring) / 2; 1131 + #else 1132 + unsigned int truesize = SKB_DATA_ALIGN(size); 1133 + #endif 1134 + unsigned int headlen; 1135 + struct sk_buff *skb; 1136 + 1137 + /* prefetch first cache line of first page */ 1138 + prefetch(va); 1139 + #if L1_CACHE_BYTES < 128 1140 + prefetch(va + L1_CACHE_BYTES); 1141 + #endif 1142 + 1143 + /* allocate a skb to store the frags */ 1144 + skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGC_RX_HDR_LEN); 1145 + if (unlikely(!skb)) 1146 + return NULL; 1147 + 1148 + /* Determine available headroom for copy */ 1149 + headlen = size; 1150 + if (headlen > IGC_RX_HDR_LEN) 1151 + headlen = eth_get_headlen(va, IGC_RX_HDR_LEN); 1152 + 1153 + /* align pull length to size of long to optimize memcpy performance */ 1154 + memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); 1155 + 1156 + /* update all of the pointers */ 1157 + size -= headlen; 1158 + if (size) { 1159 + skb_add_rx_frag(skb, 0, rx_buffer->page, 1160 + (va + headlen) - page_address(rx_buffer->page), 1161 + size, truesize); 1162 + #if (PAGE_SIZE < 8192) 1163 + rx_buffer->page_offset ^= truesize; 1164 + #else 1165 + rx_buffer->page_offset += truesize; 1166 + #endif 1167 + } else { 1168 + rx_buffer->pagecnt_bias++; 1169 + } 1170 + 1171 + return skb; 1172 + } 1173 + 1174 + /** 1175 + * igc_reuse_rx_page - page flip buffer and store it back on the ring 1176 + * @rx_ring: rx descriptor ring to store buffers on 1177 + * @old_buff: donor buffer to have page reused 1178 + * 1179 + * Synchronizes page for reuse by the adapter 1180 + */ 1181 + static void igc_reuse_rx_page(struct igc_ring *rx_ring, 1182 + struct igc_rx_buffer *old_buff) 1183 + { 1184 + u16 nta = rx_ring->next_to_alloc; 1185 + struct igc_rx_buffer *new_buff; 
1186 +
1187 + new_buff = &rx_ring->rx_buffer_info[nta];
1188 +
1189 + /* update, and store next to alloc */
1190 + nta++;
1191 + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1192 +
1193 + /* Transfer page from old buffer to new buffer.
1194 + * Move each member individually to avoid possible store
1195 + * forwarding stalls.
1196 + */
1197 + new_buff->dma = old_buff->dma;
1198 + new_buff->page = old_buff->page;
1199 + new_buff->page_offset = old_buff->page_offset;
1200 + new_buff->pagecnt_bias = old_buff->pagecnt_bias;
1201 + }
1202 +
1203 + static inline bool igc_page_is_reserved(struct page *page)
1204 + {
1205 + return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
1206 + }
1207 +
1208 + static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer)
1209 + {
1210 + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
1211 + struct page *page = rx_buffer->page;
1212 +
1213 + /* avoid re-using remote pages */
1214 + if (unlikely(igc_page_is_reserved(page)))
1215 + return false;
1216 +
1217 + #if (PAGE_SIZE < 8192)
1218 + /* if we are only owner of page we can reuse it */
1219 + if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
1220 + return false;
1221 + #else
1222 + #define IGC_LAST_OFFSET \
1223 + (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048)
1224 +
1225 + if (rx_buffer->page_offset > IGC_LAST_OFFSET)
1226 + return false;
1227 + #endif
1228 +
1229 + /* If we have drained the page fragment pool we need to update
1230 + * the pagecnt_bias and page count so that we fully restock the
1231 + * number of references the driver holds.
1232 + */
1233 + if (unlikely(!pagecnt_bias)) {
1234 + page_ref_add(page, USHRT_MAX);
1235 + rx_buffer->pagecnt_bias = USHRT_MAX;
1236 + }
1237 +
1238 + return true;
1239 + }
1240 +
1241 + /**
1242 + * igc_is_non_eop - process handling of non-EOP buffers
1243 + * @rx_ring: Rx ring being processed
1244 + * @rx_desc: Rx descriptor for current buffer
1245 + *
1246 + *
1247 + * This function updates next to clean. If the buffer is an EOP buffer
1248 + * this function exits returning false, otherwise it advances next to
1249 + * clean to the next descriptor of the same frame and returns true
1250 + * indicating that this is in fact a non-EOP buffer.
1251 + */
1252 + static bool igc_is_non_eop(struct igc_ring *rx_ring,
1253 + union igc_adv_rx_desc *rx_desc)
1254 + {
1255 + u32 ntc = rx_ring->next_to_clean + 1;
1256 +
1257 + /* fetch, update, and store next to clean */
1258 + ntc = (ntc < rx_ring->count) ? ntc : 0;
1259 + rx_ring->next_to_clean = ntc;
1260 +
1261 + prefetch(IGC_RX_DESC(rx_ring, ntc));
1262 +
1263 + if (likely(igc_test_staterr(rx_desc, IGC_RXD_STAT_EOP)))
1264 + return false;
1265 +
1266 + return true;
1267 + }
1268 +
1269 + /**
1270 + * igc_cleanup_headers - Correct corrupted or empty headers
1271 + * @rx_ring: rx descriptor ring packet is being transacted on
1272 + * @rx_desc: pointer to the EOP Rx descriptor
1273 + * @skb: pointer to current skb being fixed
1274 + *
1275 + * Address the case where we are pulling data in on pages only
1276 + * and as such no data is present in the skb header.
1277 + *
1278 + * In addition if skb is not at least 60 bytes we need to pad it so that
1279 + * it is large enough to qualify as a valid Ethernet frame.
1280 + *
1281 + * Returns true if an error was encountered and skb was freed.
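The reuse test in igc_can_reuse_rx_page and the offset flipping in igc_add_rx_frag/igc_build_skb are easier to see in isolation; a standalone sketch of the PAGE_SIZE < 8192 case, where the two 2048-byte halves of one page alternate and a bias counter tracks references still held by the driver (values assumed for illustration):

#include <stdio.h>

int main(void)
{
	unsigned int page_offset = 0;
	unsigned int truesize = 2048;    /* half of an assumed 4 KiB page */
	unsigned short pagecnt_bias = 1; /* references the driver holds */

	pagecnt_bias--;          /* one half handed to the network stack */
	page_offset ^= truesize; /* flip: 0 -> 2048 */
	page_offset ^= truesize; /* flip back: 2048 -> 0 */

	/* reuse is only safe while the driver is the sole owner, i.e.
	 * page_ref_count(page) - pagecnt_bias <= 1 in the real driver
	 */
	printf("offset=%u bias=%u\n", page_offset, pagecnt_bias);
	return 0;
}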
1282 + */
1283 + static bool igc_cleanup_headers(struct igc_ring *rx_ring,
1284 + union igc_adv_rx_desc *rx_desc,
1285 + struct sk_buff *skb)
1286 + {
1287 + if (unlikely((igc_test_staterr(rx_desc,
1288 + IGC_RXDEXT_ERR_FRAME_ERR_MASK)))) {
1289 + struct net_device *netdev = rx_ring->netdev;
1290 +
1291 + if (!(netdev->features & NETIF_F_RXALL)) {
1292 + dev_kfree_skb_any(skb);
1293 + return true;
1294 + }
1295 + }
1296 +
1297 + /* if eth_skb_pad returns an error the skb was freed */
1298 + if (eth_skb_pad(skb))
1299 + return true;
1300 +
1301 + return false;
1302 + }
1303 +
1304 + static void igc_put_rx_buffer(struct igc_ring *rx_ring,
1305 + struct igc_rx_buffer *rx_buffer)
1306 + {
1307 + if (igc_can_reuse_rx_page(rx_buffer)) {
1308 + /* hand second half of page back to the ring */
1309 + igc_reuse_rx_page(rx_ring, rx_buffer);
1310 + } else {
1311 + /* We are not reusing the buffer so unmap it and free
1312 + * any references we are holding to it
1313 + */
1314 + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
1315 + igc_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
1316 + IGC_RX_DMA_ATTR);
1317 + __page_frag_cache_drain(rx_buffer->page,
1318 + rx_buffer->pagecnt_bias);
1319 + }
1320 +
1321 + /* clear contents of rx_buffer */
1322 + rx_buffer->page = NULL;
1323 + }
1324 +
1325 + /**
1326 + * igc_alloc_rx_buffers - Replace used receive buffers; packet split
1327 + * @rx_ring: rx descriptor ring to refill with up to @cleaned_count buffers
1328 + */
1329 + static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count)
1330 + {
1331 + union igc_adv_rx_desc *rx_desc;
1332 + u16 i = rx_ring->next_to_use;
1333 + struct igc_rx_buffer *bi;
1334 + u16 bufsz;
1335 +
1336 + /* nothing to do */
1337 + if (!cleaned_count)
1338 + return;
1339 +
1340 + rx_desc = IGC_RX_DESC(rx_ring, i);
1341 + bi = &rx_ring->rx_buffer_info[i];
1342 + i -= rx_ring->count;
1343 +
1344 + bufsz = igc_rx_bufsz(rx_ring);
1345 +
1346 + do {
1347 + if (!igc_alloc_mapped_page(rx_ring, bi))
1348 + break;
1349 +
1350 + /* sync the buffer for use by the device */
1351 + dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
1352 + bi->page_offset, bufsz,
1353 + DMA_FROM_DEVICE);
1354 +
1355 + /* Refresh the desc even if buffer_addrs didn't change
1356 + * because each write-back erases this info.
1357 + */
1358 + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
1359 +
1360 + rx_desc++;
1361 + bi++;
1362 + i++;
1363 + if (unlikely(!i)) {
1364 + rx_desc = IGC_RX_DESC(rx_ring, 0);
1365 + bi = rx_ring->rx_buffer_info;
1366 + i -= rx_ring->count;
1367 + }
1368 +
1369 + /* clear the length for the next_to_use descriptor */
1370 + rx_desc->wb.upper.length = 0;
1371 +
1372 + cleaned_count--;
1373 + } while (cleaned_count);
1374 +
1375 + i += rx_ring->count;
1376 +
1377 + if (rx_ring->next_to_use != i) {
1378 + /* record the next descriptor to use */
1379 + rx_ring->next_to_use = i;
1380 +
1381 + /* update next to alloc since we have filled the ring */
1382 + rx_ring->next_to_alloc = i;
1383 +
1384 + /* Force memory writes to complete before letting h/w
1385 + * know there are new descriptors to fetch. (Only
1386 + * applicable for weak-ordered memory model archs,
1387 + * such as IA-64).
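igc_alloc_rx_buffers above biases its counter by -count so the wrap test is a compare against zero instead of against the ring size; the same idea reduced to a standalone sketch (refill() is a hypothetical stand-in for the page allocation and descriptor write):

static void refill(unsigned int slot)
{
	(void)slot; /* allocate a page, write rx_desc->read.pkt_addr, ... */
}

static void alloc_bufs(unsigned int next_to_use, unsigned int count,
		       unsigned int todo)
{
	int i = (int)next_to_use - (int)count; /* biased: runs toward 0 */

	while (todo--) {
		refill((unsigned int)(i + (int)count)); /* real slot index */
		i++;
		if (!i)               /* hit the end of the ring: wrap */
			i -= (int)count;
	}
}

int main(void)
{
	alloc_bufs(254, 256, 4); /* fills slots 254, 255, 0, 1 */
	return 0;
}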
1388 + */ 1389 + wmb(); 1390 + writel(i, rx_ring->tail); 1391 + } 1392 + } 1393 + 1394 + static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) 1395 + { 1396 + unsigned int total_bytes = 0, total_packets = 0; 1397 + struct igc_ring *rx_ring = q_vector->rx.ring; 1398 + struct sk_buff *skb = rx_ring->skb; 1399 + u16 cleaned_count = igc_desc_unused(rx_ring); 1400 + 1401 + while (likely(total_packets < budget)) { 1402 + union igc_adv_rx_desc *rx_desc; 1403 + struct igc_rx_buffer *rx_buffer; 1404 + unsigned int size; 1405 + 1406 + /* return some buffers to hardware, one at a time is too slow */ 1407 + if (cleaned_count >= IGC_RX_BUFFER_WRITE) { 1408 + igc_alloc_rx_buffers(rx_ring, cleaned_count); 1409 + cleaned_count = 0; 1410 + } 1411 + 1412 + rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean); 1413 + size = le16_to_cpu(rx_desc->wb.upper.length); 1414 + if (!size) 1415 + break; 1416 + 1417 + /* This memory barrier is needed to keep us from reading 1418 + * any other fields out of the rx_desc until we know the 1419 + * descriptor has been written back 1420 + */ 1421 + dma_rmb(); 1422 + 1423 + rx_buffer = igc_get_rx_buffer(rx_ring, size); 1424 + 1425 + /* retrieve a buffer from the ring */ 1426 + if (skb) 1427 + igc_add_rx_frag(rx_ring, rx_buffer, skb, size); 1428 + else if (ring_uses_build_skb(rx_ring)) 1429 + skb = igc_build_skb(rx_ring, rx_buffer, rx_desc, size); 1430 + else 1431 + skb = igc_construct_skb(rx_ring, rx_buffer, 1432 + rx_desc, size); 1433 + 1434 + /* exit if we failed to retrieve a buffer */ 1435 + if (!skb) { 1436 + rx_ring->rx_stats.alloc_failed++; 1437 + rx_buffer->pagecnt_bias++; 1438 + break; 1439 + } 1440 + 1441 + igc_put_rx_buffer(rx_ring, rx_buffer); 1442 + cleaned_count++; 1443 + 1444 + /* fetch next buffer in frame if non-eop */ 1445 + if (igc_is_non_eop(rx_ring, rx_desc)) 1446 + continue; 1447 + 1448 + /* verify the packet layout is correct */ 1449 + if (igc_cleanup_headers(rx_ring, rx_desc, skb)) { 1450 + skb = NULL; 1451 + continue; 1452 + } 1453 + 1454 + /* probably a little skewed due to removing CRC */ 1455 + total_bytes += skb->len; 1456 + 1457 + /* populate checksum, timestamp, VLAN, and protocol */ 1458 + igc_process_skb_fields(rx_ring, rx_desc, skb); 1459 + 1460 + napi_gro_receive(&q_vector->napi, skb); 1461 + 1462 + /* reset skb pointer */ 1463 + skb = NULL; 1464 + 1465 + /* update budget accounting */ 1466 + total_packets++; 1467 + } 1468 + 1469 + /* place incomplete frames back on ring for completion */ 1470 + rx_ring->skb = skb; 1471 + 1472 + u64_stats_update_begin(&rx_ring->rx_syncp); 1473 + rx_ring->rx_stats.packets += total_packets; 1474 + rx_ring->rx_stats.bytes += total_bytes; 1475 + u64_stats_update_end(&rx_ring->rx_syncp); 1476 + q_vector->rx.total_packets += total_packets; 1477 + q_vector->rx.total_bytes += total_bytes; 1478 + 1479 + if (cleaned_count) 1480 + igc_alloc_rx_buffers(rx_ring, cleaned_count); 1481 + 1482 + return total_packets; 1483 + } 1484 + 1485 + static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring) 1486 + { 1487 + return ring_uses_build_skb(rx_ring) ? 
IGC_SKB_PAD : 0; 1488 + } 1489 + 1490 + static bool igc_alloc_mapped_page(struct igc_ring *rx_ring, 1491 + struct igc_rx_buffer *bi) 1492 + { 1493 + struct page *page = bi->page; 1494 + dma_addr_t dma; 1495 + 1496 + /* since we are recycling buffers we should seldom need to alloc */ 1497 + if (likely(page)) 1498 + return true; 1499 + 1500 + /* alloc new page for storage */ 1501 + page = dev_alloc_pages(igc_rx_pg_order(rx_ring)); 1502 + if (unlikely(!page)) { 1503 + rx_ring->rx_stats.alloc_failed++; 1504 + return false; 1505 + } 1506 + 1507 + /* map page for use */ 1508 + dma = dma_map_page_attrs(rx_ring->dev, page, 0, 1509 + igc_rx_pg_size(rx_ring), 1510 + DMA_FROM_DEVICE, 1511 + IGC_RX_DMA_ATTR); 1512 + 1513 + /* if mapping failed free memory back to system since 1514 + * there isn't much point in holding memory we can't use 1515 + */ 1516 + if (dma_mapping_error(rx_ring->dev, dma)) { 1517 + __free_page(page); 1518 + 1519 + rx_ring->rx_stats.alloc_failed++; 1520 + return false; 1521 + } 1522 + 1523 + bi->dma = dma; 1524 + bi->page = page; 1525 + bi->page_offset = igc_rx_offset(rx_ring); 1526 + bi->pagecnt_bias = 1; 1527 + 1528 + return true; 1529 + } 1530 + 1531 + /** 1532 + * igc_clean_tx_irq - Reclaim resources after transmit completes 1533 + * @q_vector: pointer to q_vector containing needed info 1534 + * @napi_budget: Used to determine if we are in netpoll 1535 + * 1536 + * returns true if ring is completely cleaned 1537 + */ 1538 + static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) 1539 + { 1540 + struct igc_adapter *adapter = q_vector->adapter; 1541 + unsigned int total_bytes = 0, total_packets = 0; 1542 + unsigned int budget = q_vector->tx.work_limit; 1543 + struct igc_ring *tx_ring = q_vector->tx.ring; 1544 + unsigned int i = tx_ring->next_to_clean; 1545 + struct igc_tx_buffer *tx_buffer; 1546 + union igc_adv_tx_desc *tx_desc; 1547 + 1548 + if (test_bit(__IGC_DOWN, &adapter->state)) 1549 + return true; 1550 + 1551 + tx_buffer = &tx_ring->tx_buffer_info[i]; 1552 + tx_desc = IGC_TX_DESC(tx_ring, i); 1553 + i -= tx_ring->count; 1554 + 1555 + do { 1556 + union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; 1557 + 1558 + /* if next_to_watch is not set then there is no work pending */ 1559 + if (!eop_desc) 1560 + break; 1561 + 1562 + /* prevent any other reads prior to eop_desc */ 1563 + smp_rmb(); 1564 + 1565 + /* if DD is not set pending work has not been completed */ 1566 + if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD))) 1567 + break; 1568 + 1569 + /* clear next_to_watch to prevent false hangs */ 1570 + tx_buffer->next_to_watch = NULL; 1571 + 1572 + /* update the statistics for this packet */ 1573 + total_bytes += tx_buffer->bytecount; 1574 + total_packets += tx_buffer->gso_segs; 1575 + 1576 + /* free the skb */ 1577 + napi_consume_skb(tx_buffer->skb, napi_budget); 1578 + 1579 + /* unmap skb header data */ 1580 + dma_unmap_single(tx_ring->dev, 1581 + dma_unmap_addr(tx_buffer, dma), 1582 + dma_unmap_len(tx_buffer, len), 1583 + DMA_TO_DEVICE); 1584 + 1585 + /* clear tx_buffer data */ 1586 + dma_unmap_len_set(tx_buffer, len, 0); 1587 + 1588 + /* clear last DMA location and unmap remaining buffers */ 1589 + while (tx_desc != eop_desc) { 1590 + tx_buffer++; 1591 + tx_desc++; 1592 + i++; 1593 + if (unlikely(!i)) { 1594 + i -= tx_ring->count; 1595 + tx_buffer = tx_ring->tx_buffer_info; 1596 + tx_desc = IGC_TX_DESC(tx_ring, 0); 1597 + } 1598 + 1599 + /* unmap any remaining paged data */ 1600 + if (dma_unmap_len(tx_buffer, len)) { 1601 + 
dma_unmap_page(tx_ring->dev, 1602 + dma_unmap_addr(tx_buffer, dma), 1603 + dma_unmap_len(tx_buffer, len), 1604 + DMA_TO_DEVICE); 1605 + dma_unmap_len_set(tx_buffer, len, 0); 1606 + } 1607 + } 1608 + 1609 + /* move us one more past the eop_desc for start of next pkt */ 1610 + tx_buffer++; 1611 + tx_desc++; 1612 + i++; 1613 + if (unlikely(!i)) { 1614 + i -= tx_ring->count; 1615 + tx_buffer = tx_ring->tx_buffer_info; 1616 + tx_desc = IGC_TX_DESC(tx_ring, 0); 1617 + } 1618 + 1619 + /* issue prefetch for next Tx descriptor */ 1620 + prefetch(tx_desc); 1621 + 1622 + /* update budget accounting */ 1623 + budget--; 1624 + } while (likely(budget)); 1625 + 1626 + netdev_tx_completed_queue(txring_txq(tx_ring), 1627 + total_packets, total_bytes); 1628 + 1629 + i += tx_ring->count; 1630 + tx_ring->next_to_clean = i; 1631 + u64_stats_update_begin(&tx_ring->tx_syncp); 1632 + tx_ring->tx_stats.bytes += total_bytes; 1633 + tx_ring->tx_stats.packets += total_packets; 1634 + u64_stats_update_end(&tx_ring->tx_syncp); 1635 + q_vector->tx.total_bytes += total_bytes; 1636 + q_vector->tx.total_packets += total_packets; 1637 + 1638 + if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { 1639 + struct igc_hw *hw = &adapter->hw; 1640 + 1641 + /* Detect a transmit hang in hardware, this serializes the 1642 + * check with the clearing of time_stamp and movement of i 1643 + */ 1644 + clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); 1645 + if (tx_buffer->next_to_watch && 1646 + time_after(jiffies, tx_buffer->time_stamp + 1647 + (adapter->tx_timeout_factor * HZ)) && 1648 + !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF)) { 1649 + /* detected Tx unit hang */ 1650 + dev_err(tx_ring->dev, 1651 + "Detected Tx Unit Hang\n" 1652 + " Tx Queue <%d>\n" 1653 + " TDH <%x>\n" 1654 + " TDT <%x>\n" 1655 + " next_to_use <%x>\n" 1656 + " next_to_clean <%x>\n" 1657 + "buffer_info[next_to_clean]\n" 1658 + " time_stamp <%lx>\n" 1659 + " next_to_watch <%p>\n" 1660 + " jiffies <%lx>\n" 1661 + " desc.status <%x>\n", 1662 + tx_ring->queue_index, 1663 + rd32(IGC_TDH(tx_ring->reg_idx)), 1664 + readl(tx_ring->tail), 1665 + tx_ring->next_to_use, 1666 + tx_ring->next_to_clean, 1667 + tx_buffer->time_stamp, 1668 + tx_buffer->next_to_watch, 1669 + jiffies, 1670 + tx_buffer->next_to_watch->wb.status); 1671 + netif_stop_subqueue(tx_ring->netdev, 1672 + tx_ring->queue_index); 1673 + 1674 + /* we are about to reset, no point in enabling stuff */ 1675 + return true; 1676 + } 1677 + } 1678 + 1679 + #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) 1680 + if (unlikely(total_packets && 1681 + netif_carrier_ok(tx_ring->netdev) && 1682 + igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { 1683 + /* Make sure that anybody stopping the queue after this 1684 + * sees the new next_to_clean. 
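The stop/wake protocol in this function and in __igc_maybe_stop_tx earlier pivots on one occupancy helper; its usual form is sketched below (the body is assumed from the igb lineage, since the helper itself is not part of this hunk):

#include <stdio.h>

/* free slots between consumer (ntc) and producer (ntu); one slot is
 * always left empty so that ntu == ntc means empty rather than full
 */
static unsigned int desc_unused(unsigned int ntc, unsigned int ntu,
				unsigned int count)
{
	return ((ntc > ntu) ? 0 : count) + ntc - ntu - 1;
}

int main(void)
{
	/* an empty 256-entry ring reports 255 usable slots */
	printf("%u\n", desc_unused(0, 0, 256));
	return 0;
}

The transmit path stops the subqueue when this dips below what a worst-case frame needs, and the cleanup path wakes it once the count climbs back over the wake threshold; the smp_mb() pairing on both sides keeps the two CPUs from missing each other's update.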
1685 + */
1686 + smp_mb();
1687 + if (__netif_subqueue_stopped(tx_ring->netdev,
1688 + tx_ring->queue_index) &&
1689 + !(test_bit(__IGC_DOWN, &adapter->state))) {
1690 + netif_wake_subqueue(tx_ring->netdev,
1691 + tx_ring->queue_index);
1692 +
1693 + u64_stats_update_begin(&tx_ring->tx_syncp);
1694 + tx_ring->tx_stats.restart_queue++;
1695 + u64_stats_update_end(&tx_ring->tx_syncp);
1696 + }
1697 + }
1698 +
1699 + return !!budget;
1700 + }
1701 +
1702 + /**
1703 + * igc_ioctl - I/O control method
1704 + * @netdev: network interface device structure
1705 + * @ifr: pointer to interface request structure
1706 + * @cmd: command
1707 + */
1708 + static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1709 + {
1710 + switch (cmd) {
1711 + default:
1712 + return -EOPNOTSUPP;
1713 + }
1714 + }
1715 +
1716 + /**
1717 + * igc_up - Open the interface and prepare it to handle traffic
1718 + * @adapter: board private structure
1719 + */
1720 + static void igc_up(struct igc_adapter *adapter)
1721 + {
1722 + struct igc_hw *hw = &adapter->hw;
1723 + int i = 0;
1724 +
1725 + /* hardware has been reset, we need to reload some things */
1726 + igc_configure(adapter);
1727 +
1728 + clear_bit(__IGC_DOWN, &adapter->state);
1729 +
1730 + for (i = 0; i < adapter->num_q_vectors; i++)
1731 + napi_enable(&adapter->q_vector[i]->napi);
1732 +
1733 + if (adapter->msix_entries)
1734 + igc_configure_msix(adapter);
1735 + else
1736 + igc_assign_vector(adapter->q_vector[0], 0);
1737 +
1738 + /* Clear any pending interrupts. */
1739 + rd32(IGC_ICR);
1740 + igc_irq_enable(adapter);
1741 +
1742 + netif_tx_start_all_queues(adapter->netdev);
1743 +
1744 + /* start the watchdog. */
1745 + hw->mac.get_link_status = 1;
1746 + schedule_work(&adapter->watchdog_task);
1747 + }
1748 +
1749 + /**
1750 + * igc_update_stats - Update the board statistics counters
1751 + * @adapter: board private structure
1752 + */
1753 + static void igc_update_stats(struct igc_adapter *adapter)
1754 + {
1755 + }
1756 +
1757 + static void igc_nfc_filter_exit(struct igc_adapter *adapter)
1758 + {
1759 + }
1760 +
1761 + /**
1762 + * igc_down - Close the interface
1763 + * @adapter: board private structure
1764 + */
1765 + static void igc_down(struct igc_adapter *adapter)
1766 + {
1767 + struct net_device *netdev = adapter->netdev;
1768 + struct igc_hw *hw = &adapter->hw;
1769 + u32 tctl, rctl;
1770 + int i = 0;
1771 +
1772 + set_bit(__IGC_DOWN, &adapter->state);
1773 +
1774 + /* disable receives in the hardware */
1775 + rctl = rd32(IGC_RCTL);
1776 + wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN);
1777 + /* flush and sleep below */
1778 +
1779 + igc_nfc_filter_exit(adapter);
1780 +
1781 + /* set trans_start so we don't get spurious watchdogs during reset */
1782 + netif_trans_update(netdev);
1783 +
1784 + netif_carrier_off(netdev);
1785 + netif_tx_stop_all_queues(netdev);
1786 +
1787 + /* disable transmits in the hardware */
1788 + tctl = rd32(IGC_TCTL);
1789 + tctl &= ~IGC_TCTL_EN;
1790 + wr32(IGC_TCTL, tctl);
1791 + /* flush both disables and wait for them to finish */
1792 + wrfl();
1793 + usleep_range(10000, 20000);
1794 +
1795 + igc_irq_disable(adapter);
1796 +
1797 + adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
1798 +
1799 + for (i = 0; i < adapter->num_q_vectors; i++) {
1800 + if (adapter->q_vector[i]) {
1801 + napi_synchronize(&adapter->q_vector[i]->napi);
1802 + napi_disable(&adapter->q_vector[i]->napi);
1803 + }
1804 + }
1805 +
1806 + del_timer_sync(&adapter->watchdog_timer);
1807 + del_timer_sync(&adapter->phy_info_timer);
1808 +
1809 + /* record the stats before
reset */
1810 + spin_lock(&adapter->stats64_lock);
1811 + igc_update_stats(adapter);
1812 + spin_unlock(&adapter->stats64_lock);
1813 +
1814 + adapter->link_speed = 0;
1815 + adapter->link_duplex = 0;
1816 +
1817 + if (!pci_channel_offline(adapter->pdev))
1818 + igc_reset(adapter);
1819 +
1820 + /* clear VLAN promisc flag so VFTA will be updated if necessary */
1821 + adapter->flags &= ~IGC_FLAG_VLAN_PROMISC;
1822 +
1823 + igc_clean_all_tx_rings(adapter);
1824 + igc_clean_all_rx_rings(adapter);
1825 + }
1826 +
1827 + static void igc_reinit_locked(struct igc_adapter *adapter)
1828 + {
1829 + WARN_ON(in_interrupt());
1830 + while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
1831 + usleep_range(1000, 2000);
1832 + igc_down(adapter);
1833 + igc_up(adapter);
1834 + clear_bit(__IGC_RESETTING, &adapter->state);
1835 + }
1836 +
1837 + static void igc_reset_task(struct work_struct *work)
1838 + {
1839 + struct igc_adapter *adapter;
1840 +
1841 + adapter = container_of(work, struct igc_adapter, reset_task);
1842 +
1843 + netdev_err(adapter->netdev, "Reset adapter\n");
1844 + igc_reinit_locked(adapter);
1845 + }
1846 +
1847 + /**
1848 + * igc_change_mtu - Change the Maximum Transfer Unit
1849 + * @netdev: network interface device structure
1850 + * @new_mtu: new value for maximum frame size
1851 + *
1852 + * Returns 0 on success, negative on failure
1853 + */
1854 + static int igc_change_mtu(struct net_device *netdev, int new_mtu)
1855 + {
1856 + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
1857 + struct igc_adapter *adapter = netdev_priv(netdev);
1858 + struct pci_dev *pdev = adapter->pdev;
1859 +
1860 + /* adjust max frame to be at least the size of a standard frame */
1861 + if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
1862 + max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
1863 +
1864 + while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
1865 + usleep_range(1000, 2000);
1866 +
1867 + /* igc_down has a dependency on max_frame_size */
1868 + adapter->max_frame_size = max_frame;
1869 +
1870 + if (netif_running(netdev))
1871 + igc_down(adapter);
1872 +
1873 + dev_info(&pdev->dev, "changing MTU from %d to %d\n",
1874 + netdev->mtu, new_mtu);
1875 + netdev->mtu = new_mtu;
1876 +
1877 + if (netif_running(netdev))
1878 + igc_up(adapter);
1879 + else
1880 + igc_reset(adapter);
1881 +
1882 + clear_bit(__IGC_RESETTING, &adapter->state);
1883 +
1884 + return 0;
1885 + }
1886 +
1887 + /**
1888 + * igc_get_stats - Get System Network Statistics
1889 + * @netdev: network interface device structure
1890 + *
1891 + * Returns the address of the device statistics structure.
1892 + * The statistics are updated here and also from the timer callback.
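For a concrete instance of the max_frame computation in igc_change_mtu above, with the standard Ethernet header constants (standalone sketch):

#include <stdio.h>

#define ETH_HLEN    14 /* destination + source MAC + ethertype */
#define ETH_FCS_LEN 4  /* trailing CRC */
#define VLAN_HLEN   4  /* one optional 802.1Q tag */

int main(void)
{
	int new_mtu = 1500; /* example MTU */

	/* 1500 + 14 + 4 + 4 = 1522 bytes on the wire */
	printf("max_frame=%d\n", new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
	return 0;
}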
1893 + */
1894 + static struct net_device_stats *igc_get_stats(struct net_device *netdev)
1895 + {
1896 + struct igc_adapter *adapter = netdev_priv(netdev);
1897 +
1898 + if (!test_bit(__IGC_RESETTING, &adapter->state))
1899 + igc_update_stats(adapter);
1900 +
1901 + /* only return the current stats */
1902 + return &netdev->stats;
1903 + }
1904 +
1905 + /**
1906 + * igc_configure - configure the hardware for RX and TX
1907 + * @adapter: private board structure
1908 + */
1909 + static void igc_configure(struct igc_adapter *adapter)
1910 + {
1911 + struct net_device *netdev = adapter->netdev;
1912 + int i = 0;
1913 +
1914 + igc_get_hw_control(adapter);
1915 + igc_set_rx_mode(netdev);
1916 +
1917 + igc_setup_tctl(adapter);
1918 + igc_setup_mrqc(adapter);
1919 + igc_setup_rctl(adapter);
1920 +
1921 + igc_configure_tx(adapter);
1922 + igc_configure_rx(adapter);
1923 +
1924 + igc_rx_fifo_flush_base(&adapter->hw);
1925 +
1926 + /* call igc_desc_unused which always leaves
1927 + * at least 1 descriptor unused to make sure
1928 + * next_to_use != next_to_clean
1929 + */
1930 + for (i = 0; i < adapter->num_rx_queues; i++) {
1931 + struct igc_ring *ring = adapter->rx_ring[i];
1932 +
1933 + igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
1934 + }
1935 + }
1936 +
1937 + /**
1938 + * igc_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table
1939 + * @adapter: Pointer to adapter structure
1940 + * @index: Index of the RAR entry which needs to be synced with the MAC table
1941 + */
1942 + static void igc_rar_set_index(struct igc_adapter *adapter, u32 index)
1943 + {
1944 + u8 *addr = adapter->mac_table[index].addr;
1945 + struct igc_hw *hw = &adapter->hw;
1946 + u32 rar_low, rar_high;
1947 +
1948 + /* HW expects these to be in network order when they are plugged
1949 + * into the registers, which are little endian. To guarantee that
1950 + * ordering we do a leXX_to_cpup here to be ready for the
1951 + * byteswap that occurs with writel
1952 + */
1953 + rar_low = le32_to_cpup((__le32 *)(addr));
1954 + rar_high = le16_to_cpup((__le16 *)(addr + 4));
1955 +
1956 + /* Indicate to hardware the Address is Valid. */
1957 + if (adapter->mac_table[index].state & IGC_MAC_STATE_IN_USE) {
1958 + if (is_valid_ether_addr(addr))
1959 + rar_high |= IGC_RAH_AV;
1960 +
1961 + rar_high |= IGC_RAH_POOL_1 <<
1962 + adapter->mac_table[index].queue;
1963 + }
1964 +
1965 + wr32(IGC_RAL(index), rar_low);
1966 + wrfl();
1967 + wr32(IGC_RAH(index), rar_high);
1968 + wrfl();
1969 + }
1970 +
1971 + /* Set default MAC address for the PF in the first RAR entry */
1972 + static void igc_set_default_mac_filter(struct igc_adapter *adapter)
1973 + {
1974 + struct igc_mac_addr *mac_table = &adapter->mac_table[0];
1975 +
1976 + ether_addr_copy(mac_table->addr, adapter->hw.mac.addr);
1977 + mac_table->state = IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE;
1978 +
1979 + igc_rar_set_index(adapter, 0);
1980 + }
1981 +
1982 + /**
1983 + * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
1984 + * @netdev: network interface device structure
1985 + *
1986 + * The set_rx_mode entry point is called whenever the unicast or multicast
1987 + * address lists or the network interface flags are updated. This routine is
1988 + * responsible for configuring the hardware for proper unicast, multicast,
1989 + * promiscuous mode, and all-multi behavior.
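Byte-for-byte, the le32_to_cpup/le16_to_cpup pair in igc_rar_set_index above packs the six MAC bytes into the two receive-address registers like this (standalone sketch; the example address is made up):

#include <stdio.h>

int main(void)
{
	unsigned char addr[6] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };
	unsigned int rar_low, rar_high;

	/* first four bytes land in RAL, last two in the low half of RAH */
	rar_low  = addr[0] | addr[1] << 8 | addr[2] << 16 |
		   (unsigned int)addr[3] << 24;
	rar_high = addr[4] | addr[5] << 8;

	printf("RAL=%08x RAH=%04x\n", rar_low, rar_high); /* aa000002 ccbb */
	return 0;
}

The writel that follows byte-swaps back on big-endian hosts, which is why the driver deliberately reads the address as little-endian first.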
1990 + */
1991 + static void igc_set_rx_mode(struct net_device *netdev)
1992 + {
1993 + }
1994 +
1995 + /**
1996 + * igc_msix_other - msix other interrupt handler
1997 + * @irq: interrupt number
1998 + * @data: pointer to a q_vector
1999 + */
2000 + static irqreturn_t igc_msix_other(int irq, void *data)
2001 + {
2002 + struct igc_adapter *adapter = data;
2003 + struct igc_hw *hw = &adapter->hw;
2004 + u32 icr = rd32(IGC_ICR);
2005 +
2006 + /* reading ICR causes bit 31 of EICR to be cleared */
2007 + if (icr & IGC_ICR_DRSTA)
2008 + schedule_work(&adapter->reset_task);
2009 +
2010 + if (icr & IGC_ICR_DOUTSYNC) {
2011 + /* HW is reporting DMA is out of sync */
2012 + adapter->stats.doosync++;
2013 + }
2014 +
2015 + if (icr & IGC_ICR_LSC) {
2016 + hw->mac.get_link_status = 1;
2017 + /* guard against interrupt when we're going down */
2018 + if (!test_bit(__IGC_DOWN, &adapter->state))
2019 + mod_timer(&adapter->watchdog_timer, jiffies + 1);
2020 + }
2021 +
2022 + wr32(IGC_EIMS, adapter->eims_other);
2023 +
2024 + return IRQ_HANDLED;
2025 + }
2026 +
2027 + /**
2028 + * igc_write_ivar - configure ivar for given MSI-X vector
2029 + * @hw: pointer to the HW structure
2030 + * @msix_vector: vector number we are allocating to a given ring
2031 + * @index: row index of IVAR register to write within IVAR table
2032 + * @offset: column offset within IVAR, should be a multiple of 8
2033 + *
2034 + * The IVAR table consists of 2 columns,
2035 + * each containing a cause allocation for an Rx and Tx ring, and a
2036 + * variable number of rows depending on the number of queues supported.
2037 + */
2038 + static void igc_write_ivar(struct igc_hw *hw, int msix_vector,
2039 + int index, int offset)
2040 + {
2041 + u32 ivar = array_rd32(IGC_IVAR0, index);
2042 +
2043 + /* clear any bits that are currently set */
2044 + ivar &= ~((u32)0xFF << offset);
2045 +
2046 + /* write vector and valid bit */
2047 + ivar |= (msix_vector | IGC_IVAR_VALID) << offset;
2048 +
2049 + array_wr32(IGC_IVAR0, index, ivar);
2050 + }
2051 +
2052 + static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector)
2053 + {
2054 + struct igc_adapter *adapter = q_vector->adapter;
2055 + struct igc_hw *hw = &adapter->hw;
2056 + int rx_queue = IGC_N0_QUEUE;
2057 + int tx_queue = IGC_N0_QUEUE;
2058 +
2059 + if (q_vector->rx.ring)
2060 + rx_queue = q_vector->rx.ring->reg_idx;
2061 + if (q_vector->tx.ring)
2062 + tx_queue = q_vector->tx.ring->reg_idx;
2063 +
2064 + switch (hw->mac.type) {
2065 + case igc_i225:
2066 + if (rx_queue > IGC_N0_QUEUE)
2067 + igc_write_ivar(hw, msix_vector,
2068 + rx_queue >> 1,
2069 + (rx_queue & 0x1) << 4);
2070 + if (tx_queue > IGC_N0_QUEUE)
2071 + igc_write_ivar(hw, msix_vector,
2072 + tx_queue >> 1,
2073 + ((tx_queue & 0x1) << 4) + 8);
2074 + q_vector->eims_value = BIT(msix_vector);
2075 + break;
2076 + default:
2077 + WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n");
2078 + break;
2079 + }
2080 +
2081 + /* add q_vector eims value to global eims_enable_mask */
2082 + adapter->eims_enable_mask |= q_vector->eims_value;
2083 +
2084 + /* configure q_vector to set itr on first interrupt */
2085 + q_vector->set_itr = 1;
2086 + }
2087 +
2088 + /**
2089 + * igc_configure_msix - Configure MSI-X hardware
2090 + * @adapter: Pointer to adapter structure
2091 + *
2092 + * igc_configure_msix sets up the hardware to properly
2093 + * generate MSI-X interrupts.
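The shift arithmetic in igc_assign_vector maps a queue number to a byte lane within a 32-bit IVAR entry; spelled out as a standalone sketch (layout inferred from the code above: two queues per row, Rx in the low byte of each 16-bit half, Tx in the byte above it):

#include <stdio.h>

int main(void)
{
	unsigned int q;

	for (q = 0; q < 4; q++) {
		unsigned int row    = q >> 1;         /* IVAR register index */
		unsigned int rx_off = (q & 0x1) << 4; /* bit 0 or bit 16 */
		unsigned int tx_off = rx_off + 8;     /* bit 8 or bit 24 */

		printf("queue %u: row %u, rx bit %u, tx bit %u\n",
		       q, row, rx_off, tx_off);
	}
	return 0;
}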
2094 + */ 2095 + static void igc_configure_msix(struct igc_adapter *adapter) 2096 + { 2097 + struct igc_hw *hw = &adapter->hw; 2098 + int i, vector = 0; 2099 + u32 tmp; 2100 + 2101 + adapter->eims_enable_mask = 0; 2102 + 2103 + /* set vector for other causes, i.e. link changes */ 2104 + switch (hw->mac.type) { 2105 + case igc_i225: 2106 + /* Turn on MSI-X capability first, or our settings 2107 + * won't stick. And it will take days to debug. 2108 + */ 2109 + wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE | 2110 + IGC_GPIE_PBA | IGC_GPIE_EIAME | 2111 + IGC_GPIE_NSICR); 2112 + 2113 + /* enable msix_other interrupt */ 2114 + adapter->eims_other = BIT(vector); 2115 + tmp = (vector++ | IGC_IVAR_VALID) << 8; 2116 + 2117 + wr32(IGC_IVAR_MISC, tmp); 2118 + break; 2119 + default: 2120 + /* do nothing, since nothing else supports MSI-X */ 2121 + break; 2122 + } /* switch (hw->mac.type) */ 2123 + 2124 + adapter->eims_enable_mask |= adapter->eims_other; 2125 + 2126 + for (i = 0; i < adapter->num_q_vectors; i++) 2127 + igc_assign_vector(adapter->q_vector[i], vector++); 2128 + 2129 + wrfl(); 2130 + } 2131 + 2132 + static irqreturn_t igc_msix_ring(int irq, void *data) 2133 + { 2134 + struct igc_q_vector *q_vector = data; 2135 + 2136 + /* Write the ITR value calculated from the previous interrupt. */ 2137 + igc_write_itr(q_vector); 2138 + 2139 + napi_schedule(&q_vector->napi); 2140 + 2141 + return IRQ_HANDLED; 2142 + } 2143 + 2144 + /** 2145 + * igc_request_msix - Initialize MSI-X interrupts 2146 + * @adapter: Pointer to adapter structure 2147 + * 2148 + * igc_request_msix allocates MSI-X vectors and requests interrupts from the 2149 + * kernel. 2150 + */ 2151 + static int igc_request_msix(struct igc_adapter *adapter) 2152 + { 2153 + int i = 0, err = 0, vector = 0, free_vector = 0; 2154 + struct net_device *netdev = adapter->netdev; 2155 + 2156 + err = request_irq(adapter->msix_entries[vector].vector, 2157 + &igc_msix_other, 0, netdev->name, adapter); 2158 + if (err) 2159 + goto err_out; 2160 + 2161 + for (i = 0; i < adapter->num_q_vectors; i++) { 2162 + struct igc_q_vector *q_vector = adapter->q_vector[i]; 2163 + 2164 + vector++; 2165 + 2166 + q_vector->itr_register = adapter->io_addr + IGC_EITR(vector); 2167 + 2168 + if (q_vector->rx.ring && q_vector->tx.ring) 2169 + sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, 2170 + q_vector->rx.ring->queue_index); 2171 + else if (q_vector->tx.ring) 2172 + sprintf(q_vector->name, "%s-tx-%u", netdev->name, 2173 + q_vector->tx.ring->queue_index); 2174 + else if (q_vector->rx.ring) 2175 + sprintf(q_vector->name, "%s-rx-%u", netdev->name, 2176 + q_vector->rx.ring->queue_index); 2177 + else 2178 + sprintf(q_vector->name, "%s-unused", netdev->name); 2179 + 2180 + err = request_irq(adapter->msix_entries[vector].vector, 2181 + igc_msix_ring, 0, q_vector->name, 2182 + q_vector); 2183 + if (err) 2184 + goto err_free; 2185 + } 2186 + 2187 + igc_configure_msix(adapter); 2188 + return 0; 2189 + 2190 + err_free: 2191 + /* free already assigned IRQs */ 2192 + free_irq(adapter->msix_entries[free_vector++].vector, adapter); 2193 + 2194 + vector--; 2195 + for (i = 0; i < vector; i++) { 2196 + free_irq(adapter->msix_entries[free_vector++].vector, 2197 + adapter->q_vector[i]); 2198 + } 2199 + err_out: 2200 + return err; 2201 + } 2202 + 2203 + /** 2204 + * igc_reset_q_vector - Reset config for interrupt vector 2205 + * @adapter: board private structure to initialize 2206 + * @v_idx: Index of vector to be reset 2207 + * 2208 + * If NAPI is enabled it will delete any references to the 2209 + * 
NAPI struct. This is preparation for igc_free_q_vector. 2210 + */ 2211 + static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx) 2212 + { 2213 + struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; 2214 + 2215 + /* if we're coming from igc_set_interrupt_capability, the vectors are 2216 + * not yet allocated 2217 + */ 2218 + if (!q_vector) 2219 + return; 2220 + 2221 + if (q_vector->tx.ring) 2222 + adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; 2223 + 2224 + if (q_vector->rx.ring) 2225 + adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; 2226 + 2227 + netif_napi_del(&q_vector->napi); 2228 + } 2229 + 2230 + static void igc_reset_interrupt_capability(struct igc_adapter *adapter) 2231 + { 2232 + int v_idx = adapter->num_q_vectors; 2233 + 2234 + if (adapter->msix_entries) { 2235 + pci_disable_msix(adapter->pdev); 2236 + kfree(adapter->msix_entries); 2237 + adapter->msix_entries = NULL; 2238 + } else if (adapter->flags & IGC_FLAG_HAS_MSI) { 2239 + pci_disable_msi(adapter->pdev); 2240 + } 2241 + 2242 + while (v_idx--) 2243 + igc_reset_q_vector(adapter, v_idx); 2244 + } 2245 + 2246 + /** 2247 + * igc_clear_interrupt_scheme - reset the device to a state of no interrupts 2248 + * @adapter: Pointer to adapter structure 2249 + * 2250 + * This function resets the device so that it has 0 rx queues, tx queues, and 2251 + * MSI-X interrupts allocated. 2252 + */ 2253 + static void igc_clear_interrupt_scheme(struct igc_adapter *adapter) 2254 + { 2255 + igc_free_q_vectors(adapter); 2256 + igc_reset_interrupt_capability(adapter); 2257 + } 2258 + 2259 + /** 2260 + * igc_free_q_vectors - Free memory allocated for interrupt vectors 2261 + * @adapter: board private structure to initialize 2262 + * 2263 + * This function frees the memory allocated to the q_vectors. In addition if 2264 + * NAPI is enabled it will delete any references to the NAPI struct prior 2265 + * to freeing the q_vector. 2266 + */ 2267 + static void igc_free_q_vectors(struct igc_adapter *adapter) 2268 + { 2269 + int v_idx = adapter->num_q_vectors; 2270 + 2271 + adapter->num_tx_queues = 0; 2272 + adapter->num_rx_queues = 0; 2273 + adapter->num_q_vectors = 0; 2274 + 2275 + while (v_idx--) { 2276 + igc_reset_q_vector(adapter, v_idx); 2277 + igc_free_q_vector(adapter, v_idx); 2278 + } 2279 + } 2280 + 2281 + /** 2282 + * igc_free_q_vector - Free memory allocated for specific interrupt vector 2283 + * @adapter: board private structure to initialize 2284 + * @v_idx: Index of vector to be freed 2285 + * 2286 + * This function frees the memory allocated to the q_vector. 2287 + */ 2288 + static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx) 2289 + { 2290 + struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; 2291 + 2292 + adapter->q_vector[v_idx] = NULL; 2293 + 2294 + /* igc_get_stats64() might access the rings on this vector, 2295 + * we must wait a grace period before freeing it. 
2296 + */
2297 + if (q_vector)
2298 + kfree_rcu(q_vector, rcu);
2299 + }
2300 +
2301 + /* Need to wait a few seconds after link up to get diagnostic information from
2302 + * the phy
2303 + */
2304 + static void igc_update_phy_info(struct timer_list *t)
2305 + {
2306 + struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer);
2307 +
2308 + igc_get_phy_info(&adapter->hw);
2309 + }
2310 +
2311 + /**
2312 + * igc_has_link - check shared code for link and determine up/down
2313 + * @adapter: pointer to driver private info
2314 + */
2315 + static bool igc_has_link(struct igc_adapter *adapter)
2316 + {
2317 + struct igc_hw *hw = &adapter->hw;
2318 + bool link_active = false;
2319 +
2320 + /* get_link_status is set on LSC (link status) interrupt or
2321 + * rx sequence error interrupt. get_link_status will stay
2322 + * set until igc_check_for_link establishes link
2323 + * for copper adapters ONLY
2324 + */
2325 + switch (hw->phy.media_type) {
2326 + case igc_media_type_copper:
2327 + if (!hw->mac.get_link_status)
2328 + return true;
2329 + hw->mac.ops.check_for_link(hw);
2330 + link_active = !hw->mac.get_link_status;
2331 + break;
2332 + default:
2333 + case igc_media_type_unknown:
2334 + break;
2335 + }
2336 +
2337 + if (hw->mac.type == igc_i225 &&
2338 + hw->phy.id == I225_I_PHY_ID) {
2339 + if (!netif_carrier_ok(adapter->netdev)) {
2340 + adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
2341 + } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) {
2342 + adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE;
2343 + adapter->link_check_timeout = jiffies;
2344 + }
2345 + }
2346 +
2347 + return link_active;
2348 + }
2349 +
2350 + /**
2351 + * igc_watchdog - Timer Call-back
2352 + * @t: pointer to the watchdog timer_list
2353 + */
2354 + static void igc_watchdog(struct timer_list *t)
2355 + {
2356 + struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer);
2357 + /* Do the rest outside of interrupt context */
2358 + schedule_work(&adapter->watchdog_task);
2359 + }
2360 +
2361 + static void igc_watchdog_task(struct work_struct *work)
2362 + {
2363 + struct igc_adapter *adapter = container_of(work,
2364 + struct igc_adapter,
2365 + watchdog_task);
2366 + struct net_device *netdev = adapter->netdev;
2367 + struct igc_hw *hw = &adapter->hw;
2368 + struct igc_phy_info *phy = &hw->phy;
2369 + u16 phy_data, retry_count = 20;
2370 + u32 connsw;
2371 + u32 link;
2372 + int i;
2373 +
2374 + link = igc_has_link(adapter);
2375 +
2376 + if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) {
2377 + if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
2378 + adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
2379 + else
2380 + link = false;
2381 + }
2382 +
2383 + /* Force link down if we have fiber to swap to */
2384 + if (adapter->flags & IGC_FLAG_MAS_ENABLE) {
2385 + if (hw->phy.media_type == igc_media_type_copper) {
2386 + connsw = rd32(IGC_CONNSW);
2387 + if (!(connsw & IGC_CONNSW_AUTOSENSE_EN))
2388 + link = 0;
2389 + }
2390 + }
2391 + if (link) {
2392 + if (!netif_carrier_ok(netdev)) {
2393 + u32 ctrl;
2394 +
2395 + hw->mac.ops.get_speed_and_duplex(hw,
2396 + &adapter->link_speed,
2397 + &adapter->link_duplex);
2398 +
2399 + ctrl = rd32(IGC_CTRL);
2400 + /* Link status message must follow this format */
2401 + netdev_info(netdev,
2402 + "igc: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
2403 + netdev->name,
2404 + adapter->link_speed,
2405 + adapter->link_duplex == FULL_DUPLEX ?
2406 + "Full" : "Half", 2407 + (ctrl & IGC_CTRL_TFCE) && 2408 + (ctrl & IGC_CTRL_RFCE) ? "RX/TX" : 2409 + (ctrl & IGC_CTRL_RFCE) ? "RX" : 2410 + (ctrl & IGC_CTRL_TFCE) ? "TX" : "None"); 2411 + 2412 + /* check if SmartSpeed worked */ 2413 + igc_check_downshift(hw); 2414 + if (phy->speed_downgraded) 2415 + netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n"); 2416 + 2417 + /* adjust timeout factor according to speed/duplex */ 2418 + adapter->tx_timeout_factor = 1; 2419 + switch (adapter->link_speed) { 2420 + case SPEED_10: 2421 + adapter->tx_timeout_factor = 14; 2422 + break; 2423 + case SPEED_100: 2424 + /* maybe add some timeout factor ? */ 2425 + break; 2426 + } 2427 + 2428 + if (adapter->link_speed != SPEED_1000) 2429 + goto no_wait; 2430 + 2431 + /* wait for Remote receiver status OK */ 2432 + retry_read_status: 2433 + if (!igc_read_phy_reg(hw, PHY_1000T_STATUS, 2434 + &phy_data)) { 2435 + if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) && 2436 + retry_count) { 2437 + msleep(100); 2438 + retry_count--; 2439 + goto retry_read_status; 2440 + } else if (!retry_count) { 2441 + dev_err(&adapter->pdev->dev, "exceeded max 2 second wait for remote receiver status\n"); 2442 + } 2443 + } else { 2444 + dev_err(&adapter->pdev->dev, "failed to read 1000Base-T Status Reg\n"); 2445 + } 2446 + no_wait: 2447 + netif_carrier_on(netdev); 2448 + 2449 + /* link state has changed, schedule phy info update */ 2450 + if (!test_bit(__IGC_DOWN, &adapter->state)) 2451 + mod_timer(&adapter->phy_info_timer, 2452 + round_jiffies(jiffies + 2 * HZ)); 2453 + } 2454 + } else { 2455 + if (netif_carrier_ok(netdev)) { 2456 + adapter->link_speed = 0; 2457 + adapter->link_duplex = 0; 2458 + 2459 + /* Link status message must follow this format */ 2460 + netdev_info(netdev, "igc: %s NIC Link is Down\n", 2461 + netdev->name); 2462 + netif_carrier_off(netdev); 2463 + 2464 + /* link state has changed, schedule phy info update */ 2465 + if (!test_bit(__IGC_DOWN, &adapter->state)) 2466 + mod_timer(&adapter->phy_info_timer, 2467 + round_jiffies(jiffies + 2 * HZ)); 2468 + 2469 + /* link is down, time to check for alternate media */ 2470 + if (adapter->flags & IGC_FLAG_MAS_ENABLE) { 2471 + if (adapter->flags & IGC_FLAG_MEDIA_RESET) { 2472 + schedule_work(&adapter->reset_task); 2473 + /* return immediately */ 2474 + return; 2475 + } 2476 + } 2477 + 2478 + /* also check for alternate media here */ 2479 + } else if (!netif_carrier_ok(netdev) && 2480 + (adapter->flags & IGC_FLAG_MAS_ENABLE)) { 2481 + if (adapter->flags & IGC_FLAG_MEDIA_RESET) { 2482 + schedule_work(&adapter->reset_task); 2483 + /* return immediately */ 2484 + return; 2485 + } 2486 + } 2487 + } 2488 + 2489 + spin_lock(&adapter->stats64_lock); 2490 + igc_update_stats(adapter); 2491 + spin_unlock(&adapter->stats64_lock); 2492 + 2493 + for (i = 0; i < adapter->num_tx_queues; i++) { 2494 + struct igc_ring *tx_ring = adapter->tx_ring[i]; 2495 + 2496 + if (!netif_carrier_ok(netdev)) { 2497 + /* We've lost link, so the controller stops DMA, 2498 + * but we've got queued Tx work that's never going 2499 + * to get done, so reset controller to flush Tx. 2500 + * (Do the reset outside of interrupt context).
2501 + */ 2502 + if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) { 2503 + adapter->tx_timeout_count++; 2504 + schedule_work(&adapter->reset_task); 2505 + /* return immediately since reset is imminent */ 2506 + return; 2507 + } 2508 + } 2509 + 2510 + /* Force detection of hung controller every watchdog period */ 2511 + set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); 2512 + } 2513 + 2514 + /* Cause software interrupt to ensure Rx ring is cleaned */ 2515 + if (adapter->flags & IGC_FLAG_HAS_MSIX) { 2516 + u32 eics = 0; 2517 + 2518 + for (i = 0; i < adapter->num_q_vectors; i++) 2519 + eics |= adapter->q_vector[i]->eims_value; 2520 + wr32(IGC_EICS, eics); 2521 + } else { 2522 + wr32(IGC_ICS, IGC_ICS_RXDMT0); 2523 + } 2524 + 2525 + /* Reset the timer */ 2526 + if (!test_bit(__IGC_DOWN, &adapter->state)) { 2527 + if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) 2528 + mod_timer(&adapter->watchdog_timer, 2529 + round_jiffies(jiffies + HZ)); 2530 + else 2531 + mod_timer(&adapter->watchdog_timer, 2532 + round_jiffies(jiffies + 2 * HZ)); 2533 + } 2534 + } 2535 + 2536 + /** 2537 + * igc_update_ring_itr - update the dynamic ITR value based on packet size 2538 + * @q_vector: pointer to q_vector 2539 + * 2540 + * Stores a new ITR value based strictly on packet size. This 2541 + * algorithm is less sophisticated than that used in igc_update_itr, 2542 + * due to the difficulty of synchronizing statistics across multiple 2543 + * receive rings. The divisors and thresholds used by this function 2544 + * were determined based on theoretical maximum wire speed and testing 2545 + * data, in order to minimize response time while increasing bulk 2546 + * throughput. 2547 + * NOTE: This function is called only when operating in a multiqueue 2548 + * receive environment. 2549 + */ 2550 + static void igc_update_ring_itr(struct igc_q_vector *q_vector) 2551 + { 2552 + struct igc_adapter *adapter = q_vector->adapter; 2553 + int new_val = q_vector->itr_val; 2554 + int avg_wire_size = 0; 2555 + unsigned int packets; 2556 + 2557 + /* For non-gigabit speeds, just fix the interrupt rate at 4000 2558 + * ints/sec (IGC_4K_ITR).
2559 + */ 2560 + switch (adapter->link_speed) { 2561 + case SPEED_10: 2562 + case SPEED_100: 2563 + new_val = IGC_4K_ITR; 2564 + goto set_itr_val; 2565 + default: 2566 + break; 2567 + } 2568 + 2569 + packets = q_vector->rx.total_packets; 2570 + if (packets) 2571 + avg_wire_size = q_vector->rx.total_bytes / packets; 2572 + 2573 + packets = q_vector->tx.total_packets; 2574 + if (packets) 2575 + avg_wire_size = max_t(u32, avg_wire_size, 2576 + q_vector->tx.total_bytes / packets); 2577 + 2578 + /* if avg_wire_size isn't set no work was done */ 2579 + if (!avg_wire_size) 2580 + goto clear_counts; 2581 + 2582 + /* Add 24 bytes to size to account for CRC, preamble, and gap */ 2583 + avg_wire_size += 24; 2584 + 2585 + /* Don't starve jumbo frames */ 2586 + avg_wire_size = min(avg_wire_size, 3000); 2587 + 2588 + /* Give a little boost to mid-size frames */ 2589 + if (avg_wire_size > 300 && avg_wire_size < 1200) 2590 + new_val = avg_wire_size / 3; 2591 + else 2592 + new_val = avg_wire_size / 2; 2593 + 2594 + /* conservative mode (itr 3) eliminates the lowest_latency setting */ 2595 + if (new_val < IGC_20K_ITR && 2596 + ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || 2597 + (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) 2598 + new_val = IGC_20K_ITR; 2599 + 2600 + set_itr_val: 2601 + if (new_val != q_vector->itr_val) { 2602 + q_vector->itr_val = new_val; 2603 + q_vector->set_itr = 1; 2604 + } 2605 + clear_counts: 2606 + q_vector->rx.total_bytes = 0; 2607 + q_vector->rx.total_packets = 0; 2608 + q_vector->tx.total_bytes = 0; 2609 + q_vector->tx.total_packets = 0; 2610 + } 2611 + 2612 + /** 2613 + * igc_update_itr - update the dynamic ITR value based on statistics 2614 + * @q_vector: pointer to q_vector 2615 + * @ring_container: ring info to update the itr for 2616 + * 2617 + * Stores a new ITR value based on packets and byte 2618 + * counts during the last interrupt. The advantage of per interrupt 2619 + * computation is faster updates and more accurate ITR for the current 2620 + * traffic pattern. Constants in this function were computed 2621 + * based on theoretical maximum wire speed and thresholds were set based 2622 + * on testing data as well as attempting to minimize response time 2623 + * while increasing bulk throughput. 2624 + * NOTE: These calculations are only valid when operating in a single- 2625 + * queue environment. 
2626 + */ 2627 + static void igc_update_itr(struct igc_q_vector *q_vector, 2628 + struct igc_ring_container *ring_container) 2629 + { 2630 + unsigned int packets = ring_container->total_packets; 2631 + unsigned int bytes = ring_container->total_bytes; 2632 + u8 itrval = ring_container->itr; 2633 + 2634 + /* no packets, exit with status unchanged */ 2635 + if (packets == 0) 2636 + return; 2637 + 2638 + switch (itrval) { 2639 + case lowest_latency: 2640 + /* handle TSO and jumbo frames */ 2641 + if (bytes / packets > 8000) 2642 + itrval = bulk_latency; 2643 + else if ((packets < 5) && (bytes > 512)) 2644 + itrval = low_latency; 2645 + break; 2646 + case low_latency: /* 50 usec aka 20000 ints/s */ 2647 + if (bytes > 10000) { 2648 + /* this if handles the TSO accounting */ 2649 + if (bytes / packets > 8000) 2650 + itrval = bulk_latency; 2651 + else if ((packets < 10) || ((bytes / packets) > 1200)) 2652 + itrval = bulk_latency; 2653 + else if (packets > 35) 2654 + itrval = lowest_latency; 2655 + } else if (bytes / packets > 2000) { 2656 + itrval = bulk_latency; 2657 + } else if (packets <= 2 && bytes < 512) { 2658 + itrval = lowest_latency; 2659 + } 2660 + break; 2661 + case bulk_latency: /* 250 usec aka 4000 ints/s */ 2662 + if (bytes > 25000) { 2663 + if (packets > 35) 2664 + itrval = low_latency; 2665 + } else if (bytes < 1500) { 2666 + itrval = low_latency; 2667 + } 2668 + break; 2669 + } 2670 + 2671 + /* clear work counters since we have the values we need */ 2672 + ring_container->total_bytes = 0; 2673 + ring_container->total_packets = 0; 2674 + 2675 + /* write updated itr to ring container */ 2676 + ring_container->itr = itrval; 2677 + } 2678 + 2679 + /** 2680 + * igc_intr_msi - Interrupt Handler 2681 + * @irq: interrupt number 2682 + * @data: pointer to adapter private structure (passed as dev_id to request_irq) 2683 + */ 2684 + static irqreturn_t igc_intr_msi(int irq, void *data) 2685 + { 2686 + struct igc_adapter *adapter = data; 2687 + struct igc_q_vector *q_vector = adapter->q_vector[0]; 2688 + struct igc_hw *hw = &adapter->hw; 2689 + /* read ICR disables interrupts using IAM */ 2690 + u32 icr = rd32(IGC_ICR); 2691 + 2692 + igc_write_itr(q_vector); 2693 + 2694 + if (icr & IGC_ICR_DRSTA) 2695 + schedule_work(&adapter->reset_task); 2696 + 2697 + if (icr & IGC_ICR_DOUTSYNC) { 2698 + /* HW is reporting DMA is out of sync */ 2699 + adapter->stats.doosync++; 2700 + } 2701 + 2702 + if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { 2703 + hw->mac.get_link_status = 1; 2704 + if (!test_bit(__IGC_DOWN, &adapter->state)) 2705 + mod_timer(&adapter->watchdog_timer, jiffies + 1); 2706 + } 2707 + 2708 + napi_schedule(&q_vector->napi); 2709 + 2710 + return IRQ_HANDLED; 2711 + } 2712 + 2713 + /** 2714 + * igc_intr - Legacy Interrupt Handler 2715 + * @irq: interrupt number 2716 + * @data: pointer to adapter private structure (passed as dev_id to request_irq) 2717 + */ 2718 + static irqreturn_t igc_intr(int irq, void *data) 2719 + { 2720 + struct igc_adapter *adapter = data; 2721 + struct igc_q_vector *q_vector = adapter->q_vector[0]; 2722 + struct igc_hw *hw = &adapter->hw; 2723 + /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.
No 2724 + * need for the IMC write 2725 + */ 2726 + u32 icr = rd32(IGC_ICR); 2727 + 2728 + /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is 2729 + * not set, then the adapter didn't send an interrupt 2730 + */ 2731 + if (!(icr & IGC_ICR_INT_ASSERTED)) 2732 + return IRQ_NONE; 2733 + 2734 + igc_write_itr(q_vector); 2735 + 2736 + if (icr & IGC_ICR_DRSTA) 2737 + schedule_work(&adapter->reset_task); 2738 + 2739 + if (icr & IGC_ICR_DOUTSYNC) { 2740 + /* HW is reporting DMA is out of sync */ 2741 + adapter->stats.doosync++; 2742 + } 2743 + 2744 + if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { 2745 + hw->mac.get_link_status = 1; 2746 + /* guard against interrupt when we're going down */ 2747 + if (!test_bit(__IGC_DOWN, &adapter->state)) 2748 + mod_timer(&adapter->watchdog_timer, jiffies + 1); 2749 + } 2750 + 2751 + napi_schedule(&q_vector->napi); 2752 + 2753 + return IRQ_HANDLED; 2754 + } 2755 + 2756 + static void igc_set_itr(struct igc_q_vector *q_vector) 2757 + { 2758 + struct igc_adapter *adapter = q_vector->adapter; 2759 + u32 new_itr = q_vector->itr_val; 2760 + u8 current_itr = 0; 2761 + 2762 + /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ 2763 + switch (adapter->link_speed) { 2764 + case SPEED_10: 2765 + case SPEED_100: 2766 + current_itr = 0; 2767 + new_itr = IGC_4K_ITR; 2768 + goto set_itr_now; 2769 + default: 2770 + break; 2771 + } 2772 + 2773 + igc_update_itr(q_vector, &q_vector->tx); 2774 + igc_update_itr(q_vector, &q_vector->rx); 2775 + 2776 + current_itr = max(q_vector->rx.itr, q_vector->tx.itr); 2777 + 2778 + /* conservative mode (itr 3) eliminates the lowest_latency setting */ 2779 + if (current_itr == lowest_latency && 2780 + ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || 2781 + (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) 2782 + current_itr = low_latency; 2783 + 2784 + switch (current_itr) { 2785 + /* counts and packets in update_itr are dependent on these numbers */ 2786 + case lowest_latency: 2787 + new_itr = IGC_70K_ITR; /* 70,000 ints/sec */ 2788 + break; 2789 + case low_latency: 2790 + new_itr = IGC_20K_ITR; /* 20,000 ints/sec */ 2791 + break; 2792 + case bulk_latency: 2793 + new_itr = IGC_4K_ITR; /* 4,000 ints/sec */ 2794 + break; 2795 + default: 2796 + break; 2797 + } 2798 + 2799 + set_itr_now: 2800 + if (new_itr != q_vector->itr_val) { 2801 + /* this attempts to bias the interrupt rate towards Bulk 2802 + * by adding intermediate steps when interrupt rate is 2803 + * increasing 2804 + */ 2805 + new_itr = new_itr > q_vector->itr_val ? 2806 + max((new_itr * q_vector->itr_val) / 2807 + (new_itr + (q_vector->itr_val >> 2)), 2808 + new_itr) : new_itr; 2809 + /* Don't write the value here; it resets the adapter's 2810 + * internal timer, and causes us to delay far longer than 2811 + * we should between interrupts. Instead, we write the ITR 2812 + * value at the beginning of the next interrupt so the timing 2813 + * ends up being correct. 
2814 + */ 2815 + q_vector->itr_val = new_itr; 2816 + q_vector->set_itr = 1; 2817 + } 2818 + } 2819 + 2820 + static void igc_ring_irq_enable(struct igc_q_vector *q_vector) 2821 + { 2822 + struct igc_adapter *adapter = q_vector->adapter; 2823 + struct igc_hw *hw = &adapter->hw; 2824 + 2825 + if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) || 2826 + (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) { 2827 + if (adapter->num_q_vectors == 1) 2828 + igc_set_itr(q_vector); 2829 + else 2830 + igc_update_ring_itr(q_vector); 2831 + } 2832 + 2833 + if (!test_bit(__IGC_DOWN, &adapter->state)) { 2834 + if (adapter->msix_entries) 2835 + wr32(IGC_EIMS, q_vector->eims_value); 2836 + else 2837 + igc_irq_enable(adapter); 2838 + } 2839 + } 2840 + 2841 + /** 2842 + * igc_poll - NAPI Rx polling callback 2843 + * @napi: napi polling structure 2844 + * @budget: count of how many packets we should handle 2845 + */ 2846 + static int igc_poll(struct napi_struct *napi, int budget) 2847 + { 2848 + struct igc_q_vector *q_vector = container_of(napi, 2849 + struct igc_q_vector, 2850 + napi); 2851 + bool clean_complete = true; 2852 + int work_done = 0; 2853 + 2854 + if (q_vector->tx.ring) 2855 + clean_complete = igc_clean_tx_irq(q_vector, budget); 2856 + 2857 + if (q_vector->rx.ring) { 2858 + int cleaned = igc_clean_rx_irq(q_vector, budget); 2859 + 2860 + work_done += cleaned; 2861 + if (cleaned >= budget) 2862 + clean_complete = false; 2863 + } 2864 + 2865 + /* If all work not completed, return budget and keep polling */ 2866 + if (!clean_complete) 2867 + return budget; 2868 + 2869 + /* If not enough Rx work done, exit the polling mode */ 2870 + napi_complete_done(napi, work_done); 2871 + igc_ring_irq_enable(q_vector); 2872 + 2873 + return 0; 2874 + } 2875 + 2876 + /** 2877 + * igc_set_interrupt_capability - set MSI or MSI-X if supported 2878 + * @adapter: Pointer to adapter structure 2879 + * 2880 + * Attempt to configure interrupts using the best available 2881 + * capabilities of the hardware and kernel. 2882 + */ 2883 + static void igc_set_interrupt_capability(struct igc_adapter *adapter, 2884 + bool msix) 2885 + { 2886 + int numvecs, i; 2887 + int err; 2888 + 2889 + if (!msix) 2890 + goto msi_only; 2891 + adapter->flags |= IGC_FLAG_HAS_MSIX; 2892 + 2893 + /* Number of supported queues. 
*/ 2894 + adapter->num_rx_queues = adapter->rss_queues; 2895 + 2896 + adapter->num_tx_queues = adapter->rss_queues; 2897 + 2898 + /* start with one vector for every Rx queue */ 2899 + numvecs = adapter->num_rx_queues; 2900 + 2901 + /* if Tx handler is separate add 1 for every Tx queue */ 2902 + if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS)) 2903 + numvecs += adapter->num_tx_queues; 2904 + 2905 + /* store the number of vectors reserved for queues */ 2906 + adapter->num_q_vectors = numvecs; 2907 + 2908 + /* add 1 vector for link status interrupts */ 2909 + numvecs++; 2910 + 2911 + adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry), 2912 + GFP_KERNEL); 2913 + 2914 + if (!adapter->msix_entries) 2915 + return; 2916 + 2917 + /* populate entry values */ 2918 + for (i = 0; i < numvecs; i++) 2919 + adapter->msix_entries[i].entry = i; 2920 + 2921 + err = pci_enable_msix_range(adapter->pdev, 2922 + adapter->msix_entries, 2923 + numvecs, 2924 + numvecs); 2925 + if (err > 0) 2926 + return; 2927 + 2928 + kfree(adapter->msix_entries); 2929 + adapter->msix_entries = NULL; 2930 + 2931 + igc_reset_interrupt_capability(adapter); 2932 + 2933 + msi_only: 2934 + adapter->flags &= ~IGC_FLAG_HAS_MSIX; 2935 + 2936 + adapter->rss_queues = 1; 2937 + adapter->flags |= IGC_FLAG_QUEUE_PAIRS; 2938 + adapter->num_rx_queues = 1; 2939 + adapter->num_tx_queues = 1; 2940 + adapter->num_q_vectors = 1; 2941 + if (!pci_enable_msi(adapter->pdev)) 2942 + adapter->flags |= IGC_FLAG_HAS_MSI; 2943 + } 2944 + 2945 + static void igc_add_ring(struct igc_ring *ring, 2946 + struct igc_ring_container *head) 2947 + { 2948 + head->ring = ring; 2949 + head->count++; 2950 + } 2951 + 2952 + /** 2953 + * igc_alloc_q_vector - Allocate memory for a single interrupt vector 2954 + * @adapter: board private structure to initialize 2955 + * @v_count: q_vectors allocated on adapter, used for ring interleaving 2956 + * @v_idx: index of vector in adapter struct 2957 + * @txr_count: total number of Tx rings to allocate 2958 + * @txr_idx: index of first Tx ring to allocate 2959 + * @rxr_count: total number of Rx rings to allocate 2960 + * @rxr_idx: index of first Rx ring to allocate 2961 + * 2962 + * We allocate one q_vector. If allocation fails we return -ENOMEM. 
2963 + */ 2964 + static int igc_alloc_q_vector(struct igc_adapter *adapter, 2965 + unsigned int v_count, unsigned int v_idx, 2966 + unsigned int txr_count, unsigned int txr_idx, 2967 + unsigned int rxr_count, unsigned int rxr_idx) 2968 + { 2969 + struct igc_q_vector *q_vector; 2970 + struct igc_ring *ring; 2971 + int ring_count, size; 2972 + 2973 + /* igc only supports 1 Tx and/or 1 Rx queue per vector */ 2974 + if (txr_count > 1 || rxr_count > 1) 2975 + return -ENOMEM; 2976 + 2977 + ring_count = txr_count + rxr_count; 2978 + size = sizeof(struct igc_q_vector) + 2979 + (sizeof(struct igc_ring) * ring_count); 2980 + 2981 + /* allocate q_vector and rings */ 2982 + q_vector = adapter->q_vector[v_idx]; 2983 + if (!q_vector) 2984 + q_vector = kzalloc(size, GFP_KERNEL); 2985 + else 2986 + memset(q_vector, 0, size); 2987 + if (!q_vector) 2988 + return -ENOMEM; 2989 + 2990 + /* initialize NAPI */ 2991 + netif_napi_add(adapter->netdev, &q_vector->napi, 2992 + igc_poll, 64); 2993 + 2994 + /* tie q_vector and adapter together */ 2995 + adapter->q_vector[v_idx] = q_vector; 2996 + q_vector->adapter = adapter; 2997 + 2998 + /* initialize work limits */ 2999 + q_vector->tx.work_limit = adapter->tx_work_limit; 3000 + 3001 + /* initialize ITR configuration */ 3002 + q_vector->itr_register = adapter->io_addr + IGC_EITR(0); 3003 + q_vector->itr_val = IGC_START_ITR; 3004 + 3005 + /* initialize pointer to rings */ 3006 + ring = q_vector->ring; 3007 + 3008 + /* initialize ITR */ 3009 + if (rxr_count) { 3010 + /* rx or rx/tx vector */ 3011 + if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3) 3012 + q_vector->itr_val = adapter->rx_itr_setting; 3013 + } else { 3014 + /* tx only vector */ 3015 + if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3) 3016 + q_vector->itr_val = adapter->tx_itr_setting; 3017 + } 3018 + 3019 + if (txr_count) { 3020 + /* assign generic ring traits */ 3021 + ring->dev = &adapter->pdev->dev; 3022 + ring->netdev = adapter->netdev; 3023 + 3024 + /* configure backlink on ring */ 3025 + ring->q_vector = q_vector; 3026 + 3027 + /* update q_vector Tx values */ 3028 + igc_add_ring(ring, &q_vector->tx); 3029 + 3030 + /* apply Tx specific ring traits */ 3031 + ring->count = adapter->tx_ring_count; 3032 + ring->queue_index = txr_idx; 3033 + 3034 + /* assign ring to adapter */ 3035 + adapter->tx_ring[txr_idx] = ring; 3036 + 3037 + /* push pointer to next ring */ 3038 + ring++; 3039 + } 3040 + 3041 + if (rxr_count) { 3042 + /* assign generic ring traits */ 3043 + ring->dev = &adapter->pdev->dev; 3044 + ring->netdev = adapter->netdev; 3045 + 3046 + /* configure backlink on ring */ 3047 + ring->q_vector = q_vector; 3048 + 3049 + /* update q_vector Rx values */ 3050 + igc_add_ring(ring, &q_vector->rx); 3051 + 3052 + /* apply Rx specific ring traits */ 3053 + ring->count = adapter->rx_ring_count; 3054 + ring->queue_index = rxr_idx; 3055 + 3056 + /* assign ring to adapter */ 3057 + adapter->rx_ring[rxr_idx] = ring; 3058 + } 3059 + 3060 + return 0; 3061 + } 3062 + 3063 + /** 3064 + * igc_alloc_q_vectors - Allocate memory for interrupt vectors 3065 + * @adapter: board private structure to initialize 3066 + * 3067 + * We allocate one q_vector per queue interrupt. If allocation fails we 3068 + * return -ENOMEM. 
3069 + */ 3070 + static int igc_alloc_q_vectors(struct igc_adapter *adapter) 3071 + { 3072 + int rxr_remaining = adapter->num_rx_queues; 3073 + int txr_remaining = adapter->num_tx_queues; 3074 + int rxr_idx = 0, txr_idx = 0, v_idx = 0; 3075 + int q_vectors = adapter->num_q_vectors; 3076 + int err; 3077 + 3078 + if (q_vectors >= (rxr_remaining + txr_remaining)) { 3079 + for (; rxr_remaining; v_idx++) { 3080 + err = igc_alloc_q_vector(adapter, q_vectors, v_idx, 3081 + 0, 0, 1, rxr_idx); 3082 + 3083 + if (err) 3084 + goto err_out; 3085 + 3086 + /* update counts and index */ 3087 + rxr_remaining--; 3088 + rxr_idx++; 3089 + } 3090 + } 3091 + 3092 + for (; v_idx < q_vectors; v_idx++) { 3093 + int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); 3094 + int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); 3095 + 3096 + err = igc_alloc_q_vector(adapter, q_vectors, v_idx, 3097 + tqpv, txr_idx, rqpv, rxr_idx); 3098 + 3099 + if (err) 3100 + goto err_out; 3101 + 3102 + /* update counts and index */ 3103 + rxr_remaining -= rqpv; 3104 + txr_remaining -= tqpv; 3105 + rxr_idx++; 3106 + txr_idx++; 3107 + } 3108 + 3109 + return 0; 3110 + 3111 + err_out: 3112 + adapter->num_tx_queues = 0; 3113 + adapter->num_rx_queues = 0; 3114 + adapter->num_q_vectors = 0; 3115 + 3116 + while (v_idx--) 3117 + igc_free_q_vector(adapter, v_idx); 3118 + 3119 + return -ENOMEM; 3120 + } 3121 + 3122 + /** 3123 + * igc_cache_ring_register - Descriptor ring to register mapping 3124 + * @adapter: board private structure to initialize 3125 + * 3126 + * Once we know the feature-set enabled for the device, we'll cache 3127 + * the register offset the descriptor ring is assigned to. 3128 + */ 3129 + static void igc_cache_ring_register(struct igc_adapter *adapter) 3130 + { 3131 + int i = 0, j = 0; 3132 + 3133 + switch (adapter->hw.mac.type) { 3134 + case igc_i225: 3135 + /* Fall through */ 3136 + default: 3137 + for (; i < adapter->num_rx_queues; i++) 3138 + adapter->rx_ring[i]->reg_idx = i; 3139 + for (; j < adapter->num_tx_queues; j++) 3140 + adapter->tx_ring[j]->reg_idx = j; 3141 + break; 3142 + } 3143 + } 3144 + 3145 + /** 3146 + * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors 3147 + * @adapter: Pointer to adapter structure 3148 + * 3149 + * This function initializes the interrupts and allocates all of the queues. 
3150 + */ 3151 + static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix) 3152 + { 3153 + struct pci_dev *pdev = adapter->pdev; 3154 + int err = 0; 3155 + 3156 + igc_set_interrupt_capability(adapter, msix); 3157 + 3158 + err = igc_alloc_q_vectors(adapter); 3159 + if (err) { 3160 + dev_err(&pdev->dev, "Unable to allocate memory for vectors\n"); 3161 + goto err_alloc_q_vectors; 3162 + } 3163 + 3164 + igc_cache_ring_register(adapter); 3165 + 3166 + return 0; 3167 + 3168 + err_alloc_q_vectors: 3169 + igc_reset_interrupt_capability(adapter); 3170 + return err; 3171 + } 3172 + 3173 + static void igc_free_irq(struct igc_adapter *adapter) 3174 + { 3175 + if (adapter->msix_entries) { 3176 + int vector = 0, i; 3177 + 3178 + free_irq(adapter->msix_entries[vector++].vector, adapter); 3179 + 3180 + for (i = 0; i < adapter->num_q_vectors; i++) 3181 + free_irq(adapter->msix_entries[vector++].vector, 3182 + adapter->q_vector[i]); 3183 + } else { 3184 + free_irq(adapter->pdev->irq, adapter); 3185 + } 3186 + } 3187 + 3188 + /** 3189 + * igc_irq_disable - Mask off interrupt generation on the NIC 3190 + * @adapter: board private structure 3191 + */ 3192 + static void igc_irq_disable(struct igc_adapter *adapter) 3193 + { 3194 + struct igc_hw *hw = &adapter->hw; 3195 + 3196 + if (adapter->msix_entries) { 3197 + u32 regval = rd32(IGC_EIAM); 3198 + 3199 + wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask); 3200 + wr32(IGC_EIMC, adapter->eims_enable_mask); 3201 + regval = rd32(IGC_EIAC); 3202 + wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask); 3203 + } 3204 + 3205 + wr32(IGC_IAM, 0); 3206 + wr32(IGC_IMC, ~0); 3207 + wrfl(); 3208 + 3209 + if (adapter->msix_entries) { 3210 + int vector = 0, i; 3211 + 3212 + synchronize_irq(adapter->msix_entries[vector++].vector); 3213 + 3214 + for (i = 0; i < adapter->num_q_vectors; i++) 3215 + synchronize_irq(adapter->msix_entries[vector++].vector); 3216 + } else { 3217 + synchronize_irq(adapter->pdev->irq); 3218 + } 3219 + } 3220 + 3221 + /** 3222 + * igc_irq_enable - Enable default interrupt generation settings 3223 + * @adapter: board private structure 3224 + */ 3225 + static void igc_irq_enable(struct igc_adapter *adapter) 3226 + { 3227 + struct igc_hw *hw = &adapter->hw; 3228 + 3229 + if (adapter->msix_entries) { 3230 + u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA; 3231 + u32 regval = rd32(IGC_EIAC); 3232 + 3233 + wr32(IGC_EIAC, regval | adapter->eims_enable_mask); 3234 + regval = rd32(IGC_EIAM); 3235 + wr32(IGC_EIAM, regval | adapter->eims_enable_mask); 3236 + wr32(IGC_EIMS, adapter->eims_enable_mask); 3237 + wr32(IGC_IMS, ims); 3238 + } else { 3239 + wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA); 3240 + wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA); 3241 + } 3242 + } 3243 + 3244 + /** 3245 + * igc_request_irq - initialize interrupts 3246 + * @adapter: Pointer to adapter structure 3247 + * 3248 + * Attempts to configure interrupts using the best available 3249 + * capabilities of the hardware and kernel. 
3250 + */ 3251 + static int igc_request_irq(struct igc_adapter *adapter) 3252 + { 3253 + struct net_device *netdev = adapter->netdev; 3254 + struct pci_dev *pdev = adapter->pdev; 3255 + int err = 0; 3256 + 3257 + if (adapter->flags & IGC_FLAG_HAS_MSIX) { 3258 + err = igc_request_msix(adapter); 3259 + if (!err) 3260 + goto request_done; 3261 + /* fall back to MSI */ 3262 + igc_free_all_tx_resources(adapter); 3263 + igc_free_all_rx_resources(adapter); 3264 + 3265 + igc_clear_interrupt_scheme(adapter); 3266 + err = igc_init_interrupt_scheme(adapter, false); 3267 + if (err) 3268 + goto request_done; 3269 + igc_setup_all_tx_resources(adapter); 3270 + igc_setup_all_rx_resources(adapter); 3271 + igc_configure(adapter); 3272 + } 3273 + 3274 + igc_assign_vector(adapter->q_vector[0], 0); 3275 + 3276 + if (adapter->flags & IGC_FLAG_HAS_MSI) { 3277 + err = request_irq(pdev->irq, &igc_intr_msi, 0, 3278 + netdev->name, adapter); 3279 + if (!err) 3280 + goto request_done; 3281 + 3282 + /* fall back to legacy interrupts */ 3283 + igc_reset_interrupt_capability(adapter); 3284 + adapter->flags &= ~IGC_FLAG_HAS_MSI; 3285 + } 3286 + 3287 + err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED, 3288 + netdev->name, adapter); 3289 + 3290 + if (err) 3291 + dev_err(&pdev->dev, "Error %d getting interrupt\n", 3292 + err); 3293 + 3294 + request_done: 3295 + return err; 3296 + } 3297 + 3298 + static void igc_write_itr(struct igc_q_vector *q_vector) 3299 + { 3300 + u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK; 3301 + 3302 + if (!q_vector->set_itr) 3303 + return; 3304 + 3305 + if (!itr_val) 3306 + itr_val = IGC_ITR_VAL_MASK; 3307 + 3308 + itr_val |= IGC_EITR_CNT_IGNR; 3309 + 3310 + writel(itr_val, q_vector->itr_register); 3311 + q_vector->set_itr = 0; 3312 + } 3313 + 3314 + /** 3315 + * igc_open - Called when a network interface is made active 3316 + * @netdev: network interface device structure 3317 + * 3318 + * Returns 0 on success, negative value on failure 3319 + * 3320 + * The open entry point is called when a network interface is made 3321 + * active by the system (IFF_UP). At this point all resources needed 3322 + * for transmit and receive operations are allocated, the interrupt 3323 + * handler is registered with the OS, the watchdog timer is started, 3324 + * and the stack is notified that the interface is ready. 3325 + */ 3326 + static int __igc_open(struct net_device *netdev, bool resuming) 3327 + { 3328 + struct igc_adapter *adapter = netdev_priv(netdev); 3329 + struct igc_hw *hw = &adapter->hw; 3330 + int err = 0; 3331 + int i = 0; 3332 + 3333 + /* disallow open during test */ 3334 + 3335 + if (test_bit(__IGC_TESTING, &adapter->state)) { 3336 + WARN_ON(resuming); 3337 + return -EBUSY; 3338 + } 3339 + 3340 + netif_carrier_off(netdev); 3341 + 3342 + /* allocate transmit descriptors */ 3343 + err = igc_setup_all_tx_resources(adapter); 3344 + if (err) 3345 + goto err_setup_tx; 3346 + 3347 + /* allocate receive descriptors */ 3348 + err = igc_setup_all_rx_resources(adapter); 3349 + if (err) 3350 + goto err_setup_rx; 3351 + 3352 + igc_power_up_link(adapter); 3353 + 3354 + igc_configure(adapter); 3355 + 3356 + err = igc_request_irq(adapter); 3357 + if (err) 3358 + goto err_req_irq; 3359 + 3360 + /* Notify the stack of the actual queue counts. 
*/ 3361 + err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); 3362 + if (err) 3363 + goto err_set_queues; 3364 + 3365 + err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); 3366 + if (err) 3367 + goto err_set_queues; 3368 + 3369 + clear_bit(__IGC_DOWN, &adapter->state); 3370 + 3371 + for (i = 0; i < adapter->num_q_vectors; i++) 3372 + napi_enable(&adapter->q_vector[i]->napi); 3373 + 3374 + /* Clear any pending interrupts. */ 3375 + rd32(IGC_ICR); 3376 + igc_irq_enable(adapter); 3377 + 3378 + netif_tx_start_all_queues(netdev); 3379 + 3380 + /* start the watchdog. */ 3381 + hw->mac.get_link_status = 1; 3382 + schedule_work(&adapter->watchdog_task); 3383 + 3384 + return IGC_SUCCESS; 3385 + 3386 + err_set_queues: 3387 + igc_free_irq(adapter); 3388 + err_req_irq: 3389 + igc_release_hw_control(adapter); 3390 + igc_power_down_link(adapter); 3391 + igc_free_all_rx_resources(adapter); 3392 + err_setup_rx: 3393 + igc_free_all_tx_resources(adapter); 3394 + err_setup_tx: 3395 + igc_reset(adapter); 3396 + 3397 + return err; 3398 + } 3399 + 3400 + static int igc_open(struct net_device *netdev) 3401 + { 3402 + return __igc_open(netdev, false); 3403 + } 3404 + 3405 + /** 3406 + * igc_close - Disables a network interface 3407 + * @netdev: network interface device structure 3408 + * 3409 + * Returns 0, this is not allowed to fail 3410 + * 3411 + * The close entry point is called when an interface is de-activated 3412 + * by the OS. The hardware is still under the driver's control, but 3413 + * needs to be disabled. A global MAC reset is issued to stop the 3414 + * hardware, and all transmit and receive resources are freed. 3415 + */ 3416 + static int __igc_close(struct net_device *netdev, bool suspending) 3417 + { 3418 + struct igc_adapter *adapter = netdev_priv(netdev); 3419 + 3420 + WARN_ON(test_bit(__IGC_RESETTING, &adapter->state)); 3421 + 3422 + igc_down(adapter); 3423 + 3424 + igc_release_hw_control(adapter); 3425 + 3426 + igc_free_irq(adapter); 3427 + 3428 + igc_free_all_tx_resources(adapter); 3429 + igc_free_all_rx_resources(adapter); 3430 + 3431 + return 0; 3432 + } 3433 + 3434 + static int igc_close(struct net_device *netdev) 3435 + { 3436 + if (netif_device_present(netdev) || netdev->dismantle) 3437 + return __igc_close(netdev, false); 3438 + return 0; 3439 + } 3440 + 3441 + static const struct net_device_ops igc_netdev_ops = { 3442 + .ndo_open = igc_open, 3443 + .ndo_stop = igc_close, 3444 + .ndo_start_xmit = igc_xmit_frame, 3445 + .ndo_set_mac_address = igc_set_mac, 3446 + .ndo_change_mtu = igc_change_mtu, 3447 + .ndo_get_stats = igc_get_stats, 3448 + .ndo_do_ioctl = igc_ioctl, 3449 + }; 3450 + 3451 + /* PCIe configuration access */ 3452 + void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) 3453 + { 3454 + struct igc_adapter *adapter = hw->back; 3455 + 3456 + pci_read_config_word(adapter->pdev, reg, value); 3457 + } 3458 + 3459 + void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) 3460 + { 3461 + struct igc_adapter *adapter = hw->back; 3462 + 3463 + pci_write_config_word(adapter->pdev, reg, *value); 3464 + } 3465 + 3466 + s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) 3467 + { 3468 + struct igc_adapter *adapter = hw->back; 3469 + u16 cap_offset; 3470 + 3471 + cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP); 3472 + if (!cap_offset) 3473 + return -IGC_ERR_CONFIG; 3474 + 3475 + pci_read_config_word(adapter->pdev, cap_offset + reg, value); 3476 + 3477 + return IGC_SUCCESS; 3478 + } 3479 + 3480 + s32
igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) 3481 + { 3482 + struct igc_adapter *adapter = hw->back; 3483 + u16 cap_offset; 3484 + 3485 + cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP); 3486 + if (!cap_offset) 3487 + return -IGC_ERR_CONFIG; 3488 + 3489 + pci_write_config_word(adapter->pdev, cap_offset + reg, *value); 3490 + 3491 + return IGC_SUCCESS; 3492 + } 3493 + 3494 + u32 igc_rd32(struct igc_hw *hw, u32 reg) 3495 + { 3496 + struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw); 3497 + u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); 3498 + u32 value = 0; 3499 + 3500 + if (IGC_REMOVED(hw_addr)) 3501 + return ~value; 3502 + 3503 + value = readl(&hw_addr[reg]); 3504 + 3505 + /* reads should not return all F's */ 3506 + if (!(~value) && (!reg || !(~readl(hw_addr)))) { 3507 + struct net_device *netdev = igc->netdev; 3508 + 3509 + hw->hw_addr = NULL; 3510 + netif_device_detach(netdev); 3511 + netdev_err(netdev, "PCIe link lost, device now detached\n"); 3512 + } 3513 + 3514 + return value; 3515 + } 3516 + 3517 + /** 3518 + * igc_probe - Device Initialization Routine 3519 + * @pdev: PCI device information struct 3520 + * @ent: entry in igc_pci_tbl 3521 + * 3522 + * Returns 0 on success, negative on failure 3523 + * 3524 + * igc_probe initializes an adapter identified by a pci_dev structure. 3525 + * The OS initialization, configuring the adapter private structure, 3526 + * and a hardware reset occur. 3527 + */ 3528 + static int igc_probe(struct pci_dev *pdev, 3529 + const struct pci_device_id *ent) 3530 + { 3531 + struct igc_adapter *adapter; 3532 + struct net_device *netdev; 3533 + struct igc_hw *hw; 3534 + const struct igc_info *ei = igc_info_tbl[ent->driver_data]; 3535 + int err, pci_using_dac; 3536 + 3537 + err = pci_enable_device_mem(pdev); 3538 + if (err) 3539 + return err; 3540 + 3541 + pci_using_dac = 0; 3542 + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); 3543 + if (!err) { 3544 + err = dma_set_coherent_mask(&pdev->dev, 3545 + DMA_BIT_MASK(64)); 3546 + if (!err) 3547 + pci_using_dac = 1; 3548 + } else { 3549 + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 3550 + if (err) { 3551 + err = dma_set_coherent_mask(&pdev->dev, 3552 + DMA_BIT_MASK(32)); 3553 + if (err) { 3554 + IGC_ERR("Wrong DMA configuration, aborting\n"); 3555 + goto err_dma; 3556 + } 3557 + } 3558 + } 3559 + 3560 + err = pci_request_selected_regions(pdev, 3561 + pci_select_bars(pdev, 3562 + IORESOURCE_MEM), 3563 + igc_driver_name); 3564 + if (err) 3565 + goto err_pci_reg; 3566 + 3567 + pci_enable_pcie_error_reporting(pdev); 3568 + 3569 + pci_set_master(pdev); 3570 + 3571 + err = -ENOMEM; 3572 + netdev = alloc_etherdev_mq(sizeof(struct igc_adapter), 3573 + IGC_MAX_TX_QUEUES); 3574 + 3575 + if (!netdev) 3576 + goto err_alloc_etherdev; 3577 + 3578 + SET_NETDEV_DEV(netdev, &pdev->dev); 3579 + 3580 + pci_set_drvdata(pdev, netdev); 3581 + adapter = netdev_priv(netdev); 3582 + adapter->netdev = netdev; 3583 + adapter->pdev = pdev; 3584 + hw = &adapter->hw; 3585 + hw->back = adapter; 3586 + adapter->port_num = hw->bus.func; 3587 + adapter->msg_enable = GENMASK(debug - 1, 0); 3588 + 3589 + err = pci_save_state(pdev); 3590 + if (err) 3591 + goto err_ioremap; 3592 + 3593 + err = -EIO; 3594 + adapter->io_addr = ioremap(pci_resource_start(pdev, 0), 3595 + pci_resource_len(pdev, 0)); 3596 + if (!adapter->io_addr) 3597 + goto err_ioremap; 3598 + 3599 + /* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */ 3600 + hw->hw_addr = adapter->io_addr; 3601 + 3602 + 
netdev->netdev_ops = &igc_netdev_ops; 3603 + 3604 + netdev->watchdog_timeo = 5 * HZ; 3605 + 3606 + netdev->mem_start = pci_resource_start(pdev, 0); 3607 + netdev->mem_end = pci_resource_end(pdev, 0); 3608 + 3609 + /* PCI config space info */ 3610 + hw->vendor_id = pdev->vendor; 3611 + hw->device_id = pdev->device; 3612 + hw->revision_id = pdev->revision; 3613 + hw->subsystem_vendor_id = pdev->subsystem_vendor; 3614 + hw->subsystem_device_id = pdev->subsystem_device; 3615 + 3616 + /* Copy the default MAC and PHY function pointers */ 3617 + memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); 3618 + memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); 3619 + 3620 + /* Initialize skew-specific constants */ 3621 + err = ei->get_invariants(hw); 3622 + if (err) 3623 + goto err_sw_init; 3624 + 3625 + /* setup the private structure */ 3626 + err = igc_sw_init(adapter); 3627 + if (err) 3628 + goto err_sw_init; 3629 + 3630 + /* MTU range: 68 - 9216 */ 3631 + netdev->min_mtu = ETH_MIN_MTU; 3632 + netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE; 3633 + 3634 + /* before reading the NVM, reset the controller to put the device in a 3635 + * known good starting state 3636 + */ 3637 + hw->mac.ops.reset_hw(hw); 3638 + 3639 + if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) { 3640 + /* copy the MAC address out of the NVM */ 3641 + if (hw->mac.ops.read_mac_addr(hw)) 3642 + dev_err(&pdev->dev, "NVM Read Error\n"); 3643 + } 3644 + 3645 + memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); 3646 + 3647 + if (!is_valid_ether_addr(netdev->dev_addr)) { 3648 + dev_err(&pdev->dev, "Invalid MAC Address\n"); 3649 + err = -EIO; 3650 + goto err_eeprom; 3651 + } 3652 + 3653 + /* configure RXPBSIZE and TXPBSIZE */ 3654 + wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT); 3655 + wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT); 3656 + 3657 + timer_setup(&adapter->watchdog_timer, igc_watchdog, 0); 3658 + timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0); 3659 + 3660 + INIT_WORK(&adapter->reset_task, igc_reset_task); 3661 + INIT_WORK(&adapter->watchdog_task, igc_watchdog_task); 3662 + 3663 + /* Initialize link properties that are user-changeable */ 3664 + adapter->fc_autoneg = true; 3665 + hw->mac.autoneg = true; 3666 + hw->phy.autoneg_advertised = 0xaf; 3667 + 3668 + hw->fc.requested_mode = igc_fc_default; 3669 + hw->fc.current_mode = igc_fc_default; 3670 + 3671 + /* reset the hardware with the new settings */ 3672 + igc_reset(adapter); 3673 + 3674 + /* let the f/w know that the h/w is now under the control of the 3675 + * driver. 
3676 + */ 3677 + igc_get_hw_control(adapter); 3678 + 3679 + strncpy(netdev->name, "eth%d", IFNAMSIZ); 3680 + err = register_netdev(netdev); 3681 + if (err) 3682 + goto err_register; 3683 + 3684 + /* carrier off reporting is important to ethtool even BEFORE open */ 3685 + netif_carrier_off(netdev); 3686 + 3687 + /* keep a private copy of the board-specific info */ 3688 + adapter->ei = *ei; 3689 + 3690 + /* print pcie link status and MAC address */ 3691 + pcie_print_link_status(pdev); 3692 + netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr); 3693 + 3694 + return 0; 3695 + 3696 + err_register: 3697 + igc_release_hw_control(adapter); 3698 + err_eeprom: 3699 + if (!igc_check_reset_block(hw)) 3700 + igc_reset_phy(hw); 3701 + err_sw_init: 3702 + igc_clear_interrupt_scheme(adapter); 3703 + iounmap(adapter->io_addr); 3704 + err_ioremap: 3705 + free_netdev(netdev); 3706 + err_alloc_etherdev: 3707 + pci_release_selected_regions(pdev, 3708 + pci_select_bars(pdev, IORESOURCE_MEM)); 3709 + err_pci_reg: 3710 + err_dma: 3711 + pci_disable_device(pdev); 3712 + return err; 3713 + } 3714 + 3715 + /** 3716 + * igc_remove - Device Removal Routine 3717 + * @pdev: PCI device information struct 3718 + * 3719 + * igc_remove is called by the PCI subsystem to alert the driver 3720 + * that it should release a PCI device. This could be caused by a 3721 + * Hot-Plug event, or because the driver is going to be removed from 3722 + * memory. 3723 + */ 3724 + static void igc_remove(struct pci_dev *pdev) 3725 + { 3726 + struct net_device *netdev = pci_get_drvdata(pdev); 3727 + struct igc_adapter *adapter = netdev_priv(netdev); 3728 + 3729 + set_bit(__IGC_DOWN, &adapter->state); 3730 + 3731 + del_timer_sync(&adapter->watchdog_timer); 3732 + del_timer_sync(&adapter->phy_info_timer); 3733 + 3734 + cancel_work_sync(&adapter->reset_task); 3735 + cancel_work_sync(&adapter->watchdog_task); 3736 + 3737 + /* Release control of h/w to f/w. If f/w is AMT enabled, this 3738 + * would have already happened in close and is redundant. 3739 + */ 3740 + igc_release_hw_control(adapter); 3741 + unregister_netdev(netdev); 3742 + 3743 + igc_clear_interrupt_scheme(adapter); 3744 + pci_iounmap(pdev, adapter->io_addr); 3745 + pci_release_mem_regions(pdev); 3746 + 3747 + kfree(adapter->mac_table); 3748 + kfree(adapter->shadow_vfta); 3749 + free_netdev(netdev); 3750 + 3751 + pci_disable_pcie_error_reporting(pdev); 3752 + 3753 + pci_disable_device(pdev); 3754 + } 3755 + 3756 + static struct pci_driver igc_driver = { 3757 + .name = igc_driver_name, 3758 + .id_table = igc_pci_tbl, 3759 + .probe = igc_probe, 3760 + .remove = igc_remove, 3761 + }; 3762 + 3763 + static void igc_set_flag_queue_pairs(struct igc_adapter *adapter, 3764 + const u32 max_rss_queues) 3765 + { 3766 + /* Determine if we need to pair queues: if rss_queues > half of 3767 + * max_rss_queues, pair the queues in order to conserve interrupts 3768 + * due to limited supply. 3769 + */ 3770 + if (adapter->rss_queues > (max_rss_queues / 2)) 3771 + adapter->flags |= IGC_FLAG_QUEUE_PAIRS; 3772 + else 3773 + adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS; 3774 + } 3775 + 3776 + static unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter) 3777 + { 3778 + unsigned int max_rss_queues; 3779 + 3780 + /* Determine the maximum number of RSS queues supported.
*/ 3781 + max_rss_queues = IGC_MAX_RX_QUEUES; 3782 + 3783 + return max_rss_queues; 3784 + } 3785 + 3786 + static void igc_init_queue_configuration(struct igc_adapter *adapter) 3787 + { 3788 + u32 max_rss_queues; 3789 + 3790 + max_rss_queues = igc_get_max_rss_queues(adapter); 3791 + adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); 3792 + 3793 + igc_set_flag_queue_pairs(adapter, max_rss_queues); 3794 + } 3795 + 3796 + /** 3797 + * igc_sw_init - Initialize general software structures (struct igc_adapter) 3798 + * @adapter: board private structure to initialize 3799 + * 3800 + * igc_sw_init initializes the Adapter private data structure. 3801 + * Fields are initialized based on PCI device information and 3802 + * OS network device settings (MTU size). 3803 + */ 3804 + static int igc_sw_init(struct igc_adapter *adapter) 3805 + { 3806 + struct net_device *netdev = adapter->netdev; 3807 + struct pci_dev *pdev = adapter->pdev; 3808 + struct igc_hw *hw = &adapter->hw; 3809 + 3810 + int size = sizeof(struct igc_mac_addr) * hw->mac.rar_entry_count; 3811 + 3812 + pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); 3813 + 3814 + /* set default ring sizes */ 3815 + adapter->tx_ring_count = IGC_DEFAULT_TXD; 3816 + adapter->rx_ring_count = IGC_DEFAULT_RXD; 3817 + 3818 + /* set default ITR values */ 3819 + adapter->rx_itr_setting = IGC_DEFAULT_ITR; 3820 + adapter->tx_itr_setting = IGC_DEFAULT_ITR; 3821 + 3822 + /* set default work limits */ 3823 + adapter->tx_work_limit = IGC_DEFAULT_TX_WORK; 3824 + 3825 + /* adjust max frame to be at least the size of a standard frame */ 3826 + adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + 3827 + VLAN_HLEN; 3828 + adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; 3829 + 3830 + spin_lock_init(&adapter->nfc_lock); 3831 + spin_lock_init(&adapter->stats64_lock); 3832 + /* Assume MSI-X interrupts, will be checked during IRQ allocation */ 3833 + adapter->flags |= IGC_FLAG_HAS_MSIX; 3834 + 3835 + adapter->mac_table = kzalloc(size, GFP_ATOMIC); 3836 + if (!adapter->mac_table) 3837 + return -ENOMEM; 3838 + 3839 + igc_init_queue_configuration(adapter); 3840 + 3841 + /* This call may decrease the number of queues */ 3842 + if (igc_init_interrupt_scheme(adapter, true)) { 3843 + dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); 3844 + return -ENOMEM; 3845 + } 3846 + 3847 + /* Explicitly disable IRQ since the NIC can be in any state. */ 3848 + igc_irq_disable(adapter); 3849 + 3850 + set_bit(__IGC_DOWN, &adapter->state); 3851 + 3852 + return 0; 3853 + } 3854 + 3855 + /** 3856 + * igc_get_hw_dev - return device 3857 + * @hw: pointer to hardware structure 3858 + * 3859 + * used by hardware layer to print debugging information 3860 + */ 3861 + struct net_device *igc_get_hw_dev(struct igc_hw *hw) 3862 + { 3863 + struct igc_adapter *adapter = hw->back; 3864 + 3865 + return adapter->netdev; 3866 + } 3867 + 3868 + /** 3869 + * igc_init_module - Driver Registration Routine 3870 + * 3871 + * igc_init_module is the first routine called when the driver is 3872 + * loaded. All it does is register with the PCI subsystem. 
3873 + */ 3874 + static int __init igc_init_module(void) 3875 + { 3876 + int ret; 3877 + 3878 + pr_info("%s - version %s\n", 3879 + igc_driver_string, igc_driver_version); 3880 + 3881 + pr_info("%s\n", igc_copyright); 3882 + 3883 + ret = pci_register_driver(&igc_driver); 3884 + return ret; 3885 + } 3886 + 3887 + module_init(igc_init_module); 3888 + 3889 + /** 3890 + * igc_exit_module - Driver Exit Cleanup Routine 3891 + * 3892 + * igc_exit_module is called just before the driver is removed 3893 + * from memory. 3894 + */ 3895 + static void __exit igc_exit_module(void) 3896 + { 3897 + pci_unregister_driver(&igc_driver); 3898 + } 3899 + 3900 + module_exit(igc_exit_module); 3901 + /* igc_main.c */
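[Editor's aside - illustrative sketch, not part of the patch.] The ITR encoding used by igc_update_ring_itr() and igc_set_itr() above is an interval value where value * rate comes out to roughly 3.92e6 for every pair named in the comments (4K, 20K and 70K ints/sec), so a smaller register value means more interrupts per second. The standalone program below re-implements the igc_update_ring_itr() heuristic so the arithmetic can be checked outside the kernel; it takes a single bytes/packets pair rather than the driver's max over Rx and Tx rings, the demo_ constant values are assumed to mirror IGC_4K_ITR = 980 and IGC_20K_ITR = 196 from igc.h, and all demo_* names are inventions of this sketch, not driver API.

	#include <stdio.h>

	#define DEMO_4K_ITR  980		/* assumed mirror of IGC_4K_ITR */
	#define DEMO_20K_ITR 196		/* assumed mirror of IGC_20K_ITR */

	static int demo_ring_itr(unsigned int bytes, unsigned int packets,
				 int cur_itr, int conservative)
	{
		int avg_wire_size, new_val;

		if (!packets)
			return cur_itr;		/* no work done, keep the old value */

		avg_wire_size = bytes / packets;
		avg_wire_size += 24;		/* CRC, preamble and inter-frame gap */
		if (avg_wire_size > 3000)
			avg_wire_size = 3000;	/* don't starve jumbo frames */

		if (avg_wire_size > 300 && avg_wire_size < 1200)
			new_val = avg_wire_size / 3;	/* boost mid-size frames */
		else
			new_val = avg_wire_size / 2;

		/* conservative mode (itr setting 3) caps the rate at ~20000
		 * ints/sec by flooring the interval value, as the driver does
		 */
		if (conservative && new_val < DEMO_20K_ITR)
			new_val = DEMO_20K_ITR;

		return new_val;
	}

	int main(void)
	{
		/* 64-byte frames: (64 + 24) / 2 = 44, floored to 196 -> ~20000 ints/s */
		printf("small frames: %d\n", demo_ring_itr(64000, 1000, DEMO_4K_ITR, 1));
		/* 1000-byte frames: (1000 + 24) / 3 = 341 -> ~11500 ints/s */
		printf("mid frames: %d\n", demo_ring_itr(1000000, 1000, DEMO_4K_ITR, 1));
		return 0;
	}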
+215
drivers/net/ethernet/intel/igc/igc_nvm.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2018 Intel Corporation */ 3 + 4 + #include "igc_mac.h" 5 + #include "igc_nvm.h" 6 + 7 + /** 8 + * igc_poll_eerd_eewr_done - Poll for EEPROM read/write completion 9 + * @hw: pointer to the HW structure 10 + * @ee_reg: EEPROM flag for polling 11 + * 12 + * Polls the EEPROM status bit for either read or write completion based 13 + * upon the value of 'ee_reg'. 14 + */ 15 + static s32 igc_poll_eerd_eewr_done(struct igc_hw *hw, int ee_reg) 16 + { 17 + s32 ret_val = -IGC_ERR_NVM; 18 + u32 attempts = 100000; 19 + u32 i, reg = 0; 20 + 21 + for (i = 0; i < attempts; i++) { 22 + if (ee_reg == IGC_NVM_POLL_READ) 23 + reg = rd32(IGC_EERD); 24 + else 25 + reg = rd32(IGC_EEWR); 26 + 27 + if (reg & IGC_NVM_RW_REG_DONE) { 28 + ret_val = 0; 29 + break; 30 + } 31 + 32 + udelay(5); 33 + } 34 + 35 + return ret_val; 36 + } 37 + 38 + /** 39 + * igc_acquire_nvm - Generic request for access to EEPROM 40 + * @hw: pointer to the HW structure 41 + * 42 + * Set the EEPROM access request bit and wait for EEPROM access grant bit. 43 + * Return successful if access grant bit set, else clear the request for 44 + * EEPROM access and return -IGC_ERR_NVM (-1). 45 + */ 46 + s32 igc_acquire_nvm(struct igc_hw *hw) 47 + { 48 + s32 timeout = IGC_NVM_GRANT_ATTEMPTS; 49 + u32 eecd = rd32(IGC_EECD); 50 + s32 ret_val = 0; 51 + 52 + wr32(IGC_EECD, eecd | IGC_EECD_REQ); 53 + eecd = rd32(IGC_EECD); 54 + 55 + while (timeout) { 56 + if (eecd & IGC_EECD_GNT) 57 + break; 58 + udelay(5); 59 + eecd = rd32(IGC_EECD); 60 + timeout--; 61 + } 62 + 63 + if (!timeout) { 64 + eecd &= ~IGC_EECD_REQ; 65 + wr32(IGC_EECD, eecd); 66 + hw_dbg("Could not acquire NVM grant\n"); 67 + ret_val = -IGC_ERR_NVM; 68 + } 69 + 70 + return ret_val; 71 + } 72 + 73 + /** 74 + * igc_release_nvm - Release exclusive access to EEPROM 75 + * @hw: pointer to the HW structure 76 + * 77 + * Stop any current commands to the EEPROM and clear the EEPROM request bit. 78 + */ 79 + void igc_release_nvm(struct igc_hw *hw) 80 + { 81 + u32 eecd; 82 + 83 + eecd = rd32(IGC_EECD); 84 + eecd &= ~IGC_EECD_REQ; 85 + wr32(IGC_EECD, eecd); 86 + } 87 + 88 + /** 89 + * igc_read_nvm_eerd - Reads EEPROM using EERD register 90 + * @hw: pointer to the HW structure 91 + * @offset: offset of word in the EEPROM to read 92 + * @words: number of words to read 93 + * @data: word read from the EEPROM 94 + * 95 + * Reads a 16 bit word from the EEPROM using the EERD register. 96 + */ 97 + s32 igc_read_nvm_eerd(struct igc_hw *hw, u16 offset, u16 words, u16 *data) 98 + { 99 + struct igc_nvm_info *nvm = &hw->nvm; 100 + u32 i, eerd = 0; 101 + s32 ret_val = 0; 102 + 103 + /* A check for invalid values: offset too large, too many words, 104 + * and not enough words. 
105 + */ 106 + if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) || 107 + words == 0) { 108 + hw_dbg("nvm parameter(s) out of bounds\n"); 109 + ret_val = -IGC_ERR_NVM; 110 + goto out; 111 + } 112 + 113 + for (i = 0; i < words; i++) { 114 + eerd = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) + 115 + IGC_NVM_RW_REG_START; 116 + 117 + wr32(IGC_EERD, eerd); 118 + ret_val = igc_poll_eerd_eewr_done(hw, IGC_NVM_POLL_READ); 119 + if (ret_val) 120 + break; 121 + 122 + data[i] = (rd32(IGC_EERD) >> IGC_NVM_RW_REG_DATA); 123 + } 124 + 125 + out: 126 + return ret_val; 127 + } 128 + 129 + /** 130 + * igc_read_mac_addr - Read device MAC address 131 + * @hw: pointer to the HW structure 132 + */ 133 + s32 igc_read_mac_addr(struct igc_hw *hw) 134 + { 135 + u32 rar_high; 136 + u32 rar_low; 137 + u16 i; 138 + 139 + rar_high = rd32(IGC_RAH(0)); 140 + rar_low = rd32(IGC_RAL(0)); 141 + 142 + for (i = 0; i < IGC_RAL_MAC_ADDR_LEN; i++) 143 + hw->mac.perm_addr[i] = (u8)(rar_low >> (i * 8)); 144 + 145 + for (i = 0; i < IGC_RAH_MAC_ADDR_LEN; i++) 146 + hw->mac.perm_addr[i + 4] = (u8)(rar_high >> (i * 8)); 147 + 148 + for (i = 0; i < ETH_ALEN; i++) 149 + hw->mac.addr[i] = hw->mac.perm_addr[i]; 150 + 151 + return 0; 152 + } 153 + 154 + /** 155 + * igc_validate_nvm_checksum - Validate EEPROM checksum 156 + * @hw: pointer to the HW structure 157 + * 158 + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM 159 + * and then verifies that the sum of the EEPROM is equal to 0xBABA. 160 + */ 161 + s32 igc_validate_nvm_checksum(struct igc_hw *hw) 162 + { 163 + u16 checksum = 0; 164 + u16 i, nvm_data; 165 + s32 ret_val = 0; 166 + 167 + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { 168 + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); 169 + if (ret_val) { 170 + hw_dbg("NVM Read Error\n"); 171 + goto out; 172 + } 173 + checksum += nvm_data; 174 + } 175 + 176 + if (checksum != (u16)NVM_SUM) { 177 + hw_dbg("NVM Checksum Invalid\n"); 178 + ret_val = -IGC_ERR_NVM; 179 + goto out; 180 + } 181 + 182 + out: 183 + return ret_val; 184 + } 185 + 186 + /** 187 + * igc_update_nvm_checksum - Update EEPROM checksum 188 + * @hw: pointer to the HW structure 189 + * 190 + * Updates the EEPROM checksum by reading/adding each word of the EEPROM 191 + * up to the checksum. Then calculates the EEPROM checksum and writes the 192 + * value to the EEPROM. 193 + */ 194 + s32 igc_update_nvm_checksum(struct igc_hw *hw) 195 + { 196 + u16 checksum = 0; 197 + u16 i, nvm_data; 198 + s32 ret_val; 199 + 200 + for (i = 0; i < NVM_CHECKSUM_REG; i++) { 201 + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); 202 + if (ret_val) { 203 + hw_dbg("NVM Read Error while updating checksum.\n"); 204 + goto out; 205 + } 206 + checksum += nvm_data; 207 + } 208 + checksum = (u16)NVM_SUM - checksum; 209 + ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum); 210 + if (ret_val) 211 + hw_dbg("NVM Write Error while updating checksum.\n"); 212 + 213 + out: 214 + return ret_val; 215 + }
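[Editor's aside - illustrative sketch, not part of the patch.] igc_validate_nvm_checksum() and igc_update_nvm_checksum() above implement the long-standing e1000-family rule: the 16-bit wrap-around sum of NVM words 0x00 through NVM_CHECKSUM_REG inclusive must equal NVM_SUM (0xBABA), with the word at NVM_CHECKSUM_REG chosen to make that true. The standalone demonstration below applies the same rule to a plain array; it assumes NVM_CHECKSUM_REG is 0x3F as in the e1000/igb headers (the igc define is not part of this hunk), and the demo_* names are illustrative only.

	#include <stdint.h>
	#include <stdio.h>

	#define DEMO_CHECKSUM_REG 0x3F		/* assumed value of NVM_CHECKSUM_REG */
	#define DEMO_NVM_SUM      0xBABAu	/* mirrors NVM_SUM */

	/* recompute the checksum word, like igc_update_nvm_checksum() */
	static void demo_update(uint16_t *w)
	{
		uint16_t sum = 0;
		int i;

		for (i = 0; i < DEMO_CHECKSUM_REG; i++)
			sum += w[i];		/* 16-bit wrap-around sum */
		w[DEMO_CHECKSUM_REG] = (uint16_t)(DEMO_NVM_SUM - sum);
	}

	/* verify the image, like igc_validate_nvm_checksum(); returns 1 if valid */
	static int demo_validate(const uint16_t *w)
	{
		uint16_t sum = 0;
		int i;

		for (i = 0; i < DEMO_CHECKSUM_REG + 1; i++)
			sum += w[i];
		return sum == DEMO_NVM_SUM;
	}

	int main(void)
	{
		uint16_t image[DEMO_CHECKSUM_REG + 1] = { 0x8086, 0x1234, 0x5678 };

		demo_update(image);
		printf("checksum word 0x%04x, valid %d\n",
		       image[DEMO_CHECKSUM_REG], demo_validate(image));
		return 0;
	}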
+14
drivers/net/ethernet/intel/igc/igc_nvm.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Copyright (c) 2018 Intel Corporation */ 3 + 4 + #ifndef _IGC_NVM_H_ 5 + #define _IGC_NVM_H_ 6 + 7 + s32 igc_acquire_nvm(struct igc_hw *hw); 8 + void igc_release_nvm(struct igc_hw *hw); 9 + s32 igc_read_mac_addr(struct igc_hw *hw); 10 + s32 igc_read_nvm_eerd(struct igc_hw *hw, u16 offset, u16 words, u16 *data); 11 + s32 igc_validate_nvm_checksum(struct igc_hw *hw); 12 + s32 igc_update_nvm_checksum(struct igc_hw *hw); 13 + 14 + #endif
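[Editor's aside - illustrative sketch, not part of the patch.] igc_read_nvm_eerd(), declared above and defined in igc_nvm.c, drives the EERD register in three steps: write the word address shifted up plus a START bit, poll for the DONE bit, then pull the 16-bit result out of the data field. The exact field positions come from IGC_NVM_RW_ADDR_SHIFT and related defines in igc_defines.h, which is not part of this hunk; the values below follow the e1000-family layout (address at bit 2, START at bit 0, DONE at bit 1, data in bits 31:16) and should be treated as an assumption, as should every demo_* name.

	#include <stdint.h>
	#include <stdio.h>

	/* assumed e1000-family EERD field layout, see note above */
	#define DEMO_RW_REG_START  0x1u		/* bit 0: start the read */
	#define DEMO_RW_REG_DONE   0x2u		/* bit 1: set by hardware when done */
	#define DEMO_RW_ADDR_SHIFT 2		/* word address field starts at bit 2 */
	#define DEMO_RW_REG_DATA   16		/* read data lives in the high word */

	/* build the per-word command value, as igc_read_nvm_eerd() does */
	static uint32_t demo_eerd_cmd(uint16_t offset)
	{
		return ((uint32_t)offset << DEMO_RW_ADDR_SHIFT) + DEMO_RW_REG_START;
	}

	int main(void)
	{
		uint32_t cmd = demo_eerd_cmd(0x3F);	/* ask for the checksum word */
		/* fake a completed hardware reply carrying the data 0xBABA */
		uint32_t rsp = (0xBABAu << DEMO_RW_REG_DATA) | cmd | DEMO_RW_REG_DONE;
		unsigned int data = (rsp >> DEMO_RW_REG_DATA) & 0xFFFFu;

		printf("cmd 0x%08x done %d data 0x%04x\n",
		       cmd, !!(rsp & DEMO_RW_REG_DONE), data);
		return 0;
	}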
+791
drivers/net/ethernet/intel/igc/igc_phy.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2018 Intel Corporation */ 3 + 4 + #include "igc_phy.h" 5 + 6 + /* forward declaration */ 7 + static s32 igc_phy_setup_autoneg(struct igc_hw *hw); 8 + static s32 igc_wait_autoneg(struct igc_hw *hw); 9 + 10 + /** 11 + * igc_check_reset_block - Check if PHY reset is blocked 12 + * @hw: pointer to the HW structure 13 + * 14 + * Read the PHY management control register and check whether a PHY reset 15 + * is blocked. If a reset is not blocked return 0, otherwise 16 + * return IGC_ERR_BLK_PHY_RESET (12). 17 + */ 18 + s32 igc_check_reset_block(struct igc_hw *hw) 19 + { 20 + u32 manc; 21 + 22 + manc = rd32(IGC_MANC); 23 + 24 + return (manc & IGC_MANC_BLK_PHY_RST_ON_IDE) ? 25 + IGC_ERR_BLK_PHY_RESET : 0; 26 + } 27 + 28 + /** 29 + * igc_get_phy_id - Retrieve the PHY ID and revision 30 + * @hw: pointer to the HW structure 31 + * 32 + * Reads the PHY registers and stores the PHY ID and possibly the PHY 33 + * revision in the hardware structure. 34 + */ 35 + s32 igc_get_phy_id(struct igc_hw *hw) 36 + { 37 + struct igc_phy_info *phy = &hw->phy; 38 + s32 ret_val = 0; 39 + u16 phy_id; 40 + 41 + ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id); 42 + if (ret_val) 43 + goto out; 44 + 45 + phy->id = (u32)(phy_id << 16); 46 + usleep_range(200, 500); 47 + ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id); 48 + if (ret_val) 49 + goto out; 50 + 51 + phy->id |= (u32)(phy_id & PHY_REVISION_MASK); 52 + phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); 53 + 54 + out: 55 + return ret_val; 56 + } 57 + 58 + /** 59 + * igc_phy_has_link - Polls PHY for link 60 + * @hw: pointer to the HW structure 61 + * @iterations: number of times to poll for link 62 + * @usec_interval: delay between polling attempts 63 + * @success: pointer to whether polling was successful or not 64 + * 65 + * Polls the PHY status register for link, 'iterations' number of times. 66 + */ 67 + s32 igc_phy_has_link(struct igc_hw *hw, u32 iterations, 68 + u32 usec_interval, bool *success) 69 + { 70 + u16 i, phy_status; 71 + s32 ret_val = 0; 72 + 73 + for (i = 0; i < iterations; i++) { 74 + /* Some PHYs require the PHY_STATUS register to be read 75 + * twice due to the link bit being sticky. No harm doing 76 + * it across the board. 77 + */ 78 + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); 79 + if (ret_val && usec_interval > 0) { 80 + /* If the first read fails, another entity may have 81 + * ownership of the resources, wait and try again to 82 + * see if they have relinquished the resources yet. 83 + */ 84 + if (usec_interval >= 1000) 85 + mdelay(usec_interval / 1000); 86 + else 87 + udelay(usec_interval); 88 + } 89 + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); 90 + if (ret_val) 91 + break; 92 + if (phy_status & MII_SR_LINK_STATUS) 93 + break; 94 + if (usec_interval >= 1000) 95 + mdelay(usec_interval / 1000); 96 + else 97 + udelay(usec_interval); 98 + } 99 + 100 + *success = (i < iterations) ? true : false; 101 + 102 + return ret_val; 103 + } 104 + 105 + /** 106 + * igc_power_up_phy_copper - Restore copper link in case of PHY power down 107 + * @hw: pointer to the HW structure 108 + * 109 + * In the case of a PHY power down to save power, or to turn off link during a 110 + * driver unload, restore the link to previous settings. 
111 + */ 112 + void igc_power_up_phy_copper(struct igc_hw *hw) 113 + { 114 + u16 mii_reg = 0; 115 + 116 + /* The PHY will retain its settings across a power down/up cycle */ 117 + hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); 118 + mii_reg &= ~MII_CR_POWER_DOWN; 119 + hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); 120 + } 121 + 122 + /** 123 + * igc_power_down_phy_copper - Power down copper PHY 124 + * @hw: pointer to the HW structure 125 + * 126 + * Power down PHY to save power when interface is down and Wake-on-LAN 127 + * is not enabled. 128 + */ 129 + void igc_power_down_phy_copper(struct igc_hw *hw) 130 + { 131 + u16 mii_reg = 0; 132 + 133 + /* The PHY will retain its settings across a power down/up cycle */ 134 + hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); 135 + mii_reg |= MII_CR_POWER_DOWN; 136 + 137 + /* Temporary workaround - should be removed once the PHY implements 138 + * the IEEE registers properly 139 + */ 140 + /* hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);*/ 141 + usleep_range(1000, 2000); 142 + } 143 + 144 + /** 145 + * igc_check_downshift - Checks whether a downshift in speed occurred 146 + * @hw: pointer to the HW structure 147 + * 148 + * Success returns 0, Failure returns 1 149 + * 150 + * A downshift is detected by querying the PHY link health. 151 + */ 152 + s32 igc_check_downshift(struct igc_hw *hw) 153 + { 154 + struct igc_phy_info *phy = &hw->phy; 155 + u16 phy_data, offset, mask; 156 + s32 ret_val; 157 + 158 + switch (phy->type) { 159 + case igc_phy_i225: 160 + default: 161 + /* speed downshift not supported */ 162 + phy->speed_downgraded = false; 163 + ret_val = 0; 164 + goto out; 165 + } 166 + 167 + ret_val = phy->ops.read_reg(hw, offset, &phy_data); 168 + 169 + if (!ret_val) 170 + phy->speed_downgraded = (phy_data & mask) ? true : false; 171 + 172 + out: 173 + return ret_val; 174 + } 175 + 176 + /** 177 + * igc_phy_hw_reset - PHY hardware reset 178 + * @hw: pointer to the HW structure 179 + * 180 + * Verify the reset block is not blocking us from resetting. Acquire 181 + * semaphore (if necessary) and read/set/write the device control reset 182 + * bit in the PHY. Wait the appropriate delay time for the device to 183 + * reset and release the semaphore (if necessary). 184 + */ 185 + s32 igc_phy_hw_reset(struct igc_hw *hw) 186 + { 187 + struct igc_phy_info *phy = &hw->phy; 188 + s32 ret_val; 189 + u32 ctrl; 190 + 191 + ret_val = igc_check_reset_block(hw); 192 + if (ret_val) { 193 + ret_val = 0; 194 + goto out; 195 + } 196 + 197 + ret_val = phy->ops.acquire(hw); 198 + if (ret_val) 199 + goto out; 200 + 201 + ctrl = rd32(IGC_CTRL); 202 + wr32(IGC_CTRL, ctrl | IGC_CTRL_PHY_RST); 203 + wrfl(); 204 + 205 + udelay(phy->reset_delay_us); 206 + 207 + wr32(IGC_CTRL, ctrl); 208 + wrfl(); 209 + 210 + usleep_range(1500, 2000); 211 + 212 + phy->ops.release(hw); 213 + 214 + out: 215 + return ret_val; 216 + } 217 + 218 + /** 219 + * igc_copper_link_autoneg - Setup/Enable autoneg for copper link 220 + * @hw: pointer to the HW structure 221 + * 222 + * Performs initial bounds checking on the autoneg advertisement parameter, 223 + * then configures to advertise the full capability. Sets up the PHY to 224 + * autoneg and restarts the negotiation process with the link partner. If 225 + * autoneg_wait_to_complete, then wait for autoneg to complete before exiting.
226 + */ 227 + static s32 igc_copper_link_autoneg(struct igc_hw *hw) 228 + { 229 + struct igc_phy_info *phy = &hw->phy; 230 + u16 phy_ctrl; 231 + s32 ret_val; 232 + 233 + /* Perform some bounds checking on the autoneg advertisement 234 + * parameter. 235 + */ 236 + phy->autoneg_advertised &= phy->autoneg_mask; 237 + 238 + /* If autoneg_advertised is zero, it was not set by the calling 239 + * code, so set it to advertise the full capability. 240 + */ 241 + if (phy->autoneg_advertised == 0) 242 + phy->autoneg_advertised = phy->autoneg_mask; 243 + 244 + hw_dbg("Reconfiguring auto-neg advertisement params\n"); 245 + ret_val = igc_phy_setup_autoneg(hw); 246 + if (ret_val) { 247 + hw_dbg("Error Setting up Auto-Negotiation\n"); 248 + goto out; 249 + } 250 + hw_dbg("Restarting Auto-Neg\n"); 251 + 252 + /* Restart auto-negotiation by setting the Auto Neg Enable bit and 253 + * the Auto Neg Restart bit in the PHY control register. 254 + */ 255 + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); 256 + if (ret_val) 257 + goto out; 258 + 259 + phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); 260 + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl); 261 + if (ret_val) 262 + goto out; 263 + 264 + /* Does the user want to wait for Auto-Neg to complete here, or 265 + * check at a later time (for example, from a callback routine)? 266 + */ 267 + if (phy->autoneg_wait_to_complete) { 268 + ret_val = igc_wait_autoneg(hw); 269 + if (ret_val) { 270 + hw_dbg("Error while waiting for autoneg to complete\n"); 271 + goto out; 272 + } 273 + } 274 + 275 + hw->mac.get_link_status = true; 276 + 277 + out: 278 + return ret_val; 279 + } 280 + 281 + /** 282 + * igc_wait_autoneg - Wait for auto-neg completion 283 + * @hw: pointer to the HW structure 284 + * 285 + * Waits for auto-negotiation to complete or for the auto-negotiation time 286 + * limit to expire, whichever happens first. 287 + */ 288 + static s32 igc_wait_autoneg(struct igc_hw *hw) 289 + { 290 + u16 i, phy_status; 291 + s32 ret_val = 0; 292 + 293 + /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */ 294 + for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { 295 + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); 296 + if (ret_val) 297 + break; 298 + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); 299 + if (ret_val) 300 + break; 301 + if (phy_status & MII_SR_AUTONEG_COMPLETE) 302 + break; 303 + msleep(100); 304 + } 305 + 306 + /* Expiration of PHY_AUTO_NEG_LIMIT doesn't guarantee that 307 + * auto-negotiation has completed. 308 + */ 309 + return ret_val; 310 + } 311 + 312 + /** 313 + * igc_phy_setup_autoneg - Configure PHY for auto-negotiation 314 + * @hw: pointer to the HW structure 315 + * 316 + * Reads the MII auto-neg advertisement register and/or the 1000T control 317 + * register (and, for 2.5G parts, the MULTI GBT AN control register), then 318 + * programs the advertisement and flow control values appropriate for 319 + * the requested auto-negotiation. 320 + */ 321 + static s32 igc_phy_setup_autoneg(struct igc_hw *hw) 322 + { 323 + struct igc_phy_info *phy = &hw->phy; 324 + u16 aneg_multigbt_an_ctrl = 0; 325 + u16 mii_1000t_ctrl_reg = 0; 326 + u16 mii_autoneg_adv_reg; 327 + s32 ret_val; 328 + 329 + phy->autoneg_advertised &= phy->autoneg_mask; 330 + 331 + /* Read the MII Auto-Neg Advertisement Register (Address 4).
*/ 332 + ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); 333 + if (ret_val) 334 + return ret_val; 335 + 336 + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { 337 + /* Read the MII 1000Base-T Control Register (Address 9). */ 338 + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, 339 + &mii_1000t_ctrl_reg); 340 + if (ret_val) 341 + return ret_val; 342 + } 343 + 344 + if ((phy->autoneg_mask & ADVERTISE_2500_FULL) && 345 + hw->phy.id == I225_I_PHY_ID) { 346 + /* Read the MULTI GBT AN Control Register - reg 7.32 */ 347 + ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK << 348 + MMD_DEVADDR_SHIFT) | 349 + ANEG_MULTIGBT_AN_CTRL, 350 + &aneg_multigbt_an_ctrl); 351 + 352 + if (ret_val) 353 + return ret_val; 354 + } 355 + 356 + /* Need to parse both autoneg_advertised and fc and set up 357 + * the appropriate PHY registers. First we will parse for 358 + * autoneg_advertised software override. Since we can advertise 359 + * a plethora of combinations, we need to check each bit 360 + * individually. 361 + */ 362 + 363 + /* First we clear all the 10/100 mb speed bits in the Auto-Neg 364 + * Advertisement Register (Address 4) and the 1000 mb speed bits in 365 + * the 1000Base-T Control Register (Address 9). 366 + */ 367 + mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS | 368 + NWAY_AR_100TX_HD_CAPS | 369 + NWAY_AR_10T_FD_CAPS | 370 + NWAY_AR_10T_HD_CAPS); 371 + mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); 372 + 373 + hw_dbg("autoneg_advertised %x\n", phy->autoneg_advertised); 374 + 375 + /* Do we want to advertise 10 Mb Half Duplex? */ 376 + if (phy->autoneg_advertised & ADVERTISE_10_HALF) { 377 + hw_dbg("Advertise 10mb Half duplex\n"); 378 + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; 379 + } 380 + 381 + /* Do we want to advertise 10 Mb Full Duplex? */ 382 + if (phy->autoneg_advertised & ADVERTISE_10_FULL) { 383 + hw_dbg("Advertise 10mb Full duplex\n"); 384 + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; 385 + } 386 + 387 + /* Do we want to advertise 100 Mb Half Duplex? */ 388 + if (phy->autoneg_advertised & ADVERTISE_100_HALF) { 389 + hw_dbg("Advertise 100mb Half duplex\n"); 390 + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; 391 + } 392 + 393 + /* Do we want to advertise 100 Mb Full Duplex? */ 394 + if (phy->autoneg_advertised & ADVERTISE_100_FULL) { 395 + hw_dbg("Advertise 100mb Full duplex\n"); 396 + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; 397 + } 398 + 399 + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ 400 + if (phy->autoneg_advertised & ADVERTISE_1000_HALF) 401 + hw_dbg("Advertise 1000mb Half duplex request denied!\n"); 402 + 403 + /* Do we want to advertise 1000 Mb Full Duplex? */ 404 + if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { 405 + hw_dbg("Advertise 1000mb Full duplex\n"); 406 + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; 407 + } 408 + 409 + /* We do not allow the Phy to advertise 2500 Mb Half Duplex */ 410 + if (phy->autoneg_advertised & ADVERTISE_2500_HALF) 411 + hw_dbg("Advertise 2500mb Half duplex request denied!\n"); 412 + 413 + /* Do we want to advertise 2500 Mb Full Duplex? */ 414 + if (phy->autoneg_advertised & ADVERTISE_2500_FULL) { 415 + hw_dbg("Advertise 2500mb Full duplex\n"); 416 + aneg_multigbt_an_ctrl |= CR_2500T_FD_CAPS; 417 + } else { 418 + aneg_multigbt_an_ctrl &= ~CR_2500T_FD_CAPS; 419 + } 420 + 421 + /* Check for a software override of the flow control settings, and 422 + * setup the PHY advertisement registers accordingly. 
If 423 + * auto-negotiation is enabled, then software will have to set the 424 + * "PAUSE" bits to the correct value in the Auto-Negotiation 425 + * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto- 426 + * negotiation. 427 + * 428 + * The possible values of the "fc" parameter are: 429 + * 0: Flow control is completely disabled 430 + * 1: Rx flow control is enabled (we can receive pause frames 431 + * but not send pause frames). 432 + * 2: Tx flow control is enabled (we can send pause frames 433 + * but we do not support receiving pause frames). 434 + * 3: Both Rx and Tx flow control (symmetric) are enabled. 435 + * other: No software override. The flow control configuration 436 + * in the EEPROM is used. 437 + */ 438 + switch (hw->fc.current_mode) { 439 + case igc_fc_none: 440 + /* Flow control (Rx & Tx) is completely disabled by a 441 + * software over-ride. 442 + */ 443 + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); 444 + break; 445 + case igc_fc_rx_pause: 446 + /* Rx Flow control is enabled, and Tx Flow control is 447 + * disabled, by a software over-ride. 448 + * 449 + * Since there really isn't a way to advertise that we are 450 + * capable of Rx Pause ONLY, we will advertise that we 451 + * support both symmetric and asymmetric Rx PAUSE. Later 452 + * (in igc_config_fc_after_link_up) we will disable the 453 + * hw's ability to send PAUSE frames. 454 + */ 455 + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); 456 + break; 457 + case igc_fc_tx_pause: 458 + /* Tx Flow control is enabled, and Rx Flow control is 459 + * disabled, by a software over-ride. 460 + */ 461 + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; 462 + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; 463 + break; 464 + case igc_fc_full: 465 + /* Flow control (both Rx and Tx) is enabled by a software 466 + * over-ride. 467 + */ 468 + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); 469 + break; 470 + default: 471 + hw_dbg("Flow control param set incorrectly\n"); 472 + return -IGC_ERR_CONFIG; 473 + } 474 + 475 + ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); 476 + if (ret_val) 477 + return ret_val; 478 + 479 + hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); 480 + 481 + if (phy->autoneg_mask & ADVERTISE_1000_FULL) 482 + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, 483 + mii_1000t_ctrl_reg); 484 + 485 + if ((phy->autoneg_mask & ADVERTISE_2500_FULL) && 486 + hw->phy.id == I225_I_PHY_ID) 487 + ret_val = phy->ops.write_reg(hw, 488 + (STANDARD_AN_REG_MASK << 489 + MMD_DEVADDR_SHIFT) | 490 + ANEG_MULTIGBT_AN_CTRL, 491 + aneg_multigbt_an_ctrl); 492 + 493 + return ret_val; 494 + } 495 + 496 + /** 497 + * igc_setup_copper_link - Configure copper link settings 498 + * @hw: pointer to the HW structure 499 + * 500 + * Calls the appropriate function to configure the link for auto-neg or forced 501 + * speed and duplex. Then we check for link, once link is established calls 502 + * to configure collision distance and flow control are called. If link is 503 + * not established, we return -IGC_ERR_PHY (-2). 504 + */ 505 + s32 igc_setup_copper_link(struct igc_hw *hw) 506 + { 507 + s32 ret_val = 0; 508 + bool link; 509 + 510 + if (hw->mac.autoneg) { 511 + /* Setup autoneg and flow control advertisement and perform 512 + * autonegotiation. 513 + */ 514 + ret_val = igc_copper_link_autoneg(hw); 515 + if (ret_val) 516 + goto out; 517 + } else { 518 + /* PHY will be set to 10H, 10F, 100H or 100F 519 + * depending on user settings. 
520 + */ 521 + hw_dbg("Forcing Speed and Duplex\n"); 522 + ret_val = hw->phy.ops.force_speed_duplex(hw); 523 + if (ret_val) { 524 + hw_dbg("Error Forcing Speed and Duplex\n"); 525 + goto out; 526 + } 527 + } 528 + 529 + /* Check link status. Wait up to 100 microseconds for link to become 530 + * valid. 531 + */ 532 + ret_val = igc_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link); 533 + if (ret_val) 534 + goto out; 535 + 536 + if (link) { 537 + hw_dbg("Valid link established!!!\n"); 538 + igc_config_collision_dist(hw); 539 + ret_val = igc_config_fc_after_link_up(hw); 540 + } else { 541 + hw_dbg("Unable to establish link!!!\n"); 542 + } 543 + 544 + out: 545 + return ret_val; 546 + } 547 + 548 + /** 549 + * igc_read_phy_reg_mdic - Read MDI control register 550 + * @hw: pointer to the HW structure 551 + * @offset: register offset to be read 552 + * @data: pointer to the read data 553 + * 554 + * Reads the MDI control register in the PHY at offset and stores the 555 + * information read to data. 556 + */ 557 + static s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data) 558 + { 559 + struct igc_phy_info *phy = &hw->phy; 560 + u32 i, mdic = 0; 561 + s32 ret_val = 0; 562 + 563 + if (offset > MAX_PHY_REG_ADDRESS) { 564 + hw_dbg("PHY Address %d is out of range\n", offset); 565 + ret_val = -IGC_ERR_PARAM; 566 + goto out; 567 + } 568 + 569 + /* Set up Op-code, Phy Address, and register offset in the MDI 570 + * Control register. The MAC will take care of interfacing with the 571 + * PHY to retrieve the desired data. 572 + */ 573 + mdic = ((offset << IGC_MDIC_REG_SHIFT) | 574 + (phy->addr << IGC_MDIC_PHY_SHIFT) | 575 + (IGC_MDIC_OP_READ)); 576 + 577 + wr32(IGC_MDIC, mdic); 578 + 579 + /* Poll the ready bit to see if the MDI read completed 580 + * Increasing the time out as testing showed failures with 581 + * the lower time out 582 + */ 583 + for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) { 584 + usleep_range(500, 1000); 585 + mdic = rd32(IGC_MDIC); 586 + if (mdic & IGC_MDIC_READY) 587 + break; 588 + } 589 + if (!(mdic & IGC_MDIC_READY)) { 590 + hw_dbg("MDI Read did not complete\n"); 591 + ret_val = -IGC_ERR_PHY; 592 + goto out; 593 + } 594 + if (mdic & IGC_MDIC_ERROR) { 595 + hw_dbg("MDI Error\n"); 596 + ret_val = -IGC_ERR_PHY; 597 + goto out; 598 + } 599 + *data = (u16)mdic; 600 + 601 + out: 602 + return ret_val; 603 + } 604 + 605 + /** 606 + * igc_write_phy_reg_mdic - Write MDI control register 607 + * @hw: pointer to the HW structure 608 + * @offset: register offset to write to 609 + * @data: data to write to register at offset 610 + * 611 + * Writes data to MDI control register in the PHY at offset. 612 + */ 613 + static s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data) 614 + { 615 + struct igc_phy_info *phy = &hw->phy; 616 + u32 i, mdic = 0; 617 + s32 ret_val = 0; 618 + 619 + if (offset > MAX_PHY_REG_ADDRESS) { 620 + hw_dbg("PHY Address %d is out of range\n", offset); 621 + ret_val = -IGC_ERR_PARAM; 622 + goto out; 623 + } 624 + 625 + /* Set up Op-code, Phy Address, and register offset in the MDI 626 + * Control register. The MAC will take care of interfacing with the 627 + * PHY to write the desired data. 
628 + */ 629 + mdic = (((u32)data) | 630 + (offset << IGC_MDIC_REG_SHIFT) | 631 + (phy->addr << IGC_MDIC_PHY_SHIFT) | 632 + (IGC_MDIC_OP_WRITE)); 633 + 634 + wr32(IGC_MDIC, mdic); 635 + 636 + /* Poll the ready bit to see if the MDI write completed 637 + * Increasing the time out as testing showed failures with 638 + * the lower time out 639 + */ 640 + for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) { 641 + usleep_range(500, 1000); 642 + mdic = rd32(IGC_MDIC); 643 + if (mdic & IGC_MDIC_READY) 644 + break; 645 + } 646 + if (!(mdic & IGC_MDIC_READY)) { 647 + hw_dbg("MDI Write did not complete\n"); 648 + ret_val = -IGC_ERR_PHY; 649 + goto out; 650 + } 651 + if (mdic & IGC_MDIC_ERROR) { 652 + hw_dbg("MDI Error\n"); 653 + ret_val = -IGC_ERR_PHY; 654 + goto out; 655 + } 656 + 657 + out: 658 + return ret_val; 659 + } 660 + 661 + /** 662 + * __igc_access_xmdio_reg - Read/write XMDIO register 663 + * @hw: pointer to the HW structure 664 + * @address: XMDIO address to program 665 + * @dev_addr: device address to program 666 + * @data: pointer to value to read/write from/to the XMDIO address 667 + * @read: boolean flag to indicate read or write 668 + */ 669 + static s32 __igc_access_xmdio_reg(struct igc_hw *hw, u16 address, 670 + u8 dev_addr, u16 *data, bool read) 671 + { 672 + s32 ret_val; 673 + 674 + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, dev_addr); 675 + if (ret_val) 676 + return ret_val; 677 + 678 + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAAD, address); 679 + if (ret_val) 680 + return ret_val; 681 + 682 + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, IGC_MMDAC_FUNC_DATA | 683 + dev_addr); 684 + if (ret_val) 685 + return ret_val; 686 + 687 + if (read) 688 + ret_val = hw->phy.ops.read_reg(hw, IGC_MMDAAD, data); 689 + else 690 + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAAD, *data); 691 + if (ret_val) 692 + return ret_val; 693 + 694 + /* Reset the MMD access control register back to 0 */ 695 + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, 0); 696 + if (ret_val) 697 + return ret_val; 698 + 699 + return ret_val; 700 + } 701 + 702 + /** 703 + * igc_read_xmdio_reg - Read XMDIO register 704 + * @hw: pointer to the HW structure 705 + * @addr: XMDIO address to program 706 + * @dev_addr: device address to program 707 + * @data: pointer to the value read from the XMDIO address 708 + */ 709 + static s32 igc_read_xmdio_reg(struct igc_hw *hw, u16 addr, 710 + u8 dev_addr, u16 *data) 711 + { 712 + return __igc_access_xmdio_reg(hw, addr, dev_addr, data, true); 713 + } 714 + 715 + /** 716 + * igc_write_xmdio_reg - Write XMDIO register 717 + * @hw: pointer to the HW structure 718 + * @addr: XMDIO address to program 719 + * @dev_addr: device address to program 720 + * @data: value to be written to the XMDIO address 721 + */ 722 + static s32 igc_write_xmdio_reg(struct igc_hw *hw, u16 addr, 723 + u8 dev_addr, u16 data) 724 + { 725 + return __igc_access_xmdio_reg(hw, addr, dev_addr, &data, false); 726 + } 727 + 728 + /** 729 + * igc_write_phy_reg_gpy - Write GPY PHY register 730 + * @hw: pointer to the HW structure 731 + * @offset: register offset to write to 732 + * @data: data to write at register offset 733 + * 734 + * Acquires the semaphore, if necessary, then writes the data to the PHY 735 + * register at the offset. Releases any acquired semaphore before exiting.
736 + */ 737 + s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data) 738 + { 739 + u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT; 740 + s32 ret_val; 741 + 742 + offset = offset & GPY_REG_MASK; 743 + 744 + if (!dev_addr) { 745 + ret_val = hw->phy.ops.acquire(hw); 746 + if (ret_val) 747 + return ret_val; 748 + ret_val = igc_write_phy_reg_mdic(hw, offset, data); 749 + hw->phy.ops.release(hw); 750 + if (ret_val) 751 + return ret_val; 752 + } else { 753 + ret_val = igc_write_xmdio_reg(hw, (u16)offset, dev_addr, 754 + data); 755 + } 756 + 757 + return ret_val; 758 + } 759 + 760 + /** 761 + * igc_read_phy_reg_gpy - Read GPY PHY register 762 + * @hw: pointer to the HW structure 763 + * @offset: lower half is register offset to read from 764 + * upper half is MMD to use. 765 + * @data: pointer to the data read at register offset 766 + * 767 + * Acquires the semaphore, if necessary, then reads the data in the PHY 768 + * register at the offset. Releases any acquired semaphore before exiting. 769 + */ 770 + s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data) 771 + { 772 + u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT; 773 + s32 ret_val; 774 + 775 + offset = offset & GPY_REG_MASK; 776 + 777 + if (!dev_addr) { 778 + ret_val = hw->phy.ops.acquire(hw); 779 + if (ret_val) 780 + return ret_val; 781 + ret_val = igc_read_phy_reg_mdic(hw, offset, data); 782 + hw->phy.ops.release(hw); 783 + if (ret_val) 784 + return ret_val; 785 + } else { 786 + ret_val = igc_read_xmdio_reg(hw, (u16)offset, dev_addr, 787 + data); 788 + } 789 + 790 + return ret_val; 791 + }
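A note on the offset encoding used by the two gpy accessors above: the MMD device address rides in the upper 16 bits of the offset (GPY_MMD_MASK/GPY_MMD_SHIFT) and the register offset in the lower 16 bits (GPY_REG_MASK); a zero MMD selects the direct MDIC path. The sketch below is a hypothetical caller, not part of this patch, mirroring the 7.32 (MULTI GBT AN control) access that igc_phy_setup_autoneg() performs:

/* Hypothetical caller, not part of this patch: read MMD register 7.32
 * (the MULTI GBT AN control register) by packing the device address
 * into the upper 16 bits of the offset, exactly as
 * igc_phy_setup_autoneg() does.
 */
static s32 igc_example_read_multigbt_ctrl(struct igc_hw *hw, u16 *an_ctrl)
{
        u32 offset = (STANDARD_AN_REG_MASK << MMD_DEVADDR_SHIFT) |
                     ANEG_MULTIGBT_AN_CTRL;

        /* dev_addr is non-zero here, so the read goes through the
         * XMDIO path (MMD access via registers 13 and 14) rather
         * than a direct MDIC access.
         */
        return igc_read_phy_reg_gpy(hw, offset, an_ctrl);
}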
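Similarly, the flow control cases handled in igc_phy_setup_autoneg() are driven entirely by hw->fc.current_mode and phy->autoneg_advertised. The following sketch is illustrative only (no such helper exists in this series) and shows how a caller might request Rx-pause-only flow control with 1G/2.5G full-duplex advertisement; field and constant names are the ones used elsewhere in this file:

/* Illustrative only, not part of this series: request Rx-pause-only
 * flow control and 1G/2.5G full-duplex advertisement, then run copper
 * link setup.
 */
static s32 igc_example_request_rx_pause(struct igc_hw *hw)
{
        hw->mac.autoneg = true;
        hw->fc.current_mode = igc_fc_rx_pause;
        hw->phy.autoneg_advertised = ADVERTISE_1000_FULL |
                                     ADVERTISE_2500_FULL;

        /* igc_phy_setup_autoneg() advertises both symmetric and
         * asymmetric PAUSE for igc_fc_rx_pause; the MAC's ability to
         * send PAUSE frames is disabled later, in
         * igc_config_fc_after_link_up().
         */
        return igc_setup_copper_link(hw);
}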
+21
drivers/net/ethernet/intel/igc/igc_phy.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Copyright (c) 2018 Intel Corporation */ 3 + 4 + #ifndef _IGC_PHY_H_ 5 + #define _IGC_PHY_H_ 6 + 7 + #include "igc_mac.h" 8 + 9 + s32 igc_check_reset_block(struct igc_hw *hw); 10 + s32 igc_phy_hw_reset(struct igc_hw *hw); 11 + s32 igc_get_phy_id(struct igc_hw *hw); 12 + s32 igc_phy_has_link(struct igc_hw *hw, u32 iterations, 13 + u32 usec_interval, bool *success); 14 + s32 igc_check_downshift(struct igc_hw *hw); 15 + s32 igc_setup_copper_link(struct igc_hw *hw); 16 + void igc_power_up_phy_copper(struct igc_hw *hw); 17 + void igc_power_down_phy_copper(struct igc_hw *hw); 18 + s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data); 19 + s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data); 20 + 21 + #endif
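As a rough orientation for how these entry points compose, here is a hedged bring-up sketch; the function itself is illustrative, not part of this series, and it assumes the MAC code has already populated struct igc_hw:

/* Illustrative sketch, not part of this series: reset the PHY (a no-op
 * when manageability blocks the reset), identify it, then perform
 * copper link setup, which handles autoneg and flow control
 * configuration.
 */
static s32 igc_example_phy_bringup(struct igc_hw *hw)
{
        s32 ret_val;

        ret_val = igc_phy_hw_reset(hw); /* returns 0 if reset is blocked */
        if (ret_val)
                return ret_val;

        ret_val = igc_get_phy_id(hw);
        if (ret_val)
                return ret_val;

        return igc_setup_copper_link(hw);
}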
+221
drivers/net/ethernet/intel/igc/igc_regs.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Copyright (c) 2018 Intel Corporation */ 3 + 4 + #ifndef _IGC_REGS_H_ 5 + #define _IGC_REGS_H_ 6 + 7 + /* General Register Descriptions */ 8 + #define IGC_CTRL 0x00000 /* Device Control - RW */ 9 + #define IGC_STATUS 0x00008 /* Device Status - RO */ 10 + #define IGC_EECD 0x00010 /* EEPROM/Flash Control - RW */ 11 + #define IGC_CTRL_EXT 0x00018 /* Extended Device Control - RW */ 12 + #define IGC_MDIC 0x00020 /* MDI Control - RW */ 13 + #define IGC_MDICNFG 0x00E04 /* MDC/MDIO Configuration - RW */ 14 + #define IGC_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ 15 + 16 + /* Internal Packet Buffer Size Registers */ 17 + #define IGC_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ 18 + #define IGC_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */ 19 + 20 + /* NVM Register Descriptions */ 21 + #define IGC_EERD 0x12014 /* EEPROM mode read - RW */ 22 + #define IGC_EEWR 0x12018 /* EEPROM mode write - RW */ 23 + 24 + /* Flow Control Register Descriptions */ 25 + #define IGC_FCAL 0x00028 /* FC Address Low - RW */ 26 + #define IGC_FCAH 0x0002C /* FC Address High - RW */ 27 + #define IGC_FCT 0x00030 /* FC Type - RW */ 28 + #define IGC_FCTTV 0x00170 /* FC Transmit Timer - RW */ 29 + #define IGC_FCRTL 0x02160 /* FC Receive Threshold Low - RW */ 30 + #define IGC_FCRTH 0x02168 /* FC Receive Threshold High - RW */ 31 + #define IGC_FCRTV 0x02460 /* FC Refresh Timer Value - RW */ 32 + #define IGC_FCSTS 0x02464 /* FC Status - RO */ 33 + 34 + /* PCIe Register Description */ 35 + #define IGC_GCR 0x05B00 /* PCIe control - RW */ 36 + 37 + /* Semaphore registers */ 38 + #define IGC_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */ 39 + #define IGC_SWSM 0x05B50 /* SW Semaphore */ 40 + #define IGC_FWSM 0x05B54 /* FW Semaphore */ 41 + 42 + /* Function Active and Power State to MNG */ 43 + #define IGC_FACTPS 0x05B30 44 + 45 + /* Interrupt Register Description */ 46 + #define IGC_EICS 0x01520 /* Ext. Interrupt Cause Set - WO */ 47 + #define IGC_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ 48 + #define IGC_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ 49 + #define IGC_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ 50 + #define IGC_EIAM 0x01530 /* Ext.
Interrupt Auto Mask - RW */ 51 + #define IGC_ICR 0x01500 /* Intr Cause Read - RC/W1C */ 52 + #define IGC_ICS 0x01504 /* Intr Cause Set - WO */ 53 + #define IGC_IMS 0x01508 /* Intr Mask Set/Read - RW */ 54 + #define IGC_IMC 0x0150C /* Intr Mask Clear - WO */ 55 + #define IGC_IAM 0x01510 /* Intr Ack Auto Mask- RW */ 56 + /* Intr Throttle - RW */ 57 + #define IGC_EITR(_n) (0x01680 + (0x4 * (_n))) 58 + /* Interrupt Vector Allocation - RW */ 59 + #define IGC_IVAR0 0x01700 60 + #define IGC_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ 61 + #define IGC_GPIE 0x01514 /* General Purpose Intr Enable - RW */ 62 + 63 + /* Interrupt Cause */ 64 + #define IGC_ICRXPTC 0x04104 /* Rx Packet Timer Expire Count */ 65 + #define IGC_ICRXATC 0x04108 /* Rx Absolute Timer Expire Count */ 66 + #define IGC_ICTXPTC 0x0410C /* Tx Packet Timer Expire Count */ 67 + #define IGC_ICTXATC 0x04110 /* Tx Absolute Timer Expire Count */ 68 + #define IGC_ICTXQEC 0x04118 /* Tx Queue Empty Count */ 69 + #define IGC_ICTXQMTC 0x0411C /* Tx Queue Min Threshold Count */ 70 + #define IGC_ICRXDMTC 0x04120 /* Rx Descriptor Min Threshold Count */ 71 + #define IGC_ICRXOC 0x04124 /* Receiver Overrun Count */ 72 + 73 + #define IGC_CBTMPC 0x0402C /* Circuit Breaker TX Packet Count */ 74 + #define IGC_HTDPMC 0x0403C /* Host Transmit Discarded Packets */ 75 + #define IGC_CBRMPC 0x040FC /* Circuit Breaker RX Packet Count */ 76 + #define IGC_RPTHC 0x04104 /* Rx Packets To Host */ 77 + #define IGC_HGPTC 0x04118 /* Host Good Packets TX Count */ 78 + #define IGC_HTCBDPC 0x04124 /* Host TX Circ.Breaker Drop Count */ 79 + 80 + /* MSI-X Table Register Descriptions */ 81 + #define IGC_PBACL 0x05B68 /* MSIx PBA Clear - R/W 1 to clear */ 82 + 83 + /* Receive Register Descriptions */ 84 + #define IGC_RCTL 0x00100 /* Rx Control - RW */ 85 + #define IGC_SRRCTL(_n) (0x0C00C + ((_n) * 0x40)) 86 + #define IGC_PSRTYPE(_i) (0x05480 + ((_i) * 4)) 87 + #define IGC_RDBAL(_n) (0x0C000 + ((_n) * 0x40)) 88 + #define IGC_RDBAH(_n) (0x0C004 + ((_n) * 0x40)) 89 + #define IGC_RDLEN(_n) (0x0C008 + ((_n) * 0x40)) 90 + #define IGC_RDH(_n) (0x0C010 + ((_n) * 0x40)) 91 + #define IGC_RDT(_n) (0x0C018 + ((_n) * 0x40)) 92 + #define IGC_RXDCTL(_n) (0x0C028 + ((_n) * 0x40)) 93 + #define IGC_RQDPC(_n) (0x0C030 + ((_n) * 0x40)) 94 + #define IGC_RXCSUM 0x05000 /* Rx Checksum Control - RW */ 95 + #define IGC_RLPML 0x05004 /* Rx Long Packet Max Length */ 96 + #define IGC_RFCTL 0x05008 /* Receive Filter Control*/ 97 + #define IGC_MTA 0x05200 /* Multicast Table Array - RW Array */ 98 + #define IGC_UTA 0x0A000 /* Unicast Table Array - RW */ 99 + #define IGC_RAL(_n) (0x05400 + ((_n) * 0x08)) 100 + #define IGC_RAH(_n) (0x05404 + ((_n) * 0x08)) 101 + 102 + /* Transmit Register Descriptions */ 103 + #define IGC_TCTL 0x00400 /* Tx Control - RW */ 104 + #define IGC_TIPG 0x00410 /* Tx Inter-packet gap - RW */ 105 + #define IGC_TDBAL(_n) (0x0E000 + ((_n) * 0x40)) 106 + #define IGC_TDBAH(_n) (0x0E004 + ((_n) * 0x40)) 107 + #define IGC_TDLEN(_n) (0x0E008 + ((_n) * 0x40)) 108 + #define IGC_TDH(_n) (0x0E010 + ((_n) * 0x40)) 109 + #define IGC_TDT(_n) (0x0E018 + ((_n) * 0x40)) 110 + #define IGC_TXDCTL(_n) (0x0E028 + ((_n) * 0x40)) 111 + 112 + /* MMD Register Descriptions */ 113 + #define IGC_MMDAC 13 /* MMD Access Control */ 114 + #define IGC_MMDAAD 14 /* MMD Access Address/Data */ 115 + 116 + /* Good transmitted packets counter registers */ 117 + #define IGC_PQGPTC(_n) (0x010014 + (0x100 * (_n))) 118 + 119 + /* Statistics Register Descriptions */ 120 + #define IGC_CRCERRS 0x04000 /* CRC Error 
Count - R/clr */ 121 + #define IGC_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ 122 + #define IGC_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ 123 + #define IGC_RXERRC 0x0400C /* Receive Error Count - R/clr */ 124 + #define IGC_MPC 0x04010 /* Missed Packet Count - R/clr */ 125 + #define IGC_SCC 0x04014 /* Single Collision Count - R/clr */ 126 + #define IGC_ECOL 0x04018 /* Excessive Collision Count - R/clr */ 127 + #define IGC_MCC 0x0401C /* Multiple Collision Count - R/clr */ 128 + #define IGC_LATECOL 0x04020 /* Late Collision Count - R/clr */ 129 + #define IGC_COLC 0x04028 /* Collision Count - R/clr */ 130 + #define IGC_DC 0x04030 /* Defer Count - R/clr */ 131 + #define IGC_TNCRS 0x04034 /* Tx-No CRS - R/clr */ 132 + #define IGC_SEC 0x04038 /* Sequence Error Count - R/clr */ 133 + #define IGC_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ 134 + #define IGC_RLEC 0x04040 /* Receive Length Error Count - R/clr */ 135 + #define IGC_XONRXC 0x04048 /* XON Rx Count - R/clr */ 136 + #define IGC_XONTXC 0x0404C /* XON Tx Count - R/clr */ 137 + #define IGC_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */ 138 + #define IGC_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */ 139 + #define IGC_FCRUC 0x04058 /* Flow Control Rx Unsupported Count - R/clr */ 140 + #define IGC_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */ 141 + #define IGC_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */ 142 + #define IGC_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */ 143 + #define IGC_PRC511 0x04068 /* Packets Rx (256-511 bytes) - R/clr */ 144 + #define IGC_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */ 145 + #define IGC_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */ 146 + #define IGC_GPRC 0x04074 /* Good Packets Rx Count - R/clr */ 147 + #define IGC_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */ 148 + #define IGC_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */ 149 + #define IGC_GPTC 0x04080 /* Good Packets Tx Count - R/clr */ 150 + #define IGC_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */ 151 + #define IGC_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */ 152 + #define IGC_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */ 153 + #define IGC_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */ 154 + #define IGC_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */ 155 + #define IGC_RUC 0x040A4 /* Rx Undersize Count - R/clr */ 156 + #define IGC_RFC 0x040A8 /* Rx Fragment Count - R/clr */ 157 + #define IGC_ROC 0x040AC /* Rx Oversize Count - R/clr */ 158 + #define IGC_RJC 0x040B0 /* Rx Jabber Count - R/clr */ 159 + #define IGC_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */ 160 + #define IGC_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ 161 + #define IGC_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */ 162 + #define IGC_TORL 0x040C0 /* Total Octets Rx Low - R/clr */ 163 + #define IGC_TORH 0x040C4 /* Total Octets Rx High - R/clr */ 164 + #define IGC_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */ 165 + #define IGC_TOTH 0x040CC /* Total Octets Tx High - R/clr */ 166 + #define IGC_TPR 0x040D0 /* Total Packets Rx - R/clr */ 167 + #define IGC_TPT 0x040D4 /* Total Packets Tx - R/clr */ 168 + #define IGC_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */ 169 + #define IGC_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */ 170 + #define IGC_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */ 171 + #define IGC_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */ 172 + #define IGC_PTC1023 0x040E8 /* Packets Tx (512-1023
bytes) - R/clr */ 173 + #define IGC_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */ 174 + #define IGC_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */ 175 + #define IGC_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */ 176 + #define IGC_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */ 177 + #define IGC_TSCTFC 0x040FC /* TCP Segmentation Context Tx Fail - R/clr */ 178 + #define IGC_IAC 0x04100 /* Interrupt Assertion Count */ 179 + #define IGC_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */ 180 + #define IGC_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */ 181 + #define IGC_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */ 182 + #define IGC_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */ 183 + #define IGC_RPTHC 0x04104 /* Rx Packets To Host */ 184 + #define IGC_HGPTC 0x04118 /* Host Good Packets Tx Count */ 185 + #define IGC_RXDMTC 0x04120 /* Rx Descriptor Minimum Threshold Count */ 186 + #define IGC_HGORCL 0x04128 /* Host Good Octets Received Count Low */ 187 + #define IGC_HGORCH 0x0412C /* Host Good Octets Received Count High */ 188 + #define IGC_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */ 189 + #define IGC_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */ 190 + #define IGC_LENERRS 0x04138 /* Length Errors Count */ 191 + #define IGC_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */ 192 + #define IGC_HRMPC 0x0A018 /* Header Redirection Missed Packet Count */ 193 + 194 + /* Management registers */ 195 + #define IGC_MANC 0x05820 /* Management Control - RW */ 196 + 197 + /* Shadow Ram Write Register - RW */ 198 + #define IGC_SRWR 0x12018 199 + 200 + /* forward declaration */ 201 + struct igc_hw; 202 + u32 igc_rd32(struct igc_hw *hw, u32 reg); 203 + 204 + /* write operations, indexed using DWORDS */ 205 + #define wr32(reg, val) \ 206 + do { \ 207 + u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \ 208 + if (!IGC_REMOVED(hw_addr)) \ 209 + writel((val), &hw_addr[(reg)]); \ 210 + } while (0) 211 + 212 + #define rd32(reg) (igc_rd32(hw, reg)) 213 + 214 + #define wrfl() ((void)rd32(IGC_STATUS)) 215 + 216 + #define array_wr32(reg, offset, value) \ 217 + wr32((reg) + ((offset) << 2), (value)) 218 + 219 + #define array_rd32(reg, offset) (igc_rd32(hw, (reg) + ((offset) << 2))) 220 + 221 + #endif
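The wr32()/rd32() macros above capture a local struct igc_hw *hw from the calling scope (the macros expand to (hw)->hw_addr), so they are meant to be used inside functions that already hold such a pointer. A hedged usage sketch follows; the helper and its parameters are illustrative, not part of this patch:

/* Illustrative sketch, not part of this patch: program the base
 * address and length of Tx descriptor ring 'reg_idx' and zero its
 * head/tail pointers. 'tdba' is the DMA address of the ring and
 * 'ring_len' its size in bytes; both would come from the caller's
 * ring structure in a real driver path.
 */
static void igc_example_configure_tx_ring(struct igc_hw *hw, int reg_idx,
                                          dma_addr_t tdba, u32 ring_len)
{
        wr32(IGC_TDLEN(reg_idx), ring_len);
        wr32(IGC_TDBAL(reg_idx), lower_32_bits(tdba));
        wr32(IGC_TDBAH(reg_idx), upper_32_bits(tdba));

        wr32(IGC_TDH(reg_idx), 0);
        wr32(IGC_TDT(reg_idx), 0);

        wrfl(); /* flush posted writes by reading IGC_STATUS */
}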