Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch '1GbE' of https://github.com/anguy11/next-queue

Tony Nguyen says:

====================
1GbE Intel Wired LAN Driver Updates 2020-09-28

This series contains updates to igb, igc, and e1000e drivers.

Sven Auhagen adds XDP support for igb.

Gal Hammer allows the 82576 to display its part number string correctly
for igb.

Sasha adds device IDs for I221 and I226 parts. Exposes LPI counters and
removes unused fields in structures for igc. He also adds Meteor Lake
support for e1000e.

For igc, Andre renames IGC_TSYNCTXCTL_VALID to IGC_TSYNCTXCTL_TXTT_0 to
match the datasheet and adds a warning if it's not set when expected.
Removes the PTP Tx timestamp check in igc_ptp_tx_work() as it's already
checked in the watchdog_task. Cleans up some code by removing invalid error
bits, renaming a bit to match datasheet naming, and removing a
now-unneeded macro.

Vinicius makes changes for igc PTP: removes calling SYSTIMR to latch timer
value, stores PTP time before a reset, and rejects schedules with times in
the future.

v2: Remove 'inline' from igb_xdp_tx_queue_mapping() and igb_rx_offset()
for patch 1
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+592 -87
+2
drivers/net/ethernet/intel/e1000e/ethtool.c
··· 895 895 case e1000_pch_cnp: 896 896 case e1000_pch_tgp: 897 897 case e1000_pch_adp: 898 + case e1000_pch_mtp: 898 899 mask |= BIT(18); 899 900 break; 900 901 default: ··· 1561 1560 case e1000_pch_cnp: 1562 1561 case e1000_pch_tgp: 1563 1562 case e1000_pch_adp: 1563 + case e1000_pch_mtp: 1564 1564 fext_nvm11 = er32(FEXTNVM11); 1565 1565 fext_nvm11 &= ~E1000_FEXTNVM11_DISABLE_MULR_FIX; 1566 1566 ew32(FEXTNVM11, fext_nvm11);
+5
drivers/net/ethernet/intel/e1000e/hw.h
··· 102 102 #define E1000_DEV_ID_PCH_ADP_I219_V16 0x1A1F 103 103 #define E1000_DEV_ID_PCH_ADP_I219_LM17 0x1A1C 104 104 #define E1000_DEV_ID_PCH_ADP_I219_V17 0x1A1D 105 + #define E1000_DEV_ID_PCH_MTP_I219_LM18 0x550A 106 + #define E1000_DEV_ID_PCH_MTP_I219_V18 0x550B 107 + #define E1000_DEV_ID_PCH_MTP_I219_LM19 0x550C 108 + #define E1000_DEV_ID_PCH_MTP_I219_V19 0x550D 105 109 106 110 #define E1000_REVISION_4 4 107 111 ··· 131 127 e1000_pch_cnp, 132 128 e1000_pch_tgp, 133 129 e1000_pch_adp, 130 + e1000_pch_mtp, 134 131 }; 135 132 136 133 enum e1000_media_type {
+7
drivers/net/ethernet/intel/e1000e/ich8lan.c
··· 320 320 case e1000_pch_cnp: 321 321 case e1000_pch_tgp: 322 322 case e1000_pch_adp: 323 + case e1000_pch_mtp: 323 324 if (e1000_phy_is_accessible_pchlan(hw)) 324 325 break; 325 326 ··· 465 464 case e1000_pch_cnp: 466 465 case e1000_pch_tgp: 467 466 case e1000_pch_adp: 467 + case e1000_pch_mtp: 468 468 /* In case the PHY needs to be in mdio slow mode, 469 469 * set slow mode and try to get the PHY id again. 470 470 */ ··· 710 708 case e1000_pch_cnp: 711 709 case e1000_pch_tgp: 712 710 case e1000_pch_adp: 711 + case e1000_pch_mtp: 713 712 case e1000_pchlan: 714 713 /* check management mode */ 715 714 mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan; ··· 1651 1648 case e1000_pch_cnp: 1652 1649 case e1000_pch_tgp: 1653 1650 case e1000_pch_adp: 1651 + case e1000_pch_mtp: 1654 1652 rc = e1000_init_phy_params_pchlan(hw); 1655 1653 break; 1656 1654 default: ··· 2106 2102 case e1000_pch_cnp: 2107 2103 case e1000_pch_tgp: 2108 2104 case e1000_pch_adp: 2105 + case e1000_pch_mtp: 2109 2106 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; 2110 2107 break; 2111 2108 default: ··· 3150 3145 case e1000_pch_cnp: 3151 3146 case e1000_pch_tgp: 3152 3147 case e1000_pch_adp: 3148 + case e1000_pch_mtp: 3153 3149 bank1_offset = nvm->flash_bank_size; 3154 3150 act_offset = E1000_ICH_NVM_SIG_WORD; 3155 3151 ··· 4096 4090 case e1000_pch_cnp: 4097 4091 case e1000_pch_tgp: 4098 4092 case e1000_pch_adp: 4093 + case e1000_pch_mtp: 4099 4094 word = NVM_COMPAT; 4100 4095 valid_csum_mask = NVM_COMPAT_VALID_CSUM; 4101 4096 break;
+6
drivers/net/ethernet/intel/e1000e/netdev.c
··· 3587 3587 case e1000_pch_cnp: 3588 3588 case e1000_pch_tgp: 3589 3589 case e1000_pch_adp: 3590 + case e1000_pch_mtp: 3590 3591 if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { 3591 3592 /* Stable 24MHz frequency */ 3592 3593 incperiod = INCPERIOD_24MHZ; ··· 4105 4104 case e1000_pch_cnp: 4106 4105 case e1000_pch_tgp: 4107 4106 case e1000_pch_adp: 4107 + case e1000_pch_mtp: 4108 4108 fc->refresh_time = 0xFFFF; 4109 4109 fc->pause_time = 0xFFFF; 4110 4110 ··· 7879 7877 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_cnp }, 7880 7878 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_cnp }, 7881 7879 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_cnp }, 7880 + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_cnp }, 7881 + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_cnp }, 7882 + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_cnp }, 7883 + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_cnp }, 7882 7884 7883 7885 { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ 7884 7886 };
+1
drivers/net/ethernet/intel/e1000e/ptp.c
··· 297 297 case e1000_pch_cnp: 298 298 case e1000_pch_tgp: 299 299 case e1000_pch_adp: 300 + case e1000_pch_mtp: 300 301 if ((hw->mac.type < e1000_pch_lpt) || 301 302 (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) { 302 303 adapter->ptp_clock_info.max_adj = 24000000 - 1;
+74 -6
drivers/net/ethernet/intel/igb/igb.h
··· 19 19 #include <linux/pci.h> 20 20 #include <linux/mdio.h> 21 21 22 + #include <net/xdp.h> 23 + 22 24 struct igb_adapter; 23 25 24 26 #define E1000_PCS_CFG_IGN_SD 1 ··· 81 79 #define IGB_I210_RX_LATENCY_100 2213 82 80 #define IGB_I210_RX_LATENCY_1000 448 83 81 82 + /* XDP */ 83 + #define IGB_XDP_PASS 0 84 + #define IGB_XDP_CONSUMED BIT(0) 85 + #define IGB_XDP_TX BIT(1) 86 + #define IGB_XDP_REDIR BIT(2) 87 + 84 88 struct vf_data_storage { 85 89 unsigned char vf_mac_addresses[ETH_ALEN]; 86 90 u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES]; ··· 140 132 141 133 /* Supported Rx Buffer Sizes */ 142 134 #define IGB_RXBUFFER_256 256 135 + #define IGB_RXBUFFER_1536 1536 143 136 #define IGB_RXBUFFER_2048 2048 144 137 #define IGB_RXBUFFER_3072 3072 145 138 #define IGB_RX_HDR_LEN IGB_RXBUFFER_256 146 139 #define IGB_TS_HDR_LEN 16 147 140 148 - #define IGB_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) 141 + /* Attempt to maximize the headroom available for incoming frames. We 142 + * use a 2K buffer for receives and need 1536/1534 to store the data for 143 + * the frame. This leaves us with 512 bytes of room. From that we need 144 + * to deduct the space needed for the shared info and the padding needed 145 + * to IP align the frame. 146 + * 147 + * Note: For cache line sizes 256 or larger this value is going to end 148 + * up negative. In these cases we should fall back to the 3K 149 + * buffers. 
150 + */ 149 151 #if (PAGE_SIZE < 8192) 150 - #define IGB_MAX_FRAME_BUILD_SKB \ 151 - (SKB_WITH_OVERHEAD(IGB_RXBUFFER_2048) - IGB_SKB_PAD - IGB_TS_HDR_LEN) 152 + #define IGB_MAX_FRAME_BUILD_SKB (IGB_RXBUFFER_1536 - NET_IP_ALIGN) 153 + #define IGB_2K_TOO_SMALL_WITH_PADDING \ 154 + ((NET_SKB_PAD + IGB_TS_HDR_LEN + IGB_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IGB_RXBUFFER_2048)) 155 + 156 + static inline int igb_compute_pad(int rx_buf_len) 157 + { 158 + int page_size, pad_size; 159 + 160 + page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2); 161 + pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len; 162 + 163 + return pad_size; 164 + } 165 + 166 + static inline int igb_skb_pad(void) 167 + { 168 + int rx_buf_len; 169 + 170 + /* If a 2K buffer cannot handle a standard Ethernet frame then 171 + * optimize padding for a 3K buffer instead of a 1.5K buffer. 172 + * 173 + * For a 3K buffer we need to add enough padding to allow for 174 + * tailroom due to NET_IP_ALIGN possibly shifting us out of 175 + * cache-line alignment. 176 + */ 177 + if (IGB_2K_TOO_SMALL_WITH_PADDING) 178 + rx_buf_len = IGB_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN); 179 + else 180 + rx_buf_len = IGB_RXBUFFER_1536; 181 + 182 + /* if needed make room for NET_IP_ALIGN */ 183 + rx_buf_len -= NET_IP_ALIGN; 184 + 185 + return igb_compute_pad(rx_buf_len); 186 + } 187 + 188 + #define IGB_SKB_PAD igb_skb_pad() 152 189 #else 153 - #define IGB_MAX_FRAME_BUILD_SKB (IGB_RXBUFFER_2048 - IGB_TS_HDR_LEN) 190 + #define IGB_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) 154 191 #endif 155 192 156 193 /* How many Rx Buffers do we bundle into one write to the hardware ? 
*/ ··· 247 194 #define IGB_SFF_ADDRESSING_MODE 0x4 248 195 #define IGB_SFF_8472_UNSUP 0x00 249 196 197 + enum igb_tx_buf_type { 198 + IGB_TYPE_SKB = 0, 199 + IGB_TYPE_XDP, 200 + }; 201 + 250 202 /* wrapper around a pointer to a socket buffer, 251 203 * so a DMA handle can be stored along with the buffer 252 204 */ 253 205 struct igb_tx_buffer { 254 206 union e1000_adv_tx_desc *next_to_watch; 255 207 unsigned long time_stamp; 256 - struct sk_buff *skb; 208 + enum igb_tx_buf_type type; 209 + union { 210 + struct sk_buff *skb; 211 + struct xdp_frame *xdpf; 212 + }; 257 213 unsigned int bytecount; 258 214 u16 gso_segs; 259 215 __be16 protocol; ··· 310 248 struct igb_ring { 311 249 struct igb_q_vector *q_vector; /* backlink to q_vector */ 312 250 struct net_device *netdev; /* back pointer to net_device */ 251 + struct bpf_prog *xdp_prog; 313 252 struct device *dev; /* device pointer for dma mapping */ 314 253 union { /* array of buffer info structs */ 315 254 struct igb_tx_buffer *tx_buffer_info; ··· 351 288 struct u64_stats_sync rx_syncp; 352 289 }; 353 290 }; 291 + struct xdp_rxq_info xdp_rxq; 354 292 } ____cacheline_internodealigned_in_smp; 355 293 356 294 struct igb_q_vector { ··· 403 339 return IGB_RXBUFFER_3072; 404 340 405 341 if (ring_uses_build_skb(ring)) 406 - return IGB_MAX_FRAME_BUILD_SKB + IGB_TS_HDR_LEN; 342 + return IGB_MAX_FRAME_BUILD_SKB; 407 343 #endif 408 344 return IGB_RXBUFFER_2048; 409 345 } ··· 531 467 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; 532 468 533 469 struct net_device *netdev; 470 + struct bpf_prog *xdp_prog; 534 471 535 472 unsigned long state; 536 473 unsigned int flags; ··· 708 643 709 644 extern char igb_driver_name[]; 710 645 646 + int igb_xmit_xdp_ring(struct igb_adapter *adapter, 647 + struct igb_ring *ring, 648 + struct xdp_frame *xdpf); 711 649 int igb_open(struct net_device *netdev); 712 650 int igb_close(struct net_device *netdev); 713 651 int igb_up(struct igb_adapter *);
+4
drivers/net/ethernet/intel/igb/igb_ethtool.c
··· 961 961 memcpy(&temp_ring[i], adapter->rx_ring[i], 962 962 sizeof(struct igb_ring)); 963 963 964 + /* Clear copied XDP RX-queue info */ 965 + memset(&temp_ring[i].xdp_rxq, 0, 966 + sizeof(temp_ring[i].xdp_rxq)); 967 + 964 968 temp_ring[i].count = new_rx_count; 965 969 err = igb_setup_rx_resources(&temp_ring[i]); 966 970 if (err) {
+406 -31
drivers/net/ethernet/intel/igb/igb_main.c
··· 30 30 #include <linux/if_ether.h> 31 31 #include <linux/aer.h> 32 32 #include <linux/prefetch.h> 33 + #include <linux/bpf.h> 34 + #include <linux/bpf_trace.h> 33 35 #include <linux/pm_runtime.h> 34 36 #include <linux/etherdevice.h> 35 37 #ifdef CONFIG_IGB_DCA ··· 2825 2823 } 2826 2824 } 2827 2825 2826 + static int igb_xdp_setup(struct net_device *dev, struct bpf_prog *prog) 2827 + { 2828 + int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; 2829 + struct igb_adapter *adapter = netdev_priv(dev); 2830 + bool running = netif_running(dev); 2831 + struct bpf_prog *old_prog; 2832 + bool need_reset; 2833 + 2834 + /* verify igb ring attributes are sufficient for XDP */ 2835 + for (i = 0; i < adapter->num_rx_queues; i++) { 2836 + struct igb_ring *ring = adapter->rx_ring[i]; 2837 + 2838 + if (frame_size > igb_rx_bufsz(ring)) 2839 + return -EINVAL; 2840 + } 2841 + 2842 + old_prog = xchg(&adapter->xdp_prog, prog); 2843 + need_reset = (!!prog != !!old_prog); 2844 + 2845 + /* device is up and bpf is added/removed, must setup the RX queues */ 2846 + if (need_reset && running) { 2847 + igb_close(dev); 2848 + } else { 2849 + for (i = 0; i < adapter->num_rx_queues; i++) 2850 + (void)xchg(&adapter->rx_ring[i]->xdp_prog, 2851 + adapter->xdp_prog); 2852 + } 2853 + 2854 + if (old_prog) 2855 + bpf_prog_put(old_prog); 2856 + 2857 + /* bpf is just replaced, RXQ and MTU are already setup */ 2858 + if (!need_reset) 2859 + return 0; 2860 + 2861 + if (running) 2862 + igb_open(dev); 2863 + 2864 + return 0; 2865 + } 2866 + 2867 + static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp) 2868 + { 2869 + switch (xdp->command) { 2870 + case XDP_SETUP_PROG: 2871 + return igb_xdp_setup(dev, xdp->prog); 2872 + default: 2873 + return -EINVAL; 2874 + } 2875 + } 2876 + 2877 + static void igb_xdp_ring_update_tail(struct igb_ring *ring) 2878 + { 2879 + /* Force memory writes to complete before letting h/w know there 2880 + * are new descriptors to fetch. 
2881 + */ 2882 + wmb(); 2883 + writel(ring->next_to_use, ring->tail); 2884 + } 2885 + 2886 + static struct igb_ring *igb_xdp_tx_queue_mapping(struct igb_adapter *adapter) 2887 + { 2888 + unsigned int r_idx = smp_processor_id(); 2889 + 2890 + if (r_idx >= adapter->num_tx_queues) 2891 + r_idx = r_idx % adapter->num_tx_queues; 2892 + 2893 + return adapter->tx_ring[r_idx]; 2894 + } 2895 + 2896 + static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp) 2897 + { 2898 + struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); 2899 + int cpu = smp_processor_id(); 2900 + struct igb_ring *tx_ring; 2901 + struct netdev_queue *nq; 2902 + u32 ret; 2903 + 2904 + if (unlikely(!xdpf)) 2905 + return IGB_XDP_CONSUMED; 2906 + 2907 + /* During program transitions its possible adapter->xdp_prog is assigned 2908 + * but ring has not been configured yet. In this case simply abort xmit. 2909 + */ 2910 + tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL; 2911 + if (unlikely(!tx_ring)) 2912 + return -ENXIO; 2913 + 2914 + nq = txring_txq(tx_ring); 2915 + __netif_tx_lock(nq, cpu); 2916 + ret = igb_xmit_xdp_ring(adapter, tx_ring, xdpf); 2917 + __netif_tx_unlock(nq); 2918 + 2919 + return ret; 2920 + } 2921 + 2922 + static int igb_xdp_xmit(struct net_device *dev, int n, 2923 + struct xdp_frame **frames, u32 flags) 2924 + { 2925 + struct igb_adapter *adapter = netdev_priv(dev); 2926 + int cpu = smp_processor_id(); 2927 + struct igb_ring *tx_ring; 2928 + struct netdev_queue *nq; 2929 + int drops = 0; 2930 + int i; 2931 + 2932 + if (unlikely(test_bit(__IGB_DOWN, &adapter->state))) 2933 + return -ENETDOWN; 2934 + 2935 + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 2936 + return -EINVAL; 2937 + 2938 + /* During program transitions its possible adapter->xdp_prog is assigned 2939 + * but ring has not been configured yet. In this case simply abort xmit. 2940 + */ 2941 + tx_ring = adapter->xdp_prog ? 
igb_xdp_tx_queue_mapping(adapter) : NULL; 2942 + if (unlikely(!tx_ring)) 2943 + return -ENXIO; 2944 + 2945 + nq = txring_txq(tx_ring); 2946 + __netif_tx_lock(nq, cpu); 2947 + 2948 + for (i = 0; i < n; i++) { 2949 + struct xdp_frame *xdpf = frames[i]; 2950 + int err; 2951 + 2952 + err = igb_xmit_xdp_ring(adapter, tx_ring, xdpf); 2953 + if (err != IGB_XDP_TX) { 2954 + xdp_return_frame_rx_napi(xdpf); 2955 + drops++; 2956 + } 2957 + } 2958 + 2959 + __netif_tx_unlock(nq); 2960 + 2961 + if (unlikely(flags & XDP_XMIT_FLUSH)) 2962 + igb_xdp_ring_update_tail(tx_ring); 2963 + 2964 + return n - drops; 2965 + } 2966 + 2828 2967 static const struct net_device_ops igb_netdev_ops = { 2829 2968 .ndo_open = igb_open, 2830 2969 .ndo_stop = igb_close, ··· 2990 2847 .ndo_fdb_add = igb_ndo_fdb_add, 2991 2848 .ndo_features_check = igb_features_check, 2992 2849 .ndo_setup_tc = igb_setup_tc, 2850 + .ndo_bpf = igb_xdp, 2851 + .ndo_xdp_xmit = igb_xdp_xmit, 2993 2852 }; 2994 2853 2995 2854 /** ··· 3532 3387 "Width x1" : "unknown"), netdev->dev_addr); 3533 3388 } 3534 3389 3535 - if ((hw->mac.type >= e1000_i210 || 3390 + if ((hw->mac.type == e1000_82576 && 3391 + rd32(E1000_EECD) & E1000_EECD_PRES) || 3392 + (hw->mac.type >= e1000_i210 || 3536 3393 igb_get_flash_presence_i210(hw))) { 3537 3394 ret_val = igb_read_part_string(hw, part_str, 3538 3395 E1000_PBANUM_LENGTH); ··· 4326 4179 **/ 4327 4180 int igb_setup_rx_resources(struct igb_ring *rx_ring) 4328 4181 { 4182 + struct igb_adapter *adapter = netdev_priv(rx_ring->netdev); 4329 4183 struct device *dev = rx_ring->dev; 4330 4184 int size; 4331 4185 ··· 4348 4200 rx_ring->next_to_alloc = 0; 4349 4201 rx_ring->next_to_clean = 0; 4350 4202 rx_ring->next_to_use = 0; 4203 + 4204 + rx_ring->xdp_prog = adapter->xdp_prog; 4205 + 4206 + /* XDP RX-queue info */ 4207 + if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, 4208 + rx_ring->queue_index) < 0) 4209 + goto err; 4351 4210 4352 4211 return 0; 4353 4212 ··· 4660 4505 int reg_idx = 
ring->reg_idx; 4661 4506 u32 rxdctl = 0; 4662 4507 4508 + xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); 4509 + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, 4510 + MEM_TYPE_PAGE_SHARED, NULL)); 4511 + 4663 4512 /* disable the queue */ 4664 4513 wr32(E1000_RXDCTL(reg_idx), 0); 4665 4514 ··· 4868 4709 { 4869 4710 igb_clean_rx_ring(rx_ring); 4870 4711 4712 + rx_ring->xdp_prog = NULL; 4713 + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); 4871 4714 vfree(rx_ring->rx_buffer_info); 4872 4715 rx_ring->rx_buffer_info = NULL; 4873 4716 ··· 6239 6078 return -1; 6240 6079 } 6241 6080 6081 + int igb_xmit_xdp_ring(struct igb_adapter *adapter, 6082 + struct igb_ring *tx_ring, 6083 + struct xdp_frame *xdpf) 6084 + { 6085 + union e1000_adv_tx_desc *tx_desc; 6086 + u32 len, cmd_type, olinfo_status; 6087 + struct igb_tx_buffer *tx_buffer; 6088 + dma_addr_t dma; 6089 + u16 i; 6090 + 6091 + len = xdpf->len; 6092 + 6093 + if (unlikely(!igb_desc_unused(tx_ring))) 6094 + return IGB_XDP_CONSUMED; 6095 + 6096 + dma = dma_map_single(tx_ring->dev, xdpf->data, len, DMA_TO_DEVICE); 6097 + if (dma_mapping_error(tx_ring->dev, dma)) 6098 + return IGB_XDP_CONSUMED; 6099 + 6100 + /* record the location of the first descriptor for this packet */ 6101 + tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; 6102 + tx_buffer->bytecount = len; 6103 + tx_buffer->gso_segs = 1; 6104 + tx_buffer->protocol = 0; 6105 + 6106 + i = tx_ring->next_to_use; 6107 + tx_desc = IGB_TX_DESC(tx_ring, i); 6108 + 6109 + dma_unmap_len_set(tx_buffer, len, len); 6110 + dma_unmap_addr_set(tx_buffer, dma, dma); 6111 + tx_buffer->type = IGB_TYPE_XDP; 6112 + tx_buffer->xdpf = xdpf; 6113 + 6114 + tx_desc->read.buffer_addr = cpu_to_le64(dma); 6115 + 6116 + /* put descriptor type bits */ 6117 + cmd_type = E1000_ADVTXD_DTYP_DATA | 6118 + E1000_ADVTXD_DCMD_DEXT | 6119 + E1000_ADVTXD_DCMD_IFCS; 6120 + cmd_type |= len | IGB_TXD_DCMD; 6121 + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); 6122 + 6123 + olinfo_status = 
cpu_to_le32(len << E1000_ADVTXD_PAYLEN_SHIFT); 6124 + /* 82575 requires a unique index per ring */ 6125 + if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) 6126 + olinfo_status |= tx_ring->reg_idx << 4; 6127 + 6128 + tx_desc->read.olinfo_status = olinfo_status; 6129 + 6130 + netdev_tx_sent_queue(txring_txq(tx_ring), tx_buffer->bytecount); 6131 + 6132 + /* set the timestamp */ 6133 + tx_buffer->time_stamp = jiffies; 6134 + 6135 + /* Avoid any potential race with xdp_xmit and cleanup */ 6136 + smp_wmb(); 6137 + 6138 + /* set next_to_watch value indicating a packet is present */ 6139 + i++; 6140 + if (i == tx_ring->count) 6141 + i = 0; 6142 + 6143 + tx_buffer->next_to_watch = tx_desc; 6144 + tx_ring->next_to_use = i; 6145 + 6146 + /* Make sure there is space in the ring for the next send. */ 6147 + igb_maybe_stop_tx(tx_ring, DESC_NEEDED); 6148 + 6149 + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) 6150 + writel(i, tx_ring->tail); 6151 + 6152 + return IGB_XDP_TX; 6153 + } 6154 + 6242 6155 netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, 6243 6156 struct igb_ring *tx_ring) 6244 6157 { ··· 6341 6106 6342 6107 /* record the location of the first descriptor for this packet */ 6343 6108 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; 6109 + first->type = IGB_TYPE_SKB; 6344 6110 first->skb = skb; 6345 6111 first->bytecount = skb->len; 6346 6112 first->gso_segs = 1; ··· 6493 6257 { 6494 6258 struct igb_adapter *adapter = netdev_priv(netdev); 6495 6259 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; 6260 + 6261 + if (adapter->xdp_prog) { 6262 + int i; 6263 + 6264 + for (i = 0; i < adapter->num_rx_queues; i++) { 6265 + struct igb_ring *ring = adapter->rx_ring[i]; 6266 + 6267 + if (max_frame > igb_rx_bufsz(ring)) { 6268 + netdev_warn(adapter->netdev, "Requested MTU size is not supported with XDP\n"); 6269 + return -EINVAL; 6270 + } 6271 + } 6272 + } 6496 6273 6497 6274 /* adjust max frame to be at least the size of a 
standard frame */ 6498 6275 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) ··· 8060 7811 total_packets += tx_buffer->gso_segs; 8061 7812 8062 7813 /* free the skb */ 8063 - napi_consume_skb(tx_buffer->skb, napi_budget); 7814 + if (tx_buffer->type == IGB_TYPE_SKB) 7815 + napi_consume_skb(tx_buffer->skb, napi_budget); 7816 + else 7817 + xdp_return_frame(tx_buffer->xdpf); 8064 7818 8065 7819 /* unmap skb header data */ 8066 7820 dma_unmap_single(tx_ring->dev, ··· 8247 7995 * the pagecnt_bias and page count so that we fully restock the 8248 7996 * number of references the driver holds. 8249 7997 */ 8250 - if (unlikely(!pagecnt_bias)) { 8251 - page_ref_add(page, USHRT_MAX); 7998 + if (unlikely(pagecnt_bias == 1)) { 7999 + page_ref_add(page, USHRT_MAX - 1); 8252 8000 rx_buffer->pagecnt_bias = USHRT_MAX; 8253 8001 } 8254 8002 ··· 8287 8035 8288 8036 static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring, 8289 8037 struct igb_rx_buffer *rx_buffer, 8290 - union e1000_adv_rx_desc *rx_desc, 8291 - unsigned int size) 8038 + struct xdp_buff *xdp, 8039 + union e1000_adv_rx_desc *rx_desc) 8292 8040 { 8293 - void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; 8294 8041 #if (PAGE_SIZE < 8192) 8295 8042 unsigned int truesize = igb_rx_pg_size(rx_ring) / 2; 8296 8043 #else 8297 - unsigned int truesize = SKB_DATA_ALIGN(size); 8044 + unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end - 8045 + xdp->data_hard_start); 8298 8046 #endif 8047 + unsigned int size = xdp->data_end - xdp->data; 8299 8048 unsigned int headlen; 8300 8049 struct sk_buff *skb; 8301 8050 8302 8051 /* prefetch first cache line of first page */ 8303 - net_prefetch(va); 8052 + net_prefetch(xdp->data); 8304 8053 8305 8054 /* allocate a skb to store the frags */ 8306 8055 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN); ··· 8309 8056 return NULL; 8310 8057 8311 8058 if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) { 8312 - igb_ptp_rx_pktstamp(rx_ring->q_vector, va, 
skb); 8313 - va += IGB_TS_HDR_LEN; 8059 + igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb); 8060 + xdp->data += IGB_TS_HDR_LEN; 8314 8061 size -= IGB_TS_HDR_LEN; 8315 8062 } 8316 8063 8317 8064 /* Determine available headroom for copy */ 8318 8065 headlen = size; 8319 8066 if (headlen > IGB_RX_HDR_LEN) 8320 - headlen = eth_get_headlen(skb->dev, va, IGB_RX_HDR_LEN); 8067 + headlen = eth_get_headlen(skb->dev, xdp->data, IGB_RX_HDR_LEN); 8321 8068 8322 8069 /* align pull length to size of long to optimize memcpy performance */ 8323 - memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); 8070 + memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, sizeof(long))); 8324 8071 8325 8072 /* update all of the pointers */ 8326 8073 size -= headlen; 8327 8074 if (size) { 8328 8075 skb_add_rx_frag(skb, 0, rx_buffer->page, 8329 - (va + headlen) - page_address(rx_buffer->page), 8076 + (xdp->data + headlen) - page_address(rx_buffer->page), 8330 8077 size, truesize); 8331 8078 #if (PAGE_SIZE < 8192) 8332 8079 rx_buffer->page_offset ^= truesize; ··· 8342 8089 8343 8090 static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring, 8344 8091 struct igb_rx_buffer *rx_buffer, 8345 - union e1000_adv_rx_desc *rx_desc, 8346 - unsigned int size) 8092 + struct xdp_buff *xdp, 8093 + union e1000_adv_rx_desc *rx_desc) 8347 8094 { 8348 - void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; 8349 8095 #if (PAGE_SIZE < 8192) 8350 8096 unsigned int truesize = igb_rx_pg_size(rx_ring) / 2; 8351 8097 #else 8352 8098 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + 8353 - SKB_DATA_ALIGN(IGB_SKB_PAD + size); 8099 + SKB_DATA_ALIGN(xdp->data_end - 8100 + xdp->data_hard_start); 8354 8101 #endif 8355 8102 struct sk_buff *skb; 8356 8103 8357 8104 /* prefetch first cache line of first page */ 8358 - net_prefetch(va); 8105 + net_prefetch(xdp->data_meta); 8359 8106 8360 8107 /* build an skb around the page buffer */ 8361 - skb = build_skb(va - 
IGB_SKB_PAD, truesize); 8108 + skb = build_skb(xdp->data_hard_start, truesize); 8362 8109 if (unlikely(!skb)) 8363 8110 return NULL; 8364 8111 8365 8112 /* update pointers within the skb to store the data */ 8366 - skb_reserve(skb, IGB_SKB_PAD); 8367 - __skb_put(skb, size); 8113 + skb_reserve(skb, xdp->data - xdp->data_hard_start); 8114 + __skb_put(skb, xdp->data_end - xdp->data); 8368 8115 8369 8116 /* pull timestamp out of packet data */ 8370 8117 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { ··· 8380 8127 #endif 8381 8128 8382 8129 return skb; 8130 + } 8131 + 8132 + static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter, 8133 + struct igb_ring *rx_ring, 8134 + struct xdp_buff *xdp) 8135 + { 8136 + int err, result = IGB_XDP_PASS; 8137 + struct bpf_prog *xdp_prog; 8138 + u32 act; 8139 + 8140 + rcu_read_lock(); 8141 + xdp_prog = READ_ONCE(rx_ring->xdp_prog); 8142 + 8143 + if (!xdp_prog) 8144 + goto xdp_out; 8145 + 8146 + prefetchw(xdp->data_hard_start); /* xdp_frame write */ 8147 + 8148 + act = bpf_prog_run_xdp(xdp_prog, xdp); 8149 + switch (act) { 8150 + case XDP_PASS: 8151 + break; 8152 + case XDP_TX: 8153 + result = igb_xdp_xmit_back(adapter, xdp); 8154 + break; 8155 + case XDP_REDIRECT: 8156 + err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog); 8157 + if (!err) 8158 + result = IGB_XDP_REDIR; 8159 + else 8160 + result = IGB_XDP_CONSUMED; 8161 + break; 8162 + default: 8163 + bpf_warn_invalid_xdp_action(act); 8164 + fallthrough; 8165 + case XDP_ABORTED: 8166 + trace_xdp_exception(rx_ring->netdev, xdp_prog, act); 8167 + fallthrough; 8168 + case XDP_DROP: 8169 + result = IGB_XDP_CONSUMED; 8170 + break; 8171 + } 8172 + xdp_out: 8173 + rcu_read_unlock(); 8174 + return ERR_PTR(-result); 8175 + } 8176 + 8177 + static unsigned int igb_rx_frame_truesize(struct igb_ring *rx_ring, 8178 + unsigned int size) 8179 + { 8180 + unsigned int truesize; 8181 + 8182 + #if (PAGE_SIZE < 8192) 8183 + truesize = igb_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */ 
8184 + #else 8185 + truesize = ring_uses_build_skb(rx_ring) ? 8186 + SKB_DATA_ALIGN(IGB_SKB_PAD + size) + 8187 + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) : 8188 + SKB_DATA_ALIGN(size); 8189 + #endif 8190 + return truesize; 8191 + } 8192 + 8193 + static void igb_rx_buffer_flip(struct igb_ring *rx_ring, 8194 + struct igb_rx_buffer *rx_buffer, 8195 + unsigned int size) 8196 + { 8197 + unsigned int truesize = igb_rx_frame_truesize(rx_ring, size); 8198 + #if (PAGE_SIZE < 8192) 8199 + rx_buffer->page_offset ^= truesize; 8200 + #else 8201 + rx_buffer->page_offset += truesize; 8202 + #endif 8383 8203 } 8384 8204 8385 8205 static inline void igb_rx_checksum(struct igb_ring *ring, ··· 8550 8224 union e1000_adv_rx_desc *rx_desc, 8551 8225 struct sk_buff *skb) 8552 8226 { 8227 + /* XDP packets use error pointer so abort at this point */ 8228 + if (IS_ERR(skb)) 8229 + return true; 8230 + 8553 8231 if (unlikely((igb_test_staterr(rx_desc, 8554 8232 E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) { 8555 8233 struct net_device *netdev = rx_ring->netdev; ··· 8612 8282 skb->protocol = eth_type_trans(skb, rx_ring->netdev); 8613 8283 } 8614 8284 8285 + static unsigned int igb_rx_offset(struct igb_ring *rx_ring) 8286 + { 8287 + return ring_uses_build_skb(rx_ring) ? 
IGB_SKB_PAD : 0; 8288 + } 8289 + 8615 8290 static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring, 8616 8291 const unsigned int size) 8617 8292 { ··· 8660 8325 8661 8326 static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) 8662 8327 { 8328 + struct igb_adapter *adapter = q_vector->adapter; 8663 8329 struct igb_ring *rx_ring = q_vector->rx.ring; 8664 8330 struct sk_buff *skb = rx_ring->skb; 8665 8331 unsigned int total_bytes = 0, total_packets = 0; 8666 8332 u16 cleaned_count = igb_desc_unused(rx_ring); 8333 + unsigned int xdp_xmit = 0; 8334 + struct xdp_buff xdp; 8335 + 8336 + xdp.rxq = &rx_ring->xdp_rxq; 8337 + 8338 + /* Frame size depend on rx_ring setup when PAGE_SIZE=4K */ 8339 + #if (PAGE_SIZE < 8192) 8340 + xdp.frame_sz = igb_rx_frame_truesize(rx_ring, 0); 8341 + #endif 8667 8342 8668 8343 while (likely(total_packets < budget)) { 8669 8344 union e1000_adv_rx_desc *rx_desc; ··· 8700 8355 rx_buffer = igb_get_rx_buffer(rx_ring, size); 8701 8356 8702 8357 /* retrieve a buffer from the ring */ 8703 - if (skb) 8358 + if (!skb) { 8359 + xdp.data = page_address(rx_buffer->page) + 8360 + rx_buffer->page_offset; 8361 + xdp.data_meta = xdp.data; 8362 + xdp.data_hard_start = xdp.data - 8363 + igb_rx_offset(rx_ring); 8364 + xdp.data_end = xdp.data + size; 8365 + #if (PAGE_SIZE > 4096) 8366 + /* At larger PAGE_SIZE, frame_sz depend on len size */ 8367 + xdp.frame_sz = igb_rx_frame_truesize(rx_ring, size); 8368 + #endif 8369 + skb = igb_run_xdp(adapter, rx_ring, &xdp); 8370 + } 8371 + 8372 + if (IS_ERR(skb)) { 8373 + unsigned int xdp_res = -PTR_ERR(skb); 8374 + 8375 + if (xdp_res & (IGB_XDP_TX | IGB_XDP_REDIR)) { 8376 + xdp_xmit |= xdp_res; 8377 + igb_rx_buffer_flip(rx_ring, rx_buffer, size); 8378 + } else { 8379 + rx_buffer->pagecnt_bias++; 8380 + } 8381 + total_packets++; 8382 + total_bytes += size; 8383 + } else if (skb) 8704 8384 igb_add_rx_frag(rx_ring, rx_buffer, skb, size); 8705 8385 else if (ring_uses_build_skb(rx_ring)) 8706 
- skb = igb_build_skb(rx_ring, rx_buffer, rx_desc, size); 8386 + skb = igb_build_skb(rx_ring, rx_buffer, &xdp, rx_desc); 8707 8387 else 8708 8388 skb = igb_construct_skb(rx_ring, rx_buffer, 8709 - rx_desc, size); 8389 + &xdp, rx_desc); 8710 8390 8711 8391 /* exit if we failed to retrieve a buffer */ 8712 8392 if (!skb) { ··· 8771 8401 /* place incomplete frames back on ring for completion */ 8772 8402 rx_ring->skb = skb; 8773 8403 8404 + if (xdp_xmit & IGB_XDP_REDIR) 8405 + xdp_do_flush_map(); 8406 + 8407 + if (xdp_xmit & IGB_XDP_TX) { 8408 + struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter); 8409 + 8410 + igb_xdp_ring_update_tail(tx_ring); 8411 + } 8412 + 8774 8413 u64_stats_update_begin(&rx_ring->rx_syncp); 8775 8414 rx_ring->rx_stats.packets += total_packets; 8776 8415 rx_ring->rx_stats.bytes += total_bytes; ··· 8791 8412 igb_alloc_rx_buffers(rx_ring, cleaned_count); 8792 8413 8793 8414 return total_packets; 8794 - } 8795 - 8796 - static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring) 8797 - { 8798 - return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0; 8799 8415 } 8800 8416 8801 8417 static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, ··· 8829 8455 bi->dma = dma; 8830 8456 bi->page = page; 8831 8457 bi->page_offset = igb_rx_offset(rx_ring); 8832 - bi->pagecnt_bias = 1; 8458 + page_ref_add(page, USHRT_MAX - 1); 8459 + bi->pagecnt_bias = USHRT_MAX; 8833 8460 8834 8461 return true; 8835 8462 }
+3
drivers/net/ethernet/intel/igc/igc.h
··· 215 215 spinlock_t tmreg_lock; 216 216 struct cyclecounter cc; 217 217 struct timecounter tc; 218 + struct timespec64 prev_ptp_time; /* Pre-reset PTP clock */ 219 + ktime_t ptp_reset_start; /* Reset time in clock mono */ 218 220 }; 219 221 220 222 void igc_up(struct igc_adapter *adapter); ··· 550 548 int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); 551 549 int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); 552 550 void igc_ptp_tx_hang(struct igc_adapter *adapter); 551 + void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts); 553 552 554 553 #define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring)) 555 554
+5
drivers/net/ethernet/intel/igc/igc_base.c
··· 215 215 case IGC_DEV_ID_I225_K2: 216 216 case IGC_DEV_ID_I225_LMVP: 217 217 case IGC_DEV_ID_I225_IT: 218 + case IGC_DEV_ID_I226_LM: 219 + case IGC_DEV_ID_I226_V: 220 + case IGC_DEV_ID_I226_IT: 221 + case IGC_DEV_ID_I221_V: 222 + case IGC_DEV_ID_I226_BLANK_NVM: 218 223 case IGC_DEV_ID_I225_BLANK_NVM: 219 224 mac->type = igc_i225; 220 225 break;
+2 -14
drivers/net/ethernet/intel/igc/igc_defines.h
··· 324 324 /* Advanced Receive Descriptor bit definitions */ 325 325 #define IGC_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */ 326 326 327 - #define IGC_RXDEXT_STATERR_CE 0x01000000 328 - #define IGC_RXDEXT_STATERR_SE 0x02000000 329 - #define IGC_RXDEXT_STATERR_SEQ 0x04000000 330 - #define IGC_RXDEXT_STATERR_CXE 0x10000000 331 - #define IGC_RXDEXT_STATERR_TCPE 0x20000000 327 + #define IGC_RXDEXT_STATERR_L4E 0x20000000 332 328 #define IGC_RXDEXT_STATERR_IPE 0x40000000 333 329 #define IGC_RXDEXT_STATERR_RXE 0x80000000 334 - 335 - /* Same mask, but for extended and packet split descriptors */ 336 - #define IGC_RXDEXT_ERR_FRAME_ERR_MASK ( \ 337 - IGC_RXDEXT_STATERR_CE | \ 338 - IGC_RXDEXT_STATERR_SE | \ 339 - IGC_RXDEXT_STATERR_SEQ | \ 340 - IGC_RXDEXT_STATERR_CXE | \ 341 - IGC_RXDEXT_STATERR_RXE) 342 330 343 331 #define IGC_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 344 332 #define IGC_MRQC_RSS_FIELD_IPV4 0x00020000 ··· 397 409 #define IGC_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ 398 410 399 411 /* Time Sync Transmit Control bit definitions */ 400 - #define IGC_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */ 412 + #define IGC_TSYNCTXCTL_TXTT_0 0x00000001 /* Tx timestamp reg 0 valid */ 401 413 #define IGC_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */ 402 414 #define IGC_TSYNCTXCTL_MAX_ALLOWED_DLY_MASK 0x0000F000 /* max delay */ 403 415 #define IGC_TSYNCTXCTL_SYNC_COMP_ERR 0x20000000 /* sync err */
+3
drivers/net/ethernet/intel/igc/igc_ethtool.c
··· 321 321 322 322 for (i = 0; i < 8; i++) 323 323 regs_buff[205 + i] = rd32(IGC_ETQF(i)); 324 + 325 + regs_buff[213] = adapter->stats.tlpic; 326 + regs_buff[214] = adapter->stats.rlpic; 324 327 } 325 328 326 329 static void igc_ethtool_get_wol(struct net_device *netdev,
+7 -4
drivers/net/ethernet/intel/igc/igc_hw.h
··· 24 24 #define IGC_DEV_ID_I225_K2 0x3101 25 25 #define IGC_DEV_ID_I225_LMVP 0x5502 26 26 #define IGC_DEV_ID_I225_IT 0x0D9F 27 + #define IGC_DEV_ID_I226_LM 0x125B 28 + #define IGC_DEV_ID_I226_V 0x125C 29 + #define IGC_DEV_ID_I226_IT 0x125D 30 + #define IGC_DEV_ID_I221_V 0x125E 31 + #define IGC_DEV_ID_I226_BLANK_NVM 0x125F 27 32 #define IGC_DEV_ID_I225_BLANK_NVM 0x15FD 28 33 29 34 /* Function pointers for the MAC. */ ··· 130 125 struct igc_nvm_operations ops; 131 126 enum igc_nvm_type type; 132 127 133 - u32 flash_bank_size; 134 - u32 flash_base_addr; 135 - 136 128 u16 word_size; 137 129 u16 delay_usec; 138 130 u16 address_bits; ··· 155 153 u8 mdix; 156 154 157 155 bool is_mdix; 158 - bool reset_disable; 159 156 bool speed_downgraded; 160 157 bool autoneg_wait_to_complete; 161 158 }; ··· 240 239 u64 prc511; 241 240 u64 prc1023; 242 241 u64 prc1522; 242 + u64 tlpic; 243 + u64 rlpic; 243 244 u64 gprc; 244 245 u64 bprc; 245 246 u64 mprc;
+34 -5
drivers/net/ethernet/intel/igc/igc_main.c
··· 47 47 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base }, 48 48 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base }, 49 49 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base }, 50 + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base }, 51 + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_V), board_base }, 52 + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_IT), board_base }, 53 + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I221_V), board_base }, 54 + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_BLANK_NVM), board_base }, 50 55 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base }, 51 56 /* required last entry */ 52 57 {0, } ··· 1433 1428 1434 1429 /* TCP/UDP checksum error bit is set */ 1435 1430 if (igc_test_staterr(rx_desc, 1436 - IGC_RXDEXT_STATERR_TCPE | 1431 + IGC_RXDEXT_STATERR_L4E | 1437 1432 IGC_RXDEXT_STATERR_IPE)) { 1438 1433 /* work around errata with sctp packets where the TCPE aka 1439 1434 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) ··· 1742 1737 union igc_adv_rx_desc *rx_desc, 1743 1738 struct sk_buff *skb) 1744 1739 { 1745 - if (unlikely((igc_test_staterr(rx_desc, 1746 - IGC_RXDEXT_ERR_FRAME_ERR_MASK)))) { 1740 + if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) { 1747 1741 struct net_device *netdev = rx_ring->netdev; 1748 1742 1749 1743 if (!(netdev->features & NETIF_F_RXALL)) { ··· 3683 3679 adapter->stats.prc511 += rd32(IGC_PRC511); 3684 3680 adapter->stats.prc1023 += rd32(IGC_PRC1023); 3685 3681 adapter->stats.prc1522 += rd32(IGC_PRC1522); 3682 + adapter->stats.tlpic += rd32(IGC_TLPIC); 3683 + adapter->stats.rlpic += rd32(IGC_RLPIC); 3686 3684 3687 3685 mpc = rd32(IGC_MPC); 3688 3686 adapter->stats.mpc += mpc; ··· 3777 3771 int i = 0; 3778 3772 3779 3773 set_bit(__IGC_DOWN, &adapter->state); 3774 + 3775 + igc_ptp_suspend(adapter); 3780 3776 3781 3777 /* disable receives in the hardware */ 3782 3778 rctl = rd32(IGC_RCTL); ··· 4702 4694 return 0; 4703 4695 } 4704 4696 4705 - static bool validate_schedule(const struct tc_taprio_qopt_offload *qopt) 4697 + static bool is_base_time_past(ktime_t base_time, const struct timespec64 *now) 4698 + { 4699 + struct timespec64 b; 4700 + 4701 + b = ktime_to_timespec64(base_time); 4702 + 4703 + return timespec64_compare(now, &b) > 0; 4704 + } 4705 + 4706 + static bool validate_schedule(struct igc_adapter *adapter, 4707 + const struct tc_taprio_qopt_offload *qopt) 4706 4708 { 4707 4709 int queue_uses[IGC_MAX_TX_QUEUES] = { }; 4710 + struct timespec64 now; 4708 4711 size_t n; 4709 4712 4710 4713 if (qopt->cycle_time_extension) 4714 + return false; 4715 + 4716 + igc_ptp_read(adapter, &now); 4717 + 4718 + /* If we program the controller's BASET registers with a time 4719 + * in the future, it will hold all the packets until that 4720 + * time, causing a lot of TX Hangs, so to avoid that, we 4721 + * reject schedules that would start in the future. 4722 + */ 4723 + if (!is_base_time_past(qopt->base_time, &now)) 4711 4724 return false; 4712 4725 4713 4726 for (n = 0; n < qopt->num_entries; n++) { ··· 4785 4756 if (adapter->base_time) 4786 4757 return -EALREADY; 4787 4758 4788 - if (!validate_schedule(qopt)) 4759 + if (!validate_schedule(adapter, qopt)) 4789 4760 return -EINVAL; 4790 4761 4791 4762 adapter->cycle_time = qopt->cycle_time;
+33 -27
drivers/net/ethernet/intel/igc/igc_ptp.c
··· 8 8 #include <linux/pci.h> 9 9 #include <linux/ptp_classify.h> 10 10 #include <linux/clocksource.h> 11 + #include <linux/ktime.h> 11 12 12 13 #define INCVALUE_MASK 0x7fffffff 13 14 #define ISGN 0x80000000 ··· 17 16 #define IGC_PTP_TX_TIMEOUT (HZ * 15) 18 17 19 18 /* SYSTIM read access for I225 */ 20 - static void igc_ptp_read_i225(struct igc_adapter *adapter, 21 - struct timespec64 *ts) 19 + void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts) 22 20 { 23 21 struct igc_hw *hw = &adapter->hw; 24 22 u32 sec, nsec; 25 23 26 - /* The timestamp latches on lowest register read. For I210/I211, the 27 - * lowest register is SYSTIMR. Since we only need to provide nanosecond 28 - * resolution, we can ignore it. 29 - */ 30 - rd32(IGC_SYSTIMR); 24 + /* The timestamp is latched when SYSTIML is read. */ 31 25 nsec = rd32(IGC_SYSTIML); 32 26 sec = rd32(IGC_SYSTIMH); 33 27 ··· 35 39 { 36 40 struct igc_hw *hw = &adapter->hw; 37 41 38 - /* Writing the SYSTIMR register is not necessary as it only 39 - * provides sub-nanosecond resolution. 
40 - */ 41 42 wr32(IGC_SYSTIML, ts->tv_nsec); 42 43 wr32(IGC_SYSTIMH, ts->tv_sec); 43 44 } ··· 74 81 75 82 spin_lock_irqsave(&igc->tmreg_lock, flags); 76 83 77 - igc_ptp_read_i225(igc, &now); 84 + igc_ptp_read(igc, &now); 78 85 now = timespec64_add(now, then); 79 86 igc_ptp_write_i225(igc, (const struct timespec64 *)&now); 80 87 ··· 95 102 spin_lock_irqsave(&igc->tmreg_lock, flags); 96 103 97 104 ptp_read_system_prets(sts); 98 - rd32(IGC_SYSTIMR); 99 - ptp_read_system_postts(sts); 100 105 ts->tv_nsec = rd32(IGC_SYSTIML); 101 106 ts->tv_sec = rd32(IGC_SYSTIMH); 107 + ptp_read_system_postts(sts); 102 108 103 109 spin_unlock_irqrestore(&igc->tmreg_lock, flags); 104 110 ··· 414 422 if (!test_bit(__IGC_PTP_TX_IN_PROGRESS, &adapter->state)) 415 423 return; 416 424 417 - if (time_is_before_jiffies(adapter->ptp_tx_start + 418 - IGC_PTP_TX_TIMEOUT)) { 419 - igc_ptp_tx_timeout(adapter); 420 - return; 421 - } 422 - 423 425 tsynctxctl = rd32(IGC_TSYNCTXCTL); 424 - if (tsynctxctl & IGC_TSYNCTXCTL_VALID) 425 - igc_ptp_tx_hwtstamp(adapter); 426 - else 427 - /* reschedule to check later */ 428 - schedule_work(&adapter->ptp_tx_work); 426 + if (WARN_ON_ONCE(!(tsynctxctl & IGC_TSYNCTXCTL_TXTT_0))) 427 + return; 428 + 429 + igc_ptp_tx_hwtstamp(adapter); 429 430 } 430 431 431 432 /** ··· 500 515 adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; 501 516 adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; 502 517 518 + adapter->prev_ptp_time = ktime_to_timespec64(ktime_get_real()); 519 + adapter->ptp_reset_start = ktime_get(); 520 + 503 521 adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, 504 522 &adapter->pdev->dev); 505 523 if (IS_ERR(adapter->ptp_clock)) { ··· 512 524 netdev_info(netdev, "PHC added\n"); 513 525 adapter->ptp_flags |= IGC_PTP_ENABLED; 514 526 } 527 + } 528 + 529 + static void igc_ptp_time_save(struct igc_adapter *adapter) 530 + { 531 + igc_ptp_read(adapter, &adapter->prev_ptp_time); 532 + adapter->ptp_reset_start = ktime_get(); 533 + } 534 + 535 + static void igc_ptp_time_restore(struct igc_adapter *adapter) 536 + { 537 + struct timespec64 ts = adapter->prev_ptp_time; 538 + ktime_t delta; 539 + 540 + delta = ktime_sub(ktime_get(), adapter->ptp_reset_start); 541 + 542 + timespec64_add_ns(&ts, ktime_to_ns(delta)); 543 + 544 + igc_ptp_write_i225(adapter, &ts); 515 545 } 516 546 517 547 /** ··· 548 542 dev_kfree_skb_any(adapter->ptp_tx_skb); 549 543 adapter->ptp_tx_skb = NULL; 550 544 clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state); 545 + 546 + igc_ptp_time_save(adapter); 551 547 } 552 548 553 549 /** ··· 599 591 600 592 /* Re-initialize the timer. */ 601 593 if (hw->mac.type == igc_i225) { 602 - struct timespec64 ts64 = ktime_to_timespec64(ktime_get_real()); 603 - 604 - igc_ptp_write_i225(adapter, &ts64); 594 + igc_ptp_time_restore(adapter); 605 595 } else { 606 596 timecounter_init(&adapter->tc, &adapter->cc, 607 597 ktime_to_ns(ktime_get_real()));