Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'add-vf-drivers-for-wangxun-virtual-functions'

Mengyuan Lou says:

====================
Add vf drivers for wangxun virtual functions

Introduces basic support for Wangxun’s virtual function (VF) network
drivers, specifically txgbevf and ngbevf. These drivers provide SR-IOV
VF functionality for Wangxun 10/25/40G network devices.
The first three patches add common APIs for Wangxun VF drivers, including
mailbox communication and shared initialization logic. These abstractions
are placed in libwx to reduce duplication across VF drivers.
Patches 4–8 introduce the txgbevf driver, including:
PCI device initialization, Hardware reset, Interrupt setup, Rx/Tx datapath
implementation and link status changing flow.
Patches 9–12 implement the ngbevf driver, mirroring the functionality
added in txgbevf.

v2: https://lore.kernel.org/20250625102058.19898-1-mengyuanlou@net-swift.com
v1: https://lore.kernel.org/20250611083559.14175-1-mengyuanlou@net-swift.com
====================

Link: https://patch.msgid.link/20250704094923.652-1-mengyuanlou@net-swift.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+2470 -5
+2
Documentation/networking/device_drivers/ethernet/index.rst
··· 58 58 ti/tlan 59 59 ti/icssg_prueth 60 60 wangxun/txgbe 61 + wangxun/txgbevf 61 62 wangxun/ngbe 63 + wangxun/ngbevf 62 64 63 65 .. only:: subproject and html 64 66
+16
Documentation/networking/device_drivers/ethernet/wangxun/ngbevf.rst
··· 1 + .. SPDX-License-Identifier: GPL-2.0+ 2 + 3 + ================================================================== 4 + Linux Base Virtual Function Driver for Wangxun(R) Gigabit Ethernet 5 + ================================================================== 6 + 7 + WangXun Gigabit Virtual Function Linux driver. 8 + Copyright(c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. 9 + 10 + Support 11 + ======= 12 + For general information, go to the website at: 13 + https://www.net-swift.com 14 + 15 + If you got any problem, contact Wangxun support team via nic-support@net-swift.com 16 + and Cc: netdev.
+16
Documentation/networking/device_drivers/ethernet/wangxun/txgbevf.rst
··· 1 + .. SPDX-License-Identifier: GPL-2.0+ 2 + 3 + =========================================================================== 4 + Linux Base Virtual Function Driver for Wangxun(R) 10/25/40 Gigabit Ethernet 5 + =========================================================================== 6 + 7 + WangXun 10/25/40 Gigabit Virtual Function Linux driver. 8 + Copyright(c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. 9 + 10 + Support 11 + ======= 12 + For general information, go to the website at: 13 + https://www.net-swift.com 14 + 15 + If you got any problem, contact Wangxun support team via nic-support@net-swift.com 16 + and Cc: netdev.
+33
drivers/net/ethernet/wangxun/Kconfig
··· 64 64 To compile this driver as a module, choose M here. The module 65 65 will be called txgbe. 66 66 67 + config TXGBEVF 68 + tristate "Wangxun(R) 10/25/40G Virtual Function Ethernet support" 69 + depends on PCI 70 + depends on PCI_MSI 71 + select LIBWX 72 + select PHYLINK 73 + help 74 + This driver supports virtual functions for SP1000A, WX1820AL, 75 + WX5XXX, WX5XXXAL. 76 + 77 + This driver was formerly named txgbevf. 78 + 79 + More specific information on configuring the driver is in 80 + <file:Documentation/networking/device_drivers/ethernet/wangxun/txgbevf.rst>. 81 + 82 + To compile this driver as a module, choose M here. MSI-X interrupt 83 + support is required for this driver to work correctly. 84 + 85 + config NGBEVF 86 + tristate "Wangxun(R) GbE Virtual Function Ethernet support" 87 + depends on PCI_MSI 88 + select LIBWX 89 + help 90 + This driver supports virtual functions for WX1860, WX1860AL. 91 + 92 + This driver was formerly named ngbevf. 93 + 94 + More specific information on configuring the driver is in 95 + <file:Documentation/networking/device_drivers/ethernet/wangxun/ngbevf.rst>. 96 + 97 + To compile this driver as a module, choose M here. MSI-X interrupt 98 + support is required for this driver to work correctly. 99 + 67 100 endif # NET_VENDOR_WANGXUN
+2
drivers/net/ethernet/wangxun/Makefile
··· 5 5 6 6 obj-$(CONFIG_LIBWX) += libwx/ 7 7 obj-$(CONFIG_TXGBE) += txgbe/ 8 + obj-$(CONFIG_TXGBEVF) += txgbevf/ 8 9 obj-$(CONFIG_NGBE) += ngbe/ 10 + obj-$(CONFIG_NGBEVF) += ngbevf/
+1
drivers/net/ethernet/wangxun/libwx/Makefile
··· 5 5 obj-$(CONFIG_LIBWX) += libwx.o 6 6 7 7 libwx-objs := wx_hw.o wx_lib.o wx_ethtool.o wx_ptp.o wx_mbx.o wx_sriov.o 8 + libwx-objs += wx_vf.o wx_vf_lib.o wx_vf_common.o
+11 -3
drivers/net/ethernet/wangxun/libwx/wx_hw.c
··· 11 11 #include "wx_type.h" 12 12 #include "wx_lib.h" 13 13 #include "wx_sriov.h" 14 + #include "wx_vf.h" 14 15 #include "wx_hw.h" 15 16 16 17 static int wx_phy_read_reg_mdi(struct mii_bus *bus, int phy_addr, int devnum, int regnum) ··· 124 123 void wx_intr_enable(struct wx *wx, u64 qmask) 125 124 { 126 125 u32 mask; 126 + 127 + if (wx->pdev->is_virtfn) { 128 + wr32(wx, WX_VXIMC, qmask); 129 + return; 130 + } 127 131 128 132 mask = (qmask & U32_MAX); 129 133 if (mask) ··· 1113 1107 * by the MO field of the MCSTCTRL. The MO field is set during initialization 1114 1108 * to mc_filter_type. 1115 1109 **/ 1116 - static u32 wx_mta_vector(struct wx *wx, u8 *mc_addr) 1110 + u32 wx_mta_vector(struct wx *wx, u8 *mc_addr) 1117 1111 { 1118 1112 u32 vector = 0; 1119 1113 ··· 1833 1827 } 1834 1828 EXPORT_SYMBOL(wx_disable_rx_queue); 1835 1829 1836 - static void wx_enable_rx_queue(struct wx *wx, struct wx_ring *ring) 1830 + void wx_enable_rx_queue(struct wx *wx, struct wx_ring *ring) 1837 1831 { 1838 1832 u8 reg_idx = ring->reg_idx; 1839 1833 u32 rxdctl; ··· 1849 1843 reg_idx); 1850 1844 } 1851 1845 } 1846 + EXPORT_SYMBOL(wx_enable_rx_queue); 1852 1847 1853 1848 static void wx_configure_srrctl(struct wx *wx, 1854 1849 struct wx_ring *rx_ring) ··· 2375 2368 wx->bus.device = PCI_SLOT(pdev->devfn); 2376 2369 wx->bus.func = PCI_FUNC(pdev->devfn); 2377 2370 2378 - if (wx->oem_svid == PCI_VENDOR_ID_WANGXUN) { 2371 + if (wx->oem_svid == PCI_VENDOR_ID_WANGXUN || 2372 + pdev->is_virtfn) { 2379 2373 wx->subsystem_vendor_id = pdev->subsystem_vendor; 2380 2374 wx->subsystem_device_id = pdev->subsystem_device; 2381 2375 } else {
+2
drivers/net/ethernet/wangxun/libwx/wx_hw.h
··· 29 29 int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool); 30 30 int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool); 31 31 void wx_flush_sw_mac_table(struct wx *wx); 32 + u32 wx_mta_vector(struct wx *wx, u8 *mc_addr); 32 33 int wx_set_mac(struct net_device *netdev, void *p); 33 34 void wx_disable_rx(struct wx *wx); 34 35 int wx_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); ··· 38 37 void wx_set_rx_mode(struct net_device *netdev); 39 38 int wx_change_mtu(struct net_device *netdev, int new_mtu); 40 39 void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring); 40 + void wx_enable_rx_queue(struct wx *wx, struct wx_ring *ring); 41 41 void wx_configure_rx(struct wx *wx); 42 42 void wx_configure(struct wx *wx); 43 43 void wx_start_hw(struct wx *wx);
+7 -2
drivers/net/ethernet/wangxun/libwx/wx_lib.c
··· 1819 1819 1820 1820 /* We will try to get MSI-X interrupts first */ 1821 1821 ret = wx_acquire_msix_vectors(wx); 1822 - if (ret == 0 || (ret == -ENOMEM)) 1822 + if (ret == 0 || (ret == -ENOMEM) || pdev->is_virtfn) 1823 1823 return ret; 1824 1824 1825 1825 /* Disable VMDq support */ ··· 2170 2170 int ret; 2171 2171 2172 2172 /* Number of supported queues */ 2173 - wx_set_num_queues(wx); 2173 + if (wx->pdev->is_virtfn) { 2174 + if (wx->set_num_queues) 2175 + wx->set_num_queues(wx); 2176 + } else { 2177 + wx_set_num_queues(wx); 2178 + } 2174 2179 2175 2180 /* Set interrupt mode */ 2176 2181 ret = wx_set_interrupt_capability(wx);
+243
drivers/net/ethernet/wangxun/libwx/wx_mbx.c
··· 174 174 175 175 return 0; 176 176 } 177 + 178 + static u32 wx_read_v2p_mailbox(struct wx *wx) 179 + { 180 + u32 mailbox = rd32(wx, WX_VXMAILBOX); 181 + 182 + mailbox |= wx->mbx.mailbox; 183 + wx->mbx.mailbox |= mailbox & WX_VXMAILBOX_R2C_BITS; 184 + 185 + return mailbox; 186 + } 187 + 188 + static u32 wx_mailbox_get_lock_vf(struct wx *wx) 189 + { 190 + wr32(wx, WX_VXMAILBOX, WX_VXMAILBOX_VFU); 191 + return wx_read_v2p_mailbox(wx); 192 + } 193 + 194 + /** 195 + * wx_obtain_mbx_lock_vf - obtain mailbox lock 196 + * @wx: pointer to the HW structure 197 + * 198 + * Return: return 0 on success and -EBUSY on failure 199 + **/ 200 + static int wx_obtain_mbx_lock_vf(struct wx *wx) 201 + { 202 + int count = 5, ret; 203 + u32 mailbox; 204 + 205 + ret = readx_poll_timeout_atomic(wx_mailbox_get_lock_vf, wx, mailbox, 206 + (mailbox & WX_VXMAILBOX_VFU), 207 + 1, count); 208 + if (ret) 209 + wx_err(wx, "Failed to obtain mailbox lock for VF.\n"); 210 + 211 + return ret; 212 + } 213 + 214 + static int wx_check_for_bit_vf(struct wx *wx, u32 mask) 215 + { 216 + u32 mailbox = wx_read_v2p_mailbox(wx); 217 + 218 + wx->mbx.mailbox &= ~mask; 219 + 220 + return (mailbox & mask ? 
0 : -EBUSY); 221 + } 222 + 223 + /** 224 + * wx_check_for_ack_vf - checks to see if the PF has ACK'd 225 + * @wx: pointer to the HW structure 226 + * 227 + * Return: return 0 if the PF has set the status bit or else -EBUSY 228 + **/ 229 + static int wx_check_for_ack_vf(struct wx *wx) 230 + { 231 + /* read clear the pf ack bit */ 232 + return wx_check_for_bit_vf(wx, WX_VXMAILBOX_PFACK); 233 + } 234 + 235 + /** 236 + * wx_check_for_msg_vf - checks to see if the PF has sent mail 237 + * @wx: pointer to the HW structure 238 + * 239 + * Return: return 0 if the PF has got req bit or else -EBUSY 240 + **/ 241 + int wx_check_for_msg_vf(struct wx *wx) 242 + { 243 + /* read clear the pf sts bit */ 244 + return wx_check_for_bit_vf(wx, WX_VXMAILBOX_PFSTS); 245 + } 246 + 247 + /** 248 + * wx_check_for_rst_vf - checks to see if the PF has reset 249 + * @wx: pointer to the HW structure 250 + * 251 + * Return: return 0 if the PF has set the reset done and -EBUSY on failure 252 + **/ 253 + int wx_check_for_rst_vf(struct wx *wx) 254 + { 255 + /* read clear the pf reset done bit */ 256 + return wx_check_for_bit_vf(wx, 257 + WX_VXMAILBOX_RSTD | 258 + WX_VXMAILBOX_RSTI); 259 + } 260 + 261 + /** 262 + * wx_poll_for_msg - Wait for message notification 263 + * @wx: pointer to the HW structure 264 + * 265 + * Return: return 0 if the VF has successfully received a message notification 266 + **/ 267 + static int wx_poll_for_msg(struct wx *wx) 268 + { 269 + struct wx_mbx_info *mbx = &wx->mbx; 270 + u32 val; 271 + 272 + return readx_poll_timeout_atomic(wx_check_for_msg_vf, wx, val, 273 + (val == 0), mbx->udelay, mbx->timeout); 274 + } 275 + 276 + /** 277 + * wx_poll_for_ack - Wait for message acknowledgment 278 + * @wx: pointer to the HW structure 279 + * 280 + * Return: return 0 if the VF has successfully received a message ack 281 + **/ 282 + static int wx_poll_for_ack(struct wx *wx) 283 + { 284 + struct wx_mbx_info *mbx = &wx->mbx; 285 + u32 val; 286 + 287 + return 
readx_poll_timeout_atomic(wx_check_for_ack_vf, wx, val, 288 + (val == 0), mbx->udelay, mbx->timeout); 289 + } 290 + 291 + /** 292 + * wx_read_posted_mbx - Wait for message notification and receive message 293 + * @wx: pointer to the HW structure 294 + * @msg: The message buffer 295 + * @size: Length of buffer 296 + * 297 + * Return: returns 0 if it successfully received a message notification and 298 + * copied it into the receive buffer. 299 + **/ 300 + int wx_read_posted_mbx(struct wx *wx, u32 *msg, u16 size) 301 + { 302 + int ret; 303 + 304 + ret = wx_poll_for_msg(wx); 305 + /* if ack received read message, otherwise we timed out */ 306 + if (ret) 307 + return ret; 308 + 309 + return wx_read_mbx_vf(wx, msg, size); 310 + } 311 + 312 + /** 313 + * wx_write_posted_mbx - Write a message to the mailbox, wait for ack 314 + * @wx: pointer to the HW structure 315 + * @msg: The message buffer 316 + * @size: Length of buffer 317 + * 318 + * Return: returns 0 if it successfully copied message into the buffer and 319 + * received an ack to that message within delay * timeout period 320 + **/ 321 + int wx_write_posted_mbx(struct wx *wx, u32 *msg, u16 size) 322 + { 323 + int ret; 324 + 325 + /* send msg */ 326 + ret = wx_write_mbx_vf(wx, msg, size); 327 + /* if msg sent wait until we receive an ack */ 328 + if (ret) 329 + return ret; 330 + 331 + return wx_poll_for_ack(wx); 332 + } 333 + 334 + /** 335 + * wx_write_mbx_vf - Write a message to the mailbox 336 + * @wx: pointer to the HW structure 337 + * @msg: The message buffer 338 + * @size: Length of buffer 339 + * 340 + * Return: returns 0 if it successfully copied message into the buffer 341 + **/ 342 + int wx_write_mbx_vf(struct wx *wx, u32 *msg, u16 size) 343 + { 344 + struct wx_mbx_info *mbx = &wx->mbx; 345 + int ret, i; 346 + 347 + /* mbx->size is up to 15 */ 348 + if (size > mbx->size) { 349 + wx_err(wx, "Invalid mailbox message size %d", size); 350 + return -EINVAL; 351 + } 352 + 353 + /* lock the mailbox to prevent 
pf/vf race condition */ 354 + ret = wx_obtain_mbx_lock_vf(wx); 355 + if (ret) 356 + return ret; 357 + 358 + /* flush msg and acks as we are overwriting the message buffer */ 359 + wx_check_for_msg_vf(wx); 360 + wx_check_for_ack_vf(wx); 361 + 362 + /* copy the caller specified message to the mailbox memory buffer */ 363 + for (i = 0; i < size; i++) 364 + wr32a(wx, WX_VXMBMEM, i, msg[i]); 365 + 366 + /* Drop VFU and interrupt the PF to tell it a message has been sent */ 367 + wr32(wx, WX_VXMAILBOX, WX_VXMAILBOX_REQ); 368 + 369 + return 0; 370 + } 371 + 372 + /** 373 + * wx_read_mbx_vf - Reads a message from the inbox intended for vf 374 + * @wx: pointer to the HW structure 375 + * @msg: The message buffer 376 + * @size: Length of buffer 377 + * 378 + * Return: returns 0 if it successfully copied message into the buffer 379 + **/ 380 + int wx_read_mbx_vf(struct wx *wx, u32 *msg, u16 size) 381 + { 382 + struct wx_mbx_info *mbx = &wx->mbx; 383 + int ret, i; 384 + 385 + /* limit read to size of mailbox and mbx->size is up to 15 */ 386 + if (size > mbx->size) 387 + size = mbx->size; 388 + 389 + /* lock the mailbox to prevent pf/vf race condition */ 390 + ret = wx_obtain_mbx_lock_vf(wx); 391 + if (ret) 392 + return ret; 393 + 394 + /* copy the message from the mailbox memory buffer */ 395 + for (i = 0; i < size; i++) 396 + msg[i] = rd32a(wx, WX_VXMBMEM, i); 397 + 398 + /* Acknowledge receipt and release mailbox, then we're done */ 399 + wr32(wx, WX_VXMAILBOX, WX_VXMAILBOX_ACK); 400 + 401 + return 0; 402 + } 403 + 404 + int wx_init_mbx_params_vf(struct wx *wx) 405 + { 406 + wx->vfinfo = kzalloc(sizeof(struct vf_data_storage), 407 + GFP_KERNEL); 408 + if (!wx->vfinfo) 409 + return -ENOMEM; 410 + 411 + /* Initialize mailbox parameters */ 412 + wx->mbx.size = WX_VXMAILBOX_SIZE; 413 + wx->mbx.mailbox = WX_VXMAILBOX; 414 + wx->mbx.udelay = 10; 415 + wx->mbx.timeout = 1000; 416 + 417 + return 0; 418 + } 419 + EXPORT_SYMBOL(wx_init_mbx_params_vf);
+22
drivers/net/ethernet/wangxun/libwx/wx_mbx.h
··· 11 11 #define WX_PXMAILBOX_ACK BIT(1) /* Ack message recv'd from VF */ 12 12 #define WX_PXMAILBOX_PFU BIT(3) /* PF owns the mailbox buffer */ 13 13 14 + /* VF Registers */ 15 + #define WX_VXMAILBOX 0x600 16 + #define WX_VXMAILBOX_REQ BIT(0) /* Request for PF Ready bit */ 17 + #define WX_VXMAILBOX_ACK BIT(1) /* Ack PF message received */ 18 + #define WX_VXMAILBOX_VFU BIT(2) /* VF owns the mailbox buffer */ 19 + #define WX_VXMAILBOX_PFU BIT(3) /* PF owns the mailbox buffer */ 20 + #define WX_VXMAILBOX_PFSTS BIT(4) /* PF wrote a message in the MB */ 21 + #define WX_VXMAILBOX_PFACK BIT(5) /* PF ack the previous VF msg */ 22 + #define WX_VXMAILBOX_RSTI BIT(6) /* PF has reset indication */ 23 + #define WX_VXMAILBOX_RSTD BIT(7) /* PF has indicated reset done */ 24 + #define WX_VXMAILBOX_R2C_BITS (WX_VXMAILBOX_RSTD | \ 25 + WX_VXMAILBOX_PFSTS | WX_VXMAILBOX_PFACK) 26 + 27 + #define WX_VXMBMEM 0x00C00 /* 16*4B */ 14 28 #define WX_PXMBMEM(i) (0x5000 + (64 * (i))) /* i=[0,63] */ 15 29 16 30 #define WX_VFLRE(i) (0x4A0 + (4 * (i))) /* i=[0,1] */ ··· 87 73 int wx_check_for_rst_pf(struct wx *wx, u16 mbx_id); 88 74 int wx_check_for_msg_pf(struct wx *wx, u16 mbx_id); 89 75 int wx_check_for_ack_pf(struct wx *wx, u16 mbx_id); 76 + 77 + int wx_read_posted_mbx(struct wx *wx, u32 *msg, u16 size); 78 + int wx_write_posted_mbx(struct wx *wx, u32 *msg, u16 size); 79 + int wx_check_for_rst_vf(struct wx *wx); 80 + int wx_check_for_msg_vf(struct wx *wx); 81 + int wx_read_mbx_vf(struct wx *wx, u32 *msg, u16 size); 82 + int wx_write_mbx_vf(struct wx *wx, u32 *msg, u16 size); 83 + int wx_init_mbx_params_vf(struct wx *wx); 90 84 91 85 #endif /* _WX_MBX_H_ */
+11
drivers/net/ethernet/wangxun/libwx/wx_type.h
··· 825 825 826 826 struct wx_mbx_info { 827 827 u16 size; 828 + u32 mailbox; 829 + u32 udelay; 830 + u32 timeout; 831 + /* lock mbx access */ 832 + spinlock_t mbx_lock; 828 833 }; 829 834 830 835 struct wx_thermal_sensor_data { ··· 1206 1201 WX_FLAG_PTP_PPS_ENABLED, 1207 1202 WX_FLAG_NEED_LINK_CONFIG, 1208 1203 WX_FLAG_NEED_SFP_RESET, 1204 + WX_FLAG_NEED_UPDATE_LINK, 1205 + WX_FLAG_NEED_DO_RESET, 1209 1206 WX_PF_FLAGS_NBITS /* must be last */ 1210 1207 }; 1211 1208 ··· 1218 1211 1219 1212 void *priv; 1220 1213 u8 __iomem *hw_addr; 1214 + u8 __iomem *b4_addr; /* vf only */ 1221 1215 struct pci_dev *pdev; 1222 1216 struct net_device *netdev; 1223 1217 struct wx_bus_info bus; ··· 1293 1285 u32 *isb_mem; 1294 1286 u32 isb_tag[WX_ISB_MAX]; 1295 1287 bool misc_irq_domain; 1288 + u32 eims_other; 1289 + u32 eims_enable_mask; 1296 1290 1297 1291 #define WX_MAX_RETA_ENTRIES 128 1298 1292 #define WX_RSS_INDIR_TBL_MAX 64 ··· 1326 1316 int (*setup_tc)(struct net_device *netdev, u8 tc); 1327 1317 void (*do_reset)(struct net_device *netdev); 1328 1318 int (*ptp_setup_sdp)(struct wx *wx); 1319 + void (*set_num_queues)(struct wx *wx); 1329 1320 1330 1321 bool pps_enabled; 1331 1322 u64 pps_width;
+599
drivers/net/ethernet/wangxun/libwx/wx_vf.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */ 3 + 4 + #include <linux/etherdevice.h> 5 + #include <linux/pci.h> 6 + 7 + #include "wx_type.h" 8 + #include "wx_hw.h" 9 + #include "wx_mbx.h" 10 + #include "wx_vf.h" 11 + 12 + static void wx_virt_clr_reg(struct wx *wx) 13 + { 14 + u32 vfsrrctl, i; 15 + 16 + /* VRSRRCTL default values (BSIZEPACKET = 2048, BSIZEHEADER = 256) */ 17 + vfsrrctl = WX_VXRXDCTL_HDRSZ(wx_hdr_sz(WX_RX_HDR_SIZE)); 18 + vfsrrctl |= WX_VXRXDCTL_BUFSZ(wx_buf_sz(WX_RX_BUF_SIZE)); 19 + 20 + /* clear all rxd ctl */ 21 + for (i = 0; i < WX_VF_MAX_RING_NUMS; i++) 22 + wr32m(wx, WX_VXRXDCTL(i), 23 + WX_VXRXDCTL_HDRSZ_MASK | WX_VXRXDCTL_BUFSZ_MASK, 24 + vfsrrctl); 25 + 26 + rd32(wx, WX_VXSTATUS); 27 + } 28 + 29 + /** 30 + * wx_init_hw_vf - virtual function hardware initialization 31 + * @wx: pointer to hardware structure 32 + * 33 + * Initialize the mac address 34 + **/ 35 + void wx_init_hw_vf(struct wx *wx) 36 + { 37 + wx_get_mac_addr_vf(wx, wx->mac.addr); 38 + } 39 + EXPORT_SYMBOL(wx_init_hw_vf); 40 + 41 + static int wx_mbx_write_and_read_reply(struct wx *wx, u32 *req_buf, 42 + u32 *resp_buf, u16 size) 43 + { 44 + int ret; 45 + 46 + ret = wx_write_posted_mbx(wx, req_buf, size); 47 + if (ret) 48 + return ret; 49 + 50 + return wx_read_posted_mbx(wx, resp_buf, size); 51 + } 52 + 53 + /** 54 + * wx_reset_hw_vf - Performs hardware reset 55 + * @wx: pointer to hardware structure 56 + * 57 + * Resets the hardware by resetting the transmit and receive units, masks and 58 + * clears all interrupts. 
59 + * 60 + * Return: returns 0 on success, negative error code on failure 61 + **/ 62 + int wx_reset_hw_vf(struct wx *wx) 63 + { 64 + struct wx_mbx_info *mbx = &wx->mbx; 65 + u32 msgbuf[4] = {WX_VF_RESET}; 66 + u8 *addr = (u8 *)(&msgbuf[1]); 67 + u32 b4_buf[16] = {0}; 68 + u32 timeout = 200; 69 + int ret; 70 + u32 i; 71 + 72 + /* Call wx stop to disable tx/rx and clear interrupts */ 73 + wx_stop_adapter_vf(wx); 74 + 75 + /* reset the api version */ 76 + wx->vfinfo->vf_api = wx_mbox_api_null; 77 + 78 + /* backup msix vectors */ 79 + if (wx->b4_addr) { 80 + for (i = 0; i < 16; i++) 81 + b4_buf[i] = readl(wx->b4_addr + i * 4); 82 + } 83 + 84 + wr32m(wx, WX_VXCTRL, WX_VXCTRL_RST, WX_VXCTRL_RST); 85 + rd32(wx, WX_VXSTATUS); 86 + 87 + /* we cannot reset while the RSTI / RSTD bits are asserted */ 88 + while (!wx_check_for_rst_vf(wx) && timeout) { 89 + timeout--; 90 + udelay(5); 91 + } 92 + 93 + /* restore msix vectors */ 94 + if (wx->b4_addr) { 95 + for (i = 0; i < 16; i++) 96 + writel(b4_buf[i], wx->b4_addr + i * 4); 97 + } 98 + 99 + /* amlite: bme */ 100 + if (wx->mac.type == wx_mac_aml || wx->mac.type == wx_mac_aml40) 101 + wr32(wx, WX_VX_PF_BME, WX_VF_BME_ENABLE); 102 + 103 + if (!timeout) 104 + return -EBUSY; 105 + 106 + /* Reset VF registers to initial values */ 107 + wx_virt_clr_reg(wx); 108 + 109 + /* mailbox timeout can now become active */ 110 + mbx->timeout = 2000; 111 + 112 + ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf, 113 + ARRAY_SIZE(msgbuf)); 114 + if (ret) 115 + return ret; 116 + 117 + if (msgbuf[0] != (WX_VF_RESET | WX_VT_MSGTYPE_ACK) && 118 + msgbuf[0] != (WX_VF_RESET | WX_VT_MSGTYPE_NACK)) 119 + return -EINVAL; 120 + 121 + if (msgbuf[0] == (WX_VF_RESET | WX_VT_MSGTYPE_ACK)) 122 + ether_addr_copy(wx->mac.perm_addr, addr); 123 + 124 + wx->mac.mc_filter_type = msgbuf[3]; 125 + 126 + return 0; 127 + } 128 + EXPORT_SYMBOL(wx_reset_hw_vf); 129 + 130 + /** 131 + * wx_stop_adapter_vf - Generic stop Tx/Rx units 132 + * @wx: pointer to hardware 
structure 133 + * 134 + * Clears interrupts, disables transmit and receive units. 135 + **/ 136 + void wx_stop_adapter_vf(struct wx *wx) 137 + { 138 + u32 reg_val; 139 + u16 i; 140 + 141 + /* Clear interrupt mask to stop from interrupts being generated */ 142 + wr32(wx, WX_VXIMS, WX_VF_IRQ_CLEAR_MASK); 143 + 144 + /* Clear any pending interrupts, flush previous writes */ 145 + wr32(wx, WX_VXICR, U32_MAX); 146 + 147 + /* Disable the transmit unit. Each queue must be disabled. */ 148 + for (i = 0; i < wx->mac.max_tx_queues; i++) 149 + wr32(wx, WX_VXTXDCTL(i), WX_VXTXDCTL_FLUSH); 150 + 151 + /* Disable the receive unit by stopping each queue */ 152 + for (i = 0; i < wx->mac.max_rx_queues; i++) { 153 + reg_val = rd32(wx, WX_VXRXDCTL(i)); 154 + reg_val &= ~WX_VXRXDCTL_ENABLE; 155 + wr32(wx, WX_VXRXDCTL(i), reg_val); 156 + } 157 + /* Clear packet split and pool config */ 158 + wr32(wx, WX_VXMRQC, 0); 159 + 160 + /* flush all queues disables */ 161 + rd32(wx, WX_VXSTATUS); 162 + } 163 + EXPORT_SYMBOL(wx_stop_adapter_vf); 164 + 165 + /** 166 + * wx_set_rar_vf - set device MAC address 167 + * @wx: pointer to hardware structure 168 + * @index: Receive address register to write 169 + * @addr: Address to put into receive address register 170 + * @enable_addr: set flag that address is active 171 + * 172 + * Return: returns 0 on success, negative error code on failure 173 + **/ 174 + int wx_set_rar_vf(struct wx *wx, u32 index, u8 *addr, u32 enable_addr) 175 + { 176 + u32 msgbuf[3] = {WX_VF_SET_MAC_ADDR}; 177 + u8 *msg_addr = (u8 *)(&msgbuf[1]); 178 + int ret; 179 + 180 + memcpy(msg_addr, addr, ETH_ALEN); 181 + 182 + ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf, 183 + ARRAY_SIZE(msgbuf)); 184 + if (ret) 185 + return ret; 186 + msgbuf[0] &= ~WX_VT_MSGTYPE_CTS; 187 + 188 + /* if nacked the address was rejected, use "perm_addr" */ 189 + if (msgbuf[0] == (WX_VF_SET_MAC_ADDR | WX_VT_MSGTYPE_NACK)) { 190 + wx_get_mac_addr_vf(wx, wx->mac.addr); 191 + return -EINVAL; 192 + } 193 
+ 194 + return 0; 195 + } 196 + EXPORT_SYMBOL(wx_set_rar_vf); 197 + 198 + /** 199 + * wx_update_mc_addr_list_vf - Update Multicast addresses 200 + * @wx: pointer to the HW structure 201 + * @netdev: pointer to the net device structure 202 + * 203 + * Updates the Multicast Table Array. 204 + * 205 + * Return: returns 0 on success, negative error code on failure 206 + **/ 207 + int wx_update_mc_addr_list_vf(struct wx *wx, struct net_device *netdev) 208 + { 209 + u32 msgbuf[WX_VXMAILBOX_SIZE] = {WX_VF_SET_MULTICAST}; 210 + u16 *vector_l = (u16 *)&msgbuf[1]; 211 + struct netdev_hw_addr *ha; 212 + u32 cnt, i; 213 + 214 + cnt = netdev_mc_count(netdev); 215 + if (cnt > 28) 216 + cnt = 28; 217 + msgbuf[0] |= cnt << WX_VT_MSGINFO_SHIFT; 218 + 219 + i = 0; 220 + netdev_for_each_mc_addr(ha, netdev) { 221 + if (i == cnt) 222 + break; 223 + if (is_link_local_ether_addr(ha->addr)) 224 + continue; 225 + 226 + vector_l[i++] = wx_mta_vector(wx, ha->addr); 227 + } 228 + 229 + return wx_write_posted_mbx(wx, msgbuf, ARRAY_SIZE(msgbuf)); 230 + } 231 + EXPORT_SYMBOL(wx_update_mc_addr_list_vf); 232 + 233 + /** 234 + * wx_update_xcast_mode_vf - Update Multicast mode 235 + * @wx: pointer to the HW structure 236 + * @xcast_mode: new multicast mode 237 + * 238 + * Updates the Multicast Mode of VF. 
239 + * 240 + * Return: returns 0 on success, negative error code on failure 241 + **/ 242 + int wx_update_xcast_mode_vf(struct wx *wx, int xcast_mode) 243 + { 244 + u32 msgbuf[2] = {WX_VF_UPDATE_XCAST_MODE, xcast_mode}; 245 + int ret = 0; 246 + 247 + if (wx->vfinfo->vf_api < wx_mbox_api_13) 248 + return -EINVAL; 249 + 250 + ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf, 251 + ARRAY_SIZE(msgbuf)); 252 + if (ret) 253 + return ret; 254 + 255 + msgbuf[0] &= ~WX_VT_MSGTYPE_CTS; 256 + if (msgbuf[0] == (WX_VF_UPDATE_XCAST_MODE | WX_VT_MSGTYPE_NACK)) 257 + return -EINVAL; 258 + 259 + return 0; 260 + } 261 + EXPORT_SYMBOL(wx_update_xcast_mode_vf); 262 + 263 + /** 264 + * wx_get_link_state_vf - Get VF link state from PF 265 + * @wx: pointer to the HW structure 266 + * @link_state: link state storage 267 + * 268 + * Return: return state of the operation error or success. 269 + **/ 270 + int wx_get_link_state_vf(struct wx *wx, u16 *link_state) 271 + { 272 + u32 msgbuf[2] = {WX_VF_GET_LINK_STATE}; 273 + int ret; 274 + 275 + ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf, 276 + ARRAY_SIZE(msgbuf)); 277 + if (ret) 278 + return ret; 279 + 280 + if (msgbuf[0] & WX_VT_MSGTYPE_NACK) 281 + return -EINVAL; 282 + 283 + *link_state = msgbuf[1]; 284 + 285 + return 0; 286 + } 287 + EXPORT_SYMBOL(wx_get_link_state_vf); 288 + 289 + /** 290 + * wx_set_vfta_vf - Set/Unset vlan filter table address 291 + * @wx: pointer to the HW structure 292 + * @vlan: 12 bit VLAN ID 293 + * @vind: unused by VF drivers 294 + * @vlan_on: if true then set bit, else clear bit 295 + * @vlvf_bypass: boolean flag indicating updating default pool is okay 296 + * 297 + * Turn on/off specified VLAN in the VLAN filter table. 
298 + * 299 + * Return: returns 0 on success, negative error code on failure 300 + **/ 301 + int wx_set_vfta_vf(struct wx *wx, u32 vlan, u32 vind, bool vlan_on, 302 + bool vlvf_bypass) 303 + { 304 + u32 msgbuf[2] = {WX_VF_SET_VLAN, vlan}; 305 + bool vlan_offload = false; 306 + int ret; 307 + 308 + /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */ 309 + msgbuf[0] |= vlan_on << WX_VT_MSGINFO_SHIFT; 310 + /* if vf vlan offload is disabled, allow to create vlan under pf port vlan */ 311 + msgbuf[0] |= BIT(vlan_offload); 312 + 313 + ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf, 314 + ARRAY_SIZE(msgbuf)); 315 + if (ret) 316 + return ret; 317 + 318 + if (msgbuf[0] & WX_VT_MSGTYPE_ACK) 319 + return 0; 320 + 321 + return msgbuf[0] & WX_VT_MSGTYPE_NACK; 322 + } 323 + EXPORT_SYMBOL(wx_set_vfta_vf); 324 + 325 + void wx_get_mac_addr_vf(struct wx *wx, u8 *mac_addr) 326 + { 327 + ether_addr_copy(mac_addr, wx->mac.perm_addr); 328 + } 329 + EXPORT_SYMBOL(wx_get_mac_addr_vf); 330 + 331 + int wx_get_fw_version_vf(struct wx *wx) 332 + { 333 + u32 msgbuf[2] = {WX_VF_GET_FW_VERSION}; 334 + int ret; 335 + 336 + ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf, 337 + ARRAY_SIZE(msgbuf)); 338 + if (ret) 339 + return ret; 340 + 341 + if (msgbuf[0] & WX_VT_MSGTYPE_NACK) 342 + return -EINVAL; 343 + snprintf(wx->eeprom_id, 32, "0x%08x", msgbuf[1]); 344 + 345 + return 0; 346 + } 347 + EXPORT_SYMBOL(wx_get_fw_version_vf); 348 + 349 + int wx_set_uc_addr_vf(struct wx *wx, u32 index, u8 *addr) 350 + { 351 + u32 msgbuf[3] = {WX_VF_SET_MACVLAN}; 352 + u8 *msg_addr = (u8 *)(&msgbuf[1]); 353 + int ret; 354 + 355 + /* If index is one then this is the start of a new list and needs 356 + * indication to the PF so it can do it's own list management. 357 + * If it is zero then that tells the PF to just clear all of 358 + * this VF's macvlans and there is no new list. 
359 + */ 360 + msgbuf[0] |= index << WX_VT_MSGINFO_SHIFT; 361 + if (addr) 362 + memcpy(msg_addr, addr, 6); 363 + ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf, 364 + ARRAY_SIZE(msgbuf)); 365 + if (ret) 366 + return ret; 367 + 368 + msgbuf[0] &= ~WX_VT_MSGTYPE_CTS; 369 + 370 + if (msgbuf[0] == (WX_VF_SET_MACVLAN | WX_VT_MSGTYPE_NACK)) 371 + return -EINVAL; 372 + 373 + return 0; 374 + } 375 + EXPORT_SYMBOL(wx_set_uc_addr_vf); 376 + 377 + /** 378 + * wx_rlpml_set_vf - Set the maximum receive packet length 379 + * @wx: pointer to the HW structure 380 + * @max_size: value to assign to max frame size 381 + * 382 + * Return: returns 0 on success, negative error code on failure 383 + **/ 384 + int wx_rlpml_set_vf(struct wx *wx, u16 max_size) 385 + { 386 + u32 msgbuf[2] = {WX_VF_SET_LPE, max_size}; 387 + int ret; 388 + 389 + ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf, 390 + ARRAY_SIZE(msgbuf)); 391 + if (ret) 392 + return ret; 393 + if ((msgbuf[0] & WX_VF_SET_LPE) && 394 + (msgbuf[0] & WX_VT_MSGTYPE_NACK)) 395 + return -EINVAL; 396 + 397 + return 0; 398 + } 399 + EXPORT_SYMBOL(wx_rlpml_set_vf); 400 + 401 + /** 402 + * wx_negotiate_api_version - Negotiate supported API version 403 + * @wx: pointer to the HW structure 404 + * @api: integer containing requested API version 405 + * 406 + * Return: returns 0 on success, negative error code on failure 407 + **/ 408 + int wx_negotiate_api_version(struct wx *wx, int api) 409 + { 410 + u32 msgbuf[2] = {WX_VF_API_NEGOTIATE, api}; 411 + int ret; 412 + 413 + ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf, 414 + ARRAY_SIZE(msgbuf)); 415 + if (ret) 416 + return ret; 417 + 418 + msgbuf[0] &= ~WX_VT_MSGTYPE_CTS; 419 + 420 + /* Store value and return 0 on success */ 421 + if (msgbuf[0] == (WX_VF_API_NEGOTIATE | WX_VT_MSGTYPE_NACK)) 422 + return -EINVAL; 423 + wx->vfinfo->vf_api = api; 424 + 425 + return 0; 426 + } 427 + EXPORT_SYMBOL(wx_negotiate_api_version); 428 + 429 + int wx_get_queues_vf(struct wx *wx, u32 
*num_tcs, u32 *default_tc) 430 + { 431 + u32 msgbuf[5] = {WX_VF_GET_QUEUES}; 432 + int ret; 433 + 434 + /* do nothing if API doesn't support wx_get_queues */ 435 + if (wx->vfinfo->vf_api < wx_mbox_api_13) 436 + return -EINVAL; 437 + 438 + /* Fetch queue configuration from the PF */ 439 + ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf, 440 + ARRAY_SIZE(msgbuf)); 441 + if (ret) 442 + return ret; 443 + msgbuf[0] &= ~WX_VT_MSGTYPE_CTS; 444 + 445 + /* if we didn't get an ACK there must have been 446 + * some sort of mailbox error so we should treat it 447 + * as such 448 + */ 449 + if (msgbuf[0] != (WX_VF_GET_QUEUES | WX_VT_MSGTYPE_ACK)) 450 + return -EINVAL; 451 + /* record and validate values from message */ 452 + wx->mac.max_tx_queues = msgbuf[WX_VF_TX_QUEUES]; 453 + if (wx->mac.max_tx_queues == 0 || 454 + wx->mac.max_tx_queues > WX_VF_MAX_TX_QUEUES) 455 + wx->mac.max_tx_queues = WX_VF_MAX_TX_QUEUES; 456 + 457 + wx->mac.max_rx_queues = msgbuf[WX_VF_RX_QUEUES]; 458 + if (wx->mac.max_rx_queues == 0 || 459 + wx->mac.max_rx_queues > WX_VF_MAX_RX_QUEUES) 460 + wx->mac.max_rx_queues = WX_VF_MAX_RX_QUEUES; 461 + 462 + *num_tcs = msgbuf[WX_VF_TRANS_VLAN]; 463 + /* in case of unknown state assume we cannot tag frames */ 464 + if (*num_tcs > wx->mac.max_rx_queues) 465 + *num_tcs = 1; 466 + *default_tc = msgbuf[WX_VF_DEF_QUEUE]; 467 + /* default to queue 0 on out-of-bounds queue number */ 468 + if (*default_tc >= wx->mac.max_tx_queues) 469 + *default_tc = 0; 470 + 471 + return 0; 472 + } 473 + EXPORT_SYMBOL(wx_get_queues_vf); 474 + 475 + static int wx_get_link_status_from_pf(struct wx *wx, u32 *msgbuf) 476 + { 477 + u32 links_reg = msgbuf[1]; 478 + 479 + if (msgbuf[1] & WX_PF_NOFITY_VF_NET_NOT_RUNNING) 480 + wx->notify_down = true; 481 + else 482 + wx->notify_down = false; 483 + 484 + if (wx->notify_down) { 485 + wx->link = false; 486 + wx->speed = SPEED_UNKNOWN; 487 + return 0; 488 + } 489 + 490 + wx->link = WX_PFLINK_STATUS(links_reg); 491 + wx->speed = 
WX_PFLINK_SPEED(links_reg);

	return 0;
}

/* A PF control message without CTS means the PF is resetting; report an
 * error so the caller schedules a VF reset.
 */
static int wx_pf_ping_vf(struct wx *wx, u32 *msgbuf)
{
	if (!(msgbuf[0] & WX_VT_MSGTYPE_CTS))
		/* msg is not CTS, we need to do reset */
		return -EINVAL;

	return 0;
}

/* Per-MAC-type mapping from WX_VXSTATUS speed bits to SPEED_* values,
 * indexed by wx->mac.type (bit0_f corresponds to BIT(0) of the speed
 * field, and so on).
 */
static struct wx_link_reg_fields wx_speed_lookup_vf[] = {
	{wx_mac_unknown},
	{wx_mac_sp, SPEED_10000, SPEED_1000, SPEED_100, SPEED_UNKNOWN, SPEED_UNKNOWN},
	{wx_mac_em, SPEED_1000, SPEED_100, SPEED_10, SPEED_UNKNOWN, SPEED_UNKNOWN},
	{wx_mac_aml, SPEED_40000, SPEED_25000, SPEED_10000, SPEED_1000, SPEED_UNKNOWN},
	{wx_mac_aml40, SPEED_40000, SPEED_25000, SPEED_10000, SPEED_1000, SPEED_UNKNOWN},
};

/* Read link state directly from the VXSTATUS register and cache
 * wx->link / wx->speed.
 */
static void wx_check_physical_link(struct wx *wx)
{
	u32 val, link_val;
	int ret;

	/* get link status from hw status reg
	 * for SFP+ modules and DA cables, it can take up to 500usecs
	 * before the link status is correct
	 */
	if (wx->mac.type == wx_mac_em)
		ret = read_poll_timeout_atomic(rd32, val, val & GENMASK(4, 1),
					       100, 500, false, wx, WX_VXSTATUS);
	else
		ret = read_poll_timeout_atomic(rd32, val, val & BIT(0), 100,
					       500, false, wx, WX_VXSTATUS);
	if (ret) {
		/* poll timed out: treat the link as down */
		wx->speed = SPEED_UNKNOWN;
		wx->link = false;
		return;
	}

	wx->link = true;
	link_val = WX_VXSTATUS_SPEED(val);

	/* translate the reported speed bit via the per-MAC lookup table */
	if (link_val & BIT(0))
		wx->speed = wx_speed_lookup_vf[wx->mac.type].bit0_f;
	else if (link_val & BIT(1))
		wx->speed = wx_speed_lookup_vf[wx->mac.type].bit1_f;
	else if (link_val & BIT(2))
		wx->speed = wx_speed_lookup_vf[wx->mac.type].bit2_f;
	else if (link_val & BIT(3))
		wx->speed = wx_speed_lookup_vf[wx->mac.type].bit3_f;
	else
		wx->speed = SPEED_UNKNOWN;
}

/**
 * wx_check_mac_link_vf - Update link state via the PF mailbox
 * @wx: pointer to the HW structure
 *
 * Drain one pending mailbox message, if any, and update wx->link and
 * wx->speed from it; with no message pending, fall back to reading the
 * physical link status register.
 *
 * Return: 0 on success, -EBUSY when CTS appears lost and the caller
 * should schedule a VF reset.
 */
int wx_check_mac_link_vf(struct wx *wx)
{
	struct wx_mbx_info *mbx = &wx->mbx;
	u32 msgbuf[2] = {0};
	int ret = 0;

	if (!mbx->timeout)
		goto out;

	wx_check_for_rst_vf(wx);
	if (!wx_check_for_msg_vf(wx))
		ret = wx_read_mbx_vf(wx, msgbuf, 2);
	if (ret)
		goto out;

	switch (msgbuf[0] & GENMASK(8, 0)) {
	case WX_PF_NOFITY_VF_LINK_STATUS | WX_PF_CONTROL_MSG:
		ret = wx_get_link_status_from_pf(wx, msgbuf);
		goto out;
	case WX_PF_CONTROL_MSG:
		ret = wx_pf_ping_vf(wx, msgbuf);
		goto out;
	case 0:
		if (msgbuf[0] & WX_VT_MSGTYPE_NACK) {
			/* msg is NACK, we must have lost CTS status */
			ret = -EBUSY;
			goto out;
		}
		/* no message, check link status */
		wx_check_physical_link(wx);
		goto out;
	default:
		break;
	}

	if (!(msgbuf[0] & WX_VT_MSGTYPE_CTS)) {
		/* msg is not CTS and is NACK we must have lost CTS status */
		if (msgbuf[0] & WX_VT_MSGTYPE_NACK)
			ret = -EBUSY;
		goto out;
	}

	/* the pf is talking, if we timed out in the past we reinit */
	if (!mbx->timeout) {
		ret = -EBUSY;
		goto out;
	}

out:
	return ret;
}
+127
drivers/net/ethernet/wangxun/libwx/wx_vf.h
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */

#ifndef _WX_VF_H_
#define _WX_VF_H_

/* VF control/status registers (offsets into the VF BAR) */
#define WX_VF_MAX_RING_NUMS	8
#define WX_VX_PF_BME		0x4B8
#define WX_VF_BME_ENABLE	BIT(0)
#define WX_VXSTATUS		0x4
#define WX_VXCTRL		0x8
#define WX_VXCTRL_RST		BIT(0)

/* VF interrupt registers */
#define WX_VXMRQC		0x78
#define WX_VXICR		0x100
#define WX_VXIMS		0x108
#define WX_VXIMC		0x10C
#define WX_VF_IRQ_CLEAR_MASK	7
#define WX_VF_MAX_TX_QUEUES	4
#define WX_VF_MAX_RX_QUEUES	4
#define WX_VXTXDCTL(r)		(0x3010 + (0x40 * (r)))
#define WX_VXRXDCTL(r)		(0x1010 + (0x40 * (r)))
#define WX_VXRXDCTL_ENABLE	BIT(0)
#define WX_VXTXDCTL_FLUSH	BIT(26)

#define WX_VXITR(i)		(0x200 + (4 * (i))) /* i=[0,1] */
#define WX_VXITR_MASK		GENMASK(8, 0)
#define WX_VXITR_CNT_WDIS	BIT(31)
#define WX_VXIVAR_MISC		0x260
#define WX_VXIVAR(i)		(0x240 + (4 * (i))) /* i=[0,3] */

#define WX_VXRXDCTL_RSCMAX(f)	FIELD_PREP(GENMASK(24, 23), f)
#define WX_VXRXDCTL_BUFLEN(f)	FIELD_PREP(GENMASK(6, 1), f)
#define WX_VXRXDCTL_BUFSZ(f)	FIELD_PREP(GENMASK(11, 8), f)
#define WX_VXRXDCTL_HDRSZ(f)	FIELD_PREP(GENMASK(15, 12), f)

#define WX_VXRXDCTL_RSCMAX_MASK	GENMASK(24, 23)
#define WX_VXRXDCTL_BUFLEN_MASK	GENMASK(6, 1)
#define WX_VXRXDCTL_BUFSZ_MASK	GENMASK(11, 8)
#define WX_VXRXDCTL_HDRSZ_MASK	GENMASK(15, 12)

/* Encode a size for the hardware: the full-scale value (2 << mwidth)
 * is encoded as 0, anything else is scaled down by 2^uwidth.
 */
#define wx_conf_size(v, mwidth, uwidth) ({ \
	typeof(v) _v = (v); \
	(_v == 2 << (mwidth) ? 0 : _v >> (uwidth)); \
})
#define wx_buf_len(v)		wx_conf_size(v, 13, 7)
#define wx_hdr_sz(v)		wx_conf_size(v, 10, 6)
#define wx_buf_sz(v)		wx_conf_size(v, 14, 10)
#define wx_pkt_thresh(v)	wx_conf_size(v, 4, 0)

#define WX_RX_HDR_SIZE		256
#define WX_RX_BUF_SIZE		2048

#define WX_RXBUFFER_2048	(2048)
#define WX_RXBUFFER_3072	3072

/* Receive Path */
#define WX_VXRDBAL(r)		(0x1000 + (0x40 * (r)))
#define WX_VXRDBAH(r)		(0x1004 + (0x40 * (r)))
#define WX_VXRDT(r)		(0x1008 + (0x40 * (r)))
#define WX_VXRDH(r)		(0x100C + (0x40 * (r)))

#define WX_VXRXDCTL_RSCEN	BIT(29)
#define WX_VXRXDCTL_DROP	BIT(30)
#define WX_VXRXDCTL_VLAN	BIT(31)

/* Transmit Path */
#define WX_VXTDBAL(r)		(0x3000 + (0x40 * (r)))
#define WX_VXTDBAH(r)		(0x3004 + (0x40 * (r)))
#define WX_VXTDT(r)		(0x3008 + (0x40 * (r)))
#define WX_VXTDH(r)		(0x300C + (0x40 * (r)))

#define WX_VXTXDCTL_ENABLE	BIT(0)
#define WX_VXTXDCTL_BUFLEN(f)	FIELD_PREP(GENMASK(6, 1), f)
#define WX_VXTXDCTL_PTHRESH(f)	FIELD_PREP(GENMASK(11, 8), f)
#define WX_VXTXDCTL_WTHRESH(f)	FIELD_PREP(GENMASK(22, 16), f)

/* Packet-split receive types */
#define WX_VXMRQC_PSR(f)	FIELD_PREP(GENMASK(5, 1), f)
#define WX_VXMRQC_PSR_MASK	GENMASK(5, 1)
#define WX_VXMRQC_PSR_L4HDR	BIT(0)
#define WX_VXMRQC_PSR_L3HDR	BIT(1)
#define WX_VXMRQC_PSR_L2HDR	BIT(2)
#define WX_VXMRQC_PSR_TUNHDR	BIT(3)
#define WX_VXMRQC_PSR_TUNMAC	BIT(4)

/* RSS key and redirection table */
#define WX_VXRSSRK(i)		(0x80 + ((i) * 4)) /* i=[0,9] */
#define WX_VXRETA(i)		(0xC0 + ((i) * 4)) /* i=[0,15] */

#define WX_VXMRQC_RSS(f)	FIELD_PREP(GENMASK(31, 16), f)
#define WX_VXMRQC_RSS_MASK	GENMASK(31, 16)
#define WX_VXMRQC_RSS_ALG_IPV4_TCP	BIT(0)
#define WX_VXMRQC_RSS_ALG_IPV4		BIT(1)
#define WX_VXMRQC_RSS_ALG_IPV6		BIT(4)
#define WX_VXMRQC_RSS_ALG_IPV6_TCP	BIT(5)
#define WX_VXMRQC_RSS_EN		BIT(8)
#define WX_VXMRQC_RSS_HASH(f)	FIELD_PREP(GENMASK(15, 13), f)

#define WX_PFLINK_STATUS(g)	FIELD_GET(BIT(0), g)
#define WX_PFLINK_SPEED(g)	FIELD_GET(GENMASK(31, 1), g)
#define WX_VXSTATUS_SPEED(g)	FIELD_GET(GENMASK(4, 1), g)

/* Per-MAC-type decode of the VXSTATUS speed bits: bitN_f gives the
 * SPEED_* value reported when BIT(N) of the speed field is set.
 */
struct wx_link_reg_fields {
	u32 mac_type;
	u32 bit0_f;
	u32 bit1_f;
	u32 bit2_f;
	u32 bit3_f;
	u32 bit4_f;
};

void wx_init_hw_vf(struct wx *wx);
int wx_reset_hw_vf(struct wx *wx);
void wx_get_mac_addr_vf(struct wx *wx, u8 *mac_addr);
void wx_stop_adapter_vf(struct wx *wx);
int wx_get_fw_version_vf(struct wx *wx);
int wx_set_rar_vf(struct wx *wx, u32 index, u8 *addr, u32 enable_addr);
int wx_update_mc_addr_list_vf(struct wx *wx, struct net_device *netdev);
int wx_set_uc_addr_vf(struct wx *wx, u32 index, u8 *addr);
int wx_rlpml_set_vf(struct wx *wx, u16 max_size);
int wx_negotiate_api_version(struct wx *wx, int api);
int wx_get_queues_vf(struct wx *wx, u32 *num_tcs, u32 *default_tc);
int wx_update_xcast_mode_vf(struct wx *wx, int xcast_mode);
int wx_get_link_state_vf(struct wx *wx, u16 *link_state);
int wx_set_vfta_vf(struct wx *wx, u32 vlan, u32 vind, bool vlan_on,
		   bool vlvf_bypass);
int wx_check_mac_link_vf(struct wx *wx);

#endif /* _WX_VF_H_ */
+414
drivers/net/ethernet/wangxun/libwx/wx_vf_common.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */

#include <linux/etherdevice.h>
#include <linux/pci.h>

#include "wx_type.h"
#include "wx_mbx.h"
#include "wx_lib.h"
#include "wx_vf.h"
#include "wx_vf_lib.h"
#include "wx_vf_common.h"

/* Power-management suspend hook shared by the Wangxun VF drivers. */
int wxvf_suspend(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct wx *wx = pci_get_drvdata(pdev);

	netif_device_detach(wx->netdev);
	wx_clear_interrupt_scheme(wx);
	pci_disable_device(pdev);

	return 0;
}
EXPORT_SYMBOL(wxvf_suspend);

/* Shutdown is suspend without the PM framework's involvement. */
void wxvf_shutdown(struct pci_dev *pdev)
{
	wxvf_suspend(&pdev->dev);
}
EXPORT_SYMBOL(wxvf_shutdown);

/* Power-management resume hook shared by the Wangxun VF drivers. */
int wxvf_resume(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct wx *wx = pci_get_drvdata(pdev);

	pci_set_master(pdev);
	wx_init_interrupt_scheme(wx);
	netif_device_attach(wx->netdev);

	return 0;
}
EXPORT_SYMBOL(wxvf_resume);

/* Common teardown for the VF drivers' .remove callbacks. */
void wxvf_remove(struct pci_dev *pdev)
{
	struct wx *wx = pci_get_drvdata(pdev);
	struct net_device *netdev;

	cancel_work_sync(&wx->service_task);
	netdev = wx->netdev;
	unregister_netdev(netdev);
	kfree(wx->vfinfo);
	kfree(wx->rss_key);
	kfree(wx->mac_table);
	wx_clear_interrupt_scheme(wx);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
	pci_disable_device(pdev);
}
EXPORT_SYMBOL(wxvf_remove);

/* Misc (mailbox/link) interrupt: flag the service path to refresh the
 * link state, then re-clear the cause.
 */
static irqreturn_t wx_msix_misc_vf(int __always_unused irq, void *data)
{
	struct wx *wx = data;

	set_bit(WX_FLAG_NEED_UPDATE_LINK, wx->flags);
	/* Clear the interrupt */
	if (netif_running(wx->netdev))
		wr32(wx, WX_VXIMC, wx->eims_other);

	return IRQ_HANDLED;
}

/**
 * wx_request_msix_irqs_vf - request all MSI-X vectors for the VF
 * @wx: board private structure
 *
 * Request one IRQ per in-use queue vector plus the misc vector; on
 * failure every IRQ requested so far is freed again.
 *
 * Return: 0 on success, negative error code on failure
 */
int wx_request_msix_irqs_vf(struct wx *wx)
{
	struct
net_device *netdev = wx->netdev;
	int vector, err;

	for (vector = 0; vector < wx->num_q_vectors; vector++) {
		struct wx_q_vector *q_vector = wx->q_vector[vector];
		struct msix_entry *entry = &wx->msix_q_entries[vector];

		if (q_vector->tx.ring && q_vector->rx.ring)
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-TxRx-%d", netdev->name, entry->entry);
		else
			/* skip this unused q_vector */
			continue;

		err = request_irq(entry->vector, wx_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			wx_err(wx, "request_irq failed for MSIX interrupt %s Error: %d\n",
			       q_vector->name, err);
			goto free_queue_irqs;
		}
	}

	err = request_threaded_irq(wx->msix_entry->vector, wx_msix_misc_vf,
				   NULL, IRQF_ONESHOT, netdev->name, wx);
	if (err) {
		wx_err(wx, "request_irq for msix_other failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	/* unwind: free every queue IRQ requested so far */
	while (vector) {
		vector--;
		free_irq(wx->msix_q_entries[vector].vector,
			 wx->q_vector[vector]);
	}
	wx_reset_interrupt_capability(wx);
	return err;
}
EXPORT_SYMBOL(wx_request_msix_irqs_vf);

/* Walk the mailbox API versions we support, newest first, and settle on
 * the first one the PF accepts.
 */
void wx_negotiate_api_vf(struct wx *wx)
{
	int api[] = {
		     wx_mbox_api_13,
		     wx_mbox_api_null};
	int err = 0, idx = 0;

	spin_lock_bh(&wx->mbx.mbx_lock);
	while (api[idx] != wx_mbox_api_null) {
		err = wx_negotiate_api_version(wx, api[idx]);
		if (!err)
			break;
		idx++;
	}
	spin_unlock_bh(&wx->mbx.mbx_lock);
}
EXPORT_SYMBOL(wx_negotiate_api_vf);

/* Reset the VF hardware, renegotiate the mailbox API, and re-sync the
 * MAC address into the netdev.
 */
void wx_reset_vf(struct wx *wx)
{
	struct net_device *netdev = wx->netdev;
	int ret = 0;

	ret = wx_reset_hw_vf(wx);
	if (!ret)
		wx_init_hw_vf(wx);
	wx_negotiate_api_vf(wx);
	if (is_valid_ether_addr(wx->mac.addr)) {
		eth_hw_addr_set(netdev,
wx->mac.addr); 150 + ether_addr_copy(netdev->perm_addr, wx->mac.addr); 151 + } 152 + } 153 + EXPORT_SYMBOL(wx_reset_vf); 154 + 155 + void wx_set_rx_mode_vf(struct net_device *netdev) 156 + { 157 + struct wx *wx = netdev_priv(netdev); 158 + unsigned int flags = netdev->flags; 159 + int xcast_mode; 160 + 161 + xcast_mode = (flags & IFF_ALLMULTI) ? WXVF_XCAST_MODE_ALLMULTI : 162 + (flags & (IFF_BROADCAST | IFF_MULTICAST)) ? 163 + WXVF_XCAST_MODE_MULTI : WXVF_XCAST_MODE_NONE; 164 + /* request the most inclusive mode we need */ 165 + if (flags & IFF_PROMISC) 166 + xcast_mode = WXVF_XCAST_MODE_PROMISC; 167 + else if (flags & IFF_ALLMULTI) 168 + xcast_mode = WXVF_XCAST_MODE_ALLMULTI; 169 + else if (flags & (IFF_BROADCAST | IFF_MULTICAST)) 170 + xcast_mode = WXVF_XCAST_MODE_MULTI; 171 + else 172 + xcast_mode = WXVF_XCAST_MODE_NONE; 173 + 174 + spin_lock_bh(&wx->mbx.mbx_lock); 175 + wx_update_xcast_mode_vf(wx, xcast_mode); 176 + wx_update_mc_addr_list_vf(wx, netdev); 177 + wx_write_uc_addr_list_vf(netdev); 178 + spin_unlock_bh(&wx->mbx.mbx_lock); 179 + } 180 + EXPORT_SYMBOL(wx_set_rx_mode_vf); 181 + 182 + /** 183 + * wx_configure_rx_vf - Configure Receive Unit after Reset 184 + * @wx: board private structure 185 + * 186 + * Configure the Rx unit of the MAC after a reset. 
 **/
static void wx_configure_rx_vf(struct wx *wx)
{
	struct net_device *netdev = wx->netdev;
	int i, ret;

	wx_setup_psrtype_vf(wx);
	wx_setup_vfmrqc_vf(wx);

	spin_lock_bh(&wx->mbx.mbx_lock);
	/* max frame = MTU + L2 header + FCS + one VLAN tag */
	ret = wx_rlpml_set_vf(wx,
			      netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
	spin_unlock_bh(&wx->mbx.mbx_lock);
	if (ret)
		wx_dbg(wx, "Failed to set MTU at %d\n", netdev->mtu);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < wx->num_rx_queues; i++) {
		struct wx_ring *rx_ring = wx->rx_ring[i];
/* NOTE(review): HAVE_SWIOTLB_SKIP_CPU_SYNC looks like a leftover
 * out-of-tree compat guard and is never defined here, so this call is
 * compiled out — confirm whether the guard (and the call) should stay.
 */
#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC
		wx_set_rx_buffer_len_vf(wx, rx_ring);
#endif
		wx_configure_rx_ring_vf(wx, rx_ring);
	}
}

/* Apply the full VF datapath configuration after a reset. */
void wx_configure_vf(struct wx *wx)
{
	wx_set_rx_mode_vf(wx->netdev);
	wx_configure_tx_vf(wx);
	wx_configure_rx_vf(wx);
}
EXPORT_SYMBOL(wx_configure_vf);

/**
 * wx_set_mac_vf - Change the device MAC address
 * @netdev: network interface device structure
 * @p: pointer to a struct sockaddr holding the new address
 *
 * Ask the PF (via RAR slot 1) to accept the new address before
 * committing it locally.
 *
 * Return: 0 on success, -EPERM if the PF refuses the address, other
 * negative error code on failure.
 */
int wx_set_mac_vf(struct net_device *netdev, void *p)
{
	struct wx *wx = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int ret;

	ret = eth_prepare_mac_addr_change(netdev, addr);
	if (ret)
		return ret;

	spin_lock_bh(&wx->mbx.mbx_lock);
	ret = wx_set_rar_vf(wx, 1, (u8 *)addr->sa_data, 1);
	spin_unlock_bh(&wx->mbx.mbx_lock);

	if (ret)
		return -EPERM;

	memcpy(wx->mac.addr, addr->sa_data, netdev->addr_len);
	memcpy(wx->mac.perm_addr, addr->sa_data, netdev->addr_len);
	eth_hw_addr_set(netdev, addr->sa_data);

	return 0;
}
EXPORT_SYMBOL(wx_set_mac_vf);

/* Refresh cached link state from the PF when an update was requested;
 * on mailbox failure, flag the VF for reset.
 */
void wxvf_watchdog_update_link(struct wx *wx)
{
	int err;

	if (!test_bit(WX_FLAG_NEED_UPDATE_LINK, wx->flags))
		return;

	spin_lock_bh(&wx->mbx.mbx_lock);
	err = wx_check_mac_link_vf(wx);
	spin_unlock_bh(&wx->mbx.mbx_lock);
	if (err) {
		wx->link = false;
		set_bit(WX_FLAG_NEED_DO_RESET, wx->flags);
	}
	clear_bit(WX_FLAG_NEED_UPDATE_LINK, wx->flags);
}
EXPORT_SYMBOL(wxvf_watchdog_update_link);

/* Unmask all interrupt causes the VF currently uses. */
static void wxvf_irq_enable(struct wx *wx)
{
	wr32(wx, WX_VXIMC, wx->eims_enable_mask);
}

/* Final bring-up steps shared by the open and reinit paths. */
static void wxvf_up_complete(struct wx *wx)
{
	/* Always set the carrier off */
	netif_carrier_off(wx->netdev);
	mod_timer(&wx->service_timer, jiffies + HZ);
	set_bit(WX_FLAG_NEED_UPDATE_LINK, wx->flags);

	wx_configure_msix_vf(wx);
	smp_mb__before_atomic();
	wx_napi_enable_all(wx);

	/* clear any pending interrupts, may auto mask */
	wr32(wx, WX_VXICR, U32_MAX);
	wxvf_irq_enable(wx);
	/* enable transmits */
	netif_tx_start_all_queues(wx->netdev);
}

/**
 * wxvf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Allocate rings and IRQs, program the hardware, and start the queues;
 * on any failure the VF is reset back to a clean state.
 *
 * Return: 0 on success, negative error code on failure
 */
int wxvf_open(struct net_device *netdev)
{
	struct wx *wx = netdev_priv(netdev);
	int err;

	err = wx_setup_resources(wx);
	if (err)
		goto err_reset;
	wx_configure_vf(wx);

	err = wx_request_msix_irqs_vf(wx);
	if (err)
		goto err_free_resources;

	/* Notify the stack of the actual queue counts. */
	err = netif_set_real_num_tx_queues(netdev, wx->num_tx_queues);
	if (err)
		goto err_free_irq;

	err = netif_set_real_num_rx_queues(netdev, wx->num_rx_queues);
	if (err)
		goto err_free_irq;

	wxvf_up_complete(wx);

	return 0;
err_free_irq:
	wx_free_irq(wx);
err_free_resources:
	wx_free_resources(wx);
err_reset:
	wx_reset_vf(wx);
	return err;
}
EXPORT_SYMBOL(wxvf_open);

/* Quiesce the interface: stop the service timer, TX queues and NAPI,
 * reset the VF, then reclaim ring contents.
 */
static void wxvf_down(struct wx *wx)
{
	struct net_device *netdev = wx->netdev;

	timer_delete_sync(&wx->service_timer);
	netif_tx_stop_all_queues(netdev);
	netif_tx_disable(netdev);
	netif_carrier_off(netdev);
	wx_napi_disable_all(wx);
	wx_reset_vf(wx);

	wx_clean_all_tx_rings(wx);
	wx_clean_all_rx_rings(wx);
}

/* Full down/up cycle, serialized via the WX_STATE_RESETTING bit. */
static void wxvf_reinit_locked(struct wx *wx)
{
	while (test_and_set_bit(WX_STATE_RESETTING, wx->state))
		usleep_range(1000, 2000);
	wxvf_down(wx);
	wx_free_irq(wx);
	wx_configure_vf(wx);
	wx_request_msix_irqs_vf(wx);
	wxvf_up_complete(wx);
	clear_bit(WX_STATE_RESETTING, wx->state);
}

/* Service-task helper: perform a deferred reset when one was flagged. */
static void wxvf_reset_subtask(struct wx *wx)
{
	if (!test_bit(WX_FLAG_NEED_DO_RESET, wx->flags))
		return;
	clear_bit(WX_FLAG_NEED_DO_RESET, wx->flags);

	rtnl_lock();
	if (test_bit(WX_STATE_RESETTING, wx->state) ||
	    !(netif_running(wx->netdev))) {
		rtnl_unlock();
		return;
	}
	wxvf_reinit_locked(wx);
	rtnl_unlock();
}

/**
 * wxvf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Return: 0, this is not allowed to fail
 */
int wxvf_close(struct net_device *netdev)
{
	struct wx *wx = netdev_priv(netdev);

	wxvf_down(wx);
	wx_free_irq(wx);
	wx_free_resources(wx);

	return 0;
}
EXPORT_SYMBOL(wxvf_close);

/* Service-task helper: propagate cached link state to the netdev. */
static void wxvf_link_config_subtask(struct wx *wx)
{
	struct net_device *netdev = wx->netdev;
	wxvf_watchdog_update_link(wx);
	if (wx->link) {
		/* only log and toggle carrier on an actual transition */
		if (netif_carrier_ok(netdev))
			return;
		netif_carrier_on(netdev);
		netdev_info(netdev, "Link is Up - %s\n",
			    phy_speed_to_str(wx->speed));
	} else {
		if (!netif_carrier_ok(netdev))
			return;
		netif_carrier_off(netdev);
		netdev_info(netdev, "Link is Down\n");
	}
}

/* Deferred service work: update link state, then handle any pending
 * reset request.
 */
static void wxvf_service_task(struct work_struct *work)
{
	struct wx *wx = container_of(work, struct wx, service_task);

	wxvf_link_config_subtask(wx);
	wxvf_reset_subtask(wx);
	wx_service_event_complete(wx);
}

/* Initialize the service timer and work item used by the VF drivers. */
void wxvf_init_service(struct wx *wx)
{
	timer_setup(&wx->service_timer, wx_service_timer, 0);
	INIT_WORK(&wx->service_task, wxvf_service_task);
	clear_bit(WX_STATE_SERVICE_SCHED, wx->state);
}
EXPORT_SYMBOL(wxvf_init_service);
+22
drivers/net/ethernet/wangxun/libwx/wx_vf_common.h
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */

#ifndef _WX_VF_COMMON_H_
#define _WX_VF_COMMON_H_

/* Shared netdev/PM/IRQ plumbing for the Wangxun VF drivers
 * (txgbevf, ngbevf).
 */
int wxvf_suspend(struct device *dev_d);
void wxvf_shutdown(struct pci_dev *pdev);
int wxvf_resume(struct device *dev_d);
void wxvf_remove(struct pci_dev *pdev);
int wx_request_msix_irqs_vf(struct wx *wx);
void wx_negotiate_api_vf(struct wx *wx);
void wx_reset_vf(struct wx *wx);
void wx_set_rx_mode_vf(struct net_device *netdev);
void wx_configure_vf(struct wx *wx);
int wx_set_mac_vf(struct net_device *netdev, void *p);
void wxvf_watchdog_update_link(struct wx *wx);
int wxvf_open(struct net_device *netdev);
int wxvf_close(struct net_device *netdev);
void wxvf_init_service(struct wx *wx);

#endif /* _WX_VF_COMMON_H_ */
+280
drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */

#include <linux/etherdevice.h>
#include <linux/pci.h>

#include "wx_type.h"
#include "wx_hw.h"
#include "wx_lib.h"
#include "wx_vf.h"
#include "wx_vf_lib.h"

/* Program the ITR register for one queue vector, keeping WDIS set so
 * the write does not itself assert an interrupt.
 */
static void wx_write_eitr_vf(struct wx_q_vector *q_vector)
{
	struct wx *wx = q_vector->wx;
	int v_idx = q_vector->v_idx;
	u32 itr_reg;

	itr_reg = q_vector->itr & WX_VXITR_MASK;

	/* set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
	 */
	itr_reg |= WX_VXITR_CNT_WDIS;

	wr32(wx, WX_VXITR(v_idx), itr_reg);
}

/* Map a queue cause (or the misc cause when direction == -1) to an
 * MSI-X vector in the IVAR registers.
 */
static void wx_set_ivar_vf(struct wx *wx, s8 direction, u8 queue,
			   u8 msix_vector)
{
	u32 ivar, index;

	if (direction == -1) {
		/* other causes */
		msix_vector |= WX_PX_IVAR_ALLOC_VAL;
		ivar = rd32(wx, WX_VXIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		wr32(wx, WX_VXIVAR_MISC, ivar);
	} else {
		/* tx or rx causes: two queues per IVAR register, one
		 * byte per (queue, direction) pair
		 */
		msix_vector |= WX_PX_IVAR_ALLOC_VAL;
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = rd32(wx, WX_VXIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		wr32(wx, WX_VXIVAR(queue >> 1), ivar);
	}
}

/* Route every queue's RX/TX causes to its vector and build the eims
 * enable masks used by the interrupt enable/disable paths.
 */
void wx_configure_msix_vf(struct wx *wx)
{
	int v_idx;

	wx->eims_enable_mask = 0;
	for (v_idx = 0; v_idx < wx->num_q_vectors; v_idx++) {
		struct wx_q_vector *q_vector = wx->q_vector[v_idx];
		struct wx_ring *ring;

		wx_for_each_ring(ring, q_vector->rx)
			wx_set_ivar_vf(wx, 0, ring->reg_idx, v_idx);

		wx_for_each_ring(ring, q_vector->tx)
			wx_set_ivar_vf(wx, 1, ring->reg_idx, v_idx);

		/* add q_vector eims value to global eims_enable_mask */
		wx->eims_enable_mask |= BIT(v_idx);
		wx_write_eitr_vf(q_vector);
	}

	wx_set_ivar_vf(wx, -1, 1, v_idx);

	/* setup eims_other and add value to global eims_enable_mask */
	wx->eims_other = BIT(v_idx);
	wx->eims_enable_mask |= wx->eims_other;
}

/* Push the netdev's unicast address list to the PF.
 * Returns the number of addresses sent; sending index 0 with a NULL
 * address clears all macvlans on this VF.
 */
int wx_write_uc_addr_list_vf(struct net_device *netdev)
{
	struct wx *wx = netdev_priv(netdev);
	int count = 0;

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, netdev)
			wx_set_uc_addr_vf(wx, ++count, ha->addr);
	} else {
		/*
		 * If the list is empty then send message to PF driver to
		 * clear all macvlans on this VF.
		 */
		wx_set_uc_addr_vf(wx, 0, NULL);
	}

	return count;
}

/**
 * wx_configure_tx_ring_vf - Configure Tx ring after Reset
 * @wx: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/
static void wx_configure_tx_ring_vf(struct wx *wx, struct wx_ring *ring)
{
	u8 reg_idx = ring->reg_idx;
	u64 tdba = ring->dma;
	u32 txdctl = 0;
	int ret;

	/* disable queue to avoid issues while updating state */
	wr32(wx, WX_VXTXDCTL(reg_idx), WX_VXTXDCTL_FLUSH);
	wr32(wx, WX_VXTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
	wr32(wx, WX_VXTDBAH(reg_idx), tdba >> 32);

	/* enable relaxed ordering */
	pcie_capability_clear_and_set_word(wx->pdev, PCI_EXP_DEVCTL,
					   0, PCI_EXP_DEVCTL_RELAX_EN);

	/* reset head and tail pointers */
	wr32(wx, WX_VXTDH(reg_idx), 0);
	wr32(wx, WX_VXTDT(reg_idx), 0);
	ring->tail = wx->hw_addr + WX_VXTDT(reg_idx);

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	txdctl |= WX_VXTXDCTL_BUFLEN(wx_buf_len(ring->count));
	txdctl |= WX_VXTXDCTL_ENABLE;

	/* reinitialize tx_buffer_info */
	memset(ring->tx_buffer_info, 0,
	       sizeof(struct wx_tx_buffer) * ring->count);

	wr32(wx, WX_VXTXDCTL(reg_idx), txdctl);
	/* poll to verify queue is enabled */
	ret = read_poll_timeout(rd32, txdctl, txdctl & WX_VXTXDCTL_ENABLE,
				1000, 10000, true, wx, WX_VXTXDCTL(reg_idx));
	if (ret == -ETIMEDOUT)
		wx_err(wx, "Could not enable Tx Queue %d\n", reg_idx);
}

/**
 * wx_configure_tx_vf - Configure Transmit Unit after Reset
 * @wx: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
void wx_configure_tx_vf(struct wx *wx)
{
	u32 i;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < wx->num_tx_queues; i++)
		wx_configure_tx_ring_vf(wx, wx->tx_ring[i]);
}

/* Program drop-enable plus header/buffer split sizes for one RX queue. */
static void wx_configure_srrctl_vf(struct wx *wx, struct wx_ring *ring,
				   int index)
{
	u32 srrctl;

	srrctl = rd32m(wx, WX_VXRXDCTL(index),
		       (u32)~(WX_VXRXDCTL_HDRSZ_MASK | WX_VXRXDCTL_BUFSZ_MASK));
	srrctl |= WX_VXRXDCTL_DROP;
	srrctl |= WX_VXRXDCTL_HDRSZ(wx_hdr_sz(WX_RX_HDR_SIZE));
	srrctl |= WX_VXRXDCTL_BUFSZ(wx_buf_sz(WX_RX_BUF_SIZE));

	wr32(wx, WX_VXRXDCTL(index), srrctl);
}

/* Enable packet-split receive for all supported header types. */
void wx_setup_psrtype_vf(struct wx *wx)
{
	/* PSRTYPE must be initialized */
	u32 psrtype = WX_VXMRQC_PSR_L2HDR |
		      WX_VXMRQC_PSR_L3HDR |
		      WX_VXMRQC_PSR_L4HDR |
		      WX_VXMRQC_PSR_TUNHDR |
		      WX_VXMRQC_PSR_TUNMAC;

	wr32m(wx, WX_VXMRQC, WX_VXMRQC_PSR_MASK, WX_VXMRQC_PSR(psrtype));
}

/* Seed the RSS key and spread the redirection table evenly across the
 * active RX queues, then enable RSS hashing.
 */
void wx_setup_vfmrqc_vf(struct wx *wx)
{
	u16 rss_i = wx->num_rx_queues;
	u32 vfmrqc = 0, vfreta = 0;
	u8 i, j;

	/* Fill out hash function seeds */
	netdev_rss_key_fill(wx->rss_key, sizeof(wx->rss_key));
	for (i = 0; i < WX_RSS_KEY_SIZE / 4; i++)
		wr32(wx, WX_VXRSSRK(i), wx->rss_key[i]);

	for (i =
0, j = 0; i < WX_MAX_RETA_ENTRIES; i++, j++) {
		if (j == rss_i)
			j = 0;

		wx->rss_indir_tbl[i] = j;

		/* pack four 8-bit entries per 32-bit VXRETA register */
		vfreta |= j << (i & 0x3) * 8;
		if ((i & 3) == 3) {
			wr32(wx, WX_VXRETA(i >> 2), vfreta);
			vfreta = 0;
		}
	}

	/* Perform hash on these packet types */
	vfmrqc |= WX_VXMRQC_RSS_ALG_IPV4 |
		  WX_VXMRQC_RSS_ALG_IPV4_TCP |
		  WX_VXMRQC_RSS_ALG_IPV6 |
		  WX_VXMRQC_RSS_ALG_IPV6_TCP;

	vfmrqc |= WX_VXMRQC_RSS_EN;

	/* hash field width scales with the number of RX queues */
	if (wx->num_rx_queues > 3)
		vfmrqc |= WX_VXMRQC_RSS_HASH(2);
	else if (wx->num_rx_queues > 1)
		vfmrqc |= WX_VXMRQC_RSS_HASH(1);
	wr32m(wx, WX_VXMRQC, WX_VXMRQC_RSS_MASK, WX_VXMRQC_RSS(vfmrqc));
}

/* Program one RX descriptor ring: base/size, head/tail, buffer sizes,
 * then enable the queue and pre-fill it with receive buffers.
 */
void wx_configure_rx_ring_vf(struct wx *wx, struct wx_ring *ring)
{
	u8 reg_idx = ring->reg_idx;
	union wx_rx_desc *rx_desc;
	u64 rdba = ring->dma;
	u32 rxdctl;

	/* disable queue to avoid issues while updating state */
	rxdctl = rd32(wx, WX_VXRXDCTL(reg_idx));
	wx_disable_rx_queue(wx, ring);

	wr32(wx, WX_VXRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
	wr32(wx, WX_VXRDBAH(reg_idx), rdba >> 32);

	/* enable relaxed ordering */
	pcie_capability_clear_and_set_word(wx->pdev, PCI_EXP_DEVCTL,
					   0, PCI_EXP_DEVCTL_RELAX_EN);

	/* reset head and tail pointers */
	wr32(wx, WX_VXRDH(reg_idx), 0);
	wr32(wx, WX_VXRDT(reg_idx), 0);
	ring->tail = wx->hw_addr + WX_VXRDT(reg_idx);

	/* initialize rx_buffer_info */
	memset(ring->rx_buffer_info, 0,
	       sizeof(struct wx_rx_buffer) * ring->count);

	/* initialize Rx descriptor 0 */
	rx_desc = WX_RX_DESC(ring, 0);
	rx_desc->wb.upper.length = 0;

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
	ring->next_to_alloc = 0;

	wx_configure_srrctl_vf(wx, ring, reg_idx);

	/* allow any size packet since we can handle overflow */
	rxdctl &= ~WX_VXRXDCTL_BUFLEN_MASK;
	rxdctl |= WX_VXRXDCTL_BUFLEN(wx_buf_len(ring->count));
	rxdctl |= WX_VXRXDCTL_ENABLE | WX_VXRXDCTL_VLAN;

	/* enable RSC */
	rxdctl &= ~WX_VXRXDCTL_RSCMAX_MASK;
	rxdctl |= WX_VXRXDCTL_RSCMAX(0);
	rxdctl |= WX_VXRXDCTL_RSCEN;

	wr32(wx, WX_VXRXDCTL(reg_idx), rxdctl);

	/* pf/vf reuse */
	wx_enable_rx_queue(wx, ring);
	wx_alloc_rx_buffers(ring, wx_desc_unused(ring));
}
+14
drivers/net/ethernet/wangxun/libwx/wx_vf_lib.h
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */

#ifndef _WX_VF_LIB_H_
#define _WX_VF_LIB_H_

/* Datapath/interrupt hardware programming shared by the VF drivers. */
void wx_configure_msix_vf(struct wx *wx);
int wx_write_uc_addr_list_vf(struct net_device *netdev);
void wx_setup_psrtype_vf(struct wx *wx);
void wx_setup_vfmrqc_vf(struct wx *wx);
void wx_configure_tx_vf(struct wx *wx);
void wx_configure_rx_ring_vf(struct wx *wx, struct wx_ring *ring);

#endif /* _WX_VF_LIB_H_ */
+9
drivers/net/ethernet/wangxun/ngbevf/Makefile
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd.
#
# Makefile for the Wangxun(R) 1GbE virtual functions driver
#

# Gate the VF driver on its own Kconfig symbol (CONFIG_NGBEVF), not the
# PF driver's CONFIG_NGBE; otherwise ngbevf builds whenever the PF
# driver is enabled, regardless of the VF configuration choice.
obj-$(CONFIG_NGBEVF) += ngbevf.o

ngbevf-objs := ngbevf_main.o
+261
drivers/net/ethernet/wangxun/ngbevf/ngbevf_main.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/etherdevice.h>

#include "../libwx/wx_type.h"
#include "../libwx/wx_hw.h"
#include "../libwx/wx_lib.h"
#include "../libwx/wx_mbx.h"
#include "../libwx/wx_vf.h"
#include "../libwx/wx_vf_common.h"
#include "ngbevf_type.h"

/* ngbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id ngbevf_pci_tbl[] = {
	{ PCI_VDEVICE(WANGXUN, NGBEVF_DEV_ID_EM_WX1860AL_W), 0},
	{ PCI_VDEVICE(WANGXUN, NGBEVF_DEV_ID_EM_WX1860A2), 0},
	{ PCI_VDEVICE(WANGXUN, NGBEVF_DEV_ID_EM_WX1860A2S), 0},
	{ PCI_VDEVICE(WANGXUN, NGBEVF_DEV_ID_EM_WX1860A4), 0},
	{ PCI_VDEVICE(WANGXUN, NGBEVF_DEV_ID_EM_WX1860A4S), 0},
	{ PCI_VDEVICE(WANGXUN, NGBEVF_DEV_ID_EM_WX1860AL2), 0},
	{ PCI_VDEVICE(WANGXUN, NGBEVF_DEV_ID_EM_WX1860AL2S), 0},
	{ PCI_VDEVICE(WANGXUN, NGBEVF_DEV_ID_EM_WX1860AL4), 0},
	{ PCI_VDEVICE(WANGXUN, NGBEVF_DEV_ID_EM_WX1860AL4S), 0},
	{ PCI_VDEVICE(WANGXUN, NGBEVF_DEV_ID_EM_WX1860NCSI), 0},
	{ PCI_VDEVICE(WANGXUN, NGBEVF_DEV_ID_EM_WX1860A1), 0},
	{ PCI_VDEVICE(WANGXUN, NGBEVF_DEV_ID_EM_WX1860AL1), 0},
	/* required last entry */
	{ .device = 0 }
};

/* All net_device callbacks come from the shared libwx VF code. */
static const struct net_device_ops ngbevf_netdev_ops = {
	.ndo_open		= wxvf_open,
	.ndo_stop		= wxvf_close,
	.ndo_start_xmit		= wx_xmit_frame,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= wx_set_mac_vf,
};

/* Report the fixed single-queue layout used by the 1GbE VF. */
static void ngbevf_set_num_queues(struct wx *wx)
{
+ /* Start with base case */ 55 + wx->num_rx_queues = 1; 56 + wx->num_tx_queues = 1; 57 + } 58 + 59 + static int ngbevf_sw_init(struct wx *wx) 60 + { 61 + struct net_device *netdev = wx->netdev; 62 + struct pci_dev *pdev = wx->pdev; 63 + int err; 64 + 65 + /* Initialize pcie info and common capability flags */ 66 + err = wx_sw_init(wx); 67 + if (err < 0) 68 + goto err_wx_sw_init; 69 + 70 + /* Initialize the mailbox */ 71 + err = wx_init_mbx_params_vf(wx); 72 + if (err) 73 + goto err_init_mbx_params; 74 + 75 + /* Initialize the device type */ 76 + wx->mac.type = wx_mac_em; 77 + wx->mac.max_msix_vectors = NGBEVF_MAX_MSIX_VECTORS; 78 + /* lock to protect mailbox accesses */ 79 + spin_lock_init(&wx->mbx.mbx_lock); 80 + 81 + err = wx_reset_hw_vf(wx); 82 + if (err) { 83 + wx_err(wx, "PF still in reset state. Is the PF interface up?\n"); 84 + goto err_reset_hw; 85 + } 86 + wx_init_hw_vf(wx); 87 + wx_negotiate_api_vf(wx); 88 + if (is_zero_ether_addr(wx->mac.addr)) 89 + dev_info(&pdev->dev, 90 + "MAC address not assigned by administrator.\n"); 91 + eth_hw_addr_set(netdev, wx->mac.addr); 92 + 93 + if (!is_valid_ether_addr(netdev->dev_addr)) { 94 + dev_info(&pdev->dev, "Assigning random MAC address\n"); 95 + eth_hw_addr_random(netdev); 96 + ether_addr_copy(wx->mac.addr, netdev->dev_addr); 97 + ether_addr_copy(wx->mac.perm_addr, netdev->dev_addr); 98 + } 99 + 100 + wx->mac.max_tx_queues = NGBEVF_MAX_TX_QUEUES; 101 + wx->mac.max_rx_queues = NGBEVF_MAX_RX_QUEUES; 102 + /* Enable dynamic interrupt throttling rates */ 103 + wx->rx_itr_setting = 1; 104 + wx->tx_itr_setting = 1; 105 + /* set default ring sizes */ 106 + wx->tx_ring_count = NGBEVF_DEFAULT_TXD; 107 + wx->rx_ring_count = NGBEVF_DEFAULT_RXD; 108 + /* set default work limits */ 109 + wx->tx_work_limit = NGBEVF_DEFAULT_TX_WORK; 110 + wx->rx_work_limit = NGBEVF_DEFAULT_RX_WORK; 111 + wx->set_num_queues = ngbevf_set_num_queues; 112 + 113 + return 0; 114 + err_reset_hw: 115 + kfree(wx->vfinfo); 116 + err_init_mbx_params: 117 
+ kfree(wx->rss_key); 118 + kfree(wx->mac_table); 119 + err_wx_sw_init: 120 + return err; 121 + } 122 + 123 + /** 124 + * ngbevf_probe - Device Initialization Routine 125 + * @pdev: PCI device information struct 126 + * @ent: entry in ngbevf_pci_tbl 127 + * 128 + * Return: return 0 on success, negative on failure 129 + * 130 + * ngbevf_probe initializes an adapter identified by a pci_dev structure. 131 + * The OS initialization, configuring of the adapter private structure, 132 + * and a hardware reset occur. 133 + **/ 134 + static int ngbevf_probe(struct pci_dev *pdev, 135 + const struct pci_device_id __always_unused *ent) 136 + { 137 + struct net_device *netdev; 138 + struct wx *wx = NULL; 139 + int err; 140 + 141 + err = pci_enable_device_mem(pdev); 142 + if (err) 143 + return err; 144 + 145 + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 146 + if (err) { 147 + dev_err(&pdev->dev, 148 + "No usable DMA configuration, aborting\n"); 149 + goto err_pci_disable_dev; 150 + } 151 + 152 + err = pci_request_selected_regions(pdev, 153 + pci_select_bars(pdev, IORESOURCE_MEM), 154 + dev_driver_string(&pdev->dev)); 155 + if (err) { 156 + dev_err(&pdev->dev, 157 + "pci_request_selected_regions failed 0x%x\n", err); 158 + goto err_pci_disable_dev; 159 + } 160 + 161 + pci_set_master(pdev); 162 + 163 + netdev = devm_alloc_etherdev_mqs(&pdev->dev, 164 + sizeof(struct wx), 165 + NGBEVF_MAX_TX_QUEUES, 166 + NGBEVF_MAX_RX_QUEUES); 167 + if (!netdev) { 168 + err = -ENOMEM; 169 + goto err_pci_release_regions; 170 + } 171 + 172 + SET_NETDEV_DEV(netdev, &pdev->dev); 173 + 174 + wx = netdev_priv(netdev); 175 + wx->netdev = netdev; 176 + wx->pdev = pdev; 177 + 178 + wx->msg_enable = netif_msg_init(-1, NETIF_MSG_DRV | 179 + NETIF_MSG_PROBE | NETIF_MSG_LINK); 180 + wx->hw_addr = devm_ioremap(&pdev->dev, 181 + pci_resource_start(pdev, 0), 182 + pci_resource_len(pdev, 0)); 183 + if (!wx->hw_addr) { 184 + err = -EIO; 185 + goto err_pci_release_regions; 186 + } 187 + 188 + 
netdev->netdev_ops = &ngbevf_netdev_ops; 189 + 190 + /* setup the private structure */ 191 + err = ngbevf_sw_init(wx); 192 + if (err) 193 + goto err_pci_release_regions; 194 + 195 + netdev->features |= NETIF_F_HIGHDMA; 196 + 197 + eth_hw_addr_set(netdev, wx->mac.perm_addr); 198 + ether_addr_copy(netdev->perm_addr, wx->mac.addr); 199 + 200 + wxvf_init_service(wx); 201 + err = wx_init_interrupt_scheme(wx); 202 + if (err) 203 + goto err_free_sw_init; 204 + 205 + err = register_netdev(netdev); 206 + if (err) 207 + goto err_register; 208 + 209 + pci_set_drvdata(pdev, wx); 210 + netif_tx_stop_all_queues(netdev); 211 + 212 + return 0; 213 + 214 + err_register: 215 + wx_clear_interrupt_scheme(wx); 216 + err_free_sw_init: 217 + timer_delete_sync(&wx->service_timer); 218 + cancel_work_sync(&wx->service_task); 219 + kfree(wx->vfinfo); 220 + kfree(wx->rss_key); 221 + kfree(wx->mac_table); 222 + err_pci_release_regions: 223 + pci_release_selected_regions(pdev, 224 + pci_select_bars(pdev, IORESOURCE_MEM)); 225 + err_pci_disable_dev: 226 + pci_disable_device(pdev); 227 + return err; 228 + } 229 + 230 + /** 231 + * ngbevf_remove - Device Removal Routine 232 + * @pdev: PCI device information struct 233 + * 234 + * ngbevf_remove is called by the PCI subsystem to alert the driver 235 + * that it should release a PCI device. The could be caused by a 236 + * Hot-Plug event, or because the driver is going to be removed from 237 + * memory. 
238 + **/ 239 + static void ngbevf_remove(struct pci_dev *pdev) 240 + { 241 + wxvf_remove(pdev); 242 + } 243 + 244 + static DEFINE_SIMPLE_DEV_PM_OPS(ngbevf_pm_ops, wxvf_suspend, wxvf_resume); 245 + 246 + static struct pci_driver ngbevf_driver = { 247 + .name = KBUILD_MODNAME, 248 + .id_table = ngbevf_pci_tbl, 249 + .probe = ngbevf_probe, 250 + .remove = ngbevf_remove, 251 + .shutdown = wxvf_shutdown, 252 + /* Power Management Hooks */ 253 + .driver.pm = pm_sleep_ptr(&ngbevf_pm_ops) 254 + }; 255 + 256 + module_pci_driver(ngbevf_driver); 257 + 258 + MODULE_DEVICE_TABLE(pci, ngbevf_pci_tbl); 259 + MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, <software@trustnetic.com>"); 260 + MODULE_DESCRIPTION("WangXun(R) Gigabit PCI Express Network Driver"); 261 + MODULE_LICENSE("GPL");
+29
drivers/net/ethernet/wangxun/ngbevf/ngbevf_type.h
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */

/* ngbevf_type.h - PCI device IDs and hardware limits for the Wangxun
 * gigabit (EM) virtual function driver.
 */

#ifndef _NGBEVF_TYPE_H_
#define _NGBEVF_TYPE_H_

/* Device IDs */
#define NGBEVF_DEV_ID_EM_WX1860AL_W		0x0110
#define NGBEVF_DEV_ID_EM_WX1860A2		0x0111
#define NGBEVF_DEV_ID_EM_WX1860A2S		0x0112
#define NGBEVF_DEV_ID_EM_WX1860A4		0x0113
#define NGBEVF_DEV_ID_EM_WX1860A4S		0x0114
#define NGBEVF_DEV_ID_EM_WX1860AL2		0x0115
#define NGBEVF_DEV_ID_EM_WX1860AL2S		0x0116
#define NGBEVF_DEV_ID_EM_WX1860AL4		0x0117
#define NGBEVF_DEV_ID_EM_WX1860AL4S		0x0118
#define NGBEVF_DEV_ID_EM_WX1860NCSI		0x0119
#define NGBEVF_DEV_ID_EM_WX1860A1		0x011a
#define NGBEVF_DEV_ID_EM_WX1860AL1		0x011b

/* Resource limits: the gigabit VF has a single Rx/Tx queue pair and a
 * single MSI-X vector.
 */
#define NGBEVF_MAX_MSIX_VECTORS			1
#define NGBEVF_MAX_RX_QUEUES			1
#define NGBEVF_MAX_TX_QUEUES			1
/* default descriptor-ring sizes and per-poll work budgets */
#define NGBEVF_DEFAULT_TXD			128
#define NGBEVF_DEFAULT_RXD			128
#define NGBEVF_DEFAULT_TX_WORK			256
#define NGBEVF_DEFAULT_RX_WORK			256

#endif /* _NGBEVF_TYPE_H_ */
+9
drivers/net/ethernet/wangxun/txgbevf/Makefile
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd.
#
# Makefile for the Wangxun(R) 10/25/40GbE virtual functions driver
#

# Gate the build on the VF driver's own Kconfig symbol (TXGBEVF), not the
# PF driver's (TXGBE): with CONFIG_TXGBE the VF module would be built
# whenever the PF driver is selected and could never be configured
# independently.
obj-$(CONFIG_TXGBEVF) += txgbevf.o

txgbevf-objs := txgbevf_main.o
+314
drivers/net/ethernet/wangxun/txgbevf/txgbevf_main.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */

/* txgbevf_main.c - PCI glue for the Wangxun 10/25/40G SR-IOV virtual
 * function.  All datapath and mailbox logic lives in ../libwx; this file
 * binds the PCI IDs, resolves the MAC generation, fills in per-device
 * limits and registers the netdev.
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/etherdevice.h>

#include "../libwx/wx_type.h"
#include "../libwx/wx_hw.h"
#include "../libwx/wx_lib.h"
#include "../libwx/wx_mbx.h"
#include "../libwx/wx_vf.h"
#include "../libwx/wx_vf_common.h"
#include "txgbevf_type.h"

/* txgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id txgbevf_pci_tbl[] = {
	{ PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_SP1000), 0},
	{ PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_WX1820), 0},
	{ PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_AML500F), 0},
	{ PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_AML510F), 0},
	{ PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_AML5024), 0},
	{ PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_AML5124), 0},
	{ PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_AML503F), 0},
	{ PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_AML513F), 0},
	/* required last entry */
	{ .device = 0 }
};

/* All ndo callbacks are the shared libwx VF / common-datapath
 * implementations (wx_vf_common.h, wx_lib.h, wx_vf.h); nothing is
 * txgbevf-specific here.
 */
static const struct net_device_ops txgbevf_netdev_ops = {
	.ndo_open               = wxvf_open,
	.ndo_stop               = wxvf_close,
	.ndo_start_xmit         = wx_xmit_frame,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_mac_address    = wx_set_mac_vf,
};

/**
 * txgbevf_set_num_queues - set the active queue counts for this VF
 * @wx: board private structure
 *
 * Queries the PF (under the mailbox lock) for traffic-class and queue
 * configuration.  If the query fails the driver stays with a single
 * queue pair; with multiple traffic classes Rx gets one queue per TC;
 * otherwise RSS sizes both directions, but only when the negotiated
 * mailbox API is at least wx_mbox_api_13.
 * Installed as wx->set_num_queues by txgbevf_sw_init().
 */
static void txgbevf_set_num_queues(struct wx *wx)
{
	u32 def_q = 0, num_tcs = 0;
	u16 rss, queue;
	int ret = 0;

	/* Start with base case */
	wx->num_rx_queues = 1;
	wx->num_tx_queues = 1;

	spin_lock_bh(&wx->mbx.mbx_lock);
	/* fetch queue configuration from the PF */
	ret = wx_get_queues_vf(wx, &num_tcs, &def_q);
	spin_unlock_bh(&wx->mbx.mbx_lock);

	/* on mailbox failure keep the single-queue base case */
	if (ret)
		return;

	/* we need as many queues as traffic classes */
	if (num_tcs > 1) {
		wx->num_rx_queues = num_tcs;
	} else {
		/* RSS width: bounded by online CPUs, the driver cap and the
		 * hardware queue limits
		 */
		rss = min_t(u16, num_online_cpus(), TXGBEVF_MAX_RSS_NUM);
		queue = min_t(u16, wx->mac.max_rx_queues, wx->mac.max_tx_queues);
		rss = min_t(u16, queue, rss);

		if (wx->vfinfo->vf_api >= wx_mbox_api_13) {
			wx->num_rx_queues = rss;
			wx->num_tx_queues = rss;
		}
	}
}

/**
 * txgbevf_init_type_code - derive the MAC generation from the PCI ID
 * @wx: board private structure
 *
 * SP1000/WX1820 are wx_mac_sp parts, the AML* IDs are wx_mac_aml;
 * anything else is marked unknown.
 */
static void txgbevf_init_type_code(struct wx *wx)
{
	switch (wx->device_id) {
	case TXGBEVF_DEV_ID_SP1000:
	case TXGBEVF_DEV_ID_WX1820:
		wx->mac.type = wx_mac_sp;
		break;
	case TXGBEVF_DEV_ID_AML500F:
	case TXGBEVF_DEV_ID_AML510F:
	case TXGBEVF_DEV_ID_AML5024:
	case TXGBEVF_DEV_ID_AML5124:
	case TXGBEVF_DEV_ID_AML503F:
	case TXGBEVF_DEV_ID_AML513F:
		wx->mac.type = wx_mac_aml;
		break;
	default:
		wx->mac.type = wx_mac_unknown;
		break;
	}
}

/**
 * txgbevf_sw_init - initialize the software state of the adapter
 * @wx: board private structure
 *
 * Sets up PCIe/capability info, the PF mailbox, performs the VF hardware
 * reset handshake, establishes the MAC address and fills in the default
 * queue/ring/ITR parameters.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int txgbevf_sw_init(struct wx *wx)
{
	struct net_device *netdev = wx->netdev;
	struct pci_dev *pdev = wx->pdev;
	int err;

	/* Initialize pcie info and common capability flags */
	err = wx_sw_init(wx);
	if (err < 0)
		goto err_wx_sw_init;

	/* Initialize the mailbox; must exist before any PF communication
	 * (reset/API negotiation below go through it).
	 */
	err = wx_init_mbx_params_vf(wx);
	if (err)
		goto err_init_mbx_params;

	/* max q_vectors */
	wx->mac.max_msix_vectors = TXGBEVF_MAX_MSIX_VECTORS;
	/* Initialize the device type */
	txgbevf_init_type_code(wx);
	/* lock to protect mailbox accesses */
	spin_lock_init(&wx->mbx.mbx_lock);

	/* VF reset is a mailbox handshake with the PF; it fails while the
	 * PF itself has not finished initializing.
	 */
	err = wx_reset_hw_vf(wx);
	if (err) {
		wx_err(wx, "PF still in reset state. Is the PF interface up?\n");
		goto err_reset_hw;
	}
	wx_init_hw_vf(wx);
	wx_negotiate_api_vf(wx);
	/* wx->mac.addr is what the PF handed us during reset; all-zero means
	 * the host administrator did not assign one.
	 */
	if (is_zero_ether_addr(wx->mac.addr))
		dev_info(&pdev->dev,
			 "MAC address not assigned by administrator.\n");
	eth_hw_addr_set(netdev, wx->mac.addr);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_info(&pdev->dev, "Assigning random MAC address\n");
		eth_hw_addr_random(netdev);
		ether_addr_copy(wx->mac.addr, netdev->dev_addr);
		ether_addr_copy(wx->mac.perm_addr, netdev->dev_addr);
	}

	wx->mac.max_tx_queues = TXGBEVF_MAX_TX_QUEUES;
	wx->mac.max_rx_queues = TXGBEVF_MAX_RX_QUEUES;
	/* Enable dynamic interrupt throttling rates */
	wx->rx_itr_setting = 1;
	wx->tx_itr_setting = 1;
	/* set default ring sizes */
	wx->tx_ring_count = TXGBEVF_DEFAULT_TXD;
	wx->rx_ring_count = TXGBEVF_DEFAULT_RXD;
	/* set default work limits */
	wx->tx_work_limit = TXGBEVF_DEFAULT_TX_WORK;
	wx->rx_work_limit = TXGBEVF_DEFAULT_RX_WORK;

	wx->set_num_queues = txgbevf_set_num_queues;

	return 0;
	/* Unwind in reverse order of the setup above.  NOTE(review): assumes
	 * vfinfo comes from wx_init_mbx_params_vf() and rss_key/mac_table
	 * from wx_sw_init() — confirm against libwx.
	 */
err_reset_hw:
	kfree(wx->vfinfo);
err_init_mbx_params:
	kfree(wx->rss_key);
	kfree(wx->mac_table);
err_wx_sw_init:
	return err;
}

/**
 * txgbevf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in txgbevf_pci_tbl
 *
 * Return: return 0 on success, negative on failure
 *
 * txgbevf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int txgbevf_probe(struct pci_dev *pdev,
			 const struct pci_device_id __always_unused *ent)
{
	struct net_device *netdev;
	struct wx *wx = NULL;
	int err;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev,
			"No usable DMA configuration, aborting\n");
		goto err_pci_disable_dev;
	}

	err = pci_request_selected_regions(pdev,
					   pci_select_bars(pdev, IORESOURCE_MEM),
					   dev_driver_string(&pdev->dev));
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_selected_regions failed 0x%x\n", err);
		goto err_pci_disable_dev;
	}

	pci_set_master(pdev);

	/* netdev is device-managed; no explicit free in the error paths */
	netdev = devm_alloc_etherdev_mqs(&pdev->dev,
					 sizeof(struct wx),
					 TXGBEVF_MAX_TX_QUEUES,
					 TXGBEVF_MAX_RX_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_pci_release_regions;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	wx = netdev_priv(netdev);
	wx->netdev = netdev;
	wx->pdev = pdev;

	wx->msg_enable = netif_msg_init(-1, NETIF_MSG_DRV |
					NETIF_MSG_PROBE | NETIF_MSG_LINK);
	/* map BAR 0 (device register space) */
	wx->hw_addr = devm_ioremap(&pdev->dev,
				   pci_resource_start(pdev, 0),
				   pci_resource_len(pdev, 0));
	if (!wx->hw_addr) {
		err = -EIO;
		goto err_pci_release_regions;
	}

	/* map BAR 4 as well — unlike ngbevf this device exposes a second
	 * memory region (b4_addr); see its users in libwx for semantics
	 */
	wx->b4_addr = devm_ioremap(&pdev->dev,
				   pci_resource_start(pdev, 4),
				   pci_resource_len(pdev, 4));
	if (!wx->b4_addr) {
		err = -EIO;
		goto err_pci_release_regions;
	}

	netdev->netdev_ops = &txgbevf_netdev_ops;

	/* setup the private structure */
	err = txgbevf_sw_init(wx);
	if (err)
		goto err_pci_release_regions;

	netdev->features |= NETIF_F_HIGHDMA;

	eth_hw_addr_set(netdev, wx->mac.perm_addr);
	ether_addr_copy(netdev->perm_addr, wx->mac.addr);

	wxvf_init_service(wx);
	err = wx_init_interrupt_scheme(wx);
	if (err)
		goto err_free_sw_init;

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	pci_set_drvdata(pdev, wx);
	/* keep the stack from transmitting until link is reported —
	 * presumably driven by the libwx service task; verify in libwx.
	 */
	netif_tx_stop_all_queues(netdev);

	return 0;

err_register:
	wx_clear_interrupt_scheme(wx);
err_free_sw_init:
	/* stop the service machinery started by wxvf_init_service() and
	 * drop the allocations made in txgbevf_sw_init()
	 */
	timer_delete_sync(&wx->service_timer);
	cancel_work_sync(&wx->service_task);
	kfree(wx->vfinfo);
	kfree(wx->rss_key);
	kfree(wx->mac_table);
err_pci_release_regions:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_disable_dev:
	pci_disable_device(pdev);
	return err;
}

/**
 * txgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * txgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. The could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void txgbevf_remove(struct pci_dev *pdev)
{
	/* all teardown is common VF code in libwx */
	wxvf_remove(pdev);
}

static DEFINE_SIMPLE_DEV_PM_OPS(txgbevf_pm_ops, wxvf_suspend, wxvf_resume);

static struct pci_driver txgbevf_driver = {
	.name     = KBUILD_MODNAME,
	.id_table = txgbevf_pci_tbl,
	.probe    = txgbevf_probe,
	.remove   = txgbevf_remove,
	.shutdown = wxvf_shutdown,
	/* Power Management Hooks */
	.driver.pm = pm_sleep_ptr(&txgbevf_pm_ops)
};

module_pci_driver(txgbevf_driver);

MODULE_DEVICE_TABLE(pci, txgbevf_pci_tbl);
MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, <software@trustnetic.com>");
MODULE_DESCRIPTION("WangXun(R) 10/25/40 Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL");
+26
drivers/net/ethernet/wangxun/txgbevf/txgbevf_type.h
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */

/* txgbevf_type.h - PCI device IDs and hardware limits for the Wangxun
 * 10/25/40G virtual function driver.  SP1000/WX1820 are the sp
 * generation, AML* the aml generation (see txgbevf_init_type_code()).
 */

#ifndef _TXGBEVF_TYPE_H_
#define _TXGBEVF_TYPE_H_

/* Device IDs */
#define TXGBEVF_DEV_ID_SP1000			0x1000
#define TXGBEVF_DEV_ID_WX1820			0x2000
#define TXGBEVF_DEV_ID_AML500F			0x500F
#define TXGBEVF_DEV_ID_AML510F			0x510F
#define TXGBEVF_DEV_ID_AML5024			0x5024
#define TXGBEVF_DEV_ID_AML5124			0x5124
#define TXGBEVF_DEV_ID_AML503F			0x503f
#define TXGBEVF_DEV_ID_AML513F			0x513f

/* Resource limits: up to four queue pairs (RSS-capped at four) and two
 * MSI-X vectors.
 */
#define TXGBEVF_MAX_MSIX_VECTORS		2
#define TXGBEVF_MAX_RSS_NUM			4
#define TXGBEVF_MAX_RX_QUEUES			4
#define TXGBEVF_MAX_TX_QUEUES			4
/* default descriptor-ring sizes and per-poll work budgets */
#define TXGBEVF_DEFAULT_TXD			128
#define TXGBEVF_DEFAULT_RXD			128
#define TXGBEVF_DEFAULT_TX_WORK			256
#define TXGBEVF_DEFAULT_RX_WORK			256

#endif /* _TXGBEVF_TYPE_H_ */