Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/linux

Tony Nguyen says:

====================
Add RDMA support for Intel IPU E2000 in idpf

Tatyana Nikolova says:

This idpf patch series is the second part of the staged submission for
introducing RDMA RoCEv2 support for the IPU E2000 line of products,
referred to as GEN3.

To support RDMA GEN3 devices, the idpf driver uses common definitions
of the IIDC interface and implements specific device functionality in
iidc_rdma_idpf.h.

The IPU model can host one or more logical network endpoints called
vPorts per PCI function that are flexibly associated with a physical
port or an internal communication port.

Other features as they pertain to GEN3 devices include:
* MMIO learning
* RDMA capability negotiation
* RDMA vectors discovery between idpf and control plane

These patches are split from the submission "Add RDMA support for Intel
IPU E2000 (GEN3)" [1]. The patches have been tested on a range of hosts
and platforms with a variety of general RDMA applications, including
standalone verbs (rping, perftest, etc.), storage, and HPC applications.

Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>

[1] https://lore.kernel.org/all/20240724233917.704-1-tatyana.e.nikolova@intel.com/
This idpf patch series is the second part of the staged submission for
introducing RDMA RoCEv2 support for the IPU E2000 line of products,
referred to as GEN3.

To support RDMA GEN3 devices, the idpf driver uses common definitions
of the IIDC interface and implements specific device functionality in
iidc_rdma_idpf.h.

The IPU model can host one or more logical network endpoints called
vPorts per PCI function that are flexibly associated with a physical
port or an internal communication port.

Other features as they pertain to GEN3 devices include:
* MMIO learning
* RDMA capability negotiation
* RDMA vectors discovery between idpf and control plane

These patches are split from the submission "Add RDMA support for Intel
IPU E2000 (GEN3)" [1]. The patches have been tested on a range of hosts
and platforms with a variety of general RDMA applications, including
standalone verbs (rping, perftest, etc.), storage, and HPC applications.

Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>

[1] https://lore.kernel.org/all/20240724233917.704-1-tatyana.e.nikolova@intel.com/

IWL reviews:
v3: https://lore.kernel.org/all/20250708210554.1662-1-tatyana.e.nikolova@intel.com/
v2: https://lore.kernel.org/all/20250612220002.1120-1-tatyana.e.nikolova@intel.com/
v1 (split from previous series):
https://lore.kernel.org/all/20250523170435.668-1-tatyana.e.nikolova@intel.com/

v3: https://lore.kernel.org/all/20250207194931.1569-1-tatyana.e.nikolova@intel.com/
RFC v2: https://lore.kernel.org/all/20240824031924.421-1-tatyana.e.nikolova@intel.com/
RFC: https://lore.kernel.org/all/20240724233917.704-1-tatyana.e.nikolova@intel.com/

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/linux:
idpf: implement get LAN MMIO memory regions
idpf: implement IDC vport aux driver MTU change handler
idpf: implement remaining IDC RDMA core callbacks and handlers
idpf: implement RDMA vport auxiliary dev create, init, and destroy
idpf: implement core RDMA auxiliary dev create, init, and destroy
idpf: use reserved RDMA vectors from control plane
====================

Link: https://patch.msgid.link/20250714181002.2865694-1-anthony.l.nguyen@intel.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>

+1107 -74
+1
drivers/net/ethernet/intel/idpf/Makefile
··· 10 10 idpf_controlq_setup.o \ 11 11 idpf_dev.o \ 12 12 idpf_ethtool.o \ 13 + idpf_idc.o \ 13 14 idpf_lib.o \ 14 15 idpf_main.o \ 15 16 idpf_txrx.o \
+111 -5
drivers/net/ethernet/intel/idpf/idpf.h
··· 12 12 #include <net/pkt_sched.h> 13 13 #include <linux/aer.h> 14 14 #include <linux/etherdevice.h> 15 + #include <linux/ioport.h> 15 16 #include <linux/pci.h> 16 17 #include <linux/bitfield.h> 17 18 #include <linux/sctp.h> 18 19 #include <linux/ethtool_netlink.h> 19 20 #include <net/gro.h> 21 + 22 + #include <linux/net/intel/iidc_rdma.h> 23 + #include <linux/net/intel/iidc_rdma_idpf.h> 20 24 21 25 #include "virtchnl2.h" 22 26 #include "idpf_txrx.h" ··· 198 194 * @ptp_reg_init: PTP register initialization 199 195 */ 200 196 struct idpf_reg_ops { 201 - void (*ctlq_reg_init)(struct idpf_ctlq_create_info *cq); 197 + void (*ctlq_reg_init)(struct idpf_adapter *adapter, 198 + struct idpf_ctlq_create_info *cq); 202 199 int (*intr_reg_init)(struct idpf_vport *vport); 203 200 void (*mb_intr_reg_init)(struct idpf_adapter *adapter); 204 201 void (*reset_reg_init)(struct idpf_adapter *adapter); ··· 208 203 void (*ptp_reg_init)(const struct idpf_adapter *adapter); 209 204 }; 210 205 206 + #define IDPF_MMIO_REG_NUM_STATIC 2 207 + #define IDPF_PF_MBX_REGION_SZ 4096 208 + #define IDPF_PF_RSTAT_REGION_SZ 2048 209 + #define IDPF_VF_MBX_REGION_SZ 10240 210 + #define IDPF_VF_RSTAT_REGION_SZ 2048 211 + 211 212 /** 212 213 * struct idpf_dev_ops - Device specific operations 213 214 * @reg_ops: Register operations 215 + * @idc_init: IDC initialization 216 + * @static_reg_info: array of mailbox and rstat register info 214 217 */ 215 218 struct idpf_dev_ops { 216 219 struct idpf_reg_ops reg_ops; 220 + 221 + int (*idc_init)(struct idpf_adapter *adapter); 222 + 223 + /* static_reg_info[0] is mailbox region, static_reg_info[1] is rstat */ 224 + struct resource static_reg_info[IDPF_MMIO_REG_NUM_STATIC]; 217 225 }; 218 226 219 227 /** ··· 293 275 * group will yield total number of RX queues. 
294 276 * @rxq_model: Splitq queue or single queue queuing model 295 277 * @rx_ptype_lkup: Lookup table for ptypes on RX 278 + * @vdev_info: IDC vport device info pointer 296 279 * @adapter: back pointer to associated adapter 297 280 * @netdev: Associated net_device. Each vport should have one and only one 298 281 * associated netdev. ··· 338 319 struct idpf_rxq_group *rxq_grps; 339 320 u32 rxq_model; 340 321 struct libeth_rx_pt *rx_ptype_lkup; 322 + 323 + struct iidc_rdma_vport_dev_info *vdev_info; 341 324 342 325 struct idpf_adapter *adapter; 343 326 struct net_device *netdev; ··· 528 507 * @flags: See enum idpf_flags 529 508 * @reset_reg: See struct idpf_reset_reg 530 509 * @hw: Device access data 531 - * @num_req_msix: Requested number of MSIX vectors 532 510 * @num_avail_msix: Available number of MSIX vectors 533 511 * @num_msix_entries: Number of entries in MSIX table 534 512 * @msix_entries: MSIX table 513 + * @num_rdma_msix_entries: Available number of MSIX vectors for RDMA 514 + * @rdma_msix_entries: RDMA MSIX table 535 515 * @req_vec_chunks: Requested vector chunk data 536 516 * @mb_vector: Mailbox vector data 537 517 * @vector_stack: Stack to store the msix vector indexes ··· 561 539 * @caps: Negotiated capabilities with device 562 540 * @vcxn_mngr: Virtchnl transaction manager 563 541 * @dev_ops: See idpf_dev_ops 542 + * @cdev_info: IDC core device info pointer 564 543 * @num_vfs: Number of allocated VFs through sysfs. 
PF does not directly talk 565 544 * to VFs but is used to initialize them 566 545 * @crc_enable: Enable CRC insertion offload ··· 584 561 DECLARE_BITMAP(flags, IDPF_FLAGS_NBITS); 585 562 struct idpf_reset_reg reset_reg; 586 563 struct idpf_hw hw; 587 - u16 num_req_msix; 588 564 u16 num_avail_msix; 589 565 u16 num_msix_entries; 590 566 struct msix_entry *msix_entries; 567 + u16 num_rdma_msix_entries; 568 + struct msix_entry *rdma_msix_entries; 591 569 struct virtchnl2_alloc_vectors *req_vec_chunks; 592 570 struct idpf_q_vector mb_vector; 593 571 struct idpf_vector_lifo vector_stack; ··· 621 597 struct idpf_vc_xn_manager *vcxn_mngr; 622 598 623 599 struct idpf_dev_ops dev_ops; 600 + struct iidc_rdma_core_dev_info *cdev_info; 624 601 int num_vfs; 625 602 bool crc_enable; 626 603 bool req_tx_splitq; ··· 654 629 655 630 bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all, 656 631 enum idpf_cap_field field, u64 flag); 632 + 633 + /** 634 + * idpf_is_rdma_cap_ena - Determine if RDMA is supported 635 + * @adapter: private data struct 636 + * 637 + * Return: true if RDMA capability is enabled, false otherwise 638 + */ 639 + static inline bool idpf_is_rdma_cap_ena(struct idpf_adapter *adapter) 640 + { 641 + return idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_RDMA); 642 + } 657 643 658 644 #define IDPF_CAP_RSS (\ 659 645 VIRTCHNL2_CAP_RSS_IPV4_TCP |\ ··· 719 683 } 720 684 721 685 /** 686 + * idpf_get_reserved_rdma_vecs - Get reserved RDMA vectors 687 + * @adapter: private data struct 688 + * 689 + * Return: number of vectors reserved for RDMA 690 + */ 691 + static inline u16 idpf_get_reserved_rdma_vecs(struct idpf_adapter *adapter) 692 + { 693 + return le16_to_cpu(adapter->caps.num_rdma_allocated_vectors); 694 + } 695 + 696 + /** 722 697 * idpf_get_default_vports - Get default number of vports 723 698 * @adapter: private data struct 724 699 */ ··· 768 721 } 769 722 770 723 /** 724 + * idpf_get_mbx_reg_addr - Get BAR0 mailbox register address 725 + * 
@adapter: private data struct 726 + * @reg_offset: register offset value 727 + * 728 + * Return: BAR0 mailbox register address based on register offset. 729 + */ 730 + static inline void __iomem *idpf_get_mbx_reg_addr(struct idpf_adapter *adapter, 731 + resource_size_t reg_offset) 732 + { 733 + return adapter->hw.mbx.vaddr + reg_offset; 734 + } 735 + 736 + /** 737 + * idpf_get_rstat_reg_addr - Get BAR0 rstat register address 738 + * @adapter: private data struct 739 + * @reg_offset: register offset value 740 + * 741 + * Return: BAR0 rstat register address based on register offset. 742 + */ 743 + static inline void __iomem *idpf_get_rstat_reg_addr(struct idpf_adapter *adapter, 744 + resource_size_t reg_offset) 745 + { 746 + reg_offset -= adapter->dev_ops.static_reg_info[1].start; 747 + 748 + return adapter->hw.rstat.vaddr + reg_offset; 749 + } 750 + 751 + /** 771 752 * idpf_get_reg_addr - Get BAR0 register address 772 753 * @adapter: private data struct 773 754 * @reg_offset: register offset value ··· 805 730 static inline void __iomem *idpf_get_reg_addr(struct idpf_adapter *adapter, 806 731 resource_size_t reg_offset) 807 732 { 808 - return (void __iomem *)(adapter->hw.hw_addr + reg_offset); 733 + struct idpf_hw *hw = &adapter->hw; 734 + 735 + for (int i = 0; i < hw->num_lan_regs; i++) { 736 + struct idpf_mmio_reg *region = &hw->lan_regs[i]; 737 + 738 + if (reg_offset >= region->addr_start && 739 + reg_offset < (region->addr_start + region->addr_len)) { 740 + /* Convert the offset so that it is relative to the 741 + * start of the region. Then add the base address of 742 + * the region to get the final address. 743 + */ 744 + reg_offset -= region->addr_start; 745 + 746 + return region->vaddr + reg_offset; 747 + } 748 + } 749 + 750 + /* It's impossible to hit this case with offsets from the CP. But if we 751 + * do for any other reason, the kernel will panic on that register 752 + * access. Might as well do it here to make it clear what's happening. 
753 + */ 754 + BUG(); 755 + 756 + return NULL; 809 757 } 810 758 811 759 /** ··· 842 744 if (!adapter->hw.arq) 843 745 return true; 844 746 845 - return !(readl(idpf_get_reg_addr(adapter, adapter->hw.arq->reg.len)) & 747 + return !(readl(idpf_get_mbx_reg_addr(adapter, adapter->hw.arq->reg.len)) & 846 748 adapter->hw.arq->reg.len_mask); 847 749 } 848 750 ··· 951 853 952 854 u8 idpf_vport_get_hsplit(const struct idpf_vport *vport); 953 855 bool idpf_vport_set_hsplit(const struct idpf_vport *vport, u8 val); 856 + int idpf_idc_init(struct idpf_adapter *adapter); 857 + int idpf_idc_init_aux_core_dev(struct idpf_adapter *adapter, 858 + enum iidc_function_type ftype); 859 + void idpf_idc_deinit_core_aux_device(struct iidc_rdma_core_dev_info *cdev_info); 860 + void idpf_idc_deinit_vport_aux_device(struct iidc_rdma_vport_dev_info *vdev_info); 861 + void idpf_idc_issue_reset_event(struct iidc_rdma_core_dev_info *cdev_info); 862 + void idpf_idc_vdev_mtu_event(struct iidc_rdma_vport_dev_info *vdev_info, 863 + enum iidc_rdma_event_type event_type); 954 864 955 865 #endif /* !_IDPF_H_ */
+7 -7
drivers/net/ethernet/intel/idpf/idpf_controlq.c
··· 36 36 { 37 37 /* Update tail to post pre-allocated buffers for rx queues */ 38 38 if (is_rxq) 39 - wr32(hw, cq->reg.tail, (u32)(cq->ring_size - 1)); 39 + idpf_mbx_wr32(hw, cq->reg.tail, (u32)(cq->ring_size - 1)); 40 40 41 41 /* For non-Mailbox control queues only TAIL need to be set */ 42 42 if (cq->q_id != -1) 43 43 return; 44 44 45 45 /* Clear Head for both send or receive */ 46 - wr32(hw, cq->reg.head, 0); 46 + idpf_mbx_wr32(hw, cq->reg.head, 0); 47 47 48 48 /* set starting point */ 49 - wr32(hw, cq->reg.bal, lower_32_bits(cq->desc_ring.pa)); 50 - wr32(hw, cq->reg.bah, upper_32_bits(cq->desc_ring.pa)); 51 - wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask)); 49 + idpf_mbx_wr32(hw, cq->reg.bal, lower_32_bits(cq->desc_ring.pa)); 50 + idpf_mbx_wr32(hw, cq->reg.bah, upper_32_bits(cq->desc_ring.pa)); 51 + idpf_mbx_wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask)); 52 52 } 53 53 54 54 /** ··· 328 328 */ 329 329 dma_wmb(); 330 330 331 - wr32(hw, cq->reg.tail, cq->next_to_use); 331 + idpf_mbx_wr32(hw, cq->reg.tail, cq->next_to_use); 332 332 333 333 err_unlock: 334 334 spin_unlock(&cq->cq_lock); ··· 520 520 521 521 dma_wmb(); 522 522 523 - wr32(hw, cq->reg.tail, cq->next_to_post); 523 + idpf_mbx_wr32(hw, cq->reg.tail, cq->next_to_post); 524 524 } 525 525 526 526 spin_unlock(&cq->cq_lock);
+16 -2
drivers/net/ethernet/intel/idpf/idpf_controlq.h
··· 94 94 u32 pf_vf_id; /* used by CP when sending to PF */ 95 95 }; 96 96 97 + /* Max number of MMIO regions not including the mailbox and rstat regions in 98 + * the fallback case when the whole bar is mapped. 99 + */ 100 + #define IDPF_MMIO_MAP_FALLBACK_MAX_REMAINING 3 101 + 102 + struct idpf_mmio_reg { 103 + void __iomem *vaddr; 104 + resource_size_t addr_start; 105 + resource_size_t addr_len; 106 + }; 107 + 97 108 /* Define the driver hardware struct to replace other control structs as needed 98 109 * Align to ctlq_hw_info 99 110 */ 100 111 struct idpf_hw { 101 - void __iomem *hw_addr; 102 - resource_size_t hw_addr_len; 112 + struct idpf_mmio_reg mbx; 113 + struct idpf_mmio_reg rstat; 114 + /* Array of remaining LAN BAR regions */ 115 + int num_lan_regs; 116 + struct idpf_mmio_reg *lan_regs; 103 117 104 118 struct idpf_adapter *back; 105 119
+35 -14
drivers/net/ethernet/intel/idpf/idpf_dev.c
··· 10 10 11 11 /** 12 12 * idpf_ctlq_reg_init - initialize default mailbox registers 13 + * @adapter: adapter structure 13 14 * @cq: pointer to the array of create control queues 14 15 */ 15 - static void idpf_ctlq_reg_init(struct idpf_ctlq_create_info *cq) 16 + static void idpf_ctlq_reg_init(struct idpf_adapter *adapter, 17 + struct idpf_ctlq_create_info *cq) 16 18 { 19 + resource_size_t mbx_start = adapter->dev_ops.static_reg_info[0].start; 17 20 int i; 18 21 19 22 for (i = 0; i < IDPF_NUM_DFLT_MBX_Q; i++) { ··· 25 22 switch (ccq->type) { 26 23 case IDPF_CTLQ_TYPE_MAILBOX_TX: 27 24 /* set head and tail registers in our local struct */ 28 - ccq->reg.head = PF_FW_ATQH; 29 - ccq->reg.tail = PF_FW_ATQT; 30 - ccq->reg.len = PF_FW_ATQLEN; 31 - ccq->reg.bah = PF_FW_ATQBAH; 32 - ccq->reg.bal = PF_FW_ATQBAL; 25 + ccq->reg.head = PF_FW_ATQH - mbx_start; 26 + ccq->reg.tail = PF_FW_ATQT - mbx_start; 27 + ccq->reg.len = PF_FW_ATQLEN - mbx_start; 28 + ccq->reg.bah = PF_FW_ATQBAH - mbx_start; 29 + ccq->reg.bal = PF_FW_ATQBAL - mbx_start; 33 30 ccq->reg.len_mask = PF_FW_ATQLEN_ATQLEN_M; 34 31 ccq->reg.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M; 35 32 ccq->reg.head_mask = PF_FW_ATQH_ATQH_M; 36 33 break; 37 34 case IDPF_CTLQ_TYPE_MAILBOX_RX: 38 35 /* set head and tail registers in our local struct */ 39 - ccq->reg.head = PF_FW_ARQH; 40 - ccq->reg.tail = PF_FW_ARQT; 41 - ccq->reg.len = PF_FW_ARQLEN; 42 - ccq->reg.bah = PF_FW_ARQBAH; 43 - ccq->reg.bal = PF_FW_ARQBAL; 36 + ccq->reg.head = PF_FW_ARQH - mbx_start; 37 + ccq->reg.tail = PF_FW_ARQT - mbx_start; 38 + ccq->reg.len = PF_FW_ARQLEN - mbx_start; 39 + ccq->reg.bah = PF_FW_ARQBAH - mbx_start; 40 + ccq->reg.bal = PF_FW_ARQBAL - mbx_start; 44 41 ccq->reg.len_mask = PF_FW_ARQLEN_ARQLEN_M; 45 42 ccq->reg.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M; 46 43 ccq->reg.head_mask = PF_FW_ARQH_ARQH_M; ··· 133 130 */ 134 131 static void idpf_reset_reg_init(struct idpf_adapter *adapter) 135 132 { 136 - adapter->reset_reg.rstat = 
idpf_get_reg_addr(adapter, PFGEN_RSTAT); 133 + adapter->reset_reg.rstat = idpf_get_rstat_reg_addr(adapter, PFGEN_RSTAT); 137 134 adapter->reset_reg.rstat_m = PFGEN_RSTAT_PFR_STATE_M; 138 135 } 139 136 ··· 147 144 { 148 145 u32 reset_reg; 149 146 150 - reset_reg = readl(idpf_get_reg_addr(adapter, PFGEN_CTRL)); 147 + reset_reg = readl(idpf_get_rstat_reg_addr(adapter, PFGEN_CTRL)); 151 148 writel(reset_reg | PFGEN_CTRL_PFSWR, 152 - idpf_get_reg_addr(adapter, PFGEN_CTRL)); 149 + idpf_get_rstat_reg_addr(adapter, PFGEN_CTRL)); 153 150 } 154 151 155 152 /** ··· 162 159 { 163 160 adapter->ptp->cmd.shtime_enable_mask = PF_GLTSYN_CMD_SYNC_SHTIME_EN_M; 164 161 adapter->ptp->cmd.exec_cmd_mask = PF_GLTSYN_CMD_SYNC_EXEC_CMD_M; 162 + } 163 + 164 + /** 165 + * idpf_idc_register - register for IDC callbacks 166 + * @adapter: Driver specific private structure 167 + * 168 + * Return: 0 on success or error code on failure. 169 + */ 170 + static int idpf_idc_register(struct idpf_adapter *adapter) 171 + { 172 + return idpf_idc_init_aux_core_dev(adapter, IIDC_FUNCTION_TYPE_PF); 165 173 } 166 174 167 175 /** ··· 196 182 void idpf_dev_ops_init(struct idpf_adapter *adapter) 197 183 { 198 184 idpf_reg_ops_init(adapter); 185 + 186 + adapter->dev_ops.idc_init = idpf_idc_register; 187 + 188 + resource_set_range(&adapter->dev_ops.static_reg_info[0], 189 + PF_FW_BASE, IDPF_PF_MBX_REGION_SZ); 190 + resource_set_range(&adapter->dev_ops.static_reg_info[1], 191 + PFGEN_RTRIG, IDPF_PF_RSTAT_REGION_SZ); 199 192 }
+503
drivers/net/ethernet/intel/idpf/idpf_idc.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* Copyright (C) 2025 Intel Corporation */ 3 + 4 + #include <linux/export.h> 5 + 6 + #include "idpf.h" 7 + #include "idpf_virtchnl.h" 8 + 9 + static DEFINE_IDA(idpf_idc_ida); 10 + 11 + #define IDPF_IDC_MAX_ADEV_NAME_LEN 15 12 + 13 + /** 14 + * idpf_idc_init - Called to initialize IDC 15 + * @adapter: driver private data structure 16 + * 17 + * Return: 0 on success or cap not enabled, error code on failure. 18 + */ 19 + int idpf_idc_init(struct idpf_adapter *adapter) 20 + { 21 + int err; 22 + 23 + if (!idpf_is_rdma_cap_ena(adapter) || 24 + !adapter->dev_ops.idc_init) 25 + return 0; 26 + 27 + err = adapter->dev_ops.idc_init(adapter); 28 + if (err) 29 + dev_err(&adapter->pdev->dev, "failed to initialize idc: %d\n", 30 + err); 31 + 32 + return err; 33 + } 34 + 35 + /** 36 + * idpf_vport_adev_release - function to be mapped to aux dev's release op 37 + * @dev: pointer to device to free 38 + */ 39 + static void idpf_vport_adev_release(struct device *dev) 40 + { 41 + struct iidc_rdma_vport_auxiliary_dev *iadev; 42 + 43 + iadev = container_of(dev, struct iidc_rdma_vport_auxiliary_dev, adev.dev); 44 + kfree(iadev); 45 + iadev = NULL; 46 + } 47 + 48 + /** 49 + * idpf_plug_vport_aux_dev - allocate and register a vport Auxiliary device 50 + * @cdev_info: IDC core device info pointer 51 + * @vdev_info: IDC vport device info pointer 52 + * 53 + * Return: 0 on success or error code on failure. 
54 + */ 55 + static int idpf_plug_vport_aux_dev(struct iidc_rdma_core_dev_info *cdev_info, 56 + struct iidc_rdma_vport_dev_info *vdev_info) 57 + { 58 + struct iidc_rdma_vport_auxiliary_dev *iadev; 59 + char name[IDPF_IDC_MAX_ADEV_NAME_LEN]; 60 + struct auxiliary_device *adev; 61 + int ret; 62 + 63 + iadev = kzalloc(sizeof(*iadev), GFP_KERNEL); 64 + if (!iadev) 65 + return -ENOMEM; 66 + 67 + adev = &iadev->adev; 68 + vdev_info->adev = &iadev->adev; 69 + iadev->vdev_info = vdev_info; 70 + 71 + ret = ida_alloc(&idpf_idc_ida, GFP_KERNEL); 72 + if (ret < 0) { 73 + pr_err("failed to allocate unique device ID for Auxiliary driver\n"); 74 + goto err_ida_alloc; 75 + } 76 + adev->id = ret; 77 + adev->dev.release = idpf_vport_adev_release; 78 + adev->dev.parent = &cdev_info->pdev->dev; 79 + sprintf(name, "%04x.rdma.vdev", cdev_info->pdev->vendor); 80 + adev->name = name; 81 + 82 + ret = auxiliary_device_init(adev); 83 + if (ret) 84 + goto err_aux_dev_init; 85 + 86 + ret = auxiliary_device_add(adev); 87 + if (ret) 88 + goto err_aux_dev_add; 89 + 90 + return 0; 91 + 92 + err_aux_dev_add: 93 + auxiliary_device_uninit(adev); 94 + err_aux_dev_init: 95 + ida_free(&idpf_idc_ida, adev->id); 96 + err_ida_alloc: 97 + vdev_info->adev = NULL; 98 + kfree(iadev); 99 + 100 + return ret; 101 + } 102 + 103 + /** 104 + * idpf_idc_init_aux_vport_dev - initialize vport Auxiliary Device(s) 105 + * @vport: virtual port data struct 106 + * 107 + * Return: 0 on success or error code on failure. 
108 + */ 109 + static int idpf_idc_init_aux_vport_dev(struct idpf_vport *vport) 110 + { 111 + struct idpf_adapter *adapter = vport->adapter; 112 + struct iidc_rdma_vport_dev_info *vdev_info; 113 + struct iidc_rdma_core_dev_info *cdev_info; 114 + struct virtchnl2_create_vport *vport_msg; 115 + int err; 116 + 117 + vport_msg = (struct virtchnl2_create_vport *) 118 + adapter->vport_params_recvd[vport->idx]; 119 + 120 + if (!(le16_to_cpu(vport_msg->vport_flags) & VIRTCHNL2_VPORT_ENABLE_RDMA)) 121 + return 0; 122 + 123 + vport->vdev_info = kzalloc(sizeof(*vdev_info), GFP_KERNEL); 124 + if (!vport->vdev_info) 125 + return -ENOMEM; 126 + 127 + cdev_info = vport->adapter->cdev_info; 128 + 129 + vdev_info = vport->vdev_info; 130 + vdev_info->vport_id = vport->vport_id; 131 + vdev_info->netdev = vport->netdev; 132 + vdev_info->core_adev = cdev_info->adev; 133 + 134 + err = idpf_plug_vport_aux_dev(cdev_info, vdev_info); 135 + if (err) { 136 + vport->vdev_info = NULL; 137 + kfree(vdev_info); 138 + return err; 139 + } 140 + 141 + return 0; 142 + } 143 + 144 + /** 145 + * idpf_idc_vdev_mtu_event - Function to handle IDC vport mtu change events 146 + * @vdev_info: IDC vport device info pointer 147 + * @event_type: type of event to pass to handler 148 + */ 149 + void idpf_idc_vdev_mtu_event(struct iidc_rdma_vport_dev_info *vdev_info, 150 + enum iidc_rdma_event_type event_type) 151 + { 152 + struct iidc_rdma_vport_auxiliary_drv *iadrv; 153 + struct iidc_rdma_event event = { }; 154 + struct auxiliary_device *adev; 155 + 156 + if (!vdev_info) 157 + /* RDMA is not enabled */ 158 + return; 159 + 160 + set_bit(event_type, event.type); 161 + 162 + device_lock(&vdev_info->adev->dev); 163 + adev = vdev_info->adev; 164 + if (!adev || !adev->dev.driver) 165 + goto unlock; 166 + iadrv = container_of(adev->dev.driver, 167 + struct iidc_rdma_vport_auxiliary_drv, 168 + adrv.driver); 169 + if (iadrv->event_handler) 170 + iadrv->event_handler(vdev_info, &event); 171 + unlock: 172 + 
device_unlock(&vdev_info->adev->dev); 173 + } 174 + 175 + /** 176 + * idpf_core_adev_release - function to be mapped to aux dev's release op 177 + * @dev: pointer to device to free 178 + */ 179 + static void idpf_core_adev_release(struct device *dev) 180 + { 181 + struct iidc_rdma_core_auxiliary_dev *iadev; 182 + 183 + iadev = container_of(dev, struct iidc_rdma_core_auxiliary_dev, adev.dev); 184 + kfree(iadev); 185 + iadev = NULL; 186 + } 187 + 188 + /** 189 + * idpf_plug_core_aux_dev - allocate and register an Auxiliary device 190 + * @cdev_info: IDC core device info pointer 191 + * 192 + * Return: 0 on success or error code on failure. 193 + */ 194 + static int idpf_plug_core_aux_dev(struct iidc_rdma_core_dev_info *cdev_info) 195 + { 196 + struct iidc_rdma_core_auxiliary_dev *iadev; 197 + char name[IDPF_IDC_MAX_ADEV_NAME_LEN]; 198 + struct auxiliary_device *adev; 199 + int ret; 200 + 201 + iadev = kzalloc(sizeof(*iadev), GFP_KERNEL); 202 + if (!iadev) 203 + return -ENOMEM; 204 + 205 + adev = &iadev->adev; 206 + cdev_info->adev = adev; 207 + iadev->cdev_info = cdev_info; 208 + 209 + ret = ida_alloc(&idpf_idc_ida, GFP_KERNEL); 210 + if (ret < 0) { 211 + pr_err("failed to allocate unique device ID for Auxiliary driver\n"); 212 + goto err_ida_alloc; 213 + } 214 + adev->id = ret; 215 + adev->dev.release = idpf_core_adev_release; 216 + adev->dev.parent = &cdev_info->pdev->dev; 217 + sprintf(name, "%04x.rdma.core", cdev_info->pdev->vendor); 218 + adev->name = name; 219 + 220 + ret = auxiliary_device_init(adev); 221 + if (ret) 222 + goto err_aux_dev_init; 223 + 224 + ret = auxiliary_device_add(adev); 225 + if (ret) 226 + goto err_aux_dev_add; 227 + 228 + return 0; 229 + 230 + err_aux_dev_add: 231 + auxiliary_device_uninit(adev); 232 + err_aux_dev_init: 233 + ida_free(&idpf_idc_ida, adev->id); 234 + err_ida_alloc: 235 + cdev_info->adev = NULL; 236 + kfree(iadev); 237 + 238 + return ret; 239 + } 240 + 241 + /** 242 + * idpf_unplug_aux_dev - unregister and free an Auxiliary 
device 243 + * @adev: auxiliary device struct 244 + */ 245 + static void idpf_unplug_aux_dev(struct auxiliary_device *adev) 246 + { 247 + if (!adev) 248 + return; 249 + 250 + auxiliary_device_delete(adev); 251 + auxiliary_device_uninit(adev); 252 + 253 + ida_free(&idpf_idc_ida, adev->id); 254 + } 255 + 256 + /** 257 + * idpf_idc_issue_reset_event - Function to handle reset IDC event 258 + * @cdev_info: IDC core device info pointer 259 + */ 260 + void idpf_idc_issue_reset_event(struct iidc_rdma_core_dev_info *cdev_info) 261 + { 262 + enum iidc_rdma_event_type event_type = IIDC_RDMA_EVENT_WARN_RESET; 263 + struct iidc_rdma_core_auxiliary_drv *iadrv; 264 + struct iidc_rdma_event event = { }; 265 + struct auxiliary_device *adev; 266 + 267 + if (!cdev_info) 268 + /* RDMA is not enabled */ 269 + return; 270 + 271 + set_bit(event_type, event.type); 272 + 273 + device_lock(&cdev_info->adev->dev); 274 + 275 + adev = cdev_info->adev; 276 + if (!adev || !adev->dev.driver) 277 + goto unlock; 278 + 279 + iadrv = container_of(adev->dev.driver, 280 + struct iidc_rdma_core_auxiliary_drv, 281 + adrv.driver); 282 + if (iadrv->event_handler) 283 + iadrv->event_handler(cdev_info, &event); 284 + unlock: 285 + device_unlock(&cdev_info->adev->dev); 286 + } 287 + 288 + /** 289 + * idpf_idc_vport_dev_up - called when CORE is ready for vport aux devs 290 + * @adapter: private data struct 291 + * 292 + * Return: 0 on success or error code on failure. 
293 + */ 294 + static int idpf_idc_vport_dev_up(struct idpf_adapter *adapter) 295 + { 296 + int i, err = 0; 297 + 298 + for (i = 0; i < adapter->num_alloc_vports; i++) { 299 + struct idpf_vport *vport = adapter->vports[i]; 300 + 301 + if (!vport) 302 + continue; 303 + 304 + if (!vport->vdev_info) 305 + err = idpf_idc_init_aux_vport_dev(vport); 306 + else 307 + err = idpf_plug_vport_aux_dev(vport->adapter->cdev_info, 308 + vport->vdev_info); 309 + } 310 + 311 + return err; 312 + } 313 + 314 + /** 315 + * idpf_idc_vport_dev_down - called CORE is leaving vport aux dev support state 316 + * @adapter: private data struct 317 + */ 318 + static void idpf_idc_vport_dev_down(struct idpf_adapter *adapter) 319 + { 320 + int i; 321 + 322 + for (i = 0; i < adapter->num_alloc_vports; i++) { 323 + struct idpf_vport *vport = adapter->vports[i]; 324 + 325 + if (!vport) 326 + continue; 327 + 328 + idpf_unplug_aux_dev(vport->vdev_info->adev); 329 + vport->vdev_info->adev = NULL; 330 + } 331 + } 332 + 333 + /** 334 + * idpf_idc_vport_dev_ctrl - Called by an Auxiliary Driver 335 + * @cdev_info: IDC core device info pointer 336 + * @up: RDMA core driver status 337 + * 338 + * This callback function is accessed by an Auxiliary Driver to indicate 339 + * whether core driver is ready to support vport driver load or if vport 340 + * drivers need to be taken down. 341 + * 342 + * Return: 0 on success or error code on failure. 
343 + */ 344 + int idpf_idc_vport_dev_ctrl(struct iidc_rdma_core_dev_info *cdev_info, bool up) 345 + { 346 + struct idpf_adapter *adapter = pci_get_drvdata(cdev_info->pdev); 347 + 348 + if (up) 349 + return idpf_idc_vport_dev_up(adapter); 350 + 351 + idpf_idc_vport_dev_down(adapter); 352 + 353 + return 0; 354 + } 355 + EXPORT_SYMBOL_GPL(idpf_idc_vport_dev_ctrl); 356 + 357 + /** 358 + * idpf_idc_request_reset - Called by an Auxiliary Driver 359 + * @cdev_info: IDC core device info pointer 360 + * @reset_type: function, core or other 361 + * 362 + * This callback function is accessed by an Auxiliary Driver to request a reset 363 + * on the Auxiliary Device. 364 + * 365 + * Return: 0 on success or error code on failure. 366 + */ 367 + int idpf_idc_request_reset(struct iidc_rdma_core_dev_info *cdev_info, 368 + enum iidc_rdma_reset_type __always_unused reset_type) 369 + { 370 + struct idpf_adapter *adapter = pci_get_drvdata(cdev_info->pdev); 371 + 372 + if (!idpf_is_reset_in_prog(adapter)) { 373 + set_bit(IDPF_HR_FUNC_RESET, adapter->flags); 374 + queue_delayed_work(adapter->vc_event_wq, 375 + &adapter->vc_event_task, 376 + msecs_to_jiffies(10)); 377 + } 378 + 379 + return 0; 380 + } 381 + EXPORT_SYMBOL_GPL(idpf_idc_request_reset); 382 + 383 + /** 384 + * idpf_idc_init_msix_data - initialize MSIX data for the cdev_info structure 385 + * @adapter: driver private data structure 386 + */ 387 + static void 388 + idpf_idc_init_msix_data(struct idpf_adapter *adapter) 389 + { 390 + struct iidc_rdma_core_dev_info *cdev_info; 391 + struct iidc_rdma_priv_dev_info *privd; 392 + 393 + if (!adapter->rdma_msix_entries) 394 + return; 395 + 396 + cdev_info = adapter->cdev_info; 397 + privd = cdev_info->iidc_priv; 398 + 399 + privd->msix_entries = adapter->rdma_msix_entries; 400 + privd->msix_count = adapter->num_rdma_msix_entries; 401 + } 402 + 403 + /** 404 + * idpf_idc_init_aux_core_dev - initialize Auxiliary Device(s) 405 + * @adapter: driver private data structure 406 + * @ftype: 
PF or VF 407 + * 408 + * Return: 0 on success or error code on failure. 409 + */ 410 + int idpf_idc_init_aux_core_dev(struct idpf_adapter *adapter, 411 + enum iidc_function_type ftype) 412 + { 413 + struct iidc_rdma_core_dev_info *cdev_info; 414 + struct iidc_rdma_priv_dev_info *privd; 415 + int err, i; 416 + 417 + adapter->cdev_info = kzalloc(sizeof(*cdev_info), GFP_KERNEL); 418 + if (!adapter->cdev_info) 419 + return -ENOMEM; 420 + cdev_info = adapter->cdev_info; 421 + 422 + privd = kzalloc(sizeof(*privd), GFP_KERNEL); 423 + if (!privd) { 424 + err = -ENOMEM; 425 + goto err_privd_alloc; 426 + } 427 + 428 + cdev_info->iidc_priv = privd; 429 + cdev_info->pdev = adapter->pdev; 430 + cdev_info->rdma_protocol = IIDC_RDMA_PROTOCOL_ROCEV2; 431 + privd->ftype = ftype; 432 + 433 + privd->mapped_mem_regions = 434 + kcalloc(adapter->hw.num_lan_regs, 435 + sizeof(struct iidc_rdma_lan_mapped_mem_region), 436 + GFP_KERNEL); 437 + if (!privd->mapped_mem_regions) { 438 + err = -ENOMEM; 439 + goto err_plug_aux_dev; 440 + } 441 + 442 + privd->num_memory_regions = cpu_to_le16(adapter->hw.num_lan_regs); 443 + for (i = 0; i < adapter->hw.num_lan_regs; i++) { 444 + privd->mapped_mem_regions[i].region_addr = 445 + adapter->hw.lan_regs[i].vaddr; 446 + privd->mapped_mem_regions[i].size = 447 + cpu_to_le64(adapter->hw.lan_regs[i].addr_len); 448 + privd->mapped_mem_regions[i].start_offset = 449 + cpu_to_le64(adapter->hw.lan_regs[i].addr_start); 450 + } 451 + 452 + idpf_idc_init_msix_data(adapter); 453 + 454 + err = idpf_plug_core_aux_dev(cdev_info); 455 + if (err) 456 + goto err_free_mem_regions; 457 + 458 + return 0; 459 + 460 + err_free_mem_regions: 461 + kfree(privd->mapped_mem_regions); 462 + privd->mapped_mem_regions = NULL; 463 + err_plug_aux_dev: 464 + kfree(privd); 465 + err_privd_alloc: 466 + kfree(cdev_info); 467 + adapter->cdev_info = NULL; 468 + 469 + return err; 470 + } 471 + 472 + /** 473 + * idpf_idc_deinit_core_aux_device - de-initialize Auxiliary Device(s) 474 + * 
@cdev_info: IDC core device info pointer 475 + */ 476 + void idpf_idc_deinit_core_aux_device(struct iidc_rdma_core_dev_info *cdev_info) 477 + { 478 + struct iidc_rdma_priv_dev_info *privd; 479 + 480 + if (!cdev_info) 481 + return; 482 + 483 + idpf_unplug_aux_dev(cdev_info->adev); 484 + 485 + privd = cdev_info->iidc_priv; 486 + kfree(privd->mapped_mem_regions); 487 + kfree(privd); 488 + kfree(cdev_info); 489 + } 490 + 491 + /** 492 + * idpf_idc_deinit_vport_aux_device - de-initialize Auxiliary Device(s) 493 + * @vdev_info: IDC vport device info pointer 494 + */ 495 + void idpf_idc_deinit_vport_aux_device(struct iidc_rdma_vport_dev_info *vdev_info) 496 + { 497 + if (!vdev_info) 498 + return; 499 + 500 + idpf_unplug_aux_dev(vdev_info->adev); 501 + 502 + kfree(vdev_info); 503 + }
+83 -21
drivers/net/ethernet/intel/idpf/idpf_lib.c
··· 88 88 idpf_deinit_vector_stack(adapter); 89 89 kfree(adapter->msix_entries); 90 90 adapter->msix_entries = NULL; 91 + kfree(adapter->rdma_msix_entries); 92 + adapter->rdma_msix_entries = NULL; 91 93 } 92 94 93 95 /** ··· 301 299 */ 302 300 int idpf_intr_req(struct idpf_adapter *adapter) 303 301 { 302 + u16 num_lan_vecs, min_lan_vecs, num_rdma_vecs = 0, min_rdma_vecs = 0; 304 303 u16 default_vports = idpf_get_default_vports(adapter); 305 304 int num_q_vecs, total_vecs, num_vec_ids; 306 - int min_vectors, v_actual, err; 305 + int min_vectors, actual_vecs, err; 307 306 unsigned int vector; 308 307 u16 *vecids; 308 + int i; 309 309 310 310 total_vecs = idpf_get_reserved_vecs(adapter); 311 + num_lan_vecs = total_vecs; 312 + if (idpf_is_rdma_cap_ena(adapter)) { 313 + num_rdma_vecs = idpf_get_reserved_rdma_vecs(adapter); 314 + min_rdma_vecs = IDPF_MIN_RDMA_VEC; 315 + 316 + if (!num_rdma_vecs) { 317 + /* If idpf_get_reserved_rdma_vecs is 0, vectors are 318 + * pulled from the LAN pool. 319 + */ 320 + num_rdma_vecs = min_rdma_vecs; 321 + } else if (num_rdma_vecs < min_rdma_vecs) { 322 + dev_err(&adapter->pdev->dev, 323 + "Not enough vectors reserved for RDMA (min: %u, current: %u)\n", 324 + min_rdma_vecs, num_rdma_vecs); 325 + return -EINVAL; 326 + } 327 + } 328 + 311 329 num_q_vecs = total_vecs - IDPF_MBX_Q_VEC; 312 330 313 331 err = idpf_send_alloc_vectors_msg(adapter, num_q_vecs); ··· 338 316 return -EAGAIN; 339 317 } 340 318 341 - min_vectors = IDPF_MBX_Q_VEC + IDPF_MIN_Q_VEC * default_vports; 342 - v_actual = pci_alloc_irq_vectors(adapter->pdev, min_vectors, 343 - total_vecs, PCI_IRQ_MSIX); 344 - if (v_actual < min_vectors) { 345 - dev_err(&adapter->pdev->dev, "Failed to allocate MSIX vectors: %d\n", 346 - v_actual); 347 - err = -EAGAIN; 319 + min_lan_vecs = IDPF_MBX_Q_VEC + IDPF_MIN_Q_VEC * default_vports; 320 + min_vectors = min_lan_vecs + min_rdma_vecs; 321 + actual_vecs = pci_alloc_irq_vectors(adapter->pdev, min_vectors, 322 + total_vecs, PCI_IRQ_MSIX); 323 + 
if (actual_vecs < 0) { 324 + dev_err(&adapter->pdev->dev, "Failed to allocate minimum MSIX vectors required: %d\n", 325 + min_vectors); 326 + err = actual_vecs; 348 327 goto send_dealloc_vecs; 349 328 } 350 329 351 - adapter->msix_entries = kcalloc(v_actual, sizeof(struct msix_entry), 352 - GFP_KERNEL); 330 + if (idpf_is_rdma_cap_ena(adapter)) { 331 + if (actual_vecs < total_vecs) { 332 + dev_warn(&adapter->pdev->dev, 333 + "Warning: %d vectors requested, only %d available. Defaulting to minimum (%d) for RDMA and remaining for LAN.\n", 334 + total_vecs, actual_vecs, IDPF_MIN_RDMA_VEC); 335 + num_rdma_vecs = IDPF_MIN_RDMA_VEC; 336 + } 353 337 338 + adapter->rdma_msix_entries = kcalloc(num_rdma_vecs, 339 + sizeof(struct msix_entry), 340 + GFP_KERNEL); 341 + if (!adapter->rdma_msix_entries) { 342 + err = -ENOMEM; 343 + goto free_irq; 344 + } 345 + } 346 + 347 + num_lan_vecs = actual_vecs - num_rdma_vecs; 348 + adapter->msix_entries = kcalloc(num_lan_vecs, sizeof(struct msix_entry), 349 + GFP_KERNEL); 354 350 if (!adapter->msix_entries) { 355 351 err = -ENOMEM; 356 - goto free_irq; 352 + goto free_rdma_msix; 357 353 } 358 354 359 355 adapter->mb_vector.v_idx = le16_to_cpu(adapter->caps.mailbox_vector_id); 360 356 361 - vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL); 357 + vecids = kcalloc(actual_vecs, sizeof(u16), GFP_KERNEL); 362 358 if (!vecids) { 363 359 err = -ENOMEM; 364 360 goto free_msix; 365 361 } 366 362 367 - num_vec_ids = idpf_get_vec_ids(adapter, vecids, total_vecs, 363 + num_vec_ids = idpf_get_vec_ids(adapter, vecids, actual_vecs, 368 364 &adapter->req_vec_chunks->vchunks); 369 - if (num_vec_ids < v_actual) { 365 + if (num_vec_ids < actual_vecs) { 370 366 err = -EINVAL; 371 367 goto free_vecids; 372 368 } 373 369 374 - for (vector = 0; vector < v_actual; vector++) { 370 + for (vector = 0; vector < num_lan_vecs; vector++) { 375 371 adapter->msix_entries[vector].entry = vecids[vector]; 376 372 adapter->msix_entries[vector].vector = 377 373 
pci_irq_vector(adapter->pdev, vector); 378 374 } 375 + for (i = 0; i < num_rdma_vecs; vector++, i++) { 376 + adapter->rdma_msix_entries[i].entry = vecids[vector]; 377 + adapter->rdma_msix_entries[i].vector = 378 + pci_irq_vector(adapter->pdev, vector); 379 + } 379 380 380 - adapter->num_req_msix = total_vecs; 381 - adapter->num_msix_entries = v_actual; 382 381 /* 'num_avail_msix' is used to distribute excess vectors to the vports 383 382 * after considering the minimum vectors required per each default 384 383 * vport 385 384 */ 386 - adapter->num_avail_msix = v_actual - min_vectors; 385 + adapter->num_avail_msix = num_lan_vecs - min_lan_vecs; 386 + adapter->num_msix_entries = num_lan_vecs; 387 + if (idpf_is_rdma_cap_ena(adapter)) 388 + adapter->num_rdma_msix_entries = num_rdma_vecs; 387 389 388 390 /* Fill MSIX vector lifo stack with vector indexes */ 389 391 err = idpf_init_vector_stack(adapter); ··· 429 383 free_msix: 430 384 kfree(adapter->msix_entries); 431 385 adapter->msix_entries = NULL; 386 + free_rdma_msix: 387 + kfree(adapter->rdma_msix_entries); 388 + adapter->rdma_msix_entries = NULL; 432 389 free_irq: 433 390 pci_free_irq_vectors(adapter->pdev); 434 391 send_dealloc_vecs: ··· 1020 971 { 1021 972 struct idpf_adapter *adapter = vport->adapter; 1022 973 unsigned int i = vport->idx; 974 + 975 + idpf_idc_deinit_vport_aux_device(vport->vdev_info); 1023 976 1024 977 idpf_deinit_mac_addr(vport); 1025 978 idpf_vport_stop(vport); ··· 1789 1738 } else if (test_and_clear_bit(IDPF_HR_FUNC_RESET, adapter->flags)) { 1790 1739 bool is_reset = idpf_is_reset_detected(adapter); 1791 1740 1741 + idpf_idc_issue_reset_event(adapter->cdev_info); 1742 + 1792 1743 idpf_set_vport_state(adapter); 1793 1744 idpf_vc_core_deinit(adapter); 1794 1745 if (!is_reset) ··· 1837 1784 1838 1785 unlock_mutex: 1839 1786 mutex_unlock(&adapter->vport_ctrl_lock); 1787 + 1788 + /* Wait until all vports are created to init RDMA CORE AUX */ 1789 + if (!err) 1790 + err = idpf_idc_init(adapter); 
1840 1791 1841 1792 return err; 1842 1793 } ··· 1925 1868 idpf_vport_calc_num_q_desc(new_vport); 1926 1869 break; 1927 1870 case IDPF_SR_MTU_CHANGE: 1871 + idpf_idc_vdev_mtu_event(vport->vdev_info, 1872 + IIDC_RDMA_EVENT_BEFORE_MTU_CHANGE); 1873 + break; 1928 1874 case IDPF_SR_RSC_CHANGE: 1929 1875 break; 1930 1876 default: ··· 1972 1912 if (current_state == __IDPF_VPORT_UP) 1973 1913 err = idpf_vport_open(vport); 1974 1914 1975 - kfree(new_vport); 1976 - 1977 - return err; 1915 + goto free_vport; 1978 1916 1979 1917 err_reset: 1980 1918 idpf_send_add_queues_msg(vport, vport->num_txq, vport->num_complq, ··· 1984 1926 1985 1927 free_vport: 1986 1928 kfree(new_vport); 1929 + 1930 + if (reset_cause == IDPF_SR_MTU_CHANGE) 1931 + idpf_idc_vdev_mtu_event(vport->vdev_info, 1932 + IIDC_RDMA_EVENT_AFTER_MTU_CHANGE); 1987 1933 1988 1934 return err; 1989 1935 }
+27 -5
drivers/net/ethernet/intel/idpf/idpf_main.c
··· 106 106 */ 107 107 static int idpf_cfg_hw(struct idpf_adapter *adapter) 108 108 { 109 + resource_size_t res_start, mbx_start, rstat_start; 109 110 struct pci_dev *pdev = adapter->pdev; 110 111 struct idpf_hw *hw = &adapter->hw; 112 + struct device *dev = &pdev->dev; 113 + long len; 111 114 112 - hw->hw_addr = pcim_iomap_table(pdev)[0]; 113 - if (!hw->hw_addr) { 114 - pci_err(pdev, "failed to allocate PCI iomap table\n"); 115 + res_start = pci_resource_start(pdev, 0); 116 + 117 + /* Map mailbox space for virtchnl communication */ 118 + mbx_start = res_start + adapter->dev_ops.static_reg_info[0].start; 119 + len = resource_size(&adapter->dev_ops.static_reg_info[0]); 120 + hw->mbx.vaddr = devm_ioremap(dev, mbx_start, len); 121 + if (!hw->mbx.vaddr) { 122 + pci_err(pdev, "failed to allocate BAR0 mbx region\n"); 115 123 116 124 return -ENOMEM; 117 125 } 126 + hw->mbx.addr_start = adapter->dev_ops.static_reg_info[0].start; 127 + hw->mbx.addr_len = len; 128 + 129 + /* Map rstat space for resets */ 130 + rstat_start = res_start + adapter->dev_ops.static_reg_info[1].start; 131 + len = resource_size(&adapter->dev_ops.static_reg_info[1]); 132 + hw->rstat.vaddr = devm_ioremap(dev, rstat_start, len); 133 + if (!hw->rstat.vaddr) { 134 + pci_err(pdev, "failed to allocate BAR0 rstat region\n"); 135 + 136 + return -ENOMEM; 137 + } 138 + hw->rstat.addr_start = adapter->dev_ops.static_reg_info[1].start; 139 + hw->rstat.addr_len = len; 118 140 119 141 hw->back = adapter; 120 142 ··· 183 161 if (err) 184 162 goto err_free; 185 163 186 - err = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev)); 164 + err = pcim_request_region(pdev, 0, pci_name(pdev)); 187 165 if (err) { 188 - pci_err(pdev, "pcim_iomap_regions failed %pe\n", ERR_PTR(err)); 166 + pci_err(pdev, "pcim_request_region failed %pe\n", ERR_PTR(err)); 189 167 190 168 goto err_free; 191 169 }
+4 -4
drivers/net/ethernet/intel/idpf/idpf_mem.h
··· 12 12 size_t size; 13 13 }; 14 14 15 - #define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg))) 16 - #define rd32(a, reg) readl((a)->hw_addr + (reg)) 17 - #define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg))) 18 - #define rd64(a, reg) readq((a)->hw_addr + (reg)) 15 + #define idpf_mbx_wr32(a, reg, value) writel((value), ((a)->mbx.vaddr + (reg))) 16 + #define idpf_mbx_rd32(a, reg) readl((a)->mbx.vaddr + (reg)) 17 + #define idpf_mbx_wr64(a, reg, value) writeq((value), ((a)->mbx.vaddr + (reg))) 18 + #define idpf_mbx_rd64(a, reg) readq((a)->mbx.vaddr + (reg)) 19 19 20 20 #endif /* _IDPF_MEM_H_ */
+1
drivers/net/ethernet/intel/idpf/idpf_txrx.h
··· 57 57 /* Default vector sharing */ 58 58 #define IDPF_MBX_Q_VEC 1 59 59 #define IDPF_MIN_Q_VEC 1 60 + #define IDPF_MIN_RDMA_VEC 2 60 61 61 62 #define IDPF_DFLT_TX_Q_DESC_COUNT 512 62 63 #define IDPF_DFLT_TX_COMPLQ_DESC_COUNT 512
+33 -12
drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
··· 9 9 10 10 /** 11 11 * idpf_vf_ctlq_reg_init - initialize default mailbox registers 12 + * @adapter: adapter structure 12 13 * @cq: pointer to the array of create control queues 13 14 */ 14 - static void idpf_vf_ctlq_reg_init(struct idpf_ctlq_create_info *cq) 15 + static void idpf_vf_ctlq_reg_init(struct idpf_adapter *adapter, 16 + struct idpf_ctlq_create_info *cq) 15 17 { 18 + resource_size_t mbx_start = adapter->dev_ops.static_reg_info[0].start; 16 19 int i; 17 20 18 21 for (i = 0; i < IDPF_NUM_DFLT_MBX_Q; i++) { ··· 24 21 switch (ccq->type) { 25 22 case IDPF_CTLQ_TYPE_MAILBOX_TX: 26 23 /* set head and tail registers in our local struct */ 27 - ccq->reg.head = VF_ATQH; 28 - ccq->reg.tail = VF_ATQT; 29 - ccq->reg.len = VF_ATQLEN; 30 - ccq->reg.bah = VF_ATQBAH; 31 - ccq->reg.bal = VF_ATQBAL; 24 + ccq->reg.head = VF_ATQH - mbx_start; 25 + ccq->reg.tail = VF_ATQT - mbx_start; 26 + ccq->reg.len = VF_ATQLEN - mbx_start; 27 + ccq->reg.bah = VF_ATQBAH - mbx_start; 28 + ccq->reg.bal = VF_ATQBAL - mbx_start; 32 29 ccq->reg.len_mask = VF_ATQLEN_ATQLEN_M; 33 30 ccq->reg.len_ena_mask = VF_ATQLEN_ATQENABLE_M; 34 31 ccq->reg.head_mask = VF_ATQH_ATQH_M; 35 32 break; 36 33 case IDPF_CTLQ_TYPE_MAILBOX_RX: 37 34 /* set head and tail registers in our local struct */ 38 - ccq->reg.head = VF_ARQH; 39 - ccq->reg.tail = VF_ARQT; 40 - ccq->reg.len = VF_ARQLEN; 41 - ccq->reg.bah = VF_ARQBAH; 42 - ccq->reg.bal = VF_ARQBAL; 35 + ccq->reg.head = VF_ARQH - mbx_start; 36 + ccq->reg.tail = VF_ARQT - mbx_start; 37 + ccq->reg.len = VF_ARQLEN - mbx_start; 38 + ccq->reg.bah = VF_ARQBAH - mbx_start; 39 + ccq->reg.bal = VF_ARQBAL - mbx_start; 43 40 ccq->reg.len_mask = VF_ARQLEN_ARQLEN_M; 44 41 ccq->reg.len_ena_mask = VF_ARQLEN_ARQENABLE_M; 45 42 ccq->reg.head_mask = VF_ARQH_ARQH_M; ··· 132 129 */ 133 130 static void idpf_vf_reset_reg_init(struct idpf_adapter *adapter) 134 131 { 135 - adapter->reset_reg.rstat = idpf_get_reg_addr(adapter, VFGEN_RSTAT); 132 + adapter->reset_reg.rstat = 
idpf_get_rstat_reg_addr(adapter, VFGEN_RSTAT); 136 133 adapter->reset_reg.rstat_m = VFGEN_RSTAT_VFR_STATE_M; 137 134 } 138 135 ··· 148 145 if (trig_cause == IDPF_HR_FUNC_RESET && 149 146 !test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) 150 147 idpf_send_mb_msg(adapter, VIRTCHNL2_OP_RESET_VF, 0, NULL, 0); 148 + } 149 + 150 + /** 151 + * idpf_idc_vf_register - register for IDC callbacks 152 + * @adapter: Driver specific private structure 153 + * 154 + * Return: 0 on success or error code on failure. 155 + */ 156 + static int idpf_idc_vf_register(struct idpf_adapter *adapter) 157 + { 158 + return idpf_idc_init_aux_core_dev(adapter, IIDC_FUNCTION_TYPE_VF); 151 159 } 152 160 153 161 /** ··· 181 167 void idpf_vf_dev_ops_init(struct idpf_adapter *adapter) 182 168 { 183 169 idpf_vf_reg_ops_init(adapter); 170 + 171 + adapter->dev_ops.idc_init = idpf_idc_vf_register; 172 + 173 + resource_set_range(&adapter->dev_ops.static_reg_info[0], 174 + VF_BASE, IDPF_VF_MBX_REGION_SZ); 175 + resource_set_range(&adapter->dev_ops.static_reg_info[1], 176 + VFGEN_RSTAT, IDPF_VF_RSTAT_REGION_SZ); 184 177 }
+190 -1
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 2 /* Copyright (C) 2023 Intel Corporation */ 3 3 4 + #include <linux/export.h> 4 5 #include <net/libeth/rx.h> 5 6 6 7 #include "idpf.h" ··· 869 868 870 869 caps.other_caps = 871 870 cpu_to_le64(VIRTCHNL2_CAP_SRIOV | 871 + VIRTCHNL2_CAP_RDMA | 872 + VIRTCHNL2_CAP_LAN_MEMORY_REGIONS | 872 873 VIRTCHNL2_CAP_MACFILTER | 873 874 VIRTCHNL2_CAP_SPLITQ_QSCHED | 874 875 VIRTCHNL2_CAP_PROMISC | ··· 889 886 return reply_sz; 890 887 if (reply_sz < sizeof(adapter->caps)) 891 888 return -EIO; 889 + 890 + return 0; 891 + } 892 + 893 + /** 894 + * idpf_send_get_lan_memory_regions - Send virtchnl get LAN memory regions msg 895 + * @adapter: Driver specific private struct 896 + * 897 + * Return: 0 on success or error code on failure. 898 + */ 899 + static int idpf_send_get_lan_memory_regions(struct idpf_adapter *adapter) 900 + { 901 + struct virtchnl2_get_lan_memory_regions *rcvd_regions __free(kfree); 902 + struct idpf_vc_xn_params xn_params = { 903 + .vc_op = VIRTCHNL2_OP_GET_LAN_MEMORY_REGIONS, 904 + .recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN, 905 + .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC, 906 + }; 907 + int num_regions, size; 908 + struct idpf_hw *hw; 909 + ssize_t reply_sz; 910 + int err = 0; 911 + 912 + rcvd_regions = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL); 913 + if (!rcvd_regions) 914 + return -ENOMEM; 915 + 916 + xn_params.recv_buf.iov_base = rcvd_regions; 917 + reply_sz = idpf_vc_xn_exec(adapter, &xn_params); 918 + if (reply_sz < 0) 919 + return reply_sz; 920 + 921 + num_regions = le16_to_cpu(rcvd_regions->num_memory_regions); 922 + size = struct_size(rcvd_regions, mem_reg, num_regions); 923 + if (reply_sz < size) 924 + return -EIO; 925 + 926 + if (size > IDPF_CTLQ_MAX_BUF_LEN) 927 + return -EINVAL; 928 + 929 + hw = &adapter->hw; 930 + hw->lan_regs = kcalloc(num_regions, sizeof(*hw->lan_regs), GFP_KERNEL); 931 + if (!hw->lan_regs) 932 + return -ENOMEM; 933 + 934 + for (int i = 0; i < num_regions; i++) { 935 + 
hw->lan_regs[i].addr_len = 936 + le64_to_cpu(rcvd_regions->mem_reg[i].size); 937 + hw->lan_regs[i].addr_start = 938 + le64_to_cpu(rcvd_regions->mem_reg[i].start_offset); 939 + } 940 + hw->num_lan_regs = num_regions; 941 + 942 + return err; 943 + } 944 + 945 + /** 946 + * idpf_calc_remaining_mmio_regs - calculate MMIO regions outside mbx and rstat 947 + * @adapter: Driver specific private structure 948 + * 949 + * Called when idpf_send_get_lan_memory_regions is not supported. This will 950 + * calculate the offsets and sizes for the regions before, in between, and 951 + * after the mailbox and rstat MMIO mappings. 952 + * 953 + * Return: 0 on success or error code on failure. 954 + */ 955 + static int idpf_calc_remaining_mmio_regs(struct idpf_adapter *adapter) 956 + { 957 + struct resource *rstat_reg = &adapter->dev_ops.static_reg_info[1]; 958 + struct resource *mbx_reg = &adapter->dev_ops.static_reg_info[0]; 959 + struct idpf_hw *hw = &adapter->hw; 960 + 961 + hw->num_lan_regs = IDPF_MMIO_MAP_FALLBACK_MAX_REMAINING; 962 + hw->lan_regs = kcalloc(hw->num_lan_regs, sizeof(*hw->lan_regs), 963 + GFP_KERNEL); 964 + if (!hw->lan_regs) 965 + return -ENOMEM; 966 + 967 + /* Region preceding mailbox */ 968 + hw->lan_regs[0].addr_start = 0; 969 + hw->lan_regs[0].addr_len = mbx_reg->start; 970 + /* Region between mailbox and rstat */ 971 + hw->lan_regs[1].addr_start = mbx_reg->end + 1; 972 + hw->lan_regs[1].addr_len = rstat_reg->start - 973 + hw->lan_regs[1].addr_start; 974 + /* Region after rstat */ 975 + hw->lan_regs[2].addr_start = rstat_reg->end + 1; 976 + hw->lan_regs[2].addr_len = pci_resource_len(adapter->pdev, 0) - 977 + hw->lan_regs[2].addr_start; 978 + 979 + return 0; 980 + } 981 + 982 + /** 983 + * idpf_map_lan_mmio_regs - map remaining LAN BAR regions 984 + * @adapter: Driver specific private structure 985 + * 986 + * Return: 0 on success or error code on failure. 
987 + */ 988 + static int idpf_map_lan_mmio_regs(struct idpf_adapter *adapter) 989 + { 990 + struct pci_dev *pdev = adapter->pdev; 991 + struct idpf_hw *hw = &adapter->hw; 992 + resource_size_t res_start; 993 + 994 + res_start = pci_resource_start(pdev, 0); 995 + 996 + for (int i = 0; i < hw->num_lan_regs; i++) { 997 + resource_size_t start; 998 + long len; 999 + 1000 + len = hw->lan_regs[i].addr_len; 1001 + if (!len) 1002 + continue; 1003 + start = hw->lan_regs[i].addr_start + res_start; 1004 + 1005 + hw->lan_regs[i].vaddr = devm_ioremap(&pdev->dev, start, len); 1006 + if (!hw->lan_regs[i].vaddr) { 1007 + pci_err(pdev, "failed to allocate BAR0 region\n"); 1008 + return -ENOMEM; 1009 + } 1010 + } 892 1011 893 1012 return 0; 894 1013 } ··· 2926 2801 struct idpf_hw *hw = &adapter->hw; 2927 2802 int err; 2928 2803 2929 - adapter->dev_ops.reg_ops.ctlq_reg_init(ctlq_info); 2804 + adapter->dev_ops.reg_ops.ctlq_reg_init(adapter, ctlq_info); 2930 2805 2931 2806 err = idpf_ctlq_init(hw, IDPF_NUM_DFLT_MBX_Q, ctlq_info); 2932 2807 if (err) ··· 3086 2961 msleep(task_delay); 3087 2962 } 3088 2963 2964 + if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LAN_MEMORY_REGIONS)) { 2965 + err = idpf_send_get_lan_memory_regions(adapter); 2966 + if (err) { 2967 + dev_err(&adapter->pdev->dev, "Failed to get LAN memory regions: %d\n", 2968 + err); 2969 + return -EINVAL; 2970 + } 2971 + } else { 2972 + /* Fallback to mapping the remaining regions of the entire BAR */ 2973 + err = idpf_calc_remaining_mmio_regs(adapter); 2974 + if (err) { 2975 + dev_err(&adapter->pdev->dev, "Failed to allocate BAR0 region(s): %d\n", 2976 + err); 2977 + return -ENOMEM; 2978 + } 2979 + } 2980 + 2981 + err = idpf_map_lan_mmio_regs(adapter); 2982 + if (err) { 2983 + dev_err(&adapter->pdev->dev, "Failed to map BAR0 region(s): %d\n", 2984 + err); 2985 + return -ENOMEM; 2986 + } 2987 + 3089 2988 pci_sriov_set_totalvfs(adapter->pdev, idpf_get_max_vfs(adapter)); 3090 2989 num_max_vports = 
idpf_get_max_vports(adapter); 3091 2990 adapter->max_vports = num_max_vports; ··· 3219 3070 3220 3071 idpf_ptp_release(adapter); 3221 3072 idpf_deinit_task(adapter); 3073 + idpf_idc_deinit_core_aux_device(adapter->cdev_info); 3222 3074 idpf_intr_rel(adapter); 3223 3075 3224 3076 if (remove_in_prog) ··· 3878 3728 3879 3729 return reply_sz < 0 ? reply_sz : 0; 3880 3730 } 3731 + 3732 + /** 3733 + * idpf_idc_rdma_vc_send_sync - virtchnl send callback for IDC registered drivers 3734 + * @cdev_info: IDC core device info pointer 3735 + * @send_msg: message to send 3736 + * @msg_size: size of message to send 3737 + * @recv_msg: message to populate on reception of response 3738 + * @recv_len: length of message copied into recv_msg or 0 on error 3739 + * 3740 + * Return: 0 on success or error code on failure. 3741 + */ 3742 + int idpf_idc_rdma_vc_send_sync(struct iidc_rdma_core_dev_info *cdev_info, 3743 + u8 *send_msg, u16 msg_size, 3744 + u8 *recv_msg, u16 *recv_len) 3745 + { 3746 + struct idpf_adapter *adapter = pci_get_drvdata(cdev_info->pdev); 3747 + struct idpf_vc_xn_params xn_params = { }; 3748 + ssize_t reply_sz; 3749 + u16 recv_size; 3750 + 3751 + if (!recv_msg || !recv_len || msg_size > IDPF_CTLQ_MAX_BUF_LEN) 3752 + return -EINVAL; 3753 + 3754 + recv_size = min_t(u16, *recv_len, IDPF_CTLQ_MAX_BUF_LEN); 3755 + *recv_len = 0; 3756 + xn_params.vc_op = VIRTCHNL2_OP_RDMA; 3757 + xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 3758 + xn_params.send_buf.iov_base = send_msg; 3759 + xn_params.send_buf.iov_len = msg_size; 3760 + xn_params.recv_buf.iov_base = recv_msg; 3761 + xn_params.recv_buf.iov_len = recv_size; 3762 + reply_sz = idpf_vc_xn_exec(adapter, &xn_params); 3763 + if (reply_sz < 0) 3764 + return reply_sz; 3765 + *recv_len = reply_sz; 3766 + 3767 + return 0; 3768 + } 3769 + EXPORT_SYMBOL_GPL(idpf_idc_rdma_vc_send_sync);
+3
drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
··· 151 151 int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get); 152 152 int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get); 153 153 void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr); 154 + int idpf_idc_rdma_vc_send_sync(struct iidc_rdma_core_dev_info *cdev_info, 155 + u8 *send_msg, u16 msg_size, 156 + u8 *recv_msg, u16 *recv_len); 154 157 155 158 #endif /* _IDPF_VIRTCHNL_H_ */
+38 -3
drivers/net/ethernet/intel/idpf/virtchnl2.h
··· 62 62 VIRTCHNL2_OP_GET_PTYPE_INFO = 526, 63 63 /* Opcode 527 and 528 are reserved for VIRTCHNL2_OP_GET_PTYPE_ID and 64 64 * VIRTCHNL2_OP_GET_PTYPE_INFO_RAW. 65 - * Opcodes 529, 530, 531, 532 and 533 are reserved. 66 65 */ 66 + VIRTCHNL2_OP_RDMA = 529, 67 + /* Opcodes 530 through 533 are reserved. */ 67 68 VIRTCHNL2_OP_LOOPBACK = 534, 68 69 VIRTCHNL2_OP_ADD_MAC_ADDR = 535, 69 70 VIRTCHNL2_OP_DEL_MAC_ADDR = 536, ··· 79 78 VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE = 546, 80 79 VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME = 547, 81 80 VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS = 548, 81 + VIRTCHNL2_OP_GET_LAN_MEMORY_REGIONS = 549, 82 82 }; 83 83 84 84 /** ··· 213 211 VIRTCHNL2_CAP_RX_FLEX_DESC = BIT_ULL(17), 214 212 VIRTCHNL2_CAP_PTYPE = BIT_ULL(18), 215 213 VIRTCHNL2_CAP_LOOPBACK = BIT_ULL(19), 216 - /* Other capability 20 is reserved */ 214 + /* Other capability 20-21 is reserved */ 215 + VIRTCHNL2_CAP_LAN_MEMORY_REGIONS = BIT_ULL(22), 217 216 218 217 /* this must be the last capability */ 219 218 VIRTCHNL2_CAP_OEM = BIT_ULL(63), ··· 486 483 * segment offload. 487 484 * @max_hdr_buf_per_lso: Max number of header buffers that can be used for 488 485 * an LSO. 486 + * @num_rdma_allocated_vectors: Maximum number of allocated RDMA vectors for 487 + * the device. 489 488 * @pad1: Padding for future extensions. 490 489 * 491 490 * Dataplane driver sends this message to CP to negotiate capabilities and ··· 535 530 __le32 device_type; 536 531 u8 min_sso_packet_len; 537 532 u8 max_hdr_buf_per_lso; 538 - u8 pad1[10]; 533 + __le16 num_rdma_allocated_vectors; 534 + u8 pad1[8]; 539 535 }; 540 536 VIRTCHNL2_CHECK_STRUCT_LEN(80, virtchnl2_get_capabilities); 541 537 ··· 578 572 /** 579 573 * enum virtchnl2_vport_flags - Vport flags that indicate vport capabilities. 
580 574 * @VIRTCHNL2_VPORT_UPLINK_PORT: Representatives of underlying physical ports 575 + * @VIRTCHNL2_VPORT_ENABLE_RDMA: RDMA is enabled for this vport 581 576 */ 582 577 enum virtchnl2_vport_flags { 583 578 VIRTCHNL2_VPORT_UPLINK_PORT = BIT(0), 579 + /* VIRTCHNL2_VPORT_* bits [1:3] rsvd */ 580 + VIRTCHNL2_VPORT_ENABLE_RDMA = BIT(4), 584 581 }; 585 582 586 583 /** ··· 1588 1579 __le64 delta; 1589 1580 }; 1590 1581 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_adj_dev_clk_time); 1582 + 1583 + /** 1584 + * struct virtchnl2_mem_region - MMIO memory region 1585 + * @start_offset: starting offset of the MMIO memory region 1586 + * @size: size of the MMIO memory region 1587 + */ 1588 + struct virtchnl2_mem_region { 1589 + __le64 start_offset; 1590 + __le64 size; 1591 + }; 1592 + VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_mem_region); 1593 + 1594 + /** 1595 + * struct virtchnl2_get_lan_memory_regions - List of LAN MMIO memory regions 1596 + * @num_memory_regions: number of memory regions 1597 + * @pad: Padding 1598 + * @mem_reg: List with memory region info 1599 + * 1600 + * PF/VF sends this message to learn what LAN MMIO memory regions it should map. 1601 + */ 1602 + struct virtchnl2_get_lan_memory_regions { 1603 + __le16 num_memory_regions; 1604 + u8 pad[6]; 1605 + struct virtchnl2_mem_region mem_reg[]; 1606 + }; 1607 + VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_get_lan_memory_regions); 1591 1608 1592 1609 #endif /* _VIRTCHNL_2_H_ */
+55
include/linux/net/intel/iidc_rdma_idpf.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Copyright (C) 2025 Intel Corporation. */ 3 + 4 + #ifndef _IIDC_RDMA_IDPF_H_ 5 + #define _IIDC_RDMA_IDPF_H_ 6 + 7 + #include <linux/auxiliary_bus.h> 8 + 9 + /* struct to be populated by core LAN PCI driver */ 10 + struct iidc_rdma_vport_dev_info { 11 + struct auxiliary_device *adev; 12 + struct auxiliary_device *core_adev; 13 + struct net_device *netdev; 14 + u16 vport_id; 15 + }; 16 + 17 + struct iidc_rdma_vport_auxiliary_dev { 18 + struct auxiliary_device adev; 19 + struct iidc_rdma_vport_dev_info *vdev_info; 20 + }; 21 + 22 + struct iidc_rdma_vport_auxiliary_drv { 23 + struct auxiliary_driver adrv; 24 + void (*event_handler)(struct iidc_rdma_vport_dev_info *vdev, 25 + struct iidc_rdma_event *event); 26 + }; 27 + 28 + /* struct to be populated by core LAN PCI driver */ 29 + enum iidc_function_type { 30 + IIDC_FUNCTION_TYPE_PF, 31 + IIDC_FUNCTION_TYPE_VF, 32 + }; 33 + 34 + struct iidc_rdma_lan_mapped_mem_region { 35 + u8 __iomem *region_addr; 36 + __le64 size; 37 + __le64 start_offset; 38 + }; 39 + 40 + struct iidc_rdma_priv_dev_info { 41 + struct msix_entry *msix_entries; 42 + u16 msix_count; /* How many vectors are reserved for this device */ 43 + enum iidc_function_type ftype; 44 + __le16 num_memory_regions; 45 + struct iidc_rdma_lan_mapped_mem_region *mapped_mem_regions; 46 + }; 47 + 48 + int idpf_idc_vport_dev_ctrl(struct iidc_rdma_core_dev_info *cdev_info, bool up); 49 + int idpf_idc_request_reset(struct iidc_rdma_core_dev_info *cdev_info, 50 + enum iidc_rdma_reset_type __always_unused reset_type); 51 + int idpf_idc_rdma_vc_send_sync(struct iidc_rdma_core_dev_info *cdev_info, 52 + u8 *send_msg, u16 msg_size, 53 + u8 *recv_msg, u16 *recv_len); 54 + 55 + #endif /* _IIDC_RDMA_IDPF_H_ */