Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/linux

Tony Nguyen says:

====================
Prepare for Intel IPU E2000 (GEN3)

This is the first part in introducing RDMA support for idpf.

----------------------------------------------------------------
Tatyana Nikolova says:

To align with review comments, the patch series introducing RDMA
RoCEv2 support for the Intel Infrastructure Processing Unit (IPU)
E2000 line of products is going to be submitted in three parts:

1. Modify ice to use specific and common IIDC definitions and
pass a core device info to irdma.

2. Add RDMA support to idpf and modify idpf to use specific and
common IIDC definitions and pass a core device info to irdma.

3. Add RDMA RoCEv2 support for the E2000 products, referred to as
GEN3 to irdma.

This first part is a 5-patch series based on the original
"iidc/ice/irdma: Update IDC to support multiple consumers" patch
to allow for multiple CORE PCI drivers, using the auxbus.

Patches:
1) Move header file to new name for clarity and replace ice
specific DSCP define with a kernel equivalent one in irdma
2) Unify naming convention
3) Separate header file into common and driver specific info
4) Replace ice specific DSCP define with a kernel equivalent
one in ice
5) Implement core device info struct and update drivers to use it
----------------------------------------------------------------

v1: https://lore.kernel.org/20250505212037.2092288-1-anthony.l.nguyen@intel.com

IWL reviews:
[v5] https://lore.kernel.org/20250416021549.606-1-tatyana.e.nikolova@intel.com
[v4] https://lore.kernel.org/20250225050428.2166-1-tatyana.e.nikolova@intel.com
[v3] https://lore.kernel.org/20250207194931.1569-1-tatyana.e.nikolova@intel.com
[v2] https://lore.kernel.org/20240824031924.421-1-tatyana.e.nikolova@intel.com
[v1] https://lore.kernel.org/20240724233917.704-1-tatyana.e.nikolova@intel.com

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/linux:
iidc/ice/irdma: Update IDC to support multiple consumers
ice: Replace ice specific DSCP mapping num with a kernel define
iidc/ice/irdma: Break iidc.h into two headers
iidc/ice/irdma: Rename to iidc_* convention
iidc/ice/irdma: Rename IDC header file

====================

Link: https://patch.msgid.link/20250509200712.2911060-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+452 -288
+1 -1
MAINTAINERS
··· 11902 11902 F: drivers/net/ethernet/intel/ 11903 11903 F: drivers/net/ethernet/intel/*/ 11904 11904 F: include/linux/avf/virtchnl.h 11905 - F: include/linux/net/intel/iidc.h 11905 + F: include/linux/net/intel/*/ 11906 11906 11907 11907 INTEL ETHERNET PROTOCOL DRIVER FOR RDMA 11908 11908 M: Mustafa Ismail <mustafa.ismail@intel.com>
+66 -59
drivers/infiniband/hw/irdma/main.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB 2 2 /* Copyright (c) 2015 - 2021 Intel Corporation */ 3 3 #include "main.h" 4 - #include "../../../net/ethernet/intel/ice/ice.h" 5 4 6 5 MODULE_ALIAS("i40iw"); 7 - MODULE_AUTHOR("Intel Corporation, <e1000-rdma@lists.sourceforge.net>"); 8 6 MODULE_DESCRIPTION("Intel(R) Ethernet Protocol Driver for RDMA"); 9 7 MODULE_LICENSE("Dual BSD/GPL"); 10 8 ··· 59 61 } 60 62 61 63 static void irdma_fill_qos_info(struct irdma_l2params *l2params, 62 - struct iidc_qos_params *qos_info) 64 + struct iidc_rdma_qos_params *qos_info) 63 65 { 64 66 int i; 65 67 ··· 83 85 } 84 86 } 85 87 86 - static void irdma_iidc_event_handler(struct ice_pf *pf, struct iidc_event *event) 88 + static void irdma_iidc_event_handler(struct iidc_rdma_core_dev_info *cdev_info, 89 + struct iidc_rdma_event *event) 87 90 { 88 - struct irdma_device *iwdev = dev_get_drvdata(&pf->adev->dev); 91 + struct irdma_device *iwdev = dev_get_drvdata(&cdev_info->adev->dev); 89 92 struct irdma_l2params l2params = {}; 90 93 91 - if (*event->type & BIT(IIDC_EVENT_AFTER_MTU_CHANGE)) { 94 + if (*event->type & BIT(IIDC_RDMA_EVENT_AFTER_MTU_CHANGE)) { 92 95 ibdev_dbg(&iwdev->ibdev, "CLNT: new MTU = %d\n", iwdev->netdev->mtu); 93 96 if (iwdev->vsi.mtu != iwdev->netdev->mtu) { 94 97 l2params.mtu = iwdev->netdev->mtu; ··· 97 98 irdma_log_invalid_mtu(l2params.mtu, &iwdev->rf->sc_dev); 98 99 irdma_change_l2params(&iwdev->vsi, &l2params); 99 100 } 100 - } else if (*event->type & BIT(IIDC_EVENT_BEFORE_TC_CHANGE)) { 101 + } else if (*event->type & BIT(IIDC_RDMA_EVENT_BEFORE_TC_CHANGE)) { 101 102 if (iwdev->vsi.tc_change_pending) 102 103 return; 103 104 104 105 irdma_prep_tc_change(iwdev); 105 - } else if (*event->type & BIT(IIDC_EVENT_AFTER_TC_CHANGE)) { 106 - struct iidc_qos_params qos_info = {}; 106 + } else if (*event->type & BIT(IIDC_RDMA_EVENT_AFTER_TC_CHANGE)) { 107 + struct iidc_rdma_priv_dev_info *iidc_priv = cdev_info->iidc_priv; 107 108 108 109 if 
(!iwdev->vsi.tc_change_pending) 109 110 return; 110 111 111 112 l2params.tc_changed = true; 112 113 ibdev_dbg(&iwdev->ibdev, "CLNT: TC Change\n"); 113 - ice_get_qos_params(pf, &qos_info); 114 - irdma_fill_qos_info(&l2params, &qos_info); 114 + 115 + irdma_fill_qos_info(&l2params, &iidc_priv->qos_info); 115 116 if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY) 116 - iwdev->dcb_vlan_mode = qos_info.num_tc > 1 && !l2params.dscp_mode; 117 + iwdev->dcb_vlan_mode = 118 + l2params.num_tc > 1 && !l2params.dscp_mode; 117 119 irdma_change_l2params(&iwdev->vsi, &l2params); 118 - } else if (*event->type & BIT(IIDC_EVENT_CRIT_ERR)) { 120 + } else if (*event->type & BIT(IIDC_RDMA_EVENT_CRIT_ERR)) { 119 121 ibdev_warn(&iwdev->ibdev, "ICE OICR event notification: oicr = 0x%08x\n", 120 122 event->reg); 121 123 if (event->reg & IRDMAPFINT_OICR_PE_CRITERR_M) { ··· 151 151 */ 152 152 static void irdma_request_reset(struct irdma_pci_f *rf) 153 153 { 154 - struct ice_pf *pf = rf->cdev; 155 - 156 154 ibdev_warn(&rf->iwdev->ibdev, "Requesting a reset\n"); 157 - ice_rdma_request_reset(pf, IIDC_PFR); 155 + ice_rdma_request_reset(rf->cdev, IIDC_FUNC_RESET); 158 156 } 159 157 160 158 /** ··· 164 166 struct irdma_ws_node *tc_node) 165 167 { 166 168 struct irdma_device *iwdev = vsi->back_vsi; 167 - struct ice_pf *pf = iwdev->rf->cdev; 169 + struct iidc_rdma_core_dev_info *cdev_info; 168 170 struct iidc_rdma_qset_params qset = {}; 169 171 int ret; 170 172 173 + cdev_info = iwdev->rf->cdev; 171 174 qset.qs_handle = tc_node->qs_handle; 172 175 qset.tc = tc_node->traffic_class; 173 176 qset.vport_id = vsi->vsi_idx; 174 - ret = ice_add_rdma_qset(pf, &qset); 177 + ret = ice_add_rdma_qset(cdev_info, &qset); 175 178 if (ret) { 176 179 ibdev_dbg(&iwdev->ibdev, "WS: LAN alloc_res for rdma qset failed.\n"); 177 180 return ret; ··· 193 194 struct irdma_ws_node *tc_node) 194 195 { 195 196 struct irdma_device *iwdev = vsi->back_vsi; 196 - struct ice_pf *pf = iwdev->rf->cdev; 197 + struct 
iidc_rdma_core_dev_info *cdev_info; 197 198 struct iidc_rdma_qset_params qset = {}; 198 199 200 + cdev_info = iwdev->rf->cdev; 199 201 qset.qs_handle = tc_node->qs_handle; 200 202 qset.tc = tc_node->traffic_class; 201 203 qset.vport_id = vsi->vsi_idx; 202 204 qset.teid = tc_node->l2_sched_node_id; 203 205 204 - if (ice_del_rdma_qset(pf, &qset)) 206 + if (ice_del_rdma_qset(cdev_info, &qset)) 205 207 ibdev_dbg(&iwdev->ibdev, "WS: LAN free_res for rdma qset failed.\n"); 206 208 } 207 209 208 - static int irdma_init_interrupts(struct irdma_pci_f *rf, struct ice_pf *pf) 210 + static int irdma_init_interrupts(struct irdma_pci_f *rf, struct iidc_rdma_core_dev_info *cdev) 209 211 { 210 212 int i; 211 213 ··· 217 217 return -ENOMEM; 218 218 219 219 for (i = 0; i < rf->msix_count; i++) 220 - if (ice_alloc_rdma_qvector(pf, &rf->msix_entries[i])) 220 + if (ice_alloc_rdma_qvector(cdev, &rf->msix_entries[i])) 221 221 break; 222 222 223 223 if (i < IRDMA_MIN_MSIX) { 224 224 for (; i > 0; i--) 225 - ice_free_rdma_qvector(pf, &rf->msix_entries[i]); 225 + ice_free_rdma_qvector(cdev, &rf->msix_entries[i]); 226 226 227 227 kfree(rf->msix_entries); 228 228 return -ENOMEM; ··· 233 233 return 0; 234 234 } 235 235 236 - static void irdma_deinit_interrupts(struct irdma_pci_f *rf, struct ice_pf *pf) 236 + static void irdma_deinit_interrupts(struct irdma_pci_f *rf, struct iidc_rdma_core_dev_info *cdev) 237 237 { 238 238 int i; 239 239 240 240 for (i = 0; i < rf->msix_count; i++) 241 - ice_free_rdma_qvector(pf, &rf->msix_entries[i]); 241 + ice_free_rdma_qvector(cdev, &rf->msix_entries[i]); 242 242 243 243 kfree(rf->msix_entries); 244 244 } 245 245 246 246 static void irdma_remove(struct auxiliary_device *aux_dev) 247 247 { 248 - struct iidc_auxiliary_dev *iidc_adev = container_of(aux_dev, 249 - struct iidc_auxiliary_dev, 250 - adev); 251 - struct ice_pf *pf = iidc_adev->pf; 252 248 struct irdma_device *iwdev = auxiliary_get_drvdata(aux_dev); 249 + struct iidc_rdma_core_auxiliary_dev 
*iidc_adev; 250 + struct iidc_rdma_core_dev_info *cdev_info; 253 251 252 + iidc_adev = container_of(aux_dev, struct iidc_rdma_core_auxiliary_dev, adev); 253 + cdev_info = iidc_adev->cdev_info; 254 + 255 + ice_rdma_update_vsi_filter(cdev_info, iwdev->vsi_num, false); 254 256 irdma_ib_unregister_device(iwdev); 255 - ice_rdma_update_vsi_filter(pf, iwdev->vsi_num, false); 256 - irdma_deinit_interrupts(iwdev->rf, pf); 257 + irdma_deinit_interrupts(iwdev->rf, cdev_info); 257 258 258 - pr_debug("INIT: Gen2 PF[%d] device remove success\n", PCI_FUNC(pf->pdev->devfn)); 259 + pr_debug("INIT: Gen2 PF[%d] device remove success\n", PCI_FUNC(cdev_info->pdev->devfn)); 259 260 } 260 261 261 - static void irdma_fill_device_info(struct irdma_device *iwdev, struct ice_pf *pf, 262 - struct ice_vsi *vsi) 262 + static void irdma_fill_device_info(struct irdma_device *iwdev, 263 + struct iidc_rdma_core_dev_info *cdev_info) 263 264 { 265 + struct iidc_rdma_priv_dev_info *iidc_priv = cdev_info->iidc_priv; 264 266 struct irdma_pci_f *rf = iwdev->rf; 265 267 266 - rf->cdev = pf; 268 + rf->sc_dev.hw = &rf->hw; 269 + rf->iwdev = iwdev; 270 + rf->cdev = cdev_info; 271 + rf->hw.hw_addr = iidc_priv->hw_addr; 272 + rf->pcidev = cdev_info->pdev; 273 + rf->hw.device = &rf->pcidev->dev; 274 + rf->pf_id = iidc_priv->pf_id; 267 275 rf->gen_ops.register_qset = irdma_lan_register_qset; 268 276 rf->gen_ops.unregister_qset = irdma_lan_unregister_qset; 269 - rf->hw.hw_addr = pf->hw.hw_addr; 270 - rf->pcidev = pf->pdev; 271 - rf->pf_id = pf->hw.pf_id; 272 - rf->default_vsi.vsi_idx = vsi->vsi_num; 273 - rf->protocol_used = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? 274 - IRDMA_ROCE_PROTOCOL_ONLY : IRDMA_IWARP_PROTOCOL_ONLY; 277 + 278 + rf->default_vsi.vsi_idx = iidc_priv->vport_id; 279 + rf->protocol_used = 280 + cdev_info->rdma_protocol == IIDC_RDMA_PROTOCOL_ROCEV2 ? 
281 + IRDMA_ROCE_PROTOCOL_ONLY : IRDMA_IWARP_PROTOCOL_ONLY; 275 282 rf->rdma_ver = IRDMA_GEN_2; 276 283 rf->rsrc_profile = IRDMA_HMC_PROFILE_DEFAULT; 277 284 rf->rst_to = IRDMA_RST_TIMEOUT_HZ; 278 285 rf->gen_ops.request_reset = irdma_request_reset; 279 286 rf->limits_sel = 7; 280 287 rf->iwdev = iwdev; 288 + 281 289 mutex_init(&iwdev->ah_tbl_lock); 282 - iwdev->netdev = vsi->netdev; 283 - iwdev->vsi_num = vsi->vsi_num; 290 + 291 + iwdev->netdev = iidc_priv->netdev; 292 + iwdev->vsi_num = iidc_priv->vport_id; 284 293 iwdev->init_state = INITIAL_STATE; 285 294 iwdev->roce_cwnd = IRDMA_ROCE_CWND_DEFAULT; 286 295 iwdev->roce_ackcreds = IRDMA_ROCE_ACKCREDS_DEFAULT; ··· 301 292 302 293 static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_device_id *id) 303 294 { 304 - struct iidc_auxiliary_dev *iidc_adev = container_of(aux_dev, 305 - struct iidc_auxiliary_dev, 306 - adev); 307 - struct ice_pf *pf = iidc_adev->pf; 308 - struct ice_vsi *vsi = ice_get_main_vsi(pf); 309 - struct iidc_qos_params qos_info = {}; 295 + struct iidc_rdma_core_auxiliary_dev *iidc_adev; 296 + struct iidc_rdma_core_dev_info *cdev_info; 297 + struct iidc_rdma_priv_dev_info *iidc_priv; 298 + struct irdma_l2params l2params = {}; 310 299 struct irdma_device *iwdev; 311 300 struct irdma_pci_f *rf; 312 - struct irdma_l2params l2params = {}; 313 301 int err; 314 302 315 - if (!vsi) 316 - return -EIO; 303 + iidc_adev = container_of(aux_dev, struct iidc_rdma_core_auxiliary_dev, adev); 304 + cdev_info = iidc_adev->cdev_info; 305 + iidc_priv = cdev_info->iidc_priv; 306 + 317 307 iwdev = ib_alloc_device(irdma_device, ibdev); 318 308 if (!iwdev) 319 309 return -ENOMEM; ··· 322 314 return -ENOMEM; 323 315 } 324 316 325 - irdma_fill_device_info(iwdev, pf, vsi); 317 + irdma_fill_device_info(iwdev, cdev_info); 326 318 rf = iwdev->rf; 327 319 328 - err = irdma_init_interrupts(rf, pf); 320 + err = irdma_init_interrupts(rf, cdev_info); 329 321 if (err) 330 322 goto err_init_interrupts; 331 
323 ··· 334 326 goto err_ctrl_init; 335 327 336 328 l2params.mtu = iwdev->netdev->mtu; 337 - ice_get_qos_params(pf, &qos_info); 338 - irdma_fill_qos_info(&l2params, &qos_info); 329 + irdma_fill_qos_info(&l2params, &iidc_priv->qos_info); 339 330 if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY) 340 331 iwdev->dcb_vlan_mode = l2params.num_tc > 1 && !l2params.dscp_mode; 341 332 ··· 346 339 if (err) 347 340 goto err_ibreg; 348 341 349 - ice_rdma_update_vsi_filter(pf, iwdev->vsi_num, true); 342 + ice_rdma_update_vsi_filter(cdev_info, iwdev->vsi_num, true); 350 343 351 344 ibdev_dbg(&iwdev->ibdev, "INIT: Gen2 PF[%d] device probe success\n", PCI_FUNC(rf->pcidev->devfn)); 352 345 auxiliary_set_drvdata(aux_dev, iwdev); ··· 358 351 err_rt_init: 359 352 irdma_ctrl_deinit_hw(rf); 360 353 err_ctrl_init: 361 - irdma_deinit_interrupts(rf, pf); 354 + irdma_deinit_interrupts(rf, cdev_info); 362 355 err_init_interrupts: 363 356 kfree(iwdev->rf); 364 357 ib_dealloc_device(&iwdev->ibdev); ··· 374 367 375 368 MODULE_DEVICE_TABLE(auxiliary, irdma_auxiliary_id_table); 376 369 377 - static struct iidc_auxiliary_drv irdma_auxiliary_drv = { 370 + static struct iidc_rdma_core_auxiliary_drv irdma_auxiliary_drv = { 378 371 .adrv = { 379 372 .id_table = irdma_auxiliary_id_table, 380 373 .probe = irdma_probe,
+2 -1
drivers/infiniband/hw/irdma/main.h
··· 29 29 #include <linux/io-64-nonatomic-lo-hi.h> 30 30 #endif 31 31 #include <linux/auxiliary_bus.h> 32 - #include <linux/net/intel/iidc.h> 32 + #include <linux/net/intel/iidc_rdma.h> 33 + #include <linux/net/intel/iidc_rdma_ice.h> 33 34 #include <rdma/ib_smi.h> 34 35 #include <rdma/ib_verbs.h> 35 36 #include <rdma/ib_pack.h>
+1 -1
drivers/infiniband/hw/irdma/osdep.h
··· 5 5 6 6 #include <linux/pci.h> 7 7 #include <linux/bitfield.h> 8 - #include <linux/net/intel/iidc.h> 9 8 #include <rdma/ib_verbs.h> 9 + #include <net/dscp.h> 10 10 11 11 #define STATS_TIMER_DELAY 60000 12 12
+2 -2
drivers/infiniband/hw/irdma/type.h
··· 567 567 u8 qos_rel_bw; 568 568 u8 qos_prio_type; 569 569 u8 stats_idx; 570 - u8 dscp_map[IIDC_MAX_DSCP_MAPPING]; 570 + u8 dscp_map[DSCP_MAX]; 571 571 struct irdma_qos qos[IRDMA_MAX_USER_PRIORITY]; 572 572 u64 hw_stats_regs[IRDMA_HW_STAT_INDEX_MAX_GEN_1]; 573 573 bool dscp_mode:1; ··· 695 695 u16 qs_handle_list[IRDMA_MAX_USER_PRIORITY]; 696 696 u16 mtu; 697 697 u8 up2tc[IRDMA_MAX_USER_PRIORITY]; 698 - u8 dscp_map[IIDC_MAX_DSCP_MAPPING]; 698 + u8 dscp_map[DSCP_MAX]; 699 699 u8 num_tc; 700 700 u8 vsi_rel_bw; 701 701 u8 vsi_prio_type;
+35 -10
drivers/net/ethernet/intel/ice/devlink/devlink.c
··· 1339 1339 struct devlink_param_gset_ctx *ctx) 1340 1340 { 1341 1341 struct ice_pf *pf = devlink_priv(devlink); 1342 + struct iidc_rdma_core_dev_info *cdev; 1342 1343 1343 - ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? true : false; 1344 + cdev = pf->cdev_info; 1345 + if (!cdev) 1346 + return -ENODEV; 1347 + 1348 + ctx->val.vbool = !!(cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_ROCEV2); 1344 1349 1345 1350 return 0; 1346 1351 } ··· 1355 1350 struct netlink_ext_ack *extack) 1356 1351 { 1357 1352 struct ice_pf *pf = devlink_priv(devlink); 1353 + struct iidc_rdma_core_dev_info *cdev; 1358 1354 bool roce_ena = ctx->val.vbool; 1359 1355 int ret; 1360 1356 1357 + cdev = pf->cdev_info; 1358 + if (!cdev) 1359 + return -ENODEV; 1360 + 1361 1361 if (!roce_ena) { 1362 1362 ice_unplug_aux_dev(pf); 1363 - pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_ROCEV2; 1363 + cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_ROCEV2; 1364 1364 return 0; 1365 1365 } 1366 1366 1367 - pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2; 1367 + cdev->rdma_protocol |= IIDC_RDMA_PROTOCOL_ROCEV2; 1368 1368 ret = ice_plug_aux_dev(pf); 1369 1369 if (ret) 1370 - pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_ROCEV2; 1370 + cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_ROCEV2; 1371 1371 1372 1372 return ret; 1373 1373 } ··· 1383 1373 struct netlink_ext_ack *extack) 1384 1374 { 1385 1375 struct ice_pf *pf = devlink_priv(devlink); 1376 + struct iidc_rdma_core_dev_info *cdev; 1377 + 1378 + cdev = pf->cdev_info; 1379 + if (!cdev) 1380 + return -ENODEV; 1386 1381 1387 1382 if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) 1388 1383 return -EOPNOTSUPP; 1389 1384 1390 - if (pf->rdma_mode & IIDC_RDMA_PROTOCOL_IWARP) { 1385 + if (cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_IWARP) { 1391 1386 NL_SET_ERR_MSG_MOD(extack, "iWARP is currently enabled. 
This device cannot enable iWARP and RoCEv2 simultaneously"); 1392 1387 return -EOPNOTSUPP; 1393 1388 } ··· 1405 1390 struct devlink_param_gset_ctx *ctx) 1406 1391 { 1407 1392 struct ice_pf *pf = devlink_priv(devlink); 1393 + struct iidc_rdma_core_dev_info *cdev; 1408 1394 1409 - ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_IWARP; 1395 + cdev = pf->cdev_info; 1396 + if (!cdev) 1397 + return -ENODEV; 1398 + 1399 + ctx->val.vbool = !!(cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_IWARP); 1410 1400 1411 1401 return 0; 1412 1402 } ··· 1421 1401 struct netlink_ext_ack *extack) 1422 1402 { 1423 1403 struct ice_pf *pf = devlink_priv(devlink); 1404 + struct iidc_rdma_core_dev_info *cdev; 1424 1405 bool iw_ena = ctx->val.vbool; 1425 1406 int ret; 1426 1407 1408 + cdev = pf->cdev_info; 1409 + if (!cdev) 1410 + return -ENODEV; 1411 + 1427 1412 if (!iw_ena) { 1428 1413 ice_unplug_aux_dev(pf); 1429 - pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_IWARP; 1414 + cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_IWARP; 1430 1415 return 0; 1431 1416 } 1432 1417 1433 - pf->rdma_mode |= IIDC_RDMA_PROTOCOL_IWARP; 1418 + cdev->rdma_protocol |= IIDC_RDMA_PROTOCOL_IWARP; 1434 1419 ret = ice_plug_aux_dev(pf); 1435 1420 if (ret) 1436 - pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_IWARP; 1421 + cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_IWARP; 1437 1422 1438 1423 return ret; 1439 1424 } ··· 1453 1428 if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) 1454 1429 return -EOPNOTSUPP; 1455 1430 1456 - if (pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2) { 1431 + if (pf->cdev_info->rdma_protocol & IIDC_RDMA_PROTOCOL_ROCEV2) { 1457 1432 NL_SET_ERR_MSG_MOD(extack, "RoCEv2 is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously"); 1458 1433 return -EOPNOTSUPP; 1459 1434 }
+1 -5
drivers/net/ethernet/intel/ice/ice.h
··· 399 399 u16 req_rxq; /* User requested Rx queues */ 400 400 u16 num_rx_desc; 401 401 u16 num_tx_desc; 402 - u16 qset_handle[ICE_MAX_TRAFFIC_CLASS]; 403 402 struct ice_tc_cfg tc_cfg; 404 403 struct bpf_prog *xdp_prog; 405 404 struct ice_tx_ring **xdp_rings; /* XDP ring array */ ··· 555 556 struct devlink_port devlink_port; 556 557 557 558 /* OS reserved IRQ details */ 558 - struct msix_entry *msix_entries; 559 559 struct ice_irq_tracker irq_tracker; 560 560 struct ice_virt_irq_tracker virt_irq_tracker; 561 561 ··· 589 591 struct gnss_serial *gnss_serial; 590 592 struct gnss_device *gnss_dev; 591 593 u16 num_rdma_msix; /* Total MSIX vectors for RDMA driver */ 592 - u16 rdma_base_vector; 593 594 594 595 /* spinlock to protect the AdminQ wait list */ 595 596 spinlock_t aq_wait_lock; ··· 621 624 struct ice_hw_port_stats stats_prev; 622 625 struct ice_hw hw; 623 626 u8 stat_prev_loaded:1; /* has previous stats been loaded */ 624 - u8 rdma_mode; 625 627 u16 dcbx_cap; 626 628 u32 tx_timeout_count; 627 629 unsigned long tx_timeout_last_recovery; 628 630 u32 tx_timeout_recovery_level; 629 631 char int_name[ICE_INT_NAME_STR_LEN]; 630 632 char int_name_ll_ts[ICE_INT_NAME_STR_LEN]; 631 - struct auxiliary_device *adev; 632 633 int aux_idx; 633 634 u32 sw_int_count; 634 635 /* count of tc_flower filters specific to channel (aka where filter ··· 658 663 struct ice_dplls dplls; 659 664 struct device *hwmon_dev; 660 665 struct ice_health health_reporters; 666 + struct iidc_rdma_core_dev_info *cdev_info; 661 667 662 668 u8 num_quanta_prof_used; 663 669 };
+1 -1
drivers/net/ethernet/intel/ice/ice_dcb.c
··· 1288 1288 tlv->ouisubtype = htonl(ouisubtype); 1289 1289 1290 1290 /* bytes 0 - 63 - IPv4 DSCP2UP LUT */ 1291 - for (i = 0; i < ICE_DSCP_NUM_VAL; i++) { 1291 + for (i = 0; i < DSCP_MAX; i++) { 1292 1292 /* IPv4 mapping */ 1293 1293 buf[i] = dcbcfg->dscp_map[i]; 1294 1294 /* IPv6 mapping */
+42 -5
drivers/net/ethernet/intel/ice/ice_dcb_lib.c
··· 352 352 struct ice_aqc_port_ets_elem buf = { 0 }; 353 353 struct ice_dcbx_cfg *old_cfg, *curr_cfg; 354 354 struct device *dev = ice_pf_to_dev(pf); 355 + struct iidc_rdma_event *event; 355 356 int ret = ICE_DCB_NO_HW_CHG; 356 - struct iidc_event *event; 357 357 struct ice_vsi *pf_vsi; 358 358 359 359 curr_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; ··· 405 405 goto free_cfg; 406 406 } 407 407 408 - set_bit(IIDC_EVENT_BEFORE_TC_CHANGE, event->type); 408 + set_bit(IIDC_RDMA_EVENT_BEFORE_TC_CHANGE, event->type); 409 409 ice_send_event_to_aux(pf, event); 410 410 kfree(event); 411 411 ··· 740 740 void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked) 741 741 { 742 742 struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; 743 - struct iidc_event *event; 743 + struct iidc_rdma_priv_dev_info *privd; 744 + struct iidc_rdma_core_dev_info *cdev; 745 + struct iidc_rdma_event *event; 744 746 u8 tc_map = 0; 745 747 int v, ret; 746 748 ··· 785 783 if (vsi->type == ICE_VSI_PF) 786 784 ice_dcbnl_set_all(vsi); 787 785 } 788 - if (!locked) { 786 + 787 + cdev = pf->cdev_info; 788 + if (cdev && !locked) { 789 + privd = cdev->iidc_priv; 790 + ice_setup_dcb_qos_info(pf, &privd->qos_info); 789 791 /* Notify the AUX drivers that TC change is finished */ 790 792 event = kzalloc(sizeof(*event), GFP_KERNEL); 791 793 if (!event) 792 794 return; 793 795 794 - set_bit(IIDC_EVENT_AFTER_TC_CHANGE, event->type); 796 + set_bit(IIDC_RDMA_EVENT_AFTER_TC_CHANGE, event->type); 795 797 ice_send_event_to_aux(pf, event); 796 798 kfree(event); 797 799 } ··· 948 942 else 949 943 first->tx_flags |= ICE_TX_FLAGS_HW_VLAN; 950 944 } 945 + } 946 + 947 + /** 948 + * ice_setup_dcb_qos_info - Setup DCB QoS information 949 + * @pf: ptr to ice_pf 950 + * @qos_info: QoS param instance 951 + */ 952 + void ice_setup_dcb_qos_info(struct ice_pf *pf, struct iidc_rdma_qos_params *qos_info) 953 + { 954 + struct ice_dcbx_cfg *dcbx_cfg; 955 + unsigned int i; 956 + u32 up2tc; 957 + 958 + if (!pf || 
!qos_info) 959 + return; 960 + 961 + dcbx_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; 962 + up2tc = rd32(&pf->hw, PRTDCB_TUP2TC); 963 + 964 + qos_info->num_tc = ice_dcb_get_num_tc(dcbx_cfg); 965 + 966 + for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++) 967 + qos_info->up2tc[i] = (up2tc >> (i * 3)) & 0x7; 968 + 969 + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) 970 + qos_info->tc_info[i].rel_bw = dcbx_cfg->etscfg.tcbwtable[i]; 971 + 972 + qos_info->pfc_mode = dcbx_cfg->pfc_mode; 973 + if (qos_info->pfc_mode == IIDC_DSCP_PFC_MODE) 974 + for (i = 0; i < DSCP_MAX; i++) 975 + qos_info->dscp_map[i] = dcbx_cfg->dscp_map[i]; 951 976 } 952 977 953 978 /**
+9
drivers/net/ethernet/intel/ice/ice_dcb_lib.h
··· 31 31 ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring, 32 32 struct ice_tx_buf *first); 33 33 void 34 + ice_setup_dcb_qos_info(struct ice_pf *pf, 35 + struct iidc_rdma_qos_params *qos_info); 36 + void 34 37 ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, 35 38 struct ice_rq_event_info *event); 36 39 /** ··· 137 134 static inline void 138 135 ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, struct ice_rq_event_info *event) { } 139 136 static inline void ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, u8 dcb_tc) { } 137 + static inline void 138 + ice_setup_dcb_qos_info(struct ice_pf *pf, struct iidc_rdma_qos_params *qos_info) 139 + { 140 + qos_info->num_tc = 1; 141 + qos_info->tc_info[0].rel_bw = 100; 142 + } 140 143 #endif /* CONFIG_DCB */ 141 144 #endif /* _ICE_DCB_LIB_H_ */
+2 -2
drivers/net/ethernet/intel/ice/ice_dcb_nl.c
··· 754 754 if (!ice_is_feature_supported(pf, ICE_F_DSCP)) 755 755 return -EOPNOTSUPP; 756 756 757 - if (app->protocol >= ICE_DSCP_NUM_VAL) { 757 + if (app->protocol >= DSCP_MAX) { 758 758 netdev_err(netdev, "DSCP value 0x%04X out of range\n", 759 759 app->protocol); 760 760 return -EINVAL; ··· 931 931 /* if the last DSCP mapping just got deleted, need to switch 932 932 * to L2 VLAN QoS mode 933 933 */ 934 - if (bitmap_empty(new_cfg->dscp_mapped, ICE_DSCP_NUM_VAL) && 934 + if (bitmap_empty(new_cfg->dscp_mapped, DSCP_MAX) && 935 935 new_cfg->pfc_mode == ICE_QOS_MODE_DSCP) { 936 936 ret = ice_aq_set_pfc_mode(&pf->hw, 937 937 ICE_AQC_PFC_VLAN_BASED_PFC,
+4 -4
drivers/net/ethernet/intel/ice/ice_ethtool.c
··· 3964 3964 return -EINVAL; 3965 3965 } 3966 3966 3967 - if (pf->adev) { 3967 + if (pf->cdev_info && pf->cdev_info->adev) { 3968 3968 mutex_lock(&pf->adev_mutex); 3969 - device_lock(&pf->adev->dev); 3969 + device_lock(&pf->cdev_info->adev->dev); 3970 3970 locked = true; 3971 - if (pf->adev->dev.driver) { 3971 + if (pf->cdev_info->adev->dev.driver) { 3972 3972 netdev_err(dev, "Cannot change channels when RDMA is active\n"); 3973 3973 ret = -EBUSY; 3974 3974 goto adev_unlock; ··· 3987 3987 3988 3988 adev_unlock: 3989 3989 if (locked) { 3990 - device_unlock(&pf->adev->dev); 3990 + device_unlock(&pf->cdev_info->adev->dev); 3991 3991 mutex_unlock(&pf->adev_mutex); 3992 3992 } 3993 3993 return ret;
+132 -75
drivers/net/ethernet/intel/ice/ice_idc.c
··· 9 9 static DEFINE_XARRAY_ALLOC1(ice_aux_id); 10 10 11 11 /** 12 - * ice_get_auxiliary_drv - retrieve iidc_auxiliary_drv struct 13 - * @pf: pointer to PF struct 12 + * ice_get_auxiliary_drv - retrieve iidc_rdma_core_auxiliary_drv struct 13 + * @cdev: pointer to iidc_rdma_core_dev_info struct 14 14 * 15 15 * This function has to be called with a device_lock on the 16 - * pf->adev.dev to avoid race conditions. 16 + * cdev->adev.dev to avoid race conditions. 17 + * 18 + * Return: pointer to the matched auxiliary driver struct 17 19 */ 18 - static struct iidc_auxiliary_drv *ice_get_auxiliary_drv(struct ice_pf *pf) 20 + static struct iidc_rdma_core_auxiliary_drv * 21 + ice_get_auxiliary_drv(struct iidc_rdma_core_dev_info *cdev) 19 22 { 20 23 struct auxiliary_device *adev; 21 24 22 - adev = pf->adev; 25 + adev = cdev->adev; 23 26 if (!adev || !adev->dev.driver) 24 27 return NULL; 25 28 26 - return container_of(adev->dev.driver, struct iidc_auxiliary_drv, 27 - adrv.driver); 29 + return container_of(adev->dev.driver, 30 + struct iidc_rdma_core_auxiliary_drv, adrv.driver); 28 31 } 29 32 30 33 /** ··· 35 32 * @pf: pointer to PF struct 36 33 * @event: event struct 37 34 */ 38 - void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event) 35 + void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_rdma_event *event) 39 36 { 40 - struct iidc_auxiliary_drv *iadrv; 37 + struct iidc_rdma_core_auxiliary_drv *iadrv; 38 + struct iidc_rdma_core_dev_info *cdev; 41 39 42 40 if (WARN_ON_ONCE(!in_task())) 43 41 return; 44 42 43 + cdev = pf->cdev_info; 44 + if (!cdev) 45 + return; 46 + 45 47 mutex_lock(&pf->adev_mutex); 46 - if (!pf->adev) 48 + if (!cdev->adev) 47 49 goto finish; 48 50 49 - device_lock(&pf->adev->dev); 50 - iadrv = ice_get_auxiliary_drv(pf); 51 + device_lock(&cdev->adev->dev); 52 + iadrv = ice_get_auxiliary_drv(cdev); 51 53 if (iadrv && iadrv->event_handler) 52 - iadrv->event_handler(pf, event); 53 - device_unlock(&pf->adev->dev); 54 + 
iadrv->event_handler(cdev, event); 55 + device_unlock(&cdev->adev->dev); 54 56 finish: 55 57 mutex_unlock(&pf->adev_mutex); 56 58 } 57 59 58 60 /** 59 61 * ice_add_rdma_qset - Add Leaf Node for RDMA Qset 60 - * @pf: PF struct 62 + * @cdev: pointer to iidc_rdma_core_dev_info struct 61 63 * @qset: Resource to be allocated 64 + * 65 + * Return: Zero on success or error code encountered 62 66 */ 63 - int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset) 67 + int ice_add_rdma_qset(struct iidc_rdma_core_dev_info *cdev, 68 + struct iidc_rdma_qset_params *qset) 64 69 { 65 70 u16 max_rdmaqs[ICE_MAX_TRAFFIC_CLASS]; 66 71 struct ice_vsi *vsi; 67 72 struct device *dev; 73 + struct ice_pf *pf; 68 74 u32 qset_teid; 69 75 u16 qs_handle; 70 76 int status; 71 77 int i; 72 78 73 - if (WARN_ON(!pf || !qset)) 79 + if (WARN_ON(!cdev || !qset)) 74 80 return -EINVAL; 75 81 82 + pf = pci_get_drvdata(cdev->pdev); 76 83 dev = ice_pf_to_dev(pf); 77 84 78 85 if (!ice_is_rdma_ena(pf)) ··· 113 100 dev_err(dev, "Failed VSI RDMA Qset enable\n"); 114 101 return status; 115 102 } 116 - vsi->qset_handle[qset->tc] = qset->qs_handle; 117 103 qset->teid = qset_teid; 118 104 119 105 return 0; ··· 121 109 122 110 /** 123 111 * ice_del_rdma_qset - Delete leaf node for RDMA Qset 124 - * @pf: PF struct 112 + * @cdev: pointer to iidc_rdma_core_dev_info struct 125 113 * @qset: Resource to be freed 114 + * 115 + * Return: Zero on success, error code on failure 126 116 */ 127 - int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset) 117 + int ice_del_rdma_qset(struct iidc_rdma_core_dev_info *cdev, 118 + struct iidc_rdma_qset_params *qset) 128 119 { 129 120 struct ice_vsi *vsi; 121 + struct ice_pf *pf; 130 122 u32 teid; 131 123 u16 q_id; 132 124 133 - if (WARN_ON(!pf || !qset)) 125 + if (WARN_ON(!cdev || !qset)) 134 126 return -EINVAL; 135 127 128 + pf = pci_get_drvdata(cdev->pdev); 136 129 vsi = ice_find_vsi(pf, qset->vport_id); 137 130 if (!vsi) { 138 131 
dev_err(ice_pf_to_dev(pf), "RDMA Invalid VSI\n"); ··· 147 130 q_id = qset->qs_handle; 148 131 teid = qset->teid; 149 132 150 - vsi->qset_handle[qset->tc] = 0; 151 - 152 133 return ice_dis_vsi_rdma_qset(vsi->port_info, 1, &teid, &q_id); 153 134 } 154 135 EXPORT_SYMBOL_GPL(ice_del_rdma_qset); 155 136 156 137 /** 157 138 * ice_rdma_request_reset - accept request from RDMA to perform a reset 158 - * @pf: struct for PF 139 + * @cdev: pointer to iidc_rdma_core_dev_info struct 159 140 * @reset_type: type of reset 141 + * 142 + * Return: Zero on success, error code on failure 160 143 */ 161 - int ice_rdma_request_reset(struct ice_pf *pf, enum iidc_reset_type reset_type) 144 + int ice_rdma_request_reset(struct iidc_rdma_core_dev_info *cdev, 145 + enum iidc_rdma_reset_type reset_type) 162 146 { 163 147 enum ice_reset_req reset; 148 + struct ice_pf *pf; 164 149 165 - if (WARN_ON(!pf)) 150 + if (WARN_ON(!cdev)) 166 151 return -EINVAL; 167 152 153 + pf = pci_get_drvdata(cdev->pdev); 154 + 168 155 switch (reset_type) { 169 - case IIDC_PFR: 156 + case IIDC_FUNC_RESET: 170 157 reset = ICE_RESET_PFR; 171 158 break; 172 - case IIDC_CORER: 159 + case IIDC_DEV_RESET: 173 160 reset = ICE_RESET_CORER; 174 161 break; 175 - case IIDC_GLOBR: 176 - reset = ICE_RESET_GLOBR; 177 - break; 178 162 default: 179 - dev_err(ice_pf_to_dev(pf), "incorrect reset request\n"); 180 163 return -EINVAL; 181 164 } 182 165 ··· 186 169 187 170 /** 188 171 * ice_rdma_update_vsi_filter - update main VSI filters for RDMA 189 - * @pf: pointer to struct for PF 172 + * @cdev: pointer to iidc_rdma_core_dev_info struct 190 173 * @vsi_id: VSI HW idx to update filter on 191 174 * @enable: bool whether to enable or disable filters 175 + * 176 + * Return: Zero on success, error code on failure 192 177 */ 193 - int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable) 178 + int ice_rdma_update_vsi_filter(struct iidc_rdma_core_dev_info *cdev, 179 + u16 vsi_id, bool enable) 194 180 { 195 181 struct ice_vsi 
*vsi; 182 + struct ice_pf *pf; 196 183 int status; 197 184 198 - if (WARN_ON(!pf)) 185 + if (WARN_ON(!cdev)) 199 186 return -EINVAL; 200 187 188 + pf = pci_get_drvdata(cdev->pdev); 201 189 vsi = ice_find_vsi(pf, vsi_id); 202 190 if (!vsi) 203 191 return -EINVAL; ··· 223 201 EXPORT_SYMBOL_GPL(ice_rdma_update_vsi_filter); 224 202 225 203 /** 226 - * ice_get_qos_params - parse QoS params for RDMA consumption 227 - * @pf: pointer to PF struct 228 - * @qos: set of QoS values 204 + * ice_alloc_rdma_qvector - alloc vector resources reserved for RDMA driver 205 + * @cdev: pointer to iidc_rdma_core_dev_info struct 206 + * @entry: MSI-X entry to be removed 207 + * 208 + * Return: Zero on success, error code on failure 229 209 */ 230 - void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos) 210 + int ice_alloc_rdma_qvector(struct iidc_rdma_core_dev_info *cdev, 211 + struct msix_entry *entry) 231 212 { 232 - struct ice_dcbx_cfg *dcbx_cfg; 233 - unsigned int i; 234 - u32 up2tc; 213 + struct msi_map map; 214 + struct ice_pf *pf; 235 215 236 - dcbx_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; 237 - up2tc = rd32(&pf->hw, PRTDCB_TUP2TC); 216 + if (WARN_ON(!cdev)) 217 + return -EINVAL; 238 218 239 - qos->num_tc = ice_dcb_get_num_tc(dcbx_cfg); 240 - for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++) 241 - qos->up2tc[i] = (up2tc >> (i * 3)) & 0x7; 242 - 243 - for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) 244 - qos->tc_info[i].rel_bw = dcbx_cfg->etscfg.tcbwtable[i]; 245 - 246 - qos->pfc_mode = dcbx_cfg->pfc_mode; 247 - if (qos->pfc_mode == IIDC_DSCP_PFC_MODE) 248 - for (i = 0; i < IIDC_MAX_DSCP_MAPPING; i++) 249 - qos->dscp_map[i] = dcbx_cfg->dscp_map[i]; 250 - } 251 - EXPORT_SYMBOL_GPL(ice_get_qos_params); 252 - 253 - int ice_alloc_rdma_qvector(struct ice_pf *pf, struct msix_entry *entry) 254 - { 255 - struct msi_map map = ice_alloc_irq(pf, true); 256 - 219 + pf = pci_get_drvdata(cdev->pdev); 220 + map = ice_alloc_irq(pf, true); 257 221 if (map.index < 0) 258 222 return 
-ENOMEM; 259 223 ··· 252 244 253 245 /** 254 246 * ice_free_rdma_qvector - free vector resources reserved for RDMA driver 255 - * @pf: board private structure to initialize 247 + * @cdev: pointer to iidc_rdma_core_dev_info struct 256 248 * @entry: MSI-X entry to be removed 257 249 */ 258 - void ice_free_rdma_qvector(struct ice_pf *pf, struct msix_entry *entry) 250 + void ice_free_rdma_qvector(struct iidc_rdma_core_dev_info *cdev, 251 + struct msix_entry *entry) 259 252 { 260 253 struct msi_map map; 254 + struct ice_pf *pf; 255 + 256 + if (WARN_ON(!cdev || !entry)) 257 + return; 258 + 259 + pf = pci_get_drvdata(cdev->pdev); 261 260 262 261 map.index = entry->entry; 263 262 map.virq = entry->vector; ··· 278 263 */ 279 264 static void ice_adev_release(struct device *dev) 280 265 { 281 - struct iidc_auxiliary_dev *iadev; 266 + struct iidc_rdma_core_auxiliary_dev *iadev; 282 267 283 - iadev = container_of(dev, struct iidc_auxiliary_dev, adev.dev); 268 + iadev = container_of(dev, struct iidc_rdma_core_auxiliary_dev, 269 + adev.dev); 284 270 kfree(iadev); 285 271 } 286 272 287 273 /** 288 274 * ice_plug_aux_dev - allocate and register AUX device 289 275 * @pf: pointer to pf struct 276 + * 277 + * Return: Zero on success, error code on failure 290 278 */ 291 279 int ice_plug_aux_dev(struct ice_pf *pf) 292 280 { 293 - struct iidc_auxiliary_dev *iadev; 281 + struct iidc_rdma_core_auxiliary_dev *iadev; 282 + struct iidc_rdma_core_dev_info *cdev; 294 283 struct auxiliary_device *adev; 295 284 int ret; 296 285 ··· 304 285 if (!ice_is_rdma_ena(pf)) 305 286 return 0; 306 287 288 + cdev = pf->cdev_info; 289 + if (!cdev) 290 + return -ENODEV; 291 + 307 292 iadev = kzalloc(sizeof(*iadev), GFP_KERNEL); 308 293 if (!iadev) 309 294 return -ENOMEM; 310 295 311 296 adev = &iadev->adev; 312 - iadev->pf = pf; 297 + iadev->cdev_info = cdev; 313 298 314 299 adev->id = pf->aux_idx; 315 300 adev->dev.release = ice_adev_release; 316 301 adev->dev.parent = &pf->pdev->dev; 317 - adev->name = 
pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? "roce" : "iwarp"; 302 + adev->name = cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_ROCEV2 ? 303 + "roce" : "iwarp"; 318 304 319 305 ret = auxiliary_device_init(adev); 320 306 if (ret) { ··· 334 310 } 335 311 336 312 mutex_lock(&pf->adev_mutex); 337 - pf->adev = adev; 313 + cdev->adev = adev; 338 314 mutex_unlock(&pf->adev_mutex); 339 315 340 316 return 0; ··· 348 324 struct auxiliary_device *adev; 349 325 350 326 mutex_lock(&pf->adev_mutex); 351 - adev = pf->adev; 352 - pf->adev = NULL; 327 + adev = pf->cdev_info->adev; 328 + pf->cdev_info->adev = NULL; 353 329 mutex_unlock(&pf->adev_mutex); 354 330 355 331 if (adev) { ··· 364 340 */ 365 341 int ice_init_rdma(struct ice_pf *pf) 366 342 { 343 + struct iidc_rdma_priv_dev_info *privd; 367 344 struct device *dev = &pf->pdev->dev; 345 + struct iidc_rdma_core_dev_info *cdev; 368 346 int ret; 369 347 370 348 if (!ice_is_rdma_ena(pf)) { ··· 374 348 return 0; 375 349 } 376 350 351 + cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); 352 + if (!cdev) 353 + return -ENOMEM; 354 + 355 + pf->cdev_info = cdev; 356 + 357 + privd = kzalloc(sizeof(*privd), GFP_KERNEL); 358 + if (!privd) { 359 + ret = -ENOMEM; 360 + goto err_privd_alloc; 361 + } 362 + 363 + privd->pf_id = pf->hw.pf_id; 377 364 ret = xa_alloc(&ice_aux_id, &pf->aux_idx, NULL, XA_LIMIT(1, INT_MAX), 378 365 GFP_KERNEL); 379 366 if (ret) { 380 367 dev_err(dev, "Failed to allocate device ID for AUX driver\n"); 381 - return -ENOMEM; 368 + ret = -ENOMEM; 369 + goto err_alloc_xa; 382 370 } 383 371 384 - pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2; 372 + cdev->iidc_priv = privd; 373 + privd->netdev = pf->vsi[0]->netdev; 374 + 375 + privd->hw_addr = (u8 __iomem *)pf->hw.hw_addr; 376 + cdev->pdev = pf->pdev; 377 + privd->vport_id = pf->vsi[0]->vsi_num; 378 + 379 + pf->cdev_info->rdma_protocol |= IIDC_RDMA_PROTOCOL_ROCEV2; 380 + ice_setup_dcb_qos_info(pf, &privd->qos_info); 385 381 ret = ice_plug_aux_dev(pf); 386 382 if (ret) 387 383 goto 
err_plug_aux_dev; 388 384 return 0; 389 385 390 386 err_plug_aux_dev: 391 - pf->adev = NULL; 387 + pf->cdev_info->adev = NULL; 392 388 xa_erase(&ice_aux_id, pf->aux_idx); 389 + err_alloc_xa: 390 + kfree(privd); 391 + err_privd_alloc: 392 + kfree(cdev); 393 + pf->cdev_info = NULL; 394 + 393 395 return ret; 394 396 } 395 397 ··· 432 378 433 379 ice_unplug_aux_dev(pf); 434 380 xa_erase(&ice_aux_id, pf->aux_idx); 381 + kfree(pf->cdev_info->iidc_priv); 382 + kfree(pf->cdev_info); 383 + pf->cdev_info = NULL; 435 384 }
+3 -2
drivers/net/ethernet/intel/ice/ice_idc_int.h
··· 4 4 #ifndef _ICE_IDC_INT_H_ 5 5 #define _ICE_IDC_INT_H_ 6 6 7 - #include <linux/net/intel/iidc.h> 7 + #include <linux/net/intel/iidc_rdma.h> 8 + #include <linux/net/intel/iidc_rdma_ice.h> 8 9 9 10 struct ice_pf; 10 11 11 - void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event); 12 + void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_rdma_event *event); 12 13 13 14 #endif /* !_ICE_IDC_INT_H_ */
+10 -8
drivers/net/ethernet/intel/ice/ice_main.c
··· 2401 2401 } 2402 2402 2403 2403 if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) { 2404 - struct iidc_event *event; 2404 + struct iidc_rdma_event *event; 2405 2405 2406 2406 event = kzalloc(sizeof(*event), GFP_KERNEL); 2407 2407 if (event) { 2408 - set_bit(IIDC_EVENT_CRIT_ERR, event->type); 2408 + set_bit(IIDC_RDMA_EVENT_CRIT_ERR, event->type); 2409 2409 /* report the entire OICR value to AUX driver */ 2410 2410 swap(event->reg, pf->oicr_err_reg); 2411 2411 ice_send_event_to_aux(pf, event); ··· 2424 2424 ice_plug_aux_dev(pf); 2425 2425 2426 2426 if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) { 2427 - struct iidc_event *event; 2427 + struct iidc_rdma_event *event; 2428 2428 2429 2429 event = kzalloc(sizeof(*event), GFP_KERNEL); 2430 2430 if (event) { 2431 - set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type); 2431 + set_bit(IIDC_RDMA_EVENT_AFTER_MTU_CHANGE, event->type); 2432 2432 ice_send_event_to_aux(pf, event); 2433 2433 kfree(event); 2434 2434 } ··· 9342 9342 { 9343 9343 struct ice_netdev_priv *np = netdev_priv(netdev); 9344 9344 enum flow_block_binder_type binder_type; 9345 + struct iidc_rdma_core_dev_info *cdev; 9345 9346 struct ice_pf *pf = np->vsi->back; 9346 9347 flow_setup_cb_t *flower_handler; 9347 9348 bool locked = false; ··· 9374 9373 return -EOPNOTSUPP; 9375 9374 } 9376 9375 9377 - if (pf->adev) { 9376 + cdev = pf->cdev_info; 9377 + if (cdev && cdev->adev) { 9378 9378 mutex_lock(&pf->adev_mutex); 9379 - device_lock(&pf->adev->dev); 9379 + device_lock(&cdev->adev->dev); 9380 9380 locked = true; 9381 - if (pf->adev->dev.driver) { 9381 + if (cdev->adev->dev.driver) { 9382 9382 netdev_err(netdev, "Cannot change qdisc when RDMA is active\n"); 9383 9383 err = -EBUSY; 9384 9384 goto adev_unlock; ··· 9393 9391 9394 9392 adev_unlock: 9395 9393 if (locked) { 9396 - device_unlock(&pf->adev->dev); 9394 + device_unlock(&cdev->adev->dev); 9397 9395 mutex_unlock(&pf->adev_mutex); 9398 9396 } 9399 9397 return err;
+3 -3
drivers/net/ethernet/intel/ice/ice_type.h
··· 19 19 #include "ice_vlan_mode.h" 20 20 #include "ice_fwlog.h" 21 21 #include <linux/wait.h> 22 + #include <net/dscp.h> 22 23 23 24 static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc) 24 25 { ··· 696 695 697 696 #define ICE_MAX_USER_PRIORITY 8 698 697 #define ICE_DCBX_MAX_APPS 64 699 - #define ICE_DSCP_NUM_VAL 64 700 698 #define ICE_LLDPDU_SIZE 1500 701 699 #define ICE_TLV_STATUS_OPER 0x1 702 700 #define ICE_TLV_STATUS_SYNC 0x2 ··· 718 718 u8 pfc_mode; 719 719 struct ice_dcb_app_priority_table app[ICE_DCBX_MAX_APPS]; 720 720 /* when DSCP mapping defined by user set its bit to 1 */ 721 - DECLARE_BITMAP(dscp_mapped, ICE_DSCP_NUM_VAL); 721 + DECLARE_BITMAP(dscp_mapped, DSCP_MAX); 722 722 /* array holding DSCP -> UP/TC values for DSCP L3 QoS mode */ 723 - u8 dscp_map[ICE_DSCP_NUM_VAL]; 723 + u8 dscp_map[DSCP_MAX]; 724 724 u8 dcbx_mode; 725 725 #define ICE_DCBX_MODE_CEE 0x1 726 726 #define ICE_DCBX_MODE_IEEE 0x2
-109
include/linux/net/intel/iidc.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - /* Copyright (C) 2021, Intel Corporation. */ 3 - 4 - #ifndef _IIDC_H_ 5 - #define _IIDC_H_ 6 - 7 - #include <linux/auxiliary_bus.h> 8 - #include <linux/dcbnl.h> 9 - #include <linux/device.h> 10 - #include <linux/if_ether.h> 11 - #include <linux/kernel.h> 12 - #include <linux/netdevice.h> 13 - 14 - enum iidc_event_type { 15 - IIDC_EVENT_BEFORE_MTU_CHANGE, 16 - IIDC_EVENT_AFTER_MTU_CHANGE, 17 - IIDC_EVENT_BEFORE_TC_CHANGE, 18 - IIDC_EVENT_AFTER_TC_CHANGE, 19 - IIDC_EVENT_CRIT_ERR, 20 - IIDC_EVENT_NBITS /* must be last */ 21 - }; 22 - 23 - enum iidc_reset_type { 24 - IIDC_PFR, 25 - IIDC_CORER, 26 - IIDC_GLOBR, 27 - }; 28 - 29 - enum iidc_rdma_protocol { 30 - IIDC_RDMA_PROTOCOL_IWARP = BIT(0), 31 - IIDC_RDMA_PROTOCOL_ROCEV2 = BIT(1), 32 - }; 33 - 34 - #define IIDC_MAX_USER_PRIORITY 8 35 - #define IIDC_MAX_DSCP_MAPPING 64 36 - #define IIDC_DSCP_PFC_MODE 0x1 37 - 38 - /* Struct to hold per RDMA Qset info */ 39 - struct iidc_rdma_qset_params { 40 - /* Qset TEID returned to the RDMA driver in 41 - * ice_add_rdma_qset and used by RDMA driver 42 - * for calls to ice_del_rdma_qset 43 - */ 44 - u32 teid; /* Qset TEID */ 45 - u16 qs_handle; /* RDMA driver provides this */ 46 - u16 vport_id; /* VSI index */ 47 - u8 tc; /* TC branch the Qset should belong to */ 48 - }; 49 - 50 - struct iidc_qos_info { 51 - u64 tc_ctx; 52 - u8 rel_bw; 53 - u8 prio_type; 54 - u8 egress_virt_up; 55 - u8 ingress_virt_up; 56 - }; 57 - 58 - /* Struct to pass QoS info */ 59 - struct iidc_qos_params { 60 - struct iidc_qos_info tc_info[IEEE_8021QAZ_MAX_TCS]; 61 - u8 up2tc[IIDC_MAX_USER_PRIORITY]; 62 - u8 vport_relative_bw; 63 - u8 vport_priority_type; 64 - u8 num_tc; 65 - u8 pfc_mode; 66 - u8 dscp_map[IIDC_MAX_DSCP_MAPPING]; 67 - }; 68 - 69 - struct iidc_event { 70 - DECLARE_BITMAP(type, IIDC_EVENT_NBITS); 71 - u32 reg; 72 - }; 73 - 74 - struct ice_pf; 75 - 76 - int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset); 77 - int 
ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset); 78 - int ice_rdma_request_reset(struct ice_pf *pf, enum iidc_reset_type reset_type); 79 - int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable); 80 - void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos); 81 - int ice_alloc_rdma_qvector(struct ice_pf *pf, struct msix_entry *entry); 82 - void ice_free_rdma_qvector(struct ice_pf *pf, struct msix_entry *entry); 83 - 84 - /* Structure representing auxiliary driver tailored information about the core 85 - * PCI dev, each auxiliary driver using the IIDC interface will have an 86 - * instance of this struct dedicated to it. 87 - */ 88 - 89 - struct iidc_auxiliary_dev { 90 - struct auxiliary_device adev; 91 - struct ice_pf *pf; 92 - }; 93 - 94 - /* structure representing the auxiliary driver. This struct is to be 95 - * allocated and populated by the auxiliary driver's owner. The core PCI 96 - * driver will access these ops by performing a container_of on the 97 - * auxiliary_device->dev.driver. 98 - */ 99 - struct iidc_auxiliary_drv { 100 - struct auxiliary_driver adrv; 101 - /* This event_handler is meant to be a blocking call. For instance, 102 - * when a BEFORE_MTU_CHANGE event comes in, the event_handler will not 103 - * return until the auxiliary driver is ready for the MTU change to 104 - * happen. 105 - */ 106 - void (*event_handler)(struct ice_pf *pf, struct iidc_event *event); 107 - }; 108 - 109 - #endif /* _IIDC_H_*/
+68
include/linux/net/intel/iidc_rdma.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Copyright (C) 2021-2025, Intel Corporation. */ 3 + 4 + #ifndef _IIDC_RDMA_H_ 5 + #define _IIDC_RDMA_H_ 6 + 7 + #include <linux/auxiliary_bus.h> 8 + #include <linux/device.h> 9 + #include <linux/if_ether.h> 10 + #include <linux/kernel.h> 11 + #include <linux/netdevice.h> 12 + #include <net/dscp.h> 13 + 14 + enum iidc_rdma_event_type { 15 + IIDC_RDMA_EVENT_BEFORE_MTU_CHANGE, 16 + IIDC_RDMA_EVENT_AFTER_MTU_CHANGE, 17 + IIDC_RDMA_EVENT_BEFORE_TC_CHANGE, 18 + IIDC_RDMA_EVENT_AFTER_TC_CHANGE, 19 + IIDC_RDMA_EVENT_WARN_RESET, 20 + IIDC_RDMA_EVENT_CRIT_ERR, 21 + IIDC_RDMA_EVENT_NBITS /* must be last */ 22 + }; 23 + 24 + struct iidc_rdma_event { 25 + DECLARE_BITMAP(type, IIDC_RDMA_EVENT_NBITS); 26 + u32 reg; 27 + }; 28 + 29 + enum iidc_rdma_reset_type { 30 + IIDC_FUNC_RESET, 31 + IIDC_DEV_RESET, 32 + }; 33 + 34 + enum iidc_rdma_protocol { 35 + IIDC_RDMA_PROTOCOL_IWARP = BIT(0), 36 + IIDC_RDMA_PROTOCOL_ROCEV2 = BIT(1), 37 + }; 38 + 39 + /* Structure to be populated by core LAN PCI driver */ 40 + struct iidc_rdma_core_dev_info { 41 + struct pci_dev *pdev; /* PCI device of corresponding to main function */ 42 + struct auxiliary_device *adev; 43 + /* Current active RDMA protocol */ 44 + enum iidc_rdma_protocol rdma_protocol; 45 + void *iidc_priv; /* elements unique to each driver */ 46 + }; 47 + 48 + /* Structure representing auxiliary driver tailored information about the core 49 + * PCI dev, each auxiliary driver using the IIDC interface will have an 50 + * instance of this struct dedicated to it. 51 + */ 52 + struct iidc_rdma_core_auxiliary_dev { 53 + struct auxiliary_device adev; 54 + struct iidc_rdma_core_dev_info *cdev_info; 55 + }; 56 + 57 + /* structure representing the auxiliary driver. This struct is to be 58 + * allocated and populated by the auxiliary driver's owner. The core PCI 59 + * driver will access these ops by performing a container_of on the 60 + * auxiliary_device->dev.driver. 
61 + */ 62 + struct iidc_rdma_core_auxiliary_drv { 63 + struct auxiliary_driver adrv; 64 + void (*event_handler)(struct iidc_rdma_core_dev_info *cdev, 65 + struct iidc_rdma_event *event); 66 + }; 67 + 68 + #endif /* _IIDC_RDMA_H_*/
+70
include/linux/net/intel/iidc_rdma_ice.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Copyright (C) 2021-2025, Intel Corporation. */ 3 + 4 + #ifndef _IIDC_RDMA_ICE_H_ 5 + #define _IIDC_RDMA_ICE_H_ 6 + 7 + #include <linux/dcbnl.h> 8 + 9 + #define IIDC_MAX_USER_PRIORITY 8 10 + #define IIDC_DSCP_PFC_MODE 0x1 11 + 12 + /** 13 + * struct iidc_rdma_qset_params - Struct to hold per RDMA Qset info 14 + * @teid: TEID of the Qset node 15 + * @qs_handle: SW index of the Qset, RDMA provides this 16 + * @vport_id: VSI index 17 + * @tc: Traffic Class branch the QSet should belong to 18 + */ 19 + struct iidc_rdma_qset_params { 20 + /* Qset TEID returned to the RDMA driver in 21 + * ice_add_rdma_qset and used by RDMA driver 22 + * for calls to ice_del_rdma_qset 23 + */ 24 + u32 teid; 25 + u16 qs_handle; 26 + u16 vport_id; 27 + u8 tc; 28 + }; 29 + 30 + struct iidc_rdma_qos_info { 31 + u64 tc_ctx; 32 + u8 rel_bw; 33 + u8 prio_type; 34 + u8 egress_virt_up; 35 + u8 ingress_virt_up; 36 + }; 37 + 38 + /* Struct to pass QoS info */ 39 + struct iidc_rdma_qos_params { 40 + struct iidc_rdma_qos_info tc_info[IEEE_8021QAZ_MAX_TCS]; 41 + u8 up2tc[IIDC_MAX_USER_PRIORITY]; 42 + u8 vport_relative_bw; 43 + u8 vport_priority_type; 44 + u8 num_tc; 45 + u8 pfc_mode; 46 + u8 dscp_map[DSCP_MAX]; 47 + }; 48 + 49 + struct iidc_rdma_priv_dev_info { 50 + u8 pf_id; 51 + u16 vport_id; 52 + struct net_device *netdev; 53 + struct iidc_rdma_qos_params qos_info; 54 + u8 __iomem *hw_addr; 55 + }; 56 + 57 + int ice_add_rdma_qset(struct iidc_rdma_core_dev_info *cdev, 58 + struct iidc_rdma_qset_params *qset); 59 + int ice_del_rdma_qset(struct iidc_rdma_core_dev_info *cdev, 60 + struct iidc_rdma_qset_params *qset); 61 + int ice_rdma_request_reset(struct iidc_rdma_core_dev_info *cdev, 62 + enum iidc_rdma_reset_type reset_type); 63 + int ice_rdma_update_vsi_filter(struct iidc_rdma_core_dev_info *cdev, u16 vsi_id, 64 + bool enable); 65 + int ice_alloc_rdma_qvector(struct iidc_rdma_core_dev_info *cdev, 66 + struct msix_entry *entry); 67 + void 
ice_free_rdma_qvector(struct iidc_rdma_core_dev_info *cdev, 68 + struct msix_entry *entry); 69 + 70 + #endif /* _IIDC_RDMA_ICE_H_*/