Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
virtchnl: update and refactor

Jesse Brandeburg says:

The virtchnl.h file is used by i40e/ice physical function (PF) drivers
and irdma when talking to the iavf driver. This series cleans up the
header file by removing unused elements, adding/cleaning some comments,
fixing the data structures so they are explicitly defined, including
padding, and finally doing a long overdue rename of the IWARP members in
the structures to RDMA, since the ice driver and its associated Intel
Ethernet E800 series adapters support both RDMA and IWARP.

The whole series should result in no functional change, but hopefully
clearer code.

* '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue:
virtchnl: i40e/iavf: rename iwarp to rdma
virtchnl: do structure hardening
virtchnl: update header and increase header clarity
virtchnl: remove unused structure declaration
====================

Link: https://lore.kernel.org/r/20230125212441.4030014-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+150 -134
+1 -1
drivers/net/ethernet/intel/i40e/i40e_client.c
··· 543 543 struct i40e_hw *hw = &pf->hw; 544 544 i40e_status err; 545 545 546 - err = i40e_aq_send_msg_to_vf(hw, vf_id, VIRTCHNL_OP_IWARP, 546 + err = i40e_aq_send_msg_to_vf(hw, vf_id, VIRTCHNL_OP_RDMA, 547 547 0, msg, len, NULL); 548 548 if (err) 549 549 dev_err(&pf->pdev->dev, "Unable to send iWarp message to VF, error %d, aq status %d\n",
+32 -31
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
··· 441 441 } 442 442 443 443 /** 444 - * i40e_release_iwarp_qvlist 444 + * i40e_release_rdma_qvlist 445 445 * @vf: pointer to the VF. 446 446 * 447 447 **/ 448 - static void i40e_release_iwarp_qvlist(struct i40e_vf *vf) 448 + static void i40e_release_rdma_qvlist(struct i40e_vf *vf) 449 449 { 450 450 struct i40e_pf *pf = vf->pf; 451 - struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info; 451 + struct virtchnl_rdma_qvlist_info *qvlist_info = vf->qvlist_info; 452 452 u32 msix_vf; 453 453 u32 i; 454 454 ··· 457 457 458 458 msix_vf = pf->hw.func_caps.num_msix_vectors_vf; 459 459 for (i = 0; i < qvlist_info->num_vectors; i++) { 460 - struct virtchnl_iwarp_qv_info *qv_info; 460 + struct virtchnl_rdma_qv_info *qv_info; 461 461 u32 next_q_index, next_q_type; 462 462 struct i40e_hw *hw = &pf->hw; 463 463 u32 v_idx, reg_idx, reg; ··· 491 491 } 492 492 493 493 /** 494 - * i40e_config_iwarp_qvlist 494 + * i40e_config_rdma_qvlist 495 495 * @vf: pointer to the VF info 496 496 * @qvlist_info: queue and vector list 497 497 * 498 498 * Return 0 on success or < 0 on error 499 499 **/ 500 - static int i40e_config_iwarp_qvlist(struct i40e_vf *vf, 501 - struct virtchnl_iwarp_qvlist_info *qvlist_info) 500 + static int 501 + i40e_config_rdma_qvlist(struct i40e_vf *vf, 502 + struct virtchnl_rdma_qvlist_info *qvlist_info) 502 503 { 503 504 struct i40e_pf *pf = vf->pf; 504 505 struct i40e_hw *hw = &pf->hw; 505 - struct virtchnl_iwarp_qv_info *qv_info; 506 + struct virtchnl_rdma_qv_info *qv_info; 506 507 u32 v_idx, i, reg_idx, reg; 507 508 u32 next_q_idx, next_q_type; 508 509 u32 msix_vf; ··· 2124 2123 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN; 2125 2124 2126 2125 if (i40e_vf_client_capable(pf, vf->vf_id) && 2127 - (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) { 2128 - vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP; 2129 - set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states); 2126 + (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RDMA)) { 2127 + vfres->vf_cap_flags |= 
VIRTCHNL_VF_OFFLOAD_RDMA; 2128 + set_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states); 2130 2129 } else { 2131 - clear_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states); 2130 + clear_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states); 2132 2131 } 2133 2132 2134 2133 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) { ··· 3188 3187 } 3189 3188 3190 3189 /** 3191 - * i40e_vc_iwarp_msg 3190 + * i40e_vc_rdma_msg 3192 3191 * @vf: pointer to the VF info 3193 3192 * @msg: pointer to the msg buffer 3194 3193 * @msglen: msg length 3195 3194 * 3196 3195 * called from the VF for the iwarp msgs 3197 3196 **/ 3198 - static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) 3197 + static int i40e_vc_rdma_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) 3199 3198 { 3200 3199 struct i40e_pf *pf = vf->pf; 3201 3200 int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id; 3202 3201 i40e_status aq_ret = 0; 3203 3202 3204 3203 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 3205 - !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) { 3204 + !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) { 3206 3205 aq_ret = I40E_ERR_PARAM; 3207 3206 goto error_param; 3208 3207 } ··· 3212 3211 3213 3212 error_param: 3214 3213 /* send the response to the VF */ 3215 - return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP, 3214 + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_RDMA, 3216 3215 aq_ret); 3217 3216 } 3218 3217 3219 3218 /** 3220 - * i40e_vc_iwarp_qvmap_msg 3219 + * i40e_vc_rdma_qvmap_msg 3221 3220 * @vf: pointer to the VF info 3222 3221 * @msg: pointer to the msg buffer 3223 3222 * @config: config qvmap or release it 3224 3223 * 3225 3224 * called from the VF for the iwarp msgs 3226 3225 **/ 3227 - static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config) 3226 + static int i40e_vc_rdma_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config) 3228 3227 { 3229 - struct virtchnl_iwarp_qvlist_info *qvlist_info = 3230 - (struct virtchnl_iwarp_qvlist_info *)msg; 3228 + struct 
virtchnl_rdma_qvlist_info *qvlist_info = 3229 + (struct virtchnl_rdma_qvlist_info *)msg; 3231 3230 i40e_status aq_ret = 0; 3232 3231 3233 3232 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 3234 - !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) { 3233 + !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) { 3235 3234 aq_ret = I40E_ERR_PARAM; 3236 3235 goto error_param; 3237 3236 } 3238 3237 3239 3238 if (config) { 3240 - if (i40e_config_iwarp_qvlist(vf, qvlist_info)) 3239 + if (i40e_config_rdma_qvlist(vf, qvlist_info)) 3241 3240 aq_ret = I40E_ERR_PARAM; 3242 3241 } else { 3243 - i40e_release_iwarp_qvlist(vf); 3242 + i40e_release_rdma_qvlist(vf); 3244 3243 } 3245 3244 3246 3245 error_param: 3247 3246 /* send the response to the VF */ 3248 3247 return i40e_vc_send_resp_to_vf(vf, 3249 - config ? VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP : 3250 - VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP, 3248 + config ? VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP : 3249 + VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP, 3251 3250 aq_ret); 3252 3251 } 3253 3252 ··· 4113 4112 case VIRTCHNL_OP_GET_STATS: 4114 4113 ret = i40e_vc_get_stats_msg(vf, msg); 4115 4114 break; 4116 - case VIRTCHNL_OP_IWARP: 4117 - ret = i40e_vc_iwarp_msg(vf, msg, msglen); 4115 + case VIRTCHNL_OP_RDMA: 4116 + ret = i40e_vc_rdma_msg(vf, msg, msglen); 4118 4117 break; 4119 - case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: 4120 - ret = i40e_vc_iwarp_qvmap_msg(vf, msg, true); 4118 + case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP: 4119 + ret = i40e_vc_rdma_qvmap_msg(vf, msg, true); 4121 4120 break; 4122 - case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP: 4123 - ret = i40e_vc_iwarp_qvmap_msg(vf, msg, false); 4121 + case VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP: 4122 + ret = i40e_vc_rdma_qvmap_msg(vf, msg, false); 4124 4123 break; 4125 4124 case VIRTCHNL_OP_CONFIG_RSS_KEY: 4126 4125 ret = i40e_vc_config_rss_key(vf, msg);
+3 -3
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
··· 34 34 enum i40e_vf_states { 35 35 I40E_VF_STATE_INIT = 0, 36 36 I40E_VF_STATE_ACTIVE, 37 - I40E_VF_STATE_IWARPENA, 37 + I40E_VF_STATE_RDMAENA, 38 38 I40E_VF_STATE_DISABLED, 39 39 I40E_VF_STATE_MC_PROMISC, 40 40 I40E_VF_STATE_UC_PROMISC, ··· 46 46 enum i40e_vf_capabilities { 47 47 I40E_VIRTCHNL_VF_CAP_PRIVILEGE = 0, 48 48 I40E_VIRTCHNL_VF_CAP_L2, 49 - I40E_VIRTCHNL_VF_CAP_IWARP, 49 + I40E_VIRTCHNL_VF_CAP_RDMA, 50 50 }; 51 51 52 52 /* In ADq, max 4 VSI's can be allocated per VF including primary VF VSI. ··· 108 108 u16 num_cloud_filters; 109 109 110 110 /* RDMA Client */ 111 - struct virtchnl_iwarp_qvlist_info *qvlist_info; 111 + struct virtchnl_rdma_qvlist_info *qvlist_info; 112 112 }; 113 113 114 114 void i40e_free_vfs(struct i40e_pf *pf);
+3 -3
drivers/net/ethernet/intel/iavf/iavf.h
··· 275 275 u64 hw_csum_rx_error; 276 276 u32 rx_desc_count; 277 277 int num_msix_vectors; 278 - int num_iwarp_msix; 279 - int iwarp_base_vector; 278 + int num_rdma_msix; 279 + int rdma_base_vector; 280 280 u32 client_pending; 281 281 struct iavf_client_instance *cinst; 282 282 struct msix_entry *msix_entries; ··· 383 383 enum virtchnl_ops current_op; 384 384 #define CLIENT_ALLOWED(_a) ((_a)->vf_res ? \ 385 385 (_a)->vf_res->vf_cap_flags & \ 386 - VIRTCHNL_VF_OFFLOAD_IWARP : \ 386 + VIRTCHNL_VF_OFFLOAD_RDMA : \ 387 387 0) 388 388 #define CLIENT_ENABLED(_a) ((_a)->cinst) 389 389 /* RSS by the PF should be preferred over RSS via other methods. */
+16 -16
drivers/net/ethernet/intel/iavf/iavf_client.c
··· 127 127 } 128 128 129 129 /** 130 - * iavf_client_release_qvlist - send a message to the PF to release iwarp qv map 130 + * iavf_client_release_qvlist - send a message to the PF to release rdma qv map 131 131 * @ldev: pointer to L2 context. 132 132 * 133 133 * Return 0 on success or < 0 on error ··· 141 141 return -EAGAIN; 142 142 143 143 err = iavf_aq_send_msg_to_pf(&adapter->hw, 144 - VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP, 144 + VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP, 145 145 IAVF_SUCCESS, NULL, 0, NULL); 146 146 147 147 if (err) 148 148 dev_err(&adapter->pdev->dev, 149 - "Unable to send iWarp vector release message to PF, error %d, aq status %d\n", 149 + "Unable to send RDMA vector release message to PF, error %d, aq status %d\n", 150 150 err, adapter->hw.aq.asq_last_status); 151 151 152 152 return err; ··· 215 215 cinst->lan_info.params = params; 216 216 set_bit(__IAVF_CLIENT_INSTANCE_NONE, &cinst->state); 217 217 218 - cinst->lan_info.msix_count = adapter->num_iwarp_msix; 218 + cinst->lan_info.msix_count = adapter->num_rdma_msix; 219 219 cinst->lan_info.msix_entries = 220 - &adapter->msix_entries[adapter->iwarp_base_vector]; 220 + &adapter->msix_entries[adapter->rdma_base_vector]; 221 221 222 222 mac = list_first_entry(&cinst->lan_info.netdev->dev_addrs.list, 223 223 struct netdev_hw_addr, list); ··· 425 425 if (adapter->aq_required) 426 426 return -EAGAIN; 427 427 428 - err = iavf_aq_send_msg_to_pf(&adapter->hw, VIRTCHNL_OP_IWARP, 428 + err = iavf_aq_send_msg_to_pf(&adapter->hw, VIRTCHNL_OP_RDMA, 429 429 IAVF_SUCCESS, msg, len, NULL); 430 430 if (err) 431 - dev_err(&adapter->pdev->dev, "Unable to send iWarp message to PF, error %d, aq status %d\n", 431 + dev_err(&adapter->pdev->dev, "Unable to send RDMA message to PF, error %d, aq status %d\n", 432 432 err, adapter->hw.aq.asq_last_status); 433 433 434 434 return err; 435 435 } 436 436 437 437 /** 438 - * iavf_client_setup_qvlist - send a message to the PF to setup iwarp qv map 438 + * iavf_client_setup_qvlist - 
send a message to the PF to setup rdma qv map 439 439 * @ldev: pointer to L2 context. 440 440 * @client: Client pointer. 441 441 * @qvlist_info: queue and vector list ··· 446 446 struct iavf_client *client, 447 447 struct iavf_qvlist_info *qvlist_info) 448 448 { 449 - struct virtchnl_iwarp_qvlist_info *v_qvlist_info; 449 + struct virtchnl_rdma_qvlist_info *v_qvlist_info; 450 450 struct iavf_adapter *adapter = ldev->vf; 451 451 struct iavf_qv_info *qv_info; 452 452 enum iavf_status err; ··· 463 463 continue; 464 464 v_idx = qv_info->v_idx; 465 465 if ((v_idx >= 466 - (adapter->iwarp_base_vector + adapter->num_iwarp_msix)) || 467 - (v_idx < adapter->iwarp_base_vector)) 466 + (adapter->rdma_base_vector + adapter->num_rdma_msix)) || 467 + (v_idx < adapter->rdma_base_vector)) 468 468 return -EINVAL; 469 469 } 470 470 471 - v_qvlist_info = (struct virtchnl_iwarp_qvlist_info *)qvlist_info; 471 + v_qvlist_info = (struct virtchnl_rdma_qvlist_info *)qvlist_info; 472 472 msg_size = struct_size(v_qvlist_info, qv_info, 473 473 v_qvlist_info->num_vectors - 1); 474 474 475 - adapter->client_pending |= BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP); 475 + adapter->client_pending |= BIT(VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP); 476 476 err = iavf_aq_send_msg_to_pf(&adapter->hw, 477 - VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP, IAVF_SUCCESS, 477 + VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP, IAVF_SUCCESS, 478 478 (u8 *)v_qvlist_info, msg_size, NULL); 479 479 480 480 if (err) { 481 481 dev_err(&adapter->pdev->dev, 482 - "Unable to send iWarp vector config message to PF, error %d, aq status %d\n", 482 + "Unable to send RDMA vector config message to PF, error %d, aq status %d\n", 483 483 err, adapter->hw.aq.asq_last_status); 484 484 goto out; 485 485 } ··· 488 488 for (i = 0; i < 5; i++) { 489 489 msleep(100); 490 490 if (!(adapter->client_pending & 491 - BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP))) { 491 + BIT(VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP))) { 492 492 err = 0; 493 493 break; 494 494 }
+1 -1
drivers/net/ethernet/intel/iavf/iavf_client.h
··· 159 159 #define IAVF_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0) 160 160 #define IAVF_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2) 161 161 u8 type; 162 - #define IAVF_CLIENT_IWARP 0 162 + #define IAVF_CLIENT_RDMA 0 163 163 struct iavf_client_ops *ops; /* client ops provided by the client */ 164 164 }; 165 165
+2 -2
drivers/net/ethernet/intel/iavf/iavf_common.c
··· 223 223 return "IAVF_ERR_ADMIN_QUEUE_FULL"; 224 224 case IAVF_ERR_ADMIN_QUEUE_NO_WORK: 225 225 return "IAVF_ERR_ADMIN_QUEUE_NO_WORK"; 226 - case IAVF_ERR_BAD_IWARP_CQE: 227 - return "IAVF_ERR_BAD_IWARP_CQE"; 226 + case IAVF_ERR_BAD_RDMA_CQE: 227 + return "IAVF_ERR_BAD_RDMA_CQE"; 228 228 case IAVF_ERR_NVM_BLANK_MODE: 229 229 return "IAVF_ERR_NVM_BLANK_MODE"; 230 230 case IAVF_ERR_NOT_IMPLEMENTED:
+1 -1
drivers/net/ethernet/intel/iavf/iavf_main.c
··· 106 106 case IAVF_ERR_SRQ_ENABLED: 107 107 case IAVF_ERR_ADMIN_QUEUE_ERROR: 108 108 case IAVF_ERR_ADMIN_QUEUE_FULL: 109 - case IAVF_ERR_BAD_IWARP_CQE: 109 + case IAVF_ERR_BAD_RDMA_CQE: 110 110 case IAVF_ERR_NVM_BLANK_MODE: 111 111 case IAVF_ERR_PE_DOORBELL_NOT_ENABLED: 112 112 case IAVF_ERR_DIAG_TEST_FAILED:
+1 -1
drivers/net/ethernet/intel/iavf/iavf_status.h
··· 64 64 IAVF_ERR_BUF_TOO_SHORT = -55, 65 65 IAVF_ERR_ADMIN_QUEUE_FULL = -56, 66 66 IAVF_ERR_ADMIN_QUEUE_NO_WORK = -57, 67 - IAVF_ERR_BAD_IWARP_CQE = -58, 67 + IAVF_ERR_BAD_RDMA_CQE = -58, 68 68 IAVF_ERR_NVM_BLANK_MODE = -59, 69 69 IAVF_ERR_NOT_IMPLEMENTED = -60, 70 70 IAVF_ERR_PE_DOORBELL_NOT_ENABLED = -61,
+3 -3
drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
··· 2290 2290 if (v_opcode != adapter->current_op) 2291 2291 return; 2292 2292 break; 2293 - case VIRTCHNL_OP_IWARP: 2293 + case VIRTCHNL_OP_RDMA: 2294 2294 /* Gobble zero-length replies from the PF. They indicate that 2295 2295 * a previous message was received OK, and the client doesn't 2296 2296 * care about that. ··· 2299 2299 iavf_notify_client_message(&adapter->vsi, msg, msglen); 2300 2300 break; 2301 2301 2302 - case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: 2302 + case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP: 2303 2303 adapter->client_pending &= 2304 - ~(BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP)); 2304 + ~(BIT(VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP)); 2305 2305 break; 2306 2306 case VIRTCHNL_OP_GET_RSS_HENA_CAPS: { 2307 2307 struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg;
+87 -72
include/linux/avf/virtchnl.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0-only */ 2 - /******************************************************************************* 3 - * 4 - * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 5 - * Copyright(c) 2013 - 2014 Intel Corporation. 6 - * 7 - * Contact Information: 8 - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 9 - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 10 - * 11 - ******************************************************************************/ 2 + /* Copyright (c) 2013-2022, Intel Corporation. */ 12 3 13 4 #ifndef _VIRTCHNL_H_ 14 5 #define _VIRTCHNL_H_ 15 6 16 7 /* Description: 17 - * This header file describes the VF-PF communication protocol used 18 - * by the drivers for all devices starting from our 40G product line 8 + * This header file describes the Virtual Function (VF) - Physical Function 9 + * (PF) communication protocol used by the drivers for all devices starting 10 + * from our 40G product line 19 11 * 20 12 * Admin queue buffer usage: 21 13 * desc->opcode is always aqc_opc_send_msg_to_pf ··· 21 29 * have a maximum of sixteen queues for all of its VSIs. 22 30 * 23 31 * The PF is required to return a status code in v_retval for all messages 24 - * except RESET_VF, which does not require any response. The return value 25 - * is of status_code type, defined in the shared type.h. 32 + * except RESET_VF, which does not require any response. The returned value 33 + * is of virtchnl_status_code type, defined here. 26 34 * 27 35 * In general, VF driver initialization should roughly follow the order of 28 36 * these opcodes. 
The VF driver must first validate the API version of the ··· 114 122 VIRTCHNL_OP_GET_STATS = 15, 115 123 VIRTCHNL_OP_RSVD = 16, 116 124 VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */ 125 + /* opcode 19 is reserved */ 117 126 VIRTCHNL_OP_IWARP = 20, /* advanced opcode */ 127 + VIRTCHNL_OP_RDMA = VIRTCHNL_OP_IWARP, 118 128 VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */ 129 + VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP = VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP, 119 130 VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */ 131 + VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP = VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP, 120 132 VIRTCHNL_OP_CONFIG_RSS_KEY = 23, 121 133 VIRTCHNL_OP_CONFIG_RSS_LUT = 24, 122 134 VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25, ··· 157 161 { virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) } 158 162 #define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_asset_enum_##X \ 159 163 { virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) } 160 - 161 - /* Virtual channel message descriptor. This overlays the admin queue 162 - * descriptor. All other data is passed in external buffers. 163 - */ 164 - 165 - struct virtchnl_msg { 166 - u8 pad[8]; /* AQ flags/opcode/len/retval fields */ 167 - enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */ 168 - enum virtchnl_status_code v_retval; /* ditto for desc->retval */ 169 - u32 vfid; /* used by PF when sending to VF */ 170 - }; 171 - 172 - VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg); 173 164 174 165 /* Message descriptions and data structures. */ 175 166 ··· 218 235 struct virtchnl_vsi_resource { 219 236 u16 vsi_id; 220 237 u16 num_queue_pairs; 221 - enum virtchnl_vsi_type vsi_type; 238 + 239 + /* see enum virtchnl_vsi_type */ 240 + s32 vsi_type; 222 241 u16 qset_handle; 223 242 u8 default_mac_addr[ETH_ALEN]; 224 243 }; ··· 232 247 * TX/RX Checksum offloading and TSO for non-tunnelled packets. 
233 248 */ 234 249 #define VIRTCHNL_VF_OFFLOAD_L2 BIT(0) 235 - #define VIRTCHNL_VF_OFFLOAD_IWARP BIT(1) 250 + #define VIRTCHNL_VF_OFFLOAD_RDMA BIT(1) 251 + #define VIRTCHNL_VF_CAP_RDMA VIRTCHNL_VF_OFFLOAD_RDMA 236 252 #define VIRTCHNL_VF_OFFLOAD_RSS_AQ BIT(3) 237 253 #define VIRTCHNL_VF_OFFLOAD_RSS_REG BIT(4) 238 254 #define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR BIT(5) ··· 310 324 u8 rxdid; 311 325 u8 pad1[2]; 312 326 u64 dma_ring_addr; 313 - enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */ 327 + 328 + /* see enum virtchnl_rx_hsplit; deprecated with AVF 1.0 */ 329 + s32 rx_split_pos; 314 330 u32 pad2; 315 331 }; 316 332 ··· 324 336 * PF configures queues and returns status. 325 337 * If the number of queues specified is greater than the number of queues 326 338 * associated with the VSI, an error is returned and no queues are configured. 339 + * NOTE: The VF is not required to configure all queues in a single request. 340 + * It may send multiple messages. PF drivers must correctly handle all VF 341 + * requests. 327 342 */ 328 343 struct virtchnl_queue_pair_info { 329 344 /* NOTE: vsi_id and queue_id should be identical for both queues. */ ··· 364 373 * VF uses this message to map vectors to queues. 365 374 * The rxq_map and txq_map fields are bitmaps used to indicate which queues 366 375 * are to be associated with the specified vector. 367 - * The "other" causes are always mapped to vector 0. 376 + * The "other" causes are always mapped to vector 0. The VF may not request 377 + * that vector 0 be used for traffic. 368 378 * PF configures interrupt mapping and returns status. 379 + * NOTE: due to hardware requirements, all active queues (both TX and RX) 380 + * should be mapped to interrupts, even if the driver intends to operate 381 + * only in polling mode. In this case the interrupt may be disabled, but 382 + * the ITR timer will still run to trigger writebacks. 
369 383 */ 370 384 struct virtchnl_vector_map { 371 385 u16 vsi_id; ··· 397 401 * (Currently, we only support 16 queues per VF, but we make the field 398 402 * u32 to allow for expansion.) 399 403 * PF performs requested action and returns status. 404 + * NOTE: The VF is not required to enable/disable all queues in a single 405 + * request. It may send multiple messages. 406 + * PF drivers must correctly handle all VF requests. 400 407 */ 401 408 struct virtchnl_queue_select { 402 409 u16 vsi_id; ··· 964 965 struct virtchnl_filter { 965 966 union virtchnl_flow_spec data; 966 967 union virtchnl_flow_spec mask; 967 - enum virtchnl_flow_type flow_type; 968 - enum virtchnl_action action; 968 + 969 + /* see enum virtchnl_flow_type */ 970 + s32 flow_type; 971 + 972 + /* see enum virtchnl_action */ 973 + s32 action; 969 974 u32 action_meta; 970 975 u8 field_flags; 971 976 u8 pad[3]; ··· 997 994 #define PF_EVENT_SEVERITY_CERTAIN_DOOM 255 998 995 999 996 struct virtchnl_pf_event { 1000 - enum virtchnl_event_codes event; 997 + /* see enum virtchnl_event_codes */ 998 + s32 event; 1001 999 union { 1002 1000 /* If the PF driver does not support the new speed reporting 1003 1001 * capabilities then use link_event else use link_event_adv to ··· 1011 1007 struct { 1012 1008 enum virtchnl_link_speed link_speed; 1013 1009 bool link_status; 1010 + u8 pad[3]; 1014 1011 } link_event; 1015 1012 struct { 1016 1013 /* link_speed provided in Mbps */ ··· 1021 1016 } link_event_adv; 1022 1017 } event_data; 1023 1018 1024 - int severity; 1019 + s32 severity; 1025 1020 }; 1026 1021 1027 1022 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event); 1028 1023 1029 - /* VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP 1030 - * VF uses this message to request PF to map IWARP vectors to IWARP queues. 1031 - * The request for this originates from the VF IWARP driver through 1032 - * a client interface between VF LAN and VF IWARP driver. 
1024 + /* used to specify if a ceq_idx or aeq_idx is invalid */ 1025 + #define VIRTCHNL_RDMA_INVALID_QUEUE_IDX 0xFFFF 1026 + /* VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP 1027 + * VF uses this message to request PF to map RDMA vectors to RDMA queues. 1028 + * The request for this originates from the VF RDMA driver through 1029 + * a client interface between VF LAN and VF RDMA driver. 1033 1030 * A vector could have an AEQ and CEQ attached to it although 1034 - * there is a single AEQ per VF IWARP instance in which case 1035 - * most vectors will have an INVALID_IDX for aeq and valid idx for ceq. 1036 - * There will never be a case where there will be multiple CEQs attached 1037 - * to a single vector. 1031 + * there is a single AEQ per VF RDMA instance in which case 1032 + * most vectors will have an VIRTCHNL_RDMA_INVALID_QUEUE_IDX for aeq and valid 1033 + * idx for ceqs There will never be a case where there will be multiple CEQs 1034 + * attached to a single vector. 1038 1035 * PF configures interrupt mapping and returns status. 
1039 1036 */ 1040 1037 1041 - struct virtchnl_iwarp_qv_info { 1038 + struct virtchnl_rdma_qv_info { 1042 1039 u32 v_idx; /* msix_vector */ 1043 - u16 ceq_idx; 1044 - u16 aeq_idx; 1040 + u16 ceq_idx; /* set to VIRTCHNL_RDMA_INVALID_QUEUE_IDX if invalid */ 1041 + u16 aeq_idx; /* set to VIRTCHNL_RDMA_INVALID_QUEUE_IDX if invalid */ 1045 1042 u8 itr_idx; 1046 1043 u8 pad[3]; 1047 1044 }; 1048 1045 1049 - VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_iwarp_qv_info); 1046 + VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_rdma_qv_info); 1050 1047 1051 - struct virtchnl_iwarp_qvlist_info { 1048 + struct virtchnl_rdma_qvlist_info { 1052 1049 u32 num_vectors; 1053 - struct virtchnl_iwarp_qv_info qv_info[1]; 1050 + struct virtchnl_rdma_qv_info qv_info[1]; 1054 1051 }; 1055 1052 1056 - VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_iwarp_qvlist_info); 1053 + VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_rdma_qvlist_info); 1057 1054 1058 1055 /* VF reset states - these are written into the RSTAT register: 1059 1056 * VFGEN_RSTAT on the VF ··· 1114 1107 #define VIRTCHNL_GET_PROTO_HDR_TYPE(hdr) \ 1115 1108 (((hdr)->type) >> PROTO_HDR_SHIFT) 1116 1109 #define VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) \ 1117 - ((hdr)->type == ((val) >> PROTO_HDR_SHIFT)) 1110 + ((hdr)->type == ((s32)((val) >> PROTO_HDR_SHIFT))) 1118 1111 #define VIRTCHNL_TEST_PROTO_HDR(hdr, val) \ 1119 1112 (VIRTCHNL_TEST_PROTO_HDR_TYPE((hdr), (val)) && \ 1120 1113 VIRTCHNL_TEST_PROTO_HDR_FIELD((hdr), (val))) ··· 1210 1203 }; 1211 1204 1212 1205 struct virtchnl_proto_hdr { 1213 - enum virtchnl_proto_hdr_type type; 1206 + /* see enum virtchnl_proto_hdr_type */ 1207 + s32 type; 1214 1208 u32 field_selector; /* a bit mask to select field for header type */ 1215 1209 u8 buffer[64]; 1216 1210 /** ··· 1241 1233 1242 1234 struct virtchnl_rss_cfg { 1243 1235 struct virtchnl_proto_hdrs proto_hdrs; /* protocol headers */ 1244 - enum virtchnl_rss_algorithm rss_algorithm; /* RSS algorithm type */ 1245 - u8 reserved[128]; /* reserve for future */ 1236 + 1237 + 
/* see enum virtchnl_rss_algorithm; rss algorithm type */ 1238 + s32 rss_algorithm; 1239 + u8 reserved[128]; /* reserve for future */ 1246 1240 }; 1247 1241 1248 1242 VIRTCHNL_CHECK_STRUCT_LEN(2444, virtchnl_rss_cfg); 1249 1243 1250 1244 /* action configuration for FDIR */ 1251 1245 struct virtchnl_filter_action { 1252 - enum virtchnl_action type; 1246 + /* see enum virtchnl_action type */ 1247 + s32 type; 1253 1248 union { 1254 1249 /* used for queue and qgroup action */ 1255 1250 struct { ··· 1294 1283 /* Status returned to VF after VF requests FDIR commands 1295 1284 * VIRTCHNL_FDIR_SUCCESS 1296 1285 * VF FDIR related request is successfully done by PF 1297 - * The request can be OP_ADD/DEL. 1286 + * The request can be OP_ADD/DEL/QUERY_FDIR_FILTER. 1298 1287 * 1299 1288 * VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE 1300 1289 * OP_ADD_FDIR_FILTER request is failed due to no Hardware resource. ··· 1315 1304 * VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT 1316 1305 * OP_ADD/DEL_FDIR_FILTER request is failed due to timing out 1317 1306 * for programming. 1307 + * 1308 + * VIRTCHNL_FDIR_FAILURE_QUERY_INVALID 1309 + * OP_QUERY_FDIR_FILTER request is failed due to parameters validation, 1310 + * for example, VF query counter of a rule who has no counter action. 
1318 1311 */ 1319 1312 enum virtchnl_fdir_prgm_status { 1320 1313 VIRTCHNL_FDIR_SUCCESS = 0, ··· 1328 1313 VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST, 1329 1314 VIRTCHNL_FDIR_FAILURE_RULE_INVALID, 1330 1315 VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT, 1316 + VIRTCHNL_FDIR_FAILURE_QUERY_INVALID, 1331 1317 }; 1332 1318 1333 1319 /* VIRTCHNL_OP_ADD_FDIR_FILTER ··· 1345 1329 u16 validate_only; /* INPUT */ 1346 1330 u32 flow_id; /* OUTPUT */ 1347 1331 struct virtchnl_fdir_rule rule_cfg; /* INPUT */ 1348 - enum virtchnl_fdir_prgm_status status; /* OUTPUT */ 1332 + 1333 + /* see enum virtchnl_fdir_prgm_status; OUTPUT */ 1334 + s32 status; 1349 1335 }; 1350 1336 1351 1337 VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_fdir_add); ··· 1360 1342 u16 vsi_id; /* INPUT */ 1361 1343 u16 pad; 1362 1344 u32 flow_id; /* INPUT */ 1363 - enum virtchnl_fdir_prgm_status status; /* OUTPUT */ 1345 + 1346 + /* see enum virtchnl_fdir_prgm_status; OUTPUT */ 1347 + s32 status; 1364 1348 }; 1365 1349 1366 1350 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del); ··· 1381 1361 u8 *msg, u16 msglen) 1382 1362 { 1383 1363 bool err_msg_format = false; 1384 - int valid_len = 0; 1364 + u32 valid_len = 0; 1385 1365 1386 1366 /* Validate message length. */ 1387 1367 switch (v_opcode) { ··· 1456 1436 case VIRTCHNL_OP_GET_STATS: 1457 1437 valid_len = sizeof(struct virtchnl_queue_select); 1458 1438 break; 1459 - case VIRTCHNL_OP_IWARP: 1439 + case VIRTCHNL_OP_RDMA: 1460 1440 /* These messages are opaque to us and will be validated in 1461 1441 * the RDMA client code. We just need to check for nonzero 1462 1442 * length. The firmware will enforce max length restrictions. 
··· 1466 1446 else 1467 1447 err_msg_format = true; 1468 1448 break; 1469 - case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP: 1449 + case VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP: 1470 1450 break; 1471 - case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: 1472 - valid_len = sizeof(struct virtchnl_iwarp_qvlist_info); 1451 + case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP: 1452 + valid_len = sizeof(struct virtchnl_rdma_qvlist_info); 1473 1453 if (msglen >= valid_len) { 1474 - struct virtchnl_iwarp_qvlist_info *qv = 1475 - (struct virtchnl_iwarp_qvlist_info *)msg; 1476 - if (qv->num_vectors == 0) { 1477 - err_msg_format = true; 1478 - break; 1479 - } 1454 + struct virtchnl_rdma_qvlist_info *qv = 1455 + (struct virtchnl_rdma_qvlist_info *)msg; 1456 + 1480 1457 valid_len += ((qv->num_vectors - 1) * 1481 - sizeof(struct virtchnl_iwarp_qv_info)); 1458 + sizeof(struct virtchnl_rdma_qv_info)); 1482 1459 } 1483 1460 break; 1484 1461 case VIRTCHNL_OP_CONFIG_RSS_KEY: ··· 1519 1502 case VIRTCHNL_OP_DISABLE_CHANNELS: 1520 1503 break; 1521 1504 case VIRTCHNL_OP_ADD_CLOUD_FILTER: 1522 - valid_len = sizeof(struct virtchnl_filter); 1523 - break; 1524 1505 case VIRTCHNL_OP_DEL_CLOUD_FILTER: 1525 1506 valid_len = sizeof(struct virtchnl_filter); 1526 1507 break;