Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'hinic-mailbox-channel-enhancement'

Luo bin says:

====================
hinic: mailbox channel enhancement

Add support for generating a mailbox random id for each VF to ensure that
mailbox messages arriving from a VF are valid, and make the PF check whether
a cmd from a VF is supported before passing it on to the hw.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+422 -2
+8
drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
··· 31 31 (((u64)(val) & HINIC_CMDQ_CTXT_##member##_MASK) \ 32 32 << HINIC_CMDQ_CTXT_##member##_SHIFT) 33 33 34 + #define HINIC_CMDQ_CTXT_PAGE_INFO_GET(val, member) \ 35 + (((u64)(val) >> HINIC_CMDQ_CTXT_##member##_SHIFT) \ 36 + & HINIC_CMDQ_CTXT_##member##_MASK) 37 + 34 38 #define HINIC_CMDQ_CTXT_PAGE_INFO_CLEAR(val, member) \ 35 39 ((val) & (~((u64)HINIC_CMDQ_CTXT_##member##_MASK \ 36 40 << HINIC_CMDQ_CTXT_##member##_SHIFT))) ··· 48 44 #define HINIC_CMDQ_CTXT_BLOCK_INFO_SET(val, member) \ 49 45 (((u64)(val) & HINIC_CMDQ_CTXT_##member##_MASK) \ 50 46 << HINIC_CMDQ_CTXT_##member##_SHIFT) 47 + 48 + #define HINIC_CMDQ_CTXT_BLOCK_INFO_GET(val, member) \ 49 + (((u64)(val) >> HINIC_CMDQ_CTXT_##member##_SHIFT) \ 50 + & HINIC_CMDQ_CTXT_##member##_MASK) 51 51 52 52 #define HINIC_CMDQ_CTXT_BLOCK_INFO_CLEAR(val, member) \ 53 53 ((val) & (~((u64)HINIC_CMDQ_CTXT_##member##_MASK \
+13
drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
··· 28 28 #define HINIC_MGMT_STATUS_EXIST 0x6 29 29 #define HINIC_MGMT_CMD_UNSUPPORTED 0xFF 30 30 31 + #define HINIC_CMD_VER_FUNC_ID 2 32 + 31 33 struct hinic_cap { 32 34 u16 max_qps; 33 35 u16 num_qps; ··· 313 311 u8 lli_credit_cnt; 314 312 u8 resend_timer_cnt; 315 313 u8 rsvd1[3]; 314 + }; 315 + 316 + struct hinic_set_random_id { 317 + u8 status; 318 + u8 version; 319 + u8 rsvd0[6]; 320 + 321 + u8 vf_in_pf; 322 + u8 rsvd1; 323 + u16 func_idx; 324 + u32 random_id; 316 325 }; 317 326 318 327 struct hinic_board_info {
+309 -1
drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
··· 153 153 (MBOX_MSG_ID(func_to_func_mbox) + 1) & MBOX_MSG_ID_MASK) 154 154 155 155 #define FUNC_ID_OFF_SET_8B 8 156 - #define FUNC_ID_OFF_SET_10B 10 157 156 158 157 /* max message counter wait to process for one function */ 159 158 #define HINIC_MAX_MSG_CNT_TO_PROCESS 10 ··· 187 188 NOT_TRIGGER, 188 189 TRIGGER, 189 190 }; 191 + 192 + static bool check_func_id(struct hinic_hwdev *hwdev, u16 src_func_idx, 193 + const void *buf_in, u16 in_size, u16 offset) 194 + { 195 + u16 func_idx; 196 + 197 + if (in_size < offset + sizeof(func_idx)) { 198 + dev_warn(&hwdev->hwif->pdev->dev, 199 + "Receive mailbox msg len: %d less than %d Bytes is invalid\n", 200 + in_size, offset); 201 + return false; 202 + } 203 + 204 + func_idx = *((u16 *)((u8 *)buf_in + offset)); 205 + 206 + if (src_func_idx != func_idx) { 207 + dev_warn(&hwdev->hwif->pdev->dev, 208 + "Receive mailbox function id: 0x%x not equal to msg function id: 0x%x\n", 209 + src_func_idx, func_idx); 210 + return false; 211 + } 212 + 213 + return true; 214 + } 215 + 216 + bool hinic_mbox_check_func_id_8B(struct hinic_hwdev *hwdev, u16 func_idx, 217 + void *buf_in, u16 in_size) 218 + { 219 + return check_func_id(hwdev, func_idx, buf_in, in_size, 220 + FUNC_ID_OFF_SET_8B); 221 + } 190 222 191 223 /** 192 224 * hinic_register_pf_mbox_cb - register mbox callback for pf ··· 516 486 kfree(rcv_mbox_temp); 517 487 } 518 488 489 + static int set_vf_mbox_random_id(struct hinic_hwdev *hwdev, u16 func_id) 490 + { 491 + struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func; 492 + struct hinic_set_random_id rand_info = {0}; 493 + u16 out_size = sizeof(rand_info); 494 + struct hinic_pfhwdev *pfhwdev; 495 + int ret; 496 + 497 + pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); 498 + 499 + rand_info.version = HINIC_CMD_VER_FUNC_ID; 500 + rand_info.func_idx = func_id; 501 + rand_info.vf_in_pf = func_id - hinic_glb_pf_vf_offset(hwdev->hwif); 502 + rand_info.random_id = get_random_u32(); 503 + 504 + 
func_to_func->vf_mbx_rand_id[func_id] = rand_info.random_id; 505 + 506 + ret = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, 507 + HINIC_MGMT_CMD_SET_VF_RANDOM_ID, 508 + &rand_info, sizeof(rand_info), 509 + &rand_info, &out_size, HINIC_MGMT_MSG_SYNC); 510 + if ((rand_info.status != HINIC_MGMT_CMD_UNSUPPORTED && 511 + rand_info.status) || !out_size || ret) { 512 + dev_err(&hwdev->hwif->pdev->dev, "Set VF random id failed, err: %d, status: 0x%x, out size: 0x%x\n", 513 + ret, rand_info.status, out_size); 514 + return -EIO; 515 + } 516 + 517 + if (rand_info.status == HINIC_MGMT_CMD_UNSUPPORTED) 518 + return rand_info.status; 519 + 520 + func_to_func->vf_mbx_old_rand_id[func_id] = 521 + func_to_func->vf_mbx_rand_id[func_id]; 522 + 523 + return 0; 524 + } 525 + 526 + static void update_random_id_work_handler(struct work_struct *work) 527 + { 528 + struct hinic_mbox_work *mbox_work = 529 + container_of(work, struct hinic_mbox_work, work); 530 + struct hinic_mbox_func_to_func *func_to_func; 531 + u16 src = mbox_work->src_func_idx; 532 + 533 + func_to_func = mbox_work->func_to_func; 534 + 535 + if (set_vf_mbox_random_id(func_to_func->hwdev, src)) 536 + dev_warn(&func_to_func->hwdev->hwif->pdev->dev, "Update VF id: 0x%x random id failed\n", 537 + mbox_work->src_func_idx); 538 + 539 + kfree(mbox_work); 540 + } 541 + 542 + static bool check_vf_mbox_random_id(struct hinic_mbox_func_to_func *func_to_func, 543 + u8 *header) 544 + { 545 + struct hinic_hwdev *hwdev = func_to_func->hwdev; 546 + struct hinic_mbox_work *mbox_work = NULL; 547 + u64 mbox_header = *((u64 *)header); 548 + u16 offset, src; 549 + u32 random_id; 550 + int vf_in_pf; 551 + 552 + src = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX); 553 + 554 + if (IS_PF_OR_PPF_SRC(src) || !func_to_func->support_vf_random) 555 + return true; 556 + 557 + if (!HINIC_IS_PPF(hwdev->hwif)) { 558 + offset = hinic_glb_pf_vf_offset(hwdev->hwif); 559 + vf_in_pf = src - offset; 560 + 561 + if (vf_in_pf < 1 || vf_in_pf > 
hwdev->nic_cap.max_vf) { 562 + dev_warn(&hwdev->hwif->pdev->dev, 563 + "Receive vf id(0x%x) is invalid, vf id should be from 0x%x to 0x%x\n", 564 + src, offset + 1, 565 + hwdev->nic_cap.max_vf + offset); 566 + return false; 567 + } 568 + } 569 + 570 + random_id = be32_to_cpu(*(u32 *)(header + MBOX_SEG_LEN + 571 + MBOX_HEADER_SZ)); 572 + 573 + if (random_id == func_to_func->vf_mbx_rand_id[src] || 574 + random_id == func_to_func->vf_mbx_old_rand_id[src]) 575 + return true; 576 + 577 + dev_warn(&hwdev->hwif->pdev->dev, 578 + "The mailbox random id(0x%x) of func_id(0x%x) doesn't match with pf reservation(0x%x)\n", 579 + random_id, src, func_to_func->vf_mbx_rand_id[src]); 580 + 581 + mbox_work = kzalloc(sizeof(*mbox_work), GFP_KERNEL); 582 + if (!mbox_work) 583 + return false; 584 + 585 + mbox_work->func_to_func = func_to_func; 586 + mbox_work->src_func_idx = src; 587 + 588 + INIT_WORK(&mbox_work->work, update_random_id_work_handler); 589 + queue_work(func_to_func->workq, &mbox_work->work); 590 + 591 + return false; 592 + } 593 + 519 594 void hinic_mbox_func_aeqe_handler(void *handle, void *header, u8 size) 520 595 { 521 596 struct hinic_mbox_func_to_func *func_to_func; ··· 638 503 "Mailbox source function id:%u is invalid\n", (u32)src); 639 504 return; 640 505 } 506 + 507 + if (!check_vf_mbox_random_id(func_to_func, header)) 508 + return; 641 509 642 510 recv_mbox = (dir == HINIC_HWIF_DIRECT_SEND) ? 
643 511 &func_to_func->mbox_send[src] : ··· 1235 1097 send_mbox->wb_paddr); 1236 1098 } 1237 1099 1100 + bool hinic_mbox_check_cmd_valid(struct hinic_hwdev *hwdev, 1101 + struct vf_cmd_check_handle *cmd_handle, 1102 + u16 vf_id, u8 cmd, void *buf_in, 1103 + u16 in_size, u8 size) 1104 + { 1105 + u16 src_idx = vf_id + hinic_glb_pf_vf_offset(hwdev->hwif); 1106 + int i; 1107 + 1108 + for (i = 0; i < size; i++) { 1109 + if (cmd == cmd_handle[i].cmd) { 1110 + if (cmd_handle[i].check_cmd) 1111 + return cmd_handle[i].check_cmd(hwdev, src_idx, 1112 + buf_in, in_size); 1113 + else 1114 + return true; 1115 + } 1116 + } 1117 + 1118 + dev_err(&hwdev->hwif->pdev->dev, 1119 + "PF Receive VF(%d) unsupported cmd(0x%x)\n", 1120 + vf_id + hinic_glb_pf_vf_offset(hwdev->hwif), cmd); 1121 + 1122 + return false; 1123 + } 1124 + 1125 + static bool hinic_cmdq_check_vf_ctxt(struct hinic_hwdev *hwdev, 1126 + struct hinic_cmdq_ctxt *cmdq_ctxt) 1127 + { 1128 + struct hinic_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info; 1129 + u64 curr_pg_pfn, wq_block_pfn; 1130 + 1131 + if (cmdq_ctxt->ppf_idx != HINIC_HWIF_PPF_IDX(hwdev->hwif) || 1132 + cmdq_ctxt->cmdq_type > HINIC_MAX_CMDQ_TYPES) 1133 + return false; 1134 + 1135 + curr_pg_pfn = HINIC_CMDQ_CTXT_PAGE_INFO_GET 1136 + (ctxt_info->curr_wqe_page_pfn, CURR_WQE_PAGE_PFN); 1137 + wq_block_pfn = HINIC_CMDQ_CTXT_BLOCK_INFO_GET 1138 + (ctxt_info->wq_block_pfn, WQ_BLOCK_PFN); 1139 + /* VF must use 0-level CLA */ 1140 + if (curr_pg_pfn != wq_block_pfn) 1141 + return false; 1142 + 1143 + return true; 1144 + } 1145 + 1146 + static bool check_cmdq_ctxt(struct hinic_hwdev *hwdev, u16 func_idx, 1147 + void *buf_in, u16 in_size) 1148 + { 1149 + if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size)) 1150 + return false; 1151 + 1152 + return hinic_cmdq_check_vf_ctxt(hwdev, buf_in); 1153 + } 1154 + 1155 + #define HW_CTX_QPS_VALID(hw_ctxt) \ 1156 + ((hw_ctxt)->rq_depth >= HINIC_QUEUE_MIN_DEPTH && \ 1157 + (hw_ctxt)->rq_depth <= HINIC_QUEUE_MAX_DEPTH 
&& \ 1158 + (hw_ctxt)->sq_depth >= HINIC_QUEUE_MIN_DEPTH && \ 1159 + (hw_ctxt)->sq_depth <= HINIC_QUEUE_MAX_DEPTH && \ 1160 + (hw_ctxt)->rx_buf_sz_idx <= HINIC_MAX_RX_BUFFER_SIZE) 1161 + 1162 + static bool hw_ctxt_qps_param_valid(struct hinic_cmd_hw_ioctxt *hw_ctxt) 1163 + { 1164 + if (HW_CTX_QPS_VALID(hw_ctxt)) 1165 + return true; 1166 + 1167 + if (!hw_ctxt->rq_depth && !hw_ctxt->sq_depth && 1168 + !hw_ctxt->rx_buf_sz_idx) 1169 + return true; 1170 + 1171 + return false; 1172 + } 1173 + 1174 + static bool check_hwctxt(struct hinic_hwdev *hwdev, u16 func_idx, 1175 + void *buf_in, u16 in_size) 1176 + { 1177 + struct hinic_cmd_hw_ioctxt *hw_ctxt = buf_in; 1178 + 1179 + if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size)) 1180 + return false; 1181 + 1182 + if (hw_ctxt->ppf_idx != HINIC_HWIF_PPF_IDX(hwdev->hwif)) 1183 + return false; 1184 + 1185 + if (hw_ctxt->set_cmdq_depth) { 1186 + if (hw_ctxt->cmdq_depth >= HINIC_QUEUE_MIN_DEPTH && 1187 + hw_ctxt->cmdq_depth <= HINIC_QUEUE_MAX_DEPTH) 1188 + return true; 1189 + 1190 + return false; 1191 + } 1192 + 1193 + return hw_ctxt_qps_param_valid(hw_ctxt); 1194 + } 1195 + 1196 + static bool check_set_wq_page_size(struct hinic_hwdev *hwdev, u16 func_idx, 1197 + void *buf_in, u16 in_size) 1198 + { 1199 + struct hinic_wq_page_size *page_size_info = buf_in; 1200 + 1201 + if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size)) 1202 + return false; 1203 + 1204 + if (page_size_info->ppf_idx != HINIC_HWIF_PPF_IDX(hwdev->hwif)) 1205 + return false; 1206 + 1207 + if (((1U << page_size_info->page_size) * SZ_4K) != 1208 + HINIC_DEFAULT_WQ_PAGE_SIZE) 1209 + return false; 1210 + 1211 + return true; 1212 + } 1213 + 1214 + static struct vf_cmd_check_handle hw_cmd_support_vf[] = { 1215 + {HINIC_COMM_CMD_START_FLR, hinic_mbox_check_func_id_8B}, 1216 + {HINIC_COMM_CMD_DMA_ATTR_SET, hinic_mbox_check_func_id_8B}, 1217 + {HINIC_COMM_CMD_CMDQ_CTXT_SET, check_cmdq_ctxt}, 1218 + {HINIC_COMM_CMD_CMDQ_CTXT_GET, check_cmdq_ctxt}, 
1219 + {HINIC_COMM_CMD_HWCTXT_SET, check_hwctxt}, 1220 + {HINIC_COMM_CMD_HWCTXT_GET, check_hwctxt}, 1221 + {HINIC_COMM_CMD_SQ_HI_CI_SET, hinic_mbox_check_func_id_8B}, 1222 + {HINIC_COMM_CMD_RES_STATE_SET, hinic_mbox_check_func_id_8B}, 1223 + {HINIC_COMM_CMD_IO_RES_CLEAR, hinic_mbox_check_func_id_8B}, 1224 + {HINIC_COMM_CMD_CEQ_CTRL_REG_WR_BY_UP, hinic_mbox_check_func_id_8B}, 1225 + {HINIC_COMM_CMD_MSI_CTRL_REG_WR_BY_UP, hinic_mbox_check_func_id_8B}, 1226 + {HINIC_COMM_CMD_MSI_CTRL_REG_RD_BY_UP, hinic_mbox_check_func_id_8B}, 1227 + {HINIC_COMM_CMD_L2NIC_RESET, hinic_mbox_check_func_id_8B}, 1228 + {HINIC_COMM_CMD_PAGESIZE_SET, check_set_wq_page_size}, 1229 + }; 1230 + 1238 1231 static int comm_pf_mbox_handler(void *handle, u16 vf_id, u8 cmd, void *buf_in, 1239 1232 u16 in_size, void *buf_out, u16 *out_size) 1240 1233 { 1234 + u8 size = ARRAY_SIZE(hw_cmd_support_vf); 1241 1235 struct hinic_hwdev *hwdev = handle; 1242 1236 struct hinic_pfhwdev *pfhwdev; 1243 1237 int err = 0; 1244 1238 1245 1239 pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); 1240 + 1241 + if (!hinic_mbox_check_cmd_valid(handle, hw_cmd_support_vf, vf_id, cmd, 1242 + buf_in, in_size, size)) { 1243 + dev_err(&hwdev->hwif->pdev->dev, 1244 + "PF Receive VF: %d common cmd: 0x%x or mbox len: 0x%x is invalid\n", 1245 + vf_id + hinic_glb_pf_vf_offset(hwdev->hwif), cmd, 1246 + in_size); 1247 + return HINIC_MBOX_VF_CMD_ERROR; 1248 + } 1246 1249 1247 1250 if (cmd == HINIC_COMM_CMD_START_FLR) { 1248 1251 *out_size = 0; ··· 1488 1209 free_mbox_info(func_to_func->mbox_send); 1489 1210 1490 1211 kfree(func_to_func); 1212 + } 1213 + 1214 + int hinic_vf_mbox_random_id_init(struct hinic_hwdev *hwdev) 1215 + { 1216 + u16 vf_offset; 1217 + u8 vf_in_pf; 1218 + int err = 0; 1219 + 1220 + if (HINIC_IS_VF(hwdev->hwif)) 1221 + return 0; 1222 + 1223 + vf_offset = hinic_glb_pf_vf_offset(hwdev->hwif); 1224 + 1225 + for (vf_in_pf = 1; vf_in_pf <= hwdev->nic_cap.max_vf; vf_in_pf++) { 1226 + err = 
set_vf_mbox_random_id(hwdev, vf_offset + vf_in_pf); 1227 + if (err) 1228 + break; 1229 + } 1230 + 1231 + if (err == HINIC_MGMT_CMD_UNSUPPORTED) { 1232 + hwdev->func_to_func->support_vf_random = false; 1233 + err = 0; 1234 + dev_warn(&hwdev->hwif->pdev->dev, "Mgmt is unsupported to set VF%d random id\n", 1235 + vf_in_pf - 1); 1236 + } else if (!err) { 1237 + hwdev->func_to_func->support_vf_random = true; 1238 + } 1239 + 1240 + return err; 1491 1241 }
+22
drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h
··· 22 22 #define HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF 0x0108 23 23 #define HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF 0x010C 24 24 25 + #define MAX_FUNCTION_NUM 512 26 + 27 + struct vf_cmd_check_handle { 28 + u8 cmd; 29 + bool (*check_cmd)(struct hinic_hwdev *hwdev, u16 src_func_idx, 30 + void *buf_in, u16 in_size); 31 + }; 32 + 25 33 enum hinic_mbox_ack_type { 26 34 MBOX_ACK, 27 35 MBOX_NO_ACK, ··· 108 100 109 101 /* lock for mbox event flag */ 110 102 spinlock_t mbox_lock; 103 + 104 + u32 vf_mbx_old_rand_id[MAX_FUNCTION_NUM]; 105 + u32 vf_mbx_rand_id[MAX_FUNCTION_NUM]; 106 + bool support_vf_random; 111 107 }; 112 108 113 109 struct hinic_mbox_work { ··· 127 115 void *buf_in, u16 in_size, 128 116 void *buf_out, u16 *out_size); 129 117 }; 118 + 119 + bool hinic_mbox_check_func_id_8B(struct hinic_hwdev *hwdev, u16 func_idx, 120 + void *buf_in, u16 in_size); 121 + 122 + bool hinic_mbox_check_cmd_valid(struct hinic_hwdev *hwdev, 123 + struct vf_cmd_check_handle *cmd_handle, 124 + u16 vf_id, u8 cmd, void *buf_in, 125 + u16 in_size, u8 size); 130 126 131 127 int hinic_register_pf_mbox_cb(struct hinic_hwdev *hwdev, 132 128 enum hinic_mod_type mod, ··· 170 150 int hinic_mbox_to_vf(struct hinic_hwdev *hwdev, 171 151 enum hinic_mod_type mod, u16 vf_id, u8 cmd, void *buf_in, 172 152 u16 in_size, void *buf_out, u16 *out_size, u32 timeout); 153 + 154 + int hinic_vf_mbox_random_id_init(struct hinic_hwdev *hwdev); 173 155 174 156 #endif
+2
drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h
··· 93 93 94 94 HINIC_COMM_CMD_WATCHDOG_INFO = 0x56, 95 95 96 + HINIC_MGMT_CMD_SET_VF_RANDOM_ID = 0x61, 97 + 96 98 HINIC_COMM_CMD_MAX, 97 99 }; 98 100
+68 -1
drivers/net/ethernet/huawei/hinic/hinic_sriov.c
··· 429 429 return 0; 430 430 } 431 431 432 + static bool check_func_table(struct hinic_hwdev *hwdev, u16 func_idx, 433 + void *buf_in, u16 in_size) 434 + { 435 + struct hinic_cmd_fw_ctxt *function_table = buf_in; 436 + 437 + if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size) || 438 + !function_table->rx_buf_sz) 439 + return false; 440 + 441 + return true; 442 + } 443 + 432 444 static struct vf_cmd_msg_handle nic_vf_cmd_msg_handler[] = { 433 445 {HINIC_PORT_CMD_VF_REGISTER, hinic_register_vf_msg_handler}, 434 446 {HINIC_PORT_CMD_VF_UNREGISTER, hinic_unregister_vf_msg_handler}, ··· 449 437 {HINIC_PORT_CMD_SET_MAC, hinic_set_vf_mac_msg_handler}, 450 438 {HINIC_PORT_CMD_DEL_MAC, hinic_del_vf_mac_msg_handler}, 451 439 {HINIC_PORT_CMD_GET_LINK_STATE, hinic_get_vf_link_status_msg_handler}, 440 + }; 441 + 442 + static struct vf_cmd_check_handle nic_cmd_support_vf[] = { 443 + {HINIC_PORT_CMD_VF_REGISTER, NULL}, 444 + {HINIC_PORT_CMD_VF_UNREGISTER, NULL}, 445 + {HINIC_PORT_CMD_CHANGE_MTU, hinic_mbox_check_func_id_8B}, 446 + {HINIC_PORT_CMD_ADD_VLAN, hinic_mbox_check_func_id_8B}, 447 + {HINIC_PORT_CMD_DEL_VLAN, hinic_mbox_check_func_id_8B}, 448 + {HINIC_PORT_CMD_SET_MAC, hinic_mbox_check_func_id_8B}, 449 + {HINIC_PORT_CMD_GET_MAC, hinic_mbox_check_func_id_8B}, 450 + {HINIC_PORT_CMD_DEL_MAC, hinic_mbox_check_func_id_8B}, 451 + {HINIC_PORT_CMD_SET_RX_MODE, hinic_mbox_check_func_id_8B}, 452 + {HINIC_PORT_CMD_GET_PAUSE_INFO, hinic_mbox_check_func_id_8B}, 453 + {HINIC_PORT_CMD_GET_LINK_STATE, hinic_mbox_check_func_id_8B}, 454 + {HINIC_PORT_CMD_SET_LRO, hinic_mbox_check_func_id_8B}, 455 + {HINIC_PORT_CMD_SET_RX_CSUM, hinic_mbox_check_func_id_8B}, 456 + {HINIC_PORT_CMD_SET_RX_VLAN_OFFLOAD, hinic_mbox_check_func_id_8B}, 457 + {HINIC_PORT_CMD_GET_VPORT_STAT, hinic_mbox_check_func_id_8B}, 458 + {HINIC_PORT_CMD_CLEAN_VPORT_STAT, hinic_mbox_check_func_id_8B}, 459 + {HINIC_PORT_CMD_GET_RSS_TEMPLATE_INDIR_TBL, 460 + hinic_mbox_check_func_id_8B}, 461 + 
{HINIC_PORT_CMD_SET_RSS_TEMPLATE_TBL, hinic_mbox_check_func_id_8B}, 462 + {HINIC_PORT_CMD_GET_RSS_TEMPLATE_TBL, hinic_mbox_check_func_id_8B}, 463 + {HINIC_PORT_CMD_SET_RSS_HASH_ENGINE, hinic_mbox_check_func_id_8B}, 464 + {HINIC_PORT_CMD_GET_RSS_HASH_ENGINE, hinic_mbox_check_func_id_8B}, 465 + {HINIC_PORT_CMD_GET_RSS_CTX_TBL, hinic_mbox_check_func_id_8B}, 466 + {HINIC_PORT_CMD_SET_RSS_CTX_TBL, hinic_mbox_check_func_id_8B}, 467 + {HINIC_PORT_CMD_RSS_TEMP_MGR, hinic_mbox_check_func_id_8B}, 468 + {HINIC_PORT_CMD_RSS_CFG, hinic_mbox_check_func_id_8B}, 469 + {HINIC_PORT_CMD_FWCTXT_INIT, check_func_table}, 470 + {HINIC_PORT_CMD_GET_MGMT_VERSION, NULL}, 471 + {HINIC_PORT_CMD_SET_FUNC_STATE, hinic_mbox_check_func_id_8B}, 472 + {HINIC_PORT_CMD_GET_GLOBAL_QPN, hinic_mbox_check_func_id_8B}, 473 + {HINIC_PORT_CMD_SET_TSO, hinic_mbox_check_func_id_8B}, 474 + {HINIC_PORT_CMD_SET_RQ_IQ_MAP, hinic_mbox_check_func_id_8B}, 475 + {HINIC_PORT_CMD_LINK_STATUS_REPORT, hinic_mbox_check_func_id_8B}, 476 + {HINIC_PORT_CMD_UPDATE_MAC, hinic_mbox_check_func_id_8B}, 477 + {HINIC_PORT_CMD_GET_CAP, hinic_mbox_check_func_id_8B}, 478 + {HINIC_PORT_CMD_GET_LINK_MODE, hinic_mbox_check_func_id_8B}, 452 479 }; 453 480 454 481 #define CHECK_IPSU_15BIT 0X8000 ··· 1023 972 static int nic_pf_mbox_handler(void *hwdev, u16 vf_id, u8 cmd, void *buf_in, 1024 973 u16 in_size, void *buf_out, u16 *out_size) 1025 974 { 975 + u8 size = ARRAY_SIZE(nic_cmd_support_vf); 1026 976 struct vf_cmd_msg_handle *vf_msg_handle; 1027 977 struct hinic_hwdev *dev = hwdev; 1028 978 struct hinic_func_to_io *nic_io; ··· 1032 980 u32 i; 1033 981 1034 982 if (!hwdev) 1035 - return -EFAULT; 983 + return -EINVAL; 984 + 985 + if (!hinic_mbox_check_cmd_valid(hwdev, nic_cmd_support_vf, vf_id, cmd, 986 + buf_in, in_size, size)) { 987 + dev_err(&dev->hwif->pdev->dev, 988 + "PF Receive VF nic cmd: 0x%x, mbox len: 0x%x is invalid\n", 989 + cmd, in_size); 990 + return HINIC_MBOX_VF_CMD_ERROR; 991 + } 1036 992 1037 993 pfhwdev = 
container_of(dev, struct hinic_pfhwdev, hwdev); 1038 994 nic_io = &dev->func_to_io; ··· 1167 1107 struct hinic_func_to_io *nic_io; 1168 1108 int err = 0; 1169 1109 u32 size, i; 1110 + 1111 + err = hinic_vf_mbox_random_id_init(hwdev); 1112 + if (err) { 1113 + dev_err(&hwdev->hwif->pdev->dev, "Failed to init vf mbox random id, err: %d\n", 1114 + err); 1115 + return err; 1116 + } 1170 1117 1171 1118 nic_io = &hwdev->func_to_io; 1172 1119