Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

qed: Update qed_hsi.h for fw 8.59.1.0

The qed_hsi.h has been updated to support the new FW version 8.59.1.0 with
the following changes:
- Updates FW HSI (Hardware Software interface) structures.
- Addition/update in function declaration and defines as per HSI.
- Add generic infrastructure for FW error reporting as part of
common event queue handling.
- Move malicious VF error reporting to FW error reporting
infrastructure.
- Move consolidation queue initialization from FW context to ramrod
message.

qed_hsi.h header file changes lead to change in many files to ensure
compilation.

This patch also fixes the existing checkpatch warnings and a few important
checks.

Signed-off-by: Ariel Elior <aelior@marvell.com>
Signed-off-by: Shai Malin <smalin@marvell.com>
Signed-off-by: Omkar Kulkarni <okulkarni@marvell.com>
Signed-off-by: Prabhakar Kushwaha <pkushwaha@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Prabhakar Kushwaha and committed by
David S. Miller
fe40a830 f2a74107

+1589 -307
+85 -27
drivers/net/ethernet/qlogic/qed/qed_dev.c
··· 1397 1397 qed_rdma_info_free(p_hwfn); 1398 1398 } 1399 1399 1400 + qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON); 1400 1401 qed_iov_free(p_hwfn); 1401 1402 qed_l2_free(p_hwfn); 1402 1403 qed_dmae_info_free(p_hwfn); 1403 1404 qed_dcbx_info_free(p_hwfn); 1404 1405 qed_dbg_user_data_free(p_hwfn); 1405 - qed_fw_overlay_mem_free(p_hwfn, p_hwfn->fw_overlay_mem); 1406 + qed_fw_overlay_mem_free(p_hwfn, &p_hwfn->fw_overlay_mem); 1406 1407 1407 1408 /* Destroy doorbell recovery mechanism */ 1408 1409 qed_db_recovery_teardown(p_hwfn); ··· 1485 1484 u16 num_pf_rls, num_vfs = qed_init_qm_get_num_vfs(p_hwfn); 1486 1485 1487 1486 /* num RLs can't exceed resource amount of rls or vports */ 1488 - num_pf_rls = (u16) min_t(u32, RESC_NUM(p_hwfn, QED_RL), 1489 - RESC_NUM(p_hwfn, QED_VPORT)); 1487 + num_pf_rls = (u16)min_t(u32, RESC_NUM(p_hwfn, QED_RL), 1488 + RESC_NUM(p_hwfn, QED_VPORT)); 1490 1489 1491 1490 /* Make sure after we reserve there's something left */ 1492 1491 if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS) ··· 1534 1533 bool four_port; 1535 1534 1536 1535 /* pq and vport bases for this PF */ 1537 - qm_info->start_pq = (u16) RESC_START(p_hwfn, QED_PQ); 1538 - qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT); 1536 + qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ); 1537 + qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT); 1539 1538 1540 1539 /* rate limiting and weighted fair queueing are always enabled */ 1541 1540 qm_info->vport_rl_en = true; ··· 1630 1629 */ 1631 1630 1632 1631 /* flags for pq init */ 1633 - #define PQ_INIT_SHARE_VPORT (1 << 0) 1634 - #define PQ_INIT_PF_RL (1 << 1) 1635 - #define PQ_INIT_VF_RL (1 << 2) 1632 + #define PQ_INIT_SHARE_VPORT BIT(0) 1633 + #define PQ_INIT_PF_RL BIT(1) 1634 + #define PQ_INIT_VF_RL BIT(2) 1636 1635 1637 1636 /* defines for pq init */ 1638 1637 #define PQ_INIT_DEFAULT_WRR_GROUP 1 ··· 2292 2291 goto alloc_no_mem; 2293 2292 } 2294 2293 2295 - rc = qed_eq_alloc(p_hwfn, (u16) n_eqes); 2294 + rc = 
qed_eq_alloc(p_hwfn, (u16)n_eqes); 2296 2295 if (rc) 2297 2296 goto alloc_err; 2298 2297 ··· 2377 2376 return rc; 2378 2377 } 2379 2378 2379 + static int qed_fw_err_handler(struct qed_hwfn *p_hwfn, 2380 + u8 opcode, 2381 + u16 echo, 2382 + union event_ring_data *data, u8 fw_return_code) 2383 + { 2384 + if (fw_return_code != COMMON_ERR_CODE_ERROR) 2385 + goto eqe_unexpected; 2386 + 2387 + if (data->err_data.recovery_scope == ERR_SCOPE_FUNC && 2388 + le16_to_cpu(data->err_data.entity_id) >= MAX_NUM_PFS) { 2389 + qed_sriov_vfpf_malicious(p_hwfn, &data->err_data); 2390 + return 0; 2391 + } 2392 + 2393 + eqe_unexpected: 2394 + DP_ERR(p_hwfn, 2395 + "Skipping unexpected eqe 0x%02x, FW return code 0x%x, echo 0x%x\n", 2396 + opcode, fw_return_code, echo); 2397 + return -EINVAL; 2398 + } 2399 + 2400 + static int qed_common_eqe_event(struct qed_hwfn *p_hwfn, 2401 + u8 opcode, 2402 + __le16 echo, 2403 + union event_ring_data *data, 2404 + u8 fw_return_code) 2405 + { 2406 + switch (opcode) { 2407 + case COMMON_EVENT_VF_PF_CHANNEL: 2408 + case COMMON_EVENT_VF_FLR: 2409 + return qed_sriov_eqe_event(p_hwfn, opcode, echo, data, 2410 + fw_return_code); 2411 + case COMMON_EVENT_FW_ERROR: 2412 + return qed_fw_err_handler(p_hwfn, opcode, 2413 + le16_to_cpu(echo), data, 2414 + fw_return_code); 2415 + default: 2416 + DP_INFO(p_hwfn->cdev, "Unknown eqe event 0x%02x, echo 0x%x\n", 2417 + opcode, echo); 2418 + return -EINVAL; 2419 + } 2420 + } 2421 + 2380 2422 void qed_resc_setup(struct qed_dev *cdev) 2381 2423 { 2382 2424 int i; ··· 2448 2404 2449 2405 qed_l2_setup(p_hwfn); 2450 2406 qed_iov_setup(p_hwfn); 2407 + qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON, 2408 + qed_common_eqe_event); 2451 2409 #ifdef CONFIG_QED_LL2 2452 2410 if (p_hwfn->using_ll2) 2453 2411 qed_ll2_setup(p_hwfn); ··· 2639 2593 cache_line_size); 2640 2594 } 2641 2595 2642 - if (L1_CACHE_BYTES > wr_mbs) 2596 + if (wr_mbs < L1_CACHE_BYTES) 2643 2597 DP_INFO(p_hwfn, 2644 2598 "The cache line size for padding is 
suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n", 2645 2599 L1_CACHE_BYTES, wr_mbs); ··· 2655 2609 struct qed_ptt *p_ptt, int hw_mode) 2656 2610 { 2657 2611 struct qed_qm_info *qm_info = &p_hwfn->qm_info; 2658 - struct qed_qm_common_rt_init_params params; 2612 + struct qed_qm_common_rt_init_params *params; 2659 2613 struct qed_dev *cdev = p_hwfn->cdev; 2660 2614 u8 vf_id, max_num_vfs; 2661 2615 u16 num_pfs, pf_id; 2662 2616 u32 concrete_fid; 2663 2617 int rc = 0; 2618 + 2619 + params = kzalloc(sizeof(*params), GFP_KERNEL); 2620 + if (!params) { 2621 + DP_NOTICE(p_hwfn->cdev, 2622 + "Failed to allocate common init params\n"); 2623 + 2624 + return -ENOMEM; 2625 + } 2664 2626 2665 2627 qed_init_cau_rt_data(cdev); 2666 2628 ··· 2682 2628 qm_info->pf_wfq_en = true; 2683 2629 } 2684 2630 2685 - memset(&params, 0, sizeof(params)); 2686 - params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine; 2687 - params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port; 2688 - params.pf_rl_en = qm_info->pf_rl_en; 2689 - params.pf_wfq_en = qm_info->pf_wfq_en; 2690 - params.global_rl_en = qm_info->vport_rl_en; 2691 - params.vport_wfq_en = qm_info->vport_wfq_en; 2692 - params.port_params = qm_info->qm_port_params; 2631 + params->max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine; 2632 + params->max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port; 2633 + params->pf_rl_en = qm_info->pf_rl_en; 2634 + params->pf_wfq_en = qm_info->pf_wfq_en; 2635 + params->global_rl_en = qm_info->vport_rl_en; 2636 + params->vport_wfq_en = qm_info->vport_wfq_en; 2637 + params->port_params = qm_info->qm_port_params; 2693 2638 2694 - qed_qm_common_rt_init(p_hwfn, &params); 2639 + qed_qm_common_rt_init(p_hwfn, params); 2695 2640 2696 2641 qed_cxt_hw_init_common(p_hwfn); 2697 2642 ··· 2698 2645 2699 2646 rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode); 2700 2647 if (rc) 2701 - return rc; 2648 + goto out; 2702 2649 2703 2650 qed_wr(p_hwfn, p_ptt, 
PSWRQ2_REG_L2P_VALIDATE_VFID, 0); 2704 2651 qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1); ··· 2717 2664 max_num_vfs = QED_IS_AH(cdev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB; 2718 2665 for (vf_id = 0; vf_id < max_num_vfs; vf_id++) { 2719 2666 concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id); 2720 - qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid); 2667 + qed_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid); 2721 2668 qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1); 2722 2669 qed_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0); 2723 2670 qed_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1); ··· 2725 2672 } 2726 2673 /* pretend to original PF */ 2727 2674 qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); 2675 + 2676 + out: 2677 + kfree(params); 2728 2678 2729 2679 return rc; 2730 2680 } ··· 2841 2785 qed_rdma_dpm_bar(p_hwfn, p_ptt); 2842 2786 } 2843 2787 2844 - p_hwfn->wid_count = (u16) n_cpus; 2788 + p_hwfn->wid_count = (u16)n_cpus; 2845 2789 2846 2790 DP_INFO(p_hwfn, 2847 2791 "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s, page_size=%lu\n", ··· 3560 3504 static void get_function_id(struct qed_hwfn *p_hwfn) 3561 3505 { 3562 3506 /* ME Register */ 3563 - p_hwfn->hw_info.opaque_fid = (u16) REG_RD(p_hwfn, 3564 - PXP_PF_ME_OPAQUE_ADDR); 3507 + p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, 3508 + PXP_PF_ME_OPAQUE_ADDR); 3565 3509 3566 3510 p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR); 3567 3511 ··· 3727 3671 3728 3672 return qed_hsi_def_val[type][chip_id]; 3729 3673 } 3674 + 3730 3675 static int 3731 3676 qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 3732 3677 { 3733 3678 u32 resc_max_val, mcp_resp; 3734 3679 u8 res_id; 3735 3680 int rc; 3681 + 3736 3682 for (res_id = 0; res_id < QED_MAX_RESC; res_id++) { 3737 3683 switch (res_id) { 3738 3684 case QED_LL2_RAM_QUEUE: ··· 3980 3922 * resources allocation queries should be atomic. 
Since several PFs can 3981 3923 * run in parallel - a resource lock is needed. 3982 3924 * If either the resource lock or resource set value commands are not 3983 - * supported - skip the the max values setting, release the lock if 3925 + * supported - skip the max values setting, release the lock if 3984 3926 * needed, and proceed to the queries. Other failures, including a 3985 3927 * failure to acquire the lock, will cause this function to fail. 3986 3928 */ ··· 4834 4776 if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) { 4835 4777 u16 min, max; 4836 4778 4837 - min = (u16) RESC_START(p_hwfn, QED_L2_QUEUE); 4779 + min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE); 4838 4780 max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE); 4839 4781 DP_NOTICE(p_hwfn, 4840 4782 "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
+1375 -179
drivers/net/ethernet/qlogic/qed/qed_hsi.h
··· 1 1 /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ 2 2 /* QLogic qed NIC Driver 3 3 * Copyright (c) 2015-2017 QLogic Corporation 4 - * Copyright (c) 2019-2020 Marvell International Ltd. 4 + * Copyright (c) 2019-2021 Marvell International Ltd. 5 5 */ 6 6 7 7 #ifndef _QED_HSI_H ··· 38 38 COMMON_EVENT_VF_PF_CHANNEL, 39 39 COMMON_EVENT_VF_FLR, 40 40 COMMON_EVENT_PF_UPDATE, 41 - COMMON_EVENT_MALICIOUS_VF, 41 + COMMON_EVENT_FW_ERROR, 42 42 COMMON_EVENT_RL_UPDATE, 43 43 COMMON_EVENT_EMPTY, 44 44 MAX_COMMON_EVENT_OPCODE ··· 84 84 MAX_CORE_L4_PSEUDO_CHECKSUM_MODE 85 85 }; 86 86 87 + /* LL2 SP error code */ 88 + enum core_ll2_error_code { 89 + LL2_OK = 0, 90 + LL2_ERROR, 91 + MAX_CORE_LL2_ERROR_CODE 92 + }; 93 + 87 94 /* Light-L2 RX Producers in Tstorm RAM */ 88 95 struct core_ll2_port_stats { 89 96 struct regpair gsi_invalid_hdr; ··· 130 123 struct regpair rcv_bcast_pkts; 131 124 }; 132 125 126 + struct core_ll2_rx_per_queue_stat { 127 + struct core_ll2_tstorm_per_queue_stat tstorm_stat; 128 + struct core_ll2_ustorm_per_queue_stat ustorm_stat; 129 + }; 130 + 131 + struct core_ll2_tx_per_queue_stat { 132 + struct core_ll2_pstorm_per_queue_stat pstorm_stat; 133 + }; 134 + 133 135 /* Structure for doorbell data, in PWM mode, for RX producers update. 
*/ 134 136 struct core_pwm_prod_update_data { 135 137 __le16 icid; /* internal CID */ ··· 149 133 #define CORE_PWM_PROD_UPDATE_DATA_RESERVED1_MASK 0x3F /* Set 0 */ 150 134 #define CORE_PWM_PROD_UPDATE_DATA_RESERVED1_SHIFT 2 151 135 struct core_ll2_rx_prod prod; /* Producers */ 136 + }; 137 + 138 + /* Ramrod data for rx/tx queue statistics query ramrod */ 139 + struct core_queue_stats_query_ramrod_data { 140 + u8 rx_stat; 141 + u8 tx_stat; 142 + __le16 reserved[3]; 143 + struct regpair rx_stat_addr; 144 + struct regpair tx_stat_addr; 152 145 }; 153 146 154 147 /* Core Ramrod Command IDs (light L2) */ ··· 235 210 __le16 vlan; 236 211 struct core_rx_cqe_opaque_data opaque_data; 237 212 struct parsing_err_flags err_flags; 238 - __le16 reserved0; 213 + u8 packet_source; 214 + u8 reserved0; 239 215 __le32 reserved1[3]; 240 216 }; 241 217 ··· 252 226 __le16 qp_id; 253 227 __le32 src_qp; 254 228 struct core_rx_cqe_opaque_data opaque_data; 255 - __le32 reserved; 229 + u8 packet_source; 230 + u8 reserved[3]; 256 231 }; 257 232 258 233 /* Core RX CQE for Light L2 */ ··· 270 243 struct core_rx_fast_path_cqe rx_cqe_fp; 271 244 struct core_rx_gsi_offload_cqe rx_cqe_gsi; 272 245 struct core_rx_slow_path_cqe rx_cqe_sp; 246 + }; 247 + 248 + /* RX packet source. 
*/ 249 + enum core_rx_pkt_source { 250 + CORE_RX_PKT_SOURCE_NETWORK = 0, 251 + CORE_RX_PKT_SOURCE_LB, 252 + CORE_RX_PKT_SOURCE_TX, 253 + CORE_RX_PKT_SOURCE_LL2_TX, 254 + MAX_CORE_RX_PKT_SOURCE 273 255 }; 274 256 275 257 /* Ramrod data for rx queue start ramrod */ ··· 398 362 u8 update_qm_pq_id_flg; 399 363 u8 reserved0; 400 364 __le16 qm_pq_id; 401 - __le32 reserved1; 365 + __le32 reserved1[1]; 402 366 }; 403 367 404 368 /* Enum flag for what type of dcb data to update */ ··· 422 386 423 387 /* Core Slowpath Connection storm context of Xstorm */ 424 388 struct xstorm_core_conn_st_ctx { 425 - __le32 spq_base_lo; 426 - __le32 spq_base_hi; 427 - struct regpair consolid_base_addr; 389 + struct regpair spq_base_addr; 390 + __le32 reserved0[2]; 428 391 __le16 spq_cons; 429 - __le16 consolid_cons; 430 - __le32 reserved0[55]; 392 + __le16 reserved1[111]; 431 393 }; 432 394 433 395 struct xstorm_core_conn_ag_ctx { ··· 964 930 965 931 /* Update RSS indirection table entry command */ 966 932 struct eth_tstorm_rss_update_data { 967 - u8 valid; 968 933 u8 vport_id; 969 934 u8 ind_table_index; 970 - u8 reserved; 971 935 __le16 ind_table_value; 972 936 __le16 reserved1; 937 + u8 reserved; 938 + u8 valid; 973 939 }; 974 940 975 941 struct eth_ustorm_per_pf_stat { ··· 1001 967 struct regpair msg_addr; 1002 968 }; 1003 969 1004 - /* Event Ring malicious VF data */ 1005 - struct malicious_vf_eqe_data { 1006 - u8 vf_id; 1007 - u8 err_id; 1008 - __le16 reserved[3]; 1009 - }; 1010 - 1011 970 /* Event Ring initial cleanup data */ 1012 971 struct initial_cleanup_eqe_data { 1013 972 u8 vf_id; 1014 973 u8 reserved[7]; 974 + }; 975 + 976 + /* FW error data */ 977 + struct fw_err_data { 978 + u8 recovery_scope; 979 + u8 err_id; 980 + __le16 entity_id; 981 + u8 reserved[4]; 1015 982 }; 1016 983 1017 984 /* Event Data Union */ ··· 1022 987 struct iscsi_eqe_data iscsi_info; 1023 988 struct iscsi_connect_done_results iscsi_conn_done_info; 1024 989 union rdma_eqe_data rdma_data; 1025 - struct 
malicious_vf_eqe_data malicious_vf; 1026 990 struct initial_cleanup_eqe_data vf_init_cleanup; 991 + struct fw_err_data err_data; 1027 992 }; 1028 993 1029 994 /* Event Ring Entry */ ··· 1077 1042 u8 major_ver_arr[2]; 1078 1043 }; 1079 1044 1045 + /* Integration Phase */ 1046 + enum integ_phase { 1047 + INTEG_PHASE_BB_A0_LATEST = 3, 1048 + INTEG_PHASE_BB_B0_NO_MCP = 10, 1049 + INTEG_PHASE_BB_B0_WITH_MCP = 11, 1050 + MAX_INTEG_PHASE 1051 + }; 1052 + 1053 + /* Ports mode */ 1080 1054 enum iwarp_ll2_tx_queues { 1081 1055 IWARP_LL2_IN_ORDER_TX_QUEUE = 1, 1082 1056 IWARP_LL2_ALIGNED_TX_QUEUE, ··· 1094 1050 MAX_IWARP_LL2_TX_QUEUES 1095 1051 }; 1096 1052 1097 - /* Malicious VF error ID */ 1098 - enum malicious_vf_error_id { 1099 - MALICIOUS_VF_NO_ERROR, 1053 + /* Function error ID */ 1054 + enum func_err_id { 1055 + FUNC_NO_ERROR, 1100 1056 VF_PF_CHANNEL_NOT_READY, 1101 1057 VF_ZONE_MSG_NOT_VALID, 1102 1058 VF_ZONE_FUNC_NOT_ENABLED, ··· 1131 1087 CORE_PACKET_SIZE_TOO_LARGE, 1132 1088 CORE_ILLEGAL_BD_FLAGS, 1133 1089 CORE_GSI_PACKET_VIOLATION, 1134 - MAX_MALICIOUS_VF_ERROR_ID, 1090 + MAX_FUNC_ERR_ID 1091 + }; 1092 + 1093 + /* FW error handling mode */ 1094 + enum fw_err_mode { 1095 + FW_ERR_FATAL_ASSERT, 1096 + FW_ERR_DRV_REPORT, 1097 + MAX_FW_ERR_MODE 1098 + }; 1099 + 1100 + /* FW error recovery scope */ 1101 + enum fw_err_recovery_scope { 1102 + ERR_SCOPE_INVALID, 1103 + ERR_SCOPE_TX_Q, 1104 + ERR_SCOPE_RX_Q, 1105 + ERR_SCOPE_QP, 1106 + ERR_SCOPE_VPORT, 1107 + ERR_SCOPE_FUNC, 1108 + ERR_SCOPE_PORT, 1109 + ERR_SCOPE_ENGINE, 1110 + MAX_FW_ERR_RECOVERY_SCOPE 1135 1111 }; 1136 1112 1137 1113 /* Mstorm non-triggering VF zone */ ··· 1212 1148 /* Ramrod data for PF start ramrod */ 1213 1149 struct pf_start_ramrod_data { 1214 1150 struct regpair event_ring_pbl_addr; 1215 - struct regpair consolid_q_pbl_addr; 1151 + struct regpair consolid_q_pbl_base_addr; 1216 1152 struct pf_start_tunnel_config tunnel_config; 1217 1153 __le16 event_ring_sb_id; 1218 1154 u8 base_vf_id; ··· 1230 
1166 u8 reserved0; 1231 1167 struct hsi_fp_ver_struct hsi_fp_ver; 1232 1168 struct outer_tag_config_struct outer_tag_config; 1169 + u8 pf_fp_err_mode; 1170 + u8 consolid_q_num_pages; 1171 + u8 reserved[6]; 1233 1172 }; 1234 1173 1235 1174 /* Data for port update ramrod */ ··· 1295 1228 ENGX1_PORTX2, 1296 1229 ENGX1_PORTX4, 1297 1230 MAX_PORTS_MODE 1231 + }; 1232 + 1233 + /* Protocol-common error code */ 1234 + enum protocol_common_error_code { 1235 + COMMON_ERR_CODE_OK = 0, 1236 + COMMON_ERR_CODE_ERROR, 1237 + MAX_PROTOCOL_COMMON_ERROR_CODE 1298 1238 }; 1299 1239 1300 1240 /* use to index in hsi_fp_[major|minor]_ver_arr per protocol */ ··· 1778 1704 #define IGU_MSIX_VECTOR_RESERVED1_MASK 0xFF 1779 1705 #define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24 1780 1706 }; 1707 + 1781 1708 /* per encapsulation type enabling flags */ 1782 1709 struct prs_reg_encapsulation_type_en { 1783 1710 u8 flags; ··· 1956 1881 1957 1882 /* QM per global RL init parameters */ 1958 1883 struct init_qm_global_rl_params { 1884 + u8 type; 1885 + u8 reserved0; 1886 + u16 reserved1; 1959 1887 u32 rate_limit; 1960 1888 }; 1961 1889 ··· 1973 1895 1974 1896 /* QM per-PQ init parameters */ 1975 1897 struct init_qm_pq_params { 1976 - u8 vport_id; 1898 + u16 vport_id; 1899 + u16 rl_id; 1900 + u8 rl_valid; 1977 1901 u8 tc_id; 1978 1902 u8 wrr_group; 1979 - u8 rl_valid; 1980 - u16 rl_id; 1981 1903 u8 port_id; 1982 - u8 reserved; 1904 + }; 1905 + 1906 + /* QM per RL init parameters */ 1907 + struct init_qm_rl_params { 1908 + u32 vport_rl; 1909 + u8 vport_rl_type; 1910 + u8 reserved[3]; 1911 + }; 1912 + 1913 + /* QM Rate Limiter types */ 1914 + enum init_qm_rl_type { 1915 + QM_RL_TYPE_NORMAL, 1916 + QM_RL_TYPE_QCN, 1917 + MAX_INIT_QM_RL_TYPE 1983 1918 }; 1984 1919 1985 1920 /* QM per-vport init parameters */ 1986 1921 struct init_qm_vport_params { 1987 1922 u16 wfq; 1923 + u16 reserved; 1924 + u16 tc_wfq[NUM_OF_TCS]; 1988 1925 u16 first_tx_pq_id[NUM_OF_TCS]; 1989 1926 }; 1990 1927 ··· 2058 1965 }; 2059 1966 
2060 1967 enum init_modes { 2061 - MODE_RESERVED, 1968 + MODE_BB_A0_DEPRECATED, 2062 1969 MODE_BB, 2063 1970 MODE_K2, 2064 1971 MODE_ASIC, 2065 - MODE_RESERVED2, 2066 - MODE_RESERVED3, 2067 - MODE_RESERVED4, 2068 - MODE_RESERVED5, 1972 + MODE_EMUL_REDUCED, 1973 + MODE_EMUL_FULL, 1974 + MODE_FPGA, 1975 + MODE_CHIPSIM, 2069 1976 MODE_SF, 2070 1977 MODE_MF_SD, 2071 1978 MODE_MF_SI, ··· 2073 1980 MODE_PORTS_PER_ENG_2, 2074 1981 MODE_PORTS_PER_ENG_4, 2075 1982 MODE_100G, 2076 - MODE_RESERVED6, 2077 - MODE_RESERVED7, 1983 + MODE_SKIP_PRAM_INIT, 1984 + MODE_EMUL_MAC, 2078 1985 MAX_INIT_MODES 2079 1986 }; 2080 1987 ··· 2375 2282 /* Win 13 */ 2376 2283 #define GTT_BAR0_MAP_REG_PSDM_RAM 0x01a000UL 2377 2284 2285 + /* Returns the VOQ based on port and TC */ 2286 + #define VOQ(port, tc, max_phys_tcs_per_port) ((tc) == \ 2287 + PURE_LB_TC ? NUM_OF_PHYS_TCS *\ 2288 + MAX_NUM_PORTS_BB + \ 2289 + (port) : (port) * \ 2290 + (max_phys_tcs_per_port) + (tc)) 2291 + 2292 + struct init_qm_pq_params; 2293 + 2378 2294 /** 2379 2295 * qed_qm_pf_mem_size(): Prepare QM ILT sizes. 2380 2296 * ··· 2410 2308 bool global_rl_en; 2411 2309 bool vport_wfq_en; 2412 2310 struct init_qm_port_params *port_params; 2311 + struct init_qm_global_rl_params 2312 + global_rl_params[COMMON_MAX_QM_GLOBAL_RLS]; 2413 2313 }; 2414 2314 2315 + /** 2316 + * qed_qm_common_rt_init(): Prepare QM runtime init values for the 2317 + * engine phase. 2318 + * 2319 + * @p_hwfn: HW device data. 2320 + * @p_params: Parameters. 2321 + * 2322 + * Return: 0 on success, -1 on error. 
2323 + */ 2415 2324 int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn, 2416 2325 struct qed_qm_common_rt_init_params *p_params); 2417 2326 ··· 2439 2326 u16 num_vf_pqs; 2440 2327 u16 start_vport; 2441 2328 u16 num_vports; 2329 + u16 start_rl; 2330 + u16 num_rls; 2442 2331 u16 pf_wfq; 2443 2332 u32 pf_rl; 2333 + u32 link_speed; 2444 2334 struct init_qm_pq_params *pq_params; 2445 2335 struct init_qm_vport_params *vport_params; 2336 + struct init_qm_rl_params *rl_params; 2446 2337 }; 2447 2338 2339 + /** 2340 + * qed_qm_pf_rt_init(): Prepare QM runtime init values for the PF phase. 2341 + * 2342 + * @p_hwfn: HW device data. 2343 + * @p_ptt: Ptt window used for writing the registers 2344 + * @p_params: Parameters. 2345 + * 2346 + * Return: 0 on success, -1 on error. 2347 + */ 2448 2348 int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn, 2449 - struct qed_ptt *p_ptt, 2450 - struct qed_qm_pf_rt_init_params *p_params); 2349 + struct qed_ptt *p_ptt, 2350 + struct qed_qm_pf_rt_init_params *p_params); 2451 2351 2452 2352 /** 2453 2353 * qed_init_pf_wfq(): Initializes the WFQ weight of the specified PF. ··· 2505 2379 u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq); 2506 2380 2507 2381 /** 2382 + * qed_init_vport_tc_wfq(): Initializes the WFQ weight of the specified 2383 + * VPORT and TC. 2384 + * 2385 + * @p_hwfn: HW device data. 2386 + * @p_ptt: Ptt window used for writing the registers. 2387 + * @first_tx_pq_id: The first Tx PQ ID associated with the VPORT and TC. 2388 + * (filled by qed_qm_pf_rt_init). 2389 + * @weight: VPORT+TC WFQ weight. 2390 + * 2391 + * Return: 0 on success, -1 on error. 2392 + */ 2393 + int qed_init_vport_tc_wfq(struct qed_hwfn *p_hwfn, 2394 + struct qed_ptt *p_ptt, 2395 + u16 first_tx_pq_id, u16 weight); 2396 + 2397 + /** 2508 2398 * qed_init_global_rl(): Initializes the rate limit of the specified 2509 2399 * rate limiter. 2510 2400 * ··· 2528 2386 * @p_ptt: Ptt window used for writing the registers. 2529 2387 * @rl_id: RL ID. 
2530 2388 * @rate_limit: Rate limit in Mb/sec units 2389 + * @vport_rl_type: Vport RL type. 2531 2390 * 2532 2391 * Return: 0 on success, -1 on error. 2533 2392 */ 2534 2393 int qed_init_global_rl(struct qed_hwfn *p_hwfn, 2535 2394 struct qed_ptt *p_ptt, 2536 - u16 rl_id, u32 rate_limit); 2395 + u16 rl_id, u32 rate_limit, 2396 + enum init_qm_rl_type vport_rl_type); 2537 2397 2538 2398 /** 2539 2399 * qed_send_qm_stop_cmd(): Sends a stop command to the QM. ··· 2771 2627 * Return: Void. 2772 2628 */ 2773 2629 void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn, 2774 - struct phys_mem_desc *fw_overlay_mem); 2630 + struct phys_mem_desc **fw_overlay_mem); 2631 + 2632 + #define PCICFG_OFFSET 0x2000 2633 + #define GRC_CONFIG_REG_PF_INIT_VF 0x624 2634 + 2635 + /* First VF_NUM for PF is encoded in this register. 2636 + * The number of VFs assigned to a PF is assumed to be a multiple of 8. 2637 + * Software should program these bits based on Total Number of VFs programmed 2638 + * for each PF. 2639 + * Since registers from 0x000-0x7ff are spilt across functions, each PF will 2640 + * have the same location for the same 4 bits 2641 + */ 2642 + #define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xff 2775 2643 2776 2644 /* Runtime array offsets */ 2777 2645 #define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0 ··· 3114 2958 #define QM_REG_TXPQMAP_RT_SIZE 512 3115 2959 #define QM_REG_WFQVPWEIGHT_RT_OFFSET 31556 3116 2960 #define QM_REG_WFQVPWEIGHT_RT_SIZE 512 3117 - #define QM_REG_WFQVPCRD_RT_OFFSET 32068 2961 + #define QM_REG_WFQVPUPPERBOUND_RT_OFFSET 32068 2962 + #define QM_REG_WFQVPUPPERBOUND_RT_SIZE 512 2963 + #define QM_REG_WFQVPCRD_RT_OFFSET 32580 3118 2964 #define QM_REG_WFQVPCRD_RT_SIZE 512 3119 - #define QM_REG_WFQVPMAP_RT_OFFSET 32580 2965 + #define QM_REG_WFQVPMAP_RT_OFFSET 33092 3120 2966 #define QM_REG_WFQVPMAP_RT_SIZE 512 3121 - #define QM_REG_PTRTBLTX_RT_OFFSET 33092 2967 + #define QM_REG_PTRTBLTX_RT_OFFSET 33604 3122 2968 #define QM_REG_PTRTBLTX_RT_SIZE 1024 3123 - #define 
QM_REG_WFQPFCRD_MSB_RT_OFFSET 34116 2969 + #define QM_REG_WFQPFCRD_MSB_RT_OFFSET 34628 3124 2970 #define QM_REG_WFQPFCRD_MSB_RT_SIZE 160 3125 - #define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34276 3126 - #define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 34277 3127 - #define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34278 3128 - #define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34279 3129 - #define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34280 3130 - #define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34281 3131 - #define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34282 3132 - #define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34283 2971 + #define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34788 2972 + #define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 34789 2973 + #define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34790 2974 + #define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34791 2975 + #define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34792 2976 + #define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34793 2977 + #define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34794 2978 + #define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34795 3133 2979 #define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4 3134 - #define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34287 2980 + #define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34799 3135 2981 #define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4 3136 - #define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34291 2982 + #define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34803 3137 2983 #define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32 3138 - #define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34323 2984 + #define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34835 3139 2985 #define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16 3140 - #define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34339 2986 + #define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34851 3141 2987 #define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16 3142 - #define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34355 2988 + #define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34867 3143 2989 
#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16 3144 - #define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34371 2990 + #define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34883 3145 2991 #define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16 3146 - #define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34387 3147 - #define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 34388 2992 + #define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34899 2993 + #define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 34900 3148 2994 #define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8 3149 - #define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34396 3150 - #define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34397 3151 - #define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34398 3152 - #define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34399 3153 - #define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34400 3154 - #define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34401 3155 - #define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34402 3156 - #define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34403 3157 - #define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34404 3158 - #define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34405 3159 - #define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34406 3160 - #define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34407 3161 - #define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34408 3162 - #define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34409 3163 - #define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34410 3164 - #define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34411 3165 - #define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34412 3166 - #define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34413 3167 - #define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34414 3168 - #define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34415 3169 - #define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34416 3170 - #define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34417 3171 - #define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34418 3172 - #define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34419 3173 - #define 
PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34420 3174 - #define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34421 3175 - #define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34422 3176 - #define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34423 3177 - #define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34424 3178 - #define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34425 3179 - #define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34426 3180 - #define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34427 3181 - #define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34428 3182 - #define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34429 3183 - #define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34430 3184 - #define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34431 3185 - #define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34432 3186 - #define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34433 3187 - #define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34434 3188 - #define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34435 3189 - #define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34436 3190 - #define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34437 3191 - #define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34438 3192 - #define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34439 3193 - #define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34440 3194 - #define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34441 3195 - #define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34442 3196 - #define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34443 3197 - #define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34444 3198 - #define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34445 3199 - #define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34446 3200 - #define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34447 3201 - #define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34448 3202 - #define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34449 3203 - #define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34450 3204 - #define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34451 3205 - 
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34452 3206 - #define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34453 3207 - #define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34454 3208 - #define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34455 3209 - #define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34456 3210 - #define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34457 3211 - #define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34458 3212 - #define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34459 3213 - #define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34460 3214 - #define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34461 3215 - #define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34462 3216 - #define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34463 3217 - #define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34464 3218 - #define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34465 3219 - #define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34466 3220 - #define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34467 3221 - #define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34468 3222 - #define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34469 3223 - #define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34470 3224 - #define XCM_REG_CON_PHY_Q3_RT_OFFSET 34471 2995 + #define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34908 2996 + #define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34909 2997 + #define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34910 2998 + #define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34911 2999 + #define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34912 3000 + #define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34913 3001 + #define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34914 3002 + #define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34915 3003 + #define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34916 3004 + #define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34917 3005 + #define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34918 3006 + #define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34919 3007 + #define 
CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34920 3008 + #define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34921 3009 + #define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34922 3010 + #define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34923 3011 + #define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34924 3012 + #define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34925 3013 + #define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34926 3014 + #define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34927 3015 + #define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34928 3016 + #define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34929 3017 + #define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34930 3018 + #define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34931 3019 + #define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34932 3020 + #define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34933 3021 + #define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34934 3022 + #define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34935 3023 + #define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34936 3024 + #define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34937 3025 + #define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34938 3026 + #define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34939 3027 + #define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34940 3028 + #define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34941 3029 + #define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34942 3030 + #define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34943 3031 + #define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34944 3032 + #define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34945 3033 + #define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34946 3034 + #define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34947 3035 + #define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34948 3036 + #define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34949 3037 + #define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34950 3038 + #define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34951 3039 + #define 
PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34952 3040 + #define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34953 3041 + #define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34954 3042 + #define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34955 3043 + #define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34956 3044 + #define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34957 3045 + #define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34958 3046 + #define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34959 3047 + #define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34960 3048 + #define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34961 3049 + #define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34962 3050 + #define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34963 3051 + #define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34964 3052 + #define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34965 3053 + #define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34966 3054 + #define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34967 3055 + #define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34968 3056 + #define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34969 3057 + #define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34970 3058 + #define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34971 3059 + #define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34972 3060 + #define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34973 3061 + #define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34974 3062 + #define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34975 3063 + #define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34976 3064 + #define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34977 3065 + #define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34978 3066 + #define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34979 3067 + #define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34980 3068 + #define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34981 3069 + #define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34982 3070 + #define XCM_REG_CON_PHY_Q3_RT_OFFSET 
34983 3225 3071 3226 - #define RUNTIME_ARRAY_SIZE 34472 3072 + #define RUNTIME_ARRAY_SIZE 34984 3227 3073 3228 3074 /* Init Callbacks */ 3229 3075 #define DMAE_READY_CB 0 ··· 3907 3749 ETH_RAMROD_RX_ADD_UDP_FILTER, 3908 3750 ETH_RAMROD_RX_DELETE_UDP_FILTER, 3909 3751 ETH_RAMROD_RX_CREATE_GFT_ACTION, 3910 - ETH_RAMROD_GFT_UPDATE_FILTER, 3752 + ETH_RAMROD_RX_UPDATE_GFT_FILTER, 3911 3753 ETH_RAMROD_TX_QUEUE_UPDATE, 3912 3754 ETH_RAMROD_RGFS_FILTER_ADD, 3913 3755 ETH_RAMROD_RGFS_FILTER_DEL, ··· 3991 3833 u8 update_rss_ind_table; 3992 3834 u8 update_rss_capabilities; 3993 3835 u8 tbl_size; 3994 - __le32 reserved2[2]; 3836 + u8 ind_table_mask_valid; 3837 + u8 reserved2[3]; 3995 3838 __le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM]; 3839 + __le32 ind_table_mask[ETH_RSS_IND_TABLE_MASK_SIZE_REGS]; 3996 3840 __le32 rss_key[ETH_RSS_KEY_SIZE_REGS]; 3997 - __le32 reserved3[2]; 3841 + __le32 reserved3; 3998 3842 }; 3999 3843 4000 3844 /* eth vport RSS mode */ ··· 4071 3911 MAX_GFT_FILTER_UPDATE_ACTION 4072 3912 }; 4073 3913 3914 + /* Ramrod data for rx create gft action */ 3915 + struct rx_create_gft_action_ramrod_data { 3916 + u8 vport_id; 3917 + u8 reserved[7]; 3918 + }; 3919 + 3920 + /* Ramrod data for rx create openflow action */ 3921 + struct rx_create_openflow_action_ramrod_data { 3922 + u8 vport_id; 3923 + u8 reserved[7]; 3924 + }; 3925 + 4074 3926 /* Ramrod data for rx add openflow filter */ 4075 - struct rx_add_openflow_filter_data { 3927 + struct rx_openflow_filter_ramrod_data { 4076 3928 __le16 action_icid; 4077 3929 u8 priority; 4078 3930 u8 reserved0; ··· 4105 3933 __le32 ipv4_src_addr; 4106 3934 __le16 l4_dst_port; 4107 3935 __le16 l4_src_port; 4108 - }; 4109 - 4110 - /* Ramrod data for rx create gft action */ 4111 - struct rx_create_gft_action_data { 4112 - u8 vport_id; 4113 - u8 reserved[7]; 4114 - }; 4115 - 4116 - /* Ramrod data for rx create openflow action */ 4117 - struct rx_create_openflow_action_data { 4118 - u8 vport_id; 4119 - u8 reserved[7]; 4120 
3936 }; 4121 3937 4122 3938 /* Ramrod data for rx queue start ramrod */ ··· 4165 4005 }; 4166 4006 4167 4007 /* Ramrod data for rx Add UDP Filter */ 4168 - struct rx_udp_filter_data { 4008 + struct rx_udp_filter_ramrod_data { 4169 4009 __le16 action_icid; 4170 4010 __le16 vlan_id; 4171 4011 u8 ip_type; ··· 4181 4021 /* Add or delete GFT filter - filter is packet header of type of packet wished 4182 4022 * to pass certain FW flow. 4183 4023 */ 4184 - struct rx_update_gft_filter_data { 4024 + struct rx_update_gft_filter_ramrod_data { 4185 4025 struct regpair pkt_hdr_addr; 4186 4026 __le16 pkt_hdr_length; 4187 4027 __le16 action_icid; ··· 4221 4061 u8 pxp_tph_valid_bd; 4222 4062 u8 pxp_tph_valid_pkt; 4223 4063 __le16 pxp_st_index; 4224 - __le16 comp_agg_size; 4064 + u8 comp_agg_size; 4065 + u8 reserved3; 4225 4066 __le16 queue_zone_id; 4226 4067 __le16 reserved2; 4227 4068 __le16 pbl_size; ··· 4343 4182 u8 ctl_frame_ethtype_check_en; 4344 4183 u8 update_in_to_in_pri_map_mode; 4345 4184 u8 in_to_in_pri_map[8]; 4346 - u8 reserved[6]; 4185 + u8 update_tx_dst_port_mode_flg; 4186 + u8 tx_dst_port_mode_config; 4187 + u8 dst_vport_id; 4188 + u8 tx_dst_port_mode; 4189 + u8 dst_vport_id_valid; 4190 + u8 reserved[1]; 4347 4191 }; 4348 4192 4349 4193 struct vport_update_ramrod_mcast { ··· 4882 4716 #define GFT_CAM_LINE_MAPPED_RESERVED1_SHIFT 29 4883 4717 }; 4884 4718 4885 - 4886 4719 /* Used in gft_profile_key: Indication for ip version */ 4887 4720 enum gft_profile_ip_version { 4888 4721 GFT_PROFILE_IPV4 = 0, ··· 5242 5077 struct ustorm_rdma_task_ag_ctx ustorm_ag_context; 5243 5078 }; 5244 5079 5080 + #define TOE_MAX_RAMROD_PER_PF 8 5081 + #define TOE_TX_PAGE_SIZE_BYTES 4096 5082 + #define TOE_GRQ_PAGE_SIZE_BYTES 4096 5083 + #define TOE_RX_CQ_PAGE_SIZE_BYTES 4096 5084 + 5085 + #define TOE_RX_MAX_RSS_CHAINS 64 5086 + #define TOE_TX_MAX_TSS_CHAINS 64 5087 + #define TOE_RSS_INDIRECTION_TABLE_SIZE 128 5088 + 5089 + /* The toe storm context of Mstorm */ 5090 + struct 
mstorm_toe_conn_st_ctx { 5091 + __le32 reserved[24]; 5092 + }; 5093 + 5094 + /* The toe storm context of Pstorm */ 5095 + struct pstorm_toe_conn_st_ctx { 5096 + __le32 reserved[36]; 5097 + }; 5098 + 5099 + /* The toe storm context of Ystorm */ 5100 + struct ystorm_toe_conn_st_ctx { 5101 + __le32 reserved[8]; 5102 + }; 5103 + 5104 + /* The toe storm context of Xstorm */ 5105 + struct xstorm_toe_conn_st_ctx { 5106 + __le32 reserved[44]; 5107 + }; 5108 + 5109 + struct ystorm_toe_conn_ag_ctx { 5110 + u8 byte0; 5111 + u8 byte1; 5112 + u8 flags0; 5113 + #define YSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 5114 + #define YSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 5115 + #define YSTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1 5116 + #define YSTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1 5117 + #define YSTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_MASK 0x3 5118 + #define YSTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_SHIFT 2 5119 + #define YSTORM_TOE_CONN_AG_CTX_RESET_RECEIVED_CF_MASK 0x3 5120 + #define YSTORM_TOE_CONN_AG_CTX_RESET_RECEIVED_CF_SHIFT 4 5121 + #define YSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3 5122 + #define YSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 6 5123 + u8 flags1; 5124 + #define YSTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_EN_MASK 0x1 5125 + #define YSTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_EN_SHIFT 0 5126 + #define YSTORM_TOE_CONN_AG_CTX_RESET_RECEIVED_CF_EN_MASK 0x1 5127 + #define YSTORM_TOE_CONN_AG_CTX_RESET_RECEIVED_CF_EN_SHIFT 1 5128 + #define YSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1 5129 + #define YSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 2 5130 + #define YSTORM_TOE_CONN_AG_CTX_REL_SEQ_EN_MASK 0x1 5131 + #define YSTORM_TOE_CONN_AG_CTX_REL_SEQ_EN_SHIFT 3 5132 + #define YSTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1 5133 + #define YSTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 4 5134 + #define YSTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1 5135 + #define YSTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 5 5136 + #define YSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1 5137 + #define YSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 6 5138 + #define YSTORM_TOE_CONN_AG_CTX_CONS_PROD_EN_MASK 
0x1 5139 + #define YSTORM_TOE_CONN_AG_CTX_CONS_PROD_EN_SHIFT 7 5140 + u8 completion_opcode; 5141 + u8 byte3; 5142 + __le16 word0; 5143 + __le32 rel_seq; 5144 + __le32 rel_seq_threshold; 5145 + __le16 app_prod; 5146 + __le16 app_cons; 5147 + __le16 word3; 5148 + __le16 word4; 5149 + __le32 reg2; 5150 + __le32 reg3; 5151 + }; 5152 + 5153 + struct xstorm_toe_conn_ag_ctx { 5154 + u8 reserved0; 5155 + u8 state; 5156 + u8 flags0; 5157 + #define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 5158 + #define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 5159 + #define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1 5160 + #define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1 5161 + #define XSTORM_TOE_CONN_AG_CTX_RESERVED1_MASK 0x1 5162 + #define XSTORM_TOE_CONN_AG_CTX_RESERVED1_SHIFT 2 5163 + #define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 5164 + #define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 5165 + #define XSTORM_TOE_CONN_AG_CTX_TX_DEC_RULE_RES_MASK 0x1 5166 + #define XSTORM_TOE_CONN_AG_CTX_TX_DEC_RULE_RES_SHIFT 4 5167 + #define XSTORM_TOE_CONN_AG_CTX_RESERVED2_MASK 0x1 5168 + #define XSTORM_TOE_CONN_AG_CTX_RESERVED2_SHIFT 5 5169 + #define XSTORM_TOE_CONN_AG_CTX_BIT6_MASK 0x1 5170 + #define XSTORM_TOE_CONN_AG_CTX_BIT6_SHIFT 6 5171 + #define XSTORM_TOE_CONN_AG_CTX_BIT7_MASK 0x1 5172 + #define XSTORM_TOE_CONN_AG_CTX_BIT7_SHIFT 7 5173 + u8 flags1; 5174 + #define XSTORM_TOE_CONN_AG_CTX_BIT8_MASK 0x1 5175 + #define XSTORM_TOE_CONN_AG_CTX_BIT8_SHIFT 0 5176 + #define XSTORM_TOE_CONN_AG_CTX_BIT9_MASK 0x1 5177 + #define XSTORM_TOE_CONN_AG_CTX_BIT9_SHIFT 1 5178 + #define XSTORM_TOE_CONN_AG_CTX_BIT10_MASK 0x1 5179 + #define XSTORM_TOE_CONN_AG_CTX_BIT10_SHIFT 2 5180 + #define XSTORM_TOE_CONN_AG_CTX_BIT11_MASK 0x1 5181 + #define XSTORM_TOE_CONN_AG_CTX_BIT11_SHIFT 3 5182 + #define XSTORM_TOE_CONN_AG_CTX_BIT12_MASK 0x1 5183 + #define XSTORM_TOE_CONN_AG_CTX_BIT12_SHIFT 4 5184 + #define XSTORM_TOE_CONN_AG_CTX_BIT13_MASK 0x1 5185 + #define XSTORM_TOE_CONN_AG_CTX_BIT13_SHIFT 5 5186 + 
#define XSTORM_TOE_CONN_AG_CTX_BIT14_MASK 0x1 5187 + #define XSTORM_TOE_CONN_AG_CTX_BIT14_SHIFT 6 5188 + #define XSTORM_TOE_CONN_AG_CTX_BIT15_MASK 0x1 5189 + #define XSTORM_TOE_CONN_AG_CTX_BIT15_SHIFT 7 5190 + u8 flags2; 5191 + #define XSTORM_TOE_CONN_AG_CTX_CF0_MASK 0x3 5192 + #define XSTORM_TOE_CONN_AG_CTX_CF0_SHIFT 0 5193 + #define XSTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3 5194 + #define XSTORM_TOE_CONN_AG_CTX_CF1_SHIFT 2 5195 + #define XSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3 5196 + #define XSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 4 5197 + #define XSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 5198 + #define XSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6 5199 + u8 flags3; 5200 + #define XSTORM_TOE_CONN_AG_CTX_CF4_MASK 0x3 5201 + #define XSTORM_TOE_CONN_AG_CTX_CF4_SHIFT 0 5202 + #define XSTORM_TOE_CONN_AG_CTX_CF5_MASK 0x3 5203 + #define XSTORM_TOE_CONN_AG_CTX_CF5_SHIFT 2 5204 + #define XSTORM_TOE_CONN_AG_CTX_CF6_MASK 0x3 5205 + #define XSTORM_TOE_CONN_AG_CTX_CF6_SHIFT 4 5206 + #define XSTORM_TOE_CONN_AG_CTX_CF7_MASK 0x3 5207 + #define XSTORM_TOE_CONN_AG_CTX_CF7_SHIFT 6 5208 + u8 flags4; 5209 + #define XSTORM_TOE_CONN_AG_CTX_CF8_MASK 0x3 5210 + #define XSTORM_TOE_CONN_AG_CTX_CF8_SHIFT 0 5211 + #define XSTORM_TOE_CONN_AG_CTX_CF9_MASK 0x3 5212 + #define XSTORM_TOE_CONN_AG_CTX_CF9_SHIFT 2 5213 + #define XSTORM_TOE_CONN_AG_CTX_CF10_MASK 0x3 5214 + #define XSTORM_TOE_CONN_AG_CTX_CF10_SHIFT 4 5215 + #define XSTORM_TOE_CONN_AG_CTX_CF11_MASK 0x3 5216 + #define XSTORM_TOE_CONN_AG_CTX_CF11_SHIFT 6 5217 + u8 flags5; 5218 + #define XSTORM_TOE_CONN_AG_CTX_CF12_MASK 0x3 5219 + #define XSTORM_TOE_CONN_AG_CTX_CF12_SHIFT 0 5220 + #define XSTORM_TOE_CONN_AG_CTX_CF13_MASK 0x3 5221 + #define XSTORM_TOE_CONN_AG_CTX_CF13_SHIFT 2 5222 + #define XSTORM_TOE_CONN_AG_CTX_CF14_MASK 0x3 5223 + #define XSTORM_TOE_CONN_AG_CTX_CF14_SHIFT 4 5224 + #define XSTORM_TOE_CONN_AG_CTX_CF15_MASK 0x3 5225 + #define XSTORM_TOE_CONN_AG_CTX_CF15_SHIFT 6 5226 + u8 flags6; 5227 + #define XSTORM_TOE_CONN_AG_CTX_CF16_MASK 0x3 5228 
+ #define XSTORM_TOE_CONN_AG_CTX_CF16_SHIFT 0 5229 + #define XSTORM_TOE_CONN_AG_CTX_CF17_MASK 0x3 5230 + #define XSTORM_TOE_CONN_AG_CTX_CF17_SHIFT 2 5231 + #define XSTORM_TOE_CONN_AG_CTX_CF18_MASK 0x3 5232 + #define XSTORM_TOE_CONN_AG_CTX_CF18_SHIFT 4 5233 + #define XSTORM_TOE_CONN_AG_CTX_DQ_FLUSH_MASK 0x3 5234 + #define XSTORM_TOE_CONN_AG_CTX_DQ_FLUSH_SHIFT 6 5235 + u8 flags7; 5236 + #define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 5237 + #define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 5238 + #define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q1_MASK 0x3 5239 + #define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q1_SHIFT 2 5240 + #define XSTORM_TOE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 5241 + #define XSTORM_TOE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 5242 + #define XSTORM_TOE_CONN_AG_CTX_CF0EN_MASK 0x1 5243 + #define XSTORM_TOE_CONN_AG_CTX_CF0EN_SHIFT 6 5244 + #define XSTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1 5245 + #define XSTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 7 5246 + u8 flags8; 5247 + #define XSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1 5248 + #define XSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 0 5249 + #define XSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 5250 + #define XSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1 5251 + #define XSTORM_TOE_CONN_AG_CTX_CF4EN_MASK 0x1 5252 + #define XSTORM_TOE_CONN_AG_CTX_CF4EN_SHIFT 2 5253 + #define XSTORM_TOE_CONN_AG_CTX_CF5EN_MASK 0x1 5254 + #define XSTORM_TOE_CONN_AG_CTX_CF5EN_SHIFT 3 5255 + #define XSTORM_TOE_CONN_AG_CTX_CF6EN_MASK 0x1 5256 + #define XSTORM_TOE_CONN_AG_CTX_CF6EN_SHIFT 4 5257 + #define XSTORM_TOE_CONN_AG_CTX_CF7EN_MASK 0x1 5258 + #define XSTORM_TOE_CONN_AG_CTX_CF7EN_SHIFT 5 5259 + #define XSTORM_TOE_CONN_AG_CTX_CF8EN_MASK 0x1 5260 + #define XSTORM_TOE_CONN_AG_CTX_CF8EN_SHIFT 6 5261 + #define XSTORM_TOE_CONN_AG_CTX_CF9EN_MASK 0x1 5262 + #define XSTORM_TOE_CONN_AG_CTX_CF9EN_SHIFT 7 5263 + u8 flags9; 5264 + #define XSTORM_TOE_CONN_AG_CTX_CF10EN_MASK 0x1 5265 + #define XSTORM_TOE_CONN_AG_CTX_CF10EN_SHIFT 0 5266 + #define XSTORM_TOE_CONN_AG_CTX_CF11EN_MASK 0x1 5267 + 
#define XSTORM_TOE_CONN_AG_CTX_CF11EN_SHIFT 1 5268 + #define XSTORM_TOE_CONN_AG_CTX_CF12EN_MASK 0x1 5269 + #define XSTORM_TOE_CONN_AG_CTX_CF12EN_SHIFT 2 5270 + #define XSTORM_TOE_CONN_AG_CTX_CF13EN_MASK 0x1 5271 + #define XSTORM_TOE_CONN_AG_CTX_CF13EN_SHIFT 3 5272 + #define XSTORM_TOE_CONN_AG_CTX_CF14EN_MASK 0x1 5273 + #define XSTORM_TOE_CONN_AG_CTX_CF14EN_SHIFT 4 5274 + #define XSTORM_TOE_CONN_AG_CTX_CF15EN_MASK 0x1 5275 + #define XSTORM_TOE_CONN_AG_CTX_CF15EN_SHIFT 5 5276 + #define XSTORM_TOE_CONN_AG_CTX_CF16EN_MASK 0x1 5277 + #define XSTORM_TOE_CONN_AG_CTX_CF16EN_SHIFT 6 5278 + #define XSTORM_TOE_CONN_AG_CTX_CF17EN_MASK 0x1 5279 + #define XSTORM_TOE_CONN_AG_CTX_CF17EN_SHIFT 7 5280 + u8 flags10; 5281 + #define XSTORM_TOE_CONN_AG_CTX_CF18EN_MASK 0x1 5282 + #define XSTORM_TOE_CONN_AG_CTX_CF18EN_SHIFT 0 5283 + #define XSTORM_TOE_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1 5284 + #define XSTORM_TOE_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1 5285 + #define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 5286 + #define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 5287 + #define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1 5288 + #define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3 5289 + #define XSTORM_TOE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 5290 + #define XSTORM_TOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 5291 + #define XSTORM_TOE_CONN_AG_CTX_CF23EN_MASK 0x1 5292 + #define XSTORM_TOE_CONN_AG_CTX_CF23EN_SHIFT 5 5293 + #define XSTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1 5294 + #define XSTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 6 5295 + #define XSTORM_TOE_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1 5296 + #define XSTORM_TOE_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7 5297 + u8 flags11; 5298 + #define XSTORM_TOE_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1 5299 + #define XSTORM_TOE_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0 5300 + #define XSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1 5301 + #define XSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 1 5302 + #define XSTORM_TOE_CONN_AG_CTX_RESERVED3_MASK 0x1 5303 + #define XSTORM_TOE_CONN_AG_CTX_RESERVED3_SHIFT 2 
5304 + #define XSTORM_TOE_CONN_AG_CTX_RULE5EN_MASK 0x1 5305 + #define XSTORM_TOE_CONN_AG_CTX_RULE5EN_SHIFT 3 5306 + #define XSTORM_TOE_CONN_AG_CTX_RULE6EN_MASK 0x1 5307 + #define XSTORM_TOE_CONN_AG_CTX_RULE6EN_SHIFT 4 5308 + #define XSTORM_TOE_CONN_AG_CTX_RULE7EN_MASK 0x1 5309 + #define XSTORM_TOE_CONN_AG_CTX_RULE7EN_SHIFT 5 5310 + #define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 5311 + #define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 5312 + #define XSTORM_TOE_CONN_AG_CTX_RULE9EN_MASK 0x1 5313 + #define XSTORM_TOE_CONN_AG_CTX_RULE9EN_SHIFT 7 5314 + u8 flags12; 5315 + #define XSTORM_TOE_CONN_AG_CTX_RULE10EN_MASK 0x1 5316 + #define XSTORM_TOE_CONN_AG_CTX_RULE10EN_SHIFT 0 5317 + #define XSTORM_TOE_CONN_AG_CTX_RULE11EN_MASK 0x1 5318 + #define XSTORM_TOE_CONN_AG_CTX_RULE11EN_SHIFT 1 5319 + #define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 5320 + #define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 5321 + #define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 5322 + #define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 5323 + #define XSTORM_TOE_CONN_AG_CTX_RULE14EN_MASK 0x1 5324 + #define XSTORM_TOE_CONN_AG_CTX_RULE14EN_SHIFT 4 5325 + #define XSTORM_TOE_CONN_AG_CTX_RULE15EN_MASK 0x1 5326 + #define XSTORM_TOE_CONN_AG_CTX_RULE15EN_SHIFT 5 5327 + #define XSTORM_TOE_CONN_AG_CTX_RULE16EN_MASK 0x1 5328 + #define XSTORM_TOE_CONN_AG_CTX_RULE16EN_SHIFT 6 5329 + #define XSTORM_TOE_CONN_AG_CTX_RULE17EN_MASK 0x1 5330 + #define XSTORM_TOE_CONN_AG_CTX_RULE17EN_SHIFT 7 5331 + u8 flags13; 5332 + #define XSTORM_TOE_CONN_AG_CTX_RULE18EN_MASK 0x1 5333 + #define XSTORM_TOE_CONN_AG_CTX_RULE18EN_SHIFT 0 5334 + #define XSTORM_TOE_CONN_AG_CTX_RULE19EN_MASK 0x1 5335 + #define XSTORM_TOE_CONN_AG_CTX_RULE19EN_SHIFT 1 5336 + #define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 5337 + #define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 5338 + #define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 5339 + #define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 5340 + #define 
XSTORM_TOE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 5341 + #define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 5342 + #define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 5343 + #define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 5344 + #define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 5345 + #define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 5346 + #define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 5347 + #define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 5348 + u8 flags14; 5349 + #define XSTORM_TOE_CONN_AG_CTX_BIT16_MASK 0x1 5350 + #define XSTORM_TOE_CONN_AG_CTX_BIT16_SHIFT 0 5351 + #define XSTORM_TOE_CONN_AG_CTX_BIT17_MASK 0x1 5352 + #define XSTORM_TOE_CONN_AG_CTX_BIT17_SHIFT 1 5353 + #define XSTORM_TOE_CONN_AG_CTX_BIT18_MASK 0x1 5354 + #define XSTORM_TOE_CONN_AG_CTX_BIT18_SHIFT 2 5355 + #define XSTORM_TOE_CONN_AG_CTX_BIT19_MASK 0x1 5356 + #define XSTORM_TOE_CONN_AG_CTX_BIT19_SHIFT 3 5357 + #define XSTORM_TOE_CONN_AG_CTX_BIT20_MASK 0x1 5358 + #define XSTORM_TOE_CONN_AG_CTX_BIT20_SHIFT 4 5359 + #define XSTORM_TOE_CONN_AG_CTX_BIT21_MASK 0x1 5360 + #define XSTORM_TOE_CONN_AG_CTX_BIT21_SHIFT 5 5361 + #define XSTORM_TOE_CONN_AG_CTX_CF23_MASK 0x3 5362 + #define XSTORM_TOE_CONN_AG_CTX_CF23_SHIFT 6 5363 + u8 byte2; 5364 + __le16 physical_q0; 5365 + __le16 physical_q1; 5366 + __le16 word2; 5367 + __le16 word3; 5368 + __le16 bd_prod; 5369 + __le16 word5; 5370 + __le16 word6; 5371 + u8 byte3; 5372 + u8 byte4; 5373 + u8 byte5; 5374 + u8 byte6; 5375 + __le32 reg0; 5376 + __le32 reg1; 5377 + __le32 reg2; 5378 + __le32 more_to_send_seq; 5379 + __le32 local_adv_wnd_seq; 5380 + __le32 reg5; 5381 + __le32 reg6; 5382 + __le16 word7; 5383 + __le16 word8; 5384 + __le16 word9; 5385 + __le16 word10; 5386 + __le32 reg7; 5387 + __le32 reg8; 5388 + __le32 reg9; 5389 + u8 byte7; 5390 + u8 byte8; 5391 + u8 byte9; 5392 + u8 byte10; 5393 + u8 byte11; 5394 + u8 byte12; 5395 + u8 byte13; 5396 + u8 byte14; 5397 + u8 byte15; 5398 + u8 e5_reserved; 5399 + __le16 word11; 5400 + __le32 reg10; 5401 
+ __le32 reg11; 5402 + __le32 reg12; 5403 + __le32 reg13; 5404 + __le32 reg14; 5405 + __le32 reg15; 5406 + __le32 reg16; 5407 + __le32 reg17; 5408 + }; 5409 + 5410 + struct tstorm_toe_conn_ag_ctx { 5411 + u8 reserved0; 5412 + u8 byte1; 5413 + u8 flags0; 5414 + #define TSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 5415 + #define TSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 5416 + #define TSTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1 5417 + #define TSTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1 5418 + #define TSTORM_TOE_CONN_AG_CTX_BIT2_MASK 0x1 5419 + #define TSTORM_TOE_CONN_AG_CTX_BIT2_SHIFT 2 5420 + #define TSTORM_TOE_CONN_AG_CTX_BIT3_MASK 0x1 5421 + #define TSTORM_TOE_CONN_AG_CTX_BIT3_SHIFT 3 5422 + #define TSTORM_TOE_CONN_AG_CTX_BIT4_MASK 0x1 5423 + #define TSTORM_TOE_CONN_AG_CTX_BIT4_SHIFT 4 5424 + #define TSTORM_TOE_CONN_AG_CTX_BIT5_MASK 0x1 5425 + #define TSTORM_TOE_CONN_AG_CTX_BIT5_SHIFT 5 5426 + #define TSTORM_TOE_CONN_AG_CTX_TIMEOUT_CF_MASK 0x3 5427 + #define TSTORM_TOE_CONN_AG_CTX_TIMEOUT_CF_SHIFT 6 5428 + u8 flags1; 5429 + #define TSTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3 5430 + #define TSTORM_TOE_CONN_AG_CTX_CF1_SHIFT 0 5431 + #define TSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3 5432 + #define TSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 2 5433 + #define TSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 5434 + #define TSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4 5435 + #define TSTORM_TOE_CONN_AG_CTX_CF4_MASK 0x3 5436 + #define TSTORM_TOE_CONN_AG_CTX_CF4_SHIFT 6 5437 + u8 flags2; 5438 + #define TSTORM_TOE_CONN_AG_CTX_CF5_MASK 0x3 5439 + #define TSTORM_TOE_CONN_AG_CTX_CF5_SHIFT 0 5440 + #define TSTORM_TOE_CONN_AG_CTX_CF6_MASK 0x3 5441 + #define TSTORM_TOE_CONN_AG_CTX_CF6_SHIFT 2 5442 + #define TSTORM_TOE_CONN_AG_CTX_CF7_MASK 0x3 5443 + #define TSTORM_TOE_CONN_AG_CTX_CF7_SHIFT 4 5444 + #define TSTORM_TOE_CONN_AG_CTX_CF8_MASK 0x3 5445 + #define TSTORM_TOE_CONN_AG_CTX_CF8_SHIFT 6 5446 + u8 flags3; 5447 + #define TSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 5448 + #define TSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 
5449 + #define TSTORM_TOE_CONN_AG_CTX_CF10_MASK 0x3 5450 + #define TSTORM_TOE_CONN_AG_CTX_CF10_SHIFT 2 5451 + #define TSTORM_TOE_CONN_AG_CTX_TIMEOUT_CF_EN_MASK 0x1 5452 + #define TSTORM_TOE_CONN_AG_CTX_TIMEOUT_CF_EN_SHIFT 4 5453 + #define TSTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1 5454 + #define TSTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 5 5455 + #define TSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1 5456 + #define TSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 6 5457 + #define TSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 5458 + #define TSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7 5459 + u8 flags4; 5460 + #define TSTORM_TOE_CONN_AG_CTX_CF4EN_MASK 0x1 5461 + #define TSTORM_TOE_CONN_AG_CTX_CF4EN_SHIFT 0 5462 + #define TSTORM_TOE_CONN_AG_CTX_CF5EN_MASK 0x1 5463 + #define TSTORM_TOE_CONN_AG_CTX_CF5EN_SHIFT 1 5464 + #define TSTORM_TOE_CONN_AG_CTX_CF6EN_MASK 0x1 5465 + #define TSTORM_TOE_CONN_AG_CTX_CF6EN_SHIFT 2 5466 + #define TSTORM_TOE_CONN_AG_CTX_CF7EN_MASK 0x1 5467 + #define TSTORM_TOE_CONN_AG_CTX_CF7EN_SHIFT 3 5468 + #define TSTORM_TOE_CONN_AG_CTX_CF8EN_MASK 0x1 5469 + #define TSTORM_TOE_CONN_AG_CTX_CF8EN_SHIFT 4 5470 + #define TSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 5471 + #define TSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5 5472 + #define TSTORM_TOE_CONN_AG_CTX_CF10EN_MASK 0x1 5473 + #define TSTORM_TOE_CONN_AG_CTX_CF10EN_SHIFT 6 5474 + #define TSTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1 5475 + #define TSTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 7 5476 + u8 flags5; 5477 + #define TSTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1 5478 + #define TSTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 0 5479 + #define TSTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1 5480 + #define TSTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 1 5481 + #define TSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1 5482 + #define TSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 2 5483 + #define TSTORM_TOE_CONN_AG_CTX_RULE4EN_MASK 0x1 5484 + #define TSTORM_TOE_CONN_AG_CTX_RULE4EN_SHIFT 3 5485 + #define TSTORM_TOE_CONN_AG_CTX_RULE5EN_MASK 0x1 5486 + #define TSTORM_TOE_CONN_AG_CTX_RULE5EN_SHIFT 
4 5487 + #define TSTORM_TOE_CONN_AG_CTX_RULE6EN_MASK 0x1 5488 + #define TSTORM_TOE_CONN_AG_CTX_RULE6EN_SHIFT 5 5489 + #define TSTORM_TOE_CONN_AG_CTX_RULE7EN_MASK 0x1 5490 + #define TSTORM_TOE_CONN_AG_CTX_RULE7EN_SHIFT 6 5491 + #define TSTORM_TOE_CONN_AG_CTX_RULE8EN_MASK 0x1 5492 + #define TSTORM_TOE_CONN_AG_CTX_RULE8EN_SHIFT 7 5493 + __le32 reg0; 5494 + __le32 reg1; 5495 + __le32 reg2; 5496 + __le32 reg3; 5497 + __le32 reg4; 5498 + __le32 reg5; 5499 + __le32 reg6; 5500 + __le32 reg7; 5501 + __le32 reg8; 5502 + u8 byte2; 5503 + u8 byte3; 5504 + __le16 word0; 5505 + }; 5506 + 5507 + struct ustorm_toe_conn_ag_ctx { 5508 + u8 reserved; 5509 + u8 byte1; 5510 + u8 flags0; 5511 + #define USTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 5512 + #define USTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 5513 + #define USTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1 5514 + #define USTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1 5515 + #define USTORM_TOE_CONN_AG_CTX_CF0_MASK 0x3 5516 + #define USTORM_TOE_CONN_AG_CTX_CF0_SHIFT 2 5517 + #define USTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3 5518 + #define USTORM_TOE_CONN_AG_CTX_CF1_SHIFT 4 5519 + #define USTORM_TOE_CONN_AG_CTX_PUSH_TIMER_CF_MASK 0x3 5520 + #define USTORM_TOE_CONN_AG_CTX_PUSH_TIMER_CF_SHIFT 6 5521 + u8 flags1; 5522 + #define USTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 5523 + #define USTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 0 5524 + #define USTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_MASK 0x3 5525 + #define USTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_SHIFT 2 5526 + #define USTORM_TOE_CONN_AG_CTX_DQ_CF_MASK 0x3 5527 + #define USTORM_TOE_CONN_AG_CTX_DQ_CF_SHIFT 4 5528 + #define USTORM_TOE_CONN_AG_CTX_CF6_MASK 0x3 5529 + #define USTORM_TOE_CONN_AG_CTX_CF6_SHIFT 6 5530 + u8 flags2; 5531 + #define USTORM_TOE_CONN_AG_CTX_CF0EN_MASK 0x1 5532 + #define USTORM_TOE_CONN_AG_CTX_CF0EN_SHIFT 0 5533 + #define USTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1 5534 + #define USTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 1 5535 + #define USTORM_TOE_CONN_AG_CTX_PUSH_TIMER_CF_EN_MASK 0x1 5536 + #define 
USTORM_TOE_CONN_AG_CTX_PUSH_TIMER_CF_EN_SHIFT 2 5537 + #define USTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 5538 + #define USTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 3 5539 + #define USTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_EN_MASK 0x1 5540 + #define USTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_EN_SHIFT 4 5541 + #define USTORM_TOE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 5542 + #define USTORM_TOE_CONN_AG_CTX_DQ_CF_EN_SHIFT 5 5543 + #define USTORM_TOE_CONN_AG_CTX_CF6EN_MASK 0x1 5544 + #define USTORM_TOE_CONN_AG_CTX_CF6EN_SHIFT 6 5545 + #define USTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1 5546 + #define USTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 7 5547 + u8 flags3; 5548 + #define USTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1 5549 + #define USTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 0 5550 + #define USTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1 5551 + #define USTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 1 5552 + #define USTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1 5553 + #define USTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 2 5554 + #define USTORM_TOE_CONN_AG_CTX_RULE4EN_MASK 0x1 5555 + #define USTORM_TOE_CONN_AG_CTX_RULE4EN_SHIFT 3 5556 + #define USTORM_TOE_CONN_AG_CTX_RULE5EN_MASK 0x1 5557 + #define USTORM_TOE_CONN_AG_CTX_RULE5EN_SHIFT 4 5558 + #define USTORM_TOE_CONN_AG_CTX_RULE6EN_MASK 0x1 5559 + #define USTORM_TOE_CONN_AG_CTX_RULE6EN_SHIFT 5 5560 + #define USTORM_TOE_CONN_AG_CTX_RULE7EN_MASK 0x1 5561 + #define USTORM_TOE_CONN_AG_CTX_RULE7EN_SHIFT 6 5562 + #define USTORM_TOE_CONN_AG_CTX_RULE8EN_MASK 0x1 5563 + #define USTORM_TOE_CONN_AG_CTX_RULE8EN_SHIFT 7 5564 + u8 byte2; 5565 + u8 byte3; 5566 + __le16 word0; 5567 + __le16 word1; 5568 + __le32 reg0; 5569 + __le32 reg1; 5570 + __le32 reg2; 5571 + __le32 reg3; 5572 + __le16 word2; 5573 + __le16 word3; 5574 + }; 5575 + 5576 + /* The toe storm context of Tstorm */ 5577 + struct tstorm_toe_conn_st_ctx { 5578 + __le32 reserved[16]; 5579 + }; 5580 + 5581 + /* The toe storm context of Ustorm */ 5582 + struct ustorm_toe_conn_st_ctx { 5583 + __le32 reserved[52]; 5584 + }; 5585 + 5586 + /* 
toe connection context */ 5587 + struct toe_conn_context { 5588 + struct ystorm_toe_conn_st_ctx ystorm_st_context; 5589 + struct pstorm_toe_conn_st_ctx pstorm_st_context; 5590 + struct regpair pstorm_st_padding[2]; 5591 + struct xstorm_toe_conn_st_ctx xstorm_st_context; 5592 + struct regpair xstorm_st_padding[2]; 5593 + struct ystorm_toe_conn_ag_ctx ystorm_ag_context; 5594 + struct xstorm_toe_conn_ag_ctx xstorm_ag_context; 5595 + struct tstorm_toe_conn_ag_ctx tstorm_ag_context; 5596 + struct regpair tstorm_ag_padding[2]; 5597 + struct timers_context timer_context; 5598 + struct ustorm_toe_conn_ag_ctx ustorm_ag_context; 5599 + struct tstorm_toe_conn_st_ctx tstorm_st_context; 5600 + struct mstorm_toe_conn_st_ctx mstorm_st_context; 5601 + struct ustorm_toe_conn_st_ctx ustorm_st_context; 5602 + }; 5603 + 5604 + /* toe init ramrod header */ 5605 + struct toe_init_ramrod_header { 5606 + u8 first_rss; 5607 + u8 num_rss; 5608 + u8 reserved[6]; 5609 + }; 5610 + 5611 + /* toe pf init parameters */ 5612 + struct toe_pf_init_params { 5613 + __le32 push_timeout; 5614 + __le16 grq_buffer_size; 5615 + __le16 grq_sb_id; 5616 + u8 grq_sb_index; 5617 + u8 max_seg_retransmit; 5618 + u8 doubt_reachability; 5619 + u8 ll2_rx_queue_id; 5620 + __le16 grq_fetch_threshold; 5621 + u8 reserved1[2]; 5622 + struct regpair grq_page_addr; 5623 + }; 5624 + 5625 + /* toe tss parameters */ 5626 + struct toe_tss_params { 5627 + struct regpair curr_page_addr; 5628 + struct regpair next_page_addr; 5629 + u8 reserved0; 5630 + u8 status_block_index; 5631 + __le16 status_block_id; 5632 + __le16 reserved1[2]; 5633 + }; 5634 + 5635 + /* toe rss parameters */ 5636 + struct toe_rss_params { 5637 + struct regpair curr_page_addr; 5638 + struct regpair next_page_addr; 5639 + u8 reserved0; 5640 + u8 status_block_index; 5641 + __le16 status_block_id; 5642 + __le16 reserved1[2]; 5643 + }; 5644 + 5645 + /* toe init ramrod data */ 5646 + struct toe_init_ramrod_data { 5647 + struct toe_init_ramrod_header hdr; 5648 + 
struct tcp_init_params tcp_params; 5649 + struct toe_pf_init_params pf_params; 5650 + struct toe_tss_params tss_params[TOE_TX_MAX_TSS_CHAINS]; 5651 + struct toe_rss_params rss_params[TOE_RX_MAX_RSS_CHAINS]; 5652 + }; 5653 + 5654 + /* toe offload parameters */ 5655 + struct toe_offload_params { 5656 + struct regpair tx_bd_page_addr; 5657 + struct regpair tx_app_page_addr; 5658 + __le32 more_to_send_seq; 5659 + __le16 rcv_indication_size; 5660 + u8 rss_tss_id; 5661 + u8 ignore_grq_push; 5662 + struct regpair rx_db_data_ptr; 5663 + }; 5664 + 5665 + /* TOE offload ramrod data - DMAed by firmware */ 5666 + struct toe_offload_ramrod_data { 5667 + struct tcp_offload_params tcp_ofld_params; 5668 + struct toe_offload_params toe_ofld_params; 5669 + }; 5670 + 5671 + /* TOE ramrod command IDs */ 5672 + enum toe_ramrod_cmd_id { 5673 + TOE_RAMROD_UNUSED, 5674 + TOE_RAMROD_FUNC_INIT, 5675 + TOE_RAMROD_INITATE_OFFLOAD, 5676 + TOE_RAMROD_FUNC_CLOSE, 5677 + TOE_RAMROD_SEARCHER_DELETE, 5678 + TOE_RAMROD_TERMINATE, 5679 + TOE_RAMROD_QUERY, 5680 + TOE_RAMROD_UPDATE, 5681 + TOE_RAMROD_EMPTY, 5682 + TOE_RAMROD_RESET_SEND, 5683 + TOE_RAMROD_INVALIDATE, 5684 + MAX_TOE_RAMROD_CMD_ID 5685 + }; 5686 + 5687 + /* Toe RQ buffer descriptor */ 5688 + struct toe_rx_bd { 5689 + struct regpair addr; 5690 + __le16 size; 5691 + __le16 flags; 5692 + #define TOE_RX_BD_START_MASK 0x1 5693 + #define TOE_RX_BD_START_SHIFT 0 5694 + #define TOE_RX_BD_END_MASK 0x1 5695 + #define TOE_RX_BD_END_SHIFT 1 5696 + #define TOE_RX_BD_NO_PUSH_MASK 0x1 5697 + #define TOE_RX_BD_NO_PUSH_SHIFT 2 5698 + #define TOE_RX_BD_SPLIT_MASK 0x1 5699 + #define TOE_RX_BD_SPLIT_SHIFT 3 5700 + #define TOE_RX_BD_RESERVED0_MASK 0xFFF 5701 + #define TOE_RX_BD_RESERVED0_SHIFT 4 5702 + __le32 reserved1; 5703 + }; 5704 + 5705 + /* TOE RX completion queue opcodes (opcode 0 is illegal) */ 5706 + enum toe_rx_cmp_opcode { 5707 + TOE_RX_CMP_OPCODE_GA = 1, 5708 + TOE_RX_CMP_OPCODE_GR = 2, 5709 + TOE_RX_CMP_OPCODE_GNI = 3, 5710 + 
TOE_RX_CMP_OPCODE_GAIR = 4, 5711 + TOE_RX_CMP_OPCODE_GAIL = 5, 5712 + TOE_RX_CMP_OPCODE_GRI = 6, 5713 + TOE_RX_CMP_OPCODE_GJ = 7, 5714 + TOE_RX_CMP_OPCODE_DGI = 8, 5715 + TOE_RX_CMP_OPCODE_CMP = 9, 5716 + TOE_RX_CMP_OPCODE_REL = 10, 5717 + TOE_RX_CMP_OPCODE_SKP = 11, 5718 + TOE_RX_CMP_OPCODE_URG = 12, 5719 + TOE_RX_CMP_OPCODE_RT_TO = 13, 5720 + TOE_RX_CMP_OPCODE_KA_TO = 14, 5721 + TOE_RX_CMP_OPCODE_MAX_RT = 15, 5722 + TOE_RX_CMP_OPCODE_DBT_RE = 16, 5723 + TOE_RX_CMP_OPCODE_SYN = 17, 5724 + TOE_RX_CMP_OPCODE_OPT_ERR = 18, 5725 + TOE_RX_CMP_OPCODE_FW2_TO = 19, 5726 + TOE_RX_CMP_OPCODE_2WY_CLS = 20, 5727 + TOE_RX_CMP_OPCODE_RST_RCV = 21, 5728 + TOE_RX_CMP_OPCODE_FIN_RCV = 22, 5729 + TOE_RX_CMP_OPCODE_FIN_UPL = 23, 5730 + TOE_RX_CMP_OPCODE_INIT = 32, 5731 + TOE_RX_CMP_OPCODE_RSS_UPDATE = 33, 5732 + TOE_RX_CMP_OPCODE_CLOSE = 34, 5733 + TOE_RX_CMP_OPCODE_INITIATE_OFFLOAD = 80, 5734 + TOE_RX_CMP_OPCODE_SEARCHER_DELETE = 81, 5735 + TOE_RX_CMP_OPCODE_TERMINATE = 82, 5736 + TOE_RX_CMP_OPCODE_QUERY = 83, 5737 + TOE_RX_CMP_OPCODE_RESET_SEND = 84, 5738 + TOE_RX_CMP_OPCODE_INVALIDATE = 85, 5739 + TOE_RX_CMP_OPCODE_EMPTY = 86, 5740 + TOE_RX_CMP_OPCODE_UPDATE = 87, 5741 + MAX_TOE_RX_CMP_OPCODE 5742 + }; 5743 + 5744 + /* TOE rx ooo completion data */ 5745 + struct toe_rx_cqe_ooo_params { 5746 + __le32 nbytes; 5747 + __le16 grq_buff_id; 5748 + u8 isle_num; 5749 + u8 reserved0; 5750 + }; 5751 + 5752 + /* TOE rx in order completion data */ 5753 + struct toe_rx_cqe_in_order_params { 5754 + __le32 nbytes; 5755 + __le16 grq_buff_id; 5756 + __le16 reserved1; 5757 + }; 5758 + 5759 + /* Union for TOE rx completion data */ 5760 + union toe_rx_cqe_data_union { 5761 + struct toe_rx_cqe_ooo_params ooo_params; 5762 + struct toe_rx_cqe_in_order_params in_order_params; 5763 + struct regpair raw_data; 5764 + }; 5765 + 5766 + /* TOE rx completion element */ 5767 + struct toe_rx_cqe { 5768 + __le16 icid; 5769 + u8 completion_opcode; 5770 + u8 reserved0; 5771 + __le32 reserved1; 5772 + union 
toe_rx_cqe_data_union data; 5773 + }; 5774 + 5775 + /* toe RX doorbel data */ 5776 + struct toe_rx_db_data { 5777 + __le32 local_adv_wnd_seq; 5778 + __le32 reserved[3]; 5779 + }; 5780 + 5781 + /* Toe GRQ buffer descriptor */ 5782 + struct toe_rx_grq_bd { 5783 + struct regpair addr; 5784 + __le16 buff_id; 5785 + __le16 reserved0; 5786 + __le32 reserved1; 5787 + }; 5788 + 5789 + /* Toe transmission application buffer descriptor */ 5790 + struct toe_tx_app_buff_desc { 5791 + __le32 next_buffer_start_seq; 5792 + __le32 reserved; 5793 + }; 5794 + 5795 + /* Toe transmission application buffer descriptor page pointer */ 5796 + struct toe_tx_app_buff_page_pointer { 5797 + struct regpair next_page_addr; 5798 + }; 5799 + 5800 + /* Toe transmission buffer descriptor */ 5801 + struct toe_tx_bd { 5802 + struct regpair addr; 5803 + __le16 size; 5804 + __le16 flags; 5805 + #define TOE_TX_BD_PUSH_MASK 0x1 5806 + #define TOE_TX_BD_PUSH_SHIFT 0 5807 + #define TOE_TX_BD_NOTIFY_MASK 0x1 5808 + #define TOE_TX_BD_NOTIFY_SHIFT 1 5809 + #define TOE_TX_BD_LARGE_IO_MASK 0x1 5810 + #define TOE_TX_BD_LARGE_IO_SHIFT 2 5811 + #define TOE_TX_BD_BD_CONS_MASK 0x1FFF 5812 + #define TOE_TX_BD_BD_CONS_SHIFT 3 5813 + __le32 next_bd_start_seq; 5814 + }; 5815 + 5816 + /* TOE completion opcodes */ 5817 + enum toe_tx_cmp_opcode { 5818 + TOE_TX_CMP_OPCODE_DATA, 5819 + TOE_TX_CMP_OPCODE_TERMINATE, 5820 + TOE_TX_CMP_OPCODE_EMPTY, 5821 + TOE_TX_CMP_OPCODE_RESET_SEND, 5822 + TOE_TX_CMP_OPCODE_INVALIDATE, 5823 + TOE_TX_CMP_OPCODE_RST_RCV, 5824 + MAX_TOE_TX_CMP_OPCODE 5825 + }; 5826 + 5827 + /* Toe transmission completion element */ 5828 + struct toe_tx_cqe { 5829 + __le16 icid; 5830 + u8 opcode; 5831 + u8 reserved; 5832 + __le32 size; 5833 + }; 5834 + 5835 + /* Toe transmission page pointer bd */ 5836 + struct toe_tx_page_pointer_bd { 5837 + struct regpair next_page_addr; 5838 + struct regpair prev_page_addr; 5839 + }; 5840 + 5841 + /* Toe transmission completion element page pointer */ 5842 + struct 
toe_tx_page_pointer_cqe { 5843 + struct regpair next_page_addr; 5844 + }; 5845 + 5846 + /* toe update parameters */ 5847 + struct toe_update_params { 5848 + __le16 flags; 5849 + #define TOE_UPDATE_PARAMS_RCV_INDICATION_SIZE_CHANGED_MASK 0x1 5850 + #define TOE_UPDATE_PARAMS_RCV_INDICATION_SIZE_CHANGED_SHIFT 0 5851 + #define TOE_UPDATE_PARAMS_RESERVED_MASK 0x7FFF 5852 + #define TOE_UPDATE_PARAMS_RESERVED_SHIFT 1 5853 + __le16 rcv_indication_size; 5854 + __le16 reserved1[2]; 5855 + }; 5856 + 5857 + /* TOE update ramrod data - DMAed by firmware */ 5858 + struct toe_update_ramrod_data { 5859 + struct tcp_update_params tcp_upd_params; 5860 + struct toe_update_params toe_upd_params; 5861 + }; 5862 + 5863 + struct mstorm_toe_conn_ag_ctx { 5864 + u8 byte0; 5865 + u8 byte1; 5866 + u8 flags0; 5867 + #define MSTORM_TOE_CONN_AG_CTX_BIT0_MASK 0x1 5868 + #define MSTORM_TOE_CONN_AG_CTX_BIT0_SHIFT 0 5869 + #define MSTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1 5870 + #define MSTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1 5871 + #define MSTORM_TOE_CONN_AG_CTX_CF0_MASK 0x3 5872 + #define MSTORM_TOE_CONN_AG_CTX_CF0_SHIFT 2 5873 + #define MSTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3 5874 + #define MSTORM_TOE_CONN_AG_CTX_CF1_SHIFT 4 5875 + #define MSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3 5876 + #define MSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 6 5877 + u8 flags1; 5878 + #define MSTORM_TOE_CONN_AG_CTX_CF0EN_MASK 0x1 5879 + #define MSTORM_TOE_CONN_AG_CTX_CF0EN_SHIFT 0 5880 + #define MSTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1 5881 + #define MSTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 1 5882 + #define MSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1 5883 + #define MSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 2 5884 + #define MSTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1 5885 + #define MSTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 3 5886 + #define MSTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1 5887 + #define MSTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 4 5888 + #define MSTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1 5889 + #define MSTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 5 5890 + #define 
MSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1 5891 + #define MSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 6 5892 + #define MSTORM_TOE_CONN_AG_CTX_RULE4EN_MASK 0x1 5893 + #define MSTORM_TOE_CONN_AG_CTX_RULE4EN_SHIFT 7 5894 + __le16 word0; 5895 + __le16 word1; 5896 + __le32 reg0; 5897 + __le32 reg1; 5898 + }; 5899 + 5900 + /* TOE doorbell data */ 5901 + struct toe_db_data { 5902 + u8 params; 5903 + #define TOE_DB_DATA_DEST_MASK 0x3 5904 + #define TOE_DB_DATA_DEST_SHIFT 0 5905 + #define TOE_DB_DATA_AGG_CMD_MASK 0x3 5906 + #define TOE_DB_DATA_AGG_CMD_SHIFT 2 5907 + #define TOE_DB_DATA_BYPASS_EN_MASK 0x1 5908 + #define TOE_DB_DATA_BYPASS_EN_SHIFT 4 5909 + #define TOE_DB_DATA_RESERVED_MASK 0x1 5910 + #define TOE_DB_DATA_RESERVED_SHIFT 5 5911 + #define TOE_DB_DATA_AGG_VAL_SEL_MASK 0x3 5912 + #define TOE_DB_DATA_AGG_VAL_SEL_SHIFT 6 5913 + u8 agg_flags; 5914 + __le16 bd_prod; 5915 + }; 5916 + 5245 5917 /* rdma function init ramrod data */ 5246 5918 struct rdma_close_func_ramrod_data { 5247 5919 u8 cnq_start_offset; ··· 6150 5148 RDMA_EVENT_CREATE_SRQ, 6151 5149 RDMA_EVENT_MODIFY_SRQ, 6152 5150 RDMA_EVENT_DESTROY_SRQ, 5151 + RDMA_EVENT_START_NAMESPACE_TRACKING, 5152 + RDMA_EVENT_STOP_NAMESPACE_TRACKING, 6153 5153 MAX_RDMA_EVENT_OPCODE 6154 5154 }; 6155 5155 ··· 6176 5172 u8 relaxed_ordering; 6177 5173 __le16 first_reg_srq_id; 6178 5174 __le32 reg_srq_base_addr; 6179 - u8 searcher_mode; 6180 - u8 pvrdma_mode; 5175 + u8 flags; 5176 + #define RDMA_INIT_FUNC_HDR_SEARCHER_MODE_MASK 0x1 5177 + #define RDMA_INIT_FUNC_HDR_SEARCHER_MODE_SHIFT 0 5178 + #define RDMA_INIT_FUNC_HDR_PVRDMA_MODE_MASK 0x1 5179 + #define RDMA_INIT_FUNC_HDR_PVRDMA_MODE_SHIFT 1 5180 + #define RDMA_INIT_FUNC_HDR_DPT_MODE_MASK 0x1 5181 + #define RDMA_INIT_FUNC_HDR_DPT_MODE_SHIFT 2 5182 + #define RDMA_INIT_FUNC_HDR_RESERVED0_MASK 0x1F 5183 + #define RDMA_INIT_FUNC_HDR_RESERVED0_SHIFT 3 5184 + u8 dpt_byte_threshold_log; 5185 + u8 dpt_common_queue_id; 6181 5186 u8 max_num_ns_log; 6182 - u8 reserved; 6183 5187 }; 6184 5188 6185 
5189 /* rdma function init ramrod data */ 6186 5190 struct rdma_init_func_ramrod_data { 6187 5191 struct rdma_init_func_hdr params_header; 5192 + struct rdma_cnq_params dptq_params; 6188 5193 struct rdma_cnq_params cnq_params[NUM_OF_GLOBAL_QUEUES]; 5194 + }; 5195 + 5196 + /* rdma namespace tracking ramrod data */ 5197 + struct rdma_namespace_tracking_ramrod_data { 5198 + u8 name_space; 5199 + u8 reserved[7]; 6189 5200 }; 6190 5201 6191 5202 /* RDMA ramrod command IDs */ ··· 6216 5197 RDMA_RAMROD_CREATE_SRQ, 6217 5198 RDMA_RAMROD_MODIFY_SRQ, 6218 5199 RDMA_RAMROD_DESTROY_SRQ, 5200 + RDMA_RAMROD_START_NS_TRACKING, 5201 + RDMA_RAMROD_STOP_NS_TRACKING, 6219 5202 MAX_RDMA_RAMROD_CMD_ID 6220 5203 }; 6221 5204 ··· 6939 5918 #define ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE_SHIFT 0 6940 5919 #define ROCE_CREATE_QP_REQ_RAMROD_DATA_VF_ID_VALID_MASK 0x1 6941 5920 #define ROCE_CREATE_QP_REQ_RAMROD_DATA_VF_ID_VALID_SHIFT 1 6942 - #define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x3F 6943 - #define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 2 5921 + #define ROCE_CREATE_QP_REQ_RAMROD_DATA_FORCE_LB_MASK 0x1 5922 + #define ROCE_CREATE_QP_REQ_RAMROD_DATA_FORCE_LB_SHIFT 2 5923 + #define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x1F 5924 + #define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 3 6944 5925 u8 name_space; 6945 5926 u8 reserved3[3]; 6946 5927 __le16 regular_latency_phy_queue; ··· 6974 5951 #define ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG_SHIFT 16 6975 5952 #define ROCE_CREATE_QP_RESP_RAMROD_DATA_VF_ID_VALID_MASK 0x1 6976 5953 #define ROCE_CREATE_QP_RESP_RAMROD_DATA_VF_ID_VALID_SHIFT 17 6977 - #define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_MASK 0x3FFF 6978 - #define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_SHIFT 18 5954 + #define ROCE_CREATE_QP_RESP_RAMROD_DATA_FORCE_LB_MASK 0x1 5955 + #define ROCE_CREATE_QP_RESP_RAMROD_DATA_FORCE_LB_SHIFT 18 5956 + #define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_MASK 0x1FFF 5957 + #define 
ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_SHIFT 19 6979 5958 __le16 xrc_domain; 6980 5959 u8 max_ird; 6981 5960 u8 traffic_class; ··· 7014 5989 u8 reserved3[3]; 7015 5990 }; 7016 5991 5992 + /* RoCE Create Suspended qp requester runtime ramrod data */ 5993 + struct roce_create_suspended_qp_req_runtime_ramrod_data { 5994 + __le32 flags; 5995 + #define ROCE_CREATE_SUSPENDED_QP_REQ_RUNTIME_RAMROD_DATA_ERR_FLG_MASK 0x1 5996 + #define ROCE_CREATE_SUSPENDED_QP_REQ_RUNTIME_RAMROD_DATA_ERR_FLG_SHIFT 0 5997 + #define ROCE_CREATE_SUSPENDED_QP_REQ_RUNTIME_RAMROD_DATA_RESERVED0_MASK \ 5998 + 0x7FFFFFFF 5999 + #define ROCE_CREATE_SUSPENDED_QP_REQ_RUNTIME_RAMROD_DATA_RESERVED0_SHIFT 1 6000 + __le32 send_msg_psn; 6001 + __le32 inflight_sends; 6002 + __le32 ssn; 6003 + }; 6004 + 6005 + /* RoCE Create Suspended QP requester ramrod data */ 6006 + struct roce_create_suspended_qp_req_ramrod_data { 6007 + struct roce_create_qp_req_ramrod_data qp_params; 6008 + struct roce_create_suspended_qp_req_runtime_ramrod_data 6009 + qp_runtime_params; 6010 + }; 6011 + 6012 + /* RoCE Create Suspended QP responder runtime params */ 6013 + struct roce_create_suspended_qp_resp_runtime_params { 6014 + __le32 flags; 6015 + #define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_ERR_FLG_MASK 0x1 6016 + #define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_ERR_FLG_SHIFT 0 6017 + #define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RDMA_ACTIVE_MASK 0x1 6018 + #define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RDMA_ACTIVE_SHIFT 1 6019 + #define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RESERVED0_MASK 0x3FFFFFFF 6020 + #define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RESERVED0_SHIFT 2 6021 + __le32 receive_msg_psn; 6022 + __le32 inflight_receives; 6023 + __le32 rmsn; 6024 + __le32 rdma_key; 6025 + struct regpair rdma_va; 6026 + __le32 rdma_length; 6027 + __le32 num_rdb_entries; 6028 + __le32 resreved; 6029 + }; 6030 + 6031 + /* RoCE RDB array entry */ 6032 + struct roce_resp_qp_rdb_entry { 6033 + struct 
regpair atomic_data; 6034 + struct regpair va; 6035 + __le32 psn; 6036 + __le32 rkey; 6037 + __le32 byte_count; 6038 + u8 op_type; 6039 + u8 reserved[3]; 6040 + }; 6041 + 6042 + /* RoCE Create Suspended QP responder runtime ramrod data */ 6043 + struct roce_create_suspended_qp_resp_runtime_ramrod_data { 6044 + struct roce_create_suspended_qp_resp_runtime_params params; 6045 + struct roce_resp_qp_rdb_entry 6046 + rdb_array_entries[RDMA_MAX_IRQ_ELEMS_IN_PAGE]; 6047 + }; 6048 + 6049 + /* RoCE Create Suspended QP responder ramrod data */ 6050 + struct roce_create_suspended_qp_resp_ramrod_data { 6051 + struct roce_create_qp_resp_ramrod_data 6052 + qp_params; 6053 + struct roce_create_suspended_qp_resp_runtime_ramrod_data 6054 + qp_runtime_params; 6055 + }; 6056 + 6057 + /* RoCE create ud qp ramrod data */ 6058 + struct roce_create_ud_qp_ramrod_data { 6059 + __le16 local_mac_addr[3]; 6060 + __le16 vlan_id; 6061 + __le32 src_qp_id; 6062 + u8 name_space; 6063 + u8 reserved[3]; 6064 + }; 6065 + 7017 6066 /* roce DCQCN received statistics */ 7018 6067 struct roce_dcqcn_received_stats { 7019 6068 struct regpair ecn_pkt_rcv; 7020 6069 struct regpair cnp_pkt_rcv; 6070 + struct regpair cnp_pkt_reject; 7021 6071 }; 7022 6072 7023 6073 /* roce DCQCN sent statistics */ ··· 7124 6024 __le32 reserved; 7125 6025 }; 7126 6026 6027 + /* RoCE destroy ud qp ramrod data */ 6028 + struct roce_destroy_ud_qp_ramrod_data { 6029 + __le32 src_qp_id; 6030 + __le32 reserved; 6031 + }; 6032 + 7127 6033 /* roce error statistics */ 7128 6034 struct roce_error_stats { 7129 6035 __le32 resp_remote_access_errors; ··· 7152 6046 7153 6047 /* roce slow path EQ cmd IDs */ 7154 6048 enum roce_event_opcode { 7155 - ROCE_EVENT_CREATE_QP = 11, 6049 + ROCE_EVENT_CREATE_QP = 13, 7156 6050 ROCE_EVENT_MODIFY_QP, 7157 6051 ROCE_EVENT_QUERY_QP, 7158 6052 ROCE_EVENT_DESTROY_QP, 7159 6053 ROCE_EVENT_CREATE_UD_QP, 7160 6054 ROCE_EVENT_DESTROY_UD_QP, 7161 6055 ROCE_EVENT_FUNC_UPDATE, 6056 + ROCE_EVENT_SUSPEND_QP, 6057 + 
ROCE_EVENT_QUERY_SUSPENDED_QP, 6058 + ROCE_EVENT_CREATE_SUSPENDED_QP, 6059 + ROCE_EVENT_RESUME_QP, 6060 + ROCE_EVENT_SUSPEND_UD_QP, 6061 + ROCE_EVENT_RESUME_UD_QP, 6062 + ROCE_EVENT_CREATE_SUSPENDED_UD_QP, 6063 + ROCE_EVENT_FLUSH_DPT_QP, 7162 6064 MAX_ROCE_EVENT_OPCODE 7163 6065 }; 7164 6066 ··· 7192 6078 struct roce_init_func_ramrod_data { 7193 6079 struct rdma_init_func_ramrod_data rdma; 7194 6080 struct roce_init_func_params roce; 6081 + }; 6082 + 6083 + /* roce_ll2_cqe_data */ 6084 + struct roce_ll2_cqe_data { 6085 + u8 name_space; 6086 + u8 flags; 6087 + #define ROCE_LL2_CQE_DATA_QP_SUSPENDED_MASK 0x1 6088 + #define ROCE_LL2_CQE_DATA_QP_SUSPENDED_SHIFT 0 6089 + #define ROCE_LL2_CQE_DATA_RESERVED0_MASK 0x7F 6090 + #define ROCE_LL2_CQE_DATA_RESERVED0_SHIFT 1 6091 + u8 reserved1[2]; 6092 + __le32 cid; 7195 6093 }; 7196 6094 7197 6095 /* roce modify qp requester ramrod data */ ··· 7233 6107 #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_SHIFT 10 7234 6108 #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUE_FLG_MASK 0x1 7235 6109 #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUE_FLG_SHIFT 13 7236 - #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK 0x3 7237 - #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT 14 6110 + #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_FORCE_LB_MASK 0x1 6111 + #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_FORCE_LB_SHIFT 14 6112 + #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK 0x1 6113 + #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT 15 7238 6114 u8 fields; 7239 6115 #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF 7240 6116 #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 0 ··· 7282 6154 #define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 9 7283 6155 #define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUE_FLG_MASK 0x1 7284 6156 #define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUE_FLG_SHIFT 10 7285 - #define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK 0x1F 7286 - #define 
ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT 11 6157 + #define ROCE_MODIFY_QP_RESP_RAMROD_DATA_FORCE_LB_MASK 0x1 6158 + #define ROCE_MODIFY_QP_RESP_RAMROD_DATA_FORCE_LB_SHIFT 11 6159 + #define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK 0xF 6160 + #define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT 12 7287 6161 u8 fields; 7288 6162 #define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_MASK 0x7 7289 6163 #define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_SHIFT 0 ··· 7336 6206 struct regpair output_params_addr; 7337 6207 }; 7338 6208 6209 + /* RoCE Query Suspended QP requester output params */ 6210 + struct roce_query_suspended_qp_req_output_params { 6211 + __le32 psn; 6212 + __le32 flags; 6213 + #define ROCE_QUERY_SUSPENDED_QP_REQ_OUTPUT_PARAMS_ERR_FLG_MASK 0x1 6214 + #define ROCE_QUERY_SUSPENDED_QP_REQ_OUTPUT_PARAMS_ERR_FLG_SHIFT 0 6215 + #define ROCE_QUERY_SUSPENDED_QP_REQ_OUTPUT_PARAMS_RESERVED0_MASK 0x7FFFFFFF 6216 + #define ROCE_QUERY_SUSPENDED_QP_REQ_OUTPUT_PARAMS_RESERVED0_SHIFT 1 6217 + __le32 send_msg_psn; 6218 + __le32 inflight_sends; 6219 + __le32 ssn; 6220 + __le32 reserved; 6221 + }; 6222 + 6223 + /* RoCE Query Suspended QP requester ramrod data */ 6224 + struct roce_query_suspended_qp_req_ramrod_data { 6225 + struct regpair output_params_addr; 6226 + }; 6227 + 6228 + /* RoCE Query Suspended QP responder runtime params */ 6229 + struct roce_query_suspended_qp_resp_runtime_params { 6230 + __le32 psn; 6231 + __le32 flags; 6232 + #define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_ERR_FLG_MASK 0x1 6233 + #define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_ERR_FLG_SHIFT 0 6234 + #define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RDMA_ACTIVE_MASK 0x1 6235 + #define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RDMA_ACTIVE_SHIFT 1 6236 + #define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RESERVED0_MASK 0x3FFFFFFF 6237 + #define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RESERVED0_SHIFT 2 6238 + __le32 receive_msg_psn; 6239 + __le32 inflight_receives; 6240 + __le32 
rmsn; 6241 + __le32 rdma_key; 6242 + struct regpair rdma_va; 6243 + __le32 rdma_length; 6244 + __le32 num_rdb_entries; 6245 + }; 6246 + 6247 + /* RoCE Query Suspended QP responder output params */ 6248 + struct roce_query_suspended_qp_resp_output_params { 6249 + struct roce_query_suspended_qp_resp_runtime_params runtime_params; 6250 + struct roce_resp_qp_rdb_entry 6251 + rdb_array_entries[RDMA_MAX_IRQ_ELEMS_IN_PAGE]; 6252 + }; 6253 + 6254 + /* RoCE Query Suspended QP responder ramrod data */ 6255 + struct roce_query_suspended_qp_resp_ramrod_data { 6256 + struct regpair output_params_addr; 6257 + }; 6258 + 7339 6259 /* ROCE ramrod command IDs */ 7340 6260 enum roce_ramrod_cmd_id { 7341 - ROCE_RAMROD_CREATE_QP = 11, 6261 + ROCE_RAMROD_CREATE_QP = 13, 7342 6262 ROCE_RAMROD_MODIFY_QP, 7343 6263 ROCE_RAMROD_QUERY_QP, 7344 6264 ROCE_RAMROD_DESTROY_QP, 7345 6265 ROCE_RAMROD_CREATE_UD_QP, 7346 6266 ROCE_RAMROD_DESTROY_UD_QP, 7347 6267 ROCE_RAMROD_FUNC_UPDATE, 6268 + ROCE_RAMROD_SUSPEND_QP, 6269 + ROCE_RAMROD_QUERY_SUSPENDED_QP, 6270 + ROCE_RAMROD_CREATE_SUSPENDED_QP, 6271 + ROCE_RAMROD_RESUME_QP, 6272 + ROCE_RAMROD_SUSPEND_UD_QP, 6273 + ROCE_RAMROD_RESUME_UD_QP, 6274 + ROCE_RAMROD_CREATE_SUSPENDED_UD_QP, 6275 + ROCE_RAMROD_FLUSH_DPT_QP, 7348 6276 MAX_ROCE_RAMROD_CMD_ID 6277 + }; 6278 + 6279 + /* ROCE RDB array entry type */ 6280 + enum roce_resp_qp_rdb_entry_type { 6281 + ROCE_QP_RDB_ENTRY_RDMA_RESPONSE = 0, 6282 + ROCE_QP_RDB_ENTRY_ATOMIC_RESPONSE = 1, 6283 + ROCE_QP_RDB_ENTRY_INVALID = 2, 6284 + MAX_ROCE_RESP_QP_RDB_ENTRY_TYPE 7349 6285 }; 7350 6286 7351 6287 /* RoCE func init ramrod data */ ··· 9164 7968 IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED, 9165 7969 IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE, 9166 7970 IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW, 9167 - IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY, 9168 7971 IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT, 7972 + IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY, 9169 7973 MAX_IWARP_EQE_ASYNC_OPCODE 9170 7974 }; 9171 7975 ··· 9183 7987 9184 7988 /* iWARP completion 
queue types */ 9185 7989 enum iwarp_eqe_sync_opcode { 9186 - IWARP_EVENT_TYPE_TCP_OFFLOAD = 9187 - 11, 7990 + IWARP_EVENT_TYPE_TCP_OFFLOAD = 13, 9188 7991 IWARP_EVENT_TYPE_MPA_OFFLOAD, 9189 7992 IWARP_EVENT_TYPE_MPA_OFFLOAD_SEND_RTR, 9190 7993 IWARP_EVENT_TYPE_CREATE_QP, ··· 9215 8020 IWARP_EXCEPTION_DETECTED_LLP_RESET, 9216 8021 IWARP_EXCEPTION_DETECTED_IRQ_FULL, 9217 8022 IWARP_EXCEPTION_DETECTED_RQ_EMPTY, 9218 - IWARP_EXCEPTION_DETECTED_SRQ_EMPTY, 9219 - IWARP_EXCEPTION_DETECTED_SRQ_LIMIT, 9220 8023 IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT, 9221 8024 IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR, 9222 8025 IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW, ··· 9308 8115 struct regpair async_eqe_output_buf; 9309 8116 struct regpair handle_for_async; 9310 8117 struct regpair shared_queue_addr; 8118 + __le32 additional_setup_time; 9311 8119 __le16 rcv_wnd; 9312 8120 u8 stats_counter_id; 9313 - u8 reserved3[13]; 8121 + u8 reserved3[9]; 9314 8122 }; 9315 8123 9316 8124 /* iWARP TCP connection offload params passed by driver to FW */ ··· 9319 8125 struct mpa_ulp_buffer incoming_ulp_buffer; 9320 8126 struct regpair async_eqe_output_buf; 9321 8127 struct regpair handle_for_async; 8128 + __le32 additional_setup_time; 9322 8129 __le16 physical_q0; 9323 8130 __le16 physical_q1; 9324 8131 u8 stats_counter_id; 9325 8132 u8 mpa_mode; 9326 - u8 reserved[10]; 8133 + u8 src_vport_id; 8134 + u8 reserved[5]; 9327 8135 }; 9328 8136 9329 8137 /* iWARP query QP output params */ ··· 9345 8149 9346 8150 /* iWARP Ramrod Command IDs */ 9347 8151 enum iwarp_ramrod_cmd_id { 9348 - IWARP_RAMROD_CMD_ID_TCP_OFFLOAD = 11, 8152 + IWARP_RAMROD_CMD_ID_TCP_OFFLOAD = 13, 9349 8153 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD, 9350 8154 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR, 9351 8155 IWARP_RAMROD_CMD_ID_CREATE_QP,
+8 -6
drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
··· 920 920 } 921 921 922 922 int qed_init_global_rl(struct qed_hwfn *p_hwfn, 923 - struct qed_ptt *p_ptt, u16 rl_id, u32 rate_limit) 923 + struct qed_ptt *p_ptt, u16 rl_id, u32 rate_limit, 924 + enum init_qm_rl_type vport_rl_type) 924 925 { 925 926 u32 inc_val; 926 927 ··· 1646 1645 1647 1646 /* If memory allocation has failed, free all allocated memory */ 1648 1647 if (buf_offset < buf_size) { 1649 - qed_fw_overlay_mem_free(p_hwfn, allocated_mem); 1648 + qed_fw_overlay_mem_free(p_hwfn, &allocated_mem); 1650 1649 return NULL; 1651 1650 } 1652 1651 ··· 1680 1679 } 1681 1680 1682 1681 void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn, 1683 - struct phys_mem_desc *fw_overlay_mem) 1682 + struct phys_mem_desc **fw_overlay_mem) 1684 1683 { 1685 1684 u8 storm_id; 1686 1685 1687 - if (!fw_overlay_mem) 1686 + if (!fw_overlay_mem || !(*fw_overlay_mem)) 1688 1687 return; 1689 1688 1690 1689 for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) { 1691 1690 struct phys_mem_desc *storm_mem_desc = 1692 - (struct phys_mem_desc *)fw_overlay_mem + storm_id; 1691 + (struct phys_mem_desc *)*fw_overlay_mem + storm_id; 1693 1692 1694 1693 /* Free Storm's physical memory */ 1695 1694 if (storm_mem_desc->virt_addr) ··· 1700 1699 } 1701 1700 1702 1701 /* Free allocated virtual memory */ 1703 - kfree(fw_overlay_mem); 1702 + kfree(*fw_overlay_mem); 1703 + *fw_overlay_mem = NULL; 1704 1704 }
+2 -4
drivers/net/ethernet/qlogic/qed/qed_l2.c
··· 38 38 #include "qed_sp.h" 39 39 #include "qed_sriov.h" 40 40 41 - 42 41 #define QED_MAX_SGES_NUM 16 43 42 #define CRC32_POLY 0x1edc6f41 44 43 ··· 1111 1112 { 1112 1113 int rc; 1113 1114 1114 - 1115 1115 rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid, 1116 1116 pbl_addr, pbl_size, 1117 1117 qed_get_cm_pq_idx_mcos(p_hwfn, tc)); ··· 2009 2011 struct qed_spq_comp_cb *p_cb, 2010 2012 struct qed_ntuple_filter_params *p_params) 2011 2013 { 2012 - struct rx_update_gft_filter_data *p_ramrod = NULL; 2014 + struct rx_update_gft_filter_ramrod_data *p_ramrod = NULL; 2013 2015 struct qed_spq_entry *p_ent = NULL; 2014 2016 struct qed_sp_init_data init_data; 2015 2017 u16 abs_rx_q_id = 0; ··· 2030 2032 } 2031 2033 2032 2034 rc = qed_sp_init_request(p_hwfn, &p_ent, 2033 - ETH_RAMROD_GFT_UPDATE_FILTER, 2035 + ETH_RAMROD_RX_UPDATE_GFT_FILTER, 2034 2036 PROTOCOLID_ETH, &init_data); 2035 2037 if (rc) 2036 2038 return rc;
-1
drivers/net/ethernet/qlogic/qed/qed_l2.h
··· 146 146 int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, 147 147 struct qed_sp_vport_start_params *p_params); 148 148 149 - 150 149 struct qed_filter_accept_flags { 151 150 u8 update_rx_mode_config; 152 151 u8 update_tx_mode_config;
+4 -4
drivers/net/ethernet/qlogic/qed/qed_sp.h
··· 23 23 }; 24 24 25 25 struct qed_spq_comp_cb { 26 - void (*function)(struct qed_hwfn *, 27 - void *, 28 - union event_ring_data *, 26 + void (*function)(struct qed_hwfn *p_hwfn, 27 + void *cookie, 28 + union event_ring_data *data, 29 29 u8 fw_return_code); 30 30 void *cookie; 31 31 }; ··· 53 53 struct tx_queue_stop_ramrod_data tx_queue_stop; 54 54 struct vport_start_ramrod_data vport_start; 55 55 struct vport_stop_ramrod_data vport_stop; 56 - struct rx_update_gft_filter_data rx_update_gft; 56 + struct rx_update_gft_filter_ramrod_data rx_update_gft; 57 57 struct vport_update_ramrod_data vport_update; 58 58 struct core_rx_start_ramrod_data core_rx_queue_start; 59 59 struct core_rx_stop_ramrod_data core_rx_queue_stop;
+7 -3
drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
··· 369 369 qed_chain_get_pbl_phys(&p_hwfn->p_eq->chain)); 370 370 page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain); 371 371 p_ramrod->event_ring_num_pages = page_cnt; 372 - DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr, 372 + 373 + /* Place consolidation queue address in ramrod */ 374 + DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_base_addr, 373 375 qed_chain_get_pbl_phys(&p_hwfn->p_consq->chain)); 376 + page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_consq->chain); 377 + p_ramrod->consolid_q_num_pages = page_cnt; 374 378 375 379 qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config); 376 380 ··· 405 401 if (p_hwfn->cdev->p_iov_info) { 406 402 struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; 407 403 408 - p_ramrod->base_vf_id = (u8) p_iov->first_vf_in_pf; 409 - p_ramrod->num_vfs = (u8) p_iov->total_vfs; 404 + p_ramrod->base_vf_id = (u8)p_iov->first_vf_in_pf; 405 + p_ramrod->num_vfs = (u8)p_iov->total_vfs; 410 406 } 411 407 p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR; 412 408 p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;
+23 -27
drivers/net/ethernet/qlogic/qed/qed_spq.c
··· 32 32 #include "qed_rdma.h" 33 33 34 34 /*************************************************************************** 35 - * Structures & Definitions 36 - ***************************************************************************/ 35 + * Structures & Definitions 36 + ***************************************************************************/ 37 37 38 38 #define SPQ_HIGH_PRI_RESERVE_DEFAULT (1) 39 39 ··· 43 43 #define SPQ_BLOCK_SLEEP_MS (5) 44 44 45 45 /*************************************************************************** 46 - * Blocking Imp. (BLOCK/EBLOCK mode) 47 - ***************************************************************************/ 46 + * Blocking Imp. (BLOCK/EBLOCK mode) 47 + ***************************************************************************/ 48 48 static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn, 49 49 void *cookie, 50 50 union event_ring_data *data, u8 fw_return_code) ··· 150 150 } 151 151 152 152 /*************************************************************************** 153 - * SPQ entries inner API 154 - ***************************************************************************/ 153 + * SPQ entries inner API 154 + ***************************************************************************/ 155 155 static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn, 156 156 struct qed_spq_entry *p_ent) 157 157 { ··· 185 185 } 186 186 187 187 /*************************************************************************** 188 - * HSI access 189 - ***************************************************************************/ 188 + * HSI access 189 + ***************************************************************************/ 190 190 static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn, 191 191 struct qed_spq *p_spq) 192 192 { ··· 218 218 physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB); 219 219 p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(physical_q); 220 220 221 - p_cxt->xstorm_st_context.spq_base_lo = 221 + 
p_cxt->xstorm_st_context.spq_base_addr.lo = 222 222 DMA_LO_LE(p_spq->chain.p_phys_addr); 223 - p_cxt->xstorm_st_context.spq_base_hi = 223 + p_cxt->xstorm_st_context.spq_base_addr.hi = 224 224 DMA_HI_LE(p_spq->chain.p_phys_addr); 225 - 226 - DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr, 227 - p_hwfn->p_consq->chain.p_phys_addr); 228 225 } 229 226 230 227 static int qed_spq_hw_post(struct qed_hwfn *p_hwfn, ··· 263 266 } 264 267 265 268 /*************************************************************************** 266 - * Asynchronous events 267 - ***************************************************************************/ 269 + * Asynchronous events 270 + ***************************************************************************/ 268 271 static int 269 272 qed_async_event_completion(struct qed_hwfn *p_hwfn, 270 273 struct event_ring_entry *p_eqe) ··· 309 312 } 310 313 311 314 /*************************************************************************** 312 - * EQ API 313 - ***************************************************************************/ 315 + * EQ API 316 + ***************************************************************************/ 314 317 void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod) 315 318 { 316 319 u32 addr = GTT_BAR0_MAP_REG_USDM_RAM + ··· 431 434 } 432 435 433 436 /*************************************************************************** 434 - * CQE API - manipulate EQ functionality 435 - ***************************************************************************/ 437 + * CQE API - manipulate EQ functionality 438 + ***************************************************************************/ 436 439 static int qed_cqe_completion(struct qed_hwfn *p_hwfn, 437 440 struct eth_slow_path_rx_cqe *cqe, 438 441 enum protocol_type protocol) ··· 462 465 } 463 466 464 467 /*************************************************************************** 465 - * Slow hwfn Queue (spq) 466 - 
***************************************************************************/ 468 + * Slow hwfn Queue (spq) 469 + ***************************************************************************/ 467 470 void qed_spq_setup(struct qed_hwfn *p_hwfn) 468 471 { 469 472 struct qed_spq *p_spq = p_hwfn->p_spq; ··· 546 549 int ret; 547 550 548 551 /* SPQ struct */ 549 - p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL); 552 + p_spq = kzalloc(sizeof(*p_spq), GFP_KERNEL); 550 553 if (!p_spq) 551 554 return -ENOMEM; 552 555 ··· 674 677 struct qed_spq *p_spq = p_hwfn->p_spq; 675 678 676 679 if (p_ent->queue == &p_spq->unlimited_pending) { 677 - 678 680 if (list_empty(&p_spq->free_pool)) { 679 681 list_add_tail(&p_ent->list, &p_spq->unlimited_pending); 680 682 p_spq->unlimited_pending_count++; ··· 722 726 } 723 727 724 728 /*************************************************************************** 725 - * Accessor 726 - ***************************************************************************/ 729 + * Accessor 730 + ***************************************************************************/ 727 731 u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn) 728 732 { 729 733 if (!p_hwfn->p_spq) ··· 732 736 } 733 737 734 738 /*************************************************************************** 735 - * Posting new Ramrods 736 - ***************************************************************************/ 739 + * Posting new Ramrods 740 + ***************************************************************************/ 737 741 static int qed_spq_post_list(struct qed_hwfn *p_hwfn, 738 742 struct list_head *head, u32 keep_reserve) 739 743 {
+57 -55
drivers/net/ethernet/qlogic/qed/qed_sriov.c
··· 20 20 #include "qed_sp.h" 21 21 #include "qed_sriov.h" 22 22 #include "qed_vf.h" 23 - static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, 24 - u8 opcode, 25 - __le16 echo, 26 - union event_ring_data *data, u8 fw_return_code); 27 23 static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid); 24 + 25 + static u16 qed_vf_from_entity_id(__le16 entity_id) 26 + { 27 + return le16_to_cpu(entity_id) - MAX_NUM_PFS; 28 + } 28 29 29 30 static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf) 30 31 { ··· 171 170 b_enabled_only, false)) 172 171 vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id]; 173 172 else 174 - DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n", 175 - relative_vf_id); 173 + DP_ERR(p_hwfn, "%s: VF[%d] is not enabled\n", 174 + __func__, relative_vf_id); 176 175 177 176 return vf; 178 177 } ··· 310 309 struct qed_dmae_params params; 311 310 struct qed_vf_info *p_vf; 312 311 313 - p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 312 + p_vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); 314 313 if (!p_vf) 315 314 return -EINVAL; 316 315 ··· 422 421 bulletin_p = p_iov_info->bulletins_phys; 423 422 if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) { 424 423 DP_ERR(p_hwfn, 425 - "qed_iov_setup_vfdb called without allocating mem first\n"); 424 + "%s called without allocating mem first\n", __func__); 426 425 return; 427 426 } 428 427 ··· 466 465 num_vfs = p_hwfn->cdev->p_iov_info->total_vfs; 467 466 468 467 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 469 - "qed_iov_allocate_vfdb for %d VFs\n", num_vfs); 468 + "%s for %d VFs\n", __func__, num_vfs); 470 469 471 470 /* Allocate PF Mailbox buffer (per-VF) */ 472 471 p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs; ··· 502 501 QED_MSG_IOV, 503 502 "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n", 504 503 p_iov_info->mbx_msg_virt_addr, 505 - (u64) p_iov_info->mbx_msg_phys_addr, 504 + 
(u64)p_iov_info->mbx_msg_phys_addr, 506 505 p_iov_info->mbx_reply_virt_addr, 507 - (u64) p_iov_info->mbx_reply_phys_addr, 508 - p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys); 506 + (u64)p_iov_info->mbx_reply_phys_addr, 507 + p_iov_info->p_bulletins, (u64)p_iov_info->bulletins_phys); 509 508 510 509 return 0; 511 510 } ··· 610 609 if (rc) 611 610 return rc; 612 611 613 - /* We want PF IOV to be synonemous with the existance of p_iov_info; 612 + /* We want PF IOV to be synonemous with the existence of p_iov_info; 614 613 * In case the capability is published but there are no VFs, simply 615 614 * de-allocate the struct. 616 615 */ ··· 716 715 int i; 717 716 718 717 /* Set VF masks and configuration - pretend */ 719 - qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid); 718 + qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid); 720 719 721 720 qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0); 722 721 723 722 /* unpretend */ 724 - qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); 723 + qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid); 725 724 726 725 /* iterate over all queues, clear sb consumer */ 727 726 for (i = 0; i < vf->num_sbs; i++) ··· 736 735 { 737 736 u32 igu_vf_conf; 738 737 739 - qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid); 738 + qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid); 740 739 741 740 igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION); 742 741 ··· 748 747 qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf); 749 748 750 749 /* unpretend */ 751 - qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); 750 + qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid); 752 751 } 753 752 754 753 static int ··· 809 808 if (rc) 810 809 return rc; 811 810 812 - qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid); 811 + qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid); 813 812 814 813 SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, 
p_hwfn->rel_pf_id); 815 814 STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf); ··· 818 817 p_hwfn->hw_info.hw_mode); 819 818 820 819 /* unpretend */ 821 - qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); 820 + qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid); 822 821 823 822 vf->state = VF_FREE; 824 823 ··· 906 905 p_block->igu_sb_id * sizeof(u64), 2, NULL); 907 906 } 908 907 909 - vf->num_sbs = (u8) num_rx_queues; 908 + vf->num_sbs = (u8)num_rx_queues; 910 909 911 910 return vf->num_sbs; 912 911 } ··· 990 989 991 990 vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false); 992 991 if (!vf) { 993 - DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n"); 992 + DP_ERR(p_hwfn, "%s : vf is NULL\n", __func__); 994 993 return -EINVAL; 995 994 } 996 995 ··· 1094 1093 1095 1094 vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); 1096 1095 if (!vf) { 1097 - DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n"); 1096 + DP_ERR(p_hwfn, "%s : vf is NULL\n", __func__); 1098 1097 return -EINVAL; 1099 1098 } 1100 1099 ··· 1547 1546 memset(resp, 0, sizeof(*resp)); 1548 1547 1549 1548 /* Write the PF version so that VF would know which version 1550 - * is supported - might be later overriden. This guarantees that 1549 + * is supported - might be later overridden. This guarantees that 1551 1550 * VF could recognize legacy PF based on lack of versions in reply. 
1552 1551 */ 1553 1552 pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR; ··· 1899 1898 int sb_id; 1900 1899 int rc; 1901 1900 1902 - vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true); 1901 + vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true); 1903 1902 if (!vf_info) { 1904 1903 DP_NOTICE(p_hwfn->cdev, 1905 1904 "Failed to get VF info, invalid vfid [%d]\n", ··· 1959 1958 rc = qed_sp_eth_vport_start(p_hwfn, &params); 1960 1959 if (rc) { 1961 1960 DP_ERR(p_hwfn, 1962 - "qed_iov_vf_mbx_start_vport returned error %d\n", rc); 1961 + "%s returned error %d\n", __func__, rc); 1963 1962 status = PFVF_STATUS_FAILURE; 1964 1963 } else { 1965 1964 vf->vport_instance++; ··· 1995 1994 1996 1995 rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id); 1997 1996 if (rc) { 1998 - DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n", 1999 - rc); 1997 + DP_ERR(p_hwfn, "%s returned error %d\n", 1998 + __func__, rc); 2000 1999 status = PFVF_STATUS_FAILURE; 2001 2000 } 2002 2001 ··· 3032 3031 goto out; 3033 3032 } 3034 3033 p_rss_params = vzalloc(sizeof(*p_rss_params)); 3035 - if (p_rss_params == NULL) { 3034 + if (!p_rss_params) { 3036 3035 status = PFVF_STATUS_FAILURE; 3037 3036 goto out; 3038 3037 } ··· 3552 3551 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE, 3553 3552 sizeof(struct pfvf_def_resp_tlv), status); 3554 3553 } 3554 + 3555 3555 static int 3556 3556 qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn, 3557 3557 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) ··· 3560 3558 int cnt; 3561 3559 u32 val; 3562 3560 3563 - qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid); 3561 + qed_fid_pretend(p_hwfn, p_ptt, (u16)p_vf->concrete_fid); 3564 3562 3565 3563 for (cnt = 0; cnt < 50; cnt++) { 3566 3564 val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT); ··· 3568 3566 break; 3569 3567 msleep(20); 3570 3568 } 3571 - qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); 3569 + qed_fid_pretend(p_hwfn, 
p_ptt, (u16)p_hwfn->hw_info.concrete_fid); 3572 3570 3573 3571 if (cnt == 50) { 3574 3572 DP_ERR(p_hwfn, ··· 3845 3843 struct qed_iov_vf_mbx *mbx; 3846 3844 struct qed_vf_info *p_vf; 3847 3845 3848 - p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 3846 + p_vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); 3849 3847 if (!p_vf) 3850 3848 return; 3851 3849 ··· 3982 3980 static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn, 3983 3981 u16 abs_vfid) 3984 3982 { 3985 - u8 min = (u8) p_hwfn->cdev->p_iov_info->first_vf_in_pf; 3983 + u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf; 3986 3984 3987 3985 if (!_qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) { 3988 3986 DP_VERBOSE(p_hwfn, ··· 3992 3990 return NULL; 3993 3991 } 3994 3992 3995 - return &p_hwfn->pf_iov_info->vfs_array[(u8) abs_vfid - min]; 3993 + return &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min]; 3996 3994 } 3997 3995 3998 3996 static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn, ··· 4016 4014 return 0; 4017 4015 } 4018 4016 4019 - static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn, 4020 - struct malicious_vf_eqe_data *p_data) 4017 + void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn, 4018 + struct fw_err_data *p_data) 4021 4019 { 4022 4020 struct qed_vf_info *p_vf; 4023 4021 4024 - p_vf = qed_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id); 4025 - 4022 + p_vf = qed_sriov_get_vf_from_absid(p_hwfn, qed_vf_from_entity_id 4023 + (p_data->entity_id)); 4026 4024 if (!p_vf) 4027 4025 return; 4028 4026 ··· 4039 4037 } 4040 4038 } 4041 4039 4042 - static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo, 4043 - union event_ring_data *data, u8 fw_return_code) 4040 + int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo, 4041 + union event_ring_data *data, u8 fw_return_code) 4044 4042 { 4045 4043 switch (opcode) { 4046 4044 case COMMON_EVENT_VF_PF_CHANNEL: 4047 4045 return qed_sriov_vfpf_msg(p_hwfn, 
le16_to_cpu(echo), 4048 4046 &data->vf_pf_channel.msg_addr); 4049 - case COMMON_EVENT_MALICIOUS_VF: 4050 - qed_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf); 4051 - return 0; 4052 4047 default: 4053 4048 DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n", 4054 4049 opcode); ··· 4075 4076 struct qed_dmae_params params; 4076 4077 struct qed_vf_info *vf_info; 4077 4078 4078 - vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 4079 + vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); 4079 4080 if (!vf_info) 4080 4081 return -EINVAL; 4081 4082 ··· 4176 4177 struct qed_vf_info *vf_info; 4177 4178 u64 feature; 4178 4179 4179 - vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 4180 + vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); 4180 4181 if (!vf_info) { 4181 4182 DP_NOTICE(p_hwfn->cdev, 4182 4183 "Can not set forced MAC, invalid vfid [%d]\n", vfid); ··· 4226 4227 { 4227 4228 struct qed_vf_info *p_vf_info; 4228 4229 4229 - p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 4230 + p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); 4230 4231 if (!p_vf_info) 4231 4232 return false; 4232 4233 ··· 4237 4238 { 4238 4239 struct qed_vf_info *p_vf_info; 4239 4240 4240 - p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 4241 + p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); 4241 4242 if (!p_vf_info) 4242 4243 return true; 4243 4244 ··· 4248 4249 { 4249 4250 struct qed_vf_info *vf_info; 4250 4251 4251 - vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 4252 + vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); 4252 4253 if (!vf_info) 4253 4254 return false; 4254 4255 ··· 4266 4267 goto out; 4267 4268 } 4268 4269 4269 - vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 4270 + vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); 4270 4271 if (!vf) 4271 4272 goto out; 4272 4273 ··· 4345 4346 return rc; 4346 4347 4347 4348 rl_id = abs_vp_id; /* The "rl_id" is set as the "vport_id" */ 4348 - return 
qed_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val); 4349 + return qed_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val, 4350 + QM_RL_TYPE_NORMAL); 4349 4351 } 4350 4352 4351 4353 static int ··· 4377 4377 struct qed_wfq_data *vf_vp_wfq; 4378 4378 struct qed_vf_info *vf_info; 4379 4379 4380 - vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 4380 + vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); 4381 4381 if (!vf_info) 4382 4382 return 0; 4383 4383 ··· 4396 4396 */ 4397 4397 void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag) 4398 4398 { 4399 + /* Memory barrier for setting atomic bit */ 4399 4400 smp_mb__before_atomic(); 4400 4401 set_bit(flag, &hwfn->iov_task_flags); 4402 + /* Memory barrier after setting atomic bit */ 4401 4403 smp_mb__after_atomic(); 4402 4404 DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag); 4403 4405 queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0); ··· 4410 4408 int i; 4411 4409 4412 4410 for_each_hwfn(cdev, i) 4413 - queue_delayed_work(cdev->hwfns[i].iov_wq, 4414 - &cdev->hwfns[i].iov_task, 0); 4411 + queue_delayed_work(cdev->hwfns[i].iov_wq, 4412 + &cdev->hwfns[i].iov_task, 0); 4415 4413 } 4416 4414 4417 4415 int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled) ··· 4419 4417 int i, j; 4420 4418 4421 4419 for_each_hwfn(cdev, i) 4422 - if (cdev->hwfns[i].iov_wq) 4423 - flush_workqueue(cdev->hwfns[i].iov_wq); 4420 + if (cdev->hwfns[i].iov_wq) 4421 + flush_workqueue(cdev->hwfns[i].iov_wq); 4424 4422 4425 4423 /* Mark VFs for disablement */ 4426 4424 qed_iov_set_vfs_to_disable(cdev, true); ··· 5013 5011 } 5014 5012 5015 5013 qed_for_each_vf(hwfn, i) 5016 - qed_iov_post_vf_bulletin(hwfn, i, ptt); 5014 + qed_iov_post_vf_bulletin(hwfn, i, ptt); 5017 5015 5018 5016 qed_ptt_release(hwfn, ptt); 5019 5017 }
+26 -1
drivers/net/ethernet/qlogic/qed/qed_sriov.h
··· 142 142 143 143 enum vf_state { 144 144 VF_FREE = 0, /* VF ready to be acquired holds no resc */ 145 - VF_ACQUIRED, /* VF, acquired, but not initalized */ 145 + VF_ACQUIRED, /* VF, acquired, but not initialized */ 146 146 VF_ENABLED, /* VF, Enabled */ 147 147 VF_RESET, /* VF, FLR'd, pending cleanup */ 148 148 VF_STOPPED /* VF, Stopped */ ··· 312 312 * Return: Void. 313 313 */ 314 314 void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list); 315 + 316 + /** 317 + * qed_sriov_vfpf_malicious(): Handle malicious VF/PF. 318 + * 319 + * @p_hwfn: HW device data. 320 + * @p_data: Pointer to data. 321 + * 322 + * Return: Void. 323 + */ 324 + void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn, 325 + struct fw_err_data *p_data); 326 + 327 + /** 328 + * qed_sriov_eqe_event(): Callback for SRIOV events. 329 + * 330 + * @p_hwfn: HW device data. 331 + * @opcode: Opcode. 332 + * @echo: Echo. 333 + * @data: data 334 + * @fw_return_code: FW return code. 335 + * 336 + * Return: Int. 337 + */ 338 + int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo, 339 + union event_ring_data *data, u8 fw_return_code); 315 340 316 341 /** 317 342 * qed_iov_alloc(): allocate sriov related resources
+1
include/linux/qed/eth_common.h
··· 67 67 /* Ethernet vport update constants */ 68 68 #define ETH_FILTER_RULES_COUNT 10 69 69 #define ETH_RSS_IND_TABLE_ENTRIES_NUM 128 70 + #define ETH_RSS_IND_TABLE_MASK_SIZE_REGS (ETH_RSS_IND_TABLE_ENTRIES_NUM / 32) 70 71 #define ETH_RSS_KEY_SIZE_REGS 10 71 72 #define ETH_RSS_ENGINE_NUM_K2 207 72 73 #define ETH_RSS_ENGINE_NUM_BB 127
+1
include/linux/qed/rdma_common.h
··· 27 27 #define RDMA_MAX_PDS (64 * 1024) 28 28 #define RDMA_MAX_XRC_SRQS (1024) 29 29 #define RDMA_MAX_SRQS (32 * 1024) 30 + #define RDMA_MAX_IRQ_ELEMS_IN_PAGE (128) 30 31 31 32 #define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS 32 33 #define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2