Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'net-hinic3-add-a-driver-for-huawei-3rd-gen-nic-sw-and-hw-initialization'

Fan Gong says:

====================
net: hinic3: Add a driver for Huawei 3rd gen NIC - sw and hw initialization

This is part [3/3] of the hinic3 Ethernet driver initial submission.
With this patch hinic3 becomes a functional Ethernet driver.

The driver parts contained in this patch:
Memory allocation and initialization of the driver structures.
Management interfaces initialization.
HW capabilities probing, initialization and setup using management
interfaces.
Net device open/stop implementation and data queues initialization.
Register VID:DID in PCI id_table.
Fix netif_queue_set_napi usage.

V01: https://lore.kernel.org/netdev/cover.1756195078.git.zhuyikai1@h-partners.com
V02: https://lore.kernel.org/netdev/cover.1756378721.git.zhuyikai1@h-partners.com
V03: https://lore.kernel.org/netdev/cover.1756524443.git.zhuyikai1@h-partners.com
V04: https://lore.kernel.org/netdev/cover.1757057860.git.zhuyikai1@h-partners.com
V05: https://lore.kernel.org/netdev/cover.1757401320.git.zhuyikai1@h-partners.com
====================

Link: https://patch.msgid.link/cover.1757653621.git.zhuyikai1@h-partners.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>

+3923 -86
+2
drivers/net/ethernet/huawei/hinic3/Makefile
··· 14 14 hinic3_lld.o \ 15 15 hinic3_main.o \ 16 16 hinic3_mbox.o \ 17 + hinic3_mgmt.o \ 17 18 hinic3_netdev_ops.o \ 18 19 hinic3_nic_cfg.o \ 19 20 hinic3_nic_io.o \ 20 21 hinic3_queue_common.o \ 22 + hinic3_rss.o \ 21 23 hinic3_rx.o \ 22 24 hinic3_tx.o \ 23 25 hinic3_wq.o
+168
drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c
··· 8 8 #include "hinic3_hwif.h" 9 9 #include "hinic3_mbox.h" 10 10 11 + #define HINIC3_CFG_MAX_QP 256 12 + 13 + static void hinic3_parse_pub_res_cap(struct hinic3_hwdev *hwdev, 14 + struct hinic3_dev_cap *cap, 15 + const struct cfg_cmd_dev_cap *dev_cap, 16 + enum hinic3_func_type type) 17 + { 18 + cap->port_id = dev_cap->port_id; 19 + cap->supp_svcs_bitmap = dev_cap->svc_cap_en; 20 + } 21 + 22 + static void hinic3_parse_l2nic_res_cap(struct hinic3_hwdev *hwdev, 23 + struct hinic3_dev_cap *cap, 24 + const struct cfg_cmd_dev_cap *dev_cap, 25 + enum hinic3_func_type type) 26 + { 27 + struct hinic3_nic_service_cap *nic_svc_cap = &cap->nic_svc_cap; 28 + 29 + nic_svc_cap->max_sqs = min(dev_cap->nic_max_sq_id + 1, 30 + HINIC3_CFG_MAX_QP); 31 + } 32 + 33 + static void hinic3_parse_dev_cap(struct hinic3_hwdev *hwdev, 34 + const struct cfg_cmd_dev_cap *dev_cap, 35 + enum hinic3_func_type type) 36 + { 37 + struct hinic3_dev_cap *cap = &hwdev->cfg_mgmt->cap; 38 + 39 + /* Public resource */ 40 + hinic3_parse_pub_res_cap(hwdev, cap, dev_cap, type); 41 + 42 + /* L2 NIC resource */ 43 + if (hinic3_support_nic(hwdev)) 44 + hinic3_parse_l2nic_res_cap(hwdev, cap, dev_cap, type); 45 + } 46 + 47 + static int get_cap_from_fw(struct hinic3_hwdev *hwdev, 48 + enum hinic3_func_type type) 49 + { 50 + struct mgmt_msg_params msg_params = {}; 51 + struct cfg_cmd_dev_cap dev_cap = {}; 52 + int err; 53 + 54 + dev_cap.func_id = hinic3_global_func_id(hwdev); 55 + 56 + mgmt_msg_params_init_default(&msg_params, &dev_cap, sizeof(dev_cap)); 57 + 58 + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_CFGM, 59 + CFG_CMD_GET_DEV_CAP, &msg_params); 60 + if (err || dev_cap.head.status) { 61 + dev_err(hwdev->dev, 62 + "Failed to get capability from FW, err: %d, status: 0x%x\n", 63 + err, dev_cap.head.status); 64 + return -EIO; 65 + } 66 + 67 + hinic3_parse_dev_cap(hwdev, &dev_cap, type); 68 + 69 + return 0; 70 + } 71 + 72 + static int hinic3_init_irq_info(struct hinic3_hwdev *hwdev) 73 + { 74 + struct 
hinic3_cfg_mgmt_info *cfg_mgmt = hwdev->cfg_mgmt; 75 + struct hinic3_hwif *hwif = hwdev->hwif; 76 + u16 intr_num = hwif->attr.num_irqs; 77 + struct hinic3_irq_info *irq_info; 78 + u16 intr_needed; 79 + 80 + intr_needed = hwif->attr.msix_flex_en ? (hwif->attr.num_aeqs + 81 + hwif->attr.num_ceqs + hwif->attr.num_sq) : intr_num; 82 + if (intr_needed > intr_num) { 83 + dev_warn(hwdev->dev, "Irq num cfg %d is less than the needed irq num %d msix_flex_en %d\n", 84 + intr_num, intr_needed, hwdev->hwif->attr.msix_flex_en); 85 + intr_needed = intr_num; 86 + } 87 + 88 + irq_info = &cfg_mgmt->irq_info; 89 + irq_info->irq = kcalloc(intr_num, sizeof(struct hinic3_irq), 90 + GFP_KERNEL); 91 + if (!irq_info->irq) 92 + return -ENOMEM; 93 + 94 + irq_info->num_irq_hw = intr_needed; 95 + mutex_init(&irq_info->irq_mutex); 96 + 97 + return 0; 98 + } 99 + 100 + static int hinic3_init_irq_alloc_info(struct hinic3_hwdev *hwdev) 101 + { 102 + struct hinic3_cfg_mgmt_info *cfg_mgmt = hwdev->cfg_mgmt; 103 + struct hinic3_irq *irq = cfg_mgmt->irq_info.irq; 104 + u16 nreq = cfg_mgmt->irq_info.num_irq_hw; 105 + struct pci_dev *pdev = hwdev->pdev; 106 + int actual_irq; 107 + u16 i; 108 + 109 + actual_irq = pci_alloc_irq_vectors(pdev, 2, nreq, PCI_IRQ_MSIX); 110 + if (actual_irq < 0) { 111 + dev_err(hwdev->dev, "Alloc msix entries with threshold 2 failed. 
actual_irq: %d\n", 112 + actual_irq); 113 + return -ENOMEM; 114 + } 115 + 116 + nreq = actual_irq; 117 + cfg_mgmt->irq_info.num_irq = nreq; 118 + 119 + for (i = 0; i < nreq; ++i) { 120 + irq[i].msix_entry_idx = i; 121 + irq[i].irq_id = pci_irq_vector(pdev, i); 122 + irq[i].allocated = false; 123 + } 124 + 125 + return 0; 126 + } 127 + 128 + int hinic3_init_cfg_mgmt(struct hinic3_hwdev *hwdev) 129 + { 130 + struct hinic3_cfg_mgmt_info *cfg_mgmt; 131 + int err; 132 + 133 + cfg_mgmt = kzalloc(sizeof(*cfg_mgmt), GFP_KERNEL); 134 + if (!cfg_mgmt) 135 + return -ENOMEM; 136 + 137 + hwdev->cfg_mgmt = cfg_mgmt; 138 + 139 + err = hinic3_init_irq_info(hwdev); 140 + if (err) { 141 + dev_err(hwdev->dev, "Failed to init hinic3_irq_mgmt_info, err: %d\n", 142 + err); 143 + goto err_free_cfg_mgmt; 144 + } 145 + 146 + err = hinic3_init_irq_alloc_info(hwdev); 147 + if (err) { 148 + dev_err(hwdev->dev, "Failed to init hinic3_irq_info, err: %d\n", 149 + err); 150 + goto err_free_irq_info; 151 + } 152 + 153 + return 0; 154 + 155 + err_free_irq_info: 156 + kfree(cfg_mgmt->irq_info.irq); 157 + cfg_mgmt->irq_info.irq = NULL; 158 + err_free_cfg_mgmt: 159 + kfree(cfg_mgmt); 160 + 161 + return err; 162 + } 163 + 164 + void hinic3_free_cfg_mgmt(struct hinic3_hwdev *hwdev) 165 + { 166 + struct hinic3_cfg_mgmt_info *cfg_mgmt = hwdev->cfg_mgmt; 167 + 168 + pci_free_irq_vectors(hwdev->pdev); 169 + kfree(cfg_mgmt->irq_info.irq); 170 + cfg_mgmt->irq_info.irq = NULL; 171 + kfree(cfg_mgmt); 172 + } 173 + 11 174 int hinic3_alloc_irqs(struct hinic3_hwdev *hwdev, u16 num, 12 175 struct msix_entry *alloc_arr, u16 *act_num) 13 176 { ··· 212 49 } 213 50 } 214 51 mutex_unlock(&irq_info->irq_mutex); 52 + } 53 + 54 + int hinic3_init_capability(struct hinic3_hwdev *hwdev) 55 + { 56 + return get_cap_from_fw(hwdev, HINIC3_FUNC_TYPE_VF); 215 57 } 216 58 217 59 bool hinic3_support_nic(struct hinic3_hwdev *hwdev)
+4
drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h
··· 42 42 struct hinic3_dev_cap cap; 43 43 }; 44 44 45 + int hinic3_init_cfg_mgmt(struct hinic3_hwdev *hwdev); 46 + void hinic3_free_cfg_mgmt(struct hinic3_hwdev *hwdev); 47 + 45 48 int hinic3_alloc_irqs(struct hinic3_hwdev *hwdev, u16 num, 46 49 struct msix_entry *alloc_arr, u16 *act_num); 47 50 void hinic3_free_irq(struct hinic3_hwdev *hwdev, u32 irq_id); 48 51 52 + int hinic3_init_capability(struct hinic3_hwdev *hwdev); 49 53 bool hinic3_support_nic(struct hinic3_hwdev *hwdev); 50 54 u16 hinic3_func_max_qnum(struct hinic3_hwdev *hwdev); 51 55 u8 hinic3_physical_port_id(struct hinic3_hwdev *hwdev);
+363
drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c
··· 3 3 4 4 #include <linux/delay.h> 5 5 6 + #include "hinic3_cmdq.h" 6 7 #include "hinic3_hw_comm.h" 7 8 #include "hinic3_hwdev.h" 8 9 #include "hinic3_hwif.h" ··· 58 57 dev_err(hwdev->dev, "Failed to reset func resources, reset_flag 0x%llx, err: %d, status: 0x%x\n", 59 58 reset_flag, err, func_reset.head.status); 60 59 return -EIO; 60 + } 61 + 62 + return 0; 63 + } 64 + 65 + static int hinic3_comm_features_nego(struct hinic3_hwdev *hwdev, u8 opcode, 66 + u64 *s_feature, u16 size) 67 + { 68 + struct comm_cmd_feature_nego feature_nego = {}; 69 + struct mgmt_msg_params msg_params = {}; 70 + int err; 71 + 72 + feature_nego.func_id = hinic3_global_func_id(hwdev); 73 + feature_nego.opcode = opcode; 74 + if (opcode == MGMT_MSG_CMD_OP_SET) 75 + memcpy(feature_nego.s_feature, s_feature, 76 + array_size(size, sizeof(u64))); 77 + 78 + mgmt_msg_params_init_default(&msg_params, &feature_nego, 79 + sizeof(feature_nego)); 80 + 81 + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM, 82 + COMM_CMD_FEATURE_NEGO, &msg_params); 83 + if (err || feature_nego.head.status) { 84 + dev_err(hwdev->dev, "Failed to negotiate feature, err: %d, status: 0x%x\n", 85 + err, feature_nego.head.status); 86 + return -EINVAL; 87 + } 88 + 89 + if (opcode == MGMT_MSG_CMD_OP_GET) 90 + memcpy(s_feature, feature_nego.s_feature, 91 + array_size(size, sizeof(u64))); 92 + 93 + return 0; 94 + } 95 + 96 + int hinic3_get_comm_features(struct hinic3_hwdev *hwdev, u64 *s_feature, 97 + u16 size) 98 + { 99 + return hinic3_comm_features_nego(hwdev, MGMT_MSG_CMD_OP_GET, s_feature, 100 + size); 101 + } 102 + 103 + int hinic3_set_comm_features(struct hinic3_hwdev *hwdev, u64 *s_feature, 104 + u16 size) 105 + { 106 + return hinic3_comm_features_nego(hwdev, MGMT_MSG_CMD_OP_SET, s_feature, 107 + size); 108 + } 109 + 110 + int hinic3_get_global_attr(struct hinic3_hwdev *hwdev, 111 + struct comm_global_attr *attr) 112 + { 113 + struct comm_cmd_get_glb_attr get_attr = {}; 114 + struct mgmt_msg_params msg_params = {}; 115 + 
int err; 116 + 117 + mgmt_msg_params_init_default(&msg_params, &get_attr, sizeof(get_attr)); 118 + 119 + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM, 120 + COMM_CMD_GET_GLOBAL_ATTR, &msg_params); 121 + if (err || get_attr.head.status) { 122 + dev_err(hwdev->dev, 123 + "Failed to get global attribute, err: %d, status: 0x%x\n", 124 + err, get_attr.head.status); 125 + return -EIO; 126 + } 127 + 128 + memcpy(attr, &get_attr.attr, sizeof(*attr)); 129 + 130 + return 0; 131 + } 132 + 133 + int hinic3_set_func_svc_used_state(struct hinic3_hwdev *hwdev, u16 svc_type, 134 + u8 state) 135 + { 136 + struct comm_cmd_set_func_svc_used_state used_state = {}; 137 + struct mgmt_msg_params msg_params = {}; 138 + int err; 139 + 140 + used_state.func_id = hinic3_global_func_id(hwdev); 141 + used_state.svc_type = svc_type; 142 + used_state.used_state = state; 143 + 144 + mgmt_msg_params_init_default(&msg_params, &used_state, 145 + sizeof(used_state)); 146 + 147 + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM, 148 + COMM_CMD_SET_FUNC_SVC_USED_STATE, 149 + &msg_params); 150 + if (err || used_state.head.status) { 151 + dev_err(hwdev->dev, 152 + "Failed to set func service used state, err: %d, status: 0x%x\n", 153 + err, used_state.head.status); 154 + return -EIO; 155 + } 156 + 157 + return 0; 158 + } 159 + 160 + int hinic3_set_dma_attr_tbl(struct hinic3_hwdev *hwdev, u8 entry_idx, u8 st, 161 + u8 at, u8 ph, u8 no_snooping, u8 tph_en) 162 + { 163 + struct comm_cmd_set_dma_attr dma_attr = {}; 164 + struct mgmt_msg_params msg_params = {}; 165 + int err; 166 + 167 + dma_attr.func_id = hinic3_global_func_id(hwdev); 168 + dma_attr.entry_idx = entry_idx; 169 + dma_attr.st = st; 170 + dma_attr.at = at; 171 + dma_attr.ph = ph; 172 + dma_attr.no_snooping = no_snooping; 173 + dma_attr.tph_en = tph_en; 174 + 175 + mgmt_msg_params_init_default(&msg_params, &dma_attr, sizeof(dma_attr)); 176 + 177 + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM, 178 + COMM_CMD_SET_DMA_ATTR, 
&msg_params); 179 + if (err || dma_attr.head.status) { 180 + dev_err(hwdev->dev, "Failed to set dma attr, err: %d, status: 0x%x\n", 181 + err, dma_attr.head.status); 182 + return -EIO; 183 + } 184 + 185 + return 0; 186 + } 187 + 188 + int hinic3_set_wq_page_size(struct hinic3_hwdev *hwdev, u16 func_idx, 189 + u32 page_size) 190 + { 191 + struct comm_cmd_cfg_wq_page_size page_size_info = {}; 192 + struct mgmt_msg_params msg_params = {}; 193 + int err; 194 + 195 + page_size_info.func_id = func_idx; 196 + page_size_info.page_size = ilog2(page_size / HINIC3_MIN_PAGE_SIZE); 197 + page_size_info.opcode = MGMT_MSG_CMD_OP_SET; 198 + 199 + mgmt_msg_params_init_default(&msg_params, &page_size_info, 200 + sizeof(page_size_info)); 201 + 202 + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM, 203 + COMM_CMD_CFG_PAGESIZE, &msg_params); 204 + if (err || page_size_info.head.status) { 205 + dev_err(hwdev->dev, 206 + "Failed to set wq page size, err: %d, status: 0x%x\n", 207 + err, page_size_info.head.status); 208 + return -EFAULT; 209 + } 210 + 211 + return 0; 212 + } 213 + 214 + int hinic3_set_cmdq_depth(struct hinic3_hwdev *hwdev, u16 cmdq_depth) 215 + { 216 + struct comm_cmd_set_root_ctxt root_ctxt = {}; 217 + struct mgmt_msg_params msg_params = {}; 218 + int err; 219 + 220 + root_ctxt.func_id = hinic3_global_func_id(hwdev); 221 + 222 + root_ctxt.set_cmdq_depth = 1; 223 + root_ctxt.cmdq_depth = ilog2(cmdq_depth); 224 + 225 + mgmt_msg_params_init_default(&msg_params, &root_ctxt, 226 + sizeof(root_ctxt)); 227 + 228 + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM, 229 + COMM_CMD_SET_VAT, &msg_params); 230 + if (err || root_ctxt.head.status) { 231 + dev_err(hwdev->dev, 232 + "Failed to set cmdq depth, err: %d, status: 0x%x\n", 233 + err, root_ctxt.head.status); 234 + return -EFAULT; 235 + } 236 + 237 + return 0; 238 + } 239 + 240 + #define HINIC3_WAIT_CMDQ_IDLE_TIMEOUT 5000 241 + 242 + static enum hinic3_wait_return check_cmdq_stop_handler(void *priv_data) 243 + { 244 + 
struct hinic3_hwdev *hwdev = priv_data; 245 + enum hinic3_cmdq_type cmdq_type; 246 + struct hinic3_cmdqs *cmdqs; 247 + 248 + cmdqs = hwdev->cmdqs; 249 + for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) { 250 + if (!hinic3_cmdq_idle(&cmdqs->cmdq[cmdq_type])) 251 + return HINIC3_WAIT_PROCESS_WAITING; 252 + } 253 + 254 + return HINIC3_WAIT_PROCESS_CPL; 255 + } 256 + 257 + static int wait_cmdq_stop(struct hinic3_hwdev *hwdev) 258 + { 259 + struct hinic3_cmdqs *cmdqs = hwdev->cmdqs; 260 + enum hinic3_cmdq_type cmdq_type; 261 + int err; 262 + 263 + if (!(cmdqs->status & HINIC3_CMDQ_ENABLE)) 264 + return 0; 265 + 266 + cmdqs->status &= ~HINIC3_CMDQ_ENABLE; 267 + err = hinic3_wait_for_timeout(hwdev, check_cmdq_stop_handler, 268 + HINIC3_WAIT_CMDQ_IDLE_TIMEOUT, 269 + USEC_PER_MSEC); 270 + 271 + if (err) 272 + goto err_reenable_cmdq; 273 + 274 + return 0; 275 + 276 + err_reenable_cmdq: 277 + for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) { 278 + if (!hinic3_cmdq_idle(&cmdqs->cmdq[cmdq_type])) 279 + dev_err(hwdev->dev, "Cmdq %d is busy\n", cmdq_type); 280 + } 281 + cmdqs->status |= HINIC3_CMDQ_ENABLE; 282 + 283 + return err; 284 + } 285 + 286 + int hinic3_func_rx_tx_flush(struct hinic3_hwdev *hwdev) 287 + { 288 + struct comm_cmd_clear_resource clear_db = {}; 289 + struct comm_cmd_clear_resource clr_res = {}; 290 + struct hinic3_hwif *hwif = hwdev->hwif; 291 + struct mgmt_msg_params msg_params = {}; 292 + int ret = 0; 293 + int err; 294 + 295 + err = wait_cmdq_stop(hwdev); 296 + if (err) { 297 + dev_warn(hwdev->dev, "CMDQ is still working, CMDQ timeout value is unreasonable\n"); 298 + ret = err; 299 + } 300 + 301 + hinic3_toggle_doorbell(hwif, DISABLE_DOORBELL); 302 + 303 + clear_db.func_id = hwif->attr.func_global_idx; 304 + mgmt_msg_params_init_default(&msg_params, &clear_db, sizeof(clear_db)); 305 + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM, 306 + COMM_CMD_FLUSH_DOORBELL, &msg_params); 307 + if (err || clear_db.head.status) { 308 + 
dev_warn(hwdev->dev, "Failed to flush doorbell, err: %d, status: 0x%x\n", 309 + err, clear_db.head.status); 310 + if (err) 311 + ret = err; 312 + else 313 + ret = -EFAULT; 314 + } 315 + 316 + clr_res.func_id = hwif->attr.func_global_idx; 317 + msg_params.buf_in = &clr_res; 318 + msg_params.in_size = sizeof(clr_res); 319 + err = hinic3_send_mbox_to_mgmt_no_ack(hwdev, MGMT_MOD_COMM, 320 + COMM_CMD_START_FLUSH, 321 + &msg_params); 322 + if (err) { 323 + dev_warn(hwdev->dev, "Failed to notice flush message, err: %d\n", 324 + err); 325 + ret = err; 326 + } 327 + 328 + hinic3_toggle_doorbell(hwif, ENABLE_DOORBELL); 329 + 330 + err = hinic3_reinit_cmdq_ctxts(hwdev); 331 + if (err) { 332 + dev_warn(hwdev->dev, "Failed to reinit cmdq\n"); 333 + ret = err; 334 + } 335 + 336 + return ret; 337 + } 338 + 339 + static int get_hw_rx_buf_size_idx(int rx_buf_sz, u16 *buf_sz_idx) 340 + { 341 + /* Supported RX buffer sizes in bytes. Configured by array index. */ 342 + static const int supported_sizes[16] = { 343 + [0] = 32, [1] = 64, [2] = 96, [3] = 128, 344 + [4] = 192, [5] = 256, [6] = 384, [7] = 512, 345 + [8] = 768, [9] = 1024, [10] = 1536, [11] = 2048, 346 + [12] = 3072, [13] = 4096, [14] = 8192, [15] = 16384, 347 + }; 348 + u16 idx; 349 + 350 + /* Scan from biggest to smallest. Choose supported size that is equal or 351 + * smaller. For smaller value HW will under-utilize posted buffers. For 352 + * bigger value HW may overrun posted buffers. 
353 + */ 354 + idx = ARRAY_SIZE(supported_sizes); 355 + while (idx > 0) { 356 + idx--; 357 + if (supported_sizes[idx] <= rx_buf_sz) { 358 + *buf_sz_idx = idx; 359 + return 0; 360 + } 361 + } 362 + 363 + return -EINVAL; 364 + } 365 + 366 + int hinic3_set_root_ctxt(struct hinic3_hwdev *hwdev, u32 rq_depth, u32 sq_depth, 367 + int rx_buf_sz) 368 + { 369 + struct comm_cmd_set_root_ctxt root_ctxt = {}; 370 + struct mgmt_msg_params msg_params = {}; 371 + u16 buf_sz_idx; 372 + int err; 373 + 374 + err = get_hw_rx_buf_size_idx(rx_buf_sz, &buf_sz_idx); 375 + if (err) 376 + return err; 377 + 378 + root_ctxt.func_id = hinic3_global_func_id(hwdev); 379 + 380 + root_ctxt.set_cmdq_depth = 0; 381 + root_ctxt.cmdq_depth = 0; 382 + 383 + root_ctxt.lro_en = 1; 384 + 385 + root_ctxt.rq_depth = ilog2(rq_depth); 386 + root_ctxt.rx_buf_sz = buf_sz_idx; 387 + root_ctxt.sq_depth = ilog2(sq_depth); 388 + 389 + mgmt_msg_params_init_default(&msg_params, &root_ctxt, 390 + sizeof(root_ctxt)); 391 + 392 + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM, 393 + COMM_CMD_SET_VAT, &msg_params); 394 + if (err || root_ctxt.head.status) { 395 + dev_err(hwdev->dev, 396 + "Failed to set root context, err: %d, status: 0x%x\n", 397 + err, root_ctxt.head.status); 398 + return -EFAULT; 399 + } 400 + 401 + return 0; 402 + } 403 + 404 + int hinic3_clean_root_ctxt(struct hinic3_hwdev *hwdev) 405 + { 406 + struct comm_cmd_set_root_ctxt root_ctxt = {}; 407 + struct mgmt_msg_params msg_params = {}; 408 + int err; 409 + 410 + root_ctxt.func_id = hinic3_global_func_id(hwdev); 411 + 412 + mgmt_msg_params_init_default(&msg_params, &root_ctxt, 413 + sizeof(root_ctxt)); 414 + 415 + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM, 416 + COMM_CMD_SET_VAT, &msg_params); 417 + if (err || root_ctxt.head.status) { 418 + dev_err(hwdev->dev, 419 + "Failed to set root context, err: %d, status: 0x%x\n", 420 + err, root_ctxt.head.status); 421 + return -EFAULT; 61 422 } 62 423 63 424 return 0;
+21
drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h
··· 8 8 9 9 struct hinic3_hwdev; 10 10 11 + #define HINIC3_WQ_PAGE_SIZE_ORDER 8 12 + 11 13 struct hinic3_interrupt_info { 12 14 u32 lli_set; 13 15 u32 interrupt_coalesc_set; ··· 24 22 int hinic3_set_interrupt_cfg_direct(struct hinic3_hwdev *hwdev, 25 23 const struct hinic3_interrupt_info *info); 26 24 int hinic3_func_reset(struct hinic3_hwdev *hwdev, u16 func_id, u64 reset_flag); 25 + 26 + int hinic3_get_comm_features(struct hinic3_hwdev *hwdev, u64 *s_feature, 27 + u16 size); 28 + int hinic3_set_comm_features(struct hinic3_hwdev *hwdev, u64 *s_feature, 29 + u16 size); 30 + int hinic3_get_global_attr(struct hinic3_hwdev *hwdev, 31 + struct comm_global_attr *attr); 32 + int hinic3_set_func_svc_used_state(struct hinic3_hwdev *hwdev, u16 svc_type, 33 + u8 state); 34 + int hinic3_set_dma_attr_tbl(struct hinic3_hwdev *hwdev, u8 entry_idx, u8 st, 35 + u8 at, u8 ph, u8 no_snooping, u8 tph_en); 36 + 37 + int hinic3_set_wq_page_size(struct hinic3_hwdev *hwdev, u16 func_idx, 38 + u32 page_size); 39 + int hinic3_set_cmdq_depth(struct hinic3_hwdev *hwdev, u16 cmdq_depth); 40 + int hinic3_func_rx_tx_flush(struct hinic3_hwdev *hwdev); 41 + int hinic3_set_root_ctxt(struct hinic3_hwdev *hwdev, u32 rq_depth, u32 sq_depth, 42 + int rx_buf_sz); 43 + int hinic3_clean_root_ctxt(struct hinic3_hwdev *hwdev); 27 44 28 45 #endif
+115
drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h
··· 51 51 msg_params->timeout_ms = 0; 52 52 } 53 53 54 + enum cfg_cmd { 55 + CFG_CMD_GET_DEV_CAP = 0, 56 + }; 57 + 58 + /* Device capabilities, defined by hw */ 59 + struct cfg_cmd_dev_cap { 60 + struct mgmt_msg_head head; 61 + 62 + u16 func_id; 63 + u16 rsvd1; 64 + 65 + /* Public resources */ 66 + u8 host_id; 67 + u8 ep_id; 68 + u8 er_id; 69 + u8 port_id; 70 + 71 + u16 host_total_func; 72 + u8 host_pf_num; 73 + u8 pf_id_start; 74 + u16 host_vf_num; 75 + u16 vf_id_start; 76 + u8 host_oq_id_mask_val; 77 + u8 timer_en; 78 + u8 host_valid_bitmap; 79 + u8 rsvd_host; 80 + 81 + u16 svc_cap_en; 82 + u16 max_vf; 83 + u8 flexq_en; 84 + u8 valid_cos_bitmap; 85 + u8 port_cos_valid_bitmap; 86 + u8 rsvd2[45]; 87 + 88 + /* l2nic */ 89 + u16 nic_max_sq_id; 90 + u16 nic_max_rq_id; 91 + u16 nic_default_num_queues; 92 + 93 + u8 rsvd3[250]; 94 + }; 95 + 54 96 /* COMM Commands between Driver to fw */ 55 97 enum comm_cmd { 56 98 /* Commands for clearing FLR and resources */ ··· 140 98 COMM_FUNC_RESET_BIT_NIC = BIT(13), 141 99 }; 142 100 101 + #define COMM_FUNC_RESET_FLAG \ 102 + (COMM_FUNC_RESET_BIT_COMM | COMM_FUNC_RESET_BIT_COMM_CMD_CH | \ 103 + COMM_FUNC_RESET_BIT_FLUSH | COMM_FUNC_RESET_BIT_MQM | \ 104 + COMM_FUNC_RESET_BIT_SMF | COMM_FUNC_RESET_BIT_PF_BW_CFG) 105 + 143 106 struct comm_cmd_func_reset { 144 107 struct mgmt_msg_head head; 145 108 u16 func_id; ··· 161 114 u64 s_feature[COMM_MAX_FEATURE_QWORD]; 162 115 }; 163 116 117 + struct comm_global_attr { 118 + u8 max_host_num; 119 + u8 max_pf_num; 120 + u16 vf_id_start; 121 + /* for api cmd to mgmt cpu */ 122 + u8 mgmt_host_node_id; 123 + u8 cmdq_num; 124 + u8 rsvd1[34]; 125 + }; 126 + 127 + struct comm_cmd_get_glb_attr { 128 + struct mgmt_msg_head head; 129 + struct comm_global_attr attr; 130 + }; 131 + 132 + enum comm_func_svc_type { 133 + COMM_FUNC_SVC_T_COMM = 0, 134 + COMM_FUNC_SVC_T_NIC = 1, 135 + }; 136 + 137 + struct comm_cmd_set_func_svc_used_state { 138 + struct mgmt_msg_head head; 139 + u16 func_id; 140 + u16 
svc_type; 141 + u8 used_state; 142 + u8 rsvd[35]; 143 + }; 144 + 145 + struct comm_cmd_set_dma_attr { 146 + struct mgmt_msg_head head; 147 + u16 func_id; 148 + u8 entry_idx; 149 + u8 st; 150 + u8 at; 151 + u8 ph; 152 + u8 no_snooping; 153 + u8 tph_en; 154 + u32 resv1; 155 + }; 156 + 164 157 struct comm_cmd_set_ceq_ctrl_reg { 165 158 struct mgmt_msg_head head; 166 159 u16 func_id; ··· 208 121 u32 ctrl0; 209 122 u32 ctrl1; 210 123 u32 rsvd1; 124 + }; 125 + 126 + struct comm_cmd_cfg_wq_page_size { 127 + struct mgmt_msg_head head; 128 + u16 func_id; 129 + u8 opcode; 130 + /* real_size=4KB*2^page_size, range(0~20) must be checked by driver */ 131 + u8 page_size; 132 + u32 rsvd1; 133 + }; 134 + 135 + struct comm_cmd_set_root_ctxt { 136 + struct mgmt_msg_head head; 137 + u16 func_id; 138 + u8 set_cmdq_depth; 139 + u8 cmdq_depth; 140 + u16 rx_buf_sz; 141 + u8 lro_en; 142 + u8 rsvd1; 143 + u16 sq_depth; 144 + u16 rq_depth; 145 + u64 rsvd2; 211 146 }; 212 147 213 148 struct comm_cmdq_ctxt_info { ··· 243 134 u8 cmdq_id; 244 135 u8 rsvd1[5]; 245 136 struct comm_cmdq_ctxt_info ctxt; 137 + }; 138 + 139 + struct comm_cmd_clear_resource { 140 + struct mgmt_msg_head head; 141 + u16 func_id; 142 + u16 rsvd1[3]; 246 143 }; 247 144 248 145 /* Services supported by HW. HW uses these values when delivering events.
+537 -4
drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 // Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. 3 3 4 + #include "hinic3_cmdq.h" 5 + #include "hinic3_csr.h" 6 + #include "hinic3_eqs.h" 4 7 #include "hinic3_hw_comm.h" 5 8 #include "hinic3_hwdev.h" 6 9 #include "hinic3_hwif.h" 7 10 #include "hinic3_mbox.h" 8 11 #include "hinic3_mgmt.h" 9 12 13 + #define HINIC3_PCIE_SNOOP 0 14 + #define HINIC3_PCIE_TPH_DISABLE 0 15 + 16 + #define HINIC3_DMA_ATTR_INDIR_IDX_MASK GENMASK(9, 0) 17 + #define HINIC3_DMA_ATTR_INDIR_IDX_SET(val, member) \ 18 + FIELD_PREP(HINIC3_DMA_ATTR_INDIR_##member##_MASK, val) 19 + 20 + #define HINIC3_DMA_ATTR_ENTRY_ST_MASK GENMASK(7, 0) 21 + #define HINIC3_DMA_ATTR_ENTRY_AT_MASK GENMASK(9, 8) 22 + #define HINIC3_DMA_ATTR_ENTRY_PH_MASK GENMASK(11, 10) 23 + #define HINIC3_DMA_ATTR_ENTRY_NO_SNOOPING_MASK BIT(12) 24 + #define HINIC3_DMA_ATTR_ENTRY_TPH_EN_MASK BIT(13) 25 + #define HINIC3_DMA_ATTR_ENTRY_SET(val, member) \ 26 + FIELD_PREP(HINIC3_DMA_ATTR_ENTRY_##member##_MASK, val) 27 + 28 + #define HINIC3_PCIE_ST_DISABLE 0 29 + #define HINIC3_PCIE_AT_DISABLE 0 30 + #define HINIC3_PCIE_PH_DISABLE 0 31 + #define HINIC3_PCIE_MSIX_ATTR_ENTRY 0 32 + 33 + #define HINIC3_DEFAULT_EQ_MSIX_PENDING_LIMIT 0 34 + #define HINIC3_DEFAULT_EQ_MSIX_COALESC_TIMER_CFG 0xFF 35 + #define HINIC3_DEFAULT_EQ_MSIX_RESEND_TIMER_CFG 7 36 + 37 + #define HINIC3_HWDEV_WQ_NAME "hinic3_hardware" 38 + #define HINIC3_WQ_MAX_REQ 10 39 + 40 + enum hinic3_hwdev_init_state { 41 + HINIC3_HWDEV_MBOX_INITED = 2, 42 + HINIC3_HWDEV_CMDQ_INITED = 3, 43 + }; 44 + 45 + static int hinic3_comm_aeqs_init(struct hinic3_hwdev *hwdev) 46 + { 47 + struct msix_entry aeq_msix_entries[HINIC3_MAX_AEQS]; 48 + u16 num_aeqs, resp_num_irq, i; 49 + int err; 50 + 51 + num_aeqs = hwdev->hwif->attr.num_aeqs; 52 + if (num_aeqs > HINIC3_MAX_AEQS) { 53 + dev_warn(hwdev->dev, "Adjust aeq num to %d\n", 54 + HINIC3_MAX_AEQS); 55 + num_aeqs = HINIC3_MAX_AEQS; 56 + } 57 + err = hinic3_alloc_irqs(hwdev, 
num_aeqs, aeq_msix_entries, 58 + &resp_num_irq); 59 + if (err) { 60 + dev_err(hwdev->dev, "Failed to alloc aeq irqs, num_aeqs: %u\n", 61 + num_aeqs); 62 + return err; 63 + } 64 + 65 + if (resp_num_irq < num_aeqs) { 66 + dev_warn(hwdev->dev, "Adjust aeq num to %u\n", 67 + resp_num_irq); 68 + num_aeqs = resp_num_irq; 69 + } 70 + 71 + err = hinic3_aeqs_init(hwdev, num_aeqs, aeq_msix_entries); 72 + if (err) { 73 + dev_err(hwdev->dev, "Failed to init aeqs\n"); 74 + goto err_free_irqs; 75 + } 76 + 77 + return 0; 78 + 79 + err_free_irqs: 80 + for (i = 0; i < num_aeqs; i++) 81 + hinic3_free_irq(hwdev, aeq_msix_entries[i].vector); 82 + 83 + return err; 84 + } 85 + 86 + static int hinic3_comm_ceqs_init(struct hinic3_hwdev *hwdev) 87 + { 88 + struct msix_entry ceq_msix_entries[HINIC3_MAX_CEQS]; 89 + u16 num_ceqs, resp_num_irq, i; 90 + int err; 91 + 92 + num_ceqs = hwdev->hwif->attr.num_ceqs; 93 + if (num_ceqs > HINIC3_MAX_CEQS) { 94 + dev_warn(hwdev->dev, "Adjust ceq num to %d\n", 95 + HINIC3_MAX_CEQS); 96 + num_ceqs = HINIC3_MAX_CEQS; 97 + } 98 + 99 + err = hinic3_alloc_irqs(hwdev, num_ceqs, ceq_msix_entries, 100 + &resp_num_irq); 101 + if (err) { 102 + dev_err(hwdev->dev, "Failed to alloc ceq irqs, num_ceqs: %u\n", 103 + num_ceqs); 104 + return err; 105 + } 106 + 107 + if (resp_num_irq < num_ceqs) { 108 + dev_warn(hwdev->dev, "Adjust ceq num to %u\n", 109 + resp_num_irq); 110 + num_ceqs = resp_num_irq; 111 + } 112 + 113 + err = hinic3_ceqs_init(hwdev, num_ceqs, ceq_msix_entries); 114 + if (err) { 115 + dev_err(hwdev->dev, 116 + "Failed to init ceqs, err:%d\n", err); 117 + goto err_free_irqs; 118 + } 119 + 120 + return 0; 121 + 122 + err_free_irqs: 123 + for (i = 0; i < num_ceqs; i++) 124 + hinic3_free_irq(hwdev, ceq_msix_entries[i].vector); 125 + 126 + return err; 127 + } 128 + 129 + static int hinic3_comm_mbox_init(struct hinic3_hwdev *hwdev) 130 + { 131 + int err; 132 + 133 + err = hinic3_init_mbox(hwdev); 134 + if (err) 135 + return err; 136 + 137 + 
hinic3_aeq_register_cb(hwdev, HINIC3_MBX_FROM_FUNC, 138 + hinic3_mbox_func_aeqe_handler); 139 + hinic3_aeq_register_cb(hwdev, HINIC3_MSG_FROM_FW, 140 + hinic3_mgmt_msg_aeqe_handler); 141 + 142 + set_bit(HINIC3_HWDEV_MBOX_INITED, &hwdev->func_state); 143 + 144 + return 0; 145 + } 146 + 147 + static void hinic3_comm_mbox_free(struct hinic3_hwdev *hwdev) 148 + { 149 + spin_lock_bh(&hwdev->channel_lock); 150 + clear_bit(HINIC3_HWDEV_MBOX_INITED, &hwdev->func_state); 151 + spin_unlock_bh(&hwdev->channel_lock); 152 + hinic3_aeq_unregister_cb(hwdev, HINIC3_MBX_FROM_FUNC); 153 + hinic3_aeq_unregister_cb(hwdev, HINIC3_MSG_FROM_FW); 154 + hinic3_free_mbox(hwdev); 155 + } 156 + 157 + static int init_aeqs_msix_attr(struct hinic3_hwdev *hwdev) 158 + { 159 + struct hinic3_aeqs *aeqs = hwdev->aeqs; 160 + struct hinic3_interrupt_info info = {}; 161 + struct hinic3_eq *eq; 162 + u16 q_id; 163 + int err; 164 + 165 + info.interrupt_coalesc_set = 1; 166 + info.pending_limit = HINIC3_DEFAULT_EQ_MSIX_PENDING_LIMIT; 167 + info.coalesc_timer_cfg = HINIC3_DEFAULT_EQ_MSIX_COALESC_TIMER_CFG; 168 + info.resend_timer_cfg = HINIC3_DEFAULT_EQ_MSIX_RESEND_TIMER_CFG; 169 + 170 + for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) { 171 + eq = &aeqs->aeq[q_id]; 172 + info.msix_index = eq->msix_entry_idx; 173 + err = hinic3_set_interrupt_cfg_direct(hwdev, &info); 174 + if (err) { 175 + dev_err(hwdev->dev, "Set msix attr for aeq %d failed\n", 176 + q_id); 177 + return err; 178 + } 179 + } 180 + 181 + return 0; 182 + } 183 + 184 + static int init_ceqs_msix_attr(struct hinic3_hwdev *hwdev) 185 + { 186 + struct hinic3_ceqs *ceqs = hwdev->ceqs; 187 + struct hinic3_interrupt_info info = {}; 188 + struct hinic3_eq *eq; 189 + u16 q_id; 190 + int err; 191 + 192 + info.interrupt_coalesc_set = 1; 193 + info.pending_limit = HINIC3_DEFAULT_EQ_MSIX_PENDING_LIMIT; 194 + info.coalesc_timer_cfg = HINIC3_DEFAULT_EQ_MSIX_COALESC_TIMER_CFG; 195 + info.resend_timer_cfg = HINIC3_DEFAULT_EQ_MSIX_RESEND_TIMER_CFG; 196 + 197 + 
for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) { 198 + eq = &ceqs->ceq[q_id]; 199 + info.msix_index = eq->msix_entry_idx; 200 + err = hinic3_set_interrupt_cfg_direct(hwdev, &info); 201 + if (err) { 202 + dev_err(hwdev->dev, "Set msix attr for ceq %u failed\n", 203 + q_id); 204 + return err; 205 + } 206 + } 207 + 208 + return 0; 209 + } 210 + 211 + static int init_basic_mgmt_channel(struct hinic3_hwdev *hwdev) 212 + { 213 + int err; 214 + 215 + err = hinic3_comm_aeqs_init(hwdev); 216 + if (err) { 217 + dev_err(hwdev->dev, "Failed to init async event queues\n"); 218 + return err; 219 + } 220 + 221 + err = hinic3_comm_mbox_init(hwdev); 222 + if (err) { 223 + dev_err(hwdev->dev, "Failed to init mailbox\n"); 224 + goto err_free_comm_aeqs; 225 + } 226 + 227 + err = init_aeqs_msix_attr(hwdev); 228 + if (err) { 229 + dev_err(hwdev->dev, "Failed to init aeqs msix attr\n"); 230 + goto err_free_comm_mbox; 231 + } 232 + 233 + return 0; 234 + 235 + err_free_comm_mbox: 236 + hinic3_comm_mbox_free(hwdev); 237 + err_free_comm_aeqs: 238 + hinic3_aeqs_free(hwdev); 239 + 240 + return err; 241 + } 242 + 243 + static void free_base_mgmt_channel(struct hinic3_hwdev *hwdev) 244 + { 245 + hinic3_comm_mbox_free(hwdev); 246 + hinic3_aeqs_free(hwdev); 247 + } 248 + 249 + static int dma_attr_table_init(struct hinic3_hwdev *hwdev) 250 + { 251 + u32 addr, val, dst_attr; 252 + 253 + /* Indirect access, set entry_idx first */ 254 + addr = HINIC3_CSR_DMA_ATTR_INDIR_IDX_ADDR; 255 + val = hinic3_hwif_read_reg(hwdev->hwif, addr); 256 + val &= ~HINIC3_DMA_ATTR_ENTRY_AT_MASK; 257 + val |= HINIC3_DMA_ATTR_INDIR_IDX_SET(HINIC3_PCIE_MSIX_ATTR_ENTRY, IDX); 258 + hinic3_hwif_write_reg(hwdev->hwif, addr, val); 259 + 260 + addr = HINIC3_CSR_DMA_ATTR_TBL_ADDR; 261 + val = hinic3_hwif_read_reg(hwdev->hwif, addr); 262 + 263 + dst_attr = HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_ST_DISABLE, ST) | 264 + HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_AT_DISABLE, AT) | 265 + HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_PH_DISABLE, PH) | 
266 + HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_SNOOP, NO_SNOOPING) | 267 + HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_TPH_DISABLE, TPH_EN); 268 + if (val == dst_attr) 269 + return 0; 270 + 271 + return hinic3_set_dma_attr_tbl(hwdev, 272 + HINIC3_PCIE_MSIX_ATTR_ENTRY, 273 + HINIC3_PCIE_ST_DISABLE, 274 + HINIC3_PCIE_AT_DISABLE, 275 + HINIC3_PCIE_PH_DISABLE, 276 + HINIC3_PCIE_SNOOP, 277 + HINIC3_PCIE_TPH_DISABLE); 278 + } 279 + 280 + static int init_basic_attributes(struct hinic3_hwdev *hwdev) 281 + { 282 + struct comm_global_attr glb_attr; 283 + int err; 284 + 285 + err = hinic3_func_reset(hwdev, hinic3_global_func_id(hwdev), 286 + COMM_FUNC_RESET_FLAG); 287 + if (err) 288 + return err; 289 + 290 + err = hinic3_get_comm_features(hwdev, hwdev->features, 291 + COMM_MAX_FEATURE_QWORD); 292 + if (err) 293 + return err; 294 + 295 + dev_dbg(hwdev->dev, "Comm hw features: 0x%llx\n", hwdev->features[0]); 296 + 297 + err = hinic3_get_global_attr(hwdev, &glb_attr); 298 + if (err) 299 + return err; 300 + 301 + err = hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_COMM, 1); 302 + if (err) 303 + return err; 304 + 305 + err = dma_attr_table_init(hwdev); 306 + if (err) 307 + return err; 308 + 309 + hwdev->max_cmdq = min(glb_attr.cmdq_num, HINIC3_MAX_CMDQ_TYPES); 310 + dev_dbg(hwdev->dev, 311 + "global attribute: max_host: 0x%x, max_pf: 0x%x, vf_id_start: 0x%x, mgmt node id: 0x%x, cmdq_num: 0x%x\n", 312 + glb_attr.max_host_num, glb_attr.max_pf_num, 313 + glb_attr.vf_id_start, glb_attr.mgmt_host_node_id, 314 + glb_attr.cmdq_num); 315 + 316 + return 0; 317 + } 318 + 319 + static int hinic3_comm_cmdqs_init(struct hinic3_hwdev *hwdev) 320 + { 321 + int err; 322 + 323 + err = hinic3_cmdqs_init(hwdev); 324 + if (err) { 325 + dev_err(hwdev->dev, "Failed to init cmd queues\n"); 326 + return err; 327 + } 328 + 329 + hinic3_ceq_register_cb(hwdev, HINIC3_CMDQ, hinic3_cmdq_ceq_handler); 330 + 331 + err = hinic3_set_cmdq_depth(hwdev, CMDQ_DEPTH); 332 + if (err) { 333 + dev_err(hwdev->dev, "Failed 
to set cmdq depth\n"); 334 + goto err_free_cmdqs; 335 + } 336 + 337 + set_bit(HINIC3_HWDEV_CMDQ_INITED, &hwdev->func_state); 338 + 339 + return 0; 340 + 341 + err_free_cmdqs: 342 + hinic3_cmdqs_free(hwdev); 343 + 344 + return err; 345 + } 346 + 347 + static void hinic3_comm_cmdqs_free(struct hinic3_hwdev *hwdev) 348 + { 349 + spin_lock_bh(&hwdev->channel_lock); 350 + clear_bit(HINIC3_HWDEV_CMDQ_INITED, &hwdev->func_state); 351 + spin_unlock_bh(&hwdev->channel_lock); 352 + 353 + hinic3_ceq_unregister_cb(hwdev, HINIC3_CMDQ); 354 + hinic3_cmdqs_free(hwdev); 355 + } 356 + 357 + static int init_cmdqs_channel(struct hinic3_hwdev *hwdev) 358 + { 359 + int err; 360 + 361 + err = hinic3_comm_ceqs_init(hwdev); 362 + if (err) { 363 + dev_err(hwdev->dev, "Failed to init completion event queues\n"); 364 + return err; 365 + } 366 + 367 + err = init_ceqs_msix_attr(hwdev); 368 + if (err) { 369 + dev_err(hwdev->dev, "Failed to init ceqs msix attr\n"); 370 + goto err_free_ceqs; 371 + } 372 + 373 + hwdev->wq_page_size = HINIC3_MIN_PAGE_SIZE << HINIC3_WQ_PAGE_SIZE_ORDER; 374 + err = hinic3_set_wq_page_size(hwdev, hinic3_global_func_id(hwdev), 375 + hwdev->wq_page_size); 376 + if (err) { 377 + dev_err(hwdev->dev, "Failed to set wq page size\n"); 378 + goto err_free_ceqs; 379 + } 380 + 381 + err = hinic3_comm_cmdqs_init(hwdev); 382 + if (err) { 383 + dev_err(hwdev->dev, "Failed to init cmd queues\n"); 384 + goto err_reset_wq_page_size; 385 + } 386 + 387 + return 0; 388 + 389 + err_reset_wq_page_size: 390 + hinic3_set_wq_page_size(hwdev, hinic3_global_func_id(hwdev), 391 + HINIC3_MIN_PAGE_SIZE); 392 + err_free_ceqs: 393 + hinic3_ceqs_free(hwdev); 394 + 395 + return err; 396 + } 397 + 398 + static void hinic3_free_cmdqs_channel(struct hinic3_hwdev *hwdev) 399 + { 400 + hinic3_comm_cmdqs_free(hwdev); 401 + hinic3_ceqs_free(hwdev); 402 + } 403 + 404 + static int hinic3_init_comm_ch(struct hinic3_hwdev *hwdev) 405 + { 406 + int err; 407 + 408 + err = init_basic_mgmt_channel(hwdev); 409 + if 
(err) 410 + return err; 411 + 412 + err = init_basic_attributes(hwdev); 413 + if (err) 414 + goto err_free_basic_mgmt_ch; 415 + 416 + err = init_cmdqs_channel(hwdev); 417 + if (err) { 418 + dev_err(hwdev->dev, "Failed to init cmdq channel\n"); 419 + goto err_clear_func_svc_used_state; 420 + } 421 + 422 + return 0; 423 + 424 + err_clear_func_svc_used_state: 425 + hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_COMM, 0); 426 + err_free_basic_mgmt_ch: 427 + free_base_mgmt_channel(hwdev); 428 + 429 + return err; 430 + } 431 + 432 + static void hinic3_uninit_comm_ch(struct hinic3_hwdev *hwdev) 433 + { 434 + hinic3_free_cmdqs_channel(hwdev); 435 + hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_COMM, 0); 436 + free_base_mgmt_channel(hwdev); 437 + } 438 + 439 + static DEFINE_IDA(hinic3_adev_ida); 440 + 441 + static int hinic3_adev_idx_alloc(void) 442 + { 443 + return ida_alloc(&hinic3_adev_ida, GFP_KERNEL); 444 + } 445 + 446 + static void hinic3_adev_idx_free(int id) 447 + { 448 + ida_free(&hinic3_adev_ida, id); 449 + } 450 + 10 451 int hinic3_init_hwdev(struct pci_dev *pdev) 11 452 { 12 - /* Completed by later submission due to LoC limit. 
*/ 13 - return -EFAULT; 453 + struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev); 454 + struct hinic3_hwdev *hwdev; 455 + int err; 456 + 457 + hwdev = kzalloc(sizeof(*hwdev), GFP_KERNEL); 458 + if (!hwdev) 459 + return -ENOMEM; 460 + 461 + pci_adapter->hwdev = hwdev; 462 + hwdev->adapter = pci_adapter; 463 + hwdev->pdev = pci_adapter->pdev; 464 + hwdev->dev = &pci_adapter->pdev->dev; 465 + hwdev->func_state = 0; 466 + hwdev->dev_id = hinic3_adev_idx_alloc(); 467 + spin_lock_init(&hwdev->channel_lock); 468 + 469 + err = hinic3_init_hwif(hwdev); 470 + if (err) { 471 + dev_err(hwdev->dev, "Failed to init hwif\n"); 472 + goto err_free_hwdev; 473 + } 474 + 475 + hwdev->workq = alloc_workqueue(HINIC3_HWDEV_WQ_NAME, WQ_MEM_RECLAIM, 476 + HINIC3_WQ_MAX_REQ); 477 + if (!hwdev->workq) { 478 + dev_err(hwdev->dev, "Failed to alloc hardware workq\n"); 479 + err = -ENOMEM; 480 + goto err_free_hwif; 481 + } 482 + 483 + err = hinic3_init_cfg_mgmt(hwdev); 484 + if (err) { 485 + dev_err(hwdev->dev, "Failed to init config mgmt\n"); 486 + goto err_destroy_workqueue; 487 + } 488 + 489 + err = hinic3_init_comm_ch(hwdev); 490 + if (err) { 491 + dev_err(hwdev->dev, "Failed to init communication channel\n"); 492 + goto err_free_cfg_mgmt; 493 + } 494 + 495 + err = hinic3_init_capability(hwdev); 496 + if (err) { 497 + dev_err(hwdev->dev, "Failed to init capability\n"); 498 + goto err_uninit_comm_ch; 499 + } 500 + 501 + err = hinic3_set_comm_features(hwdev, hwdev->features, 502 + COMM_MAX_FEATURE_QWORD); 503 + if (err) { 504 + dev_err(hwdev->dev, "Failed to set comm features\n"); 505 + goto err_uninit_comm_ch; 506 + } 507 + 508 + return 0; 509 + 510 + err_uninit_comm_ch: 511 + hinic3_uninit_comm_ch(hwdev); 512 + err_free_cfg_mgmt: 513 + hinic3_free_cfg_mgmt(hwdev); 514 + err_destroy_workqueue: 515 + destroy_workqueue(hwdev->workq); 516 + err_free_hwif: 517 + hinic3_free_hwif(hwdev); 518 + err_free_hwdev: 519 + pci_adapter->hwdev = NULL; 520 + hinic3_adev_idx_free(hwdev->dev_id); 521 + 
kfree(hwdev); 522 + 523 + return err; 14 524 } 15 525 16 526 void hinic3_free_hwdev(struct hinic3_hwdev *hwdev) 17 527 { 18 - /* Completed by later submission due to LoC limit. */ 528 + u64 drv_features[COMM_MAX_FEATURE_QWORD] = {}; 529 + 530 + hinic3_set_comm_features(hwdev, drv_features, COMM_MAX_FEATURE_QWORD); 531 + hinic3_func_rx_tx_flush(hwdev); 532 + hinic3_uninit_comm_ch(hwdev); 533 + hinic3_free_cfg_mgmt(hwdev); 534 + destroy_workqueue(hwdev->workq); 535 + hinic3_free_hwif(hwdev); 536 + hinic3_adev_idx_free(hwdev->dev_id); 537 + kfree(hwdev); 19 538 } 20 539 21 540 void hinic3_set_api_stop(struct hinic3_hwdev *hwdev) 22 541 { 23 - /* Completed by later submission due to LoC limit. */ 542 + struct hinic3_mbox *mbox; 543 + 544 + spin_lock_bh(&hwdev->channel_lock); 545 + if (test_bit(HINIC3_HWDEV_MBOX_INITED, &hwdev->func_state)) { 546 + mbox = hwdev->mbox; 547 + spin_lock(&mbox->mbox_lock); 548 + if (mbox->event_flag == MBOX_EVENT_START) 549 + mbox->event_flag = MBOX_EVENT_TIMEOUT; 550 + spin_unlock(&mbox->mbox_lock); 551 + } 552 + 553 + if (test_bit(HINIC3_HWDEV_CMDQ_INITED, &hwdev->func_state)) 554 + hinic3_cmdq_flush_sync_cmd(hwdev); 555 + 556 + spin_unlock_bh(&hwdev->channel_lock); 24 557 }
+269
drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c
··· 10 10 #include "hinic3_hwdev.h" 11 11 #include "hinic3_hwif.h" 12 12 13 + #define HINIC3_HWIF_READY_TIMEOUT 10000 14 + #define HINIC3_DB_AND_OUTBOUND_EN_TIMEOUT 60000 15 + #define HINIC3_PCIE_LINK_DOWN 0xFFFFFFFF 16 + 13 17 /* config BAR4/5 4MB, DB & DWQE both 2MB */ 14 18 #define HINIC3_DB_DWQE_SIZE 0x00400000 15 19 ··· 21 17 #define HINIC3_DB_PAGE_SIZE 0x00001000 22 18 #define HINIC3_DWQE_OFFSET 0x00000800 23 19 #define HINIC3_DB_MAX_AREAS (HINIC3_DB_DWQE_SIZE / HINIC3_DB_PAGE_SIZE) 20 + 21 + #define HINIC3_MAX_MSIX_ENTRY 2048 22 + 23 + #define HINIC3_AF0_FUNC_GLOBAL_IDX_MASK GENMASK(11, 0) 24 + #define HINIC3_AF0_P2P_IDX_MASK GENMASK(16, 12) 25 + #define HINIC3_AF0_PCI_INTF_IDX_MASK GENMASK(19, 17) 26 + #define HINIC3_AF0_FUNC_TYPE_MASK BIT(28) 27 + #define HINIC3_AF0_GET(val, member) \ 28 + FIELD_GET(HINIC3_AF0_##member##_MASK, val) 29 + 30 + #define HINIC3_AF1_AEQS_PER_FUNC_MASK GENMASK(9, 8) 31 + #define HINIC3_AF1_MGMT_INIT_STATUS_MASK BIT(30) 32 + #define HINIC3_AF1_GET(val, member) \ 33 + FIELD_GET(HINIC3_AF1_##member##_MASK, val) 34 + 35 + #define HINIC3_AF2_CEQS_PER_FUNC_MASK GENMASK(8, 0) 36 + #define HINIC3_AF2_IRQS_PER_FUNC_MASK GENMASK(26, 16) 37 + #define HINIC3_AF2_GET(val, member) \ 38 + FIELD_GET(HINIC3_AF2_##member##_MASK, val) 39 + 40 + #define HINIC3_AF4_DOORBELL_CTRL_MASK BIT(0) 41 + #define HINIC3_AF4_GET(val, member) \ 42 + FIELD_GET(HINIC3_AF4_##member##_MASK, val) 43 + #define HINIC3_AF4_SET(val, member) \ 44 + FIELD_PREP(HINIC3_AF4_##member##_MASK, val) 45 + 46 + #define HINIC3_AF5_OUTBOUND_CTRL_MASK BIT(0) 47 + #define HINIC3_AF5_GET(val, member) \ 48 + FIELD_GET(HINIC3_AF5_##member##_MASK, val) 49 + 50 + #define HINIC3_AF6_PF_STATUS_MASK GENMASK(15, 0) 51 + #define HINIC3_AF6_FUNC_MAX_SQ_MASK GENMASK(31, 23) 52 + #define HINIC3_AF6_MSIX_FLEX_EN_MASK BIT(22) 53 + #define HINIC3_AF6_GET(val, member) \ 54 + FIELD_GET(HINIC3_AF6_##member##_MASK, val) 24 55 25 56 #define HINIC3_GET_REG_ADDR(reg) ((reg) & (HINIC3_REGS_FLAG_MASK)) 26 57 
··· 76 37 void __iomem *addr = hinic3_reg_addr(hwif, reg); 77 38 78 39 iowrite32be(val, addr); 40 + } 41 + 42 + static enum hinic3_wait_return check_hwif_ready_handler(void *priv_data) 43 + { 44 + struct hinic3_hwdev *hwdev = priv_data; 45 + u32 attr1; 46 + 47 + attr1 = hinic3_hwif_read_reg(hwdev->hwif, HINIC3_CSR_FUNC_ATTR1_ADDR); 48 + 49 + return HINIC3_AF1_GET(attr1, MGMT_INIT_STATUS) ? 50 + HINIC3_WAIT_PROCESS_CPL : HINIC3_WAIT_PROCESS_WAITING; 51 + } 52 + 53 + static int wait_hwif_ready(struct hinic3_hwdev *hwdev) 54 + { 55 + return hinic3_wait_for_timeout(hwdev, check_hwif_ready_handler, 56 + HINIC3_HWIF_READY_TIMEOUT, 57 + USEC_PER_MSEC); 58 + } 59 + 60 + /* Set attr struct from HW attr values. */ 61 + static void set_hwif_attr(struct hinic3_func_attr *attr, u32 attr0, u32 attr1, 62 + u32 attr2, u32 attr3, u32 attr6) 63 + { 64 + attr->func_global_idx = HINIC3_AF0_GET(attr0, FUNC_GLOBAL_IDX); 65 + attr->port_to_port_idx = HINIC3_AF0_GET(attr0, P2P_IDX); 66 + attr->pci_intf_idx = HINIC3_AF0_GET(attr0, PCI_INTF_IDX); 67 + attr->func_type = HINIC3_AF0_GET(attr0, FUNC_TYPE); 68 + 69 + attr->num_aeqs = BIT(HINIC3_AF1_GET(attr1, AEQS_PER_FUNC)); 70 + attr->num_ceqs = HINIC3_AF2_GET(attr2, CEQS_PER_FUNC); 71 + attr->num_irqs = HINIC3_AF2_GET(attr2, IRQS_PER_FUNC); 72 + if (attr->num_irqs > HINIC3_MAX_MSIX_ENTRY) 73 + attr->num_irqs = HINIC3_MAX_MSIX_ENTRY; 74 + 75 + attr->num_sq = HINIC3_AF6_GET(attr6, FUNC_MAX_SQ); 76 + attr->msix_flex_en = HINIC3_AF6_GET(attr6, MSIX_FLEX_EN); 77 + } 78 + 79 + /* Read attributes from HW and set attribute struct. 
*/ 80 + static int init_hwif_attr(struct hinic3_hwdev *hwdev) 81 + { 82 + u32 attr0, attr1, attr2, attr3, attr6; 83 + struct hinic3_hwif *hwif; 84 + 85 + hwif = hwdev->hwif; 86 + attr0 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR0_ADDR); 87 + if (attr0 == HINIC3_PCIE_LINK_DOWN) 88 + return -EFAULT; 89 + 90 + attr1 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR1_ADDR); 91 + if (attr1 == HINIC3_PCIE_LINK_DOWN) 92 + return -EFAULT; 93 + 94 + attr2 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR2_ADDR); 95 + if (attr2 == HINIC3_PCIE_LINK_DOWN) 96 + return -EFAULT; 97 + 98 + attr3 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR3_ADDR); 99 + if (attr3 == HINIC3_PCIE_LINK_DOWN) 100 + return -EFAULT; 101 + 102 + attr6 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR6_ADDR); 103 + if (attr6 == HINIC3_PCIE_LINK_DOWN) 104 + return -EFAULT; 105 + 106 + set_hwif_attr(&hwif->attr, attr0, attr1, attr2, attr3, attr6); 107 + 108 + if (!hwif->attr.num_ceqs) { 109 + dev_err(hwdev->dev, "Ceq num cfg in fw is zero\n"); 110 + return -EFAULT; 111 + } 112 + 113 + if (!hwif->attr.num_irqs) { 114 + dev_err(hwdev->dev, 115 + "Irq num cfg in fw is zero, msix_flex_en %d\n", 116 + hwif->attr.msix_flex_en); 117 + return -EFAULT; 118 + } 119 + 120 + return 0; 121 + } 122 + 123 + static enum hinic3_doorbell_ctrl hinic3_get_doorbell_ctrl_status(struct hinic3_hwif *hwif) 124 + { 125 + u32 attr4 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR4_ADDR); 126 + 127 + return HINIC3_AF4_GET(attr4, DOORBELL_CTRL); 128 + } 129 + 130 + static enum hinic3_outbound_ctrl hinic3_get_outbound_ctrl_status(struct hinic3_hwif *hwif) 131 + { 132 + u32 attr5 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR5_ADDR); 133 + 134 + return HINIC3_AF5_GET(attr5, OUTBOUND_CTRL); 135 + } 136 + 137 + void hinic3_toggle_doorbell(struct hinic3_hwif *hwif, 138 + enum hinic3_doorbell_ctrl flag) 139 + { 140 + u32 addr, attr4; 141 + 142 + addr = HINIC3_CSR_FUNC_ATTR4_ADDR; 143 + attr4 = hinic3_hwif_read_reg(hwif, addr); 
144 + 145 + attr4 &= ~HINIC3_AF4_DOORBELL_CTRL_MASK; 146 + attr4 |= HINIC3_AF4_SET(flag, DOORBELL_CTRL); 147 + 148 + hinic3_hwif_write_reg(hwif, addr, attr4); 149 + } 150 + 151 + static int db_area_idx_init(struct hinic3_hwif *hwif, u64 db_base_phy, 152 + u8 __iomem *db_base, u64 db_dwqe_len) 153 + { 154 + struct hinic3_db_area *db_area = &hwif->db_area; 155 + u32 db_max_areas; 156 + 157 + hwif->db_base_phy = db_base_phy; 158 + hwif->db_base = db_base; 159 + hwif->db_dwqe_len = db_dwqe_len; 160 + 161 + db_max_areas = db_dwqe_len > HINIC3_DB_DWQE_SIZE ? 162 + HINIC3_DB_MAX_AREAS : db_dwqe_len / HINIC3_DB_PAGE_SIZE; 163 + db_area->db_bitmap_array = bitmap_zalloc(db_max_areas, GFP_KERNEL); 164 + if (!db_area->db_bitmap_array) 165 + return -ENOMEM; 166 + 167 + db_area->db_max_areas = db_max_areas; 168 + spin_lock_init(&db_area->idx_lock); 169 + 170 + return 0; 171 + } 172 + 173 + static void db_area_idx_free(struct hinic3_db_area *db_area) 174 + { 175 + bitmap_free(db_area->db_bitmap_array); 79 176 } 80 177 81 178 static int get_db_idx(struct hinic3_hwif *hwif, u32 *idx) ··· 300 125 hinic3_hwif_write_reg(hwif, addr, mask_bits); 301 126 } 302 127 128 + static void disable_all_msix(struct hinic3_hwdev *hwdev) 129 + { 130 + u16 num_irqs = hwdev->hwif->attr.num_irqs; 131 + u16 i; 132 + 133 + for (i = 0; i < num_irqs; i++) 134 + hinic3_set_msix_state(hwdev, i, HINIC3_MSIX_DISABLE); 135 + } 136 + 303 137 void hinic3_msix_intr_clear_resend_bit(struct hinic3_hwdev *hwdev, u16 msix_idx, 304 138 u8 clear_resend_en) 305 139 { ··· 343 159 344 160 addr = HINIC3_CSR_FUNC_MSI_CLR_WR_ADDR; 345 161 hinic3_hwif_write_reg(hwif, addr, mask_bits); 162 + } 163 + 164 + static enum hinic3_wait_return check_db_outbound_enable_handler(void *priv_data) 165 + { 166 + enum hinic3_outbound_ctrl outbound_ctrl; 167 + struct hinic3_hwif *hwif = priv_data; 168 + enum hinic3_doorbell_ctrl db_ctrl; 169 + 170 + db_ctrl = hinic3_get_doorbell_ctrl_status(hwif); 171 + outbound_ctrl = 
hinic3_get_outbound_ctrl_status(hwif); 172 + if (outbound_ctrl == ENABLE_OUTBOUND && db_ctrl == ENABLE_DOORBELL) 173 + return HINIC3_WAIT_PROCESS_CPL; 174 + 175 + return HINIC3_WAIT_PROCESS_WAITING; 176 + } 177 + 178 + static int wait_until_doorbell_and_outbound_enabled(struct hinic3_hwif *hwif) 179 + { 180 + return hinic3_wait_for_timeout(hwif, check_db_outbound_enable_handler, 181 + HINIC3_DB_AND_OUTBOUND_EN_TIMEOUT, 182 + USEC_PER_MSEC); 183 + } 184 + 185 + int hinic3_init_hwif(struct hinic3_hwdev *hwdev) 186 + { 187 + struct hinic3_pcidev *pci_adapter = hwdev->adapter; 188 + struct hinic3_hwif *hwif; 189 + u32 attr1, attr4, attr5; 190 + int err; 191 + 192 + hwif = kzalloc(sizeof(*hwif), GFP_KERNEL); 193 + if (!hwif) 194 + return -ENOMEM; 195 + 196 + hwdev->hwif = hwif; 197 + hwif->cfg_regs_base = (u8 __iomem *)pci_adapter->cfg_reg_base + 198 + HINIC3_VF_CFG_REG_OFFSET; 199 + 200 + err = db_area_idx_init(hwif, pci_adapter->db_base_phy, 201 + pci_adapter->db_base, 202 + pci_adapter->db_dwqe_len); 203 + if (err) { 204 + dev_err(hwdev->dev, "Failed to init db area.\n"); 205 + goto err_free_hwif; 206 + } 207 + 208 + err = wait_hwif_ready(hwdev); 209 + if (err) { 210 + attr1 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR1_ADDR); 211 + dev_err(hwdev->dev, "Chip status is not ready, attr1:0x%x\n", 212 + attr1); 213 + goto err_free_db_area_idx; 214 + } 215 + 216 + err = init_hwif_attr(hwdev); 217 + if (err) { 218 + dev_err(hwdev->dev, "Init hwif attr failed\n"); 219 + goto err_free_db_area_idx; 220 + } 221 + 222 + err = wait_until_doorbell_and_outbound_enabled(hwif); 223 + if (err) { 224 + attr4 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR4_ADDR); 225 + attr5 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR5_ADDR); 226 + dev_err(hwdev->dev, "HW doorbell/outbound is disabled, attr4 0x%x attr5 0x%x\n", 227 + attr4, attr5); 228 + goto err_free_db_area_idx; 229 + } 230 + 231 + disable_all_msix(hwdev); 232 + 233 + return 0; 234 + 235 + err_free_db_area_idx: 236 + 
db_area_idx_free(&hwif->db_area); 237 + err_free_hwif: 238 + kfree(hwif); 239 + 240 + return err; 241 + } 242 + 243 + void hinic3_free_hwif(struct hinic3_hwdev *hwdev) 244 + { 245 + db_area_idx_free(&hwdev->hwif->db_area); 246 + kfree(hwdev->hwif); 346 247 } 347 248 348 249 u16 hinic3_global_func_id(struct hinic3_hwdev *hwdev)
+16
drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h
··· 45 45 struct hinic3_func_attr attr; 46 46 }; 47 47 48 + enum hinic3_outbound_ctrl { 49 + ENABLE_OUTBOUND = 0x0, 50 + DISABLE_OUTBOUND = 0x1, 51 + }; 52 + 53 + enum hinic3_doorbell_ctrl { 54 + ENABLE_DOORBELL = 0, 55 + DISABLE_DOORBELL = 1, 56 + }; 57 + 48 58 enum hinic3_msix_state { 49 59 HINIC3_MSIX_ENABLE, 50 60 HINIC3_MSIX_DISABLE, ··· 68 58 u32 hinic3_hwif_read_reg(struct hinic3_hwif *hwif, u32 reg); 69 59 void hinic3_hwif_write_reg(struct hinic3_hwif *hwif, u32 reg, u32 val); 70 60 61 + void hinic3_toggle_doorbell(struct hinic3_hwif *hwif, 62 + enum hinic3_doorbell_ctrl flag); 63 + 71 64 int hinic3_alloc_db_addr(struct hinic3_hwdev *hwdev, void __iomem **db_base, 72 65 void __iomem **dwqe_base); 73 66 void hinic3_free_db_addr(struct hinic3_hwdev *hwdev, const u8 __iomem *db_base); 67 + 68 + int hinic3_init_hwif(struct hinic3_hwdev *hwdev); 69 + void hinic3_free_hwif(struct hinic3_hwdev *hwdev); 74 70 75 71 void hinic3_set_msix_state(struct hinic3_hwdev *hwdev, u16 msix_idx, 76 72 enum hinic3_msix_state flag);
+1 -1
drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
··· 42 42 { 43 43 struct hinic3_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev); 44 44 45 + netif_napi_add(nic_dev->netdev, &irq_cfg->napi, hinic3_poll); 45 46 netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id, 46 47 NETDEV_QUEUE_TYPE_RX, &irq_cfg->napi); 47 48 netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id, 48 49 NETDEV_QUEUE_TYPE_TX, &irq_cfg->napi); 49 - netif_napi_add(nic_dev->netdev, &irq_cfg->napi, hinic3_poll); 50 50 napi_enable(&irq_cfg->napi); 51 51 } 52 52
+8 -1
drivers/net/ethernet/huawei/hinic3/hinic3_lld.c
··· 8 8 #include "hinic3_hwdev.h" 9 9 #include "hinic3_lld.h" 10 10 #include "hinic3_mgmt.h" 11 + #include "hinic3_pci_id_tbl.h" 11 12 12 13 #define HINIC3_VF_PCI_CFG_REG_BAR 0 13 14 #define HINIC3_PCI_INTR_REG_BAR 2 ··· 122 121 goto err_del_adevs; 123 122 } 124 123 mutex_unlock(&pci_adapter->pdev_mutex); 124 + 125 125 return 0; 126 126 127 127 err_del_adevs: ··· 134 132 } 135 133 } 136 134 mutex_unlock(&pci_adapter->pdev_mutex); 135 + 137 136 return -ENOMEM; 138 137 } 139 138 ··· 156 153 struct hinic3_adev *hadev; 157 154 158 155 hadev = container_of(adev, struct hinic3_adev, adev); 156 + 159 157 return hadev->hwdev; 160 158 } 161 159 ··· 311 307 { 312 308 struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev); 313 309 310 + hinic3_flush_mgmt_workq(pci_adapter->hwdev); 314 311 hinic3_detach_aux_devices(pci_adapter->hwdev); 315 312 hinic3_free_hwdev(pci_adapter->hwdev); 316 313 } ··· 338 333 339 334 err_out: 340 335 dev_err(&pdev->dev, "PCIe device probe function failed\n"); 336 + 341 337 return err; 342 338 } 343 339 ··· 371 365 372 366 err_out: 373 367 dev_err(&pdev->dev, "PCIe device probe failed\n"); 368 + 374 369 return err; 375 370 } 376 371 ··· 384 377 } 385 378 386 379 static const struct pci_device_id hinic3_pci_table[] = { 387 - /* Completed by later submission due to LoC limit. */ 380 + {PCI_VDEVICE(HUAWEI, PCI_DEV_ID_HINIC3_VF), 0}, 388 381 {0, 0} 389 382 390 383 };
+7 -1
drivers/net/ethernet/huawei/hinic3/hinic3_main.c
··· 12 12 #include "hinic3_nic_cfg.h" 13 13 #include "hinic3_nic_dev.h" 14 14 #include "hinic3_nic_io.h" 15 + #include "hinic3_rss.h" 15 16 #include "hinic3_rx.h" 16 17 #include "hinic3_tx.h" 17 18 ··· 135 134 nic_dev->q_params.sq_depth = HINIC3_SQ_DEPTH; 136 135 nic_dev->q_params.rq_depth = HINIC3_RQ_DEPTH; 137 136 137 + hinic3_try_to_enable_rss(netdev); 138 + 138 139 /* VF driver always uses random MAC address. During VM migration to a 139 140 * new device, the new device should learn the VMs old MAC rather than 140 141 * provide its own MAC. The product design assumes that every VF is ··· 148 145 hinic3_global_func_id(hwdev)); 149 146 if (err) { 150 147 dev_err(hwdev->dev, "Failed to set default MAC\n"); 151 - return err; 148 + goto err_clear_rss_config; 152 149 } 153 150 154 151 err = hinic3_alloc_txrxqs(netdev); ··· 162 159 err_del_mac: 163 160 hinic3_del_mac(hwdev, netdev->dev_addr, 0, 164 161 hinic3_global_func_id(hwdev)); 162 + err_clear_rss_config: 163 + hinic3_clear_rss_config(netdev); 165 164 166 165 return err; 167 166 } ··· 175 170 hinic3_free_txrxqs(netdev); 176 171 hinic3_del_mac(nic_dev->hwdev, netdev->dev_addr, 0, 177 172 hinic3_global_func_id(nic_dev->hwdev)); 173 + hinic3_clear_rss_config(netdev); 178 174 } 179 175 180 176 static void hinic3_assign_netdev_ops(struct net_device *netdev)
+21
drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + // Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. 3 + 4 + #include "hinic3_eqs.h" 5 + #include "hinic3_hwdev.h" 6 + #include "hinic3_mbox.h" 7 + #include "hinic3_mgmt.h" 8 + 9 + void hinic3_flush_mgmt_workq(struct hinic3_hwdev *hwdev) 10 + { 11 + if (hwdev->aeqs) 12 + flush_workqueue(hwdev->aeqs->workq); 13 + } 14 + 15 + void hinic3_mgmt_msg_aeqe_handler(struct hinic3_hwdev *hwdev, u8 *header, 16 + u8 size) 17 + { 18 + if (MBOX_MSG_HEADER_GET(*(__force __le64 *)header, SOURCE) == 19 + MBOX_MSG_FROM_MBOX) 20 + hinic3_mbox_func_aeqe_handler(hwdev, header, size); 21 + }
+2
drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h
··· 9 9 struct hinic3_hwdev; 10 10 11 11 void hinic3_flush_mgmt_workq(struct hinic3_hwdev *hwdev); 12 + void hinic3_mgmt_msg_aeqe_handler(struct hinic3_hwdev *hwdev, 13 + u8 *header, u8 size); 12 14 13 15 #endif
+119
drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h
··· 56 56 u8 new_mac[ETH_ALEN]; 57 57 }; 58 58 59 + struct l2nic_cmd_set_ci_attr { 60 + struct mgmt_msg_head msg_head; 61 + u16 func_idx; 62 + u8 dma_attr_off; 63 + u8 pending_limit; 64 + u8 coalescing_time; 65 + u8 intr_en; 66 + u16 intr_idx; 67 + u32 l2nic_sqn; 68 + u32 rsvd; 69 + u64 ci_addr; 70 + }; 71 + 72 + struct l2nic_cmd_clear_qp_resource { 73 + struct mgmt_msg_head msg_head; 74 + u16 func_id; 75 + u16 rsvd1; 76 + }; 77 + 59 78 struct l2nic_cmd_force_pkt_drop { 60 79 struct mgmt_msg_head msg_head; 61 80 u8 port; 62 81 u8 rsvd1[3]; 82 + }; 83 + 84 + struct l2nic_cmd_set_vport_state { 85 + struct mgmt_msg_head msg_head; 86 + u16 func_id; 87 + u16 rsvd1; 88 + /* 0--disable, 1--enable */ 89 + u8 state; 90 + u8 rsvd2[3]; 91 + }; 92 + 93 + struct l2nic_cmd_set_dcb_state { 94 + struct mgmt_msg_head head; 95 + u16 func_id; 96 + /* 0 - get dcb state, 1 - set dcb state */ 97 + u8 op_code; 98 + /* 0 - disable, 1 - enable dcb */ 99 + u8 state; 100 + /* 0 - disable, 1 - enable dcb */ 101 + u8 port_state; 102 + u8 rsvd[7]; 103 + }; 104 + 105 + #define L2NIC_RSS_TYPE_VALID_MASK BIT(23) 106 + #define L2NIC_RSS_TYPE_TCP_IPV6_EXT_MASK BIT(24) 107 + #define L2NIC_RSS_TYPE_IPV6_EXT_MASK BIT(25) 108 + #define L2NIC_RSS_TYPE_TCP_IPV6_MASK BIT(26) 109 + #define L2NIC_RSS_TYPE_IPV6_MASK BIT(27) 110 + #define L2NIC_RSS_TYPE_TCP_IPV4_MASK BIT(28) 111 + #define L2NIC_RSS_TYPE_IPV4_MASK BIT(29) 112 + #define L2NIC_RSS_TYPE_UDP_IPV6_MASK BIT(30) 113 + #define L2NIC_RSS_TYPE_UDP_IPV4_MASK BIT(31) 114 + #define L2NIC_RSS_TYPE_SET(val, member) \ 115 + FIELD_PREP(L2NIC_RSS_TYPE_##member##_MASK, val) 116 + #define L2NIC_RSS_TYPE_GET(val, member) \ 117 + FIELD_GET(L2NIC_RSS_TYPE_##member##_MASK, val) 118 + 119 + #define L2NIC_RSS_INDIR_SIZE 256 120 + #define L2NIC_RSS_KEY_SIZE 40 121 + 122 + /* IEEE 802.1Qaz std */ 123 + #define L2NIC_DCB_COS_MAX 0x8 124 + 125 + struct l2nic_cmd_set_rss_ctx_tbl { 126 + struct mgmt_msg_head msg_head; 127 + u16 func_id; 128 + u16 rsvd1; 129 + u32 context; 130 
+ }; 131 + 132 + struct l2nic_cmd_cfg_rss_engine { 133 + struct mgmt_msg_head msg_head; 134 + u16 func_id; 135 + u8 opcode; 136 + u8 hash_engine; 137 + u8 rsvd1[4]; 138 + }; 139 + 140 + struct l2nic_cmd_cfg_rss_hash_key { 141 + struct mgmt_msg_head msg_head; 142 + u16 func_id; 143 + u8 opcode; 144 + u8 rsvd1; 145 + u8 key[L2NIC_RSS_KEY_SIZE]; 146 + }; 147 + 148 + struct l2nic_cmd_cfg_rss { 149 + struct mgmt_msg_head msg_head; 150 + u16 func_id; 151 + u8 rss_en; 152 + u8 rq_priority_number; 153 + u8 prio_tc[L2NIC_DCB_COS_MAX]; 154 + u16 num_qps; 155 + u16 rsvd1; 63 156 }; 64 157 65 158 /* Commands between NIC to fw */ ··· 173 80 L2NIC_CMD_QOS_DCB_STATE = 110, 174 81 L2NIC_CMD_FORCE_PKT_DROP = 113, 175 82 L2NIC_CMD_MAX = 256, 83 + }; 84 + 85 + struct l2nic_cmd_rss_set_indir_tbl { 86 + __le32 rsvd[4]; 87 + __le16 entry[L2NIC_RSS_INDIR_SIZE]; 88 + }; 89 + 90 + /* NIC CMDQ MODE */ 91 + enum l2nic_ucode_cmd { 92 + L2NIC_UCODE_CMD_MODIFY_QUEUE_CTX = 0, 93 + L2NIC_UCODE_CMD_CLEAN_QUEUE_CTX = 1, 94 + L2NIC_UCODE_CMD_SET_RSS_INDIR_TBL = 4, 95 + }; 96 + 97 + /* hilink mac group command */ 98 + enum mag_cmd { 99 + MAG_CMD_GET_LINK_STATUS = 7, 100 + }; 101 + 102 + /* firmware also use this cmd report link event to driver */ 103 + struct mag_cmd_get_link_status { 104 + struct mgmt_msg_head head; 105 + u8 port_id; 106 + /* 0:link down 1:link up */ 107 + u8 status; 108 + u8 rsvd0[2]; 176 109 }; 177 110 178 111 enum hinic3_nic_feature_cap {
+422 -4
drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c
··· 8 8 #include "hinic3_nic_cfg.h" 9 9 #include "hinic3_nic_dev.h" 10 10 #include "hinic3_nic_io.h" 11 + #include "hinic3_rss.h" 11 12 #include "hinic3_rx.h" 12 13 #include "hinic3_tx.h" 13 14 15 + /* try to modify the number of irq to the target number, 16 + * and return the actual number of irq. 17 + */ 18 + static u16 hinic3_qp_irq_change(struct net_device *netdev, 19 + u16 dst_num_qp_irq) 20 + { 21 + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); 22 + struct msix_entry *qps_msix_entries; 23 + u16 resp_irq_num, irq_num_gap, i; 24 + u16 idx; 25 + int err; 26 + 27 + qps_msix_entries = nic_dev->qps_msix_entries; 28 + if (dst_num_qp_irq > nic_dev->num_qp_irq) { 29 + irq_num_gap = dst_num_qp_irq - nic_dev->num_qp_irq; 30 + err = hinic3_alloc_irqs(nic_dev->hwdev, irq_num_gap, 31 + &qps_msix_entries[nic_dev->num_qp_irq], 32 + &resp_irq_num); 33 + if (err) { 34 + netdev_err(netdev, "Failed to alloc irqs\n"); 35 + return nic_dev->num_qp_irq; 36 + } 37 + 38 + nic_dev->num_qp_irq += resp_irq_num; 39 + } else if (dst_num_qp_irq < nic_dev->num_qp_irq) { 40 + irq_num_gap = nic_dev->num_qp_irq - dst_num_qp_irq; 41 + for (i = 0; i < irq_num_gap; i++) { 42 + idx = (nic_dev->num_qp_irq - i) - 1; 43 + hinic3_free_irq(nic_dev->hwdev, 44 + qps_msix_entries[idx].vector); 45 + qps_msix_entries[idx].vector = 0; 46 + qps_msix_entries[idx].entry = 0; 47 + } 48 + nic_dev->num_qp_irq = dst_num_qp_irq; 49 + } 50 + 51 + return nic_dev->num_qp_irq; 52 + } 53 + 54 + static void hinic3_config_num_qps(struct net_device *netdev, 55 + struct hinic3_dyna_txrxq_params *q_params) 56 + { 57 + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); 58 + u16 alloc_num_irq, cur_num_irq; 59 + u16 dst_num_irq; 60 + 61 + if (!test_bit(HINIC3_RSS_ENABLE, &nic_dev->flags)) 62 + q_params->num_qps = 1; 63 + 64 + if (nic_dev->num_qp_irq >= q_params->num_qps) 65 + goto out; 66 + 67 + cur_num_irq = nic_dev->num_qp_irq; 68 + 69 + alloc_num_irq = hinic3_qp_irq_change(netdev, q_params->num_qps); 70 + if 
(alloc_num_irq < q_params->num_qps) { 71 + q_params->num_qps = alloc_num_irq; 72 + netdev_warn(netdev, "Can not get enough irqs, adjust num_qps to %u\n", 73 + q_params->num_qps); 74 + 75 + /* The current irq may be in use, we must keep it */ 76 + dst_num_irq = max_t(u16, cur_num_irq, q_params->num_qps); 77 + hinic3_qp_irq_change(netdev, dst_num_irq); 78 + } 79 + 80 + out: 81 + netdev_dbg(netdev, "No need to change irqs, num_qps is %u\n", 82 + q_params->num_qps); 83 + } 84 + 85 + static int hinic3_setup_num_qps(struct net_device *netdev) 86 + { 87 + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); 88 + 89 + nic_dev->num_qp_irq = 0; 90 + 91 + nic_dev->qps_msix_entries = kcalloc(nic_dev->max_qps, 92 + sizeof(struct msix_entry), 93 + GFP_KERNEL); 94 + if (!nic_dev->qps_msix_entries) 95 + return -ENOMEM; 96 + 97 + hinic3_config_num_qps(netdev, &nic_dev->q_params); 98 + 99 + return 0; 100 + } 101 + 102 + static void hinic3_destroy_num_qps(struct net_device *netdev) 103 + { 104 + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); 105 + u16 i; 106 + 107 + for (i = 0; i < nic_dev->num_qp_irq; i++) 108 + hinic3_free_irq(nic_dev->hwdev, 109 + nic_dev->qps_msix_entries[i].vector); 110 + 111 + kfree(nic_dev->qps_msix_entries); 112 + } 113 + 114 + static int hinic3_alloc_txrxq_resources(struct net_device *netdev, 115 + struct hinic3_dyna_txrxq_params *q_params) 116 + { 117 + int err; 118 + 119 + q_params->txqs_res = kcalloc(q_params->num_qps, 120 + sizeof(*q_params->txqs_res), GFP_KERNEL); 121 + if (!q_params->txqs_res) 122 + return -ENOMEM; 123 + 124 + q_params->rxqs_res = kcalloc(q_params->num_qps, 125 + sizeof(*q_params->rxqs_res), GFP_KERNEL); 126 + if (!q_params->rxqs_res) { 127 + err = -ENOMEM; 128 + goto err_free_txqs_res_arr; 129 + } 130 + 131 + q_params->irq_cfg = kcalloc(q_params->num_qps, 132 + sizeof(*q_params->irq_cfg), GFP_KERNEL); 133 + if (!q_params->irq_cfg) { 134 + err = -ENOMEM; 135 + goto err_free_rxqs_res_arr; 136 + } 137 + 138 + err = 
hinic3_alloc_txqs_res(netdev, q_params->num_qps, 139 + q_params->sq_depth, q_params->txqs_res); 140 + if (err) { 141 + netdev_err(netdev, "Failed to alloc txqs resource\n"); 142 + goto err_free_irq_cfg; 143 + } 144 + 145 + err = hinic3_alloc_rxqs_res(netdev, q_params->num_qps, 146 + q_params->rq_depth, q_params->rxqs_res); 147 + if (err) { 148 + netdev_err(netdev, "Failed to alloc rxqs resource\n"); 149 + goto err_free_txqs_res; 150 + } 151 + 152 + return 0; 153 + 154 + err_free_txqs_res: 155 + hinic3_free_txqs_res(netdev, q_params->num_qps, q_params->sq_depth, 156 + q_params->txqs_res); 157 + err_free_irq_cfg: 158 + kfree(q_params->irq_cfg); 159 + q_params->irq_cfg = NULL; 160 + err_free_rxqs_res_arr: 161 + kfree(q_params->rxqs_res); 162 + q_params->rxqs_res = NULL; 163 + err_free_txqs_res_arr: 164 + kfree(q_params->txqs_res); 165 + q_params->txqs_res = NULL; 166 + 167 + return err; 168 + } 169 + 170 + static void hinic3_free_txrxq_resources(struct net_device *netdev, 171 + struct hinic3_dyna_txrxq_params *q_params) 172 + { 173 + hinic3_free_rxqs_res(netdev, q_params->num_qps, q_params->rq_depth, 174 + q_params->rxqs_res); 175 + hinic3_free_txqs_res(netdev, q_params->num_qps, q_params->sq_depth, 176 + q_params->txqs_res); 177 + 178 + kfree(q_params->irq_cfg); 179 + q_params->irq_cfg = NULL; 180 + 181 + kfree(q_params->rxqs_res); 182 + q_params->rxqs_res = NULL; 183 + 184 + kfree(q_params->txqs_res); 185 + q_params->txqs_res = NULL; 186 + } 187 + 188 + static int hinic3_configure_txrxqs(struct net_device *netdev, 189 + struct hinic3_dyna_txrxq_params *q_params) 190 + { 191 + int err; 192 + 193 + err = hinic3_configure_txqs(netdev, q_params->num_qps, 194 + q_params->sq_depth, q_params->txqs_res); 195 + if (err) { 196 + netdev_err(netdev, "Failed to configure txqs\n"); 197 + return err; 198 + } 199 + 200 + err = hinic3_configure_rxqs(netdev, q_params->num_qps, 201 + q_params->rq_depth, q_params->rxqs_res); 202 + if (err) { 203 + netdev_err(netdev, "Failed to 
configure rxqs\n"); 204 + return err; 205 + } 206 + 207 + return 0; 208 + } 209 + 210 + static int hinic3_configure(struct net_device *netdev) 211 + { 212 + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); 213 + int err; 214 + 215 + netdev->min_mtu = HINIC3_MIN_MTU_SIZE; 216 + netdev->max_mtu = HINIC3_MAX_JUMBO_FRAME_SIZE; 217 + err = hinic3_set_port_mtu(netdev, netdev->mtu); 218 + if (err) { 219 + netdev_err(netdev, "Failed to set mtu\n"); 220 + return err; 221 + } 222 + 223 + /* Ensure DCB is disabled */ 224 + hinic3_sync_dcb_state(nic_dev->hwdev, 1, 0); 225 + 226 + if (test_bit(HINIC3_RSS_ENABLE, &nic_dev->flags)) { 227 + err = hinic3_rss_init(netdev); 228 + if (err) { 229 + netdev_err(netdev, "Failed to init rss\n"); 230 + return err; 231 + } 232 + } 233 + 234 + return 0; 235 + } 236 + 237 + static void hinic3_remove_configure(struct net_device *netdev) 238 + { 239 + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); 240 + 241 + if (test_bit(HINIC3_RSS_ENABLE, &nic_dev->flags)) 242 + hinic3_rss_uninit(netdev); 243 + } 244 + 245 + static int hinic3_alloc_channel_resources(struct net_device *netdev, 246 + struct hinic3_dyna_qp_params *qp_params, 247 + struct hinic3_dyna_txrxq_params *trxq_params) 248 + { 249 + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); 250 + int err; 251 + 252 + qp_params->num_qps = trxq_params->num_qps; 253 + qp_params->sq_depth = trxq_params->sq_depth; 254 + qp_params->rq_depth = trxq_params->rq_depth; 255 + 256 + err = hinic3_alloc_qps(nic_dev, qp_params); 257 + if (err) { 258 + netdev_err(netdev, "Failed to alloc qps\n"); 259 + return err; 260 + } 261 + 262 + err = hinic3_alloc_txrxq_resources(netdev, trxq_params); 263 + if (err) { 264 + netdev_err(netdev, "Failed to alloc txrxq resources\n"); 265 + hinic3_free_qps(nic_dev, qp_params); 266 + return err; 267 + } 268 + 269 + return 0; 270 + } 271 + 272 + static void hinic3_free_channel_resources(struct net_device *netdev, 273 + struct hinic3_dyna_qp_params *qp_params, 274 + 
struct hinic3_dyna_txrxq_params *trxq_params) 275 + { 276 + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); 277 + 278 + hinic3_free_txrxq_resources(netdev, trxq_params); 279 + hinic3_free_qps(nic_dev, qp_params); 280 + } 281 + 282 + static int hinic3_open_channel(struct net_device *netdev) 283 + { 284 + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); 285 + int err; 286 + 287 + err = hinic3_init_qp_ctxts(nic_dev); 288 + if (err) { 289 + netdev_err(netdev, "Failed to init qps\n"); 290 + return err; 291 + } 292 + 293 + err = hinic3_configure_txrxqs(netdev, &nic_dev->q_params); 294 + if (err) { 295 + netdev_err(netdev, "Failed to configure txrxqs\n"); 296 + goto err_free_qp_ctxts; 297 + } 298 + 299 + err = hinic3_qps_irq_init(netdev); 300 + if (err) { 301 + netdev_err(netdev, "Failed to init txrxq irq\n"); 302 + goto err_free_qp_ctxts; 303 + } 304 + 305 + err = hinic3_configure(netdev); 306 + if (err) { 307 + netdev_err(netdev, "Failed to init txrxq irq\n"); 308 + goto err_uninit_qps_irq; 309 + } 310 + 311 + return 0; 312 + 313 + err_uninit_qps_irq: 314 + hinic3_qps_irq_uninit(netdev); 315 + err_free_qp_ctxts: 316 + hinic3_free_qp_ctxts(nic_dev); 317 + 318 + return err; 319 + } 320 + 321 + static void hinic3_close_channel(struct net_device *netdev) 322 + { 323 + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); 324 + 325 + hinic3_remove_configure(netdev); 326 + hinic3_qps_irq_uninit(netdev); 327 + hinic3_free_qp_ctxts(nic_dev); 328 + } 329 + 330 + static int hinic3_vport_up(struct net_device *netdev) 331 + { 332 + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); 333 + bool link_status_up; 334 + u16 glb_func_id; 335 + int err; 336 + 337 + glb_func_id = hinic3_global_func_id(nic_dev->hwdev); 338 + err = hinic3_set_vport_enable(nic_dev->hwdev, glb_func_id, true); 339 + if (err) { 340 + netdev_err(netdev, "Failed to enable vport\n"); 341 + goto err_flush_qps_res; 342 + } 343 + 344 + err = netif_set_real_num_queues(netdev, nic_dev->q_params.num_qps, 345 
+ nic_dev->q_params.num_qps); 346 + if (err) { 347 + netdev_err(netdev, "Failed to set real number of queues\n"); 348 + goto err_flush_qps_res; 349 + } 350 + netif_tx_start_all_queues(netdev); 351 + 352 + err = hinic3_get_link_status(nic_dev->hwdev, &link_status_up); 353 + if (!err && link_status_up) 354 + netif_carrier_on(netdev); 355 + 356 + return 0; 357 + 358 + err_flush_qps_res: 359 + hinic3_flush_qps_res(nic_dev->hwdev); 360 + /* wait to guarantee that no packets will be sent to host */ 361 + msleep(100); 362 + 363 + return err; 364 + } 365 + 366 + static void hinic3_vport_down(struct net_device *netdev) 367 + { 368 + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); 369 + u16 glb_func_id; 370 + 371 + netif_carrier_off(netdev); 372 + netif_tx_disable(netdev); 373 + 374 + glb_func_id = hinic3_global_func_id(nic_dev->hwdev); 375 + hinic3_set_vport_enable(nic_dev->hwdev, glb_func_id, false); 376 + 377 + hinic3_flush_txqs(netdev); 378 + /* wait to guarantee that no packets will be sent to host */ 379 + msleep(100); 380 + hinic3_flush_qps_res(nic_dev->hwdev); 381 + } 382 + 14 383 static int hinic3_open(struct net_device *netdev) 15 384 { 16 - /* Completed by later submission due to LoC limit. 
*/ 17 - return -EFAULT; 385 + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); 386 + struct hinic3_dyna_qp_params qp_params; 387 + int err; 388 + 389 + err = hinic3_init_nicio_res(nic_dev); 390 + if (err) { 391 + netdev_err(netdev, "Failed to init nicio resources\n"); 392 + return err; 393 + } 394 + 395 + err = hinic3_setup_num_qps(netdev); 396 + if (err) { 397 + netdev_err(netdev, "Failed to setup num_qps\n"); 398 + goto err_free_nicio_res; 399 + } 400 + 401 + err = hinic3_alloc_channel_resources(netdev, &qp_params, 402 + &nic_dev->q_params); 403 + if (err) 404 + goto err_destroy_num_qps; 405 + 406 + hinic3_init_qps(nic_dev, &qp_params); 407 + 408 + err = hinic3_open_channel(netdev); 409 + if (err) 410 + goto err_uninit_qps; 411 + 412 + err = hinic3_vport_up(netdev); 413 + if (err) 414 + goto err_close_channel; 415 + 416 + return 0; 417 + 418 + err_close_channel: 419 + hinic3_close_channel(netdev); 420 + err_uninit_qps: 421 + hinic3_uninit_qps(nic_dev, &qp_params); 422 + hinic3_free_channel_resources(netdev, &qp_params, &nic_dev->q_params); 423 + err_destroy_num_qps: 424 + hinic3_destroy_num_qps(netdev); 425 + err_free_nicio_res: 426 + hinic3_free_nicio_res(nic_dev); 427 + 428 + return err; 18 429 } 19 430 20 431 static int hinic3_close(struct net_device *netdev) 21 432 { 22 - /* Completed by later submission due to LoC limit. */ 23 - return -EFAULT; 433 + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); 434 + struct hinic3_dyna_qp_params qp_params; 435 + 436 + hinic3_vport_down(netdev); 437 + hinic3_close_channel(netdev); 438 + hinic3_uninit_qps(nic_dev, &qp_params); 439 + hinic3_free_channel_resources(netdev, &qp_params, &nic_dev->q_params); 440 + 441 + return 0; 24 442 } 25 443 26 444 static int hinic3_change_mtu(struct net_device *netdev, int new_mtu)
+152
drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c
··· 39 39 return 0; 40 40 } 41 41 42 + int hinic3_get_nic_feature_from_hw(struct hinic3_nic_dev *nic_dev) 43 + { 44 + return hinic3_feature_nego(nic_dev->hwdev, MGMT_MSG_CMD_OP_GET, 45 + &nic_dev->nic_io->feature_cap, 1); 46 + } 47 + 42 48 int hinic3_set_nic_feature_to_hw(struct hinic3_nic_dev *nic_dev) 43 49 { 44 50 return hinic3_feature_nego(nic_dev->hwdev, MGMT_MSG_CMD_OP_SET, ··· 88 82 return 0; 89 83 } 90 84 85 + int hinic3_init_function_table(struct hinic3_nic_dev *nic_dev) 86 + { 87 + struct hinic3_nic_io *nic_io = nic_dev->nic_io; 88 + struct l2nic_func_tbl_cfg func_tbl_cfg = {}; 89 + u32 cfg_bitmap; 90 + 91 + func_tbl_cfg.mtu = 0x3FFF; /* default, max mtu */ 92 + func_tbl_cfg.rx_wqe_buf_size = nic_io->rx_buf_len; 93 + 94 + cfg_bitmap = BIT(L2NIC_FUNC_TBL_CFG_INIT) | 95 + BIT(L2NIC_FUNC_TBL_CFG_MTU) | 96 + BIT(L2NIC_FUNC_TBL_CFG_RX_BUF_SIZE); 97 + 98 + return hinic3_set_function_table(nic_dev->hwdev, cfg_bitmap, 99 + &func_tbl_cfg); 100 + } 101 + 91 102 int hinic3_set_port_mtu(struct net_device *netdev, u16 new_mtu) 92 103 { 93 104 struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); ··· 112 89 struct hinic3_hwdev *hwdev = nic_dev->hwdev; 113 90 114 91 func_tbl_cfg.mtu = new_mtu; 92 + 115 93 return hinic3_set_function_table(hwdev, BIT(L2NIC_FUNC_TBL_CFG_MTU), 116 94 &func_tbl_cfg); 117 95 } ··· 230 206 err, mac_info.msg_head.status); 231 207 return -EIO; 232 208 } 209 + 210 + return 0; 211 + } 212 + 213 + int hinic3_set_ci_table(struct hinic3_hwdev *hwdev, struct hinic3_sq_attr *attr) 214 + { 215 + struct l2nic_cmd_set_ci_attr cons_idx_attr = {}; 216 + struct mgmt_msg_params msg_params = {}; 217 + int err; 218 + 219 + cons_idx_attr.func_idx = hinic3_global_func_id(hwdev); 220 + cons_idx_attr.dma_attr_off = attr->dma_attr_off; 221 + cons_idx_attr.pending_limit = attr->pending_limit; 222 + cons_idx_attr.coalescing_time = attr->coalescing_time; 223 + 224 + if (attr->intr_en) { 225 + cons_idx_attr.intr_en = attr->intr_en; 226 + cons_idx_attr.intr_idx = 
attr->intr_idx; 227 + } 228 + 229 + cons_idx_attr.l2nic_sqn = attr->l2nic_sqn; 230 + cons_idx_attr.ci_addr = attr->ci_dma_base; 231 + 232 + mgmt_msg_params_init_default(&msg_params, &cons_idx_attr, 233 + sizeof(cons_idx_attr)); 234 + 235 + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC, 236 + L2NIC_CMD_SET_SQ_CI_ATTR, &msg_params); 237 + if (err || cons_idx_attr.msg_head.status) { 238 + dev_err(hwdev->dev, 239 + "Failed to set ci attribute table, err: %d, status: 0x%x\n", 240 + err, cons_idx_attr.msg_head.status); 241 + return -EFAULT; 242 + } 243 + 244 + return 0; 245 + } 246 + 247 + int hinic3_flush_qps_res(struct hinic3_hwdev *hwdev) 248 + { 249 + struct l2nic_cmd_clear_qp_resource sq_res = {}; 250 + struct mgmt_msg_params msg_params = {}; 251 + int err; 252 + 253 + sq_res.func_id = hinic3_global_func_id(hwdev); 254 + 255 + mgmt_msg_params_init_default(&msg_params, &sq_res, sizeof(sq_res)); 256 + 257 + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC, 258 + L2NIC_CMD_CLEAR_QP_RESOURCE, 259 + &msg_params); 260 + if (err || sq_res.msg_head.status) { 261 + dev_err(hwdev->dev, "Failed to clear sq resources, err: %d, status: 0x%x\n", 262 + err, sq_res.msg_head.status); 263 + return -EINVAL; 264 + } 265 + 233 266 return 0; 234 267 } 235 268 ··· 311 230 } 312 231 313 232 return pkt_drop.msg_head.status; 233 + } 234 + 235 + int hinic3_sync_dcb_state(struct hinic3_hwdev *hwdev, u8 op_code, u8 state) 236 + { 237 + struct l2nic_cmd_set_dcb_state dcb_state = {}; 238 + struct mgmt_msg_params msg_params = {}; 239 + int err; 240 + 241 + dcb_state.op_code = op_code; 242 + dcb_state.state = state; 243 + dcb_state.func_id = hinic3_global_func_id(hwdev); 244 + 245 + mgmt_msg_params_init_default(&msg_params, &dcb_state, 246 + sizeof(dcb_state)); 247 + 248 + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC, 249 + L2NIC_CMD_QOS_DCB_STATE, &msg_params); 250 + if (err || dcb_state.head.status) { 251 + dev_err(hwdev->dev, 252 + "Failed to set dcb state, err: %d, status: 
0x%x\n", 253 + err, dcb_state.head.status); 254 + return -EFAULT; 255 + } 256 + 257 + return 0; 258 + } 259 + 260 + int hinic3_get_link_status(struct hinic3_hwdev *hwdev, bool *link_status_up) 261 + { 262 + struct mag_cmd_get_link_status get_link = {}; 263 + struct mgmt_msg_params msg_params = {}; 264 + int err; 265 + 266 + get_link.port_id = hinic3_physical_port_id(hwdev); 267 + 268 + mgmt_msg_params_init_default(&msg_params, &get_link, sizeof(get_link)); 269 + 270 + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_HILINK, 271 + MAG_CMD_GET_LINK_STATUS, &msg_params); 272 + if (err || get_link.head.status) { 273 + dev_err(hwdev->dev, "Failed to get link state, err: %d, status: 0x%x\n", 274 + err, get_link.head.status); 275 + return -EIO; 276 + } 277 + 278 + *link_status_up = !!get_link.status; 279 + 280 + return 0; 281 + } 282 + 283 + int hinic3_set_vport_enable(struct hinic3_hwdev *hwdev, u16 func_id, 284 + bool enable) 285 + { 286 + struct l2nic_cmd_set_vport_state en_state = {}; 287 + struct mgmt_msg_params msg_params = {}; 288 + int err; 289 + 290 + en_state.func_id = func_id; 291 + en_state.state = enable ? 1 : 0; 292 + 293 + mgmt_msg_params_init_default(&msg_params, &en_state, sizeof(en_state)); 294 + 295 + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC, 296 + L2NIC_CMD_SET_VPORT_ENABLE, &msg_params); 297 + if (err || en_state.msg_head.status) { 298 + dev_err(hwdev->dev, "Failed to set vport state, err: %d, status: 0x%x\n", 299 + err, en_state.msg_head.status); 300 + return -EINVAL; 301 + } 302 + 303 + return 0; 314 304 }
+20
drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h
··· 22 22 HINIC3_NIC_EVENT_LINK_UP = 1, 23 23 }; 24 24 25 + struct hinic3_sq_attr { 26 + u8 dma_attr_off; 27 + u8 pending_limit; 28 + u8 coalescing_time; 29 + u8 intr_en; 30 + u16 intr_idx; 31 + u32 l2nic_sqn; 32 + u64 ci_dma_base; 33 + }; 34 + 35 + int hinic3_get_nic_feature_from_hw(struct hinic3_nic_dev *nic_dev); 25 36 int hinic3_set_nic_feature_to_hw(struct hinic3_nic_dev *nic_dev); 26 37 bool hinic3_test_support(struct hinic3_nic_dev *nic_dev, 27 38 enum hinic3_nic_feature_cap feature_bits); 28 39 void hinic3_update_nic_feature(struct hinic3_nic_dev *nic_dev, u64 feature_cap); 29 40 41 + int hinic3_init_function_table(struct hinic3_nic_dev *nic_dev); 30 42 int hinic3_set_port_mtu(struct net_device *netdev, u16 new_mtu); 31 43 32 44 int hinic3_set_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id, ··· 48 36 int hinic3_update_mac(struct hinic3_hwdev *hwdev, const u8 *old_mac, 49 37 u8 *new_mac, u16 vlan_id, u16 func_id); 50 38 39 + int hinic3_set_ci_table(struct hinic3_hwdev *hwdev, 40 + struct hinic3_sq_attr *attr); 41 + int hinic3_flush_qps_res(struct hinic3_hwdev *hwdev); 51 42 int hinic3_force_drop_tx_pkt(struct hinic3_hwdev *hwdev); 43 + 44 + int hinic3_sync_dcb_state(struct hinic3_hwdev *hwdev, u8 op_code, u8 state); 45 + int hinic3_get_link_status(struct hinic3_hwdev *hwdev, bool *link_status_up); 46 + int hinic3_set_vport_enable(struct hinic3_hwdev *hwdev, u16 func_id, 47 + bool enable); 52 48 53 49 #endif
+5
drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h
··· 73 73 struct hinic3_txq *txqs; 74 74 struct hinic3_rxq *rxqs; 75 75 76 + enum hinic3_rss_hash_type rss_hash_type; 77 + struct hinic3_rss_type rss_type; 78 + u8 *rss_hkey; 79 + u16 *rss_indir; 80 + 76 81 u16 num_qp_irq; 77 82 struct msix_entry *qps_msix_entries; 78 83
+867 -3
drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 // Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. 3 3 4 + #include "hinic3_cmdq.h" 4 5 #include "hinic3_hw_comm.h" 5 6 #include "hinic3_hw_intf.h" 6 7 #include "hinic3_hwdev.h" ··· 10 9 #include "hinic3_nic_dev.h" 11 10 #include "hinic3_nic_io.h" 12 11 12 + #define HINIC3_DEFAULT_TX_CI_PENDING_LIMIT 1 13 + #define HINIC3_DEFAULT_TX_CI_COALESCING_TIME 1 14 + #define HINIC3_DEFAULT_DROP_THD_ON (0xFFFF) 15 + #define HINIC3_DEFAULT_DROP_THD_OFF 0 16 + 17 + #define HINIC3_CI_Q_ADDR_SIZE (64) 18 + 19 + #define HINIC3_CI_TABLE_SIZE(num_qps) \ 20 + (ALIGN((num_qps) * HINIC3_CI_Q_ADDR_SIZE, HINIC3_MIN_PAGE_SIZE)) 21 + 22 + #define HINIC3_CI_VADDR(base_addr, q_id) \ 23 + ((u8 *)(base_addr) + (q_id) * HINIC3_CI_Q_ADDR_SIZE) 24 + 25 + #define HINIC3_CI_PADDR(base_paddr, q_id) \ 26 + ((base_paddr) + (q_id) * HINIC3_CI_Q_ADDR_SIZE) 27 + 28 + #define SQ_WQ_PREFETCH_MAX 1 29 + #define SQ_WQ_PREFETCH_MIN 1 30 + #define SQ_WQ_PREFETCH_THRESHOLD 16 31 + 32 + #define RQ_WQ_PREFETCH_MAX 4 33 + #define RQ_WQ_PREFETCH_MIN 1 34 + #define RQ_WQ_PREFETCH_THRESHOLD 256 35 + 36 + /* (2048 - 8) / 64 */ 37 + #define HINIC3_Q_CTXT_MAX 31 38 + 39 + enum hinic3_qp_ctxt_type { 40 + HINIC3_QP_CTXT_TYPE_SQ = 0, 41 + HINIC3_QP_CTXT_TYPE_RQ = 1, 42 + }; 43 + 44 + struct hinic3_qp_ctxt_hdr { 45 + __le16 num_queues; 46 + __le16 queue_type; 47 + __le16 start_qid; 48 + __le16 rsvd; 49 + }; 50 + 51 + struct hinic3_sq_ctxt { 52 + __le32 ci_pi; 53 + __le32 drop_mode_sp; 54 + __le32 wq_pfn_hi_owner; 55 + __le32 wq_pfn_lo; 56 + 57 + __le32 rsvd0; 58 + __le32 pkt_drop_thd; 59 + __le32 global_sq_id; 60 + __le32 vlan_ceq_attr; 61 + 62 + __le32 pref_cache; 63 + __le32 pref_ci_owner; 64 + __le32 pref_wq_pfn_hi_ci; 65 + __le32 pref_wq_pfn_lo; 66 + 67 + __le32 rsvd8; 68 + __le32 rsvd9; 69 + __le32 wq_block_pfn_hi; 70 + __le32 wq_block_pfn_lo; 71 + }; 72 + 73 + struct hinic3_rq_ctxt { 74 + __le32 ci_pi; 75 + __le32 ceq_attr; 76 + __le32 
wq_pfn_hi_type_owner; 77 + __le32 wq_pfn_lo; 78 + 79 + __le32 rsvd[3]; 80 + __le32 cqe_sge_len; 81 + 82 + __le32 pref_cache; 83 + __le32 pref_ci_owner; 84 + __le32 pref_wq_pfn_hi_ci; 85 + __le32 pref_wq_pfn_lo; 86 + 87 + __le32 pi_paddr_hi; 88 + __le32 pi_paddr_lo; 89 + __le32 wq_block_pfn_hi; 90 + __le32 wq_block_pfn_lo; 91 + }; 92 + 93 + struct hinic3_sq_ctxt_block { 94 + struct hinic3_qp_ctxt_hdr cmdq_hdr; 95 + struct hinic3_sq_ctxt sq_ctxt[HINIC3_Q_CTXT_MAX]; 96 + }; 97 + 98 + struct hinic3_rq_ctxt_block { 99 + struct hinic3_qp_ctxt_hdr cmdq_hdr; 100 + struct hinic3_rq_ctxt rq_ctxt[HINIC3_Q_CTXT_MAX]; 101 + }; 102 + 103 + struct hinic3_clean_queue_ctxt { 104 + struct hinic3_qp_ctxt_hdr cmdq_hdr; 105 + __le32 rsvd; 106 + }; 107 + 108 + #define SQ_CTXT_SIZE(num_sqs) \ 109 + (sizeof(struct hinic3_qp_ctxt_hdr) + \ 110 + (num_sqs) * sizeof(struct hinic3_sq_ctxt)) 111 + 112 + #define RQ_CTXT_SIZE(num_rqs) \ 113 + (sizeof(struct hinic3_qp_ctxt_hdr) + \ 114 + (num_rqs) * sizeof(struct hinic3_rq_ctxt)) 115 + 116 + #define SQ_CTXT_PREF_CI_HI_SHIFT 12 117 + #define SQ_CTXT_PREF_CI_HI(val) ((val) >> SQ_CTXT_PREF_CI_HI_SHIFT) 118 + 119 + #define SQ_CTXT_PI_IDX_MASK GENMASK(15, 0) 120 + #define SQ_CTXT_CI_IDX_MASK GENMASK(31, 16) 121 + #define SQ_CTXT_CI_PI_SET(val, member) \ 122 + FIELD_PREP(SQ_CTXT_##member##_MASK, val) 123 + 124 + #define SQ_CTXT_MODE_SP_FLAG_MASK BIT(0) 125 + #define SQ_CTXT_MODE_PKT_DROP_MASK BIT(1) 126 + #define SQ_CTXT_MODE_SET(val, member) \ 127 + FIELD_PREP(SQ_CTXT_MODE_##member##_MASK, val) 128 + 129 + #define SQ_CTXT_WQ_PAGE_HI_PFN_MASK GENMASK(19, 0) 130 + #define SQ_CTXT_WQ_PAGE_OWNER_MASK BIT(23) 131 + #define SQ_CTXT_WQ_PAGE_SET(val, member) \ 132 + FIELD_PREP(SQ_CTXT_WQ_PAGE_##member##_MASK, val) 133 + 134 + #define SQ_CTXT_PKT_DROP_THD_ON_MASK GENMASK(15, 0) 135 + #define SQ_CTXT_PKT_DROP_THD_OFF_MASK GENMASK(31, 16) 136 + #define SQ_CTXT_PKT_DROP_THD_SET(val, member) \ 137 + FIELD_PREP(SQ_CTXT_PKT_DROP_##member##_MASK, val) 138 + 139 + 
#define SQ_CTXT_GLOBAL_SQ_ID_MASK GENMASK(12, 0) 140 + #define SQ_CTXT_GLOBAL_QUEUE_ID_SET(val, member) \ 141 + FIELD_PREP(SQ_CTXT_##member##_MASK, val) 142 + 143 + #define SQ_CTXT_VLAN_INSERT_MODE_MASK GENMASK(20, 19) 144 + #define SQ_CTXT_VLAN_CEQ_EN_MASK BIT(23) 145 + #define SQ_CTXT_VLAN_CEQ_SET(val, member) \ 146 + FIELD_PREP(SQ_CTXT_VLAN_##member##_MASK, val) 147 + 148 + #define SQ_CTXT_PREF_CACHE_THRESHOLD_MASK GENMASK(13, 0) 149 + #define SQ_CTXT_PREF_CACHE_MAX_MASK GENMASK(24, 14) 150 + #define SQ_CTXT_PREF_CACHE_MIN_MASK GENMASK(31, 25) 151 + 152 + #define SQ_CTXT_PREF_CI_HI_MASK GENMASK(3, 0) 153 + #define SQ_CTXT_PREF_OWNER_MASK BIT(4) 154 + 155 + #define SQ_CTXT_PREF_WQ_PFN_HI_MASK GENMASK(19, 0) 156 + #define SQ_CTXT_PREF_CI_LOW_MASK GENMASK(31, 20) 157 + #define SQ_CTXT_PREF_SET(val, member) \ 158 + FIELD_PREP(SQ_CTXT_PREF_##member##_MASK, val) 159 + 160 + #define SQ_CTXT_WQ_BLOCK_PFN_HI_MASK GENMASK(22, 0) 161 + #define SQ_CTXT_WQ_BLOCK_SET(val, member) \ 162 + FIELD_PREP(SQ_CTXT_WQ_BLOCK_##member##_MASK, val) 163 + 164 + #define RQ_CTXT_PI_IDX_MASK GENMASK(15, 0) 165 + #define RQ_CTXT_CI_IDX_MASK GENMASK(31, 16) 166 + #define RQ_CTXT_CI_PI_SET(val, member) \ 167 + FIELD_PREP(RQ_CTXT_##member##_MASK, val) 168 + 169 + #define RQ_CTXT_CEQ_ATTR_INTR_MASK GENMASK(30, 21) 170 + #define RQ_CTXT_CEQ_ATTR_EN_MASK BIT(31) 171 + #define RQ_CTXT_CEQ_ATTR_SET(val, member) \ 172 + FIELD_PREP(RQ_CTXT_CEQ_ATTR_##member##_MASK, val) 173 + 174 + #define RQ_CTXT_WQ_PAGE_HI_PFN_MASK GENMASK(19, 0) 175 + #define RQ_CTXT_WQ_PAGE_WQE_TYPE_MASK GENMASK(29, 28) 176 + #define RQ_CTXT_WQ_PAGE_OWNER_MASK BIT(31) 177 + #define RQ_CTXT_WQ_PAGE_SET(val, member) \ 178 + FIELD_PREP(RQ_CTXT_WQ_PAGE_##member##_MASK, val) 179 + 180 + #define RQ_CTXT_CQE_LEN_MASK GENMASK(29, 28) 181 + #define RQ_CTXT_CQE_LEN_SET(val, member) \ 182 + FIELD_PREP(RQ_CTXT_##member##_MASK, val) 183 + 184 + #define RQ_CTXT_PREF_CACHE_THRESHOLD_MASK GENMASK(13, 0) 185 + #define RQ_CTXT_PREF_CACHE_MAX_MASK 
GENMASK(24, 14) 186 + #define RQ_CTXT_PREF_CACHE_MIN_MASK GENMASK(31, 25) 187 + 188 + #define RQ_CTXT_PREF_CI_HI_MASK GENMASK(3, 0) 189 + #define RQ_CTXT_PREF_OWNER_MASK BIT(4) 190 + 191 + #define RQ_CTXT_PREF_WQ_PFN_HI_MASK GENMASK(19, 0) 192 + #define RQ_CTXT_PREF_CI_LOW_MASK GENMASK(31, 20) 193 + #define RQ_CTXT_PREF_SET(val, member) \ 194 + FIELD_PREP(RQ_CTXT_PREF_##member##_MASK, val) 195 + 196 + #define RQ_CTXT_WQ_BLOCK_PFN_HI_MASK GENMASK(22, 0) 197 + #define RQ_CTXT_WQ_BLOCK_SET(val, member) \ 198 + FIELD_PREP(RQ_CTXT_WQ_BLOCK_##member##_MASK, val) 199 + 200 + #define WQ_PAGE_PFN_SHIFT 12 201 + #define WQ_BLOCK_PFN_SHIFT 9 202 + #define WQ_PAGE_PFN(page_addr) ((page_addr) >> WQ_PAGE_PFN_SHIFT) 203 + #define WQ_BLOCK_PFN(page_addr) ((page_addr) >> WQ_BLOCK_PFN_SHIFT) 204 + 13 205 int hinic3_init_nic_io(struct hinic3_nic_dev *nic_dev) 14 206 { 15 - /* Completed by later submission due to LoC limit. */ 16 - return -EFAULT; 207 + struct hinic3_hwdev *hwdev = nic_dev->hwdev; 208 + struct hinic3_nic_io *nic_io; 209 + int err; 210 + 211 + nic_io = kzalloc(sizeof(*nic_io), GFP_KERNEL); 212 + if (!nic_io) 213 + return -ENOMEM; 214 + 215 + nic_dev->nic_io = nic_io; 216 + 217 + err = hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_NIC, 1); 218 + if (err) { 219 + dev_err(hwdev->dev, "Failed to set function svc used state\n"); 220 + goto err_free_nicio; 221 + } 222 + 223 + err = hinic3_init_function_table(nic_dev); 224 + if (err) { 225 + dev_err(hwdev->dev, "Failed to init function table\n"); 226 + goto err_clear_func_svc_used_state; 227 + } 228 + 229 + nic_io->rx_buf_len = nic_dev->rx_buf_len; 230 + 231 + err = hinic3_get_nic_feature_from_hw(nic_dev); 232 + if (err) { 233 + dev_err(hwdev->dev, "Failed to get nic features\n"); 234 + goto err_clear_func_svc_used_state; 235 + } 236 + 237 + nic_io->feature_cap &= HINIC3_NIC_F_ALL_MASK; 238 + nic_io->feature_cap &= HINIC3_NIC_DRV_DEFAULT_FEATURE; 239 + dev_dbg(hwdev->dev, "nic features: 0x%llx\n\n", 
nic_io->feature_cap); 240 + 241 + return 0; 242 + 243 + err_clear_func_svc_used_state: 244 + hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_NIC, 0); 245 + err_free_nicio: 246 + nic_dev->nic_io = NULL; 247 + kfree(nic_io); 248 + 249 + return err; 17 250 } 18 251 19 252 void hinic3_free_nic_io(struct hinic3_nic_dev *nic_dev) 20 253 { 21 - /* Completed by later submission due to LoC limit. */ 254 + struct hinic3_nic_io *nic_io = nic_dev->nic_io; 255 + 256 + hinic3_set_func_svc_used_state(nic_dev->hwdev, COMM_FUNC_SVC_T_NIC, 0); 257 + nic_dev->nic_io = NULL; 258 + kfree(nic_io); 259 + } 260 + 261 + int hinic3_init_nicio_res(struct hinic3_nic_dev *nic_dev) 262 + { 263 + struct hinic3_nic_io *nic_io = nic_dev->nic_io; 264 + struct hinic3_hwdev *hwdev = nic_dev->hwdev; 265 + void __iomem *db_base; 266 + int err; 267 + 268 + nic_io->max_qps = hinic3_func_max_qnum(hwdev); 269 + 270 + err = hinic3_alloc_db_addr(hwdev, &db_base, NULL); 271 + if (err) { 272 + dev_err(hwdev->dev, "Failed to allocate doorbell for sqs\n"); 273 + return err; 274 + } 275 + nic_io->sqs_db_addr = db_base; 276 + 277 + err = hinic3_alloc_db_addr(hwdev, &db_base, NULL); 278 + if (err) { 279 + hinic3_free_db_addr(hwdev, nic_io->sqs_db_addr); 280 + dev_err(hwdev->dev, "Failed to allocate doorbell for rqs\n"); 281 + return err; 282 + } 283 + nic_io->rqs_db_addr = db_base; 284 + 285 + nic_io->ci_vaddr_base = 286 + dma_alloc_coherent(hwdev->dev, 287 + HINIC3_CI_TABLE_SIZE(nic_io->max_qps), 288 + &nic_io->ci_dma_base, 289 + GFP_KERNEL); 290 + if (!nic_io->ci_vaddr_base) { 291 + hinic3_free_db_addr(hwdev, nic_io->sqs_db_addr); 292 + hinic3_free_db_addr(hwdev, nic_io->rqs_db_addr); 293 + return -ENOMEM; 294 + } 295 + 296 + return 0; 297 + } 298 + 299 + void hinic3_free_nicio_res(struct hinic3_nic_dev *nic_dev) 300 + { 301 + struct hinic3_nic_io *nic_io = nic_dev->nic_io; 302 + struct hinic3_hwdev *hwdev = nic_dev->hwdev; 303 + 304 + dma_free_coherent(hwdev->dev, 305 + 
HINIC3_CI_TABLE_SIZE(nic_io->max_qps), 306 + nic_io->ci_vaddr_base, nic_io->ci_dma_base); 307 + 308 + hinic3_free_db_addr(hwdev, nic_io->sqs_db_addr); 309 + hinic3_free_db_addr(hwdev, nic_io->rqs_db_addr); 310 + } 311 + 312 + static int hinic3_create_sq(struct hinic3_hwdev *hwdev, 313 + struct hinic3_io_queue *sq, 314 + u16 q_id, u32 sq_depth, u16 sq_msix_idx) 315 + { 316 + int err; 317 + 318 + /* sq used & hardware request init 1 */ 319 + sq->owner = 1; 320 + 321 + sq->q_id = q_id; 322 + sq->msix_entry_idx = sq_msix_idx; 323 + 324 + err = hinic3_wq_create(hwdev, &sq->wq, sq_depth, 325 + BIT(HINIC3_SQ_WQEBB_SHIFT)); 326 + if (err) { 327 + dev_err(hwdev->dev, "Failed to create tx queue %u wq\n", 328 + q_id); 329 + return err; 330 + } 331 + 332 + return 0; 333 + } 334 + 335 + static int hinic3_create_rq(struct hinic3_hwdev *hwdev, 336 + struct hinic3_io_queue *rq, 337 + u16 q_id, u32 rq_depth, u16 rq_msix_idx) 338 + { 339 + int err; 340 + 341 + rq->q_id = q_id; 342 + rq->msix_entry_idx = rq_msix_idx; 343 + 344 + err = hinic3_wq_create(hwdev, &rq->wq, rq_depth, 345 + BIT(HINIC3_RQ_WQEBB_SHIFT + 346 + HINIC3_NORMAL_RQ_WQE)); 347 + if (err) { 348 + dev_err(hwdev->dev, "Failed to create rx queue %u wq\n", 349 + q_id); 350 + return err; 351 + } 352 + 353 + return 0; 354 + } 355 + 356 + static int hinic3_create_qp(struct hinic3_hwdev *hwdev, 357 + struct hinic3_io_queue *sq, 358 + struct hinic3_io_queue *rq, u16 q_id, u32 sq_depth, 359 + u32 rq_depth, u16 qp_msix_idx) 360 + { 361 + int err; 362 + 363 + err = hinic3_create_sq(hwdev, sq, q_id, sq_depth, qp_msix_idx); 364 + if (err) { 365 + dev_err(hwdev->dev, "Failed to create sq, qid: %u\n", 366 + q_id); 367 + return err; 368 + } 369 + 370 + err = hinic3_create_rq(hwdev, rq, q_id, rq_depth, qp_msix_idx); 371 + if (err) { 372 + dev_err(hwdev->dev, "Failed to create rq, qid: %u\n", 373 + q_id); 374 + goto err_destroy_sq_wq; 375 + } 376 + 377 + return 0; 378 + 379 + err_destroy_sq_wq: 380 + hinic3_wq_destroy(hwdev, &sq->wq); 
381 + 382 + return err; 383 + } 384 + 385 + static void hinic3_destroy_qp(struct hinic3_hwdev *hwdev, 386 + struct hinic3_io_queue *sq, 387 + struct hinic3_io_queue *rq) 388 + { 389 + hinic3_wq_destroy(hwdev, &sq->wq); 390 + hinic3_wq_destroy(hwdev, &rq->wq); 391 + } 392 + 393 + int hinic3_alloc_qps(struct hinic3_nic_dev *nic_dev, 394 + struct hinic3_dyna_qp_params *qp_params) 395 + { 396 + struct msix_entry *qps_msix_entries = nic_dev->qps_msix_entries; 397 + struct hinic3_nic_io *nic_io = nic_dev->nic_io; 398 + struct hinic3_hwdev *hwdev = nic_dev->hwdev; 399 + struct hinic3_io_queue *sqs; 400 + struct hinic3_io_queue *rqs; 401 + u16 q_id; 402 + int err; 403 + 404 + if (qp_params->num_qps > nic_io->max_qps || !qp_params->num_qps) 405 + return -EINVAL; 406 + 407 + sqs = kcalloc(qp_params->num_qps, sizeof(*sqs), GFP_KERNEL); 408 + if (!sqs) { 409 + err = -ENOMEM; 410 + goto err_out; 411 + } 412 + 413 + rqs = kcalloc(qp_params->num_qps, sizeof(*rqs), GFP_KERNEL); 414 + if (!rqs) { 415 + err = -ENOMEM; 416 + goto err_free_sqs; 417 + } 418 + 419 + for (q_id = 0; q_id < qp_params->num_qps; q_id++) { 420 + err = hinic3_create_qp(hwdev, &sqs[q_id], &rqs[q_id], q_id, 421 + qp_params->sq_depth, qp_params->rq_depth, 422 + qps_msix_entries[q_id].entry); 423 + if (err) { 424 + dev_err(hwdev->dev, "Failed to allocate qp %u, err: %d\n", 425 + q_id, err); 426 + goto err_destroy_qp; 427 + } 428 + } 429 + 430 + qp_params->sqs = sqs; 431 + qp_params->rqs = rqs; 432 + 433 + return 0; 434 + 435 + err_destroy_qp: 436 + while (q_id > 0) { 437 + q_id--; 438 + hinic3_destroy_qp(hwdev, &sqs[q_id], &rqs[q_id]); 439 + } 440 + kfree(rqs); 441 + err_free_sqs: 442 + kfree(sqs); 443 + err_out: 444 + return err; 445 + } 446 + 447 + void hinic3_free_qps(struct hinic3_nic_dev *nic_dev, 448 + struct hinic3_dyna_qp_params *qp_params) 449 + { 450 + struct hinic3_hwdev *hwdev = nic_dev->hwdev; 451 + u16 q_id; 452 + 453 + for (q_id = 0; q_id < qp_params->num_qps; q_id++) 454 + hinic3_destroy_qp(hwdev, 
&qp_params->sqs[q_id], 455 + &qp_params->rqs[q_id]); 456 + 457 + kfree(qp_params->sqs); 458 + kfree(qp_params->rqs); 459 + } 460 + 461 + void hinic3_init_qps(struct hinic3_nic_dev *nic_dev, 462 + struct hinic3_dyna_qp_params *qp_params) 463 + { 464 + struct hinic3_nic_io *nic_io = nic_dev->nic_io; 465 + struct hinic3_io_queue *sqs = qp_params->sqs; 466 + struct hinic3_io_queue *rqs = qp_params->rqs; 467 + u16 q_id; 468 + 469 + nic_io->num_qps = qp_params->num_qps; 470 + nic_io->sq = qp_params->sqs; 471 + nic_io->rq = qp_params->rqs; 472 + for (q_id = 0; q_id < nic_io->num_qps; q_id++) { 473 + sqs[q_id].cons_idx_addr = 474 + (u16 *)HINIC3_CI_VADDR(nic_io->ci_vaddr_base, q_id); 475 + /* clear ci value */ 476 + WRITE_ONCE(*sqs[q_id].cons_idx_addr, 0); 477 + 478 + sqs[q_id].db_addr = nic_io->sqs_db_addr; 479 + rqs[q_id].db_addr = nic_io->rqs_db_addr; 480 + } 481 + } 482 + 483 + void hinic3_uninit_qps(struct hinic3_nic_dev *nic_dev, 484 + struct hinic3_dyna_qp_params *qp_params) 485 + { 486 + struct hinic3_nic_io *nic_io = nic_dev->nic_io; 487 + 488 + qp_params->sqs = nic_io->sq; 489 + qp_params->rqs = nic_io->rq; 490 + qp_params->num_qps = nic_io->num_qps; 491 + } 492 + 493 + static void hinic3_qp_prepare_cmdq_header(struct hinic3_qp_ctxt_hdr *qp_ctxt_hdr, 494 + enum hinic3_qp_ctxt_type ctxt_type, 495 + u16 num_queues, u16 q_id) 496 + { 497 + qp_ctxt_hdr->queue_type = cpu_to_le16(ctxt_type); 498 + qp_ctxt_hdr->num_queues = cpu_to_le16(num_queues); 499 + qp_ctxt_hdr->start_qid = cpu_to_le16(q_id); 500 + qp_ctxt_hdr->rsvd = 0; 501 + } 502 + 503 + static void hinic3_sq_prepare_ctxt(struct hinic3_io_queue *sq, u16 sq_id, 504 + struct hinic3_sq_ctxt *sq_ctxt) 505 + { 506 + u64 wq_page_addr, wq_page_pfn, wq_block_pfn; 507 + u32 wq_block_pfn_hi, wq_block_pfn_lo; 508 + u32 wq_page_pfn_hi, wq_page_pfn_lo; 509 + u16 pi_start, ci_start; 510 + 511 + ci_start = hinic3_get_sq_local_ci(sq); 512 + pi_start = hinic3_get_sq_local_pi(sq); 513 + 514 + wq_page_addr = 
hinic3_wq_get_first_wqe_page_addr(&sq->wq); 515 + 516 + wq_page_pfn = WQ_PAGE_PFN(wq_page_addr); 517 + wq_page_pfn_hi = upper_32_bits(wq_page_pfn); 518 + wq_page_pfn_lo = lower_32_bits(wq_page_pfn); 519 + 520 + wq_block_pfn = WQ_BLOCK_PFN(sq->wq.wq_block_paddr); 521 + wq_block_pfn_hi = upper_32_bits(wq_block_pfn); 522 + wq_block_pfn_lo = lower_32_bits(wq_block_pfn); 523 + 524 + sq_ctxt->ci_pi = 525 + cpu_to_le32(SQ_CTXT_CI_PI_SET(ci_start, CI_IDX) | 526 + SQ_CTXT_CI_PI_SET(pi_start, PI_IDX)); 527 + 528 + sq_ctxt->drop_mode_sp = 529 + cpu_to_le32(SQ_CTXT_MODE_SET(0, SP_FLAG) | 530 + SQ_CTXT_MODE_SET(0, PKT_DROP)); 531 + 532 + sq_ctxt->wq_pfn_hi_owner = 533 + cpu_to_le32(SQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) | 534 + SQ_CTXT_WQ_PAGE_SET(1, OWNER)); 535 + 536 + sq_ctxt->wq_pfn_lo = cpu_to_le32(wq_page_pfn_lo); 537 + 538 + sq_ctxt->pkt_drop_thd = 539 + cpu_to_le32(SQ_CTXT_PKT_DROP_THD_SET(HINIC3_DEFAULT_DROP_THD_ON, THD_ON) | 540 + SQ_CTXT_PKT_DROP_THD_SET(HINIC3_DEFAULT_DROP_THD_OFF, THD_OFF)); 541 + 542 + sq_ctxt->global_sq_id = 543 + cpu_to_le32(SQ_CTXT_GLOBAL_QUEUE_ID_SET((u32)sq_id, 544 + GLOBAL_SQ_ID)); 545 + 546 + /* enable insert c-vlan by default */ 547 + sq_ctxt->vlan_ceq_attr = 548 + cpu_to_le32(SQ_CTXT_VLAN_CEQ_SET(0, CEQ_EN) | 549 + SQ_CTXT_VLAN_CEQ_SET(1, INSERT_MODE)); 550 + 551 + sq_ctxt->rsvd0 = 0; 552 + 553 + sq_ctxt->pref_cache = 554 + cpu_to_le32(SQ_CTXT_PREF_SET(SQ_WQ_PREFETCH_MIN, CACHE_MIN) | 555 + SQ_CTXT_PREF_SET(SQ_WQ_PREFETCH_MAX, CACHE_MAX) | 556 + SQ_CTXT_PREF_SET(SQ_WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD)); 557 + 558 + sq_ctxt->pref_ci_owner = 559 + cpu_to_le32(SQ_CTXT_PREF_SET(SQ_CTXT_PREF_CI_HI(ci_start), CI_HI) | 560 + SQ_CTXT_PREF_SET(1, OWNER)); 561 + 562 + sq_ctxt->pref_wq_pfn_hi_ci = 563 + cpu_to_le32(SQ_CTXT_PREF_SET(ci_start, CI_LOW) | 564 + SQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI)); 565 + 566 + sq_ctxt->pref_wq_pfn_lo = cpu_to_le32(wq_page_pfn_lo); 567 + 568 + sq_ctxt->wq_block_pfn_hi = 569 + 
cpu_to_le32(SQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI)); 570 + 571 + sq_ctxt->wq_block_pfn_lo = cpu_to_le32(wq_block_pfn_lo); 572 + } 573 + 574 + static void hinic3_rq_prepare_ctxt_get_wq_info(struct hinic3_io_queue *rq, 575 + u32 *wq_page_pfn_hi, 576 + u32 *wq_page_pfn_lo, 577 + u32 *wq_block_pfn_hi, 578 + u32 *wq_block_pfn_lo) 579 + { 580 + u64 wq_page_addr, wq_page_pfn, wq_block_pfn; 581 + 582 + wq_page_addr = hinic3_wq_get_first_wqe_page_addr(&rq->wq); 583 + 584 + wq_page_pfn = WQ_PAGE_PFN(wq_page_addr); 585 + *wq_page_pfn_hi = upper_32_bits(wq_page_pfn); 586 + *wq_page_pfn_lo = lower_32_bits(wq_page_pfn); 587 + 588 + wq_block_pfn = WQ_BLOCK_PFN(rq->wq.wq_block_paddr); 589 + *wq_block_pfn_hi = upper_32_bits(wq_block_pfn); 590 + *wq_block_pfn_lo = lower_32_bits(wq_block_pfn); 591 + } 592 + 593 + static void hinic3_rq_prepare_ctxt(struct hinic3_io_queue *rq, 594 + struct hinic3_rq_ctxt *rq_ctxt) 595 + { 596 + u32 wq_block_pfn_hi, wq_block_pfn_lo; 597 + u32 wq_page_pfn_hi, wq_page_pfn_lo; 598 + u16 pi_start, ci_start; 599 + 600 + ci_start = (rq->wq.cons_idx & rq->wq.idx_mask) << HINIC3_NORMAL_RQ_WQE; 601 + pi_start = (rq->wq.prod_idx & rq->wq.idx_mask) << HINIC3_NORMAL_RQ_WQE; 602 + 603 + hinic3_rq_prepare_ctxt_get_wq_info(rq, &wq_page_pfn_hi, &wq_page_pfn_lo, 604 + &wq_block_pfn_hi, &wq_block_pfn_lo); 605 + 606 + rq_ctxt->ci_pi = 607 + cpu_to_le32(RQ_CTXT_CI_PI_SET(ci_start, CI_IDX) | 608 + RQ_CTXT_CI_PI_SET(pi_start, PI_IDX)); 609 + 610 + rq_ctxt->ceq_attr = 611 + cpu_to_le32(RQ_CTXT_CEQ_ATTR_SET(0, EN) | 612 + RQ_CTXT_CEQ_ATTR_SET(rq->msix_entry_idx, INTR)); 613 + 614 + rq_ctxt->wq_pfn_hi_type_owner = 615 + cpu_to_le32(RQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) | 616 + RQ_CTXT_WQ_PAGE_SET(1, OWNER)); 617 + 618 + /* use 16Byte WQE */ 619 + rq_ctxt->wq_pfn_hi_type_owner |= 620 + cpu_to_le32(RQ_CTXT_WQ_PAGE_SET(2, WQE_TYPE)); 621 + rq_ctxt->cqe_sge_len = cpu_to_le32(RQ_CTXT_CQE_LEN_SET(1, CQE_LEN)); 622 + 623 + rq_ctxt->wq_pfn_lo = cpu_to_le32(wq_page_pfn_lo); 
624 + 625 + rq_ctxt->pref_cache = 626 + cpu_to_le32(RQ_CTXT_PREF_SET(RQ_WQ_PREFETCH_MIN, CACHE_MIN) | 627 + RQ_CTXT_PREF_SET(RQ_WQ_PREFETCH_MAX, CACHE_MAX) | 628 + RQ_CTXT_PREF_SET(RQ_WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD)); 629 + 630 + rq_ctxt->pref_ci_owner = 631 + cpu_to_le32(RQ_CTXT_PREF_SET(SQ_CTXT_PREF_CI_HI(ci_start), CI_HI) | 632 + RQ_CTXT_PREF_SET(1, OWNER)); 633 + 634 + rq_ctxt->pref_wq_pfn_hi_ci = 635 + cpu_to_le32(RQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI) | 636 + RQ_CTXT_PREF_SET(ci_start, CI_LOW)); 637 + 638 + rq_ctxt->pref_wq_pfn_lo = cpu_to_le32(wq_page_pfn_lo); 639 + 640 + rq_ctxt->wq_block_pfn_hi = 641 + cpu_to_le32(RQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI)); 642 + 643 + rq_ctxt->wq_block_pfn_lo = cpu_to_le32(wq_block_pfn_lo); 644 + } 645 + 646 + static int init_sq_ctxts(struct hinic3_nic_dev *nic_dev) 647 + { 648 + struct hinic3_nic_io *nic_io = nic_dev->nic_io; 649 + struct hinic3_hwdev *hwdev = nic_dev->hwdev; 650 + struct hinic3_sq_ctxt_block *sq_ctxt_block; 651 + u16 q_id, curr_id, max_ctxts, i; 652 + struct hinic3_sq_ctxt *sq_ctxt; 653 + struct hinic3_cmd_buf *cmd_buf; 654 + struct hinic3_io_queue *sq; 655 + __le64 out_param; 656 + int err = 0; 657 + 658 + cmd_buf = hinic3_alloc_cmd_buf(hwdev); 659 + if (!cmd_buf) { 660 + dev_err(hwdev->dev, "Failed to allocate cmd buf\n"); 661 + return -ENOMEM; 662 + } 663 + 664 + q_id = 0; 665 + while (q_id < nic_io->num_qps) { 666 + sq_ctxt_block = cmd_buf->buf; 667 + sq_ctxt = sq_ctxt_block->sq_ctxt; 668 + 669 + max_ctxts = (nic_io->num_qps - q_id) > HINIC3_Q_CTXT_MAX ? 
670 + HINIC3_Q_CTXT_MAX : (nic_io->num_qps - q_id); 671 + 672 + hinic3_qp_prepare_cmdq_header(&sq_ctxt_block->cmdq_hdr, 673 + HINIC3_QP_CTXT_TYPE_SQ, max_ctxts, 674 + q_id); 675 + 676 + for (i = 0; i < max_ctxts; i++) { 677 + curr_id = q_id + i; 678 + sq = &nic_io->sq[curr_id]; 679 + hinic3_sq_prepare_ctxt(sq, curr_id, &sq_ctxt[i]); 680 + } 681 + 682 + hinic3_cmdq_buf_swab32(sq_ctxt_block, sizeof(*sq_ctxt_block)); 683 + 684 + cmd_buf->size = cpu_to_le16(SQ_CTXT_SIZE(max_ctxts)); 685 + err = hinic3_cmdq_direct_resp(hwdev, MGMT_MOD_L2NIC, 686 + L2NIC_UCODE_CMD_MODIFY_QUEUE_CTX, 687 + cmd_buf, &out_param); 688 + if (err || out_param) { 689 + dev_err(hwdev->dev, "Failed to set SQ ctxts, err: %d, out_param: 0x%llx\n", 690 + err, out_param); 691 + err = -EFAULT; 692 + break; 693 + } 694 + 695 + q_id += max_ctxts; 696 + } 697 + 698 + hinic3_free_cmd_buf(hwdev, cmd_buf); 699 + 700 + return err; 701 + } 702 + 703 + static int init_rq_ctxts(struct hinic3_nic_dev *nic_dev) 704 + { 705 + struct hinic3_nic_io *nic_io = nic_dev->nic_io; 706 + struct hinic3_hwdev *hwdev = nic_dev->hwdev; 707 + struct hinic3_rq_ctxt_block *rq_ctxt_block; 708 + u16 q_id, curr_id, max_ctxts, i; 709 + struct hinic3_rq_ctxt *rq_ctxt; 710 + struct hinic3_cmd_buf *cmd_buf; 711 + struct hinic3_io_queue *rq; 712 + __le64 out_param; 713 + int err = 0; 714 + 715 + cmd_buf = hinic3_alloc_cmd_buf(hwdev); 716 + if (!cmd_buf) { 717 + dev_err(hwdev->dev, "Failed to allocate cmd buf\n"); 718 + return -ENOMEM; 719 + } 720 + 721 + q_id = 0; 722 + while (q_id < nic_io->num_qps) { 723 + rq_ctxt_block = cmd_buf->buf; 724 + rq_ctxt = rq_ctxt_block->rq_ctxt; 725 + 726 + max_ctxts = (nic_io->num_qps - q_id) > HINIC3_Q_CTXT_MAX ? 
727 + HINIC3_Q_CTXT_MAX : (nic_io->num_qps - q_id); 728 + 729 + hinic3_qp_prepare_cmdq_header(&rq_ctxt_block->cmdq_hdr, 730 + HINIC3_QP_CTXT_TYPE_RQ, max_ctxts, 731 + q_id); 732 + 733 + for (i = 0; i < max_ctxts; i++) { 734 + curr_id = q_id + i; 735 + rq = &nic_io->rq[curr_id]; 736 + hinic3_rq_prepare_ctxt(rq, &rq_ctxt[i]); 737 + } 738 + 739 + hinic3_cmdq_buf_swab32(rq_ctxt_block, sizeof(*rq_ctxt_block)); 740 + 741 + cmd_buf->size = cpu_to_le16(RQ_CTXT_SIZE(max_ctxts)); 742 + 743 + err = hinic3_cmdq_direct_resp(hwdev, MGMT_MOD_L2NIC, 744 + L2NIC_UCODE_CMD_MODIFY_QUEUE_CTX, 745 + cmd_buf, &out_param); 746 + if (err || out_param) { 747 + dev_err(hwdev->dev, "Failed to set RQ ctxts, err: %d, out_param: 0x%llx\n", 748 + err, out_param); 749 + err = -EFAULT; 750 + break; 751 + } 752 + 753 + q_id += max_ctxts; 754 + } 755 + 756 + hinic3_free_cmd_buf(hwdev, cmd_buf); 757 + 758 + return err; 759 + } 760 + 761 + static int init_qp_ctxts(struct hinic3_nic_dev *nic_dev) 762 + { 763 + int err; 764 + 765 + err = init_sq_ctxts(nic_dev); 766 + if (err) 767 + return err; 768 + 769 + err = init_rq_ctxts(nic_dev); 770 + if (err) 771 + return err; 772 + 773 + return 0; 774 + } 775 + 776 + static int clean_queue_offload_ctxt(struct hinic3_nic_dev *nic_dev, 777 + enum hinic3_qp_ctxt_type ctxt_type) 778 + { 779 + struct hinic3_nic_io *nic_io = nic_dev->nic_io; 780 + struct hinic3_hwdev *hwdev = nic_dev->hwdev; 781 + struct hinic3_clean_queue_ctxt *ctxt_block; 782 + struct hinic3_cmd_buf *cmd_buf; 783 + __le64 out_param; 784 + int err; 785 + 786 + cmd_buf = hinic3_alloc_cmd_buf(hwdev); 787 + if (!cmd_buf) { 788 + dev_err(hwdev->dev, "Failed to allocate cmd buf\n"); 789 + return -ENOMEM; 790 + } 791 + 792 + ctxt_block = cmd_buf->buf; 793 + ctxt_block->cmdq_hdr.num_queues = cpu_to_le16(nic_io->max_qps); 794 + ctxt_block->cmdq_hdr.queue_type = cpu_to_le16(ctxt_type); 795 + ctxt_block->cmdq_hdr.start_qid = 0; 796 + ctxt_block->cmdq_hdr.rsvd = 0; 797 + ctxt_block->rsvd = 0; 798 + 799 + 
hinic3_cmdq_buf_swab32(ctxt_block, sizeof(*ctxt_block)); 800 + 801 + cmd_buf->size = cpu_to_le16(sizeof(*ctxt_block)); 802 + 803 + err = hinic3_cmdq_direct_resp(hwdev, MGMT_MOD_L2NIC, 804 + L2NIC_UCODE_CMD_CLEAN_QUEUE_CTX, 805 + cmd_buf, &out_param); 806 + if (err || out_param) { 807 + dev_err(hwdev->dev, "Failed to clean queue offload ctxts, err: %d,out_param: 0x%llx\n", 808 + err, out_param); 809 + 810 + err = -EFAULT; 811 + } 812 + 813 + hinic3_free_cmd_buf(hwdev, cmd_buf); 814 + 815 + return err; 816 + } 817 + 818 + static int clean_qp_offload_ctxt(struct hinic3_nic_dev *nic_dev) 819 + { 820 + /* clean LRO/TSO context space */ 821 + return clean_queue_offload_ctxt(nic_dev, HINIC3_QP_CTXT_TYPE_SQ) || 822 + clean_queue_offload_ctxt(nic_dev, HINIC3_QP_CTXT_TYPE_RQ); 823 + } 824 + 825 + /* init qps ctxt and set sq ci attr and arm all sq */ 826 + int hinic3_init_qp_ctxts(struct hinic3_nic_dev *nic_dev) 827 + { 828 + struct hinic3_nic_io *nic_io = nic_dev->nic_io; 829 + struct hinic3_hwdev *hwdev = nic_dev->hwdev; 830 + struct hinic3_sq_attr sq_attr; 831 + u32 rq_depth; 832 + u16 q_id; 833 + int err; 834 + 835 + err = init_qp_ctxts(nic_dev); 836 + if (err) { 837 + dev_err(hwdev->dev, "Failed to init QP ctxts\n"); 838 + return err; 839 + } 840 + 841 + /* clean LRO/TSO context space */ 842 + err = clean_qp_offload_ctxt(nic_dev); 843 + if (err) { 844 + dev_err(hwdev->dev, "Failed to clean qp offload ctxts\n"); 845 + return err; 846 + } 847 + 848 + rq_depth = nic_io->rq[0].wq.q_depth << HINIC3_NORMAL_RQ_WQE; 849 + 850 + err = hinic3_set_root_ctxt(hwdev, rq_depth, nic_io->sq[0].wq.q_depth, 851 + nic_io->rx_buf_len); 852 + if (err) { 853 + dev_err(hwdev->dev, "Failed to set root context\n"); 854 + return err; 855 + } 856 + 857 + for (q_id = 0; q_id < nic_io->num_qps; q_id++) { 858 + sq_attr.ci_dma_base = 859 + HINIC3_CI_PADDR(nic_io->ci_dma_base, q_id) >> 0x2; 860 + sq_attr.pending_limit = HINIC3_DEFAULT_TX_CI_PENDING_LIMIT; 861 + sq_attr.coalescing_time = 
HINIC3_DEFAULT_TX_CI_COALESCING_TIME; 862 + sq_attr.intr_en = 1; 863 + sq_attr.intr_idx = nic_io->sq[q_id].msix_entry_idx; 864 + sq_attr.l2nic_sqn = q_id; 865 + sq_attr.dma_attr_off = 0; 866 + err = hinic3_set_ci_table(hwdev, &sq_attr); 867 + if (err) { 868 + dev_err(hwdev->dev, "Failed to set ci table\n"); 869 + goto err_clean_root_ctxt; 870 + } 871 + } 872 + 873 + return 0; 874 + 875 + err_clean_root_ctxt: 876 + hinic3_clean_root_ctxt(hwdev); 877 + 878 + return err; 879 + } 880 + 881 + void hinic3_free_qp_ctxts(struct hinic3_nic_dev *nic_dev) 882 + { 883 + hinic3_clean_root_ctxt(nic_dev->hwdev); 22 884 }
+32 -7
drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h
··· 75 75 #define DB_CFLAG_DP_RQ 1 76 76 77 77 struct hinic3_nic_db { 78 - u32 db_info; 79 - u32 pi_hi; 78 + __le32 db_info; 79 + __le32 pi_hi; 80 80 }; 81 81 82 82 static inline void hinic3_write_db(struct hinic3_io_queue *queue, int cos, ··· 84 84 { 85 85 struct hinic3_nic_db db; 86 86 87 - db.db_info = DB_INFO_SET(DB_SRC_TYPE, TYPE) | 88 - DB_INFO_SET(cflag, CFLAG) | 89 - DB_INFO_SET(cos, COS) | 90 - DB_INFO_SET(queue->q_id, QID); 91 - db.pi_hi = DB_PI_HIGH(pi); 87 + db.db_info = 88 + cpu_to_le32(DB_INFO_SET(DB_SRC_TYPE, TYPE) | 89 + DB_INFO_SET(cflag, CFLAG) | 90 + DB_INFO_SET(cos, COS) | 91 + DB_INFO_SET(queue->q_id, QID)); 92 + db.pi_hi = cpu_to_le32(DB_PI_HIGH(pi)); 92 93 93 94 writeq(*((u64 *)&db), DB_ADDR(queue, pi)); 94 95 } 96 + 97 + struct hinic3_dyna_qp_params { 98 + u16 num_qps; 99 + u32 sq_depth; 100 + u32 rq_depth; 101 + 102 + struct hinic3_io_queue *sqs; 103 + struct hinic3_io_queue *rqs; 104 + }; 95 105 96 106 struct hinic3_nic_io { 97 107 struct hinic3_io_queue *sq; ··· 126 116 127 117 int hinic3_init_nic_io(struct hinic3_nic_dev *nic_dev); 128 118 void hinic3_free_nic_io(struct hinic3_nic_dev *nic_dev); 119 + 120 + int hinic3_init_nicio_res(struct hinic3_nic_dev *nic_dev); 121 + void hinic3_free_nicio_res(struct hinic3_nic_dev *nic_dev); 122 + 123 + int hinic3_alloc_qps(struct hinic3_nic_dev *nic_dev, 124 + struct hinic3_dyna_qp_params *qp_params); 125 + void hinic3_free_qps(struct hinic3_nic_dev *nic_dev, 126 + struct hinic3_dyna_qp_params *qp_params); 127 + void hinic3_init_qps(struct hinic3_nic_dev *nic_dev, 128 + struct hinic3_dyna_qp_params *qp_params); 129 + void hinic3_uninit_qps(struct hinic3_nic_dev *nic_dev, 130 + struct hinic3_dyna_qp_params *qp_params); 131 + 132 + int hinic3_init_qp_ctxts(struct hinic3_nic_dev *nic_dev); 133 + void hinic3_free_qp_ctxts(struct hinic3_nic_dev *nic_dev); 129 134 130 135 #endif
+9
drivers/net/ethernet/huawei/hinic3/hinic3_pci_id_tbl.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */ 3 + 4 + #ifndef _HINIC3_PCI_ID_TBL_H_ 5 + #define _HINIC3_PCI_ID_TBL_H_ 6 + 7 + #define PCI_DEV_ID_HINIC3_VF 0x375F 8 + 9 + #endif
+336
drivers/net/ethernet/huawei/hinic3/hinic3_rss.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + // Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. 3 + 4 + #include <linux/ethtool.h> 5 + 6 + #include "hinic3_cmdq.h" 7 + #include "hinic3_hwdev.h" 8 + #include "hinic3_hwif.h" 9 + #include "hinic3_mbox.h" 10 + #include "hinic3_nic_cfg.h" 11 + #include "hinic3_nic_dev.h" 12 + #include "hinic3_rss.h" 13 + 14 + static void hinic3_fillout_indir_tbl(struct net_device *netdev, u16 *indir) 15 + { 16 + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); 17 + u16 i, num_qps; 18 + 19 + num_qps = nic_dev->q_params.num_qps; 20 + for (i = 0; i < L2NIC_RSS_INDIR_SIZE; i++) 21 + indir[i] = ethtool_rxfh_indir_default(i, num_qps); 22 + } 23 + 24 + static int hinic3_rss_cfg(struct hinic3_hwdev *hwdev, u8 rss_en, u16 num_qps) 25 + { 26 + struct mgmt_msg_params msg_params = {}; 27 + struct l2nic_cmd_cfg_rss rss_cfg = {}; 28 + int err; 29 + 30 + rss_cfg.func_id = hinic3_global_func_id(hwdev); 31 + rss_cfg.rss_en = rss_en; 32 + rss_cfg.rq_priority_number = 0; 33 + rss_cfg.num_qps = num_qps; 34 + 35 + mgmt_msg_params_init_default(&msg_params, &rss_cfg, sizeof(rss_cfg)); 36 + 37 + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC, 38 + L2NIC_CMD_CFG_RSS, &msg_params); 39 + if (err || rss_cfg.msg_head.status) { 40 + dev_err(hwdev->dev, "Failed to set rss cfg, err: %d, status: 0x%x\n", 41 + err, rss_cfg.msg_head.status); 42 + return -EINVAL; 43 + } 44 + 45 + return 0; 46 + } 47 + 48 + static void hinic3_init_rss_parameters(struct net_device *netdev) 49 + { 50 + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); 51 + 52 + nic_dev->rss_hash_type = HINIC3_RSS_HASH_ENGINE_TYPE_XOR; 53 + nic_dev->rss_type.tcp_ipv6_ext = 1; 54 + nic_dev->rss_type.ipv6_ext = 1; 55 + nic_dev->rss_type.tcp_ipv6 = 1; 56 + nic_dev->rss_type.ipv6 = 1; 57 + nic_dev->rss_type.tcp_ipv4 = 1; 58 + nic_dev->rss_type.ipv4 = 1; 59 + nic_dev->rss_type.udp_ipv6 = 1; 60 + nic_dev->rss_type.udp_ipv4 = 1; 61 + } 62 + 63 + static void 
decide_num_qps(struct net_device *netdev) 64 + { 65 + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); 66 + unsigned int dev_cpus; 67 + 68 + dev_cpus = netif_get_num_default_rss_queues(); 69 + nic_dev->q_params.num_qps = min(dev_cpus, nic_dev->max_qps); 70 + } 71 + 72 + static int alloc_rss_resource(struct net_device *netdev) 73 + { 74 + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); 75 + 76 + nic_dev->rss_hkey = kmalloc_array(L2NIC_RSS_KEY_SIZE, 77 + sizeof(nic_dev->rss_hkey[0]), 78 + GFP_KERNEL); 79 + if (!nic_dev->rss_hkey) 80 + return -ENOMEM; 81 + 82 + netdev_rss_key_fill(nic_dev->rss_hkey, L2NIC_RSS_KEY_SIZE); 83 + 84 + nic_dev->rss_indir = kcalloc(L2NIC_RSS_INDIR_SIZE, sizeof(u16), 85 + GFP_KERNEL); 86 + if (!nic_dev->rss_indir) { 87 + kfree(nic_dev->rss_hkey); 88 + nic_dev->rss_hkey = NULL; 89 + return -ENOMEM; 90 + } 91 + 92 + return 0; 93 + } 94 + 95 + static int hinic3_rss_set_indir_tbl(struct hinic3_hwdev *hwdev, 96 + const u16 *indir_table) 97 + { 98 + struct l2nic_cmd_rss_set_indir_tbl *indir_tbl; 99 + struct hinic3_cmd_buf *cmd_buf; 100 + __le64 out_param; 101 + int err; 102 + u32 i; 103 + 104 + cmd_buf = hinic3_alloc_cmd_buf(hwdev); 105 + if (!cmd_buf) { 106 + dev_err(hwdev->dev, "Failed to allocate cmd buf\n"); 107 + return -ENOMEM; 108 + } 109 + 110 + cmd_buf->size = cpu_to_le16(sizeof(struct l2nic_cmd_rss_set_indir_tbl)); 111 + indir_tbl = cmd_buf->buf; 112 + memset(indir_tbl, 0, sizeof(*indir_tbl)); 113 + 114 + for (i = 0; i < L2NIC_RSS_INDIR_SIZE; i++) 115 + indir_tbl->entry[i] = cpu_to_le16(indir_table[i]); 116 + 117 + hinic3_cmdq_buf_swab32(indir_tbl, sizeof(*indir_tbl)); 118 + 119 + err = hinic3_cmdq_direct_resp(hwdev, MGMT_MOD_L2NIC, 120 + L2NIC_UCODE_CMD_SET_RSS_INDIR_TBL, 121 + cmd_buf, &out_param); 122 + if (err || out_param) { 123 + dev_err(hwdev->dev, "Failed to set rss indir table\n"); 124 + err = -EFAULT; 125 + } 126 + 127 + hinic3_free_cmd_buf(hwdev, cmd_buf); 128 + 129 + return err; 130 + } 131 + 132 + static int 
hinic3_set_rss_type(struct hinic3_hwdev *hwdev, 133 + struct hinic3_rss_type rss_type) 134 + { 135 + struct l2nic_cmd_set_rss_ctx_tbl ctx_tbl = {}; 136 + struct mgmt_msg_params msg_params = {}; 137 + u32 ctx; 138 + int err; 139 + 140 + ctx_tbl.func_id = hinic3_global_func_id(hwdev); 141 + ctx = L2NIC_RSS_TYPE_SET(1, VALID) | 142 + L2NIC_RSS_TYPE_SET(rss_type.ipv4, IPV4) | 143 + L2NIC_RSS_TYPE_SET(rss_type.ipv6, IPV6) | 144 + L2NIC_RSS_TYPE_SET(rss_type.ipv6_ext, IPV6_EXT) | 145 + L2NIC_RSS_TYPE_SET(rss_type.tcp_ipv4, TCP_IPV4) | 146 + L2NIC_RSS_TYPE_SET(rss_type.tcp_ipv6, TCP_IPV6) | 147 + L2NIC_RSS_TYPE_SET(rss_type.tcp_ipv6_ext, TCP_IPV6_EXT) | 148 + L2NIC_RSS_TYPE_SET(rss_type.udp_ipv4, UDP_IPV4) | 149 + L2NIC_RSS_TYPE_SET(rss_type.udp_ipv6, UDP_IPV6); 150 + ctx_tbl.context = ctx; 151 + 152 + mgmt_msg_params_init_default(&msg_params, &ctx_tbl, sizeof(ctx_tbl)); 153 + 154 + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC, 155 + L2NIC_CMD_SET_RSS_CTX_TBL, &msg_params); 156 + 157 + if (ctx_tbl.msg_head.status == MGMT_STATUS_CMD_UNSUPPORTED) { 158 + return MGMT_STATUS_CMD_UNSUPPORTED; 159 + } else if (err || ctx_tbl.msg_head.status) { 160 + dev_err(hwdev->dev, "mgmt Failed to set rss context offload, err: %d, status: 0x%x\n", 161 + err, ctx_tbl.msg_head.status); 162 + return -EINVAL; 163 + } 164 + 165 + return 0; 166 + } 167 + 168 + static int hinic3_rss_cfg_hash_type(struct hinic3_hwdev *hwdev, u8 opcode, 169 + enum hinic3_rss_hash_type *type) 170 + { 171 + struct l2nic_cmd_cfg_rss_engine hash_type_cmd = {}; 172 + struct mgmt_msg_params msg_params = {}; 173 + int err; 174 + 175 + hash_type_cmd.func_id = hinic3_global_func_id(hwdev); 176 + hash_type_cmd.opcode = opcode; 177 + 178 + if (opcode == MGMT_MSG_CMD_OP_SET) 179 + hash_type_cmd.hash_engine = *type; 180 + 181 + mgmt_msg_params_init_default(&msg_params, &hash_type_cmd, 182 + sizeof(hash_type_cmd)); 183 + 184 + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC, 185 + L2NIC_CMD_CFG_RSS_HASH_ENGINE, 186 + 
&msg_params); 187 + if (err || hash_type_cmd.msg_head.status) { 188 + dev_err(hwdev->dev, "Failed to %s hash engine, err: %d, status: 0x%x\n", 189 + opcode == MGMT_MSG_CMD_OP_SET ? "set" : "get", 190 + err, hash_type_cmd.msg_head.status); 191 + return -EIO; 192 + } 193 + 194 + if (opcode == MGMT_MSG_CMD_OP_GET) 195 + *type = hash_type_cmd.hash_engine; 196 + 197 + return 0; 198 + } 199 + 200 + static int hinic3_rss_set_hash_type(struct hinic3_hwdev *hwdev, 201 + enum hinic3_rss_hash_type type) 202 + { 203 + return hinic3_rss_cfg_hash_type(hwdev, MGMT_MSG_CMD_OP_SET, &type); 204 + } 205 + 206 + static int hinic3_config_rss_hw_resource(struct net_device *netdev, 207 + u16 *indir_tbl) 208 + { 209 + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); 210 + int err; 211 + 212 + err = hinic3_rss_set_indir_tbl(nic_dev->hwdev, indir_tbl); 213 + if (err) 214 + return err; 215 + 216 + err = hinic3_set_rss_type(nic_dev->hwdev, nic_dev->rss_type); 217 + if (err) 218 + return err; 219 + 220 + return hinic3_rss_set_hash_type(nic_dev->hwdev, nic_dev->rss_hash_type); 221 + } 222 + 223 + static int hinic3_rss_cfg_hash_key(struct hinic3_hwdev *hwdev, u8 opcode, 224 + u8 *key) 225 + { 226 + struct l2nic_cmd_cfg_rss_hash_key hash_key = {}; 227 + struct mgmt_msg_params msg_params = {}; 228 + int err; 229 + 230 + hash_key.func_id = hinic3_global_func_id(hwdev); 231 + hash_key.opcode = opcode; 232 + 233 + if (opcode == MGMT_MSG_CMD_OP_SET) 234 + memcpy(hash_key.key, key, L2NIC_RSS_KEY_SIZE); 235 + 236 + mgmt_msg_params_init_default(&msg_params, &hash_key, sizeof(hash_key)); 237 + 238 + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC, 239 + L2NIC_CMD_CFG_RSS_HASH_KEY, &msg_params); 240 + if (err || hash_key.msg_head.status) { 241 + dev_err(hwdev->dev, "Failed to %s hash key, err: %d, status: 0x%x\n", 242 + opcode == MGMT_MSG_CMD_OP_SET ? 
"set" : "get", 243 + err, hash_key.msg_head.status); 244 + return -EINVAL; 245 + } 246 + 247 + if (opcode == MGMT_MSG_CMD_OP_GET) 248 + memcpy(key, hash_key.key, L2NIC_RSS_KEY_SIZE); 249 + 250 + return 0; 251 + } 252 + 253 + static int hinic3_rss_set_hash_key(struct hinic3_hwdev *hwdev, u8 *key) 254 + { 255 + return hinic3_rss_cfg_hash_key(hwdev, MGMT_MSG_CMD_OP_SET, key); 256 + } 257 + 258 + static int hinic3_set_hw_rss_parameters(struct net_device *netdev, u8 rss_en) 259 + { 260 + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); 261 + int err; 262 + 263 + err = hinic3_rss_set_hash_key(nic_dev->hwdev, nic_dev->rss_hkey); 264 + if (err) 265 + return err; 266 + 267 + hinic3_fillout_indir_tbl(netdev, nic_dev->rss_indir); 268 + 269 + err = hinic3_config_rss_hw_resource(netdev, nic_dev->rss_indir); 270 + if (err) 271 + return err; 272 + 273 + err = hinic3_rss_cfg(nic_dev->hwdev, rss_en, nic_dev->q_params.num_qps); 274 + if (err) 275 + return err; 276 + 277 + return 0; 278 + } 279 + 280 + int hinic3_rss_init(struct net_device *netdev) 281 + { 282 + return hinic3_set_hw_rss_parameters(netdev, 1); 283 + } 284 + 285 + void hinic3_rss_uninit(struct net_device *netdev) 286 + { 287 + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); 288 + 289 + hinic3_rss_cfg(nic_dev->hwdev, 0, 0); 290 + } 291 + 292 + void hinic3_clear_rss_config(struct net_device *netdev) 293 + { 294 + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); 295 + 296 + kfree(nic_dev->rss_hkey); 297 + nic_dev->rss_hkey = NULL; 298 + 299 + kfree(nic_dev->rss_indir); 300 + nic_dev->rss_indir = NULL; 301 + } 302 + 303 + void hinic3_try_to_enable_rss(struct net_device *netdev) 304 + { 305 + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); 306 + struct hinic3_hwdev *hwdev = nic_dev->hwdev; 307 + int err; 308 + 309 + nic_dev->max_qps = hinic3_func_max_qnum(hwdev); 310 + if (nic_dev->max_qps <= 1 || 311 + !hinic3_test_support(nic_dev, HINIC3_NIC_F_RSS)) 312 + goto err_reset_q_params; 313 + 314 + err = 
alloc_rss_resource(netdev); 315 + if (err) { 316 + nic_dev->max_qps = 1; 317 + goto err_reset_q_params; 318 + } 319 + 320 + set_bit(HINIC3_RSS_ENABLE, &nic_dev->flags); 321 + decide_num_qps(netdev); 322 + hinic3_init_rss_parameters(netdev); 323 + err = hinic3_set_hw_rss_parameters(netdev, 0); 324 + if (err) { 325 + dev_err(hwdev->dev, "Failed to set hardware rss parameters\n"); 326 + hinic3_clear_rss_config(netdev); 327 + nic_dev->max_qps = 1; 328 + goto err_reset_q_params; 329 + } 330 + 331 + return; 332 + 333 + err_reset_q_params: 334 + clear_bit(HINIC3_RSS_ENABLE, &nic_dev->flags); 335 + nic_dev->q_params.num_qps = nic_dev->max_qps; 336 + }
+14
drivers/net/ethernet/huawei/hinic3/hinic3_rss.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */ 3 + 4 + #ifndef _HINIC3_RSS_H_ 5 + #define _HINIC3_RSS_H_ 6 + 7 + #include <linux/netdevice.h> 8 + 9 + int hinic3_rss_init(struct net_device *netdev); 10 + void hinic3_rss_uninit(struct net_device *netdev); 11 + void hinic3_try_to_enable_rss(struct net_device *netdev); 12 + void hinic3_clear_rss_config(struct net_device *netdev); 13 + 14 + #endif
+218 -8
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
··· 35 35 36 36 int hinic3_alloc_rxqs(struct net_device *netdev) 37 37 { 38 - /* Completed by later submission due to LoC limit. */ 39 - return -EFAULT; 38 + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); 39 + struct pci_dev *pdev = nic_dev->pdev; 40 + u16 num_rxqs = nic_dev->max_qps; 41 + struct hinic3_rxq *rxq; 42 + u16 q_id; 43 + 44 + nic_dev->rxqs = kcalloc(num_rxqs, sizeof(*nic_dev->rxqs), GFP_KERNEL); 45 + if (!nic_dev->rxqs) 46 + return -ENOMEM; 47 + 48 + for (q_id = 0; q_id < num_rxqs; q_id++) { 49 + rxq = &nic_dev->rxqs[q_id]; 50 + rxq->netdev = netdev; 51 + rxq->dev = &pdev->dev; 52 + rxq->q_id = q_id; 53 + rxq->buf_len = nic_dev->rx_buf_len; 54 + rxq->buf_len_shift = ilog2(nic_dev->rx_buf_len); 55 + rxq->q_depth = nic_dev->q_params.rq_depth; 56 + rxq->q_mask = nic_dev->q_params.rq_depth - 1; 57 + } 58 + 59 + return 0; 40 60 } 41 61 42 62 void hinic3_free_rxqs(struct net_device *netdev) 43 63 { 44 - /* Completed by later submission due to LoC limit. */ 64 + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); 65 + 66 + kfree(nic_dev->rxqs); 45 67 } 46 68 47 69 static int rx_alloc_mapped_page(struct page_pool *page_pool, ··· 71 49 { 72 50 struct page *page; 73 51 u32 page_offset; 52 + 53 + if (likely(rx_info->page)) 54 + return 0; 74 55 75 56 page = page_pool_dev_alloc_frag(page_pool, &page_offset, buf_len); 76 57 if (unlikely(!page)) ··· 85 60 return 0; 86 61 } 87 62 63 + /* Associate fixed completion element to every wqe in the rq. Every rq wqe will 64 + * always post completion to the same place. 
65 + */ 66 + static void rq_associate_cqes(struct hinic3_rxq *rxq) 67 + { 68 + struct hinic3_queue_pages *qpages; 69 + struct hinic3_rq_wqe *rq_wqe; 70 + dma_addr_t cqe_dma; 71 + u32 i; 72 + 73 + qpages = &rxq->rq->wq.qpages; 74 + 75 + for (i = 0; i < rxq->q_depth; i++) { 76 + rq_wqe = get_q_element(qpages, i, NULL); 77 + cqe_dma = rxq->cqe_start_paddr + 78 + i * sizeof(struct hinic3_rq_cqe); 79 + rq_wqe->cqe_hi_addr = cpu_to_le32(upper_32_bits(cqe_dma)); 80 + rq_wqe->cqe_lo_addr = cpu_to_le32(lower_32_bits(cqe_dma)); 81 + } 82 + } 83 + 88 84 static void rq_wqe_buf_set(struct hinic3_io_queue *rq, uint32_t wqe_idx, 89 85 dma_addr_t dma_addr, u16 len) 90 86 { 91 87 struct hinic3_rq_wqe *rq_wqe; 92 88 93 89 rq_wqe = get_q_element(&rq->wq.qpages, wqe_idx, NULL); 94 - rq_wqe->buf_hi_addr = upper_32_bits(dma_addr); 95 - rq_wqe->buf_lo_addr = lower_32_bits(dma_addr); 90 + rq_wqe->buf_hi_addr = cpu_to_le32(upper_32_bits(dma_addr)); 91 + rq_wqe->buf_lo_addr = cpu_to_le32(lower_32_bits(dma_addr)); 96 92 } 97 93 98 94 static u32 hinic3_rx_fill_buffers(struct hinic3_rxq *rxq) ··· 146 100 } 147 101 148 102 return i; 103 + } 104 + 105 + static u32 hinic3_alloc_rx_buffers(struct hinic3_dyna_rxq_res *rqres, 106 + u32 rq_depth, u16 buf_len) 107 + { 108 + u32 free_wqebbs = rq_depth - 1; 109 + u32 idx; 110 + int err; 111 + 112 + for (idx = 0; idx < free_wqebbs; idx++) { 113 + err = rx_alloc_mapped_page(rqres->page_pool, 114 + &rqres->rx_info[idx], buf_len); 115 + if (err) 116 + break; 117 + } 118 + 119 + return idx; 120 + } 121 + 122 + static void hinic3_free_rx_buffers(struct hinic3_dyna_rxq_res *rqres, 123 + u32 q_depth) 124 + { 125 + struct hinic3_rx_info *rx_info; 126 + u32 i; 127 + 128 + /* Free all the Rx ring sk_buffs */ 129 + for (i = 0; i < q_depth; i++) { 130 + rx_info = &rqres->rx_info[i]; 131 + 132 + if (rx_info->page) { 133 + page_pool_put_full_page(rqres->page_pool, 134 + rx_info->page, false); 135 + rx_info->page = NULL; 136 + } 137 + } 149 138 } 150 139 151 140 static 
void hinic3_add_rx_frag(struct hinic3_rxq *rxq, ··· 360 279 if (skb_is_nonlinear(skb)) 361 280 hinic3_pull_tail(skb); 362 281 363 - offload_type = rx_cqe->offload_type; 282 + offload_type = le32_to_cpu(rx_cqe->offload_type); 364 283 hinic3_rx_csum(rxq, offload_type, status, skb); 365 284 366 285 num_lro = RQ_CQE_STATUS_GET(status, NUM_LRO); ··· 380 299 return 0; 381 300 } 382 301 302 + int hinic3_alloc_rxqs_res(struct net_device *netdev, u16 num_rq, 303 + u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res) 304 + { 305 + u64 cqe_mem_size = sizeof(struct hinic3_rq_cqe) * rq_depth; 306 + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); 307 + struct page_pool_params pp_params = {}; 308 + struct hinic3_dyna_rxq_res *rqres; 309 + u32 pkt_idx; 310 + int idx; 311 + 312 + for (idx = 0; idx < num_rq; idx++) { 313 + rqres = &rxqs_res[idx]; 314 + rqres->rx_info = kcalloc(rq_depth, sizeof(*rqres->rx_info), 315 + GFP_KERNEL); 316 + if (!rqres->rx_info) 317 + goto err_free_rqres; 318 + 319 + rqres->cqe_start_vaddr = 320 + dma_alloc_coherent(&nic_dev->pdev->dev, cqe_mem_size, 321 + &rqres->cqe_start_paddr, GFP_KERNEL); 322 + if (!rqres->cqe_start_vaddr) { 323 + netdev_err(netdev, "Failed to alloc rxq%d rx cqe\n", 324 + idx); 325 + goto err_free_rx_info; 326 + } 327 + 328 + pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; 329 + pp_params.pool_size = rq_depth * nic_dev->rx_buf_len / 330 + PAGE_SIZE; 331 + pp_params.nid = dev_to_node(&nic_dev->pdev->dev); 332 + pp_params.dev = &nic_dev->pdev->dev; 333 + pp_params.dma_dir = DMA_FROM_DEVICE; 334 + pp_params.max_len = PAGE_SIZE; 335 + rqres->page_pool = page_pool_create(&pp_params); 336 + if (!rqres->page_pool) { 337 + netdev_err(netdev, "Failed to create rxq%d page pool\n", 338 + idx); 339 + goto err_free_cqe; 340 + } 341 + 342 + pkt_idx = hinic3_alloc_rx_buffers(rqres, rq_depth, 343 + nic_dev->rx_buf_len); 344 + if (!pkt_idx) { 345 + netdev_err(netdev, "Failed to alloc rxq%d rx buffers\n", 346 + idx); 347 + goto 
err_destroy_page_pool; 348 + } 349 + rqres->next_to_alloc = pkt_idx; 350 + } 351 + 352 + return 0; 353 + 354 + err_destroy_page_pool: 355 + page_pool_destroy(rqres->page_pool); 356 + err_free_cqe: 357 + dma_free_coherent(&nic_dev->pdev->dev, cqe_mem_size, 358 + rqres->cqe_start_vaddr, 359 + rqres->cqe_start_paddr); 360 + err_free_rx_info: 361 + kfree(rqres->rx_info); 362 + err_free_rqres: 363 + hinic3_free_rxqs_res(netdev, idx, rq_depth, rxqs_res); 364 + 365 + return -ENOMEM; 366 + } 367 + 368 + void hinic3_free_rxqs_res(struct net_device *netdev, u16 num_rq, 369 + u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res) 370 + { 371 + u64 cqe_mem_size = sizeof(struct hinic3_rq_cqe) * rq_depth; 372 + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); 373 + struct hinic3_dyna_rxq_res *rqres; 374 + int idx; 375 + 376 + for (idx = 0; idx < num_rq; idx++) { 377 + rqres = &rxqs_res[idx]; 378 + 379 + hinic3_free_rx_buffers(rqres, rq_depth); 380 + page_pool_destroy(rqres->page_pool); 381 + dma_free_coherent(&nic_dev->pdev->dev, cqe_mem_size, 382 + rqres->cqe_start_vaddr, 383 + rqres->cqe_start_paddr); 384 + kfree(rqres->rx_info); 385 + } 386 + } 387 + 388 + int hinic3_configure_rxqs(struct net_device *netdev, u16 num_rq, 389 + u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res) 390 + { 391 + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); 392 + struct hinic3_dyna_rxq_res *rqres; 393 + struct msix_entry *msix_entry; 394 + struct hinic3_rxq *rxq; 395 + u16 q_id; 396 + u32 pkts; 397 + 398 + for (q_id = 0; q_id < num_rq; q_id++) { 399 + rxq = &nic_dev->rxqs[q_id]; 400 + rqres = &rxqs_res[q_id]; 401 + msix_entry = &nic_dev->qps_msix_entries[q_id]; 402 + 403 + rxq->irq_id = msix_entry->vector; 404 + rxq->msix_entry_idx = msix_entry->entry; 405 + rxq->next_to_update = 0; 406 + rxq->next_to_alloc = rqres->next_to_alloc; 407 + rxq->q_depth = rq_depth; 408 + rxq->delta = rxq->q_depth; 409 + rxq->q_mask = rxq->q_depth - 1; 410 + rxq->cons_idx = 0; 411 + 412 + rxq->cqe_arr = 
rqres->cqe_start_vaddr; 413 + rxq->cqe_start_paddr = rqres->cqe_start_paddr; 414 + rxq->rx_info = rqres->rx_info; 415 + rxq->page_pool = rqres->page_pool; 416 + 417 + rxq->rq = &nic_dev->nic_io->rq[rxq->q_id]; 418 + 419 + rq_associate_cqes(rxq); 420 + 421 + pkts = hinic3_rx_fill_buffers(rxq); 422 + if (!pkts) { 423 + netdev_err(netdev, "Failed to fill Rx buffer\n"); 424 + return -ENOMEM; 425 + } 426 + } 427 + 428 + return 0; 429 + } 430 + 383 431 int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget) 384 432 { 385 433 struct hinic3_nic_dev *nic_dev = netdev_priv(rxq->netdev); ··· 521 311 while (likely(nr_pkts < budget)) { 522 312 sw_ci = rxq->cons_idx & rxq->q_mask; 523 313 rx_cqe = rxq->cqe_arr + sw_ci; 524 - status = rx_cqe->status; 314 + status = le32_to_cpu(rx_cqe->status); 525 315 if (!RQ_CQE_STATUS_GET(status, RXDONE)) 526 316 break; 527 317 528 318 /* make sure we read rx_done before packet length */ 529 319 rmb(); 530 320 531 - vlan_len = rx_cqe->vlan_len; 321 + vlan_len = le32_to_cpu(rx_cqe->vlan_len); 532 322 pkt_len = RQ_CQE_SGE_GET(vlan_len, LEN); 533 323 if (recv_one_pkt(rxq, rx_cqe, pkt_len, vlan_len, status)) 534 324 break;
+26 -12
drivers/net/ethernet/huawei/hinic3/hinic3_rx.h
··· 27 27 28 28 /* RX Completion information that is provided by HW for a specific RX WQE */ 29 29 struct hinic3_rq_cqe { 30 - u32 status; 31 - u32 vlan_len; 32 - u32 offload_type; 33 - u32 rsvd3; 34 - u32 rsvd4; 35 - u32 rsvd5; 36 - u32 rsvd6; 37 - u32 pkt_info; 30 + __le32 status; 31 + __le32 vlan_len; 32 + __le32 offload_type; 33 + __le32 rsvd3; 34 + __le32 rsvd4; 35 + __le32 rsvd5; 36 + __le32 rsvd6; 37 + __le32 pkt_info; 38 38 }; 39 39 40 40 struct hinic3_rq_wqe { 41 - u32 buf_hi_addr; 42 - u32 buf_lo_addr; 43 - u32 cqe_hi_addr; 44 - u32 cqe_lo_addr; 41 + __le32 buf_hi_addr; 42 + __le32 buf_lo_addr; 43 + __le32 cqe_hi_addr; 44 + __le32 cqe_lo_addr; 45 45 }; 46 46 47 47 struct hinic3_rx_info { ··· 82 82 dma_addr_t cqe_start_paddr; 83 83 } ____cacheline_aligned; 84 84 85 + struct hinic3_dyna_rxq_res { 86 + u16 next_to_alloc; 87 + struct hinic3_rx_info *rx_info; 88 + dma_addr_t cqe_start_paddr; 89 + void *cqe_start_vaddr; 90 + struct page_pool *page_pool; 91 + }; 92 + 85 93 int hinic3_alloc_rxqs(struct net_device *netdev); 86 94 void hinic3_free_rxqs(struct net_device *netdev); 87 95 96 + int hinic3_alloc_rxqs_res(struct net_device *netdev, u16 num_rq, 97 + u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res); 98 + void hinic3_free_rxqs_res(struct net_device *netdev, u16 num_rq, 99 + u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res); 100 + int hinic3_configure_rxqs(struct net_device *netdev, u16 num_rq, 101 + u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res); 88 102 int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget); 89 103 90 104 #endif
+148 -36
drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
··· 81 81 82 82 dma_info[0].len = skb_headlen(skb); 83 83 84 - wqe_desc->hi_addr = upper_32_bits(dma_info[0].dma); 85 - wqe_desc->lo_addr = lower_32_bits(dma_info[0].dma); 84 + wqe_desc->hi_addr = cpu_to_le32(upper_32_bits(dma_info[0].dma)); 85 + wqe_desc->lo_addr = cpu_to_le32(lower_32_bits(dma_info[0].dma)); 86 86 87 - wqe_desc->ctrl_len = dma_info[0].len; 87 + wqe_desc->ctrl_len = cpu_to_le32(dma_info[0].len); 88 88 89 89 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 90 90 frag = &(skb_shinfo(skb)->frags[i]); ··· 116 116 } 117 117 dma_unmap_single(&pdev->dev, dma_info[0].dma, dma_info[0].len, 118 118 DMA_TO_DEVICE); 119 + 119 120 return err; 120 121 } 121 122 ··· 137 136 138 137 dma_unmap_single(&pdev->dev, dma_info[0].dma, 139 138 dma_info[0].len, DMA_TO_DEVICE); 139 + } 140 + 141 + static void free_all_tx_skbs(struct net_device *netdev, u32 sq_depth, 142 + struct hinic3_tx_info *tx_info_arr) 143 + { 144 + struct hinic3_tx_info *tx_info; 145 + u32 idx; 146 + 147 + for (idx = 0; idx < sq_depth; idx++) { 148 + tx_info = &tx_info_arr[idx]; 149 + if (tx_info->skb) { 150 + hinic3_tx_unmap_skb(netdev, tx_info->skb, 151 + tx_info->dma_info); 152 + dev_kfree_skb_any(tx_info->skb); 153 + tx_info->skb = NULL; 154 + } 155 + } 140 156 } 141 157 142 158 union hinic3_ip { ··· 215 197 union hinic3_ip ip; 216 198 u8 l4_proto; 217 199 218 - task->pkt_info0 |= SQ_TASK_INFO0_SET(1, TUNNEL_FLAG); 200 + task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1, 201 + TUNNEL_FLAG)); 219 202 220 203 ip.hdr = skb_network_header(skb); 221 204 if (ip.v4->version == 4) { ··· 245 226 } 246 227 } 247 228 248 - task->pkt_info0 |= SQ_TASK_INFO0_SET(1, INNER_L4_EN); 229 + task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1, INNER_L4_EN)); 249 230 250 231 return 1; 251 232 } ··· 274 255 } 275 256 } 276 257 277 - static void hinic3_set_tso_info(struct hinic3_sq_task *task, u32 *queue_info, 258 + static void hinic3_set_tso_info(struct hinic3_sq_task *task, __le32 *queue_info, 278 259 enum 
hinic3_l4_offload_type l4_offload, 279 260 u32 offset, u32 mss) 280 261 { 281 262 if (l4_offload == HINIC3_L4_OFFLOAD_TCP) { 282 - *queue_info |= SQ_CTRL_QUEUE_INFO_SET(1, TSO); 283 - task->pkt_info0 |= SQ_TASK_INFO0_SET(1, INNER_L4_EN); 263 + *queue_info |= cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(1, TSO)); 264 + task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1, 265 + INNER_L4_EN)); 284 266 } else if (l4_offload == HINIC3_L4_OFFLOAD_UDP) { 285 - *queue_info |= SQ_CTRL_QUEUE_INFO_SET(1, UFO); 286 - task->pkt_info0 |= SQ_TASK_INFO0_SET(1, INNER_L4_EN); 267 + *queue_info |= cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(1, UFO)); 268 + task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1, 269 + INNER_L4_EN)); 287 270 } 288 271 289 272 /* enable L3 calculation */ 290 - task->pkt_info0 |= SQ_TASK_INFO0_SET(1, INNER_L3_EN); 273 + task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1, INNER_L3_EN)); 291 274 292 - *queue_info |= SQ_CTRL_QUEUE_INFO_SET(offset >> 1, PLDOFF); 275 + *queue_info |= cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(offset >> 1, PLDOFF)); 293 276 294 277 /* set MSS value */ 295 - *queue_info &= ~SQ_CTRL_QUEUE_INFO_MSS_MASK; 296 - *queue_info |= SQ_CTRL_QUEUE_INFO_SET(mss, MSS); 278 + *queue_info &= cpu_to_le32(~SQ_CTRL_QUEUE_INFO_MSS_MASK); 279 + *queue_info |= cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(mss, MSS)); 297 280 } 298 281 299 282 static __sum16 csum_magic(union hinic3_ip *ip, unsigned short proto) ··· 305 284 csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0); 306 285 } 307 286 308 - static int hinic3_tso(struct hinic3_sq_task *task, u32 *queue_info, 287 + static int hinic3_tso(struct hinic3_sq_task *task, __le32 *queue_info, 309 288 struct sk_buff *skb) 310 289 { 311 290 enum hinic3_l4_offload_type l4_offload; ··· 326 305 if (skb->encapsulation) { 327 306 u32 gso_type = skb_shinfo(skb)->gso_type; 328 307 /* L3 checksum is always enabled */ 329 - task->pkt_info0 |= SQ_TASK_INFO0_SET(1, OUT_L3_EN); 330 - task->pkt_info0 |= SQ_TASK_INFO0_SET(1, TUNNEL_FLAG); 308 + 
task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1, OUT_L3_EN)); 309 + task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1, 310 + TUNNEL_FLAG)); 331 311 332 312 l4.hdr = skb_transport_header(skb); 333 313 ip.hdr = skb_network_header(skb); 334 314 335 315 if (gso_type & SKB_GSO_UDP_TUNNEL_CSUM) { 336 316 l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP); 337 - task->pkt_info0 |= SQ_TASK_INFO0_SET(1, OUT_L4_EN); 317 + task->pkt_info0 |= 318 + cpu_to_le32(SQ_TASK_INFO0_SET(1, OUT_L4_EN)); 338 319 } 339 320 340 321 ip.hdr = skb_inner_network_header(skb); ··· 366 343 * 2=select TPID2 in IPSU, 3=select TPID3 in IPSU, 367 344 * 4=select TPID4 in IPSU 368 345 */ 369 - task->vlan_offload = SQ_TASK_INFO3_SET(vlan_tag, VLAN_TAG) | 370 - SQ_TASK_INFO3_SET(vlan_tpid, VLAN_TPID) | 371 - SQ_TASK_INFO3_SET(1, VLAN_TAG_VALID); 346 + task->vlan_offload = 347 + cpu_to_le32(SQ_TASK_INFO3_SET(vlan_tag, VLAN_TAG) | 348 + SQ_TASK_INFO3_SET(vlan_tpid, VLAN_TPID) | 349 + SQ_TASK_INFO3_SET(1, VLAN_TAG_VALID)); 372 350 } 373 351 374 352 static u32 hinic3_tx_offload(struct sk_buff *skb, struct hinic3_sq_task *task, 375 - u32 *queue_info, struct hinic3_txq *txq) 353 + __le32 *queue_info, struct hinic3_txq *txq) 376 354 { 377 355 u32 offload = 0; 378 356 int tso_cs_en; ··· 464 440 } 465 441 466 442 static void hinic3_prepare_sq_ctrl(struct hinic3_sq_wqe_combo *wqe_combo, 467 - u32 queue_info, int nr_descs, u16 owner) 443 + __le32 queue_info, int nr_descs, u16 owner) 468 444 { 469 445 struct hinic3_sq_wqe_desc *wqe_desc = wqe_combo->ctrl_bd0; 470 446 471 447 if (wqe_combo->wqe_type == SQ_WQE_COMPACT_TYPE) { 472 448 wqe_desc->ctrl_len |= 473 - SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) | 474 - SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) | 475 - SQ_CTRL_SET(owner, OWNER); 449 + cpu_to_le32(SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) | 450 + SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) | 451 + SQ_CTRL_SET(owner, OWNER)); 476 452 477 453 /* compact wqe queue_info will transfer to chip */ 478 454 
wqe_desc->queue_info = 0; 479 455 return; 480 456 } 481 457 482 - wqe_desc->ctrl_len |= SQ_CTRL_SET(nr_descs, BUFDESC_NUM) | 483 - SQ_CTRL_SET(wqe_combo->task_type, TASKSECT_LEN) | 484 - SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) | 485 - SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) | 486 - SQ_CTRL_SET(owner, OWNER); 458 + wqe_desc->ctrl_len |= 459 + cpu_to_le32(SQ_CTRL_SET(nr_descs, BUFDESC_NUM) | 460 + SQ_CTRL_SET(wqe_combo->task_type, TASKSECT_LEN) | 461 + SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) | 462 + SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) | 463 + SQ_CTRL_SET(owner, OWNER)); 487 464 488 465 wqe_desc->queue_info = queue_info; 489 - wqe_desc->queue_info |= SQ_CTRL_QUEUE_INFO_SET(1, UC); 466 + wqe_desc->queue_info |= cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(1, UC)); 490 467 491 468 if (!SQ_CTRL_QUEUE_INFO_GET(wqe_desc->queue_info, MSS)) { 492 469 wqe_desc->queue_info |= 493 - SQ_CTRL_QUEUE_INFO_SET(HINIC3_TX_MSS_DEFAULT, MSS); 470 + cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(HINIC3_TX_MSS_DEFAULT, MSS)); 494 471 } else if (SQ_CTRL_QUEUE_INFO_GET(wqe_desc->queue_info, MSS) < 495 472 HINIC3_TX_MSS_MIN) { 496 473 /* mss should not be less than 80 */ 497 - wqe_desc->queue_info &= ~SQ_CTRL_QUEUE_INFO_MSS_MASK; 474 + wqe_desc->queue_info &= 475 + cpu_to_le32(~SQ_CTRL_QUEUE_INFO_MSS_MASK); 498 476 wqe_desc->queue_info |= 499 - SQ_CTRL_QUEUE_INFO_SET(HINIC3_TX_MSS_MIN, MSS); 477 + cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(HINIC3_TX_MSS_MIN, MSS)); 500 478 } 501 479 } 502 480 ··· 508 482 { 509 483 struct hinic3_sq_wqe_combo wqe_combo = {}; 510 484 struct hinic3_tx_info *tx_info; 511 - u32 offload, queue_info = 0; 512 485 struct hinic3_sq_task task; 513 486 u16 wqebb_cnt, num_sge; 487 + __le32 queue_info = 0; 514 488 u16 saved_wq_prod_idx; 515 489 u16 owner, pi = 0; 516 490 u8 saved_sq_owner; 491 + u32 offload; 517 492 int err; 518 493 519 494 if (unlikely(skb->len < MIN_SKB_LEN)) { ··· 602 575 603 576 err_drop_pkt: 604 577 dev_kfree_skb_any(skb); 578 + 605 579 return NETDEV_TX_OK; 606 580 } 607 
581 ··· 651 623 652 624 #define HINIC3_BDS_PER_SQ_WQEBB \ 653 625 (HINIC3_SQ_WQEBB_SIZE / sizeof(struct hinic3_sq_bufdesc)) 626 + 627 + int hinic3_alloc_txqs_res(struct net_device *netdev, u16 num_sq, 628 + u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res) 629 + { 630 + struct hinic3_dyna_txq_res *tqres; 631 + int idx; 632 + 633 + for (idx = 0; idx < num_sq; idx++) { 634 + tqres = &txqs_res[idx]; 635 + 636 + tqres->tx_info = kcalloc(sq_depth, sizeof(*tqres->tx_info), 637 + GFP_KERNEL); 638 + if (!tqres->tx_info) 639 + goto err_free_tqres; 640 + 641 + tqres->bds = kcalloc(sq_depth * HINIC3_BDS_PER_SQ_WQEBB + 642 + HINIC3_MAX_SQ_SGE, sizeof(*tqres->bds), 643 + GFP_KERNEL); 644 + if (!tqres->bds) { 645 + kfree(tqres->tx_info); 646 + goto err_free_tqres; 647 + } 648 + } 649 + 650 + return 0; 651 + 652 + err_free_tqres: 653 + while (idx > 0) { 654 + idx--; 655 + tqres = &txqs_res[idx]; 656 + 657 + kfree(tqres->bds); 658 + kfree(tqres->tx_info); 659 + } 660 + 661 + return -ENOMEM; 662 + } 663 + 664 + void hinic3_free_txqs_res(struct net_device *netdev, u16 num_sq, 665 + u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res) 666 + { 667 + struct hinic3_dyna_txq_res *tqres; 668 + int idx; 669 + 670 + for (idx = 0; idx < num_sq; idx++) { 671 + tqres = &txqs_res[idx]; 672 + 673 + free_all_tx_skbs(netdev, sq_depth, tqres->tx_info); 674 + kfree(tqres->bds); 675 + kfree(tqres->tx_info); 676 + } 677 + } 678 + 679 + int hinic3_configure_txqs(struct net_device *netdev, u16 num_sq, 680 + u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res) 681 + { 682 + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); 683 + struct hinic3_dyna_txq_res *tqres; 684 + struct hinic3_txq *txq; 685 + u16 q_id; 686 + u32 idx; 687 + 688 + for (q_id = 0; q_id < num_sq; q_id++) { 689 + txq = &nic_dev->txqs[q_id]; 690 + tqres = &txqs_res[q_id]; 691 + 692 + txq->q_depth = sq_depth; 693 + txq->q_mask = sq_depth - 1; 694 + 695 + txq->tx_stop_thrs = min(HINIC3_DEFAULT_STOP_THRS, 696 + sq_depth / 20); 697 + 
txq->tx_start_thrs = min(HINIC3_DEFAULT_START_THRS, 698 + sq_depth / 10); 699 + 700 + txq->tx_info = tqres->tx_info; 701 + for (idx = 0; idx < sq_depth; idx++) 702 + txq->tx_info[idx].dma_info = 703 + &tqres->bds[idx * HINIC3_BDS_PER_SQ_WQEBB]; 704 + 705 + txq->sq = &nic_dev->nic_io->sq[q_id]; 706 + } 707 + 708 + return 0; 709 + } 654 710 655 711 bool hinic3_tx_poll(struct hinic3_txq *txq, int budget) 656 712 {
+21 -9
drivers/net/ethernet/huawei/hinic3/hinic3_tx.h
··· 58 58 #define SQ_CTRL_QUEUE_INFO_SET(val, member) \ 59 59 FIELD_PREP(SQ_CTRL_QUEUE_INFO_##member##_MASK, val) 60 60 #define SQ_CTRL_QUEUE_INFO_GET(val, member) \ 61 - FIELD_GET(SQ_CTRL_QUEUE_INFO_##member##_MASK, val) 61 + FIELD_GET(SQ_CTRL_QUEUE_INFO_##member##_MASK, le32_to_cpu(val)) 62 62 63 63 #define SQ_CTRL_MAX_PLDOFF 221 64 64 ··· 77 77 FIELD_PREP(SQ_TASK_INFO3_##member##_MASK, val) 78 78 79 79 struct hinic3_sq_wqe_desc { 80 - u32 ctrl_len; 81 - u32 queue_info; 82 - u32 hi_addr; 83 - u32 lo_addr; 80 + __le32 ctrl_len; 81 + __le32 queue_info; 82 + __le32 hi_addr; 83 + __le32 lo_addr; 84 84 }; 85 85 86 86 struct hinic3_sq_task { 87 - u32 pkt_info0; 88 - u32 ip_identify; 89 - u32 rsvd; 90 - u32 vlan_offload; 87 + __le32 pkt_info0; 88 + __le32 ip_identify; 89 + __le32 rsvd; 90 + __le32 vlan_offload; 91 91 }; 92 92 93 93 struct hinic3_sq_wqe_combo { ··· 125 125 struct hinic3_io_queue *sq; 126 126 } ____cacheline_aligned; 127 127 128 + struct hinic3_dyna_txq_res { 129 + struct hinic3_tx_info *tx_info; 130 + struct hinic3_dma_info *bds; 131 + }; 132 + 128 133 int hinic3_alloc_txqs(struct net_device *netdev); 129 134 void hinic3_free_txqs(struct net_device *netdev); 135 + 136 + int hinic3_alloc_txqs_res(struct net_device *netdev, u16 num_sq, 137 + u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res); 138 + void hinic3_free_txqs_res(struct net_device *netdev, u16 num_sq, 139 + u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res); 140 + int hinic3_configure_txqs(struct net_device *netdev, u16 num_sq, 141 + u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res); 130 142 131 143 netdev_tx_t hinic3_xmit_frame(struct sk_buff *skb, struct net_device *netdev); 132 144 bool hinic3_tx_poll(struct hinic3_txq *txq, int budget);