
Merge branch 'net-mana-add-pf-and-xdp_redirect-support'

Haiyang Zhang says:

====================
net: mana: Add PF and XDP_REDIRECT support

The patch set adds PF and XDP_REDIRECT support.
====================

Link: https://lore.kernel.org/r/1655238535-19257-1-git-send-email-haiyangz@microsoft.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
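
For reference, XDP_REDIRECT lets an attached XDP program ask the driver to hand a received frame to xdp_do_redirect() so it can be forwarded to another netdev (or a CPU/AF_XDP target) instead of being passed up the stack; the RX-path changes below add that plumbing. The following is a minimal sketch of such a program, not part of this series: the devmap name xdp_tx_ports and the redirect-everything policy are illustrative assumptions, and it is built the usual way with clang -target bpf and loaded via libbpf.

/* Illustrative XDP program (assumption, not from this patch set): redirect
 * every frame to the ifindex stored at key 0 of a devmap, falling back to
 * XDP_PASS if the lookup fails.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u32);	/* target ifindex, filled in from user space */
} xdp_tx_ports SEC(".maps");

SEC("xdp")
int redirect_all(struct xdp_md *ctx)
{
	/* On success the helper returns XDP_REDIRECT; the driver then
	 * completes the redirect via xdp_do_redirect()/xdp_do_flush().
	 */
	return bpf_redirect_map(&xdp_tx_ports, 0, XDP_PASS);
}

char _license[] SEC("license") = "GPL";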

+360 -6
+10
drivers/net/ethernet/microsoft/mana/gdma.h
···
 	struct completion eq_test_event;
 	u32 test_event_eq_id;
 
+	bool is_pf;
 	void __iomem *bar0_va;
 	void __iomem *shm_base;
 	void __iomem *db_page_base;
···
 #define GDMA_REG_DB_PAGE_OFFSET	8
 #define GDMA_REG_DB_PAGE_SIZE		0x10
 #define GDMA_REG_SHM_OFFSET		0x18
+
+#define GDMA_PF_REG_DB_PAGE_SIZE	0xD0
+#define GDMA_PF_REG_DB_PAGE_OFF		0xC8
+#define GDMA_PF_REG_SHM_OFF		0x70
+
+#define GDMA_SRIOV_REG_CFG_BASE_OFF	0x108
+
+#define MANA_PF_DEVICE_ID 0x00B9
+#define MANA_VF_DEVICE_ID 0x00BA
 
 struct gdma_posted_wqe_info {
 	u32 wqe_size_in_bu;
+36 -3
drivers/net/ethernet/microsoft/mana/gdma_main.c
···
 	return readq(g->bar0_va + offset);
 }
 
-static void mana_gd_init_registers(struct pci_dev *pdev)
+static void mana_gd_init_pf_regs(struct pci_dev *pdev)
+{
+	struct gdma_context *gc = pci_get_drvdata(pdev);
+	void __iomem *sriov_base_va;
+	u64 sriov_base_off;
+
+	gc->db_page_size = mana_gd_r32(gc, GDMA_PF_REG_DB_PAGE_SIZE) & 0xFFFF;
+	gc->db_page_base = gc->bar0_va +
+			   mana_gd_r64(gc, GDMA_PF_REG_DB_PAGE_OFF);
+
+	sriov_base_off = mana_gd_r64(gc, GDMA_SRIOV_REG_CFG_BASE_OFF);
+
+	sriov_base_va = gc->bar0_va + sriov_base_off;
+	gc->shm_base = sriov_base_va +
+		       mana_gd_r64(gc, sriov_base_off + GDMA_PF_REG_SHM_OFF);
+}
+
+static void mana_gd_init_vf_regs(struct pci_dev *pdev)
 {
 	struct gdma_context *gc = pci_get_drvdata(pdev);
 
···
 			   mana_gd_r64(gc, GDMA_REG_DB_PAGE_OFFSET);
 
 	gc->shm_base = gc->bar0_va + mana_gd_r64(gc, GDMA_REG_SHM_OFFSET);
+}
+
+static void mana_gd_init_registers(struct pci_dev *pdev)
+{
+	struct gdma_context *gc = pci_get_drvdata(pdev);
+
+	if (gc->is_pf)
+		mana_gd_init_pf_regs(pdev);
+	else
+		mana_gd_init_vf_regs(pdev);
 }
 
 static int mana_gd_query_max_resources(struct pci_dev *pdev)
···
 	mana_gd_remove_irqs(pdev);
 }
 
+static bool mana_is_pf(unsigned short dev_id)
+{
+	return dev_id == MANA_PF_DEVICE_ID;
+}
+
 static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	struct gdma_context *gc;
···
 	if (!bar0_va)
 		goto free_gc;
 
+	gc->is_pf = mana_is_pf(pdev->device);
 	gc->bar0_va = bar0_va;
 	gc->dev = &pdev->dev;
-
 
 	err = mana_gd_setup(pdev);
 	if (err)
···
 #endif
 
 static const struct pci_device_id mana_id_table[] = {
-	{ PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, 0x00BA) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, MANA_PF_DEVICE_ID) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, MANA_VF_DEVICE_ID) },
 	{ }
 };
+17 -1
drivers/net/ethernet/microsoft/mana/hw_channel.c
···
 		case HWC_INIT_DATA_GPA_MKEY:
 			hwc->rxq->msg_buf->gpa_mkey = val;
 			hwc->txq->msg_buf->gpa_mkey = val;
 			break;
+
+		case HWC_INIT_DATA_PF_DEST_RQ_ID:
+			hwc->pf_dest_vrq_id = val;
+			break;
+
+		case HWC_INIT_DATA_PF_DEST_CQ_ID:
+			hwc->pf_dest_vrcq_id = val;
+			break;
 		}
 
 		break;
···
 int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
			  const void *req, u32 resp_len, void *resp)
 {
+	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
 	struct hwc_work_request *tx_wr;
 	struct hwc_wq *txq = hwc->txq;
 	struct gdma_req_hdr *req_msg;
 	struct hwc_caller_ctx *ctx;
+	u32 dest_vrcq = 0;
+	u32 dest_vrq = 0;
 	u16 msg_id;
 	int err;
 
···
 
 	tx_wr->msg_size = req_len;
 
-	err = mana_hwc_post_tx_wqe(txq, tx_wr, 0, 0, false);
+	if (gc->is_pf) {
+		dest_vrq = hwc->pf_dest_vrq_id;
+		dest_vrcq = hwc->pf_dest_vrcq_id;
+	}
+
+	err = mana_hwc_post_tx_wqe(txq, tx_wr, dest_vrq, dest_vrcq, false);
 	if (err) {
 		dev_err(hwc->dev, "HWC: Failed to post send WQE: %d\n", err);
 		goto out;
+5
drivers/net/ethernet/microsoft/mana/hw_channel.h
···
 #define HWC_INIT_DATA_MAX_NUM_CQS	7
 #define HWC_INIT_DATA_PDID		8
 #define HWC_INIT_DATA_GPA_MKEY		9
+#define HWC_INIT_DATA_PF_DEST_RQ_ID	10
+#define HWC_INIT_DATA_PF_DEST_CQ_ID	11
 
 /* Structures labeled with "HW DATA" are exchanged with the hardware. All of
  * them are naturally aligned and hence don't need __packed.
···
 
 	struct semaphore sema;
 	struct gdma_resource inflight_msg_res;
+
+	u32 pf_dest_vrq_id;
+	u32 pf_dest_vrcq_id;
 
 	struct hwc_caller_ctx *caller_ctx;
 };
+70
drivers/net/ethernet/microsoft/mana/mana.h
···
 	u64 bytes;
 	u64 xdp_drop;
 	u64 xdp_tx;
+	u64 xdp_redirect;
 	struct u64_stats_sync syncp;
 };
 
 struct mana_stats_tx {
 	u64 packets;
 	u64 bytes;
+	u64 xdp_xmit;
 	struct u64_stats_sync syncp;
 };
 
···
 	struct bpf_prog __rcu *bpf_prog;
 	struct xdp_rxq_info xdp_rxq;
 	struct page *xdp_save_page;
+	bool xdp_flush;
+	int xdp_rc; /* XDP redirect return code */
 
 	/* MUST BE THE LAST MEMBER:
 	 * Each receive buffer has an associated mana_recv_buf_oob.
···
 	unsigned int num_queues;
 
 	mana_handle_t port_handle;
+	mana_handle_t pf_filter_handle;
 
 	u16 port_idx;
···
 void mana_remove(struct gdma_dev *gd, bool suspending);
 
 void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev);
+int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
+		  u32 flags);
 u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
		 struct xdp_buff *xdp, void *buf_va, uint pkt_len);
 struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
···
 	MANA_FENCE_RQ		= 0x20006,
 	MANA_CONFIG_VPORT_RX	= 0x20007,
 	MANA_QUERY_VPORT_CONFIG	= 0x20008,
+
+	/* Privileged commands for the PF mode */
+	MANA_REGISTER_FILTER	= 0x28000,
+	MANA_DEREGISTER_FILTER	= 0x28001,
+	MANA_REGISTER_HW_PORT	= 0x28003,
+	MANA_DEREGISTER_HW_PORT	= 0x28004,
 };
 
 /* Query Device Configuration */
···
 }; /* HW DATA */
 
 struct mana_cfg_rx_steer_resp {
+	struct gdma_resp_hdr hdr;
+}; /* HW DATA */
+
+/* Register HW vPort */
+struct mana_register_hw_vport_req {
+	struct gdma_req_hdr hdr;
+	u16 attached_gfid;
+	u8 is_pf_default_vport;
+	u8 reserved1;
+	u8 allow_all_ether_types;
+	u8 reserved2;
+	u8 reserved3;
+	u8 reserved4;
+}; /* HW DATA */
+
+struct mana_register_hw_vport_resp {
+	struct gdma_resp_hdr hdr;
+	mana_handle_t hw_vport_handle;
+}; /* HW DATA */
+
+/* Deregister HW vPort */
+struct mana_deregister_hw_vport_req {
+	struct gdma_req_hdr hdr;
+	mana_handle_t hw_vport_handle;
+}; /* HW DATA */
+
+struct mana_deregister_hw_vport_resp {
+	struct gdma_resp_hdr hdr;
+}; /* HW DATA */
+
+/* Register filter */
+struct mana_register_filter_req {
+	struct gdma_req_hdr hdr;
+	mana_handle_t vport;
+	u8 mac_addr[6];
+	u8 reserved1;
+	u8 reserved2;
+	u8 reserved3;
+	u8 reserved4;
+	u16 reserved5;
+	u32 reserved6;
+	u32 reserved7;
+	u32 reserved8;
+}; /* HW DATA */
+
+struct mana_register_filter_resp {
+	struct gdma_resp_hdr hdr;
+	mana_handle_t filter_handle;
+}; /* HW DATA */
+
+/* Deregister filter */
+struct mana_deregister_filter_req {
+	struct gdma_req_hdr hdr;
+	mana_handle_t filter_handle;
+}; /* HW DATA */
+
+struct mana_deregister_filter_resp {
 	struct gdma_resp_hdr hdr;
 }; /* HW DATA */
+64
drivers/net/ethernet/microsoft/mana/mana_bpf.c
···
 	ndev->stats.tx_dropped++;
 }
 
+static int mana_xdp_xmit_fm(struct net_device *ndev, struct xdp_frame *frame,
+			    u16 q_idx)
+{
+	struct sk_buff *skb;
+
+	skb = xdp_build_skb_from_frame(frame, ndev);
+	if (unlikely(!skb))
+		return -ENOMEM;
+
+	skb_set_queue_mapping(skb, q_idx);
+
+	mana_xdp_tx(skb, ndev);
+
+	return 0;
+}
+
+int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
+		  u32 flags)
+{
+	struct mana_port_context *apc = netdev_priv(ndev);
+	struct mana_stats_tx *tx_stats;
+	int i, count = 0;
+	u16 q_idx;
+
+	if (unlikely(!apc->port_is_up))
+		return 0;
+
+	q_idx = smp_processor_id() % ndev->real_num_tx_queues;
+
+	for (i = 0; i < n; i++) {
+		if (mana_xdp_xmit_fm(ndev, frames[i], q_idx))
+			break;
+
+		count++;
+	}
+
+	tx_stats = &apc->tx_qp[q_idx].txq.stats;
+
+	u64_stats_update_begin(&tx_stats->syncp);
+	tx_stats->xdp_xmit += count;
+	u64_stats_update_end(&tx_stats->syncp);
+
+	return count;
+}
+
 u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
		 struct xdp_buff *xdp, void *buf_va, uint pkt_len)
 {
+	struct mana_stats_rx *rx_stats;
 	struct bpf_prog *prog;
 	u32 act = XDP_PASS;
 
···
 
 	act = bpf_prog_run_xdp(prog, xdp);
 
+	rx_stats = &rxq->stats;
+
 	switch (act) {
 	case XDP_PASS:
 	case XDP_TX:
 	case XDP_DROP:
 		break;
+
+	case XDP_REDIRECT:
+		rxq->xdp_rc = xdp_do_redirect(ndev, xdp, prog);
+		if (!rxq->xdp_rc) {
+			rxq->xdp_flush = true;
+
+			u64_stats_update_begin(&rx_stats->syncp);
+			rx_stats->packets++;
+			rx_stats->bytes += pkt_len;
+			rx_stats->xdp_redirect++;
+			u64_stats_update_end(&rx_stats->syncp);
+
+			break;
+		}
+
+		fallthrough;
 
 	case XDP_ABORTED:
 		trace_xdp_exception(ndev, prog, act);
+147 -1
drivers/net/ethernet/microsoft/mana/mana_en.c
···
 #include <linux/inetdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
+#include <linux/filter.h>
 #include <linux/mm.h>
 
 #include <net/checksum.h>
···
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_get_stats64	= mana_get_stats64,
	.ndo_bpf		= mana_bpf,
+	.ndo_xdp_xmit		= mana_xdp_xmit,
 };
 
 static void mana_cleanup_port_context(struct mana_port_context *apc)
···
 		return -EPROTO;
 
 	return 0;
+}
+
+static int mana_pf_register_hw_vport(struct mana_port_context *apc)
+{
+	struct mana_register_hw_vport_resp resp = {};
+	struct mana_register_hw_vport_req req = {};
+	int err;
+
+	mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_HW_PORT,
+			     sizeof(req), sizeof(resp));
+	req.attached_gfid = 1;
+	req.is_pf_default_vport = 1;
+	req.allow_all_ether_types = 1;
+
+	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+				sizeof(resp));
+	if (err) {
+		netdev_err(apc->ndev, "Failed to register hw vPort: %d\n", err);
+		return err;
+	}
+
+	err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_HW_PORT,
+				   sizeof(resp));
+	if (err || resp.hdr.status) {
+		netdev_err(apc->ndev, "Failed to register hw vPort: %d, 0x%x\n",
+			   err, resp.hdr.status);
+		return err ? err : -EPROTO;
+	}
+
+	apc->port_handle = resp.hw_vport_handle;
+	return 0;
+}
+
+static void mana_pf_deregister_hw_vport(struct mana_port_context *apc)
+{
+	struct mana_deregister_hw_vport_resp resp = {};
+	struct mana_deregister_hw_vport_req req = {};
+	int err;
+
+	mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_HW_PORT,
+			     sizeof(req), sizeof(resp));
+	req.hw_vport_handle = apc->port_handle;
+
+	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+				sizeof(resp));
+	if (err) {
+		netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n",
+			   err);
+		return;
+	}
+
+	err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_HW_PORT,
+				   sizeof(resp));
+	if (err || resp.hdr.status)
+		netdev_err(apc->ndev,
+			   "Failed to deregister hw vPort: %d, 0x%x\n",
+			   err, resp.hdr.status);
+}
+
+static int mana_pf_register_filter(struct mana_port_context *apc)
+{
+	struct mana_register_filter_resp resp = {};
+	struct mana_register_filter_req req = {};
+	int err;
+
+	mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_FILTER,
+			     sizeof(req), sizeof(resp));
+	req.vport = apc->port_handle;
+	memcpy(req.mac_addr, apc->mac_addr, ETH_ALEN);
+
+	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+				sizeof(resp));
+	if (err) {
+		netdev_err(apc->ndev, "Failed to register filter: %d\n", err);
+		return err;
+	}
+
+	err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_FILTER,
+				   sizeof(resp));
+	if (err || resp.hdr.status) {
+		netdev_err(apc->ndev, "Failed to register filter: %d, 0x%x\n",
+			   err, resp.hdr.status);
+		return err ? err : -EPROTO;
+	}
+
+	apc->pf_filter_handle = resp.filter_handle;
+	return 0;
+}
+
+static void mana_pf_deregister_filter(struct mana_port_context *apc)
+{
+	struct mana_deregister_filter_resp resp = {};
+	struct mana_deregister_filter_req req = {};
+	int err;
+
+	mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_FILTER,
+			     sizeof(req), sizeof(resp));
+	req.filter_handle = apc->pf_filter_handle;
+
+	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+				sizeof(resp));
+	if (err) {
+		netdev_err(apc->ndev, "Failed to unregister filter: %d\n",
+			   err);
+		return;
+	}
+
+	err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_FILTER,
+				   sizeof(resp));
+	if (err || resp.hdr.status)
+		netdev_err(apc->ndev,
+			   "Failed to deregister filter: %d, 0x%x\n",
+			   err, resp.hdr.status);
 }
 
 static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
···
 
 	act = mana_run_xdp(ndev, rxq, &xdp, buf_va, pkt_len);
 
+	if (act == XDP_REDIRECT && !rxq->xdp_rc)
+		return;
+
 	if (act != XDP_PASS && act != XDP_TX)
 		goto drop_xdp;
 
···
 static void mana_poll_rx_cq(struct mana_cq *cq)
 {
 	struct gdma_comp *comp = cq->gdma_comp_buf;
+	struct mana_rxq *rxq = cq->rxq;
 	int comp_read, i;
 
 	comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
 	WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER);
+
+	rxq->xdp_flush = false;
 
 	for (i = 0; i < comp_read; i++) {
 		if (WARN_ON_ONCE(comp[i].is_sq))
···
 		if (WARN_ON_ONCE(comp[i].wq_num != cq->rxq->gdma_id))
 			return;
 
-		mana_process_rx_cqe(cq->rxq, cq, &comp[i]);
+		mana_process_rx_cqe(rxq, cq, &comp[i]);
 	}
+
+	if (rxq->xdp_flush)
+		xdp_do_flush();
 }
 
 static void mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
···
 
 static void mana_destroy_vport(struct mana_port_context *apc)
 {
+	struct gdma_dev *gd = apc->ac->gdma_dev;
 	struct mana_rxq *rxq;
 	u32 rxq_idx;
 
···
 	}
 
 	mana_destroy_txq(apc);
+
+	if (gd->gdma_context->is_pf)
+		mana_pf_deregister_hw_vport(apc);
 }
 
 static int mana_create_vport(struct mana_port_context *apc,
···
 	int err;
 
 	apc->default_rxobj = INVALID_MANA_HANDLE;
+
+	if (gd->gdma_context->is_pf) {
+		err = mana_pf_register_hw_vport(apc);
+		if (err)
+			return err;
+	}
 
 	err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
 	if (err)
···
 int mana_alloc_queues(struct net_device *ndev)
 {
 	struct mana_port_context *apc = netdev_priv(ndev);
+	struct gdma_dev *gd = apc->ac->gdma_dev;
 	int err;
 
 	err = mana_create_vport(apc, ndev);
···
 	err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
 	if (err)
 		goto destroy_vport;
+
+	if (gd->gdma_context->is_pf) {
+		err = mana_pf_register_filter(apc);
+		if (err)
+			goto destroy_vport;
+	}
 
 	mana_chn_setxdp(apc, mana_xdp_get(apc));
 
···
 static int mana_dealloc_queues(struct net_device *ndev)
 {
 	struct mana_port_context *apc = netdev_priv(ndev);
+	struct gdma_dev *gd = apc->ac->gdma_dev;
 	struct mana_txq *txq;
 	int i, err;
 
···
 		return -EINVAL;
 
 	mana_chn_setxdp(apc, NULL);
+
+	if (gd->gdma_context->is_pf)
+		mana_pf_deregister_filter(apc);
 
 	/* No packet can be transmitted now since apc->port_is_up is false.
	 * There is still a tiny chance that mana_poll_tx_cq() can re-enable
···
 	apc->max_queues = gc->max_num_queues;
 	apc->num_queues = gc->max_num_queues;
 	apc->port_handle = INVALID_MANA_HANDLE;
+	apc->pf_filter_handle = INVALID_MANA_HANDLE;
 	apc->port_idx = port_idx;
 
 	ndev->netdev_ops = &mana_devops;
+11 -1
drivers/net/ethernet/microsoft/mana/mana_ethtool.c
···
 	if (stringset != ETH_SS_STATS)
 		return -EINVAL;
 
-	return ARRAY_SIZE(mana_eth_stats) + num_queues * 6;
+	return ARRAY_SIZE(mana_eth_stats) + num_queues * 8;
 }
 
 static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
···
 		p += ETH_GSTRING_LEN;
 		sprintf(p, "rx_%d_xdp_tx", i);
 		p += ETH_GSTRING_LEN;
+		sprintf(p, "rx_%d_xdp_redirect", i);
+		p += ETH_GSTRING_LEN;
 	}
 
 	for (i = 0; i < num_queues; i++) {
 		sprintf(p, "tx_%d_packets", i);
 		p += ETH_GSTRING_LEN;
 		sprintf(p, "tx_%d_bytes", i);
+		p += ETH_GSTRING_LEN;
+		sprintf(p, "tx_%d_xdp_xmit", i);
 		p += ETH_GSTRING_LEN;
 	}
 }
···
 	struct mana_stats_tx *tx_stats;
 	unsigned int start;
 	u64 packets, bytes;
+	u64 xdp_redirect;
+	u64 xdp_xmit;
 	u64 xdp_drop;
 	u64 xdp_tx;
 	int q, i = 0;
···
 			bytes = rx_stats->bytes;
 			xdp_drop = rx_stats->xdp_drop;
 			xdp_tx = rx_stats->xdp_tx;
+			xdp_redirect = rx_stats->xdp_redirect;
 		} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
 
 		data[i++] = packets;
 		data[i++] = bytes;
 		data[i++] = xdp_drop;
 		data[i++] = xdp_tx;
+		data[i++] = xdp_redirect;
 	}
 
 	for (q = 0; q < num_queues; q++) {
···
 			start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
 			packets = tx_stats->packets;
 			bytes = tx_stats->bytes;
+			xdp_xmit = tx_stats->xdp_xmit;
 		} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
 
 		data[i++] = packets;
 		data[i++] = bytes;
+		data[i++] = xdp_xmit;
 	}
 }