Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'mana-shared-6.2' of https://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Long Li says:

====================
Introduce Microsoft Azure Network Adapter (MANA) RDMA driver [netdev prep]

The first 11 patches modify the MANA Ethernet driver to support the
RDMA driver.

* 'mana-shared-6.2' of https://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
net: mana: Define data structures for protection domain and memory registration
net: mana: Define data structures for allocating doorbell page from GDMA
net: mana: Define and process GDMA response code GDMA_STATUS_MORE_ENTRIES
net: mana: Define max values for SGL entries
net: mana: Move header files to a common location
net: mana: Record port number in netdev
net: mana: Export Work Queue functions for use by RDMA driver
net: mana: Set the DMA device max segment size
net: mana: Handle vport sharing between devices
net: mana: Record the physical address for doorbell page region
net: mana: Add support for auxiliary device
====================

Link: https://lore.kernel.org/all/1667502990-2559-1-git-send-email-longli@linuxonhyperv.com/
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+372 -45
+1
MAINTAINERS
··· 9543 9543 F: include/asm-generic/mshyperv.h 9544 9544 F: include/clocksource/hyperv_timer.h 9545 9545 F: include/linux/hyperv.h 9546 + F: include/net/mana 9546 9547 F: include/uapi/linux/hyperv.h 9547 9548 F: net/vmw_vsock/hyperv_transport.c 9548 9549 F: tools/hv/
+1
drivers/net/ethernet/microsoft/Kconfig
··· 19 19 tristate "Microsoft Azure Network Adapter (MANA) support" 20 20 depends on PCI_MSI && X86_64 21 21 depends on PCI_HYPERV 22 + select AUXILIARY_BUS 22 23 help 23 24 This driver supports Microsoft Azure Network Adapter (MANA). 24 25 So far, the driver is only supported on X86_64.
+153 -5
drivers/net/ethernet/microsoft/mana/gdma.h include/net/mana/gdma.h
··· 9 9 10 10 #include "shm_channel.h" 11 11 12 + #define GDMA_STATUS_MORE_ENTRIES 0x00000105 13 + 12 14 /* Structures labeled with "HW DATA" are exchanged with the hardware. All of 13 15 * them are naturally aligned and hence don't need __packed. 14 16 */ ··· 24 22 GDMA_GENERATE_TEST_EQE = 10, 25 23 GDMA_CREATE_QUEUE = 12, 26 24 GDMA_DISABLE_QUEUE = 13, 25 + GDMA_ALLOCATE_RESOURCE_RANGE = 22, 26 + GDMA_DESTROY_RESOURCE_RANGE = 24, 27 27 GDMA_CREATE_DMA_REGION = 25, 28 28 GDMA_DMA_REGION_ADD_PAGES = 26, 29 29 GDMA_DESTROY_DMA_REGION = 27, 30 + GDMA_CREATE_PD = 29, 31 + GDMA_DESTROY_PD = 30, 32 + GDMA_CREATE_MR = 31, 33 + GDMA_DESTROY_MR = 32, 30 34 }; 35 + 36 + #define GDMA_RESOURCE_DOORBELL_PAGE 27 31 37 32 38 enum gdma_queue_type { 33 39 GDMA_INVALID_QUEUE, ··· 64 54 GDMA_DEVICE_HWC = 1, 65 55 GDMA_DEVICE_MANA = 2, 66 56 }; 57 + 58 + typedef u64 gdma_obj_handle_t; 67 59 68 60 struct gdma_resource { 69 61 /* Protect the bitmap */ ··· 200 188 u64 length; 201 189 202 190 /* Allocated by the PF driver */ 203 - u64 gdma_region; 191 + gdma_obj_handle_t dma_region_handle; 204 192 }; 205 193 206 194 #define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8 ··· 216 204 217 205 /* GDMA driver specific pointer */ 218 206 void *driver_data; 207 + 208 + struct auxiliary_device *adev; 219 209 }; 220 210 221 211 #define MINIMUM_SUPPORTED_PAGE_SIZE PAGE_SIZE ··· 363 349 u32 test_event_eq_id; 364 350 365 351 bool is_pf; 352 + phys_addr_t bar0_pa; 366 353 void __iomem *bar0_va; 367 354 void __iomem *shm_base; 368 355 void __iomem *db_page_base; 356 + phys_addr_t phys_db_page_base; 369 357 u32 db_page_size; 370 358 int numa_node; 371 359 ··· 439 423 440 424 #define MAX_TX_WQE_SIZE 512 441 425 #define MAX_RX_WQE_SIZE 256 426 + 427 + #define MAX_TX_WQE_SGL_ENTRIES ((GDMA_MAX_SQE_SIZE - \ 428 + sizeof(struct gdma_sge) - INLINE_OOB_SMALL_SIZE) / \ 429 + sizeof(struct gdma_sge)) 430 + 431 + #define MAX_RX_WQE_SGL_ENTRIES ((GDMA_MAX_RQE_SIZE - \ 432 + sizeof(struct gdma_sge)) / sizeof(struct gdma_sge)) 
442 433 443 434 struct gdma_cqe { 444 435 u32 cqe_data[GDMA_COMP_DATA_SIZE / 4]; ··· 598 575 u32 db_id; 599 576 }; /* HW DATA */ 600 577 578 + struct gdma_allocate_resource_range_req { 579 + struct gdma_req_hdr hdr; 580 + u32 resource_type; 581 + u32 num_resources; 582 + u32 alignment; 583 + u32 allocated_resources; 584 + }; 585 + 586 + struct gdma_allocate_resource_range_resp { 587 + struct gdma_resp_hdr hdr; 588 + u32 allocated_resources; 589 + }; 590 + 591 + struct gdma_destroy_resource_range_req { 592 + struct gdma_req_hdr hdr; 593 + u32 resource_type; 594 + u32 num_resources; 595 + u32 allocated_resources; 596 + }; 597 + 601 598 /* GDMA_CREATE_QUEUE */ 602 599 struct gdma_create_queue_req { 603 600 struct gdma_req_hdr hdr; ··· 625 582 u32 reserved1; 626 583 u32 pdid; 627 584 u32 doolbell_id; 628 - u64 gdma_region; 585 + gdma_obj_handle_t gdma_region; 629 586 u32 reserved2; 630 587 u32 queue_size; 631 588 u32 log2_throttle_limit; ··· 651 608 u32 queue_index; 652 609 u32 alloc_res_id_on_creation; 653 610 }; /* HW DATA */ 611 + 612 + enum atb_page_size { 613 + ATB_PAGE_SIZE_4K, 614 + ATB_PAGE_SIZE_8K, 615 + ATB_PAGE_SIZE_16K, 616 + ATB_PAGE_SIZE_32K, 617 + ATB_PAGE_SIZE_64K, 618 + ATB_PAGE_SIZE_128K, 619 + ATB_PAGE_SIZE_256K, 620 + ATB_PAGE_SIZE_512K, 621 + ATB_PAGE_SIZE_1M, 622 + ATB_PAGE_SIZE_2M, 623 + ATB_PAGE_SIZE_MAX, 624 + }; 625 + 626 + enum gdma_mr_access_flags { 627 + GDMA_ACCESS_FLAG_LOCAL_READ = BIT_ULL(0), 628 + GDMA_ACCESS_FLAG_LOCAL_WRITE = BIT_ULL(1), 629 + GDMA_ACCESS_FLAG_REMOTE_READ = BIT_ULL(2), 630 + GDMA_ACCESS_FLAG_REMOTE_WRITE = BIT_ULL(3), 631 + GDMA_ACCESS_FLAG_REMOTE_ATOMIC = BIT_ULL(4), 632 + }; 654 633 655 634 /* GDMA_CREATE_DMA_REGION */ 656 635 struct gdma_create_dma_region_req { ··· 700 635 701 636 struct gdma_create_dma_region_resp { 702 637 struct gdma_resp_hdr hdr; 703 - u64 gdma_region; 638 + gdma_obj_handle_t dma_region_handle; 704 639 }; /* HW DATA */ 705 640 706 641 /* GDMA_DMA_REGION_ADD_PAGES */ 707 642 struct 
gdma_dma_region_add_pages_req { 708 643 struct gdma_req_hdr hdr; 709 644 710 - u64 gdma_region; 645 + gdma_obj_handle_t dma_region_handle; 711 646 712 647 u32 page_addr_list_len; 713 648 u32 reserved3; ··· 719 654 struct gdma_destroy_dma_region_req { 720 655 struct gdma_req_hdr hdr; 721 656 722 - u64 gdma_region; 657 + gdma_obj_handle_t dma_region_handle; 723 658 }; /* HW DATA */ 659 + 660 + enum gdma_pd_flags { 661 + GDMA_PD_FLAG_INVALID = 0, 662 + }; 663 + 664 + struct gdma_create_pd_req { 665 + struct gdma_req_hdr hdr; 666 + enum gdma_pd_flags flags; 667 + u32 reserved; 668 + };/* HW DATA */ 669 + 670 + struct gdma_create_pd_resp { 671 + struct gdma_resp_hdr hdr; 672 + gdma_obj_handle_t pd_handle; 673 + u32 pd_id; 674 + u32 reserved; 675 + };/* HW DATA */ 676 + 677 + struct gdma_destroy_pd_req { 678 + struct gdma_req_hdr hdr; 679 + gdma_obj_handle_t pd_handle; 680 + };/* HW DATA */ 681 + 682 + struct gdma_destory_pd_resp { 683 + struct gdma_resp_hdr hdr; 684 + };/* HW DATA */ 685 + 686 + enum gdma_mr_type { 687 + /* Guest Virtual Address - MRs of this type allow access 688 + * to memory mapped by PTEs associated with this MR using a virtual 689 + * address that is set up in the MST 690 + */ 691 + GDMA_MR_TYPE_GVA = 2, 692 + }; 693 + 694 + struct gdma_create_mr_params { 695 + gdma_obj_handle_t pd_handle; 696 + enum gdma_mr_type mr_type; 697 + union { 698 + struct { 699 + gdma_obj_handle_t dma_region_handle; 700 + u64 virtual_address; 701 + enum gdma_mr_access_flags access_flags; 702 + } gva; 703 + }; 704 + }; 705 + 706 + struct gdma_create_mr_request { 707 + struct gdma_req_hdr hdr; 708 + gdma_obj_handle_t pd_handle; 709 + enum gdma_mr_type mr_type; 710 + u32 reserved_1; 711 + 712 + union { 713 + struct { 714 + gdma_obj_handle_t dma_region_handle; 715 + u64 virtual_address; 716 + enum gdma_mr_access_flags access_flags; 717 + } gva; 718 + 719 + }; 720 + u32 reserved_2; 721 + };/* HW DATA */ 722 + 723 + struct gdma_create_mr_response { 724 + struct gdma_resp_hdr 
hdr; 725 + gdma_obj_handle_t mr_handle; 726 + u32 lkey; 727 + u32 rkey; 728 + };/* HW DATA */ 729 + 730 + struct gdma_destroy_mr_request { 731 + struct gdma_req_hdr hdr; 732 + gdma_obj_handle_t mr_handle; 733 + };/* HW DATA */ 734 + 735 + struct gdma_destroy_mr_response { 736 + struct gdma_resp_hdr hdr; 737 + };/* HW DATA */ 724 738 725 739 int mana_gd_verify_vf_version(struct pci_dev *pdev); 726 740 ··· 827 683 828 684 int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req, 829 685 u32 resp_len, void *resp); 686 + 687 + int mana_gd_destroy_dma_region(struct gdma_context *gc, 688 + gdma_obj_handle_t dma_region_handle); 689 + 830 690 #endif /* _GDMA_H */
+29 -11
drivers/net/ethernet/microsoft/mana/gdma_main.c
··· 6 6 #include <linux/utsname.h> 7 7 #include <linux/version.h> 8 8 9 - #include "mana.h" 9 + #include <net/mana/mana.h> 10 10 11 11 static u32 mana_gd_r32(struct gdma_context *g, u64 offset) 12 12 { ··· 42 42 gc->db_page_size = mana_gd_r32(gc, GDMA_REG_DB_PAGE_SIZE) & 0xFFFF; 43 43 44 44 gc->db_page_base = gc->bar0_va + 45 + mana_gd_r64(gc, GDMA_REG_DB_PAGE_OFFSET); 46 + 47 + gc->phys_db_page_base = gc->bar0_pa + 45 48 mana_gd_r64(gc, GDMA_REG_DB_PAGE_OFFSET); 46 49 47 50 gc->shm_base = gc->bar0_va + mana_gd_r64(gc, GDMA_REG_SHM_OFFSET); ··· 152 149 153 150 return mana_hwc_send_request(hwc, req_len, req, resp_len, resp); 154 151 } 152 + EXPORT_SYMBOL_NS(mana_gd_send_request, NET_MANA); 155 153 156 154 int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length, 157 155 struct gdma_mem_info *gmi) ··· 198 194 req.type = queue->type; 199 195 req.pdid = queue->gdma_dev->pdid; 200 196 req.doolbell_id = queue->gdma_dev->doorbell; 201 - req.gdma_region = queue->mem_info.gdma_region; 197 + req.gdma_region = queue->mem_info.dma_region_handle; 202 198 req.queue_size = queue->queue_size; 203 199 req.log2_throttle_limit = queue->eq.log2_throttle_limit; 204 200 req.eq_pci_msix_index = queue->eq.msix_index; ··· 212 208 213 209 queue->id = resp.queue_index; 214 210 queue->eq.disable_needed = true; 215 - queue->mem_info.gdma_region = GDMA_INVALID_DMA_REGION; 211 + queue->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION; 216 212 return 0; 217 213 } 218 214 ··· 671 667 return err; 672 668 } 673 669 674 - static void mana_gd_destroy_dma_region(struct gdma_context *gc, u64 gdma_region) 670 + int mana_gd_destroy_dma_region(struct gdma_context *gc, 671 + gdma_obj_handle_t dma_region_handle) 675 672 { 676 673 struct gdma_destroy_dma_region_req req = {}; 677 674 struct gdma_general_resp resp = {}; 678 675 int err; 679 676 680 - if (gdma_region == GDMA_INVALID_DMA_REGION) 681 - return; 677 + if (dma_region_handle == GDMA_INVALID_DMA_REGION) 678 + return 0; 682 679 683 680 
mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_DMA_REGION, sizeof(req), 684 681 sizeof(resp)); 685 - req.gdma_region = gdma_region; 682 + req.dma_region_handle = dma_region_handle; 686 683 687 684 err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); 688 - if (err || resp.hdr.status) 685 + if (err || resp.hdr.status) { 689 686 dev_err(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n", 690 687 err, resp.hdr.status); 688 + return -EPROTO; 689 + } 690 + 691 + return 0; 691 692 } 693 + EXPORT_SYMBOL_NS(mana_gd_destroy_dma_region, NET_MANA); 692 694 693 695 static int mana_gd_create_dma_region(struct gdma_dev *gd, 694 696 struct gdma_mem_info *gmi) ··· 739 729 if (err) 740 730 goto out; 741 731 742 - if (resp.hdr.status || resp.gdma_region == GDMA_INVALID_DMA_REGION) { 732 + if (resp.hdr.status || 733 + resp.dma_region_handle == GDMA_INVALID_DMA_REGION) { 743 734 dev_err(gc->dev, "Failed to create DMA region: 0x%x\n", 744 735 resp.hdr.status); 745 736 err = -EPROTO; 746 737 goto out; 747 738 } 748 739 749 - gmi->gdma_region = resp.gdma_region; 740 + gmi->dma_region_handle = resp.dma_region_handle; 750 741 out: 751 742 kfree(req); 752 743 return err; ··· 870 859 return; 871 860 } 872 861 873 - mana_gd_destroy_dma_region(gc, gmi->gdma_region); 862 + mana_gd_destroy_dma_region(gc, gmi->dma_region_handle); 874 863 mana_gd_free_memory(gmi); 875 864 kfree(queue); 876 865 } ··· 1404 1393 if (err) 1405 1394 goto release_region; 1406 1395 1396 + err = dma_set_max_seg_size(&pdev->dev, UINT_MAX); 1397 + if (err) { 1398 + dev_err(&pdev->dev, "Failed to set dma device segment size\n"); 1399 + goto release_region; 1400 + } 1401 + 1407 1402 err = -ENOMEM; 1408 1403 gc = vzalloc(sizeof(*gc)); 1409 1404 if (!gc) ··· 1417 1400 1418 1401 mutex_init(&gc->eq_test_event_mutex); 1419 1402 pci_set_drvdata(pdev, gc); 1403 + gc->bar0_pa = pci_resource_start(pdev, 0); 1420 1404 1421 1405 bar0_va = pci_iomap(pdev, bar, 0); 1422 1406 if (!bar0_va)
+3 -3
drivers/net/ethernet/microsoft/mana/hw_channel.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 2 /* Copyright (c) 2021, Microsoft Corporation. */ 3 3 4 - #include "gdma.h" 5 - #include "hw_channel.h" 4 + #include <net/mana/gdma.h> 5 + #include <net/mana/hw_channel.h> 6 6 7 7 static int mana_hwc_get_msg_index(struct hw_channel_context *hwc, u16 *msg_id) 8 8 { ··· 836 836 goto out; 837 837 } 838 838 839 - if (ctx->status_code) { 839 + if (ctx->status_code && ctx->status_code != GDMA_STATUS_MORE_ENTRIES) { 840 840 dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n", 841 841 ctx->status_code); 842 842 err = -EPROTO;
drivers/net/ethernet/microsoft/mana/hw_channel.h include/net/mana/hw_channel.h
+17 -3
drivers/net/ethernet/microsoft/mana/mana.h include/net/mana/mana.h
··· 265 265 int budget; 266 266 }; 267 267 268 - #define GDMA_MAX_RQE_SGES 15 269 - 270 268 struct mana_recv_buf_oob { 271 269 /* A valid GDMA work request representing the data buffer. */ 272 270 struct gdma_wqe_request wqe_req; ··· 274 276 275 277 /* SGL of the buffer going to be sent has part of the work request. */ 276 278 u32 num_sge; 277 - struct gdma_sge sgl[GDMA_MAX_RQE_SGES]; 279 + struct gdma_sge sgl[MAX_RX_WQE_SGL_ENTRIES]; 278 280 279 281 /* Required to store the result of mana_gd_post_work_request. 280 282 * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the ··· 377 379 378 380 mana_handle_t port_handle; 379 381 mana_handle_t pf_filter_handle; 382 + 383 + /* Mutex for sharing access to vport_use_count */ 384 + struct mutex vport_mutex; 385 + int vport_use_count; 380 386 381 387 u16 port_idx; 382 388 ··· 633 631 struct gdma_posted_wqe_info wqe_info; 634 632 }; 635 633 634 + int mana_create_wq_obj(struct mana_port_context *apc, 635 + mana_handle_t vport, 636 + u32 wq_type, struct mana_obj_spec *wq_spec, 637 + struct mana_obj_spec *cq_spec, 638 + mana_handle_t *wq_obj); 639 + 640 + void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type, 641 + mana_handle_t wq_obj); 642 + 643 + int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id, 644 + u32 doorbell_pg_id); 645 + void mana_uncfg_vport(struct mana_port_context *apc); 636 646 #endif /* _MANA_H */
+1 -1
drivers/net/ethernet/microsoft/mana/mana_bpf.c
··· 8 8 #include <linux/bpf_trace.h> 9 9 #include <net/xdp.h> 10 10 11 - #include "mana.h" 11 + #include <net/mana/mana.h> 12 12 13 13 void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev) 14 14 {
+155 -20
drivers/net/ethernet/microsoft/mana/mana_en.c
··· 12 12 #include <net/checksum.h> 13 13 #include <net/ip6_checksum.h> 14 14 15 - #include "mana.h" 15 + #include <net/mana/mana.h> 16 + #include <net/mana/mana_auxiliary.h> 17 + 18 + static DEFINE_IDA(mana_adev_ida); 19 + 20 + static int mana_adev_idx_alloc(void) 21 + { 22 + return ida_alloc(&mana_adev_ida, GFP_KERNEL); 23 + } 24 + 25 + static void mana_adev_idx_free(int idx) 26 + { 27 + ida_free(&mana_adev_ida, idx); 28 + } 16 29 17 30 /* Microsoft Azure Network Adapter (MANA) functions */ 18 31 ··· 189 176 pkg.wqe_req.client_data_unit = 0; 190 177 191 178 pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags; 192 - WARN_ON_ONCE(pkg.wqe_req.num_sge > 30); 179 + WARN_ON_ONCE(pkg.wqe_req.num_sge > MAX_TX_WQE_SGL_ENTRIES); 193 180 194 181 if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) { 195 182 pkg.wqe_req.sgl = pkg.sgl_array; ··· 646 633 return 0; 647 634 } 648 635 649 - static int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id, 650 - u32 doorbell_pg_id) 636 + void mana_uncfg_vport(struct mana_port_context *apc) 637 + { 638 + mutex_lock(&apc->vport_mutex); 639 + apc->vport_use_count--; 640 + WARN_ON(apc->vport_use_count < 0); 641 + mutex_unlock(&apc->vport_mutex); 642 + } 643 + EXPORT_SYMBOL_NS(mana_uncfg_vport, NET_MANA); 644 + 645 + int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id, 646 + u32 doorbell_pg_id) 651 647 { 652 648 struct mana_config_vport_resp resp = {}; 653 649 struct mana_config_vport_req req = {}; 654 650 int err; 651 + 652 + /* This function is used to program the Ethernet port in the hardware 653 + * table. It can be called from the Ethernet driver or the RDMA driver. 654 + * 655 + * For Ethernet usage, the hardware supports only one active user on a 656 + * physical port. The driver checks on the port usage before programming 657 + * the hardware when creating the RAW QP (RDMA driver) or exposing the 658 + * device to kernel NET layer (Ethernet driver). 
659 + * 660 + * Because the RDMA driver doesn't know in advance which QP type the 661 + * user will create, it exposes the device with all its ports. The user 662 + * may not be able to create RAW QP on a port if this port is already 663 + * in used by the Ethernet driver from the kernel. 664 + * 665 + * This physical port limitation only applies to the RAW QP. For RC QP, 666 + * the hardware doesn't have this limitation. The user can create RC 667 + * QPs on a physical port up to the hardware limits independent of the 668 + * Ethernet usage on the same port. 669 + */ 670 + mutex_lock(&apc->vport_mutex); 671 + if (apc->vport_use_count > 0) { 672 + mutex_unlock(&apc->vport_mutex); 673 + return -EBUSY; 674 + } 675 + apc->vport_use_count++; 676 + mutex_unlock(&apc->vport_mutex); 655 677 656 678 mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX, 657 679 sizeof(req), sizeof(resp)); ··· 714 666 715 667 apc->tx_shortform_allowed = resp.short_form_allowed; 716 668 apc->tx_vp_offset = resp.tx_vport_offset; 669 + 670 + netdev_info(apc->ndev, "Configured vPort %llu PD %u DB %u\n", 671 + apc->port_handle, protection_dom_id, doorbell_pg_id); 717 672 out: 673 + if (err) 674 + mana_uncfg_vport(apc); 675 + 718 676 return err; 719 677 } 678 + EXPORT_SYMBOL_NS(mana_cfg_vport, NET_MANA); 720 679 721 680 static int mana_cfg_vport_steering(struct mana_port_context *apc, 722 681 enum TRI_STATE rx, ··· 784 729 resp.hdr.status); 785 730 err = -EPROTO; 786 731 } 732 + 733 + netdev_info(ndev, "Configured steering vPort %llu entries %u\n", 734 + apc->port_handle, num_entries); 787 735 out: 788 736 kfree(req); 789 737 return err; 790 738 } 791 739 792 - static int mana_create_wq_obj(struct mana_port_context *apc, 793 - mana_handle_t vport, 794 - u32 wq_type, struct mana_obj_spec *wq_spec, 795 - struct mana_obj_spec *cq_spec, 796 - mana_handle_t *wq_obj) 740 + int mana_create_wq_obj(struct mana_port_context *apc, 741 + mana_handle_t vport, 742 + u32 wq_type, struct mana_obj_spec *wq_spec, 
743 + struct mana_obj_spec *cq_spec, 744 + mana_handle_t *wq_obj) 797 745 { 798 746 struct mana_create_wqobj_resp resp = {}; 799 747 struct mana_create_wqobj_req req = {}; ··· 845 787 out: 846 788 return err; 847 789 } 790 + EXPORT_SYMBOL_NS(mana_create_wq_obj, NET_MANA); 848 791 849 - static void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type, 850 - mana_handle_t wq_obj) 792 + void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type, 793 + mana_handle_t wq_obj) 851 794 { 852 795 struct mana_destroy_wqobj_resp resp = {}; 853 796 struct mana_destroy_wqobj_req req = {}; ··· 873 814 netdev_err(ndev, "Failed to destroy WQ object: %d, 0x%x\n", err, 874 815 resp.hdr.status); 875 816 } 817 + EXPORT_SYMBOL_NS(mana_destroy_wq_obj, NET_MANA); 876 818 877 819 static void mana_destroy_eq(struct mana_context *ac) 878 820 { ··· 1523 1463 memset(&wq_spec, 0, sizeof(wq_spec)); 1524 1464 memset(&cq_spec, 0, sizeof(cq_spec)); 1525 1465 1526 - wq_spec.gdma_region = txq->gdma_sq->mem_info.gdma_region; 1466 + wq_spec.gdma_region = txq->gdma_sq->mem_info.dma_region_handle; 1527 1467 wq_spec.queue_size = txq->gdma_sq->queue_size; 1528 1468 1529 - cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region; 1469 + cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle; 1530 1470 cq_spec.queue_size = cq->gdma_cq->queue_size; 1531 1471 cq_spec.modr_ctx_id = 0; 1532 1472 cq_spec.attached_eq = cq->gdma_cq->cq.parent->id; ··· 1541 1481 txq->gdma_sq->id = wq_spec.queue_index; 1542 1482 cq->gdma_cq->id = cq_spec.queue_index; 1543 1483 1544 - txq->gdma_sq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION; 1545 - cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION; 1484 + txq->gdma_sq->mem_info.dma_region_handle = 1485 + GDMA_INVALID_DMA_REGION; 1486 + cq->gdma_cq->mem_info.dma_region_handle = 1487 + GDMA_INVALID_DMA_REGION; 1546 1488 1547 1489 txq->gdma_txq_id = txq->gdma_sq->id; 1548 1490 ··· 1755 1693 1756 1694 memset(&wq_spec, 0, sizeof(wq_spec)); 1757 1695 
memset(&cq_spec, 0, sizeof(cq_spec)); 1758 - wq_spec.gdma_region = rxq->gdma_rq->mem_info.gdma_region; 1696 + wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle; 1759 1697 wq_spec.queue_size = rxq->gdma_rq->queue_size; 1760 1698 1761 - cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region; 1699 + cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle; 1762 1700 cq_spec.queue_size = cq->gdma_cq->queue_size; 1763 1701 cq_spec.modr_ctx_id = 0; 1764 1702 cq_spec.attached_eq = cq->gdma_cq->cq.parent->id; ··· 1771 1709 rxq->gdma_rq->id = wq_spec.queue_index; 1772 1710 cq->gdma_cq->id = cq_spec.queue_index; 1773 1711 1774 - rxq->gdma_rq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION; 1775 - cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION; 1712 + rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION; 1713 + cq->gdma_cq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION; 1776 1714 1777 1715 rxq->gdma_id = rxq->gdma_rq->id; 1778 1716 cq->gdma_id = cq->gdma_cq->id; ··· 1853 1791 } 1854 1792 1855 1793 mana_destroy_txq(apc); 1794 + mana_uncfg_vport(apc); 1856 1795 1857 1796 if (gd->gdma_context->is_pf) 1858 1797 mana_pf_deregister_hw_vport(apc); ··· 2126 2063 apc->pf_filter_handle = INVALID_MANA_HANDLE; 2127 2064 apc->port_idx = port_idx; 2128 2065 2066 + mutex_init(&apc->vport_mutex); 2067 + apc->vport_use_count = 0; 2068 + 2129 2069 ndev->netdev_ops = &mana_devops; 2130 2070 ndev->ethtool_ops = &mana_ethtool_ops; 2131 2071 ndev->mtu = ETH_DATA_LEN; 2132 2072 ndev->max_mtu = ndev->mtu; 2133 2073 ndev->min_mtu = ndev->mtu; 2134 2074 ndev->needed_headroom = MANA_HEADROOM; 2075 + ndev->dev_port = port_idx; 2135 2076 SET_NETDEV_DEV(ndev, gc->dev); 2136 2077 2137 2078 netif_carrier_off(ndev); ··· 2171 2104 netdev_err(ndev, "Failed to probe vPort %d: %d\n", port_idx, err); 2172 2105 free_netdev(ndev); 2173 2106 return err; 2107 + } 2108 + 2109 + static void adev_release(struct device *dev) 2110 + { 2111 + struct mana_adev *madev = 
container_of(dev, struct mana_adev, adev.dev); 2112 + 2113 + kfree(madev); 2114 + } 2115 + 2116 + static void remove_adev(struct gdma_dev *gd) 2117 + { 2118 + struct auxiliary_device *adev = gd->adev; 2119 + int id = adev->id; 2120 + 2121 + auxiliary_device_delete(adev); 2122 + auxiliary_device_uninit(adev); 2123 + 2124 + mana_adev_idx_free(id); 2125 + gd->adev = NULL; 2126 + } 2127 + 2128 + static int add_adev(struct gdma_dev *gd) 2129 + { 2130 + struct auxiliary_device *adev; 2131 + struct mana_adev *madev; 2132 + int ret; 2133 + 2134 + madev = kzalloc(sizeof(*madev), GFP_KERNEL); 2135 + if (!madev) 2136 + return -ENOMEM; 2137 + 2138 + adev = &madev->adev; 2139 + ret = mana_adev_idx_alloc(); 2140 + if (ret < 0) 2141 + goto idx_fail; 2142 + adev->id = ret; 2143 + 2144 + adev->name = "rdma"; 2145 + adev->dev.parent = gd->gdma_context->dev; 2146 + adev->dev.release = adev_release; 2147 + madev->mdev = gd; 2148 + 2149 + ret = auxiliary_device_init(adev); 2150 + if (ret) 2151 + goto init_fail; 2152 + 2153 + ret = auxiliary_device_add(adev); 2154 + if (ret) 2155 + goto add_fail; 2156 + 2157 + gd->adev = adev; 2158 + return 0; 2159 + 2160 + add_fail: 2161 + auxiliary_device_uninit(adev); 2162 + 2163 + init_fail: 2164 + mana_adev_idx_free(adev->id); 2165 + 2166 + idx_fail: 2167 + kfree(madev); 2168 + 2169 + return ret; 2174 2170 } 2175 2171 2176 2172 int mana_probe(struct gdma_dev *gd, bool resuming) ··· 2303 2173 break; 2304 2174 } 2305 2175 } 2176 + 2177 + err = add_adev(gd); 2306 2178 out: 2307 2179 if (err) 2308 2180 mana_remove(gd, false); ··· 2320 2188 struct net_device *ndev; 2321 2189 int err; 2322 2190 int i; 2191 + 2192 + /* adev currently doesn't support suspending, always remove it */ 2193 + if (gd->adev) 2194 + remove_adev(gd); 2323 2195 2324 2196 for (i = 0; i < ac->num_ports; i++) { 2325 2197 ndev = ac->ports[i]; ··· 2357 2221 } 2358 2222 2359 2223 mana_destroy_eq(ac); 2360 - 2361 2224 out: 2362 2225 mana_gd_deregister_device(gd); 2363 2226
+1 -1
drivers/net/ethernet/microsoft/mana/mana_ethtool.c
··· 5 5 #include <linux/etherdevice.h> 6 6 #include <linux/ethtool.h> 7 7 8 - #include "mana.h" 8 + #include <net/mana/mana.h> 9 9 10 10 static const struct { 11 11 char name[ETH_GSTRING_LEN];
+1 -1
drivers/net/ethernet/microsoft/mana/shm_channel.c
··· 6 6 #include <linux/io.h> 7 7 #include <linux/mm.h> 8 8 9 - #include "shm_channel.h" 9 + #include <net/mana/shm_channel.h> 10 10 11 11 #define PAGE_FRAME_L48_WIDTH_BYTES 6 12 12 #define PAGE_FRAME_L48_WIDTH_BITS (PAGE_FRAME_L48_WIDTH_BYTES * 8)
drivers/net/ethernet/microsoft/mana/shm_channel.h include/net/mana/shm_channel.h
+10
include/net/mana/mana_auxiliary.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* Copyright (c) 2022, Microsoft Corporation. */ 3 + 4 + #include "mana.h" 5 + #include <linux/auxiliary_bus.h> 6 + 7 + struct mana_adev { 8 + struct auxiliary_device adev; 9 + struct gdma_dev *mdev; 10 + };