Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: mana: Add support for page sizes other than 4KB on ARM64

As defined by the MANA Hardware spec, the minimum queue size for DMA is
4KB, and it must be a power of 2. Also, the HWC queue size has to be
exactly 4KB.

To support page sizes other than 4KB on ARM64, define the minimal
queue size as a macro separately from PAGE_SIZE, which we had always
assumed to be 4KB before supporting ARM64.

Also, add MANA specific macros and update code related to size
alignment, DMA region calculations, etc.

Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
Reviewed-by: Michael Kelley <mhklinux@outlook.com>
Link: https://lore.kernel.org/r/1718655446-6576-1-git-send-email-haiyangz@microsoft.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

authored by

Haiyang Zhang and committed by
Jakub Kicinski
382d1741 2c6a4b96

+35 -25
+1 -1
drivers/net/ethernet/microsoft/Kconfig
··· 18 18 config MICROSOFT_MANA 19 19 tristate "Microsoft Azure Network Adapter (MANA) support" 20 20 depends on PCI_MSI 21 - depends on X86_64 || (ARM64 && !CPU_BIG_ENDIAN && ARM64_4K_PAGES) 21 + depends on X86_64 || (ARM64 && !CPU_BIG_ENDIAN) 22 22 depends on PCI_HYPERV 23 23 select AUXILIARY_BUS 24 24 select PAGE_POOL
+5 -5
drivers/net/ethernet/microsoft/mana/gdma_main.c
··· 182 182 dma_addr_t dma_handle; 183 183 void *buf; 184 184 185 - if (length < PAGE_SIZE || !is_power_of_2(length)) 185 + if (length < MANA_PAGE_SIZE || !is_power_of_2(length)) 186 186 return -EINVAL; 187 187 188 188 gmi->dev = gc->dev; ··· 717 717 static int mana_gd_create_dma_region(struct gdma_dev *gd, 718 718 struct gdma_mem_info *gmi) 719 719 { 720 - unsigned int num_page = gmi->length / PAGE_SIZE; 720 + unsigned int num_page = gmi->length / MANA_PAGE_SIZE; 721 721 struct gdma_create_dma_region_req *req = NULL; 722 722 struct gdma_create_dma_region_resp resp = {}; 723 723 struct gdma_context *gc = gd->gdma_context; ··· 727 727 int err; 728 728 int i; 729 729 730 - if (length < PAGE_SIZE || !is_power_of_2(length)) 730 + if (length < MANA_PAGE_SIZE || !is_power_of_2(length)) 731 731 return -EINVAL; 732 732 733 - if (offset_in_page(gmi->virt_addr) != 0) 733 + if (!MANA_PAGE_ALIGNED(gmi->virt_addr)) 734 734 return -EINVAL; 735 735 736 736 hwc = gc->hwc.driver_data; ··· 751 751 req->page_addr_list_len = num_page; 752 752 753 753 for (i = 0; i < num_page; i++) 754 - req->page_addr_list[i] = gmi->dma_handle + i * PAGE_SIZE; 754 + req->page_addr_list[i] = gmi->dma_handle + i * MANA_PAGE_SIZE; 755 755 756 756 err = mana_gd_send_request(gc, req_msg_size, req, sizeof(resp), &resp); 757 757 if (err)
+7 -7
drivers/net/ethernet/microsoft/mana/hw_channel.c
··· 362 362 int err; 363 363 364 364 eq_size = roundup_pow_of_two(GDMA_EQE_SIZE * q_depth); 365 - if (eq_size < MINIMUM_SUPPORTED_PAGE_SIZE) 366 - eq_size = MINIMUM_SUPPORTED_PAGE_SIZE; 365 + if (eq_size < MANA_MIN_QSIZE) 366 + eq_size = MANA_MIN_QSIZE; 367 367 368 368 cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth); 369 - if (cq_size < MINIMUM_SUPPORTED_PAGE_SIZE) 370 - cq_size = MINIMUM_SUPPORTED_PAGE_SIZE; 369 + if (cq_size < MANA_MIN_QSIZE) 370 + cq_size = MANA_MIN_QSIZE; 371 371 372 372 hwc_cq = kzalloc(sizeof(*hwc_cq), GFP_KERNEL); 373 373 if (!hwc_cq) ··· 429 429 430 430 dma_buf->num_reqs = q_depth; 431 431 432 - buf_size = PAGE_ALIGN(q_depth * max_msg_size); 432 + buf_size = MANA_PAGE_ALIGN(q_depth * max_msg_size); 433 433 434 434 gmi = &dma_buf->mem_info; 435 435 err = mana_gd_alloc_memory(gc, buf_size, gmi); ··· 497 497 else 498 498 queue_size = roundup_pow_of_two(GDMA_MAX_SQE_SIZE * q_depth); 499 499 500 - if (queue_size < MINIMUM_SUPPORTED_PAGE_SIZE) 501 - queue_size = MINIMUM_SUPPORTED_PAGE_SIZE; 500 + if (queue_size < MANA_MIN_QSIZE) 501 + queue_size = MANA_MIN_QSIZE; 502 502 503 503 hwc_wq = kzalloc(sizeof(*hwc_wq), GFP_KERNEL); 504 504 if (!hwc_wq)
+4 -4
drivers/net/ethernet/microsoft/mana/mana_en.c
··· 1904 1904 * to prevent overflow. 1905 1905 */ 1906 1906 txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32; 1907 - BUILD_BUG_ON(!PAGE_ALIGNED(txq_size)); 1907 + BUILD_BUG_ON(!MANA_PAGE_ALIGNED(txq_size)); 1908 1908 1909 1909 cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE; 1910 - cq_size = PAGE_ALIGN(cq_size); 1910 + cq_size = MANA_PAGE_ALIGN(cq_size); 1911 1911 1912 1912 gc = gd->gdma_context; 1913 1913 ··· 2204 2204 if (err) 2205 2205 goto out; 2206 2206 2207 - rq_size = PAGE_ALIGN(rq_size); 2208 - cq_size = PAGE_ALIGN(cq_size); 2207 + rq_size = MANA_PAGE_ALIGN(rq_size); 2208 + cq_size = MANA_PAGE_ALIGN(cq_size); 2209 2209 2210 2210 /* Create RQ */ 2211 2211 memset(&spec, 0, sizeof(spec));
+7 -6
drivers/net/ethernet/microsoft/mana/shm_channel.c
··· 6 6 #include <linux/io.h> 7 7 #include <linux/mm.h> 8 8 9 + #include <net/mana/gdma.h> 9 10 #include <net/mana/shm_channel.h> 10 11 11 12 #define PAGE_FRAME_L48_WIDTH_BYTES 6 ··· 156 155 return err; 157 156 } 158 157 159 - if (!PAGE_ALIGNED(eq_addr) || !PAGE_ALIGNED(cq_addr) || 160 - !PAGE_ALIGNED(rq_addr) || !PAGE_ALIGNED(sq_addr)) 158 + if (!MANA_PAGE_ALIGNED(eq_addr) || !MANA_PAGE_ALIGNED(cq_addr) || 159 + !MANA_PAGE_ALIGNED(rq_addr) || !MANA_PAGE_ALIGNED(sq_addr)) 161 160 return -EINVAL; 162 161 163 162 if ((eq_msix_index & VECTOR_MASK) != eq_msix_index) ··· 184 183 185 184 /* EQ addr: low 48 bits of frame address */ 186 185 shmem = (u64 *)ptr; 187 - frame_addr = PHYS_PFN(eq_addr); 186 + frame_addr = MANA_PFN(eq_addr); 188 187 *shmem = frame_addr & PAGE_FRAME_L48_MASK; 189 188 all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) << 190 189 (frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS); ··· 192 191 193 192 /* CQ addr: low 48 bits of frame address */ 194 193 shmem = (u64 *)ptr; 195 - frame_addr = PHYS_PFN(cq_addr); 194 + frame_addr = MANA_PFN(cq_addr); 196 195 *shmem = frame_addr & PAGE_FRAME_L48_MASK; 197 196 all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) << 198 197 (frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS); ··· 200 199 201 200 /* RQ addr: low 48 bits of frame address */ 202 201 shmem = (u64 *)ptr; 203 - frame_addr = PHYS_PFN(rq_addr); 202 + frame_addr = MANA_PFN(rq_addr); 204 203 *shmem = frame_addr & PAGE_FRAME_L48_MASK; 205 204 all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) << 206 205 (frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS); ··· 208 207 209 208 /* SQ addr: low 48 bits of frame address */ 210 209 shmem = (u64 *)ptr; 211 - frame_addr = PHYS_PFN(sq_addr); 210 + frame_addr = MANA_PFN(sq_addr); 212 211 *shmem = frame_addr & PAGE_FRAME_L48_MASK; 213 212 all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) << 214 213 (frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
+9 -1
include/net/mana/gdma.h
··· 224 224 struct auxiliary_device *adev; 225 225 }; 226 226 227 - #define MINIMUM_SUPPORTED_PAGE_SIZE PAGE_SIZE 227 + /* MANA_PAGE_SIZE is the DMA unit */ 228 + #define MANA_PAGE_SHIFT 12 229 + #define MANA_PAGE_SIZE BIT(MANA_PAGE_SHIFT) 230 + #define MANA_PAGE_ALIGN(x) ALIGN((x), MANA_PAGE_SIZE) 231 + #define MANA_PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), MANA_PAGE_SIZE) 232 + #define MANA_PFN(a) ((a) >> MANA_PAGE_SHIFT) 233 + 234 + /* Required by HW */ 235 + #define MANA_MIN_QSIZE MANA_PAGE_SIZE 228 236 229 237 #define GDMA_CQE_SIZE 64 230 238 #define GDMA_EQE_SIZE 16
+2 -1
include/net/mana/mana.h
··· 42 42 43 43 #define MAX_SEND_BUFFERS_PER_QUEUE 256 44 44 45 - #define EQ_SIZE (8 * PAGE_SIZE) 45 + #define EQ_SIZE (8 * MANA_PAGE_SIZE) 46 + 46 47 #define LOG2_EQ_THROTTLE 3 47 48 48 49 #define MAX_PORTS_IN_MANA_DEV 256