Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

firmware: arm_ffa: Add support for MEM_* interfaces

Most of the MEM_* APIs share the same parameters, so they can be
generalised. Currently only MEM_SHARE is implemented; its user-space
interface has not been added yet.

Link: https://lore.kernel.org/r/20210521151033.181846-6-sudeep.holla@arm.com
Tested-by: Jens Wiklander <jens.wiklander@linaro.org>
Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>

+338
+199
drivers/firmware/arm_ffa/driver.c
··· 28 28 #include <linux/io.h> 29 29 #include <linux/kernel.h> 30 30 #include <linux/module.h> 31 + #include <linux/mm.h> 32 + #include <linux/scatterlist.h> 31 33 #include <linux/slab.h> 32 34 #include <linux/uuid.h> 33 35 ··· 351 349 return -EINVAL; 352 350 } 353 351 352 + static int ffa_mem_first_frag(u32 func_id, phys_addr_t buf, u32 buf_sz, 353 + u32 frag_len, u32 len, u64 *handle) 354 + { 355 + ffa_value_t ret; 356 + 357 + invoke_ffa_fn((ffa_value_t){ 358 + .a0 = func_id, .a1 = len, .a2 = frag_len, 359 + .a3 = buf, .a4 = buf_sz, 360 + }, &ret); 361 + 362 + while (ret.a0 == FFA_MEM_OP_PAUSE) 363 + invoke_ffa_fn((ffa_value_t){ 364 + .a0 = FFA_MEM_OP_RESUME, 365 + .a1 = ret.a1, .a2 = ret.a2, 366 + }, &ret); 367 + 368 + if (ret.a0 == FFA_ERROR) 369 + return ffa_to_linux_errno((int)ret.a2); 370 + 371 + if (ret.a0 != FFA_SUCCESS) 372 + return -EOPNOTSUPP; 373 + 374 + if (handle) 375 + *handle = PACK_HANDLE(ret.a2, ret.a3); 376 + 377 + return frag_len; 378 + } 379 + 380 + static int ffa_mem_next_frag(u64 handle, u32 frag_len) 381 + { 382 + ffa_value_t ret; 383 + 384 + invoke_ffa_fn((ffa_value_t){ 385 + .a0 = FFA_MEM_FRAG_TX, 386 + .a1 = HANDLE_LOW(handle), .a2 = HANDLE_HIGH(handle), 387 + .a3 = frag_len, 388 + }, &ret); 389 + 390 + while (ret.a0 == FFA_MEM_OP_PAUSE) 391 + invoke_ffa_fn((ffa_value_t){ 392 + .a0 = FFA_MEM_OP_RESUME, 393 + .a1 = ret.a1, .a2 = ret.a2, 394 + }, &ret); 395 + 396 + if (ret.a0 == FFA_ERROR) 397 + return ffa_to_linux_errno((int)ret.a2); 398 + 399 + if (ret.a0 != FFA_MEM_FRAG_RX) 400 + return -EOPNOTSUPP; 401 + 402 + return ret.a3; 403 + } 404 + 405 + static int 406 + ffa_transmit_fragment(u32 func_id, phys_addr_t buf, u32 buf_sz, u32 frag_len, 407 + u32 len, u64 *handle, bool first) 408 + { 409 + if (!first) 410 + return ffa_mem_next_frag(*handle, frag_len); 411 + 412 + return ffa_mem_first_frag(func_id, buf, buf_sz, frag_len, len, handle); 413 + } 414 + 415 + static u32 ffa_get_num_pages_sg(struct scatterlist *sg) 416 + { 417 + u32 
num_pages = 0; 418 + 419 + do { 420 + num_pages += sg->length / FFA_PAGE_SIZE; 421 + } while ((sg = sg_next(sg))); 422 + 423 + return num_pages; 424 + } 425 + 426 + static int 427 + ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize, 428 + struct ffa_mem_ops_args *args) 429 + { 430 + int rc = 0; 431 + bool first = true; 432 + phys_addr_t addr = 0; 433 + struct ffa_composite_mem_region *composite; 434 + struct ffa_mem_region_addr_range *constituents; 435 + struct ffa_mem_region_attributes *ep_mem_access; 436 + struct ffa_mem_region *mem_region = buffer; 437 + u32 idx, frag_len, length, buf_sz = 0, num_entries = sg_nents(args->sg); 438 + 439 + mem_region->tag = args->tag; 440 + mem_region->flags = args->flags; 441 + mem_region->sender_id = drv_info->vm_id; 442 + mem_region->attributes = FFA_MEM_NORMAL | FFA_MEM_WRITE_BACK | 443 + FFA_MEM_INNER_SHAREABLE; 444 + ep_mem_access = &mem_region->ep_mem_access[0]; 445 + 446 + for (idx = 0; idx < args->nattrs; idx++, ep_mem_access++) { 447 + ep_mem_access->receiver = args->attrs[idx].receiver; 448 + ep_mem_access->attrs = args->attrs[idx].attrs; 449 + ep_mem_access->composite_off = COMPOSITE_OFFSET(args->nattrs); 450 + } 451 + mem_region->ep_count = args->nattrs; 452 + 453 + composite = buffer + COMPOSITE_OFFSET(args->nattrs); 454 + composite->total_pg_cnt = ffa_get_num_pages_sg(args->sg); 455 + composite->addr_range_cnt = num_entries; 456 + 457 + length = COMPOSITE_CONSTITUENTS_OFFSET(args->nattrs, num_entries); 458 + frag_len = COMPOSITE_CONSTITUENTS_OFFSET(args->nattrs, 0); 459 + if (frag_len > max_fragsize) 460 + return -ENXIO; 461 + 462 + if (!args->use_txbuf) { 463 + addr = virt_to_phys(buffer); 464 + buf_sz = max_fragsize / FFA_PAGE_SIZE; 465 + } 466 + 467 + constituents = buffer + frag_len; 468 + idx = 0; 469 + do { 470 + if (frag_len == max_fragsize) { 471 + rc = ffa_transmit_fragment(func_id, addr, buf_sz, 472 + frag_len, length, 473 + &args->g_handle, first); 474 + if (rc < 0) 475 + return -ENXIO; 
476 + 477 + first = false; 478 + idx = 0; 479 + frag_len = 0; 480 + constituents = buffer; 481 + } 482 + 483 + if ((void *)constituents - buffer > max_fragsize) { 484 + pr_err("Memory Region Fragment > Tx Buffer size\n"); 485 + return -EFAULT; 486 + } 487 + 488 + constituents->address = sg_phys(args->sg); 489 + constituents->pg_cnt = args->sg->length / FFA_PAGE_SIZE; 490 + constituents++; 491 + frag_len += sizeof(struct ffa_mem_region_addr_range); 492 + } while ((args->sg = sg_next(args->sg))); 493 + 494 + return ffa_transmit_fragment(func_id, addr, buf_sz, frag_len, 495 + length, &args->g_handle, first); 496 + } 497 + 498 + static int ffa_memory_ops(u32 func_id, struct ffa_mem_ops_args *args) 499 + { 500 + int ret; 501 + void *buffer; 502 + 503 + if (!args->use_txbuf) { 504 + buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL); 505 + if (!buffer) 506 + return -ENOMEM; 507 + } else { 508 + buffer = drv_info->tx_buffer; 509 + mutex_lock(&drv_info->tx_lock); 510 + } 511 + 512 + ret = ffa_setup_and_transmit(func_id, buffer, RXTX_BUFFER_SIZE, args); 513 + 514 + if (args->use_txbuf) 515 + mutex_unlock(&drv_info->tx_lock); 516 + else 517 + free_pages_exact(buffer, RXTX_BUFFER_SIZE); 518 + 519 + return ret < 0 ? 
ret : 0; 520 + } 521 + 522 + static int ffa_memory_reclaim(u64 g_handle, u32 flags) 523 + { 524 + ffa_value_t ret; 525 + 526 + invoke_ffa_fn((ffa_value_t){ 527 + .a0 = FFA_MEM_RECLAIM, 528 + .a1 = HANDLE_LOW(g_handle), .a2 = HANDLE_HIGH(g_handle), 529 + .a3 = flags, 530 + }, &ret); 531 + 532 + if (ret.a0 == FFA_ERROR) 533 + return ffa_to_linux_errno((int)ret.a2); 534 + 535 + return 0; 536 + } 537 + 354 538 static u32 ffa_api_version_get(void) 355 539 { 356 540 return drv_info->version; ··· 575 387 dev->mode_32bit, data); 576 388 } 577 389 390 + static int 391 + ffa_memory_share(struct ffa_device *dev, struct ffa_mem_ops_args *args) 392 + { 393 + if (dev->mode_32bit) 394 + return ffa_memory_ops(FFA_MEM_SHARE, args); 395 + 396 + return ffa_memory_ops(FFA_FN_NATIVE(MEM_SHARE), args); 397 + } 398 + 578 399 static const struct ffa_dev_ops ffa_ops = { 579 400 .api_version_get = ffa_api_version_get, 580 401 .partition_info_get = ffa_partition_info_get, 581 402 .mode_32bit_set = ffa_mode_32bit_set, 582 403 .sync_send_receive = ffa_sync_send_receive, 404 + .memory_reclaim = ffa_memory_reclaim, 405 + .memory_share = ffa_memory_share, 583 406 }; 584 407 585 408 const struct ffa_dev_ops *ffa_dev_ops_get(struct ffa_device *dev)
+139
include/linux/arm_ffa.h
··· 116 116 unsigned long data4; /* w7/x7 */ 117 117 }; 118 118 119 + struct ffa_mem_region_addr_range { 120 + /* The base IPA of the constituent memory region, aligned to 4 kiB */ 121 + u64 address; 122 + /* The number of 4 kiB pages in the constituent memory region. */ 123 + u32 pg_cnt; 124 + u32 reserved; 125 + }; 126 + 127 + struct ffa_composite_mem_region { 128 + /* 129 + * The total number of 4 kiB pages included in this memory region. This 130 + * must be equal to the sum of page counts specified in each 131 + * `struct ffa_mem_region_addr_range`. 132 + */ 133 + u32 total_pg_cnt; 134 + /* The number of constituents included in this memory region range */ 135 + u32 addr_range_cnt; 136 + u64 reserved; 137 + /** An array of `addr_range_cnt` memory region constituents. */ 138 + struct ffa_mem_region_addr_range constituents[]; 139 + }; 140 + 141 + struct ffa_mem_region_attributes { 142 + /* The ID of the VM to which the memory is being given or shared. */ 143 + u16 receiver; 144 + /* 145 + * The permissions with which the memory region should be mapped in the 146 + * receiver's page table. 147 + */ 148 + #define FFA_MEM_EXEC BIT(3) 149 + #define FFA_MEM_NO_EXEC BIT(2) 150 + #define FFA_MEM_RW BIT(1) 151 + #define FFA_MEM_RO BIT(0) 152 + u8 attrs; 153 + /* 154 + * Flags used during FFA_MEM_RETRIEVE_REQ and FFA_MEM_RETRIEVE_RESP 155 + * for memory regions with multiple borrowers. 156 + */ 157 + #define FFA_MEM_RETRIEVE_SELF_BORROWER BIT(0) 158 + u8 flag; 159 + u32 composite_off; 160 + /* 161 + * Offset in bytes from the start of the outer `ffa_memory_region` to 162 + * a `struct ffa_composite_mem_region`. 
163 + */ 164 + u64 reserved; 165 + }; 166 + 167 + struct ffa_mem_region { 168 + /* The ID of the VM/owner which originally sent the memory region */ 169 + u16 sender_id; 170 + #define FFA_MEM_NORMAL BIT(5) 171 + #define FFA_MEM_DEVICE BIT(4) 172 + 173 + #define FFA_MEM_WRITE_BACK (3 << 2) 174 + #define FFA_MEM_NON_CACHEABLE (1 << 2) 175 + 176 + #define FFA_DEV_nGnRnE (0 << 2) 177 + #define FFA_DEV_nGnRE (1 << 2) 178 + #define FFA_DEV_nGRE (2 << 2) 179 + #define FFA_DEV_GRE (3 << 2) 180 + 181 + #define FFA_MEM_NON_SHAREABLE (0) 182 + #define FFA_MEM_OUTER_SHAREABLE (2) 183 + #define FFA_MEM_INNER_SHAREABLE (3) 184 + u8 attributes; 185 + u8 reserved_0; 186 + /* 187 + * Clear memory region contents after unmapping it from the sender and 188 + * before mapping it for any receiver. 189 + */ 190 + #define FFA_MEM_CLEAR BIT(0) 191 + /* 192 + * Whether the hypervisor may time slice the memory sharing or retrieval 193 + * operation. 194 + */ 195 + #define FFA_TIME_SLICE_ENABLE BIT(1) 196 + 197 + #define FFA_MEM_RETRIEVE_TYPE_IN_RESP (0 << 3) 198 + #define FFA_MEM_RETRIEVE_TYPE_SHARE (1 << 3) 199 + #define FFA_MEM_RETRIEVE_TYPE_LEND (2 << 3) 200 + #define FFA_MEM_RETRIEVE_TYPE_DONATE (3 << 3) 201 + 202 + #define FFA_MEM_RETRIEVE_ADDR_ALIGN_HINT BIT(9) 203 + #define FFA_MEM_RETRIEVE_ADDR_ALIGN(x) ((x) << 5) 204 + /* Flags to control behaviour of the transaction. */ 205 + u32 flags; 206 + #define HANDLE_LOW_MASK GENMASK_ULL(31, 0) 207 + #define HANDLE_HIGH_MASK GENMASK_ULL(63, 32) 208 + #define HANDLE_LOW(x) ((u32)(FIELD_GET(HANDLE_LOW_MASK, (x)))) 209 + #define HANDLE_HIGH(x) ((u32)(FIELD_GET(HANDLE_HIGH_MASK, (x)))) 210 + 211 + #define PACK_HANDLE(l, h) \ 212 + (FIELD_PREP(HANDLE_LOW_MASK, (l)) | FIELD_PREP(HANDLE_HIGH_MASK, (h))) 213 + /* 214 + * A globally-unique ID assigned by the hypervisor for a region 215 + * of memory being sent between VMs. 
216 + */ 217 + u64 handle; 218 + /* 219 + * An implementation defined value associated with the receiver and the 220 + * memory region. 221 + */ 222 + u64 tag; 223 + u32 reserved_1; 224 + /* 225 + * The number of `ffa_mem_region_attributes` entries included in this 226 + * transaction. 227 + */ 228 + u32 ep_count; 229 + /* 230 + * An array of endpoint memory access descriptors. 231 + * Each one specifies a memory region offset, an endpoint and the 232 + * attributes with which this memory region should be mapped in that 233 + * endpoint's page table. 234 + */ 235 + struct ffa_mem_region_attributes ep_mem_access[]; 236 + }; 237 + 238 + #define COMPOSITE_OFFSET(x) \ 239 + (offsetof(struct ffa_mem_region, ep_mem_access[x])) 240 + #define CONSTITUENTS_OFFSET(x) \ 241 + (offsetof(struct ffa_composite_mem_region, constituents[x])) 242 + #define COMPOSITE_CONSTITUENTS_OFFSET(x, y) \ 243 + (COMPOSITE_OFFSET(x) + CONSTITUENTS_OFFSET(y)) 244 + 245 + struct ffa_mem_ops_args { 246 + bool use_txbuf; 247 + u32 nattrs; 248 + u32 flags; 249 + u64 tag; 250 + u64 g_handle; 251 + struct scatterlist *sg; 252 + struct ffa_mem_region_attributes *attrs; 253 + }; 254 + 119 255 struct ffa_dev_ops { 120 256 u32 (*api_version_get)(void); 121 257 int (*partition_info_get)(const char *uuid_str, ··· 259 123 void (*mode_32bit_set)(struct ffa_device *dev); 260 124 int (*sync_send_receive)(struct ffa_device *dev, 261 125 struct ffa_send_direct_data *data); 126 + int (*memory_reclaim)(u64 g_handle, u32 flags); 127 + int (*memory_share)(struct ffa_device *dev, 128 + struct ffa_mem_ops_args *args); 262 129 }; 263 130 264 131 #endif /* _LINUX_ARM_FFA_H */