Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'ffa-updates-6.12' of https://git.kernel.org/pub/scm/linux/kernel/git/sudeep.holla/linux into soc/drivers

Arm FF-A updates for v6.12

The main addition this time is basic support for the FF-A v1.2
specification, which includes support for the newly added:
1. FFA_MSG_SEND_DIRECT_{REQ,RESP}2
2. FFA_PARTITION_INFO_GET_REGS
3. FFA_YIELD support in direct messaging

Apart from these, the changes include support to fetch the Rx/Tx buffer
size using FFA_FEATURES, addition of the FF-A FIDs for v1.2 and some
coding style cleanups.

* tag 'ffa-updates-6.12' of https://git.kernel.org/pub/scm/linux/kernel/git/sudeep.holla/linux:
firmware: arm_ffa: Fetch the Rx/Tx buffer size using ffa_features()
firmware: arm_ffa: Add support for FFA_YIELD in direct messaging
firmware: arm_ffa: Add support for FFA_MSG_SEND_DIRECT_{REQ,RESP}2
firmware: arm_ffa: Add support for FFA_PARTITION_INFO_GET_REGS
firmware: arm_ffa: Move the function ffa_features() earlier
firmware: arm_ffa: Update the FF-A command list with v1.2 additions
firmware: arm_ffa: Some coding style fixes

Link: https://lore.kernel.org/r/20240830135759.2383431-1-sudeep.holla@arm.com
Signed-off-by: Arnd Bergmann <arnd@arndb.de>

+195 -57
+183 -57
drivers/firmware/arm_ffa/driver.c
··· 26 26 #include <linux/arm_ffa.h> 27 27 #include <linux/bitfield.h> 28 28 #include <linux/cpuhotplug.h> 29 + #include <linux/delay.h> 29 30 #include <linux/device.h> 30 31 #include <linux/hashtable.h> 31 32 #include <linux/interrupt.h> ··· 54 53 #define PACK_TARGET_INFO(s, r) \ 55 54 (FIELD_PREP(SENDER_ID_MASK, (s)) | FIELD_PREP(RECEIVER_ID_MASK, (r))) 56 55 57 - /* 58 - * Keeping RX TX buffer size as 4K for now 59 - * 64K may be preferred to keep it min a page in 64K PAGE_SIZE config 60 - */ 61 - #define RXTX_BUFFER_SIZE SZ_4K 56 + #define RXTX_MAP_MIN_BUFSZ_MASK GENMASK(1, 0) 57 + #define RXTX_MAP_MIN_BUFSZ(x) ((x) & RXTX_MAP_MIN_BUFSZ_MASK) 62 58 63 59 #define FFA_MAX_NOTIFICATIONS 64 64 60 ··· 73 75 -EAGAIN, /* FFA_RET_RETRY */ 74 76 -ECANCELED, /* FFA_RET_ABORTED */ 75 77 -ENODATA, /* FFA_RET_NO_DATA */ 78 + -EAGAIN, /* FFA_RET_NOT_READY */ 76 79 }; 77 80 78 81 static inline int ffa_to_linux_errno(int errno) ··· 96 97 struct mutex tx_lock; /* lock to protect Tx buffer */ 97 98 void *rx_buffer; 98 99 void *tx_buffer; 100 + size_t rxtx_bufsz; 99 101 bool mem_ops_native; 102 + bool msg_direct_req2_supp; 100 103 bool bitmap_created; 101 104 bool notif_enabled; 102 105 unsigned int sched_recv_irq; ··· 212 211 return 0; 213 212 } 214 213 214 + static int ffa_features(u32 func_feat_id, u32 input_props, 215 + u32 *if_props_1, u32 *if_props_2) 216 + { 217 + ffa_value_t id; 218 + 219 + if (!ARM_SMCCC_IS_FAST_CALL(func_feat_id) && input_props) { 220 + pr_err("%s: Invalid Parameters: %x, %x", __func__, 221 + func_feat_id, input_props); 222 + return ffa_to_linux_errno(FFA_RET_INVALID_PARAMETERS); 223 + } 224 + 225 + invoke_ffa_fn((ffa_value_t){ 226 + .a0 = FFA_FEATURES, .a1 = func_feat_id, .a2 = input_props, 227 + }, &id); 228 + 229 + if (id.a0 == FFA_ERROR) 230 + return ffa_to_linux_errno((int)id.a2); 231 + 232 + if (if_props_1) 233 + *if_props_1 = id.a2; 234 + if (if_props_2) 235 + *if_props_2 = id.a3; 236 + 237 + return 0; 238 + } 239 + 215 240 #define 
PARTITION_INFO_GET_RETURN_COUNT_ONLY BIT(0) 216 241 217 242 /* buffer must be sizeof(struct ffa_partition_info) * num_partitions */ ··· 287 260 return count; 288 261 } 289 262 263 + #define LAST_INDEX_MASK GENMASK(15, 0) 264 + #define CURRENT_INDEX_MASK GENMASK(31, 16) 265 + #define UUID_INFO_TAG_MASK GENMASK(47, 32) 266 + #define PARTITION_INFO_SZ_MASK GENMASK(63, 48) 267 + #define PARTITION_COUNT(x) ((u16)(FIELD_GET(LAST_INDEX_MASK, (x))) + 1) 268 + #define CURRENT_INDEX(x) ((u16)(FIELD_GET(CURRENT_INDEX_MASK, (x)))) 269 + #define UUID_INFO_TAG(x) ((u16)(FIELD_GET(UUID_INFO_TAG_MASK, (x)))) 270 + #define PARTITION_INFO_SZ(x) ((u16)(FIELD_GET(PARTITION_INFO_SZ_MASK, (x)))) 271 + static int 272 + __ffa_partition_info_get_regs(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3, 273 + struct ffa_partition_info *buffer, int num_parts) 274 + { 275 + u16 buf_sz, start_idx, cur_idx, count = 0, prev_idx = 0, tag = 0; 276 + ffa_value_t partition_info; 277 + 278 + do { 279 + start_idx = prev_idx ? prev_idx + 1 : 0; 280 + 281 + invoke_ffa_fn((ffa_value_t){ 282 + .a0 = FFA_PARTITION_INFO_GET_REGS, 283 + .a1 = (u64)uuid1 << 32 | uuid0, 284 + .a2 = (u64)uuid3 << 32 | uuid2, 285 + .a3 = start_idx | tag << 16, 286 + }, &partition_info); 287 + 288 + if (partition_info.a0 == FFA_ERROR) 289 + return ffa_to_linux_errno((int)partition_info.a2); 290 + 291 + if (!count) 292 + count = PARTITION_COUNT(partition_info.a2); 293 + if (!buffer || !num_parts) /* count only */ 294 + return count; 295 + 296 + cur_idx = CURRENT_INDEX(partition_info.a2); 297 + tag = UUID_INFO_TAG(partition_info.a2); 298 + buf_sz = PARTITION_INFO_SZ(partition_info.a2); 299 + if (buf_sz > sizeof(*buffer)) 300 + buf_sz = sizeof(*buffer); 301 + 302 + memcpy(buffer + prev_idx * buf_sz, &partition_info.a3, 303 + (cur_idx - start_idx + 1) * buf_sz); 304 + prev_idx = cur_idx; 305 + 306 + } while (cur_idx < (count - 1)); 307 + 308 + return count; 309 + } 310 + 290 311 /* buffer is allocated and caller must free the same if returned 
count > 0 */ 291 312 static int 292 313 ffa_partition_probe(const uuid_t *uuid, struct ffa_partition_info **buffer) 293 314 { 294 315 int count; 295 316 u32 uuid0_4[4]; 317 + bool reg_mode = false; 296 318 struct ffa_partition_info *pbuf; 297 319 320 + if (!ffa_features(FFA_PARTITION_INFO_GET_REGS, 0, NULL, NULL)) 321 + reg_mode = true; 322 + 298 323 export_uuid((u8 *)uuid0_4, uuid); 299 - count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1], uuid0_4[2], 300 - uuid0_4[3], NULL, 0); 324 + if (reg_mode) 325 + count = __ffa_partition_info_get_regs(uuid0_4[0], uuid0_4[1], 326 + uuid0_4[2], uuid0_4[3], 327 + NULL, 0); 328 + else 329 + count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1], 330 + uuid0_4[2], uuid0_4[3], 331 + NULL, 0); 301 332 if (count <= 0) 302 333 return count; 303 334 ··· 363 278 if (!pbuf) 364 279 return -ENOMEM; 365 280 366 - count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1], uuid0_4[2], 367 - uuid0_4[3], pbuf, count); 281 + if (reg_mode) 282 + count = __ffa_partition_info_get_regs(uuid0_4[0], uuid0_4[1], 283 + uuid0_4[2], uuid0_4[3], 284 + pbuf, count); 285 + else 286 + count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1], 287 + uuid0_4[2], uuid0_4[3], 288 + pbuf, count); 368 289 if (count <= 0) 369 290 kfree(pbuf); 370 291 else ··· 396 305 return 0; 397 306 } 398 307 308 + static inline void ffa_msg_send_wait_for_completion(ffa_value_t *ret) 309 + { 310 + while (ret->a0 == FFA_INTERRUPT || ret->a0 == FFA_YIELD) { 311 + if (ret->a0 == FFA_YIELD) 312 + fsleep(1000); 313 + 314 + invoke_ffa_fn((ffa_value_t){ 315 + .a0 = FFA_RUN, .a1 = ret->a1, 316 + }, ret); 317 + } 318 + } 319 + 399 320 static int ffa_msg_send_direct_req(u16 src_id, u16 dst_id, bool mode_32bit, 400 321 struct ffa_send_direct_data *data) 401 322 { ··· 428 325 .a6 = data->data3, .a7 = data->data4, 429 326 }, &ret); 430 327 431 - while (ret.a0 == FFA_INTERRUPT) 432 - invoke_ffa_fn((ffa_value_t){ 433 - .a0 = FFA_RUN, .a1 = ret.a1, 434 - }, &ret); 328 + 
ffa_msg_send_wait_for_completion(&ret); 435 329 436 330 if (ret.a0 == FFA_ERROR) 437 331 return ffa_to_linux_errno((int)ret.a2); ··· 452 352 ffa_value_t ret; 453 353 int retval = 0; 454 354 455 - if (sz > (RXTX_BUFFER_SIZE - sizeof(*msg))) 355 + if (sz > (drv_info->rxtx_bufsz - sizeof(*msg))) 456 356 return -ERANGE; 457 357 458 358 mutex_lock(&drv_info->tx_lock); ··· 475 375 476 376 mutex_unlock(&drv_info->tx_lock); 477 377 return retval; 378 + } 379 + 380 + static int ffa_msg_send_direct_req2(u16 src_id, u16 dst_id, const uuid_t *uuid, 381 + struct ffa_send_direct_data2 *data) 382 + { 383 + u32 src_dst_ids = PACK_TARGET_INFO(src_id, dst_id); 384 + ffa_value_t ret, args = { 385 + .a0 = FFA_MSG_SEND_DIRECT_REQ2, .a1 = src_dst_ids, 386 + }; 387 + 388 + export_uuid((u8 *)&args.a2, uuid); 389 + memcpy((void *)&args + offsetof(ffa_value_t, a4), data, sizeof(*data)); 390 + 391 + invoke_ffa_fn(args, &ret); 392 + 393 + ffa_msg_send_wait_for_completion(&ret); 394 + 395 + if (ret.a0 == FFA_ERROR) 396 + return ffa_to_linux_errno((int)ret.a2); 397 + 398 + if (ret.a0 == FFA_MSG_SEND_DIRECT_RESP2) { 399 + memcpy(data, &ret.a4, sizeof(*data)); 400 + return 0; 401 + } 402 + 403 + return -EINVAL; 478 404 } 479 405 480 406 static int ffa_mem_first_frag(u32 func_id, phys_addr_t buf, u32 buf_sz, ··· 687 561 { 688 562 int ret; 689 563 void *buffer; 564 + size_t rxtx_bufsz = drv_info->rxtx_bufsz; 690 565 691 566 if (!args->use_txbuf) { 692 - buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL); 567 + buffer = alloc_pages_exact(rxtx_bufsz, GFP_KERNEL); 693 568 if (!buffer) 694 569 return -ENOMEM; 695 570 } else { ··· 698 571 mutex_lock(&drv_info->tx_lock); 699 572 } 700 573 701 - ret = ffa_setup_and_transmit(func_id, buffer, RXTX_BUFFER_SIZE, args); 574 + ret = ffa_setup_and_transmit(func_id, buffer, rxtx_bufsz, args); 702 575 703 576 if (args->use_txbuf) 704 577 mutex_unlock(&drv_info->tx_lock); 705 578 else 706 - free_pages_exact(buffer, RXTX_BUFFER_SIZE); 579 + 
free_pages_exact(buffer, rxtx_bufsz); 707 580 708 581 return ret < 0 ? ret : 0; 709 582 } ··· 720 593 721 594 if (ret.a0 == FFA_ERROR) 722 595 return ffa_to_linux_errno((int)ret.a2); 723 - 724 - return 0; 725 - } 726 - 727 - static int ffa_features(u32 func_feat_id, u32 input_props, 728 - u32 *if_props_1, u32 *if_props_2) 729 - { 730 - ffa_value_t id; 731 - 732 - if (!ARM_SMCCC_IS_FAST_CALL(func_feat_id) && input_props) { 733 - pr_err("%s: Invalid Parameters: %x, %x", __func__, 734 - func_feat_id, input_props); 735 - return ffa_to_linux_errno(FFA_RET_INVALID_PARAMETERS); 736 - } 737 - 738 - invoke_ffa_fn((ffa_value_t){ 739 - .a0 = FFA_FEATURES, .a1 = func_feat_id, .a2 = input_props, 740 - }, &id); 741 - 742 - if (id.a0 == FFA_ERROR) 743 - return ffa_to_linux_errno((int)id.a2); 744 - 745 - if (if_props_1) 746 - *if_props_1 = id.a2; 747 - if (if_props_2) 748 - *if_props_2 = id.a3; 749 596 750 597 return 0; 751 598 } ··· 959 858 return 0; 960 859 } 961 860 962 - static void ffa_set_up_mem_ops_native_flag(void) 861 + static void ffa_drvinfo_flags_init(void) 963 862 { 964 863 if (!ffa_features(FFA_FN_NATIVE(MEM_LEND), 0, NULL, NULL) || 965 864 !ffa_features(FFA_FN_NATIVE(MEM_SHARE), 0, NULL, NULL)) 966 865 drv_info->mem_ops_native = true; 866 + 867 + if (!ffa_features(FFA_MSG_SEND_DIRECT_REQ2, 0, NULL, NULL) || 868 + !ffa_features(FFA_MSG_SEND_DIRECT_RESP2, 0, NULL, NULL)) 869 + drv_info->msg_direct_req2_supp = true; 967 870 } 968 871 969 872 static u32 ffa_api_version_get(void) ··· 1011 906 static int ffa_indirect_msg_send(struct ffa_device *dev, void *buf, size_t sz) 1012 907 { 1013 908 return ffa_msg_send2(drv_info->vm_id, dev->vm_id, buf, sz); 909 + } 910 + 911 + static int ffa_sync_send_receive2(struct ffa_device *dev, const uuid_t *uuid, 912 + struct ffa_send_direct_data2 *data) 913 + { 914 + if (!drv_info->msg_direct_req2_supp) 915 + return -EOPNOTSUPP; 916 + 917 + return ffa_msg_send_direct_req2(drv_info->vm_id, dev->vm_id, 918 + uuid, data); 1014 919 } 1015 920 
1016 921 static int ffa_memory_share(struct ffa_mem_ops_args *args) ··· 1306 1191 .mode_32bit_set = ffa_mode_32bit_set, 1307 1192 .sync_send_receive = ffa_sync_send_receive, 1308 1193 .indirect_send = ffa_indirect_msg_send, 1194 + .sync_send_receive2 = ffa_sync_send_receive2, 1309 1195 }; 1310 1196 1311 1197 static const struct ffa_mem_ops ffa_drv_mem_ops = { ··· 1358 1242 1359 1243 if (action == BUS_NOTIFY_BIND_DRIVER) { 1360 1244 struct ffa_driver *ffa_drv = to_ffa_driver(dev->driver); 1361 - const struct ffa_device_id *id_table= ffa_drv->id_table; 1245 + const struct ffa_device_id *id_table = ffa_drv->id_table; 1362 1246 1363 1247 /* 1364 1248 * FF-A v1.1 provides UUID for each partition as part of the ··· 1443 1327 /* Allocate for the host */ 1444 1328 info = kzalloc(sizeof(*info), GFP_KERNEL); 1445 1329 if (!info) { 1446 - pr_err("%s: failed to alloc Host partition ID 0x%x. Abort.\n", 1447 - __func__, drv_info->vm_id); 1448 1330 /* Already registered devices are freed on bus_exit */ 1449 1331 ffa_partitions_cleanup(); 1450 1332 return -ENOMEM; ··· 1717 1603 static int __init ffa_init(void) 1718 1604 { 1719 1605 int ret; 1606 + u32 buf_sz; 1607 + size_t rxtx_bufsz = SZ_4K; 1720 1608 1721 1609 ret = ffa_transport_init(&invoke_ffa_fn); 1722 1610 if (ret) 1723 1611 return ret; 1724 1612 1725 1613 drv_info = kzalloc(sizeof(*drv_info), GFP_KERNEL); 1726 - if (!drv_info) { 1614 + if (!drv_info) 1727 1615 return -ENOMEM; 1728 - } 1729 1616 1730 1617 ret = ffa_version_check(&drv_info->version); 1731 1618 if (ret) ··· 1738 1623 goto free_drv_info; 1739 1624 } 1740 1625 1741 - drv_info->rx_buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL); 1626 + ret = ffa_features(FFA_FN_NATIVE(RXTX_MAP), 0, &buf_sz, NULL); 1627 + if (!ret) { 1628 + if (RXTX_MAP_MIN_BUFSZ(buf_sz) == 1) 1629 + rxtx_bufsz = SZ_64K; 1630 + else if (RXTX_MAP_MIN_BUFSZ(buf_sz) == 2) 1631 + rxtx_bufsz = SZ_16K; 1632 + else 1633 + rxtx_bufsz = SZ_4K; 1634 + } 1635 + 1636 + drv_info->rxtx_bufsz = 
rxtx_bufsz; 1637 + drv_info->rx_buffer = alloc_pages_exact(rxtx_bufsz, GFP_KERNEL); 1742 1638 if (!drv_info->rx_buffer) { 1743 1639 ret = -ENOMEM; 1744 1640 goto free_pages; 1745 1641 } 1746 1642 1747 - drv_info->tx_buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL); 1643 + drv_info->tx_buffer = alloc_pages_exact(rxtx_bufsz, GFP_KERNEL); 1748 1644 if (!drv_info->tx_buffer) { 1749 1645 ret = -ENOMEM; 1750 1646 goto free_pages; ··· 1763 1637 1764 1638 ret = ffa_rxtx_map(virt_to_phys(drv_info->tx_buffer), 1765 1639 virt_to_phys(drv_info->rx_buffer), 1766 - RXTX_BUFFER_SIZE / FFA_PAGE_SIZE); 1640 + rxtx_bufsz / FFA_PAGE_SIZE); 1767 1641 if (ret) { 1768 1642 pr_err("failed to register FFA RxTx buffers\n"); 1769 1643 goto free_pages; ··· 1772 1646 mutex_init(&drv_info->rx_lock); 1773 1647 mutex_init(&drv_info->tx_lock); 1774 1648 1775 - ffa_set_up_mem_ops_native_flag(); 1649 + ffa_drvinfo_flags_init(); 1776 1650 1777 1651 ffa_notifications_setup(); 1778 1652 ··· 1793 1667 ffa_notifications_cleanup(); 1794 1668 free_pages: 1795 1669 if (drv_info->tx_buffer) 1796 - free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE); 1797 - free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE); 1670 + free_pages_exact(drv_info->tx_buffer, rxtx_bufsz); 1671 + free_pages_exact(drv_info->rx_buffer, rxtx_bufsz); 1798 1672 free_drv_info: 1799 1673 kfree(drv_info); 1800 1674 return ret; ··· 1806 1680 ffa_notifications_cleanup(); 1807 1681 ffa_partitions_cleanup(); 1808 1682 ffa_rxtx_unmap(drv_info->vm_id); 1809 - free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE); 1810 - free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE); 1683 + free_pages_exact(drv_info->tx_buffer, drv_info->rxtx_bufsz); 1684 + free_pages_exact(drv_info->rx_buffer, drv_info->rxtx_bufsz); 1811 1685 kfree(drv_info); 1812 1686 } 1813 1687 module_exit(ffa_exit);
+12
include/linux/arm_ffa.h
··· 73 73 #define FFA_FN64_MEM_PERM_GET FFA_SMC_64(0x88) 74 74 #define FFA_MEM_PERM_SET FFA_SMC_32(0x89) 75 75 #define FFA_FN64_MEM_PERM_SET FFA_SMC_64(0x89) 76 + #define FFA_CONSOLE_LOG FFA_SMC_32(0x8A) 77 + #define FFA_PARTITION_INFO_GET_REGS FFA_SMC_64(0x8B) 78 + #define FFA_EL3_INTR_HANDLE FFA_SMC_32(0x8C) 79 + #define FFA_MSG_SEND_DIRECT_REQ2 FFA_SMC_64(0x8D) 80 + #define FFA_MSG_SEND_DIRECT_RESP2 FFA_SMC_64(0x8E) 76 81 77 82 /* 78 83 * For some calls it is necessary to use SMC64 to pass or return 64-bit values. ··· 270 265 u32 size; 271 266 }; 272 267 268 + /* For use with FFA_MSG_SEND_DIRECT_{REQ,RESP}2 which pass data via registers */ 269 + struct ffa_send_direct_data2 { 270 + unsigned long data[14]; /* x4-x17 */ 271 + }; 272 + 273 273 struct ffa_mem_region_addr_range { 274 274 /* The base IPA of the constituent memory region, aligned to 4 kiB */ 275 275 u64 address; ··· 436 426 int (*sync_send_receive)(struct ffa_device *dev, 437 427 struct ffa_send_direct_data *data); 438 428 int (*indirect_send)(struct ffa_device *dev, void *buf, size_t sz); 429 + int (*sync_send_receive2)(struct ffa_device *dev, const uuid_t *uuid, 430 + struct ffa_send_direct_data2 *data); 439 431 }; 440 432 441 433 struct ffa_mem_ops {