Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'ffa-updates-6.15' of https://git.kernel.org/pub/scm/linux/kernel/git/sudeep.holla/linux into soc/drivers

Arm FF-A updates for v6.15

This update primarily focuses on FF-A framework notification support
along with other improvements, including UUID handling enhancements
and various fixes.

1. FF-A framework notification support

- Adds support for multiple UUIDs per partition to register individual
SRI callbacks.
- Handles Rx buffer full framework notifications and provides a general
interface for future extensions.

2. Improved multiple UUID/services per-partition handling

- Adds support for UUID passing in FFA_MSG_SEND2, improving multiple
UUID/service support in the driver.
- Introduces a helper function to check whether a partition can
receive REQUEST2 messages.

3. Partition handling generic improvements

- Implements device unregistration for better partition cleanup.
- Improves handling of the host partition presence in partition info.

4. FF-A version updates

- Upgrades the driver version to FF-A v1.2.
- Rejects major versions higher than the driver version as incompatible.

5. Big-endian support fixes

- Fixes big-endian issues in:
__ffa_partition_info_regs_get()
__ffa_partition_info_get()
- Big-endian support is still incomplete, and only these changes can
be verified without additional application/testing updates at the
moment. We can discover all the partitions correctly with big-endian
kernel now.

6. Miscellaneous fixes

- Fixes function prototype misalignments in: sync_send_receive{,2}
- Adds explicit type casting for return values from: FFA_VERSION
and NOTIFICATION_INFO_GET
- Corrects vCPU list parsing in ffa_notification_info_get().

7. UUID management in the driver and DMA mask updates

- Replaces UUID buffer with the standard UUID format in ffa_partition_info
structure.
- Fixes a typo in some FF-A bus macros.
- Sets dma_mask for FF-A devices.

In short, this update enhances notification handling, UUID support, and
overall robustness of the FF-A driver while addressing multiple fixes
and cleanups.

* tag 'ffa-updates-6.15' of https://git.kernel.org/pub/scm/linux/kernel/git/sudeep.holla/linux: (23 commits)
firmware: arm_ffa: Set dma_mask for ffa devices
firmware: arm_ffa: Skip the first/partition ID when parsing vCPU list
firmware: arm_ffa: Explicitly cast return value from NOTIFICATION_INFO_GET
firmware: arm_ffa: Explicitly cast return value from FFA_VERSION before comparison
firmware: arm_ffa: Handle ffa_notification_get correctly at virtual FF-A instance
firmware: arm_ffa: Allow multiple UUIDs per partition to register SRI callback
firmware: arm_ffa: Add support for handling framework notifications
firmware: arm_ffa: Add support for {un,}registration of framework notifications
firmware: arm_ffa: Stash ffa_device instead of notify_type in notifier_cb_info
firmware: arm_ffa: Refactoring to prepare for framework notification support
firmware: arm_ffa: Remove unnecessary declaration of ffa_partitions_cleanup()
firmware: arm_ffa: Reject higher major version as incompatible
firmware: arm_ffa: Upgrade FF-A version to v1.2 in the driver
firmware: arm_ffa: Add support for passing UUID in FFA_MSG_SEND2
firmware: arm_ffa: Helper to check if a partition can receive REQUEST2 messages
firmware: arm_ffa: Unregister the FF-A devices when cleaning up the partitions
firmware: arm_ffa: Handle the presence of host partition in the partition info
firmware: arm_ffa: Refactor addition of partition information into XArray
firmware: arm_ffa: Fix big-endian support in __ffa_partition_info_regs_get()
firmware: arm_ffa: Fix big-endian support in __ffa_partition_info_get()
...

Link: https://lore.kernel.org/r/20250304105928.432997-1-sudeep.holla@arm.com
Signed-off-by: Arnd Bergmann <arnd@arndb.de>

+440 -164
+7 -7
drivers/firmware/arm_ffa/bus.c
··· 15 15 16 16 #include "common.h" 17 17 18 - #define SCMI_UEVENT_MODALIAS_FMT "arm_ffa:%04x:%pUb" 18 + #define FFA_UEVENT_MODALIAS_FMT "arm_ffa:%04x:%pUb" 19 19 20 20 static DEFINE_IDA(ffa_bus_id); 21 21 ··· 68 68 { 69 69 const struct ffa_device *ffa_dev = to_ffa_dev(dev); 70 70 71 - return add_uevent_var(env, "MODALIAS=" SCMI_UEVENT_MODALIAS_FMT, 71 + return add_uevent_var(env, "MODALIAS=" FFA_UEVENT_MODALIAS_FMT, 72 72 ffa_dev->vm_id, &ffa_dev->uuid); 73 73 } 74 74 ··· 77 77 { 78 78 struct ffa_device *ffa_dev = to_ffa_dev(dev); 79 79 80 - return sysfs_emit(buf, SCMI_UEVENT_MODALIAS_FMT, ffa_dev->vm_id, 80 + return sysfs_emit(buf, FFA_UEVENT_MODALIAS_FMT, ffa_dev->vm_id, 81 81 &ffa_dev->uuid); 82 82 } 83 83 static DEVICE_ATTR_RO(modalias); ··· 160 160 return 0; 161 161 } 162 162 163 - static void ffa_devices_unregister(void) 163 + void ffa_devices_unregister(void) 164 164 { 165 165 bus_for_each_dev(&ffa_bus_type, NULL, NULL, 166 166 __ffa_devices_unregister); 167 167 } 168 + EXPORT_SYMBOL_GPL(ffa_devices_unregister); 168 169 169 170 bool ffa_device_is_valid(struct ffa_device *ffa_dev) 170 171 { ··· 193 192 const struct ffa_ops *ops) 194 193 { 195 194 int id, ret; 196 - uuid_t uuid; 197 195 struct device *dev; 198 196 struct ffa_device *ffa_dev; 199 197 ··· 212 212 dev = &ffa_dev->dev; 213 213 dev->bus = &ffa_bus_type; 214 214 dev->release = ffa_release_device; 215 + dev->dma_mask = &dev->coherent_dma_mask; 215 216 dev_set_name(&ffa_dev->dev, "arm-ffa-%d", id); 216 217 217 218 ffa_dev->id = id; 218 219 ffa_dev->vm_id = part_info->id; 219 220 ffa_dev->properties = part_info->properties; 220 221 ffa_dev->ops = ops; 221 - import_uuid(&uuid, (u8 *)part_info->uuid); 222 - uuid_copy(&ffa_dev->uuid, &uuid); 222 + uuid_copy(&ffa_dev->uuid, &part_info->uuid); 223 223 224 224 ret = device_register(&ffa_dev->dev); 225 225 if (ret) {
+413 -155
drivers/firmware/arm_ffa/driver.c
··· 44 44 45 45 #include "common.h" 46 46 47 - #define FFA_DRIVER_VERSION FFA_VERSION_1_1 47 + #define FFA_DRIVER_VERSION FFA_VERSION_1_2 48 48 #define FFA_MIN_VERSION FFA_VERSION_1_0 49 49 50 50 #define SENDER_ID_MASK GENMASK(31, 16) ··· 114 114 }; 115 115 116 116 static struct ffa_drv_info *drv_info; 117 - static void ffa_partitions_cleanup(void); 118 117 119 118 /* 120 119 * The driver must be able to support all the versions from the earliest ··· 144 145 .a0 = FFA_VERSION, .a1 = FFA_DRIVER_VERSION, 145 146 }, &ver); 146 147 147 - if (ver.a0 == FFA_RET_NOT_SUPPORTED) { 148 + if ((s32)ver.a0 == FFA_RET_NOT_SUPPORTED) { 148 149 pr_info("FFA_VERSION returned not supported\n"); 149 150 return -EOPNOTSUPP; 151 + } 152 + 153 + if (FFA_MAJOR_VERSION(ver.a0) > FFA_MAJOR_VERSION(FFA_DRIVER_VERSION)) { 154 + pr_err("Incompatible v%d.%d! Latest supported v%d.%d\n", 155 + FFA_MAJOR_VERSION(ver.a0), FFA_MINOR_VERSION(ver.a0), 156 + FFA_MAJOR_VERSION(FFA_DRIVER_VERSION), 157 + FFA_MINOR_VERSION(FFA_DRIVER_VERSION)); 158 + return -EINVAL; 150 159 } 151 160 152 161 if (ver.a0 < FFA_MIN_VERSION) { ··· 283 276 } 284 277 285 278 if (buffer && count <= num_partitions) 286 - for (idx = 0; idx < count; idx++) 287 - memcpy(buffer + idx, drv_info->rx_buffer + idx * sz, 288 - buf_sz); 279 + for (idx = 0; idx < count; idx++) { 280 + struct ffa_partition_info_le { 281 + __le16 id; 282 + __le16 exec_ctxt; 283 + __le32 properties; 284 + uuid_t uuid; 285 + } *rx_buf = drv_info->rx_buffer + idx * sz; 286 + struct ffa_partition_info *buf = buffer + idx; 287 + 288 + buf->id = le16_to_cpu(rx_buf->id); 289 + buf->exec_ctxt = le16_to_cpu(rx_buf->exec_ctxt); 290 + buf->properties = le32_to_cpu(rx_buf->properties); 291 + if (buf_sz > 8) 292 + import_uuid(&buf->uuid, (u8 *)&rx_buf->uuid); 293 + } 289 294 290 295 ffa_rx_release(); 291 296 ··· 314 295 #define CURRENT_INDEX(x) ((u16)(FIELD_GET(CURRENT_INDEX_MASK, (x)))) 315 296 #define UUID_INFO_TAG(x) ((u16)(FIELD_GET(UUID_INFO_TAG_MASK, (x)))) 316 297 
#define PARTITION_INFO_SZ(x) ((u16)(FIELD_GET(PARTITION_INFO_SZ_MASK, (x)))) 298 + #define PART_INFO_ID_MASK GENMASK(15, 0) 299 + #define PART_INFO_EXEC_CXT_MASK GENMASK(31, 16) 300 + #define PART_INFO_PROPS_MASK GENMASK(63, 32) 301 + #define PART_INFO_ID(x) ((u16)(FIELD_GET(PART_INFO_ID_MASK, (x)))) 302 + #define PART_INFO_EXEC_CXT(x) ((u16)(FIELD_GET(PART_INFO_EXEC_CXT_MASK, (x)))) 303 + #define PART_INFO_PROPERTIES(x) ((u32)(FIELD_GET(PART_INFO_PROPS_MASK, (x)))) 317 304 static int 318 305 __ffa_partition_info_get_regs(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3, 319 306 struct ffa_partition_info *buffer, int num_parts) 320 307 { 321 308 u16 buf_sz, start_idx, cur_idx, count = 0, prev_idx = 0, tag = 0; 309 + struct ffa_partition_info *buf = buffer; 322 310 ffa_value_t partition_info; 323 311 324 312 do { 313 + __le64 *regs; 314 + int idx; 315 + 325 316 start_idx = prev_idx ? prev_idx + 1 : 0; 326 317 327 318 invoke_ffa_fn((ffa_value_t){ ··· 355 326 if (buf_sz > sizeof(*buffer)) 356 327 buf_sz = sizeof(*buffer); 357 328 358 - memcpy(buffer + prev_idx * buf_sz, &partition_info.a3, 359 - (cur_idx - start_idx + 1) * buf_sz); 329 + regs = (void *)&partition_info.a3; 330 + for (idx = 0; idx < cur_idx - start_idx + 1; idx++, buf++) { 331 + union { 332 + uuid_t uuid; 333 + u64 regs[2]; 334 + } uuid_regs = { 335 + .regs = { 336 + le64_to_cpu(*(regs + 1)), 337 + le64_to_cpu(*(regs + 2)), 338 + } 339 + }; 340 + u64 val = *(u64 *)regs; 341 + 342 + buf->id = PART_INFO_ID(val); 343 + buf->exec_ctxt = PART_INFO_EXEC_CXT(val); 344 + buf->properties = PART_INFO_PROPERTIES(val); 345 + uuid_copy(&buf->uuid, &uuid_regs.uuid); 346 + regs += 3; 347 + } 360 348 prev_idx = cur_idx; 361 349 362 350 } while (cur_idx < (count - 1)); ··· 491 445 return -EINVAL; 492 446 } 493 447 494 - static int ffa_msg_send2(u16 src_id, u16 dst_id, void *buf, size_t sz) 448 + static int ffa_msg_send2(struct ffa_device *dev, u16 src_id, void *buf, size_t sz) 495 449 { 496 - u32 src_dst_ids = 
PACK_TARGET_INFO(src_id, dst_id); 450 + u32 src_dst_ids = PACK_TARGET_INFO(src_id, dev->vm_id); 497 451 struct ffa_indirect_msg_hdr *msg; 498 452 ffa_value_t ret; 499 453 int retval = 0; ··· 509 463 msg->offset = sizeof(*msg); 510 464 msg->send_recv_id = src_dst_ids; 511 465 msg->size = sz; 466 + uuid_copy(&msg->uuid, &dev->uuid); 512 467 memcpy((u8 *)msg + msg->offset, buf, sz); 513 468 514 469 /* flags = 0, sender VMID = 0 works for both physical/virtual NS */ ··· 807 760 return 0; 808 761 } 809 762 763 + enum notify_type { 764 + SECURE_PARTITION, 765 + NON_SECURE_VM, 766 + SPM_FRAMEWORK, 767 + NS_HYP_FRAMEWORK, 768 + }; 769 + 810 770 #define NOTIFICATION_LOW_MASK GENMASK(31, 0) 811 771 #define NOTIFICATION_HIGH_MASK GENMASK(63, 32) 812 772 #define NOTIFICATION_BITMAP_HIGH(x) \ ··· 837 783 #define MAX_IDS_32 10 838 784 839 785 #define PER_VCPU_NOTIFICATION_FLAG BIT(0) 840 - #define SECURE_PARTITION_BITMAP BIT(0) 841 - #define NON_SECURE_VM_BITMAP BIT(1) 842 - #define SPM_FRAMEWORK_BITMAP BIT(2) 843 - #define NS_HYP_FRAMEWORK_BITMAP BIT(3) 786 + #define SECURE_PARTITION_BITMAP_ENABLE BIT(SECURE_PARTITION) 787 + #define NON_SECURE_VM_BITMAP_ENABLE BIT(NON_SECURE_VM) 788 + #define SPM_FRAMEWORK_BITMAP_ENABLE BIT(SPM_FRAMEWORK) 789 + #define NS_HYP_FRAMEWORK_BITMAP_ENABLE BIT(NS_HYP_FRAMEWORK) 790 + #define FFA_BITMAP_SECURE_ENABLE_MASK \ 791 + (SECURE_PARTITION_BITMAP_ENABLE | SPM_FRAMEWORK_BITMAP_ENABLE) 792 + #define FFA_BITMAP_NS_ENABLE_MASK \ 793 + (NON_SECURE_VM_BITMAP_ENABLE | NS_HYP_FRAMEWORK_BITMAP_ENABLE) 794 + #define FFA_BITMAP_ALL_ENABLE_MASK \ 795 + (FFA_BITMAP_SECURE_ENABLE_MASK | FFA_BITMAP_NS_ENABLE_MASK) 796 + 797 + #define FFA_SECURE_PARTITION_ID_FLAG BIT(15) 798 + 799 + #define SPM_FRAMEWORK_BITMAP(x) NOTIFICATION_BITMAP_LOW(x) 800 + #define NS_HYP_FRAMEWORK_BITMAP(x) NOTIFICATION_BITMAP_HIGH(x) 801 + #define FRAMEWORK_NOTIFY_RX_BUFFER_FULL BIT(0) 844 802 845 803 static int ffa_notification_bind_common(u16 dst_id, u64 bitmap, 846 804 u32 flags, 
bool is_bind) ··· 918 852 else if (ret.a0 != FFA_SUCCESS) 919 853 return -EINVAL; /* Something else went wrong. */ 920 854 921 - notify->sp_map = PACK_NOTIFICATION_BITMAP(ret.a2, ret.a3); 922 - notify->vm_map = PACK_NOTIFICATION_BITMAP(ret.a4, ret.a5); 923 - notify->arch_map = PACK_NOTIFICATION_BITMAP(ret.a6, ret.a7); 855 + if (flags & SECURE_PARTITION_BITMAP_ENABLE) 856 + notify->sp_map = PACK_NOTIFICATION_BITMAP(ret.a2, ret.a3); 857 + if (flags & NON_SECURE_VM_BITMAP_ENABLE) 858 + notify->vm_map = PACK_NOTIFICATION_BITMAP(ret.a4, ret.a5); 859 + if (flags & SPM_FRAMEWORK_BITMAP_ENABLE) 860 + notify->arch_map = SPM_FRAMEWORK_BITMAP(ret.a6); 861 + if (flags & NS_HYP_FRAMEWORK_BITMAP_ENABLE) 862 + notify->arch_map = PACK_NOTIFICATION_BITMAP(notify->arch_map, 863 + ret.a7); 924 864 925 865 return 0; 926 866 } ··· 935 863 ffa_sched_recv_cb callback; 936 864 void *cb_data; 937 865 rwlock_t rw_lock; 866 + struct ffa_device *dev; 867 + struct list_head node; 938 868 }; 939 869 940 870 static void __do_sched_recv_cb(u16 part_id, u16 vcpu, bool is_per_vcpu) 941 871 { 942 - struct ffa_dev_part_info *partition; 872 + struct ffa_dev_part_info *partition = NULL, *tmp; 943 873 ffa_sched_recv_cb callback; 874 + struct list_head *phead; 944 875 void *cb_data; 945 876 946 - partition = xa_load(&drv_info->partition_info, part_id); 947 - if (!partition) { 877 + phead = xa_load(&drv_info->partition_info, part_id); 878 + if (!phead) { 948 879 pr_err("%s: Invalid partition ID 0x%x\n", __func__, part_id); 949 880 return; 950 881 } 951 882 952 - read_lock(&partition->rw_lock); 953 - callback = partition->callback; 954 - cb_data = partition->cb_data; 955 - read_unlock(&partition->rw_lock); 883 + list_for_each_entry_safe(partition, tmp, phead, node) { 884 + read_lock(&partition->rw_lock); 885 + callback = partition->callback; 886 + cb_data = partition->cb_data; 887 + read_unlock(&partition->rw_lock); 956 888 957 - if (callback) 958 - callback(vcpu, is_per_vcpu, cb_data); 889 + if (callback) 
890 + callback(vcpu, is_per_vcpu, cb_data); 891 + } 959 892 } 960 893 961 894 static void ffa_notification_info_get(void) ··· 976 899 }, &ret); 977 900 978 901 if (ret.a0 != FFA_FN_NATIVE(SUCCESS) && ret.a0 != FFA_SUCCESS) { 979 - if (ret.a2 != FFA_RET_NO_DATA) 902 + if ((s32)ret.a2 != FFA_RET_NO_DATA) 980 903 pr_err("Notification Info fetch failed: 0x%lx (0x%lx)", 981 904 ret.a0, ret.a2); 982 905 return; ··· 1012 935 } 1013 936 1014 937 /* Per vCPU Notification */ 1015 - for (idx = 0; idx < ids_count[list]; idx++) { 938 + for (idx = 1; idx < ids_count[list]; idx++) { 1016 939 if (ids_processed >= max_ids - 1) 1017 940 break; 1018 941 ··· 1092 1015 1093 1016 static int ffa_indirect_msg_send(struct ffa_device *dev, void *buf, size_t sz) 1094 1017 { 1095 - return ffa_msg_send2(drv_info->vm_id, dev->vm_id, buf, sz); 1018 + return ffa_msg_send2(dev, drv_info->vm_id, buf, sz); 1096 1019 } 1097 1020 1098 - static int ffa_sync_send_receive2(struct ffa_device *dev, const uuid_t *uuid, 1021 + static int ffa_sync_send_receive2(struct ffa_device *dev, 1099 1022 struct ffa_send_direct_data2 *data) 1100 1023 { 1101 1024 if (!drv_info->msg_direct_req2_supp) 1102 1025 return -EOPNOTSUPP; 1103 1026 1104 1027 return ffa_msg_send_direct_req2(drv_info->vm_id, dev->vm_id, 1105 - uuid, data); 1028 + &dev->uuid, data); 1106 1029 } 1107 1030 1108 1031 static int ffa_memory_share(struct ffa_mem_ops_args *args) ··· 1128 1051 return ffa_memory_ops(FFA_MEM_LEND, args); 1129 1052 } 1130 1053 1131 - #define FFA_SECURE_PARTITION_ID_FLAG BIT(15) 1132 - 1133 1054 #define ffa_notifications_disabled() (!drv_info->notif_enabled) 1134 - 1135 - enum notify_type { 1136 - NON_SECURE_VM, 1137 - SECURE_PARTITION, 1138 - FRAMEWORK, 1139 - }; 1140 1055 1141 1056 struct notifier_cb_info { 1142 1057 struct hlist_node hnode; 1058 + struct ffa_device *dev; 1059 + ffa_fwk_notifier_cb fwk_cb; 1143 1060 ffa_notifier_cb cb; 1144 1061 void *cb_data; 1145 - enum notify_type type; 1146 1062 }; 1147 1063 1148 - static 
int ffa_sched_recv_cb_update(u16 part_id, ffa_sched_recv_cb callback, 1149 - void *cb_data, bool is_registration) 1064 + static int 1065 + ffa_sched_recv_cb_update(struct ffa_device *dev, ffa_sched_recv_cb callback, 1066 + void *cb_data, bool is_registration) 1150 1067 { 1151 - struct ffa_dev_part_info *partition; 1068 + struct ffa_dev_part_info *partition = NULL, *tmp; 1069 + struct list_head *phead; 1152 1070 bool cb_valid; 1153 1071 1154 1072 if (ffa_notifications_disabled()) 1155 1073 return -EOPNOTSUPP; 1156 1074 1157 - partition = xa_load(&drv_info->partition_info, part_id); 1075 + phead = xa_load(&drv_info->partition_info, dev->vm_id); 1076 + if (!phead) { 1077 + pr_err("%s: Invalid partition ID 0x%x\n", __func__, dev->vm_id); 1078 + return -EINVAL; 1079 + } 1080 + 1081 + list_for_each_entry_safe(partition, tmp, phead, node) 1082 + if (partition->dev == dev) 1083 + break; 1084 + 1158 1085 if (!partition) { 1159 - pr_err("%s: Invalid partition ID 0x%x\n", __func__, part_id); 1086 + pr_err("%s: No such partition ID 0x%x\n", __func__, dev->vm_id); 1160 1087 return -EINVAL; 1161 1088 } 1162 1089 ··· 1182 1101 static int ffa_sched_recv_cb_register(struct ffa_device *dev, 1183 1102 ffa_sched_recv_cb cb, void *cb_data) 1184 1103 { 1185 - return ffa_sched_recv_cb_update(dev->vm_id, cb, cb_data, true); 1104 + return ffa_sched_recv_cb_update(dev, cb, cb_data, true); 1186 1105 } 1187 1106 1188 1107 static int ffa_sched_recv_cb_unregister(struct ffa_device *dev) 1189 1108 { 1190 - return ffa_sched_recv_cb_update(dev->vm_id, NULL, NULL, false); 1109 + return ffa_sched_recv_cb_update(dev, NULL, NULL, false); 1191 1110 } 1192 1111 1193 1112 static int ffa_notification_bind(u16 dst_id, u64 bitmap, u32 flags) ··· 1200 1119 return ffa_notification_bind_common(dst_id, bitmap, 0, false); 1201 1120 } 1202 1121 1203 - /* Should be called while the notify_lock is taken */ 1122 + static enum notify_type ffa_notify_type_get(u16 vm_id) 1123 + { 1124 + if (vm_id & 
FFA_SECURE_PARTITION_ID_FLAG) 1125 + return SECURE_PARTITION; 1126 + else 1127 + return NON_SECURE_VM; 1128 + } 1129 + 1130 + /* notifier_hnode_get* should be called with notify_lock held */ 1204 1131 static struct notifier_cb_info * 1205 - notifier_hash_node_get(u16 notify_id, enum notify_type type) 1132 + notifier_hnode_get_by_vmid(u16 notify_id, int vmid) 1206 1133 { 1207 1134 struct notifier_cb_info *node; 1208 1135 1209 1136 hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id) 1210 - if (type == node->type) 1137 + if (node->fwk_cb && vmid == node->dev->vm_id) 1138 + return node; 1139 + 1140 + return NULL; 1141 + } 1142 + 1143 + static struct notifier_cb_info * 1144 + notifier_hnode_get_by_vmid_uuid(u16 notify_id, int vmid, const uuid_t *uuid) 1145 + { 1146 + struct notifier_cb_info *node; 1147 + 1148 + if (uuid_is_null(uuid)) 1149 + return notifier_hnode_get_by_vmid(notify_id, vmid); 1150 + 1151 + hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id) 1152 + if (node->fwk_cb && vmid == node->dev->vm_id && 1153 + uuid_equal(&node->dev->uuid, uuid)) 1154 + return node; 1155 + 1156 + return NULL; 1157 + } 1158 + 1159 + static struct notifier_cb_info * 1160 + notifier_hnode_get_by_type(u16 notify_id, enum notify_type type) 1161 + { 1162 + struct notifier_cb_info *node; 1163 + 1164 + hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id) 1165 + if (node->cb && type == ffa_notify_type_get(node->dev->vm_id)) 1211 1166 return node; 1212 1167 1213 1168 return NULL; 1214 1169 } 1215 1170 1216 1171 static int 1217 - update_notifier_cb(int notify_id, enum notify_type type, ffa_notifier_cb cb, 1218 - void *cb_data, bool is_registration) 1172 + update_notifier_cb(struct ffa_device *dev, int notify_id, void *cb, 1173 + void *cb_data, bool is_registration, bool is_framework) 1219 1174 { 1220 1175 struct notifier_cb_info *cb_info = NULL; 1176 + enum notify_type type = ffa_notify_type_get(dev->vm_id); 1221 1177 bool cb_found; 
1222 1178 1223 - cb_info = notifier_hash_node_get(notify_id, type); 1179 + if (is_framework) 1180 + cb_info = notifier_hnode_get_by_vmid_uuid(notify_id, dev->vm_id, 1181 + &dev->uuid); 1182 + else 1183 + cb_info = notifier_hnode_get_by_type(notify_id, type); 1184 + 1224 1185 cb_found = !!cb_info; 1225 1186 1226 1187 if (!(is_registration ^ cb_found)) ··· 1273 1150 if (!cb_info) 1274 1151 return -ENOMEM; 1275 1152 1276 - cb_info->type = type; 1277 - cb_info->cb = cb; 1153 + cb_info->dev = dev; 1278 1154 cb_info->cb_data = cb_data; 1155 + if (is_framework) 1156 + cb_info->fwk_cb = cb; 1157 + else 1158 + cb_info->cb = cb; 1279 1159 1280 1160 hash_add(drv_info->notifier_hash, &cb_info->hnode, notify_id); 1281 1161 } else { ··· 1288 1162 return 0; 1289 1163 } 1290 1164 1291 - static enum notify_type ffa_notify_type_get(u16 vm_id) 1292 - { 1293 - if (vm_id & FFA_SECURE_PARTITION_ID_FLAG) 1294 - return SECURE_PARTITION; 1295 - else 1296 - return NON_SECURE_VM; 1297 - } 1298 - 1299 - static int ffa_notify_relinquish(struct ffa_device *dev, int notify_id) 1165 + static int __ffa_notify_relinquish(struct ffa_device *dev, int notify_id, 1166 + bool is_framework) 1300 1167 { 1301 1168 int rc; 1302 - enum notify_type type = ffa_notify_type_get(dev->vm_id); 1303 1169 1304 1170 if (ffa_notifications_disabled()) 1305 1171 return -EOPNOTSUPP; ··· 1301 1183 1302 1184 mutex_lock(&drv_info->notify_lock); 1303 1185 1304 - rc = update_notifier_cb(notify_id, type, NULL, NULL, false); 1186 + rc = update_notifier_cb(dev, notify_id, NULL, NULL, false, 1187 + is_framework); 1305 1188 if (rc) { 1306 1189 pr_err("Could not unregister notification callback\n"); 1307 1190 mutex_unlock(&drv_info->notify_lock); 1308 1191 return rc; 1309 1192 } 1310 1193 1311 - rc = ffa_notification_unbind(dev->vm_id, BIT(notify_id)); 1194 + if (!is_framework) 1195 + rc = ffa_notification_unbind(dev->vm_id, BIT(notify_id)); 1312 1196 1197 + mutex_unlock(&drv_info->notify_lock); 1198 + 1199 + return rc; 1200 + } 
1201 + 1202 + static int ffa_notify_relinquish(struct ffa_device *dev, int notify_id) 1203 + { 1204 + return __ffa_notify_relinquish(dev, notify_id, false); 1205 + } 1206 + 1207 + static int ffa_fwk_notify_relinquish(struct ffa_device *dev, int notify_id) 1208 + { 1209 + return __ffa_notify_relinquish(dev, notify_id, true); 1210 + } 1211 + 1212 + static int __ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu, 1213 + void *cb, void *cb_data, 1214 + int notify_id, bool is_framework) 1215 + { 1216 + int rc; 1217 + u32 flags = 0; 1218 + 1219 + if (ffa_notifications_disabled()) 1220 + return -EOPNOTSUPP; 1221 + 1222 + if (notify_id >= FFA_MAX_NOTIFICATIONS) 1223 + return -EINVAL; 1224 + 1225 + mutex_lock(&drv_info->notify_lock); 1226 + 1227 + if (!is_framework) { 1228 + if (is_per_vcpu) 1229 + flags = PER_VCPU_NOTIFICATION_FLAG; 1230 + 1231 + rc = ffa_notification_bind(dev->vm_id, BIT(notify_id), flags); 1232 + if (rc) { 1233 + mutex_unlock(&drv_info->notify_lock); 1234 + return rc; 1235 + } 1236 + } 1237 + 1238 + rc = update_notifier_cb(dev, notify_id, cb, cb_data, true, 1239 + is_framework); 1240 + if (rc) { 1241 + pr_err("Failed to register callback for %d - %d\n", 1242 + notify_id, rc); 1243 + if (!is_framework) 1244 + ffa_notification_unbind(dev->vm_id, BIT(notify_id)); 1245 + } 1313 1246 mutex_unlock(&drv_info->notify_lock); 1314 1247 1315 1248 return rc; ··· 1369 1200 static int ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu, 1370 1201 ffa_notifier_cb cb, void *cb_data, int notify_id) 1371 1202 { 1372 - int rc; 1373 - u32 flags = 0; 1374 - enum notify_type type = ffa_notify_type_get(dev->vm_id); 1203 + return __ffa_notify_request(dev, is_per_vcpu, cb, cb_data, notify_id, 1204 + false); 1205 + } 1375 1206 1376 - if (ffa_notifications_disabled()) 1377 - return -EOPNOTSUPP; 1378 - 1379 - if (notify_id >= FFA_MAX_NOTIFICATIONS) 1380 - return -EINVAL; 1381 - 1382 - mutex_lock(&drv_info->notify_lock); 1383 - 1384 - if (is_per_vcpu) 1385 - flags = 
PER_VCPU_NOTIFICATION_FLAG; 1386 - 1387 - rc = ffa_notification_bind(dev->vm_id, BIT(notify_id), flags); 1388 - if (rc) { 1389 - mutex_unlock(&drv_info->notify_lock); 1390 - return rc; 1391 - } 1392 - 1393 - rc = update_notifier_cb(notify_id, type, cb, cb_data, true); 1394 - if (rc) { 1395 - pr_err("Failed to register callback for %d - %d\n", 1396 - notify_id, rc); 1397 - ffa_notification_unbind(dev->vm_id, BIT(notify_id)); 1398 - } 1399 - mutex_unlock(&drv_info->notify_lock); 1400 - 1401 - return rc; 1207 + static int 1208 + ffa_fwk_notify_request(struct ffa_device *dev, ffa_fwk_notifier_cb cb, 1209 + void *cb_data, int notify_id) 1210 + { 1211 + return __ffa_notify_request(dev, false, cb, cb_data, notify_id, true); 1402 1212 } 1403 1213 1404 1214 static int ffa_notify_send(struct ffa_device *dev, int notify_id, ··· 1406 1258 continue; 1407 1259 1408 1260 mutex_lock(&drv_info->notify_lock); 1409 - cb_info = notifier_hash_node_get(notify_id, type); 1261 + cb_info = notifier_hnode_get_by_type(notify_id, type); 1410 1262 mutex_unlock(&drv_info->notify_lock); 1411 1263 1412 1264 if (cb_info && cb_info->cb) ··· 1414 1266 } 1415 1267 } 1416 1268 1417 - static void notif_get_and_handle(void *unused) 1269 + static void handle_fwk_notif_callbacks(u32 bitmap) 1270 + { 1271 + void *buf; 1272 + uuid_t uuid; 1273 + int notify_id = 0, target; 1274 + struct ffa_indirect_msg_hdr *msg; 1275 + struct notifier_cb_info *cb_info = NULL; 1276 + 1277 + /* Only one framework notification defined and supported for now */ 1278 + if (!(bitmap & FRAMEWORK_NOTIFY_RX_BUFFER_FULL)) 1279 + return; 1280 + 1281 + mutex_lock(&drv_info->rx_lock); 1282 + 1283 + msg = drv_info->rx_buffer; 1284 + buf = kmemdup((void *)msg + msg->offset, msg->size, GFP_KERNEL); 1285 + if (!buf) { 1286 + mutex_unlock(&drv_info->rx_lock); 1287 + return; 1288 + } 1289 + 1290 + target = SENDER_ID(msg->send_recv_id); 1291 + if (msg->offset >= sizeof(*msg)) 1292 + uuid_copy(&uuid, &msg->uuid); 1293 + else 1294 + 
uuid_copy(&uuid, &uuid_null); 1295 + 1296 + mutex_unlock(&drv_info->rx_lock); 1297 + 1298 + ffa_rx_release(); 1299 + 1300 + mutex_lock(&drv_info->notify_lock); 1301 + cb_info = notifier_hnode_get_by_vmid_uuid(notify_id, target, &uuid); 1302 + mutex_unlock(&drv_info->notify_lock); 1303 + 1304 + if (cb_info && cb_info->fwk_cb) 1305 + cb_info->fwk_cb(notify_id, cb_info->cb_data, buf); 1306 + kfree(buf); 1307 + } 1308 + 1309 + static void notif_get_and_handle(void *cb_data) 1418 1310 { 1419 1311 int rc; 1420 - struct ffa_notify_bitmaps bitmaps; 1312 + u32 flags; 1313 + struct ffa_drv_info *info = cb_data; 1314 + struct ffa_notify_bitmaps bitmaps = { 0 }; 1421 1315 1422 - rc = ffa_notification_get(SECURE_PARTITION_BITMAP | 1423 - SPM_FRAMEWORK_BITMAP, &bitmaps); 1316 + if (info->vm_id == 0) /* Non secure physical instance */ 1317 + flags = FFA_BITMAP_SECURE_ENABLE_MASK; 1318 + else 1319 + flags = FFA_BITMAP_ALL_ENABLE_MASK; 1320 + 1321 + rc = ffa_notification_get(flags, &bitmaps); 1424 1322 if (rc) { 1425 1323 pr_err("Failed to retrieve notifications with %d!\n", rc); 1426 1324 return; 1427 1325 } 1428 1326 1327 + handle_fwk_notif_callbacks(SPM_FRAMEWORK_BITMAP(bitmaps.arch_map)); 1328 + handle_fwk_notif_callbacks(NS_HYP_FRAMEWORK_BITMAP(bitmaps.arch_map)); 1429 1329 handle_notif_callbacks(bitmaps.vm_map, NON_SECURE_VM); 1430 1330 handle_notif_callbacks(bitmaps.sp_map, SECURE_PARTITION); 1431 - handle_notif_callbacks(bitmaps.arch_map, FRAMEWORK); 1432 1331 } 1433 1332 1434 1333 static void ··· 1524 1329 .sched_recv_cb_unregister = ffa_sched_recv_cb_unregister, 1525 1330 .notify_request = ffa_notify_request, 1526 1331 .notify_relinquish = ffa_notify_relinquish, 1332 + .fwk_notify_request = ffa_fwk_notify_request, 1333 + .fwk_notify_relinquish = ffa_fwk_notify_relinquish, 1527 1334 .notify_send = ffa_notify_send, 1528 1335 }; 1529 1336 ··· 1581 1384 .notifier_call = ffa_bus_notifier, 1582 1385 }; 1583 1386 1387 + static int ffa_xa_add_partition_info(struct ffa_device 
*dev) 1388 + { 1389 + struct ffa_dev_part_info *info; 1390 + struct list_head *head, *phead; 1391 + int ret = -ENOMEM; 1392 + 1393 + phead = xa_load(&drv_info->partition_info, dev->vm_id); 1394 + if (phead) { 1395 + head = phead; 1396 + list_for_each_entry(info, head, node) { 1397 + if (info->dev == dev) { 1398 + pr_err("%s: duplicate dev %p part ID 0x%x\n", 1399 + __func__, dev, dev->vm_id); 1400 + return -EEXIST; 1401 + } 1402 + } 1403 + } 1404 + 1405 + info = kzalloc(sizeof(*info), GFP_KERNEL); 1406 + if (!info) 1407 + return ret; 1408 + 1409 + rwlock_init(&info->rw_lock); 1410 + info->dev = dev; 1411 + 1412 + if (!phead) { 1413 + phead = kzalloc(sizeof(*phead), GFP_KERNEL); 1414 + if (!phead) 1415 + goto free_out; 1416 + 1417 + INIT_LIST_HEAD(phead); 1418 + 1419 + ret = xa_insert(&drv_info->partition_info, dev->vm_id, phead, 1420 + GFP_KERNEL); 1421 + if (ret) { 1422 + pr_err("%s: failed to save part ID 0x%x Ret:%d\n", 1423 + __func__, dev->vm_id, ret); 1424 + goto free_out; 1425 + } 1426 + } 1427 + list_add(&info->node, phead); 1428 + return 0; 1429 + 1430 + free_out: 1431 + kfree(phead); 1432 + kfree(info); 1433 + return ret; 1434 + } 1435 + 1436 + static int ffa_setup_host_partition(int vm_id) 1437 + { 1438 + struct ffa_partition_info buf = { 0 }; 1439 + struct ffa_device *ffa_dev; 1440 + int ret; 1441 + 1442 + buf.id = vm_id; 1443 + ffa_dev = ffa_device_register(&buf, &ffa_drv_ops); 1444 + if (!ffa_dev) { 1445 + pr_err("%s: failed to register host partition ID 0x%x\n", 1446 + __func__, vm_id); 1447 + return -EINVAL; 1448 + } 1449 + 1450 + ret = ffa_xa_add_partition_info(ffa_dev); 1451 + if (ret) 1452 + return ret; 1453 + 1454 + if (ffa_notifications_disabled()) 1455 + return 0; 1456 + 1457 + ret = ffa_sched_recv_cb_update(ffa_dev, ffa_self_notif_handle, 1458 + drv_info, true); 1459 + if (ret) 1460 + pr_info("Failed to register driver sched callback %d\n", ret); 1461 + 1462 + return ret; 1463 + } 1464 + 1465 + static void ffa_partitions_cleanup(void) 1466 + 
{ 1467 + struct list_head *phead; 1468 + unsigned long idx; 1469 + 1470 + /* Clean up/free all registered devices */ 1471 + ffa_devices_unregister(); 1472 + 1473 + xa_for_each(&drv_info->partition_info, idx, phead) { 1474 + struct ffa_dev_part_info *info, *tmp; 1475 + 1476 + xa_erase(&drv_info->partition_info, idx); 1477 + list_for_each_entry_safe(info, tmp, phead, node) { 1478 + list_del(&info->node); 1479 + kfree(info); 1480 + } 1481 + kfree(phead); 1482 + } 1483 + 1484 + xa_destroy(&drv_info->partition_info); 1485 + } 1486 + 1584 1487 static int ffa_setup_partitions(void) 1585 1488 { 1586 1489 int count, idx, ret; 1587 1490 struct ffa_device *ffa_dev; 1588 - struct ffa_dev_part_info *info; 1589 1491 struct ffa_partition_info *pbuf, *tpbuf; 1590 1492 1591 1493 if (drv_info->version == FFA_VERSION_1_0) { ··· 1718 1422 !(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC)) 1719 1423 ffa_mode_32bit_set(ffa_dev); 1720 1424 1721 - info = kzalloc(sizeof(*info), GFP_KERNEL); 1722 - if (!info) { 1425 + if (ffa_xa_add_partition_info(ffa_dev)) { 1723 1426 ffa_device_unregister(ffa_dev); 1724 1427 continue; 1725 - } 1726 - rwlock_init(&info->rw_lock); 1727 - ret = xa_insert(&drv_info->partition_info, tpbuf->id, 1728 - info, GFP_KERNEL); 1729 - if (ret) { 1730 - pr_err("%s: failed to save partition ID 0x%x - ret:%d\n", 1731 - __func__, tpbuf->id, ret); 1732 - ffa_device_unregister(ffa_dev); 1733 - kfree(info); 1734 1428 } 1735 1429 } 1736 1430 1737 1431 kfree(pbuf); 1738 1432 1739 - /* Allocate for the host */ 1740 - info = kzalloc(sizeof(*info), GFP_KERNEL); 1741 - if (!info) { 1742 - /* Already registered devices are freed on bus_exit */ 1743 - ffa_partitions_cleanup(); 1744 - return -ENOMEM; 1745 - } 1433 + /* 1434 + * Check if the host is already added as part of partition info 1435 + * No multiple UUID possible for the host, so just checking if 1436 + * there is an entry will suffice 1437 + */ 1438 + if (xa_load(&drv_info->partition_info, drv_info->vm_id)) 1439 + return 0; 
1746 1440 1747 - rwlock_init(&info->rw_lock); 1748 - ret = xa_insert(&drv_info->partition_info, drv_info->vm_id, 1749 - info, GFP_KERNEL); 1750 - if (ret) { 1751 - pr_err("%s: failed to save Host partition ID 0x%x - ret:%d. Abort.\n", 1752 - __func__, drv_info->vm_id, ret); 1753 - kfree(info); 1754 - /* Already registered devices are freed on bus_exit */ 1441 + /* Allocate for the host */ 1442 + ret = ffa_setup_host_partition(drv_info->vm_id); 1443 + if (ret) 1755 1444 ffa_partitions_cleanup(); 1756 - } 1757 1445 1758 1446 return ret; 1759 - } 1760 - 1761 - static void ffa_partitions_cleanup(void) 1762 - { 1763 - struct ffa_dev_part_info *info; 1764 - unsigned long idx; 1765 - 1766 - xa_for_each(&drv_info->partition_info, idx, info) { 1767 - xa_erase(&drv_info->partition_info, idx); 1768 - kfree(info); 1769 - } 1770 - 1771 - xa_destroy(&drv_info->partition_info); 1772 1447 } 1773 1448 1774 1449 /* FFA FEATURE IDs */ ··· 2044 1777 ffa_notifications_setup(); 2045 1778 2046 1779 ret = ffa_setup_partitions(); 2047 - if (ret) { 2048 - pr_err("failed to setup partitions\n"); 2049 - goto cleanup_notifs; 2050 - } 1780 + if (!ret) 1781 + return ret; 2051 1782 2052 - ret = ffa_sched_recv_cb_update(drv_info->vm_id, ffa_self_notif_handle, 2053 - drv_info, true); 2054 - if (ret) 2055 - pr_info("Failed to register driver sched callback %d\n", ret); 2056 - 2057 - return 0; 2058 - 2059 - cleanup_notifs: 1783 + pr_err("failed to setup partitions\n"); 2060 1784 ffa_notifications_cleanup(); 2061 1785 free_pages: 2062 1786 if (drv_info->tx_buffer)
+20 -2
include/linux/arm_ffa.h
··· 112 112 FIELD_PREP(FFA_MINOR_VERSION_MASK, (minor))) 113 113 #define FFA_VERSION_1_0 FFA_PACK_VERSION_INFO(1, 0) 114 114 #define FFA_VERSION_1_1 FFA_PACK_VERSION_INFO(1, 1) 115 + #define FFA_VERSION_1_2 FFA_PACK_VERSION_INFO(1, 2) 115 116 116 117 /** 117 118 * FF-A specification mentions explicitly about '4K pages'. This should ··· 177 176 int ffa_driver_register(struct ffa_driver *driver, struct module *owner, 178 177 const char *mod_name); 179 178 void ffa_driver_unregister(struct ffa_driver *driver); 179 + void ffa_devices_unregister(void); 180 180 bool ffa_device_is_valid(struct ffa_device *ffa_dev); 181 181 182 182 #else ··· 189 187 } 190 188 191 189 static inline void ffa_device_unregister(struct ffa_device *dev) {} 190 + 191 + static inline void ffa_devices_unregister(void) {} 192 192 193 193 static inline int 194 194 ffa_driver_register(struct ffa_driver *driver, struct module *owner, ··· 241 237 #define FFA_PARTITION_NOTIFICATION_RECV BIT(3) 242 238 /* partition runs in the AArch64 execution state. */ 243 239 #define FFA_PARTITION_AARCH64_EXEC BIT(8) 240 + /* partition supports receipt of direct request2 */ 241 + #define FFA_PARTITION_DIRECT_REQ2_RECV BIT(9) 242 + /* partition can send direct request2. 
*/ 243 + #define FFA_PARTITION_DIRECT_REQ2_SEND BIT(10) 244 244 u32 properties; 245 - u32 uuid[4]; 245 + uuid_t uuid; 246 246 }; 247 247 248 248 static inline ··· 264 256 #define ffa_partition_supports_direct_recv(dev) \ 265 257 ffa_partition_check_property(dev, FFA_PARTITION_DIRECT_RECV) 266 258 259 + #define ffa_partition_supports_direct_req2_recv(dev) \ 260 + (ffa_partition_check_property(dev, FFA_PARTITION_DIRECT_REQ2_RECV) && \ 261 + !dev->mode_32bit) 262 + 267 263 /* For use with FFA_MSG_SEND_DIRECT_{REQ,RESP} which pass data via registers */ 268 264 struct ffa_send_direct_data { 269 265 unsigned long data0; /* w3/x3 */ ··· 283 271 u32 offset; 284 272 u32 send_recv_id; 285 273 u32 size; 274 + uuid_t uuid; 286 275 }; 287 276 288 277 /* For use with FFA_MSG_SEND_DIRECT_{REQ,RESP}2 which pass data via registers */ ··· 452 439 int (*sync_send_receive)(struct ffa_device *dev, 453 440 struct ffa_send_direct_data *data); 454 441 int (*indirect_send)(struct ffa_device *dev, void *buf, size_t sz); 455 - int (*sync_send_receive2)(struct ffa_device *dev, const uuid_t *uuid, 442 + int (*sync_send_receive2)(struct ffa_device *dev, 456 443 struct ffa_send_direct_data2 *data); 457 444 }; 458 445 ··· 468 455 469 456 typedef void (*ffa_sched_recv_cb)(u16 vcpu, bool is_per_vcpu, void *cb_data); 470 457 typedef void (*ffa_notifier_cb)(int notify_id, void *cb_data); 458 + typedef void (*ffa_fwk_notifier_cb)(int notify_id, void *cb_data, void *buf); 471 459 472 460 struct ffa_notifier_ops { 473 461 int (*sched_recv_cb_register)(struct ffa_device *dev, ··· 477 463 int (*notify_request)(struct ffa_device *dev, bool per_vcpu, 478 464 ffa_notifier_cb cb, void *cb_data, int notify_id); 479 465 int (*notify_relinquish)(struct ffa_device *dev, int notify_id); 466 + int (*fwk_notify_request)(struct ffa_device *dev, 467 + ffa_fwk_notifier_cb cb, void *cb_data, 468 + int notify_id); 469 + int (*fwk_notify_relinquish)(struct ffa_device *dev, int notify_id); 480 470 int (*notify_send)(struct 
ffa_device *dev, int notify_id, bool per_vcpu, 481 471 u16 vcpu); 482 472 };