/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2021 ARM Ltd.
 */

#ifndef _LINUX_ARM_FFA_H
#define _LINUX_ARM_FFA_H

#include <linux/device.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/uuid.h>

#define FFA_SMC(calling_convention, func_num)				\
	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, (calling_convention),	\
			   ARM_SMCCC_OWNER_STANDARD, (func_num))

#define FFA_SMC_32(func_num)	FFA_SMC(ARM_SMCCC_SMC_32, (func_num))
#define FFA_SMC_64(func_num)	FFA_SMC(ARM_SMCCC_SMC_64, (func_num))

#define FFA_ERROR			FFA_SMC_32(0x60)
#define FFA_SUCCESS			FFA_SMC_32(0x61)
#define FFA_INTERRUPT			FFA_SMC_32(0x62)
#define FFA_VERSION			FFA_SMC_32(0x63)
#define FFA_FEATURES			FFA_SMC_32(0x64)
#define FFA_RX_RELEASE			FFA_SMC_32(0x65)
#define FFA_RXTX_MAP			FFA_SMC_32(0x66)
#define FFA_FN64_RXTX_MAP		FFA_SMC_64(0x66)
#define FFA_RXTX_UNMAP			FFA_SMC_32(0x67)
#define FFA_PARTITION_INFO_GET		FFA_SMC_32(0x68)
#define FFA_ID_GET			FFA_SMC_32(0x69)
#define FFA_MSG_POLL			FFA_SMC_32(0x6A)
#define FFA_MSG_WAIT			FFA_SMC_32(0x6B)
#define FFA_YIELD			FFA_SMC_32(0x6C)
#define FFA_RUN				FFA_SMC_32(0x6D)
#define FFA_MSG_SEND			FFA_SMC_32(0x6E)
#define FFA_MSG_SEND_DIRECT_REQ		FFA_SMC_32(0x6F)
#define FFA_FN64_MSG_SEND_DIRECT_REQ	FFA_SMC_64(0x6F)
#define FFA_MSG_SEND_DIRECT_RESP	FFA_SMC_32(0x70)
#define FFA_FN64_MSG_SEND_DIRECT_RESP	FFA_SMC_64(0x70)
#define FFA_MEM_DONATE			FFA_SMC_32(0x71)
#define FFA_FN64_MEM_DONATE		FFA_SMC_64(0x71)
#define FFA_MEM_LEND			FFA_SMC_32(0x72)
#define FFA_FN64_MEM_LEND		FFA_SMC_64(0x72)
#define FFA_MEM_SHARE			FFA_SMC_32(0x73)
#define FFA_FN64_MEM_SHARE		FFA_SMC_64(0x73)
#define FFA_MEM_RETRIEVE_REQ		FFA_SMC_32(0x74)
#define FFA_FN64_MEM_RETRIEVE_REQ	FFA_SMC_64(0x74)
#define FFA_MEM_RETRIEVE_RESP		FFA_SMC_32(0x75)
#define FFA_MEM_RELINQUISH		FFA_SMC_32(0x76)
#define FFA_MEM_RECLAIM			FFA_SMC_32(0x77)
#define FFA_MEM_OP_PAUSE		FFA_SMC_32(0x78)
#define FFA_MEM_OP_RESUME		FFA_SMC_32(0x79)
#define FFA_MEM_FRAG_RX			FFA_SMC_32(0x7A)
#define FFA_MEM_FRAG_TX			FFA_SMC_32(0x7B)
#define FFA_NORMAL_WORLD_RESUME		FFA_SMC_32(0x7C)

/*
 * For some calls it is necessary to use SMC64 to pass or return 64-bit values.
 * For such calls FFA_FN_NATIVE(name) will choose the appropriate
 * (native-width) function ID.
 */
#ifdef CONFIG_64BIT
#define FFA_FN_NATIVE(name)	FFA_FN64_##name
#else
#define FFA_FN_NATIVE(name)	FFA_##name
#endif

/* FFA error codes. */
#define FFA_RET_SUCCESS            (0)
#define FFA_RET_NOT_SUPPORTED      (-1)
#define FFA_RET_INVALID_PARAMETERS (-2)
#define FFA_RET_NO_MEMORY          (-3)
#define FFA_RET_BUSY               (-4)
#define FFA_RET_INTERRUPTED        (-5)
#define FFA_RET_DENIED             (-6)
#define FFA_RET_RETRY              (-7)
#define FFA_RET_ABORTED            (-8)

/* FFA version encoding */
#define FFA_MAJOR_VERSION_MASK	GENMASK(30, 16)
#define FFA_MINOR_VERSION_MASK	GENMASK(15, 0)
#define FFA_MAJOR_VERSION(x)	((u16)(FIELD_GET(FFA_MAJOR_VERSION_MASK, (x))))
#define FFA_MINOR_VERSION(x)	((u16)(FIELD_GET(FFA_MINOR_VERSION_MASK, (x))))
#define FFA_PACK_VERSION_INFO(major, minor)			\
	(FIELD_PREP(FFA_MAJOR_VERSION_MASK, (major)) |		\
	 FIELD_PREP(FFA_MINOR_VERSION_MASK, (minor)))
#define FFA_VERSION_1_0		FFA_PACK_VERSION_INFO(1, 0)
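/*
 * Illustrative sketch (not part of the interface): decoding the version
 * reported by the FF-A firmware with the helpers above, e.g. from a driver
 * that holds an ffa_device and its ops (defined later in this header):
 *
 *	u32 ver = ffa_dev->ops->info_ops->api_version_get();
 *
 *	if (FFA_MAJOR_VERSION(ver) != 1)
 *		return -EOPNOTSUPP;
 *
 * FFA_PACK_VERSION_INFO(1, 0) builds the same encoding as FFA_VERSION_1_0
 * (0x10000), with the major version in bits [30:16] and the minor version
 * in bits [15:0].
 */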
/*
 * The FF-A specification explicitly refers to '4K pages'. This should not be
 * confused with the kernel PAGE_SIZE, which is the translation granule the
 * kernel is configured with and may be one of 4K, 16K or 64K.
 */
#define FFA_PAGE_SIZE		SZ_4K

/*
 * Minimum buffer size/alignment encodings returned by an FFA_FEATURES
 * query for FFA_RXTX_MAP.
 */
#define FFA_FEAT_RXTX_MIN_SZ_4K		0
#define FFA_FEAT_RXTX_MIN_SZ_64K	1
#define FFA_FEAT_RXTX_MIN_SZ_16K	2

/* FFA Bus/Device/Driver related */
struct ffa_device {
	u32 id;
	int vm_id;
	bool mode_32bit;
	uuid_t uuid;
	struct device dev;
	const struct ffa_ops *ops;
};

#define to_ffa_dev(d) container_of(d, struct ffa_device, dev)

struct ffa_device_id {
	uuid_t uuid;
};

struct ffa_driver {
	const char *name;
	int (*probe)(struct ffa_device *sdev);
	void (*remove)(struct ffa_device *sdev);
	const struct ffa_device_id *id_table;

	struct device_driver driver;
};

#define to_ffa_driver(d) container_of(d, struct ffa_driver, driver)

static inline void ffa_dev_set_drvdata(struct ffa_device *fdev, void *data)
{
	dev_set_drvdata(&fdev->dev, data);
}

static inline void *ffa_dev_get_drvdata(struct ffa_device *fdev)
{
	return dev_get_drvdata(&fdev->dev);
}

#if IS_REACHABLE(CONFIG_ARM_FFA_TRANSPORT)
struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
				       const struct ffa_ops *ops);
void ffa_device_unregister(struct ffa_device *ffa_dev);
int ffa_driver_register(struct ffa_driver *driver, struct module *owner,
			const char *mod_name);
void ffa_driver_unregister(struct ffa_driver *driver);
bool ffa_device_is_valid(struct ffa_device *ffa_dev);

#else
static inline
struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
				       const struct ffa_ops *ops)
{
	return NULL;
}

static inline void ffa_device_unregister(struct ffa_device *dev) {}

static inline int
ffa_driver_register(struct ffa_driver *driver, struct module *owner,
		    const char *mod_name)
{
	return -EINVAL;
}

static inline void ffa_driver_unregister(struct ffa_driver *driver) {}

static inline
bool ffa_device_is_valid(struct ffa_device *ffa_dev) { return false; }

#endif /* CONFIG_ARM_FFA_TRANSPORT */

#define ffa_register(driver) \
	ffa_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
#define ffa_unregister(driver) \
	ffa_driver_unregister(driver)

/**
 * module_ffa_driver() - Helper macro for registering an FF-A driver
 * @__ffa_driver: ffa_driver structure
 *
 * Helper macro for FF-A drivers to set up proper module init / exit
 * functions. Replaces module_init() and module_exit() and keeps people from
 * printing pointless things to the kernel log when their driver is loaded.
 */
#define module_ffa_driver(__ffa_driver) \
	module_driver(__ffa_driver, ffa_register, ffa_unregister)
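/*
 * Minimal usage sketch (illustrative only, not part of this header): a
 * hypothetical driver "ffa_example" binding to a partition UUID and using
 * module_ffa_driver() for registration. The UUID below is a placeholder.
 *
 *	static int ffa_example_probe(struct ffa_device *ffa_dev)
 *	{
 *		if (!ffa_dev->ops->msg_ops)
 *			return -ENODEV;
 *		ffa_dev_set_drvdata(ffa_dev, NULL);
 *		return 0;
 *	}
 *
 *	static void ffa_example_remove(struct ffa_device *ffa_dev)
 *	{
 *	}
 *
 *	static const struct ffa_device_id ffa_example_id_table[] = {
 *		{ UUID_INIT(0x0, 0x0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0) },
 *		{}
 *	};
 *
 *	static struct ffa_driver ffa_example_driver = {
 *		.name = "ffa_example",
 *		.probe = ffa_example_probe,
 *		.remove = ffa_example_remove,
 *		.id_table = ffa_example_id_table,
 *	};
 *
 *	module_ffa_driver(ffa_example_driver);
 */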
/* FFA transport related */
struct ffa_partition_info {
	u16 id;
	u16 exec_ctxt;
/* partition supports receipt of direct requests */
#define FFA_PARTITION_DIRECT_RECV	BIT(0)
/* partition can send direct requests. */
#define FFA_PARTITION_DIRECT_SEND	BIT(1)
/* partition can send and receive indirect messages. */
#define FFA_PARTITION_INDIRECT_MSG	BIT(2)
/* partition runs in the AArch64 execution state. */
#define FFA_PARTITION_AARCH64_EXEC	BIT(8)
	u32 properties;
	u32 uuid[4];
};

/* For use with FFA_MSG_SEND_DIRECT_{REQ,RESP} which pass data via registers */
struct ffa_send_direct_data {
	unsigned long data0; /* w3/x3 */
	unsigned long data1; /* w4/x4 */
	unsigned long data2; /* w5/x5 */
	unsigned long data3; /* w6/x6 */
	unsigned long data4; /* w7/x7 */
};
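/*
 * Illustrative sketch (not part of this interface): a synchronous direct
 * request sent by a driver holding an ffa_device. The payload value below is
 * an arbitrary placeholder; on success the same structure carries the
 * response registers (w3/x3 .. w7/x7) back to the caller.
 *
 *	struct ffa_send_direct_data data = { .data0 = 0x1 };
 *	int ret;
 *
 *	ret = ffa_dev->ops->msg_ops->sync_send_receive(ffa_dev, &data);
 *	if (ret)
 *		return ret;
 */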
struct ffa_mem_region_addr_range {
	/* The base IPA of the constituent memory region, aligned to 4 kiB */
	u64 address;
	/* The number of 4 kiB pages in the constituent memory region. */
	u32 pg_cnt;
	u32 reserved;
};

struct ffa_composite_mem_region {
	/*
	 * The total number of 4 kiB pages included in this memory region. This
	 * must be equal to the sum of page counts specified in each
	 * `struct ffa_mem_region_addr_range`.
	 */
	u32 total_pg_cnt;
	/* The number of constituents included in this memory region range */
	u32 addr_range_cnt;
	u64 reserved;
	/** An array of `addr_range_cnt` memory region constituents. */
	struct ffa_mem_region_addr_range constituents[];
};

struct ffa_mem_region_attributes {
	/* The ID of the VM to which the memory is being given or shared. */
	u16 receiver;
	/*
	 * The permissions with which the memory region should be mapped in the
	 * receiver's page table.
	 */
#define FFA_MEM_EXEC		BIT(3)
#define FFA_MEM_NO_EXEC		BIT(2)
#define FFA_MEM_RW		BIT(1)
#define FFA_MEM_RO		BIT(0)
	u8 attrs;
	/*
	 * Flags used during FFA_MEM_RETRIEVE_REQ and FFA_MEM_RETRIEVE_RESP
	 * for memory regions with multiple borrowers.
	 */
#define FFA_MEM_RETRIEVE_SELF_BORROWER	BIT(0)
	u8 flag;
	/*
	 * Offset in bytes from the start of the outer `ffa_mem_region` to
	 * a `struct ffa_composite_mem_region`.
	 */
	u32 composite_off;
	u64 reserved;
};

struct ffa_mem_region {
	/* The ID of the VM/owner which originally sent the memory region */
	u16 sender_id;
#define FFA_MEM_NORMAL		BIT(5)
#define FFA_MEM_DEVICE		BIT(4)

#define FFA_MEM_WRITE_BACK	(3 << 2)
#define FFA_MEM_NON_CACHEABLE	(1 << 2)

#define FFA_DEV_nGnRnE		(0 << 2)
#define FFA_DEV_nGnRE		(1 << 2)
#define FFA_DEV_nGRE		(2 << 2)
#define FFA_DEV_GRE		(3 << 2)

#define FFA_MEM_NON_SHAREABLE	(0)
#define FFA_MEM_OUTER_SHAREABLE	(2)
#define FFA_MEM_INNER_SHAREABLE	(3)
	u8 attributes;
	u8 reserved_0;
/*
 * Clear memory region contents after unmapping it from the sender and
 * before mapping it for any receiver.
 */
#define FFA_MEM_CLEAR			BIT(0)
/*
 * Whether the hypervisor may time slice the memory sharing or retrieval
 * operation.
 */
#define FFA_TIME_SLICE_ENABLE		BIT(1)

#define FFA_MEM_RETRIEVE_TYPE_IN_RESP	(0 << 3)
#define FFA_MEM_RETRIEVE_TYPE_SHARE	(1 << 3)
#define FFA_MEM_RETRIEVE_TYPE_LEND	(2 << 3)
#define FFA_MEM_RETRIEVE_TYPE_DONATE	(3 << 3)

#define FFA_MEM_RETRIEVE_ADDR_ALIGN_HINT	BIT(9)
#define FFA_MEM_RETRIEVE_ADDR_ALIGN(x)		((x) << 5)
	/* Flags to control behaviour of the transaction. */
	u32 flags;
#define HANDLE_LOW_MASK		GENMASK_ULL(31, 0)
#define HANDLE_HIGH_MASK	GENMASK_ULL(63, 32)
#define HANDLE_LOW(x)		((u32)(FIELD_GET(HANDLE_LOW_MASK, (x))))
#define HANDLE_HIGH(x)		((u32)(FIELD_GET(HANDLE_HIGH_MASK, (x))))

#define PACK_HANDLE(l, h)		\
	(FIELD_PREP(HANDLE_LOW_MASK, (l)) | FIELD_PREP(HANDLE_HIGH_MASK, (h)))
	/*
	 * A globally-unique ID assigned by the hypervisor for a region
	 * of memory being sent between VMs.
	 */
	u64 handle;
	/*
	 * An implementation defined value associated with the receiver and the
	 * memory region.
	 */
	u64 tag;
	u32 reserved_1;
	/*
	 * The number of `ffa_mem_region_attributes` entries included in this
	 * transaction.
	 */
	u32 ep_count;
	/*
	 * An array of endpoint memory access descriptors.
	 * Each one specifies a memory region offset, an endpoint and the
	 * attributes with which this memory region should be mapped in that
	 * endpoint's page table.
	 */
	struct ffa_mem_region_attributes ep_mem_access[];
};

#define COMPOSITE_OFFSET(x)	\
	(offsetof(struct ffa_mem_region, ep_mem_access[x]))
#define CONSTITUENTS_OFFSET(x)	\
	(offsetof(struct ffa_composite_mem_region, constituents[x]))
#define COMPOSITE_CONSTITUENTS_OFFSET(x, y)	\
	(COMPOSITE_OFFSET(x) + CONSTITUENTS_OFFSET(y))

struct ffa_mem_ops_args {
	bool use_txbuf;
	u32 nattrs;
	u32 flags;
	u64 tag;
	u64 g_handle;
	struct scatterlist *sg;
	struct ffa_mem_region_attributes *attrs;
};

struct ffa_info_ops {
	u32 (*api_version_get)(void);
	int (*partition_info_get)(const char *uuid_str,
				  struct ffa_partition_info *buffer);
};

struct ffa_msg_ops {
	void (*mode_32bit_set)(struct ffa_device *dev);
	int (*sync_send_receive)(struct ffa_device *dev,
				 struct ffa_send_direct_data *data);
};

struct ffa_mem_ops {
	int (*memory_reclaim)(u64 g_handle, u32 flags);
	int (*memory_share)(struct ffa_mem_ops_args *args);
	int (*memory_lend)(struct ffa_mem_ops_args *args);
};

struct ffa_ops {
	const struct ffa_info_ops *info_ops;
	const struct ffa_msg_ops *msg_ops;
	const struct ffa_mem_ops *mem_ops;
};

#endif /* _LINUX_ARM_FFA_H */