Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

optee: FF-A: dynamic protected memory allocation

Add support in the OP-TEE backend driver for dynamic protected memory
allocation with FF-A.

The protected memory pools for dynamically allocated protected memory
are instantiated when requested by user-space. This instantiation can
fail if OP-TEE doesn't support the requested use-case of protected
memory.

Protected memory pools based on a static carveout or dynamic allocation
can coexist for different use-cases. We use only dynamic allocation with
FF-A.

Reviewed-by: Sumit Garg <sumit.garg@oss.qualcomm.com>
Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>

+492 -3
+1
drivers/tee/optee/Makefile
··· 4 4 optee-objs += call.o 5 5 optee-objs += notif.o 6 6 optee-objs += rpc.o 7 + optee-objs += protmem.o 7 8 optee-objs += supp.o 8 9 optee-objs += device.o 9 10 optee-objs += smc_abi.o
+144 -2
drivers/tee/optee/ffa_abi.c
··· 649 649 return optee_ffa_yielding_call(ctx, &data, rpc_arg, system_thread); 650 650 } 651 651 652 + static int do_call_lend_protmem(struct optee *optee, u64 cookie, u32 use_case) 653 + { 654 + struct optee_shm_arg_entry *entry; 655 + struct optee_msg_arg *msg_arg; 656 + struct tee_shm *shm; 657 + u_int offs; 658 + int rc; 659 + 660 + msg_arg = optee_get_msg_arg(optee->ctx, 1, &entry, &shm, &offs); 661 + if (IS_ERR(msg_arg)) 662 + return PTR_ERR(msg_arg); 663 + 664 + msg_arg->cmd = OPTEE_MSG_CMD_ASSIGN_PROTMEM; 665 + msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT; 666 + msg_arg->params[0].u.value.a = cookie; 667 + msg_arg->params[0].u.value.b = use_case; 668 + 669 + rc = optee->ops->do_call_with_arg(optee->ctx, shm, offs, false); 670 + if (rc) 671 + goto out; 672 + if (msg_arg->ret != TEEC_SUCCESS) { 673 + rc = -EINVAL; 674 + goto out; 675 + } 676 + 677 + out: 678 + optee_free_msg_arg(optee->ctx, entry, offs); 679 + return rc; 680 + } 681 + 682 + static int optee_ffa_lend_protmem(struct optee *optee, struct tee_shm *protmem, 683 + u32 *mem_attrs, unsigned int ma_count, 684 + u32 use_case) 685 + { 686 + struct ffa_device *ffa_dev = optee->ffa.ffa_dev; 687 + const struct ffa_mem_ops *mem_ops = ffa_dev->ops->mem_ops; 688 + const struct ffa_msg_ops *msg_ops = ffa_dev->ops->msg_ops; 689 + struct ffa_send_direct_data data; 690 + struct ffa_mem_region_attributes *mem_attr; 691 + struct ffa_mem_ops_args args = { 692 + .use_txbuf = true, 693 + .tag = use_case, 694 + }; 695 + struct page *page; 696 + struct scatterlist sgl; 697 + unsigned int n; 698 + int rc; 699 + 700 + mem_attr = kcalloc(ma_count, sizeof(*mem_attr), GFP_KERNEL); 701 + for (n = 0; n < ma_count; n++) { 702 + mem_attr[n].receiver = mem_attrs[n] & U16_MAX; 703 + mem_attr[n].attrs = mem_attrs[n] >> 16; 704 + } 705 + args.attrs = mem_attr; 706 + args.nattrs = ma_count; 707 + 708 + page = phys_to_page(protmem->paddr); 709 + sg_init_table(&sgl, 1); 710 + sg_set_page(&sgl, page, protmem->size, 0); 711 
+ 712 + args.sg = &sgl; 713 + rc = mem_ops->memory_lend(&args); 714 + kfree(mem_attr); 715 + if (rc) 716 + return rc; 717 + 718 + rc = do_call_lend_protmem(optee, args.g_handle, use_case); 719 + if (rc) 720 + goto err_reclaim; 721 + 722 + rc = optee_shm_add_ffa_handle(optee, protmem, args.g_handle); 723 + if (rc) 724 + goto err_unreg; 725 + 726 + protmem->sec_world_id = args.g_handle; 727 + 728 + return 0; 729 + 730 + err_unreg: 731 + data = (struct ffa_send_direct_data){ 732 + .data0 = OPTEE_FFA_RELEASE_PROTMEM, 733 + .data1 = (u32)args.g_handle, 734 + .data2 = (u32)(args.g_handle >> 32), 735 + }; 736 + msg_ops->sync_send_receive(ffa_dev, &data); 737 + err_reclaim: 738 + mem_ops->memory_reclaim(args.g_handle, 0); 739 + return rc; 740 + } 741 + 742 + static int optee_ffa_reclaim_protmem(struct optee *optee, 743 + struct tee_shm *protmem) 744 + { 745 + struct ffa_device *ffa_dev = optee->ffa.ffa_dev; 746 + const struct ffa_msg_ops *msg_ops = ffa_dev->ops->msg_ops; 747 + const struct ffa_mem_ops *mem_ops = ffa_dev->ops->mem_ops; 748 + u64 global_handle = protmem->sec_world_id; 749 + struct ffa_send_direct_data data = { 750 + .data0 = OPTEE_FFA_RELEASE_PROTMEM, 751 + .data1 = (u32)global_handle, 752 + .data2 = (u32)(global_handle >> 32) 753 + }; 754 + int rc; 755 + 756 + optee_shm_rem_ffa_handle(optee, global_handle); 757 + protmem->sec_world_id = 0; 758 + 759 + rc = msg_ops->sync_send_receive(ffa_dev, &data); 760 + if (rc) 761 + pr_err("Release SHM id 0x%llx rc %d\n", global_handle, rc); 762 + 763 + rc = mem_ops->memory_reclaim(global_handle, 0); 764 + if (rc) 765 + pr_err("mem_reclaim: 0x%llx %d", global_handle, rc); 766 + 767 + return rc; 768 + } 769 + 652 770 /* 653 771 * 6. 
Driver initialization 654 772 * ··· 937 819 .do_call_with_arg = optee_ffa_do_call_with_arg, 938 820 .to_msg_param = optee_ffa_to_msg_param, 939 821 .from_msg_param = optee_ffa_from_msg_param, 822 + .lend_protmem = optee_ffa_lend_protmem, 823 + .reclaim_protmem = optee_ffa_reclaim_protmem, 940 824 }; 941 825 942 826 static void optee_ffa_remove(struct ffa_device *ffa_dev) ··· 1011 891 return rc; 1012 892 } 1013 893 894 + static int optee_ffa_protmem_pool_init(struct optee *optee, u32 sec_caps) 895 + { 896 + enum tee_dma_heap_id id = TEE_DMA_HEAP_SECURE_VIDEO_PLAY; 897 + struct tee_protmem_pool *pool; 898 + int rc = 0; 899 + 900 + if (sec_caps & OPTEE_FFA_SEC_CAP_PROTMEM) { 901 + pool = optee_protmem_alloc_dyn_pool(optee, id); 902 + if (IS_ERR(pool)) 903 + return PTR_ERR(pool); 904 + 905 + rc = tee_device_register_dma_heap(optee->teedev, id, pool); 906 + if (rc) 907 + pool->ops->destroy_pool(pool); 908 + } 909 + 910 + return rc; 911 + } 912 + 1014 913 static int optee_ffa_probe(struct ffa_device *ffa_dev) 1015 914 { 1016 915 const struct ffa_notifier_ops *notif_ops; ··· 1080 941 optee); 1081 942 if (IS_ERR(teedev)) { 1082 943 rc = PTR_ERR(teedev); 1083 - goto err_free_pool; 944 + goto err_free_shm_pool; 1084 945 } 1085 946 optee->teedev = teedev; 1086 947 ··· 1127 988 rc); 1128 989 } 1129 990 991 + if (optee_ffa_protmem_pool_init(optee, sec_caps)) 992 + pr_info("Protected memory service not available\n"); 993 + 1130 994 rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES); 1131 995 if (rc) 1132 996 goto err_unregister_devices; ··· 1160 1018 tee_device_unregister(optee->supp_teedev); 1161 1019 err_unreg_teedev: 1162 1020 tee_device_unregister(optee->teedev); 1163 - err_free_pool: 1021 + err_free_shm_pool: 1164 1022 tee_shm_pool_free(pool); 1165 1023 err_free_optee: 1166 1024 kfree(optee);
+12 -1
drivers/tee/optee/optee_private.h
··· 176 176 * @do_call_with_arg: enters OP-TEE in secure world 177 177 * @to_msg_param: converts from struct tee_param to OPTEE_MSG parameters 178 178 * @from_msg_param: converts from OPTEE_MSG parameters to struct tee_param 179 + * @lend_protmem: lends physically contiguous memory as protected 180 + * memory, inaccessible by the kernel 181 + * @reclaim_protmem: reclaims protected memory previously lent with 182 + * @lend_protmem() and makes it accessible by the 183 + * kernel again 179 184 * 180 185 * These OPs are only supposed to be used internally in the OP-TEE driver 181 - * as a way of abstracting the different methogs of entering OP-TEE in 186 + * as a way of abstracting the different methods of entering OP-TEE in 182 187 * secure world. 183 188 */ 184 189 struct optee_ops { ··· 196 191 int (*from_msg_param)(struct optee *optee, struct tee_param *params, 197 192 size_t num_params, 198 193 const struct optee_msg_param *msg_params); 194 + int (*lend_protmem)(struct optee *optee, struct tee_shm *protmem, 195 + u32 *mem_attr, unsigned int ma_count, 196 + u32 use_case); 197 + int (*reclaim_protmem)(struct optee *optee, struct tee_shm *protmem); 199 198 }; 200 199 201 200 /** ··· 296 287 void optee_supp_init(struct optee_supp *supp); 297 288 void optee_supp_uninit(struct optee_supp *supp); 298 289 void optee_supp_release(struct optee_supp *supp); 290 + struct tee_protmem_pool *optee_protmem_alloc_dyn_pool(struct optee *optee, 291 + enum tee_dma_heap_id id); 299 292 300 293 int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params, 301 294 struct tee_param *param);
+335
drivers/tee/optee/protmem.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Copyright (c) 2025, Linaro Limited 4 + */ 5 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 6 + 7 + #include <linux/errno.h> 8 + #include <linux/genalloc.h> 9 + #include <linux/slab.h> 10 + #include <linux/string.h> 11 + #include <linux/tee_core.h> 12 + #include <linux/types.h> 13 + #include "optee_private.h" 14 + 15 + struct optee_protmem_dyn_pool { 16 + struct tee_protmem_pool pool; 17 + struct gen_pool *gen_pool; 18 + struct optee *optee; 19 + size_t page_count; 20 + u32 *mem_attrs; 21 + u_int mem_attr_count; 22 + refcount_t refcount; 23 + u32 use_case; 24 + struct tee_shm *protmem; 25 + /* Protects when initializing and tearing down this struct */ 26 + struct mutex mutex; 27 + }; 28 + 29 + static struct optee_protmem_dyn_pool * 30 + to_protmem_dyn_pool(struct tee_protmem_pool *pool) 31 + { 32 + return container_of(pool, struct optee_protmem_dyn_pool, pool); 33 + } 34 + 35 + static int init_dyn_protmem(struct optee_protmem_dyn_pool *rp) 36 + { 37 + int rc; 38 + 39 + rp->protmem = tee_shm_alloc_dma_mem(rp->optee->ctx, rp->page_count); 40 + if (IS_ERR(rp->protmem)) { 41 + rc = PTR_ERR(rp->protmem); 42 + goto err_null_protmem; 43 + } 44 + 45 + /* 46 + * TODO unmap the memory range since the physical memory will 47 + * become inaccessible after the lend_protmem() call. 48 + * 49 + * If the platform supports a hypervisor at EL2, it will unmap the 50 + * intermediate physical memory for us and stop cache pre-fetch of 51 + * the memory. 
52 + */ 53 + rc = rp->optee->ops->lend_protmem(rp->optee, rp->protmem, 54 + rp->mem_attrs, 55 + rp->mem_attr_count, rp->use_case); 56 + if (rc) 57 + goto err_put_shm; 58 + rp->protmem->flags |= TEE_SHM_DYNAMIC; 59 + 60 + rp->gen_pool = gen_pool_create(PAGE_SHIFT, -1); 61 + if (!rp->gen_pool) { 62 + rc = -ENOMEM; 63 + goto err_reclaim; 64 + } 65 + 66 + rc = gen_pool_add(rp->gen_pool, rp->protmem->paddr, 67 + rp->protmem->size, -1); 68 + if (rc) 69 + goto err_free_pool; 70 + 71 + refcount_set(&rp->refcount, 1); 72 + return 0; 73 + 74 + err_free_pool: 75 + gen_pool_destroy(rp->gen_pool); 76 + rp->gen_pool = NULL; 77 + err_reclaim: 78 + rp->optee->ops->reclaim_protmem(rp->optee, rp->protmem); 79 + err_put_shm: 80 + tee_shm_put(rp->protmem); 81 + err_null_protmem: 82 + rp->protmem = NULL; 83 + return rc; 84 + } 85 + 86 + static int get_dyn_protmem(struct optee_protmem_dyn_pool *rp) 87 + { 88 + int rc = 0; 89 + 90 + if (!refcount_inc_not_zero(&rp->refcount)) { 91 + mutex_lock(&rp->mutex); 92 + if (rp->gen_pool) { 93 + /* 94 + * Another thread has already initialized the pool 95 + * before us, or the pool was just about to be torn 96 + * down. Either way we only need to increase the 97 + * refcount and we're done. 
98 + */ 99 + refcount_inc(&rp->refcount); 100 + } else { 101 + rc = init_dyn_protmem(rp); 102 + } 103 + mutex_unlock(&rp->mutex); 104 + } 105 + 106 + return rc; 107 + } 108 + 109 + static void release_dyn_protmem(struct optee_protmem_dyn_pool *rp) 110 + { 111 + gen_pool_destroy(rp->gen_pool); 112 + rp->gen_pool = NULL; 113 + 114 + rp->optee->ops->reclaim_protmem(rp->optee, rp->protmem); 115 + rp->protmem->flags &= ~TEE_SHM_DYNAMIC; 116 + 117 + WARN(refcount_read(&rp->protmem->refcount) != 1, "Unexpected refcount"); 118 + tee_shm_put(rp->protmem); 119 + rp->protmem = NULL; 120 + } 121 + 122 + static void put_dyn_protmem(struct optee_protmem_dyn_pool *rp) 123 + { 124 + if (refcount_dec_and_test(&rp->refcount)) { 125 + mutex_lock(&rp->mutex); 126 + if (rp->gen_pool) 127 + release_dyn_protmem(rp); 128 + mutex_unlock(&rp->mutex); 129 + } 130 + } 131 + 132 + static int protmem_pool_op_dyn_alloc(struct tee_protmem_pool *pool, 133 + struct sg_table *sgt, size_t size, 134 + size_t *offs) 135 + { 136 + struct optee_protmem_dyn_pool *rp = to_protmem_dyn_pool(pool); 137 + size_t sz = ALIGN(size, PAGE_SIZE); 138 + phys_addr_t pa; 139 + int rc; 140 + 141 + rc = get_dyn_protmem(rp); 142 + if (rc) 143 + return rc; 144 + 145 + pa = gen_pool_alloc(rp->gen_pool, sz); 146 + if (!pa) { 147 + rc = -ENOMEM; 148 + goto err_put; 149 + } 150 + 151 + rc = sg_alloc_table(sgt, 1, GFP_KERNEL); 152 + if (rc) 153 + goto err_free; 154 + 155 + sg_set_page(sgt->sgl, phys_to_page(pa), size, 0); 156 + *offs = pa - rp->protmem->paddr; 157 + 158 + return 0; 159 + err_free: 160 + gen_pool_free(rp->gen_pool, pa, size); 161 + err_put: 162 + put_dyn_protmem(rp); 163 + 164 + return rc; 165 + } 166 + 167 + static void protmem_pool_op_dyn_free(struct tee_protmem_pool *pool, 168 + struct sg_table *sgt) 169 + { 170 + struct optee_protmem_dyn_pool *rp = to_protmem_dyn_pool(pool); 171 + struct scatterlist *sg; 172 + int i; 173 + 174 + for_each_sgtable_sg(sgt, sg, i) 175 + gen_pool_free(rp->gen_pool, sg_phys(sg), 
sg->length); 176 + sg_free_table(sgt); 177 + put_dyn_protmem(rp); 178 + } 179 + 180 + static int protmem_pool_op_dyn_update_shm(struct tee_protmem_pool *pool, 181 + struct sg_table *sgt, size_t offs, 182 + struct tee_shm *shm, 183 + struct tee_shm **parent_shm) 184 + { 185 + struct optee_protmem_dyn_pool *rp = to_protmem_dyn_pool(pool); 186 + 187 + *parent_shm = rp->protmem; 188 + 189 + return 0; 190 + } 191 + 192 + static void pool_op_dyn_destroy_pool(struct tee_protmem_pool *pool) 193 + { 194 + struct optee_protmem_dyn_pool *rp = to_protmem_dyn_pool(pool); 195 + 196 + mutex_destroy(&rp->mutex); 197 + kfree(rp); 198 + } 199 + 200 + static struct tee_protmem_pool_ops protmem_pool_ops_dyn = { 201 + .alloc = protmem_pool_op_dyn_alloc, 202 + .free = protmem_pool_op_dyn_free, 203 + .update_shm = protmem_pool_op_dyn_update_shm, 204 + .destroy_pool = pool_op_dyn_destroy_pool, 205 + }; 206 + 207 + static int get_protmem_config(struct optee *optee, u32 use_case, 208 + size_t *min_size, u_int *pa_width, 209 + u32 *mem_attrs, u_int *ma_count) 210 + { 211 + struct tee_param params[2] = { 212 + [0] = { 213 + .attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT, 214 + .u.value.a = use_case, 215 + }, 216 + [1] = { 217 + .attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT, 218 + }, 219 + }; 220 + struct optee_shm_arg_entry *entry; 221 + struct tee_shm *shm_param = NULL; 222 + struct optee_msg_arg *msg_arg; 223 + struct tee_shm *shm; 224 + u_int offs; 225 + int rc; 226 + 227 + if (mem_attrs && *ma_count) { 228 + params[1].u.memref.size = *ma_count * sizeof(*mem_attrs); 229 + shm_param = tee_shm_alloc_priv_buf(optee->ctx, 230 + params[1].u.memref.size); 231 + if (IS_ERR(shm_param)) 232 + return PTR_ERR(shm_param); 233 + params[1].u.memref.shm = shm_param; 234 + } 235 + 236 + msg_arg = optee_get_msg_arg(optee->ctx, ARRAY_SIZE(params), &entry, 237 + &shm, &offs); 238 + if (IS_ERR(msg_arg)) { 239 + rc = PTR_ERR(msg_arg); 240 + goto out_free_shm; 241 + } 242 + msg_arg->cmd = 
OPTEE_MSG_CMD_GET_PROTMEM_CONFIG; 243 + 244 + rc = optee->ops->to_msg_param(optee, msg_arg->params, 245 + ARRAY_SIZE(params), params); 246 + if (rc) 247 + goto out_free_msg; 248 + 249 + rc = optee->ops->do_call_with_arg(optee->ctx, shm, offs, false); 250 + if (rc) 251 + goto out_free_msg; 252 + if (msg_arg->ret && msg_arg->ret != TEEC_ERROR_SHORT_BUFFER) { 253 + rc = -EINVAL; 254 + goto out_free_msg; 255 + } 256 + 257 + rc = optee->ops->from_msg_param(optee, params, ARRAY_SIZE(params), 258 + msg_arg->params); 259 + if (rc) 260 + goto out_free_msg; 261 + 262 + if (!msg_arg->ret && mem_attrs && 263 + *ma_count < params[1].u.memref.size / sizeof(*mem_attrs)) { 264 + rc = -EINVAL; 265 + goto out_free_msg; 266 + } 267 + 268 + *min_size = params[0].u.value.a; 269 + *pa_width = params[0].u.value.c; 270 + *ma_count = params[1].u.memref.size / sizeof(*mem_attrs); 271 + 272 + if (msg_arg->ret == TEEC_ERROR_SHORT_BUFFER) { 273 + rc = -ENOSPC; 274 + goto out_free_msg; 275 + } 276 + 277 + if (mem_attrs) 278 + memcpy(mem_attrs, tee_shm_get_va(shm_param, 0), 279 + params[1].u.memref.size); 280 + 281 + out_free_msg: 282 + optee_free_msg_arg(optee->ctx, entry, offs); 283 + out_free_shm: 284 + if (shm_param) 285 + tee_shm_free(shm_param); 286 + return rc; 287 + } 288 + 289 + struct tee_protmem_pool *optee_protmem_alloc_dyn_pool(struct optee *optee, 290 + enum tee_dma_heap_id id) 291 + { 292 + struct optee_protmem_dyn_pool *rp; 293 + size_t min_size; 294 + u_int pa_width; 295 + int rc; 296 + 297 + rp = kzalloc(sizeof(*rp), GFP_KERNEL); 298 + if (!rp) 299 + return ERR_PTR(-ENOMEM); 300 + rp->use_case = id; 301 + 302 + rc = get_protmem_config(optee, id, &min_size, &pa_width, NULL, 303 + &rp->mem_attr_count); 304 + if (rc) { 305 + if (rc != -ENOSPC) 306 + goto err; 307 + rp->mem_attrs = kcalloc(rp->mem_attr_count, 308 + sizeof(*rp->mem_attrs), GFP_KERNEL); 309 + if (!rp->mem_attrs) { 310 + rc = -ENOMEM; 311 + goto err; 312 + } 313 + rc = get_protmem_config(optee, id, &min_size, 
&pa_width, 314 + rp->mem_attrs, &rp->mem_attr_count); 315 + if (rc) 316 + goto err_kfree_eps; 317 + } 318 + 319 + rc = optee_set_dma_mask(optee, pa_width); 320 + if (rc) 321 + goto err_kfree_eps; 322 + 323 + rp->pool.ops = &protmem_pool_ops_dyn; 324 + rp->optee = optee; 325 + rp->page_count = min_size / PAGE_SIZE; 326 + mutex_init(&rp->mutex); 327 + 328 + return &rp->pool; 329 + 330 + err_kfree_eps: 331 + kfree(rp->mem_attrs); 332 + err: 333 + kfree(rp); 334 + return ERR_PTR(rc); 335 + }