Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'optee-ffa-for-v5.16' of git://git.linaro.org/people/jens.wiklander/linux-tee into arm/drivers

Add FF-A support in OP-TEE driver

Adds support for the OP-TEE driver to communicate with secure world
using FF-A [1] as transport.

[1] https://developer.arm.com/documentation/den0077/latest

* tag 'optee-ffa-for-v5.16' of git://git.linaro.org/people/jens.wiklander/linux-tee:
optee: add FF-A support
optee: isolate smc abi
optee: refactor driver with internal callbacks
optee: simplify optee_release()
tee: add sec_world_id to struct tee_shm
tee: optee: Fix missing devices unregister during optee_remove
tee/optee/shm_pool: fix application of sizeof to pointer

Link: https://lore.kernel.org/r/20211018121324.GA2943530@jade
Signed-off-by: Arnd Bergmann <arnd@arndb.de>

+2761 -1416
+3 -2
drivers/tee/optee/Makefile
··· 4 4 optee-objs += call.o 5 5 optee-objs += rpc.o 6 6 optee-objs += supp.o 7 - optee-objs += shm_pool.o 8 7 optee-objs += device.o 8 + optee-objs += smc_abi.o 9 + optee-objs += ffa_abi.o 9 10 10 11 # for tracing framework to find optee_trace.h 11 - CFLAGS_call.o := -I$(src) 12 + CFLAGS_smc_abi.o := -I$(src)
+59 -388
drivers/tee/optee/call.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 2 /* 3 - * Copyright (c) 2015, Linaro Limited 3 + * Copyright (c) 2015-2021, Linaro Limited 4 4 */ 5 - #include <linux/arm-smccc.h> 6 5 #include <linux/device.h> 7 6 #include <linux/err.h> 8 7 #include <linux/errno.h> 9 8 #include <linux/mm.h> 10 - #include <linux/sched.h> 11 9 #include <linux/slab.h> 12 10 #include <linux/tee_drv.h> 13 11 #include <linux/types.h> 14 - #include <linux/uaccess.h> 15 12 #include "optee_private.h" 16 - #include "optee_smc.h" 17 - #define CREATE_TRACE_POINTS 18 - #include "optee_trace.h" 19 13 20 - struct optee_call_waiter { 21 - struct list_head list_node; 22 - struct completion c; 23 - }; 24 - 25 - static void optee_cq_wait_init(struct optee_call_queue *cq, 26 - struct optee_call_waiter *w) 14 + void optee_cq_wait_init(struct optee_call_queue *cq, 15 + struct optee_call_waiter *w) 27 16 { 28 17 /* 29 18 * We're preparing to make a call to secure world. In case we can't ··· 36 47 mutex_unlock(&cq->mutex); 37 48 } 38 49 39 - static void optee_cq_wait_for_completion(struct optee_call_queue *cq, 40 - struct optee_call_waiter *w) 50 + void optee_cq_wait_for_completion(struct optee_call_queue *cq, 51 + struct optee_call_waiter *w) 41 52 { 42 53 wait_for_completion(&w->c); 43 54 ··· 63 74 } 64 75 } 65 76 66 - static void optee_cq_wait_final(struct optee_call_queue *cq, 67 - struct optee_call_waiter *w) 77 + void optee_cq_wait_final(struct optee_call_queue *cq, 78 + struct optee_call_waiter *w) 68 79 { 69 80 /* 70 81 * We're done with the call to secure world. The thread in secure ··· 104 115 return NULL; 105 116 } 106 117 107 - /** 108 - * optee_do_call_with_arg() - Do an SMC to OP-TEE in secure world 109 - * @ctx: calling context 110 - * @parg: physical address of message to pass to secure world 111 - * 112 - * Does and SMC to OP-TEE in secure world and handles eventual resulting 113 - * Remote Procedure Calls (RPC) from OP-TEE. 
114 - * 115 - * Returns return code from secure world, 0 is OK 116 - */ 117 - u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg) 118 + struct tee_shm *optee_get_msg_arg(struct tee_context *ctx, size_t num_params, 119 + struct optee_msg_arg **msg_arg) 118 120 { 119 121 struct optee *optee = tee_get_drvdata(ctx->teedev); 120 - struct optee_call_waiter w; 121 - struct optee_rpc_param param = { }; 122 - struct optee_call_ctx call_ctx = { }; 123 - u32 ret; 124 - 125 - param.a0 = OPTEE_SMC_CALL_WITH_ARG; 126 - reg_pair_from_64(&param.a1, &param.a2, parg); 127 - /* Initialize waiter */ 128 - optee_cq_wait_init(&optee->call_queue, &w); 129 - while (true) { 130 - struct arm_smccc_res res; 131 - 132 - trace_optee_invoke_fn_begin(&param); 133 - optee->invoke_fn(param.a0, param.a1, param.a2, param.a3, 134 - param.a4, param.a5, param.a6, param.a7, 135 - &res); 136 - trace_optee_invoke_fn_end(&param, &res); 137 - 138 - if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) { 139 - /* 140 - * Out of threads in secure world, wait for a thread 141 - * become available. 142 - */ 143 - optee_cq_wait_for_completion(&optee->call_queue, &w); 144 - } else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) { 145 - cond_resched(); 146 - param.a0 = res.a0; 147 - param.a1 = res.a1; 148 - param.a2 = res.a2; 149 - param.a3 = res.a3; 150 - optee_handle_rpc(ctx, &param, &call_ctx); 151 - } else { 152 - ret = res.a0; 153 - break; 154 - } 155 - } 156 - 157 - optee_rpc_finalize_call(&call_ctx); 158 - /* 159 - * We're done with our thread in secure world, if there's any 160 - * thread waiters wake up one. 
161 - */ 162 - optee_cq_wait_final(&optee->call_queue, &w); 163 - 164 - return ret; 165 - } 166 - 167 - static struct tee_shm *get_msg_arg(struct tee_context *ctx, size_t num_params, 168 - struct optee_msg_arg **msg_arg, 169 - phys_addr_t *msg_parg) 170 - { 171 - int rc; 122 + size_t sz = OPTEE_MSG_GET_ARG_SIZE(num_params); 172 123 struct tee_shm *shm; 173 124 struct optee_msg_arg *ma; 174 125 175 - shm = tee_shm_alloc(ctx, OPTEE_MSG_GET_ARG_SIZE(num_params), 176 - TEE_SHM_MAPPED | TEE_SHM_PRIV); 126 + /* 127 + * rpc_arg_count is set to the number of allocated parameters in 128 + * the RPC argument struct if a second MSG arg struct is expected. 129 + * The second arg struct will then be used for RPC. 130 + */ 131 + if (optee->rpc_arg_count) 132 + sz += OPTEE_MSG_GET_ARG_SIZE(optee->rpc_arg_count); 133 + 134 + shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV); 177 135 if (IS_ERR(shm)) 178 136 return shm; 179 137 180 138 ma = tee_shm_get_va(shm, 0); 181 139 if (IS_ERR(ma)) { 182 - rc = PTR_ERR(ma); 183 - goto out; 140 + tee_shm_free(shm); 141 + return (void *)ma; 184 142 } 185 - 186 - rc = tee_shm_get_pa(shm, 0, msg_parg); 187 - if (rc) 188 - goto out; 189 143 190 144 memset(ma, 0, OPTEE_MSG_GET_ARG_SIZE(num_params)); 191 145 ma->num_params = num_params; 192 146 *msg_arg = ma; 193 - out: 194 - if (rc) { 195 - tee_shm_free(shm); 196 - return ERR_PTR(rc); 197 - } 198 147 199 148 return shm; 200 149 } ··· 141 214 struct tee_ioctl_open_session_arg *arg, 142 215 struct tee_param *param) 143 216 { 217 + struct optee *optee = tee_get_drvdata(ctx->teedev); 144 218 struct optee_context_data *ctxdata = ctx->data; 145 219 int rc; 146 220 struct tee_shm *shm; 147 221 struct optee_msg_arg *msg_arg; 148 - phys_addr_t msg_parg; 149 222 struct optee_session *sess = NULL; 150 223 uuid_t client_uuid; 151 224 152 225 /* +2 for the meta parameters added below */ 153 - shm = get_msg_arg(ctx, arg->num_params + 2, &msg_arg, &msg_parg); 226 + shm = optee_get_msg_arg(ctx, 
arg->num_params + 2, &msg_arg); 154 227 if (IS_ERR(shm)) 155 228 return PTR_ERR(shm); 156 229 ··· 174 247 goto out; 175 248 export_uuid(msg_arg->params[1].u.octets, &client_uuid); 176 249 177 - rc = optee_to_msg_param(msg_arg->params + 2, arg->num_params, param); 250 + rc = optee->ops->to_msg_param(optee, msg_arg->params + 2, 251 + arg->num_params, param); 178 252 if (rc) 179 253 goto out; 180 254 ··· 185 257 goto out; 186 258 } 187 259 188 - if (optee_do_call_with_arg(ctx, msg_parg)) { 260 + if (optee->ops->do_call_with_arg(ctx, shm)) { 189 261 msg_arg->ret = TEEC_ERROR_COMMUNICATION; 190 262 msg_arg->ret_origin = TEEC_ORIGIN_COMMS; 191 263 } ··· 200 272 kfree(sess); 201 273 } 202 274 203 - if (optee_from_msg_param(param, arg->num_params, msg_arg->params + 2)) { 275 + if (optee->ops->from_msg_param(optee, param, arg->num_params, 276 + msg_arg->params + 2)) { 204 277 arg->ret = TEEC_ERROR_COMMUNICATION; 205 278 arg->ret_origin = TEEC_ORIGIN_COMMS; 206 279 /* Close session again to avoid leakage */ ··· 217 288 return rc; 218 289 } 219 290 291 + int optee_close_session_helper(struct tee_context *ctx, u32 session) 292 + { 293 + struct tee_shm *shm; 294 + struct optee *optee = tee_get_drvdata(ctx->teedev); 295 + struct optee_msg_arg *msg_arg; 296 + 297 + shm = optee_get_msg_arg(ctx, 0, &msg_arg); 298 + if (IS_ERR(shm)) 299 + return PTR_ERR(shm); 300 + 301 + msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION; 302 + msg_arg->session = session; 303 + optee->ops->do_call_with_arg(ctx, shm); 304 + 305 + tee_shm_free(shm); 306 + 307 + return 0; 308 + } 309 + 220 310 int optee_close_session(struct tee_context *ctx, u32 session) 221 311 { 222 312 struct optee_context_data *ctxdata = ctx->data; 223 - struct tee_shm *shm; 224 - struct optee_msg_arg *msg_arg; 225 - phys_addr_t msg_parg; 226 313 struct optee_session *sess; 227 314 228 315 /* Check that the session is valid and remove it from the list */ ··· 251 306 return -EINVAL; 252 307 kfree(sess); 253 308 254 - shm = get_msg_arg(ctx, 
0, &msg_arg, &msg_parg); 255 - if (IS_ERR(shm)) 256 - return PTR_ERR(shm); 257 - 258 - msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION; 259 - msg_arg->session = session; 260 - optee_do_call_with_arg(ctx, msg_parg); 261 - 262 - tee_shm_free(shm); 263 - return 0; 309 + return optee_close_session_helper(ctx, session); 264 310 } 265 311 266 312 int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg, 267 313 struct tee_param *param) 268 314 { 315 + struct optee *optee = tee_get_drvdata(ctx->teedev); 269 316 struct optee_context_data *ctxdata = ctx->data; 270 317 struct tee_shm *shm; 271 318 struct optee_msg_arg *msg_arg; 272 - phys_addr_t msg_parg; 273 319 struct optee_session *sess; 274 320 int rc; 275 321 ··· 271 335 if (!sess) 272 336 return -EINVAL; 273 337 274 - shm = get_msg_arg(ctx, arg->num_params, &msg_arg, &msg_parg); 338 + shm = optee_get_msg_arg(ctx, arg->num_params, &msg_arg); 275 339 if (IS_ERR(shm)) 276 340 return PTR_ERR(shm); 277 341 msg_arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND; ··· 279 343 msg_arg->session = arg->session; 280 344 msg_arg->cancel_id = arg->cancel_id; 281 345 282 - rc = optee_to_msg_param(msg_arg->params, arg->num_params, param); 346 + rc = optee->ops->to_msg_param(optee, msg_arg->params, arg->num_params, 347 + param); 283 348 if (rc) 284 349 goto out; 285 350 286 - if (optee_do_call_with_arg(ctx, msg_parg)) { 351 + if (optee->ops->do_call_with_arg(ctx, shm)) { 287 352 msg_arg->ret = TEEC_ERROR_COMMUNICATION; 288 353 msg_arg->ret_origin = TEEC_ORIGIN_COMMS; 289 354 } 290 355 291 - if (optee_from_msg_param(param, arg->num_params, msg_arg->params)) { 356 + if (optee->ops->from_msg_param(optee, param, arg->num_params, 357 + msg_arg->params)) { 292 358 msg_arg->ret = TEEC_ERROR_COMMUNICATION; 293 359 msg_arg->ret_origin = TEEC_ORIGIN_COMMS; 294 360 } ··· 304 366 305 367 int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session) 306 368 { 369 + struct optee *optee = tee_get_drvdata(ctx->teedev); 307 370 
struct optee_context_data *ctxdata = ctx->data; 308 371 struct tee_shm *shm; 309 372 struct optee_msg_arg *msg_arg; 310 - phys_addr_t msg_parg; 311 373 struct optee_session *sess; 312 374 313 375 /* Check that the session is valid */ ··· 317 379 if (!sess) 318 380 return -EINVAL; 319 381 320 - shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg); 382 + shm = optee_get_msg_arg(ctx, 0, &msg_arg); 321 383 if (IS_ERR(shm)) 322 384 return PTR_ERR(shm); 323 385 324 386 msg_arg->cmd = OPTEE_MSG_CMD_CANCEL; 325 387 msg_arg->session = session; 326 388 msg_arg->cancel_id = cancel_id; 327 - optee_do_call_with_arg(ctx, msg_parg); 389 + optee->ops->do_call_with_arg(ctx, shm); 328 390 329 391 tee_shm_free(shm); 330 392 return 0; 331 - } 332 - 333 - /** 334 - * optee_enable_shm_cache() - Enables caching of some shared memory allocation 335 - * in OP-TEE 336 - * @optee: main service struct 337 - */ 338 - void optee_enable_shm_cache(struct optee *optee) 339 - { 340 - struct optee_call_waiter w; 341 - 342 - /* We need to retry until secure world isn't busy. */ 343 - optee_cq_wait_init(&optee->call_queue, &w); 344 - while (true) { 345 - struct arm_smccc_res res; 346 - 347 - optee->invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0, 348 - 0, &res); 349 - if (res.a0 == OPTEE_SMC_RETURN_OK) 350 - break; 351 - optee_cq_wait_for_completion(&optee->call_queue, &w); 352 - } 353 - optee_cq_wait_final(&optee->call_queue, &w); 354 - } 355 - 356 - /** 357 - * __optee_disable_shm_cache() - Disables caching of some shared memory 358 - * allocation in OP-TEE 359 - * @optee: main service struct 360 - * @is_mapped: true if the cached shared memory addresses were mapped by this 361 - * kernel, are safe to dereference, and should be freed 362 - */ 363 - static void __optee_disable_shm_cache(struct optee *optee, bool is_mapped) 364 - { 365 - struct optee_call_waiter w; 366 - 367 - /* We need to retry until secure world isn't busy. 
*/ 368 - optee_cq_wait_init(&optee->call_queue, &w); 369 - while (true) { 370 - union { 371 - struct arm_smccc_res smccc; 372 - struct optee_smc_disable_shm_cache_result result; 373 - } res; 374 - 375 - optee->invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0, 376 - 0, &res.smccc); 377 - if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL) 378 - break; /* All shm's freed */ 379 - if (res.result.status == OPTEE_SMC_RETURN_OK) { 380 - struct tee_shm *shm; 381 - 382 - /* 383 - * Shared memory references that were not mapped by 384 - * this kernel must be ignored to prevent a crash. 385 - */ 386 - if (!is_mapped) 387 - continue; 388 - 389 - shm = reg_pair_to_ptr(res.result.shm_upper32, 390 - res.result.shm_lower32); 391 - tee_shm_free(shm); 392 - } else { 393 - optee_cq_wait_for_completion(&optee->call_queue, &w); 394 - } 395 - } 396 - optee_cq_wait_final(&optee->call_queue, &w); 397 - } 398 - 399 - /** 400 - * optee_disable_shm_cache() - Disables caching of mapped shared memory 401 - * allocations in OP-TEE 402 - * @optee: main service struct 403 - */ 404 - void optee_disable_shm_cache(struct optee *optee) 405 - { 406 - return __optee_disable_shm_cache(optee, true); 407 - } 408 - 409 - /** 410 - * optee_disable_unmapped_shm_cache() - Disables caching of shared memory 411 - * allocations in OP-TEE which are not 412 - * currently mapped 413 - * @optee: main service struct 414 - */ 415 - void optee_disable_unmapped_shm_cache(struct optee *optee) 416 - { 417 - return __optee_disable_shm_cache(optee, false); 418 - } 419 - 420 - #define PAGELIST_ENTRIES_PER_PAGE \ 421 - ((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1) 422 - 423 - /** 424 - * optee_fill_pages_list() - write list of user pages to given shared 425 - * buffer. 
426 - * 427 - * @dst: page-aligned buffer where list of pages will be stored 428 - * @pages: array of pages that represents shared buffer 429 - * @num_pages: number of entries in @pages 430 - * @page_offset: offset of user buffer from page start 431 - * 432 - * @dst should be big enough to hold list of user page addresses and 433 - * links to the next pages of buffer 434 - */ 435 - void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages, 436 - size_t page_offset) 437 - { 438 - int n = 0; 439 - phys_addr_t optee_page; 440 - /* 441 - * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h 442 - * for details. 443 - */ 444 - struct { 445 - u64 pages_list[PAGELIST_ENTRIES_PER_PAGE]; 446 - u64 next_page_data; 447 - } *pages_data; 448 - 449 - /* 450 - * Currently OP-TEE uses 4k page size and it does not looks 451 - * like this will change in the future. On other hand, there are 452 - * no know ARM architectures with page size < 4k. 453 - * Thus the next built assert looks redundant. But the following 454 - * code heavily relies on this assumption, so it is better be 455 - * safe than sorry. 456 - */ 457 - BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE); 458 - 459 - pages_data = (void *)dst; 460 - /* 461 - * If linux page is bigger than 4k, and user buffer offset is 462 - * larger than 4k/8k/12k/etc this will skip first 4k pages, 463 - * because they bear no value data for OP-TEE. 
464 - */ 465 - optee_page = page_to_phys(*pages) + 466 - round_down(page_offset, OPTEE_MSG_NONCONTIG_PAGE_SIZE); 467 - 468 - while (true) { 469 - pages_data->pages_list[n++] = optee_page; 470 - 471 - if (n == PAGELIST_ENTRIES_PER_PAGE) { 472 - pages_data->next_page_data = 473 - virt_to_phys(pages_data + 1); 474 - pages_data++; 475 - n = 0; 476 - } 477 - 478 - optee_page += OPTEE_MSG_NONCONTIG_PAGE_SIZE; 479 - if (!(optee_page & ~PAGE_MASK)) { 480 - if (!--num_pages) 481 - break; 482 - pages++; 483 - optee_page = page_to_phys(*pages); 484 - } 485 - } 486 - } 487 - 488 - /* 489 - * The final entry in each pagelist page is a pointer to the next 490 - * pagelist page. 491 - */ 492 - static size_t get_pages_list_size(size_t num_entries) 493 - { 494 - int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE); 495 - 496 - return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE; 497 - } 498 - 499 - u64 *optee_allocate_pages_list(size_t num_entries) 500 - { 501 - return alloc_pages_exact(get_pages_list_size(num_entries), GFP_KERNEL); 502 - } 503 - 504 - void optee_free_pages_list(void *list, size_t num_entries) 505 - { 506 - free_pages_exact(list, get_pages_list_size(num_entries)); 507 393 } 508 394 509 395 static bool is_normal_memory(pgprot_t p) ··· 353 591 return -EINVAL; 354 592 } 355 593 356 - static int check_mem_type(unsigned long start, size_t num_pages) 594 + int optee_check_mem_type(unsigned long start, size_t num_pages) 357 595 { 358 596 struct mm_struct *mm = current->mm; 359 597 int rc; ··· 371 609 mmap_read_unlock(mm); 372 610 373 611 return rc; 374 - } 375 - 376 - int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm, 377 - struct page **pages, size_t num_pages, 378 - unsigned long start) 379 - { 380 - struct tee_shm *shm_arg = NULL; 381 - struct optee_msg_arg *msg_arg; 382 - u64 *pages_list; 383 - phys_addr_t msg_parg; 384 - int rc; 385 - 386 - if (!num_pages) 387 - return -EINVAL; 388 - 389 - rc = check_mem_type(start, num_pages); 390 - if (rc) 
391 - return rc; 392 - 393 - pages_list = optee_allocate_pages_list(num_pages); 394 - if (!pages_list) 395 - return -ENOMEM; 396 - 397 - shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg); 398 - if (IS_ERR(shm_arg)) { 399 - rc = PTR_ERR(shm_arg); 400 - goto out; 401 - } 402 - 403 - optee_fill_pages_list(pages_list, pages, num_pages, 404 - tee_shm_get_page_offset(shm)); 405 - 406 - msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM; 407 - msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT | 408 - OPTEE_MSG_ATTR_NONCONTIG; 409 - msg_arg->params->u.tmem.shm_ref = (unsigned long)shm; 410 - msg_arg->params->u.tmem.size = tee_shm_get_size(shm); 411 - /* 412 - * In the least bits of msg_arg->params->u.tmem.buf_ptr we 413 - * store buffer offset from 4k page, as described in OP-TEE ABI. 414 - */ 415 - msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) | 416 - (tee_shm_get_page_offset(shm) & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1)); 417 - 418 - if (optee_do_call_with_arg(ctx, msg_parg) || 419 - msg_arg->ret != TEEC_SUCCESS) 420 - rc = -EINVAL; 421 - 422 - tee_shm_free(shm_arg); 423 - out: 424 - optee_free_pages_list(pages_list, num_pages); 425 - return rc; 426 - } 427 - 428 - int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm) 429 - { 430 - struct tee_shm *shm_arg; 431 - struct optee_msg_arg *msg_arg; 432 - phys_addr_t msg_parg; 433 - int rc = 0; 434 - 435 - shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg); 436 - if (IS_ERR(shm_arg)) 437 - return PTR_ERR(shm_arg); 438 - 439 - msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM; 440 - 441 - msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT; 442 - msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm; 443 - 444 - if (optee_do_call_with_arg(ctx, msg_parg) || 445 - msg_arg->ret != TEEC_SUCCESS) 446 - rc = -EINVAL; 447 - tee_shm_free(shm_arg); 448 - return rc; 449 - } 450 - 451 - int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm, 452 - struct page **pages, size_t num_pages, 453 - 
unsigned long start) 454 - { 455 - /* 456 - * We don't want to register supplicant memory in OP-TEE. 457 - * Instead information about it will be passed in RPC code. 458 - */ 459 - return check_mem_type(start, num_pages); 460 - } 461 - 462 - int optee_shm_unregister_supp(struct tee_context *ctx, struct tee_shm *shm) 463 - { 464 - return 0; 465 612 }
+81 -651
drivers/tee/optee/core.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 2 /* 3 - * Copyright (c) 2015, Linaro Limited 3 + * Copyright (c) 2015-2021, Linaro Limited 4 + * Copyright (c) 2016, EPAM Systems 4 5 */ 5 6 6 7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 7 8 8 - #include <linux/arm-smccc.h> 9 9 #include <linux/crash_dump.h> 10 10 #include <linux/errno.h> 11 11 #include <linux/io.h> 12 + #include <linux/mm.h> 12 13 #include <linux/module.h> 13 - #include <linux/of.h> 14 - #include <linux/of_platform.h> 15 - #include <linux/platform_device.h> 16 14 #include <linux/slab.h> 17 15 #include <linux/string.h> 18 16 #include <linux/tee_drv.h> 19 17 #include <linux/types.h> 20 - #include <linux/uaccess.h> 21 18 #include <linux/workqueue.h> 22 19 #include "optee_private.h" 23 - #include "optee_smc.h" 24 - #include "shm_pool.h" 25 20 26 - #define DRIVER_NAME "optee" 27 - 28 - #define OPTEE_SHM_NUM_PRIV_PAGES CONFIG_OPTEE_SHM_NUM_PRIV_PAGES 29 - 30 - /** 31 - * optee_from_msg_param() - convert from OPTEE_MSG parameters to 32 - * struct tee_param 33 - * @params: subsystem internal parameter representation 34 - * @num_params: number of elements in the parameter arrays 35 - * @msg_params: OPTEE_MSG parameters 36 - * Returns 0 on success or <0 on failure 37 - */ 38 - int optee_from_msg_param(struct tee_param *params, size_t num_params, 39 - const struct optee_msg_param *msg_params) 21 + int optee_pool_op_alloc_helper(struct tee_shm_pool_mgr *poolm, 22 + struct tee_shm *shm, size_t size, 23 + int (*shm_register)(struct tee_context *ctx, 24 + struct tee_shm *shm, 25 + struct page **pages, 26 + size_t num_pages, 27 + unsigned long start)) 40 28 { 41 - int rc; 42 - size_t n; 43 - struct tee_shm *shm; 44 - phys_addr_t pa; 29 + unsigned int order = get_order(size); 30 + struct page *page; 31 + int rc = 0; 45 32 46 - for (n = 0; n < num_params; n++) { 47 - struct tee_param *p = params + n; 48 - const struct optee_msg_param *mp = msg_params + n; 49 - u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK; 33 + 
page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); 34 + if (!page) 35 + return -ENOMEM; 50 36 51 - switch (attr) { 52 - case OPTEE_MSG_ATTR_TYPE_NONE: 53 - p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE; 54 - memset(&p->u, 0, sizeof(p->u)); 55 - break; 56 - case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT: 57 - case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT: 58 - case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT: 59 - p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT + 60 - attr - OPTEE_MSG_ATTR_TYPE_VALUE_INPUT; 61 - p->u.value.a = mp->u.value.a; 62 - p->u.value.b = mp->u.value.b; 63 - p->u.value.c = mp->u.value.c; 64 - break; 65 - case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT: 66 - case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT: 67 - case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT: 68 - p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT + 69 - attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT; 70 - p->u.memref.size = mp->u.tmem.size; 71 - shm = (struct tee_shm *)(unsigned long) 72 - mp->u.tmem.shm_ref; 73 - if (!shm) { 74 - p->u.memref.shm_offs = 0; 75 - p->u.memref.shm = NULL; 76 - break; 77 - } 78 - rc = tee_shm_get_pa(shm, 0, &pa); 79 - if (rc) 80 - return rc; 81 - p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa; 82 - p->u.memref.shm = shm; 83 - break; 84 - case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT: 85 - case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT: 86 - case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT: 87 - p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT + 88 - attr - OPTEE_MSG_ATTR_TYPE_RMEM_INPUT; 89 - p->u.memref.size = mp->u.rmem.size; 90 - shm = (struct tee_shm *)(unsigned long) 91 - mp->u.rmem.shm_ref; 37 + shm->kaddr = page_address(page); 38 + shm->paddr = page_to_phys(page); 39 + shm->size = PAGE_SIZE << order; 92 40 93 - if (!shm) { 94 - p->u.memref.shm_offs = 0; 95 - p->u.memref.shm = NULL; 96 - break; 97 - } 98 - p->u.memref.shm_offs = mp->u.rmem.offs; 99 - p->u.memref.shm = shm; 41 + if (shm_register) { 42 + unsigned int nr_pages = 1 << order, i; 43 + struct page **pages; 100 44 101 - break; 102 - 103 - default: 104 - return -EINVAL; 45 + pages = 
kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL); 46 + if (!pages) { 47 + rc = -ENOMEM; 48 + goto err; 105 49 } 106 - } 107 - return 0; 108 - } 109 50 110 - static int to_msg_param_tmp_mem(struct optee_msg_param *mp, 111 - const struct tee_param *p) 112 - { 113 - int rc; 114 - phys_addr_t pa; 115 - 116 - mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + p->attr - 117 - TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT; 118 - 119 - mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm; 120 - mp->u.tmem.size = p->u.memref.size; 121 - 122 - if (!p->u.memref.shm) { 123 - mp->u.tmem.buf_ptr = 0; 124 - return 0; 125 - } 126 - 127 - rc = tee_shm_get_pa(p->u.memref.shm, p->u.memref.shm_offs, &pa); 128 - if (rc) 129 - return rc; 130 - 131 - mp->u.tmem.buf_ptr = pa; 132 - mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED << 133 - OPTEE_MSG_ATTR_CACHE_SHIFT; 134 - 135 - return 0; 136 - } 137 - 138 - static int to_msg_param_reg_mem(struct optee_msg_param *mp, 139 - const struct tee_param *p) 140 - { 141 - mp->attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + p->attr - 142 - TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT; 143 - 144 - mp->u.rmem.shm_ref = (unsigned long)p->u.memref.shm; 145 - mp->u.rmem.size = p->u.memref.size; 146 - mp->u.rmem.offs = p->u.memref.shm_offs; 147 - return 0; 148 - } 149 - 150 - /** 151 - * optee_to_msg_param() - convert from struct tee_params to OPTEE_MSG parameters 152 - * @msg_params: OPTEE_MSG parameters 153 - * @num_params: number of elements in the parameter arrays 154 - * @params: subsystem itnernal parameter representation 155 - * Returns 0 on success or <0 on failure 156 - */ 157 - int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params, 158 - const struct tee_param *params) 159 - { 160 - int rc; 161 - size_t n; 162 - 163 - for (n = 0; n < num_params; n++) { 164 - const struct tee_param *p = params + n; 165 - struct optee_msg_param *mp = msg_params + n; 166 - 167 - switch (p->attr) { 168 - case TEE_IOCTL_PARAM_ATTR_TYPE_NONE: 169 - mp->attr = 
TEE_IOCTL_PARAM_ATTR_TYPE_NONE; 170 - memset(&mp->u, 0, sizeof(mp->u)); 171 - break; 172 - case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT: 173 - case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT: 174 - case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: 175 - mp->attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT + p->attr - 176 - TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT; 177 - mp->u.value.a = p->u.value.a; 178 - mp->u.value.b = p->u.value.b; 179 - mp->u.value.c = p->u.value.c; 180 - break; 181 - case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT: 182 - case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: 183 - case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: 184 - if (tee_shm_is_registered(p->u.memref.shm)) 185 - rc = to_msg_param_reg_mem(mp, p); 186 - else 187 - rc = to_msg_param_tmp_mem(mp, p); 188 - if (rc) 189 - return rc; 190 - break; 191 - default: 192 - return -EINVAL; 51 + for (i = 0; i < nr_pages; i++) { 52 + pages[i] = page; 53 + page++; 193 54 } 55 + 56 + shm->flags |= TEE_SHM_REGISTER; 57 + rc = shm_register(shm->ctx, shm, pages, nr_pages, 58 + (unsigned long)shm->kaddr); 59 + kfree(pages); 60 + if (rc) 61 + goto err; 194 62 } 63 + 195 64 return 0; 196 - } 197 65 198 - static void optee_get_version(struct tee_device *teedev, 199 - struct tee_ioctl_version_data *vers) 200 - { 201 - struct tee_ioctl_version_data v = { 202 - .impl_id = TEE_IMPL_ID_OPTEE, 203 - .impl_caps = TEE_OPTEE_CAP_TZ, 204 - .gen_caps = TEE_GEN_CAP_GP, 205 - }; 206 - struct optee *optee = tee_get_drvdata(teedev); 207 - 208 - if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) 209 - v.gen_caps |= TEE_GEN_CAP_REG_MEM; 210 - if (optee->sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL) 211 - v.gen_caps |= TEE_GEN_CAP_MEMREF_NULL; 212 - *vers = v; 66 + err: 67 + __free_pages(page, order); 68 + return rc; 213 69 } 214 70 215 71 static void optee_bus_scan(struct work_struct *work) ··· 73 217 WARN_ON(optee_enumerate_devices(PTA_CMD_GET_DEVICES_SUPP)); 74 218 } 75 219 76 - static int optee_open(struct tee_context *ctx) 220 + int optee_open(struct 
tee_context *ctx, bool cap_memref_null) 77 221 { 78 222 struct optee_context_data *ctxdata; 79 223 struct tee_device *teedev = ctx->teedev; ··· 111 255 mutex_init(&ctxdata->mutex); 112 256 INIT_LIST_HEAD(&ctxdata->sess_list); 113 257 114 - if (optee->sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL) 115 - ctx->cap_memref_null = true; 116 - else 117 - ctx->cap_memref_null = false; 118 - 258 + ctx->cap_memref_null = cap_memref_null; 119 259 ctx->data = ctxdata; 120 260 return 0; 121 261 } 122 262 123 - static void optee_release(struct tee_context *ctx) 263 + static void optee_release_helper(struct tee_context *ctx, 264 + int (*close_session)(struct tee_context *ctx, 265 + u32 session)) 124 266 { 125 267 struct optee_context_data *ctxdata = ctx->data; 126 - struct tee_device *teedev = ctx->teedev; 127 - struct optee *optee = tee_get_drvdata(teedev); 128 - struct tee_shm *shm; 129 - struct optee_msg_arg *arg = NULL; 130 - phys_addr_t parg; 131 268 struct optee_session *sess; 132 269 struct optee_session *sess_tmp; 133 270 134 271 if (!ctxdata) 135 272 return; 136 273 137 - shm = tee_shm_alloc(ctx, sizeof(struct optee_msg_arg), 138 - TEE_SHM_MAPPED | TEE_SHM_PRIV); 139 - if (!IS_ERR(shm)) { 140 - arg = tee_shm_get_va(shm, 0); 141 - /* 142 - * If va2pa fails for some reason, we can't call into 143 - * secure world, only free the memory. Secure OS will leak 144 - * sessions and finally refuse more sessions, but we will 145 - * at least let normal world reclaim its memory. 
146 - */ 147 - if (!IS_ERR(arg)) 148 - if (tee_shm_va2pa(shm, arg, &parg)) 149 - arg = NULL; /* prevent usage of parg below */ 150 - } 151 - 152 274 list_for_each_entry_safe(sess, sess_tmp, &ctxdata->sess_list, 153 275 list_node) { 154 276 list_del(&sess->list_node); 155 - if (!IS_ERR_OR_NULL(arg)) { 156 - memset(arg, 0, sizeof(*arg)); 157 - arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION; 158 - arg->session = sess->session_id; 159 - optee_do_call_with_arg(ctx, parg); 160 - } 277 + close_session(ctx, sess->session_id); 161 278 kfree(sess); 162 279 } 163 280 kfree(ctxdata); 164 - 165 - if (!IS_ERR(shm)) 166 - tee_shm_free(shm); 167 - 168 281 ctx->data = NULL; 282 + } 169 283 170 - if (teedev == optee->supp_teedev) { 171 - if (optee->scan_bus_wq) { 172 - destroy_workqueue(optee->scan_bus_wq); 173 - optee->scan_bus_wq = NULL; 174 - } 175 - optee_supp_release(&optee->supp); 284 + void optee_release(struct tee_context *ctx) 285 + { 286 + optee_release_helper(ctx, optee_close_session_helper); 287 + } 288 + 289 + void optee_release_supp(struct tee_context *ctx) 290 + { 291 + struct optee *optee = tee_get_drvdata(ctx->teedev); 292 + 293 + optee_release_helper(ctx, optee_close_session_helper); 294 + if (optee->scan_bus_wq) { 295 + destroy_workqueue(optee->scan_bus_wq); 296 + optee->scan_bus_wq = NULL; 176 297 } 298 + optee_supp_release(&optee->supp); 177 299 } 178 300 179 - static const struct tee_driver_ops optee_ops = { 180 - .get_version = optee_get_version, 181 - .open = optee_open, 182 - .release = optee_release, 183 - .open_session = optee_open_session, 184 - .close_session = optee_close_session, 185 - .invoke_func = optee_invoke_func, 186 - .cancel_req = optee_cancel_req, 187 - .shm_register = optee_shm_register, 188 - .shm_unregister = optee_shm_unregister, 189 - }; 190 - 191 - static const struct tee_desc optee_desc = { 192 - .name = DRIVER_NAME "-clnt", 193 - .ops = &optee_ops, 194 - .owner = THIS_MODULE, 195 - }; 196 - 197 - static const struct tee_driver_ops 
optee_supp_ops = { 198 - .get_version = optee_get_version, 199 - .open = optee_open, 200 - .release = optee_release, 201 - .supp_recv = optee_supp_recv, 202 - .supp_send = optee_supp_send, 203 - .shm_register = optee_shm_register_supp, 204 - .shm_unregister = optee_shm_unregister_supp, 205 - }; 206 - 207 - static const struct tee_desc optee_supp_desc = { 208 - .name = DRIVER_NAME "-supp", 209 - .ops = &optee_supp_ops, 210 - .owner = THIS_MODULE, 211 - .flags = TEE_DESC_PRIVILEGED, 212 - }; 213 - 214 - static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn) 301 + void optee_remove_common(struct optee *optee) 215 302 { 216 - struct arm_smccc_res res; 217 - 218 - invoke_fn(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res); 219 - 220 - if (res.a0 == OPTEE_MSG_UID_0 && res.a1 == OPTEE_MSG_UID_1 && 221 - res.a2 == OPTEE_MSG_UID_2 && res.a3 == OPTEE_MSG_UID_3) 222 - return true; 223 - return false; 224 - } 225 - 226 - static void optee_msg_get_os_revision(optee_invoke_fn *invoke_fn) 227 - { 228 - union { 229 - struct arm_smccc_res smccc; 230 - struct optee_smc_call_get_os_revision_result result; 231 - } res = { 232 - .result = { 233 - .build_id = 0 234 - } 235 - }; 236 - 237 - invoke_fn(OPTEE_SMC_CALL_GET_OS_REVISION, 0, 0, 0, 0, 0, 0, 0, 238 - &res.smccc); 239 - 240 - if (res.result.build_id) 241 - pr_info("revision %lu.%lu (%08lx)", res.result.major, 242 - res.result.minor, res.result.build_id); 243 - else 244 - pr_info("revision %lu.%lu", res.result.major, res.result.minor); 245 - } 246 - 247 - static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn) 248 - { 249 - union { 250 - struct arm_smccc_res smccc; 251 - struct optee_smc_calls_revision_result result; 252 - } res; 253 - 254 - invoke_fn(OPTEE_SMC_CALLS_REVISION, 0, 0, 0, 0, 0, 0, 0, &res.smccc); 255 - 256 - if (res.result.major == OPTEE_MSG_REVISION_MAJOR && 257 - (int)res.result.minor >= OPTEE_MSG_REVISION_MINOR) 258 - return true; 259 - return false; 260 - } 261 - 262 - static 
bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn, 263 - u32 *sec_caps) 264 - { 265 - union { 266 - struct arm_smccc_res smccc; 267 - struct optee_smc_exchange_capabilities_result result; 268 - } res; 269 - u32 a1 = 0; 270 - 271 - /* 272 - * TODO This isn't enough to tell if it's UP system (from kernel 273 - * point of view) or not, is_smp() returns the the information 274 - * needed, but can't be called directly from here. 275 - */ 276 - if (!IS_ENABLED(CONFIG_SMP) || nr_cpu_ids == 1) 277 - a1 |= OPTEE_SMC_NSEC_CAP_UNIPROCESSOR; 278 - 279 - invoke_fn(OPTEE_SMC_EXCHANGE_CAPABILITIES, a1, 0, 0, 0, 0, 0, 0, 280 - &res.smccc); 281 - 282 - if (res.result.status != OPTEE_SMC_RETURN_OK) 283 - return false; 284 - 285 - *sec_caps = res.result.capabilities; 286 - return true; 287 - } 288 - 289 - static struct tee_shm_pool *optee_config_dyn_shm(void) 290 - { 291 - struct tee_shm_pool_mgr *priv_mgr; 292 - struct tee_shm_pool_mgr *dmabuf_mgr; 293 - void *rc; 294 - 295 - rc = optee_shm_pool_alloc_pages(); 296 - if (IS_ERR(rc)) 297 - return rc; 298 - priv_mgr = rc; 299 - 300 - rc = optee_shm_pool_alloc_pages(); 301 - if (IS_ERR(rc)) { 302 - tee_shm_pool_mgr_destroy(priv_mgr); 303 - return rc; 304 - } 305 - dmabuf_mgr = rc; 306 - 307 - rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr); 308 - if (IS_ERR(rc)) { 309 - tee_shm_pool_mgr_destroy(priv_mgr); 310 - tee_shm_pool_mgr_destroy(dmabuf_mgr); 311 - } 312 - 313 - return rc; 314 - } 315 - 316 - static struct tee_shm_pool * 317 - optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm) 318 - { 319 - union { 320 - struct arm_smccc_res smccc; 321 - struct optee_smc_get_shm_config_result result; 322 - } res; 323 - unsigned long vaddr; 324 - phys_addr_t paddr; 325 - size_t size; 326 - phys_addr_t begin; 327 - phys_addr_t end; 328 - void *va; 329 - struct tee_shm_pool_mgr *priv_mgr; 330 - struct tee_shm_pool_mgr *dmabuf_mgr; 331 - void *rc; 332 - const int sz = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE; 333 - 
334 - invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc); 335 - if (res.result.status != OPTEE_SMC_RETURN_OK) { 336 - pr_err("static shm service not available\n"); 337 - return ERR_PTR(-ENOENT); 338 - } 339 - 340 - if (res.result.settings != OPTEE_SMC_SHM_CACHED) { 341 - pr_err("only normal cached shared memory supported\n"); 342 - return ERR_PTR(-EINVAL); 343 - } 344 - 345 - begin = roundup(res.result.start, PAGE_SIZE); 346 - end = rounddown(res.result.start + res.result.size, PAGE_SIZE); 347 - paddr = begin; 348 - size = end - begin; 349 - 350 - if (size < 2 * OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE) { 351 - pr_err("too small shared memory area\n"); 352 - return ERR_PTR(-EINVAL); 353 - } 354 - 355 - va = memremap(paddr, size, MEMREMAP_WB); 356 - if (!va) { 357 - pr_err("shared memory ioremap failed\n"); 358 - return ERR_PTR(-EINVAL); 359 - } 360 - vaddr = (unsigned long)va; 361 - 362 - rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, sz, 363 - 3 /* 8 bytes aligned */); 364 - if (IS_ERR(rc)) 365 - goto err_memunmap; 366 - priv_mgr = rc; 367 - 368 - vaddr += sz; 369 - paddr += sz; 370 - size -= sz; 371 - 372 - rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, size, PAGE_SHIFT); 373 - if (IS_ERR(rc)) 374 - goto err_free_priv_mgr; 375 - dmabuf_mgr = rc; 376 - 377 - rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr); 378 - if (IS_ERR(rc)) 379 - goto err_free_dmabuf_mgr; 380 - 381 - *memremaped_shm = va; 382 - 383 - return rc; 384 - 385 - err_free_dmabuf_mgr: 386 - tee_shm_pool_mgr_destroy(dmabuf_mgr); 387 - err_free_priv_mgr: 388 - tee_shm_pool_mgr_destroy(priv_mgr); 389 - err_memunmap: 390 - memunmap(va); 391 - return rc; 392 - } 393 - 394 - /* Simple wrapper functions to be able to use a function pointer */ 395 - static void optee_smccc_smc(unsigned long a0, unsigned long a1, 396 - unsigned long a2, unsigned long a3, 397 - unsigned long a4, unsigned long a5, 398 - unsigned long a6, unsigned long a7, 399 - struct arm_smccc_res *res) 400 - { 401 - 
arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res); 402 - } 403 - 404 - static void optee_smccc_hvc(unsigned long a0, unsigned long a1, 405 - unsigned long a2, unsigned long a3, 406 - unsigned long a4, unsigned long a5, 407 - unsigned long a6, unsigned long a7, 408 - struct arm_smccc_res *res) 409 - { 410 - arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res); 411 - } 412 - 413 - static optee_invoke_fn *get_invoke_func(struct device *dev) 414 - { 415 - const char *method; 416 - 417 - pr_info("probing for conduit method.\n"); 418 - 419 - if (device_property_read_string(dev, "method", &method)) { 420 - pr_warn("missing \"method\" property\n"); 421 - return ERR_PTR(-ENXIO); 422 - } 423 - 424 - if (!strcmp("hvc", method)) 425 - return optee_smccc_hvc; 426 - else if (!strcmp("smc", method)) 427 - return optee_smccc_smc; 428 - 429 - pr_warn("invalid \"method\" property: %s\n", method); 430 - return ERR_PTR(-EINVAL); 431 - } 432 - 433 - /* optee_remove - Device Removal Routine 434 - * @pdev: platform device information struct 435 - * 436 - * optee_remove is called by platform subsystem to alert the driver 437 - * that it should release the device 438 - */ 439 - 440 - static int optee_remove(struct platform_device *pdev) 441 - { 442 - struct optee *optee = platform_get_drvdata(pdev); 443 - 444 - /* 445 - * Ask OP-TEE to free all cached shared memory objects to decrease 446 - * reference counters and also avoid wild pointers in secure world 447 - * into the old shared memory range. 
448 - */ 449 - optee_disable_shm_cache(optee); 303 + /* Unregister OP-TEE specific client devices on TEE bus */ 304 + optee_unregister_devices(); 450 305 451 306 /* 452 307 * The two devices have to be unregistered before we can free the ··· 167 600 tee_device_unregister(optee->teedev); 168 601 169 602 tee_shm_pool_free(optee->pool); 170 - if (optee->memremaped_shm) 171 - memunmap(optee->memremaped_shm); 172 603 optee_wait_queue_exit(&optee->wait_queue); 173 604 optee_supp_uninit(&optee->supp); 174 605 mutex_destroy(&optee->call_queue.mutex); 175 - 176 - kfree(optee); 177 - 178 - return 0; 179 606 } 180 607 181 - /* optee_shutdown - Device Removal Routine 182 - * @pdev: platform device information struct 183 - * 184 - * platform_shutdown is called by the platform subsystem to alert 185 - * the driver that a shutdown, reboot, or kexec is happening and 186 - * device must be disabled. 187 - */ 188 - static void optee_shutdown(struct platform_device *pdev) 189 - { 190 - optee_disable_shm_cache(platform_get_drvdata(pdev)); 191 - } 608 + static int smc_abi_rc; 609 + static int ffa_abi_rc; 192 610 193 - static int optee_probe(struct platform_device *pdev) 611 + static int optee_core_init(void) 194 612 { 195 - optee_invoke_fn *invoke_fn; 196 - struct tee_shm_pool *pool = ERR_PTR(-EINVAL); 197 - struct optee *optee = NULL; 198 - void *memremaped_shm = NULL; 199 - struct tee_device *teedev; 200 - u32 sec_caps; 201 - int rc; 202 - 203 613 /* 204 614 * The kernel may have crashed at the same time that all available 205 615 * secure world threads were suspended and we cannot reschedule the ··· 187 643 if (is_kdump_kernel()) 188 644 return -ENODEV; 189 645 190 - invoke_fn = get_invoke_func(&pdev->dev); 191 - if (IS_ERR(invoke_fn)) 192 - return PTR_ERR(invoke_fn); 646 + smc_abi_rc = optee_smc_abi_register(); 647 + ffa_abi_rc = optee_ffa_abi_register(); 193 648 194 - if (!optee_msg_api_uid_is_optee_api(invoke_fn)) { 195 - pr_warn("api uid mismatch\n"); 196 - return -EINVAL; 197 - 
} 198 - 199 - optee_msg_get_os_revision(invoke_fn); 200 - 201 - if (!optee_msg_api_revision_is_compatible(invoke_fn)) { 202 - pr_warn("api revision mismatch\n"); 203 - return -EINVAL; 204 - } 205 - 206 - if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) { 207 - pr_warn("capabilities mismatch\n"); 208 - return -EINVAL; 209 - } 210 - 211 - /* 212 - * Try to use dynamic shared memory if possible 213 - */ 214 - if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) 215 - pool = optee_config_dyn_shm(); 216 - 217 - /* 218 - * If dynamic shared memory is not available or failed - try static one 219 - */ 220 - if (IS_ERR(pool) && (sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM)) 221 - pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm); 222 - 223 - if (IS_ERR(pool)) 224 - return PTR_ERR(pool); 225 - 226 - optee = kzalloc(sizeof(*optee), GFP_KERNEL); 227 - if (!optee) { 228 - rc = -ENOMEM; 229 - goto err; 230 - } 231 - 232 - optee->invoke_fn = invoke_fn; 233 - optee->sec_caps = sec_caps; 234 - 235 - teedev = tee_device_alloc(&optee_desc, NULL, pool, optee); 236 - if (IS_ERR(teedev)) { 237 - rc = PTR_ERR(teedev); 238 - goto err; 239 - } 240 - optee->teedev = teedev; 241 - 242 - teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee); 243 - if (IS_ERR(teedev)) { 244 - rc = PTR_ERR(teedev); 245 - goto err; 246 - } 247 - optee->supp_teedev = teedev; 248 - 249 - rc = tee_device_register(optee->teedev); 250 - if (rc) 251 - goto err; 252 - 253 - rc = tee_device_register(optee->supp_teedev); 254 - if (rc) 255 - goto err; 256 - 257 - mutex_init(&optee->call_queue.mutex); 258 - INIT_LIST_HEAD(&optee->call_queue.waiters); 259 - optee_wait_queue_init(&optee->wait_queue); 260 - optee_supp_init(&optee->supp); 261 - optee->memremaped_shm = memremaped_shm; 262 - optee->pool = pool; 263 - 264 - /* 265 - * Ensure that there are no pre-existing shm objects before enabling 266 - * the shm cache so that there's no chance of receiving an invalid 267 - * address during shutdown. 
This could occur, for example, if we're 268 - * kexec booting from an older kernel that did not properly cleanup the 269 - * shm cache. 270 - */ 271 - optee_disable_unmapped_shm_cache(optee); 272 - 273 - optee_enable_shm_cache(optee); 274 - 275 - if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) 276 - pr_info("dynamic shared memory is enabled\n"); 277 - 278 - platform_set_drvdata(pdev, optee); 279 - 280 - rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES); 281 - if (rc) { 282 - optee_remove(pdev); 283 - return rc; 284 - } 285 - 286 - pr_info("initialized driver\n"); 649 + /* If both failed there's no point with this module */ 650 + if (smc_abi_rc && ffa_abi_rc) 651 + return smc_abi_rc; 287 652 return 0; 288 - err: 289 - if (optee) { 290 - /* 291 - * tee_device_unregister() is safe to call even if the 292 - * devices hasn't been registered with 293 - * tee_device_register() yet. 294 - */ 295 - tee_device_unregister(optee->supp_teedev); 296 - tee_device_unregister(optee->teedev); 297 - kfree(optee); 298 - } 299 - if (pool) 300 - tee_shm_pool_free(pool); 301 - if (memremaped_shm) 302 - memunmap(memremaped_shm); 303 - return rc; 304 653 } 654 + module_init(optee_core_init); 305 655 306 - static const struct of_device_id optee_dt_match[] = { 307 - { .compatible = "linaro,optee-tz" }, 308 - {}, 309 - }; 310 - MODULE_DEVICE_TABLE(of, optee_dt_match); 311 - 312 - static struct platform_driver optee_driver = { 313 - .probe = optee_probe, 314 - .remove = optee_remove, 315 - .shutdown = optee_shutdown, 316 - .driver = { 317 - .name = "optee", 318 - .of_match_table = optee_dt_match, 319 - }, 320 - }; 321 - module_platform_driver(optee_driver); 656 + static void optee_core_exit(void) 657 + { 658 + if (!smc_abi_rc) 659 + optee_smc_abi_unregister(); 660 + if (!ffa_abi_rc) 661 + optee_ffa_abi_unregister(); 662 + } 663 + module_exit(optee_core_exit); 322 664 323 665 MODULE_AUTHOR("Linaro"); 324 666 MODULE_DESCRIPTION("OP-TEE driver");
+22
drivers/tee/optee/device.c
··· 53 53 return 0; 54 54 } 55 55 56 + static void optee_release_device(struct device *dev) 57 + { 58 + struct tee_client_device *optee_device = to_tee_client_device(dev); 59 + 60 + kfree(optee_device); 61 + } 62 + 56 63 static int optee_register_device(const uuid_t *device_uuid) 57 64 { 58 65 struct tee_client_device *optee_device = NULL; ··· 70 63 return -ENOMEM; 71 64 72 65 optee_device->dev.bus = &tee_bus_type; 66 + optee_device->dev.release = optee_release_device; 73 67 if (dev_set_name(&optee_device->dev, "optee-ta-%pUb", device_uuid)) { 74 68 kfree(optee_device); 75 69 return -ENOMEM; ··· 161 153 int optee_enumerate_devices(u32 func) 162 154 { 163 155 return __optee_enumerate_devices(func); 156 + } 157 + 158 + static int __optee_unregister_device(struct device *dev, void *data) 159 + { 160 + if (!strncmp(dev_name(dev), "optee-ta", strlen("optee-ta"))) 161 + device_unregister(dev); 162 + 163 + return 0; 164 + } 165 + 166 + void optee_unregister_devices(void) 167 + { 168 + bus_for_each_dev(&tee_bus_type, NULL, NULL, 169 + __optee_unregister_device); 164 170 }
+911
drivers/tee/optee/ffa_abi.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Copyright (c) 2021, Linaro Limited 4 + */ 5 + 6 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 7 + 8 + #include <linux/arm_ffa.h> 9 + #include <linux/errno.h> 10 + #include <linux/scatterlist.h> 11 + #include <linux/sched.h> 12 + #include <linux/slab.h> 13 + #include <linux/string.h> 14 + #include <linux/tee_drv.h> 15 + #include <linux/types.h> 16 + #include "optee_private.h" 17 + #include "optee_ffa.h" 18 + #include "optee_rpc_cmd.h" 19 + 20 + /* 21 + * This file implement the FF-A ABI used when communicating with secure world 22 + * OP-TEE OS via FF-A. 23 + * This file is divided into the following sections: 24 + * 1. Maintain a hash table for lookup of a global FF-A memory handle 25 + * 2. Convert between struct tee_param and struct optee_msg_param 26 + * 3. Low level support functions to register shared memory in secure world 27 + * 4. Dynamic shared memory pool based on alloc_pages() 28 + * 5. Do a normal scheduled call into secure world 29 + * 6. Driver initialization. 30 + */ 31 + 32 + /* 33 + * 1. Maintain a hash table for lookup of a global FF-A memory handle 34 + * 35 + * FF-A assigns a global memory handle for each piece shared memory. 36 + * This handle is then used when communicating with secure world. 
37 + * 38 + * Main functions are optee_shm_add_ffa_handle() and optee_shm_rem_ffa_handle() 39 + */ 40 + struct shm_rhash { 41 + struct tee_shm *shm; 42 + u64 global_id; 43 + struct rhash_head linkage; 44 + }; 45 + 46 + static void rh_free_fn(void *ptr, void *arg) 47 + { 48 + kfree(ptr); 49 + } 50 + 51 + static const struct rhashtable_params shm_rhash_params = { 52 + .head_offset = offsetof(struct shm_rhash, linkage), 53 + .key_len = sizeof(u64), 54 + .key_offset = offsetof(struct shm_rhash, global_id), 55 + .automatic_shrinking = true, 56 + }; 57 + 58 + static struct tee_shm *optee_shm_from_ffa_handle(struct optee *optee, 59 + u64 global_id) 60 + { 61 + struct tee_shm *shm = NULL; 62 + struct shm_rhash *r; 63 + 64 + mutex_lock(&optee->ffa.mutex); 65 + r = rhashtable_lookup_fast(&optee->ffa.global_ids, &global_id, 66 + shm_rhash_params); 67 + if (r) 68 + shm = r->shm; 69 + mutex_unlock(&optee->ffa.mutex); 70 + 71 + return shm; 72 + } 73 + 74 + static int optee_shm_add_ffa_handle(struct optee *optee, struct tee_shm *shm, 75 + u64 global_id) 76 + { 77 + struct shm_rhash *r; 78 + int rc; 79 + 80 + r = kmalloc(sizeof(*r), GFP_KERNEL); 81 + if (!r) 82 + return -ENOMEM; 83 + r->shm = shm; 84 + r->global_id = global_id; 85 + 86 + mutex_lock(&optee->ffa.mutex); 87 + rc = rhashtable_lookup_insert_fast(&optee->ffa.global_ids, &r->linkage, 88 + shm_rhash_params); 89 + mutex_unlock(&optee->ffa.mutex); 90 + 91 + if (rc) 92 + kfree(r); 93 + 94 + return rc; 95 + } 96 + 97 + static int optee_shm_rem_ffa_handle(struct optee *optee, u64 global_id) 98 + { 99 + struct shm_rhash *r; 100 + int rc = -ENOENT; 101 + 102 + mutex_lock(&optee->ffa.mutex); 103 + r = rhashtable_lookup_fast(&optee->ffa.global_ids, &global_id, 104 + shm_rhash_params); 105 + if (r) 106 + rc = rhashtable_remove_fast(&optee->ffa.global_ids, 107 + &r->linkage, shm_rhash_params); 108 + mutex_unlock(&optee->ffa.mutex); 109 + 110 + if (!rc) 111 + kfree(r); 112 + 113 + return rc; 114 + } 115 + 116 + /* 117 + * 2. 
Convert between struct tee_param and struct optee_msg_param 118 + * 119 + * optee_ffa_from_msg_param() and optee_ffa_to_msg_param() are the main 120 + * functions. 121 + */ 122 + 123 + static void from_msg_param_ffa_mem(struct optee *optee, struct tee_param *p, 124 + u32 attr, const struct optee_msg_param *mp) 125 + { 126 + struct tee_shm *shm = NULL; 127 + u64 offs_high = 0; 128 + u64 offs_low = 0; 129 + 130 + p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT + 131 + attr - OPTEE_MSG_ATTR_TYPE_FMEM_INPUT; 132 + p->u.memref.size = mp->u.fmem.size; 133 + 134 + if (mp->u.fmem.global_id != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID) 135 + shm = optee_shm_from_ffa_handle(optee, mp->u.fmem.global_id); 136 + p->u.memref.shm = shm; 137 + 138 + if (shm) { 139 + offs_low = mp->u.fmem.offs_low; 140 + offs_high = mp->u.fmem.offs_high; 141 + } 142 + p->u.memref.shm_offs = offs_low | offs_high << 32; 143 + } 144 + 145 + /** 146 + * optee_ffa_from_msg_param() - convert from OPTEE_MSG parameters to 147 + * struct tee_param 148 + * @optee: main service struct 149 + * @params: subsystem internal parameter representation 150 + * @num_params: number of elements in the parameter arrays 151 + * @msg_params: OPTEE_MSG parameters 152 + * 153 + * Returns 0 on success or <0 on failure 154 + */ 155 + static int optee_ffa_from_msg_param(struct optee *optee, 156 + struct tee_param *params, size_t num_params, 157 + const struct optee_msg_param *msg_params) 158 + { 159 + size_t n; 160 + 161 + for (n = 0; n < num_params; n++) { 162 + struct tee_param *p = params + n; 163 + const struct optee_msg_param *mp = msg_params + n; 164 + u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK; 165 + 166 + switch (attr) { 167 + case OPTEE_MSG_ATTR_TYPE_NONE: 168 + p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE; 169 + memset(&p->u, 0, sizeof(p->u)); 170 + break; 171 + case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT: 172 + case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT: 173 + case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT: 174 + optee_from_msg_param_value(p, 
attr, mp); 175 + break; 176 + case OPTEE_MSG_ATTR_TYPE_FMEM_INPUT: 177 + case OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT: 178 + case OPTEE_MSG_ATTR_TYPE_FMEM_INOUT: 179 + from_msg_param_ffa_mem(optee, p, attr, mp); 180 + break; 181 + default: 182 + return -EINVAL; 183 + } 184 + } 185 + 186 + return 0; 187 + } 188 + 189 + static int to_msg_param_ffa_mem(struct optee_msg_param *mp, 190 + const struct tee_param *p) 191 + { 192 + struct tee_shm *shm = p->u.memref.shm; 193 + 194 + mp->attr = OPTEE_MSG_ATTR_TYPE_FMEM_INPUT + p->attr - 195 + TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT; 196 + 197 + if (shm) { 198 + u64 shm_offs = p->u.memref.shm_offs; 199 + 200 + mp->u.fmem.internal_offs = shm->offset; 201 + 202 + mp->u.fmem.offs_low = shm_offs; 203 + mp->u.fmem.offs_high = shm_offs >> 32; 204 + /* Check that the entire offset could be stored. */ 205 + if (mp->u.fmem.offs_high != shm_offs >> 32) 206 + return -EINVAL; 207 + 208 + mp->u.fmem.global_id = shm->sec_world_id; 209 + } else { 210 + memset(&mp->u, 0, sizeof(mp->u)); 211 + mp->u.fmem.global_id = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID; 212 + } 213 + mp->u.fmem.size = p->u.memref.size; 214 + 215 + return 0; 216 + } 217 + 218 + /** 219 + * optee_ffa_to_msg_param() - convert from struct tee_params to OPTEE_MSG 220 + * parameters 221 + * @optee: main service struct 222 + * @msg_params: OPTEE_MSG parameters 223 + * @num_params: number of elements in the parameter arrays 224 + * @params: subsystem itnernal parameter representation 225 + * Returns 0 on success or <0 on failure 226 + */ 227 + static int optee_ffa_to_msg_param(struct optee *optee, 228 + struct optee_msg_param *msg_params, 229 + size_t num_params, 230 + const struct tee_param *params) 231 + { 232 + size_t n; 233 + 234 + for (n = 0; n < num_params; n++) { 235 + const struct tee_param *p = params + n; 236 + struct optee_msg_param *mp = msg_params + n; 237 + 238 + switch (p->attr) { 239 + case TEE_IOCTL_PARAM_ATTR_TYPE_NONE: 240 + mp->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE; 241 + 
memset(&mp->u, 0, sizeof(mp->u)); 242 + break; 243 + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT: 244 + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT: 245 + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: 246 + optee_to_msg_param_value(mp, p); 247 + break; 248 + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT: 249 + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: 250 + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: 251 + if (to_msg_param_ffa_mem(mp, p)) 252 + return -EINVAL; 253 + break; 254 + default: 255 + return -EINVAL; 256 + } 257 + } 258 + 259 + return 0; 260 + } 261 + 262 + /* 263 + * 3. Low level support functions to register shared memory in secure world 264 + * 265 + * Functions to register and unregister shared memory both for normal 266 + * clients and for tee-supplicant. 267 + */ 268 + 269 + static int optee_ffa_shm_register(struct tee_context *ctx, struct tee_shm *shm, 270 + struct page **pages, size_t num_pages, 271 + unsigned long start) 272 + { 273 + struct optee *optee = tee_get_drvdata(ctx->teedev); 274 + const struct ffa_dev_ops *ffa_ops = optee->ffa.ffa_ops; 275 + struct ffa_device *ffa_dev = optee->ffa.ffa_dev; 276 + struct ffa_mem_region_attributes mem_attr = { 277 + .receiver = ffa_dev->vm_id, 278 + .attrs = FFA_MEM_RW, 279 + }; 280 + struct ffa_mem_ops_args args = { 281 + .use_txbuf = true, 282 + .attrs = &mem_attr, 283 + .nattrs = 1, 284 + }; 285 + struct sg_table sgt; 286 + int rc; 287 + 288 + rc = optee_check_mem_type(start, num_pages); 289 + if (rc) 290 + return rc; 291 + 292 + rc = sg_alloc_table_from_pages(&sgt, pages, num_pages, 0, 293 + num_pages * PAGE_SIZE, GFP_KERNEL); 294 + if (rc) 295 + return rc; 296 + args.sg = sgt.sgl; 297 + rc = ffa_ops->memory_share(ffa_dev, &args); 298 + sg_free_table(&sgt); 299 + if (rc) 300 + return rc; 301 + 302 + rc = optee_shm_add_ffa_handle(optee, shm, args.g_handle); 303 + if (rc) { 304 + ffa_ops->memory_reclaim(args.g_handle, 0); 305 + return rc; 306 + } 307 + 308 + shm->sec_world_id = args.g_handle; 309 + 310 + 
return 0; 311 + } 312 + 313 + static int optee_ffa_shm_unregister(struct tee_context *ctx, 314 + struct tee_shm *shm) 315 + { 316 + struct optee *optee = tee_get_drvdata(ctx->teedev); 317 + const struct ffa_dev_ops *ffa_ops = optee->ffa.ffa_ops; 318 + struct ffa_device *ffa_dev = optee->ffa.ffa_dev; 319 + u64 global_handle = shm->sec_world_id; 320 + struct ffa_send_direct_data data = { 321 + .data0 = OPTEE_FFA_UNREGISTER_SHM, 322 + .data1 = (u32)global_handle, 323 + .data2 = (u32)(global_handle >> 32) 324 + }; 325 + int rc; 326 + 327 + optee_shm_rem_ffa_handle(optee, global_handle); 328 + shm->sec_world_id = 0; 329 + 330 + rc = ffa_ops->sync_send_receive(ffa_dev, &data); 331 + if (rc) 332 + pr_err("Unregister SHM id 0x%llx rc %d\n", global_handle, rc); 333 + 334 + rc = ffa_ops->memory_reclaim(global_handle, 0); 335 + if (rc) 336 + pr_err("mem_reclain: 0x%llx %d", global_handle, rc); 337 + 338 + return rc; 339 + } 340 + 341 + static int optee_ffa_shm_unregister_supp(struct tee_context *ctx, 342 + struct tee_shm *shm) 343 + { 344 + struct optee *optee = tee_get_drvdata(ctx->teedev); 345 + const struct ffa_dev_ops *ffa_ops = optee->ffa.ffa_ops; 346 + u64 global_handle = shm->sec_world_id; 347 + int rc; 348 + 349 + /* 350 + * We're skipping the OPTEE_FFA_YIELDING_CALL_UNREGISTER_SHM call 351 + * since this is OP-TEE freeing via RPC so it has already retired 352 + * this ID. 353 + */ 354 + 355 + optee_shm_rem_ffa_handle(optee, global_handle); 356 + rc = ffa_ops->memory_reclaim(global_handle, 0); 357 + if (rc) 358 + pr_err("mem_reclain: 0x%llx %d", global_handle, rc); 359 + 360 + shm->sec_world_id = 0; 361 + 362 + return rc; 363 + } 364 + 365 + /* 366 + * 4. Dynamic shared memory pool based on alloc_pages() 367 + * 368 + * Implements an OP-TEE specific shared memory pool. 369 + * The main function is optee_ffa_shm_pool_alloc_pages(). 
370 + */ 371 + 372 + static int pool_ffa_op_alloc(struct tee_shm_pool_mgr *poolm, 373 + struct tee_shm *shm, size_t size) 374 + { 375 + return optee_pool_op_alloc_helper(poolm, shm, size, 376 + optee_ffa_shm_register); 377 + } 378 + 379 + static void pool_ffa_op_free(struct tee_shm_pool_mgr *poolm, 380 + struct tee_shm *shm) 381 + { 382 + optee_ffa_shm_unregister(shm->ctx, shm); 383 + free_pages((unsigned long)shm->kaddr, get_order(shm->size)); 384 + shm->kaddr = NULL; 385 + } 386 + 387 + static void pool_ffa_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm) 388 + { 389 + kfree(poolm); 390 + } 391 + 392 + static const struct tee_shm_pool_mgr_ops pool_ffa_ops = { 393 + .alloc = pool_ffa_op_alloc, 394 + .free = pool_ffa_op_free, 395 + .destroy_poolmgr = pool_ffa_op_destroy_poolmgr, 396 + }; 397 + 398 + /** 399 + * optee_ffa_shm_pool_alloc_pages() - create page-based allocator pool 400 + * 401 + * This pool is used with OP-TEE over FF-A. In this case command buffers 402 + * and such are allocated from kernel's own memory. 403 + */ 404 + static struct tee_shm_pool_mgr *optee_ffa_shm_pool_alloc_pages(void) 405 + { 406 + struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); 407 + 408 + if (!mgr) 409 + return ERR_PTR(-ENOMEM); 410 + 411 + mgr->ops = &pool_ffa_ops; 412 + 413 + return mgr; 414 + } 415 + 416 + /* 417 + * 5. Do a normal scheduled call into secure world 418 + * 419 + * The function optee_ffa_do_call_with_arg() performs a normal scheduled 420 + * call into secure world. During this call may normal world request help 421 + * from normal world using RPCs, Remote Procedure Calls. This includes 422 + * delivery of non-secure interrupts to for instance allow rescheduling of 423 + * the current task. 
424 + */ 425 + 426 + static void handle_ffa_rpc_func_cmd_shm_alloc(struct tee_context *ctx, 427 + struct optee_msg_arg *arg) 428 + { 429 + struct tee_shm *shm; 430 + 431 + if (arg->num_params != 1 || 432 + arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) { 433 + arg->ret = TEEC_ERROR_BAD_PARAMETERS; 434 + return; 435 + } 436 + 437 + switch (arg->params[0].u.value.a) { 438 + case OPTEE_RPC_SHM_TYPE_APPL: 439 + shm = optee_rpc_cmd_alloc_suppl(ctx, arg->params[0].u.value.b); 440 + break; 441 + case OPTEE_RPC_SHM_TYPE_KERNEL: 442 + shm = tee_shm_alloc(ctx, arg->params[0].u.value.b, 443 + TEE_SHM_MAPPED | TEE_SHM_PRIV); 444 + break; 445 + default: 446 + arg->ret = TEEC_ERROR_BAD_PARAMETERS; 447 + return; 448 + } 449 + 450 + if (IS_ERR(shm)) { 451 + arg->ret = TEEC_ERROR_OUT_OF_MEMORY; 452 + return; 453 + } 454 + 455 + arg->params[0] = (struct optee_msg_param){ 456 + .attr = OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT, 457 + .u.fmem.size = tee_shm_get_size(shm), 458 + .u.fmem.global_id = shm->sec_world_id, 459 + .u.fmem.internal_offs = shm->offset, 460 + }; 461 + 462 + arg->ret = TEEC_SUCCESS; 463 + } 464 + 465 + static void handle_ffa_rpc_func_cmd_shm_free(struct tee_context *ctx, 466 + struct optee *optee, 467 + struct optee_msg_arg *arg) 468 + { 469 + struct tee_shm *shm; 470 + 471 + if (arg->num_params != 1 || 472 + arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) 473 + goto err_bad_param; 474 + 475 + shm = optee_shm_from_ffa_handle(optee, arg->params[0].u.value.b); 476 + if (!shm) 477 + goto err_bad_param; 478 + switch (arg->params[0].u.value.a) { 479 + case OPTEE_RPC_SHM_TYPE_APPL: 480 + optee_rpc_cmd_free_suppl(ctx, shm); 481 + break; 482 + case OPTEE_RPC_SHM_TYPE_KERNEL: 483 + tee_shm_free(shm); 484 + break; 485 + default: 486 + goto err_bad_param; 487 + } 488 + arg->ret = TEEC_SUCCESS; 489 + return; 490 + 491 + err_bad_param: 492 + arg->ret = TEEC_ERROR_BAD_PARAMETERS; 493 + } 494 + 495 + static void handle_ffa_rpc_func_cmd(struct tee_context *ctx, 496 + 
struct optee_msg_arg *arg) 497 + { 498 + struct optee *optee = tee_get_drvdata(ctx->teedev); 499 + 500 + arg->ret_origin = TEEC_ORIGIN_COMMS; 501 + switch (arg->cmd) { 502 + case OPTEE_RPC_CMD_SHM_ALLOC: 503 + handle_ffa_rpc_func_cmd_shm_alloc(ctx, arg); 504 + break; 505 + case OPTEE_RPC_CMD_SHM_FREE: 506 + handle_ffa_rpc_func_cmd_shm_free(ctx, optee, arg); 507 + break; 508 + default: 509 + optee_rpc_cmd(ctx, optee, arg); 510 + } 511 + } 512 + 513 + static void optee_handle_ffa_rpc(struct tee_context *ctx, u32 cmd, 514 + struct optee_msg_arg *arg) 515 + { 516 + switch (cmd) { 517 + case OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD: 518 + handle_ffa_rpc_func_cmd(ctx, arg); 519 + break; 520 + case OPTEE_FFA_YIELDING_CALL_RETURN_INTERRUPT: 521 + /* Interrupt delivered by now */ 522 + break; 523 + default: 524 + pr_warn("Unknown RPC func 0x%x\n", cmd); 525 + break; 526 + } 527 + } 528 + 529 + static int optee_ffa_yielding_call(struct tee_context *ctx, 530 + struct ffa_send_direct_data *data, 531 + struct optee_msg_arg *rpc_arg) 532 + { 533 + struct optee *optee = tee_get_drvdata(ctx->teedev); 534 + const struct ffa_dev_ops *ffa_ops = optee->ffa.ffa_ops; 535 + struct ffa_device *ffa_dev = optee->ffa.ffa_dev; 536 + struct optee_call_waiter w; 537 + u32 cmd = data->data0; 538 + u32 w4 = data->data1; 539 + u32 w5 = data->data2; 540 + u32 w6 = data->data3; 541 + int rc; 542 + 543 + /* Initialize waiter */ 544 + optee_cq_wait_init(&optee->call_queue, &w); 545 + while (true) { 546 + rc = ffa_ops->sync_send_receive(ffa_dev, data); 547 + if (rc) 548 + goto done; 549 + 550 + switch ((int)data->data0) { 551 + case TEEC_SUCCESS: 552 + break; 553 + case TEEC_ERROR_BUSY: 554 + if (cmd == OPTEE_FFA_YIELDING_CALL_RESUME) { 555 + rc = -EIO; 556 + goto done; 557 + } 558 + 559 + /* 560 + * Out of threads in secure world, wait for a thread 561 + * become available. 
562 + */ 563 + optee_cq_wait_for_completion(&optee->call_queue, &w); 564 + data->data0 = cmd; 565 + data->data1 = w4; 566 + data->data2 = w5; 567 + data->data3 = w6; 568 + continue; 569 + default: 570 + rc = -EIO; 571 + goto done; 572 + } 573 + 574 + if (data->data1 == OPTEE_FFA_YIELDING_CALL_RETURN_DONE) 575 + goto done; 576 + 577 + /* 578 + * OP-TEE has returned with a RPC request. 579 + * 580 + * Note that data->data4 (passed in register w7) is already 581 + * filled in by ffa_ops->sync_send_receive() returning 582 + * above. 583 + */ 584 + cond_resched(); 585 + optee_handle_ffa_rpc(ctx, data->data1, rpc_arg); 586 + cmd = OPTEE_FFA_YIELDING_CALL_RESUME; 587 + data->data0 = cmd; 588 + data->data1 = 0; 589 + data->data2 = 0; 590 + data->data3 = 0; 591 + } 592 + done: 593 + /* 594 + * We're done with our thread in secure world, if there's any 595 + * thread waiters wake up one. 596 + */ 597 + optee_cq_wait_final(&optee->call_queue, &w); 598 + 599 + return rc; 600 + } 601 + 602 + /** 603 + * optee_ffa_do_call_with_arg() - Do a FF-A call to enter OP-TEE in secure world 604 + * @ctx: calling context 605 + * @shm: shared memory holding the message to pass to secure world 606 + * 607 + * Does a FF-A call to OP-TEE in secure world and handles eventual resulting 608 + * Remote Procedure Calls (RPC) from OP-TEE. 
609 + * 610 + * Returns return code from FF-A, 0 is OK 611 + */ 612 + 613 + static int optee_ffa_do_call_with_arg(struct tee_context *ctx, 614 + struct tee_shm *shm) 615 + { 616 + struct ffa_send_direct_data data = { 617 + .data0 = OPTEE_FFA_YIELDING_CALL_WITH_ARG, 618 + .data1 = (u32)shm->sec_world_id, 619 + .data2 = (u32)(shm->sec_world_id >> 32), 620 + .data3 = shm->offset, 621 + }; 622 + struct optee_msg_arg *arg = tee_shm_get_va(shm, 0); 623 + unsigned int rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params); 624 + struct optee_msg_arg *rpc_arg = tee_shm_get_va(shm, rpc_arg_offs); 625 + 626 + return optee_ffa_yielding_call(ctx, &data, rpc_arg); 627 + } 628 + 629 + /* 630 + * 6. Driver initialization 631 + * 632 + * During driver inititialization is the OP-TEE Secure Partition is probed 633 + * to find out which features it supports so the driver can be initialized 634 + * with a matching configuration. 635 + */ 636 + 637 + static bool optee_ffa_api_is_compatbile(struct ffa_device *ffa_dev, 638 + const struct ffa_dev_ops *ops) 639 + { 640 + struct ffa_send_direct_data data = { OPTEE_FFA_GET_API_VERSION }; 641 + int rc; 642 + 643 + ops->mode_32bit_set(ffa_dev); 644 + 645 + rc = ops->sync_send_receive(ffa_dev, &data); 646 + if (rc) { 647 + pr_err("Unexpected error %d\n", rc); 648 + return false; 649 + } 650 + if (data.data0 != OPTEE_FFA_VERSION_MAJOR || 651 + data.data1 < OPTEE_FFA_VERSION_MINOR) { 652 + pr_err("Incompatible OP-TEE API version %lu.%lu", 653 + data.data0, data.data1); 654 + return false; 655 + } 656 + 657 + data = (struct ffa_send_direct_data){ OPTEE_FFA_GET_OS_VERSION }; 658 + rc = ops->sync_send_receive(ffa_dev, &data); 659 + if (rc) { 660 + pr_err("Unexpected error %d\n", rc); 661 + return false; 662 + } 663 + if (data.data2) 664 + pr_info("revision %lu.%lu (%08lx)", 665 + data.data0, data.data1, data.data2); 666 + else 667 + pr_info("revision %lu.%lu", data.data0, data.data1); 668 + 669 + return true; 670 + } 671 + 672 + static bool 
optee_ffa_exchange_caps(struct ffa_device *ffa_dev, 673 + const struct ffa_dev_ops *ops, 674 + unsigned int *rpc_arg_count) 675 + { 676 + struct ffa_send_direct_data data = { OPTEE_FFA_EXCHANGE_CAPABILITIES }; 677 + int rc; 678 + 679 + rc = ops->sync_send_receive(ffa_dev, &data); 680 + if (rc) { 681 + pr_err("Unexpected error %d", rc); 682 + return false; 683 + } 684 + if (data.data0) { 685 + pr_err("Unexpected exchange error %lu", data.data0); 686 + return false; 687 + } 688 + 689 + *rpc_arg_count = (u8)data.data1; 690 + 691 + return true; 692 + } 693 + 694 + static struct tee_shm_pool *optee_ffa_config_dyn_shm(void) 695 + { 696 + struct tee_shm_pool_mgr *priv_mgr; 697 + struct tee_shm_pool_mgr *dmabuf_mgr; 698 + void *rc; 699 + 700 + rc = optee_ffa_shm_pool_alloc_pages(); 701 + if (IS_ERR(rc)) 702 + return rc; 703 + priv_mgr = rc; 704 + 705 + rc = optee_ffa_shm_pool_alloc_pages(); 706 + if (IS_ERR(rc)) { 707 + tee_shm_pool_mgr_destroy(priv_mgr); 708 + return rc; 709 + } 710 + dmabuf_mgr = rc; 711 + 712 + rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr); 713 + if (IS_ERR(rc)) { 714 + tee_shm_pool_mgr_destroy(priv_mgr); 715 + tee_shm_pool_mgr_destroy(dmabuf_mgr); 716 + } 717 + 718 + return rc; 719 + } 720 + 721 + static void optee_ffa_get_version(struct tee_device *teedev, 722 + struct tee_ioctl_version_data *vers) 723 + { 724 + struct tee_ioctl_version_data v = { 725 + .impl_id = TEE_IMPL_ID_OPTEE, 726 + .impl_caps = TEE_OPTEE_CAP_TZ, 727 + .gen_caps = TEE_GEN_CAP_GP | TEE_GEN_CAP_REG_MEM | 728 + TEE_GEN_CAP_MEMREF_NULL, 729 + }; 730 + 731 + *vers = v; 732 + } 733 + 734 + static int optee_ffa_open(struct tee_context *ctx) 735 + { 736 + return optee_open(ctx, true); 737 + } 738 + 739 + static const struct tee_driver_ops optee_ffa_clnt_ops = { 740 + .get_version = optee_ffa_get_version, 741 + .open = optee_ffa_open, 742 + .release = optee_release, 743 + .open_session = optee_open_session, 744 + .close_session = optee_close_session, 745 + .invoke_func = 
optee_invoke_func, 746 + .cancel_req = optee_cancel_req, 747 + .shm_register = optee_ffa_shm_register, 748 + .shm_unregister = optee_ffa_shm_unregister, 749 + }; 750 + 751 + static const struct tee_desc optee_ffa_clnt_desc = { 752 + .name = DRIVER_NAME "-ffa-clnt", 753 + .ops = &optee_ffa_clnt_ops, 754 + .owner = THIS_MODULE, 755 + }; 756 + 757 + static const struct tee_driver_ops optee_ffa_supp_ops = { 758 + .get_version = optee_ffa_get_version, 759 + .open = optee_ffa_open, 760 + .release = optee_release_supp, 761 + .supp_recv = optee_supp_recv, 762 + .supp_send = optee_supp_send, 763 + .shm_register = optee_ffa_shm_register, /* same as for clnt ops */ 764 + .shm_unregister = optee_ffa_shm_unregister_supp, 765 + }; 766 + 767 + static const struct tee_desc optee_ffa_supp_desc = { 768 + .name = DRIVER_NAME "-ffa-supp", 769 + .ops = &optee_ffa_supp_ops, 770 + .owner = THIS_MODULE, 771 + .flags = TEE_DESC_PRIVILEGED, 772 + }; 773 + 774 + static const struct optee_ops optee_ffa_ops = { 775 + .do_call_with_arg = optee_ffa_do_call_with_arg, 776 + .to_msg_param = optee_ffa_to_msg_param, 777 + .from_msg_param = optee_ffa_from_msg_param, 778 + }; 779 + 780 + static void optee_ffa_remove(struct ffa_device *ffa_dev) 781 + { 782 + struct optee *optee = ffa_dev->dev.driver_data; 783 + 784 + optee_remove_common(optee); 785 + 786 + mutex_destroy(&optee->ffa.mutex); 787 + rhashtable_free_and_destroy(&optee->ffa.global_ids, rh_free_fn, NULL); 788 + 789 + kfree(optee); 790 + } 791 + 792 + static int optee_ffa_probe(struct ffa_device *ffa_dev) 793 + { 794 + const struct ffa_dev_ops *ffa_ops; 795 + unsigned int rpc_arg_count; 796 + struct tee_device *teedev; 797 + struct optee *optee; 798 + int rc; 799 + 800 + ffa_ops = ffa_dev_ops_get(ffa_dev); 801 + if (!ffa_ops) { 802 + pr_warn("failed \"method\" init: ffa\n"); 803 + return -ENOENT; 804 + } 805 + 806 + if (!optee_ffa_api_is_compatbile(ffa_dev, ffa_ops)) 807 + return -EINVAL; 808 + 809 + if (!optee_ffa_exchange_caps(ffa_dev, 
ffa_ops, &rpc_arg_count)) 810 + return -EINVAL; 811 + 812 + optee = kzalloc(sizeof(*optee), GFP_KERNEL); 813 + if (!optee) { 814 + rc = -ENOMEM; 815 + goto err; 816 + } 817 + optee->pool = optee_ffa_config_dyn_shm(); 818 + if (IS_ERR(optee->pool)) { 819 + rc = PTR_ERR(optee->pool); 820 + optee->pool = NULL; 821 + goto err; 822 + } 823 + 824 + optee->ops = &optee_ffa_ops; 825 + optee->ffa.ffa_dev = ffa_dev; 826 + optee->ffa.ffa_ops = ffa_ops; 827 + optee->rpc_arg_count = rpc_arg_count; 828 + 829 + teedev = tee_device_alloc(&optee_ffa_clnt_desc, NULL, optee->pool, 830 + optee); 831 + if (IS_ERR(teedev)) { 832 + rc = PTR_ERR(teedev); 833 + goto err; 834 + } 835 + optee->teedev = teedev; 836 + 837 + teedev = tee_device_alloc(&optee_ffa_supp_desc, NULL, optee->pool, 838 + optee); 839 + if (IS_ERR(teedev)) { 840 + rc = PTR_ERR(teedev); 841 + goto err; 842 + } 843 + optee->supp_teedev = teedev; 844 + 845 + rc = tee_device_register(optee->teedev); 846 + if (rc) 847 + goto err; 848 + 849 + rc = tee_device_register(optee->supp_teedev); 850 + if (rc) 851 + goto err; 852 + 853 + rc = rhashtable_init(&optee->ffa.global_ids, &shm_rhash_params); 854 + if (rc) 855 + goto err; 856 + mutex_init(&optee->ffa.mutex); 857 + mutex_init(&optee->call_queue.mutex); 858 + INIT_LIST_HEAD(&optee->call_queue.waiters); 859 + optee_wait_queue_init(&optee->wait_queue); 860 + optee_supp_init(&optee->supp); 861 + ffa_dev_set_drvdata(ffa_dev, optee); 862 + 863 + rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES); 864 + if (rc) { 865 + optee_ffa_remove(ffa_dev); 866 + return rc; 867 + } 868 + 869 + pr_info("initialized driver\n"); 870 + return 0; 871 + err: 872 + /* 873 + * tee_device_unregister() is safe to call even if the 874 + * devices hasn't been registered with 875 + * tee_device_register() yet. 
876 + */ 877 + tee_device_unregister(optee->supp_teedev); 878 + tee_device_unregister(optee->teedev); 879 + if (optee->pool) 880 + tee_shm_pool_free(optee->pool); 881 + kfree(optee); 882 + return rc; 883 + } 884 + 885 + static const struct ffa_device_id optee_ffa_device_id[] = { 886 + /* 486178e0-e7f8-11e3-bc5e0002a5d5c51b */ 887 + { UUID_INIT(0x486178e0, 0xe7f8, 0x11e3, 888 + 0xbc, 0x5e, 0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b) }, 889 + {} 890 + }; 891 + 892 + static struct ffa_driver optee_ffa_driver = { 893 + .name = "optee", 894 + .probe = optee_ffa_probe, 895 + .remove = optee_ffa_remove, 896 + .id_table = optee_ffa_device_id, 897 + }; 898 + 899 + int optee_ffa_abi_register(void) 900 + { 901 + if (IS_REACHABLE(CONFIG_ARM_FFA_TRANSPORT)) 902 + return ffa_register(&optee_ffa_driver); 903 + else 904 + return -EOPNOTSUPP; 905 + } 906 + 907 + void optee_ffa_abi_unregister(void) 908 + { 909 + if (IS_REACHABLE(CONFIG_ARM_FFA_TRANSPORT)) 910 + ffa_unregister(&optee_ffa_driver); 911 + }
+153
drivers/tee/optee/optee_ffa.h
··· 1 + /* SPDX-License-Identifier: BSD-2-Clause */ 2 + /* 3 + * Copyright (c) 2019-2021, Linaro Limited 4 + */ 5 + 6 + /* 7 + * This file is exported by OP-TEE and is kept in sync between secure world 8 + * and normal world drivers. We're using ARM FF-A 1.0 specification. 9 + */ 10 + 11 + #ifndef __OPTEE_FFA_H 12 + #define __OPTEE_FFA_H 13 + 14 + #include <linux/arm_ffa.h> 15 + 16 + /* 17 + * Normal world sends requests with FFA_MSG_SEND_DIRECT_REQ and 18 + * responses are returned with FFA_MSG_SEND_DIRECT_RESP for normal 19 + * messages. 20 + * 21 + * All requests with FFA_MSG_SEND_DIRECT_REQ and FFA_MSG_SEND_DIRECT_RESP 22 + * are using the AArch32 SMC calling convention with register usage as 23 + * defined in FF-A specification: 24 + * w0: Function ID (0x8400006F or 0x84000070) 25 + * w1: Source/Destination IDs 26 + * w2: Reserved (MBZ) 27 + * w3-w7: Implementation defined, free to be used below 28 + */ 29 + 30 + #define OPTEE_FFA_VERSION_MAJOR 1 31 + #define OPTEE_FFA_VERSION_MINOR 0 32 + 33 + #define OPTEE_FFA_BLOCKING_CALL(id) (id) 34 + #define OPTEE_FFA_YIELDING_CALL_BIT 31 35 + #define OPTEE_FFA_YIELDING_CALL(id) ((id) | BIT(OPTEE_FFA_YIELDING_CALL_BIT)) 36 + 37 + /* 38 + * Returns the API version implemented, currently follows the FF-A version. 39 + * Call register usage: 40 + * w3: Service ID, OPTEE_FFA_GET_API_VERSION 41 + * w4-w7: Not used (MBZ) 42 + * 43 + * Return register usage: 44 + * w3: OPTEE_FFA_VERSION_MAJOR 45 + * w4: OPTEE_FFA_VERSION_MINOR 46 + * w5-w7: Not used (MBZ) 47 + */ 48 + #define OPTEE_FFA_GET_API_VERSION OPTEE_FFA_BLOCKING_CALL(0) 49 + 50 + /* 51 + * Returns the revision of OP-TEE. 52 + * 53 + * Used by non-secure world to figure out which version of the Trusted OS 54 + * is installed. Note that the returned revision is the revision of the 55 + * Trusted OS, not of the API. 
56 + * 57 + * Call register usage: 58 + * w3: Service ID, OPTEE_FFA_GET_OS_VERSION 59 + * w4-w7: Unused (MBZ) 60 + * 61 + * Return register usage: 62 + * w3: CFG_OPTEE_REVISION_MAJOR 63 + * w4: CFG_OPTEE_REVISION_MINOR 64 + * w5: TEE_IMPL_GIT_SHA1 (or zero if not supported) 65 + */ 66 + #define OPTEE_FFA_GET_OS_VERSION OPTEE_FFA_BLOCKING_CALL(1) 67 + 68 + /* 69 + * Exchange capabilities between normal world and secure world. 70 + * 71 + * Currently there are no defined capabilities. When features are added new 72 + * capabilities may be added. 73 + * 74 + * Call register usage: 75 + * w3: Service ID, OPTEE_FFA_EXCHANGE_CAPABILITIES 76 + * w4-w7: Not used (MBZ) 77 + * 78 + * Return register usage: 79 + * w3: Error code, 0 on success 80 + * w4: Bit[7:0]: Number of parameters needed for RPC to be supplied 81 + * as the second MSG arg struct for 82 + * OPTEE_FFA_YIELDING_CALL_WITH_ARG. 83 + * Bit[31:8]: Reserved (MBZ) 84 + * w5-w7: Not used (MBZ) 85 + */ 86 + #define OPTEE_FFA_EXCHANGE_CAPABILITIES OPTEE_FFA_BLOCKING_CALL(2) 87 + 88 + /* 89 + * Unregister shared memory 90 + * 91 + * Call register usage: 92 + * w3: Service ID, OPTEE_FFA_YIELDING_CALL_UNREGISTER_SHM 93 + * w4: Shared memory handle, lower bits 94 + * w5: Shared memory handle, higher bits 95 + * w6-w7: Not used (MBZ) 96 + * 97 + * Return register usage: 98 + * w3: Error code, 0 on success 99 + * w4-w7: Not used (MBZ) 100 + */ 101 + #define OPTEE_FFA_UNREGISTER_SHM OPTEE_FFA_BLOCKING_CALL(3) 102 + 103 + /* 104 + * Call with struct optee_msg_arg as argument in the supplied shared memory 105 + * with a zero internal offset and normal cached memory attributes. 
106 + * Register usage: 107 + * w3: Service ID, OPTEE_FFA_YIELDING_CALL_WITH_ARG 108 + * w4: Lower 32 bits of a 64-bit Shared memory handle 109 + * w5: Upper 32 bits of a 64-bit Shared memory handle 110 + * w6: Offset into shared memory pointing to a struct optee_msg_arg 111 + * right after the parameters of this struct (at offset 112 + * OPTEE_MSG_GET_ARG_SIZE(num_params) follows a struct optee_msg_arg 113 + * for RPC, this struct has reserved space for the number of RPC 114 + * parameters as returned by OPTEE_FFA_EXCHANGE_CAPABILITIES. 115 + * w7: Not used (MBZ) 116 + * Resume from RPC. Register usage: 117 + * w3: Service ID, OPTEE_FFA_YIELDING_CALL_RESUME 118 + * w4-w6: Not used (MBZ) 119 + * w7: Resume info 120 + * 121 + * Normal return (yielding call is completed). Register usage: 122 + * w3: Error code, 0 on success 123 + * w4: OPTEE_FFA_YIELDING_CALL_RETURN_DONE 124 + * w5-w7: Not used (MBZ) 125 + * 126 + * RPC interrupt return (RPC from secure world). Register usage: 127 + * w3: Error code == 0 128 + * w4: Any defined RPC code but OPTEE_FFA_YIELDING_CALL_RETURN_DONE 129 + * w5-w6: Not used (MBZ) 130 + * w7: Resume info 131 + * 132 + * Possible error codes in register w3: 133 + * 0: Success 134 + * FFA_DENIED: w4 isn't one of OPTEE_FFA_YIELDING_CALL_START 135 + * OPTEE_FFA_YIELDING_CALL_RESUME 136 + * 137 + * Possible error codes for OPTEE_FFA_YIELDING_CALL_START, 138 + * FFA_BUSY: Number of OP-TEE OS threads exceeded, 139 + * try again later 140 + * FFA_DENIED: RPC shared memory object not found 141 + * FFA_INVALID_PARAMETER: Bad shared memory handle or offset into the memory 142 + * 143 + * Possible error codes for OPTEE_FFA_YIELDING_CALL_RESUME 144 + * FFA_INVALID_PARAMETER: Bad resume info 145 + */ 146 + #define OPTEE_FFA_YIELDING_CALL_WITH_ARG OPTEE_FFA_YIELDING_CALL(0) 147 + #define OPTEE_FFA_YIELDING_CALL_RESUME OPTEE_FFA_YIELDING_CALL(1) 148 + 149 + #define OPTEE_FFA_YIELDING_CALL_RETURN_DONE 0 150 + #define OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD 1 
151 + #define OPTEE_FFA_YIELDING_CALL_RETURN_INTERRUPT 2 152 + 153 + #endif /*__OPTEE_FFA_H*/
+26 -1
drivers/tee/optee/optee_msg.h
··· 28 28 #define OPTEE_MSG_ATTR_TYPE_RMEM_INPUT 0x5 29 29 #define OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT 0x6 30 30 #define OPTEE_MSG_ATTR_TYPE_RMEM_INOUT 0x7 31 + #define OPTEE_MSG_ATTR_TYPE_FMEM_INPUT OPTEE_MSG_ATTR_TYPE_RMEM_INPUT 32 + #define OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT 33 + #define OPTEE_MSG_ATTR_TYPE_FMEM_INOUT OPTEE_MSG_ATTR_TYPE_RMEM_INOUT 31 34 #define OPTEE_MSG_ATTR_TYPE_TMEM_INPUT 0x9 32 35 #define OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT 0xa 33 36 #define OPTEE_MSG_ATTR_TYPE_TMEM_INOUT 0xb ··· 99 96 */ 100 97 #define OPTEE_MSG_NONCONTIG_PAGE_SIZE 4096 101 98 99 + #define OPTEE_MSG_FMEM_INVALID_GLOBAL_ID 0xffffffffffffffff 100 + 102 101 /** 103 102 * struct optee_msg_param_tmem - temporary memory reference parameter 104 103 * @buf_ptr: Address of the buffer ··· 133 128 }; 134 129 135 130 /** 131 + * struct optee_msg_param_fmem - ffa memory reference parameter 132 + * @offs_low: Lower bits of offset into shared memory reference 133 + * @offs_high: Upper bits of offset into shared memory reference 134 + * @internal_offs: Internal offset into the first page of shared memory 135 + * reference 136 + * @size: Size of the buffer 137 + * @global_id: Global identifier of Shared memory 138 + */ 139 + struct optee_msg_param_fmem { 140 + u32 offs_low; 141 + u16 offs_high; 142 + u16 internal_offs; 143 + u64 size; 144 + u64 global_id; 145 + }; 146 + 147 + /** 136 148 * struct optee_msg_param_value - opaque value parameter 137 149 * 138 150 * Value parameters are passed unchecked between normal and secure world. ··· 165 143 * @attr: attributes 166 144 * @tmem: parameter by temporary memory reference 167 145 * @rmem: parameter by registered memory reference 146 + * @fmem: parameter by ffa registered memory reference 168 147 * @value: parameter by opaque value 169 148 * @octets: parameter by octet string 170 149 * 171 150 * @attr & OPTEE_MSG_ATTR_TYPE_MASK indicates if tmem, rmem or value is used in 172 151 * the union. 
OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value or octets, 173 152 * OPTEE_MSG_ATTR_TYPE_TMEM_* indicates @tmem and 174 - * OPTEE_MSG_ATTR_TYPE_RMEM_* indicates @rmem, 153 + * OPTEE_MSG_ATTR_TYPE_RMEM_* or the alias OPTEE_MSG_ATTR_TYPE_FMEM_* indicates 154 + * @rmem or @fmem depending on the conduit. 155 * OPTEE_MSG_ATTR_TYPE_NONE indicates that none of the members are used. 176 156 */ 177 157 struct optee_msg_param { ··· 181 157 union { 182 158 struct optee_msg_param_tmem tmem; 183 159 struct optee_msg_param_rmem rmem; 160 + struct optee_msg_param_fmem fmem; 184 161 struct optee_msg_param_value value; 185 162 u8 octets[24]; 186 163 } u;
+123 -37
drivers/tee/optee/optee_private.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0-only */ 2 2 /* 3 - * Copyright (c) 2015, Linaro Limited 3 + * Copyright (c) 2015-2021, Linaro Limited 4 4 */ 5 5 6 6 #ifndef OPTEE_PRIVATE_H 7 7 #define OPTEE_PRIVATE_H 8 8 9 9 #include <linux/arm-smccc.h> 10 + #include <linux/rhashtable.h> 10 11 #include <linux/semaphore.h> 11 12 #include <linux/tee_drv.h> 12 13 #include <linux/types.h> 13 14 #include "optee_msg.h" 15 + 16 + #define DRIVER_NAME "optee" 14 17 15 18 #define OPTEE_MAX_ARG_SIZE 1024 16 19 ··· 23 20 #define TEEC_ERROR_NOT_SUPPORTED 0xFFFF000A 24 21 #define TEEC_ERROR_COMMUNICATION 0xFFFF000E 25 22 #define TEEC_ERROR_OUT_OF_MEMORY 0xFFFF000C 23 + #define TEEC_ERROR_BUSY 0xFFFF000D 26 24 #define TEEC_ERROR_SHORT_BUFFER 0xFFFF0010 27 25 28 26 #define TEEC_ORIGIN_COMMS 0x00000002 ··· 32 28 unsigned long, unsigned long, unsigned long, 33 29 unsigned long, unsigned long, 34 30 struct arm_smccc_res *); 31 + 32 + struct optee_call_waiter { 33 + struct list_head list_node; 34 + struct completion c; 35 + }; 35 36 36 37 struct optee_call_queue { 37 38 /* Serializes access to this struct */ ··· 75 66 struct completion reqs_c; 76 67 }; 77 68 69 + struct optee_smc { 70 + optee_invoke_fn *invoke_fn; 71 + void *memremaped_shm; 72 + u32 sec_caps; 73 + }; 74 + 75 + /** 76 + * struct optee_ffa_data - FFA communication struct 77 + * @ffa_dev FFA device, contains the destination id, the id of 78 + * OP-TEE in secure world 79 + * @ffa_ops FFA operations 80 + * @mutex Serializes access to @global_ids 81 + * @global_ids FF-A shared memory global handle translation 82 + */ 83 + struct optee_ffa { 84 + struct ffa_device *ffa_dev; 85 + const struct ffa_dev_ops *ffa_ops; 86 + /* Serializes access to @global_ids */ 87 + struct mutex mutex; 88 + struct rhashtable global_ids; 89 + }; 90 + 91 + struct optee; 92 + 93 + /** 94 + * struct optee_ops - OP-TEE driver internal operations 95 + * @do_call_with_arg: enters OP-TEE in secure world 96 + * @to_msg_param: converts from struct tee_param to 
OPTEE_MSG parameters 97 + * @from_msg_param: converts from OPTEE_MSG parameters to struct tee_param 98 + * 99 + * These OPs are only supposed to be used internally in the OP-TEE driver 100 + * as a way of abstracting the different methods of entering OP-TEE in 101 + * secure world. 102 + */ 103 + struct optee_ops { 104 + int (*do_call_with_arg)(struct tee_context *ctx, 105 + struct tee_shm *shm_arg); 106 + int (*to_msg_param)(struct optee *optee, 107 + struct optee_msg_param *msg_params, 108 + size_t num_params, const struct tee_param *params); 109 + int (*from_msg_param)(struct optee *optee, struct tee_param *params, 110 + size_t num_params, 111 + const struct optee_msg_param *msg_params); 112 + }; 113 + 78 114 /** 79 115 * struct optee - main service struct 80 116 * @supp_teedev: supplicant device 117 + * @ops: internal callbacks for different ways to reach secure 118 + * world 81 119 * @teedev: client device 82 - * @invoke_fn: function to issue smc or hvc 120 + * @smc: specific to SMC ABI 121 + * @ffa: specific to FF-A ABI 83 122 * @call_queue: queue of threads waiting to call @invoke_fn 84 123 * @wait_queue: queue of threads from secure world waiting for a 85 124 * secure world sync object 86 125 * @supp: supplicant synchronization struct for RPC to supplicant 87 126 * @pool: shared memory pool 88 - * @memremaped_shm virtual address of memory in shared memory pool 89 - * @sec_caps: secure world capabilities defined by 90 - * OPTEE_SMC_SEC_CAP_* in optee_smc.h 127 + * @rpc_arg_count: If > 0 number of RPC parameters to make room for 91 128 * @scan_bus_done flag if device registration was already done. 
92 129 * @scan_bus_wq workqueue to scan optee bus and register optee drivers 93 130 * @scan_bus_work workq to scan optee bus and register optee drivers ··· 141 86 struct optee { 142 87 struct tee_device *supp_teedev; 143 88 struct tee_device *teedev; 144 - optee_invoke_fn *invoke_fn; 89 + const struct optee_ops *ops; 90 + union { 91 + struct optee_smc smc; 92 + struct optee_ffa ffa; 93 + }; 145 94 struct optee_call_queue call_queue; 146 95 struct optee_wait_queue wait_queue; 147 96 struct optee_supp supp; 148 97 struct tee_shm_pool *pool; 149 - void *memremaped_shm; 150 - u32 sec_caps; 98 + unsigned int rpc_arg_count; 151 99 bool scan_bus_done; 152 100 struct workqueue_struct *scan_bus_wq; 153 101 struct work_struct scan_bus_work; ··· 185 127 size_t num_entries; 186 128 }; 187 129 188 - void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param, 189 - struct optee_call_ctx *call_ctx); 190 - void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx); 191 - 192 130 void optee_wait_queue_init(struct optee_wait_queue *wq); 193 131 void optee_wait_queue_exit(struct optee_wait_queue *wq); 194 132 ··· 202 148 int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params, 203 149 struct tee_param *param); 204 150 205 - u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg); 206 151 int optee_open_session(struct tee_context *ctx, 207 152 struct tee_ioctl_open_session_arg *arg, 208 153 struct tee_param *param); 154 + int optee_close_session_helper(struct tee_context *ctx, u32 session); 209 155 int optee_close_session(struct tee_context *ctx, u32 session); 210 156 int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg, 211 157 struct tee_param *param); 212 158 int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session); 213 159 214 - void optee_enable_shm_cache(struct optee *optee); 215 - void optee_disable_shm_cache(struct optee *optee); 216 - void optee_disable_unmapped_shm_cache(struct optee 
*optee); 217 - 218 - int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm, 219 - struct page **pages, size_t num_pages, 220 - unsigned long start); 221 - int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm); 222 - 223 - int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm, 224 - struct page **pages, size_t num_pages, 225 - unsigned long start); 226 - int optee_shm_unregister_supp(struct tee_context *ctx, struct tee_shm *shm); 227 - 228 - int optee_from_msg_param(struct tee_param *params, size_t num_params, 229 - const struct optee_msg_param *msg_params); 230 - int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params, 231 - const struct tee_param *params); 232 - 233 - u64 *optee_allocate_pages_list(size_t num_entries); 234 - void optee_free_pages_list(void *array, size_t num_entries); 235 - void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages, 236 - size_t page_offset); 237 - 238 160 #define PTA_CMD_GET_DEVICES 0x0 239 161 #define PTA_CMD_GET_DEVICES_SUPP 0x1 240 162 int optee_enumerate_devices(u32 func); 163 + void optee_unregister_devices(void); 164 + 165 + int optee_pool_op_alloc_helper(struct tee_shm_pool_mgr *poolm, 166 + struct tee_shm *shm, size_t size, 167 + int (*shm_register)(struct tee_context *ctx, 168 + struct tee_shm *shm, 169 + struct page **pages, 170 + size_t num_pages, 171 + unsigned long start)); 172 + 173 + 174 + void optee_remove_common(struct optee *optee); 175 + int optee_open(struct tee_context *ctx, bool cap_memref_null); 176 + void optee_release(struct tee_context *ctx); 177 + void optee_release_supp(struct tee_context *ctx); 178 + 179 + static inline void optee_from_msg_param_value(struct tee_param *p, u32 attr, 180 + const struct optee_msg_param *mp) 181 + { 182 + p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT + 183 + attr - OPTEE_MSG_ATTR_TYPE_VALUE_INPUT; 184 + p->u.value.a = mp->u.value.a; 185 + p->u.value.b = mp->u.value.b; 186 + p->u.value.c 
= mp->u.value.c; 187 + } 188 + 189 + static inline void optee_to_msg_param_value(struct optee_msg_param *mp, 190 + const struct tee_param *p) 191 + { 192 + mp->attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT + p->attr - 193 + TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT; 194 + mp->u.value.a = p->u.value.a; 195 + mp->u.value.b = p->u.value.b; 196 + mp->u.value.c = p->u.value.c; 197 + } 198 + 199 + void optee_cq_wait_init(struct optee_call_queue *cq, 200 + struct optee_call_waiter *w); 201 + void optee_cq_wait_for_completion(struct optee_call_queue *cq, 202 + struct optee_call_waiter *w); 203 + void optee_cq_wait_final(struct optee_call_queue *cq, 204 + struct optee_call_waiter *w); 205 + int optee_check_mem_type(unsigned long start, size_t num_pages); 206 + struct tee_shm *optee_get_msg_arg(struct tee_context *ctx, size_t num_params, 207 + struct optee_msg_arg **msg_arg); 208 + 209 + struct tee_shm *optee_rpc_cmd_alloc_suppl(struct tee_context *ctx, size_t sz); 210 + void optee_rpc_cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm); 211 + void optee_rpc_cmd(struct tee_context *ctx, struct optee *optee, 212 + struct optee_msg_arg *arg); 241 213 242 214 /* 243 215 * Small helpers ··· 279 199 *reg0 = val >> 32; 280 200 *reg1 = val; 281 201 } 202 + 203 + /* Registration of the ABIs */ 204 + int optee_smc_abi_register(void); 205 + void optee_smc_abi_unregister(void); 206 + int optee_ffa_abi_register(void); 207 + void optee_ffa_abi_unregister(void); 282 208 283 209 #endif /*OPTEE_PRIVATE_H*/
+16 -221
drivers/tee/optee/rpc.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 2 /* 3 - * Copyright (c) 2015-2016, Linaro Limited 3 + * Copyright (c) 2015-2021, Linaro Limited 4 4 */ 5 5 6 6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 7 7 8 8 #include <linux/delay.h> 9 - #include <linux/device.h> 10 9 #include <linux/i2c.h> 11 10 #include <linux/slab.h> 12 11 #include <linux/tee_drv.h> 13 12 #include "optee_private.h" 14 - #include "optee_smc.h" 15 13 #include "optee_rpc_cmd.h" 16 14 17 15 struct wq_entry { ··· 53 55 static void handle_rpc_func_cmd_i2c_transfer(struct tee_context *ctx, 54 56 struct optee_msg_arg *arg) 55 57 { 58 + struct optee *optee = tee_get_drvdata(ctx->teedev); 56 59 struct tee_param *params; 57 60 struct i2c_adapter *adapter; 58 61 struct i2c_msg msg = { }; ··· 78 79 return; 79 80 } 80 81 81 - if (optee_from_msg_param(params, arg->num_params, arg->params)) 82 + if (optee->ops->from_msg_param(optee, params, arg->num_params, 83 + arg->params)) 82 84 goto bad; 83 85 84 86 for (i = 0; i < arg->num_params; i++) { ··· 122 122 arg->ret = TEEC_ERROR_COMMUNICATION; 123 123 } else { 124 124 params[3].u.value.a = msg.len; 125 - if (optee_to_msg_param(arg->params, arg->num_params, params)) 125 + if (optee->ops->to_msg_param(optee, arg->params, 126 + arg->num_params, params)) 126 127 arg->ret = TEEC_ERROR_BAD_PARAMETERS; 127 128 else 128 129 arg->ret = TEEC_SUCCESS; ··· 235 234 arg->ret = TEEC_ERROR_BAD_PARAMETERS; 236 235 } 237 236 238 - static void handle_rpc_supp_cmd(struct tee_context *ctx, 237 + static void handle_rpc_supp_cmd(struct tee_context *ctx, struct optee *optee, 239 238 struct optee_msg_arg *arg) 240 239 { 241 240 struct tee_param *params; ··· 249 248 return; 250 249 } 251 250 252 - if (optee_from_msg_param(params, arg->num_params, arg->params)) { 251 + if (optee->ops->from_msg_param(optee, params, arg->num_params, 252 + arg->params)) { 253 253 arg->ret = TEEC_ERROR_BAD_PARAMETERS; 254 254 goto out; 255 255 } 256 256 257 257 arg->ret = optee_supp_thrd_req(ctx, 
arg->cmd, arg->num_params, params); 258 258 259 - if (optee_to_msg_param(arg->params, arg->num_params, params)) 259 + if (optee->ops->to_msg_param(optee, arg->params, arg->num_params, 260 + params)) 260 261 arg->ret = TEEC_ERROR_BAD_PARAMETERS; 261 262 out: 262 263 kfree(params); 263 264 } 264 265 265 - static struct tee_shm *cmd_alloc_suppl(struct tee_context *ctx, size_t sz) 266 + struct tee_shm *optee_rpc_cmd_alloc_suppl(struct tee_context *ctx, size_t sz) 266 267 { 267 268 u32 ret; 268 269 struct tee_param param; ··· 287 284 return shm; 288 285 } 289 286 290 - static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx, 291 - struct optee_msg_arg *arg, 292 - struct optee_call_ctx *call_ctx) 293 - { 294 - phys_addr_t pa; 295 - struct tee_shm *shm; 296 - size_t sz; 297 - size_t n; 298 - 299 - arg->ret_origin = TEEC_ORIGIN_COMMS; 300 - 301 - if (!arg->num_params || 302 - arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) { 303 - arg->ret = TEEC_ERROR_BAD_PARAMETERS; 304 - return; 305 - } 306 - 307 - for (n = 1; n < arg->num_params; n++) { 308 - if (arg->params[n].attr != OPTEE_MSG_ATTR_TYPE_NONE) { 309 - arg->ret = TEEC_ERROR_BAD_PARAMETERS; 310 - return; 311 - } 312 - } 313 - 314 - sz = arg->params[0].u.value.b; 315 - switch (arg->params[0].u.value.a) { 316 - case OPTEE_RPC_SHM_TYPE_APPL: 317 - shm = cmd_alloc_suppl(ctx, sz); 318 - break; 319 - case OPTEE_RPC_SHM_TYPE_KERNEL: 320 - shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV); 321 - break; 322 - default: 323 - arg->ret = TEEC_ERROR_BAD_PARAMETERS; 324 - return; 325 - } 326 - 327 - if (IS_ERR(shm)) { 328 - arg->ret = TEEC_ERROR_OUT_OF_MEMORY; 329 - return; 330 - } 331 - 332 - if (tee_shm_get_pa(shm, 0, &pa)) { 333 - arg->ret = TEEC_ERROR_BAD_PARAMETERS; 334 - goto bad; 335 - } 336 - 337 - sz = tee_shm_get_size(shm); 338 - 339 - if (tee_shm_is_registered(shm)) { 340 - struct page **pages; 341 - u64 *pages_list; 342 - size_t page_num; 343 - 344 - pages = tee_shm_get_pages(shm, &page_num); 
345 - if (!pages || !page_num) { 346 - arg->ret = TEEC_ERROR_OUT_OF_MEMORY; 347 - goto bad; 348 - } 349 - 350 - pages_list = optee_allocate_pages_list(page_num); 351 - if (!pages_list) { 352 - arg->ret = TEEC_ERROR_OUT_OF_MEMORY; 353 - goto bad; 354 - } 355 - 356 - call_ctx->pages_list = pages_list; 357 - call_ctx->num_entries = page_num; 358 - 359 - arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT | 360 - OPTEE_MSG_ATTR_NONCONTIG; 361 - /* 362 - * In the least bits of u.tmem.buf_ptr we store buffer offset 363 - * from 4k page, as described in OP-TEE ABI. 364 - */ 365 - arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) | 366 - (tee_shm_get_page_offset(shm) & 367 - (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1)); 368 - arg->params[0].u.tmem.size = tee_shm_get_size(shm); 369 - arg->params[0].u.tmem.shm_ref = (unsigned long)shm; 370 - 371 - optee_fill_pages_list(pages_list, pages, page_num, 372 - tee_shm_get_page_offset(shm)); 373 - } else { 374 - arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT; 375 - arg->params[0].u.tmem.buf_ptr = pa; 376 - arg->params[0].u.tmem.size = sz; 377 - arg->params[0].u.tmem.shm_ref = (unsigned long)shm; 378 - } 379 - 380 - arg->ret = TEEC_SUCCESS; 381 - return; 382 - bad: 383 - tee_shm_free(shm); 384 - } 385 - 386 - static void cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm) 287 + void optee_rpc_cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm) 387 288 { 388 289 struct tee_param param; 389 290 ··· 312 405 optee_supp_thrd_req(ctx, OPTEE_RPC_CMD_SHM_FREE, 1, &param); 313 406 } 314 407 315 - static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx, 316 - struct optee_msg_arg *arg) 408 + void optee_rpc_cmd(struct tee_context *ctx, struct optee *optee, 409 + struct optee_msg_arg *arg) 317 410 { 318 - struct tee_shm *shm; 319 - 320 - arg->ret_origin = TEEC_ORIGIN_COMMS; 321 - 322 - if (arg->num_params != 1 || 323 - arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) { 324 - arg->ret = 
TEEC_ERROR_BAD_PARAMETERS; 325 - return; 326 - } 327 - 328 - shm = (struct tee_shm *)(unsigned long)arg->params[0].u.value.b; 329 - switch (arg->params[0].u.value.a) { 330 - case OPTEE_RPC_SHM_TYPE_APPL: 331 - cmd_free_suppl(ctx, shm); 332 - break; 333 - case OPTEE_RPC_SHM_TYPE_KERNEL: 334 - tee_shm_free(shm); 335 - break; 336 - default: 337 - arg->ret = TEEC_ERROR_BAD_PARAMETERS; 338 - } 339 - arg->ret = TEEC_SUCCESS; 340 - } 341 - 342 - static void free_pages_list(struct optee_call_ctx *call_ctx) 343 - { 344 - if (call_ctx->pages_list) { 345 - optee_free_pages_list(call_ctx->pages_list, 346 - call_ctx->num_entries); 347 - call_ctx->pages_list = NULL; 348 - call_ctx->num_entries = 0; 349 - } 350 - } 351 - 352 - void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx) 353 - { 354 - free_pages_list(call_ctx); 355 - } 356 - 357 - static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee, 358 - struct tee_shm *shm, 359 - struct optee_call_ctx *call_ctx) 360 - { 361 - struct optee_msg_arg *arg; 362 - 363 - arg = tee_shm_get_va(shm, 0); 364 - if (IS_ERR(arg)) { 365 - pr_err("%s: tee_shm_get_va %p failed\n", __func__, shm); 366 - return; 367 - } 368 - 369 411 switch (arg->cmd) { 370 412 case OPTEE_RPC_CMD_GET_TIME: 371 413 handle_rpc_func_cmd_get_time(arg); ··· 325 469 case OPTEE_RPC_CMD_SUSPEND: 326 470 handle_rpc_func_cmd_wait(arg); 327 471 break; 328 - case OPTEE_RPC_CMD_SHM_ALLOC: 329 - free_pages_list(call_ctx); 330 - handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx); 331 - break; 332 - case OPTEE_RPC_CMD_SHM_FREE: 333 - handle_rpc_func_cmd_shm_free(ctx, arg); 334 - break; 335 472 case OPTEE_RPC_CMD_I2C_TRANSFER: 336 473 handle_rpc_func_cmd_i2c_transfer(ctx, arg); 337 474 break; 338 475 default: 339 - handle_rpc_supp_cmd(ctx, arg); 476 + handle_rpc_supp_cmd(ctx, optee, arg); 340 477 } 341 478 } 342 479 343 - /** 344 - * optee_handle_rpc() - handle RPC from secure world 345 - * @ctx: context doing the RPC 346 - * @param: value of registers for 
the RPC 347 - * @call_ctx: call context. Preserved during one OP-TEE invocation 348 - * 349 - * Result of RPC is written back into @param. 350 - */ 351 - void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param, 352 - struct optee_call_ctx *call_ctx) 353 - { 354 - struct tee_device *teedev = ctx->teedev; 355 - struct optee *optee = tee_get_drvdata(teedev); 356 - struct tee_shm *shm; 357 - phys_addr_t pa; 358 480 359 - switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) { 360 - case OPTEE_SMC_RPC_FUNC_ALLOC: 361 - shm = tee_shm_alloc(ctx, param->a1, 362 - TEE_SHM_MAPPED | TEE_SHM_PRIV); 363 - if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) { 364 - reg_pair_from_64(&param->a1, &param->a2, pa); 365 - reg_pair_from_64(&param->a4, &param->a5, 366 - (unsigned long)shm); 367 - } else { 368 - param->a1 = 0; 369 - param->a2 = 0; 370 - param->a4 = 0; 371 - param->a5 = 0; 372 - } 373 - break; 374 - case OPTEE_SMC_RPC_FUNC_FREE: 375 - shm = reg_pair_to_ptr(param->a1, param->a2); 376 - tee_shm_free(shm); 377 - break; 378 - case OPTEE_SMC_RPC_FUNC_FOREIGN_INTR: 379 - /* 380 - * A foreign interrupt was raised while secure world was 381 - * executing, since they are handled in Linux a dummy RPC is 382 - * performed to let Linux take the interrupt through the normal 383 - * vector. 384 - */ 385 - break; 386 - case OPTEE_SMC_RPC_FUNC_CMD: 387 - shm = reg_pair_to_ptr(param->a1, param->a2); 388 - handle_rpc_func_cmd(ctx, optee, shm, call_ctx); 389 - break; 390 - default: 391 - pr_warn("Unknown RPC func 0x%x\n", 392 - (u32)OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)); 393 - break; 394 - } 395 - 396 - param->a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC; 397 - }
-101
drivers/tee/optee/shm_pool.c
··· 1 - // SPDX-License-Identifier: GPL-2.0-only 2 - /* 3 - * Copyright (c) 2015, Linaro Limited 4 - * Copyright (c) 2017, EPAM Systems 5 - */ 6 - #include <linux/device.h> 7 - #include <linux/dma-buf.h> 8 - #include <linux/genalloc.h> 9 - #include <linux/slab.h> 10 - #include <linux/tee_drv.h> 11 - #include "optee_private.h" 12 - #include "optee_smc.h" 13 - #include "shm_pool.h" 14 - 15 - static int pool_op_alloc(struct tee_shm_pool_mgr *poolm, 16 - struct tee_shm *shm, size_t size) 17 - { 18 - unsigned int order = get_order(size); 19 - struct page *page; 20 - int rc = 0; 21 - 22 - page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); 23 - if (!page) 24 - return -ENOMEM; 25 - 26 - shm->kaddr = page_address(page); 27 - shm->paddr = page_to_phys(page); 28 - shm->size = PAGE_SIZE << order; 29 - 30 - /* 31 - * Shared memory private to the OP-TEE driver doesn't need 32 - * to be registered with OP-TEE. 33 - */ 34 - if (!(shm->flags & TEE_SHM_PRIV)) { 35 - unsigned int nr_pages = 1 << order, i; 36 - struct page **pages; 37 - 38 - pages = kcalloc(nr_pages, sizeof(pages), GFP_KERNEL); 39 - if (!pages) { 40 - rc = -ENOMEM; 41 - goto err; 42 - } 43 - 44 - for (i = 0; i < nr_pages; i++) { 45 - pages[i] = page; 46 - page++; 47 - } 48 - 49 - shm->flags |= TEE_SHM_REGISTER; 50 - rc = optee_shm_register(shm->ctx, shm, pages, nr_pages, 51 - (unsigned long)shm->kaddr); 52 - kfree(pages); 53 - if (rc) 54 - goto err; 55 - } 56 - 57 - return 0; 58 - 59 - err: 60 - __free_pages(page, order); 61 - return rc; 62 - } 63 - 64 - static void pool_op_free(struct tee_shm_pool_mgr *poolm, 65 - struct tee_shm *shm) 66 - { 67 - if (!(shm->flags & TEE_SHM_PRIV)) 68 - optee_shm_unregister(shm->ctx, shm); 69 - 70 - free_pages((unsigned long)shm->kaddr, get_order(shm->size)); 71 - shm->kaddr = NULL; 72 - } 73 - 74 - static void pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm) 75 - { 76 - kfree(poolm); 77 - } 78 - 79 - static const struct tee_shm_pool_mgr_ops pool_ops = { 80 - .alloc = 
pool_op_alloc, 81 - .free = pool_op_free, 82 - .destroy_poolmgr = pool_op_destroy_poolmgr, 83 - }; 84 - 85 - /** 86 - * optee_shm_pool_alloc_pages() - create page-based allocator pool 87 - * 88 - * This pool is used when OP-TEE supports dymanic SHM. In this case 89 - * command buffers and such are allocated from kernel's own memory. 90 - */ 91 - struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void) 92 - { 93 - struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); 94 - 95 - if (!mgr) 96 - return ERR_PTR(-ENOMEM); 97 - 98 - mgr->ops = &pool_ops; 99 - 100 - return mgr; 101 - }
-14
drivers/tee/optee/shm_pool.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0-only */ 2 - /* 3 - * Copyright (c) 2015, Linaro Limited 4 - * Copyright (c) 2016, EPAM Systems 5 - */ 6 - 7 - #ifndef SHM_POOL_H 8 - #define SHM_POOL_H 9 - 10 - #include <linux/tee_drv.h> 11 - 12 - struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void); 13 - 14 - #endif
+1361
drivers/tee/optee/smc_abi.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Copyright (c) 2015-2021, Linaro Limited 4 + * Copyright (c) 2016, EPAM Systems 5 + */ 6 + 7 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 8 + 9 + #include <linux/arm-smccc.h> 10 + #include <linux/errno.h> 11 + #include <linux/io.h> 12 + #include <linux/sched.h> 13 + #include <linux/module.h> 14 + #include <linux/of.h> 15 + #include <linux/of_platform.h> 16 + #include <linux/platform_device.h> 17 + #include <linux/slab.h> 18 + #include <linux/string.h> 19 + #include <linux/tee_drv.h> 20 + #include <linux/types.h> 21 + #include <linux/workqueue.h> 22 + #include "optee_private.h" 23 + #include "optee_smc.h" 24 + #include "optee_rpc_cmd.h" 25 + #define CREATE_TRACE_POINTS 26 + #include "optee_trace.h" 27 + 28 + /* 29 + * This file implement the SMC ABI used when communicating with secure world 30 + * OP-TEE OS via raw SMCs. 31 + * This file is divided into the following sections: 32 + * 1. Convert between struct tee_param and struct optee_msg_param 33 + * 2. Low level support functions to register shared memory in secure world 34 + * 3. Dynamic shared memory pool based on alloc_pages() 35 + * 4. Do a normal scheduled call into secure world 36 + * 5. Driver initialization. 37 + */ 38 + 39 + #define OPTEE_SHM_NUM_PRIV_PAGES CONFIG_OPTEE_SHM_NUM_PRIV_PAGES 40 + 41 + /* 42 + * 1. Convert between struct tee_param and struct optee_msg_param 43 + * 44 + * optee_from_msg_param() and optee_to_msg_param() are the main 45 + * functions. 
46 + */ 47 + 48 + static int from_msg_param_tmp_mem(struct tee_param *p, u32 attr, 49 + const struct optee_msg_param *mp) 50 + { 51 + struct tee_shm *shm; 52 + phys_addr_t pa; 53 + int rc; 54 + 55 + p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT + 56 + attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT; 57 + p->u.memref.size = mp->u.tmem.size; 58 + shm = (struct tee_shm *)(unsigned long)mp->u.tmem.shm_ref; 59 + if (!shm) { 60 + p->u.memref.shm_offs = 0; 61 + p->u.memref.shm = NULL; 62 + return 0; 63 + } 64 + 65 + rc = tee_shm_get_pa(shm, 0, &pa); 66 + if (rc) 67 + return rc; 68 + 69 + p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa; 70 + p->u.memref.shm = shm; 71 + 72 + /* Check that the memref is covered by the shm object */ 73 + if (p->u.memref.size) { 74 + size_t o = p->u.memref.shm_offs + 75 + p->u.memref.size - 1; 76 + 77 + rc = tee_shm_get_pa(shm, o, NULL); 78 + if (rc) 79 + return rc; 80 + } 81 + 82 + return 0; 83 + } 84 + 85 + static void from_msg_param_reg_mem(struct tee_param *p, u32 attr, 86 + const struct optee_msg_param *mp) 87 + { 88 + struct tee_shm *shm; 89 + 90 + p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT + 91 + attr - OPTEE_MSG_ATTR_TYPE_RMEM_INPUT; 92 + p->u.memref.size = mp->u.rmem.size; 93 + shm = (struct tee_shm *)(unsigned long)mp->u.rmem.shm_ref; 94 + 95 + if (shm) { 96 + p->u.memref.shm_offs = mp->u.rmem.offs; 97 + p->u.memref.shm = shm; 98 + } else { 99 + p->u.memref.shm_offs = 0; 100 + p->u.memref.shm = NULL; 101 + } 102 + } 103 + 104 + /** 105 + * optee_from_msg_param() - convert from OPTEE_MSG parameters to 106 + * struct tee_param 107 + * @optee: main service struct 108 + * @params: subsystem internal parameter representation 109 + * @num_params: number of elements in the parameter arrays 110 + * @msg_params: OPTEE_MSG parameters 111 + * Returns 0 on success or <0 on failure 112 + */ 113 + static int optee_from_msg_param(struct optee *optee, struct tee_param *params, 114 + size_t num_params, 115 + const struct optee_msg_param *msg_params) 
116 + { 117 + int rc; 118 + size_t n; 119 + 120 + for (n = 0; n < num_params; n++) { 121 + struct tee_param *p = params + n; 122 + const struct optee_msg_param *mp = msg_params + n; 123 + u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK; 124 + 125 + switch (attr) { 126 + case OPTEE_MSG_ATTR_TYPE_NONE: 127 + p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE; 128 + memset(&p->u, 0, sizeof(p->u)); 129 + break; 130 + case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT: 131 + case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT: 132 + case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT: 133 + optee_from_msg_param_value(p, attr, mp); 134 + break; 135 + case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT: 136 + case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT: 137 + case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT: 138 + rc = from_msg_param_tmp_mem(p, attr, mp); 139 + if (rc) 140 + return rc; 141 + break; 142 + case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT: 143 + case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT: 144 + case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT: 145 + from_msg_param_reg_mem(p, attr, mp); 146 + break; 147 + 148 + default: 149 + return -EINVAL; 150 + } 151 + } 152 + return 0; 153 + } 154 + 155 + static int to_msg_param_tmp_mem(struct optee_msg_param *mp, 156 + const struct tee_param *p) 157 + { 158 + int rc; 159 + phys_addr_t pa; 160 + 161 + mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + p->attr - 162 + TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT; 163 + 164 + mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm; 165 + mp->u.tmem.size = p->u.memref.size; 166 + 167 + if (!p->u.memref.shm) { 168 + mp->u.tmem.buf_ptr = 0; 169 + return 0; 170 + } 171 + 172 + rc = tee_shm_get_pa(p->u.memref.shm, p->u.memref.shm_offs, &pa); 173 + if (rc) 174 + return rc; 175 + 176 + mp->u.tmem.buf_ptr = pa; 177 + mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED << 178 + OPTEE_MSG_ATTR_CACHE_SHIFT; 179 + 180 + return 0; 181 + } 182 + 183 + static int to_msg_param_reg_mem(struct optee_msg_param *mp, 184 + const struct tee_param *p) 185 + { 186 + mp->attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + p->attr - 187 + 
TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT; 188 + 189 + mp->u.rmem.shm_ref = (unsigned long)p->u.memref.shm; 190 + mp->u.rmem.size = p->u.memref.size; 191 + mp->u.rmem.offs = p->u.memref.shm_offs; 192 + return 0; 193 + } 194 + 195 + /** 196 + * optee_to_msg_param() - convert from struct tee_params to OPTEE_MSG parameters 197 + * @optee: main service struct 198 + * @msg_params: OPTEE_MSG parameters 199 + * @num_params: number of elements in the parameter arrays 200 + * @params: subsystem itnernal parameter representation 201 + * Returns 0 on success or <0 on failure 202 + */ 203 + static int optee_to_msg_param(struct optee *optee, 204 + struct optee_msg_param *msg_params, 205 + size_t num_params, const struct tee_param *params) 206 + { 207 + int rc; 208 + size_t n; 209 + 210 + for (n = 0; n < num_params; n++) { 211 + const struct tee_param *p = params + n; 212 + struct optee_msg_param *mp = msg_params + n; 213 + 214 + switch (p->attr) { 215 + case TEE_IOCTL_PARAM_ATTR_TYPE_NONE: 216 + mp->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE; 217 + memset(&mp->u, 0, sizeof(mp->u)); 218 + break; 219 + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT: 220 + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT: 221 + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: 222 + optee_to_msg_param_value(mp, p); 223 + break; 224 + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT: 225 + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: 226 + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: 227 + if (tee_shm_is_registered(p->u.memref.shm)) 228 + rc = to_msg_param_reg_mem(mp, p); 229 + else 230 + rc = to_msg_param_tmp_mem(mp, p); 231 + if (rc) 232 + return rc; 233 + break; 234 + default: 235 + return -EINVAL; 236 + } 237 + } 238 + return 0; 239 + } 240 + 241 + /* 242 + * 2. Low level support functions to register shared memory in secure world 243 + * 244 + * Functions to enable/disable shared memory caching in secure world, that 245 + * is, lazy freeing of previously allocated shared memory. 
Freeing is 246 + * performed when a request has been compled. 247 + * 248 + * Functions to register and unregister shared memory both for normal 249 + * clients and for tee-supplicant. 250 + */ 251 + 252 + /** 253 + * optee_enable_shm_cache() - Enables caching of some shared memory allocation 254 + * in OP-TEE 255 + * @optee: main service struct 256 + */ 257 + static void optee_enable_shm_cache(struct optee *optee) 258 + { 259 + struct optee_call_waiter w; 260 + 261 + /* We need to retry until secure world isn't busy. */ 262 + optee_cq_wait_init(&optee->call_queue, &w); 263 + while (true) { 264 + struct arm_smccc_res res; 265 + 266 + optee->smc.invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE, 267 + 0, 0, 0, 0, 0, 0, 0, &res); 268 + if (res.a0 == OPTEE_SMC_RETURN_OK) 269 + break; 270 + optee_cq_wait_for_completion(&optee->call_queue, &w); 271 + } 272 + optee_cq_wait_final(&optee->call_queue, &w); 273 + } 274 + 275 + /** 276 + * __optee_disable_shm_cache() - Disables caching of some shared memory 277 + * allocation in OP-TEE 278 + * @optee: main service struct 279 + * @is_mapped: true if the cached shared memory addresses were mapped by this 280 + * kernel, are safe to dereference, and should be freed 281 + */ 282 + static void __optee_disable_shm_cache(struct optee *optee, bool is_mapped) 283 + { 284 + struct optee_call_waiter w; 285 + 286 + /* We need to retry until secure world isn't busy. */ 287 + optee_cq_wait_init(&optee->call_queue, &w); 288 + while (true) { 289 + union { 290 + struct arm_smccc_res smccc; 291 + struct optee_smc_disable_shm_cache_result result; 292 + } res; 293 + 294 + optee->smc.invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE, 295 + 0, 0, 0, 0, 0, 0, 0, &res.smccc); 296 + if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL) 297 + break; /* All shm's freed */ 298 + if (res.result.status == OPTEE_SMC_RETURN_OK) { 299 + struct tee_shm *shm; 300 + 301 + /* 302 + * Shared memory references that were not mapped by 303 + * this kernel must be ignored to prevent a crash. 
304 + */ 305 + if (!is_mapped) 306 + continue; 307 + 308 + shm = reg_pair_to_ptr(res.result.shm_upper32, 309 + res.result.shm_lower32); 310 + tee_shm_free(shm); 311 + } else { 312 + optee_cq_wait_for_completion(&optee->call_queue, &w); 313 + } 314 + } 315 + optee_cq_wait_final(&optee->call_queue, &w); 316 + } 317 + 318 + /** 319 + * optee_disable_shm_cache() - Disables caching of mapped shared memory 320 + * allocations in OP-TEE 321 + * @optee: main service struct 322 + */ 323 + static void optee_disable_shm_cache(struct optee *optee) 324 + { 325 + return __optee_disable_shm_cache(optee, true); 326 + } 327 + 328 + /** 329 + * optee_disable_unmapped_shm_cache() - Disables caching of shared memory 330 + * allocations in OP-TEE which are not 331 + * currently mapped 332 + * @optee: main service struct 333 + */ 334 + static void optee_disable_unmapped_shm_cache(struct optee *optee) 335 + { 336 + return __optee_disable_shm_cache(optee, false); 337 + } 338 + 339 + #define PAGELIST_ENTRIES_PER_PAGE \ 340 + ((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1) 341 + 342 + /* 343 + * The final entry in each pagelist page is a pointer to the next 344 + * pagelist page. 345 + */ 346 + static size_t get_pages_list_size(size_t num_entries) 347 + { 348 + int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE); 349 + 350 + return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE; 351 + } 352 + 353 + static u64 *optee_allocate_pages_list(size_t num_entries) 354 + { 355 + return alloc_pages_exact(get_pages_list_size(num_entries), GFP_KERNEL); 356 + } 357 + 358 + static void optee_free_pages_list(void *list, size_t num_entries) 359 + { 360 + free_pages_exact(list, get_pages_list_size(num_entries)); 361 + } 362 + 363 + /** 364 + * optee_fill_pages_list() - write list of user pages to given shared 365 + * buffer. 
366 + * 367 + * @dst: page-aligned buffer where list of pages will be stored 368 + * @pages: array of pages that represents shared buffer 369 + * @num_pages: number of entries in @pages 370 + * @page_offset: offset of user buffer from page start 371 + * 372 + * @dst should be big enough to hold list of user page addresses and 373 + * links to the next pages of buffer 374 + */ 375 + static void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages, 376 + size_t page_offset) 377 + { 378 + int n = 0; 379 + phys_addr_t optee_page; 380 + /* 381 + * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h 382 + * for details. 383 + */ 384 + struct { 385 + u64 pages_list[PAGELIST_ENTRIES_PER_PAGE]; 386 + u64 next_page_data; 387 + } *pages_data; 388 + 389 + /* 390 + * Currently OP-TEE uses 4k page size and it does not looks 391 + * like this will change in the future. On other hand, there are 392 + * no know ARM architectures with page size < 4k. 393 + * Thus the next built assert looks redundant. But the following 394 + * code heavily relies on this assumption, so it is better be 395 + * safe than sorry. 396 + */ 397 + BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE); 398 + 399 + pages_data = (void *)dst; 400 + /* 401 + * If linux page is bigger than 4k, and user buffer offset is 402 + * larger than 4k/8k/12k/etc this will skip first 4k pages, 403 + * because they bear no value data for OP-TEE. 
404 + */ 405 + optee_page = page_to_phys(*pages) + 406 + round_down(page_offset, OPTEE_MSG_NONCONTIG_PAGE_SIZE); 407 + 408 + while (true) { 409 + pages_data->pages_list[n++] = optee_page; 410 + 411 + if (n == PAGELIST_ENTRIES_PER_PAGE) { 412 + pages_data->next_page_data = 413 + virt_to_phys(pages_data + 1); 414 + pages_data++; 415 + n = 0; 416 + } 417 + 418 + optee_page += OPTEE_MSG_NONCONTIG_PAGE_SIZE; 419 + if (!(optee_page & ~PAGE_MASK)) { 420 + if (!--num_pages) 421 + break; 422 + pages++; 423 + optee_page = page_to_phys(*pages); 424 + } 425 + } 426 + } 427 + 428 + static int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm, 429 + struct page **pages, size_t num_pages, 430 + unsigned long start) 431 + { 432 + struct optee *optee = tee_get_drvdata(ctx->teedev); 433 + struct optee_msg_arg *msg_arg; 434 + struct tee_shm *shm_arg; 435 + u64 *pages_list; 436 + int rc; 437 + 438 + if (!num_pages) 439 + return -EINVAL; 440 + 441 + rc = optee_check_mem_type(start, num_pages); 442 + if (rc) 443 + return rc; 444 + 445 + pages_list = optee_allocate_pages_list(num_pages); 446 + if (!pages_list) 447 + return -ENOMEM; 448 + 449 + shm_arg = optee_get_msg_arg(ctx, 1, &msg_arg); 450 + if (IS_ERR(shm_arg)) { 451 + rc = PTR_ERR(shm_arg); 452 + goto out; 453 + } 454 + 455 + optee_fill_pages_list(pages_list, pages, num_pages, 456 + tee_shm_get_page_offset(shm)); 457 + 458 + msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM; 459 + msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT | 460 + OPTEE_MSG_ATTR_NONCONTIG; 461 + msg_arg->params->u.tmem.shm_ref = (unsigned long)shm; 462 + msg_arg->params->u.tmem.size = tee_shm_get_size(shm); 463 + /* 464 + * In the least bits of msg_arg->params->u.tmem.buf_ptr we 465 + * store buffer offset from 4k page, as described in OP-TEE ABI. 
466 + */ 467 + msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) | 468 + (tee_shm_get_page_offset(shm) & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1)); 469 + 470 + if (optee->ops->do_call_with_arg(ctx, shm_arg) || 471 + msg_arg->ret != TEEC_SUCCESS) 472 + rc = -EINVAL; 473 + 474 + tee_shm_free(shm_arg); 475 + out: 476 + optee_free_pages_list(pages_list, num_pages); 477 + return rc; 478 + } 479 + 480 + static int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm) 481 + { 482 + struct optee *optee = tee_get_drvdata(ctx->teedev); 483 + struct optee_msg_arg *msg_arg; 484 + struct tee_shm *shm_arg; 485 + int rc = 0; 486 + 487 + shm_arg = optee_get_msg_arg(ctx, 1, &msg_arg); 488 + if (IS_ERR(shm_arg)) 489 + return PTR_ERR(shm_arg); 490 + 491 + msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM; 492 + 493 + msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT; 494 + msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm; 495 + 496 + if (optee->ops->do_call_with_arg(ctx, shm_arg) || 497 + msg_arg->ret != TEEC_SUCCESS) 498 + rc = -EINVAL; 499 + tee_shm_free(shm_arg); 500 + return rc; 501 + } 502 + 503 + static int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm, 504 + struct page **pages, size_t num_pages, 505 + unsigned long start) 506 + { 507 + /* 508 + * We don't want to register supplicant memory in OP-TEE. 509 + * Instead information about it will be passed in RPC code. 510 + */ 511 + return optee_check_mem_type(start, num_pages); 512 + } 513 + 514 + static int optee_shm_unregister_supp(struct tee_context *ctx, 515 + struct tee_shm *shm) 516 + { 517 + return 0; 518 + } 519 + 520 + /* 521 + * 3. Dynamic shared memory pool based on alloc_pages() 522 + * 523 + * Implements an OP-TEE specific shared memory pool which is used 524 + * when dynamic shared memory is supported by secure world. 525 + * 526 + * The main function is optee_shm_pool_alloc_pages(). 
527 + */ 528 + 529 + static int pool_op_alloc(struct tee_shm_pool_mgr *poolm, 530 + struct tee_shm *shm, size_t size) 531 + { 532 + /* 533 + * Shared memory private to the OP-TEE driver doesn't need 534 + * to be registered with OP-TEE. 535 + */ 536 + if (shm->flags & TEE_SHM_PRIV) 537 + return optee_pool_op_alloc_helper(poolm, shm, size, NULL); 538 + 539 + return optee_pool_op_alloc_helper(poolm, shm, size, optee_shm_register); 540 + } 541 + 542 + static void pool_op_free(struct tee_shm_pool_mgr *poolm, 543 + struct tee_shm *shm) 544 + { 545 + if (!(shm->flags & TEE_SHM_PRIV)) 546 + optee_shm_unregister(shm->ctx, shm); 547 + 548 + free_pages((unsigned long)shm->kaddr, get_order(shm->size)); 549 + shm->kaddr = NULL; 550 + } 551 + 552 + static void pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm) 553 + { 554 + kfree(poolm); 555 + } 556 + 557 + static const struct tee_shm_pool_mgr_ops pool_ops = { 558 + .alloc = pool_op_alloc, 559 + .free = pool_op_free, 560 + .destroy_poolmgr = pool_op_destroy_poolmgr, 561 + }; 562 + 563 + /** 564 + * optee_shm_pool_alloc_pages() - create page-based allocator pool 565 + * 566 + * This pool is used when OP-TEE supports dymanic SHM. In this case 567 + * command buffers and such are allocated from kernel's own memory. 568 + */ 569 + static struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void) 570 + { 571 + struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); 572 + 573 + if (!mgr) 574 + return ERR_PTR(-ENOMEM); 575 + 576 + mgr->ops = &pool_ops; 577 + 578 + return mgr; 579 + } 580 + 581 + /* 582 + * 4. Do a normal scheduled call into secure world 583 + * 584 + * The function optee_smc_do_call_with_arg() performs a normal scheduled 585 + * call into secure world. During this call may normal world request help 586 + * from normal world using RPCs, Remote Procedure Calls. This includes 587 + * delivery of non-secure interrupts to for instance allow rescheduling of 588 + * the current task. 
589 + */ 590 + 591 + static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx, 592 + struct optee_msg_arg *arg) 593 + { 594 + struct tee_shm *shm; 595 + 596 + arg->ret_origin = TEEC_ORIGIN_COMMS; 597 + 598 + if (arg->num_params != 1 || 599 + arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) { 600 + arg->ret = TEEC_ERROR_BAD_PARAMETERS; 601 + return; 602 + } 603 + 604 + shm = (struct tee_shm *)(unsigned long)arg->params[0].u.value.b; 605 + switch (arg->params[0].u.value.a) { 606 + case OPTEE_RPC_SHM_TYPE_APPL: 607 + optee_rpc_cmd_free_suppl(ctx, shm); 608 + break; 609 + case OPTEE_RPC_SHM_TYPE_KERNEL: 610 + tee_shm_free(shm); 611 + break; 612 + default: 613 + arg->ret = TEEC_ERROR_BAD_PARAMETERS; 614 + } 615 + arg->ret = TEEC_SUCCESS; 616 + } 617 + 618 + static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx, 619 + struct optee_msg_arg *arg, 620 + struct optee_call_ctx *call_ctx) 621 + { 622 + phys_addr_t pa; 623 + struct tee_shm *shm; 624 + size_t sz; 625 + size_t n; 626 + 627 + arg->ret_origin = TEEC_ORIGIN_COMMS; 628 + 629 + if (!arg->num_params || 630 + arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) { 631 + arg->ret = TEEC_ERROR_BAD_PARAMETERS; 632 + return; 633 + } 634 + 635 + for (n = 1; n < arg->num_params; n++) { 636 + if (arg->params[n].attr != OPTEE_MSG_ATTR_TYPE_NONE) { 637 + arg->ret = TEEC_ERROR_BAD_PARAMETERS; 638 + return; 639 + } 640 + } 641 + 642 + sz = arg->params[0].u.value.b; 643 + switch (arg->params[0].u.value.a) { 644 + case OPTEE_RPC_SHM_TYPE_APPL: 645 + shm = optee_rpc_cmd_alloc_suppl(ctx, sz); 646 + break; 647 + case OPTEE_RPC_SHM_TYPE_KERNEL: 648 + shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV); 649 + break; 650 + default: 651 + arg->ret = TEEC_ERROR_BAD_PARAMETERS; 652 + return; 653 + } 654 + 655 + if (IS_ERR(shm)) { 656 + arg->ret = TEEC_ERROR_OUT_OF_MEMORY; 657 + return; 658 + } 659 + 660 + if (tee_shm_get_pa(shm, 0, &pa)) { 661 + arg->ret = TEEC_ERROR_BAD_PARAMETERS; 662 + goto bad; 663 + 
} 664 + 665 + sz = tee_shm_get_size(shm); 666 + 667 + if (tee_shm_is_registered(shm)) { 668 + struct page **pages; 669 + u64 *pages_list; 670 + size_t page_num; 671 + 672 + pages = tee_shm_get_pages(shm, &page_num); 673 + if (!pages || !page_num) { 674 + arg->ret = TEEC_ERROR_OUT_OF_MEMORY; 675 + goto bad; 676 + } 677 + 678 + pages_list = optee_allocate_pages_list(page_num); 679 + if (!pages_list) { 680 + arg->ret = TEEC_ERROR_OUT_OF_MEMORY; 681 + goto bad; 682 + } 683 + 684 + call_ctx->pages_list = pages_list; 685 + call_ctx->num_entries = page_num; 686 + 687 + arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT | 688 + OPTEE_MSG_ATTR_NONCONTIG; 689 + /* 690 + * In the least bits of u.tmem.buf_ptr we store buffer offset 691 + * from 4k page, as described in OP-TEE ABI. 692 + */ 693 + arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) | 694 + (tee_shm_get_page_offset(shm) & 695 + (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1)); 696 + arg->params[0].u.tmem.size = tee_shm_get_size(shm); 697 + arg->params[0].u.tmem.shm_ref = (unsigned long)shm; 698 + 699 + optee_fill_pages_list(pages_list, pages, page_num, 700 + tee_shm_get_page_offset(shm)); 701 + } else { 702 + arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT; 703 + arg->params[0].u.tmem.buf_ptr = pa; 704 + arg->params[0].u.tmem.size = sz; 705 + arg->params[0].u.tmem.shm_ref = (unsigned long)shm; 706 + } 707 + 708 + arg->ret = TEEC_SUCCESS; 709 + return; 710 + bad: 711 + tee_shm_free(shm); 712 + } 713 + 714 + static void free_pages_list(struct optee_call_ctx *call_ctx) 715 + { 716 + if (call_ctx->pages_list) { 717 + optee_free_pages_list(call_ctx->pages_list, 718 + call_ctx->num_entries); 719 + call_ctx->pages_list = NULL; 720 + call_ctx->num_entries = 0; 721 + } 722 + } 723 + 724 + static void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx) 725 + { 726 + free_pages_list(call_ctx); 727 + } 728 + 729 + static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee, 730 + struct tee_shm *shm, 
731 + struct optee_call_ctx *call_ctx) 732 + { 733 + struct optee_msg_arg *arg; 734 + 735 + arg = tee_shm_get_va(shm, 0); 736 + if (IS_ERR(arg)) { 737 + pr_err("%s: tee_shm_get_va %p failed\n", __func__, shm); 738 + return; 739 + } 740 + 741 + switch (arg->cmd) { 742 + case OPTEE_RPC_CMD_SHM_ALLOC: 743 + free_pages_list(call_ctx); 744 + handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx); 745 + break; 746 + case OPTEE_RPC_CMD_SHM_FREE: 747 + handle_rpc_func_cmd_shm_free(ctx, arg); 748 + break; 749 + default: 750 + optee_rpc_cmd(ctx, optee, arg); 751 + } 752 + } 753 + 754 + /** 755 + * optee_handle_rpc() - handle RPC from secure world 756 + * @ctx: context doing the RPC 757 + * @param: value of registers for the RPC 758 + * @call_ctx: call context. Preserved during one OP-TEE invocation 759 + * 760 + * Result of RPC is written back into @param. 761 + */ 762 + static void optee_handle_rpc(struct tee_context *ctx, 763 + struct optee_rpc_param *param, 764 + struct optee_call_ctx *call_ctx) 765 + { 766 + struct tee_device *teedev = ctx->teedev; 767 + struct optee *optee = tee_get_drvdata(teedev); 768 + struct tee_shm *shm; 769 + phys_addr_t pa; 770 + 771 + switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) { 772 + case OPTEE_SMC_RPC_FUNC_ALLOC: 773 + shm = tee_shm_alloc(ctx, param->a1, 774 + TEE_SHM_MAPPED | TEE_SHM_PRIV); 775 + if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) { 776 + reg_pair_from_64(&param->a1, &param->a2, pa); 777 + reg_pair_from_64(&param->a4, &param->a5, 778 + (unsigned long)shm); 779 + } else { 780 + param->a1 = 0; 781 + param->a2 = 0; 782 + param->a4 = 0; 783 + param->a5 = 0; 784 + } 785 + break; 786 + case OPTEE_SMC_RPC_FUNC_FREE: 787 + shm = reg_pair_to_ptr(param->a1, param->a2); 788 + tee_shm_free(shm); 789 + break; 790 + case OPTEE_SMC_RPC_FUNC_FOREIGN_INTR: 791 + /* 792 + * A foreign interrupt was raised while secure world was 793 + * executing, since they are handled in Linux a dummy RPC is 794 + * performed to let Linux take the interrupt 
through the normal 795 + * vector. 796 + */ 797 + break; 798 + case OPTEE_SMC_RPC_FUNC_CMD: 799 + shm = reg_pair_to_ptr(param->a1, param->a2); 800 + handle_rpc_func_cmd(ctx, optee, shm, call_ctx); 801 + break; 802 + default: 803 + pr_warn("Unknown RPC func 0x%x\n", 804 + (u32)OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)); 805 + break; 806 + } 807 + 808 + param->a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC; 809 + } 810 + 811 + /** 812 + * optee_smc_do_call_with_arg() - Do an SMC to OP-TEE in secure world 813 + * @ctx: calling context 814 + * @arg: shared memory holding the message to pass to secure world 815 + * 816 + * Does and SMC to OP-TEE in secure world and handles eventual resulting 817 + * Remote Procedure Calls (RPC) from OP-TEE. 818 + * 819 + * Returns return code from secure world, 0 is OK 820 + */ 821 + static int optee_smc_do_call_with_arg(struct tee_context *ctx, 822 + struct tee_shm *arg) 823 + { 824 + struct optee *optee = tee_get_drvdata(ctx->teedev); 825 + struct optee_call_waiter w; 826 + struct optee_rpc_param param = { }; 827 + struct optee_call_ctx call_ctx = { }; 828 + phys_addr_t parg; 829 + int rc; 830 + 831 + rc = tee_shm_get_pa(arg, 0, &parg); 832 + if (rc) 833 + return rc; 834 + 835 + param.a0 = OPTEE_SMC_CALL_WITH_ARG; 836 + reg_pair_from_64(&param.a1, &param.a2, parg); 837 + /* Initialize waiter */ 838 + optee_cq_wait_init(&optee->call_queue, &w); 839 + while (true) { 840 + struct arm_smccc_res res; 841 + 842 + trace_optee_invoke_fn_begin(&param); 843 + optee->smc.invoke_fn(param.a0, param.a1, param.a2, param.a3, 844 + param.a4, param.a5, param.a6, param.a7, 845 + &res); 846 + trace_optee_invoke_fn_end(&param, &res); 847 + 848 + if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) { 849 + /* 850 + * Out of threads in secure world, wait for a thread 851 + * become available. 
852 + */ 853 + optee_cq_wait_for_completion(&optee->call_queue, &w); 854 + } else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) { 855 + cond_resched(); 856 + param.a0 = res.a0; 857 + param.a1 = res.a1; 858 + param.a2 = res.a2; 859 + param.a3 = res.a3; 860 + optee_handle_rpc(ctx, &param, &call_ctx); 861 + } else { 862 + rc = res.a0; 863 + break; 864 + } 865 + } 866 + 867 + optee_rpc_finalize_call(&call_ctx); 868 + /* 869 + * We're done with our thread in secure world, if there's any 870 + * thread waiters wake up one. 871 + */ 872 + optee_cq_wait_final(&optee->call_queue, &w); 873 + 874 + return rc; 875 + } 876 + 877 + /* 878 + * 5. Driver initialization 879 + * 880 + * During driver inititialization is secure world probed to find out which 881 + * features it supports so the driver can be initialized with a matching 882 + * configuration. This involves for instance support for dynamic shared 883 + * memory instead of a static memory carvout. 884 + */ 885 + 886 + static void optee_get_version(struct tee_device *teedev, 887 + struct tee_ioctl_version_data *vers) 888 + { 889 + struct tee_ioctl_version_data v = { 890 + .impl_id = TEE_IMPL_ID_OPTEE, 891 + .impl_caps = TEE_OPTEE_CAP_TZ, 892 + .gen_caps = TEE_GEN_CAP_GP, 893 + }; 894 + struct optee *optee = tee_get_drvdata(teedev); 895 + 896 + if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) 897 + v.gen_caps |= TEE_GEN_CAP_REG_MEM; 898 + if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL) 899 + v.gen_caps |= TEE_GEN_CAP_MEMREF_NULL; 900 + *vers = v; 901 + } 902 + 903 + static int optee_smc_open(struct tee_context *ctx) 904 + { 905 + struct optee *optee = tee_get_drvdata(ctx->teedev); 906 + u32 sec_caps = optee->smc.sec_caps; 907 + 908 + return optee_open(ctx, sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL); 909 + } 910 + 911 + static const struct tee_driver_ops optee_clnt_ops = { 912 + .get_version = optee_get_version, 913 + .open = optee_smc_open, 914 + .release = optee_release, 915 + .open_session = optee_open_session, 
916 + .close_session = optee_close_session, 917 + .invoke_func = optee_invoke_func, 918 + .cancel_req = optee_cancel_req, 919 + .shm_register = optee_shm_register, 920 + .shm_unregister = optee_shm_unregister, 921 + }; 922 + 923 + static const struct tee_desc optee_clnt_desc = { 924 + .name = DRIVER_NAME "-clnt", 925 + .ops = &optee_clnt_ops, 926 + .owner = THIS_MODULE, 927 + }; 928 + 929 + static const struct tee_driver_ops optee_supp_ops = { 930 + .get_version = optee_get_version, 931 + .open = optee_smc_open, 932 + .release = optee_release_supp, 933 + .supp_recv = optee_supp_recv, 934 + .supp_send = optee_supp_send, 935 + .shm_register = optee_shm_register_supp, 936 + .shm_unregister = optee_shm_unregister_supp, 937 + }; 938 + 939 + static const struct tee_desc optee_supp_desc = { 940 + .name = DRIVER_NAME "-supp", 941 + .ops = &optee_supp_ops, 942 + .owner = THIS_MODULE, 943 + .flags = TEE_DESC_PRIVILEGED, 944 + }; 945 + 946 + static const struct optee_ops optee_ops = { 947 + .do_call_with_arg = optee_smc_do_call_with_arg, 948 + .to_msg_param = optee_to_msg_param, 949 + .from_msg_param = optee_from_msg_param, 950 + }; 951 + 952 + static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn) 953 + { 954 + struct arm_smccc_res res; 955 + 956 + invoke_fn(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res); 957 + 958 + if (res.a0 == OPTEE_MSG_UID_0 && res.a1 == OPTEE_MSG_UID_1 && 959 + res.a2 == OPTEE_MSG_UID_2 && res.a3 == OPTEE_MSG_UID_3) 960 + return true; 961 + return false; 962 + } 963 + 964 + static void optee_msg_get_os_revision(optee_invoke_fn *invoke_fn) 965 + { 966 + union { 967 + struct arm_smccc_res smccc; 968 + struct optee_smc_call_get_os_revision_result result; 969 + } res = { 970 + .result = { 971 + .build_id = 0 972 + } 973 + }; 974 + 975 + invoke_fn(OPTEE_SMC_CALL_GET_OS_REVISION, 0, 0, 0, 0, 0, 0, 0, 976 + &res.smccc); 977 + 978 + if (res.result.build_id) 979 + pr_info("revision %lu.%lu (%08lx)", res.result.major, 980 + res.result.minor, 
res.result.build_id); 981 + else 982 + pr_info("revision %lu.%lu", res.result.major, res.result.minor); 983 + } 984 + 985 + static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn) 986 + { 987 + union { 988 + struct arm_smccc_res smccc; 989 + struct optee_smc_calls_revision_result result; 990 + } res; 991 + 992 + invoke_fn(OPTEE_SMC_CALLS_REVISION, 0, 0, 0, 0, 0, 0, 0, &res.smccc); 993 + 994 + if (res.result.major == OPTEE_MSG_REVISION_MAJOR && 995 + (int)res.result.minor >= OPTEE_MSG_REVISION_MINOR) 996 + return true; 997 + return false; 998 + } 999 + 1000 + static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn, 1001 + u32 *sec_caps) 1002 + { 1003 + union { 1004 + struct arm_smccc_res smccc; 1005 + struct optee_smc_exchange_capabilities_result result; 1006 + } res; 1007 + u32 a1 = 0; 1008 + 1009 + /* 1010 + * TODO This isn't enough to tell if it's UP system (from kernel 1011 + * point of view) or not, is_smp() returns the information 1012 + * needed, but can't be called directly from here. 
1013 + */ 1014 + if (!IS_ENABLED(CONFIG_SMP) || nr_cpu_ids == 1) 1015 + a1 |= OPTEE_SMC_NSEC_CAP_UNIPROCESSOR; 1016 + 1017 + invoke_fn(OPTEE_SMC_EXCHANGE_CAPABILITIES, a1, 0, 0, 0, 0, 0, 0, 1018 + &res.smccc); 1019 + 1020 + if (res.result.status != OPTEE_SMC_RETURN_OK) 1021 + return false; 1022 + 1023 + *sec_caps = res.result.capabilities; 1024 + return true; 1025 + } 1026 + 1027 + static struct tee_shm_pool *optee_config_dyn_shm(void) 1028 + { 1029 + struct tee_shm_pool_mgr *priv_mgr; 1030 + struct tee_shm_pool_mgr *dmabuf_mgr; 1031 + void *rc; 1032 + 1033 + rc = optee_shm_pool_alloc_pages(); 1034 + if (IS_ERR(rc)) 1035 + return rc; 1036 + priv_mgr = rc; 1037 + 1038 + rc = optee_shm_pool_alloc_pages(); 1039 + if (IS_ERR(rc)) { 1040 + tee_shm_pool_mgr_destroy(priv_mgr); 1041 + return rc; 1042 + } 1043 + dmabuf_mgr = rc; 1044 + 1045 + rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr); 1046 + if (IS_ERR(rc)) { 1047 + tee_shm_pool_mgr_destroy(priv_mgr); 1048 + tee_shm_pool_mgr_destroy(dmabuf_mgr); 1049 + } 1050 + 1051 + return rc; 1052 + } 1053 + 1054 + static struct tee_shm_pool * 1055 + optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm) 1056 + { 1057 + union { 1058 + struct arm_smccc_res smccc; 1059 + struct optee_smc_get_shm_config_result result; 1060 + } res; 1061 + unsigned long vaddr; 1062 + phys_addr_t paddr; 1063 + size_t size; 1064 + phys_addr_t begin; 1065 + phys_addr_t end; 1066 + void *va; 1067 + struct tee_shm_pool_mgr *priv_mgr; 1068 + struct tee_shm_pool_mgr *dmabuf_mgr; 1069 + void *rc; 1070 + const int sz = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE; 1071 + 1072 + invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc); 1073 + if (res.result.status != OPTEE_SMC_RETURN_OK) { 1074 + pr_err("static shm service not available\n"); 1075 + return ERR_PTR(-ENOENT); 1076 + } 1077 + 1078 + if (res.result.settings != OPTEE_SMC_SHM_CACHED) { 1079 + pr_err("only normal cached shared memory supported\n"); 1080 + return ERR_PTR(-EINVAL); 
1081 + } 1082 + 1083 + begin = roundup(res.result.start, PAGE_SIZE); 1084 + end = rounddown(res.result.start + res.result.size, PAGE_SIZE); 1085 + paddr = begin; 1086 + size = end - begin; 1087 + 1088 + if (size < 2 * OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE) { 1089 + pr_err("too small shared memory area\n"); 1090 + return ERR_PTR(-EINVAL); 1091 + } 1092 + 1093 + va = memremap(paddr, size, MEMREMAP_WB); 1094 + if (!va) { 1095 + pr_err("shared memory ioremap failed\n"); 1096 + return ERR_PTR(-EINVAL); 1097 + } 1098 + vaddr = (unsigned long)va; 1099 + 1100 + rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, sz, 1101 + 3 /* 8 bytes aligned */); 1102 + if (IS_ERR(rc)) 1103 + goto err_memunmap; 1104 + priv_mgr = rc; 1105 + 1106 + vaddr += sz; 1107 + paddr += sz; 1108 + size -= sz; 1109 + 1110 + rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, size, PAGE_SHIFT); 1111 + if (IS_ERR(rc)) 1112 + goto err_free_priv_mgr; 1113 + dmabuf_mgr = rc; 1114 + 1115 + rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr); 1116 + if (IS_ERR(rc)) 1117 + goto err_free_dmabuf_mgr; 1118 + 1119 + *memremaped_shm = va; 1120 + 1121 + return rc; 1122 + 1123 + err_free_dmabuf_mgr: 1124 + tee_shm_pool_mgr_destroy(dmabuf_mgr); 1125 + err_free_priv_mgr: 1126 + tee_shm_pool_mgr_destroy(priv_mgr); 1127 + err_memunmap: 1128 + memunmap(va); 1129 + return rc; 1130 + } 1131 + 1132 + /* Simple wrapper functions to be able to use a function pointer */ 1133 + static void optee_smccc_smc(unsigned long a0, unsigned long a1, 1134 + unsigned long a2, unsigned long a3, 1135 + unsigned long a4, unsigned long a5, 1136 + unsigned long a6, unsigned long a7, 1137 + struct arm_smccc_res *res) 1138 + { 1139 + arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res); 1140 + } 1141 + 1142 + static void optee_smccc_hvc(unsigned long a0, unsigned long a1, 1143 + unsigned long a2, unsigned long a3, 1144 + unsigned long a4, unsigned long a5, 1145 + unsigned long a6, unsigned long a7, 1146 + struct arm_smccc_res *res) 1147 + { 1148 + 
arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res); 1149 + } 1150 + 1151 + static optee_invoke_fn *get_invoke_func(struct device *dev) 1152 + { 1153 + const char *method; 1154 + 1155 + pr_info("probing for conduit method.\n"); 1156 + 1157 + if (device_property_read_string(dev, "method", &method)) { 1158 + pr_warn("missing \"method\" property\n"); 1159 + return ERR_PTR(-ENXIO); 1160 + } 1161 + 1162 + if (!strcmp("hvc", method)) 1163 + return optee_smccc_hvc; 1164 + else if (!strcmp("smc", method)) 1165 + return optee_smccc_smc; 1166 + 1167 + pr_warn("invalid \"method\" property: %s\n", method); 1168 + return ERR_PTR(-EINVAL); 1169 + } 1170 + 1171 + /* optee_remove - Device Removal Routine 1172 + * @pdev: platform device information struct 1173 + * 1174 + * optee_remove is called by platform subsystem to alert the driver 1175 + * that it should release the device 1176 + */ 1177 + static int optee_smc_remove(struct platform_device *pdev) 1178 + { 1179 + struct optee *optee = platform_get_drvdata(pdev); 1180 + 1181 + /* 1182 + * Ask OP-TEE to free all cached shared memory objects to decrease 1183 + * reference counters and also avoid wild pointers in secure world 1184 + * into the old shared memory range. 1185 + */ 1186 + optee_disable_shm_cache(optee); 1187 + 1188 + optee_remove_common(optee); 1189 + 1190 + if (optee->smc.memremaped_shm) 1191 + memunmap(optee->smc.memremaped_shm); 1192 + 1193 + kfree(optee); 1194 + 1195 + return 0; 1196 + } 1197 + 1198 + /* optee_shutdown - Device Removal Routine 1199 + * @pdev: platform device information struct 1200 + * 1201 + * platform_shutdown is called by the platform subsystem to alert 1202 + * the driver that a shutdown, reboot, or kexec is happening and 1203 + * device must be disabled. 
1204 + */ 1205 + static void optee_shutdown(struct platform_device *pdev) 1206 + { 1207 + optee_disable_shm_cache(platform_get_drvdata(pdev)); 1208 + } 1209 + 1210 + static int optee_probe(struct platform_device *pdev) 1211 + { 1212 + optee_invoke_fn *invoke_fn; 1213 + struct tee_shm_pool *pool = ERR_PTR(-EINVAL); 1214 + struct optee *optee = NULL; 1215 + void *memremaped_shm = NULL; 1216 + struct tee_device *teedev; 1217 + u32 sec_caps; 1218 + int rc; 1219 + 1220 + invoke_fn = get_invoke_func(&pdev->dev); 1221 + if (IS_ERR(invoke_fn)) 1222 + return PTR_ERR(invoke_fn); 1223 + 1224 + if (!optee_msg_api_uid_is_optee_api(invoke_fn)) { 1225 + pr_warn("api uid mismatch\n"); 1226 + return -EINVAL; 1227 + } 1228 + 1229 + optee_msg_get_os_revision(invoke_fn); 1230 + 1231 + if (!optee_msg_api_revision_is_compatible(invoke_fn)) { 1232 + pr_warn("api revision mismatch\n"); 1233 + return -EINVAL; 1234 + } 1235 + 1236 + if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) { 1237 + pr_warn("capabilities mismatch\n"); 1238 + return -EINVAL; 1239 + } 1240 + 1241 + /* 1242 + * Try to use dynamic shared memory if possible 1243 + */ 1244 + if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) 1245 + pool = optee_config_dyn_shm(); 1246 + 1247 + /* 1248 + * If dynamic shared memory is not available or failed - try static one 1249 + */ 1250 + if (IS_ERR(pool) && (sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM)) 1251 + pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm); 1252 + 1253 + if (IS_ERR(pool)) 1254 + return PTR_ERR(pool); 1255 + 1256 + optee = kzalloc(sizeof(*optee), GFP_KERNEL); 1257 + if (!optee) { 1258 + rc = -ENOMEM; 1259 + goto err; 1260 + } 1261 + 1262 + optee->ops = &optee_ops; 1263 + optee->smc.invoke_fn = invoke_fn; 1264 + optee->smc.sec_caps = sec_caps; 1265 + 1266 + teedev = tee_device_alloc(&optee_clnt_desc, NULL, pool, optee); 1267 + if (IS_ERR(teedev)) { 1268 + rc = PTR_ERR(teedev); 1269 + goto err; 1270 + } 1271 + optee->teedev = teedev; 1272 + 1273 + 
teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee); 1274 + if (IS_ERR(teedev)) { 1275 + rc = PTR_ERR(teedev); 1276 + goto err; 1277 + } 1278 + optee->supp_teedev = teedev; 1279 + 1280 + rc = tee_device_register(optee->teedev); 1281 + if (rc) 1282 + goto err; 1283 + 1284 + rc = tee_device_register(optee->supp_teedev); 1285 + if (rc) 1286 + goto err; 1287 + 1288 + mutex_init(&optee->call_queue.mutex); 1289 + INIT_LIST_HEAD(&optee->call_queue.waiters); 1290 + optee_wait_queue_init(&optee->wait_queue); 1291 + optee_supp_init(&optee->supp); 1292 + optee->smc.memremaped_shm = memremaped_shm; 1293 + optee->pool = pool; 1294 + 1295 + /* 1296 + * Ensure that there are no pre-existing shm objects before enabling 1297 + * the shm cache so that there's no chance of receiving an invalid 1298 + * address during shutdown. This could occur, for example, if we're 1299 + * kexec booting from an older kernel that did not properly cleanup the 1300 + * shm cache. 1301 + */ 1302 + optee_disable_unmapped_shm_cache(optee); 1303 + 1304 + optee_enable_shm_cache(optee); 1305 + 1306 + if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) 1307 + pr_info("dynamic shared memory is enabled\n"); 1308 + 1309 + platform_set_drvdata(pdev, optee); 1310 + 1311 + rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES); 1312 + if (rc) { 1313 + optee_smc_remove(pdev); 1314 + return rc; 1315 + } 1316 + 1317 + pr_info("initialized driver\n"); 1318 + return 0; 1319 + err: 1320 + if (optee) { 1321 + /* 1322 + * tee_device_unregister() is safe to call even if the 1323 + * devices hasn't been registered with 1324 + * tee_device_register() yet. 
1325 + */ 1326 + tee_device_unregister(optee->supp_teedev); 1327 + tee_device_unregister(optee->teedev); 1328 + kfree(optee); 1329 + } 1330 + if (pool) 1331 + tee_shm_pool_free(pool); 1332 + if (memremaped_shm) 1333 + memunmap(memremaped_shm); 1334 + return rc; 1335 + } 1336 + 1337 + static const struct of_device_id optee_dt_match[] = { 1338 + { .compatible = "linaro,optee-tz" }, 1339 + {}, 1340 + }; 1341 + MODULE_DEVICE_TABLE(of, optee_dt_match); 1342 + 1343 + static struct platform_driver optee_driver = { 1344 + .probe = optee_probe, 1345 + .remove = optee_smc_remove, 1346 + .shutdown = optee_shutdown, 1347 + .driver = { 1348 + .name = "optee", 1349 + .of_match_table = optee_dt_match, 1350 + }, 1351 + }; 1352 + 1353 + int optee_smc_abi_register(void) 1354 + { 1355 + return platform_driver_register(&optee_driver); 1356 + } 1357 + 1358 + void optee_smc_abi_unregister(void) 1359 + { 1360 + platform_driver_unregister(&optee_driver); 1361 + }
+6 -1
include/linux/tee_drv.h
··· 197 197 * @num_pages: number of locked pages 198 198 * @dmabuf: dmabuf used for exporting to user space 199 199 * @flags: defined by TEE_SHM_* in tee_drv.h 200 - * @id: unique id of a shared memory object on this device 200 + * @id: unique id of a shared memory object on this device, shared 201 + * with user space 202 + * @sec_world_id: 203 + * secure world assigned id of this shared memory object, not 204 + * used by all drivers 201 205 * 202 206 * This pool is only supposed to be accessed directly from the TEE 203 207 * subsystem and from drivers that implement their own shm pool manager. ··· 217 213 struct dma_buf *dmabuf; 218 214 u32 flags; 219 215 int id; 216 + u64 sec_world_id; 220 217 }; 221 218 222 219 /**