Merge tag 'fixes-for-v5.16' of https://git.linaro.org/people/jens.wiklander/linux-tee into arm/fixes

TEE and OP-TEE fixes for v5.16

- Fixes a race when a tee_shm reaches reference count 0 and is about to
be torn down
- Fixes an incorrect page free bug in an error path of the OP-TEE shared
memory pool handling
- Suppresses a false positive kmemleak report when allocating driver
private shared memory buffers for OP-TEE

* tag 'fixes-for-v5.16' of https://git.linaro.org/people/jens.wiklander/linux-tee:
optee: Suppress false positive kmemleak report in optee_handle_rpc()
tee: optee: Fix incorrect page free bug
tee: handle lookup of shm with reference count 0

Link: https://lore.kernel.org/r/20211216150745.GA3347954@jade
Signed-off-by: Arnd Bergmann <arnd@arndb.de>

+72 -114
+2 -4
drivers/tee/optee/core.c
··· 48 goto err; 49 } 50 51 - for (i = 0; i < nr_pages; i++) { 52 - pages[i] = page; 53 - page++; 54 - } 55 56 shm->flags |= TEE_SHM_REGISTER; 57 rc = shm_register(shm->ctx, shm, pages, nr_pages,
··· 48 goto err; 49 } 50 51 + for (i = 0; i < nr_pages; i++) 52 + pages[i] = page + i; 53 54 shm->flags |= TEE_SHM_REGISTER; 55 rc = shm_register(shm->ctx, shm, pages, nr_pages,
+2
drivers/tee/optee/smc_abi.c
··· 23 #include "optee_private.h" 24 #include "optee_smc.h" 25 #include "optee_rpc_cmd.h" 26 #define CREATE_TRACE_POINTS 27 #include "optee_trace.h" 28 ··· 784 param->a4 = 0; 785 param->a5 = 0; 786 } 787 break; 788 case OPTEE_SMC_RPC_FUNC_FREE: 789 shm = reg_pair_to_ptr(param->a1, param->a2);
··· 23 #include "optee_private.h" 24 #include "optee_smc.h" 25 #include "optee_rpc_cmd.h" 26 + #include <linux/kmemleak.h> 27 #define CREATE_TRACE_POINTS 28 #include "optee_trace.h" 29 ··· 783 param->a4 = 0; 784 param->a5 = 0; 785 } 786 + kmemleak_not_leak(shm); 787 break; 788 case OPTEE_SMC_RPC_FUNC_FREE: 789 shm = reg_pair_to_ptr(param->a1, param->a2);
+66 -108
drivers/tee/tee_shm.c
··· 1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 - * Copyright (c) 2015-2016, Linaro Limited 4 */ 5 #include <linux/device.h> 6 - #include <linux/dma-buf.h> 7 - #include <linux/fdtable.h> 8 #include <linux/idr.h> 9 #include <linux/sched.h> 10 #include <linux/slab.h> 11 #include <linux/tee_drv.h> 12 #include <linux/uio.h> 13 - #include <linux/module.h> 14 #include "tee_private.h" 15 - 16 - MODULE_IMPORT_NS(DMA_BUF); 17 18 static void release_registered_pages(struct tee_shm *shm) 19 { ··· 28 } 29 } 30 31 - static void tee_shm_release(struct tee_shm *shm) 32 { 33 - struct tee_device *teedev = shm->ctx->teedev; 34 - 35 - if (shm->flags & TEE_SHM_DMA_BUF) { 36 - mutex_lock(&teedev->mutex); 37 - idr_remove(&teedev->idr, shm->id); 38 - mutex_unlock(&teedev->mutex); 39 - } 40 - 41 if (shm->flags & TEE_SHM_POOL) { 42 struct tee_shm_pool_mgr *poolm; 43 ··· 55 56 tee_device_put(teedev); 57 } 58 - 59 - static struct sg_table *tee_shm_op_map_dma_buf(struct dma_buf_attachment 60 - *attach, enum dma_data_direction dir) 61 - { 62 - return NULL; 63 - } 64 - 65 - static void tee_shm_op_unmap_dma_buf(struct dma_buf_attachment *attach, 66 - struct sg_table *table, 67 - enum dma_data_direction dir) 68 - { 69 - } 70 - 71 - static void tee_shm_op_release(struct dma_buf *dmabuf) 72 - { 73 - struct tee_shm *shm = dmabuf->priv; 74 - 75 - tee_shm_release(shm); 76 - } 77 - 78 - static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) 79 - { 80 - struct tee_shm *shm = dmabuf->priv; 81 - size_t size = vma->vm_end - vma->vm_start; 82 - 83 - /* Refuse sharing shared memory provided by application */ 84 - if (shm->flags & TEE_SHM_USER_MAPPED) 85 - return -EINVAL; 86 - 87 - return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT, 88 - size, vma->vm_page_prot); 89 - } 90 - 91 - static const struct dma_buf_ops tee_shm_dma_buf_ops = { 92 - .map_dma_buf = tee_shm_op_map_dma_buf, 93 - .unmap_dma_buf = tee_shm_op_unmap_dma_buf, 94 - .release = tee_shm_op_release, 95 
- .mmap = tee_shm_op_mmap, 96 - }; 97 98 struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags) 99 { ··· 90 goto err_dev_put; 91 } 92 93 shm->flags = flags | TEE_SHM_POOL; 94 shm->ctx = ctx; 95 if (flags & TEE_SHM_DMA_BUF) ··· 104 goto err_kfree; 105 } 106 107 - 108 if (flags & TEE_SHM_DMA_BUF) { 109 - DEFINE_DMA_BUF_EXPORT_INFO(exp_info); 110 - 111 mutex_lock(&teedev->mutex); 112 shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL); 113 mutex_unlock(&teedev->mutex); ··· 112 ret = ERR_PTR(shm->id); 113 goto err_pool_free; 114 } 115 - 116 - exp_info.ops = &tee_shm_dma_buf_ops; 117 - exp_info.size = shm->size; 118 - exp_info.flags = O_RDWR; 119 - exp_info.priv = shm; 120 - 121 - shm->dmabuf = dma_buf_export(&exp_info); 122 - if (IS_ERR(shm->dmabuf)) { 123 - ret = ERR_CAST(shm->dmabuf); 124 - goto err_rem; 125 - } 126 } 127 128 teedev_ctx_get(ctx); 129 130 return shm; 131 - err_rem: 132 - if (flags & TEE_SHM_DMA_BUF) { 133 - mutex_lock(&teedev->mutex); 134 - idr_remove(&teedev->idr, shm->id); 135 - mutex_unlock(&teedev->mutex); 136 - } 137 err_pool_free: 138 poolm->ops->free(poolm, shm); 139 err_kfree: ··· 177 goto err; 178 } 179 180 shm->flags = flags | TEE_SHM_REGISTER; 181 shm->ctx = ctx; 182 shm->id = -1; ··· 238 goto err; 239 } 240 241 - if (flags & TEE_SHM_DMA_BUF) { 242 - DEFINE_DMA_BUF_EXPORT_INFO(exp_info); 243 - 244 - exp_info.ops = &tee_shm_dma_buf_ops; 245 - exp_info.size = shm->size; 246 - exp_info.flags = O_RDWR; 247 - exp_info.priv = shm; 248 - 249 - shm->dmabuf = dma_buf_export(&exp_info); 250 - if (IS_ERR(shm->dmabuf)) { 251 - ret = ERR_CAST(shm->dmabuf); 252 - teedev->desc->ops->shm_unregister(ctx, shm); 253 - goto err; 254 - } 255 - } 256 - 257 return shm; 258 err: 259 if (shm) { ··· 255 } 256 EXPORT_SYMBOL_GPL(tee_shm_register); 257 258 /** 259 * tee_shm_get_fd() - Increase reference count and return file descriptor 260 * @shm: Shared memory handle ··· 296 if (!(shm->flags & TEE_SHM_DMA_BUF)) 297 return -EINVAL; 298 299 
- get_dma_buf(shm->dmabuf); 300 - fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC); 301 if (fd < 0) 302 - dma_buf_put(shm->dmabuf); 303 return fd; 304 } 305 ··· 310 */ 311 void tee_shm_free(struct tee_shm *shm) 312 { 313 - /* 314 - * dma_buf_put() decreases the dmabuf reference counter and will 315 - * call tee_shm_release() when the last reference is gone. 316 - * 317 - * In the case of driver private memory we call tee_shm_release 318 - * directly instead as it doesn't have a reference counter. 319 - */ 320 - if (shm->flags & TEE_SHM_DMA_BUF) 321 - dma_buf_put(shm->dmabuf); 322 - else 323 - tee_shm_release(shm); 324 } 325 EXPORT_SYMBOL_GPL(tee_shm_free); 326 ··· 417 teedev = ctx->teedev; 418 mutex_lock(&teedev->mutex); 419 shm = idr_find(&teedev->idr, id); 420 if (!shm || shm->ctx != ctx) 421 shm = ERR_PTR(-EINVAL); 422 - else if (shm->flags & TEE_SHM_DMA_BUF) 423 - get_dma_buf(shm->dmabuf); 424 mutex_unlock(&teedev->mutex); 425 return shm; 426 } ··· 437 */ 438 void tee_shm_put(struct tee_shm *shm) 439 { 440 - if (shm->flags & TEE_SHM_DMA_BUF) 441 - dma_buf_put(shm->dmabuf); 442 } 443 EXPORT_SYMBOL_GPL(tee_shm_put);
··· 1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 + * Copyright (c) 2015-2017, 2019-2021 Linaro Limited 4 */ 5 + #include <linux/anon_inodes.h> 6 #include <linux/device.h> 7 #include <linux/idr.h> 8 + #include <linux/mm.h> 9 #include <linux/sched.h> 10 #include <linux/slab.h> 11 #include <linux/tee_drv.h> 12 #include <linux/uio.h> 13 #include "tee_private.h" 14 15 static void release_registered_pages(struct tee_shm *shm) 16 { ··· 31 } 32 } 33 34 + static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm) 35 { 36 if (shm->flags & TEE_SHM_POOL) { 37 struct tee_shm_pool_mgr *poolm; 38 ··· 66 67 tee_device_put(teedev); 68 } 69 70 struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags) 71 { ··· 140 goto err_dev_put; 141 } 142 143 + refcount_set(&shm->refcount, 1); 144 shm->flags = flags | TEE_SHM_POOL; 145 shm->ctx = ctx; 146 if (flags & TEE_SHM_DMA_BUF) ··· 153 goto err_kfree; 154 } 155 156 if (flags & TEE_SHM_DMA_BUF) { 157 mutex_lock(&teedev->mutex); 158 shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL); 159 mutex_unlock(&teedev->mutex); ··· 164 ret = ERR_PTR(shm->id); 165 goto err_pool_free; 166 } 167 } 168 169 teedev_ctx_get(ctx); 170 171 return shm; 172 err_pool_free: 173 poolm->ops->free(poolm, shm); 174 err_kfree: ··· 246 goto err; 247 } 248 249 + refcount_set(&shm->refcount, 1); 250 shm->flags = flags | TEE_SHM_REGISTER; 251 shm->ctx = ctx; 252 shm->id = -1; ··· 306 goto err; 307 } 308 309 return shm; 310 err: 311 if (shm) { ··· 339 } 340 EXPORT_SYMBOL_GPL(tee_shm_register); 341 342 + static int tee_shm_fop_release(struct inode *inode, struct file *filp) 343 + { 344 + tee_shm_put(filp->private_data); 345 + return 0; 346 + } 347 + 348 + static int tee_shm_fop_mmap(struct file *filp, struct vm_area_struct *vma) 349 + { 350 + struct tee_shm *shm = filp->private_data; 351 + size_t size = vma->vm_end - vma->vm_start; 352 + 353 + /* Refuse sharing shared memory provided by application */ 354 + if (shm->flags & 
TEE_SHM_USER_MAPPED) 355 + return -EINVAL; 356 + 357 + /* check for overflowing the buffer's size */ 358 + if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT) 359 + return -EINVAL; 360 + 361 + return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT, 362 + size, vma->vm_page_prot); 363 + } 364 + 365 + static const struct file_operations tee_shm_fops = { 366 + .owner = THIS_MODULE, 367 + .release = tee_shm_fop_release, 368 + .mmap = tee_shm_fop_mmap, 369 + }; 370 + 371 /** 372 * tee_shm_get_fd() - Increase reference count and return file descriptor 373 * @shm: Shared memory handle ··· 351 if (!(shm->flags & TEE_SHM_DMA_BUF)) 352 return -EINVAL; 353 354 + /* matched by tee_shm_put() in tee_shm_op_release() */ 355 + refcount_inc(&shm->refcount); 356 + fd = anon_inode_getfd("tee_shm", &tee_shm_fops, shm, O_RDWR); 357 if (fd < 0) 358 + tee_shm_put(shm); 359 return fd; 360 } 361 ··· 364 */ 365 void tee_shm_free(struct tee_shm *shm) 366 { 367 + tee_shm_put(shm); 368 } 369 EXPORT_SYMBOL_GPL(tee_shm_free); 370 ··· 481 teedev = ctx->teedev; 482 mutex_lock(&teedev->mutex); 483 shm = idr_find(&teedev->idr, id); 484 + /* 485 + * If the tee_shm was found in the IDR it must have a refcount 486 + * larger than 0 due to the guarantee in tee_shm_put() below. So 487 + * it's safe to use refcount_inc(). 488 + */ 489 if (!shm || shm->ctx != ctx) 490 shm = ERR_PTR(-EINVAL); 491 + else 492 + refcount_inc(&shm->refcount); 493 mutex_unlock(&teedev->mutex); 494 return shm; 495 } ··· 496 */ 497 void tee_shm_put(struct tee_shm *shm) 498 { 499 + struct tee_device *teedev = shm->ctx->teedev; 500 + bool do_release = false; 501 + 502 + mutex_lock(&teedev->mutex); 503 + if (refcount_dec_and_test(&shm->refcount)) { 504 + /* 505 + * refcount has reached 0, we must now remove it from the 506 + * IDR before releasing the mutex. This will guarantee that 507 + * the refcount_inc() in tee_shm_get_from_id() never starts 508 + * from 0. 
509 + */ 510 + if (shm->flags & TEE_SHM_DMA_BUF) 511 + idr_remove(&teedev->idr, shm->id); 512 + do_release = true; 513 + } 514 + mutex_unlock(&teedev->mutex); 515 + 516 + if (do_release) 517 + tee_shm_release(teedev, shm); 518 } 519 EXPORT_SYMBOL_GPL(tee_shm_put);
+2 -2
include/linux/tee_drv.h
··· 195 * @offset: offset of buffer in user space 196 * @pages: locked pages from userspace 197 * @num_pages: number of locked pages 198 - * @dmabuf: dmabuf used to for exporting to user space 199 * @flags: defined by TEE_SHM_* in tee_drv.h 200 * @id: unique id of a shared memory object on this device, shared 201 * with user space ··· 214 unsigned int offset; 215 struct page **pages; 216 size_t num_pages; 217 - struct dma_buf *dmabuf; 218 u32 flags; 219 int id; 220 u64 sec_world_id;
··· 195 * @offset: offset of buffer in user space 196 * @pages: locked pages from userspace 197 * @num_pages: number of locked pages 198 + * @refcount: reference counter 199 * @flags: defined by TEE_SHM_* in tee_drv.h 200 * @id: unique id of a shared memory object on this device, shared 201 * with user space ··· 214 unsigned int offset; 215 struct page **pages; 216 size_t num_pages; 217 + refcount_t refcount; 218 u32 flags; 219 int id; 220 u64 sec_world_id;