Merge tag 'fixes-for-v5.16' of https://git.linaro.org/people/jens.wiklander/linux-tee into arm/fixes

TEE and OP-TEE fixes for v5.16

- Fixes a race when a tee_shm reaches reference count 0 and is about to
be torn down
- Fixes an incorrect page free bug in an error path of the OP-TEE shared
memory pool handling
- Suppresses a false positive kmemleak report when allocating driver
private shared memory buffers for OP-TEE

* tag 'fixes-for-v5.16' of https://git.linaro.org/people/jens.wiklander/linux-tee:
optee: Suppress false positive kmemleak report in optee_handle_rpc()
tee: optee: Fix incorrect page free bug
tee: handle lookup of shm with reference count 0

Link: https://lore.kernel.org/r/20211216150745.GA3347954@jade
Signed-off-by: Arnd Bergmann <arnd@arndb.de>

+72 -114
+2 -4
drivers/tee/optee/core.c
··· 48 48 goto err; 49 49 } 50 50 51 - for (i = 0; i < nr_pages; i++) { 52 - pages[i] = page; 53 - page++; 54 - } 51 + for (i = 0; i < nr_pages; i++) 52 + pages[i] = page + i; 55 53 56 54 shm->flags |= TEE_SHM_REGISTER; 57 55 rc = shm_register(shm->ctx, shm, pages, nr_pages,
+2
drivers/tee/optee/smc_abi.c
··· 23 23 #include "optee_private.h" 24 24 #include "optee_smc.h" 25 25 #include "optee_rpc_cmd.h" 26 + #include <linux/kmemleak.h> 26 27 #define CREATE_TRACE_POINTS 27 28 #include "optee_trace.h" 28 29 ··· 784 783 param->a4 = 0; 785 784 param->a5 = 0; 786 785 } 786 + kmemleak_not_leak(shm); 787 787 break; 788 788 case OPTEE_SMC_RPC_FUNC_FREE: 789 789 shm = reg_pair_to_ptr(param->a1, param->a2);
+66 -108
drivers/tee/tee_shm.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 2 /* 3 - * Copyright (c) 2015-2016, Linaro Limited 3 + * Copyright (c) 2015-2017, 2019-2021 Linaro Limited 4 4 */ 5 + #include <linux/anon_inodes.h> 5 6 #include <linux/device.h> 6 - #include <linux/dma-buf.h> 7 - #include <linux/fdtable.h> 8 7 #include <linux/idr.h> 8 + #include <linux/mm.h> 9 9 #include <linux/sched.h> 10 10 #include <linux/slab.h> 11 11 #include <linux/tee_drv.h> 12 12 #include <linux/uio.h> 13 - #include <linux/module.h> 14 13 #include "tee_private.h" 15 - 16 - MODULE_IMPORT_NS(DMA_BUF); 17 14 18 15 static void release_registered_pages(struct tee_shm *shm) 19 16 { ··· 28 31 } 29 32 } 30 33 31 - static void tee_shm_release(struct tee_shm *shm) 34 + static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm) 32 35 { 33 - struct tee_device *teedev = shm->ctx->teedev; 34 - 35 - if (shm->flags & TEE_SHM_DMA_BUF) { 36 - mutex_lock(&teedev->mutex); 37 - idr_remove(&teedev->idr, shm->id); 38 - mutex_unlock(&teedev->mutex); 39 - } 40 - 41 36 if (shm->flags & TEE_SHM_POOL) { 42 37 struct tee_shm_pool_mgr *poolm; 43 38 ··· 55 66 56 67 tee_device_put(teedev); 57 68 } 58 - 59 - static struct sg_table *tee_shm_op_map_dma_buf(struct dma_buf_attachment 60 - *attach, enum dma_data_direction dir) 61 - { 62 - return NULL; 63 - } 64 - 65 - static void tee_shm_op_unmap_dma_buf(struct dma_buf_attachment *attach, 66 - struct sg_table *table, 67 - enum dma_data_direction dir) 68 - { 69 - } 70 - 71 - static void tee_shm_op_release(struct dma_buf *dmabuf) 72 - { 73 - struct tee_shm *shm = dmabuf->priv; 74 - 75 - tee_shm_release(shm); 76 - } 77 - 78 - static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) 79 - { 80 - struct tee_shm *shm = dmabuf->priv; 81 - size_t size = vma->vm_end - vma->vm_start; 82 - 83 - /* Refuse sharing shared memory provided by application */ 84 - if (shm->flags & TEE_SHM_USER_MAPPED) 85 - return -EINVAL; 86 - 87 - return remap_pfn_range(vma, vma->vm_start, 
shm->paddr >> PAGE_SHIFT, 88 - size, vma->vm_page_prot); 89 - } 90 - 91 - static const struct dma_buf_ops tee_shm_dma_buf_ops = { 92 - .map_dma_buf = tee_shm_op_map_dma_buf, 93 - .unmap_dma_buf = tee_shm_op_unmap_dma_buf, 94 - .release = tee_shm_op_release, 95 - .mmap = tee_shm_op_mmap, 96 - }; 97 69 98 70 struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags) 99 71 { ··· 90 140 goto err_dev_put; 91 141 } 92 142 143 + refcount_set(&shm->refcount, 1); 93 144 shm->flags = flags | TEE_SHM_POOL; 94 145 shm->ctx = ctx; 95 146 if (flags & TEE_SHM_DMA_BUF) ··· 104 153 goto err_kfree; 105 154 } 106 155 107 - 108 156 if (flags & TEE_SHM_DMA_BUF) { 109 - DEFINE_DMA_BUF_EXPORT_INFO(exp_info); 110 - 111 157 mutex_lock(&teedev->mutex); 112 158 shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL); 113 159 mutex_unlock(&teedev->mutex); ··· 112 164 ret = ERR_PTR(shm->id); 113 165 goto err_pool_free; 114 166 } 115 - 116 - exp_info.ops = &tee_shm_dma_buf_ops; 117 - exp_info.size = shm->size; 118 - exp_info.flags = O_RDWR; 119 - exp_info.priv = shm; 120 - 121 - shm->dmabuf = dma_buf_export(&exp_info); 122 - if (IS_ERR(shm->dmabuf)) { 123 - ret = ERR_CAST(shm->dmabuf); 124 - goto err_rem; 125 - } 126 167 } 127 168 128 169 teedev_ctx_get(ctx); 129 170 130 171 return shm; 131 - err_rem: 132 - if (flags & TEE_SHM_DMA_BUF) { 133 - mutex_lock(&teedev->mutex); 134 - idr_remove(&teedev->idr, shm->id); 135 - mutex_unlock(&teedev->mutex); 136 - } 137 172 err_pool_free: 138 173 poolm->ops->free(poolm, shm); 139 174 err_kfree: ··· 177 246 goto err; 178 247 } 179 248 249 + refcount_set(&shm->refcount, 1); 180 250 shm->flags = flags | TEE_SHM_REGISTER; 181 251 shm->ctx = ctx; 182 252 shm->id = -1; ··· 238 306 goto err; 239 307 } 240 308 241 - if (flags & TEE_SHM_DMA_BUF) { 242 - DEFINE_DMA_BUF_EXPORT_INFO(exp_info); 243 - 244 - exp_info.ops = &tee_shm_dma_buf_ops; 245 - exp_info.size = shm->size; 246 - exp_info.flags = O_RDWR; 247 - exp_info.priv = shm; 248 - 249 - 
shm->dmabuf = dma_buf_export(&exp_info); 250 - if (IS_ERR(shm->dmabuf)) { 251 - ret = ERR_CAST(shm->dmabuf); 252 - teedev->desc->ops->shm_unregister(ctx, shm); 253 - goto err; 254 - } 255 - } 256 - 257 309 return shm; 258 310 err: 259 311 if (shm) { ··· 255 339 } 256 340 EXPORT_SYMBOL_GPL(tee_shm_register); 257 341 342 + static int tee_shm_fop_release(struct inode *inode, struct file *filp) 343 + { 344 + tee_shm_put(filp->private_data); 345 + return 0; 346 + } 347 + 348 + static int tee_shm_fop_mmap(struct file *filp, struct vm_area_struct *vma) 349 + { 350 + struct tee_shm *shm = filp->private_data; 351 + size_t size = vma->vm_end - vma->vm_start; 352 + 353 + /* Refuse sharing shared memory provided by application */ 354 + if (shm->flags & TEE_SHM_USER_MAPPED) 355 + return -EINVAL; 356 + 357 + /* check for overflowing the buffer's size */ 358 + if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT) 359 + return -EINVAL; 360 + 361 + return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT, 362 + size, vma->vm_page_prot); 363 + } 364 + 365 + static const struct file_operations tee_shm_fops = { 366 + .owner = THIS_MODULE, 367 + .release = tee_shm_fop_release, 368 + .mmap = tee_shm_fop_mmap, 369 + }; 370 + 258 371 /** 259 372 * tee_shm_get_fd() - Increase reference count and return file descriptor 260 373 * @shm: Shared memory handle ··· 296 351 if (!(shm->flags & TEE_SHM_DMA_BUF)) 297 352 return -EINVAL; 298 353 299 - get_dma_buf(shm->dmabuf); 300 - fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC); 354 + /* matched by tee_shm_put() in tee_shm_op_release() */ 355 + refcount_inc(&shm->refcount); 356 + fd = anon_inode_getfd("tee_shm", &tee_shm_fops, shm, O_RDWR); 301 357 if (fd < 0) 302 - dma_buf_put(shm->dmabuf); 358 + tee_shm_put(shm); 303 359 return fd; 304 360 } 305 361 ··· 310 364 */ 311 365 void tee_shm_free(struct tee_shm *shm) 312 366 { 313 - /* 314 - * dma_buf_put() decreases the dmabuf reference counter and will 315 - * call tee_shm_release() when the 
last reference is gone. 316 - * 317 - * In the case of driver private memory we call tee_shm_release 318 - * directly instead as it doesn't have a reference counter. 319 - */ 320 - if (shm->flags & TEE_SHM_DMA_BUF) 321 - dma_buf_put(shm->dmabuf); 322 - else 323 - tee_shm_release(shm); 367 + tee_shm_put(shm); 324 368 } 325 369 EXPORT_SYMBOL_GPL(tee_shm_free); 326 370 ··· 417 481 teedev = ctx->teedev; 418 482 mutex_lock(&teedev->mutex); 419 483 shm = idr_find(&teedev->idr, id); 484 + /* 485 + * If the tee_shm was found in the IDR it must have a refcount 486 + * larger than 0 due to the guarantee in tee_shm_put() below. So 487 + * it's safe to use refcount_inc(). 488 + */ 420 489 if (!shm || shm->ctx != ctx) 421 490 shm = ERR_PTR(-EINVAL); 422 - else if (shm->flags & TEE_SHM_DMA_BUF) 423 - get_dma_buf(shm->dmabuf); 491 + else 492 + refcount_inc(&shm->refcount); 424 493 mutex_unlock(&teedev->mutex); 425 494 return shm; 426 495 } ··· 437 496 */ 438 497 void tee_shm_put(struct tee_shm *shm) 439 498 { 440 - if (shm->flags & TEE_SHM_DMA_BUF) 441 - dma_buf_put(shm->dmabuf); 499 + struct tee_device *teedev = shm->ctx->teedev; 500 + bool do_release = false; 501 + 502 + mutex_lock(&teedev->mutex); 503 + if (refcount_dec_and_test(&shm->refcount)) { 504 + /* 505 + * refcount has reached 0, we must now remove it from the 506 + * IDR before releasing the mutex. This will guarantee that 507 + * the refcount_inc() in tee_shm_get_from_id() never starts 508 + * from 0. 509 + */ 510 + if (shm->flags & TEE_SHM_DMA_BUF) 511 + idr_remove(&teedev->idr, shm->id); 512 + do_release = true; 513 + } 514 + mutex_unlock(&teedev->mutex); 515 + 516 + if (do_release) 517 + tee_shm_release(teedev, shm); 442 518 } 443 519 EXPORT_SYMBOL_GPL(tee_shm_put);
+2 -2
include/linux/tee_drv.h
··· 195 195 * @offset: offset of buffer in user space 196 196 * @pages: locked pages from userspace 197 197 * @num_pages: number of locked pages 198 - * @dmabuf: dmabuf used to for exporting to user space 198 + * @refcount: reference counter 199 199 * @flags: defined by TEE_SHM_* in tee_drv.h 200 200 * @id: unique id of a shared memory object on this device, shared 201 201 * with user space ··· 214 214 unsigned int offset; 215 215 struct page **pages; 216 216 size_t num_pages; 217 - struct dma_buf *dmabuf; 217 + refcount_t refcount; 218 218 u32 flags; 219 219 int id; 220 220 u64 sec_world_id;