Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

tee: flexible shared memory pool creation

Makes creation of shm pools more flexible by adding new more primitive
functions to allocate a shm pool. This makes it easier to add driver
specific shm pool management.

Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
Signed-off-by: Volodymyr Babchuk <vlad.babchuk@gmail.com>

+200 -123
+2 -55
drivers/tee/tee_private.h
··· 21 21 #include <linux/mutex.h> 22 22 #include <linux/types.h> 23 23 24 - struct tee_device; 25 - 26 - /** 27 - * struct tee_shm - shared memory object 28 - * @teedev: device used to allocate the object 29 - * @ctx: context using the object, if NULL the context is gone 30 - * @link link element 31 - * @paddr: physical address of the shared memory 32 - * @kaddr: virtual address of the shared memory 33 - * @size: size of shared memory 34 - * @dmabuf: dmabuf used to for exporting to user space 35 - * @flags: defined by TEE_SHM_* in tee_drv.h 36 - * @id: unique id of a shared memory object on this device 37 - */ 38 - struct tee_shm { 39 - struct tee_device *teedev; 40 - struct tee_context *ctx; 41 - struct list_head link; 42 - phys_addr_t paddr; 43 - void *kaddr; 44 - size_t size; 45 - struct dma_buf *dmabuf; 46 - u32 flags; 47 - int id; 48 - }; 49 - 50 - struct tee_shm_pool_mgr; 51 - 52 - /** 53 - * struct tee_shm_pool_mgr_ops - shared memory pool manager operations 54 - * @alloc: called when allocating shared memory 55 - * @free: called when freeing shared memory 56 - */ 57 - struct tee_shm_pool_mgr_ops { 58 - int (*alloc)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm, 59 - size_t size); 60 - void (*free)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm); 61 - }; 62 - 63 - /** 64 - * struct tee_shm_pool_mgr - shared memory manager 65 - * @ops: operations 66 - * @private_data: private data for the shared memory manager 67 - */ 68 - struct tee_shm_pool_mgr { 69 - const struct tee_shm_pool_mgr_ops *ops; 70 - void *private_data; 71 - }; 72 - 73 24 /** 74 25 * struct tee_shm_pool - shared memory pool 75 26 * @private_mgr: pool manager for shared memory only between kernel 76 27 * and secure world 77 28 * @dma_buf_mgr: pool manager for shared memory exported to user space 78 - * @destroy: called when destroying the pool 79 - * @private_data: private data for the pool 80 29 */ 81 30 struct tee_shm_pool { 82 - struct tee_shm_pool_mgr private_mgr; 83 - struct tee_shm_pool_mgr dma_buf_mgr; 84 - void (*destroy)(struct tee_shm_pool *pool); 85 - void *private_data; 31 + struct tee_shm_pool_mgr *private_mgr; 32 + struct tee_shm_pool_mgr *dma_buf_mgr; 86 33 }; 87 34 88 35 #define TEE_DEVICE_FLAG_REGISTERED 0x1
+4 -4
drivers/tee/tee_shm.c
··· 32 32 mutex_unlock(&teedev->mutex); 33 33 34 34 if (shm->flags & TEE_SHM_DMA_BUF) 35 - poolm = &teedev->pool->dma_buf_mgr; 35 + poolm = teedev->pool->dma_buf_mgr; 36 36 else 37 - poolm = &teedev->pool->private_mgr; 37 + poolm = teedev->pool->private_mgr; 38 38 39 39 poolm->ops->free(poolm, shm); 40 40 kfree(shm); ··· 139 139 shm->teedev = teedev; 140 140 shm->ctx = ctx; 141 141 if (flags & TEE_SHM_DMA_BUF) 142 - poolm = &teedev->pool->dma_buf_mgr; 142 + poolm = teedev->pool->dma_buf_mgr; 143 143 else 144 - poolm = &teedev->pool->private_mgr; 144 + poolm = teedev->pool->private_mgr; 145 145 146 146 rc = poolm->ops->alloc(poolm, shm, size); 147 147 if (rc) {
+103 -64
drivers/tee/tee_shm_pool.c
··· 44 44 shm->kaddr = NULL; 45 45 } 46 46 47 + static void pool_op_gen_destroy_poolmgr(struct tee_shm_pool_mgr *poolm) 48 + { 49 + gen_pool_destroy(poolm->private_data); 50 + kfree(poolm); 51 + } 52 + 47 53 static const struct tee_shm_pool_mgr_ops pool_ops_generic = { 48 54 .alloc = pool_op_gen_alloc, 49 55 .free = pool_op_gen_free, 56 + .destroy_poolmgr = pool_op_gen_destroy_poolmgr, 50 57 }; 51 - 52 - static void pool_res_mem_destroy(struct tee_shm_pool *pool) 53 - { 54 - gen_pool_destroy(pool->private_mgr.private_data); 55 - gen_pool_destroy(pool->dma_buf_mgr.private_data); 56 - } 57 - 58 - static int pool_res_mem_mgr_init(struct tee_shm_pool_mgr *mgr, 59 - struct tee_shm_pool_mem_info *info, 60 - int min_alloc_order) 61 - { 62 - size_t page_mask = PAGE_SIZE - 1; 63 - struct gen_pool *genpool = NULL; 64 - int rc; 65 - 66 - /* 67 - * Start and end must be page aligned 68 - */ 69 - if ((info->vaddr & page_mask) || (info->paddr & page_mask) || 70 - (info->size & page_mask)) 71 - return -EINVAL; 72 - 73 - genpool = gen_pool_create(min_alloc_order, -1); 74 - if (!genpool) 75 - return -ENOMEM; 76 - 77 - gen_pool_set_algo(genpool, gen_pool_best_fit, NULL); 78 - rc = gen_pool_add_virt(genpool, info->vaddr, info->paddr, info->size, 79 - -1); 80 - if (rc) { 81 - gen_pool_destroy(genpool); 82 - return rc; 83 - } 84 - 85 - mgr->private_data = genpool; 86 - mgr->ops = &pool_ops_generic; 87 - return 0; 88 - } 89 58 90 59 /** 91 60 * tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved ··· 73 104 tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info, 74 105 struct tee_shm_pool_mem_info *dmabuf_info) 75 106 { 76 - struct tee_shm_pool *pool = NULL; 77 - int ret; 78 - 79 - pool = kzalloc(sizeof(*pool), GFP_KERNEL); 80 - if (!pool) { 81 - ret = -ENOMEM; 82 - goto err; 83 - } 107 + struct tee_shm_pool_mgr *priv_mgr; 108 + struct tee_shm_pool_mgr *dmabuf_mgr; 109 + void *rc; 84 110 85 111 /* 86 112 * Create the pool for driver private shared memory 
87 113 */ 88 - ret = pool_res_mem_mgr_init(&pool->private_mgr, priv_info, 89 - 3 /* 8 byte aligned */); 90 - if (ret) 91 - goto err; 114 + rc = tee_shm_pool_mgr_alloc_res_mem(priv_info->vaddr, priv_info->paddr, 115 + priv_info->size, 116 + 3 /* 8 byte aligned */); 117 + if (IS_ERR(rc)) 118 + return rc; 119 + priv_mgr = rc; 92 120 93 121 /* 94 122 * Create the pool for dma_buf shared memory 95 123 */ 96 - ret = pool_res_mem_mgr_init(&pool->dma_buf_mgr, dmabuf_info, 97 - PAGE_SHIFT); 98 - if (ret) 99 - goto err; 124 + rc = tee_shm_pool_mgr_alloc_res_mem(dmabuf_info->vaddr, 125 + dmabuf_info->paddr, 126 + dmabuf_info->size, PAGE_SHIFT); 127 + if (IS_ERR(rc)) 128 + goto err_free_priv_mgr; 129 + dmabuf_mgr = rc; 100 130 101 - pool->destroy = pool_res_mem_destroy; 102 - return pool; 103 - err: 104 - if (ret == -ENOMEM) 105 - pr_err("%s: can't allocate memory for res_mem shared memory pool\n", __func__); 106 - if (pool && pool->private_mgr.private_data) 107 - gen_pool_destroy(pool->private_mgr.private_data); 108 - kfree(pool); 109 - return ERR_PTR(ret); 131 + rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr); 132 + if (IS_ERR(rc)) 133 + goto err_free_dmabuf_mgr; 134 + 135 + return rc; 136 + 137 + err_free_dmabuf_mgr: 138 + tee_shm_pool_mgr_destroy(dmabuf_mgr); 139 + err_free_priv_mgr: 140 + tee_shm_pool_mgr_destroy(priv_mgr); 141 + 142 + return rc; 110 143 } 111 144 EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem); 145 + 146 + struct tee_shm_pool_mgr *tee_shm_pool_mgr_alloc_res_mem(unsigned long vaddr, 147 + phys_addr_t paddr, 148 + size_t size, 149 + int min_alloc_order) 150 + { 151 + const size_t page_mask = PAGE_SIZE - 1; 152 + struct tee_shm_pool_mgr *mgr; 153 + int rc; 154 + 155 + /* Start and end must be page aligned */ 156 + if (vaddr & page_mask || paddr & page_mask || size & page_mask) 157 + return ERR_PTR(-EINVAL); 158 + 159 + mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); 160 + if (!mgr) 161 + return ERR_PTR(-ENOMEM); 162 + 163 + mgr->private_data = gen_pool_create(min_alloc_order, -1); 164 + if (!mgr->private_data) { 165 + rc = -ENOMEM; 166 + goto err; 167 + } 168 + 169 + gen_pool_set_algo(mgr->private_data, gen_pool_best_fit, NULL); 170 + rc = gen_pool_add_virt(mgr->private_data, vaddr, paddr, size, -1); 171 + if (rc) { 172 + gen_pool_destroy(mgr->private_data); 173 + goto err; 174 + } 175 + 176 + mgr->ops = &pool_ops_generic; 177 + 178 + return mgr; 179 + err: 180 + kfree(mgr); 181 + 182 + return ERR_PTR(rc); 183 + } 184 + EXPORT_SYMBOL_GPL(tee_shm_pool_mgr_alloc_res_mem); 185 + 186 + static bool check_mgr_ops(struct tee_shm_pool_mgr *mgr) 187 + { 188 + return mgr && mgr->ops && mgr->ops->alloc && mgr->ops->free && 189 + mgr->ops->destroy_poolmgr; 190 + } 191 + 192 + struct tee_shm_pool *tee_shm_pool_alloc(struct tee_shm_pool_mgr *priv_mgr, 193 + struct tee_shm_pool_mgr *dmabuf_mgr) 194 + { 195 + struct tee_shm_pool *pool; 196 + 197 + if (!check_mgr_ops(priv_mgr) || !check_mgr_ops(dmabuf_mgr)) 198 + return ERR_PTR(-EINVAL); 199 + 200 + pool = kzalloc(sizeof(*pool), GFP_KERNEL); 201 + if (!pool) 202 + return ERR_PTR(-ENOMEM); 203 + 204 + pool->private_mgr = priv_mgr; 205 + pool->dma_buf_mgr = dmabuf_mgr; 206 + 207 + return pool; 208 + } 209 + EXPORT_SYMBOL_GPL(tee_shm_pool_alloc); 112 210 113 211 /** 114 212 * tee_shm_pool_free() - Free a shared memory pool ··· 186 150 */ 187 151 void tee_shm_pool_free(struct tee_shm_pool *pool) 188 152 { 189 - pool->destroy(pool); 153 + if (pool->private_mgr) 154 + tee_shm_pool_mgr_destroy(pool->private_mgr); 155 + if (pool->dma_buf_mgr) 156 + tee_shm_pool_mgr_destroy(pool->dma_buf_mgr); 190 157 kfree(pool); 191 158 } 192 159 EXPORT_SYMBOL_GPL(tee_shm_pool_free);
+91
include/linux/tee_drv.h
··· 150 150 void tee_device_unregister(struct tee_device *teedev); 151 151 152 152 /** 153 + * struct tee_shm - shared memory object 154 + * @teedev: device used to allocate the object 155 + * @ctx: context using the object, if NULL the context is gone 156 + * @link link element 157 + * @paddr: physical address of the shared memory 158 + * @kaddr: virtual address of the shared memory 159 + * @size: size of shared memory 160 + * @offset: offset of buffer in user space 161 + * @pages: locked pages from userspace 162 + * @num_pages: number of locked pages 163 + * @dmabuf: dmabuf used to for exporting to user space 164 + * @flags: defined by TEE_SHM_* in tee_drv.h 165 + * @id: unique id of a shared memory object on this device 166 + * 167 + * This pool is only supposed to be accessed directly from the TEE 168 + * subsystem and from drivers that implements their own shm pool manager. 169 + */ 170 + struct tee_shm { 171 + struct tee_device *teedev; 172 + struct tee_context *ctx; 173 + struct list_head link; 174 + phys_addr_t paddr; 175 + void *kaddr; 176 + size_t size; 177 + unsigned int offset; 178 + struct page **pages; 179 + size_t num_pages; 180 + struct dma_buf *dmabuf; 181 + u32 flags; 182 + int id; 183 + }; 184 + 185 + /** 186 + * struct tee_shm_pool_mgr - shared memory manager 187 + * @ops: operations 188 + * @private_data: private data for the shared memory manager 189 + */ 190 + struct tee_shm_pool_mgr { 191 + const struct tee_shm_pool_mgr_ops *ops; 192 + void *private_data; 193 + }; 194 + 195 + /** 196 + * struct tee_shm_pool_mgr_ops - shared memory pool manager operations 197 + * @alloc: called when allocating shared memory 198 + * @free: called when freeing shared memory 199 + * @destroy_poolmgr: called when destroying the pool manager 200 + */ 201 + struct tee_shm_pool_mgr_ops { 202 + int (*alloc)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm, 203 + size_t size); 204 + void (*free)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm); 205 + void (*destroy_poolmgr)(struct tee_shm_pool_mgr *poolmgr); 206 + }; 207 + 208 + /** 209 + * tee_shm_pool_alloc() - Create a shared memory pool from shm managers 210 + * @priv_mgr: manager for driver private shared memory allocations 211 + * @dmabuf_mgr: manager for dma-buf shared memory allocations 212 + * 213 + * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied 214 + * in @dmabuf, others will use the range provided by @priv. 215 + * 216 + * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure. 217 + */ 218 + struct tee_shm_pool *tee_shm_pool_alloc(struct tee_shm_pool_mgr *priv_mgr, 219 + struct tee_shm_pool_mgr *dmabuf_mgr); 220 + 221 + /* 222 + * tee_shm_pool_mgr_alloc_res_mem() - Create a shm manager for reserved 223 + * memory 224 + * @vaddr: Virtual address of start of pool 225 + * @paddr: Physical address of start of pool 226 + * @size: Size in bytes of the pool 227 + * 228 + * @returns pointer to a 'struct tee_shm_pool_mgr' or an ERR_PTR on failure. 229 + */ 230 + struct tee_shm_pool_mgr *tee_shm_pool_mgr_alloc_res_mem(unsigned long vaddr, 231 + phys_addr_t paddr, 232 + size_t size, 233 + int min_alloc_order); 234 + 235 + /** 236 + * tee_shm_pool_mgr_destroy() - Free a shared memory manager 237 + */ 238 + static inline void tee_shm_pool_mgr_destroy(struct tee_shm_pool_mgr *poolm) 239 + { 240 + poolm->ops->destroy_poolmgr(poolm); 241 + } 242 + 243 + /** 153 244 * struct tee_shm_pool_mem_info - holds information needed to create a shared 154 245 * memory pool 155 246 * @vaddr: Virtual address of start of pool