// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2025, Linaro Limited
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/genalloc.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tee_core.h>
#include <linux/types.h>
#include "optee_private.h"

struct optee_protmem_dyn_pool {
	struct tee_protmem_pool pool;
	struct gen_pool *gen_pool;
	struct optee *optee;
	size_t page_count;
	u32 *mem_attrs;
	u_int mem_attr_count;
	refcount_t refcount;
	u32 use_case;
	struct tee_shm *protmem;
	/* Protects when initializing and tearing down this struct */
	struct mutex mutex;
};

static struct optee_protmem_dyn_pool *
to_protmem_dyn_pool(struct tee_protmem_pool *pool)
{
	return container_of(pool, struct optee_protmem_dyn_pool, pool);
}

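/*
 * Allocate the backing memory for the pool and lend it to the secure
 * world. The lent region is then managed with a genalloc pool so that
 * individual buffers can be carved out of it locally, without further
 * calls into OP-TEE.
 */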
static int init_dyn_protmem(struct optee_protmem_dyn_pool *rp)
{
	int rc;

	rp->protmem = tee_shm_alloc_dma_mem(rp->optee->ctx, rp->page_count);
	if (IS_ERR(rp->protmem)) {
		rc = PTR_ERR(rp->protmem);
		goto err_null_protmem;
	}

	/*
	 * TODO unmap the memory range since the physical memory will
	 * become inaccessible after the lend_protmem() call.
	 *
	 * If the platform supports a hypervisor at EL2, it will unmap the
	 * intermediate physical memory for us and stop cache pre-fetch of
	 * the memory.
	 */
	rc = rp->optee->ops->lend_protmem(rp->optee, rp->protmem,
					  rp->mem_attrs,
					  rp->mem_attr_count, rp->use_case);
	if (rc)
		goto err_put_shm;
	rp->protmem->flags |= TEE_SHM_DYNAMIC;

	rp->gen_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!rp->gen_pool) {
		rc = -ENOMEM;
		goto err_reclaim;
	}

	rc = gen_pool_add(rp->gen_pool, rp->protmem->paddr,
			  rp->protmem->size, -1);
	if (rc)
		goto err_free_pool;

	refcount_set(&rp->refcount, 1);
	return 0;

err_free_pool:
	gen_pool_destroy(rp->gen_pool);
	rp->gen_pool = NULL;
err_reclaim:
	rp->optee->ops->reclaim_protmem(rp->optee, rp->protmem);
err_put_shm:
	tee_shm_put(rp->protmem);
err_null_protmem:
	rp->protmem = NULL;
	return rc;
}

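/*
 * Take a reference on the pool, lazily initializing it on first use.
 * The fast path is a plain refcount increment; the mutex is only taken
 * when the refcount has dropped to zero, that is, when the pool needs
 * to be (re)initialized or is racing with a teardown in
 * put_dyn_protmem().
 */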
static int get_dyn_protmem(struct optee_protmem_dyn_pool *rp)
{
	int rc = 0;

	if (!refcount_inc_not_zero(&rp->refcount)) {
		mutex_lock(&rp->mutex);
		if (rp->gen_pool) {
			/*
			 * Another thread has already initialized the pool
			 * before us, or the pool was just about to be torn
			 * down. In the latter case the refcount is zero, so
			 * a plain refcount_inc() would warn and saturate
			 * the refcount; revive it instead and let
			 * put_dyn_protmem() detect under the mutex that
			 * the pool is in use again.
			 */
			if (!refcount_inc_not_zero(&rp->refcount))
				refcount_set(&rp->refcount, 1);
		} else {
			rc = init_dyn_protmem(rp);
		}
		mutex_unlock(&rp->mutex);
	}

	return rc;
}

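/*
 * Tear down the pool: destroy the genalloc pool, reclaim the memory
 * from the secure world and drop the final reference on the shm.
 * Called with rp->mutex held and the refcount at zero.
 */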
static void release_dyn_protmem(struct optee_protmem_dyn_pool *rp)
{
	gen_pool_destroy(rp->gen_pool);
	rp->gen_pool = NULL;

	rp->optee->ops->reclaim_protmem(rp->optee, rp->protmem);
	rp->protmem->flags &= ~TEE_SHM_DYNAMIC;

	WARN(refcount_read(&rp->protmem->refcount) != 1, "Unexpected refcount");
	tee_shm_put(rp->protmem);
	rp->protmem = NULL;
}

static void put_dyn_protmem(struct optee_protmem_dyn_pool *rp)
{
	if (refcount_dec_and_test(&rp->refcount)) {
		mutex_lock(&rp->mutex);
		/*
		 * Re-check the refcount under the mutex: a racing
		 * get_dyn_protmem() may have revived the pool between
		 * the refcount_dec_and_test() above and this point.
		 */
		if (rp->gen_pool && !refcount_read(&rp->refcount))
			release_dyn_protmem(rp);
		mutex_unlock(&rp->mutex);
	}
}

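/*
 * Carve a page-aligned range out of the protected region and describe
 * it with a single-entry sg_table. The offset of the range within the
 * region is reported back so the caller can reference the parent shm.
 */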
static int protmem_pool_op_dyn_alloc(struct tee_protmem_pool *pool,
				     struct sg_table *sgt, size_t size,
				     size_t *offs)
{
	struct optee_protmem_dyn_pool *rp = to_protmem_dyn_pool(pool);
	size_t sz = ALIGN(size, PAGE_SIZE);
	phys_addr_t pa;
	int rc;

	rc = get_dyn_protmem(rp);
	if (rc)
		return rc;

	pa = gen_pool_alloc(rp->gen_pool, sz);
	if (!pa) {
		rc = -ENOMEM;
		goto err_put;
	}

	rc = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (rc)
		goto err_free;

	sg_set_page(sgt->sgl, phys_to_page(pa), size, 0);
	*offs = pa - rp->protmem->paddr;

	return 0;
err_free:
	/*
	 * gen_pool_free() rounds the size up to the pool's minimum order
	 * (PAGE_SHIFT), so freeing with the unaligned size releases the
	 * same number of pages that were allocated above.
	 */
	gen_pool_free(rp->gen_pool, pa, size);
err_put:
	put_dyn_protmem(rp);

	return rc;
}

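/*
 * Return the carved-out range(s) to the genalloc pool and drop the
 * pool reference taken in protmem_pool_op_dyn_alloc(). When the last
 * reference goes away, the whole region is reclaimed from the secure
 * world.
 */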
static void protmem_pool_op_dyn_free(struct tee_protmem_pool *pool,
				     struct sg_table *sgt)
{
	struct optee_protmem_dyn_pool *rp = to_protmem_dyn_pool(pool);
	struct scatterlist *sg;
	int i;

	for_each_sgtable_sg(sgt, sg, i)
		gen_pool_free(rp->gen_pool, sg_phys(sg), sg->length);
	sg_free_table(sgt);
	put_dyn_protmem(rp);
}

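/*
 * Every allocation is carved out of the single lent region, so the shm
 * representing that region is reported as the parent of each
 * allocation.
 */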
static int protmem_pool_op_dyn_update_shm(struct tee_protmem_pool *pool,
					  struct sg_table *sgt, size_t offs,
					  struct tee_shm *shm,
					  struct tee_shm **parent_shm)
{
	struct optee_protmem_dyn_pool *rp = to_protmem_dyn_pool(pool);

	*parent_shm = rp->protmem;

	return 0;
}

static void pool_op_dyn_destroy_pool(struct tee_protmem_pool *pool)
{
	struct optee_protmem_dyn_pool *rp = to_protmem_dyn_pool(pool);

	mutex_destroy(&rp->mutex);
	kfree(rp);
}

static struct tee_protmem_pool_ops protmem_pool_ops_dyn = {
	.alloc = protmem_pool_op_dyn_alloc,
	.free = protmem_pool_op_dyn_free,
	.update_shm = protmem_pool_op_dyn_update_shm,
	.destroy_pool = pool_op_dyn_destroy_pool,
};

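/*
 * Query the secure world for the protected memory configuration of a
 * use case: minimum pool size, physical address width and the list of
 * memory attributes. When called with mem_attrs == NULL, the required
 * attribute count is returned in *ma_count together with -ENOSPC,
 * derived from OP-TEE's TEEC_ERROR_SHORT_BUFFER result.
 */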
static int get_protmem_config(struct optee *optee, u32 use_case,
			      size_t *min_size, u_int *pa_width,
			      u32 *mem_attrs, u_int *ma_count)
{
	struct tee_param params[2] = {
		[0] = {
			.attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT,
			.u.value.a = use_case,
		},
		[1] = {
			.attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT,
		},
	};
	struct optee_shm_arg_entry *entry;
	struct tee_shm *shm_param = NULL;
	struct optee_msg_arg *msg_arg;
	struct tee_shm *shm;
	u_int offs;
	int rc;

	if (mem_attrs && *ma_count) {
		params[1].u.memref.size = *ma_count * sizeof(*mem_attrs);
		shm_param = tee_shm_alloc_priv_buf(optee->ctx,
						   params[1].u.memref.size);
		if (IS_ERR(shm_param))
			return PTR_ERR(shm_param);
		params[1].u.memref.shm = shm_param;
	}

	msg_arg = optee_get_msg_arg(optee->ctx, ARRAY_SIZE(params), &entry,
				    &shm, &offs);
	if (IS_ERR(msg_arg)) {
		rc = PTR_ERR(msg_arg);
		goto out_free_shm;
	}
	msg_arg->cmd = OPTEE_MSG_CMD_GET_PROTMEM_CONFIG;

	rc = optee->ops->to_msg_param(optee, msg_arg->params,
				      ARRAY_SIZE(params), params);
	if (rc)
		goto out_free_msg;

	rc = optee->ops->do_call_with_arg(optee->ctx, shm, offs, false);
	if (rc)
		goto out_free_msg;
	if (msg_arg->ret && msg_arg->ret != TEEC_ERROR_SHORT_BUFFER) {
		rc = -EINVAL;
		goto out_free_msg;
	}

	rc = optee->ops->from_msg_param(optee, params, ARRAY_SIZE(params),
					msg_arg->params);
	if (rc)
		goto out_free_msg;

	if (!msg_arg->ret && mem_attrs &&
	    *ma_count < params[1].u.memref.size / sizeof(*mem_attrs)) {
		rc = -EINVAL;
		goto out_free_msg;
	}

	*min_size = params[0].u.value.a;
	*pa_width = params[0].u.value.c;
	*ma_count = params[1].u.memref.size / sizeof(*mem_attrs);

	if (msg_arg->ret == TEEC_ERROR_SHORT_BUFFER) {
		rc = -ENOSPC;
		goto out_free_msg;
	}

	if (mem_attrs)
		memcpy(mem_attrs, tee_shm_get_va(shm_param, 0),
		       params[1].u.memref.size);

out_free_msg:
	optee_free_msg_arg(optee->ctx, entry, offs);
out_free_shm:
	if (shm_param)
		tee_shm_free(shm_param);
	return rc;
}

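/*
 * Create a dynamic protected memory pool for the given DMA heap id.
 * The configuration is fetched in two steps: a first call to learn how
 * many memory attributes there are, then a second call to retrieve
 * them once a large enough buffer has been allocated. The pool memory
 * itself is not allocated until the first allocation is made.
 */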
struct tee_protmem_pool *optee_protmem_alloc_dyn_pool(struct optee *optee,
						      enum tee_dma_heap_id id)
{
	struct optee_protmem_dyn_pool *rp;
	size_t min_size;
	u_int pa_width;
	int rc;

	rp = kzalloc(sizeof(*rp), GFP_KERNEL);
	if (!rp)
		return ERR_PTR(-ENOMEM);
	rp->use_case = id;

	rc = get_protmem_config(optee, id, &min_size, &pa_width, NULL,
				&rp->mem_attr_count);
	if (rc) {
		if (rc != -ENOSPC)
			goto err;
		rp->mem_attrs = kcalloc(rp->mem_attr_count,
					sizeof(*rp->mem_attrs), GFP_KERNEL);
		if (!rp->mem_attrs) {
			rc = -ENOMEM;
			goto err;
		}
		rc = get_protmem_config(optee, id, &min_size, &pa_width,
					rp->mem_attrs, &rp->mem_attr_count);
		if (rc)
			goto err_kfree_mem_attrs;
	}

	rc = optee_set_dma_mask(optee, pa_width);
	if (rc)
		goto err_kfree_mem_attrs;

	rp->pool.ops = &protmem_pool_ops_dyn;
	rp->optee = optee;
	rp->page_count = min_size / PAGE_SIZE;
	mutex_init(&rp->mutex);

	return &rp->pool;

err_kfree_mem_attrs:
	kfree(rp->mem_attrs);
err:
	kfree(rp);
	return ERR_PTR(rc);
}