Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (c) 2015-2016, Linaro Limited
4 */
5
6#define pr_fmt(fmt) "%s: " fmt, __func__
7
8#include <linux/cdev.h>
9#include <linux/fs.h>
10#include <linux/idr.h>
11#include <linux/module.h>
12#include <linux/slab.h>
13#include <linux/tee_drv.h>
14#include <linux/uaccess.h>
15#include "tee_private.h"
16
17#define TEE_NUM_DEVICES 32
18
19#define TEE_IOCTL_PARAM_SIZE(x) (sizeof(struct tee_param) * (x))
20
21/*
22 * Unprivileged devices in the lower half range and privileged devices in
23 * the upper half range.
24 */
25static DECLARE_BITMAP(dev_mask, TEE_NUM_DEVICES);
26static DEFINE_SPINLOCK(driver_lock);
27
28static struct class *tee_class;
29static dev_t tee_devt;
30
31static struct tee_context *teedev_open(struct tee_device *teedev)
32{
33 int rc;
34 struct tee_context *ctx;
35
36 if (!tee_device_get(teedev))
37 return ERR_PTR(-EINVAL);
38
39 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
40 if (!ctx) {
41 rc = -ENOMEM;
42 goto err;
43 }
44
45 kref_init(&ctx->refcount);
46 ctx->teedev = teedev;
47 rc = teedev->desc->ops->open(ctx);
48 if (rc)
49 goto err;
50
51 return ctx;
52err:
53 kfree(ctx);
54 tee_device_put(teedev);
55 return ERR_PTR(rc);
56
57}
58
59void teedev_ctx_get(struct tee_context *ctx)
60{
61 if (ctx->releasing)
62 return;
63
64 kref_get(&ctx->refcount);
65}
66
/*
 * Final release of a context, called when the last reference is dropped.
 * The releasing flag is set first so that teedev_ctx_get()/put() called
 * from within the driver's release hook become no-ops during teardown.
 */
static void teedev_ctx_release(struct kref *ref)
{
	struct tee_context *ctx = container_of(ref, struct tee_context,
					       refcount);
	ctx->releasing = true;
	ctx->teedev->desc->ops->release(ctx);
	kfree(ctx);
}
75
76void teedev_ctx_put(struct tee_context *ctx)
77{
78 if (ctx->releasing)
79 return;
80
81 kref_put(&ctx->refcount, teedev_ctx_release);
82}
83
84static void teedev_close_context(struct tee_context *ctx)
85{
86 tee_device_put(ctx->teedev);
87 teedev_ctx_put(ctx);
88}
89
90static int tee_open(struct inode *inode, struct file *filp)
91{
92 struct tee_context *ctx;
93
94 ctx = teedev_open(container_of(inode->i_cdev, struct tee_device, cdev));
95 if (IS_ERR(ctx))
96 return PTR_ERR(ctx);
97
98 /*
99 * Default user-space behaviour is to wait for tee-supplicant
100 * if not present for any requests in this context.
101 */
102 ctx->supp_nowait = false;
103 filp->private_data = ctx;
104 return 0;
105}
106
107static int tee_release(struct inode *inode, struct file *filp)
108{
109 teedev_close_context(filp->private_data);
110 return 0;
111}
112
113static int tee_ioctl_version(struct tee_context *ctx,
114 struct tee_ioctl_version_data __user *uvers)
115{
116 struct tee_ioctl_version_data vers;
117
118 ctx->teedev->desc->ops->get_version(ctx->teedev, &vers);
119
120 if (ctx->teedev->desc->flags & TEE_DESC_PRIVILEGED)
121 vers.gen_caps |= TEE_GEN_CAP_PRIVILEGED;
122
123 if (copy_to_user(uvers, &vers, sizeof(vers)))
124 return -EFAULT;
125
126 return 0;
127}
128
/*
 * TEE_IOC_SHM_ALLOC: allocate driver-backed shared memory and export it
 * to user space as a dma-buf file descriptor.
 *
 * Returns the new file descriptor on success or a negative errno.
 */
static int tee_ioctl_shm_alloc(struct tee_context *ctx,
			       struct tee_ioctl_shm_alloc_data __user *udata)
{
	long ret;
	struct tee_ioctl_shm_alloc_data data;
	struct tee_shm *shm;

	if (copy_from_user(&data, udata, sizeof(data)))
		return -EFAULT;

	/* Currently no input flags are supported */
	if (data.flags)
		return -EINVAL;

	shm = tee_shm_alloc(ctx, data.size, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	/* Report the actual id/flags/size of the allocation back */
	data.id = shm->id;
	data.flags = shm->flags;
	data.size = shm->size;

	if (copy_to_user(udata, &data, sizeof(data)))
		ret = -EFAULT;
	else
		ret = tee_shm_get_fd(shm);

	/*
	 * When user space closes the file descriptor the shared memory
	 * should be freed or if tee_shm_get_fd() failed then it will
	 * be freed immediately.
	 */
	tee_shm_put(shm);
	return ret;
}
164
/*
 * TEE_IOC_SHM_REGISTER: register an existing user space buffer as shared
 * memory and export it as a dma-buf file descriptor.
 *
 * Returns the new file descriptor on success or a negative errno.
 */
static int
tee_ioctl_shm_register(struct tee_context *ctx,
		       struct tee_ioctl_shm_register_data __user *udata)
{
	long ret;
	struct tee_ioctl_shm_register_data data;
	struct tee_shm *shm;

	if (copy_from_user(&data, udata, sizeof(data)))
		return -EFAULT;

	/* Currently no input flags are supported */
	if (data.flags)
		return -EINVAL;

	shm = tee_shm_register(ctx, data.addr, data.length,
			       TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	/* Report the id/flags and possibly adjusted length back */
	data.id = shm->id;
	data.flags = shm->flags;
	data.length = shm->size;

	if (copy_to_user(udata, &data, sizeof(data)))
		ret = -EFAULT;
	else
		ret = tee_shm_get_fd(shm);
	/*
	 * When user space closes the file descriptor the shared memory
	 * should be freed or if tee_shm_get_fd() failed then it will
	 * be freed immediately.
	 */
	tee_shm_put(shm);
	return ret;
}
201
/*
 * Convert parameters from the user space ioctl layout into the kernel
 * internal struct tee_param representation.
 *
 * For memref parameters the shm id (ip.c) is resolved into a struct
 * tee_shm pointer with an increased reference count. The caller must
 * tee_shm_put() every non-NULL memref shm left in @params, also when
 * this function returns an error part-way through (the callers in this
 * file zero-allocate @params and put all non-NULL shm pointers).
 */
static int params_from_user(struct tee_context *ctx, struct tee_param *params,
			    size_t num_params,
			    struct tee_ioctl_param __user *uparams)
{
	size_t n;

	for (n = 0; n < num_params; n++) {
		struct tee_shm *shm;
		struct tee_ioctl_param ip;

		if (copy_from_user(&ip, uparams + n, sizeof(ip)))
			return -EFAULT;

		/* All unused attribute bits has to be zero */
		if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_MASK)
			return -EINVAL;

		params[n].attr = ip.attr;
		switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
			/* Nothing to copy in for none/output-only values */
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			params[n].u.value.a = ip.a;
			params[n].u.value.b = ip.b;
			params[n].u.value.c = ip.c;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			/*
			 * If we fail to get a pointer to a shared memory
			 * object (and increase the ref count) from an
			 * identifier we return an error. All pointers that
			 * has been added in params have an increased ref
			 * count. It's the callers responibility to do
			 * tee_shm_put() on all resolved pointers.
			 */
			shm = tee_shm_get_from_id(ctx, ip.c);
			if (IS_ERR(shm))
				return PTR_ERR(shm);

			/*
			 * Ensure offset + size does not overflow offset
			 * and does not overflow the size of the referred
			 * shared memory object.
			 */
			if ((ip.a + ip.b) < ip.a ||
			    (ip.a + ip.b) > shm->size) {
				tee_shm_put(shm);
				return -EINVAL;
			}

			params[n].u.memref.shm_offs = ip.a;
			params[n].u.memref.size = ip.b;
			params[n].u.memref.shm = shm;
			break;
		default:
			/* Unknown attribute */
			return -EINVAL;
		}
	}
	return 0;
}
267
268static int params_to_user(struct tee_ioctl_param __user *uparams,
269 size_t num_params, struct tee_param *params)
270{
271 size_t n;
272
273 for (n = 0; n < num_params; n++) {
274 struct tee_ioctl_param __user *up = uparams + n;
275 struct tee_param *p = params + n;
276
277 switch (p->attr) {
278 case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
279 case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
280 if (put_user(p->u.value.a, &up->a) ||
281 put_user(p->u.value.b, &up->b) ||
282 put_user(p->u.value.c, &up->c))
283 return -EFAULT;
284 break;
285 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
286 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
287 if (put_user((u64)p->u.memref.size, &up->b))
288 return -EFAULT;
289 default:
290 break;
291 }
292 }
293 return 0;
294}
295
/*
 * TEE_IOC_OPEN_SESSION: open a session towards a Trusted Application.
 *
 * The argument struct and its trailing parameter array arrive in one
 * user space buffer described by @ubuf; buf_len must match the header
 * plus parameter array size exactly.
 */
static int tee_ioctl_open_session(struct tee_context *ctx,
				  struct tee_ioctl_buf_data __user *ubuf)
{
	int rc;
	size_t n;
	struct tee_ioctl_buf_data buf;
	struct tee_ioctl_open_session_arg __user *uarg;
	struct tee_ioctl_open_session_arg arg;
	struct tee_ioctl_param __user *uparams = NULL;
	struct tee_param *params = NULL;
	bool have_session = false;

	if (!ctx->teedev->desc->ops->open_session)
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, sizeof(buf)))
		return -EFAULT;

	/* Bound the buffer before trusting num_params below */
	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
	    buf.buf_len < sizeof(struct tee_ioctl_open_session_arg))
		return -EINVAL;

	uarg = u64_to_user_ptr(buf.buf_ptr);
	if (copy_from_user(&arg, uarg, sizeof(arg)))
		return -EFAULT;

	/* num_params must agree exactly with the supplied buffer length */
	if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len)
		return -EINVAL;

	if (arg.num_params) {
		params = kcalloc(arg.num_params, sizeof(struct tee_param),
				 GFP_KERNEL);
		if (!params)
			return -ENOMEM;
		uparams = uarg->params;
		rc = params_from_user(ctx, params, arg.num_params, uparams);
		if (rc)
			goto out;
	}

	rc = ctx->teedev->desc->ops->open_session(ctx, &arg, params);
	if (rc)
		goto out;
	have_session = true;

	if (put_user(arg.session, &uarg->session) ||
	    put_user(arg.ret, &uarg->ret) ||
	    put_user(arg.ret_origin, &uarg->ret_origin)) {
		rc = -EFAULT;
		goto out;
	}
	rc = params_to_user(uparams, arg.num_params, params);
out:
	/*
	 * If we've succeeded to open the session but failed to communicate
	 * it back to user space, close the session again to avoid leakage.
	 */
	if (rc && have_session && ctx->teedev->desc->ops->close_session)
		ctx->teedev->desc->ops->close_session(ctx, arg.session);

	if (params) {
		/* Decrease ref count for all valid shared memory pointers */
		for (n = 0; n < arg.num_params; n++)
			if (tee_param_is_memref(params + n) &&
			    params[n].u.memref.shm)
				tee_shm_put(params[n].u.memref.shm);
		kfree(params);
	}

	return rc;
}
367
/*
 * TEE_IOC_INVOKE: invoke a function in an already opened session.
 * Same buffer layout and validation rules as tee_ioctl_open_session().
 */
static int tee_ioctl_invoke(struct tee_context *ctx,
			    struct tee_ioctl_buf_data __user *ubuf)
{
	int rc;
	size_t n;
	struct tee_ioctl_buf_data buf;
	struct tee_ioctl_invoke_arg __user *uarg;
	struct tee_ioctl_invoke_arg arg;
	struct tee_ioctl_param __user *uparams = NULL;
	struct tee_param *params = NULL;

	if (!ctx->teedev->desc->ops->invoke_func)
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, sizeof(buf)))
		return -EFAULT;

	/* Bound the buffer before trusting num_params below */
	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
	    buf.buf_len < sizeof(struct tee_ioctl_invoke_arg))
		return -EINVAL;

	uarg = u64_to_user_ptr(buf.buf_ptr);
	if (copy_from_user(&arg, uarg, sizeof(arg)))
		return -EFAULT;

	/* num_params must agree exactly with the supplied buffer length */
	if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len)
		return -EINVAL;

	if (arg.num_params) {
		params = kcalloc(arg.num_params, sizeof(struct tee_param),
				 GFP_KERNEL);
		if (!params)
			return -ENOMEM;
		uparams = uarg->params;
		rc = params_from_user(ctx, params, arg.num_params, uparams);
		if (rc)
			goto out;
	}

	rc = ctx->teedev->desc->ops->invoke_func(ctx, &arg, params);
	if (rc)
		goto out;

	if (put_user(arg.ret, &uarg->ret) ||
	    put_user(arg.ret_origin, &uarg->ret_origin)) {
		rc = -EFAULT;
		goto out;
	}
	rc = params_to_user(uparams, arg.num_params, params);
out:
	if (params) {
		/* Decrease ref count for all valid shared memory pointers */
		for (n = 0; n < arg.num_params; n++)
			if (tee_param_is_memref(params + n) &&
			    params[n].u.memref.shm)
				tee_shm_put(params[n].u.memref.shm);
		kfree(params);
	}
	return rc;
}
428
429static int tee_ioctl_cancel(struct tee_context *ctx,
430 struct tee_ioctl_cancel_arg __user *uarg)
431{
432 struct tee_ioctl_cancel_arg arg;
433
434 if (!ctx->teedev->desc->ops->cancel_req)
435 return -EINVAL;
436
437 if (copy_from_user(&arg, uarg, sizeof(arg)))
438 return -EFAULT;
439
440 return ctx->teedev->desc->ops->cancel_req(ctx, arg.cancel_id,
441 arg.session);
442}
443
444static int
445tee_ioctl_close_session(struct tee_context *ctx,
446 struct tee_ioctl_close_session_arg __user *uarg)
447{
448 struct tee_ioctl_close_session_arg arg;
449
450 if (!ctx->teedev->desc->ops->close_session)
451 return -EINVAL;
452
453 if (copy_from_user(&arg, uarg, sizeof(arg)))
454 return -EFAULT;
455
456 return ctx->teedev->desc->ops->close_session(ctx, arg.session);
457}
458
/*
 * Convert kernel struct tee_param parameters into the ioctl layout and
 * copy them to the supplicant's user space buffer.
 */
static int params_to_supp(struct tee_context *ctx,
			  struct tee_ioctl_param __user *uparams,
			  size_t num_params, struct tee_param *params)
{
	size_t n;

	for (n = 0; n < num_params; n++) {
		struct tee_ioctl_param ip;
		struct tee_param *p = params + n;

		ip.attr = p->attr;
		switch (p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			ip.a = p->u.value.a;
			ip.b = p->u.value.b;
			ip.c = p->u.value.c;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			ip.b = p->u.memref.size;
			if (!p->u.memref.shm) {
				ip.a = 0;
				ip.c = (u64)-1; /* invalid shm id */
				break;
			}
			ip.a = p->u.memref.shm_offs;
			ip.c = p->u.memref.shm->id;
			break;
		default:
			/* Unknown/none parameters are zeroed, not skipped */
			ip.a = 0;
			ip.b = 0;
			ip.c = 0;
			break;
		}

		if (copy_to_user(uparams + n, &ip, sizeof(ip)))
			return -EFAULT;
	}

	return 0;
}
502
/*
 * TEE_IOC_SUPPL_RECV: used by tee-supplicant to pick up a request from
 * the driver. The supp_recv op may block until a request is available;
 * the function id and parameters are then copied out to user space.
 */
static int tee_ioctl_supp_recv(struct tee_context *ctx,
			       struct tee_ioctl_buf_data __user *ubuf)
{
	int rc;
	struct tee_ioctl_buf_data buf;
	struct tee_iocl_supp_recv_arg __user *uarg;
	struct tee_param *params;
	u32 num_params;
	u32 func;

	if (!ctx->teedev->desc->ops->supp_recv)
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, sizeof(buf)))
		return -EFAULT;

	/* Bound the buffer before trusting num_params below */
	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
	    buf.buf_len < sizeof(struct tee_iocl_supp_recv_arg))
		return -EINVAL;

	uarg = u64_to_user_ptr(buf.buf_ptr);
	if (get_user(num_params, &uarg->num_params))
		return -EFAULT;

	/* num_params must agree exactly with the supplied buffer length */
	if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) != buf.buf_len)
		return -EINVAL;

	params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	rc = params_from_user(ctx, params, num_params, uarg->params);
	if (rc)
		goto out;

	/* May block until the driver has a request for the supplicant */
	rc = ctx->teedev->desc->ops->supp_recv(ctx, &func, &num_params, params);
	if (rc)
		goto out;

	if (put_user(func, &uarg->func) ||
	    put_user(num_params, &uarg->num_params)) {
		rc = -EFAULT;
		goto out;
	}

	rc = params_to_supp(ctx, uarg->params, num_params, params);
out:
	kfree(params);
	return rc;
}
553
/*
 * Convert the supplicant's response parameters from the user space ioctl
 * layout into struct tee_param. Only output members are taken over; a
 * memref is reduced to its (possibly updated) size since the supplicant
 * cannot change which shm object the request referred to.
 */
static int params_from_supp(struct tee_param *params, size_t num_params,
			    struct tee_ioctl_param __user *uparams)
{
	size_t n;

	for (n = 0; n < num_params; n++) {
		struct tee_param *p = params + n;
		struct tee_ioctl_param ip;

		if (copy_from_user(&ip, uparams + n, sizeof(ip)))
			return -EFAULT;

		/* All unused attribute bits has to be zero */
		if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_MASK)
			return -EINVAL;

		p->attr = ip.attr;
		switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			/* Only out and in/out values can be updated */
			p->u.value.a = ip.a;
			p->u.value.b = ip.b;
			p->u.value.c = ip.c;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			/*
			 * Only the size of the memref can be updated.
			 * Since we don't have access to the original
			 * parameters here, only store the supplied size.
			 * The driver will copy the updated size into the
			 * original parameters.
			 */
			p->u.memref.shm = NULL;
			p->u.memref.shm_offs = 0;
			p->u.memref.size = ip.b;
			break;
		default:
			memset(&p->u, 0, sizeof(p->u));
			break;
		}
	}
	return 0;
}
599
/*
 * TEE_IOC_SUPPL_SEND: used by tee-supplicant to deliver the response to
 * a previously received request back to the driver.
 */
static int tee_ioctl_supp_send(struct tee_context *ctx,
			       struct tee_ioctl_buf_data __user *ubuf)
{
	long rc;
	struct tee_ioctl_buf_data buf;
	struct tee_iocl_supp_send_arg __user *uarg;
	struct tee_param *params;
	u32 num_params;
	u32 ret;

	/* Not valid for this driver */
	if (!ctx->teedev->desc->ops->supp_send)
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, sizeof(buf)))
		return -EFAULT;

	/* Bound the buffer before trusting num_params below */
	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
	    buf.buf_len < sizeof(struct tee_iocl_supp_send_arg))
		return -EINVAL;

	uarg = u64_to_user_ptr(buf.buf_ptr);
	if (get_user(ret, &uarg->ret) ||
	    get_user(num_params, &uarg->num_params))
		return -EFAULT;

	/*
	 * Note: unlike the other ioctls this only requires the parameter
	 * array to fit within the buffer ('>' rather than '!='), so the
	 * response may carry fewer parameters than the buffer could hold.
	 */
	if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) > buf.buf_len)
		return -EINVAL;

	params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	rc = params_from_supp(params, num_params, uarg->params);
	if (rc)
		goto out;

	rc = ctx->teedev->desc->ops->supp_send(ctx, ret, num_params, params);
out:
	kfree(params);
	return rc;
}
642
/* Main ioctl dispatcher for the tee character devices */
static long tee_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct tee_context *ctx = filp->private_data;
	void __user *uarg = (void __user *)arg;

	switch (cmd) {
	case TEE_IOC_VERSION:
		return tee_ioctl_version(ctx, uarg);
	case TEE_IOC_SHM_ALLOC:
		return tee_ioctl_shm_alloc(ctx, uarg);
	case TEE_IOC_SHM_REGISTER:
		return tee_ioctl_shm_register(ctx, uarg);
	case TEE_IOC_OPEN_SESSION:
		return tee_ioctl_open_session(ctx, uarg);
	case TEE_IOC_INVOKE:
		return tee_ioctl_invoke(ctx, uarg);
	case TEE_IOC_CANCEL:
		return tee_ioctl_cancel(ctx, uarg);
	case TEE_IOC_CLOSE_SESSION:
		return tee_ioctl_close_session(ctx, uarg);
	case TEE_IOC_SUPPL_RECV:
		return tee_ioctl_supp_recv(ctx, uarg);
	case TEE_IOC_SUPPL_SEND:
		return tee_ioctl_supp_send(ctx, uarg);
	default:
		return -EINVAL;
	}
}
671
/* File operations for the /dev/tee[priv]N character devices */
static const struct file_operations tee_fops = {
	.owner = THIS_MODULE,
	.open = tee_open,
	.release = tee_release,
	.unlocked_ioctl = tee_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
};
679
/*
 * Device release callback, invoked when the last reference on the
 * embedded struct device is dropped. Returns the device id to the
 * bitmap and frees the tee_device itself.
 */
static void tee_release_device(struct device *dev)
{
	struct tee_device *teedev = container_of(dev, struct tee_device, dev);

	spin_lock(&driver_lock);
	clear_bit(teedev->id, dev_mask);
	spin_unlock(&driver_lock);
	mutex_destroy(&teedev->mutex);
	idr_destroy(&teedev->idr);
	kfree(teedev);
}
691
692/**
693 * tee_device_alloc() - Allocate a new struct tee_device instance
694 * @teedesc: Descriptor for this driver
695 * @dev: Parent device for this device
696 * @pool: Shared memory pool, NULL if not used
697 * @driver_data: Private driver data for this device
698 *
699 * Allocates a new struct tee_device instance. The device is
700 * removed by tee_device_unregister().
701 *
702 * @returns a pointer to a 'struct tee_device' or an ERR_PTR on failure
703 */
704struct tee_device *tee_device_alloc(const struct tee_desc *teedesc,
705 struct device *dev,
706 struct tee_shm_pool *pool,
707 void *driver_data)
708{
709 struct tee_device *teedev;
710 void *ret;
711 int rc, max_id;
712 int offs = 0;
713
714 if (!teedesc || !teedesc->name || !teedesc->ops ||
715 !teedesc->ops->get_version || !teedesc->ops->open ||
716 !teedesc->ops->release || !pool)
717 return ERR_PTR(-EINVAL);
718
719 teedev = kzalloc(sizeof(*teedev), GFP_KERNEL);
720 if (!teedev) {
721 ret = ERR_PTR(-ENOMEM);
722 goto err;
723 }
724
725 max_id = TEE_NUM_DEVICES / 2;
726
727 if (teedesc->flags & TEE_DESC_PRIVILEGED) {
728 offs = TEE_NUM_DEVICES / 2;
729 max_id = TEE_NUM_DEVICES;
730 }
731
732 spin_lock(&driver_lock);
733 teedev->id = find_next_zero_bit(dev_mask, max_id, offs);
734 if (teedev->id < max_id)
735 set_bit(teedev->id, dev_mask);
736 spin_unlock(&driver_lock);
737
738 if (teedev->id >= max_id) {
739 ret = ERR_PTR(-ENOMEM);
740 goto err;
741 }
742
743 snprintf(teedev->name, sizeof(teedev->name), "tee%s%d",
744 teedesc->flags & TEE_DESC_PRIVILEGED ? "priv" : "",
745 teedev->id - offs);
746
747 teedev->dev.class = tee_class;
748 teedev->dev.release = tee_release_device;
749 teedev->dev.parent = dev;
750
751 teedev->dev.devt = MKDEV(MAJOR(tee_devt), teedev->id);
752
753 rc = dev_set_name(&teedev->dev, "%s", teedev->name);
754 if (rc) {
755 ret = ERR_PTR(rc);
756 goto err_devt;
757 }
758
759 cdev_init(&teedev->cdev, &tee_fops);
760 teedev->cdev.owner = teedesc->owner;
761 teedev->cdev.kobj.parent = &teedev->dev.kobj;
762
763 dev_set_drvdata(&teedev->dev, driver_data);
764 device_initialize(&teedev->dev);
765
766 /* 1 as tee_device_unregister() does one final tee_device_put() */
767 teedev->num_users = 1;
768 init_completion(&teedev->c_no_users);
769 mutex_init(&teedev->mutex);
770 idr_init(&teedev->idr);
771
772 teedev->desc = teedesc;
773 teedev->pool = pool;
774
775 return teedev;
776err_devt:
777 unregister_chrdev_region(teedev->dev.devt, 1);
778err:
779 pr_err("could not register %s driver\n",
780 teedesc->flags & TEE_DESC_PRIVILEGED ? "privileged" : "client");
781 if (teedev && teedev->id < TEE_NUM_DEVICES) {
782 spin_lock(&driver_lock);
783 clear_bit(teedev->id, dev_mask);
784 spin_unlock(&driver_lock);
785 }
786 kfree(teedev);
787 return ret;
788}
789EXPORT_SYMBOL_GPL(tee_device_alloc);
790
791static ssize_t implementation_id_show(struct device *dev,
792 struct device_attribute *attr, char *buf)
793{
794 struct tee_device *teedev = container_of(dev, struct tee_device, dev);
795 struct tee_ioctl_version_data vers;
796
797 teedev->desc->ops->get_version(teedev, &vers);
798 return scnprintf(buf, PAGE_SIZE, "%d\n", vers.impl_id);
799}
800static DEVICE_ATTR_RO(implementation_id);
801
/* sysfs attributes created for every registered tee device */
static struct attribute *tee_dev_attrs[] = {
	&dev_attr_implementation_id.attr,
	NULL
};
806
/* Attribute group installed by tee_device_register() */
static const struct attribute_group tee_dev_group = {
	.attrs = tee_dev_attrs,
};
810
811/**
812 * tee_device_register() - Registers a TEE device
813 * @teedev: Device to register
814 *
815 * tee_device_unregister() need to be called to remove the @teedev if
816 * this function fails.
817 *
818 * @returns < 0 on failure
819 */
820int tee_device_register(struct tee_device *teedev)
821{
822 int rc;
823
824 if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED) {
825 dev_err(&teedev->dev, "attempt to register twice\n");
826 return -EINVAL;
827 }
828
829 rc = cdev_add(&teedev->cdev, teedev->dev.devt, 1);
830 if (rc) {
831 dev_err(&teedev->dev,
832 "unable to cdev_add() %s, major %d, minor %d, err=%d\n",
833 teedev->name, MAJOR(teedev->dev.devt),
834 MINOR(teedev->dev.devt), rc);
835 return rc;
836 }
837
838 rc = device_add(&teedev->dev);
839 if (rc) {
840 dev_err(&teedev->dev,
841 "unable to device_add() %s, major %d, minor %d, err=%d\n",
842 teedev->name, MAJOR(teedev->dev.devt),
843 MINOR(teedev->dev.devt), rc);
844 goto err_device_add;
845 }
846
847 rc = sysfs_create_group(&teedev->dev.kobj, &tee_dev_group);
848 if (rc) {
849 dev_err(&teedev->dev,
850 "failed to create sysfs attributes, err=%d\n", rc);
851 goto err_sysfs_create_group;
852 }
853
854 teedev->flags |= TEE_DEVICE_FLAG_REGISTERED;
855 return 0;
856
857err_sysfs_create_group:
858 device_del(&teedev->dev);
859err_device_add:
860 cdev_del(&teedev->cdev);
861 return rc;
862}
863EXPORT_SYMBOL_GPL(tee_device_register);
864
/*
 * Drop a user reference on @teedev. When the last user goes away
 * teedev->desc is cleared (blocking further tee_device_get()) and
 * c_no_users is completed so tee_device_unregister() can finish.
 */
void tee_device_put(struct tee_device *teedev)
{
	mutex_lock(&teedev->mutex);
	/* Shouldn't put in this state */
	if (!WARN_ON(!teedev->desc)) {
		teedev->num_users--;
		if (!teedev->num_users) {
			teedev->desc = NULL;
			complete(&teedev->c_no_users);
		}
	}
	mutex_unlock(&teedev->mutex);
}
878
879bool tee_device_get(struct tee_device *teedev)
880{
881 mutex_lock(&teedev->mutex);
882 if (!teedev->desc) {
883 mutex_unlock(&teedev->mutex);
884 return false;
885 }
886 teedev->num_users++;
887 mutex_unlock(&teedev->mutex);
888 return true;
889}
890
891/**
892 * tee_device_unregister() - Removes a TEE device
893 * @teedev: Device to unregister
894 *
895 * This function should be called to remove the @teedev even if
896 * tee_device_register() hasn't been called yet. Does nothing if
897 * @teedev is NULL.
898 */
899void tee_device_unregister(struct tee_device *teedev)
900{
901 if (!teedev)
902 return;
903
904 if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED) {
905 sysfs_remove_group(&teedev->dev.kobj, &tee_dev_group);
906 cdev_del(&teedev->cdev);
907 device_del(&teedev->dev);
908 }
909
910 tee_device_put(teedev);
911 wait_for_completion(&teedev->c_no_users);
912
913 /*
914 * No need to take a mutex any longer now since teedev->desc was
915 * set to NULL before teedev->c_no_users was completed.
916 */
917
918 teedev->pool = NULL;
919
920 put_device(&teedev->dev);
921}
922EXPORT_SYMBOL_GPL(tee_device_unregister);
923
924/**
925 * tee_get_drvdata() - Return driver_data pointer
926 * @teedev: Device containing the driver_data pointer
927 * @returns the driver_data pointer supplied to tee_register().
928 */
929void *tee_get_drvdata(struct tee_device *teedev)
930{
931 return dev_get_drvdata(&teedev->dev);
932}
933EXPORT_SYMBOL_GPL(tee_get_drvdata);
934
/* Bundles the match callback and its arguments for class_find_device() */
struct match_dev_data {
	struct tee_ioctl_version_data *vers;	/* filled in by match_dev() */
	const void *data;			/* opaque argument for match() */
	int (*match)(struct tee_ioctl_version_data *, const void *);
};
940
/*
 * class_find_device() callback: query the device's version data and let
 * the caller-supplied match function decide whether it's a match.
 */
static int match_dev(struct device *dev, const void *data)
{
	const struct match_dev_data *match_data = data;
	struct tee_device *teedev = container_of(dev, struct tee_device, dev);

	teedev->desc->ops->get_version(teedev, match_data->vers);
	return match_data->match(match_data->vers, match_data->data);
}
949
/*
 * tee_client_open_context() - Open a context against a matching TEE device
 * @start:	if not NULL, continue searching after this device
 * @match:	callback deciding whether a device's version data matches
 * @data:	opaque argument passed to @match
 * @vers:	if not NULL, receives the matched device's version data
 *
 * Iterates the tee class devices and opens a context on the first match
 * that can be opened. Returns the context or an ERR_PTR() (-ENOENT when
 * no device matched).
 */
struct tee_context *
tee_client_open_context(struct tee_context *start,
			int (*match)(struct tee_ioctl_version_data *,
				     const void *),
			const void *data, struct tee_ioctl_version_data *vers)
{
	struct device *dev = NULL;
	struct device *put_dev = NULL;
	struct tee_context *ctx = NULL;
	struct tee_ioctl_version_data v;
	struct match_dev_data match_data = { vers ? vers : &v, data, match };

	if (start)
		dev = &start->teedev->dev;

	do {
		/* class_find_device() returns the next match with a ref held */
		dev = class_find_device(tee_class, dev, &match_data, match_dev);
		if (!dev) {
			ctx = ERR_PTR(-ENOENT);
			break;
		}

		/* Drop the reference on the previous candidate device */
		put_device(put_dev);
		put_dev = dev;

		ctx = teedev_open(container_of(dev, struct tee_device, dev));
	} while (IS_ERR(ctx) && PTR_ERR(ctx) != -ENOMEM);

	put_device(put_dev);
	/*
	 * Default behaviour for in kernel client is to not wait for
	 * tee-supplicant if not present for any requests in this context.
	 * Also this flag could be configured again before call to
	 * tee_client_open_session() if any in kernel client requires
	 * different behaviour.
	 */
	if (!IS_ERR(ctx))
		ctx->supp_nowait = true;

	return ctx;
}
EXPORT_SYMBOL_GPL(tee_client_open_context);
992
/* Close a context obtained with tee_client_open_context() */
void tee_client_close_context(struct tee_context *ctx)
{
	teedev_close_context(ctx);
}
EXPORT_SYMBOL_GPL(tee_client_close_context);
998
/* Query version/capability data of the device behind @ctx */
void tee_client_get_version(struct tee_context *ctx,
			    struct tee_ioctl_version_data *vers)
{
	ctx->teedev->desc->ops->get_version(ctx->teedev, vers);
}
EXPORT_SYMBOL_GPL(tee_client_get_version);
1005
/*
 * In-kernel client API: open a session. Returns -EINVAL if the driver
 * doesn't implement open_session, otherwise the driver's result.
 */
int tee_client_open_session(struct tee_context *ctx,
			    struct tee_ioctl_open_session_arg *arg,
			    struct tee_param *param)
{
	if (!ctx->teedev->desc->ops->open_session)
		return -EINVAL;
	return ctx->teedev->desc->ops->open_session(ctx, arg, param);
}
EXPORT_SYMBOL_GPL(tee_client_open_session);
1015
1016int tee_client_close_session(struct tee_context *ctx, u32 session)
1017{
1018 if (!ctx->teedev->desc->ops->close_session)
1019 return -EINVAL;
1020 return ctx->teedev->desc->ops->close_session(ctx, session);
1021}
1022EXPORT_SYMBOL_GPL(tee_client_close_session);
1023
/*
 * In-kernel client API: invoke a function in an open session. Returns
 * -EINVAL if the driver doesn't implement invoke_func.
 */
int tee_client_invoke_func(struct tee_context *ctx,
			   struct tee_ioctl_invoke_arg *arg,
			   struct tee_param *param)
{
	if (!ctx->teedev->desc->ops->invoke_func)
		return -EINVAL;
	return ctx->teedev->desc->ops->invoke_func(ctx, arg, param);
}
EXPORT_SYMBOL_GPL(tee_client_invoke_func);
1033
/*
 * In-kernel client API: request cancellation of an ongoing operation.
 *
 * NOTE(review): unlike the other tee_client_* helpers this one has no
 * EXPORT_SYMBOL_GPL() — confirm whether that is intentional.
 */
int tee_client_cancel_req(struct tee_context *ctx,
			  struct tee_ioctl_cancel_arg *arg)
{
	if (!ctx->teedev->desc->ops->cancel_req)
		return -EINVAL;
	return ctx->teedev->desc->ops->cancel_req(ctx, arg->cancel_id,
						  arg->session);
}
1042
1043static int tee_client_device_match(struct device *dev,
1044 struct device_driver *drv)
1045{
1046 const struct tee_client_device_id *id_table;
1047 struct tee_client_device *tee_device;
1048
1049 id_table = to_tee_client_driver(drv)->id_table;
1050 tee_device = to_tee_client_device(dev);
1051
1052 while (!uuid_is_null(&id_table->uuid)) {
1053 if (uuid_equal(&tee_device->id.uuid, &id_table->uuid))
1054 return 1;
1055 id_table++;
1056 }
1057
1058 return 0;
1059}
1060
1061static int tee_client_device_uevent(struct device *dev,
1062 struct kobj_uevent_env *env)
1063{
1064 uuid_t *dev_id = &to_tee_client_device(dev)->id.uuid;
1065
1066 return add_uevent_var(env, "MODALIAS=tee:%pUb", dev_id);
1067}
1068
/* Bus type for TEE client devices enumerated by the TEE drivers */
struct bus_type tee_bus_type = {
	.name		= "tee",
	.match		= tee_client_device_match,
	.uevent		= tee_client_device_uevent,
};
EXPORT_SYMBOL_GPL(tee_bus_type);
1075
/*
 * Subsystem init: create the tee class, allocate the char device region
 * for all possible devices and register the tee bus. Unwinds in reverse
 * order on failure.
 */
static int __init tee_init(void)
{
	int rc;

	tee_class = class_create(THIS_MODULE, "tee");
	if (IS_ERR(tee_class)) {
		pr_err("couldn't create class\n");
		return PTR_ERR(tee_class);
	}

	rc = alloc_chrdev_region(&tee_devt, 0, TEE_NUM_DEVICES, "tee");
	if (rc) {
		pr_err("failed to allocate char dev region\n");
		goto out_unreg_class;
	}

	rc = bus_register(&tee_bus_type);
	if (rc) {
		pr_err("failed to register tee bus\n");
		goto out_unreg_chrdev;
	}

	return 0;

out_unreg_chrdev:
	unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES);
out_unreg_class:
	class_destroy(tee_class);
	tee_class = NULL;

	return rc;
}
1108
/* Subsystem exit: tear down in reverse order of tee_init() */
static void __exit tee_exit(void)
{
	bus_unregister(&tee_bus_type);
	unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES);
	class_destroy(tee_class);
	tee_class = NULL;
}
1116
/* Register early (subsys initcall) so TEE drivers can probe during boot */
subsys_initcall(tee_init);
module_exit(tee_exit);

MODULE_AUTHOR("Linaro");
MODULE_DESCRIPTION("TEE Driver");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL v2");