Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

misc: fastrpc: Add support for dmabuf exporter

User processes can involve dealing with big buffer sizes, and also passing
buffers from one compute context bank to another compute context bank for
complex DSP algorithms.

This patch adds support to fastrpc to make it a proper dmabuf exporter
to avoid making copies of buffers.

Co-developed-by: Thierry Escande <thierry.escande@linaro.org>
Signed-off-by: Thierry Escande <thierry.escande@linaro.org>
Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Authored by Srinivas Kandagatla; committed by Greg Kroah-Hartman.
6cffd795 d73f71c7

+192
+184
drivers/misc/fastrpc.c
··· 106 106 107 107 struct fastrpc_buf { 108 108 struct fastrpc_user *fl; 109 + struct dma_buf *dmabuf; 109 110 struct device *dev; 110 111 void *virt; 111 112 u64 phys; 112 113 u64 size; 114 + /* Lock for dma buf attachments */ 115 + struct mutex lock; 116 + struct list_head attachments; 117 + }; 118 + 119 + struct fastrpc_dma_buf_attachment { 120 + struct device *dev; 121 + struct sg_table sgt; 122 + struct list_head node; 113 123 }; 114 124 115 125 struct fastrpc_map { ··· 256 246 if (!buf) 257 247 return -ENOMEM; 258 248 249 + INIT_LIST_HEAD(&buf->attachments); 250 + mutex_init(&buf->lock); 251 + 259 252 buf->fl = fl; 260 253 buf->virt = NULL; 261 254 buf->phys = 0; ··· 372 359 373 360 return ERR_PTR(ret); 374 361 } 362 + 363 + static struct sg_table * 364 + fastrpc_map_dma_buf(struct dma_buf_attachment *attachment, 365 + enum dma_data_direction dir) 366 + { 367 + struct fastrpc_dma_buf_attachment *a = attachment->priv; 368 + struct sg_table *table; 369 + 370 + table = &a->sgt; 371 + 372 + if (!dma_map_sg(attachment->dev, table->sgl, table->nents, dir)) 373 + return ERR_PTR(-ENOMEM); 374 + 375 + return table; 376 + } 377 + 378 + static void fastrpc_unmap_dma_buf(struct dma_buf_attachment *attach, 379 + struct sg_table *table, 380 + enum dma_data_direction dir) 381 + { 382 + dma_unmap_sg(attach->dev, table->sgl, table->nents, dir); 383 + } 384 + 385 + static void fastrpc_release(struct dma_buf *dmabuf) 386 + { 387 + struct fastrpc_buf *buffer = dmabuf->priv; 388 + 389 + fastrpc_buf_free(buffer); 390 + } 391 + 392 + static int fastrpc_dma_buf_attach(struct dma_buf *dmabuf, 393 + struct dma_buf_attachment *attachment) 394 + { 395 + struct fastrpc_dma_buf_attachment *a; 396 + struct fastrpc_buf *buffer = dmabuf->priv; 397 + int ret; 398 + 399 + a = kzalloc(sizeof(*a), GFP_KERNEL); 400 + if (!a) 401 + return -ENOMEM; 402 + 403 + ret = dma_get_sgtable(buffer->dev, &a->sgt, buffer->virt, 404 + FASTRPC_PHYS(buffer->phys), buffer->size); 405 + if (ret < 0) { 406 + 
dev_err(buffer->dev, "failed to get scatterlist from DMA API\n"); 407 + return -EINVAL; 408 + } 409 + 410 + a->dev = attachment->dev; 411 + INIT_LIST_HEAD(&a->node); 412 + attachment->priv = a; 413 + 414 + mutex_lock(&buffer->lock); 415 + list_add(&a->node, &buffer->attachments); 416 + mutex_unlock(&buffer->lock); 417 + 418 + return 0; 419 + } 420 + 421 + static void fastrpc_dma_buf_detatch(struct dma_buf *dmabuf, 422 + struct dma_buf_attachment *attachment) 423 + { 424 + struct fastrpc_dma_buf_attachment *a = attachment->priv; 425 + struct fastrpc_buf *buffer = dmabuf->priv; 426 + 427 + mutex_lock(&buffer->lock); 428 + list_del(&a->node); 429 + mutex_unlock(&buffer->lock); 430 + kfree(a); 431 + } 432 + 433 + static void *fastrpc_kmap(struct dma_buf *dmabuf, unsigned long pgnum) 434 + { 435 + struct fastrpc_buf *buf = dmabuf->priv; 436 + 437 + return buf->virt ? buf->virt + pgnum * PAGE_SIZE : NULL; 438 + } 439 + 440 + static void *fastrpc_vmap(struct dma_buf *dmabuf) 441 + { 442 + struct fastrpc_buf *buf = dmabuf->priv; 443 + 444 + return buf->virt; 445 + } 446 + 447 + static int fastrpc_mmap(struct dma_buf *dmabuf, 448 + struct vm_area_struct *vma) 449 + { 450 + struct fastrpc_buf *buf = dmabuf->priv; 451 + size_t size = vma->vm_end - vma->vm_start; 452 + 453 + return dma_mmap_coherent(buf->dev, vma, buf->virt, 454 + FASTRPC_PHYS(buf->phys), size); 455 + } 456 + 457 + static const struct dma_buf_ops fastrpc_dma_buf_ops = { 458 + .attach = fastrpc_dma_buf_attach, 459 + .detach = fastrpc_dma_buf_detatch, 460 + .map_dma_buf = fastrpc_map_dma_buf, 461 + .unmap_dma_buf = fastrpc_unmap_dma_buf, 462 + .mmap = fastrpc_mmap, 463 + .map = fastrpc_kmap, 464 + .vmap = fastrpc_vmap, 465 + .release = fastrpc_release, 466 + }; 375 467 376 468 static int fastrpc_map_create(struct fastrpc_user *fl, int fd, 377 469 u64 len, struct fastrpc_map **ppmap) ··· 1024 906 return 0; 1025 907 } 1026 908 909 + static int fastrpc_dmabuf_free(struct fastrpc_user *fl, char __user *argp) 910 + { 
911 + struct dma_buf *buf; 912 + int info; 913 + 914 + if (copy_from_user(&info, argp, sizeof(info))) 915 + return -EFAULT; 916 + 917 + buf = dma_buf_get(info); 918 + if (IS_ERR_OR_NULL(buf)) 919 + return -EINVAL; 920 + /* 921 + * one for the last get and other for the ALLOC_DMA_BUFF ioctl 922 + */ 923 + dma_buf_put(buf); 924 + dma_buf_put(buf); 925 + 926 + return 0; 927 + } 928 + 929 + static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp) 930 + { 931 + struct fastrpc_alloc_dma_buf bp; 932 + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); 933 + struct fastrpc_buf *buf = NULL; 934 + int err; 935 + 936 + if (copy_from_user(&bp, argp, sizeof(bp))) 937 + return -EFAULT; 938 + 939 + err = fastrpc_buf_alloc(fl, fl->sctx->dev, bp.size, &buf); 940 + if (err) 941 + return err; 942 + exp_info.ops = &fastrpc_dma_buf_ops; 943 + exp_info.size = bp.size; 944 + exp_info.flags = O_RDWR; 945 + exp_info.priv = buf; 946 + buf->dmabuf = dma_buf_export(&exp_info); 947 + if (IS_ERR(buf->dmabuf)) { 948 + err = PTR_ERR(buf->dmabuf); 949 + fastrpc_buf_free(buf); 950 + return err; 951 + } 952 + 953 + bp.fd = dma_buf_fd(buf->dmabuf, O_ACCMODE); 954 + if (bp.fd < 0) { 955 + dma_buf_put(buf->dmabuf); 956 + return -EINVAL; 957 + } 958 + 959 + if (copy_to_user(argp, &bp, sizeof(bp))) { 960 + dma_buf_put(buf->dmabuf); 961 + return -EFAULT; 962 + } 963 + 964 + get_dma_buf(buf->dmabuf); 965 + 966 + return 0; 967 + } 968 + 1027 969 static int fastrpc_init_attach(struct fastrpc_user *fl) 1028 970 { 1029 971 struct fastrpc_invoke_args args[1]; ··· 1147 969 break; 1148 970 case FASTRPC_IOCTL_INIT_CREATE: 1149 971 err = fastrpc_init_create_process(fl, argp); 972 + break; 973 + case FASTRPC_IOCTL_FREE_DMA_BUFF: 974 + err = fastrpc_dmabuf_free(fl, argp); 975 + break; 976 + case FASTRPC_IOCTL_ALLOC_DMA_BUFF: 977 + err = fastrpc_dmabuf_alloc(fl, argp); 1150 978 break; 1151 979 default: 1152 980 err = -ENOTTY;
+8
include/uapi/misc/fastrpc.h
··· 5 5 6 6 #include <linux/types.h> 7 7 8 + #define FASTRPC_IOCTL_ALLOC_DMA_BUFF _IOWR('R', 1, struct fastrpc_alloc_dma_buf) 9 + #define FASTRPC_IOCTL_FREE_DMA_BUFF _IOWR('R', 2, __u32) 8 10 #define FASTRPC_IOCTL_INVOKE _IOWR('R', 3, struct fastrpc_invoke) 9 11 #define FASTRPC_IOCTL_INIT_ATTACH _IO('R', 4) 10 12 #define FASTRPC_IOCTL_INIT_CREATE _IOWR('R', 5, struct fastrpc_init_create) ··· 30 28 __u32 attrs; 31 29 __u32 siglen; 32 30 __u64 file; /* pointer to elf file */ 31 + }; 32 + 33 + struct fastrpc_alloc_dma_buf { 34 + __s32 fd; /* fd */ 35 + __u32 flags; /* flags to map with */ 36 + __u64 size; /* size */ 33 37 }; 34 38 35 39 #endif /* __QCOM_FASTRPC_H__ */