Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

misc: fastrpc: add mmap/unmap support

Support the allocation/deallocation of buffers mapped to the DSP.

When the memory mapped to the DSP at process creation is not enough,
the fastrpc library can extend it at runtime. This avoids having to do
large preallocations by default.

Signed-off-by: Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org>
Reviewed-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
Link: https://lore.kernel.org/r/20191009144123.24583-2-srinivas.kandagatla@linaro.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Authored by Jorge Ramirez-Ortiz and committed by Greg Kroah-Hartman.
2419e55e 689e3557

+196
+181
drivers/misc/fastrpc.c
··· 34 34 #define FASTRPC_CTXID_MASK (0xFF0) 35 35 #define INIT_FILELEN_MAX (64 * 1024 * 1024) 36 36 #define FASTRPC_DEVICE_NAME "fastrpc" 37 + #define ADSP_MMAP_ADD_PAGES 0x1000 37 38 38 39 /* Retrives number of input buffers from the scalars parameter */ 39 40 #define REMOTE_SCALARS_INBUFS(sc) (((sc) >> 16) & 0x0ff) ··· 67 66 /* Remote Method id table */ 68 67 #define FASTRPC_RMID_INIT_ATTACH 0 69 68 #define FASTRPC_RMID_INIT_RELEASE 1 69 + #define FASTRPC_RMID_INIT_MMAP 4 70 + #define FASTRPC_RMID_INIT_MUNMAP 5 70 71 #define FASTRPC_RMID_INIT_CREATE 6 71 72 #define FASTRPC_RMID_INIT_CREATE_ATTR 7 72 73 #define FASTRPC_RMID_INIT_CREATE_STATIC 8 ··· 90 87 struct fastrpc_remote_arg { 91 88 u64 pv; 92 89 u64 len; 90 + }; 91 + 92 + struct fastrpc_mmap_rsp_msg { 93 + u64 vaddr; 94 + }; 95 + 96 + struct fastrpc_mmap_req_msg { 97 + s32 pgid; 98 + u32 flags; 99 + u64 vaddr; 100 + s32 num; 101 + }; 102 + 103 + struct fastrpc_munmap_req_msg { 104 + s32 pgid; 105 + u64 vaddr; 106 + u64 size; 93 107 }; 94 108 95 109 struct fastrpc_msg { ··· 143 123 /* Lock for dma buf attachments */ 144 124 struct mutex lock; 145 125 struct list_head attachments; 126 + /* mmap support */ 127 + struct list_head node; /* list of user requested mmaps */ 128 + uintptr_t raddr; 146 129 }; 147 130 148 131 struct fastrpc_dma_buf_attachment { ··· 215 192 struct list_head user; 216 193 struct list_head maps; 217 194 struct list_head pending; 195 + struct list_head mmaps; 218 196 219 197 struct fastrpc_channel_ctx *cctx; 220 198 struct fastrpc_session_ctx *sctx; ··· 293 269 return -ENOMEM; 294 270 295 271 INIT_LIST_HEAD(&buf->attachments); 272 + INIT_LIST_HEAD(&buf->node); 296 273 mutex_init(&buf->lock); 297 274 298 275 buf->fl = fl; ··· 301 276 buf->phys = 0; 302 277 buf->size = size; 303 278 buf->dev = dev; 279 + buf->raddr = 0; 304 280 305 281 buf->virt = dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys, 306 282 GFP_KERNEL); ··· 1156 1130 struct fastrpc_channel_ctx *cctx = fl->cctx; 1157 
1131 struct fastrpc_invoke_ctx *ctx, *n; 1158 1132 struct fastrpc_map *map, *m; 1133 + struct fastrpc_buf *buf, *b; 1159 1134 unsigned long flags; 1160 1135 1161 1136 fastrpc_release_current_dsp_process(fl); ··· 1176 1149 list_for_each_entry_safe(map, m, &fl->maps, node) { 1177 1150 list_del(&map->node); 1178 1151 fastrpc_map_put(map); 1152 + } 1153 + 1154 + list_for_each_entry_safe(buf, b, &fl->mmaps, node) { 1155 + list_del(&buf->node); 1156 + fastrpc_buf_free(buf); 1179 1157 } 1180 1158 1181 1159 fastrpc_session_free(cctx, fl->sctx); ··· 1211 1179 mutex_init(&fl->mutex); 1212 1180 INIT_LIST_HEAD(&fl->pending); 1213 1181 INIT_LIST_HEAD(&fl->maps); 1182 + INIT_LIST_HEAD(&fl->mmaps); 1214 1183 INIT_LIST_HEAD(&fl->user); 1215 1184 fl->tgid = current->tgid; 1216 1185 fl->cctx = cctx; ··· 1317 1284 return err; 1318 1285 } 1319 1286 1287 + static int fastrpc_req_munmap_impl(struct fastrpc_user *fl, 1288 + struct fastrpc_req_munmap *req) 1289 + { 1290 + struct fastrpc_invoke_args args[1] = { [0] = { 0 } }; 1291 + struct fastrpc_buf *buf, *b; 1292 + struct fastrpc_munmap_req_msg req_msg; 1293 + struct device *dev = fl->sctx->dev; 1294 + int err; 1295 + u32 sc; 1296 + 1297 + spin_lock(&fl->lock); 1298 + list_for_each_entry_safe(buf, b, &fl->mmaps, node) { 1299 + if ((buf->raddr == req->vaddrout) && (buf->size == req->size)) 1300 + break; 1301 + buf = NULL; 1302 + } 1303 + spin_unlock(&fl->lock); 1304 + 1305 + if (!buf) { 1306 + dev_err(dev, "mmap not in list\n"); 1307 + return -EINVAL; 1308 + } 1309 + 1310 + req_msg.pgid = fl->tgid; 1311 + req_msg.size = buf->size; 1312 + req_msg.vaddr = buf->raddr; 1313 + 1314 + args[0].ptr = (u64) (uintptr_t) &req_msg; 1315 + args[0].length = sizeof(req_msg); 1316 + 1317 + sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MUNMAP, 1, 0); 1318 + err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, 1319 + &args[0]); 1320 + if (!err) { 1321 + dev_dbg(dev, "unmmap\tpt 0x%09lx OK\n", buf->raddr); 1322 + spin_lock(&fl->lock); 1323 + 
list_del(&buf->node); 1324 + spin_unlock(&fl->lock); 1325 + fastrpc_buf_free(buf); 1326 + } else { 1327 + dev_err(dev, "unmmap\tpt 0x%09lx ERROR\n", buf->raddr); 1328 + } 1329 + 1330 + return err; 1331 + } 1332 + 1333 + static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp) 1334 + { 1335 + struct fastrpc_req_munmap req; 1336 + 1337 + if (copy_from_user(&req, argp, sizeof(req))) 1338 + return -EFAULT; 1339 + 1340 + return fastrpc_req_munmap_impl(fl, &req); 1341 + } 1342 + 1343 + static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp) 1344 + { 1345 + struct fastrpc_invoke_args args[3] = { [0 ... 2] = { 0 } }; 1346 + struct fastrpc_buf *buf = NULL; 1347 + struct fastrpc_mmap_req_msg req_msg; 1348 + struct fastrpc_mmap_rsp_msg rsp_msg; 1349 + struct fastrpc_req_munmap req_unmap; 1350 + struct fastrpc_phy_page pages; 1351 + struct fastrpc_req_mmap req; 1352 + struct device *dev = fl->sctx->dev; 1353 + int err; 1354 + u32 sc; 1355 + 1356 + if (copy_from_user(&req, argp, sizeof(req))) 1357 + return -EFAULT; 1358 + 1359 + if (req.flags != ADSP_MMAP_ADD_PAGES) { 1360 + dev_err(dev, "flag not supported 0x%x\n", req.flags); 1361 + return -EINVAL; 1362 + } 1363 + 1364 + if (req.vaddrin) { 1365 + dev_err(dev, "adding user allocated pages is not supported\n"); 1366 + return -EINVAL; 1367 + } 1368 + 1369 + err = fastrpc_buf_alloc(fl, fl->sctx->dev, req.size, &buf); 1370 + if (err) { 1371 + dev_err(dev, "failed to allocate buffer\n"); 1372 + return err; 1373 + } 1374 + 1375 + req_msg.pgid = fl->tgid; 1376 + req_msg.flags = req.flags; 1377 + req_msg.vaddr = req.vaddrin; 1378 + req_msg.num = sizeof(pages); 1379 + 1380 + args[0].ptr = (u64) (uintptr_t) &req_msg; 1381 + args[0].length = sizeof(req_msg); 1382 + 1383 + pages.addr = buf->phys; 1384 + pages.size = buf->size; 1385 + 1386 + args[1].ptr = (u64) (uintptr_t) &pages; 1387 + args[1].length = sizeof(pages); 1388 + 1389 + args[2].ptr = (u64) (uintptr_t) &rsp_msg; 1390 + args[2].length = 
sizeof(rsp_msg); 1391 + 1392 + sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MMAP, 2, 1); 1393 + err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, 1394 + &args[0]); 1395 + if (err) { 1396 + dev_err(dev, "mmap error (len 0x%08llx)\n", buf->size); 1397 + goto err_invoke; 1398 + } 1399 + 1400 + /* update the buffer to be able to deallocate the memory on the DSP */ 1401 + buf->raddr = (uintptr_t) rsp_msg.vaddr; 1402 + 1403 + /* let the client know the address to use */ 1404 + req.vaddrout = rsp_msg.vaddr; 1405 + 1406 + spin_lock(&fl->lock); 1407 + list_add_tail(&buf->node, &fl->mmaps); 1408 + spin_unlock(&fl->lock); 1409 + 1410 + if (copy_to_user((void __user *)argp, &req, sizeof(req))) { 1411 + /* unmap the memory and release the buffer */ 1412 + req_unmap.vaddrout = buf->raddr; 1413 + req_unmap.size = buf->size; 1414 + fastrpc_req_munmap_impl(fl, &req_unmap); 1415 + return -EFAULT; 1416 + } 1417 + 1418 + dev_dbg(dev, "mmap\t\tpt 0x%09lx OK [len 0x%08llx]\n", 1419 + buf->raddr, buf->size); 1420 + 1421 + return 0; 1422 + 1423 + err_invoke: 1424 + fastrpc_buf_free(buf); 1425 + 1426 + return err; 1427 + } 1428 + 1320 1429 static long fastrpc_device_ioctl(struct file *file, unsigned int cmd, 1321 1430 unsigned long arg) 1322 1431 { ··· 1478 1303 break; 1479 1304 case FASTRPC_IOCTL_ALLOC_DMA_BUFF: 1480 1305 err = fastrpc_dmabuf_alloc(fl, argp); 1306 + break; 1307 + case FASTRPC_IOCTL_MMAP: 1308 + err = fastrpc_req_mmap(fl, argp); 1309 + break; 1310 + case FASTRPC_IOCTL_MUNMAP: 1311 + err = fastrpc_req_munmap(fl, argp); 1481 1312 break; 1482 1313 default: 1483 1314 err = -ENOTTY;
+15
include/uapi/misc/fastrpc.h
··· 10 10 #define FASTRPC_IOCTL_INVOKE _IOWR('R', 3, struct fastrpc_invoke) 11 11 #define FASTRPC_IOCTL_INIT_ATTACH _IO('R', 4) 12 12 #define FASTRPC_IOCTL_INIT_CREATE _IOWR('R', 5, struct fastrpc_init_create) 13 + #define FASTRPC_IOCTL_MMAP _IOWR('R', 6, struct fastrpc_req_mmap) 14 + #define FASTRPC_IOCTL_MUNMAP _IOWR('R', 7, struct fastrpc_req_munmap) 13 15 14 16 struct fastrpc_invoke_args { 15 17 __u64 ptr; ··· 37 35 struct fastrpc_alloc_dma_buf { 38 36 __s32 fd; /* fd */ 39 37 __u32 flags; /* flags to map with */ 38 + __u64 size; /* size */ 39 + }; 40 + 41 + struct fastrpc_req_mmap { 42 + __s32 fd; 43 + __u32 flags; /* flags for dsp to map with */ 44 + __u64 vaddrin; /* optional virtual address */ 45 + __u64 size; /* size */ 46 + __u64 vaddrout; /* dsp virtual address */ 47 + }; 48 + 49 + struct fastrpc_req_munmap { 50 + __u64 vaddrout; /* address to unmap */ 40 51 __u64 size; /* size */ 41 52 }; 42 53