Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mei: implement client dma setup.

Implement the HBM message protocol to set up and tear down
a DMA buffer on behalf of a client. On top of that, add DMA
buffer allocation and its lifetime management.

Signed-off-by: Alexander Usyskin <alexander.usyskin@intel.com>
Signed-off-by: Tomas Winkler <tomas.winkler@intel.com>
Link: https://lore.kernel.org/r/20210206144325.25682-5-tomas.winkler@intel.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

authored by

Alexander Usyskin and committed by
Greg Kroah-Hartman
369aea84 dfad8742

+446 -1
+286
drivers/misc/mei/client.c
··· 9 9 #include <linux/delay.h> 10 10 #include <linux/slab.h> 11 11 #include <linux/pm_runtime.h> 12 + #include <linux/dma-mapping.h> 12 13 13 14 #include <linux/mei.h> 14 15 ··· 2115 2114 case MEI_FOP_DISCONNECT: 2116 2115 case MEI_FOP_NOTIFY_STOP: 2117 2116 case MEI_FOP_NOTIFY_START: 2117 + case MEI_FOP_DMA_MAP: 2118 + case MEI_FOP_DMA_UNMAP: 2118 2119 if (waitqueue_active(&cl->wait)) 2119 2120 wake_up(&cl->wait); 2120 2121 ··· 2142 2139 2143 2140 list_for_each_entry(cl, &dev->file_list, link) 2144 2141 mei_cl_set_disconnected(cl); 2142 + } 2143 + 2144 + static struct mei_cl *mei_cl_dma_map_find(struct mei_device *dev, u8 buffer_id) 2145 + { 2146 + struct mei_cl *cl; 2147 + 2148 + list_for_each_entry(cl, &dev->file_list, link) 2149 + if (cl->dma.buffer_id == buffer_id) 2150 + return cl; 2151 + return NULL; 2152 + } 2153 + 2154 + /** 2155 + * mei_cl_irq_dma_map - send client dma map request in irq_thread context 2156 + * 2157 + * @cl: client 2158 + * @cb: callback block. 2159 + * @cmpl_list: complete list. 2160 + * 2161 + * Return: 0 on such and error otherwise. 2162 + */ 2163 + int mei_cl_irq_dma_map(struct mei_cl *cl, struct mei_cl_cb *cb, 2164 + struct list_head *cmpl_list) 2165 + { 2166 + struct mei_device *dev = cl->dev; 2167 + u32 msg_slots; 2168 + int slots; 2169 + int ret; 2170 + 2171 + msg_slots = mei_hbm2slots(sizeof(struct hbm_client_dma_map_request)); 2172 + slots = mei_hbuf_empty_slots(dev); 2173 + if (slots < 0) 2174 + return -EOVERFLOW; 2175 + 2176 + if ((u32)slots < msg_slots) 2177 + return -EMSGSIZE; 2178 + 2179 + ret = mei_hbm_cl_dma_map_req(dev, cl); 2180 + if (ret) { 2181 + cl->status = ret; 2182 + list_move_tail(&cb->list, cmpl_list); 2183 + return ret; 2184 + } 2185 + 2186 + list_move_tail(&cb->list, &dev->ctrl_rd_list); 2187 + return 0; 2188 + } 2189 + 2190 + /** 2191 + * mei_cl_irq_dma_unmap - send client dma unmap request in irq_thread context 2192 + * 2193 + * @cl: client 2194 + * @cb: callback block. 2195 + * @cmpl_list: complete list. 
2196 + * 2197 + * Return: 0 on such and error otherwise. 2198 + */ 2199 + int mei_cl_irq_dma_unmap(struct mei_cl *cl, struct mei_cl_cb *cb, 2200 + struct list_head *cmpl_list) 2201 + { 2202 + struct mei_device *dev = cl->dev; 2203 + u32 msg_slots; 2204 + int slots; 2205 + int ret; 2206 + 2207 + msg_slots = mei_hbm2slots(sizeof(struct hbm_client_dma_unmap_request)); 2208 + slots = mei_hbuf_empty_slots(dev); 2209 + if (slots < 0) 2210 + return -EOVERFLOW; 2211 + 2212 + if ((u32)slots < msg_slots) 2213 + return -EMSGSIZE; 2214 + 2215 + ret = mei_hbm_cl_dma_unmap_req(dev, cl); 2216 + if (ret) { 2217 + cl->status = ret; 2218 + list_move_tail(&cb->list, cmpl_list); 2219 + return ret; 2220 + } 2221 + 2222 + list_move_tail(&cb->list, &dev->ctrl_rd_list); 2223 + return 0; 2224 + } 2225 + 2226 + static int mei_cl_dma_alloc(struct mei_cl *cl, u8 buf_id, size_t size) 2227 + { 2228 + cl->dma.vaddr = dmam_alloc_coherent(cl->dev->dev, size, 2229 + &cl->dma.daddr, GFP_KERNEL); 2230 + if (!cl->dma.vaddr) 2231 + return -ENOMEM; 2232 + 2233 + cl->dma.buffer_id = buf_id; 2234 + cl->dma.size = size; 2235 + 2236 + return 0; 2237 + } 2238 + 2239 + static void mei_cl_dma_free(struct mei_cl *cl) 2240 + { 2241 + cl->dma.buffer_id = 0; 2242 + dmam_free_coherent(cl->dev->dev, 2243 + cl->dma.size, cl->dma.vaddr, cl->dma.daddr); 2244 + cl->dma.size = 0; 2245 + cl->dma.vaddr = NULL; 2246 + cl->dma.daddr = 0; 2247 + } 2248 + 2249 + /** 2250 + * mei_cl_alloc_and_map - send client dma map request 2251 + * 2252 + * @cl: host client 2253 + * @fp: pointer to file structure 2254 + * @buffer_id: id of the mapped buffer 2255 + * @size: size of the buffer 2256 + * 2257 + * Locking: called under "dev->device_lock" lock 2258 + * 2259 + * Return: 2260 + * * -ENODEV 2261 + * * -EINVAL 2262 + * * -EOPNOTSUPP 2263 + * * -EPROTO 2264 + * * -ENOMEM; 2265 + */ 2266 + int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp, 2267 + u8 buffer_id, size_t size) 2268 + { 2269 + struct mei_device *dev; 2270 
+ struct mei_cl_cb *cb; 2271 + int rets; 2272 + 2273 + if (WARN_ON(!cl || !cl->dev)) 2274 + return -ENODEV; 2275 + 2276 + dev = cl->dev; 2277 + 2278 + if (!dev->hbm_f_cd_supported) { 2279 + cl_dbg(dev, cl, "client dma is not supported\n"); 2280 + return -EOPNOTSUPP; 2281 + } 2282 + 2283 + if (buffer_id == 0) 2284 + return -EINVAL; 2285 + 2286 + if (!mei_cl_is_connected(cl)) 2287 + return -ENODEV; 2288 + 2289 + if (cl->dma_mapped) 2290 + return -EPROTO; 2291 + 2292 + if (mei_cl_dma_map_find(dev, buffer_id)) { 2293 + cl_dbg(dev, cl, "client dma with id %d is already allocated\n", 2294 + cl->dma.buffer_id); 2295 + return -EPROTO; 2296 + } 2297 + 2298 + rets = pm_runtime_get(dev->dev); 2299 + if (rets < 0 && rets != -EINPROGRESS) { 2300 + pm_runtime_put_noidle(dev->dev); 2301 + cl_err(dev, cl, "rpm: get failed %d\n", rets); 2302 + return rets; 2303 + } 2304 + 2305 + rets = mei_cl_dma_alloc(cl, buffer_id, size); 2306 + if (rets) { 2307 + pm_runtime_put_noidle(dev->dev); 2308 + return rets; 2309 + } 2310 + 2311 + cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DMA_MAP, fp); 2312 + if (!cb) { 2313 + rets = -ENOMEM; 2314 + goto out; 2315 + } 2316 + 2317 + if (mei_hbuf_acquire(dev)) { 2318 + if (mei_hbm_cl_dma_map_req(dev, cl)) { 2319 + rets = -ENODEV; 2320 + goto out; 2321 + } 2322 + list_move_tail(&cb->list, &dev->ctrl_rd_list); 2323 + } 2324 + 2325 + mutex_unlock(&dev->device_lock); 2326 + wait_event_timeout(cl->wait, 2327 + cl->dma_mapped || 2328 + cl->status || 2329 + !mei_cl_is_connected(cl), 2330 + mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); 2331 + mutex_lock(&dev->device_lock); 2332 + 2333 + if (!cl->dma_mapped && !cl->status) 2334 + cl->status = -EFAULT; 2335 + 2336 + rets = cl->status; 2337 + 2338 + out: 2339 + if (rets) 2340 + mei_cl_dma_free(cl); 2341 + 2342 + cl_dbg(dev, cl, "rpm: autosuspend\n"); 2343 + pm_runtime_mark_last_busy(dev->dev); 2344 + pm_runtime_put_autosuspend(dev->dev); 2345 + 2346 + mei_io_cb_free(cb); 2347 + return rets; 2348 + } 2349 + 2350 + 
/** 2351 + * mei_cl_unmap_and_free - send client dma unmap request 2352 + * 2353 + * @cl: host client 2354 + * @fp: pointer to file structure 2355 + * 2356 + * Locking: called under "dev->device_lock" lock 2357 + * 2358 + * Return: 0 on such and error otherwise. 2359 + */ 2360 + int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp) 2361 + { 2362 + struct mei_device *dev; 2363 + struct mei_cl_cb *cb; 2364 + int rets; 2365 + 2366 + if (WARN_ON(!cl || !cl->dev)) 2367 + return -ENODEV; 2368 + 2369 + dev = cl->dev; 2370 + 2371 + if (!dev->hbm_f_cd_supported) { 2372 + cl_dbg(dev, cl, "client dma is not supported\n"); 2373 + return -EOPNOTSUPP; 2374 + } 2375 + 2376 + if (!mei_cl_is_connected(cl)) 2377 + return -ENODEV; 2378 + 2379 + if (!cl->dma_mapped) 2380 + return -EPROTO; 2381 + 2382 + rets = pm_runtime_get(dev->dev); 2383 + if (rets < 0 && rets != -EINPROGRESS) { 2384 + pm_runtime_put_noidle(dev->dev); 2385 + cl_err(dev, cl, "rpm: get failed %d\n", rets); 2386 + return rets; 2387 + } 2388 + 2389 + cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DMA_UNMAP, fp); 2390 + if (!cb) { 2391 + rets = -ENOMEM; 2392 + goto out; 2393 + } 2394 + 2395 + if (mei_hbuf_acquire(dev)) { 2396 + if (mei_hbm_cl_dma_unmap_req(dev, cl)) { 2397 + rets = -ENODEV; 2398 + goto out; 2399 + } 2400 + list_move_tail(&cb->list, &dev->ctrl_rd_list); 2401 + } 2402 + 2403 + mutex_unlock(&dev->device_lock); 2404 + wait_event_timeout(cl->wait, 2405 + !cl->dma_mapped || 2406 + cl->status || 2407 + !mei_cl_is_connected(cl), 2408 + mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); 2409 + mutex_lock(&dev->device_lock); 2410 + 2411 + if (cl->dma_mapped && !cl->status) 2412 + cl->status = -EFAULT; 2413 + 2414 + rets = cl->status; 2415 + 2416 + if (!rets) 2417 + mei_cl_dma_free(cl); 2418 + out: 2419 + cl_dbg(dev, cl, "rpm: autosuspend\n"); 2420 + pm_runtime_mark_last_busy(dev->dev); 2421 + pm_runtime_put_autosuspend(dev->dev); 2422 + 2423 + mei_io_cb_free(cb); 2424 + return rets; 2145 2425 }
+8
drivers/misc/mei/client.h
··· 265 265 266 266 void mei_cl_all_disconnect(struct mei_device *dev); 267 267 268 + int mei_cl_irq_dma_map(struct mei_cl *cl, struct mei_cl_cb *cb, 269 + struct list_head *cmpl_list); 270 + int mei_cl_irq_dma_unmap(struct mei_cl *cl, struct mei_cl_cb *cb, 271 + struct list_head *cmpl_list); 272 + int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp, 273 + u8 buffer_id, size_t size); 274 + int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp); 275 + 268 276 #define MEI_CL_FMT "cl:host=%02d me=%02d " 269 277 #define MEI_CL_PRM(cl) (cl)->host_client_id, mei_cl_me_id(cl) 270 278
+124
drivers/misc/mei/hbm.c
··· 595 595 } 596 596 597 597 /** 598 + * mei_hbm_cl_dma_map_req - send client dma map request 599 + * 600 + * @dev: the device structure 601 + * @cl: mei host client 602 + * 603 + * Return: 0 on success and -EIO on write failure 604 + */ 605 + int mei_hbm_cl_dma_map_req(struct mei_device *dev, struct mei_cl *cl) 606 + { 607 + struct mei_msg_hdr mei_hdr; 608 + struct hbm_client_dma_map_request req; 609 + int ret; 610 + 611 + mei_hbm_hdr(&mei_hdr, sizeof(req)); 612 + 613 + memset(&req, 0, sizeof(req)); 614 + 615 + req.hbm_cmd = MEI_HBM_CLIENT_DMA_MAP_REQ_CMD; 616 + req.client_buffer_id = cl->dma.buffer_id; 617 + req.address_lsb = lower_32_bits(cl->dma.daddr); 618 + req.address_msb = upper_32_bits(cl->dma.daddr); 619 + req.size = cl->dma.size; 620 + 621 + ret = mei_hbm_write_message(dev, &mei_hdr, &req); 622 + if (ret) 623 + dev_err(dev->dev, "dma map request failed: ret = %d\n", ret); 624 + 625 + return ret; 626 + } 627 + 628 + /** 629 + * mei_hbm_cl_dma_unmap_req - send client dma unmap request 630 + * 631 + * @dev: the device structure 632 + * @cl: mei host client 633 + * 634 + * Return: 0 on success and -EIO on write failure 635 + */ 636 + int mei_hbm_cl_dma_unmap_req(struct mei_device *dev, struct mei_cl *cl) 637 + { 638 + struct mei_msg_hdr mei_hdr; 639 + struct hbm_client_dma_unmap_request req; 640 + int ret; 641 + 642 + mei_hbm_hdr(&mei_hdr, sizeof(req)); 643 + 644 + memset(&req, 0, sizeof(req)); 645 + 646 + req.hbm_cmd = MEI_HBM_CLIENT_DMA_UNMAP_REQ_CMD; 647 + req.client_buffer_id = cl->dma.buffer_id; 648 + 649 + ret = mei_hbm_write_message(dev, &mei_hdr, &req); 650 + if (ret) 651 + dev_err(dev->dev, "dma unmap request failed: ret = %d\n", ret); 652 + 653 + return ret; 654 + } 655 + 656 + static void mei_hbm_cl_dma_map_res(struct mei_device *dev, 657 + struct hbm_client_dma_response *res) 658 + { 659 + struct mei_cl *cl; 660 + struct mei_cl_cb *cb, *next; 661 + 662 + cl = NULL; 663 + list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list, list) { 664 + if 
(cb->fop_type != MEI_FOP_DMA_MAP) 665 + continue; 666 + if (!cb->cl->dma.buffer_id || cb->cl->dma_mapped) 667 + continue; 668 + 669 + cl = cb->cl; 670 + break; 671 + } 672 + if (!cl) 673 + return; 674 + 675 + dev_dbg(dev->dev, "cl dma map result = %d\n", res->status); 676 + cl->status = res->status; 677 + if (!cl->status) 678 + cl->dma_mapped = 1; 679 + wake_up(&cl->wait); 680 + } 681 + 682 + static void mei_hbm_cl_dma_unmap_res(struct mei_device *dev, 683 + struct hbm_client_dma_response *res) 684 + { 685 + struct mei_cl *cl; 686 + struct mei_cl_cb *cb, *next; 687 + 688 + cl = NULL; 689 + list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list, list) { 690 + if (cb->fop_type != MEI_FOP_DMA_UNMAP) 691 + continue; 692 + if (!cb->cl->dma.buffer_id || !cb->cl->dma_mapped) 693 + continue; 694 + 695 + cl = cb->cl; 696 + break; 697 + } 698 + if (!cl) 699 + return; 700 + 701 + dev_dbg(dev->dev, "cl dma unmap result = %d\n", res->status); 702 + cl->status = res->status; 703 + if (!cl->status) 704 + cl->dma_mapped = 0; 705 + wake_up(&cl->wait); 706 + } 707 + 708 + /** 598 709 * mei_hbm_prop_req - request property for a single client 599 710 * 600 711 * @dev: the device structure ··· 1244 1133 struct mei_hbm_cl_cmd *cl_cmd; 1245 1134 struct hbm_client_connect_request *disconnect_req; 1246 1135 struct hbm_flow_control *fctrl; 1136 + struct hbm_client_dma_response *client_dma_res; 1247 1137 1248 1138 /* read the message to our buffer */ 1249 1139 BUG_ON(hdr->length >= sizeof(dev->rd_msg_buf)); ··· 1569 1457 case MEI_HBM_NOTIFICATION_CMD: 1570 1458 dev_dbg(dev->dev, "hbm: notification\n"); 1571 1459 mei_hbm_cl_notify(dev, cl_cmd); 1460 + break; 1461 + 1462 + case MEI_HBM_CLIENT_DMA_MAP_RES_CMD: 1463 + dev_dbg(dev->dev, "hbm: client dma map response: message received.\n"); 1464 + client_dma_res = (struct hbm_client_dma_response *)mei_msg; 1465 + mei_hbm_cl_dma_map_res(dev, client_dma_res); 1466 + break; 1467 + 1468 + case MEI_HBM_CLIENT_DMA_UNMAP_RES_CMD: 1469 + dev_dbg(dev->dev, 
"hbm: client dma unmap response: message received.\n"); 1470 + client_dma_res = (struct hbm_client_dma_response *)mei_msg; 1471 + mei_hbm_cl_dma_unmap_res(dev, client_dma_res); 1572 1472 break; 1573 1473 1574 1474 default:
+3 -1
drivers/misc/mei/hbm.h
··· 10 10 struct mei_device; 11 11 struct mei_msg_hdr; 12 12 struct mei_cl; 13 + struct mei_dma_data; 13 14 14 15 /** 15 16 * enum mei_hbm_state - host bus message protocol state ··· 52 51 void mei_hbm_pg_resume(struct mei_device *dev); 53 52 int mei_hbm_cl_notify_req(struct mei_device *dev, 54 53 struct mei_cl *cl, u8 request); 55 - 54 + int mei_hbm_cl_dma_map_req(struct mei_device *dev, struct mei_cl *cl); 55 + int mei_hbm_cl_dma_unmap_req(struct mei_device *dev, struct mei_cl *cl); 56 56 #endif /* _MEI_HBM_H_ */ 57 57
+10
drivers/misc/mei/interrupt.c
··· 547 547 if (ret) 548 548 return ret; 549 549 break; 550 + case MEI_FOP_DMA_MAP: 551 + ret = mei_cl_irq_dma_map(cl, cb, cmpl_list); 552 + if (ret) 553 + return ret; 554 + break; 555 + case MEI_FOP_DMA_UNMAP: 556 + ret = mei_cl_irq_dma_unmap(cl, cb, cmpl_list); 557 + if (ret) 558 + return ret; 559 + break; 550 560 default: 551 561 BUG(); 552 562 }
+15
drivers/misc/mei/mei_dev.h
··· 79 79 * @MEI_FOP_DISCONNECT_RSP: disconnect response 80 80 * @MEI_FOP_NOTIFY_START: start notification 81 81 * @MEI_FOP_NOTIFY_STOP: stop notification 82 + * @MEI_FOP_DMA_MAP: request client dma map 83 + * @MEI_FOP_DMA_UNMAP: request client dma unmap 82 84 */ 83 85 enum mei_cb_file_ops { 84 86 MEI_FOP_READ = 0, ··· 90 88 MEI_FOP_DISCONNECT_RSP, 91 89 MEI_FOP_NOTIFY_START, 92 90 MEI_FOP_NOTIFY_STOP, 91 + MEI_FOP_DMA_MAP, 92 + MEI_FOP_DMA_UNMAP, 93 93 }; 94 94 95 95 /** ··· 115 111 struct mei_msg_data { 116 112 size_t size; 117 113 unsigned char *data; 114 + }; 115 + 116 + struct mei_dma_data { 117 + u8 buffer_id; 118 + void *vaddr; 119 + dma_addr_t daddr; 120 + size_t size; 118 121 }; 119 122 120 123 /** ··· 247 236 * @rd_pending: pending read credits 248 237 * @rd_completed_lock: protects rd_completed queue 249 238 * @rd_completed: completed read 239 + * @dma: dma settings 240 + * @dma_mapped: dma buffer is currently mapped. 250 241 * 251 242 * @cldev: device on the mei client bus 252 243 */ ··· 276 263 struct list_head rd_pending; 277 264 spinlock_t rd_completed_lock; /* protects rd_completed queue */ 278 265 struct list_head rd_completed; 266 + struct mei_dma_data dma; 267 + u8 dma_mapped; 279 268 280 269 struct mei_cl_device *cldev; 281 270 };