Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

at v5.9-rc3 (1940 lines, 53 kB)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Intel SCIF driver.
 */
#include "scif_main.h"
#include "scif_map.h"

/*
 * struct scif_dma_comp_cb - SCIF DMA completion callback
 *
 * @dma_completion_func: DMA completion callback
 * @cb_cookie: DMA completion callback cookie
 * @temp_buf: Temporary buffer
 * @temp_buf_to_free: Temporary buffer to be freed
 * @is_cache: Is a kmem_cache allocated buffer
 * @dst_offset: Destination registration offset
 * @dst_window: Destination registration window
 * @len: Length of the temp buffer
 * @temp_phys: DMA address of the temp buffer
 * @sdev: The SCIF device
 * @header_padding: padding for cache line alignment
 */
struct scif_dma_comp_cb {
	void (*dma_completion_func)(void *cookie);
	void *cb_cookie;
	u8 *temp_buf;
	u8 *temp_buf_to_free;
	bool is_cache;
	s64 dst_offset;
	struct scif_window *dst_window;
	size_t len;
	dma_addr_t temp_phys;
	struct scif_dev *sdev;
	int header_padding;
};

/**
 * struct scif_copy_work - Work for DMA copy
 *
 * @src_offset: Starting source offset
 * @dst_offset: Starting destination offset
 * @src_window: Starting src registered window
 * @dst_window: Starting dst registered window
 * @loopback: true if this is a loopback DMA transfer
 * @len: Length of the transfer
 * @comp_cb: DMA copy completion callback
 * @remote_dev: The remote SCIF peer device
 * @fence_type: polling or interrupt based
 * @ordered: is this a tail byte ordered DMA transfer
 */
struct scif_copy_work {
	s64 src_offset;
	s64 dst_offset;
	struct scif_window *src_window;
	struct scif_window *dst_window;
	int loopback;
	size_t len;
	struct scif_dma_comp_cb *comp_cb;
	struct scif_dev *remote_dev;
	int fence_type;
	bool ordered;
};

/**
 * scif_reserve_dma_chan:
 * @ep: Endpoint Descriptor.
 *
 * This routine reserves a DMA channel for a particular
 * endpoint. All DMA transfers for an endpoint are always
 * programmed on the same DMA channel.
 */
int scif_reserve_dma_chan(struct scif_endpt *ep)
{
	int err = 0;
	struct scif_dev *scifdev;
	struct scif_hw_dev *sdev;
	struct dma_chan *chan;

	/* Loopback DMAs are not supported on the management node */
	if (!scif_info.nodeid && scifdev_self(ep->remote_dev))
		return 0;
	if (scif_info.nodeid)
		scifdev = &scif_dev[0];
	else
		scifdev = ep->remote_dev;
	sdev = scifdev->sdev;
	if (!sdev->num_dma_ch)
		return -ENODEV;
	chan = sdev->dma_ch[scifdev->dma_ch_idx];
	scifdev->dma_ch_idx = (scifdev->dma_ch_idx + 1) % sdev->num_dma_ch;
	mutex_lock(&ep->rma_info.rma_lock);
	ep->rma_info.dma_chan = chan;
	mutex_unlock(&ep->rma_info.rma_lock);
	return err;
}
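
/*
 * Illustrative sketch (not part of the original file): the reservation
 * above is a simple round-robin over the device's channels, so with
 * sdev->num_dma_ch == 3 the per-device index cycles 0, 1, 2, 0, ...
 * and each new endpoint is pinned to the next channel:
 *
 *	unsigned int idx = 0, num_dma_ch = 3;
 *	int i;
 *
 *	for (i = 0; i < 5; i++) {
 *		pr_info("endpoint %d -> dma_ch[%u]\n", i, idx);
 *		idx = (idx + 1) % num_dma_ch;
 *	}
 *
 * All later RMA transfers on that endpoint reuse the cached
 * ep->rma_info.dma_chan.
 */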

#ifdef CONFIG_MMU_NOTIFIER
/*
 * __scif_rma_destroy_tcw:
 *
 * This routine destroys temporary cached windows
 */
static
void __scif_rma_destroy_tcw(struct scif_mmu_notif *mmn,
			    u64 start, u64 len)
{
	struct list_head *item, *tmp;
	struct scif_window *window;
	u64 start_va, end_va;
	u64 end = start + len;

	if (end <= start)
		return;

	list_for_each_safe(item, tmp, &mmn->tc_reg_list) {
		window = list_entry(item, struct scif_window, list);
		if (!len)
			break;
		start_va = window->va_for_temp;
		end_va = start_va + (window->nr_pages << PAGE_SHIFT);
		if (start < start_va && end <= start_va)
			break;
		if (start >= end_va)
			continue;
		__scif_rma_destroy_tcw_helper(window);
	}
}

static void scif_rma_destroy_tcw(struct scif_mmu_notif *mmn, u64 start, u64 len)
{
	struct scif_endpt *ep = mmn->ep;

	spin_lock(&ep->rma_info.tc_lock);
	__scif_rma_destroy_tcw(mmn, start, len);
	spin_unlock(&ep->rma_info.tc_lock);
}

static void scif_rma_destroy_tcw_ep(struct scif_endpt *ep)
{
	struct list_head *item, *tmp;
	struct scif_mmu_notif *mmn;

	list_for_each_safe(item, tmp, &ep->rma_info.mmn_list) {
		mmn = list_entry(item, struct scif_mmu_notif, list);
		scif_rma_destroy_tcw(mmn, 0, ULONG_MAX);
	}
}

static void __scif_rma_destroy_tcw_ep(struct scif_endpt *ep)
{
	struct list_head *item, *tmp;
	struct scif_mmu_notif *mmn;

	spin_lock(&ep->rma_info.tc_lock);
	list_for_each_safe(item, tmp, &ep->rma_info.mmn_list) {
		mmn = list_entry(item, struct scif_mmu_notif, list);
		__scif_rma_destroy_tcw(mmn, 0, ULONG_MAX);
	}
	spin_unlock(&ep->rma_info.tc_lock);
}

static bool scif_rma_tc_can_cache(struct scif_endpt *ep, size_t cur_bytes)
{
	if ((cur_bytes >> PAGE_SHIFT) > scif_info.rma_tc_limit)
		return false;
	if ((atomic_read(&ep->rma_info.tcw_total_pages)
	     + (cur_bytes >> PAGE_SHIFT)) >
	    scif_info.rma_tc_limit) {
		dev_info(scif_info.mdev.this_device,
			 "%s %d total=%d, current=%zu reached max\n",
			 __func__, __LINE__,
			 atomic_read(&ep->rma_info.tcw_total_pages),
			 (1 + (cur_bytes >> PAGE_SHIFT)));
		scif_rma_destroy_tcw_invalid();
		__scif_rma_destroy_tcw_ep(ep);
	}
	return true;
}

static void scif_mmu_notifier_release(struct mmu_notifier *mn,
				      struct mm_struct *mm)
{
	struct scif_mmu_notif *mmn;

	mmn = container_of(mn, struct scif_mmu_notif, ep_mmu_notifier);
	scif_rma_destroy_tcw(mmn, 0, ULONG_MAX);
	schedule_work(&scif_info.misc_work);
}

static int scif_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    const struct mmu_notifier_range *range)
{
	struct scif_mmu_notif *mmn;

	mmn = container_of(mn, struct scif_mmu_notif,
			   ep_mmu_notifier);
	scif_rma_destroy_tcw(mmn, range->start, range->end - range->start);

	return 0;
}

static void scif_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						   const struct mmu_notifier_range *range)
{
	/*
	 * Nothing to do here, everything needed was done in
	 * invalidate_range_start.
	 */
}

static const struct mmu_notifier_ops scif_mmu_notifier_ops = {
	.release = scif_mmu_notifier_release,
	.clear_flush_young = NULL,
	.invalidate_range_start = scif_mmu_notifier_invalidate_range_start,
	.invalidate_range_end = scif_mmu_notifier_invalidate_range_end};

static void scif_ep_unregister_mmu_notifier(struct scif_endpt *ep)
{
	struct scif_endpt_rma_info *rma = &ep->rma_info;
	struct scif_mmu_notif *mmn = NULL;
	struct list_head *item, *tmp;

	mutex_lock(&ep->rma_info.mmn_lock);
	list_for_each_safe(item, tmp, &rma->mmn_list) {
		mmn = list_entry(item, struct scif_mmu_notif, list);
		mmu_notifier_unregister(&mmn->ep_mmu_notifier, mmn->mm);
		list_del(item);
		kfree(mmn);
	}
	mutex_unlock(&ep->rma_info.mmn_lock);
}

static void scif_init_mmu_notifier(struct scif_mmu_notif *mmn,
				   struct mm_struct *mm, struct scif_endpt *ep)
{
	mmn->ep = ep;
	mmn->mm = mm;
	mmn->ep_mmu_notifier.ops = &scif_mmu_notifier_ops;
	INIT_LIST_HEAD(&mmn->list);
	INIT_LIST_HEAD(&mmn->tc_reg_list);
}

static struct scif_mmu_notif *
scif_find_mmu_notifier(struct mm_struct *mm, struct scif_endpt_rma_info *rma)
{
	struct scif_mmu_notif *mmn;

	list_for_each_entry(mmn, &rma->mmn_list, list)
		if (mmn->mm == mm)
			return mmn;
	return NULL;
}

static struct scif_mmu_notif *
scif_add_mmu_notifier(struct mm_struct *mm, struct scif_endpt *ep)
{
	struct scif_mmu_notif *mmn
		= kzalloc(sizeof(*mmn), GFP_KERNEL);

	if (!mmn)
		return ERR_PTR(-ENOMEM);

	scif_init_mmu_notifier(mmn, current->mm, ep);
	if (mmu_notifier_register(&mmn->ep_mmu_notifier, current->mm)) {
		kfree(mmn);
		return ERR_PTR(-EBUSY);
	}
	list_add(&mmn->list, &ep->rma_info.mmn_list);
	return mmn;
}

/*
 * Called from the misc thread to destroy temporary cached windows and
 * unregister the MMU notifier for the SCIF endpoint.
 */
void scif_mmu_notif_handler(struct work_struct *work)
{
	struct list_head *pos, *tmpq;
	struct scif_endpt *ep;
restart:
	scif_rma_destroy_tcw_invalid();
	spin_lock(&scif_info.rmalock);
	list_for_each_safe(pos, tmpq, &scif_info.mmu_notif_cleanup) {
		ep = list_entry(pos, struct scif_endpt, mmu_list);
		list_del(&ep->mmu_list);
		spin_unlock(&scif_info.rmalock);
		scif_rma_destroy_tcw_ep(ep);
		scif_ep_unregister_mmu_notifier(ep);
		goto restart;
	}
	spin_unlock(&scif_info.rmalock);
}

static bool scif_is_set_reg_cache(int flags)
{
	return !!(flags & SCIF_RMA_USECACHE);
}
#else
static struct scif_mmu_notif *
scif_find_mmu_notifier(struct mm_struct *mm,
		       struct scif_endpt_rma_info *rma)
{
	return NULL;
}

static struct scif_mmu_notif *
scif_add_mmu_notifier(struct mm_struct *mm, struct scif_endpt *ep)
{
	return NULL;
}

void scif_mmu_notif_handler(struct work_struct *work)
{
}

static bool scif_is_set_reg_cache(int flags)
{
	return false;
}

static bool scif_rma_tc_can_cache(struct scif_endpt *ep, size_t cur_bytes)
{
	return false;
}
#endif

/**
 * scif_register_temp:
 * @epd: End Point Descriptor.
 * @addr: virtual address to/from which to copy
 * @len: length of range to copy
 * @prot: read/write protection
 * @out_offset: computed offset returned by reference.
 * @out_window: allocated registered window returned by reference.
 *
 * Create a temporary registered window. The peer will not know about this
 * window. This API is used for the scif_vreadfrom()/scif_vwriteto() APIs.
 */
static int
scif_register_temp(scif_epd_t epd, unsigned long addr, size_t len, int prot,
		   off_t *out_offset, struct scif_window **out_window)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	int err;
	scif_pinned_pages_t pinned_pages;
	size_t aligned_len;

	aligned_len = ALIGN(len, PAGE_SIZE);

	err = __scif_pin_pages((void *)(addr & PAGE_MASK),
			       aligned_len, &prot, 0, &pinned_pages);
	if (err)
		return err;

	pinned_pages->prot = prot;

	/* Compute the offset for this registration */
	err = scif_get_window_offset(ep, 0, 0,
				     aligned_len >> PAGE_SHIFT,
				     (s64 *)out_offset);
	if (err)
		goto error_unpin;

	/* Allocate and prepare self registration window */
	*out_window = scif_create_window(ep, aligned_len >> PAGE_SHIFT,
					 *out_offset, true);
	if (!*out_window) {
		scif_free_window_offset(ep, NULL, *out_offset);
		err = -ENOMEM;
		goto error_unpin;
	}

	(*out_window)->pinned_pages = pinned_pages;
	(*out_window)->nr_pages = pinned_pages->nr_pages;
	(*out_window)->prot = pinned_pages->prot;

	(*out_window)->va_for_temp = addr & PAGE_MASK;
	err = scif_map_window(ep->remote_dev, *out_window);
	if (err) {
		/* Something went wrong! Rollback */
		scif_destroy_window(ep, *out_window);
		*out_window = NULL;
	} else {
		*out_offset |= (addr - (*out_window)->va_for_temp);
	}
	return err;
error_unpin:
	if (err)
		dev_err(&ep->remote_dev->sdev->dev,
			"%s %d err %d\n", __func__, __LINE__, err);
	scif_unpin_pages(pinned_pages);
	return err;
}
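
/*
 * Illustrative sketch (not part of the original file): the offset
 * returned by scif_get_window_offset() above is page aligned, so the
 * sub-page part of the user address can simply be OR-ed in at the end.
 * Assuming a hypothetical window at registered offset 0x10000 and
 * addr = 0x401234 with 4 KiB pages:
 *
 *	unsigned long addr = 0x401234;
 *	unsigned long va_for_temp = addr & PAGE_MASK;	// 0x401000
 *	off_t out_offset = 0x10000;			// page aligned
 *
 *	out_offset |= addr - va_for_temp;		// 0x10234
 *
 * so a DMA programmed at out_offset starts exactly at addr.
 */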

#define SCIF_DMA_TO (3 * HZ)

/*
 * scif_sync_dma - Program a DMA without an interrupt descriptor
 *
 * @sdev - The SCIF hardware device instance used for DMA registration.
 * @chan - DMA channel to be used.
 * @sync_wait - Wait for DMA to complete?
 *
 * Return 0 on success and -errno on error.
 */
static int scif_sync_dma(struct scif_hw_dev *sdev, struct dma_chan *chan,
			 bool sync_wait)
{
	int err = 0;
	struct dma_async_tx_descriptor *tx = NULL;
	enum dma_ctrl_flags flags = DMA_PREP_FENCE;
	dma_cookie_t cookie;
	struct dma_device *ddev;

	if (!chan) {
		err = -EIO;
		dev_err(&sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		return err;
	}
	ddev = chan->device;

	tx = ddev->device_prep_dma_memcpy(chan, 0, 0, 0, flags);
	if (!tx) {
		err = -ENOMEM;
		dev_err(&sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		goto release;
	}
	cookie = tx->tx_submit(tx);

	if (dma_submit_error(cookie)) {
		err = -ENOMEM;
		dev_err(&sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		goto release;
	}
	if (!sync_wait) {
		dma_async_issue_pending(chan);
	} else {
		if (dma_sync_wait(chan, cookie) == DMA_COMPLETE) {
			err = 0;
		} else {
			err = -EIO;
			dev_err(&sdev->dev, "%s %d err %d\n",
				__func__, __LINE__, err);
		}
	}
release:
	return err;
}

static void scif_dma_callback(void *arg)
{
	struct completion *done = (struct completion *)arg;

	complete(done);
}

#define SCIF_DMA_SYNC_WAIT true
#define SCIF_DMA_POLL BIT(0)
#define SCIF_DMA_INTR BIT(1)

/*
 * scif_async_dma - Program a DMA with an interrupt descriptor
 *
 * @sdev - The SCIF hardware device instance used for DMA registration.
 * @chan - DMA channel to be used.
 *
 * Return 0 on success and -errno on error.
 */
static int scif_async_dma(struct scif_hw_dev *sdev, struct dma_chan *chan)
{
	int err = 0;
	struct dma_device *ddev;
	struct dma_async_tx_descriptor *tx = NULL;
	enum dma_ctrl_flags flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE;
	DECLARE_COMPLETION_ONSTACK(done_wait);
	dma_cookie_t cookie;
	enum dma_status status;

	if (!chan) {
		err = -EIO;
		dev_err(&sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		return err;
	}
	ddev = chan->device;

	tx = ddev->device_prep_dma_memcpy(chan, 0, 0, 0, flags);
	if (!tx) {
		err = -ENOMEM;
		dev_err(&sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		goto release;
	}
	reinit_completion(&done_wait);
	tx->callback = scif_dma_callback;
	tx->callback_param = &done_wait;
	cookie = tx->tx_submit(tx);

	if (dma_submit_error(cookie)) {
		err = -ENOMEM;
		dev_err(&sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		goto release;
	}
	dma_async_issue_pending(chan);

	err = wait_for_completion_timeout(&done_wait, SCIF_DMA_TO);
	if (!err) {
		err = -EIO;
		dev_err(&sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		goto release;
	}
	err = 0;
	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
	if (status != DMA_COMPLETE) {
		err = -EIO;
		dev_err(&sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		goto release;
	}
release:
	return err;
}
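
/*
 * Illustrative sketch (not part of the original file): both helpers
 * above follow the standard dmaengine submission pattern, using a
 * zero-length memcpy descriptor purely as a fence. A minimal polled
 * variant, assuming a valid chan:
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, 0, 0, 0,
 *						  DMA_PREP_FENCE);
 *	if (!tx)
 *		return -ENOMEM;
 *	cookie = tx->tx_submit(tx);
 *	if (dma_submit_error(cookie))
 *		return -ENOMEM;
 *	dma_async_issue_pending(chan);
 *	return dma_sync_wait(chan, cookie) == DMA_COMPLETE ? 0 : -EIO;
 */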

/*
 * scif_drain_dma_poll - Drain all outstanding DMA operations for a particular
 * DMA channel via polling.
 *
 * @sdev - The SCIF device
 * @chan - DMA channel
 * Return 0 on success and -errno on error.
 */
static int scif_drain_dma_poll(struct scif_hw_dev *sdev, struct dma_chan *chan)
{
	if (!chan)
		return -EINVAL;
	return scif_sync_dma(sdev, chan, SCIF_DMA_SYNC_WAIT);
}

/*
 * scif_drain_dma_intr - Drain all outstanding DMA operations for a particular
 * DMA channel via interrupt based blocking wait.
 *
 * @sdev - The SCIF device
 * @chan - DMA channel
 * Return 0 on success and -errno on error.
 */
int scif_drain_dma_intr(struct scif_hw_dev *sdev, struct dma_chan *chan)
{
	if (!chan)
		return -EINVAL;
	return scif_async_dma(sdev, chan);
}

/**
 * scif_rma_destroy_windows:
 *
 * This routine destroys all windows queued for cleanup
 */
void scif_rma_destroy_windows(void)
{
	struct list_head *item, *tmp;
	struct scif_window *window;
	struct scif_endpt *ep;
	struct dma_chan *chan;

	might_sleep();
restart:
	spin_lock(&scif_info.rmalock);
	list_for_each_safe(item, tmp, &scif_info.rma) {
		window = list_entry(item, struct scif_window,
				    list);
		ep = (struct scif_endpt *)window->ep;
		chan = ep->rma_info.dma_chan;

		list_del_init(&window->list);
		spin_unlock(&scif_info.rmalock);
		if (!chan || !scifdev_alive(ep) ||
		    !scif_drain_dma_intr(ep->remote_dev->sdev,
					 ep->rma_info.dma_chan))
			/* Remove window from global list */
			window->unreg_state = OP_COMPLETED;
		else
			dev_warn(&ep->remote_dev->sdev->dev,
				 "DMA engine hung?\n");
		if (window->unreg_state == OP_COMPLETED) {
			if (window->type == SCIF_WINDOW_SELF)
				scif_destroy_window(ep, window);
			else
				scif_destroy_remote_window(window);
			atomic_dec(&ep->rma_info.tw_refcount);
		}
		goto restart;
	}
	spin_unlock(&scif_info.rmalock);
}

/**
 * scif_rma_destroy_tcw_invalid:
 *
 * This routine destroys temporary cached registered windows
 * which have been queued for cleanup.
 */
void scif_rma_destroy_tcw_invalid(void)
{
	struct list_head *item, *tmp;
	struct scif_window *window;
	struct scif_endpt *ep;
	struct dma_chan *chan;

	might_sleep();
restart:
	spin_lock(&scif_info.rmalock);
	list_for_each_safe(item, tmp, &scif_info.rma_tc) {
		window = list_entry(item, struct scif_window, list);
		ep = (struct scif_endpt *)window->ep;
		chan = ep->rma_info.dma_chan;
		list_del_init(&window->list);
		spin_unlock(&scif_info.rmalock);
		mutex_lock(&ep->rma_info.rma_lock);
		if (!chan || !scifdev_alive(ep) ||
		    !scif_drain_dma_intr(ep->remote_dev->sdev,
					 ep->rma_info.dma_chan)) {
			atomic_sub(window->nr_pages,
				   &ep->rma_info.tcw_total_pages);
			scif_destroy_window(ep, window);
			atomic_dec(&ep->rma_info.tcw_refcount);
		} else {
			dev_warn(&ep->remote_dev->sdev->dev,
				 "DMA engine hung?\n");
		}
		mutex_unlock(&ep->rma_info.rma_lock);
		goto restart;
	}
	spin_unlock(&scif_info.rmalock);
}

static inline
void *_get_local_va(off_t off, struct scif_window *window, size_t len)
{
	int page_nr = (off - window->offset) >> PAGE_SHIFT;
	off_t page_off = off & ~PAGE_MASK;
	void *va = NULL;

	if (window->type == SCIF_WINDOW_SELF) {
		struct page **pages = window->pinned_pages->pages;

		va = page_address(pages[page_nr]) + page_off;
	}
	return va;
}

static inline
void *ioremap_remote(off_t off, struct scif_window *window,
		     size_t len, struct scif_dev *dev,
		     struct scif_window_iter *iter)
{
	dma_addr_t phys = scif_off_to_dma_addr(window, off, NULL, iter);

	/*
	 * If the DMA address is not card relative then we need the DMA
	 * addresses to be an offset into the bar. The aperture base was already
	 * added so subtract it here since scif_ioremap is going to add it again
	 */
	if (!scifdev_self(dev) && window->type == SCIF_WINDOW_PEER &&
	    dev->sdev->aper && !dev->sdev->card_rel_da)
		phys = phys - dev->sdev->aper->pa;
	return scif_ioremap(phys, len, dev);
}

static inline void
iounmap_remote(void *virt, size_t size, struct scif_copy_work *work)
{
	scif_iounmap(virt, size, work->remote_dev);
}

/*
 * Takes care of ordering issues caused by
 * 1. Hardware: Only in the case of cpu copy from mgmt node to card
 * because of WC memory.
 * 2. Software: If memcpy reorders copy instructions for optimization.
 * This could happen at both mgmt node and card.
 */
static inline void
scif_ordered_memcpy_toio(char *dst, const char *src, size_t count)
{
	if (!count)
		return;

	memcpy_toio((void __iomem __force *)dst, src, --count);
	/* Order the last byte with the previous stores */
	wmb();
	*(dst + count) = *(src + count);
}

static inline void scif_unaligned_cpy_toio(char *dst, const char *src,
					   size_t count, bool ordered)
{
	if (ordered)
		scif_ordered_memcpy_toio(dst, src, count);
	else
		memcpy_toio((void __iomem __force *)dst, src, count);
}

static inline
void scif_ordered_memcpy_fromio(char *dst, const char *src, size_t count)
{
	if (!count)
		return;

	memcpy_fromio(dst, (void __iomem __force *)src, --count);
	/* Order the last byte with the previous loads */
	rmb();
	*(dst + count) = *(src + count);
}

static inline void scif_unaligned_cpy_fromio(char *dst, const char *src,
					     size_t count, bool ordered)
{
	if (ordered)
		scif_ordered_memcpy_fromio(dst, src, count);
	else
		memcpy_fromio(dst, (void __iomem __force *)src, count);
}
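
/*
 * Illustrative sketch (not part of the original file): the ordered
 * variants above copy everything except the final byte, fence, and
 * only then store the final byte, so a peer polling that byte never
 * observes it before the body:
 *
 *	memcpy_toio((void __iomem __force *)dst, src, count - 1);
 *	wmb();					// body visible before tail
 *	dst[count - 1] = src[count - 1];	// tail byte lands last
 */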

#define SCIF_RMA_ERROR_CODE (~(dma_addr_t)0x0)

/*
 * scif_off_to_dma_addr:
 * Obtain the dma_addr given the window and the offset.
 * @window: Registered window.
 * @off: Window offset.
 * @nr_bytes: Return the number of contiguous bytes till next DMA addr index.
 * @iter: Returns the index of the dma_addr array found and the start offset
 * of that index.
 * The nr_bytes provides the caller an estimate of the maximum possible
 * DMA xfer while the iterator state provides faster lookups
 * for the next iteration.
 */
dma_addr_t scif_off_to_dma_addr(struct scif_window *window, s64 off,
				size_t *nr_bytes, struct scif_window_iter *iter)
{
	int i, page_nr;
	s64 start, end;
	off_t page_off;

	if (window->nr_pages == window->nr_contig_chunks) {
		page_nr = (off - window->offset) >> PAGE_SHIFT;
		page_off = off & ~PAGE_MASK;

		if (nr_bytes)
			*nr_bytes = PAGE_SIZE - page_off;
		return window->dma_addr[page_nr] | page_off;
	}
	if (iter) {
		i = iter->index;
		start = iter->offset;
	} else {
		i = 0;
		start = window->offset;
	}
	for (; i < window->nr_contig_chunks; i++) {
		end = start + (window->num_pages[i] << PAGE_SHIFT);
		if (off >= start && off < end) {
			if (iter) {
				iter->index = i;
				iter->offset = start;
			}
			if (nr_bytes)
				*nr_bytes = end - off;
			return (window->dma_addr[i] + (off - start));
		}
		start += (window->num_pages[i] << PAGE_SHIFT);
	}
	dev_err(scif_info.mdev.this_device,
		"%s %d BUG. Addr not found? window %p off 0x%llx\n",
		__func__, __LINE__, window, off);
	return SCIF_RMA_ERROR_CODE;
}
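
/*
 * Worked example (not part of the original file), assuming 4 KiB pages:
 * a window of contiguous chunks of 2, 1 and 4 pages starting at
 * registered offset 0x0 covers 0x0000-0x1FFF, 0x2000-0x2FFF and
 * 0x3000-0x6FFF. For off = 0x3800 the loop lands in chunk 2 and
 * returns dma_addr[2] + 0x800, with *nr_bytes = 0x7000 - 0x3800 =
 * 0x3800 as the longest contiguous run; the iterator remembers
 * index 2 so the next lookup resumes there instead of rescanning.
 */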

/*
 * Copy between rma window and temporary buffer
 */
static void scif_rma_local_cpu_copy(s64 offset, struct scif_window *window,
				    u8 *temp, size_t rem_len, bool to_temp)
{
	void *window_virt;
	size_t loop_len;
	int offset_in_page;
	s64 end_offset;

	offset_in_page = offset & ~PAGE_MASK;
	loop_len = PAGE_SIZE - offset_in_page;

	if (rem_len < loop_len)
		loop_len = rem_len;

	window_virt = _get_local_va(offset, window, loop_len);
	if (!window_virt)
		return;
	if (to_temp)
		memcpy(temp, window_virt, loop_len);
	else
		memcpy(window_virt, temp, loop_len);

	offset += loop_len;
	temp += loop_len;
	rem_len -= loop_len;

	end_offset = window->offset +
		(window->nr_pages << PAGE_SHIFT);
	while (rem_len) {
		if (offset == end_offset) {
			window = list_next_entry(window, list);
			end_offset = window->offset +
				(window->nr_pages << PAGE_SHIFT);
		}
		loop_len = min(PAGE_SIZE, rem_len);
		window_virt = _get_local_va(offset, window, loop_len);
		if (!window_virt)
			return;
		if (to_temp)
			memcpy(temp, window_virt, loop_len);
		else
			memcpy(window_virt, temp, loop_len);
		offset += loop_len;
		temp += loop_len;
		rem_len -= loop_len;
	}
}

/**
 * scif_rma_completion_cb:
 * @data: RMA cookie
 *
 * RMA interrupt completion callback.
 */
static void scif_rma_completion_cb(void *data)
{
	struct scif_dma_comp_cb *comp_cb = data;

	/* Free DMA Completion CB. */
	if (comp_cb->dst_window)
		scif_rma_local_cpu_copy(comp_cb->dst_offset,
					comp_cb->dst_window,
					comp_cb->temp_buf +
					comp_cb->header_padding,
					comp_cb->len, false);
	scif_unmap_single(comp_cb->temp_phys, comp_cb->sdev,
			  SCIF_KMEM_UNALIGNED_BUF_SIZE);
	if (comp_cb->is_cache)
		kmem_cache_free(unaligned_cache,
				comp_cb->temp_buf_to_free);
	else
		kfree(comp_cb->temp_buf_to_free);
}

/* Copies between temporary buffer and offsets provided in work */
static int
scif_rma_list_dma_copy_unaligned(struct scif_copy_work *work,
				 u8 *temp, struct dma_chan *chan,
				 bool src_local)
{
	struct scif_dma_comp_cb *comp_cb = work->comp_cb;
	dma_addr_t window_dma_addr, temp_dma_addr;
	dma_addr_t temp_phys = comp_cb->temp_phys;
	size_t loop_len, nr_contig_bytes = 0, remaining_len = work->len;
	int offset_in_ca, ret = 0;
	s64 end_offset, offset;
	struct scif_window *window;
	void *window_virt_addr;
	size_t tail_len;
	struct dma_async_tx_descriptor *tx;
	struct dma_device *dev = chan->device;
	dma_cookie_t cookie;

	if (src_local) {
		offset = work->dst_offset;
		window = work->dst_window;
	} else {
		offset = work->src_offset;
		window = work->src_window;
	}

	offset_in_ca = offset & (L1_CACHE_BYTES - 1);
	if (offset_in_ca) {
		loop_len = L1_CACHE_BYTES - offset_in_ca;
		loop_len = min(loop_len, remaining_len);
		window_virt_addr = ioremap_remote(offset, window,
						  loop_len,
						  work->remote_dev,
						  NULL);
		if (!window_virt_addr)
			return -ENOMEM;
		if (src_local)
			scif_unaligned_cpy_toio(window_virt_addr, temp,
						loop_len,
						work->ordered &&
						!(remaining_len - loop_len));
		else
			scif_unaligned_cpy_fromio(temp, window_virt_addr,
						  loop_len, work->ordered &&
						  !(remaining_len - loop_len));
		iounmap_remote(window_virt_addr, loop_len, work);

		offset += loop_len;
		temp += loop_len;
		temp_phys += loop_len;
		remaining_len -= loop_len;
	}

	offset_in_ca = offset & ~PAGE_MASK;
	end_offset = window->offset +
		(window->nr_pages << PAGE_SHIFT);

	tail_len = remaining_len & (L1_CACHE_BYTES - 1);
	remaining_len -= tail_len;
	while (remaining_len) {
		if (offset == end_offset) {
			window = list_next_entry(window, list);
			end_offset = window->offset +
				(window->nr_pages << PAGE_SHIFT);
		}
		if (scif_is_mgmt_node())
			temp_dma_addr = temp_phys;
		else
			/* Fix if we ever enable IOMMU on the card */
			temp_dma_addr = (dma_addr_t)virt_to_phys(temp);
		window_dma_addr = scif_off_to_dma_addr(window, offset,
						       &nr_contig_bytes,
						       NULL);
		loop_len = min(nr_contig_bytes, remaining_len);
		if (src_local) {
			if (work->ordered && !tail_len &&
			    !(remaining_len - loop_len) &&
			    loop_len != L1_CACHE_BYTES) {
				/*
				 * Break up the last chunk of the transfer into
				 * two steps if there is no tail, to guarantee
				 * DMA ordering. SCIF_DMA_POLLING inserts
				 * a status update descriptor in step 1 which
				 * acts as a double sided synchronization fence
				 * for the DMA engine to ensure that the last
				 * cache line in step 2 is updated last.
				 */
				/* Step 1) DMA: Body Length - L1_CACHE_BYTES. */
				tx =
				dev->device_prep_dma_memcpy(chan,
							    window_dma_addr,
							    temp_dma_addr,
							    loop_len -
							    L1_CACHE_BYTES,
							    DMA_PREP_FENCE);
				if (!tx) {
					ret = -ENOMEM;
					goto err;
				}
				cookie = tx->tx_submit(tx);
				if (dma_submit_error(cookie)) {
					ret = -ENOMEM;
					goto err;
				}
				dma_async_issue_pending(chan);
				offset += (loop_len - L1_CACHE_BYTES);
				temp_dma_addr += (loop_len - L1_CACHE_BYTES);
				window_dma_addr += (loop_len - L1_CACHE_BYTES);
				remaining_len -= (loop_len - L1_CACHE_BYTES);
				loop_len = remaining_len;

				/* Step 2) DMA: L1_CACHE_BYTES */
				tx =
				dev->device_prep_dma_memcpy(chan,
							    window_dma_addr,
							    temp_dma_addr,
							    loop_len, 0);
				if (!tx) {
					ret = -ENOMEM;
					goto err;
				}
				cookie = tx->tx_submit(tx);
				if (dma_submit_error(cookie)) {
					ret = -ENOMEM;
					goto err;
				}
				dma_async_issue_pending(chan);
			} else {
				tx =
				dev->device_prep_dma_memcpy(chan,
							    window_dma_addr,
							    temp_dma_addr,
							    loop_len, 0);
				if (!tx) {
					ret = -ENOMEM;
					goto err;
				}
				cookie = tx->tx_submit(tx);
				if (dma_submit_error(cookie)) {
					ret = -ENOMEM;
					goto err;
				}
				dma_async_issue_pending(chan);
			}
		} else {
			tx = dev->device_prep_dma_memcpy(chan, temp_dma_addr,
					window_dma_addr, loop_len, 0);
			if (!tx) {
				ret = -ENOMEM;
				goto err;
			}
			cookie = tx->tx_submit(tx);
			if (dma_submit_error(cookie)) {
				ret = -ENOMEM;
				goto err;
			}
			dma_async_issue_pending(chan);
		}
		offset += loop_len;
		temp += loop_len;
		temp_phys += loop_len;
		remaining_len -= loop_len;
		offset_in_ca = 0;
	}
	if (tail_len) {
		if (offset == end_offset) {
			window = list_next_entry(window, list);
			end_offset = window->offset +
				(window->nr_pages << PAGE_SHIFT);
		}
		window_virt_addr = ioremap_remote(offset, window, tail_len,
						  work->remote_dev,
						  NULL);
		if (!window_virt_addr)
			return -ENOMEM;
		/*
		 * The CPU copy for the tail bytes must be initiated only once
		 * previous DMA transfers for this endpoint have completed
		 * to guarantee ordering.
		 */
		if (work->ordered) {
			struct scif_dev *rdev = work->remote_dev;

			ret = scif_drain_dma_intr(rdev->sdev, chan);
			if (ret)
				return ret;
		}
		if (src_local)
			scif_unaligned_cpy_toio(window_virt_addr, temp,
						tail_len, work->ordered);
		else
			scif_unaligned_cpy_fromio(temp, window_virt_addr,
						  tail_len, work->ordered);
		iounmap_remote(window_virt_addr, tail_len, work);
	}
	tx = dev->device_prep_dma_memcpy(chan, 0, 0, 0, DMA_PREP_INTERRUPT);
	if (!tx) {
		ret = -ENOMEM;
		return ret;
	}
	tx->callback = &scif_rma_completion_cb;
	tx->callback_param = comp_cb;
	cookie = tx->tx_submit(tx);

	if (dma_submit_error(cookie)) {
		ret = -ENOMEM;
		return ret;
	}
	dma_async_issue_pending(chan);
	return 0;
err:
	dev_err(scif_info.mdev.this_device,
		"%s %d Desc Prog Failed ret %d\n",
		__func__, __LINE__, ret);
	return ret;
}

/*
 * _scif_rma_list_dma_copy_aligned:
 *
 * Traverse all the windows and perform DMA copy.
 */
static int _scif_rma_list_dma_copy_aligned(struct scif_copy_work *work,
					   struct dma_chan *chan)
{
	dma_addr_t src_dma_addr, dst_dma_addr;
	size_t loop_len, remaining_len, src_contig_bytes = 0;
	size_t dst_contig_bytes = 0;
	struct scif_window_iter src_win_iter;
	struct scif_window_iter dst_win_iter;
	s64 end_src_offset, end_dst_offset;
	struct scif_window *src_window = work->src_window;
	struct scif_window *dst_window = work->dst_window;
	s64 src_offset = work->src_offset, dst_offset = work->dst_offset;
	int ret = 0;
	struct dma_async_tx_descriptor *tx;
	struct dma_device *dev = chan->device;
	dma_cookie_t cookie;

	remaining_len = work->len;

	scif_init_window_iter(src_window, &src_win_iter);
	scif_init_window_iter(dst_window, &dst_win_iter);
	end_src_offset = src_window->offset +
		(src_window->nr_pages << PAGE_SHIFT);
	end_dst_offset = dst_window->offset +
		(dst_window->nr_pages << PAGE_SHIFT);
	while (remaining_len) {
		if (src_offset == end_src_offset) {
			src_window = list_next_entry(src_window, list);
			end_src_offset = src_window->offset +
				(src_window->nr_pages << PAGE_SHIFT);
			scif_init_window_iter(src_window, &src_win_iter);
		}
		if (dst_offset == end_dst_offset) {
			dst_window = list_next_entry(dst_window, list);
			end_dst_offset = dst_window->offset +
				(dst_window->nr_pages << PAGE_SHIFT);
			scif_init_window_iter(dst_window, &dst_win_iter);
		}

		/* compute dma addresses for transfer */
		src_dma_addr = scif_off_to_dma_addr(src_window, src_offset,
						    &src_contig_bytes,
						    &src_win_iter);
		dst_dma_addr = scif_off_to_dma_addr(dst_window, dst_offset,
						    &dst_contig_bytes,
						    &dst_win_iter);
		loop_len = min(src_contig_bytes, dst_contig_bytes);
		loop_len = min(loop_len, remaining_len);
		if (work->ordered && !(remaining_len - loop_len)) {
			/*
			 * Break up the last chunk of the transfer into two
			 * steps to ensure that the last byte in step 2 is
			 * updated last.
			 */
			/* Step 1) DMA: Body Length - 1 */
			tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
							 src_dma_addr,
							 loop_len - 1,
							 DMA_PREP_FENCE);
			if (!tx) {
				ret = -ENOMEM;
				goto err;
			}
			cookie = tx->tx_submit(tx);
			if (dma_submit_error(cookie)) {
				ret = -ENOMEM;
				goto err;
			}
			src_offset += (loop_len - 1);
			dst_offset += (loop_len - 1);
			src_dma_addr += (loop_len - 1);
			dst_dma_addr += (loop_len - 1);
			remaining_len -= (loop_len - 1);
			loop_len = remaining_len;

			/* Step 2) DMA: 1 byte */
			tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
					src_dma_addr, loop_len, 0);
			if (!tx) {
				ret = -ENOMEM;
				goto err;
			}
			cookie = tx->tx_submit(tx);
			if (dma_submit_error(cookie)) {
				ret = -ENOMEM;
				goto err;
			}
			dma_async_issue_pending(chan);
		} else {
			tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
					src_dma_addr, loop_len, 0);
			if (!tx) {
				ret = -ENOMEM;
				goto err;
			}
			cookie = tx->tx_submit(tx);
			if (dma_submit_error(cookie)) {
				ret = -ENOMEM;
				goto err;
			}
		}
		src_offset += loop_len;
		dst_offset += loop_len;
		remaining_len -= loop_len;
	}
	return ret;
err:
	dev_err(scif_info.mdev.this_device,
		"%s %d Desc Prog Failed ret %d\n",
		__func__, __LINE__, ret);
	return ret;
}

/*
 * scif_rma_list_dma_copy_aligned:
 *
 * Traverse all the windows and perform DMA copy.
 */
static int scif_rma_list_dma_copy_aligned(struct scif_copy_work *work,
					  struct dma_chan *chan)
{
	dma_addr_t src_dma_addr, dst_dma_addr;
	size_t loop_len, remaining_len, tail_len, src_contig_bytes = 0;
	size_t dst_contig_bytes = 0;
	int src_cache_off;
	s64 end_src_offset, end_dst_offset;
	struct scif_window_iter src_win_iter;
	struct scif_window_iter dst_win_iter;
	void *src_virt, *dst_virt;
	struct scif_window *src_window = work->src_window;
	struct scif_window *dst_window = work->dst_window;
	s64 src_offset = work->src_offset, dst_offset = work->dst_offset;
	int ret = 0;
	struct dma_async_tx_descriptor *tx;
	struct dma_device *dev = chan->device;
	dma_cookie_t cookie;

	remaining_len = work->len;
	scif_init_window_iter(src_window, &src_win_iter);
	scif_init_window_iter(dst_window, &dst_win_iter);

	src_cache_off = src_offset & (L1_CACHE_BYTES - 1);
	if (src_cache_off != 0) {
		/* Head */
		loop_len = L1_CACHE_BYTES - src_cache_off;
		loop_len = min(loop_len, remaining_len);
		src_dma_addr = __scif_off_to_dma_addr(src_window, src_offset);
		dst_dma_addr = __scif_off_to_dma_addr(dst_window, dst_offset);
		if (src_window->type == SCIF_WINDOW_SELF)
			src_virt = _get_local_va(src_offset, src_window,
						 loop_len);
		else
			src_virt = ioremap_remote(src_offset, src_window,
						  loop_len,
						  work->remote_dev, NULL);
		if (!src_virt)
			return -ENOMEM;
		if (dst_window->type == SCIF_WINDOW_SELF)
			dst_virt = _get_local_va(dst_offset, dst_window,
						 loop_len);
		else
			dst_virt = ioremap_remote(dst_offset, dst_window,
						  loop_len,
						  work->remote_dev, NULL);
		if (!dst_virt) {
			if (src_window->type != SCIF_WINDOW_SELF)
				iounmap_remote(src_virt, loop_len, work);
			return -ENOMEM;
		}
		if (src_window->type == SCIF_WINDOW_SELF)
			scif_unaligned_cpy_toio(dst_virt, src_virt, loop_len,
						remaining_len == loop_len ?
						work->ordered : false);
		else
			scif_unaligned_cpy_fromio(dst_virt, src_virt, loop_len,
						  remaining_len == loop_len ?
						  work->ordered : false);
		if (src_window->type != SCIF_WINDOW_SELF)
			iounmap_remote(src_virt, loop_len, work);
		if (dst_window->type != SCIF_WINDOW_SELF)
			iounmap_remote(dst_virt, loop_len, work);
		src_offset += loop_len;
		dst_offset += loop_len;
		remaining_len -= loop_len;
	}

	end_src_offset = src_window->offset +
		(src_window->nr_pages << PAGE_SHIFT);
	end_dst_offset = dst_window->offset +
		(dst_window->nr_pages << PAGE_SHIFT);
	tail_len = remaining_len & (L1_CACHE_BYTES - 1);
	remaining_len -= tail_len;
	while (remaining_len) {
		if (src_offset == end_src_offset) {
			src_window = list_next_entry(src_window, list);
			end_src_offset = src_window->offset +
				(src_window->nr_pages << PAGE_SHIFT);
			scif_init_window_iter(src_window, &src_win_iter);
		}
		if (dst_offset == end_dst_offset) {
			dst_window = list_next_entry(dst_window, list);
			end_dst_offset = dst_window->offset +
				(dst_window->nr_pages << PAGE_SHIFT);
			scif_init_window_iter(dst_window, &dst_win_iter);
		}

		/* compute dma addresses for transfer */
		src_dma_addr = scif_off_to_dma_addr(src_window, src_offset,
						    &src_contig_bytes,
						    &src_win_iter);
		dst_dma_addr = scif_off_to_dma_addr(dst_window, dst_offset,
						    &dst_contig_bytes,
						    &dst_win_iter);
		loop_len = min(src_contig_bytes, dst_contig_bytes);
		loop_len = min(loop_len, remaining_len);
		if (work->ordered && !tail_len &&
		    !(remaining_len - loop_len)) {
			/*
			 * Break up the last chunk of the transfer into two
			 * steps if there is no tail, to guarantee DMA
			 * ordering. Passing SCIF_DMA_POLLING inserts a status
			 * update descriptor in step 1 which acts as a double
			 * sided synchronization fence for the DMA engine to
			 * ensure that the last cache line in step 2 is
			 * updated last.
			 */
			/* Step 1) DMA: Body Length - L1_CACHE_BYTES. */
			tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
							 src_dma_addr,
							 loop_len -
							 L1_CACHE_BYTES,
							 DMA_PREP_FENCE);
			if (!tx) {
				ret = -ENOMEM;
				goto err;
			}
			cookie = tx->tx_submit(tx);
			if (dma_submit_error(cookie)) {
				ret = -ENOMEM;
				goto err;
			}
			dma_async_issue_pending(chan);
			src_offset += (loop_len - L1_CACHE_BYTES);
			dst_offset += (loop_len - L1_CACHE_BYTES);
			src_dma_addr += (loop_len - L1_CACHE_BYTES);
			dst_dma_addr += (loop_len - L1_CACHE_BYTES);
			remaining_len -= (loop_len - L1_CACHE_BYTES);
			loop_len = remaining_len;

			/* Step 2) DMA: L1_CACHE_BYTES */
			tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
							 src_dma_addr,
							 loop_len, 0);
			if (!tx) {
				ret = -ENOMEM;
				goto err;
			}
			cookie = tx->tx_submit(tx);
			if (dma_submit_error(cookie)) {
				ret = -ENOMEM;
				goto err;
			}
			dma_async_issue_pending(chan);
		} else {
			tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
							 src_dma_addr,
							 loop_len, 0);
			if (!tx) {
				ret = -ENOMEM;
				goto err;
			}
			cookie = tx->tx_submit(tx);
			if (dma_submit_error(cookie)) {
				ret = -ENOMEM;
				goto err;
			}
			dma_async_issue_pending(chan);
		}
		src_offset += loop_len;
		dst_offset += loop_len;
		remaining_len -= loop_len;
	}
	remaining_len = tail_len;
	if (remaining_len) {
		loop_len = remaining_len;
		if (src_offset == end_src_offset)
			src_window = list_next_entry(src_window, list);
		if (dst_offset == end_dst_offset)
			dst_window = list_next_entry(dst_window, list);

		src_dma_addr = __scif_off_to_dma_addr(src_window, src_offset);
		dst_dma_addr = __scif_off_to_dma_addr(dst_window, dst_offset);
		/*
		 * The CPU copy for the tail bytes must be initiated only once
		 * previous DMA transfers for this endpoint have completed to
		 * guarantee ordering.
		 */
		if (work->ordered) {
			struct scif_dev *rdev = work->remote_dev;

			ret = scif_drain_dma_poll(rdev->sdev, chan);
			if (ret)
				return ret;
		}
		if (src_window->type == SCIF_WINDOW_SELF)
			src_virt = _get_local_va(src_offset, src_window,
						 loop_len);
		else
			src_virt = ioremap_remote(src_offset, src_window,
						  loop_len,
						  work->remote_dev, NULL);
		if (!src_virt)
			return -ENOMEM;

		if (dst_window->type == SCIF_WINDOW_SELF)
			dst_virt = _get_local_va(dst_offset, dst_window,
						 loop_len);
		else
			dst_virt = ioremap_remote(dst_offset, dst_window,
						  loop_len,
						  work->remote_dev, NULL);
		if (!dst_virt) {
			if (src_window->type != SCIF_WINDOW_SELF)
				iounmap_remote(src_virt, loop_len, work);
			return -ENOMEM;
		}

		if (src_window->type == SCIF_WINDOW_SELF)
			scif_unaligned_cpy_toio(dst_virt, src_virt, loop_len,
						work->ordered);
		else
			scif_unaligned_cpy_fromio(dst_virt, src_virt,
						  loop_len, work->ordered);
		if (src_window->type != SCIF_WINDOW_SELF)
			iounmap_remote(src_virt, loop_len, work);

		if (dst_window->type != SCIF_WINDOW_SELF)
			iounmap_remote(dst_virt, loop_len, work);
		remaining_len -= loop_len;
	}
	return ret;
err:
	dev_err(scif_info.mdev.this_device,
		"%s %d Desc Prog Failed ret %d\n",
		__func__, __LINE__, ret);
	return ret;
}

/*
 * scif_rma_list_cpu_copy:
 *
 * Traverse all the windows and perform CPU copy.
 */
static int scif_rma_list_cpu_copy(struct scif_copy_work *work)
{
	void *src_virt, *dst_virt;
	size_t loop_len, remaining_len;
	int src_page_off, dst_page_off;
	s64 src_offset = work->src_offset, dst_offset = work->dst_offset;
	struct scif_window *src_window = work->src_window;
	struct scif_window *dst_window = work->dst_window;
	s64 end_src_offset, end_dst_offset;
	int ret = 0;
	struct scif_window_iter src_win_iter;
	struct scif_window_iter dst_win_iter;

	remaining_len = work->len;

	scif_init_window_iter(src_window, &src_win_iter);
	scif_init_window_iter(dst_window, &dst_win_iter);
	while (remaining_len) {
		src_page_off = src_offset & ~PAGE_MASK;
		dst_page_off = dst_offset & ~PAGE_MASK;
		loop_len = min(PAGE_SIZE -
			       max(src_page_off, dst_page_off),
			       remaining_len);

		if (src_window->type == SCIF_WINDOW_SELF)
			src_virt = _get_local_va(src_offset, src_window,
						 loop_len);
		else
			src_virt = ioremap_remote(src_offset, src_window,
						  loop_len,
						  work->remote_dev,
						  &src_win_iter);
		if (!src_virt) {
			ret = -ENOMEM;
			goto error;
		}

		if (dst_window->type == SCIF_WINDOW_SELF)
			dst_virt = _get_local_va(dst_offset, dst_window,
						 loop_len);
		else
			dst_virt = ioremap_remote(dst_offset, dst_window,
						  loop_len,
						  work->remote_dev,
						  &dst_win_iter);
		if (!dst_virt) {
			if (src_window->type == SCIF_WINDOW_PEER)
				iounmap_remote(src_virt, loop_len, work);
			ret = -ENOMEM;
			goto error;
		}

		if (work->loopback) {
			memcpy(dst_virt, src_virt, loop_len);
		} else {
			if (src_window->type == SCIF_WINDOW_SELF)
				memcpy_toio((void __iomem __force *)dst_virt,
					    src_virt, loop_len);
			else
				memcpy_fromio(dst_virt,
					      (void __iomem __force *)src_virt,
					      loop_len);
		}
		if (src_window->type == SCIF_WINDOW_PEER)
			iounmap_remote(src_virt, loop_len, work);

		if (dst_window->type == SCIF_WINDOW_PEER)
			iounmap_remote(dst_virt, loop_len, work);

		src_offset += loop_len;
		dst_offset += loop_len;
		remaining_len -= loop_len;
		if (remaining_len) {
			end_src_offset = src_window->offset +
				(src_window->nr_pages << PAGE_SHIFT);
			end_dst_offset = dst_window->offset +
				(dst_window->nr_pages << PAGE_SHIFT);
			if (src_offset == end_src_offset) {
				src_window = list_next_entry(src_window, list);
				scif_init_window_iter(src_window,
						      &src_win_iter);
			}
			if (dst_offset == end_dst_offset) {
				dst_window = list_next_entry(dst_window, list);
				scif_init_window_iter(dst_window,
						      &dst_win_iter);
			}
		}
	}
error:
	return ret;
}
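
/*
 * Worked example (not part of the original file), assuming 4 KiB pages:
 * with src_offset = 0x1F00 and dst_offset = 0x2E00 the page offsets are
 * 0xF00 and 0xE00, so
 *
 *	loop_len = min(PAGE_SIZE - max(0xF00, 0xE00), remaining_len)
 *		 = min(0x100, remaining_len);
 *
 * i.e. at most 256 bytes are copied before the source crosses into its
 * next page and fresh mappings must be computed.
 */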

static int scif_rma_list_dma_copy_wrapper(struct scif_endpt *epd,
					  struct scif_copy_work *work,
					  struct dma_chan *chan, off_t loffset)
{
	int src_cache_off, dst_cache_off;
	s64 src_offset = work->src_offset, dst_offset = work->dst_offset;
	u8 *temp = NULL;
	bool src_local = true;
	struct scif_dma_comp_cb *comp_cb;
	int err;

	if (is_dma_copy_aligned(chan->device, 1, 1, 1))
		return _scif_rma_list_dma_copy_aligned(work, chan);

	src_cache_off = src_offset & (L1_CACHE_BYTES - 1);
	dst_cache_off = dst_offset & (L1_CACHE_BYTES - 1);

	if (dst_cache_off == src_cache_off)
		return scif_rma_list_dma_copy_aligned(work, chan);

	if (work->loopback)
		return scif_rma_list_cpu_copy(work);

	src_local = work->src_window->type == SCIF_WINDOW_SELF;

	/* Allocate dma_completion cb */
	comp_cb = kzalloc(sizeof(*comp_cb), GFP_KERNEL);
	if (!comp_cb)
		goto error;

	work->comp_cb = comp_cb;
	comp_cb->cb_cookie = comp_cb;
	comp_cb->dma_completion_func = &scif_rma_completion_cb;

	if (work->len + (L1_CACHE_BYTES << 1) < SCIF_KMEM_UNALIGNED_BUF_SIZE) {
		comp_cb->is_cache = false;
		/* Allocate padding bytes to align to a cache line */
		temp = kmalloc(work->len + (L1_CACHE_BYTES << 1),
			       GFP_KERNEL);
		if (!temp)
			goto free_comp_cb;
		comp_cb->temp_buf_to_free = temp;
		/* kmalloc(..) does not guarantee cache line alignment */
		if (!IS_ALIGNED((u64)temp, L1_CACHE_BYTES))
			temp = PTR_ALIGN(temp, L1_CACHE_BYTES);
	} else {
		comp_cb->is_cache = true;
		temp = kmem_cache_alloc(unaligned_cache, GFP_KERNEL);
		if (!temp)
			goto free_comp_cb;
		comp_cb->temp_buf_to_free = temp;
	}

	if (src_local) {
		temp += dst_cache_off;
		scif_rma_local_cpu_copy(work->src_offset, work->src_window,
					temp, work->len, true);
	} else {
		comp_cb->dst_window = work->dst_window;
		comp_cb->dst_offset = work->dst_offset;
		work->src_offset = work->src_offset - src_cache_off;
		comp_cb->len = work->len;
		work->len = ALIGN(work->len + src_cache_off, L1_CACHE_BYTES);
		comp_cb->header_padding = src_cache_off;
	}
	comp_cb->temp_buf = temp;

	err = scif_map_single(&comp_cb->temp_phys, temp,
			      work->remote_dev, SCIF_KMEM_UNALIGNED_BUF_SIZE);
	if (err)
		goto free_temp_buf;
	comp_cb->sdev = work->remote_dev;
	if (scif_rma_list_dma_copy_unaligned(work, temp, chan, src_local) < 0)
		goto free_temp_buf;
	if (!src_local)
		work->fence_type = SCIF_DMA_INTR;
	return 0;
free_temp_buf:
	if (comp_cb->is_cache)
		kmem_cache_free(unaligned_cache, comp_cb->temp_buf_to_free);
	else
		kfree(comp_cb->temp_buf_to_free);
free_comp_cb:
	kfree(comp_cb);
error:
	return -ENOMEM;
}

/**
 * scif_rma_copy:
 * @epd: end point descriptor.
 * @loffset: offset in local registered address space to/from which to copy
 * @addr: user virtual address to/from which to copy
 * @len: length of range to copy
 * @roffset: offset in remote registered address space to/from which to copy
 * @flags: flags
 * @dir: LOCAL->REMOTE or vice versa.
 * @last_chunk: true if this is the last chunk of a larger transfer
 *
 * Validate parameters, check if src/dst registered ranges requested for copy
 * are valid and initiate either CPU or DMA copy.
 */
static int scif_rma_copy(scif_epd_t epd, off_t loffset, unsigned long addr,
			 size_t len, off_t roffset, int flags,
			 enum scif_rma_dir dir, bool last_chunk)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	struct scif_rma_req remote_req;
	struct scif_rma_req req;
	struct scif_window *local_window = NULL;
	struct scif_window *remote_window = NULL;
	struct scif_copy_work copy_work;
	bool loopback;
	int err = 0;
	struct dma_chan *chan;
	struct scif_mmu_notif *mmn = NULL;
	bool cache = false;
	struct device *spdev;

	err = scif_verify_epd(ep);
	if (err)
		return err;

	if (flags && !(flags & (SCIF_RMA_USECPU | SCIF_RMA_USECACHE |
				SCIF_RMA_SYNC | SCIF_RMA_ORDERED)))
		return -EINVAL;

	loopback = scifdev_self(ep->remote_dev);
	copy_work.fence_type = ((flags & SCIF_RMA_SYNC) && last_chunk) ?
				SCIF_DMA_POLL : 0;
	copy_work.ordered = !!((flags & SCIF_RMA_ORDERED) && last_chunk);

	/* Use CPU for Mgmt node <-> Mgmt node copies */
	if (loopback && scif_is_mgmt_node()) {
		flags |= SCIF_RMA_USECPU;
		copy_work.fence_type = 0x0;
	}

	cache = scif_is_set_reg_cache(flags);

	remote_req.out_window = &remote_window;
	remote_req.offset = roffset;
	remote_req.nr_bytes = len;
	/*
	 * If transfer is from local to remote then the remote window
	 * must be writeable and vice versa.
	 */
	remote_req.prot = dir == SCIF_LOCAL_TO_REMOTE ? VM_WRITE : VM_READ;
	remote_req.type = SCIF_WINDOW_PARTIAL;
	remote_req.head = &ep->rma_info.remote_reg_list;

	spdev = scif_get_peer_dev(ep->remote_dev);
	if (IS_ERR(spdev)) {
		err = PTR_ERR(spdev);
		return err;
	}

	if (addr && cache) {
		mutex_lock(&ep->rma_info.mmn_lock);
		mmn = scif_find_mmu_notifier(current->mm, &ep->rma_info);
		if (!mmn)
			mmn = scif_add_mmu_notifier(current->mm, ep);
		mutex_unlock(&ep->rma_info.mmn_lock);
		if (IS_ERR(mmn)) {
			scif_put_peer_dev(spdev);
			return PTR_ERR(mmn);
		}
		cache = cache && !scif_rma_tc_can_cache(ep, len);
	}
	mutex_lock(&ep->rma_info.rma_lock);
	if (addr) {
		req.out_window = &local_window;
		req.nr_bytes = ALIGN(len + (addr & ~PAGE_MASK),
				     PAGE_SIZE);
		req.va_for_temp = addr & PAGE_MASK;
		req.prot = (dir == SCIF_LOCAL_TO_REMOTE ?
			    VM_READ : VM_WRITE | VM_READ);
		/* Does a valid local window exist? */
		if (mmn) {
			spin_lock(&ep->rma_info.tc_lock);
			req.head = &mmn->tc_reg_list;
			err = scif_query_tcw(ep, &req);
			spin_unlock(&ep->rma_info.tc_lock);
		}
		if (!mmn || err) {
			err = scif_register_temp(epd, req.va_for_temp,
						 req.nr_bytes, req.prot,
						 &loffset, &local_window);
			if (err) {
				mutex_unlock(&ep->rma_info.rma_lock);
				goto error;
			}
			if (!cache)
				goto skip_cache;
			atomic_inc(&ep->rma_info.tcw_refcount);
			atomic_add_return(local_window->nr_pages,
					  &ep->rma_info.tcw_total_pages);
			if (mmn) {
				spin_lock(&ep->rma_info.tc_lock);
				scif_insert_tcw(local_window,
						&mmn->tc_reg_list);
				spin_unlock(&ep->rma_info.tc_lock);
			}
		}
skip_cache:
		loffset = local_window->offset +
			  (addr - local_window->va_for_temp);
	} else {
		req.out_window = &local_window;
		req.offset = loffset;
		/*
		 * If transfer is from local to remote then the self window
		 * must be readable and vice versa.
		 */
		req.prot = dir == SCIF_LOCAL_TO_REMOTE ? VM_READ : VM_WRITE;
		req.nr_bytes = len;
		req.type = SCIF_WINDOW_PARTIAL;
		req.head = &ep->rma_info.reg_list;
		/* Does a valid local window exist? */
		err = scif_query_window(&req);
		if (err) {
			mutex_unlock(&ep->rma_info.rma_lock);
			goto error;
		}
	}

	/* Does a valid remote window exist? */
	err = scif_query_window(&remote_req);
	if (err) {
		mutex_unlock(&ep->rma_info.rma_lock);
		goto error;
	}

	/*
	 * Prepare copy_work for submitting work to the DMA kernel thread
	 * or CPU copy routine.
	 */
	copy_work.len = len;
	copy_work.loopback = loopback;
	copy_work.remote_dev = ep->remote_dev;
	if (dir == SCIF_LOCAL_TO_REMOTE) {
		copy_work.src_offset = loffset;
		copy_work.src_window = local_window;
		copy_work.dst_offset = roffset;
		copy_work.dst_window = remote_window;
	} else {
		copy_work.src_offset = roffset;
		copy_work.src_window = remote_window;
		copy_work.dst_offset = loffset;
		copy_work.dst_window = local_window;
	}

	if (flags & SCIF_RMA_USECPU) {
		scif_rma_list_cpu_copy(&copy_work);
	} else {
		chan = ep->rma_info.dma_chan;
		err = scif_rma_list_dma_copy_wrapper(epd, &copy_work,
						     chan, loffset);
	}
	if (addr && !cache)
		atomic_inc(&ep->rma_info.tw_refcount);

	mutex_unlock(&ep->rma_info.rma_lock);

	if (last_chunk) {
		struct scif_dev *rdev = ep->remote_dev;

		if (copy_work.fence_type == SCIF_DMA_POLL)
			err = scif_drain_dma_poll(rdev->sdev,
						  ep->rma_info.dma_chan);
		else if (copy_work.fence_type == SCIF_DMA_INTR)
			err = scif_drain_dma_intr(rdev->sdev,
						  ep->rma_info.dma_chan);
	}

	if (addr && !cache)
		scif_queue_for_cleanup(local_window, &scif_info.rma);
	scif_put_peer_dev(spdev);
	return err;
error:
	if (err) {
		if (addr && local_window && !cache)
			scif_destroy_window(ep, local_window);
		dev_err(scif_info.mdev.this_device,
			"%s %d err %d len 0x%lx\n",
			__func__, __LINE__, err, len);
	}
	scif_put_peer_dev(spdev);
	return err;
}

int scif_readfrom(scif_epd_t epd, off_t loffset, size_t len,
		  off_t roffset, int flags)
{
	int err;

	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI readfrom: ep %p loffset 0x%lx len 0x%lx offset 0x%lx flags 0x%x\n",
		epd, loffset, len, roffset, flags);
	if (scif_unaligned(loffset, roffset)) {
		while (len > SCIF_MAX_UNALIGNED_BUF_SIZE) {
			err = scif_rma_copy(epd, loffset, 0x0,
					    SCIF_MAX_UNALIGNED_BUF_SIZE,
					    roffset, flags,
					    SCIF_REMOTE_TO_LOCAL, false);
			if (err)
				goto readfrom_err;
			loffset += SCIF_MAX_UNALIGNED_BUF_SIZE;
			roffset += SCIF_MAX_UNALIGNED_BUF_SIZE;
			len -= SCIF_MAX_UNALIGNED_BUF_SIZE;
		}
	}
	err = scif_rma_copy(epd, loffset, 0x0, len,
			    roffset, flags, SCIF_REMOTE_TO_LOCAL, true);
readfrom_err:
	return err;
}
EXPORT_SYMBOL_GPL(scif_readfrom);

int scif_writeto(scif_epd_t epd, off_t loffset, size_t len,
		 off_t roffset, int flags)
{
	int err;

	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI writeto: ep %p loffset 0x%lx len 0x%lx roffset 0x%lx flags 0x%x\n",
		epd, loffset, len, roffset, flags);
	if (scif_unaligned(loffset, roffset)) {
		while (len > SCIF_MAX_UNALIGNED_BUF_SIZE) {
			err = scif_rma_copy(epd, loffset, 0x0,
					    SCIF_MAX_UNALIGNED_BUF_SIZE,
					    roffset, flags,
					    SCIF_LOCAL_TO_REMOTE, false);
			if (err)
				goto writeto_err;
			loffset += SCIF_MAX_UNALIGNED_BUF_SIZE;
			roffset += SCIF_MAX_UNALIGNED_BUF_SIZE;
			len -= SCIF_MAX_UNALIGNED_BUF_SIZE;
		}
	}
	err = scif_rma_copy(epd, loffset, 0x0, len,
			    roffset, flags, SCIF_LOCAL_TO_REMOTE, true);
writeto_err:
	return err;
}
EXPORT_SYMBOL_GPL(scif_writeto);

int scif_vreadfrom(scif_epd_t epd, void *addr, size_t len,
		   off_t roffset, int flags)
{
	int err;

	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI vreadfrom: ep %p addr %p len 0x%lx roffset 0x%lx flags 0x%x\n",
		epd, addr, len, roffset, flags);
	if (scif_unaligned((off_t __force)addr, roffset)) {
		if (len > SCIF_MAX_UNALIGNED_BUF_SIZE)
			flags &= ~SCIF_RMA_USECACHE;

		while (len > SCIF_MAX_UNALIGNED_BUF_SIZE) {
			err = scif_rma_copy(epd, 0, (u64)addr,
					    SCIF_MAX_UNALIGNED_BUF_SIZE,
					    roffset, flags,
					    SCIF_REMOTE_TO_LOCAL, false);
			if (err)
				goto vreadfrom_err;
			addr += SCIF_MAX_UNALIGNED_BUF_SIZE;
			roffset += SCIF_MAX_UNALIGNED_BUF_SIZE;
			len -= SCIF_MAX_UNALIGNED_BUF_SIZE;
		}
	}
	err = scif_rma_copy(epd, 0, (u64)addr, len,
			    roffset, flags, SCIF_REMOTE_TO_LOCAL, true);
vreadfrom_err:
	return err;
}
EXPORT_SYMBOL_GPL(scif_vreadfrom);

int scif_vwriteto(scif_epd_t epd, void *addr, size_t len,
		  off_t roffset, int flags)
{
	int err;

	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI vwriteto: ep %p addr %p len 0x%lx roffset 0x%lx flags 0x%x\n",
		epd, addr, len, roffset, flags);
	if (scif_unaligned((off_t __force)addr, roffset)) {
		if (len > SCIF_MAX_UNALIGNED_BUF_SIZE)
			flags &= ~SCIF_RMA_USECACHE;

		while (len > SCIF_MAX_UNALIGNED_BUF_SIZE) {
			err = scif_rma_copy(epd, 0, (u64)addr,
					    SCIF_MAX_UNALIGNED_BUF_SIZE,
					    roffset, flags,
					    SCIF_LOCAL_TO_REMOTE, false);
			if (err)
				goto vwriteto_err;
			addr += SCIF_MAX_UNALIGNED_BUF_SIZE;
			roffset += SCIF_MAX_UNALIGNED_BUF_SIZE;
			len -= SCIF_MAX_UNALIGNED_BUF_SIZE;
		}
	}
	err = scif_rma_copy(epd, 0, (u64)addr, len,
			    roffset, flags, SCIF_LOCAL_TO_REMOTE, true);
vwriteto_err:
	return err;
}
EXPORT_SYMBOL_GPL(scif_vwriteto);
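
/*
 * Illustrative usage sketch (not part of the original file): a kernel
 * client with an established endpoint epd and registered local/remote
 * windows might pull len bytes from the peer and push an update back.
 * The offsets below are hypothetical:
 *
 *	off_t loffset = 0x10000;	// local registered offset
 *	off_t roffset = 0x20000;	// peer registered offset
 *	int err;
 *
 *	err = scif_readfrom(epd, loffset, len, roffset, SCIF_RMA_SYNC);
 *	if (!err)
 *		err = scif_writeto(epd, loffset, len, roffset,
 *				   SCIF_RMA_SYNC | SCIF_RMA_ORDERED);
 *
 * SCIF_RMA_USECPU forces the CPU copy path; scif_vreadfrom() and
 * scif_vwriteto() take a virtual address instead of a registered
 * local offset.
 */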