Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v5.0-rc2 (1775 lines, 47 kB)
/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Intel SCIF driver.
 *
 */
#include <linux/intel-iommu.h>
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>

#include "scif_main.h"
#include "scif_map.h"

/* Used to skip ulimit checks for registrations with SCIF_MAP_KERNEL flag */
#define SCIF_MAP_ULIMIT 0x40

bool scif_ulimit_check = true;

/**
 * scif_rma_ep_init:
 * @ep: end point
 *
 * Initialize RMA per EP data structures.
 */
void scif_rma_ep_init(struct scif_endpt *ep)
{
        struct scif_endpt_rma_info *rma = &ep->rma_info;

        mutex_init(&rma->rma_lock);
        init_iova_domain(&rma->iovad, PAGE_SIZE, SCIF_IOVA_START_PFN);
        spin_lock_init(&rma->tc_lock);
        mutex_init(&rma->mmn_lock);
        INIT_LIST_HEAD(&rma->reg_list);
        INIT_LIST_HEAD(&rma->remote_reg_list);
        atomic_set(&rma->tw_refcount, 0);
        atomic_set(&rma->tcw_refcount, 0);
        atomic_set(&rma->tcw_total_pages, 0);
        atomic_set(&rma->fence_refcount, 0);

        rma->async_list_del = 0;
        rma->dma_chan = NULL;
        INIT_LIST_HEAD(&rma->mmn_list);
        INIT_LIST_HEAD(&rma->vma_list);
        init_waitqueue_head(&rma->markwq);
}

/**
 * scif_rma_ep_can_uninit:
 * @ep: end point
 *
 * Returns 1 if an endpoint can be uninitialized and 0 otherwise.
 */
int scif_rma_ep_can_uninit(struct scif_endpt *ep)
{
        int ret = 0;

        mutex_lock(&ep->rma_info.rma_lock);
        /* Destroy RMA Info only if both lists are empty */
        if (list_empty(&ep->rma_info.reg_list) &&
            list_empty(&ep->rma_info.remote_reg_list) &&
            list_empty(&ep->rma_info.mmn_list) &&
            !atomic_read(&ep->rma_info.tw_refcount) &&
            !atomic_read(&ep->rma_info.tcw_refcount) &&
            !atomic_read(&ep->rma_info.fence_refcount))
                ret = 1;
        mutex_unlock(&ep->rma_info.rma_lock);
        return ret;
}

/**
 * scif_create_pinned_pages:
 * @nr_pages: number of pages in window
 * @prot: read/write protection
 *
 * Allocate and prepare a set of pinned pages.
 */
static struct scif_pinned_pages *
scif_create_pinned_pages(int nr_pages, int prot)
{
        struct scif_pinned_pages *pin;

        might_sleep();
        pin = scif_zalloc(sizeof(*pin));
        if (!pin)
                goto error;

        pin->pages = scif_zalloc(nr_pages * sizeof(*pin->pages));
        if (!pin->pages)
                goto error_free_pinned_pages;

        pin->prot = prot;
        pin->magic = SCIFEP_MAGIC;
        return pin;

error_free_pinned_pages:
        scif_free(pin, sizeof(*pin));
error:
        return NULL;
}
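/*
 * Illustrative lifecycle sketch: a scif_pinned_pages set created above is
 * filled in by __scif_pin_pages() (via get_user_pages(), or
 * virt_to_page()/vmalloc_to_page() for SCIF_MAP_KERNEL), shared by one or
 * more registration windows through ref_count, and finally torn down by
 * scif_destroy_pinned_pages() when the last reference drops, roughly:
 *
 *      pin = scif_create_pinned_pages(nr_pages, SCIF_PROT_READ);
 *      ...fill pin->pages[], set pin->nr_pages and pin->map_flags...
 *      atomic_set(&pin->ref_count, 1);
 *      ...
 *      if (!atomic_sub_return(1, &pin->ref_count))
 *              scif_destroy_pinned_pages(pin);
 */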
/**
 * scif_destroy_pinned_pages:
 * @pin: A set of pinned pages.
 *
 * Deallocate resources for pinned pages.
 */
static int scif_destroy_pinned_pages(struct scif_pinned_pages *pin)
{
        int j;
        int writeable = pin->prot & SCIF_PROT_WRITE;
        int kernel = SCIF_MAP_KERNEL & pin->map_flags;

        for (j = 0; j < pin->nr_pages; j++) {
                if (pin->pages[j] && !kernel) {
                        if (writeable)
                                SetPageDirty(pin->pages[j]);
                        put_page(pin->pages[j]);
                }
        }

        scif_free(pin->pages,
                  pin->nr_pages * sizeof(*pin->pages));
        scif_free(pin, sizeof(*pin));
        return 0;
}

/**
 * scif_create_window:
 * @ep: end point
 * @nr_pages: number of pages
 * @offset: registration offset
 * @temp: true if a temporary window is being created
 *
 * Allocate and prepare a self registration window.
 */
struct scif_window *scif_create_window(struct scif_endpt *ep, int nr_pages,
                                       s64 offset, bool temp)
{
        struct scif_window *window;

        might_sleep();
        window = scif_zalloc(sizeof(*window));
        if (!window)
                goto error;

        window->dma_addr = scif_zalloc(nr_pages * sizeof(*window->dma_addr));
        if (!window->dma_addr)
                goto error_free_window;

        window->num_pages = scif_zalloc(nr_pages * sizeof(*window->num_pages));
        if (!window->num_pages)
                goto error_free_window;

        window->offset = offset;
        window->ep = (u64)ep;
        window->magic = SCIFEP_MAGIC;
        window->reg_state = OP_IDLE;
        init_waitqueue_head(&window->regwq);
        window->unreg_state = OP_IDLE;
        init_waitqueue_head(&window->unregwq);
        INIT_LIST_HEAD(&window->list);
        window->type = SCIF_WINDOW_SELF;
        window->temp = temp;
        return window;

error_free_window:
        scif_free(window->dma_addr,
                  nr_pages * sizeof(*window->dma_addr));
        scif_free(window, sizeof(*window));
error:
        return NULL;
}
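/*
 * Annotation: a self window created above moves through a small state
 * machine. reg_state goes OP_IDLE -> OP_IN_PROGRESS when a SCIF_REGISTER
 * message is sent and settles at OP_COMPLETED/OP_FAILED when the peer
 * answers with SCIF_REGISTER_(N)ACK; unreg_state follows the same pattern
 * for SCIF_UNREGISTER. Waiters sleep on regwq/unregwq until the state
 * leaves OP_IN_PROGRESS (see scif_send_scif_register() and
 * scif_unregister_window() below).
 */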
/**
 * scif_destroy_incomplete_window:
 * @ep: end point
 * @window: registration window
 *
 * Deallocate resources for self window.
 */
static void scif_destroy_incomplete_window(struct scif_endpt *ep,
                                           struct scif_window *window)
{
        int err;
        int nr_pages = window->nr_pages;
        struct scif_allocmsg *alloc = &window->alloc_handle;
        struct scifmsg msg;

retry:
        /* Wait for a SCIF_ALLOC_GNT/REJ message */
        err = wait_event_timeout(alloc->allocwq,
                                 alloc->state != OP_IN_PROGRESS,
                                 SCIF_NODE_ALIVE_TIMEOUT);
        if (!err && scifdev_alive(ep))
                goto retry;

        mutex_lock(&ep->rma_info.rma_lock);
        if (alloc->state == OP_COMPLETED) {
                msg.uop = SCIF_FREE_VIRT;
                msg.src = ep->port;
                msg.payload[0] = ep->remote_ep;
                msg.payload[1] = window->alloc_handle.vaddr;
                msg.payload[2] = (u64)window;
                msg.payload[3] = SCIF_REGISTER;
                _scif_nodeqp_send(ep->remote_dev, &msg);
        }
        mutex_unlock(&ep->rma_info.rma_lock);

        scif_free_window_offset(ep, window, window->offset);
        scif_free(window->dma_addr, nr_pages * sizeof(*window->dma_addr));
        scif_free(window->num_pages, nr_pages * sizeof(*window->num_pages));
        scif_free(window, sizeof(*window));
}

/**
 * scif_unmap_window:
 * @remote_dev: SCIF remote device
 * @window: registration window
 *
 * Delete any DMA mappings created for a registered self window.
 */
void scif_unmap_window(struct scif_dev *remote_dev, struct scif_window *window)
{
        int j;

        if (scif_is_iommu_enabled() && !scifdev_self(remote_dev)) {
                if (window->st) {
                        dma_unmap_sg(&remote_dev->sdev->dev,
                                     window->st->sgl, window->st->nents,
                                     DMA_BIDIRECTIONAL);
                        sg_free_table(window->st);
                        kfree(window->st);
                        window->st = NULL;
                }
        } else {
                for (j = 0; j < window->nr_contig_chunks; j++) {
                        if (window->dma_addr[j]) {
                                scif_unmap_single(window->dma_addr[j],
                                                  remote_dev,
                                                  window->num_pages[j] <<
                                                  PAGE_SHIFT);
                                window->dma_addr[j] = 0x0;
                        }
                }
        }
}

static inline struct mm_struct *__scif_acquire_mm(void)
{
        if (scif_ulimit_check)
                return get_task_mm(current);
        return NULL;
}

static inline void __scif_release_mm(struct mm_struct *mm)
{
        if (mm)
                mmput(mm);
}

static inline int
__scif_dec_pinned_vm_lock(struct mm_struct *mm,
                          int nr_pages, bool try_lock)
{
        if (!mm || !nr_pages || !scif_ulimit_check)
                return 0;
        if (try_lock) {
                if (!down_write_trylock(&mm->mmap_sem)) {
                        dev_err(scif_info.mdev.this_device,
                                "%s %d err\n", __func__, __LINE__);
                        return -1;
                }
        } else {
                down_write(&mm->mmap_sem);
        }
        mm->pinned_vm -= nr_pages;
        up_write(&mm->mmap_sem);
        return 0;
}

static inline int __scif_check_inc_pinned_vm(struct mm_struct *mm,
                                             int nr_pages)
{
        unsigned long locked, lock_limit;

        if (!mm || !nr_pages || !scif_ulimit_check)
                return 0;

        locked = nr_pages;
        locked += mm->pinned_vm;
        lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
        if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
                dev_err(scif_info.mdev.this_device,
                        "locked(%lu) > lock_limit(%lu)\n",
                        locked, lock_limit);
                return -ENOMEM;
        }
        mm->pinned_vm = locked;
        return 0;
}
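/*
 * Worked example: with 4 KiB pages and `ulimit -l 64` (64 KiB, i.e. a
 * lock_limit of 16 pages), a process whose mm->pinned_vm is already 10
 * pages may pin at most 6 more; __scif_check_inc_pinned_vm(mm, 8) would
 * see locked == 18 > 16 and fail with -ENOMEM unless the caller has
 * CAP_IPC_LOCK.
 */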
/**
 * scif_destroy_window:
 * @ep: end point
 * @window: registration window
 *
 * Deallocate resources for self window.
 */
int scif_destroy_window(struct scif_endpt *ep, struct scif_window *window)
{
        int j;
        struct scif_pinned_pages *pinned_pages = window->pinned_pages;
        int nr_pages = window->nr_pages;

        might_sleep();
        if (!window->temp && window->mm) {
                __scif_dec_pinned_vm_lock(window->mm, window->nr_pages, 0);
                __scif_release_mm(window->mm);
                window->mm = NULL;
        }

        scif_free_window_offset(ep, window, window->offset);
        scif_unmap_window(ep->remote_dev, window);
        /*
         * Decrement references for this set of pinned pages from
         * this window.
         */
        j = atomic_sub_return(1, &pinned_pages->ref_count);
        if (j < 0)
                dev_err(scif_info.mdev.this_device,
                        "%s %d incorrect ref count %d\n",
                        __func__, __LINE__, j);
        /*
         * If the ref count for pinned_pages is zero then someone
         * has already called scif_unpin_pages() for it and we should
         * destroy the page cache.
         */
        if (!j)
                scif_destroy_pinned_pages(window->pinned_pages);
        scif_free(window->dma_addr, nr_pages * sizeof(*window->dma_addr));
        scif_free(window->num_pages, nr_pages * sizeof(*window->num_pages));
        window->magic = 0;
        scif_free(window, sizeof(*window));
        return 0;
}
/**
 * scif_create_remote_lookup:
 * @remote_dev: SCIF remote device
 * @window: remote window
 *
 * Allocate and prepare lookup entries for the remote
 * end to copy over the physical addresses.
 * Returns 0 on success and appropriate errno on failure.
 */
static int scif_create_remote_lookup(struct scif_dev *remote_dev,
                                     struct scif_window *window)
{
        int i, j, err = 0;
        int nr_pages = window->nr_pages;
        bool vmalloc_dma_phys, vmalloc_num_pages;

        might_sleep();
        /* Map window */
        err = scif_map_single(&window->mapped_offset,
                              window, remote_dev, sizeof(*window));
        if (err)
                goto error_window;

        /* Compute the number of lookup entries. 21 == 2MB Shift */
        window->nr_lookup = ALIGN(nr_pages * PAGE_SIZE,
                                  ((2) * 1024 * 1024)) >> 21;

        window->dma_addr_lookup.lookup =
                scif_alloc_coherent(&window->dma_addr_lookup.offset,
                                    remote_dev, window->nr_lookup *
                                    sizeof(*window->dma_addr_lookup.lookup),
                                    GFP_KERNEL | __GFP_ZERO);
        if (!window->dma_addr_lookup.lookup) {
                err = -ENOMEM;
                goto error_window;
        }

        window->num_pages_lookup.lookup =
                scif_alloc_coherent(&window->num_pages_lookup.offset,
                                    remote_dev, window->nr_lookup *
                                    sizeof(*window->num_pages_lookup.lookup),
                                    GFP_KERNEL | __GFP_ZERO);
        if (!window->num_pages_lookup.lookup) {
                err = -ENOMEM;
                goto error_window;
        }

        vmalloc_dma_phys = is_vmalloc_addr(&window->dma_addr[0]);
        vmalloc_num_pages = is_vmalloc_addr(&window->num_pages[0]);

        /* Now map each of the pages containing physical addresses */
        for (i = 0, j = 0; i < nr_pages; i += SCIF_NR_ADDR_IN_PAGE, j++) {
                err = scif_map_page(&window->dma_addr_lookup.lookup[j],
                                    vmalloc_dma_phys ?
                                    vmalloc_to_page(&window->dma_addr[i]) :
                                    virt_to_page(&window->dma_addr[i]),
                                    remote_dev);
                if (err)
                        goto error_window;
                err = scif_map_page(&window->num_pages_lookup.lookup[j],
                                    vmalloc_num_pages ?
                                    vmalloc_to_page(&window->num_pages[i]) :
                                    virt_to_page(&window->num_pages[i]),
                                    remote_dev);
                if (err)
                        goto error_window;
        }
        return 0;
error_window:
        return err;
}
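/*
 * Worked example: assuming 4 KiB pages, each lookup page holds
 * SCIF_NR_ADDR_IN_PAGE 64-bit addresses (4096 / 8 == 512), so one lookup
 * entry covers 512 * 4 KiB == 2 MiB of registered memory; hence the
 * "21 == 2MB Shift" computation above. A 5 MiB (1280 page) window thus
 * needs ALIGN(5 MiB, 2 MiB) >> 21 == 3 lookup entries.
 */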
/**
 * scif_destroy_remote_lookup:
 * @remote_dev: SCIF remote device
 * @window: remote window
 *
 * Destroy lookup entries used for the remote
 * end to copy over the physical addresses.
 */
static void scif_destroy_remote_lookup(struct scif_dev *remote_dev,
                                       struct scif_window *window)
{
        int i, j;

        if (window->nr_lookup) {
                struct scif_rma_lookup *lup = &window->dma_addr_lookup;
                struct scif_rma_lookup *npup = &window->num_pages_lookup;

                for (i = 0, j = 0; i < window->nr_pages;
                     i += SCIF_NR_ADDR_IN_PAGE, j++) {
                        if (lup->lookup && lup->lookup[j])
                                scif_unmap_single(lup->lookup[j],
                                                  remote_dev,
                                                  PAGE_SIZE);
                        if (npup->lookup && npup->lookup[j])
                                scif_unmap_single(npup->lookup[j],
                                                  remote_dev,
                                                  PAGE_SIZE);
                }
                if (lup->lookup)
                        scif_free_coherent(lup->lookup, lup->offset,
                                           remote_dev, window->nr_lookup *
                                           sizeof(*lup->lookup));
                if (npup->lookup)
                        scif_free_coherent(npup->lookup, npup->offset,
                                           remote_dev, window->nr_lookup *
                                           sizeof(*npup->lookup));
                if (window->mapped_offset)
                        scif_unmap_single(window->mapped_offset,
                                          remote_dev, sizeof(*window));
                window->nr_lookup = 0;
        }
}

/**
 * scif_create_remote_window:
 * @scifdev: SCIF device
 * @nr_pages: number of pages in window
 *
 * Allocate and prepare a remote registration window.
 */
static struct scif_window *
scif_create_remote_window(struct scif_dev *scifdev, int nr_pages)
{
        struct scif_window *window;

        might_sleep();
        window = scif_zalloc(sizeof(*window));
        if (!window)
                goto error_ret;

        window->magic = SCIFEP_MAGIC;
        window->nr_pages = nr_pages;

        window->dma_addr = scif_zalloc(nr_pages * sizeof(*window->dma_addr));
        if (!window->dma_addr)
                goto error_window;

        window->num_pages = scif_zalloc(nr_pages *
                                        sizeof(*window->num_pages));
        if (!window->num_pages)
                goto error_window;

        if (scif_create_remote_lookup(scifdev, window))
                goto error_window;

        window->type = SCIF_WINDOW_PEER;
        window->unreg_state = OP_IDLE;
        INIT_LIST_HEAD(&window->list);
        return window;
error_window:
        scif_destroy_remote_window(window);
error_ret:
        return NULL;
}

/**
 * scif_destroy_remote_window:
 * @window: remote registration window
 *
 * Deallocate resources for remote window.
 */
void
scif_destroy_remote_window(struct scif_window *window)
{
        scif_free(window->dma_addr, window->nr_pages *
                  sizeof(*window->dma_addr));
        scif_free(window->num_pages, window->nr_pages *
                  sizeof(*window->num_pages));
        window->magic = 0;
        scif_free(window, sizeof(*window));
}

/**
 * scif_iommu_map: create DMA mappings if the IOMMU is enabled
 * @remote_dev: SCIF remote device
 * @window: remote registration window
 *
 * Map the physical pages using dma_map_sg(..) and then detect the number
 * of contiguous DMA mappings allocated.
 */
static int scif_iommu_map(struct scif_dev *remote_dev,
                          struct scif_window *window)
{
        struct scatterlist *sg;
        int i, err;
        scif_pinned_pages_t pin = window->pinned_pages;

        window->st = kzalloc(sizeof(*window->st), GFP_KERNEL);
        if (!window->st)
                return -ENOMEM;

        err = sg_alloc_table(window->st, window->nr_pages, GFP_KERNEL);
        if (err)
                return err;

        for_each_sg(window->st->sgl, sg, window->st->nents, i)
                sg_set_page(sg, pin->pages[i], PAGE_SIZE, 0x0);

        err = dma_map_sg(&remote_dev->sdev->dev, window->st->sgl,
                         window->st->nents, DMA_BIDIRECTIONAL);
        if (!err)
                return -ENOMEM;
        /* Detect contiguous ranges of DMA mappings */
        sg = window->st->sgl;
        for (i = 0; sg; i++) {
                dma_addr_t last_da;

                window->dma_addr[i] = sg_dma_address(sg);
                window->num_pages[i] = sg_dma_len(sg) >> PAGE_SHIFT;
                last_da = sg_dma_address(sg) + sg_dma_len(sg);
                while ((sg = sg_next(sg)) && sg_dma_address(sg) == last_da) {
                        window->num_pages[i] +=
                                (sg_dma_len(sg) >> PAGE_SHIFT);
                        last_da = window->dma_addr[i] +
                                sg_dma_len(sg);
                }
                window->nr_contig_chunks++;
        }
        return 0;
}

/**
 * scif_map_window:
 * @remote_dev: SCIF remote device
 * @window: self registration window
 *
 * Map pages of a window into the aperture/PCI.
 * Also determine addresses required for DMA.
 */
int
scif_map_window(struct scif_dev *remote_dev, struct scif_window *window)
{
        int i, j, k, err = 0, nr_contig_pages;
        scif_pinned_pages_t pin;
        phys_addr_t phys_prev, phys_curr;

        might_sleep();

        pin = window->pinned_pages;

        if (intel_iommu_enabled && !scifdev_self(remote_dev))
                return scif_iommu_map(remote_dev, window);

        for (i = 0, j = 0; i < window->nr_pages; i += nr_contig_pages, j++) {
                phys_prev = page_to_phys(pin->pages[i]);
                nr_contig_pages = 1;

                /* Detect physically contiguous chunks */
                for (k = i + 1; k < window->nr_pages; k++) {
                        phys_curr = page_to_phys(pin->pages[k]);
                        if (phys_curr != (phys_prev + PAGE_SIZE))
                                break;
                        phys_prev = phys_curr;
                        nr_contig_pages++;
                }
                window->num_pages[j] = nr_contig_pages;
                window->nr_contig_chunks++;
                if (scif_is_mgmt_node()) {
                        /*
                         * Management node has to deal with SMPT on X100 and
                         * hence the DMA mapping is required
                         */
                        err = scif_map_single(&window->dma_addr[j],
                                              phys_to_virt(page_to_phys(
                                                           pin->pages[i])),
                                              remote_dev,
                                              nr_contig_pages << PAGE_SHIFT);
                        if (err)
                                return err;
                } else {
                        window->dma_addr[j] = page_to_phys(pin->pages[i]);
                }
        }
        return err;
}
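/*
 * Worked example: if a window pins four pages whose physical addresses are
 * 0x1000, 0x2000, 0x5000 and 0x6000 (4 KiB pages), scif_map_window() above
 * records two contiguous chunks: num_pages[0] = 2 starting at dma_addr[0]
 * = 0x1000 and num_pages[1] = 2 starting at dma_addr[1] = 0x5000, leaving
 * nr_contig_chunks == 2.
 */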
/**
 * scif_send_scif_unregister:
 * @ep: end point
 * @window: self registration window
 *
 * Send a SCIF_UNREGISTER message.
 */
static int scif_send_scif_unregister(struct scif_endpt *ep,
                                     struct scif_window *window)
{
        struct scifmsg msg;

        msg.uop = SCIF_UNREGISTER;
        msg.src = ep->port;
        msg.payload[0] = window->alloc_handle.vaddr;
        msg.payload[1] = (u64)window;
        return scif_nodeqp_send(ep->remote_dev, &msg);
}

/**
 * scif_unregister_window:
 * @window: self registration window
 *
 * Send an unregistration request and wait for a response.
 */
int scif_unregister_window(struct scif_window *window)
{
        int err = 0;
        struct scif_endpt *ep = (struct scif_endpt *)window->ep;
        bool send_msg = false;

        might_sleep();
        switch (window->unreg_state) {
        case OP_IDLE:
        {
                window->unreg_state = OP_IN_PROGRESS;
                send_msg = true;
                /* fall through */
        }
        case OP_IN_PROGRESS:
        {
                scif_get_window(window, 1);
                mutex_unlock(&ep->rma_info.rma_lock);
                if (send_msg) {
                        err = scif_send_scif_unregister(ep, window);
                        if (err) {
                                window->unreg_state = OP_COMPLETED;
                                goto done;
                        }
                } else {
                        /* Return ENXIO since unregistration is in progress */
                        mutex_lock(&ep->rma_info.rma_lock);
                        return -ENXIO;
                }
retry:
                /* Wait for a SCIF_UNREGISTER_(N)ACK message */
                err = wait_event_timeout(window->unregwq,
                                         window->unreg_state != OP_IN_PROGRESS,
                                         SCIF_NODE_ALIVE_TIMEOUT);
                if (!err && scifdev_alive(ep))
                        goto retry;
                if (!err) {
                        err = -ENODEV;
                        window->unreg_state = OP_COMPLETED;
                        dev_err(scif_info.mdev.this_device,
                                "%s %d err %d\n", __func__, __LINE__, err);
                }
                if (err > 0)
                        err = 0;
done:
                mutex_lock(&ep->rma_info.rma_lock);
                scif_put_window(window, 1);
                break;
        }
        case OP_FAILED:
        {
                if (!scifdev_alive(ep)) {
                        err = -ENODEV;
                        window->unreg_state = OP_COMPLETED;
                }
                break;
        }
        case OP_COMPLETED:
                break;
        default:
                err = -ENODEV;
        }

        if (window->unreg_state == OP_COMPLETED && window->ref_count)
                scif_put_window(window, window->nr_pages);

        if (!window->ref_count) {
                atomic_inc(&ep->rma_info.tw_refcount);
                list_del_init(&window->list);
                scif_free_window_offset(ep, window, window->offset);
                mutex_unlock(&ep->rma_info.rma_lock);
                if ((!!(window->pinned_pages->map_flags & SCIF_MAP_KERNEL)) &&
                    scifdev_alive(ep)) {
                        scif_drain_dma_intr(ep->remote_dev->sdev,
                                            ep->rma_info.dma_chan);
                } else {
                        if (!__scif_dec_pinned_vm_lock(window->mm,
                                                       window->nr_pages, 1)) {
                                __scif_release_mm(window->mm);
                                window->mm = NULL;
                        }
                }
                scif_queue_for_cleanup(window, &scif_info.rma);
                mutex_lock(&ep->rma_info.rma_lock);
        }
        return err;
}

/**
 * scif_send_alloc_request:
 * @ep: end point
 * @window: self registration window
 *
 * Send a remote window allocation request
 */
static int scif_send_alloc_request(struct scif_endpt *ep,
                                   struct scif_window *window)
{
        struct scifmsg msg;
        struct scif_allocmsg *alloc = &window->alloc_handle;

        /* Set up the Alloc Handle */
        alloc->state = OP_IN_PROGRESS;
        init_waitqueue_head(&alloc->allocwq);

        /* Send out an allocation request */
        msg.uop = SCIF_ALLOC_REQ;
        msg.payload[1] = window->nr_pages;
        msg.payload[2] = (u64)&window->alloc_handle;
        return _scif_nodeqp_send(ep->remote_dev, &msg);
}
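/*
 * Annotation: registration is a four-step exchange with the peer node,
 * driven by the helpers above and below:
 *
 *      1. scif_send_alloc_request()  -> SCIF_ALLOC_REQ
 *      2. peer answers               -> SCIF_ALLOC_GNT or SCIF_ALLOC_REJ
 *      3. scif_prep_remote_window()  copies the DMA address and page-count
 *         lookups into the peer's window over the aperture
 *      4. scif_send_scif_register()  -> SCIF_REGISTER, then waits for
 *         SCIF_REGISTER_(N)ACK
 *
 * scif_destroy_incomplete_window() and SCIF_FREE_VIRT unwind the exchange
 * when a step fails.
 */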
/**
 * scif_prep_remote_window:
 * @ep: end point
 * @window: self registration window
 *
 * Send a remote window allocation request, wait for an allocation response,
 * and prepare the remote window by copying over the page lists.
 */
static int scif_prep_remote_window(struct scif_endpt *ep,
                                   struct scif_window *window)
{
        struct scifmsg msg;
        struct scif_window *remote_window;
        struct scif_allocmsg *alloc = &window->alloc_handle;
        dma_addr_t *dma_phys_lookup, *tmp, *num_pages_lookup, *tmp1;
        int i = 0, j = 0;
        int nr_contig_chunks, loop_nr_contig_chunks;
        int remaining_nr_contig_chunks, nr_lookup;
        int err, map_err;

        map_err = scif_map_window(ep->remote_dev, window);
        if (map_err)
                dev_err(&ep->remote_dev->sdev->dev,
                        "%s %d map_err %d\n", __func__, __LINE__, map_err);
        remaining_nr_contig_chunks = window->nr_contig_chunks;
        nr_contig_chunks = window->nr_contig_chunks;
retry:
        /* Wait for a SCIF_ALLOC_GNT/REJ message */
        err = wait_event_timeout(alloc->allocwq,
                                 alloc->state != OP_IN_PROGRESS,
                                 SCIF_NODE_ALIVE_TIMEOUT);
        mutex_lock(&ep->rma_info.rma_lock);
        /* Synchronize with the thread waking up allocwq */
        mutex_unlock(&ep->rma_info.rma_lock);
        if (!err && scifdev_alive(ep))
                goto retry;

        if (!err)
                err = -ENODEV;

        if (err > 0)
                err = 0;
        else
                return err;

        /* Bail out. The remote end rejected this request */
        if (alloc->state == OP_FAILED)
                return -ENOMEM;

        if (map_err) {
                dev_err(&ep->remote_dev->sdev->dev,
                        "%s %d err %d\n", __func__, __LINE__, map_err);
                msg.uop = SCIF_FREE_VIRT;
                msg.src = ep->port;
                msg.payload[0] = ep->remote_ep;
                msg.payload[1] = window->alloc_handle.vaddr;
                msg.payload[2] = (u64)window;
                msg.payload[3] = SCIF_REGISTER;
                spin_lock(&ep->lock);
                if (ep->state == SCIFEP_CONNECTED)
                        err = _scif_nodeqp_send(ep->remote_dev, &msg);
                else
                        err = -ENOTCONN;
                spin_unlock(&ep->lock);
                return err;
        }

        remote_window = scif_ioremap(alloc->phys_addr, sizeof(*window),
                                     ep->remote_dev);

        /* Compute the number of lookup entries. 21 == 2MB Shift */
        nr_lookup = ALIGN(nr_contig_chunks, SCIF_NR_ADDR_IN_PAGE)
                    >> ilog2(SCIF_NR_ADDR_IN_PAGE);

        dma_phys_lookup =
                scif_ioremap(remote_window->dma_addr_lookup.offset,
                             nr_lookup *
                             sizeof(*remote_window->dma_addr_lookup.lookup),
                             ep->remote_dev);
        num_pages_lookup =
                scif_ioremap(remote_window->num_pages_lookup.offset,
                             nr_lookup *
                             sizeof(*remote_window->num_pages_lookup.lookup),
                             ep->remote_dev);

        while (remaining_nr_contig_chunks) {
                loop_nr_contig_chunks = min_t(int, remaining_nr_contig_chunks,
                                              (int)SCIF_NR_ADDR_IN_PAGE);
                /* #1/2 - Copy physical addresses over to the remote side */

                /* #2/2 - Copy DMA addresses (addresses that are fed into the
                 * DMA engine) We transfer bus addresses which are then
                 * converted into a MIC physical address on the remote
                 * side if it is a MIC, if the remote node is a mgmt node we
                 * transfer the MIC physical address
                 */
                tmp = scif_ioremap(dma_phys_lookup[j],
                                   loop_nr_contig_chunks *
                                   sizeof(*window->dma_addr),
                                   ep->remote_dev);
                tmp1 = scif_ioremap(num_pages_lookup[j],
                                    loop_nr_contig_chunks *
                                    sizeof(*window->num_pages),
                                    ep->remote_dev);
                if (scif_is_mgmt_node()) {
                        memcpy_toio((void __force __iomem *)tmp,
                                    &window->dma_addr[i], loop_nr_contig_chunks
                                    * sizeof(*window->dma_addr));
                        memcpy_toio((void __force __iomem *)tmp1,
                                    &window->num_pages[i], loop_nr_contig_chunks
                                    * sizeof(*window->num_pages));
                } else {
                        if (scifdev_is_p2p(ep->remote_dev)) {
                                /*
                                 * add remote node's base address for this node
                                 * to convert it into a MIC address
                                 */
                                int m;
                                dma_addr_t dma_addr;

                                for (m = 0; m < loop_nr_contig_chunks; m++) {
                                        dma_addr = window->dma_addr[i + m] +
                                                   ep->remote_dev->base_addr;
                                        writeq(dma_addr,
                                               (void __force __iomem *)&tmp[m]);
                                }
                                memcpy_toio((void __force __iomem *)tmp1,
                                            &window->num_pages[i],
                                            loop_nr_contig_chunks
                                            * sizeof(*window->num_pages));
                        } else {
                                /* Mgmt node or loopback - transfer DMA
                                 * addresses as is, this is the same as a
                                 * MIC physical address (we use the dma_addr
                                 * and not the phys_addr array since the
                                 * phys_addr is only setup if there is a mmap()
                                 * request from the mgmt node)
                                 */
                                memcpy_toio((void __force __iomem *)tmp,
                                            &window->dma_addr[i],
                                            loop_nr_contig_chunks *
                                            sizeof(*window->dma_addr));
                                memcpy_toio((void __force __iomem *)tmp1,
                                            &window->num_pages[i],
                                            loop_nr_contig_chunks *
                                            sizeof(*window->num_pages));
                        }
                }
                remaining_nr_contig_chunks -= loop_nr_contig_chunks;
                i += loop_nr_contig_chunks;
                j++;
                scif_iounmap(tmp, loop_nr_contig_chunks *
                             sizeof(*window->dma_addr), ep->remote_dev);
                scif_iounmap(tmp1, loop_nr_contig_chunks *
                             sizeof(*window->num_pages), ep->remote_dev);
        }

        /* Prepare the remote window for the peer */
        remote_window->peer_window = (u64)window;
        remote_window->offset = window->offset;
        remote_window->prot = window->prot;
        remote_window->nr_contig_chunks = nr_contig_chunks;
        remote_window->ep = ep->remote_ep;
        scif_iounmap(num_pages_lookup,
                     nr_lookup *
                     sizeof(*remote_window->num_pages_lookup.lookup),
                     ep->remote_dev);
        scif_iounmap(dma_phys_lookup,
                     nr_lookup *
                     sizeof(*remote_window->dma_addr_lookup.lookup),
                     ep->remote_dev);
        scif_iounmap(remote_window, sizeof(*remote_window), ep->remote_dev);
        window->peer_window = alloc->vaddr;
        return err;
}

/**
 * scif_send_scif_register:
 * @ep: end point
 * @window: self registration window
 *
 * Send a SCIF_REGISTER message if EP is connected and wait for a
 * SCIF_REGISTER_(N)ACK message else send a SCIF_FREE_VIRT
 * message so that the peer can free its remote window allocated earlier.
 */
static int scif_send_scif_register(struct scif_endpt *ep,
                                   struct scif_window *window)
{
        int err = 0;
        struct scifmsg msg;

        msg.src = ep->port;
        msg.payload[0] = ep->remote_ep;
        msg.payload[1] = window->alloc_handle.vaddr;
        msg.payload[2] = (u64)window;
        spin_lock(&ep->lock);
        if (ep->state == SCIFEP_CONNECTED) {
                msg.uop = SCIF_REGISTER;
                window->reg_state = OP_IN_PROGRESS;
                err = _scif_nodeqp_send(ep->remote_dev, &msg);
                spin_unlock(&ep->lock);
                if (!err) {
retry:
                        /* Wait for a SCIF_REGISTER_(N)ACK message */
                        err = wait_event_timeout(window->regwq,
                                                 window->reg_state !=
                                                 OP_IN_PROGRESS,
                                                 SCIF_NODE_ALIVE_TIMEOUT);
                        if (!err && scifdev_alive(ep))
                                goto retry;
                        err = !err ? -ENODEV : 0;
                        if (window->reg_state == OP_FAILED)
                                err = -ENOTCONN;
                }
        } else {
                msg.uop = SCIF_FREE_VIRT;
                msg.payload[3] = SCIF_REGISTER;
                err = _scif_nodeqp_send(ep->remote_dev, &msg);
                spin_unlock(&ep->lock);
                if (!err)
                        err = -ENOTCONN;
        }
        return err;
}
/**
 * scif_get_window_offset:
 * @ep: end point descriptor
 * @flags: flags
 * @offset: offset hint
 * @num_pages: number of pages
 * @out_offset: computed offset returned by reference.
 *
 * Compute/Claim a new offset for this EP.
 */
int scif_get_window_offset(struct scif_endpt *ep, int flags, s64 offset,
                           int num_pages, s64 *out_offset)
{
        s64 page_index;
        struct iova *iova_ptr;
        int err = 0;

        if (flags & SCIF_MAP_FIXED) {
                page_index = SCIF_IOVA_PFN(offset);
                iova_ptr = reserve_iova(&ep->rma_info.iovad, page_index,
                                        page_index + num_pages - 1);
                if (!iova_ptr)
                        err = -EADDRINUSE;
        } else {
                iova_ptr = alloc_iova(&ep->rma_info.iovad, num_pages,
                                      SCIF_DMA_63BIT_PFN - 1, 0);
                if (!iova_ptr)
                        err = -ENOMEM;
        }
        if (!err)
                *out_offset = (iova_ptr->pfn_lo) << PAGE_SHIFT;
        return err;
}

/**
 * scif_free_window_offset:
 * @ep: end point descriptor
 * @window: registration window
 * @offset: Offset to be freed
 *
 * Free offset for this EP. The caller is supposed to grab
 * the RMA mutex before calling this API.
 */
void scif_free_window_offset(struct scif_endpt *ep,
                             struct scif_window *window, s64 offset)
{
        if ((window && !window->offset_freed) || !window) {
                free_iova(&ep->rma_info.iovad, offset >> PAGE_SHIFT);
                if (window)
                        window->offset_freed = true;
        }
}
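/*
 * Illustrative sketch: window offsets are page-granular IOVAs drawn from
 * the per-endpoint iova domain. With SCIF_MAP_FIXED the caller's offset is
 * reserved exactly; otherwise any free range may be returned. Roughly:
 *
 *      s64 off;
 *      err = scif_get_window_offset(ep, SCIF_MAP_FIXED, 0x200000,
 *                                   nr_pages, &off);
 *      ...on success off == 0x200000...
 *      scif_free_window_offset(ep, window, off);
 */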
/**
 * scif_alloc_req: Respond to SCIF_ALLOC_REQ interrupt message
 * @msg: Interrupt message
 *
 * Remote side is requesting a memory allocation.
 */
void scif_alloc_req(struct scif_dev *scifdev, struct scifmsg *msg)
{
        int err;
        struct scif_window *window = NULL;
        int nr_pages = msg->payload[1];

        window = scif_create_remote_window(scifdev, nr_pages);
        if (!window) {
                err = -ENOMEM;
                goto error;
        }

        /* The peer's allocation request is granted */
        msg->uop = SCIF_ALLOC_GNT;
        msg->payload[0] = (u64)window;
        msg->payload[1] = window->mapped_offset;
        err = scif_nodeqp_send(scifdev, msg);
        if (err)
                scif_destroy_remote_window(window);
        return;
error:
        /* The peer's allocation request is rejected */
        dev_err(&scifdev->sdev->dev,
                "%s %d error %d alloc_ptr %p nr_pages 0x%x\n",
                __func__, __LINE__, err, window, nr_pages);
        msg->uop = SCIF_ALLOC_REJ;
        scif_nodeqp_send(scifdev, msg);
}

/**
 * scif_alloc_gnt_rej: Respond to SCIF_ALLOC_GNT/REJ interrupt message
 * @msg: Interrupt message
 *
 * Remote side responded to a memory allocation.
 */
void scif_alloc_gnt_rej(struct scif_dev *scifdev, struct scifmsg *msg)
{
        struct scif_allocmsg *handle = (struct scif_allocmsg *)msg->payload[2];
        struct scif_window *window = container_of(handle, struct scif_window,
                                                  alloc_handle);
        struct scif_endpt *ep = (struct scif_endpt *)window->ep;

        mutex_lock(&ep->rma_info.rma_lock);
        handle->vaddr = msg->payload[0];
        handle->phys_addr = msg->payload[1];
        if (msg->uop == SCIF_ALLOC_GNT)
                handle->state = OP_COMPLETED;
        else
                handle->state = OP_FAILED;
        wake_up(&handle->allocwq);
        mutex_unlock(&ep->rma_info.rma_lock);
}

/**
 * scif_free_virt: Respond to SCIF_FREE_VIRT interrupt message
 * @msg: Interrupt message
 *
 * Free up memory kmalloc'd earlier.
 */
void scif_free_virt(struct scif_dev *scifdev, struct scifmsg *msg)
{
        struct scif_window *window = (struct scif_window *)msg->payload[1];

        scif_destroy_remote_window(window);
}

static void
scif_fixup_aper_base(struct scif_dev *dev, struct scif_window *window)
{
        int j;
        struct scif_hw_dev *sdev = dev->sdev;
        phys_addr_t apt_base = 0;

        /*
         * Add the aperture base if the DMA address is not card relative
         * since the DMA addresses need to be an offset into the bar
         */
        if (!scifdev_self(dev) && window->type == SCIF_WINDOW_PEER &&
            sdev->aper && !sdev->card_rel_da)
                apt_base = sdev->aper->pa;
        else
                return;

        for (j = 0; j < window->nr_contig_chunks; j++) {
                if (window->num_pages[j])
                        window->dma_addr[j] += apt_base;
                else
                        break;
        }
}

/**
 * scif_recv_reg: Respond to SCIF_REGISTER interrupt message
 * @msg: Interrupt message
 *
 * Update remote window list with a new registered window.
 */
void scif_recv_reg(struct scif_dev *scifdev, struct scifmsg *msg)
{
        struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
        struct scif_window *window =
                (struct scif_window *)msg->payload[1];

        mutex_lock(&ep->rma_info.rma_lock);
        spin_lock(&ep->lock);
        if (ep->state == SCIFEP_CONNECTED) {
                msg->uop = SCIF_REGISTER_ACK;
                scif_nodeqp_send(ep->remote_dev, msg);
                scif_fixup_aper_base(ep->remote_dev, window);
                /* No further failures expected. Insert new window */
                scif_insert_window(window, &ep->rma_info.remote_reg_list);
        } else {
                msg->uop = SCIF_REGISTER_NACK;
                scif_nodeqp_send(ep->remote_dev, msg);
        }
        spin_unlock(&ep->lock);
        mutex_unlock(&ep->rma_info.rma_lock);
        /* free up any lookup resources now that page lists are transferred */
        scif_destroy_remote_lookup(ep->remote_dev, window);
        /*
         * We could not insert the window but we need to
         * destroy the window.
         */
        if (msg->uop == SCIF_REGISTER_NACK)
                scif_destroy_remote_window(window);
}

/**
 * scif_recv_unreg: Respond to SCIF_UNREGISTER interrupt message
 * @msg: Interrupt message
 *
 * Remove window from remote registration list.
 */
void scif_recv_unreg(struct scif_dev *scifdev, struct scifmsg *msg)
{
        struct scif_rma_req req;
        struct scif_window *window = NULL;
        struct scif_window *recv_window =
                (struct scif_window *)msg->payload[0];
        struct scif_endpt *ep;
        int del_window = 0;

        ep = (struct scif_endpt *)recv_window->ep;
        req.out_window = &window;
        req.offset = recv_window->offset;
        req.prot = 0;
        req.nr_bytes = recv_window->nr_pages << PAGE_SHIFT;
        req.type = SCIF_WINDOW_FULL;
        req.head = &ep->rma_info.remote_reg_list;
        msg->payload[0] = ep->remote_ep;

        mutex_lock(&ep->rma_info.rma_lock);
        /* Does a valid window exist? */
        if (scif_query_window(&req)) {
                dev_err(&scifdev->sdev->dev,
                        "%s %d -ENXIO\n", __func__, __LINE__);
                msg->uop = SCIF_UNREGISTER_ACK;
                goto error;
        }
        if (window) {
                if (window->ref_count)
                        scif_put_window(window, window->nr_pages);
                else
                        dev_err(&scifdev->sdev->dev,
                                "%s %d ref count should be +ve\n",
                                __func__, __LINE__);
                window->unreg_state = OP_COMPLETED;
                if (!window->ref_count) {
                        msg->uop = SCIF_UNREGISTER_ACK;
                        atomic_inc(&ep->rma_info.tw_refcount);
                        ep->rma_info.async_list_del = 1;
                        list_del_init(&window->list);
                        del_window = 1;
                } else {
                        /* NACK! There are valid references to this window */
                        msg->uop = SCIF_UNREGISTER_NACK;
                }
        } else {
                /* The window did not make its way to the list at all. ACK */
                msg->uop = SCIF_UNREGISTER_ACK;
                scif_destroy_remote_window(recv_window);
        }
error:
        mutex_unlock(&ep->rma_info.rma_lock);
        if (del_window)
                scif_drain_dma_intr(ep->remote_dev->sdev,
                                    ep->rma_info.dma_chan);
        scif_nodeqp_send(ep->remote_dev, msg);
        if (del_window)
                scif_queue_for_cleanup(window, &scif_info.rma);
}

/**
 * scif_recv_reg_ack: Respond to SCIF_REGISTER_ACK interrupt message
 * @msg: Interrupt message
 *
 * Wake up the window waiting to complete registration.
 */
void scif_recv_reg_ack(struct scif_dev *scifdev, struct scifmsg *msg)
{
        struct scif_window *window =
                (struct scif_window *)msg->payload[2];
        struct scif_endpt *ep = (struct scif_endpt *)window->ep;

        mutex_lock(&ep->rma_info.rma_lock);
        window->reg_state = OP_COMPLETED;
        wake_up(&window->regwq);
        mutex_unlock(&ep->rma_info.rma_lock);
}

/**
 * scif_recv_reg_nack: Respond to SCIF_REGISTER_NACK interrupt message
 * @msg: Interrupt message
 *
 * Wake up the window waiting to inform it that registration
 * cannot be completed.
 */
void scif_recv_reg_nack(struct scif_dev *scifdev, struct scifmsg *msg)
{
        struct scif_window *window =
                (struct scif_window *)msg->payload[2];
        struct scif_endpt *ep = (struct scif_endpt *)window->ep;

        mutex_lock(&ep->rma_info.rma_lock);
        window->reg_state = OP_FAILED;
        wake_up(&window->regwq);
        mutex_unlock(&ep->rma_info.rma_lock);
}

/**
 * scif_recv_unreg_ack: Respond to SCIF_UNREGISTER_ACK interrupt message
 * @msg: Interrupt message
 *
 * Wake up the window waiting to complete unregistration.
 */
void scif_recv_unreg_ack(struct scif_dev *scifdev, struct scifmsg *msg)
{
        struct scif_window *window =
                (struct scif_window *)msg->payload[1];
        struct scif_endpt *ep = (struct scif_endpt *)window->ep;

        mutex_lock(&ep->rma_info.rma_lock);
        window->unreg_state = OP_COMPLETED;
        wake_up(&window->unregwq);
        mutex_unlock(&ep->rma_info.rma_lock);
}
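/*
 * Annotation: the scif_recv_*_(n)ack handlers above and below all share
 * one pattern: take rma_lock, record the final OP_COMPLETED/OP_FAILED
 * state, and wake the regwq/unregwq waiter parked in
 * scif_send_scif_register() or scif_unregister_window().
 */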
/**
 * scif_recv_unreg_nack: Respond to SCIF_UNREGISTER_NACK interrupt message
 * @msg: Interrupt message
 *
 * Wake up the window waiting to inform it that unregistration
 * cannot be completed immediately.
 */
void scif_recv_unreg_nack(struct scif_dev *scifdev, struct scifmsg *msg)
{
        struct scif_window *window =
                (struct scif_window *)msg->payload[1];
        struct scif_endpt *ep = (struct scif_endpt *)window->ep;

        mutex_lock(&ep->rma_info.rma_lock);
        window->unreg_state = OP_FAILED;
        wake_up(&window->unregwq);
        mutex_unlock(&ep->rma_info.rma_lock);
}
int __scif_pin_pages(void *addr, size_t len, int *out_prot,
                     int map_flags, scif_pinned_pages_t *pages)
{
        struct scif_pinned_pages *pinned_pages;
        int nr_pages, err = 0, i;
        bool vmalloc_addr = false;
        bool try_upgrade = false;
        int prot = *out_prot;
        int ulimit = 0;
        struct mm_struct *mm = NULL;

        /* Unsupported flags */
        if (map_flags & ~(SCIF_MAP_KERNEL | SCIF_MAP_ULIMIT))
                return -EINVAL;
        ulimit = !!(map_flags & SCIF_MAP_ULIMIT);

        /* Unsupported protection requested */
        if (prot & ~(SCIF_PROT_READ | SCIF_PROT_WRITE))
                return -EINVAL;

        /* addr/len must be page aligned. len should be non zero */
        if (!len ||
            (ALIGN((u64)addr, PAGE_SIZE) != (u64)addr) ||
            (ALIGN((u64)len, PAGE_SIZE) != (u64)len))
                return -EINVAL;

        might_sleep();

        nr_pages = len >> PAGE_SHIFT;

        /* Allocate a set of pinned pages */
        pinned_pages = scif_create_pinned_pages(nr_pages, prot);
        if (!pinned_pages)
                return -ENOMEM;

        if (map_flags & SCIF_MAP_KERNEL) {
                if (is_vmalloc_addr(addr))
                        vmalloc_addr = true;

                for (i = 0; i < nr_pages; i++) {
                        if (vmalloc_addr)
                                pinned_pages->pages[i] =
                                        vmalloc_to_page(addr + (i * PAGE_SIZE));
                        else
                                pinned_pages->pages[i] =
                                        virt_to_page(addr + (i * PAGE_SIZE));
                }
                pinned_pages->nr_pages = nr_pages;
                pinned_pages->map_flags = SCIF_MAP_KERNEL;
        } else {
                /*
                 * SCIF supports registration caching. If a registration has
                 * been requested with read only permissions, then we try
                 * to pin the pages with RW permissions so that a subsequent
                 * transfer with RW permission can hit the cache instead of
                 * invalidating it. If the upgrade fails with RW then we
                 * revert back to R permission and retry
                 */
                if (prot == SCIF_PROT_READ)
                        try_upgrade = true;
                prot |= SCIF_PROT_WRITE;
retry:
                mm = current->mm;
                down_write(&mm->mmap_sem);
                if (ulimit) {
                        err = __scif_check_inc_pinned_vm(mm, nr_pages);
                        if (err) {
                                up_write(&mm->mmap_sem);
                                pinned_pages->nr_pages = 0;
                                goto error_unmap;
                        }
                }

                pinned_pages->nr_pages = get_user_pages(
                                (u64)addr,
                                nr_pages,
                                (prot & SCIF_PROT_WRITE) ? FOLL_WRITE : 0,
                                pinned_pages->pages,
                                NULL);
                up_write(&mm->mmap_sem);
                if (nr_pages != pinned_pages->nr_pages) {
                        if (try_upgrade) {
                                if (ulimit)
                                        __scif_dec_pinned_vm_lock(mm,
                                                                  nr_pages, 0);
                                /* Roll back any pinned pages */
                                for (i = 0; i < pinned_pages->nr_pages; i++) {
                                        if (pinned_pages->pages[i])
                                                put_page(
                                                pinned_pages->pages[i]);
                                }
                                prot &= ~SCIF_PROT_WRITE;
                                try_upgrade = false;
                                goto retry;
                        }
                }
                pinned_pages->map_flags = 0;
        }

        if (pinned_pages->nr_pages < nr_pages) {
                err = -EFAULT;
                pinned_pages->nr_pages = nr_pages;
                goto dec_pinned;
        }

        *out_prot = prot;
        atomic_set(&pinned_pages->ref_count, 1);
        *pages = pinned_pages;
        return err;
dec_pinned:
        if (ulimit)
                __scif_dec_pinned_vm_lock(mm, nr_pages, 0);
        /* Something went wrong! Rollback */
error_unmap:
        pinned_pages->nr_pages = nr_pages;
        scif_destroy_pinned_pages(pinned_pages);
        *pages = NULL;
        dev_dbg(scif_info.mdev.this_device,
                "%s %d err %d len 0x%lx\n", __func__, __LINE__, err, len);
        return err;
}

int scif_pin_pages(void *addr, size_t len, int prot,
                   int map_flags, scif_pinned_pages_t *pages)
{
        return __scif_pin_pages(addr, len, &prot, map_flags, pages);
}
EXPORT_SYMBOL_GPL(scif_pin_pages);

int scif_unpin_pages(scif_pinned_pages_t pinned_pages)
{
        int err = 0, ret;

        if (!pinned_pages || SCIFEP_MAGIC != pinned_pages->magic)
                return -EINVAL;

        ret = atomic_sub_return(1, &pinned_pages->ref_count);
        if (ret < 0) {
                dev_err(scif_info.mdev.this_device,
                        "%s %d scif_unpin_pages called without pinning? rc %d\n",
                        __func__, __LINE__, ret);
                return -EINVAL;
        }
        /*
         * Destroy the window if the ref count for this set of pinned
         * pages has dropped to zero. If it is positive then there is
         * a valid registered window which is backed by these pages and
         * it will be destroyed once all such windows are unregistered.
         */
        if (!ret)
                err = scif_destroy_pinned_pages(pinned_pages);

        return err;
}
EXPORT_SYMBOL_GPL(scif_unpin_pages);

static inline void
scif_insert_local_window(struct scif_window *window, struct scif_endpt *ep)
{
        mutex_lock(&ep->rma_info.rma_lock);
        scif_insert_window(window, &ep->rma_info.reg_list);
        mutex_unlock(&ep->rma_info.rma_lock);
}
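/*
 * Illustrative usage sketch: kernel-mode users that manage pinning
 * themselves pair the exported calls above with
 * scif_register_pinned_pages()/scif_unregister() below. With a
 * hypothetical page-aligned kernel buffer kbuf of len bytes on a
 * connected endpoint epd:
 *
 *      scif_pinned_pages_t pin;
 *      err = scif_pin_pages(kbuf, len, SCIF_PROT_READ | SCIF_PROT_WRITE,
 *                           SCIF_MAP_KERNEL, &pin);
 *      off = scif_register_pinned_pages(epd, pin, 0, 0);
 *      ...RMA operations against [off, off + len)...
 *      err = scif_unregister(epd, off, len);
 *      err = scif_unpin_pages(pin);
 */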
off_t scif_register_pinned_pages(scif_epd_t epd,
                                 scif_pinned_pages_t pinned_pages,
                                 off_t offset, int map_flags)
{
        struct scif_endpt *ep = (struct scif_endpt *)epd;
        s64 computed_offset;
        struct scif_window *window;
        int err;
        size_t len;
        struct device *spdev;

        /* Unsupported flags */
        if (map_flags & ~SCIF_MAP_FIXED)
                return -EINVAL;

        len = pinned_pages->nr_pages << PAGE_SHIFT;

        /*
         * Offset is not page aligned/negative or offset+len
         * wraps around with SCIF_MAP_FIXED.
         */
        if ((map_flags & SCIF_MAP_FIXED) &&
            ((ALIGN(offset, PAGE_SIZE) != offset) ||
            (offset < 0) ||
            (len > LONG_MAX - offset)))
                return -EINVAL;

        might_sleep();

        err = scif_verify_epd(ep);
        if (err)
                return err;
        /*
         * It is an error to pass pinned_pages to scif_register_pinned_pages()
         * after calling scif_unpin_pages().
         */
        if (!atomic_add_unless(&pinned_pages->ref_count, 1, 0))
                return -EINVAL;

        /* Compute the offset for this registration */
        err = scif_get_window_offset(ep, map_flags, offset,
                                     len, &computed_offset);
        if (err) {
                atomic_sub(1, &pinned_pages->ref_count);
                return err;
        }

        /* Allocate and prepare self registration window */
        window = scif_create_window(ep, pinned_pages->nr_pages,
                                    computed_offset, false);
        if (!window) {
                atomic_sub(1, &pinned_pages->ref_count);
                scif_free_window_offset(ep, NULL, computed_offset);
                return -ENOMEM;
        }

        window->pinned_pages = pinned_pages;
        window->nr_pages = pinned_pages->nr_pages;
        window->prot = pinned_pages->prot;

        spdev = scif_get_peer_dev(ep->remote_dev);
        if (IS_ERR(spdev)) {
                err = PTR_ERR(spdev);
                scif_destroy_window(ep, window);
                return err;
        }
        err = scif_send_alloc_request(ep, window);
        if (err) {
                dev_err(&ep->remote_dev->sdev->dev,
                        "%s %d err %d\n", __func__, __LINE__, err);
                goto error_unmap;
        }

        /* Prepare the remote registration window */
        err = scif_prep_remote_window(ep, window);
        if (err) {
                dev_err(&ep->remote_dev->sdev->dev,
                        "%s %d err %d\n", __func__, __LINE__, err);
                goto error_unmap;
        }

        /* Tell the peer about the new window */
        err = scif_send_scif_register(ep, window);
        if (err) {
                dev_err(&ep->remote_dev->sdev->dev,
                        "%s %d err %d\n", __func__, __LINE__, err);
                goto error_unmap;
        }

        scif_put_peer_dev(spdev);
        /* No further failures expected. Insert new window */
        scif_insert_local_window(window, ep);
        return computed_offset;
error_unmap:
        scif_destroy_window(ep, window);
        scif_put_peer_dev(spdev);
        dev_err(&ep->remote_dev->sdev->dev,
                "%s %d err %d\n", __func__, __LINE__, err);
        return err;
}
EXPORT_SYMBOL_GPL(scif_register_pinned_pages);
off_t scif_register(scif_epd_t epd, void *addr, size_t len, off_t offset,
                    int prot, int map_flags)
{
        scif_pinned_pages_t pinned_pages;
        off_t err;
        struct scif_endpt *ep = (struct scif_endpt *)epd;
        s64 computed_offset;
        struct scif_window *window;
        struct mm_struct *mm = NULL;
        struct device *spdev;

        dev_dbg(scif_info.mdev.this_device,
                "SCIFAPI register: ep %p addr %p len 0x%lx offset 0x%lx prot 0x%x map_flags 0x%x\n",
                epd, addr, len, offset, prot, map_flags);
        /* Unsupported flags */
        if (map_flags & ~(SCIF_MAP_FIXED | SCIF_MAP_KERNEL))
                return -EINVAL;

        /*
         * Offset is not page aligned/negative or offset+len
         * wraps around with SCIF_MAP_FIXED.
         */
        if ((map_flags & SCIF_MAP_FIXED) &&
            ((ALIGN(offset, PAGE_SIZE) != offset) ||
            (offset < 0) ||
            (len > LONG_MAX - offset)))
                return -EINVAL;

        /* Unsupported protection requested */
        if (prot & ~(SCIF_PROT_READ | SCIF_PROT_WRITE))
                return -EINVAL;

        /* addr/len must be page aligned. len should be non zero */
        if (!len || (ALIGN((u64)addr, PAGE_SIZE) != (u64)addr) ||
            (ALIGN(len, PAGE_SIZE) != len))
                return -EINVAL;

        might_sleep();

        err = scif_verify_epd(ep);
        if (err)
                return err;

        /* Compute the offset for this registration */
        err = scif_get_window_offset(ep, map_flags, offset,
                                     len >> PAGE_SHIFT, &computed_offset);
        if (err)
                return err;

        spdev = scif_get_peer_dev(ep->remote_dev);
        if (IS_ERR(spdev)) {
                err = PTR_ERR(spdev);
                scif_free_window_offset(ep, NULL, computed_offset);
                return err;
        }
        /* Allocate and prepare self registration window */
        window = scif_create_window(ep, len >> PAGE_SHIFT,
                                    computed_offset, false);
        if (!window) {
                scif_free_window_offset(ep, NULL, computed_offset);
                scif_put_peer_dev(spdev);
                return -ENOMEM;
        }

        window->nr_pages = len >> PAGE_SHIFT;

        err = scif_send_alloc_request(ep, window);
        if (err) {
                scif_destroy_incomplete_window(ep, window);
                scif_put_peer_dev(spdev);
                return err;
        }

        if (!(map_flags & SCIF_MAP_KERNEL)) {
                mm = __scif_acquire_mm();
                map_flags |= SCIF_MAP_ULIMIT;
        }
        /* Pin down the pages */
        err = __scif_pin_pages(addr, len, &prot,
                               map_flags & (SCIF_MAP_KERNEL | SCIF_MAP_ULIMIT),
                               &pinned_pages);
        if (err) {
                scif_destroy_incomplete_window(ep, window);
                __scif_release_mm(mm);
                goto error;
        }

        window->pinned_pages = pinned_pages;
        window->prot = pinned_pages->prot;
        window->mm = mm;

        /* Prepare the remote registration window */
        err = scif_prep_remote_window(ep, window);
        if (err) {
                dev_err(&ep->remote_dev->sdev->dev,
                        "%s %d err %ld\n", __func__, __LINE__, err);
                goto error_unmap;
        }

        /* Tell the peer about the new window */
        err = scif_send_scif_register(ep, window);
        if (err) {
                dev_err(&ep->remote_dev->sdev->dev,
                        "%s %d err %ld\n", __func__, __LINE__, err);
                goto error_unmap;
        }

        scif_put_peer_dev(spdev);
        /* No further failures expected. Insert new window */
        scif_insert_local_window(window, ep);
        dev_dbg(&ep->remote_dev->sdev->dev,
                "SCIFAPI register: ep %p addr %p len 0x%lx computed_offset 0x%llx\n",
                epd, addr, len, computed_offset);
        return computed_offset;
error_unmap:
        scif_destroy_window(ep, window);
error:
        scif_put_peer_dev(spdev);
        dev_err(&ep->remote_dev->sdev->dev,
                "%s %d err %ld\n", __func__, __LINE__, err);
        return err;
}
EXPORT_SYMBOL_GPL(scif_register);
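/*
 * Illustrative usage sketch: scif_register() above pins and registers in
 * one call, and pairs with scif_unregister() below; addresses, offsets and
 * lengths must be page aligned, and the offset argument is only honoured
 * with SCIF_MAP_FIXED. Roughly:
 *
 *      off_t off = scif_register(epd, buf, len, 0,
 *                                SCIF_PROT_READ | SCIF_PROT_WRITE, 0);
 *      if (off < 0)
 *              return off;
 *      ...RMA operations against [off, off + len)...
 *      err = scif_unregister(epd, off, len);
 */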
len should be non zero */ 1728 if (!len || 1729 (ALIGN((u64)len, PAGE_SIZE) != (u64)len)) 1730 return -EINVAL; 1731 1732 /* Offset is not page aligned or offset+len wraps around */ 1733 if ((ALIGN(offset, PAGE_SIZE) != offset) || 1734 (offset < 0) || 1735 (len > LONG_MAX - offset)) 1736 return -EINVAL; 1737 1738 err = scif_verify_epd(ep); 1739 if (err) 1740 return err; 1741 1742 might_sleep(); 1743 nr_pages = len >> PAGE_SHIFT; 1744 1745 req.out_window = &window; 1746 req.offset = offset; 1747 req.prot = 0; 1748 req.nr_bytes = len; 1749 req.type = SCIF_WINDOW_FULL; 1750 req.head = &ep->rma_info.reg_list; 1751 1752 spdev = scif_get_peer_dev(ep->remote_dev); 1753 if (IS_ERR(spdev)) { 1754 err = PTR_ERR(spdev); 1755 return err; 1756 } 1757 mutex_lock(&ep->rma_info.rma_lock); 1758 /* Does a valid window exist? */ 1759 err = scif_query_window(&req); 1760 if (err) { 1761 dev_err(&ep->remote_dev->sdev->dev, 1762 "%s %d err %d\n", __func__, __LINE__, err); 1763 goto error; 1764 } 1765 /* Unregister all the windows in this range */ 1766 err = scif_rma_list_unregister(window, offset, nr_pages); 1767 if (err) 1768 dev_err(&ep->remote_dev->sdev->dev, 1769 "%s %d err %d\n", __func__, __LINE__, err); 1770error: 1771 mutex_unlock(&ep->rma_info.rma_lock); 1772 scif_put_peer_dev(spdev); 1773 return err; 1774} 1775EXPORT_SYMBOL_GPL(scif_unregister);