Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amdgpu: Add separate array of read and write for BO handles

Drop AMDGPU_USERQ_BO_WRITE as this should not be a global option
of the IOCTL; it should be an option per buffer. Hence, add separate
arrays for read and write BO handles.

v2(Marek):
- Internal kernel details shouldn't be here. This file should only
document the observed behavior, not the implementation.

v3:
- Fix DAL CI clang issue.

v4:
- Added Alex's R-b to merge the kernel UAPI changes since he has
already approved the amdgpu_drm.h changes.

Signed-off-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Suggested-by: Marek Olšák <marek.olsak@amd.com>
Suggested-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

authored by

Arunpravin Paneer Selvam and committed by
Alex Deucher
cb4a73f4 d8675102

+217 -83
+183 -67
drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
··· 386 386 struct amdgpu_fpriv *fpriv = filp->driver_priv; 387 387 struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr; 388 388 struct drm_amdgpu_userq_signal *args = data; 389 + struct drm_gem_object **gobj_write = NULL; 390 + struct drm_gem_object **gobj_read = NULL; 389 391 struct amdgpu_usermode_queue *queue; 390 - struct drm_gem_object **gobj = NULL; 391 392 struct drm_syncobj **syncobj = NULL; 393 + u32 *bo_handles_write, num_write_bo_handles; 392 394 u32 *syncobj_handles, num_syncobj_handles; 393 - u32 *bo_handles, num_bo_handles; 394 - int r, i, entry, boentry; 395 + u32 *bo_handles_read, num_read_bo_handles; 396 + int r, i, entry, rentry, wentry; 395 397 struct dma_fence *fence; 396 398 struct drm_exec exec; 397 399 u64 wptr; ··· 419 417 } 420 418 } 421 419 422 - num_bo_handles = args->num_bo_handles; 423 - bo_handles = memdup_user(u64_to_user_ptr(args->bo_handles_array), 424 - sizeof(u32) * num_bo_handles); 425 - if (IS_ERR(bo_handles)) 420 + num_read_bo_handles = args->num_read_bo_handles; 421 + bo_handles_read = memdup_user(u64_to_user_ptr(args->bo_read_handles), 422 + sizeof(u32) * num_read_bo_handles); 423 + if (IS_ERR(bo_handles_read)) { 424 + r = PTR_ERR(bo_handles_read); 426 425 goto free_syncobj; 427 - 428 - /* Array of pointers to the GEM objects */ 429 - gobj = kmalloc_array(num_bo_handles, sizeof(*gobj), GFP_KERNEL); 430 - if (!gobj) { 431 - r = -ENOMEM; 432 - goto free_bo_handles; 433 426 } 434 427 435 - for (boentry = 0; boentry < num_bo_handles; boentry++) { 436 - gobj[boentry] = drm_gem_object_lookup(filp, bo_handles[boentry]); 437 - if (!gobj[boentry]) { 428 + /* Array of pointers to the GEM read objects */ 429 + gobj_read = kmalloc_array(num_read_bo_handles, sizeof(*gobj_read), GFP_KERNEL); 430 + if (!gobj_read) { 431 + r = -ENOMEM; 432 + goto free_bo_handles_read; 433 + } 434 + 435 + for (rentry = 0; rentry < num_read_bo_handles; rentry++) { 436 + gobj_read[rentry] = drm_gem_object_lookup(filp, bo_handles_read[rentry]); 437 + if 
(!gobj_read[rentry]) { 438 438 r = -ENOENT; 439 - goto put_gobj; 439 + goto put_gobj_read; 440 440 } 441 441 } 442 442 443 - drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, num_bo_handles); 443 + num_write_bo_handles = args->num_write_bo_handles; 444 + bo_handles_write = memdup_user(u64_to_user_ptr(args->bo_write_handles), 445 + sizeof(u32) * num_write_bo_handles); 446 + if (IS_ERR(bo_handles_write)) { 447 + r = PTR_ERR(bo_handles_write); 448 + goto put_gobj_read; 449 + } 450 + 451 + /* Array of pointers to the GEM write objects */ 452 + gobj_write = kmalloc_array(num_write_bo_handles, sizeof(*gobj_write), GFP_KERNEL); 453 + if (!gobj_write) { 454 + r = -ENOMEM; 455 + goto free_bo_handles_write; 456 + } 457 + 458 + for (wentry = 0; wentry < num_write_bo_handles; wentry++) { 459 + gobj_write[wentry] = drm_gem_object_lookup(filp, bo_handles_write[wentry]); 460 + if (!gobj_write[wentry]) { 461 + r = -ENOENT; 462 + goto put_gobj_write; 463 + } 464 + } 465 + 466 + drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 467 + (num_read_bo_handles + num_write_bo_handles)); 444 468 445 469 /* Lock all BOs with retry handling */ 446 470 drm_exec_until_all_locked(&exec) { 447 - r = drm_exec_prepare_array(&exec, gobj, num_bo_handles, 1); 471 + r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1); 472 + drm_exec_retry_on_contention(&exec); 473 + if (r) 474 + goto exec_fini; 475 + 476 + r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1); 448 477 drm_exec_retry_on_contention(&exec); 449 478 if (r) 450 479 goto exec_fini; ··· 497 464 if (r) 498 465 goto exec_fini; 499 466 500 - for (i = 0; i < num_bo_handles; i++) 501 - dma_resv_add_fence(gobj[i]->resv, fence, 502 - dma_resv_usage_rw(args->bo_flags & 503 - AMDGPU_USERQ_BO_WRITE)); 467 + for (i = 0; i < num_read_bo_handles; i++) { 468 + if (!gobj_read || !gobj_read[i]->resv) 469 + continue; 470 + 471 + dma_resv_add_fence(gobj_read[i]->resv, fence, 472 + DMA_RESV_USAGE_READ); 473 + } 474 + 475 + for 
(i = 0; i < num_write_bo_handles; i++) { 476 + if (!gobj_write || !gobj_write[i]->resv) 477 + continue; 478 + 479 + dma_resv_add_fence(gobj_write[i]->resv, fence, 480 + DMA_RESV_USAGE_WRITE); 481 + } 504 482 505 483 /* Add the created fence to syncobj/BO's */ 506 484 for (i = 0; i < num_syncobj_handles; i++) ··· 522 478 523 479 exec_fini: 524 480 drm_exec_fini(&exec); 525 - put_gobj: 526 - while (boentry-- > 0) 527 - drm_gem_object_put(gobj[boentry]); 528 - kfree(gobj); 529 - free_bo_handles: 530 - kfree(bo_handles); 481 + put_gobj_write: 482 + while (wentry-- > 0) 483 + drm_gem_object_put(gobj_write[wentry]); 484 + kfree(gobj_write); 485 + free_bo_handles_write: 486 + kfree(bo_handles_write); 487 + put_gobj_read: 488 + while (rentry-- > 0) 489 + drm_gem_object_put(gobj_read[rentry]); 490 + kfree(gobj_read); 491 + free_bo_handles_read: 492 + kfree(bo_handles_read); 531 493 free_syncobj: 532 494 while (entry-- > 0) 533 495 if (syncobj[entry]) ··· 548 498 int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data, 549 499 struct drm_file *filp) 550 500 { 551 - u32 *syncobj_handles, *timeline_points, *timeline_handles, *bo_handles; 552 - u32 num_syncobj, num_bo_handles, num_points; 501 + u32 *syncobj_handles, *timeline_points, *timeline_handles, *bo_handles_read, *bo_handles_write; 502 + u32 num_syncobj, num_read_bo_handles, num_write_bo_handles, num_points; 553 503 struct drm_amdgpu_userq_fence_info *fence_info = NULL; 554 504 struct drm_amdgpu_userq_wait *wait_info = data; 505 + struct drm_gem_object **gobj_write; 506 + struct drm_gem_object **gobj_read; 555 507 struct dma_fence **fences = NULL; 556 - struct drm_gem_object **gobj; 508 + int r, i, rentry, wentry, cnt; 557 509 struct drm_exec exec; 558 - int r, i, entry, cnt; 559 510 u64 num_fences = 0; 560 511 561 - num_bo_handles = wait_info->num_bo_handles; 562 - bo_handles = memdup_user(u64_to_user_ptr(wait_info->bo_handles_array), 563 - sizeof(u32) * num_bo_handles); 564 - if (IS_ERR(bo_handles)) 565 - return 
PTR_ERR(bo_handles); 512 + num_read_bo_handles = wait_info->num_read_bo_handles; 513 + bo_handles_read = memdup_user(u64_to_user_ptr(wait_info->bo_read_handles), 514 + sizeof(u32) * num_read_bo_handles); 515 + if (IS_ERR(bo_handles_read)) 516 + return PTR_ERR(bo_handles_read); 517 + 518 + num_write_bo_handles = wait_info->num_write_bo_handles; 519 + bo_handles_write = memdup_user(u64_to_user_ptr(wait_info->bo_write_handles), 520 + sizeof(u32) * num_write_bo_handles); 521 + if (IS_ERR(bo_handles_write)) { 522 + r = PTR_ERR(bo_handles_write); 523 + goto free_bo_handles_read; 524 + } 566 525 567 526 num_syncobj = wait_info->num_syncobj_handles; 568 527 syncobj_handles = memdup_user(u64_to_user_ptr(wait_info->syncobj_handles_array), 569 528 sizeof(u32) * num_syncobj); 570 529 if (IS_ERR(syncobj_handles)) { 571 530 r = PTR_ERR(syncobj_handles); 572 - goto free_bo_handles; 531 + goto free_bo_handles_write; 573 532 } 574 533 575 534 num_points = wait_info->num_points; ··· 596 537 goto free_timeline_handles; 597 538 } 598 539 599 - gobj = kmalloc_array(num_bo_handles, sizeof(*gobj), GFP_KERNEL); 600 - if (!gobj) { 540 + gobj_read = kmalloc_array(num_read_bo_handles, sizeof(*gobj_read), GFP_KERNEL); 541 + if (!gobj_read) { 601 542 r = -ENOMEM; 602 543 goto free_timeline_points; 603 544 } 604 545 605 - for (entry = 0; entry < num_bo_handles; entry++) { 606 - gobj[entry] = drm_gem_object_lookup(filp, bo_handles[entry]); 607 - if (!gobj[entry]) { 546 + for (rentry = 0; rentry < num_read_bo_handles; rentry++) { 547 + gobj_read[rentry] = drm_gem_object_lookup(filp, bo_handles_read[rentry]); 548 + if (!gobj_read[rentry]) { 608 549 r = -ENOENT; 609 - goto put_gobj; 550 + goto put_gobj_read; 610 551 } 611 552 } 612 553 613 - drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, num_bo_handles); 554 + gobj_write = kmalloc_array(num_write_bo_handles, sizeof(*gobj_write), GFP_KERNEL); 555 + if (!gobj_write) { 556 + r = -ENOMEM; 557 + goto put_gobj_read; 558 + } 559 + 560 + for (wentry = 
0; wentry < num_write_bo_handles; wentry++) { 561 + gobj_write[wentry] = drm_gem_object_lookup(filp, bo_handles_write[wentry]); 562 + if (!gobj_write[wentry]) { 563 + r = -ENOENT; 564 + goto put_gobj_write; 565 + } 566 + } 567 + 568 + drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 569 + (num_read_bo_handles + num_write_bo_handles)); 614 570 615 571 /* Lock all BOs with retry handling */ 616 572 drm_exec_until_all_locked(&exec) { 617 - r = drm_exec_prepare_array(&exec, gobj, num_bo_handles, 0); 573 + r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1); 618 574 drm_exec_retry_on_contention(&exec); 619 575 if (r) { 620 576 drm_exec_fini(&exec); 621 - goto put_gobj; 577 + goto put_gobj_write; 578 + } 579 + 580 + r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1); 581 + drm_exec_retry_on_contention(&exec); 582 + if (r) { 583 + drm_exec_fini(&exec); 584 + goto put_gobj_write; 622 585 } 623 586 } 624 587 ··· 681 600 } 682 601 683 602 /* Count GEM objects fence */ 684 - for (i = 0; i < num_bo_handles; i++) { 603 + for (i = 0; i < num_read_bo_handles; i++) { 685 604 struct dma_resv_iter resv_cursor; 686 605 struct dma_fence *fence; 687 606 688 - dma_resv_for_each_fence(&resv_cursor, gobj[i]->resv, 689 - dma_resv_usage_rw(wait_info->bo_wait_flags & 690 - AMDGPU_USERQ_BO_WRITE), fence) 607 + dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv, 608 + DMA_RESV_USAGE_READ, fence) 609 + num_fences++; 610 + } 611 + 612 + for (i = 0; i < num_write_bo_handles; i++) { 613 + struct dma_resv_iter resv_cursor; 614 + struct dma_fence *fence; 615 + 616 + dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv, 617 + DMA_RESV_USAGE_WRITE, fence) 691 618 num_fences++; 692 619 } 693 620 ··· 721 632 goto free_fence_info; 722 633 } 723 634 724 - /* Retrieve GEM objects fence */ 725 - for (i = 0; i < num_bo_handles; i++) { 635 + /* Retrieve GEM read objects fence */ 636 + for (i = 0; i < num_read_bo_handles; i++) { 726 637 struct dma_resv_iter 
resv_cursor; 727 638 struct dma_fence *fence; 728 639 729 - dma_resv_for_each_fence(&resv_cursor, gobj[i]->resv, 730 - dma_resv_usage_rw(wait_info->bo_wait_flags & 731 - AMDGPU_USERQ_BO_WRITE), fence) { 640 + dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv, 641 + DMA_RESV_USAGE_READ, fence) { 642 + if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) { 643 + r = -EINVAL; 644 + goto free_fences; 645 + } 646 + 647 + fences[num_fences++] = fence; 648 + dma_fence_get(fence); 649 + } 650 + } 651 + 652 + /* Retrieve GEM write objects fence */ 653 + for (i = 0; i < num_write_bo_handles; i++) { 654 + struct dma_resv_iter resv_cursor; 655 + struct dma_fence *fence; 656 + 657 + dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv, 658 + DMA_RESV_USAGE_WRITE, fence) { 732 659 if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) { 733 660 r = -EINVAL; 734 661 goto free_fences; ··· 860 755 } 861 756 862 757 drm_exec_fini(&exec); 863 - for (i = 0; i < num_bo_handles; i++) 864 - drm_gem_object_put(gobj[i]); 865 - kfree(gobj); 758 + for (i = 0; i < num_read_bo_handles; i++) 759 + drm_gem_object_put(gobj_read[i]); 760 + kfree(gobj_read); 761 + 762 + for (i = 0; i < num_write_bo_handles; i++) 763 + drm_gem_object_put(gobj_write[i]); 764 + kfree(gobj_write); 866 765 867 766 kfree(timeline_points); 868 767 kfree(timeline_handles); 869 768 kfree(syncobj_handles); 870 - kfree(bo_handles); 769 + kfree(bo_handles_write); 770 + kfree(bo_handles_read); 871 771 872 772 return 0; 873 773 ··· 884 774 kfree(fence_info); 885 775 exec_fini: 886 776 drm_exec_fini(&exec); 887 - put_gobj: 888 - while (entry-- > 0) 889 - drm_gem_object_put(gobj[entry]); 890 - kfree(gobj); 777 + put_gobj_write: 778 + while (wentry-- > 0) 779 + drm_gem_object_put(gobj_write[wentry]); 780 + kfree(gobj_write); 781 + put_gobj_read: 782 + while (rentry-- > 0) 783 + drm_gem_object_put(gobj_read[rentry]); 784 + kfree(gobj_read); 891 785 free_timeline_points: 892 786 kfree(timeline_points); 893 787 
free_timeline_handles: 894 788 kfree(timeline_handles); 895 789 free_syncobj_handles: 896 790 kfree(syncobj_handles); 897 - free_bo_handles: 898 - kfree(bo_handles); 791 + free_bo_handles_write: 792 + kfree(bo_handles_write); 793 + free_bo_handles_read: 794 + kfree(bo_handles_read); 899 795 900 796 return r; 901 797 }
+34 -16
include/uapi/drm/amdgpu_drm.h
··· 452 452 __u64 eop_va; 453 453 }; 454 454 455 - /* dma_resv usage flag */ 456 - #define AMDGPU_USERQ_BO_WRITE 1 457 - 458 455 /* userq signal/wait ioctl */ 459 456 struct drm_amdgpu_userq_signal { 460 457 /** ··· 481 484 */ 482 485 __u64 syncobj_point; 483 486 /** 484 - * @bo_handles_array: An array of GEM BO handles used by the userq fence creation 485 - * IOCTL to install the created dma_fence object which can be utilized by 486 - * userspace to synchronize the BO usage between user processes. 487 + * @bo_read_handles: The list of BO handles that the submitted user queue job 488 + * is using for read only. This will update BO fences in the kernel. 487 489 */ 488 - __u64 bo_handles_array; 490 + __u64 bo_read_handles; 489 491 /** 490 - * @num_bo_handles: A count that represents the number of GEM BO handles in 491 - * @bo_handles_array. 492 + * @bo_write_handles: The list of BO handles that the submitted user queue job 493 + * is using for write only. This will update BO fences in the kernel. 492 494 */ 493 - __u32 num_bo_handles; 495 + __u64 bo_write_handles; 496 + /** 497 + * @num_read_bo_handles: A count that represents the number of read BO handles in 498 + * @bo_read_handles. 499 + */ 500 + __u32 num_read_bo_handles; 501 + /** 502 + * @num_write_bo_handles: A count that represents the number of write BO handles in 503 + * @bo_write_handles. 504 + */ 505 + __u32 num_write_bo_handles; 494 506 /** 495 507 * @bo_flags: flags to indicate BOs synchronize for READ or WRITE 496 508 */ 497 509 __u32 bo_flags; 510 + __u32 pad; 498 511 }; 499 512 500 513 struct drm_amdgpu_userq_fence_info { ··· 558 551 */ 559 552 __u64 syncobj_timeline_points; 560 553 /** 561 - * @bo_handles_array: An array of GEM BO handles defined to fetch the fence 562 - * wait information of every BO handles in the array. 554 + * @bo_read_handles: The list of read BO handles submitted by the user queue 555 + * job to get the va/value pairs. 
563 556 */ 564 - __u64 bo_handles_array; 557 + __u64 bo_read_handles; 558 + /** 559 + * @bo_write_handles: The list of write BO handles submitted by the user queue 560 + * job to get the va/value pairs. 561 + */ 562 + __u64 bo_write_handles; 565 563 /** 566 564 * @num_syncobj_handles: A count that represents the number of syncobj handles in 567 565 * @syncobj_handles_array. 568 566 */ 569 567 __u32 num_syncobj_handles; 570 568 /** 571 - * @num_bo_handles: A count that represents the number of GEM BO handles in 572 - * @bo_handles_array. 569 + * @num_read_bo_handles: A count that represents the number of read BO handles in 570 + * @bo_read_handles. 573 571 */ 574 - __u32 num_bo_handles; 572 + __u32 num_read_bo_handles; 573 + /** 574 + * @num_write_bo_handles: A count that represents the number of write BO handles in 575 + * @bo_write_handles. 576 + */ 577 + __u32 num_write_bo_handles; 578 + __u32 pad; 575 579 /** 576 580 * @userq_fence_info: An array of fence information (va and value) pair of each 577 581 * objects stored in @syncobj_handles_array and @bo_handles_array.