Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amdgpu: cleanup mtype mapping

Unify how we map the UAPI flags to the PTE hardware flags for a mapping.

Only the MTYPE is actually ASIC dependent, all other flags should be
copied over 1 to 1 and ASIC differences are handled later on.

Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Authored by Christian König and committed by Alex Deucher
71776b6d 1dd077bb

+59 -121
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
··· 381 381 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; 382 382 } 383 383 384 - return amdgpu_gmc_get_pte_flags(adev, mapping_flags); 384 + return amdgpu_gem_va_map_flags(adev, mapping_flags); 385 385 } 386 386 387 387 /* add_bo_to_vm - Add a BO to a VM
+30 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
··· 532 532 DRM_ERROR("Couldn't update BO_VA (%d)\n", r); 533 533 } 534 534 535 + /** 536 + * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags 537 + * 538 + * @adev: amdgpu_device pointer 539 + * @flags: GEM UAPI flags 540 + * 541 + * Returns the GEM UAPI flags mapped into hardware for the ASIC. 542 + */ 543 + uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags) 544 + { 545 + uint64_t pte_flag = 0; 546 + 547 + if (flags & AMDGPU_VM_PAGE_EXECUTABLE) 548 + pte_flag |= AMDGPU_PTE_EXECUTABLE; 549 + if (flags & AMDGPU_VM_PAGE_READABLE) 550 + pte_flag |= AMDGPU_PTE_READABLE; 551 + if (flags & AMDGPU_VM_PAGE_WRITEABLE) 552 + pte_flag |= AMDGPU_PTE_WRITEABLE; 553 + if (flags & AMDGPU_VM_PAGE_PRT) 554 + pte_flag |= AMDGPU_PTE_PRT; 555 + 556 + if (adev->gmc.gmc_funcs->map_mtype) 557 + pte_flag |= amdgpu_gmc_map_mtype(adev, 558 + flags & AMDGPU_VM_MTYPE_MASK); 559 + 560 + return pte_flag; 561 + } 562 + 535 563 int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, 536 564 struct drm_file *filp) 537 565 { ··· 657 629 658 630 switch (args->operation) { 659 631 case AMDGPU_VA_OP_MAP: 660 - va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags); 632 + va_flags = amdgpu_gem_va_map_flags(adev, args->flags); 661 633 r = amdgpu_vm_bo_map(adev, bo_va, args->va_address, 662 634 args->offset_in_bo, args->map_size, 663 635 va_flags); ··· 672 644 args->map_size); 673 645 break; 674 646 case AMDGPU_VA_OP_REPLACE: 675 - va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags); 647 + va_flags = amdgpu_gem_va_map_flags(adev, args->flags); 676 648 r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address, 677 649 args->offset_in_bo, args->map_size, 678 650 va_flags);
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
··· 67 67 struct drm_file *filp); 68 68 int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data, 69 69 struct drm_file *filp); 70 + uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags); 70 71 int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, 71 72 struct drm_file *filp); 72 73 int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
+3 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
··· 99 99 unsigned pasid); 100 100 /* enable/disable PRT support */ 101 101 void (*set_prt)(struct amdgpu_device *adev, bool enable); 102 - /* set pte flags based per asic */ 103 - uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev, 104 - uint32_t flags); 102 + /* map mtype to hardware flags */ 103 + uint64_t (*map_mtype)(struct amdgpu_device *adev, uint32_t flags); 105 104 /* get the pde for a given mc addr */ 106 105 void (*get_vm_pde)(struct amdgpu_device *adev, int level, 107 106 u64 *dst, u64 *flags); ··· 183 184 #define amdgpu_gmc_flush_gpu_tlb(adev, vmid, vmhub, type) ((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (vmhub), (type))) 184 185 #define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr)) 185 186 #define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid)) 187 + #define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags)) 186 188 #define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags)) 187 - #define amdgpu_gmc_get_pte_flags(adev, flags) (adev)->gmc.gmc_funcs->get_vm_pte_flags((adev),(flags)) 188 189 189 190 /** 190 191 * amdgpu_gmc_vram_full_visible - Check if full VRAM is visible through the BAR
+4 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
··· 1571 1571 if (!(mapping->flags & AMDGPU_PTE_WRITEABLE)) 1572 1572 flags &= ~AMDGPU_PTE_WRITEABLE; 1573 1573 1574 - flags &= ~AMDGPU_PTE_EXECUTABLE; 1575 - flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE; 1574 + if (adev->asic_type >= CHIP_TONGA) { 1575 + flags &= ~AMDGPU_PTE_EXECUTABLE; 1576 + flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE; 1577 + } 1576 1578 1577 1579 if (adev->asic_type >= CHIP_NAVI10) { 1578 1580 flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
+10 -30
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
··· 397 397 * 1 system 398 398 * 0 valid 399 399 */ 400 - static uint64_t gmc_v10_0_get_vm_pte_flags(struct amdgpu_device *adev, 401 - uint32_t flags) 400 + 401 + static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags) 402 402 { 403 - uint64_t pte_flag = 0; 404 - 405 - if (flags & AMDGPU_VM_PAGE_EXECUTABLE) 406 - pte_flag |= AMDGPU_PTE_EXECUTABLE; 407 - if (flags & AMDGPU_VM_PAGE_READABLE) 408 - pte_flag |= AMDGPU_PTE_READABLE; 409 - if (flags & AMDGPU_VM_PAGE_WRITEABLE) 410 - pte_flag |= AMDGPU_PTE_WRITEABLE; 411 - 412 - switch (flags & AMDGPU_VM_MTYPE_MASK) { 403 + switch (flags) { 413 404 case AMDGPU_VM_MTYPE_DEFAULT: 414 - pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC); 415 - break; 405 + return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC); 416 406 case AMDGPU_VM_MTYPE_NC: 417 - pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC); 418 - break; 407 + return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC); 419 408 case AMDGPU_VM_MTYPE_WC: 420 - pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_WC); 421 - break; 409 + return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC); 422 410 case AMDGPU_VM_MTYPE_CC: 423 - pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_CC); 424 - break; 411 + return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC); 425 412 case AMDGPU_VM_MTYPE_UC: 426 - pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_UC); 427 - break; 413 + return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC); 428 414 default: 429 - pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC); 430 - break; 415 + return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC); 431 416 } 432 - 433 - if (flags & AMDGPU_VM_PAGE_PRT) 434 - pte_flag |= AMDGPU_PTE_PRT; 435 - 436 - return pte_flag; 437 417 } 438 418 439 419 static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level, ··· 444 464 .flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb, 445 465 .emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb, 446 466 .emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping, 447 - .get_vm_pte_flags = gmc_v10_0_get_vm_pte_flags, 467 + .map_mtype = gmc_v10_0_map_mtype, 448 468 .get_vm_pde = gmc_v10_0_get_vm_pde 449 469 }; 
450 470
-16
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
··· 386 386 return pd_addr; 387 387 } 388 388 389 - static uint64_t gmc_v6_0_get_vm_pte_flags(struct amdgpu_device *adev, 390 - uint32_t flags) 391 - { 392 - uint64_t pte_flag = 0; 393 - 394 - if (flags & AMDGPU_VM_PAGE_READABLE) 395 - pte_flag |= AMDGPU_PTE_READABLE; 396 - if (flags & AMDGPU_VM_PAGE_WRITEABLE) 397 - pte_flag |= AMDGPU_PTE_WRITEABLE; 398 - if (flags & AMDGPU_VM_PAGE_PRT) 399 - pte_flag |= AMDGPU_PTE_PRT; 400 - 401 - return pte_flag; 402 - } 403 - 404 389 static void gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, int level, 405 390 uint64_t *addr, uint64_t *flags) 406 391 { ··· 1138 1153 .emit_flush_gpu_tlb = gmc_v6_0_emit_flush_gpu_tlb, 1139 1154 .set_prt = gmc_v6_0_set_prt, 1140 1155 .get_vm_pde = gmc_v6_0_get_vm_pde, 1141 - .get_vm_pte_flags = gmc_v6_0_get_vm_pte_flags 1142 1156 }; 1143 1157 1144 1158 static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
-16
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
··· 463 463 amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid); 464 464 } 465 465 466 - static uint64_t gmc_v7_0_get_vm_pte_flags(struct amdgpu_device *adev, 467 - uint32_t flags) 468 - { 469 - uint64_t pte_flag = 0; 470 - 471 - if (flags & AMDGPU_VM_PAGE_READABLE) 472 - pte_flag |= AMDGPU_PTE_READABLE; 473 - if (flags & AMDGPU_VM_PAGE_WRITEABLE) 474 - pte_flag |= AMDGPU_PTE_WRITEABLE; 475 - if (flags & AMDGPU_VM_PAGE_PRT) 476 - pte_flag |= AMDGPU_PTE_PRT; 477 - 478 - return pte_flag; 479 - } 480 - 481 466 static void gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, int level, 482 467 uint64_t *addr, uint64_t *flags) 483 468 { ··· 1328 1343 .emit_flush_gpu_tlb = gmc_v7_0_emit_flush_gpu_tlb, 1329 1344 .emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping, 1330 1345 .set_prt = gmc_v7_0_set_prt, 1331 - .get_vm_pte_flags = gmc_v7_0_get_vm_pte_flags, 1332 1346 .get_vm_pde = gmc_v7_0_get_vm_pde 1333 1347 }; 1334 1348
-18
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
··· 686 686 * 0 valid 687 687 */ 688 688 689 - static uint64_t gmc_v8_0_get_vm_pte_flags(struct amdgpu_device *adev, 690 - uint32_t flags) 691 - { 692 - uint64_t pte_flag = 0; 693 - 694 - if (flags & AMDGPU_VM_PAGE_EXECUTABLE) 695 - pte_flag |= AMDGPU_PTE_EXECUTABLE; 696 - if (flags & AMDGPU_VM_PAGE_READABLE) 697 - pte_flag |= AMDGPU_PTE_READABLE; 698 - if (flags & AMDGPU_VM_PAGE_WRITEABLE) 699 - pte_flag |= AMDGPU_PTE_WRITEABLE; 700 - if (flags & AMDGPU_VM_PAGE_PRT) 701 - pte_flag |= AMDGPU_PTE_PRT; 702 - 703 - return pte_flag; 704 - } 705 - 706 689 static void gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, int level, 707 690 uint64_t *addr, uint64_t *flags) 708 691 { ··· 1694 1711 .emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb, 1695 1712 .emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping, 1696 1713 .set_prt = gmc_v8_0_set_prt, 1697 - .get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags, 1698 1714 .get_vm_pde = gmc_v8_0_get_vm_pde 1699 1715 }; 1700 1716
+10 -32
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
··· 608 608 * 0 valid 609 609 */ 610 610 611 - static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev, 612 - uint32_t flags) 611 + static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags) 613 612 614 613 { 615 - uint64_t pte_flag = 0; 616 - 617 - if (flags & AMDGPU_VM_PAGE_EXECUTABLE) 618 - pte_flag |= AMDGPU_PTE_EXECUTABLE; 619 - if (flags & AMDGPU_VM_PAGE_READABLE) 620 - pte_flag |= AMDGPU_PTE_READABLE; 621 - if (flags & AMDGPU_VM_PAGE_WRITEABLE) 622 - pte_flag |= AMDGPU_PTE_WRITEABLE; 623 - 624 - switch (flags & AMDGPU_VM_MTYPE_MASK) { 614 + switch (flags) { 625 615 case AMDGPU_VM_MTYPE_DEFAULT: 626 - pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC); 627 - break; 616 + return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC); 628 617 case AMDGPU_VM_MTYPE_NC: 629 - pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC); 630 - break; 618 + return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC); 631 619 case AMDGPU_VM_MTYPE_WC: 632 - pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_WC); 633 - break; 620 + return AMDGPU_PTE_MTYPE_VG10(MTYPE_WC); 634 621 case AMDGPU_VM_MTYPE_RW: 635 - pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_RW); 636 - break; 622 + return AMDGPU_PTE_MTYPE_VG10(MTYPE_RW); 637 623 case AMDGPU_VM_MTYPE_CC: 638 - pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_CC); 639 - break; 624 + return AMDGPU_PTE_MTYPE_VG10(MTYPE_CC); 640 625 case AMDGPU_VM_MTYPE_UC: 641 - pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_UC); 642 - break; 626 + return AMDGPU_PTE_MTYPE_VG10(MTYPE_UC); 643 627 default: 644 - pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC); 645 - break; 628 + return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC); 646 629 } 647 - 648 - if (flags & AMDGPU_VM_PAGE_PRT) 649 - pte_flag |= AMDGPU_PTE_PRT; 650 - 651 - return pte_flag; 652 630 } 653 631 654 632 static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level, ··· 657 679 .flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb, 658 680 .emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb, 659 681 .emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping, 660 - .get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags, 682 + .map_mtype = gmc_v9_0_map_mtype, 661 683 .get_vm_pde = gmc_v9_0_get_vm_pde 662 684 }; 663 685