Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amdgpu: cleanup PTE flag generation v3

Move the ASIC specific code into a new callback function.

v2: mask the flags for SI and CIK instead of a BUG_ON().
v3: remove last missed BUG_ON().

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Authored by Christian König and committed by Alex Deucher
cbfae36c 71776b6d

+81 -31
+5
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
··· 104 104 /* get the pde for a given mc addr */ 105 105 void (*get_vm_pde)(struct amdgpu_device *adev, int level, 106 106 u64 *dst, u64 *flags); 107 + /* get the pte flags to use for a BO VA mapping */ 108 + void (*get_vm_pte)(struct amdgpu_device *adev, 109 + struct amdgpu_bo_va_mapping *mapping, 110 + uint64_t *flags); 107 111 }; 108 112 109 113 struct amdgpu_xgmi { ··· 189 185 #define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid)) 190 186 #define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags)) 191 187 #define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags)) 188 + #define amdgpu_gmc_get_vm_pte(adev, mapping, flags) (adev)->gmc.gmc_funcs->get_vm_pte((adev), (mapping), (flags)) 192 189 193 190 /** 194 191 * amdgpu_gmc_vram_full_visible - Check if full VRAM is visible through the BAR
+2 -27
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
··· 1571 1571 if (!(mapping->flags & AMDGPU_PTE_WRITEABLE)) 1572 1572 flags &= ~AMDGPU_PTE_WRITEABLE; 1573 1573 1574 - if (adev->asic_type >= CHIP_TONGA) { 1575 - flags &= ~AMDGPU_PTE_EXECUTABLE; 1576 - flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE; 1577 - } 1578 - 1579 - if (adev->asic_type >= CHIP_NAVI10) { 1580 - flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK; 1581 - flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK); 1582 - } else { 1583 - flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK; 1584 - flags |= (mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK); 1585 - } 1586 - 1587 - if ((mapping->flags & AMDGPU_PTE_PRT) && 1588 - (adev->asic_type >= CHIP_VEGA10)) { 1589 - flags |= AMDGPU_PTE_PRT; 1590 - if (adev->asic_type >= CHIP_NAVI10) { 1591 - flags |= AMDGPU_PTE_SNOOPED; 1592 - flags |= AMDGPU_PTE_LOG; 1593 - flags |= AMDGPU_PTE_SYSTEM; 1594 - } 1595 - flags &= ~AMDGPU_PTE_VALID; 1596 - } 1597 - if (adev->asic_type == CHIP_ARCTURUS && 1598 - !(flags & AMDGPU_PTE_SYSTEM) && 1599 - mapping->bo_va->is_xgmi) 1600 - flags |= AMDGPU_PTE_SNOOPED; 1574 + /* Apply ASIC specific mapping flags */ 1575 + amdgpu_gmc_get_vm_pte(adev, mapping, &flags); 1601 1576 1602 1577 trace_amdgpu_vm_bo_update(mapping); 1603 1578
+21 -1
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
··· 440 440 } 441 441 } 442 442 443 + static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev, 444 + struct amdgpu_bo_va_mapping *mapping, 445 + uint64_t *flags) 446 + { 447 + *flags &= ~AMDGPU_PTE_EXECUTABLE; 448 + *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE; 449 + 450 + *flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK; 451 + *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK); 452 + 453 + if (mapping->flags & AMDGPU_PTE_PRT) { 454 + *flags |= AMDGPU_PTE_PRT; 455 + *flags |= AMDGPU_PTE_SNOOPED; 456 + *flags |= AMDGPU_PTE_LOG; 457 + *flags |= AMDGPU_PTE_SYSTEM; 458 + *flags &= ~AMDGPU_PTE_VALID; 459 + } 460 + } 461 + 443 462 static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = { 444 463 .flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb, 445 464 .emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb, 446 465 .emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping, 447 466 .map_mtype = gmc_v10_0_map_mtype, 448 - .get_vm_pde = gmc_v10_0_get_vm_pde 467 + .get_vm_pde = gmc_v10_0_get_vm_pde, 468 + .get_vm_pte = gmc_v10_0_get_vm_pte 449 469 }; 450 470 451 471 static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
+9
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
··· 392 392 BUG_ON(*addr & 0xFFFFFF0000000FFFULL); 393 393 } 394 394 395 + static void gmc_v6_0_get_vm_pte(struct amdgpu_device *adev, 396 + struct amdgpu_bo_va_mapping *mapping, 397 + uint64_t *flags) 398 + { 399 + *flags &= ~AMDGPU_PTE_EXECUTABLE; 400 + *flags &= ~AMDGPU_PTE_PRT; 401 + } 402 + 395 403 static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev, 396 404 bool value) 397 405 { ··· 1146 1138 .emit_flush_gpu_tlb = gmc_v6_0_emit_flush_gpu_tlb, 1147 1139 .set_prt = gmc_v6_0_set_prt, 1148 1140 .get_vm_pde = gmc_v6_0_get_vm_pde, 1141 + .get_vm_pte = gmc_v6_0_get_vm_pte, 1149 1142 }; 1150 1143 1151 1144 static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
+10 -1
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
··· 469 469 BUG_ON(*addr & 0xFFFFFF0000000FFFULL); 470 470 } 471 471 472 + static void gmc_v7_0_get_vm_pte(struct amdgpu_device *adev, 473 + struct amdgpu_bo_va_mapping *mapping, 474 + uint64_t *flags) 475 + { 476 + *flags &= ~AMDGPU_PTE_EXECUTABLE; 477 + *flags &= ~AMDGPU_PTE_PRT; 478 + } 479 + 472 480 /** 473 481 * gmc_v8_0_set_fault_enable_default - update VM fault handling 474 482 * ··· 1336 1328 .emit_flush_gpu_tlb = gmc_v7_0_emit_flush_gpu_tlb, 1337 1329 .emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping, 1338 1330 .set_prt = gmc_v7_0_set_prt, 1339 - .get_vm_pde = gmc_v7_0_get_vm_pde 1331 + .get_vm_pde = gmc_v7_0_get_vm_pde, 1332 + .get_vm_pte = gmc_v7_0_get_vm_pte 1340 1333 }; 1341 1334 1342 1335 static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
+11 -1
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
··· 692 692 BUG_ON(*addr & 0xFFFFFF0000000FFFULL); 693 693 } 694 694 695 + static void gmc_v8_0_get_vm_pte(struct amdgpu_device *adev, 696 + struct amdgpu_bo_va_mapping *mapping, 697 + uint64_t *flags) 698 + { 699 + *flags &= ~AMDGPU_PTE_EXECUTABLE; 700 + *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE; 701 + *flags &= ~AMDGPU_PTE_PRT; 702 + } 703 + 695 704 /** 696 705 * gmc_v8_0_set_fault_enable_default - update VM fault handling 697 706 * ··· 1703 1694 .emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb, 1704 1695 .emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping, 1705 1696 .set_prt = gmc_v8_0_set_prt, 1706 - .get_vm_pde = gmc_v8_0_get_vm_pde 1697 + .get_vm_pde = gmc_v8_0_get_vm_pde, 1698 + .get_vm_pte = gmc_v8_0_get_vm_pte 1707 1699 }; 1708 1700 1709 1701 static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
+23 -1
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
··· 653 653 } 654 654 } 655 655 656 + static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev, 657 + struct amdgpu_bo_va_mapping *mapping, 658 + uint64_t *flags) 659 + { 660 + *flags &= ~AMDGPU_PTE_EXECUTABLE; 661 + *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE; 662 + 663 + *flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK; 664 + *flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK; 665 + 666 + if (mapping->flags & AMDGPU_PTE_PRT) { 667 + *flags |= AMDGPU_PTE_PRT; 668 + *flags &= ~AMDGPU_PTE_VALID; 669 + } 670 + 671 + if (adev->asic_type == CHIP_ARCTURUS && 672 + !(*flags & AMDGPU_PTE_SYSTEM) && 673 + mapping->bo_va->is_xgmi) 674 + *flags |= AMDGPU_PTE_SNOOPED; 675 + } 676 + 656 677 static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = { 657 678 .flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb, 658 679 .emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb, 659 680 .emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping, 660 681 .map_mtype = gmc_v9_0_map_mtype, 661 - .get_vm_pde = gmc_v9_0_get_vm_pde 682 + .get_vm_pde = gmc_v9_0_get_vm_pde, 683 + .get_vm_pte = gmc_v9_0_get_vm_pte 662 684 }; 663 685 664 686 static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)