Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/radeon: Split off gart_get_page_entry ASIC hook from set_page_entry

get_page_entry calculates the GART page table entry, which is just written
to the GART page table by set_page_entry.

This is a prerequisite for the following fix.

Reviewed-by: Christian König <christian.koenig@amd.com>
Cc: stable@vger.kernel.org
Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Authored by Michel Dänzer; committed by Alex Deucher
cb658906 67cf2d39

+101 -38
+8 -2
drivers/gpu/drm/radeon/r100.c
··· 644 644 return r; 645 645 rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; 646 646 rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush; 647 + rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry; 647 648 rdev->asic->gart.set_page = &r100_pci_gart_set_page; 648 649 return radeon_gart_table_ram_alloc(rdev); 649 650 } ··· 682 681 WREG32(RADEON_AIC_HI_ADDR, 0); 683 682 } 684 683 684 + uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags) 685 + { 686 + return addr; 687 + } 688 + 685 689 void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i, 686 - uint64_t addr, uint32_t flags) 690 + uint64_t entry) 687 691 { 688 692 u32 *gtt = rdev->gart.ptr; 689 - gtt[i] = cpu_to_le32(lower_32_bits(addr)); 693 + gtt[i] = cpu_to_le32(lower_32_bits(entry)); 690 694 } 691 695 692 696 void r100_pci_gart_fini(struct radeon_device *rdev)
+11 -5
drivers/gpu/drm/radeon/r300.c
··· 73 73 #define R300_PTE_WRITEABLE (1 << 2) 74 74 #define R300_PTE_READABLE (1 << 3) 75 75 76 - void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, 77 - uint64_t addr, uint32_t flags) 76 + uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags) 78 77 { 79 - void __iomem *ptr = rdev->gart.ptr; 80 - 81 78 addr = (lower_32_bits(addr) >> 8) | 82 79 ((upper_32_bits(addr) & 0xff) << 24); 83 80 if (flags & RADEON_GART_PAGE_READ) ··· 83 86 addr |= R300_PTE_WRITEABLE; 84 87 if (!(flags & RADEON_GART_PAGE_SNOOP)) 85 88 addr |= R300_PTE_UNSNOOPED; 89 + return addr; 90 + } 91 + 92 + void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, 93 + uint64_t entry) 94 + { 95 + void __iomem *ptr = rdev->gart.ptr; 96 + 86 97 /* on x86 we want this to be CPU endian, on powerpc 87 98 * on powerpc without HW swappers, it'll get swapped on way 88 99 * into VRAM - so no need for cpu_to_le32 on VRAM tables */ 89 - writel(addr, ((void __iomem *)ptr) + (i * 4)); 100 + writel(entry, ((void __iomem *)ptr) + (i * 4)); 90 101 } 91 102 92 103 int rv370_pcie_gart_init(struct radeon_device *rdev) ··· 114 109 DRM_ERROR("Failed to register debugfs file for PCIE gart !\n"); 115 110 rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; 116 111 rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush; 112 + rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry; 117 113 rdev->asic->gart.set_page = &rv370_pcie_gart_set_page; 118 114 return radeon_gart_table_vram_alloc(rdev); 119 115 }
+6 -2
drivers/gpu/drm/radeon/radeon.h
··· 242 242 * Dummy page 243 243 */ 244 244 struct radeon_dummy_page { 245 + uint64_t entry; 245 246 struct page *page; 246 247 dma_addr_t addr; 247 248 }; ··· 647 646 unsigned table_size; 648 647 struct page **pages; 649 648 dma_addr_t *pages_addr; 649 + uint64_t *pages_entry; 650 650 bool ready; 651 651 }; 652 652 ··· 1849 1847 /* gart */ 1850 1848 struct { 1851 1849 void (*tlb_flush)(struct radeon_device *rdev); 1850 + uint64_t (*get_page_entry)(uint64_t addr, uint32_t flags); 1852 1851 void (*set_page)(struct radeon_device *rdev, unsigned i, 1853 - uint64_t addr, uint32_t flags); 1852 + uint64_t entry); 1854 1853 } gart; 1855 1854 struct { 1856 1855 int (*init)(struct radeon_device *rdev); ··· 2855 2852 #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) 2856 2853 #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev)) 2857 2854 #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev)) 2858 - #define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f)) 2855 + #define radeon_gart_get_page_entry(a, f) (rdev)->asic->gart.get_page_entry((a), (f)) 2856 + #define radeon_gart_set_page(rdev, i, e) (rdev)->asic->gart.set_page((rdev), (i), (e)) 2859 2857 #define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev)) 2860 2858 #define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev)) 2861 2859 #define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count)))
+24
drivers/gpu/drm/radeon/radeon_asic.c
··· 159 159 DRM_INFO("Forcing AGP to PCIE mode\n"); 160 160 rdev->flags |= RADEON_IS_PCIE; 161 161 rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush; 162 + rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry; 162 163 rdev->asic->gart.set_page = &rv370_pcie_gart_set_page; 163 164 } else { 164 165 DRM_INFO("Forcing AGP to PCI mode\n"); 165 166 rdev->flags |= RADEON_IS_PCI; 166 167 rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush; 168 + rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry; 167 169 rdev->asic->gart.set_page = &r100_pci_gart_set_page; 168 170 } 169 171 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; ··· 201 199 .mc_wait_for_idle = &r100_mc_wait_for_idle, 202 200 .gart = { 203 201 .tlb_flush = &r100_pci_gart_tlb_flush, 202 + .get_page_entry = &r100_pci_gart_get_page_entry, 204 203 .set_page = &r100_pci_gart_set_page, 205 204 }, 206 205 .ring = { ··· 268 265 .mc_wait_for_idle = &r100_mc_wait_for_idle, 269 266 .gart = { 270 267 .tlb_flush = &r100_pci_gart_tlb_flush, 268 + .get_page_entry = &r100_pci_gart_get_page_entry, 271 269 .set_page = &r100_pci_gart_set_page, 272 270 }, 273 271 .ring = { ··· 363 359 .mc_wait_for_idle = &r300_mc_wait_for_idle, 364 360 .gart = { 365 361 .tlb_flush = &r100_pci_gart_tlb_flush, 362 + .get_page_entry = &r100_pci_gart_get_page_entry, 366 363 .set_page = &r100_pci_gart_set_page, 367 364 }, 368 365 .ring = { ··· 430 425 .mc_wait_for_idle = &r300_mc_wait_for_idle, 431 426 .gart = { 432 427 .tlb_flush = &rv370_pcie_gart_tlb_flush, 428 + .get_page_entry = &rv370_pcie_gart_get_page_entry, 433 429 .set_page = &rv370_pcie_gart_set_page, 434 430 }, 435 431 .ring = { ··· 497 491 .mc_wait_for_idle = &r300_mc_wait_for_idle, 498 492 .gart = { 499 493 .tlb_flush = &rv370_pcie_gart_tlb_flush, 494 + .get_page_entry = &rv370_pcie_gart_get_page_entry, 500 495 .set_page = &rv370_pcie_gart_set_page, 501 496 }, 502 497 .ring = { ··· 564 557 .mc_wait_for_idle = &rs400_mc_wait_for_idle, 565 558 .gart = { 566 
559 .tlb_flush = &rs400_gart_tlb_flush, 560 + .get_page_entry = &rs400_gart_get_page_entry, 567 561 .set_page = &rs400_gart_set_page, 568 562 }, 569 563 .ring = { ··· 631 623 .mc_wait_for_idle = &rs600_mc_wait_for_idle, 632 624 .gart = { 633 625 .tlb_flush = &rs600_gart_tlb_flush, 626 + .get_page_entry = &rs600_gart_get_page_entry, 634 627 .set_page = &rs600_gart_set_page, 635 628 }, 636 629 .ring = { ··· 700 691 .mc_wait_for_idle = &rs690_mc_wait_for_idle, 701 692 .gart = { 702 693 .tlb_flush = &rs400_gart_tlb_flush, 694 + .get_page_entry = &rs400_gart_get_page_entry, 703 695 .set_page = &rs400_gart_set_page, 704 696 }, 705 697 .ring = { ··· 769 759 .mc_wait_for_idle = &rv515_mc_wait_for_idle, 770 760 .gart = { 771 761 .tlb_flush = &rv370_pcie_gart_tlb_flush, 762 + .get_page_entry = &rv370_pcie_gart_get_page_entry, 772 763 .set_page = &rv370_pcie_gart_set_page, 773 764 }, 774 765 .ring = { ··· 836 825 .mc_wait_for_idle = &r520_mc_wait_for_idle, 837 826 .gart = { 838 827 .tlb_flush = &rv370_pcie_gart_tlb_flush, 828 + .get_page_entry = &rv370_pcie_gart_get_page_entry, 839 829 .set_page = &rv370_pcie_gart_set_page, 840 830 }, 841 831 .ring = { ··· 931 919 .get_gpu_clock_counter = &r600_get_gpu_clock_counter, 932 920 .gart = { 933 921 .tlb_flush = &r600_pcie_gart_tlb_flush, 922 + .get_page_entry = &rs600_gart_get_page_entry, 934 923 .set_page = &rs600_gart_set_page, 935 924 }, 936 925 .ring = { ··· 1017 1004 .get_gpu_clock_counter = &r600_get_gpu_clock_counter, 1018 1005 .gart = { 1019 1006 .tlb_flush = &r600_pcie_gart_tlb_flush, 1007 + .get_page_entry = &rs600_gart_get_page_entry, 1020 1008 .set_page = &rs600_gart_set_page, 1021 1009 }, 1022 1010 .ring = { ··· 1109 1095 .get_gpu_clock_counter = &r600_get_gpu_clock_counter, 1110 1096 .gart = { 1111 1097 .tlb_flush = &r600_pcie_gart_tlb_flush, 1098 + .get_page_entry = &rs600_gart_get_page_entry, 1112 1099 .set_page = &rs600_gart_set_page, 1113 1100 }, 1114 1101 .ring = { ··· 1214 1199 .get_gpu_clock_counter = 
&r600_get_gpu_clock_counter, 1215 1200 .gart = { 1216 1201 .tlb_flush = &r600_pcie_gart_tlb_flush, 1202 + .get_page_entry = &rs600_gart_get_page_entry, 1217 1203 .set_page = &rs600_gart_set_page, 1218 1204 }, 1219 1205 .ring = { ··· 1333 1317 .get_gpu_clock_counter = &r600_get_gpu_clock_counter, 1334 1318 .gart = { 1335 1319 .tlb_flush = &evergreen_pcie_gart_tlb_flush, 1320 + .get_page_entry = &rs600_gart_get_page_entry, 1336 1321 .set_page = &rs600_gart_set_page, 1337 1322 }, 1338 1323 .ring = { ··· 1426 1409 .get_gpu_clock_counter = &r600_get_gpu_clock_counter, 1427 1410 .gart = { 1428 1411 .tlb_flush = &evergreen_pcie_gart_tlb_flush, 1412 + .get_page_entry = &rs600_gart_get_page_entry, 1429 1413 .set_page = &rs600_gart_set_page, 1430 1414 }, 1431 1415 .ring = { ··· 1518 1500 .get_gpu_clock_counter = &r600_get_gpu_clock_counter, 1519 1501 .gart = { 1520 1502 .tlb_flush = &evergreen_pcie_gart_tlb_flush, 1503 + .get_page_entry = &rs600_gart_get_page_entry, 1521 1504 .set_page = &rs600_gart_set_page, 1522 1505 }, 1523 1506 .ring = { ··· 1654 1635 .get_gpu_clock_counter = &r600_get_gpu_clock_counter, 1655 1636 .gart = { 1656 1637 .tlb_flush = &cayman_pcie_gart_tlb_flush, 1638 + .get_page_entry = &rs600_gart_get_page_entry, 1657 1639 .set_page = &rs600_gart_set_page, 1658 1640 }, 1659 1641 .vm = { ··· 1758 1738 .get_gpu_clock_counter = &r600_get_gpu_clock_counter, 1759 1739 .gart = { 1760 1740 .tlb_flush = &cayman_pcie_gart_tlb_flush, 1741 + .get_page_entry = &rs600_gart_get_page_entry, 1761 1742 .set_page = &rs600_gart_set_page, 1762 1743 }, 1763 1744 .vm = { ··· 1892 1871 .get_gpu_clock_counter = &si_get_gpu_clock_counter, 1893 1872 .gart = { 1894 1873 .tlb_flush = &si_pcie_gart_tlb_flush, 1874 + .get_page_entry = &rs600_gart_get_page_entry, 1895 1875 .set_page = &rs600_gart_set_page, 1896 1876 }, 1897 1877 .vm = { ··· 2054 2032 .get_gpu_clock_counter = &cik_get_gpu_clock_counter, 2055 2033 .gart = { 2056 2034 .tlb_flush = &cik_pcie_gart_tlb_flush, 2035 + 
.get_page_entry = &rs600_gart_get_page_entry, 2057 2036 .set_page = &rs600_gart_set_page, 2058 2037 }, 2059 2038 .vm = { ··· 2162 2139 .get_gpu_clock_counter = &cik_get_gpu_clock_counter, 2163 2140 .gart = { 2164 2141 .tlb_flush = &cik_pcie_gart_tlb_flush, 2142 + .get_page_entry = &rs600_gart_get_page_entry, 2165 2143 .set_page = &rs600_gart_set_page, 2166 2144 }, 2167 2145 .vm = {
+8 -4
drivers/gpu/drm/radeon/radeon_asic.h
··· 67 67 int r100_asic_reset(struct radeon_device *rdev); 68 68 u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); 69 69 void r100_pci_gart_tlb_flush(struct radeon_device *rdev); 70 + uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags); 70 71 void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i, 71 - uint64_t addr, uint32_t flags); 72 + uint64_t entry); 72 73 void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring); 73 74 int r100_irq_set(struct radeon_device *rdev); 74 75 int r100_irq_process(struct radeon_device *rdev); ··· 173 172 struct radeon_fence *fence); 174 173 extern int r300_cs_parse(struct radeon_cs_parser *p); 175 174 extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev); 175 + extern uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags); 176 176 extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, 177 - uint64_t addr, uint32_t flags); 177 + uint64_t entry); 178 178 extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); 179 179 extern int rv370_get_pcie_lanes(struct radeon_device *rdev); 180 180 extern void r300_set_reg_safe(struct radeon_device *rdev); ··· 210 208 extern int rs400_suspend(struct radeon_device *rdev); 211 209 extern int rs400_resume(struct radeon_device *rdev); 212 210 void rs400_gart_tlb_flush(struct radeon_device *rdev); 211 + uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags); 213 212 void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, 214 - uint64_t addr, uint32_t flags); 213 + uint64_t entry); 215 214 uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); 216 215 void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 217 216 int rs400_gart_init(struct radeon_device *rdev); ··· 235 232 void rs600_irq_disable(struct radeon_device *rdev); 236 233 u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc); 237 234 void 
rs600_gart_tlb_flush(struct radeon_device *rdev); 235 + uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags); 238 236 void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, 239 - uint64_t addr, uint32_t flags); 237 + uint64_t entry); 240 238 uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); 241 239 void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 242 240 void rs600_bandwidth_update(struct radeon_device *rdev);
+2
drivers/gpu/drm/radeon/radeon_device.c
··· 774 774 rdev->dummy_page.page = NULL; 775 775 return -ENOMEM; 776 776 } 777 + rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr, 778 + RADEON_GART_PAGE_DUMMY); 777 779 return 0; 778 780 } 779 781
+24 -15
drivers/gpu/drm/radeon/radeon_gart.c
··· 228 228 unsigned t; 229 229 unsigned p; 230 230 int i, j; 231 - u64 page_base; 232 231 233 232 if (!rdev->gart.ready) { 234 233 WARN(1, "trying to unbind memory from uninitialized GART !\n"); ··· 239 240 if (rdev->gart.pages[p]) { 240 241 rdev->gart.pages[p] = NULL; 241 242 rdev->gart.pages_addr[p] = rdev->dummy_page.addr; 242 - page_base = rdev->gart.pages_addr[p]; 243 243 for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { 244 + rdev->gart.pages_entry[t] = rdev->dummy_page.entry; 244 245 if (rdev->gart.ptr) { 245 - radeon_gart_set_page(rdev, t, page_base, 246 - RADEON_GART_PAGE_DUMMY); 246 + radeon_gart_set_page(rdev, t, 247 + rdev->dummy_page.entry); 247 248 } 248 - page_base += RADEON_GPU_PAGE_SIZE; 249 249 } 250 250 } 251 251 } ··· 272 274 { 273 275 unsigned t; 274 276 unsigned p; 275 - uint64_t page_base; 277 + uint64_t page_base, page_entry; 276 278 int i, j; 277 279 278 280 if (!rdev->gart.ready) { ··· 285 287 for (i = 0; i < pages; i++, p++) { 286 288 rdev->gart.pages_addr[p] = dma_addr[i]; 287 289 rdev->gart.pages[p] = pagelist[i]; 288 - if (rdev->gart.ptr) { 289 - page_base = rdev->gart.pages_addr[p]; 290 - for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { 291 - radeon_gart_set_page(rdev, t, page_base, flags); 292 - page_base += RADEON_GPU_PAGE_SIZE; 290 + page_base = dma_addr[i]; 291 + for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { 292 + page_entry = radeon_gart_get_page_entry(page_base, flags); 293 + rdev->gart.pages_entry[t] = page_entry; 294 + if (rdev->gart.ptr) { 295 + radeon_gart_set_page(rdev, t, page_entry); 293 296 } 297 + page_base += RADEON_GPU_PAGE_SIZE; 294 298 } 295 299 } 296 300 mb(); ··· 340 340 radeon_gart_fini(rdev); 341 341 return -ENOMEM; 342 342 } 343 - /* set GART entry to point to the dummy page by default */ 344 - for (i = 0; i < rdev->gart.num_cpu_pages; i++) { 345 - rdev->gart.pages_addr[i] = rdev->dummy_page.addr; 343 + rdev->gart.pages_entry = vmalloc(sizeof(uint64_t) * 344 + 
rdev->gart.num_gpu_pages); 345 + if (rdev->gart.pages_entry == NULL) { 346 + radeon_gart_fini(rdev); 347 + return -ENOMEM; 346 348 } 349 + /* set GART entry to point to the dummy page by default */ 350 + for (i = 0; i < rdev->gart.num_cpu_pages; i++) 351 + rdev->gart.pages_addr[i] = rdev->dummy_page.addr; 352 + for (i = 0; i < rdev->gart.num_gpu_pages; i++) 353 + rdev->gart.pages_entry[i] = rdev->dummy_page.entry; 347 354 return 0; 348 355 } 349 356 ··· 363 356 */ 364 357 void radeon_gart_fini(struct radeon_device *rdev) 365 358 { 366 - if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) { 359 + if (rdev->gart.ready) { 367 360 /* unbind pages */ 368 361 radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages); 369 362 } 370 363 rdev->gart.ready = false; 371 364 vfree(rdev->gart.pages); 372 365 vfree(rdev->gart.pages_addr); 366 + vfree(rdev->gart.pages_entry); 373 367 rdev->gart.pages = NULL; 374 368 rdev->gart.pages_addr = NULL; 369 + rdev->gart.pages_entry = NULL; 375 370 376 371 radeon_dummy_page_fini(rdev); 377 372 }
+9 -5
drivers/gpu/drm/radeon/rs400.c
··· 212 212 #define RS400_PTE_WRITEABLE (1 << 2) 213 213 #define RS400_PTE_READABLE (1 << 3) 214 214 215 - void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, 216 - uint64_t addr, uint32_t flags) 215 + uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags) 217 216 { 218 217 uint32_t entry; 219 - u32 *gtt = rdev->gart.ptr; 220 218 221 219 entry = (lower_32_bits(addr) & PAGE_MASK) | 222 220 ((upper_32_bits(addr) & 0xff) << 4); ··· 224 226 entry |= RS400_PTE_WRITEABLE; 225 227 if (!(flags & RADEON_GART_PAGE_SNOOP)) 226 228 entry |= RS400_PTE_UNSNOOPED; 227 - entry = cpu_to_le32(entry); 228 - gtt[i] = entry; 229 + return entry; 230 + } 231 + 232 + void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, 233 + uint64_t entry) 234 + { 235 + u32 *gtt = rdev->gart.ptr; 236 + gtt[i] = cpu_to_le32(lower_32_bits(entry)); 229 237 } 230 238 231 239 int rs400_mc_wait_for_idle(struct radeon_device *rdev)
+9 -5
drivers/gpu/drm/radeon/rs600.c
··· 625 625 radeon_gart_table_vram_free(rdev); 626 626 } 627 627 628 - void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, 629 - uint64_t addr, uint32_t flags) 628 + uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags) 630 629 { 631 - void __iomem *ptr = (void *)rdev->gart.ptr; 632 - 633 630 addr = addr & 0xFFFFFFFFFFFFF000ULL; 634 631 addr |= R600_PTE_SYSTEM; 635 632 if (flags & RADEON_GART_PAGE_VALID) ··· 637 640 addr |= R600_PTE_WRITEABLE; 638 641 if (flags & RADEON_GART_PAGE_SNOOP) 639 642 addr |= R600_PTE_SNOOPED; 640 - writeq(addr, ptr + (i * 8)); 643 + return addr; 644 + } 645 + 646 + void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, 647 + uint64_t entry) 648 + { 649 + void __iomem *ptr = (void *)rdev->gart.ptr; 650 + writeq(entry, ptr + (i * 8)); 641 651 } 642 652 643 653 int rs600_irq_set(struct radeon_device *rdev)