Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] AGP fix for Xen VMM

When Linux is running on the Xen virtual machine monitor, physical
addresses are virtualised and cannot be directly referenced by the AGP
GART. This patch fixes the GART driver for Xen by adding a layer of
abstraction between physical addresses and 'GART addresses'.

Architecture-specific functions are also defined for allocating and freeing
the GATT. Xen requires this to ensure that the table really is contiguous from
the point of view of the GART.

These extra interface functions are defined as 'no-ops' for all existing
architectures that use the GART driver.

Signed-off-by: Keir Fraser <keir@xensource.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Dave Jones <davej@redhat.com>

Authored by Keir Fraser; committed by Dave Jones.
07eee78e e29b545c

+106 -35
+2
drivers/char/agp/agp.h
··· 278 278 #define AGP_GENERIC_SIZES_ENTRIES 11 279 279 extern struct aper_size_info_16 agp3_generic_sizes[]; 280 280 281 + #define virt_to_gart(x) (phys_to_gart(virt_to_phys(x))) 282 + #define gart_to_virt(x) (phys_to_virt(gart_to_phys(x))) 281 283 282 284 extern int agp_off; 283 285 extern int agp_try_unsupported_boot;
+2 -2
drivers/char/agp/ali-agp.c
··· 150 150 pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp); 151 151 pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, 152 152 (((temp & ALI_CACHE_FLUSH_ADDR_MASK) | 153 - virt_to_phys(addr)) | ALI_CACHE_FLUSH_EN )); 153 + virt_to_gart(addr)) | ALI_CACHE_FLUSH_EN )); 154 154 return addr; 155 155 } 156 156 ··· 174 174 pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp); 175 175 pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, 176 176 (((temp & ALI_CACHE_FLUSH_ADDR_MASK) | 177 - virt_to_phys(addr)) | ALI_CACHE_FLUSH_EN)); 177 + virt_to_gart(addr)) | ALI_CACHE_FLUSH_EN)); 178 178 agp_generic_destroy_page(addr); 179 179 } 180 180
+3 -3
drivers/char/agp/amd-k7-agp.c
··· 43 43 44 44 SetPageReserved(virt_to_page(page_map->real)); 45 45 global_cache_flush(); 46 - page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real), 46 + page_map->remapped = ioremap_nocache(virt_to_gart(page_map->real), 47 47 PAGE_SIZE); 48 48 if (page_map->remapped == NULL) { 49 49 ClearPageReserved(virt_to_page(page_map->real)); ··· 154 154 155 155 agp_bridge->gatt_table_real = (u32 *)page_dir.real; 156 156 agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped; 157 - agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real); 157 + agp_bridge->gatt_bus_addr = virt_to_gart(page_dir.real); 158 158 159 159 /* Get the address for the gart region. 160 160 * This is a bus address even on the alpha, b/c its ··· 167 167 168 168 /* Calculate the agp offset */ 169 169 for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) { 170 - writel(virt_to_phys(amd_irongate_private.gatt_pages[i]->real) | 1, 170 + writel(virt_to_gart(amd_irongate_private.gatt_pages[i]->real) | 1, 171 171 page_dir.remapped+GET_PAGE_DIR_OFF(addr)); 172 172 readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */ 173 173 }
+2 -2
drivers/char/agp/amd64-agp.c
··· 219 219 220 220 static int amd_8151_configure(void) 221 221 { 222 - unsigned long gatt_bus = virt_to_phys(agp_bridge->gatt_table_real); 222 + unsigned long gatt_bus = virt_to_gart(agp_bridge->gatt_table_real); 223 223 224 224 /* Configure AGP regs in each x86-64 host bridge. */ 225 225 for_each_nb() { ··· 591 591 { 592 592 struct agp_bridge_data *bridge = pci_get_drvdata(pdev); 593 593 594 - release_mem_region(virt_to_phys(bridge->gatt_table_real), 594 + release_mem_region(virt_to_gart(bridge->gatt_table_real), 595 595 amd64_aperture_sizes[bridge->aperture_size_idx].size); 596 596 agp_remove_bridge(bridge); 597 597 agp_put_bridge(bridge);
+3 -3
drivers/char/agp/ati-agp.c
··· 61 61 62 62 SetPageReserved(virt_to_page(page_map->real)); 63 63 err = map_page_into_agp(virt_to_page(page_map->real)); 64 - page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real), 64 + page_map->remapped = ioremap_nocache(virt_to_gart(page_map->real), 65 65 PAGE_SIZE); 66 66 if (page_map->remapped == NULL || err) { 67 67 ClearPageReserved(virt_to_page(page_map->real)); ··· 343 343 344 344 agp_bridge->gatt_table_real = (u32 *)page_dir.real; 345 345 agp_bridge->gatt_table = (u32 __iomem *) page_dir.remapped; 346 - agp_bridge->gatt_bus_addr = virt_to_bus(page_dir.real); 346 + agp_bridge->gatt_bus_addr = virt_to_gart(page_dir.real); 347 347 348 348 /* Write out the size register */ 349 349 current_size = A_SIZE_LVL2(agp_bridge->current_size); ··· 373 373 374 374 /* Calculate the agp offset */ 375 375 for(i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) { 376 - writel(virt_to_bus(ati_generic_private.gatt_pages[i]->real) | 1, 376 + writel(virt_to_gart(ati_generic_private.gatt_pages[i]->real) | 1, 377 377 page_dir.remapped+GET_PAGE_DIR_OFF(addr)); 378 378 readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */ 379 379 }
+3 -3
drivers/char/agp/backend.c
··· 148 148 return -ENOMEM; 149 149 } 150 150 151 - bridge->scratch_page_real = virt_to_phys(addr); 151 + bridge->scratch_page_real = virt_to_gart(addr); 152 152 bridge->scratch_page = 153 153 bridge->driver->mask_memory(bridge, bridge->scratch_page_real, 0); 154 154 } ··· 189 189 err_out: 190 190 if (bridge->driver->needs_scratch_page) 191 191 bridge->driver->agp_destroy_page( 192 - phys_to_virt(bridge->scratch_page_real)); 192 + gart_to_virt(bridge->scratch_page_real)); 193 193 if (got_gatt) 194 194 bridge->driver->free_gatt_table(bridge); 195 195 if (got_keylist) { ··· 214 214 if (bridge->driver->agp_destroy_page && 215 215 bridge->driver->needs_scratch_page) 216 216 bridge->driver->agp_destroy_page( 217 - phys_to_virt(bridge->scratch_page_real)); 217 + gart_to_virt(bridge->scratch_page_real)); 218 218 } 219 219 220 220 /* When we remove the global variable agp_bridge from all drivers
+1 -1
drivers/char/agp/efficeon-agp.c
··· 219 219 220 220 efficeon_private.l1_table[index] = page; 221 221 222 - value = __pa(page) | pati | present | index; 222 + value = virt_to_gart(page) | pati | present | index; 223 223 224 224 pci_write_config_dword(agp_bridge->dev, 225 225 EFFICEON_ATTPAGE, value);
+8 -9
drivers/char/agp/generic.c
··· 153 153 } 154 154 if (curr->page_count != 0) { 155 155 for (i = 0; i < curr->page_count; i++) { 156 - curr->bridge->driver->agp_destroy_page(phys_to_virt(curr->memory[i])); 156 + curr->bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[i])); 157 157 } 158 158 } 159 159 agp_free_key(curr->key); ··· 209 209 agp_free_memory(new); 210 210 return NULL; 211 211 } 212 - new->memory[i] = virt_to_phys(addr); 212 + new->memory[i] = virt_to_gart(addr); 213 213 new->page_count++; 214 214 } 215 215 new->bridge = bridge; ··· 806 806 break; 807 807 } 808 808 809 - table = (char *) __get_free_pages(GFP_KERNEL, 810 - page_order); 809 + table = alloc_gatt_pages(page_order); 811 810 812 811 if (table == NULL) { 813 812 i++; ··· 837 838 size = ((struct aper_size_info_fixed *) temp)->size; 838 839 page_order = ((struct aper_size_info_fixed *) temp)->page_order; 839 840 num_entries = ((struct aper_size_info_fixed *) temp)->num_entries; 840 - table = (char *) __get_free_pages(GFP_KERNEL, page_order); 841 + table = alloc_gatt_pages(page_order); 841 842 } 842 843 843 844 if (table == NULL) ··· 852 853 agp_gatt_table = (void *)table; 853 854 854 855 bridge->driver->cache_flush(); 855 - bridge->gatt_table = ioremap_nocache(virt_to_phys(table), 856 + bridge->gatt_table = ioremap_nocache(virt_to_gart(table), 856 857 (PAGE_SIZE * (1 << page_order))); 857 858 bridge->driver->cache_flush(); 858 859 ··· 860 861 for (page = virt_to_page(table); page <= virt_to_page(table_end); page++) 861 862 ClearPageReserved(page); 862 863 863 - free_pages((unsigned long) table, page_order); 864 + free_gatt_pages(table, page_order); 864 865 865 866 return -ENOMEM; 866 867 } 867 - bridge->gatt_bus_addr = virt_to_phys(bridge->gatt_table_real); 868 + bridge->gatt_bus_addr = virt_to_gart(bridge->gatt_table_real); 868 869 869 870 /* AK: bogus, should encode addresses > 4GB */ 870 871 for (i = 0; i < num_entries; i++) { ··· 918 919 for (page = virt_to_page(table); page <= virt_to_page(table_end); page++) 
919 920 ClearPageReserved(page); 920 921 921 - free_pages((unsigned long) bridge->gatt_table_real, page_order); 922 + free_gatt_pages(bridge->gatt_table_real, page_order); 922 923 923 924 agp_gatt_table = NULL; 924 925 bridge->gatt_table = NULL;
+2 -2
drivers/char/agp/hp-agp.c
··· 110 110 hp->gart_size = HP_ZX1_GART_SIZE; 111 111 hp->gatt_entries = hp->gart_size / hp->io_page_size; 112 112 113 - hp->io_pdir = phys_to_virt(readq(hp->ioc_regs+HP_ZX1_PDIR_BASE)); 113 + hp->io_pdir = gart_to_virt(readq(hp->ioc_regs+HP_ZX1_PDIR_BASE)); 114 114 hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)]; 115 115 116 116 if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) { ··· 248 248 agp_bridge->mode = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS); 249 249 250 250 if (hp->io_pdir_owner) { 251 - writel(virt_to_phys(hp->io_pdir), hp->ioc_regs+HP_ZX1_PDIR_BASE); 251 + writel(virt_to_gart(hp->io_pdir), hp->ioc_regs+HP_ZX1_PDIR_BASE); 252 252 readl(hp->ioc_regs+HP_ZX1_PDIR_BASE); 253 253 writel(hp->io_tlb_ps, hp->ioc_regs+HP_ZX1_TCNFG); 254 254 readl(hp->ioc_regs+HP_ZX1_TCNFG);
+2 -2
drivers/char/agp/i460-agp.c
··· 372 372 } 373 373 memset(lp->alloced_map, 0, map_size); 374 374 375 - lp->paddr = virt_to_phys(lpage); 375 + lp->paddr = virt_to_gart(lpage); 376 376 lp->refcount = 0; 377 377 atomic_add(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp); 378 378 return 0; ··· 383 383 kfree(lp->alloced_map); 384 384 lp->alloced_map = NULL; 385 385 386 - free_pages((unsigned long) phys_to_virt(lp->paddr), I460_IO_PAGE_SHIFT - PAGE_SHIFT); 386 + free_pages((unsigned long) gart_to_virt(lp->paddr), I460_IO_PAGE_SHIFT - PAGE_SHIFT); 387 387 atomic_sub(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp); 388 388 } 389 389
+3 -3
drivers/char/agp/intel-agp.c
··· 286 286 if (new == NULL) 287 287 return NULL; 288 288 289 - new->memory[0] = virt_to_phys(addr); 289 + new->memory[0] = virt_to_gart(addr); 290 290 if (pg_count == 4) { 291 291 /* kludge to get 4 physical pages for ARGB cursor */ 292 292 new->memory[1] = new->memory[0] + PAGE_SIZE; ··· 329 329 agp_free_key(curr->key); 330 330 if(curr->type == AGP_PHYS_MEMORY) { 331 331 if (curr->page_count == 4) 332 - i8xx_destroy_pages(phys_to_virt(curr->memory[0])); 332 + i8xx_destroy_pages(gart_to_virt(curr->memory[0])); 333 333 else 334 334 agp_bridge->driver->agp_destroy_page( 335 - phys_to_virt(curr->memory[0])); 335 + gart_to_virt(curr->memory[0])); 336 336 vfree(curr->memory); 337 337 } 338 338 kfree(curr);
+4 -4
drivers/char/agp/sworks-agp.c
··· 51 51 } 52 52 SetPageReserved(virt_to_page(page_map->real)); 53 53 global_cache_flush(); 54 - page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real), 54 + page_map->remapped = ioremap_nocache(virt_to_gart(page_map->real), 55 55 PAGE_SIZE); 56 56 if (page_map->remapped == NULL) { 57 57 ClearPageReserved(virt_to_page(page_map->real)); ··· 162 162 /* Create a fake scratch directory */ 163 163 for(i = 0; i < 1024; i++) { 164 164 writel(agp_bridge->scratch_page, serverworks_private.scratch_dir.remapped+i); 165 - writel(virt_to_phys(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i); 165 + writel(virt_to_gart(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i); 166 166 } 167 167 168 168 retval = serverworks_create_gatt_pages(value->num_entries / 1024); ··· 174 174 175 175 agp_bridge->gatt_table_real = (u32 *)page_dir.real; 176 176 agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped; 177 - agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real); 177 + agp_bridge->gatt_bus_addr = virt_to_gart(page_dir.real); 178 178 179 179 /* Get the address for the gart region. 180 180 * This is a bus address even on the alpha, b/c its ··· 187 187 /* Calculate the agp offset */ 188 188 189 189 for(i = 0; i < value->num_entries / 1024; i++) 190 - writel(virt_to_phys(serverworks_private.gatt_pages[i]->real)|1, page_dir.remapped+i); 190 + writel(virt_to_gart(serverworks_private.gatt_pages[i]->real)|1, page_dir.remapped+i); 191 191 192 192 return 0; 193 193 }
+1 -1
drivers/char/agp/uninorth-agp.c
··· 407 407 408 408 bridge->gatt_table_real = (u32 *) table; 409 409 bridge->gatt_table = (u32 *)table; 410 - bridge->gatt_bus_addr = virt_to_phys(table); 410 + bridge->gatt_bus_addr = virt_to_gart(table); 411 411 412 412 for (i = 0; i < num_entries; i++) 413 413 bridge->gatt_table[i] = 0;
+10
include/asm-alpha/agp.h
··· 10 10 #define flush_agp_mappings() 11 11 #define flush_agp_cache() mb() 12 12 13 + /* Convert a physical address to an address suitable for the GART. */ 14 + #define phys_to_gart(x) (x) 15 + #define gart_to_phys(x) (x) 16 + 17 + /* GATT allocation. Returns/accepts GATT kernel virtual address. */ 18 + #define alloc_gatt_pages(order) \ 19 + ((char *)__get_free_pages(GFP_KERNEL, (order))) 20 + #define free_gatt_pages(table, order) \ 21 + free_pages((unsigned long)(table), (order)) 22 + 13 23 #endif
+10
include/asm-i386/agp.h
··· 21 21 worth it. Would need a page for it. */ 22 22 #define flush_agp_cache() asm volatile("wbinvd":::"memory") 23 23 24 + /* Convert a physical address to an address suitable for the GART. */ 25 + #define phys_to_gart(x) (x) 26 + #define gart_to_phys(x) (x) 27 + 28 + /* GATT allocation. Returns/accepts GATT kernel virtual address. */ 29 + #define alloc_gatt_pages(order) \ 30 + ((char *)__get_free_pages(GFP_KERNEL, (order))) 31 + #define free_gatt_pages(table, order) \ 32 + free_pages((unsigned long)(table), (order)) 33 + 24 34 #endif
+10
include/asm-ia64/agp.h
··· 18 18 #define flush_agp_mappings() /* nothing */ 19 19 #define flush_agp_cache() mb() 20 20 21 + /* Convert a physical address to an address suitable for the GART. */ 22 + #define phys_to_gart(x) (x) 23 + #define gart_to_phys(x) (x) 24 + 25 + /* GATT allocation. Returns/accepts GATT kernel virtual address. */ 26 + #define alloc_gatt_pages(order) \ 27 + ((char *)__get_free_pages(GFP_KERNEL, (order))) 28 + #define free_gatt_pages(table, order) \ 29 + free_pages((unsigned long)(table), (order)) 30 + 21 31 #endif /* _ASM_IA64_AGP_H */
+10
include/asm-ppc/agp.h
··· 10 10 #define flush_agp_mappings() 11 11 #define flush_agp_cache() mb() 12 12 13 + /* Convert a physical address to an address suitable for the GART. */ 14 + #define phys_to_gart(x) (x) 15 + #define gart_to_phys(x) (x) 16 + 17 + /* GATT allocation. Returns/accepts GATT kernel virtual address. */ 18 + #define alloc_gatt_pages(order) \ 19 + ((char *)__get_free_pages(GFP_KERNEL, (order))) 20 + #define free_gatt_pages(table, order) \ 21 + free_pages((unsigned long)(table), (order)) 22 + 13 23 #endif
+10
include/asm-ppc64/agp.h
··· 10 10 #define flush_agp_mappings() 11 11 #define flush_agp_cache() mb() 12 12 13 + /* Convert a physical address to an address suitable for the GART. */ 14 + #define phys_to_gart(x) (x) 15 + #define gart_to_phys(x) (x) 16 + 17 + /* GATT allocation. Returns/accepts GATT kernel virtual address. */ 18 + #define alloc_gatt_pages(order) \ 19 + ((char *)__get_free_pages(GFP_KERNEL, (order))) 20 + #define free_gatt_pages(table, order) \ 21 + free_pages((unsigned long)(table), (order)) 22 + 13 23 #endif
+10
include/asm-sparc64/agp.h
··· 8 8 #define flush_agp_mappings() 9 9 #define flush_agp_cache() mb() 10 10 11 + /* Convert a physical address to an address suitable for the GART. */ 12 + #define phys_to_gart(x) (x) 13 + #define gart_to_phys(x) (x) 14 + 15 + /* GATT allocation. Returns/accepts GATT kernel virtual address. */ 16 + #define alloc_gatt_pages(order) \ 17 + ((char *)__get_free_pages(GFP_KERNEL, (order))) 18 + #define free_gatt_pages(table, order) \ 19 + free_pages((unsigned long)(table), (order)) 20 + 11 21 #endif
+10
include/asm-x86_64/agp.h
··· 19 19 worth it. Would need a page for it. */ 20 20 #define flush_agp_cache() asm volatile("wbinvd":::"memory") 21 21 22 + /* Convert a physical address to an address suitable for the GART. */ 23 + #define phys_to_gart(x) (x) 24 + #define gart_to_phys(x) (x) 25 + 26 + /* GATT allocation. Returns/accepts GATT kernel virtual address. */ 27 + #define alloc_gatt_pages(order) \ 28 + ((char *)__get_free_pages(GFP_KERNEL, (order))) 29 + #define free_gatt_pages(table, order) \ 30 + free_pages((unsigned long)(table), (order)) 31 + 22 32 #endif