x86, amd-nb: Complete the rename of AMD NB and related code

Not only was the naming of the files confusing, it was even more so for
the function and variable names.

Rename the K8 northbridge (NB) and NUMA code that is also used on other
AMD platforms. This also renames the CONFIG_K8_NUMA option to
CONFIG_AMD_NUMA and the related file k8topology_64.c to
amdtopology_64.c. No functional changes intended.

Signed-off-by: Hans Rosenfeld <hans.rosenfeld@amd.com>
Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
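
The rename is mechanical (k8_* becomes amd_*, K8_NUMA becomes AMD_NUMA), but
any out-of-tree user of <asm/amd_nb.h> has to follow it. A minimal sketch of
how a hypothetical caller would migrate (illustration only, not part of this
commit):

	#include <asm/amd_nb.h>

	/* Count the AMD northbridges found at boot. */
	static int count_nb_devices(void)
	{
		if (cache_amd_northbridges() < 0)	/* was: cache_k8_northbridges() */
			return -ENODEV;

		return amd_northbridges.num;		/* was: k8_northbridges.num */
	}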


12 files changed, 119 insertions(+), 119 deletions(-)
+6 -6
arch/x86/Kconfig
@@ -1141,16 +1141,16 @@
 comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI"
 	depends on X86_32 && X86_SUMMIT && (!HIGHMEM64G || !ACPI)
 
-config K8_NUMA
+config AMD_NUMA
 	def_bool y
 	prompt "Old style AMD Opteron NUMA detection"
 	depends on X86_64 && NUMA && PCI
 	---help---
-	  Enable K8 NUMA node topology detection. You should say Y here if
-	  you have a multi processor AMD K8 system. This uses an old
-	  method to read the NUMA configuration directly from the builtin
-	  Northbridge of Opteron. It is recommended to use X86_64_ACPI_NUMA
-	  instead, which also takes priority if both are compiled in.
+	  Enable AMD NUMA node topology detection. You should say Y here if
+	  you have a multi processor AMD system. This uses an old method to
+	  read the NUMA configuration directly from the builtin Northbridge
+	  of Opteron. It is recommended to use X86_64_ACPI_NUMA instead,
+	  which also takes priority if both are compiled in.
 
 config X86_64_ACPI_NUMA
 	def_bool y
+12 -12
arch/x86/include/asm/amd_nb.h
@@ -3,33 +3,33 @@
 
 #include <linux/pci.h>
 
-extern struct pci_device_id k8_nb_ids[];
+extern struct pci_device_id amd_nb_ids[];
 struct bootnode;
 
-extern int early_is_k8_nb(u32 value);
-extern int cache_k8_northbridges(void);
-extern void k8_flush_garts(void);
-extern int k8_get_nodes(struct bootnode *nodes);
-extern int k8_numa_init(unsigned long start_pfn, unsigned long end_pfn);
-extern int k8_scan_nodes(void);
+extern int early_is_amd_nb(u32 value);
+extern int cache_amd_northbridges(void);
+extern void amd_flush_garts(void);
+extern int amd_get_nodes(struct bootnode *nodes);
+extern int amd_numa_init(unsigned long start_pfn, unsigned long end_pfn);
+extern int amd_scan_nodes(void);
 
-struct k8_northbridge_info {
+struct amd_northbridge_info {
 	u16 num;
 	u8 gart_supported;
 	struct pci_dev **nb_misc;
 };
-extern struct k8_northbridge_info k8_northbridges;
+extern struct amd_northbridge_info amd_northbridges;
 
 #ifdef CONFIG_AMD_NB
 
-static inline struct pci_dev *node_to_k8_nb_misc(int node)
+static inline struct pci_dev *node_to_amd_nb_misc(int node)
 {
-	return (node < k8_northbridges.num) ? k8_northbridges.nb_misc[node] : NULL;
+	return (node < amd_northbridges.num) ? amd_northbridges.nb_misc[node] : NULL;
 }
 
 #else
 
-static inline struct pci_dev *node_to_k8_nb_misc(int node)
+static inline struct pci_dev *node_to_amd_nb_misc(int node)
 {
 	return NULL;
 }
+36 -36
arch/x86/kernel/amd_nb.c
@@ -12,95 +12,95 @@
 
 static u32 *flush_words;
 
-struct pci_device_id k8_nb_ids[] = {
+struct pci_device_id amd_nb_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_MISC) },
 	{}
 };
-EXPORT_SYMBOL(k8_nb_ids);
+EXPORT_SYMBOL(amd_nb_ids);
 
-struct k8_northbridge_info k8_northbridges;
-EXPORT_SYMBOL(k8_northbridges);
+struct amd_northbridge_info amd_northbridges;
+EXPORT_SYMBOL(amd_northbridges);
 
-static struct pci_dev *next_k8_northbridge(struct pci_dev *dev)
+static struct pci_dev *next_amd_northbridge(struct pci_dev *dev)
 {
 	do {
 		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
 		if (!dev)
 			break;
-	} while (!pci_match_id(&k8_nb_ids[0], dev));
+	} while (!pci_match_id(&amd_nb_ids[0], dev));
 	return dev;
 }
 
-int cache_k8_northbridges(void)
+int cache_amd_northbridges(void)
 {
 	int i;
 	struct pci_dev *dev;
 
-	if (k8_northbridges.num)
+	if (amd_northbridges.num)
 		return 0;
 
 	dev = NULL;
-	while ((dev = next_k8_northbridge(dev)) != NULL)
-		k8_northbridges.num++;
+	while ((dev = next_amd_northbridge(dev)) != NULL)
+		amd_northbridges.num++;
 
 	/* some CPU families (e.g. family 0x11) do not support GART */
 	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
 	    boot_cpu_data.x86 == 0x15)
-		k8_northbridges.gart_supported = 1;
+		amd_northbridges.gart_supported = 1;
 
-	k8_northbridges.nb_misc = kmalloc((k8_northbridges.num + 1) *
+	amd_northbridges.nb_misc = kmalloc((amd_northbridges.num + 1) *
 					  sizeof(void *), GFP_KERNEL);
-	if (!k8_northbridges.nb_misc)
+	if (!amd_northbridges.nb_misc)
 		return -ENOMEM;
 
-	if (!k8_northbridges.num) {
-		k8_northbridges.nb_misc[0] = NULL;
+	if (!amd_northbridges.num) {
+		amd_northbridges.nb_misc[0] = NULL;
 		return 0;
 	}
 
-	if (k8_northbridges.gart_supported) {
-		flush_words = kmalloc(k8_northbridges.num * sizeof(u32),
+	if (amd_northbridges.gart_supported) {
+		flush_words = kmalloc(amd_northbridges.num * sizeof(u32),
 				      GFP_KERNEL);
 		if (!flush_words) {
-			kfree(k8_northbridges.nb_misc);
+			kfree(amd_northbridges.nb_misc);
 			return -ENOMEM;
 		}
 	}
 
 	dev = NULL;
 	i = 0;
-	while ((dev = next_k8_northbridge(dev)) != NULL) {
-		k8_northbridges.nb_misc[i] = dev;
-		if (k8_northbridges.gart_supported)
+	while ((dev = next_amd_northbridge(dev)) != NULL) {
+		amd_northbridges.nb_misc[i] = dev;
+		if (amd_northbridges.gart_supported)
 			pci_read_config_dword(dev, 0x9c, &flush_words[i++]);
 	}
-	k8_northbridges.nb_misc[i] = NULL;
+	amd_northbridges.nb_misc[i] = NULL;
 	return 0;
 }
-EXPORT_SYMBOL_GPL(cache_k8_northbridges);
+EXPORT_SYMBOL_GPL(cache_amd_northbridges);
 
 /* Ignores subdevice/subvendor but as far as I can figure out
    they're useless anyways */
-int __init early_is_k8_nb(u32 device)
+int __init early_is_amd_nb(u32 device)
 {
 	struct pci_device_id *id;
 	u32 vendor = device & 0xffff;
 	device >>= 16;
-	for (id = k8_nb_ids; id->vendor; id++)
+	for (id = amd_nb_ids; id->vendor; id++)
 		if (vendor == id->vendor && device == id->device)
 			return 1;
 	return 0;
 }
 
-void k8_flush_garts(void)
+void amd_flush_garts(void)
 {
 	int flushed, i;
 	unsigned long flags;
 	static DEFINE_SPINLOCK(gart_lock);
 
-	if (!k8_northbridges.gart_supported)
+	if (!amd_northbridges.gart_supported)
 		return;
 
 	/* Avoid races between AGP and IOMMU. In theory it's not needed
@@ -109,16 +109,16 @@
 	   that it doesn't matter to serialize more. -AK */
 	spin_lock_irqsave(&gart_lock, flags);
 	flushed = 0;
-	for (i = 0; i < k8_northbridges.num; i++) {
-		pci_write_config_dword(k8_northbridges.nb_misc[i], 0x9c,
+	for (i = 0; i < amd_northbridges.num; i++) {
+		pci_write_config_dword(amd_northbridges.nb_misc[i], 0x9c,
 				       flush_words[i]|1);
 		flushed++;
 	}
-	for (i = 0; i < k8_northbridges.num; i++) {
+	for (i = 0; i < amd_northbridges.num; i++) {
 		u32 w;
 		/* Make sure the hardware actually executed the flush*/
 		for (;;) {
-			pci_read_config_dword(k8_northbridges.nb_misc[i],
+			pci_read_config_dword(amd_northbridges.nb_misc[i],
 					      0x9c, &w);
 			if (!(w & 1))
 				break;
@@ -129,19 +129,19 @@
 	if (!flushed)
 		printk("nothing to flush?\n");
 }
-EXPORT_SYMBOL_GPL(k8_flush_garts);
+EXPORT_SYMBOL_GPL(amd_flush_garts);
 
-static __init int init_k8_nbs(void)
+static __init int init_amd_nbs(void)
 {
 	int err = 0;
 
-	err = cache_k8_northbridges();
+	err = cache_amd_northbridges();
 
 	if (err < 0)
-		printk(KERN_NOTICE "K8 NB: Cannot enumerate AMD northbridges.\n");
+		printk(KERN_NOTICE "AMD NB: Cannot enumerate AMD northbridges.\n");
 
 	return err;
 }
 
 /* This has to go after the PCI subsystem */
-fs_initcall(init_k8_nbs);
+fs_initcall(init_amd_nbs);
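
Note that early_is_amd_nb() runs before the PCI core is up, so it takes the
raw first config-space dword (vendor ID in bits 15:0, device ID in bits
31:16) instead of a struct pci_dev and matches it against amd_nb_ids[]. A
minimal sketch of the calling pattern, modeled on the aperture_64.c hunks
below (find_nb_slots() itself is hypothetical):

	static int __init find_nb_slots(int bus, int dev_base, int dev_limit)
	{
		int slot, found = 0;

		for (slot = dev_base; slot < dev_limit; slot++) {
			/* dword 0x00 of function 3 packs vendor/device
			 * exactly as early_is_amd_nb() expects */
			if (early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
				found++;
		}
		return found;
	}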
+5 -5
arch/x86/kernel/aperture_64.c
@@ -206,7 +206,7 @@
  * Do an PCI bus scan by hand because we're running before the PCI
  * subsystem.
  *
- * All K8 AGP bridges are AGPv3 compliant, so we can do this scan
+ * All AMD AGP bridges are AGPv3 compliant, so we can do this scan
  * generically. It's probably overkill to always scan all slots because
  * the AGP bridges should be always an own bus on the HT hierarchy,
  * but do it here for future safety.
@@ -303,7 +303,7 @@
 		dev_limit = bus_dev_ranges[i].dev_limit;
 
 		for (slot = dev_base; slot < dev_limit; slot++) {
-			if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00)))
+			if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
 				continue;
 
 			ctl = read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL);
@@ -358,7 +358,7 @@
 		dev_limit = bus_dev_ranges[i].dev_limit;
 
 		for (slot = dev_base; slot < dev_limit; slot++) {
-			if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00)))
+			if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
 				continue;
 
 			ctl = read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL);
@@ -400,7 +400,7 @@
 		dev_limit = bus_dev_ranges[i].dev_limit;
 
 		for (slot = dev_base; slot < dev_limit; slot++) {
-			if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00)))
+			if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
 				continue;
 
 			iommu_detected = 1;
@@ -518,7 +518,7 @@
 		dev_base = bus_dev_ranges[i].dev_base;
 		dev_limit = bus_dev_ranges[i].dev_limit;
 		for (slot = dev_base; slot < dev_limit; slot++) {
-			if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00)))
+			if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
 				continue;
 
 			write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl);
+3 -3
arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -333,7 +333,7 @@
 static struct amd_l3_cache * __cpuinit amd_init_l3_cache(int node)
 {
 	struct amd_l3_cache *l3;
-	struct pci_dev *dev = node_to_k8_nb_misc(node);
+	struct pci_dev *dev = node_to_amd_nb_misc(node);
 
 	l3 = kzalloc(sizeof(struct amd_l3_cache), GFP_ATOMIC);
 	if (!l3) {
@@ -370,7 +370,7 @@
 		return;
 
 	/* not in virtualized environments */
-	if (k8_northbridges.num == 0)
+	if (amd_northbridges.num == 0)
 		return;
 
 	/*
@@ -378,7 +378,7 @@
 	 * never freed but this is done only on shutdown so it doesn't matter.
 	 */
 	if (!l3_caches) {
-		int size = k8_northbridges.num * sizeof(struct amd_l3_cache *);
+		int size = amd_northbridges.num * sizeof(struct amd_l3_cache *);
 
 		l3_caches = kzalloc(size, GFP_ATOMIC);
 		if (!l3_caches)
+17 -17
arch/x86/kernel/pci-gart_64.c
@@ -143,7 +143,7 @@
 
 	spin_lock_irqsave(&iommu_bitmap_lock, flags);
 	if (need_flush) {
-		k8_flush_garts();
+		amd_flush_garts();
 		need_flush = false;
 	}
 	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
@@ -561,15 +561,15 @@
 {
 	int i;
 
-	if (!k8_northbridges.gart_supported)
+	if (!amd_northbridges.gart_supported)
 		return;
 
-	for (i = 0; i < k8_northbridges.num; i++) {
-		struct pci_dev *dev = k8_northbridges.nb_misc[i];
+	for (i = 0; i < amd_northbridges.num; i++) {
+		struct pci_dev *dev = amd_northbridges.nb_misc[i];
 
 		enable_gart_translation(dev, __pa(agp_gatt_table));
 	}
 
 	/* Flush the GART-TLB to remove stale entries */
-	k8_flush_garts();
+	amd_flush_garts();
 }
@@ -596,13 +596,13 @@
 	if (!fix_up_north_bridges)
 		return;
 
-	if (!k8_northbridges.gart_supported)
+	if (!amd_northbridges.gart_supported)
 		return;
 
 	pr_info("PCI-DMA: Restoring GART aperture settings\n");
 
-	for (i = 0; i < k8_northbridges.num; i++) {
-		struct pci_dev *dev = k8_northbridges.nb_misc[i];
+	for (i = 0; i < amd_northbridges.num; i++) {
+		struct pci_dev *dev = amd_northbridges.nb_misc[i];
 
 		/*
 		 * Don't enable translations just yet. That is the next
@@ -644,7 +644,7 @@
  * Private Northbridge GATT initialization in case we cannot use the
  * AGP driver for some reason.
  */
-static __init int init_k8_gatt(struct agp_kern_info *info)
+static __init int init_amd_gatt(struct agp_kern_info *info)
 {
 	unsigned aper_size, gatt_size, new_aper_size;
 	unsigned aper_base, new_aper_base;
@@ -656,8 +656,8 @@
 
 	aper_size = aper_base = info->aper_size = 0;
 	dev = NULL;
-	for (i = 0; i < k8_northbridges.num; i++) {
-		dev = k8_northbridges.nb_misc[i];
+	for (i = 0; i < amd_northbridges.num; i++) {
+		dev = amd_northbridges.nb_misc[i];
 		new_aper_base = read_aperture(dev, &new_aper_size);
 		if (!new_aper_base)
 			goto nommu;
@@ -725,13 +725,13 @@
 	if (!no_agp)
 		return;
 
-	if (!k8_northbridges.gart_supported)
+	if (!amd_northbridges.gart_supported)
 		return;
 
-	for (i = 0; i < k8_northbridges.num; i++) {
+	for (i = 0; i < amd_northbridges.num; i++) {
 		u32 ctl;
 
-		dev = k8_northbridges.nb_misc[i];
+		dev = amd_northbridges.nb_misc[i];
 		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);
 
 		ctl &= ~GARTEN;
@@ -749,14 +749,14 @@
 	unsigned long scratch;
 	long i;
 
-	if (!k8_northbridges.gart_supported)
+	if (!amd_northbridges.gart_supported)
 		return 0;
 
 #ifndef CONFIG_AGP_AMD64
 	no_agp = 1;
 #else
 	/* Makefile puts PCI initialization via subsys_initcall first. */
-	/* Add other K8 AGP bridge drivers here */
+	/* Add other AMD AGP bridge drivers here */
 	no_agp = no_agp ||
 		(agp_amd64_init() < 0) ||
 		(agp_copy_info(agp_bridge, &info) < 0);
@@ -765,7 +765,7 @@
 	if (no_iommu ||
 	    (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
 	    !gart_iommu_aperture ||
-	    (no_agp && init_k8_gatt(&info) < 0)) {
+	    (no_agp && init_amd_gatt(&info) < 0)) {
 		if (max_pfn > MAX_DMA32_PFN) {
 			pr_warning("More than 4GB of memory but GART IOMMU not available.\n");
 			pr_warning("falling back to iommu=soft.\n");
+4 -4
arch/x86/kernel/setup.c
@@ -694,7 +694,7 @@
 void __init setup_arch(char **cmdline_p)
 {
 	int acpi = 0;
-	int k8 = 0;
+	int amd = 0;
 	unsigned long flags;
 
 #ifdef CONFIG_X86_32
@@ -981,12 +981,12 @@
 	acpi = acpi_numa_init();
 #endif
 
-#ifdef CONFIG_K8_NUMA
+#ifdef CONFIG_AMD_NUMA
 	if (!acpi)
-		k8 = !k8_numa_init(0, max_pfn);
+		amd = !amd_numa_init(0, max_pfn);
 #endif
 
-	initmem_init(0, max_pfn, acpi, k8);
+	initmem_init(0, max_pfn, acpi, amd);
 	memblock_find_dma_reserve();
 	dma32_reserve_bootmem();
 
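
The detection order in setup_arch() stays what the Kconfig help text
promises: ACPI (SRAT) takes priority, and the AMD northbridge scan is only
the fallback. In outline (a condensed restatement of the hunk above, not new
code):

	acpi = acpi_numa_init();		/* X86_64_ACPI_NUMA, if enabled */

	if (!acpi)				/* CONFIG_AMD_NUMA fallback */
		amd = !amd_numa_init(0, max_pfn);

	initmem_init(0, max_pfn, acpi, amd);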
+1 -1
arch/x86/mm/Makefile
@@ -23,7 +23,7 @@
 obj-$(CONFIG_MMIOTRACE_TEST)	+= testmmiotrace.o
 
 obj-$(CONFIG_NUMA)		+= numa.o numa_$(BITS).o
-obj-$(CONFIG_K8_NUMA)		+= k8topology_64.o
+obj-$(CONFIG_AMD_NUMA)		+= amdtopology_64.o
 obj-$(CONFIG_ACPI_NUMA)		+= srat_$(BITS).o
 
 obj-$(CONFIG_HAVE_MEMBLOCK)	+= memblock.o
+6 -6
arch/x86/mm/{k8topology_64.c => amdtopology_64.c} (renamed)
@@ -1,8 +1,8 @@
 /*
- * AMD K8 NUMA support.
+ * AMD NUMA support.
  * Discover the memory map and associated nodes.
  *
- * This version reads it directly from the K8 northbridge.
+ * This version reads it directly from the AMD northbridge.
  *
  * Copyright 2002,2003 Andi Kleen, SuSE Labs.
 */
@@ -57,7 +57,7 @@
 {
 	/*
 	 * need to get the APIC ID of the BSP so can use that to
-	 * create apicid_to_node in k8_scan_nodes()
+	 * create apicid_to_node in amd_scan_nodes()
 	 */
 #ifdef CONFIG_X86_MPPARSE
 	/*
@@ -69,7 +69,7 @@
 	early_init_lapic_mapping();
 }
 
-int __init k8_get_nodes(struct bootnode *physnodes)
+int __init amd_get_nodes(struct bootnode *physnodes)
 {
 	int i;
 	int ret = 0;
@@ -82,7 +82,7 @@
 	return ret;
 }
 
-int __init k8_numa_init(unsigned long start_pfn, unsigned long end_pfn)
+int __init amd_numa_init(unsigned long start_pfn, unsigned long end_pfn)
 {
 	unsigned long start = PFN_PHYS(start_pfn);
 	unsigned long end = PFN_PHYS(end_pfn);
@@ -194,7 +194,7 @@
 	return 0;
 }
 
-int __init k8_scan_nodes(void)
+int __init amd_scan_nodes(void)
 {
 	unsigned int bits;
 	unsigned int cores;
+11 -11
arch/x86/mm/numa_64.c
@@ -264,7 +264,7 @@
 static char *cmdline __initdata;
 
 static int __init setup_physnodes(unsigned long start, unsigned long end,
-				  int acpi, int k8)
+				  int acpi, int amd)
 {
 	int nr_nodes = 0;
 	int ret = 0;
@@ -274,13 +274,13 @@
 	if (acpi)
 		nr_nodes = acpi_get_nodes(physnodes);
 #endif
-#ifdef CONFIG_K8_NUMA
-	if (k8)
-		nr_nodes = k8_get_nodes(physnodes);
+#ifdef CONFIG_AMD_NUMA
+	if (amd)
+		nr_nodes = amd_get_nodes(physnodes);
 #endif
 	/*
 	 * Basic sanity checking on the physical node map: there may be errors
-	 * if the SRAT or K8 incorrectly reported the topology or the mem=
+	 * if the SRAT or AMD code incorrectly reported the topology or the mem=
 	 * kernel parameter is used.
 	 */
 	for (i = 0; i < nr_nodes; i++) {
@@ -549,7 +549,7 @@
  * numa=fake command-line option.
 */
 static int __init numa_emulation(unsigned long start_pfn,
-				 unsigned long last_pfn, int acpi, int k8)
+				 unsigned long last_pfn, int acpi, int amd)
 {
 	u64 addr = start_pfn << PAGE_SHIFT;
 	u64 max_addr = last_pfn << PAGE_SHIFT;
@@ -557,7 +557,7 @@
 	int num_nodes;
 	int i;
 
-	num_phys_nodes = setup_physnodes(addr, max_addr, acpi, k8);
+	num_phys_nodes = setup_physnodes(addr, max_addr, acpi, amd);
 	/*
 	 * If the numa=fake command-line contains a 'M' or 'G', it represents
 	 * the fixed node size. Otherwise, if it is just a single number N,
@@ -602,7 +602,7 @@
 #endif /* CONFIG_NUMA_EMU */
 
 void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn,
-			 int acpi, int k8)
+			 int acpi, int amd)
 {
 	int i;
 
@@ -610,7 +610,7 @@
 	nodes_clear(node_online_map);
 
 #ifdef CONFIG_NUMA_EMU
-	if (cmdline && !numa_emulation(start_pfn, last_pfn, acpi, k8))
+	if (cmdline && !numa_emulation(start_pfn, last_pfn, acpi, amd))
 		return;
 	nodes_clear(node_possible_map);
 	nodes_clear(node_online_map);
@@ -624,8 +624,8 @@
 	nodes_clear(node_online_map);
 #endif
 
-#ifdef CONFIG_K8_NUMA
-	if (!numa_off && k8 && !k8_scan_nodes())
+#ifdef CONFIG_AMD_NUMA
+	if (!numa_off && amd && !amd_scan_nodes())
 		return;
 	nodes_clear(node_possible_map);
 	nodes_clear(node_online_map);
+16 -16
drivers/char/agp/amd64-agp.c
@@ -38,7 +38,7 @@
 
 static void amd64_tlbflush(struct agp_memory *temp)
 {
-	k8_flush_garts();
+	amd_flush_garts();
 }
 
 static int amd64_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
@@ -124,7 +124,7 @@
 	u32 temp;
 	struct aper_size_info_32 *values;
 
-	dev = k8_northbridges.nb_misc[0];
+	dev = amd_northbridges.nb_misc[0];
 	if (dev==NULL)
 		return 0;
 
@@ -181,15 +181,15 @@
 	unsigned long gatt_bus = virt_to_phys(agp_bridge->gatt_table_real);
 	int i;
 
-	if (!k8_northbridges.gart_supported)
+	if (!amd_northbridges.gart_supported)
 		return 0;
 
 	/* Configure AGP regs in each x86-64 host bridge. */
-	for (i = 0; i < k8_northbridges.num; i++) {
+	for (i = 0; i < amd_northbridges.num; i++) {
 		agp_bridge->gart_bus_addr =
-				amd64_configure(k8_northbridges.nb_misc[i],
+				amd64_configure(amd_northbridges.nb_misc[i],
 						gatt_bus);
 	}
-	k8_flush_garts();
+	amd_flush_garts();
 	return 0;
 }
@@ -200,11 +200,11 @@
 	u32 tmp;
 	int i;
 
-	if (!k8_northbridges.gart_supported)
+	if (!amd_northbridges.gart_supported)
 		return;
 
-	for (i = 0; i < k8_northbridges.num; i++) {
-		struct pci_dev *dev = k8_northbridges.nb_misc[i];
+	for (i = 0; i < amd_northbridges.num; i++) {
+		struct pci_dev *dev = amd_northbridges.nb_misc[i];
 		/* disable gart translation */
 		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &tmp);
 		tmp &= ~GARTEN;
@@ -331,15 +331,15 @@
 {
 	int i;
 
-	if (cache_k8_northbridges() < 0)
+	if (cache_amd_northbridges() < 0)
 		return -ENODEV;
 
-	if (!k8_northbridges.gart_supported)
+	if (!amd_northbridges.gart_supported)
 		return -ENODEV;
 
 	i = 0;
-	for (i = 0; i < k8_northbridges.num; i++) {
-		struct pci_dev *dev = k8_northbridges.nb_misc[i];
+	for (i = 0; i < amd_northbridges.num; i++) {
+		struct pci_dev *dev = amd_northbridges.nb_misc[i];
 		if (fix_northbridge(dev, pdev, cap_ptr) < 0) {
 			dev_err(&dev->dev, "no usable aperture found\n");
 #ifdef __x86_64__
@@ -416,7 +416,7 @@
 	}
 
 	/* shadow x86-64 registers into ULi registers */
-	pci_read_config_dword (k8_northbridges.nb_misc[0], AMD64_GARTAPERTUREBASE,
+	pci_read_config_dword (amd_northbridges.nb_misc[0], AMD64_GARTAPERTUREBASE,
 			       &httfea);
 
 	/* if x86-64 aperture base is beyond 4G, exit here */
@@ -484,7 +484,7 @@
 	pci_write_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, tmp);
 
 	/* shadow x86-64 registers into NVIDIA registers */
-	pci_read_config_dword (k8_northbridges.nb_misc[0], AMD64_GARTAPERTUREBASE,
+	pci_read_config_dword (amd_northbridges.nb_misc[0], AMD64_GARTAPERTUREBASE,
 			       &apbase);
 
 	/* if x86-64 aperture base is beyond 4G, exit here */
@@ -778,7 +778,7 @@
 	}
 
 	/* First check that we have at least one AMD64 NB */
-	if (!pci_dev_present(k8_nb_ids))
+	if (!pci_dev_present(amd_nb_ids))
 		return -ENODEV;
 
 	/* Look for any AGP bridge */
+2 -2
drivers/edac/amd64_edac.c
@@ -2917,7 +2917,7 @@
 
 	opstate_init();
 
-	if (cache_k8_northbridges() < 0)
+	if (cache_amd_northbridges() < 0)
 		goto err_ret;
 
 	msrs = msrs_alloc();
@@ -2934,7 +2934,7 @@
 	 * to finish initialization of the MC instances.
 	 */
 	err = -ENODEV;
-	for (nb = 0; nb < k8_northbridges.num; nb++) {
+	for (nb = 0; nb < amd_northbridges.num; nb++) {
 		if (!pvt_lookup[nb])
 			continue;
 