x86, amd-nb: Complete the rename of AMD NB and related code

Not only was the naming of the files confusing, it was even more so for
the function and variable names.

Rename the K8 northbridge and NUMA code that is also used on other AMD
platforms. This also renames the CONFIG_K8_NUMA option to
CONFIG_AMD_NUMA and the related file k8topology_64.c to
amdtopology_64.c. No functional changes intended.

Signed-off-by: Hans Rosenfeld <hans.rosenfeld@amd.com>
Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>

authored by Hans Rosenfeld and committed by Borislav Petkov eec1d4fa e53beacd

+119 -119
+6 -6
arch/x86/Kconfig
··· 1141 1141 comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI" 1142 1142 depends on X86_32 && X86_SUMMIT && (!HIGHMEM64G || !ACPI) 1143 1143 1144 - config K8_NUMA 1144 + config AMD_NUMA 1145 1145 def_bool y 1146 1146 prompt "Old style AMD Opteron NUMA detection" 1147 1147 depends on X86_64 && NUMA && PCI 1148 1148 ---help--- 1149 - Enable K8 NUMA node topology detection. You should say Y here if 1150 - you have a multi processor AMD K8 system. This uses an old 1151 - method to read the NUMA configuration directly from the builtin 1152 - Northbridge of Opteron. It is recommended to use X86_64_ACPI_NUMA 1153 - instead, which also takes priority if both are compiled in. 1149 + Enable AMD NUMA node topology detection. You should say Y here if 1150 + you have a multi processor AMD system. This uses an old method to 1151 + read the NUMA configuration directly from the builtin Northbridge 1152 + of Opteron. It is recommended to use X86_64_ACPI_NUMA instead, 1153 + which also takes priority if both are compiled in. 1154 1154 1155 1155 config X86_64_ACPI_NUMA 1156 1156 def_bool y
+12 -12
arch/x86/include/asm/amd_nb.h
··· 3 3 4 4 #include <linux/pci.h> 5 5 6 - extern struct pci_device_id k8_nb_ids[]; 6 + extern struct pci_device_id amd_nb_ids[]; 7 7 struct bootnode; 8 8 9 - extern int early_is_k8_nb(u32 value); 10 - extern int cache_k8_northbridges(void); 11 - extern void k8_flush_garts(void); 12 - extern int k8_get_nodes(struct bootnode *nodes); 13 - extern int k8_numa_init(unsigned long start_pfn, unsigned long end_pfn); 14 - extern int k8_scan_nodes(void); 9 + extern int early_is_amd_nb(u32 value); 10 + extern int cache_amd_northbridges(void); 11 + extern void amd_flush_garts(void); 12 + extern int amd_get_nodes(struct bootnode *nodes); 13 + extern int amd_numa_init(unsigned long start_pfn, unsigned long end_pfn); 14 + extern int amd_scan_nodes(void); 15 15 16 - struct k8_northbridge_info { 16 + struct amd_northbridge_info { 17 17 u16 num; 18 18 u8 gart_supported; 19 19 struct pci_dev **nb_misc; 20 20 }; 21 - extern struct k8_northbridge_info k8_northbridges; 21 + extern struct amd_northbridge_info amd_northbridges; 22 22 23 23 #ifdef CONFIG_AMD_NB 24 24 25 - static inline struct pci_dev *node_to_k8_nb_misc(int node) 25 + static inline struct pci_dev *node_to_amd_nb_misc(int node) 26 26 { 27 - return (node < k8_northbridges.num) ? k8_northbridges.nb_misc[node] : NULL; 27 + return (node < amd_northbridges.num) ? amd_northbridges.nb_misc[node] : NULL; 28 28 } 29 29 30 30 #else 31 31 32 - static inline struct pci_dev *node_to_k8_nb_misc(int node) 32 + static inline struct pci_dev *node_to_amd_nb_misc(int node) 33 33 { 34 34 return NULL; 35 35 }
+36 -36
arch/x86/kernel/amd_nb.c
··· 12 12 13 13 static u32 *flush_words; 14 14 15 - struct pci_device_id k8_nb_ids[] = { 15 + struct pci_device_id amd_nb_ids[] = { 16 16 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) }, 17 17 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, 18 18 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_MISC) }, 19 19 {} 20 20 }; 21 - EXPORT_SYMBOL(k8_nb_ids); 21 + EXPORT_SYMBOL(amd_nb_ids); 22 22 23 - struct k8_northbridge_info k8_northbridges; 24 - EXPORT_SYMBOL(k8_northbridges); 23 + struct amd_northbridge_info amd_northbridges; 24 + EXPORT_SYMBOL(amd_northbridges); 25 25 26 - static struct pci_dev *next_k8_northbridge(struct pci_dev *dev) 26 + static struct pci_dev *next_amd_northbridge(struct pci_dev *dev) 27 27 { 28 28 do { 29 29 dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev); 30 30 if (!dev) 31 31 break; 32 - } while (!pci_match_id(&k8_nb_ids[0], dev)); 32 + } while (!pci_match_id(&amd_nb_ids[0], dev)); 33 33 return dev; 34 34 } 35 35 36 - int cache_k8_northbridges(void) 36 + int cache_amd_northbridges(void) 37 37 { 38 38 int i; 39 39 struct pci_dev *dev; 40 40 41 - if (k8_northbridges.num) 41 + if (amd_northbridges.num) 42 42 return 0; 43 43 44 44 dev = NULL; 45 - while ((dev = next_k8_northbridge(dev)) != NULL) 46 - k8_northbridges.num++; 45 + while ((dev = next_amd_northbridge(dev)) != NULL) 46 + amd_northbridges.num++; 47 47 48 48 /* some CPU families (e.g. 
family 0x11) do not support GART */ 49 49 if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 || 50 50 boot_cpu_data.x86 == 0x15) 51 - k8_northbridges.gart_supported = 1; 51 + amd_northbridges.gart_supported = 1; 52 52 53 - k8_northbridges.nb_misc = kmalloc((k8_northbridges.num + 1) * 53 + amd_northbridges.nb_misc = kmalloc((amd_northbridges.num + 1) * 54 54 sizeof(void *), GFP_KERNEL); 55 - if (!k8_northbridges.nb_misc) 55 + if (!amd_northbridges.nb_misc) 56 56 return -ENOMEM; 57 57 58 - if (!k8_northbridges.num) { 59 - k8_northbridges.nb_misc[0] = NULL; 58 + if (!amd_northbridges.num) { 59 + amd_northbridges.nb_misc[0] = NULL; 60 60 return 0; 61 61 } 62 62 63 - if (k8_northbridges.gart_supported) { 64 - flush_words = kmalloc(k8_northbridges.num * sizeof(u32), 63 + if (amd_northbridges.gart_supported) { 64 + flush_words = kmalloc(amd_northbridges.num * sizeof(u32), 65 65 GFP_KERNEL); 66 66 if (!flush_words) { 67 - kfree(k8_northbridges.nb_misc); 67 + kfree(amd_northbridges.nb_misc); 68 68 return -ENOMEM; 69 69 } 70 70 } 71 71 72 72 dev = NULL; 73 73 i = 0; 74 - while ((dev = next_k8_northbridge(dev)) != NULL) { 75 - k8_northbridges.nb_misc[i] = dev; 76 - if (k8_northbridges.gart_supported) 74 + while ((dev = next_amd_northbridge(dev)) != NULL) { 75 + amd_northbridges.nb_misc[i] = dev; 76 + if (amd_northbridges.gart_supported) 77 77 pci_read_config_dword(dev, 0x9c, &flush_words[i++]); 78 78 } 79 - k8_northbridges.nb_misc[i] = NULL; 79 + amd_northbridges.nb_misc[i] = NULL; 80 80 return 0; 81 81 } 82 - EXPORT_SYMBOL_GPL(cache_k8_northbridges); 82 + EXPORT_SYMBOL_GPL(cache_amd_northbridges); 83 83 84 84 /* Ignores subdevice/subvendor but as far as I can figure out 85 85 they're useless anyways */ 86 - int __init early_is_k8_nb(u32 device) 86 + int __init early_is_amd_nb(u32 device) 87 87 { 88 88 struct pci_device_id *id; 89 89 u32 vendor = device & 0xffff; 90 90 device >>= 16; 91 - for (id = k8_nb_ids; id->vendor; id++) 91 + for (id = amd_nb_ids; id->vendor; 
id++) 92 92 if (vendor == id->vendor && device == id->device) 93 93 return 1; 94 94 return 0; 95 95 } 96 96 97 - void k8_flush_garts(void) 97 + void amd_flush_garts(void) 98 98 { 99 99 int flushed, i; 100 100 unsigned long flags; 101 101 static DEFINE_SPINLOCK(gart_lock); 102 102 103 - if (!k8_northbridges.gart_supported) 103 + if (!amd_northbridges.gart_supported) 104 104 return; 105 105 106 106 /* Avoid races between AGP and IOMMU. In theory it's not needed ··· 109 109 that it doesn't matter to serialize more. -AK */ 110 110 spin_lock_irqsave(&gart_lock, flags); 111 111 flushed = 0; 112 - for (i = 0; i < k8_northbridges.num; i++) { 113 - pci_write_config_dword(k8_northbridges.nb_misc[i], 0x9c, 112 + for (i = 0; i < amd_northbridges.num; i++) { 113 + pci_write_config_dword(amd_northbridges.nb_misc[i], 0x9c, 114 114 flush_words[i]|1); 115 115 flushed++; 116 116 } 117 - for (i = 0; i < k8_northbridges.num; i++) { 117 + for (i = 0; i < amd_northbridges.num; i++) { 118 118 u32 w; 119 119 /* Make sure the hardware actually executed the flush*/ 120 120 for (;;) { 121 - pci_read_config_dword(k8_northbridges.nb_misc[i], 121 + pci_read_config_dword(amd_northbridges.nb_misc[i], 122 122 0x9c, &w); 123 123 if (!(w & 1)) 124 124 break; ··· 129 129 if (!flushed) 130 130 printk("nothing to flush?\n"); 131 131 } 132 - EXPORT_SYMBOL_GPL(k8_flush_garts); 132 + EXPORT_SYMBOL_GPL(amd_flush_garts); 133 133 134 - static __init int init_k8_nbs(void) 134 + static __init int init_amd_nbs(void) 135 135 { 136 136 int err = 0; 137 137 138 - err = cache_k8_northbridges(); 138 + err = cache_amd_northbridges(); 139 139 140 140 if (err < 0) 141 - printk(KERN_NOTICE "K8 NB: Cannot enumerate AMD northbridges.\n"); 141 + printk(KERN_NOTICE "AMD NB: Cannot enumerate AMD northbridges.\n"); 142 142 143 143 return err; 144 144 } 145 145 146 146 /* This has to go after the PCI subsystem */ 147 - fs_initcall(init_k8_nbs); 147 + fs_initcall(init_amd_nbs);
+5 -5
arch/x86/kernel/aperture_64.c
··· 206 206 * Do an PCI bus scan by hand because we're running before the PCI 207 207 * subsystem. 208 208 * 209 - * All K8 AGP bridges are AGPv3 compliant, so we can do this scan 209 + * All AMD AGP bridges are AGPv3 compliant, so we can do this scan 210 210 * generically. It's probably overkill to always scan all slots because 211 211 * the AGP bridges should be always an own bus on the HT hierarchy, 212 212 * but do it here for future safety. ··· 303 303 dev_limit = bus_dev_ranges[i].dev_limit; 304 304 305 305 for (slot = dev_base; slot < dev_limit; slot++) { 306 - if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00))) 306 + if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00))) 307 307 continue; 308 308 309 309 ctl = read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL); ··· 358 358 dev_limit = bus_dev_ranges[i].dev_limit; 359 359 360 360 for (slot = dev_base; slot < dev_limit; slot++) { 361 - if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00))) 361 + if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00))) 362 362 continue; 363 363 364 364 ctl = read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL); ··· 400 400 dev_limit = bus_dev_ranges[i].dev_limit; 401 401 402 402 for (slot = dev_base; slot < dev_limit; slot++) { 403 - if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00))) 403 + if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00))) 404 404 continue; 405 405 406 406 iommu_detected = 1; ··· 518 518 dev_base = bus_dev_ranges[i].dev_base; 519 519 dev_limit = bus_dev_ranges[i].dev_limit; 520 520 for (slot = dev_base; slot < dev_limit; slot++) { 521 - if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00))) 521 + if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00))) 522 522 continue; 523 523 524 524 write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl);
+3 -3
arch/x86/kernel/cpu/intel_cacheinfo.c
··· 333 333 static struct amd_l3_cache * __cpuinit amd_init_l3_cache(int node) 334 334 { 335 335 struct amd_l3_cache *l3; 336 - struct pci_dev *dev = node_to_k8_nb_misc(node); 336 + struct pci_dev *dev = node_to_amd_nb_misc(node); 337 337 338 338 l3 = kzalloc(sizeof(struct amd_l3_cache), GFP_ATOMIC); 339 339 if (!l3) { ··· 370 370 return; 371 371 372 372 /* not in virtualized environments */ 373 - if (k8_northbridges.num == 0) 373 + if (amd_northbridges.num == 0) 374 374 return; 375 375 376 376 /* ··· 378 378 * never freed but this is done only on shutdown so it doesn't matter. 379 379 */ 380 380 if (!l3_caches) { 381 - int size = k8_northbridges.num * sizeof(struct amd_l3_cache *); 381 + int size = amd_northbridges.num * sizeof(struct amd_l3_cache *); 382 382 383 383 l3_caches = kzalloc(size, GFP_ATOMIC); 384 384 if (!l3_caches)
+17 -17
arch/x86/kernel/pci-gart_64.c
··· 143 143 144 144 spin_lock_irqsave(&iommu_bitmap_lock, flags); 145 145 if (need_flush) { 146 - k8_flush_garts(); 146 + amd_flush_garts(); 147 147 need_flush = false; 148 148 } 149 149 spin_unlock_irqrestore(&iommu_bitmap_lock, flags); ··· 561 561 { 562 562 int i; 563 563 564 - if (!k8_northbridges.gart_supported) 564 + if (!amd_northbridges.gart_supported) 565 565 return; 566 566 567 - for (i = 0; i < k8_northbridges.num; i++) { 568 - struct pci_dev *dev = k8_northbridges.nb_misc[i]; 567 + for (i = 0; i < amd_northbridges.num; i++) { 568 + struct pci_dev *dev = amd_northbridges.nb_misc[i]; 569 569 570 570 enable_gart_translation(dev, __pa(agp_gatt_table)); 571 571 } 572 572 573 573 /* Flush the GART-TLB to remove stale entries */ 574 - k8_flush_garts(); 574 + amd_flush_garts(); 575 575 } 576 576 577 577 /* ··· 596 596 if (!fix_up_north_bridges) 597 597 return; 598 598 599 - if (!k8_northbridges.gart_supported) 599 + if (!amd_northbridges.gart_supported) 600 600 return; 601 601 602 602 pr_info("PCI-DMA: Restoring GART aperture settings\n"); 603 603 604 - for (i = 0; i < k8_northbridges.num; i++) { 605 - struct pci_dev *dev = k8_northbridges.nb_misc[i]; 604 + for (i = 0; i < amd_northbridges.num; i++) { 605 + struct pci_dev *dev = amd_northbridges.nb_misc[i]; 606 606 607 607 /* 608 608 * Don't enable translations just yet. That is the next ··· 644 644 * Private Northbridge GATT initialization in case we cannot use the 645 645 * AGP driver for some reason. 
646 646 */ 647 - static __init int init_k8_gatt(struct agp_kern_info *info) 647 + static __init int init_amd_gatt(struct agp_kern_info *info) 648 648 { 649 649 unsigned aper_size, gatt_size, new_aper_size; 650 650 unsigned aper_base, new_aper_base; ··· 656 656 657 657 aper_size = aper_base = info->aper_size = 0; 658 658 dev = NULL; 659 - for (i = 0; i < k8_northbridges.num; i++) { 660 - dev = k8_northbridges.nb_misc[i]; 659 + for (i = 0; i < amd_northbridges.num; i++) { 660 + dev = amd_northbridges.nb_misc[i]; 661 661 new_aper_base = read_aperture(dev, &new_aper_size); 662 662 if (!new_aper_base) 663 663 goto nommu; ··· 725 725 if (!no_agp) 726 726 return; 727 727 728 - if (!k8_northbridges.gart_supported) 728 + if (!amd_northbridges.gart_supported) 729 729 return; 730 730 731 - for (i = 0; i < k8_northbridges.num; i++) { 731 + for (i = 0; i < amd_northbridges.num; i++) { 732 732 u32 ctl; 733 733 734 - dev = k8_northbridges.nb_misc[i]; 734 + dev = amd_northbridges.nb_misc[i]; 735 735 pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl); 736 736 737 737 ctl &= ~GARTEN; ··· 749 749 unsigned long scratch; 750 750 long i; 751 751 752 - if (!k8_northbridges.gart_supported) 752 + if (!amd_northbridges.gart_supported) 753 753 return 0; 754 754 755 755 #ifndef CONFIG_AGP_AMD64 756 756 no_agp = 1; 757 757 #else 758 758 /* Makefile puts PCI initialization via subsys_initcall first. */ 759 - /* Add other K8 AGP bridge drivers here */ 759 + /* Add other AMD AGP bridge drivers here */ 760 760 no_agp = no_agp || 761 761 (agp_amd64_init() < 0) || 762 762 (agp_copy_info(agp_bridge, &info) < 0); ··· 765 765 if (no_iommu || 766 766 (!force_iommu && max_pfn <= MAX_DMA32_PFN) || 767 767 !gart_iommu_aperture || 768 - (no_agp && init_k8_gatt(&info) < 0)) { 768 + (no_agp && init_amd_gatt(&info) < 0)) { 769 769 if (max_pfn > MAX_DMA32_PFN) { 770 770 pr_warning("More than 4GB of memory but GART IOMMU not available.\n"); 771 771 pr_warning("falling back to iommu=soft.\n");
+4 -4
arch/x86/kernel/setup.c
··· 694 694 void __init setup_arch(char **cmdline_p) 695 695 { 696 696 int acpi = 0; 697 - int k8 = 0; 697 + int amd = 0; 698 698 unsigned long flags; 699 699 700 700 #ifdef CONFIG_X86_32 ··· 981 981 acpi = acpi_numa_init(); 982 982 #endif 983 983 984 - #ifdef CONFIG_K8_NUMA 984 + #ifdef CONFIG_AMD_NUMA 985 985 if (!acpi) 986 - k8 = !k8_numa_init(0, max_pfn); 986 + amd = !amd_numa_init(0, max_pfn); 987 987 #endif 988 988 989 - initmem_init(0, max_pfn, acpi, k8); 989 + initmem_init(0, max_pfn, acpi, amd); 990 990 memblock_find_dma_reserve(); 991 991 dma32_reserve_bootmem(); 992 992
+1 -1
arch/x86/mm/Makefile
··· 23 23 obj-$(CONFIG_MMIOTRACE_TEST) += testmmiotrace.o 24 24 25 25 obj-$(CONFIG_NUMA) += numa.o numa_$(BITS).o 26 - obj-$(CONFIG_K8_NUMA) += k8topology_64.o 26 + obj-$(CONFIG_AMD_NUMA) += amdtopology_64.o 27 27 obj-$(CONFIG_ACPI_NUMA) += srat_$(BITS).o 28 28 29 29 obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o
+6 -6
arch/x86/mm/k8topology_64.c arch/x86/mm/amdtopology_64.c
··· 1 1 /* 2 - * AMD K8 NUMA support. 2 + * AMD NUMA support. 3 3 * Discover the memory map and associated nodes. 4 4 * 5 - * This version reads it directly from the K8 northbridge. 5 + * This version reads it directly from the AMD northbridge. 6 6 * 7 7 * Copyright 2002,2003 Andi Kleen, SuSE Labs. 8 8 */ ··· 57 57 { 58 58 /* 59 59 * need to get the APIC ID of the BSP so can use that to 60 - * create apicid_to_node in k8_scan_nodes() 60 + * create apicid_to_node in amd_scan_nodes() 61 61 */ 62 62 #ifdef CONFIG_X86_MPPARSE 63 63 /* ··· 69 69 early_init_lapic_mapping(); 70 70 } 71 71 72 - int __init k8_get_nodes(struct bootnode *physnodes) 72 + int __init amd_get_nodes(struct bootnode *physnodes) 73 73 { 74 74 int i; 75 75 int ret = 0; ··· 82 82 return ret; 83 83 } 84 84 85 - int __init k8_numa_init(unsigned long start_pfn, unsigned long end_pfn) 85 + int __init amd_numa_init(unsigned long start_pfn, unsigned long end_pfn) 86 86 { 87 87 unsigned long start = PFN_PHYS(start_pfn); 88 88 unsigned long end = PFN_PHYS(end_pfn); ··· 194 194 return 0; 195 195 } 196 196 197 - int __init k8_scan_nodes(void) 197 + int __init amd_scan_nodes(void) 198 198 { 199 199 unsigned int bits; 200 200 unsigned int cores;
+11 -11
arch/x86/mm/numa_64.c
··· 264 264 static char *cmdline __initdata; 265 265 266 266 static int __init setup_physnodes(unsigned long start, unsigned long end, 267 - int acpi, int k8) 267 + int acpi, int amd) 268 268 { 269 269 int nr_nodes = 0; 270 270 int ret = 0; ··· 274 274 if (acpi) 275 275 nr_nodes = acpi_get_nodes(physnodes); 276 276 #endif 277 - #ifdef CONFIG_K8_NUMA 278 - if (k8) 279 - nr_nodes = k8_get_nodes(physnodes); 277 + #ifdef CONFIG_AMD_NUMA 278 + if (amd) 279 + nr_nodes = amd_get_nodes(physnodes); 280 280 #endif 281 281 /* 282 282 * Basic sanity checking on the physical node map: there may be errors 283 - * if the SRAT or K8 incorrectly reported the topology or the mem= 283 + * if the SRAT or AMD code incorrectly reported the topology or the mem= 284 284 * kernel parameter is used. 285 285 */ 286 286 for (i = 0; i < nr_nodes; i++) { ··· 549 549 * numa=fake command-line option. 550 550 */ 551 551 static int __init numa_emulation(unsigned long start_pfn, 552 - unsigned long last_pfn, int acpi, int k8) 552 + unsigned long last_pfn, int acpi, int amd) 553 553 { 554 554 u64 addr = start_pfn << PAGE_SHIFT; 555 555 u64 max_addr = last_pfn << PAGE_SHIFT; ··· 557 557 int num_nodes; 558 558 int i; 559 559 560 - num_phys_nodes = setup_physnodes(addr, max_addr, acpi, k8); 560 + num_phys_nodes = setup_physnodes(addr, max_addr, acpi, amd); 561 561 /* 562 562 * If the numa=fake command-line contains a 'M' or 'G', it represents 563 563 * the fixed node size. 
Otherwise, if it is just a single number N, ··· 602 602 #endif /* CONFIG_NUMA_EMU */ 603 603 604 604 void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn, 605 - int acpi, int k8) 605 + int acpi, int amd) 606 606 { 607 607 int i; 608 608 ··· 610 610 nodes_clear(node_online_map); 611 611 612 612 #ifdef CONFIG_NUMA_EMU 613 - if (cmdline && !numa_emulation(start_pfn, last_pfn, acpi, k8)) 613 + if (cmdline && !numa_emulation(start_pfn, last_pfn, acpi, amd)) 614 614 return; 615 615 nodes_clear(node_possible_map); 616 616 nodes_clear(node_online_map); ··· 624 624 nodes_clear(node_online_map); 625 625 #endif 626 626 627 - #ifdef CONFIG_K8_NUMA 628 - if (!numa_off && k8 && !k8_scan_nodes()) 627 + #ifdef CONFIG_AMD_NUMA 628 + if (!numa_off && amd && !amd_scan_nodes()) 629 629 return; 630 630 nodes_clear(node_possible_map); 631 631 nodes_clear(node_online_map);
+16 -16
drivers/char/agp/amd64-agp.c
··· 38 38 39 39 static void amd64_tlbflush(struct agp_memory *temp) 40 40 { 41 - k8_flush_garts(); 41 + amd_flush_garts(); 42 42 } 43 43 44 44 static int amd64_insert_memory(struct agp_memory *mem, off_t pg_start, int type) ··· 124 124 u32 temp; 125 125 struct aper_size_info_32 *values; 126 126 127 - dev = k8_northbridges.nb_misc[0]; 127 + dev = amd_northbridges.nb_misc[0]; 128 128 if (dev==NULL) 129 129 return 0; 130 130 ··· 181 181 unsigned long gatt_bus = virt_to_phys(agp_bridge->gatt_table_real); 182 182 int i; 183 183 184 - if (!k8_northbridges.gart_supported) 184 + if (!amd_northbridges.gart_supported) 185 185 return 0; 186 186 187 187 /* Configure AGP regs in each x86-64 host bridge. */ 188 - for (i = 0; i < k8_northbridges.num; i++) { 188 + for (i = 0; i < amd_northbridges.num; i++) { 189 189 agp_bridge->gart_bus_addr = 190 - amd64_configure(k8_northbridges.nb_misc[i], 190 + amd64_configure(amd_northbridges.nb_misc[i], 191 191 gatt_bus); 192 192 } 193 - k8_flush_garts(); 193 + amd_flush_garts(); 194 194 return 0; 195 195 } 196 196 ··· 200 200 u32 tmp; 201 201 int i; 202 202 203 - if (!k8_northbridges.gart_supported) 203 + if (!amd_northbridges.gart_supported) 204 204 return; 205 205 206 - for (i = 0; i < k8_northbridges.num; i++) { 207 - struct pci_dev *dev = k8_northbridges.nb_misc[i]; 206 + for (i = 0; i < amd_northbridges.num; i++) { 207 + struct pci_dev *dev = amd_northbridges.nb_misc[i]; 208 208 /* disable gart translation */ 209 209 pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &tmp); 210 210 tmp &= ~GARTEN; ··· 331 331 { 332 332 int i; 333 333 334 - if (cache_k8_northbridges() < 0) 334 + if (cache_amd_northbridges() < 0) 335 335 return -ENODEV; 336 336 337 - if (!k8_northbridges.gart_supported) 337 + if (!amd_northbridges.gart_supported) 338 338 return -ENODEV; 339 339 340 340 i = 0; 341 - for (i = 0; i < k8_northbridges.num; i++) { 342 - struct pci_dev *dev = k8_northbridges.nb_misc[i]; 341 + for (i = 0; i < amd_northbridges.num; i++) { 342 + 
struct pci_dev *dev = amd_northbridges.nb_misc[i]; 343 343 if (fix_northbridge(dev, pdev, cap_ptr) < 0) { 344 344 dev_err(&dev->dev, "no usable aperture found\n"); 345 345 #ifdef __x86_64__ ··· 416 416 } 417 417 418 418 /* shadow x86-64 registers into ULi registers */ 419 - pci_read_config_dword (k8_northbridges.nb_misc[0], AMD64_GARTAPERTUREBASE, 419 + pci_read_config_dword (amd_northbridges.nb_misc[0], AMD64_GARTAPERTUREBASE, 420 420 &httfea); 421 421 422 422 /* if x86-64 aperture base is beyond 4G, exit here */ ··· 484 484 pci_write_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, tmp); 485 485 486 486 /* shadow x86-64 registers into NVIDIA registers */ 487 - pci_read_config_dword (k8_northbridges.nb_misc[0], AMD64_GARTAPERTUREBASE, 487 + pci_read_config_dword (amd_northbridges.nb_misc[0], AMD64_GARTAPERTUREBASE, 488 488 &apbase); 489 489 490 490 /* if x86-64 aperture base is beyond 4G, exit here */ ··· 778 778 } 779 779 780 780 /* First check that we have at least one AMD64 NB */ 781 - if (!pci_dev_present(k8_nb_ids)) 781 + if (!pci_dev_present(amd_nb_ids)) 782 782 return -ENODEV; 783 783 784 784 /* Look for any AGP bridge */
+2 -2
drivers/edac/amd64_edac.c
··· 2917 2917 2918 2918 opstate_init(); 2919 2919 2920 - if (cache_k8_northbridges() < 0) 2920 + if (cache_amd_northbridges() < 0) 2921 2921 goto err_ret; 2922 2922 2923 2923 msrs = msrs_alloc(); ··· 2934 2934 * to finish initialization of the MC instances. 2935 2935 */ 2936 2936 err = -ENODEV; 2937 - for (nb = 0; nb < k8_northbridges.num; nb++) { 2937 + for (nb = 0; nb < amd_northbridges.num; nb++) { 2938 2938 if (!pvt_lookup[nb]) 2939 2939 continue; 2940 2940