x86, amd-nb: Cleanup AMD northbridge caching code

Support more than just the "Misc Control" part of the northbridges.
Support more flags by turning "gart_supported" into a single bit flag
stored in a new flags member. Clean up related code by using a set of
accessor functions (amd_nb_num(), amd_nb_has_feature() and
node_to_amd_nb()) instead of accessing the NB data structures directly.
Reorder the initialization code and move the caching of the GART flush
words into a separate function.

Signed-off-by: Hans Rosenfeld <hans.rosenfeld@amd.com>
Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>

authored by Hans Rosenfeld and committed by Borislav Petkov 9653a5c7 eec1d4fa

+120 -90
+26 -10
arch/x86/include/asm/amd_nb.h
··· 3 4 #include <linux/pci.h> 5 6 - extern struct pci_device_id amd_nb_ids[]; 7 struct bootnode; 8 9 extern int early_is_amd_nb(u32 value); 10 - extern int cache_amd_northbridges(void); 11 extern void amd_flush_garts(void); 12 extern int amd_get_nodes(struct bootnode *nodes); 13 extern int amd_numa_init(unsigned long start_pfn, unsigned long end_pfn); 14 extern int amd_scan_nodes(void); 15 16 struct amd_northbridge_info { 17 u16 num; 18 - u8 gart_supported; 19 - struct pci_dev **nb_misc; 20 }; 21 extern struct amd_northbridge_info amd_northbridges; 22 23 #ifdef CONFIG_AMD_NB 24 25 - static inline struct pci_dev *node_to_amd_nb_misc(int node) 26 { 27 - return (node < amd_northbridges.num) ? amd_northbridges.nb_misc[node] : NULL; 28 } 29 30 #else 31 32 - static inline struct pci_dev *node_to_amd_nb_misc(int node) 33 - { 34 - return NULL; 35 - } 36 #endif 37 38
··· 3 4 #include <linux/pci.h> 5 6 + extern struct pci_device_id amd_nb_misc_ids[]; 7 struct bootnode; 8 9 extern int early_is_amd_nb(u32 value); 10 + extern int amd_cache_northbridges(void); 11 extern void amd_flush_garts(void); 12 extern int amd_get_nodes(struct bootnode *nodes); 13 extern int amd_numa_init(unsigned long start_pfn, unsigned long end_pfn); 14 extern int amd_scan_nodes(void); 15 16 + struct amd_northbridge { 17 + struct pci_dev *misc; 18 + }; 19 + 20 struct amd_northbridge_info { 21 u16 num; 22 + u64 flags; 23 + struct amd_northbridge *nb; 24 }; 25 extern struct amd_northbridge_info amd_northbridges; 26 27 + #define AMD_NB_GART 0x1 28 + 29 #ifdef CONFIG_AMD_NB 30 31 + static inline int amd_nb_num(void) 32 { 33 + return amd_northbridges.num; 34 + } 35 + 36 + static inline int amd_nb_has_feature(int feature) 37 + { 38 + return ((amd_northbridges.flags & feature) == feature); 39 + } 40 + 41 + static inline struct amd_northbridge *node_to_amd_nb(int node) 42 + { 43 + return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL; 44 } 45 46 #else 47 48 + #define amd_nb_num(x) 0 49 + #define amd_nb_has_feature(x) false 50 + #define node_to_amd_nb(x) NULL 51 + 52 #endif 53 54
+63 -48
arch/x86/kernel/amd_nb.c
··· 12 13 static u32 *flush_words; 14 15 - struct pci_device_id amd_nb_ids[] = { 16 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) }, 17 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, 18 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_MISC) }, 19 {} 20 }; 21 - EXPORT_SYMBOL(amd_nb_ids); 22 23 struct amd_northbridge_info amd_northbridges; 24 EXPORT_SYMBOL(amd_northbridges); 25 26 - static struct pci_dev *next_amd_northbridge(struct pci_dev *dev) 27 { 28 do { 29 dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev); 30 if (!dev) 31 break; 32 - } while (!pci_match_id(&amd_nb_ids[0], dev)); 33 return dev; 34 } 35 36 - int cache_amd_northbridges(void) 37 { 38 - int i; 39 - struct pci_dev *dev; 40 41 - if (amd_northbridges.num) 42 return 0; 43 44 - dev = NULL; 45 - while ((dev = next_amd_northbridge(dev)) != NULL) 46 - amd_northbridges.num++; 47 48 /* some CPU families (e.g. family 0x11) do not support GART */ 49 if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 || 50 boot_cpu_data.x86 == 0x15) 51 - amd_northbridges.gart_supported = 1; 52 53 - amd_northbridges.nb_misc = kmalloc((amd_northbridges.num + 1) * 54 - sizeof(void *), GFP_KERNEL); 55 - if (!amd_northbridges.nb_misc) 56 - return -ENOMEM; 57 - 58 - if (!amd_northbridges.num) { 59 - amd_northbridges.nb_misc[0] = NULL; 60 - return 0; 61 - } 62 - 63 - if (amd_northbridges.gart_supported) { 64 - flush_words = kmalloc(amd_northbridges.num * sizeof(u32), 65 - GFP_KERNEL); 66 - if (!flush_words) { 67 - kfree(amd_northbridges.nb_misc); 68 - return -ENOMEM; 69 - } 70 - } 71 - 72 - dev = NULL; 73 - i = 0; 74 - while ((dev = next_amd_northbridge(dev)) != NULL) { 75 - amd_northbridges.nb_misc[i] = dev; 76 - if (amd_northbridges.gart_supported) 77 - pci_read_config_dword(dev, 0x9c, &flush_words[i++]); 78 - } 79 - amd_northbridges.nb_misc[i] = NULL; 80 return 0; 81 } 82 - EXPORT_SYMBOL_GPL(cache_amd_northbridges); 83 84 /* Ignores subdevice/subvendor but as far as I can figure out 
85 they're useless anyways */ ··· 79 struct pci_device_id *id; 80 u32 vendor = device & 0xffff; 81 device >>= 16; 82 - for (id = amd_nb_ids; id->vendor; id++) 83 if (vendor == id->vendor && device == id->device) 84 return 1; 85 return 0; 86 } 87 88 void amd_flush_garts(void) ··· 111 unsigned long flags; 112 static DEFINE_SPINLOCK(gart_lock); 113 114 - if (!amd_northbridges.gart_supported) 115 return; 116 117 /* Avoid races between AGP and IOMMU. In theory it's not needed ··· 120 that it doesn't matter to serialize more. -AK */ 121 spin_lock_irqsave(&gart_lock, flags); 122 flushed = 0; 123 - for (i = 0; i < amd_northbridges.num; i++) { 124 - pci_write_config_dword(amd_northbridges.nb_misc[i], 0x9c, 125 - flush_words[i]|1); 126 flushed++; 127 } 128 - for (i = 0; i < amd_northbridges.num; i++) { 129 u32 w; 130 /* Make sure the hardware actually executed the flush*/ 131 for (;;) { 132 - pci_read_config_dword(amd_northbridges.nb_misc[i], 133 0x9c, &w); 134 if (!(w & 1)) 135 break; ··· 146 { 147 int err = 0; 148 149 - err = cache_amd_northbridges(); 150 151 if (err < 0) 152 printk(KERN_NOTICE "AMD NB: Cannot enumerate AMD northbridges.\n"); 153 154 return err; 155 }
··· 12 13 static u32 *flush_words; 14 15 + struct pci_device_id amd_nb_misc_ids[] = { 16 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) }, 17 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, 18 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_MISC) }, 19 {} 20 }; 21 + EXPORT_SYMBOL(amd_nb_misc_ids); 22 23 struct amd_northbridge_info amd_northbridges; 24 EXPORT_SYMBOL(amd_northbridges); 25 26 + static struct pci_dev *next_northbridge(struct pci_dev *dev, 27 + struct pci_device_id *ids) 28 { 29 do { 30 dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev); 31 if (!dev) 32 break; 33 + } while (!pci_match_id(ids, dev)); 34 return dev; 35 } 36 37 + int amd_cache_northbridges(void) 38 { 39 + int i = 0; 40 + struct amd_northbridge *nb; 41 + struct pci_dev *misc; 42 43 + if (amd_nb_num()) 44 return 0; 45 46 + misc = NULL; 47 + while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL) 48 + i++; 49 + 50 + if (i == 0) 51 + return 0; 52 + 53 + nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL); 54 + if (!nb) 55 + return -ENOMEM; 56 + 57 + amd_northbridges.nb = nb; 58 + amd_northbridges.num = i; 59 + 60 + misc = NULL; 61 + for (i = 0; i != amd_nb_num(); i++) { 62 + node_to_amd_nb(i)->misc = misc = 63 + next_northbridge(misc, amd_nb_misc_ids); 64 + } 65 66 /* some CPU families (e.g. 
family 0x11) do not support GART */ 67 if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 || 68 boot_cpu_data.x86 == 0x15) 69 + amd_northbridges.flags |= AMD_NB_GART; 70 71 return 0; 72 } 73 + EXPORT_SYMBOL_GPL(amd_cache_northbridges); 74 75 /* Ignores subdevice/subvendor but as far as I can figure out 76 they're useless anyways */ ··· 88 struct pci_device_id *id; 89 u32 vendor = device & 0xffff; 90 device >>= 16; 91 + for (id = amd_nb_misc_ids; id->vendor; id++) 92 if (vendor == id->vendor && device == id->device) 93 return 1; 94 return 0; 95 + } 96 + 97 + int amd_cache_gart(void) 98 + { 99 + int i; 100 + 101 + if (!amd_nb_has_feature(AMD_NB_GART)) 102 + return 0; 103 + 104 + flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL); 105 + if (!flush_words) { 106 + amd_northbridges.flags &= ~AMD_NB_GART; 107 + return -ENOMEM; 108 + } 109 + 110 + for (i = 0; i != amd_nb_num(); i++) 111 + pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, 112 + &flush_words[i]); 113 + 114 + return 0; 115 } 116 117 void amd_flush_garts(void) ··· 100 unsigned long flags; 101 static DEFINE_SPINLOCK(gart_lock); 102 103 + if (!amd_nb_has_feature(AMD_NB_GART)) 104 return; 105 106 /* Avoid races between AGP and IOMMU. In theory it's not needed ··· 109 that it doesn't matter to serialize more. 
-AK */ 110 spin_lock_irqsave(&gart_lock, flags); 111 flushed = 0; 112 + for (i = 0; i < amd_nb_num(); i++) { 113 + pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c, 114 + flush_words[i] | 1); 115 flushed++; 116 } 117 + for (i = 0; i < amd_nb_num(); i++) { 118 u32 w; 119 /* Make sure the hardware actually executed the flush*/ 120 for (;;) { 121 + pci_read_config_dword(node_to_amd_nb(i)->misc, 122 0x9c, &w); 123 if (!(w & 1)) 124 break; ··· 135 { 136 int err = 0; 137 138 + err = amd_cache_northbridges(); 139 140 if (err < 0) 141 printk(KERN_NOTICE "AMD NB: Cannot enumerate AMD northbridges.\n"); 142 + 143 + if (amd_cache_gart() < 0) 144 + printk(KERN_NOTICE "AMD NB: Cannot initialize GART flush words, " 145 + "GART support disabled.\n"); 146 147 return err; 148 }
+3 -3
arch/x86/kernel/cpu/intel_cacheinfo.c
··· 333 static struct amd_l3_cache * __cpuinit amd_init_l3_cache(int node) 334 { 335 struct amd_l3_cache *l3; 336 - struct pci_dev *dev = node_to_amd_nb_misc(node); 337 338 l3 = kzalloc(sizeof(struct amd_l3_cache), GFP_ATOMIC); 339 if (!l3) { ··· 370 return; 371 372 /* not in virtualized environments */ 373 - if (amd_northbridges.num == 0) 374 return; 375 376 /* ··· 378 * never freed but this is done only on shutdown so it doesn't matter. 379 */ 380 if (!l3_caches) { 381 - int size = amd_northbridges.num * sizeof(struct amd_l3_cache *); 382 383 l3_caches = kzalloc(size, GFP_ATOMIC); 384 if (!l3_caches)
··· 333 static struct amd_l3_cache * __cpuinit amd_init_l3_cache(int node) 334 { 335 struct amd_l3_cache *l3; 336 + struct pci_dev *dev = node_to_amd_nb(node)->misc; 337 338 l3 = kzalloc(sizeof(struct amd_l3_cache), GFP_ATOMIC); 339 if (!l3) { ··· 370 return; 371 372 /* not in virtualized environments */ 373 + if (amd_nb_num() == 0) 374 return; 375 376 /* ··· 378 * never freed but this is done only on shutdown so it doesn't matter. 379 */ 380 if (!l3_caches) { 381 + int size = amd_nb_num() * sizeof(struct amd_l3_cache *); 382 383 l3_caches = kzalloc(size, GFP_ATOMIC); 384 if (!l3_caches)
+12 -12
arch/x86/kernel/pci-gart_64.c
··· 561 { 562 int i; 563 564 - if (!amd_northbridges.gart_supported) 565 return; 566 567 - for (i = 0; i < amd_northbridges.num; i++) { 568 - struct pci_dev *dev = amd_northbridges.nb_misc[i]; 569 570 enable_gart_translation(dev, __pa(agp_gatt_table)); 571 } ··· 596 if (!fix_up_north_bridges) 597 return; 598 599 - if (!amd_northbridges.gart_supported) 600 return; 601 602 pr_info("PCI-DMA: Restoring GART aperture settings\n"); 603 604 - for (i = 0; i < amd_northbridges.num; i++) { 605 - struct pci_dev *dev = amd_northbridges.nb_misc[i]; 606 607 /* 608 * Don't enable translations just yet. That is the next ··· 656 657 aper_size = aper_base = info->aper_size = 0; 658 dev = NULL; 659 - for (i = 0; i < amd_northbridges.num; i++) { 660 - dev = amd_northbridges.nb_misc[i]; 661 new_aper_base = read_aperture(dev, &new_aper_size); 662 if (!new_aper_base) 663 goto nommu; ··· 725 if (!no_agp) 726 return; 727 728 - if (!amd_northbridges.gart_supported) 729 return; 730 731 - for (i = 0; i < amd_northbridges.num; i++) { 732 u32 ctl; 733 734 - dev = amd_northbridges.nb_misc[i]; 735 pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl); 736 737 ctl &= ~GARTEN; ··· 749 unsigned long scratch; 750 long i; 751 752 - if (!amd_northbridges.gart_supported) 753 return 0; 754 755 #ifndef CONFIG_AGP_AMD64
··· 561 { 562 int i; 563 564 + if (!amd_nb_has_feature(AMD_NB_GART)) 565 return; 566 567 + for (i = 0; i < amd_nb_num(); i++) { 568 + struct pci_dev *dev = node_to_amd_nb(i)->misc; 569 570 enable_gart_translation(dev, __pa(agp_gatt_table)); 571 } ··· 596 if (!fix_up_north_bridges) 597 return; 598 599 + if (!amd_nb_has_feature(AMD_NB_GART)) 600 return; 601 602 pr_info("PCI-DMA: Restoring GART aperture settings\n"); 603 604 + for (i = 0; i < amd_nb_num(); i++) { 605 + struct pci_dev *dev = node_to_amd_nb(i)->misc; 606 607 /* 608 * Don't enable translations just yet. That is the next ··· 656 657 aper_size = aper_base = info->aper_size = 0; 658 dev = NULL; 659 + for (i = 0; i < amd_nb_num(); i++) { 660 + dev = node_to_amd_nb(i)->misc; 661 new_aper_base = read_aperture(dev, &new_aper_size); 662 if (!new_aper_base) 663 goto nommu; ··· 725 if (!no_agp) 726 return; 727 728 + if (!amd_nb_has_feature(AMD_NB_GART)) 729 return; 730 731 + for (i = 0; i < amd_nb_num(); i++) { 732 u32 ctl; 733 734 + dev = node_to_amd_nb(i)->misc; 735 pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl); 736 737 ctl &= ~GARTEN; ··· 749 unsigned long scratch; 750 long i; 751 752 + if (!amd_nb_has_feature(AMD_NB_GART)) 753 return 0; 754 755 #ifndef CONFIG_AGP_AMD64
+14 -15
drivers/char/agp/amd64-agp.c
··· 124 u32 temp; 125 struct aper_size_info_32 *values; 126 127 - dev = amd_northbridges.nb_misc[0]; 128 if (dev==NULL) 129 return 0; 130 ··· 181 unsigned long gatt_bus = virt_to_phys(agp_bridge->gatt_table_real); 182 int i; 183 184 - if (!amd_northbridges.gart_supported) 185 return 0; 186 187 /* Configure AGP regs in each x86-64 host bridge. */ 188 - for (i = 0; i < amd_northbridges.num; i++) { 189 agp_bridge->gart_bus_addr = 190 - amd64_configure(amd_northbridges.nb_misc[i], 191 - gatt_bus); 192 } 193 amd_flush_garts(); 194 return 0; ··· 199 u32 tmp; 200 int i; 201 202 - if (!amd_northbridges.gart_supported) 203 return; 204 205 - for (i = 0; i < amd_northbridges.num; i++) { 206 - struct pci_dev *dev = amd_northbridges.nb_misc[i]; 207 /* disable gart translation */ 208 pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &tmp); 209 tmp &= ~GARTEN; ··· 330 { 331 int i; 332 333 - if (cache_amd_northbridges() < 0) 334 return -ENODEV; 335 336 - if (!amd_northbridges.gart_supported) 337 return -ENODEV; 338 339 i = 0; 340 - for (i = 0; i < amd_northbridges.num; i++) { 341 - struct pci_dev *dev = amd_northbridges.nb_misc[i]; 342 if (fix_northbridge(dev, pdev, cap_ptr) < 0) { 343 dev_err(&dev->dev, "no usable aperture found\n"); 344 #ifdef __x86_64__ ··· 415 } 416 417 /* shadow x86-64 registers into ULi registers */ 418 - pci_read_config_dword (amd_northbridges.nb_misc[0], AMD64_GARTAPERTUREBASE, 419 &httfea); 420 421 /* if x86-64 aperture base is beyond 4G, exit here */ ··· 483 pci_write_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, tmp); 484 485 /* shadow x86-64 registers into NVIDIA registers */ 486 - pci_read_config_dword (amd_northbridges.nb_misc[0], AMD64_GARTAPERTUREBASE, 487 &apbase); 488 489 /* if x86-64 aperture base is beyond 4G, exit here */ ··· 777 } 778 779 /* First check that we have at least one AMD64 NB */ 780 - if (!pci_dev_present(amd_nb_ids)) 781 return -ENODEV; 782 783 /* Look for any AGP bridge */
··· 124 u32 temp; 125 struct aper_size_info_32 *values; 126 127 + dev = node_to_amd_nb(0)->misc; 128 if (dev==NULL) 129 return 0; 130 ··· 181 unsigned long gatt_bus = virt_to_phys(agp_bridge->gatt_table_real); 182 int i; 183 184 + if (!amd_nb_has_feature(AMD_NB_GART)) 185 return 0; 186 187 /* Configure AGP regs in each x86-64 host bridge. */ 188 + for (i = 0; i < amd_nb_num(); i++) { 189 agp_bridge->gart_bus_addr = 190 + amd64_configure(node_to_amd_nb(i)->misc, gatt_bus); 191 } 192 amd_flush_garts(); 193 return 0; ··· 200 u32 tmp; 201 int i; 202 203 + if (!amd_nb_has_feature(AMD_NB_GART)) 204 return; 205 206 + for (i = 0; i < amd_nb_num(); i++) { 207 + struct pci_dev *dev = node_to_amd_nb(i)->misc; 208 /* disable gart translation */ 209 pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &tmp); 210 tmp &= ~GARTEN; ··· 331 { 332 int i; 333 334 + if (amd_cache_northbridges() < 0) 335 return -ENODEV; 336 337 + if (!amd_nb_has_feature(AMD_NB_GART)) 338 return -ENODEV; 339 340 i = 0; 341 + for (i = 0; i < amd_nb_num(); i++) { 342 + struct pci_dev *dev = node_to_amd_nb(i)->misc; 343 if (fix_northbridge(dev, pdev, cap_ptr) < 0) { 344 dev_err(&dev->dev, "no usable aperture found\n"); 345 #ifdef __x86_64__ ··· 416 } 417 418 /* shadow x86-64 registers into ULi registers */ 419 + pci_read_config_dword (node_to_amd_nb(0)->misc, AMD64_GARTAPERTUREBASE, 420 &httfea); 421 422 /* if x86-64 aperture base is beyond 4G, exit here */ ··· 484 pci_write_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, tmp); 485 486 /* shadow x86-64 registers into NVIDIA registers */ 487 + pci_read_config_dword (node_to_amd_nb(0)->misc, AMD64_GARTAPERTUREBASE, 488 &apbase); 489 490 /* if x86-64 aperture base is beyond 4G, exit here */ ··· 778 } 779 780 /* First check that we have at least one AMD64 NB */ 781 + if (!pci_dev_present(amd_nb_misc_ids)) 782 return -ENODEV; 783 784 /* Look for any AGP bridge */
+2 -2
drivers/edac/amd64_edac.c
··· 2917 2918 opstate_init(); 2919 2920 - if (cache_amd_northbridges() < 0) 2921 goto err_ret; 2922 2923 msrs = msrs_alloc(); ··· 2934 * to finish initialization of the MC instances. 2935 */ 2936 err = -ENODEV; 2937 - for (nb = 0; nb < amd_northbridges.num; nb++) { 2938 if (!pvt_lookup[nb]) 2939 continue; 2940
··· 2917 2918 opstate_init(); 2919 2920 + if (amd_cache_northbridges() < 0) 2921 goto err_ret; 2922 2923 msrs = msrs_alloc(); ··· 2934 * to finish initialization of the MC instances. 2935 */ 2936 err = -ENODEV; 2937 + for (nb = 0; nb < amd_nb_num(); nb++) { 2938 if (!pvt_lookup[nb]) 2939 continue; 2940