x86, amd-nb: Cleanup AMD northbridge caching code

Support more than just the "Misc Control" part of the northbridges. Make
room for additional flags by turning "gart_supported" into a single bit
in a new flags member. Clean up related code by using a set of accessor
functions (amd_nb_num(), amd_nb_has_feature() and node_to_amd_nb())
instead of accessing the NB data structures directly. Reorder the
initialization code and move the caching of the GART flush words into a
separate function.

Signed-off-by: Hans Rosenfeld <hans.rosenfeld@amd.com>
Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>

Authored by Hans Rosenfeld and committed by Borislav Petkov (9653a5c7, eec1d4fa)
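For illustration only, here is a minimal sketch (not part of the patch) of how a caller is expected to use the new accessors. walk_gart_nbs() is a hypothetical name; amd_nb_num(), amd_nb_has_feature(), node_to_amd_nb() and AMD_NB_GART are the helpers actually introduced by the diffs below:

#include <linux/pci.h>
#include <asm/amd_nb.h>

/* Hypothetical example: visit the "Misc Control" PCI device of every
 * cached northbridge, but only when the GART feature bit is set. */
static void walk_gart_nbs(void)
{
        int i;

        if (!amd_nb_has_feature(AMD_NB_GART))
                return;

        for (i = 0; i < amd_nb_num(); i++) {
                struct pci_dev *misc = node_to_amd_nb(i)->misc;

                dev_info(&misc->dev, "northbridge %d cached\n", i);
        }
}

Note that this only does something after amd_cache_northbridges() has populated the cache; before that, amd_nb_num() returns 0 and the loop body never runs.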

6 files changed: +120 -90
+26 -10
arch/x86/include/asm/amd_nb.h
···
 
 #include <linux/pci.h>
 
-extern struct pci_device_id amd_nb_ids[];
+extern struct pci_device_id amd_nb_misc_ids[];
 struct bootnode;
 
 extern int early_is_amd_nb(u32 value);
-extern int cache_amd_northbridges(void);
+extern int amd_cache_northbridges(void);
 extern void amd_flush_garts(void);
 extern int amd_get_nodes(struct bootnode *nodes);
 extern int amd_numa_init(unsigned long start_pfn, unsigned long end_pfn);
 extern int amd_scan_nodes(void);
 
+struct amd_northbridge {
+        struct pci_dev *misc;
+};
+
 struct amd_northbridge_info {
         u16 num;
-        u8 gart_supported;
-        struct pci_dev **nb_misc;
+        u64 flags;
+        struct amd_northbridge *nb;
 };
 extern struct amd_northbridge_info amd_northbridges;
 
+#define AMD_NB_GART 0x1
+
 #ifdef CONFIG_AMD_NB
 
-static inline struct pci_dev *node_to_amd_nb_misc(int node)
+static inline int amd_nb_num(void)
 {
-        return (node < amd_northbridges.num) ? amd_northbridges.nb_misc[node] : NULL;
+        return amd_northbridges.num;
+}
+
+static inline int amd_nb_has_feature(int feature)
+{
+        return ((amd_northbridges.flags & feature) == feature);
+}
+
+static inline struct amd_northbridge *node_to_amd_nb(int node)
+{
+        return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
 }
 
 #else
 
-static inline struct pci_dev *node_to_amd_nb_misc(int node)
-{
-        return NULL;
-}
+#define amd_nb_num(x) 0
+#define amd_nb_has_feature(x) false
+#define node_to_amd_nb(x) NULL
+
 #endif
 
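Since flags is a u64 and amd_nb_has_feature() just masks against it, later patches can add further capability bits next to AMD_NB_GART without touching any callers. A hypothetical sketch (AMD_NB_EXAMPLE is an invented bit, not part of this patch):

#include <asm/amd_nb.h>

/* Invented feature bit, for illustration only; a real patch would
 * define it next to AMD_NB_GART in amd_nb.h. */
#define AMD_NB_EXAMPLE 0x2

static void detect_example_feature(void)
{
        /* detection code sets the bit once during initialization ... */
        amd_northbridges.flags |= AMD_NB_EXAMPLE;
}

static int example_feature_usable(void)
{
        /* ... and every user tests it exactly like the GART bit */
        return amd_nb_has_feature(AMD_NB_EXAMPLE);
}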
+63 -48
arch/x86/kernel/amd_nb.c
···
 
 static u32 *flush_words;
 
-struct pci_device_id amd_nb_ids[] = {
+struct pci_device_id amd_nb_misc_ids[] = {
         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_MISC) },
         {}
 };
-EXPORT_SYMBOL(amd_nb_ids);
+EXPORT_SYMBOL(amd_nb_misc_ids);
 
 struct amd_northbridge_info amd_northbridges;
 EXPORT_SYMBOL(amd_northbridges);
 
-static struct pci_dev *next_amd_northbridge(struct pci_dev *dev)
+static struct pci_dev *next_northbridge(struct pci_dev *dev,
+                                        struct pci_device_id *ids)
 {
         do {
                 dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
                 if (!dev)
                         break;
-        } while (!pci_match_id(&amd_nb_ids[0], dev));
+        } while (!pci_match_id(ids, dev));
         return dev;
 }
 
-int cache_amd_northbridges(void)
+int amd_cache_northbridges(void)
 {
-        int i;
-        struct pci_dev *dev;
+        int i = 0;
+        struct amd_northbridge *nb;
+        struct pci_dev *misc;
 
-        if (amd_northbridges.num)
+        if (amd_nb_num())
                 return 0;
 
-        dev = NULL;
-        while ((dev = next_amd_northbridge(dev)) != NULL)
-                amd_northbridges.num++;
+        misc = NULL;
+        while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
+                i++;
+
+        if (i == 0)
+                return 0;
+
+        nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
+        if (!nb)
+                return -ENOMEM;
+
+        amd_northbridges.nb = nb;
+        amd_northbridges.num = i;
+
+        misc = NULL;
+        for (i = 0; i != amd_nb_num(); i++) {
+                node_to_amd_nb(i)->misc = misc =
+                        next_northbridge(misc, amd_nb_misc_ids);
+        }
 
         /* some CPU families (e.g. family 0x11) do not support GART */
         if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
             boot_cpu_data.x86 == 0x15)
-                amd_northbridges.gart_supported = 1;
+                amd_northbridges.flags |= AMD_NB_GART;
 
-        amd_northbridges.nb_misc = kmalloc((amd_northbridges.num + 1) *
-                                           sizeof(void *), GFP_KERNEL);
-        if (!amd_northbridges.nb_misc)
-                return -ENOMEM;
-
-        if (!amd_northbridges.num) {
-                amd_northbridges.nb_misc[0] = NULL;
-                return 0;
-        }
-
-        if (amd_northbridges.gart_supported) {
-                flush_words = kmalloc(amd_northbridges.num * sizeof(u32),
-                                      GFP_KERNEL);
-                if (!flush_words) {
-                        kfree(amd_northbridges.nb_misc);
-                        return -ENOMEM;
-                }
-        }
-
-        dev = NULL;
-        i = 0;
-        while ((dev = next_amd_northbridge(dev)) != NULL) {
-                amd_northbridges.nb_misc[i] = dev;
-                if (amd_northbridges.gart_supported)
-                        pci_read_config_dword(dev, 0x9c, &flush_words[i++]);
-        }
-        amd_northbridges.nb_misc[i] = NULL;
         return 0;
 }
-EXPORT_SYMBOL_GPL(cache_amd_northbridges);
+EXPORT_SYMBOL_GPL(amd_cache_northbridges);
 
 /* Ignores subdevice/subvendor but as far as I can figure out
    they're useless anyways */
···
         struct pci_device_id *id;
         u32 vendor = device & 0xffff;
         device >>= 16;
-        for (id = amd_nb_ids; id->vendor; id++)
+        for (id = amd_nb_misc_ids; id->vendor; id++)
                 if (vendor == id->vendor && device == id->device)
                         return 1;
         return 0;
+}
+
+int amd_cache_gart(void)
+{
+        int i;
+
+        if (!amd_nb_has_feature(AMD_NB_GART))
+                return 0;
+
+        flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
+        if (!flush_words) {
+                amd_northbridges.flags &= ~AMD_NB_GART;
+                return -ENOMEM;
+        }
+
+        for (i = 0; i != amd_nb_num(); i++)
+                pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
+                                      &flush_words[i]);
+
+        return 0;
 }
 
 void amd_flush_garts(void)
···
         unsigned long flags;
         static DEFINE_SPINLOCK(gart_lock);
 
-        if (!amd_northbridges.gart_supported)
+        if (!amd_nb_has_feature(AMD_NB_GART))
                 return;
 
         /* Avoid races between AGP and IOMMU. In theory it's not needed
···
            that it doesn't matter to serialize more. -AK */
         spin_lock_irqsave(&gart_lock, flags);
         flushed = 0;
-        for (i = 0; i < amd_northbridges.num; i++) {
-                pci_write_config_dword(amd_northbridges.nb_misc[i], 0x9c,
-                                       flush_words[i]|1);
+        for (i = 0; i < amd_nb_num(); i++) {
+                pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
+                                       flush_words[i] | 1);
                 flushed++;
         }
-        for (i = 0; i < amd_northbridges.num; i++) {
+        for (i = 0; i < amd_nb_num(); i++) {
                 u32 w;
                 /* Make sure the hardware actually executed the flush*/
                 for (;;) {
-                        pci_read_config_dword(amd_northbridges.nb_misc[i],
+                        pci_read_config_dword(node_to_amd_nb(i)->misc,
                                               0x9c, &w);
                         if (!(w & 1))
                                 break;
···
 {
         int err = 0;
 
-        err = cache_amd_northbridges();
+        err = amd_cache_northbridges();
 
         if (err < 0)
                 printk(KERN_NOTICE "AMD NB: Cannot enumerate AMD northbridges.\n");
+
+        if (amd_cache_gart() < 0)
+                printk(KERN_NOTICE "AMD NB: Cannot initialize GART flush words, "
+                       "GART support disabled.\n");
 
         return err;
 }
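Note the error-handling choice in amd_cache_gart() above: when the flush_words allocation fails, the AMD_NB_GART bit is cleared before returning -ENOMEM, so every subsequent amd_nb_has_feature(AMD_NB_GART) check (in amd_flush_garts() and in the GART/AGP users below) simply sees GART as unsupported instead of dereferencing a NULL flush_words array.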
+3 -3
arch/x86/kernel/cpu/intel_cacheinfo.c
···
 static struct amd_l3_cache * __cpuinit amd_init_l3_cache(int node)
 {
         struct amd_l3_cache *l3;
-        struct pci_dev *dev = node_to_amd_nb_misc(node);
+        struct pci_dev *dev = node_to_amd_nb(node)->misc;
 
         l3 = kzalloc(sizeof(struct amd_l3_cache), GFP_ATOMIC);
         if (!l3) {
···
                 return;
 
         /* not in virtualized environments */
-        if (amd_northbridges.num == 0)
+        if (amd_nb_num() == 0)
                 return;
 
         /*
···
          * never freed but this is done only on shutdown so it doesn't matter.
          */
         if (!l3_caches) {
-                int size = amd_northbridges.num * sizeof(struct amd_l3_cache *);
+                int size = amd_nb_num() * sizeof(struct amd_l3_cache *);
 
                 l3_caches = kzalloc(size, GFP_ATOMIC);
                 if (!l3_caches)
+12 -12
arch/x86/kernel/pci-gart_64.c
···
 {
         int i;
 
-        if (!amd_northbridges.gart_supported)
+        if (!amd_nb_has_feature(AMD_NB_GART))
                 return;
 
-        for (i = 0; i < amd_northbridges.num; i++) {
-                struct pci_dev *dev = amd_northbridges.nb_misc[i];
+        for (i = 0; i < amd_nb_num(); i++) {
+                struct pci_dev *dev = node_to_amd_nb(i)->misc;
 
                 enable_gart_translation(dev, __pa(agp_gatt_table));
         }
···
         if (!fix_up_north_bridges)
                 return;
 
-        if (!amd_northbridges.gart_supported)
+        if (!amd_nb_has_feature(AMD_NB_GART))
                 return;
 
         pr_info("PCI-DMA: Restoring GART aperture settings\n");
 
-        for (i = 0; i < amd_northbridges.num; i++) {
-                struct pci_dev *dev = amd_northbridges.nb_misc[i];
+        for (i = 0; i < amd_nb_num(); i++) {
+                struct pci_dev *dev = node_to_amd_nb(i)->misc;
 
                 /*
                  * Don't enable translations just yet. That is the next
···
 
         aper_size = aper_base = info->aper_size = 0;
         dev = NULL;
-        for (i = 0; i < amd_northbridges.num; i++) {
-                dev = amd_northbridges.nb_misc[i];
+        for (i = 0; i < amd_nb_num(); i++) {
+                dev = node_to_amd_nb(i)->misc;
                 new_aper_base = read_aperture(dev, &new_aper_size);
                 if (!new_aper_base)
                         goto nommu;
···
         if (!no_agp)
                 return;
 
-        if (!amd_northbridges.gart_supported)
+        if (!amd_nb_has_feature(AMD_NB_GART))
                 return;
 
-        for (i = 0; i < amd_northbridges.num; i++) {
+        for (i = 0; i < amd_nb_num(); i++) {
                 u32 ctl;
 
-                dev = amd_northbridges.nb_misc[i];
+                dev = node_to_amd_nb(i)->misc;
                 pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);
 
                 ctl &= ~GARTEN;
···
         unsigned long scratch;
         long i;
 
-        if (!amd_northbridges.gart_supported)
+        if (!amd_nb_has_feature(AMD_NB_GART))
                 return 0;
 
 #ifndef CONFIG_AGP_AMD64
+14 -15
drivers/char/agp/amd64-agp.c
···
         u32 temp;
         struct aper_size_info_32 *values;
 
-        dev = amd_northbridges.nb_misc[0];
+        dev = node_to_amd_nb(0)->misc;
         if (dev==NULL)
                 return 0;
 
···
         unsigned long gatt_bus = virt_to_phys(agp_bridge->gatt_table_real);
         int i;
 
-        if (!amd_northbridges.gart_supported)
+        if (!amd_nb_has_feature(AMD_NB_GART))
                 return 0;
 
         /* Configure AGP regs in each x86-64 host bridge. */
-        for (i = 0; i < amd_northbridges.num; i++) {
+        for (i = 0; i < amd_nb_num(); i++) {
                 agp_bridge->gart_bus_addr =
-                        amd64_configure(amd_northbridges.nb_misc[i],
-                                        gatt_bus);
+                        amd64_configure(node_to_amd_nb(i)->misc, gatt_bus);
         }
         amd_flush_garts();
         return 0;
···
         u32 tmp;
         int i;
 
-        if (!amd_northbridges.gart_supported)
+        if (!amd_nb_has_feature(AMD_NB_GART))
                 return;
 
-        for (i = 0; i < amd_northbridges.num; i++) {
-                struct pci_dev *dev = amd_northbridges.nb_misc[i];
+        for (i = 0; i < amd_nb_num(); i++) {
+                struct pci_dev *dev = node_to_amd_nb(i)->misc;
                 /* disable gart translation */
                 pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &tmp);
                 tmp &= ~GARTEN;
···
 {
         int i;
 
-        if (cache_amd_northbridges() < 0)
+        if (amd_cache_northbridges() < 0)
                 return -ENODEV;
 
-        if (!amd_northbridges.gart_supported)
+        if (!amd_nb_has_feature(AMD_NB_GART))
                 return -ENODEV;
 
         i = 0;
-        for (i = 0; i < amd_northbridges.num; i++) {
-                struct pci_dev *dev = amd_northbridges.nb_misc[i];
+        for (i = 0; i < amd_nb_num(); i++) {
+                struct pci_dev *dev = node_to_amd_nb(i)->misc;
                 if (fix_northbridge(dev, pdev, cap_ptr) < 0) {
                         dev_err(&dev->dev, "no usable aperture found\n");
 #ifdef __x86_64__
···
         }
 
         /* shadow x86-64 registers into ULi registers */
-        pci_read_config_dword (amd_northbridges.nb_misc[0], AMD64_GARTAPERTUREBASE,
+        pci_read_config_dword (node_to_amd_nb(0)->misc, AMD64_GARTAPERTUREBASE,
                                &httfea);
 
         /* if x86-64 aperture base is beyond 4G, exit here */
···
         pci_write_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, tmp);
 
         /* shadow x86-64 registers into NVIDIA registers */
-        pci_read_config_dword (amd_northbridges.nb_misc[0], AMD64_GARTAPERTUREBASE,
+        pci_read_config_dword (node_to_amd_nb(0)->misc, AMD64_GARTAPERTUREBASE,
                                &apbase);
 
         /* if x86-64 aperture base is beyond 4G, exit here */
···
         }
 
         /* First check that we have at least one AMD64 NB */
-        if (!pci_dev_present(amd_nb_ids))
+        if (!pci_dev_present(amd_nb_misc_ids))
                 return -ENODEV;
 
         /* Look for any AGP bridge */
+2 -2
drivers/edac/amd64_edac.c
···
 
         opstate_init();
 
-        if (cache_amd_northbridges() < 0)
+        if (amd_cache_northbridges() < 0)
                 goto err_ret;
 
         msrs = msrs_alloc();
···
          * to finish initialization of the MC instances.
          */
         err = -ENODEV;
-        for (nb = 0; nb < amd_northbridges.num; nb++) {
+        for (nb = 0; nb < amd_nb_num(); nb++) {
                 if (!pvt_lookup[nb])
                         continue;
 