x86, cacheinfo: Cleanup L3 cache index disable support

Adaptations to the changes in the AMD northbridge caching code: instead
of a bool in each l3 struct, use a flag in amd_northbridges.flags to
indicate L3 cache index disable support; use a pointer to the whole
northbridge instead of the misc device in the l3 struct; simplify the
initialisation; dynamically generate the sysfs attribute array.

Signed-off-by: Hans Rosenfeld <hans.rosenfeld@amd.com>
Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
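The per-L3 can_disable bool becomes a global northbridge feature flag. Below is
a minimal sketch of how such a flag is meant to be queried, assuming the
amd_nb_has_feature() helper and amd_northbridges.flags introduced by the
preceding northbridge cleanup; l3_index_disable_supported() is a hypothetical
wrapper used only for illustration, not part of this patch:

/* Sketch only: how AMD_NB_L3_INDEX_DISABLE is expected to be tested. */
#include <asm/amd_nb.h>

static bool l3_index_disable_supported(void)
{
        /* amd_northbridges.flags is filled in once by amd_cache_northbridges() */
        return amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE);
}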

authored by Hans Rosenfeld, committed by Borislav Petkov
commit f658bcfb, parent 9653a5c7
3 files changed: +74 -84

arch/x86/include/asm/amd_nb.h (+1)
···
 extern struct amd_northbridge_info amd_northbridges;

 #define AMD_NB_GART                    0x1
+#define AMD_NB_L3_INDEX_DISABLE        0x2


arch/x86/kernel/amd_nb.c (+10)
···
            boot_cpu_data.x86 == 0x15)
                amd_northbridges.flags |= AMD_NB_GART;

+       /*
+        * Some CPU families support L3 Cache Index Disable. There are some
+        * limitations because of E382 and E388 on family 0x10.
+        */
+       if (boot_cpu_data.x86 == 0x10 &&
+           boot_cpu_data.x86_model >= 0x8 &&
+           (boot_cpu_data.x86_model > 0x9 ||
+            boot_cpu_data.x86_mask >= 0x1))
+               amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(amd_cache_northbridges);
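The condition added above encodes errata #382/#388: on family 0x10, models 0x8
and 0x9 need at least stepping 1, while later models qualify unconditionally.
A small stand-alone sketch that evaluates the same gate for a few model/stepping
pairs; the l3_cid_ok() helper is hypothetical and only mirrors the expression:

/* Stand-alone illustration of the family/model/stepping gate used above. */
#include <stdbool.h>
#include <stdio.h>

static bool l3_cid_ok(unsigned family, unsigned model, unsigned stepping)
{
        return family == 0x10 &&
               model >= 0x8 &&
               (model > 0x9 || stepping >= 0x1);
}

int main(void)
{
        /* models 0x8/0x9 at stepping 0 are excluded, the rest pass */
        printf("%d %d %d %d\n",
               l3_cid_ok(0x10, 0x8, 0x0),       /* 0 */
               l3_cid_ok(0x10, 0x8, 0x1),       /* 1 */
               l3_cid_ok(0x10, 0x9, 0x0),       /* 0 */
               l3_cid_ok(0x10, 0xa, 0x0));      /* 1 */
        return 0;
}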

arch/x86/kernel/cpu/intel_cacheinfo.c (+63 -84)
···
 };

 struct amd_l3_cache {
-       struct pci_dev *dev;
-       bool can_disable;
+       struct amd_northbridge *nb;
        unsigned indices;
        u8 subcaches[4];
 };
···
 /*
  * L3 cache descriptors
  */
-static struct amd_l3_cache **__cpuinitdata l3_caches;
-
 static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
 {
        unsigned int sc0, sc1, sc2, sc3;
        u32 val = 0;

-       pci_read_config_dword(l3->dev, 0x1C4, &val);
+       pci_read_config_dword(l3->nb->misc, 0x1C4, &val);

        /* calculate subcache sizes */
        l3->subcaches[0] = sc0 = !(val & BIT(0));
···
        l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
 }

-static struct amd_l3_cache * __cpuinit amd_init_l3_cache(int node)
+static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf,
+                                       int index)
 {
-       struct amd_l3_cache *l3;
-       struct pci_dev *dev = node_to_amd_nb(node)->misc;
-
-       l3 = kzalloc(sizeof(struct amd_l3_cache), GFP_ATOMIC);
-       if (!l3) {
-               printk(KERN_WARNING "Error allocating L3 struct\n");
-               return NULL;
-       }
-
-       l3->dev = dev;
-
-       amd_calc_l3_indices(l3);
-
-       return l3;
-}
-
-static void __cpuinit amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf,
-                                          int index)
-{
+       static struct amd_l3_cache *__cpuinitdata l3_caches;
        int node;

-       if (boot_cpu_data.x86 != 0x10)
-               return;
-
-       if (index < 3)
-               return;
-
-       /* see errata #382 and #388 */
-       if (boot_cpu_data.x86_model < 0x8)
-               return;
-
-       if ((boot_cpu_data.x86_model == 0x8 ||
-            boot_cpu_data.x86_model == 0x9)
-                               &&
-            boot_cpu_data.x86_mask < 0x1)
-               return;
-
-       /* not in virtualized environments */
-       if (amd_nb_num() == 0)
+       /* only for L3, and not in virtualized environments */
+       if (index < 3 || amd_nb_num() == 0)
                return;

        /*
···
         * never freed but this is done only on shutdown so it doesn't matter.
         */
        if (!l3_caches) {
-               int size = amd_nb_num() * sizeof(struct amd_l3_cache *);
+               int size = amd_nb_num() * sizeof(struct amd_l3_cache);

                l3_caches = kzalloc(size, GFP_ATOMIC);
                if (!l3_caches)
···
        node = amd_get_nb_id(smp_processor_id());

-       if (!l3_caches[node]) {
-               l3_caches[node] = amd_init_l3_cache(node);
-               l3_caches[node]->can_disable = true;
+       if (!l3_caches[node].nb) {
+               l3_caches[node].nb = node_to_amd_nb(node);
+               amd_calc_l3_indices(&l3_caches[node]);
        }

-       WARN_ON(!l3_caches[node]);
-
-       this_leaf->l3 = l3_caches[node];
+       this_leaf->l3 = &l3_caches[node];
 }

 /*
···
 {
        unsigned int reg = 0;

-       pci_read_config_dword(l3->dev, 0x1BC + slot * 4, &reg);
+       pci_read_config_dword(l3->nb->misc, 0x1BC + slot * 4, &reg);

        /* check whether this slot is activated already */
        if (reg & (3UL << 30))
···
 {
        int index;

-       if (!this_leaf->l3 || !this_leaf->l3->can_disable)
+       if (!this_leaf->l3 ||
+           !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
                return -EINVAL;

        index = amd_get_l3_disable_slot(this_leaf->l3, slot);
···
                if (!l3->subcaches[i])
                        continue;

-               pci_write_config_dword(l3->dev, 0x1BC + slot * 4, reg);
+               pci_write_config_dword(l3->nb->misc, 0x1BC + slot * 4, reg);

                /*
                 * We need to WBINVD on a core on the node containing the L3
···
                wbinvd_on_cpu(cpu);

                reg |= BIT(31);
-               pci_write_config_dword(l3->dev, 0x1BC + slot * 4, reg);
+               pci_write_config_dword(l3->nb->misc, 0x1BC + slot * 4, reg);
        }
 }

···
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

-       if (!this_leaf->l3 || !this_leaf->l3->can_disable)
+       if (!this_leaf->l3 ||
+           !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
                return -EINVAL;

        cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
···
 #define STORE_CACHE_DISABLE(slot)                                      \
 static ssize_t                                                         \
 store_cache_disable_##slot(struct _cpuid4_info *this_leaf,             \
-       const char *buf, size_t count)                                  \
+                          const char *buf, size_t count)               \
 {                                                                      \
        return store_cache_disable(this_leaf, buf, count, slot);       \
 }
···
                show_cache_disable_1, store_cache_disable_1);

 #else  /* CONFIG_AMD_NB */
-static void __cpuinit
-amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf, int index)
-{
-};
+#define amd_init_l3_cache(x, y)
 #endif /* CONFIG_AMD_NB */

 static int
···
        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
                amd_cpuid4(index, &eax, &ebx, &ecx);
-               amd_check_l3_disable(this_leaf, index);
+               amd_init_l3_cache(this_leaf, index);
        } else {
                cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
        }
···
 define_one_ro(shared_cpu_map);
 define_one_ro(shared_cpu_list);

-#define DEFAULT_SYSFS_CACHE_ATTRS      \
-       &type.attr,                     \
-       &level.attr,                    \
-       &coherency_line_size.attr,      \
-       &physical_line_partition.attr,  \
-       &ways_of_associativity.attr,    \
-       &number_of_sets.attr,           \
-       &size.attr,                     \
-       &shared_cpu_map.attr,           \
-       &shared_cpu_list.attr
-
 static struct attribute *default_attrs[] = {
-       DEFAULT_SYSFS_CACHE_ATTRS,
+       &type.attr,
+       &level.attr,
+       &coherency_line_size.attr,
+       &physical_line_partition.attr,
+       &ways_of_associativity.attr,
+       &number_of_sets.attr,
+       &size.attr,
+       &shared_cpu_map.attr,
+       &shared_cpu_list.attr,
        NULL
 };

-static struct attribute *default_l3_attrs[] = {
-       DEFAULT_SYSFS_CACHE_ATTRS,
 #ifdef CONFIG_AMD_NB
-       &cache_disable_0.attr,
-       &cache_disable_1.attr,
+static struct attribute ** __cpuinit amd_l3_attrs(void)
+{
+       static struct attribute **attrs;
+       int n;
+
+       if (attrs)
+               return attrs;
+
+       n = sizeof (default_attrs) / sizeof (struct attribute *);
+
+       if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
+               n += 2;
+
+       attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
+       if (attrs == NULL)
+               return attrs = default_attrs;
+
+       for (n = 0; default_attrs[n]; n++)
+               attrs[n] = default_attrs[n];
+
+       if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
+               attrs[n++] = &cache_disable_0.attr;
+               attrs[n++] = &cache_disable_1.attr;
+       }
+
+       return attrs;
+}
 #endif
-       NULL
-};

 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
 {
···
                this_leaf = CPUID4_INFO_IDX(cpu, i);

-               if (this_leaf->l3 && this_leaf->l3->can_disable)
-                       ktype_cache.default_attrs = default_l3_attrs;
-               else
-                       ktype_cache.default_attrs = default_attrs;
-
+               ktype_cache.default_attrs = default_attrs;
+#ifdef CONFIG_AMD_NB
+               if (this_leaf->l3)
+                       ktype_cache.default_attrs = amd_l3_attrs();
+#endif
                retval = kobject_init_and_add(&(this_object->kobj),
                                              &ktype_cache,
                                              per_cpu(ici_cache_kobject, cpu),
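Once the dynamically built attribute array is registered, cache_disable_0/1
appear under the L3 leaf only when the northbridge advertises
AMD_NB_L3_INDEX_DISABLE. A user-space sketch for reading one of those files;
the cpu0/index3 path is only an example and assumes the usual cacheinfo sysfs
layout:

/* Illustration only: read an L3 cache_disable slot exposed by this driver. */
#include <stdio.h>

int main(void)
{
        const char *path =
                "/sys/devices/system/cpu/cpu0/cache/index3/cache_disable_0";
        char buf[64];
        FILE *f = fopen(path, "r");

        if (!f) {
                perror(path);   /* absent when the feature flag is not set */
                return 1;
        }
        if (fgets(buf, sizeof(buf), f))
                printf("cache_disable_0: %s", buf);
        fclose(f);
        return 0;
}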