Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86: cache_info: Kill the moronic shadow struct

Commit f9b90566c ("x86: reduce stack usage in init_intel_cacheinfo")
introduced a shadow structure to reduce the stack usage on large
machines instead of making the smaller structure embedded into the
large one. That's definitely a candidate for the bad taste award.

Move the small struct into the large one and get rid of the ugly type
casts.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Hans Rosenfeld <hans.rosenfeld@amd.com>
Cc: Borislav Petkov <borislav.petkov@amd.com>
Cc: Andreas Herrmann <andreas.herrmann3@amd.com>
Cc: Mike Travis <travis@sgi.com>
Link: http://lkml.kernel.org/r/20110723212626.625651773@linutronix.de
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Authored by Thomas Gleixner; committed by Ingo Molnar.
b7d11a76 05b217b0

+23 -39
arch/x86/kernel/cpu/intel_cacheinfo.c
··· 157 157 u8 subcaches[4]; 158 158 }; 159 159 160 - struct _cpuid4_info { 161 - union _cpuid4_leaf_eax eax; 162 - union _cpuid4_leaf_ebx ebx; 163 - union _cpuid4_leaf_ecx ecx; 164 - unsigned long size; 165 - struct amd_l3_cache *l3; 166 - DECLARE_BITMAP(shared_cpu_map, NR_CPUS); 167 - }; 168 - 169 - /* subset of above _cpuid4_info w/o shared_cpu_map */ 170 160 struct _cpuid4_info_regs { 171 161 union _cpuid4_leaf_eax eax; 172 162 union _cpuid4_leaf_ebx ebx; 173 163 union _cpuid4_leaf_ecx ecx; 174 164 unsigned long size; 175 165 struct amd_l3_cache *l3; 166 + }; 167 + 168 + struct _cpuid4_info { 169 + struct _cpuid4_info_regs base; 170 + DECLARE_BITMAP(shared_cpu_map, NR_CPUS); 176 171 }; 177 172 178 173 unsigned short num_cache_leaves; ··· 382 387 { 383 388 int index; 384 389 385 - if (!this_leaf->l3 || 386 - !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) 390 + if (!this_leaf->base.l3 || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) 387 391 return -EINVAL; 388 392 389 - index = amd_get_l3_disable_slot(this_leaf->l3, slot); 393 + index = amd_get_l3_disable_slot(this_leaf->base.l3, slot); 390 394 if (index >= 0) 391 395 return sprintf(buf, "%d\n", index); 392 396 ··· 474 480 if (!capable(CAP_SYS_ADMIN)) 475 481 return -EPERM; 476 482 477 - if (!this_leaf->l3 || 478 - !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) 483 + if (!this_leaf->base.l3 || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) 479 484 return -EINVAL; 480 485 481 486 cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map)); ··· 482 489 if (strict_strtoul(buf, 10, &val) < 0) 483 490 return -EINVAL; 484 491 485 - err = amd_set_l3_disable_slot(this_leaf->l3, cpu, slot, val); 492 + err = amd_set_l3_disable_slot(this_leaf->base.l3, cpu, slot, val); 486 493 if (err) { 487 494 if (err == -EEXIST) 488 495 printk(KERN_WARNING "L3 disable slot %d in use!\n", ··· 511 518 static ssize_t 512 519 show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu) 513 520 { 514 - if (!this_leaf->l3 || 
!amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) 521 + if (!this_leaf->base.l3 || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) 515 522 return -EINVAL; 516 523 517 524 return sprintf(buf, "%x\n", amd_get_subcaches(cpu)); ··· 526 533 if (!capable(CAP_SYS_ADMIN)) 527 534 return -EPERM; 528 535 529 - if (!this_leaf->l3 || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) 536 + if (!this_leaf->base.l3 || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) 530 537 return -EINVAL; 531 538 532 539 if (strict_strtoul(buf, 16, &val) < 0) ··· 762 769 return; 763 770 } 764 771 this_leaf = CPUID4_INFO_IDX(cpu, index); 765 - num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing; 772 + num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing; 766 773 767 774 if (num_threads_sharing == 1) 768 775 cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map)); ··· 817 824 per_cpu(ici_cpuid4_info, cpu) = NULL; 818 825 } 819 826 820 - static int 821 - __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf) 822 - { 823 - struct _cpuid4_info_regs *leaf_regs = 824 - (struct _cpuid4_info_regs *)this_leaf; 825 - 826 - return cpuid4_cache_lookup_regs(index, leaf_regs); 827 - } 828 - 829 827 static void __cpuinit get_cpu_leaves(void *_retval) 830 828 { 831 829 int j, *retval = _retval, cpu = smp_processor_id(); 832 830 833 831 /* Do cpuid and store the results */ 834 832 for (j = 0; j < num_cache_leaves; j++) { 835 - struct _cpuid4_info *this_leaf; 836 - this_leaf = CPUID4_INFO_IDX(cpu, j); 837 - *retval = cpuid4_cache_lookup(j, this_leaf); 833 + struct _cpuid4_info *this_leaf = CPUID4_INFO_IDX(cpu, j); 834 + 835 + *retval = cpuid4_cache_lookup_regs(j, &this_leaf->base); 838 836 if (unlikely(*retval < 0)) { 839 837 int i; 840 838 ··· 883 899 return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \ 884 900 } 885 901 886 - show_one_plus(level, eax.split.level, 0); 887 - show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1); 888 - 
show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1); 889 - show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1); 890 - show_one_plus(number_of_sets, ecx.split.number_of_sets, 1); 902 + show_one_plus(level, base.eax.split.level, 0); 903 + show_one_plus(coherency_line_size, base.ebx.split.coherency_line_size, 1); 904 + show_one_plus(physical_line_partition, base.ebx.split.physical_line_partition, 1); 905 + show_one_plus(ways_of_associativity, base.ebx.split.ways_of_associativity, 1); 906 + show_one_plus(number_of_sets, base.ecx.split.number_of_sets, 1); 891 907 892 908 static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf, 893 909 unsigned int cpu) 894 910 { 895 - return sprintf(buf, "%luK\n", this_leaf->size / 1024); 911 + return sprintf(buf, "%luK\n", this_leaf->base.size / 1024); 896 912 } 897 913 898 914 static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf, ··· 929 945 static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf, 930 946 unsigned int cpu) 931 947 { 932 - switch (this_leaf->eax.split.type) { 948 + switch (this_leaf->base.eax.split.type) { 933 949 case CACHE_TYPE_DATA: 934 950 return sprintf(buf, "Data\n"); 935 951 case CACHE_TYPE_INST: ··· 1118 1134 1119 1135 ktype_cache.default_attrs = default_attrs; 1120 1136 #ifdef CONFIG_AMD_NB 1121 - if (this_leaf->l3) 1137 + if (this_leaf->base.l3) 1122 1138 ktype_cache.default_attrs = amd_l3_attrs(); 1123 1139 #endif 1124 1140 retval = kobject_init_and_add(&(this_object->kobj),