Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] x86-64/i386: Intel HT, Multi core detection fixes

Fields obtained through cpuid vector 0x1 (ebx[16:23]) and
vector 0x4 (eax[14:25], eax[26:31]) indicate the maximum values and might not
always be the same as what is available and what the OS sees. So make sure the
"siblings" and "cpu cores" values in /proc/cpuinfo reflect the values as seen
by the OS instead of what the cpuid instruction says. This will also fix the
buggy BIOS cases (for example, where cpuid on a single-core cpu says there are
"2" siblings even when HT is disabled in the BIOS; see
http://bugzilla.kernel.org/show_bug.cgi?id=4359).

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by: Siddha, Suresh B
Committed by: Linus Torvalds
Commit hashes: 94605eff e90f22ed (presumably commit and parent — verify against the repository)

+178 -110
+6 -6
arch/i386/kernel/cpu/amd.c
··· 206 206 display_cacheinfo(c); 207 207 208 208 if (cpuid_eax(0x80000000) >= 0x80000008) { 209 - c->x86_num_cores = (cpuid_ecx(0x80000008) & 0xff) + 1; 210 - if (c->x86_num_cores & (c->x86_num_cores - 1)) 211 - c->x86_num_cores = 1; 209 + c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1; 210 + if (c->x86_max_cores & (c->x86_max_cores - 1)) 211 + c->x86_max_cores = 1; 212 212 } 213 213 214 214 #ifdef CONFIG_X86_HT ··· 217 217 * distingush the cores. Assumes number of cores is a power 218 218 * of two. 219 219 */ 220 - if (c->x86_num_cores > 1) { 220 + if (c->x86_max_cores > 1) { 221 221 int cpu = smp_processor_id(); 222 222 unsigned bits = 0; 223 - while ((1 << bits) < c->x86_num_cores) 223 + while ((1 << bits) < c->x86_max_cores) 224 224 bits++; 225 225 cpu_core_id[cpu] = phys_proc_id[cpu] & ((1<<bits)-1); 226 226 phys_proc_id[cpu] >>= bits; 227 227 printk(KERN_INFO "CPU %d(%d) -> Core %d\n", 228 - cpu, c->x86_num_cores, cpu_core_id[cpu]); 228 + cpu, c->x86_max_cores, cpu_core_id[cpu]); 229 229 } 230 230 #endif 231 231 }
+14 -22
arch/i386/kernel/cpu/common.c
··· 335 335 c->x86_model = c->x86_mask = 0; /* So far unknown... */ 336 336 c->x86_vendor_id[0] = '\0'; /* Unset */ 337 337 c->x86_model_id[0] = '\0'; /* Unset */ 338 - c->x86_num_cores = 1; 338 + c->x86_max_cores = 1; 339 339 memset(&c->x86_capability, 0, sizeof c->x86_capability); 340 340 341 341 if (!have_cpuid_p()) { ··· 446 446 void __devinit detect_ht(struct cpuinfo_x86 *c) 447 447 { 448 448 u32 eax, ebx, ecx, edx; 449 - int index_msb, tmp; 449 + int index_msb, core_bits; 450 450 int cpu = smp_processor_id(); 451 + 452 + cpuid(1, &eax, &ebx, &ecx, &edx); 453 + 454 + c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0); 451 455 452 456 if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY)) 453 457 return; 454 458 455 - cpuid(1, &eax, &ebx, &ecx, &edx); 456 459 smp_num_siblings = (ebx & 0xff0000) >> 16; 457 460 458 461 if (smp_num_siblings == 1) { 459 462 printk(KERN_INFO "CPU: Hyper-Threading is disabled\n"); 460 463 } else if (smp_num_siblings > 1 ) { 461 - index_msb = 31; 462 464 463 465 if (smp_num_siblings > NR_CPUS) { 464 466 printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings); 465 467 smp_num_siblings = 1; 466 468 return; 467 469 } 468 - tmp = smp_num_siblings; 469 - while ((tmp & 0x80000000 ) == 0) { 470 - tmp <<=1 ; 471 - index_msb--; 472 - } 473 - if (smp_num_siblings & (smp_num_siblings - 1)) 474 - index_msb++; 470 + 471 + index_msb = get_count_order(smp_num_siblings); 475 472 phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb); 476 473 477 474 printk(KERN_INFO "CPU: Physical Processor ID: %d\n", 478 475 phys_proc_id[cpu]); 479 476 480 - smp_num_siblings = smp_num_siblings / c->x86_num_cores; 477 + smp_num_siblings = smp_num_siblings / c->x86_max_cores; 481 478 482 - tmp = smp_num_siblings; 483 - index_msb = 31; 484 - while ((tmp & 0x80000000) == 0) { 485 - tmp <<=1 ; 486 - index_msb--; 487 - } 479 + index_msb = get_count_order(smp_num_siblings) ; 488 480 489 - if (smp_num_siblings & 
(smp_num_siblings - 1)) 490 - index_msb++; 481 + core_bits = get_count_order(c->x86_max_cores); 491 482 492 - cpu_core_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb); 483 + cpu_core_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) & 484 + ((1 << core_bits) - 1); 493 485 494 - if (c->x86_num_cores > 1) 486 + if (c->x86_max_cores > 1) 495 487 printk(KERN_INFO "CPU: Processor Core ID: %d\n", 496 488 cpu_core_id[cpu]); 497 489 }
+1 -1
arch/i386/kernel/cpu/intel.c
··· 157 157 if ( p ) 158 158 strcpy(c->x86_model_id, p); 159 159 160 - c->x86_num_cores = num_cpu_cores(c); 160 + c->x86_max_cores = num_cpu_cores(c); 161 161 162 162 detect_ht(c); 163 163
+1 -1
arch/i386/kernel/cpu/intel_cacheinfo.c
··· 307 307 #ifdef CONFIG_X86_HT 308 308 else if (num_threads_sharing == smp_num_siblings) 309 309 this_leaf->shared_cpu_map = cpu_sibling_map[cpu]; 310 - else if (num_threads_sharing == (c->x86_num_cores * smp_num_siblings)) 310 + else if (num_threads_sharing == (c->x86_max_cores * smp_num_siblings)) 311 311 this_leaf->shared_cpu_map = cpu_core_map[cpu]; 312 312 else 313 313 printk(KERN_DEBUG "Number of CPUs sharing cache didn't match "
+3 -4
arch/i386/kernel/cpu/proc.c
··· 94 94 if (c->x86_cache_size >= 0) 95 95 seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size); 96 96 #ifdef CONFIG_X86_HT 97 - if (c->x86_num_cores * smp_num_siblings > 1) { 97 + if (c->x86_max_cores * smp_num_siblings > 1) { 98 98 seq_printf(m, "physical id\t: %d\n", phys_proc_id[n]); 99 - seq_printf(m, "siblings\t: %d\n", 100 - c->x86_num_cores * smp_num_siblings); 99 + seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[n])); 101 100 seq_printf(m, "core id\t\t: %d\n", cpu_core_id[n]); 102 - seq_printf(m, "cpu cores\t: %d\n", c->x86_num_cores); 101 + seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); 103 102 } 104 103 #endif 105 104
+53 -20
arch/i386/kernel/smpboot.c
··· 74 74 int cpu_core_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID}; 75 75 EXPORT_SYMBOL(cpu_core_id); 76 76 77 + /* representing HT siblings of each logical CPU */ 77 78 cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly; 78 79 EXPORT_SYMBOL(cpu_sibling_map); 79 80 81 + /* representing HT and core siblings of each logical CPU */ 80 82 cpumask_t cpu_core_map[NR_CPUS] __read_mostly; 81 83 EXPORT_SYMBOL(cpu_core_map); 82 84 ··· 446 444 447 445 static int cpucount; 448 446 447 + /* representing cpus for which sibling maps can be computed */ 448 + static cpumask_t cpu_sibling_setup_map; 449 + 449 450 static inline void 450 451 set_cpu_sibling_map(int cpu) 451 452 { 452 453 int i; 454 + struct cpuinfo_x86 *c = cpu_data; 455 + 456 + cpu_set(cpu, cpu_sibling_setup_map); 453 457 454 458 if (smp_num_siblings > 1) { 455 - for (i = 0; i < NR_CPUS; i++) { 456 - if (!cpu_isset(i, cpu_callout_map)) 457 - continue; 458 - if (cpu_core_id[cpu] == cpu_core_id[i]) { 459 + for_each_cpu_mask(i, cpu_sibling_setup_map) { 460 + if (phys_proc_id[cpu] == phys_proc_id[i] && 461 + cpu_core_id[cpu] == cpu_core_id[i]) { 459 462 cpu_set(i, cpu_sibling_map[cpu]); 460 463 cpu_set(cpu, cpu_sibling_map[i]); 464 + cpu_set(i, cpu_core_map[cpu]); 465 + cpu_set(cpu, cpu_core_map[i]); 461 466 } 462 467 } 463 468 } else { 464 469 cpu_set(cpu, cpu_sibling_map[cpu]); 465 470 } 466 471 467 - if (current_cpu_data.x86_num_cores > 1) { 468 - for (i = 0; i < NR_CPUS; i++) { 469 - if (!cpu_isset(i, cpu_callout_map)) 470 - continue; 471 - if (phys_proc_id[cpu] == phys_proc_id[i]) { 472 - cpu_set(i, cpu_core_map[cpu]); 473 - cpu_set(cpu, cpu_core_map[i]); 474 - } 475 - } 476 - } else { 472 + if (current_cpu_data.x86_max_cores == 1) { 477 473 cpu_core_map[cpu] = cpu_sibling_map[cpu]; 474 + c[cpu].booted_cores = 1; 475 + return; 476 + } 477 + 478 + for_each_cpu_mask(i, cpu_sibling_setup_map) { 479 + if (phys_proc_id[cpu] == phys_proc_id[i]) { 480 + cpu_set(i, cpu_core_map[cpu]); 481 + cpu_set(cpu, 
cpu_core_map[i]); 482 + /* 483 + * Does this new cpu bringup a new core? 484 + */ 485 + if (cpus_weight(cpu_sibling_map[cpu]) == 1) { 486 + /* 487 + * for each core in package, increment 488 + * the booted_cores for this new cpu 489 + */ 490 + if (first_cpu(cpu_sibling_map[i]) == i) 491 + c[cpu].booted_cores++; 492 + /* 493 + * increment the core count for all 494 + * the other cpus in this package 495 + */ 496 + if (i != cpu) 497 + c[i].booted_cores++; 498 + } else if (i != cpu && !c[cpu].booted_cores) 499 + c[cpu].booted_cores = c[i].booted_cores; 500 + } 478 501 } 479 502 } 480 503 ··· 1123 1096 1124 1097 current_thread_info()->cpu = 0; 1125 1098 smp_tune_scheduling(); 1126 - cpus_clear(cpu_sibling_map[0]); 1127 - cpu_set(0, cpu_sibling_map[0]); 1128 1099 1129 - cpus_clear(cpu_core_map[0]); 1130 - cpu_set(0, cpu_core_map[0]); 1100 + set_cpu_sibling_map(0); 1131 1101 1132 1102 /* 1133 1103 * If we couldn't find an SMP configuration at boot time, ··· 1303 1279 remove_siblinginfo(int cpu) 1304 1280 { 1305 1281 int sibling; 1282 + struct cpuinfo_x86 *c = cpu_data; 1306 1283 1284 + for_each_cpu_mask(sibling, cpu_core_map[cpu]) { 1285 + cpu_clear(cpu, cpu_core_map[sibling]); 1286 + /* 1287 + * last thread sibling in this cpu core going down 1288 + */ 1289 + if (cpus_weight(cpu_sibling_map[cpu]) == 1) 1290 + c[sibling].booted_cores--; 1291 + } 1292 + 1307 1293 for_each_cpu_mask(sibling, cpu_sibling_map[cpu]) 1308 1294 cpu_clear(cpu, cpu_sibling_map[sibling]); 1309 - for_each_cpu_mask(sibling, cpu_core_map[cpu]) 1310 - cpu_clear(cpu, cpu_core_map[sibling]); 1311 1295 cpus_clear(cpu_sibling_map[cpu]); 1312 1296 cpus_clear(cpu_core_map[cpu]); 1313 1297 phys_proc_id[cpu] = BAD_APICID; 1314 1298 cpu_core_id[cpu] = BAD_APICID; 1299 + cpu_clear(cpu, cpu_sibling_setup_map); 1315 1300 } 1316 1301 1317 1302 int __cpu_disable(void)
+29 -40
arch/x86_64/kernel/setup.c
··· 795 795 #endif 796 796 797 797 bits = 0; 798 - while ((1 << bits) < c->x86_num_cores) 798 + while ((1 << bits) < c->x86_max_cores) 799 799 bits++; 800 800 801 801 /* Low order bits define the core id (index of core in socket) */ ··· 828 828 numa_set_node(cpu, node); 829 829 830 830 printk(KERN_INFO "CPU %d(%d) -> Node %d -> Core %d\n", 831 - cpu, c->x86_num_cores, node, cpu_core_id[cpu]); 831 + cpu, c->x86_max_cores, node, cpu_core_id[cpu]); 832 832 #endif 833 833 #endif 834 834 } ··· 877 877 display_cacheinfo(c); 878 878 879 879 if (c->extended_cpuid_level >= 0x80000008) { 880 - c->x86_num_cores = (cpuid_ecx(0x80000008) & 0xff) + 1; 881 - if (c->x86_num_cores & (c->x86_num_cores - 1)) 882 - c->x86_num_cores = 1; 880 + c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1; 881 + if (c->x86_max_cores & (c->x86_max_cores - 1)) 882 + c->x86_max_cores = 1; 883 883 884 884 amd_detect_cmp(c); 885 885 } ··· 891 891 { 892 892 #ifdef CONFIG_SMP 893 893 u32 eax, ebx, ecx, edx; 894 - int index_msb, tmp; 894 + int index_msb, core_bits; 895 895 int cpu = smp_processor_id(); 896 - 896 + 897 + cpuid(1, &eax, &ebx, &ecx, &edx); 898 + 899 + c->apicid = phys_pkg_id(0); 900 + 897 901 if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY)) 898 902 return; 899 903 900 - cpuid(1, &eax, &ebx, &ecx, &edx); 901 904 smp_num_siblings = (ebx & 0xff0000) >> 16; 902 - 905 + 903 906 if (smp_num_siblings == 1) { 904 907 printk(KERN_INFO "CPU: Hyper-Threading is disabled\n"); 905 - } else if (smp_num_siblings > 1) { 906 - index_msb = 31; 907 - /* 908 - * At this point we only support two siblings per 909 - * processor package. 
910 - */ 908 + } else if (smp_num_siblings > 1 ) { 909 + 911 910 if (smp_num_siblings > NR_CPUS) { 912 911 printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings); 913 912 smp_num_siblings = 1; 914 913 return; 915 914 } 916 - tmp = smp_num_siblings; 917 - while ((tmp & 0x80000000 ) == 0) { 918 - tmp <<=1 ; 919 - index_msb--; 920 - } 921 - if (smp_num_siblings & (smp_num_siblings - 1)) 922 - index_msb++; 915 + 916 + index_msb = get_count_order(smp_num_siblings); 923 917 phys_proc_id[cpu] = phys_pkg_id(index_msb); 924 - 918 + 925 919 printk(KERN_INFO "CPU: Physical Processor ID: %d\n", 926 920 phys_proc_id[cpu]); 927 921 928 - smp_num_siblings = smp_num_siblings / c->x86_num_cores; 922 + smp_num_siblings = smp_num_siblings / c->x86_max_cores; 929 923 930 - tmp = smp_num_siblings; 931 - index_msb = 31; 932 - while ((tmp & 0x80000000) == 0) { 933 - tmp <<=1 ; 934 - index_msb--; 935 - } 936 - if (smp_num_siblings & (smp_num_siblings - 1)) 937 - index_msb++; 924 + index_msb = get_count_order(smp_num_siblings) ; 938 925 939 - cpu_core_id[cpu] = phys_pkg_id(index_msb); 926 + core_bits = get_count_order(c->x86_max_cores); 940 927 941 - if (c->x86_num_cores > 1) 928 + cpu_core_id[cpu] = phys_pkg_id(index_msb) & 929 + ((1 << core_bits) - 1); 930 + 931 + if (c->x86_max_cores > 1) 942 932 printk(KERN_INFO "CPU: Processor Core ID: %d\n", 943 933 cpu_core_id[cpu]); 944 934 } ··· 996 1006 c->x86_cache_alignment = c->x86_clflush_size * 2; 997 1007 if (c->x86 >= 15) 998 1008 set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability); 999 - c->x86_num_cores = intel_num_cpu_cores(c); 1009 + c->x86_max_cores = intel_num_cpu_cores(c); 1000 1010 1001 1011 srat_detect_node(); 1002 1012 } ··· 1034 1044 c->x86_model_id[0] = '\0'; /* Unset */ 1035 1045 c->x86_clflush_size = 64; 1036 1046 c->x86_cache_alignment = c->x86_clflush_size; 1037 - c->x86_num_cores = 1; 1047 + c->x86_max_cores = 1; 1038 1048 c->extended_cpuid_level = 0; 1039 1049 memset(&c->x86_capability, 0, 
sizeof c->x86_capability); 1040 1050 ··· 1268 1278 seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size); 1269 1279 1270 1280 #ifdef CONFIG_SMP 1271 - if (smp_num_siblings * c->x86_num_cores > 1) { 1281 + if (smp_num_siblings * c->x86_max_cores > 1) { 1272 1282 int cpu = c - cpu_data; 1273 1283 seq_printf(m, "physical id\t: %d\n", phys_proc_id[cpu]); 1274 - seq_printf(m, "siblings\t: %d\n", 1275 - c->x86_num_cores * smp_num_siblings); 1284 + seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu])); 1276 1285 seq_printf(m, "core id\t\t: %d\n", cpu_core_id[cpu]); 1277 - seq_printf(m, "cpu cores\t: %d\n", c->x86_num_cores); 1286 + seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); 1278 1287 } 1279 1288 #endif 1280 1289
+55 -14
arch/x86_64/kernel/smpboot.c
··· 64 64 int smp_num_siblings = 1; 65 65 /* Package ID of each logical CPU */ 66 66 u8 phys_proc_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID }; 67 + /* core ID of each logical CPU */ 67 68 u8 cpu_core_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID }; 68 69 EXPORT_SYMBOL(phys_proc_id); 69 70 EXPORT_SYMBOL(cpu_core_id); ··· 90 89 /* Set when the idlers are all forked */ 91 90 int smp_threads_ready; 92 91 92 + /* representing HT siblings of each logical CPU */ 93 93 cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly; 94 + 95 + /* representing HT and core siblings of each logical CPU */ 94 96 cpumask_t cpu_core_map[NR_CPUS] __read_mostly; 95 97 EXPORT_SYMBOL(cpu_core_map); 96 98 ··· 440 436 cpu_set(cpuid, cpu_callin_map); 441 437 } 442 438 439 + /* representing cpus for which sibling maps can be computed */ 440 + static cpumask_t cpu_sibling_setup_map; 441 + 443 442 static inline void set_cpu_sibling_map(int cpu) 444 443 { 445 444 int i; 445 + struct cpuinfo_x86 *c = cpu_data; 446 + 447 + cpu_set(cpu, cpu_sibling_setup_map); 446 448 447 449 if (smp_num_siblings > 1) { 448 - for_each_cpu(i) { 449 - if (cpu_core_id[cpu] == cpu_core_id[i]) { 450 + for_each_cpu_mask(i, cpu_sibling_setup_map) { 451 + if (phys_proc_id[cpu] == phys_proc_id[i] && 452 + cpu_core_id[cpu] == cpu_core_id[i]) { 450 453 cpu_set(i, cpu_sibling_map[cpu]); 451 454 cpu_set(cpu, cpu_sibling_map[i]); 455 + cpu_set(i, cpu_core_map[cpu]); 456 + cpu_set(cpu, cpu_core_map[i]); 452 457 } 453 458 } 454 459 } else { 455 460 cpu_set(cpu, cpu_sibling_map[cpu]); 456 461 } 457 462 458 - if (current_cpu_data.x86_num_cores > 1) { 459 - for_each_cpu(i) { 460 - if (phys_proc_id[cpu] == phys_proc_id[i]) { 461 - cpu_set(i, cpu_core_map[cpu]); 462 - cpu_set(cpu, cpu_core_map[i]); 463 - } 464 - } 465 - } else { 463 + if (current_cpu_data.x86_max_cores == 1) { 466 464 cpu_core_map[cpu] = cpu_sibling_map[cpu]; 465 + c[cpu].booted_cores = 1; 466 + return; 467 + } 468 + 469 + for_each_cpu_mask(i, 
cpu_sibling_setup_map) { 470 + if (phys_proc_id[cpu] == phys_proc_id[i]) { 471 + cpu_set(i, cpu_core_map[cpu]); 472 + cpu_set(cpu, cpu_core_map[i]); 473 + /* 474 + * Does this new cpu bringup a new core? 475 + */ 476 + if (cpus_weight(cpu_sibling_map[cpu]) == 1) { 477 + /* 478 + * for each core in package, increment 479 + * the booted_cores for this new cpu 480 + */ 481 + if (first_cpu(cpu_sibling_map[i]) == i) 482 + c[cpu].booted_cores++; 483 + /* 484 + * increment the core count for all 485 + * the other cpus in this package 486 + */ 487 + if (i != cpu) 488 + c[i].booted_cores++; 489 + } else if (i != cpu && !c[cpu].booted_cores) 490 + c[cpu].booted_cores = c[i].booted_cores; 491 + } 467 492 } 468 493 } 469 494 ··· 1026 993 nmi_watchdog_default(); 1027 994 current_cpu_data = boot_cpu_data; 1028 995 current_thread_info()->cpu = 0; /* needed? */ 996 + set_cpu_sibling_map(0); 1029 997 1030 998 if (smp_sanity_check(max_cpus) < 0) { 1031 999 printk(KERN_INFO "SMP disabled\n"); ··· 1070 1036 int me = smp_processor_id(); 1071 1037 cpu_set(me, cpu_online_map); 1072 1038 cpu_set(me, cpu_callout_map); 1073 - cpu_set(0, cpu_sibling_map[0]); 1074 - cpu_set(0, cpu_core_map[0]); 1075 1039 per_cpu(cpu_state, me) = CPU_ONLINE; 1076 1040 } 1077 1041 ··· 1138 1106 static void remove_siblinginfo(int cpu) 1139 1107 { 1140 1108 int sibling; 1109 + struct cpuinfo_x86 *c = cpu_data; 1141 1110 1111 + for_each_cpu_mask(sibling, cpu_core_map[cpu]) { 1112 + cpu_clear(cpu, cpu_core_map[sibling]); 1113 + /* 1114 + * last thread sibling in this cpu core going down 1115 + */ 1116 + if (cpus_weight(cpu_sibling_map[cpu]) == 1) 1117 + c[sibling].booted_cores--; 1118 + } 1119 + 1142 1120 for_each_cpu_mask(sibling, cpu_sibling_map[cpu]) 1143 1121 cpu_clear(cpu, cpu_sibling_map[sibling]); 1144 - for_each_cpu_mask(sibling, cpu_core_map[cpu]) 1145 - cpu_clear(cpu, cpu_core_map[sibling]); 1146 1122 cpus_clear(cpu_sibling_map[cpu]); 1147 1123 cpus_clear(cpu_core_map[cpu]); 1148 1124 phys_proc_id[cpu] = 
BAD_APICID; 1149 1125 cpu_core_id[cpu] = BAD_APICID; 1126 + cpu_clear(cpu, cpu_sibling_setup_map); 1150 1127 } 1151 1128 1152 1129 void remove_cpu_from_maps(void)
+3 -1
include/asm-i386/processor.h
··· 65 65 int f00f_bug; 66 66 int coma_bug; 67 67 unsigned long loops_per_jiffy; 68 - unsigned char x86_num_cores; 68 + unsigned char x86_max_cores; /* cpuid returned max cores value */ 69 + unsigned char booted_cores; /* number of cores as seen by OS */ 70 + unsigned char apicid; 69 71 } __attribute__((__aligned__(SMP_CACHE_BYTES))); 70 72 71 73 #define X86_VENDOR_INTEL 0
+3 -1
include/asm-x86_64/processor.h
··· 61 61 int x86_cache_alignment; 62 62 int x86_tlbsize; /* number of 4K pages in DTLB/ITLB combined(in pages)*/ 63 63 __u8 x86_virt_bits, x86_phys_bits; 64 - __u8 x86_num_cores; 64 + __u8 x86_max_cores; /* cpuid returned max cores value */ 65 65 __u32 x86_power; 66 66 __u32 extended_cpuid_level; /* Max extended CPUID function supported */ 67 67 unsigned long loops_per_jiffy; 68 + __u8 apicid; 69 + __u8 booted_cores; /* number of cores as seen by OS */ 68 70 } ____cacheline_aligned; 69 71 70 72 #define X86_VENDOR_INTEL 0
+10
include/linux/bitops.h
··· 84 84 return order; /* We could be slightly more clever with -1 here... */ 85 85 } 86 86 87 + static __inline__ int get_count_order(unsigned int count) 88 + { 89 + int order; 90 + 91 + order = fls(count) - 1; 92 + if (count & (count - 1)) 93 + order++; 94 + return order; 95 + } 96 + 87 97 /* 88 98 * hweightN: returns the hamming weight (i.e. the number 89 99 * of bits set) of a N-bit word