Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86: Replace cpu_**_mask() with topology_**_cpumask()

The former duplicate the functionalities of the latter but are
neither documented nor arch-independent.

Signed-off-by: Bartosz Golaszewski <bgolaszewski@baylibre.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Benoit Cousson <bcousson@baylibre.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Guenter Roeck <linux@roeck-us.net>
Cc: Jean Delvare <jdelvare@suse.de>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Drokin <oleg.drokin@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rafael J. Wysocki <rjw@rjwysocki.net>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Viresh Kumar <viresh.kumar@linaro.org>
Link: http://lkml.kernel.org/r/1432645896-12588-9-git-send-email-bgolaszewski@baylibre.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by Bartosz Golaszewski and committed by Ingo Molnar
7d79a7bd 265ea624

+25 -22
+2 -1
arch/x86/kernel/cpu/proc.c
··· 12 12 { 13 13 #ifdef CONFIG_SMP 14 14 seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); 15 - seq_printf(m, "siblings\t: %d\n", cpumask_weight(cpu_core_mask(cpu))); 15 + seq_printf(m, "siblings\t: %d\n", 16 + cpumask_weight(topology_core_cpumask(cpu))); 16 17 seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); 17 18 seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); 18 19 seq_printf(m, "apicid\t\t: %d\n", c->apicid);
+22 -20
arch/x86/kernel/smpboot.c
··· 314 314 cpu1, name, cpu2, cpu_to_node(cpu1), cpu_to_node(cpu2)); 315 315 } 316 316 317 - #define link_mask(_m, c1, c2) \ 317 + #define link_mask(mfunc, c1, c2) \ 318 318 do { \ 319 - cpumask_set_cpu((c1), cpu_##_m##_mask(c2)); \ 320 - cpumask_set_cpu((c2), cpu_##_m##_mask(c1)); \ 319 + cpumask_set_cpu((c1), mfunc(c2)); \ 320 + cpumask_set_cpu((c2), mfunc(c1)); \ 321 321 } while (0) 322 322 323 323 static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) ··· 398 398 cpumask_set_cpu(cpu, cpu_sibling_setup_mask); 399 399 400 400 if (!has_mp) { 401 - cpumask_set_cpu(cpu, cpu_sibling_mask(cpu)); 401 + cpumask_set_cpu(cpu, topology_sibling_cpumask(cpu)); 402 402 cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu)); 403 - cpumask_set_cpu(cpu, cpu_core_mask(cpu)); 403 + cpumask_set_cpu(cpu, topology_core_cpumask(cpu)); 404 404 c->booted_cores = 1; 405 405 return; 406 406 } ··· 409 409 o = &cpu_data(i); 410 410 411 411 if ((i == cpu) || (has_smt && match_smt(c, o))) 412 - link_mask(sibling, cpu, i); 412 + link_mask(topology_sibling_cpumask, cpu, i); 413 413 414 414 if ((i == cpu) || (has_mp && match_llc(c, o))) 415 - link_mask(llc_shared, cpu, i); 415 + link_mask(cpu_llc_shared_mask, cpu, i); 416 416 417 417 } 418 418 419 419 /* 420 420 * This needs a separate iteration over the cpus because we rely on all 421 - * cpu_sibling_mask links to be set-up. 421 + * topology_sibling_cpumask links to be set-up. 422 422 */ 423 423 for_each_cpu(i, cpu_sibling_setup_mask) { 424 424 o = &cpu_data(i); 425 425 426 426 if ((i == cpu) || (has_mp && match_die(c, o))) { 427 - link_mask(core, cpu, i); 427 + link_mask(topology_core_cpumask, cpu, i); 428 428 429 429 /* 430 430 * Does this new cpu bringup a new core? 
431 431 */ 432 - if (cpumask_weight(cpu_sibling_mask(cpu)) == 1) { 432 + if (cpumask_weight( 433 + topology_sibling_cpumask(cpu)) == 1) { 433 434 /* 434 435 * for each core in package, increment 435 436 * the booted_cores for this new cpu 436 437 */ 437 - if (cpumask_first(cpu_sibling_mask(i)) == i) 438 + if (cpumask_first( 439 + topology_sibling_cpumask(i)) == i) 438 440 c->booted_cores++; 439 441 /* 440 442 * increment the core count for all ··· 1011 1009 physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map); 1012 1010 else 1013 1011 physid_set_mask_of_physid(0, &phys_cpu_present_map); 1014 - cpumask_set_cpu(0, cpu_sibling_mask(0)); 1015 - cpumask_set_cpu(0, cpu_core_mask(0)); 1012 + cpumask_set_cpu(0, topology_sibling_cpumask(0)); 1013 + cpumask_set_cpu(0, topology_core_cpumask(0)); 1016 1014 } 1017 1015 1018 1016 enum { ··· 1295 1293 int sibling; 1296 1294 struct cpuinfo_x86 *c = &cpu_data(cpu); 1297 1295 1298 - for_each_cpu(sibling, cpu_core_mask(cpu)) { 1299 - cpumask_clear_cpu(cpu, cpu_core_mask(sibling)); 1296 + for_each_cpu(sibling, topology_core_cpumask(cpu)) { 1297 + cpumask_clear_cpu(cpu, topology_core_cpumask(sibling)); 1300 1298 /* 1301 1299 * last thread sibling in this cpu core going down 1302 1300 */ 1303 - if (cpumask_weight(cpu_sibling_mask(cpu)) == 1) 1301 + if (cpumask_weight(topology_sibling_cpumask(cpu)) == 1) 1304 1302 cpu_data(sibling).booted_cores--; 1305 1303 } 1306 1304 1307 - for_each_cpu(sibling, cpu_sibling_mask(cpu)) 1308 - cpumask_clear_cpu(cpu, cpu_sibling_mask(sibling)); 1305 + for_each_cpu(sibling, topology_sibling_cpumask(cpu)) 1306 + cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling)); 1309 1307 for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) 1310 1308 cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling)); 1311 1309 cpumask_clear(cpu_llc_shared_mask(cpu)); 1312 - cpumask_clear(cpu_sibling_mask(cpu)); 1313 - cpumask_clear(cpu_core_mask(cpu)); 1310 + cpumask_clear(topology_sibling_cpumask(cpu)); 1311 + 
cpumask_clear(topology_core_cpumask(cpu)); 1314 1312 c->phys_proc_id = 0; 1315 1313 c->cpu_core_id = 0; 1316 1314 cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
+1 -1
arch/x86/kernel/tsc_sync.c
··· 113 113 */ 114 114 static inline unsigned int loop_timeout(int cpu) 115 115 { 116 - return (cpumask_weight(cpu_core_mask(cpu)) > 1) ? 2 : 20; 116 + return (cpumask_weight(topology_core_cpumask(cpu)) > 1) ? 2 : 20; 117 117 } 118 118 119 119 /*