Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arch/x86: replace cpumask_weight with cpumask_empty where appropriate

In some cases, arch/x86 code calls cpumask_weight() to check if any bit of
a given cpumask is set. We can do it more efficiently with cpumask_empty()
because cpumask_empty() stops traversing the cpumask as soon as it finds
the first set bit, while cpumask_weight() counts all bits unconditionally.

Signed-off-by: Yury Norov <yury.norov@gmail.com>
Reviewed-by: Steve Wahl <steve.wahl@hpe.com>

+9 -9
+7 -7
arch/x86/kernel/cpu/resctrl/rdtgroup.c
··· 341 341 342 342 /* Check whether cpus belong to parent ctrl group */ 343 343 cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask); 344 - if (cpumask_weight(tmpmask)) { 344 + if (!cpumask_empty(tmpmask)) { 345 345 rdt_last_cmd_puts("Can only add CPUs to mongroup that belong to parent\n"); 346 346 return -EINVAL; 347 347 } 348 348 349 349 /* Check whether cpus are dropped from this group */ 350 350 cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask); 351 - if (cpumask_weight(tmpmask)) { 351 + if (!cpumask_empty(tmpmask)) { 352 352 /* Give any dropped cpus to parent rdtgroup */ 353 353 cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask); 354 354 update_closid_rmid(tmpmask, prgrp); ··· 359 359 * and update per-cpu rmid 360 360 */ 361 361 cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask); 362 - if (cpumask_weight(tmpmask)) { 362 + if (!cpumask_empty(tmpmask)) { 363 363 head = &prgrp->mon.crdtgrp_list; 364 364 list_for_each_entry(crgrp, head, mon.crdtgrp_list) { 365 365 if (crgrp == rdtgrp) ··· 394 394 395 395 /* Check whether cpus are dropped from this group */ 396 396 cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask); 397 - if (cpumask_weight(tmpmask)) { 397 + if (!cpumask_empty(tmpmask)) { 398 398 /* Can't drop from default group */ 399 399 if (rdtgrp == &rdtgroup_default) { 400 400 rdt_last_cmd_puts("Can't drop CPUs from default group\n"); ··· 413 413 * and update per-cpu closid/rmid. 
414 414 */ 415 415 cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask); 416 - if (cpumask_weight(tmpmask)) { 416 + if (!cpumask_empty(tmpmask)) { 417 417 list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) { 418 418 if (r == rdtgrp) 419 419 continue; 420 420 cpumask_and(tmpmask1, &r->cpu_mask, tmpmask); 421 - if (cpumask_weight(tmpmask1)) 421 + if (!cpumask_empty(tmpmask1)) 422 422 cpumask_rdtgrp_clear(r, tmpmask1); 423 423 } 424 424 update_closid_rmid(tmpmask, rdtgrp); ··· 488 488 489 489 /* check that user didn't specify any offline cpus */ 490 490 cpumask_andnot(tmpmask, newmask, cpu_online_mask); 491 - if (cpumask_weight(tmpmask)) { 491 + if (!cpumask_empty(tmpmask)) { 492 492 ret = -EINVAL; 493 493 rdt_last_cmd_puts("Can only assign online CPUs\n"); 494 494 goto unlock;
+1 -1
arch/x86/mm/mmio-mod.c
··· 400 400 int cpu; 401 401 int err; 402 402 403 - if (!cpumask_available(downed_cpus) || cpumask_weight(downed_cpus) == 0) 403 + if (!cpumask_available(downed_cpus) || cpumask_empty(downed_cpus)) 404 404 return; 405 405 pr_notice("Re-enabling CPUs...\n"); 406 406 for_each_cpu(cpu, downed_cpus) {
+1 -1
arch/x86/platform/uv/uv_nmi.c
··· 985 985 986 986 /* Clear global flags */ 987 987 if (master) { 988 - if (cpumask_weight(uv_nmi_cpu_mask)) 988 + if (!cpumask_empty(uv_nmi_cpu_mask)) 989 989 uv_nmi_cleanup_mask(); 990 990 atomic_set(&uv_nmi_cpus_in_nmi, -1); 991 991 atomic_set(&uv_nmi_cpu, -1);