Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sched/isolation: Use single feature type while referring to housekeeping cpumask

Refer to housekeeping APIs using single feature types instead of flags.
This prevents passing multiple isolation features at once to
housekeeping interfaces, which soon won't be possible anymore as each
isolation feature will have its own cpumask.

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Juri Lelli <juri.lelli@redhat.com>
Reviewed-by: Phil Auld <pauld@redhat.com>
Link: https://lore.kernel.org/r/20220207155910.527133-5-frederic@kernel.org

authored by

Frederic Weisbecker and committed by
Peter Zijlstra
04d4e665 c8fb9f22

+86 -73
+3 -3
arch/x86/kernel/cpu/aperfmperf.c
··· 91 91 if (!boot_cpu_has(X86_FEATURE_APERFMPERF)) 92 92 return 0; 93 93 94 - if (!housekeeping_cpu(cpu, HK_FLAG_MISC)) 94 + if (!housekeeping_cpu(cpu, HK_TYPE_MISC)) 95 95 return 0; 96 96 97 97 if (rcu_is_idle_cpu(cpu)) ··· 114 114 return; 115 115 116 116 for_each_online_cpu(cpu) { 117 - if (!housekeeping_cpu(cpu, HK_FLAG_MISC)) 117 + if (!housekeeping_cpu(cpu, HK_TYPE_MISC)) 118 118 continue; 119 119 if (rcu_is_idle_cpu(cpu)) 120 120 continue; /* Idle CPUs are completely uninteresting. */ ··· 136 136 if (!boot_cpu_has(X86_FEATURE_APERFMPERF)) 137 137 return 0; 138 138 139 - if (!housekeeping_cpu(cpu, HK_FLAG_MISC)) 139 + if (!housekeeping_cpu(cpu, HK_TYPE_MISC)) 140 140 return 0; 141 141 142 142 if (aperfmperf_snapshot_cpu(cpu, ktime_get(), true))
+1 -1
arch/x86/kvm/x86.c
··· 8769 8769 } 8770 8770 8771 8771 if (pi_inject_timer == -1) 8772 - pi_inject_timer = housekeeping_enabled(HK_FLAG_TIMER); 8772 + pi_inject_timer = housekeeping_enabled(HK_TYPE_TIMER); 8773 8773 #ifdef CONFIG_X86_64 8774 8774 pvclock_gtod_register_notifier(&pvclock_gtod_notifier); 8775 8775
+1 -1
drivers/base/cpu.c
··· 275 275 return -ENOMEM; 276 276 277 277 cpumask_andnot(isolated, cpu_possible_mask, 278 - housekeeping_cpumask(HK_FLAG_DOMAIN)); 278 + housekeeping_cpumask(HK_TYPE_DOMAIN)); 279 279 len = sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(isolated)); 280 280 281 281 free_cpumask_var(isolated);
+2 -2
drivers/pci/pci-driver.c
··· 377 377 goto out; 378 378 } 379 379 cpumask_and(wq_domain_mask, 380 - housekeeping_cpumask(HK_FLAG_WQ), 381 - housekeeping_cpumask(HK_FLAG_DOMAIN)); 380 + housekeeping_cpumask(HK_TYPE_WQ), 381 + housekeeping_cpumask(HK_TYPE_DOMAIN)); 382 382 383 383 cpu = cpumask_any_and(cpumask_of_node(node), 384 384 wq_domain_mask);
+22 -21
include/linux/sched/isolation.h
··· 5 5 #include <linux/init.h> 6 6 #include <linux/tick.h> 7 7 8 - enum hk_flags { 9 - HK_FLAG_TIMER = 1, 10 - HK_FLAG_RCU = (1 << 1), 11 - HK_FLAG_MISC = (1 << 2), 12 - HK_FLAG_SCHED = (1 << 3), 13 - HK_FLAG_TICK = (1 << 4), 14 - HK_FLAG_DOMAIN = (1 << 5), 15 - HK_FLAG_WQ = (1 << 6), 16 - HK_FLAG_MANAGED_IRQ = (1 << 7), 17 - HK_FLAG_KTHREAD = (1 << 8), 8 + enum hk_type { 9 + HK_TYPE_TIMER, 10 + HK_TYPE_RCU, 11 + HK_TYPE_MISC, 12 + HK_TYPE_SCHED, 13 + HK_TYPE_TICK, 14 + HK_TYPE_DOMAIN, 15 + HK_TYPE_WQ, 16 + HK_TYPE_MANAGED_IRQ, 17 + HK_TYPE_KTHREAD, 18 + HK_TYPE_MAX 18 19 }; 19 20 20 21 #ifdef CONFIG_CPU_ISOLATION 21 22 DECLARE_STATIC_KEY_FALSE(housekeeping_overridden); 22 - extern int housekeeping_any_cpu(enum hk_flags flags); 23 - extern const struct cpumask *housekeeping_cpumask(enum hk_flags flags); 24 - extern bool housekeeping_enabled(enum hk_flags flags); 25 - extern void housekeeping_affine(struct task_struct *t, enum hk_flags flags); 26 - extern bool housekeeping_test_cpu(int cpu, enum hk_flags flags); 23 + extern int housekeeping_any_cpu(enum hk_type type); 24 + extern const struct cpumask *housekeeping_cpumask(enum hk_type type); 25 + extern bool housekeeping_enabled(enum hk_type type); 26 + extern void housekeeping_affine(struct task_struct *t, enum hk_type type); 27 + extern bool housekeeping_test_cpu(int cpu, enum hk_type type); 27 28 extern void __init housekeeping_init(void); 28 29 29 30 #else 30 31 31 - static inline int housekeeping_any_cpu(enum hk_flags flags) 32 + static inline int housekeeping_any_cpu(enum hk_type type) 32 33 { 33 34 return smp_processor_id(); 34 35 } 35 36 36 - static inline const struct cpumask *housekeeping_cpumask(enum hk_flags flags) 37 + static inline const struct cpumask *housekeeping_cpumask(enum hk_type type) 37 38 { 38 39 return cpu_possible_mask; 39 40 } 40 41 41 - static inline bool housekeeping_enabled(enum hk_flags flags) 42 + static inline bool housekeeping_enabled(enum hk_type type) 42 43 { 43 44 return false; 
44 45 } 45 46 46 47 static inline void housekeeping_affine(struct task_struct *t, 47 - enum hk_flags flags) { } 48 + enum hk_type type) { } 48 49 static inline void housekeeping_init(void) { } 49 50 #endif /* CONFIG_CPU_ISOLATION */ 50 51 51 - static inline bool housekeeping_cpu(int cpu, enum hk_flags flags) 52 + static inline bool housekeeping_cpu(int cpu, enum hk_type type) 52 53 { 53 54 #ifdef CONFIG_CPU_ISOLATION 54 55 if (static_branch_unlikely(&housekeeping_overridden)) 55 - return housekeeping_test_cpu(cpu, flags); 56 + return housekeeping_test_cpu(cpu, type); 56 57 #endif 57 58 return true; 58 59 }
+3 -3
kernel/cgroup/cpuset.c
··· 803 803 update_domain_attr_tree(dattr, &top_cpuset); 804 804 } 805 805 cpumask_and(doms[0], top_cpuset.effective_cpus, 806 - housekeeping_cpumask(HK_FLAG_DOMAIN)); 806 + housekeeping_cpumask(HK_TYPE_DOMAIN)); 807 807 808 808 goto done; 809 809 } ··· 833 833 if (!cpumask_empty(cp->cpus_allowed) && 834 834 !(is_sched_load_balance(cp) && 835 835 cpumask_intersects(cp->cpus_allowed, 836 - housekeeping_cpumask(HK_FLAG_DOMAIN)))) 836 + housekeeping_cpumask(HK_TYPE_DOMAIN)))) 837 837 continue; 838 838 839 839 if (root_load_balance && ··· 922 922 923 923 if (apn == b->pn) { 924 924 cpumask_or(dp, dp, b->effective_cpus); 925 - cpumask_and(dp, dp, housekeeping_cpumask(HK_FLAG_DOMAIN)); 925 + cpumask_and(dp, dp, housekeeping_cpumask(HK_TYPE_DOMAIN)); 926 926 if (dattr) 927 927 update_domain_attr_tree(dattr + nslot, b); 928 928
+2 -2
kernel/cpu.c
··· 1488 1488 cpu_maps_update_begin(); 1489 1489 if (primary == -1) { 1490 1490 primary = cpumask_first(cpu_online_mask); 1491 - if (!housekeeping_cpu(primary, HK_FLAG_TIMER)) 1492 - primary = housekeeping_any_cpu(HK_FLAG_TIMER); 1491 + if (!housekeeping_cpu(primary, HK_TYPE_TIMER)) 1492 + primary = housekeeping_any_cpu(HK_TYPE_TIMER); 1493 1493 } else { 1494 1494 if (!cpu_online(primary)) 1495 1495 primary = cpumask_first(cpu_online_mask);
+2 -2
kernel/irq/cpuhotplug.c
··· 176 176 { 177 177 const struct cpumask *hk_mask; 178 178 179 - if (!housekeeping_enabled(HK_FLAG_MANAGED_IRQ)) 179 + if (!housekeeping_enabled(HK_TYPE_MANAGED_IRQ)) 180 180 return false; 181 181 182 - hk_mask = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ); 182 + hk_mask = housekeeping_cpumask(HK_TYPE_MANAGED_IRQ); 183 183 if (cpumask_subset(irq_data_get_effective_affinity_mask(data), hk_mask)) 184 184 return false; 185 185
+2 -2
kernel/irq/manage.c
··· 247 247 * online. 248 248 */ 249 249 if (irqd_affinity_is_managed(data) && 250 - housekeeping_enabled(HK_FLAG_MANAGED_IRQ)) { 250 + housekeeping_enabled(HK_TYPE_MANAGED_IRQ)) { 251 251 const struct cpumask *hk_mask, *prog_mask; 252 252 253 253 static DEFINE_RAW_SPINLOCK(tmp_mask_lock); 254 254 static struct cpumask tmp_mask; 255 255 256 - hk_mask = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ); 256 + hk_mask = housekeeping_cpumask(HK_TYPE_MANAGED_IRQ); 257 257 258 258 raw_spin_lock(&tmp_mask_lock); 259 259 cpumask_and(&tmp_mask, mask, hk_mask);
+2 -2
kernel/kthread.c
··· 356 356 * back to default in case they have been changed. 357 357 */ 358 358 sched_setscheduler_nocheck(current, SCHED_NORMAL, &param); 359 - set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_KTHREAD)); 359 + set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_KTHREAD)); 360 360 361 361 /* OK, tell user we're spawned, wait for stop or wakeup */ 362 362 __set_current_state(TASK_UNINTERRUPTIBLE); ··· 722 722 /* Setup a clean context for our children to inherit. */ 723 723 set_task_comm(tsk, "kthreadd"); 724 724 ignore_signals(tsk); 725 - set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_FLAG_KTHREAD)); 725 + set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_TYPE_KTHREAD)); 726 726 set_mems_allowed(node_states[N_MEMORY]); 727 727 728 728 current->flags |= PF_NOFREEZE;
+1 -1
kernel/rcu/tasks.h
··· 492 492 struct rcu_tasks *rtp = arg; 493 493 494 494 /* Run on housekeeping CPUs by default. Sysadm can move if desired. */ 495 - housekeeping_affine(current, HK_FLAG_RCU); 495 + housekeeping_affine(current, HK_TYPE_RCU); 496 496 WRITE_ONCE(rtp->kthread_ptr, current); // Let GPs start! 497 497 498 498 /*
+3 -3
kernel/rcu/tree_plugin.h
··· 1214 1214 if ((mask & leaf_node_cpu_bit(rnp, cpu)) && 1215 1215 cpu != outgoingcpu) 1216 1216 cpumask_set_cpu(cpu, cm); 1217 - cpumask_and(cm, cm, housekeeping_cpumask(HK_FLAG_RCU)); 1217 + cpumask_and(cm, cm, housekeeping_cpumask(HK_TYPE_RCU)); 1218 1218 if (cpumask_weight(cm) == 0) 1219 - cpumask_copy(cm, housekeeping_cpumask(HK_FLAG_RCU)); 1219 + cpumask_copy(cm, housekeeping_cpumask(HK_TYPE_RCU)); 1220 1220 set_cpus_allowed_ptr(t, cm); 1221 1221 free_cpumask_var(cm); 1222 1222 } ··· 1291 1291 { 1292 1292 if (!tick_nohz_full_enabled()) 1293 1293 return; 1294 - housekeeping_affine(current, HK_FLAG_RCU); 1294 + housekeeping_affine(current, HK_TYPE_RCU); 1295 1295 } 1296 1296 1297 1297 /* Record the current task on dyntick-idle entry. */
+6 -6
kernel/sched/core.c
··· 1025 1025 struct sched_domain *sd; 1026 1026 const struct cpumask *hk_mask; 1027 1027 1028 - if (housekeeping_cpu(cpu, HK_FLAG_TIMER)) { 1028 + if (housekeeping_cpu(cpu, HK_TYPE_TIMER)) { 1029 1029 if (!idle_cpu(cpu)) 1030 1030 return cpu; 1031 1031 default_cpu = cpu; 1032 1032 } 1033 1033 1034 - hk_mask = housekeeping_cpumask(HK_FLAG_TIMER); 1034 + hk_mask = housekeeping_cpumask(HK_TYPE_TIMER); 1035 1035 1036 1036 rcu_read_lock(); 1037 1037 for_each_domain(cpu, sd) { ··· 1047 1047 } 1048 1048 1049 1049 if (default_cpu == -1) 1050 - default_cpu = housekeeping_any_cpu(HK_FLAG_TIMER); 1050 + default_cpu = housekeeping_any_cpu(HK_TYPE_TIMER); 1051 1051 cpu = default_cpu; 1052 1052 unlock: 1053 1053 rcu_read_unlock(); ··· 5371 5371 int os; 5372 5372 struct tick_work *twork; 5373 5373 5374 - if (housekeeping_cpu(cpu, HK_FLAG_TICK)) 5374 + if (housekeeping_cpu(cpu, HK_TYPE_TICK)) 5375 5375 return; 5376 5376 5377 5377 WARN_ON_ONCE(!tick_work_cpu); ··· 5392 5392 struct tick_work *twork; 5393 5393 int os; 5394 5394 5395 - if (housekeeping_cpu(cpu, HK_FLAG_TICK)) 5395 + if (housekeeping_cpu(cpu, HK_TYPE_TICK)) 5396 5396 return; 5397 5397 5398 5398 WARN_ON_ONCE(!tick_work_cpu); ··· 9251 9251 mutex_unlock(&sched_domains_mutex); 9252 9252 9253 9253 /* Move init over to a non-isolated CPU */ 9254 - if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0) 9254 + if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0) 9255 9255 BUG(); 9256 9256 current->flags &= ~PF_NO_SETAFFINITY; 9257 9257 sched_init_granularity();
+5 -5
kernel/sched/fair.c
··· 10337 10337 * - When one of the busy CPUs notice that there may be an idle rebalancing 10338 10338 * needed, they will kick the idle load balancer, which then does idle 10339 10339 * load balancing for all the idle CPUs. 10340 - * - HK_FLAG_MISC CPUs are used for this task, because HK_FLAG_SCHED not set 10340 + * - HK_TYPE_MISC CPUs are used for this task, because HK_TYPE_SCHED not set 10341 10341 * anywhere yet. 10342 10342 */ 10343 10343 ··· 10346 10346 int ilb; 10347 10347 const struct cpumask *hk_mask; 10348 10348 10349 - hk_mask = housekeeping_cpumask(HK_FLAG_MISC); 10349 + hk_mask = housekeeping_cpumask(HK_TYPE_MISC); 10350 10350 10351 10351 for_each_cpu_and(ilb, nohz.idle_cpus_mask, hk_mask) { 10352 10352 ··· 10362 10362 10363 10363 /* 10364 10364 * Kick a CPU to do the nohz balancing, if it is time for it. We pick any 10365 - * idle CPU in the HK_FLAG_MISC housekeeping set (if there is one). 10365 + * idle CPU in the HK_TYPE_MISC housekeeping set (if there is one). 10366 10366 */ 10367 10367 static void kick_ilb(unsigned int flags) 10368 10368 { ··· 10575 10575 return; 10576 10576 10577 10577 /* Spare idle load balancing on CPUs that don't want to be disturbed: */ 10578 - if (!housekeeping_cpu(cpu, HK_FLAG_SCHED)) 10578 + if (!housekeeping_cpu(cpu, HK_TYPE_SCHED)) 10579 10579 return; 10580 10580 10581 10581 /* ··· 10791 10791 * This CPU doesn't want to be disturbed by scheduler 10792 10792 * housekeeping 10793 10793 */ 10794 - if (!housekeeping_cpu(this_cpu, HK_FLAG_SCHED)) 10794 + if (!housekeeping_cpu(this_cpu, HK_TYPE_SCHED)) 10795 10795 return; 10796 10796 10797 10797 /* Will wake up very soon. No time for doing anything else*/
+22 -10
kernel/sched/isolation.c
··· 9 9 */ 10 10 #include "sched.h" 11 11 12 + enum hk_flags { 13 + HK_FLAG_TIMER = BIT(HK_TYPE_TIMER), 14 + HK_FLAG_RCU = BIT(HK_TYPE_RCU), 15 + HK_FLAG_MISC = BIT(HK_TYPE_MISC), 16 + HK_FLAG_SCHED = BIT(HK_TYPE_SCHED), 17 + HK_FLAG_TICK = BIT(HK_TYPE_TICK), 18 + HK_FLAG_DOMAIN = BIT(HK_TYPE_DOMAIN), 19 + HK_FLAG_WQ = BIT(HK_TYPE_WQ), 20 + HK_FLAG_MANAGED_IRQ = BIT(HK_TYPE_MANAGED_IRQ), 21 + HK_FLAG_KTHREAD = BIT(HK_TYPE_KTHREAD), 22 + }; 23 + 12 24 DEFINE_STATIC_KEY_FALSE(housekeeping_overridden); 13 25 EXPORT_SYMBOL_GPL(housekeeping_overridden); 14 26 static cpumask_var_t housekeeping_mask; 15 27 static unsigned int housekeeping_flags; 16 28 17 - bool housekeeping_enabled(enum hk_flags flags) 29 + bool housekeeping_enabled(enum hk_type type) 18 30 { 19 - return !!(housekeeping_flags & flags); 31 + return !!(housekeeping_flags & BIT(type)); 20 32 } 21 33 EXPORT_SYMBOL_GPL(housekeeping_enabled); 22 34 23 - int housekeeping_any_cpu(enum hk_flags flags) 35 + int housekeeping_any_cpu(enum hk_type type) 24 36 { 25 37 int cpu; 26 38 27 39 if (static_branch_unlikely(&housekeeping_overridden)) { 28 - if (housekeeping_flags & flags) { 40 + if (housekeeping_flags & BIT(type)) { 29 41 cpu = sched_numa_find_closest(housekeeping_mask, smp_processor_id()); 30 42 if (cpu < nr_cpu_ids) 31 43 return cpu; ··· 49 37 } 50 38 EXPORT_SYMBOL_GPL(housekeeping_any_cpu); 51 39 52 - const struct cpumask *housekeeping_cpumask(enum hk_flags flags) 40 + const struct cpumask *housekeeping_cpumask(enum hk_type type) 53 41 { 54 42 if (static_branch_unlikely(&housekeeping_overridden)) 55 - if (housekeeping_flags & flags) 43 + if (housekeeping_flags & BIT(type)) 56 44 return housekeeping_mask; 57 45 return cpu_possible_mask; 58 46 } 59 47 EXPORT_SYMBOL_GPL(housekeeping_cpumask); 60 48 61 - void housekeeping_affine(struct task_struct *t, enum hk_flags flags) 49 + void housekeeping_affine(struct task_struct *t, enum hk_type type) 62 50 { 63 51 if (static_branch_unlikely(&housekeeping_overridden)) 64 
- if (housekeeping_flags & flags) 52 + if (housekeeping_flags & BIT(type)) 65 53 set_cpus_allowed_ptr(t, housekeeping_mask); 66 54 } 67 55 EXPORT_SYMBOL_GPL(housekeeping_affine); 68 56 69 - bool housekeeping_test_cpu(int cpu, enum hk_flags flags) 57 + bool housekeeping_test_cpu(int cpu, enum hk_type type) 70 58 { 71 59 if (static_branch_unlikely(&housekeeping_overridden)) 72 - if (housekeeping_flags & flags) 60 + if (housekeeping_flags & BIT(type)) 73 61 return cpumask_test_cpu(cpu, housekeeping_mask); 74 62 return true; 75 63 }
+4 -4
kernel/sched/topology.c
··· 1366 1366 list_for_each_entry(entry, &asym_cap_list, link) 1367 1367 cpumask_clear(cpu_capacity_span(entry)); 1368 1368 1369 - for_each_cpu_and(cpu, cpu_possible_mask, housekeeping_cpumask(HK_FLAG_DOMAIN)) 1369 + for_each_cpu_and(cpu, cpu_possible_mask, housekeeping_cpumask(HK_TYPE_DOMAIN)) 1370 1370 asym_cpu_capacity_update_data(cpu); 1371 1371 1372 1372 list_for_each_entry_safe(entry, next, &asym_cap_list, link) { ··· 2440 2440 doms_cur = alloc_sched_domains(ndoms_cur); 2441 2441 if (!doms_cur) 2442 2442 doms_cur = &fallback_doms; 2443 - cpumask_and(doms_cur[0], cpu_map, housekeeping_cpumask(HK_FLAG_DOMAIN)); 2443 + cpumask_and(doms_cur[0], cpu_map, housekeeping_cpumask(HK_TYPE_DOMAIN)); 2444 2444 err = build_sched_domains(doms_cur[0], NULL); 2445 2445 2446 2446 return err; ··· 2529 2529 if (doms_new) { 2530 2530 n = 1; 2531 2531 cpumask_and(doms_new[0], cpu_active_mask, 2532 - housekeeping_cpumask(HK_FLAG_DOMAIN)); 2532 + housekeeping_cpumask(HK_TYPE_DOMAIN)); 2533 2533 } 2534 2534 } else { 2535 2535 n = ndoms_new; ··· 2564 2564 n = 0; 2565 2565 doms_new = &fallback_doms; 2566 2566 cpumask_and(doms_new[0], cpu_active_mask, 2567 - housekeeping_cpumask(HK_FLAG_DOMAIN)); 2567 + housekeeping_cpumask(HK_TYPE_DOMAIN)); 2568 2568 } 2569 2569 2570 2570 /* Build new domains: */
+1 -1
kernel/watchdog.c
··· 848 848 pr_info("Disabling watchdog on nohz_full cores by default\n"); 849 849 850 850 cpumask_copy(&watchdog_cpumask, 851 - housekeeping_cpumask(HK_FLAG_TIMER)); 851 + housekeeping_cpumask(HK_TYPE_TIMER)); 852 852 853 853 if (!watchdog_nmi_probe()) 854 854 nmi_watchdog_available = true;
+2 -2
kernel/workqueue.c
··· 6011 6011 BUILD_BUG_ON(__alignof__(struct pool_workqueue) < __alignof__(long long)); 6012 6012 6013 6013 BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL)); 6014 - cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(HK_FLAG_WQ)); 6015 - cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, housekeeping_cpumask(HK_FLAG_DOMAIN)); 6014 + cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(HK_TYPE_WQ)); 6015 + cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, housekeeping_cpumask(HK_TYPE_DOMAIN)); 6016 6016 6017 6017 pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC); 6018 6018
+2 -2
net/core/net-sysfs.c
··· 839 839 } 840 840 841 841 if (!cpumask_empty(mask)) { 842 - cpumask_and(mask, mask, housekeeping_cpumask(HK_FLAG_DOMAIN)); 843 - cpumask_and(mask, mask, housekeeping_cpumask(HK_FLAG_WQ)); 842 + cpumask_and(mask, mask, housekeeping_cpumask(HK_TYPE_DOMAIN)); 843 + cpumask_and(mask, mask, housekeeping_cpumask(HK_TYPE_WQ)); 844 844 if (cpumask_empty(mask)) { 845 845 free_cpumask_var(mask); 846 846 return -EINVAL;