Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

cpuset: remove remaining pointers to cpumask_t

Impact: cleanups, use new cpumask API

Final trivial cleanups: mainly s/cpumask_t/struct cpumask

Note there is a FIXME in generate_sched_domains(). A future patch will
change struct cpumask *doms to struct cpumask *doms[].
(I suppose Rusty will do this.)

Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Acked-by: Mike Travis <travis@sgi.com>
Cc: Paul Menage <menage@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Li Zefan and committed by Linus Torvalds.
6af866af 300ed6cb

+21 -17
+6 -4
include/linux/cpuset.h
··· 20 20 extern int cpuset_init_early(void); 21 21 extern int cpuset_init(void); 22 22 extern void cpuset_init_smp(void); 23 - extern void cpuset_cpus_allowed(struct task_struct *p, cpumask_t *mask); 24 - extern void cpuset_cpus_allowed_locked(struct task_struct *p, cpumask_t *mask); 23 + extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask); 24 + extern void cpuset_cpus_allowed_locked(struct task_struct *p, 25 + struct cpumask *mask); 25 26 extern nodemask_t cpuset_mems_allowed(struct task_struct *p); 26 27 #define cpuset_current_mems_allowed (current->mems_allowed) 27 28 void cpuset_init_current_mems_allowed(void); ··· 87 86 static inline int cpuset_init(void) { return 0; } 88 87 static inline void cpuset_init_smp(void) {} 89 88 90 - static inline void cpuset_cpus_allowed(struct task_struct *p, cpumask_t *mask) 89 + static inline void cpuset_cpus_allowed(struct task_struct *p, 90 + struct cpumask *mask) 91 91 { 92 92 *mask = cpu_possible_map; 93 93 } 94 94 static inline void cpuset_cpus_allowed_locked(struct task_struct *p, 95 - cpumask_t *mask) 95 + struct cpumask *mask) 96 96 { 97 97 *mask = cpu_possible_map; 98 98 }
+15 -13
kernel/cpuset.c
··· 289 289 * Call with callback_mutex held. 290 290 */ 291 291 292 - static void guarantee_online_cpus(const struct cpuset *cs, cpumask_t *pmask) 292 + static void guarantee_online_cpus(const struct cpuset *cs, 293 + struct cpumask *pmask) 293 294 { 294 295 while (cs && !cpumask_intersects(cs->cpus_allowed, cpu_online_mask)) 295 296 cs = cs->parent; ··· 611 610 * element of the partition (one sched domain) to be passed to 612 611 * partition_sched_domains(). 613 612 */ 614 - static int generate_sched_domains(cpumask_t **domains, 613 + /* FIXME: see the FIXME in partition_sched_domains() */ 614 + static int generate_sched_domains(struct cpumask **domains, 615 615 struct sched_domain_attr **attributes) 616 616 { 617 617 LIST_HEAD(q); /* queue of cpusets to be scanned */ ··· 620 618 struct cpuset **csa; /* array of all cpuset ptrs */ 621 619 int csn; /* how many cpuset ptrs in csa so far */ 622 620 int i, j, k; /* indices for partition finding loops */ 623 - cpumask_t *doms; /* resulting partition; i.e. sched domains */ 621 + struct cpumask *doms; /* resulting partition; i.e. sched domains */ 624 622 struct sched_domain_attr *dattr; /* attributes for custom domains */ 625 623 int ndoms = 0; /* number of sched domains in result */ 626 - int nslot; /* next empty doms[] cpumask_t slot */ 624 + int nslot; /* next empty doms[] struct cpumask slot */ 627 625 628 626 doms = NULL; 629 627 dattr = NULL; ··· 631 629 632 630 /* Special case for the 99% of systems with one, full, sched domain */ 633 631 if (is_sched_load_balance(&top_cpuset)) { 634 - doms = kmalloc(sizeof(cpumask_t), GFP_KERNEL); 632 + doms = kmalloc(cpumask_size(), GFP_KERNEL); 635 633 if (!doms) 636 634 goto done; 637 635 ··· 710 708 * Now we know how many domains to create. 711 709 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks. 
712 710 */ 713 - doms = kmalloc(ndoms * sizeof(cpumask_t), GFP_KERNEL); 711 + doms = kmalloc(ndoms * cpumask_size(), GFP_KERNEL); 714 712 if (!doms) 715 713 goto done; 716 714 ··· 722 720 723 721 for (nslot = 0, i = 0; i < csn; i++) { 724 722 struct cpuset *a = csa[i]; 725 - cpumask_t *dp; 723 + struct cpumask *dp; 726 724 int apn = a->pn; 727 725 728 726 if (apn < 0) { ··· 745 743 continue; 746 744 } 747 745 748 - cpus_clear(*dp); 746 + cpumask_clear(dp); 749 747 if (dattr) 750 748 *(dattr + nslot) = SD_ATTR_INIT; 751 749 for (j = i; j < csn; j++) { ··· 792 790 static void do_rebuild_sched_domains(struct work_struct *unused) 793 791 { 794 792 struct sched_domain_attr *attr; 795 - cpumask_t *doms; 793 + struct cpumask *doms; 796 794 int ndoms; 797 795 798 796 get_online_cpus(); ··· 2046 2044 unsigned long phase, void *unused_cpu) 2047 2045 { 2048 2046 struct sched_domain_attr *attr; 2049 - cpumask_t *doms; 2047 + struct cpumask *doms; 2050 2048 int ndoms; 2051 2049 2052 2050 switch (phase) { ··· 2116 2114 /** 2117 2115 * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset. 2118 2116 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed. 2119 - * @pmask: pointer to cpumask_t variable to receive cpus_allowed set. 2117 + * @pmask: pointer to struct cpumask variable to receive cpus_allowed set. 2120 2118 * 2121 2119 * Description: Returns the cpumask_var_t cpus_allowed of the cpuset 2122 2120 * attached to the specified @tsk. Guaranteed to return some non-empty ··· 2124 2122 * tasks cpuset. 2125 2123 **/ 2126 2124 2127 - void cpuset_cpus_allowed(struct task_struct *tsk, cpumask_t *pmask) 2125 + void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask) 2128 2126 { 2129 2127 mutex_lock(&callback_mutex); 2130 2128 cpuset_cpus_allowed_locked(tsk, pmask); ··· 2135 2133 * cpuset_cpus_allowed_locked - return cpus_allowed mask from a tasks cpuset. 2136 2134 * Must be called with callback_mutex held. 
2137 2135 **/ 2138 - void cpuset_cpus_allowed_locked(struct task_struct *tsk, cpumask_t *pmask) 2136 + void cpuset_cpus_allowed_locked(struct task_struct *tsk, struct cpumask *pmask) 2139 2137 { 2140 2138 task_lock(tsk); 2141 2139 guarantee_online_cpus(task_cs(tsk), pmask);