Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: NUMA aware alloc_thread_info_node()

Add a node parameter to alloc_thread_info(), and change its name to
alloc_thread_info_node()

This change is needed to allow NUMA aware kthread_create_on_cpu()

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Acked-by: David S. Miller <davem@davemloft.net>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Acked-by: Rusty Russell <rusty@rustcorp.com.au>
Cc: Tejun Heo <tj@kernel.org>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: David Howells <dhowells@redhat.com>
Cc: <linux-arch@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Eric Dumazet and committed by
Linus Torvalds
b6a84016 504f52b5

+70 -64
+1 -1
arch/cris/include/asm/thread_info.h
··· 68 68 #define init_thread_info (init_thread_union.thread_info) 69 69 70 70 /* thread information allocation */ 71 - #define alloc_thread_info(tsk) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1)) 71 + #define alloc_thread_info_node(tsk, node) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1)) 72 72 #define free_thread_info(ti) free_pages((unsigned long) (ti), 1) 73 73 74 74 #endif /* !__ASSEMBLY__ */
+4 -9
arch/frv/include/asm/thread_info.h
··· 84 84 85 85 /* thread information allocation */ 86 86 #ifdef CONFIG_DEBUG_STACK_USAGE 87 - #define alloc_thread_info(tsk) \ 88 - ({ \ 89 - struct thread_info *ret; \ 90 - \ 91 - ret = kzalloc(THREAD_SIZE, GFP_KERNEL); \ 92 - \ 93 - ret; \ 94 - }) 87 + #define alloc_thread_info_node(tsk, node) \ 88 + kzalloc_node(THREAD_SIZE, GFP_KERNEL, node) 95 89 #else 96 - #define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL) 90 + #define alloc_thread_info_node(tsk, node) \ 91 + kmalloc_node(THREAD_SIZE, GFP_KERNEL, node) 97 92 #endif 98 93 99 94 #define free_thread_info(info) kfree(info)
+3 -2
arch/ia64/include/asm/thread_info.h
··· 59 59 #ifndef ASM_OFFSETS_C 60 60 /* how to get the thread information struct from C */ 61 61 #define current_thread_info() ((struct thread_info *) ((char *) current + IA64_TASK_SIZE)) 62 - #define alloc_thread_info(tsk) ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE)) 62 + #define alloc_thread_info_node(tsk, node) \ 63 + ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE)) 63 64 #define task_thread_info(tsk) ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE)) 64 65 #else 65 66 #define current_thread_info() ((struct thread_info *) 0) 66 - #define alloc_thread_info(tsk) ((struct thread_info *) 0) 67 + #define alloc_thread_info_node(tsk, node) ((struct thread_info *) 0) 67 68 #define task_thread_info(tsk) ((struct thread_info *) 0) 68 69 #endif 69 70 #define free_thread_info(ti) /* nothing */
+4 -9
arch/m32r/include/asm/thread_info.h
··· 96 96 97 97 /* thread information allocation */ 98 98 #ifdef CONFIG_DEBUG_STACK_USAGE 99 - #define alloc_thread_info(tsk) \ 100 - ({ \ 101 - struct thread_info *ret; \ 102 - \ 103 - ret = kzalloc(THREAD_SIZE, GFP_KERNEL); \ 104 - \ 105 - ret; \ 106 - }) 99 + #define alloc_thread_info_node(tsk, node) \ 100 + kzalloc_node(THREAD_SIZE, GFP_KERNEL, node) 107 101 #else 108 - #define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL) 102 + #define alloc_thread_info_node(tsk, node) \ 103 + kmalloc_node(THREAD_SIZE, GFP_KERNEL, node) 109 104 #endif 110 105 111 106 #define free_thread_info(info) kfree(info)
+4 -2
arch/mips/include/asm/thread_info.h
··· 88 88 #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR 89 89 90 90 #ifdef CONFIG_DEBUG_STACK_USAGE 91 - #define alloc_thread_info(tsk) kzalloc(THREAD_SIZE, GFP_KERNEL) 91 + #define alloc_thread_info_node(tsk, node) \ 92 + kzalloc_node(THREAD_SIZE, GFP_KERNEL, node) 92 93 #else 93 - #define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL) 94 + #define alloc_thread_info_node(tsk, node) \ 95 + kmalloc_node(THREAD_SIZE, GFP_KERNEL, node) 94 96 #endif 95 97 96 98 #define free_thread_info(info) kfree(info)
+4 -2
arch/mn10300/include/asm/thread_info.h
··· 124 124 125 125 /* thread information allocation */ 126 126 #ifdef CONFIG_DEBUG_STACK_USAGE 127 - #define alloc_thread_info(tsk) kzalloc(THREAD_SIZE, GFP_KERNEL) 127 + #define alloc_thread_info_node(tsk, node) \ 128 + kzalloc_node(THREAD_SIZE, GFP_KERNEL, node) 128 129 #else 129 - #define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL) 130 + #define alloc_thread_info_node(tsk, node) \ 131 + kmalloc_node(THREAD_SIZE, GFP_KERNEL, node) 130 132 #endif 131 133 132 134 #define free_thread_info(ti) kfree((ti))
+1 -1
arch/powerpc/include/asm/thread_info.h
··· 72 72 73 73 #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR 74 74 75 - extern struct thread_info *alloc_thread_info(struct task_struct *tsk); 75 + extern struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node); 76 76 extern void free_thread_info(struct thread_info *ti); 77 77 78 78 #endif /* THREAD_SHIFT < PAGE_SHIFT */
+2 -2
arch/powerpc/kernel/process.c
··· 1218 1218 1219 1219 static struct kmem_cache *thread_info_cache; 1220 1220 1221 - struct thread_info *alloc_thread_info(struct task_struct *tsk) 1221 + struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node) 1222 1222 { 1223 1223 struct thread_info *ti; 1224 1224 1225 - ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL); 1225 + ti = kmem_cache_alloc_node(thread_info_cache, GFP_KERNEL, node); 1226 1226 if (unlikely(ti == NULL)) 1227 1227 return NULL; 1228 1228 #ifdef CONFIG_DEBUG_STACK_USAGE
+1 -1
arch/score/include/asm/thread_info.h
··· 71 71 register struct thread_info *__current_thread_info __asm__("r28"); 72 72 #define current_thread_info() __current_thread_info 73 73 74 - #define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL) 74 + #define alloc_thread_info_node(tsk, node) kmalloc_node(THREAD_SIZE, GFP_KERNEL, node) 75 75 #define free_thread_info(info) kfree(info) 76 76 77 77 #endif /* !__ASSEMBLY__ */
+1 -1
arch/sh/include/asm/thread_info.h
··· 95 95 96 96 #endif 97 97 98 - extern struct thread_info *alloc_thread_info(struct task_struct *tsk); 98 + extern struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node); 99 99 extern void free_thread_info(struct thread_info *ti); 100 100 extern void arch_task_cache_init(void); 101 101 #define arch_task_cache_init arch_task_cache_init
+9 -7
arch/sh/kernel/process.c
··· 32 32 #if THREAD_SHIFT < PAGE_SHIFT 33 33 static struct kmem_cache *thread_info_cache; 34 34 35 - struct thread_info *alloc_thread_info(struct task_struct *tsk) 35 + struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node) 36 36 { 37 37 struct thread_info *ti; 38 - 39 - ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL); 40 - if (unlikely(ti == NULL)) 41 - return NULL; 42 38 #ifdef CONFIG_DEBUG_STACK_USAGE 43 - memset(ti, 0, THREAD_SIZE); 39 + gfp_t mask = GFP_KERNEL | __GFP_ZERO; 40 + #else 41 + gfp_t mask = GFP_KERNEL; 44 42 #endif 43 + 44 + ti = kmem_cache_alloc_node(thread_info_cache, mask, node); 45 45 return ti; 46 46 } 47 47 ··· 64 64 #else 65 65 gfp_t mask = GFP_KERNEL; 66 66 #endif 67 - return (struct thread_info *)__get_free_pages(mask, THREAD_SIZE_ORDER); 67 + struct page *page = alloc_pages_node(node, mask, THREAD_SIZE_ORDER); 68 + 69 + return page ? page_address(page) : NULL; 68 70 } 69 72 70 72 void free_thread_info(struct thread_info *ti)
+3 -3
arch/sparc/include/asm/thread_info_32.h
··· 82 82 83 83 #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR 84 84 85 - BTFIXUPDEF_CALL(struct thread_info *, alloc_thread_info, void) 86 - #define alloc_thread_info(tsk) BTFIXUP_CALL(alloc_thread_info)() 85 + BTFIXUPDEF_CALL(struct thread_info *, alloc_thread_info_node, int) 86 + #define alloc_thread_info_node(tsk, node) BTFIXUP_CALL(alloc_thread_info_node)(node) 87 87 88 88 BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *) 89 89 #define free_thread_info(ti) BTFIXUP_CALL(free_thread_info)(ti) ··· 92 92 93 93 /* 94 94 * Size of kernel stack for each process. 95 - * Observe the order of get_free_pages() in alloc_thread_info(). 95 + * Observe the order of get_free_pages() in alloc_thread_info_node(). 96 96 * The sun4 has 8K stack too, because it's short on memory, and 16K is a waste. 97 97 */ 98 98 #define THREAD_SIZE 8192
+12 -12
arch/sparc/include/asm/thread_info_64.h
··· 146 146 #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR 147 147 148 148 #ifdef CONFIG_DEBUG_STACK_USAGE 149 - #define alloc_thread_info(tsk) \ 150 - ({ \ 151 - struct thread_info *ret; \ 152 - \ 153 - ret = (struct thread_info *) \ 154 - __get_free_pages(GFP_KERNEL, __THREAD_INFO_ORDER); \ 155 - if (ret) \ 156 - memset(ret, 0, PAGE_SIZE<<__THREAD_INFO_ORDER); \ 157 - ret; \ 158 - }) 149 + #define THREAD_FLAGS (GFP_KERNEL | __GFP_ZERO) 159 150 #else 160 - #define alloc_thread_info(tsk) \ 161 - ((struct thread_info *)__get_free_pages(GFP_KERNEL, __THREAD_INFO_ORDER)) 151 + #define THREAD_FLAGS (GFP_KERNEL) 162 152 #endif 153 + 154 + #define alloc_thread_info_node(tsk, node) \ 155 + ({ \ 156 + struct page *page = alloc_pages_node(node, THREAD_FLAGS, \ 157 + __THREAD_INFO_ORDER); \ 158 + struct thread_info *ret; \ 159 + \ 160 + ret = page ? page_address(page) : NULL; \ 161 + ret; \ 162 + }) 163 163 164 164 #define free_thread_info(ti) \ 165 165 free_pages((unsigned long)(ti),__THREAD_INFO_ORDER)
+2 -2
arch/sparc/mm/srmmu.c
··· 650 650 * mappings on the kernel stack without any special code as we did 651 651 * need on the sun4c. 652 652 */ 653 - static struct thread_info *srmmu_alloc_thread_info(void) 653 + static struct thread_info *srmmu_alloc_thread_info_node(int node) 654 654 { 655 655 struct thread_info *ret; 656 656 ··· 2271 2271 2272 2272 BTFIXUPSET_CALL(mmu_info, srmmu_mmu_info, BTFIXUPCALL_NORM); 2273 2273 2274 - BTFIXUPSET_CALL(alloc_thread_info, srmmu_alloc_thread_info, BTFIXUPCALL_NORM); 2274 + BTFIXUPSET_CALL(alloc_thread_info_node, srmmu_alloc_thread_info_node, BTFIXUPCALL_NORM); 2275 2275 BTFIXUPSET_CALL(free_thread_info, srmmu_free_thread_info, BTFIXUPCALL_NORM); 2276 2276 2277 2277 BTFIXUPSET_CALL(pte_to_pgoff, srmmu_pte_to_pgoff, BTFIXUPCALL_NORM);
+2 -2
arch/sparc/mm/sun4c.c
··· 922 922 free_locked_segment(BUCKET_ADDR(entry)); 923 923 } 924 924 925 - static struct thread_info *sun4c_alloc_thread_info(void) 925 + static struct thread_info *sun4c_alloc_thread_info_node(int node) 926 926 { 927 927 unsigned long addr, pages; 928 928 int entry; ··· 2155 2155 BTFIXUPSET_CALL(__swp_offset, sun4c_swp_offset, BTFIXUPCALL_NORM); 2156 2156 BTFIXUPSET_CALL(__swp_entry, sun4c_swp_entry, BTFIXUPCALL_NORM); 2157 2157 2158 - BTFIXUPSET_CALL(alloc_thread_info, sun4c_alloc_thread_info, BTFIXUPCALL_NORM); 2158 + BTFIXUPSET_CALL(alloc_thread_info_node, sun4c_alloc_thread_info_node, BTFIXUPCALL_NORM); 2159 2159 BTFIXUPSET_CALL(free_thread_info, sun4c_free_thread_info, BTFIXUPCALL_NORM); 2160 2160 2161 2161 BTFIXUPSET_CALL(mmu_info, sun4c_mmu_info, BTFIXUPCALL_NORM);
+1 -1
arch/tile/include/asm/thread_info.h
··· 84 84 ((struct thread_info *)(stack_pointer & -THREAD_SIZE)) 85 85 86 86 #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR 87 - extern struct thread_info *alloc_thread_info(struct task_struct *task); 87 + extern struct thread_info *alloc_thread_info_node(struct task_struct *task, int node); 88 88 extern void free_thread_info(struct thread_info *info); 89 89 90 90 /* Sit on a nap instruction until interrupted. */
+2 -2
arch/tile/kernel/process.c
··· 109 109 } 110 110 } 111 111 112 - struct thread_info *alloc_thread_info(struct task_struct *task) 112 + struct thread_info *alloc_thread_info_node(struct task_struct *task, int node) 113 113 { 114 114 struct page *page; 115 115 gfp_t flags = GFP_KERNEL; ··· 118 118 flags |= __GFP_ZERO; 119 119 #endif 120 120 121 - page = alloc_pages(flags, THREAD_SIZE_ORDER); 121 + page = alloc_pages_node(node, flags, THREAD_SIZE_ORDER); 122 122 if (!page) 123 123 return NULL; 124 124
+8 -2
arch/x86/include/asm/thread_info.h
··· 161 161 162 162 #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR 163 163 164 - #define alloc_thread_info(tsk) \ 165 - ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER)) 164 + #define alloc_thread_info_node(tsk, node) \ 165 + ({ \ 166 + struct page *page = alloc_pages_node(node, THREAD_FLAGS, \ 167 + THREAD_ORDER); \ 168 + struct thread_info *ret = page ? page_address(page) : NULL; \ 169 + \ 170 + ret; \ 171 + }) 166 172 167 173 #ifdef CONFIG_X86_32 168 174
+6 -3
kernel/fork.c
··· 117 117 #endif 118 118 119 119 #ifndef __HAVE_ARCH_THREAD_INFO_ALLOCATOR 120 - static inline struct thread_info *alloc_thread_info(struct task_struct *tsk) 120 + static struct thread_info *alloc_thread_info_node(struct task_struct *tsk, 121 + int node) 121 122 { 122 123 #ifdef CONFIG_DEBUG_STACK_USAGE 123 124 gfp_t mask = GFP_KERNEL | __GFP_ZERO; 124 125 #else 125 126 gfp_t mask = GFP_KERNEL; 126 127 #endif 127 - return (struct thread_info *)__get_free_pages(mask, THREAD_SIZE_ORDER); 128 + struct page *page = alloc_pages_node(node, mask, THREAD_SIZE_ORDER); 129 + 130 + return page ? page_address(page) : NULL; 128 131 } 129 132 130 133 static inline void free_thread_info(struct thread_info *ti) ··· 263 260 if (!tsk) 264 261 return NULL; 265 262 266 - ti = alloc_thread_info(tsk); 263 + ti = alloc_thread_info_node(tsk, node); 267 264 if (!ti) { 268 265 free_task_struct(tsk); 269 266 return NULL;