Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm/headers, sched/headers: Move task related MM types from <linux/mm_types.h> to <linux/mm_types_task.h>

Separate all the MM types that are embedded directly in 'struct task_struct'
into the <linux/mm_types_task.h> header.

The goal is to include this header in <linux/sched.h>, not the full <linux/mm_types.h>
header, to reduce the size, complexity and coupling of <linux/sched.h>.

(This patch does not change <linux/sched.h> yet.)

Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>

+55 -49
-49
include/linux/mm_types.h
··· 24 24 struct address_space; 25 25 struct mem_cgroup; 26 26 27 - #define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) 28 - #define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \ 29 - IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK)) 30 - #define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8) 31 - 32 27 /* 33 28 * Each physical page in the system has a struct page associated with 34 29 * it to keep track of whatever it is we are using the page for at the ··· 226 231 #endif 227 232 ; 228 233 229 - struct page_frag { 230 - struct page *page; 231 - #if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) 232 - __u32 offset; 233 - __u32 size; 234 - #else 235 - __u16 offset; 236 - __u16 size; 237 - #endif 238 - }; 239 - 240 234 #define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK) 241 235 #define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE) 242 236 ··· 344 360 struct vm_userfaultfd_ctx vm_userfaultfd_ctx; 345 361 }; 346 362 347 - /* 348 - * The per task VMA cache array: 349 - */ 350 - #define VMACACHE_BITS 2 351 - #define VMACACHE_SIZE (1U << VMACACHE_BITS) 352 - #define VMACACHE_MASK (VMACACHE_SIZE - 1) 353 - 354 - struct vmacache { 355 - u32 seqnum; 356 - struct vm_area_struct *vmas[VMACACHE_SIZE]; 357 - }; 358 - 359 363 struct core_thread { 360 364 struct task_struct *task; 361 365 struct core_thread *next; ··· 353 381 atomic_t nr_threads; 354 382 struct core_thread dumper; 355 383 struct completion startup; 356 - }; 357 - 358 - enum { 359 - MM_FILEPAGES, /* Resident file mapping pages */ 360 - MM_ANONPAGES, /* Resident anonymous pages */ 361 - MM_SWAPENTS, /* Anonymous swap entries */ 362 - MM_SHMEMPAGES, /* Resident shared memory pages */ 363 - NR_MM_COUNTERS 364 - }; 365 - 366 - #if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU) 367 - #define SPLIT_RSS_COUNTING 368 - /* per-thread cached information, */ 369 - struct task_rss_stat { 370 - int events; /* for synchronization threshold */ 371 - int count[NR_MM_COUNTERS]; 372 
- }; 373 - #endif /* USE_SPLIT_PTE_PTLOCKS */ 374 - 375 - struct mm_rss_stat { 376 - atomic_long_t count[NR_MM_COUNTERS]; 377 384 }; 378 385 379 386 struct kioctx_table;
+55
include/linux/mm_types_task.h
··· 1 1 #ifndef _LINUX_MM_TYPES_TASK_H 2 2 #define _LINUX_MM_TYPES_TASK_H 3 3 4 + /* 5 + * Here are the definitions of the MM data types that are embedded in 'struct task_struct'. 6 + * 7 + * (These are defined separately to decouple sched.h from mm_types.h as much as possible.) 8 + */ 9 + 4 10 #include <linux/types.h> 5 11 #include <linux/threads.h> 6 12 #include <linux/atomic.h> 7 13 8 14 #include <asm/page.h> 15 + 16 + #define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) 17 + #define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \ 18 + IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK)) 19 + #define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8) 20 + 21 + /* 22 + * The per task VMA cache array: 23 + */ 24 + #define VMACACHE_BITS 2 25 + #define VMACACHE_SIZE (1U << VMACACHE_BITS) 26 + #define VMACACHE_MASK (VMACACHE_SIZE - 1) 27 + 28 + struct vmacache { 29 + u32 seqnum; 30 + struct vm_area_struct *vmas[VMACACHE_SIZE]; 31 + }; 32 + 33 + enum { 34 + MM_FILEPAGES, /* Resident file mapping pages */ 35 + MM_ANONPAGES, /* Resident anonymous pages */ 36 + MM_SWAPENTS, /* Anonymous swap entries */ 37 + MM_SHMEMPAGES, /* Resident shared memory pages */ 38 + NR_MM_COUNTERS 39 + }; 40 + 41 + #if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU) 42 + #define SPLIT_RSS_COUNTING 43 + /* per-thread cached information, */ 44 + struct task_rss_stat { 45 + int events; /* for synchronization threshold */ 46 + int count[NR_MM_COUNTERS]; 47 + }; 48 + #endif /* USE_SPLIT_PTE_PTLOCKS */ 49 + 50 + struct mm_rss_stat { 51 + atomic_long_t count[NR_MM_COUNTERS]; 52 + }; 53 + 54 + struct page_frag { 55 + struct page *page; 56 + #if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) 57 + __u32 offset; 58 + __u32 size; 59 + #else 60 + __u16 offset; 61 + __u16 size; 62 + #endif 63 + }; 9 64 10 65 #endif /* _LINUX_MM_TYPES_TASK_H */