Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'header_cleanup-2024-01-10' of https://evilpiepirate.org/git/bcachefs

Pull header cleanups from Kent Overstreet:
"The goal is to get sched.h down to a type only header, so the main
thing happening in this patchset is splitting out various _types.h
headers and dependency fixups, as well as moving some things out of
sched.h to better locations.

This is prep work for the memory allocation profiling patchset which
adds new sched.h interdependencies"

* tag 'header_cleanup-2024-01-10' of https://evilpiepirate.org/git/bcachefs: (51 commits)
Kill sched.h dependency on rcupdate.h
kill unnecessary thread_info.h include
Kill unnecessary kernel.h include
preempt.h: Kill dependency on list.h
rseq: Split out rseq.h from sched.h
LoongArch: signal.c: add header file to fix build error
restart_block: Trim includes
lockdep: move held_lock to lockdep_types.h
sem: Split out sem_types.h
uidgid: Split out uidgid_types.h
seccomp: Split out seccomp_types.h
refcount: Split out refcount_types.h
uapi/linux/resource.h: fix include
x86/signal: kill dependency on time.h
syscall_user_dispatch.h: split out *_types.h
mm_types_task.h: Trim dependencies
Split out irqflags_types.h
ipc: Kill bogus dependency on spinlock.h
shm: Slim down dependencies
workqueue: Split out workqueue_types.h
...

+1061 -774
+2 -2
arch/arm64/include/asm/spectre.h
··· 13 13 #define __BP_HARDEN_HYP_VECS_SZ ((BP_HARDEN_EL2_SLOTS - 1) * SZ_2K) 14 14 15 15 #ifndef __ASSEMBLY__ 16 - 17 - #include <linux/percpu.h> 16 + #include <linux/smp.h> 17 + #include <asm/percpu.h> 18 18 19 19 #include <asm/cpufeature.h> 20 20 #include <asm/virt.h>
+1
arch/arm64/kernel/ptrace.c
··· 28 28 #include <linux/hw_breakpoint.h> 29 29 #include <linux/regset.h> 30 30 #include <linux/elf.h> 31 + #include <linux/rseq.h> 31 32 32 33 #include <asm/compat.h> 33 34 #include <asm/cpufeature.h>
+1
arch/loongarch/kernel/signal.c
··· 15 15 #include <linux/context_tracking.h> 16 16 #include <linux/entry-common.h> 17 17 #include <linux/irqflags.h> 18 + #include <linux/rseq.h> 18 19 #include <linux/sched.h> 19 20 #include <linux/mm.h> 20 21 #include <linux/personality.h>
+1
arch/m68k/include/asm/processor.h
··· 8 8 #ifndef __ASM_M68K_PROCESSOR_H 9 9 #define __ASM_M68K_PROCESSOR_H 10 10 11 + #include <linux/preempt.h> 11 12 #include <linux/thread_info.h> 12 13 #include <asm/fpu.h> 13 14 #include <asm/ptrace.h>
+1
arch/microblaze/include/asm/pgtable.h
··· 336 336 } 337 337 338 338 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG 339 + struct vm_area_struct; 339 340 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, 340 341 unsigned long address, pte_t *ptep) 341 342 {
+1
arch/parisc/mm/init.c
··· 33 33 #include <asm/msgbuf.h> 34 34 #include <asm/sparsemem.h> 35 35 #include <asm/asm-offsets.h> 36 + #include <asm/shmbuf.h> 36 37 37 38 extern int data_start; 38 39 extern void parisc_kernel_start(void); /* Kernel entry point in head.S */
+1
arch/powerpc/kernel/interrupt.c
··· 3 3 #include <linux/context_tracking.h> 4 4 #include <linux/err.h> 5 5 #include <linux/compat.h> 6 + #include <linux/rseq.h> 6 7 #include <linux/sched/debug.h> /* for show_regs */ 7 8 8 9 #include <asm/kup.h>
+1
arch/powerpc/kvm/book3s_64_vio.c
··· 20 20 #include <linux/iommu.h> 21 21 #include <linux/file.h> 22 22 #include <linux/mm.h> 23 + #include <linux/rcupdate_wait.h> 23 24 24 25 #include <asm/kvm_ppc.h> 25 26 #include <asm/kvm_book3s.h>
+1
arch/s390/kernel/signal.c
··· 12 12 13 13 #include <linux/sched.h> 14 14 #include <linux/sched/task_stack.h> 15 + #include <linux/rseq.h> 15 16 #include <linux/mm.h> 16 17 #include <linux/smp.h> 17 18 #include <linux/kernel.h>
+1
arch/x86/include/asm/current.h
··· 2 2 #ifndef _ASM_X86_CURRENT_H 3 3 #define _ASM_X86_CURRENT_H 4 4 5 + #include <linux/build_bug.h> 5 6 #include <linux/compiler.h> 6 7 7 8 #ifndef __ASSEMBLY__
+1
arch/x86/include/asm/debugreg.h
··· 5 5 #include <linux/bug.h> 6 6 #include <linux/percpu.h> 7 7 #include <uapi/asm/debugreg.h> 8 + #include <asm/cpufeature.h> 8 9 9 10 DECLARE_PER_CPU(unsigned long, cpu_dr7); 10 11
+2
arch/x86/include/asm/fpu/types.h
··· 5 5 #ifndef _ASM_X86_FPU_H 6 6 #define _ASM_X86_FPU_H 7 7 8 + #include <asm/page_types.h> 9 + 8 10 /* 9 11 * The legacy x87 FPU state format, as saved by FSAVE and 10 12 * restored by the FRSTOR instructions:
+4
arch/x86/include/asm/paravirt.h
··· 6 6 7 7 #include <asm/paravirt_types.h> 8 8 9 + #ifndef __ASSEMBLY__ 10 + struct mm_struct; 11 + #endif 12 + 9 13 #ifdef CONFIG_PARAVIRT 10 14 #include <asm/pgtable_types.h> 11 15 #include <asm/asm.h>
+1
arch/x86/include/asm/paravirt_types.h
··· 5 5 #ifdef CONFIG_PARAVIRT 6 6 7 7 #ifndef __ASSEMBLY__ 8 + #include <linux/types.h> 8 9 9 10 #include <asm/desc_defs.h> 10 11 #include <asm/pgtable_types.h>
+1 -1
arch/x86/include/asm/percpu.h
··· 24 24 25 25 #else /* ...!ASSEMBLY */ 26 26 27 - #include <linux/kernel.h> 28 27 #include <linux/stringify.h> 28 + #include <asm/asm.h> 29 29 30 30 #ifdef CONFIG_SMP 31 31 #define __percpu_prefix "%%"__stringify(__percpu_seg)":"
-1
arch/x86/include/asm/preempt.h
··· 6 6 #include <asm/percpu.h> 7 7 #include <asm/current.h> 8 8 9 - #include <linux/thread_info.h> 10 9 #include <linux/static_call_types.h> 11 10 12 11 /* We use the MSB mostly because its available */
-1
arch/x86/include/uapi/asm/signal.h
··· 4 4 5 5 #ifndef __ASSEMBLY__ 6 6 #include <linux/types.h> 7 - #include <linux/time.h> 8 7 #include <linux/compiler.h> 9 8 10 9 /* Avoid too many header ordering problems. */
+1
arch/x86/kernel/fpu/bugs.c
··· 2 2 /* 3 3 * x86 FPU bug checks: 4 4 */ 5 + #include <asm/cpufeature.h> 5 6 #include <asm/fpu/api.h> 6 7 7 8 /*
+1
arch/x86/kernel/signal.c
··· 27 27 #include <linux/context_tracking.h> 28 28 #include <linux/entry-common.h> 29 29 #include <linux/syscalls.h> 30 + #include <linux/rseq.h> 30 31 31 32 #include <asm/processor.h> 32 33 #include <asm/ucontext.h>
+1
arch/x86/lib/cache-smp.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 + #include <asm/paravirt.h> 2 3 #include <linux/smp.h> 3 4 #include <linux/export.h> 4 5
+1
arch/x86/um/sysrq_64.c
··· 6 6 7 7 #include <linux/kernel.h> 8 8 #include <linux/module.h> 9 + #include <linux/pid.h> 9 10 #include <linux/sched.h> 10 11 #include <linux/sched/debug.h> 11 12 #include <linux/utsname.h>
+1
drivers/base/power/runtime.c
··· 11 11 #include <linux/export.h> 12 12 #include <linux/pm_runtime.h> 13 13 #include <linux/pm_wakeirq.h> 14 + #include <linux/rculist.h> 14 15 #include <trace/events/rpm.h> 15 16 16 17 #include "../base.h"
+2
drivers/gpu/drm/i915/i915_memcpy.c
··· 23 23 */ 24 24 25 25 #include <linux/kernel.h> 26 + #include <linux/string.h> 27 + #include <linux/cpufeature.h> 26 28 #include <asm/fpu/api.h> 27 29 28 30 #include "i915_memcpy.h"
+1
drivers/gpu/drm/lima/lima_ctx.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 OR MIT 2 2 /* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */ 3 3 4 + #include <linux/pid.h> 4 5 #include <linux/slab.h> 5 6 6 7 #include "lima_device.h"
+1
drivers/irqchip/irq-gic-v4.c
··· 8 8 #include <linux/irq.h> 9 9 #include <linux/irqdomain.h> 10 10 #include <linux/msi.h> 11 + #include <linux/pid.h> 11 12 #include <linux/sched.h> 12 13 13 14 #include <linux/irqchip/arm-gic-v4.h>
+1
drivers/media/test-drivers/vidtv/vidtv_pes.c
··· 14 14 #define pr_fmt(fmt) KBUILD_MODNAME ":%s, %d: " fmt, __func__, __LINE__ 15 15 16 16 #include <linux/types.h> 17 + #include <linux/math64.h> 17 18 #include <linux/printk.h> 18 19 #include <linux/ratelimit.h> 19 20
+1
drivers/target/target_core_xcopy.c
··· 15 15 #include <linux/slab.h> 16 16 #include <linux/spinlock.h> 17 17 #include <linux/list.h> 18 + #include <linux/rculist.h> 18 19 #include <linux/configfs.h> 19 20 #include <linux/ratelimit.h> 20 21 #include <scsi/scsi_proto.h>
+1
fs/exec.c
··· 66 66 #include <linux/coredump.h> 67 67 #include <linux/time_namespace.h> 68 68 #include <linux/user_events.h> 69 + #include <linux/rseq.h> 69 70 70 71 #include <linux/uaccess.h> 71 72 #include <asm/mmu_context.h>
+1
include/linux/audit.h
··· 36 36 struct audit_watch; 37 37 struct audit_tree; 38 38 struct sk_buff; 39 + struct kern_ipc_perm; 39 40 40 41 struct audit_krule { 41 42 u32 pflags;
+1
include/linux/dma-fence.h
··· 21 21 #include <linux/sched.h> 22 22 #include <linux/printk.h> 23 23 #include <linux/rcupdate.h> 24 + #include <linux/timekeeping.h> 24 25 25 26 struct dma_fence; 26 27 struct dma_fence_ops;
+3 -43
include/linux/hrtimer.h
··· 13 13 #define _LINUX_HRTIMER_H 14 14 15 15 #include <linux/hrtimer_defs.h> 16 - #include <linux/rbtree.h> 16 + #include <linux/hrtimer_types.h> 17 17 #include <linux/init.h> 18 18 #include <linux/list.h> 19 - #include <linux/percpu.h> 19 + #include <linux/percpu-defs.h> 20 + #include <linux/rbtree.h> 20 21 #include <linux/seqlock.h> 21 22 #include <linux/timer.h> 22 - #include <linux/timerqueue.h> 23 23 24 24 struct hrtimer_clock_base; 25 25 struct hrtimer_cpu_base; ··· 60 60 }; 61 61 62 62 /* 63 - * Return values for the callback function 64 - */ 65 - enum hrtimer_restart { 66 - HRTIMER_NORESTART, /* Timer is not restarted */ 67 - HRTIMER_RESTART, /* Timer must be restarted */ 68 - }; 69 - 70 - /* 71 63 * Values to track state of the timer 72 64 * 73 65 * Possible states: ··· 85 93 */ 86 94 #define HRTIMER_STATE_INACTIVE 0x00 87 95 #define HRTIMER_STATE_ENQUEUED 0x01 88 - 89 - /** 90 - * struct hrtimer - the basic hrtimer structure 91 - * @node: timerqueue node, which also manages node.expires, 92 - * the absolute expiry time in the hrtimers internal 93 - * representation. The time is related to the clock on 94 - * which the timer is based. Is setup by adding 95 - * slack to the _softexpires value. For non range timers 96 - * identical to _softexpires. 97 - * @_softexpires: the absolute earliest expiry time of the hrtimer. 98 - * The time which was given as expiry time when the timer 99 - * was armed. 100 - * @function: timer expiry callback function 101 - * @base: pointer to the timer base (per cpu and per clock) 102 - * @state: state information (See bit values above) 103 - * @is_rel: Set if the timer was armed relative 104 - * @is_soft: Set if hrtimer will be expired in soft interrupt context. 105 - * @is_hard: Set if hrtimer will be expired in hard interrupt context 106 - * even on RT. 
107 - * 108 - * The hrtimer structure must be initialized by hrtimer_init() 109 - */ 110 - struct hrtimer { 111 - struct timerqueue_node node; 112 - ktime_t _softexpires; 113 - enum hrtimer_restart (*function)(struct hrtimer *); 114 - struct hrtimer_clock_base *base; 115 - u8 state; 116 - u8 is_rel; 117 - u8 is_soft; 118 - u8 is_hard; 119 - }; 120 96 121 97 /** 122 98 * struct hrtimer_sleeper - simple sleeper structure
+50
include/linux/hrtimer_types.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef _LINUX_HRTIMER_TYPES_H 3 + #define _LINUX_HRTIMER_TYPES_H 4 + 5 + #include <linux/types.h> 6 + #include <linux/timerqueue_types.h> 7 + 8 + struct hrtimer_clock_base; 9 + 10 + /* 11 + * Return values for the callback function 12 + */ 13 + enum hrtimer_restart { 14 + HRTIMER_NORESTART, /* Timer is not restarted */ 15 + HRTIMER_RESTART, /* Timer must be restarted */ 16 + }; 17 + 18 + /** 19 + * struct hrtimer - the basic hrtimer structure 20 + * @node: timerqueue node, which also manages node.expires, 21 + * the absolute expiry time in the hrtimers internal 22 + * representation. The time is related to the clock on 23 + * which the timer is based. Is setup by adding 24 + * slack to the _softexpires value. For non range timers 25 + * identical to _softexpires. 26 + * @_softexpires: the absolute earliest expiry time of the hrtimer. 27 + * The time which was given as expiry time when the timer 28 + * was armed. 29 + * @function: timer expiry callback function 30 + * @base: pointer to the timer base (per cpu and per clock) 31 + * @state: state information (See bit values above) 32 + * @is_rel: Set if the timer was armed relative 33 + * @is_soft: Set if hrtimer will be expired in soft interrupt context. 34 + * @is_hard: Set if hrtimer will be expired in hard interrupt context 35 + * even on RT. 36 + * 37 + * The hrtimer structure must be initialized by hrtimer_init() 38 + */ 39 + struct hrtimer { 40 + struct timerqueue_node node; 41 + ktime_t _softexpires; 42 + enum hrtimer_restart (*function)(struct hrtimer *); 43 + struct hrtimer_clock_base *base; 44 + u8 state; 45 + u8 is_rel; 46 + u8 is_soft; 47 + u8 is_hard; 48 + }; 49 + 50 + #endif /* _LINUX_HRTIMER_TYPES_H */
+1 -1
include/linux/ipc.h
··· 2 2 #ifndef _LINUX_IPC_H 3 3 #define _LINUX_IPC_H 4 4 5 - #include <linux/spinlock.h> 5 + #include <linux/spinlock_types.h> 6 6 #include <linux/uidgid.h> 7 7 #include <linux/rhashtable-types.h> 8 8 #include <uapi/linux/ipc.h>
+1 -13
include/linux/irqflags.h
··· 12 12 #ifndef _LINUX_TRACE_IRQFLAGS_H 13 13 #define _LINUX_TRACE_IRQFLAGS_H 14 14 15 + #include <linux/irqflags_types.h> 15 16 #include <linux/typecheck.h> 16 17 #include <linux/cleanup.h> 17 18 #include <asm/irqflags.h> ··· 34 33 #endif 35 34 36 35 #ifdef CONFIG_TRACE_IRQFLAGS 37 - 38 - /* Per-task IRQ trace events information. */ 39 - struct irqtrace_events { 40 - unsigned int irq_events; 41 - unsigned long hardirq_enable_ip; 42 - unsigned long hardirq_disable_ip; 43 - unsigned int hardirq_enable_event; 44 - unsigned int hardirq_disable_event; 45 - unsigned long softirq_disable_ip; 46 - unsigned long softirq_enable_ip; 47 - unsigned int softirq_disable_event; 48 - unsigned int softirq_enable_event; 49 - }; 50 36 51 37 DECLARE_PER_CPU(int, hardirqs_enabled); 52 38 DECLARE_PER_CPU(int, hardirq_context);
+22
include/linux/irqflags_types.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef _LINUX_IRQFLAGS_TYPES_H 3 + #define _LINUX_IRQFLAGS_TYPES_H 4 + 5 + #ifdef CONFIG_TRACE_IRQFLAGS 6 + 7 + /* Per-task IRQ trace events information. */ 8 + struct irqtrace_events { 9 + unsigned int irq_events; 10 + unsigned long hardirq_enable_ip; 11 + unsigned long hardirq_disable_ip; 12 + unsigned int hardirq_enable_event; 13 + unsigned int hardirq_disable_event; 14 + unsigned long softirq_disable_ip; 15 + unsigned long softirq_enable_ip; 16 + unsigned int softirq_disable_event; 17 + unsigned int softirq_enable_event; 18 + }; 19 + 20 + #endif 21 + 22 + #endif /* _LINUX_IRQFLAGS_TYPES_H */
+2
include/linux/kmsan_types.h
··· 9 9 #ifndef _LINUX_KMSAN_TYPES_H 10 10 #define _LINUX_KMSAN_TYPES_H 11 11 12 + #include <linux/types.h> 13 + 12 14 /* These constants are defined in the MSan LLVM instrumentation pass. */ 13 15 #define KMSAN_RETVAL_SIZE 800 14 16 #define KMSAN_PARAM_SIZE 800
+3 -5
include/linux/ktime.h
··· 21 21 #ifndef _LINUX_KTIME_H 22 22 #define _LINUX_KTIME_H 23 23 24 - #include <linux/time.h> 25 - #include <linux/jiffies.h> 26 24 #include <asm/bug.h> 27 - 28 - /* Nanosecond scalar representation for kernel time values */ 29 - typedef s64 ktime_t; 25 + #include <linux/jiffies.h> 26 + #include <linux/time.h> 27 + #include <linux/types.h> 30 28 31 29 /** 32 30 * ktime_set - Set a ktime_t variable from a seconds/nanoseconds value
-57
include/linux/lockdep.h
··· 82 82 u64 chain_key; 83 83 }; 84 84 85 - #define MAX_LOCKDEP_KEYS_BITS 13 86 - #define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS) 87 - #define INITIAL_CHAIN_KEY -1 88 - 89 - struct held_lock { 90 - /* 91 - * One-way hash of the dependency chain up to this point. We 92 - * hash the hashes step by step as the dependency chain grows. 93 - * 94 - * We use it for dependency-caching and we skip detection 95 - * passes and dependency-updates if there is a cache-hit, so 96 - * it is absolutely critical for 100% coverage of the validator 97 - * to have a unique key value for every unique dependency path 98 - * that can occur in the system, to make a unique hash value 99 - * as likely as possible - hence the 64-bit width. 100 - * 101 - * The task struct holds the current hash value (initialized 102 - * with zero), here we store the previous hash value: 103 - */ 104 - u64 prev_chain_key; 105 - unsigned long acquire_ip; 106 - struct lockdep_map *instance; 107 - struct lockdep_map *nest_lock; 108 - #ifdef CONFIG_LOCK_STAT 109 - u64 waittime_stamp; 110 - u64 holdtime_stamp; 111 - #endif 112 - /* 113 - * class_idx is zero-indexed; it points to the element in 114 - * lock_classes this held lock instance belongs to. class_idx is in 115 - * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive. 116 - */ 117 - unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS; 118 - /* 119 - * The lock-stack is unified in that the lock chains of interrupt 120 - * contexts nest ontop of process context chains, but we 'separate' 121 - * the hashes by starting with 0 if we cross into an interrupt 122 - * context, and we also keep do not add cross-context lock 123 - * dependencies - the lock usage graph walking covers that area 124 - * anyway, and we'd just unnecessarily increase the number of 125 - * dependencies otherwise. [Note: hardirq and softirq contexts 126 - * are separated from each other too.] 
127 - * 128 - * The following field is used to detect when we cross into an 129 - * interrupt context: 130 - */ 131 - unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */ 132 - unsigned int trylock:1; /* 16 bits */ 133 - 134 - unsigned int read:2; /* see lock_acquire() comment */ 135 - unsigned int check:1; /* see lock_acquire() comment */ 136 - unsigned int hardirqs_off:1; 137 - unsigned int sync:1; 138 - unsigned int references:11; /* 32 bits */ 139 - unsigned int pin_count; 140 - }; 141 - 142 85 /* 143 86 * Initialization, self-test and debugging-output methods: 144 87 */
+57
include/linux/lockdep_types.h
··· 198 198 199 199 struct pin_cookie { unsigned int val; }; 200 200 201 + #define MAX_LOCKDEP_KEYS_BITS 13 202 + #define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS) 203 + #define INITIAL_CHAIN_KEY -1 204 + 205 + struct held_lock { 206 + /* 207 + * One-way hash of the dependency chain up to this point. We 208 + * hash the hashes step by step as the dependency chain grows. 209 + * 210 + * We use it for dependency-caching and we skip detection 211 + * passes and dependency-updates if there is a cache-hit, so 212 + * it is absolutely critical for 100% coverage of the validator 213 + * to have a unique key value for every unique dependency path 214 + * that can occur in the system, to make a unique hash value 215 + * as likely as possible - hence the 64-bit width. 216 + * 217 + * The task struct holds the current hash value (initialized 218 + * with zero), here we store the previous hash value: 219 + */ 220 + u64 prev_chain_key; 221 + unsigned long acquire_ip; 222 + struct lockdep_map *instance; 223 + struct lockdep_map *nest_lock; 224 + #ifdef CONFIG_LOCK_STAT 225 + u64 waittime_stamp; 226 + u64 holdtime_stamp; 227 + #endif 228 + /* 229 + * class_idx is zero-indexed; it points to the element in 230 + * lock_classes this held lock instance belongs to. class_idx is in 231 + * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive. 232 + */ 233 + unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS; 234 + /* 235 + * The lock-stack is unified in that the lock chains of interrupt 236 + * contexts nest ontop of process context chains, but we 'separate' 237 + * the hashes by starting with 0 if we cross into an interrupt 238 + * context, and we also keep do not add cross-context lock 239 + * dependencies - the lock usage graph walking covers that area 240 + * anyway, and we'd just unnecessarily increase the number of 241 + * dependencies otherwise. [Note: hardirq and softirq contexts 242 + * are separated from each other too.] 
243 + * 244 + * The following field is used to detect when we cross into an 245 + * interrupt context: 246 + */ 247 + unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */ 248 + unsigned int trylock:1; /* 16 bits */ 249 + 250 + unsigned int read:2; /* see lock_acquire() comment */ 251 + unsigned int check:1; /* see lock_acquire() comment */ 252 + unsigned int hardirqs_off:1; 253 + unsigned int sync:1; 254 + unsigned int references:11; /* 32 bits */ 255 + unsigned int pin_count; 256 + }; 257 + 201 258 #else /* !CONFIG_LOCKDEP */ 202 259 203 260 /*
+2 -3
include/linux/mm_types_task.h
··· 9 9 */ 10 10 11 11 #include <linux/types.h> 12 - #include <linux/threads.h> 13 - #include <linux/atomic.h> 14 - #include <linux/cpumask.h> 15 12 16 13 #include <asm/page.h> 17 14 ··· 32 35 MM_SHMEMPAGES, /* Resident shared memory pages */ 33 36 NR_MM_COUNTERS 34 37 }; 38 + 39 + struct page; 35 40 36 41 struct page_frag { 37 42 struct page *page;
+1 -51
include/linux/mutex.h
··· 20 20 #include <linux/osq_lock.h> 21 21 #include <linux/debug_locks.h> 22 22 #include <linux/cleanup.h> 23 + #include <linux/mutex_types.h> 23 24 24 25 #ifdef CONFIG_DEBUG_LOCK_ALLOC 25 26 # define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ ··· 33 32 #endif 34 33 35 34 #ifndef CONFIG_PREEMPT_RT 36 - 37 - /* 38 - * Simple, straightforward mutexes with strict semantics: 39 - * 40 - * - only one task can hold the mutex at a time 41 - * - only the owner can unlock the mutex 42 - * - multiple unlocks are not permitted 43 - * - recursive locking is not permitted 44 - * - a mutex object must be initialized via the API 45 - * - a mutex object must not be initialized via memset or copying 46 - * - task may not exit with mutex held 47 - * - memory areas where held locks reside must not be freed 48 - * - held mutexes must not be reinitialized 49 - * - mutexes may not be used in hardware or software interrupt 50 - * contexts such as tasklets and timers 51 - * 52 - * These semantics are fully enforced when DEBUG_MUTEXES is 53 - * enabled. 
Furthermore, besides enforcing the above rules, the mutex 54 - * debugging code also implements a number of additional features 55 - * that make lock debugging easier and faster: 56 - * 57 - * - uses symbolic names of mutexes, whenever they are printed in debug output 58 - * - point-of-acquire tracking, symbolic lookup of function names 59 - * - list of all locks held in the system, printout of them 60 - * - owner tracking 61 - * - detects self-recursing locks and prints out all relevant info 62 - * - detects multi-task circular deadlocks and prints out all affected 63 - * locks and tasks (and only those tasks) 64 - */ 65 - struct mutex { 66 - atomic_long_t owner; 67 - raw_spinlock_t wait_lock; 68 - #ifdef CONFIG_MUTEX_SPIN_ON_OWNER 69 - struct optimistic_spin_queue osq; /* Spinner MCS lock */ 70 - #endif 71 - struct list_head wait_list; 72 - #ifdef CONFIG_DEBUG_MUTEXES 73 - void *magic; 74 - #endif 75 - #ifdef CONFIG_DEBUG_LOCK_ALLOC 76 - struct lockdep_map dep_map; 77 - #endif 78 - }; 79 35 80 36 #ifdef CONFIG_DEBUG_MUTEXES 81 37 ··· 89 131 /* 90 132 * Preempt-RT variant based on rtmutexes. 91 133 */ 92 - #include <linux/rtmutex.h> 93 - 94 - struct mutex { 95 - struct rt_mutex_base rtmutex; 96 - #ifdef CONFIG_DEBUG_LOCK_ALLOC 97 - struct lockdep_map dep_map; 98 - #endif 99 - }; 100 134 101 135 #define __MUTEX_INITIALIZER(mutexname) \ 102 136 { \
+71
include/linux/mutex_types.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef __LINUX_MUTEX_TYPES_H 3 + #define __LINUX_MUTEX_TYPES_H 4 + 5 + #include <linux/atomic.h> 6 + #include <linux/lockdep_types.h> 7 + #include <linux/osq_lock.h> 8 + #include <linux/spinlock_types.h> 9 + #include <linux/types.h> 10 + 11 + #ifndef CONFIG_PREEMPT_RT 12 + 13 + /* 14 + * Simple, straightforward mutexes with strict semantics: 15 + * 16 + * - only one task can hold the mutex at a time 17 + * - only the owner can unlock the mutex 18 + * - multiple unlocks are not permitted 19 + * - recursive locking is not permitted 20 + * - a mutex object must be initialized via the API 21 + * - a mutex object must not be initialized via memset or copying 22 + * - task may not exit with mutex held 23 + * - memory areas where held locks reside must not be freed 24 + * - held mutexes must not be reinitialized 25 + * - mutexes may not be used in hardware or software interrupt 26 + * contexts such as tasklets and timers 27 + * 28 + * These semantics are fully enforced when DEBUG_MUTEXES is 29 + * enabled. 
Furthermore, besides enforcing the above rules, the mutex 30 + * debugging code also implements a number of additional features 31 + * that make lock debugging easier and faster: 32 + * 33 + * - uses symbolic names of mutexes, whenever they are printed in debug output 34 + * - point-of-acquire tracking, symbolic lookup of function names 35 + * - list of all locks held in the system, printout of them 36 + * - owner tracking 37 + * - detects self-recursing locks and prints out all relevant info 38 + * - detects multi-task circular deadlocks and prints out all affected 39 + * locks and tasks (and only those tasks) 40 + */ 41 + struct mutex { 42 + atomic_long_t owner; 43 + raw_spinlock_t wait_lock; 44 + #ifdef CONFIG_MUTEX_SPIN_ON_OWNER 45 + struct optimistic_spin_queue osq; /* Spinner MCS lock */ 46 + #endif 47 + struct list_head wait_list; 48 + #ifdef CONFIG_DEBUG_MUTEXES 49 + void *magic; 50 + #endif 51 + #ifdef CONFIG_DEBUG_LOCK_ALLOC 52 + struct lockdep_map dep_map; 53 + #endif 54 + }; 55 + 56 + #else /* !CONFIG_PREEMPT_RT */ 57 + /* 58 + * Preempt-RT variant based on rtmutexes. 59 + */ 60 + #include <linux/rtmutex.h> 61 + 62 + struct mutex { 63 + struct rt_mutex_base rtmutex; 64 + #ifdef CONFIG_DEBUG_LOCK_ALLOC 65 + struct lockdep_map dep_map; 66 + #endif 67 + }; 68 + 69 + #endif /* CONFIG_PREEMPT_RT */ 70 + 71 + #endif /* __LINUX_MUTEX_TYPES_H */
+1 -1
include/linux/nodemask.h
··· 93 93 #include <linux/threads.h> 94 94 #include <linux/bitmap.h> 95 95 #include <linux/minmax.h> 96 + #include <linux/nodemask_types.h> 96 97 #include <linux/numa.h> 97 98 #include <linux/random.h> 98 99 99 - typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t; 100 100 extern nodemask_t _unused_nodemask_arg_; 101 101 102 102 /**
+10
include/linux/nodemask_types.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef __LINUX_NODEMASK_TYPES_H 3 + #define __LINUX_NODEMASK_TYPES_H 4 + 5 + #include <linux/bitops.h> 6 + #include <linux/numa.h> 7 + 8 + typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t; 9 + 10 + #endif /* __LINUX_NODEMASK_TYPES_H */
+1
include/linux/nsproxy.h
··· 2 2 #ifndef _LINUX_NSPROXY_H 3 3 #define _LINUX_NSPROXY_H 4 4 5 + #include <linux/refcount.h> 5 6 #include <linux/spinlock.h> 6 7 #include <linux/sched.h> 7 8
+6 -13
include/linux/numa.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 2 #ifndef _LINUX_NUMA_H 3 3 #define _LINUX_NUMA_H 4 + #include <linux/init.h> 4 5 #include <linux/types.h> 5 6 6 7 #ifdef CONFIG_NODES_SHIFT ··· 23 22 #endif 24 23 25 24 #ifdef CONFIG_NUMA 26 - #include <linux/printk.h> 27 25 #include <asm/sparsemem.h> 28 26 29 27 /* Generic implementation available */ 30 28 int numa_nearest_node(int node, unsigned int state); 31 29 32 30 #ifndef memory_add_physaddr_to_nid 33 - static inline int memory_add_physaddr_to_nid(u64 start) 34 - { 35 - pr_info_once("Unknown online node for memory at 0x%llx, assuming node 0\n", 36 - start); 37 - return 0; 38 - } 31 + int memory_add_physaddr_to_nid(u64 start); 39 32 #endif 33 + 40 34 #ifndef phys_to_target_node 41 - static inline int phys_to_target_node(u64 start) 42 - { 43 - pr_info_once("Unknown target node for memory at 0x%llx, assuming node 0\n", 44 - start); 45 - return 0; 46 - } 35 + int phys_to_target_node(u64 start); 47 36 #endif 37 + 48 38 #ifndef numa_fill_memblks 49 39 static inline int __init numa_fill_memblks(u64 start, u64 end) 50 40 { 51 41 return NUMA_NO_MEMBLK; 52 42 } 53 43 #endif 44 + 54 45 #else /* !CONFIG_NUMA */ 55 46 static inline int numa_nearest_node(int node, unsigned int state) 56 47 {
+127 -13
include/linux/pid.h
··· 2 2 #ifndef _LINUX_PID_H 3 3 #define _LINUX_PID_H 4 4 5 + #include <linux/pid_types.h> 5 6 #include <linux/rculist.h> 6 - #include <linux/wait.h> 7 + #include <linux/rcupdate.h> 7 8 #include <linux/refcount.h> 8 - 9 - enum pid_type 10 - { 11 - PIDTYPE_PID, 12 - PIDTYPE_TGID, 13 - PIDTYPE_PGID, 14 - PIDTYPE_SID, 15 - PIDTYPE_MAX, 16 - }; 9 + #include <linux/sched.h> 10 + #include <linux/wait.h> 17 11 18 12 /* 19 13 * What is struct pid? ··· 103 109 extern void exchange_tids(struct task_struct *task, struct task_struct *old); 104 110 extern void transfer_pid(struct task_struct *old, struct task_struct *new, 105 111 enum pid_type); 106 - 107 - struct pid_namespace; 108 - extern struct pid_namespace init_pid_ns; 109 112 110 113 extern int pid_max; 111 114 extern int pid_max_min, pid_max_max; ··· 206 215 } \ 207 216 task = tg___; \ 208 217 } while_each_pid_task(pid, type, task) 218 + 219 + static inline struct pid *task_pid(struct task_struct *task) 220 + { 221 + return task->thread_pid; 222 + } 223 + 224 + /* 225 + * the helpers to get the task's different pids as they are seen 226 + * from various namespaces 227 + * 228 + * task_xid_nr() : global id, i.e. the id seen from the init namespace; 229 + * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of 230 + * current. 
231 + * task_xid_nr_ns() : id seen from the ns specified; 232 + * 233 + * see also pid_nr() etc in include/linux/pid.h 234 + */ 235 + pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns); 236 + 237 + static inline pid_t task_pid_nr(struct task_struct *tsk) 238 + { 239 + return tsk->pid; 240 + } 241 + 242 + static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) 243 + { 244 + return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns); 245 + } 246 + 247 + static inline pid_t task_pid_vnr(struct task_struct *tsk) 248 + { 249 + return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL); 250 + } 251 + 252 + 253 + static inline pid_t task_tgid_nr(struct task_struct *tsk) 254 + { 255 + return tsk->tgid; 256 + } 257 + 258 + /** 259 + * pid_alive - check that a task structure is not stale 260 + * @p: Task structure to be checked. 261 + * 262 + * Test if a process is not yet dead (at most zombie state) 263 + * If pid_alive fails, then pointers within the task structure 264 + * can be stale and must not be dereferenced. 265 + * 266 + * Return: 1 if the process is alive. 0 otherwise. 
267 + */ 268 + static inline int pid_alive(const struct task_struct *p) 269 + { 270 + return p->thread_pid != NULL; 271 + } 272 + 273 + static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) 274 + { 275 + return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns); 276 + } 277 + 278 + static inline pid_t task_pgrp_vnr(struct task_struct *tsk) 279 + { 280 + return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL); 281 + } 282 + 283 + 284 + static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) 285 + { 286 + return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns); 287 + } 288 + 289 + static inline pid_t task_session_vnr(struct task_struct *tsk) 290 + { 291 + return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL); 292 + } 293 + 294 + static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) 295 + { 296 + return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns); 297 + } 298 + 299 + static inline pid_t task_tgid_vnr(struct task_struct *tsk) 300 + { 301 + return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL); 302 + } 303 + 304 + static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns) 305 + { 306 + pid_t pid = 0; 307 + 308 + rcu_read_lock(); 309 + if (pid_alive(tsk)) 310 + pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns); 311 + rcu_read_unlock(); 312 + 313 + return pid; 314 + } 315 + 316 + static inline pid_t task_ppid_nr(const struct task_struct *tsk) 317 + { 318 + return task_ppid_nr_ns(tsk, &init_pid_ns); 319 + } 320 + 321 + /* Obsolete, do not use: */ 322 + static inline pid_t task_pgrp_nr(struct task_struct *tsk) 323 + { 324 + return task_pgrp_nr_ns(tsk, &init_pid_ns); 325 + } 326 + 327 + /** 328 + * is_global_init - check if a task structure is init. Since init 329 + * is free to have sub-threads we need to check tgid. 330 + * @tsk: Task structure to be checked. 331 + * 332 + * Check if a task structure is the first user space task the kernel created. 
333 + * 334 + * Return: 1 if the task structure is init. 0 otherwise. 335 + */ 336 + static inline int is_global_init(struct task_struct *tsk) 337 + { 338 + return task_tgid_nr(tsk) == 1; 339 + } 340 + 209 341 #endif /* _LINUX_PID_H */
+16
include/linux/pid_types.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef _LINUX_PID_TYPES_H 3 + #define _LINUX_PID_TYPES_H 4 + 5 + enum pid_type { 6 + PIDTYPE_PID, 7 + PIDTYPE_TGID, 8 + PIDTYPE_PGID, 9 + PIDTYPE_SID, 10 + PIDTYPE_MAX, 11 + }; 12 + 13 + struct pid_namespace; 14 + extern struct pid_namespace init_pid_ns; 15 + 16 + #endif /* _LINUX_PID_TYPES_H */
+1 -11
include/linux/plist.h
··· 75 75 76 76 #include <linux/container_of.h> 77 77 #include <linux/list.h> 78 - #include <linux/types.h> 78 + #include <linux/plist_types.h> 79 79 80 80 #include <asm/bug.h> 81 - 82 - struct plist_head { 83 - struct list_head node_list; 84 - }; 85 - 86 - struct plist_node { 87 - int prio; 88 - struct list_head prio_list; 89 - struct list_head node_list; 90 - }; 91 81 92 82 /** 93 83 * PLIST_HEAD_INIT - static struct plist_head initializer
+17
include/linux/plist_types.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 + #ifndef _LINUX_PLIST_TYPES_H 3 + #define _LINUX_PLIST_TYPES_H 4 + 5 + #include <linux/types.h> 6 + 7 + struct plist_head { 8 + struct list_head node_list; 9 + }; 10 + 11 + struct plist_node { 12 + int prio; 13 + struct list_head prio_list; 14 + struct list_head node_list; 15 + }; 16 + 17 + #endif /* _LINUX_PLIST_TYPES_H */
+3 -66
include/linux/posix-timers.h
··· 2 2 #ifndef _linux_POSIX_TIMERS_H 3 3 #define _linux_POSIX_TIMERS_H 4 4 5 - #include <linux/spinlock.h> 5 + #include <linux/alarmtimer.h> 6 6 #include <linux/list.h> 7 7 #include <linux/mutex.h> 8 - #include <linux/alarmtimer.h> 8 + #include <linux/posix-timers_types.h> 9 + #include <linux/spinlock.h> 9 10 #include <linux/timerqueue.h> 10 11 11 12 struct kernel_siginfo; 12 13 struct task_struct; 13 - 14 - /* 15 - * Bit fields within a clockid: 16 - * 17 - * The most significant 29 bits hold either a pid or a file descriptor. 18 - * 19 - * Bit 2 indicates whether a cpu clock refers to a thread or a process. 20 - * 21 - * Bits 1 and 0 give the type: PROF=0, VIRT=1, SCHED=2, or FD=3. 22 - * 23 - * A clockid is invalid if bits 2, 1, and 0 are all set. 24 - */ 25 - #define CPUCLOCK_PID(clock) ((pid_t) ~((clock) >> 3)) 26 - #define CPUCLOCK_PERTHREAD(clock) \ 27 - (((clock) & (clockid_t) CPUCLOCK_PERTHREAD_MASK) != 0) 28 - 29 - #define CPUCLOCK_PERTHREAD_MASK 4 30 - #define CPUCLOCK_WHICH(clock) ((clock) & (clockid_t) CPUCLOCK_CLOCK_MASK) 31 - #define CPUCLOCK_CLOCK_MASK 3 32 - #define CPUCLOCK_PROF 0 33 - #define CPUCLOCK_VIRT 1 34 - #define CPUCLOCK_SCHED 2 35 - #define CPUCLOCK_MAX 3 36 - #define CLOCKFD CPUCLOCK_MAX 37 - #define CLOCKFD_MASK (CPUCLOCK_PERTHREAD_MASK|CPUCLOCK_CLOCK_MASK) 38 14 39 15 static inline clockid_t make_process_cpuclock(const unsigned int pid, 40 16 const clockid_t clock) ··· 85 109 ctmr->node.expires = exp; 86 110 } 87 111 88 - /** 89 - * posix_cputimer_base - Container per posix CPU clock 90 - * @nextevt: Earliest-expiration cache 91 - * @tqhead: timerqueue head for cpu_timers 92 - */ 93 - struct posix_cputimer_base { 94 - u64 nextevt; 95 - struct timerqueue_head tqhead; 96 - }; 97 - 98 - /** 99 - * posix_cputimers - Container for posix CPU timer related data 100 - * @bases: Base container for posix CPU clocks 101 - * @timers_active: Timers are queued. 102 - * @expiry_active: Timer expiry is active. 
Used for 103 - * process wide timers to avoid multiple 104 - * task trying to handle expiry concurrently 105 - * 106 - * Used in task_struct and signal_struct 107 - */ 108 - struct posix_cputimers { 109 - struct posix_cputimer_base bases[CPUCLOCK_MAX]; 110 - unsigned int timers_active; 111 - unsigned int expiry_active; 112 - }; 113 - 114 - /** 115 - * posix_cputimers_work - Container for task work based posix CPU timer expiry 116 - * @work: The task work to be scheduled 117 - * @mutex: Mutex held around expiry in context of this task work 118 - * @scheduled: @work has been scheduled already, no further processing 119 - */ 120 - struct posix_cputimers_work { 121 - struct callback_head work; 122 - struct mutex mutex; 123 - unsigned int scheduled; 124 - }; 125 - 126 112 static inline void posix_cputimers_init(struct posix_cputimers *pct) 127 113 { 128 114 memset(pct, 0, sizeof(*pct)); ··· 117 179 .bases = INIT_CPU_TIMERBASES(s.posix_cputimers.bases), \ 118 180 }, 119 181 #else 120 - struct posix_cputimers { }; 121 182 struct cpu_timer { }; 122 183 #define INIT_CPU_TIMERS(s) 123 184 static inline void posix_cputimers_init(struct posix_cputimers *pct) { }
+80
include/linux/posix-timers_types.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef _linux_POSIX_TIMERS_TYPES_H 3 + #define _linux_POSIX_TIMERS_TYPES_H 4 + 5 + #include <linux/mutex_types.h> 6 + #include <linux/timerqueue_types.h> 7 + #include <linux/types.h> 8 + 9 + /* 10 + * Bit fields within a clockid: 11 + * 12 + * The most significant 29 bits hold either a pid or a file descriptor. 13 + * 14 + * Bit 2 indicates whether a cpu clock refers to a thread or a process. 15 + * 16 + * Bits 1 and 0 give the type: PROF=0, VIRT=1, SCHED=2, or FD=3. 17 + * 18 + * A clockid is invalid if bits 2, 1, and 0 are all set. 19 + */ 20 + #define CPUCLOCK_PID(clock) ((pid_t) ~((clock) >> 3)) 21 + #define CPUCLOCK_PERTHREAD(clock) \ 22 + (((clock) & (clockid_t) CPUCLOCK_PERTHREAD_MASK) != 0) 23 + 24 + #define CPUCLOCK_PERTHREAD_MASK 4 25 + #define CPUCLOCK_WHICH(clock) ((clock) & (clockid_t) CPUCLOCK_CLOCK_MASK) 26 + #define CPUCLOCK_CLOCK_MASK 3 27 + #define CPUCLOCK_PROF 0 28 + #define CPUCLOCK_VIRT 1 29 + #define CPUCLOCK_SCHED 2 30 + #define CPUCLOCK_MAX 3 31 + #define CLOCKFD CPUCLOCK_MAX 32 + #define CLOCKFD_MASK (CPUCLOCK_PERTHREAD_MASK|CPUCLOCK_CLOCK_MASK) 33 + 34 + #ifdef CONFIG_POSIX_TIMERS 35 + 36 + /** 37 + * posix_cputimer_base - Container per posix CPU clock 38 + * @nextevt: Earliest-expiration cache 39 + * @tqhead: timerqueue head for cpu_timers 40 + */ 41 + struct posix_cputimer_base { 42 + u64 nextevt; 43 + struct timerqueue_head tqhead; 44 + }; 45 + 46 + /** 47 + * posix_cputimers - Container for posix CPU timer related data 48 + * @bases: Base container for posix CPU clocks 49 + * @timers_active: Timers are queued. 50 + * @expiry_active: Timer expiry is active. 
Used for 51 + * process wide timers to avoid multiple 52 + * task trying to handle expiry concurrently 53 + * 54 + * Used in task_struct and signal_struct 55 + */ 56 + struct posix_cputimers { 57 + struct posix_cputimer_base bases[CPUCLOCK_MAX]; 58 + unsigned int timers_active; 59 + unsigned int expiry_active; 60 + }; 61 + 62 + /** 63 + * posix_cputimers_work - Container for task work based posix CPU timer expiry 64 + * @work: The task work to be scheduled 65 + * @mutex: Mutex held around expiry in context of this task work 66 + * @scheduled: @work has been scheduled already, no further processing 67 + */ 68 + struct posix_cputimers_work { 69 + struct callback_head work; 70 + struct mutex mutex; 71 + unsigned int scheduled; 72 + }; 73 + 74 + #else /* CONFIG_POSIX_TIMERS */ 75 + 76 + struct posix_cputimers { }; 77 + 78 + #endif /* CONFIG_POSIX_TIMERS */ 79 + 80 + #endif /* _linux_POSIX_TIMERS_TYPES_H */
-1
include/linux/prandom.h
··· 10 10 11 11 #include <linux/types.h> 12 12 #include <linux/once.h> 13 - #include <linux/percpu.h> 14 13 #include <linux/random.h> 15 14 16 15 struct rnd_state {
+4 -2
include/linux/preempt.h
··· 9 9 10 10 #include <linux/linkage.h> 11 11 #include <linux/cleanup.h> 12 - #include <linux/list.h> 12 + #include <linux/types.h> 13 13 14 14 /* 15 15 * We put the hardirq and softirq counter into the preemption ··· 360 360 static inline void preempt_notifier_init(struct preempt_notifier *notifier, 361 361 struct preempt_ops *ops) 362 362 { 363 - INIT_HLIST_NODE(&notifier->link); 363 + /* INIT_HLIST_NODE() open coded, to avoid dependency on list.h */ 364 + notifier->link.next = NULL; 365 + notifier->link.pprev = NULL; 364 366 notifier->ops = ops; 365 367 } 366 368
+10
include/linux/rcupdate_wait.h
··· 8 8 9 9 #include <linux/rcupdate.h> 10 10 #include <linux/completion.h> 11 + #include <linux/sched.h> 11 12 12 13 /* 13 14 * Structure allowing asynchronous waiting on RCU. ··· 55 54 */ 56 55 #define synchronize_rcu_mult(...) \ 57 56 _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__) 57 + 58 + static inline void cond_resched_rcu(void) 59 + { 60 + #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU) 61 + rcu_read_unlock(); 62 + cond_resched(); 63 + rcu_read_lock(); 64 + #endif 65 + } 58 66 59 67 #endif /* _LINUX_SCHED_RCUPDATE_WAIT_H */
+1 -12
include/linux/refcount.h
··· 96 96 #include <linux/bug.h> 97 97 #include <linux/compiler.h> 98 98 #include <linux/limits.h> 99 + #include <linux/refcount_types.h> 99 100 #include <linux/spinlock_types.h> 100 101 101 102 struct mutex; 102 - 103 - /** 104 - * typedef refcount_t - variant of atomic_t specialized for reference counts 105 - * @refs: atomic_t counter field 106 - * 107 - * The counter saturates at REFCOUNT_SATURATED and will not move once 108 - * there. This avoids wrapping the counter and causing 'spurious' 109 - * use-after-free bugs. 110 - */ 111 - typedef struct refcount_struct { 112 - atomic_t refs; 113 - } refcount_t; 114 103 115 104 #define REFCOUNT_INIT(n) { .refs = ATOMIC_INIT(n), } 116 105 #define REFCOUNT_MAX INT_MAX
+19
include/linux/refcount_types.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef _LINUX_REFCOUNT_TYPES_H 3 + #define _LINUX_REFCOUNT_TYPES_H 4 + 5 + #include <linux/types.h> 6 + 7 + /** 8 + * typedef refcount_t - variant of atomic_t specialized for reference counts 9 + * @refs: atomic_t counter field 10 + * 11 + * The counter saturates at REFCOUNT_SATURATED and will not move once 12 + * there. This avoids wrapping the counter and causing 'spurious' 13 + * use-after-free bugs. 14 + */ 15 + typedef struct refcount_struct { 16 + atomic_t refs; 17 + } refcount_t; 18 + 19 + #endif /* _LINUX_REFCOUNT_TYPES_H */
+1 -1
include/linux/restart_block.h
··· 7 7 8 8 #include <linux/compiler.h> 9 9 #include <linux/types.h> 10 - #include <linux/time64.h> 11 10 11 + struct __kernel_timespec; 12 12 struct timespec; 13 13 struct old_timespec32; 14 14 struct pollfd;
+1
include/linux/resume_user_mode.h
··· 6 6 #include <linux/sched.h> 7 7 #include <linux/task_work.h> 8 8 #include <linux/memcontrol.h> 9 + #include <linux/rseq.h> 9 10 #include <linux/blk-cgroup.h> 10 11 11 12 /**
+1 -1
include/linux/rhashtable-types.h
··· 12 12 #include <linux/atomic.h> 13 13 #include <linux/compiler.h> 14 14 #include <linux/mutex.h> 15 - #include <linux/workqueue.h> 15 + #include <linux/workqueue_types.h> 16 16 17 17 struct rhash_head { 18 18 struct rhash_head __rcu *next;
+131
include/linux/rseq.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ 2 + #ifndef _LINUX_RSEQ_H 3 + #define _LINUX_RSEQ_H 4 + 5 + #ifdef CONFIG_RSEQ 6 + 7 + #include <linux/preempt.h> 8 + #include <linux/sched.h> 9 + 10 + /* 11 + * Map the event mask on the user-space ABI enum rseq_cs_flags 12 + * for direct mask checks. 13 + */ 14 + enum rseq_event_mask_bits { 15 + RSEQ_EVENT_PREEMPT_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT, 16 + RSEQ_EVENT_SIGNAL_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT, 17 + RSEQ_EVENT_MIGRATE_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT, 18 + }; 19 + 20 + enum rseq_event_mask { 21 + RSEQ_EVENT_PREEMPT = (1U << RSEQ_EVENT_PREEMPT_BIT), 22 + RSEQ_EVENT_SIGNAL = (1U << RSEQ_EVENT_SIGNAL_BIT), 23 + RSEQ_EVENT_MIGRATE = (1U << RSEQ_EVENT_MIGRATE_BIT), 24 + }; 25 + 26 + static inline void rseq_set_notify_resume(struct task_struct *t) 27 + { 28 + if (t->rseq) 29 + set_tsk_thread_flag(t, TIF_NOTIFY_RESUME); 30 + } 31 + 32 + void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs); 33 + 34 + static inline void rseq_handle_notify_resume(struct ksignal *ksig, 35 + struct pt_regs *regs) 36 + { 37 + if (current->rseq) 38 + __rseq_handle_notify_resume(ksig, regs); 39 + } 40 + 41 + static inline void rseq_signal_deliver(struct ksignal *ksig, 42 + struct pt_regs *regs) 43 + { 44 + preempt_disable(); 45 + __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask); 46 + preempt_enable(); 47 + rseq_handle_notify_resume(ksig, regs); 48 + } 49 + 50 + /* rseq_preempt() requires preemption to be disabled. */ 51 + static inline void rseq_preempt(struct task_struct *t) 52 + { 53 + __set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask); 54 + rseq_set_notify_resume(t); 55 + } 56 + 57 + /* rseq_migrate() requires preemption to be disabled. 
*/ 58 + static inline void rseq_migrate(struct task_struct *t) 59 + { 60 + __set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask); 61 + rseq_set_notify_resume(t); 62 + } 63 + 64 + /* 65 + * If parent process has a registered restartable sequences area, the 66 + * child inherits. Unregister rseq for a clone with CLONE_VM set. 67 + */ 68 + static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags) 69 + { 70 + if (clone_flags & CLONE_VM) { 71 + t->rseq = NULL; 72 + t->rseq_len = 0; 73 + t->rseq_sig = 0; 74 + t->rseq_event_mask = 0; 75 + } else { 76 + t->rseq = current->rseq; 77 + t->rseq_len = current->rseq_len; 78 + t->rseq_sig = current->rseq_sig; 79 + t->rseq_event_mask = current->rseq_event_mask; 80 + } 81 + } 82 + 83 + static inline void rseq_execve(struct task_struct *t) 84 + { 85 + t->rseq = NULL; 86 + t->rseq_len = 0; 87 + t->rseq_sig = 0; 88 + t->rseq_event_mask = 0; 89 + } 90 + 91 + #else 92 + 93 + static inline void rseq_set_notify_resume(struct task_struct *t) 94 + { 95 + } 96 + static inline void rseq_handle_notify_resume(struct ksignal *ksig, 97 + struct pt_regs *regs) 98 + { 99 + } 100 + static inline void rseq_signal_deliver(struct ksignal *ksig, 101 + struct pt_regs *regs) 102 + { 103 + } 104 + static inline void rseq_preempt(struct task_struct *t) 105 + { 106 + } 107 + static inline void rseq_migrate(struct task_struct *t) 108 + { 109 + } 110 + static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags) 111 + { 112 + } 113 + static inline void rseq_execve(struct task_struct *t) 114 + { 115 + } 116 + 117 + #endif 118 + 119 + #ifdef CONFIG_DEBUG_RSEQ 120 + 121 + void rseq_syscall(struct pt_regs *regs); 122 + 123 + #else 124 + 125 + static inline void rseq_syscall(struct pt_regs *regs) 126 + { 127 + } 128 + 129 + #endif 130 + 131 + #endif /* _LINUX_RSEQ_H */
-1
include/linux/rslib.h
··· 10 10 #ifndef _RSLIB_H_ 11 11 #define _RSLIB_H_ 12 12 13 - #include <linux/list.h> 14 13 #include <linux/types.h> /* for gfp_t */ 15 14 #include <linux/gfp.h> /* for GFP_KERNEL */ 16 15
+24 -299
include/linux/sched.h
··· 10 10 #include <uapi/linux/sched.h> 11 11 12 12 #include <asm/current.h> 13 + #include <asm/processor.h> 14 + #include <linux/thread_info.h> 15 + #include <linux/preempt.h> 16 + #include <linux/cpumask.h> 13 17 14 - #include <linux/pid.h> 15 - #include <linux/sem.h> 18 + #include <linux/cache.h> 19 + #include <linux/irqflags_types.h> 20 + #include <linux/smp_types.h> 21 + #include <linux/pid_types.h> 22 + #include <linux/sem_types.h> 16 23 #include <linux/shm.h> 17 24 #include <linux/kmsan_types.h> 18 - #include <linux/mutex.h> 19 - #include <linux/plist.h> 20 - #include <linux/hrtimer.h> 21 - #include <linux/irqflags.h> 22 - #include <linux/seccomp.h> 23 - #include <linux/nodemask.h> 24 - #include <linux/rcupdate.h> 25 - #include <linux/refcount.h> 25 + #include <linux/mutex_types.h> 26 + #include <linux/plist_types.h> 27 + #include <linux/hrtimer_types.h> 28 + #include <linux/timer_types.h> 29 + #include <linux/seccomp_types.h> 30 + #include <linux/nodemask_types.h> 31 + #include <linux/refcount_types.h> 26 32 #include <linux/resource.h> 27 33 #include <linux/latencytop.h> 28 34 #include <linux/sched/prio.h> 29 35 #include <linux/sched/types.h> 30 36 #include <linux/signal_types.h> 31 - #include <linux/syscall_user_dispatch.h> 37 + #include <linux/syscall_user_dispatch_types.h> 32 38 #include <linux/mm_types_task.h> 33 39 #include <linux/task_io_accounting.h> 34 - #include <linux/posix-timers.h> 35 - #include <linux/rseq.h> 36 - #include <linux/seqlock.h> 40 + #include <linux/posix-timers_types.h> 41 + #include <linux/restart_block.h> 42 + #include <uapi/linux/rseq.h> 43 + #include <linux/seqlock_types.h> 37 44 #include <linux/kcsan.h> 38 45 #include <linux/rv.h> 39 46 #include <linux/livepatch_sched.h> 47 + #include <linux/uidgid_types.h> 40 48 #include <asm/kmap_size.h> 41 49 42 50 /* task_struct member predeclarations (sorted alphabetically): */ ··· 1564 1556 */ 1565 1557 }; 1566 1558 1567 - static inline struct pid *task_pid(struct task_struct *task) 1568 
- { 1569 - return task->thread_pid; 1570 - } 1571 - 1572 - /* 1573 - * the helpers to get the task's different pids as they are seen 1574 - * from various namespaces 1575 - * 1576 - * task_xid_nr() : global id, i.e. the id seen from the init namespace; 1577 - * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of 1578 - * current. 1579 - * task_xid_nr_ns() : id seen from the ns specified; 1580 - * 1581 - * see also pid_nr() etc in include/linux/pid.h 1582 - */ 1583 - pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns); 1584 - 1585 - static inline pid_t task_pid_nr(struct task_struct *tsk) 1586 - { 1587 - return tsk->pid; 1588 - } 1589 - 1590 - static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) 1591 - { 1592 - return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns); 1593 - } 1594 - 1595 - static inline pid_t task_pid_vnr(struct task_struct *tsk) 1596 - { 1597 - return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL); 1598 - } 1599 - 1600 - 1601 - static inline pid_t task_tgid_nr(struct task_struct *tsk) 1602 - { 1603 - return tsk->tgid; 1604 - } 1605 - 1606 - /** 1607 - * pid_alive - check that a task structure is not stale 1608 - * @p: Task structure to be checked. 1609 - * 1610 - * Test if a process is not yet dead (at most zombie state) 1611 - * If pid_alive fails, then pointers within the task structure 1612 - * can be stale and must not be dereferenced. 1613 - * 1614 - * Return: 1 if the process is alive. 0 otherwise. 
1615 - */ 1616 - static inline int pid_alive(const struct task_struct *p) 1617 - { 1618 - return p->thread_pid != NULL; 1619 - } 1620 - 1621 - static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) 1622 - { 1623 - return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns); 1624 - } 1625 - 1626 - static inline pid_t task_pgrp_vnr(struct task_struct *tsk) 1627 - { 1628 - return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL); 1629 - } 1630 - 1631 - 1632 - static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) 1633 - { 1634 - return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns); 1635 - } 1636 - 1637 - static inline pid_t task_session_vnr(struct task_struct *tsk) 1638 - { 1639 - return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL); 1640 - } 1641 - 1642 - static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) 1643 - { 1644 - return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns); 1645 - } 1646 - 1647 - static inline pid_t task_tgid_vnr(struct task_struct *tsk) 1648 - { 1649 - return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL); 1650 - } 1651 - 1652 - static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns) 1653 - { 1654 - pid_t pid = 0; 1655 - 1656 - rcu_read_lock(); 1657 - if (pid_alive(tsk)) 1658 - pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns); 1659 - rcu_read_unlock(); 1660 - 1661 - return pid; 1662 - } 1663 - 1664 - static inline pid_t task_ppid_nr(const struct task_struct *tsk) 1665 - { 1666 - return task_ppid_nr_ns(tsk, &init_pid_ns); 1667 - } 1668 - 1669 - /* Obsolete, do not use: */ 1670 - static inline pid_t task_pgrp_nr(struct task_struct *tsk) 1671 - { 1672 - return task_pgrp_nr_ns(tsk, &init_pid_ns); 1673 - } 1674 - 1675 1559 #define TASK_REPORT_IDLE (TASK_REPORT + 1) 1676 1560 #define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1) 1677 1561 ··· 1605 1705 static inline char task_state_to_char(struct task_struct *tsk) 1606 1706 { 1607 1707 return 
task_index_to_char(task_state_index(tsk)); 1608 - } 1609 - 1610 - /** 1611 - * is_global_init - check if a task structure is init. Since init 1612 - * is free to have sub-threads we need to check tgid. 1613 - * @tsk: Task structure to be checked. 1614 - * 1615 - * Check if a task structure is the first user space task the kernel created. 1616 - * 1617 - * Return: 1 if the task structure is init. 0 otherwise. 1618 - */ 1619 - static inline int is_global_init(struct task_struct *tsk) 1620 - { 1621 - return task_tgid_nr(tsk) == 1; 1622 1708 } 1623 1709 1624 1710 extern struct pid *cad_pid; ··· 2056 2170 __cond_resched_rwlock_write(lock); \ 2057 2171 }) 2058 2172 2059 - static inline void cond_resched_rcu(void) 2060 - { 2061 - #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU) 2062 - rcu_read_unlock(); 2063 - cond_resched(); 2064 - rcu_read_lock(); 2065 - #endif 2066 - } 2067 - 2068 2173 #ifdef CONFIG_PREEMPT_DYNAMIC 2069 2174 2070 2175 extern bool preempt_model_none(void); ··· 2097 2220 return preempt_model_full() || preempt_model_rt(); 2098 2221 } 2099 2222 2100 - /* 2101 - * Does a critical section need to be broken due to another 2102 - * task waiting?: (technically does not depend on CONFIG_PREEMPTION, 2103 - * but a general need for low latency) 2104 - */ 2105 - static inline int spin_needbreak(spinlock_t *lock) 2106 - { 2107 - #ifdef CONFIG_PREEMPTION 2108 - return spin_is_contended(lock); 2109 - #else 2110 - return 0; 2111 - #endif 2112 - } 2113 - 2114 - /* 2115 - * Check if a rwlock is contended. 2116 - * Returns non-zero if there is another task waiting on the rwlock. 2117 - * Returns zero if the lock is not contended or the system / underlying 2118 - * rwlock implementation does not support contention detection. 2119 - * Technically does not depend on CONFIG_PREEMPTION, but a general need 2120 - * for low latency. 
2121 - */ 2122 - static inline int rwlock_needbreak(rwlock_t *lock) 2123 - { 2124 - #ifdef CONFIG_PREEMPTION 2125 - return rwlock_is_contended(lock); 2126 - #else 2127 - return 0; 2128 - #endif 2129 - } 2130 - 2131 2223 static __always_inline bool need_resched(void) 2132 2224 { 2133 2225 return unlikely(tif_need_resched()); ··· 2130 2284 extern bool sched_task_on_rq(struct task_struct *p); 2131 2285 extern unsigned long get_wchan(struct task_struct *p); 2132 2286 extern struct task_struct *cpu_curr_snapshot(int cpu); 2287 + 2288 + #include <linux/spinlock.h> 2133 2289 2134 2290 /* 2135 2291 * In order to reduce various lock holder preemption latencies provide an ··· 2168 2320 /* Returns effective CPU energy utilization, as seen by the scheduler */ 2169 2321 unsigned long sched_cpu_util(int cpu); 2170 2322 #endif /* CONFIG_SMP */ 2171 - 2172 - #ifdef CONFIG_RSEQ 2173 - 2174 - /* 2175 - * Map the event mask on the user-space ABI enum rseq_cs_flags 2176 - * for direct mask checks. 2177 - */ 2178 - enum rseq_event_mask_bits { 2179 - RSEQ_EVENT_PREEMPT_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT, 2180 - RSEQ_EVENT_SIGNAL_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT, 2181 - RSEQ_EVENT_MIGRATE_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT, 2182 - }; 2183 - 2184 - enum rseq_event_mask { 2185 - RSEQ_EVENT_PREEMPT = (1U << RSEQ_EVENT_PREEMPT_BIT), 2186 - RSEQ_EVENT_SIGNAL = (1U << RSEQ_EVENT_SIGNAL_BIT), 2187 - RSEQ_EVENT_MIGRATE = (1U << RSEQ_EVENT_MIGRATE_BIT), 2188 - }; 2189 - 2190 - static inline void rseq_set_notify_resume(struct task_struct *t) 2191 - { 2192 - if (t->rseq) 2193 - set_tsk_thread_flag(t, TIF_NOTIFY_RESUME); 2194 - } 2195 - 2196 - void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs); 2197 - 2198 - static inline void rseq_handle_notify_resume(struct ksignal *ksig, 2199 - struct pt_regs *regs) 2200 - { 2201 - if (current->rseq) 2202 - __rseq_handle_notify_resume(ksig, regs); 2203 - } 2204 - 2205 - static inline void 
rseq_signal_deliver(struct ksignal *ksig, 2206 - struct pt_regs *regs) 2207 - { 2208 - preempt_disable(); 2209 - __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask); 2210 - preempt_enable(); 2211 - rseq_handle_notify_resume(ksig, regs); 2212 - } 2213 - 2214 - /* rseq_preempt() requires preemption to be disabled. */ 2215 - static inline void rseq_preempt(struct task_struct *t) 2216 - { 2217 - __set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask); 2218 - rseq_set_notify_resume(t); 2219 - } 2220 - 2221 - /* rseq_migrate() requires preemption to be disabled. */ 2222 - static inline void rseq_migrate(struct task_struct *t) 2223 - { 2224 - __set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask); 2225 - rseq_set_notify_resume(t); 2226 - } 2227 - 2228 - /* 2229 - * If parent process has a registered restartable sequences area, the 2230 - * child inherits. Unregister rseq for a clone with CLONE_VM set. 2231 - */ 2232 - static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags) 2233 - { 2234 - if (clone_flags & CLONE_VM) { 2235 - t->rseq = NULL; 2236 - t->rseq_len = 0; 2237 - t->rseq_sig = 0; 2238 - t->rseq_event_mask = 0; 2239 - } else { 2240 - t->rseq = current->rseq; 2241 - t->rseq_len = current->rseq_len; 2242 - t->rseq_sig = current->rseq_sig; 2243 - t->rseq_event_mask = current->rseq_event_mask; 2244 - } 2245 - } 2246 - 2247 - static inline void rseq_execve(struct task_struct *t) 2248 - { 2249 - t->rseq = NULL; 2250 - t->rseq_len = 0; 2251 - t->rseq_sig = 0; 2252 - t->rseq_event_mask = 0; 2253 - } 2254 - 2255 - #else 2256 - 2257 - static inline void rseq_set_notify_resume(struct task_struct *t) 2258 - { 2259 - } 2260 - static inline void rseq_handle_notify_resume(struct ksignal *ksig, 2261 - struct pt_regs *regs) 2262 - { 2263 - } 2264 - static inline void rseq_signal_deliver(struct ksignal *ksig, 2265 - struct pt_regs *regs) 2266 - { 2267 - } 2268 - static inline void rseq_preempt(struct task_struct *t) 2269 - { 2270 - } 2271 - static 
inline void rseq_migrate(struct task_struct *t) 2272 - { 2273 - } 2274 - static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags) 2275 - { 2276 - } 2277 - static inline void rseq_execve(struct task_struct *t) 2278 - { 2279 - } 2280 - 2281 - #endif 2282 - 2283 - #ifdef CONFIG_DEBUG_RSEQ 2284 - 2285 - void rseq_syscall(struct pt_regs *regs); 2286 - 2287 - #else 2288 - 2289 - static inline void rseq_syscall(struct pt_regs *regs) 2290 - { 2291 - } 2292 - 2293 - #endif 2294 2323 2295 2324 #ifdef CONFIG_SCHED_CORE 2296 2325 extern void sched_core_free(struct task_struct *tsk);
+1
include/linux/sched/signal.h
··· 9 9 #include <linux/sched/task.h> 10 10 #include <linux/cred.h> 11 11 #include <linux/refcount.h> 12 + #include <linux/pid.h> 12 13 #include <linux/posix-timers.h> 13 14 #include <linux/mm_types.h> 14 15 #include <asm/ptrace.h>
+2
include/linux/sched/task.h
··· 7 7 * functionality: 8 8 */ 9 9 10 + #include <linux/rcupdate.h> 11 + #include <linux/refcount.h> 10 12 #include <linux/sched.h> 11 13 #include <linux/uaccess.h> 12 14
+1
include/linux/sched/task_stack.h
··· 8 8 9 9 #include <linux/sched.h> 10 10 #include <linux/magic.h> 11 + #include <linux/refcount.h> 11 12 12 13 #ifdef CONFIG_THREAD_INFO_IN_TASK 13 14
+3 -21
include/linux/seccomp.h
··· 3 3 #define _LINUX_SECCOMP_H 4 4 5 5 #include <uapi/linux/seccomp.h> 6 + #include <linux/seccomp_types.h> 6 7 7 8 #define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC | \ 8 9 SECCOMP_FILTER_FLAG_LOG | \ ··· 21 20 #include <linux/thread_info.h> 22 21 #include <linux/atomic.h> 23 22 #include <asm/seccomp.h> 24 - 25 - struct seccomp_filter; 26 - /** 27 - * struct seccomp - the state of a seccomp'ed process 28 - * 29 - * @mode: indicates one of the valid values above for controlled 30 - * system calls available to a process. 31 - * @filter_count: number of seccomp filters 32 - * @filter: must always point to a valid seccomp-filter or NULL as it is 33 - * accessed without locking during system call entry. 34 - * 35 - * @filter must only be accessed from the context of current as there 36 - * is no read locking. 37 - */ 38 - struct seccomp { 39 - int mode; 40 - atomic_t filter_count; 41 - struct seccomp_filter *filter; 42 - }; 43 23 44 24 #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER 45 25 extern int __secure_computing(const struct seccomp_data *sd); ··· 46 64 47 65 #include <linux/errno.h> 48 66 49 - struct seccomp { }; 50 - struct seccomp_filter { }; 51 67 struct seccomp_data; 52 68 53 69 #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER ··· 106 126 107 127 #ifdef CONFIG_SECCOMP_CACHE_DEBUG 108 128 struct seq_file; 129 + struct pid_namespace; 130 + struct pid; 109 131 110 132 int proc_pid_seccomp_cache(struct seq_file *m, struct pid_namespace *ns, 111 133 struct pid *pid, struct task_struct *task);
+35
include/linux/seccomp_types.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef _LINUX_SECCOMP_TYPES_H 3 + #define _LINUX_SECCOMP_TYPES_H 4 + 5 + #include <linux/types.h> 6 + 7 + #ifdef CONFIG_SECCOMP 8 + 9 + struct seccomp_filter; 10 + /** 11 + * struct seccomp - the state of a seccomp'ed process 12 + * 13 + * @mode: indicates one of the valid values above for controlled 14 + * system calls available to a process. 15 + * @filter_count: number of seccomp filters 16 + * @filter: must always point to a valid seccomp-filter or NULL as it is 17 + * accessed without locking during system call entry. 18 + * 19 + * @filter must only be accessed from the context of current as there 20 + * is no read locking. 21 + */ 22 + struct seccomp { 23 + int mode; 24 + atomic_t filter_count; 25 + struct seccomp_filter *filter; 26 + }; 27 + 28 + #else 29 + 30 + struct seccomp { }; 31 + struct seccomp_filter { }; 32 + 33 + #endif 34 + 35 + #endif /* _LINUX_SECCOMP_TYPES_H */
+1 -9
include/linux/sem.h
··· 3 3 #define _LINUX_SEM_H 4 4 5 5 #include <uapi/linux/sem.h> 6 + #include <linux/sem_types.h> 6 7 7 8 struct task_struct; 8 - struct sem_undo_list; 9 9 10 10 #ifdef CONFIG_SYSVIPC 11 - 12 - struct sysv_sem { 13 - struct sem_undo_list *undo_list; 14 - }; 15 11 16 12 extern int copy_semundo(unsigned long clone_flags, struct task_struct *tsk); 17 13 extern void exit_sem(struct task_struct *tsk); 18 14 19 15 #else 20 - 21 - struct sysv_sem { 22 - /* empty */ 23 - }; 24 16 25 17 static inline int copy_semundo(unsigned long clone_flags, struct task_struct *tsk) 26 18 {
+13
include/linux/sem_types.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef _LINUX_SEM_TYPES_H 3 + #define _LINUX_SEM_TYPES_H 4 + 5 + struct sem_undo_list; 6 + 7 + struct sysv_sem { 8 + #ifdef CONFIG_SYSVIPC 9 + struct sem_undo_list *undo_list; 10 + #endif 11 + }; 12 + 13 + #endif /* _LINUX_SEM_TYPES_H */
+2 -77
include/linux/seqlock.h
··· 18 18 #include <linux/lockdep.h> 19 19 #include <linux/mutex.h> 20 20 #include <linux/preempt.h> 21 + #include <linux/seqlock_types.h> 21 22 #include <linux/spinlock.h> 22 23 23 24 #include <asm/processor.h> ··· 37 36 * is not affected. 38 37 */ 39 38 #define KCSAN_SEQLOCK_REGION_MAX 1000 40 - 41 - /* 42 - * Sequence counters (seqcount_t) 43 - * 44 - * This is the raw counting mechanism, without any writer protection. 45 - * 46 - * Write side critical sections must be serialized and non-preemptible. 47 - * 48 - * If readers can be invoked from hardirq or softirq contexts, 49 - * interrupts or bottom halves must also be respectively disabled before 50 - * entering the write section. 51 - * 52 - * This mechanism can't be used if the protected data contains pointers, 53 - * as the writer can invalidate a pointer that a reader is following. 54 - * 55 - * If the write serialization mechanism is one of the common kernel 56 - * locking primitives, use a sequence counter with associated lock 57 - * (seqcount_LOCKNAME_t) instead. 58 - * 59 - * If it's desired to automatically handle the sequence counter writer 60 - * serialization and non-preemptibility requirements, use a sequential 61 - * lock (seqlock_t) instead. 62 - * 63 - * See Documentation/locking/seqlock.rst 64 - */ 65 - typedef struct seqcount { 66 - unsigned sequence; 67 - #ifdef CONFIG_DEBUG_LOCK_ALLOC 68 - struct lockdep_map dep_map; 69 - #endif 70 - } seqcount_t; 71 39 72 40 static inline void __seqcount_init(seqcount_t *s, const char *name, 73 41 struct lock_class_key *key) ··· 102 132 */ 103 133 104 134 /* 105 - * For PREEMPT_RT, seqcount_LOCKNAME_t write side critical sections cannot 106 - * disable preemption. It can lead to higher latencies, and the write side 107 - * sections will not be able to acquire locks which become sleeping locks 108 - * (e.g. spinlock_t). 
109 - * 110 - * To remain preemptible while avoiding a possible livelock caused by the 111 - * reader preempting the writer, use a different technique: let the reader 112 - * detect if a seqcount_LOCKNAME_t writer is in progress. If that is the 113 - * case, acquire then release the associated LOCKNAME writer serialization 114 - * lock. This will allow any possibly-preempted writer to make progress 115 - * until the end of its writer serialization lock critical section. 116 - * 117 - * This lock-unlock technique must be implemented for all of PREEMPT_RT 118 - * sleeping locks. See Documentation/locking/locktypes.rst 119 - */ 120 - #if defined(CONFIG_LOCKDEP) || defined(CONFIG_PREEMPT_RT) 121 - #define __SEQ_LOCK(expr) expr 122 - #else 123 - #define __SEQ_LOCK(expr) 124 - #endif 125 - 126 - /* 127 135 * typedef seqcount_LOCKNAME_t - sequence counter with LOCKNAME associated 128 136 * @seqcount: The real sequence counter 129 137 * @lock: Pointer to the associated lock ··· 142 194 * @lockbase: prefix for associated lock/unlock 143 195 */ 144 196 #define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockbase) \ 145 - typedef struct seqcount_##lockname { \ 146 - seqcount_t seqcount; \ 147 - __SEQ_LOCK(locktype *lock); \ 148 - } seqcount_##lockname##_t; \ 149 - \ 150 197 static __always_inline seqcount_t * \ 151 198 __seqprop_##lockname##_ptr(seqcount_##lockname##_t *s) \ 152 199 { \ ··· 227 284 SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, spin) 228 285 SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, read) 229 286 SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex) 287 + #undef SEQCOUNT_LOCKNAME 230 288 231 289 /* 232 290 * SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t ··· 737 793 s->seqcount.sequence++; 738 794 smp_wmb(); /* increment "sequence" before following stores */ 739 795 } 740 - 741 - /* 742 - * Sequential locks (seqlock_t) 743 - * 744 - * Sequence counters with an embedded spinlock for writer serialization 745 - * and 
non-preemptibility. 746 - * 747 - * For more info, see: 748 - * - Comments on top of seqcount_t 749 - * - Documentation/locking/seqlock.rst 750 - */ 751 - typedef struct { 752 - /* 753 - * Make sure that readers don't starve writers on PREEMPT_RT: use 754 - * seqcount_spinlock_t instead of seqcount_t. Check __SEQ_LOCK(). 755 - */ 756 - seqcount_spinlock_t seqcount; 757 - spinlock_t lock; 758 - } seqlock_t; 759 796 760 797 #define __SEQLOCK_UNLOCKED(lockname) \ 761 798 { \
+93
include/linux/seqlock_types.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef __LINUX_SEQLOCK_TYPES_H 3 + #define __LINUX_SEQLOCK_TYPES_H 4 + 5 + #include <linux/lockdep_types.h> 6 + #include <linux/mutex_types.h> 7 + #include <linux/spinlock_types.h> 8 + 9 + /* 10 + * Sequence counters (seqcount_t) 11 + * 12 + * This is the raw counting mechanism, without any writer protection. 13 + * 14 + * Write side critical sections must be serialized and non-preemptible. 15 + * 16 + * If readers can be invoked from hardirq or softirq contexts, 17 + * interrupts or bottom halves must also be respectively disabled before 18 + * entering the write section. 19 + * 20 + * This mechanism can't be used if the protected data contains pointers, 21 + * as the writer can invalidate a pointer that a reader is following. 22 + * 23 + * If the write serialization mechanism is one of the common kernel 24 + * locking primitives, use a sequence counter with associated lock 25 + * (seqcount_LOCKNAME_t) instead. 26 + * 27 + * If it's desired to automatically handle the sequence counter writer 28 + * serialization and non-preemptibility requirements, use a sequential 29 + * lock (seqlock_t) instead. 30 + * 31 + * See Documentation/locking/seqlock.rst 32 + */ 33 + typedef struct seqcount { 34 + unsigned sequence; 35 + #ifdef CONFIG_DEBUG_LOCK_ALLOC 36 + struct lockdep_map dep_map; 37 + #endif 38 + } seqcount_t; 39 + 40 + /* 41 + * For PREEMPT_RT, seqcount_LOCKNAME_t write side critical sections cannot 42 + * disable preemption. It can lead to higher latencies, and the write side 43 + * sections will not be able to acquire locks which become sleeping locks 44 + * (e.g. spinlock_t). 45 + * 46 + * To remain preemptible while avoiding a possible livelock caused by the 47 + * reader preempting the writer, use a different technique: let the reader 48 + * detect if a seqcount_LOCKNAME_t writer is in progress. If that is the 49 + * case, acquire then release the associated LOCKNAME writer serialization 50 + * lock. 
This will allow any possibly-preempted writer to make progress 51 + * until the end of its writer serialization lock critical section. 52 + * 53 + * This lock-unlock technique must be implemented for all of PREEMPT_RT 54 + * sleeping locks. See Documentation/locking/locktypes.rst 55 + */ 56 + #if defined(CONFIG_LOCKDEP) || defined(CONFIG_PREEMPT_RT) 57 + #define __SEQ_LOCK(expr) expr 58 + #else 59 + #define __SEQ_LOCK(expr) 60 + #endif 61 + 62 + #define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockbase) \ 63 + typedef struct seqcount_##lockname { \ 64 + seqcount_t seqcount; \ 65 + __SEQ_LOCK(locktype *lock); \ 66 + } seqcount_##lockname##_t; 67 + 68 + SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, raw_spin) 69 + SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, spin) 70 + SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, read) 71 + SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex) 72 + #undef SEQCOUNT_LOCKNAME 73 + 74 + /* 75 + * Sequential locks (seqlock_t) 76 + * 77 + * Sequence counters with an embedded spinlock for writer serialization 78 + * and non-preemptibility. 79 + * 80 + * For more info, see: 81 + * - Comments on top of seqcount_t 82 + * - Documentation/locking/seqlock.rst 83 + */ 84 + typedef struct { 85 + /* 86 + * Make sure that readers don't starve writers on PREEMPT_RT: use 87 + * seqcount_spinlock_t instead of seqcount_t. Check __SEQ_LOCK(). 88 + */ 89 + seqcount_spinlock_t seqcount; 90 + spinlock_t lock; 91 + } seqlock_t; 92 + 93 + #endif /* __LINUX_SEQLOCK_TYPES_H */
+2 -2
include/linux/shm.h
··· 2 2 #ifndef _LINUX_SHM_H_ 3 3 #define _LINUX_SHM_H_ 4 4 5 - #include <linux/list.h> 5 + #include <linux/types.h> 6 6 #include <asm/page.h> 7 - #include <uapi/linux/shm.h> 8 7 #include <asm/shmparam.h> 9 8 10 9 struct file; 10 + struct task_struct; 11 11 12 12 #ifdef CONFIG_SYSVIPC 13 13 struct sysv_shm {
+1
include/linux/signal.h
··· 3 3 #define _LINUX_SIGNAL_H 4 4 5 5 #include <linux/bug.h> 6 + #include <linux/list.h> 6 7 #include <linux/signal_types.h> 7 8 #include <linux/string.h> 8 9
+1 -1
include/linux/signal_types.h
··· 6 6 * Basic signal handling related data type definitions: 7 7 */ 8 8 9 - #include <linux/list.h> 9 + #include <linux/types.h> 10 10 #include <uapi/linux/signal.h> 11 11 12 12 typedef struct kernel_siginfo {
+31
include/linux/spinlock.h
··· 449 449 return raw_spin_is_contended(&lock->rlock); 450 450 } 451 451 452 + /* 453 + * Does a critical section need to be broken due to another 454 + * task waiting?: (technically does not depend on CONFIG_PREEMPTION, 455 + * but a general need for low latency) 456 + */ 457 + static inline int spin_needbreak(spinlock_t *lock) 458 + { 459 + #ifdef CONFIG_PREEMPTION 460 + return spin_is_contended(lock); 461 + #else 462 + return 0; 463 + #endif 464 + } 465 + 466 + /* 467 + * Check if a rwlock is contended. 468 + * Returns non-zero if there is another task waiting on the rwlock. 469 + * Returns zero if the lock is not contended or the system / underlying 470 + * rwlock implementation does not support contention detection. 471 + * Technically does not depend on CONFIG_PREEMPTION, but a general need 472 + * for low latency. 473 + */ 474 + static inline int rwlock_needbreak(rwlock_t *lock) 475 + { 476 + #ifdef CONFIG_PREEMPTION 477 + return rwlock_is_contended(lock); 478 + #else 479 + return 0; 480 + #endif 481 + } 482 + 452 483 #define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock) 453 484 454 485 #else /* !CONFIG_PREEMPT_RT */
+1 -8
include/linux/syscall_user_dispatch.h
··· 6 6 #define _SYSCALL_USER_DISPATCH_H 7 7 8 8 #include <linux/thread_info.h> 9 + #include <linux/syscall_user_dispatch_types.h> 9 10 10 11 #ifdef CONFIG_GENERIC_ENTRY 11 - 12 - struct syscall_user_dispatch { 13 - char __user *selector; 14 - unsigned long offset; 15 - unsigned long len; 16 - bool on_dispatch; 17 - }; 18 12 19 13 int set_syscall_user_dispatch(unsigned long mode, unsigned long offset, 20 14 unsigned long len, char __user *selector); ··· 23 29 void __user *data); 24 30 25 31 #else 26 - struct syscall_user_dispatch {}; 27 32 28 33 static inline int set_syscall_user_dispatch(unsigned long mode, unsigned long offset, 29 34 unsigned long len, char __user *selector)
+22
include/linux/syscall_user_dispatch_types.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef _SYSCALL_USER_DISPATCH_TYPES_H 3 + #define _SYSCALL_USER_DISPATCH_TYPES_H 4 + 5 + #include <linux/types.h> 6 + 7 + #ifdef CONFIG_GENERIC_ENTRY 8 + 9 + struct syscall_user_dispatch { 10 + char __user *selector; 11 + unsigned long offset; 12 + unsigned long len; 13 + bool on_dispatch; 14 + }; 15 + 16 + #else 17 + 18 + struct syscall_user_dispatch {}; 19 + 20 + #endif 21 + 22 + #endif /* _SYSCALL_USER_DISPATCH_TYPES_H */
+3
include/linux/time_namespace.h
··· 7 7 #include <linux/nsproxy.h> 8 8 #include <linux/ns_common.h> 9 9 #include <linux/err.h> 10 + #include <linux/time64.h> 10 11 11 12 struct user_namespace; 12 13 extern struct user_namespace init_user_ns; 14 + 15 + struct vm_area_struct; 13 16 14 17 struct timens_offsets { 15 18 struct timespec64 monotonic;
+1
include/linux/timekeeping.h
··· 4 4 5 5 #include <linux/errno.h> 6 6 #include <linux/clocksource_ids.h> 7 + #include <linux/ktime.h> 7 8 8 9 /* Included from linux/ktime.h */ 9 10
+1 -15
include/linux/timer.h
··· 7 7 #include <linux/stddef.h> 8 8 #include <linux/debugobjects.h> 9 9 #include <linux/stringify.h> 10 - 11 - struct timer_list { 12 - /* 13 - * All fields that change during normal runtime grouped to the 14 - * same cacheline 15 - */ 16 - struct hlist_node entry; 17 - unsigned long expires; 18 - void (*function)(struct timer_list *); 19 - u32 flags; 20 - 21 - #ifdef CONFIG_LOCKDEP 22 - struct lockdep_map lockdep_map; 23 - #endif 24 - }; 10 + #include <linux/timer_types.h> 25 11 26 12 #ifdef CONFIG_LOCKDEP 27 13 /*
+23
include/linux/timer_types.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef _LINUX_TIMER_TYPES_H 3 + #define _LINUX_TIMER_TYPES_H 4 + 5 + #include <linux/lockdep_types.h> 6 + #include <linux/types.h> 7 + 8 + struct timer_list { 9 + /* 10 + * All fields that change during normal runtime grouped to the 11 + * same cacheline 12 + */ 13 + struct hlist_node entry; 14 + unsigned long expires; 15 + void (*function)(struct timer_list *); 16 + u32 flags; 17 + 18 + #ifdef CONFIG_LOCKDEP 19 + struct lockdep_map lockdep_map; 20 + #endif 21 + }; 22 + 23 + #endif /* _LINUX_TIMER_TYPES_H */
+1 -12
include/linux/timerqueue.h
··· 3 3 #define _LINUX_TIMERQUEUE_H 4 4 5 5 #include <linux/rbtree.h> 6 - #include <linux/ktime.h> 7 - 8 - 9 - struct timerqueue_node { 10 - struct rb_node node; 11 - ktime_t expires; 12 - }; 13 - 14 - struct timerqueue_head { 15 - struct rb_root_cached rb_root; 16 - }; 17 - 6 + #include <linux/timerqueue_types.h> 18 7 19 8 extern bool timerqueue_add(struct timerqueue_head *head, 20 9 struct timerqueue_node *node);
+17
include/linux/timerqueue_types.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef _LINUX_TIMERQUEUE_TYPES_H 3 + #define _LINUX_TIMERQUEUE_TYPES_H 4 + 5 + #include <linux/rbtree_types.h> 6 + #include <linux/types.h> 7 + 8 + struct timerqueue_node { 9 + struct rb_node node; 10 + ktime_t expires; 11 + }; 12 + 13 + struct timerqueue_head { 14 + struct rb_root_cached rb_root; 15 + }; 16 + 17 + #endif /* _LINUX_TIMERQUEUE_TYPES_H */
+1
include/linux/torture.h
··· 21 21 #include <linux/debugobjects.h> 22 22 #include <linux/bug.h> 23 23 #include <linux/compiler.h> 24 + #include <linux/hrtimer.h> 24 25 25 26 /* Definitions for a non-string torture-test module parameter. */ 26 27 #define torture_param(type, name, init, msg) \
+3
include/linux/types.h
··· 120 120 #define aligned_be64 __aligned_be64 121 121 #define aligned_le64 __aligned_le64 122 122 123 + /* Nanosecond scalar representation for kernel time values */ 124 + typedef s64 ktime_t; 125 + 123 126 /** 124 127 * The type used for indexing onto a disc or disc partition. 125 128 *
+1 -10
include/linux/uidgid.h
··· 12 12 * to detect when we overlook these differences. 13 13 * 14 14 */ 15 - #include <linux/types.h> 15 + #include <linux/uidgid_types.h> 16 16 #include <linux/highuid.h> 17 17 18 18 struct user_namespace; 19 19 extern struct user_namespace init_user_ns; 20 20 struct uid_gid_map; 21 - 22 - typedef struct { 23 - uid_t val; 24 - } kuid_t; 25 - 26 - 27 - typedef struct { 28 - gid_t val; 29 - } kgid_t; 30 21 31 22 #define KUIDT_INIT(value) (kuid_t){ value } 32 23 #define KGIDT_INIT(value) (kgid_t){ value }
+15
include/linux/uidgid_types.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef _LINUX_UIDGID_TYPES_H 3 + #define _LINUX_UIDGID_TYPES_H 4 + 5 + #include <linux/types.h> 6 + 7 + typedef struct { 8 + uid_t val; 9 + } kuid_t; 10 + 11 + typedef struct { 12 + gid_t val; 13 + } kgid_t; 14 + 15 + #endif /* _LINUX_UIDGID_TYPES_H */
-1
include/linux/wait.h
··· 9 9 #include <linux/spinlock.h> 10 10 11 11 #include <asm/current.h> 12 - #include <uapi/linux/wait.h> 13 12 14 13 typedef struct wait_queue_entry wait_queue_entry_t; 15 14
+1 -15
include/linux/workqueue.h
··· 14 14 #include <linux/atomic.h> 15 15 #include <linux/cpumask.h> 16 16 #include <linux/rcupdate.h> 17 - 18 - struct workqueue_struct; 19 - 20 - struct work_struct; 21 - typedef void (*work_func_t)(struct work_struct *work); 22 - void delayed_work_timer_fn(struct timer_list *t); 17 + #include <linux/workqueue_types.h> 23 18 24 19 /* 25 20 * The first word is the work queue pointer and the flags rolled into ··· 89 94 90 95 #define WORK_STRUCT_FLAG_MASK ((1ul << WORK_STRUCT_FLAG_BITS) - 1) 91 96 #define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK) 92 - 93 - struct work_struct { 94 - atomic_long_t data; 95 - struct list_head entry; 96 - work_func_t func; 97 - #ifdef CONFIG_LOCKDEP 98 - struct lockdep_map lockdep_map; 99 - #endif 100 - }; 101 97 102 98 #define WORK_DATA_INIT() ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL) 103 99 #define WORK_DATA_STATIC_INIT() \
+25
include/linux/workqueue_types.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef _LINUX_WORKQUEUE_TYPES_H 3 + #define _LINUX_WORKQUEUE_TYPES_H 4 + 5 + #include <linux/atomic.h> 6 + #include <linux/lockdep_types.h> 7 + #include <linux/timer_types.h> 8 + #include <linux/types.h> 9 + 10 + struct workqueue_struct; 11 + 12 + struct work_struct; 13 + typedef void (*work_func_t)(struct work_struct *work); 14 + void delayed_work_timer_fn(struct timer_list *t); 15 + 16 + struct work_struct { 17 + atomic_long_t data; 18 + struct list_head entry; 19 + work_func_t func; 20 + #ifdef CONFIG_LOCKDEP 21 + struct lockdep_map lockdep_map; 22 + #endif 23 + }; 24 + 25 + #endif /* _LINUX_WORKQUEUE_TYPES_H */
+1 -1
include/uapi/linux/resource.h
··· 2 2 #ifndef _UAPI_LINUX_RESOURCE_H 3 3 #define _UAPI_LINUX_RESOURCE_H 4 4 5 - #include <linux/time.h> 5 + #include <linux/time_types.h> 6 6 #include <linux/types.h> 7 7 8 8 /*
+1
init/init_task.c
··· 12 12 #include <linux/audit.h> 13 13 #include <linux/numa.h> 14 14 #include <linux/scs.h> 15 + #include <linux/plist.h> 15 16 16 17 #include <linux/uaccess.h> 17 18
+1
ipc/shm.c
··· 29 29 #include <linux/mm.h> 30 30 #include <linux/hugetlb.h> 31 31 #include <linux/shm.h> 32 + #include <uapi/linux/shm.h> 32 33 #include <linux/init.h> 33 34 #include <linux/file.h> 34 35 #include <linux/mman.h>
+1
ipc/util.h
··· 14 14 #include <linux/unistd.h> 15 15 #include <linux/err.h> 16 16 #include <linux/ipc_namespace.h> 17 + #include <linux/pid.h> 17 18 18 19 /* 19 20 * The IPC ID contains 2 separate numbers - index and sequence number.
+1
kernel/Makefile
··· 114 114 obj-$(CONFIG_HAVE_STATIC_CALL) += static_call.o 115 115 obj-$(CONFIG_HAVE_STATIC_CALL_INLINE) += static_call_inline.o 116 116 obj-$(CONFIG_CFI_CLANG) += cfi.o 117 + obj-$(CONFIG_NUMA) += numa.o 117 118 118 119 obj-$(CONFIG_PERF_EVENTS) += events/ 119 120
+3 -2
kernel/async.c
··· 46 46 47 47 #include <linux/async.h> 48 48 #include <linux/atomic.h> 49 - #include <linux/ktime.h> 50 49 #include <linux/export.h> 51 - #include <linux/wait.h> 50 + #include <linux/ktime.h> 51 + #include <linux/pid.h> 52 52 #include <linux/sched.h> 53 53 #include <linux/slab.h> 54 + #include <linux/wait.h> 54 55 #include <linux/workqueue.h> 55 56 56 57 #include "workqueue_internal.h"
+1
kernel/bpf/hashtab.c
··· 7 7 #include <linux/jhash.h> 8 8 #include <linux/filter.h> 9 9 #include <linux/rculist_nulls.h> 10 + #include <linux/rcupdate_wait.h> 10 11 #include <linux/random.h> 11 12 #include <uapi/linux/btf.h> 12 13 #include <linux/rcupdate_trace.h>
+3 -1
kernel/exit.c
··· 69 69 #include <linux/rethook.h> 70 70 #include <linux/sysfs.h> 71 71 #include <linux/user_events.h> 72 - 73 72 #include <linux/uaccess.h> 73 + 74 + #include <uapi/linux/wait.h> 75 + 74 76 #include <asm/unistd.h> 75 77 #include <asm/mmu_context.h> 76 78
+2
kernel/fork.c
··· 53 53 #include <linux/seccomp.h> 54 54 #include <linux/swap.h> 55 55 #include <linux/syscalls.h> 56 + #include <linux/syscall_user_dispatch.h> 56 57 #include <linux/jiffies.h> 57 58 #include <linux/futex.h> 58 59 #include <linux/compat.h> ··· 100 99 #include <linux/stackprotector.h> 101 100 #include <linux/user_events.h> 102 101 #include <linux/iommu.h> 102 + #include <linux/rseq.h> 103 103 104 104 #include <asm/pgalloc.h> 105 105 #include <linux/uaccess.h>
+1
kernel/futex/core.c
··· 34 34 #include <linux/compat.h> 35 35 #include <linux/jhash.h> 36 36 #include <linux/pagemap.h> 37 + #include <linux/plist.h> 37 38 #include <linux/memblock.h> 38 39 #include <linux/fault-inject.h> 39 40 #include <linux/slab.h>
+1
kernel/futex/requeue.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-or-later 2 2 3 + #include <linux/plist.h> 3 4 #include <linux/sched/signal.h> 4 5 5 6 #include "futex.h"
+1
kernel/futex/waitwake.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-or-later 2 2 3 + #include <linux/plist.h> 3 4 #include <linux/sched/task.h> 4 5 #include <linux/sched/signal.h> 5 6 #include <linux/freezer.h>
+1
kernel/locking/spinlock_debug.c
··· 12 12 #include <linux/debug_locks.h> 13 13 #include <linux/delay.h> 14 14 #include <linux/export.h> 15 + #include <linux/pid.h> 15 16 16 17 void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, 17 18 struct lock_class_key *key, short inner)
+26
kernel/numa.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-or-later 2 + 3 + #include <linux/printk.h> 4 + #include <linux/numa.h> 5 + 6 + /* Stub functions: */ 7 + 8 + #ifndef memory_add_physaddr_to_nid 9 + int memory_add_physaddr_to_nid(u64 start) 10 + { 11 + pr_info_once("Unknown online node for memory at 0x%llx, assuming node 0\n", 12 + start); 13 + return 0; 14 + } 15 + EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); 16 + #endif 17 + 18 + #ifndef phys_to_target_node 19 + int phys_to_target_node(u64 start) 20 + { 21 + pr_info_once("Unknown target node for memory at 0x%llx, assuming node 0\n", 22 + start); 23 + return 0; 24 + } 25 + EXPORT_SYMBOL_GPL(phys_to_target_node); 26 + #endif
+1
kernel/pid_namespace.c
··· 23 23 #include <linux/sched/task.h> 24 24 #include <linux/sched/signal.h> 25 25 #include <linux/idr.h> 26 + #include <uapi/linux/wait.h> 26 27 #include "pid_sysctl.h" 27 28 28 29 static DEFINE_MUTEX(pid_caches_mutex);
+1
kernel/sched/core.c
··· 57 57 #include <linux/profile.h> 58 58 #include <linux/psi.h> 59 59 #include <linux/rcuwait_api.h> 60 + #include <linux/rseq.h> 60 61 #include <linux/sched/wake_q.h> 61 62 #include <linux/scs.h> 62 63 #include <linux/slab.h>
+1
lib/test_rhashtable.c
··· 16 16 #include <linux/kthread.h> 17 17 #include <linux/module.h> 18 18 #include <linux/rcupdate.h> 19 + #include <linux/rcupdate_wait.h> 19 20 #include <linux/rhashtable.h> 20 21 #include <linux/slab.h> 21 22 #include <linux/sched.h>
+1
mm/filemap.c
··· 45 45 #include <linux/migrate.h> 46 46 #include <linux/pipe_fs_i.h> 47 47 #include <linux/splice.h> 48 + #include <linux/rcupdate_wait.h> 48 49 #include <asm/pgalloc.h> 49 50 #include <asm/tlbflush.h> 50 51 #include "internal.h"
+1
mm/khugepaged.c
··· 17 17 #include <linux/userfaultfd_k.h> 18 18 #include <linux/page_idle.h> 19 19 #include <linux/page_table_check.h> 20 + #include <linux/rcupdate_wait.h> 20 21 #include <linux/swapops.h> 21 22 #include <linux/shmem_fs.h> 22 23 #include <linux/ksm.h>
+1
mm/shmem.c
··· 79 79 #include <linux/rmap.h> 80 80 #include <linux/uuid.h> 81 81 #include <linux/quotaops.h> 82 + #include <linux/rcupdate_wait.h> 82 83 83 84 #include <linux/uaccess.h> 84 85
+1
mm/swapfile.c
··· 42 42 #include <linux/completion.h> 43 43 #include <linux/suspend.h> 44 44 #include <linux/zswap.h> 45 + #include <linux/plist.h> 45 46 46 47 #include <asm/tlbflush.h> 47 48 #include <linux/swapops.h>
+1
net/ipv4/fib_trie.c
··· 52 52 #include <linux/if_arp.h> 53 53 #include <linux/proc_fs.h> 54 54 #include <linux/rcupdate.h> 55 + #include <linux/rcupdate_wait.h> 55 56 #include <linux/skbuff.h> 56 57 #include <linux/netlink.h> 57 58 #include <linux/init.h>
+2
net/netfilter/ipset/ip_set_bitmap_gen.h
··· 4 4 #ifndef __IP_SET_BITMAP_IP_GEN_H 5 5 #define __IP_SET_BITMAP_IP_GEN_H 6 6 7 + #include <linux/rcupdate_wait.h> 8 + 7 9 #define mtype_do_test IPSET_TOKEN(MTYPE, _do_test) 8 10 #define mtype_gc_test IPSET_TOKEN(MTYPE, _gc_test) 9 11 #define mtype_is_filled IPSET_TOKEN(MTYPE, _is_filled)
+1
net/netfilter/ipset/ip_set_hash_gen.h
··· 5 5 #define _IP_SET_HASH_GEN_H 6 6 7 7 #include <linux/rcupdate.h> 8 + #include <linux/rcupdate_wait.h> 8 9 #include <linux/jhash.h> 9 10 #include <linux/types.h> 10 11 #include <linux/netfilter/nfnetlink.h>
+1
net/netfilter/ipvs/ip_vs_conn.c
··· 31 31 #include <linux/seq_file.h> 32 32 #include <linux/jhash.h> 33 33 #include <linux/random.h> 34 + #include <linux/rcupdate_wait.h> 34 35 35 36 #include <net/net_namespace.h> 36 37 #include <net/ip_vs.h>
+1
net/netfilter/ipvs/ip_vs_est.c
··· 21 21 #include <linux/interrupt.h> 22 22 #include <linux/sysctl.h> 23 23 #include <linux/list.h> 24 + #include <linux/rcupdate_wait.h> 24 25 25 26 #include <net/ip_vs.h> 26 27
+1
security/selinux/hooks.c
··· 85 85 #include <linux/export.h> 86 86 #include <linux/msg.h> 87 87 #include <linux/shm.h> 88 + #include <uapi/linux/shm.h> 88 89 #include <linux/bpf.h> 89 90 #include <linux/kernfs.h> 90 91 #include <linux/stringhash.h> /* for hashlen_string() */
+1
security/smack/smack_lsm.c
··· 37 37 #include <linux/personality.h> 38 38 #include <linux/msg.h> 39 39 #include <linux/shm.h> 40 + #include <uapi/linux/shm.h> 40 41 #include <linux/binfmts.h> 41 42 #include <linux/parser.h> 42 43 #include <linux/fs_context.h>