Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v4.20 · 115 lines · 3.1 kB
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <asm/rmwcc.h>
#include <asm/percpu.h>
#include <linux/thread_info.h>

DECLARE_PER_CPU(int, __preempt_count);

/*
 * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such
 * that a decrement hitting 0 means we can and should reschedule.
 */
#define PREEMPT_ENABLED (0 + PREEMPT_NEED_RESCHED)

/*
 * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
 * that think a non-zero value indicates we cannot preempt.
 */
static __always_inline int preempt_count(void)
{
        return raw_cpu_read_4(__preempt_count) & ~PREEMPT_NEED_RESCHED;
}

static __always_inline void preempt_count_set(int pc)
{
        int old, new;

        do {
                old = raw_cpu_read_4(__preempt_count);
                new = (old & PREEMPT_NEED_RESCHED) |
                        (pc & ~PREEMPT_NEED_RESCHED);
        } while (raw_cpu_cmpxchg_4(__preempt_count, old, new) != old);
}

/*
 * must be macros to avoid header recursion hell
 */
#define init_task_preempt_count(p) do { } while (0)

#define init_idle_preempt_count(p, cpu) do { \
        per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
} while (0)

/*
 * We fold the NEED_RESCHED bit into the preempt count such that
 * preempt_enable() can decrement and test for needing to reschedule with a
 * single instruction.
 *
 * We invert the actual bit, so that when the decrement hits 0 we know we both
 * need to resched (the bit is cleared) and can resched (no preempt count).
 */

static __always_inline void set_preempt_need_resched(void)
{
        raw_cpu_and_4(__preempt_count, ~PREEMPT_NEED_RESCHED);
}

static __always_inline void clear_preempt_need_resched(void)
{
        raw_cpu_or_4(__preempt_count, PREEMPT_NEED_RESCHED);
}

static __always_inline bool test_preempt_need_resched(void)
{
        return !(raw_cpu_read_4(__preempt_count) & PREEMPT_NEED_RESCHED);
}

/*
 * The various preempt_count add/sub methods
 */

static __always_inline void __preempt_count_add(int val)
{
        raw_cpu_add_4(__preempt_count, val);
}

static __always_inline void __preempt_count_sub(int val)
{
        raw_cpu_add_4(__preempt_count, -val);
}

/*
 * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to reschedule
 * a decrement which hits zero means we have no preempt_count and should
 * reschedule.
 */
static __always_inline bool __preempt_count_dec_and_test(void)
{
        return GEN_UNARY_RMWcc("decl", __preempt_count, e, __percpu_arg([var]));
}

/*
 * Returns true when we need to resched and can (barring IRQ state).
 */
static __always_inline bool should_resched(int preempt_offset)
{
        return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
}

#ifdef CONFIG_PREEMPT
  extern asmlinkage void ___preempt_schedule(void);
# define __preempt_schedule() \
        asm volatile ("call ___preempt_schedule" : ASM_CALL_CONSTRAINT)

  extern asmlinkage void preempt_schedule(void);
  extern asmlinkage void ___preempt_schedule_notrace(void);
# define __preempt_schedule_notrace() \
        asm volatile ("call ___preempt_schedule_notrace" : ASM_CALL_CONSTRAINT)

  extern asmlinkage void preempt_schedule_notrace(void);
#endif

#endif /* __ASM_PREEMPT_H */
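
The "fold the NEED_RESCHED bit into the preempt count" comment is the heart of this header. Below is a minimal userspace sketch of the same trick, assuming nothing beyond standard C: the per-CPU variable is modeled as a plain global, the helper names are hypothetical stand-ins, and only the flag value 0x80000000 is taken from <linux/preempt.h>. It shows why keeping the flag set while no reschedule is pending lets one decrement-and-test-for-zero answer "did the count reach zero?" and "is a reschedule pending?" at once.

/* Userspace model of the inverted PREEMPT_NEED_RESCHED trick; the names
 * below are illustrative, not kernel APIs. */
#include <stdbool.h>
#include <stdio.h>

#define NEED_RESCHED_BIT 0x80000000u    /* value of PREEMPT_NEED_RESCHED */
#define ENABLED          (0 + NEED_RESCHED_BIT)

static unsigned int count = ENABLED;    /* stands in for per-CPU __preempt_count */

static void set_need_resched(void)   { count &= ~NEED_RESCHED_BIT; } /* cleared bit = pending */
static void clear_need_resched(void) { count |=  NEED_RESCHED_BIT; }

static bool dec_and_test(void)
{
        /* models the single "decl; sete" pair that GEN_UNARY_RMWcc emits */
        return --count == 0;
}

int main(void)
{
        count += 1;                      /* preempt_disable() */
        printf("no resched pending: %d\n", dec_and_test());  /* prints 0 */

        count += 1;                      /* preempt_disable() */
        set_need_resched();              /* scheduler flags the task */
        printf("resched pending:    %d\n", dec_and_test());  /* prints 1 */
        clear_need_resched();
        return 0;
}

The same inversion explains should_resched(): the raw per-CPU value can only equal a small preempt_offset when the high flag bit is cleared (a reschedule is pending) and the real count equals that offset, so a single comparison covers both conditions.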
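
preempt_count_set() also rewards a second look: it must replace the count bits while leaving the NEED_RESCHED flag, which may be flipped underneath it (e.g. from interrupt context), exactly as it finds it, which is why it loops on cmpxchg rather than doing a plain store. A hedged C11-atomics analogue of that loop (userspace again, not the kernel's raw_cpu_cmpxchg_4; count_set is an illustrative name):

#include <stdatomic.h>

#define NEED_RESCHED_BIT 0x80000000u

static _Atomic unsigned int raw;        /* models per-CPU __preempt_count */

/* Analogue of preempt_count_set(): install new count bits while preserving
 * whatever state the inverted flag bit currently has. */
static void count_set(unsigned int pc)
{
        unsigned int old = atomic_load(&raw);
        unsigned int new;

        do {
                new = (old & NEED_RESCHED_BIT) | (pc & ~NEED_RESCHED_BIT);
                /* on failure, 'old' is reloaded with the current value */
        } while (!atomic_compare_exchange_weak(&raw, &old, new));
}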