Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v4.12-rc4 (119 lines, 3.2 kB)
#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <asm/rmwcc.h>
#include <asm/percpu.h>
#include <linux/thread_info.h>

DECLARE_PER_CPU(int, __preempt_count);

/*
 * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such
 * that a decrement hitting 0 means we can and should reschedule.
 */
#define PREEMPT_ENABLED	(0 + PREEMPT_NEED_RESCHED)

/*
 * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
 * that think a non-zero value indicates we cannot preempt.
 */
static __always_inline int preempt_count(void)
{
	return raw_cpu_read_4(__preempt_count) & ~PREEMPT_NEED_RESCHED;
}

static __always_inline void preempt_count_set(int pc)
{
	int old, new;

	do {
		old = raw_cpu_read_4(__preempt_count);
		new = (old & PREEMPT_NEED_RESCHED) |
			(pc & ~PREEMPT_NEED_RESCHED);
	} while (raw_cpu_cmpxchg_4(__preempt_count, old, new) != old);
}

/*
 * must be macros to avoid header recursion hell
 */
#define init_task_preempt_count(p) do { } while (0)

#define init_idle_preempt_count(p, cpu) do { \
	per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
} while (0)

/*
 * We fold the NEED_RESCHED bit into the preempt count such that
 * preempt_enable() can decrement and test for needing to reschedule with a
 * single instruction.
 *
 * We invert the actual bit, so that when the decrement hits 0 we know we both
 * need to resched (the bit is cleared) and can resched (no preempt count).
 */

static __always_inline void set_preempt_need_resched(void)
{
	raw_cpu_and_4(__preempt_count, ~PREEMPT_NEED_RESCHED);
}

static __always_inline void clear_preempt_need_resched(void)
{
	raw_cpu_or_4(__preempt_count, PREEMPT_NEED_RESCHED);
}

static __always_inline bool test_preempt_need_resched(void)
{
	return !(raw_cpu_read_4(__preempt_count) & PREEMPT_NEED_RESCHED);
}

/*
 * The various preempt_count add/sub methods
 */

static __always_inline void __preempt_count_add(int val)
{
	raw_cpu_add_4(__preempt_count, val);
}

static __always_inline void __preempt_count_sub(int val)
{
	raw_cpu_add_4(__preempt_count, -val);
}

/*
 * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to reschedule
 * a decrement which hits zero means we have no preempt_count and should
 * reschedule.
 */
static __always_inline bool __preempt_count_dec_and_test(void)
{
	GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), e);
}

/*
 * Returns true when we need to resched and can (barring IRQ state).
 */
static __always_inline bool should_resched(int preempt_offset)
{
	return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
}

#ifdef CONFIG_PREEMPT
  extern asmlinkage void ___preempt_schedule(void);
# define __preempt_schedule() \
({ \
	register void *__sp asm(_ASM_SP); \
	asm volatile ("call ___preempt_schedule" : "+r"(__sp)); \
})

  extern asmlinkage void preempt_schedule(void);
  extern asmlinkage void ___preempt_schedule_notrace(void);
# define __preempt_schedule_notrace() \
({ \
	register void *__sp asm(_ASM_SP); \
	asm volatile ("call ___preempt_schedule_notrace" : "+r"(__sp)); \
})
  extern asmlinkage void preempt_schedule_notrace(void);
#endif

#endif /* __ASM_PREEMPT_H */
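To make the inverted-bit fold concrete, here is a minimal user-space sketch of the same encoding; it is not kernel code. PREEMPT_NEED_RESCHED is modeled as bit 31 (matching the 0x80000000 value in <linux/preempt.h>), and the pcount variable and helper names are hypothetical stand-ins for the per-CPU __preempt_count and the accessors above:

/*
 * Hypothetical user-space model of the inverted NEED_RESCHED fold.
 * Build with: cc -o fold fold.c
 */
#include <stdio.h>
#include <stdbool.h>

#define NEED_RESCHED_BIT 0x80000000u	/* assumed: bit 31, as in <linux/preempt.h> */
#define ENABLED		 (0u + NEED_RESCHED_BIT)	/* count 0, no resched pending */

static unsigned int pcount = ENABLED;	/* stands in for __preempt_count */

/* set_preempt_need_resched(): CLEARING the inverted bit marks a pending resched */
static void set_need_resched(void)   { pcount &= ~NEED_RESCHED_BIT; }
/* clear_preempt_need_resched(): SETTING the bit means no resched is pending */
static void clear_need_resched(void) { pcount |=  NEED_RESCHED_BIT; }

static void disable_preempt(void) { pcount += 1; }	/* __preempt_count_add(1) */

/*
 * __preempt_count_dec_and_test(): a single decrement reaching exactly 0
 * proves both that the count dropped to 0 (preemption allowed) and that
 * the inverted bit is clear (a reschedule is wanted).
 */
static bool dec_and_test(void) { return --pcount == 0; }

int main(void)
{
	disable_preempt();	/* pcount = NEED_RESCHED_BIT | 1 */
	set_need_resched();	/* pcount = 1: bit cleared, resched pending */
	if (dec_and_test())
		printf("hit 0: can and should reschedule\n");

	clear_need_resched();	/* back to ENABLED */
	disable_preempt();
	if (!dec_and_test())
		printf("bit still set: no resched, decrement lands on the inverted bit\n");
	return 0;
}

Two notes on how the real header consumes this. First, __preempt_count_dec_and_test() has no visible return statement because GEN_UNARY_RMWcc (from asm/rmwcc.h) expands to an asm construct that supplies the return paths itself, keyed on the "e" (zero) condition code of a single decl on the per-CPU word. Second, the generic preempt_enable() in include/linux/preempt.h (not shown on this page) is, roughly, a barrier() followed by "if the decrement-and-test fires, call __preempt_schedule()", which is exactly why collapsing both checks into one decrement is worth the inverted encoding.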