Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

locking/core: Provide common cpu_relax_yield() definition

No need to duplicate the same define everywhere. Since
the only user is stop-machine and the only provider is
s390, we can use a default implementation of cpu_relax_yield()
in sched.h.

Suggested-by: Russell King <rmk+kernel@armlinux.org.uk>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Acked-by: Russell King <rmk+kernel@armlinux.org.uk>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Noam Camus <noamc@ezchip.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Cc: kvm@vger.kernel.org
Cc: linux-arch@vger.kernel.org
Cc: linux-s390 <linux-s390@vger.kernel.org>
Cc: linuxppc-dev@lists.ozlabs.org
Cc: sparclinux@vger.kernel.org
Cc: virtualization@lists.linux-foundation.org
Cc: xen-devel@lists.xenproject.org
Link: http://lkml.kernel.org/r/1479298985-191589-1-git-send-email-borntraeger@de.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by Christian Borntraeger and committed by Ingo Molnar.
6d0d2878 43496d35

+5 -38
-1
arch/alpha/include/asm/processor.h
··· 58 58 ((tsk) == current ? rdusp() : task_thread_info(tsk)->pcb.usp) 59 59 60 60 #define cpu_relax() barrier() 61 - #define cpu_relax_yield() cpu_relax() 62 61 63 62 #define ARCH_HAS_PREFETCH 64 63 #define ARCH_HAS_PREFETCHW
-3
arch/arc/include/asm/processor.h
··· 60 60 #ifndef CONFIG_EZNPS_MTM_EXT 61 61 62 62 #define cpu_relax() barrier() 63 - #define cpu_relax_yield() cpu_relax() 64 63 65 64 #else 66 65 67 66 #define cpu_relax() \ 68 67 __asm__ __volatile__ (".word %0" : : "i"(CTOP_INST_SCHD_RW) : "memory") 69 - 70 - #define cpu_relax_yield() cpu_relax() 71 68 72 69 #endif 73 70
-2
arch/arm/include/asm/processor.h
··· 82 82 #define cpu_relax() barrier() 83 83 #endif 84 84 85 - #define cpu_relax_yield() cpu_relax() 86 - 87 85 #define task_pt_regs(p) \ 88 86 ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1) 89 87
-2
arch/arm64/include/asm/processor.h
··· 149 149 asm volatile("yield" ::: "memory"); 150 150 } 151 151 152 - #define cpu_relax_yield() cpu_relax() 153 - 154 152 /* Thread switching */ 155 153 extern struct task_struct *cpu_switch_to(struct task_struct *prev, 156 154 struct task_struct *next);
-1
arch/avr32/include/asm/processor.h
··· 92 92 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3)) 93 93 94 94 #define cpu_relax() barrier() 95 - #define cpu_relax_yield() cpu_relax() 96 95 #define cpu_sync_pipeline() asm volatile("sub pc, -2" : : : "memory") 97 96 98 97 struct cpu_context {
-1
arch/blackfin/include/asm/processor.h
··· 92 92 #define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp) 93 93 94 94 #define cpu_relax() smp_mb() 95 - #define cpu_relax_yield() cpu_relax() 96 95 97 96 /* Get the Silicon Revision of the chip */ 98 97 static inline uint32_t __pure bfin_revid(void)
-1
arch/c6x/include/asm/processor.h
··· 121 121 #define KSTK_ESP(task) (task_pt_regs(task)->sp) 122 122 123 123 #define cpu_relax() do { } while (0) 124 - #define cpu_relax_yield() cpu_relax() 125 124 126 125 extern const struct seq_operations cpuinfo_op; 127 126
-1
arch/cris/include/asm/processor.h
··· 63 63 #define init_stack (init_thread_union.stack) 64 64 65 65 #define cpu_relax() barrier() 66 - #define cpu_relax_yield() cpu_relax() 67 66 68 67 void default_idle(void); 69 68
-1
arch/frv/include/asm/processor.h
··· 107 107 #define KSTK_ESP(tsk) ((tsk)->thread.frame0->sp) 108 108 109 109 #define cpu_relax() barrier() 110 - #define cpu_relax_yield() cpu_relax() 111 110 112 111 /* data cache prefetch */ 113 112 #define ARCH_HAS_PREFETCH
-1
arch/h8300/include/asm/processor.h
··· 127 127 #define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp) 128 128 129 129 #define cpu_relax() barrier() 130 - #define cpu_relax_yield() cpu_relax() 131 130 132 131 #define HARD_RESET_NOW() ({ \ 133 132 local_irq_disable(); \
-1
arch/hexagon/include/asm/processor.h
··· 56 56 } 57 57 58 58 #define cpu_relax() __vmyield() 59 - #define cpu_relax_yield() cpu_relax() 60 59 61 60 /* 62 61 * Decides where the kernel will search for a free chunk of vm space during
-1
arch/ia64/include/asm/processor.h
··· 547 547 } 548 548 549 549 #define cpu_relax() ia64_hint(ia64_hint_pause) 550 - #define cpu_relax_yield() cpu_relax() 551 550 552 551 static inline int 553 552 ia64_get_irr(unsigned int vector)
-1
arch/m32r/include/asm/processor.h
··· 133 133 #define KSTK_ESP(tsk) ((tsk)->thread.sp) 134 134 135 135 #define cpu_relax() barrier() 136 - #define cpu_relax_yield() cpu_relax() 137 136 138 137 #endif /* _ASM_M32R_PROCESSOR_H */
-1
arch/m68k/include/asm/processor.h
··· 156 156 #define task_pt_regs(tsk) ((struct pt_regs *) ((tsk)->thread.esp0)) 157 157 158 158 #define cpu_relax() barrier() 159 - #define cpu_relax_yield() cpu_relax() 160 159 161 160 #endif
-1
arch/metag/include/asm/processor.h
··· 152 152 #define user_stack_pointer(regs) ((regs)->ctx.AX[0].U0) 153 153 154 154 #define cpu_relax() barrier() 155 - #define cpu_relax_yield() cpu_relax() 156 155 157 156 extern void setup_priv(void); 158 157
-1
arch/microblaze/include/asm/processor.h
··· 22 22 extern const struct seq_operations cpuinfo_op; 23 23 24 24 # define cpu_relax() barrier() 25 - # define cpu_relax_yield() cpu_relax() 26 25 27 26 #define task_pt_regs(tsk) \ 28 27 (((struct pt_regs *)(THREAD_SIZE + task_stack_page(tsk))) - 1)
-1
arch/mips/include/asm/processor.h
··· 389 389 #define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status) 390 390 391 391 #define cpu_relax() barrier() 392 - #define cpu_relax_yield() cpu_relax() 393 392 394 393 /* 395 394 * Return_address is a replacement for __builtin_return_address(count)
-1
arch/mn10300/include/asm/processor.h
··· 69 69 extern void dodgy_tsc(void); 70 70 71 71 #define cpu_relax() barrier() 72 - #define cpu_relax_yield() cpu_relax() 73 72 74 73 /* 75 74 * User space process size: 1.75GB (default).
-1
arch/nios2/include/asm/processor.h
··· 88 88 #define KSTK_ESP(tsk) ((tsk)->thread.kregs->sp) 89 89 90 90 #define cpu_relax() barrier() 91 - #define cpu_relax_yield() cpu_relax() 92 91 93 92 #endif /* __ASSEMBLY__ */ 94 93
-1
arch/openrisc/include/asm/processor.h
··· 92 92 #define init_stack (init_thread_union.stack) 93 93 94 94 #define cpu_relax() barrier() 95 - #define cpu_relax_yield() cpu_relax() 96 95 97 96 #endif /* __ASSEMBLY__ */ 98 97 #endif /* __ASM_OPENRISC_PROCESSOR_H */
-1
arch/parisc/include/asm/processor.h
··· 309 309 #define KSTK_ESP(tsk) ((tsk)->thread.regs.gr[30]) 310 310 311 311 #define cpu_relax() barrier() 312 - #define cpu_relax_yield() cpu_relax() 313 312 314 313 /* 315 314 * parisc_requires_coherency() is used to identify the combined VIPT/PIPT
-2
arch/powerpc/include/asm/processor.h
··· 404 404 #define cpu_relax() barrier() 405 405 #endif 406 406 407 - #define cpu_relax_yield() cpu_relax() 408 - 409 407 /* Check that a certain kernel stack pointer is valid in task_struct p */ 410 408 int validate_sp(unsigned long sp, struct task_struct *p, 411 409 unsigned long nbytes);
+1
arch/s390/include/asm/processor.h
··· 234 234 /* 235 235 * Give up the time slice of the virtual PU. 236 236 */ 237 + #define cpu_relax_yield cpu_relax_yield 237 238 void cpu_relax_yield(void); 238 239 239 240 #define cpu_relax() barrier()
-1
arch/score/include/asm/processor.h
··· 24 24 #define current_text_addr() ({ __label__ _l; _l: &&_l; }) 25 25 26 26 #define cpu_relax() barrier() 27 - #define cpu_relax_yield() cpu_relax() 28 27 #define release_thread(thread) do {} while (0) 29 28 30 29 /*
-1
arch/sh/include/asm/processor.h
··· 97 97 98 98 #define cpu_sleep() __asm__ __volatile__ ("sleep" : : : "memory") 99 99 #define cpu_relax() barrier() 100 - #define cpu_relax_yield() cpu_relax() 101 100 102 101 void default_idle(void); 103 102 void stop_this_cpu(void *);
-1
arch/sparc/include/asm/processor_32.h
··· 119 119 int do_mathemu(struct pt_regs *regs, struct task_struct *fpt); 120 120 121 121 #define cpu_relax() barrier() 122 - #define cpu_relax_yield() cpu_relax() 123 122 124 123 extern void (*sparc_idle)(void); 125 124
-1
arch/sparc/include/asm/processor_64.h
··· 216 216 "nop\n\t" \ 217 217 ".previous" \ 218 218 ::: "memory") 219 - #define cpu_relax_yield() cpu_relax() 220 219 221 220 /* Prefetch support. This is tuned for UltraSPARC-III and later. 222 221 * UltraSPARC-I will treat these as nops, and UltraSPARC-II has
-2
arch/tile/include/asm/processor.h
··· 264 264 barrier(); 265 265 } 266 266 267 - #define cpu_relax_yield() cpu_relax() 268 - 269 267 /* Info on this processor (see fs/proc/cpuinfo.c) */ 270 268 struct seq_operations; 271 269 extern const struct seq_operations cpuinfo_op;
-1
arch/unicore32/include/asm/processor.h
··· 71 71 unsigned long get_wchan(struct task_struct *p); 72 72 73 73 #define cpu_relax() barrier() 74 - #define cpu_relax_yield() cpu_relax() 75 74 76 75 #define task_pt_regs(p) \ 77 76 ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
-2
arch/x86/include/asm/processor.h
··· 588 588 rep_nop(); 589 589 } 590 590 591 - #define cpu_relax_yield() cpu_relax() 592 - 593 591 /* Stop speculative execution and prefetching of modified code. */ 594 592 static inline void sync_core(void) 595 593 {
-1
arch/x86/um/asm/processor.h
··· 26 26 } 27 27 28 28 #define cpu_relax() rep_nop() 29 - #define cpu_relax_yield() cpu_relax() 30 29 31 30 #define task_pt_regs(t) (&(t)->thread.regs) 32 31
-1
arch/xtensa/include/asm/processor.h
··· 206 206 #define KSTK_ESP(tsk) (task_pt_regs(tsk)->areg[1]) 207 207 208 208 #define cpu_relax() barrier() 209 - #define cpu_relax_yield() cpu_relax() 210 209 211 210 /* Special register access. */ 212 211
+4
include/linux/sched.h
··· 2444 2444 static inline void calc_load_exit_idle(void) { } 2445 2445 #endif /* CONFIG_NO_HZ_COMMON */ 2446 2446 2447 + #ifndef cpu_relax_yield 2448 + #define cpu_relax_yield() cpu_relax() 2449 + #endif 2450 + 2447 2451 /* 2448 2452 * Do not use outside of architecture code which knows its limitations. 2449 2453 *