/* Generic system definitions, based on MN10300 definitions.
 *
 * It should be possible to use these on really simple architectures,
 * but it serves more as a starting point for new ports.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef __ASM_GENERIC_SYSTEM_H
#define __ASM_GENERIC_SYSTEM_H

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/irqflags.h>

#include <asm/cmpxchg-local.h>
#include <asm/cmpxchg.h>

struct task_struct;

/* context switching is now performed out-of-line in switch_to.S */
extern struct task_struct *__switch_to(struct task_struct *,
		struct task_struct *);
#define switch_to(prev, next, last) \
	do { \
		((last) = __switch_to((prev), (next))); \
	} while (0)

#define arch_align_stack(x) (x)

#define nop() asm volatile ("nop")

#endif /* !__ASSEMBLY__ */

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * This implementation only contains a compiler barrier.
 */

#define mb()	asm volatile ("": : :"memory")
#define rmb()	mb()
#define wmb()	asm volatile ("": : :"memory")

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#endif

#define set_mb(var, value)  do { var = value;  mb(); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)

#define read_barrier_depends()		do {} while (0)
#define smp_read_barrier_depends()	do {} while (0)

/*
 * we make sure local_irq_enable() doesn't cause priority inversion
 */
#ifndef __ASSEMBLY__

/* This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

static inline
unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	unsigned long ret, flags;

	switch (size) {
	case 1:
#ifdef __xchg_u8
		return __xchg_u8(x, ptr);
#else
		local_irq_save(flags);
		ret = *(volatile u8 *)ptr;
		*(volatile u8 *)ptr = x;
		local_irq_restore(flags);
		return ret;
#endif /* __xchg_u8 */

	case 2:
#ifdef __xchg_u16
		return __xchg_u16(x, ptr);
#else
		local_irq_save(flags);
		ret = *(volatile u16 *)ptr;
		*(volatile u16 *)ptr = x;
		local_irq_restore(flags);
		return ret;
#endif /* __xchg_u16 */

	case 4:
#ifdef __xchg_u32
		return __xchg_u32(x, ptr);
#else
		local_irq_save(flags);
		ret = *(volatile u32 *)ptr;
		*(volatile u32 *)ptr = x;
		local_irq_restore(flags);
		return ret;
#endif /* __xchg_u32 */

#ifdef CONFIG_64BIT
	case 8:
#ifdef __xchg_u64
		return __xchg_u64(x, ptr);
#else
		local_irq_save(flags);
		ret = *(volatile u64 *)ptr;
		*(volatile u64 *)ptr = x;
		local_irq_restore(flags);
		return ret;
#endif /* __xchg_u64 */
#endif /* CONFIG_64BIT */

	default:
		__xchg_called_with_bad_pointer();
		return x;
	}
}

#define xchg(ptr, x) \
	((__typeof__(*(ptr))) __xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))

#endif /* !__ASSEMBLY__ */

#endif /* __KERNEL__ */
#endif /* __ASM_GENERIC_SYSTEM_H */
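/*
 * Illustrative usage sketch (not part of the original header): xchg()
 * atomically stores a new value at *ptr and returns the previous value,
 * so a trivial test-and-set flag can be built on top of it.  The names
 * example_flag, example_try_set() and example_clear() below are
 * hypothetical and exist only to show the calling convention; on
 * architectures that do not provide __xchg_u32(), the generic fallback
 * above performs the swap by disabling local interrupts around a plain
 * load and store.
 */
static unsigned int example_flag;

static inline int example_try_set(void)
{
	/* Non-zero iff this caller flipped the flag from 0 to 1. */
	return xchg(&example_flag, 1) == 0;
}

static inline void example_clear(void)
{
	xchg(&example_flag, 0);
}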