Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Disintegrate asm/system.h for Blackfin [ver #2]

Disintegrate asm/system.h for Blackfin.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: uclinux-dist-devel@blackfin.uclinux.org
Signed-off-by: Bob Liu <lliubbo@gmail.com>

Authored by David Howells; committed by Bob Liu.
3bed8d67 0eceb82f

+233 -196
+2
arch/blackfin/include/asm/atomic.h
··· 7 7 #ifndef __ARCH_BLACKFIN_ATOMIC__ 8 8 #define __ARCH_BLACKFIN_ATOMIC__ 9 9 10 + #include <asm/cmpxchg.h> 11 + 10 12 #ifdef CONFIG_SMP 11 13 12 14 #include <linux/linkage.h>
+48
arch/blackfin/include/asm/barrier.h
··· 1 + /* 2 + * Copyright 2004-2009 Analog Devices Inc. 3 + * Tony Kou (tonyko@lineo.ca) 4 + * 5 + * Licensed under the GPL-2 or later 6 + */ 7 + 8 + #ifndef _BLACKFIN_BARRIER_H 9 + #define _BLACKFIN_BARRIER_H 10 + 11 + #include <asm/cache.h> 12 + 13 + #define nop() __asm__ __volatile__ ("nop;\n\t" : : ) 14 + 15 + /* 16 + * Force strict CPU ordering. 17 + */ 18 + #ifdef CONFIG_SMP 19 + 20 + #ifdef __ARCH_SYNC_CORE_DCACHE 21 + /* Force Core data cache coherence */ 22 + # define mb() do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0) 23 + # define rmb() do { barrier(); smp_check_barrier(); } while (0) 24 + # define wmb() do { barrier(); smp_mark_barrier(); } while (0) 25 + # define read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0) 26 + #else 27 + # define mb() barrier() 28 + # define rmb() barrier() 29 + # define wmb() barrier() 30 + # define read_barrier_depends() do { } while (0) 31 + #endif 32 + 33 + #else /* !CONFIG_SMP */ 34 + 35 + #define mb() barrier() 36 + #define rmb() barrier() 37 + #define wmb() barrier() 38 + #define read_barrier_depends() do { } while (0) 39 + 40 + #endif /* !CONFIG_SMP */ 41 + 42 + #define smp_mb() mb() 43 + #define smp_rmb() rmb() 44 + #define smp_wmb() wmb() 45 + #define set_mb(var, value) do { var = value; mb(); } while (0) 46 + #define smp_read_barrier_depends() read_barrier_depends() 47 + 48 + #endif /* _BLACKFIN_BARRIER_H */
+132
arch/blackfin/include/asm/cmpxchg.h
··· 1 + /* 2 + * Copyright 2004-2011 Analog Devices Inc. 3 + * 4 + * Licensed under the GPL-2 or later. 5 + */ 6 + 7 + #ifndef __ARCH_BLACKFIN_CMPXCHG__ 8 + #define __ARCH_BLACKFIN_CMPXCHG__ 9 + 10 + #ifdef CONFIG_SMP 11 + 12 + #include <linux/linkage.h> 13 + 14 + asmlinkage unsigned long __raw_xchg_1_asm(volatile void *ptr, unsigned long value); 15 + asmlinkage unsigned long __raw_xchg_2_asm(volatile void *ptr, unsigned long value); 16 + asmlinkage unsigned long __raw_xchg_4_asm(volatile void *ptr, unsigned long value); 17 + asmlinkage unsigned long __raw_cmpxchg_1_asm(volatile void *ptr, 18 + unsigned long new, unsigned long old); 19 + asmlinkage unsigned long __raw_cmpxchg_2_asm(volatile void *ptr, 20 + unsigned long new, unsigned long old); 21 + asmlinkage unsigned long __raw_cmpxchg_4_asm(volatile void *ptr, 22 + unsigned long new, unsigned long old); 23 + 24 + static inline unsigned long __xchg(unsigned long x, volatile void *ptr, 25 + int size) 26 + { 27 + unsigned long tmp; 28 + 29 + switch (size) { 30 + case 1: 31 + tmp = __raw_xchg_1_asm(ptr, x); 32 + break; 33 + case 2: 34 + tmp = __raw_xchg_2_asm(ptr, x); 35 + break; 36 + case 4: 37 + tmp = __raw_xchg_4_asm(ptr, x); 38 + break; 39 + } 40 + 41 + return tmp; 42 + } 43 + 44 + /* 45 + * Atomic compare and exchange. Compare OLD with MEM, if identical, 46 + * store NEW in MEM. Return the initial value in MEM. Success is 47 + * indicated by comparing RETURN with OLD. 
48 + */ 49 + static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, 50 + unsigned long new, int size) 51 + { 52 + unsigned long tmp; 53 + 54 + switch (size) { 55 + case 1: 56 + tmp = __raw_cmpxchg_1_asm(ptr, new, old); 57 + break; 58 + case 2: 59 + tmp = __raw_cmpxchg_2_asm(ptr, new, old); 60 + break; 61 + case 4: 62 + tmp = __raw_cmpxchg_4_asm(ptr, new, old); 63 + break; 64 + } 65 + 66 + return tmp; 67 + } 68 + #define cmpxchg(ptr, o, n) \ 69 + ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \ 70 + (unsigned long)(n), sizeof(*(ptr)))) 71 + 72 + #else /* !CONFIG_SMP */ 73 + 74 + #include <mach/blackfin.h> 75 + #include <asm/irqflags.h> 76 + 77 + struct __xchg_dummy { 78 + unsigned long a[100]; 79 + }; 80 + #define __xg(x) ((volatile struct __xchg_dummy *)(x)) 81 + 82 + static inline unsigned long __xchg(unsigned long x, volatile void *ptr, 83 + int size) 84 + { 85 + unsigned long tmp = 0; 86 + unsigned long flags; 87 + 88 + flags = hard_local_irq_save(); 89 + 90 + switch (size) { 91 + case 1: 92 + __asm__ __volatile__ 93 + ("%0 = b%2 (z);\n\t" 94 + "b%2 = %1;\n\t" 95 + : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory"); 96 + break; 97 + case 2: 98 + __asm__ __volatile__ 99 + ("%0 = w%2 (z);\n\t" 100 + "w%2 = %1;\n\t" 101 + : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory"); 102 + break; 103 + case 4: 104 + __asm__ __volatile__ 105 + ("%0 = %2;\n\t" 106 + "%2 = %1;\n\t" 107 + : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory"); 108 + break; 109 + } 110 + hard_local_irq_restore(flags); 111 + return tmp; 112 + } 113 + 114 + #include <asm-generic/cmpxchg-local.h> 115 + 116 + /* 117 + * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make 118 + * them available. 
119 + */ 120 + #define cmpxchg_local(ptr, o, n) \ 121 + ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\ 122 + (unsigned long)(n), sizeof(*(ptr)))) 123 + #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) 124 + 125 + #include <asm-generic/cmpxchg.h> 126 + 127 + #endif /* !CONFIG_SMP */ 128 + 129 + #define xchg(ptr, x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))) 130 + #define tas(ptr) ((void)xchg((ptr), 1)) 131 + 132 + #endif /* __ARCH_BLACKFIN_CMPXCHG__ */
+1
arch/blackfin/include/asm/exec.h
··· 1 + /* define arch_align_stack() here */
+1
arch/blackfin/include/asm/irq_handler.h
··· 9 9 10 10 #include <linux/types.h> 11 11 #include <linux/linkage.h> 12 + #include <mach/irq.h> 12 13 13 14 /* init functions only */ 14 15 extern int __init init_arch_irq(void);
+39
arch/blackfin/include/asm/switch_to.h
··· 1 + /* 2 + * Copyright 2004-2009 Analog Devices Inc. 3 + * Tony Kou (tonyko@lineo.ca) 4 + * 5 + * Licensed under the GPL-2 or later 6 + */ 7 + 8 + #ifndef _BLACKFIN_SWITCH_TO_H 9 + #define _BLACKFIN_SWITCH_TO_H 10 + 11 + #define prepare_to_switch() do { } while(0) 12 + 13 + /* 14 + * switch_to(n) should switch tasks to task ptr, first checking that 15 + * ptr isn't the current task, in which case it does nothing. 16 + */ 17 + 18 + #include <asm/l1layout.h> 19 + #include <asm/mem_map.h> 20 + 21 + asmlinkage struct task_struct *resume(struct task_struct *prev, struct task_struct *next); 22 + 23 + #ifndef CONFIG_SMP 24 + #define switch_to(prev,next,last) \ 25 + do { \ 26 + memcpy (&task_thread_info(prev)->l1_task_info, L1_SCRATCH_TASK_INFO, \ 27 + sizeof *L1_SCRATCH_TASK_INFO); \ 28 + memcpy (L1_SCRATCH_TASK_INFO, &task_thread_info(next)->l1_task_info, \ 29 + sizeof *L1_SCRATCH_TASK_INFO); \ 30 + (last) = resume (prev, next); \ 31 + } while (0) 32 + #else 33 + #define switch_to(prev, next, last) \ 34 + do { \ 35 + (last) = resume(prev, next); \ 36 + } while (0) 37 + #endif 38 + 39 + #endif /* _BLACKFIN_SWITCH_TO_H */
+5 -192
arch/blackfin/include/asm/system.h
··· 1 - /* 2 - * Copyright 2004-2009 Analog Devices Inc. 3 - * Tony Kou (tonyko@lineo.ca) 4 - * 5 - * Licensed under the GPL-2 or later 6 - */ 7 - 8 - #ifndef _BLACKFIN_SYSTEM_H 9 - #define _BLACKFIN_SYSTEM_H 10 - 11 - #include <linux/linkage.h> 12 - #include <linux/irqflags.h> 13 - #include <mach/anomaly.h> 14 - #include <asm/cache.h> 15 - #include <asm/pda.h> 16 - #include <asm/irq.h> 17 - 18 - /* 19 - * Force strict CPU ordering. 20 - */ 21 - #define nop() __asm__ __volatile__ ("nop;\n\t" : : ) 22 - #define smp_mb() mb() 23 - #define smp_rmb() rmb() 24 - #define smp_wmb() wmb() 25 - #define set_mb(var, value) do { var = value; mb(); } while (0) 26 - #define smp_read_barrier_depends() read_barrier_depends() 27 - 28 - #ifdef CONFIG_SMP 29 - asmlinkage unsigned long __raw_xchg_1_asm(volatile void *ptr, unsigned long value); 30 - asmlinkage unsigned long __raw_xchg_2_asm(volatile void *ptr, unsigned long value); 31 - asmlinkage unsigned long __raw_xchg_4_asm(volatile void *ptr, unsigned long value); 32 - asmlinkage unsigned long __raw_cmpxchg_1_asm(volatile void *ptr, 33 - unsigned long new, unsigned long old); 34 - asmlinkage unsigned long __raw_cmpxchg_2_asm(volatile void *ptr, 35 - unsigned long new, unsigned long old); 36 - asmlinkage unsigned long __raw_cmpxchg_4_asm(volatile void *ptr, 37 - unsigned long new, unsigned long old); 38 - 39 - #ifdef __ARCH_SYNC_CORE_DCACHE 40 - /* Force Core data cache coherence */ 41 - # define mb() do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0) 42 - # define rmb() do { barrier(); smp_check_barrier(); } while (0) 43 - # define wmb() do { barrier(); smp_mark_barrier(); } while (0) 44 - # define read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0) 45 - #else 46 - # define mb() barrier() 47 - # define rmb() barrier() 48 - # define wmb() barrier() 49 - # define read_barrier_depends() do { } while (0) 50 - #endif 51 - 52 - static inline unsigned long __xchg(unsigned long x, volatile void *ptr, 
53 - int size) 54 - { 55 - unsigned long tmp; 56 - 57 - switch (size) { 58 - case 1: 59 - tmp = __raw_xchg_1_asm(ptr, x); 60 - break; 61 - case 2: 62 - tmp = __raw_xchg_2_asm(ptr, x); 63 - break; 64 - case 4: 65 - tmp = __raw_xchg_4_asm(ptr, x); 66 - break; 67 - } 68 - 69 - return tmp; 70 - } 71 - 72 - /* 73 - * Atomic compare and exchange. Compare OLD with MEM, if identical, 74 - * store NEW in MEM. Return the initial value in MEM. Success is 75 - * indicated by comparing RETURN with OLD. 76 - */ 77 - static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, 78 - unsigned long new, int size) 79 - { 80 - unsigned long tmp; 81 - 82 - switch (size) { 83 - case 1: 84 - tmp = __raw_cmpxchg_1_asm(ptr, new, old); 85 - break; 86 - case 2: 87 - tmp = __raw_cmpxchg_2_asm(ptr, new, old); 88 - break; 89 - case 4: 90 - tmp = __raw_cmpxchg_4_asm(ptr, new, old); 91 - break; 92 - } 93 - 94 - return tmp; 95 - } 96 - #define cmpxchg(ptr, o, n) \ 97 - ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \ 98 - (unsigned long)(n), sizeof(*(ptr)))) 99 - 100 - #else /* !CONFIG_SMP */ 101 - 102 - #define mb() barrier() 103 - #define rmb() barrier() 104 - #define wmb() barrier() 105 - #define read_barrier_depends() do { } while (0) 106 - 107 - struct __xchg_dummy { 108 - unsigned long a[100]; 109 - }; 110 - #define __xg(x) ((volatile struct __xchg_dummy *)(x)) 111 - 112 - #include <mach/blackfin.h> 113 - 114 - static inline unsigned long __xchg(unsigned long x, volatile void *ptr, 115 - int size) 116 - { 117 - unsigned long tmp = 0; 118 - unsigned long flags; 119 - 120 - flags = hard_local_irq_save(); 121 - 122 - switch (size) { 123 - case 1: 124 - __asm__ __volatile__ 125 - ("%0 = b%2 (z);\n\t" 126 - "b%2 = %1;\n\t" 127 - : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory"); 128 - break; 129 - case 2: 130 - __asm__ __volatile__ 131 - ("%0 = w%2 (z);\n\t" 132 - "w%2 = %1;\n\t" 133 - : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory"); 134 - break; 135 - case 4: 
136 - __asm__ __volatile__ 137 - ("%0 = %2;\n\t" 138 - "%2 = %1;\n\t" 139 - : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory"); 140 - break; 141 - } 142 - hard_local_irq_restore(flags); 143 - return tmp; 144 - } 145 - 146 - #include <asm-generic/cmpxchg-local.h> 147 - 148 - /* 149 - * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make 150 - * them available. 151 - */ 152 - #define cmpxchg_local(ptr, o, n) \ 153 - ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\ 154 - (unsigned long)(n), sizeof(*(ptr)))) 155 - #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) 156 - 157 - #include <asm-generic/cmpxchg.h> 158 - 159 - #endif /* !CONFIG_SMP */ 160 - 161 - #define xchg(ptr, x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))) 162 - #define tas(ptr) ((void)xchg((ptr), 1)) 163 - 164 - #define prepare_to_switch() do { } while(0) 165 - 166 - /* 167 - * switch_to(n) should switch tasks to task ptr, first checking that 168 - * ptr isn't the current task, in which case it does nothing. 169 - */ 170 - 171 - #include <asm/l1layout.h> 172 - #include <asm/mem_map.h> 173 - 174 - asmlinkage struct task_struct *resume(struct task_struct *prev, struct task_struct *next); 175 - 176 - #ifndef CONFIG_SMP 177 - #define switch_to(prev,next,last) \ 178 - do { \ 179 - memcpy (&task_thread_info(prev)->l1_task_info, L1_SCRATCH_TASK_INFO, \ 180 - sizeof *L1_SCRATCH_TASK_INFO); \ 181 - memcpy (L1_SCRATCH_TASK_INFO, &task_thread_info(next)->l1_task_info, \ 182 - sizeof *L1_SCRATCH_TASK_INFO); \ 183 - (last) = resume (prev, next); \ 184 - } while (0) 185 - #else 186 - #define switch_to(prev, next, last) \ 187 - do { \ 188 - (last) = resume(prev, next); \ 189 - } while (0) 190 - #endif 191 - 192 - #endif /* _BLACKFIN_SYSTEM_H */ 1 + /* FILE TO BE DELETED. DO NOT ADD STUFF HERE! */ 2 + #include <asm/barrier.h> 3 + #include <asm/cmpxchg.h> 4 + #include <asm/exec.h> 5 + #include <asm/switch_to.h>
+1
arch/blackfin/kernel/asm-offsets.c
··· 14 14 #include <linux/irq.h> 15 15 #include <linux/thread_info.h> 16 16 #include <linux/kbuild.h> 17 + #include <asm/pda.h> 17 18 18 19 int main(void) 19 20 {
-1
arch/blackfin/kernel/ipipe.c
··· 31 31 #include <linux/kthread.h> 32 32 #include <linux/unistd.h> 33 33 #include <linux/io.h> 34 - #include <asm/system.h> 35 34 #include <linux/atomic.h> 36 35 #include <asm/irq_handler.h> 37 36
-1
arch/blackfin/kernel/kgdb_test.c
··· 13 13 14 14 #include <asm/current.h> 15 15 #include <asm/uaccess.h> 16 - #include <asm/system.h> 17 16 18 17 #include <asm/blackfin.h> 19 18
+1
arch/blackfin/kernel/process.c
··· 19 19 #include <asm/blackfin.h> 20 20 #include <asm/fixed_code.h> 21 21 #include <asm/mem_map.h> 22 + #include <asm/irq.h> 22 23 23 24 asmlinkage void ret_from_fork(void); 24 25
-1
arch/blackfin/kernel/ptrace.c
··· 20 20 21 21 #include <asm/page.h> 22 22 #include <asm/pgtable.h> 23 - #include <asm/system.h> 24 23 #include <asm/processor.h> 25 24 #include <asm/asm-offsets.h> 26 25 #include <asm/dma.h>
-1
arch/blackfin/kernel/reboot.c
··· 9 9 #include <linux/interrupt.h> 10 10 #include <asm/bfin-global.h> 11 11 #include <asm/reboot.h> 12 - #include <asm/system.h> 13 12 #include <asm/bfrom.h> 14 13 15 14 /* A system soft reset makes external memory unusable so force
+1
arch/blackfin/kernel/setup.c
··· 30 30 #include <asm/fixed_code.h> 31 31 #include <asm/early_printk.h> 32 32 #include <asm/irq_handler.h> 33 + #include <asm/pda.h> 33 34 34 35 u16 _bfin_swrst; 35 36 EXPORT_SYMBOL(_bfin_swrst);
+1
arch/blackfin/kernel/trace.c
··· 21 21 #include <asm/fixed_code.h> 22 22 #include <asm/traps.h> 23 23 #include <asm/irq_handler.h> 24 + #include <asm/pda.h> 24 25 25 26 void decode_address(char *buf, unsigned long address) 26 27 {
+1
arch/blackfin/kernel/traps.c
··· 17 17 #include <asm/trace.h> 18 18 #include <asm/fixed_code.h> 19 19 #include <asm/pseudo_instructions.h> 20 + #include <asm/pda.h> 20 21 21 22 #ifdef CONFIG_KGDB 22 23 # include <linux/kgdb.h>