Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

m68k: merge MMU and non-MMU versions of system.h

The non-MMU m68k targets can use the same asm/system.h as the MMU
targets. So switch the current system_mm.h to be system.h and remove
system_no.h.

The assembly support code for the non-MMU resume functions needs to
be modified to match the now common switch_to() macro. Specifically
this means correctly saving and restoring the status flags in the case
of the ColdFire resume, and some reordering of the code to not use
registers before they are saved or after they are restored.

Signed-off-by: Greg Ungerer <gerg@uclinux.org>
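
A minimal sketch of the calling convention the now-common switch_to() relies
on (illustrative only, not part of the patch; the wrapper function name is
invented and kernel headers are assumed): prev and next are pinned into
%a0/%a1 with GCC explicit-register locals, resume() is called, and the task
we actually switched away from comes back in %d1.

	#include <linux/linkage.h>
	#include <linux/sched.h>

	asmlinkage void resume(void);

	/* Same pattern as the switch_to() macro in the diff below, written
	 * out as a function for readability. */
	static inline struct task_struct *switch_to_sketch(struct task_struct *prev,
							    struct task_struct *next)
	{
		register void *_prev __asm__ ("a0") = prev;	/* input to resume() */
		register void *_next __asm__ ("a1") = next;	/* input to resume() */
		register void *_last __asm__ ("d1");		/* output of resume() */

		__asm__ __volatile__("jbsr resume"
			: "=a" (_prev), "=a" (_next), "=d" (_last)
			: "0" (_prev), "1" (_next)
			: "d0", "d2", "d3", "d4", "d5");

		/* The core scheduler calls switch_to(prev, next, prev), so the
		 * freshly resumed context learns which task ran before it. */
		return (struct task_struct *)_last;
	}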

+222 -380
+6 -6
arch/m68k/include/asm/entry_no.h
···
 .endm
 
 .macro RDUSP
-	movel	sw_usp,%a2
+	movel	sw_usp,%a3
 .endm
 
 .macro WRUSP
-	movel	%a0,sw_usp
+	movel	%a3,sw_usp
 .endm
 
 #else /* !CONFIG_COLDFIRE_SW_A7 */
···
 .endm
 
 .macro RDUSP
-	/*move	%usp,%a2*/
-	.word	0x4e6a
+	/*move	%usp,%a3*/
+	.word	0x4e6b
 .endm
 
 .macro WRUSP
-	/*move	%a0,%usp*/
-	.word	0x4e60
+	/*move	%a3,%usp*/
+	.word	0x4e63
 .endm
 
 #endif /* !CONFIG_COLDFIRE_SW_A7 */
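Note: the .word values in the hardware-assisted RDUSP/WRUSP variants are
hand-encoded "move to/from USP" opcodes; the low three bits of the opcode
select the address register, so retargeting the macros from %a2/%a0 to %a3
turns 0x4e6a into 0x4e6b (move %usp,%a3) and 0x4e60 into 0x4e63
(move %a3,%usp), matching the commented mnemonics above.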
+191 -3
arch/m68k/include/asm/system.h
···
-#ifdef __uClinux__
-#include "system_no.h"
+#ifndef _M68K_SYSTEM_H
+#define _M68K_SYSTEM_H
+
+#include <linux/linkage.h>
+#include <linux/kernel.h>
+#include <linux/irqflags.h>
+#include <asm/segment.h>
+#include <asm/entry.h>
+
+#ifdef __KERNEL__
+
+/*
+ * switch_to(n) should switch tasks to task ptr, first checking that
+ * ptr isn't the current task, in which case it does nothing. This
+ * also clears the TS-flag if the task we switched to has used the
+ * math co-processor latest.
+ */
+/*
+ * switch_to() saves the extra registers, that are not saved
+ * automatically by SAVE_SWITCH_STACK in resume(), ie. d0-d5 and
+ * a0-a1. Some of these are used by schedule() and its predecessors
+ * and so we might get see unexpected behaviors when a task returns
+ * with unexpected register values.
+ *
+ * syscall stores these registers itself and none of them are used
+ * by syscall after the function in the syscall has been called.
+ *
+ * Beware that resume now expects *next to be in d1 and the offset of
+ * tss to be in a1. This saves a few instructions as we no longer have
+ * to push them onto the stack and read them back right after.
+ *
+ * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
+ *
+ * Changed 96/09/19 by Andreas Schwab
+ * pass prev in a0, next in a1
+ */
+asmlinkage void resume(void);
+#define switch_to(prev,next,last) do { \
+  register void *_prev __asm__ ("a0") = (prev); \
+  register void *_next __asm__ ("a1") = (next); \
+  register void *_last __asm__ ("d1"); \
+  __asm__ __volatile__("jbsr resume" \
+		       : "=a" (_prev), "=a" (_next), "=d" (_last) \
+		       : "0" (_prev), "1" (_next) \
+		       : "d0", "d2", "d3", "d4", "d5"); \
+  (last) = _last; \
+} while (0)
+
+
+/*
+ * Force strict CPU ordering.
+ * Not really required on m68k...
+ */
+#define nop()		do { asm volatile ("nop"); barrier(); } while (0)
+#define mb()		barrier()
+#define rmb()		barrier()
+#define wmb()		barrier()
+#define read_barrier_depends()	((void)0)
+#define set_mb(var, value)	({ (var) = (value); wmb(); })
+
+#define smp_mb()	barrier()
+#define smp_rmb()	barrier()
+#define smp_wmb()	barrier()
+#define smp_read_barrier_depends()	((void)0)
+
+#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((volatile struct __xchg_dummy *)(x))
+
+#ifndef CONFIG_RMW_INSNS
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+{
+	unsigned long flags, tmp;
+
+	local_irq_save(flags);
+
+	switch (size) {
+	case 1:
+		tmp = *(u8 *)ptr;
+		*(u8 *)ptr = x;
+		x = tmp;
+		break;
+	case 2:
+		tmp = *(u16 *)ptr;
+		*(u16 *)ptr = x;
+		x = tmp;
+		break;
+	case 4:
+		tmp = *(u32 *)ptr;
+		*(u32 *)ptr = x;
+		x = tmp;
+		break;
+	default:
+		BUG();
+	}
+
+	local_irq_restore(flags);
+	return x;
+}
 #else
-#include "system_mm.h"
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+{
+	switch (size) {
+	case 1:
+		__asm__ __volatile__
+			("moveb %2,%0\n\t"
+			 "1:\n\t"
+			 "casb %0,%1,%2\n\t"
+			 "jne 1b"
+			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
+		break;
+	case 2:
+		__asm__ __volatile__
+			("movew %2,%0\n\t"
+			 "1:\n\t"
+			 "casw %0,%1,%2\n\t"
+			 "jne 1b"
+			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
+		break;
+	case 4:
+		__asm__ __volatile__
+			("movel %2,%0\n\t"
+			 "1:\n\t"
+			 "casl %0,%1,%2\n\t"
+			 "jne 1b"
+			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
+		break;
+	}
+	return x;
+}
 #endif
+
+#include <asm-generic/cmpxchg-local.h>
+
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+
+/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
+#ifdef CONFIG_RMW_INSNS
+#define __HAVE_ARCH_CMPXCHG	1
+
+static inline unsigned long __cmpxchg(volatile void *p, unsigned long old,
+				      unsigned long new, int size)
+{
+	switch (size) {
+	case 1:
+		__asm__ __volatile__ ("casb %0,%2,%1"
+				      : "=d" (old), "=m" (*(char *)p)
+				      : "d" (new), "0" (old), "m" (*(char *)p));
+		break;
+	case 2:
+		__asm__ __volatile__ ("casw %0,%2,%1"
+				      : "=d" (old), "=m" (*(short *)p)
+				      : "d" (new), "0" (old), "m" (*(short *)p));
+		break;
+	case 4:
+		__asm__ __volatile__ ("casl %0,%2,%1"
+				      : "=d" (old), "=m" (*(int *)p)
+				      : "d" (new), "0" (old), "m" (*(int *)p));
+		break;
+	}
+	return old;
+}
+
+#define cmpxchg(ptr, o, n) \
+	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
+			(unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg_local(ptr, o, n) \
+	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
+			(unsigned long)(n), sizeof(*(ptr))))
+#else
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n) \
+	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+			(unsigned long)(n), sizeof(*(ptr))))
+
+#include <asm-generic/cmpxchg.h>
+
+#endif
+
+#define arch_align_stack(x) (x)
+
+#endif /* __KERNEL__ */
+
+#endif /* _M68K_SYSTEM_H */
-193
arch/m68k/include/asm/system_mm.h
···
-#ifndef _M68K_SYSTEM_H
-#define _M68K_SYSTEM_H
-
-#include <linux/linkage.h>
-#include <linux/kernel.h>
-#include <linux/irqflags.h>
-#include <asm/segment.h>
-#include <asm/entry.h>
-
-#ifdef __KERNEL__
-
-/*
- * switch_to(n) should switch tasks to task ptr, first checking that
- * ptr isn't the current task, in which case it does nothing. This
- * also clears the TS-flag if the task we switched to has used the
- * math co-processor latest.
- */
-/*
- * switch_to() saves the extra registers, that are not saved
- * automatically by SAVE_SWITCH_STACK in resume(), ie. d0-d5 and
- * a0-a1. Some of these are used by schedule() and its predecessors
- * and so we might get see unexpected behaviors when a task returns
- * with unexpected register values.
- *
- * syscall stores these registers itself and none of them are used
- * by syscall after the function in the syscall has been called.
- *
- * Beware that resume now expects *next to be in d1 and the offset of
- * tss to be in a1. This saves a few instructions as we no longer have
- * to push them onto the stack and read them back right after.
- *
- * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
- *
- * Changed 96/09/19 by Andreas Schwab
- * pass prev in a0, next in a1
- */
-asmlinkage void resume(void);
-#define switch_to(prev,next,last) do { \
-  register void *_prev __asm__ ("a0") = (prev); \
-  register void *_next __asm__ ("a1") = (next); \
-  register void *_last __asm__ ("d1"); \
-  __asm__ __volatile__("jbsr resume" \
-		       : "=a" (_prev), "=a" (_next), "=d" (_last) \
-		       : "0" (_prev), "1" (_next) \
-		       : "d0", "d2", "d3", "d4", "d5"); \
-  (last) = _last; \
-} while (0)
-
-
-/*
- * Force strict CPU ordering.
- * Not really required on m68k...
- */
-#define nop()		do { asm volatile ("nop"); barrier(); } while (0)
-#define mb()		barrier()
-#define rmb()		barrier()
-#define wmb()		barrier()
-#define read_barrier_depends()	((void)0)
-#define set_mb(var, value)	({ (var) = (value); wmb(); })
-
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	((void)0)
-
-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((volatile struct __xchg_dummy *)(x))
-
-#ifndef CONFIG_RMW_INSNS
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
-	unsigned long flags, tmp;
-
-	local_irq_save(flags);
-
-	switch (size) {
-	case 1:
-		tmp = *(u8 *)ptr;
-		*(u8 *)ptr = x;
-		x = tmp;
-		break;
-	case 2:
-		tmp = *(u16 *)ptr;
-		*(u16 *)ptr = x;
-		x = tmp;
-		break;
-	case 4:
-		tmp = *(u32 *)ptr;
-		*(u32 *)ptr = x;
-		x = tmp;
-		break;
-	default:
-		BUG();
-	}
-
-	local_irq_restore(flags);
-	return x;
-}
-#else
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
-	switch (size) {
-	case 1:
-		__asm__ __volatile__
-			("moveb %2,%0\n\t"
-			 "1:\n\t"
-			 "casb %0,%1,%2\n\t"
-			 "jne 1b"
-			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
-		break;
-	case 2:
-		__asm__ __volatile__
-			("movew %2,%0\n\t"
-			 "1:\n\t"
-			 "casw %0,%1,%2\n\t"
-			 "jne 1b"
-			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
-		break;
-	case 4:
-		__asm__ __volatile__
-			("movel %2,%0\n\t"
-			 "1:\n\t"
-			 "casl %0,%1,%2\n\t"
-			 "jne 1b"
-			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
-		break;
-	}
-	return x;
-}
-#endif
-
-#include <asm-generic/cmpxchg-local.h>
-
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-
-/*
- * Atomic compare and exchange. Compare OLD with MEM, if identical,
- * store NEW in MEM. Return the initial value in MEM. Success is
- * indicated by comparing RETURN with OLD.
- */
-#ifdef CONFIG_RMW_INSNS
-#define __HAVE_ARCH_CMPXCHG	1
-
-static inline unsigned long __cmpxchg(volatile void *p, unsigned long old,
-				      unsigned long new, int size)
-{
-	switch (size) {
-	case 1:
-		__asm__ __volatile__ ("casb %0,%2,%1"
-				      : "=d" (old), "=m" (*(char *)p)
-				      : "d" (new), "0" (old), "m" (*(char *)p));
-		break;
-	case 2:
-		__asm__ __volatile__ ("casw %0,%2,%1"
-				      : "=d" (old), "=m" (*(short *)p)
-				      : "d" (new), "0" (old), "m" (*(short *)p));
-		break;
-	case 4:
-		__asm__ __volatile__ ("casl %0,%2,%1"
-				      : "=d" (old), "=m" (*(int *)p)
-				      : "d" (new), "0" (old), "m" (*(int *)p));
-		break;
-	}
-	return old;
-}
-
-#define cmpxchg(ptr, o, n) \
-	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
-			(unsigned long)(n), sizeof(*(ptr))))
-#define cmpxchg_local(ptr, o, n) \
-	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
-			(unsigned long)(n), sizeof(*(ptr))))
-#else
-
-/*
- * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
- * them available.
- */
-#define cmpxchg_local(ptr, o, n) \
-	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
-			(unsigned long)(n), sizeof(*(ptr))))
-
-#include <asm-generic/cmpxchg.h>
-
-#endif
-
-#define arch_align_stack(x) (x)
-
-#endif /* __KERNEL__ */
-
-#endif /* _M68K_SYSTEM_H */
-153
arch/m68k/include/asm/system_no.h
···
-#ifndef _M68KNOMMU_SYSTEM_H
-#define _M68KNOMMU_SYSTEM_H
-
-#include <linux/linkage.h>
-#include <linux/irqflags.h>
-#include <asm/segment.h>
-#include <asm/entry.h>
-
-/*
- * switch_to(n) should switch tasks to task ptr, first checking that
- * ptr isn't the current task, in which case it does nothing. This
- * also clears the TS-flag if the task we switched to has used the
- * math co-processor latest.
- */
-/*
- * switch_to() saves the extra registers, that are not saved
- * automatically by SAVE_SWITCH_STACK in resume(), ie. d0-d5 and
- * a0-a1. Some of these are used by schedule() and its predecessors
- * and so we might get see unexpected behaviors when a task returns
- * with unexpected register values.
- *
- * syscall stores these registers itself and none of them are used
- * by syscall after the function in the syscall has been called.
- *
- * Beware that resume now expects *next to be in d1 and the offset of
- * tss to be in a1. This saves a few instructions as we no longer have
- * to push them onto the stack and read them back right after.
- *
- * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
- *
- * Changed 96/09/19 by Andreas Schwab
- * pass prev in a0, next in a1, offset of tss in d1, and whether
- * the mm structures are shared in d2 (to avoid atc flushing).
- */
-asmlinkage void resume(void);
-#define switch_to(prev,next,last) \
-{ \
-  void *_last; \
-  __asm__ __volatile__( \
-	"movel	%1, %%a0\n\t" \
-	"movel	%2, %%a1\n\t" \
-	"jbsr resume\n\t" \
-	"movel	%%d1, %0\n\t" \
-	: "=d" (_last) \
-	: "d" (prev), "d" (next) \
-	: "cc", "d0", "d1", "d2", "d3", "d4", "d5", "a0", "a1"); \
-  (last) = _last; \
-}
-
-#define iret() __asm__ __volatile__ ("rte": : :"memory", "sp", "cc")
-
-/*
- * Force strict CPU ordering.
- * Not really required on m68k...
- */
-#define nop()	asm volatile ("nop"::)
-#define mb()	asm volatile ("" : : :"memory")
-#define rmb()	asm volatile ("" : : :"memory")
-#define wmb()	asm volatile ("" : : :"memory")
-#define set_mb(var, value)	({ (var) = (value); wmb(); })
-
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	do { } while(0)
-
-#define read_barrier_depends()	((void)0)
-
-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((volatile struct __xchg_dummy *)(x))
-
-#ifndef CONFIG_RMW_INSNS
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
-  unsigned long tmp, flags;
-
-  local_irq_save(flags);
-
-  switch (size) {
-  case 1:
-    __asm__ __volatile__
-    ("moveb %2,%0\n\t"
-      "moveb %1,%2"
-    : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
-    break;
-  case 2:
-    __asm__ __volatile__
-    ("movew %2,%0\n\t"
-      "movew %1,%2"
-    : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
-    break;
-  case 4:
-    __asm__ __volatile__
-    ("movel %2,%0\n\t"
-      "movel %1,%2"
-    : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
-    break;
-  }
-  local_irq_restore(flags);
-  return tmp;
-}
-#else
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
-	switch (size) {
-	case 1:
-		__asm__ __volatile__
-			("moveb %2,%0\n\t"
-			 "1:\n\t"
-			 "casb %0,%1,%2\n\t"
-			 "jne 1b"
-			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
-		break;
-	case 2:
-		__asm__ __volatile__
-			("movew %2,%0\n\t"
-			 "1:\n\t"
-			 "casw %0,%1,%2\n\t"
-			 "jne 1b"
-			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
-		break;
-	case 4:
-		__asm__ __volatile__
-			("movel %2,%0\n\t"
-			 "1:\n\t"
-			 "casl %0,%1,%2\n\t"
-			 "jne 1b"
-			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
-		break;
-	}
-	return x;
-}
-#endif
-
-#include <asm-generic/cmpxchg-local.h>
-
-/*
- * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
- * them available.
- */
-#define cmpxchg_local(ptr, o, n) \
-	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
-			(unsigned long)(n), sizeof(*(ptr))))
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-
-#include <asm-generic/cmpxchg.h>
-
-#define arch_align_stack(x) (x)
-
-
-#endif /* _M68KNOMMU_SYSTEM_H */
+6 -7
arch/m68k/platform/68328/entry.S
···
 
 /*
  * Beware - when entering resume, prev (the current task) is
- * in a0, next (the new task) is in a1,so don't change these
+ * in a0, next (the new task) is in a1, so don't change these
  * registers until their contents are no longer needed.
  */
 ENTRY(resume)
 	movel	%a0,%d1				/* save prev thread in d1 */
 	movew	%sr,%a0@(TASK_THREAD+THREAD_SR)	/* save sr */
-	movel	%usp,%a2			/* save usp */
-	movel	%a2,%a0@(TASK_THREAD+THREAD_USP)
-
 	SAVE_SWITCH_STACK
 	movel	%sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack */
+	movel	%usp,%a3			/* save usp */
+	movel	%a3,%a0@(TASK_THREAD+THREAD_USP)
+
+	movel	%a1@(TASK_THREAD+THREAD_USP),%a3 /* restore user stack */
+	movel	%a3,%usp
 	movel	%a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new thread stack */
 	RESTORE_SWITCH_STACK
-
-	movel	%a1@(TASK_THREAD+THREAD_USP),%a0 /* restore user stack */
-	movel	%a0,%usp
 	movew	%a1@(TASK_THREAD+THREAD_SR),%sr	/* restore thread status reg */
 	rts
 
+6 -7
arch/m68k/platform/68360/entry.S
···
 
 /*
  * Beware - when entering resume, prev (the current task) is
- * in a0, next (the new task) is in a1,so don't change these
+ * in a0, next (the new task) is in a1, so don't change these
  * registers until their contents are no longer needed.
  */
 ENTRY(resume)
 	movel	%a0,%d1				/* save prev thread in d1 */
 	movew	%sr,%a0@(TASK_THREAD+THREAD_SR)	/* save sr */
-	movel	%usp,%a2			/* save usp */
-	movel	%a2,%a0@(TASK_THREAD+THREAD_USP)
-
 	SAVE_SWITCH_STACK
 	movel	%sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack */
+	movel	%usp,%a3			/* save usp */
+	movel	%a3,%a0@(TASK_THREAD+THREAD_USP)
+
+	movel	%a1@(TASK_THREAD+THREAD_USP),%a3 /* restore user stack */
+	movel	%a3,%usp
 	movel	%a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new thread stack */
 	RESTORE_SWITCH_STACK
-
-	movel	%a1@(TASK_THREAD+THREAD_USP),%a0 /* restore user stack */
-	movel	%a0,%usp
 	movew	%a1@(TASK_THREAD+THREAD_SR),%sr	/* restore thread status reg */
 	rts
 
+13 -11
arch/m68k/platform/coldfire/entry.S
···
 
 /*
  * Beware - when entering resume, prev (the current task) is
- * in a0, next (the new task) is in a1,so don't change these
+ * in a0, next (the new task) is in a1, so don't change these
  * registers until their contents are no longer needed.
- * This is always called in supervisor mode, so don't bother to save
- * and restore sr; user's process sr is actually in the stack.
  */
 ENTRY(resume)
-	movel	%a0, %d1			/* get prev thread in d1 */
-	RDUSP
-	movel	%a2,%a0@(TASK_THREAD+THREAD_USP)
-
+	movew	%sr,%d1				/* save current status */
+	movew	%d1,%a0@(TASK_THREAD+THREAD_SR)
+	movel	%a0,%d1				/* get prev thread in d1 */
 	SAVE_SWITCH_STACK
 	movel	%sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack pointer */
-	movel	%a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new thread stack */
-	RESTORE_SWITCH_STACK
+	RDUSP					/* movel %usp,%a3 */
+	movel	%a3,%a0@(TASK_THREAD+THREAD_USP) /* save thread user stack */
 
-	movel	%a1@(TASK_THREAD+THREAD_USP),%a0 /* restore thread user stack */
-	WRUSP
+	movel	%a1@(TASK_THREAD+THREAD_USP),%a3 /* restore thread user stack */
+	WRUSP					/* movel %a3,%usp */
+	movel	%a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new kernel stack */
+	movew	%a1@(TASK_THREAD+THREAD_SR),%d7	/* restore new status */
+	movew	%d7,%sr
+	RESTORE_SWITCH_STACK
 	rts
+
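
For reference, the TASK_THREAD+THREAD_* offsets used by the resume code above
are generated (via asm-offsets) from the m68k thread_struct. An abbreviated
sketch of the fields involved is shown below; only the members touched by
resume() are listed and the remaining fields of the real definition in
arch/m68k/include/asm/processor.h are omitted.

	/* Abbreviated sketch -- not the full definition. */
	struct thread_struct {
		unsigned long	ksp;	/* kernel stack pointer (THREAD_KSP) */
		unsigned long	usp;	/* user stack pointer   (THREAD_USP) */
		unsigned short	sr;	/* saved status register (THREAD_SR) */
		/* ... */
	};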