Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] CRIS update: SMP

Patches to support SMP.

* Each CPU has its own current_pgd.
* flush_tlb_range is implemented as flush_tlb_mm.
* Atomic operations implemented with spinlocks.
* Semaphores implemented with spinlocks.

Signed-off-by: Mikael Starvik <starvik@axis.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Mikael Starvik and committed by Linus Torvalds
8d20a541 21783c97

+69 -131
+2 -24
arch/cris/arch-v10/mm/fault.c
··· 14 14 #include <asm/uaccess.h> 15 15 #include <asm/pgtable.h> 16 16 #include <asm/arch/svinto.h> 17 + #include <asm/mmu_context.h> 17 18 18 19 /* debug of low-level TLB reload */ 19 20 #undef DEBUG ··· 24 23 #else 25 24 #define D(x) 26 25 #endif 27 - 28 - extern volatile pgd_t *current_pgd; 29 26 30 27 extern const struct exception_table_entry 31 28 *search_exception_tables(unsigned long addr); ··· 45 46 int page_id; 46 47 int acc, inv; 47 48 #endif 48 - pgd_t* pgd = (pgd_t*)current_pgd; 49 + pgd_t* pgd = (pgd_t*)per_cpu(current_pgd, smp_processor_id()); 49 50 pmd_t *pmd; 50 51 pte_t pte; 51 52 int miss, we, writeac; ··· 92 93 *R_TLB_HI = cause; 93 94 *R_TLB_LO = pte_val(pte); 94 95 local_irq_restore(flags); 95 - } 96 - 97 - /* Called from arch/cris/mm/fault.c to find fixup code. */ 98 - int 99 - find_fixup_code(struct pt_regs *regs) 100 - { 101 - const struct exception_table_entry *fixup; 102 - 103 - if ((fixup = search_exception_tables(regs->irp)) != 0) { 104 - /* Adjust the instruction pointer in the stackframe. */ 105 - regs->irp = fixup->fixup; 106 - 107 - /* 108 - * Don't return by restoring the CPU state, so switch 109 - * frame-type. 110 - */ 111 - regs->frametype = CRIS_FRAME_NORMAL; 112 - return 1; 113 - } 114 - 115 - return 0; 116 96 }
+1 -1
arch/cris/arch-v10/mm/init.c
··· 42 42 * switch_mm) 43 43 */ 44 44 45 - current_pgd = init_mm.pgd; 45 + per_cpu(current_pgd, smp_processor_id()) = init_mm.pgd; 46 46 47 47 /* initialise the TLB (tlb.c) */ 48 48
+1 -48
arch/cris/arch-v10/mm/tlb.c
··· 139 139 local_irq_restore(flags); 140 140 } 141 141 142 - /* invalidate a page range */ 143 - 144 - void 145 - flush_tlb_range(struct vm_area_struct *vma, 146 - unsigned long start, 147 - unsigned long end) 148 - { 149 - struct mm_struct *mm = vma->vm_mm; 150 - int page_id = mm->context.page_id; 151 - int i; 152 - unsigned long flags; 153 - 154 - D(printk("tlb: flush range %p<->%p in context %d (%p)\n", 155 - start, end, page_id, mm)); 156 - 157 - if(page_id == NO_CONTEXT) 158 - return; 159 - 160 - start &= PAGE_MASK; /* probably not necessary */ 161 - end &= PAGE_MASK; /* dito */ 162 - 163 - /* invalidate those TLB entries that match both the mm context 164 - * and the virtual address range 165 - */ 166 - 167 - local_save_flags(flags); 168 - local_irq_disable(); 169 - for(i = 0; i < NUM_TLB_ENTRIES; i++) { 170 - unsigned long tlb_hi, vpn; 171 - *R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i); 172 - tlb_hi = *R_TLB_HI; 173 - vpn = tlb_hi & PAGE_MASK; 174 - if (IO_EXTRACT(R_TLB_HI, page_id, tlb_hi) == page_id && 175 - vpn >= start && vpn < end) { 176 - *R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) | 177 - IO_FIELD(R_TLB_HI, vpn, i & 0xf ) ); 178 - 179 - *R_TLB_LO = ( IO_STATE(R_TLB_LO, global,no ) | 180 - IO_STATE(R_TLB_LO, valid, no ) | 181 - IO_STATE(R_TLB_LO, kernel,no ) | 182 - IO_STATE(R_TLB_LO, we, no ) | 183 - IO_FIELD(R_TLB_LO, pfn, 0 ) ); 184 - } 185 - } 186 - local_irq_restore(flags); 187 - } 188 - 189 142 /* dump the entire TLB for debug purposes */ 190 143 191 144 #if 0 ··· 190 237 * the pgd. 191 238 */ 192 239 193 - current_pgd = next->pgd; 240 + per_cpu(current_pgd, smp_processor_id()) = next->pgd; 194 241 195 242 /* switch context in the MMU */ 196 243
+7
include/asm-cris/arch-v10/atomic.h
··· 1 + #ifndef __ASM_CRIS_ARCH_ATOMIC__ 2 + #define __ASM_CRIS_ARCH_ATOMIC__ 3 + 4 + #define cris_atomic_save(addr, flags) local_irq_save(flags); 5 + #define cris_atomic_restore(addr, flags) local_irq_restore(flags); 6 + 7 + #endif
+24 -42
include/asm-cris/atomic.h
··· 4 4 #define __ASM_CRIS_ATOMIC__ 5 5 6 6 #include <asm/system.h> 7 + #include <asm/arch/atomic.h> 7 8 8 9 /* 9 10 * Atomic operations that C can't guarantee us. Useful for 10 11 * resource counting etc.. 11 12 */ 12 13 13 - /* 14 - * Make sure gcc doesn't try to be clever and move things around 15 - * on us. We need to use _exactly_ the address the user gave us, 16 - * not some alias that contains the same information. 17 - */ 18 - 19 - #define __atomic_fool_gcc(x) (*(struct { int a[100]; } *)x) 20 - 21 - typedef struct { int counter; } atomic_t; 14 + typedef struct { volatile int counter; } atomic_t; 22 15 23 16 #define ATOMIC_INIT(i) { (i) } 24 17 ··· 23 30 extern __inline__ void atomic_add(int i, volatile atomic_t *v) 24 31 { 25 32 unsigned long flags; 26 - local_save_flags(flags); 27 - local_irq_disable(); 33 + cris_atomic_save(v, flags); 28 34 v->counter += i; 29 - local_irq_restore(flags); 35 + cris_atomic_restore(v, flags); 30 36 } 31 37 32 38 extern __inline__ void atomic_sub(int i, volatile atomic_t *v) 33 39 { 34 40 unsigned long flags; 35 - local_save_flags(flags); 36 - local_irq_disable(); 41 + cris_atomic_save(v, flags); 37 42 v->counter -= i; 38 - local_irq_restore(flags); 43 + cris_atomic_restore(v, flags); 39 44 } 40 45 41 46 extern __inline__ int atomic_add_return(int i, volatile atomic_t *v) 42 47 { 43 48 unsigned long flags; 44 49 int retval; 45 - local_save_flags(flags); 46 - local_irq_disable(); 50 + cris_atomic_save(v, flags); 47 51 retval = (v->counter += i); 48 - local_irq_restore(flags); 52 + cris_atomic_restore(v, flags); 49 53 return retval; 50 54 } 51 55 ··· 52 62 { 53 63 unsigned long flags; 54 64 int retval; 55 - local_save_flags(flags); 56 - local_irq_disable(); 65 + cris_atomic_save(v, flags); 57 66 retval = (v->counter -= i); 58 - local_irq_restore(flags); 67 + cris_atomic_restore(v, flags); 59 68 return retval; 60 69 } 61 70 ··· 62 73 { 63 74 int retval; 64 75 unsigned long flags; 65 - local_save_flags(flags); 66 - 
local_irq_disable(); 76 + cris_atomic_save(v, flags); 67 77 retval = (v->counter -= i) == 0; 68 - local_irq_restore(flags); 78 + cris_atomic_restore(v, flags); 69 79 return retval; 70 80 } 71 81 72 82 extern __inline__ void atomic_inc(volatile atomic_t *v) 73 83 { 74 84 unsigned long flags; 75 - local_save_flags(flags); 76 - local_irq_disable(); 85 + cris_atomic_save(v, flags); 77 86 (v->counter)++; 78 - local_irq_restore(flags); 87 + cris_atomic_restore(v, flags); 79 88 } 80 89 81 90 extern __inline__ void atomic_dec(volatile atomic_t *v) 82 91 { 83 92 unsigned long flags; 84 - local_save_flags(flags); 85 - local_irq_disable(); 93 + cris_atomic_save(v, flags); 86 94 (v->counter)--; 87 - local_irq_restore(flags); 95 + cris_atomic_restore(v, flags); 88 96 } 89 97 90 98 extern __inline__ int atomic_inc_return(volatile atomic_t *v) 91 99 { 92 100 unsigned long flags; 93 101 int retval; 94 - local_save_flags(flags); 95 - local_irq_disable(); 102 + cris_atomic_save(v, flags); 96 103 retval = (v->counter)++; 97 - local_irq_restore(flags); 104 + cris_atomic_restore(v, flags); 98 105 return retval; 99 106 } 100 107 ··· 98 113 { 99 114 unsigned long flags; 100 115 int retval; 101 - local_save_flags(flags); 102 - local_irq_disable(); 116 + cris_atomic_save(v, flags); 103 117 retval = (v->counter)--; 104 - local_irq_restore(flags); 118 + cris_atomic_restore(v, flags); 105 119 return retval; 106 120 } 107 121 extern __inline__ int atomic_dec_and_test(volatile atomic_t *v) 108 122 { 109 123 int retval; 110 124 unsigned long flags; 111 - local_save_flags(flags); 112 - local_irq_disable(); 125 + cris_atomic_save(v, flags); 113 126 retval = --(v->counter) == 0; 114 - local_irq_restore(flags); 127 + cris_atomic_restore(v, flags); 115 128 return retval; 116 129 } 117 130 ··· 117 134 { 118 135 int retval; 119 136 unsigned long flags; 120 - local_save_flags(flags); 121 - local_irq_disable(); 137 + cris_atomic_save(v, flags); 122 138 retval = ++(v->counter) == 0; 123 - 
local_irq_restore(flags); 139 + cris_atomic_restore(v, flags); 124 140 return retval; 125 141 } 126 142
+1 -1
include/asm-cris/mmu_context.h
··· 15 15 * registers like cr3 on the i386 16 16 */ 17 17 18 - extern volatile pgd_t *current_pgd; /* defined in arch/cris/mm/fault.c */ 18 + extern volatile DEFINE_PER_CPU(pgd_t *,current_pgd); /* defined in arch/cris/mm/fault.c */ 19 19 20 20 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) 21 21 {
+9 -12
include/asm-cris/semaphore.h
··· 72 72 might_sleep(); 73 73 74 74 /* atomically decrement the semaphores count, and if its negative, we wait */ 75 - local_save_flags(flags); 76 - local_irq_disable(); 75 + cris_atomic_save(sem, flags); 77 76 failed = --(sem->count.counter) < 0; 78 - local_irq_restore(flags); 77 + cris_atomic_restore(sem, flags); 79 78 if(failed) { 80 79 __down(sem); 81 80 } ··· 94 95 might_sleep(); 95 96 96 97 /* atomically decrement the semaphores count, and if its negative, we wait */ 97 - local_save_flags(flags); 98 - local_irq_disable(); 98 + cris_atomic_save(sem, flags); 99 99 failed = --(sem->count.counter) < 0; 100 - local_irq_restore(flags); 100 + cris_atomic_restore(sem, flags); 101 101 if(failed) 102 102 failed = __down_interruptible(sem); 103 103 return(failed); ··· 107 109 unsigned long flags; 108 110 int failed; 109 111 110 - local_save_flags(flags); 111 - local_irq_disable(); 112 + cris_atomic_save(sem, flags); 112 113 failed = --(sem->count.counter) < 0; 113 - local_irq_restore(flags); 114 + cris_atomic_restore(sem, flags); 114 115 if(failed) 115 116 failed = __down_trylock(sem); 116 117 return(failed); 118 + 117 119 } 118 120 119 121 /* ··· 128 130 int wakeup; 129 131 130 132 /* atomically increment the semaphores count, and if it was negative, we wake people */ 131 - local_save_flags(flags); 132 - local_irq_disable(); 133 + cris_atomic_save(sem, flags); 133 134 wakeup = ++(sem->count.counter) <= 0; 134 - local_irq_restore(flags); 135 + cris_atomic_restore(sem, flags); 135 136 if(wakeup) { 136 137 __up(sem); 137 138 }
+7
include/asm-cris/smp.h
··· 1 1 #ifndef __ASM_SMP_H 2 2 #define __ASM_SMP_H 3 3 4 + #include <linux/cpumask.h> 5 + 6 + extern cpumask_t phys_cpu_present_map; 7 + #define cpu_possible_map phys_cpu_present_map 8 + 9 + #define __smp_processor_id() (current_thread_info()->cpu) 10 + 4 11 #endif
+1
include/asm-cris/spinlock.h
··· 1 + #include <asm/arch/spinlock.h>
+16 -3
include/asm-cris/tlbflush.h
··· 18 18 * 19 19 */ 20 20 21 + extern void __flush_tlb_all(void); 22 + extern void __flush_tlb_mm(struct mm_struct *mm); 23 + extern void __flush_tlb_page(struct vm_area_struct *vma, 24 + unsigned long addr); 25 + 26 + #ifdef CONFIG_SMP 21 27 extern void flush_tlb_all(void); 22 28 extern void flush_tlb_mm(struct mm_struct *mm); 23 29 extern void flush_tlb_page(struct vm_area_struct *vma, 24 30 unsigned long addr); 25 - extern void flush_tlb_range(struct vm_area_struct *vma, 26 - unsigned long start, 27 - unsigned long end); 31 + #else 32 + #define flush_tlb_all __flush_tlb_all 33 + #define flush_tlb_mm __flush_tlb_mm 34 + #define flush_tlb_page __flush_tlb_page 35 + #endif 36 + 37 + static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end) 38 + { 39 + flush_tlb_mm(vma->vm_mm); 40 + } 28 41 29 42 extern inline void flush_tlb_pgtables(struct mm_struct *mm, 30 43 unsigned long start, unsigned long end)