Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

csky: Fix TLB maintenance synchronization problem

The TLB invalidate instructions on the csky CPU didn't contain a barrier
operation, and we need to prevent a previous PTW response from arriving
after the TLB invalidation instruction. Of course, changing the ASID also
needs to take care of this issue.

CPU0 CPU1
=============== ===============
set_pte
sync_is() -> See the previous set_pte for all harts
tlbi.vas -> Invalidate all harts TLB entry & flush pipeline

Signed-off-by: Guo Ren <guoren@linux.alibaba.com>

Guo Ren 3b756ccd c109f424

+69 -16
+2 -1
arch/csky/abiv1/inc/abi/ckmmu.h
··· 89 89 cpwcr("cpcr8", 0x02000000); 90 90 } 91 91 92 - static inline void setup_pgd(pgd_t *pgd) 92 + static inline void setup_pgd(pgd_t *pgd, int asid) 93 93 { 94 94 cpwcr("cpcr29", __pa(pgd) | BIT(0)); 95 + write_mmu_entryhi(asid); 95 96 } 96 97 97 98 static inline pgd_t *get_pgd(void)
+30 -5
arch/csky/abiv2/inc/abi/ckmmu.h
··· 78 78 static inline void tlb_invalid_all(void) 79 79 { 80 80 #ifdef CONFIG_CPU_HAS_TLBI 81 - asm volatile("tlbi.alls\n":::"memory"); 82 81 sync_is(); 82 + asm volatile( 83 + "tlbi.alls \n" 84 + "sync.i \n" 85 + : 86 + : 87 + : "memory"); 83 88 #else 84 89 mtcr("cr<8, 15>", 0x04000000); 85 90 #endif ··· 93 88 static inline void local_tlb_invalid_all(void) 94 89 { 95 90 #ifdef CONFIG_CPU_HAS_TLBI 96 - asm volatile("tlbi.all\n":::"memory"); 97 91 sync_is(); 92 + asm volatile( 93 + "tlbi.all \n" 94 + "sync.i \n" 95 + : 96 + : 97 + : "memory"); 98 98 #else 99 99 tlb_invalid_all(); 100 100 #endif ··· 110 100 mtcr("cr<8, 15>", 0x02000000); 111 101 } 112 102 113 - static inline void setup_pgd(pgd_t *pgd) 103 + #define NOP32 ".long 0x4820c400\n" 104 + 105 + static inline void setup_pgd(pgd_t *pgd, int asid) 114 106 { 115 107 #ifdef CONFIG_CPU_HAS_TLBI 116 - mtcr("cr<28, 15>", __pa(pgd) | BIT(0)); 108 + sync_is(); 109 + #else 110 + mb(); 117 111 #endif 118 - mtcr("cr<29, 15>", __pa(pgd) | BIT(0)); 112 + asm volatile( 113 + #ifdef CONFIG_CPU_HAS_TLBI 114 + "mtcr %1, cr<28, 15> \n" 115 + #endif 116 + "mtcr %1, cr<29, 15> \n" 117 + "mtcr %0, cr< 4, 15> \n" 118 + ".rept 64 \n" 119 + NOP32 120 + ".endr \n" 121 + : 122 + :"r"(asid), "r"(__pa(pgd) | BIT(0)) 123 + :"memory"); 119 124 } 120 125 121 126 static inline pgd_t *get_pgd(void)
+1 -2
arch/csky/include/asm/mmu_context.h
··· 30 30 if (prev != next) 31 31 check_and_switch_context(next, cpu); 32 32 33 - setup_pgd(next->pgd); 34 - write_mmu_entryhi(next->context.asid.counter); 33 + setup_pgd(next->pgd, next->context.asid.counter); 35 34 36 35 flush_icache_deferred(next); 37 36 }
+1 -1
arch/csky/mm/init.c
··· 164 164 /* Setup page mask to 4k */ 165 165 write_mmu_pagemask(0); 166 166 167 - setup_pgd(swapper_pg_dir); 167 + setup_pgd(swapper_pg_dir, 0); 168 168 } 169 169 170 170 void __init fixrange_init(unsigned long start, unsigned long end,
+35 -7
arch/csky/mm/tlb.c
··· 24 24 void flush_tlb_mm(struct mm_struct *mm) 25 25 { 26 26 #ifdef CONFIG_CPU_HAS_TLBI 27 - asm volatile("tlbi.asids %0"::"r"(cpu_asid(mm))); 27 + sync_is(); 28 + asm volatile( 29 + "tlbi.asids %0 \n" 30 + "sync.i \n" 31 + : 32 + : "r" (cpu_asid(mm)) 33 + : "memory"); 28 34 #else 29 35 tlb_invalid_all(); 30 36 #endif ··· 59 53 end &= TLB_ENTRY_SIZE_MASK; 60 54 61 55 #ifdef CONFIG_CPU_HAS_TLBI 56 + sync_is(); 62 57 while (start < end) { 63 - asm volatile("tlbi.vas %0"::"r"(start | newpid)); 58 + asm volatile( 59 + "tlbi.vas %0 \n" 60 + : 61 + : "r" (start | newpid) 62 + : "memory"); 63 + 64 64 start += 2*PAGE_SIZE; 65 65 } 66 - sync_is(); 66 + asm volatile("sync.i\n"); 67 67 #else 68 68 { 69 69 unsigned long flags, oldpid; ··· 99 87 end &= TLB_ENTRY_SIZE_MASK; 100 88 101 89 #ifdef CONFIG_CPU_HAS_TLBI 90 + sync_is(); 102 91 while (start < end) { 103 - asm volatile("tlbi.vaas %0"::"r"(start)); 92 + asm volatile( 93 + "tlbi.vaas %0 \n" 94 + : 95 + : "r" (start) 96 + : "memory"); 97 + 104 98 start += 2*PAGE_SIZE; 105 99 } 106 - sync_is(); 100 + asm volatile("sync.i\n"); 107 101 #else 108 102 { 109 103 unsigned long flags, oldpid; ··· 139 121 addr &= TLB_ENTRY_SIZE_MASK; 140 122 141 123 #ifdef CONFIG_CPU_HAS_TLBI 142 - asm volatile("tlbi.vas %0"::"r"(addr | newpid)); 143 124 sync_is(); 125 + asm volatile( 126 + "tlbi.vas %0 \n" 127 + "sync.i \n" 128 + : 129 + : "r" (addr | newpid) 130 + : "memory"); 144 131 #else 145 132 { 146 133 int oldpid, idx; ··· 170 147 addr &= TLB_ENTRY_SIZE_MASK; 171 148 172 149 #ifdef CONFIG_CPU_HAS_TLBI 173 - asm volatile("tlbi.vaas %0"::"r"(addr)); 174 150 sync_is(); 151 + asm volatile( 152 + "tlbi.vaas %0 \n" 153 + "sync.i \n" 154 + : 155 + : "r" (addr) 156 + : "memory"); 175 157 #else 176 158 { 177 159 int oldpid, idx;