Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

csky: Use generic asid algorithm to implement switch_mm

Use the Linux generic ASID/VMID algorithm to implement the csky
switch_mm function. The algorithm comes from arm and works with
SMP systems. It helps reduce TLB flushes for switch_mm during
task/VM switches.

Signed-off-by: Guo Ren <ren_guo@c-sky.com>
Cc: Arnd Bergmann <arnd@arndb.de>

Guo Ren 22d55f02 a231b883

+74 -2
+6
arch/csky/abiv1/inc/abi/ckmmu.h
··· 78 78 cpwcr("cpcr8", 0x04000000); 79 79 } 80 80 81 + 82 + /* Local-CPU full TLB invalidate for the generic ASID allocator's flush callback. abiv1 has no separate local variant, so this simply wraps the full invalidate. */ static inline void local_tlb_invalid_all(void) 83 + { 84 + tlb_invalid_all(); 85 + } 86 + 81 87 static inline void tlb_invalid_indexed(void) 82 88 { 83 89 cpwcr("cpcr8", 0x02000000);
+10
arch/csky/abiv2/inc/abi/ckmmu.h
··· 85 85 #endif 86 86 } 87 87 88 + /* Local-CPU full TLB invalidate: use the tlbi.all instruction when the CPU has hardware TLBI, otherwise fall back to the generic full invalidate. NOTE(review): sync_is() presumably completes/orders the invalidate before returning — confirm against the csky ISA manual. */ static inline void local_tlb_invalid_all(void) 89 + { 90 + #ifdef CONFIG_CPU_HAS_TLBI 91 + asm volatile("tlbi.all\n":::"memory"); 92 + sync_is(); 93 + #else 94 + tlb_invalid_all(); 95 + #endif 96 + } 97 + 88 98 static inline void tlb_invalid_indexed(void) 89 99 { 90 100 mtcr("cr<8, 15>", 0x02000000);
+1
arch/csky/include/asm/mmu.h
··· 5 5 #define __ASM_CSKY_MMU_H 6 6 7 7 typedef struct { 8 + /* Allocator-managed context ID; the low CONFIG_CPU_ASID_BITS bits are the hardware ASID (masked out by cpu_asid() in asm/mmu_context.h), upper bits carry the allocator's generation. */ atomic64_t asid; 8 9 void *vdso; 9 10 } mm_context_t; 10 11
+10 -2
arch/csky/include/asm/mmu_context.h
··· 20 20 #define TLBMISS_HANDLER_SETUP_PGD_KERNEL(pgd) \ 21 21 setup_pgd(__pa(pgd), true) 22 22 23 - #define init_new_context(tsk,mm) 0 23 + /* Hardware ASID is the low CONFIG_CPU_ASID_BITS bits of the 64-bit counter. */ #define ASID_MASK ((1 << CONFIG_CPU_ASID_BITS) - 1) 24 + #define cpu_asid(mm) (atomic64_read(&mm->context.asid) & ASID_MASK) 25 + 26 + /* asid == 0 marks "no ASID allocated yet"; the allocator assigns one on first switch. */ #define init_new_context(tsk,mm) ({ atomic64_set(&(mm)->context.asid, 0); 0; }) 24 27 #define activate_mm(prev,next) switch_mm(prev, next, current) 25 28 26 29 #define destroy_context(mm) do {} while (0) 27 30 #define enter_lazy_tlb(mm, tsk) do {} while (0) 28 31 #define deactivate_mm(tsk, mm) do {} while (0) 29 32 33 + /* Implemented in arch/csky/mm/context.c: validates/allocates mm's ASID via the generic allocator. */ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu); 34 + 30 35 static inline void 31 36 switch_mm(struct mm_struct *prev, struct mm_struct *next, 32 37 struct task_struct *tsk) 33 38 { 39 + unsigned int cpu = smp_processor_id(); 40 + 34 41 /* Only consult the ASID allocator when actually changing address spaces; previously this did a full tlb_invalid_all() on every switch. */ if (prev != next) 35 - tlb_invalid_all(); 42 + check_and_switch_context(next, cpu); 36 43 37 44 TLBMISS_HANDLER_SETUP_PGD(next->pgd); 45 + /* NOTE(review): writes the raw 64-bit counter (generation bits included) rather than cpu_asid(next); assumes the MMU ignores bits above ASID_MASK — confirm. */ write_mmu_entryhi(next->context.asid.counter); 38 46 } 39 47 #endif /* __ASM_CSKY_MMU_CONTEXT_H */
+1
arch/csky/mm/Makefile
··· 13 13 obj-y += syscache.o 14 14 obj-y += tlb.o 15 15 obj-y += asid.o 16 + obj-y += context.o
+46
arch/csky/mm/context.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 + 4 + #include <linux/bitops.h> 5 + #include <linux/sched.h> 6 + #include <linux/slab.h> 7 + #include <linux/mm.h> 8 + 9 + #include <asm/asid.h> 10 + #include <asm/mmu_context.h> 11 + #include <asm/smp.h> 12 + #include <asm/tlbflush.h> 13 + 14 + static DEFINE_PER_CPU(atomic64_t, active_asids); 15 + static DEFINE_PER_CPU(u64, reserved_asids); 16 + 17 + struct asid_info asid_info; 18 + 19 + void check_and_switch_context(struct mm_struct *mm, unsigned int cpu) 20 + { 21 + asid_check_context(&asid_info, &mm->context.asid, cpu, mm); 22 + } 23 + 24 + static void asid_flush_cpu_ctxt(void) 25 + { 26 + local_tlb_invalid_all(); 27 + } 28 + 29 + static int asids_init(void) 30 + { 31 + BUG_ON(((1 << CONFIG_CPU_ASID_BITS) - 1) <= num_possible_cpus()); 32 + 33 + if (asid_allocator_init(&asid_info, CONFIG_CPU_ASID_BITS, 1, 34 + asid_flush_cpu_ctxt)) 35 + panic("Unable to initialize ASID allocator for %lu ASIDs\n", 36 + NUM_ASIDS(&asid_info)); 37 + 38 + asid_info.active = &active_asids; 39 + asid_info.reserved = &reserved_asids; 40 + 41 + pr_info("ASID allocator initialised with %lu entries\n", 42 + NUM_CTXT_ASIDS(&asid_info)); 43 + 44 + return 0; 45 + } 46 + early_initcall(asids_init);