
csky: Cache and TLB routines

This patch adds the cache and TLB sync code for abiv1 & abiv2.

Signed-off-by: Guo Ren <ren_guo@c-sky.com>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>

Guo Ren 00a9730e 4859bfca

+838
+52
arch/csky/abiv1/cacheflush.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/syscalls.h>
#include <linux/spinlock.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long addr;

	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_arch_1, &(page)->flags);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too. But that
	 * case is for exec env/arg pages and those are 99% certainly going to
	 * get faulted into the tlb (and thus flushed) anyway.
	 */
	addr = (unsigned long) page_address(page);
	dcache_wb_range(addr, addr + PAGE_SIZE);
}

void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *pte)
{
	unsigned long addr;
	struct page *page;
	unsigned long pfn;

	pfn = pte_pfn(*pte);
	if (unlikely(!pfn_valid(pfn)))
		return;

	page = pfn_to_page(pfn);
	addr = (unsigned long) page_address(page);

	if (vma->vm_flags & VM_EXEC ||
	    pages_do_alias(addr, address & PAGE_MASK))
		cache_wbinv_all();

	clear_bit(PG_arch_1, &(page)->flags);
}
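Note: pages_do_alias() is provided elsewhere in the port, not in this hunk. On virtually indexed caches it conventionally tests whether two virtual addresses land in different cache colours; a minimal sketch of that convention, assuming an SHMLBA-sized alias window (the exact csky definition may differ):

/*
 * Illustrative sketch only -- not from this patch. A conventional VIPT
 * alias check: two addresses alias when they differ in the page-colour
 * bits between PAGE_SIZE and the alias window (SHMLBA assumed here).
 */
#define pages_do_alias(addr1, addr2) \
	(((addr1) ^ (addr2)) & (SHMLBA - 1) & PAGE_MASK)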
+49
arch/csky/abiv1/inc/abi/cacheflush.h
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ABI_CSKY_CACHEFLUSH_H
#define __ABI_CSKY_CACHEFLUSH_H

#include <linux/compiler.h>
#include <asm/string.h>
#include <asm/cache.h>

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);

#define flush_cache_mm(mm)			cache_wbinv_all()
#define flush_cache_page(vma, page, pfn)	cache_wbinv_all()
#define flush_cache_dup_mm(mm)			cache_wbinv_all()

/*
 * cache_wbinv_range(start, end) is broken when current->mm != vma->vm_mm,
 * so fall back to cache_wbinv_all() here; this should be improved in the
 * future.
 */
#define flush_cache_range(vma, start, end)	cache_wbinv_all()
#define flush_cache_vmap(start, end)		cache_wbinv_range(start, end)
#define flush_cache_vunmap(start, end)		cache_wbinv_range(start, end)

#define flush_icache_page(vma, page)		cache_wbinv_all()
#define flush_icache_range(start, end)		cache_wbinv_range(start, end)

#define flush_icache_user_range(vma, pg, adr, len) \
	cache_wbinv_range(adr, adr + len)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
	cache_wbinv_all(); \
	memcpy(dst, src, len); \
	cache_wbinv_all(); \
} while (0)

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
	cache_wbinv_all(); \
	memcpy(dst, src, len); \
	cache_wbinv_all(); \
} while (0)

#define flush_dcache_mmap_lock(mapping)		do {} while (0)
#define flush_dcache_mmap_unlock(mapping)	do {} while (0)

#endif /* __ABI_CSKY_CACHEFLUSH_H */
+60
arch/csky/abiv2/cacheflush.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/cache.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <asm/cache.h>

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	unsigned long start;

	start = (unsigned long) kmap_atomic(page);

	cache_wbinv_range(start, start + PAGE_SIZE);

	kunmap_atomic((void *)start);
}

void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long vaddr, int len)
{
	unsigned long kaddr;

	kaddr = (unsigned long) kmap_atomic(page) + (vaddr & ~PAGE_MASK);

	cache_wbinv_range(kaddr, kaddr + len);

	kunmap_atomic((void *)kaddr);
}

void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *pte)
{
	unsigned long addr, pfn;
	struct page *page;
	void *va;

	if (!(vma->vm_flags & VM_EXEC))
		return;

	pfn = pte_pfn(*pte);
	if (unlikely(!pfn_valid(pfn)))
		return;

	page = pfn_to_page(pfn);
	if (page == ZERO_PAGE(0))
		return;

	va = page_address(page);
	addr = (unsigned long) va;

	if (va == NULL && PageHighMem(page))
		addr = (unsigned long) kmap_atomic(page);

	cache_wbinv_range(addr, addr + PAGE_SIZE);

	if (va == NULL && PageHighMem(page))
		kunmap_atomic((void *) addr);
}
+46
arch/csky/abiv2/inc/abi/cacheflush.h
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ABI_CSKY_CACHEFLUSH_H
#define __ABI_CSKY_CACHEFLUSH_H

/* Keep includes the same across arches. */
#include <linux/mm.h>

/*
 * The cache doesn't need to be flushed when TLB entries change because
 * the cache is mapped to physical memory, not virtual memory.
 */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)

#define flush_cache_range(vma, start, end) \
	do { \
		if (vma->vm_flags & VM_EXEC) \
			icache_inv_all(); \
	} while (0)

#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define flush_dcache_page(page)			do { } while (0)
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

#define flush_icache_range(start, end)		cache_wbinv_range(start, end)

void flush_icache_page(struct vm_area_struct *vma, struct page *page);
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long vaddr, int len);

#define flush_cache_vmap(start, end)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
	memcpy(dst, src, len); \
	cache_wbinv_range((unsigned long)dst, (unsigned long)dst + len); \
} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)

#endif /* __ABI_CSKY_CACHEFLUSH_H */
+49
arch/csky/include/asm/barrier.h
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_BARRIER_H
#define __ASM_CSKY_BARRIER_H

#ifndef __ASSEMBLY__

#define nop()	asm volatile ("nop\n":::"memory")

/*
 * sync:      completion barrier
 * sync.s:    completion barrier and shareable to other cores
 * sync.i:    completion barrier with flush cpu pipeline
 * sync.is:   completion barrier with flush cpu pipeline and shareable to
 *            other cores
 *
 * bar.brwarw:  ordering barrier for all load/store instructions before it
 * bar.brwarws: ordering barrier for all load/store instructions before it
 *              and shareable to other cores
 * bar.brar:    ordering barrier for all load instructions before it
 * bar.brars:   ordering barrier for all load instructions before it
 *              and shareable to other cores
 * bar.bwaw:    ordering barrier for all store instructions before it
 * bar.bwaws:   ordering barrier for all store instructions before it
 *              and shareable to other cores
 */

#ifdef CONFIG_CPU_HAS_CACHEV2
#define mb()		asm volatile ("bar.brwarw\n":::"memory")
#define rmb()		asm volatile ("bar.brar\n":::"memory")
#define wmb()		asm volatile ("bar.bwaw\n":::"memory")

#ifdef CONFIG_SMP
#define __smp_mb()	asm volatile ("bar.brwarws\n":::"memory")
#define __smp_rmb()	asm volatile ("bar.brars\n":::"memory")
#define __smp_wmb()	asm volatile ("bar.bwaws\n":::"memory")
#endif /* CONFIG_SMP */

#define sync_is()	asm volatile ("sync.is\n":::"memory")

#else /* !CONFIG_CPU_HAS_CACHEV2 */
#define mb()		asm volatile ("sync\n":::"memory")
#endif

#include <asm-generic/barrier.h>

#endif /* __ASSEMBLY__ */
#endif /* __ASM_CSKY_BARRIER_H */
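As a usage sketch (not part of the patch): with CONFIG_SMP and CACHEV2 enabled, the generic smp_wmb()/smp_rmb() resolve to the bar.bwaws/bar.brars definitions above and pair in the usual publish/consume pattern. The flag and payload names below are made up for illustration:

/* Illustrative pairing of the store/load barriers; not from this patch. */
static int payload;
static int ready;

static void producer(void)
{
	payload = 42;		/* store the data ...                        */
	smp_wmb();		/* bar.bwaws: order it before the flag store */
	WRITE_ONCE(ready, 1);	/* ... then publish                          */
}

static void consumer(void)
{
	if (READ_ONCE(ready)) {	/* observe the flag ...                      */
		smp_rmb();	/* bar.brars: order it before the data load  */
		pr_info("payload = %d\n", payload);
	}
}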
+30
arch/csky/include/asm/cache.h
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ASM_CSKY_CACHE_H
#define __ASM_CSKY_CACHE_H

/* bytes per L1 cache line */
#define L1_CACHE_SHIFT	CONFIG_L1_CACHE_SHIFT

#define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)

#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES

#ifndef __ASSEMBLY__

void dcache_wb_line(unsigned long start);

void icache_inv_range(unsigned long start, unsigned long end);
void icache_inv_all(void);

void dcache_wb_range(unsigned long start, unsigned long end);
void dcache_wbinv_all(void);

void cache_wbinv_range(unsigned long start, unsigned long end);
void cache_wbinv_all(void);

void dma_wbinv_range(unsigned long start, unsigned long end);
void dma_wb_range(unsigned long start, unsigned long end);

#endif
#endif /* __ASM_CSKY_CACHE_H */
+9
arch/csky/include/asm/cacheflush.h
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_CACHEFLUSH_H
#define __ASM_CSKY_CACHEFLUSH_H

#include <abi/cacheflush.h>

#endif /* __ASM_CSKY_CACHEFLUSH_H */
+24
arch/csky/include/asm/io.h
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_IO_H
#define __ASM_CSKY_IO_H

#include <abi/pgtable-bits.h>
#include <linux/types.h>
#include <linux/version.h>

extern void __iomem *ioremap(phys_addr_t offset, size_t size);

extern void iounmap(void *addr);

extern int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
			    size_t size, unsigned long flags);

#define ioremap_nocache(phy, sz)	ioremap(phy, sz)
#define ioremap_wc			ioremap_nocache
#define ioremap_wt			ioremap_nocache

#include <asm-generic/io.h>

#endif /* __ASM_CSKY_IO_H */
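A brief usage sketch of the interface declared here (the physical address, size, and register offset below are invented for illustration):

/* Illustrative only: map a device region, poke a register, unmap. */
static int demo_map_device(void)
{
	/* 0xffc00000 and 0x1000 are made-up values */
	void __iomem *regs = ioremap(0xffc00000, 0x1000);

	if (!regs)
		return -ENOMEM;

	writel(0x1, regs + 0x4);	/* accessors come from asm-generic/io.h */
	iounmap((void *)regs);		/* note: this port's iounmap() takes void * */
	return 0;
}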
+25
arch/csky/include/asm/tlb.h
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_TLB_H
#define __ASM_CSKY_TLB_H

#include <asm/cacheflush.h>

#define tlb_start_vma(tlb, vma) \
	do { \
		if (!tlb->fullmm) \
			flush_cache_range(vma, vma->vm_start, vma->vm_end); \
	} while (0)

#define tlb_end_vma(tlb, vma) \
	do { \
		if (!tlb->fullmm) \
			flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
	} while (0)

#define tlb_flush(tlb)	flush_tlb_mm((tlb)->mm)

#include <asm-generic/tlb.h>

#endif /* __ASM_CSKY_TLB_H */
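For context, a simplified sketch (an assumption about the usual asm-generic/tlb.h flow, not quoted from it) of how the generic unmap path brackets each VMA with these hooks, so on csky the cache is flushed before the PTEs go away and the TLB afterwards:

/* Simplified sketch of how the generic mmu_gather drives the hooks above. */
static void unmap_vma_sketch(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	tlb_start_vma(tlb, vma);  /* csky: flush_cache_range() unless fullmm */

	/* ... the generic code clears the PTEs in [start, end) here ... */

	tlb_end_vma(tlb, vma);    /* csky: flush_tlb_range() unless fullmm */
}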
+25
arch/csky/include/asm/tlbflush.h
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_TLBFLUSH_H
#define __ASM_TLBFLUSH_H

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLB entries
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 */
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

extern void flush_tlb_one(unsigned long vaddr);

#endif
+13
arch/csky/include/uapi/asm/cachectl.h
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ASM_CSKY_CACHECTL_H
#define __ASM_CSKY_CACHECTL_H

/*
 * See "man cacheflush"
 */
#define ICACHE  (1<<0)
#define DCACHE  (1<<1)
#define BCACHE  (ICACHE|DCACHE)

#endif /* __ASM_CSKY_CACHECTL_H */
+126
arch/csky/mm/cachev1.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/spinlock.h>
#include <asm/cache.h>
#include <abi/reg_ops.h>

/* for L1-cache */
#define INS_CACHE	(1 << 0)
#define DATA_CACHE	(1 << 1)
#define CACHE_INV	(1 << 4)
#define CACHE_CLR	(1 << 5)
#define CACHE_OMS	(1 << 6)
#define CACHE_ITS	(1 << 7)
#define CACHE_LICF	(1 << 31)

/* for L2-cache */
#define CR22_LEVEL_SHIFT	(1)
#define CR22_SET_SHIFT		(7)
#define CR22_WAY_SHIFT		(30)
#define CR22_WAY_SHIFT_L2	(29)

static DEFINE_SPINLOCK(cache_lock);

static inline void cache_op_line(unsigned long i, unsigned int val)
{
	mtcr("cr22", i);
	mtcr("cr17", val);
}

#define CCR2_L2E	(1 << 3)
static void cache_op_all(unsigned int value, unsigned int l2)
{
	mtcr("cr17", value | CACHE_CLR);
	mb();

	if (l2 && (mfcr_ccr2() & CCR2_L2E)) {
		mtcr("cr24", value | CACHE_CLR);
		mb();
	}
}

static void cache_op_range(
	unsigned int start,
	unsigned int end,
	unsigned int value,
	unsigned int l2)
{
	unsigned long i, flags;
	unsigned int val = value | CACHE_CLR | CACHE_OMS;
	bool l2_sync;

	if (unlikely((end - start) >= PAGE_SIZE) ||
	    unlikely(start < PAGE_OFFSET) ||
	    unlikely(start >= PAGE_OFFSET + LOWMEM_LIMIT)) {
		cache_op_all(value, l2);
		return;
	}

	if ((mfcr_ccr2() & CCR2_L2E) && l2)
		l2_sync = 1;
	else
		l2_sync = 0;

	spin_lock_irqsave(&cache_lock, flags);

	i = start & ~(L1_CACHE_BYTES - 1);
	for (; i < end; i += L1_CACHE_BYTES) {
		cache_op_line(i, val);
		if (l2_sync) {
			mb();
			mtcr("cr24", val);
		}
	}
	spin_unlock_irqrestore(&cache_lock, flags);

	mb();
}

void dcache_wb_line(unsigned long start)
{
	asm volatile("idly4\n":::"memory");
	cache_op_line(start, DATA_CACHE|CACHE_CLR);
	mb();
}

void icache_inv_range(unsigned long start, unsigned long end)
{
	cache_op_range(start, end, INS_CACHE|CACHE_INV, 0);
}

void icache_inv_all(void)
{
	cache_op_all(INS_CACHE|CACHE_INV, 0);
}

void dcache_wb_range(unsigned long start, unsigned long end)
{
	cache_op_range(start, end, DATA_CACHE|CACHE_CLR, 0);
}

void dcache_wbinv_all(void)
{
	cache_op_all(DATA_CACHE|CACHE_CLR|CACHE_INV, 0);
}

void cache_wbinv_range(unsigned long start, unsigned long end)
{
	cache_op_range(start, end, INS_CACHE|DATA_CACHE|CACHE_CLR|CACHE_INV, 0);
}
EXPORT_SYMBOL(cache_wbinv_range);

void cache_wbinv_all(void)
{
	cache_op_all(INS_CACHE|DATA_CACHE|CACHE_CLR|CACHE_INV, 0);
}

void dma_wbinv_range(unsigned long start, unsigned long end)
{
	cache_op_range(start, end, DATA_CACHE|CACHE_CLR|CACHE_INV, 1);
}

void dma_wb_range(unsigned long start, unsigned long end)
{
	cache_op_range(start, end, DATA_CACHE|CACHE_INV, 1);
}
+79
arch/csky/mm/cachev2.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/spinlock.h>
#include <linux/smp.h>
#include <asm/cache.h>
#include <asm/barrier.h>

inline void dcache_wb_line(unsigned long start)
{
	asm volatile("dcache.cval1 %0\n"::"r"(start):"memory");
	sync_is();
}

void icache_inv_range(unsigned long start, unsigned long end)
{
	unsigned long i = start & ~(L1_CACHE_BYTES - 1);

	for (; i < end; i += L1_CACHE_BYTES)
		asm volatile("icache.iva %0\n"::"r"(i):"memory");
	sync_is();
}

void icache_inv_all(void)
{
	asm volatile("icache.ialls\n":::"memory");
	sync_is();
}

void dcache_wb_range(unsigned long start, unsigned long end)
{
	unsigned long i = start & ~(L1_CACHE_BYTES - 1);

	for (; i < end; i += L1_CACHE_BYTES)
		asm volatile("dcache.cval1 %0\n"::"r"(i):"memory");
	sync_is();
}

void dcache_inv_range(unsigned long start, unsigned long end)
{
	unsigned long i = start & ~(L1_CACHE_BYTES - 1);

	for (; i < end; i += L1_CACHE_BYTES)
		asm volatile("dcache.civa %0\n"::"r"(i):"memory");
	sync_is();
}

void cache_wbinv_range(unsigned long start, unsigned long end)
{
	unsigned long i = start & ~(L1_CACHE_BYTES - 1);

	for (; i < end; i += L1_CACHE_BYTES)
		asm volatile("dcache.cval1 %0\n"::"r"(i):"memory");
	sync_is();

	i = start & ~(L1_CACHE_BYTES - 1);
	for (; i < end; i += L1_CACHE_BYTES)
		asm volatile("icache.iva %0\n"::"r"(i):"memory");
	sync_is();
}
EXPORT_SYMBOL(cache_wbinv_range);

void dma_wbinv_range(unsigned long start, unsigned long end)
{
	unsigned long i = start & ~(L1_CACHE_BYTES - 1);

	for (; i < end; i += L1_CACHE_BYTES)
		asm volatile("dcache.civa %0\n"::"r"(i):"memory");
	sync_is();
}

void dma_wb_range(unsigned long start, unsigned long end)
{
	unsigned long i = start & ~(L1_CACHE_BYTES - 1);

	for (; i < end; i += L1_CACHE_BYTES)
		asm volatile("dcache.civa %0\n"::"r"(i):"memory");
	sync_is();
}
+32
arch/csky/mm/syscache.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/syscalls.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/cachectl.h>

SYSCALL_DEFINE3(cacheflush,
		void __user *, addr,
		unsigned long, bytes,
		int, cache)
{
	switch (cache) {
	case ICACHE:
		icache_inv_range((unsigned long)addr,
				 (unsigned long)addr + bytes);
		break;
	case DCACHE:
		dcache_wb_range((unsigned long)addr,
				(unsigned long)addr + bytes);
		break;
	case BCACHE:
		cache_wbinv_range((unsigned long)addr,
				  (unsigned long)addr + bytes);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
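A hedged userspace sketch of calling this syscall, e.g. after emitting JIT-generated instructions (that csky wires the call up as __NR_cacheflush and that it is reachable via syscall(2) are assumptions here, not shown in this patch):

/* Illustrative userspace caller; flag values mirror uapi/asm/cachectl.h. */
#include <sys/syscall.h>
#include <unistd.h>

#define ICACHE	(1 << 0)
#define DCACHE	(1 << 1)
#define BCACHE	(ICACHE | DCACHE)

/*
 * Write back the D-cache and invalidate the I-cache over freshly
 * written instructions before jumping to them.
 */
static int flush_jit_region(void *addr, unsigned long bytes)
{
	return syscall(__NR_cacheflush, addr, bytes, BCACHE);
}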
+219
arch/csky/mm/tlb.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>

#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/setup.h>

#define CSKY_TLB_SIZE CONFIG_CPU_TLB_SIZE

void flush_tlb_all(void)
{
	tlb_invalid_all();
}

void flush_tlb_mm(struct mm_struct *mm)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0)
		drop_mmu_context(mm, cpu);

	tlb_invalid_all();
}

/*
 * Restore the saved ASID in entryhi. If the old ASID matches the one
 * just used for probing, write a different value first so the micro-TLB
 * observes an ASID change and is invalidated.
 */
#define restore_asid_inv_utlb(oldpid, newpid) \
do { \
	if ((oldpid & ASID_MASK) == newpid) \
		write_mmu_entryhi(oldpid + 1); \
	write_mmu_entryhi(oldpid); \
} while (0)

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;
		int newpid = cpu_asid(cpu, mm);

		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;
		if (size <= CSKY_TLB_SIZE/2) {
			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
#ifdef CONFIG_CPU_HAS_TLBI
			while (start < end) {
				asm volatile("tlbi.vaas %0"
					     ::"r"(start | newpid));
				start += (PAGE_SIZE << 1);
			}
			sync_is();
#else
			{
			int oldpid = read_mmu_entryhi();

			while (start < end) {
				int idx;

				write_mmu_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				tlb_probe();
				idx = read_mmu_index();
				if (idx >= 0)
					tlb_invalid_indexed();
			}
			restore_asid_inv_utlb(oldpid, newpid);
			}
#endif
		} else {
			drop_mmu_context(mm, cpu);
		}
		local_irq_restore(flags);
	}
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (size <= CSKY_TLB_SIZE) {
		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);
#ifdef CONFIG_CPU_HAS_TLBI
		while (start < end) {
			asm volatile("tlbi.vaas %0"::"r"(start));
			start += (PAGE_SIZE << 1);
		}
		sync_is();
#else
		{
		int oldpid = read_mmu_entryhi();

		while (start < end) {
			int idx;

			write_mmu_entryhi(start);
			start += (PAGE_SIZE << 1);
			tlb_probe();
			idx = read_mmu_index();
			if (idx >= 0)
				tlb_invalid_indexed();
		}
		restore_asid_inv_utlb(oldpid, 0);
		}
#endif
	} else {
		flush_tlb_all();
	}

	local_irq_restore(flags);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();
	int newpid = cpu_asid(cpu, vma->vm_mm);

	if (!vma || cpu_context(cpu, vma->vm_mm) != 0) {
		page &= (PAGE_MASK << 1);

#ifdef CONFIG_CPU_HAS_TLBI
		asm volatile("tlbi.vaas %0"::"r"(page | newpid));
		sync_is();
#else
		{
		int oldpid, idx;
		unsigned long flags;

		local_irq_save(flags);
		oldpid = read_mmu_entryhi();
		write_mmu_entryhi(page | newpid);
		tlb_probe();
		idx = read_mmu_index();
		if (idx >= 0)
			tlb_invalid_indexed();

		restore_asid_inv_utlb(oldpid, newpid);
		local_irq_restore(flags);
		}
#endif
	}
}

/*
 * Remove one kernel space TLB entry. This entry is assumed to be marked
 * global so we don't do the ASID thing.
 */
void flush_tlb_one(unsigned long page)
{
	int oldpid;

	oldpid = read_mmu_entryhi();
	page &= (PAGE_MASK << 1);

#ifdef CONFIG_CPU_HAS_TLBI
	page = page | (oldpid & 0xfff);
	asm volatile("tlbi.vaas %0"::"r"(page));
	sync_is();
#else
	{
	int idx;
	unsigned long flags;

	page = page | (oldpid & 0xff);

	local_irq_save(flags);
	write_mmu_entryhi(page);
	tlb_probe();
	idx = read_mmu_index();
	if (idx >= 0)
		tlb_invalid_indexed();
	restore_asid_inv_utlb(oldpid, oldpid);
	local_irq_restore(flags);
	}
#endif
}
EXPORT_SYMBOL(flush_tlb_one);

/* show current 32 jtlbs */
void show_jtlb_table(void)
{
	unsigned long flags;
	int entryhi, entrylo0, entrylo1;
	int entry;
	int oldpid;

	local_irq_save(flags);
	entry = 0;
	pr_info("\n\n\n");

	oldpid = read_mmu_entryhi();
	while (entry < CSKY_TLB_SIZE) {
		write_mmu_index(entry);
		tlb_read();
		entryhi  = read_mmu_entryhi();
		entrylo0 = read_mmu_entrylo0();
		entrylo1 = read_mmu_entrylo1();
		pr_info("jtlb[%d]: entryhi - 0x%x; entrylo0 - 0x%x;"
			" entrylo1 - 0x%x\n",
			entry, entryhi, entrylo0, entrylo1);
		entry++;
	}
	write_mmu_entryhi(oldpid);
	local_irq_restore(flags);
}