
csky: MMU and page table management

This patch adds the files related to memory management. Here is our
memory layout:

Fixmap  : 0xffc02000 – 0xfffff000   (4MB - 12KB)
Pkmap   : 0xff800000 – 0xffc00000   (4MB)
Vmalloc : 0xf0200000 – 0xff000000   (238MB)
Lowmem  : 0x80000000 – 0xc0000000   (1GB)

The abiv1 CPU (CK610) has a VIPT cache and doesn't support highmem.
The abiv2 CPUs all have PIPT caches and can support highmem.

Lowmem is directly mapped by the msa0 & msa1 registers, so we needn't
set up page tables for it.
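
As a quick sanity check, the region sizes follow directly from the
boundary addresses above (a standalone sketch, not part of the patch):

#include <stdio.h>

int main(void)
{
	/* (end - start) for each region in the layout above */
	printf("fixmap  %#lx\n", 0xfffff000ul - 0xffc02000ul); /* 0x003fd000 = 4MB - 12KB */
	printf("pkmap   %#lx\n", 0xffc00000ul - 0xff800000ul); /* 0x00400000 = 4MB */
	printf("vmalloc %#lx\n", 0xff000000ul - 0xf0200000ul); /* 0x0ee00000 = 238MB */
	printf("lowmem  %#lx\n", 0xc0000000ul - 0x80000000ul); /* 0x40000000 = 1GB */
	return 0;
}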

Link: https://lore.kernel.org/lkml/20180518215548.GH17671@n2100.armlinux.org.uk/
Signed-off-by: Guo Ren <ren_guo@c-sky.com>
Cc: Christoph Hellwig <hch@infradead.org>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>

Guo Ren 013de2d6 00a9730e

20 files changed, 1620 insertions(+)
+75
arch/csky/abiv1/inc/abi/ckmmu.h
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_CKMMUV1_H
#define __ASM_CSKY_CKMMUV1_H
#include <abi/reg_ops.h>

static inline int read_mmu_index(void)
{
	return cprcr("cpcr0");
}

static inline void write_mmu_index(int value)
{
	cpwcr("cpcr0", value);
}

static inline int read_mmu_entrylo0(void)
{
	return cprcr("cpcr2") << 6;
}

static inline int read_mmu_entrylo1(void)
{
	return cprcr("cpcr3") << 6;
}

static inline void write_mmu_pagemask(int value)
{
	cpwcr("cpcr6", value);
}

static inline int read_mmu_entryhi(void)
{
	return cprcr("cpcr4");
}

static inline void write_mmu_entryhi(int value)
{
	cpwcr("cpcr4", value);
}

/*
 * TLB operations.
 */
static inline void tlb_probe(void)
{
	cpwcr("cpcr8", 0x80000000);
}

static inline void tlb_read(void)
{
	cpwcr("cpcr8", 0x40000000);
}

static inline void tlb_invalid_all(void)
{
	cpwcr("cpcr8", 0x04000000);
}

static inline void tlb_invalid_indexed(void)
{
	cpwcr("cpcr8", 0x02000000);
}

static inline void setup_pgd(unsigned long pgd, bool kernel)
{
	cpwcr("cpcr29", pgd);
}

static inline unsigned long get_pgd(void)
{
	return cprcr("cpcr29");
}
#endif /* __ASM_CSKY_CKMMUV1_H */
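
To see how these accessors compose, here is a hedged sketch (a
hypothetical helper, not in this patch) of invalidating one page's TLB
entry in the classic probe-then-index style these registers suggest,
assuming (as on MIPS-style MMUs) that a failed probe leaves a negative
value in the index register:

/* Hypothetical: drop the TLB entry for one virtual address */
static inline void example_tlb_invalid_page(unsigned long addr)
{
	write_mmu_entryhi(addr & PAGE_MASK);
	tlb_probe();
	if (read_mmu_index() >= 0)	/* probe hit (assumption) */
		tlb_invalid_indexed();
}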
+27
arch/csky/abiv1/inc/abi/page.h
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

extern unsigned long shm_align_mask;
extern void flush_dcache_page(struct page *page);

static inline unsigned long pages_do_alias(unsigned long addr1,
					   unsigned long addr2)
{
	return (addr1 ^ addr2) & shm_align_mask;
}

static inline void clear_user_page(void *addr, unsigned long vaddr,
				   struct page *page)
{
	clear_page(addr);
	if (pages_do_alias((unsigned long) addr, vaddr & PAGE_MASK))
		flush_dcache_page(page);
}

static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
				  struct page *page)
{
	copy_page(to, from);
	if (pages_do_alias((unsigned long) to, vaddr & PAGE_MASK))
		flush_dcache_page(page);
}
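
pages_do_alias() reports whether two virtual addresses land in
different colours of the VIPT cache. A worked example with
shm_align_mask = 0x1fff, the value set in abiv1/mmap.c below
(hypothetical addresses):

static inline int example_needs_flush(void)
{
	/* (0x1000 ^ 0x2000) & 0x1fff = 0x1000: different colour */
	/* (0x1000 ^ 0x3000) & 0x1fff = 0:      same colour      */
	return pages_do_alias(0x1000, 0x2000) != 0;	/* true */
}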
+37
arch/csky/abiv1/inc/abi/pgtable-bits.h
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_PGTABLE_BITS_H
#define __ASM_CSKY_PGTABLE_BITS_H

/* implemented in software */
#define _PAGE_ACCESSED		(1<<3)
#define PAGE_ACCESSED_BIT	(3)

#define _PAGE_READ		(1<<1)
#define _PAGE_WRITE		(1<<2)
#define _PAGE_PRESENT		(1<<0)

#define _PAGE_MODIFIED		(1<<4)
#define PAGE_MODIFIED_BIT	(4)

/* implemented in hardware */
#define _PAGE_GLOBAL		(1<<6)

#define _PAGE_VALID		(1<<7)
#define PAGE_VALID_BIT		(7)

#define _PAGE_DIRTY		(1<<8)
#define PAGE_DIRTY_BIT		(8)

#define _PAGE_CACHE		(3<<9)
#define _PAGE_UNCACHE		(2<<9)

#define _CACHE_MASK		(7<<9)

#define _CACHE_CACHED		(_PAGE_VALID | _PAGE_CACHE)
#define _CACHE_UNCACHED		(_PAGE_VALID | _PAGE_UNCACHE)

#define HAVE_ARCH_UNMAPPED_AREA

#endif /* __ASM_CSKY_PGTABLE_BITS_H */
+66
arch/csky/abiv1/mmap.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched.h>
#include <linux/random.h>
#include <linux/io.h>

unsigned long shm_align_mask = (0x4000 >> 1) - 1;	/* Sane caches */

#define COLOUR_ALIGN(addr, pgoff) \
	((((addr) + shm_align_mask) & ~shm_align_mask) + \
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct vm_area_struct *vmm;
	int do_color_align;

	if (flags & MAP_FIXED) {
		/*
		 * We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
			((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;
	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);
		vmm = find_vma(current->mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vmm || addr + len <= vmm->vm_start))
			return addr;
	}
	addr = TASK_UNMAPPED_BASE;
	if (do_color_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
		/* At this point: (!vmm || addr < vmm->vm_end). */
		if (TASK_SIZE - len < addr)
			return -ENOMEM;
		if (!vmm || addr + len <= vmm->vm_start)
			return addr;
		addr = vmm->vm_end;
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}
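
A worked example of the COLOUR_ALIGN arithmetic with shm_align_mask =
0x1fff and hypothetical inputs:

static inline unsigned long example_colour_align(void)
{
	/* ((0x10001234 + 0x1fff) & ~0x1fff) + ((1 << PAGE_SHIFT) & 0x1fff)
	 * = 0x10002000 + 0x1000 = 0x10003000, i.e. rounded up to an 8KB
	 * colour boundary plus the colour implied by pgoff = 1 */
	return COLOUR_ALIGN(0x10001234UL, 1);
}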
+87
arch/csky/abiv2/inc/abi/ckmmu.h
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_CKMMUV2_H
#define __ASM_CSKY_CKMMUV2_H

#include <abi/reg_ops.h>
#include <asm/barrier.h>

static inline int read_mmu_index(void)
{
	return mfcr("cr<0, 15>");
}

static inline void write_mmu_index(int value)
{
	mtcr("cr<0, 15>", value);
}

static inline int read_mmu_entrylo0(void)
{
	return mfcr("cr<2, 15>");
}

static inline int read_mmu_entrylo1(void)
{
	return mfcr("cr<3, 15>");
}

static inline void write_mmu_pagemask(int value)
{
	mtcr("cr<6, 15>", value);
}

static inline int read_mmu_entryhi(void)
{
	return mfcr("cr<4, 15>");
}

static inline void write_mmu_entryhi(int value)
{
	mtcr("cr<4, 15>", value);
}

/*
 * TLB operations.
 */
static inline void tlb_probe(void)
{
	mtcr("cr<8, 15>", 0x80000000);
}

static inline void tlb_read(void)
{
	mtcr("cr<8, 15>", 0x40000000);
}

static inline void tlb_invalid_all(void)
{
#ifdef CONFIG_CPU_HAS_TLBI
	asm volatile("tlbi.alls\n":::"memory");
	sync_is();
#else
	mtcr("cr<8, 15>", 0x04000000);
#endif
}

static inline void tlb_invalid_indexed(void)
{
	mtcr("cr<8, 15>", 0x02000000);
}

/* setup hardrefill pgd */
static inline unsigned long get_pgd(void)
{
	return mfcr("cr<29, 15>");
}

static inline void setup_pgd(unsigned long pgd, bool kernel)
{
	if (kernel)
		mtcr("cr<28, 15>", pgd);
	else
		mtcr("cr<29, 15>", pgd);
}

#endif /* __ASM_CSKY_CKMMUV2_H */
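
Unlike abiv1, abiv2 keeps separate PGD base registers for kernel
(cr<28, 15>) and user (cr<29, 15>) hardware refill, so a context
switch only needs to retarget the user register. A minimal sketch,
assuming the physical address is what the hardware expects (as the
TLBMISS_HANDLER_SETUP_PGD call in mm/init.c below suggests):

/* Hypothetical: point hardware refill at the next task's page table */
static inline void example_switch_pgd(struct mm_struct *next)
{
	setup_pgd(__pa(next->pgd), false);	/* user pgd only */
}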
+14
arch/csky/abiv2/inc/abi/page.h
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

static inline void clear_user_page(void *addr, unsigned long vaddr,
				   struct page *page)
{
	clear_page(addr);
}

static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
				  struct page *page)
{
	copy_page(to, from);
}
+37
arch/csky/abiv2/inc/abi/pgtable-bits.h
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_PGTABLE_BITS_H
#define __ASM_CSKY_PGTABLE_BITS_H

/* implemented in software */
#define _PAGE_ACCESSED		(1<<7)
#define PAGE_ACCESSED_BIT	(7)

#define _PAGE_READ		(1<<8)
#define _PAGE_WRITE		(1<<9)
#define _PAGE_PRESENT		(1<<10)

#define _PAGE_MODIFIED		(1<<11)
#define PAGE_MODIFIED_BIT	(11)

/* implemented in hardware */
#define _PAGE_GLOBAL		(1<<0)

#define _PAGE_VALID		(1<<1)
#define PAGE_VALID_BIT		(1)

#define _PAGE_DIRTY		(1<<2)
#define PAGE_DIRTY_BIT		(2)

#define _PAGE_SO		(1<<5)
#define _PAGE_BUF		(1<<6)

#define _PAGE_CACHE		(1<<3)

#define _CACHE_MASK		_PAGE_CACHE

#define _CACHE_CACHED		(_PAGE_VALID | _PAGE_CACHE | _PAGE_BUF)
#define _CACHE_UNCACHED		(_PAGE_VALID | _PAGE_SO)

#endif /* __ASM_CSKY_PGTABLE_BITS_H */
+10
arch/csky/include/asm/addrspace.h
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_ADDRSPACE_H
#define __ASM_CSKY_ADDRSPACE_H

#define KSEG0		0x80000000ul
#define KSEG0ADDR(a)	(((unsigned long)a & 0x1fffffff) | KSEG0)

#endif /* __ASM_CSKY_ADDRSPACE_H */
+27
arch/csky/include/asm/fixmap.h
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_FIXMAP_H
#define __ASM_CSKY_FIXMAP_H

#include <asm/page.h>
#ifdef CONFIG_HIGHMEM
#include <linux/threads.h>
#include <asm/kmap_types.h>
#endif

enum fixed_addresses {
#ifdef CONFIG_HIGHMEM
	FIX_KMAP_BEGIN,
	FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
#endif
	__end_of_fixed_addresses
};

#define FIXADDR_TOP	0xffffc000
#define FIXADDR_SIZE	(__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)

#include <asm-generic/fixmap.h>

#endif /* __ASM_CSKY_FIXMAP_H */
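
With these constants, asm-generic/fixmap.h resolves each slot to a
virtual address growing downwards from FIXADDR_TOP:

/* from asm-generic/fixmap.h */
#define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT))

So slot 0 (FIX_KMAP_BEGIN) maps at 0xffffc000, slot 1 at 0xffffb000,
and so on down through FIXADDR_SIZE bytes to FIXADDR_START.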
+51
arch/csky/include/asm/highmem.h
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_HIGHMEM_H
#define __ASM_CSKY_HIGHMEM_H

#ifdef __KERNEL__

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <asm/kmap_types.h>
#include <asm/cache.h>

/* undef for production */
#define HIGHMEM_DEBUG 1

/* declarations for highmem.c */
extern unsigned long highstart_pfn, highend_pfn;

extern pte_t *pkmap_page_table;

/*
 * Right now we initialize only a single pte table. It can be extended
 * easily, subsequent pte tables have to be allocated in one physical
 * chunk of RAM.
 */
#define LAST_PKMAP	1024
#define LAST_PKMAP_MASK	(LAST_PKMAP-1)
#define PKMAP_NR(virt)	((virt-PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)	(PKMAP_BASE + ((nr) << PAGE_SHIFT))

extern void *kmap_high(struct page *page);
extern void kunmap_high(struct page *page);

extern void *kmap(struct page *page);
extern void kunmap(struct page *page);
extern void *kmap_atomic(struct page *page);
extern void __kunmap_atomic(void *kvaddr);
extern void *kmap_atomic_pfn(unsigned long pfn);
extern struct page *kmap_atomic_to_page(void *ptr);

#define flush_cache_kmaps() do {} while (0)

extern void kmap_init(void);

#define kmap_prot PAGE_KERNEL

#endif /* __KERNEL__ */

#endif /* __ASM_CSKY_HIGHMEM_H */
+12
arch/csky/include/asm/mmu.h
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_MMU_H
#define __ASM_CSKY_MMU_H

typedef struct {
	unsigned long asid[NR_CPUS];
	void *vdso;
} mm_context_t;

#endif /* __ASM_CSKY_MMU_H */
+104
arch/csky/include/asm/page.h
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ASM_CSKY_PAGE_H
#define __ASM_CSKY_PAGE_H

#include <asm/setup.h>
#include <asm/cache.h>
#include <linux/const.h>

/*
 * PAGE_SHIFT determines the page size
 */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define THREAD_SIZE	(PAGE_SIZE * 2)
#define THREAD_MASK	(~(THREAD_SIZE - 1))
#define THREAD_SHIFT	(PAGE_SHIFT + 1)

/*
 * NOTE: virtual isn't really correct, actually it should be the offset into the
 * memory node, but we have no highmem, so that works for now.
 * TODO: implement (fast) pfn<->pgdat_idx conversion functions, this makes lots
 * of the shifts unnecessary.
 */

#ifndef __ASSEMBLY__

#include <linux/pfn.h>

#define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_virt(pfn)	__va((pfn) << PAGE_SHIFT)

#define virt_addr_valid(kaddr)	((void *)(kaddr) >= (void *)PAGE_OFFSET && \
				 (void *)(kaddr) < high_memory)
#define pfn_valid(pfn)		((pfn) >= ARCH_PFN_OFFSET && \
				 ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)

extern void *memset(void *dest, int c, size_t l);
extern void *memcpy(void *to, const void *from, size_t l);

#define clear_page(page)	memset((page), 0, PAGE_SIZE)
#define copy_page(to, from)	memcpy((to), (from), PAGE_SIZE)

#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
#define phys_to_page(paddr)	(pfn_to_page(PFN_DOWN(paddr)))

struct page;

#include <abi/page.h>

struct vm_area_struct;

/*
 * These are used to make use of C type-checking..
 */
typedef struct { unsigned long pte_low; } pte_t;
#define pte_val(x)	((x).pte_low)

typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct page *pgtable_t;

#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define ptep_buddy(x)	((pte_t *)((unsigned long)(x) ^ sizeof(pte_t)))

#define __pte(x)	((pte_t) { (x) })
#define __pgd(x)	((pgd_t) { (x) })
#define __pgprot(x)	((pgprot_t) { (x) })

#endif /* !__ASSEMBLY__ */

#define PHYS_OFFSET		(CONFIG_RAM_BASE & ~(LOWMEM_LIMIT - 1))
#define PHYS_OFFSET_OFFSET	(CONFIG_RAM_BASE & (LOWMEM_LIMIT - 1))
#define ARCH_PFN_OFFSET		PFN_DOWN(CONFIG_RAM_BASE)

#define PAGE_OFFSET	0x80000000
#define LOWMEM_LIMIT	0x40000000

#define __pa(x)		((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)
#define __va(x)		((void *)((unsigned long)(x) + PAGE_OFFSET - \
				 PHYS_OFFSET))
#define __pa_symbol(x)	__pa(RELOC_HIDE((unsigned long)(x), 0))

#define MAP_NR(x)	PFN_DOWN((unsigned long)(x) - PAGE_OFFSET - \
				 PHYS_OFFSET_OFFSET)
#define virt_to_page(x)	(mem_map + MAP_NR(x))

#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

/*
 * main RAM and kernel working space are coincident at 0x80000000, but to make
 * life more interesting, there's also an uncached virtual shadow at 0xb0000000
 * - these mappings are fixed in the MMU
 */

#define pfn_to_kaddr(x)	__va(PFN_PHYS(x))

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#endif /* __ASM_CSKY_PAGE_H */
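
A quick round-trip through __pa()/__va(), assuming CONFIG_RAM_BASE =
0x0 so that PHYS_OFFSET is 0 (board-specific; a hypothetical value,
other bases shift both results equally):

static inline void example_va_pa_round_trip(void)
{
	unsigned long pa = __pa((void *)0x80001000);	/* 0x00001000 */
	void *va = __va(0x00001000);			/* (void *)0x80001000 */

	(void)pa;
	(void)va;
}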
+115
arch/csky/include/asm/pgalloc.h
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_PGALLOC_H
#define __ASM_CSKY_PGALLOC_H

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/sched.h>

static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
					pte_t *pte)
{
	set_pmd(pmd, __pmd(__pa(pte)));
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				pgtable_t pte)
{
	set_pmd(pmd, __pmd(__pa(page_address(pte))));
}

#define pmd_pgtable(pmd) pmd_page(pmd)

extern void pgd_init(unsigned long *p);

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long address)
{
	pte_t *pte;
	unsigned long *kaddr, i;

	pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_RETRY_MAYFAIL,
					 PTE_ORDER);
	kaddr = (unsigned long *)pte;
	if (address & 0x80000000)
		for (i = 0; i < (PAGE_SIZE/4); i++)
			*(kaddr + i) = 0x1;
	else
		clear_page(kaddr);

	return pte;
}

static inline struct page *pte_alloc_one(struct mm_struct *mm,
					 unsigned long address)
{
	struct page *pte;
	unsigned long *kaddr, i;

	pte = alloc_pages(GFP_KERNEL | __GFP_RETRY_MAYFAIL, PTE_ORDER);
	if (pte) {
		kaddr = kmap_atomic(pte);
		if (address & 0x80000000) {
			for (i = 0; i < (PAGE_SIZE/4); i++)
				*(kaddr + i) = 0x1;
		} else
			clear_page(kaddr);
		kunmap_atomic(kaddr);
		pgtable_page_ctor(pte);
	}
	return pte;
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_pages((unsigned long)pte, PTE_ORDER);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	pgtable_page_dtor(pte);
	__free_pages(pte, PTE_ORDER);
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_pages((unsigned long)pgd, PGD_ORDER);
}

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *ret;
	pgd_t *init;

	ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
	if (ret) {
		init = pgd_offset(&init_mm, 0UL);
		pgd_init((unsigned long *)ret);
		memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
		/* prevent out of order execution */
		smp_mb();
#ifdef CONFIG_CPU_NEED_TLBSYNC
		dcache_wb_range((unsigned int)ret,
				(unsigned int)(ret + PTRS_PER_PGD));
#endif
	}

	return ret;
}

#define __pte_free_tlb(tlb, pte, address)	\
do {						\
	pgtable_page_dtor(pte);			\
	tlb_remove_page(tlb, pte);		\
} while (0)

#define check_pgt_cache() do {} while (0)

extern void pagetable_init(void);
extern void pre_mmu_init(void);
extern void pre_trap_init(void);

#endif /* __ASM_CSKY_PGALLOC_H */
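
The 0x1 fill for kernel-address PTE pages mirrors pte_clear() and
pte_none() in pgtable.h below, which treat bit 0 as set-but-ignored
for kernel ptes; on abiv2 that bit is _PAGE_GLOBAL, which hardware
refill presumably expects to agree across an entry pair, as on
MIPS-style TLBs (a hedged reading, not stated in the patch):

/* Hypothetical check: a fresh kernel PTE (0x1) still reads as "none"
 * because pte_none() masks bit 0 back out */
static inline int example_fresh_kernel_pte_is_none(void)
{
	pte_t pte = __pte(0x1);

	return pte_none(pte);	/* true: 0x1 & 0xfffffffe == 0 */
}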
+306
arch/csky/include/asm/pgtable.h
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_PGTABLE_H
#define __ASM_CSKY_PGTABLE_H

#include <asm/fixmap.h>
#include <asm/addrspace.h>
#include <abi/pgtable-bits.h>
#include <asm-generic/pgtable-nopmd.h>

#define PGDIR_SHIFT		22
#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))

#define USER_PTRS_PER_PGD	(0x80000000UL/PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0UL

#define PKMAP_BASE		(0xff800000)

#define VMALLOC_START		(0xc0008000)
#define VMALLOC_END		(PKMAP_BASE - 2*PAGE_SIZE)

/*
 * C-SKY is two-level paging structure:
 */
#define PGD_ORDER	0
#define PTE_ORDER	0

#define PTRS_PER_PGD	((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
#define PTRS_PER_PMD	1
#define PTRS_PER_PTE	((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/* Find an entry in the third-level page table.. */
#define __pte_offset_t(address) \
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
	(pmd_page_vaddr(*(dir)) + __pte_offset_t(address))
#define pte_offset_map(dir, address) \
	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset_t(address))
#define pmd_page(pmd)	(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#define pte_clear(mm, addr, ptep)	set_pte((ptep), \
	(((unsigned int)addr&0x80000000)?__pte(1):__pte(0)))
#define pte_none(pte)	(!(pte_val(pte)&0xfffffffe))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_pfn(x)	((unsigned long)((x).pte_low >> PAGE_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((unsigned long long)(pfn) << PAGE_SHIFT) \
				| pgprot_val(prot))

#define __READABLE	(_PAGE_READ | _PAGE_VALID | _PAGE_ACCESSED)
#define __WRITEABLE	(_PAGE_WRITE | _PAGE_DIRTY | _PAGE_MODIFIED)

#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | \
			 _CACHE_MASK)

#define pte_unmap(pte)	((void)(pte))

#define __swp_type(x)			(((x).val >> 4) & 0xff)
#define __swp_offset(x)			((x).val >> 12)
#define __swp_entry(type, offset)	((swp_entry_t) {((type) << 4) | \
					((offset) << 12) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

#define pte_page(x)			pfn_to_page(pte_pfn(x))
#define __mk_pte(page_nr, pgprot)	__pte(((page_nr) << PAGE_SHIFT) | \
					pgprot_val(pgprot))

/*
 * C-SKY can't do page protection for execute, and considers it the same
 * as read. Also, write permissions imply read permissions. This is the
 * closest we can get by reasonable means..
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _CACHE_CACHED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				_CACHE_CACHED)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_READ | _CACHE_CACHED)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_READ | _CACHE_CACHED)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				_PAGE_GLOBAL | _CACHE_CACHED)
#define PAGE_USERIO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				_CACHE_CACHED)

#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED

extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

extern void load_pgd(unsigned long pg_dir);
extern pte_t invalid_pte_table[PTRS_PER_PTE];

static inline int pte_special(pte_t pte) { return 0; }
static inline pte_t pte_mkspecial(pte_t pte) { return pte; }

static inline void set_pte(pte_t *p, pte_t pte)
{
	*p = pte;
#if defined(CONFIG_CPU_NEED_TLBSYNC)
	dcache_wb_line((u32)p);
#endif
	/* prevent out of order execution */
	smp_mb();
}
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	unsigned long ptr;

	ptr = pmd_val(pmd);

	return __va(ptr);
}

#define pmd_phys(pmd) pmd_val(pmd)

static inline void set_pmd(pmd_t *p, pmd_t pmd)
{
	*p = pmd;
#if defined(CONFIG_CPU_NEED_TLBSYNC)
	dcache_wb_line((u32)p);
#endif
	/* prevent speculative execution */
	smp_mb();
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == __pa(invalid_pte_table);
}

#define pmd_bad(pmd)	(pmd_val(pmd) & ~PAGE_MASK)

static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) != __pa(invalid_pte_table));
}

static inline void pmd_clear(pmd_t *p)
{
	pmd_val(*p) = (__pa(invalid_pte_table));
#if defined(CONFIG_CPU_NEED_TLBSYNC)
	dcache_wb_line((u32)p);
#endif
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)
{
	return pte.pte_low & _PAGE_READ;
}

static inline int pte_write(pte_t pte)
{
	return (pte).pte_low & _PAGE_WRITE;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte).pte_low & _PAGE_MODIFIED;
}

static inline int pte_young(pte_t pte)
{
	return (pte).pte_low & _PAGE_ACCESSED;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED|_PAGE_DIRTY);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED|_PAGE_VALID);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) |= _PAGE_VALID;
	return pte;
}

#define __pgd_offset(address)	pgd_index(address)
#define __pud_offset(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

#define pgd_index(address)	((address) >> PGDIR_SHIFT)

/*
 * Macro to mark a page protection value as "uncacheable". Note
 * that "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

	return __pgprot(prot);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
		     (pgprot_val(newprot)));
}

/* to find an entry in a page-table-directory */
static inline pgd_t *pgd_offset(struct mm_struct *mm, unsigned long address)
{
	return mm->pgd + pgd_index(address);
}

/* Find an entry in the third-level page table.. */
static inline pte_t *pte_offset(pmd_t *dir, unsigned long address)
{
	return (pte_t *) (pmd_page_vaddr(*dir)) +
		((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init(void);

extern void show_jtlb_table(void);

void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *pte);

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr)	(1)

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init() do {} while (0)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
	remap_pfn_range(vma, vaddr, pfn, size, prot)

#include <asm-generic/pgtable.h>

#endif /* __ASM_CSKY_PGTABLE_H */
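
For reference, the swap encoding packs the type into bits 4-11 and the
offset from bit 12 upwards. A worked example with hypothetical values:

static inline void example_swp_encoding(void)
{
	/* val = (2 << 4) | (0x1234 << 12) = 0x1234020 */
	swp_entry_t ent = __swp_entry(2, 0x1234);

	BUG_ON(__swp_type(ent) != 2);		/* (0x1234020 >> 4) & 0xff */
	BUG_ON(__swp_offset(ent) != 0x1234);	/* 0x1234020 >> 12 */
}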
+19
arch/csky/include/asm/segment.h
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_SEGMENT_H
#define __ASM_CSKY_SEGMENT_H

typedef struct {
	unsigned long seg;
} mm_segment_t;

#define KERNEL_DS		((mm_segment_t) { 0xFFFFFFFF })
#define get_ds()		KERNEL_DS

#define USER_DS			((mm_segment_t) { 0x80000000UL })
#define get_fs()		(current_thread_info()->addr_limit)
#define set_fs(x)		(current_thread_info()->addr_limit = (x))
#define segment_eq(a, b)	((a).seg == (b).seg)

#endif /* __ASM_CSKY_SEGMENT_H */
+11
arch/csky/include/asm/shmparam.h
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_SHMPARAM_H
#define __ASM_CSKY_SHMPARAM_H

#define SHMLBA	(4 * PAGE_SIZE)

#define __ARCH_FORCE_SHMLBA

#endif /* __ASM_CSKY_SHMPARAM_H */
+254
arch/csky/mm/dma-mapping.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-noncoherent.h>
#include <linux/genalloc.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/types.h>
#include <linux/version.h>
#include <asm/cache.h>

static struct gen_pool *atomic_pool;
static size_t atomic_pool_size __initdata = SZ_256K;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

static int __init atomic_pool_init(void)
{
	struct page *page;
	size_t size = atomic_pool_size;
	void *ptr;
	int ret;

	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!atomic_pool)
		BUG();

	page = alloc_pages(GFP_KERNEL | GFP_DMA, get_order(size));
	if (!page)
		BUG();

	ptr = dma_common_contiguous_remap(page, size, VM_ALLOC,
					  pgprot_noncached(PAGE_KERNEL),
					  __builtin_return_address(0));
	if (!ptr)
		BUG();

	ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
				page_to_phys(page), atomic_pool_size, -1);
	if (ret)
		BUG();

	gen_pool_set_algo(atomic_pool, gen_pool_first_fit_order_align, NULL);

	pr_info("DMA: preallocated %zu KiB pool for atomic coherent pool\n",
		atomic_pool_size / 1024);

	pr_info("DMA: vaddr: 0x%x phy: 0x%lx,\n", (unsigned int)ptr,
		page_to_phys(page));

	return 0;
}
postcore_initcall(atomic_pool_init);

static void *csky_dma_alloc_atomic(struct device *dev, size_t size,
				   dma_addr_t *dma_handle)
{
	unsigned long addr;

	addr = gen_pool_alloc(atomic_pool, size);
	if (addr)
		*dma_handle = gen_pool_virt_to_phys(atomic_pool, addr);

	return (void *)addr;
}

static void csky_dma_free_atomic(struct device *dev, size_t size, void *vaddr,
				 dma_addr_t dma_handle, unsigned long attrs)
{
	gen_pool_free(atomic_pool, (unsigned long)vaddr, size);
}

static void __dma_clear_buffer(struct page *page, size_t size)
{
	if (PageHighMem(page)) {
		unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		do {
			void *ptr = kmap_atomic(page);
			size_t _size = (size < PAGE_SIZE) ? size : PAGE_SIZE;

			memset(ptr, 0, _size);
			dma_wbinv_range((unsigned long)ptr,
					(unsigned long)ptr + _size);

			kunmap_atomic(ptr);

			page++;
			size -= PAGE_SIZE;
			count--;
		} while (count);
	} else {
		void *ptr = page_address(page);

		memset(ptr, 0, size);
		dma_wbinv_range((unsigned long)ptr, (unsigned long)ptr + size);
	}
}

static void *csky_dma_alloc_nonatomic(struct device *dev, size_t size,
				      dma_addr_t *dma_handle, gfp_t gfp,
				      unsigned long attrs)
{
	void *vaddr;
	struct page *page;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (DMA_ATTR_NON_CONSISTENT & attrs) {
		pr_err("csky %s can't support DMA_ATTR_NON_CONSISTENT.\n", __func__);
		return NULL;
	}

	if (IS_ENABLED(CONFIG_DMA_CMA))
		page = dma_alloc_from_contiguous(dev, count, get_order(size),
						 gfp);
	else
		page = alloc_pages(gfp, get_order(size));

	if (!page) {
		pr_err("csky %s no more free pages.\n", __func__);
		return NULL;
	}

	*dma_handle = page_to_phys(page);

	__dma_clear_buffer(page, size);

	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return page;

	vaddr = dma_common_contiguous_remap(page, PAGE_ALIGN(size), VM_USERMAP,
		pgprot_noncached(PAGE_KERNEL), __builtin_return_address(0));
	if (!vaddr)
		BUG();

	return vaddr;
}

static void csky_dma_free_nonatomic(
	struct device *dev,
	size_t size,
	void *vaddr,
	dma_addr_t dma_handle,
	unsigned long attrs
	)
{
	struct page *page = phys_to_page(dma_handle);
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if ((unsigned int)vaddr >= VMALLOC_START)
		dma_common_free_remap(vaddr, size, VM_USERMAP);

	if (IS_ENABLED(CONFIG_DMA_CMA))
		dma_release_from_contiguous(dev, page, count);
	else
		__free_pages(page, get_order(size));
}

void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		     gfp_t gfp, unsigned long attrs)
{
	if (gfpflags_allow_blocking(gfp))
		return csky_dma_alloc_nonatomic(dev, size, dma_handle, gfp,
						attrs);
	else
		return csky_dma_alloc_atomic(dev, size, dma_handle);
}

void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		   dma_addr_t dma_handle, unsigned long attrs)
{
	if (!addr_in_gen_pool(atomic_pool, (unsigned int) vaddr, size))
		csky_dma_free_nonatomic(dev, size, vaddr, dma_handle, attrs);
	else
		csky_dma_free_atomic(dev, size, vaddr, dma_handle, attrs);
}

static inline void cache_op(phys_addr_t paddr, size_t size,
			    void (*fn)(unsigned long start, unsigned long end))
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	unsigned int offset = paddr & ~PAGE_MASK;
	size_t left = size;
	unsigned long start;

	do {
		size_t len = left;

		if (PageHighMem(page)) {
			void *addr;

			if (offset + len > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset >> PAGE_SHIFT;
					offset &= ~PAGE_MASK;
				}
				len = PAGE_SIZE - offset;
			}

			addr = kmap_atomic(page);
			start = (unsigned long)(addr + offset);
			fn(start, start + len);
			kunmap_atomic(addr);
		} else {
			start = (unsigned long)phys_to_virt(paddr);
			fn(start, start + size);
		}
		offset = 0;
		page++;
		left -= len;
	} while (left);
}

void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
			      size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		cache_op(paddr, size, dma_wb_range);
		break;
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		cache_op(paddr, size, dma_wbinv_range);
		break;
	default:
		BUG();
	}
}

void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
			   size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		cache_op(paddr, size, dma_wb_range);
		break;
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		cache_op(paddr, size, dma_wbinv_range);
		break;
	default:
		BUG();
	}
}
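
From a driver's point of view these hooks sit behind the generic DMA
API; a minimal consumer sketch (the device and SZ_4K size are
illustrative):

static int example_dma_buffer(struct device *dev)
{
	dma_addr_t handle;
	/* GFP_KERNEL may block, taking the nonatomic (CMA/alloc_pages)
	 * path above; GFP_ATOMIC would come from the atomic pool */
	void *buf = dma_alloc_coherent(dev, SZ_4K, &handle, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	/* device uses handle, CPU uses buf, both see an uncached view */
	dma_free_coherent(dev, SZ_4K, buf, handle);
	return 0;
}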
+198
arch/csky/mm/highmem.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/bootmem.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

static pte_t *kmap_pte;

unsigned long highstart_pfn, highend_pfn;

void *kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	addr = kmap_high(page);
	flush_tlb_one((unsigned long)addr);

	return addr;
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);

void *kmap_atomic(struct page *page)
{
	unsigned long vaddr;
	int idx, type;

	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL));
	flush_tlb_one((unsigned long)vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int idx;

	if (vaddr < FIXADDR_START)
		goto out;

#ifdef CONFIG_DEBUG_HIGHMEM
	idx = KM_TYPE_NR*smp_processor_id() + kmap_atomic_idx();

	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

	pte_clear(&init_mm, vaddr, kmap_pte - idx);
	flush_tlb_one(vaddr);
#else
	(void) idx; /* to kill a warning */
#endif
	kmap_atomic_idx_pop();
out:
	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	unsigned long vaddr;
	int idx, type;

	pagefault_disable();

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
	flush_tlb_one(vaddr);

	return (void *) vaddr;
}

struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}

static void __init fixrange_init(unsigned long start, unsigned long end,
				 pgd_t *pgd_base)
{
#ifdef CONFIG_HIGHMEM
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
					set_pmd(pmd, __pmd(__pa(pte)));
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
#endif
}

void __init fixaddr_kmap_pages_init(void)
{
	unsigned long vaddr;
	pgd_t *pgd_base;
#ifdef CONFIG_HIGHMEM
	pgd_t *pgd;
	pmd_t *pmd;
	pud_t *pud;
	pte_t *pte;
#endif
	pgd_base = swapper_pg_dir;

	/*
	 * Fixed mappings:
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	fixrange_init(vaddr, 0, pgd_base);

#ifdef CONFIG_HIGHMEM
	/*
	 * Permanent kmaps:
	 */
	vaddr = PKMAP_BASE;
	fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + __pgd_offset(vaddr);
	pud = (pud_t *)pgd;
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
#endif
}

void __init kmap_init(void)
{
	unsigned long vaddr;

	fixaddr_kmap_pages_init();

	vaddr = __fix_to_virt(FIX_KMAP_BEGIN);

	kmap_pte = pte_offset_kernel((pmd_t *)pgd_offset_k(vaddr), vaddr);
}
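
Typical use of the atomic kmap path added above; a short hedged sketch
of copying out of a (possibly highmem) page:

/* Pairs a push/pop of the per-cpu fixmap slots set up in kmap_init() */
static void example_copy_from_page(void *dst, struct page *page, size_t len)
{
	void *src = kmap_atomic(page);

	memcpy(dst, src, len);
	kunmap_atomic(src);
}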
+122
arch/csky/mm/init.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/bug.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>

#include <asm/setup.h>
#include <asm/cachectl.h>
#include <asm/dma.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/tlb.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
						__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

void __init mem_init(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long tmp;

	max_mapnr = highend_pfn;
#else
	max_mapnr = max_low_pfn;
#endif
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	free_all_bootmem();

#ifdef CONFIG_HIGHMEM
	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
		struct page *page = pfn_to_page(tmp);

		/* FIXME not sure about */
		if (!memblock_is_reserved(tmp << PAGE_SHIFT))
			free_highmem_page(page);
	}
#endif
	mem_init_print_info(NULL);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		pr_info("Freeing initrd memory: %ldk freed\n",
			(end - start) >> 10);

	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
	}
}
#endif

extern char __init_begin[], __init_end[];

void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long) &__init_begin;

	while (addr < (unsigned long) &__init_end) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
		addr += PAGE_SIZE;
	}

	pr_info("Freeing unused kernel memory: %dk freed\n",
		((unsigned int)&__init_end - (unsigned int)&__init_begin) >> 10);
}

void pgd_init(unsigned long *p)
{
	int i;

	for (i = 0; i < PTRS_PER_PGD; i++)
		p[i] = __pa(invalid_pte_table);
}

void __init pre_mmu_init(void)
{
	/*
	 * Setup page-table and enable TLB-hardrefill
	 */
	flush_tlb_all();
	pgd_init((unsigned long *)swapper_pg_dir);
	TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir);
	TLBMISS_HANDLER_SETUP_PGD_KERNEL(swapper_pg_dir);

	asid_cache(smp_processor_id()) = ASID_FIRST_VERSION;

	/* Setup page mask to 4k */
	write_mmu_pagemask(0);
}
+48
arch/csky/mm/ioremap.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/pgtable.h>

void __iomem *ioremap(phys_addr_t addr, size_t size)
{
	phys_addr_t last_addr;
	unsigned long offset, vaddr;
	struct vm_struct *area;
	pgprot_t prot;

	last_addr = addr + size - 1;
	if (!size || last_addr < addr)
		return NULL;

	offset = addr & (~PAGE_MASK);
	addr &= PAGE_MASK;
	size = PAGE_ALIGN(size + offset);

	area = get_vm_area_caller(size, VM_ALLOC, __builtin_return_address(0));
	if (!area)
		return NULL;

	vaddr = (unsigned long)area->addr;

	prot = __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE |
			_PAGE_GLOBAL | _CACHE_UNCACHED);

	if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) {
		free_vm_area(area);
		return NULL;
	}

	return (void __iomem *)(vaddr + offset);
}
EXPORT_SYMBOL(ioremap);

void iounmap(void __iomem *addr)
{
	vunmap((void *)((unsigned long)addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);
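
And the usual consumer pattern for this ioremap(), with a hypothetical
device register block at physical 0xfff00000:

static int example_probe(void)
{
	void __iomem *regs = ioremap(0xfff00000, SZ_4K);

	if (!regs)
		return -ENOMEM;
	writel(0x1, regs + 0x10);	/* hits an uncached (_CACHE_UNCACHED) mapping */
	iounmap(regs);
	return 0;
}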