Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

csky: Fixup arch_get_unmapped_area() implementation

The current arch_get_unmapped_area() of abiv1 doesn't use the standard
kernel API. After referring to the implementation in arch/arm, we
implement it with vm_unmapped_area() from linux/mm.h.

Signed-off-by: Guo Ren <ren_guo@c-sky.com>
Cc: Arnd Bergmann <arnd@arndb.de>

Guo Ren be819aa6 5336c179

+44 -38
+3 -2
arch/csky/abiv1/inc/abi/page.h
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

/* SHMLBA defines the cache-colour granularity; replaces the old
 * runtime shm_align_mask global. */
#include <asm/shmparam.h>

extern void flush_dcache_page(struct page *page);

/*
 * Return the non-zero cache-colour bits that differ between two
 * virtual addresses, i.e. non-zero when the two addresses would
 * land in different cache colours and could alias in a VIPT cache.
 */
static inline unsigned long pages_do_alias(unsigned long addr1,
					   unsigned long addr2)
{
	return (addr1 ^ addr2) & (SHMLBA-1);
}

static inline void clear_user_page(void *addr, unsigned long vaddr,
+41 -36
arch/csky/abiv1/mmap.c
··· 9 9 #include <linux/random.h> 10 10 #include <linux/io.h> 11 11 12 - unsigned long shm_align_mask = (0x4000 >> 1) - 1; /* Sane caches */ 12 + #define COLOUR_ALIGN(addr,pgoff) \ 13 + ((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \ 14 + (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1))) 13 15 14 - #define COLOUR_ALIGN(addr, pgoff) \ 15 - ((((addr) + shm_align_mask) & ~shm_align_mask) + \ 16 - (((pgoff) << PAGE_SHIFT) & shm_align_mask)) 17 - 18 - unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, 16 + /* 17 + * We need to ensure that shared mappings are correctly aligned to 18 + * avoid aliasing issues with VIPT caches. We need to ensure that 19 + * a specific page of an object is always mapped at a multiple of 20 + * SHMLBA bytes. 21 + * 22 + * We unconditionally provide this function for all cases. 23 + */ 24 + unsigned long 25 + arch_get_unmapped_area(struct file *filp, unsigned long addr, 19 26 unsigned long len, unsigned long pgoff, unsigned long flags) 20 27 { 21 - struct vm_area_struct *vmm; 22 - int do_color_align; 28 + struct mm_struct *mm = current->mm; 29 + struct vm_area_struct *vma; 30 + int do_align = 0; 31 + struct vm_unmapped_area_info info; 23 32 33 + /* 34 + * We only need to do colour alignment if either the I or D 35 + * caches alias. 36 + */ 37 + do_align = filp || (flags & MAP_SHARED); 38 + 39 + /* 40 + * We enforce the MAP_FIXED case. 41 + */ 24 42 if (flags & MAP_FIXED) { 25 - /* 26 - * We do not accept a shared mapping if it would violate 27 - * cache aliasing constraints. 
28 - */ 29 - if ((flags & MAP_SHARED) && 30 - ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask)) 43 + if (flags & MAP_SHARED && 44 + (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)) 31 45 return -EINVAL; 32 46 return addr; 33 47 } 34 48 35 49 if (len > TASK_SIZE) 36 50 return -ENOMEM; 37 - do_color_align = 0; 38 - if (filp || (flags & MAP_SHARED)) 39 - do_color_align = 1; 51 + 40 52 if (addr) { 41 - if (do_color_align) 53 + if (do_align) 42 54 addr = COLOUR_ALIGN(addr, pgoff); 43 55 else 44 56 addr = PAGE_ALIGN(addr); 45 - vmm = find_vma(current->mm, addr); 46 - if (TASK_SIZE - len >= addr && 47 - (!vmm || addr + len <= vmm->vm_start)) 48 - return addr; 49 - } 50 - addr = TASK_UNMAPPED_BASE; 51 - if (do_color_align) 52 - addr = COLOUR_ALIGN(addr, pgoff); 53 - else 54 - addr = PAGE_ALIGN(addr); 55 57 56 - for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) { 57 - /* At this point: (!vmm || addr < vmm->vm_end). */ 58 - if (TASK_SIZE - len < addr) 59 - return -ENOMEM; 60 - if (!vmm || addr + len <= vmm->vm_start) 58 + vma = find_vma(mm, addr); 59 + if (TASK_SIZE - len >= addr && 60 + (!vma || addr + len <= vm_start_gap(vma))) 61 61 return addr; 62 - addr = vmm->vm_end; 63 - if (do_color_align) 64 - addr = COLOUR_ALIGN(addr, pgoff); 65 62 } 63 + 64 + info.flags = 0; 65 + info.length = len; 66 + info.low_limit = mm->mmap_base; 67 + info.high_limit = TASK_SIZE; 68 + info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0; 69 + info.align_offset = pgoff << PAGE_SHIFT; 70 + return vm_unmapped_area(&info); 66 71 }