Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xtensa: avoid mmap cache aliasing

Provide an arch_get_unmapped_area function that aligns shared memory mapping
addresses to the larger of the page size and the cache way size. That
guarantees that corresponding virtual addresses of shared mappings are
cached by the same cache sets.

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Chris Zankel <chris@zankel.net>

Authored by Max Filippov and committed by Chris Zankel
de73b6b1 475c32d0

+45
+4
arch/xtensa/include/asm/pgtable.h
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTEP_MKDIRTY
#define __HAVE_ARCH_PTE_SAME
/* We provide our own get_unmapped_area to cope with
 * SHM area cache aliasing for userland.  Defining this macro makes the
 * generic mm code call the xtensa arch_get_unmapped_area() (in
 * arch/xtensa/kernel/syscall.c) instead of the generic allocator.
 */
#define HAVE_ARCH_UNMAPPED_AREA

#include <asm-generic/pgtable.h>
+41
/* COLOUR_ALIGN(addr, pgoff):
 * Round addr up to the next SHMLBA boundary, then add the sub-SHMLBA
 * "colour" of the file offset (pgoff in pages).  Two shared mappings of
 * the same file offset therefore get virtual addresses that are congruent
 * modulo SHMLBA, so they index the same cache sets and cannot alias.
 */
#define COLOUR_ALIGN(addr, pgoff) \
	((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
	 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))

/* arch_get_unmapped_area - choose a user virtual address for a new mapping.
 *
 * @filp:  backing file (unused here; may be NULL for anonymous mappings)
 * @addr:  caller-suggested address, or 0 for "no preference"
 * @len:   length of the requested mapping in bytes
 * @pgoff: file offset of the mapping, in pages
 * @flags: MAP_* flags; only MAP_FIXED and MAP_SHARED are examined
 *
 * Returns the chosen address, or a negative errno encoded in the
 * unsigned long return value (-EINVAL, -ENOMEM), per the get_unmapped_area
 * convention.
 */
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct vm_area_struct *vmm;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints: a fixed shared address must
		 * already have the colour implied by pgoff.
		 */
		if ((flags & MAP_SHARED) &&
				((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;
	if (!addr)
		addr = TASK_UNMAPPED_BASE;

	/* Shared mappings must be colour-aligned; private ones only need
	 * page alignment.
	 */
	if (flags & MAP_SHARED)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	/* Linear first-fit walk of the VMA list starting at the candidate
	 * address, bumping the candidate past each VMA that overlaps it.
	 */
	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
		/* At this point: (!vmm || addr < vmm->vm_end). */
		if (TASK_SIZE - len < addr)	/* overflow-safe: addr + len > TASK_SIZE */
			return -ENOMEM;
		if (!vmm || addr + len <= vmm->vm_start)
			return addr;		/* gap before vmm is big enough */
		addr = vmm->vm_end;
		/* Re-apply the colour after skipping a VMA, since vm_end
		 * is only page-aligned, not necessarily colour-aligned.
		 */
		if (flags & MAP_SHARED)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}
··· 36 36 #include <uapi/asm/unistd.h> 37 37 }; 38 38 39 + #define COLOUR_ALIGN(addr, pgoff) \ 40 + ((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \ 41 + (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1))) 42 + 39 43 asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg) 40 44 { 41 45 unsigned long ret; ··· 55 51 unsigned long long offset, unsigned long long len) 56 52 { 57 53 return sys_fadvise64_64(fd, offset, len, advice); 54 + } 55 + 56 + unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, 57 + unsigned long len, unsigned long pgoff, unsigned long flags) 58 + { 59 + struct vm_area_struct *vmm; 60 + 61 + if (flags & MAP_FIXED) { 62 + /* We do not accept a shared mapping if it would violate 63 + * cache aliasing constraints. 64 + */ 65 + if ((flags & MAP_SHARED) && 66 + ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) 67 + return -EINVAL; 68 + return addr; 69 + } 70 + 71 + if (len > TASK_SIZE) 72 + return -ENOMEM; 73 + if (!addr) 74 + addr = TASK_UNMAPPED_BASE; 75 + 76 + if (flags & MAP_SHARED) 77 + addr = COLOUR_ALIGN(addr, pgoff); 78 + else 79 + addr = PAGE_ALIGN(addr); 80 + 81 + for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) { 82 + /* At this point: (!vmm || addr < vmm->vm_end). */ 83 + if (TASK_SIZE - len < addr) 84 + return -ENOMEM; 85 + if (!vmm || addr + len <= vmm->vm_start) 86 + return addr; 87 + addr = vmm->vm_end; 88 + if (flags & MAP_SHARED) 89 + addr = COLOUR_ALIGN(addr, pgoff); 90 + } 58 91 }