arch/arc/include/asm/cache.h

@@ -55,9 +55,6 @@
 	       : "r"(data), "r"(ptr));	\
 })
 
-/* used to give SHMLBA a value to avoid Cache Aliasing */
-extern unsigned int ARC_shmlba;
-
 #define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
 
 /*
 1 +
arch/arc/include/asm/cacheflush.h

@@ -19,6 +19,7 @@
 #define _ASM_CACHEFLUSH_H
 
 #include <linux/mm.h>
+#include <asm/shmparam.h>
 
 /*
  * Semantically we need this because icache doesn't snoop dcache/dma.
 3 +
arch/arc/include/asm/pgtable.h

@@ -395,6 +395,9 @@
 
 #include <asm-generic/pgtable.h>
 
+/* to cope with aliasing VIPT cache */
+#define HAVE_ARCH_UNMAPPED_AREA
+
 /*
  * No page table caches to initialise
  */
 18 +
arch/arc/include/asm/shmparam.h (new file)
···11+/*22+ * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)33+ *44+ * This program is free software; you can redistribute it and/or modify55+ * it under the terms of the GNU General Public License version 2 as66+ * published by the Free Software Foundation.77+ */88+99+#ifndef __ARC_ASM_SHMPARAM_H1010+#define __ARC_ASM_SHMPARAM_H1111+1212+/* Handle upto 2 cache bins */1313+#define SHMLBA (2 * PAGE_SIZE)1414+1515+/* Enforce SHMLBA in shmat */1616+#define __ARCH_FORCE_SHMLBA1717+1818+#endif
···11+/*22+ * ARC700 mmap33+ *44+ * (started from arm version - for VIPT alias handling)55+ *66+ * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)77+ *88+ * This program is free software; you can redistribute it and/or modify99+ * it under the terms of the GNU General Public License version 2 as1010+ * published by the Free Software Foundation.1111+ */1212+1313+#include <linux/fs.h>1414+#include <linux/mm.h>1515+#include <linux/mman.h>1616+#include <linux/sched.h>1717+#include <asm/cacheflush.h>1818+1919+#define COLOUR_ALIGN(addr, pgoff) \2020+ ((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \2121+ (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))2222+2323+/*2424+ * Ensure that shared mappings are correctly aligned to2525+ * avoid aliasing issues with VIPT caches.2626+ * We need to ensure that2727+ * a specific page of an object is always mapped at a multiple of2828+ * SHMLBA bytes.2929+ */3030+unsigned long3131+arch_get_unmapped_area(struct file *filp, unsigned long addr,3232+ unsigned long len, unsigned long pgoff, unsigned long flags)3333+{3434+ struct mm_struct *mm = current->mm;3535+ struct vm_area_struct *vma;3636+ int do_align = 0;3737+ int aliasing = cache_is_vipt_aliasing();3838+ struct vm_unmapped_area_info info;3939+4040+ /*4141+ * We only need to do colour alignment if D cache aliases.4242+ */4343+ if (aliasing)4444+ do_align = filp || (flags & MAP_SHARED);4545+4646+ /*4747+ * We enforce the MAP_FIXED case.4848+ */4949+ if (flags & MAP_FIXED) {5050+ if (aliasing && flags & MAP_SHARED &&5151+ (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))5252+ return -EINVAL;5353+ return addr;5454+ }5555+5656+ if (len > TASK_SIZE)5757+ return -ENOMEM;5858+5959+ if (addr) {6060+ if (do_align)6161+ addr = COLOUR_ALIGN(addr, pgoff);6262+ else6363+ addr = PAGE_ALIGN(addr);6464+6565+ vma = find_vma(mm, addr);6666+ if (TASK_SIZE - len >= addr &&6767+ (!vma || addr + len <= vma->vm_start))6868+ return addr;6969+ }7070+7171+ info.flags = 0;7272+ info.length = len;7373+ info.low_limit = 
mm->mmap_base;7474+ info.high_limit = TASK_SIZE;7575+ info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;7676+ info.align_offset = pgoff << PAGE_SHIFT;7777+ return vm_unmapped_area(&info);7878+}