/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_CACHEFLUSH_H
#define _ASM_TILE_CACHEFLUSH_H

#include <arch/chip.h>

/* Keep includes the same across arches. */
#include <linux/mm.h>
#include <linux/cache.h>
#include <arch/icache.h>

/* Caches are physically-indexed and so don't need special treatment */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE	0
#define flush_dcache_page(page)			do { } while (0)
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
#define flush_cache_vmap(start, end)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)
#define flush_icache_page(vma, pg)		do { } while (0)
#define flush_icache_user_range(vma, pg, adr, len)	do { } while (0)

/* Flush the icache just on this cpu */
extern void __flush_icache_range(unsigned long start, unsigned long end);

/* Flush the entire icache on this cpu. */
#define __flush_icache() __flush_icache_range(0, CHIP_L1I_CACHE_SIZE())

#ifdef CONFIG_SMP
/*
 * When the kernel writes to its own text we need to do an SMP
 * broadcast to make the L1I coherent everywhere. This includes
 * module load and single step.
 */
extern void flush_icache_range(unsigned long start, unsigned long end);
#else
#define flush_icache_range __flush_icache_range
#endif

/*
 * An update to an executable user page requires icache flushing.
 * We could carefully update only tiles that are running this process,
 * and rely on the fact that we flush the icache on every context
 * switch to avoid doing extra work here. But for now, I'll be
 * conservative and just do a global icache flush.
 */
static inline void copy_to_user_page(struct vm_area_struct *vma,
				     struct page *page, unsigned long vaddr,
				     void *dst, void *src, int len)
{
	memcpy(dst, src, len);
	if (vma->vm_flags & VM_EXEC) {
		flush_icache_range((unsigned long) dst,
				   (unsigned long) dst + len);
	}
}

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy((dst), (src), (len))

/*
 * Invalidate a VA range; pads to L2 cacheline boundaries.
 *
 * Note that on TILE64, __inv_buffer() actually flushes modified
 * cache lines in addition to invalidating them, i.e., it's the
 * same as __finv_buffer().
 */
static inline void __inv_buffer(void *buffer, size_t size)
{
	char *next = (char *)((long)buffer & -L2_CACHE_BYTES);
	char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size);
	while (next < finish) {
		__insn_inv(next);
		next += CHIP_INV_STRIDE();
	}
}
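/*
 * Illustrative note (not part of the original header): how the padding
 * math above plays out. Assuming L2_CACHE_BYTES and CHIP_INV_STRIDE()
 * are both 64, a call such as
 *
 *	__inv_buffer((void *)0x1005, 16);
 *
 * rounds "next" down to 0x1000 and "finish" up to 0x1040, so the one
 * L2 line covering 0x1000..0x103f is invalidated. Bytes in that line
 * but outside the buffer are invalidated as well, so callers should
 * not let an invalidated buffer share a cacheline with live data.
 */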
/* Flush a VA range; pads to L2 cacheline boundaries. */
static inline void __flush_buffer(void *buffer, size_t size)
{
	char *next = (char *)((long)buffer & -L2_CACHE_BYTES);
	char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size);
	while (next < finish) {
		__insn_flush(next);
		next += CHIP_FLUSH_STRIDE();
	}
}

/* Flush & invalidate a VA range; pads to L2 cacheline boundaries. */
static inline void __finv_buffer(void *buffer, size_t size)
{
	char *next = (char *)((long)buffer & -L2_CACHE_BYTES);
	char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size);
	while (next < finish) {
		__insn_finv(next);
		next += CHIP_FINV_STRIDE();
	}
}

/* Invalidate a VA range and wait for it to be complete. */
static inline void inv_buffer(void *buffer, size_t size)
{
	__inv_buffer(buffer, size);
	mb();
}

/*
 * Flush a locally-homecached VA range and wait for the evicted
 * cachelines to hit memory.
 */
static inline void flush_buffer_local(void *buffer, size_t size)
{
	__flush_buffer(buffer, size);
	mb_incoherent();
}

/*
 * Flush and invalidate a locally-homecached VA range and wait for the
 * evicted cachelines to hit memory.
 */
static inline void finv_buffer_local(void *buffer, size_t size)
{
	__finv_buffer(buffer, size);
	mb_incoherent();
}

/*
 * Flush and invalidate a VA range that is homed remotely, waiting
 * until the memory controller holds the flushed values. If "hfh" is
 * true, we will do a more expensive flush involving additional loads
 * to make sure we have touched all the possible home cpus of a buffer
 * that is homed with "hash for home".
 */
void finv_buffer_remote(void *buffer, size_t size, int hfh);

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible:
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

#endif /* _ASM_TILE_CACHEFLUSH_H */
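/*
 * Illustrative usage sketch, not part of the original header. The
 * example_*() functions below are invented for illustration; only
 * inv_buffer() and flush_buffer_local() come from this file. A
 * hypothetical driver using a locally-homed DMA buffer might pair
 * the helpers with device I/O like this:
 *
 *	static void example_tx_prepare(void *buf, size_t len)
 *	{
 *		// Push the CPU's dirty cachelines out to memory so the
 *		// device reads current data, then start the transfer.
 *		flush_buffer_local(buf, len);
 *	}
 *
 *	static void example_rx_complete(void *buf, size_t len)
 *	{
 *		// The device wrote memory behind the cache; discard any
 *		// stale cached copies before the CPU reads the buffer.
 *		inv_buffer(buf, len);
 *	}
 */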