/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_CACHEFLUSH_H
#define _ASM_TILE_CACHEFLUSH_H

#include <arch/chip.h>

/* Keep includes the same across arches. */
#include <linux/mm.h>
#include <linux/cache.h>
#include <asm/system.h>
#include <arch/icache.h>

/* Caches are physically-indexed and so don't need special treatment */
#define flush_cache_all()				do { } while (0)
#define flush_cache_mm(mm)				do { } while (0)
#define flush_cache_dup_mm(mm)				do { } while (0)
#define flush_cache_range(vma, start, end)		do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)		do { } while (0)
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE		0
#define flush_dcache_page(page)				do { } while (0)
#define flush_dcache_mmap_lock(mapping)			do { } while (0)
#define flush_dcache_mmap_unlock(mapping)		do { } while (0)
#define flush_cache_vmap(start, end)			do { } while (0)
#define flush_cache_vunmap(start, end)			do { } while (0)
#define flush_icache_page(vma, pg)			do { } while (0)
#define flush_icache_user_range(vma, pg, adr, len)	do { } while (0)

/* Flush the icache just on this cpu */
extern void __flush_icache_range(unsigned long start, unsigned long end);

/* Flush the entire icache on this cpu. */
#define __flush_icache() __flush_icache_range(0, CHIP_L1I_CACHE_SIZE())

#ifdef CONFIG_SMP
/*
 * When the kernel writes to its own text we need to do an SMP
 * broadcast to make the L1I coherent everywhere.  This includes
 * module load and single step.
 */
extern void flush_icache_range(unsigned long start, unsigned long end);
#else
#define flush_icache_range __flush_icache_range
#endif

/*
 * An update to an executable user page requires icache flushing.
 * We could carefully update only tiles that are running this process,
 * and rely on the fact that we flush the icache on every context
 * switch to avoid doing extra work here.  But for now, I'll be
 * conservative and just do a global icache flush.
 */
static inline void copy_to_user_page(struct vm_area_struct *vma,
				     struct page *page, unsigned long vaddr,
				     void *dst, void *src, int len)
{
	memcpy(dst, src, len);
	if (vma->vm_flags & VM_EXEC) {
		flush_icache_range((unsigned long) dst,
				   (unsigned long) dst + len);
	}
}

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy((dst), (src), (len))

/*
 * Invalidate a VA range; pads to L2 cacheline boundaries.
 *
 * Note that on TILE64, __inv_buffer() actually flushes modified
 * cache lines in addition to invalidating them, i.e., it's the
 * same as __finv_buffer().
 */
static inline void __inv_buffer(void *buffer, size_t size)
{
	char *next = (char *)((long)buffer & -L2_CACHE_BYTES);
	char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size);
	while (next < finish) {
		__insn_inv(next);
		next += CHIP_INV_STRIDE();
	}
}
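
/*
 * Worked example of the padding arithmetic in __inv_buffer() above and
 * the flush/finv variants below (illustrative only; a 64-byte L2 line
 * is assumed here, the real size comes from <arch/chip.h>).  For
 * buffer == (void *)0x10005 and size == 0x10:
 *
 *	next   = 0x10005 & -64           = 0x10000  (round down to a line)
 *	finish = L2_CACHE_ALIGN(0x10015) = 0x10040  (round up to a line)
 *
 * so each loop touches every cache line overlapping [buffer, buffer +
 * size), and callers need no alignment guarantees of their own.
 */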

/* Flush a VA range; pads to L2 cacheline boundaries. */
static inline void __flush_buffer(void *buffer, size_t size)
{
	char *next = (char *)((long)buffer & -L2_CACHE_BYTES);
	char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size);
	while (next < finish) {
		__insn_flush(next);
		next += CHIP_FLUSH_STRIDE();
	}
}

/* Flush & invalidate a VA range; pads to L2 cacheline boundaries. */
static inline void __finv_buffer(void *buffer, size_t size)
{
	char *next = (char *)((long)buffer & -L2_CACHE_BYTES);
	char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size);
	while (next < finish) {
		__insn_finv(next);
		next += CHIP_FINV_STRIDE();
	}
}

/* Invalidate a VA range, then memory fence. */
static inline void inv_buffer(void *buffer, size_t size)
{
	__inv_buffer(buffer, size);
	mb_incoherent();
}

/* Flush a VA range, then memory fence. */
static inline void flush_buffer(void *buffer, size_t size)
{
	__flush_buffer(buffer, size);
	mb_incoherent();
}

/* Flush & invalidate a VA range, then memory fence. */
static inline void finv_buffer(void *buffer, size_t size)
{
	__finv_buffer(buffer, size);
	mb_incoherent();
}

/*
 * Flush & invalidate a VA range that is homed remotely on a single core,
 * waiting until the memory controller holds the flushed values.
 */
static inline void finv_buffer_remote(void *buffer, size_t size)
{
	char *p;
	int i;

	/*
	 * Flush and invalidate the buffer out of the local L1/L2
	 * and request the home cache to flush and invalidate as well.
	 */
	__finv_buffer(buffer, size);

	/*
	 * Wait for the home cache to acknowledge that it has processed
	 * all the flush-and-invalidate requests.  This does not mean
	 * that the flushed data has reached the memory controller yet,
	 * but it does mean the home cache is processing the flushes.
	 */
	__insn_mf();

	/*
	 * Issue a load to the last cache line, which can't complete
	 * until all the previously-issued flushes to the same memory
	 * controller have also completed.  If we weren't striping
	 * memory, that one load would be sufficient, but since we may
	 * be, we also need to back up to the last load issued to
	 * another memory controller, which would be the point where
	 * we crossed an 8KB boundary (the granularity of striping
	 * across memory controllers).  Keep backing up and doing this
	 * until we are before the beginning of the buffer, or have
	 * hit all the controllers.
	 */
	for (i = 0, p = (char *)buffer + size - 1;
	     i < (1 << CHIP_LOG_NUM_MSHIMS()) && p >= (char *)buffer;
	     ++i) {
		const unsigned long STRIPE_WIDTH = 8192;

		/* Force a load instruction to issue. */
		*(volatile char *)p;

		/* Jump to end of previous stripe. */
		p -= STRIPE_WIDTH;
		p = (char *)((unsigned long)p | (STRIPE_WIDTH - 1));
	}

	/* Wait for the loads (and thus flushes) to have completed. */
	__insn_mf();
}

#endif /* _ASM_TILE_CACHEFLUSH_H */
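
/*
 * Worked example of the stripe walk in finv_buffer_remote() above
 * (illustrative only; the 8KB stripe width comes from the code, but the
 * addresses are made up, and enough memory controllers are assumed that
 * the iteration cap is not hit).  For buffer == (void *)0x4000 and
 * size == 0x5000, loads are issued at:
 *
 *	p = 0x8fff                              (last byte of the buffer)
 *	p = (0x8fff - 0x2000) | 0x1fff = 0x7fff (end of previous stripe)
 *	p = (0x7fff - 0x2000) | 0x1fff = 0x5fff (end of the stripe before)
 *
 * after which p drops to 0x3fff, below the buffer start, and the loop
 * exits: one load has then reached each controller stripe the buffer
 * overlaps.
 */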