/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#include <linux/export.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <arch/icache.h>
#include <arch/spr_def.h>


void __flush_icache_range(unsigned long start, unsigned long end)
{
	invalidate_icache((const void *)start, end - start, PAGE_SIZE);
}


/* Force a load instruction to issue. */
static inline void force_load(char *p)
{
	*(volatile char *)p;
}

/*
 * Flush and invalidate a VA range that is homed remotely on a single
 * core (if "!hfh") or homed via hash-for-home (if "hfh"), waiting
 * until the memory controller holds the flushed values.
 */
void finv_buffer_remote(void *buffer, size_t size, int hfh)
{
	char *p, *base;
	size_t step_size, load_count;

	/*
	 * On TILEPro the striping granularity is a fixed 8KB; on
	 * TILE-Gx it is configurable, and we rely on the fact that
	 * the hypervisor always configures maximum striping, so that
	 * bits 9 and 10 of the PA are part of the stripe function, so
	 * every 512 bytes we hit a striping boundary.
	 */
#ifdef __tilegx__
	const unsigned long STRIPE_WIDTH = 512;
#else
	const unsigned long STRIPE_WIDTH = 8192;
#endif

#ifdef __tilegx__
	/*
	 * On TILE-Gx, we must disable the dstream prefetcher before doing
	 * a cache flush; otherwise, we could end up with data in the cache
	 * that we don't want there. Note that normally we'd do an mf
	 * after the SPR write disabling the prefetcher, but we do one
	 * below, before any further loads, so there's no need to do it
	 * here.
	 */
	uint_reg_t old_dstream_pf = __insn_mfspr(SPR_DSTREAM_PF);
	__insn_mtspr(SPR_DSTREAM_PF, 0);
#endif

	/*
	 * Flush and invalidate the buffer out of the local L1/L2
	 * and request the home cache to flush and invalidate as well.
	 */
	__finv_buffer(buffer, size);

	/*
	 * Wait for the home cache to acknowledge that it has processed
	 * all the flush-and-invalidate requests. This does not mean
	 * that the flushed data has reached the memory controller yet,
	 * but it does mean the home cache is processing the flushes.
	 */
	__insn_mf();

	/*
	 * Issue a load to the last cache line, which can't complete
	 * until all the previously-issued flushes to the same memory
	 * controller have also completed. If we weren't striping
	 * memory, that one load would be sufficient, but since we may
	 * be, we also need to back up to the last load issued to
	 * another memory controller, which would be the point where
	 * we crossed a "striping" boundary (the granularity of striping
	 * across memory controllers). Keep backing up and doing this
	 * until we are before the beginning of the buffer, or have
	 * hit all the controllers.
	 *
	 * If we are flushing a hash-for-home buffer, it's even worse.
	 * Each line may be homed on a different tile, and each tile
	 * may have up to four lines that are on different
	 * controllers. So as we walk backwards, we have to touch
	 * enough cache lines to satisfy these constraints. In
	 * practice this ends up being close enough to "load from
	 * every cache line on a full memory stripe on each
	 * controller" that we simply do that, to simplify the logic.
	 *
	 * On TILE-Gx the hash-for-home function is much more complex,
	 * with the upshot being we can't readily guarantee we have
	 * hit both entries in the 128-entry AMT that were hit by any
	 * load in the entire range, so we just re-load them all.
	 * With larger buffers, we may want to consider using a hypervisor
	 * trap to issue loads directly to each hash-for-home tile for
	 * each controller (doing it from Linux would trash the TLB).
	 */
	if (hfh) {
		step_size = L2_CACHE_BYTES;
#ifdef __tilegx__
		load_count = (size + L2_CACHE_BYTES - 1) / L2_CACHE_BYTES;
#else
		load_count = (STRIPE_WIDTH / L2_CACHE_BYTES) *
			     (1 << CHIP_LOG_NUM_MSHIMS());
#endif
	} else {
		step_size = STRIPE_WIDTH;
		load_count = (1 << CHIP_LOG_NUM_MSHIMS());
	}

	/* Load the last byte of the buffer. */
	p = (char *)buffer + size - 1;
	force_load(p);

	/* Bump down to the end of the previous stripe or cache line. */
	p -= step_size;
	p = (char *)((unsigned long)p | (step_size - 1));
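	/*
	 * The "- 2" below accounts for the load already issued at the
	 * end of the buffer plus the load the loop will issue at p
	 * itself: the loop touches p, p - step_size, ..., base
	 * inclusive, so base = p - step_size * (load_count - 2) yields
	 * load_count loads in all, one per stripe or cache line.
	 */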
	/* Figure out how far back we need to go. */
	base = p - (step_size * (load_count - 2));
	if ((unsigned long)base < (unsigned long)buffer)
		base = buffer;

	/*
	 * Fire all the loads we need. The MAF only has eight entries
	 * so we can have at most eight outstanding loads, so we
	 * unroll by that amount.
	 */
#pragma unroll 8
	for (; p >= base; p -= step_size)
		force_load(p);

	/*
	 * Repeat, but with inv's instead of loads, to get rid of the
	 * data we just loaded into our own cache and the old home L3.
	 * No need to unroll since inv's don't target a register.
	 */
	p = (char *)buffer + size - 1;
	__insn_inv(p);
	p -= step_size;
	p = (char *)((unsigned long)p | (step_size - 1));
	for (; p >= base; p -= step_size)
		__insn_inv(p);

	/* Wait for the load+inv's (and thus finvs) to have completed. */
	__insn_mf();

#ifdef __tilegx__
	/* Reenable the prefetcher. */
	__insn_mtspr(SPR_DSTREAM_PF, old_dstream_pf);
#endif
}
EXPORT_SYMBOL_GPL(finv_buffer_remote);
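For illustration, here is a standalone user-space sketch (not part of the
original file; the buffer address, size, and controller count are
hypothetical) that replays the address arithmetic of the non-hash-for-home
path above, printing the stripe-by-stripe walk that both the load pass and
the inv pass perform:

#include <stdio.h>

#define STEP_SIZE	512UL	/* TILE-Gx stripe width, per the comment above */
#define LOAD_COUNT	4UL	/* assuming CHIP_LOG_NUM_MSHIMS() == 2 */

int main(void)
{
	unsigned long buffer = 0x10000UL, size = 0x3000UL;

	/* First touch: the last byte of the buffer. */
	unsigned long p = buffer + size - 1;
	printf("touch 0x%lx\n", p);

	/* Back up to the last byte of the previous stripe. */
	p -= STEP_SIZE;
	p |= STEP_SIZE - 1;

	/* Walk back one stripe at a time until every controller is hit. */
	unsigned long base = p - STEP_SIZE * (LOAD_COUNT - 2);
	if (base < buffer)
		base = buffer;
	for (; p >= base; p -= STEP_SIZE)
		printf("touch 0x%lx\n", p);

	return 0;
}

With these values the sketch prints 0x12fff, 0x12dff, 0x12bff, and 0x129ff:
four touches, one per 512-byte stripe, so each of the four assumed memory
controllers sees at least one load ordered after its flushes.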