Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at 77b2555b52a894a2e39a42e43d993df875c46a6a 185 lines 4.8 kB view raw
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/mm.h>

#include <asm/memory.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>

extern struct cpu_cache_fns blk_cache_fns;

#define HARVARD_CACHE

/*
 * V6 "block" cache operations.
 *
 * The MCRR block-transfer cache operations are encoded here as raw
 * .word values (presumably because the assembler in use lacks MCRR
 * support -- TODO confirm); the intended mnemonic is given in the
 * adjacent "@ mcrr ..." assembler comment on each line.
 *
 * The functions below are marked naked: their bodies are pure inline
 * assembly ending in "mov pc, lr", so the compiler must not emit a
 * prologue/epilogue around them.
 */

/*
 * blk_flush_kern_dcache_page(kaddr)
 *
 * Ensure that the data held in the page kaddr is written back
 * to the page in question.
 *
 * - kaddr - kernel address (guaranteed to be page aligned)
 */
static void __attribute__((naked))
blk_flush_kern_dcache_page(void *kaddr)
{
	/*
	 * r1 = kaddr + PAGE_SIZE - L1_CACHE_BYTES (last line of the page),
	 * then one blocking clean+invalidate over [r0, r1], followed by
	 * I-cache invalidate (c7,c5,0) and write buffer drain (c7,c10,4).
	 */
	asm(
	"add	r1, r0, %0						\n\
	sub	r1, r1, %1						\n\
1:	.word	0xec401f0e	@ mcrr	p15, 0, r0, r1, c14, 0	@ blocking	\n\
	mov	r0, #0							\n\
	mcr	p15, 0, r0, c7, c5, 0					\n\
	mcr	p15, 0, r0, c7, c10, 4					\n\
	mov	pc, lr"
	:
	: "I" (PAGE_SIZE), "I" (L1_CACHE_BYTES));
}

/*
 * blk_dma_inv_range(start,end)
 *
 * Invalidate the data cache within the specified region; we will
 * be performing a DMA operation in this region and we want to
 * purge old data in the cache.
 *
 * - start - virtual start address of region
 * - end   - virtual end address of region
 */
static void __attribute__((naked))
blk_dma_inv_range_unified(unsigned long start, unsigned long end)
{
	/*
	 * If start or end is not cache-line aligned (tst against
	 * L1_CACHE_BYTES - 1), clean the partially-covered line first
	 * so the block invalidate cannot discard unrelated dirty data
	 * sharing that line.  Unified-cache CP15 encodings.
	 */
	asm(
	"tst	r0, %0							\n\
	mcrne	p15, 0, r0, c7, c11, 1		@ clean unified line	\n\
	tst	r1, %0							\n\
	mcrne	p15, 0, r1, c7, c15, 1		@ clean & invalidate unified line\n\
	.word	0xec401f06	@ mcrr	p15, 0, r1, r0, c6, 0	@ blocking	\n\
	mov	r0, #0							\n\
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer	\n\
	mov	pc, lr"
	:
	: "I" (L1_CACHE_BYTES - 1));
}

/* As above, but using the Harvard (separate D-cache) CP15 encodings. */
static void __attribute__((naked))
blk_dma_inv_range_harvard(unsigned long start, unsigned long end)
{
	asm(
	"tst	r0, %0							\n\
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D line		\n\
	tst	r1, %0							\n\
	mcrne	p15, 0, r1, c7, c14, 1		@ clean & invalidate D line	\n\
	.word	0xec401f06	@ mcrr	p15, 0, r1, r0, c6, 0	@ blocking	\n\
	mov	r0, #0							\n\
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer	\n\
	mov	pc, lr"
	:
	: "I" (L1_CACHE_BYTES - 1));
}

/*
 * blk_dma_clean_range(start,end)
 * - start - virtual start address of region
 * - end   - virtual end address of region
 */
static void __attribute__((naked))
blk_dma_clean_range(unsigned long start, unsigned long end)
{
	asm(
	".word	0xec401f0c	@ mcrr	p15, 0, r1, r0, c12, 0	@ blocking	\n\
	mov	r0, #0							\n\
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer	\n\
	mov	pc, lr");
}

/*
 * blk_dma_flush_range(start,end)
 * - start - virtual start address of region
 * - end   - virtual end address of region
 */
static void __attribute__((naked))
blk_dma_flush_range(unsigned long start, unsigned long end)
{
	asm(
	".word	0xec401f0e	@ mcrr	p15, 0, r1, r0, c14, 0	@ blocking	\n\
	mov	pc, lr");
}

/*
 * Undefined-instruction handler used while probing: a block cache op
 * that traps is unsupported.  Record which one failed by OR-ing the
 * probe's r2 marker bit into r4 (read back via the register variable
 * "err" in blockops_check()), then skip the 4-byte faulting insn.
 */
static int blockops_trap(struct pt_regs *regs, unsigned int instr)
{
	regs->ARM_r4 |= regs->ARM_r2;
	regs->ARM_pc += 4;
	return 0;
}

/* Probe names, indexed to match the r2 marker bits 1,2,4,8,16 below. */
static char *func[] = {
	"Prefetch data range",
	"Clean+Invalidate data range",
	"Clean data range",
	"Invalidate data range",
	"Invalidate instr range"
};

/* Match the MCRR block-op encodings in ARM (non-Thumb) state only. */
static struct undef_hook blockops_hook __initdata = {
	.instr_mask	= 0x0fffffd0,
	.instr_val	= 0x0c401f00,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= 0,
	.fn		= blockops_trap,
};

/*
 * Probe which V6 block cache operations this CPU supports and patch
 * cpu_cache to use the block variants for each one that did not trap.
 *
 * Each probe sets a marker in r2 before issuing the candidate MCRR;
 * blockops_trap() accumulates the markers of failing ops into r4,
 * which we observe here as "err" (hence the asm("r4") pinning).
 * Always returns 0 so the initcall succeeds either way.
 */
static int __init blockops_check(void)
{
	register unsigned int err asm("r4") = 0;
	unsigned int err_pos = 1;
	unsigned int cache_type;
	int i;

	/* Read the CP15 cache type register. */
	asm("mrc p15, 0, %0, c0, c0, 1" : "=r" (cache_type));

	printk("Checking V6 block cache operations:\n");
	register_undef_hook(&blockops_hook);

	/* Issue each candidate block op over [PAGE_OFFSET, PAGE_OFFSET+128). */
	__asm__ ("mov	r0, %0\n\t"
		"mov	r1, %1\n\t"
		"mov	r2, #1\n\t"
		".word	0xec401f2c @ mcrr p15, 0, r1, r0, c12, 2\n\t"
		"mov	r2, #2\n\t"
		".word	0xec401f0e @ mcrr p15, 0, r1, r0, c14, 0\n\t"
		"mov	r2, #4\n\t"
		".word	0xec401f0c @ mcrr p15, 0, r1, r0, c12, 0\n\t"
		"mov	r2, #8\n\t"
		".word	0xec401f06 @ mcrr p15, 0, r1, r0, c6, 0\n\t"
		"mov	r2, #16\n\t"
		".word	0xec401f05 @ mcrr p15, 0, r1, r0, c5, 0\n\t"
		:
		: "r" (PAGE_OFFSET), "r" (PAGE_OFFSET + 128)
		: "r0", "r1", "r2");

	unregister_undef_hook(&blockops_hook);

	for (i = 0; i < ARRAY_SIZE(func); i++, err_pos <<= 1)
		printk("%30s: %ssupported\n", func[i], err & err_pos ? "not " : "");

	/*
	 * Bit 24 of the cache type register selects the harvard vs
	 * unified invalidate variant (per the printk below -- this is
	 * the split/unified cache indicator bit; see the ARM cache
	 * type register description).
	 */
	if ((err & 8) == 0) {
		printk(" --> Using %s block cache invalidate\n",
			cache_type & (1 << 24) ? "harvard" : "unified");
		if (cache_type & (1 << 24))
			cpu_cache.dma_inv_range = blk_dma_inv_range_harvard;
		else
			cpu_cache.dma_inv_range = blk_dma_inv_range_unified;
	}
	if ((err & 4) == 0) {
		printk(" --> Using block cache clean\n");
		cpu_cache.dma_clean_range = blk_dma_clean_range;
	}
	if ((err & 2) == 0) {
		printk(" --> Using block cache clean+invalidate\n");
		cpu_cache.dma_flush_range = blk_dma_flush_range;
		cpu_cache.flush_kern_dcache_page = blk_flush_kern_dcache_page;
	}

	return 0;
}

__initcall(blockops_check);