m68knommu: make cache push code ColdFire generic

Currently the code to push cache lines is only available to version 4
cores. Version 3 cores may also need it if we support copy-back caches
on them. Move this code so that it is more generic and usable by all
versions of ColdFire cores.

With this in place we can now have a single __flush_cache_all() code
path that does the right thing on every core version.

Signed-off-by: Greg Ungerer <gerg@uclinux.org>

4 files changed, 56 insertions(+), 39 deletions(-)

arch/m68k/include/asm/cacheflush_no.h (+5 -2):

 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 	memcpy(dst, src, len)
 
-#ifndef __flush_cache_all
+void mcf_cache_push(void);
+
 static inline void __flush_cache_all(void)
 {
+#ifdef CACHE_PUSH
+	mcf_cache_push();
+#endif
 #ifdef CACHE_INVALIDATE
 	__asm__ __volatile__ (
 		"movel	%0, %%d0\n\t"
···
 		: : "i" (CACHE_INVALIDATE) : "d0" );
 #endif
 }
-#endif /* __flush_cache_all */
 
 #endif /* _M68KNOMMU_CACHEFLUSH_H */
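With the CACHE_PUSH hook in place, callers do not change: flush_cache_all()
earlier in this header still expands to __flush_cache_all(), which now pushes
dirty lines before invalidating whenever the platform header asks for it. A
rough illustration of a caller (hypothetical code, not part of the patch):

#include <asm/cacheflush.h>

/* Hypothetical caller: code that needs dirty data written back and the
 * caches invalidated uses the generic API, which lands on the inline above. */
static void example_sync_caches(void)
{
	flush_cache_all();	/* __flush_cache_all(): mcf_cache_push() if
				 * CACHE_PUSH, then the CACHE_INVALIDATE step */
}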

arch/m68k/include/asm/m54xxacr.h (+2 -36):

 #define ACR2_MODE	(0x000f0000+INSN_CACHE_MODE)
 #define ACR3_MODE	0
 
-#ifndef __ASSEMBLY__
-
 #if ((DATA_CACHE_MODE & ACR_CM) == ACR_CM_WT)
 #define flush_dcache_range(a, l) do { asm("nop"); } while (0)
 #endif
-
-static inline void __m54xx_flush_cache_all(void)
-{
-	__asm__ __volatile__ (
 #if ((DATA_CACHE_MODE & ACR_CM) == ACR_CM_CP)
-	/*
-	 * Use cpushl to push and invalidate all cache lines.
-	 * Gas doesn't seem to know how to generate the ColdFire
-	 * cpushl instruction... Oh well, bit stuff it for now.
-	 */
-	"clrl	%%d0\n\t"
-	"1:\n\t"
-	"movel	%%d0,%%a0\n\t"
-	"2:\n\t"
-	".word	0xf468\n\t"
-	"addl	%0,%%a0\n\t"
-	"cmpl	%1,%%a0\n\t"
-	"blt	2b\n\t"
-	"addql	#1,%%d0\n\t"
-	"cmpil	%2,%%d0\n\t"
-	"bne	1b\n\t"
+/* Copyback cache mode must push dirty cache lines first */
+#define CACHE_PUSH
 #endif
-	"movel	%3,%%d0\n\t"
-	"movec	%%d0,%%CACR\n\t"
-	"nop\n\t"			/* forces flush of Store Buffer */
-	: /* No output */
-	: "i" (CACHE_LINE_SIZE),
-	  "i" (DCACHE_SIZE / CACHE_WAYS),
-	  "i" (CACHE_WAYS),
-	  "i" (CACHE_INVALIDATE)
-	: "d0", "a0" );
-}
-
-#define __flush_cache_all() __m54xx_flush_cache_all()
-
-#endif /* __ASSEMBLY__ */
 
 #endif	/* m54xxacr_h */
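The same one-line opt-in is all a version 3 core with a copy-back data cache
would need. A hypothetical sketch modelled on the fragment above (the header
name and the reuse of the ACR mode macros are assumptions, not part of this
patch):

/* Hypothetical v3 ACR header (for example a future m53xxacr.h), shown only
 * to illustrate the opt-in; macro names mirror m54xxacr.h. */
#if ((DATA_CACHE_MODE & ACR_CM) == ACR_CM_CP)
/* Copyback cache mode must push dirty cache lines first */
#define CACHE_PUSH
#endif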

arch/m68knommu/platform/coldfire/Makefile (+1 -1):

 
 asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
 
-obj-$(CONFIG_COLDFIRE)	+= clk.o dma.o entry.o vectors.o
+obj-$(CONFIG_COLDFIRE)	+= cache.o clk.o dma.o entry.o vectors.o
 obj-$(CONFIG_M5206)	+= timers.o intc.o
 obj-$(CONFIG_M5206e)	+= timers.o intc.o
 obj-$(CONFIG_M520x)	+= pit.o intc-simr.o

arch/m68knommu/platform/coldfire/cache.c (new file, +48):

+/***************************************************************************/
+
+/*
+ *	cache.c -- general ColdFire Cache maintenance code
+ *
+ *	Copyright (C) 2010, Greg Ungerer (gerg@snapgear.com)
+ */
+
+/***************************************************************************/
+
+#include <linux/kernel.h>
+#include <asm/coldfire.h>
+#include <asm/mcfsim.h>
+
+/***************************************************************************/
+#ifdef CACHE_PUSH
+/***************************************************************************/
+
+/*
+ *	Use cpushl to push all dirty cache lines back to memory.
+ *	Older versions of GAS don't seem to know how to generate the
+ *	ColdFire cpushl instruction... Oh well, bit stuff it for now.
+ */
+
+void mcf_cache_push(void)
+{
+	__asm__ __volatile__ (
+		"clrl	%%d0\n\t"
+		"1:\n\t"
+		"movel	%%d0,%%a0\n\t"
+		"2:\n\t"
+		".word	0xf468\n\t"
+		"addl	%0,%%a0\n\t"
+		"cmpl	%1,%%a0\n\t"
+		"blt	2b\n\t"
+		"addql	#1,%%d0\n\t"
+		"cmpil	%2,%%d0\n\t"
+		"bne	1b\n\t"
+		: /* No output */
+		: "i" (CACHE_LINE_SIZE),
+		  "i" (DCACHE_SIZE / CACHE_WAYS),
+		  "i" (CACHE_WAYS)
+		: "d0", "a0" );
+}
+
+/***************************************************************************/
+#endif	/* CACHE_PUSH */
+/***************************************************************************/
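For readers auditing the hand-assembled loop: the outer counter (d0) selects
the cache way and the inner counter (a0) steps through each line of that way,
pushing one line per cpushl. An illustrative C rendering, not part of the
patch; cpushl_line() is a hypothetical stand-in for the raw 0xf468 opcode:

/* Illustrative C sketch of the asm loop in mcf_cache_push(). The constants
 * come from the platform ACR header, as in cache.c above. */
static void mcf_cache_push_sketch(void)
{
	unsigned long way, line;

	for (way = 0; way < CACHE_WAYS; way++) {
		for (line = way; line < DCACHE_SIZE / CACHE_WAYS;
		     line += CACHE_LINE_SIZE)
			cpushl_line(line);	/* push this cache line */
	}
}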