m68knommu: create optimal separate instruction and data cache flushing for ColdFire

Create separate functions to deal with instruction and data cache flushing.
This way we can optimize them for the various cache types and arrangements
used across the ColdFire family.

For example, the unified caches in the version 3 cores mean we don't
need to flush the instruction cache. For the version 2 cores that do
not do data caching (or where we choose instruction cache only) we
don't need to do any data flushing.
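
Not part of the patch, but the optimize-away behaviour can be sketched in
plain C. In this standalone sketch the two CACHE_INVALIDATE* defines stand
in for the per-core headers changed below, and the values are placeholders:

#include <stdio.h>

/* A unified-cache (version 3) build defines only CACHE_INVALIDATED;
 * a split-cache build would define CACHE_INVALIDATEI as well. */
#define CACHE_INVALIDATED 0x01000000
/* #define CACHE_INVALIDATEI 0x01000800 */

static inline void __flush_icache_all(void)
{
#ifdef CACHE_INVALIDATEI
	puts("invalidate icache via CACR");
#endif	/* otherwise empty, and calls to it are optimized away */
}

static inline void __flush_dcache_all(void)
{
#ifdef CACHE_INVALIDATED
	puts("push + invalidate dcache via CACR");
#endif
}

int main(void)
{
	__flush_icache_all();	/* no-op on this unified-cache build */
	__flush_dcache_all();
	return 0;
}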

Signed-off-by: Greg Ungerer <gerg@uclinux.org>

+83 -20
+34 -4
arch/m68k/include/asm/cacheflush_no.h
···
 #define flush_cache_dup_mm(mm)			do { } while (0)
 #define flush_cache_range(vma, start, end)	do { } while (0)
 #define flush_cache_page(vma, vmaddr)		do { } while (0)
-#ifndef flush_dcache_range
-#define flush_dcache_range(start,len)		__flush_cache_all()
-#endif
+#define flush_dcache_range(start, len)		__flush_dcache_all()
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE	0
 #define flush_dcache_page(page)			do { } while (0)
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
-#define flush_icache_range(start,len)		__flush_cache_all()
+#define flush_icache_range(start, len)		__flush_icache_all()
 #define flush_icache_page(vma,pg)		do { } while (0)
 #define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
 #define flush_cache_vmap(start, end)		do { } while (0)
···
 #endif
 }

+/*
+ * Some ColdFire parts implement separate instruction and data caches,
+ * on those we should just flush the appropriate cache. If we don't need
+ * to do any specific flushing then this will be optimized away.
+ */
+static inline void __flush_icache_all(void)
+{
+#ifdef CACHE_INVALIDATEI
+	__asm__ __volatile__ (
+		"movel	%0, %%d0\n\t"
+		"movec	%%d0, %%CACR\n\t"
+		"nop\n\t"
+		: : "i" (CACHE_INVALIDATEI) : "d0" );
+#endif
+}
+
+static inline void __flush_dcache_all(void)
+{
+#ifdef CACHE_PUSH
+	mcf_cache_push();
+#endif
+#ifdef CACHE_INVALIDATED
+	__asm__ __volatile__ (
+		"movel	%0, %%d0\n\t"
+		"movec	%%d0, %%CACR\n\t"
+		"nop\n\t"
+		: : "i" (CACHE_INVALIDATED) : "d0" );
+#else
+	/* Flush the write buffer */
+	__asm__ __volatile__ ( "nop" );
+#endif
+}
 #endif /* _M68KNOMMU_CACHEFLUSH_H */
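
A usage sketch for the new entry points (the two driver helpers and their
names are hypothetical; the flush macros are the ones defined above):

#include <asm/cacheflush.h>

/* Hypothetical driver code, for illustration only. */
static void dma_tx_prepare(void *buf, unsigned long len)
{
	/* Make the buffer visible to the DMA engine: pushes dirty lines
	 * on copyback parts, nothing more than a nop on write-through. */
	flush_dcache_range((unsigned long)buf, len);
}

static void code_load_finish(void *text, unsigned long len)
{
	/* Drop stale instruction lines after writing code to RAM;
	 * compiles away entirely on the unified-cache version 3 cores. */
	flush_icache_range((unsigned long)text, len);
}
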
+20 -11
arch/m68k/include/asm/m52xxacr.h
···
  * that as on.
  */
 #if defined(CONFIG_CACHE_I)
-#define CACHE_TYPE	CACR_DISD
+#define CACHE_TYPE	(CACR_DISD + CACR_EUSP)
+#define CACHE_INVTYPEI	0
 #elif defined(CONFIG_CACHE_D)
-#define CACHE_TYPE	CACR_DISI
+#define CACHE_TYPE	(CACR_DISI + CACR_EUSP)
+#define CACHE_INVTYPED	0
+#elif defined(CONFIG_CACHE_BOTH)
+#define CACHE_TYPE	CACR_EUSP
+#define CACHE_INVTYPEI	CACR_INVI
+#define CACHE_INVTYPED	CACR_INVD
 #else
-#define CACHE_TYPE
+/* This is the instruction cache only devices (no split cache, no eusp) */
+#define CACHE_TYPE	0
+#define CACHE_INVTYPEI	0
 #endif

-#if defined(CONFIG_HAVE_CACHE_SPLIT)
-#define CACHE_INIT	(CACR_CINV + CACHE_TYPE + CACR_EUSP)
-#define CACHE_MODE	(CACR_CENB + CACHE_TYPE + CACR_DCM + CACR_EUSP)
-#else
-#define CACHE_INIT	(CACR_CINV)
-#define CACHE_MODE	(CACR_CENB + CACR_DCM)
-#endif
+#define CACHE_INIT	(CACR_CINV + CACHE_TYPE)
+#define CACHE_MODE	(CACR_CENB + CACHE_TYPE + CACR_DCM)

-#define CACHE_INVALIDATE (CACHE_MODE + CACR_CINV)
+#define CACHE_INVALIDATE  (CACHE_MODE + CACR_CINV)
+#if defined(CACHE_INVTYPEI)
+#define CACHE_INVALIDATEI (CACHE_MODE + CACR_CINV + CACHE_INVTYPEI)
+#endif
+#if defined(CACHE_INVTYPED)
+#define CACHE_INVALIDATED (CACHE_MODE + CACR_CINV + CACHE_INVTYPED)
+#endif

 #define ACR0_MODE	((CONFIG_RAMBASE & 0xff000000) + \
 			(0x000f0000) + \
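
Reading the new macros through for one configuration (a symbolic expansion
only, nothing beyond what the header defines above): with CONFIG_CACHE_BOTH
on a version 2 core,

/*
 *   CACHE_TYPE        = CACR_EUSP
 *   CACHE_MODE        = CACR_CENB + CACR_EUSP + CACR_DCM
 *   CACHE_INVALIDATEI = CACHE_MODE + CACR_CINV + CACR_INVI
 *   CACHE_INVALIDATED = CACHE_MODE + CACR_CINV + CACR_INVD
 *
 * so __flush_icache_all() and __flush_dcache_all() each write a CACR value
 * that invalidates only their half of the split cache. A CONFIG_CACHE_I
 * build defines no CACHE_INVALIDATED at all, leaving __flush_dcache_all()
 * with just the write buffer nop.
 */
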
+26 -1
arch/m68k/include/asm/m53xxacr.h
···
 #define ACR_WPROTECT	0x00000004	/* Write protect region */

 /*
+ * Define the cache type and arrangement (needed for pushes).
+ */
+#if defined(CONFIG_M5307)
+#define CACHE_SIZE	0x2000		/* 8k of unified cache */
+#define ICACHE_SIZE	CACHE_SIZE
+#define DCACHE_SIZE	CACHE_SIZE
+#elif defined(CONFIG_M532x)
+#define CACHE_SIZE	0x4000		/* 16k of unified cache */
+#define ICACHE_SIZE	CACHE_SIZE
+#define DCACHE_SIZE	CACHE_SIZE
+#endif
+
+#define CACHE_LINE_SIZE	16		/* 16 byte line size */
+#define CACHE_WAYS	4		/* 4 ways - set associative */
+
+/*
  * Set the cache controller settings we will use. This default in the
  * CACR is cache inhibited, we use the ACR register to set cacheing
  * enabled on the regions we want (eg RAM).
  */
 #if defined(CONFIG_CACHE_COPYBACK)
 #define CACHE_TYPE	ACR_CM_CB
+#define CACHE_PUSH
 #else
 #define CACHE_TYPE	ACR_CM_WT
 #endif
···
 #define CACHE_MODE	(CACR_EC + CACR_ESB + CACR_DCM_PRE + CACR_EUSP)
 #endif

-#define CACHE_INIT	CACR_CINVA
+/*
+ * Unified cache means we will never need to flush for coherency of
+ * instruction fetch. We will need to flush to maintain memory/DMA
+ * coherency though in all cases. And for copyback caches we will need
+ * to push cached data as well.
+ */
+#define CACHE_INIT	  CACR_CINVA
+#define CACHE_INVALIDATE  CACR_CINVA
+#define CACHE_INVALIDATED CACR_CINVA

 #define ACR0_MODE	((CONFIG_RAMBASE & 0xff000000) + \
 			(0x000f0000) + \
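
The new CACHE_SIZE/CACHE_LINE_SIZE/CACHE_WAYS constants exist to drive the
copyback push. The patch calls mcf_cache_push() but does not define it in
the hunks shown; a sketch of what a cpushl-based push over this 4-way cache
can look like, assuming those constants:

/*
 * Assumed implementation sketch, not part of the patch shown here.
 * Walks every line of every way of the data cache, pushing each one.
 */
static inline void mcf_cache_push(void)
{
	__asm__ __volatile__ (
		"clrl	%%d0\n\t"		/* d0 = way number */
		"1:\n\t"
		"movel	%%d0,%%a0\n\t"		/* a0 = way in low bits */
		"2:\n\t"
		".word	0xf468\n\t"		/* cpushl %dc,(%a0) */
		"addl	%0,%%a0\n\t"		/* step to the next line */
		"cmpl	%1,%%a0\n\t"
		"blt	2b\n\t"			/* more lines in this way? */
		"addql	#1,%%d0\n\t"		/* next way */
		"cmpil	%2,%%d0\n\t"
		"bne	1b\n\t"
		: /* no outputs */
		: "i" (CACHE_LINE_SIZE),
		  "i" (DCACHE_SIZE / CACHE_WAYS),
		  "i" (CACHE_WAYS)
		: "d0", "a0" );
}
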
+3 -4
arch/m68k/include/asm/m54xxacr.h
···
 #define INSN_CACHE_MODE	(ACR_ENABLE+ACR_ANY)

 #define CACHE_INIT	  (CACR_DCINVA+CACR_BCINVA+CACR_ICINVA)
-#define CACHE_INVALIDATE (CACHE_MODE+CACR_DCINVA+CACR_BCINVA+CACR_ICINVA)
+#define CACHE_INVALIDATE  (CACHE_MODE+CACR_DCINVA+CACR_BCINVA+CACR_ICINVA)
+#define CACHE_INVALIDATEI (CACHE_MODE+CACR_BCINVA+CACR_ICINVA)
+#define CACHE_INVALIDATED (CACHE_MODE+CACR_DCINVA)
 #define ACR0_MODE	(0x000f0000+DATA_CACHE_MODE)
 #define ACR1_MODE	0
 #define ACR2_MODE	(0x000f0000+INSN_CACHE_MODE)
 #define ACR3_MODE	0

-#if ((DATA_CACHE_MODE & ACR_CM) == ACR_CM_WT)
-#define flush_dcache_range(a, l)	do { asm("nop"); } while (0)
-#endif
 #if ((DATA_CACHE_MODE & ACR_CM) == ACR_CM_CP)
 /* Copyback cache mode must push dirty cache lines first */
 #define CACHE_PUSH
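
The version 4 (m54xx) split-cache case then reads off the defines above
(symbolic only, no new content):

/*
 *   __flush_icache_all(): CACR <- CACHE_MODE + CACR_BCINVA + CACR_ICINVA
 *                         (branch cache and icache invalidated together)
 *   __flush_dcache_all(): mcf_cache_push() first when copyback selects
 *                         CACHE_PUSH, then CACR <- CACHE_MODE + CACR_DCINVA
 */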