Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sh: prefix sh-specific "CCR" and "CCR2" by "SH_"

Commit bcf24e1daa94 ("mmc: omap_hsmmc: use the generic config for
omap2plus devices"), enabled the build for other platforms for compile
testing.

sh-allmodconfig now fails with:

include/linux/omap-dma.h:171:8: error: expected identifier before numeric constant
make[4]: *** [drivers/mmc/host/omap_hsmmc.o] Error 1

This happens because SuperH #defines "CCR", which is one of the enum
values in include/linux/omap-dma.h. There's a similar issue with "CCR2"
on sh2a.

As "CCR" and "CCR2" are too generic names for global #defines, prefix
them with "SH_" to fix this.

Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Geert Uytterhoeven; committed by Linus Torvalds.
a5f6ea29 15c34a76

+20 -18
+1 -1
arch/sh/include/cpu-sh2/cpu/cache.h
@@ -18,7 +18,7 @@
 #define SH_CACHE_ASSOC		8
 
 #if defined(CONFIG_CPU_SUBTYPE_SH7619)
-#define CCR		0xffffffec
+#define SH_CCR		0xffffffec
 
 #define CCR_CACHE_CE	0x01	/* Cache enable */
 #define CCR_CACHE_WT	0x02	/* CCR[bit1=1,bit2=1] */
+2 -2
arch/sh/include/cpu-sh2a/cpu/cache.h
@@ -17,8 +17,8 @@
 #define SH_CACHE_COMBINED	4
 #define SH_CACHE_ASSOC		8
 
-#define CCR		0xfffc1000 /* CCR1 */
-#define CCR2		0xfffc1004
+#define SH_CCR		0xfffc1000 /* CCR1 */
+#define SH_CCR2		0xfffc1004
 
 /*
  * Most of the SH-2A CCR1 definitions resemble the SH-4 ones. All others not
+1 -1
arch/sh/include/cpu-sh3/cpu/cache.h
@@ -17,7 +17,7 @@
 #define SH_CACHE_COMBINED	4
 #define SH_CACHE_ASSOC		8
 
-#define CCR		0xffffffec	/* Address of Cache Control Register */
+#define SH_CCR		0xffffffec	/* Address of Cache Control Register */
 
 #define CCR_CACHE_CE	0x01	/* Cache Enable */
 #define CCR_CACHE_WT	0x02	/* Write-Through (for P0,U0,P3) (else writeback) */
+1 -1
arch/sh/include/cpu-sh4/cpu/cache.h
@@ -17,7 +17,7 @@
 #define SH_CACHE_COMBINED	4
 #define SH_CACHE_ASSOC		8
 
-#define CCR		0xff00001c	/* Address of Cache Control Register */
+#define SH_CCR		0xff00001c	/* Address of Cache Control Register */
 #define CCR_CACHE_OCE	0x0001	/* Operand Cache Enable */
 #define CCR_CACHE_WT	0x0002	/* Write-Through (for P0,U0,P3) (else writeback)*/
 #define CCR_CACHE_CB	0x0004	/* Copy-Back (for P1) (else writethrough) */
+2 -2
arch/sh/kernel/cpu/init.c
@@ -112,7 +112,7 @@
 	unsigned long ccr, flags;
 
 	jump_to_uncached();
-	ccr = __raw_readl(CCR);
+	ccr = __raw_readl(SH_CCR);
 
 	/*
 	 * At this point we don't know whether the cache is enabled or not - a
@@ -189,7 +189,7 @@
 
 	l2_cache_init();
 
-	__raw_writel(flags, CCR);
+	__raw_writel(flags, SH_CCR);
 	back_to_cached();
 }
 #else
+1 -1
arch/sh/mm/cache-debugfs.c
@@ -36,7 +36,7 @@
 	 */
 	jump_to_uncached();
 
-	ccr = __raw_readl(CCR);
+	ccr = __raw_readl(SH_CCR);
 	if ((ccr & CCR_CACHE_ENABLE) == 0) {
 		back_to_cached();
 
+2 -2
arch/sh/mm/cache-sh2.c
@@ -63,9 +63,9 @@
 	local_irq_save(flags);
 	jump_to_uncached();
 
-	ccr = __raw_readl(CCR);
+	ccr = __raw_readl(SH_CCR);
 	ccr |= CCR_CACHE_INVALIDATE;
-	__raw_writel(ccr, CCR);
+	__raw_writel(ccr, SH_CCR);
 
 	back_to_cached();
 	local_irq_restore(flags);
+4 -2
arch/sh/mm/cache-sh2a.c
@@ -134,7 +134,8 @@
 
 	/* If there are too many pages then just blow the cache */
 	if (((end - begin) >> PAGE_SHIFT) >= MAX_OCACHE_PAGES) {
-		__raw_writel(__raw_readl(CCR) | CCR_OCACHE_INVALIDATE, CCR);
+		__raw_writel(__raw_readl(SH_CCR) | CCR_OCACHE_INVALIDATE,
+			     SH_CCR);
 	} else {
 		for (v = begin; v < end; v += L1_CACHE_BYTES)
 			sh2a_invalidate_line(CACHE_OC_ADDRESS_ARRAY, v);
@@ -168,7 +167,8 @@
 	/* I-Cache invalidate */
 	/* If there are too many pages then just blow the cache */
 	if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
-		__raw_writel(__raw_readl(CCR) | CCR_ICACHE_INVALIDATE, CCR);
+		__raw_writel(__raw_readl(SH_CCR) | CCR_ICACHE_INVALIDATE,
+			     SH_CCR);
 	} else {
 		for (v = start; v < end; v += L1_CACHE_BYTES)
 			sh2a_invalidate_line(CACHE_IC_ADDRESS_ARRAY, v);
+2 -2
arch/sh/mm/cache-sh4.c
@@ -133,9 +133,9 @@
 	jump_to_uncached();
 
 	/* Flush I-cache */
-	ccr = __raw_readl(CCR);
+	ccr = __raw_readl(SH_CCR);
 	ccr |= CCR_CACHE_ICI;
-	__raw_writel(ccr, CCR);
+	__raw_writel(ccr, SH_CCR);
 
 	/*
 	 * back_to_cached() will take care of the barrier for us, don't add
+2 -2
arch/sh/mm/cache-shx3.c
@@ -19,7 +19,7 @@
 {
 	unsigned int ccr;
 
-	ccr = __raw_readl(CCR);
+	ccr = __raw_readl(SH_CCR);
 
 	/*
 	 * If we've got cache aliases, resolve them in hardware.
@@ -40,5 +40,5 @@
 	ccr |= CCR_CACHE_IBE;
 #endif
 
-	writel_uncached(ccr, CCR);
+	writel_uncached(ccr, SH_CCR);
 }
+2 -2
arch/sh/mm/cache.c
@@ -285,8 +285,8 @@
 {
 	unsigned int cache_disabled = 0;
 
-#ifdef CCR
-	cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE);
+#ifdef SH_CCR
+	cache_disabled = !(__raw_readl(SH_CCR) & CCR_CACHE_ENABLE);
 #endif
 
 	compute_alias(&boot_cpu_data.icache);