Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] powerpc: Merge cacheflush.h and cache.h

The ppc32 and ppc64 versions of cacheflush.h were almost identical.
The two versions of cache.h are fairly similar, except for a bunch of
register definitions in the ppc32 version which probably belong better
elsewhere. This patch, therefore, merges both headers. Notable
points:
- there are several functions in cacheflush.h which exist only
on ppc32 or only on ppc64. These are handled by #ifdef for now, but
these should probably be consolidated, along with the actual code
behind them later.
- Confusingly, both ppc32 and ppc64 have a
flush_dcache_range(), but they're subtly different: it uses dcbf on
ppc32 and dcbst on ppc64; ppc64 also has a flush_inval_dcache_range(),
which uses dcbf. These too should be merged and consolidated later.
- Also flush_dcache_range() was defined in cacheflush.h on
ppc64, and in cache.h on ppc32. In the merged version it's in
cacheflush.h
- On ppc32 flush_icache_range() is a normal function from
misc.S. On ppc64, it was a wrapper that tested a feature bit before
calling __flush_icache_range(), which does the actual flush. This
patch takes the ppc64 approach, which amounts to no change on ppc32,
since CPU_FTR_COHERENT_ICACHE will never be set there, but does mean
renaming flush_icache_range() to __flush_icache_range() in
arch/ppc/kernel/misc.S and arch/powerpc/kernel/misc_32.S
- The PReP register info from asm-ppc/cache.h has moved to
arch/ppc/platforms/prep_setup.c
- The 8xx register info from asm-ppc/cache.h has moved to a
new asm-powerpc/reg_8xx.h, included from reg.h
- flush_dcache_all() was defined on ppc32 (only), but was
never called (although it was exported). Thus this patch removes it
from cacheflush.h and from ARCH=powerpc (misc_32.S) entirely. It's
left in ARCH=ppc for now, with the prototype moved to ppc_ksyms.c.

Built for Walnut (ARCH=ppc), 32-bit multiplatform (pmac, CHRP and PReP
ARCH=ppc, pmac and CHRP ARCH=powerpc). Built and booted on POWER5
LPAR (ARCH=powerpc and ARCH=ppc64).

Built for 32-bit powermac (ARCH=ppc and ARCH=powerpc). Built and
booted on POWER5 LPAR (ARCH=powerpc and ARCH=ppc64). Built and booted
on G5 (ARCH=powerpc)

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>

authored by

David Gibson and committed by
Paul Mackerras
26ef5c09 e130bedb

+130 -204
+1 -22
arch/powerpc/kernel/misc_32.S
··· 519 519 * 520 520 * flush_icache_range(unsigned long start, unsigned long stop) 521 521 */ 522 - _GLOBAL(flush_icache_range) 522 + _GLOBAL(__flush_icache_range) 523 523 BEGIN_FTR_SECTION 524 524 blr /* for 601, do nothing */ 525 525 END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE) ··· 606 606 bdnz 1b 607 607 sync /* wait for dcbi's to get to ram */ 608 608 blr 609 - 610 - #ifdef CONFIG_NOT_COHERENT_CACHE 611 - /* 612 - * 40x cores have 8K or 16K dcache and 32 byte line size. 613 - * 44x has a 32K dcache and 32 byte line size. 614 - * 8xx has 1, 2, 4, 8K variants. 615 - * For now, cover the worst case of the 44x. 616 - * Must be called with external interrupts disabled. 617 - */ 618 - #define CACHE_NWAYS 64 619 - #define CACHE_NLINES 16 620 - 621 - _GLOBAL(flush_dcache_all) 622 - li r4, (2 * CACHE_NWAYS * CACHE_NLINES) 623 - mtctr r4 624 - lis r5, KERNELBASE@h 625 - 1: lwz r3, 0(r5) /* Load one word from every line */ 626 - addi r5, r5, L1_CACHE_BYTES 627 - bdnz 1b 628 - blr 629 - #endif /* CONFIG_NOT_COHERENT_CACHE */ 630 609 631 610 /* 632 611 * Flush a particular page from the data cache to RAM.
+2 -2
arch/ppc/kernel/misc.S
··· 497 497 * and invalidate the corresponding instruction cache blocks. 498 498 * This is a no-op on the 601. 499 499 * 500 - * flush_icache_range(unsigned long start, unsigned long stop) 500 + * __flush_icache_range(unsigned long start, unsigned long stop) 501 501 */ 502 - _GLOBAL(flush_icache_range) 502 + _GLOBAL(__flush_icache_range) 503 503 BEGIN_FTR_SECTION 504 504 blr /* for 601, do nothing */ 505 505 END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
+1
arch/ppc/kernel/ppc_ksyms.c
··· 175 175 #endif /* CONFIG_PCI */ 176 176 177 177 #ifdef CONFIG_NOT_COHERENT_CACHE 178 + extern void flush_dcache_all(void); 178 179 EXPORT_SYMBOL(flush_dcache_all); 179 180 #endif 180 181
+9
arch/ppc/platforms/prep_setup.c
··· 61 61 #include <asm/pci-bridge.h> 62 62 #include <asm/todc.h> 63 63 64 + /* prep registers for L2 */ 65 + #define CACHECRBA 0x80000823 /* Cache configuration register address */ 66 + #define L2CACHE_MASK 0x03 /* Mask for 2 L2 Cache bits */ 67 + #define L2CACHE_512KB 0x00 /* 512KB */ 68 + #define L2CACHE_256KB 0x01 /* 256KB */ 69 + #define L2CACHE_1MB 0x02 /* 1MB */ 70 + #define L2CACHE_NONE 0x03 /* NONE */ 71 + #define L2CACHE_PARITY 0x08 /* Mask for L2 Cache Parity Protected bit */ 72 + 64 73 TODC_ALLOC(); 65 74 66 75 unsigned char ucSystemType;
+40
include/asm-powerpc/cache.h
··· 1 + #ifndef _ASM_POWERPC_CACHE_H 2 + #define _ASM_POWERPC_CACHE_H 3 + 4 + #ifdef __KERNEL__ 5 + 6 + #include <linux/config.h> 7 + 8 + /* bytes per L1 cache line */ 9 + #if defined(CONFIG_8xx) || defined(CONFIG_403GCX) 10 + #define L1_CACHE_SHIFT 4 11 + #define MAX_COPY_PREFETCH 1 12 + #elif defined(CONFIG_PPC32) 13 + #define L1_CACHE_SHIFT 5 14 + #define MAX_COPY_PREFETCH 4 15 + #else /* CONFIG_PPC64 */ 16 + #define L1_CACHE_SHIFT 7 17 + #endif 18 + 19 + #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) 20 + 21 + #define SMP_CACHE_BYTES L1_CACHE_BYTES 22 + #define L1_CACHE_SHIFT_MAX 7 /* largest L1 which this arch supports */ 23 + 24 + #if defined(__powerpc64__) && !defined(__ASSEMBLY__) 25 + struct ppc64_caches { 26 + u32 dsize; /* L1 d-cache size */ 27 + u32 dline_size; /* L1 d-cache line size */ 28 + u32 log_dline_size; 29 + u32 dlines_per_page; 30 + u32 isize; /* L1 i-cache size */ 31 + u32 iline_size; /* L1 i-cache line size */ 32 + u32 log_iline_size; 33 + u32 ilines_per_page; 34 + }; 35 + 36 + extern struct ppc64_caches ppc64_caches; 37 + #endif /* __powerpc64__ && ! __ASSEMBLY__ */ 38 + 39 + #endif /* __KERNEL__ */ 40 + #endif /* _ASM_POWERPC_CACHE_H */
+68
include/asm-powerpc/cacheflush.h
··· 1 + /* 2 + * This program is free software; you can redistribute it and/or 3 + * modify it under the terms of the GNU General Public License 4 + * as published by the Free Software Foundation; either version 5 + * 2 of the License, or (at your option) any later version. 6 + */ 7 + #ifndef _ASM_POWERPC_CACHEFLUSH_H 8 + #define _ASM_POWERPC_CACHEFLUSH_H 9 + 10 + #ifdef __KERNEL__ 11 + 12 + #include <linux/mm.h> 13 + #include <asm/cputable.h> 14 + 15 + /* 16 + * No cache flushing is required when address mappings are changed, 17 + * because the caches on PowerPCs are physically addressed. 18 + */ 19 + #define flush_cache_all() do { } while (0) 20 + #define flush_cache_mm(mm) do { } while (0) 21 + #define flush_cache_range(vma, start, end) do { } while (0) 22 + #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) 23 + #define flush_icache_page(vma, page) do { } while (0) 24 + #define flush_cache_vmap(start, end) do { } while (0) 25 + #define flush_cache_vunmap(start, end) do { } while (0) 26 + 27 + extern void flush_dcache_page(struct page *page); 28 + #define flush_dcache_mmap_lock(mapping) do { } while (0) 29 + #define flush_dcache_mmap_unlock(mapping) do { } while (0) 30 + 31 + extern void __flush_icache_range(unsigned long, unsigned long); 32 + static inline void flush_icache_range(unsigned long start, unsigned long stop) 33 + { 34 + if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) 35 + __flush_icache_range(start, stop); 36 + } 37 + 38 + extern void flush_icache_user_range(struct vm_area_struct *vma, 39 + struct page *page, unsigned long addr, 40 + int len); 41 + extern void __flush_dcache_icache(void *page_va); 42 + extern void flush_dcache_icache_page(struct page *page); 43 + #if defined(CONFIG_PPC32) && !defined(CONFIG_BOOKE) 44 + extern void __flush_dcache_icache_phys(unsigned long physaddr); 45 + #endif /* CONFIG_PPC32 && !CONFIG_BOOKE */ 46 + 47 + extern void flush_dcache_range(unsigned long start, unsigned long stop); 48 + #ifdef CONFIG_PPC32 49 + 
extern void clean_dcache_range(unsigned long start, unsigned long stop); 50 + extern void invalidate_dcache_range(unsigned long start, unsigned long stop); 51 + #endif /* CONFIG_PPC32 */ 52 + #ifdef CONFIG_PPC64 53 + extern void flush_inval_dcache_range(unsigned long start, unsigned long stop); 54 + extern void flush_dcache_phys_range(unsigned long start, unsigned long stop); 55 + #endif 56 + 57 + #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ 58 + do { \ 59 + memcpy(dst, src, len); \ 60 + flush_icache_user_range(vma, page, vaddr, len); \ 61 + } while (0) 62 + #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ 63 + memcpy(dst, src, len) 64 + 65 + 66 + #endif /* __KERNEL__ */ 67 + 68 + #endif /* _ASM_POWERPC_CACHEFLUSH_H */
+5 -1
include/asm-powerpc/reg.h
··· 16 16 /* Pickup Book E specific registers. */ 17 17 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x) 18 18 #include <asm/reg_booke.h> 19 - #endif 19 + #endif /* CONFIG_BOOKE || CONFIG_40x */ 20 + 21 + #ifdef CONFIG_8xx 22 + #include <asm/reg_8xx.h> 23 + #endif /* CONFIG_8xx */ 20 24 21 25 #define MSR_SF_LG 63 /* Enable 64 bit mode */ 22 26 #define MSR_ISF_LG 61 /* Interrupt 64b mode valid on 630 */
+4 -46
include/asm-ppc/cache.h include/asm-powerpc/reg_8xx.h
··· 1 1 /* 2 - * include/asm-ppc/cache.h 2 + * Contains register definitions common to PowerPC 8xx CPUs. Notice 3 3 */ 4 - #ifdef __KERNEL__ 5 - #ifndef __ARCH_PPC_CACHE_H 6 - #define __ARCH_PPC_CACHE_H 4 + #ifndef _ASM_POWERPC_REG_8xx_H 5 + #define _ASM_POWERPC_REG_8xx_H 7 6 8 - #include <linux/config.h> 9 - 10 - /* bytes per L1 cache line */ 11 - #if defined(CONFIG_8xx) || defined(CONFIG_403GCX) 12 - #define L1_CACHE_SHIFT 4 13 - #define MAX_COPY_PREFETCH 1 14 - #elif defined(CONFIG_PPC64BRIDGE) 15 - #define L1_CACHE_SHIFT 7 16 - #define MAX_COPY_PREFETCH 1 17 - #else 18 - #define L1_CACHE_SHIFT 5 19 - #define MAX_COPY_PREFETCH 4 20 - #endif 21 - 22 - #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) 23 - 24 - #define SMP_CACHE_BYTES L1_CACHE_BYTES 25 - #define L1_CACHE_SHIFT_MAX 7 /* largest L1 which this arch supports */ 26 - 27 - #define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)) 28 - #define L1_CACHE_PAGES 8 29 - 30 - #ifndef __ASSEMBLY__ 31 - extern void clean_dcache_range(unsigned long start, unsigned long stop); 32 - extern void flush_dcache_range(unsigned long start, unsigned long stop); 33 - extern void invalidate_dcache_range(unsigned long start, unsigned long stop); 34 - extern void flush_dcache_all(void); 35 - #endif /* __ASSEMBLY__ */ 36 - 37 - /* prep registers for L2 */ 38 - #define CACHECRBA 0x80000823 /* Cache configuration register address */ 39 - #define L2CACHE_MASK 0x03 /* Mask for 2 L2 Cache bits */ 40 - #define L2CACHE_512KB 0x00 /* 512KB */ 41 - #define L2CACHE_256KB 0x01 /* 256KB */ 42 - #define L2CACHE_1MB 0x02 /* 1MB */ 43 - #define L2CACHE_NONE 0x03 /* NONE */ 44 - #define L2CACHE_PARITY 0x08 /* Mask for L2 Cache Parity Protected bit */ 45 - 46 - #ifdef CONFIG_8xx 47 7 /* Cache control on the MPC8xx is provided through some additional 48 8 * special purpose registers. 
49 9 */ ··· 38 78 39 79 #define DC_DFWT 0x40000000 /* Data cache is forced write through */ 40 80 #define DC_LES 0x20000000 /* Caches are little endian mode */ 41 - #endif /* CONFIG_8xx */ 42 81 43 - #endif 44 - #endif /* __KERNEL__ */ 82 + #endif /* _ASM_POWERPC_REG_8xx_H */
-49
include/asm-ppc/cacheflush.h
··· 1 - /* 2 - * include/asm-ppc/cacheflush.h 3 - * 4 - * This program is free software; you can redistribute it and/or 5 - * modify it under the terms of the GNU General Public License 6 - * as published by the Free Software Foundation; either version 7 - * 2 of the License, or (at your option) any later version. 8 - */ 9 - #ifdef __KERNEL__ 10 - #ifndef _PPC_CACHEFLUSH_H 11 - #define _PPC_CACHEFLUSH_H 12 - 13 - #include <linux/mm.h> 14 - 15 - /* 16 - * No cache flushing is required when address mappings are 17 - * changed, because the caches on PowerPCs are physically 18 - * addressed. -- paulus 19 - * Also, when SMP we use the coherency (M) bit of the 20 - * BATs and PTEs. -- Cort 21 - */ 22 - #define flush_cache_all() do { } while (0) 23 - #define flush_cache_mm(mm) do { } while (0) 24 - #define flush_cache_range(vma, a, b) do { } while (0) 25 - #define flush_cache_page(vma, p, pfn) do { } while (0) 26 - #define flush_icache_page(vma, page) do { } while (0) 27 - #define flush_cache_vmap(start, end) do { } while (0) 28 - #define flush_cache_vunmap(start, end) do { } while (0) 29 - 30 - extern void flush_dcache_page(struct page *page); 31 - #define flush_dcache_mmap_lock(mapping) do { } while (0) 32 - #define flush_dcache_mmap_unlock(mapping) do { } while (0) 33 - 34 - extern void flush_icache_range(unsigned long, unsigned long); 35 - extern void flush_icache_user_range(struct vm_area_struct *vma, 36 - struct page *page, unsigned long addr, int len); 37 - 38 - #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ 39 - do { memcpy(dst, src, len); \ 40 - flush_icache_user_range(vma, page, vaddr, len); \ 41 - } while (0) 42 - #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ 43 - memcpy(dst, src, len) 44 - 45 - extern void __flush_dcache_icache(void *page_va); 46 - extern void __flush_dcache_icache_phys(unsigned long physaddr); 47 - extern void flush_dcache_icache_page(struct page *page); 48 - #endif /* _PPC_CACHEFLUSH_H */ 49 - #endif /* 
__KERNEL__ */
-36
include/asm-ppc64/cache.h
··· 1 - /* 2 - * This program is free software; you can redistribute it and/or 3 - * modify it under the terms of the GNU General Public License 4 - * as published by the Free Software Foundation; either version 5 - * 2 of the License, or (at your option) any later version. 6 - */ 7 - #ifndef __ARCH_PPC64_CACHE_H 8 - #define __ARCH_PPC64_CACHE_H 9 - 10 - #include <asm/types.h> 11 - 12 - /* bytes per L1 cache line */ 13 - #define L1_CACHE_SHIFT 7 14 - #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) 15 - 16 - #define SMP_CACHE_BYTES L1_CACHE_BYTES 17 - #define L1_CACHE_SHIFT_MAX 7 /* largest L1 which this arch supports */ 18 - 19 - #ifndef __ASSEMBLY__ 20 - 21 - struct ppc64_caches { 22 - u32 dsize; /* L1 d-cache size */ 23 - u32 dline_size; /* L1 d-cache line size */ 24 - u32 log_dline_size; 25 - u32 dlines_per_page; 26 - u32 isize; /* L1 i-cache size */ 27 - u32 iline_size; /* L1 i-cache line size */ 28 - u32 log_iline_size; 29 - u32 ilines_per_page; 30 - }; 31 - 32 - extern struct ppc64_caches ppc64_caches; 33 - 34 - #endif 35 - 36 - #endif
-48
include/asm-ppc64/cacheflush.h
··· 1 - #ifndef _PPC64_CACHEFLUSH_H 2 - #define _PPC64_CACHEFLUSH_H 3 - 4 - #include <linux/mm.h> 5 - #include <asm/cputable.h> 6 - 7 - /* 8 - * No cache flushing is required when address mappings are 9 - * changed, because the caches on PowerPCs are physically 10 - * addressed. 11 - */ 12 - #define flush_cache_all() do { } while (0) 13 - #define flush_cache_mm(mm) do { } while (0) 14 - #define flush_cache_range(vma, start, end) do { } while (0) 15 - #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) 16 - #define flush_icache_page(vma, page) do { } while (0) 17 - #define flush_cache_vmap(start, end) do { } while (0) 18 - #define flush_cache_vunmap(start, end) do { } while (0) 19 - 20 - extern void flush_dcache_page(struct page *page); 21 - #define flush_dcache_mmap_lock(mapping) do { } while (0) 22 - #define flush_dcache_mmap_unlock(mapping) do { } while (0) 23 - 24 - extern void __flush_icache_range(unsigned long, unsigned long); 25 - extern void flush_icache_user_range(struct vm_area_struct *vma, 26 - struct page *page, unsigned long addr, 27 - int len); 28 - 29 - extern void flush_dcache_range(unsigned long start, unsigned long stop); 30 - extern void flush_dcache_phys_range(unsigned long start, unsigned long stop); 31 - extern void flush_inval_dcache_range(unsigned long start, unsigned long stop); 32 - 33 - #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ 34 - do { memcpy(dst, src, len); \ 35 - flush_icache_user_range(vma, page, vaddr, len); \ 36 - } while (0) 37 - #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ 38 - memcpy(dst, src, len) 39 - 40 - extern void __flush_dcache_icache(void *page_va); 41 - 42 - static inline void flush_icache_range(unsigned long start, unsigned long stop) 43 - { 44 - if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) 45 - __flush_icache_range(start, stop); 46 - } 47 - 48 - #endif /* _PPC64_CACHEFLUSH_H */