/* From Linux v2.6.14 (48 lines, 1.7 kB) */
#ifndef _PPC64_CACHEFLUSH_H
#define _PPC64_CACHEFLUSH_H

#include <linux/mm.h>
#include <asm/cputable.h>

/*
 * No cache flushing is required when address mappings are
 * changed, because the caches on PowerPCs are physically
 * addressed.
 */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define flush_icache_page(vma, page)		do { } while (0)
#define flush_cache_vmap(start, end)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)

/* Flushing a page's D-cache lines is a real operation (defined out of
 * line); the dcache mmap lock/unlock hooks are no-ops on this arch. */
extern void flush_dcache_page(struct page *page);
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

/* Unconditional I-cache flush over [start, stop); callers normally go
 * through the flush_icache_range() wrapper below instead. */
extern void __flush_icache_range(unsigned long, unsigned long);
extern void flush_icache_user_range(struct vm_area_struct *vma,
				    struct page *page, unsigned long addr,
				    int len);

extern void flush_dcache_range(unsigned long start, unsigned long stop);
extern void flush_dcache_phys_range(unsigned long start, unsigned long stop);
extern void flush_inval_dcache_range(unsigned long start, unsigned long stop);

/*
 * After copying data into a user page, flush the I-cache for the copied
 * range so that any newly written instructions become visible to
 * instruction fetch.
 */
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { memcpy(dst, src, len); \
	flush_icache_user_range(vma, page, vaddr, len); \
} while (0)
/* Reading from a user page needs no cache maintenance here. */
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)

extern void __flush_dcache_icache(void *page_va);

/*
 * Flush the instruction cache over [start, stop), skipping the flush
 * entirely on CPUs that report a coherent I-cache
 * (CPU_FTR_COHERENT_ICACHE), where no maintenance is needed.
 */
static inline void flush_icache_range(unsigned long start, unsigned long stop)
{
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		__flush_icache_range(start, stop);
}

#endif /* _PPC64_CACHEFLUSH_H */