cacheflush.h (SH) at v2.6.24-rc4 · 75 lines · 2.8 kB
#ifndef __ASM_SH_CACHEFLUSH_H
#define __ASM_SH_CACHEFLUSH_H

#ifdef __KERNEL__

#ifdef CONFIG_CACHE_OFF
/*
 * Nothing to do when the cache is disabled, initial flush and explicit
 * disabling is handled at CPU init time.
 *
 * See arch/sh/kernel/cpu/init.c:cache_init().
 */
#define p3_cache_init()				do { } while (0)
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define flush_dcache_page(page)		do { } while (0)
#define flush_icache_range(start, end)		do { } while (0)
#define flush_icache_page(vma,pg)		do { } while (0)
#define flush_dcache_mmap_lock(mapping)	do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
#define flush_cache_sigtramp(vaddr)		do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
#define __flush_wback_region(start, size)	do { (void)(start); } while (0)
#define __flush_purge_region(start, size)	do { (void)(start); } while (0)
#define __flush_invalidate_region(start, size)	do { (void)(start); } while (0)
#else
#include <asm/cpu/cacheflush.h>

/*
 * Consistent DMA requires that the __flush_xxx() primitives must be set
 * for any of the enabled non-coherent caches (most of the UP CPUs),
 * regardless of PIPT or VIPT cache configurations.
 */

/* Flush (write-back only) a region (smaller than a page) */
extern void __flush_wback_region(void *start, int size);
/* Flush (write-back & invalidate) a region (smaller than a page) */
extern void __flush_purge_region(void *start, int size);
/* Flush (invalidate only) a region (smaller than a page) */
extern void __flush_invalidate_region(void *start, int size);
#endif

#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_CACHE_OFF)
extern void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);

extern void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);
#else
#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
	do {							\
		flush_cache_page(vma, vaddr, page_to_pfn(page));\
		memcpy(dst, src, len);				\
		flush_icache_user_range(vma, page, vaddr, len);	\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len)	\
	do {							\
		flush_cache_page(vma, vaddr, page_to_pfn(page));\
		memcpy(dst, src, len);				\
	} while (0)
#endif

#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()

#define HAVE_ARCH_UNMAPPED_AREA

#endif /* __KERNEL__ */
#endif /* __ASM_SH_CACHEFLUSH_H */
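
A minimal usage sketch (not part of this file): how driver or arch code on a non-coherent SH CPU might bracket a DMA transfer with the region primitives declared above. The function names and the controller-programming step are hypothetical placeholders; only the __flush_wback_region()/__flush_invalidate_region() prototypes come from this header.

#include <asm/cacheflush.h>

/* CPU -> device: write dirty lines back so the device reads current data. */
static void example_sync_for_device(void *buf, int len)
{
	__flush_wback_region(buf, len);
	/* ...program the DMA controller here (hypothetical step)... */
}

/* Device -> CPU: drop stale lines so loads re-fetch what the device wrote. */
static void example_sync_for_cpu(void *buf, int len)
{
	__flush_invalidate_region(buf, len);
}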