#ifndef _PARISC_CACHEFLUSH_H
#define _PARISC_CACHEFLUSH_H

#include <linux/mm.h>
#include <linux/uaccess.h>
#include <asm/tlbflush.h>

/* The usual comment is "Caches aren't brain-dead on the <architecture>".
 * Unfortunately, that doesn't apply to PA-RISC. */

/* Internal implementation */
void flush_data_cache_local(void *);	/* flushes local data-cache only */
void flush_instruction_cache_local(void *); /* flushes local code-cache only */
#ifdef CONFIG_SMP
void flush_data_cache(void);	/* flushes data-cache only (all processors) */
void flush_instruction_cache(void);	/* flushes i-cache only (all processors) */
#else
#define flush_data_cache() flush_data_cache_local(NULL)
#define flush_instruction_cache() flush_instruction_cache_local(NULL)
#endif
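/* On SMP the non-local variants (defined in cache.c) run the matching
 * *_local() routine on every processor; the void * parameter is the
 * unused per-CPU callback argument, which is why the UP fallbacks above
 * simply pass NULL. */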

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

void flush_user_icache_range_asm(unsigned long, unsigned long);
void flush_kernel_icache_range_asm(unsigned long, unsigned long);
void flush_user_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_page_asm(void *);
void flush_kernel_icache_page(void *);

/* Cache flush operations */

void flush_cache_all_local(void);
void flush_cache_all(void);
void flush_cache_mm(struct mm_struct *mm);

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
void flush_kernel_dcache_page_addr(void *addr);
static inline void flush_kernel_dcache_page(struct page *page)
{
	flush_kernel_dcache_page_addr(page_address(page));
}
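/* Example (illustrative sketch, not from this file): code that has
 * written a page through its kernel mapping flushes it before the page
 * is read through a different (user or device) alias:
 *
 *	void *kaddr = page_address(page);	// 'page' is hypothetical
 *	memset(kaddr, 0, PAGE_SIZE);
 *	flush_kernel_dcache_page(page);
 */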

#define flush_kernel_dcache_range(start,size) \
	flush_kernel_dcache_range_asm((start), (start)+(size))
/* vmap range flushes and invalidates.  Architecturally, we don't need
 * the invalidate, because the CPU should refuse to speculate once an
 * area has been flushed, so "invalidate" is implemented as a write-back
 * flush of the range, plus a flush of any page whose kernel alias is
 * still marked dirty. */
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;

	flush_kernel_dcache_range_asm(start, start + size);
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	void *cursor = vaddr;

	for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) {
		struct page *page = vmalloc_to_page(cursor);

		if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
			flush_kernel_dcache_page(page);
	}
	flush_kernel_dcache_range_asm(start, start + size);
}
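/* Example (illustrative sketch; 'buf' and 'len' are hypothetical): a
 * driver doing I/O to pages it reaches through a vmap() alias would
 * bracket the transfer like this:
 *
 *	flush_kernel_vmap_range(buf, len);	// write back CPU stores
 *	... device reads/writes the underlying pages ...
 *	invalidate_kernel_vmap_range(buf, len);	// before CPU reads results
 */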

#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *page);

#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_page(vma,page)	do {		\
	flush_kernel_dcache_page(page);			\
	flush_kernel_icache_page(page_address(page));	\
} while (0)

#define flush_icache_range(s,e)		do {		\
	flush_kernel_dcache_range_asm(s,e);		\
	flush_kernel_icache_range_asm(s,e);		\
} while (0)

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
	memcpy(dst, src, len); \
	flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len); \
} while (0)
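/* copy_to_user_page() is what ptrace and friends use to write into
 * another process's page (e.g. to plant a breakpoint): the target is
 * flushed first so the kernel alias doesn't hold stale lines, and the
 * new data is written back afterwards because the user may go on to
 * execute it. */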

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
	memcpy(dst, src, len); \
} while (0)

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn);
void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end);

/* Defined in pacache.S, exported in cache.c, used by flush_anon_page. */
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);

#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void
flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	if (PageAnon(page)) {
		flush_tlb_page(vma, vmaddr);
		preempt_disable();
		flush_dcache_page_asm(page_to_phys(page), vmaddr);
		preempt_enable();
	}
}
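/* Anonymous pages have no address_space, so flush_dcache_page() cannot
 * find their user mappings; the flush is done here instead, through a
 * temporary alias of the physical page.  Preemption is disabled so the
 * temporary mapping is set up and consumed on the same CPU. */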

#include <asm/kmap_types.h>

#define ARCH_HAS_KMAP

static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap(struct page *page)
{
	flush_kernel_dcache_page_addr(page_address(page));
}
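/* PA-RISC has no highmem, so kmap() is just the page's linear address.
 * The flush in kunmap() writes back anything stored through that kernel
 * alias so that differently-coloured user mappings of the page see the
 * data. */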

static inline void *kmap_atomic(struct page *page)
{
	preempt_disable();
	pagefault_disable();
	return page_address(page);
}

static inline void __kunmap_atomic(void *addr)
{
	flush_kernel_dcache_page_addr(addr);
	pagefault_enable();
	preempt_enable();
}
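/* Example (illustrative sketch; 'to' and 'from' are hypothetical
 * pages): kmap_atomic() pairs must nest and stay on one CPU, which the
 * preempt/pagefault bracketing above guarantees:
 *
 *	void *vto = kmap_atomic(to);
 *	void *vfrom = kmap_atomic(from);
 *	memcpy(vto, vfrom, PAGE_SIZE);
 *	__kunmap_atomic(vfrom);		// unmap in reverse order
 *	__kunmap_atomic(vto);
 */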

#define kmap_atomic_prot(page, prot)	kmap_atomic(page)
#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))

#endif /* _PARISC_CACHEFLUSH_H */