#ifndef _S390_TLBFLUSH_H
#define _S390_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>

/*
 * Flush all TLB entries on the local CPU.
 */
static inline void __tlb_flush_local(void)
{
	/* ptlb purges all TLB entries of the issuing CPU only */
	asm volatile("ptlb" : : : "memory");
}

/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_idte(unsigned long asce)
{
	/*
	 * Global TLB flush for the mm: the .insn emits IDTE (opcode
	 * 0xb98e); the trailing 0 requests clearing on all CPUs (the
	 * local variant below uses 1 instead).
	 */
	asm volatile(
		"	.insn	rrf,0xb98e0000,0,%0,%1,0"
		: : "a" (2048), "a" (asce) : "cc");
}

/*
 * Flush TLB entries for a specific ASCE on the local CPU.
 */
static inline void __tlb_flush_idte_local(unsigned long asce)
{
	/*
	 * Local TLB flush for the mm: same IDTE encoding as above,
	 * but the trailing 1 restricts the flush to the local CPU
	 * (requires the TLB local-clearing facility).
	 */
	asm volatile(
		"	.insn	rrf,0xb98e0000,0,%0,%1,1"
		: : "a" (2048), "a" (asce) : "cc");
}

#ifdef CONFIG_SMP
void smp_ptlb_all(void);

/*
 * Flush all TLB entries on all CPUs.
 */
static inline void __tlb_flush_global(void)
{
	register unsigned long reg2 asm("2");
	register unsigned long reg3 asm("3");
	register unsigned long reg4 asm("4");
	long dummy;

#ifndef CONFIG_64BIT
	if (!MACHINE_HAS_CSP) {
		smp_ptlb_all();
		return;
	}
#endif /* CONFIG_64BIT */

	/*
	 * csp (compare and swap and purge) swaps the dummy word and,
	 * as a side effect, purges the TLBs of all CPUs in the
	 * configuration. reg2/reg3 form the even/odd register pair
	 * the instruction requires; reg4 carries the second-operand
	 * address with its low-order bit set (the "+ 1").
	 */
	dummy = 0;
	reg2 = reg3 = 0;
	reg4 = ((unsigned long) &dummy) + 1;
	asm volatile(
		"	csp	%0,%2"
		: : "d" (reg2), "d" (reg3), "d" (reg4), "m" (dummy) : "cc");
}

/*
 * Flush TLB entries for a specific mm on all CPUs (when gmap is in
 * use this may involve multiple ASCEs!).
 */
static inline void __tlb_flush_full(struct mm_struct *mm)
{
	preempt_disable();
	/* bump the flusher count kept in the upper 16 bits of attach_count */
	atomic_add(0x10000, &mm->context.attach_count);
	if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
		/* Local TLB flush */
		__tlb_flush_local();
	} else {
		/* Global TLB flush */
		__tlb_flush_global();
		/* Reset TLB flush mask */
		if (MACHINE_HAS_TLB_LC)
			cpumask_copy(mm_cpumask(mm),
				     &mm->context.cpu_attach_mask);
	}
	atomic_sub(0x10000, &mm->context.attach_count);
	preempt_enable();
}

/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
{
	int active, count;

	preempt_disable();
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	/*
	 * The lower 16 bits of attach_count track attached CPUs; if
	 * no CPU other than (possibly) this one has the mm attached,
	 * a local IDTE flush is sufficient.
	 */
	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
		__tlb_flush_idte_local(asce);
	} else {
		if (MACHINE_HAS_IDTE)
			__tlb_flush_idte(asce);
		else
			__tlb_flush_global();
		/* Reset TLB flush mask */
		if (MACHINE_HAS_TLB_LC)
			cpumask_copy(mm_cpumask(mm),
				     &mm->context.cpu_attach_mask);
	}
	atomic_sub(0x10000, &mm->context.attach_count);
	preempt_enable();
}

static inline void __tlb_flush_kernel(void)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) init_mm.pgd |
				 init_mm.context.asce_bits);
	else
		__tlb_flush_global();
}
#else
#define __tlb_flush_global()	__tlb_flush_local()
#define __tlb_flush_full(mm)	__tlb_flush_local()

/*
 * Flush TLB entries for a specific ASCE (on !SMP there is only the
 * local CPU).
 */
static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
{
	if (MACHINE_HAS_TLB_LC)
		__tlb_flush_idte_local(asce);
	else
		__tlb_flush_local();
}

static inline void __tlb_flush_kernel(void)
{
	if (MACHINE_HAS_TLB_LC)
		__tlb_flush_idte_local((unsigned long) init_mm.pgd |
				       init_mm.context.asce_bits);
	else
		__tlb_flush_local();
}
#endif

static inline void __tlb_flush_mm(struct mm_struct *mm)
{
	/*
	 * If the machine has IDTE we prefer to do a per mm flush
	 * on all cpus instead of doing a local flush if the mm
	 * only ran on the local cpu.
	 */
	if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
		__tlb_flush_asce(mm, (unsigned long) mm->pgd |
				 mm->context.asce_bits);
	else
		__tlb_flush_full(mm);
}

static inline void __tlb_flush_mm_lazy(struct mm_struct *mm)
{
	if (mm->context.flush_mm) {
		__tlb_flush_mm(mm);
		mm->context.flush_mm = 0;
	}
}
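
/*
 * Usage sketch (illustrative only, not part of this header): a PTE
 * update path may skip the hardware flush and record the pending work
 * in mm->context.flush_mm; the next __tlb_flush_mm_lazy() call then
 * does a single flush for the whole mm. example_invalidate_pte() is a
 * made-up helper.
 */
#if 0	/* example only, not compiled */
static inline void example_invalidate_pte(struct mm_struct *mm, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID;	/* invalidate the PTE in memory ... */
	mm->context.flush_mm = 1;	/* ... and defer the TLB flush */
}
#endif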

/*
 * TLB flushing:
 *  flush_tlb() - flushes the current mm struct TLBs
 *  flush_tlb_all() - flushes all processes' TLBs
 *  flush_tlb_mm(mm) - flushes the specified mm context TLBs
 *  flush_tlb_page(vma, vmaddr) - flushes one page
 *  flush_tlb_range(vma, start, end) - flushes a range of pages
 *  flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
 */

/*
 * flush_tlb_mm goes together with ptep_set_wrprotect for the
 * copy_page_range operation and flush_tlb_range is related to
 * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
 * ptep_get_and_clear do not flush the TLBs directly if the mm has
 * only one user. At the end of the update the flush_tlb_mm and
 * flush_tlb_range functions need to do the flush.
 */
#define flush_tlb()			do { } while (0)
#define flush_tlb_all()			do { } while (0)
#define flush_tlb_page(vma, addr)	do { } while (0)

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	__tlb_flush_mm_lazy(mm);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__tlb_flush_mm_lazy(vma->vm_mm);
}
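
/*
 * Illustrative sketch of the deferred flush described above (made-up
 * code, not part of the kernel): ptep_set_wrprotect() leaves the TLB
 * untouched for each entry, so a single flush_tlb_range() at the end
 * covers the whole batch. Assumes ptep points at the PTE for start
 * and the range stays within one page table (simplified).
 */
#if 0	/* example only, not compiled */
static void example_wrprotect_range(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end,
				    pte_t *ptep)
{
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE, ptep++)
		ptep_set_wrprotect(vma->vm_mm, addr, ptep);
	/* one deferred flush for every entry changed above */
	flush_tlb_range(vma, start, end);
}
#endif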

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	__tlb_flush_kernel();
}
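
/*
 * Illustrative sketch (made-up code): after tearing down a kernel
 * mapping, one call flushes the affected range. As the function above
 * shows, s390 ignores the range arguments and flushes the whole
 * kernel ASCE. example_unmap_kernel_page() is a hypothetical helper.
 */
#if 0	/* example only, not compiled */
static void example_teardown(unsigned long addr, unsigned long size)
{
	example_unmap_kernel_page(addr, size);	/* hypothetical */
	flush_tlb_kernel_range(addr, addr + size);
}
#endif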

#endif /* _S390_TLBFLUSH_H */