/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_INTERNAL_H
#define _LINUX_HIGHMEM_INTERNAL_H

/*
 * Outside of CONFIG_HIGHMEM to support X86 32bit iomap_atomic() cruft.
 */
#ifdef CONFIG_KMAP_LOCAL
void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
void kunmap_local_indexed(void *vaddr);
void kmap_local_fork(struct task_struct *tsk);
void __kmap_local_sched_out(void);
void __kmap_local_sched_in(void);
static inline void kmap_assert_nomap(void)
{
	DEBUG_LOCKS_WARN_ON(current->kmap_ctrl.idx);
}
#else
static inline void kmap_local_fork(struct task_struct *tsk) { }
static inline void kmap_assert_nomap(void) { }
#endif

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

#ifndef ARCH_HAS_KMAP_FLUSH_TLB
static inline void kmap_flush_tlb(unsigned long addr) { }
#endif

#ifndef kmap_prot
#define kmap_prot PAGE_KERNEL
#endif

void *kmap_high(struct page *page);
void kunmap_high(struct page *page);
void __kmap_flush_unused(void);
struct page *__kmap_to_page(void *addr);

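/*
 * kmap() establishes a long-lived mapping for a highmem page via
 * kmap_high(); lowmem pages are already covered by the direct map, so
 * only the optional TLB flush hook runs for them. The might_sleep()
 * documents that this path may sleep.
 */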
static inline void *kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		addr = page_address(page);
	else
		addr = kmap_high(page);
	kmap_flush_tlb((unsigned long)addr);
	return addr;
}

static inline void kunmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}

static inline struct page *kmap_to_page(void *addr)
{
	return __kmap_to_page(addr);
}

static inline void kmap_flush_unused(void)
{
	__kmap_flush_unused();
}

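/*
 * The kmap_local_*() variants are built on the CONFIG_KMAP_LOCAL
 * infrastructure declared above: mappings are per task and CPU local,
 * and must be released with kunmap_local() in reverse order. Unlike
 * kmap_atomic(), they do not disable preemption or pagefaults.
 */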
static inline void *kmap_local_page(struct page *page)
{
	return __kmap_local_page_prot(page, kmap_prot);
}

static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
{
	return __kmap_local_page_prot(page, prot);
}

static inline void *kmap_local_pfn(unsigned long pfn)
{
	return __kmap_local_pfn_prot(pfn, kmap_prot);
}

static inline void __kunmap_local(void *vaddr)
{
	kunmap_local_indexed(vaddr);
}

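/*
 * kmap_atomic*() is implemented on top of the same kmap_local machinery,
 * but additionally disables preemption and pagefaults before mapping;
 * __kunmap_atomic() undoes both after tearing the mapping down.
 */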
static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	preempt_disable();
	pagefault_disable();
	return __kmap_local_page_prot(page, prot);
}

static inline void *kmap_atomic(struct page *page)
{
	return kmap_atomic_prot(page, kmap_prot);
}

static inline void *kmap_atomic_pfn(unsigned long pfn)
{
	preempt_disable();
	pagefault_disable();
	return __kmap_local_pfn_prot(pfn, kmap_prot);
}

static inline void __kunmap_atomic(void *addr)
{
	kunmap_local_indexed(addr);
	pagefault_enable();
	preempt_enable();
}

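/* Accounting of free and total highmem pages. */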
unsigned int __nr_free_highpages(void);
extern atomic_long_t _totalhigh_pages;

static inline unsigned int nr_free_highpages(void)
{
	return __nr_free_highpages();
}

static inline unsigned long totalhigh_pages(void)
{
	return (unsigned long)atomic_long_read(&_totalhigh_pages);
}

static inline void totalhigh_pages_inc(void)
{
	atomic_long_inc(&_totalhigh_pages);
}

static inline void totalhigh_pages_add(long count)
{
	atomic_long_add(count, &_totalhigh_pages);
}

#else /* CONFIG_HIGHMEM */

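/*
 * Without CONFIG_HIGHMEM every page is in the kernel direct map, so the
 * map/unmap helpers reduce to page_address()/virt_to_page(), plus an
 * optional flush on architectures defining ARCH_HAS_FLUSH_ON_KUNMAP.
 */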
static inline struct page *kmap_to_page(void *addr)
{
	return virt_to_page(addr);
}

static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap_high(struct page *page) { }
static inline void kmap_flush_unused(void) { }

static inline void kunmap(struct page *page)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(page_address(page));
#endif
}

static inline void *kmap_local_page(struct page *page)
{
	return page_address(page);
}

static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
{
	return kmap_local_page(page);
}

static inline void *kmap_local_pfn(unsigned long pfn)
{
	return kmap_local_page(pfn_to_page(pfn));
}

static inline void __kunmap_local(void *addr)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(addr);
#endif
}

static inline void *kmap_atomic(struct page *page)
{
	preempt_disable();
	pagefault_disable();
	return page_address(page);
}

static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	return kmap_atomic(page);
}

static inline void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic(pfn_to_page(pfn));
}

static inline void __kunmap_atomic(void *addr)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(addr);
#endif
	pagefault_enable();
	preempt_enable();
}

static inline unsigned int nr_free_highpages(void) { return 0; }
static inline unsigned long totalhigh_pages(void) { return 0UL; }

#endif /* CONFIG_HIGHMEM */

/*
 * Prevent people from calling kunmap_atomic() as if it were kunmap();
 * kunmap_atomic() must be passed the return value of kmap_atomic(),
 * not the page.
 */
#define kunmap_atomic(__addr)					\
do {								\
	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
	__kunmap_atomic(__addr);				\
} while (0)

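/* Same guard for kunmap_local(): it takes the mapped address, not the page. */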
#define kunmap_local(__addr)					\
do {								\
	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
	__kunmap_local(__addr);					\
} while (0)

#endif