/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include <asm/cacheflush.h>

#include "highmem-internal.h"

/**
 * kmap - Map a page for long term usage
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can only be invoked from preemptible task context because on 32bit
 * systems with CONFIG_HIGHMEM enabled this function might sleep.
 *
 * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area
 * this returns the virtual address of the direct kernel mapping.
 *
 * The returned virtual address is globally visible and valid up to the
 * point where it is unmapped via kunmap(). The pointer can be handed to
 * other contexts.
 *
 * For highmem pages on 32bit systems this can be slow as the mapping space
 * is limited and protected by a global lock. In case that there is no
 * mapping slot available the function blocks until a slot is released via
 * kunmap().
 */
static inline void *kmap(struct page *page);

/**
 * kunmap - Unmap the virtual address mapped by kmap()
 * @page: Pointer to the page which was mapped by kmap()
 *
 * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
 * pages in the low memory area.
 */
static inline void kunmap(struct page *page);

/**
 * kmap_to_page - Get the page for a kmap'ed address
 * @addr: The address to look up
 *
 * Returns: The page which is mapped to @addr.
 */
static inline struct page *kmap_to_page(void *addr);

/**
 * kmap_flush_unused - Flush all unused kmap mappings in order to
 *		       remove stray mappings
 */
static inline void kmap_flush_unused(void);

/**
 * kmap_local_page - Map a page for temporary usage
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can be invoked from any context.
 *
 * Requires careful handling when nesting multiple mappings because the map
 * management is stack based. The unmap has to be in the reverse order of
 * the map operation:
 *
 * addr1 = kmap_local_page(page1);
 * addr2 = kmap_local_page(page2);
 * ...
 * kunmap_local(addr2);
 * kunmap_local(addr1);
 *
 * Unmapping addr1 before addr2 is invalid and causes malfunction.
 *
 * Contrary to kmap() mappings the mapping is only valid in the context of
 * the caller and cannot be handed to other contexts.
 *
 * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
 * virtual address of the direct mapping. Only real highmem pages are
 * temporarily mapped.
 *
 * While it is significantly faster than kmap() for the highmem case it
 * comes with restrictions about the pointer validity. Only use when really
 * necessary.
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
 * disabling migration in order to keep the virtual address stable across
 * preemption. No caller of kmap_local_page() can rely on this side effect.
 */
static inline void *kmap_local_page(struct page *page);
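
/*
 * Illustrative sketch (editor's example, not part of the original header):
 * reading the start of a possibly-highmem page via the local-mapping API.
 * The function name and buffer are hypothetical; the point shown is the
 * kmap_local_page()/kunmap_local() pairing and the fact that the mapping
 * must not outlive or leave the calling context.
 */
#if 0	/* example only */
static void example_peek_page(struct page *page, void *buf, size_t len)
{
	void *kaddr = kmap_local_page(page);

	/* Mapping is only valid here; do not hand kaddr to other contexts. */
	memcpy(buf, kaddr, min_t(size_t, len, PAGE_SIZE));
	kunmap_local(kaddr);
}
#endif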
/**
 * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Effectively a wrapper around kmap_local_page() which disables pagefaults
 * and preemption.
 *
 * Do not use in new code. Use kmap_local_page() instead.
 */
static inline void *kmap_atomic(struct page *page);

/**
 * kunmap_atomic - Unmap the virtual address mapped by kmap_atomic()
 * @addr: Virtual address to be unmapped
 *
 * Counterpart to kmap_atomic().
 *
 * Effectively a wrapper around kunmap_local() which additionally undoes
 * the side effects of kmap_atomic(), i.e. reenabling pagefaults and
 * preemption.
 */

/* Highmem related interfaces for management code */
static inline unsigned int nr_free_highpages(void);
static inline unsigned long totalhigh_pages(void);

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE and providing their own
 * implementation.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
				   unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page);
	clear_page(kaddr);
	kunmap_atomic(kaddr);
}

#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE

static inline void tag_clear_highpage(struct page *page)
{
}

#endif
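
/*
 * Illustrative sketch (editor's example, not part of the original header):
 * as of this v5.15 header, kmap_atomic() is deprecated in favour of
 * kmap_local_page(). Both function names below are hypothetical. Note that
 * a caller which really relied on the disabled pagefaults/preemption that
 * kmap_atomic() implied must now disable them explicitly.
 */
#if 0	/* example only */
static void example_clear_legacy(struct page *page)
{
	void *kaddr = kmap_atomic(page);	/* deprecated */

	memset(kaddr, 0, PAGE_SIZE);
	kunmap_atomic(kaddr);
}

static void example_clear_preferred(struct page *page)
{
	void *kaddr = kmap_local_page(page);	/* use in new code */

	memset(kaddr, 0, PAGE_SIZE);
	kunmap_local(kaddr);
}
#endif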
/*
 * If we pass in a base or tail page, we can zero up to PAGE_SIZE.
 * If we pass in a head page, we can zero up to the size of the compound page.
 */
#if defined(CONFIG_HIGHMEM) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
		unsigned start2, unsigned end2);
#else /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */
static inline void zero_user_segments(struct page *page,
		unsigned start1, unsigned end1,
		unsigned start2, unsigned end2)
{
	void *kaddr = kmap_atomic(page);
	unsigned int i;

	BUG_ON(end1 > page_size(page) || end2 > page_size(page));

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_atomic(kaddr);
	for (i = 0; i < compound_nr(page); i++)
		flush_dcache_page(page + i);
}
#endif /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif

#ifndef __HAVE_ARCH_COPY_HIGHPAGE

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_page(vto, vfrom);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif

static inline void memcpy_page(struct page *dst_page, size_t dst_off,
			       struct page *src_page, size_t src_off,
			       size_t len)
{
	char *dst = kmap_local_page(dst_page);
	char *src = kmap_local_page(src_page);

	VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
	memcpy(dst + dst_off, src + src_off, len);
	kunmap_local(src);
	kunmap_local(dst);
}

static inline void memmove_page(struct page *dst_page, size_t dst_off,
				struct page *src_page, size_t src_off,
				size_t len)
{
	char *dst = kmap_local_page(dst_page);
	char *src = kmap_local_page(src_page);

	VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
	memmove(dst + dst_off, src + src_off, len);
	kunmap_local(src);
	kunmap_local(dst);
}

static inline void memset_page(struct page *page, size_t offset, int val,
			       size_t len)
{
	char *addr = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memset(addr + offset, val, len);
	kunmap_local(addr);
}

static inline void memcpy_from_page(char *to, struct page *page,
				    size_t offset, size_t len)
{
	char *from = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to, from + offset, len);
	kunmap_local(from);
}

static inline void memcpy_to_page(struct page *page, size_t offset,
				  const char *from, size_t len)
{
	char *to = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to + offset, from, len);
	flush_dcache_page(page);
	kunmap_local(to);
}
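
/*
 * Illustrative sketch (editor's example, not part of the original header):
 * round-tripping a small buffer through a page with the bounds-checked
 * helpers above, instead of open-coding kmap_local_page() plus memcpy().
 * The function name and buffers are hypothetical.
 */
#if 0	/* example only */
static void example_roundtrip(struct page *page)
{
	char in[8] = "payload";
	char out[8];

	memcpy_to_page(page, 0, in, sizeof(in));	/* flushes dcache */
	memcpy_from_page(out, page, 0, sizeof(out));
}
#endif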
static inline void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_local_page(page);
	memset(addr + offset, 0, len);
	flush_dcache_page(page);
	kunmap_local(addr);
}

#endif /* _LINUX_HIGHMEM_H */