include/linux/userfaultfd_k.h (Linux v6.1)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/userfaultfd_k.h
 *
 * Copyright (C) 2015 Red Hat, Inc.
 *
 */

#ifndef _LINUX_USERFAULTFD_K_H
#define _LINUX_USERFAULTFD_K_H

#ifdef CONFIG_USERFAULTFD

#include <linux/userfaultfd.h> /* linux/include/uapi/linux/userfaultfd.h */

#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <asm-generic/pgtable_uffd.h>
#include <linux/hugetlb_inline.h>

/* The set of all possible UFFD-related VM flags. */
#define __VM_UFFD_FLAGS (VM_UFFD_MISSING | VM_UFFD_WP | VM_UFFD_MINOR)

/*
 * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
 * new flags, since they might collide with O_* ones. We want
 * to re-use O_* flags that couldn't possibly have a meaning
 * from userfaultfd, in order to leave a free define-space for
 * shared O_* flags.
 */
#define UFFD_CLOEXEC O_CLOEXEC
#define UFFD_NONBLOCK O_NONBLOCK

#define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
#define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS)

extern int sysctl_unprivileged_userfaultfd;

extern vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason);

/*
 * The mode of operation for __mcopy_atomic and its helpers.
 *
 * This is almost an implementation detail (mcopy_atomic below doesn't take this
 * as a parameter), but it's exposed here because memory-kind-specific
 * implementations (e.g. hugetlbfs) need to know the mode of operation.
 */
enum mcopy_atomic_mode {
	/* A normal copy_from_user into the destination range. */
	MCOPY_ATOMIC_NORMAL,
	/* Don't copy; map the destination range to the zero page. */
	MCOPY_ATOMIC_ZEROPAGE,
	/* Just install pte(s) with the existing page(s) in the page cache. */
	MCOPY_ATOMIC_CONTINUE,
};

extern int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
				    struct vm_area_struct *dst_vma,
				    unsigned long dst_addr, struct page *page,
				    bool newly_allocated, bool wp_copy);

extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
			    unsigned long src_start, unsigned long len,
			    atomic_t *mmap_changing, __u64 mode);
extern ssize_t mfill_zeropage(struct mm_struct *dst_mm,
			      unsigned long dst_start,
			      unsigned long len,
			      atomic_t *mmap_changing);
extern ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long dst_start,
			      unsigned long len, atomic_t *mmap_changing);
extern int mwriteprotect_range(struct mm_struct *dst_mm,
			       unsigned long start, unsigned long len,
			       bool enable_wp, atomic_t *mmap_changing);
extern void uffd_wp_range(struct mm_struct *dst_mm, struct vm_area_struct *vma,
			  unsigned long start, unsigned long len, bool enable_wp);
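/*
 * For orientation, a rough sketch of how these entry points are reached
 * via fs/userfaultfd.c at this version: UFFDIO_COPY resolves to
 * mcopy_atomic() (MCOPY_ATOMIC_NORMAL), UFFDIO_ZEROPAGE to
 * mfill_zeropage() (MCOPY_ATOMIC_ZEROPAGE), UFFDIO_CONTINUE to
 * mcopy_continue() (MCOPY_ATOMIC_CONTINUE), and UFFDIO_WRITEPROTECT to
 * mwriteprotect_range().  *mmap_changing is the ioctls' cue to back off
 * with -EAGAIN while fork()/mremap()/munmap() are rearranging the
 * address space.  From userspace, resolving a missing fault looks
 * roughly like this (fault_addr, page_size and scratch_page are
 * illustrative names, not kernel symbols):
 *
 *	struct uffdio_copy copy = {
 *		.dst = fault_addr & ~(page_size - 1),
 *		.src = (unsigned long)scratch_page,
 *		.len = page_size,
 *		.mode = 0,
 *	};
 *	if (ioctl(uffd, UFFDIO_COPY, &copy) == -1)
 *		err(1, "UFFDIO_COPY");
 */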
/* mm helpers */
static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
						   struct vm_userfaultfd_ctx vm_ctx)
{
	return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx;
}

/*
 * Never enable huge pmd sharing on some uffd registered vmas:
 *
 * - VM_UFFD_WP VMAs, because write protect information is per pgtable entry.
 *
 * - VM_UFFD_MINOR VMAs, because otherwise we would never get minor faults for
 *   VMAs which share huge pmds.  (If you have two mappings to the same
 *   underlying pages, and fault in the non-UFFD-registered one with a write,
 *   with huge pmd sharing this would *also* setup the second UFFD-registered
 *   mapping, and we'd not get minor faults.)
 */
static inline bool uffd_disable_huge_pmd_share(struct vm_area_struct *vma)
{
	return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR);
}

/*
 * Don't do fault around for either WP or MINOR registered uffd range.  For
 * MINOR registered range, fault around will be a total disaster and ptes can
 * be installed without notifications; for WP it should mostly be fine as long
 * as the fault around checks for pte_none() before the installation, however
 * to be super safe we just forbid it.
 */
static inline bool uffd_disable_fault_around(struct vm_area_struct *vma)
{
	return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR);
}

static inline bool userfaultfd_missing(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_UFFD_MISSING;
}

static inline bool userfaultfd_wp(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_UFFD_WP;
}

static inline bool userfaultfd_minor(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_UFFD_MINOR;
}

static inline bool userfaultfd_pte_wp(struct vm_area_struct *vma,
				      pte_t pte)
{
	return userfaultfd_wp(vma) && pte_uffd_wp(pte);
}

static inline bool userfaultfd_huge_pmd_wp(struct vm_area_struct *vma,
					   pmd_t pmd)
{
	return userfaultfd_wp(vma) && pmd_uffd_wp(pmd);
}

static inline bool userfaultfd_armed(struct vm_area_struct *vma)
{
	return vma->vm_flags & __VM_UFFD_FLAGS;
}

static inline bool vma_can_userfault(struct vm_area_struct *vma,
				     unsigned long vm_flags)
{
	if ((vm_flags & VM_UFFD_MINOR) &&
	    (!is_vm_hugetlb_page(vma) && !vma_is_shmem(vma)))
		return false;
#ifndef CONFIG_PTE_MARKER_UFFD_WP
	/*
	 * If the user requested uffd-wp but pte markers are not enabled
	 * for uffd-wp, only anonymous memory is supported; shmem &
	 * hugetlbfs are not.
	 */
	if ((vm_flags & VM_UFFD_WP) && !vma_is_anonymous(vma))
		return false;
#endif
	return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) ||
	    vma_is_shmem(vma);
}
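/*
 * A minimal sketch of how the predicates above are consumed, modeled on
 * the write-protect fault path in mm/memory.c at this version (details
 * there differ slightly):
 *
 *	if (userfaultfd_pte_wp(vma, *vmf->pte)) {
 *		pte_unmap_unlock(vmf->pte, vmf->ptl);
 *		return handle_userfault(vmf, VM_UFFD_WP);
 *	}
 *
 * i.e. the fault is bounced to the userspace monitor instead of being
 * resolved by the kernel, while vma_can_userfault() gates which VMAs may
 * be registered in the first place.
 */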
extern int dup_userfaultfd(struct vm_area_struct *, struct list_head *);
extern void dup_userfaultfd_complete(struct list_head *);

extern void mremap_userfaultfd_prep(struct vm_area_struct *,
				    struct vm_userfaultfd_ctx *);
extern void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *,
					unsigned long from, unsigned long to,
					unsigned long len);

extern bool userfaultfd_remove(struct vm_area_struct *vma,
			       unsigned long start,
			       unsigned long end);

extern int userfaultfd_unmap_prep(struct mm_struct *mm, unsigned long start,
				  unsigned long end, struct list_head *uf);
extern void userfaultfd_unmap_complete(struct mm_struct *mm,
				       struct list_head *uf);

#else /* CONFIG_USERFAULTFD */

/* mm helpers */
static inline vm_fault_t handle_userfault(struct vm_fault *vmf,
					  unsigned long reason)
{
	return VM_FAULT_SIGBUS;
}

static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
						   struct vm_userfaultfd_ctx vm_ctx)
{
	return true;
}

static inline bool userfaultfd_missing(struct vm_area_struct *vma)
{
	return false;
}

static inline bool userfaultfd_wp(struct vm_area_struct *vma)
{
	return false;
}

static inline bool userfaultfd_minor(struct vm_area_struct *vma)
{
	return false;
}

static inline bool userfaultfd_pte_wp(struct vm_area_struct *vma,
				      pte_t pte)
{
	return false;
}

static inline bool userfaultfd_huge_pmd_wp(struct vm_area_struct *vma,
					   pmd_t pmd)
{
	return false;
}

static inline bool userfaultfd_armed(struct vm_area_struct *vma)
{
	return false;
}

static inline int dup_userfaultfd(struct vm_area_struct *vma,
				  struct list_head *l)
{
	return 0;
}

static inline void dup_userfaultfd_complete(struct list_head *l)
{
}

static inline void mremap_userfaultfd_prep(struct vm_area_struct *vma,
					   struct vm_userfaultfd_ctx *ctx)
{
}

static inline void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *ctx,
					       unsigned long from,
					       unsigned long to,
					       unsigned long len)
{
}

static inline bool userfaultfd_remove(struct vm_area_struct *vma,
				      unsigned long start,
				      unsigned long end)
{
	return true;
}

static inline int userfaultfd_unmap_prep(struct mm_struct *mm,
					 unsigned long start, unsigned long end,
					 struct list_head *uf)
{
	return 0;
}

static inline void userfaultfd_unmap_complete(struct mm_struct *mm,
					      struct list_head *uf)
{
}

static inline bool uffd_disable_fault_around(struct vm_area_struct *vma)
{
	return false;
}

#endif /* CONFIG_USERFAULTFD */
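/*
 * The pte-marker helpers below are built whether or not
 * CONFIG_USERFAULTFD is set, since generic mm code needs them.  A pte
 * marker is a special non-present swap entry; with
 * CONFIG_PTE_MARKER_UFFD_WP it is what keeps the uffd-wp bit alive
 * across zap for shmem/hugetlbfs, where no pgtable entry remains to
 * carry the bit.  Callers that must honour wr-protect in whichever form
 * the pte took can test pte_swp_uffd_wp_any() below and treat a true
 * result as write-protected.
 */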
static inline bool pte_marker_entry_uffd_wp(swp_entry_t entry)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
	return is_pte_marker_entry(entry) &&
	       (pte_marker_get(entry) & PTE_MARKER_UFFD_WP);
#else
	return false;
#endif
}

static inline bool pte_marker_uffd_wp(pte_t pte)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
	swp_entry_t entry;

	if (!is_swap_pte(pte))
		return false;

	entry = pte_to_swp_entry(pte);

	return pte_marker_entry_uffd_wp(entry);
#else
	return false;
#endif
}

/*
 * Returns true if this is a swap pte and was uffd-wp wr-protected in
 * either form (pte marker or a normal swap pte), false otherwise.
 */
static inline bool pte_swp_uffd_wp_any(pte_t pte)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
	if (!is_swap_pte(pte))
		return false;

	if (pte_swp_uffd_wp(pte))
		return true;

	if (pte_marker_uffd_wp(pte))
		return true;
#endif
	return false;
}

#endif /* _LINUX_USERFAULTFD_K_H */