Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'drm-next-2020-04-03-1' of git://anongit.freedesktop.org/drm/drm

Pull drm hugepage support from Dave Airlie:
"This adds support for hugepages to TTM and has been tested with the
vmwgfx drivers, though I expect other drivers to start using it"

* tag 'drm-next-2020-04-03-1' of git://anongit.freedesktop.org/drm/drm:
drm/vmwgfx: Hook up the helpers to align buffer objects
drm/vmwgfx: Introduce a huge page aligning TTM range manager
drm: Add a drm_get_unmapped_area() helper
drm/vmwgfx: Support huge page faults
drm/ttm, drm/vmwgfx: Support huge TTM pagefaults
mm: Add vmf_insert_pfn_xxx_prot() for huge page-table entries
mm: Split huge pages on write-notify or COW
mm: Introduce vma_is_special_huge
fs: Constify vma argument to vma_is_dax
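
The description above expects other drivers to pick this up, and the driver-side wiring is small. Below is a minimal, untested sketch modeled on the vmwgfx hunks further down: all mydrv_* names are hypothetical, while drm_get_unmapped_area() is the helper added by this series. Drivers that rely on the stock ttm_bo_vm_ops installed by ttm_bo_mmap() get the new .huge_fault handler without further changes; drivers with custom fault handling (like vmwgfx) additionally provide their own .huge_fault callback.

    static unsigned long mydrv_get_unmapped_area(struct file *file, unsigned long uaddr,
                                                 unsigned long len, unsigned long pgoff,
                                                 unsigned long flags)
    {
            struct drm_file *file_priv = file->private_data;
            struct mydrv_private *priv = mydrv_priv(file_priv->minor->dev); /* hypothetical */

            /* Pick a huge-page-aligned VA relative to this device's offset manager. */
            return drm_get_unmapped_area(file, uaddr, len, pgoff, flags,
                                         &priv->vma_manager);
    }

    static const struct file_operations mydrv_fops = {
            .owner = THIS_MODULE,
            .mmap = mydrv_mmap,                           /* hypothetical */
            .get_unmapped_area = mydrv_get_unmapped_area,
            /* ... remaining fops unchanged ... */
    };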

+692 -28
+141
drivers/gpu/drm/drm_file.c
··· 48 48 #include "drm_internal.h" 49 49 #include "drm_legacy.h" 50 50 51 + #if defined(CONFIG_MMU) && defined(CONFIG_TRANSPARENT_HUGEPAGE) 52 + #include <uapi/asm/mman.h> 53 + #include <drm/drm_vma_manager.h> 54 + #endif 55 + 51 56 /* from BKL pushdown */ 52 57 DEFINE_MUTEX(drm_global_mutex); 53 58 ··· 877 872 return file; 878 873 } 879 874 EXPORT_SYMBOL_FOR_TESTS_ONLY(mock_drm_getfile); 875 + 876 + #ifdef CONFIG_MMU 877 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE 878 + /* 879 + * drm_addr_inflate() attempts to construct an aligned area by inflating 880 + * the area size and skipping the unaligned start of the area. 881 + * adapted from shmem_get_unmapped_area() 882 + */ 883 + static unsigned long drm_addr_inflate(unsigned long addr, 884 + unsigned long len, 885 + unsigned long pgoff, 886 + unsigned long flags, 887 + unsigned long huge_size) 888 + { 889 + unsigned long offset, inflated_len; 890 + unsigned long inflated_addr; 891 + unsigned long inflated_offset; 892 + 893 + offset = (pgoff << PAGE_SHIFT) & (huge_size - 1); 894 + if (offset && offset + len < 2 * huge_size) 895 + return addr; 896 + if ((addr & (huge_size - 1)) == offset) 897 + return addr; 898 + 899 + inflated_len = len + huge_size - PAGE_SIZE; 900 + if (inflated_len > TASK_SIZE) 901 + return addr; 902 + if (inflated_len < len) 903 + return addr; 904 + 905 + inflated_addr = current->mm->get_unmapped_area(NULL, 0, inflated_len, 906 + 0, flags); 907 + if (IS_ERR_VALUE(inflated_addr)) 908 + return addr; 909 + if (inflated_addr & ~PAGE_MASK) 910 + return addr; 911 + 912 + inflated_offset = inflated_addr & (huge_size - 1); 913 + inflated_addr += offset - inflated_offset; 914 + if (inflated_offset > offset) 915 + inflated_addr += huge_size; 916 + 917 + if (inflated_addr > TASK_SIZE - len) 918 + return addr; 919 + 920 + return inflated_addr; 921 + } 922 + 923 + /** 924 + * drm_get_unmapped_area() - Get an unused user-space virtual memory area 925 + * suitable for huge page table entries. 926 + * @file: The struct file representing the address space being mmap()'d. 927 + * @uaddr: Start address suggested by user-space. 928 + * @len: Length of the area. 929 + * @pgoff: The page offset into the address space. 930 + * @flags: mmap flags 931 + * @mgr: The address space manager used by the drm driver. This argument can 932 + * probably be removed at some point when all drivers use the same 933 + * address space manager. 934 + * 935 + * This function attempts to find an unused user-space virtual memory area 936 + * that can accommodate the size we want to map, and that is properly 937 + * aligned to facilitate huge page table entries matching actual 938 + * huge pages or huge page aligned memory in buffer objects. Buffer objects 939 + * are assumed to start at huge page boundary pfns (io memory) or be 940 + * populated by huge pages aligned to the start of the buffer object 941 + * (system- or coherent memory). Adapted from shmem_get_unmapped_area. 942 + * 943 + * Return: aligned user-space address. 944 + */ 945 + unsigned long drm_get_unmapped_area(struct file *file, 946 + unsigned long uaddr, unsigned long len, 947 + unsigned long pgoff, unsigned long flags, 948 + struct drm_vma_offset_manager *mgr) 949 + { 950 + unsigned long addr; 951 + unsigned long inflated_addr; 952 + struct drm_vma_offset_node *node; 953 + 954 + if (len > TASK_SIZE) 955 + return -ENOMEM; 956 + 957 + /* 958 + * @pgoff is the file page-offset the huge page boundaries of 959 + * which typically aligns to physical address huge page boundaries. 
960 + * That's not true for DRM, however, where physical address huge 961 + * page boundaries instead are aligned with the offset from 962 + * buffer object start. So adjust @pgoff to be the offset from 963 + * buffer object start. 964 + */ 965 + drm_vma_offset_lock_lookup(mgr); 966 + node = drm_vma_offset_lookup_locked(mgr, pgoff, 1); 967 + if (node) 968 + pgoff -= node->vm_node.start; 969 + drm_vma_offset_unlock_lookup(mgr); 970 + 971 + addr = current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags); 972 + if (IS_ERR_VALUE(addr)) 973 + return addr; 974 + if (addr & ~PAGE_MASK) 975 + return addr; 976 + if (addr > TASK_SIZE - len) 977 + return addr; 978 + 979 + if (len < HPAGE_PMD_SIZE) 980 + return addr; 981 + if (flags & MAP_FIXED) 982 + return addr; 983 + /* 984 + * Our priority is to support MAP_SHARED mapped hugely; 985 + * and support MAP_PRIVATE mapped hugely too, until it is COWed. 986 + * But if caller specified an address hint, respect that as before. 987 + */ 988 + if (uaddr) 989 + return addr; 990 + 991 + inflated_addr = drm_addr_inflate(addr, len, pgoff, flags, 992 + HPAGE_PMD_SIZE); 993 + 994 + if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) && 995 + len >= HPAGE_PUD_SIZE) 996 + inflated_addr = drm_addr_inflate(inflated_addr, len, pgoff, 997 + flags, HPAGE_PUD_SIZE); 998 + return inflated_addr; 999 + } 1000 + #else /* CONFIG_TRANSPARENT_HUGEPAGE */ 1001 + unsigned long drm_get_unmapped_area(struct file *file, 1002 + unsigned long uaddr, unsigned long len, 1003 + unsigned long pgoff, unsigned long flags, 1004 + struct drm_vma_offset_manager *mgr) 1005 + { 1006 + return current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags); 1007 + } 1008 + #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 1009 + EXPORT_SYMBOL_GPL(drm_get_unmapped_area); 1010 + #endif /* CONFIG_MMU */
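
To make the inflation arithmetic in drm_addr_inflate() above concrete, here is a walk-through with made-up addresses (purely illustrative), assuming huge_size = HPAGE_PMD_SIZE = 2 MiB (0x200000) and a buffer-object-relative offset of 0:

    /*
     * len            = 0x400000           requested mapping size (4 MiB)
     * addr           = 0x7f1234567000     first get_unmapped_area() result; its low
     *                                     21 bits (0x167000) != offset, so inflate
     * inflated_len   = 0x5ff000           len + huge_size - PAGE_SIZE
     * inflated_addr  = 0x7f1234455000     second get_unmapped_area() result (example)
     *
     * inflated_offset = inflated_addr & (huge_size - 1)  = 0x55000
     * inflated_addr  += offset - inflated_offset        -> 0x7f1234400000
     * inflated_offset > offset, so  += huge_size        -> 0x7f1234600000
     *
     * 0x7f1234600000 is the first address inside the inflated area whose offset
     * within a 2 MiB huge page matches the buffer object, so PMD entries can map it.
     */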
+158 -3
drivers/gpu/drm/ttm/ttm_bo_vm.c
··· 162 162 } 163 163 EXPORT_SYMBOL(ttm_bo_vm_reserve); 164 164 165 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE 166 + /** 167 + * ttm_bo_vm_insert_huge - Insert a pfn for PUD or PMD faults 168 + * @vmf: Fault data 169 + * @bo: The buffer object 170 + * @page_offset: Page offset from bo start 171 + * @fault_page_size: The size of the fault in pages. 172 + * @pgprot: The page protections. 173 + * Does additional checking whether it's possible to insert a PUD or PMD 174 + * pfn and performs the insertion. 175 + * 176 + * Return: VM_FAULT_NOPAGE on successful insertion, VM_FAULT_FALLBACK if 177 + * a huge fault was not possible, or on insertion error. 178 + */ 179 + static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf, 180 + struct ttm_buffer_object *bo, 181 + pgoff_t page_offset, 182 + pgoff_t fault_page_size, 183 + pgprot_t pgprot) 184 + { 185 + pgoff_t i; 186 + vm_fault_t ret; 187 + unsigned long pfn; 188 + pfn_t pfnt; 189 + struct ttm_tt *ttm = bo->ttm; 190 + bool write = vmf->flags & FAULT_FLAG_WRITE; 191 + 192 + /* Fault should not cross bo boundary. */ 193 + page_offset &= ~(fault_page_size - 1); 194 + if (page_offset + fault_page_size > bo->num_pages) 195 + goto out_fallback; 196 + 197 + if (bo->mem.bus.is_iomem) 198 + pfn = ttm_bo_io_mem_pfn(bo, page_offset); 199 + else 200 + pfn = page_to_pfn(ttm->pages[page_offset]); 201 + 202 + /* pfn must be fault_page_size aligned. */ 203 + if ((pfn & (fault_page_size - 1)) != 0) 204 + goto out_fallback; 205 + 206 + /* Check that memory is contiguous. */ 207 + if (!bo->mem.bus.is_iomem) { 208 + for (i = 1; i < fault_page_size; ++i) { 209 + if (page_to_pfn(ttm->pages[page_offset + i]) != pfn + i) 210 + goto out_fallback; 211 + } 212 + } else if (bo->bdev->driver->io_mem_pfn) { 213 + for (i = 1; i < fault_page_size; ++i) { 214 + if (ttm_bo_io_mem_pfn(bo, page_offset + i) != pfn + i) 215 + goto out_fallback; 216 + } 217 + } 218 + 219 + pfnt = __pfn_to_pfn_t(pfn, PFN_DEV); 220 + if (fault_page_size == (HPAGE_PMD_SIZE >> PAGE_SHIFT)) 221 + ret = vmf_insert_pfn_pmd_prot(vmf, pfnt, pgprot, write); 222 + #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 223 + else if (fault_page_size == (HPAGE_PUD_SIZE >> PAGE_SHIFT)) 224 + ret = vmf_insert_pfn_pud_prot(vmf, pfnt, pgprot, write); 225 + #endif 226 + else 227 + WARN_ON_ONCE(ret = VM_FAULT_FALLBACK); 228 + 229 + if (ret != VM_FAULT_NOPAGE) 230 + goto out_fallback; 231 + 232 + return VM_FAULT_NOPAGE; 233 + out_fallback: 234 + count_vm_event(THP_FAULT_FALLBACK); 235 + return VM_FAULT_FALLBACK; 236 + } 237 + #else 238 + static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf, 239 + struct ttm_buffer_object *bo, 240 + pgoff_t page_offset, 241 + pgoff_t fault_page_size, 242 + pgprot_t pgprot) 243 + { 244 + return VM_FAULT_FALLBACK; 245 + } 246 + #endif 247 + 165 248 /** 166 249 * ttm_bo_vm_fault_reserved - TTM fault helper 167 250 * @vmf: The struct vm_fault given as argument to the fault callback ··· 252 169 * @num_prefault: Maximum number of prefault pages. The caller may want to 253 170 * specify this based on madvice settings and the size of the GPU object 254 171 * backed by the memory. 172 + * @fault_page_size: The size of the fault in pages. 
255 173 * 256 174 * This function inserts one or more page table entries pointing to the 257 175 * memory backing the buffer object, and then returns a return code ··· 266 182 */ 267 183 vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, 268 184 pgprot_t prot, 269 - pgoff_t num_prefault) 185 + pgoff_t num_prefault, 186 + pgoff_t fault_page_size) 270 187 { 271 188 struct vm_area_struct *vma = vmf->vma; 272 189 struct ttm_buffer_object *bo = vma->vm_private_data; ··· 359 274 prot = pgprot_decrypted(prot); 360 275 } 361 276 277 + /* We don't prefault on huge faults. Yet. */ 278 + if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && fault_page_size != 1) { 279 + ret = ttm_bo_vm_insert_huge(vmf, bo, page_offset, 280 + fault_page_size, prot); 281 + goto out_io_unlock; 282 + } 283 + 362 284 /* 363 285 * Speculatively prefault a number of pages. Only error on 364 286 * first page. ··· 432 340 return ret; 433 341 434 342 prot = vma->vm_page_prot; 435 - ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT); 343 + ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1); 436 344 if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) 437 345 return ret; 438 346 ··· 441 349 return ret; 442 350 } 443 351 EXPORT_SYMBOL(ttm_bo_vm_fault); 352 + 353 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE 354 + /** 355 + * ttm_pgprot_is_wrprotecting - Is a page protection value write-protecting? 356 + * @prot: The page protection value 357 + * 358 + * Return: true if @prot is write-protecting. false otherwise. 359 + */ 360 + static bool ttm_pgprot_is_wrprotecting(pgprot_t prot) 361 + { 362 + /* 363 + * This is meant to say "pgprot_wrprotect(prot) == prot" in a generic 364 + * way. Unfortunately there is no generic pgprot_wrprotect. 365 + */ 366 + return pte_val(pte_wrprotect(__pte(pgprot_val(prot)))) == 367 + pgprot_val(prot); 368 + } 369 + 370 + static vm_fault_t ttm_bo_vm_huge_fault(struct vm_fault *vmf, 371 + enum page_entry_size pe_size) 372 + { 373 + struct vm_area_struct *vma = vmf->vma; 374 + pgprot_t prot; 375 + struct ttm_buffer_object *bo = vma->vm_private_data; 376 + vm_fault_t ret; 377 + pgoff_t fault_page_size = 0; 378 + bool write = vmf->flags & FAULT_FLAG_WRITE; 379 + 380 + switch (pe_size) { 381 + case PE_SIZE_PMD: 382 + fault_page_size = HPAGE_PMD_SIZE >> PAGE_SHIFT; 383 + break; 384 + #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 385 + case PE_SIZE_PUD: 386 + fault_page_size = HPAGE_PUD_SIZE >> PAGE_SHIFT; 387 + break; 388 + #endif 389 + default: 390 + WARN_ON_ONCE(1); 391 + return VM_FAULT_FALLBACK; 392 + } 393 + 394 + /* Fallback on write dirty-tracking or COW */ 395 + if (write && ttm_pgprot_is_wrprotecting(vma->vm_page_prot)) 396 + return VM_FAULT_FALLBACK; 397 + 398 + ret = ttm_bo_vm_reserve(bo, vmf); 399 + if (ret) 400 + return ret; 401 + 402 + prot = vm_get_page_prot(vma->vm_flags); 403 + ret = ttm_bo_vm_fault_reserved(vmf, prot, 1, fault_page_size); 404 + if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) 405 + return ret; 406 + 407 + dma_resv_unlock(bo->base.resv); 408 + 409 + return ret; 410 + } 411 + #endif 444 412 445 413 void ttm_bo_vm_open(struct vm_area_struct *vma) 446 414 { ··· 603 451 .fault = ttm_bo_vm_fault, 604 452 .open = ttm_bo_vm_open, 605 453 .close = ttm_bo_vm_close, 606 - .access = ttm_bo_vm_access 454 + .access = ttm_bo_vm_access, 455 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE 456 + .huge_fault = ttm_bo_vm_huge_fault, 457 + #endif 607 458 }; 608 459 609 460 static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device 
*bdev,
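
For drivers that install their own vm_operations_struct instead of the stock ttm_bo_vm_ops, the calling convention of the extended helper is the main point: reserve the object, pass the desired page-table-entry size in pages as fault_page_size (1 for ordinary PTE faults), and treat VM_FAULT_FALLBACK as "let the core split down to PTEs". The following is a condensed, hypothetical restatement of the ttm_bo_vm_huge_fault() added above, with the PUD case and the write-protection check elided; mydrv_huge_fault() is not a real function.

    static vm_fault_t mydrv_huge_fault(struct vm_fault *vmf, enum page_entry_size pe_size)
    {
            struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
            pgprot_t prot = vm_get_page_prot(vmf->vma->vm_flags);
            vm_fault_t ret;

            if (pe_size != PE_SIZE_PMD)
                    return VM_FAULT_FALLBACK;        /* PE_SIZE_PUD handling elided */

            ret = ttm_bo_vm_reserve(bo, vmf);
            if (ret)
                    return ret;

            /* num_prefault is 1: huge faults are not prefaulted (yet). */
            ret = ttm_bo_vm_fault_reserved(vmf, prot, 1, HPAGE_PMD_SIZE >> PAGE_SHIFT);
            if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
                    return ret;

            dma_resv_unlock(bo->base.resv);
            return ret;
    }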
+1
drivers/gpu/drm/vmwgfx/Makefile
··· 11 11 vmwgfx_validation.o vmwgfx_page_dirty.o vmwgfx_streamoutput.o \ 12 12 ttm_object.o ttm_lock.o 13 13 14 + vmwgfx-$(CONFIG_TRANSPARENT_HUGEPAGE) += vmwgfx_thp.o 14 15 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
+13
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
··· 1247 1247 pci_disable_device(pdev); 1248 1248 } 1249 1249 1250 + static unsigned long 1251 + vmw_get_unmapped_area(struct file *file, unsigned long uaddr, 1252 + unsigned long len, unsigned long pgoff, 1253 + unsigned long flags) 1254 + { 1255 + struct drm_file *file_priv = file->private_data; 1256 + struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev); 1257 + 1258 + return drm_get_unmapped_area(file, uaddr, len, pgoff, flags, 1259 + &dev_priv->vma_manager); 1260 + } 1261 + 1250 1262 static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, 1251 1263 void *ptr) 1252 1264 { ··· 1430 1418 .compat_ioctl = vmw_compat_ioctl, 1431 1419 #endif 1432 1420 .llseek = noop_llseek, 1421 + .get_unmapped_area = vmw_get_unmapped_area, 1433 1422 }; 1434 1423 1435 1424 static struct drm_driver driver = {
+12
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
··· 1000 1000 1001 1001 extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv, 1002 1002 size_t gran); 1003 + 1003 1004 /** 1004 1005 * TTM buffer object driver - vmwgfx_ttm_buffer.c 1005 1006 */ ··· 1511 1510 pgoff_t start, pgoff_t end); 1512 1511 vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf); 1513 1512 vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf); 1513 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1514 + vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf, 1515 + enum page_entry_size pe_size); 1516 + #endif 1517 + 1518 + /* Transparent hugepage support - vmwgfx_thp.c */ 1519 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1520 + extern const struct ttm_mem_type_manager_func vmw_thp_func; 1521 + #else 1522 + #define vmw_thp_func ttm_bo_manager_func 1523 + #endif 1514 1524 1515 1525 /** 1516 1526 * VMW_DEBUG_KMS - Debug output for kernel mode-setting
+74 -2
drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
··· 473 473 * a lot of unnecessary write faults. 474 474 */ 475 475 if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE) 476 - prot = vma->vm_page_prot; 476 + prot = vm_get_page_prot(vma->vm_flags & ~VM_SHARED); 477 477 else 478 478 prot = vm_get_page_prot(vma->vm_flags); 479 479 480 - ret = ttm_bo_vm_fault_reserved(vmf, prot, num_prefault); 480 + ret = ttm_bo_vm_fault_reserved(vmf, prot, num_prefault, 1); 481 481 if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) 482 482 return ret; 483 483 ··· 486 486 487 487 return ret; 488 488 } 489 + 490 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE 491 + vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf, 492 + enum page_entry_size pe_size) 493 + { 494 + struct vm_area_struct *vma = vmf->vma; 495 + struct ttm_buffer_object *bo = (struct ttm_buffer_object *) 496 + vma->vm_private_data; 497 + struct vmw_buffer_object *vbo = 498 + container_of(bo, struct vmw_buffer_object, base); 499 + pgprot_t prot; 500 + vm_fault_t ret; 501 + pgoff_t fault_page_size; 502 + bool write = vmf->flags & FAULT_FLAG_WRITE; 503 + bool is_cow_mapping = 504 + (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; 505 + 506 + switch (pe_size) { 507 + case PE_SIZE_PMD: 508 + fault_page_size = HPAGE_PMD_SIZE >> PAGE_SHIFT; 509 + break; 510 + #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 511 + case PE_SIZE_PUD: 512 + fault_page_size = HPAGE_PUD_SIZE >> PAGE_SHIFT; 513 + break; 514 + #endif 515 + default: 516 + WARN_ON_ONCE(1); 517 + return VM_FAULT_FALLBACK; 518 + } 519 + 520 + /* Always do write dirty-tracking and COW on PTE level. */ 521 + if (write && (READ_ONCE(vbo->dirty) || is_cow_mapping)) 522 + return VM_FAULT_FALLBACK; 523 + 524 + ret = ttm_bo_vm_reserve(bo, vmf); 525 + if (ret) 526 + return ret; 527 + 528 + if (vbo->dirty) { 529 + pgoff_t allowed_prefault; 530 + unsigned long page_offset; 531 + 532 + page_offset = vmf->pgoff - 533 + drm_vma_node_start(&bo->base.vma_node); 534 + if (page_offset >= bo->num_pages || 535 + vmw_resources_clean(vbo, page_offset, 536 + page_offset + PAGE_SIZE, 537 + &allowed_prefault)) { 538 + ret = VM_FAULT_SIGBUS; 539 + goto out_unlock; 540 + } 541 + 542 + /* 543 + * Write protect, so we get a new fault on write, and can 544 + * split. 545 + */ 546 + prot = vm_get_page_prot(vma->vm_flags & ~VM_SHARED); 547 + } else { 548 + prot = vm_get_page_prot(vma->vm_flags); 549 + } 550 + 551 + ret = ttm_bo_vm_fault_reserved(vmf, prot, 1, fault_page_size); 552 + if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) 553 + return ret; 554 + 555 + out_unlock: 556 + dma_resv_unlock(bo->base.resv); 557 + 558 + return ret; 559 + } 560 + #endif
+166
drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 OR MIT 2 + /* 3 + * Huge page-table-entry support for IO memory. 4 + * 5 + * Copyright (C) 2007-2019 Vmware, Inc. All rights reservedd. 6 + */ 7 + #include "vmwgfx_drv.h" 8 + #include <drm/ttm/ttm_module.h> 9 + #include <drm/ttm/ttm_bo_driver.h> 10 + #include <drm/ttm/ttm_placement.h> 11 + 12 + /** 13 + * struct vmw_thp_manager - Range manager implementing huge page alignment 14 + * 15 + * @mm: The underlying range manager. Protected by @lock. 16 + * @lock: Manager lock. 17 + */ 18 + struct vmw_thp_manager { 19 + struct drm_mm mm; 20 + spinlock_t lock; 21 + }; 22 + 23 + static int vmw_thp_insert_aligned(struct drm_mm *mm, struct drm_mm_node *node, 24 + unsigned long align_pages, 25 + const struct ttm_place *place, 26 + struct ttm_mem_reg *mem, 27 + unsigned long lpfn, 28 + enum drm_mm_insert_mode mode) 29 + { 30 + if (align_pages >= mem->page_alignment && 31 + (!mem->page_alignment || align_pages % mem->page_alignment == 0)) { 32 + return drm_mm_insert_node_in_range(mm, node, 33 + mem->num_pages, 34 + align_pages, 0, 35 + place->fpfn, lpfn, mode); 36 + } 37 + 38 + return -ENOSPC; 39 + } 40 + 41 + static int vmw_thp_get_node(struct ttm_mem_type_manager *man, 42 + struct ttm_buffer_object *bo, 43 + const struct ttm_place *place, 44 + struct ttm_mem_reg *mem) 45 + { 46 + struct vmw_thp_manager *rman = (struct vmw_thp_manager *) man->priv; 47 + struct drm_mm *mm = &rman->mm; 48 + struct drm_mm_node *node; 49 + unsigned long align_pages; 50 + unsigned long lpfn; 51 + enum drm_mm_insert_mode mode = DRM_MM_INSERT_BEST; 52 + int ret; 53 + 54 + node = kzalloc(sizeof(*node), GFP_KERNEL); 55 + if (!node) 56 + return -ENOMEM; 57 + 58 + lpfn = place->lpfn; 59 + if (!lpfn) 60 + lpfn = man->size; 61 + 62 + mode = DRM_MM_INSERT_BEST; 63 + if (place->flags & TTM_PL_FLAG_TOPDOWN) 64 + mode = DRM_MM_INSERT_HIGH; 65 + 66 + spin_lock(&rman->lock); 67 + if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) { 68 + align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT); 69 + if (mem->num_pages >= align_pages) { 70 + ret = vmw_thp_insert_aligned(mm, node, align_pages, 71 + place, mem, lpfn, mode); 72 + if (!ret) 73 + goto found_unlock; 74 + } 75 + } 76 + 77 + align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT); 78 + if (mem->num_pages >= align_pages) { 79 + ret = vmw_thp_insert_aligned(mm, node, align_pages, place, mem, 80 + lpfn, mode); 81 + if (!ret) 82 + goto found_unlock; 83 + } 84 + 85 + ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages, 86 + mem->page_alignment, 0, 87 + place->fpfn, lpfn, mode); 88 + found_unlock: 89 + spin_unlock(&rman->lock); 90 + 91 + if (unlikely(ret)) { 92 + kfree(node); 93 + } else { 94 + mem->mm_node = node; 95 + mem->start = node->start; 96 + } 97 + 98 + return 0; 99 + } 100 + 101 + 102 + 103 + static void vmw_thp_put_node(struct ttm_mem_type_manager *man, 104 + struct ttm_mem_reg *mem) 105 + { 106 + struct vmw_thp_manager *rman = (struct vmw_thp_manager *) man->priv; 107 + 108 + if (mem->mm_node) { 109 + spin_lock(&rman->lock); 110 + drm_mm_remove_node(mem->mm_node); 111 + spin_unlock(&rman->lock); 112 + 113 + kfree(mem->mm_node); 114 + mem->mm_node = NULL; 115 + } 116 + } 117 + 118 + static int vmw_thp_init(struct ttm_mem_type_manager *man, 119 + unsigned long p_size) 120 + { 121 + struct vmw_thp_manager *rman; 122 + 123 + rman = kzalloc(sizeof(*rman), GFP_KERNEL); 124 + if (!rman) 125 + return -ENOMEM; 126 + 127 + drm_mm_init(&rman->mm, 0, p_size); 128 + spin_lock_init(&rman->lock); 129 + man->priv = rman; 130 + return 0; 131 + } 132 + 133 
+ static int vmw_thp_takedown(struct ttm_mem_type_manager *man) 134 + { 135 + struct vmw_thp_manager *rman = (struct vmw_thp_manager *) man->priv; 136 + struct drm_mm *mm = &rman->mm; 137 + 138 + spin_lock(&rman->lock); 139 + if (drm_mm_clean(mm)) { 140 + drm_mm_takedown(mm); 141 + spin_unlock(&rman->lock); 142 + kfree(rman); 143 + man->priv = NULL; 144 + return 0; 145 + } 146 + spin_unlock(&rman->lock); 147 + return -EBUSY; 148 + } 149 + 150 + static void vmw_thp_debug(struct ttm_mem_type_manager *man, 151 + struct drm_printer *printer) 152 + { 153 + struct vmw_thp_manager *rman = (struct vmw_thp_manager *) man->priv; 154 + 155 + spin_lock(&rman->lock); 156 + drm_mm_print(&rman->mm, printer); 157 + spin_unlock(&rman->lock); 158 + } 159 + 160 + const struct ttm_mem_type_manager_func vmw_thp_func = { 161 + .init = vmw_thp_init, 162 + .takedown = vmw_thp_takedown, 163 + .get_node = vmw_thp_get_node, 164 + .put_node = vmw_thp_put_node, 165 + .debug = vmw_thp_debug 166 + };
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
··· 749 749 break; 750 750 case TTM_PL_VRAM: 751 751 /* "On-card" video ram */ 752 - man->func = &ttm_bo_manager_func; 752 + man->func = &vmw_thp_func; 753 753 man->gpu_offset = 0; 754 754 man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE; 755 755 man->available_caching = TTM_PL_FLAG_CACHED;
+4 -1
drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
··· 34 34 .page_mkwrite = vmw_bo_vm_mkwrite, 35 35 .fault = vmw_bo_vm_fault, 36 36 .open = ttm_bo_vm_open, 37 - .close = ttm_bo_vm_close 37 + .close = ttm_bo_vm_close, 38 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE 39 + .huge_fault = vmw_bo_vm_huge_fault, 40 + #endif 38 41 }; 39 42 struct drm_file *file_priv = filp->private_data; 40 43 struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
+9
include/drm/drm_file.h
··· 391 391 392 392 struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags); 393 393 394 + #ifdef CONFIG_MMU 395 + struct drm_vma_offset_manager; 396 + unsigned long drm_get_unmapped_area(struct file *file, 397 + unsigned long uaddr, unsigned long len, 398 + unsigned long pgoff, unsigned long flags, 399 + struct drm_vma_offset_manager *mgr); 400 + #endif /* CONFIG_MMU */ 401 + 402 + 394 403 #endif /* _DRM_FILE_H_ */
+2 -1
include/drm/ttm/ttm_bo_api.h
··· 727 727 728 728 vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, 729 729 pgprot_t prot, 730 - pgoff_t num_prefault); 730 + pgoff_t num_prefault, 731 + pgoff_t fault_page_size); 731 732 732 733 vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf); 733 734
+1 -1
include/linux/fs.h
··· 3399 3399 return (filp->f_flags & O_DIRECT) || IS_DAX(filp->f_mapping->host); 3400 3400 } 3401 3401 3402 - static inline bool vma_is_dax(struct vm_area_struct *vma) 3402 + static inline bool vma_is_dax(const struct vm_area_struct *vma) 3403 3403 { 3404 3404 return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host); 3405 3405 }
+39 -2
include/linux/huge_mm.h
··· 47 47 extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 48 48 unsigned long addr, pgprot_t newprot, 49 49 int prot_numa); 50 - vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write); 51 - vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write); 50 + vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn, 51 + pgprot_t pgprot, bool write); 52 + 53 + /** 54 + * vmf_insert_pfn_pmd - insert a pmd size pfn 55 + * @vmf: Structure describing the fault 56 + * @pfn: pfn to insert 57 + * @pgprot: page protection to use 58 + * @write: whether it's a write fault 59 + * 60 + * Insert a pmd size pfn. See vmf_insert_pfn() for additional info. 61 + * 62 + * Return: vm_fault_t value. 63 + */ 64 + static inline vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, 65 + bool write) 66 + { 67 + return vmf_insert_pfn_pmd_prot(vmf, pfn, vmf->vma->vm_page_prot, write); 68 + } 69 + vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn, 70 + pgprot_t pgprot, bool write); 71 + 72 + /** 73 + * vmf_insert_pfn_pud - insert a pud size pfn 74 + * @vmf: Structure describing the fault 75 + * @pfn: pfn to insert 76 + * @pgprot: page protection to use 77 + * @write: whether it's a write fault 78 + * 79 + * Insert a pud size pfn. See vmf_insert_pfn() for additional info. 80 + * 81 + * Return: vm_fault_t value. 82 + */ 83 + static inline vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, 84 + bool write) 85 + { 86 + return vmf_insert_pfn_pud_prot(vmf, pfn, vmf->vma->vm_page_prot, write); 87 + } 88 + 52 89 enum transparent_hugepage_flag { 53 90 TRANSPARENT_HUGEPAGE_FLAG, 54 91 TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
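
A minimal illustration of why the _prot variants exist (hypothetical helper; the surrounding fault-handler context is elided): a driver doing its own write-notify or dirty tracking can insert the huge entry write-protected even in a writable shared mapping, so the next write raises a fresh fault. This is the pattern the vmwgfx dirty-tracking hunks above rely on.

    /* Hypothetical sketch: insert @pfn as a write-protected PMD entry so the next
     * write faults again (e.g. to start dirty tracking or to split to PTEs). */
    static vm_fault_t mydrv_insert_pmd_wrprotected(struct vm_fault *vmf, unsigned long pfn)
    {
            pgprot_t prot = vm_get_page_prot(vmf->vma->vm_flags & ~VM_SHARED);

            return vmf_insert_pfn_pmd_prot(vmf, __pfn_to_pfn_t(pfn, PFN_DEV),
                                           prot, false);
    }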
+17
include/linux/mm.h
··· 3014 3014 const void __user *usr_src, 3015 3015 unsigned int pages_per_huge_page, 3016 3016 bool allow_pagefault); 3017 + 3018 + /** 3019 + * vma_is_special_huge - Are transhuge page-table entries considered special? 3020 + * @vma: Pointer to the struct vm_area_struct to consider 3021 + * 3022 + * Whether transhuge page-table entries are considered "special" following 3023 + * the definition in vm_normal_page(). 3024 + * 3025 + * Return: true if transhuge page-table entries should be considered special, 3026 + * false otherwise. 3027 + */ 3028 + static inline bool vma_is_special_huge(const struct vm_area_struct *vma) 3029 + { 3030 + return vma_is_dax(vma) || (vma->vm_file && 3031 + (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))); 3032 + } 3033 + 3017 3034 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ 3018 3035 3019 3036 #ifdef CONFIG_DEBUG_PAGEALLOC
+35 -9
mm/huge_memory.c
··· 824 824 pte_free(mm, pgtable); 825 825 } 826 826 827 - vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write) 827 + /** 828 + * vmf_insert_pfn_pmd_prot - insert a pmd size pfn 829 + * @vmf: Structure describing the fault 830 + * @pfn: pfn to insert 831 + * @pgprot: page protection to use 832 + * @write: whether it's a write fault 833 + * 834 + * Insert a pmd size pfn. See vmf_insert_pfn() for additional info and 835 + * also consult the vmf_insert_mixed_prot() documentation when 836 + * @pgprot != @vmf->vma->vm_page_prot. 837 + * 838 + * Return: vm_fault_t value. 839 + */ 840 + vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn, 841 + pgprot_t pgprot, bool write) 828 842 { 829 843 unsigned long addr = vmf->address & PMD_MASK; 830 844 struct vm_area_struct *vma = vmf->vma; 831 - pgprot_t pgprot = vma->vm_page_prot; 832 845 pgtable_t pgtable = NULL; 833 846 834 847 /* ··· 869 856 insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable); 870 857 return VM_FAULT_NOPAGE; 871 858 } 872 - EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd); 859 + EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd_prot); 873 860 874 861 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 875 862 static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma) ··· 915 902 spin_unlock(ptl); 916 903 } 917 904 918 - vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write) 905 + /** 906 + * vmf_insert_pfn_pud_prot - insert a pud size pfn 907 + * @vmf: Structure describing the fault 908 + * @pfn: pfn to insert 909 + * @pgprot: page protection to use 910 + * @write: whether it's a write fault 911 + * 912 + * Insert a pud size pfn. See vmf_insert_pfn() for additional info and 913 + * also consult the vmf_insert_mixed_prot() documentation when 914 + * @pgprot != @vmf->vma->vm_page_prot. 915 + * 916 + * Return: vm_fault_t value. 917 + */ 918 + vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn, 919 + pgprot_t pgprot, bool write) 919 920 { 920 921 unsigned long addr = vmf->address & PUD_MASK; 921 922 struct vm_area_struct *vma = vmf->vma; 922 - pgprot_t pgprot = vma->vm_page_prot; 923 923 924 924 /* 925 925 * If we had pud_special, we could avoid all these restrictions, ··· 953 927 insert_pfn_pud(vma, addr, vmf->pud, pfn, pgprot, write); 954 928 return VM_FAULT_NOPAGE; 955 929 } 956 - EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud); 930 + EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud_prot); 957 931 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ 958 932 959 933 static void touch_pmd(struct vm_area_struct *vma, unsigned long addr, ··· 1845 1819 orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd, 1846 1820 tlb->fullmm); 1847 1821 tlb_remove_pmd_tlb_entry(tlb, pmd, addr); 1848 - if (vma_is_dax(vma)) { 1822 + if (vma_is_special_huge(vma)) { 1849 1823 if (arch_needs_pgtable_deposit()) 1850 1824 zap_deposited_table(tlb->mm, pmd); 1851 1825 spin_unlock(ptl); ··· 2109 2083 */ 2110 2084 pudp_huge_get_and_clear_full(tlb->mm, addr, pud, tlb->fullmm); 2111 2085 tlb_remove_pud_tlb_entry(tlb, pud, addr); 2112 - if (vma_is_dax(vma)) { 2086 + if (vma_is_special_huge(vma)) { 2113 2087 spin_unlock(ptl); 2114 2088 /* No zero page support yet */ 2115 2089 } else { ··· 2218 2192 */ 2219 2193 if (arch_needs_pgtable_deposit()) 2220 2194 zap_deposited_table(mm, pmd); 2221 - if (vma_is_dax(vma)) 2195 + if (vma_is_special_huge(vma)) 2222 2196 return; 2223 2197 page = pmd_page(_pmd); 2224 2198 if (!PageDirty(page) && pmd_dirty(_pmd))
+19 -8
mm/memory.c
··· 3951 3951 { 3952 3952 if (vma_is_anonymous(vmf->vma)) 3953 3953 return do_huge_pmd_wp_page(vmf, orig_pmd); 3954 - if (vmf->vma->vm_ops->huge_fault) 3955 - return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD); 3954 + if (vmf->vma->vm_ops->huge_fault) { 3955 + vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD); 3956 3956 3957 - /* COW handled on pte level: split pmd */ 3958 - VM_BUG_ON_VMA(vmf->vma->vm_flags & VM_SHARED, vmf->vma); 3957 + if (!(ret & VM_FAULT_FALLBACK)) 3958 + return ret; 3959 + } 3960 + 3961 + /* COW or write-notify handled on pte level: split pmd. */ 3959 3962 __split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL); 3960 3963 3961 3964 return VM_FAULT_FALLBACK; ··· 3971 3968 3972 3969 static vm_fault_t create_huge_pud(struct vm_fault *vmf) 3973 3970 { 3974 - #ifdef CONFIG_TRANSPARENT_HUGEPAGE 3971 + #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ 3972 + defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) 3975 3973 /* No support for anonymous transparent PUD pages yet */ 3976 3974 if (vma_is_anonymous(vmf->vma)) 3977 - return VM_FAULT_FALLBACK; 3978 - if (vmf->vma->vm_ops->huge_fault) 3979 - return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD); 3975 + goto split; 3976 + if (vmf->vma->vm_ops->huge_fault) { 3977 + vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD); 3978 + 3979 + if (!(ret & VM_FAULT_FALLBACK)) 3980 + return ret; 3981 + } 3982 + split: 3983 + /* COW or write-notify not handled on PUD level: split pud.*/ 3984 + __split_huge_pud(vmf->vma, vmf->pud, vmf->address); 3980 3985 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 3981 3986 return VM_FAULT_FALLBACK; 3982 3987 }