Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

drm/i915: Use ttm mmap handling for ttm bo's.

Use the ttm handlers for servicing page faults and vm_access.

We do our own validation of read-only access; otherwise we use the
ttm handlers as much as possible.

Because the ttm handlers expect the vma_node at vma->base, we need to
slightly massage the mmap handlers to look at vma_node->driver_private
to fetch the bo. If it's NULL, we assume i915's normal mmap_offset uapi
is used.

This is the easiest way to achieve compatibility without changing ttm's
semantics.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210610070152.572423-5-thomas.hellstrom@linux.intel.com
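
Editor's note: the sketch below condenses the dispatch this patch introduces, paraphrased from the i915_gem_mman.c and i915_gem_ttm.c hunks further down. The helper name __i915_gem_mmap_setup() is illustrative only (upstream the logic sits inline in i915_gem_mmap()), and locking, refcounting and error paths are omitted; vm_fault_ttm() is taken from the patch itself.

/* Illustrative only: condensed from the hunks below. */
static int __i915_gem_mmap_setup(struct vm_area_struct *vma,
				 struct drm_vma_offset_node *node,
				 struct drm_i915_gem_object *obj)
{
	if (obj->ops->mmap_ops) {
		/*
		 * TTM-backed object: the vma_node lives at obj->base.vma_node
		 * and node->driver_private points back at the ttm_buffer_object,
		 * so hand the vma straight to the ttm vm_ops.
		 */
		vma->vm_page_prot = pgprot_decrypted(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = obj->ops->mmap_ops;
		vma->vm_private_data = node->driver_private;
		return 0;
	}

	/*
	 * driver_private is NULL: this is i915's classic mmap_offset uapi,
	 * where the node is embedded in a struct i915_mmap_offset.
	 */
	vma->vm_private_data = container_of(node, struct i915_mmap_offset, vma_node);
	return 0;
}

/* The read-only validation i915 keeps doing itself before deferring to ttm. */
static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
{
	struct drm_i915_gem_object *obj =
		i915_ttm_to_gem(vmf->vma->vm_private_data);

	if (unlikely(i915_gem_object_is_readonly(obj) &&
		     vmf->vma->vm_flags & VM_WRITE))
		return VM_FAULT_SIGBUS;

	return ttm_bo_vm_fault(vmf);
}

Keeping the read-only check in i915 rather than in ttm preserves the existing uapi behaviour for read-only objects while still letting ttm_bo_vm_fault() do the actual fault servicing.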

+255 -96
+61 -30
drivers/gpu/drm/i915/gem/i915_gem_mman.c
···
 #include "i915_gem_mman.h"
 #include "i915_trace.h"
 #include "i915_user_extensions.h"
+#include "i915_gem_ttm.h"
 #include "i915_vma.h"
 
 static inline bool
···
 	struct i915_mmap_offset *mmo;
 	int err;
 
+	GEM_BUG_ON(obj->ops->mmap_offset || obj->ops->mmap_ops);
+
 	mmo = lookup_mmo(obj, mmap_type);
 	if (mmo)
 		goto out;
···
 }
 
 static int
-__assign_mmap_offset(struct drm_file *file,
-		     u32 handle,
+__assign_mmap_offset(struct drm_i915_gem_object *obj,
 		     enum i915_mmap_type mmap_type,
-		     u64 *offset)
+		     u64 *offset, struct drm_file *file)
+{
+	struct i915_mmap_offset *mmo;
+
+	if (i915_gem_object_never_mmap(obj))
+		return -ENODEV;
+
+	if (obj->ops->mmap_offset) {
+		*offset = obj->ops->mmap_offset(obj);
+		return 0;
+	}
+
+	if (mmap_type != I915_MMAP_TYPE_GTT &&
+	    !i915_gem_object_has_struct_page(obj) &&
+	    !i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM))
+		return -ENODEV;
+
+	mmo = mmap_offset_attach(obj, mmap_type, file);
+	if (IS_ERR(mmo))
+		return PTR_ERR(mmo);
+
+	*offset = drm_vma_node_offset_addr(&mmo->vma_node);
+	return 0;
+}
+
+static int
+__assign_mmap_offset_handle(struct drm_file *file,
+			    u32 handle,
+			    enum i915_mmap_type mmap_type,
+			    u64 *offset)
 {
 	struct drm_i915_gem_object *obj;
-	struct i915_mmap_offset *mmo;
 	int err;
 
 	obj = i915_gem_object_lookup(file, handle);
 	if (!obj)
 		return -ENOENT;
 
-	if (i915_gem_object_never_mmap(obj)) {
-		err = -ENODEV;
-		goto out;
-	}
-
-	if (mmap_type != I915_MMAP_TYPE_GTT &&
-	    !i915_gem_object_has_struct_page(obj) &&
-	    !i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM)) {
-		err = -ENODEV;
-		goto out;
-	}
-
-	mmo = mmap_offset_attach(obj, mmap_type, file);
-	if (IS_ERR(mmo)) {
-		err = PTR_ERR(mmo);
-		goto out;
-	}
-
-	*offset = drm_vma_node_offset_addr(&mmo->vma_node);
-	err = 0;
-out:
+	err = __assign_mmap_offset(obj, mmap_type, offset, file);
 	i915_gem_object_put(obj);
 	return err;
 }
···
 	else
 		mmap_type = I915_MMAP_TYPE_GTT;
 
-	return __assign_mmap_offset(file, handle, mmap_type, offset);
+	return __assign_mmap_offset_handle(file, handle, mmap_type, offset);
 }
 
 /**
···
 		return -EINVAL;
 	}
 
-	return __assign_mmap_offset(file, args->handle, type, &args->offset);
+	return __assign_mmap_offset_handle(file, args->handle, type, &args->offset);
 }
 
 static void vm_open(struct vm_area_struct *vma)
···
 		 * destroyed and will be invalid when the vma manager lock
 		 * is released.
 		 */
-		mmo = container_of(node, struct i915_mmap_offset, vma_node);
-		obj = i915_gem_object_get_rcu(mmo->obj);
+		if (!node->driver_private) {
+			mmo = container_of(node, struct i915_mmap_offset, vma_node);
+			obj = i915_gem_object_get_rcu(mmo->obj);
+
+			GEM_BUG_ON(obj && obj->ops->mmap_ops);
+		} else {
+			obj = i915_gem_object_get_rcu
+				(container_of(node, struct drm_i915_gem_object,
+					      base.vma_node));
+
+			GEM_BUG_ON(obj && !obj->ops->mmap_ops);
+		}
 	}
 	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
 	rcu_read_unlock();
···
 	}
 
 	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
-	vma->vm_private_data = mmo;
+
+	if (i915_gem_object_has_iomem(obj))
+		vma->vm_flags |= VM_IO;
 
 	/*
 	 * We keep the ref on mmo->obj, not vm_file, but we require
···
 	vma_set_file(vma, anon);
 	/* Drop the initial creation reference, the vma is now holding one. */
 	fput(anon);
+
+	if (obj->ops->mmap_ops) {
+		vma->vm_page_prot = pgprot_decrypted(vm_get_page_prot(vma->vm_flags));
+		vma->vm_ops = obj->ops->mmap_ops;
+		vma->vm_private_data = node->driver_private;
+		return 0;
+	}
+
+	vma->vm_private_data = mmo;
 
 	switch (mmo->mmap_type) {
 	case I915_MMAP_TYPE_WC:
+3 -3
drivers/gpu/drm/i915/gem/i915_gem_object.h
···
 __i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
 			 struct i915_gem_object_page_iter *iter,
 			 unsigned int n,
-			 unsigned int *offset, bool allow_alloc);
+			 unsigned int *offset, bool allow_alloc, bool dma);
 
 static inline struct scatterlist *
 i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
 		       unsigned int n,
 		       unsigned int *offset, bool allow_alloc)
 {
-	return __i915_gem_object_get_sg(obj, &obj->mm.get_page, n, offset, allow_alloc);
+	return __i915_gem_object_get_sg(obj, &obj->mm.get_page, n, offset, allow_alloc, false);
 }
 
 static inline struct scatterlist *
···
 			   unsigned int n,
 			   unsigned int *offset, bool allow_alloc)
 {
-	return __i915_gem_object_get_sg(obj, &obj->mm.get_dma_page, n, offset, allow_alloc);
+	return __i915_gem_object_get_sg(obj, &obj->mm.get_dma_page, n, offset, allow_alloc, true);
 }
 
 struct page *
+3
drivers/gpu/drm/i915/gem/i915_gem_object_types.h
···
 		      const struct drm_i915_gem_pread *arg);
 	int (*pwrite)(struct drm_i915_gem_object *obj,
 		      const struct drm_i915_gem_pwrite *arg);
+	u64 (*mmap_offset)(struct drm_i915_gem_object *obj);
 
 	int (*dmabuf_export)(struct drm_i915_gem_object *obj);
 
···
 	void (*delayed_free)(struct drm_i915_gem_object *obj);
 	void (*release)(struct drm_i915_gem_object *obj);
 
+	const struct vm_operations_struct *mmap_ops;
 	const char *name; /* friendly name for debug, e.g. lockdep classes */
 };
···
 
 	struct {
 		struct sg_table *cached_io_st;
+		struct i915_gem_object_page_iter get_io_page;
 		bool created:1;
 	} ttm;
 
+1 -2
drivers/gpu/drm/i915/gem/i915_gem_pages.c
···
 			 struct i915_gem_object_page_iter *iter,
 			 unsigned int n,
 			 unsigned int *offset,
-			 bool allow_alloc)
+			 bool allow_alloc, bool dma)
 {
-	const bool dma = iter == &obj->mm.get_dma_page;
 	struct scatterlist *sg;
 	unsigned int idx, count;
 
+114 -7
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
···
 #include "gem/i915_gem_object.h"
 #include "gem/i915_gem_region.h"
 #include "gem/i915_gem_ttm.h"
+#include "gem/i915_gem_mman.h"
 
 #define I915_PL_LMEM0 TTM_PL_PRIV
 #define I915_PL_SYSTEM TTM_PL_SYSTEM
···
 
 static void i915_ttm_free_cached_io_st(struct drm_i915_gem_object *obj)
 {
-	if (obj->ttm.cached_io_st) {
-		sg_free_table(obj->ttm.cached_io_st);
-		kfree(obj->ttm.cached_io_st);
-		obj->ttm.cached_io_st = NULL;
-	}
+	struct radix_tree_iter iter;
+	void __rcu **slot;
+
+	if (!obj->ttm.cached_io_st)
+		return;
+
+	rcu_read_lock();
+	radix_tree_for_each_slot(slot, &obj->ttm.get_io_page.radix, &iter, 0)
+		radix_tree_delete(&obj->ttm.get_io_page.radix, iter.index);
+	rcu_read_unlock();
+
+	sg_free_table(obj->ttm.cached_io_st);
+	kfree(obj->ttm.cached_io_st);
+	obj->ttm.cached_io_st = NULL;
 }
 
 static void i915_ttm_purge(struct drm_i915_gem_object *obj)
···
 	ttm_bo_move_sync_cleanup(bo, dst_mem);
 	i915_ttm_free_cached_io_st(obj);
 
-	if (!dst_man->use_tt)
+	if (!dst_man->use_tt) {
 		obj->ttm.cached_io_st = dst_st;
+		obj->ttm.get_io_page.sg_pos = dst_st->sgl;
+		obj->ttm.get_io_page.sg_idx = 0;
+	}
 
 	return 0;
+}
+
+static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
+{
+	if (mem->mem_type < I915_PL_LMEM0)
+		return 0;
+
+	mem->bus.caching = ttm_write_combined;
+	mem->bus.is_iomem = true;
+
+	return 0;
+}
+
+static unsigned long i915_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
+					 unsigned long page_offset)
+{
+	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+	unsigned long base = obj->mm.region->iomap.base - obj->mm.region->region.start;
+	struct scatterlist *sg;
+	unsigned int ofs;
+
+	GEM_WARN_ON(bo->ttm);
+
+	sg = __i915_gem_object_get_sg(obj, &obj->ttm.get_io_page, page_offset, &ofs, true, true);
+
+	return ((base + sg_dma_address(sg)) >> PAGE_SHIFT) + ofs;
 }
 
 static struct ttm_device_funcs i915_ttm_bo_driver = {
···
 	.move = i915_ttm_move,
 	.swap_notify = i915_ttm_swap_notify,
 	.delete_mem_notify = i915_ttm_delete_mem_notify,
+	.io_mem_reserve = i915_ttm_io_mem_reserve,
+	.io_mem_pfn = i915_ttm_io_mem_pfn,
 };
 
 /**
···
 	}
 }
 
-static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
+static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
+{
+	struct vm_area_struct *area = vmf->vma;
+	struct drm_i915_gem_object *obj =
+		i915_ttm_to_gem(area->vm_private_data);
+
+	/* Sanity check that we allow writing into this object */
+	if (unlikely(i915_gem_object_is_readonly(obj) &&
+		     area->vm_flags & VM_WRITE))
+		return VM_FAULT_SIGBUS;
+
+	return ttm_bo_vm_fault(vmf);
+}
+
+static int
+vm_access_ttm(struct vm_area_struct *area, unsigned long addr,
+	      void *buf, int len, int write)
+{
+	struct drm_i915_gem_object *obj =
+		i915_ttm_to_gem(area->vm_private_data);
+
+	if (i915_gem_object_is_readonly(obj) && write)
+		return -EACCES;
+
+	return ttm_bo_vm_access(area, addr, buf, len, write);
+}
+
+static void ttm_vm_open(struct vm_area_struct *vma)
+{
+	struct drm_i915_gem_object *obj =
+		i915_ttm_to_gem(vma->vm_private_data);
+
+	GEM_BUG_ON(!obj);
+	i915_gem_object_get(obj);
+}
+
+static void ttm_vm_close(struct vm_area_struct *vma)
+{
+	struct drm_i915_gem_object *obj =
+		i915_ttm_to_gem(vma->vm_private_data);
+
+	GEM_BUG_ON(!obj);
+	i915_gem_object_put(obj);
+}
+
+static const struct vm_operations_struct vm_ops_ttm = {
+	.fault = vm_fault_ttm,
+	.access = vm_access_ttm,
+	.open = ttm_vm_open,
+	.close = ttm_vm_close,
+};
+
+static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
+{
+	/* The ttm_bo must be allocated with I915_BO_ALLOC_USER */
+	GEM_BUG_ON(!drm_mm_node_allocated(&obj->base.vma_node.vm_node));
+
+	return drm_vma_node_offset_addr(&obj->base.vma_node);
+}
+
+const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
 	.name = "i915_gem_object_ttm",
 	.flags = I915_GEM_OBJECT_HAS_IOMEM,
 
···
 	.truncate = i915_ttm_purge,
 	.adjust_lru = i915_ttm_adjust_lru,
 	.delayed_free = i915_ttm_delayed_free,
+	.mmap_offset = i915_ttm_mmap_offset,
+	.mmap_ops = &vm_ops_ttm,
 };
 
 void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
···
 	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
 
 	i915_gem_object_release_memory_region(obj);
+	mutex_destroy(&obj->ttm.get_io_page.lock);
 	if (obj->ttm.created)
 		call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
 }
···
 	i915_gem_object_make_unshrinkable(obj);
 	obj->read_domains = I915_GEM_DOMAIN_WC | I915_GEM_DOMAIN_GTT;
 	i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
+	INIT_RADIX_TREE(&obj->ttm.get_io_page.radix, GFP_KERNEL | __GFP_NOWARN);
+	mutex_init(&obj->ttm.get_io_page.lock);
 
 	bo_type = (obj->flags & I915_BO_ALLOC_USER) ? ttm_bo_type_device :
 		ttm_bo_type_kernel;
···
 	 * Similarly, in delayed_destroy, we can't call ttm_bo_put()
 	 * until successful initialization.
 	 */
+	obj->base.vma_node.driver_private = i915_gem_to_ttm(obj);
 	ret = ttm_bo_init(&i915->bdev, i915_gem_to_ttm(obj), size,
 			  bo_type, &i915_sys_placement, alignment,
 			  true, NULL, NULL, i915_ttm_bo_destroy);
+47 -43
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
···
 		       int expected)
 {
 	struct drm_i915_gem_object *obj;
-	struct i915_mmap_offset *mmo;
+	u64 offset;
+	int ret;
 
 	obj = i915_gem_object_create_internal(i915, size);
 	if (IS_ERR(obj))
-		return false;
+		return expected && expected == PTR_ERR(obj);
 
-	mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL);
+	ret = __assign_mmap_offset(obj, I915_MMAP_TYPE_GTT, &offset, NULL);
 	i915_gem_object_put(obj);
 
-	return PTR_ERR_OR_ZERO(mmo) == expected;
+	return ret == expected;
 }
 
 static void disable_retire_worker(struct drm_i915_private *i915)
···
 	struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
 	struct drm_i915_gem_object *obj;
 	struct drm_mm_node *hole, *next;
-	struct i915_mmap_offset *mmo;
 	int loop, err = 0;
+	u64 offset;
 
 	/* Disable background reaper */
 	disable_retire_worker(i915);
···
 	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
 	if (IS_ERR(obj)) {
 		err = PTR_ERR(obj);
+		pr_err("Unable to create object for reclaimed hole\n");
 		goto out;
 	}
 
-	mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL);
-	if (IS_ERR(mmo)) {
+	err = __assign_mmap_offset(obj, I915_MMAP_TYPE_GTT, &offset, NULL);
+	if (err) {
 		pr_err("Unable to insert object into reclaimed hole\n");
-		err = PTR_ERR(mmo);
 		goto err_obj;
 	}
 
···
 		      struct drm_i915_gem_object *obj,
 		      enum i915_mmap_type type)
 {
-	struct i915_mmap_offset *mmo;
 	struct vm_area_struct *area;
 	unsigned long addr;
 	int err, i;
+	u64 offset;
 
 	if (!can_mmap(obj, type))
 		return 0;
···
 	if (err)
 		return err;
 
-	mmo = mmap_offset_attach(obj, type, NULL);
-	if (IS_ERR(mmo))
-		return PTR_ERR(mmo);
+	err = __assign_mmap_offset(obj, type, &offset, NULL);
+	if (err)
+		return err;
 
-	addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
+	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
 	if (IS_ERR_VALUE(addr))
 		return addr;
 
···
 	area = find_vma(current->mm, addr);
 	if (!area) {
 		pr_err("%s: Did not create a vm_area_struct for the mmap\n",
-		       obj->mm.region->name);
-		err = -EINVAL;
-		goto out_unmap;
-	}
-
-	if (area->vm_private_data != mmo) {
-		pr_err("%s: vm_area_struct did not point back to our mmap_offset object!\n",
 		       obj->mm.region->name);
 		err = -EINVAL;
 		goto out_unmap;
···
 			struct drm_i915_gem_object *obj;
 			int err;
 
-			obj = i915_gem_object_create_region(mr, sizes[i], 0);
+			obj = i915_gem_object_create_region(mr, sizes[i], I915_BO_ALLOC_USER);
 			if (obj == ERR_PTR(-ENODEV))
 				continue;
 
···
 			struct drm_i915_gem_object *obj,
 			enum i915_mmap_type type)
 {
-	struct i915_mmap_offset *mmo;
 	unsigned long __user *ptr;
 	unsigned long A, B;
 	unsigned long x, y;
 	unsigned long addr;
 	int err;
+	u64 offset;
 
 	memset(&A, 0xAA, sizeof(A));
 	memset(&B, 0xBB, sizeof(B));
···
 	if (!can_mmap(obj, type) || !can_access(obj))
 		return 0;
 
-	mmo = mmap_offset_attach(obj, type, NULL);
-	if (IS_ERR(mmo))
-		return PTR_ERR(mmo);
+	err = __assign_mmap_offset(obj, type, &offset, NULL);
+	if (err)
+		return err;
 
-	addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
+	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
 	if (IS_ERR_VALUE(addr))
 		return addr;
 	ptr = (unsigned long __user *)addr;
···
 		struct drm_i915_gem_object *obj;
 		int err;
 
-		obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0);
+		obj = i915_gem_object_create_region(mr, PAGE_SIZE, I915_BO_ALLOC_USER);
 		if (obj == ERR_PTR(-ENODEV))
 			continue;
 
···
 		       enum i915_mmap_type type)
 {
 	struct intel_engine_cs *engine;
-	struct i915_mmap_offset *mmo;
 	unsigned long addr;
 	u32 __user *ux;
 	u32 bbe;
 	int err;
+	u64 offset;
 
 	/*
 	 * Verify that the mmap access into the backing store aligns with
···
 	if (err)
 		return err;
 
-	mmo = mmap_offset_attach(obj, type, NULL);
-	if (IS_ERR(mmo))
-		return PTR_ERR(mmo);
+	err = __assign_mmap_offset(obj, type, &offset, NULL);
+	if (err)
+		return err;
 
-	addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
+	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
 	if (IS_ERR_VALUE(addr))
 		return addr;
 
···
 		struct drm_i915_gem_object *obj;
 		int err;
 
-		obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0);
+		obj = i915_gem_object_create_region(mr, PAGE_SIZE, I915_BO_ALLOC_USER);
 		if (obj == ERR_PTR(-ENODEV))
 			continue;
 
···
 		      struct drm_i915_gem_object *obj,
 		      enum i915_mmap_type type)
 {
-	struct i915_mmap_offset *mmo;
 	unsigned long addr;
 	int err;
+	u64 offset;
 
 	if (!can_mmap(obj, type))
 		return 0;
 
-	mmo = mmap_offset_attach(obj, type, NULL);
-	if (IS_ERR(mmo))
-		return PTR_ERR(mmo);
+	err = __assign_mmap_offset(obj, type, &offset, NULL);
+	if (err)
+		return err;
 
-	addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
+	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
 	if (IS_ERR_VALUE(addr))
 		return addr;
 
···
 		}
 	}
 
-	err = check_absent(addr, obj->base.size);
-	if (err) {
-		pr_err("%s: was not absent\n", obj->mm.region->name);
-		goto out_unmap;
+	if (!obj->ops->mmap_ops) {
+		err = check_absent(addr, obj->base.size);
+		if (err) {
+			pr_err("%s: was not absent\n", obj->mm.region->name);
+			goto out_unmap;
+		}
+	} else {
+		/* ttm allows access to evicted regions by design */
+
+		err = check_present(addr, obj->base.size);
+		if (err) {
+			pr_err("%s: was not present\n", obj->mm.region->name);
+			goto out_unmap;
+		}
 	}
 
 out_unmap:
···
 		struct drm_i915_gem_object *obj;
 		int err;
 
-		obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0);
+		obj = i915_gem_object_create_region(mr, PAGE_SIZE, I915_BO_ALLOC_USER);
 		if (obj == ERR_PTR(-ENODEV))
 			continue;
 
+19 -6
drivers/gpu/drm/i915/selftests/igt_mmap.c
···
 #include "i915_drv.h"
 #include "igt_mmap.h"
 
-unsigned long igt_mmap_node(struct drm_i915_private *i915,
-			    struct drm_vma_offset_node *node,
-			    unsigned long addr,
-			    unsigned long prot,
-			    unsigned long flags)
+unsigned long igt_mmap_offset(struct drm_i915_private *i915,
+			      u64 offset,
+			      unsigned long size,
+			      unsigned long prot,
+			      unsigned long flags)
 {
+	struct drm_vma_offset_node *node;
 	struct file *file;
+	unsigned long addr;
 	int err;
+
+	/* no need to refcount, we own this object */
+	drm_vma_offset_lock_lookup(i915->drm.vma_offset_manager);
+	node = drm_vma_offset_exact_lookup_locked(i915->drm.vma_offset_manager,
+						  offset / PAGE_SIZE, size / PAGE_SIZE);
+	drm_vma_offset_unlock_lookup(i915->drm.vma_offset_manager);
+
+	if (GEM_WARN_ON(!node)) {
+		pr_info("Failed to lookup %llx\n", offset);
+		return -ENOENT;
+	}
 
 	/* Pretend to open("/dev/dri/card0") */
 	file = mock_drm_getfile(i915->drm.primary, O_RDWR);
···
 		goto out_file;
 	}
 
-	addr = vm_mmap(file, addr, drm_vma_node_size(node) << PAGE_SHIFT,
+	addr = vm_mmap(file, 0, drm_vma_node_size(node) << PAGE_SHIFT,
 		       prot, flags, drm_vma_node_offset_addr(node));
 
 	drm_vma_node_revoke(node, file->private_data);
+7 -5
drivers/gpu/drm/i915/selftests/igt_mmap.h
···
 #ifndef IGT_MMAP_H
 #define IGT_MMAP_H
 
+#include <linux/types.h>
+
 struct drm_i915_private;
 struct drm_vma_offset_node;
 
-unsigned long igt_mmap_node(struct drm_i915_private *i915,
-			    struct drm_vma_offset_node *node,
-			    unsigned long addr,
-			    unsigned long prot,
-			    unsigned long flags);
+unsigned long igt_mmap_offset(struct drm_i915_private *i915,
+			      u64 offset,
+			      unsigned long size,
+			      unsigned long prot,
+			      unsigned long flags);
 
 #endif /* IGT_MMAP_H */