Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'vmwgfx-next' of git://people.freedesktop.org/~thomash/linux into drm-next

Mostly code reorganizations and optimizations for vmwgfx.
- Move TTM code that's only used by vmwgfx to vmwgfx
- Break out the vmwgfx buffer- and resource validation code to a separate source file
- Get rid of a number of atomic operations during command buffer validation.

From: Thomas Hellstrom <thellstrom@vmware.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180928131157.2810-1-thellstrom@vmware.com

+1973 -1236
+2 -2
drivers/gpu/drm/ttm/Makefile
··· 4 4 5 5 ttm-y := ttm_memory.o ttm_tt.o ttm_bo.o \ 6 6 ttm_bo_util.o ttm_bo_vm.o ttm_module.o \ 7 - ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \ 8 - ttm_bo_manager.o ttm_page_alloc_dma.o 7 + ttm_execbuf_util.o ttm_page_alloc.o ttm_bo_manager.o \ 8 + ttm_page_alloc_dma.o 9 9 ttm-$(CONFIG_AGP) += ttm_agp_backend.o 10 10 11 11 obj-$(CONFIG_DRM_TTM) += ttm.o
+1 -2
drivers/gpu/drm/ttm/ttm_bo_vm.c
··· 409 409 node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages); 410 410 if (likely(node)) { 411 411 bo = container_of(node, struct ttm_buffer_object, vma_node); 412 - if (!kref_get_unless_zero(&bo->kref)) 413 - bo = NULL; 412 + bo = ttm_bo_get_unless_zero(bo); 414 413 } 415 414 416 415 drm_vma_offset_unlock_lookup(&bdev->vma_manager);
+3 -12
drivers/gpu/drm/ttm/ttm_lock.c drivers/gpu/drm/vmwgfx/ttm_lock.c
··· 29 29 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> 30 30 */ 31 31 32 - #include <drm/ttm/ttm_lock.h> 33 32 #include <drm/ttm/ttm_module.h> 34 33 #include <linux/atomic.h> 35 34 #include <linux/errno.h> 36 35 #include <linux/wait.h> 37 36 #include <linux/sched/signal.h> 38 - #include <linux/module.h> 37 + #include "ttm_lock.h" 38 + #include "ttm_object.h" 39 39 40 40 #define TTM_WRITE_LOCK_PENDING (1 << 0) 41 41 #define TTM_VT_LOCK_PENDING (1 << 1) ··· 52 52 lock->kill_takers = false; 53 53 lock->signal = SIGKILL; 54 54 } 55 - EXPORT_SYMBOL(ttm_lock_init); 56 55 57 56 void ttm_read_unlock(struct ttm_lock *lock) 58 57 { ··· 60 61 wake_up_all(&lock->queue); 61 62 spin_unlock(&lock->lock); 62 63 } 63 - EXPORT_SYMBOL(ttm_read_unlock); 64 64 65 65 static bool __ttm_read_lock(struct ttm_lock *lock) 66 66 { ··· 90 92 wait_event(lock->queue, __ttm_read_lock(lock)); 91 93 return ret; 92 94 } 93 - EXPORT_SYMBOL(ttm_read_lock); 94 95 95 96 static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked) 96 97 { ··· 141 144 wake_up_all(&lock->queue); 142 145 spin_unlock(&lock->lock); 143 146 } 144 - EXPORT_SYMBOL(ttm_write_unlock); 145 147 146 148 static bool __ttm_write_lock(struct ttm_lock *lock) 147 149 { ··· 181 185 182 186 return ret; 183 187 } 184 - EXPORT_SYMBOL(ttm_write_lock); 185 188 186 189 static int __ttm_vt_unlock(struct ttm_lock *lock) 187 190 { ··· 257 262 258 263 return ret; 259 264 } 260 - EXPORT_SYMBOL(ttm_vt_lock); 261 265 262 266 int ttm_vt_unlock(struct ttm_lock *lock) 263 267 { 264 268 return ttm_ref_object_base_unref(lock->vt_holder, 265 - lock->base.hash.key, TTM_REF_USAGE); 269 + lock->base.handle, TTM_REF_USAGE); 266 270 } 267 - EXPORT_SYMBOL(ttm_vt_unlock); 268 271 269 272 void ttm_suspend_unlock(struct ttm_lock *lock) 270 273 { ··· 271 278 wake_up_all(&lock->queue); 272 279 spin_unlock(&lock->lock); 273 280 } 274 - EXPORT_SYMBOL(ttm_suspend_unlock); 275 281 276 282 static bool __ttm_suspend_lock(struct ttm_lock *lock) 277 283 { 
··· 292 300 { 293 301 wait_event(lock->queue, __ttm_suspend_lock(lock)); 294 302 } 295 - EXPORT_SYMBOL(ttm_suspend_lock);
+58 -39
drivers/gpu/drm/ttm/ttm_object.c drivers/gpu/drm/vmwgfx/ttm_object.c
··· 59 59 60 60 #define pr_fmt(fmt) "[TTM] " fmt 61 61 62 - #include <drm/ttm/ttm_object.h> 63 62 #include <drm/ttm/ttm_module.h> 64 63 #include <linux/list.h> 65 64 #include <linux/spinlock.h> 66 65 #include <linux/slab.h> 67 - #include <linux/module.h> 68 66 #include <linux/atomic.h> 67 + #include "ttm_object.h" 69 68 70 69 struct ttm_object_file { 71 70 struct ttm_object_device *tdev; ··· 94 95 struct dma_buf_ops ops; 95 96 void (*dmabuf_release)(struct dma_buf *dma_buf); 96 97 size_t dma_buf_size; 98 + struct idr idr; 97 99 }; 98 100 99 101 /** ··· 172 172 base->ref_obj_release = ref_obj_release; 173 173 base->object_type = object_type; 174 174 kref_init(&base->refcount); 175 + idr_preload(GFP_KERNEL); 175 176 spin_lock(&tdev->object_lock); 176 - ret = drm_ht_just_insert_please_rcu(&tdev->object_hash, 177 - &base->hash, 178 - (unsigned long)base, 31, 0, 0); 177 + ret = idr_alloc(&tdev->idr, base, 0, 0, GFP_NOWAIT); 179 178 spin_unlock(&tdev->object_lock); 180 - if (unlikely(ret != 0)) 181 - goto out_err0; 179 + idr_preload_end(); 180 + if (ret < 0) 181 + return ret; 182 182 183 + base->handle = ret; 183 184 ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false); 184 185 if (unlikely(ret != 0)) 185 186 goto out_err1; ··· 190 189 return 0; 191 190 out_err1: 192 191 spin_lock(&tdev->object_lock); 193 - (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash); 192 + idr_remove(&tdev->idr, base->handle); 194 193 spin_unlock(&tdev->object_lock); 195 - out_err0: 196 194 return ret; 197 195 } 198 - EXPORT_SYMBOL(ttm_base_object_init); 199 196 200 197 static void ttm_release_base(struct kref *kref) 201 198 { ··· 202 203 struct ttm_object_device *tdev = base->tfile->tdev; 203 204 204 205 spin_lock(&tdev->object_lock); 205 - (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash); 206 + idr_remove(&tdev->idr, base->handle); 206 207 spin_unlock(&tdev->object_lock); 207 208 208 209 /* ··· 224 225 225 226 kref_put(&base->refcount, ttm_release_base); 226 
227 } 227 - EXPORT_SYMBOL(ttm_base_object_unref); 228 + 229 + /** 230 + * ttm_base_object_noref_lookup - look up a base object without reference 231 + * @tfile: The struct ttm_object_file the object is registered with. 232 + * @key: The object handle. 233 + * 234 + * This function looks up a ttm base object and returns a pointer to it 235 + * without refcounting the pointer. The returned pointer is only valid 236 + * until ttm_base_object_noref_release() is called, and the object 237 + * pointed to by the returned pointer may be doomed. Any persistent usage 238 + * of the object requires a refcount to be taken using kref_get_unless_zero(). 239 + * Iff this function returns successfully it needs to be paired with 240 + * ttm_base_object_noref_release() and no sleeping- or scheduling functions 241 + * may be called in between these function calls. 242 + * 243 + * Return: A pointer to the object if successful or NULL otherwise. 244 + */ 245 + struct ttm_base_object * 246 + ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key) 247 + { 248 + struct drm_hash_item *hash; 249 + struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE]; 250 + int ret; 251 + 252 + rcu_read_lock(); 253 + ret = drm_ht_find_item_rcu(ht, key, &hash); 254 + if (ret) { 255 + rcu_read_unlock(); 256 + return NULL; 257 + } 258 + 259 + __release(RCU); 260 + return drm_hash_entry(hash, struct ttm_ref_object, hash)->obj; 261 + } 262 + EXPORT_SYMBOL(ttm_base_object_noref_lookup); 228 263 229 264 struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile, 230 265 uint32_t key) ··· 280 247 281 248 return base; 282 249 } 283 - EXPORT_SYMBOL(ttm_base_object_lookup); 284 250 285 251 struct ttm_base_object * 286 252 ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key) 287 253 { 288 - struct ttm_base_object *base = NULL; 289 - struct drm_hash_item *hash; 290 - struct drm_open_hash *ht = &tdev->object_hash; 291 - int ret; 254 + struct ttm_base_object 
*base; 292 255 293 256 rcu_read_lock(); 294 - ret = drm_ht_find_item_rcu(ht, key, &hash); 257 + base = idr_find(&tdev->idr, key); 295 258 296 - if (likely(ret == 0)) { 297 - base = drm_hash_entry(hash, struct ttm_base_object, hash); 298 - if (!kref_get_unless_zero(&base->refcount)) 299 - base = NULL; 300 - } 259 + if (base && !kref_get_unless_zero(&base->refcount)) 260 + base = NULL; 301 261 rcu_read_unlock(); 302 262 303 263 return base; 304 264 } 305 - EXPORT_SYMBOL(ttm_base_object_lookup_for_ref); 306 265 307 266 /** 308 267 * ttm_ref_object_exists - Check whether a caller has a valid ref object ··· 314 289 struct ttm_ref_object *ref; 315 290 316 291 rcu_read_lock(); 317 - if (unlikely(drm_ht_find_item_rcu(ht, base->hash.key, &hash) != 0)) 292 + if (unlikely(drm_ht_find_item_rcu(ht, base->handle, &hash) != 0)) 318 293 goto out_false; 319 294 320 295 /* ··· 340 315 rcu_read_unlock(); 341 316 return false; 342 317 } 343 - EXPORT_SYMBOL(ttm_ref_object_exists); 344 318 345 319 int ttm_ref_object_add(struct ttm_object_file *tfile, 346 320 struct ttm_base_object *base, ··· 364 340 365 341 while (ret == -EINVAL) { 366 342 rcu_read_lock(); 367 - ret = drm_ht_find_item_rcu(ht, base->hash.key, &hash); 343 + ret = drm_ht_find_item_rcu(ht, base->handle, &hash); 368 344 369 345 if (ret == 0) { 370 346 ref = drm_hash_entry(hash, struct ttm_ref_object, hash); ··· 388 364 return -ENOMEM; 389 365 } 390 366 391 - ref->hash.key = base->hash.key; 367 + ref->hash.key = base->handle; 392 368 ref->obj = base; 393 369 ref->tfile = tfile; 394 370 ref->ref_type = ref_type; ··· 415 391 416 392 return ret; 417 393 } 418 - EXPORT_SYMBOL(ttm_ref_object_add); 419 394 420 - static void ttm_ref_object_release(struct kref *kref) 395 + static void __releases(tfile->lock) __acquires(tfile->lock) 396 + ttm_ref_object_release(struct kref *kref) 421 397 { 422 398 struct ttm_ref_object *ref = 423 399 container_of(kref, struct ttm_ref_object, kref); ··· 459 435 spin_unlock(&tfile->lock); 460 436 return 
0; 461 437 } 462 - EXPORT_SYMBOL(ttm_ref_object_base_unref); 463 438 464 439 void ttm_object_file_release(struct ttm_object_file **p_tfile) 465 440 { ··· 487 464 488 465 ttm_object_file_unref(&tfile); 489 466 } 490 - EXPORT_SYMBOL(ttm_object_file_release); 491 467 492 468 struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev, 493 469 unsigned int hash_order) ··· 521 499 522 500 return NULL; 523 501 } 524 - EXPORT_SYMBOL(ttm_object_file_init); 525 502 526 503 struct ttm_object_device * 527 504 ttm_object_device_init(struct ttm_mem_global *mem_glob, ··· 540 519 if (ret != 0) 541 520 goto out_no_object_hash; 542 521 522 + idr_init(&tdev->idr); 543 523 tdev->ops = *ops; 544 524 tdev->dmabuf_release = tdev->ops.release; 545 525 tdev->ops.release = ttm_prime_dmabuf_release; ··· 552 530 kfree(tdev); 553 531 return NULL; 554 532 } 555 - EXPORT_SYMBOL(ttm_object_device_init); 556 533 557 534 void ttm_object_device_release(struct ttm_object_device **p_tdev) 558 535 { ··· 559 538 560 539 *p_tdev = NULL; 561 540 541 + WARN_ON_ONCE(!idr_is_empty(&tdev->idr)); 542 + idr_destroy(&tdev->idr); 562 543 drm_ht_remove(&tdev->object_hash); 563 544 564 545 kfree(tdev); 565 546 } 566 - EXPORT_SYMBOL(ttm_object_device_release); 567 547 568 548 /** 569 549 * get_dma_buf_unless_doomed - get a dma_buf reference if possible. 
··· 663 641 664 642 prime = (struct ttm_prime_object *) dma_buf->priv; 665 643 base = &prime->base; 666 - *handle = base->hash.key; 644 + *handle = base->handle; 667 645 ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false); 668 646 669 647 dma_buf_put(dma_buf); 670 648 671 649 return ret; 672 650 } 673 - EXPORT_SYMBOL_GPL(ttm_prime_fd_to_handle); 674 651 675 652 /** 676 653 * ttm_prime_handle_to_fd - Return a dma_buf fd from a ttm prime object ··· 760 739 ttm_base_object_unref(&base); 761 740 return ret; 762 741 } 763 - EXPORT_SYMBOL_GPL(ttm_prime_handle_to_fd); 764 742 765 743 /** 766 744 * ttm_prime_object_init - Initialize a ttm_prime_object ··· 792 772 ttm_prime_refcount_release, 793 773 ref_obj_release); 794 774 } 795 - EXPORT_SYMBOL(ttm_prime_object_init);
+3 -1
drivers/gpu/drm/vmwgfx/Makefile
··· 7 7 vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \ 8 8 vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \ 9 9 vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \ 10 - vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o 10 + vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \ 11 + vmwgfx_validation.o \ 12 + ttm_object.o ttm_lock.o 11 13 12 14 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
+46 -4
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
··· 30 30 31 31 #include <drm/drmP.h> 32 32 #include "vmwgfx_drv.h" 33 - #include "drm/ttm/ttm_object.h" 33 + #include "ttm_object.h" 34 34 35 35 36 36 /** ··· 441 441 struct_size = backend_size + 442 442 ttm_round_pot(sizeof(struct vmw_buffer_object)); 443 443 user_struct_size = backend_size + 444 - ttm_round_pot(sizeof(struct vmw_user_buffer_object)); 444 + ttm_round_pot(sizeof(struct vmw_user_buffer_object)) + 445 + TTM_OBJ_EXTRA_SIZE; 445 446 } 446 447 447 448 if (dev_priv->map_mode == vmw_dma_alloc_coherent) ··· 632 631 *p_base = &user_bo->prime.base; 633 632 kref_get(&(*p_base)->refcount); 634 633 } 635 - *handle = user_bo->prime.base.hash.key; 634 + *handle = user_bo->prime.base.handle; 636 635 637 636 out_no_base_object: 638 637 return ret; ··· 921 920 return 0; 922 921 } 923 922 923 + /** 924 + * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference 925 + * @tfile: The TTM object file the handle is registered with. 926 + * @handle: The user buffer object handle. 927 + * 928 + * This function looks up a struct vmw_user_bo and returns a pointer to the 929 + * struct vmw_buffer_object it derives from without refcounting the pointer. 930 + * The returned pointer is only valid until vmw_user_bo_noref_release() is 931 + * called, and the object pointed to by the returned pointer may be doomed. 932 + * Any persistent usage of the object requires a refcount to be taken using 933 + * ttm_bo_reference_unless_doomed(). Iff this function returns successfully it 934 + * needs to be paired with vmw_user_bo_noref_release() and no sleeping- 935 + * or scheduling functions may be called inbetween these function calls. 936 + * 937 + * Return: A struct vmw_buffer_object pointer if successful or negative 938 + * error pointer on failure. 
939 + */ 940 + struct vmw_buffer_object * 941 + vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle) 942 + { 943 + struct vmw_user_buffer_object *vmw_user_bo; 944 + struct ttm_base_object *base; 945 + 946 + base = ttm_base_object_noref_lookup(tfile, handle); 947 + if (!base) { 948 + DRM_ERROR("Invalid buffer object handle 0x%08lx.\n", 949 + (unsigned long)handle); 950 + return ERR_PTR(-ESRCH); 951 + } 952 + 953 + if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) { 954 + ttm_base_object_noref_release(); 955 + DRM_ERROR("Invalid buffer object handle 0x%08lx.\n", 956 + (unsigned long)handle); 957 + return ERR_PTR(-EINVAL); 958 + } 959 + 960 + vmw_user_bo = container_of(base, struct vmw_user_buffer_object, 961 + prime.base); 962 + return &vmw_user_bo->vbo; 963 + } 924 964 925 965 /** 926 966 * vmw_user_bo_reference - Open a handle to a vmw user buffer object. ··· 982 940 983 941 user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo); 984 942 985 - *handle = user_bo->prime.base.hash.key; 943 + *handle = user_bo->prime.base.handle; 986 944 return ttm_ref_object_add(tfile, &user_bo->prime.base, 987 945 TTM_REF_USAGE, NULL, false); 988 946 }
+2 -2
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
··· 660 660 { 661 661 struct vmw_cmdbuf_header *cur = man->cur; 662 662 663 - WARN_ON(!mutex_is_locked(&man->cur_mutex)); 663 + lockdep_assert_held_once(&man->cur_mutex); 664 664 665 665 if (!cur) 666 666 return; ··· 1045 1045 { 1046 1046 struct vmw_cmdbuf_header *cur = man->cur; 1047 1047 1048 - WARN_ON(!mutex_is_locked(&man->cur_mutex)); 1048 + lockdep_assert_held_once(&man->cur_mutex); 1049 1049 1050 1050 WARN_ON(size > cur->reserved); 1051 1051 man->cur_pos += size;
+1 -2
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
··· 89 89 if (unlikely(ret != 0)) 90 90 return ERR_PTR(ret); 91 91 92 - return vmw_resource_reference 93 - (drm_hash_entry(hash, struct vmw_cmdbuf_res, hash)->res); 92 + return drm_hash_entry(hash, struct vmw_cmdbuf_res, hash)->res; 94 93 } 95 94 96 95 /**
+8 -15
drivers/gpu/drm/vmwgfx/vmwgfx_context.c
··· 217 217 } 218 218 } 219 219 220 - 221 - 222 - vmw_resource_activate(res, vmw_hw_context_destroy); 220 + res->hw_destroy = vmw_hw_context_destroy; 223 221 return 0; 224 222 225 223 out_cotables: ··· 272 274 273 275 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 274 276 vmw_fifo_resource_inc(dev_priv); 275 - vmw_resource_activate(res, vmw_hw_context_destroy); 277 + res->hw_destroy = vmw_hw_context_destroy; 276 278 return 0; 277 279 278 280 out_early: ··· 755 757 return -EINVAL; 756 758 } 757 759 758 - /* 759 - * Approximate idr memory usage with 128 bytes. It will be limited 760 - * by maximum number_of contexts anyway. 761 - */ 762 - 763 760 if (unlikely(vmw_user_context_size == 0)) 764 - vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128 + 765 - ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0); 761 + vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 762 + ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0) + 763 + + VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE; 766 764 767 765 ret = ttm_read_lock(&dev_priv->reservation_sem, true); 768 766 if (unlikely(ret != 0)) ··· 803 809 goto out_err; 804 810 } 805 811 806 - arg->cid = ctx->base.hash.key; 812 + arg->cid = ctx->base.handle; 807 813 out_err: 808 814 vmw_resource_unreference(&res); 809 815 out_unlock: ··· 861 867 if (cotable_type >= SVGA_COTABLE_DX10_MAX) 862 868 return ERR_PTR(-EINVAL); 863 869 864 - return vmw_resource_reference 865 - (container_of(ctx, struct vmw_user_context, res)-> 866 - cotables[cotable_type]); 870 + return container_of(ctx, struct vmw_user_context, res)-> 871 + cotables[cotable_type]; 867 872 } 868 873 869 874 /**
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
··· 615 615 vcotbl->type = type; 616 616 vcotbl->ctx = ctx; 617 617 618 - vmw_resource_activate(&vcotbl->res, vmw_hw_cotable_destroy); 618 + vcotbl->res.hw_destroy = vmw_hw_cotable_destroy; 619 619 620 620 return &vcotbl->res; 621 621
+2 -2
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
··· 30 30 #include <drm/drmP.h> 31 31 #include "vmwgfx_drv.h" 32 32 #include "vmwgfx_binding.h" 33 + #include "ttm_object.h" 33 34 #include <drm/ttm/ttm_placement.h> 34 35 #include <drm/ttm/ttm_bo_driver.h> 35 - #include <drm/ttm/ttm_object.h> 36 36 #include <drm/ttm/ttm_module.h> 37 37 #include <linux/dma_remapping.h> 38 38 ··· 667 667 mutex_init(&dev_priv->binding_mutex); 668 668 mutex_init(&dev_priv->requested_layout_mutex); 669 669 mutex_init(&dev_priv->global_kms_state_mutex); 670 - rwlock_init(&dev_priv->resource_lock); 671 670 ttm_lock_init(&dev_priv->reservation_sem); 671 + spin_lock_init(&dev_priv->resource_lock); 672 672 spin_lock_init(&dev_priv->hw_lock); 673 673 spin_lock_init(&dev_priv->waiter_lock); 674 674 spin_lock_init(&dev_priv->cap_lock);
+111 -36
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
··· 28 28 #ifndef _VMWGFX_DRV_H_ 29 29 #define _VMWGFX_DRV_H_ 30 30 31 + #include "vmwgfx_validation.h" 31 32 #include "vmwgfx_reg.h" 32 33 #include <drm/drmP.h> 33 34 #include <drm/vmwgfx_drm.h> ··· 36 35 #include <drm/drm_auth.h> 37 36 #include <linux/suspend.h> 38 37 #include <drm/ttm/ttm_bo_driver.h> 39 - #include <drm/ttm/ttm_object.h> 40 - #include <drm/ttm/ttm_lock.h> 41 38 #include <drm/ttm/ttm_execbuf_util.h> 42 39 #include <drm/ttm/ttm_module.h> 43 40 #include "vmwgfx_fence.h" 41 + #include "ttm_object.h" 42 + #include "ttm_lock.h" 44 43 #include <linux/sync_file.h> 45 44 46 45 #define VMWGFX_DRIVER_NAME "vmwgfx" ··· 113 112 }; 114 113 115 114 struct vmw_res_func; 115 + 116 + 117 + /** 118 + * struct vmw-resource - base class for hardware resources 119 + * 120 + * @kref: For refcounting. 121 + * @dev_priv: Pointer to the device private for this resource. Immutable. 122 + * @id: Device id. Protected by @dev_priv::resource_lock. 123 + * @backup_size: Backup buffer size. Immutable. 124 + * @res_dirty: Resource contains data not yet in the backup buffer. Protected 125 + * by resource reserved. 126 + * @backup_dirty: Backup buffer contains data not yet in the HW resource. 127 + * Protecte by resource reserved. 128 + * @backup: The backup buffer if any. Protected by resource reserved. 129 + * @backup_offset: Offset into the backup buffer if any. Protected by resource 130 + * reserved. Note that only a few resource types can have a @backup_offset 131 + * different from zero. 132 + * @pin_count: The pin count for this resource. A pinned resource has a 133 + * pin-count greater than zero. It is not on the resource LRU lists and its 134 + * backup buffer is pinned. Hence it can't be evicted. 135 + * @func: Method vtable for this resource. Immutable. 136 + * @lru_head: List head for the LRU list. Protected by @dev_priv::resource_lock. 137 + * @mob_head: List head for the MOB backup list. Protected by @backup reserved. 
138 + * @binding_head: List head for the context binding list. Protected by 139 + * the @dev_priv::binding_mutex 140 + * @res_free: The resource destructor. 141 + * @hw_destroy: Callback to destroy the resource on the device, as part of 142 + * resource destruction. 143 + */ 116 144 struct vmw_resource { 117 145 struct kref kref; 118 146 struct vmw_private *dev_priv; 119 147 int id; 120 - bool avail; 121 148 unsigned long backup_size; 122 - bool res_dirty; /* Protected by backup buffer reserved */ 123 - bool backup_dirty; /* Protected by backup buffer reserved */ 149 + bool res_dirty; 150 + bool backup_dirty; 124 151 struct vmw_buffer_object *backup; 125 152 unsigned long backup_offset; 126 - unsigned long pin_count; /* Protected by resource reserved */ 153 + unsigned long pin_count; 127 154 const struct vmw_res_func *func; 128 - struct list_head lru_head; /* Protected by the resource lock */ 129 - struct list_head mob_head; /* Protected by @backup reserved */ 130 - struct list_head binding_head; /* Protected by binding_mutex */ 155 + struct list_head lru_head; 156 + struct list_head mob_head; 157 + struct list_head binding_head; 131 158 void (*res_free) (struct vmw_resource *res); 132 159 void (*hw_destroy) (struct vmw_resource *res); 133 160 }; ··· 233 204 bool dx; 234 205 }; 235 206 236 - struct vmw_relocation { 237 - SVGAMobId *mob_loc; 238 - SVGAGuestPtr *location; 239 - uint32_t index; 240 - }; 241 - 242 207 /** 243 208 * struct vmw_res_cache_entry - resource information cache entry 244 - * 209 + * @handle: User-space handle of a resource. 210 + * @res: Non-ref-counted pointer to the resource. 211 + * @valid_handle: Whether the @handle member is valid. 245 212 * @valid: Whether the entry is valid, which also implies that the execbuf 246 213 * code holds a reference to the resource, and it's placed on the 247 214 * validation list. 248 - * @handle: User-space handle of a resource. 249 - * @res: Non-ref-counted pointer to the resource. 
250 215 * 251 216 * Used to avoid frequent repeated user-space handle lookups of the 252 217 * same resource. 253 218 */ 254 219 struct vmw_res_cache_entry { 255 - bool valid; 256 220 uint32_t handle; 257 221 struct vmw_resource *res; 258 - struct vmw_resource_val_node *node; 222 + void *private; 223 + unsigned short valid_handle; 224 + unsigned short valid; 259 225 }; 260 226 261 227 /** ··· 315 291 vmw_du_screen_target 316 292 }; 317 293 294 + struct vmw_validation_context; 295 + struct vmw_ctx_validation_info; 318 296 297 + /** 298 + * struct vmw_sw_context - Command submission context 299 + * @res_ht: Pointer hash table used to find validation duplicates 300 + * @kernel: Whether the command buffer originates from kernel code rather 301 + * than from user-space 302 + * @fp: If @kernel is false, points to the file of the client. Otherwise 303 + * NULL 304 + * @cmd_bounce: Command bounce buffer used for command validation before 305 + * copying to fifo space 306 + * @cmd_bounce_size: Current command bounce buffer size 307 + * @cur_query_bo: Current buffer object used as query result buffer 308 + * @bo_relocations: List of buffer object relocations 309 + * @res_relocations: List of resource relocations 310 + * @buf_start: Pointer to start of memory where command validation takes 311 + * place 312 + * @res_cache: Cache of recently looked up resources 313 + * @last_query_ctx: Last context that submitted a query 314 + * @needs_post_query_barrier: Whether a query barrier is needed after 315 + * command submission 316 + * @staged_bindings: Cached per-context binding tracker 317 + * @staged_bindings_inuse: Whether the cached per-context binding tracker 318 + * is in use 319 + * @staged_cmd_res: List of staged command buffer managed resources in this 320 + * command buffer 321 + * @ctx_list: List of context resources referenced in this command buffer 322 + * @dx_ctx_node: Validation metadata of the current DX context 323 + * @dx_query_mob: The MOB used for DX queries 324 
+ * @dx_query_ctx: The DX context used for the last DX query 325 + * @man: Pointer to the command buffer managed resource manager 326 + * @ctx: The validation context 327 + */ 319 328 struct vmw_sw_context{ 320 329 struct drm_open_hash res_ht; 321 330 bool res_ht_initialized; 322 - bool kernel; /**< is the called made from the kernel */ 331 + bool kernel; 323 332 struct vmw_fpriv *fp; 324 - struct list_head validate_nodes; 325 - struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS]; 326 - uint32_t cur_reloc; 327 - struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS]; 328 - uint32_t cur_val_buf; 329 333 uint32_t *cmd_bounce; 330 334 uint32_t cmd_bounce_size; 331 - struct list_head resource_list; 332 - struct list_head ctx_resource_list; /* For contexts and cotables */ 333 335 struct vmw_buffer_object *cur_query_bo; 336 + struct list_head bo_relocations; 334 337 struct list_head res_relocations; 335 338 uint32_t *buf_start; 336 339 struct vmw_res_cache_entry res_cache[vmw_res_max]; 337 340 struct vmw_resource *last_query_ctx; 338 341 bool needs_post_query_barrier; 339 - struct vmw_resource *error_resource; 340 342 struct vmw_ctx_binding_state *staged_bindings; 341 343 bool staged_bindings_inuse; 342 344 struct list_head staged_cmd_res; 343 - struct vmw_resource_val_node *dx_ctx_node; 345 + struct list_head ctx_list; 346 + struct vmw_ctx_validation_info *dx_ctx_node; 344 347 struct vmw_buffer_object *dx_query_mob; 345 348 struct vmw_resource *dx_query_ctx; 346 349 struct vmw_cmdbuf_res_manager *man; 350 + struct vmw_validation_context *ctx; 347 351 }; 348 352 349 353 struct vmw_legacy_display; ··· 496 444 * Context and surface management. 497 445 */ 498 446 499 - rwlock_t resource_lock; 447 + spinlock_t resource_lock; 500 448 struct idr res_idr[vmw_res_max]; 501 449 /* 502 450 * Block lastclose from racing with firstopen. 
··· 680 628 extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res); 681 629 extern struct vmw_resource * 682 630 vmw_resource_reference_unless_doomed(struct vmw_resource *res); 683 - extern int vmw_resource_validate(struct vmw_resource *res); 631 + extern int vmw_resource_validate(struct vmw_resource *res, bool intr); 684 632 extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible, 685 633 bool no_backup); 686 634 extern bool vmw_resource_needs_backup(const struct vmw_resource *res); ··· 695 643 uint32_t handle, 696 644 const struct vmw_user_resource_conv *converter, 697 645 struct vmw_resource **p_res); 646 + extern struct vmw_resource * 647 + vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv, 648 + struct ttm_object_file *tfile, 649 + uint32_t handle, 650 + const struct vmw_user_resource_conv * 651 + converter); 698 652 extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, 699 653 struct drm_file *file_priv); 700 654 extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data, ··· 718 660 extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob); 719 661 extern void vmw_resource_evict_all(struct vmw_private *dev_priv); 720 662 extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo); 663 + 664 + /** 665 + * vmw_user_resource_noref_release - release a user resource pointer looked up 666 + * without reference 667 + */ 668 + static inline void vmw_user_resource_noref_release(void) 669 + { 670 + ttm_base_object_noref_release(); 671 + } 721 672 722 673 /** 723 674 * Buffer object helper functions - vmwgfx_bo.c ··· 784 717 extern void vmw_bo_move_notify(struct ttm_buffer_object *bo, 785 718 struct ttm_mem_reg *mem); 786 719 extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo); 720 + extern struct vmw_buffer_object * 721 + vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle); 722 + 723 + /** 724 + * vmw_user_bo_noref_release - release a 
buffer object pointer looked up 725 + * without reference 726 + */ 727 + static inline void vmw_user_bo_noref_release(void) 728 + { 729 + ttm_base_object_noref_release(); 730 + } 731 + 787 732 788 733 /** 789 734 * Misc Ioctl functionality - vmwgfx_ioctl.c ··· 943 864 uint32_t fence_handle, 944 865 int32_t out_fence_fd, 945 866 struct sync_file *sync_file); 946 - extern int vmw_validate_single_buffer(struct vmw_private *dev_priv, 947 - struct ttm_buffer_object *bo, 948 - bool interruptible, 949 - bool validate_as_mob); 950 867 bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd); 951 868 952 869 /**
+504 -800
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
··· 35 35 36 36 #define VMW_RES_HT_ORDER 12 37 37 38 + /* 39 + * struct vmw_relocation - Buffer object relocation 40 + * 41 + * @head: List head for the command submission context's relocation list 42 + * @vbo: Non ref-counted pointer to buffer object 43 + * @mob_loc: Pointer to location for mob id to be modified 44 + * @location: Pointer to location for guest pointer to be modified 45 + */ 46 + struct vmw_relocation { 47 + struct list_head head; 48 + struct vmw_buffer_object *vbo; 49 + union { 50 + SVGAMobId *mob_loc; 51 + SVGAGuestPtr *location; 52 + }; 53 + }; 54 + 38 55 /** 39 56 * enum vmw_resource_relocation_type - Relocation type for resources 40 57 * ··· 86 69 enum vmw_resource_relocation_type rel_type:3; 87 70 }; 88 71 89 - /** 90 - * struct vmw_resource_val_node - Validation info for resources 91 - * 92 - * @head: List head for the software context's resource list. 93 - * @hash: Hash entry for quick resouce to val_node lookup. 94 - * @res: Ref-counted pointer to the resource. 95 - * @switch_backup: Boolean whether to switch backup buffer on unreserve. 96 - * @new_backup: Refcounted pointer to the new backup buffer. 97 - * @staged_bindings: If @res is a context, tracks bindings set up during 98 - * the command batch. Otherwise NULL. 99 - * @new_backup_offset: New backup buffer offset if @new_backup is non-NUll. 100 - * @first_usage: Set to true the first time the resource is referenced in 101 - * the command stream. 102 - * @switching_backup: The command stream provides a new backup buffer for a 103 - * resource. 104 - * @no_buffer_needed: This means @switching_backup is true on first buffer 105 - * reference. So resource reservation does not need to allocate a backup 106 - * buffer for the resource. 
72 + /* 73 + * struct vmw_ctx_validation_info - Extra validation metadata for contexts 74 + * @head: List head of context list 75 + * @ctx: The context resource 76 + * @cur: The context's persistent binding state 77 + * @staged: The binding state changes of this command buffer 107 78 */ 108 - struct vmw_resource_val_node { 79 + struct vmw_ctx_validation_info { 109 80 struct list_head head; 110 - struct drm_hash_item hash; 111 - struct vmw_resource *res; 112 - struct vmw_buffer_object *new_backup; 113 - struct vmw_ctx_binding_state *staged_bindings; 114 - unsigned long new_backup_offset; 115 - u32 first_usage : 1; 116 - u32 switching_backup : 1; 117 - u32 no_buffer_needed : 1; 81 + struct vmw_resource *ctx; 82 + struct vmw_ctx_binding_state *cur; 83 + struct vmw_ctx_binding_state *staged; 118 84 }; 119 85 120 86 /** ··· 127 127 struct vmw_sw_context *sw_context, 128 128 SVGAMobId *id, 129 129 struct vmw_buffer_object **vmw_bo_p); 130 - static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, 131 - struct vmw_buffer_object *vbo, 132 - bool validate_as_mob, 133 - uint32_t *p_val_node); 134 130 /** 135 131 * vmw_ptr_diff - Compute the offset from a to b in bytes 136 132 * ··· 141 145 } 142 146 143 147 /** 144 - * vmw_resources_unreserve - unreserve resources previously reserved for 145 - * command submission. 146 - * 147 - * @sw_context: pointer to the software context 148 - * @backoff: Whether command submission failed. 
148 + * vmw_execbuf_bindings_commit - Commit modified binding state 149 + * @sw_context: The command submission context 150 + * @backoff: Whether this is part of the error path and binding state 151 + * changes should be ignored 149 152 */ 150 - static void vmw_resources_unreserve(struct vmw_sw_context *sw_context, 151 - bool backoff) 153 + static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context, 154 + bool backoff) 152 155 { 153 - struct vmw_resource_val_node *val; 154 - struct list_head *list = &sw_context->resource_list; 156 + struct vmw_ctx_validation_info *entry; 155 157 156 - if (sw_context->dx_query_mob && !backoff) 158 + list_for_each_entry(entry, &sw_context->ctx_list, head) { 159 + if (!backoff) 160 + vmw_binding_state_commit(entry->cur, entry->staged); 161 + if (entry->staged != sw_context->staged_bindings) 162 + vmw_binding_state_free(entry->staged); 163 + else 164 + sw_context->staged_bindings_inuse = false; 165 + } 166 + 167 + /* List entries are freed with the validation context */ 168 + INIT_LIST_HEAD(&sw_context->ctx_list); 169 + } 170 + 171 + /** 172 + * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced 173 + * @sw_context: The command submission context 174 + */ 175 + static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context) 176 + { 177 + if (sw_context->dx_query_mob) 157 178 vmw_context_bind_dx_query(sw_context->dx_query_ctx, 158 179 sw_context->dx_query_mob); 159 - 160 - list_for_each_entry(val, list, head) { 161 - struct vmw_resource *res = val->res; 162 - bool switch_backup = 163 - (backoff) ? false : val->switching_backup; 164 - 165 - /* 166 - * Transfer staged context bindings to the 167 - * persistent context binding tracker. 
168 - */ 169 - if (unlikely(val->staged_bindings)) { 170 - if (!backoff) { 171 - vmw_binding_state_commit 172 - (vmw_context_binding_state(val->res), 173 - val->staged_bindings); 174 - } 175 - 176 - if (val->staged_bindings != sw_context->staged_bindings) 177 - vmw_binding_state_free(val->staged_bindings); 178 - else 179 - sw_context->staged_bindings_inuse = false; 180 - val->staged_bindings = NULL; 181 - } 182 - vmw_resource_unreserve(res, switch_backup, val->new_backup, 183 - val->new_backup_offset); 184 - vmw_bo_unreference(&val->new_backup); 185 - } 186 180 } 187 181 188 182 /** ··· 180 194 * added to the validate list. 181 195 * 182 196 * @dev_priv: Pointer to the device private: 183 - * @sw_context: The validation context: 184 - * @node: The validation node holding this context. 197 + * @sw_context: The command submission context 198 + * @node: The validation node holding the context resource metadata 185 199 */ 186 200 static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv, 187 201 struct vmw_sw_context *sw_context, 188 - struct vmw_resource_val_node *node) 202 + struct vmw_resource *res, 203 + struct vmw_ctx_validation_info *node) 189 204 { 190 205 int ret; 191 206 192 - ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res); 207 + ret = vmw_resource_context_res_add(dev_priv, sw_context, res); 193 208 if (unlikely(ret != 0)) 194 209 goto out_err; 195 210 ··· 207 220 } 208 221 209 222 if (sw_context->staged_bindings_inuse) { 210 - node->staged_bindings = vmw_binding_state_alloc(dev_priv); 211 - if (IS_ERR(node->staged_bindings)) { 223 + node->staged = vmw_binding_state_alloc(dev_priv); 224 + if (IS_ERR(node->staged)) { 212 225 DRM_ERROR("Failed to allocate context binding " 213 226 "information.\n"); 214 - ret = PTR_ERR(node->staged_bindings); 215 - node->staged_bindings = NULL; 227 + ret = PTR_ERR(node->staged); 228 + node->staged = NULL; 216 229 goto out_err; 217 230 } 218 231 } else { 219 - node->staged_bindings = 
sw_context->staged_bindings; 232 + node->staged = sw_context->staged_bindings; 220 233 sw_context->staged_bindings_inuse = true; 221 234 } 235 + 236 + node->ctx = res; 237 + node->cur = vmw_context_binding_state(res); 238 + list_add_tail(&node->head, &sw_context->ctx_list); 222 239 223 240 return 0; 224 241 out_err: ··· 230 239 } 231 240 232 241 /** 233 - * vmw_resource_val_add - Add a resource to the software context's 234 - * resource list if it's not already on it. 242 + * vmw_execbuf_res_size - calculate extra size fore the resource validation 243 + * node 244 + * @dev_priv: Pointer to the device private struct. 245 + * @res_type: The resource type. 235 246 * 236 - * @sw_context: Pointer to the software context. 237 - * @res: Pointer to the resource. 238 - * @p_node On successful return points to a valid pointer to a 239 - * struct vmw_resource_val_node, if non-NULL on entry. 247 + * Guest-backed contexts and DX contexts require extra size to store 248 + * execbuf private information in the validation node. Typically the 249 + * binding manager associated data structures. 250 + * 251 + * Returns: The extra size requirement based on resource type. 240 252 */ 241 - static int vmw_resource_val_add(struct vmw_sw_context *sw_context, 242 - struct vmw_resource *res, 243 - struct vmw_resource_val_node **p_node) 253 + static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv, 254 + enum vmw_res_type res_type) 255 + { 256 + return (res_type == vmw_res_dx_context || 257 + (res_type == vmw_res_context && dev_priv->has_mob)) ? 258 + sizeof(struct vmw_ctx_validation_info) : 0; 259 + } 260 + 261 + /** 262 + * vmw_execbuf_rcache_update - Update a resource-node cache entry 263 + * 264 + * @rcache: Pointer to the entry to update. 265 + * @res: Pointer to the resource. 266 + * @private: Pointer to the execbuf-private space in the resource 267 + * validation node. 
268 + */ 269 + static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache, 270 + struct vmw_resource *res, 271 + void *private) 272 + { 273 + rcache->res = res; 274 + rcache->private = private; 275 + rcache->valid = 1; 276 + rcache->valid_handle = 0; 277 + } 278 + 279 + /** 280 + * vmw_execbuf_res_noref_val_add - Add a resource described by an 281 + * unreferenced rcu-protected pointer to the validation list. 282 + * @sw_context: Pointer to the software context. 283 + * @res: Unreferenced rcu-protected pointer to the resource. 284 + * 285 + * Returns: 0 on success. Negative error code on failure. Typical error 286 + * codes are %-EINVAL on inconsistency and %-ESRCH if the resource was 287 + * doomed. 288 + */ 289 + static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context, 290 + struct vmw_resource *res) 244 291 { 245 292 struct vmw_private *dev_priv = res->dev_priv; 246 - struct vmw_resource_val_node *node; 247 - struct drm_hash_item *hash; 293 + int ret; 294 + enum vmw_res_type res_type = vmw_res_type(res); 295 + struct vmw_res_cache_entry *rcache; 296 + struct vmw_ctx_validation_info *ctx_info; 297 + bool first_usage; 298 + unsigned int priv_size; 299 + 300 + rcache = &sw_context->res_cache[res_type]; 301 + if (likely(rcache->valid && rcache->res == res)) { 302 + vmw_user_resource_noref_release(); 303 + return 0; 304 + } 305 + 306 + priv_size = vmw_execbuf_res_size(dev_priv, res_type); 307 + ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size, 308 + (void **)&ctx_info, &first_usage); 309 + vmw_user_resource_noref_release(); 310 + if (ret) 311 + return ret; 312 + 313 + if (priv_size && first_usage) { 314 + ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res, 315 + ctx_info); 316 + if (ret) 317 + return ret; 318 + } 319 + 320 + vmw_execbuf_rcache_update(rcache, res, ctx_info); 321 + return 0; 322 + } 323 + 324 + /** 325 + * vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource 326 + * validation 
list if it's not already on it 327 + * @sw_context: Pointer to the software context. 328 + * @res: Pointer to the resource. 329 + * 330 + * Returns: Zero on success. Negative error code on failure. 331 + */ 332 + static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context, 333 + struct vmw_resource *res) 334 + { 335 + struct vmw_res_cache_entry *rcache; 336 + enum vmw_res_type res_type = vmw_res_type(res); 337 + void *ptr; 248 338 int ret; 249 339 250 - if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res, 251 - &hash) == 0)) { 252 - node = container_of(hash, struct vmw_resource_val_node, hash); 253 - node->first_usage = false; 254 - if (unlikely(p_node != NULL)) 255 - *p_node = node; 340 + rcache = &sw_context->res_cache[res_type]; 341 + if (likely(rcache->valid && rcache->res == res)) 256 342 return 0; 257 - } 258 343 259 - node = kzalloc(sizeof(*node), GFP_KERNEL); 260 - if (unlikely(!node)) { 261 - DRM_ERROR("Failed to allocate a resource validation " 262 - "entry.\n"); 263 - return -ENOMEM; 264 - } 265 - 266 - node->hash.key = (unsigned long) res; 267 - ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash); 268 - if (unlikely(ret != 0)) { 269 - DRM_ERROR("Failed to initialize a resource validation " 270 - "entry.\n"); 271 - kfree(node); 344 + ret = vmw_validation_add_resource(sw_context->ctx, res, 0, &ptr, NULL); 345 + if (ret) 272 346 return ret; 273 - } 274 - node->res = vmw_resource_reference(res); 275 - node->first_usage = true; 276 - if (unlikely(p_node != NULL)) 277 - *p_node = node; 278 347 279 - if (!dev_priv->has_mob) { 280 - list_add_tail(&node->head, &sw_context->resource_list); 281 - return 0; 282 - } 348 + vmw_execbuf_rcache_update(rcache, res, ptr); 283 349 284 - switch (vmw_res_type(res)) { 285 - case vmw_res_context: 286 - case vmw_res_dx_context: 287 - list_add(&node->head, &sw_context->ctx_resource_list); 288 - ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node); 289 - break; 290 - case 
vmw_res_cotable: 291 - list_add_tail(&node->head, &sw_context->ctx_resource_list); 292 - break; 293 - default: 294 - list_add_tail(&node->head, &sw_context->resource_list); 295 - break; 296 - } 297 - 298 - return ret; 350 + return 0; 299 351 } 300 352 301 353 /** ··· 359 325 * First add the resource the view is pointing to, otherwise 360 326 * it may be swapped out when the view is validated. 361 327 */ 362 - ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL); 328 + ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view)); 363 329 if (ret) 364 330 return ret; 365 331 366 - return vmw_resource_val_add(sw_context, view, NULL); 332 + return vmw_execbuf_res_noctx_val_add(sw_context, view); 367 333 } 368 334 369 335 /** ··· 376 342 * 377 343 * The view is represented by a view id and the DX context it's created on, 378 344 * or scheduled for creation on. If there is no DX context set, the function 379 - * will return -EINVAL. Otherwise returns 0 on success and -EINVAL on failure. 345 + * will return an -EINVAL error pointer. 346 + * 347 + * Returns: Unreferenced pointer to the resource on success, negative error 348 + * pointer on failure. 
380 349 */ 381 - static int vmw_view_id_val_add(struct vmw_sw_context *sw_context, 382 - enum vmw_view_type view_type, u32 id) 350 + static struct vmw_resource * 351 + vmw_view_id_val_add(struct vmw_sw_context *sw_context, 352 + enum vmw_view_type view_type, u32 id) 383 353 { 384 - struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; 354 + struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node; 385 355 struct vmw_resource *view; 386 356 int ret; 387 357 388 358 if (!ctx_node) { 389 359 DRM_ERROR("DX Context not set.\n"); 390 - return -EINVAL; 360 + return ERR_PTR(-EINVAL); 391 361 } 392 362 393 363 view = vmw_view_lookup(sw_context->man, view_type, id); 394 364 if (IS_ERR(view)) 395 - return PTR_ERR(view); 365 + return view; 396 366 397 367 ret = vmw_view_res_val_add(sw_context, view); 398 - vmw_resource_unreference(&view); 368 + if (ret) 369 + return ERR_PTR(ret); 399 370 400 - return ret; 371 + return view; 401 372 } 402 373 403 374 /** ··· 433 394 if (IS_ERR(res)) 434 395 continue; 435 396 436 - ret = vmw_resource_val_add(sw_context, res, NULL); 437 - vmw_resource_unreference(&res); 397 + ret = vmw_execbuf_res_noctx_val_add(sw_context, res); 438 398 if (unlikely(ret != 0)) 439 399 return ret; 440 400 } ··· 445 407 binding_list = vmw_context_binding_list(ctx); 446 408 447 409 list_for_each_entry(entry, binding_list, ctx_list) { 448 - /* entry->res is not refcounted */ 449 - res = vmw_resource_reference_unless_doomed(entry->res); 450 - if (unlikely(res == NULL)) 451 - continue; 452 - 453 410 if (vmw_res_type(entry->res) == vmw_res_view) 454 411 ret = vmw_view_res_val_add(sw_context, entry->res); 455 412 else 456 - ret = vmw_resource_val_add(sw_context, entry->res, 457 - NULL); 458 - vmw_resource_unreference(&res); 413 + ret = vmw_execbuf_res_noctx_val_add(sw_context, 414 + entry->res); 459 415 if (unlikely(ret != 0)) 460 416 break; 461 417 } ··· 459 427 460 428 dx_query_mob = vmw_context_get_dx_query_mob(ctx); 461 429 if (dx_query_mob) 462 
- ret = vmw_bo_to_validate_list(sw_context, 463 - dx_query_mob, 464 - true, NULL); 430 + ret = vmw_validation_add_bo(sw_context->ctx, 431 + dx_query_mob, true, false); 465 432 } 466 433 467 434 mutex_unlock(&dev_priv->binding_mutex); ··· 476 445 * id that needs fixup is located. Granularity is one byte. 477 446 * @rel_type: Relocation type. 478 447 */ 479 - static int vmw_resource_relocation_add(struct list_head *list, 448 + static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context, 480 449 const struct vmw_resource *res, 481 450 unsigned long offset, 482 451 enum vmw_resource_relocation_type ··· 484 453 { 485 454 struct vmw_resource_relocation *rel; 486 455 487 - rel = kmalloc(sizeof(*rel), GFP_KERNEL); 456 + rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel)); 488 457 if (unlikely(!rel)) { 489 458 DRM_ERROR("Failed to allocate a resource relocation.\n"); 490 459 return -ENOMEM; ··· 493 462 rel->res = res; 494 463 rel->offset = offset; 495 464 rel->rel_type = rel_type; 496 - list_add_tail(&rel->head, list); 465 + list_add_tail(&rel->head, &sw_context->res_relocations); 497 466 498 467 return 0; 499 468 } ··· 501 470 /** 502 471 * vmw_resource_relocations_free - Free all relocations on a list 503 472 * 504 - * @list: Pointer to the head of the relocation list. 473 + * @list: Pointer to the head of the relocation list 505 474 */ 506 475 static void vmw_resource_relocations_free(struct list_head *list) 507 476 { 508 - struct vmw_resource_relocation *rel, *n; 477 + /* Memory is validation context memory, so no need to free it */ 509 478 510 - list_for_each_entry_safe(rel, n, list, head) { 511 - list_del(&rel->head); 512 - kfree(rel); 513 - } 479 + INIT_LIST_HEAD(list); 514 480 } 515 481 516 482 /** ··· 560 532 } 561 533 562 534 /** 563 - * vmw_bo_to_validate_list - add a bo to a validate list 564 - * 565 - * @sw_context: The software context used for this command submission batch. 566 - * @bo: The buffer object to add. 
567 - * @validate_as_mob: Validate this buffer as a MOB. 568 - * @p_val_node: If non-NULL Will be updated with the validate node number 569 - * on return. 570 - * 571 - * Returns -EINVAL if the limit of number of buffer objects per command 572 - * submission is reached. 573 - */ 574 - static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, 575 - struct vmw_buffer_object *vbo, 576 - bool validate_as_mob, 577 - uint32_t *p_val_node) 578 - { 579 - uint32_t val_node; 580 - struct vmw_validate_buffer *vval_buf; 581 - struct ttm_validate_buffer *val_buf; 582 - struct drm_hash_item *hash; 583 - int ret; 584 - 585 - if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo, 586 - &hash) == 0)) { 587 - vval_buf = container_of(hash, struct vmw_validate_buffer, 588 - hash); 589 - if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) { 590 - DRM_ERROR("Inconsistent buffer usage.\n"); 591 - return -EINVAL; 592 - } 593 - val_buf = &vval_buf->base; 594 - val_node = vval_buf - sw_context->val_bufs; 595 - } else { 596 - val_node = sw_context->cur_val_buf; 597 - if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) { 598 - DRM_ERROR("Max number of DMA buffers per submission " 599 - "exceeded.\n"); 600 - return -EINVAL; 601 - } 602 - vval_buf = &sw_context->val_bufs[val_node]; 603 - vval_buf->hash.key = (unsigned long) vbo; 604 - ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash); 605 - if (unlikely(ret != 0)) { 606 - DRM_ERROR("Failed to initialize a buffer validation " 607 - "entry.\n"); 608 - return ret; 609 - } 610 - ++sw_context->cur_val_buf; 611 - val_buf = &vval_buf->base; 612 - val_buf->bo = ttm_bo_reference(&vbo->base); 613 - val_buf->shared = false; 614 - list_add_tail(&val_buf->head, &sw_context->validate_nodes); 615 - vval_buf->validate_as_mob = validate_as_mob; 616 - } 617 - 618 - if (p_val_node) 619 - *p_val_node = val_node; 620 - 621 - return 0; 622 - } 623 - 624 - /** 625 535 * vmw_resources_reserve - Reserve all resources on the 
sw_context's 626 536 * resource list. 627 537 * ··· 571 605 */ 572 606 static int vmw_resources_reserve(struct vmw_sw_context *sw_context) 573 607 { 574 - struct vmw_resource_val_node *val; 575 - int ret = 0; 608 + int ret; 576 609 577 - list_for_each_entry(val, &sw_context->resource_list, head) { 578 - struct vmw_resource *res = val->res; 579 - 580 - ret = vmw_resource_reserve(res, true, val->no_buffer_needed); 581 - if (unlikely(ret != 0)) 582 - return ret; 583 - 584 - if (res->backup) { 585 - struct vmw_buffer_object *vbo = res->backup; 586 - 587 - ret = vmw_bo_to_validate_list 588 - (sw_context, vbo, 589 - vmw_resource_needs_backup(res), NULL); 590 - 591 - if (unlikely(ret != 0)) 592 - return ret; 593 - } 594 - } 610 + ret = vmw_validation_res_reserve(sw_context->ctx, true); 611 + if (ret) 612 + return ret; 595 613 596 614 if (sw_context->dx_query_mob) { 597 615 struct vmw_buffer_object *expected_dx_query_mob; ··· 590 640 591 641 return ret; 592 642 } 593 - 594 - /** 595 - * vmw_resources_validate - Validate all resources on the sw_context's 596 - * resource list. 597 - * 598 - * @sw_context: Pointer to the software context. 599 - * 600 - * Before this function is called, all resource backup buffers must have 601 - * been validated. 
602 - */ 603 - static int vmw_resources_validate(struct vmw_sw_context *sw_context) 604 - { 605 - struct vmw_resource_val_node *val; 606 - int ret; 607 - 608 - list_for_each_entry(val, &sw_context->resource_list, head) { 609 - struct vmw_resource *res = val->res; 610 - struct vmw_buffer_object *backup = res->backup; 611 - 612 - ret = vmw_resource_validate(res); 613 - if (unlikely(ret != 0)) { 614 - if (ret != -ERESTARTSYS) 615 - DRM_ERROR("Failed to validate resource.\n"); 616 - return ret; 617 - } 618 - 619 - /* Check if the resource switched backup buffer */ 620 - if (backup && res->backup && (backup != res->backup)) { 621 - struct vmw_buffer_object *vbo = res->backup; 622 - 623 - ret = vmw_bo_to_validate_list 624 - (sw_context, vbo, 625 - vmw_resource_needs_backup(res), NULL); 626 - if (ret) { 627 - ttm_bo_unreserve(&vbo->base); 628 - return ret; 629 - } 630 - } 631 - } 632 - return 0; 633 - } 634 - 635 - /** 636 - * vmw_cmd_res_reloc_add - Add a resource to a software context's 637 - * relocation- and validation lists. 638 - * 639 - * @dev_priv: Pointer to a struct vmw_private identifying the device. 640 - * @sw_context: Pointer to the software context. 641 - * @id_loc: Pointer to where the id that needs translation is located. 642 - * @res: Valid pointer to a struct vmw_resource. 643 - * @p_val: If non null, a pointer to the struct vmw_resource_validate_node 644 - * used for this resource is returned here. 
645 - */ 646 - static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv, 647 - struct vmw_sw_context *sw_context, 648 - uint32_t *id_loc, 649 - struct vmw_resource *res, 650 - struct vmw_resource_val_node **p_val) 651 - { 652 - int ret; 653 - struct vmw_resource_val_node *node; 654 - 655 - *p_val = NULL; 656 - ret = vmw_resource_relocation_add(&sw_context->res_relocations, 657 - res, 658 - vmw_ptr_diff(sw_context->buf_start, 659 - id_loc), 660 - vmw_res_rel_normal); 661 - if (unlikely(ret != 0)) 662 - return ret; 663 - 664 - ret = vmw_resource_val_add(sw_context, res, &node); 665 - if (unlikely(ret != 0)) 666 - return ret; 667 - 668 - if (p_val) 669 - *p_val = node; 670 - 671 - return 0; 672 - } 673 - 674 643 675 644 /** 676 645 * vmw_cmd_res_check - Check that a resource is present and if so, put it ··· 610 741 enum vmw_res_type res_type, 611 742 const struct vmw_user_resource_conv *converter, 612 743 uint32_t *id_loc, 613 - struct vmw_resource_val_node **p_val) 744 + struct vmw_resource **p_res) 614 745 { 615 - struct vmw_res_cache_entry *rcache = 616 - &sw_context->res_cache[res_type]; 746 + struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type]; 617 747 struct vmw_resource *res; 618 - struct vmw_resource_val_node *node; 619 748 int ret; 620 749 750 + if (p_res) 751 + *p_res = NULL; 752 + 621 753 if (*id_loc == SVGA3D_INVALID_ID) { 622 - if (p_val) 623 - *p_val = NULL; 624 754 if (res_type == vmw_res_context) { 625 755 DRM_ERROR("Illegal context invalid id.\n"); 626 756 return -EINVAL; ··· 627 759 return 0; 628 760 } 629 761 630 - /* 631 - * Fastpath in case of repeated commands referencing the same 632 - * resource 633 - */ 762 + if (likely(rcache->valid_handle && *id_loc == rcache->handle)) { 763 + res = rcache->res; 764 + } else { 765 + unsigned int size = vmw_execbuf_res_size(dev_priv, res_type); 634 766 635 - if (likely(rcache->valid && *id_loc == rcache->handle)) { 636 - const struct vmw_resource *res = rcache->res; 767 + ret = 
vmw_validation_preload_res(sw_context->ctx, size); 768 + if (ret) 769 + return ret; 637 770 638 - rcache->node->first_usage = false; 639 - if (p_val) 640 - *p_val = rcache->node; 771 + res = vmw_user_resource_noref_lookup_handle 772 + (dev_priv, sw_context->fp->tfile, *id_loc, converter); 773 + if (unlikely(IS_ERR(res))) { 774 + DRM_ERROR("Could not find or use resource 0x%08x.\n", 775 + (unsigned int) *id_loc); 776 + return PTR_ERR(res); 777 + } 641 778 642 - return vmw_resource_relocation_add 643 - (&sw_context->res_relocations, res, 644 - vmw_ptr_diff(sw_context->buf_start, id_loc), 645 - vmw_res_rel_normal); 779 + ret = vmw_execbuf_res_noref_val_add(sw_context, res); 780 + if (unlikely(ret != 0)) 781 + return ret; 782 + 783 + if (rcache->valid && rcache->res == res) { 784 + rcache->valid_handle = true; 785 + rcache->handle = *id_loc; 786 + } 646 787 } 647 788 648 - ret = vmw_user_resource_lookup_handle(dev_priv, 649 - sw_context->fp->tfile, 650 - *id_loc, 651 - converter, 652 - &res); 653 - if (unlikely(ret != 0)) { 654 - DRM_ERROR("Could not find or use resource 0x%08x.\n", 655 - (unsigned) *id_loc); 656 - dump_stack(); 657 - return ret; 658 - } 789 + ret = vmw_resource_relocation_add(sw_context, res, 790 + vmw_ptr_diff(sw_context->buf_start, 791 + id_loc), 792 + vmw_res_rel_normal); 793 + if (p_res) 794 + *p_res = res; 659 795 660 - rcache->valid = true; 661 - rcache->res = res; 662 - rcache->handle = *id_loc; 663 - 664 - ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc, 665 - res, &node); 666 - if (unlikely(ret != 0)) 667 - goto out_no_reloc; 668 - 669 - rcache->node = node; 670 - if (p_val) 671 - *p_val = node; 672 - vmw_resource_unreference(&res); 673 796 return 0; 674 - 675 - out_no_reloc: 676 - BUG_ON(sw_context->error_resource != NULL); 677 - sw_context->error_resource = res; 678 - 679 - return ret; 680 797 } 681 798 682 799 /** ··· 714 861 */ 715 862 static int vmw_rebind_contexts(struct vmw_sw_context *sw_context) 716 863 { 717 - struct 
vmw_resource_val_node *val; 864 + struct vmw_ctx_validation_info *val; 718 865 int ret; 719 866 720 - list_for_each_entry(val, &sw_context->resource_list, head) { 721 - if (unlikely(!val->staged_bindings)) 722 - break; 723 - 724 - ret = vmw_binding_rebind_all 725 - (vmw_context_binding_state(val->res)); 867 + list_for_each_entry(val, &sw_context->ctx_list, head) { 868 + ret = vmw_binding_rebind_all(val->cur); 726 869 if (unlikely(ret != 0)) { 727 870 if (ret != -ERESTARTSYS) 728 871 DRM_ERROR("Failed to rebind context.\n"); 729 872 return ret; 730 873 } 731 874 732 - ret = vmw_rebind_all_dx_query(val->res); 875 + ret = vmw_rebind_all_dx_query(val->ctx); 733 876 if (ret != 0) 734 877 return ret; 735 878 } ··· 752 903 uint32 view_ids[], u32 num_views, 753 904 u32 first_slot) 754 905 { 755 - struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; 756 - struct vmw_cmdbuf_res_manager *man; 906 + struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node; 757 907 u32 i; 758 - int ret; 759 908 760 909 if (!ctx_node) { 761 910 DRM_ERROR("DX Context not set.\n"); 762 911 return -EINVAL; 763 912 } 764 913 765 - man = sw_context->man; 766 914 for (i = 0; i < num_views; ++i) { 767 915 struct vmw_ctx_bindinfo_view binding; 768 916 struct vmw_resource *view = NULL; 769 917 770 918 if (view_ids[i] != SVGA3D_INVALID_ID) { 771 - view = vmw_view_lookup(man, view_type, view_ids[i]); 919 + view = vmw_view_id_val_add(sw_context, view_type, 920 + view_ids[i]); 772 921 if (IS_ERR(view)) { 773 922 DRM_ERROR("View not found.\n"); 774 923 return PTR_ERR(view); 775 924 } 776 - 777 - ret = vmw_view_res_val_add(sw_context, view); 778 - if (ret) { 779 - DRM_ERROR("Could not add view to " 780 - "validation list.\n"); 781 - vmw_resource_unreference(&view); 782 - return ret; 783 - } 784 925 } 785 - binding.bi.ctx = ctx_node->res; 926 + binding.bi.ctx = ctx_node->ctx; 786 927 binding.bi.res = view; 787 928 binding.bi.bt = binding_type; 788 929 binding.shader_slot = shader_slot; 
789 930 binding.slot = first_slot + i; 790 - vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 931 + vmw_binding_add(ctx_node->staged, &binding.bi, 791 932 shader_slot, binding.slot); 792 - if (view) 793 - vmw_resource_unreference(&view); 794 933 } 795 934 796 935 return 0; ··· 808 971 user_context_converter, &cmd->cid, NULL); 809 972 } 810 973 974 + /** 975 + * vmw_execbuf_info_from_res - Get the private validation metadata for a 976 + * recently validated resource 977 + * @sw_context: Pointer to the command submission context 978 + * @res: The resource 979 + * 980 + * The resource pointed to by @res needs to be present in the command submission 981 + * context's resource cache and hence the last resource of that type to be 982 + * processed by the validation code. 983 + * 984 + * Return: a pointer to the private metadata of the resource, or NULL 985 + * if it wasn't found 986 + */ 987 + static struct vmw_ctx_validation_info * 988 + vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context, 989 + struct vmw_resource *res) 990 + { 991 + struct vmw_res_cache_entry *rcache = 992 + &sw_context->res_cache[vmw_res_type(res)]; 993 + 994 + if (rcache->valid && rcache->res == res) 995 + return rcache->private; 996 + 997 + WARN_ON_ONCE(true); 998 + return NULL; 999 + } 1000 + 1001 + 811 1002 static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv, 812 1003 struct vmw_sw_context *sw_context, 813 1004 SVGA3dCmdHeader *header) ··· 844 979 SVGA3dCmdHeader header; 845 980 SVGA3dCmdSetRenderTarget body; 846 981 } *cmd; 847 - struct vmw_resource_val_node *ctx_node; 848 - struct vmw_resource_val_node *res_node; 982 + struct vmw_resource *ctx; 983 + struct vmw_resource *res; 849 984 int ret; 850 985 851 986 cmd = container_of(header, struct vmw_sid_cmd, header); ··· 858 993 859 994 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, 860 995 user_context_converter, &cmd->body.cid, 861 - &ctx_node); 996 + &ctx); 862 997 if (unlikely(ret != 0)) 863 
998 return ret; 864 999 865 1000 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 866 - user_surface_converter, 867 - &cmd->body.target.sid, &res_node); 868 - if (unlikely(ret != 0)) 1001 + user_surface_converter, &cmd->body.target.sid, 1002 + &res); 1003 + if (unlikely(ret)) 869 1004 return ret; 870 1005 871 1006 if (dev_priv->has_mob) { 872 1007 struct vmw_ctx_bindinfo_view binding; 1008 + struct vmw_ctx_validation_info *node; 873 1009 874 - binding.bi.ctx = ctx_node->res; 875 - binding.bi.res = res_node ? res_node->res : NULL; 1010 + node = vmw_execbuf_info_from_res(sw_context, ctx); 1011 + if (!node) 1012 + return -EINVAL; 1013 + 1014 + binding.bi.ctx = ctx; 1015 + binding.bi.res = res; 876 1016 binding.bi.bt = vmw_ctx_binding_rt; 877 1017 binding.slot = cmd->body.type; 878 - vmw_binding_add(ctx_node->staged_bindings, 879 - &binding.bi, 0, binding.slot); 1018 + vmw_binding_add(node->staged, &binding.bi, 0, binding.slot); 880 1019 } 881 1020 882 1021 return 0; ··· 899 1030 cmd = container_of(header, struct vmw_sid_cmd, header); 900 1031 901 1032 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 902 - user_surface_converter, 903 - &cmd->body.src.sid, NULL); 1033 + user_surface_converter, 1034 + &cmd->body.src.sid, NULL); 904 1035 if (ret) 905 1036 return ret; 906 1037 ··· 1040 1171 1041 1172 if (unlikely(sw_context->cur_query_bo != NULL)) { 1042 1173 sw_context->needs_post_query_barrier = true; 1043 - ret = vmw_bo_to_validate_list(sw_context, 1044 - sw_context->cur_query_bo, 1045 - dev_priv->has_mob, NULL); 1174 + ret = vmw_validation_add_bo(sw_context->ctx, 1175 + sw_context->cur_query_bo, 1176 + dev_priv->has_mob, false); 1046 1177 if (unlikely(ret != 0)) 1047 1178 return ret; 1048 1179 } 1049 1180 sw_context->cur_query_bo = new_query_bo; 1050 1181 1051 - ret = vmw_bo_to_validate_list(sw_context, 1052 - dev_priv->dummy_query_bo, 1053 - dev_priv->has_mob, NULL); 1182 + ret = vmw_validation_add_bo(sw_context->ctx, 1183 + 
dev_priv->dummy_query_bo, 1184 + dev_priv->has_mob, false); 1054 1185 if (unlikely(ret != 0)) 1055 1186 return ret; 1056 1187 ··· 1138 1269 * @sw_context: The software context used for this command batch validation. 1139 1270 * @id: Pointer to the user-space handle to be translated. 1140 1271 * @vmw_bo_p: Points to a location that, on successful return will carry 1141 - * a reference-counted pointer to the DMA buffer identified by the 1272 + * a non-reference-counted pointer to the buffer object identified by the 1142 1273 * user-space handle in @id. 1143 1274 * 1144 1275 * This function saves information needed to translate a user-space buffer ··· 1153 1284 SVGAMobId *id, 1154 1285 struct vmw_buffer_object **vmw_bo_p) 1155 1286 { 1156 - struct vmw_buffer_object *vmw_bo = NULL; 1287 + struct vmw_buffer_object *vmw_bo; 1157 1288 uint32_t handle = *id; 1158 1289 struct vmw_relocation *reloc; 1159 1290 int ret; 1160 1291 1161 - ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL); 1162 - if (unlikely(ret != 0)) { 1292 + vmw_validation_preload_bo(sw_context->ctx); 1293 + vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle); 1294 + if (IS_ERR(vmw_bo)) { 1163 1295 DRM_ERROR("Could not find or use MOB buffer.\n"); 1164 - ret = -EINVAL; 1165 - goto out_no_reloc; 1296 + return PTR_ERR(vmw_bo); 1166 1297 } 1167 1298 1168 - if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) { 1169 - DRM_ERROR("Max number relocations per submission" 1170 - " exceeded\n"); 1171 - ret = -EINVAL; 1172 - goto out_no_reloc; 1173 - } 1174 - 1175 - reloc = &sw_context->relocs[sw_context->cur_reloc++]; 1176 - reloc->mob_loc = id; 1177 - reloc->location = NULL; 1178 - 1179 - ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index); 1299 + ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false); 1300 + vmw_user_bo_noref_release(); 1180 1301 if (unlikely(ret != 0)) 1181 - goto out_no_reloc; 1302 + return ret; 1303 + 1304 + reloc = 
vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc)); 1305 + if (!reloc) 1306 + return -ENOMEM; 1307 + 1308 + reloc->mob_loc = id; 1309 + reloc->vbo = vmw_bo; 1182 1310 1183 1311 *vmw_bo_p = vmw_bo; 1184 - return 0; 1312 + list_add_tail(&reloc->head, &sw_context->bo_relocations); 1185 1313 1186 - out_no_reloc: 1187 - vmw_bo_unreference(&vmw_bo); 1188 - *vmw_bo_p = NULL; 1189 - return ret; 1314 + return 0; 1190 1315 } 1191 1316 1192 1317 /** ··· 1191 1328 * @sw_context: The software context used for this command batch validation. 1192 1329 * @ptr: Pointer to the user-space handle to be translated. 1193 1330 * @vmw_bo_p: Points to a location that, on successful return will carry 1194 - * a reference-counted pointer to the DMA buffer identified by the 1331 + * a non-reference-counted pointer to the DMA buffer identified by the 1195 1332 * user-space handle in @id. 1196 1333 * 1197 1334 * This function saves information needed to translate a user-space buffer ··· 1207 1344 SVGAGuestPtr *ptr, 1208 1345 struct vmw_buffer_object **vmw_bo_p) 1209 1346 { 1210 - struct vmw_buffer_object *vmw_bo = NULL; 1347 + struct vmw_buffer_object *vmw_bo; 1211 1348 uint32_t handle = ptr->gmrId; 1212 1349 struct vmw_relocation *reloc; 1213 1350 int ret; 1214 1351 1215 - ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL); 1216 - if (unlikely(ret != 0)) { 1352 + vmw_validation_preload_bo(sw_context->ctx); 1353 + vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle); 1354 + if (IS_ERR(vmw_bo)) { 1217 1355 DRM_ERROR("Could not find or use GMR region.\n"); 1218 - ret = -EINVAL; 1219 - goto out_no_reloc; 1356 + return PTR_ERR(vmw_bo); 1220 1357 } 1221 1358 1222 - if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) { 1223 - DRM_ERROR("Max number relocations per submission" 1224 - " exceeded\n"); 1225 - ret = -EINVAL; 1226 - goto out_no_reloc; 1227 - } 1228 - 1229 - reloc = &sw_context->relocs[sw_context->cur_reloc++]; 1230 - reloc->location = ptr; 
1231 - 1232 - ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index); 1359 + ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false); 1360 + vmw_user_bo_noref_release(); 1233 1361 if (unlikely(ret != 0)) 1234 - goto out_no_reloc; 1362 + return ret; 1235 1363 1364 + reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc)); 1365 + if (!reloc) 1366 + return -ENOMEM; 1367 + 1368 + reloc->location = ptr; 1369 + reloc->vbo = vmw_bo; 1236 1370 *vmw_bo_p = vmw_bo; 1237 - return 0; 1371 + list_add_tail(&reloc->head, &sw_context->bo_relocations); 1238 1372 1239 - out_no_reloc: 1240 - vmw_bo_unreference(&vmw_bo); 1241 - *vmw_bo_p = NULL; 1242 - return ret; 1373 + return 0; 1243 1374 } 1244 1375 1245 1376 ··· 1257 1400 } *cmd; 1258 1401 1259 1402 int ret; 1260 - struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; 1403 + struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node; 1261 1404 struct vmw_resource *cotable_res; 1262 1405 1263 1406 ··· 1272 1415 cmd->q.type >= SVGA3D_QUERYTYPE_MAX) 1273 1416 return -EINVAL; 1274 1417 1275 - cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY); 1418 + cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY); 1276 1419 ret = vmw_cotable_notify(cotable_res, cmd->q.queryId); 1277 - vmw_resource_unreference(&cotable_res); 1278 1420 1279 1421 return ret; 1280 1422 } ··· 1318 1462 return ret; 1319 1463 1320 1464 sw_context->dx_query_mob = vmw_bo; 1321 - sw_context->dx_query_ctx = sw_context->dx_ctx_node->res; 1322 - 1323 - vmw_bo_unreference(&vmw_bo); 1324 - 1325 - return ret; 1465 + sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx; 1466 + return 0; 1326 1467 } 1327 1468 1328 1469 ··· 1420 1567 1421 1568 ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context); 1422 1569 1423 - vmw_bo_unreference(&vmw_bo); 1424 1570 return ret; 1425 1571 } 1426 1572 ··· 1473 1621 1474 1622 ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context); 
1475 1623 1476 - vmw_bo_unreference(&vmw_bo); 1477 1624 return ret; 1478 1625 } 1479 1626 ··· 1505 1654 if (unlikely(ret != 0)) 1506 1655 return ret; 1507 1656 1508 - vmw_bo_unreference(&vmw_bo); 1509 1657 return 0; 1510 1658 } 1511 1659 ··· 1556 1706 if (unlikely(ret != 0)) 1557 1707 return ret; 1558 1708 1559 - vmw_bo_unreference(&vmw_bo); 1560 1709 return 0; 1561 1710 } 1562 1711 ··· 1606 1757 if (unlikely(ret != 0)) { 1607 1758 if (unlikely(ret != -ERESTARTSYS)) 1608 1759 DRM_ERROR("could not find surface for DMA.\n"); 1609 - goto out_no_surface; 1760 + return ret; 1610 1761 } 1611 1762 1612 1763 srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res); ··· 1614 1765 vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, 1615 1766 header); 1616 1767 1617 - out_no_surface: 1618 - vmw_bo_unreference(&vmw_bo); 1619 - return ret; 1768 + return 0; 1620 1769 } 1621 1770 1622 1771 static int vmw_cmd_draw(struct vmw_private *dev_priv, ··· 1684 1837 ((unsigned long) header + header->size + sizeof(header)); 1685 1838 SVGA3dTextureState *cur_state = (SVGA3dTextureState *) 1686 1839 ((unsigned long) header + sizeof(struct vmw_tex_state_cmd)); 1687 - struct vmw_resource_val_node *ctx_node; 1688 - struct vmw_resource_val_node *res_node; 1840 + struct vmw_resource *ctx; 1841 + struct vmw_resource *res; 1689 1842 int ret; 1690 1843 1691 1844 cmd = container_of(header, struct vmw_tex_state_cmd, ··· 1693 1846 1694 1847 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, 1695 1848 user_context_converter, &cmd->state.cid, 1696 - &ctx_node); 1849 + &ctx); 1697 1850 if (unlikely(ret != 0)) 1698 1851 return ret; 1699 1852 ··· 1709 1862 1710 1863 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 1711 1864 user_surface_converter, 1712 - &cur_state->value, &res_node); 1865 + &cur_state->value, &res); 1713 1866 if (unlikely(ret != 0)) 1714 1867 return ret; 1715 1868 1716 1869 if (dev_priv->has_mob) { 1717 1870 struct vmw_ctx_bindinfo_tex binding; 
1871 + struct vmw_ctx_validation_info *node; 1718 1872 1719 - binding.bi.ctx = ctx_node->res; 1720 - binding.bi.res = res_node ? res_node->res : NULL; 1873 + node = vmw_execbuf_info_from_res(sw_context, ctx); 1874 + if (!node) 1875 + return -EINVAL; 1876 + 1877 + binding.bi.ctx = ctx; 1878 + binding.bi.res = res; 1721 1879 binding.bi.bt = vmw_ctx_binding_tex; 1722 1880 binding.texture_stage = cur_state->stage; 1723 - vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 1724 - 0, binding.texture_stage); 1881 + vmw_binding_add(node->staged, &binding.bi, 0, 1882 + binding.texture_stage); 1725 1883 } 1726 1884 } 1727 1885 ··· 1745 1893 SVGAFifoCmdDefineGMRFB body; 1746 1894 } *cmd = buf; 1747 1895 1748 - ret = vmw_translate_guest_ptr(dev_priv, sw_context, 1749 - &cmd->body.ptr, 1750 - &vmw_bo); 1751 - if (unlikely(ret != 0)) 1752 - return ret; 1753 - 1754 - vmw_bo_unreference(&vmw_bo); 1755 - 1896 + return vmw_translate_guest_ptr(dev_priv, sw_context, 1897 + &cmd->body.ptr, 1898 + &vmw_bo); 1756 1899 return ret; 1757 1900 } 1758 1901 ··· 1769 1922 */ 1770 1923 static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv, 1771 1924 struct vmw_sw_context *sw_context, 1772 - struct vmw_resource_val_node *val_node, 1925 + struct vmw_resource *res, 1773 1926 uint32_t *buf_id, 1774 1927 unsigned long backup_offset) 1775 1928 { 1776 - struct vmw_buffer_object *dma_buf; 1929 + struct vmw_buffer_object *vbo; 1930 + void *info; 1777 1931 int ret; 1778 1932 1779 - ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf); 1933 + info = vmw_execbuf_info_from_res(sw_context, res); 1934 + if (!info) 1935 + return -EINVAL; 1936 + 1937 + ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo); 1780 1938 if (ret) 1781 1939 return ret; 1782 1940 1783 - val_node->switching_backup = true; 1784 - if (val_node->first_usage) 1785 - val_node->no_buffer_needed = true; 1786 - 1787 - vmw_bo_unreference(&val_node->new_backup); 1788 - val_node->new_backup = dma_buf; 1789 
- val_node->new_backup_offset = backup_offset; 1790 - 1941 + vmw_validation_res_switch_backup(sw_context->ctx, info, vbo, 1942 + backup_offset); 1791 1943 return 0; 1792 1944 } 1793 1945 ··· 1816 1970 uint32_t *buf_id, 1817 1971 unsigned long backup_offset) 1818 1972 { 1819 - struct vmw_resource_val_node *val_node; 1973 + struct vmw_resource *res; 1820 1974 int ret; 1821 1975 1822 1976 ret = vmw_cmd_res_check(dev_priv, sw_context, res_type, 1823 - converter, res_id, &val_node); 1977 + converter, res_id, &res); 1824 1978 if (ret) 1825 1979 return ret; 1826 1980 1827 - return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node, 1981 + return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, 1828 1982 buf_id, backup_offset); 1829 1983 } 1830 1984 ··· 2016 2170 } *cmd; 2017 2171 int ret; 2018 2172 size_t size; 2019 - struct vmw_resource_val_node *val; 2173 + struct vmw_resource *ctx; 2020 2174 2021 2175 cmd = container_of(header, struct vmw_shader_define_cmd, 2022 2176 header); 2023 2177 2024 2178 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, 2025 2179 user_context_converter, &cmd->body.cid, 2026 - &val); 2180 + &ctx); 2027 2181 if (unlikely(ret != 0)) 2028 2182 return ret; 2029 2183 ··· 2032 2186 2033 2187 size = cmd->header.size - sizeof(cmd->body); 2034 2188 ret = vmw_compat_shader_add(dev_priv, 2035 - vmw_context_res_man(val->res), 2189 + vmw_context_res_man(ctx), 2036 2190 cmd->body.shid, cmd + 1, 2037 2191 cmd->body.type, size, 2038 2192 &sw_context->staged_cmd_res); 2039 2193 if (unlikely(ret != 0)) 2040 2194 return ret; 2041 2195 2042 - return vmw_resource_relocation_add(&sw_context->res_relocations, 2196 + return vmw_resource_relocation_add(sw_context, 2043 2197 NULL, 2044 2198 vmw_ptr_diff(sw_context->buf_start, 2045 2199 &cmd->header.id), ··· 2063 2217 SVGA3dCmdDestroyShader body; 2064 2218 } *cmd; 2065 2219 int ret; 2066 - struct vmw_resource_val_node *val; 2220 + struct vmw_resource *ctx; 2067 2221 2068 2222 cmd = 
container_of(header, struct vmw_shader_destroy_cmd, 2069 2223 header); 2070 2224 2071 2225 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, 2072 2226 user_context_converter, &cmd->body.cid, 2073 - &val); 2227 + &ctx); 2074 2228 if (unlikely(ret != 0)) 2075 2229 return ret; 2076 2230 2077 2231 if (unlikely(!dev_priv->has_mob)) 2078 2232 return 0; 2079 2233 2080 - ret = vmw_shader_remove(vmw_context_res_man(val->res), 2234 + ret = vmw_shader_remove(vmw_context_res_man(ctx), 2081 2235 cmd->body.shid, 2082 2236 cmd->body.type, 2083 2237 &sw_context->staged_cmd_res); 2084 2238 if (unlikely(ret != 0)) 2085 2239 return ret; 2086 2240 2087 - return vmw_resource_relocation_add(&sw_context->res_relocations, 2241 + return vmw_resource_relocation_add(sw_context, 2088 2242 NULL, 2089 2243 vmw_ptr_diff(sw_context->buf_start, 2090 2244 &cmd->header.id), ··· 2107 2261 SVGA3dCmdHeader header; 2108 2262 SVGA3dCmdSetShader body; 2109 2263 } *cmd; 2110 - struct vmw_resource_val_node *ctx_node, *res_node = NULL; 2111 2264 struct vmw_ctx_bindinfo_shader binding; 2112 - struct vmw_resource *res = NULL; 2265 + struct vmw_resource *ctx, *res = NULL; 2266 + struct vmw_ctx_validation_info *ctx_info; 2113 2267 int ret; 2114 2268 2115 2269 cmd = container_of(header, struct vmw_set_shader_cmd, ··· 2123 2277 2124 2278 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, 2125 2279 user_context_converter, &cmd->body.cid, 2126 - &ctx_node); 2280 + &ctx); 2127 2281 if (unlikely(ret != 0)) 2128 2282 return ret; 2129 2283 ··· 2131 2285 return 0; 2132 2286 2133 2287 if (cmd->body.shid != SVGA3D_INVALID_ID) { 2134 - res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res), 2288 + res = vmw_shader_lookup(vmw_context_res_man(ctx), 2135 2289 cmd->body.shid, 2136 2290 cmd->body.type); 2137 2291 2138 2292 if (!IS_ERR(res)) { 2139 - ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, 2140 - &cmd->body.shid, res, 2141 - &res_node); 2142 - vmw_resource_unreference(&res); 2293 + ret = 
vmw_execbuf_res_noctx_val_add(sw_context, res); 2143 2294 if (unlikely(ret != 0)) 2144 2295 return ret; 2145 2296 } 2146 2297 } 2147 2298 2148 - if (!res_node) { 2299 + if (IS_ERR_OR_NULL(res)) { 2149 2300 ret = vmw_cmd_res_check(dev_priv, sw_context, 2150 2301 vmw_res_shader, 2151 2302 user_shader_converter, 2152 - &cmd->body.shid, &res_node); 2303 + &cmd->body.shid, &res); 2153 2304 if (unlikely(ret != 0)) 2154 2305 return ret; 2155 2306 } 2156 2307 2157 - binding.bi.ctx = ctx_node->res; 2158 - binding.bi.res = res_node ? res_node->res : NULL; 2308 + ctx_info = vmw_execbuf_info_from_res(sw_context, ctx); 2309 + if (!ctx_info) 2310 + return -EINVAL; 2311 + 2312 + binding.bi.ctx = ctx; 2313 + binding.bi.res = res; 2159 2314 binding.bi.bt = vmw_ctx_binding_shader; 2160 2315 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN; 2161 - vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 2316 + vmw_binding_add(ctx_info->staged, &binding.bi, 2162 2317 binding.shader_slot, 0); 2163 2318 return 0; 2164 2319 } ··· 2240 2393 SVGA3dCmdHeader header; 2241 2394 SVGA3dCmdDXSetSingleConstantBuffer body; 2242 2395 } *cmd; 2243 - struct vmw_resource_val_node *res_node = NULL; 2244 - struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; 2396 + struct vmw_resource *res = NULL; 2397 + struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node; 2245 2398 struct vmw_ctx_bindinfo_cb binding; 2246 2399 int ret; 2247 2400 ··· 2253 2406 cmd = container_of(header, typeof(*cmd), header); 2254 2407 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 2255 2408 user_surface_converter, 2256 - &cmd->body.sid, &res_node); 2409 + &cmd->body.sid, &res); 2257 2410 if (unlikely(ret != 0)) 2258 2411 return ret; 2259 2412 2260 - binding.bi.ctx = ctx_node->res; 2261 - binding.bi.res = res_node ? 
res_node->res : NULL; 2413 + binding.bi.ctx = ctx_node->ctx; 2414 + binding.bi.res = res; 2262 2415 binding.bi.bt = vmw_ctx_binding_cb; 2263 2416 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN; 2264 2417 binding.offset = cmd->body.offsetInBytes; ··· 2273 2426 return -EINVAL; 2274 2427 } 2275 2428 2276 - vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 2429 + vmw_binding_add(ctx_node->staged, &binding.bi, 2277 2430 binding.shader_slot, binding.slot); 2278 2431 2279 2432 return 0; ··· 2329 2482 SVGA3dCmdDXSetShader body; 2330 2483 } *cmd; 2331 2484 struct vmw_resource *res = NULL; 2332 - struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; 2485 + struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node; 2333 2486 struct vmw_ctx_bindinfo_shader binding; 2334 2487 int ret = 0; 2335 2488 ··· 2353 2506 return PTR_ERR(res); 2354 2507 } 2355 2508 2356 - ret = vmw_resource_val_add(sw_context, res, NULL); 2509 + ret = vmw_execbuf_res_noctx_val_add(sw_context, res); 2357 2510 if (ret) 2358 - goto out_unref; 2511 + return ret; 2359 2512 } 2360 2513 2361 - binding.bi.ctx = ctx_node->res; 2514 + binding.bi.ctx = ctx_node->ctx; 2362 2515 binding.bi.res = res; 2363 2516 binding.bi.bt = vmw_ctx_binding_dx_shader; 2364 2517 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN; 2365 2518 2366 - vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 2519 + vmw_binding_add(ctx_node->staged, &binding.bi, 2367 2520 binding.shader_slot, 0); 2368 - out_unref: 2369 - if (res) 2370 - vmw_resource_unreference(&res); 2371 2521 2372 - return ret; 2522 + return 0; 2373 2523 } 2374 2524 2375 2525 /** ··· 2381 2537 struct vmw_sw_context *sw_context, 2382 2538 SVGA3dCmdHeader *header) 2383 2539 { 2384 - struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; 2540 + struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node; 2385 2541 struct vmw_ctx_bindinfo_vb binding; 2386 - struct vmw_resource_val_node *res_node; 2542 + struct 
vmw_resource *res; 2387 2543 struct { 2388 2544 SVGA3dCmdHeader header; 2389 2545 SVGA3dCmdDXSetVertexBuffers body; ··· 2408 2564 for (i = 0; i < num; i++) { 2409 2565 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 2410 2566 user_surface_converter, 2411 - &cmd->buf[i].sid, &res_node); 2567 + &cmd->buf[i].sid, &res); 2412 2568 if (unlikely(ret != 0)) 2413 2569 return ret; 2414 2570 2415 - binding.bi.ctx = ctx_node->res; 2571 + binding.bi.ctx = ctx_node->ctx; 2416 2572 binding.bi.bt = vmw_ctx_binding_vb; 2417 - binding.bi.res = ((res_node) ? res_node->res : NULL); 2573 + binding.bi.res = res; 2418 2574 binding.offset = cmd->buf[i].offset; 2419 2575 binding.stride = cmd->buf[i].stride; 2420 2576 binding.slot = i + cmd->body.startBuffer; 2421 2577 2422 - vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 2578 + vmw_binding_add(ctx_node->staged, &binding.bi, 2423 2579 0, binding.slot); 2424 2580 } 2425 2581 ··· 2438 2594 struct vmw_sw_context *sw_context, 2439 2595 SVGA3dCmdHeader *header) 2440 2596 { 2441 - struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; 2597 + struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node; 2442 2598 struct vmw_ctx_bindinfo_ib binding; 2443 - struct vmw_resource_val_node *res_node; 2599 + struct vmw_resource *res; 2444 2600 struct { 2445 2601 SVGA3dCmdHeader header; 2446 2602 SVGA3dCmdDXSetIndexBuffer body; ··· 2455 2611 cmd = container_of(header, typeof(*cmd), header); 2456 2612 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 2457 2613 user_surface_converter, 2458 - &cmd->body.sid, &res_node); 2614 + &cmd->body.sid, &res); 2459 2615 if (unlikely(ret != 0)) 2460 2616 return ret; 2461 2617 2462 - binding.bi.ctx = ctx_node->res; 2463 - binding.bi.res = ((res_node) ? 
res_node->res : NULL); 2618 + binding.bi.ctx = ctx_node->ctx; 2619 + binding.bi.res = res; 2464 2620 binding.bi.bt = vmw_ctx_binding_ib; 2465 2621 binding.offset = cmd->body.offset; 2466 2622 binding.format = cmd->body.format; 2467 2623 2468 - vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0); 2624 + vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0); 2469 2625 2470 2626 return 0; 2471 2627 } ··· 2523 2679 SVGA3dCmdDXClearRenderTargetView body; 2524 2680 } *cmd = container_of(header, typeof(*cmd), header); 2525 2681 2526 - return vmw_view_id_val_add(sw_context, vmw_view_rt, 2527 - cmd->body.renderTargetViewId); 2682 + return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_rt, 2683 + cmd->body.renderTargetViewId)); 2528 2684 } 2529 2685 2530 2686 /** ··· 2544 2700 SVGA3dCmdDXClearDepthStencilView body; 2545 2701 } *cmd = container_of(header, typeof(*cmd), header); 2546 2702 2547 - return vmw_view_id_val_add(sw_context, vmw_view_ds, 2548 - cmd->body.depthStencilViewId); 2703 + return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_ds, 2704 + cmd->body.depthStencilViewId)); 2549 2705 } 2550 2706 2551 2707 static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv, 2552 2708 struct vmw_sw_context *sw_context, 2553 2709 SVGA3dCmdHeader *header) 2554 2710 { 2555 - struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; 2556 - struct vmw_resource_val_node *srf_node; 2711 + struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node; 2712 + struct vmw_resource *srf; 2557 2713 struct vmw_resource *res; 2558 2714 enum vmw_view_type view_type; 2559 2715 int ret; ··· 2578 2734 cmd = container_of(header, typeof(*cmd), header); 2579 2735 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 2580 2736 user_surface_converter, 2581 - &cmd->sid, &srf_node); 2737 + &cmd->sid, &srf); 2582 2738 if (unlikely(ret != 0)) 2583 2739 return ret; 2584 2740 2585 - res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]); 2741 + res = 
vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]); 2586 2742 ret = vmw_cotable_notify(res, cmd->defined_id); 2587 - vmw_resource_unreference(&res); 2588 2743 if (unlikely(ret != 0)) 2589 2744 return ret; 2590 2745 2591 2746 return vmw_view_add(sw_context->man, 2592 - ctx_node->res, 2593 - srf_node->res, 2747 + ctx_node->ctx, 2748 + srf, 2594 2749 view_type, 2595 2750 cmd->defined_id, 2596 2751 header, ··· 2609 2766 struct vmw_sw_context *sw_context, 2610 2767 SVGA3dCmdHeader *header) 2611 2768 { 2612 - struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; 2769 + struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node; 2613 2770 struct vmw_ctx_bindinfo_so binding; 2614 - struct vmw_resource_val_node *res_node; 2771 + struct vmw_resource *res; 2615 2772 struct { 2616 2773 SVGA3dCmdHeader header; 2617 2774 SVGA3dCmdDXSetSOTargets body; ··· 2636 2793 for (i = 0; i < num; i++) { 2637 2794 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 2638 2795 user_surface_converter, 2639 - &cmd->targets[i].sid, &res_node); 2796 + &cmd->targets[i].sid, &res); 2640 2797 if (unlikely(ret != 0)) 2641 2798 return ret; 2642 2799 2643 - binding.bi.ctx = ctx_node->res; 2644 - binding.bi.res = ((res_node) ? 
res_node->res : NULL); 2800 + binding.bi.ctx = ctx_node->ctx; 2801 + binding.bi.res = res; 2645 2802 binding.bi.bt = vmw_ctx_binding_so, 2646 2803 binding.offset = cmd->targets[i].offset; 2647 2804 binding.size = cmd->targets[i].sizeInBytes; 2648 2805 binding.slot = i; 2649 2806 2650 - vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 2807 + vmw_binding_add(ctx_node->staged, &binding.bi, 2651 2808 0, binding.slot); 2652 2809 } 2653 2810 ··· 2658 2815 struct vmw_sw_context *sw_context, 2659 2816 SVGA3dCmdHeader *header) 2660 2817 { 2661 - struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; 2818 + struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node; 2662 2819 struct vmw_resource *res; 2663 2820 /* 2664 2821 * This is based on the fact that all affected define commands have ··· 2677 2834 } 2678 2835 2679 2836 so_type = vmw_so_cmd_to_type(header->id); 2680 - res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]); 2837 + res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]); 2681 2838 cmd = container_of(header, typeof(*cmd), header); 2682 2839 ret = vmw_cotable_notify(res, cmd->defined_id); 2683 - vmw_resource_unreference(&res); 2684 2840 2685 2841 return ret; 2686 2842 } ··· 2724 2882 struct vmw_sw_context *sw_context, 2725 2883 SVGA3dCmdHeader *header) 2726 2884 { 2727 - struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; 2885 + struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node; 2728 2886 2729 2887 if (unlikely(ctx_node == NULL)) { 2730 2888 DRM_ERROR("DX Context not set.\n"); ··· 2749 2907 struct vmw_sw_context *sw_context, 2750 2908 SVGA3dCmdHeader *header) 2751 2909 { 2752 - struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; 2910 + struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node; 2753 2911 struct { 2754 2912 SVGA3dCmdHeader header; 2755 2913 union vmw_view_destroy body; ··· 2776 2934 * relocation to conditionally make this command a NOP to 
avoid 2777 2935 * device errors. 2778 2936 */ 2779 - return vmw_resource_relocation_add(&sw_context->res_relocations, 2937 + return vmw_resource_relocation_add(sw_context, 2780 2938 view, 2781 2939 vmw_ptr_diff(sw_context->buf_start, 2782 2940 &cmd->header.id), ··· 2795 2953 struct vmw_sw_context *sw_context, 2796 2954 SVGA3dCmdHeader *header) 2797 2955 { 2798 - struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; 2956 + struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node; 2799 2957 struct vmw_resource *res; 2800 2958 struct { 2801 2959 SVGA3dCmdHeader header; ··· 2808 2966 return -EINVAL; 2809 2967 } 2810 2968 2811 - res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER); 2969 + res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER); 2812 2970 ret = vmw_cotable_notify(res, cmd->body.shaderId); 2813 - vmw_resource_unreference(&res); 2814 2971 if (ret) 2815 2972 return ret; 2816 2973 2817 - return vmw_dx_shader_add(sw_context->man, ctx_node->res, 2974 + return vmw_dx_shader_add(sw_context->man, ctx_node->ctx, 2818 2975 cmd->body.shaderId, cmd->body.type, 2819 2976 &sw_context->staged_cmd_res); 2820 2977 } ··· 2830 2989 struct vmw_sw_context *sw_context, 2831 2990 SVGA3dCmdHeader *header) 2832 2991 { 2833 - struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; 2992 + struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node; 2834 2993 struct { 2835 2994 SVGA3dCmdHeader header; 2836 2995 SVGA3dCmdDXDestroyShader body; ··· 2862 3021 struct vmw_sw_context *sw_context, 2863 3022 SVGA3dCmdHeader *header) 2864 3023 { 2865 - struct vmw_resource_val_node *ctx_node; 2866 - struct vmw_resource_val_node *res_node; 3024 + struct vmw_resource *ctx; 2867 3025 struct vmw_resource *res; 2868 3026 struct { 2869 3027 SVGA3dCmdHeader header; ··· 2873 3033 if (cmd->body.cid != SVGA3D_INVALID_ID) { 2874 3034 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, 2875 3035 user_context_converter, 2876 - 
&cmd->body.cid, &ctx_node); 3036 + &cmd->body.cid, &ctx); 2877 3037 if (ret) 2878 3038 return ret; 2879 3039 } else { 2880 - ctx_node = sw_context->dx_ctx_node; 2881 - if (!ctx_node) { 3040 + if (!sw_context->dx_ctx_node) { 2882 3041 DRM_ERROR("DX Context not set.\n"); 2883 3042 return -EINVAL; 2884 3043 } 3044 + ctx = sw_context->dx_ctx_node->ctx; 2885 3045 } 2886 3046 2887 - res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res), 3047 + res = vmw_shader_lookup(vmw_context_res_man(ctx), 2888 3048 cmd->body.shid, 0); 2889 3049 if (IS_ERR(res)) { 2890 3050 DRM_ERROR("Could not find shader to bind.\n"); 2891 3051 return PTR_ERR(res); 2892 3052 } 2893 3053 2894 - ret = vmw_resource_val_add(sw_context, res, &res_node); 3054 + ret = vmw_execbuf_res_noctx_val_add(sw_context, res); 2895 3055 if (ret) { 2896 3056 DRM_ERROR("Error creating resource validation node.\n"); 2897 - goto out_unref; 3057 + return ret; 2898 3058 } 2899 3059 2900 - 2901 - ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node, 2902 - &cmd->body.mobid, 2903 - cmd->body.offsetInBytes); 2904 - out_unref: 2905 - vmw_resource_unreference(&res); 2906 - 2907 - return ret; 3060 + return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, 3061 + &cmd->body.mobid, 3062 + cmd->body.offsetInBytes); 2908 3063 } 2909 3064 2910 3065 /** ··· 2918 3083 SVGA3dCmdDXGenMips body; 2919 3084 } *cmd = container_of(header, typeof(*cmd), header); 2920 3085 2921 - return vmw_view_id_val_add(sw_context, vmw_view_sr, 2922 - cmd->body.shaderResourceViewId); 3086 + return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_sr, 3087 + cmd->body.shaderResourceViewId)); 2923 3088 } 2924 3089 2925 3090 /** ··· 3473 3638 3474 3639 static void vmw_free_relocations(struct vmw_sw_context *sw_context) 3475 3640 { 3476 - sw_context->cur_reloc = 0; 3641 + /* Memory is validation context memory, so no need to free it */ 3642 + 3643 + INIT_LIST_HEAD(&sw_context->bo_relocations); 3477 3644 } 3478 3645 3479 3646 static void 
vmw_apply_relocations(struct vmw_sw_context *sw_context) 3480 3647 { 3481 - uint32_t i; 3482 3648 struct vmw_relocation *reloc; 3483 - struct ttm_validate_buffer *validate; 3484 3649 struct ttm_buffer_object *bo; 3485 3650 3486 - for (i = 0; i < sw_context->cur_reloc; ++i) { 3487 - reloc = &sw_context->relocs[i]; 3488 - validate = &sw_context->val_bufs[reloc->index].base; 3489 - bo = validate->bo; 3651 + list_for_each_entry(reloc, &sw_context->bo_relocations, head) { 3652 + bo = &reloc->vbo->base; 3490 3653 switch (bo->mem.mem_type) { 3491 3654 case TTM_PL_VRAM: 3492 3655 reloc->location->offset += bo->offset; ··· 3501 3668 } 3502 3669 } 3503 3670 vmw_free_relocations(sw_context); 3504 - } 3505 - 3506 - /** 3507 - * vmw_resource_list_unrefererence - Free up a resource list and unreference 3508 - * all resources referenced by it. 3509 - * 3510 - * @list: The resource list. 3511 - */ 3512 - static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context, 3513 - struct list_head *list) 3514 - { 3515 - struct vmw_resource_val_node *val, *val_next; 3516 - 3517 - /* 3518 - * Drop references to resources held during command submission. 3519 - */ 3520 - 3521 - list_for_each_entry_safe(val, val_next, list, head) { 3522 - list_del_init(&val->head); 3523 - vmw_resource_unreference(&val->res); 3524 - 3525 - if (val->staged_bindings) { 3526 - if (val->staged_bindings != sw_context->staged_bindings) 3527 - vmw_binding_state_free(val->staged_bindings); 3528 - else 3529 - sw_context->staged_bindings_inuse = false; 3530 - val->staged_bindings = NULL; 3531 - } 3532 - 3533 - kfree(val); 3534 - } 3535 - } 3536 - 3537 - static void vmw_clear_validations(struct vmw_sw_context *sw_context) 3538 - { 3539 - struct vmw_validate_buffer *entry, *next; 3540 - struct vmw_resource_val_node *val; 3541 - 3542 - /* 3543 - * Drop references to DMA buffers held during command submission. 
3544 - */ 3545 - list_for_each_entry_safe(entry, next, &sw_context->validate_nodes, 3546 - base.head) { 3547 - list_del(&entry->base.head); 3548 - ttm_bo_unref(&entry->base.bo); 3549 - (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash); 3550 - sw_context->cur_val_buf--; 3551 - } 3552 - BUG_ON(sw_context->cur_val_buf != 0); 3553 - 3554 - list_for_each_entry(val, &sw_context->resource_list, head) 3555 - (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash); 3556 - } 3557 - 3558 - int vmw_validate_single_buffer(struct vmw_private *dev_priv, 3559 - struct ttm_buffer_object *bo, 3560 - bool interruptible, 3561 - bool validate_as_mob) 3562 - { 3563 - struct vmw_buffer_object *vbo = 3564 - container_of(bo, struct vmw_buffer_object, base); 3565 - struct ttm_operation_ctx ctx = { interruptible, false }; 3566 - int ret; 3567 - 3568 - if (vbo->pin_count > 0) 3569 - return 0; 3570 - 3571 - if (validate_as_mob) 3572 - return ttm_bo_validate(bo, &vmw_mob_placement, &ctx); 3573 - 3574 - /** 3575 - * Put BO in VRAM if there is space, otherwise as a GMR. 3576 - * If there is no space in VRAM and GMR ids are all used up, 3577 - * start evicting GMRs to make room. If the DMA buffer can't be 3578 - * used as a GMR, this will return -ENOMEM. 3579 - */ 3580 - 3581 - ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx); 3582 - if (likely(ret == 0 || ret == -ERESTARTSYS)) 3583 - return ret; 3584 - 3585 - /** 3586 - * If that failed, try VRAM again, this time evicting 3587 - * previous contents. 
3588 - */ 3589 - 3590 - ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx); 3591 - return ret; 3592 - } 3593 - 3594 - static int vmw_validate_buffers(struct vmw_private *dev_priv, 3595 - struct vmw_sw_context *sw_context) 3596 - { 3597 - struct vmw_validate_buffer *entry; 3598 - int ret; 3599 - 3600 - list_for_each_entry(entry, &sw_context->validate_nodes, base.head) { 3601 - ret = vmw_validate_single_buffer(dev_priv, entry->base.bo, 3602 - true, 3603 - entry->validate_as_mob); 3604 - if (unlikely(ret != 0)) 3605 - return ret; 3606 - } 3607 - return 0; 3608 3671 } 3609 3672 3610 3673 static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context, ··· 3675 3946 3676 3947 if (sw_context->dx_ctx_node) 3677 3948 cmd = vmw_fifo_reserve_dx(dev_priv, command_size, 3678 - sw_context->dx_ctx_node->res->id); 3949 + sw_context->dx_ctx_node->ctx->id); 3679 3950 else 3680 3951 cmd = vmw_fifo_reserve(dev_priv, command_size); 3681 3952 if (!cmd) { ··· 3709 3980 u32 command_size, 3710 3981 struct vmw_sw_context *sw_context) 3711 3982 { 3712 - u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id : 3983 + u32 id = ((sw_context->dx_ctx_node) ? 
sw_context->dx_ctx_node->ctx->id : 3713 3984 SVGA3D_INVALID_ID); 3714 3985 void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, 3715 3986 id, false, header); ··· 3786 4057 struct vmw_sw_context *sw_context, 3787 4058 uint32_t handle) 3788 4059 { 3789 - struct vmw_resource_val_node *ctx_node; 3790 4060 struct vmw_resource *res; 3791 4061 int ret; 4062 + unsigned int size; 3792 4063 3793 4064 if (handle == SVGA3D_INVALID_ID) 3794 4065 return 0; 3795 4066 3796 - ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile, 3797 - handle, user_context_converter, 3798 - &res); 3799 - if (unlikely(ret != 0)) { 4067 + size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context); 4068 + ret = vmw_validation_preload_res(sw_context->ctx, size); 4069 + if (ret) 4070 + return ret; 4071 + 4072 + res = vmw_user_resource_noref_lookup_handle 4073 + (dev_priv, sw_context->fp->tfile, handle, 4074 + user_context_converter); 4075 + if (unlikely(IS_ERR(res))) { 3800 4076 DRM_ERROR("Could not find or user DX context 0x%08x.\n", 3801 4077 (unsigned) handle); 3802 - return ret; 4078 + return PTR_ERR(res); 3803 4079 } 3804 4080 3805 - ret = vmw_resource_val_add(sw_context, res, &ctx_node); 4081 + ret = vmw_execbuf_res_noref_val_add(sw_context, res); 3806 4082 if (unlikely(ret != 0)) 3807 - goto out_err; 4083 + return ret; 3808 4084 3809 - sw_context->dx_ctx_node = ctx_node; 4085 + sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res); 3810 4086 sw_context->man = vmw_context_res_man(res); 3811 - out_err: 3812 - vmw_resource_unreference(&res); 3813 - return ret; 4087 + 4088 + return 0; 3814 4089 } 3815 4090 3816 4091 int vmw_execbuf_process(struct drm_file *file_priv, ··· 3830 4097 { 3831 4098 struct vmw_sw_context *sw_context = &dev_priv->ctx; 3832 4099 struct vmw_fence_obj *fence = NULL; 3833 - struct vmw_resource *error_resource; 3834 - struct list_head resource_list; 3835 4100 struct vmw_cmdbuf_header *header; 3836 - struct ww_acquire_ctx ticket; 3837 4101 
uint32_t handle; 3838 4102 int ret; 3839 4103 int32_t out_fence_fd = -1; 3840 4104 struct sync_file *sync_file = NULL; 3841 - 4105 + DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1); 3842 4106 3843 4107 if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) { 3844 4108 out_fence_fd = get_unused_fd_flags(O_CLOEXEC); ··· 3887 4157 sw_context->kernel = true; 3888 4158 3889 4159 sw_context->fp = vmw_fpriv(file_priv); 3890 - sw_context->cur_reloc = 0; 3891 - sw_context->cur_val_buf = 0; 3892 - INIT_LIST_HEAD(&sw_context->resource_list); 3893 - INIT_LIST_HEAD(&sw_context->ctx_resource_list); 4160 + INIT_LIST_HEAD(&sw_context->ctx_list); 3894 4161 sw_context->cur_query_bo = dev_priv->pinned_bo; 3895 4162 sw_context->last_query_ctx = NULL; 3896 4163 sw_context->needs_post_query_barrier = false; ··· 3895 4168 sw_context->dx_query_mob = NULL; 3896 4169 sw_context->dx_query_ctx = NULL; 3897 4170 memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache)); 3898 - INIT_LIST_HEAD(&sw_context->validate_nodes); 3899 4171 INIT_LIST_HEAD(&sw_context->res_relocations); 4172 + INIT_LIST_HEAD(&sw_context->bo_relocations); 3900 4173 if (sw_context->staged_bindings) 3901 4174 vmw_binding_state_reset(sw_context->staged_bindings); 3902 4175 ··· 3907 4180 sw_context->res_ht_initialized = true; 3908 4181 } 3909 4182 INIT_LIST_HEAD(&sw_context->staged_cmd_res); 3910 - INIT_LIST_HEAD(&resource_list); 4183 + sw_context->ctx = &val_ctx; 3911 4184 ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle); 3912 - if (unlikely(ret != 0)) { 3913 - list_splice_init(&sw_context->ctx_resource_list, 3914 - &sw_context->resource_list); 4185 + if (unlikely(ret != 0)) 3915 4186 goto out_err_nores; 3916 - } 3917 4187 3918 4188 ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands, 3919 4189 command_size); 3920 - /* 3921 - * Merge the resource lists before checking the return status 3922 - * from vmd_cmd_check_all so that all the open hashtabs will 3923 - * be handled properly even if 
vmw_cmd_check_all fails. 3924 - */ 3925 - list_splice_init(&sw_context->ctx_resource_list, 3926 - &sw_context->resource_list); 3927 - 3928 4190 if (unlikely(ret != 0)) 3929 4191 goto out_err_nores; 3930 4192 ··· 3921 4205 if (unlikely(ret != 0)) 3922 4206 goto out_err_nores; 3923 4207 3924 - ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes, 3925 - true, NULL); 4208 + ret = vmw_validation_bo_reserve(&val_ctx, true); 3926 4209 if (unlikely(ret != 0)) 3927 4210 goto out_err_nores; 3928 4211 3929 - ret = vmw_validate_buffers(dev_priv, sw_context); 4212 + ret = vmw_validation_bo_validate(&val_ctx, true); 3930 4213 if (unlikely(ret != 0)) 3931 4214 goto out_err; 3932 4215 3933 - ret = vmw_resources_validate(sw_context); 4216 + ret = vmw_validation_res_validate(&val_ctx, true); 3934 4217 if (unlikely(ret != 0)) 3935 4218 goto out_err; 4219 + vmw_validation_drop_ht(&val_ctx); 3936 4220 3937 4221 ret = mutex_lock_interruptible(&dev_priv->binding_mutex); 3938 4222 if (unlikely(ret != 0)) { ··· 3971 4255 if (ret != 0) 3972 4256 DRM_ERROR("Fence submission error. 
Syncing.\n"); 3973 4257 3974 - vmw_resources_unreserve(sw_context, false); 4258 + vmw_execbuf_bindings_commit(sw_context, false); 4259 + vmw_bind_dx_query_mob(sw_context); 4260 + vmw_validation_res_unreserve(&val_ctx, false); 3975 4261 3976 - ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes, 3977 - (void *) fence); 4262 + vmw_validation_bo_fence(sw_context->ctx, fence); 3978 4263 3979 4264 if (unlikely(dev_priv->pinned_bo != NULL && 3980 4265 !dev_priv->query_cid_valid)) 3981 4266 __vmw_execbuf_release_pinned_bo(dev_priv, fence); 3982 - 3983 - vmw_clear_validations(sw_context); 3984 4267 3985 4268 /* 3986 4269 * If anything fails here, give up trying to export the fence ··· 4015 4300 vmw_fence_obj_unreference(&fence); 4016 4301 } 4017 4302 4018 - list_splice_init(&sw_context->resource_list, &resource_list); 4019 4303 vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res); 4020 4304 mutex_unlock(&dev_priv->cmdbuf_mutex); 4021 4305 ··· 4022 4308 * Unreference resources outside of the cmdbuf_mutex to 4023 4309 * avoid deadlocks in resource destruction paths. 
4024 4310 */ 4025 - vmw_resource_list_unreference(sw_context, &resource_list); 4311 + vmw_validation_unref_lists(&val_ctx); 4026 4312 4027 4313 return 0; 4028 4314 4029 4315 out_unlock_binding: 4030 4316 mutex_unlock(&dev_priv->binding_mutex); 4031 4317 out_err: 4032 - ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes); 4318 + vmw_validation_bo_backoff(&val_ctx); 4033 4319 out_err_nores: 4034 - vmw_resources_unreserve(sw_context, true); 4320 + vmw_execbuf_bindings_commit(sw_context, true); 4321 + vmw_validation_res_unreserve(&val_ctx, true); 4035 4322 vmw_resource_relocations_free(&sw_context->res_relocations); 4036 4323 vmw_free_relocations(sw_context); 4037 - vmw_clear_validations(sw_context); 4038 4324 if (unlikely(dev_priv->pinned_bo != NULL && 4039 4325 !dev_priv->query_cid_valid)) 4040 4326 __vmw_execbuf_release_pinned_bo(dev_priv, NULL); 4041 4327 out_unlock: 4042 - list_splice_init(&sw_context->resource_list, &resource_list); 4043 - error_resource = sw_context->error_resource; 4044 - sw_context->error_resource = NULL; 4045 4328 vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res); 4329 + vmw_validation_drop_ht(&val_ctx); 4330 + WARN_ON(!list_empty(&sw_context->ctx_list)); 4046 4331 mutex_unlock(&dev_priv->cmdbuf_mutex); 4047 4332 4048 4333 /* 4049 4334 * Unreference resources outside of the cmdbuf_mutex to 4050 4335 * avoid deadlocks in resource destruction paths. 
4051 4336 */ 4052 - vmw_resource_list_unreference(sw_context, &resource_list); 4053 - if (unlikely(error_resource != NULL)) 4054 - vmw_resource_unreference(&error_resource); 4337 + vmw_validation_unref_lists(&val_ctx); 4055 4338 out_free_header: 4056 4339 if (header) 4057 4340 vmw_cmdbuf_header_free(header); ··· 4109 4398 struct vmw_fence_obj *fence) 4110 4399 { 4111 4400 int ret = 0; 4112 - struct list_head validate_list; 4113 - struct ttm_validate_buffer pinned_val, query_val; 4114 4401 struct vmw_fence_obj *lfence = NULL; 4115 - struct ww_acquire_ctx ticket; 4402 + DECLARE_VAL_CONTEXT(val_ctx, NULL, 0); 4116 4403 4117 4404 if (dev_priv->pinned_bo == NULL) 4118 4405 goto out_unlock; 4119 4406 4120 - INIT_LIST_HEAD(&validate_list); 4121 - 4122 - pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base); 4123 - pinned_val.shared = false; 4124 - list_add_tail(&pinned_val.head, &validate_list); 4125 - 4126 - query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base); 4127 - query_val.shared = false; 4128 - list_add_tail(&query_val.head, &validate_list); 4129 - 4130 - ret = ttm_eu_reserve_buffers(&ticket, &validate_list, 4131 - false, NULL); 4132 - if (unlikely(ret != 0)) { 4133 - vmw_execbuf_unpin_panic(dev_priv); 4407 + ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false, 4408 + false); 4409 + if (ret) 4134 4410 goto out_no_reserve; 4135 - } 4411 + 4412 + ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false, 4413 + false); 4414 + if (ret) 4415 + goto out_no_reserve; 4416 + 4417 + ret = vmw_validation_bo_reserve(&val_ctx, false); 4418 + if (ret) 4419 + goto out_no_reserve; 4136 4420 4137 4421 if (dev_priv->query_cid_valid) { 4138 4422 BUG_ON(fence != NULL); 4139 4423 ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid); 4140 - if (unlikely(ret != 0)) { 4141 - vmw_execbuf_unpin_panic(dev_priv); 4424 + if (ret) 4142 4425 goto out_no_emit; 4143 - } 4144 4426 dev_priv->query_cid_valid = false; 4145 4427 } 4146 4428 ··· 
4147 4443 NULL); 4148 4444 fence = lfence; 4149 4445 } 4150 - ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence); 4446 + vmw_validation_bo_fence(&val_ctx, fence); 4151 4447 if (lfence != NULL) 4152 4448 vmw_fence_obj_unreference(&lfence); 4153 4449 4154 - ttm_bo_unref(&query_val.bo); 4155 - ttm_bo_unref(&pinned_val.bo); 4450 + vmw_validation_unref_lists(&val_ctx); 4156 4451 vmw_bo_unreference(&dev_priv->pinned_bo); 4157 4452 out_unlock: 4158 4453 return; 4159 4454 4160 4455 out_no_emit: 4161 - ttm_eu_backoff_reservation(&ticket, &validate_list); 4456 + vmw_validation_bo_backoff(&val_ctx); 4162 4457 out_no_reserve: 4163 - ttm_bo_unref(&query_val.bo); 4164 - ttm_bo_unref(&pinned_val.bo); 4458 + vmw_validation_unref_lists(&val_ctx); 4459 + vmw_execbuf_unpin_panic(dev_priv); 4165 4460 vmw_bo_unreference(&dev_priv->pinned_bo); 4461 + 4166 4462 } 4167 4463 4168 4464 /**
+4 -3
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
··· 306 306 INIT_LIST_HEAD(&fman->cleanup_list); 307 307 INIT_WORK(&fman->work, &vmw_fence_work_func); 308 308 fman->fifo_down = true; 309 - fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)); 309 + fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)) + 310 + TTM_OBJ_EXTRA_SIZE; 310 311 fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj)); 311 312 fman->event_fence_action_size = 312 313 ttm_round_pot(sizeof(struct vmw_event_fence_action)); ··· 651 650 } 652 651 653 652 *p_fence = &ufence->fence; 654 - *p_handle = ufence->base.hash.key; 653 + *p_handle = ufence->base.handle; 655 654 656 655 return 0; 657 656 out_err: ··· 1138 1137 "object.\n"); 1139 1138 goto out_no_ref_obj; 1140 1139 } 1141 - handle = base->hash.key; 1140 + handle = base->handle; 1142 1141 } 1143 1142 ttm_base_object_unref(&base); 1144 1143 }
+20 -177
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
··· 2575 2575 } 2576 2576 2577 2577 /** 2578 - * vmw_kms_helper_buffer_prepare - Reserve and validate a buffer object before 2579 - * command submission. 2580 - * 2581 - * @dev_priv. Pointer to a device private structure. 2582 - * @buf: The buffer object 2583 - * @interruptible: Whether to perform waits as interruptible. 2584 - * @validate_as_mob: Whether the buffer should be validated as a MOB. If false, 2585 - * The buffer will be validated as a GMR. Already pinned buffers will not be 2586 - * validated. 2587 - * 2588 - * Returns 0 on success, negative error code on failure, -ERESTARTSYS if 2589 - * interrupted by a signal. 2578 + * vmw_kms_helper_validation_finish - Helper for post KMS command submission 2579 + * cleanup and fencing 2580 + * @dev_priv: Pointer to the device-private struct 2581 + * @file_priv: Pointer identifying the client when user-space fencing is used 2582 + * @ctx: Pointer to the validation context 2583 + * @out_fence: If non-NULL, returned refcounted fence-pointer 2584 + * @user_fence_rep: If non-NULL, pointer to user-space address area 2585 + * in which to copy user-space fence info 2590 2586 */ 2591 - int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv, 2592 - struct vmw_buffer_object *buf, 2593 - bool interruptible, 2594 - bool validate_as_mob, 2595 - bool for_cpu_blit) 2587 + void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv, 2588 + struct drm_file *file_priv, 2589 + struct vmw_validation_context *ctx, 2590 + struct vmw_fence_obj **out_fence, 2591 + struct drm_vmw_fence_rep __user * 2592 + user_fence_rep) 2596 2593 { 2597 - struct ttm_operation_ctx ctx = { 2598 - .interruptible = interruptible, 2599 - .no_wait_gpu = false}; 2600 - struct ttm_buffer_object *bo = &buf->base; 2601 - int ret; 2602 - 2603 - ttm_bo_reserve(bo, false, false, NULL); 2604 - if (for_cpu_blit) 2605 - ret = ttm_bo_validate(bo, &vmw_nonfixed_placement, &ctx); 2606 - else 2607 - ret = vmw_validate_single_buffer(dev_priv, bo, interruptible, 
2608 - validate_as_mob); 2609 - if (ret) 2610 - ttm_bo_unreserve(bo); 2611 - 2612 - return ret; 2613 - } 2614 - 2615 - /** 2616 - * vmw_kms_helper_buffer_revert - Undo the actions of 2617 - * vmw_kms_helper_buffer_prepare. 2618 - * 2619 - * @res: Pointer to the buffer object. 2620 - * 2621 - * Helper to be used if an error forces the caller to undo the actions of 2622 - * vmw_kms_helper_buffer_prepare. 2623 - */ 2624 - void vmw_kms_helper_buffer_revert(struct vmw_buffer_object *buf) 2625 - { 2626 - if (buf) 2627 - ttm_bo_unreserve(&buf->base); 2628 - } 2629 - 2630 - /** 2631 - * vmw_kms_helper_buffer_finish - Unreserve and fence a buffer object after 2632 - * kms command submission. 2633 - * 2634 - * @dev_priv: Pointer to a device private structure. 2635 - * @file_priv: Pointer to a struct drm_file representing the caller's 2636 - * connection. Must be set to NULL if @user_fence_rep is NULL, and conversely 2637 - * if non-NULL, @user_fence_rep must be non-NULL. 2638 - * @buf: The buffer object. 2639 - * @out_fence: Optional pointer to a fence pointer. If non-NULL, a 2640 - * ref-counted fence pointer is returned here. 2641 - * @user_fence_rep: Optional pointer to a user-space provided struct 2642 - * drm_vmw_fence_rep. If provided, @file_priv must also be provided and the 2643 - * function copies fence data to user-space in a fail-safe manner. 2644 - */ 2645 - void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv, 2646 - struct drm_file *file_priv, 2647 - struct vmw_buffer_object *buf, 2648 - struct vmw_fence_obj **out_fence, 2649 - struct drm_vmw_fence_rep __user * 2650 - user_fence_rep) 2651 - { 2652 - struct vmw_fence_obj *fence; 2594 + struct vmw_fence_obj *fence = NULL; 2653 2595 uint32_t handle; 2654 2596 int ret; 2655 2597 2656 - ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence, 2657 - file_priv ? 
&handle : NULL); 2658 - if (buf) 2659 - vmw_bo_fence_single(&buf->base, fence); 2598 + if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) || 2599 + out_fence) 2600 + ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence, 2601 + file_priv ? &handle : NULL); 2602 + vmw_validation_done(ctx, fence); 2660 2603 if (file_priv) 2661 2604 vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), 2662 2605 ret, user_fence_rep, fence, ··· 2608 2665 *out_fence = fence; 2609 2666 else 2610 2667 vmw_fence_obj_unreference(&fence); 2611 - 2612 - vmw_kms_helper_buffer_revert(buf); 2613 - } 2614 - 2615 - 2616 - /** 2617 - * vmw_kms_helper_resource_revert - Undo the actions of 2618 - * vmw_kms_helper_resource_prepare. 2619 - * 2620 - * @res: Pointer to the resource. Typically a surface. 2621 - * 2622 - * Helper to be used if an error forces the caller to undo the actions of 2623 - * vmw_kms_helper_resource_prepare. 2624 - */ 2625 - void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx) 2626 - { 2627 - struct vmw_resource *res = ctx->res; 2628 - 2629 - vmw_kms_helper_buffer_revert(ctx->buf); 2630 - vmw_bo_unreference(&ctx->buf); 2631 - vmw_resource_unreserve(res, false, NULL, 0); 2632 - mutex_unlock(&res->dev_priv->cmdbuf_mutex); 2633 - } 2634 - 2635 - /** 2636 - * vmw_kms_helper_resource_prepare - Reserve and validate a resource before 2637 - * command submission. 2638 - * 2639 - * @res: Pointer to the resource. Typically a surface. 2640 - * @interruptible: Whether to perform waits as interruptible. 2641 - * 2642 - * Reserves and validates also the backup buffer if a guest-backed resource. 2643 - * Returns 0 on success, negative error code on failure. -ERESTARTSYS if 2644 - * interrupted by a signal. 
2645 - */ 2646 - int vmw_kms_helper_resource_prepare(struct vmw_resource *res, 2647 - bool interruptible, 2648 - struct vmw_validation_ctx *ctx) 2649 - { 2650 - int ret = 0; 2651 - 2652 - ctx->buf = NULL; 2653 - ctx->res = res; 2654 - 2655 - if (interruptible) 2656 - ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex); 2657 - else 2658 - mutex_lock(&res->dev_priv->cmdbuf_mutex); 2659 - 2660 - if (unlikely(ret != 0)) 2661 - return -ERESTARTSYS; 2662 - 2663 - ret = vmw_resource_reserve(res, interruptible, false); 2664 - if (ret) 2665 - goto out_unlock; 2666 - 2667 - if (res->backup) { 2668 - ret = vmw_kms_helper_buffer_prepare(res->dev_priv, res->backup, 2669 - interruptible, 2670 - res->dev_priv->has_mob, 2671 - false); 2672 - if (ret) 2673 - goto out_unreserve; 2674 - 2675 - ctx->buf = vmw_bo_reference(res->backup); 2676 - } 2677 - ret = vmw_resource_validate(res); 2678 - if (ret) 2679 - goto out_revert; 2680 - return 0; 2681 - 2682 - out_revert: 2683 - vmw_kms_helper_buffer_revert(ctx->buf); 2684 - out_unreserve: 2685 - vmw_resource_unreserve(res, false, NULL, 0); 2686 - out_unlock: 2687 - mutex_unlock(&res->dev_priv->cmdbuf_mutex); 2688 - return ret; 2689 - } 2690 - 2691 - /** 2692 - * vmw_kms_helper_resource_finish - Unreserve and fence a resource after 2693 - * kms command submission. 2694 - * 2695 - * @res: Pointer to the resource. Typically a surface. 2696 - * @out_fence: Optional pointer to a fence pointer. If non-NULL, a 2697 - * ref-counted fence pointer is returned here. 
2698 - */ 2699 - void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx, 2700 - struct vmw_fence_obj **out_fence) 2701 - { 2702 - struct vmw_resource *res = ctx->res; 2703 - 2704 - if (ctx->buf || out_fence) 2705 - vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf, 2706 - out_fence, NULL); 2707 - 2708 - vmw_bo_unreference(&ctx->buf); 2709 - vmw_resource_unreserve(res, false, NULL, 0); 2710 - mutex_unlock(&res->dev_priv->cmdbuf_mutex); 2711 2668 } 2712 2669 2713 2670 /**
+6 -18
drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
··· 308 308 int increment, 309 309 struct vmw_kms_dirty *dirty); 310 310 311 - int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv, 312 - struct vmw_buffer_object *buf, 313 - bool interruptible, 314 - bool validate_as_mob, 315 - bool for_cpu_blit); 316 - void vmw_kms_helper_buffer_revert(struct vmw_buffer_object *buf); 317 - void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv, 318 - struct drm_file *file_priv, 319 - struct vmw_buffer_object *buf, 320 - struct vmw_fence_obj **out_fence, 321 - struct drm_vmw_fence_rep __user * 322 - user_fence_rep); 323 - int vmw_kms_helper_resource_prepare(struct vmw_resource *res, 324 - bool interruptible, 325 - struct vmw_validation_ctx *ctx); 326 - void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx); 327 - void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx, 328 - struct vmw_fence_obj **out_fence); 311 + void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv, 312 + struct drm_file *file_priv, 313 + struct vmw_validation_context *ctx, 314 + struct vmw_fence_obj **out_fence, 315 + struct drm_vmw_fence_rep __user * 316 + user_fence_rep); 329 317 int vmw_kms_readback(struct vmw_private *dev_priv, 330 318 struct drm_file *file_priv, 331 319 struct vmw_framebuffer *vfb,
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
··· 31 31 */ 32 32 33 33 #include "vmwgfx_drv.h" 34 + #include "ttm_object.h" 34 35 #include <linux/dma-buf.h> 35 - #include <drm/ttm/ttm_object.h> 36 36 37 37 /* 38 38 * DMA-BUF attach- and mapping methods. No need to implement
+65 -59
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
··· 58 58 struct vmw_private *dev_priv = res->dev_priv; 59 59 struct idr *idr = &dev_priv->res_idr[res->func->res_type]; 60 60 61 - write_lock(&dev_priv->resource_lock); 61 + spin_lock(&dev_priv->resource_lock); 62 62 if (res->id != -1) 63 63 idr_remove(idr, res->id); 64 64 res->id = -1; 65 - write_unlock(&dev_priv->resource_lock); 65 + spin_unlock(&dev_priv->resource_lock); 66 66 } 67 67 68 68 static void vmw_resource_release(struct kref *kref) ··· 73 73 int id; 74 74 struct idr *idr = &dev_priv->res_idr[res->func->res_type]; 75 75 76 - write_lock(&dev_priv->resource_lock); 77 - res->avail = false; 76 + spin_lock(&dev_priv->resource_lock); 78 77 list_del_init(&res->lru_head); 79 - write_unlock(&dev_priv->resource_lock); 78 + spin_unlock(&dev_priv->resource_lock); 80 79 if (res->backup) { 81 80 struct ttm_buffer_object *bo = &res->backup->base; 82 81 ··· 107 108 else 108 109 kfree(res); 109 110 110 - write_lock(&dev_priv->resource_lock); 111 + spin_lock(&dev_priv->resource_lock); 111 112 if (id != -1) 112 113 idr_remove(idr, id); 113 - write_unlock(&dev_priv->resource_lock); 114 + spin_unlock(&dev_priv->resource_lock); 114 115 } 115 116 116 117 void vmw_resource_unreference(struct vmw_resource **p_res) ··· 139 140 BUG_ON(res->id != -1); 140 141 141 142 idr_preload(GFP_KERNEL); 142 - write_lock(&dev_priv->resource_lock); 143 + spin_lock(&dev_priv->resource_lock); 143 144 144 145 ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT); 145 146 if (ret >= 0) 146 147 res->id = ret; 147 148 148 - write_unlock(&dev_priv->resource_lock); 149 + spin_unlock(&dev_priv->resource_lock); 149 150 idr_preload_end(); 150 151 return ret < 0 ? 
ret : 0; 151 152 } ··· 169 170 kref_init(&res->kref); 170 171 res->hw_destroy = NULL; 171 172 res->res_free = res_free; 172 - res->avail = false; 173 173 res->dev_priv = dev_priv; 174 174 res->func = func; 175 175 INIT_LIST_HEAD(&res->lru_head); ··· 185 187 return vmw_resource_alloc_id(res); 186 188 } 187 189 188 - /** 189 - * vmw_resource_activate 190 - * 191 - * @res: Pointer to the newly created resource 192 - * @hw_destroy: Destroy function. NULL if none. 193 - * 194 - * Activate a resource after the hardware has been made aware of it. 195 - * Set tye destroy function to @destroy. Typically this frees the 196 - * resource and destroys the hardware resources associated with it. 197 - * Activate basically means that the function vmw_resource_lookup will 198 - * find it. 199 - */ 200 - void vmw_resource_activate(struct vmw_resource *res, 201 - void (*hw_destroy) (struct vmw_resource *)) 202 - { 203 - struct vmw_private *dev_priv = res->dev_priv; 204 - 205 - write_lock(&dev_priv->resource_lock); 206 - res->avail = true; 207 - res->hw_destroy = hw_destroy; 208 - write_unlock(&dev_priv->resource_lock); 209 - } 210 190 211 191 /** 212 192 * vmw_user_resource_lookup_handle - lookup a struct resource from a ··· 219 243 goto out_bad_resource; 220 244 221 245 res = converter->base_obj_to_res(base); 222 - 223 - read_lock(&dev_priv->resource_lock); 224 - if (!res->avail || res->res_free != converter->res_free) { 225 - read_unlock(&dev_priv->resource_lock); 226 - goto out_bad_resource; 227 - } 228 - 229 246 kref_get(&res->kref); 230 - read_unlock(&dev_priv->resource_lock); 231 247 232 248 *p_res = res; 233 249 ret = 0; ··· 228 260 ttm_base_object_unref(&base); 229 261 230 262 return ret; 263 + } 264 + 265 + /** 266 + * vmw_user_resource_lookup_handle - lookup a struct resource from a 267 + * TTM user-space handle and perform basic type checks 268 + * 269 + * @dev_priv: Pointer to a device private struct 270 + * @tfile: Pointer to a struct ttm_object_file identifying the 
caller 271 + * @handle: The TTM user-space handle 272 + * @converter: Pointer to an object describing the resource type 273 + * @p_res: On successful return the location pointed to will contain 274 + * a pointer to a refcounted struct vmw_resource. 275 + * 276 + * If the handle can't be found or is associated with an incorrect resource 277 + * type, -EINVAL will be returned. 278 + */ 279 + struct vmw_resource * 280 + vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv, 281 + struct ttm_object_file *tfile, 282 + uint32_t handle, 283 + const struct vmw_user_resource_conv 284 + *converter) 285 + { 286 + struct ttm_base_object *base; 287 + 288 + base = ttm_base_object_noref_lookup(tfile, handle); 289 + if (!base) 290 + return ERR_PTR(-ESRCH); 291 + 292 + if (unlikely(ttm_base_object_type(base) != converter->object_type)) { 293 + ttm_base_object_noref_release(); 294 + return ERR_PTR(-EINVAL); 295 + } 296 + 297 + return converter->base_obj_to_res(base); 231 298 } 232 299 233 300 /** ··· 425 422 if (!res->func->may_evict || res->id == -1 || res->pin_count) 426 423 return; 427 424 428 - write_lock(&dev_priv->resource_lock); 425 + spin_lock(&dev_priv->resource_lock); 429 426 list_add_tail(&res->lru_head, 430 427 &res->dev_priv->res_lru[res->func->res_type]); 431 - write_unlock(&dev_priv->resource_lock); 428 + spin_unlock(&dev_priv->resource_lock); 432 429 } 433 430 434 431 /** ··· 507 504 struct vmw_private *dev_priv = res->dev_priv; 508 505 int ret; 509 506 510 - write_lock(&dev_priv->resource_lock); 507 + spin_lock(&dev_priv->resource_lock); 511 508 list_del_init(&res->lru_head); 512 - write_unlock(&dev_priv->resource_lock); 509 + spin_unlock(&dev_priv->resource_lock); 513 510 514 511 if (res->func->needs_backup && res->backup == NULL && 515 512 !no_backup) { ··· 590 587 /** 591 588 * vmw_resource_validate - Make a resource up-to-date and visible 592 589 * to the device. 593 - * 594 - * @res: The resource to make visible to the device. 
590 + * @res: The resource to make visible to the device. 591 + * @intr: Perform waits interruptible if possible. 595 592 * 596 593 * On succesful return, any backup DMA buffer pointed to by @res->backup will 597 594 * be reserved and validated. 598 595 * On hardware resource shortage, this function will repeatedly evict 599 596 * resources of the same type until the validation succeeds. 597 + * 598 + * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code 599 + * on failure. 600 600 */ 601 - int vmw_resource_validate(struct vmw_resource *res) 601 + int vmw_resource_validate(struct vmw_resource *res, bool intr) 602 602 { 603 603 int ret; 604 604 struct vmw_resource *evict_res; ··· 622 616 if (likely(ret != -EBUSY)) 623 617 break; 624 618 625 - write_lock(&dev_priv->resource_lock); 619 + spin_lock(&dev_priv->resource_lock); 626 620 if (list_empty(lru_list) || !res->func->may_evict) { 627 621 DRM_ERROR("Out of device device resources " 628 622 "for %s.\n", res->func->type_name); 629 623 ret = -EBUSY; 630 - write_unlock(&dev_priv->resource_lock); 624 + spin_unlock(&dev_priv->resource_lock); 631 625 break; 632 626 } 633 627 ··· 636 630 lru_head)); 637 631 list_del_init(&evict_res->lru_head); 638 632 639 - write_unlock(&dev_priv->resource_lock); 633 + spin_unlock(&dev_priv->resource_lock); 640 634 641 635 /* Trylock backup buffers with a NULL ticket. 
*/ 642 - ret = vmw_resource_do_evict(NULL, evict_res, true); 636 + ret = vmw_resource_do_evict(NULL, evict_res, intr); 643 637 if (unlikely(ret != 0)) { 644 - write_lock(&dev_priv->resource_lock); 638 + spin_lock(&dev_priv->resource_lock); 645 639 list_add_tail(&evict_res->lru_head, lru_list); 646 - write_unlock(&dev_priv->resource_lock); 640 + spin_unlock(&dev_priv->resource_lock); 647 641 if (ret == -ERESTARTSYS || 648 642 ++err_count > VMW_RES_EVICT_ERR_COUNT) { 649 643 vmw_resource_unreference(&evict_res); ··· 825 819 struct ww_acquire_ctx ticket; 826 820 827 821 do { 828 - write_lock(&dev_priv->resource_lock); 822 + spin_lock(&dev_priv->resource_lock); 829 823 830 824 if (list_empty(lru_list)) 831 825 goto out_unlock; ··· 834 828 list_first_entry(lru_list, struct vmw_resource, 835 829 lru_head)); 836 830 list_del_init(&evict_res->lru_head); 837 - write_unlock(&dev_priv->resource_lock); 831 + spin_unlock(&dev_priv->resource_lock); 838 832 839 833 /* Wait lock backup buffers with a ticket. */ 840 834 ret = vmw_resource_do_evict(&ticket, evict_res, false); 841 835 if (unlikely(ret != 0)) { 842 - write_lock(&dev_priv->resource_lock); 836 + spin_lock(&dev_priv->resource_lock); 843 837 list_add_tail(&evict_res->lru_head, lru_list); 844 - write_unlock(&dev_priv->resource_lock); 838 + spin_unlock(&dev_priv->resource_lock); 845 839 if (++err_count > VMW_RES_EVICT_ERR_COUNT) { 846 840 vmw_resource_unreference(&evict_res); 847 841 return; ··· 852 846 } while (1); 853 847 854 848 out_unlock: 855 - write_unlock(&dev_priv->resource_lock); 849 + spin_unlock(&dev_priv->resource_lock); 856 850 } 857 851 858 852 /** ··· 920 914 /* Do we really need to pin the MOB as well? */ 921 915 vmw_bo_pin_reserved(vbo, true); 922 916 } 923 - ret = vmw_resource_validate(res); 917 + ret = vmw_resource_validate(res, interruptible); 924 918 if (vbo) 925 919 ttm_bo_unreserve(&vbo->base); 926 920 if (ret)
+5 -2
drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
··· 30 30 31 31 #include "vmwgfx_drv.h" 32 32 33 + /* 34 + * Extra memory required by the resource id's ida storage, which is allocated 35 + * separately from the base object itself. We estimate an on-average 128 bytes 36 + * per ida. 37 + */ 33 38 #define VMW_IDA_ACC_SIZE 128 34 39 35 40 enum vmw_cmdbuf_res_state { ··· 125 120 bool delay_id, 126 121 void (*res_free) (struct vmw_resource *res), 127 122 const struct vmw_res_func *func); 128 - void vmw_resource_activate(struct vmw_resource *res, 129 - void (*hw_destroy) (struct vmw_resource *)); 130 123 int 131 124 vmw_simple_resource_create_ioctl(struct drm_device *dev, 132 125 void *data,
+35 -13
drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
··· 946 946 struct vmw_framebuffer_surface *vfbs = 947 947 container_of(framebuffer, typeof(*vfbs), base); 948 948 struct vmw_kms_sou_surface_dirty sdirty; 949 - struct vmw_validation_ctx ctx; 949 + DECLARE_VAL_CONTEXT(val_ctx, NULL, 0); 950 950 int ret; 951 951 952 952 if (!srf) 953 953 srf = &vfbs->surface->res; 954 954 955 - ret = vmw_kms_helper_resource_prepare(srf, true, &ctx); 955 + ret = vmw_validation_add_resource(&val_ctx, srf, 0, NULL, NULL); 956 956 if (ret) 957 957 return ret; 958 + 959 + ret = vmw_validation_prepare(&val_ctx, &dev_priv->cmdbuf_mutex, true); 960 + if (ret) 961 + goto out_unref; 958 962 959 963 sdirty.base.fifo_commit = vmw_sou_surface_fifo_commit; 960 964 sdirty.base.clip = vmw_sou_surface_clip; ··· 976 972 ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips, 977 973 dest_x, dest_y, num_clips, inc, 978 974 &sdirty.base); 979 - vmw_kms_helper_resource_finish(&ctx, out_fence); 975 + vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence, 976 + NULL); 980 977 978 + return ret; 979 + 980 + out_unref: 981 + vmw_validation_unref_lists(&val_ctx); 981 982 return ret; 982 983 } 983 984 ··· 1060 1051 container_of(framebuffer, struct vmw_framebuffer_bo, 1061 1052 base)->buffer; 1062 1053 struct vmw_kms_dirty dirty; 1054 + DECLARE_VAL_CONTEXT(val_ctx, NULL, 0); 1063 1055 int ret; 1064 1056 1065 - ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible, 1066 - false, false); 1057 + ret = vmw_validation_add_bo(&val_ctx, buf, false, false); 1067 1058 if (ret) 1068 1059 return ret; 1060 + 1061 + ret = vmw_validation_prepare(&val_ctx, NULL, interruptible); 1062 + if (ret) 1063 + goto out_unref; 1069 1064 1070 1065 ret = do_bo_define_gmrfb(dev_priv, framebuffer); 1071 1066 if (unlikely(ret != 0)) ··· 1082 1069 num_clips; 1083 1070 ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips, 1084 1071 0, 0, num_clips, increment, &dirty); 1085 - vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, out_fence, NULL); 1072 
+ vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence, 1073 + NULL); 1086 1074 1087 1075 return ret; 1088 1076 1089 1077 out_revert: 1090 - vmw_kms_helper_buffer_revert(buf); 1078 + vmw_validation_revert(&val_ctx); 1079 + out_unref: 1080 + vmw_validation_unref_lists(&val_ctx); 1091 1081 1092 1082 return ret; 1093 1083 } ··· 1166 1150 struct vmw_buffer_object *buf = 1167 1151 container_of(vfb, struct vmw_framebuffer_bo, base)->buffer; 1168 1152 struct vmw_kms_dirty dirty; 1153 + DECLARE_VAL_CONTEXT(val_ctx, NULL, 0); 1169 1154 int ret; 1170 1155 1171 - ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false, 1172 - false); 1156 + ret = vmw_validation_add_bo(&val_ctx, buf, false, false); 1173 1157 if (ret) 1174 1158 return ret; 1159 + 1160 + ret = vmw_validation_prepare(&val_ctx, NULL, true); 1161 + if (ret) 1162 + goto out_unref; 1175 1163 1176 1164 ret = do_bo_define_gmrfb(dev_priv, vfb); 1177 1165 if (unlikely(ret != 0)) ··· 1188 1168 num_clips; 1189 1169 ret = vmw_kms_helper_dirty(dev_priv, vfb, NULL, vclips, 1190 1170 0, 0, num_clips, 1, &dirty); 1191 - vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL, 1192 - user_fence_rep); 1171 + vmw_kms_helper_validation_finish(dev_priv, file_priv, &val_ctx, NULL, 1172 + user_fence_rep); 1193 1173 1194 1174 return ret; 1195 1175 1196 1176 out_revert: 1197 - vmw_kms_helper_buffer_revert(buf); 1198 - 1177 + vmw_validation_revert(&val_ctx); 1178 + out_unref: 1179 + vmw_validation_unref_lists(&val_ctx); 1180 + 1199 1181 return ret; 1200 1182 }
+10 -15
drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
··· 186 186 shader->num_input_sig = num_input_sig; 187 187 shader->num_output_sig = num_output_sig; 188 188 189 - vmw_resource_activate(res, vmw_hw_shader_destroy); 189 + res->hw_destroy = vmw_hw_shader_destroy; 190 190 return 0; 191 191 } 192 192 ··· 562 562 { 563 563 struct vmw_dx_shader *entry, *next; 564 564 565 - WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex)); 565 + lockdep_assert_held_once(&dev_priv->binding_mutex); 566 566 567 567 list_for_each_entry_safe(entry, next, list, cotable_head) { 568 568 WARN_ON(vmw_dx_shader_scrub(&entry->res)); ··· 636 636 637 637 res = &shader->res; 638 638 shader->ctx = ctx; 639 - shader->cotable = vmw_context_cotable(ctx, SVGA_COTABLE_DXSHADER); 639 + shader->cotable = vmw_resource_reference 640 + (vmw_context_cotable(ctx, SVGA_COTABLE_DXSHADER)); 640 641 shader->id = user_key; 641 642 shader->committed = false; 642 643 INIT_LIST_HEAD(&shader->cotable_head); ··· 657 656 goto out_resource_init; 658 657 659 658 res->id = shader->id; 660 - vmw_resource_activate(res, vmw_hw_shader_destroy); 659 + res->hw_destroy = vmw_hw_shader_destroy; 661 660 662 661 out_resource_init: 663 662 vmw_resource_unreference(&res); ··· 741 740 }; 742 741 int ret; 743 742 744 - /* 745 - * Approximate idr memory usage with 128 bytes. It will be limited 746 - * by maximum number_of shaders anyway. 747 - */ 748 743 if (unlikely(vmw_user_shader_size == 0)) 749 744 vmw_user_shader_size = 750 - ttm_round_pot(sizeof(struct vmw_user_shader)) + 128; 745 + ttm_round_pot(sizeof(struct vmw_user_shader)) + 746 + VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE; 751 747 752 748 ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), 753 749 vmw_user_shader_size, ··· 790 792 } 791 793 792 794 if (handle) 793 - *handle = ushader->base.hash.key; 795 + *handle = ushader->base.handle; 794 796 out_err: 795 797 vmw_resource_unreference(&res); 796 798 out: ··· 812 814 }; 813 815 int ret; 814 816 815 - /* 816 - * Approximate idr memory usage with 128 bytes. 
It will be limited 817 - * by maximum number_of shaders anyway. 818 - */ 819 817 if (unlikely(vmw_shader_size == 0)) 820 818 vmw_shader_size = 821 - ttm_round_pot(sizeof(struct vmw_shader)) + 128; 819 + ttm_round_pot(sizeof(struct vmw_shader)) + 820 + VMW_IDA_ACC_SIZE; 822 821 823 822 ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), 824 823 vmw_shader_size,
+4 -3
drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
··· 81 81 return ret; 82 82 } 83 83 84 - vmw_resource_activate(&simple->res, simple->func->hw_destroy); 84 + simple->res.hw_destroy = simple->func->hw_destroy; 85 85 86 86 return 0; 87 87 } ··· 159 159 160 160 alloc_size = offsetof(struct vmw_user_simple_resource, simple) + 161 161 func->size; 162 - account_size = ttm_round_pot(alloc_size) + VMW_IDA_ACC_SIZE; 162 + account_size = ttm_round_pot(alloc_size) + VMW_IDA_ACC_SIZE + 163 + TTM_OBJ_EXTRA_SIZE; 163 164 164 165 ret = ttm_read_lock(&dev_priv->reservation_sem, true); 165 166 if (ret) ··· 209 208 goto out_err; 210 209 } 211 210 212 - func->set_arg_handle(data, usimple->base.hash.key); 211 + func->set_arg_handle(data, usimple->base.handle); 213 212 out_err: 214 213 vmw_resource_unreference(&res); 215 214 out_ret:
+6 -5
drivers/gpu/drm/vmwgfx/vmwgfx_so.c
··· 208 208 union vmw_view_destroy body; 209 209 } *cmd; 210 210 211 - WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex)); 211 + lockdep_assert_held_once(&dev_priv->binding_mutex); 212 212 vmw_binding_res_list_scrub(&res->binding_head); 213 213 214 214 if (!view->committed || res->id == -1) ··· 366 366 res = &view->res; 367 367 view->ctx = ctx; 368 368 view->srf = vmw_resource_reference(srf); 369 - view->cotable = vmw_context_cotable(ctx, vmw_view_cotables[view_type]); 369 + view->cotable = vmw_resource_reference 370 + (vmw_context_cotable(ctx, vmw_view_cotables[view_type])); 370 371 view->view_type = view_type; 371 372 view->view_id = user_key; 372 373 view->cmd_size = cmd_size; ··· 387 386 goto out_resource_init; 388 387 389 388 res->id = view->view_id; 390 - vmw_resource_activate(res, vmw_hw_view_destroy); 389 + res->hw_destroy = vmw_hw_view_destroy; 391 390 392 391 out_resource_init: 393 392 vmw_resource_unreference(&res); ··· 440 439 { 441 440 struct vmw_view *entry, *next; 442 441 443 - WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex)); 442 + lockdep_assert_held_once(&dev_priv->binding_mutex); 444 443 445 444 list_for_each_entry_safe(entry, next, list, cotable_head) 446 445 WARN_ON(vmw_view_destroy(&entry->res)); ··· 460 459 { 461 460 struct vmw_view *entry, *next; 462 461 463 - WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex)); 462 + lockdep_assert_held_once(&dev_priv->binding_mutex); 464 463 465 464 list_for_each_entry_safe(entry, next, list, srf_head) 466 465 WARN_ON(vmw_view_destroy(&entry->res));
+24 -7
drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
··· 759 759 struct vmw_stdu_dirty ddirty; 760 760 int ret; 761 761 bool cpu_blit = !(dev_priv->capabilities & SVGA_CAP_3D); 762 + DECLARE_VAL_CONTEXT(val_ctx, NULL, 0); 762 763 763 764 /* 764 765 * VMs without 3D support don't have the surface DMA command and 765 766 * we'll be using a CPU blit, and the framebuffer should be moved out 766 767 * of VRAM. 767 768 */ 768 - ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible, 769 - false, cpu_blit); 769 + ret = vmw_validation_add_bo(&val_ctx, buf, false, cpu_blit); 770 770 if (ret) 771 771 return ret; 772 + 773 + ret = vmw_validation_prepare(&val_ctx, NULL, interruptible); 774 + if (ret) 775 + goto out_unref; 772 776 773 777 ddirty.transfer = (to_surface) ? SVGA3D_WRITE_HOST_VRAM : 774 778 SVGA3D_READ_HOST_VRAM; ··· 800 796 801 797 ret = vmw_kms_helper_dirty(dev_priv, vfb, clips, vclips, 802 798 0, 0, num_clips, increment, &ddirty.base); 803 - vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL, 804 - user_fence_rep); 805 799 800 + vmw_kms_helper_validation_finish(dev_priv, file_priv, &val_ctx, NULL, 801 + user_fence_rep); 802 + return ret; 803 + 804 + out_unref: 805 + vmw_validation_unref_lists(&val_ctx); 806 806 return ret; 807 807 } 808 808 ··· 932 924 struct vmw_framebuffer_surface *vfbs = 933 925 container_of(framebuffer, typeof(*vfbs), base); 934 926 struct vmw_stdu_dirty sdirty; 935 - struct vmw_validation_ctx ctx; 927 + DECLARE_VAL_CONTEXT(val_ctx, NULL, 0); 936 928 int ret; 937 929 938 930 if (!srf) 939 931 srf = &vfbs->surface->res; 940 932 941 - ret = vmw_kms_helper_resource_prepare(srf, true, &ctx); 933 + ret = vmw_validation_add_resource(&val_ctx, srf, 0, NULL, NULL); 942 934 if (ret) 943 935 return ret; 936 + 937 + ret = vmw_validation_prepare(&val_ctx, &dev_priv->cmdbuf_mutex, true); 938 + if (ret) 939 + goto out_unref; 944 940 945 941 if (vfbs->is_bo_proxy) { 946 942 ret = vmw_kms_update_proxy(srf, clips, num_clips, inc); ··· 966 954 dest_x, dest_y, num_clips, inc, 967 955 
&sdirty.base); 968 956 out_finish: 969 - vmw_kms_helper_resource_finish(&ctx, out_fence); 957 + vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence, 958 + NULL); 970 959 960 + return ret; 961 + 962 + out_unref: 963 + vmw_validation_unref_lists(&val_ctx); 971 964 return ret; 972 965 } 973 966
+10 -10
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
··· 614 614 */ 615 615 616 616 INIT_LIST_HEAD(&srf->view_list); 617 - vmw_resource_activate(res, vmw_hw_surface_destroy); 617 + res->hw_destroy = vmw_hw_surface_destroy; 618 618 return ret; 619 619 } 620 620 ··· 731 731 732 732 if (unlikely(vmw_user_surface_size == 0)) 733 733 vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) + 734 - 128; 734 + VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE; 735 735 736 736 num_sizes = 0; 737 737 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) { ··· 744 744 num_sizes == 0) 745 745 return -EINVAL; 746 746 747 - size = vmw_user_surface_size + 128 + 747 + size = vmw_user_surface_size + 748 748 ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) + 749 749 ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset)); 750 750 ··· 886 886 goto out_unlock; 887 887 } 888 888 889 - rep->sid = user_srf->prime.base.hash.key; 889 + rep->sid = user_srf->prime.base.handle; 890 890 vmw_resource_unreference(&res); 891 891 892 892 ttm_read_unlock(&dev_priv->reservation_sem); ··· 1024 1024 if (unlikely(ret != 0)) { 1025 1025 DRM_ERROR("copy_to_user failed %p %u\n", 1026 1026 user_sizes, srf->num_sizes); 1027 - ttm_ref_object_base_unref(tfile, base->hash.key, TTM_REF_USAGE); 1027 + ttm_ref_object_base_unref(tfile, base->handle, TTM_REF_USAGE); 1028 1028 ret = -EFAULT; 1029 1029 } 1030 1030 ··· 1613 1613 1614 1614 if (unlikely(vmw_user_surface_size == 0)) 1615 1615 vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) + 1616 - 128; 1616 + VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE; 1617 1617 1618 - size = vmw_user_surface_size + 128; 1618 + size = vmw_user_surface_size; 1619 1619 1620 1620 /* Define a surface based on the parameters. 
*/ 1621 1621 ret = vmw_surface_gb_priv_define(dev, ··· 1687 1687 goto out_unlock; 1688 1688 } 1689 1689 1690 - rep->handle = user_srf->prime.base.hash.key; 1690 + rep->handle = user_srf->prime.base.handle; 1691 1691 rep->backup_size = res->backup_size; 1692 1692 if (res->backup) { 1693 1693 rep->buffer_map_handle = ··· 1749 1749 if (unlikely(ret != 0)) { 1750 1750 DRM_ERROR("Could not add a reference to a GB surface " 1751 1751 "backup buffer.\n"); 1752 - (void) ttm_ref_object_base_unref(tfile, base->hash.key, 1752 + (void) ttm_ref_object_base_unref(tfile, base->handle, 1753 1753 TTM_REF_USAGE); 1754 1754 goto out_bad_resource; 1755 1755 } ··· 1763 1763 rep->creq.base.array_size = srf->array_size; 1764 1764 rep->creq.base.buffer_handle = backup_handle; 1765 1765 rep->creq.base.base_size = srf->base_size; 1766 - rep->crep.handle = user_srf->prime.base.hash.key; 1766 + rep->crep.handle = user_srf->prime.base.handle; 1767 1767 rep->crep.backup_size = srf->res.backup_size; 1768 1768 rep->crep.buffer_handle = backup_handle; 1769 1769 rep->crep.buffer_map_handle =
+770
drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 OR MIT 2 + /************************************************************************** 3 + * 4 + * Copyright © 2018 VMware, Inc., Palo Alto, CA., USA 5 + * All Rights Reserved. 6 + * 7 + * Permission is hereby granted, free of charge, to any person obtaining a 8 + * copy of this software and associated documentation files (the 9 + * "Software"), to deal in the Software without restriction, including 10 + * without limitation the rights to use, copy, modify, merge, publish, 11 + * distribute, sub license, and/or sell copies of the Software, and to 12 + * permit persons to whom the Software is furnished to do so, subject to 13 + * the following conditions: 14 + * 15 + * The above copyright notice and this permission notice (including the 16 + * next paragraph) shall be included in all copies or substantial portions 17 + * of the Software. 18 + * 19 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 20 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 21 + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 22 + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 23 + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 24 + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 25 + * USE OR OTHER DEALINGS IN THE SOFTWARE. 26 + * 27 + **************************************************************************/ 28 + #include <linux/slab.h> 29 + #include "vmwgfx_validation.h" 30 + #include "vmwgfx_drv.h" 31 + 32 + /** 33 + * struct vmw_validation_bo_node - Buffer object validation metadata. 34 + * @base: Metadata used for TTM reservation- and validation. 35 + * @hash: A hash entry used for the duplicate detection hash table. 36 + * @as_mob: Validate as mob. 37 + * @cpu_blit: Validate for cpu blit access. 
38 + * 39 + * Bit fields are used since these structures are allocated and freed in 40 + * large numbers and space conservation is desired. 41 + */ 42 + struct vmw_validation_bo_node { 43 + struct ttm_validate_buffer base; 44 + struct drm_hash_item hash; 45 + u32 as_mob : 1; 46 + u32 cpu_blit : 1; 47 + }; 48 + 49 + /** 50 + * struct vmw_validation_res_node - Resource validation metadata. 51 + * @head: List head for the resource validation list. 52 + * @hash: A hash entry used for the duplicate detection hash table. 53 + * @res: Reference counted resource pointer. 54 + * @new_backup: Non ref-counted pointer to new backup buffer to be assigned 55 + * to a resource. 56 + * @new_backup_offset: Offset into the new backup mob for resources that can 57 + * share MOBs. 58 + * @no_buffer_needed: Kernel does not need to allocate a MOB during validation, 59 + * the command stream provides a mob bind operation. 60 + * @switching_backup: The validation process is switching backup MOB. 61 + * @first_usage: True iff the resource has been seen only once in the current 62 + * validation batch. 63 + * @reserved: Whether the resource is currently reserved by this process. 64 + * @private: Optionally additional memory for caller-private data. 65 + * 66 + * Bit fields are used since these structures are allocated and freed in 67 + * large numbers and space conservation is desired. 68 + */ 69 + struct vmw_validation_res_node { 70 + struct list_head head; 71 + struct drm_hash_item hash; 72 + struct vmw_resource *res; 73 + struct vmw_buffer_object *new_backup; 74 + unsigned long new_backup_offset; 75 + u32 no_buffer_needed : 1; 76 + u32 switching_backup : 1; 77 + u32 first_usage : 1; 78 + u32 reserved : 1; 79 + unsigned long private[0]; 80 + }; 81 + 82 + /** 83 + * vmw_validation_mem_alloc - Allocate kernel memory from the validation 84 + * context based allocator 85 + * @ctx: The validation context 86 + * @size: The number of bytes to allocated. 
87 + * 88 + * The memory allocated may not exceed PAGE_SIZE, and the returned 89 + * address is aligned to sizeof(long). All memory allocated this way is 90 + * reclaimed after validation when calling any of the exported functions: 91 + * vmw_validation_unref_lists() 92 + * vmw_validation_revert() 93 + * vmw_validation_done() 94 + * 95 + * Return: Pointer to the allocated memory on success. NULL on failure. 96 + */ 97 + void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx, 98 + unsigned int size) 99 + { 100 + void *addr; 101 + 102 + size = vmw_validation_align(size); 103 + if (size > PAGE_SIZE) 104 + return NULL; 105 + 106 + if (ctx->mem_size_left < size) { 107 + struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO); 108 + 109 + if (!page) 110 + return NULL; 111 + 112 + list_add_tail(&page->lru, &ctx->page_list); 113 + ctx->page_address = page_address(page); 114 + ctx->mem_size_left = PAGE_SIZE; 115 + } 116 + 117 + addr = (void *) (ctx->page_address + (PAGE_SIZE - ctx->mem_size_left)); 118 + ctx->mem_size_left -= size; 119 + 120 + return addr; 121 + } 122 + 123 + /** 124 + * vmw_validation_mem_free - Free all memory allocated using 125 + * vmw_validation_mem_alloc() 126 + * @ctx: The validation context 127 + * 128 + * All memory previously allocated for this context using 129 + * vmw_validation_mem_alloc() is freed. 130 + */ 131 + static void vmw_validation_mem_free(struct vmw_validation_context *ctx) 132 + { 133 + struct page *entry, *next; 134 + 135 + list_for_each_entry_safe(entry, next, &ctx->page_list, lru) { 136 + list_del_init(&entry->lru); 137 + __free_page(entry); 138 + } 139 + 140 + ctx->mem_size_left = 0; 141 + } 142 + 143 + /** 144 + * vmw_validation_find_bo_dup - Find a duplicate buffer object entry in the 145 + * validation context's lists. 146 + * @ctx: The validation context to search. 147 + * @vbo: The buffer object to search for. 
148 + * 149 + * Return: Pointer to the struct vmw_validation_bo_node referencing the 150 + * duplicate, or NULL if none found. 151 + */ 152 + static struct vmw_validation_bo_node * 153 + vmw_validation_find_bo_dup(struct vmw_validation_context *ctx, 154 + struct vmw_buffer_object *vbo) 155 + { 156 + struct vmw_validation_bo_node *bo_node = NULL; 157 + 158 + if (!ctx->merge_dups) 159 + return NULL; 160 + 161 + if (ctx->ht) { 162 + struct drm_hash_item *hash; 163 + 164 + if (!drm_ht_find_item(ctx->ht, (unsigned long) vbo, &hash)) 165 + bo_node = container_of(hash, typeof(*bo_node), hash); 166 + } else { 167 + struct vmw_validation_bo_node *entry; 168 + 169 + list_for_each_entry(entry, &ctx->bo_list, base.head) { 170 + if (entry->base.bo == &vbo->base) { 171 + bo_node = entry; 172 + break; 173 + } 174 + } 175 + } 176 + 177 + return bo_node; 178 + } 179 + 180 + /** 181 + * vmw_validation_find_res_dup - Find a duplicate resource entry in the 182 + * validation context's lists. 183 + * @ctx: The validation context to search. 184 + * @vbo: The buffer object to search for. 185 + * 186 + * Return: Pointer to the struct vmw_validation_bo_node referencing the 187 + * duplicate, or NULL if none found. 
188 + */ 189 + static struct vmw_validation_res_node * 190 + vmw_validation_find_res_dup(struct vmw_validation_context *ctx, 191 + struct vmw_resource *res) 192 + { 193 + struct vmw_validation_res_node *res_node = NULL; 194 + 195 + if (!ctx->merge_dups) 196 + return NULL; 197 + 198 + if (ctx->ht) { 199 + struct drm_hash_item *hash; 200 + 201 + if (!drm_ht_find_item(ctx->ht, (unsigned long) res, &hash)) 202 + res_node = container_of(hash, typeof(*res_node), hash); 203 + } else { 204 + struct vmw_validation_res_node *entry; 205 + 206 + list_for_each_entry(entry, &ctx->resource_ctx_list, head) { 207 + if (entry->res == res) { 208 + res_node = entry; 209 + goto out; 210 + } 211 + } 212 + 213 + list_for_each_entry(entry, &ctx->resource_list, head) { 214 + if (entry->res == res) { 215 + res_node = entry; 216 + break; 217 + } 218 + } 219 + 220 + } 221 + out: 222 + return res_node; 223 + } 224 + 225 + /** 226 + * vmw_validation_add_bo - Add a buffer object to the validation context. 227 + * @ctx: The validation context. 228 + * @vbo: The buffer object. 229 + * @as_mob: Validate as mob, otherwise suitable for GMR operations. 230 + * @cpu_blit: Validate in a page-mappable location. 231 + * 232 + * Return: Zero on success, negative error code otherwise. 
233 + */ 234 + int vmw_validation_add_bo(struct vmw_validation_context *ctx, 235 + struct vmw_buffer_object *vbo, 236 + bool as_mob, 237 + bool cpu_blit) 238 + { 239 + struct vmw_validation_bo_node *bo_node; 240 + 241 + bo_node = vmw_validation_find_bo_dup(ctx, vbo); 242 + if (bo_node) { 243 + if (bo_node->as_mob != as_mob || 244 + bo_node->cpu_blit != cpu_blit) { 245 + DRM_ERROR("Inconsistent buffer usage.\n"); 246 + return -EINVAL; 247 + } 248 + } else { 249 + struct ttm_validate_buffer *val_buf; 250 + int ret; 251 + 252 + bo_node = vmw_validation_mem_alloc(ctx, sizeof(*bo_node)); 253 + if (!bo_node) 254 + return -ENOMEM; 255 + 256 + if (ctx->ht) { 257 + bo_node->hash.key = (unsigned long) vbo; 258 + ret = drm_ht_insert_item(ctx->ht, &bo_node->hash); 259 + if (ret) { 260 + DRM_ERROR("Failed to initialize a buffer " 261 + "validation entry.\n"); 262 + return ret; 263 + } 264 + } 265 + val_buf = &bo_node->base; 266 + val_buf->bo = ttm_bo_get_unless_zero(&vbo->base); 267 + if (!val_buf->bo) 268 + return -ESRCH; 269 + val_buf->shared = false; 270 + list_add_tail(&val_buf->head, &ctx->bo_list); 271 + bo_node->as_mob = as_mob; 272 + bo_node->cpu_blit = cpu_blit; 273 + } 274 + 275 + return 0; 276 + } 277 + 278 + /** 279 + * vmw_validation_add_resource - Add a resource to the validation context. 280 + * @ctx: The validation context. 281 + * @res: The resource. 282 + * @priv_size: Size of private, additional metadata. 283 + * @p_node: Output pointer of additional metadata address. 284 + * @first_usage: Whether this was the first time this resource was seen. 285 + * 286 + * Return: Zero on success, negative error code otherwise. 
287 + */ 288 + int vmw_validation_add_resource(struct vmw_validation_context *ctx, 289 + struct vmw_resource *res, 290 + size_t priv_size, 291 + void **p_node, 292 + bool *first_usage) 293 + { 294 + struct vmw_validation_res_node *node; 295 + int ret; 296 + 297 + node = vmw_validation_find_res_dup(ctx, res); 298 + if (node) { 299 + node->first_usage = 0; 300 + goto out_fill; 301 + } 302 + 303 + node = vmw_validation_mem_alloc(ctx, sizeof(*node) + priv_size); 304 + if (!node) { 305 + DRM_ERROR("Failed to allocate a resource validation " 306 + "entry.\n"); 307 + return -ENOMEM; 308 + } 309 + 310 + if (ctx->ht) { 311 + node->hash.key = (unsigned long) res; 312 + ret = drm_ht_insert_item(ctx->ht, &node->hash); 313 + if (ret) { 314 + DRM_ERROR("Failed to initialize a resource validation " 315 + "entry.\n"); 316 + return ret; 317 + } 318 + } 319 + node->res = vmw_resource_reference_unless_doomed(res); 320 + if (!node->res) 321 + return -ESRCH; 322 + 323 + node->first_usage = 1; 324 + if (!res->dev_priv->has_mob) { 325 + list_add_tail(&node->head, &ctx->resource_list); 326 + } else { 327 + switch (vmw_res_type(res)) { 328 + case vmw_res_context: 329 + case vmw_res_dx_context: 330 + list_add(&node->head, &ctx->resource_ctx_list); 331 + break; 332 + case vmw_res_cotable: 333 + list_add_tail(&node->head, &ctx->resource_ctx_list); 334 + break; 335 + default: 336 + list_add_tail(&node->head, &ctx->resource_list); 337 + break; 338 + } 339 + } 340 + 341 + out_fill: 342 + if (first_usage) 343 + *first_usage = node->first_usage; 344 + if (p_node) 345 + *p_node = &node->private; 346 + 347 + return 0; 348 + } 349 + 350 + /** 351 + * vmw_validation_res_switch_backup - Register a backup MOB switch during 352 + * validation. 353 + * @ctx: The validation context. 354 + * @val_private: The additional meta-data pointer returned when the 355 + * resource was registered with the validation context. Used to identify 356 + * the resource. 357 + * @vbo: The new backup buffer object MOB. 
This buffer object needs to have 358 + * already been registered with the validation context. 359 + * @backup_offset: Offset into the new backup MOB. 360 + */ 361 + void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx, 362 + void *val_private, 363 + struct vmw_buffer_object *vbo, 364 + unsigned long backup_offset) 365 + { 366 + struct vmw_validation_res_node *val; 367 + 368 + val = container_of(val_private, typeof(*val), private); 369 + 370 + val->switching_backup = 1; 371 + if (val->first_usage) 372 + val->no_buffer_needed = 1; 373 + 374 + val->new_backup = vbo; 375 + val->new_backup_offset = backup_offset; 376 + } 377 + 378 + /** 379 + * vmw_validation_res_reserve - Reserve all resources registered with this 380 + * validation context. 381 + * @ctx: The validation context. 382 + * @intr: Use interruptible waits when possible. 383 + * 384 + * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error 385 + * code on failure. 386 + */ 387 + int vmw_validation_res_reserve(struct vmw_validation_context *ctx, 388 + bool intr) 389 + { 390 + struct vmw_validation_res_node *val; 391 + int ret = 0; 392 + 393 + list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list); 394 + 395 + list_for_each_entry(val, &ctx->resource_list, head) { 396 + struct vmw_resource *res = val->res; 397 + 398 + ret = vmw_resource_reserve(res, intr, val->no_buffer_needed); 399 + if (ret) 400 + goto out_unreserve; 401 + 402 + val->reserved = 1; 403 + if (res->backup) { 404 + struct vmw_buffer_object *vbo = res->backup; 405 + 406 + ret = vmw_validation_add_bo 407 + (ctx, vbo, vmw_resource_needs_backup(res), 408 + false); 409 + if (ret) 410 + goto out_unreserve; 411 + } 412 + } 413 + 414 + return 0; 415 + 416 + out_unreserve: 417 + vmw_validation_res_unreserve(ctx, true); 418 + return ret; 419 + } 420 + 421 + /** 422 + * vmw_validation_res_unreserve - Unreserve all reserved resources 423 + * registered with this validation context. 
424 + * @ctx: The validation context. 425 + * @backoff: Whether this is a backoff- of a commit-type operation. This 426 + * is used to determine whether to switch backup MOBs or not. 427 + */ 428 + void vmw_validation_res_unreserve(struct vmw_validation_context *ctx, 429 + bool backoff) 430 + { 431 + struct vmw_validation_res_node *val; 432 + 433 + list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list); 434 + 435 + list_for_each_entry(val, &ctx->resource_list, head) { 436 + if (val->reserved) 437 + vmw_resource_unreserve(val->res, 438 + !backoff && 439 + val->switching_backup, 440 + val->new_backup, 441 + val->new_backup_offset); 442 + } 443 + } 444 + 445 + /** 446 + * vmw_validation_bo_validate_single - Validate a single buffer object. 447 + * @bo: The TTM buffer object base. 448 + * @interruptible: Whether to perform waits interruptible if possible. 449 + * @validate_as_mob: Whether to validate in MOB memory. 450 + * 451 + * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error 452 + * code on failure. 453 + */ 454 + int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo, 455 + bool interruptible, 456 + bool validate_as_mob) 457 + { 458 + struct vmw_buffer_object *vbo = 459 + container_of(bo, struct vmw_buffer_object, base); 460 + struct ttm_operation_ctx ctx = { 461 + .interruptible = interruptible, 462 + .no_wait_gpu = false 463 + }; 464 + int ret; 465 + 466 + if (vbo->pin_count > 0) 467 + return 0; 468 + 469 + if (validate_as_mob) 470 + return ttm_bo_validate(bo, &vmw_mob_placement, &ctx); 471 + 472 + /** 473 + * Put BO in VRAM if there is space, otherwise as a GMR. 474 + * If there is no space in VRAM and GMR ids are all used up, 475 + * start evicting GMRs to make room. If the DMA buffer can't be 476 + * used as a GMR, this will return -ENOMEM. 
477 + */ 478 + 479 + ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx); 480 + if (ret == 0 || ret == -ERESTARTSYS) 481 + return ret; 482 + 483 + /** 484 + * If that failed, try VRAM again, this time evicting 485 + * previous contents. 486 + */ 487 + 488 + ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx); 489 + return ret; 490 + } 491 + 492 + /** 493 + * vmw_validation_bo_validate - Validate all buffer objects registered with 494 + * the validation context. 495 + * @ctx: The validation context. 496 + * @intr: Whether to perform waits interruptible if possible. 497 + * 498 + * Return: Zero on success, -ERESTARTSYS if interrupted, 499 + * negative error code on failure. 500 + */ 501 + int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr) 502 + { 503 + struct vmw_validation_bo_node *entry; 504 + int ret; 505 + 506 + list_for_each_entry(entry, &ctx->bo_list, base.head) { 507 + if (entry->cpu_blit) { 508 + struct ttm_operation_ctx ctx = { 509 + .interruptible = intr, 510 + .no_wait_gpu = false 511 + }; 512 + 513 + ret = ttm_bo_validate(entry->base.bo, 514 + &vmw_nonfixed_placement, &ctx); 515 + } else { 516 + ret = vmw_validation_bo_validate_single 517 + (entry->base.bo, intr, entry->as_mob); 518 + } 519 + if (ret) 520 + return ret; 521 + } 522 + return 0; 523 + } 524 + 525 + /** 526 + * vmw_validation_res_validate - Validate all resources registered with the 527 + * validation context. 528 + * @ctx: The validation context. 529 + * @intr: Whether to perform waits interruptible if possible. 530 + * 531 + * Before this function is called, all resource backup buffers must have 532 + * been validated. 533 + * 534 + * Return: Zero on success, -ERESTARTSYS if interrupted, 535 + * negative error code on failure. 
536 + */ 537 + int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr) 538 + { 539 + struct vmw_validation_res_node *val; 540 + int ret; 541 + 542 + list_for_each_entry(val, &ctx->resource_list, head) { 543 + struct vmw_resource *res = val->res; 544 + struct vmw_buffer_object *backup = res->backup; 545 + 546 + ret = vmw_resource_validate(res, intr); 547 + if (ret) { 548 + if (ret != -ERESTARTSYS) 549 + DRM_ERROR("Failed to validate resource.\n"); 550 + return ret; 551 + } 552 + 553 + /* Check if the resource switched backup buffer */ 554 + if (backup && res->backup && (backup != res->backup)) { 555 + struct vmw_buffer_object *vbo = res->backup; 556 + 557 + ret = vmw_validation_add_bo 558 + (ctx, vbo, vmw_resource_needs_backup(res), 559 + false); 560 + if (ret) 561 + return ret; 562 + } 563 + } 564 + return 0; 565 + } 566 + 567 + /** 568 + * vmw_validation_drop_ht - Reset the hash table used for duplicate finding 569 + * and unregister it from this validation context. 570 + * @ctx: The validation context. 571 + * 572 + * The hash table used for duplicate finding is an expensive resource and 573 + * may be protected by mutexes that may cause deadlocks during resource 574 + * unreferencing if held. After resource- and buffer object registering, 575 + * there is no longer any use for this hash table, so allow freeing it 576 + * either to shorten any mutex locking time, or before resources- and 577 + * buffer objects are freed during validation context cleanup. 
578 + */ 579 + void vmw_validation_drop_ht(struct vmw_validation_context *ctx) 580 + { 581 + struct vmw_validation_bo_node *entry; 582 + struct vmw_validation_res_node *val; 583 + 584 + if (!ctx->ht) 585 + return; 586 + 587 + list_for_each_entry(entry, &ctx->bo_list, base.head) 588 + (void) drm_ht_remove_item(ctx->ht, &entry->hash); 589 + 590 + list_for_each_entry(val, &ctx->resource_list, head) 591 + (void) drm_ht_remove_item(ctx->ht, &val->hash); 592 + 593 + list_for_each_entry(val, &ctx->resource_ctx_list, head) 594 + (void) drm_ht_remove_item(ctx->ht, &val->hash); 595 + 596 + ctx->ht = NULL; 597 + } 598 + 599 + /** 600 + * vmw_validation_unref_lists - Unregister previously registered buffer 601 + * object and resources. 602 + * @ctx: The validation context. 603 + * 604 + * Note that this function may cause buffer object- and resource destructors 605 + * to be invoked. 606 + */ 607 + void vmw_validation_unref_lists(struct vmw_validation_context *ctx) 608 + { 609 + struct vmw_validation_bo_node *entry; 610 + struct vmw_validation_res_node *val; 611 + 612 + list_for_each_entry(entry, &ctx->bo_list, base.head) 613 + ttm_bo_unref(&entry->base.bo); 614 + 615 + list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list); 616 + list_for_each_entry(val, &ctx->resource_list, head) 617 + vmw_resource_unreference(&val->res); 618 + 619 + /* 620 + * No need to detach each list entry since they are all freed with 621 + * vmw_validation_free_mem. Just make the inaccessible. 622 + */ 623 + INIT_LIST_HEAD(&ctx->bo_list); 624 + INIT_LIST_HEAD(&ctx->resource_list); 625 + 626 + vmw_validation_mem_free(ctx); 627 + } 628 + 629 + /** 630 + * vmw_validation_prepare - Prepare a validation context for command 631 + * submission. 632 + * @ctx: The validation context. 633 + * @mutex: The mutex used to protect resource reservation. 634 + * @intr: Whether to perform waits interruptible if possible. 
635 + * 636 + * Note that the single reservation mutex @mutex is an unfortunate 637 + * construct. Ideally resource reservation should be moved to per-resource 638 + * ww_mutexes. 639 + * If this functions doesn't return Zero to indicate success, all resources 640 + * are left unreserved but still referenced. 641 + * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code 642 + * on error. 643 + */ 644 + int vmw_validation_prepare(struct vmw_validation_context *ctx, 645 + struct mutex *mutex, 646 + bool intr) 647 + { 648 + int ret = 0; 649 + 650 + if (mutex) { 651 + if (intr) 652 + ret = mutex_lock_interruptible(mutex); 653 + else 654 + mutex_lock(mutex); 655 + if (ret) 656 + return -ERESTARTSYS; 657 + } 658 + 659 + ctx->res_mutex = mutex; 660 + ret = vmw_validation_res_reserve(ctx, intr); 661 + if (ret) 662 + goto out_no_res_reserve; 663 + 664 + ret = vmw_validation_bo_reserve(ctx, intr); 665 + if (ret) 666 + goto out_no_bo_reserve; 667 + 668 + ret = vmw_validation_bo_validate(ctx, intr); 669 + if (ret) 670 + goto out_no_validate; 671 + 672 + ret = vmw_validation_res_validate(ctx, intr); 673 + if (ret) 674 + goto out_no_validate; 675 + 676 + return 0; 677 + 678 + out_no_validate: 679 + vmw_validation_bo_backoff(ctx); 680 + out_no_bo_reserve: 681 + vmw_validation_res_unreserve(ctx, true); 682 + out_no_res_reserve: 683 + if (mutex) 684 + mutex_unlock(mutex); 685 + 686 + return ret; 687 + } 688 + 689 + /** 690 + * vmw_validation_revert - Revert validation actions if command submission 691 + * failed. 692 + * 693 + * @ctx: The validation context. 694 + * 695 + * The caller still needs to unref resources after a call to this function. 
696 + */ 697 + void vmw_validation_revert(struct vmw_validation_context *ctx) 698 + { 699 + vmw_validation_bo_backoff(ctx); 700 + vmw_validation_res_unreserve(ctx, true); 701 + if (ctx->res_mutex) 702 + mutex_unlock(ctx->res_mutex); 703 + vmw_validation_unref_lists(ctx); 704 + } 705 + 706 + /** 707 + * vmw_validation_cone - Commit validation actions after command submission 708 + * success. 709 + * @ctx: The validation context. 710 + * @fence: Fence with which to fence all buffer objects taking part in the 711 + * command submission. 712 + * 713 + * The caller does NOT need to unref resources after a call to this function. 714 + */ 715 + void vmw_validation_done(struct vmw_validation_context *ctx, 716 + struct vmw_fence_obj *fence) 717 + { 718 + vmw_validation_bo_fence(ctx, fence); 719 + vmw_validation_res_unreserve(ctx, false); 720 + if (ctx->res_mutex) 721 + mutex_unlock(ctx->res_mutex); 722 + vmw_validation_unref_lists(ctx); 723 + } 724 + 725 + /** 726 + * vmw_validation_preload_bo - Preload the validation memory allocator for a 727 + * call to vmw_validation_add_bo(). 728 + * @ctx: Pointer to the validation context. 729 + * 730 + * Iff this function returns successfully, the next call to 731 + * vmw_validation_add_bo() is guaranteed not to sleep. An error is not fatal 732 + * but voids the guarantee. 733 + * 734 + * Returns: Zero if successful, %-EINVAL otherwise. 735 + */ 736 + int vmw_validation_preload_bo(struct vmw_validation_context *ctx) 737 + { 738 + unsigned int size = sizeof(struct vmw_validation_bo_node); 739 + 740 + if (!vmw_validation_mem_alloc(ctx, size)) 741 + return -ENOMEM; 742 + 743 + ctx->mem_size_left += size; 744 + return 0; 745 + } 746 + 747 + /** 748 + * vmw_validation_preload_res - Preload the validation memory allocator for a 749 + * call to vmw_validation_add_res(). 750 + * @ctx: Pointer to the validation context. 751 + * @size: Size of the validation node extra data. See below. 
752 + *
753 + * Iff this function returns successfully, the next call to
754 + * vmw_validation_add_res() with the same or smaller @size is guaranteed not to
755 + * sleep. An error is not fatal but voids the guarantee.
756 + *
757 + * Returns: Zero if successful, %-ENOMEM otherwise.
758 + */
759 + int vmw_validation_preload_res(struct vmw_validation_context *ctx,
760 + 			       unsigned int size)
761 + {
762 + 	size = vmw_validation_align(sizeof(struct vmw_validation_res_node) +
763 + 				    size) +
764 + 	       vmw_validation_align(sizeof(struct vmw_validation_bo_node));
765 + 	if (!vmw_validation_mem_alloc(ctx, size))
766 + 		return -ENOMEM;
767 + 
768 + 	ctx->mem_size_left += size;
769 + 	return 0;
770 + }
+227
drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 OR MIT */ 2 + /************************************************************************** 3 + * 4 + * Copyright © 2018 VMware, Inc., Palo Alto, CA., USA 5 + * All Rights Reserved. 6 + * 7 + * Permission is hereby granted, free of charge, to any person obtaining a 8 + * copy of this software and associated documentation files (the 9 + * "Software"), to deal in the Software without restriction, including 10 + * without limitation the rights to use, copy, modify, merge, publish, 11 + * distribute, sub license, and/or sell copies of the Software, and to 12 + * permit persons to whom the Software is furnished to do so, subject to 13 + * the following conditions: 14 + * 15 + * The above copyright notice and this permission notice (including the 16 + * next paragraph) shall be included in all copies or substantial portions 17 + * of the Software. 18 + * 19 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 20 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 21 + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 22 + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 23 + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 24 + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 25 + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
26 + *
27 + **************************************************************************/
28 + #ifndef _VMWGFX_VALIDATION_H_
29 + #define _VMWGFX_VALIDATION_H_
30 + 
31 + #include <drm/drm_hashtab.h>
32 + #include <linux/list.h>
33 + #include <linux/ww_mutex.h>
34 + #include <drm/ttm/ttm_execbuf_util.h>
35 + 
36 + /**
37 + * struct vmw_validation_context - Per command submission validation context
38 + * @ht: Hash table used to find resource- or buffer object duplicates
39 + * @resource_list: List head for resource validation metadata
40 + * @resource_ctx_list: List head for resource validation metadata for
41 + * resources that need to be validated before those in @resource_list
42 + * @bo_list: List head for buffer objects
43 + * @page_list: List of pages used by the memory allocator
44 + * @ticket: Ticket used for ww mutex locking
45 + * @res_mutex: Pointer to mutex used for resource reserving
46 + * @merge_dups: Whether to merge metadata for duplicate resources or
47 + * buffer objects
48 + * @mem_size_left: Free memory left in the last page in @page_list
49 + * @page_address: Kernel virtual address of the last page in @page_list
50 + */
51 + struct vmw_validation_context {
52 + 	struct drm_open_hash *ht;
53 + 	struct list_head resource_list;
54 + 	struct list_head resource_ctx_list;
55 + 	struct list_head bo_list;
56 + 	struct list_head page_list;
57 + 	struct ww_acquire_ctx ticket;
58 + 	struct mutex *res_mutex;
59 + 	unsigned int merge_dups;
60 + 	unsigned int mem_size_left;
61 + 	u8 *page_address;
62 + };
63 + 
64 + struct vmw_buffer_object;
65 + struct vmw_resource;
66 + struct vmw_fence_obj;
67 + 
68 + #if 0
69 + /**
70 + * DECLARE_VAL_CONTEXT - Declare a validation context with initialization
71 + * @_name: The name of the variable
72 + * @_ht: The hash table used to find dups or NULL if none
73 + * @_merge_dups: Whether to merge duplicate buffer object- or resource
74 + * entries.
If set to true, ideally a hash table pointer should be supplied 75 + * as well unless the number of resources and buffer objects per validation 76 + * is known to be very small 77 + */ 78 + #endif 79 + #define DECLARE_VAL_CONTEXT(_name, _ht, _merge_dups) \ 80 + struct vmw_validation_context _name = \ 81 + { .ht = _ht, \ 82 + .resource_list = LIST_HEAD_INIT((_name).resource_list), \ 83 + .resource_ctx_list = LIST_HEAD_INIT((_name).resource_ctx_list), \ 84 + .bo_list = LIST_HEAD_INIT((_name).bo_list), \ 85 + .page_list = LIST_HEAD_INIT((_name).page_list), \ 86 + .res_mutex = NULL, \ 87 + .merge_dups = _merge_dups, \ 88 + .mem_size_left = 0, \ 89 + } 90 + 91 + /** 92 + * vmw_validation_has_bos - return whether the validation context has 93 + * any buffer objects registered. 94 + * 95 + * @ctx: The validation context 96 + * Returns: Whether any buffer objects are registered 97 + */ 98 + static inline bool 99 + vmw_validation_has_bos(struct vmw_validation_context *ctx) 100 + { 101 + return !list_empty(&ctx->bo_list); 102 + } 103 + 104 + /** 105 + * vmw_validation_set_ht - Register a hash table for duplicate finding 106 + * @ctx: The validation context 107 + * @ht: Pointer to a hash table to use for duplicate finding 108 + * This function is intended to be used if the hash table wasn't 109 + * available at validation context declaration time 110 + */ 111 + static inline void vmw_validation_set_ht(struct vmw_validation_context *ctx, 112 + struct drm_open_hash *ht) 113 + { 114 + ctx->ht = ht; 115 + } 116 + 117 + /** 118 + * vmw_validation_bo_reserve - Reserve buffer objects registered with a 119 + * validation context 120 + * @ctx: The validation context 121 + * @intr: Perform waits interruptible 122 + * 123 + * Return: Zero on success, -ERESTARTSYS when interrupted, negative error 124 + * code on failure 125 + */ 126 + static inline int 127 + vmw_validation_bo_reserve(struct vmw_validation_context *ctx, 128 + bool intr) 129 + { 130 + return 
ttm_eu_reserve_buffers(&ctx->ticket, &ctx->bo_list, intr,
131 + 				      NULL);
132 + }
133 + 
134 + /**
135 + * vmw_validation_bo_backoff - Unreserve buffer objects registered with a
136 + * validation context
137 + * @ctx: The validation context
138 + *
139 + * This function unreserves the buffer objects previously reserved using
140 + * vmw_validation_bo_reserve. It's typically used as part of an error path
141 + */
142 + static inline void
143 + vmw_validation_bo_backoff(struct vmw_validation_context *ctx)
144 + {
145 + 	ttm_eu_backoff_reservation(&ctx->ticket, &ctx->bo_list);
146 + }
147 + 
148 + /**
149 + * vmw_validation_bo_fence - Unreserve and fence buffer objects registered
150 + * with a validation context
151 + * @ctx: The validation context
 + * @fence: The fence object with which to fence the registered buffer objects
152 + *
153 + * This function unreserves the buffer objects previously reserved using
154 + * vmw_validation_bo_reserve, and fences them with a fence object.
155 + */
156 + static inline void
157 + vmw_validation_bo_fence(struct vmw_validation_context *ctx,
158 + 			struct vmw_fence_obj *fence)
159 + {
160 + 	ttm_eu_fence_buffer_objects(&ctx->ticket, &ctx->bo_list,
161 + 				    (void *) fence);
162 + }
163 + 
164 + /**
165 + * vmw_validation_context_init - Initialize a validation context
166 + * @ctx: Pointer to the validation context to initialize
167 + *
168 + * This function initializes a validation context with @merge_dups set
169 + * to false
170 + */
171 + static inline void
172 + vmw_validation_context_init(struct vmw_validation_context *ctx)
173 + {
174 + 	memset(ctx, 0, sizeof(*ctx));
175 + 	INIT_LIST_HEAD(&ctx->resource_list);
176 + 	INIT_LIST_HEAD(&ctx->resource_ctx_list);
177 + 	INIT_LIST_HEAD(&ctx->bo_list);
178 + }
179 + 
180 + /**
181 + * vmw_validation_align - Align a validation memory allocation
182 + * @val: The size to be aligned
183 + *
184 + * Returns: @val aligned to the granularity used by the validation memory
185 + * allocator.
186 + */ 187 + static inline unsigned int vmw_validation_align(unsigned int val) 188 + { 189 + return ALIGN(val, sizeof(long)); 190 + } 191 + 192 + int vmw_validation_add_bo(struct vmw_validation_context *ctx, 193 + struct vmw_buffer_object *vbo, 194 + bool as_mob, bool cpu_blit); 195 + int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo, 196 + bool interruptible, 197 + bool validate_as_mob); 198 + int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr); 199 + void vmw_validation_unref_lists(struct vmw_validation_context *ctx); 200 + int vmw_validation_add_resource(struct vmw_validation_context *ctx, 201 + struct vmw_resource *res, 202 + size_t priv_size, 203 + void **p_node, 204 + bool *first_usage); 205 + void vmw_validation_drop_ht(struct vmw_validation_context *ctx); 206 + int vmw_validation_res_reserve(struct vmw_validation_context *ctx, 207 + bool intr); 208 + void vmw_validation_res_unreserve(struct vmw_validation_context *ctx, 209 + bool backoff); 210 + void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx, 211 + void *val_private, 212 + struct vmw_buffer_object *vbo, 213 + unsigned long backup_offset); 214 + int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr); 215 + 216 + int vmw_validation_prepare(struct vmw_validation_context *ctx, 217 + struct mutex *mutex, bool intr); 218 + void vmw_validation_revert(struct vmw_validation_context *ctx); 219 + void vmw_validation_done(struct vmw_validation_context *ctx, 220 + struct vmw_fence_obj *fence); 221 + 222 + void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx, 223 + unsigned int size); 224 + int vmw_validation_preload_bo(struct vmw_validation_context *ctx); 225 + int vmw_validation_preload_res(struct vmw_validation_context *ctx, 226 + unsigned int size); 227 + #endif
+18
include/drm/ttm/ttm_bo_api.h
··· 313 313 } 314 314 315 315 /** 316 + * ttm_bo_get_unless_zero - reference a struct ttm_buffer_object unless 317 + * its refcount has already reached zero. 318 + * @bo: The buffer object. 319 + * 320 + * Used to reference a TTM buffer object in lookups where the object is removed 321 + * from the lookup structure during the destructor and for RCU lookups. 322 + * 323 + * Returns: @bo if the referencing was successful, NULL otherwise. 324 + */ 325 + static inline __must_check struct ttm_buffer_object * 326 + ttm_bo_get_unless_zero(struct ttm_buffer_object *bo) 327 + { 328 + if (!kref_get_unless_zero(&bo->kref)) 329 + return NULL; 330 + return bo; 331 + } 332 + 333 + /** 316 334 * ttm_bo_wait - wait for buffer idle. 317 335 * 318 336 * @bo: The buffer object.
include/drm/ttm/ttm_lock.h drivers/gpu/drm/vmwgfx/ttm_lock.h
+26 -5
include/drm/ttm/ttm_object.h drivers/gpu/drm/vmwgfx/ttm_object.h
··· 42 42 #include <linux/kref.h> 43 43 #include <linux/rcupdate.h> 44 44 #include <linux/dma-buf.h> 45 - 46 - #include "ttm_memory.h" 45 + #include <drm/ttm/ttm_memory.h> 47 46 48 47 /** 49 48 * enum ttm_ref_type ··· 124 125 125 126 struct ttm_base_object { 126 127 struct rcu_head rhead; 127 - struct drm_hash_item hash; 128 - enum ttm_object_type object_type; 129 - bool shareable; 130 128 struct ttm_object_file *tfile; 131 129 struct kref refcount; 132 130 void (*refcount_release) (struct ttm_base_object **base); 133 131 void (*ref_obj_release) (struct ttm_base_object *base, 134 132 enum ttm_ref_type ref_type); 133 + u32 handle; 134 + enum ttm_object_type object_type; 135 + u32 shareable; 135 136 }; 136 137 137 138 ··· 350 351 351 352 #define ttm_prime_object_kfree(__obj, __prime) \ 352 353 kfree_rcu(__obj, __prime.base.rhead) 354 + 355 + /* 356 + * Extra memory required by the base object's idr storage, which is allocated 357 + * separately from the base object itself. We estimate an on-average 128 bytes 358 + * per idr. 359 + */ 360 + #define TTM_OBJ_EXTRA_SIZE 128 361 + 362 + struct ttm_base_object * 363 + ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key); 364 + 365 + /** 366 + * ttm_base_object_noref_release - release a base object pointer looked up 367 + * without reference 368 + * 369 + * Releases a base object pointer looked up with ttm_base_object_noref_lookup(). 370 + */ 371 + static inline void ttm_base_object_noref_release(void) 372 + { 373 + __acquire(RCU); 374 + rcu_read_unlock(); 375 + } 353 376 #endif