Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/vmwgfx: get rid of different types of fence_flags entirely

Only one type was ever used. This is needed to simplify the fence
support in the next commit.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>

5 files changed: +26 -52
+1 -4
drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
··· 826 826 827 827 static bool vmw_sync_obj_signaled(void *sync_obj) 828 828 { 829 - return vmw_fence_obj_signaled((struct vmw_fence_obj *) sync_obj, 830 - DRM_VMW_FENCE_FLAG_EXEC); 831 - 829 + return vmw_fence_obj_signaled((struct vmw_fence_obj *) sync_obj); 832 830 } 833 831 834 832 static int vmw_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible) 835 833 { 836 834 return vmw_fence_obj_wait((struct vmw_fence_obj *) sync_obj, 837 - DRM_VMW_FENCE_FLAG_EXEC, 838 835 lazy, interruptible, 839 836 VMW_FENCE_WAIT_TIMEOUT); 840 837 }
-1
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
··· 342 342 uint32_t *cmd_bounce; 343 343 uint32_t cmd_bounce_size; 344 344 struct list_head resource_list; 345 - uint32_t fence_flags; 346 345 struct ttm_buffer_object *cur_query_bo; 347 346 struct list_head res_relocations; 348 347 uint32_t *buf_start;
+3 -11
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
··· 350 350 vval_buf->validate_as_mob = validate_as_mob; 351 351 } 352 352 353 - sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC; 354 - 355 353 if (p_val_node) 356 354 *p_val_node = val_node; 357 355 ··· 2335 2337 2336 2338 if (p_handle != NULL) 2337 2339 ret = vmw_user_fence_create(file_priv, dev_priv->fman, 2338 - sequence, 2339 - DRM_VMW_FENCE_FLAG_EXEC, 2340 - p_fence, p_handle); 2340 + sequence, p_fence, p_handle); 2341 2341 else 2342 - ret = vmw_fence_create(dev_priv->fman, sequence, 2343 - DRM_VMW_FENCE_FLAG_EXEC, 2344 - p_fence); 2342 + ret = vmw_fence_create(dev_priv->fman, sequence, p_fence); 2345 2343 2346 2344 if (unlikely(ret != 0 && !synced)) { 2347 2345 (void) vmw_fallback_wait(dev_priv, false, false, ··· 2410 2416 ttm_ref_object_base_unref(vmw_fp->tfile, 2411 2417 fence_handle, TTM_REF_USAGE); 2412 2418 DRM_ERROR("Fence copy error. Syncing.\n"); 2413 - (void) vmw_fence_obj_wait(fence, fence->signal_mask, 2414 - false, false, 2419 + (void) vmw_fence_obj_wait(fence, false, false, 2415 2420 VMW_FENCE_WAIT_TIMEOUT); 2416 2421 } 2417 2422 } ··· 2462 2469 sw_context->fp = vmw_fpriv(file_priv); 2463 2470 sw_context->cur_reloc = 0; 2464 2471 sw_context->cur_val_buf = 0; 2465 - sw_context->fence_flags = 0; 2466 2472 INIT_LIST_HEAD(&sw_context->resource_list); 2467 2473 sw_context->cur_query_bo = dev_priv->pinned_bo; 2468 2474 sw_context->last_query_ctx = NULL;
+20 -30
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
··· 207 207 } 208 208 209 209 static int vmw_fence_obj_init(struct vmw_fence_manager *fman, 210 - struct vmw_fence_obj *fence, 211 - u32 seqno, 212 - uint32_t mask, 210 + struct vmw_fence_obj *fence, u32 seqno, 213 211 void (*destroy) (struct vmw_fence_obj *fence)) 214 212 { 215 213 unsigned long irq_flags; ··· 218 220 INIT_LIST_HEAD(&fence->seq_passed_actions); 219 221 fence->fman = fman; 220 222 fence->signaled = 0; 221 - fence->signal_mask = mask; 222 223 kref_init(&fence->kref); 223 224 fence->destroy = destroy; 224 225 init_waitqueue_head(&fence->queue); ··· 353 356 u32 goal_seqno; 354 357 __le32 __iomem *fifo_mem; 355 358 356 - if (fence->signaled & DRM_VMW_FENCE_FLAG_EXEC) 359 + if (fence->signaled) 357 360 return false; 358 361 359 362 fifo_mem = fence->fman->dev_priv->mmio_virt; ··· 383 386 list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) { 384 387 if (seqno - fence->seqno < VMW_FENCE_WRAP) { 385 388 list_del_init(&fence->head); 386 - fence->signaled |= DRM_VMW_FENCE_FLAG_EXEC; 389 + fence->signaled = 1; 387 390 INIT_LIST_HEAD(&action_list); 388 391 list_splice_init(&fence->seq_passed_actions, 389 392 &action_list); ··· 414 417 } 415 418 } 416 419 417 - bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence, 418 - uint32_t flags) 420 + bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence) 419 421 { 420 422 struct vmw_fence_manager *fman = fence->fman; 421 423 unsigned long irq_flags; ··· 424 428 signaled = fence->signaled; 425 429 spin_unlock_irqrestore(&fman->lock, irq_flags); 426 430 427 - flags &= fence->signal_mask; 428 - if ((signaled & flags) == flags) 431 + if (signaled) 429 432 return 1; 430 433 431 - if ((signaled & DRM_VMW_FENCE_FLAG_EXEC) == 0) 432 - vmw_fences_update(fman); 434 + vmw_fences_update(fman); 433 435 434 436 spin_lock_irqsave(&fman->lock, irq_flags); 435 437 signaled = fence->signaled; 436 438 spin_unlock_irqrestore(&fman->lock, irq_flags); 437 439 438 - return ((signaled & flags) == flags); 440 + return signaled;
439 441 } 440 442 441 - int vmw_fence_obj_wait(struct vmw_fence_obj *fence, 442 - uint32_t flags, bool lazy, 443 + int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy, 443 444 bool interruptible, unsigned long timeout) 444 445 { 445 446 struct vmw_private *dev_priv = fence->fman->dev_priv; 446 447 long ret; 447 448 448 - if (likely(vmw_fence_obj_signaled(fence, flags))) 449 + if (likely(vmw_fence_obj_signaled(fence))) 449 450 return 0; 450 451 451 452 vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); ··· 451 458 if (interruptible) 452 459 ret = wait_event_interruptible_timeout 453 460 (fence->queue, 454 - vmw_fence_obj_signaled(fence, flags), 461 + vmw_fence_obj_signaled(fence), 455 462 timeout); 456 463 else 457 464 ret = wait_event_timeout 458 465 (fence->queue, 459 - vmw_fence_obj_signaled(fence, flags), 466 + vmw_fence_obj_signaled(fence), 460 467 timeout); 461 468 462 469 vmw_seqno_waiter_remove(dev_priv); ··· 490 497 491 498 int vmw_fence_create(struct vmw_fence_manager *fman, 492 499 uint32_t seqno, 493 - uint32_t mask, 494 500 struct vmw_fence_obj **p_fence) 495 501 { 496 502 struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv); ··· 507 515 goto out_no_object; 508 516 } 509 517 510 - ret = vmw_fence_obj_init(fman, fence, seqno, mask, 518 + ret = vmw_fence_obj_init(fman, fence, seqno, 511 519 vmw_fence_destroy); 512 520 if (unlikely(ret != 0)) 513 521 goto out_err_init; ··· 551 559 int vmw_user_fence_create(struct drm_file *file_priv, 552 560 struct vmw_fence_manager *fman, 553 561 uint32_t seqno, 554 - uint32_t mask, 555 562 struct vmw_fence_obj **p_fence, 556 563 uint32_t *p_handle) 557 564 { ··· 577 586 } 578 587 579 588 ret = vmw_fence_obj_init(fman, &ufence->fence, seqno, 580 - mask, vmw_user_fence_destroy); 589 + vmw_user_fence_destroy); 581 590 if (unlikely(ret != 0)) { 582 591 kfree(ufence); 583 592 goto out_no_object; ··· 638 647 kref_get(&fence->kref); 639 648 spin_unlock_irq(&fman->lock); 640 649 641 - ret = vmw_fence_obj_wait(fence, fence->signal_mask,
642 - false, false, 650 + ret = vmw_fence_obj_wait(fence, false, false, 643 651 VMW_FENCE_WAIT_TIMEOUT); 644 652 645 653 if (unlikely(ret != 0)) { 646 654 list_del_init(&fence->head); 647 - fence->signaled |= DRM_VMW_FENCE_FLAG_EXEC; 655 + fence->signaled = 1; 648 656 INIT_LIST_HEAD(&action_list); 649 657 list_splice_init(&fence->seq_passed_actions, 650 658 &action_list); ··· 706 716 707 717 timeout = jiffies; 708 718 if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) { 709 - ret = ((vmw_fence_obj_signaled(fence, arg->flags)) ? 719 + ret = ((vmw_fence_obj_signaled(fence)) ? 710 720 0 : -EBUSY); 711 721 goto out; 712 722 } 713 723 714 724 timeout = (unsigned long)arg->kernel_cookie - timeout; 715 725 716 - ret = vmw_fence_obj_wait(fence, arg->flags, arg->lazy, true, timeout); 726 + ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout); 717 727 718 728 out: 719 729 ttm_base_object_unref(&base); ··· 750 760 fence = &(container_of(base, struct vmw_user_fence, base)->fence); 751 761 fman = fence->fman; 752 762 753 - arg->signaled = vmw_fence_obj_signaled(fence, arg->flags); 763 + arg->signaled = vmw_fence_obj_signaled(fence); 754 764 spin_lock_irq(&fman->lock); 755 765 756 - arg->signaled_flags = fence->signaled; 766 + arg->signaled_flags = arg->flags; 757 767 arg->passed_seqno = dev_priv->last_read_seqno; 758 768 spin_unlock_irq(&fman->lock); ··· 898 908 spin_lock_irqsave(&fman->lock, irq_flags); 899 909 900 910 fman->pending_actions[action->type]++; 901 - if (fence->signaled & DRM_VMW_FENCE_FLAG_EXEC) { 911 + if (fence->signaled) { 902 912 struct list_head action_list; 903 913 904 914 INIT_LIST_HEAD(&action_list);
+2 -6
drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
··· 56 56 struct vmw_fence_manager *fman; 57 57 struct list_head head; 58 58 uint32_t signaled; 59 - uint32_t signal_mask; 60 59 struct list_head seq_passed_actions; 61 60 void (*destroy)(struct vmw_fence_obj *fence); 62 61 wait_queue_head_t queue; ··· 73 74 74 75 extern void vmw_fences_update(struct vmw_fence_manager *fman); 75 76 76 - extern bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence, 77 - uint32_t flags); 77 + extern bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence); 78 78 79 - extern int vmw_fence_obj_wait(struct vmw_fence_obj *fence, uint32_t flags, 79 + extern int vmw_fence_obj_wait(struct vmw_fence_obj *fence, 80 80 bool lazy, 81 81 bool interruptible, unsigned long timeout); 82 82 ··· 83 85 84 86 extern int vmw_fence_create(struct vmw_fence_manager *fman, 85 87 uint32_t seqno, 86 - uint32_t mask, 87 88 struct vmw_fence_obj **p_fence); 88 89 89 90 extern int vmw_user_fence_create(struct drm_file *file_priv, 90 91 struct vmw_fence_manager *fman, 91 92 uint32_t sequence, 92 - uint32_t mask, 93 93 struct vmw_fence_obj **p_fence, 94 94 uint32_t *p_handle); 95 95