Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/vmwgfx: Remove the resource avail field

This field was previously used to prevent a lookup of a resource before its
constructor had run to its end. This was mainly intended for an interface
that is now removed that allowed looking up a resource by its device id.

Currently all affected resources are added to the lookup mechanism (its
TTM prime object is initialized) late in the constructor where it's OK to
look up the resource.

This means we can change the device resource_lock to an ordinary spinlock
instead of an rwlock and remove a locking sequence during lookup.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
Reviewed-by: Deepak Rawat <drawat@vmware.com>

+68 -73
+2 -4
drivers/gpu/drm/vmwgfx/vmwgfx_context.c
··· 217 217 } 218 218 } 219 219 220 - 221 - 222 - vmw_resource_activate(res, vmw_hw_context_destroy); 220 + res->hw_destroy = vmw_hw_context_destroy; 223 221 return 0; 224 222 225 223 out_cotables: ··· 272 274 273 275 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 274 276 vmw_fifo_resource_inc(dev_priv); 275 - vmw_resource_activate(res, vmw_hw_context_destroy); 277 + res->hw_destroy = vmw_hw_context_destroy; 276 278 return 0; 277 279 278 280 out_early:
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
··· 615 615 vcotbl->type = type; 616 616 vcotbl->ctx = ctx; 617 617 618 - vmw_resource_activate(&vcotbl->res, vmw_hw_cotable_destroy); 618 + vcotbl->res.hw_destroy = vmw_hw_cotable_destroy; 619 619 620 620 return &vcotbl->res; 621 621
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
··· 667 667 mutex_init(&dev_priv->binding_mutex); 668 668 mutex_init(&dev_priv->requested_layout_mutex); 669 669 mutex_init(&dev_priv->global_kms_state_mutex); 670 - rwlock_init(&dev_priv->resource_lock); 671 670 ttm_lock_init(&dev_priv->reservation_sem); 671 + spin_lock_init(&dev_priv->resource_lock); 672 672 spin_lock_init(&dev_priv->hw_lock); 673 673 spin_lock_init(&dev_priv->waiter_lock); 674 674 spin_lock_init(&dev_priv->cap_lock);
+36 -8
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
··· 113 113 }; 114 114 115 115 struct vmw_res_func; 116 + 117 + 118 + /** 119 + * struct vmw_resource - base class for hardware resources 120 + * 121 + * @kref: For refcounting. 122 + * @dev_priv: Pointer to the device private for this resource. Immutable. 123 + * @id: Device id. Protected by @dev_priv::resource_lock. 124 + * @backup_size: Backup buffer size. Immutable. 125 + * @res_dirty: Resource contains data not yet in the backup buffer. Protected 126 + * by resource reserved. 127 + * @backup_dirty: Backup buffer contains data not yet in the HW resource. 128 + * Protected by resource reserved. 129 + * @backup: The backup buffer if any. Protected by resource reserved. 130 + * @backup_offset: Offset into the backup buffer if any. Protected by resource 131 + * reserved. Note that only a few resource types can have a @backup_offset 132 + * different from zero. 133 + * @pin_count: The pin count for this resource. A pinned resource has a 134 + * pin-count greater than zero. It is not on the resource LRU lists and its 135 + * backup buffer is pinned. Hence it can't be evicted. 136 + * @func: Method vtable for this resource. Immutable. 137 + * @lru_head: List head for the LRU list. Protected by @dev_priv::resource_lock. 138 + * @mob_head: List head for the MOB backup list. Protected by @backup reserved. 139 + * @binding_head: List head for the context binding list. Protected by 140 + * the @dev_priv::binding_mutex. 141 + * @res_free: The resource destructor. 142 + * @hw_destroy: Callback to destroy the resource on the device, as part of 143 + * resource destruction. 
144 + */ 116 145 struct vmw_resource { 117 146 struct kref kref; 118 147 struct vmw_private *dev_priv; 119 148 int id; 120 - bool avail; 121 149 unsigned long backup_size; 122 - bool res_dirty; /* Protected by backup buffer reserved */ 123 - bool backup_dirty; /* Protected by backup buffer reserved */ 150 + bool res_dirty; 151 + bool backup_dirty; 124 152 struct vmw_buffer_object *backup; 125 153 unsigned long backup_offset; 126 - unsigned long pin_count; /* Protected by resource reserved */ 154 + unsigned long pin_count; 127 155 const struct vmw_res_func *func; 128 - struct list_head lru_head; /* Protected by the resource lock */ 129 - struct list_head mob_head; /* Protected by @backup reserved */ 130 - struct list_head binding_head; /* Protected by binding_mutex */ 156 + struct list_head lru_head; 157 + struct list_head mob_head; 158 + struct list_head binding_head; 131 159 void (*res_free) (struct vmw_resource *res); 132 160 void (*hw_destroy) (struct vmw_resource *res); 133 161 }; ··· 499 471 * Context and surface management. 500 472 */ 501 473 502 - rwlock_t resource_lock; 474 + spinlock_t resource_lock; 503 475 struct idr res_idr[vmw_res_max]; 504 476 /* 505 477 * Block lastclose from racing with firstopen.
+23 -52
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
··· 58 58 struct vmw_private *dev_priv = res->dev_priv; 59 59 struct idr *idr = &dev_priv->res_idr[res->func->res_type]; 60 60 61 - write_lock(&dev_priv->resource_lock); 61 + spin_lock(&dev_priv->resource_lock); 62 62 if (res->id != -1) 63 63 idr_remove(idr, res->id); 64 64 res->id = -1; 65 - write_unlock(&dev_priv->resource_lock); 65 + spin_unlock(&dev_priv->resource_lock); 66 66 } 67 67 68 68 static void vmw_resource_release(struct kref *kref) ··· 73 73 int id; 74 74 struct idr *idr = &dev_priv->res_idr[res->func->res_type]; 75 75 76 - write_lock(&dev_priv->resource_lock); 77 - res->avail = false; 76 + spin_lock(&dev_priv->resource_lock); 78 77 list_del_init(&res->lru_head); 79 - write_unlock(&dev_priv->resource_lock); 78 + spin_unlock(&dev_priv->resource_lock); 80 79 if (res->backup) { 81 80 struct ttm_buffer_object *bo = &res->backup->base; 82 81 ··· 107 108 else 108 109 kfree(res); 109 110 110 - write_lock(&dev_priv->resource_lock); 111 + spin_lock(&dev_priv->resource_lock); 111 112 if (id != -1) 112 113 idr_remove(idr, id); 113 - write_unlock(&dev_priv->resource_lock); 114 + spin_unlock(&dev_priv->resource_lock); 114 115 } 115 116 116 117 void vmw_resource_unreference(struct vmw_resource **p_res) ··· 139 140 BUG_ON(res->id != -1); 140 141 141 142 idr_preload(GFP_KERNEL); 142 - write_lock(&dev_priv->resource_lock); 143 + spin_lock(&dev_priv->resource_lock); 143 144 144 145 ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT); 145 146 if (ret >= 0) 146 147 res->id = ret; 147 148 148 - write_unlock(&dev_priv->resource_lock); 149 + spin_unlock(&dev_priv->resource_lock); 149 150 idr_preload_end(); 150 151 return ret < 0 ? 
ret : 0; 151 152 } ··· 169 170 kref_init(&res->kref); 170 171 res->hw_destroy = NULL; 171 172 res->res_free = res_free; 172 - res->avail = false; 173 173 res->dev_priv = dev_priv; 174 174 res->func = func; 175 175 INIT_LIST_HEAD(&res->lru_head); ··· 185 187 return vmw_resource_alloc_id(res); 186 188 } 187 189 188 - /** 189 - * vmw_resource_activate 190 - * 191 - * @res: Pointer to the newly created resource 192 - * @hw_destroy: Destroy function. NULL if none. 193 - * 194 - * Activate a resource after the hardware has been made aware of it. 195 - * Set tye destroy function to @destroy. Typically this frees the 196 - * resource and destroys the hardware resources associated with it. 197 - * Activate basically means that the function vmw_resource_lookup will 198 - * find it. 199 - */ 200 - void vmw_resource_activate(struct vmw_resource *res, 201 - void (*hw_destroy) (struct vmw_resource *)) 202 - { 203 - struct vmw_private *dev_priv = res->dev_priv; 204 - 205 - write_lock(&dev_priv->resource_lock); 206 - res->avail = true; 207 - res->hw_destroy = hw_destroy; 208 - write_unlock(&dev_priv->resource_lock); 209 - } 210 190 211 191 /** 212 192 * vmw_user_resource_lookup_handle - lookup a struct resource from a ··· 219 243 goto out_bad_resource; 220 244 221 245 res = converter->base_obj_to_res(base); 222 - 223 - read_lock(&dev_priv->resource_lock); 224 - if (!res->avail || res->res_free != converter->res_free) { 225 - read_unlock(&dev_priv->resource_lock); 246 + if (res->res_free != converter->res_free) 226 247 goto out_bad_resource; 227 - } 228 248 229 249 kref_get(&res->kref); 230 - read_unlock(&dev_priv->resource_lock); 231 250 232 251 *p_res = res; 233 252 ret = 0; ··· 393 422 if (!res->func->may_evict || res->id == -1 || res->pin_count) 394 423 return; 395 424 396 - write_lock(&dev_priv->resource_lock); 425 + spin_lock(&dev_priv->resource_lock); 397 426 list_add_tail(&res->lru_head, 398 427 &res->dev_priv->res_lru[res->func->res_type]); 399 - 
write_unlock(&dev_priv->resource_lock); 428 + spin_unlock(&dev_priv->resource_lock); 400 429 } 401 430 402 431 /** ··· 475 504 struct vmw_private *dev_priv = res->dev_priv; 476 505 int ret; 477 506 478 - write_lock(&dev_priv->resource_lock); 507 + spin_lock(&dev_priv->resource_lock); 479 508 list_del_init(&res->lru_head); 480 - write_unlock(&dev_priv->resource_lock); 509 + spin_unlock(&dev_priv->resource_lock); 481 510 482 511 if (res->func->needs_backup && res->backup == NULL && 483 512 !no_backup) { ··· 590 619 if (likely(ret != -EBUSY)) 591 620 break; 592 621 593 - write_lock(&dev_priv->resource_lock); 622 + spin_lock(&dev_priv->resource_lock); 594 623 if (list_empty(lru_list) || !res->func->may_evict) { 595 624 DRM_ERROR("Out of device device resources " 596 625 "for %s.\n", res->func->type_name); 597 626 ret = -EBUSY; 598 - write_unlock(&dev_priv->resource_lock); 627 + spin_unlock(&dev_priv->resource_lock); 599 628 break; 600 629 } 601 630 ··· 604 633 lru_head)); 605 634 list_del_init(&evict_res->lru_head); 606 635 607 - write_unlock(&dev_priv->resource_lock); 636 + spin_unlock(&dev_priv->resource_lock); 608 637 609 638 /* Trylock backup buffers with a NULL ticket. 
*/ 610 639 ret = vmw_resource_do_evict(NULL, evict_res, intr); 611 640 if (unlikely(ret != 0)) { 612 - write_lock(&dev_priv->resource_lock); 641 + spin_lock(&dev_priv->resource_lock); 613 642 list_add_tail(&evict_res->lru_head, lru_list); 614 - write_unlock(&dev_priv->resource_lock); 643 + spin_unlock(&dev_priv->resource_lock); 615 644 if (ret == -ERESTARTSYS || 616 645 ++err_count > VMW_RES_EVICT_ERR_COUNT) { 617 646 vmw_resource_unreference(&evict_res); ··· 793 822 struct ww_acquire_ctx ticket; 794 823 795 824 do { 796 - write_lock(&dev_priv->resource_lock); 825 + spin_lock(&dev_priv->resource_lock); 797 826 798 827 if (list_empty(lru_list)) 799 828 goto out_unlock; ··· 802 831 list_first_entry(lru_list, struct vmw_resource, 803 832 lru_head)); 804 833 list_del_init(&evict_res->lru_head); 805 - write_unlock(&dev_priv->resource_lock); 834 + spin_unlock(&dev_priv->resource_lock); 806 835 807 836 /* Wait lock backup buffers with a ticket. */ 808 837 ret = vmw_resource_do_evict(&ticket, evict_res, false); 809 838 if (unlikely(ret != 0)) { 810 - write_lock(&dev_priv->resource_lock); 839 + spin_lock(&dev_priv->resource_lock); 811 840 list_add_tail(&evict_res->lru_head, lru_list); 812 - write_unlock(&dev_priv->resource_lock); 841 + spin_unlock(&dev_priv->resource_lock); 813 842 if (++err_count > VMW_RES_EVICT_ERR_COUNT) { 814 843 vmw_resource_unreference(&evict_res); 815 844 return; ··· 820 849 } while (1); 821 850 822 851 out_unlock: 823 - write_unlock(&dev_priv->resource_lock); 852 + spin_unlock(&dev_priv->resource_lock); 824 853 } 825 854 826 855 /**
-2
drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
··· 120 120 bool delay_id, 121 121 void (*res_free) (struct vmw_resource *res), 122 122 const struct vmw_res_func *func); 123 - void vmw_resource_activate(struct vmw_resource *res, 124 - void (*hw_destroy) (struct vmw_resource *)); 125 123 int 126 124 vmw_simple_resource_create_ioctl(struct drm_device *dev, 127 125 void *data,
+2 -2
drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
··· 186 186 shader->num_input_sig = num_input_sig; 187 187 shader->num_output_sig = num_output_sig; 188 188 189 - vmw_resource_activate(res, vmw_hw_shader_destroy); 189 + res->hw_destroy = vmw_hw_shader_destroy; 190 190 return 0; 191 191 } 192 192 ··· 656 656 goto out_resource_init; 657 657 658 658 res->id = shader->id; 659 - vmw_resource_activate(res, vmw_hw_shader_destroy); 659 + res->hw_destroy = vmw_hw_shader_destroy; 660 660 661 661 out_resource_init: 662 662 vmw_resource_unreference(&res);
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
··· 81 81 return ret; 82 82 } 83 83 84 - vmw_resource_activate(&simple->res, simple->func->hw_destroy); 84 + simple->res.hw_destroy = simple->func->hw_destroy; 85 85 86 86 return 0; 87 87 }
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_so.c
··· 386 386 goto out_resource_init; 387 387 388 388 res->id = view->view_id; 389 - vmw_resource_activate(res, vmw_hw_view_destroy); 389 + res->hw_destroy = vmw_hw_view_destroy; 390 390 391 391 out_resource_init: 392 392 vmw_resource_unreference(&res);
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
··· 614 614 */ 615 615 616 616 INIT_LIST_HEAD(&srf->view_list); 617 - vmw_resource_activate(res, vmw_hw_surface_destroy); 617 + res->hw_destroy = vmw_hw_surface_destroy; 618 618 return ret; 619 619 } 620 620