Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'vmwgfx-next-2014-01-13' of git://people.freedesktop.org/~thomash/linux into drm-next

Anyway, nothing big here. Three more code cleanup patches from Rashika
Kheria, and one TTM/vmwgfx patch from me that tightens security around TTM
objects enough for them to be opened using prime objects from render nodes:

Previously any client could access a shared buffer using the "name", also
without actually opening it. Now a reference is required, and for render nodes
such a reference is intended to only be obtainable using a prime fd.

vmwgfx-next 2014-01-13 pull request

* tag 'vmwgfx-next-2014-01-13' of git://people.freedesktop.org/~thomash/linux:
drivers: gpu: Mark functions as static in vmwgfx_fence.c
drivers: gpu: Mark functions as static in vmwgfx_buffer.c
drivers: gpu: Mark functions as static in vmwgfx_kms.c
drm/ttm: ttm object security fixes for render nodes

+89 -55
+56 -38
drivers/gpu/drm/ttm/ttm_object.c
··· 68 68 69 69 struct ttm_object_file { 70 70 struct ttm_object_device *tdev; 71 - rwlock_t lock; 71 + spinlock_t lock; 72 72 struct list_head ref_list; 73 73 struct drm_open_hash ref_hash[TTM_REF_NUM]; 74 74 struct kref refcount; ··· 118 118 */ 119 119 120 120 struct ttm_ref_object { 121 + struct rcu_head rcu_head; 121 122 struct drm_hash_item hash; 122 123 struct list_head head; 123 124 struct kref kref; ··· 211 210 * call_rcu() or ttm_base_object_kfree(). 212 211 */ 213 212 214 - if (base->refcount_release) { 215 - ttm_object_file_unref(&base->tfile); 213 + ttm_object_file_unref(&base->tfile); 214 + if (base->refcount_release) 216 215 base->refcount_release(&base); 217 - } 218 216 } 219 217 220 218 void ttm_base_object_unref(struct ttm_base_object **p_base) ··· 229 229 struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile, 230 230 uint32_t key) 231 231 { 232 - struct ttm_object_device *tdev = tfile->tdev; 233 - struct ttm_base_object *uninitialized_var(base); 232 + struct ttm_base_object *base = NULL; 234 233 struct drm_hash_item *hash; 234 + struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE]; 235 235 int ret; 236 236 237 237 rcu_read_lock(); 238 - ret = drm_ht_find_item_rcu(&tdev->object_hash, key, &hash); 238 + ret = drm_ht_find_item_rcu(ht, key, &hash); 239 239 240 240 if (likely(ret == 0)) { 241 - base = drm_hash_entry(hash, struct ttm_base_object, hash); 242 - ret = kref_get_unless_zero(&base->refcount) ? 
0 : -EINVAL; 241 + base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj; 242 + if (!kref_get_unless_zero(&base->refcount)) 243 + base = NULL; 243 244 } 244 245 rcu_read_unlock(); 245 - 246 - if (unlikely(ret != 0)) 247 - return NULL; 248 - 249 - if (tfile != base->tfile && !base->shareable) { 250 - pr_err("Attempted access of non-shareable object\n"); 251 - ttm_base_object_unref(&base); 252 - return NULL; 253 - } 254 246 255 247 return base; 256 248 } 257 249 EXPORT_SYMBOL(ttm_base_object_lookup); 250 + 251 + struct ttm_base_object * 252 + ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key) 253 + { 254 + struct ttm_base_object *base = NULL; 255 + struct drm_hash_item *hash; 256 + struct drm_open_hash *ht = &tdev->object_hash; 257 + int ret; 258 + 259 + rcu_read_lock(); 260 + ret = drm_ht_find_item_rcu(ht, key, &hash); 261 + 262 + if (likely(ret == 0)) { 263 + base = drm_hash_entry(hash, struct ttm_base_object, hash); 264 + if (!kref_get_unless_zero(&base->refcount)) 265 + base = NULL; 266 + } 267 + rcu_read_unlock(); 268 + 269 + return base; 270 + } 271 + EXPORT_SYMBOL(ttm_base_object_lookup_for_ref); 258 272 259 273 int ttm_ref_object_add(struct ttm_object_file *tfile, 260 274 struct ttm_base_object *base, ··· 280 266 struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob; 281 267 int ret = -EINVAL; 282 268 269 + if (base->tfile != tfile && !base->shareable) 270 + return -EPERM; 271 + 283 272 if (existed != NULL) 284 273 *existed = true; 285 274 286 275 while (ret == -EINVAL) { 287 - read_lock(&tfile->lock); 288 - ret = drm_ht_find_item(ht, base->hash.key, &hash); 276 + rcu_read_lock(); 277 + ret = drm_ht_find_item_rcu(ht, base->hash.key, &hash); 289 278 290 279 if (ret == 0) { 291 280 ref = drm_hash_entry(hash, struct ttm_ref_object, hash); 292 - kref_get(&ref->kref); 293 - read_unlock(&tfile->lock); 294 - break; 281 + if (!kref_get_unless_zero(&ref->kref)) { 282 + rcu_read_unlock(); 283 + break; 284 + } 295 285 } 296 286 297 - 
read_unlock(&tfile->lock); 287 + rcu_read_unlock(); 298 288 ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref), 299 289 false, false); 300 290 if (unlikely(ret != 0)) ··· 315 297 ref->ref_type = ref_type; 316 298 kref_init(&ref->kref); 317 299 318 - write_lock(&tfile->lock); 319 - ret = drm_ht_insert_item(ht, &ref->hash); 300 + spin_lock(&tfile->lock); 301 + ret = drm_ht_insert_item_rcu(ht, &ref->hash); 320 302 321 303 if (likely(ret == 0)) { 322 304 list_add_tail(&ref->head, &tfile->ref_list); 323 305 kref_get(&base->refcount); 324 - write_unlock(&tfile->lock); 306 + spin_unlock(&tfile->lock); 325 307 if (existed != NULL) 326 308 *existed = false; 327 309 break; 328 310 } 329 311 330 - write_unlock(&tfile->lock); 312 + spin_unlock(&tfile->lock); 331 313 BUG_ON(ret != -EINVAL); 332 314 333 315 ttm_mem_global_free(mem_glob, sizeof(*ref)); ··· 348 330 struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob; 349 331 350 332 ht = &tfile->ref_hash[ref->ref_type]; 351 - (void)drm_ht_remove_item(ht, &ref->hash); 333 + (void)drm_ht_remove_item_rcu(ht, &ref->hash); 352 334 list_del(&ref->head); 353 - write_unlock(&tfile->lock); 335 + spin_unlock(&tfile->lock); 354 336 355 337 if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release) 356 338 base->ref_obj_release(base, ref->ref_type); 357 339 358 340 ttm_base_object_unref(&ref->obj); 359 341 ttm_mem_global_free(mem_glob, sizeof(*ref)); 360 - kfree(ref); 361 - write_lock(&tfile->lock); 342 + kfree_rcu(ref, rcu_head); 343 + spin_lock(&tfile->lock); 362 344 } 363 345 364 346 int ttm_ref_object_base_unref(struct ttm_object_file *tfile, ··· 369 351 struct drm_hash_item *hash; 370 352 int ret; 371 353 372 - write_lock(&tfile->lock); 354 + spin_lock(&tfile->lock); 373 355 ret = drm_ht_find_item(ht, key, &hash); 374 356 if (unlikely(ret != 0)) { 375 - write_unlock(&tfile->lock); 357 + spin_unlock(&tfile->lock); 376 358 return -EINVAL; 377 359 } 378 360 ref = drm_hash_entry(hash, struct ttm_ref_object, hash); 379 361 
kref_put(&ref->kref, ttm_ref_object_release); 380 - write_unlock(&tfile->lock); 362 + spin_unlock(&tfile->lock); 381 363 return 0; 382 364 } 383 365 EXPORT_SYMBOL(ttm_ref_object_base_unref); ··· 390 372 struct ttm_object_file *tfile = *p_tfile; 391 373 392 374 *p_tfile = NULL; 393 - write_lock(&tfile->lock); 375 + spin_lock(&tfile->lock); 394 376 395 377 /* 396 378 * Since we release the lock within the loop, we have to ··· 406 388 for (i = 0; i < TTM_REF_NUM; ++i) 407 389 drm_ht_remove(&tfile->ref_hash[i]); 408 390 409 - write_unlock(&tfile->lock); 391 + spin_unlock(&tfile->lock); 410 392 ttm_object_file_unref(&tfile); 411 393 } 412 394 EXPORT_SYMBOL(ttm_object_file_release); ··· 422 404 if (unlikely(tfile == NULL)) 423 405 return NULL; 424 406 425 - rwlock_init(&tfile->lock); 407 + spin_lock_init(&tfile->lock); 426 408 tfile->tdev = tdev; 427 409 kref_init(&tfile->refcount); 428 410 INIT_LIST_HEAD(&tfile->ref_list);
+4 -4
drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
··· 517 517 .destroy = vmw_ttm_destroy, 518 518 }; 519 519 520 - struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev, 520 + static struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev, 521 521 unsigned long size, uint32_t page_flags, 522 522 struct page *dummy_read_page) 523 523 { ··· 546 546 return NULL; 547 547 } 548 548 549 - int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) 549 + static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) 550 550 { 551 551 return 0; 552 552 } 553 553 554 - int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, 554 + static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, 555 555 struct ttm_mem_type_manager *man) 556 556 { 557 557 switch (type) { ··· 589 589 return 0; 590 590 } 591 591 592 - void vmw_evict_flags(struct ttm_buffer_object *bo, 592 + static void vmw_evict_flags(struct ttm_buffer_object *bo, 593 593 struct ttm_placement *placement) 594 594 { 595 595 *placement = vmw_sys_placement;
+5 -4
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
··· 271 271 spin_unlock_irq(&fman->lock); 272 272 } 273 273 274 - void vmw_fences_perform_actions(struct vmw_fence_manager *fman, 274 + static void vmw_fences_perform_actions(struct vmw_fence_manager *fman, 275 275 struct list_head *list) 276 276 { 277 277 struct vmw_fence_action *action, *next_action; ··· 897 897 * Note that the action callbacks may be executed before this function 898 898 * returns. 899 899 */ 900 - void vmw_fence_obj_add_action(struct vmw_fence_obj *fence, 900 + static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence, 901 901 struct vmw_fence_action *action) 902 902 { 903 903 struct vmw_fence_manager *fman = fence->fman; ··· 993 993 struct drm_vmw_event_fence event; 994 994 }; 995 995 996 - int vmw_event_fence_action_create(struct drm_file *file_priv, 996 + static int vmw_event_fence_action_create(struct drm_file *file_priv, 997 997 struct vmw_fence_obj *fence, 998 998 uint32_t flags, 999 999 uint64_t user_data, ··· 1080 1080 */ 1081 1081 if (arg->handle) { 1082 1082 struct ttm_base_object *base = 1083 - ttm_base_object_lookup(vmw_fp->tfile, arg->handle); 1083 + ttm_base_object_lookup_for_ref(dev_priv->tdev, 1084 + arg->handle); 1084 1085 1085 1086 if (unlikely(base == NULL)) { 1086 1087 DRM_ERROR("Fence event invalid fence object handle "
+6 -6
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
··· 40 40 * Clip @num_rects number of @rects against @clip storing the 41 41 * results in @out_rects and the number of passed rects in @out_num. 42 42 */ 43 - void vmw_clip_cliprects(struct drm_clip_rect *rects, 43 + static void vmw_clip_cliprects(struct drm_clip_rect *rects, 44 44 int num_rects, 45 45 struct vmw_clip_rect clip, 46 46 SVGASignedRect *out_rects, ··· 423 423 struct drm_master *master; 424 424 }; 425 425 426 - void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer) 426 + static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer) 427 427 { 428 428 struct vmw_framebuffer_surface *vfbs = 429 429 vmw_framebuffer_to_vfbs(framebuffer); ··· 589 589 return ret; 590 590 } 591 591 592 - int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, 592 + static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, 593 593 struct drm_file *file_priv, 594 594 unsigned flags, unsigned color, 595 595 struct drm_clip_rect *clips, ··· 761 761 struct vmw_dma_buffer *buffer; 762 762 }; 763 763 764 - void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer) 764 + static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer) 765 765 { 766 766 struct vmw_framebuffer_dmabuf *vfbd = 767 767 vmw_framebuffer_to_vfbd(framebuffer); ··· 947 947 return ret; 948 948 } 949 949 950 - int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, 950 + static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, 951 951 struct drm_file *file_priv, 952 952 unsigned flags, unsigned color, 953 953 struct drm_clip_rect *clips, ··· 1677 1677 * Small shared kms functions. 1678 1678 */ 1679 1679 1680 - int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num, 1680 + static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num, 1681 1681 struct drm_vmw_rect *rects) 1682 1682 { 1683 1683 struct drm_device *dev = dev_priv->dev;
+2 -1
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
··· 843 843 int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, 844 844 struct drm_file *file_priv) 845 845 { 846 + struct vmw_private *dev_priv = vmw_priv(dev); 846 847 union drm_vmw_surface_reference_arg *arg = 847 848 (union drm_vmw_surface_reference_arg *)data; 848 849 struct drm_vmw_surface_arg *req = &arg->req; ··· 855 854 struct ttm_base_object *base; 856 855 int ret = -EINVAL; 857 856 858 - base = ttm_base_object_lookup(tfile, req->sid); 857 + base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid); 859 858 if (unlikely(base == NULL)) { 860 859 DRM_ERROR("Could not find surface to reference.\n"); 861 860 return -EINVAL;
+16 -2
include/drm/ttm/ttm_object.h
··· 190 190 * @key: Hash key 191 191 * 192 192 * Looks up a struct ttm_base_object with the key @key. 193 - * Also verifies that the object is visible to the application, by 194 - * comparing the @tfile argument and checking the object shareable flag. 195 193 */ 196 194 197 195 extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file 198 196 *tfile, uint32_t key); 197 + 198 + /** 199 + * ttm_base_object_lookup_for_ref 200 + * 201 + * @tdev: Pointer to a struct ttm_object_device. 202 + * @key: Hash key 203 + * 204 + * Looks up a struct ttm_base_object with the key @key. 205 + * This function should only be used when the struct tfile associated with the 206 + * caller doesn't yet have a reference to the base object. 207 + */ 208 + 209 + extern struct ttm_base_object * 210 + ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key); 199 211 200 212 /** 201 213 * ttm_base_object_unref ··· 229 217 * @ref_type: The type of reference. 230 218 * @existed: Upon completion, indicates that an identical reference object 231 219 * already existed, and the refcount was upped on that object instead. 220 + * 221 + * Checks that the base object is shareable and adds a ref object to it. 232 222 * 233 223 * Adding a ref object to a base object is basically like referencing the 234 224 * base object, but a user-space application holds the reference. When the