Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm: convert to idr_alloc()

Convert to the much saner new idr interface.

* drm_ctxbitmap_next() error handling in drm_addctx() seems broken.
drm_ctxbitmap_next() returns -errno on failure, not -1.

[artem.savkov@gmail.com: missing idr_preload_end in drm_gem_flink_ioctl]
[jslaby@suse.cz: fix drm_gem_flink_ioctl() return value]
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: David Airlie <airlied@linux.ie>
Signed-off-by: Artem Savkov <artem.savkov@gmail.com>
Signed-off-by: Jiri Slaby <jslaby@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Tejun Heo and committed by
Linus Torvalds
2e928815 62f516b8

+25 -68
+3 -14
drivers/gpu/drm/drm_context.c
··· 74 74 */ 75 75 static int drm_ctxbitmap_next(struct drm_device * dev) 76 76 { 77 - int new_id; 78 77 int ret; 79 78 80 - again: 81 - if (idr_pre_get(&dev->ctx_idr, GFP_KERNEL) == 0) { 82 - DRM_ERROR("Out of memory expanding drawable idr\n"); 83 - return -ENOMEM; 84 - } 85 79 mutex_lock(&dev->struct_mutex); 86 - ret = idr_get_new_above(&dev->ctx_idr, NULL, 87 - DRM_RESERVED_CONTEXTS, &new_id); 80 + ret = idr_alloc(&dev->ctx_idr, NULL, DRM_RESERVED_CONTEXTS, 0, 81 + GFP_KERNEL); 88 82 mutex_unlock(&dev->struct_mutex); 89 - if (ret == -EAGAIN) 90 - goto again; 91 - else if (ret) 92 - return ret; 93 - 94 - return new_id; 83 + return ret; 95 84 } 96 85 97 86 /**
+4 -15
drivers/gpu/drm/drm_crtc.c
··· 266 266 static int drm_mode_object_get(struct drm_device *dev, 267 267 struct drm_mode_object *obj, uint32_t obj_type) 268 268 { 269 - int new_id = 0; 270 269 int ret; 271 270 272 - again: 273 - if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) { 274 - DRM_ERROR("Ran out memory getting a mode number\n"); 275 - return -ENOMEM; 276 - } 277 - 278 271 mutex_lock(&dev->mode_config.idr_mutex); 279 - ret = idr_get_new_above(&dev->mode_config.crtc_idr, obj, 1, &new_id); 280 - 281 - if (!ret) { 272 + ret = idr_alloc(&dev->mode_config.crtc_idr, obj, 1, 0, GFP_KERNEL); 273 + if (ret >= 0) { 282 274 /* 283 275 * Set up the object linking under the protection of the idr 284 276 * lock so that other users can't see inconsistent state. 285 277 */ 286 - obj->id = new_id; 278 + obj->id = ret; 287 279 obj->type = obj_type; 288 280 } 289 281 mutex_unlock(&dev->mode_config.idr_mutex); 290 282 291 - if (ret == -EAGAIN) 292 - goto again; 293 - 294 - return ret; 283 + return ret < 0 ? ret : 0; 295 284 } 296 285 297 286 /**
+16 -22
drivers/gpu/drm/drm_gem.c
··· 270 270 int ret; 271 271 272 272 /* 273 - * Get the user-visible handle using idr. 273 + * Get the user-visible handle using idr. Preload and perform 274 + * allocation under our spinlock. 274 275 */ 275 - again: 276 - /* ensure there is space available to allocate a handle */ 277 - if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0) 278 - return -ENOMEM; 279 - 280 - /* do the allocation under our spinlock */ 276 + idr_preload(GFP_KERNEL); 281 277 spin_lock(&file_priv->table_lock); 282 - ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep); 278 + 279 + ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT); 280 + 283 281 spin_unlock(&file_priv->table_lock); 284 - if (ret == -EAGAIN) 285 - goto again; 286 - else if (ret) 282 + idr_preload_end(); 283 + if (ret < 0) 287 284 return ret; 285 + *handlep = ret; 288 286 289 287 drm_gem_object_handle_reference(obj); 290 288 ··· 449 451 if (obj == NULL) 450 452 return -ENOENT; 451 453 452 - again: 453 - if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) { 454 - ret = -ENOMEM; 455 - goto err; 456 - } 457 - 454 + idr_preload(GFP_KERNEL); 458 455 spin_lock(&dev->object_name_lock); 459 456 if (!obj->name) { 460 - ret = idr_get_new_above(&dev->object_name_idr, obj, 1, 461 - &obj->name); 457 + ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT); 458 + obj->name = ret; 462 459 args->name = (uint64_t) obj->name; 463 460 spin_unlock(&dev->object_name_lock); 461 + idr_preload_end(); 464 462 465 - if (ret == -EAGAIN) 466 - goto again; 467 - else if (ret) 463 + if (ret < 0) 468 464 goto err; 465 + ret = 0; 469 466 470 467 /* Allocate a reference for the name table. */ 471 468 drm_gem_object_reference(obj); 472 469 } else { 473 470 args->name = (uint64_t) obj->name; 474 471 spin_unlock(&dev->object_name_lock); 472 + idr_preload_end(); 475 473 ret = 0; 476 474 } 477 475
+2 -17
drivers/gpu/drm/drm_stub.c
··· 109 109 110 110 static int drm_minor_get_id(struct drm_device *dev, int type) 111 111 { 112 - int new_id; 113 112 int ret; 114 113 int base = 0, limit = 63; 115 114 ··· 120 121 limit = base + 255; 121 122 } 122 123 123 - again: 124 - if (idr_pre_get(&drm_minors_idr, GFP_KERNEL) == 0) { 125 - DRM_ERROR("Out of memory expanding drawable idr\n"); 126 - return -ENOMEM; 127 - } 128 124 mutex_lock(&dev->struct_mutex); 129 - ret = idr_get_new_above(&drm_minors_idr, NULL, 130 - base, &new_id); 125 + ret = idr_alloc(&drm_minors_idr, NULL, base, limit, GFP_KERNEL); 131 126 mutex_unlock(&dev->struct_mutex); 132 - if (ret == -EAGAIN) 133 - goto again; 134 - else if (ret) 135 - return ret; 136 127 137 - if (new_id >= limit) { 138 - idr_remove(&drm_minors_idr, new_id); 139 - return -EINVAL; 140 - } 141 - return new_id; 128 + return ret == -ENOSPC ? -EINVAL : ret; 142 129 } 143 130 144 131 struct drm_master *drm_master_create(struct drm_minor *minor)