Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm: Protect dev->filelist with its own mutex

amdgpu gained dev->struct_mutex usage, and that's because it's walking
the dev->filelist list. Protect that list with its own lock to take
one more step towards getting rid of struct_mutex usage in drivers
once and for all.

While doing the conversion I noticed that 2 debugfs files in i915
completely lacked appropriate locking. Fix that up too.

v2: don't forget to switch to drm_gem_object_unreference_unlocked.

Cc: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1461691808-12414-9-git-send-email-daniel.vetter@ffwll.ch

+25 -12
+5 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
··· 93 93 struct drm_device *ddev = adev->ddev; 94 94 struct drm_file *file; 95 95 96 - mutex_lock(&ddev->struct_mutex); 96 + mutex_lock(&ddev->filelist_mutex); 97 97 98 98 list_for_each_entry(file, &ddev->filelist, lhead) { 99 99 struct drm_gem_object *gobj; ··· 103 103 spin_lock(&file->table_lock); 104 104 idr_for_each_entry(&file->object_idr, gobj, handle) { 105 105 WARN_ONCE(1, "And also active allocations!\n"); 106 - drm_gem_object_unreference(gobj); 106 + drm_gem_object_unreference_unlocked(gobj); 107 107 } 108 108 idr_destroy(&file->object_idr); 109 109 spin_unlock(&file->table_lock); 110 110 } 111 111 112 - mutex_unlock(&ddev->struct_mutex); 112 + mutex_unlock(&ddev->filelist_mutex); 113 113 } 114 114 115 115 /* ··· 769 769 struct drm_file *file; 770 770 int r; 771 771 772 - r = mutex_lock_interruptible(&dev->struct_mutex); 772 + r = mutex_lock_interruptible(&dev->filelist_mutex); 773 773 if (r) 774 774 return r; 775 775 ··· 793 793 spin_unlock(&file->table_lock); 794 794 } 795 795 796 - mutex_unlock(&dev->struct_mutex); 796 + mutex_unlock(&dev->filelist_mutex); 797 797 return 0; 798 798 } 799 799
+1
drivers/gpu/drm/drm_drv.c
··· 590 590 spin_lock_init(&dev->buf_lock); 591 591 spin_lock_init(&dev->event_lock); 592 592 mutex_init(&dev->struct_mutex); 593 + mutex_init(&dev->filelist_mutex); 593 594 mutex_init(&dev->ctxlist_mutex); 594 595 mutex_init(&dev->master_mutex); 595 596
+6 -3
drivers/gpu/drm/drm_fops.c
··· 297 297 } 298 298 mutex_unlock(&dev->master_mutex); 299 299 300 - mutex_lock(&dev->struct_mutex); 300 + mutex_lock(&dev->filelist_mutex); 301 301 list_add(&priv->lhead, &dev->filelist); 302 - mutex_unlock(&dev->struct_mutex); 302 + mutex_unlock(&dev->filelist_mutex); 303 303 304 304 #ifdef __alpha__ 305 305 /* ··· 447 447 448 448 DRM_DEBUG("open_count = %d\n", dev->open_count); 449 449 450 - mutex_lock(&dev->struct_mutex); 450 + mutex_lock(&dev->filelist_mutex); 451 451 list_del(&file_priv->lhead); 452 + mutex_unlock(&dev->filelist_mutex); 453 + 454 + mutex_lock(&dev->struct_mutex); 452 455 if (file_priv->magic) 453 456 idr_remove(&file_priv->master->magic_map, file_priv->magic); 454 457 mutex_unlock(&dev->struct_mutex);
+2 -2
drivers/gpu/drm/drm_info.c
··· 174 174 /* dev->filelist is sorted youngest first, but we want to present 175 175 * oldest first (i.e. kernel, servers, clients), so walk backwardss. 176 176 */ 177 - mutex_lock(&dev->struct_mutex); 177 + mutex_lock(&dev->filelist_mutex); 178 178 list_for_each_entry_reverse(priv, &dev->filelist, lhead) { 179 179 struct task_struct *task; 180 180 ··· 190 190 priv->magic); 191 191 rcu_read_unlock(); 192 192 } 193 - mutex_unlock(&dev->struct_mutex); 193 + mutex_unlock(&dev->filelist_mutex); 194 194 return 0; 195 195 } 196 196
+10 -2
drivers/gpu/drm/i915/i915_debugfs.c
··· 498 498 499 499 seq_putc(m, '\n'); 500 500 print_batch_pool_stats(m, dev_priv); 501 + 502 + mutex_unlock(&dev->struct_mutex); 503 + 504 + mutex_lock(&dev->filelist_mutex); 501 505 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 502 506 struct file_stats stats; 503 507 struct task_struct *task; ··· 522 518 print_file_stats(m, task ? task->comm : "<unknown>", stats); 523 519 rcu_read_unlock(); 524 520 } 525 - 526 - mutex_unlock(&dev->struct_mutex); 521 + mutex_unlock(&dev->filelist_mutex); 527 522 528 523 return 0; 529 524 } ··· 2328 2325 else if (INTEL_INFO(dev)->gen >= 6) 2329 2326 gen6_ppgtt_info(m, dev); 2330 2327 2328 + mutex_lock(&dev->filelist_mutex); 2331 2329 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 2332 2330 struct drm_i915_file_private *file_priv = file->driver_priv; 2333 2331 struct task_struct *task; ··· 2343 2339 idr_for_each(&file_priv->context_idr, per_file_ctx, 2344 2340 (void *)(unsigned long)m); 2345 2341 } 2342 + mutex_unlock(&dev->filelist_mutex); 2346 2343 2347 2344 out_put: 2348 2345 intel_runtime_pm_put(dev_priv); ··· 2379 2374 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit), 2380 2375 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit), 2381 2376 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq)); 2377 + 2378 + mutex_lock(&dev->filelist_mutex); 2382 2379 spin_lock(&dev_priv->rps.client_lock); 2383 2380 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 2384 2381 struct drm_i915_file_private *file_priv = file->driver_priv; ··· 2403 2396 list_empty(&dev_priv->rps.mmioflips.link) ? "" : ", active"); 2404 2397 seq_printf(m, "Kernel boosts: %d\n", dev_priv->rps.boosts); 2405 2398 spin_unlock(&dev_priv->rps.client_lock); 2399 + mutex_unlock(&dev->filelist_mutex); 2406 2400 2407 2401 return 0; 2408 2402 }
+1
include/drm/drmP.h
··· 769 769 atomic_t buf_alloc; /**< Buffer allocation in progress */ 770 770 /*@} */ 771 771 772 + struct mutex filelist_mutex; 772 773 struct list_head filelist; 773 774 774 775 /** \name Memory management */