drm/i915: Use our own workqueue to avoid wedging the system along with the GPU.

Signed-off-by: Eric Anholt <eric@anholt.net>
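
Before this patch the driver handed its retire, error and hotplug handlers to
the shared kernel workqueue via schedule_work()/schedule_delayed_work(), so a
handler stuck waiting on a wedged GPU could also hold up unrelated work the
rest of the system had queued there. The sketch below is a minimal,
hypothetical kernel module (every example_* name is a placeholder, not an
i915 symbol) showing the driver-private workqueue pattern the patch adopts:
create the queue at load time, queue work on it instead of the shared queue,
and tear it down at unload.

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;	/* driver-private queue */
static struct work_struct example_error_work;

static void example_error_handler(struct work_struct *work)
{
	/* may block for a long time if the hardware is hung; only
	 * example_wq backs up, not the shared kernel workqueue */
}

static int __init example_load(void)
{
	example_wq = create_workqueue("example");
	if (example_wq == NULL)
		return -ENOMEM;

	INIT_WORK(&example_error_work, example_error_handler);
	/* instead of schedule_work(&example_error_work) */
	queue_work(example_wq, &example_error_work);
	return 0;
}

static void __exit example_unload(void)
{
	/* waits for anything still queued, then frees the queue */
	destroy_workqueue(example_wq);
}

module_init(example_load);
module_exit(example_unload);
MODULE_LICENSE("GPL");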

4 files changed, 19 insertions(+), 6 deletions(-)

drivers/gpu/drm/i915/i915_dma.c (+13 -2)
@@ -1186,6 +1186,13 @@
 	if (ret)
 		goto out_iomapfree;
 
+	dev_priv->wq = create_workqueue("i915");
+	if (dev_priv->wq == NULL) {
+		DRM_ERROR("Failed to create our workqueue.\n");
+		ret = -ENOMEM;
+		goto out_iomapfree;
+	}
+
 	/* enable GEM by default */
 	dev_priv->has_gem = 1;
 
@@ -1211,7 +1218,7 @@
 	if (!I915_NEED_GFX_HWS(dev)) {
 		ret = i915_init_phys_hws(dev);
 		if (ret != 0)
-			goto out_iomapfree;
+			goto out_workqueue_free;
 	}
 
 	i915_get_mem_freq(dev);
@@ -1245,7 +1252,7 @@
 		ret = i915_load_modeset_init(dev, prealloc_size, agp_size);
 		if (ret < 0) {
 			DRM_ERROR("failed to init modeset\n");
-			goto out_rmmap;
+			goto out_workqueue_free;
 		}
 	}
 
@@ -1256,6 +1263,8 @@
 
 	return 0;
 
+out_workqueue_free:
+	destroy_workqueue(dev_priv->wq);
 out_iomapfree:
 	io_mapping_free(dev_priv->mm.gtt_mapping);
 out_rmmap:
@@ -1268,6 +1277,8 @@
 int i915_driver_unload(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	destroy_workqueue(dev_priv->wq);
 
 	io_mapping_free(dev_priv->mm.gtt_mapping);
 	if (dev_priv->mm.gtt_mtrr >= 0) {
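
The i915_driver_load() hunks above also reroute later failure paths through a
new out_workqueue_free label, so the queue is not leaked when a setup step
after its creation fails. A small hedged sketch of that goto-unwind ordering
(setup_a(), teardown_a() and later_setup() are made-up stand-ins for the
surrounding setup steps, not real DRM functions):

#include <linux/errno.h>
#include <linux/workqueue.h>

static int setup_a(void)     { return 0; }	/* earlier setup step */
static void teardown_a(void) { }
static int later_setup(void) { return -EIO; }	/* pretend a later step fails */

static int example_load(struct workqueue_struct **wq)
{
	int ret;

	ret = setup_a();
	if (ret)
		return ret;

	*wq = create_workqueue("example");
	if (*wq == NULL) {
		ret = -ENOMEM;
		goto out_a;			/* the queue was never created */
	}

	ret = later_setup();
	if (ret)
		goto out_workqueue_free;	/* failures past this point free it */

	return 0;

out_workqueue_free:
	destroy_workqueue(*wq);
out_a:
	teardown_a();
	return ret;
}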

drivers/gpu/drm/i915/i915_drv.h (+1)
@@ -231,6 +231,7 @@
 	spinlock_t error_lock;
 	struct drm_i915_error_state *first_error;
 	struct work_struct error_work;
+	struct workqueue_struct *wq;
 
 	/* Register state */
 	u8 saveLBB;

drivers/gpu/drm/i915/i915_gem.c (+2 -2)
@@ -1570,7 +1570,7 @@
 	}
 
 	if (was_empty && !dev_priv->mm.suspended)
-		schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
+		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
 	return seqno;
 }
 
@@ -1719,7 +1719,7 @@
 	i915_gem_retire_requests(dev);
 	if (!dev_priv->mm.suspended &&
 	    !list_empty(&dev_priv->mm.request_list))
-		schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
+		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
 	mutex_unlock(&dev->struct_mutex);
 }
 
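
Both hunks above only change which queue the retire handler lands on:
schedule_delayed_work(&work, HZ) is queue_delayed_work() aimed at the shared
kernel workqueue, so pointing the call at dev_priv->wq keeps the same
delayed_work and the same one-second delay, it just runs on the driver's own
queue. A small fragment (example_* names are placeholders) of the
self-rearming delayed-work pattern these call sites use:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;
static struct delayed_work example_retire_work;

static void example_retire(struct work_struct *work)
{
	/* retire finished requests, then re-arm for about a second later
	 * (the real handler only re-arms while requests are outstanding) */
	queue_delayed_work(example_wq, &example_retire_work, HZ);
}

static void example_start(void)
{
	INIT_DELAYED_WORK(&example_retire_work, example_retire);
	/* was: schedule_delayed_work(&example_retire_work, HZ) */
	queue_delayed_work(example_wq, &example_retire_work, HZ);
}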

drivers/gpu/drm/i915/i915_irq.c (+3 -2)
@@ -482,7 +482,7 @@
 		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
 	}
 
-	schedule_work(&dev_priv->error_work);
+	queue_work(dev_priv->wq, &dev_priv->error_work);
 }
 
 irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
@@ -560,7 +560,8 @@
 			DRM_DEBUG("hotplug event received, stat 0x%08x\n",
 				  hotplug_status);
 			if (hotplug_status & dev_priv->hotplug_supported_mask)
-				schedule_work(&dev_priv->hotplug_work);
+				queue_work(dev_priv->wq,
+					   &dev_priv->hotplug_work);
 
 			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
 			I915_READ(PORT_HOTPLUG_STAT);
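
The interrupt paths above defer to a workqueue in the first place because an
IRQ handler cannot sleep; queue_work() is safe to call from atomic context,
and the sleepable processing (error-state capture, hotplug probing) then runs
later from the workqueue thread. A hedged fragment, with example_* names as
placeholders rather than i915 symbols:

#include <linux/interrupt.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;
static struct work_struct example_hotplug_work;

static irqreturn_t example_irq_handler(int irq, void *arg)
{
	/* no sleeping allowed here: just hand the event off and return */
	queue_work(example_wq, &example_hotplug_work);
	return IRQ_HANDLED;
}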