Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'vmwgfx-next-15-08-21' of git://people.freedesktop.org/~thomash/linux into drm-next

Pull request of 15-08-21

The third pull request for 4.3. Contains two fixes for regressions introduced
by previous pull requests.

* tag 'vmwgfx-next-15-08-21' of git://people.freedesktop.org/~thomash/linux:
drm/vmwgfx: Remove duplicate ttm_bo_device_release
drm/vmwgfx: Fix a circular locking dependency in the fbdev code

+17 -13
-1
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
··· 965 965 ttm_object_device_release(&dev_priv->tdev); 966 966 iounmap(dev_priv->mmio_virt); 967 967 arch_phys_wc_del(dev_priv->mmio_mtrr); 968 - (void)ttm_bo_device_release(&dev_priv->bdev); 969 968 if (dev_priv->ctx.staged_bindings) 970 969 vmw_binding_state_free(dev_priv->ctx.staged_bindings); 971 970 vmw_ttm_global_release(dev_priv);
+17 -12
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
··· 68 68 69 69 struct drm_crtc *crtc; 70 70 struct drm_connector *con; 71 - 72 - bool local_mode; 71 + struct delayed_work local_work; 73 72 }; 74 73 75 74 static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green, ··· 166 167 * Dirty code 167 168 */ 168 169 169 - static void vmw_fb_dirty_flush(struct vmw_fb_par *par) 170 + static void vmw_fb_dirty_flush(struct work_struct *work) 170 171 { 172 + struct vmw_fb_par *par = container_of(work, struct vmw_fb_par, 173 + local_work.work); 171 174 struct vmw_private *vmw_priv = par->vmw_priv; 172 175 struct fb_info *info = vmw_priv->fb_info; 173 176 unsigned long irq_flags; ··· 249 248 unsigned x1, unsigned y1, 250 249 unsigned width, unsigned height) 251 250 { 252 - struct fb_info *info = par->vmw_priv->fb_info; 253 251 unsigned long flags; 254 252 unsigned x2 = x1 + width; 255 253 unsigned y2 = y1 + height; ··· 262 262 /* if we are active start the dirty work 263 263 * we share the work with the defio system */ 264 264 if (par->dirty.active) 265 - schedule_delayed_work(&info->deferred_work, VMW_DIRTY_DELAY); 265 + schedule_delayed_work(&par->local_work, 266 + VMW_DIRTY_DELAY); 266 267 } else { 267 268 if (x1 < par->dirty.x1) 268 269 par->dirty.x1 = x1; ··· 327 326 par->dirty.x2 = info->var.xres; 328 327 par->dirty.y2 = y2; 329 328 spin_unlock_irqrestore(&par->dirty.lock, flags); 330 - } 331 329 332 - vmw_fb_dirty_flush(par); 330 + /* 331 + * Since we've already waited on this work once, try to 332 + * execute asap. 
333 + */ 334 + cancel_delayed_work(&par->local_work); 335 + schedule_delayed_work(&par->local_work, 0); 336 + } 333 337 }; 334 338 335 339 static struct fb_deferred_io vmw_defio = { ··· 607 601 /* If there already was stuff dirty we wont 608 602 * schedule a new work, so lets do it now */ 609 603 610 - #if (defined(VMWGFX_STANDALONE) && defined(VMWGFX_FB_DEFERRED)) 611 - schedule_delayed_work(&par->def_par.deferred_work, 0); 612 - #else 613 - schedule_delayed_work(&info->deferred_work, 0); 614 - #endif 604 + schedule_delayed_work(&par->local_work, 0); 615 605 616 606 out_unlock: 617 607 if (old_mode) ··· 664 662 vmw_priv->fb_info = info; 665 663 par = info->par; 666 664 memset(par, 0, sizeof(*par)); 665 + INIT_DELAYED_WORK(&par->local_work, &vmw_fb_dirty_flush); 667 666 par->vmw_priv = vmw_priv; 668 667 par->vmalloc = NULL; 669 668 par->max_width = fb_width; ··· 787 784 788 785 /* ??? order */ 789 786 fb_deferred_io_cleanup(info); 787 + cancel_delayed_work_sync(&par->local_work); 790 788 unregister_framebuffer(info); 791 789 792 790 (void) vmw_fb_kms_detach(par, true, true); ··· 815 811 spin_unlock_irqrestore(&par->dirty.lock, flags); 816 812 817 813 flush_delayed_work(&info->deferred_work); 814 + flush_delayed_work(&par->local_work); 818 815 819 816 mutex_lock(&par->bo_mutex); 820 817 (void) vmw_fb_kms_detach(par, true, false);