Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'msm-fixes-4.9' of git://people.freedesktop.org/~robclark/linux into drm-fixes

Fixes for some msm issues

* 'msm-fixes-4.9' of git://people.freedesktop.org/~robclark/linux:
drm/msm: Fix error handling crashes seen when VRAM allocation fails
drm/msm/mdp5: 8x16 actually has 8 mixer stages
drm/msm/mdp5: no scaling support on RGBn pipes for 8x16
drm/msm/mdp5: handle non-fullscreen base plane case
drm/msm: Set CLK_IGNORE_UNUSED flag for PLL clocks
drm/msm/dsi: Queue HPD helper work in attach/detach callbacks

+55 -31
+12 -2
drivers/gpu/drm/msm/dsi/dsi_host.c
··· 139 139 140 140 u32 err_work_state; 141 141 struct work_struct err_work; 142 + struct work_struct hpd_work; 142 143 struct workqueue_struct *workqueue; 143 144 144 145 /* DSI 6G TX buffer*/ ··· 1295 1294 wmb(); /* make sure dsi controller enabled again */ 1296 1295 } 1297 1296 1297 + static void dsi_hpd_worker(struct work_struct *work) 1298 + { 1299 + struct msm_dsi_host *msm_host = 1300 + container_of(work, struct msm_dsi_host, hpd_work); 1301 + 1302 + drm_helper_hpd_irq_event(msm_host->dev); 1303 + } 1304 + 1298 1305 static void dsi_err_worker(struct work_struct *work) 1299 1306 { 1300 1307 struct msm_dsi_host *msm_host = ··· 1489 1480 1490 1481 DBG("id=%d", msm_host->id); 1491 1482 if (msm_host->dev) 1492 - drm_helper_hpd_irq_event(msm_host->dev); 1483 + queue_work(msm_host->workqueue, &msm_host->hpd_work); 1493 1484 1494 1485 return 0; 1495 1486 } ··· 1503 1494 1504 1495 DBG("id=%d", msm_host->id); 1505 1496 if (msm_host->dev) 1506 - drm_helper_hpd_irq_event(msm_host->dev); 1497 + queue_work(msm_host->workqueue, &msm_host->hpd_work); 1507 1498 1508 1499 return 0; 1509 1500 } ··· 1757 1748 /* setup workqueue */ 1758 1749 msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0); 1759 1750 INIT_WORK(&msm_host->err_work, dsi_err_worker); 1751 + INIT_WORK(&msm_host->hpd_work, dsi_hpd_worker); 1760 1752 1761 1753 msm_dsi->host = &msm_host->base; 1762 1754 msm_dsi->id = msm_host->id;
+1
drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
··· 521 521 .parent_names = (const char *[]){ "xo" }, 522 522 .num_parents = 1, 523 523 .name = vco_name, 524 + .flags = CLK_IGNORE_UNUSED, 524 525 .ops = &clk_ops_dsi_pll_28nm_vco, 525 526 }; 526 527 struct device *dev = &pll_28nm->pdev->dev;
+1
drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
··· 412 412 struct clk_init_data vco_init = { 413 413 .parent_names = (const char *[]){ "pxo" }, 414 414 .num_parents = 1, 415 + .flags = CLK_IGNORE_UNUSED, 415 416 .ops = &clk_ops_dsi_pll_28nm_vco, 416 417 }; 417 418 struct device *dev = &pll_28nm->pdev->dev;
+1
drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
··· 702 702 .ops = &hdmi_8996_pll_ops, 703 703 .parent_names = hdmi_pll_parents, 704 704 .num_parents = ARRAY_SIZE(hdmi_pll_parents), 705 + .flags = CLK_IGNORE_UNUSED, 705 706 }; 706 707 707 708 int msm_hdmi_pll_8996_init(struct platform_device *pdev)
+1
drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
··· 424 424 .ops = &hdmi_pll_ops, 425 425 .parent_names = hdmi_pll_parents, 426 426 .num_parents = ARRAY_SIZE(hdmi_pll_parents), 427 + .flags = CLK_IGNORE_UNUSED, 427 428 }; 428 429 429 430 int msm_hdmi_pll_8960_init(struct platform_device *pdev)
+2 -2
drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
··· 272 272 .count = 2, 273 273 .base = { 0x14000, 0x16000 }, 274 274 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | 275 - MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION, 275 + MDP_PIPE_CAP_DECIMATION, 276 276 }, 277 277 .pipe_dma = { 278 278 .count = 1, ··· 282 282 .lm = { 283 283 .count = 2, /* LM0 and LM3 */ 284 284 .base = { 0x44000, 0x47000 }, 285 - .nb_stages = 5, 285 + .nb_stages = 8, 286 286 .max_width = 2048, 287 287 .max_height = 0xFFFF, 288 288 },
+28 -18
drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
··· 223 223 plane_cnt++; 224 224 } 225 225 226 - /* 227 - * If there is no base layer, enable border color. 228 - * Although it's not possbile in current blend logic, 229 - * put it here as a reminder. 230 - */ 231 - if (!pstates[STAGE_BASE] && plane_cnt) { 226 + if (!pstates[STAGE_BASE]) { 232 227 ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT; 233 228 DBG("Border Color is enabled"); 234 229 } ··· 360 365 return pa->state->zpos - pb->state->zpos; 361 366 } 362 367 368 + /* is there a helper for this? */ 369 + static bool is_fullscreen(struct drm_crtc_state *cstate, 370 + struct drm_plane_state *pstate) 371 + { 372 + return (pstate->crtc_x <= 0) && (pstate->crtc_y <= 0) && 373 + ((pstate->crtc_x + pstate->crtc_w) >= cstate->mode.hdisplay) && 374 + ((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay); 375 + } 376 + 363 377 static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, 364 378 struct drm_crtc_state *state) 365 379 { ··· 379 375 struct plane_state pstates[STAGE_MAX + 1]; 380 376 const struct mdp5_cfg_hw *hw_cfg; 381 377 const struct drm_plane_state *pstate; 382 - int cnt = 0, i; 378 + int cnt = 0, base = 0, i; 383 379 384 380 DBG("%s: check", mdp5_crtc->name); 385 381 386 - /* verify that there are not too many planes attached to crtc 387 - * and that we don't have conflicting mixer stages: 388 - */ 389 - hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); 390 382 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) { 391 - if (cnt >= (hw_cfg->lm.nb_stages)) { 392 - dev_err(dev->dev, "too many planes!\n"); 393 - return -EINVAL; 394 - } 395 - 396 - 397 383 pstates[cnt].plane = plane; 398 384 pstates[cnt].state = to_mdp5_plane_state(pstate); 399 385 ··· 393 399 /* assign a stage based on sorted zpos property */ 394 400 sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL); 395 401 402 + /* if the bottom-most layer is not fullscreen, we need to use 403 + * it for solid-color: 404 + */ 405 + if ((cnt > 0) && !is_fullscreen(state, &pstates[0].state->base)) 406 + base++; 407 + 408 + /* verify that there are not too many planes attached to crtc 409 + * and that we don't have conflicting mixer stages: 410 + */ 411 + hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); 412 + 413 + if ((cnt + base) >= hw_cfg->lm.nb_stages) { 414 + dev_err(dev->dev, "too many planes!\n"); 415 + return -EINVAL; 416 + } 417 + 396 418 for (i = 0; i < cnt; i++) { 397 - pstates[i].state->stage = STAGE_BASE + i; 419 + pstates[i].state->stage = STAGE_BASE + i + base; 398 420 DBG("%s: assign pipe %s on stage=%d", mdp5_crtc->name, 399 421 pipe2name(mdp5_plane_pipe(pstates[i].plane)), 400 422 pstates[i].state->stage);
+3 -6
drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
··· 292 292 format = to_mdp_format(msm_framebuffer_format(state->fb)); 293 293 if (MDP_FORMAT_IS_YUV(format) && 294 294 !pipe_supports_yuv(mdp5_plane->caps)) { 295 - dev_err(plane->dev->dev, 296 - "Pipe doesn't support YUV\n"); 295 + DBG("Pipe doesn't support YUV\n"); 297 296 298 297 return -EINVAL; 299 298 } ··· 300 301 if (!(mdp5_plane->caps & MDP_PIPE_CAP_SCALE) && 301 302 (((state->src_w >> 16) != state->crtc_w) || 302 303 ((state->src_h >> 16) != state->crtc_h))) { 303 - dev_err(plane->dev->dev, 304 - "Pipe doesn't support scaling (%dx%d -> %dx%d)\n", 304 + DBG("Pipe doesn't support scaling (%dx%d -> %dx%d)\n", 305 305 state->src_w >> 16, state->src_h >> 16, 306 306 state->crtc_w, state->crtc_h); 307 307 ··· 311 313 vflip = !!(state->rotation & DRM_REFLECT_Y); 312 314 if ((vflip && !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) || 313 315 (hflip && !(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP))) { 314 - dev_err(plane->dev->dev, 315 - "Pipe doesn't support flip\n"); 316 + DBG("Pipe doesn't support flip\n"); 316 317 317 318 return -EINVAL; 318 319 }
+1 -1
drivers/gpu/drm/msm/msm_drv.c
··· 228 228 flush_workqueue(priv->atomic_wq); 229 229 destroy_workqueue(priv->atomic_wq); 230 230 231 - if (kms) 231 + if (kms && kms->funcs) 232 232 kms->funcs->destroy(kms); 233 233 234 234 if (gpu) {
+5 -2
drivers/gpu/drm/msm/msm_gem_shrinker.c
··· 163 163 void msm_gem_shrinker_cleanup(struct drm_device *dev) 164 164 { 165 165 struct msm_drm_private *priv = dev->dev_private; 166 - WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier)); 167 - unregister_shrinker(&priv->shrinker); 166 + 167 + if (priv->shrinker.nr_deferred) { 168 + WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier)); 169 + unregister_shrinker(&priv->shrinker); 170 + } 168 171 }