
Merge tag 'drm-msm-next-2017-08-22' of git://people.freedesktop.org/~robclark/linux into drm-next

Updates for 4.14.. I have some further patches from Jordan to add
multiple priority levels and pre-emption, but those will probably be
for 4.15 to give me time for the mesa parts.

* tag 'drm-msm-next-2017-08-22' of git://people.freedesktop.org/~robclark/linux:
drm/msm/mdp5: mark runtime_pm functions as __maybe_unused
drm/msm: remove unused variable
drm/msm/mdp5: make helper function static
drm/msm: make msm_framebuffer_init() static
drm/msm: add helper to allocate stolen fb
drm/msm: don't track fbdev's gem object separately
drm/msm: add modeset module param
drm/msm/mdp5: add tracking for clk enable-count
drm/msm: remove unused define
drm/msm: Add a helper function for in-kernel buffer allocations
drm/msm: Attach the GPU MMU when it is created
drm/msm: Add A5XX hardware fault detection
drm/msm: Remove unneeded platform dev members
drm/msm/mdp5: Set up runtime PM for MDSS
drm/msm/mdp5: Write to SMP registers even if allocations don't change
drm/msm/mdp5: Don't use mode_set helper funcs for encoders and CRTCs
drm/msm/dsi: Implement RPM suspend/resume callbacks
drm/msm/dsi: Set up runtime PM for DSI
drm/msm/hdmi: Set up runtime PM for HDMI
drm/msm/mdp5: Use runtime PM get/put API instead of toggling clocks

+573 -272
-2
drivers/gpu/drm/msm/adreno/a3xx_gpu.c
···
 	adreno_gpu = &a3xx_gpu->base;
 	gpu = &adreno_gpu->base;

-	a3xx_gpu->pdev = pdev;
-
 	gpu->perfcntrs = perfcntrs;
 	gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);
-1
drivers/gpu/drm/msm/adreno/a3xx_gpu.h
···
 
 struct a3xx_gpu {
 	struct adreno_gpu base;
-	struct platform_device *pdev;
 
 	/* if OCMEM is used for GMEM: */
 	uint32_t ocmem_base;
-2
drivers/gpu/drm/msm/adreno/a4xx_gpu.c
···
 	adreno_gpu = &a4xx_gpu->base;
 	gpu = &adreno_gpu->base;

-	a4xx_gpu->pdev = pdev;
-
 	gpu->perfcntrs = NULL;
 	gpu->num_perfcntrs = 0;
-1
drivers/gpu/drm/msm/adreno/a4xx_gpu.h
···
 
 struct a4xx_gpu {
 	struct adreno_gpu base;
-	struct platform_device *pdev;
 
 	/* if OCMEM is used for GMEM: */
 	uint32_t ocmem_base;
+30 -21
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
···
 static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
 		const struct firmware *fw, u64 *iova)
 {
-	struct drm_device *drm = gpu->dev;
 	struct drm_gem_object *bo;
 	void *ptr;
 
-	bo = msm_gem_new_locked(drm, fw->size - 4, MSM_BO_UNCACHED);
-	if (IS_ERR(bo))
-		return bo;
+	ptr = msm_gem_kernel_new_locked(gpu->dev, fw->size - 4,
+		MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);
 
-	ptr = msm_gem_get_vaddr(bo);
-	if (!ptr) {
-		drm_gem_object_unreference(bo);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	if (iova) {
-		int ret = msm_gem_get_iova(bo, gpu->aspace, iova);
-
-		if (ret) {
-			drm_gem_object_unreference(bo);
-			return ERR_PTR(ret);
-		}
-	}
+	if (IS_ERR(ptr))
+		return ERR_CAST(ptr);
 
 	memcpy(ptr, &fw->data[4], fw->size - 4);
···
 {
 	static bool loaded;
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
-	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
-	struct platform_device *pdev = a5xx_gpu->pdev;
+	struct platform_device *pdev = gpu->pdev;
 	int ret;
 
 	/*
···
 	  A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
 	  A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW | \
 	  A5XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
+	  A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT | \
 	  A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
 	  A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
 	  A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
···
 	dev_err_ratelimited(gpu->dev->dev, "GPMU | voltage droop\n");
 }
 
+static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
+{
+	struct drm_device *dev = gpu->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+
+	dev_err(dev->dev, "gpu fault fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
+		gpu->funcs->last_fence(gpu),
+		gpu_read(gpu, REG_A5XX_RBBM_STATUS),
+		gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
+		gpu_read(gpu, REG_A5XX_CP_RB_WPTR),
+		gpu_read64(gpu, REG_A5XX_CP_IB1_BASE, REG_A5XX_CP_IB1_BASE_HI),
+		gpu_read(gpu, REG_A5XX_CP_IB1_BUFSZ),
+		gpu_read64(gpu, REG_A5XX_CP_IB2_BASE, REG_A5XX_CP_IB2_BASE_HI),
+		gpu_read(gpu, REG_A5XX_CP_IB2_BUFSZ));
+
+	/* Turn off the hangcheck timer to keep it from bothering us */
+	del_timer(&gpu->hangcheck_timer);
+
+	queue_work(priv->wq, &gpu->recover_work);
+}
+
 #define RBBM_ERROR_MASK \
 	(A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
 	 A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
···
 
 	if (status & A5XX_RBBM_INT_0_MASK_CP_HW_ERROR)
 		a5xx_cp_err_irq(gpu);
+
+	if (status & A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT)
+		a5xx_fault_detect_irq(gpu);
 
 	if (status & A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
 		a5xx_uche_err_irq(gpu);
···
 	adreno_gpu = &a5xx_gpu->base;
 	gpu = &adreno_gpu->base;
 
-	a5xx_gpu->pdev = pdev;
 	adreno_gpu->registers = a5xx_registers;
 	adreno_gpu->reg_offsets = a5xx_register_offsets;
-1
drivers/gpu/drm/msm/adreno/a5xx_gpu.h
···
 
 struct a5xx_gpu {
 	struct adreno_gpu base;
-	struct platform_device *pdev;
 
 	struct drm_gem_object *pm4_bo;
 	uint64_t pm4_iova;
+4 -10
drivers/gpu/drm/msm/adreno/a5xx_power.c
···
 	 */
 	bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;
 
-	a5xx_gpu->gpmu_bo = msm_gem_new_locked(drm, bosize, MSM_BO_UNCACHED);
-	if (IS_ERR(a5xx_gpu->gpmu_bo))
-		goto err;
-
-	if (msm_gem_get_iova(a5xx_gpu->gpmu_bo, gpu->aspace,
-			&a5xx_gpu->gpmu_iova))
-		goto err;
-
-	ptr = msm_gem_get_vaddr(a5xx_gpu->gpmu_bo);
-	if (!ptr)
+	ptr = msm_gem_kernel_new_locked(drm, bosize,
+		MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace,
+		&a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova);
+	if (IS_ERR(ptr))
 		goto err;
 
 	while (cmds_size > 0) {
+11 -42
drivers/gpu/drm/msm/adreno/adreno_gpu.c
···
 		DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name);
 }
 
-static const char *iommu_ports[] = {
-	"gfx3d_user", "gfx3d_priv",
-	"gfx3d1_user", "gfx3d1_priv",
-};
-
 int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		struct adreno_gpu *adreno_gpu, const struct adreno_gpu_funcs *funcs)
 {
···
 
 	adreno_gpu_config.ringsz = RB_SIZE;
 
+	pm_runtime_set_autosuspend_delay(&pdev->dev, DRM_MSM_INACTIVE_PERIOD);
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
 	ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
 			adreno_gpu->info->name, &adreno_gpu_config);
 	if (ret)
 		return ret;
-
-	pm_runtime_set_autosuspend_delay(&pdev->dev, DRM_MSM_INACTIVE_PERIOD);
-	pm_runtime_use_autosuspend(&pdev->dev);
-	pm_runtime_enable(&pdev->dev);
 
 	ret = request_firmware(&adreno_gpu->pm4, adreno_gpu->info->pm4fw, drm->dev);
 	if (ret) {
···
 		return ret;
 	}
 
-	if (gpu->aspace && gpu->aspace->mmu) {
-		struct msm_mmu *mmu = gpu->aspace->mmu;
-		ret = mmu->funcs->attach(mmu, iommu_ports,
-			ARRAY_SIZE(iommu_ports));
-		if (ret)
-			return ret;
-	}
+	adreno_gpu->memptrs = msm_gem_kernel_new(drm,
+		sizeof(*adreno_gpu->memptrs), MSM_BO_UNCACHED, gpu->aspace,
+		&adreno_gpu->memptrs_bo, &adreno_gpu->memptrs_iova);
 
-	adreno_gpu->memptrs_bo = msm_gem_new(drm, sizeof(*adreno_gpu->memptrs),
-			MSM_BO_UNCACHED);
-	if (IS_ERR(adreno_gpu->memptrs_bo)) {
-		ret = PTR_ERR(adreno_gpu->memptrs_bo);
-		adreno_gpu->memptrs_bo = NULL;
-		dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
-		return ret;
-	}
-
-	adreno_gpu->memptrs = msm_gem_get_vaddr(adreno_gpu->memptrs_bo);
 	if (IS_ERR(adreno_gpu->memptrs)) {
-		dev_err(drm->dev, "could not vmap memptrs\n");
-		return -ENOMEM;
+		ret = PTR_ERR(adreno_gpu->memptrs);
+		adreno_gpu->memptrs = NULL;
+		dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
 	}
 
-	ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->aspace,
-			&adreno_gpu->memptrs_iova);
-	if (ret) {
-		dev_err(drm->dev, "could not map memptrs: %d\n", ret);
-		return ret;
-	}
-
-	return 0;
+	return ret;
 }
 
 void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
···
 	release_firmware(adreno_gpu->pfp);
 
 	msm_gpu_cleanup(gpu);
-
-	if (gpu->aspace) {
-		gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
-			iommu_ports, ARRAY_SIZE(iommu_ports));
-		msm_gem_address_space_put(gpu->aspace);
-	}
 }
+5
drivers/gpu/drm/msm/dsi/dsi.c
···
 	{}
 };
 
+static const struct dev_pm_ops dsi_pm_ops = {
+	SET_RUNTIME_PM_OPS(msm_dsi_runtime_suspend, msm_dsi_runtime_resume, NULL)
+};
+
 static struct platform_driver dsi_driver = {
 	.probe = dsi_dev_probe,
 	.remove = dsi_dev_remove,
 	.driver = {
 		.name = "msm_dsi",
 		.of_match_table = dt_match,
+		.pm = &dsi_pm_ops,
 	},
 };
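With the ops table wired into dsi_driver, the runtime-PM core calls msm_dsi_runtime_resume() when the first reference is taken and msm_dsi_runtime_suspend() after the last one is dropped, so the host code below only brackets its register access. A minimal sketch of the resulting calling pattern (mirroring the dsi_host.c hunks further down):

	pm_runtime_get_sync(&msm_host->pdev->dev);	/* -> runtime resume: bus clocks on */
	dsi_link_clk_enable(msm_host);
	/* ... program the DSI host ... */
	dsi_link_clk_disable(msm_host);
	pm_runtime_put_autosuspend(&msm_host->pdev->dev);	/* -> runtime suspend once idle */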
+2
drivers/gpu/drm/msm/dsi/dsi.h
···
 int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
 					struct drm_device *dev);
 int msm_dsi_host_init(struct msm_dsi *msm_dsi);
+int msm_dsi_runtime_suspend(struct device *dev);
+int msm_dsi_runtime_resume(struct device *dev);
 
 /* dsi phy */
 struct msm_dsi_phy;
+53 -41
drivers/gpu/drm/msm/dsi/dsi_host.c
···
 	struct completion video_comp;
 	struct mutex dev_mutex;
 	struct mutex cmd_mutex;
-	struct mutex clk_mutex;
 	spinlock_t intr_lock; /* Protect interrupt ctrl register */
 
 	u32 err_work_state;
···
 		goto put_gdsc;
 	}
 
+	pm_runtime_get_sync(dev);
+
 	ret = regulator_enable(gdsc_reg);
 	if (ret) {
 		pr_err("%s: unable to enable gdsc\n", __func__);
···
 	clk_disable_unprepare(ahb_clk);
disable_gdsc:
 	regulator_disable(gdsc_reg);
+	pm_runtime_put_autosuspend(dev);
put_clk:
 	clk_put(ahb_clk);
put_gdsc:
···
 		clk_disable_unprepare(msm_host->bus_clks[i]);
 }
 
+int msm_dsi_runtime_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_dsi *msm_dsi = platform_get_drvdata(pdev);
+	struct mipi_dsi_host *host = msm_dsi->host;
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	if (!msm_host->cfg_hnd)
+		return 0;
+
+	dsi_bus_clk_disable(msm_host);
+
+	return 0;
+}
+
+int msm_dsi_runtime_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_dsi *msm_dsi = platform_get_drvdata(pdev);
+	struct mipi_dsi_host *host = msm_dsi->host;
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	if (!msm_host->cfg_hnd)
+		return 0;
+
+	return dsi_bus_clk_enable(msm_host);
+}
+
 static int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
 {
 	int ret;
···
 		clk_disable_unprepare(msm_host->esc_clk);
 		clk_disable_unprepare(msm_host->byte_clk);
 	}
-}
-
-static int dsi_clk_ctrl(struct msm_dsi_host *msm_host, bool enable)
-{
-	int ret = 0;
-
-	mutex_lock(&msm_host->clk_mutex);
-	if (enable) {
-		ret = dsi_bus_clk_enable(msm_host);
-		if (ret) {
-			pr_err("%s: Can not enable bus clk, %d\n",
-				__func__, ret);
-			goto unlock_ret;
-		}
-		ret = dsi_link_clk_enable(msm_host);
-		if (ret) {
-			pr_err("%s: Can not enable link clk, %d\n",
-				__func__, ret);
-			dsi_bus_clk_disable(msm_host);
-			goto unlock_ret;
-		}
-	} else {
-		dsi_link_clk_disable(msm_host);
-		dsi_bus_clk_disable(msm_host);
-	}
-
-unlock_ret:
-	mutex_unlock(&msm_host->clk_mutex);
-	return ret;
 }
 
 static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host)
···
 	}
 
 	msm_host->pdev = pdev;
+	msm_dsi->host = &msm_host->base;
 
 	ret = dsi_host_parse_dt(msm_host);
 	if (ret) {
···
 		ret = PTR_ERR(msm_host->ctrl_base);
 		goto fail;
 	}
+
+	pm_runtime_enable(&pdev->dev);
 
 	msm_host->cfg_hnd = dsi_get_config(msm_host);
 	if (!msm_host->cfg_hnd) {
···
 	init_completion(&msm_host->video_comp);
 	mutex_init(&msm_host->dev_mutex);
 	mutex_init(&msm_host->cmd_mutex);
-	mutex_init(&msm_host->clk_mutex);
 	spin_lock_init(&msm_host->intr_lock);
 
 	/* setup workqueue */
···
 	INIT_WORK(&msm_host->err_work, dsi_err_worker);
 	INIT_WORK(&msm_host->hpd_work, dsi_hpd_worker);
 
-	msm_dsi->host = &msm_host->base;
 	msm_dsi->id = msm_host->id;
 
 	DBG("Dsi Host %d initialized", msm_host->id);
···
 		msm_host->workqueue = NULL;
 	}
 
-	mutex_destroy(&msm_host->clk_mutex);
 	mutex_destroy(&msm_host->cmd_mutex);
 	mutex_destroy(&msm_host->dev_mutex);
+
+	pm_runtime_disable(&msm_host->pdev->dev);
 }
 
 int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
···
 	 * mdss interrupt is generated in mdp core clock domain
 	 * mdp clock need to be enabled to receive dsi interrupt
 	 */
-	dsi_clk_ctrl(msm_host, 1);
+	pm_runtime_get_sync(&msm_host->pdev->dev);
+	dsi_link_clk_enable(msm_host);
 
 	/* TODO: vote for bus bandwidth */
···
 
 	/* TODO: unvote for bus bandwidth */
 
-	dsi_clk_ctrl(msm_host, 0);
+	dsi_link_clk_disable(msm_host);
+	pm_runtime_put_autosuspend(&msm_host->pdev->dev);
 }
 
 int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
···
 	 * and only turned on before MDP START.
 	 * This part of code should be enabled once mdp driver support it.
 	 */
-	/* if (msm_panel->mode == MSM_DSI_CMD_MODE)
-		dsi_clk_ctrl(msm_host, 0); */
+	/* if (msm_panel->mode == MSM_DSI_CMD_MODE) {
+	 *	dsi_link_clk_disable(msm_host);
+	 *	pm_runtime_put_autosuspend(&msm_host->pdev->dev);
+	 * }
+	 */
 
 	return 0;
 }
···
 		goto unlock_ret;
 	}
 
-	ret = dsi_clk_ctrl(msm_host, 1);
+	pm_runtime_get_sync(&msm_host->pdev->dev);
+	ret = dsi_link_clk_enable(msm_host);
 	if (ret) {
-		pr_err("%s: failed to enable clocks. ret=%d\n", __func__, ret);
+		pr_err("%s: failed to enable link clocks. ret=%d\n",
+			__func__, ret);
 		goto fail_disable_reg;
 	}
···
 	return 0;
 
fail_disable_clk:
-	dsi_clk_ctrl(msm_host, 0);
+	dsi_link_clk_disable(msm_host);
+	pm_runtime_put_autosuspend(&msm_host->pdev->dev);
fail_disable_reg:
 	dsi_host_regulator_disable(msm_host);
unlock_ret:
···
 
 	pinctrl_pm_select_sleep_state(&msm_host->pdev->dev);
 
-	dsi_clk_ctrl(msm_host, 0);
+	dsi_link_clk_disable(msm_host);
+	pm_runtime_put_autosuspend(&msm_host->pdev->dev);
 
 	dsi_host_regulator_disable(msm_host);
 
+1 -1
drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
···
 static void dsi_phy_disable_resource(struct msm_dsi_phy *phy)
 {
 	clk_disable_unprepare(phy->ahb_clk);
-	pm_runtime_put_sync(&phy->pdev->dev);
+	pm_runtime_put_autosuspend(&phy->pdev->dev);
 }
 
 static const struct of_device_id dsi_phy_dt_match[] = {
+2
drivers/gpu/drm/msm/hdmi/hdmi.c
···
 		hdmi->pwr_clks[i] = clk;
 	}
 
+	pm_runtime_enable(&pdev->dev);
+
 	hdmi->workq = alloc_ordered_workqueue("msm_hdmi", 0);
 
 	hdmi->i2c = msm_hdmi_i2c_init(hdmi);
+4
drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
···
 	const struct hdmi_platform_config *config = hdmi->config;
 	int i, ret;
 
+	pm_runtime_get_sync(&hdmi->pdev->dev);
+
 	for (i = 0; i < config->pwr_reg_cnt; i++) {
 		ret = regulator_enable(hdmi->pwr_regs[i]);
 		if (ret) {
···
 					config->pwr_reg_names[i], ret);
 		}
 	}
+
+	pm_runtime_put_autosuspend(&hdmi->pdev->dev);
 }
 
 #define AVI_IFRAME_LINE_NUMBER 1
+44 -19
drivers/gpu/drm/msm/hdmi/hdmi_connector.c
···
 	return ret;
 }
 
+static void enable_hpd_clocks(struct hdmi *hdmi, bool enable)
+{
+	const struct hdmi_platform_config *config = hdmi->config;
+	struct device *dev = &hdmi->pdev->dev;
+	int i, ret;
+
+	if (enable) {
+		for (i = 0; i < config->hpd_clk_cnt; i++) {
+			if (config->hpd_freq && config->hpd_freq[i]) {
+				ret = clk_set_rate(hdmi->hpd_clks[i],
+						config->hpd_freq[i]);
+				if (ret)
+					dev_warn(dev,
+						"failed to set clk %s (%d)\n",
+						config->hpd_clk_names[i], ret);
+			}
+
+			ret = clk_prepare_enable(hdmi->hpd_clks[i]);
+			if (ret) {
+				dev_err(dev,
+					"failed to enable hpd clk: %s (%d)\n",
+					config->hpd_clk_names[i], ret);
+			}
+		}
+	} else {
+		for (i = config->hpd_clk_cnt - 1; i >= 0; i--)
+			clk_disable_unprepare(hdmi->hpd_clks[i]);
+	}
+}
+
 static int hpd_enable(struct hdmi_connector *hdmi_connector)
 {
 	struct hdmi *hdmi = hdmi_connector->hdmi;
···
 		goto fail;
 	}
 
-	for (i = 0; i < config->hpd_clk_cnt; i++) {
-		if (config->hpd_freq && config->hpd_freq[i]) {
-			ret = clk_set_rate(hdmi->hpd_clks[i],
-					config->hpd_freq[i]);
-			if (ret)
-				dev_warn(dev, "failed to set clk %s (%d)\n",
-						config->hpd_clk_names[i], ret);
-		}
-
-		ret = clk_prepare_enable(hdmi->hpd_clks[i]);
-		if (ret) {
-			dev_err(dev, "failed to enable hpd clk: %s (%d)\n",
-					config->hpd_clk_names[i], ret);
-			goto fail;
-		}
-	}
+	pm_runtime_get_sync(dev);
+	enable_hpd_clocks(hdmi, true);
 
 	msm_hdmi_set_mode(hdmi, false);
 	msm_hdmi_phy_reset(hdmi);
···
 
 	msm_hdmi_set_mode(hdmi, false);
 
-	for (i = 0; i < config->hpd_clk_cnt; i++)
-		clk_disable_unprepare(hdmi->hpd_clks[i]);
+	enable_hpd_clocks(hdmi, false);
+	pm_runtime_put_autosuspend(dev);
 
 	ret = gpio_config(hdmi, false);
 	if (ret)
···
 
 static enum drm_connector_status detect_reg(struct hdmi *hdmi)
 {
-	uint32_t hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
+	uint32_t hpd_int_status;
+
+	pm_runtime_get_sync(&hdmi->pdev->dev);
+	enable_hpd_clocks(hdmi, true);
+
+	hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
+
+	enable_hpd_clocks(hdmi, false);
+	pm_runtime_put_autosuspend(&hdmi->pdev->dev);
+
 	return (hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED) ?
 			connector_status_connected : connector_status_disconnected;
 }
+5 -2
drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
···
 {
 	struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
 	struct mdp5_kms *mdp5_kms;
+	struct device *dev;
 	int intf_num;
 	u32 data = 0;
 
···
 	/* Smart Panel, Sync mode */
 	data |= MDP5_SPLIT_DPL_UPPER_SMART_PANEL;
 
+	dev = &mdp5_kms->pdev->dev;
+
 	/* Make sure clocks are on when connectors calling this function. */
-	mdp5_enable(mdp5_kms);
+	pm_runtime_get_sync(dev);
 	mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, data);
 
 	mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER,
 			MDP5_SPLIT_DPL_LOWER_SMART_PANEL);
 	mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1);
-	mdp5_disable(mdp5_kms);
+	pm_runtime_put_autosuspend(dev);
 
 	return 0;
 }
+17 -9
drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
···
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
 	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
+	struct device *dev = &mdp5_kms->pdev->dev;
 
 	DBG("%s", crtc->name);
 
···
 		mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done);
 
 	mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
-	mdp5_disable(mdp5_kms);
+	pm_runtime_put_autosuspend(dev);
 
 	mdp5_crtc->enabled = false;
 }
···
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
 	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
+	struct device *dev = &mdp5_kms->pdev->dev;
 
 	DBG("%s", crtc->name);
 
 	if (WARN_ON(mdp5_crtc->enabled))
 		return;
 
-	mdp5_enable(mdp5_kms);
+	pm_runtime_get_sync(dev);
+
+	mdp5_crtc_mode_set_nofb(crtc);
+
 	mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
 
 	if (mdp5_cstate->cmd_mode)
···
 		((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay);
 }
 
-enum mdp_mixer_stage_id get_start_stage(struct drm_crtc *crtc,
+static enum mdp_mixer_stage_id get_start_stage(struct drm_crtc *crtc,
 		struct drm_crtc_state *new_crtc_state,
 		struct drm_plane_state *bpstate)
 {
···
 	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
 	struct drm_device *dev = crtc->dev;
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
+	struct platform_device *pdev = mdp5_kms->pdev;
 	struct msm_kms *kms = &mdp5_kms->base.base;
 	struct drm_gem_object *cursor_bo, *old_bo = NULL;
 	uint32_t blendcfg, stride;
···
 	if (!handle) {
 		DBG("Cursor off");
 		cursor_enable = false;
-		mdp5_enable(mdp5_kms);
+		pm_runtime_get_sync(&pdev->dev);
 		goto set_cursor;
 	}
 
···
 	lm = mdp5_cstate->pipeline.mixer->lm;
 	stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0);
 
+	pm_runtime_get_sync(&pdev->dev);
+
 	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
 	old_bo = mdp5_crtc->cursor.scanout_bo;
 
···
 	mdp5_crtc->cursor.height = height;
 
 	get_roi(crtc, &roi_w, &roi_h);
-
-	mdp5_enable(mdp5_kms);
 
 	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
 	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
···
 
 	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
 
+	pm_runtime_put_autosuspend(&pdev->dev);
+
set_cursor:
 	ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
 	if (ret) {
···
 	crtc_flush(crtc, flush_mask);
 
end:
-	mdp5_disable(mdp5_kms);
+	pm_runtime_put_autosuspend(&pdev->dev);
 	if (old_bo) {
 		drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
 		/* enable vblank to complete cursor work: */
···
 
 	get_roi(crtc, &roi_w, &roi_h);
 
-	mdp5_enable(mdp5_kms);
+	pm_runtime_get_sync(&mdp5_kms->pdev->dev);
 
 	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
 	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
···
 
 	crtc_flush(crtc, flush_mask);
 
-	mdp5_disable(mdp5_kms);
+	pm_runtime_put_autosuspend(&mdp5_kms->pdev->dev);
 
 	return 0;
 }
+9 -3
drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
···
 {
 	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
 	struct mdp5_interface *intf = mdp5_encoder->intf;
+	/* this isn't right I think */
+	struct drm_crtc_state *cstate = encoder->crtc->state;
+
+	mdp5_encoder_mode_set(encoder, &cstate->mode, &cstate->adjusted_mode);
 
 	if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
 		mdp5_cmd_encoder_enable(encoder);
···
 }
 
 static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
-	.mode_set = mdp5_encoder_mode_set,
 	.disable = mdp5_encoder_disable,
 	.enable = mdp5_encoder_enable,
 	.atomic_check = mdp5_encoder_atomic_check,
···
 	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
 	struct mdp5_encoder *mdp5_slave_enc = to_mdp5_encoder(slave_encoder);
 	struct mdp5_kms *mdp5_kms;
+	struct device *dev;
 	int intf_num;
 	u32 data = 0;
 
···
 	else
 		return -EINVAL;
 
+	dev = &mdp5_kms->pdev->dev;
 	/* Make sure clocks are on when connectors calling this function. */
-	mdp5_enable(mdp5_kms);
+	pm_runtime_get_sync(dev);
+
 	/* Dumb Panel, Sync mode */
 	mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, 0);
 	mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER, data);
···
 
 	mdp5_ctl_pair(mdp5_encoder->ctl, mdp5_slave_enc->ctl, true);
 
-	mdp5_disable(mdp5_kms);
+	pm_runtime_put_autosuspend(dev);
 
 	return 0;
 }
+17 -10
drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
···
 void mdp5_irq_preinstall(struct msm_kms *kms)
 {
 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
-	mdp5_enable(mdp5_kms);
+	struct device *dev = &mdp5_kms->pdev->dev;
+
+	pm_runtime_get_sync(dev);
 	mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff);
 	mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
-	mdp5_disable(mdp5_kms);
+	pm_runtime_put_autosuspend(dev);
 }
 
 int mdp5_irq_postinstall(struct msm_kms *kms)
 {
 	struct mdp_kms *mdp_kms = to_mdp_kms(kms);
 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
+	struct device *dev = &mdp5_kms->pdev->dev;
 	struct mdp_irq *error_handler = &mdp5_kms->error_handler;
 
 	error_handler->irq = mdp5_irq_error_handler;
···
 			MDP5_IRQ_INTF2_UNDER_RUN |
 			MDP5_IRQ_INTF3_UNDER_RUN;
 
-	mdp5_enable(mdp5_kms);
+	pm_runtime_get_sync(dev);
 	mdp_irq_register(mdp_kms, error_handler);
-	mdp5_disable(mdp5_kms);
+	pm_runtime_put_autosuspend(dev);
 
 	return 0;
 }
···
 void mdp5_irq_uninstall(struct msm_kms *kms)
 {
 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
-	mdp5_enable(mdp5_kms);
+	struct device *dev = &mdp5_kms->pdev->dev;
+
+	pm_runtime_get_sync(dev);
 	mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
-	mdp5_disable(mdp5_kms);
+	pm_runtime_put_autosuspend(dev);
 }
 
 irqreturn_t mdp5_irq(struct msm_kms *kms)
···
 int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
 {
 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+	struct device *dev = &mdp5_kms->pdev->dev;
 
-	mdp5_enable(mdp5_kms);
+	pm_runtime_get_sync(dev);
 	mdp_update_vblank_mask(to_mdp_kms(kms),
 			mdp5_crtc_vblank(crtc), true);
-	mdp5_disable(mdp5_kms);
+	pm_runtime_put_autosuspend(dev);
 
 	return 0;
 }
···
 void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
 {
 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+	struct device *dev = &mdp5_kms->pdev->dev;
 
-	mdp5_enable(mdp5_kms);
+	pm_runtime_get_sync(dev);
 	mdp_update_vblank_mask(to_mdp_kms(kms),
 			mdp5_crtc_vblank(crtc), false);
-	mdp5_disable(mdp5_kms);
+	pm_runtime_put_autosuspend(dev);
 }
+43 -11
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
···
 static int mdp5_hw_init(struct msm_kms *kms)
 {
 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
-	struct platform_device *pdev = mdp5_kms->pdev;
+	struct device *dev = &mdp5_kms->pdev->dev;
 	unsigned long flags;
 
-	pm_runtime_get_sync(&pdev->dev);
-	mdp5_enable(mdp5_kms);
+	pm_runtime_get_sync(dev);
 
 	/* Magic unknown register writes:
 	 *
···
 
 	mdp5_ctlm_hw_reset(mdp5_kms->ctlm);
 
-	mdp5_disable(mdp5_kms);
-	pm_runtime_put_sync(&pdev->dev);
+	pm_runtime_put_sync(dev);
 
 	return 0;
 }
···
 static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
 {
 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+	struct device *dev = &mdp5_kms->pdev->dev;
 
-	mdp5_enable(mdp5_kms);
+	pm_runtime_get_sync(dev);
 
 	if (mdp5_kms->smp)
 		mdp5_smp_prepare_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
···
 static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
 {
 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+	struct device *dev = &mdp5_kms->pdev->dev;
 
 	if (mdp5_kms->smp)
 		mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
 
-	mdp5_disable(mdp5_kms);
+	pm_runtime_put_autosuspend(dev);
 }
 
 static void mdp5_wait_for_crtc_commit_done(struct msm_kms *kms,
···
 {
 	DBG("");
 
+	mdp5_kms->enable_count--;
+	WARN_ON(mdp5_kms->enable_count < 0);
+
 	clk_disable_unprepare(mdp5_kms->ahb_clk);
 	clk_disable_unprepare(mdp5_kms->axi_clk);
 	clk_disable_unprepare(mdp5_kms->core_clk);
···
 int mdp5_enable(struct mdp5_kms *mdp5_kms)
 {
 	DBG("");
+
+	mdp5_kms->enable_count++;
 
 	clk_prepare_enable(mdp5_kms->ahb_clk);
 	clk_prepare_enable(mdp5_kms->axi_clk);
···
 static void read_mdp_hw_revision(struct mdp5_kms *mdp5_kms,
 		u32 *major, u32 *minor)
 {
+	struct device *dev = &mdp5_kms->pdev->dev;
 	u32 version;
 
-	mdp5_enable(mdp5_kms);
+	pm_runtime_get_sync(dev);
 	version = mdp5_read(mdp5_kms, REG_MDP5_HW_VERSION);
-	mdp5_disable(mdp5_kms);
+	pm_runtime_put_autosuspend(dev);
 
 	*major = FIELD(version, MDP5_HW_VERSION_MAJOR);
 	*minor = FIELD(version, MDP5_HW_VERSION_MINOR);
···
 	 * have left things on, in which case we'll start getting faults if
 	 * we don't disable):
 	 */
-	mdp5_enable(mdp5_kms);
+	pm_runtime_get_sync(&pdev->dev);
 	for (i = 0; i < MDP5_INTF_NUM_MAX; i++) {
 		if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) ||
 				!config->hw->intf.base[i])
···
 
 		mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3);
 	}
-	mdp5_disable(mdp5_kms);
 	mdelay(16);
 
 	if (config->platform.iommu) {
···
 			"no iommu, fallback to phys contig buffers for scanout\n");
 		aspace = NULL;
 	}
+
+	pm_runtime_put_autosuspend(&pdev->dev);
 
 	ret = modeset_init(mdp5_kms);
 	if (ret) {
···
 	return 0;
 }
 
+static __maybe_unused int mdp5_runtime_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev);
+
+	DBG("");
+
+	return mdp5_disable(mdp5_kms);
+}
+
+static __maybe_unused int mdp5_runtime_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev);
+
+	DBG("");
+
+	return mdp5_enable(mdp5_kms);
+}
+
+static const struct dev_pm_ops mdp5_pm_ops = {
+	SET_RUNTIME_PM_OPS(mdp5_runtime_suspend, mdp5_runtime_resume, NULL)
+};
+
 static const struct of_device_id mdp5_dt_match[] = {
 	{ .compatible = "qcom,mdp5", },
 	/* to support downstream DT files */
···
 	.driver = {
 		.name = "msm_mdp",
 		.of_match_table = mdp5_dt_match,
+		.pm = &mdp5_pm_ops,
 	},
 };
+4 -3
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
···
 	bool rpm_enabled;
 
 	struct mdp_irq error_handler;
+
+	int enable_count;
 };
 #define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)
···
 
 static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data)
 {
+	WARN_ON(mdp5_kms->enable_count <= 0);
 	msm_writel(data, mdp5_kms->mmio + reg);
 }
 
 static inline u32 mdp5_read(struct mdp5_kms *mdp5_kms, u32 reg)
 {
+	WARN_ON(mdp5_kms->enable_count <= 0);
 	return msm_readl(mdp5_kms->mmio + reg);
 }
···
 {
 	return MDP5_IRQ_PING_PONG_0_DONE << mixer->pp;
 }
-
-int mdp5_disable(struct mdp5_kms *mdp5_kms);
-int mdp5_enable(struct mdp5_kms *mdp5_kms);
 
 void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
 		uint32_t old_irqmask);
+55 -8
drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c
···
 
 	struct regulator *vdd;
 
+	struct clk *ahb_clk;
+	struct clk *axi_clk;
+	struct clk *vsync_clk;
+
 	struct {
 		volatile unsigned long enabled_mask;
 		struct irq_domain *domain;
···
 	return 0;
 }
 
+int msm_mdss_enable(struct msm_mdss *mdss)
+{
+	DBG("");
+
+	clk_prepare_enable(mdss->ahb_clk);
+	if (mdss->axi_clk)
+		clk_prepare_enable(mdss->axi_clk);
+	if (mdss->vsync_clk)
+		clk_prepare_enable(mdss->vsync_clk);
+
+	return 0;
+}
+
+int msm_mdss_disable(struct msm_mdss *mdss)
+{
+	DBG("");
+
+	if (mdss->vsync_clk)
+		clk_disable_unprepare(mdss->vsync_clk);
+	if (mdss->axi_clk)
+		clk_disable_unprepare(mdss->axi_clk);
+	clk_disable_unprepare(mdss->ahb_clk);
+
+	return 0;
+}
+
+static int msm_mdss_get_clocks(struct msm_mdss *mdss)
+{
+	struct platform_device *pdev = to_platform_device(mdss->dev->dev);
+
+	mdss->ahb_clk = msm_clk_get(pdev, "iface");
+	if (IS_ERR(mdss->ahb_clk))
+		mdss->ahb_clk = NULL;
+
+	mdss->axi_clk = msm_clk_get(pdev, "bus");
+	if (IS_ERR(mdss->axi_clk))
+		mdss->axi_clk = NULL;
+
+	mdss->vsync_clk = msm_clk_get(pdev, "vsync");
+	if (IS_ERR(mdss->vsync_clk))
+		mdss->vsync_clk = NULL;
+
+	return 0;
+}
+
 void msm_mdss_destroy(struct drm_device *dev)
 {
 	struct msm_drm_private *priv = dev->dev_private;
···
 	mdss->irqcontroller.domain = NULL;
 
 	regulator_disable(mdss->vdd);
-
-	pm_runtime_put_sync(dev->dev);
 
 	pm_runtime_disable(dev->dev);
 }
···
 		goto fail;
 	}
 
+	ret = msm_mdss_get_clocks(mdss);
+	if (ret) {
+		dev_err(dev->dev, "failed to get clocks: %d\n", ret);
+		goto fail;
+	}
+
 	/* Regulator to enable GDSCs in downstream kernels */
 	mdss->vdd = devm_regulator_get(dev->dev, "vdd");
 	if (IS_ERR(mdss->vdd)) {
···
 	priv->mdss = mdss;
 
 	pm_runtime_enable(dev->dev);
-
-	/*
-	 * TODO: This is needed as the MDSS GDSC is only tied to MDSS's power
-	 * domain. Remove this once runtime PM is adapted for all the devices.
-	 */
-	pm_runtime_get_sync(dev->dev);
 
 	return 0;
fail_irq:
+51 -8
drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
···
 
 	int blk_cnt;
 	int blk_size;
+
+	/* register cache */
+	u32 alloc_w[22];
+	u32 alloc_r[22];
+	u32 pipe_reqprio_fifo_wm0[SSPP_MAX];
+	u32 pipe_reqprio_fifo_wm1[SSPP_MAX];
+	u32 pipe_reqprio_fifo_wm2[SSPP_MAX];
 };
 
 static inline
···
 static void set_fifo_thresholds(struct mdp5_smp *smp,
 		enum mdp5_pipe pipe, int nblks)
 {
-	struct mdp5_kms *mdp5_kms = get_kms(smp);
 	u32 smp_entries_per_blk = smp->blk_size / (128 / BITS_PER_BYTE);
 	u32 val;
 
 	/* 1/4 of SMP pool that is being fetched */
 	val = (nblks * smp_entries_per_blk) / 4;
 
-	mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1);
-	mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2);
-	mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3);
+	smp->pipe_reqprio_fifo_wm0[pipe] = val * 1;
+	smp->pipe_reqprio_fifo_wm1[pipe] = val * 2;
+	smp->pipe_reqprio_fifo_wm2[pipe] = val * 3;
 }
 
 /*
···
 static unsigned update_smp_state(struct mdp5_smp *smp,
 		u32 cid, mdp5_smp_state_t *assigned)
 {
-	struct mdp5_kms *mdp5_kms = get_kms(smp);
 	int cnt = smp->blk_cnt;
 	unsigned nblks = 0;
 	u32 blk, val;
···
 		int idx = blk / 3;
 		int fld = blk % 3;
 
-		val = mdp5_read(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx));
+		val = smp->alloc_w[idx];
 
 		switch (fld) {
 		case 0:
···
 			break;
 		}
 
-		mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx), val);
-		mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(idx), val);
+		smp->alloc_w[idx] = val;
+		smp->alloc_r[idx] = val;
 
 		nblks++;
 	}
 
 	return nblks;
+}
+
+static void write_smp_alloc_regs(struct mdp5_smp *smp)
+{
+	struct mdp5_kms *mdp5_kms = get_kms(smp);
+	int i, num_regs;
+
+	num_regs = smp->blk_cnt / 3 + 1;
+
+	for (i = 0; i < num_regs; i++) {
+		mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(i),
+			smp->alloc_w[i]);
+		mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(i),
+			smp->alloc_r[i]);
+	}
+}
+
+static void write_smp_fifo_regs(struct mdp5_smp *smp)
+{
+	struct mdp5_kms *mdp5_kms = get_kms(smp);
+	int i;
+
+	for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
+		struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
+		enum mdp5_pipe pipe = hwpipe->pipe;
+
+		mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe),
+			smp->pipe_reqprio_fifo_wm0[pipe]);
+		mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe),
+			smp->pipe_reqprio_fifo_wm1[pipe]);
+		mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe),
+			smp->pipe_reqprio_fifo_wm2[pipe]);
+	}
+}
 
 void mdp5_smp_prepare_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state)
···
 		set_fifo_thresholds(smp, pipe, nblks);
 	}
 
+	write_smp_alloc_regs(smp);
+	write_smp_fifo_regs(smp);
+
 	state->assigned = 0;
 }
···
 		DBG("release %s", pipe2name(pipe));
 		set_fifo_thresholds(smp, pipe, 0);
 	}
+
+	write_smp_fifo_regs(smp);
 
 	state->released = 0;
 }
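For context: with runtime PM the MDP5 block can now power down between commits and lose register state, so the SMP code stops writing registers piecemeal and instead keeps a cache that it replays wholesale from mdp5_smp_prepare_commit(), which runs with the runtime-PM reference held (see the mdp5_kms.c hunk above). That is what the "Write to SMP registers even if allocations don't change" patch in this pull amounts to: the cached values get reprogrammed each commit, whether or not the allocations moved.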
+36
drivers/gpu/drm/msm/msm_drv.c
···
 MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors");
 module_param(dumpstate, bool, 0600);
 
+static bool modeset = true;
+MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)");
+module_param(modeset, bool, 0600);
+
 /*
  * Util/helpers:
  */
···
 }
 #endif
 
+#ifdef CONFIG_PM
+static int msm_runtime_suspend(struct device *dev)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct msm_drm_private *priv = ddev->dev_private;
+
+	DBG("");
+
+	if (priv->mdss)
+		return msm_mdss_disable(priv->mdss);
+
+	return 0;
+}
+
+static int msm_runtime_resume(struct device *dev)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct msm_drm_private *priv = ddev->dev_private;
+
+	DBG("");
+
+	if (priv->mdss)
+		return msm_mdss_enable(priv->mdss);
+
+	return 0;
+}
+#endif
+
 static const struct dev_pm_ops msm_pm_ops = {
 	SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
+	SET_RUNTIME_PM_OPS(msm_runtime_suspend, msm_runtime_resume, NULL)
 };
 
 /*
···
 
 static int __init msm_drm_register(void)
 {
+	if (!modeset)
+		return -EINVAL;
+
 	DBG("init");
 	msm_mdp_register();
 	msm_dsi_register();
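For reference, a bool module_param named modeset in a module named msm is toggled with msm.modeset=0 on the kernel command line (or modeset=0 at modprobe time); with the check added to msm_drm_register(), that now makes the driver bail out with -EINVAL before any of its sub-drivers register, the same kind of escape hatch other DRM drivers expose.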
+8 -4
drivers/gpu/drm/msm/msm_drv.h
···
 struct msm_gem_address_space;
 struct msm_gem_vma;
 
-#define NUM_DOMAINS 2    /* one for KMS, then one per gpu core (?) */
-
 struct msm_file_private {
 	/* currently we don't do anything useful with this.. but when
 	 * per-context address spaces are supported we'd keep track of
···
 		uint32_t size, uint32_t flags);
 struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
 		uint32_t size, uint32_t flags);
+void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
+		uint32_t flags, struct msm_gem_address_space *aspace,
+		struct drm_gem_object **bo, uint64_t *iova);
+void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
+		uint32_t flags, struct msm_gem_address_space *aspace,
+		struct drm_gem_object **bo, uint64_t *iova);
 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 		struct dma_buf *dmabuf, struct sg_table *sgt);
···
 		struct msm_gem_address_space *aspace, int plane);
 struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
 const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
-struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
-		const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
 struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
 		struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd);
+struct drm_framebuffer *msm_alloc_stolen_fb(struct drm_device *dev,
+		int w, int h, int p, uint32_t format);
 
 struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
 void msm_fbdev_free(struct drm_device *dev);
+44 -1
drivers/gpu/drm/msm/msm_fb.c
···
 
 #include "msm_drv.h"
 #include "msm_kms.h"
+#include "msm_gem.h"
 
 struct msm_framebuffer {
 	struct drm_framebuffer base;
···
 };
 #define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base)
 
+static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
+		const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
 
 static int msm_framebuffer_create_handle(struct drm_framebuffer *fb,
 		struct drm_file *file_priv,
···
 	return ERR_PTR(ret);
 }
 
-struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
+static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
 		const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
 {
 	struct msm_drm_private *priv = dev->dev_private;
···
 	kfree(msm_fb);
 
 	return ERR_PTR(ret);
+}
+
+struct drm_framebuffer *
+msm_alloc_stolen_fb(struct drm_device *dev, int w, int h, int p, uint32_t format)
+{
+	struct drm_mode_fb_cmd2 mode_cmd = {
+		.pixel_format = format,
+		.width = w,
+		.height = h,
+		.pitches = { p },
+	};
+	struct drm_gem_object *bo;
+	struct drm_framebuffer *fb;
+	int size;
+
+	/* allocate backing bo */
+	size = mode_cmd.pitches[0] * mode_cmd.height;
+	DBG("allocating %d bytes for fb %d", size, dev->primary->index);
+	bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC | MSM_BO_STOLEN);
+	if (IS_ERR(bo)) {
+		dev_warn(dev->dev, "could not allocate stolen bo\n");
+		/* try regular bo: */
+		bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC);
+	}
+	if (IS_ERR(bo)) {
+		dev_err(dev->dev, "failed to allocate buffer object\n");
+		return ERR_CAST(bo);
+	}
+
+	fb = msm_framebuffer_init(dev, &mode_cmd, &bo);
+	if (IS_ERR(fb)) {
+		dev_err(dev->dev, "failed to allocate fb\n");
+		/* note: if fb creation failed, we can't rely on fb destroy
+		 * to unref the bo:
+		 */
+		drm_gem_object_unreference_unlocked(bo);
+		return ERR_CAST(fb);
+	}
+
+	return fb;
+}
+20 -37
drivers/gpu/drm/msm/msm_fbdev.c
···
 #include <drm/drm_fb_helper.h>
 
 #include "msm_drv.h"
-#include "msm_gem.h"
 #include "msm_kms.h"
 
 extern int msm_gem_mmap_obj(struct drm_gem_object *obj,
···
 struct msm_fbdev {
 	struct drm_fb_helper base;
 	struct drm_framebuffer *fb;
-	struct drm_gem_object *bo;
 };
 
 static struct fb_ops msm_fb_ops = {
···
 {
 	struct drm_fb_helper *helper = (struct drm_fb_helper *)info->par;
 	struct msm_fbdev *fbdev = to_msm_fbdev(helper);
-	struct drm_gem_object *drm_obj = fbdev->bo;
+	struct drm_gem_object *bo = msm_framebuffer_bo(fbdev->fb, 0);
 	int ret = 0;
 
-	ret = drm_gem_mmap_obj(drm_obj, drm_obj->size, vma);
+	ret = drm_gem_mmap_obj(bo, bo->size, vma);
 	if (ret) {
 		pr_err("%s:drm_gem_mmap_obj fail\n", __func__);
 		return ret;
 	}
 
-	return msm_gem_mmap_obj(drm_obj, vma);
+	return msm_gem_mmap_obj(bo, vma);
 }
 
 static int msm_fbdev_create(struct drm_fb_helper *helper,
···
 	struct drm_device *dev = helper->dev;
 	struct msm_drm_private *priv = dev->dev_private;
 	struct drm_framebuffer *fb = NULL;
+	struct drm_gem_object *bo;
 	struct fb_info *fbi = NULL;
-	struct drm_mode_fb_cmd2 mode_cmd = {0};
 	uint64_t paddr;
-	int ret, size;
+	uint32_t format;
+	int ret, pitch;
+
+	format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
 
 	DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
 			sizes->surface_height, sizes->surface_bpp,
 			sizes->fb_width, sizes->fb_height);
 
-	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
-			sizes->surface_depth);
+	pitch = align_pitch(sizes->surface_width, sizes->surface_bpp);
+	fb = msm_alloc_stolen_fb(dev, sizes->surface_width,
+			sizes->surface_height, pitch, format);
 
-	mode_cmd.width = sizes->surface_width;
-	mode_cmd.height = sizes->surface_height;
-
-	mode_cmd.pitches[0] = align_pitch(
-			mode_cmd.width, sizes->surface_bpp);
-
-	/* allocate backing bo */
-	size = mode_cmd.pitches[0] * mode_cmd.height;
-	DBG("allocating %d bytes for fb %d", size, dev->primary->index);
-	fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT |
-			MSM_BO_WC | MSM_BO_STOLEN);
-	if (IS_ERR(fbdev->bo)) {
-		ret = PTR_ERR(fbdev->bo);
-		fbdev->bo = NULL;
-		dev_err(dev->dev, "failed to allocate buffer object: %d\n", ret);
-		goto fail;
-	}
-
-	fb = msm_framebuffer_init(dev, &mode_cmd, &fbdev->bo);
 	if (IS_ERR(fb)) {
 		dev_err(dev->dev, "failed to allocate fb\n");
-		/* note: if fb creation failed, we can't rely on fb destroy
-		 * to unref the bo:
-		 */
-		drm_gem_object_unreference_unlocked(fbdev->bo);
 		ret = PTR_ERR(fb);
 		goto fail;
 	}
+
+	bo = msm_framebuffer_bo(fb, 0);
 
 	mutex_lock(&dev->struct_mutex);
 
···
 	 * in panic (ie. lock-safe, etc) we could avoid pinning the
 	 * buffer now:
 	 */
-	ret = msm_gem_get_iova(fbdev->bo, priv->kms->aspace, &paddr);
+	ret = msm_gem_get_iova(bo, priv->kms->aspace, &paddr);
 	if (ret) {
 		dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret);
 		goto fail_unlock;
···
 
 	dev->mode_config.fb_base = paddr;
 
-	fbi->screen_base = msm_gem_get_vaddr(fbdev->bo);
+	fbi->screen_base = msm_gem_get_vaddr(bo);
 	if (IS_ERR(fbi->screen_base)) {
 		ret = PTR_ERR(fbi->screen_base);
 		goto fail_unlock;
 	}
-	fbi->screen_size = fbdev->bo->size;
+	fbi->screen_size = bo->size;
 	fbi->fix.smem_start = paddr;
-	fbi->fix.smem_len = fbdev->bo->size;
+	fbi->fix.smem_len = bo->size;
 
 	DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
 	DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);
···
 
 	/* this will free the backing object */
 	if (fbdev->fb) {
-		msm_gem_put_vaddr(fbdev->bo);
+		struct drm_gem_object *bo =
+			msm_framebuffer_bo(fbdev->fb, 0);
+		msm_gem_put_vaddr(bo);
 		drm_framebuffer_remove(fbdev->fb);
 	}
 
+46
drivers/gpu/drm/msm/msm_gem.c
···
 	drm_gem_object_unreference_unlocked(obj);
 	return ERR_PTR(ret);
 }
+
+static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
+		uint32_t flags, struct msm_gem_address_space *aspace,
+		struct drm_gem_object **bo, uint64_t *iova, bool locked)
+{
+	void *vaddr;
+	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
+	int ret;
+
+	if (IS_ERR(obj))
+		return ERR_CAST(obj);
+
+	if (iova) {
+		ret = msm_gem_get_iova(obj, aspace, iova);
+		if (ret) {
+			drm_gem_object_unreference(obj);
+			return ERR_PTR(ret);
+		}
+	}
+
+	vaddr = msm_gem_get_vaddr(obj);
+	if (!vaddr) {
+		msm_gem_put_iova(obj, aspace);
+		drm_gem_object_unreference(obj);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	if (bo)
+		*bo = obj;
+
+	return vaddr;
+}
+
+void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
+		uint32_t flags, struct msm_gem_address_space *aspace,
+		struct drm_gem_object **bo, uint64_t *iova)
+{
+	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
+}
+
+void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
+		uint32_t flags, struct msm_gem_address_space *aspace,
+		struct drm_gem_object **bo, uint64_t *iova)
+{
+	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
+}
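The two public wrappers differ only in whether dev->struct_mutex is already held. A minimal sketch of a caller (drm, size and data are hypothetical placeholders, not names from this series):

	struct drm_gem_object *bo;
	uint64_t iova;
	void *vaddr;

	/* allocate, map into the GPU address space, and vmap in one call */
	vaddr = msm_gem_kernel_new(drm, size, MSM_BO_UNCACHED,
			gpu->aspace, &bo, &iova);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);	/* helper has already cleaned up */

	memcpy(vaddr, data, size);	/* CPU writes through the kernel vmap */
	/* the GPU sees the buffer at 'iova' within gpu->aspace */

This is the pattern the a5xx ucode, GPMU, memptrs and ringbuffer call sites above are converted to.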
+55 -28
drivers/gpu/drm/msm/msm_gpu.c
···
 	return 0;
 }
 
+static struct msm_gem_address_space *
+msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev,
+		uint64_t va_start, uint64_t va_end)
+{
+	struct iommu_domain *iommu;
+	struct msm_gem_address_space *aspace;
+	int ret;
+
+	/*
+	 * Setup IOMMU.. eventually we will (I think) do this once per context
+	 * and have separate page tables per context.  For now, to keep things
+	 * simple and to get something working, just use a single address space:
+	 */
+	iommu = iommu_domain_alloc(&platform_bus_type);
+	if (!iommu)
+		return NULL;
+
+	iommu->geometry.aperture_start = va_start;
+	iommu->geometry.aperture_end = va_end;
+
+	dev_info(gpu->dev->dev, "%s: using IOMMU\n", gpu->name);
+
+	aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu");
+	if (IS_ERR(aspace)) {
+		dev_err(gpu->dev->dev, "failed to init iommu: %ld\n",
+			PTR_ERR(aspace));
+		iommu_domain_free(iommu);
+		return ERR_CAST(aspace);
+	}
+
+	ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0);
+	if (ret) {
+		msm_gem_address_space_put(aspace);
+		return ERR_PTR(ret);
+	}
+
+	return aspace;
+}
+
 int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
 		const char *name, struct msm_gpu_config *config)
 {
-	struct iommu_domain *iommu;
 	int ret;
 
 	if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
···
 	if (IS_ERR(gpu->gpu_cx))
 		gpu->gpu_cx = NULL;
 
-	/* Setup IOMMU.. eventually we will (I think) do this once per context
-	 * and have separate page tables per context.  For now, to keep things
-	 * simple and to get something working, just use a single address space:
-	 */
-	iommu = iommu_domain_alloc(&platform_bus_type);
-	if (iommu) {
-		iommu->geometry.aperture_start = config->va_start;
-		iommu->geometry.aperture_end = config->va_end;
+	gpu->pdev = pdev;
+	platform_set_drvdata(pdev, gpu);
 
-		dev_info(drm->dev, "%s: using IOMMU\n", name);
-		gpu->aspace = msm_gem_address_space_create(&pdev->dev,
-				iommu, "gpu");
-		if (IS_ERR(gpu->aspace)) {
-			ret = PTR_ERR(gpu->aspace);
-			dev_err(drm->dev, "failed to init iommu: %d\n", ret);
-			gpu->aspace = NULL;
-			iommu_domain_free(iommu);
-			goto fail;
-		}
+	bs_init(gpu);
 
-	} else {
+	gpu->aspace = msm_gpu_create_address_space(gpu, pdev,
+		config->va_start, config->va_end);
+
+	if (gpu->aspace == NULL)
 		dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
+	else if (IS_ERR(gpu->aspace)) {
+		ret = PTR_ERR(gpu->aspace);
+		goto fail;
 	}
 
 	/* Create ringbuffer: */
···
 		goto fail;
 	}
 
-	gpu->pdev = pdev;
-	platform_set_drvdata(pdev, gpu);
-
-	bs_init(gpu);
-
 	return 0;
 
fail:
+	platform_set_drvdata(pdev, NULL);
 	return ret;
 }
···
 		msm_gem_put_iova(gpu->rb->bo, gpu->aspace);
 		msm_ringbuffer_destroy(gpu->rb);
 	}
-
-	if (gpu->fctx)
-		msm_fence_context_free(gpu->fctx);
+	if (gpu->aspace) {
+		gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
+			NULL, 0);
+		msm_gem_address_space_put(gpu->aspace);
+	}
 }
+2
drivers/gpu/drm/msm/msm_kms.h
···
 struct msm_kms *mdp5_kms_init(struct drm_device *dev);
 int msm_mdss_init(struct drm_device *dev);
 void msm_mdss_destroy(struct drm_device *dev);
+int msm_mdss_enable(struct msm_mdss *mdss);
+int msm_mdss_disable(struct msm_mdss *mdss);
 
 #endif /* __MSM_KMS_H__ */
+5 -7
drivers/gpu/drm/msm/msm_ringbuffer.c
···
 	}
 
 	ring->gpu = gpu;
-	ring->bo = msm_gem_new(gpu->dev, size, MSM_BO_WC);
-	if (IS_ERR(ring->bo)) {
-		ret = PTR_ERR(ring->bo);
-		ring->bo = NULL;
-		goto fail;
-	}
 
-	ring->start = msm_gem_get_vaddr(ring->bo);
+	/* Pass NULL for the iova pointer - we will map it later */
+	ring->start = msm_gem_kernel_new(gpu->dev, size, MSM_BO_WC,
+		gpu->aspace, &ring->bo, NULL);
+
 	if (IS_ERR(ring->start)) {
 		ret = PTR_ERR(ring->start);
+		ring->start = 0;
 		goto fail;
 	}
 	ring->end   = ring->start + (size / 4);