Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-msm-fixes-2020-02-16' of https://gitlab.freedesktop.org/drm/msm into drm-fixes

+ fix UBWC on GPU and display side for sc7180
+ fix DSI suspend/resume issue encountered on sc7180
+ fix some breakage on so-called "linux-android" devices
(fallout from sc7180/a618 support, not seen earlier
due to bootloader/firmware differences)
+ couple other misc fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGshz5K3tJd=NsBSHq6HGT-ZRa67qt+iN=U2ZFO2oD8kuw@mail.gmail.com

+170 -100
+31 -6
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
··· 796 796 return true; 797 797 } 798 798 799 + #define GBIF_CLIENT_HALT_MASK BIT(0) 800 + #define GBIF_ARB_HALT_MASK BIT(1) 801 + 802 + static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu) 803 + { 804 + struct msm_gpu *gpu = &adreno_gpu->base; 805 + 806 + if (!a6xx_has_gbif(adreno_gpu)) { 807 + gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf); 808 + spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & 809 + 0xf) == 0xf); 810 + gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0); 811 + 812 + return; 813 + } 814 + 815 + /* Halt new client requests on GBIF */ 816 + gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK); 817 + spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) & 818 + (GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK); 819 + 820 + /* Halt all AXI requests on GBIF */ 821 + gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK); 822 + spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) & 823 + (GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK); 824 + 825 + /* The GBIF halt needs to be explicitly cleared */ 826 + gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0); 827 + } 828 + 799 829 /* Gracefully try to shut down the GMU and by extension the GPU */ 800 830 static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu) 801 831 { 802 832 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); 803 833 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; 804 - struct msm_gpu *gpu = &adreno_gpu->base; 805 834 u32 val; 806 835 807 836 /* ··· 848 819 return; 849 820 } 850 821 851 - /* Clear the VBIF pipe before shutting down */ 852 - gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf); 853 - spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & 0xf) 854 - == 0xf); 855 - gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0); 822 + a6xx_bus_clear_pending_transactions(adreno_gpu); 856 823 857 824 /* tell the GMU we want to slumber */ 858 825 a6xx_gmu_notify_slumber(gmu);
+6 -59
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
··· 378 378 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); 379 379 int ret; 380 380 381 - /* 382 - * During a previous slumber, GBIF halt is asserted to ensure 383 - * no further transaction can go through GPU before GPU 384 - * headswitch is turned off. 385 - * 386 - * This halt is deasserted once headswitch goes off but 387 - * incase headswitch doesn't goes off clear GBIF halt 388 - * here to ensure GPU wake-up doesn't fail because of 389 - * halted GPU transactions. 390 - */ 391 - gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0); 392 - 393 381 /* Make sure the GMU keeps the GPU on while we set it up */ 394 382 a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET); 395 383 ··· 458 470 /* Select CP0 to always count cycles */ 459 471 gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT); 460 472 461 - gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, 2 << 1); 462 - gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, 2 << 1); 463 - gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, 2 << 1); 464 - gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, 2 << 21); 473 + if (adreno_is_a630(adreno_gpu)) { 474 + gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, 2 << 1); 475 + gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, 2 << 1); 476 + gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, 2 << 1); 477 + gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, 2 << 21); 478 + } 465 479 466 480 /* Enable fault detection */ 467 481 gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, ··· 738 748 REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A6XX_CP_RB_CNTL), 739 749 }; 740 750 741 - #define GBIF_CLIENT_HALT_MASK BIT(0) 742 - #define GBIF_ARB_HALT_MASK BIT(1) 743 - 744 - static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu) 745 - { 746 - struct msm_gpu *gpu = &adreno_gpu->base; 747 - 748 - if(!a6xx_has_gbif(adreno_gpu)){ 749 - gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf); 750 - spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & 751 - 0xf) == 0xf); 752 - gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0); 753 - 754 - 
return; 755 - } 756 - 757 - /* Halt new client requests on GBIF */ 758 - gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK); 759 - spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) & 760 - (GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK); 761 - 762 - /* Halt all AXI requests on GBIF */ 763 - gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK); 764 - spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) & 765 - (GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK); 766 - 767 - /* 768 - * GMU needs DDR access in slumber path. Deassert GBIF halt now 769 - * to allow for GMU to access system memory. 770 - */ 771 - gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0); 772 - } 773 - 774 751 static int a6xx_pm_resume(struct msm_gpu *gpu) 775 752 { 776 753 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); ··· 761 804 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); 762 805 763 806 devfreq_suspend_device(gpu->devfreq.devfreq); 764 - 765 - /* 766 - * Make sure the GMU is idle before continuing (because some transitions 767 - * may use VBIF 768 - */ 769 - a6xx_gmu_wait_for_idle(&a6xx_gpu->gmu); 770 - 771 - /* Clear the VBIF pipe before shutting down */ 772 - /* FIXME: This accesses the GPU - do we need to make sure it is on? */ 773 - a6xx_bus_clear_pending_transactions(adreno_gpu); 774 807 775 808 return a6xx_gmu_stop(a6xx_gpu); 776 809 }
+60 -25
drivers/gpu/drm/msm/adreno/a6xx_hfi.c
··· 7 7 8 8 #include "a6xx_gmu.h" 9 9 #include "a6xx_gmu.xml.h" 10 + #include "a6xx_gpu.h" 10 11 11 12 #define HFI_MSG_ID(val) [val] = #val 12 13 ··· 217 216 NULL, 0); 218 217 } 219 218 220 - static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu) 219 + static void a618_build_bw_table(struct a6xx_hfi_msg_bw_table *msg) 221 220 { 222 - struct a6xx_hfi_msg_bw_table msg = { 0 }; 221 + /* Send a single "off" entry since the 618 GMU doesn't do bus scaling */ 222 + msg->bw_level_num = 1; 223 + 224 + msg->ddr_cmds_num = 3; 225 + msg->ddr_wait_bitmask = 0x01; 226 + 227 + msg->ddr_cmds_addrs[0] = 0x50000; 228 + msg->ddr_cmds_addrs[1] = 0x5003c; 229 + msg->ddr_cmds_addrs[2] = 0x5000c; 230 + 231 + msg->ddr_cmds_data[0][0] = 0x40000000; 232 + msg->ddr_cmds_data[0][1] = 0x40000000; 233 + msg->ddr_cmds_data[0][2] = 0x40000000; 223 234 224 235 /* 225 - * The sdm845 GMU doesn't do bus frequency scaling on its own but it 226 - * does need at least one entry in the list because it might be accessed 227 - * when the GMU is shutting down. Send a single "off" entry. 
236 + * These are the CX (CNOC) votes - these are used by the GMU but the 237 + * votes are known and fixed for the target 228 238 */ 239 + msg->cnoc_cmds_num = 1; 240 + msg->cnoc_wait_bitmask = 0x01; 229 241 230 - msg.bw_level_num = 1; 242 + msg->cnoc_cmds_addrs[0] = 0x5007c; 243 + msg->cnoc_cmds_data[0][0] = 0x40000000; 244 + msg->cnoc_cmds_data[1][0] = 0x60000001; 245 + } 231 246 232 - msg.ddr_cmds_num = 3; 233 - msg.ddr_wait_bitmask = 0x07; 247 + static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg) 248 + { 249 + /* Send a single "off" entry since the 630 GMU doesn't do bus scaling */ 250 + msg->bw_level_num = 1; 234 251 235 - msg.ddr_cmds_addrs[0] = 0x50000; 236 - msg.ddr_cmds_addrs[1] = 0x5005c; 237 - msg.ddr_cmds_addrs[2] = 0x5000c; 252 + msg->ddr_cmds_num = 3; 253 + msg->ddr_wait_bitmask = 0x07; 238 254 239 - msg.ddr_cmds_data[0][0] = 0x40000000; 240 - msg.ddr_cmds_data[0][1] = 0x40000000; 241 - msg.ddr_cmds_data[0][2] = 0x40000000; 255 + msg->ddr_cmds_addrs[0] = 0x50000; 256 + msg->ddr_cmds_addrs[1] = 0x5005c; 257 + msg->ddr_cmds_addrs[2] = 0x5000c; 258 + 259 + msg->ddr_cmds_data[0][0] = 0x40000000; 260 + msg->ddr_cmds_data[0][1] = 0x40000000; 261 + msg->ddr_cmds_data[0][2] = 0x40000000; 242 262 243 263 /* 244 264 * These are the CX (CNOC) votes. This is used but the values for the 245 265 * sdm845 GMU are known and fixed so we can hard code them. 
246 266 */ 247 267 248 - msg.cnoc_cmds_num = 3; 249 - msg.cnoc_wait_bitmask = 0x05; 268 + msg->cnoc_cmds_num = 3; 269 + msg->cnoc_wait_bitmask = 0x05; 250 270 251 - msg.cnoc_cmds_addrs[0] = 0x50034; 252 - msg.cnoc_cmds_addrs[1] = 0x5007c; 253 - msg.cnoc_cmds_addrs[2] = 0x5004c; 271 + msg->cnoc_cmds_addrs[0] = 0x50034; 272 + msg->cnoc_cmds_addrs[1] = 0x5007c; 273 + msg->cnoc_cmds_addrs[2] = 0x5004c; 254 274 255 - msg.cnoc_cmds_data[0][0] = 0x40000000; 256 - msg.cnoc_cmds_data[0][1] = 0x00000000; 257 - msg.cnoc_cmds_data[0][2] = 0x40000000; 275 + msg->cnoc_cmds_data[0][0] = 0x40000000; 276 + msg->cnoc_cmds_data[0][1] = 0x00000000; 277 + msg->cnoc_cmds_data[0][2] = 0x40000000; 258 278 259 - msg.cnoc_cmds_data[1][0] = 0x60000001; 260 - msg.cnoc_cmds_data[1][1] = 0x20000001; 261 - msg.cnoc_cmds_data[1][2] = 0x60000001; 279 + msg->cnoc_cmds_data[1][0] = 0x60000001; 280 + msg->cnoc_cmds_data[1][1] = 0x20000001; 281 + msg->cnoc_cmds_data[1][2] = 0x60000001; 282 + } 283 + 284 + 285 + static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu) 286 + { 287 + struct a6xx_hfi_msg_bw_table msg = { 0 }; 288 + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); 289 + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; 290 + 291 + if (adreno_is_a618(adreno_gpu)) 292 + a618_build_bw_table(&msg); 293 + else 294 + a6xx_build_bw_table(&msg); 262 295 263 296 return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, &msg, sizeof(msg), 264 297 NULL, 0);
+2 -2
drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
··· 255 255 256 256 INTERLEAVED_RGB_FMT(RGB565, 257 257 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT, 258 - C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3, 258 + C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3, 259 259 false, 2, 0, 260 260 DPU_FETCH_LINEAR, 1), 261 261 262 262 INTERLEAVED_RGB_FMT(BGR565, 263 263 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT, 264 - C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3, 264 + C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3, 265 265 false, 2, 0, 266 266 DPU_FETCH_LINEAR, 1), 267 267
+57 -1
drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
··· 12 12 13 13 #define to_dpu_mdss(x) container_of(x, struct dpu_mdss, base) 14 14 15 + #define HW_REV 0x0 15 16 #define HW_INTR_STATUS 0x0010 16 17 17 18 /* Max BW defined in KBps */ ··· 21 20 struct dpu_irq_controller { 22 21 unsigned long enabled_mask; 23 22 struct irq_domain *domain; 23 + }; 24 + 25 + struct dpu_hw_cfg { 26 + u32 val; 27 + u32 offset; 28 + }; 29 + 30 + struct dpu_mdss_hw_init_handler { 31 + u32 hw_rev; 32 + u32 hw_reg_count; 33 + struct dpu_hw_cfg* hw_cfg; 24 34 }; 25 35 26 36 struct dpu_mdss { ··· 43 31 struct icc_path *path[2]; 44 32 u32 num_paths; 45 33 }; 34 + 35 + static struct dpu_hw_cfg hw_cfg[] = { 36 + { 37 + /* UBWC global settings */ 38 + .val = 0x1E, 39 + .offset = 0x144, 40 + } 41 + }; 42 + 43 + static struct dpu_mdss_hw_init_handler cfg_handler[] = { 44 + { .hw_rev = DPU_HW_VER_620, 45 + .hw_reg_count = ARRAY_SIZE(hw_cfg), 46 + .hw_cfg = hw_cfg 47 + }, 48 + }; 49 + 50 + static void dpu_mdss_hw_init(struct dpu_mdss *dpu_mdss, u32 hw_rev) 51 + { 52 + int i; 53 + u32 count = 0; 54 + struct dpu_hw_cfg *hw_cfg = NULL; 55 + 56 + for (i = 0; i < ARRAY_SIZE(cfg_handler); i++) { 57 + if (cfg_handler[i].hw_rev == hw_rev) { 58 + hw_cfg = cfg_handler[i].hw_cfg; 59 + count = cfg_handler[i].hw_reg_count; 60 + break; 61 + } 62 + } 63 + 64 + for (i = 0; i < count; i++ ) { 65 + writel_relaxed(hw_cfg->val, 66 + dpu_mdss->mmio + hw_cfg->offset); 67 + hw_cfg++; 68 + } 69 + 70 + return; 71 + } 46 72 47 73 static int dpu_mdss_parse_data_bus_icc_path(struct drm_device *dev, 48 74 struct dpu_mdss *dpu_mdss) ··· 224 174 struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss); 225 175 struct dss_module_power *mp = &dpu_mdss->mp; 226 176 int ret; 177 + u32 mdss_rev; 227 178 228 179 dpu_mdss_icc_request_bw(mdss); 229 180 230 181 ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true); 231 - if (ret) 182 + if (ret) { 232 183 DPU_ERROR("clock enable failed, ret:%d\n", ret); 184 + return ret; 185 + } 186 + 187 + mdss_rev = readl_relaxed(dpu_mdss->mmio + HW_REV); 188 
+ dpu_mdss_hw_init(dpu_mdss, mdss_rev); 233 189 234 190 return ret; 235 191 }
+2 -2
drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
··· 1109 1109 ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion, 1110 1110 msecs_to_jiffies(50)); 1111 1111 if (ret == 0) 1112 - dev_warn(dev->dev, "pp done time out, lm=%d\n", 1113 - mdp5_cstate->pipeline.mixer->lm); 1112 + dev_warn_ratelimited(dev->dev, "pp done time out, lm=%d\n", 1113 + mdp5_cstate->pipeline.mixer->lm); 1114 1114 } 1115 1115 1116 1116 static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
+6 -1
drivers/gpu/drm/msm/dsi/dsi_manager.c
··· 336 336 return num; 337 337 } 338 338 339 - static int dsi_mgr_connector_mode_valid(struct drm_connector *connector, 339 + static enum drm_mode_status dsi_mgr_connector_mode_valid(struct drm_connector *connector, 340 340 struct drm_display_mode *mode) 341 341 { 342 342 int id = dsi_mgr_connector_get_id(connector); ··· 506 506 struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1); 507 507 struct mipi_dsi_host *host = msm_dsi->host; 508 508 struct drm_panel *panel = msm_dsi->panel; 509 + struct msm_dsi_pll *src_pll; 509 510 bool is_dual_dsi = IS_DUAL_DSI(); 510 511 int ret; 511 512 ··· 539 538 pr_err("%s: Panel %d unprepare failed,%d\n", __func__, 540 539 id, ret); 541 540 } 541 + 542 + /* Save PLL status if it is a clock source */ 543 + src_pll = msm_dsi_phy_get_pll(msm_dsi->phy); 544 + msm_dsi_pll_save_state(src_pll); 542 545 543 546 ret = msm_dsi_host_power_off(host); 544 547 if (ret)
-4
drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
··· 724 724 if (!phy || !phy->cfg->ops.disable) 725 725 return; 726 726 727 - /* Save PLL status if it is a clock source */ 728 - if (phy->usecase != MSM_DSI_PHY_SLAVE) 729 - msm_dsi_pll_save_state(phy->pll); 730 - 731 727 phy->cfg->ops.disable(phy); 732 728 733 729 dsi_phy_regulator_disable(phy);
+6
drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
··· 411 411 if (pll_10nm->slave) 412 412 dsi_pll_enable_pll_bias(pll_10nm->slave); 413 413 414 + rc = dsi_pll_10nm_vco_set_rate(hw,pll_10nm->vco_current_rate, 0); 415 + if (rc) { 416 + pr_err("vco_set_rate failed, rc=%d\n", rc); 417 + return rc; 418 + } 419 + 414 420 /* Start PLL */ 415 421 pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 416 422 0x01);