Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/msm/dpu: get rid of cached flush_mask

Instead of querying the CTL for the flush mask (for SSPP, LM or DSPP),
storing the mask in the mixer configuration and then pushing the mask to
the CTL, tell the CTL to cache the flush mask in place.

Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
Reviewed-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
Patchwork: https://patchwork.freedesktop.org/patch/473159/
Link: https://lore.kernel.org/r/20220209172520.3719906-9-dmitry.baryshkov@linaro.org
Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
Signed-off-by: Rob Clark <robdclark@chromium.org>

authored by

Dmitry Baryshkov and committed by
Rob Clark
3cde792a 30da01af

+69 -77
+8 -18
drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
··· 463 463 _dpu_crtc_setup_blend_cfg(mixer + lm_idx, 464 464 pstate, format); 465 465 466 - mixer[lm_idx].flush_mask |= ctl->ops.get_bitmask_sspp(ctl, sspp_idx); 466 + mixer[lm_idx].lm_ctl->ops.update_pending_flush_sspp(mixer[lm_idx].lm_ctl, 467 + sspp_idx); 467 468 468 469 if (bg_alpha_enable && !format->alpha_enable) 469 470 mixer[lm_idx].mixer_op_mode = 0; ··· 498 497 499 498 for (i = 0; i < cstate->num_mixers; i++) { 500 499 mixer[i].mixer_op_mode = 0; 501 - mixer[i].flush_mask = 0; 502 500 if (mixer[i].lm_ctl->ops.clear_all_blendstages) 503 501 mixer[i].lm_ctl->ops.clear_all_blendstages( 504 502 mixer[i].lm_ctl); ··· 514 514 515 515 lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode); 516 516 517 - mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl, 517 + /* stage config flush mask */ 518 + ctl->ops.update_pending_flush_mixer(ctl, 518 519 mixer[i].hw_lm->idx); 519 520 520 - /* stage config flush mask */ 521 - ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask); 522 - 523 - DRM_DEBUG_ATOMIC("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n", 521 + DRM_DEBUG_ATOMIC("lm %d, op_mode 0x%X, ctl %d\n", 524 522 mixer[i].hw_lm->idx - LM_0, 525 523 mixer[i].mixer_op_mode, 526 - ctl->idx - CTL_0, 527 - mixer[i].flush_mask); 524 + ctl->idx - CTL_0); 528 525 529 526 ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx, 530 527 &stage_cfg); ··· 765 768 dspp->ops.setup_pcc(dspp, &cfg); 766 769 } 767 770 768 - mixer[i].flush_mask |= ctl->ops.get_bitmask_dspp(ctl, 769 - mixer[i].hw_dspp->idx); 770 - 771 771 /* stage config flush mask */ 772 - ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask); 773 - 774 - DRM_DEBUG_ATOMIC("lm %d, ctl %d, flush mask 0x%x\n", 775 - mixer[i].hw_lm->idx - DSPP_0, 776 - ctl->idx - CTL_0, 777 - mixer[i].flush_mask); 772 + ctl->ops.update_pending_flush_dspp(ctl, 773 + mixer[i].hw_dspp->idx); 778 774 } 779 775 } 780 776
-1
drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
··· 97 97 struct dpu_hw_ctl *lm_ctl; 98 98 struct dpu_hw_dspp *hw_dspp; 99 99 u32 mixer_op_mode; 100 - u32 flush_mask; 101 100 }; 102 101 103 102 /**
+2 -4
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
··· 1981 1981 { 1982 1982 struct dpu_hw_mixer_cfg mixer; 1983 1983 int i, num_lm; 1984 - u32 flush_mask = 0; 1985 1984 struct dpu_global_state *global_state; 1986 1985 struct dpu_hw_blk *hw_lm[2]; 1987 1986 struct dpu_hw_mixer *hw_mixer[2]; ··· 1999 2000 2000 2001 for (i = 0; i < num_lm; i++) { 2001 2002 hw_mixer[i] = to_dpu_hw_mixer(hw_lm[i]); 2002 - flush_mask = phys_enc->hw_ctl->ops.get_bitmask_mixer(ctl, hw_mixer[i]->idx); 2003 - if (phys_enc->hw_ctl->ops.update_pending_flush) 2004 - phys_enc->hw_ctl->ops.update_pending_flush(ctl, flush_mask); 2003 + if (phys_enc->hw_ctl->ops.update_pending_flush_mixer) 2004 + phys_enc->hw_ctl->ops.update_pending_flush_mixer(ctl, hw_mixer[i]->idx); 2005 2005 2006 2006 /* clear all blendstages */ 2007 2007 if (phys_enc->hw_ctl->ops.setup_blendstage)
+33 -45
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
··· 150 150 DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask); 151 151 } 152 152 153 - static uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx, 153 + static void dpu_hw_ctl_update_pending_flush_sspp(struct dpu_hw_ctl *ctx, 154 154 enum dpu_sspp sspp) 155 155 { 156 - uint32_t flushbits = 0; 157 - 158 156 switch (sspp) { 159 157 case SSPP_VIG0: 160 - flushbits = BIT(0); 158 + ctx->pending_flush_mask |= BIT(0); 161 159 break; 162 160 case SSPP_VIG1: 163 - flushbits = BIT(1); 161 + ctx->pending_flush_mask |= BIT(1); 164 162 break; 165 163 case SSPP_VIG2: 166 - flushbits = BIT(2); 164 + ctx->pending_flush_mask |= BIT(2); 167 165 break; 168 166 case SSPP_VIG3: 169 - flushbits = BIT(18); 167 + ctx->pending_flush_mask |= BIT(18); 170 168 break; 171 169 case SSPP_RGB0: 172 - flushbits = BIT(3); 170 + ctx->pending_flush_mask |= BIT(3); 173 171 break; 174 172 case SSPP_RGB1: 175 - flushbits = BIT(4); 173 + ctx->pending_flush_mask |= BIT(4); 176 174 break; 177 175 case SSPP_RGB2: 178 - flushbits = BIT(5); 176 + ctx->pending_flush_mask |= BIT(5); 179 177 break; 180 178 case SSPP_RGB3: 181 - flushbits = BIT(19); 179 + ctx->pending_flush_mask |= BIT(19); 182 180 break; 183 181 case SSPP_DMA0: 184 - flushbits = BIT(11); 182 + ctx->pending_flush_mask |= BIT(11); 185 183 break; 186 184 case SSPP_DMA1: 187 - flushbits = BIT(12); 185 + ctx->pending_flush_mask |= BIT(12); 188 186 break; 189 187 case SSPP_DMA2: 190 - flushbits = BIT(24); 188 + ctx->pending_flush_mask |= BIT(24); 191 189 break; 192 190 case SSPP_DMA3: 193 - flushbits = BIT(25); 191 + ctx->pending_flush_mask |= BIT(25); 194 192 break; 195 193 case SSPP_CURSOR0: 196 - flushbits = BIT(22); 194 + ctx->pending_flush_mask |= BIT(22); 197 195 break; 198 196 case SSPP_CURSOR1: 199 - flushbits = BIT(23); 197 + ctx->pending_flush_mask |= BIT(23); 200 198 break; 201 199 default: 202 200 break; 203 201 } 204 - 205 - return flushbits; 206 202 } 207 203 208 - static uint32_t dpu_hw_ctl_get_bitmask_mixer(struct 
dpu_hw_ctl *ctx, 204 + static void dpu_hw_ctl_update_pending_flush_mixer(struct dpu_hw_ctl *ctx, 209 205 enum dpu_lm lm) 210 206 { 211 - uint32_t flushbits = 0; 212 - 213 207 switch (lm) { 214 208 case LM_0: 215 - flushbits = BIT(6); 209 + ctx->pending_flush_mask |= BIT(6); 216 210 break; 217 211 case LM_1: 218 - flushbits = BIT(7); 212 + ctx->pending_flush_mask |= BIT(7); 219 213 break; 220 214 case LM_2: 221 - flushbits = BIT(8); 215 + ctx->pending_flush_mask |= BIT(8); 222 216 break; 223 217 case LM_3: 224 - flushbits = BIT(9); 218 + ctx->pending_flush_mask |= BIT(9); 225 219 break; 226 220 case LM_4: 227 - flushbits = BIT(10); 221 + ctx->pending_flush_mask |= BIT(10); 228 222 break; 229 223 case LM_5: 230 - flushbits = BIT(20); 224 + ctx->pending_flush_mask |= BIT(20); 231 225 break; 232 226 default: 233 - return -EINVAL; 227 + break; 234 228 } 235 229 236 - flushbits |= CTL_FLUSH_MASK_CTL; 237 - 238 - return flushbits; 230 + ctx->pending_flush_mask |= CTL_FLUSH_MASK_CTL; 239 231 } 240 232 241 233 static void dpu_hw_ctl_update_pending_flush_intf(struct dpu_hw_ctl *ctx, ··· 286 294 ctx->pending_flush_mask |= BIT(MERGE_3D_IDX); 287 295 } 288 296 289 - static uint32_t dpu_hw_ctl_get_bitmask_dspp(struct dpu_hw_ctl *ctx, 297 + static void dpu_hw_ctl_update_pending_flush_dspp(struct dpu_hw_ctl *ctx, 290 298 enum dpu_dspp dspp) 291 299 { 292 - uint32_t flushbits = 0; 293 - 294 300 switch (dspp) { 295 301 case DSPP_0: 296 - flushbits = BIT(13); 302 + ctx->pending_flush_mask |= BIT(13); 297 303 break; 298 304 case DSPP_1: 299 - flushbits = BIT(14); 305 + ctx->pending_flush_mask |= BIT(14); 300 306 break; 301 307 case DSPP_2: 302 - flushbits = BIT(15); 308 + ctx->pending_flush_mask |= BIT(15); 303 309 break; 304 310 case DSPP_3: 305 - flushbits = BIT(21); 311 + ctx->pending_flush_mask |= BIT(21); 306 312 break; 307 313 default: 308 - return 0; 314 + break; 309 315 } 310 - 311 - return flushbits; 312 316 } 313 317 314 318 static u32 dpu_hw_ctl_poll_reset_status(struct 
dpu_hw_ctl *ctx, u32 timeout_us) ··· 673 685 ops->wait_reset_status = dpu_hw_ctl_wait_reset_status; 674 686 ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages; 675 687 ops->setup_blendstage = dpu_hw_ctl_setup_blendstage; 676 - ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp; 677 - ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer; 678 - ops->get_bitmask_dspp = dpu_hw_ctl_get_bitmask_dspp; 688 + ops->update_pending_flush_sspp = dpu_hw_ctl_update_pending_flush_sspp; 689 + ops->update_pending_flush_mixer = dpu_hw_ctl_update_pending_flush_mixer; 690 + ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp; 679 691 if (cap & BIT(DPU_CTL_FETCH_ACTIVE)) 680 692 ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active; 681 693 };
+26 -9
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
··· 130 130 enum dpu_merge_3d blk); 131 131 132 132 /** 133 + * OR in the given flushbits to the cached pending_flush_mask 134 + * No effect on hardware 135 + * @ctx : ctl path ctx pointer 136 + * @blk : SSPP block index 137 + */ 138 + void (*update_pending_flush_sspp)(struct dpu_hw_ctl *ctx, 139 + enum dpu_sspp blk); 140 + 141 + /** 142 + * OR in the given flushbits to the cached pending_flush_mask 143 + * No effect on hardware 144 + * @ctx : ctl path ctx pointer 145 + * @blk : LM block index 146 + */ 147 + void (*update_pending_flush_mixer)(struct dpu_hw_ctl *ctx, 148 + enum dpu_lm blk); 149 + 150 + /** 151 + * OR in the given flushbits to the cached pending_flush_mask 152 + * No effect on hardware 153 + * @ctx : ctl path ctx pointer 154 + * @blk : DSPP block index 155 + */ 156 + void (*update_pending_flush_dspp)(struct dpu_hw_ctl *ctx, 157 + enum dpu_dspp blk); 158 + /** 133 159 * Write the value of the pending_flush_mask to hardware 134 160 * @ctx : ctl path ctx pointer 135 161 */ ··· 196 170 * Returns: 0 on success or -error if reset incomplete within interval 197 171 */ 198 172 int (*wait_reset_status)(struct dpu_hw_ctl *ctx); 199 - 200 - uint32_t (*get_bitmask_sspp)(struct dpu_hw_ctl *ctx, 201 - enum dpu_sspp blk); 202 - 203 - uint32_t (*get_bitmask_mixer)(struct dpu_hw_ctl *ctx, 204 - enum dpu_lm blk); 205 - 206 - uint32_t (*get_bitmask_dspp)(struct dpu_hw_ctl *ctx, 207 - enum dpu_dspp blk); 208 173 209 174 /** 210 175 * Set all blend stages to disabled