Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

drm/msm/mdp5: Remove mixer/intf pointers from mdp5_ctl

These are part of the CRTC state; it doesn't feel right to leave them
hanging in the mdp5_ctl struct. Pass an mdp5_pipeline pointer instead
wherever it is needed.

We still have some params in mdp5_ctl, like start_mask, which are
derived from atomic state and should be rolled back if a commit fails,
but this doesn't seem to cause much trouble.

Signed-off-by: Archit Taneja <architt@codeaurora.org>
Signed-off-by: Rob Clark <robdclark@gmail.com>
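
The gist of the change, sketched below in stand-in C (the stand-in
definitions are inferred from how this patch dereferences pipeline->intf
and pipeline->mixer; the real declarations live in the mdp5 headers):
the interface and mixer pointers move out of mdp5_ctl and into the
per-CRTC pipeline state, and every CTL entry point takes that pipeline
as an explicit parameter.

    #include <stdint.h>
    #include <stdbool.h>

    typedef uint32_t u32;   /* kernel-style alias, for the sketch only */

    struct mdp5_interface;  /* encoder interface (DSI, WB, ...), opaque here */
    struct mdp5_hw_mixer;   /* layer mixer, opaque here */

    /* Per-CRTC pipeline state; from this patch's usage it carries at
     * least the intf/mixer pointers that mdp5_ctl used to cache. */
    struct mdp5_pipeline {
            struct mdp5_interface *intf;
            struct mdp5_hw_mixer *mixer;
    };

    /* mdp5_ctl keeps only its own bookkeeping (cf. the mdp5_ctl.c hunk
     * below; fields beyond these are omitted from the sketch)... */
    struct mdp5_ctl {
            u32 id;
            u32 status;             /* CTL status bitmask */
            bool encoder_enabled;
            u32 start_mask;         /* still derived from atomic state */
    };

    /* ...so the CTL entry points now take the pipeline explicitly: */
    u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
                        u32 flush_mask);

Callers look the pipeline up from the CRTC's atomic state and pass it
down, per the hunks below:

    struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(crtc);
    mdp5_ctl_commit(ctl, pipeline, flush_mask);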

Authored by Archit Taneja, committed by Rob Clark
f316b25a 0ddc3a63

7 files changed, 88 insertions(+), 63 deletions(-)
drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c (+7 -8)

···
                struct drm_display_mode *mode,
                struct drm_display_mode *adjusted_mode)
 {
-       struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
-
        mode = adjusted_mode;

        DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
···
                        mode->vsync_end, mode->vtotal,
                        mode->type, mode->flags);
        pingpong_tearcheck_setup(encoder, mode);
-       mdp5_crtc_set_pipeline(encoder->crtc, mdp5_cmd_enc->intf,
-                              mdp5_cmd_enc->ctl);
+       mdp5_crtc_set_pipeline(encoder->crtc);
 }

 void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
···
        struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
        struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl;
        struct mdp5_interface *intf = mdp5_cmd_enc->intf;
+       struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc);

        if (WARN_ON(!mdp5_cmd_enc->enabled))
                return;

        pingpong_tearcheck_disable(encoder);

-       mdp5_ctl_set_encoder_state(ctl, false);
-       mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
+       mdp5_ctl_set_encoder_state(ctl, pipeline, false);
+       mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf));

        bs_set(mdp5_cmd_enc, 0);

···
        struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
        struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl;
        struct mdp5_interface *intf = mdp5_cmd_enc->intf;
+       struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc);

        if (WARN_ON(mdp5_cmd_enc->enabled))
                return;
···
        if (pingpong_tearcheck_enable(encoder))
                return;

-       mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
+       mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf));

-       mdp5_ctl_set_encoder_state(ctl, true);
+       mdp5_ctl_set_encoder_state(ctl, pipeline, true);

        mdp5_cmd_enc->enabled = true;
 }
drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c (+23 -9)

···
 {
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
        struct mdp5_ctl *ctl = mdp5_cstate->ctl;
+       struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;

        DBG("%s: flush=%08x", crtc->name, flush_mask);
-       return mdp5_ctl_commit(ctl, flush_mask);
+       return mdp5_ctl_commit(ctl, pipeline, flush_mask);
 }

 /*
···
 static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
 {
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+       struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_ctl *ctl = mdp5_cstate->ctl;
        struct drm_device *dev = crtc->dev;
···

        if (ctl && !crtc->state->enable) {
                /* set STAGE_UNUSED for all layers */
-               mdp5_ctl_blend(ctl, NULL, 0, 0);
+               mdp5_ctl_blend(ctl, pipeline, NULL, 0, 0);
                /* XXX: What to do here? */
                /* mdp5_crtc->ctl = NULL; */
        }
···
 {
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+       struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        struct drm_plane *plane;
        const struct mdp5_cfg_hw *hw_cfg;
        struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
        const struct mdp_format *format;
-       struct mdp5_hw_mixer *mixer = mdp5_cstate->pipeline.mixer;
+       struct mdp5_hw_mixer *mixer = pipeline->mixer;
        uint32_t lm = mixer->lm;
        struct mdp5_ctl *ctl = mdp5_cstate->ctl;
        uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
···

        mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), mixer_op_mode);

-       mdp5_ctl_blend(ctl, stage, plane_cnt, ctl_blend_flags);
+       mdp5_ctl_blend(ctl, pipeline, stage, plane_cnt, ctl_blend_flags);

 out:
        spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
···
 {
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+       struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
        struct drm_device *dev = crtc->dev;
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        struct drm_gem_object *cursor_bo, *old_bo = NULL;
···
        spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

 set_cursor:
-       ret = mdp5_ctl_set_cursor(ctl, 0, cursor_enable);
+       ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
        if (ret) {
                dev_err(dev->dev, "failed to %sable cursor: %d\n",
                        cursor_enable ? "en" : "dis", ret);
···
        return mdp5_crtc->vblank.irqmask;
 }

-void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
-                           struct mdp5_interface *intf, struct mdp5_ctl *ctl)
+void mdp5_crtc_set_pipeline(struct drm_crtc *crtc)
 {
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
-       struct mdp5_hw_mixer *mixer = mdp5_cstate->pipeline.mixer;
        struct mdp5_kms *mdp5_kms = get_kms(crtc);

        /* should this be done elsewhere ? */
        mdp_irq_update(&mdp5_kms->base);

-       mdp5_ctl_set_pipeline(ctl, intf, mixer);
+       mdp5_ctl_set_pipeline(mdp5_cstate->ctl, &mdp5_cstate->pipeline);
 }

 struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
···

        return WARN_ON(!mdp5_cstate->pipeline.mixer) ?
                ERR_PTR(-EINVAL) : mdp5_cstate->pipeline.mixer;
+}
+
+struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc)
+{
+       struct mdp5_crtc_state *mdp5_cstate;
+
+       if (WARN_ON(!crtc))
+               return ERR_PTR(-EINVAL);
+
+       mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+
+       return &mdp5_cstate->pipeline;
 }

 void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c (+37 -31)

···
        struct mdp5_ctl_manager *ctlm;

        u32 id;
-       struct mdp5_hw_mixer *mixer;

        /* CTL status bitmask */
        u32 status;
-
-       /* Operation Mode Configuration for the Pipeline */
-       struct mdp5_interface *intf;

        bool encoder_enabled;
        uint32_t start_mask;
···
        spin_unlock_irqrestore(&ctl->hw_lock, flags);
 }

-int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_interface *intf,
-                         struct mdp5_hw_mixer *mixer)
+int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
 {
        struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
        struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);
-
-       ctl->mixer = mixer;
-       ctl->intf = intf;
+       struct mdp5_interface *intf = pipeline->intf;
+       struct mdp5_hw_mixer *mixer = pipeline->mixer;

        ctl->start_mask = mdp_ctl_flush_mask_lm(mixer->lm) |
                          mdp_ctl_flush_mask_encoder(intf);
···
        return 0;
 }

-static bool start_signal_needed(struct mdp5_ctl *ctl)
+static bool start_signal_needed(struct mdp5_ctl *ctl,
+                               struct mdp5_pipeline *pipeline)
 {
+       struct mdp5_interface *intf = pipeline->intf;
+
        if (!ctl->encoder_enabled || ctl->start_mask != 0)
                return false;

-       switch (ctl->intf->type) {
+       switch (intf->type) {
        case INTF_WB:
                return true;
        case INTF_DSI:
-               return ctl->intf->mode == MDP5_INTF_DSI_MODE_COMMAND;
+               return intf->mode == MDP5_INTF_DSI_MODE_COMMAND;
        default:
                return false;
        }
···
        spin_unlock_irqrestore(&ctl->hw_lock, flags);
 }

-static void refill_start_mask(struct mdp5_ctl *ctl)
+static void refill_start_mask(struct mdp5_ctl *ctl,
+                             struct mdp5_pipeline *pipeline)
 {
-       struct mdp5_interface *intf = ctl->intf;
+       struct mdp5_interface *intf = pipeline->intf;
+       struct mdp5_hw_mixer *mixer = pipeline->mixer;

-       ctl->start_mask = mdp_ctl_flush_mask_lm(ctl->mixer->lm);
+       ctl->start_mask = mdp_ctl_flush_mask_lm(mixer->lm);

        /*
         * Writeback encoder needs to program & flush
···
 * Note:
 * This encoder state is needed to trigger START signal (data path kickoff).
 */
-int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled)
+int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl,
+                              struct mdp5_pipeline *pipeline,
+                              bool enabled)
 {
+       struct mdp5_interface *intf = pipeline->intf;
+
        if (WARN_ON(!ctl))
                return -EINVAL;

        ctl->encoder_enabled = enabled;
-       DBG("intf_%d: %s", ctl->intf->num, enabled ? "on" : "off");
+       DBG("intf_%d: %s", intf->num, enabled ? "on" : "off");

-       if (start_signal_needed(ctl)) {
+       if (start_signal_needed(ctl, pipeline)) {
                send_start_signal(ctl);
-               refill_start_mask(ctl);
+               refill_start_mask(ctl, pipeline);
        }

        return 0;
···
 * CTL registers need to be flushed after calling this function
 * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
 */
-int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable)
+int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
+                       int cursor_id, bool enable)
 {
        struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
        unsigned long flags;
        u32 blend_cfg;
-       struct mdp5_hw_mixer *mixer = ctl->mixer;
+       struct mdp5_hw_mixer *mixer = pipeline->mixer;

        if (unlikely(WARN_ON(!mixer))) {
                dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM",
···
        }
 }

-int mdp5_ctl_blend(struct mdp5_ctl *ctl, enum mdp5_pipe *stage, u32 stage_cnt,
-                  u32 ctl_blend_op_flags)
+int mdp5_ctl_blend(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
+                  enum mdp5_pipe *stage, u32 stage_cnt, u32 ctl_blend_op_flags)
 {
-       struct mdp5_hw_mixer *mixer = ctl->mixer;
+       struct mdp5_hw_mixer *mixer = pipeline->mixer;
        unsigned long flags;
        u32 blend_cfg = 0, blend_ext_cfg = 0;
        int i, start_stage;
···
        }
 }

-static u32 fix_sw_flush(struct mdp5_ctl *ctl, u32 flush_mask)
+static u32 fix_sw_flush(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
+                       u32 flush_mask)
 {
        struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
        u32 sw_mask = 0;
···

        /* for some targets, cursor bit is the same as LM bit */
        if (BIT_NEEDS_SW_FIX(MDP5_CTL_FLUSH_CURSOR_0))
-               sw_mask |= mdp_ctl_flush_mask_lm(ctl->mixer->lm);
+               sw_mask |= mdp_ctl_flush_mask_lm(pipeline->mixer->lm);

        return sw_mask;
 }
···
 *
 * Return H/W flushed bit mask.
 */
-u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
+u32 mdp5_ctl_commit(struct mdp5_ctl *ctl,
+                   struct mdp5_pipeline *pipeline,
+                   u32 flush_mask)
 {
        struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
        unsigned long flags;
···
                ctl->pending_ctl_trigger = 0;
        }

-       flush_mask |= fix_sw_flush(ctl, flush_mask);
+       flush_mask |= fix_sw_flush(ctl, pipeline, flush_mask);

        flush_mask &= ctl_mgr->flush_hw_mask;

···
                spin_unlock_irqrestore(&ctl->hw_lock, flags);
        }

-       if (start_signal_needed(ctl)) {
+       if (start_signal_needed(ctl, pipeline)) {
                send_start_signal(ctl);
-               refill_start_mask(ctl);
+               refill_start_mask(ctl, pipeline);
        }

        return curr_ctl_flush_mask;
···

 found:
        ctl = &ctl_mgr->ctls[c];
-       ctl->mixer = NULL;
        ctl->status |= CTL_STAT_BUSY;
        ctl->pending_ctl_trigger = 0;
        DBG("CTL %d allocated", ctl->id);
drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h (+10 -6)

···
 int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl);

 struct mdp5_interface;
-int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_interface *intf,
-                         struct mdp5_hw_mixer *lm);
-int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled);
+struct mdp5_pipeline;
+int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *p);
+int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, struct mdp5_pipeline *p,
+                              bool enabled);

-int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable);
+int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
+                       int cursor_id, bool enable);
 int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable);

 /*
···
 * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
 */
 #define MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT      BIT(0)
-int mdp5_ctl_blend(struct mdp5_ctl *ctl, enum mdp5_pipe *stage, u32 stage_cnt,
+int mdp5_ctl_blend(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
+                  enum mdp5_pipe *stage, u32 stage_cnt,
                   u32 ctl_blend_op_flags);

 /**
···
 u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf);

 /* @flush_mask: see CTL flush masks definitions below */
-u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask);
+u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
+                   u32 flush_mask);
 u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl);

drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c (+7 -6)

···

        spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);

-       mdp5_crtc_set_pipeline(encoder->crtc, mdp5_encoder->intf,
-                              mdp5_encoder->ctl);
+       mdp5_crtc_set_pipeline(encoder->crtc);
 }

 static void mdp5_vid_encoder_disable(struct drm_encoder *encoder)
···
        struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
        struct mdp5_kms *mdp5_kms = get_kms(encoder);
        struct mdp5_ctl *ctl = mdp5_encoder->ctl;
+       struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc);
        struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
        struct mdp5_interface *intf = mdp5_encoder->intf;
        int intfn = mdp5_encoder->intf->num;
···
        if (WARN_ON(!mdp5_encoder->enabled))
                return;

-       mdp5_ctl_set_encoder_state(ctl, false);
+       mdp5_ctl_set_encoder_state(ctl, pipeline, false);

        spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
        mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 0);
        spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
-       mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
+       mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf));

        /*
         * Wait for a vsync so we know the ENABLE=0 latched before
···
        struct mdp5_kms *mdp5_kms = get_kms(encoder);
        struct mdp5_ctl *ctl = mdp5_encoder->ctl;
        struct mdp5_interface *intf = mdp5_encoder->intf;
+       struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc);
        int intfn = intf->num;
        unsigned long flags;

···
        spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
        mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 1);
        spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
-       mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
+       mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf));

-       mdp5_ctl_set_encoder_state(ctl, true);
+       mdp5_ctl_set_encoder_state(ctl, pipeline, true);

        mdp5_encoder->enabled = true;
 }
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h (+2 -2)

···
 uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);

 struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc);
-void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
-                           struct mdp5_interface *intf, struct mdp5_ctl *ctl);
+struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc);
+void mdp5_crtc_set_pipeline(struct drm_crtc *crtc);
 void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc);
 struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
                                struct drm_plane *plane,
drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c (+2 -1)

···

        if (new_plane_state->visible) {
                struct mdp5_ctl *ctl;
+               struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(crtc);

                ret = mdp5_plane_mode_set(plane, crtc, fb,
                                          &new_plane_state->src,
···

                ctl = mdp5_crtc_get_ctl(crtc);

-               mdp5_ctl_commit(ctl, mdp5_plane_get_flush(plane));
+               mdp5_ctl_commit(ctl, pipeline, mdp5_plane_get_flush(plane));
        }

        *to_mdp5_plane_state(plane_state) =