Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/imx: add deferred plane disabling

The DP (display processor) channel disable code tried to busy wait for
the DP sync flow end interrupt status bit when disabling the partial
plane without a full modeset. That never worked reliably, and it was
disabled completely by the recent "gpu: ipu-v3: remove IRQ dance on DC
channel disable" patch, causing ipu_wait_interrupt to always time out
after 50 ms, which in turn would trigger a timeout in
drm_atomic_helper_wait_for_vblanks.

This patch changes ipu_plane_atomic_disable to only queue a DP channel
register update at the next frame boundary and set a flag, which can be
done without any waiting whatsoever. The imx_drm_atomic_commit_tail then
calls a new ipu_plane_disable_deferred function that does the actual
IDMAC teardown of the planes that are flagged for deferred disabling,
after waiting for the vblank.

Signed-off-by: Philipp Zabel <p.zabel@pengutronix.de>
Reviewed-by: Lucas Stach <l.stach@pengutronix.de>

+63 -12
+18
drivers/gpu/drm/imx/imx-drm-core.c
··· 30 30 #include <video/imx-ipu-v3.h> 31 31 32 32 #include "imx-drm.h" 33 + #include "ipuv3-plane.h" 33 34 34 35 #define MAX_CRTC 4 35 36 ··· 123 122 static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state) 124 123 { 125 124 struct drm_device *dev = state->dev; 125 + struct drm_plane *plane; 126 + struct drm_plane_state *old_plane_state; 127 + bool plane_disabling = false; 128 + int i; 126 129 127 130 drm_atomic_helper_commit_modeset_disables(dev, state); 128 131 ··· 135 130 DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET); 136 131 137 132 drm_atomic_helper_commit_modeset_enables(dev, state); 133 + 134 + for_each_plane_in_state(state, plane, old_plane_state, i) { 135 + if (drm_atomic_plane_disabling(old_plane_state, plane->state)) 136 + plane_disabling = true; 137 + } 138 + 139 + if (plane_disabling) { 140 + drm_atomic_helper_wait_for_vblanks(dev, state); 141 + 142 + for_each_plane_in_state(state, plane, old_plane_state, i) 143 + ipu_plane_disable_deferred(plane); 144 + 145 + } 138 146 139 147 drm_atomic_helper_commit_hw_done(state); 140 148 }
+21 -1
drivers/gpu/drm/imx/ipuv3-crtc.c
··· 60 60 ipu_di_enable(ipu_crtc->di); 61 61 } 62 62 63 + static void ipu_crtc_disable_planes(struct ipu_crtc *ipu_crtc, 64 + struct drm_crtc_state *old_crtc_state) 65 + { 66 + bool disable_partial = false; 67 + bool disable_full = false; 68 + struct drm_plane *plane; 69 + 70 + drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) { 71 + if (plane == &ipu_crtc->plane[0]->base) 72 + disable_full = true; 73 + if (ipu_crtc->plane[1] && plane == &ipu_crtc->plane[1]->base) 74 + disable_partial = true; 75 + } 76 + 77 + if (disable_partial) 78 + ipu_plane_disable(ipu_crtc->plane[1], true); 79 + if (disable_full) 80 + ipu_plane_disable(ipu_crtc->plane[0], false); 81 + } 82 + 63 83 static void ipu_crtc_atomic_disable(struct drm_crtc *crtc, 64 84 struct drm_crtc_state *old_crtc_state) 65 85 { ··· 93 73 * attached IDMACs will be left in undefined state, possibly hanging 94 74 * the IPU or even system. 95 75 */ 96 - drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false); 76 + ipu_crtc_disable_planes(ipu_crtc, old_crtc_state); 97 77 ipu_dc_disable(ipu); 98 78 99 79 spin_lock_irq(&crtc->dev->event_lock);
+19 -8
drivers/gpu/drm/imx/ipuv3-plane.c
··· 172 172 ipu_dp_enable_channel(ipu_plane->dp); 173 173 } 174 174 175 - static int ipu_disable_plane(struct drm_plane *plane) 175 + void ipu_plane_disable(struct ipu_plane *ipu_plane, bool disable_dp_channel) 176 176 { 177 - struct ipu_plane *ipu_plane = to_ipu_plane(plane); 178 - 179 177 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); 180 178 181 179 ipu_idmac_wait_busy(ipu_plane->ipu_ch, 50); 182 180 183 - if (ipu_plane->dp) 184 - ipu_dp_disable_channel(ipu_plane->dp, true); 181 + if (ipu_plane->dp && disable_dp_channel) 182 + ipu_dp_disable_channel(ipu_plane->dp, false); 185 183 ipu_idmac_disable_channel(ipu_plane->ipu_ch); 186 184 ipu_dmfc_disable_channel(ipu_plane->dmfc); 187 185 if (ipu_plane->dp) 188 186 ipu_dp_disable(ipu_plane->ipu); 189 - 190 - return 0; 191 187 } 188 + 189 + void ipu_plane_disable_deferred(struct drm_plane *plane) 190 + { 191 + struct ipu_plane *ipu_plane = to_ipu_plane(plane); 192 + 193 + if (ipu_plane->disabling) { 194 + ipu_plane->disabling = false; 195 + ipu_plane_disable(ipu_plane, false); 196 + } 197 + } 198 + EXPORT_SYMBOL_GPL(ipu_plane_disable_deferred); 192 199 193 200 static void ipu_plane_destroy(struct drm_plane *plane) 194 201 { ··· 363 356 static void ipu_plane_atomic_disable(struct drm_plane *plane, 364 357 struct drm_plane_state *old_state) 365 358 { 366 - ipu_disable_plane(plane); 359 + struct ipu_plane *ipu_plane = to_ipu_plane(plane); 360 + 361 + if (ipu_plane->dp) 362 + ipu_dp_disable_channel(ipu_plane->dp, true); 363 + ipu_plane->disabling = true; 367 364 } 368 365 369 366 static void ipu_plane_atomic_update(struct drm_plane *plane,
+5
drivers/gpu/drm/imx/ipuv3-plane.h
··· 23 23 24 24 int dma; 25 25 int dp_flow; 26 + 27 + bool disabling; 26 28 }; 27 29 28 30 struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu, ··· 43 41 void ipu_plane_put_resources(struct ipu_plane *plane); 44 42 45 43 int ipu_plane_irq(struct ipu_plane *plane); 44 + 45 + void ipu_plane_disable(struct ipu_plane *ipu_plane, bool disable_dp_channel); 46 + void ipu_plane_disable_deferred(struct drm_plane *plane); 46 47 47 48 #endif
-3
drivers/gpu/ipu-v3/ipu-dp.c
··· 277 277 writel(0, flow->base + DP_FG_POS); 278 278 ipu_srm_dp_update(priv->ipu, sync); 279 279 280 - if (ipu_idmac_channel_busy(priv->ipu, IPUV3_CHANNEL_MEM_BG_SYNC)) 281 - ipu_wait_interrupt(priv->ipu, IPU_IRQ_DP_SF_END, 50); 282 - 283 280 mutex_unlock(&priv->mutex); 284 281 } 285 282 EXPORT_SYMBOL_GPL(ipu_dp_disable_channel);