Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/msm: async commit support

Now that flush/wait/complete is decoupled from the "synchronous" part of
atomic commit_tail(), add support to defer flush to a timer that expires
shortly before vblank for async commits. In this way, multiple atomic
commits (for example, cursor updates) can be coalesced into a single
flush at the end of the frame.

v2: don't hold lock over ->wait_flush(), to avoid a locking interaction
that was causing an fps drop when combining page flips or non-async
atomic commits with lots of legacy cursor updates

Signed-off-by: Rob Clark <robdclark@chromium.org>
Reviewed-by: Sean Paul <sean@poorly.run>

+210 -1
+155 -1
drivers/gpu/drm/msm/msm_atomic.c
··· 26 26 return msm_framebuffer_prepare(new_state->fb, kms->aspace); 27 27 } 28 28 29 + static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx) 30 + { 31 + unsigned crtc_mask = BIT(crtc_idx); 32 + 33 + mutex_lock(&kms->commit_lock); 34 + 35 + if (!(kms->pending_crtc_mask & crtc_mask)) { 36 + mutex_unlock(&kms->commit_lock); 37 + return; 38 + } 39 + 40 + kms->pending_crtc_mask &= ~crtc_mask; 41 + 42 + kms->funcs->enable_commit(kms); 43 + 44 + /* 45 + * Flush hardware updates: 46 + */ 47 + DRM_DEBUG_ATOMIC("triggering async commit\n"); 48 + kms->funcs->flush_commit(kms, crtc_mask); 49 + mutex_unlock(&kms->commit_lock); 50 + 51 + /* 52 + * Wait for flush to complete: 53 + */ 54 + kms->funcs->wait_flush(kms, crtc_mask); 55 + 56 + mutex_lock(&kms->commit_lock); 57 + kms->funcs->complete_commit(kms, crtc_mask); 58 + mutex_unlock(&kms->commit_lock); 59 + kms->funcs->disable_commit(kms); 60 + } 61 + 62 + static enum hrtimer_restart msm_atomic_pending_timer(struct hrtimer *t) 63 + { 64 + struct msm_pending_timer *timer = container_of(t, 65 + struct msm_pending_timer, timer); 66 + struct msm_drm_private *priv = timer->kms->dev->dev_private; 67 + 68 + queue_work(priv->wq, &timer->work); 69 + 70 + return HRTIMER_NORESTART; 71 + } 72 + 73 + static void msm_atomic_pending_work(struct work_struct *work) 74 + { 75 + struct msm_pending_timer *timer = container_of(work, 76 + struct msm_pending_timer, work); 77 + 78 + msm_atomic_async_commit(timer->kms, timer->crtc_idx); 79 + } 80 + 81 + void msm_atomic_init_pending_timer(struct msm_pending_timer *timer, 82 + struct msm_kms *kms, int crtc_idx) 83 + { 84 + timer->kms = kms; 85 + timer->crtc_idx = crtc_idx; 86 + hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); 87 + timer->timer.function = msm_atomic_pending_timer; 88 + INIT_WORK(&timer->work, msm_atomic_pending_work); 89 + } 90 + 91 + static bool can_do_async(struct drm_atomic_state *state, 92 + struct drm_crtc **async_crtc) 93 + { 94 + struct 
drm_connector_state *connector_state; 95 + struct drm_connector *connector; 96 + struct drm_crtc_state *crtc_state; 97 + struct drm_crtc *crtc; 98 + int i, num_crtcs = 0; 99 + 100 + if (!(state->legacy_cursor_update || state->async_update)) 101 + return false; 102 + 103 + /* any connector change, means slow path: */ 104 + for_each_new_connector_in_state(state, connector, connector_state, i) 105 + return false; 106 + 107 + for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 108 + if (drm_atomic_crtc_needs_modeset(crtc_state)) 109 + return false; 110 + if (++num_crtcs > 1) 111 + return false; 112 + *async_crtc = crtc; 113 + } 114 + 115 + return true; 116 + } 117 + 29 118 /* Get bitmask of crtcs that will need to be flushed. The bitmask 30 119 * can be used with for_each_crtc_mask() iterator, to iterate 31 120 * effected crtcs without needing to preserve the atomic state. ··· 136 47 struct drm_device *dev = state->dev; 137 48 struct msm_drm_private *priv = dev->dev_private; 138 49 struct msm_kms *kms = priv->kms; 50 + struct drm_crtc *async_crtc = NULL; 139 51 unsigned crtc_mask = get_crtc_mask(state); 52 + bool async = kms->funcs->vsync_time && 53 + can_do_async(state, &async_crtc); 140 54 141 55 kms->funcs->enable_commit(kms); 56 + 57 + /* 58 + * Ensure any previous (potentially async) commit has 59 + * completed: 60 + */ 61 + kms->funcs->wait_flush(kms, crtc_mask); 62 + 63 + mutex_lock(&kms->commit_lock); 64 + 65 + /* 66 + * Now that there is no in-progress flush, prepare the 67 + * current update: 68 + */ 142 69 kms->funcs->prepare_commit(kms, state); 143 70 144 71 /* ··· 164 59 drm_atomic_helper_commit_planes(dev, state, 0); 165 60 drm_atomic_helper_commit_modeset_enables(dev, state); 166 61 62 + if (async) { 63 + struct msm_pending_timer *timer = 64 + &kms->pending_timers[drm_crtc_index(async_crtc)]; 65 + 66 + /* async updates are limited to single-crtc updates: */ 67 + WARN_ON(crtc_mask != drm_crtc_mask(async_crtc)); 68 + 69 + /* 70 + * Start timer if we 
don't already have an update pending 71 + * on this crtc: 72 + */ 73 + if (!(kms->pending_crtc_mask & crtc_mask)) { 74 + ktime_t vsync_time, wakeup_time; 75 + 76 + kms->pending_crtc_mask |= crtc_mask; 77 + 78 + vsync_time = kms->funcs->vsync_time(kms, async_crtc); 79 + wakeup_time = ktime_sub(vsync_time, ms_to_ktime(1)); 80 + 81 + hrtimer_start(&timer->timer, wakeup_time, 82 + HRTIMER_MODE_ABS); 83 + } 84 + 85 + kms->funcs->disable_commit(kms); 86 + mutex_unlock(&kms->commit_lock); 87 + 88 + /* 89 + * At this point, from drm core's perspective, we 90 + * are done with the atomic update, so we can just 91 + * go ahead and signal that it is done: 92 + */ 93 + drm_atomic_helper_commit_hw_done(state); 94 + drm_atomic_helper_cleanup_planes(dev, state); 95 + 96 + return; 97 + } 98 + 99 + /* 100 + * If there is any async flush pending on updated crtcs, fold 101 + * them into the current flush. 102 + */ 103 + kms->pending_crtc_mask &= ~crtc_mask; 104 + 167 105 /* 168 106 * Flush hardware updates: 169 107 */ ··· 215 67 kms->funcs->commit(kms, state); 216 68 } 217 69 kms->funcs->flush_commit(kms, crtc_mask); 70 + mutex_unlock(&kms->commit_lock); 218 71 72 + /* 73 + * Wait for flush to complete: 74 + */ 219 75 kms->funcs->wait_flush(kms, crtc_mask); 76 + 77 + mutex_lock(&kms->commit_lock); 220 78 kms->funcs->complete_commit(kms, crtc_mask); 79 + mutex_unlock(&kms->commit_lock); 221 80 kms->funcs->disable_commit(kms); 222 81 223 82 drm_atomic_helper_commit_hw_done(state); 224 - 225 83 drm_atomic_helper_cleanup_planes(dev, state); 226 84 }
+1
drivers/gpu/drm/msm/msm_drv.c
··· 473 473 ddev->mode_config.normalize_zpos = true; 474 474 475 475 if (kms) { 476 + kms->dev = ddev; 476 477 ret = kms->funcs->hw_init(kms); 477 478 if (ret) { 478 479 DRM_DEV_ERROR(dev, "kms hw init failed: %d\n", ret);
+4
drivers/gpu/drm/msm/msm_drv.h
··· 221 221 uint32_t pixel_format; 222 222 }; 223 223 224 + struct msm_pending_timer; 225 + 224 226 int msm_atomic_prepare_fb(struct drm_plane *plane, 225 227 struct drm_plane_state *new_state); 228 + void msm_atomic_init_pending_timer(struct msm_pending_timer *timer, 229 + struct msm_kms *kms, int crtc_idx); 226 230 void msm_atomic_commit_tail(struct drm_atomic_state *state); 227 231 struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev); 228 232 void msm_atomic_state_clear(struct drm_atomic_state *state);
+50
drivers/gpu/drm/msm/msm_kms.h
··· 33 33 34 34 /* 35 35 * Atomic commit handling: 36 + * 37 + * Note that in the case of async commits, the funcs which take 38 + * a crtc_mask (ie. ->flush_commit(), and ->complete_commit()) 39 + * might not be evenly balanced with ->prepare_commit(), however 40 + * each crtc that effected by a ->prepare_commit() (potentially 41 + * multiple times) will eventually (at end of vsync period) be 42 + * flushed and completed. 43 + * 44 + * This has some implications about tracking of cleanup state, 45 + * for example SMP blocks to release after commit completes. Ie. 46 + * cleanup state should be also duplicated in the various 47 + * duplicate_state() methods, as the current cleanup state at 48 + * ->complete_commit() time may have accumulated cleanup work 49 + * from multiple commits. 36 50 */ 37 51 38 52 /** ··· 58 44 */ 59 45 void (*enable_commit)(struct msm_kms *kms); 60 46 void (*disable_commit)(struct msm_kms *kms); 47 + 48 + /** 49 + * If the kms backend supports async commit, it should implement 50 + * this method to return the time of the next vsync. This is 51 + * used to determine a time slightly before vsync, for the async 52 + * commit timer to run and complete an async commit. 53 + */ 54 + ktime_t (*vsync_time)(struct msm_kms *kms, struct drm_crtc *crtc); 61 55 62 56 /** 63 57 * Prepare for atomic commit. This is called after any previous ··· 131 109 #endif 132 110 }; 133 111 112 + struct msm_kms; 113 + 114 + /* 115 + * A per-crtc timer for pending async atomic flushes. Scheduled to expire 116 + * shortly before vblank to flush pending async updates. 
117 + */ 118 + struct msm_pending_timer { 119 + struct hrtimer timer; 120 + struct work_struct work; 121 + struct msm_kms *kms; 122 + unsigned crtc_idx; 123 + }; 124 + 134 125 struct msm_kms { 135 126 const struct msm_kms_funcs *funcs; 127 + struct drm_device *dev; 136 128 137 129 /* irq number to be passed on to drm_irq_install */ 138 130 int irq; 139 131 140 132 /* mapper-id used to request GEM buffer mapped for scanout: */ 141 133 struct msm_gem_address_space *aspace; 134 + 135 + /* 136 + * For async commit, where ->flush_commit() and later happens 137 + * from the crtc's pending_timer close to end of the frame: 138 + */ 139 + struct mutex commit_lock; 140 + unsigned pending_crtc_mask; 141 + struct msm_pending_timer pending_timers[MAX_CRTCS]; 142 142 }; 143 143 144 144 static inline void msm_kms_init(struct msm_kms *kms, 145 145 const struct msm_kms_funcs *funcs) 146 146 { 147 + unsigned i; 148 + 149 + mutex_init(&kms->commit_lock); 147 150 kms->funcs = funcs; 151 + 152 + for (i = 0; i < ARRAY_SIZE(kms->pending_timers); i++) 153 + msm_atomic_init_pending_timer(&kms->pending_timers[i], kms, i); 148 154 } 149 155 150 156 struct msm_kms *mdp4_kms_init(struct drm_device *dev);