Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/msm: update cursors asynchronously through atomic

Add support to async updates of cursors by using the new atomic
interface for that. Basically what this commit does is do what
mdp5_update_cursor_plane_legacy() did but through atomic.

v5: call drm_atomic_helper_async_check() from the check hook

v4: add missing atomic async commit call to msm_atomic_commit (Archit Taneja)

v3: move size checks back to drivers (Ville Syrjälä)

v2: move fb setting to core and use new state (Eric Anholt)

Cc: Rob Clark <robdclark@gmail.com>
Cc: Archit Taneja <architt@codeaurora.org>
Signed-off-by: Gustavo Padovan <gustavo.padovan@collabora.com>
Tested-by: Archit Taneja <architt@codeaurora.org> (v4)
[added comment about not hitting async update path if hwpipes are
re-assigned or global state is touched]
Signed-off-by: Rob Clark <robdclark@gmail.com>

authored by

Gustavo Padovan and committed by
Rob Clark
224a4c97 1f920175

+75 -88
+63 -88
drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
··· 31 31 struct drm_crtc *crtc, struct drm_framebuffer *fb, 32 32 struct drm_rect *src, struct drm_rect *dest); 33 33 34 - static int mdp5_update_cursor_plane_legacy(struct drm_plane *plane, 35 - struct drm_crtc *crtc, 36 - struct drm_framebuffer *fb, 37 - int crtc_x, int crtc_y, 38 - unsigned int crtc_w, unsigned int crtc_h, 39 - uint32_t src_x, uint32_t src_y, 40 - uint32_t src_w, uint32_t src_h, 41 - struct drm_modeset_acquire_ctx *ctx); 42 - 43 34 static struct mdp5_kms *get_kms(struct drm_plane *plane) 44 35 { 45 36 struct msm_drm_private *priv = plane->dev->dev_private; ··· 246 255 }; 247 256 248 257 static const struct drm_plane_funcs mdp5_cursor_plane_funcs = { 249 - .update_plane = mdp5_update_cursor_plane_legacy, 258 + .update_plane = drm_atomic_helper_update_plane, 250 259 .disable_plane = drm_atomic_helper_disable_plane, 251 260 .destroy = mdp5_plane_destroy, 252 261 .atomic_set_property = mdp5_plane_atomic_set_property, ··· 478 487 } 479 488 } 480 489 490 + static int mdp5_plane_atomic_async_check(struct drm_plane *plane, 491 + struct drm_plane_state *state) 492 + { 493 + struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state); 494 + struct drm_crtc_state *crtc_state; 495 + 496 + crtc_state = drm_atomic_get_existing_crtc_state(state->state, 497 + state->crtc); 498 + if (WARN_ON(!crtc_state)) 499 + return -EINVAL; 500 + 501 + if (!crtc_state->active) 502 + return -EINVAL; 503 + 504 + mdp5_state = to_mdp5_plane_state(state); 505 + 506 + /* don't use fast path if we don't have a hwpipe allocated yet */ 507 + if (!mdp5_state->hwpipe) 508 + return -EINVAL; 509 + 510 + /* only allow changing of position(crtc x/y or src x/y) in fast path */ 511 + if (plane->state->crtc != state->crtc || 512 + plane->state->src_w != state->src_w || 513 + plane->state->src_h != state->src_h || 514 + plane->state->crtc_w != state->crtc_w || 515 + plane->state->crtc_h != state->crtc_h || 516 + !plane->state->fb || 517 + plane->state->fb != state->fb) 518 + return 
-EINVAL; 519 + 520 + return 0; 521 + } 522 + 523 + static void mdp5_plane_atomic_async_update(struct drm_plane *plane, 524 + struct drm_plane_state *new_state) 525 + { 526 + plane->state->src_x = new_state->src_x; 527 + plane->state->src_y = new_state->src_y; 528 + plane->state->crtc_x = new_state->crtc_x; 529 + plane->state->crtc_y = new_state->crtc_y; 530 + 531 + if (plane_enabled(new_state)) { 532 + struct mdp5_ctl *ctl; 533 + struct mdp5_pipeline *pipeline = 534 + mdp5_crtc_get_pipeline(plane->crtc); 535 + int ret; 536 + 537 + ret = mdp5_plane_mode_set(plane, new_state->crtc, new_state->fb, 538 + &new_state->src, &new_state->dst); 539 + WARN_ON(ret < 0); 540 + 541 + ctl = mdp5_crtc_get_ctl(new_state->crtc); 542 + 543 + mdp5_ctl_commit(ctl, pipeline, mdp5_plane_get_flush(plane)); 544 + } 545 + 546 + *to_mdp5_plane_state(plane->state) = 547 + *to_mdp5_plane_state(new_state); 548 + } 549 + 481 550 static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = { 482 551 .prepare_fb = mdp5_plane_prepare_fb, 483 552 .cleanup_fb = mdp5_plane_cleanup_fb, 484 553 .atomic_check = mdp5_plane_atomic_check, 485 554 .atomic_update = mdp5_plane_atomic_update, 555 + .atomic_async_check = mdp5_plane_atomic_async_check, 556 + .atomic_async_update = mdp5_plane_atomic_async_update, 486 557 }; 487 558 488 559 static void set_scanout_locked(struct mdp5_kms *mdp5_kms, ··· 1047 994 plane->fb = fb; 1048 995 1049 996 return ret; 1050 - } 1051 - 1052 - static int mdp5_update_cursor_plane_legacy(struct drm_plane *plane, 1053 - struct drm_crtc *crtc, struct drm_framebuffer *fb, 1054 - int crtc_x, int crtc_y, 1055 - unsigned int crtc_w, unsigned int crtc_h, 1056 - uint32_t src_x, uint32_t src_y, 1057 - uint32_t src_w, uint32_t src_h, 1058 - struct drm_modeset_acquire_ctx *ctx) 1059 - { 1060 - struct drm_plane_state *plane_state, *new_plane_state; 1061 - struct mdp5_plane_state *mdp5_pstate; 1062 - struct drm_crtc_state *crtc_state = crtc->state; 1063 - int ret; 1064 - 1065 - if 
(!crtc_state->active || drm_atomic_crtc_needs_modeset(crtc_state)) 1066 - goto slow; 1067 - 1068 - plane_state = plane->state; 1069 - mdp5_pstate = to_mdp5_plane_state(plane_state); 1070 - 1071 - /* don't use fast path if we don't have a hwpipe allocated yet */ 1072 - if (!mdp5_pstate->hwpipe) 1073 - goto slow; 1074 - 1075 - /* only allow changing of position(crtc x/y or src x/y) in fast path */ 1076 - if (plane_state->crtc != crtc || 1077 - plane_state->src_w != src_w || 1078 - plane_state->src_h != src_h || 1079 - plane_state->crtc_w != crtc_w || 1080 - plane_state->crtc_h != crtc_h || 1081 - !plane_state->fb || 1082 - plane_state->fb != fb) 1083 - goto slow; 1084 - 1085 - new_plane_state = mdp5_plane_duplicate_state(plane); 1086 - if (!new_plane_state) 1087 - return -ENOMEM; 1088 - 1089 - new_plane_state->src_x = src_x; 1090 - new_plane_state->src_y = src_y; 1091 - new_plane_state->src_w = src_w; 1092 - new_plane_state->src_h = src_h; 1093 - new_plane_state->crtc_x = crtc_x; 1094 - new_plane_state->crtc_y = crtc_y; 1095 - new_plane_state->crtc_w = crtc_w; 1096 - new_plane_state->crtc_h = crtc_h; 1097 - 1098 - ret = mdp5_plane_atomic_check_with_state(crtc_state, new_plane_state); 1099 - if (ret) 1100 - goto slow_free; 1101 - 1102 - if (new_plane_state->visible) { 1103 - struct mdp5_ctl *ctl; 1104 - struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(crtc); 1105 - 1106 - ret = mdp5_plane_mode_set(plane, crtc, fb, 1107 - &new_plane_state->src, 1108 - &new_plane_state->dst); 1109 - WARN_ON(ret < 0); 1110 - 1111 - ctl = mdp5_crtc_get_ctl(crtc); 1112 - 1113 - mdp5_ctl_commit(ctl, pipeline, mdp5_plane_get_flush(plane)); 1114 - } 1115 - 1116 - *to_mdp5_plane_state(plane_state) = 1117 - *to_mdp5_plane_state(new_plane_state); 1118 - 1119 - mdp5_plane_destroy_state(plane, new_plane_state); 1120 - 1121 - return 0; 1122 - slow_free: 1123 - mdp5_plane_destroy_state(plane, new_plane_state); 1124 - slow: 1125 - return drm_atomic_helper_update_plane(plane, crtc, fb, 1126 - 
crtc_x, crtc_y, crtc_w, crtc_h, 1127 - src_x, src_y, src_w, src_h, ctx); 1128 997 } 1129 998 1130 999 /*
+12
drivers/gpu/drm/msm/msm_atomic.c
··· 173 173 if (ret) 174 174 return ret; 175 175 176 + /* 177 + * Note that plane->atomic_async_check() should fail if we need 178 + * to re-assign hwpipe or anything that touches global atomic 179 + * state, so we'll never go down the async update path in those 180 + * cases. 181 + */ 182 + if (state->async_update) { 183 + drm_atomic_helper_async_commit(dev, state); 184 + drm_atomic_helper_cleanup_planes(dev, state); 185 + return 0; 186 + } 187 + 176 188 c = commit_init(state); 177 189 if (!c) { 178 190 ret = -ENOMEM;