Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/imx: atomic phase 3 step 1: Use atomic configuration

Replacing drm_crtc_helper_set_config() with drm_atomic_helper_set_config()
and converting the suspend/resume operations to atomic makes it possible
to use atomic configurations. All of these allow us to remove the
crtc_funcs->mode_set callback as it is no longer used. Also, change
the plane_funcs->update/disable_plane callbacks from the transitional
version to the atomic version. Furthermore, switching to the pure atomic
version of set_config callback means that we may implement CRTC/plane
atomic checks by using the new CRTC/plane states instead of the legacy
ones and we may remove the private ipu_crtc->enabled state which was left
there for the transitional atomic helpers in phase 1. Page flip is also
switched to the atomic version. Finally, the legacy function
drm_helper_disable_unused_functions() is removed from ->load in order
not to confuse the atomic driver.

Signed-off-by: Liu Ying <gnuiyl@gmail.com>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Philipp Zabel <p.zabel@pengutronix.de>

authored by

Liu Ying and committed by
Philipp Zabel
5f2f9115 032003c5

+114 -308
+74 -2
drivers/gpu/drm/imx/imx-drm-core.c
··· 15 15 */ 16 16 #include <linux/component.h> 17 17 #include <linux/device.h> 18 + #include <linux/dma-buf.h> 18 19 #include <linux/fb.h> 19 20 #include <linux/module.h> 20 21 #include <linux/platform_device.h> 22 + #include <linux/reservation.h> 21 23 #include <drm/drmP.h> 24 + #include <drm/drm_atomic.h> 25 + #include <drm/drm_atomic_helper.h> 22 26 #include <drm/drm_fb_helper.h> 23 27 #include <drm/drm_crtc_helper.h> 24 28 #include <drm/drm_gem_cma_helper.h> ··· 45 41 struct imx_drm_crtc *crtc[MAX_CRTC]; 46 42 unsigned int pipes; 47 43 struct drm_fbdev_cma *fbhelper; 44 + struct drm_atomic_state *state; 48 45 }; 49 46 50 47 struct imx_drm_crtc { ··· 174 169 static const struct drm_mode_config_funcs imx_drm_mode_config_funcs = { 175 170 .fb_create = drm_fb_cma_create, 176 171 .output_poll_changed = imx_drm_output_poll_changed, 172 + .atomic_check = drm_atomic_helper_check, 173 + .atomic_commit = drm_atomic_helper_commit, 174 + }; 175 + 176 + static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state) 177 + { 178 + struct drm_device *dev = state->dev; 179 + struct drm_crtc *crtc; 180 + struct drm_crtc_state *crtc_state; 181 + struct drm_plane_state *plane_state; 182 + struct drm_gem_cma_object *cma_obj; 183 + struct fence *excl; 184 + unsigned shared_count; 185 + struct fence **shared; 186 + unsigned int i, j; 187 + int ret; 188 + 189 + /* Wait for fences. 
*/ 190 + for_each_crtc_in_state(state, crtc, crtc_state, i) { 191 + plane_state = crtc->primary->state; 192 + if (plane_state->fb) { 193 + cma_obj = drm_fb_cma_get_gem_obj(plane_state->fb, 0); 194 + if (cma_obj->base.dma_buf) { 195 + ret = reservation_object_get_fences_rcu( 196 + cma_obj->base.dma_buf->resv, &excl, 197 + &shared_count, &shared); 198 + if (unlikely(ret)) 199 + DRM_ERROR("failed to get fences " 200 + "for buffer\n"); 201 + 202 + if (excl) { 203 + fence_wait(excl, false); 204 + fence_put(excl); 205 + } 206 + for (j = 0; j < shared_count; i++) { 207 + fence_wait(shared[j], false); 208 + fence_put(shared[j]); 209 + } 210 + } 211 + } 212 + } 213 + 214 + drm_atomic_helper_commit_modeset_disables(dev, state); 215 + 216 + drm_atomic_helper_commit_planes(dev, state, true); 217 + 218 + drm_atomic_helper_commit_modeset_enables(dev, state); 219 + 220 + drm_atomic_helper_commit_hw_done(state); 221 + 222 + drm_atomic_helper_wait_for_vblanks(dev, state); 223 + 224 + drm_atomic_helper_cleanup_planes(dev, state); 225 + } 226 + 227 + static struct drm_mode_config_helper_funcs imx_drm_mode_config_helpers = { 228 + .atomic_commit_tail = imx_drm_atomic_commit_tail, 177 229 }; 178 230 179 231 /* ··· 272 210 drm->mode_config.max_width = 4096; 273 211 drm->mode_config.max_height = 4096; 274 212 drm->mode_config.funcs = &imx_drm_mode_config_funcs; 213 + drm->mode_config.helper_private = &imx_drm_mode_config_helpers; 275 214 276 215 drm_mode_config_init(drm); 277 216 ··· 315 252 dev_warn(drm->dev, "Invalid legacyfb_depth. 
Defaulting to 16bpp\n"); 316 253 legacyfb_depth = 16; 317 254 } 318 - drm_helper_disable_unused_functions(drm); 319 255 imxdrm->fbhelper = drm_fbdev_cma_init(drm, legacyfb_depth, 320 256 drm->mode_config.num_crtc, MAX_CRTC); 321 257 if (IS_ERR(imxdrm->fbhelper)) { ··· 516 454 static int imx_drm_suspend(struct device *dev) 517 455 { 518 456 struct drm_device *drm_dev = dev_get_drvdata(dev); 457 + struct imx_drm_device *imxdrm; 519 458 520 459 /* The drm_dev is NULL before .load hook is called */ 521 460 if (drm_dev == NULL) ··· 524 461 525 462 drm_kms_helper_poll_disable(drm_dev); 526 463 464 + imxdrm = drm_dev->dev_private; 465 + imxdrm->state = drm_atomic_helper_suspend(drm_dev); 466 + if (IS_ERR(imxdrm->state)) { 467 + drm_kms_helper_poll_enable(drm_dev); 468 + return PTR_ERR(imxdrm->state); 469 + } 470 + 527 471 return 0; 528 472 } 529 473 530 474 static int imx_drm_resume(struct device *dev) 531 475 { 532 476 struct drm_device *drm_dev = dev_get_drvdata(dev); 477 + struct imx_drm_device *imx_drm; 533 478 534 479 if (drm_dev == NULL) 535 480 return 0; 536 481 537 - drm_helper_resume_force_mode(drm_dev); 482 + imx_drm = drm_dev->dev_private; 483 + drm_atomic_helper_resume(drm_dev, imx_drm->state); 538 484 drm_kms_helper_poll_enable(drm_dev); 539 485 540 486 return 0;
+27 -182
drivers/gpu/drm/imx/ipuv3-crtc.c
··· 24 24 #include <linux/fb.h> 25 25 #include <linux/clk.h> 26 26 #include <linux/errno.h> 27 - #include <linux/reservation.h> 28 - #include <linux/dma-buf.h> 29 27 #include <drm/drm_gem_cma_helper.h> 30 28 #include <drm/drm_fb_cma_helper.h> 31 29 ··· 32 34 #include "ipuv3-plane.h" 33 35 34 36 #define DRIVER_DESC "i.MX IPUv3 Graphics" 35 - 36 - enum ipu_flip_status { 37 - IPU_FLIP_NONE, 38 - IPU_FLIP_PENDING, 39 - IPU_FLIP_SUBMITTED, 40 - }; 41 - 42 - struct ipu_flip_work { 43 - struct work_struct unref_work; 44 - struct drm_gem_object *bo; 45 - struct drm_pending_vblank_event *page_flip_event; 46 - struct work_struct fence_work; 47 - struct ipu_crtc *crtc; 48 - struct fence *excl; 49 - unsigned shared_count; 50 - struct fence **shared; 51 - }; 52 37 53 38 struct ipu_crtc { 54 39 struct device *dev; ··· 43 62 44 63 struct ipu_dc *dc; 45 64 struct ipu_di *di; 46 - int enabled; 47 - enum ipu_flip_status flip_state; 48 - struct workqueue_struct *flip_queue; 49 - struct ipu_flip_work *flip_work; 50 65 int irq; 51 66 }; 52 67 ··· 52 75 { 53 76 struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent); 54 77 55 - if (ipu_crtc->enabled) 56 - return; 57 - 58 78 ipu_dc_enable(ipu); 59 79 ipu_dc_enable_channel(ipu_crtc->dc); 60 80 ipu_di_enable(ipu_crtc->di); 61 - ipu_crtc->enabled = 1; 62 - 63 - /* 64 - * In order not to be warned on enabling vblank failure, 65 - * we should call drm_crtc_vblank_on() after ->enabled is set to 1. 
66 - */ 67 - drm_crtc_vblank_on(&ipu_crtc->base); 68 81 } 69 82 70 83 static void ipu_crtc_disable(struct ipu_crtc *ipu_crtc) 71 84 { 72 85 struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent); 73 - 74 - if (!ipu_crtc->enabled) 75 - return; 86 + struct drm_crtc *crtc = &ipu_crtc->base; 76 87 77 88 ipu_dc_disable_channel(ipu_crtc->dc); 78 89 ipu_di_disable(ipu_crtc->di); 79 90 ipu_dc_disable(ipu); 80 - ipu_crtc->enabled = 0; 81 91 82 - drm_crtc_vblank_off(&ipu_crtc->base); 92 + spin_lock_irq(&crtc->dev->event_lock); 93 + if (crtc->state->event) { 94 + drm_crtc_send_vblank_event(crtc, crtc->state->event); 95 + crtc->state->event = NULL; 96 + } 97 + spin_unlock_irq(&crtc->dev->event_lock); 83 98 } 84 99 85 100 static void ipu_crtc_dpms(struct drm_crtc *crtc, int mode) ··· 92 123 } 93 124 } 94 125 95 - static void ipu_flip_unref_work_func(struct work_struct *__work) 96 - { 97 - struct ipu_flip_work *work = 98 - container_of(__work, struct ipu_flip_work, unref_work); 99 - 100 - drm_gem_object_unreference_unlocked(work->bo); 101 - kfree(work); 102 - } 103 - 104 - static void ipu_flip_fence_work_func(struct work_struct *__work) 105 - { 106 - struct ipu_flip_work *work = 107 - container_of(__work, struct ipu_flip_work, fence_work); 108 - int i; 109 - 110 - /* wait for all fences attached to the FB obj to signal */ 111 - if (work->excl) { 112 - fence_wait(work->excl, false); 113 - fence_put(work->excl); 114 - } 115 - for (i = 0; i < work->shared_count; i++) { 116 - fence_wait(work->shared[i], false); 117 - fence_put(work->shared[i]); 118 - } 119 - 120 - work->crtc->flip_state = IPU_FLIP_SUBMITTED; 121 - } 122 - 123 - static int ipu_page_flip(struct drm_crtc *crtc, 124 - struct drm_framebuffer *fb, 125 - struct drm_pending_vblank_event *event, 126 - uint32_t page_flip_flags) 127 - { 128 - struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0); 129 - struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); 130 - struct ipu_flip_work *flip_work; 131 - int ret; 132 - 
133 - if (ipu_crtc->flip_state != IPU_FLIP_NONE) 134 - return -EBUSY; 135 - 136 - ret = imx_drm_crtc_vblank_get(ipu_crtc->imx_crtc); 137 - if (ret) { 138 - dev_dbg(ipu_crtc->dev, "failed to acquire vblank counter\n"); 139 - list_del(&event->base.link); 140 - 141 - return ret; 142 - } 143 - 144 - flip_work = kzalloc(sizeof *flip_work, GFP_KERNEL); 145 - if (!flip_work) { 146 - ret = -ENOMEM; 147 - goto put_vblank; 148 - } 149 - INIT_WORK(&flip_work->unref_work, ipu_flip_unref_work_func); 150 - flip_work->page_flip_event = event; 151 - 152 - /* get BO backing the old framebuffer and take a reference */ 153 - flip_work->bo = &drm_fb_cma_get_gem_obj(crtc->primary->fb, 0)->base; 154 - drm_gem_object_reference(flip_work->bo); 155 - 156 - ipu_crtc->flip_work = flip_work; 157 - /* 158 - * If the object has a DMABUF attached, we need to wait on its fences 159 - * if there are any. 160 - */ 161 - if (cma_obj->base.dma_buf) { 162 - INIT_WORK(&flip_work->fence_work, ipu_flip_fence_work_func); 163 - flip_work->crtc = ipu_crtc; 164 - 165 - ret = reservation_object_get_fences_rcu( 166 - cma_obj->base.dma_buf->resv, &flip_work->excl, 167 - &flip_work->shared_count, &flip_work->shared); 168 - 169 - if (unlikely(ret)) { 170 - DRM_ERROR("failed to get fences for buffer\n"); 171 - goto free_flip_work; 172 - } 173 - 174 - /* No need to queue the worker if the are no fences */ 175 - if (!flip_work->excl && !flip_work->shared_count) { 176 - ipu_crtc->flip_state = IPU_FLIP_SUBMITTED; 177 - } else { 178 - ipu_crtc->flip_state = IPU_FLIP_PENDING; 179 - queue_work(ipu_crtc->flip_queue, 180 - &flip_work->fence_work); 181 - } 182 - } else { 183 - ipu_crtc->flip_state = IPU_FLIP_SUBMITTED; 184 - } 185 - 186 - if (crtc->primary->state) 187 - drm_atomic_set_fb_for_plane(crtc->primary->state, fb); 188 - 189 - return 0; 190 - 191 - free_flip_work: 192 - drm_gem_object_unreference_unlocked(flip_work->bo); 193 - kfree(flip_work); 194 - ipu_crtc->flip_work = NULL; 195 - put_vblank: 196 - 
imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc); 197 - 198 - return ret; 199 - } 200 - 201 126 static const struct drm_crtc_funcs ipu_crtc_funcs = { 202 - .set_config = drm_crtc_helper_set_config, 127 + .set_config = drm_atomic_helper_set_config, 203 128 .destroy = drm_crtc_cleanup, 204 - .page_flip = ipu_page_flip, 129 + .page_flip = drm_atomic_helper_page_flip, 205 130 .reset = drm_atomic_helper_crtc_reset, 206 131 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, 207 132 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, 208 133 }; 209 - 210 - static void ipu_crtc_handle_pageflip(struct ipu_crtc *ipu_crtc) 211 - { 212 - unsigned long flags; 213 - struct drm_device *drm = ipu_crtc->base.dev; 214 - struct ipu_flip_work *work = ipu_crtc->flip_work; 215 - 216 - spin_lock_irqsave(&drm->event_lock, flags); 217 - if (work->page_flip_event) 218 - drm_crtc_send_vblank_event(&ipu_crtc->base, 219 - work->page_flip_event); 220 - imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc); 221 - spin_unlock_irqrestore(&drm->event_lock, flags); 222 - } 223 134 224 135 static irqreturn_t ipu_irq_handler(int irq, void *dev_id) 225 136 { 226 137 struct ipu_crtc *ipu_crtc = dev_id; 227 138 228 139 imx_drm_handle_vblank(ipu_crtc->imx_crtc); 229 - 230 - if (ipu_crtc->flip_state == IPU_FLIP_SUBMITTED) { 231 - struct ipu_plane *plane = ipu_crtc->plane[0]; 232 - 233 - ipu_plane_set_base(plane, ipu_crtc->base.primary->fb); 234 - ipu_crtc_handle_pageflip(ipu_crtc); 235 - queue_work(ipu_crtc->flip_queue, 236 - &ipu_crtc->flip_work->unref_work); 237 - ipu_crtc->flip_state = IPU_FLIP_NONE; 238 - } 239 140 240 141 return IRQ_HANDLED; 241 142 } ··· 149 310 static int ipu_crtc_atomic_check(struct drm_crtc *crtc, 150 311 struct drm_crtc_state *state) 151 312 { 313 + u32 primary_plane_mask = 1 << drm_plane_index(crtc->primary); 314 + 315 + if (state->active && (primary_plane_mask & state->plane_mask) == 0) 316 + return -EINVAL; 317 + 152 318 return 0; 319 + } 320 + 321 + static void 
ipu_crtc_atomic_begin(struct drm_crtc *crtc, 322 + struct drm_crtc_state *old_crtc_state) 323 + { 324 + spin_lock_irq(&crtc->dev->event_lock); 325 + if (crtc->state->event) { 326 + WARN_ON(drm_crtc_vblank_get(crtc)); 327 + drm_crtc_arm_vblank_event(crtc, crtc->state->event); 328 + crtc->state->event = NULL; 329 + } 330 + spin_unlock_irq(&crtc->dev->event_lock); 153 331 } 154 332 155 333 static void ipu_crtc_mode_set_nofb(struct drm_crtc *crtc) ··· 227 371 static const struct drm_crtc_helper_funcs ipu_helper_funcs = { 228 372 .dpms = ipu_crtc_dpms, 229 373 .mode_fixup = ipu_crtc_mode_fixup, 230 - .mode_set = drm_helper_crtc_mode_set, 231 374 .mode_set_nofb = ipu_crtc_mode_set_nofb, 232 375 .prepare = ipu_crtc_prepare, 233 376 .commit = ipu_crtc_commit, 234 377 .atomic_check = ipu_crtc_atomic_check, 378 + .atomic_begin = ipu_crtc_atomic_begin, 235 379 }; 236 380 237 381 static int ipu_enable_vblank(struct drm_crtc *crtc) 238 382 { 239 383 struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); 240 - 241 - /* 242 - * ->commit is done after ->mode_set in drm_crtc_helper_set_mode(), 243 - * so waiting for vblank in drm_plane_helper_commit() will timeout. 244 - * Check the state here to avoid the waiting. 245 - */ 246 - if (!ipu_crtc->enabled) 247 - return -EINVAL; 248 384 249 385 enable_irq(ipu_crtc->irq); 250 386 ··· 356 508 /* Only enable IRQ when we actually need it to trigger work. */ 357 509 disable_irq(ipu_crtc->irq); 358 510 359 - ipu_crtc->flip_queue = create_singlethread_workqueue("ipu-crtc-flip"); 360 - 361 511 return 0; 362 512 363 513 err_put_plane1_res: ··· 400 554 401 555 imx_drm_remove_crtc(ipu_crtc->imx_crtc); 402 556 403 - destroy_workqueue(ipu_crtc->flip_queue); 404 557 ipu_put_resources(ipu_crtc); 405 558 if (ipu_crtc->plane[1]) 406 559 ipu_plane_put_resources(ipu_crtc->plane[1]);
+13 -122
drivers/gpu/drm/imx/ipuv3-plane.c
··· 14 14 */ 15 15 16 16 #include <drm/drmP.h> 17 + #include <drm/drm_atomic.h> 17 18 #include <drm/drm_atomic_helper.h> 18 19 #include <drm/drm_fb_cma_helper.h> 19 20 #include <drm/drm_gem_cma_helper.h> ··· 54 53 { 55 54 return ipu_idmac_channel_irq(ipu_plane->ipu, ipu_plane->ipu_ch, 56 55 IPU_IRQ_EOF); 57 - } 58 - 59 - int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb) 60 - { 61 - struct drm_gem_cma_object *cma_obj[3], *old_cma_obj[3]; 62 - struct drm_plane_state *state = ipu_plane->base.state; 63 - struct drm_framebuffer *old_fb = state->fb; 64 - unsigned long eba, ubo, vbo, old_eba, old_ubo, old_vbo; 65 - int active, i; 66 - int x = state->src_x >> 16; 67 - int y = state->src_y >> 16; 68 - 69 - for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) { 70 - cma_obj[i] = drm_fb_cma_get_gem_obj(fb, i); 71 - if (!cma_obj[i]) { 72 - DRM_DEBUG_KMS("plane %d entry is null.\n", i); 73 - return -EFAULT; 74 - } 75 - } 76 - 77 - for (i = 0; i < drm_format_num_planes(old_fb->pixel_format); i++) { 78 - old_cma_obj[i] = drm_fb_cma_get_gem_obj(old_fb, i); 79 - if (!old_cma_obj[i]) { 80 - DRM_DEBUG_KMS("plane %d entry is null.\n", i); 81 - return -EFAULT; 82 - } 83 - } 84 - 85 - eba = cma_obj[0]->paddr + fb->offsets[0] + 86 - fb->pitches[0] * y + (fb->bits_per_pixel >> 3) * x; 87 - 88 - if (eba & 0x7) { 89 - DRM_DEBUG_KMS("base address must be a multiple of 8.\n"); 90 - return -EINVAL; 91 - } 92 - 93 - if (fb->pitches[0] < 1 || fb->pitches[0] > 16384) { 94 - DRM_DEBUG_KMS("pitches out of range.\n"); 95 - return -EINVAL; 96 - } 97 - 98 - if (fb->pitches[0] != old_fb->pitches[0]) { 99 - DRM_DEBUG_KMS("pitches must not change while plane is enabled.\n"); 100 - return -EINVAL; 101 - } 102 - 103 - switch (fb->pixel_format) { 104 - case DRM_FORMAT_YUV420: 105 - case DRM_FORMAT_YVU420: 106 - /* 107 - * Multiplanar formats have to meet the following restrictions: 108 - * - The (up to) three plane addresses are EBA, EBA+UBO, EBA+VBO 109 - * - EBA, UBO 
and VBO are a multiple of 8 110 - * - UBO and VBO are unsigned and not larger than 0xfffff8 111 - * - Only EBA may be changed while scanout is active 112 - * - The strides of U and V planes must be identical. 113 - */ 114 - ubo = cma_obj[1]->paddr + fb->offsets[1] + 115 - fb->pitches[1] * y / 2 + x / 2 - eba; 116 - vbo = cma_obj[2]->paddr + fb->offsets[2] + 117 - fb->pitches[2] * y / 2 + x / 2 - eba; 118 - 119 - old_eba = old_cma_obj[0]->paddr + old_fb->offsets[0] + 120 - old_fb->pitches[0] * y + 121 - (old_fb->bits_per_pixel >> 3) * x; 122 - old_ubo = old_cma_obj[1]->paddr + old_fb->offsets[1] + 123 - old_fb->pitches[1] * y / 2 + x / 2 - old_eba; 124 - old_vbo = old_cma_obj[2]->paddr + old_fb->offsets[2] + 125 - old_fb->pitches[2] * y / 2 + x / 2 - old_eba; 126 - 127 - if ((ubo & 0x7) || (vbo & 0x7)) { 128 - DRM_DEBUG_KMS("U/V buffer offsets must be a multiple of 8.\n"); 129 - return -EINVAL; 130 - } 131 - 132 - if ((ubo > 0xfffff8) || (vbo > 0xfffff8)) { 133 - DRM_DEBUG_KMS("U/V buffer offsets must be positive and not larger than 0xfffff8.\n"); 134 - return -EINVAL; 135 - } 136 - 137 - if (old_ubo != ubo || old_vbo != vbo) { 138 - DRM_DEBUG_KMS("U/V buffer offsets must not change while plane is enabled.\n"); 139 - return -EINVAL; 140 - } 141 - 142 - if (fb->pitches[1] != fb->pitches[2]) { 143 - DRM_DEBUG_KMS("U/V pitches must be identical.\n"); 144 - return -EINVAL; 145 - } 146 - 147 - if (fb->pitches[1] < 1 || fb->pitches[1] > 16384) { 148 - DRM_DEBUG_KMS("U/V pitches out of range.\n"); 149 - return -EINVAL; 150 - } 151 - 152 - if (old_fb->pitches[1] != fb->pitches[1]) { 153 - DRM_DEBUG_KMS("U/V pitches must not change while plane is enabled.\n"); 154 - return -EINVAL; 155 - } 156 - 157 - dev_dbg(ipu_plane->base.dev->dev, 158 - "phys = %pad %pad %pad, x = %d, y = %d", 159 - &cma_obj[0]->paddr, &cma_obj[1]->paddr, 160 - &cma_obj[2]->paddr, x, y); 161 - break; 162 - default: 163 - dev_dbg(ipu_plane->base.dev->dev, "phys = %pad, x = %d, y = %d", 164 - 
&cma_obj[0]->paddr, x, y); 165 - break; 166 - } 167 - 168 - active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch); 169 - ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba); 170 - ipu_idmac_select_buffer(ipu_plane->ipu_ch, !active); 171 - 172 - return 0; 173 56 } 174 57 175 58 static inline unsigned long ··· 245 360 } 246 361 247 362 static const struct drm_plane_funcs ipu_plane_funcs = { 248 - .update_plane = drm_plane_helper_update, 249 - .disable_plane = drm_plane_helper_disable, 363 + .update_plane = drm_atomic_helper_update_plane, 364 + .disable_plane = drm_atomic_helper_disable_plane, 250 365 .destroy = ipu_plane_destroy, 251 366 .reset = drm_atomic_helper_plane_reset, 252 367 .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, ··· 265 380 266 381 /* Ok to disable */ 267 382 if (!fb) 268 - return old_fb ? 0 : -EINVAL; 383 + return 0; 384 + 385 + if (!state->crtc) 386 + return -EINVAL; 387 + 388 + crtc_state = 389 + drm_atomic_get_existing_crtc_state(state->state, state->crtc); 390 + if (WARN_ON(!crtc_state)) 391 + return -EINVAL; 269 392 270 393 /* CRTC should be enabled */ 271 - if (!state->crtc->enabled) 394 + if (!crtc_state->enable) 272 395 return -EINVAL; 273 396 274 397 /* no scaling */ 275 398 if (state->src_w >> 16 != state->crtc_w || 276 399 state->src_h >> 16 != state->crtc_h) 277 400 return -EINVAL; 278 - 279 - crtc_state = state->crtc->state; 280 401 281 402 switch (plane->type) { 282 403 case DRM_PLANE_TYPE_PRIMARY:
-2
drivers/gpu/drm/imx/ipuv3-plane.h
··· 37 37 uint32_t src_x, uint32_t src_y, uint32_t src_w, 38 38 uint32_t src_h, bool interlaced); 39 39 40 - int ipu_plane_set_base(struct ipu_plane *plane, struct drm_framebuffer *fb); 41 - 42 40 int ipu_plane_get_resources(struct ipu_plane *plane); 43 41 void ipu_plane_put_resources(struct ipu_plane *plane); 44 42